import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array, array_to_img
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import cv2
import gradio as gr
from PIL import Image
import io
import tempfile
from datetime import datetime
# Global variables
model = None
class_labels = {0: 'no', 1: 'yes'}
IMG_WIDTH, IMG_HEIGHT = 128, 128
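# The classifier takes 128x128 RGB inputs and returns a single score in [0, 1]:
# scores that round to 1 map to 'yes' (tumor), scores that round to 0 map to 'no'.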
# --- MODEL LOADING FUNCTION ---
def load_brain_tumor_model():
"""Load the brain tumor detection model from the file system"""
global model
# Common model file names to check
model_paths = [
'brain_tumor_classifier_v3.h5',
'model.h5',
'brain_tumor_model.h5',
'brain_tumor_classifier.h5'
]
for model_path in model_paths:
if os.path.exists(model_path):
try:
model = load_model(model_path)
print(f"βœ… Model loaded successfully from {model_path}")
return True
except Exception as e:
print(f"❌ Error loading model from {model_path}: {str(e)}")
continue
print("❌ No valid model file found. Please ensure your model is in the root directory.")
return False
# Load model on startup
model_loaded = load_brain_tumor_model()
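# model_loaded gates the rest of the app: the status banner, the missing-model
# warning, and the analysis handlers all check this flag before calling the model.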
# --- IMAGE PREPROCESSING FUNCTIONS ---
def preprocess_image(image, target_size=(128, 128)):
"""
Preprocess uploaded image for model prediction
"""
if image is None:
return None, None, None, "No image provided"
try:
# Convert to PIL Image if needed
if not isinstance(image, Image.Image):
image = Image.fromarray(image)
# Convert to RGB if needed
if image.mode != 'RGB':
image = image.convert('RGB')
# Resize image
image_resized = image.resize(target_size, Image.Resampling.LANCZOS)
# Convert to grayscale for display (optional)
image_gray = image_resized.convert('L').convert('RGB')
# Convert to array and normalize
img_array = img_to_array(image_resized) / 255.0
return image_resized, image_gray, img_array, "✅ Image preprocessed successfully"
except Exception as e:
return None, None, None, f"❌ Error preprocessing image: {str(e)}"
# --- ENHANCED GRAD-CAM++ FUNCTIONS ---
def make_gradcampp_heatmap(img_array, model, last_conv_layer_name='last_conv_layer', pred_index=None):
"""
Creates an improved Grad-CAM++ heatmap with better numerical stability.
"""
if model is None:
return None
try:
grad_model = tf.keras.models.Model(
inputs=model.input,
outputs=[model.get_layer(last_conv_layer_name).output, model.output]
)
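# grad_model maps the input image to both the activations of the target conv layer
# and the final predictions, so gradients of the class score can be taken
# with respect to the feature maps.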
with tf.GradientTape(persistent=True) as tape1:
with tf.GradientTape(persistent=True) as tape2:
with tf.GradientTape() as tape3:
conv_outputs, predictions = grad_model(img_array)
if pred_index is None:
pred_index = tf.argmax(predictions[0])
class_channel = predictions[:, pred_index]
grads = tape3.gradient(class_channel, conv_outputs)
first_derivative = tape2.gradient(class_channel, conv_outputs)
second_derivative = tape1.gradient(first_derivative, conv_outputs)
del tape1, tape2
eps = 1e-8
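# Grad-CAM++ weighting: alphas = second_derivative / (2*second_derivative + sum(A * grads)),
# with the denominator clamped away from zero. Channel weights sum the alpha-weighted,
# ReLU'd gradients over the spatial dimensions and are then softmax-normalized across channels.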
alpha_num = second_derivative
alpha_denom = 2.0 * second_derivative + tf.reduce_sum(conv_outputs * grads, axis=[1, 2], keepdims=True)
alpha_denom = tf.where(tf.abs(alpha_denom) < eps, tf.ones_like(alpha_denom) * eps, alpha_denom)
alphas = alpha_num / alpha_denom
weights = tf.reduce_sum(alphas * tf.nn.relu(grads), axis=[1, 2])
weights = tf.nn.softmax(weights, axis=-1)
weights_reshaped = tf.reshape(weights, (1, 1, 1, -1))
heatmap = tf.reduce_sum(weights_reshaped * conv_outputs, axis=-1)
heatmap = tf.squeeze(heatmap, axis=0)
heatmap = tf.nn.relu(heatmap)
heatmap_np = heatmap.numpy()
heatmap_min = np.min(heatmap_np)
heatmap_max = np.max(heatmap_np)
if heatmap_max > heatmap_min:
heatmap_np = (heatmap_np - heatmap_min) / (heatmap_max - heatmap_min)
else:
heatmap_np = np.zeros_like(heatmap_np)
return heatmap_np
except Exception as e:
print(f"Error in Grad-CAM++: {str(e)}")
return None
def create_heatmap_visualizations(heatmap, img_shape):
"""Create multiple heatmap visualizations with different color schemes"""
heatmap_resized = cv2.resize(heatmap, (img_shape[1], img_shape[0]), interpolation=cv2.INTER_CUBIC)
heatmap_smooth = cv2.GaussianBlur(heatmap_resized, (5, 5), 0)
heatmap_enhanced = cv2.equalizeHist(np.uint8(255 * heatmap_smooth)) / 255.0
visualizations = {
'jet': {'heatmap': heatmap_smooth, 'colormap': 'jet', 'title': 'Jet Heatmap'},
'hot': {'heatmap': heatmap_smooth, 'colormap': 'hot', 'title': 'Hot Heatmap'},
'plasma': {'heatmap': heatmap_enhanced, 'colormap': 'plasma', 'title': 'Plasma Heatmap'},
'viridis': {'heatmap': heatmap_enhanced, 'colormap': 'viridis', 'title': 'Viridis Heatmap'},
'inferno': {'heatmap': heatmap_smooth, 'colormap': 'inferno', 'title': 'Inferno Heatmap'},
'cool': {'heatmap': heatmap_smooth, 'colormap': 'cool', 'title': 'Cool Heatmap'}
}
return visualizations
def superimpose_gradcam_enhanced(img, heatmap, colormap='jet', alpha=0.4):
"""Enhanced superimposition with different colormaps"""
if not isinstance(img, np.ndarray):
img = img_to_array(img)
if img.max() > 1.0:
img = img / 255.0
heatmap_resized = cv2.resize(heatmap, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_CUBIC)
heatmap_uint8 = np.uint8(255 * heatmap_resized)
if hasattr(plt, 'colormaps'):
cmap = plt.colormaps[colormap]
else:
cmap = cm.get_cmap(colormap)
colored_heatmap = cmap(heatmap_uint8)[:, :, :3]
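# Gamma-compress both the image and the colored heatmap (exponent 1/2.2), blend them
# with the given alpha, then invert the gamma and clip the result back to [0, 1].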
gamma = 2.2
img_gamma = np.power(img, 1/gamma)
colored_heatmap_gamma = np.power(colored_heatmap, 1/gamma)
blended_gamma = (colored_heatmap_gamma * alpha) + (img_gamma * (1 - alpha))
superimposed_img_float = np.power(blended_gamma, gamma)
superimposed_img_float = np.clip(superimposed_img_float, 0, 1)
return superimposed_img_float
# --- PREDICTION AND VISUALIZATION FUNCTIONS ---
def predict_brain_tumor(image):
"""Make prediction on uploaded image"""
if not model_loaded or model is None:
return "❌ Model not available. Please check if the model file exists in the space.", None, None
if image is None:
return "❌ No image provided.", None, None
try:
# Preprocess image
processed_img, gray_img, img_array, preprocess_msg = preprocess_image(image)
if processed_img is None:
return preprocess_msg, None, None
# Make prediction
img_batch = np.expand_dims(img_array, axis=0)
prediction = model.predict(img_batch, verbose=0)[0][0]
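# Single sigmoid-style output: rounding gives the class index, and confidence is
# the probability assigned to the predicted class.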
# Interpret results
predicted_class = int(round(prediction))
predicted_label = class_labels[predicted_class]
confidence = prediction if predicted_class == 1 else 1 - prediction
# Create result message
if predicted_class == 1:
status_emoji = "⚠️"
status_text = "**TUMOR DETECTED**"
status_color = "red"
else:
status_emoji = "✅"
status_text = "**NO TUMOR DETECTED**"
status_color = "green"
result_msg = f"""
## 🧠 Brain Tumor Detection Results
**Prediction:** {predicted_label.upper()}
**Confidence:** {confidence:.1%}
**Raw Score:** {prediction:.4f}
{status_emoji} {status_text}
"""
return result_msg, processed_img, gray_img
except Exception as e:
return f"❌ Error during prediction: {str(e)}", None, None
def create_detailed_analysis(image):
"""Create comprehensive Grad-CAM++ analysis"""
if not model_loaded or model is None or image is None:
return "❌ Please upload an image for analysis."
try:
# Preprocess and predict
processed_img, gray_img, img_array, _ = preprocess_image(image)
img_batch = np.expand_dims(img_array, axis=0)
prediction = model.predict(img_batch, verbose=0)[0][0]
predicted_class = int(round(prediction))
predicted_label = class_labels[predicted_class]
confidence = prediction if predicted_class == 1 else 1 - prediction
# Generate heatmap
heatmap = make_gradcampp_heatmap(img_batch, model)
if heatmap is None:
return "❌ Error generating heatmap."
# Create visualizations
visualizations = create_heatmap_visualizations(heatmap, img_array.shape)
# Create comprehensive plot with 4 rows to accommodate all visualizations
fig = plt.figure(figsize=(20, 16))
color = 'green' if predicted_class == 0 else 'red'
fig.suptitle(f'Comprehensive Grad-CAM++ Analysis\nPredicted: {predicted_label.upper()} ({confidence:.2%})',
fontsize=16, fontweight='bold', color=color)
# Row 1: Original image and heatmaps
# Original image
plt.subplot(4, 5, 1)
plt.imshow(processed_img)
plt.title("Original Image", fontsize=12, fontweight='bold')
plt.axis('off')
# Different heatmap visualizations (4 in first row)
viz_names = ['jet', 'hot', 'plasma', 'viridis']
for i, viz_name in enumerate(viz_names):
viz = visualizations[viz_name]
plt.subplot(4, 5, i + 2)
im = plt.imshow(viz['heatmap'], cmap=viz['colormap'])
plt.title(viz['title'], fontsize=12)
plt.axis('off')
plt.colorbar(im, fraction=0.046, pad=0.04)
# Row 2: Remaining heatmaps, attention profile, and statistics
# More heatmap styles
viz_names2 = ['inferno', 'cool']
for i, viz_name in enumerate(viz_names2):
viz = visualizations[viz_name]
plt.subplot(4, 5, i + 6)
im = plt.imshow(viz['heatmap'], cmap=viz['colormap'])
plt.title(viz['title'], fontsize=12)
plt.axis('off')
plt.colorbar(im, fraction=0.046, pad=0.04)
# Attention profile
plt.subplot(4, 5, 8)
attention_profile = np.mean(heatmap, axis=1)
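# Averaging the heatmap across its width gives a per-row profile of where
# attention falls along the image height.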
plt.plot(attention_profile, range(len(attention_profile)), 'b-', linewidth=2)
plt.title('Vertical Attention Profile', fontsize=12)
plt.xlabel('Attention Intensity')
plt.ylabel('Image Height')
plt.gca().invert_yaxis()
plt.grid(True, alpha=0.3)
# Statistics
plt.subplot(4, 5, 9)
stats_text = f"""Heatmap Statistics:
Mean: {np.mean(heatmap):.3f}
Std: {np.std(heatmap):.3f}
Max: {np.max(heatmap):.3f}
Min: {np.min(heatmap):.3f}
Prediction:
Confidence: {confidence:.1%}
Raw Score: {prediction:.4f}
Class: {predicted_label}"""
plt.text(0.1, 0.5, stats_text, transform=plt.gca().transAxes, fontsize=10,
verticalalignment='center', bbox=dict(boxstyle="round,pad=0.3", facecolor="lightblue"))
plt.axis('off')
# Empty space for symmetry
plt.subplot(4, 5, 10)
plt.axis('off')
# Rows 3-4: All 6 superimposed views in 2 rows of 3
superimposed_colormaps = ['jet', 'hot', 'plasma', 'viridis', 'inferno', 'cool']
# Row 3: First 3 superimposed views (positions 11-13)
for i, cmap_name in enumerate(superimposed_colormaps[:3]):
superimposed_img = superimpose_gradcam_enhanced(img_array, heatmap, colormap=cmap_name, alpha=0.4)
plt.subplot(4, 5, i + 11)
plt.imshow(superimposed_img)
plt.title(f'Superimposed ({cmap_name.title()})', fontsize=12)
plt.axis('off')
# Empty spaces in row 3
plt.subplot(4, 5, 14)
plt.axis('off')
plt.subplot(4, 5, 15)
plt.axis('off')
# Row 4: Last 3 superimposed views (positions 16-18)
for i, cmap_name in enumerate(superimposed_colormaps[3:]):
superimposed_img = superimpose_gradcam_enhanced(img_array, heatmap, colormap=cmap_name, alpha=0.4)
plt.subplot(4, 5, i + 16)
plt.imshow(superimposed_img)
plt.title(f'Superimposed ({cmap_name.title()})', fontsize=12)
plt.axis('off')
# Empty spaces in row 4
plt.subplot(4, 5, 19)
plt.axis('off')
plt.subplot(4, 5, 20)
plt.axis('off')
plt.tight_layout()
plt.subplots_adjust(top=0.92)
# Save to temporary file and return
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.png')
plt.savefig(temp_file.name, dpi=300, bbox_inches='tight')
plt.close()
return temp_file.name
except Exception as e:
return f"❌ Error creating detailed analysis: {str(e)}"
def create_quick_analysis(image):
"""Create quick 2x3 comparison view"""
if not model_loaded or model is None or image is None:
return "❌ Please upload an image for analysis."
try:
# Preprocess and predict
processed_img, gray_img, img_array, _ = preprocess_image(image)
img_batch = np.expand_dims(img_array, axis=0)
prediction = model.predict(img_batch, verbose=0)[0][0]
predicted_class = int(round(prediction))
predicted_label = class_labels[predicted_class]
confidence = prediction if predicted_class == 1 else 1 - prediction
# Generate heatmap
heatmap = make_gradcampp_heatmap(img_batch, model)
if heatmap is None:
return "❌ Error generating heatmap."
# Create quick visualization with 3x3 layout to accommodate all colormaps
fig, axes = plt.subplots(3, 3, figsize=(15, 15))
color = 'green' if predicted_class == 0 else 'red'
fig.suptitle(f'Quick Grad-CAM++ Analysis | Predicted: {predicted_label.upper()} ({confidence:.2%})',
fontsize=14, fontweight='bold', color=color)
# Row 1: Original image and two main heatmaps
axes[0, 0].imshow(processed_img)
axes[0, 0].set_title("Original Image")
axes[0, 0].axis('off')
# Jet heatmap
heatmap_resized = cv2.resize(heatmap, (IMG_WIDTH, IMG_HEIGHT))
im1 = axes[0, 1].imshow(heatmap_resized, cmap='jet')
axes[0, 1].set_title("Jet Heatmap")
axes[0, 1].axis('off')
plt.colorbar(im1, ax=axes[0, 1], fraction=0.046)
# Plasma heatmap
im2 = axes[0, 2].imshow(heatmap_resized, cmap='plasma')
axes[0, 2].set_title("Plasma Heatmap")
axes[0, 2].axis('off')
plt.colorbar(im2, ax=axes[0, 2], fraction=0.046)
# Rows 2-3: All 6 superimposed views
superimposed_colormaps = ['jet', 'hot', 'plasma', 'viridis', 'inferno', 'cool']
# Row 2: First 3 superimposed views
for i, cmap_name in enumerate(superimposed_colormaps[:3]):
superimposed_img = superimpose_gradcam_enhanced(img_array, heatmap, cmap_name)
axes[1, i].imshow(superimposed_img)
axes[1, i].set_title(f"Superimposed ({cmap_name.title()})")
axes[1, i].axis('off')
# Row 3: Last 3 superimposed views
for i, cmap_name in enumerate(superimposed_colormaps[3:]):
superimposed_img = superimpose_gradcam_enhanced(img_array, heatmap, cmap_name)
axes[2, i].imshow(superimposed_img)
axes[2, i].set_title(f"Superimposed ({cmap_name.title()})")
axes[2, i].axis('off')
plt.tight_layout()
# Save to temporary file and return
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.png')
plt.savefig(temp_file.name, dpi=300, bbox_inches='tight')
plt.close()
return temp_file.name
except Exception as e:
return f"❌ Error creating quick analysis: {str(e)}"
# --- GRADIO APP INTERFACE ---
def create_gradio_app():
"""Create the main Gradio interface"""
# Custom CSS for better styling
custom_css = """
h2, h3{
margin: 2.5rem 0;
}
.main-header {
text-align: center;
margin-bottom: 5rem;
h1{
font-size: 2.5rem; /* Adjust as needed */
font-weight: bold;
text-align: center;
letter-spacing: -0.025em;
margin-bottom: 1rem;
/* Gradient masking */
background: linear-gradient(90deg, #667eea 0%, #764ba2 100%);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
}
p{
font-size: 1.25rem;
}
}
"""
theme = gr.themes.Base(
primary_hue="indigo",
)
with gr.Blocks(title="🧠 Bioset - Brain Tumor MRI Detection", theme=theme, css=custom_css) as app:
gr.HTML("""
<div class="main-header">
<h1>🧠 Bioset - Brain Tumor MRI Detection</h1>
<p>Advanced AI-powered MRI analysis with explainable attention visualization using Enhanced Grad-CAM++.</p>
</div>
""")
# Model status display
model_status = "✅ Model loaded successfully" if model_loaded else "❌ Model not available"
gr.Markdown(f"**Model Status:** {model_status}")
gr.Markdown("Please read the disclaimer at the bottom of the page first before use.")
if not model_loaded:
gr.Markdown("⚠️ **Warning**: Model file not found. Please ensure your trained model (.h5) is in the space's root directory.")
gr.Markdown("""
---
## 📖 How to Use:
1. **Upload an MRI brain scan** (JPEG, PNG, or other image formats)
2. **View automatic preprocessing** and prediction results
3. **Choose analysis type**: Quick for rapid assessment, Detailed for comprehensive visualization
4. **Download results** for further analysis or documentation
""")
gr.Markdown("""
## Model Statistics:
- **accuracy:** `0.9913`
- **val_accuracy:** `0.8824`
""")
gr.Markdown("""
### Classification Report:
| Class | Precision | Recall | F1-Score | Support |
|---------------|-----------|--------|----------|---------|
| **no** | 0.89 | 0.85 | 0.87 | 20 |
| **yes** | 0.91 | 0.94 | 0.92 | 31 |
| **accuracy** | | | 0.90 | 51 |
| **macro avg** | 0.90 | 0.89 | 0.90 | 51 |
| **weighted avg** | 0.90 | 0.90 | 0.90 | 51 |
""")
with gr.Row():
with gr.Column(scale=2):
input_image = gr.Image(
label="πŸ“€ Upload MRI Brain Scan",
type="pil",
height=400
)
with gr.Column(scale=1):
gr.Markdown("### πŸ”„ Preprocessing Preview")
processed_image = gr.Image(
label="Processed (128x128 RGB)",
height=180,
interactive=False
)
grayscale_image = gr.Image(
label="Grayscale Preview",
height=180,
interactive=False
)
# Prediction results
gr.Markdown("## 🎯 Prediction Results")
prediction_output = gr.Markdown(value="Upload an image to see predictions...")
# Analysis buttons
gr.Markdown("## πŸ”¬ Grad-CAM++ Analysis")
gr.Markdown("Choose your preferred analysis type:")
with gr.Row():
quick_btn = gr.Button(
"⚑ Quick Analysis (2x3 Grid)",
variant="secondary",
size="lg",
scale=1
)
detailed_btn = gr.Button(
"πŸ”¬ Detailed Analysis (3x5 Grid)",
variant="primary",
size="lg",
scale=1
)
# Analysis output
analysis_output = gr.Image(
label="πŸ“Š Analysis Results",
height=700,
interactive=False,
show_download_button=True
)
# Information sections
with gr.Row():
with gr.Column():
gr.Markdown("""
### ⚡ Quick Analysis Features:
- **3x3 Grid Layout** for a fast, at-a-glance overview
- **Original Image** with preprocessing
- **Jet & Plasma Heatmaps** with colorbars
- **6 Superimposed Views** (All color schemes)
- **Fast Processing** (~3-4 seconds)
- **Perfect for screening** multiple images
---
""")
with gr.Column():
gr.Markdown("""
### 🔬 Detailed Analysis Features:
- **4x5 Grid Layout** for comprehensive analysis
- **6 Heatmap Color Schemes** with individual colorbars
- **Attention Profile Plot** showing vertical focus
- **Statistical Analysis Panel** with quantitative metrics
- **6 Enhanced Superimposed Views** with gamma correction
- **Clinical-grade visualization** for detailed examination
---
""")
gr.Markdown("""
### 🎨 Color Scheme Guide:
- **🔥 Jet**: Classic blue → green → yellow → red progression (high contrast)
- **🌋 Hot**: Black → red → orange → yellow (heat-like visualization)
- **🌌 Plasma**: Purple → pink → yellow (perceptually uniform)
- **🌿 Viridis**: Dark blue → green → yellow (perceptually uniform)
- **🔥 Inferno**: Black → purple → red → yellow (high contrast heat)
- **❄️ Cool**: Cyan → blue → magenta (cool color palette)
### 📊 Understanding the Results:
- **Bright regions** in heatmaps indicate areas the AI model focuses on
- **Different color schemes** can reveal different aspects of attention patterns
- **Confidence scores** above 80% are generally considered reliable
- **Superimposed views** help correlate AI attention with anatomical structures
""")
# Footer
gr.Markdown("""
---
**⚠️ Medical Disclaimer**: This tool is for research and educational purposes only.
Always consult qualified medical professionals for clinical diagnosis and treatment decisions.
""")
# Event handlers
def predict_and_update(image):
result, processed, grayscale = predict_brain_tumor(image)
return result, processed, grayscale
def quick_analysis_handler(image):
if not model_loaded:
return None
result = create_quick_analysis(image)
# The analysis functions return a PNG path on success and an error string on failure;
# gr.Image can only display a valid file path, so anything else becomes None.
return result if isinstance(result, str) and os.path.exists(result) else None
def detailed_analysis_handler(image):
if not model_loaded:
return None
result = create_detailed_analysis(image)
return result if isinstance(result, str) and os.path.exists(result) else None
# Connect event handlers
input_image.change(
fn=predict_and_update,
inputs=[input_image],
outputs=[prediction_output, processed_image, grayscale_image]
)
quick_btn.click(
fn=quick_analysis_handler,
inputs=[input_image],
outputs=[analysis_output]
)
detailed_btn.click(
fn=detailed_analysis_handler,
inputs=[input_image],
outputs=[analysis_output]
)
return app
# --- LAUNCH THE APP ---
if __name__ == "__main__":
app = create_gradio_app()
app.launch()