import streamlit as st
st.set_page_config(layout="wide")
import streamlit.components.v1 as components
import cv2
from PIL import Image
import base64
import os
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from torch.utils.data import DataLoader
from io import BytesIO
from gradcam import GradCAM # Import your GradCAM class
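# Assumption about the imported GradCAM class (its source is not shown here):
# it follows the common hook-based pattern -- registering forward/backward
# hooks on `target_layer` -- and its generate() method takes a preprocessed
# input tensor plus a target class index and returns a 2-D class-activation
# map normalized to [0, 1], which is how it is consumed below when colorized
# with matplotlib's jet colormap.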
from sklearn.metrics import classification_report, confusion_matrix, roc_curve, auc, precision_recall_curve, average_precision_score
from sklearn.preprocessing import label_binarize
import seaborn as sns
import torch
import torch.nn as nn
import torchvision.models as models
from torchvision import datasets, transforms
import io
import warnings
warnings.filterwarnings("ignore")
# Path to your logo image
logo_path = "pytorch.png"
main_bg_ext = 'png'
# Read and encode the logo image
with open(logo_path, "rb") as image_file:
    encoded_logo = base64.b64encode(image_file.read()).decode()
if "framework" not in st.session_state:
st.session_state.framework = "Tensorflow"
if "menu" not in st.session_state:
st.session_state.menu = "3"
if st.session_state.menu =="1":
st.session_state.show_summary = True
st.session_state.show_arch = False
st.session_state.show_desc = False
elif st.session_state.menu =="2":
st.session_state.show_arch = True
st.session_state.show_summary = False
st.session_state.show_desc = False
elif st.session_state.menu =="3":
st.session_state.show_arch = False
st.session_state.show_summary = False
st.session_state.show_desc = True
else:
st.session_state.show_desc = True
def encode_image(image_path):
    with open(image_path, "rb") as img_file:
        return base64.b64encode(img_file.read()).decode()
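# Used to inline images into HTML/CSS as data URIs, e.g. (illustrative):
# f"data:image/{main_bg_ext};base64,{encode_image(logo_path)}"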
# **************************************************
# Loading the PyTorch model
# **************************************************
# Define the CustomVGG16 model
class CustomVGG16(nn.Module):
    def __init__(self, num_classes=2):
        super(CustomVGG16, self).__init__()
        base_model = models.vgg16(weights=None)  # weights are loaded from the checkpoint below
        self.features = base_model.features
        self.avgpool = nn.AdaptiveAvgPool2d((2, 2))
        self.flatten = nn.Flatten()
        self.fc1 = nn.Linear(512 * 2 * 2, 512)
        self.bn1 = nn.BatchNorm1d(512)
        self.dropout = nn.Dropout(0.5)
        self.fc2 = nn.Linear(512, num_classes)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = self.flatten(x)
        x = self.fc1(x)
        x = self.bn1(x)
        x = torch.relu(x)
        x = self.dropout(x)
        x = self.fc2(x)
        x = self.softmax(x)  # forward() therefore returns probabilities, not logits
        return x
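# Head layout implied by the code above: the 2x2 adaptive pooling leaves a
# 512 * 2 * 2 = 2048-dim vector, which the custom classifier reduces to 512
# and then to num_classes, replacing stock VGG16's 4096-unit classifier stack.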
# Load the model
model = CustomVGG16(num_classes=2)
# Load the state_dict (weights only)
model.load_state_dict(torch.load('brain_model.pth', map_location=torch.device('cpu')))
model.eval()  # set the model to evaluation mode
# Grad-CAM target layer. Note: model.features[-1] is VGG16's final MaxPool2d;
# the last conv layer (model.features[28]) is the more common choice if the
# resulting CAM looks too coarse.
target_layer = model.features[-1]
gradcam = GradCAM(model, target_layer)
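# Grad-CAM recap: backpropagate the score of a chosen class to the target
# layer, global-average-pool those gradients into per-channel weights, and
# take a ReLU of the weighted sum of the layer's activation maps. The result
# highlights the image regions that most increased the class score.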
def preprocess_image(image):
    preprocess = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),  # ImageNet stats used by pretrained VGG16
    ])
    return preprocess(image).unsqueeze(0)  # add batch dimension
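# Shape check (illustrative): for any RGB PIL image `img`,
# preprocess_image(img).shape == torch.Size([1, 3, 224, 224]).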
def generate_gradcam(image, target_class):
    # Preprocess the image and convert it to a tensor
    input_image = preprocess_image(image)
    # Reuse the module-level GradCAM instance (re-instantiating it here would
    # just re-register the same hooks) and generate the CAM
    cam = gradcam.generate(input_image, target_class)
    return cam
# Function to get layer information
def get_layers_data(model, prefix=""):
    layers_data = []
    for name, layer in model.named_children():  # iterate over direct children
        full_name = f"{prefix}.{name}" if prefix else name  # track the hierarchy
        try:
            shape = str(list(layer.parameters())[0].shape)  # shape of the first parameter
        except Exception:
            shape = "N/A"
        param_count = sum(p.numel() for p in layer.parameters())  # count parameters
        layers_data.append((full_name, layer.__class__.__name__, shape, f"{param_count:,}"))
        # Recurse into this layer (for nested structures such as Sequential)
        layers_data.extend(get_layers_data(layer, full_name))
    return layers_data
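# Example row (illustrative): VGG16's first conv layer would appear as
# ("features.0", "Conv2d", "torch.Size([64, 3, 3, 3])", "1,792").
# Note that container rows (e.g. "features") count the parameters of all
# their children, so totals overlap across nesting levels.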
def convert_image_to_base64(pil_image):
    buffered = BytesIO()
    pil_image.save(buffered, format="PNG")
    return base64.b64encode(buffered.getvalue()).decode()
def predict_image(image):
    # Preprocess the image to match the model input requirements
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),  # standard VGG16 normalization
    ])
    image = transform(image).unsqueeze(0)  # add batch dimension
    # Model and input both live on the CPU here, so no device transfer is needed
    model.eval()  # ensure evaluation mode
    with torch.no_grad():  # disable gradient calculation
        outputs = model(image)  # forward pass
    if outputs.shape[1] == 1:
        # Single-unit head (raw logit): apply sigmoid for the class-1 probability
        probs = torch.sigmoid(outputs)
        prob_class_1 = probs[0].item()
        prob_class_0 = 1 - prob_class_1
    else:
        # CustomVGG16.forward() already applies softmax, so the outputs are
        # probabilities; applying softmax again here would flatten them
        probs = outputs
        prob_class_0 = probs[0, 0].item()
        prob_class_1 = probs[0, 1].item()
    print("Model output (probabilities):", outputs)
    return prob_class_0, prob_class_1, probs
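# Usage sketch (file name hypothetical): for the two-class model,
#   p0, p1, probs = predict_image(Image.open("scan.jpg").convert("RGB"))
# returns p0 + p1 == 1.0 and probs with shape [1, 2].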
# *************************************************
# Loading the test dataset
# *************************************************
test_dir = "test"
BATCH_SIZE = 32
IMG_SIZE = (224, 224)
transform = transforms.Compose([
    transforms.Resize(IMG_SIZE),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),  # match the normalization used at inference
])
test_dataset = datasets.ImageFolder(root=test_dir, transform=transform)
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)
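# ImageFolder expects one subfolder per class (e.g. test/<class_a>/*.jpg) and
# assigns label indices by sorted folder name, so test_dataset.classes gives
# the index-to-name mapping used everywhere below.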
class_names = test_dataset.classes
class_labels = class_names
#######################################################
# Custom CSS to style the logo above the sidebar
st.markdown(
    f"""
    """,
    unsafe_allow_html=True,
)
loading_html = """
"""
# Sidebar content
st.markdown(
    """
    """,
    unsafe_allow_html=True,
)
# Navigation: the app always opens on the "Home" page
page = "Home"
selected_img = ""
st.session_state.page = "Home"
# Display content based on the selected page
if st.session_state.page == "Home":
    # Sidebar navigation buttons
    with st.sidebar:
        if st.button("📋 Model Summary"):
            st.session_state.menu = "1"  # store the selection
            st.rerun()
        if st.button("📊 Model Results Analysis", key="header"):
            st.session_state.menu = "2"
            st.rerun()
        if st.button("🧪 Model Testing"):
            st.session_state.menu = "3"
            st.rerun()
    table_style = """
    """
    with st.container(key="content_1"):
        if st.session_state.show_summary:
            layers_data = get_layers_data(model)  # collect layer information
            # Rebuild the layer summary as an HTML table
            table_html = "<table><tr><th>Layer Name</th><th>Type</th><th>Output Shape</th><th>Param #</th></tr>"
            for name, layer_type, shape, params in layers_data:
                table_html += f"<tr><td>{name}</td><td>{layer_type}</td><td>{shape}</td><td>{params}</td></tr>"
            table_html += "</table>"
            st.markdown(table_style + table_html, unsafe_allow_html=True)
        if st.session_state.show_arch:
            model.eval()
            # Collect true labels, predicted labels, and positive-class scores
            y_true = []
            y_pred = []
            y_scores = []  # probability of class 1, needed for the ROC/PR curves
            with torch.no_grad():
                for images, labels in test_loader:
                    outputs = model(images)  # forward() already returns softmax probabilities
                    _, predicted = torch.max(outputs, 1)
                    y_true.extend(labels.tolist())
                    y_pred.extend(predicted.tolist())
                    y_scores.extend(outputs[:, 1].tolist())
            # Generate the classification report
            report_dict = classification_report(y_true, y_pred, target_names=class_names, output_dict=True)
            # Convert to a DataFrame for better readability
            report_df = pd.DataFrame(report_dict).transpose().round(2)
            accuracy = report_dict["accuracy"]
            precision = report_df.loc["weighted avg", "precision"]
            recall = report_df.loc["weighted avg", "recall"]
            f1_score = report_df.loc["weighted avg", "f1-score"]
            st.markdown(
                f"""
                Precision: {precision:.2f}
                Recall: {recall:.2f}
                Accuracy: {accuracy:.3f}
                F1-Score: {f1_score:.3f}
                """,
                unsafe_allow_html=True,
            )
            # Drop the summary rows (accuracy/macro avg/weighted avg) and reset the index
            report_df = report_df.iloc[:-3].reset_index()
            report_df.rename(columns={"index": "Class"}, inplace=True)
            # Custom CSS for table styling
            st.markdown("""
            """, unsafe_allow_html=True)
            col1, col2 = st.columns([3, 3])
            with col1:
                # Convert the DataFrame to an HTML table
                report_html = report_df.to_html(index=False, classes="report-table", escape=False)
                st.markdown(f"<h4>Classification Report</h4>{report_html}", unsafe_allow_html=True)
                # Generate the confusion matrix (named conf_mat so it does not
                # shadow matplotlib.cm, which the Grad-CAM overlay relies on)
                conf_mat = confusion_matrix(y_true, y_pred)
                # Create the confusion-matrix heatmap
                fig, ax = plt.subplots(figsize=(1, 1))
                fig.patch.set_alpha(0)  # make the figure background transparent
                sns.heatmap(conf_mat, annot=True, fmt="d", cmap="Blues",
                            xticklabels=class_names, yticklabels=class_names,
                            linewidths=1, linecolor="black",
                            cbar=False, square=True, alpha=0.9,
                            annot_kws={"size": 5, "family": "Times New Roman"})
                # Remove the boxes around the annotation text
                for text in ax.texts:
                    text.set_bbox(dict(facecolor='none', edgecolor='none', alpha=0))
                plt.xticks(fontsize=4, family="Times New Roman")
                plt.yticks(fontsize=4, family="Times New Roman")
                plt.title("Confusion Matrix", fontsize=5, family="Times New Roman", color="black", loc='center')
                # Show the plot in Streamlit
                st.pyplot(fig)
            with col2:
                # Use the collected class-1 probabilities as scores; hard labels
                # (y_pred) would make the ROC curve degenerate
                y_pred_probs = np.array(y_scores)
                y_true_arr = np.array(y_true)
                # Binarize the true labels (for two classes this is a 0/1 column)
                y_true_bin = label_binarize(y_true_arr, classes=np.arange(len(class_names))).ravel()
                # ROC curve and AUC for the positive class
                fpr, tpr, _ = roc_curve(y_true_bin, y_pred_probs)
                roc_auc = auc(fpr, tpr)
                plt.figure(figsize=(11, 9))
                plt.plot(fpr, tpr, lw=2, label=f'{class_names[1]} (AUC = {roc_auc:.2f})')
                # Random-guess diagonal
                plt.plot([0, 1], [0, 1], color='navy', lw=5, linestyle='--')
                plt.xlim([0.0, 1.0])
                plt.ylim([0.0, 1.05])
                plt.xlabel('False Positive Rate', fontsize=28, family="Times New Roman")
                plt.ylabel('True Positive Rate', fontsize=28, family="Times New Roman")
                plt.title('ROC Curve', fontsize=30, family="Times New Roman", color="black", loc='center', pad=3)
                plt.legend(loc='lower right', fontsize=18)
                # Save the plot and display it in Streamlit
                plt.savefig('roc_curve.png', transparent=True)
                plt.close()
                with st.container(key="roc"):
                    st.image('roc_curve.png')
                with st.container(key="precision"):
                    # Precision-Recall curve for the positive class
                    precision_vals, recall_vals, _ = precision_recall_curve(y_true_bin, y_pred_probs)
                    pr_auc = auc(recall_vals, precision_vals)
                    plt.figure(figsize=(11, 9))
                    plt.plot(recall_vals, precision_vals, lw=2, label=f'Precision-Recall curve (AUC = {pr_auc:.2f})')
                    plt.xlabel('Recall', fontsize=28, family="Times New Roman")
                    plt.ylabel('Precision', fontsize=28, family="Times New Roman")
                    plt.title('Precision-Recall Curve', fontsize=30, family="Times New Roman", color="black", loc='center', pad=3)
                    plt.legend(loc='lower left', fontsize=18)
                    plt.grid(True, linestyle='--', alpha=0.7)
                    plt.savefig('precision_recall_curve.png', transparent=True)
                    plt.close()
                    st.image('precision_recall_curve.png')
        if st.session_state.show_desc:
            image_path = "new.jpg"
            st.markdown(
                f"""
                Brain Tumor Classification Using Transfer Learning

                This web application uses transfer learning to classify brain scan images
                into two categories: HEALTH and TUMOR. Built with Streamlit and powered by
                a PyTorch transfer-learning model based on VGG16, the app provides a simple
                and efficient way to upload brain scans and receive instant predictions.
                The model analyzes the image and classifies it based on learned patterns,
                offering a confidence score for easier interpretation.
                """,
                unsafe_allow_html=True,
            )
            uploaded_file = st.file_uploader(
                "Choose a file", type=["png", "jpg", "jpeg"], key="upload-btn"
            )
            if uploaded_file is not None:
                # Read the upload once and keep both raw bytes and a PIL image
                uploaded_file.seek(0)
                file_content = uploaded_file.read()
                # Base64 of the original upload, for HTML display
                encoded_image = base64.b64encode(file_content).decode()
                # Force RGB (uploads may be grayscale/RGBA) and the model's input size
                pil_image = Image.open(BytesIO(file_content)).convert("RGB").resize((224, 224))
                img_array = np.array(pil_image)
                class0, class1, prediction = predict_image(pil_image)
                predicted_class = int(np.argmax(prediction[0]))
                max_score = prediction[0][predicted_class]
                # Generate the Grad-CAM overlay for the predicted class
                cams = generate_gradcam(pil_image, predicted_class)
                heatmap = cm.jet(cams)[..., :3]
                heatmap = (heatmap * 255).astype(np.uint8)
                # Match the CAM to the input resolution (a no-op if GradCAM already upsamples)
                heatmap = cv2.resize(heatmap, (img_array.shape[1], img_array.shape[0]))
                overlayed_image = cv2.addWeighted(img_array, 0.6, heatmap, 0.4, 0)
                # Convert to PIL, then to base64 for the HTML gallery
                overlayed_pil = Image.fromarray(overlayed_image)
                orig_b64 = convert_image_to_base64(pil_image)
                overlay_b64 = convert_image_to_base64(overlayed_pil)
                highlight_class = "highlight"  # special class for the highest confidence score
                content = f"""
                {class_labels[0]}
                T Score: {class0:.2f}
                {class_labels[1]}
                T Score: {class1:.2f}
                """
                # Show a brief loading animation, then render the result
                placeholder = st.empty()
                placeholder.markdown(loading_html, unsafe_allow_html=True)
                time.sleep(5)
                placeholder.empty()
                st.markdown(content, unsafe_allow_html=True)
            else:
                default_image_path = "new.jpg"
                with open(default_image_path, "rb") as image_file:
                    encoded_image = base64.b64encode(image_file.read()).decode()
                st.markdown(
                    f"""
                    """,
                    unsafe_allow_html=True,
                )