import streamlit as st

# Set the page layout
st.set_page_config(layout="wide")

import time
import base64
import tensorflow as tf
import numpy as np
from PIL import Image
import torch
import os
import torch.nn as nn
from torchvision import transforms
import torch.nn.functional as F

if "model" not in st.session_state:
    st.session_state.model = tf.keras.models.load_model('best_model.keras')
if "choice" not in st.session_state:
    st.session_state.choice = "tensorflow"
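# Note: Streamlit reruns this script top-to-bottom on every user interaction, so the
# active model is cached in st.session_state above to avoid reloading the weights from
# disk on each rerun; "choice" records which framework's model is currently loaded.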
#import matplotlib.pyplot as plt

# Path to the background image
main_bg_ext = 'png'
main_bg = 'download (3).jfif'

#****************************************************************
# TENSORFLOW MODEL CONFIGURATION
#****************************************************************
class_labels = ['Cyst', 'Normal', 'Stone', 'Tumor']
def load_tensorflow_model():
    # Load the trained Keras CNN from disk
    tf_model = tf.keras.models.load_model('best_model.keras')
    return tf_model

def predict_image(image):
    time.sleep(2)                          # small artificial delay so the progress UI is visible
    image = image.convert("RGB")           # guard against grayscale/RGBA uploads
    image = image.resize((64, 64))
    image = np.array(image) / 255.0        # scale pixels to [0, 1]
    image = np.expand_dims(image, axis=0)  # add the batch dimension -> (1, 64, 64, 3)
    predictions = st.session_state.model.predict(image)
    return predictions
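# Assuming the Keras model ends in a 4-way softmax, predict_image returns an array of
# shape (1, 4) whose columns line up with class_labels, e.g.
#   preds = predict_image(img)                       # preds[0] -> [p_cyst, p_normal, p_stone, p_tumor]
#   top_label = class_labels[int(np.argmax(preds[0]))]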
#****************************************************************
# PYTORCH MODEL CONFIGURATION
#****************************************************************
class CNNModel(nn.Module):
    def __init__(self, input_channels=3, num_classes=4):
        super(CNNModel, self).__init__()
        self.conv1 = nn.Conv2d(input_channels, 32, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(32)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(64)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        self.pool3 = nn.MaxPool2d(2, 2)
        self.conv4 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.bn4 = nn.BatchNorm2d(256)
        self.pool4 = nn.MaxPool2d(2, 2)
        self.flatten = nn.Flatten()
        self.fc1 = nn.Linear(256 * 4 * 4, 512)
        self.dropout1 = nn.Dropout(0.4)
        self.fc2 = nn.Linear(512, 256)
        self.dropout2 = nn.Dropout(0.3)
        self.fc3 = nn.Linear(256, num_classes)

    def forward(self, x):
        x = self.pool1(torch.relu(self.bn1(self.conv1(x))))
        x = self.pool2(torch.relu(self.bn2(self.conv2(x))))
        x = self.pool3(torch.relu(self.bn3(self.conv3(x))))
        x = self.pool4(torch.relu(self.bn4(self.conv4(x))))
        x = self.flatten(x)
        x = self.dropout1(torch.relu(self.fc1(x)))
        x = self.dropout2(torch.relu(self.fc2(x)))
        x = self.fc3(x)
        return x
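# Shape check for fc1's input size: a 64x64 input is halved by each of the four 2x2
# max-pools (64 -> 32 -> 16 -> 8 -> 4), and the last conv block has 256 channels,
# so the flattened feature vector has 256 * 4 * 4 = 4096 elements, matching
# nn.Linear(256 * 4 * 4, 512).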
#*************************************************************
def predict_with_pytorch(image):
    # Preprocessing pipeline: resize, convert to tensor, normalize with ImageNet statistics
    preprocess = transforms.Compose([
        transforms.Resize((64, 64)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    # Apply the transformations and add the batch dimension -> (1, 3, 64, 64)
    image = preprocess(image.convert("RGB")).unsqueeze(0)
    print(f"Image shape after preprocessing: {image.shape}")
    with torch.no_grad():
        output = st.session_state.model(image)
        probabilities = F.softmax(output, dim=1)
    class_probabilities = probabilities.squeeze().tolist()
    predicted_classes = torch.argsort(probabilities, dim=1, descending=True)
    # Map each class index to its own probability, ordered from most to least likely
    result_dict = {}
    for idx in predicted_classes[0]:
        result_dict[idx.item()] = class_probabilities[idx.item()]
    return result_dict
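# Because Python dicts preserve insertion order, the dict returned above is keyed by
# class index in descending order of probability, so for example:
#   preds = predict_with_pytorch(img)
#   best_idx = next(iter(preds))              # most likely class index
#   class_labels[best_idx], preds[best_idx]   # its label and probability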
#**********************************************************
def load_pytorch_model():
    # Load the serialized PyTorch CNN, forcing it onto the CPU
    model = torch.load('torch_model.pth', map_location=torch.device('cpu'))
    model.eval()
    return model
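# Note: this assumes torch_model.pth was produced with torch.save(model), i.e. it stores
# the whole pickled CNNModel object. If the file instead held only a state_dict, the
# loader would need to instantiate CNNModel() first and call load_state_dict() on it.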
#****************************************************************
# STREAMLIT UI CONFIGURATION
#****************************************************************
# Custom CSS to style the logo above the sidebar and other elements
st.markdown(
    f"""
    <style>
    /* Container for logo and text */
    .logo-text-container {{
        position: fixed;
        top: 30px;   /* Adjust vertical position */
        left: 50px;  /* Align with sidebar */
        display: flex;
        align-items: center;
        gap: 15px;
        justify-content: space-between;
        width: 100%;
    }}
    /* Logo styling */
    .logo-text-container img {{
        width: 130px;        /* Adjust logo size */
        border-radius: 10px; /* Optional: round edges */
        margin-top: 10px;
        margin-left: 20px;
    }}
    /* Bold text styling */
    .logo-text-container h1 {{
        font-family: 'Times New Roman', serif;
        font-size: 24px;
        font-weight: bold;
        text-align: center;
        color: #FFD700; /* Golden color for text */
    }}
    /* Sidebar styling */
    section[data-testid="stSidebar"][aria-expanded="true"] {{
        margin-top: 100px !important;              /* Space for the logo */
        border-radius: 0 60px 0px 60px !important; /* Top-left and bottom-right corners */
        width: 200px !important;                   /* Sidebar width */
        background: none;                          /* No background */
        color: white !important;
    }}
    header[data-testid="stHeader"] {{
        background: transparent !important;
        margin-right: 100px !important;
        margin-top: 1px !important;
        z-index: 1 !important;
        color: blue;                                /* Blue text */
        font-family: "Times New Roman" !important;  /* Font */
        font-size: 18px !important;                 /* Font size */
        font-weight: bold !important;               /* Bold text */
        padding: 10px 20px;                         /* Padding for buttons */
        border-radius: 35px;                        /* Rounded corners */
        box-shadow: 0px 4px 10px rgba(0, 0, 0, 0.2); /* Shadow effect */
        transition: all 0.3s ease-in-out;           /* Smooth transition */
        display: flex;
        align-items: center;
        justify-content: center;
        margin: 10px 0;
        width: 90%;
        left: 5.5%;
        height: 60px;
        margin-top: 70px;
        backdrop-filter: blur(10px);
        border: 2px solid rgba(255, 255, 255, 0.4); /* Light border */
    }}
    div[data-testid="stDecoration"] {{
        background-image: none;
    }}
    div[data-testid="stApp"] {{
        background: url(data:image/{main_bg_ext};base64,{base64.b64encode(open(main_bg, "rb").read()).decode()});
        background-size: cover;      /* Ensure the image covers the full page */
        background-position: center;
        height: 98vh;
        width: 98%;
        border-radius: 20px !important;
        margin-left: 10px;
        margin-right: 10px;
        margin-top: 10px;
        overflow: hidden;
        backdrop-filter: blur(10px); /* Glass effect */
        -webkit-backdrop-filter: blur(10px);
        border: 1px solid rgba(255, 255, 255, 0.2); /* Light border */
    }}
    div[data-testid="stSidebarNav"] {{
        display: none;
    }}
    /* Styling for the content container */
    [class*="st-key-content-container-1"] {{
        background: rgba(255, 255, 255, 0.5);       /* Semi-transparent white background */
        border: 2px solid rgba(255, 255, 255, 0.4); /* Light border */
        backdrop-filter: blur(10px);                /* Apply blur effect */
        -webkit-backdrop-filter: blur(10px);        /* For Safari compatibility */
        border-radius: 20px;
        padding: 20px;
        box-shadow: 0 4px 10px rgba(0, 0, 0, 0.1);  /* Subtle shadow for depth */
        width: 98%;                                 /* Span most of the screen */
        margin-left: 0.5%;
        margin-right: 0.5%;
        height: 92.5vh;                             /* Fill most of the screen */
        overflow-y: auto;                           /* Enable vertical scrolling */
        position: fixed;                            /* Keep the container fixed on the screen */
        top: 3.5%;                                  /* Adjust top margin */
        left: 0.5%;                                 /* Adjust left margin */
        z-index: 0;                                 /* Keep behind sidebar and header */
        margin-bottom: 2%;
    }}
    [class*="st-key-content-container-3"] {{
        width: 28%;                                 /* Narrow strip for the framework buttons */
        position: fixed;
        top: -0.9%;                                 /* Adjust top margin */
        left: 11%;                                  /* Adjust left margin */
        padding-left: 20px;
        background: transparent !important;
        margin-right: 100px !important;
        border-right: 2px solid rgba(255, 255, 155, 0.4); /* Light border */
        z-index: 1 !important;
        color: blue;                                /* Blue text */
        font-family: "Times New Roman" !important;  /* Font */
        font-size: 18px !important;                 /* Font size */
        font-weight: bold !important;               /* Bold text */
        padding: 10px 20px;                         /* Padding for buttons */
        border: none;                               /* Remove border */
        border-radius: 35px;                        /* Rounded corners */
        transition: all 0.3s ease-in-out;           /* Smooth transition */
        display: flex;
        align-items: center;
        justify-content: center;
        margin: 10px 0;
        height: 60px;
    }}
    /* Styling for the content container */
    [class*="st-key-content-container-2"] {{
        background-color: transparent;  /* Transparent background */
        border-radius: 20px;
        padding: 20px;
        width: 50%;                     /* Span about half of the screen */
        height: 85vh;                   /* Fill most of the screen */
        overflow-y: auto;               /* Enable vertical scrolling */
        position: fixed;                /* Keep the container fixed on the screen */
        top: 7%;                        /* Adjust top margin */
        left: 49.5%;                    /* Adjust left margin */
        right: 2%;
        border-left: 3px solid rgba(255, 255, 155, 0.9); /* Light border */
    }}
    /* Button row styling */
    .button-row {{
        display: flex;
        justify-content: flex-start;
        gap: 20px;
        margin-bottom: 20px;
    }}
    .custom-button {{
        width: 100px;
        height: 40px;
        border-radius: 10px;
        background-color: #007BFF;
        color: white;
        border: none;
        cursor: pointer;
        font-size: 16px;
    }}
    .custom-button:hover {{
        background-color: #0056b3;
    }}
    div.stButton > button {{
        background: rgba(255, 255, 255, 0.2);
        color: blue;                                 /* Blue text */
        font-family: "Times New Roman" !important;   /* Font */
        font-size: 18px !important;                  /* Font size */
        font-weight: bold !important;                /* Bold text */
        padding: 10px 20px;                          /* Padding for buttons */
        border: none;                                /* Remove border */
        border-radius: 35px;                         /* Rounded corners */
        box-shadow: 0px 4px 10px rgba(0, 0, 0, 0.2); /* Shadow effect */
        transition: all 0.3s ease-in-out;            /* Smooth transition */
        display: flex;
        align-items: center;
        justify-content: center;
        margin: 10px 0;
        width: 160px;
        height: 50px;
        margin-top: 5px;
    }}
    /* Hover effect */
    div.stButton > button:hover {{
        background: rgba(255, 255, 255, 0.2);
        transform: scale(1.1);                              /* Slight zoom on hover */
        box-shadow: 0px 4px 12px rgba(255, 255, 255, 0.4);  /* Glow effect */
    }}
    /* Outer large circle with transparent background */
    .outer-circle {{
        width: 350px;
        height: 350px;
        border-radius: 40%;             /* Rounded shape */
        background-color: transparent;  /* Transparent background */
        border: 1px solid white;        /* White border */
        display: flex;
        justify-content: center;
        align-items: center;
        box-shadow: 0 4px 15px rgba(0, 0, 0, 0.2); /* Shadow for depth */
    }}
    /* Inner smaller circle with frosted background */
    .inner-circle {{
        width: 330px;
        height: 330px;
        backdrop-filter: blur(10px);
        background: rgba(255, 255, 255, 0.2);
        border-radius: 40%;             /* Rounded shape */
        display: flex;
        justify-content: center;
        align-items: center;
        overflow: hidden;               /* Ensure the image stays within the circle */
        box-shadow: 0 4px 15px rgba(0, 0, 0, 0.4); /* Shadow for depth */
        border: 1px solid white;        /* White border */
    }}
    /* Style for the image to fit within the inner circle */
    .inner-circle img {{
        width: 100%;
        height: 100%;
        object-fit: cover;              /* Ensure the image covers the circular area */
        box-shadow: 0 4px 15px rgba(0, 0, 0, 0.2); /* Shadow for depth */
    }}
    /* Style for the upload button */
    [class*="st-key-upload-btn"] {{
        position: absolute;
        top: 50%;                       /* Position from the top of the inner circle */
        left: 5%;                       /* Horizontal position */
        transform: translateX(-40%);    /* Adjust to keep it centered */
        padding: 10px 20px;
        color: black;
        border: none;
        border-radius: 20px;
        cursor: pointer;
        font-size: 23px;
        width: 300px;
        height: 100px;
        z-index: 1000;
    }}
    .upload-btn:hover {{
        background-color: rgba(0, 123, 255, 1);
    }}
    div[data-testid="stFileUploader"] label > div > p {{
        display: none;
        color: white !important;
    }}
    section[data-testid="stFileUploaderDropzone"] {{
        width: 190px;
        height: 120px;
        border-radius: 40px;
        display: flex;
        justify-content: center;
        align-items: center;
        margin-top: -10px;
        box-shadow: 0px 4px 8px rgba(0, 0, 0, 0.3);
        margin: 20px;
        background-color: rgba(255, 255, 255, 0.7); /* Translucent white background */
        color: white;
    }}
    div[data-testid="stFileUploaderDropzoneInstructions"] div > small {{
        color: white !important;
        display: none;
    }}
    div[data-testid="stFileUploaderDropzoneInstructions"] span {{
        margin-left: 60px;
    }}
    div[data-testid="stFileUploaderDropzoneInstructions"] div {{
        display: none;
    }}
    section[data-testid="stFileUploaderDropzone"] button {{
        display: none;
    }}
    div[data-testid="stMarkdownContainer"] p {{
        font-family: "Times New Roman" !important; /* Elegant font */
        color: white !important;
    }}
    .title {{
        font-family: "Times New Roman" !important; /* Elegant font for title */
        font-size: 1rem;
        font-weight: bold;
        margin-left: 37px;
        margin-top: 10px;
        margin-bottom: -100px;
        padding: 0;
        color: #333; /* Neutral color for text */
    }}
    </style>
    """,
    unsafe_allow_html=True,
)
st.markdown(
    """
    <style>
    /* Outer container to define the grid */
    .grid-container {
        display: grid;
        grid-template-columns: repeat(2, 1fr); /* 2 columns */
        grid-template-rows: repeat(2, 1fr);    /* 2 rows */
        gap: 20px;                             /* Space between containers */
        width: 90%;
        height: 5vh;
        align-items: center;
    }
    /* Individual grid items (containers) */
    .grid-item {
        padding: 20px;
        border-radius: 20px;
        box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
        display: flex;
        justify-content: left;
        align-items: center;
        text-align: left;
        background: rgba(0, 0, 0, 0.2);  /* Semi-transparent dark background */
        width: 80%;                      /* Span most of the available width */
        margin-left: 0.5%;
        margin-right: 0.5%;
    }
    /* Optional styling for the subheader and content */
    .grid-item h3 {
        margin: 0;
        color: #333;
        width: 100px;
        font-family: "Times New Roman" !important; /* Elegant font for title */
        font-size: 18px;
        font-weight: bold;
    }
    .grid-item p {
        color: #555;
    }
    .title-container {
        display: flex;
        align-items: center;   /* Vertically center the title and the image */
    }
    .title-container img {
        width: 40px;           /* Adjust the size of the image */
        height: 40px;          /* Adjust the size of the image */
        margin-right: 10px;    /* Space between the image and the title */
    }
    .title {
        font-size: 20px;
        font-weight: bold;
    }
    </style>
    """, unsafe_allow_html=True
)
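# Layout note: three keyed st.container blocks are styled by the CSS above --
# "content-container-3" holds the framework toggle buttons, "content-container-1" is the
# main glass card with the model info and uploader, and "content-container-2" is the
# right-hand panel where predictions are rendered.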
# Create the main content area
with st.container(key="content-container-3"):
    col1, _, col2 = st.columns([2, 4, 2])
    with col1:
        if st.button("TensorFlow"):
            st.session_state.model = load_tensorflow_model()
            st.session_state.choice = "tensorflow"
    with col2:
        if st.button("PyTorch"):
            st.session_state.model = load_pytorch_model()
            st.session_state.choice = "pytorch"
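# Clicking either button triggers a Streamlit rerun: the selected framework's model
# replaces st.session_state.model, and st.session_state.choice decides which prediction
# path (predict_image vs. predict_with_pytorch) runs further below.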
with st.container(key="content-container-1"):
    image_path = "Load.png"
    col1, col2 = st.columns([1, 9])
    with col1:
        st.write("")
    with col2:
        st.write("")
        if st.session_state.choice == "tensorflow":
            st.markdown(f"""<div class="title-container">
                <img src="data:image/png;base64,{base64.b64encode(open("tensorflow.png", "rb").read()).decode()}" alt="TensorFlow logo">
                <h2 class="title">TensorFlow Model Information</h2></div>""", unsafe_allow_html=True)
            st.write("This is a Convolutional Neural Network (CNN) model trained on image data.")
            st.write("Input Shape: (64, 64, 3)")
            st.write("Output Classes: 4 classes")
        else:
            st.markdown(f"""<div class="title-container">
                <img src="data:image/png;base64,{base64.b64encode(open("pytorch.png", "rb").read()).decode()}" alt="PyTorch logo">
                <h2 class="title">PyTorch Model Information</h2></div>""", unsafe_allow_html=True)
            st.write("This is a Convolutional Neural Network (CNN) model trained on image data.")
            st.write("Input Shape: (64, 64, 3)")
            st.write("Output Classes: 4 classes")
    col3, col4 = st.columns([3, 7])
    with col3:
        uploaded_file = st.file_uploader("Choose a file", type=["png", "jpg", "jpeg"], key="upload-btn")
        if uploaded_file is not None:
            # Display the circular container with the uploaded image inside
            # (getvalue() leaves the buffer position untouched so PIL can still read it later)
            st.markdown(
                f"""
                <div class="outer-circle">
                    <div class="inner-circle">
                        <img src="data:image/png;base64,{base64.b64encode(uploaded_file.getvalue()).decode()}" alt="Uploaded Image">
                    </div>
                </div>
                """,
                unsafe_allow_html=True,
            )
        else:
            # No upload yet: show the placeholder image instead
            default_image_path = "Load.png"
            with open(default_image_path, "rb") as image_file:
                encoded_image = base64.b64encode(image_file.read()).decode()
            st.markdown(
                f"""
                <div class="outer-circle">
                    <div class="inner-circle">
                        <img src="data:image/png;base64,{encoded_image}" alt="Default Image">
                    </div>
                </div>
                """,
                unsafe_allow_html=True,
            )
    with col4:
        with st.container(key="content-container-2"):
            if uploaded_file is not None:
                images = Image.open(uploaded_file)
                with st.spinner("Processing the image..."):
                    progress_bar = st.progress(0)
                    for i in range(1, 11):
                        time.sleep(0.6)  # simulated delay for each progress increment
                        progress_bar.progress(i * 10)
                if st.session_state.choice == "tensorflow":
                    prediction = predict_image(images)
                    max_index = int(np.argmax(prediction[0]))
                    max_score = prediction[0][max_index]
                    descriptive_message = ""
                    if max_index == 0:
                        descriptive_message = f"""
                        This image is likely to represent a <b>{class_labels[max_index]} kidney</b>, which is a fluid-filled sac that forms in various body parts.
                        Cysts are typically benign and may not require treatment unless they grow large or become infected. We recommend monitoring the cyst and consulting a healthcare provider if you notice any changes.
                        """
                    elif max_index == 1:
                        descriptive_message = f"""
                        This image is likely to represent a <b>{class_labels[max_index]} kidney</b>, which is an indication of healthy tissue with no signs of abnormal growth.
                        We recommend maintaining a healthy lifestyle and continuing regular health check-ups to ensure the body remains in a natural, healthy state.
                        """
                    elif max_index == 2:
                        descriptive_message = f"""
                        This image is likely to represent a <b>{class_labels[max_index]} kidney</b>, which is a solid mass that forms in organs like the kidneys or bladder due to crystallization of minerals or salts.
                        Stones can be painful, and treatment may include passing them naturally or removing them surgically. We recommend staying hydrated and avoiding excessive salt intake to prevent stones from forming.
                        """
                    else:
                        descriptive_message = f"""
                        This image is likely to represent a <b>{class_labels[max_index]} kidney</b>, which is an abnormal growth of tissue. Tumors can be benign or malignant, and further testing is required to determine the exact nature.
                        We recommend consulting a healthcare provider for further investigation and treatment if necessary.
                        """
                    if prediction is not None and len(prediction) > 0:  # Check if prediction is valid
                        divs = f"""
                        <div class="grid-container">
                            <div class="grid-item">
                                <h3>{class_labels[0]}</h3>
                                <p>T Score: {prediction[0][0]:.2f}</p>
                            </div>
                            <div class="grid-item">
                                <h3>{class_labels[1]}</h3>
                                <p>T Score: {prediction[0][1]:.2f}</p>
                            </div>
                            <div class="grid-item">
                                <h3>{class_labels[2]}</h3>
                                <p>T Score: {prediction[0][2]:.2f}</p>
                            </div>
                            <div class="grid-item">
                                <h3>{class_labels[3]}</h3>
                                <p>T Score: {prediction[0][3]:.2f}</p>
                            </div>
                            <h2 class="title">Prediction: {class_labels[max_index]} with confidence {prediction[0][max_index]:.2f}</h2>
                            <p>{descriptive_message}</p>
                        </div>
                        """
                        st.markdown(divs, unsafe_allow_html=True)
                else:
                    predictions = predict_with_pytorch(images)
                    predictiont = list(predictions.keys())  # class indices, most likely first
                    predicted_index = max(predictions, key=predictions.get)
                    print(f"class probabilities: {predictions}")
                    print(f"predicted class index: {predicted_index}")
                    descriptive_message = ""
                    if predicted_index == 0:
                        descriptive_message = f"""
                        This image is likely to represent a <b>{class_labels[predicted_index]} kidney</b>, which is a fluid-filled sac that forms in various body parts.
                        Cysts are typically benign and may not require treatment unless they grow large or become infected. We recommend monitoring the cyst and consulting a healthcare provider if you notice any changes.
                        """
                    elif predicted_index == 1:
                        descriptive_message = f"""
                        This image is likely to represent a <b>{class_labels[predicted_index]} kidney</b>, which is an indication of healthy tissue with no signs of abnormal growth.
                        We recommend maintaining a healthy lifestyle and continuing regular health check-ups to ensure the body remains in a natural, healthy state.
                        """
                    elif predicted_index == 2:
                        descriptive_message = f"""
                        This image is likely to represent a <b>{class_labels[predicted_index]} kidney</b>, which is a solid mass that forms in organs like the kidneys or bladder due to crystallization of minerals or salts.
                        Stones can be painful, and treatment may include passing them naturally or removing them surgically. We recommend staying hydrated and avoiding excessive salt intake to prevent stones from forming.
                        """
                    else:
                        descriptive_message = f"""
                        This image is likely to represent a <b>{class_labels[predicted_index]} kidney</b>, which is an abnormal growth of tissue. Tumors can be benign or malignant, and further testing is required to determine the exact nature.
                        We recommend consulting a healthcare provider for further investigation and treatment if necessary.
                        """
                    # Once preprocessing is done, render the prediction grid
                    if predictiont:
                        st.markdown(f"""
                        <div class="grid-container">
                            <div class="grid-item">
                                <h3>{class_labels[predictiont[0]]}</h3>
                                <p>T Score: {predictions[predictiont[0]]:.2f}</p>
                            </div>
                            <div class="grid-item">
                                <h3>{class_labels[predictiont[1]]}</h3>
                                <p>T Score: {predictions[predictiont[1]]:.2f}</p>
                            </div>
                            <div class="grid-item">
                                <h3>{class_labels[predictiont[2]]}</h3>
                                <p>T Score: {predictions[predictiont[2]]:.2f}</p>
                            </div>
                            <div class="grid-item">
                                <h3>{class_labels[predictiont[3]]}</h3>
                                <p>T Score: {predictions[predictiont[3]]:.2f}</p>
                            </div>
                            <h2 class="title">Prediction: {class_labels[predicted_index]} with confidence {predictions[predicted_index]:.2f}</h2>
                            <p>{descriptive_message}</p>
                        </div>
                        """, unsafe_allow_html=True
                        )