import streamlit as st
import numpy as np
import cv2
from PIL import Image
from io import BytesIO
from ultralytics import YOLO
from datetime import datetime
from gtts import gTTS
import tempfile
import os
import base64
import ollama
import bcrypt
import sqlite3
import time
from deep_translator import GoogleTranslator
#from transformers import AutoTokenizer, AutoModelForCausalLM, AutoImageProcessor, pipeline
#import torch
#from huggingface_hub import from_pretrained_keras
import requests
# Database setup
conn = sqlite3.connect('users.db')
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS users
(id INTEGER PRIMARY KEY AUTOINCREMENT,
username TEXT UNIQUE,
password_hash TEXT)''')
conn.commit()
# Password hashing and verification
def hash_password(password):
    return bcrypt.hashpw(password.encode(), bcrypt.gensalt())
def verify_password(password, hashed_password):
    return bcrypt.checkpw(password.encode(), hashed_password)
# Add a user
def add_user(username, password):
    # Check if username already exists
    c.execute("SELECT id FROM users WHERE username = ?", (username,))
    result = c.fetchone()
    if result:
        return False  # Username already exists
    # Hash the password and insert the new user
    password_hash = hash_password(password)
    c.execute("INSERT INTO users (username, password_hash) VALUES (?, ?)",
              (username, password_hash))
    conn.commit()
    return True
# Verify a user
def verify_user(username, password):
    c.execute("SELECT password_hash FROM users WHERE username = ?", (username,))
    result = c.fetchone()
    if result:
        return verify_password(password, result[0])
    return False
# Login and logout
def login(username, password):
    if not username or not password:
        st.error("Username and password are required.")
        return False
    if verify_user(username, password):
        st.session_state['authenticated'] = True
        st.session_state['username'] = username
        st.session_state['last_activity'] = time.time()
        return True
    st.error("Invalid username or password.")
    return False
def logout():
    st.session_state['authenticated'] = False
    st.session_state['username'] = None
# Custom CSS for the landing and authentication pages
def local_css():
    st.markdown("""
<style>
.stButton>button {
width: 100%;
border-radius: 5px;
height: 3em;
margin-top: 10px;
}
.auth-container {
max-width: 400px;
margin: auto;
padding: 20px;
border-radius: 10px;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
background-color: white;
}
.auth-title {
text-align: center;
font-size: 24px;
margin-bottom: 20px;
color: #1f1f1f;
}
.auth-subtitle {
text-align: center;
font-size: 16px;
margin-bottom: 20px;
color: #666;
}
.hero-section {
text-align: center;
padding: 40px 20px;
background: linear-gradient(to right, #4f46e5, #3b82f6);
color: white;
margin-bottom: 30px;
}
.feature-container {
max-width: 1200px;
margin: auto;
padding: 20px;
display: grid;
grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
gap: 20px;
margin-bottom: 40px;
}
.feature-card {
background: white;
padding: 20px;
border-radius: 10px;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
transition: transform 0.3s ease, box-shadow 0.3s ease;
}
.feature-card:hover {
transform: scale(1.05);
box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);
}
.feature-title {
color: #1f1f1f;
font-size: 18px;
margin-bottom: 10px;
font-weight: bold;
}
.feature-text {
color: #666;
font-size: 14px;
}
.divider {
text-align: center;
margin: 20px 0;
position: relative;
}
.divider:before {
content: "";
position: absolute;
top: 50%;
left: 0;
right: 0;
height: 1px;
background-color: #e0e0e0;
z-index: -1;
}
.divider span {
background-color: white;
padding: 0 10px;
color: #666;
font-size: 14px;
}
@keyframes typing {
0% {
width: 0;
}
50% {
width: 100%;
}
60% {
width: 100%;
}
100% {
width: 0;
}
}
@keyframes blink {
50% {
border-color: transparent;
}
}
.hero-title{
display: inline-block;
font-size: 2.5em;
white-space: nowrap;
overflow: hidden;
border-right: 2px solid white;
width: 0;
animation: typing 6s steps(40, end) infinite, blink 0.5s step-end infinite;
}
.hero-section {
text-align: center;
padding: 40px 20px;
background: linear-gradient(45deg, #4f46e5, #3b82f6);
background-size: 300% 300%;
color: white;
margin-bottom: 30px;
opacity: 0;
animation: gradientShift 8s ease infinite, fadeIn 2s ease-in-out forwards;
}
@keyframes fadeIn {
from {
opacity: 0;
}
to {
opacity: 1;
}
}
@keyframes gradientShift {
0% {
background-position: 0% 50%;
}
50% {
background-position: 100% 50%;
}
100% {
background-position: 0% 50%;
}
}
/* Alternative rotating feature layout (disabled):
.feature-container {
display: flex;
justify-content: center;
align-items: center;
gap: 20px;
position: relative;
width: 100%;
height: 300px;
animation: rotate 20s linear infinite;
}
.feature-card {
background: white;
padding: 20px;
border-radius: 10px;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
transition: transform 0.3s ease, box-shadow 0.3s ease;
flex-shrink: 0;
width: 250px;
}
.feature-card:hover {
transform: scale(1.1);
box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);
}
@keyframes rotate {
from {
transform: rotate(0deg);
}
to {
transform: rotate(-360deg);
}
}
*/
/* Alternative horizontal carousel layout (disabled):
.feature-container {
display: flex;
justify-content: center;
align-items: center;
overflow: hidden;
position: relative;
width: 100%;
height: 300px;
}
.feature-track {
display: flex;
animation: circularMove 15s linear infinite;
}
.feature-card {
flex: 0 0 300px;
margin: 0 20px;
background: white;
color: #333;
padding: 20px;
border-radius: 10px;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
text-align: center;
overflow: hidden;
}
.feature-card h3 {
font-size: 1.2em;
margin-bottom: 10px;
text-align: center;
}
.feature-card p {
font-size: 0.9em;
line-height: 1.4;
text-align: center;
font-weight: bold;
}
.feature-card:hover {
transform: scale(1.1);
box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);
}
@keyframes circularMove {
0% {
transform: translateX(0);
}
100% {
transform: translateX(-100%);
}
}
*/
.feature-container {
display: flex;
justify-content: center;
align-items: center;
height: 400px;
perspective: 1000px;
perspective-origin: 50% 50%;
background: linear-gradient(to bottom, #1e293b, #0f172a); /* Dark blue gradient background */
overflow: hidden;
position: relative;
padding: 40px 0;
}
.feature-track {
position: relative;
width: 100%;
height: 100%;
display: flex;
transform-style: preserve-3d;
animation: carousel 15s linear infinite;
}
.feature-card {
position: absolute;
width: 300px;
padding: 50px;
background: white;
border-radius: 15px;
box-shadow: 0 4px 20px rgba(0, 0, 0, 0.3); /* Enhanced shadow for better contrast */
backface-visibility: hidden;
transform-origin: center center;
transition: all 0.5s ease;
}
.feature-card h3 {
color: #1e293b;
font-size: 1.5em;
margin-bottom: 1rem;
font-weight: bold;
}
.feature-card p {
color: #475569;
line-height: 1.6;
}
/* Position and animate cards */
.feature-card:nth-child(1) {
transform: rotateY(0deg) translateZ(400px) translateX(0px);
}
.feature-card:nth-child(2) {
transform: rotateY(60deg) translateZ(400px) translateX(0px);
}
.feature-card:nth-child(3) {
transform: rotateY(120deg) translateZ(400px) translateX(0px);
}
.feature-card:nth-child(4) {
transform: rotateY(180deg) translateZ(400px) translateX(0px);
}
.feature-card:nth-child(5) {
transform: rotateY(240deg) translateZ(400px) translateX(0px);
}
.feature-card:nth-child(6) {
transform: rotateY(300deg) translateZ(400px) translateX(0px);
}
@keyframes carousel {
0% {
transform: translateZ(-400px) rotateY(0deg);
}
100% {
transform: translateZ(-400px) rotateY(-360deg);
}
}
/* Enhanced hover effect with glow */
.feature-card:hover {
transform: scale(1.1) translateZ(450px);
box-shadow: 0 8px 30px rgba(255, 255, 255, 0.1); /* Glowing effect */
z-index: 1;
}
/* Gradient overlays for depth effect */
.feature-container::before,
.feature-container::after {
content: '';
position: absolute;
width: 100%;
height: 100px;
z-index: 2;
pointer-events: none;
}
.feature-container::before {
top: 0;
background: linear-gradient(to bottom, #1e293b, rgba(30, 41, 59, 0));
}
.feature-container::after {
bottom: 0;
background: linear-gradient(to top, #1e293b, rgba(30, 41, 59, 0));
}
</style>
""", unsafe_allow_html=True)
# Check session expiry
if 'authenticated' in st.session_state and st.session_state['authenticated']:
    if time.time() - st.session_state['last_activity'] > 1800:  # 30 minutes
        logout()
        st.rerun()
    st.session_state['last_activity'] = time.time()
# Initialize session state for registration form visibility
if 'show_register_form' not in st.session_state:
    st.session_state['show_register_form'] = False
# Login / registration landing page (shown when the user is not authenticated)
if 'authenticated' not in st.session_state or not st.session_state['authenticated']:
    local_css()
    # Landing page hero section
    st.markdown("""
<div class="hero-section">
<h1 class="hero-title" style="font-size: 2.5em; margin-bottom: 20px;">Crop Disease Detection System</h1>
<p style="font-size: 1.2em; max-width: 800px; margin: 0 auto;">
An advanced AI-powered system that helps farmers and agricultural experts identify and manage crop diseases effectively
</p>
</div>
""", unsafe_allow_html=True)
    # Features section using Streamlit columns
    st.subheader("Key Features")
    col1, col2, col3 = st.columns(3)
    st.markdown("""
<div class="feature-container">
<div class="feature-track">
<div class="feature-card">
<h3>πŸ” Instant Detection</h3>
<p>Upload images of your crops and get immediate disease detection results using state-of-the-art AI technology.</p>
</div>
<div class="feature-card">
<h3>πŸ’‘ Expert Analysis</h3>
<p>Receive detailed analysis and recommendations from our plant pathology expert system.</p>
</div>
<div class="feature-card">
<h3>πŸ“Š Detailed Reports</h3>
<p>Generate comprehensive reports with treatment recommendations and preventive measures.</p>
</div>
<div class="feature-card">
<h3>πŸ” Instant Detection</h3>
<p>Upload images of your crops and get immediate disease detection results using state-of-the-art AI technology.</p>
</div>
<div class="feature-card">
<h3>πŸ’‘ Expert Analysis</h3>
<p>Receive detailed analysis and recommendations from our plant pathology expert system.</p>
</div>
<div class="feature-card">
<h3>πŸ“Š Detailed Reports</h3>
<p>Generate comprehensive reports with treatment recommendations and preventive measures.</p>
</div>
</div>
</div>
""", unsafe_allow_html=True)
    # Crop carousel section
    st.markdown("""
<div class="crop-carousel-container">
<div class="crop-carousel-track">
<div class="crop-card">
<img src="https://github.com/ROBERT-ADDO-ASANTE-DARKO/AI-powered-crop-disease-detection/blob/main/images/b034333ddcc732299d45abf753f3fa71f6ff48ffa3338bfecd615bc2.jpg?raw=true" alt="Crop 1">
<h4>Corn Leaf Blight</h4>
<p>Corn leaf blight is a fungal disease caused primarily by Exserohilum turcicum (Northern corn leaf blight) and Bipolaris maydis (Southern corn leaf blight).</p>
</div>
<div class="crop-card">
<img src="https://github.com/ROBERT-ADDO-ASANTE-DARKO/AI-powered-crop-disease-detection/blob/main/images/apple.jpg?raw=true" alt="Crop 2">
<h4>Apple Scab Leaf</h4>
<p>Apple scab is a fungal disease caused by Venturia inaequalis. It primarily affects apple and crabapple trees.</p>
</div>
<div class="crop-card">
<img src="https://github.com/ROBERT-ADDO-ASANTE-DARKO/AI-powered-crop-disease-detection/blob/main/images/tomato.jpg?raw=true" alt="Crop 3">
<h4>Tomato Leaf Late Blight</h4>
<p>Late blight of tomato is caused by the oomycete pathogen Phytophthora infestans. It is characterized by dark, water-soaked lesions on leaves, stems, and fruit.</p>
</div>
<div class="crop-card">
<img src="https://github.com/ROBERT-ADDO-ASANTE-DARKO/AI-powered-crop-disease-detection/blob/main/images/918d1d7a3dda5ce8fbdabf92e5bf38f104efd129ee09adcc6d1ad46c.jpg?raw=true" alt="Crop 4">
<h4>Tomato Leaf Yellow Virus</h4>
<p>Tomato leaf yellow virus (often referred to as Tomato yellow leaf curl virus, or TYLCV) is a viral disease transmitted by whiteflies. It causes yellowing and curling of tomato leaves.</p>
</div>
</div>
</div>
""", unsafe_allow_html=True)
st.markdown("""
<style>
.crop-carousel-container {
width: 100%;
max-width: 800px;
margin: auto;
overflow: hidden;
position: relative;
}
.crop-carousel-track {
display: flex;
animation: moveLeft 20s linear infinite; /* Move right to left */
}
.crop-card {
flex: 0 0 300px;
margin: 0 20px;
background: white;
color: #333;
padding: 20px;
border-radius: 10px;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
text-align: center;
overflow: hidden;
}
.crop-card img {
width: 100%;
height: 150px;
object-fit: cover;
border-radius: 10px;
margin-bottom: 10px;
}
.crop-card h4 {
font-size: 1.2em;
margin: 10px 0;
}
.crop-card p {
font-size: 0.9em;
line-height: 1.4;
color: #555;
}
@keyframes moveLeft {
0% {
transform: translateX(100%);
}
100% {
transform: translateX(-100%);
}
}
</style>
""", unsafe_allow_html=True)
    # Add some spacing
    st.markdown("<br>", unsafe_allow_html=True)
    # Authentication container
    st.markdown('<div class="auth-container">', unsafe_allow_html=True)
    # Initialize password reset state
    if 'show_reset_form' not in st.session_state:
        st.session_state['show_reset_form'] = False
    # Update password function
    def update_password(username, new_password):
        conn = sqlite3.connect('users.db')
        c = conn.cursor()
        # Check if username exists
        c.execute("SELECT id FROM users WHERE username = ?", (username,))
        if not c.fetchone():
            conn.close()
            return False
        # Update password
        password_hash = bcrypt.hashpw(new_password.encode(), bcrypt.gensalt())
        c.execute("UPDATE users SET password_hash = ? WHERE username = ?",
                  (password_hash, username))
        conn.commit()
        conn.close()
        return True
    # Authentication container section
    if not st.session_state.get('authenticated', False):
        st.markdown('<div class="auth-container">', unsafe_allow_html=True)
        # Reset Password Form
        if st.session_state.get('show_reset_form', False):
            st.markdown('<h1 class="auth-title">Reset Password</h1>', unsafe_allow_html=True)
            st.markdown('<p class="auth-subtitle">Enter your username and new password</p>', unsafe_allow_html=True)
            with st.form("reset_form"):
                username = st.text_input("Username")
                new_password = st.text_input("New Password", type="password")
                confirm_password = st.text_input("Confirm Password", type="password")
                submit = st.form_submit_button("Reset Password")
                if submit:
                    if not username or not new_password or not confirm_password:
                        st.error("All fields are required.")
                    elif new_password != confirm_password:
                        st.error("Passwords do not match.")
                    elif update_password(username, new_password):
                        st.success("Password updated successfully!")
                        st.session_state['show_reset_form'] = False
                        time.sleep(1)
                        st.rerun()
                    else:
                        st.error("Username not found.")
            if st.button("Back to Login"):
                st.session_state['show_reset_form'] = False
                st.rerun()
        # Registration Form
        elif st.session_state.get('show_register_form', False):
            st.markdown('<h1 class="auth-title">Create Account</h1>', unsafe_allow_html=True)
            st.markdown('<p class="auth-subtitle">Sign up to get started</p>', unsafe_allow_html=True)
            with st.form("register_form"):
                new_username = st.text_input("Username")
                new_password = st.text_input("Password", type="password")
                submit_button = st.form_submit_button("Create Account")
                if submit_button:
                    if new_username and new_password:
                        if add_user(new_username, new_password):
                            st.success("Account created successfully!")
                            st.session_state['show_register_form'] = False
                            time.sleep(1)
                            st.rerun()
                        else:
                            st.error("Username already exists.")
                    else:
                        st.error("Username and password are required.")
            st.markdown('<div class="divider"><span>OR</span></div>', unsafe_allow_html=True)
            if st.button("Back to Login"):
                st.session_state['show_register_form'] = False
                st.rerun()
        # Login Form (default)
        else:
            st.markdown('<h1 class="auth-title">Welcome Back</h1>', unsafe_allow_html=True)
            st.markdown('<p class="auth-subtitle">Sign in to your account</p>', unsafe_allow_html=True)
            with st.form("login_form"):
                username = st.text_input("Username")
                password = st.text_input("Password", type="password")
                cols = st.columns([1, 1])
                submit_button = cols[0].form_submit_button("Sign In")
                forgot_password = cols[1].form_submit_button("Forgot Password?")
                if submit_button:
                    if login(username, password):
                        st.success("Logged in successfully!")
                        time.sleep(1)
                        st.rerun()
                elif forgot_password:
                    st.session_state['show_reset_form'] = True
                    st.rerun()
            st.markdown('<div class="divider"><span>OR</span></div>', unsafe_allow_html=True)
            if st.button("Create New Account"):
                st.session_state['show_register_form'] = True
                st.rerun()
        st.markdown('</div>', unsafe_allow_html=True)
    # Footer section
    st.markdown("""
<div style="background: linear-gradient(to right, #1e293b, #334155); color: white; padding: 40px 0; margin-top: 40px;">
<div style="max-width: 1200px; margin: auto; padding: 0 20px;">
<div style="display: flex; flex-wrap: wrap; justify-content: space-between; gap: 40px;">
<!-- About Section -->
<div style="flex: 1; min-width: 250px;">
<h3 style="color: #60a5fa; font-size: 1.5em; margin-bottom: 20px;">About Our Platform</h3>
<p style="color: #e2e8f0; line-height: 1.6; margin-bottom: 20px;">
Our AI-powered platform revolutionizes crop disease detection and management.
We combine cutting-edge technology with agricultural expertise to protect your crops
and maximize your yield.
</p>
</div>
<div style="flex: 1; min-width: 250px;">
<h3 style="color: #60a5fa; font-size: 1.5em; margin-bottom: 20px;">Key Features</h3>
<ul style="list-style: none; padding: 0; color: #e2e8f0;">
<li style="margin-bottom: 10px; display: flex; align-items: center;">
<span style="color: #60a5fa; margin-right: 10px;">βœ“</span> Real-time Disease Detection
</li>
<li style="margin-bottom: 10px; display: flex; align-items: center;">
<span style="color: #60a5fa; margin-right: 10px;">βœ“</span> Multi-language Support
</li>
<li style="margin-bottom: 10px; display: flex; align-items: center;">
<span style="color: #60a5fa; margin-right: 10px;">βœ“</span> Expert Analysis Reports
</li>
<li style="margin-bottom: 10px; display: flex; align-items: center;">
<span style="color: #60a5fa; margin-right: 10px;">βœ“</span> Treatment Recommendations
</li>
</ul>
</div>
<div style="flex: 1; min-width: 250px;">
<h3 style="color: #60a5fa; font-size: 1.5em; margin-bottom: 20px;">Contact Us</h3>
<p style="color: #e2e8f0; line-height: 1.6; margin-bottom: 10px;">
<span style="color: #60a5fa;">Email:</span> [email protected]
</p>
<p style="color: #e2e8f0; line-height: 1.6; margin-bottom: 20px;">
<span style="color: #60a5fa;">Phone:</span> +1 (234) 567-8900
</p>
<div style="display: flex; gap: 15px; margin-top: 20px;">
<a href="#" style="color: #60a5fa; text-decoration: none; font-size: 1.2em;">
<span>πŸ“±</span>
</a>
<a href="#" style="color: #60a5fa; text-decoration: none; font-size: 1.2em;">
<span>πŸ’¬</span>
</a>
<a href="#" style="color: #60a5fa; text-decoration: none; font-size: 1.2em;">
<span>πŸ“¨</span>
</a>
</div>
</div>
</div>
<div style="border-top: 1px solid #4b5563; margin-top: 40px; padding-top: 20px; text-align: center;">
<p style="color: #e2e8f0; font-size: 0.9em;">
Β© 2025 Crop Disease Detection System. All rights reserved.
</p>
<div style="margin-top: 10px;">
<a href="#" style="color: #e2e8f0; text-decoration: none; margin: 0 10px; font-size: 0.9em;">Privacy Policy</a>
<a href="#" style="color: #e2e8f0; text-decoration: none; margin: 0 10px; font-size: 0.9em;">Terms of Service</a>
<a href="#" style="color: #e2e8f0; text-decoration: none; margin: 0 10px; font-size: 0.9em;">FAQ</a>
</div>
</div>
</div>
</div>
""", unsafe_allow_html=True)
    st.stop()
# Feedback database setup (stores ratings plus category and free-text comments)
def setup_feedback_db():
    conn = sqlite3.connect('customer_feedback.db')
    c = conn.cursor()
    c.execute('''CREATE TABLE IF NOT EXISTS customer_feedback
                 (id INTEGER PRIMARY KEY AUTOINCREMENT,
                  question TEXT,
                  response TEXT,
                  feedback_type TEXT,
                  comment_type TEXT,
                  custom_comment TEXT,
                  timestamp DATETIME DEFAULT CURRENT_TIMESTAMP)''')
    conn.commit()
    return conn, c
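# Possible optimization (sketch, not used by the app): reuse one cached connection instead
# of reopening customer_feedback.db on every save, assuming st.cache_resource is available
# in the installed Streamlit version.
# @st.cache_resource
# def get_feedback_connection():
#     return sqlite3.connect('customer_feedback.db', check_same_thread=False)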
def save_feedback(question, response, feedback_type, comment_type=None, custom_comment=None):
    conn, c = setup_feedback_db()
    try:
        c.execute("""INSERT INTO customer_feedback
                     (question, response, feedback_type, comment_type, custom_comment)
                     VALUES (?, ?, ?, ?, ?)""",
                  (question, response, feedback_type, comment_type, custom_comment))
        conn.commit()
        return True
    except Exception as e:
        st.error(f"Error saving feedback: {e}")
        return False
    finally:
        conn.close()
# Feedback buttons for a single question/response pair
def display_feedback_buttons(file_id, index, question, response):
    # Suggested comments
    SUGGESTED_COMMENTS = [
        "Inaccurate information",
        "Unclear explanation",
        "Missing details",
        "Not relevant to question",
        "Technical error",
        "Other"
    ]
    # Initialize session state for feedback if it doesn't exist
    if f"feedback_{file_id}_{index}" not in st.session_state:
        st.session_state[f"feedback_{file_id}_{index}"] = {
            "feedback_type": None,  # Stores "πŸ‘" or "πŸ‘Ž"
            "comment": None,        # Stores the user's comment
            "submitted": False      # Tracks whether feedback has been submitted
        }
    col1, col2 = st.columns([1, 4])
    with col1:
        if st.button("πŸ‘", key=f"helpful_{file_id}_{index}"):
            # Save positive feedback immediately
            save_feedback(question, response, "πŸ‘")
            st.success("Feedback saved!")
            # Update session state to indicate feedback has been submitted
            st.session_state[f"feedback_{file_id}_{index}"]["submitted"] = True
            return
    with col2:
        if st.button("πŸ‘Ž", key=f"not_helpful_{file_id}_{index}"):
            # Store the feedback type in session state
            st.session_state[f"feedback_{file_id}_{index}"]["feedback_type"] = "πŸ‘Ž"
    # Show the comment input only after a πŸ‘Ž rating has been recorded
    if st.session_state[f"feedback_{file_id}_{index}"].get("feedback_type") == "πŸ‘Ž":
        # Display suggested comments in a dropdown menu
        selected_comment = st.selectbox(
            "What was the issue?",
            options=SUGGESTED_COMMENTS,
            key=f"suggested_comment_{file_id}_{index}"
        )
        # If the user selects "Other", allow them to provide a custom comment
        custom_comment = None
        if selected_comment == "Other":
            custom_comment = st.text_area(
                "Please describe the issue:",
                key=f"custom_comment_{file_id}_{index}"
            )
        # Submit Feedback button
        if st.button("Submit Feedback", key=f"submit_{file_id}_{index}"):
            # Save feedback, keeping the comment category and free-text comment in their own columns
            save_feedback(
                question,
                response,
                st.session_state[f"feedback_{file_id}_{index}"]["feedback_type"],
                comment_type=selected_comment,
                custom_comment=custom_comment if selected_comment == "Other" else None
            )
            st.success("Thank you for your feedback!")
            # Update session state to indicate feedback has been submitted
            st.session_state[f"feedback_{file_id}_{index}"]["submitted"] = True
            return
# Model configuration
SUPPORTED_MODELS = {
    "llama3.2": {
        "name": "llama3.2",
        "system_prompt": "You are a helpful plant pathology expert assistant.",
        "supports_vision": False
    },
    "llama3.1": {
        "name": "llama3.1",
        "system_prompt": "You are a helpful plant pathology expert assistant.",
        "supports_vision": False
    },
    "llama2": {
        "name": "llama2",
        "system_prompt": "You are a helpful plant pathology expert assistant.",
        "supports_vision": False
    },
    "llava": {
        "name": "llava",
        "system_prompt": "You are a helpful plant pathology expert assistant with vision capabilities.",
        "supports_vision": True,
        "vision_prompt": "Analyze the image and describe the diseases present."
    },
    "mistral": {
        "name": "mistral",
        "system_prompt": "You are a helpful plant pathology expert assistant.",
        "supports_vision": False
    },
    "gemma": {
        "name": "gemma",
        "system_prompt": "You are a helpful plant pathology expert assistant.",
        "supports_vision": False
    },
    "jyan1/paligemma-mix-224": {
        "name": "jyan1/paligemma-mix-224",
        "system_prompt": "You are a helpful plant pathology expert assistant.",
        "supports_vision": True
    }
}
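# Optional helper (sketch, not wired into the app): the entries above assume these models are
# already pulled locally. Ollama's /api/tags endpoint lists installed models and could be
# used to warn about a missing selection, e.g.:
# def list_installed_ollama_models():
#     try:
#         tags = requests.get("http://localhost:11434/api/tags").json()
#         return [m["name"] for m in tags.get("models", [])]
#     except Exception:
#         return []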
# Initialize session state for conversation history if it doesn't exist
if 'conversation_history' not in st.session_state:
    st.session_state.conversation_history = {}
# Load YOLOv8 model
yolo_model = YOLO("models/best.pt")
def preprocess_image(image, target_size=(224, 224)):
    """
    Preprocess the image for vision-capable models.
    """
    image = Image.fromarray(image)
    image = image.resize(target_size)
    return image
def text_to_speech(text, language='en'):
    """Convert text to speech using gTTS"""
    try:
        # Create temporary file
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as temp_audio:
            # Generate audio file
            tts = gTTS(text=text, lang=language, slow=False)
            tts.save(temp_audio.name)
        # Read the audio file
        with open(temp_audio.name, 'rb') as audio_file:
            audio_bytes = audio_file.read()
        # Clean up
        os.unlink(temp_audio.name)
        return audio_bytes
    except Exception as e:
        st.error(f"Error generating speech: {str(e)}")
        return None
def check_ollama_connection():
    try:
        response = requests.get("http://localhost:11434")
        return response.status_code == 200
    except Exception:
        return False
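# Optional guard (sketch): check_ollama_connection() is defined but never called below. It
# could be used to fail fast when the local Ollama server is unreachable, e.g.:
#     if not check_ollama_connection():
#         st.warning("Could not reach Ollama at http://localhost:11434; analysis requests will fail.")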
def generate_ollama_response(prompt, model_name="llama2", conversation_history=None, image_data=None):
try:
if model_name not in SUPPORTED_MODELS:
return f"Error: Model {model_name} is not supported."
model_config = SUPPORTED_MODELS[model_name]
# Build the messages array
messages = [
{
"role": "system",
"content": model_config["system_prompt"]
}
]
# Add conversation history
if conversation_history:
for entry in conversation_history:
if len(entry) >= 2: # Handle tuples with 2 or 3 values
question, response = entry[:2]
messages.extend([
{"role": "user", "content": question},
{"role": "assistant", "content": response}
])
# Handle vision models differently
if model_config["supports_vision"] and image_data is not None:
if isinstance(image_data, np.ndarray):
image = Image.fromarray(image_data)
buffered = BytesIO()
image.save(buffered, format="JPEG")
img_str = base64.b64encode(buffered.getvalue()).decode()
messages.append({
"role": "user",
"content": [
{"type": "text", "text": prompt},
{"type": "image", "image": img_str}
]
})
else:
messages.append({
"role": "user",
"content": prompt
})
# Make an API call to Ollama
api_url = "http://localhost:11434/api/generate" # Ollama API endpoint
payload = {
"model": model_config["name"],
"prompt": prompt, # Use the prompt directly
"stream": False # Set to True if you want streaming responses
}
# Send the request
response = requests.post(api_url, json=payload)
# Check for errors
if response.status_code != 200:
return f"Error: API request failed with status code {response.status_code}. Response: {response.text}"
# Parse the response
response_data = response.json()
# Check if the response contains the expected key
if "response" in response_data:
return response_data["response"]
else:
return f"Error: Unexpected response format: {response_data}"
except Exception as e:
return f"Error connecting to Ollama API: {str(e)}"
def generate_improved_description(detected_classes, class_names, user_text, image_details=None, conversation_history=None):
    """
    Generate a more detailed and contextual description using Ollama
    """
    detected_objects = [class_names[cls] for cls in detected_classes]
    # Create base context about detected diseases
    disease_context = f"Detected diseases: {', '.join(detected_objects)}"
    # Different prompt structure for initial vs. follow-up questions
    if not conversation_history:
        base_prompt = f"""As an expert plant pathologist, analyze the following crop diseases detected in the image: {', '.join(detected_objects)}.
For each detected disease, provide a structured analysis following this format:
1. Disease Name: [Name]
- Pathogen: [Causative organism]
- Severity Level: [Based on visual symptoms]
- Key Symptoms:
* [Symptom 1]
* [Symptom 2]
- Economic Impact:
* [Brief description of potential crop losses]
- Treatment Options:
* Immediate actions: [Short-term solutions]
* Long-term management: [Preventive measures]
- Environmental Conditions:
* Favorable conditions for disease development
* Risk factors
2. Recommendations:
- Immediate Steps:
* [Action items for immediate control]
- Prevention Strategy:
* [Long-term prevention measures]
- Monitoring Protocol:
* [What to watch for]
Initial Question/Context: {user_text if user_text else "Provide a general analysis"}
"""
    else:
        base_prompt = f"""Context: {disease_context}
Previous conversation context has been provided above. Please address the following follow-up question while maintaining consistency with previous responses:
{user_text}
Provide a detailed response that builds upon the previous context and specifically addresses this question."""
    # Get the selected model from session state or default to llama2
    selected_model = st.session_state.get('selected_model', 'llama2')
    return generate_ollama_response(
        base_prompt,
        model_name=selected_model,
        conversation_history=conversation_history,
        image_data=image_details.get("image_data") if image_details else None
    )
def inference(image, conf=0.4):
    """
    Enhanced inference function with confidence scores and bounding box information.
    The confidence threshold is configurable so the sidebar slider can be honored.
    """
    results = yolo_model(image, conf=conf)
    infer = np.zeros(image.shape, dtype=np.uint8)
    classes = dict()
    names_infer = []
    confidence_scores = []
    bounding_boxes = []
    for r in results:
        infer = r.plot()
        classes = r.names
        names_infer = r.boxes.cls.tolist()
        confidence_scores = r.boxes.conf.tolist()
        bounding_boxes = r.boxes.xyxy.tolist()
    return infer, names_infer, classes, confidence_scores, bounding_boxes
# Streamlit application
st.title("Interactive Crop Disease Detection and Analysis🌾🌿πŸ₯¬β˜˜οΈ")
st.write(f"Welcome, {st.session_state['username']}!😊")
# Logout button
if st.button("Logout"):
logout()
st.rerun()
# Add sidebar for configuration
with st.sidebar:
    st.header("Settings")
    selected_model = st.selectbox(
        "Select LLM Model",
        list(SUPPORTED_MODELS.keys()),
        index=0,  # Default to the first model in the list
        help="Choose the Ollama model to use for analysis"
    )
    # Store the selected model in session state
    st.session_state['selected_model'] = selected_model
    if SUPPORTED_MODELS[selected_model]["supports_vision"]:
        st.info("This model supports vision capabilities and can analyze images directly.")
    confidence_threshold = st.slider("Detection Confidence Threshold", 0.0, 1.0, 0.4)
    show_confidence = st.checkbox("Show Confidence Scores", value=True)
    show_bbox = st.checkbox("Show Bounding Boxes", value=True)
    # TTS Settings
    st.header("Text-to-Speech Settings")
    tts_enabled = st.checkbox("Enable Text-to-Speech", value=True)
    if tts_enabled:
        language = st.selectbox("Speech Language",
                                options=['en', 'es', 'fr', 'de'],
                                format_func=lambda x: {
                                    'en': 'English',
                                    'es': 'Spanish',
                                    'fr': 'French',
                                    'de': 'German'
                                }[x],
                                help="Select speech language")
    # Add option to clear conversation history
    if st.button("Clear All Conversations"):
        st.session_state.conversation_history = {}
        st.success("Conversation history cleared!")
    # Language selection
    language = st.selectbox(
        "Select Language",
        options=['en', 'es', 'fr', 'de'],  # Add more languages as needed
        format_func=lambda x: {
            'en': 'English',
            'es': 'Spanish',
            'fr': 'French',
            'de': 'German'
        }[x],
        help="Select your preferred language"
    )
# Main content
uploaded_files = st.file_uploader("Upload images for disease detection", type=["jpg", "jpeg", "png"], accept_multiple_files=True)
if uploaded_files:
    for uploaded_file in uploaded_files:
        file_id = uploaded_file.name
        # Initialize conversation history for this image if it doesn't exist
        if file_id not in st.session_state.conversation_history:
            st.session_state.conversation_history[file_id] = []
        st.header(f"Analysis for {file_id}")
        # Create columns for side-by-side display
        col1, col2 = st.columns(2)
        # Process image
        file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
        image = cv2.imdecode(file_bytes, 1)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # Display original image
        with col1:
            st.subheader("Original Image")
            st.image(image, use_container_width=True)
        # Process and display results, honoring the sidebar confidence threshold
        with st.spinner("Processing image..."):
            infer_image, classes_in_image, classes_in_dataset, confidences, boxes = inference(image, conf=confidence_threshold)
        with col2:
            st.subheader("Detected Diseases")
            st.image(infer_image, use_container_width=True)
        # Display detection details
        if show_confidence:
            st.subheader("Detection Details")
            for cls, conf in zip(classes_in_image, confidences):
                st.write(f"- {classes_in_dataset[cls]}: {conf:.2%} confidence")
        # Display conversation history
        if st.session_state.conversation_history[file_id]:
            st.subheader("Conversation History")
            for i, entry in enumerate(st.session_state.conversation_history[file_id]):
                question, response = entry[:2]
                with st.expander(f"Q{i+1}: {question[:50]}...", expanded=False):
                    st.write("**Question:**", question)
                    st.write("**Response:**", response)
                    # Display feedback buttons and handle comment collection
                    display_feedback_buttons(file_id, i, question, response)
                    # Audio playback option
                    if tts_enabled:
                        if st.button("πŸ”Š Listen", key=f"listen_history_{file_id}_{i}"):
                            with st.spinner("Generating audio..."):
                                audio_bytes = text_to_speech(response, language)
                                if audio_bytes:
                                    st.audio(audio_bytes, format='audio/mp3')
        # User input for questions
        st.subheader("Ask Questions")
        user_text = st.text_area(
            "Enter your question about the detected diseases:",
            placeholder="Example: What are the best treatment options for these diseases? What preventive measures should I take?",
            key=f"question_{file_id}"
        )
        def translate_text(text, target_lang='en'):
            translator = GoogleTranslator(source='auto', target=target_lang)
            return translator.translate(text)
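        # Note (sketch): GoogleTranslator can raise on network errors or empty input; a more
        # defensive variant could fall back to the original text, e.g.:
        #     try:
        #         return translator.translate(text)
        #     except Exception:
        #         return text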
        # Analysis trigger
        if st.button("Get Analysis", key=f"analyze_{file_id}"):
            with st.spinner(f"Generating analysis using {selected_model}..."):
                # Perform translation
                translated_input = translate_text(user_text, target_lang='en')
                st.write(f"Translated Input (to English): {translated_input}")
                # Create detailed image information dictionary
                image_details = {
                    "confidence_scores": confidences,
                    "bounding_boxes": boxes,
                    "image_dimensions": image.shape,
                    "image_data": image  # Add the image data for vision models
                }
                # Generate response
                response = generate_improved_description(
                    classes_in_image,
                    classes_in_dataset,
                    translated_input,
                    image_details,
                    st.session_state.conversation_history[file_id]
                )
                # Translate the LLM response into the selected language
                translated_response = translate_text(response, target_lang=language)
                # Add to conversation history and display the response
                st.session_state.conversation_history[file_id].append((user_text, translated_response, None))
                st.markdown("### Latest Response")
                st.markdown(translated_response)
                # Add audio playback option for the latest response
                if tts_enabled:
                    col1, col2 = st.columns([1, 4])
                    with col1:
                        if st.button("πŸ”Š Listen", key=f"listen_latest_{file_id}"):
                            with st.spinner("Generating audio..."):
                                audio_bytes = text_to_speech(translated_response, language)
                                if audio_bytes:
                                    st.audio(audio_bytes, format='audio/mp3')
        # Export conversation
        if st.button("Export Conversation", key=f"export_{file_id}"):
            conversation_text = f"""
# Crop Disease Analysis Report
## Image Information
- Filename: {file_id}
- Analysis Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
## Detected Diseases
{', '.join([classes_in_dataset[cls] for cls in classes_in_image])}
## Conversation History
"""
            for i, entry in enumerate(st.session_state.conversation_history[file_id]):
                if len(entry) == 2:  # Handle legacy entries
                    question, response = entry
                    feedback = "No feedback"
                else:
                    question, response, feedback = entry
                conversation_text += f"\n### Question {i+1}:\n{question}\n\n### Answer {i+1}:\n{response}\n\n### Feedback {i+1}:\n{feedback}\n"
            st.download_button(
                label="Download Conversation",
                data=conversation_text,
                file_name=f"disease_analysis_{file_id}.md",
                mime="text/markdown"
            )
# Add a footer with clear instructions
st.markdown("""
---
### How to Use
1. Upload one or more images of crops with potential diseases
2. View the detected diseases and their confidence scores
3. Ask questions about the diseases, treatments, or prevention
4. Use the πŸ”Š Listen button to hear the responses
5. View previous questions and answers in the conversation history
6. Export the entire conversation for future reference
7. Use the sidebar to adjust settings or clear conversation history
""")