diff --git "a/app.py" "b/app.py" --- "a/app.py" +++ "b/app.py" @@ -1,1157 +1,1575 @@ import streamlit as st -import os -from PIL import Image -# Set the page layout -st.set_page_config(layout="wide") -import json -import base64 -import time -from dotenv import load_dotenv -import os -import requests -import pickle +import pandas as pd +import joblib import numpy as np - -# Load model once -with open("best_clf.pkl", "rb") as file: - best_clf = pickle.load(file) - - -# Try loading environment variables locally -try: - from dotenv import load_dotenv - load_dotenv() -except: - pass - -# Get the token from environment variables -HF_TOKEN = os.environ.get("HF_TOKEN") - - -def query_huggingface_model(selected_model: dict, input_data, input_type="text",max_tokens=512,task="text-classification",temperature=0.7, top_p=0.9 ): - API_URL = selected_model.get("url") - headers = {"Authorization": f"Bearer {HF_TOKEN}"} - - try: - if input_type == "text": - if task == "text-generation": - payload = { - "messages": [ - { - "role": "user", - "content": input_data - } - ], - "model":selected_model.get("model") - } - - else: - payload = { - "inputs": input_data , - - } - response = requests.post(API_URL, headers=headers, json=payload) - - elif input_type == "image": - with open(input_data, "rb") as f: - data = f.read() - response = requests.post(API_URL, headers=headers, data=data) - - else: - return {"error": f"Unsupported input_type: {input_type}"} - - response.raise_for_status() - return response.json() - - except requests.exceptions.RequestException as e: - return {"error": str(e)} -def extract_response_content(response): - print(f"Response is: {response}") - - # For text generation or image captioning - if isinstance(response, list): - if response and isinstance(response[0], dict) and "generated_text" in response[0]: - return response[0]["generated_text"] - - elif response and isinstance(response[0], list) and "label" in response[0][0]: - # For text classification - return [(item["label"], round(item["score"], 3)) for item in response[0]] +import warnings +from streamlit.components.v1 import html +import time # For simulating loading +# --- Set page configuration --- +st.set_page_config( + page_title="CreditIQ - AI-Powered Credit Expenditure Prediction", + page_icon="πŸ’³", + layout="wide", # Use wide layout to better accommodate complex design + initial_sidebar_state="collapsed" # Hide sidebar to give full canvas +) +# --- Initialize session state for page management --- +if 'active_page' not in st.session_state: + st.session_state.active_page = 'home' +if 'show_prediction' not in st.session_state: + st.session_state.show_prediction = False +if 'predicted_value' not in st.session_state: + st.session_state.predicted_value = 0.0 - # For OpenAI-style chat responses - elif isinstance(response, dict): - if "choices" in response and isinstance(response["choices"], list): - try: - return response["choices"][0]["message"]["content"] - except (KeyError, IndexError, TypeError): - return "Error: Could not extract message from choices" - - elif "error" in response: - return f"Error: {response['error']}" - - return "Unknown response format" - -# --- Step 1 --- -if 'name' not in st.session_state: - st.session_state.name = "Paul" - -if 'gender' not in st.session_state: - st.session_state.gender = "Male" - -if 'age' not in st.session_state: - st.session_state.age = 25 - -if 'currentSmoker' not in st.session_state: - st.session_state.currentSmoker = "Yes" - -if 'cigsPerDay' not in st.session_state: - 
st.session_state.cigsPerDay = 0 - -if 'BPMeds' not in st.session_state: - st.session_state.BPMeds = False -if 'diabetes' not in st.session_state: - st.session_state.diabetes = False +warnings.filterwarnings('ignore') -# --- Step 2 --- -if 'totChol' not in st.session_state: - st.session_state.totChol = 180 # mg/dL - -if 'sysBP' not in st.session_state: - st.session_state.sysBP = 120 # mmHg - -if 'diaBP' not in st.session_state: - st.session_state.diaBP = 80 # mmHg - -# --- Step 3 --- -if 'BMI' not in st.session_state: - st.session_state.BMI = 22.0 - -if 'heartRate' not in st.session_state: - st.session_state.heartRate = 70 # bpm +st.markdown(""" + +""", unsafe_allow_html=True) +# --- Inject Custom HTML, CSS, and Inline JavaScript --- +# This block defines the sophisticated styling and background animations, +# navigation, and overall page structure. +# Streamlit components will be injected into specific HTML placeholders. +st.markdown(f""" + + + + + + CreditIQ - AI-Powered Credit Expenditure Prediction + + + + + - - """, - unsafe_allow_html=True, - ) - # Overlay container - st.markdown( - f""" - + + +
+""", unsafe_allow_html=True)
+
+st.markdown(""" """, unsafe_allow_html=True)
-    st.markdown(
-        f"""
-        <!-- removed header markup (tags lost in extraction): logo image ("Uploaded Image") and "HeartCheck AI" title -->
- - """, - unsafe_allow_html=True, - ) +# --- Load the trained model --- +@st.cache_resource +def load_model(): + try: + model = joblib.load('credit_card_expenditure_model.joblib') + return model + except FileNotFoundError: + st.error("Model file 'credit_card_expenditure_model.joblib' not found. Please ensure it's in the same directory as this app.") + st.stop() # Stop the app if model is not found + except Exception as e: + st.error(f"Error loading model: {{e}}") + st.stop() + +model = load_model() + +# --- Streamlit Content Injection --- +# This section uses st.empty() to create a placeholder where our content +# for the active page will be rendered. +streamlit_content_placeholder = st.empty() + +# --- Page Rendering Logic based on active_page state --- +with streamlit_content_placeholder.container(): + # Home Page + if st.session_state.active_page == 'home': + st.markdown(f""" +
+# --- Streamlit Content Injection ---
+# This section uses st.empty() to create a placeholder where our content
+# for the active page will be rendered.
+streamlit_content_placeholder = st.empty()
+
+# --- Page Rendering Logic based on active_page state ---
+with streamlit_content_placeholder.container():
+    # Home Page
+    if st.session_state.active_page == 'home':
+        st.markdown(f"""
+        <!-- hero markup lost in extraction; recovered text: -->
+        Predict Credit Expenditure with AI
+        Harness the power of advanced machine learning to accurately forecast credit card spending patterns and make informed financial decisions.
+        AI-Powered: Advanced algorithms analyze spending patterns
+        Secure: Bank-level security for your data
+        Fast: Instant predictions in seconds
+ """, unsafe_allow_html=True) + with st.container(key="navbtn"): + if st.button(" Start Predicting",key="pred"): + st.session_state.active_page = 'predictor' + st.rerun() + if st.button(" Learn More",key="abt"): + st.session_state.active_page = 'about' + st.rerun() + - st.markdown( - f"""
-    st.markdown(
-        f"""
-        <!-- removed hero markup (tags lost in extraction); recovered text: -->
-        Predict Your Risk, Protect Your Heart
-        HeartCheck uses intelligent risk analysis to predict your likelihood of heart disease,
-        empowering you with personalized insights, early warnings, and lifestyle tips to keep your heart healthy and strong.
- """, - unsafe_allow_html=True, - ) + # We need to trigger the JS function to set active state and navigate + elif st.session_state.active_page == 'predictor': + # --- Prediction Form --- +# We use st.form to group all the inputs. When the submit button is pressed, +# Streamlit re-runs the script with all the new input values. + with st.container(key ="main-form" ): + st.markdown('

Customer Financial Profile

', unsafe_allow_html=True) + with st.form(key="prediction_form"): + + # Create columns for the input grid layout + col1, col2, col3 = st.columns(3) + + with col1: + reports = st.number_input("Major Derogatory Reports", min_value=0, max_value=20, value=0, help="Number of serious credit issues reported") + age = st.number_input("Age (years)", min_value=18.0, max_value=100.0, value=30.0, step=0.1, help="Customer's age in years") + income = st.number_input("Yearly Income ($10,000s)", min_value=0.0, max_value=100.0, value=3.0, step=0.1, help="Annual income in tens of thousands") + owner = st.selectbox("Home Owner", ('no', 'yes'), help="Does customer own their home?") + + with col2: + card = st.selectbox("Card Owner", ('no', 'yes'), help="Does customer own a credit card?") + share = st.number_input("Monthly Expenditure Ratio", min_value=0.0, max_value=1.0, value=0.1, step=0.01, help="Ratio of monthly spending to yearly income") + active = st.number_input("Active Credit Accounts", min_value=0, max_value=50, value=5, help="Total number of active credit accounts") + selfemp = st.selectbox("Self Employed", ('no', 'yes'), help="Is the customer self-employed?") + + with col3: + dependents = st.number_input("Number of Dependents", min_value=0, max_value=10, value=0, help="Financially dependent individuals") + months = st.number_input("Months at Current Address", min_value=0, max_value=500, value=36, help="Duration at current residence") + majorcards = st.number_input("Major Credit Cards", min_value=0, max_value=10, value=1, help="Number of major credit cards owned") + payments = st.number_input("Credit Card Payments", min_value=0, max_value=50, value=6, help="Total credit card payments made") + + # The submit button for the form + submitted = st.form_submit_button("Predict Expenditure") + + + + # --- Model Prediction Logic --- + # This block runs only when the form has been submitted. 
+ if st.button("Dismiss modal", key="dismiss_modal_btn", help="Hidden dismiss button"): + st.session_state.show_prediction = False + st.rerun() + if submitted: + if model is not None: + + # Show a loading spinner while processing + with st.spinner("Analyzing financial data with AI..."): + time.sleep(1) # Simulate processing time + + # Convert categorical inputs from 'yes'/'no' to 1/0 for the model + owner_encoded = 1 if owner == 'yes' else 0 + card_encoded = 1 if card == 'yes' else 0 + selfemp_encoded = 1 if selfemp == 'yes' else 0 + + # Create a pandas DataFrame with the exact structure the model expects + input_data = pd.DataFrame([{ + 'card': card, # "yes"/"no" as string + 'reports': reports, # int + 'age': age, # float + 'income': income, # float + 'share': share, # float + 'owner': owner, # "yes"/"no" as string + 'selfemp': selfemp, # "yes"/"no" as string + 'dependents': dependents, # int + 'months': months, # int + 'majorcards': majorcards, + 'active':active# int + # float or placeholder if unknown + }]) + expected_features = model.feature_names_in_ + + # Filter and reorder the input columns to match the model + input_filtered = input_data[expected_features] + print("input_data",input_data) + print("expected",model.feature_names_in_) # If using scikit-learn 1.0+ + + print("input_data_type",input_data.dtypes) + + try: + # Get the prediction from the model + prediction = model.predict(input_filtered)[0] + print("prediction:",prediction) + # Store the result and set the flag to show it + st.session_state.predicted_value = abs(prediction) # Ensure it's a positive number + st.session_state.show_prediction = True + except Exception as e: + st.error(f"Could not make a prediction. Error: {e}") -with st.container(key = "main"): - col1,col2 = st.columns([3,3]) - with col1: - with st.container(key= "side"): - if st.button("1",key="step_1"): - st.session_state.form1 = "back" - st.session_state.form2 = "back" - st.session_state.form3 = "back" - st.session_state.form4 = "back" - st.session_state.form5 = "back" + else: + st.error("The prediction model is not available. Please check server logs.") - st.rerun() - if st.button("2",key="step_2"): - st.session_state.form2 = "back" - st.session_state.form3 = "back" - st.session_state.form4 = "back" - st.session_state.form5 = "back" - - st.rerun() - if st.button("3",key="step_3"): - st.session_state.form3 = "back" - st.session_state.form4 = "back" - st.session_state.form5 = "back" - - st.rerun() - if st.button("4",key="step_4"): - st.session_state.form4 = "back" - st.session_state.form5 = "back" - st.rerun() + # --- Display Prediction Result --- + # This block displays the result card if a prediction has been made. 
+ if st.session_state.show_prediction: + st.markdown(f""" + - with col2: - if st.session_state.form1 == "back": - with st.container(key="form1"): - st.write("🧍 Step 1: Personal Info") - with st.container(key="form-head"): - st.image("icon.png") - with st.form( key="first"): - with st.container(key="form-content"): - # Input fields - st.session_state.name = st.text_input("Name", value=st.session_state.name) - st.session_state.age = st.number_input("Age", min_value=0, max_value=120, step=1, value=st.session_state.age) - st.session_state.gender = st.radio("Sex:", ["Male", "Female"], horizontal=True, index=0 if st.session_state.gender == "Male" else 1) - - # Navigation buttons - col1, col2 = st.columns([4, 1]) - next = col2.form_submit_button("Next ") - - if next: - st.session_state.form1 = "next" - st.rerun() - elif st.session_state.form1 == "next" and st.session_state.form2 == "back": - with st.container(key="form2"): - st.write("🚬 Step 2: Clinical History") - - st.radio("Do you currently smoke?", ["Yes", "No"], horizontal=True, key="currentSmoker") - print(st.session_state.currentSmoker) - with st.form("form_step_2"): - with st.container(key="form-content1"): - # Show 'cigsPerDay' only if smoker - if st.session_state.currentSmoker == "Yes": - if st.session_state.cigsPerDay == 0: - st.session_state.cigsPerDay = 1 - else: - st.session_state.cigsPerDay = st.session_state.cigsPerDay - print(f"tessst:{st.session_state['currentSmoker']}") - - st.session_state.cigsPerDay = st.number_input("How many cigarettes per day?", min_value=1, max_value=60, step=1,value = st.session_state.cigsPerDay) - else: - st.session_state.cigsPerDay = 0 # default to 0 if non-smoker - r1,r2 = st.columns([6,3]) - with r1: - if st.session_state.BPMeds == "Yes": - bp = 0 - else: - bp = 1 - st.session_state.BPMeds = st.radio("Do you take blood pressure medication?", ["Yes", "No"], horizontal=True,index = bp) - with r2: - if st.session_state.diabetes == "Yes": - db = 0 - else: - db = 1 - st.session_state.diabetes = st.radio("Do you have diabetes?", ["Yes", "No"], horizontal=True,index = db ) - - - col1, col2 = st.columns([4,1]) - back = col1.form_submit_button("Back") - if back: - st.session_state.form1 = "back" - st.rerun() - next = col2.form_submit_button("Next") - if next: - st.session_state.form2 = "next" - st.rerun() - elif st.session_state.form2 == "next" and st.session_state.form3 == "back": - with st.container(key="form2"): - st.write("πŸ’‰ Step 3: Vital Signs & Cholesterol") - - with st.form("form_step_2"): - with st.container(key="form-content2"): - # Step 3 inputs - - st.session_state.totChol = st.number_input("Total Cholesterol (mg/dL)", min_value=100, max_value=400, step=1,value= st.session_state.totChol) - st.session_state.sysBP = st.number_input("Systolic Blood Pressure (mmHg)", min_value=80, max_value=250, step=1,value = st.session_state.sysBP) - st.session_state.diaBP = st.number_input("Diastolic Blood Pressure (mmHg)", min_value=50, max_value=150, step=1,value= st.session_state.diaBP) - - - col1, col2 = st.columns([4,1]) - back = col1.form_submit_button("Back") - if back: - st.session_state.form2 = "back" - - st.rerun() - next = col2.form_submit_button("Next") - if next: - st.session_state.form3 = "next" - st.rerun() - elif st.session_state.form3 == "next" and st.session_state.form4 == "back": - with st.container(key="form3"): - st.write("πŸ§ͺ Step 4: Body Metrics & Glucose") - - with st.form("form_step_3"): - with st.container(key="form-content3"): - # Step 3 inputs - - st.session_state.BMI = 
st.number_input("Body Mass Index (BMI)", min_value=10.0, max_value=60.0, step=0.1,value=st.session_state.BMI) - st.session_state.heartRate = st.number_input("Heart Rate (bpm)", min_value=40, max_value=200, step=1,value= st.session_state.heartRate) - st.session_state.glucose = st.number_input("Glucose Level (mg/dL)", min_value=50, max_value=300, step=1,value= st.session_state.glucose) - - - col1, col2 = st.columns([4,1]) - back = col1.form_submit_button("Back") - if back: - st.session_state.form3 = "back" - - st.rerun() - next = col2.form_submit_button("predict") - if next: - st.session_state.form4 = "next" - st.rerun() - elif st.session_state.form4 == "next" and st.session_state.form5 == "back": - - # Construct input array from collected values - new_data = np.array([[ - 1 if st.session_state.gender == "Male" else 0, # gender - st.session_state.age, - 1 if st.session_state.currentSmoker == "Yes" else 0, - float(st.session_state.cigsPerDay), - 1.0 if st.session_state.BPMeds else 0.0, - 1 if st.session_state.diabetes else 0, - st.session_state.totChol, - st.session_state.sysBP, - st.session_state.diaBP, - st.session_state.BMI, - st.session_state.heartRate, - st.session_state.glucose - ]]) - loading_placeholder = st.empty() - - with loading_placeholder.container(): - # Make prediction - with st.spinner("Analyzing your heart health..."): - st.image('load.gif', use_container_width=True) - time.sleep(3) # Wait for 1 second - # Remove the loading image - loading_placeholder.empty() - prediction = best_clf.predict(new_data) - prediction_proba = best_clf.predict_proba(new_data) - st.session_state.Risk = prediction - risk_percent = prediction_proba[0][1]*100 - risk_label = "At Risk of having a heart failure" - st.session_state.proba = risk_percent - - name = st.session_state.name # Get from session or fallback - with st.container(key = "result"): - # Display result - st.markdown(f""" -
-                    <!-- removed result card markup (tags lost in extraction); recovered text: -->
-                    Hi {name} πŸ‘‹, you are
-                    {risk_percent:.2f}%
-                    {risk_label}
- """, unsafe_allow_html=True) - st.session_state.form5 = "next" - - if st.button("explain the result", key = "explain"): - st.session_state.form5 = "next" - st.rerun() - - elif st.session_state.form5 == "next" : - def generate_stream_response(text): - # Yield the string one character at a time (for streaming) - for char in text: - yield char - time.sleep(0.02) - selected_model = { - "url": "https://router.huggingface.co/nebius/v1/chat/completions", # Replace with the Hugging Face API URL for your model - "model": "deepseek-ai/DeepSeek-V3" # Replace with the model name - } - task = "text-generation" - prompt = f""" - Hi! A person named {st.session_state.name} has just been assessed for heart disease risk. - - πŸ” **Prediction**: {"High Risk" if st.session_state.Risk == 1 else "Low Risk"} - πŸ“Š **Risk Percentage**: {st.session_state.proba:.2f}% - - πŸ“Œ **Input Parameters**: - - Sex: {st.session_state.gender} - - Age: {st.session_state.age} - - Current Smoker: {st.session_state.currentSmoker} - - Cigarettes per Day: {st.session_state.cigsPerDay} - - On Blood Pressure Meds: {"Yes" if st.session_state.BPMeds else "No"} - - Has Diabetes: {"Yes" if st.session_state.diabetes else "No"} - - Total Cholesterol: {st.session_state.totChol} mg/dL - - Systolic BP: {st.session_state.sysBP} mmHg - - Diastolic BP: {st.session_state.diaBP} mmHg - - BMI: {st.session_state.BMI} - - Heart Rate: {st.session_state.heartRate} bpm - - Glucose: {st.session_state.glucose} mg/dL - - πŸ’¬ Please give a personalized, kind, and easy-to-understand explanation of this result. Include practical lifestyle advice and possible early warning signs to watch for. Use an encouraging, empathetic tone.and sign with {selected_model['model']} - """ - - with st.container(key = "expert"): - with st.spinner("Model is Analysing your Results..."): - result = query_huggingface_model(selected_model, prompt , input_type="text",task=task) - response = extract_response_content(result) - - st.markdown(f""" -
-                <!-- removed advice header markup: icon image ("Uploaded Image") and "Personalized Heart Health Advice" title -->
- """, unsafe_allow_html=True) + + """, + unsafe_allow_html=True, + ) - st.write_stream(generate_stream_response(response)) # This will stream the text one character at a time + my_js = """ + // Close modal if clicking outside the modal-content + const overlay = window.parent.document.getElementById("modal-overlay"); + overlay.addEventListener("click", function(e) {{ + if (e.target.id === "modal-overlay") {{ + const btn = window.parent.document.querySelector('[class*="st-key-dismiss_modal_btn"]>div >div >div >div >button'); + if (btn) { + btn.click(); + } + //overlay.style.display = "none"; + }} + }}); + """ + my_html = f"" + html(my_html) + + + # Features Page + elif st.session_state.active_page == 'features': + st.markdown(""" +
+
+    # Features Page
+    elif st.session_state.active_page == 'features':
+        st.markdown("""
+        <!-- features grid markup lost in extraction; recovered card text: -->
+        Accurate Predictions: Leverage state-of-the-art machine learning models for highly precise expenditure forecasts.
+        Data Security: Your financial data is protected with industry-leading encryption and security protocols.
+        Privacy Focused: We prioritize your privacy with strict data handling and anonymity measures.
+        Responsive Design: Access CreditIQ seamlessly on any device, from desktop to mobile.
+        Actionable Insights: Understand your spending habits and gain insights to optimize your finances.
+        Customizable Models: Tailor prediction models to fit unique financial scenarios and individual needs.
+ """, unsafe_allow_html=True) + + # About Page + elif st.session_state.active_page == 'about': + st.markdown(""" +
+        <!-- about page markup lost in extraction; recovered text: -->
+        Revolutionizing Financial Forecasting
+        CreditIQ was founded on the principle that informed financial decisions lead to greater stability and growth. We believe that by providing highly accurate, AI-driven credit expenditure predictions, we can help our users better manage their budgets, identify trends, and plan for the future with confidence.
+        Our team of data scientists and financial experts has developed a robust platform that combines cutting-edge machine learning algorithms with user-friendly design. We are committed to continuous innovation and maintaining the highest standards of data security and privacy.
+        99% Accuracy Rate | 1M+ Predictions Made
+        [image: "AI Financial Illustration"]
+ """, unsafe_allow_html=True) + st.markdown(f"", unsafe_allow_html=True) + + # Contact Page + elif st.session_state.active_page == 'contact': + st.markdown(""" +
+ """, unsafe_allow_html=True) + # Inject Streamlit components into the Contact form + with st.container(key="contact-form"): + with st.form("contact_form",clear_on_submit=True): + st.text_input("Your Name", key="contact_name_input", help="Please enter your name.") + st.text_input("Your Email", key="contact_email_input", help="Please enter your email address.") + st.text_area("Message", key="contact_message_input", help="Type your message here.") + submittedcon = st.form_submit_button("Send Message", help="Click to send your message.") + + if submittedcon : + st.success("Thank you for your message! We will get back to you soon.") + + - \ No newline at end of file