cisemh committed
Commit 679b404 · verified · 1 Parent(s): bc42626

Update app.py

Files changed (1)
app.py +145 -2
app.py CHANGED
@@ -1,4 +1,147 @@
  import streamlit as st
+ import pandas as pd
+ import plotly.express as px
+ import plotly.graph_objects as go
+ import numpy as np
  
- x = st.slider('Select a value')
- st.write(x, 'squared is', x * x)
+ # Set page config
+ st.set_page_config(page_title="ML Models Comparison Dashboard", layout="wide")
+
+ # Title and description
+ st.title("Machine Learning Models Comparison Dashboard")
+ st.write("Compare performance metrics of different ML models on the CIFAR-10 dataset")
+
+ # Pre-computed metrics (replace these with your actual results)
+ results = {
+     'Accuracy': {
+         'KNN': 0.345,  # Replace with your actual values
+         'Logistic Regression': 0.389,
+         'Random Forest': 0.412,
+         'Naive Bayes': 0.298,
+         'K-Means': 0.275,
+         'CNN': 0.456
+     },
+     'Precision': {
+         'KNN': 0.342,
+         'Logistic Regression': 0.387,
+         'Random Forest': 0.409,
+         'Naive Bayes': 0.295,
+         'K-Means': 0.271,
+         'CNN': 0.453
+     },
+     'Recall': {
+         'KNN': 0.345,
+         'Logistic Regression': 0.389,
+         'Random Forest': 0.412,
+         'Naive Bayes': 0.298,
+         'K-Means': 0.275,
+         'CNN': 0.456
+     },
+     'F1': {
+         'KNN': 0.343,
+         'Logistic Regression': 0.388,
+         'Random Forest': 0.410,
+         'Naive Bayes': 0.296,
+         'K-Means': 0.273,
+         'CNN': 0.454
+     }
+ }
+
+ # Pre-computed confusion matrices (replace these with your actual confusion matrices)
+ confusion_matrices = {
+     'KNN': np.random.randint(0, 100, (10, 10)),  # Replace with actual confusion matrices
+     'Logistic Regression': np.random.randint(0, 100, (10, 10)),
+     'Random Forest': np.random.randint(0, 100, (10, 10)),
+     'Naive Bayes': np.random.randint(0, 100, (10, 10)),
+     'K-Means': np.random.randint(0, 100, (10, 10)),
+     'CNN': np.random.randint(0, 100, (10, 10))
+ }
+
+ # Create tabs for different visualizations
+ tab1, tab2, tab3 = st.tabs(["Metrics Comparison", "Confusion Matrices", "Radar Plot"])
+
+ with tab1:
+     st.header("Performance Metrics Comparison")
+
+     # Convert results to DataFrame for plotting
+     df_metrics = pd.DataFrame(results)
+     df_metrics.index.name = 'Model'
+     df_metrics = df_metrics.reset_index()
+
+     # Create bar plot using plotly
+     fig = px.bar(df_metrics.melt(id_vars=['Model'],
+                                  var_name='Metric',
+                                  value_name='Score'),
+                  x='Model', y='Score', color='Metric', barmode='group',
+                  title='Model Performance Comparison')
+     fig.update_layout(xaxis_tickangle=-45)
+     st.plotly_chart(fig)
+
+     # Display metrics table
+     st.subheader("Metrics Table")
+     st.dataframe(df_metrics.set_index('Model').style.format("{:.3f}"))
+
+ with tab2:
+     st.header("Confusion Matrices")
+
+     # Select model for confusion matrix
+     selected_model = st.selectbox("Select Model", list(confusion_matrices.keys()))
+
+     # Plot confusion matrix using plotly
+     fig = px.imshow(confusion_matrices[selected_model],
+                     labels=dict(x="Predicted", y="True"),
+                     title=f"Confusion Matrix - {selected_model}")
+     st.plotly_chart(fig)
+
+ with tab3:
+     st.header("Radar Plot Comparison")
+
+     # Create radar plot using plotly
+     fig = go.Figure()
+     metrics = list(results.keys())
+     models = list(results['Accuracy'].keys())
+
+     for model in models:
+         values = [results[metric][model] for metric in metrics]
+         values.append(values[0])  # Complete the circle
+
+         fig.add_trace(go.Scatterpolar(
+             r=values,
+             theta=metrics + [metrics[0]],
+             name=model
+         ))
+
+     fig.update_layout(
+         polar=dict(radialaxis=dict(visible=True, range=[0, 1])),
+         showlegend=True,
+         title="Model Comparison - All Metrics"
+     )
+
+     st.plotly_chart(fig)
+
+ # Add download button for metrics
+ @st.cache_data
+ def convert_df_to_csv():
+     return df_metrics.to_csv(index=False)
+
+ st.sidebar.header("Download Data")
+ csv = convert_df_to_csv()
+ st.sidebar.download_button(
+     label="Download metrics as CSV",
+     data=csv,
+     file_name='model_metrics.csv',
+     mime='text/csv',
+ )
+
+ # Add explanatory text
+ st.sidebar.markdown("""
+ ### Dashboard Features:
+ 1. View pre-computed metrics for all models
+ 2. Compare performance across different metrics
+ 3. Examine confusion matrices
+ 4. Download metrics data as CSV
+ """)
+
+ # Footer
+ st.markdown("---")
+ st.markdown("Dashboard created with Streamlit for ML Models Comparison")