cisemh committed
Commit f7d41c0 · Parent: 41683f3

weather app

Files changed (1)
  1. app.py +270 -2
app.py CHANGED
@@ -1,4 +1,272 @@
  import streamlit as st
+ import pandas as pd
+ import numpy as np
+ import seaborn as sns
+ import matplotlib.pyplot as plt
+ from sklearn.model_selection import train_test_split, cross_val_score
+ from sklearn.naive_bayes import GaussianNB
+ from sklearn.tree import DecisionTreeClassifier, plot_tree
+ from sklearn.ensemble import RandomForestClassifier
+ from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
+ from sklearn.preprocessing import StandardScaler

- x = st.slider('Select a value')
- st.write(x, 'squared is', x * x)
+
+ # Page configuration
+ st.set_page_config(
+     page_title="Seattle Weather Analysis",
+     page_icon="🌦️",
+     layout="wide"
+ )
+
+
+ # Title and introduction
+ st.title("🌦️ Seattle Weather Machine Learning")
+ st.markdown("""
+ This dashboard analyzes Seattle weather data using different machine learning models.
+ The dataset includes weather attributes and their classification.
+ """)
+
+ def get_dataset_overview(df):
+     """
+     Generate a comprehensive overview of the dataset
+     """
+     return {
+         "Total Records": len(df),
+         "Features": len(df.columns) - 1,  # Excluding target column
+         "Target Classes": len(df['weather'].unique()),
+         "Missing Values": df.isnull().sum().sum()
+     }
+
+
+ def load_data():
+     """Load and preprocess the Seattle weather dataset"""
+     df = pd.read_csv('seattle-weather.csv')
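+     # Assumes seattle-weather.csv sits next to app.py and provides date, precipitation,
+     # temp_max, temp_min, wind and weather columns (the standard Seattle weather dataset)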
+     df_cleaned = df.drop(columns=['date'])
+     weather_mapping = {'drizzle': 0, 'rain': 1, 'sun': 2, 'snow': 3, 'fog': 4}
+     df_cleaned['weather_encoded'] = df_cleaned['weather'].map(weather_mapping)
+
+     # Split features and target
+     X = df_cleaned.drop(columns=['weather', 'weather_encoded'])
+     y = df_cleaned['weather_encoded']
+
+     # Scale features
+     scaler = StandardScaler()
+     X_scaled = scaler.fit_transform(X)
+     X_scaled = pd.DataFrame(X_scaled, columns=X.columns)
+
+     # Train-test split
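+     # (80/20 split; stratify=y could be passed to keep class proportions similar in both splits)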
+     X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.2, random_state=42)
+
+     return df, df_cleaned, X, y, X_train, X_test, y_train, y_test, weather_mapping
+
+
+ def plot_weather_distribution(df):
+     """Plot distribution of weather types"""
+     fig, ax = plt.subplots()
+     sns.countplot(x='weather', data=df, palette='viridis', ax=ax)
+     ax.set_title("Distribution of Weather Types")
+     st.pyplot(fig)
+
+
+ def plot_temp_relationship(df):
+     """Plot relationship between max and min temperatures"""
+     fig, ax = plt.subplots()
+     sns.scatterplot(x='temp_max', y='temp_min', hue='weather', data=df, ax=ax)
+     ax.set_title("Relationship Between Temp_max and Temp_min")
+     st.pyplot(fig)
+
+
+ def train_models(X_train, X_test, y_train, y_test):
+     """Train Naive Bayes, Decision Tree, and Random Forest models"""
+     models = {
+         'Naive Bayes': GaussianNB(),
+         'Decision Tree': DecisionTreeClassifier(random_state=42, max_depth=5),
+         'Random Forest': RandomForestClassifier(n_estimators=100, random_state=42)
+     }
+
+     results = {}
+
+     for name, model in models.items():
+         model.fit(X_train, y_train)
+         y_pred = model.predict(X_test)
+         accuracy = accuracy_score(y_test, y_pred)
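+         # 5-fold cross-validation on the training split only; the held-out test set is not used here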
+         cv_scores = cross_val_score(model, X_train, y_train, cv=5)
+
+         results[name] = {
+             'model': model,
+             'accuracy': accuracy,
+             'cv_mean': cv_scores.mean(),
+             'cv_std': cv_scores.std(),
+             'pred': y_pred
+         }
+
+     return results
+
+
+ def plot_confusion_matrix(y_test, y_pred, model_name, weather_mapping):
+     """Plot confusion matrix for a given model"""
+     fig, ax = plt.subplots()
+     conf_matrix = confusion_matrix(y_test, y_pred)
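+     # confusion_matrix orders classes by sorted encoded label (0-4), which matches weather_mapping's
+     # insertion order, provided every class appears in the test split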
+     sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues',
+                 xticklabels=list(weather_mapping.keys()),
+                 yticklabels=list(weather_mapping.keys()), ax=ax)
+     ax.set_title(f"Confusion Matrix - {model_name}")
+     ax.set_xlabel("Predicted")
+     ax.set_ylabel("Actual")
+     st.pyplot(fig)
+
+
+ def plot_feature_importance(model, X, model_name):
+     """Plot feature importance for a given model"""
+     fig, ax = plt.subplots()
+     feature_importance = pd.DataFrame({
+         'Feature': X.columns,
+         'Importance': model.feature_importances_
+     }).sort_values('Importance', ascending=False)
+
+     sns.barplot(x='Importance', y='Feature', data=feature_importance, palette='viridis', ax=ax)
+     ax.set_title(f"{model_name} Feature Importance")
+     st.pyplot(fig)
+
+
+ def main():
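+     # Streamlit reruns this script on every widget interaction, so data loading and model
+     # training repeat on each rerun; st.cache_data / st.cache_resource could cache these steps.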
+
+     # Load data
+     df, df_cleaned, X, y, X_train, X_test, y_train, y_test, weather_mapping = load_data()
+
+     # Sidebar menu
+     menu = st.sidebar.selectbox("Choose Analysis", [
+         "Data Overview",
+         "Data Visualization",
+         "Model Training",
+         "Model Comparison"
+     ])
+
+     if menu == "Data Overview":
+         st.header("Dataset Overview")
+
+         # Get dataset overview
+         overview = get_dataset_overview(df)
+
+         # Create columns for side-by-side display
+         col1, col2, col3, col4 = st.columns(4)
+
+         # Display overview metrics
+         with col1:
+             st.metric(label="Total Records", value=overview["Total Records"])
+
+         with col2:
+             st.metric(label="Features", value=overview["Features"])
+
+         with col3:
+             st.metric(label="Target Classes", value=overview["Target Classes"])
+
+         with col4:
+             st.metric(label="Missing Values", value=overview["Missing Values"])
+
+         # Display first few rows
+         st.subheader("First Few Rows")
+         st.dataframe(df.head())
+
+         # Weather Type Distribution
+         st.subheader("Weather Type Distribution")
+         weather_dist = df['weather'].value_counts()
+         col1, col2 = st.columns(2)
+
+         with col1:
+             st.dataframe(weather_dist)
+
+         with col2:
+             fig, ax = plt.subplots()
+             weather_dist.plot(kind='pie', autopct='%1.1f%%', ax=ax)
+             ax.set_title("Weather Type Percentage")
+             st.pyplot(fig)
+
+         # Descriptive Statistics
+         st.subheader("Descriptive Statistics")
+         st.dataframe(df.describe())
+
+     elif menu == "Data Visualization":
+         st.header("Weather Data Visualizations")
+
+         viz_option = st.selectbox("Choose Visualization", [
+             "Weather Type Distribution",
+             "Temperature Relationship",
+             "Correlation Heatmap"
+         ])
+
+         if viz_option == "Weather Type Distribution":
+             plot_weather_distribution(df)
+
+         elif viz_option == "Temperature Relationship":
+             plot_temp_relationship(df)
+
+         elif viz_option == "Correlation Heatmap":
+             fig, ax = plt.subplots(figsize=(10, 8))
+             corr_matrix = pd.concat([X, y], axis=1).corr()
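+             # weather_encoded is an arbitrary integer coding, so correlations involving it are only indicative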
+             sns.heatmap(corr_matrix, annot=True, fmt=".2f", cmap="coolwarm", vmin=-1, vmax=1, ax=ax)
+             ax.set_title("Correlation Heatmap")
+             st.pyplot(fig)
+
+     elif menu == "Model Training":
+         st.header("Machine Learning Models")
+
+         # Train models
+         results = train_models(X_train, X_test, y_train, y_test)
+
+         model_select = st.selectbox("Choose Model", list(results.keys()))
+
+         model_result = results[model_select]
+
+         st.write(f"{model_select} Results:")
+         st.write(f"Test Accuracy: {model_result['accuracy']:.4f}")
+         st.write(f"Cross-Validation Mean Accuracy: {model_result['cv_mean']:.4f}")
+         st.write(f"Cross-Validation Std: {model_result['cv_std']:.4f}")
+
+         # Confusion Matrix
+         plot_confusion_matrix(y_test, model_result['pred'], model_select, weather_mapping)
+
+         # Feature Importance (for Decision Tree and Random Forest)
+         if model_select != 'Naive Bayes':
+             plot_feature_importance(model_result['model'], X, model_select)
+
+     elif menu == "Model Comparison":
+         st.header("Model Performance Comparison")
+
+         # Train models for the comparison view
+         results = train_models(X_train, X_test, y_train, y_test)
+
+         # Create comparison DataFrame
+         comparison_df = pd.DataFrame({
+             'Model': list(results.keys()),
+             'Test Accuracy': [results[model]['accuracy'] for model in results],
+             'CV Mean Accuracy': [results[model]['cv_mean'] for model in results],
+             'CV Std': [results[model]['cv_std'] for model in results]
+         })
+
+         st.write("Model Performance Comparison:")
+         st.dataframe(comparison_df)
+
+         # Bar plots for comparison
+         fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))
+
+         # Test Accuracy Comparison
+         sns.barplot(x='Model', y='Test Accuracy', data=comparison_df, ax=ax1)
+         ax1.set_title('Test Accuracy Comparison')
+         ax1.tick_params(axis='x', rotation=45)
+
+         # Cross-validation Comparison
+         sns.barplot(x='Model', y='CV Mean Accuracy', data=comparison_df, ax=ax2)
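+         # Error bars show ±2 standard deviations of the fold scores (roughly a 95% interval)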
+         ax2.errorbar(x=range(len(comparison_df)),
+                      y=comparison_df['CV Mean Accuracy'],
+                      yerr=comparison_df['CV Std'] * 2,
+                      fmt='none', color='black', capsize=5)
+         ax2.set_title('Cross-validation Accuracy')
+         ax2.tick_params(axis='x', rotation=45)
+
+         plt.tight_layout()
+         st.pyplot(fig)
+
+
+ if __name__ == "__main__":
+     main()
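
To try the change locally (a minimal sketch, assuming seattle-weather.csv sits next to app.py and the packages imported above are installed):

    pip install streamlit pandas numpy seaborn matplotlib scikit-learn
    streamlit run app.py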