CCockrum committed (verified)
Commit 20adb37
Parent: 1d4b3b2

Create app.py

Files changed (1)
  app.py  +339  -0
app.py ADDED
@@ -0,0 +1,339 @@
+import gradio as gr
+import pandas as pd
+import numpy as np
+import plotly.graph_objects as go
+import plotly.express as px
+from plotly.subplots import make_subplots
+from sklearn.ensemble import IsolationForest
+from sklearn.preprocessing import StandardScaler
+from sklearn.cluster import DBSCAN
+from scipy import stats
+from typing import Dict, List, Tuple, Any, Optional
+import warnings
+warnings.filterwarnings('ignore')
+
+class OutlierDetective:
+    def __init__(self):
+        self.df = None
+        self.outlier_results = {}
+        self.numeric_columns = []
+
+    def load_data(self, file_path: str) -> pd.DataFrame:
+        """Load data from various file formats"""
+        try:
+            if file_path.endswith('.csv'):
+                df = pd.read_csv(file_path, encoding='utf-8')
+            elif file_path.endswith(('.xlsx', '.xls')):
+                df = pd.read_excel(file_path)
+            elif file_path.endswith('.json'):
+                df = pd.read_json(file_path)
+            elif file_path.endswith('.parquet'):
+                df = pd.read_parquet(file_path)
+            else:
+                df = pd.read_csv(file_path)
+
+            self.df = df
+            # Identify numeric columns
+            self.numeric_columns = df.select_dtypes(include=[np.number]).columns.tolist()
+            return df
+        except Exception as e:
+            raise Exception(f"Error loading file: {str(e)}")
+
+    def detect_iqr_outliers(self, column: str) -> Dict[str, Any]:
+        """Detect outliers using Interquartile Range (IQR) method"""
+        if column not in self.numeric_columns:
+            return {}
+
+        series = self.df[column].dropna()
+        Q1 = series.quantile(0.25)
+        Q3 = series.quantile(0.75)
+        IQR = Q3 - Q1
+
+        lower_bound = Q1 - 1.5 * IQR
+        upper_bound = Q3 + 1.5 * IQR
+
+        outlier_mask = (series < lower_bound) | (series > upper_bound)
+        outlier_indices = series[outlier_mask].index.tolist()
+        outlier_values = series[outlier_mask].tolist()
+
+        return {
+            'method': 'IQR',
+            'lower_bound': lower_bound,
+            'upper_bound': upper_bound,
+            'outlier_indices': outlier_indices,
+            'outlier_values': outlier_values,
+            'outlier_count': len(outlier_indices),
+            'outlier_percentage': (len(outlier_indices) / len(series)) * 100,
+            'explanation': f"Values below {lower_bound:.2f} or above {upper_bound:.2f} are considered outliers"
+        }
+
+    def detect_zscore_outliers(self, column: str, threshold: float = 3) -> Dict[str, Any]:
+        """Detect outliers using Z-score method"""
+        if column not in self.numeric_columns:
+            return {}
+
+        series = self.df[column].dropna()
+        z_scores = np.abs(stats.zscore(series))
+
+        outlier_mask = z_scores > threshold
+        outlier_indices = series[outlier_mask].index.tolist()
+        outlier_values = series[outlier_mask].tolist()
+        outlier_zscores = z_scores[outlier_mask].tolist()
+
+        return {
+            'method': 'Z-Score',
+            'threshold': threshold,
+            'outlier_indices': outlier_indices,
+            'outlier_values': outlier_values,
+            'outlier_zscores': outlier_zscores,
+            'outlier_count': len(outlier_indices),
+            'outlier_percentage': (len(outlier_indices) / len(series)) * 100,
+            'explanation': f"Values with |z-score| > {threshold} are considered outliers"
+        }
+
+    def detect_modified_zscore_outliers(self, column: str, threshold: float = 3.5) -> Dict[str, Any]:
+        """Detect outliers using Modified Z-score (MAD) method"""
+        if column not in self.numeric_columns:
+            return {}
+
+        series = self.df[column].dropna()
+        median = series.median()
+        mad = stats.median_abs_deviation(series)
+
+        if mad == 0:
+            return {
+                'method': 'Modified Z-Score',
+                'outlier_count': 0,
+                'outlier_percentage': 0,
+                'explanation': "MAD is zero - no outliers detected using this method"
+            }
+
+        # 0.6745 is the 75th percentile of the standard normal distribution;
+        # it rescales the MAD so the score is comparable to an ordinary z-score.
+        modified_z_scores = 0.6745 * (series - median) / mad
+
+        outlier_mask = np.abs(modified_z_scores) > threshold
+        outlier_indices = series[outlier_mask].index.tolist()
+        outlier_values = series[outlier_mask].tolist()
+        outlier_scores = modified_z_scores[outlier_mask].tolist()
+
+        return {
+            'method': 'Modified Z-Score',
+            'threshold': threshold,
+            'median': median,
+            'mad': mad,
+            'outlier_indices': outlier_indices,
+            'outlier_values': outlier_values,
+            'outlier_scores': outlier_scores,
+            'outlier_count': len(outlier_indices),
+            'outlier_percentage': (len(outlier_indices) / len(series)) * 100,
+            'explanation': f"Values with |modified z-score| > {threshold} are considered outliers (robust to extreme values)"
+        }
+
+    def detect_isolation_forest_outliers(self, columns: List[str], contamination: float = 0.1) -> Dict[str, Any]:
+        """Detect multivariate outliers using Isolation Forest"""
+        if not columns or len(columns) < 1:
+            return {}
+
+        # Filter to only numeric columns that exist
+        valid_columns = [col for col in columns if col in self.numeric_columns]
+        if not valid_columns:
+            return {}
+
+        # Prepare data
+        data = self.df[valid_columns].dropna()
+        if len(data) < 10:  # Need minimum data points
+            return {}
+
+        # Standardize the data
+        scaler = StandardScaler()
+        scaled_data = scaler.fit_transform(data)
+
+        # Fit Isolation Forest
+        iso_forest = IsolationForest(contamination=contamination, random_state=42)
+        outlier_labels = iso_forest.fit_predict(scaled_data)
+
+        # Get outlier indices and scores
+        outlier_mask = outlier_labels == -1
+        outlier_indices = data[outlier_mask].index.tolist()
+        outlier_scores = iso_forest.score_samples(scaled_data)
+        outlier_score_values = outlier_scores[outlier_mask].tolist()
+
+        return {
+            'method': 'Isolation Forest',
+            'contamination': contamination,
+            'columns_used': valid_columns,
+            'outlier_indices': outlier_indices,
+            'outlier_scores': outlier_score_values,
+            'outlier_count': len(outlier_indices),
+            'outlier_percentage': (len(outlier_indices) / len(data)) * 100,
+            'explanation': f"Multivariate outlier detection using {len(valid_columns)} features with {contamination*100}% expected contamination"
+        }
+
+    def detect_dbscan_outliers(self, columns: List[str], eps: float = 0.5, min_samples: int = 5) -> Dict[str, Any]:
+        """Detect outliers using DBSCAN clustering"""
+        if not columns or len(columns) < 1:
+            return {}
+
+        # Filter to only numeric columns that exist
+        valid_columns = [col for col in columns if col in self.numeric_columns]
+        if not valid_columns:
+            return {}
+
+        # Prepare data
+        data = self.df[valid_columns].dropna()
+        if len(data) < min_samples * 2:  # Need minimum data points
+            return {}
+
+        # Standardize the data
+        scaler = StandardScaler()
+        scaled_data = scaler.fit_transform(data)
+
+        # Apply DBSCAN
+        dbscan = DBSCAN(eps=eps, min_samples=min_samples)
+        cluster_labels = dbscan.fit_predict(scaled_data)
+
+        # Points labeled as -1 are outliers
+        outlier_mask = cluster_labels == -1
+        outlier_indices = data[outlier_mask].index.tolist()
+
+        # Count clusters
+        n_clusters = len(set(cluster_labels)) - (1 if -1 in cluster_labels else 0)
+
+        return {
+            'method': 'DBSCAN',
+            'eps': eps,
+            'min_samples': min_samples,
+            'columns_used': valid_columns,
+            'n_clusters': n_clusters,
+            'outlier_indices': outlier_indices,
+            'outlier_count': len(outlier_indices),
+            'outlier_percentage': (len(outlier_indices) / len(data)) * 100,
+            'explanation': f"Density-based outlier detection found {n_clusters} clusters using {len(valid_columns)} features"
+        }
+
+    def analyze_outliers(self, selected_columns: Optional[List[str]] = None, methods: Optional[List[str]] = None) -> Dict[str, Any]:
+        """Comprehensive outlier analysis"""
+        if self.df is None:
+            return {}
+
+        if selected_columns is None:
+            selected_columns = self.numeric_columns
+        else:
+            # Filter to only numeric columns
+            selected_columns = [col for col in selected_columns if col in self.numeric_columns]
+
+        if not selected_columns:
+            return {}
+
+        if methods is None:
+            methods = ['IQR', 'Z-Score', 'Modified Z-Score', 'Isolation Forest']
+
+        results = {}
+
+        # Single-column methods
+        for column in selected_columns:
+            results[column] = {}
+
+            if 'IQR' in methods:
+                results[column]['IQR'] = self.detect_iqr_outliers(column)
+
+            if 'Z-Score' in methods:
+                results[column]['Z-Score'] = self.detect_zscore_outliers(column)
+
+            if 'Modified Z-Score' in methods:
+                results[column]['Modified Z-Score'] = self.detect_modified_zscore_outliers(column)
+
+        # Multi-column methods
+        if len(selected_columns) > 1:
+            if 'Isolation Forest' in methods:
+                results['Multivariate'] = {}
+                results['Multivariate']['Isolation Forest'] = self.detect_isolation_forest_outliers(selected_columns)
+
+            if 'DBSCAN' in methods:
+                if 'Multivariate' not in results:
+                    results['Multivariate'] = {}
+                results['Multivariate']['DBSCAN'] = self.detect_dbscan_outliers(selected_columns)
+
+        self.outlier_results = results
+        return results
+
+    def generate_outlier_report(self) -> str:
+        """Generate comprehensive outlier analysis report"""
+        if not self.outlier_results:
+            return "No outlier analysis results available. Please run the analysis first."
+
+        report = "# Outlier Detection Report\n\n"
+
+        # Summary statistics
+        total_outliers_by_method = {}
+        all_outlier_indices = set()
+
+        for column, methods in self.outlier_results.items():
+            if column == 'Multivariate':
+                continue
+
+            for method, result in methods.items():
+                if isinstance(result, dict) and 'outlier_count' in result:
+                    if method not in total_outliers_by_method:
+                        total_outliers_by_method[method] = 0
+                    total_outliers_by_method[method] += result['outlier_count']
+
+                    if 'outlier_indices' in result:
+                        all_outlier_indices.update(result['outlier_indices'])
+
+        # Add multivariate results
+        if 'Multivariate' in self.outlier_results:
+            for method, result in self.outlier_results['Multivariate'].items():
+                if isinstance(result, dict) and 'outlier_count' in result:
+                    total_outliers_by_method[method] = result['outlier_count']
+                    if 'outlier_indices' in result:
+                        all_outlier_indices.update(result['outlier_indices'])
+
+        report += "## Summary\n"
+        report += f"- **Total rows analyzed:** {len(self.df):,}\n"
+        report += f"- **Unique outlier rows found:** {len(all_outlier_indices)}\n"
+        report += f"- **Percentage of outlier rows:** {(len(all_outlier_indices)/len(self.df)*100):.2f}%\n\n"
+
+        report += "### Outliers by Method:\n"
+        for method, count in total_outliers_by_method.items():
+            report += f"- **{method}:** {count} outliers\n"
+
+        report += "\n"
+
+        # Detailed results by column
+        report += "## Detailed Results\n\n"
+
+        for column, methods in self.outlier_results.items():
+            if column == 'Multivariate':
+                continue
+
+            report += f"### Column: `{column}`\n\n"
+
+            for method, result in methods.items():
+                # Skip methods that returned no result or found no outliers
+                if not isinstance(result, dict) or result.get('outlier_count', 0) == 0:
+                    report += f"**{method}:** No outliers detected\n"
+                    continue
+
+                report += f"**{method}:**\n"
+                report += f"- Outliers found: {result['outlier_count']} ({result['outlier_percentage']:.2f}%)\n"
+                report += f"- Explanation: {result['explanation']}\n"
+
+                # Show some example outlier values
+                if 'outlier_values' in result and result['outlier_values']:
+                    sample_values = result['outlier_values'][:5]  # Show first 5
+                    report += f"- Example outliers: {', '.join([f'{v:.3f}' if isinstance(v, (int, float)) else str(v) for v in sample_values])}"
+                    if len(result['outlier_values']) > 5:
+                        report += f" (and {len(result['outlier_values']) - 5} more)"
+                    report += "\n"
+
+            report += "\n"
+
+        # Multivariate results
+        if 'Multivariate' in self.outlier_results:
+            report += "### Multivariate Analysis\n\n"
+
+            for method, result in self.outlier_results['Multivariate'].items():
+                if not isinstance(result, dict):
+                    continue
+
+                report += f"**{method}:**\n"
+                report += f"- Outliers found: {result['outli
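(The diff is cut off by the viewer at line 339; the rest of app.py, presumably the Gradio interface wiring, is not shown here.)

As a quick orientation for reviewers, here is a minimal sketch of how the OutlierDetective class above might be exercised on its own, outside the Gradio UI. The file path data.csv and the column names are placeholders, not part of this commit, and the sketch assumes app.py can be imported without launching the UI as a side effect.

    # Hypothetical usage sketch -- not part of this commit.
    # Assumes importing app.py does not launch the Gradio app at import time.
    from app import OutlierDetective

    detective = OutlierDetective()
    detective.load_data("data.csv")                 # placeholder path; CSV/XLSX/JSON/Parquet supported
    results = detective.analyze_outliers(
        selected_columns=["price", "quantity"],     # placeholder numeric column names
        methods=["IQR", "Modified Z-Score", "Isolation Forest"],
    )
    print(detective.generate_outlier_report())      # Markdown-formatted report string

Per-column results are keyed by column name and then by method, with multivariate results stored under the 'Multivariate' key, which is the structure generate_outlier_report walks when building the report.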