# -*- coding: utf-8 -*-
"""1077_252_49
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1Oc-ciFRATiivfVFWe8Dd7m5LvNeQIQZT
"""
# Commented out IPython magic to ensure Python compatibility.
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.inspection import permutation_importance
sns.set(style='whitegrid')
plt.switch_backend('Agg')
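# Path to the dataset; the /content location assumes the Colab environment this script was exported from.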
file_path = '/content/air_quality_health_dataset.csv'
try:
    df = pd.read_csv(file_path, encoding='ISO-8859-1', delimiter=',')
    print('Data loaded successfully.')
except Exception as e:
    print('Error loading data:', e)
print(df.head())
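# Data cleaning: check for missing values, then drop rows missing critical fields.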
missing_values = df.isnull().sum()
print('Missing values in each column:')
print(missing_values)
# Handling missing values (basic strategy): drop rows with critical missing data
df.dropna(subset=['date', 'aqi', 'pm2_5', 'hospital_admissions'], inplace=True)
# Convert population_density to a numeric value if possible; if not possible, leave it as is
try:
    df['population_density_numeric'] = pd.to_numeric(df['population_density'], errors='coerce')
    print('Converted population_density to numeric where possible.')
except Exception as e:
    print('Error converting population_density:', e)
# For any remaining non-numeric entries in population_density_numeric, we can fill them with the median
if 'population_density_numeric' in df.columns:
    median_val = df['population_density_numeric'].median()
    df['population_density_numeric'] = df['population_density_numeric'].fillna(median_val)
# Final sanity check
print('Data types after processing:')
print(df.dtypes)
numeric_df = df.select_dtypes(include=[np.number])
if numeric_df.shape[1] >= 4:
    plt.figure(figsize=(10, 8))
    corr = numeric_df.corr()
    sns.heatmap(corr, annot=True, cmap='coolwarm', fmt='.2f')
    plt.title('Correlation Heatmap')
    plt.tight_layout()
    plt.savefig('correlation_heatmap.png')
    plt.show()
else:
    print('Not enough numeric columns for a correlation heatmap.')
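# Pair plot of the main pollutant measures against hospital admissions.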
sns.pairplot(df[['aqi', 'pm2_5', 'pm10', 'hospital_admissions']])
plt.suptitle('Pair Plot of Selected Variables', y=1.02)
plt.savefig('pairplot.png')
plt.show()
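# Histogram (with KDE overlay) for every numeric column, each saved to its own PNG.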
numeric_cols = numeric_df.columns
for col in numeric_cols:
    plt.figure(figsize=(6, 4))
    sns.histplot(df[col].dropna(), kde=True, bins=30)
    plt.title(f'Histogram of {col}')
    plt.savefig(f'histogram_{col}.png')
    plt.show()
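# Number of records per city, ordered by frequency.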
plt.figure(figsize=(10, 6))
sns.countplot(data=df, x='city', order=df['city'].value_counts().index)
plt.title('Count Plot of Cities')
plt.xticks(rotation=45, ha='right')
plt.savefig('countplot_cities.png')
plt.show()
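# Modeling: predict hospital_admissions from air-quality, weather, and capacity features
# with a random forest regressor.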
target = 'hospital_admissions'
features = ['aqi', 'pm2_5', 'pm10', 'no2', 'o3', 'temperature', 'humidity', 'hospital_capacity']
model_df = df.dropna(subset=features + [target]).copy()
X = model_df[features]
y = model_df[target]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
model = RandomForestRegressor(n_estimators=100, random_state=42)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print(f'Mean Squared Error: {mse:.2f}')
print(f'R^2 Score: {r2:.2f}')
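# Permutation importance on the held-out test set: how much the model score drops
# when each feature's values are shuffled.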
perm_importance = permutation_importance(model, X_test, y_test, n_repeats=10, random_state=42)
feature_importance = pd.Series(perm_importance.importances_mean, index=features)
feature_importance = feature_importance.sort_values()
plt.figure(figsize=(8,6))
plt.barh(feature_importance.index, feature_importance.values, color='skyblue')
plt.xlabel('Permutation Importance')
plt.title('Feature Importance')
plt.savefig('feature_importance.png')
plt.show()
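# Optional sketch: print the features ranked from most to least important,
# using the permutation importances computed above.
print('Features ranked by permutation importance:')
print(feature_importance.sort_values(ascending=False))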