path: string (length 13 to 17)
screenshot_names: list (length 1 to 873)
code: string (length 0 to 40.4k)
cell_type: string (1 unique value: 'code')
106212685/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
abnb.isnull()
abnb.columns = [col.lower().replace(' ', '_') for col in abnb.columns]
abnb.shape
abnb['name'].head(5)
code
106212685/cell_16
[ "text_html_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd

abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
abnb.isnull()
abnb.columns = [col.lower().replace(' ', '_') for col in abnb.columns]
abnb.shape
text = abnb.name[0] + abnb.name[1] + abnb.name[2] + abnb.name[4] + abnb.name[5]
word_cloud = WordCloud(background_color='white').generate(text)
plt.imshow(word_cloud, interpolation='bilinear')
plt.axis('off')
plt.show()
code
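Editor's note: building the word-cloud text by indexing individual rows (which silently skips index 3 and breaks on missing names) can be replaced by joining the whole column; a minimal sketch, assuming the same abnb frame from cell_16:

text = ' '.join(abnb['name'].dropna().astype(str))  # every listing name, NaNs removed
word_cloud = WordCloud(background_color='white').generate(text)
plt.imshow(word_cloud, interpolation='bilinear')
plt.axis('off')
plt.show()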
106212685/cell_3
[ "text_plain_output_4.png", "text_plain_output_3.png", "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from IPython.core.interactiveshell import InteractiveShell
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from wordcloud import WordCloud

InteractiveShell.ast_node_interactivity = 'all'
import os, time, sys, gc

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
106212685/cell_35
[ "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd

abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
abnb.isnull()
abnb.columns = [col.lower().replace(' ', '_') for col in abnb.columns]
abnb.shape
text = abnb.name[0] + abnb.name[1] + abnb.name[2] + abnb.name[4] + abnb.name[5]
word_cloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
abnb.columns
# regex=False keeps '$' a literal character (on older pandas the default regex=True
# treats '$' as an end-of-string anchor and removes nothing)
abnb['service_fee'] = (abnb['service_fee'].str.replace('$', '', regex=False)
                       .str.replace(' ', '', regex=False)
                       .str.replace(',', '', regex=False)
                       .astype(float))
abnb['service_fee'].head(5)
code
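Editor's note: the three chained replaces in cell_35 can be collapsed into one regex pass, with unparseable entries coerced to NaN instead of raising; a sketch assuming the same '$1,234 '-style strings:

abnb['service_fee'] = pd.to_numeric(
    abnb['service_fee'].str.replace(r'[$,\s]', '', regex=True),
    errors='coerce')  # strip $, commas and spaces, then convert; bad rows become NaN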
106212685/cell_43
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd

abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
abnb.isnull()
abnb.columns = [col.lower().replace(' ', '_') for col in abnb.columns]
abnb.shape
text = abnb.name[0] + abnb.name[1] + abnb.name[2] + abnb.name[4] + abnb.name[5]
word_cloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
abnb.columns
abnb.columns
abnb.columns
code
106212685/cell_31
[ "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd

abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
abnb.isnull()
abnb.columns = [col.lower().replace(' ', '_') for col in abnb.columns]
abnb.shape
text = abnb.name[0] + abnb.name[1] + abnb.name[2] + abnb.name[4] + abnb.name[5]
word_cloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
abnb.columns
abnb['host_identity_verified'].unique()
print('\n')
abnb['host_identity_verified'].value_counts()
code
106212685/cell_46
[ "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
import gc  # gc.collect() is called below but gc was only imported in cell_3

abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
abnb.isnull()
abnb.columns = [col.lower().replace(' ', '_') for col in abnb.columns]
abnb.shape
text = abnb.name[0] + abnb.name[1] + abnb.name[2] + abnb.name[4] + abnb.name[5]
word_cloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
abnb.columns
abnb.columns
abnb.columns
'Present memory: {} '.format(abnb.memory_usage().sum())
gc.collect()
abnb.drop(columns=['lat', 'long', 'cancellation_policy', 'room_type', 'license'], inplace=True)
'Current memory usage: {} '.format(abnb.memory_usage().sum())
code
106212685/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd

abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
abnb.head(3)
code
106212685/cell_27
[ "text_plain_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd

abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
abnb.isnull()
abnb.columns = [col.lower().replace(' ', '_') for col in abnb.columns]
abnb.shape
text = abnb.name[0] + abnb.name[1] + abnb.name[2] + abnb.name[4] + abnb.name[5]
word_cloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
abnb.columns
abnb['room_type'].unique()
print('\n')
abnb['room_type'].value_counts()
code
106212685/cell_37
[ "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd

abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
abnb.isnull()
abnb.columns = [col.lower().replace(' ', '_') for col in abnb.columns]
abnb.shape
text = abnb.name[0] + abnb.name[1] + abnb.name[2] + abnb.name[4] + abnb.name[5]
word_cloud = WordCloud(background_color='white').generate(text)
plt.axis('off')
abnb.columns
abnb['construction_year'].head(5)
# note: the .dt accessor below only works once construction_year has been converted
# to datetime (e.g. with pd.to_datetime); that conversion is not captured in this extract
abnb['constructed_year'] = abnb['construction_year'].dt.year
abnb['constructed_month'] = abnb['construction_year'].dt.month
abnb['constructed_day'] = abnb['construction_year'].dt.day
print('\n')
abnb[['constructed_year', 'constructed_month', 'constructed_day']].head(5)
code
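Editor's note: cell_37 relies on the .dt accessor, which only exists on datetime columns. A toy sketch of the conversion step the notebook must have run earlier (the values below are illustrative, not from the dataset):

import pandas as pd

s = pd.Series([2003, 2015, 1999], name='construction_year')
dt = pd.to_datetime(s.astype(str), format='%Y')  # year-only values parse to Jan 1 of that year
print(dt.dt.year.tolist())    # [2003, 2015, 1999]
print(dt.dt.month.tolist())   # [1, 1, 1] - month/day carry no real information for year data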
106212685/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd

abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
abnb.columns
abnb.dtypes
abnb.memory_usage().sum()
abnb.isnull()
abnb.columns = [col.lower().replace(' ', '_') for col in abnb.columns]
abnb.info()
code
106212685/cell_5
[ "text_plain_output_3.png", "text_plain_output_1.png" ]
import pandas as pd

abnb = pd.read_csv('/kaggle/input/airbnbopendata/Airbnb_Open_Data.csv')
code
130014262/cell_21
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, OrdinalEncoder
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd

# data load and split as in cells 3-8, not repeated in the original cell
df = pd.read_csv('/kaggle/input/autism-screening-for-toddlers/Toddler Autism dataset July 2018.csv')
df = df.rename(columns={'Class/ASD Traits ': 'ASD'})
x = df.drop('Case_No', axis=1).drop('Ethnicity', axis=1).drop('ASD', axis=1)
y = df['ASD']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=36)

obj_cols = x_train.select_dtypes(include='object').columns
obj_cols
float_cols = x_train.select_dtypes(include='int64').columns  # named float_cols but selects the int64 columns
float_cols
le = LabelEncoder()
le.fit(y_train)
y_train_processed = le.transform(y_train)
y_test_processed = le.transform(y_test)
oe = OrdinalEncoder(categories=[x_train[i].unique() for i in obj_cols])
oe.fit(x_train[obj_cols])
x_train_cat_encoded = oe.transform(x_train[obj_cols])
x_test_cat_encoded = oe.transform(x_test[obj_cols])
scaler = MinMaxScaler()
scaler.fit(x_train[float_cols])
x_train_float_encoded = scaler.transform(x_train[float_cols])
x_test_float_encoded = scaler.transform(x_test[float_cols])
x_train_processed = np.hstack((x_train_cat_encoded, x_train_float_encoded))
x_test_processed = np.hstack((x_test_cat_encoded, x_test_float_encoded))
lr = LogisticRegression()
lr.fit(x_train_processed, y_train_processed)
y_pred = lr.predict(x_test_processed)
print(accuracy_score(y_test_processed, y_pred))
print(confusion_matrix(y_test_processed, y_pred))
code
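Editor's note: the manual encode-scale-hstack sequence that these cells repeat can be expressed as a single scikit-learn pipeline, which also guarantees the encoders are fitted on the training fold only; a sketch under the same column split (object vs int64 dtypes) and the same x_train/x_test/y labels:

from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OrdinalEncoder, MinMaxScaler
from sklearn.linear_model import LogisticRegression

pre = ColumnTransformer([
    ('cat', OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1), obj_cols),
    ('num', MinMaxScaler(), float_cols),
])
clf = Pipeline([('pre', pre), ('lr', LogisticRegression())])
clf.fit(x_train, y_train_processed)          # preprocessing fitted on the training fold only
print(clf.score(x_test, y_test_processed))   # accuracy on the held-out fold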
130014262/cell_13
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder

# y_train/y_test come from the train_test_split in cell_8, not repeated here
le = LabelEncoder()
le.fit(y_train)
y_train_processed = le.transform(y_train)
y_test_processed = le.transform(y_test)
y_train_processed
code
130014262/cell_9
[ "image_output_1.png" ]
# x_train comes from the train_test_split in cell_8, not repeated here
obj_cols = x_train.select_dtypes(include='object').columns
obj_cols
code
130014262/cell_25
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, OrdinalEncoder
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/autism-screening-for-toddlers/Toddler Autism dataset July 2018.csv')
df.shape
df.sample(5)
df = df.rename(columns={'Class/ASD Traits ': 'ASD'})
x = df.drop('Case_No', axis=1)
x = x.drop('Ethnicity', axis=1)
x = x.drop('ASD', axis=1)
y = df['ASD']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=36)  # split as in cell_8
obj_cols = x_train.select_dtypes(include='object').columns
obj_cols
float_cols = x_train.select_dtypes(include='int64').columns
float_cols
le = LabelEncoder()
le.fit(y_train)
y_train_processed = le.transform(y_train)
y_test_processed = le.transform(y_test)
oe = OrdinalEncoder(categories=[x_train[i].unique() for i in obj_cols])
oe.fit(x_train[obj_cols])
x_train_cat_encoded = oe.transform(x_train[obj_cols])
x_test_cat_encoded = oe.transform(x_test[obj_cols])
scaler = MinMaxScaler()
scaler.fit(x_train[float_cols])
x_train_float_encoded = scaler.transform(x_train[float_cols])
x_test_float_encoded = scaler.transform(x_test[float_cols])
x_train_processed = np.hstack((x_train_cat_encoded, x_train_float_encoded))
x_test_processed = np.hstack((x_test_cat_encoded, x_test_float_encoded))
feature_names = np.concatenate([obj_cols, float_cols])
x_train_final = pd.DataFrame(x_train_processed, columns=feature_names)
lr = LogisticRegression()
lr.fit(x_train_processed, y_train_processed)
y_pred = lr.predict(x_test_processed)

def pretty_confusion_matrix(y_test, y_pred, labels=['Not_Diagnosed_with_ASD', 'ASD_Diagnosed']):
    cm = confusion_matrix(y_test, y_pred)
    pred_labels = ['Predicted ' + i for i in labels]
    df = pd.DataFrame(cm, columns=pred_labels, index=labels)
    return df

results_plot = pretty_confusion_matrix(y_test_processed, y_pred, ['Not_Diagnosed_with_ASD', 'ASD_Diagnosed'])
results_plot
sns.heatmap(results_plot)
code
130014262/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/autism-screening-for-toddlers/Toddler Autism dataset July 2018.csv')
df.shape
df.sample(5)
code
130014262/cell_20
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler, OrdinalEncoder
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/autism-screening-for-toddlers/Toddler Autism dataset July 2018.csv')
# prep and split as in cells 5-8, not repeated in the original cell
df = df.rename(columns={'Class/ASD Traits ': 'ASD'})
x = df.drop('Case_No', axis=1).drop('Ethnicity', axis=1).drop('ASD', axis=1)
y = df['ASD']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=36)

obj_cols = x_train.select_dtypes(include='object').columns
obj_cols
float_cols = x_train.select_dtypes(include='int64').columns
float_cols
oe = OrdinalEncoder(categories=[x_train[i].unique() for i in obj_cols])
oe.fit(x_train[obj_cols])
x_train_cat_encoded = oe.transform(x_train[obj_cols])
x_test_cat_encoded = oe.transform(x_test[obj_cols])
scaler = MinMaxScaler()
scaler.fit(x_train[float_cols])
x_train_float_encoded = scaler.transform(x_train[float_cols])
x_test_float_encoded = scaler.transform(x_test[float_cols])
x_train_processed = np.hstack((x_train_cat_encoded, x_train_float_encoded))
x_test_processed = np.hstack((x_test_cat_encoded, x_test_float_encoded))
feature_names = np.concatenate([obj_cols, float_cols])
x_train_final = pd.DataFrame(x_train_processed, columns=feature_names)
x_train_final
code
130014262/cell_26
[ "text_html_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, OrdinalEncoder
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd

# data load and split as in cells 3-8, not repeated in the original cell
df = pd.read_csv('/kaggle/input/autism-screening-for-toddlers/Toddler Autism dataset July 2018.csv')
df = df.rename(columns={'Class/ASD Traits ': 'ASD'})
x = df.drop('Case_No', axis=1).drop('Ethnicity', axis=1).drop('ASD', axis=1)
y = df['ASD']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=36)

obj_cols = x_train.select_dtypes(include='object').columns
obj_cols
float_cols = x_train.select_dtypes(include='int64').columns
float_cols
le = LabelEncoder()
le.fit(y_train)
y_train_processed = le.transform(y_train)
y_test_processed = le.transform(y_test)
oe = OrdinalEncoder(categories=[x_train[i].unique() for i in obj_cols])
oe.fit(x_train[obj_cols])
x_train_cat_encoded = oe.transform(x_train[obj_cols])
x_test_cat_encoded = oe.transform(x_test[obj_cols])
scaler = MinMaxScaler()
scaler.fit(x_train[float_cols])
x_train_float_encoded = scaler.transform(x_train[float_cols])
x_test_float_encoded = scaler.transform(x_test[float_cols])
x_train_processed = np.hstack((x_train_cat_encoded, x_train_float_encoded))
x_test_processed = np.hstack((x_test_cat_encoded, x_test_float_encoded))
lr = LogisticRegression()
lr.fit(x_train_processed, y_train_processed)
y_pred = lr.predict(x_test_processed)
lr.coef_
code
130014262/cell_11
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder

# y_train comes from the train_test_split in cell_8, not repeated here
le = LabelEncoder()
le.fit(y_train)
code
130014262/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
130014262/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/autism-screening-for-toddlers/Toddler Autism dataset July 2018.csv')
df.shape
df.sample(5)
df = df.rename(columns={'Class/ASD Traits ': 'ASD'})
x = df.drop('Case_No', axis=1)
x = x.drop('Ethnicity', axis=1)
x = x.drop('ASD', axis=1)
y = df['ASD']
(x.shape, y.shape)
code
130014262/cell_28
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, OrdinalEncoder
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/autism-screening-for-toddlers/Toddler Autism dataset July 2018.csv')
df.shape
df.sample(5)
df = df.rename(columns={'Class/ASD Traits ': 'ASD'})
x = df.drop('Case_No', axis=1)
x = x.drop('Ethnicity', axis=1)
x = x.drop('ASD', axis=1)
y = df['ASD']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=36)  # split as in cell_8
obj_cols = x_train.select_dtypes(include='object').columns
obj_cols
float_cols = x_train.select_dtypes(include='int64').columns
float_cols
le = LabelEncoder()
le.fit(y_train)
y_train_processed = le.transform(y_train)
y_test_processed = le.transform(y_test)
oe = OrdinalEncoder(categories=[x_train[i].unique() for i in obj_cols])
oe.fit(x_train[obj_cols])
x_train_cat_encoded = oe.transform(x_train[obj_cols])
x_test_cat_encoded = oe.transform(x_test[obj_cols])
scaler = MinMaxScaler()
scaler.fit(x_train[float_cols])
x_train_float_encoded = scaler.transform(x_train[float_cols])
x_test_float_encoded = scaler.transform(x_test[float_cols])
x_train_processed = np.hstack((x_train_cat_encoded, x_train_float_encoded))
x_test_processed = np.hstack((x_test_cat_encoded, x_test_float_encoded))
feature_names = np.concatenate([obj_cols, float_cols])
x_train_final = pd.DataFrame(x_train_processed, columns=feature_names)
lr = LogisticRegression()
lr.fit(x_train_processed, y_train_processed)
y_pred = lr.predict(x_test_processed)

def pretty_confusion_matrix(y_test, y_pred, labels=['Not_Diagnosed_with_ASD', 'ASD_Diagnosed']):
    cm = confusion_matrix(y_test, y_pred)
    pred_labels = ['Predicted ' + i for i in labels]
    df = pd.DataFrame(cm, columns=pred_labels, index=labels)
    return df

lr.coef_
# feature_names matches the coefficient order; the original zipped df.columns,
# which mislabels the bars (df still contains Case_No/Ethnicity/ASD)
feature_dict = dict(zip(feature_names, list(lr.coef_[0])))
feature_dict
feature_df = pd.DataFrame(feature_dict, index=[0])
feature_df.T.plot.bar(title='Feature Importance', legend=False)
code
130014262/cell_8
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/autism-screening-for-toddlers/Toddler Autism dataset July 2018.csv')
df.shape
df.sample(5)
df = df.rename(columns={'Class/ASD Traits ': 'ASD'})
x = df.drop('Case_No', axis=1)
x = x.drop('Ethnicity', axis=1)
x = x.drop('ASD', axis=1)
y = df['ASD']
(x.shape, y.shape)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=36)
code
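Editor's note: if the ASD label is imbalanced, a stratified variant of the split in cell_8 keeps the class ratio identical in both folds; a one-line sketch:

x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.2, random_state=36, stratify=y)  # preserve the label ratio in both folds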
130014262/cell_15
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.preprocessing import OrdinalEncoder

# x_train/x_test come from the train_test_split in cell_8, not repeated here
obj_cols = x_train.select_dtypes(include='object').columns
obj_cols
float_cols = x_train.select_dtypes(include='int64').columns
float_cols
oe = OrdinalEncoder(categories=[x_train[i].unique() for i in obj_cols])
oe.fit(x_train[obj_cols])
x_train_cat_encoded = oe.transform(x_train[obj_cols])
x_test_cat_encoded = oe.transform(x_test[obj_cols])
x_train_cat_encoded
code
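Editor's note: fitting OrdinalEncoder with categories taken from x_train uniques makes transform() raise on any value that appears only in x_test. A sketch of the tolerant configuration (scikit-learn >= 0.24):

from sklearn.preprocessing import OrdinalEncoder

oe = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1)
oe.fit(x_train[obj_cols])                             # categories inferred per column at fit time
x_test_cat_encoded = oe.transform(x_test[obj_cols])   # unseen labels map to -1 instead of raising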
130014262/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/autism-screening-for-toddlers/Toddler Autism dataset July 2018.csv')
df.shape
code
130014262/cell_24
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, OrdinalEncoder
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/autism-screening-for-toddlers/Toddler Autism dataset July 2018.csv')
df.shape
df.sample(5)
df = df.rename(columns={'Class/ASD Traits ': 'ASD'})
x = df.drop('Case_No', axis=1)
x = x.drop('Ethnicity', axis=1)
x = x.drop('ASD', axis=1)
y = df['ASD']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=36)  # split as in cell_8
obj_cols = x_train.select_dtypes(include='object').columns
obj_cols
float_cols = x_train.select_dtypes(include='int64').columns
float_cols
le = LabelEncoder()
le.fit(y_train)
y_train_processed = le.transform(y_train)
y_test_processed = le.transform(y_test)
oe = OrdinalEncoder(categories=[x_train[i].unique() for i in obj_cols])
oe.fit(x_train[obj_cols])
x_train_cat_encoded = oe.transform(x_train[obj_cols])
x_test_cat_encoded = oe.transform(x_test[obj_cols])
scaler = MinMaxScaler()
scaler.fit(x_train[float_cols])
x_train_float_encoded = scaler.transform(x_train[float_cols])
x_test_float_encoded = scaler.transform(x_test[float_cols])
x_train_processed = np.hstack((x_train_cat_encoded, x_train_float_encoded))
x_test_processed = np.hstack((x_test_cat_encoded, x_test_float_encoded))
feature_names = np.concatenate([obj_cols, float_cols])
x_train_final = pd.DataFrame(x_train_processed, columns=feature_names)
lr = LogisticRegression()
lr.fit(x_train_processed, y_train_processed)
y_pred = lr.predict(x_test_processed)

def pretty_confusion_matrix(y_test, y_pred, labels=['Not_Diagnosed_with_ASD', 'ASD_Diagnosed']):
    cm = confusion_matrix(y_test, y_pred)
    pred_labels = ['Predicted ' + i for i in labels]
    df = pd.DataFrame(cm, columns=pred_labels, index=labels)
    return df

results_plot = pretty_confusion_matrix(y_test_processed, y_pred, ['Not_Diagnosed_with_ASD', 'ASD_Diagnosed'])
results_plot
code
130014262/cell_22
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder

# y_train/y_test come from the train_test_split in cell_8, not repeated here
le = LabelEncoder()
le.fit(y_train)
y_train_processed = le.transform(y_train)
y_test_processed = le.transform(y_test)
le.inverse_transform([0, 1])
code
130014262/cell_10
[ "text_plain_output_1.png" ]
# x_train comes from the train_test_split in cell_8, not repeated here
obj_cols = x_train.select_dtypes(include='object').columns
obj_cols
float_cols = x_train.select_dtypes(include='int64').columns
float_cols
code
130014262/cell_27
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, OrdinalEncoder
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/autism-screening-for-toddlers/Toddler Autism dataset July 2018.csv')
df.shape
df.sample(5)
df = df.rename(columns={'Class/ASD Traits ': 'ASD'})
x = df.drop('Case_No', axis=1)
x = x.drop('Ethnicity', axis=1)
x = x.drop('ASD', axis=1)
y = df['ASD']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=36)  # split as in cell_8
obj_cols = x_train.select_dtypes(include='object').columns
obj_cols
float_cols = x_train.select_dtypes(include='int64').columns
float_cols
le = LabelEncoder()
le.fit(y_train)
y_train_processed = le.transform(y_train)
y_test_processed = le.transform(y_test)
oe = OrdinalEncoder(categories=[x_train[i].unique() for i in obj_cols])
oe.fit(x_train[obj_cols])
x_train_cat_encoded = oe.transform(x_train[obj_cols])
x_test_cat_encoded = oe.transform(x_test[obj_cols])
scaler = MinMaxScaler()
scaler.fit(x_train[float_cols])
x_train_float_encoded = scaler.transform(x_train[float_cols])
x_test_float_encoded = scaler.transform(x_test[float_cols])
x_train_processed = np.hstack((x_train_cat_encoded, x_train_float_encoded))
x_test_processed = np.hstack((x_test_cat_encoded, x_test_float_encoded))
feature_names = np.concatenate([obj_cols, float_cols])
x_train_final = pd.DataFrame(x_train_processed, columns=feature_names)
lr = LogisticRegression()
lr.fit(x_train_processed, y_train_processed)
y_pred = lr.predict(x_test_processed)

def pretty_confusion_matrix(y_test, y_pred, labels=['Not_Diagnosed_with_ASD', 'ASD_Diagnosed']):
    cm = confusion_matrix(y_test, y_pred)
    pred_labels = ['Predicted ' + i for i in labels]
    df = pd.DataFrame(cm, columns=pred_labels, index=labels)
    return df

lr.coef_
# feature_names matches the coefficient order; the original zipped df.columns, which mislabels them
feature_dict = dict(zip(feature_names, list(lr.coef_[0])))
feature_dict
code
128005328/cell_42
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
data_train.replace({'Sex': {'male': 0, 'female': 1}, 'Embarked': {'S': 0, 'C': 1, 'Q': 2}}, inplace=True)
data_train.head()
code
128005328/cell_21
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
sns.set()
sns.countplot(x='Survived', data=data_train)  # keyword form; passing the column positionally is removed in newer seaborn
code
128005328/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
emb = pd.get_dummies(data_train['Embarked'], drop_first=True)
emb
code
128005328/cell_25
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
data_train[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
128005328/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.tail()
code
128005328/cell_34
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
data_train['Embarked'].value_counts()
code
128005328/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
sns.set()
sns.countplot(x='Sex', data=data_train)  # x= keyword required by newer seaborn
code
128005328/cell_30
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
data_train['Age'].value_counts()
code
128005328/cell_33
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
data_train[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
128005328/cell_44
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
data_train.replace({'Sex': {'male': 0, 'female': 1}, 'Embarked': {'S': 0, 'C': 1, 'Q': 2}}, inplace=True)
data_train.drop('Embarked', axis=1, inplace=True)
data_train
X = data_train.drop(columns=['PassengerId', 'Name', 'Ticket', 'Survived', 'Fare'], axis=1)
X.head()
code
128005328/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.info()
# data_test is never loaded in the captured cells; the line below assumes a test.csv next to train.csv
data_test = pd.read_csv('D:\\projects ML\\titanic\\test.csv')
data_test.info()
code
128005328/cell_40
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
data_train.replace({'Sex': {'male': 0, 'female': 1}, 'Embarked': {'S': 0, 'C': 1, 'Q': 2}}, inplace=True)
data_train.head()
code
128005328/cell_29
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
data_train[['Age', 'Survived']].groupby(['Age'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
128005328/cell_39
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
emb = pd.get_dummies(data_train['Embarked'], drop_first=True)
emb
data_train.isnull().sum()
data_train.replace({'Sex': {'male': 0, 'female': 1}, 'Embarked': {'S': 0, 'C': 1, 'Q': 2}}, inplace=True)
emb = pd.get_dummies(data_train['Embarked'], drop_first=True)
emb
code
128005328/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
data_train['Pclass'].value_counts()
code
128005328/cell_48
[ "text_html_output_1.png" ]
from sklearn.linear_model import LogisticRegression
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
data_train.replace({'Sex': {'male': 0, 'female': 1}, 'Embarked': {'S': 0, 'C': 1, 'Q': 2}}, inplace=True)
data_train.drop('Embarked', axis=1, inplace=True)
data_train
X = data_train.drop(columns=['PassengerId', 'Name', 'Ticket', 'Survived', 'Fare'], axis=1)
# X_train/X_test/y_train/y_test come from a train_test_split not captured in this extract
LRmodel = LogisticRegression(max_iter=5000)
LRmodel.fit(X_train, y_train)
print(LRmodel.score(X_train, y_train) * 100)
print(LRmodel.score(X_test, y_test) * 100)
code
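Editor's note: with a dataset this small, a single train/test split gives noisy scores; a quick cross-validation sketch, assuming the X and y built in cells 44-45 and that missing values (e.g. Age) were imputed in a step not captured here:

from sklearn.model_selection import cross_val_score

scores = cross_val_score(LogisticRegression(max_iter=5000), X, y, cv=5)
print(scores.mean() * 100, scores.std() * 100)  # mean accuracy and spread across 5 folds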
128005328/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
data_train['Survived'].value_counts()
code
128005328/cell_52
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
data_train.replace({'Sex': {'male': 0, 'female': 1}, 'Embarked': {'S': 0, 'C': 1, 'Q': 2}}, inplace=True)
data_train.drop('Embarked', axis=1, inplace=True)
data_train
X = data_train.drop(columns=['PassengerId', 'Name', 'Ticket', 'Survived', 'Fare'], axis=1)
# X_train/y_train come from a train_test_split not captured in this extract
LRmodel = LogisticRegression(max_iter=5000)
LRmodel.fit(X_train, y_train)
X_train_predict = LRmodel.predict(X_train)
X_train_predict
X_train_predict = LRmodel.predict(X_train)
X_train_predict
code
128005328/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
code
128005328/cell_45
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
data_train.replace({'Sex': {'male': 0, 'female': 1}, 'Embarked': {'S': 0, 'C': 1, 'Q': 2}}, inplace=True)
data_train.drop('Embarked', axis=1, inplace=True)
data_train
X = data_train.drop(columns=['PassengerId', 'Name', 'Ticket', 'Survived', 'Fare'], axis=1)
y = data_train['Survived']
y.head()
code
128005328/cell_49
[ "text_html_output_1.png" ]
from sklearn.linear_model import LogisticRegression
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
data_train.replace({'Sex': {'male': 0, 'female': 1}, 'Embarked': {'S': 0, 'C': 1, 'Q': 2}}, inplace=True)
data_train.drop('Embarked', axis=1, inplace=True)
data_train
X = data_train.drop(columns=['PassengerId', 'Name', 'Ticket', 'Survived', 'Fare'], axis=1)
# X_train/y_train come from a train_test_split not captured in this extract
LRmodel = LogisticRegression(max_iter=5000)
LRmodel.fit(X_train, y_train)
X_train_predict = LRmodel.predict(X_train)
X_train_predict
code
128005328/cell_18
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
data_train[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
128005328/cell_32
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
sns.set()
sns.countplot(x='Age', hue='Survived', data=data_train)  # x= keyword required by newer seaborn
code
128005328/cell_51
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
data_train.replace({'Sex': {'male': 0, 'female': 1}, 'Embarked': {'S': 0, 'C': 1, 'Q': 2}}, inplace=True)
data_train.drop('Embarked', axis=1, inplace=True)
data_train
X = data_train.drop(columns=['PassengerId', 'Name', 'Ticket', 'Survived', 'Fare'], axis=1)
# X_train/X_test/y_train/y_test come from a train_test_split not captured in this extract
randommodel = RandomForestClassifier(n_estimators=1000)
randommodel.fit(X_train, y_train)
print(randommodel.score(X_train, y_train) * 100)
print(randommodel.score(X_test, y_test) * 100)
code
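Editor's note: a 1000-tree forest with unlimited depth tends to memorize a training fold this small, which inflates the train score; a regularized sketch, assuming the same split variables:

from sklearn.ensemble import RandomForestClassifier

rf = RandomForestClassifier(n_estimators=200, max_depth=5, random_state=0)  # shallower trees generalize better
rf.fit(X_train, y_train)
print(rf.score(X_train, y_train) * 100, rf.score(X_test, y_test) * 100)  # compare train vs test accuracy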
128005328/cell_28
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
sns.set()
sns.countplot(x='Pclass', hue='Survived', data=data_train)  # x= keyword required by newer seaborn
code
128005328/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
sns.heatmap(data_train.isnull())
code
128005328/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
code
128005328/cell_38
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
data_train.replace({'Sex': {'male': 0, 'female': 1}, 'Embarked': {'S': 0, 'C': 1, 'Q': 2}}, inplace=True)
data_train.head()
code
128005328/cell_47
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
data_train.replace({'Sex': {'male': 0, 'female': 1}, 'Embarked': {'S': 0, 'C': 1, 'Q': 2}}, inplace=True)
data_train.drop('Embarked', axis=1, inplace=True)
data_train
X = data_train.drop(columns=['PassengerId', 'Name', 'Ticket', 'Survived', 'Fare'], axis=1)
# X_train/X_test come from a train_test_split not captured in this extract
print(X.shape, X_train.shape, X_test.shape)
code
128005328/cell_3
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.head()
code
128005328/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
data_train.describe()
code
128005328/cell_35
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
sns.set()
sns.countplot(x='Embarked', data=data_train)  # x= keyword required by newer seaborn
code
128005328/cell_43
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
data_train.replace({'Sex': {'male': 0, 'female': 1}, 'Embarked': {'S': 0, 'C': 1, 'Q': 2}}, inplace=True)
data_train.drop('Embarked', axis=1, inplace=True)
data_train
code
128005328/cell_31
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
sns.set()
sns.countplot(x='Age', data=data_train)  # x= keyword required by newer seaborn
code
128005328/cell_24
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
sns.set()
sns.countplot(x='Sex', hue='Survived', data=data_train)  # x= keyword required by newer seaborn
code
128005328/cell_14
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
print(data_train['Embarked'].mode()[0])
code
128005328/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
data_train['Sex'].value_counts()
code
128005328/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.head()
code
128005328/cell_27
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
sns.set()
sns.countplot(x='Pclass', data=data_train)  # x= keyword required by newer seaborn
code
128005328/cell_12
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
print(data_train['Embarked'].mode())
code
128005328/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
code
128005328/cell_36
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

data_train = pd.read_csv('D:\\projects ML\\titanic\\train.csv')
data_train.shape
data_train.isnull().sum()
data_train = data_train.drop(columns='Cabin', axis=1)
data_train.isnull().sum()
sns.set()
sns.countplot(x='Embarked', hue='Survived', data=data_train)  # x= keyword required by newer seaborn
code
2012676/cell_21
[ "text_html_output_1.png" ]
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from sklearn import cluster, mixture, metrics  # For clustering
import matplotlib.pyplot as plt  # For graphics
import pandas as pd  # Dataframe manipulation
import plotly.graph_objs as go

whr_data = pd.read_csv('../input/2017.csv', header=0)
whr_data_for_clus = whr_data.iloc[:, 5:]
n_clusters = 2
bandwidth = 0.1
eps = 0.3
damping = 0.9
preference = -200
metric = 'euclidean'
cluster_dist = {'Technique': ['K-means', 'Mean Shift', 'Mini Batch K-Means', 'Spectral', 'DBSCAN',
                              'Affinity Propagation', 'Birch', 'Gaussian Mixture Modeling'],
                'FunctionName': ['Kmeans_Technique', 'MeanShift_Technique', 'MiniKmean_Technique',
                                 'Spectral_Technique', 'Dbscan_Technique', 'AffProp_Technique',
                                 'Birch_Technique', 'Gmm_Technique']}
cluster_df = pd.DataFrame(cluster_dist)

def Kmeans_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#k-means"""
    km = cluster.KMeans(n_clusters=n_clusters)
    return km.fit_predict(ds)

def MeanShift_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#mean-shift"""
    ms = cluster.MeanShift(bandwidth=bandwidth)
    return ms.fit_predict(ds)

def MiniKmean_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#mini-batch-k-means"""
    mkm = cluster.MiniBatchKMeans(n_clusters=n_clusters)
    return mkm.fit_predict(ds)

def Spectral_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#spectral-clustering"""
    spectral = cluster.SpectralClustering(n_clusters=n_clusters)
    return spectral.fit_predict(ds)

def Dbscan_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#dbscan"""
    dbscan = cluster.DBSCAN(eps=eps)
    return dbscan.fit_predict(ds)

def AffProp_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#affinity-propagation"""
    ap = cluster.AffinityPropagation(damping=damping, preference=preference)
    return ap.fit_predict(ds)

def Birch_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#birch"""
    birch = cluster.Birch(n_clusters=n_clusters)
    return birch.fit_predict(ds)

def Gmm_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html#sklearn.mixture.GaussianMixture"""
    gmm = mixture.GaussianMixture(n_components=n_clusters, covariance_type='full')
    gmm.fit(ds)
    return gmm.predict(ds)

def GetSilhouetteCoeff(ds, result):
    """
    Ref: http://scikit-learn.org/stable/modules/clustering.html#silhouette-coefficient
    Input -
    - ds - The dataset for which the clustering was done
    - result - The labels after the clustering
    """
    return metrics.silhouette_score(ds, result, metric=metric)

def GetMethodName_Temp(method):
    m = str(method)
    m = m[1:m.index('\n')]
    return m.strip()

for t in cluster_df.Technique:
    method = cluster_df[cluster_df.Technique == t].FunctionName
    m = GetMethodName_Temp(method)
    result = locals()[m](whr_data_for_clus)
    whr_data[t] = pd.DataFrame(result)
    if t != 'Affinity Propagation':
        cluster_df.loc[cluster_df.Technique == t, 'Silhouette.Coeff'] = GetSilhouetteCoeff(whr_data_for_clus, result)
# the original wrote this default into a separate 'SilCoeff' column, which looks
# like a typo for the 'Silhouette.Coeff' column filled in the loop above
cluster_df.loc[cluster_df.Technique == 'Affinity Propagation', 'Silhouette.Coeff'] = 0

whr_data.iloc[:, [0, 12, 13, 14, 15, 16, 17, 18, 19]]
cluster_df.iloc[:, [1, 2]]

rows = 4  # No of rows for the plot
cols = 2  # No of columns for the plot
cdf = cluster_df['Technique']

# 4 X 2 plot
fig, ax = plt.subplots(rows, cols, figsize=(15, 10))
x = 0
y = 0
for idx, i in enumerate(cdf):
    # colour by the idx-th cluster-label column; the original's 12 + (x * y)
    # re-used column 12 for every first-column subplot
    ax[x, y].scatter(whr_data.iloc[:, 6], whr_data.iloc[:, 5], c=whr_data.iloc[:, 12 + idx])
    ax[x, y].set_title(i + ' Cluster Result')  # Set the title for each of the plot
    y = y + 1
    if y == cols:
        x = x + 1
        y = 0
plt.subplots_adjust(bottom=-0.5, top=1.5)
plt.show()
x = 0
y = 0

data = dict(type='choropleth', locations=whr_data['Country'], locationmode='country names',
            z=whr_data['Happiness.Score'], text=whr_data['Country'], colorbar={'title': 'Happiness Score'})
layout = dict(title='Global Happiness Score', geo=dict(showframe=False, projection={'type': 'Mercator'}))
choromap3 = go.Figure(data=[data], layout=layout)
iplot(choromap3)
code
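Editor's note: n_clusters is fixed at 2 throughout this notebook; a short sketch that scans k with K-means and compares silhouette scores, using the same whr_data_for_clus frame:

from sklearn import cluster, metrics

for k in range(2, 7):
    labels = cluster.KMeans(n_clusters=k).fit_predict(whr_data_for_clus)
    print(k, round(metrics.silhouette_score(whr_data_for_clus, labels), 3))  # higher is better-separated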
2012676/cell_23
[ "text_html_output_1.png" ]
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from sklearn import cluster, mixture, metrics  # For clustering
import matplotlib.pyplot as plt  # For graphics
import pandas as pd  # Dataframe manipulation
import plotly.graph_objs as go

whr_data = pd.read_csv('../input/2017.csv', header=0)
whr_data_for_clus = whr_data.iloc[:, 5:]
n_clusters = 2
bandwidth = 0.1
eps = 0.3
damping = 0.9
preference = -200
metric = 'euclidean'
cluster_dist = {'Technique': ['K-means', 'Mean Shift', 'Mini Batch K-Means', 'Spectral', 'DBSCAN',
                              'Affinity Propagation', 'Birch', 'Gaussian Mixture Modeling'],
                'FunctionName': ['Kmeans_Technique', 'MeanShift_Technique', 'MiniKmean_Technique',
                                 'Spectral_Technique', 'Dbscan_Technique', 'AffProp_Technique',
                                 'Birch_Technique', 'Gmm_Technique']}
cluster_df = pd.DataFrame(cluster_dist)

def Kmeans_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#k-means"""
    km = cluster.KMeans(n_clusters=n_clusters)
    return km.fit_predict(ds)

def MeanShift_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#mean-shift"""
    ms = cluster.MeanShift(bandwidth=bandwidth)
    return ms.fit_predict(ds)

def MiniKmean_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#mini-batch-k-means"""
    mkm = cluster.MiniBatchKMeans(n_clusters=n_clusters)
    return mkm.fit_predict(ds)

def Spectral_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#spectral-clustering"""
    spectral = cluster.SpectralClustering(n_clusters=n_clusters)
    return spectral.fit_predict(ds)

def Dbscan_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#dbscan"""
    dbscan = cluster.DBSCAN(eps=eps)
    return dbscan.fit_predict(ds)

def AffProp_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#affinity-propagation"""
    ap = cluster.AffinityPropagation(damping=damping, preference=preference)
    return ap.fit_predict(ds)

def Birch_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#birch"""
    birch = cluster.Birch(n_clusters=n_clusters)
    return birch.fit_predict(ds)

def Gmm_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html#sklearn.mixture.GaussianMixture"""
    gmm = mixture.GaussianMixture(n_components=n_clusters, covariance_type='full')
    gmm.fit(ds)
    return gmm.predict(ds)

def GetSilhouetteCoeff(ds, result):
    """
    Ref: http://scikit-learn.org/stable/modules/clustering.html#silhouette-coefficient
    Input -
    - ds - The dataset for which the clustering was done
    - result - The labels after the clustering
    """
    return metrics.silhouette_score(ds, result, metric=metric)

def GetMethodName_Temp(method):
    m = str(method)
    m = m[1:m.index('\n')]
    return m.strip()

for t in cluster_df.Technique:
    method = cluster_df[cluster_df.Technique == t].FunctionName
    m = GetMethodName_Temp(method)
    result = locals()[m](whr_data_for_clus)
    whr_data[t] = pd.DataFrame(result)
    if t != 'Affinity Propagation':
        cluster_df.loc[cluster_df.Technique == t, 'Silhouette.Coeff'] = GetSilhouetteCoeff(whr_data_for_clus, result)
# the original wrote this default into a separate 'SilCoeff' column, which looks
# like a typo for the 'Silhouette.Coeff' column filled in the loop above
cluster_df.loc[cluster_df.Technique == 'Affinity Propagation', 'Silhouette.Coeff'] = 0

whr_data.iloc[:, [0, 12, 13, 14, 15, 16, 17, 18, 19]]
cluster_df.iloc[:, [1, 2]]

rows = 4  # No of rows for the plot
cols = 2  # No of columns for the plot
cdf = cluster_df['Technique']

# 4 X 2 plot
fig, ax = plt.subplots(rows, cols, figsize=(15, 10))
x = 0
y = 0
for idx, i in enumerate(cdf):
    # colour by the idx-th cluster-label column; the original's 12 + (x * y)
    # re-used column 12 for every first-column subplot
    ax[x, y].scatter(whr_data.iloc[:, 6], whr_data.iloc[:, 5], c=whr_data.iloc[:, 12 + idx])
    ax[x, y].set_title(i + ' Cluster Result')  # Set the title for each of the plot
    y = y + 1
    if y == cols:
        x = x + 1
        y = 0
plt.subplots_adjust(bottom=-0.5, top=1.5)
plt.show()
x = 0
y = 0

data = dict(type='choropleth', locations=whr_data['Country'], locationmode='country names',
            z=whr_data['Happiness.Score'], text=whr_data['Country'], colorbar={'title': 'Happiness Score'})
layout = dict(title='Global Happiness Score', geo=dict(showframe=False, projection={'type': 'Mercator'}))
choromap3 = go.Figure(data=[data], layout=layout)
data = dict(type='choropleth', locations=whr_data['Country'], locationmode='country names',
            z=whr_data['K-means'], text=whr_data['Country'], colorbar={'title': 'Cluster Group'})
layout = dict(title='K-Means Clustering Visualization', geo=dict(showframe=False, projection={'type': 'Mercator'}))
choromap3 = go.Figure(data=[data], layout=layout)
iplot(choromap3)
code
2012676/cell_6
[ "image_output_1.png" ]
import pandas as pd  # Dataframe manipulation

whr_data = pd.read_csv('../input/2017.csv', header=0)
whr_data.head()
code
2012676/cell_2
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import cluster, mixture, metrics
from sklearn.preprocessing import StandardScaler
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot

init_notebook_mode(connected=True)
import os
import warnings

warnings.filterwarnings('ignore')
code
2012676/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # Dataframe manipulation

whr_data = pd.read_csv('../input/2017.csv', header=0)
whr_data_for_clus = whr_data.iloc[:, 5:]
whr_data_for_clus.head(3)
code
2012676/cell_18
[ "text_html_output_1.png" ]
from sklearn import cluster, mixture, metrics  # For clustering
import matplotlib.pyplot as plt  # For graphics
import pandas as pd  # Dataframe manipulation

whr_data = pd.read_csv('../input/2017.csv', header=0)
whr_data_for_clus = whr_data.iloc[:, 5:]
n_clusters = 2
bandwidth = 0.1
eps = 0.3
damping = 0.9
preference = -200
metric = 'euclidean'
cluster_dist = {'Technique': ['K-means', 'Mean Shift', 'Mini Batch K-Means', 'Spectral', 'DBSCAN',
                              'Affinity Propagation', 'Birch', 'Gaussian Mixture Modeling'],
                'FunctionName': ['Kmeans_Technique', 'MeanShift_Technique', 'MiniKmean_Technique',
                                 'Spectral_Technique', 'Dbscan_Technique', 'AffProp_Technique',
                                 'Birch_Technique', 'Gmm_Technique']}
cluster_df = pd.DataFrame(cluster_dist)

def Kmeans_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#k-means"""
    km = cluster.KMeans(n_clusters=n_clusters)
    return km.fit_predict(ds)

def MeanShift_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#mean-shift"""
    ms = cluster.MeanShift(bandwidth=bandwidth)
    return ms.fit_predict(ds)

def MiniKmean_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#mini-batch-k-means"""
    mkm = cluster.MiniBatchKMeans(n_clusters=n_clusters)
    return mkm.fit_predict(ds)

def Spectral_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#spectral-clustering"""
    spectral = cluster.SpectralClustering(n_clusters=n_clusters)
    return spectral.fit_predict(ds)

def Dbscan_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#dbscan"""
    dbscan = cluster.DBSCAN(eps=eps)
    return dbscan.fit_predict(ds)

def AffProp_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#affinity-propagation"""
    ap = cluster.AffinityPropagation(damping=damping, preference=preference)
    return ap.fit_predict(ds)

def Birch_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#birch"""
    birch = cluster.Birch(n_clusters=n_clusters)
    return birch.fit_predict(ds)

def Gmm_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html#sklearn.mixture.GaussianMixture"""
    gmm = mixture.GaussianMixture(n_components=n_clusters, covariance_type='full')
    gmm.fit(ds)
    return gmm.predict(ds)

def GetSilhouetteCoeff(ds, result):
    """
    Ref: http://scikit-learn.org/stable/modules/clustering.html#silhouette-coefficient
    Input -
    - ds - The dataset for which the clustering was done
    - result - The labels after the clustering
    """
    return metrics.silhouette_score(ds, result, metric=metric)

def GetMethodName_Temp(method):
    m = str(method)
    m = m[1:m.index('\n')]
    return m.strip()

for t in cluster_df.Technique:
    method = cluster_df[cluster_df.Technique == t].FunctionName
    m = GetMethodName_Temp(method)
    result = locals()[m](whr_data_for_clus)
    whr_data[t] = pd.DataFrame(result)
    if t != 'Affinity Propagation':
        cluster_df.loc[cluster_df.Technique == t, 'Silhouette.Coeff'] = GetSilhouetteCoeff(whr_data_for_clus, result)
# the original wrote this default into a separate 'SilCoeff' column, which looks
# like a typo for the 'Silhouette.Coeff' column filled in the loop above
cluster_df.loc[cluster_df.Technique == 'Affinity Propagation', 'Silhouette.Coeff'] = 0

whr_data.iloc[:, [0, 12, 13, 14, 15, 16, 17, 18, 19]]
cluster_df.iloc[:, [1, 2]]

rows = 4  # No of rows for the plot
cols = 2  # No of columns for the plot
cdf = cluster_df['Technique']

fig, ax = plt.subplots(rows, cols, figsize=(15, 10))
x = 0
y = 0
for idx, i in enumerate(cdf):
    # colour by the idx-th cluster-label column; the original's 12 + (x * y)
    # re-used column 12 for every first-column subplot
    ax[x, y].scatter(whr_data.iloc[:, 6], whr_data.iloc[:, 5], c=whr_data.iloc[:, 12 + idx])
    ax[x, y].set_title(i + ' Cluster Result')
    y = y + 1
    if y == cols:
        x = x + 1
        y = 0
plt.subplots_adjust(bottom=-0.5, top=1.5)
plt.show()
x = 0
y = 0
code
2012676/cell_8
[ "text_html_output_1.png" ]
import pandas as pd  # Dataframe manipulation
from sklearn.preprocessing import StandardScaler  # used below but only imported in cell_2

whr_data = pd.read_csv('../input/2017.csv', header=0)
whr_data_for_clus = whr_data.iloc[:, 5:]
ss = StandardScaler()
ss.fit_transform(whr_data_for_clus)
code
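Editor's note: StandardScaler.fit_transform returns a new array, so the result in cell_8 is computed and then discarded; a sketch that keeps the scaled copy as a DataFrame for the clustering cells:

whr_scaled = pd.DataFrame(ss.fit_transform(whr_data_for_clus),
                          columns=whr_data_for_clus.columns)  # z-scored copy of the cluster features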
2012676/cell_15
[ "text_html_output_1.png" ]
from sklearn import cluster, mixture, metrics  # For clustering
import pandas as pd  # Dataframe manipulation

whr_data = pd.read_csv('../input/2017.csv', header=0)
whr_data_for_clus = whr_data.iloc[:, 5:]
n_clusters = 2
bandwidth = 0.1
eps = 0.3
damping = 0.9
preference = -200
metric = 'euclidean'
cluster_dist = {'Technique': ['K-means', 'Mean Shift', 'Mini Batch K-Means', 'Spectral', 'DBSCAN',
                              'Affinity Propagation', 'Birch', 'Gaussian Mixture Modeling'],
                'FunctionName': ['Kmeans_Technique', 'MeanShift_Technique', 'MiniKmean_Technique',
                                 'Spectral_Technique', 'Dbscan_Technique', 'AffProp_Technique',
                                 'Birch_Technique', 'Gmm_Technique']}
cluster_df = pd.DataFrame(cluster_dist)

def Kmeans_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#k-means"""
    km = cluster.KMeans(n_clusters=n_clusters)
    return km.fit_predict(ds)

def MeanShift_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#mean-shift"""
    ms = cluster.MeanShift(bandwidth=bandwidth)
    return ms.fit_predict(ds)

def MiniKmean_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#mini-batch-k-means"""
    mkm = cluster.MiniBatchKMeans(n_clusters=n_clusters)
    return mkm.fit_predict(ds)

def Spectral_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#spectral-clustering"""
    spectral = cluster.SpectralClustering(n_clusters=n_clusters)
    return spectral.fit_predict(ds)

def Dbscan_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#dbscan"""
    dbscan = cluster.DBSCAN(eps=eps)
    return dbscan.fit_predict(ds)

def AffProp_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#affinity-propagation"""
    ap = cluster.AffinityPropagation(damping=damping, preference=preference)
    return ap.fit_predict(ds)

def Birch_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/clustering.html#birch"""
    birch = cluster.Birch(n_clusters=n_clusters)
    return birch.fit_predict(ds)

def Gmm_Technique(ds):
    """Ref: http://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html#sklearn.mixture.GaussianMixture"""
    gmm = mixture.GaussianMixture(n_components=n_clusters, covariance_type='full')
    gmm.fit(ds)
    return gmm.predict(ds)

def GetSilhouetteCoeff(ds, result):
    """
    Ref: http://scikit-learn.org/stable/modules/clustering.html#silhouette-coefficient
    Input -
    - ds - The dataset for which the clustering was done
    - result - The labels after the clustering
    """
    return metrics.silhouette_score(ds, result, metric=metric)

def GetMethodName_Temp(method):
    m = str(method)
    m = m[1:m.index('\n')]
    return m.strip()

for t in cluster_df.Technique:
    method = cluster_df[cluster_df.Technique == t].FunctionName
    m = GetMethodName_Temp(method)
    result = locals()[m](whr_data_for_clus)
    whr_data[t] = pd.DataFrame(result)
    if t != 'Affinity Propagation':
        cluster_df.loc[cluster_df.Technique == t, 'Silhouette.Coeff'] = GetSilhouetteCoeff(whr_data_for_clus, result)
# the original wrote this default into a separate 'SilCoeff' column, which looks
# like a typo for the 'Silhouette.Coeff' column filled in the loop above
cluster_df.loc[cluster_df.Technique == 'Affinity Propagation', 'Silhouette.Coeff'] = 0
cluster_df.iloc[:, [1, 2]]
code
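n_clusters is pinned to 2 throughout, but the silhouette coefficient already computed per technique can just as well rank candidate cluster counts. A short sketch, assuming whr_data_for_clus is the feature frame built above:

# Sketch: choose k for K-means by silhouette score (illustrative only).
from sklearn import cluster, metrics

best_k, best_score = 2, -1.0
for k in range(2, 9):
    labels = cluster.KMeans(n_clusters=k).fit_predict(whr_data_for_clus)
    score = metrics.silhouette_score(whr_data_for_clus, labels, metric='euclidean')
    if score > best_score:
        best_k, best_score = k, score
print(best_k, best_score)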
16120163/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn import datasets boston = datasets.load_boston() print(boston.keys()) print(boston.data.shape) print(boston.feature_names) print(boston.DESCR)
code
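load_boston still exists in the scikit-learn versions this notebook targets, but it was deprecated in 1.0 and removed in 1.2. A sketch of the replacement recipe from the deprecation notice, assuming the CMU StatLib mirror is still reachable:

# Sketch for scikit-learn >= 1.2, where datasets.load_boston is gone.
import numpy as np
import pandas as pd

data_url = 'http://lib.stat.cmu.edu/datasets/boston'
raw_df = pd.read_csv(data_url, sep=r'\s+', skiprows=22, header=None)
data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])  # 13 features
target = raw_df.values[1::2, 2]  # MEDV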
16120163/cell_20
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Input, Dense
from keras.models import Model
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
boston = datasets.load_boston()
bos = pd.DataFrame(boston.data, columns=boston.feature_names)
target = pd.DataFrame(boston.target, columns=['MEDV'])
bos['MEDV'] = target['MEDV']
correlation_matrix = bos.corr().round(2)
X = bos[['RM', 'LSTAT']]
Y = target['MEDV']
model = sm.OLS(Y, X).fit()
predictions = model.predict(X)
model.summary()
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=5)
scalar = StandardScaler()
X_train = scalar.fit_transform(X_train)
X_test = scalar.transform(X_test)
input_data = Input(shape=(2,))
firstlayer = Dense(2, activation='relu', name='input')(input_data)
ff = Dense(2, activation='relu', name='ff')(firstlayer)
secondlayer = Dense(1, activation='linear', name='prices')(ff)
MLPRegModel = Model(inputs=input_data, outputs=secondlayer)
MLPRegModel.compile(loss='mse', optimizer='rmsprop')
MLPRegModel.fit(X_train, Y_train, epochs=250, batch_size=10)
print('Now making predictions')
predictions = MLPRegModel.predict(X_test)
"""
#this is for reminder purpose
seed=3 best for adam
in adam batch size=1 best result
MSE: 21.7102 epoch-500 batch 2 adam
MSE: 22.2704 epoch-250 batch 2 adam
without tensorflow seeding
MSE: 21.3172 rmsprop
MSE: 20.3754 rmsprop with ff layer
MSE: 20.758 rmsprop epoch- 500 batch 1
MSE: 20.4079 rmsprop epoch- 500 batch 2
MSE: 20.3896 rmsprop epoch- 150 batch 2
MSE: 20.4025 rmsprop epoch- 150 batch 1
after tensorflow seed
MSE: 20.425 rmsprop epoch- 250 batch 2
MSE: 20.3996 rmsprop epoch- 250 batch 3
MSE: 20.3558 rmsprop epoch- 250 batch 10
MSE: 20.4105 rmsprop epoch- 500 batch 10
#original way to calculate mse
pred=pd.DataFrame(predictions)
pred.columns=["MEDV"]
pred["MEDV"]=pred.MEDV.astype(float)
print(pred.head())
ytest=pd.DataFrame(Y_test)
print(ytest.head())
pred.index=ytest.index
pred["diff"]=pred.loc[:,"MEDV"] - ytest.loc[:,"MEDV"]
print(pred.head())
mse = (pred["diff"] ** 2).mean()
print('MSE: {}'.format(round(mse, 4)))
"""
code
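The experiment log in the cell above compares MSE "without tensorflow seeding" and "after tensorflow seed"; a minimal seeding sketch for this Keras/TensorFlow 1.x-era setup (the exact seeding API depends on the installed version, so treat the function name as an assumption):

# Sketch: fix the seeds so the logged MSE comparisons are repeatable.
import random
import numpy as np
import tensorflow as tf

random.seed(3)
np.random.seed(3)
tf.set_random_seed(3)  # TF 1.x name; on TF 2.x this is tf.random.set_seed(3)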
16120163/cell_6
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from sklearn import datasets import pandas as pd boston = datasets.load_boston() bos = pd.DataFrame(boston.data, columns=boston.feature_names) print(bos.head()) target = pd.DataFrame(boston.target, columns=['MEDV']) bos['MEDV'] = target['MEDV']
code
16120163/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn import datasets import numpy as np import pandas as pd import statsmodels.api as sm from sklearn.model_selection import train_test_split import seaborn as sns from keras.layers import Input, Dense from keras.models import Model from sklearn.preprocessing import StandardScaler from sklearn.metrics import r2_score from sklearn.metrics import mean_squared_error from math import sqrt import matplotlib.pyplot as plt
code
16120163/cell_18
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Input, Dense from keras.models import Model from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import statsmodels.api as sm boston = datasets.load_boston() bos = pd.DataFrame(boston.data, columns=boston.feature_names) target = pd.DataFrame(boston.target, columns=['MEDV']) bos['MEDV'] = target['MEDV'] correlation_matrix = bos.corr().round(2) X = bos[['RM', 'LSTAT']] Y = target['MEDV'] model = sm.OLS(Y, X).fit() predictions = model.predict(X) model.summary() X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=5) scalar = StandardScaler() X_train = scalar.fit_transform(X_train) X_test = scalar.transform(X_test) input_data = Input(shape=(2,)) firstlayer = Dense(2, activation='relu', name='input')(input_data) ff = Dense(2, activation='relu', name='ff')(firstlayer) secondlayer = Dense(1, activation='linear', name='prices')(ff) MLPRegModel = Model(inputs=input_data, outputs=secondlayer) MLPRegModel.compile(loss='mse', optimizer='rmsprop') MLPRegModel.fit(X_train, Y_train, epochs=250, batch_size=10)
code
16120163/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn import datasets import matplotlib.pyplot as plt import pandas as pd import seaborn as sns boston = datasets.load_boston() bos = pd.DataFrame(boston.data, columns=boston.feature_names) target = pd.DataFrame(boston.target, columns=['MEDV']) bos['MEDV'] = target['MEDV'] correlation_matrix = bos.corr().round(2) plt.figure(figsize=(12, 5)) sns.heatmap(data=correlation_matrix, annot=True)
code
16120163/cell_14
[ "text_plain_output_1.png" ]
from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import statsmodels.api as sm boston = datasets.load_boston() bos = pd.DataFrame(boston.data, columns=boston.feature_names) target = pd.DataFrame(boston.target, columns=['MEDV']) bos['MEDV'] = target['MEDV'] correlation_matrix = bos.corr().round(2) X = bos[['RM', 'LSTAT']] Y = target['MEDV'] model = sm.OLS(Y, X).fit() predictions = model.predict(X) model.summary() X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=5) scalar = StandardScaler() X_train = scalar.fit_transform(X_train) X_test = scalar.transform(X_test)
code
16120163/cell_22
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Input, Dense
from keras.models import Model
from math import sqrt
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
boston = datasets.load_boston()
bos = pd.DataFrame(boston.data, columns=boston.feature_names)
target = pd.DataFrame(boston.target, columns=['MEDV'])
bos['MEDV'] = target['MEDV']
correlation_matrix = bos.corr().round(2)
X = bos[['RM', 'LSTAT']]
Y = target['MEDV']
model = sm.OLS(Y, X).fit()
predictions = model.predict(X)
model.summary()
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=5)
scalar = StandardScaler()
X_train = scalar.fit_transform(X_train)
X_test = scalar.transform(X_test)
input_data = Input(shape=(2,))
firstlayer = Dense(2, activation='relu', name='input')(input_data)
ff = Dense(2, activation='relu', name='ff')(firstlayer)
secondlayer = Dense(1, activation='linear', name='prices')(ff)
MLPRegModel = Model(inputs=input_data, outputs=secondlayer)
MLPRegModel.compile(loss='mse', optimizer='rmsprop')
MLPRegModel.fit(X_train, Y_train, epochs=250, batch_size=10)
predictions = MLPRegModel.predict(X_test)
"""
#this is for reminder purpose
seed=3 best for adam
in adam batch size=1 best result
MSE: 21.7102 epoch-500 batch 2 adam
MSE: 22.2704 epoch-250 batch 2 adam

without tensorflow seeding
MSE: 21.3172 rmsprop
MSE: 20.3754 rmsprop with ff layer
MSE: 20.758 rmsprop epoch- 500 batch 1
MSE: 20.4079 rmsprop epoch- 500 batch 2
MSE: 20.3896 rmsprop epoch- 150 batch 2
MSE: 20.4025 rmsprop epoch- 150 batch 1

after tensorflow seed
MSE: 20.425 rmsprop epoch- 250 batch 2
MSE: 20.3996 rmsprop epoch- 250 batch 3
MSE: 20.3558 rmsprop epoch- 250 batch 10
MSE: 20.4105 rmsprop epoch- 500 batch 10

#original way to calculate mse

pred=pd.DataFrame(predictions)
pred.columns=["MEDV"]
pred["MEDV"]=pred.MEDV.astype(float)
print(pred.head())
ytest=pd.DataFrame(Y_test)
print(ytest.head())
pred.index=ytest.index
pred["diff"]=pred.loc[:,"MEDV"] - ytest.loc[:,"MEDV"]
print(pred.head())
mse = (pred["diff"] ** 2).mean()
print('MSE: {}'.format(round(mse, 4)))
"""
print('R2 score: {}'.format(round(r2_score(Y_test, predictions), 4)))
print('MSE: {}'.format(round(mean_squared_error(Y_test, predictions), 4)))
print('RMSE: {}'.format(round(sqrt(mean_squared_error(Y_test, predictions)), 4)))
code
16120163/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn import datasets import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import statsmodels.api as sm boston = datasets.load_boston() bos = pd.DataFrame(boston.data, columns=boston.feature_names) target = pd.DataFrame(boston.target, columns=['MEDV']) bos['MEDV'] = target['MEDV'] correlation_matrix = bos.corr().round(2) X = bos[['RM', 'LSTAT']] Y = target['MEDV'] model = sm.OLS(Y, X).fit() predictions = model.predict(X) model.summary()
code
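As written, sm.OLS(Y, X) fits without an intercept (regression through the origin), because statsmodels only adds a constant when asked. A sketch of the intercept-including variant with the same X and Y:

# Sketch: the same OLS fit with an explicit intercept term.
import statsmodels.api as sm

X_const = sm.add_constant(X)  # prepends a column of ones named 'const'
model_c = sm.OLS(Y, X_const).fit()
print(model_c.summary())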
16120163/cell_12
[ "text_plain_output_1.png" ]
from sklearn import datasets from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import statsmodels.api as sm boston = datasets.load_boston() bos = pd.DataFrame(boston.data, columns=boston.feature_names) target = pd.DataFrame(boston.target, columns=['MEDV']) bos['MEDV'] = target['MEDV'] correlation_matrix = bos.corr().round(2) X = bos[['RM', 'LSTAT']] Y = target['MEDV'] model = sm.OLS(Y, X).fit() predictions = model.predict(X) model.summary() X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=5) print(X_train.shape) print(X_test.shape) print(Y_train.shape) print(Y_test.shape)
code
2002376/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn import svm
from sklearn.preprocessing import LabelEncoder
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mushrooms.csv')
# Every column is a categorical string, so encode each one numerically
# before handing the data to the SVM
data = data.apply(LabelEncoder().fit_transform)
data = np.array(data)
train, test = (data[0:8000, :], data[8000:, :])
# The first column of mushrooms.csv is the class label
Xtrain, ytrain = (train[:, 1:], train[:, 0])
Xtest, ytest = (test[:, 1:], test[:, 0])
model = svm.SVC(kernel='linear', gamma=1)  # gamma is ignored by the linear kernel
model.fit(Xtrain, ytrain)
code
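Label encoding imposes an arbitrary numeric order on unordered categories, and a fixed 0:8000 slice keeps whatever row order the CSV shipped with. A sketch of a one-hot, shuffled alternative; the 'class' column name matches the Kaggle mushrooms.csv, the rest is standard pandas/scikit-learn:

# Sketch: one-hot features plus a shuffled train/test split.
import pandas as pd
from sklearn import svm
from sklearn.model_selection import train_test_split

data = pd.read_csv('../input/mushrooms.csv')
y = data['class']  # 'e' (edible) / 'p' (poisonous)
X = pd.get_dummies(data.drop(columns='class'))  # one indicator column per category
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.15, random_state=0)
model = svm.SVC(kernel='linear')
model.fit(Xtrain, ytrain)
print(model.score(Xtest, ytest))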
2002376/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output import numpy as np import pandas as pd from subprocess import check_output print(check_output(['ls', '../input']).decode('utf8'))
code
2002376/cell_3
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/mushrooms.csv')
print(data.shape)
data = np.array(data)
train, test = (data[0:8000, :], data[8000:, :])
# The first column of mushrooms.csv is the class label
Xtrain, ytrain = (train[:, 1:], train[:, 0])
Xtest, ytest = (test[:, 1:], test[:, 0])
print(Xtrain.shape)
print(ytrain.shape)
print(Xtest.shape)
print(ytest.shape)
code
128003343/cell_42
[ "text_plain_output_1.png" ]
import pandas as pd
SHEET_ID = '10DM_ZqwV6CvOryD6u3pYHb5scjEfiwfkitUGTCG8l7Y'
SHEET_NAME = 'AAPL'
url = f'https://docs.google.com/spreadsheets/d/{SHEET_ID}/gviz/tq?tqx=out:csv&sheet={SHEET_NAME}'
df = pd.read_csv(url, decimal=',')
df_retention_ab = df.groupby('version').agg({'userid': 'count', 'retention_1': 'mean', 'retention_7': 'mean', 'sum_gamerounds': 'sum'})
df_retention_ab
df_c = df[df['version'] == 'gate_30']
df_t = df[df['version'] == 'gate_40']
# calc of the relative difference in retention between the 2 groups
ret1_dif = (df_c['retention_1'].mean() - df_t['retention_1'].mean()) / df_t['retention_1'].mean()
ret7_dif = (df_c['retention_7'].mean() - df_t['retention_7'].mean()) / df_t['retention_7'].mean()
ret1_dif, ret7_dif
n1 = df_t.shape[0]
n2 = df_c.shape[0]
(n1, n2)
code
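The uplift figures above are point estimates with no uncertainty attached; a small bootstrap sketch for the day-1 retention difference, assuming df_c and df_t as defined in the cell above:

# Sketch: bootstrap a 95% interval for the gate_30 - gate_40 retention_1 gap.
import numpy as np

diffs = []
for _ in range(2000):
    c = df_c['retention_1'].sample(frac=1, replace=True).mean()
    t = df_t['retention_1'].sample(frac=1, replace=True).mean()
    diffs.append(c - t)
print(np.percentile(diffs, [2.5, 97.5]))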
128003343/cell_21
[ "text_html_output_1.png" ]
import pandas as pd import plotly.express as px # Интерактивная библиотека для графиков. SHEET_ID = '10DM_ZqwV6CvOryD6u3pYHb5scjEfiwfkitUGTCG8l7Y' SHEET_NAME = 'AAPL' url = f'https://docs.google.com/spreadsheets/d/{SHEET_ID}/gviz/tq?tqx=out:csv&sheet={SHEET_NAME}' df = pd.read_csv(url, decimal=',') df_retention_ab = df.groupby('version').agg({'userid': 'count', 'retention_1': 'mean', 'retention_7': 'mean', 'sum_gamerounds': 'sum'}) df_retention_ab fig = px.histogram(df['sum_gamerounds'][df['sum_gamerounds'] < 100], marginal='box') fig.update_layout(xaxis_title='gamerounds per user', yaxis_title='users', title='Distribution of game rounds', showlegend=False) fig.show(renderer='colab')
code
128003343/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd SHEET_ID = '10DM_ZqwV6CvOryD6u3pYHb5scjEfiwfkitUGTCG8l7Y' SHEET_NAME = 'AAPL' url = f'https://docs.google.com/spreadsheets/d/{SHEET_ID}/gviz/tq?tqx=out:csv&sheet={SHEET_NAME}' df = pd.read_csv(url, decimal=',') df.describe(include='object')
code
128003343/cell_4
[ "text_plain_output_1.png" ]
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from scipy.stats import ttest_1samp, mannwhitneyu, shapiro, norm, t, kstest
from statsmodels.stats.power import TTestIndPower
from statsmodels.stats import proportion
import plotly.express as px
import math
import statsmodels.stats.power as smp
code
128003343/cell_34
[ "text_plain_output_1.png" ]
if z_pvalue < 0.05:  # p-values are non-negative, so no abs() is needed
    print('We may reject the null hypothesis!')
else:
    print('We have failed to reject the null hypothesis')
code
128003343/cell_33
[ "text_html_output_1.png" ]
from statsmodels.stats import proportion
import numpy as np
import pandas as pd
SHEET_ID = '10DM_ZqwV6CvOryD6u3pYHb5scjEfiwfkitUGTCG8l7Y'
SHEET_NAME = 'AAPL'
url = f'https://docs.google.com/spreadsheets/d/{SHEET_ID}/gviz/tq?tqx=out:csv&sheet={SHEET_NAME}'
df = pd.read_csv(url, decimal=',')
df_retention_ab = df.groupby('version').agg({'userid': 'count', 'retention_1': 'mean', 'retention_7': 'mean', 'sum_gamerounds': 'sum'})
df_retention_ab
df_c = df[df['version'] == 'gate_30']
df_t = df[df['version'] == 'gate_40']
# calc of the relative difference in retention between the 2 groups
ret1_dif = (df_c['retention_1'].mean() - df_t['retention_1'].mean()) / df_t['retention_1'].mean()
ret7_dif = (df_c['retention_7'].mean() - df_t['retention_7'].mean()) / df_t['retention_7'].mean()
ret1_dif, ret7_dif
k1 = df_t['retention_1'].sum()
k2 = df_c['retention_1'].sum()
(k1, k2)
n1 = df_t.shape[0]
n2 = df_c.shape[0]
(n1, n2)
z_score, z_pvalue = proportion.proportions_ztest(np.array([k1, k2]), np.array([n1, n2]))
print('Results are: z_score = %.3f, pvalue = %.3f' % (z_score, z_pvalue))
code
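For reference, proportions_ztest with these arguments computes the pooled two-proportion z statistic; a hand-rolled sketch that should reproduce the same two-sided result from k1, k2, n1 and n2:

# Sketch: pooled two-proportion z-test, matching proportions_ztest's default.
import math
from scipy.stats import norm

p1, p2 = k1 / n1, k2 / n2
p_pool = (k1 + k2) / (n1 + n2)
se = math.sqrt(p_pool * (1 - p_pool) * (1 / n1 + 1 / n2))
z = (p1 - p2) / se
pvalue = 2 * (1 - norm.cdf(abs(z)))
print(z, pvalue)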
128003343/cell_44
[ "text_plain_output_1.png" ]
if z_pvalue < 0.05:  # p-values are non-negative, so no abs() is needed
    print('We may reject the null hypothesis!')
else:
    print('We have failed to reject the null hypothesis')
code