Dataset schema (one record per notebook cell):
  path              - string, lengths 13 to 17 (e.g. "74048227/cell_15")
  screenshot_names  - sequence, lengths 1 to 873
  code              - string, lengths 0 to 40.4k (the cell's source code)
  cell_type         - string, 1 distinct value ("code")
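Read as a table, each record below maps these four fields to values. The sketch that follows is illustrative only: the field names come from the schema above, the sample values are copied from the first record below, and the variable name `record` is hypothetical.

# Illustrative sketch only: one record of this dump as a plain Python dict.
# Field names follow the schema above; the sample values are taken from the
# first record below ("74048227/cell_15"); `record` is a hypothetical name.
record = {
    'path': '74048227/cell_15',                       # '<kernel id>/<cell id>'
    'screenshot_names': ['text_plain_output_1.png'],  # the cell's rendered outputs
    'code': 'from sklearn.impute import SimpleImputer ...',  # the cell's source, as one string
    'cell_type': 'code',                              # the only value in this dump
}
print(record['path'], len(record['code']))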
74048227/cell_15
[ "text_plain_output_1.png" ]
from sklearn.impute import SimpleImputer
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import time
train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col='id')
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv', index_col='id')
train_df = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col='id')
training_missing_val_count_by_column = train.isnull().values.sum()
test_missing_val_count_by_column = test.isnull().values.sum()
pd.set_option('display.max_rows', None)
train.describe().T
train2 = train.dropna(axis='rows')
train3 = train.dropna(axis='columns')
training_missing_val_count_by_column = train.isnull().sum()
imputer = SimpleImputer(strategy='mean')
train_imputed = pd.DataFrame(imputer.fit_transform(train))
train_imputed.columns = train.columns
train = train_imputed
print('Missing values imputed')
code
74048227/cell_3
[ "text_plain_output_1.png" ]
import os
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
from sklearn import ensemble, linear_model, metrics, model_selection, neighbors, preprocessing, svm, tree
from sklearn.impute import SimpleImputer
from sklearn.model_selection import cross_validate, train_test_split, KFold, GridSearchCV
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler
from catboost import CatBoostClassifier
from xgboost import XGBClassifier
print('Libraries imported', time.time())
code
74048227/cell_22
[ "text_plain_output_1.png" ]
from sklearn import ensemble, linear_model, metrics, model_selection, neighbors, preprocessing, svm, tree
from sklearn.impute import SimpleImputer
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import time
train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col='id')
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv', index_col='id')
train_df = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col='id')
training_missing_val_count_by_column = train.isnull().values.sum()
test_missing_val_count_by_column = test.isnull().values.sum()
pd.set_option('display.max_rows', None)
train.describe().T
train2 = train.dropna(axis='rows')
train3 = train.dropna(axis='columns')
training_missing_val_count_by_column = train.isnull().sum()
imputer = SimpleImputer(strategy='mean')
train_imputed = pd.DataFrame(imputer.fit_transform(train))
train_imputed.columns = train.columns
train = train_imputed
corr = train.corr()
mask = np.triu(np.ones_like(corr, dtype=bool))
corr_matrix = train.corr().abs()
high_corr = np.where(corr_matrix > 0.02)
high_corr = [(corr_matrix.columns[x], corr_matrix.columns[y]) for x, y in zip(*high_corr) if x != y and x < y]
featuresofinterest = ['f6', 'f15', 'f32', 'f34', 'f36', 'f45', 'f46', 'f51', 'f57', 'f86', 'f90', 'f97', 'f111']
train_normalized = preprocessing.normalize(train_imputed, norm='l2')
train_normalized = pd.DataFrame(train_normalized)
train_normalized.columns = train.columns
train = train_normalized
print('Data normalised')
code
74048227/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
import time
train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col='id')
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv', index_col='id')
train_df = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col='id')
training_missing_val_count_by_column = train.isnull().values.sum()
test_missing_val_count_by_column = test.isnull().values.sum()
pd.set_option('display.max_rows', None)
train.describe().T
code
74048227/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
import time
train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col='id')
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv', index_col='id')
train_df = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col='id')
training_missing_val_count_by_column = train.isnull().values.sum()
test_missing_val_count_by_column = test.isnull().values.sum()
pd.set_option('display.max_rows', None)
train.describe().T
train2 = train.dropna(axis='rows')
print('rows ; ', train.shape[0], '\nrows with missing data : ', train.shape[0] - train2.shape[0])
train3 = train.dropna(axis='columns')
print('\ncolumns ; ', train.shape[1], '\ncolumns with missing data : ', train.shape[1] - train3.shape[1])
print('\n', time.time())
code
74048227/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import time
train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col='id')
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv', index_col='id')
train_df = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col='id')
print('Data Import Complete', time.time())
code
130004107/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/titanicdataset-traincsv/train.csv')
data
data.count()
data.plot(kind='scatter', x='Survived', y='Age', title='Scatter Plot of Survivors Separated by Age')
code
130004107/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/titanicdataset-traincsv/train.csv')
data
data.count()
data['Pclass'].value_counts()
code
130004107/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/titanicdataset-traincsv/train.csv')
data
data.count()
code
130004107/cell_34
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/titanicdataset-traincsv/train.csv')
data
data.count()
a = data.filter(['AgeBin', 'Survived'])
b = a.pivot_table(index='AgeBin', columns=['Survived'], aggfunc=len)
b
data[(data['Sex'] == 'male') & (data['Survived'] == 1)]['Pclass'].value_counts().sort_index().plot(kind='bar', xlabel='Male Survivor', title='Male Survivors Separated by Cabin Class')
code
130004107/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/titanicdataset-traincsv/train.csv')
data
data.count()
data[data['Survived'] == 1]['Age'].value_counts().sort_index().plot(kind='bar', title='Survivors by Age')
code
130004107/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/titanicdataset-traincsv/train.csv')
data
data.count()
data[data['Survived'] == 1]['Pclass'].value_counts().sort_index().plot(kind='bar', title='Survivors Separated by Cabin Class')
code
130004107/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/titanicdataset-traincsv/train.csv')
data
data.count()
(data['Age'].min(), data['Age'].max())
code
130004107/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/titanicdataset-traincsv/train.csv')
data
data.count()
a = data.filter(['AgeBin', 'Survived'])
b = a.pivot_table(index='AgeBin', columns=['Survived'], aggfunc=len)
b
b.plot(kind='bar', stacked=True, xlabel='Age Group', ylabel='Count', title='Survivors by Age Group')
code
130004107/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/titanicdataset-traincsv/train.csv')
data
data.count()
data['AgeBin'].value_counts().sort_index().plot(kind='bar', title='Passenger by Age Groups')
code
130004107/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/titanicdataset-traincsv/train.csv')
data
code
130004107/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/titanicdataset-traincsv/train.csv')
data
data.count()
data['Sex'].value_counts()
code
130004107/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/titanicdataset-traincsv/train.csv')
data
data.count()
alpha_color = 0.5
data['Pclass'].value_counts().sort_index().plot(kind='bar', alpha=alpha_color, title='Passengers Separated by Cabin Class')
code
130004107/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/titanicdataset-traincsv/train.csv')
data
data.count()
alpha_color = 0.5
data['Sex'].value_counts().sort_index().plot(kind='bar', color=['b', 'r'], alpha=alpha_color, title='Passengers Separated by Sex')
code
130004107/cell_32
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/titanicdataset-traincsv/train.csv')
data
data.count()
a = data.filter(['AgeBin', 'Survived'])
b = a.pivot_table(index='AgeBin', columns=['Survived'], aggfunc=len)
b
data[data['Sex'] == 'female']['Survived'].value_counts().plot(kind='bar', xlabel='Female Survivor', title='Number of Female Survivors and Death')
code
130004107/cell_28
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/titanicdataset-traincsv/train.csv')
data
data.count()
data[data['Survived'] == 0]['AgeBin'].value_counts().sort_index().plot(kind='bar', title='Death by Age Groups')
code
130004107/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/titanicdataset-traincsv/train.csv')
data
data.count()
data['Survived'].value_counts()
code
130004107/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/titanicdataset-traincsv/train.csv')
data
data.count()
alive = data[data['Survived'].eq(1)]['Survived'].value_counts()
round(data[data['Survived'].eq(1)]['Sex'].value_counts().astype(int) * 100 / alive[1], 2)
code
130004107/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/titanicdataset-traincsv/train.csv')
data
len(data)
code
130004107/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/titanicdataset-traincsv/train.csv')
data
data.count()
alpha_color = 0.5
data['Survived'].value_counts().plot(kind='bar', title='Number of Survivors')
code
130004107/cell_35
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/titanicdataset-traincsv/train.csv')
data
data.count()
a = data.filter(['AgeBin', 'Survived'])
b = a.pivot_table(index='AgeBin', columns=['Survived'], aggfunc=len)
b
data[(data['Sex'] == 'female') & (data['Survived'] == 1)]['Pclass'].value_counts().sort_index().plot(kind='bar', xlabel='Female Survivor', title='Female Survivors Separated by Cabin Class')
code
130004107/cell_31
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/titanicdataset-traincsv/train.csv')
data
data.count()
a = data.filter(['AgeBin', 'Survived'])
b = a.pivot_table(index='AgeBin', columns=['Survived'], aggfunc=len)
b
data[data['Sex'] == 'male']['Survived'].value_counts().plot(kind='bar', xlabel='Male Survivor', title='Number of Male Survivors and Death')
code
130004107/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/titanicdataset-traincsv/train.csv')
data
data.count()
data['Survived'].value_counts() * 100 / len(data)
code
130004107/cell_27
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/titanicdataset-traincsv/train.csv')
data
data.count()
data[data['Survived'] == 1]['AgeBin'].value_counts().sort_index().plot(kind='bar', title='Survivors by Age Groups')
code
72120651/cell_13
[ "text_html_output_1.png" ]
import pandas as pd
countries_dataset = pd.read_csv('../input/countries-of-the-world/countries of the world.csv', decimal=',')
countries_dataset.head()
code
72120651/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
countries_dataset = pd.read_csv('../input/countries-of-the-world/countries of the world.csv', decimal=',')
print('Shape:', countries_dataset.shape, '\n')
print('Missing values:')
print(countries_dataset.isnull().sum(), '\n')
print('Data types:')
print(countries_dataset.dtypes, '\n')
code
105201140/cell_2
[ "text_plain_output_1.png" ]
# build dependency wheels
!pip wheel --verbose --no-binary cython-bbox==0.1.3 cython-bbox -w /kaggle/working/
!pip wheel --verbose --no-binary lap==0.4.0 lap -w /kaggle/working/
!pip wheel --verbose --no-binary loguru-0.6.0 loguru -w /kaggle/working/
!pip wheel --verbose --no-binary thop-0.1.1.post2209072238 thop -w /kaggle/working/

# build yolox wheel
!git clone https://github.com/ifzhang/ByteTrack.git
!cd ByteTrack && python3 setup.py bdist_wheel && cp -r ./dist/* /kaggle/working/

# clean up
!rm -rf /kaggle/working/ByteTrack
!rm torch-1.12.1-cp37-cp37m-manylinux1_x86_64.whl
!rm typing_extensions-4.3.0-py3-none-any.whl
code
328596/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import datasets, svm, cross_validation, tree, preprocessing, metrics
import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
group_by_age = pd.cut(titanic_df['Age'], np.arange(0, 90, 10))
age_grouping = titanic_df.groupby(group_by_age).mean()
titanic_df.count()
titanic_df = titanic_df.drop(['Cabin'], axis=1)
titanic_df = titanic_df.dropna()
titanic_df.count()

def preprocess_titanic_df(df):
    processed_df = df.copy()
    le = preprocessing.LabelEncoder()
    processed_df.Sex = le.fit_transform(processed_df.Sex)
    processed_df.Embarked = le.fit_transform(processed_df.Embarked)
    processed_df = processed_df.drop(['Name', 'Ticket'], axis=1)
    return processed_df

processed_df = preprocess_titanic_df(titanic_df)
processed_df.count()
processed_df
X = processed_df.drop(['Survived'], axis=1).values
Y = processed_df['Survived'].values
print(X)
code
328596/cell_9
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
group_by_age = pd.cut(titanic_df['Age'], np.arange(0, 90, 10))
age_grouping = titanic_df.groupby(group_by_age).mean()
age_grouping['Survived'].plot.bar()
code
328596/cell_25
[ "text_plain_output_1.png" ]
from sklearn import datasets, svm, cross_validation, tree, preprocessing, metrics
import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
group_by_age = pd.cut(titanic_df['Age'], np.arange(0, 90, 10))
age_grouping = titanic_df.groupby(group_by_age).mean()
titanic_df.count()
test_df.count()
titanic_df = titanic_df.drop(['Cabin'], axis=1)
test_df = test_df.drop(['Cabin'], axis=1)
titanic_df = titanic_df.dropna()
test_df = test_df.dropna()
titanic_df.count()
test_df.count()

def preprocess_titanic_df(df):
    processed_df = df.copy()
    le = preprocessing.LabelEncoder()
    processed_df.Sex = le.fit_transform(processed_df.Sex)
    processed_df.Embarked = le.fit_transform(processed_df.Embarked)
    processed_df = processed_df.drop(['Name', 'Ticket'], axis=1)
    return processed_df

processed_df = preprocess_titanic_df(titanic_df)
processed_df.count()
processed_df
processed_test_df = preprocess_titanic_df(test_df)
processed_test_df.count()
processed_test_df
X = processed_df.drop(['Survived'], axis=1).values
Y = processed_df['Survived'].values
X_test = processed_test_df.values
clf_dt = tree.DecisionTreeClassifier(max_depth=10)
clf_dt.fit(X, Y)
Y_test = clf_dt.predict(X_test)
clf_dt.score(X_test, Y_test)
code
328596/cell_4
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
test_df.head()
code
328596/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import datasets, svm, cross_validation, tree, preprocessing, metrics
import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
test_df.count()
test_df = test_df.drop(['Cabin'], axis=1)
test_df = test_df.dropna()
test_df.count()

def preprocess_titanic_df(df):
    processed_df = df.copy()
    le = preprocessing.LabelEncoder()
    processed_df.Sex = le.fit_transform(processed_df.Sex)
    processed_df.Embarked = le.fit_transform(processed_df.Embarked)
    processed_df = processed_df.drop(['Name', 'Ticket'], axis=1)
    return processed_df

processed_test_df = preprocess_titanic_df(test_df)
processed_test_df.count()
processed_test_df
code
328596/cell_6
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
code
328596/cell_11
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
test_df.count()
code
328596/cell_19
[ "text_plain_output_1.png" ]
from sklearn import datasets, svm, cross_validation, tree, preprocessing, metrics
import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
group_by_age = pd.cut(titanic_df['Age'], np.arange(0, 90, 10))
age_grouping = titanic_df.groupby(group_by_age).mean()
titanic_df.count()
titanic_df = titanic_df.drop(['Cabin'], axis=1)
titanic_df = titanic_df.dropna()
titanic_df.count()

def preprocess_titanic_df(df):
    processed_df = df.copy()
    le = preprocessing.LabelEncoder()
    processed_df.Sex = le.fit_transform(processed_df.Sex)
    processed_df.Embarked = le.fit_transform(processed_df.Embarked)
    processed_df = processed_df.drop(['Name', 'Ticket'], axis=1)
    return processed_df

processed_df = preprocess_titanic_df(titanic_df)
processed_df.count()
processed_df
code
328596/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import random
import numpy as np
import pandas as pd
# Note: sklearn.cross_validation was renamed to model_selection in scikit-learn 0.18
# and removed in 0.20, so this import fails on current versions.
from sklearn import datasets, svm, cross_validation, tree, preprocessing, metrics
import sklearn.ensemble as ske
import tensorflow as tf
# tensorflow.contrib (and skflow) exists only in TensorFlow 1.x.
from tensorflow.contrib import skflow
code
328596/cell_7
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
print(class_sex_grouping['Survived'])
code
328596/cell_8
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
class_sex_grouping['Survived'].plot.bar()
code
328596/cell_16
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
group_by_age = pd.cut(titanic_df['Age'], np.arange(0, 90, 10))
age_grouping = titanic_df.groupby(group_by_age).mean()
titanic_df.count()
titanic_df = titanic_df.drop(['Cabin'], axis=1)
titanic_df = titanic_df.dropna()
titanic_df.count()
code
328596/cell_3
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.head()
code
328596/cell_17
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
test_df.count()
test_df = test_df.drop(['Cabin'], axis=1)
test_df = test_df.dropna()
test_df.count()
code
328596/cell_10
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
group_by_age = pd.cut(titanic_df['Age'], np.arange(0, 90, 10))
age_grouping = titanic_df.groupby(group_by_age).mean()
titanic_df.count()
code
328596/cell_5
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df['Survived'].mean()
code
122252822/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os
# Local Windows path; this raises an error in a Kaggle environment (hence the stderr output).
os.chdir('C:\\\\Users\\\\melanie.vercaempt\\\\Documents\\\\Code\\\\train-keyrus-academy-python\\\\data-viz')
code
122252822/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
from datetime import date
import os
import geopandas as gpd
import folium
import mapclassify
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
import pandas as pd
import plotly.express as px
import re
import seaborn as sns
from shapely.geometry import Point, Polygon
from shapely.geometry import MultiPolygon
code
17122803/cell_13
[ "text_plain_output_1.png" ]
from nltk.tokenize import word_tokenize, sent_tokenize
text = 'Mary had a little lamb. Her fleece was white as snow'
sents = sent_tokenize(text)
print(sents)
code
17122803/cell_9
[ "text_plain_output_1.png" ]
text1.concordance('monstrous')
text1.dispersion_plot(['happy', 'sad'])
code
17122803/cell_25
[ "image_output_1.png" ]
from nltk.tokenize import word_tokenize, sent_tokenize
import nltk
text2.concordance('monstrous')
text2.similar('monstrous')
text2.common_contexts(['monstrous', 'very'])
text2 = 'Mary closed on closing night when she was in the mood to close.'
nltk.pos_tag(word_tokenize(text2))
code
17122803/cell_4
[ "image_output_1.png" ]
text2.concordance('monstrous')
code
17122803/cell_6
[ "text_plain_output_1.png" ]
text2.concordance('monstrous')
text2.similar('monstrous')
text2.common_contexts(['monstrous', 'very'])
code
17122803/cell_2
[ "text_plain_output_1.png" ]
from nltk.book import *
code
17122803/cell_19
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
from string import punctuation
text = 'Mary had a little lamb. Her fleece was white as snow'
customStopWords = set(stopwords.words('english') + list(punctuation))
wordsWOStopwords = [word for word in word_tokenize(text) if word not in customStopWords]
print(wordsWOStopwords)
code
17122803/cell_1
[ "text_plain_output_1.png" ]
import os
import nltk
import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
code
17122803/cell_7
[ "text_plain_output_1.png" ]
text4.dispersion_plot(['citizens', 'democracy', 'freedom', 'duties', 'America'])
code
17122803/cell_8
[ "text_plain_output_1.png" ]
text2.concordance('monstrous')
text2.similar('monstrous')
text2.common_contexts(['monstrous', 'very'])
text2.dispersion_plot(['happy', 'sad'])
code
17122803/cell_3
[ "image_output_1.png" ]
text1.concordance('monstrous')
code
17122803/cell_24
[ "text_plain_output_1.png" ]
from nltk.stem.lancaster import LancasterStemmer
from nltk.tokenize import word_tokenize, sent_tokenize
text2.concordance('monstrous')
text2.similar('monstrous')
text2.common_contexts(['monstrous', 'very'])
text2 = 'Mary closed on closing night when she was in the mood to close.'
st = LancasterStemmer()
stemmedWords = [st.stem(word) for word in word_tokenize(text2)]
print(stemmedWords)
code
17122803/cell_14
[ "text_plain_output_1.png" ]
from nltk.tokenize import word_tokenize, sent_tokenize
text = 'Mary had a little lamb. Her fleece was white as snow'
sents = sent_tokenize(text)
words = [word_tokenize(sent) for sent in sents]
print(words)
code
17122803/cell_5
[ "text_plain_output_1.png" ]
text2.concordance('monstrous')
text2.similar('monstrous')
code
90143099/cell_4
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
earning = pd.read_csv('/kaggle/input/cusersmarildownloadsearningcsv/earning.csv', delimiter=';')
data = earning[['year', 'femalesmanagers', 'malemanagers', 'personmanagers', 'femalemachineryoperatorsanddrivers', 'malemachineryoperatorsanddrivers', 'personmachineryoperatorsanddrivers', 'femalesalesworkers', 'malesalesworkers', 'personsalesworkers']]
data.columns = ['year', 'manager_f', 'manager_m', 'manager_tot', 'operator_f', 'operator_m', 'operator_tot', 'sales_f', 'sales_m', 'sales_tot']
data
plt.plot(data['year'], data['manager_m'] - data['manager_f'], label='ManagerInnen')  # German: "managers"
plt.plot(data['year'], data['operator_m'] - data['operator_f'], label='OperatorInnen')  # German: "operators"
plt.plot(data['year'], data['sales_m'] - data['sales_f'], label='Sales')
plt.plot(data['year'], np.zeros(len(data['year'])), color='red', linestyle='--')
plt.title('Lohndifferenz Entwicklung')  # German: "Wage gap over time"
plt.ylabel('Differenz in US $')  # German: "Difference in US $"
plt.xlabel('Jahre')  # German: "Years"
plt.legend()
plt.xlim(2004, 2017)
plt.show()
code
90143099/cell_6
[ "text_html_output_1.png" ]
import pandas as pd
earning = pd.read_csv('/kaggle/input/cusersmarildownloadsearningcsv/earning.csv', delimiter=';')
data = earning[['year', 'femalesmanagers', 'malemanagers', 'personmanagers', 'femalemachineryoperatorsanddrivers', 'malemachineryoperatorsanddrivers', 'personmachineryoperatorsanddrivers', 'femalesalesworkers', 'malesalesworkers', 'personsalesworkers']]
data.columns = ['year', 'manager_f', 'manager_m', 'manager_tot', 'operator_f', 'operator_m', 'operator_tot', 'sales_f', 'sales_m', 'sales_tot']
data
new_data = pd.DataFrame({'year': data['year'], 'manager': data['manager_m'] - data['manager_f'], 'operator': data['operator_m'] - data['operator_f'], 'sales': data['sales_m'] - data['sales_f']})
new_data.index = new_data['year']
new_data = new_data.drop('year', axis=1)
new_data
code
90143099/cell_2
[ "text_html_output_1.png" ]
import pandas as pd
earning = pd.read_csv('/kaggle/input/cusersmarildownloadsearningcsv/earning.csv', delimiter=';')
data = earning[['year', 'femalesmanagers', 'malemanagers', 'personmanagers', 'femalemachineryoperatorsanddrivers', 'malemachineryoperatorsanddrivers', 'personmachineryoperatorsanddrivers', 'femalesalesworkers', 'malesalesworkers', 'personsalesworkers']]
data.columns = ['year', 'manager_f', 'manager_m', 'manager_tot', 'operator_f', 'operator_m', 'operator_tot', 'sales_f', 'sales_m', 'sales_tot']
data
code
90143099/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90143099/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
earning = pd.read_csv('/kaggle/input/cusersmarildownloadsearningcsv/earning.csv', delimiter=';')
data = earning[['year', 'femalesmanagers', 'malemanagers', 'personmanagers', 'femalemachineryoperatorsanddrivers', 'malemachineryoperatorsanddrivers', 'personmachineryoperatorsanddrivers', 'femalesalesworkers', 'malesalesworkers', 'personsalesworkers']]
data.columns = ['year', 'manager_f', 'manager_m', 'manager_tot', 'operator_f', 'operator_m', 'operator_tot', 'sales_f', 'sales_m', 'sales_tot']
data
new_data = pd.DataFrame({'year': data['year'], 'manager': data['manager_m'] - data['manager_f'], 'operator': data['operator_m'] - data['operator_f'], 'sales': data['sales_m'] - data['sales_f']})
new_data.index = new_data['year']
new_data = new_data.drop('year', axis=1)
new_data
for beruf in new_data.columns:
    print(beruf)
code
90143099/cell_8
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
earning = pd.read_csv('/kaggle/input/cusersmarildownloadsearningcsv/earning.csv', delimiter=';')
data = earning[['year', 'femalesmanagers', 'malemanagers', 'personmanagers', 'femalemachineryoperatorsanddrivers', 'malemachineryoperatorsanddrivers', 'personmachineryoperatorsanddrivers', 'femalesalesworkers', 'malesalesworkers', 'personsalesworkers']]
data.columns = ['year', 'manager_f', 'manager_m', 'manager_tot', 'operator_f', 'operator_m', 'operator_tot', 'sales_f', 'sales_m', 'sales_tot']
data
plt.xlim(2004, 2017)
new_data = pd.DataFrame({'year': data['year'], 'manager': data['manager_m'] - data['manager_f'], 'operator': data['operator_m'] - data['operator_f'], 'sales': data['sales_m'] - data['sales_f']})
new_data.index = new_data['year']
new_data = new_data.drop('year', axis=1)
new_data
for beruf in new_data.columns:
    plt.bar(beruf, new_data.loc[2012, beruf])
code
1003686/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
df_train.head()
code
1003686/cell_7
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
df_train['SalePrice'].describe()
code
1003686/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_train = pd.read_csv('../input/train.csv')
# Note: sns.distplot is deprecated since seaborn 0.11; sns.histplot / sns.displot replace it.
sns.distplot(df_train['SalePrice'])
code
1003686/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
print('How skewed is the data? Skewness: {}'.format(df_train['SalePrice'].skew()))
print('How sharp is the peak of the data? Kurtosis: {}'.format(df_train['SalePrice'].kurt()))
code
1003686/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('../input/train.csv')
print(df_train.columns)
code
32071698/cell_21
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import matplotlib.pyplot as plt import os plt.style.use('fivethirtyeight') full_table = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv', parse_dates=['ObservationDate']) italy = pd.DataFrame(full_table[full_table['Country/Region'] == 'Italy']) france = pd.DataFrame(full_table[full_table['Country/Region'] == 'France']) germany = pd.DataFrame(full_table[full_table['Country/Region'] == 'Germany']) uk = pd.DataFrame(full_table[full_table['Country/Region'] == 'UK']) spain = pd.DataFrame(full_table[full_table['Country/Region'] == 'Spain']) turkey = pd.DataFrame(full_table[full_table['Country/Region'] == 'Turkey']) fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") italy[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Italy") fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") germany[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Germany") fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") spain[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Spain") fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") italy[['ObservationDate','Confirmed','Deaths']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus cases, deaths in Italy, Germany and Spain") germany[['ObservationDate','Confirmed','Deaths']].plot(x='ObservationDate',kind='line',ax=ax) spain[['ObservationDate','Confirmed','Deaths']].plot(x='ObservationDate',kind='line',ax=ax) ax.legend(['Confirmed Cases in Italy','Confirmed Deaths in Italy', 'Confirmed Cases in Germany','Confirmed Deaths in Germany', 'Confirmed Cases in Spain','Confirmed Deaths in Spain']) fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") turkey[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Turkey") uk_agg=pd.pivot_table(uk, index=['ObservationDate'],values=['Confirmed','Deaths','Recovered'],aggfunc=np.sum) #uk_agg fig, ax = plt.subplots(figsize=(15,7)) plt.plot(uk_agg.index,uk_agg.values) plt.legend(['Confirmed','Deaths','Recovered']) plt.title("Tracking Corona Virus in United Kingdom") plt.ylabel('Count of Confirmed Positive Cases, Deaths, and Recoveries') plt.xticks(rotation=90) fig, ax = plt.subplots(figsize=(15, 7)) ax.set_xlabel('Observation Date') ax.set_ylabel('Death Rate(%)') italy[['ObservationDate', 'Death Rate in Italy']].plot(x='ObservationDate', kind='line', ax=ax) germany[['ObservationDate', 'Death Rate in Germany']].plot(x='ObservationDate', kind='line', ax=ax) spain[['ObservationDate', 'Death Rate in Spain']].plot(x='ObservationDate', kind='line', ax=ax, title='Comparing Corona Virus in Italy, Germany and Spain') 
turkey[['ObservationDate', 'Death Rate in Turkey']].plot(x='ObservationDate', kind='line', ax=ax, title='Comparing Corona Virus in Italy, Germany, Turkey and Spain')
code
32071698/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import matplotlib.pyplot as plt import os plt.style.use('fivethirtyeight') full_table = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv', parse_dates=['ObservationDate']) italy = pd.DataFrame(full_table[full_table['Country/Region'] == 'Italy']) france = pd.DataFrame(full_table[full_table['Country/Region'] == 'France']) germany = pd.DataFrame(full_table[full_table['Country/Region'] == 'Germany']) uk = pd.DataFrame(full_table[full_table['Country/Region'] == 'UK']) spain = pd.DataFrame(full_table[full_table['Country/Region'] == 'Spain']) turkey = pd.DataFrame(full_table[full_table['Country/Region'] == 'Turkey']) fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") italy[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Italy") fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") germany[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Germany") fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") spain[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Spain") fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") italy[['ObservationDate','Confirmed','Deaths']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus cases, deaths in Italy, Germany and Spain") germany[['ObservationDate','Confirmed','Deaths']].plot(x='ObservationDate',kind='line',ax=ax) spain[['ObservationDate','Confirmed','Deaths']].plot(x='ObservationDate',kind='line',ax=ax) ax.legend(['Confirmed Cases in Italy','Confirmed Deaths in Italy', 'Confirmed Cases in Germany','Confirmed Deaths in Germany', 'Confirmed Cases in Spain','Confirmed Deaths in Spain']) fig, ax = plt.subplots(figsize=(15, 7)) ax.set_xlabel('Observation Date') ax.set_ylabel('Count of Confirmed Positive Cases, Deaths, and Recoveries') turkey[['ObservationDate', 'Confirmed', 'Deaths', 'Recovered']].plot(x='ObservationDate', kind='line', ax=ax, title='Tracking Corona Virus in Turkey')
code
32071698/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
full_table = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv', parse_dates=['ObservationDate'])
italy = pd.DataFrame(full_table[full_table['Country/Region'] == 'Italy'])
france = pd.DataFrame(full_table[full_table['Country/Region'] == 'France'])
germany = pd.DataFrame(full_table[full_table['Country/Region'] == 'Germany'])
uk = pd.DataFrame(full_table[full_table['Country/Region'] == 'UK'])
spain = pd.DataFrame(full_table[full_table['Country/Region'] == 'Spain'])
turkey = pd.DataFrame(full_table[full_table['Country/Region'] == 'Turkey'])
france.sample(10)
code
32071698/cell_2
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
plt.style.use('fivethirtyeight')
code
32071698/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import matplotlib.pyplot as plt import os plt.style.use('fivethirtyeight') full_table = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv', parse_dates=['ObservationDate']) italy = pd.DataFrame(full_table[full_table['Country/Region'] == 'Italy']) france = pd.DataFrame(full_table[full_table['Country/Region'] == 'France']) germany = pd.DataFrame(full_table[full_table['Country/Region'] == 'Germany']) uk = pd.DataFrame(full_table[full_table['Country/Region'] == 'UK']) spain = pd.DataFrame(full_table[full_table['Country/Region'] == 'Spain']) turkey = pd.DataFrame(full_table[full_table['Country/Region'] == 'Turkey']) fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") italy[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Italy") fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") germany[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Germany") fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") spain[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Spain") fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") italy[['ObservationDate','Confirmed','Deaths']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus cases, deaths in Italy, Germany and Spain") germany[['ObservationDate','Confirmed','Deaths']].plot(x='ObservationDate',kind='line',ax=ax) spain[['ObservationDate','Confirmed','Deaths']].plot(x='ObservationDate',kind='line',ax=ax) ax.legend(['Confirmed Cases in Italy','Confirmed Deaths in Italy', 'Confirmed Cases in Germany','Confirmed Deaths in Germany', 'Confirmed Cases in Spain','Confirmed Deaths in Spain']) fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") turkey[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Turkey") uk_agg=pd.pivot_table(uk, index=['ObservationDate'],values=['Confirmed','Deaths','Recovered'],aggfunc=np.sum) #uk_agg fig, ax = plt.subplots(figsize=(15,7)) plt.plot(uk_agg.index,uk_agg.values) plt.legend(['Confirmed','Deaths','Recovered']) plt.title("Tracking Corona Virus in United Kingdom") plt.ylabel('Count of Confirmed Positive Cases, Deaths, and Recoveries') plt.xticks(rotation=90) uk_agg['Death Rate in United Kingdom'] = uk_agg['Deaths'] / uk_agg['Confirmed'] * 100 uk_agg['Recovery Rate in United Kingdom'] = uk_agg['Recovered'] / uk_agg['Confirmed'] * 100 uk_agg.sample(10) uk2 = uk_agg.unstack() uk2
code
32071698/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
full_table = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv', parse_dates=['ObservationDate'])
italy = pd.DataFrame(full_table[full_table['Country/Region'] == 'Italy'])
france = pd.DataFrame(full_table[full_table['Country/Region'] == 'France'])
germany = pd.DataFrame(full_table[full_table['Country/Region'] == 'Germany'])
uk = pd.DataFrame(full_table[full_table['Country/Region'] == 'UK'])
spain = pd.DataFrame(full_table[full_table['Country/Region'] == 'Spain'])
turkey = pd.DataFrame(full_table[full_table['Country/Region'] == 'Turkey'])
italy.head()
code
32071698/cell_15
[ "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import matplotlib.pyplot as plt import os plt.style.use('fivethirtyeight') full_table = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv', parse_dates=['ObservationDate']) italy = pd.DataFrame(full_table[full_table['Country/Region'] == 'Italy']) france = pd.DataFrame(full_table[full_table['Country/Region'] == 'France']) germany = pd.DataFrame(full_table[full_table['Country/Region'] == 'Germany']) uk = pd.DataFrame(full_table[full_table['Country/Region'] == 'UK']) spain = pd.DataFrame(full_table[full_table['Country/Region'] == 'Spain']) turkey = pd.DataFrame(full_table[full_table['Country/Region'] == 'Turkey']) fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") italy[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Italy") fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") germany[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Germany") fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") spain[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Spain") fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") italy[['ObservationDate','Confirmed','Deaths']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus cases, deaths in Italy, Germany and Spain") germany[['ObservationDate','Confirmed','Deaths']].plot(x='ObservationDate',kind='line',ax=ax) spain[['ObservationDate','Confirmed','Deaths']].plot(x='ObservationDate',kind='line',ax=ax) ax.legend(['Confirmed Cases in Italy','Confirmed Deaths in Italy', 'Confirmed Cases in Germany','Confirmed Deaths in Germany', 'Confirmed Cases in Spain','Confirmed Deaths in Spain']) fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") turkey[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Turkey") uk_agg = pd.pivot_table(uk, index=['ObservationDate'], values=['Confirmed', 'Deaths', 'Recovered'], aggfunc=np.sum) fig, ax = plt.subplots(figsize=(15, 7)) plt.plot(uk_agg.index, uk_agg.values) plt.legend(['Confirmed', 'Deaths', 'Recovered']) plt.title('Tracking Corona Virus in United Kingdom') plt.ylabel('Count of Confirmed Positive Cases, Deaths, and Recoveries') plt.xticks(rotation=90)
code
32071698/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import matplotlib.pyplot as plt import os plt.style.use('fivethirtyeight') full_table = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv', parse_dates=['ObservationDate']) italy = pd.DataFrame(full_table[full_table['Country/Region'] == 'Italy']) france = pd.DataFrame(full_table[full_table['Country/Region'] == 'France']) germany = pd.DataFrame(full_table[full_table['Country/Region'] == 'Germany']) uk = pd.DataFrame(full_table[full_table['Country/Region'] == 'UK']) spain = pd.DataFrame(full_table[full_table['Country/Region'] == 'Spain']) turkey = pd.DataFrame(full_table[full_table['Country/Region'] == 'Turkey']) fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") italy[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Italy") fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") germany[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Germany") fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") spain[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Spain") fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") italy[['ObservationDate','Confirmed','Deaths']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus cases, deaths in Italy, Germany and Spain") germany[['ObservationDate','Confirmed','Deaths']].plot(x='ObservationDate',kind='line',ax=ax) spain[['ObservationDate','Confirmed','Deaths']].plot(x='ObservationDate',kind='line',ax=ax) ax.legend(['Confirmed Cases in Italy','Confirmed Deaths in Italy', 'Confirmed Cases in Germany','Confirmed Deaths in Germany', 'Confirmed Cases in Spain','Confirmed Deaths in Spain']) fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") turkey[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Turkey") uk_agg=pd.pivot_table(uk, index=['ObservationDate'],values=['Confirmed','Deaths','Recovered'],aggfunc=np.sum) #uk_agg fig, ax = plt.subplots(figsize=(15,7)) plt.plot(uk_agg.index,uk_agg.values) plt.legend(['Confirmed','Deaths','Recovered']) plt.title("Tracking Corona Virus in United Kingdom") plt.ylabel('Count of Confirmed Positive Cases, Deaths, and Recoveries') plt.xticks(rotation=90) uk_agg['Death Rate in United Kingdom'] = uk_agg['Deaths'] / uk_agg['Confirmed'] * 100 uk_agg['Recovery Rate in United Kingdom'] = uk_agg['Recovered'] / uk_agg['Confirmed'] * 100 uk_agg.tail()
code
32071698/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import matplotlib.pyplot as plt import os plt.style.use('fivethirtyeight') full_table = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv', parse_dates=['ObservationDate']) italy = pd.DataFrame(full_table[full_table['Country/Region'] == 'Italy']) france = pd.DataFrame(full_table[full_table['Country/Region'] == 'France']) germany = pd.DataFrame(full_table[full_table['Country/Region'] == 'Germany']) uk = pd.DataFrame(full_table[full_table['Country/Region'] == 'UK']) spain = pd.DataFrame(full_table[full_table['Country/Region'] == 'Spain']) turkey = pd.DataFrame(full_table[full_table['Country/Region'] == 'Turkey']) fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") italy[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Italy") fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") germany[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Germany") fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") spain[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Spain") fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") italy[['ObservationDate','Confirmed','Deaths']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus cases, deaths in Italy, Germany and Spain") germany[['ObservationDate','Confirmed','Deaths']].plot(x='ObservationDate',kind='line',ax=ax) spain[['ObservationDate','Confirmed','Deaths']].plot(x='ObservationDate',kind='line',ax=ax) ax.legend(['Confirmed Cases in Italy','Confirmed Deaths in Italy', 'Confirmed Cases in Germany','Confirmed Deaths in Germany', 'Confirmed Cases in Spain','Confirmed Deaths in Spain']) fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") turkey[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Turkey") uk_agg=pd.pivot_table(uk, index=['ObservationDate'],values=['Confirmed','Deaths','Recovered'],aggfunc=np.sum) #uk_agg fig, ax = plt.subplots(figsize=(15,7)) plt.plot(uk_agg.index,uk_agg.values) plt.legend(['Confirmed','Deaths','Recovered']) plt.title("Tracking Corona Virus in United Kingdom") plt.ylabel('Count of Confirmed Positive Cases, Deaths, and Recoveries') plt.xticks(rotation=90) uk_agg['Death Rate in United Kingdom'] = uk_agg['Deaths'] / uk_agg['Confirmed'] * 100 uk_agg['Recovery Rate in United Kingdom'] = uk_agg['Recovered'] / uk_agg['Confirmed'] * 100 uk_agg.sample(10)
code
32071698/cell_10
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
plt.style.use('fivethirtyeight')
full_table = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv', parse_dates=['ObservationDate'])
italy = pd.DataFrame(full_table[full_table['Country/Region'] == 'Italy'])
france = pd.DataFrame(full_table[full_table['Country/Region'] == 'France'])
germany = pd.DataFrame(full_table[full_table['Country/Region'] == 'Germany'])
uk = pd.DataFrame(full_table[full_table['Country/Region'] == 'UK'])
spain = pd.DataFrame(full_table[full_table['Country/Region'] == 'Spain'])
turkey = pd.DataFrame(full_table[full_table['Country/Region'] == 'Turkey'])
fig, ax = plt.subplots(figsize=(15, 7))
ax.set_xlabel('Observation Date')
ax.set_ylabel('Count of Confirmed Positive Cases, Deaths, and Recoveries')
italy[['ObservationDate', 'Confirmed', 'Deaths', 'Recovered']].plot(x='ObservationDate', kind='line', ax=ax, title='Tracking Corona Virus in Italy')
fig, ax = plt.subplots(figsize=(15, 7))
ax.set_xlabel('Observation Date')
ax.set_ylabel('Count of Confirmed Positive Cases, Deaths, and Recoveries')
germany[['ObservationDate', 'Confirmed', 'Deaths', 'Recovered']].plot(x='ObservationDate', kind='line', ax=ax, title='Tracking Corona Virus in Germany')
fig, ax = plt.subplots(figsize=(15, 7))
ax.set_xlabel('Observation Date')
ax.set_ylabel('Count of Confirmed Positive Cases, Deaths, and Recoveries')
spain[['ObservationDate', 'Confirmed', 'Deaths', 'Recovered']].plot(x='ObservationDate', kind='line', ax=ax, title='Tracking Corona Virus in Spain')
code
32071698/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import matplotlib.pyplot as plt import os plt.style.use('fivethirtyeight') full_table = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv', parse_dates=['ObservationDate']) italy = pd.DataFrame(full_table[full_table['Country/Region'] == 'Italy']) france = pd.DataFrame(full_table[full_table['Country/Region'] == 'France']) germany = pd.DataFrame(full_table[full_table['Country/Region'] == 'Germany']) uk = pd.DataFrame(full_table[full_table['Country/Region'] == 'UK']) spain = pd.DataFrame(full_table[full_table['Country/Region'] == 'Spain']) turkey = pd.DataFrame(full_table[full_table['Country/Region'] == 'Turkey']) fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") italy[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Italy") fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") germany[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Germany") fig, ax = plt.subplots(figsize=(15,7)) ax.set_xlabel("Observation Date") ax.set_ylabel("Count of Confirmed Positive Cases, Deaths, and Recoveries") spain[['ObservationDate','Confirmed','Deaths','Recovered']].plot(x='ObservationDate',kind='line',ax=ax, title="Tracking Corona Virus in Spain") fig, ax = plt.subplots(figsize=(15, 7)) ax.set_xlabel('Observation Date') ax.set_ylabel('Count of Confirmed Positive Cases, Deaths, and Recoveries') italy[['ObservationDate', 'Confirmed', 'Deaths']].plot(x='ObservationDate', kind='line', ax=ax, title='Tracking Corona Virus cases, deaths in Italy, Germany and Spain') germany[['ObservationDate', 'Confirmed', 'Deaths']].plot(x='ObservationDate', kind='line', ax=ax) spain[['ObservationDate', 'Confirmed', 'Deaths']].plot(x='ObservationDate', kind='line', ax=ax) ax.legend(['Confirmed Cases in Italy', 'Confirmed Deaths in Italy', 'Confirmed Cases in Germany', 'Confirmed Deaths in Germany', 'Confirmed Cases in Spain', 'Confirmed Deaths in Spain'])
code
73074342/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/30days-folds/train_folds.csv')
train = df
df_test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
cat_features = ['cat' + str(i) for i in range(10)]
num_features = ['cont' + str(i) for i in range(14)]
for col in cat_features:
    print(set(train[col].value_counts().index) == set(df_test[col].value_counts().index))
code
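A possible next step after the level check in 73074342/cell_13 (not part of the original notebook) is to encode those shared categorical levels. A minimal sketch with scikit-learn's OrdinalEncoder, the same encoder the later cells leave commented out, assuming `train`, `df_test`, and `cat_features` are defined as in the cell above:
from sklearn.preprocessing import OrdinalEncoder
# Fit on the training categories and reuse the same mapping for the test set;
# the check above showed both sets share identical category levels, so this is safe.
encoder = OrdinalEncoder()
train[cat_features] = encoder.fit_transform(train[cat_features])
df_test[cat_features] = encoder.transform(df_test[cat_features])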
73074342/cell_6
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/30days-folds/train_folds.csv')
train = df
df_test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
df.describe(percentiles=[0.1, 0.25, 0.5, 0.75, 0.9]).T
code
73074342/cell_11
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('../input/30days-folds/train_folds.csv')
train = df
df_test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
# Comparing the datasets length
fig, ax = plt.subplots(figsize=(5, 5))
pie = ax.pie([len(df), len(df_test)], labels=["Train dataset", "Test dataset"], colors=["salmon", "teal"], textprops={"fontsize": 15}, autopct='%1.1f%%')
ax.axis("equal")
ax.set_title("Dataset length comparison", fontsize=18)
fig.set_facecolor('white')
plt.show();
df.describe(percentiles=[0.1, 0.25, 0.5, 0.75, 0.9]).T
fig, ax = plt.subplots(figsize=(16, 8))
bars = ax.hist(df["target"], bins=100, color="palevioletred", edgecolor="black")
ax.set_title("Target distribution", fontsize=20, pad=15)
ax.set_ylabel("Amount of values", fontsize=14, labelpad=15)
ax.set_xlabel("Target value", fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis="y")
plt.show();
cat_features = ['cat' + str(i) for i in range(10)]
num_features = ['cont' + str(i) for i in range(14)]
# Combined dataframe containing numerical features only
df = pd.concat([df[num_features], df_test[num_features]], axis=0)
columns = df.columns.values
# Calculating required amount of rows to display all feature plots
cols = 3
rows = len(columns) // cols + 1
fig, axs = plt.subplots(ncols=cols, nrows=rows, figsize=(16,20), sharex=False)
# Adding some distance between plots
plt.subplots_adjust(hspace = 0.3)
# Plots counter
i=0
for r in np.arange(0, rows, 1):
    for c in np.arange(0, cols, 1):
        if i >= len(columns): # If there is no more data columns to make plots from
            axs[r, c].set_visible(False) # Hiding axes so there will be clean background
        else:
            # Train data histogram
            hist1 = axs[r, c].hist(df[columns[i]].values, range=(df[columns[i]].min(), df[columns[i]].max()), bins=40, color="deepskyblue", edgecolor="black", alpha=0.7, label="Train Dataset")
            # Test data histogram
            hist2 = axs[r, c].hist(df_test[columns[i]].values, range=(df[columns[i]].min(), df[columns[i]].max()), bins=40, color="palevioletred", edgecolor="black", alpha=0.7, label="Test Dataset")
            axs[r, c].set_title(columns[i], fontsize=14, pad=5)
            axs[r, c].tick_params(axis="y", labelsize=13)
            axs[r, c].tick_params(axis="x", labelsize=13)
            axs[r, c].grid(axis="y")
            axs[r, c].legend(fontsize=13)
        i+=1
# plt.suptitle("Numerical feature values distribution in both datasets", y=0.99)
plt.show();
df.head()
code
73074342/cell_7
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/30days-folds/train_folds.csv')
train = df
df_test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
df.describe(percentiles=[0.1, 0.25, 0.5, 0.75, 0.9]).T
print(f"{(df['target'] < 5).sum() / len(df) * 100:.3f}% of the target values are less than 5")
code
73074342/cell_8
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/30days-folds/train_folds.csv')
train = df
df_test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
# Comparing the datasets length
fig, ax = plt.subplots(figsize=(5, 5))
pie = ax.pie([len(df), len(df_test)], labels=["Train dataset", "Test dataset"], colors=["salmon", "teal"], textprops={"fontsize": 15}, autopct='%1.1f%%')
ax.axis("equal")
ax.set_title("Dataset length comparison", fontsize=18)
fig.set_facecolor('white')
plt.show();
df.describe(percentiles=[0.1, 0.25, 0.5, 0.75, 0.9]).T
fig, ax = plt.subplots(figsize=(16, 8))
bars = ax.hist(df['target'], bins=100, color='palevioletred', edgecolor='black')
ax.set_title('Target distribution', fontsize=20, pad=15)
ax.set_ylabel('Amount of values', fontsize=14, labelpad=15)
ax.set_xlabel('Target value', fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis='y')
plt.show()
code
73074342/cell_15
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
import seaborn as sns
df = pd.read_csv('../input/30days-folds/train_folds.csv')
train = df
df_test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
colors = ['lightcoral', 'sandybrown', 'darkorange', 'mediumseagreen', 'lightseagreen', 'cornflowerblue', 'mediumpurple', 'palevioletred', 'lightskyblue', 'sandybrown', 'yellowgreen', 'indianred', 'lightsteelblue', 'mediumorchid', 'deepskyblue']
# Comparing the datasets length
fig, ax = plt.subplots(figsize=(5, 5))
pie = ax.pie([len(df), len(df_test)], labels=["Train dataset", "Test dataset"], colors=["salmon", "teal"], textprops={"fontsize": 15}, autopct='%1.1f%%')
ax.axis("equal")
ax.set_title("Dataset length comparison", fontsize=18)
fig.set_facecolor('white')
plt.show();
df.describe(percentiles=[0.1, 0.25, 0.5, 0.75, 0.9]).T
fig, ax = plt.subplots(figsize=(16, 8))
bars = ax.hist(df["target"], bins=100, color="palevioletred", edgecolor="black")
ax.set_title("Target distribution", fontsize=20, pad=15)
ax.set_ylabel("Amount of values", fontsize=14, labelpad=15)
ax.set_xlabel("Target value", fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis="y")
plt.show();
cat_features = ['cat' + str(i) for i in range(10)]
num_features = ['cont' + str(i) for i in range(14)]
# Combined dataframe containing numerical features only
df = pd.concat([df[num_features], df_test[num_features]], axis=0)
columns = df.columns.values
# Calculating required amount of rows to display all feature plots
cols = 3
rows = len(columns) // cols + 1
fig, axs = plt.subplots(ncols=cols, nrows=rows, figsize=(16,20), sharex=False)
# Adding some distance between plots
plt.subplots_adjust(hspace = 0.3)
# Plots counter
i=0
for r in np.arange(0, rows, 1):
    for c in np.arange(0, cols, 1):
        if i >= len(columns): # If there is no more data columns to make plots from
            axs[r, c].set_visible(False) # Hiding axes so there will be clean background
        else:
            # Train data histogram
            hist1 = axs[r, c].hist(df[columns[i]].values, range=(df[columns[i]].min(), df[columns[i]].max()), bins=40, color="deepskyblue", edgecolor="black", alpha=0.7, label="Train Dataset")
            # Test data histogram
            hist2 = axs[r, c].hist(df_test[columns[i]].values, range=(df[columns[i]].min(), df[columns[i]].max()), bins=40, color="palevioletred", edgecolor="black", alpha=0.7, label="Test Dataset")
            axs[r, c].set_title(columns[i], fontsize=14, pad=5)
            axs[r, c].tick_params(axis="y", labelsize=13)
            axs[r, c].tick_params(axis="x", labelsize=13)
            axs[r, c].grid(axis="y")
            axs[r, c].legend(fontsize=13)
        i+=1
# plt.suptitle("Numerical feature values distribution in both datasets", y=0.99)
plt.show();
# Bars position should be numerical because there will be arithmetical operations with them
bars_pos = np.arange(len(cat_features))
width=0.3
fig, ax = plt.subplots(figsize=(14, 6))
# Making two bar objects. One is on the left from bar position and the other one is on the right
bars1 = ax.bar(bars_pos-width/2, train[cat_features].nunique().values, width=width, color="darkorange", edgecolor="black")
bars2 = ax.bar(bars_pos+width/2, train[cat_features].nunique().values, width=width, color="steelblue", edgecolor="black")
ax.set_title("Amount of values in categorical features", fontsize=20, pad=15)
ax.set_xlabel("Categorical feature", fontsize=15, labelpad=15)
ax.set_ylabel("Amount of values", fontsize=15, labelpad=15)
ax.set_xticks(bars_pos)
ax.set_xticklabels(cat_features, fontsize=12)
ax.tick_params(axis="y", labelsize=12)
ax.grid(axis="y")
plt.margins(0.01, 0.05)
# Plot dataframe
#df = train.drop("id", axis=1)
# Encoding categorical features with OrdinalEncoder
'''for col in cat_features:
    encoder = OrdinalEncoder()
    df[col] = encoder.fit_transform(np.array(df[col]).reshape(-1, 1))
'''
# Calculating correlation values
df = df.corr().round(2)
# Mask to hide upper-right part of plot as it is a duplicate
mask = np.zeros_like(df)
mask[np.triu_indices_from(mask)] = True
# Making a plot
plt.figure(figsize=(14,14))
ax = sns.heatmap(df, annot=True, mask=mask, cmap="RdBu", annot_kws={"weight": "normal", "fontsize":9})
ax.set_title("Feature correlation heatmap", fontsize=17)
plt.setp(ax.get_xticklabels(), rotation=90, ha="right", rotation_mode="anchor", weight="normal")
plt.setp(ax.get_yticklabels(), weight="normal", rotation_mode="anchor", rotation=0, ha="right")
plt.show();
columns = train.drop(['target'], axis=1).columns.values
cols = 4
rows = len(columns) // cols + 1
fig, axs = plt.subplots(ncols=cols, nrows=rows, figsize=(16, 20), sharex=False)
plt.subplots_adjust(hspace=0.3)
i = 0
for r in np.arange(0, rows, 1):
    for c in np.arange(0, cols, 1):
        if i >= len(columns):
            axs[r, c].set_visible(False)
        else:
            scatter = axs[r, c].scatter(train[columns[i]].values, train['target'], color=random.choice(colors))
            axs[r, c].set_title(columns[i], fontsize=14, pad=5)
            axs[r, c].tick_params(axis='y', labelsize=11)
            axs[r, c].tick_params(axis='x', labelsize=11)
        i += 1
plt.show()
code
73074342/cell_3
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/30days-folds/train_folds.csv')
train = df
df_test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
df.head()
code
73074342/cell_14
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/30days-folds/train_folds.csv')
train = df
df_test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
# Comparing the datasets length
fig, ax = plt.subplots(figsize=(5, 5))
pie = ax.pie([len(df), len(df_test)], labels=["Train dataset", "Test dataset"], colors=["salmon", "teal"], textprops={"fontsize": 15}, autopct='%1.1f%%')
ax.axis("equal")
ax.set_title("Dataset length comparison", fontsize=18)
fig.set_facecolor('white')
plt.show();
df.describe(percentiles=[0.1, 0.25, 0.5, 0.75, 0.9]).T
fig, ax = plt.subplots(figsize=(16, 8))
bars = ax.hist(df["target"], bins=100, color="palevioletred", edgecolor="black")
ax.set_title("Target distribution", fontsize=20, pad=15)
ax.set_ylabel("Amount of values", fontsize=14, labelpad=15)
ax.set_xlabel("Target value", fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis="y")
plt.show();
cat_features = ['cat' + str(i) for i in range(10)]
num_features = ['cont' + str(i) for i in range(14)]
# Combined dataframe containing numerical features only
df = pd.concat([df[num_features], df_test[num_features]], axis=0)
columns = df.columns.values
# Calculating required amount of rows to display all feature plots
cols = 3
rows = len(columns) // cols + 1
fig, axs = plt.subplots(ncols=cols, nrows=rows, figsize=(16,20), sharex=False)
# Adding some distance between plots
plt.subplots_adjust(hspace = 0.3)
# Plots counter
i=0
for r in np.arange(0, rows, 1):
    for c in np.arange(0, cols, 1):
        if i >= len(columns): # If there is no more data columns to make plots from
            axs[r, c].set_visible(False) # Hiding axes so there will be clean background
        else:
            # Train data histogram
            hist1 = axs[r, c].hist(df[columns[i]].values, range=(df[columns[i]].min(), df[columns[i]].max()), bins=40, color="deepskyblue", edgecolor="black", alpha=0.7, label="Train Dataset")
            # Test data histogram
            hist2 = axs[r, c].hist(df_test[columns[i]].values, range=(df[columns[i]].min(), df[columns[i]].max()), bins=40, color="palevioletred", edgecolor="black", alpha=0.7, label="Test Dataset")
            axs[r, c].set_title(columns[i], fontsize=14, pad=5)
            axs[r, c].tick_params(axis="y", labelsize=13)
            axs[r, c].tick_params(axis="x", labelsize=13)
            axs[r, c].grid(axis="y")
            axs[r, c].legend(fontsize=13)
        i+=1
# plt.suptitle("Numerical feature values distribution in both datasets", y=0.99)
plt.show();
# Bars position should be numerical because there will be arithmetical operations with them
bars_pos = np.arange(len(cat_features))
width=0.3
fig, ax = plt.subplots(figsize=(14, 6))
# Making two bar objects. One is on the left from bar position and the other one is on the right
bars1 = ax.bar(bars_pos-width/2, train[cat_features].nunique().values, width=width, color="darkorange", edgecolor="black")
bars2 = ax.bar(bars_pos+width/2, train[cat_features].nunique().values, width=width, color="steelblue", edgecolor="black")
ax.set_title("Amount of values in categorical features", fontsize=20, pad=15)
ax.set_xlabel("Categorical feature", fontsize=15, labelpad=15)
ax.set_ylabel("Amount of values", fontsize=15, labelpad=15)
ax.set_xticks(bars_pos)
ax.set_xticklabels(cat_features, fontsize=12)
ax.tick_params(axis="y", labelsize=12)
ax.grid(axis="y")
plt.margins(0.01, 0.05)
"""for col in cat_features:
    encoder = OrdinalEncoder()
    df[col] = encoder.fit_transform(np.array(df[col]).reshape(-1, 1))
"""
df = df.corr().round(2)
mask = np.zeros_like(df)
mask[np.triu_indices_from(mask)] = True
plt.figure(figsize=(14, 14))
ax = sns.heatmap(df, annot=True, mask=mask, cmap='RdBu', annot_kws={'weight': 'normal', 'fontsize': 9})
ax.set_title('Feature correlation heatmap', fontsize=17)
plt.setp(ax.get_xticklabels(), rotation=90, ha='right', rotation_mode='anchor', weight='normal')
plt.setp(ax.get_yticklabels(), weight='normal', rotation_mode='anchor', rotation=0, ha='right')
plt.show()
code
73074342/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('../input/30days-folds/train_folds.csv')
train = df
df_test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
# Comparing the datasets length
fig, ax = plt.subplots(figsize=(5, 5))
pie = ax.pie([len(df), len(df_test)], labels=["Train dataset", "Test dataset"], colors=["salmon", "teal"], textprops={"fontsize": 15}, autopct='%1.1f%%')
ax.axis("equal")
ax.set_title("Dataset length comparison", fontsize=18)
fig.set_facecolor('white')
plt.show();
df.describe(percentiles=[0.1, 0.25, 0.5, 0.75, 0.9]).T
fig, ax = plt.subplots(figsize=(16, 8))
bars = ax.hist(df["target"], bins=100, color="palevioletred", edgecolor="black")
ax.set_title("Target distribution", fontsize=20, pad=15)
ax.set_ylabel("Amount of values", fontsize=14, labelpad=15)
ax.set_xlabel("Target value", fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis="y")
plt.show();
cat_features = ['cat' + str(i) for i in range(10)]
num_features = ['cont' + str(i) for i in range(14)]
df = pd.concat([df[num_features], df_test[num_features]], axis=0)
columns = df.columns.values
cols = 3
rows = len(columns) // cols + 1
fig, axs = plt.subplots(ncols=cols, nrows=rows, figsize=(16, 20), sharex=False)
plt.subplots_adjust(hspace=0.3)
i = 0
for r in np.arange(0, rows, 1):
    for c in np.arange(0, cols, 1):
        if i >= len(columns):
            axs[r, c].set_visible(False)
        else:
            hist1 = axs[r, c].hist(df[columns[i]].values, range=(df[columns[i]].min(), df[columns[i]].max()), bins=40, color='deepskyblue', edgecolor='black', alpha=0.7, label='Train Dataset')
            hist2 = axs[r, c].hist(df_test[columns[i]].values, range=(df[columns[i]].min(), df[columns[i]].max()), bins=40, color='palevioletred', edgecolor='black', alpha=0.7, label='Test Dataset')
            axs[r, c].set_title(columns[i], fontsize=14, pad=5)
            axs[r, c].tick_params(axis='y', labelsize=13)
            axs[r, c].tick_params(axis='x', labelsize=13)
            axs[r, c].grid(axis='y')
            axs[r, c].legend(fontsize=13)
        i += 1
plt.show()
code
73074342/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('../input/30days-folds/train_folds.csv')
train = df
df_test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
# Comparing the datasets length
fig, ax = plt.subplots(figsize=(5, 5))
pie = ax.pie([len(df), len(df_test)], labels=["Train dataset", "Test dataset"], colors=["salmon", "teal"], textprops={"fontsize": 15}, autopct='%1.1f%%')
ax.axis("equal")
ax.set_title("Dataset length comparison", fontsize=18)
fig.set_facecolor('white')
plt.show();
df.describe(percentiles=[0.1, 0.25, 0.5, 0.75, 0.9]).T
fig, ax = plt.subplots(figsize=(16, 8))
bars = ax.hist(df["target"], bins=100, color="palevioletred", edgecolor="black")
ax.set_title("Target distribution", fontsize=20, pad=15)
ax.set_ylabel("Amount of values", fontsize=14, labelpad=15)
ax.set_xlabel("Target value", fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis="y")
plt.show();
cat_features = ['cat' + str(i) for i in range(10)]
num_features = ['cont' + str(i) for i in range(14)]
# Combined dataframe containing numerical features only
df = pd.concat([df[num_features], df_test[num_features]], axis=0)
columns = df.columns.values
# Calculating required amount of rows to display all feature plots
cols = 3
rows = len(columns) // cols + 1
fig, axs = plt.subplots(ncols=cols, nrows=rows, figsize=(16,20), sharex=False)
# Adding some distance between plots
plt.subplots_adjust(hspace = 0.3)
# Plots counter
i=0
for r in np.arange(0, rows, 1):
    for c in np.arange(0, cols, 1):
        if i >= len(columns): # If there is no more data columns to make plots from
            axs[r, c].set_visible(False) # Hiding axes so there will be clean background
        else:
            # Train data histogram
            hist1 = axs[r, c].hist(df[columns[i]].values, range=(df[columns[i]].min(), df[columns[i]].max()), bins=40, color="deepskyblue", edgecolor="black", alpha=0.7, label="Train Dataset")
            # Test data histogram
            hist2 = axs[r, c].hist(df_test[columns[i]].values, range=(df[columns[i]].min(), df[columns[i]].max()), bins=40, color="palevioletred", edgecolor="black", alpha=0.7, label="Test Dataset")
            axs[r, c].set_title(columns[i], fontsize=14, pad=5)
            axs[r, c].tick_params(axis="y", labelsize=13)
            axs[r, c].tick_params(axis="x", labelsize=13)
            axs[r, c].grid(axis="y")
            axs[r, c].legend(fontsize=13)
        i+=1
# plt.suptitle("Numerical feature values distribution in both datasets", y=0.99)
plt.show();
bars_pos = np.arange(len(cat_features))
width = 0.3
fig, ax = plt.subplots(figsize=(14, 6))
bars1 = ax.bar(bars_pos - width / 2, train[cat_features].nunique().values, width=width, color='darkorange', edgecolor='black')
bars2 = ax.bar(bars_pos + width / 2, train[cat_features].nunique().values, width=width, color='steelblue', edgecolor='black')
ax.set_title('Amount of values in categorical features', fontsize=20, pad=15)
ax.set_xlabel('Categorical feature', fontsize=15, labelpad=15)
ax.set_ylabel('Amount of values', fontsize=15, labelpad=15)
ax.set_xticks(bars_pos)
ax.set_xticklabels(cat_features, fontsize=12)
ax.tick_params(axis='y', labelsize=12)
ax.grid(axis='y')
plt.margins(0.01, 0.05)
code
73074342/cell_5
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/30days-folds/train_folds.csv')
train = df
df_test = pd.read_csv('../input/30-days-of-ml/test.csv')
sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
fig, ax = plt.subplots(figsize=(5, 5))
pie = ax.pie([len(df), len(df_test)], labels=['Train dataset', 'Test dataset'], colors=['salmon', 'teal'], textprops={'fontsize': 15}, autopct='%1.1f%%')
ax.axis('equal')
ax.set_title('Dataset length comparison', fontsize=18)
fig.set_facecolor('white')
plt.show()
code
128010348/cell_13
[ "text_html_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/iris/Iris.csv')
df = df.dropna()
df = df.replace([np.inf, -np.inf], np.nan)
df = df.dropna()
df = df.drop(['Id'], axis=1)
scaler = MinMaxScaler()
cols_to_scale = df.columns[:-1]
df_norm = pd.DataFrame(scaler.fit_transform(df[cols_to_scale]), columns=cols_to_scale)
df_norm = pd.concat([df_norm, df.iloc[:, -1]], axis=1)
df = df_norm
df.info()
code
128010348/cell_25
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from catboost import CatBoostClassifier, Pool
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
models = {'Logistic Regression': LogisticRegression(random_state=42),
          'Random Forest': RandomForestClassifier(random_state=42),
          'Gradient Boosting': GradientBoostingClassifier(random_state=42),
          'Support Vector Machines': SVC(random_state=42),
          'K-Nearest': KNeighborsClassifier(),
          'XGB': XGBClassifier(random_state=42),
          'Cat': CatBoostClassifier(random_state=42),
          'Decision Tree': DecisionTreeClassifier(random_state=42)}
params = {'Logistic Regression': {'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000], 'solver': ['newton-cg', 'lbfgs', 'liblinear']},
          'Random Forest': {'n_estimators': [10, 50, 100, 250, 500], 'max_depth': [5, 10, 20]},
          'Gradient Boosting': {'n_estimators': [10, 50, 100, 250, 500], 'learning_rate': [0.001, 0.005, 0.0001, 0.0005]},
          'Support Vector Machines': {'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000], 'kernel': ['linear', 'rbf']},
          'K-Nearest': {'n_neighbors': [3, 5, 7, 11, 21], 'weights': ['uniform', 'distance'], 'metric': ['euclidean', 'manhattan']},
          'XGB': {'max_depth': [5, 10, 20], 'n_estimators': [10, 50, 100, 250, 500], 'learning_rate': [0.001, 0.005, 0.0001, 0.0005]},
          'Cat': {'iterations': [50, 500, 5000], 'max_depth': [5, 10, 20], 'loss_function': ['Logloss', 'CrossEntropy', 'MultiClass'], 'learning_rate': [0.001, 0.005, 0.0001, 0.0005], 'eval_metric': ['MultiClass']},
          'Decision Tree': {'max_features': ['auto', 'sqrt', 'log2'], 'ccp_alpha': [0.1, 0.01, 0.001], 'max_depth': [5, 10, 20], 'criterion': ['gini', 'entropy']}}
results = []
for name, model in models.items():
    clf = RandomizedSearchCV(model, params[name], cv=5, n_jobs=-1, scoring='accuracy')
    clf.fit(X_train_ex, y_train_ex)
    results.append({'model': name, 'best_score': clf.best_score_, 'best_params': clf.best_params_})
for result in results:
    print(f"{result['model']}: Best score = {result['best_score']:.4f}, Best params = {result['best_params']}")
"\nLogistic Regression: Best score = 1.0000, Best params = {'solver': 'newton-cg', 'C': 1}\nRandom Forest: Best score = 1.0000, Best params = {'n_estimators': 50, 'max_depth': 10}\nGradient Boosting: Best score = 1.0000, Best params = {'n_estimators': 10, 'learning_rate': 0.005}\nSupport Vector Machines: Best score = 1.0000, Best params = {'kernel': 'rbf', 'C': 100}\nK-Nearest: Best score = 1.0000, Best params = {'weights': 'uniform', 'n_neighbors': 5, 'metric': 'manhattan'}\nXGB: Best score = 1.0000, Best params = {'n_estimators': 250, 'max_depth': 10, 'learning_rate': 0.0001}\nCat: Best score = 1.0000, Best params = {'max_depth': 10, 'loss_function': 'MultiClass', 'learning_rate': 0.005, 'iterations': 50, 'eval_metric': 'MultiClass'}\nDecision Tree: Best score = 1.0000, Best params = {'max_features': 'sqrt', 'max_depth': 5, 'criterion': 'gini', 'ccp_alpha': 0.001}\n"
code
128010348/cell_30
[ "text_plain_output_35.png", "application_vnd.jupyter.stderr_output_24.png", "application_vnd.jupyter.stderr_output_16.png", "application_vnd.jupyter.stderr_output_52.png", "text_plain_output_43.png", "text_plain_output_37.png", "application_vnd.jupyter.stderr_output_32.png", "text_plain_output_5.png", "application_vnd.jupyter.stderr_output_48.png", "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_15.png", "application_vnd.jupyter.stderr_output_18.png", "text_plain_output_9.png", "application_vnd.jupyter.stderr_output_38.png", "application_vnd.jupyter.stderr_output_58.png", "text_plain_output_31.png", "application_vnd.jupyter.stderr_output_4.png", "application_vnd.jupyter.stderr_output_26.png", "application_vnd.jupyter.stderr_output_6.png", "text_plain_output_13.png", "text_plain_output_45.png", "application_vnd.jupyter.stderr_output_12.png", "text_plain_output_29.png", "application_vnd.jupyter.stderr_output_8.png", "text_plain_output_49.png", "text_plain_output_27.png", "text_plain_output_57.png", "application_vnd.jupyter.stderr_output_10.png", "text_plain_output_21.png", "text_plain_output_47.png", "text_plain_output_25.png", "application_vnd.jupyter.stderr_output_34.png", "text_plain_output_3.png", "application_vnd.jupyter.stderr_output_44.png", "application_vnd.jupyter.stderr_output_42.png", "application_vnd.jupyter.stderr_output_60.png", "text_plain_output_7.png", "application_vnd.jupyter.stderr_output_30.png", "text_plain_output_59.png", "application_vnd.jupyter.stderr_output_28.png", "application_vnd.jupyter.stderr_output_46.png", "text_plain_output_41.png", "text_plain_output_53.png", "application_vnd.jupyter.stderr_output_20.png", "text_plain_output_23.png", "application_vnd.jupyter.stderr_output_36.png", "application_vnd.jupyter.stderr_output_22.png", "text_plain_output_51.png", "application_vnd.jupyter.stderr_output_56.png", "application_vnd.jupyter.stderr_output_50.png", "text_plain_output_1.png", "text_plain_output_33.png", "text_plain_output_39.png", "text_plain_output_55.png", "text_plain_output_19.png", "text_plain_output_17.png", "text_plain_output_11.png", "application_vnd.jupyter.stderr_output_14.png", "application_vnd.jupyter.stderr_output_54.png", "text_plain_output_61.png", "application_vnd.jupyter.stderr_output_40.png" ]
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.models import Sequential, Model, load_model
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Conv2D, MaxPool2D, Activation, Dropout, BatchNormalization, LeakyReLU
from tensorflow.keras.layers import Input, Dense, Flatten, GlobalAveragePooling2D, concatenate
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras.utils import to_categorical  # one-hot encoding used for the 3-class target below
y_train_ex = to_categorical(y_train_ex)
y_test_ex = to_categorical(y_test_ex)
def create_model(neurons, dropout_rate, kernel_regularizer, learning_rate):
    input_shape = (X_train_ex.shape[1],)
    model = Sequential()
    model.add(Dense(neurons, activation='relu', input_shape=input_shape))
    model.add(Dropout(dropout_rate))
    model.add(Dense(neurons // 2, activation='relu'))
    model.add(Dropout(dropout_rate))
    model.add(Dense(neurons // 4, activation='relu'))
    model.add(Dropout(dropout_rate))
    model.add(Dense(neurons // 8, activation='relu', kernel_regularizer=regularizers.l2(kernel_regularizer)))
    model.add(Dropout(dropout_rate))
    model.add(Dense(3, activation='softmax'))
    opt = Adam(learning_rate=learning_rate)
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    return model
model = KerasClassifier(build_fn=create_model, verbose=0)
neurons = [64, 128, 256, 512, 1024, 2048]
dropout_rate = [0, 0.25, 0.5, 0.75]
kernel_regularizer = [0.01, 0.001, 0.0001]
learning_rate = [0.01, 0.05, 0.001, 0.005, 0.0001, 0.0005]
batch_size = [16, 32, 64]
epochs = [50, 100, 150, 300, 500, 1000]
param_grid = dict(neurons=neurons, dropout_rate=dropout_rate, kernel_regularizer=kernel_regularizer, learning_rate=learning_rate, batch_size=batch_size, epochs=epochs)
n_iter_search = 50
random_search = RandomizedSearchCV(model, param_distributions=param_grid, n_iter=n_iter_search, cv=5, n_jobs=-1, scoring='accuracy')
random_search.fit(X_train_ex, y_train_ex)
print('Best parameters: ', random_search.best_params_)
print('Best score: ', random_search.best_score_)
code
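Both hyper-parameter searches from this notebook (128010348/cell_25 and cell_30) stop at printing the best score and parameters. A small hedged sketch of how the fitted `random_search` object could be inspected further, assuming it was fit exactly as in cell_30:
import pandas as pd
# Tabulate the cross-validation results and show the top-ranked parameter sets.
cv_results = pd.DataFrame(random_search.cv_results_)
top = cv_results.sort_values('rank_test_score')[['params', 'mean_test_score', 'std_test_score']].head()
print(top)
print('Best CV accuracy:', random_search.best_score_)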