path: string (length 13-17)
screenshot_names: sequence (length 1-873)
code: string (length 0-40.4k)
cell_type: string (1 distinct value)
105193974/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
105193974/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/feedback-prize-english-language-learning/train.csv') test = pd.read_csv('../input/feedback-prize-english-language-learning/test.csv') ss = pd.read_csv('../input/feedback-prize-english-language-learning/sample_submission.csv') target_cols = ['cohesion', 'syntax', 'vocabulary', 'phraseology', 'grammar', 'conventions'] train[target_cols].min() train[target_cols].max()
code
105193974/cell_8
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/feedback-prize-english-language-learning/train.csv') test = pd.read_csv('../input/feedback-prize-english-language-learning/test.csv') ss = pd.read_csv('../input/feedback-prize-english-language-learning/sample_submission.csv') from sklearn.feature_extraction.text import TfidfVectorizer vectorizer = TfidfVectorizer(smooth_idf=True, sublinear_tf=True) vectorizer.fit(raw_documents=train.full_text)
code
105193974/cell_5
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/feedback-prize-english-language-learning/train.csv') test = pd.read_csv('../input/feedback-prize-english-language-learning/test.csv') ss = pd.read_csv('../input/feedback-prize-english-language-learning/sample_submission.csv') train.head()
code
50208360/cell_42
[ "text_html_output_1.png" ]
from catboost import CatBoostRegressor, Pool from sklearn.preprocessing import LabelEncoder import numpy as np import pandas as pd train = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv') train = train.replace([np.inf, -np.inf], np.nan) train = train.fillna(0) from sklearn.preprocessing import LabelEncoder le = LabelEncoder() def FunLabelEncoder(df): for c in df.columns: if df.dtypes[c] == object: le.fit(df[c].astype(str)) df[c] = le.transform(df[c].astype(str)) return df train = FunLabelEncoder(train) features = ['enrollee_id', 'city', 'city_development_index', 'gender', 'relevent_experience', 'enrolled_university', 'education_level', 'major_discipline', 'experience', 'company_size', 'company_type', 'last_new_job', 'training_hours'] target = 'target' from catboost import CatBoostRegressor, Pool from sklearn.metrics import r2_score, mean_squared_error model = CatBoostRegressor(objective='RMSE') model.fit(train[features], train[target])
code
50208360/cell_13
[ "text_html_output_1.png" ]
import pandas as pd import plotly.express as px train = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv') import plotly.express as px from plotly.subplots import make_subplots import plotly.graph_objs as go cd = train['city_development_index'].value_counts().reset_index() cd.columns = ['city_development_index', 'count'] cd['city_development_index'] = cd['city_development_index'].astype(str) + '-' cd = cd.sort_values(['count']).tail(50) fig = px.bar(cd, x='count', y='city_development_index', orientation='h', title='City development index', width=1000, height=900) fig.show()
code
50208360/cell_9
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv') mnj = train['target'].value_counts() plt.figure(figsize=(6, 4)) sns.barplot(x=mnj.index, y=mnj.values, alpha=0.8) plt.ylabel('Number of Data', fontsize=12) plt.xlabel('target', fontsize=9) plt.xticks(rotation=90) plt.show()
code
50208360/cell_30
[ "text_html_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv') print('Any missing sample in training set:', train.isnull().values.any())
code
50208360/cell_33
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder import numpy as np import pandas as pd train = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv') train = train.replace([np.inf, -np.inf], np.nan) train = train.fillna(0) from sklearn.preprocessing import LabelEncoder le = LabelEncoder() def FunLabelEncoder(df): for c in df.columns: if df.dtypes[c] == object: le.fit(df[c].astype(str)) df[c] = le.transform(df[c].astype(str)) return df train = FunLabelEncoder(train) train.info()
code
50208360/cell_44
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from catboost import CatBoostRegressor, Pool from sklearn import metrics from sklearn.preprocessing import LabelEncoder import numpy as np import pandas as pd train = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv') train = train.replace([np.inf, -np.inf], np.nan) train = train.fillna(0) from sklearn.preprocessing import LabelEncoder le = LabelEncoder() def FunLabelEncoder(df): for c in df.columns: if df.dtypes[c] == object: le.fit(df[c].astype(str)) df[c] = le.transform(df[c].astype(str)) return df train = FunLabelEncoder(train) test = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_test.csv') test = FunLabelEncoder(test) features = ['enrollee_id', 'city', 'city_development_index', 'gender', 'relevent_experience', 'enrolled_university', 'education_level', 'major_discipline', 'experience', 'company_size', 'company_type', 'last_new_job', 'training_hours'] target = 'target' from catboost import CatBoostRegressor, Pool from sklearn.metrics import r2_score, mean_squared_error model = CatBoostRegressor(objective='RMSE') model.fit(train[features], train[target]) predictions = model.predict(test[features]) predictions from sklearn import metrics fpr, tpr, thresholds = metrics.roc_curve(train[target], model.predict(train[features])) metrics.auc(fpr, tpr)
code
50208360/cell_20
[ "image_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv') def wmnj(x): y = train[['enrollee_id', 'city', 'city_development_index', 'gender', 'relevent_experience', 'enrolled_university', 'education_level', 'major_discipline', 'experience', 'company_size', 'company_type', 'last_new_job', 'training_hours', 'target']][train['education_level'] == x] y = y.sort_values(by='enrollee_id', ascending=False) return y wmnj('Graduate')
code
50208360/cell_40
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder import numpy as np import pandas as pd train = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv') train = train.replace([np.inf, -np.inf], np.nan) train = train.fillna(0) from sklearn.preprocessing import LabelEncoder le = LabelEncoder() def FunLabelEncoder(df): for c in df.columns: if df.dtypes[c] == object: le.fit(df[c].astype(str)) df[c] = le.transform(df[c].astype(str)) return df train = FunLabelEncoder(train) features = ['enrollee_id', 'city', 'city_development_index', 'gender', 'relevent_experience', 'enrolled_university', 'education_level', 'major_discipline', 'experience', 'company_size', 'company_type', 'last_new_job', 'training_hours'] target = 'target' train[target].head(100).values
code
50208360/cell_39
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder import numpy as np import pandas as pd train = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv') train = train.replace([np.inf, -np.inf], np.nan) train = train.fillna(0) from sklearn.preprocessing import LabelEncoder le = LabelEncoder() def FunLabelEncoder(df): for c in df.columns: if df.dtypes[c] == object: le.fit(df[c].astype(str)) df[c] = le.transform(df[c].astype(str)) return df train = FunLabelEncoder(train) features = ['enrollee_id', 'city', 'city_development_index', 'gender', 'relevent_experience', 'enrolled_university', 'education_level', 'major_discipline', 'experience', 'company_size', 'company_type', 'last_new_job', 'training_hours'] target = 'target' train[features].head(10)
code
50208360/cell_26
[ "text_html_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv') def wmnj(x): y = train[['enrollee_id', 'city', 'city_development_index', 'gender', 'relevent_experience', 'enrolled_university', 'education_level', 'major_discipline', 'experience', 'company_size', 'company_type', 'last_new_job', 'training_hours', 'target']][train['education_level'] == x] y = y.sort_values(by='enrollee_id', ascending=False) return y wmnj('Phd')
code
50208360/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv') mnj = train['target'].value_counts() plt.xticks(rotation=90) EL = train['education_level'].value_counts() plt.figure(figsize=(6, 4)) sns.barplot(x=EL.index, y=EL.values, alpha=0.8) plt.ylabel('Number of Data', fontsize=12) plt.xlabel('education_level', fontsize=9) plt.xticks(rotation=90) plt.show()
code
50208360/cell_1
[ "text_plain_output_1.png" ]
import numpy as np import pylab as pl import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.utils import shuffle from sklearn.svm import SVC from sklearn.metrics import confusion_matrix, classification_report from sklearn.model_selection import cross_val_score, GridSearchCV import os print(os.listdir('../input'))
code
50208360/cell_7
[ "text_html_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv') display(train[['city', 'city_development_index', 'relevent_experience', 'gender', 'education_level', 'major_discipline', 'experience', 'company_size', 'company_type', 'target']].groupby(['gender', 'education_level', 'experience', 'company_size']).agg(['max', 'mean', 'min']).style.background_gradient(cmap='Oranges'))
code
50208360/cell_45
[ "text_plain_output_1.png" ]
from catboost import CatBoostRegressor, Pool from sklearn.preprocessing import LabelEncoder import numpy as np import pandas as pd train = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv') train = train.replace([np.inf, -np.inf], np.nan) train = train.fillna(0) from sklearn.preprocessing import LabelEncoder le = LabelEncoder() def FunLabelEncoder(df): for c in df.columns: if df.dtypes[c] == object: le.fit(df[c].astype(str)) df[c] = le.transform(df[c].astype(str)) return df train = FunLabelEncoder(train) test = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_test.csv') test = FunLabelEncoder(test) features = ['enrollee_id', 'city', 'city_development_index', 'gender', 'relevent_experience', 'enrolled_university', 'education_level', 'major_discipline', 'experience', 'company_size', 'company_type', 'last_new_job', 'training_hours'] target = 'target' from catboost import CatBoostRegressor, Pool from sklearn.metrics import r2_score, mean_squared_error model = CatBoostRegressor(objective='RMSE') model.fit(train[features], train[target]) predictions = model.predict(test[features]) predictions submission = pd.DataFrame({'enrollee_id': test['enrollee_id'], 'target': predictions}) submission.head(10)
code
50208360/cell_28
[ "text_html_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv') def wmnj(x): y = train[['enrollee_id', 'city', 'city_development_index', 'gender', 'relevent_experience', 'enrolled_university', 'education_level', 'major_discipline', 'experience', 'company_size', 'company_type', 'last_new_job', 'training_hours', 'target']][train['education_level'] == x] y = y.sort_values(by='enrollee_id', ascending=False) return y wmnj('Primary School')
code
50208360/cell_15
[ "image_output_1.png" ]
import pandas as pd import plotly.express as px import plotly.express as px train = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv') import plotly.express as px from plotly.subplots import make_subplots import plotly.graph_objs as go cd = train['city_development_index'].value_counts().reset_index() cd.columns = [ 'city_development_index', 'count' ] cd['city_development_index'] = cd['city_development_index'].astype(str) + '-' cd = cd.sort_values(['count']).tail(50) fig = px.bar( cd, x='count', y='city_development_index', orientation='h', title='City development index', width=1000, height=900 ) fig.show() import plotly.express as px from plotly.subplots import make_subplots import plotly.graph_objs as go ep = train['experience'].value_counts().reset_index() ep.columns = ['experience', 'percent'] ep['percent'] /= len(train) fig = px.pie(ep, names='experience', values='percent', title='Experience', width=800, height=500) fig.show()
code
50208360/cell_3
[ "text_html_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv') train.head()
code
50208360/cell_35
[ "text_plain_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv') test = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_test.csv') test.head()
code
50208360/cell_43
[ "text_plain_output_1.png" ]
from catboost import CatBoostRegressor, Pool from sklearn.preprocessing import LabelEncoder import numpy as np import pandas as pd train = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv') train = train.replace([np.inf, -np.inf], np.nan) train = train.fillna(0) from sklearn.preprocessing import LabelEncoder le = LabelEncoder() def FunLabelEncoder(df): for c in df.columns: if df.dtypes[c] == object: le.fit(df[c].astype(str)) df[c] = le.transform(df[c].astype(str)) return df train = FunLabelEncoder(train) test = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_test.csv') test = FunLabelEncoder(test) features = ['enrollee_id', 'city', 'city_development_index', 'gender', 'relevent_experience', 'enrolled_university', 'education_level', 'major_discipline', 'experience', 'company_size', 'company_type', 'last_new_job', 'training_hours'] target = 'target' from catboost import CatBoostRegressor, Pool from sklearn.metrics import r2_score, mean_squared_error model = CatBoostRegressor(objective='RMSE') model.fit(train[features], train[target]) predictions = model.predict(test[features]) predictions
code
50208360/cell_46
[ "text_plain_output_1.png" ]
from catboost import CatBoostRegressor, Pool from sklearn.preprocessing import LabelEncoder import numpy as np import pandas as pd train = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv') train = train.replace([np.inf, -np.inf], np.nan) train = train.fillna(0) from sklearn.preprocessing import LabelEncoder le = LabelEncoder() def FunLabelEncoder(df): for c in df.columns: if df.dtypes[c] == object: le.fit(df[c].astype(str)) df[c] = le.transform(df[c].astype(str)) return df train = FunLabelEncoder(train) test = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_test.csv') test = FunLabelEncoder(test) features = ['enrollee_id', 'city', 'city_development_index', 'gender', 'relevent_experience', 'enrolled_university', 'education_level', 'major_discipline', 'experience', 'company_size', 'company_type', 'last_new_job', 'training_hours'] target = 'target' from catboost import CatBoostRegressor, Pool from sklearn.metrics import r2_score, mean_squared_error model = CatBoostRegressor(objective='RMSE') model.fit(train[features], train[target]) predictions = model.predict(test[features]) predictions submission = pd.DataFrame({'enrollee_id': test['enrollee_id'], 'target': predictions}) filename = 'submission.csv' submission.to_csv(filename, index=False) print('Saved file: ' + filename)
code
50208360/cell_24
[ "text_html_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv') def wmnj(x): y = train[['enrollee_id', 'city', 'city_development_index', 'gender', 'relevent_experience', 'enrolled_university', 'education_level', 'major_discipline', 'experience', 'company_size', 'company_type', 'last_new_job', 'training_hours', 'target']][train['education_level'] == x] y = y.sort_values(by='enrollee_id', ascending=False) return y wmnj('High School')
code
50208360/cell_22
[ "text_html_output_2.png" ]
import pandas as pd train = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv') def wmnj(x): y = train[['enrollee_id', 'city', 'city_development_index', 'gender', 'relevent_experience', 'enrolled_university', 'education_level', 'major_discipline', 'experience', 'company_size', 'company_type', 'last_new_job', 'training_hours', 'target']][train['education_level'] == x] y = y.sort_values(by='enrollee_id', ascending=False) return y wmnj('Masters')
code
50208360/cell_37
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder import pandas as pd train = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv') from sklearn.preprocessing import LabelEncoder le = LabelEncoder() def FunLabelEncoder(df): for c in df.columns: if df.dtypes[c] == object: le.fit(df[c].astype(str)) df[c] = le.transform(df[c].astype(str)) return df test = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_test.csv') test = FunLabelEncoder(test) test.info()
code
50208360/cell_36
[ "text_plain_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_train.csv') test = pd.read_csv('../input/hr-analytics-job-change-of-data-scientists/aug_test.csv') print('Any missing sample in test set:', test.isnull().values.any(), '\n')
code
128049103/cell_9
[ "image_output_1.png" ]
import pandas as pd life = pd.read_csv('/kaggle/input/life-expectancy-who-updated/Life-Expectancy-Data-Updated.csv') life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] = life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] / 10 min_year = life['Year'].min() max_year = life['Year'].max() print('Time frame of this data : {}-{}'.format(min_year, max_year)) region = life['Region'].unique() print('Regions in this data : ', region) print('Country in North America : ') print(life[life['Region'] == 'North America']['Country'].unique()) print('Country in Asia : ') print(life[life['Region'] == 'Asia']['Country'].unique()) print('Total number of countries : ', len(life['Country'].unique()))
code
128049103/cell_4
[ "image_output_1.png" ]
import pandas as pd life = pd.read_csv('/kaggle/input/life-expectancy-who-updated/Life-Expectancy-Data-Updated.csv') print(life.info())
code
128049103/cell_23
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns life = pd.read_csv('/kaggle/input/life-expectancy-who-updated/Life-Expectancy-Data-Updated.csv') life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] = life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] / 10 groupyears = life.groupby('Year')['Country'].value_counts() groupyears = groupyears.unstack() missing_country = [] for index, rows in groupyears.iterrows(): for col in groupyears.columns: if groupyears.loc[index, col] != 1: missing_country.append([index, col]) else: None average_timeline = life.groupby('Year')['Life_expectancy'].mean() average_compare = life[life['Year'].isin([2000, 2015])].groupby(['Year', 'Region', 'Country'])['Life_expectancy'].median() average_timeline = life.groupby('Year')['Life_expectancy'].mean() average_compare = life[life['Year'].isin([2000, 2015])].groupby(['Year', 'Region', 'Country'])['Life_expectancy'].median() plt.text(life[life['Year'] == 2000]['Life_expectancy'].median() + 2, y=0.015, s='Median life expectancy at year 2000', color='tan') plt.text(life[life['Year'] == 2015]['Life_expectancy'].median() + 2, y=0.02, s='Median life expectancy at year 2015', color='darkcyan') plt.xticks(rotation=90) alc_mortal = life[['Year', 'Region', 'Country', 'Adult_mortality', 'Alcohol_consumption']] sns.regplot(data=alc_mortal[alc_mortal['Region'] == 'European Union'], x='Alcohol_consumption', y='Adult_mortality', order=2, x_bins=20, color='darkcyan') plt.title('Correlation between Alcohol consumption and Adult mortality in European population.') plt.xlabel('Alcohol consumption(litre per capita)') plt.ylabel('Adult mortality rate.') plt.show()
code
128049103/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd import scipy.stats as stats life = pd.read_csv('/kaggle/input/life-expectancy-who-updated/Life-Expectancy-Data-Updated.csv') life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] = life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] / 10 groupyears = life.groupby('Year')['Country'].value_counts() groupyears = groupyears.unstack() missing_country = [] for index, rows in groupyears.iterrows(): for col in groupyears.columns: if groupyears.loc[index, col] != 1: missing_country.append([index, col]) else: None average_timeline = life.groupby('Year')['Life_expectancy'].mean() average_compare = life[life['Year'].isin([2000, 2015])].groupby(['Year', 'Region', 'Country'])['Life_expectancy'].median() average_timeline = life.groupby('Year')['Life_expectancy'].mean() average_compare = life[life['Year'].isin([2000, 2015])].groupby(['Year', 'Region', 'Country'])['Life_expectancy'].median() stat, pvalue = stats.wilcoxon(life[life['Year'] == 2015]['Life_expectancy'], life[life['Year'] == 2000]['Life_expectancy']) print('There is significant difference between life expectancy of people around the world in year 2015 compared with 2000.') print('Mean difference : ', life[life['Year'] == 2015]['Life_expectancy'].median() - life[life['Year'] == 2000]['Life_expectancy'].median(), 'years.') print('P-value : ', pvalue)
code
128049103/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd life = pd.read_csv('/kaggle/input/life-expectancy-who-updated/Life-Expectancy-Data-Updated.csv') life.head()
code
128049103/cell_26
[ "image_output_2.png", "image_output_1.png" ]
import pandas as pd life = pd.read_csv('/kaggle/input/life-expectancy-who-updated/Life-Expectancy-Data-Updated.csv') life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] = life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] / 10 groupyears = life.groupby('Year')['Country'].value_counts() groupyears = groupyears.unstack() missing_country = [] for index, rows in groupyears.iterrows(): for col in groupyears.columns: if groupyears.loc[index, col] != 1: missing_country.append([index, col]) else: None average_timeline = life.groupby('Year')['Life_expectancy'].mean() average_compare = life[life['Year'].isin([2000, 2015])].groupby(['Year', 'Region', 'Country'])['Life_expectancy'].median() average_timeline = life.groupby('Year')['Life_expectancy'].mean() average_compare = life[life['Year'].isin([2000, 2015])].groupby(['Year', 'Region', 'Country'])['Life_expectancy'].median() gdp_infant = life.groupby(['Country', 'Year'])[['GDP_per_capita', 'Infant_deaths', 'Under_five_deaths']].mean() print('GDP per capita and infant & underfive year children mortality rates (per 1000) of countries around the world, since 2000-2015.') gdp_infant = gdp_infant.reset_index() print(gdp_infant) gdp_infant_avg = life.groupby('Country')[['GDP_per_capita', 'Infant_deaths', 'Under_five_deaths']].mean() print('Average GDP per capita and infant & underfive year children mortality rates (per 1000) of countries around the world, since 2000-2015.') print(gdp_infant_avg)
code
128049103/cell_2
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import scipy.stats as stats import statsmodels.api as sm from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
code
128049103/cell_19
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns life = pd.read_csv('/kaggle/input/life-expectancy-who-updated/Life-Expectancy-Data-Updated.csv') life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] = life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] / 10 groupyears = life.groupby('Year')['Country'].value_counts() groupyears = groupyears.unstack() missing_country = [] for index, rows in groupyears.iterrows(): for col in groupyears.columns: if groupyears.loc[index, col] != 1: missing_country.append([index, col]) else: None average_timeline = life.groupby('Year')['Life_expectancy'].mean() average_compare = life[life['Year'].isin([2000, 2015])].groupby(['Year', 'Region', 'Country'])['Life_expectancy'].median() average_timeline = life.groupby('Year')['Life_expectancy'].mean() average_compare = life[life['Year'].isin([2000, 2015])].groupby(['Year', 'Region', 'Country'])['Life_expectancy'].median() sns.kdeplot(data=life[life['Year'].isin([2000, 2015])], x='Life_expectancy', hue='Year', fill=True, palette=sns.color_palette('BrBG', 2)) plt.axvline(x=life[life['Year'] == 2000]['Life_expectancy'].median(), linestyle='--', color='tan') plt.axvline(x=life[life['Year'] == 2015]['Life_expectancy'].median(), linestyle='--', color='darkcyan') plt.text(life[life['Year'] == 2000]['Life_expectancy'].median() + 2, y=0.015, s='Median life expectancy at year 2000', color='tan') plt.text(life[life['Year'] == 2015]['Life_expectancy'].median() + 2, y=0.02, s='Median life expectancy at year 2015', color='darkcyan') plt.xlabel('Life expectancy') plt.title('Average life expectancy of people around the world comparison between 2000 and 2015.') plt.show() sns.pointplot(data=life[life['Year'].isin([2000, 2015])], x='Region', y='Life_expectancy', hue='Year', estimator=np.median, palette=sns.color_palette('BrBG', 2)) plt.xlabel('Regions') plt.ylabel('Life expectancy') plt.xticks(rotation=90) plt.title('Average life expectancy of population in each region comparison between 2000 and 2015') plt.show()
code
128049103/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd life = pd.read_csv('/kaggle/input/life-expectancy-who-updated/Life-Expectancy-Data-Updated.csv') life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] = life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] / 10 life.head()
code
128049103/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns life = pd.read_csv('/kaggle/input/life-expectancy-who-updated/Life-Expectancy-Data-Updated.csv') life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] = life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] / 10 groupyears = life.groupby('Year')['Country'].value_counts() groupyears = groupyears.unstack() missing_country = [] for index, rows in groupyears.iterrows(): for col in groupyears.columns: if groupyears.loc[index, col] != 1: missing_country.append([index, col]) else: None average_timeline = life.groupby('Year')['Life_expectancy'].mean() average_compare = life[life['Year'].isin([2000, 2015])].groupby(['Year', 'Region', 'Country'])['Life_expectancy'].median() average_timeline = life.groupby('Year')['Life_expectancy'].mean() average_compare = life[life['Year'].isin([2000, 2015])].groupby(['Year', 'Region', 'Country'])['Life_expectancy'].median() sns.lineplot(data=average_timeline) plt.xlabel('Years') plt.ylabel('Life expectancy') plt.title('Average life expectancy around the world, since 2000-2015.') plt.show() sns.lineplot(data=life[life['Region'].isin(['Africa', 'Asia', 'North America'])], x='Year', y='Life_expectancy', hue='Region') plt.xlabel('Years') plt.ylabel('Life expectancy') plt.title('Average life expectancy around the world, since 2000-2015.') plt.show()
code
128049103/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd life = pd.read_csv('/kaggle/input/life-expectancy-who-updated/Life-Expectancy-Data-Updated.csv') life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] = life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] / 10 groupyears = life.groupby('Year')['Country'].value_counts() groupyears = groupyears.unstack() missing_country = [] for index, rows in groupyears.iterrows(): for col in groupyears.columns: if groupyears.loc[index, col] != 1: missing_country.append([index, col]) else: None average_timeline = life.groupby('Year')['Life_expectancy'].mean() print('Average life expectancy around the world, since 2000-2015.') print(average_timeline) average_compare = life[life['Year'].isin([2000, 2015])].groupby(['Year', 'Region', 'Country'])['Life_expectancy'].median() print('Average life expectancy comparison between 2000 and 2015 of each area.') print(average_compare)
code
128049103/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd life = pd.read_csv('/kaggle/input/life-expectancy-who-updated/Life-Expectancy-Data-Updated.csv') life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] = life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] / 10 groupyears = life.groupby('Year')['Country'].value_counts() groupyears = groupyears.unstack() missing_country = [] for index, rows in groupyears.iterrows(): for col in groupyears.columns: if groupyears.loc[index, col] != 1: missing_country.append([index, col]) else: None average_timeline = life.groupby('Year')['Life_expectancy'].mean() average_compare = life[life['Year'].isin([2000, 2015])].groupby(['Year', 'Region', 'Country'])['Life_expectancy'].median() average_timeline = life.groupby('Year')['Life_expectancy'].mean() print('Average life expectancy around the world, since 2000-2015.') print(average_timeline) average_compare = life[life['Year'].isin([2000, 2015])].groupby(['Year', 'Region', 'Country'])['Life_expectancy'].median() print('Average life expectancy comparison between 2000 and 2015 of each area.') print(average_compare)
code
128049103/cell_24
[ "image_output_2.png", "image_output_1.png" ]
import pandas as pd import scipy.stats as stats life = pd.read_csv('/kaggle/input/life-expectancy-who-updated/Life-Expectancy-Data-Updated.csv') life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] = life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] / 10 groupyears = life.groupby('Year')['Country'].value_counts() groupyears = groupyears.unstack() missing_country = [] for index, rows in groupyears.iterrows(): for col in groupyears.columns: if groupyears.loc[index, col] != 1: missing_country.append([index, col]) else: None average_timeline = life.groupby('Year')['Life_expectancy'].mean() average_compare = life[life['Year'].isin([2000, 2015])].groupby(['Year', 'Region', 'Country'])['Life_expectancy'].median() average_timeline = life.groupby('Year')['Life_expectancy'].mean() average_compare = life[life['Year'].isin([2000, 2015])].groupby(['Year', 'Region', 'Country'])['Life_expectancy'].median() stat, pvalue = stats.wilcoxon(life[life['Year'] == 2015]['Life_expectancy'], life[life['Year'] == 2000]['Life_expectancy']) alc_mortal = life[['Year', 'Region', 'Country', 'Adult_mortality', 'Alcohol_consumption']] alc_mortal_EU = alc_mortal[alc_mortal['Region'] == 'European Union'] corr, pvalue = stats.spearmanr(alc_mortal_EU['Alcohol_consumption'], alc_mortal_EU['Adult_mortality']) print('There is significant positive correlation between alcohol consumption and adult mortality rate in European population.') print('Correlation coefficient : ', corr) print('P-value : ', pvalue) print('Caution for interpretion : It is important to note that correlation does not necessarily imply causation.')
code
128049103/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd life = pd.read_csv('/kaggle/input/life-expectancy-who-updated/Life-Expectancy-Data-Updated.csv') life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] = life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] / 10 groupyears = life.groupby('Year')['Country'].value_counts() groupyears = groupyears.unstack() missing_country = [] for index, rows in groupyears.iterrows(): for col in groupyears.columns: if groupyears.loc[index, col] != 1: missing_country.append([index, col]) else: None average_timeline = life.groupby('Year')['Life_expectancy'].mean() average_compare = life[life['Year'].isin([2000, 2015])].groupby(['Year', 'Region', 'Country'])['Life_expectancy'].median() average_timeline = life.groupby('Year')['Life_expectancy'].mean() average_compare = life[life['Year'].isin([2000, 2015])].groupby(['Year', 'Region', 'Country'])['Life_expectancy'].median() alc_mortal = life[['Year', 'Region', 'Country', 'Adult_mortality', 'Alcohol_consumption']] print(alc_mortal) print('Adult mortality rate : Probability of dying between 15 and 60 years.') print('Alcohol consumption : Alcohol, recorded per capita (15+) consumption (in litres of pure alcohol).')
code
128049103/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd life = pd.read_csv('/kaggle/input/life-expectancy-who-updated/Life-Expectancy-Data-Updated.csv') life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] = life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] / 10 groupyears = life.groupby('Year')['Country'].value_counts() print(groupyears) groupyears = groupyears.unstack() print(groupyears) missing_country = [] for index, rows in groupyears.iterrows(): for col in groupyears.columns: if groupyears.loc[index, col] != 1: missing_country.append([index, col]) else: None print('Positions in dataframe where have missing country in each year : ', missing_country)
code
128049103/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd life = pd.read_csv('/kaggle/input/life-expectancy-who-updated/Life-Expectancy-Data-Updated.csv') life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] = life[['Adult_mortality', 'Infant_deaths', 'Under_five_deaths']] / 10 groupyears = life.groupby('Year')['Country'].value_counts() groupyears = groupyears.unstack() missing_country = [] for index, rows in groupyears.iterrows(): for col in groupyears.columns: if groupyears.loc[index, col] != 1: missing_country.append([index, col]) else: None average_timeline = life.groupby('Year')['Life_expectancy'].mean() average_compare = life[life['Year'].isin([2000, 2015])].groupby(['Year', 'Region', 'Country'])['Life_expectancy'].median() average_timeline = life.groupby('Year')['Life_expectancy'].mean() average_compare = life[life['Year'].isin([2000, 2015])].groupby(['Year', 'Region', 'Country'])['Life_expectancy'].median() gdp_infant = life.groupby(['Country', 'Year'])[['GDP_per_capita', 'Infant_deaths', 'Under_five_deaths']].mean() gdp_infant = gdp_infant.reset_index() gdp_infant_avg = life.groupby('Country')[['GDP_per_capita', 'Infant_deaths', 'Under_five_deaths']].mean() usa_th_gdp = gdp_infant[gdp_infant['Country'].isin(['United States', 'Thailand'])] print(usa_th_gdp)
code
128049103/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd life = pd.read_csv('/kaggle/input/life-expectancy-who-updated/Life-Expectancy-Data-Updated.csv') print(life.isna().sum())
code
72065286/cell_4
[ "text_plain_output_1.png" ]
string = 'Hello World!' print(string[0] + ' ' + string[6]) print(string[4] + ' ' + string[7]) print(string[2:4] + string[7:11]) print(string[::-1]) print(string[6:]) print(string[:5])
code
72065286/cell_6
[ "text_plain_output_1.png" ]
str1 = 'Welcome2' print('the alphabetic letter is:', str1.isalpha()) print('the lowercase letter is:', str1.islower()) print('the uppercase letter is:', str1.isupper()) print(str1, 'the alphanumeric is:', str1.isalnum()) str2 = 'Hello World!' print('the alphabetic letter is:', str2.isalpha()) print('the lowercase letter is:', str2.islower()) print('the uppercase letter is:', str2.isupper()) str3 = 'Now is the best time ever!' print('the alphabetic letter is:', str3.isalpha()) print('the lowercase letter is:', str3.islower()) print('the uppercase letter is:', str3.isupper()) print('the sentence starts with:', str3.startswith('Now')) print('the sentence ends with:', str3.endswith('Now')) str4 = '500017' print('the alphanumeric is:', str4.isalnum()) print('the digits is:', str4.isdigit()) str5 = 'Iphone 6' print('the alphanumeric is:', str5.isalnum()) print('the digits is:', str5.isdigit())
code
72065286/cell_2
[ "text_plain_output_1.png" ]
print('hello world') print('welcome to python language') print('\nthis is a multi line string\n ')
code
72065286/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
str1 = 'Welcome2' str2 = 'Hello World!' str3 = 'Now is the best time ever!' str4 = '500017' str5 = 'Iphone 6' str1 = input('Enter the your own sentence:') print('The input into title case:', str1.istitle())
code
49130814/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv') df = df.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) df[(df['Age'] >= 40) & (df['Age'] <= 60)]['Age'].count()
code
49130814/cell_9
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv') df = df.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') plt.figure(figsize=(14, 10)) sns.set_context('paper', font_scale=1.4) sns.heatmap(df.isnull(), yticklabels=False, cbar=False, cmap='viridis')
code
49130814/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv') df = df.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv') test = test.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.5) sns.set_style('whitegrid') plt.figure(figsize=(10, 8)) sns.set_context('paper', font_scale=1.5) sns.histplot(x='Age', data=df, hue='satisfaction').set_title('Customer satisfaction by Age')
code
49130814/cell_6
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv') df.head()
code
49130814/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv') df = df.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv') test = test.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.5) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.5) # Graphs of satisfaction customers by Class, Customer Type and Type of Travel. sns.set_style('whitegrid') fig, ax = plt.subplots(1,3, figsize=(18,16)) sns.set_context('paper', font_scale=1.5) ax[0].set_title('Customer Satisfaction by Class') sns.countplot(x='satisfaction', data = df, hue = 'Class', ax=ax[0]) ax[1].set_title('Customer Satisfaction by Customer Type') sns.countplot(x='satisfaction', data = df, hue = 'Customer Type', ax=ax[1]) ax[2].set_title('Customer Satisfaction by Type of Travel') sns.countplot(x='satisfaction', data = df, hue = 'Type of Travel', ax=ax[2]) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) crash_mx = df.corr() # Creates a data frames that contains mean values. For satisfied and neutral or dissatisfied customers # Eco and Eco Plus Classes df_s = df[(df['satisfaction'] != 'neutral or dissatisfied') & (df['Class'] != 'Business')].describe() df_nds = df[(df['satisfaction'] == 'neutral or dissatisfied') & (df['Class'] != 'Business')].describe() # Creates a data frame that contains only a row with mean values for the selected featuers # satisfied df_s_mean = df_s[1:2][['Inflight wifi service', 'Departure/Arrival time convenient', 'Ease of Online booking', 'Gate location', 'Food and drink', 'Online boarding', 'Seat comfort', 'Inflight entertainment', 'On-board service', 'Leg room service', 'Baggage handling','Checkin service', 'Inflight service', 'Cleanliness']] # Changing the name of index from 'mean' to 'satisfied' df_s_mean = df_s_mean.rename(index = {'mean':'satisfied'}) ### # Creates a data frame that contains only a row with mean values for the selected featuers # neutral or dissatisfied df_nds_mean = df_nds[1:2][['Inflight wifi service', 'Departure/Arrival time convenient', 'Ease of Online booking', 'Gate location', 'Food and drink', 'Online boarding', 'Seat comfort', 'Inflight entertainment', 'On-board service', 'Leg room service', 'Baggage handling','Checkin service', 'Inflight service', 'Cleanliness']] # Changing the name of index from 'mean' to 'neutral or dissatisfied' df_nds_mean = df_nds_mean.rename(index = {'mean':'neutral or dissatisfied'}) ### # Combines two data frames into one final_mean = pd.concat([df_nds_mean, df_s_mean]) final_mean final_mean.T.plot(figsize=(16, 10), fontsize=15, kind='bar', title='Mean Grades in Eco and Eco Plus Class')
code
49130814/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv') df = df.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv') test = test.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.5) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.5) # Graphs of satisfaction customers by Class, Customer Type and Type of Travel. sns.set_style('whitegrid') fig, ax = plt.subplots(1,3, figsize=(18,16)) sns.set_context('paper', font_scale=1.5) ax[0].set_title('Customer Satisfaction by Class') sns.countplot(x='satisfaction', data = df, hue = 'Class', ax=ax[0]) ax[1].set_title('Customer Satisfaction by Customer Type') sns.countplot(x='satisfaction', data = df, hue = 'Customer Type', ax=ax[1]) ax[2].set_title('Customer Satisfaction by Type of Travel') sns.countplot(x='satisfaction', data = df, hue = 'Type of Travel', ax=ax[2]) sns.set_style('whitegrid') plt.figure(figsize=(25, 15)) sns.set_context('paper', font_scale=1.4) crash_mx = df.corr() sns.heatmap(crash_mx, annot=True, cmap='Blues')
code
49130814/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv') df = df.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) df['Arrival Delay in Minutes'].mean()
code
49130814/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv') df = df.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv') test = test.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) sns.set_style('whitegrid') plt.figure(figsize=(8, 6)) sns.set_context('paper', font_scale=1.5) sns.countplot(x='satisfaction', data=df).set_title('Neutral or Dissatisfied vs Statisfied')
code
49130814/cell_7
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv') df.info()
code
49130814/cell_28
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv') df = df.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv') test = test.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.5) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.5) # Graphs of satisfaction customers by Class, Customer Type and Type of Travel. sns.set_style('whitegrid') fig, ax = plt.subplots(1,3, figsize=(18,16)) sns.set_context('paper', font_scale=1.5) ax[0].set_title('Customer Satisfaction by Class') sns.countplot(x='satisfaction', data = df, hue = 'Class', ax=ax[0]) ax[1].set_title('Customer Satisfaction by Customer Type') sns.countplot(x='satisfaction', data = df, hue = 'Customer Type', ax=ax[1]) ax[2].set_title('Customer Satisfaction by Type of Travel') sns.countplot(x='satisfaction', data = df, hue = 'Type of Travel', ax=ax[2]) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) crash_mx = df.corr() df_s = df[(df['satisfaction'] != 'neutral or dissatisfied') & (df['Class'] != 'Business')].describe() df_nds = df[(df['satisfaction'] == 'neutral or dissatisfied') & (df['Class'] != 'Business')].describe() df_s_mean = df_s[1:2][['Inflight wifi service', 'Departure/Arrival time convenient', 'Ease of Online booking', 'Gate location', 'Food and drink', 'Online boarding', 'Seat comfort', 'Inflight entertainment', 'On-board service', 'Leg room service', 'Baggage handling', 'Checkin service', 'Inflight service', 'Cleanliness']] df_s_mean = df_s_mean.rename(index={'mean': 'satisfied'}) df_nds_mean = df_nds[1:2][['Inflight wifi service', 'Departure/Arrival time convenient', 'Ease of Online booking', 'Gate location', 'Food and drink', 'Online boarding', 'Seat comfort', 'Inflight entertainment', 'On-board service', 'Leg room service', 'Baggage handling', 'Checkin service', 'Inflight service', 'Cleanliness']] df_nds_mean = df_nds_mean.rename(index={'mean': 'neutral or dissatisfied'}) final_mean = pd.concat([df_nds_mean, df_s_mean]) final_mean
code
49130814/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv') df = df.drop(['Unnamed: 0', 'id'], axis=1) df.info()
code
49130814/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv') df = df.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv') test = test.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') plt.figure(figsize=(14, 10)) sns.set_context('paper', font_scale=1.4) sns.heatmap(test.isnull(), yticklabels=False, cbar=False, cmap='viridis')
code
49130814/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv') df = df.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) np.isnan(df['Arrival Delay in Minutes']).value_counts() df['Arrival Delay in Minutes'] = df['Arrival Delay in Minutes'].fillna(df['Arrival Delay in Minutes'].mean()) np.isnan(df['Arrival Delay in Minutes']).value_counts() test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv') test = test.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) np.isnan(test['Arrival Delay in Minutes']).value_counts()
code
49130814/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv') df = df.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) np.isnan(df['Arrival Delay in Minutes']).value_counts() df['Arrival Delay in Minutes'] = df['Arrival Delay in Minutes'].fillna(df['Arrival Delay in Minutes'].mean()) np.isnan(df['Arrival Delay in Minutes']).value_counts() test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv') test = test.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) np.isnan(test['Arrival Delay in Minutes']).value_counts() test['Arrival Delay in Minutes'] = test['Arrival Delay in Minutes'].fillna(test['Arrival Delay in Minutes'].mean()) np.isnan(test['Arrival Delay in Minutes']).value_counts()
code
49130814/cell_24
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv') df = df.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv') test = test.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.5) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.5) sns.set_style('whitegrid') fig, ax = plt.subplots(1, 3, figsize=(18, 16)) sns.set_context('paper', font_scale=1.5) ax[0].set_title('Customer Satisfaction by Class') sns.countplot(x='satisfaction', data=df, hue='Class', ax=ax[0]) ax[1].set_title('Customer Satisfaction by Customer Type') sns.countplot(x='satisfaction', data=df, hue='Customer Type', ax=ax[1]) ax[2].set_title('Customer Satisfaction by Type of Travel') sns.countplot(x='satisfaction', data=df, hue='Type of Travel', ax=ax[2])
code
49130814/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv') test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv') test = test.drop(['Unnamed: 0', 'id'], axis=1) test.info()
code
49130814/cell_22
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv') df = df.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) df[df['Age'] < 40]['Age'].count()
code
49130814/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv') df = df.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) np.isnan(df['Arrival Delay in Minutes']).value_counts()
code
49130814/cell_27
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv') df = df.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) test = pd.read_csv('../input/airline-passenger-satisfaction/test.csv') test = test.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.5) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.5) # Graphs of satisfaction customers by Class, Customer Type and Type of Travel. sns.set_style('whitegrid') fig, ax = plt.subplots(1,3, figsize=(18,16)) sns.set_context('paper', font_scale=1.5) ax[0].set_title('Customer Satisfaction by Class') sns.countplot(x='satisfaction', data = df, hue = 'Class', ax=ax[0]) ax[1].set_title('Customer Satisfaction by Customer Type') sns.countplot(x='satisfaction', data = df, hue = 'Customer Type', ax=ax[1]) ax[2].set_title('Customer Satisfaction by Type of Travel') sns.countplot(x='satisfaction', data = df, hue = 'Type of Travel', ax=ax[2]) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) crash_mx = df.corr() df[df['Class'] != 'Business'].describe()
code
49130814/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns df = pd.read_csv('../input/airline-passenger-satisfaction/train.csv') df = df.drop(['Unnamed: 0', 'id'], axis=1) sns.set_style('whitegrid') sns.set_context('paper', font_scale=1.4) np.isnan(df['Arrival Delay in Minutes']).value_counts() df['Arrival Delay in Minutes'] = df['Arrival Delay in Minutes'].fillna(df['Arrival Delay in Minutes'].mean()) np.isnan(df['Arrival Delay in Minutes']).value_counts()
code
18105196/cell_11
[ "text_plain_output_1.png" ]
from tensorflow import keras x_train = x_train.reshape(-1, 28, 28, 1).astype('float32') / 255 x_val = x_val.reshape(-1, 28, 28, 1).astype('float32') / 255 y_train = keras.utils.to_categorical(y_train) y_val = keras.utils.to_categorical(y_val) model = keras.models.Sequential([keras.layers.Conv2D(32, kernel_size=3, activation='relu', input_shape=(28, 28, 1)), keras.layers.BatchNormalization(), keras.layers.Conv2D(32, kernel_size=3, activation='relu'), keras.layers.BatchNormalization(), keras.layers.Conv2D(32, kernel_size=5, strides=2, padding='same', activation='relu'), keras.layers.BatchNormalization(), keras.layers.Dropout(rate=0.4), keras.layers.Conv2D(64, kernel_size=3, activation='relu'), keras.layers.BatchNormalization(), keras.layers.Conv2D(64, kernel_size=3, activation='relu'), keras.layers.BatchNormalization(), keras.layers.Conv2D(64, kernel_size=5, strides=2, padding='same', activation='relu'), keras.layers.BatchNormalization(), keras.layers.Dropout(rate=0.4), keras.layers.Flatten(), keras.layers.Dense(128, activation='relu'), keras.layers.BatchNormalization(), keras.layers.Dropout(rate=0.4), keras.layers.Dense(10, activation='softmax')]) model.summary() datagen = keras.preprocessing.image.ImageDataGenerator(zoom_range=0.1, height_shift_range=0.1, width_shift_range=0.1, rotation_range=10) datagen.fit(x_train) model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(lr=0.0001), metrics=['accuracy']) batch_size = 32 epochs = 25 model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size), epochs=epochs, verbose=2, validation_data=(x_val, y_val), steps_per_epoch=x_train.shape[0] // batch_size)
code
18105196/cell_8
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from tensorflow import keras y_train = keras.utils.to_categorical(y_train) y_val = keras.utils.to_categorical(y_val) model = keras.models.Sequential([keras.layers.Conv2D(32, kernel_size=3, activation='relu', input_shape=(28, 28, 1)), keras.layers.BatchNormalization(), keras.layers.Conv2D(32, kernel_size=3, activation='relu'), keras.layers.BatchNormalization(), keras.layers.Conv2D(32, kernel_size=5, strides=2, padding='same', activation='relu'), keras.layers.BatchNormalization(), keras.layers.Dropout(rate=0.4), keras.layers.Conv2D(64, kernel_size=3, activation='relu'), keras.layers.BatchNormalization(), keras.layers.Conv2D(64, kernel_size=3, activation='relu'), keras.layers.BatchNormalization(), keras.layers.Conv2D(64, kernel_size=5, strides=2, padding='same', activation='relu'), keras.layers.BatchNormalization(), keras.layers.Dropout(rate=0.4), keras.layers.Flatten(), keras.layers.Dense(128, activation='relu'), keras.layers.BatchNormalization(), keras.layers.Dropout(rate=0.4), keras.layers.Dense(10, activation='softmax')]) model.summary()
code
106211640/cell_7
[ "text_plain_output_1.png" ]
import os ALPHABET_SIZE = 256 def badCharHeuristic(string, size): badChar = [-1] * ALPHABET_SIZE for i in range(size): badChar[ord(string[i])] = i return badChar def BMMatch(text, pattern): text = text.lower() pattern = pattern.lower() counter = 0 m = len(pattern) n = len(text) badChar = badCharHeuristic(pattern, m) s = 0 while s <= n - m: j = m - 1 while j >= 0 and pattern[j] == text[s + j]: counter += 1 j -= 1 if j < 0: return s else: counter += 1 s += max(1, j - badChar[ord(text[s + j])]) return counter import os os.listdir('/kaggle/input') with open('../input/norskplaceholdertekst/Ibsen-PeerGynt2.txt', 'r') as file: text = file.read() patterns = ['prins', 'slutt', 'konge', 'lille', 'hjælp', 'kavri', 'serri', 'jenta', 'jente', 'stemm', 'elven', 'ørken', 'banan', 'bringe', 'vejen', 'vegen', 'veien', 'danse'] comparisons = [] for i in patterns: counter = BMMatch(text, i) comparisons.append(counter) comp_per_char = [] for i in comparisons: comp_per_char.append(i / len(text)) def average(list): return sum(list) / len(list) average(comp_per_char)
code
106211640/cell_5
[ "text_plain_output_1.png" ]
import os
ALPHABET_SIZE = 256

def badCharHeuristic(string, size):
    badChar = [-1] * ALPHABET_SIZE
    for i in range(size):
        badChar[ord(string[i])] = i
    return badChar

def BMMatch(text, pattern):
    text = text.lower()
    pattern = pattern.lower()
    counter = 0
    m = len(pattern)
    n = len(text)
    badChar = badCharHeuristic(pattern, m)
    s = 0
    while s <= n - m:
        j = m - 1
        while j >= 0 and pattern[j] == text[s + j]:
            counter += 1
            j -= 1
        if j < 0:
            return s
        else:
            counter += 1
            s += max(1, j - badChar[ord(text[s + j])])
    return counter

import os
os.listdir('/kaggle/input')
with open('../input/norskplaceholdertekst/Ibsen-PeerGynt2.txt', 'r') as file:
    text = file.read()
patterns = ['prins', 'slutt', 'konge', 'lille', 'hjælp', 'kavri', 'serri', 'jenta', 'jente', 'stemm', 'elven', 'ørken', 'banan', 'bringe', 'vejen', 'vegen', 'veien', 'danse']
comparisons = []
for i in patterns:
    counter = BMMatch(text, i)
    comparisons.append(counter)
comp_per_char = []
for i in comparisons:
    comp_per_char.append(i / len(text))
code
73081589/cell_2
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
X_full.describe(include='all')
code
73081589/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
73081589/cell_3
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
X_full.head()
code
73081589/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
X_full.isna().sum()
code
33106636/cell_4
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
df_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
print('Train shape:', df_train.shape)
print('Test Shape:', df_test.shape)
code
33106636/cell_20
[ "image_output_1.png" ]
from warnings import filterwarnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.set(style='darkgrid')
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from xgboost import XGBRegressor
from warnings import filterwarnings
filterwarnings('ignore')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
df_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
X_trainfull = df_train.drop(['SalePrice'], axis=1)
y = df_train.SalePrice
y = np.log1p(y)
d_temp = X_trainfull.isna().sum().sort_values(ascending=False)
d_temp = d_temp[d_temp > 0]
d_temp = d_temp / df_train.shape[0] * 100
plt.xlim(0, 100)
na_index = d_temp[d_temp > 20].index
X_trainfull.drop(na_index, axis=1, inplace=True)
num_cols = X_trainfull.corrwith(y).abs().sort_values(ascending=False).index
X_num = X_trainfull[num_cols]
X_cat = X_trainfull.drop(num_cols, axis=1)
X_num.sample(5)
code
33106636/cell_8
[ "text_plain_output_1.png" ]
from warnings import filterwarnings
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.set(style='darkgrid')
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from xgboost import XGBRegressor
from warnings import filterwarnings
filterwarnings('ignore')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
df_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
X_trainfull = df_train.drop(['SalePrice'], axis=1)
y = df_train.SalePrice
plt.figure(figsize=(8, 4))
plt.title('Distribution of Sales Price (y)')
sns.distplot(y)
plt.show()
code
33106636/cell_24
[ "image_output_1.png" ]
from warnings import filterwarnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.set(style='darkgrid')
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from xgboost import XGBRegressor
from warnings import filterwarnings
filterwarnings('ignore')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
df_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
X_trainfull = df_train.drop(['SalePrice'], axis=1)
y = df_train.SalePrice
y = np.log1p(y)
d_temp = X_trainfull.isna().sum().sort_values(ascending=False)
d_temp = d_temp[d_temp > 0]
d_temp = d_temp / df_train.shape[0] * 100
plt.xlim(0, 100)
na_index = d_temp[d_temp > 20].index
X_trainfull.drop(na_index, axis=1, inplace=True)
num_cols = X_trainfull.corrwith(y).abs().sort_values(ascending=False).index
X_num = X_trainfull[num_cols]
X_cat = X_trainfull.drop(num_cols, axis=1)
X_num.sample(5)
high_corr_num = X_num.corrwith(y)[X_num.corrwith(y).abs() > 0.5].index
X_num = X_num[high_corr_num]
plt.figure(figsize=(10, 6))
sns.heatmap(X_num.corr(), annot=True, cmap='coolwarm')
plt.show()
print('Correlation of Each feature with target')
X_num.corrwith(y)
code
33106636/cell_10
[ "text_plain_output_1.png" ]
from warnings import filterwarnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.set(style='darkgrid')
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from xgboost import XGBRegressor
from warnings import filterwarnings
filterwarnings('ignore')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
df_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
X_trainfull = df_train.drop(['SalePrice'], axis=1)
y = df_train.SalePrice
y = np.log1p(y)
plt.figure(figsize=(8, 4))
plt.title('Distribution of log Sales Price (y)')
sns.distplot(y)
plt.xlabel('Log of Sales Price')
plt.show()
code
33106636/cell_12
[ "image_output_1.png" ]
from warnings import filterwarnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.set(style='darkgrid')
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from xgboost import XGBRegressor
from warnings import filterwarnings
filterwarnings('ignore')
df_test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
df_train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
X_trainfull = df_train.drop(['SalePrice'], axis=1)
y = df_train.SalePrice
y = np.log1p(y)
d_temp = X_trainfull.isna().sum().sort_values(ascending=False)
d_temp = d_temp[d_temp > 0]
d_temp = d_temp / df_train.shape[0] * 100
plt.figure(figsize=(8, 5))
plt.title('Features Vs Percentage Of Null Values')
sns.barplot(y=d_temp.index, x=d_temp, orient='h')
plt.xlim(0, 100)
plt.xlabel('Null Values (%)')
plt.show()
code
89132547/cell_13
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.isnull().sum()
df.describe()
code
89132547/cell_9
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.isnull().sum()
code
89132547/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
df = pd.read_csv('../input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.isnull().sum()
customertype_x_city = df.groupby('City')['Customer type'].value_counts()
customertype_x_city
#visualize Customer type per City
best_payment_x_city_bar = px.histogram(df, x='Customer type', color='City')
best_payment_x_city_bar.show()
best_payment_x_city = df.groupby('City')['Payment'].value_counts()
best_payment_x_city
#visualize payment per City
best_payment_x_city_bar = px.histogram(df, x='Payment', color='City')
best_payment_x_city_bar.show()
total_per_date = px.bar(df, x='Total', y='City', color='City', animation_frame='Date', animation_group='City')
total_per_date.show()
code
89132547/cell_6
[ "text_html_output_2.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.head()
code
89132547/cell_2
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input/supermarket-sales'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
89132547/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
df = pd.read_csv('../input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.isnull().sum()
customertype_x_city = df.groupby('City')['Customer type'].value_counts()
customertype_x_city
#visualize Customer type per City
best_payment_x_city_bar = px.histogram(df, x='Customer type', color='City')
best_payment_x_city_bar.show()
best_payment_x_city = df.groupby('City')['Payment'].value_counts()
best_payment_x_city
best_payment_x_city_bar = px.histogram(df, x='Payment', color='City')
best_payment_x_city_bar.show()
code
89132547/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.info()
code
89132547/cell_18
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.isnull().sum()
customertype_x_city = df.groupby('City')['Customer type'].value_counts()
customertype_x_city
best_payment_x_city = df.groupby('City')['Payment'].value_counts()
best_payment_x_city
code
89132547/cell_8
[ "text_html_output_1.png" ]
!mitosheet
code
89132547/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.isnull().sum()
df['Customer type'].value_counts()
code
89132547/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.isnull().sum()
customertype_x_city = df.groupby('City')['Customer type'].value_counts()
customertype_x_city
code
89132547/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
df = pd.read_csv('../input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.isnull().sum()
customertype_x_city = df.groupby('City')['Customer type'].value_counts()
customertype_x_city
#visualize Customer type per City
best_payment_x_city_bar = px.histogram(df, x='Customer type', color='City')
best_payment_x_city_bar.show()
code
89132547/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.isnull().sum()
df['Gender'].value_counts()
code
89132547/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/supermarket-sales/supermarket_sales - Sheet1.csv')
df.isnull().sum()
df['Invoice ID'].duplicated().sum()
code
89125628/cell_56
[ "image_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import StandardScaler
details = {'col1': [1, 3, 5, 7, 9], 'col2': [7, 4, 35, 14, 56]}
df = pd.DataFrame(details)
scaler = StandardScaler()
df = scaler.fit_transform(df)
df = pd.DataFrame(df)
plt = df.plot.bar()
df = pd.DataFrame([[180000, 110, 18.9, 1400], [360000, 905, 23.4, 1800], [230000, 230, 14.0, 1300], [60000, 450, 13.5, 1500]], columns=['Col A', 'Col B', 'Col C', 'Col D'])
import matplotlib.pyplot as plt
plt = df.plot.bar()
code
89125628/cell_54
[ "text_html_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import StandardScaler
details = {'col1': [1, 3, 5, 7, 9], 'col2': [7, 4, 35, 14, 56]}
df = pd.DataFrame(details)
scaler = StandardScaler()
df = scaler.fit_transform(df)
df = pd.DataFrame(df)
plt = df.plot.bar()
df = pd.DataFrame([[180000, 110, 18.9, 1400], [360000, 905, 23.4, 1800], [230000, 230, 14.0, 1300], [60000, 450, 13.5, 1500]], columns=['Col A', 'Col B', 'Col C', 'Col D'])
display(df)
code
89125628/cell_50
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import pandas as pd
from sklearn.preprocessing import StandardScaler
details = {'col1': [1, 3, 5, 7, 9], 'col2': [7, 4, 35, 14, 56]}
df = pd.DataFrame(details)
print(df)
scaler = StandardScaler()
df = scaler.fit_transform(df)
df = pd.DataFrame(df)
print(df)
code