Dataset schema (one record per notebook cell; fields appear in this order below):
- path: string, length 13–17 (notebook id / cell id)
- screenshot_names: sequence of strings, length 1–873 (names of the cell's output screenshots)
- code: string, length 0–40.4k (the cell's source code)
- cell_type: string, 1 distinct value ("code")
18147692/cell_45
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
data_age1 = dataset.groupby('age').mean()
data_age1.reset_index(inplace=True)
datacount = dataset.groupby('age').count()
datacount = datacount.reset_index()
data1000 = datacount[datacount['tenure'] >= 1000]
dataset_1k = data_age1.loc[data_age1['age'].isin(data1000['age'])]
dataset_1k[dataset_1k['tenure'] == dataset_1k['tenure'].max()]
data2000 = datacount[datacount['tenure'] >= 2000]
dataset_2k = data_age1.loc[data_age1['age'].isin(data2000['age'])]
dataset_2k[dataset_2k['tenure'] == dataset_2k['tenure'].max()]
dataset_2k['mobile_surfing'] = dataset_2k.mobile_likes + dataset_2k.mobile_likes_received
dataset_2k['web_surfing'] = dataset_2k.www_likes + dataset_2k.www_likes_received
dataset_2k[dataset_2k['mobile_likes'] == dataset_2k['mobile_likes'].max()]
code
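A note on the recurring pattern in these 18147692 cells: aggregate per group, then keep the row(s) where one column hits its maximum. A minimal sketch of that pattern on made-up data (values are invented for illustration):

import pandas as pd

# toy stand-in for the grouped Facebook data
df = pd.DataFrame({'age': [20, 20, 30, 30, 40],
                   'tenure': [100, 300, 250, 350, 50]})

by_age = df.groupby('age', as_index=False).mean()   # one row per age
by_age[by_age['tenure'] == by_age['tenure'].max()]  # row(s) with the largest mean tenure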
18147692/cell_32
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
data_age1 = dataset.groupby('age').mean()
datacount = dataset.groupby('age').count()
datacount = datacount.reset_index()
data1000 = datacount[datacount['tenure'] >= 1000]
dataset_gender_male = dataset[dataset['gender'] == 'male']
dataset_gender_female = dataset[dataset['gender'] == 'female']
dataset_gender_male.shape
code
18147692/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
data_age1 = dataset.groupby('age').mean()
data_age1.head()
code
18147692/cell_15
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
data_age1 = dataset.groupby('age').mean()
data_age1.reset_index(inplace=True)
plt.bar(data_age1.age, data_age1.tenure)
plt.xlabel('Age')
plt.ylabel('Tenure')
code
18147692/cell_38
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
data_age1 = dataset.groupby('age').mean()
data_age1.reset_index(inplace=True)
datacount = dataset.groupby('age').count()
datacount = datacount.reset_index()
data1000 = datacount[datacount['tenure'] >= 1000]
dataset_1k = data_age1.loc[data_age1['age'].isin(data1000['age'])]
dataset_1k[dataset_1k['tenure'] == dataset_1k['tenure'].max()]
data2000 = datacount[datacount['tenure'] >= 2000]
dataset_2k = data_age1.loc[data_age1['age'].isin(data2000['age'])]
dataset_2k[dataset_2k['tenure'] == dataset_2k['tenure'].max()]
data_age1['mobile_surfing'] = data_age1.mobile_likes + data_age1.mobile_likes_received
data_age1['web_surfing'] = data_age1.www_likes + data_age1.www_likes_received
data_age1[data_age1.mobile_surfing > data_age1.web_surfing].shape
code
18147692/cell_3
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.head()
code
18147692/cell_17
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
data_age1 = dataset.groupby('age').mean()
data_age1.reset_index(inplace=True)
data_age1[data_age1['tenure'] == data_age1['tenure'].max()]
code
18147692/cell_35
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
data_age1 = dataset.groupby('age').mean()
data_age1.reset_index(inplace=True)
datacount = dataset.groupby('age').count()
datacount = datacount.reset_index()
data1000 = datacount[datacount['tenure'] >= 1000]
dataset_1k = data_age1.loc[data_age1['age'].isin(data1000['age'])]
dataset_1k[dataset_1k['tenure'] == dataset_1k['tenure'].max()]
data2000 = datacount[datacount['tenure'] >= 2000]
dataset_2k = data_age1.loc[data_age1['age'].isin(data2000['age'])]
dataset_2k[dataset_2k['tenure'] == dataset_2k['tenure'].max()]
data_age1['mobile_surfing'] = data_age1.mobile_likes + data_age1.mobile_likes_received
data_age1['web_surfing'] = data_age1.www_likes + data_age1.www_likes_received
data_age1.head()
code
18147692/cell_43
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
data_age1 = dataset.groupby('age').mean()
data_age1.reset_index(inplace=True)
datacount = dataset.groupby('age').count()
datacount = datacount.reset_index()
data1000 = datacount[datacount['tenure'] >= 1000]
dataset_1k = data_age1.loc[data_age1['age'].isin(data1000['age'])]
dataset_1k[dataset_1k['tenure'] == dataset_1k['tenure'].max()]
data2000 = datacount[datacount['tenure'] >= 2000]
dataset_2k = data_age1.loc[data_age1['age'].isin(data2000['age'])]
dataset_2k[dataset_2k['tenure'] == dataset_2k['tenure'].max()]
dataset_2k['mobile_surfing'] = dataset_2k.mobile_likes + dataset_2k.mobile_likes_received
dataset_2k['web_surfing'] = dataset_2k.www_likes + dataset_2k.www_likes_received
dataset_2k[dataset_2k['mobile_surfing'] == dataset_2k['mobile_surfing'].max()]
code
18147692/cell_24
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
data_age1 = dataset.groupby('age').mean()
data_age1.reset_index(inplace=True)
datacount = dataset.groupby('age').count()
datacount = datacount.reset_index()
data1000 = datacount[datacount['tenure'] >= 1000]
dataset_1k = data_age1.loc[data_age1['age'].isin(data1000['age'])]
dataset_1k[dataset_1k['tenure'] == dataset_1k['tenure'].max()]
data2000 = datacount[datacount['tenure'] >= 2000]
dataset_2k = data_age1.loc[data_age1['age'].isin(data2000['age'])]
dataset_2k[dataset_2k['tenure'] == dataset_2k['tenure'].max()]
data_age1[data_age1['friend_count'] == data_age1['friend_count'].max()]
code
18147692/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
data_age1 = dataset.groupby('age').mean()
data_age1.reset_index(inplace=True)
datacount = dataset.groupby('age').count()
datacount = datacount.reset_index()
data1000 = datacount[datacount['tenure'] >= 1000]
dataset_1k = data_age1.loc[data_age1['age'].isin(data1000['age'])]
dataset_1k[dataset_1k['tenure'] == dataset_1k['tenure'].max()]
data2000 = datacount[datacount['tenure'] >= 2000]
dataset_2k = data_age1.loc[data_age1['age'].isin(data2000['age'])]
dataset_2k[dataset_2k['tenure'] == dataset_2k['tenure'].max()]
code
18147692/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
data_age1 = dataset.groupby('age').mean()
data_age1.reset_index(inplace=True)
data_age1.head()
code
18147692/cell_27
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
data_age1 = dataset.groupby('age').mean()
data_age1.reset_index(inplace=True)
datacount = dataset.groupby('age').count()
datacount = datacount.reset_index()
data1000 = datacount[datacount['tenure'] >= 1000]
dataset_1k = data_age1.loc[data_age1['age'].isin(data1000['age'])]
dataset_1k[dataset_1k['tenure'] == dataset_1k['tenure'].max()]
dataset_1k[dataset_1k['friend_count'] == dataset_1k['friend_count'].max()]
code
18147692/cell_36
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
data_age1 = dataset.groupby('age').mean()
data_age1.reset_index(inplace=True)
datacount = dataset.groupby('age').count()
datacount = datacount.reset_index()
data1000 = datacount[datacount['tenure'] >= 1000]
dataset_1k = data_age1.loc[data_age1['age'].isin(data1000['age'])]
dataset_1k[dataset_1k['tenure'] == dataset_1k['tenure'].max()]
data2000 = datacount[datacount['tenure'] >= 2000]
dataset_2k = data_age1.loc[data_age1['age'].isin(data2000['age'])]
dataset_2k[dataset_2k['tenure'] == dataset_2k['tenure'].max()]
data_age1['mobile_surfing'] = data_age1.mobile_likes + data_age1.mobile_likes_received
data_age1['web_surfing'] = data_age1.www_likes + data_age1.www_likes_received
plt.figure(figsize=(10, 7))
plt.plot('age', 'mobile_surfing', 'bv--', data=data_age1)
plt.plot('age', 'web_surfing', 'r*-', data=data_age1)
plt.xlabel('Age')
plt.ylabel('Mobile and Web Surfing')
plt.title('Age Vs Surfing')
plt.legend()
code
105203552/cell_21
[ "text_plain_output_5.png", "text_plain_output_4.png", "image_output_5.png", "text_plain_output_6.png", "text_plain_output_3.png", "image_output_4.png", "image_output_6.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns
train_df[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
105203552/cell_9
[ "image_output_2.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns
train_df.info()
code
105203552/cell_23
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns
train_df[['Parch', 'Survived']].groupby(['Parch'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
105203552/cell_30
[ "text_html_output_1.png" ]
from collections import Counter
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        Q1 = np.percentile(df[c], 25)
        Q3 = np.percentile(df[c], 75)
        IQR = Q3 - Q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = [i for i, v in outlier_indices.items() if v > 2]
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)  # fixed: original assigned to the misspelled 'tarin_df', so the drop never took effect
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.head()
code
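detect_outliers above is Tukey's 1.5×IQR rule applied per feature, keeping only rows flagged as outliers in more than two features. A self-contained sketch of the same logic on synthetic data:

import numpy as np
import pandas as pd
from collections import Counter

rng = np.random.default_rng(0)
df = pd.DataFrame(rng.normal(size=(100, 3)), columns=['a', 'b', 'c'])
df.iloc[0] = 50  # plant a row that is extreme in every feature

def detect_outliers(df, features, min_hits=2):
    hits = []
    for c in features:
        q1, q3 = np.percentile(df[c], [25, 75])
        step = 1.5 * (q3 - q1)                      # Tukey fence width
        hits.extend(df[(df[c] < q1 - step) | (df[c] > q3 + step)].index)
    counts = Counter(hits)
    return [i for i, v in counts.items() if v > min_hits]

print(detect_outliers(df, ['a', 'b', 'c']))  # -> [0]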
105203552/cell_33
[ "text_html_output_1.png" ]
from collections import Counter
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        Q1 = np.percentile(df[c], 25)
        Q3 = np.percentile(df[c], 75)
        IQR = Q3 - Q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = [i for i, v in outlier_indices.items() if v > 2]
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)  # fixed typo: 'tarin_df'
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
code
105203552/cell_44
[ "text_html_output_1.png" ]
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings

plt.style.use('seaborn-whitegrid')
warnings.filterwarnings('ignore')

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns

def bar_plot(variable):
    """
    input: variable eg: "Sex"
    output: bar plot & value count
    """
    var = train_df[variable]
    varValue = var.value_counts()
    plt.xticks(varValue.index, varValue.index.values)

def plot_hist(variable):
    pass

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        Q1 = np.percentile(df[c], 25)
        Q3 = np.percentile(df[c], 75)
        IQR = Q3 - Q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = [i for i, v in outlier_indices.items() if v > 2]
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)  # fixed typo: 'tarin_df'
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
sns.factorplot(x='Sex', y='Age', data=train_df, kind='box')
plt.show()
code
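sns.factorplot, used in the cell above, was deprecated in seaborn 0.9 and later removed; its replacement is sns.catplot with the same kind argument. A sketch of the equivalent call on a recent seaborn, using the bundled tips dataset as stand-in data:

import matplotlib.pyplot as plt
import seaborn as sns

tips = sns.load_dataset('tips')  # example data shipped with seaborn
sns.catplot(x='sex', y='total_bill', data=tips, kind='box')  # modern spelling of factorplot(..., kind='box')
plt.show()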
105203552/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns
train_df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
105203552/cell_6
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns
train_df.tail()
code
105203552/cell_39
[ "text_html_output_1.png" ]
from collections import Counter
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        Q1 = np.percentile(df[c], 25)
        Q3 = np.percentile(df[c], 75)
        IQR = Q3 - Q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = [i for i, v in outlier_indices.items() if v > 2]
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)  # fixed typo: 'tarin_df'
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
train_df[train_df['Pclass'] == 3]
code
105203552/cell_26
[ "text_html_output_1.png" ]
from collections import Counter
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        Q1 = np.percentile(df[c], 25)
        Q3 = np.percentile(df[c], 75)
        IQR = Q3 - Q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = [i for i, v in outlier_indices.items() if v > 2]
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
code
105203552/cell_41
[ "text_plain_output_1.png", "image_output_1.png" ]
from collections import Counter
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        Q1 = np.percentile(df[c], 25)
        Q3 = np.percentile(df[c], 75)
        IQR = Q3 - Q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = [i for i, v in outlier_indices.items() if v > 2]
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)  # fixed typo: 'tarin_df'
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
train_df[train_df['Fare'].isnull()]
code
105203552/cell_2
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
import seaborn as sns
from collections import Counter
import warnings
warnings.filterwarnings('ignore')
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105203552/cell_7
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns
train_df.describe()
code
105203552/cell_45
[ "text_html_output_1.png" ]
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings

plt.style.use('seaborn-whitegrid')
warnings.filterwarnings('ignore')

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns

def bar_plot(variable):
    """
    input: variable eg: "Sex"
    output: bar plot & value count
    """
    var = train_df[variable]
    varValue = var.value_counts()
    plt.xticks(varValue.index, varValue.index.values)

def plot_hist(variable):
    pass

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        Q1 = np.percentile(df[c], 25)
        Q3 = np.percentile(df[c], 75)
        IQR = Q3 - Q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = [i for i, v in outlier_indices.items() if v > 2]
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)  # fixed typo: 'tarin_df'
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
sns.factorplot(x='Sex', y='Age', hue='Pclass', data=train_df, kind='box')
plt.show()
code
105203552/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
import numpy as np
import seaborn as sns
from collections import Counter

plt.style.use('seaborn-whitegrid')
warnings.filterwarnings('ignore')

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns

def bar_plot(variable):
    """
    input: variable eg: "Sex"
    output: bar plot & value count
    """
    var = train_df[variable]
    varValue = var.value_counts()
    plt.xticks(varValue.index, varValue.index.values)

def plot_hist(variable):
    pass

numericVar = ['Fare', 'Age', 'PassengerId']
for n in numericVar:
    plot_hist(n)
code
105203552/cell_32
[ "text_html_output_1.png" ]
from collections import Counter
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        Q1 = np.percentile(df[c], 25)
        Q3 = np.percentile(df[c], 75)
        IQR = Q3 - Q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = [i for i, v in outlier_indices.items() if v > 2]
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)  # fixed typo: 'tarin_df'
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
code
105203552/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns
category2 = ['Cabin', 'Name', 'Ticket']
for c in category2:
    print('{} \n'.format(train_df[c].value_counts()))
code
105203552/cell_38
[ "text_plain_output_1.png" ]
from collections import Counter
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        Q1 = np.percentile(df[c], 25)
        Q3 = np.percentile(df[c], 75)
        IQR = Q3 - Q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = [i for i, v in outlier_indices.items() if v > 2]
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)  # fixed typo: 'tarin_df'
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
train_df[train_df['Fare'].isnull()]
code
105203552/cell_35
[ "text_html_output_1.png" ]
from collections import Counter
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        Q1 = np.percentile(df[c], 25)
        Q3 = np.percentile(df[c], 75)
        IQR = Q3 - Q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = [i for i, v in outlier_indices.items() if v > 2]
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)  # fixed typo: 'tarin_df'
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
train_df[train_df['Embarked'].isnull()]
code
105203552/cell_43
[ "text_html_output_1.png" ]
from collections import Counter
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        Q1 = np.percentile(df[c], 25)
        Q3 = np.percentile(df[c], 75)
        IQR = Q3 - Q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = [i for i, v in outlier_indices.items() if v > 2]
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)  # fixed typo: 'tarin_df'
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
train_df[train_df['Age'].isnull()]
code
105203552/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import warnings
import numpy as np
import seaborn as sns
from collections import Counter

plt.style.use('seaborn-whitegrid')
warnings.filterwarnings('ignore')

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns

def bar_plot(variable):
    """
    input: variable eg: "Sex"
    output: bar plot & value count
    """
    var = train_df[variable]
    varValue = var.value_counts()
    plt.xticks(varValue.index, varValue.index.values)

category1 = ['Survived', 'Sex', 'Pclass', 'Embarked', 'SibSp', 'Parch']
for c in category1:
    bar_plot(c)
code
105203552/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns
train_df[['SibSp', 'Survived']].groupby(['SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
105203552/cell_37
[ "text_plain_output_1.png" ]
from collections import Counter
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        Q1 = np.percentile(df[c], 25)
        Q3 = np.percentile(df[c], 75)
        IQR = Q3 - Q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = [i for i, v in outlier_indices.items() if v > 2]
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)  # fixed typo: 'tarin_df'
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
train_df['Embarked'] = train_df['Embarked'].fillna('C')
train_df[train_df['Embarked'].isnull()]
code
105203552/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns
code
105203552/cell_36
[ "text_html_output_1.png" ]
from collections import Counter
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.columns

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        Q1 = np.percentile(df[c], 25)
        Q3 = np.percentile(df[c], 75)
        IQR = Q3 - Q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = [i for i, v in outlier_indices.items() if v > 2]
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detect_outliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)  # fixed typo: 'tarin_df'
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
train_df.boxplot(column='Fare', by='Embarked')
code
106199562/cell_15
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

headers = ['symboling', 'normalized-losses', 'make', 'fuel-type', 'aspiration', 'num-of-doors',
           'body-style', 'drive-wheels', 'engine-location', 'wheel-base', 'length', 'width',
           'height', 'curb-weight', 'engine-type', 'num-of-cylinders', 'engine-size',
           'fuel-system', 'bore', 'stroke', 'compression-ratio', 'horsepower', 'peak-rpm',
           'city-mpg', 'highway-mpg', 'price']
df = pd.read_csv('../input/ucmachinelearning/imports-85.data', names=headers)
df.replace('?', np.nan, inplace=True)
df.head(5)
code
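The replace('?', np.nan) step above turns the auto dataset's '?' placeholders into real missing values, so missing-data tooling and numeric casts work afterwards. A small sketch with invented values:

import numpy as np
import pandas as pd

df = pd.DataFrame({'horsepower': ['111', '?', '154'],
                   'price': ['13495', '16500', '?']})
df = df.replace('?', np.nan)             # placeholders become NaN
print(df.isnull().sum())                 # per-column missing counts
df['price'] = df['price'].astype(float)  # numeric parsing now succeeds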
106199562/cell_12
[ "text_html_output_1.png" ]
import pandas as pd

headers = ['symboling', 'normalized-losses', 'make', 'fuel-type', 'aspiration', 'num-of-doors',
           'body-style', 'drive-wheels', 'engine-location', 'wheel-base', 'length', 'width',
           'height', 'curb-weight', 'engine-type', 'num-of-cylinders', 'engine-size',
           'fuel-system', 'bore', 'stroke', 'compression-ratio', 'horsepower', 'peak-rpm',
           'city-mpg', 'highway-mpg', 'price']
df = pd.read_csv('../input/ucmachinelearning/imports-85.data', names=headers)
df.head(10)
code
16153941/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn

filename = '../input/bike_sharing_hourly.csv'
data = pd.read_csv(filename)
data.isnull().sum()
data.rename(columns={'weathersit': 'weather', 'mnth': 'month', 'hr': 'hour', 'yr': 'year',
                     'hum': 'humidity', 'cnt': 'count'}, inplace=True)
data.dtypes
data = data.drop(['instant', 'dteday'], axis=1)
data['year'] = data.year.astype('category')
data['season'] = data.season.astype('category')
data['month'] = data.month.astype('category')
data['hour'] = data.hour.astype('category')
data['holiday'] = data.holiday.astype('category')
data['weekday'] = data.weekday.astype('category')
data['workingday'] = data.workingday.astype('category')
data['weather'] = data.weather.astype('category')
data.dtypes

## Exploratory Data Analysis
# Analyzing the change in bike sharing pattern ('count' variable in dataset) with categorical variables
fig, [ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8] = plt.subplots(nrows=8, figsize=(15, 25))
sn.barplot(x=data['weekday'], y=data['count'], ax=ax1)
sn.barplot(x=data['season'], y=data['count'], ax=ax2)
sn.barplot(x=data['month'], y=data['count'], ax=ax3)
sn.barplot(x=data['holiday'], y=data['count'], ax=ax4)
sn.barplot(x=data['hour'], y=data['count'], ax=ax5)
sn.barplot(x=data['weather'], y=data['count'], ax=ax6)
sn.barplot(x=data['workingday'], y=data['count'], ax=ax7)
sn.barplot(x=data['year'], y=data['count'], ax=ax8)

# Total bike users (count) is the sum of registered and casual users. Need to analyze how they vary individually with hour.
# The variation is observed in different circumstances to check how those impact the bike users.
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(25, 30))
sn.pointplot(x='hour', y='registered', hue='month', data=data, ax=axes[0][0])
sn.pointplot(x='hour', y='casual', hue='month', data=data, ax=axes[0][1])
sn.pointplot(x='hour', y='count', hue='month', size=7, data=data, ax=axes[0][2])
sn.pointplot(x='hour', y='registered', hue='season', data=data, ax=axes[1][0])
sn.pointplot(x='hour', y='casual', hue='season', data=data, ax=axes[1][1])
sn.pointplot(x='hour', y='count', hue='season', size=7, data=data, ax=axes[1][2])
sn.pointplot(x='hour', y='registered', hue='weather', data=data, ax=axes[2][0])
sn.pointplot(x='hour', y='casual', hue='weather', data=data, ax=axes[2][1])
sn.pointplot(x='hour', y='count', hue='weather', size=7, data=data, ax=axes[2][2])

fig, [ax1, ax2, ax3] = plt.subplots(ncols=3, figsize=(20, 8))
plt.rc('xtick', labelsize=10)
plt.rc('ytick', labelsize=10)
sn.regplot(x='temp', y='count', data=data, ax=ax1)
ax1.set(title='Relation between temperature and count')
sn.regplot(x='humidity', y='count', data=data, ax=ax2)
ax2.set(title='Relation between humidity and total count')
sn.regplot(x='windspeed', y='count', data=data, ax=ax3)
ax3.set(title='Relation between Windspeed and total count')
code
16153941/cell_9
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn

filename = '../input/bike_sharing_hourly.csv'
data = pd.read_csv(filename)
data.isnull().sum()
data.rename(columns={'weathersit': 'weather', 'mnth': 'month', 'hr': 'hour', 'yr': 'year',
                     'hum': 'humidity', 'cnt': 'count'}, inplace=True)
data.dtypes
data = data.drop(['instant', 'dteday'], axis=1)
data['year'] = data.year.astype('category')
data['season'] = data.season.astype('category')
data['month'] = data.month.astype('category')
data['hour'] = data.hour.astype('category')
data['holiday'] = data.holiday.astype('category')
data['weekday'] = data.weekday.astype('category')
data['workingday'] = data.workingday.astype('category')
data['weather'] = data.weather.astype('category')
data.dtypes
fig, [ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8] = plt.subplots(nrows=8, figsize=(15, 25))
sn.barplot(x=data['weekday'], y=data['count'], ax=ax1)
sn.barplot(x=data['season'], y=data['count'], ax=ax2)
sn.barplot(x=data['month'], y=data['count'], ax=ax3)
sn.barplot(x=data['holiday'], y=data['count'], ax=ax4)
sn.barplot(x=data['hour'], y=data['count'], ax=ax5)
sn.barplot(x=data['weather'], y=data['count'], ax=ax6)
sn.barplot(x=data['workingday'], y=data['count'], ax=ax7)
sn.barplot(x=data['year'], y=data['count'], ax=ax8)
code
16153941/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

filename = '../input/bike_sharing_hourly.csv'
data = pd.read_csv(filename)
data.isnull().sum()
code
16153941/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

filename = '../input/bike_sharing_hourly.csv'
data = pd.read_csv(filename)
data.isnull().sum()
data.rename(columns={'weathersit': 'weather', 'mnth': 'month', 'hr': 'hour', 'yr': 'year',
                     'hum': 'humidity', 'cnt': 'count'}, inplace=True)
data.head(2)
code
16153941/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn

filename = '../input/bike_sharing_hourly.csv'
data = pd.read_csv(filename)
data.isnull().sum()
data.rename(columns={'weathersit': 'weather', 'mnth': 'month', 'hr': 'hour', 'yr': 'year',
                     'hum': 'humidity', 'cnt': 'count'}, inplace=True)
data.dtypes
data = data.drop(['instant', 'dteday'], axis=1)
data['year'] = data.year.astype('category')
data['season'] = data.season.astype('category')
data['month'] = data.month.astype('category')
data['hour'] = data.hour.astype('category')
data['holiday'] = data.holiday.astype('category')
data['weekday'] = data.weekday.astype('category')
data['workingday'] = data.workingday.astype('category')
data['weather'] = data.weather.astype('category')
data.dtypes

## Exploratory Data Analysis
# Analyzing the change in bike sharing pattern ('count' variable in dataset) with categorical variables
fig, [ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8] = plt.subplots(nrows=8, figsize=(15, 25))
sn.barplot(x=data['weekday'], y=data['count'], ax=ax1)
sn.barplot(x=data['season'], y=data['count'], ax=ax2)
sn.barplot(x=data['month'], y=data['count'], ax=ax3)
sn.barplot(x=data['holiday'], y=data['count'], ax=ax4)
sn.barplot(x=data['hour'], y=data['count'], ax=ax5)
sn.barplot(x=data['weather'], y=data['count'], ax=ax6)
sn.barplot(x=data['workingday'], y=data['count'], ax=ax7)
sn.barplot(x=data['year'], y=data['count'], ax=ax8)

fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(25, 30))
sn.pointplot(x='hour', y='registered', hue='month', data=data, ax=axes[0][0])
sn.pointplot(x='hour', y='casual', hue='month', data=data, ax=axes[0][1])
sn.pointplot(x='hour', y='count', hue='month', size=7, data=data, ax=axes[0][2])
sn.pointplot(x='hour', y='registered', hue='season', data=data, ax=axes[1][0])
sn.pointplot(x='hour', y='casual', hue='season', data=data, ax=axes[1][1])
sn.pointplot(x='hour', y='count', hue='season', size=7, data=data, ax=axes[1][2])
sn.pointplot(x='hour', y='registered', hue='weather', data=data, ax=axes[2][0])
sn.pointplot(x='hour', y='casual', hue='weather', data=data, ax=axes[2][1])
sn.pointplot(x='hour', y='count', hue='weather', size=7, data=data, ax=axes[2][2])
code
16153941/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

filename = '../input/bike_sharing_hourly.csv'
data = pd.read_csv(filename)
data.isnull().sum()
data.rename(columns={'weathersit': 'weather', 'mnth': 'month', 'hr': 'hour', 'yr': 'year',
                     'hum': 'humidity', 'cnt': 'count'}, inplace=True)
data.dtypes
code
16153941/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

filename = '../input/bike_sharing_hourly.csv'
data = pd.read_csv(filename)
data.isnull().sum()
data.rename(columns={'weathersit': 'weather', 'mnth': 'month', 'hr': 'hour', 'yr': 'year',
                     'hum': 'humidity', 'cnt': 'count'}, inplace=True)
data.dtypes
data = data.drop(['instant', 'dteday'], axis=1)
data['year'] = data.year.astype('category')
data['season'] = data.season.astype('category')
data['month'] = data.month.astype('category')
data['hour'] = data.hour.astype('category')
data['holiday'] = data.holiday.astype('category')
data['weekday'] = data.weekday.astype('category')
data['workingday'] = data.workingday.astype('category')
data['weather'] = data.weather.astype('category')
data.dtypes
code
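Casting the repetitive columns to the category dtype, as the bike-sharing cells do, stores small integer codes plus one lookup table instead of repeated values, which cuts memory and marks the columns as discrete for plotting. A sketch of the effect on synthetic data:

import numpy as np
import pandas as pd

s = pd.Series(np.random.choice(['spring', 'summer', 'fall', 'winter'], size=100_000))
cat = s.astype('category')
# object storage vs. categorical storage, in bytes
print(s.memory_usage(deep=True), '->', cat.memory_usage(deep=True))
print(cat.cat.categories)  # the lookup table of distinct levels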
16153941/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

filename = '../input/bike_sharing_hourly.csv'
data = pd.read_csv(filename)
data.head(2)
code
32067376/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

avocado = pd.read_csv('../input/avocados/Avocado.csv')
avocado
avocado.index
avocado.columns
avocado.shape
avo_data = avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.mean()
avocado.groupby(['year']).Small_Bags.median()
avocado.groupby(['year']).Small_Bags.std()
avocado.groupby(['year']).Small_Bags.size()
avocado.groupby(['year']).Small_Bags.value_counts()
new_avo = avocado.groupby('year')[['Small_Bags', 'Large_Bags']].sum()
new_avo
new_avo.loc[2015]
code
32067376/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

avocado = pd.read_csv('../input/avocados/Avocado.csv')
avocado
avocado.index
avocado.columns
avocado.shape
code
32067376/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

avocado = pd.read_csv('../input/avocados/Avocado.csv')
avocado
avocado.index
avocado.columns
avocado.shape
avo_data = avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.mean()
avocado.groupby(['year']).Small_Bags.median()
avocado.groupby(['year']).Small_Bags.std()
avocado.groupby(['year']).Small_Bags.size()
avocado.groupby(['year']).Small_Bags.value_counts()
new_avo = avocado.groupby('year')[['Small_Bags', 'Large_Bags']].sum()
new_avo
avo_sorted_small = avocado.sort_values('Small_Bags', ascending=True)
avo_sorted_small
avo_sorted_small.sort_index()
code
32067376/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

avocado = pd.read_csv('../input/avocados/Avocado.csv')
avocado
avocado.head()
code
32067376/cell_26
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

avocado = pd.read_csv('../input/avocados/Avocado.csv')
avocado
avocado.index
avocado.columns
avocado.shape
avo_data = avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.mean()
avocado.groupby(['year']).Small_Bags.median()
avocado.groupby(['year']).Small_Bags.std()
avocado.groupby(['year']).Small_Bags.size()
avocado.groupby(['year']).Small_Bags.value_counts()
new_avo = avocado.groupby('year')[['Small_Bags', 'Large_Bags']].sum()
new_avo
avo_sorted_small = avocado.sort_values('Small_Bags', ascending=True)
avo_sorted_small
avo_sorted_small.sort_index()
avo_sorted_small.max()
code
32067376/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

avocado = pd.read_csv('../input/avocados/Avocado.csv')
avocado
code
32067376/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

avocado = pd.read_csv('../input/avocados/Avocado.csv')
avocado
avocado.index
avocado.columns
avocado.shape
avo_data = avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.sum()
code
32067376/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

avocado = pd.read_csv('../input/avocados/Avocado.csv')
avocado
avocado.index
avocado.columns
avocado.shape
avo_data = avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.mean()
avocado.groupby(['year']).Small_Bags.median()
avocado.groupby(['year']).Small_Bags.std()
avocado.groupby(['year']).Small_Bags.size()
avocado.groupby(['year']).Small_Bags.value_counts()
new_avo = avocado.groupby('year')[['Small_Bags', 'Large_Bags']].sum()  # double brackets: select multiple columns
new_avo
code
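One portability note on the avocado cells: selecting several columns from a groupby originally used a bare tuple (groupby('year')['Small_Bags', 'Large_Bags']); that form was deprecated and removed in pandas 2.0, so the cells here are shown with the double-bracket list form. A toy sketch of the pattern, including the label- and position-based lookups the later cells use:

import pandas as pd

avo = pd.DataFrame({'year': [2015, 2015, 2016],
                    'Small_Bags': [10.0, 20.0, 5.0],
                    'Large_Bags': [1.0, 2.0, 3.0]})

totals = avo.groupby('year')[['Small_Bags', 'Large_Bags']].sum()  # list, not tuple
print(totals.loc[2015])   # label-based row lookup
print(totals.iloc[1, 1])  # position-based lookup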
32067376/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32067376/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

avocado = pd.read_csv('../input/avocados/Avocado.csv')
avocado
avocado.index
code
32067376/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

avocado = pd.read_csv('../input/avocados/Avocado.csv')
avocado
avocado.index
avocado.columns
avocado.shape
avo_data = avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.mean()
avocado.groupby(['year']).Small_Bags.median()
avocado.groupby(['year']).Small_Bags.std()
avocado.groupby(['year']).Small_Bags.size()
avocado.groupby(['year']).Small_Bags.value_counts()
new_avo = avocado.groupby('year')[['Small_Bags', 'Large_Bags']].sum()
new_avo
avo_sorted_small = avocado.sort_values('Small_Bags', ascending=True)
avo_sorted_small
code
32067376/cell_28
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

avocado = pd.read_csv('../input/avocados/Avocado.csv')
avocado
avocado.index
avocado.columns
avocado.shape
avo_data = avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.mean()
avocado.groupby(['year']).Small_Bags.median()
avocado.groupby(['year']).Small_Bags.std()
avocado.groupby(['year']).Small_Bags.size()
avocado.groupby(['year']).Small_Bags.value_counts()
new_avo = avocado.groupby('year')[['Small_Bags', 'Large_Bags']].sum()
new_avo
avo_sorted_small = avocado.sort_values('Small_Bags', ascending=True)
avo_sorted_small
avocado.groupby('year')['Small_Bags'].sum().plot.bar()
code
32067376/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

avocado = pd.read_csv('../input/avocados/Avocado.csv')
avocado
avocado.index
avocado.columns
code
32067376/cell_15
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

avocado = pd.read_csv('../input/avocados/Avocado.csv')
avocado
avocado.index
avocado.columns
avocado.shape
avo_data = avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.mean()
avocado.groupby(['year']).Small_Bags.median()
code
32067376/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

avocado = pd.read_csv('../input/avocados/Avocado.csv')
avocado
avocado.index
avocado.columns
avocado.shape
avo_data = avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.mean()
avocado.groupby(['year']).Small_Bags.median()
avocado.groupby(['year']).Small_Bags.std()
code
32067376/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

avocado = pd.read_csv('../input/avocados/Avocado.csv')
avocado
avocado.index
avocado.columns
avocado.shape
avo_data = avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.mean()
avocado.groupby(['year']).Small_Bags.median()
avocado.groupby(['year']).Small_Bags.std()
avocado.groupby(['year']).Small_Bags.size()
code
32067376/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

avocado = pd.read_csv('../input/avocados/Avocado.csv')
avocado
avocado.index
avocado.columns
avocado.shape
avo_data = avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.mean()
avocado.groupby(['year']).Small_Bags.median()
avocado.groupby(['year']).Small_Bags.std()
avocado.groupby(['year']).Small_Bags.size()
avocado.groupby(['year']).Small_Bags.value_counts()
new_avo = avocado.groupby('year')[['Small_Bags', 'Large_Bags']].sum()
new_avo
avo_sorted_small = avocado.sort_values('Small_Bags', ascending=True)
avo_sorted_small
code
32067376/cell_14
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

avocado = pd.read_csv('../input/avocados/Avocado.csv')
avocado
avocado.index
avocado.columns
avocado.shape
avo_data = avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.mean()
code
32067376/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

avocado = pd.read_csv('../input/avocados/Avocado.csv')
avocado
avocado.index
avocado.columns
avocado.shape
avo_data = avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.sum()
avocado.groupby(['year']).Small_Bags.mean()
avocado.groupby(['year']).Small_Bags.median()
avocado.groupby(['year']).Small_Bags.std()
avocado.groupby(['year']).Small_Bags.size()
avocado.groupby(['year']).Small_Bags.value_counts()
new_avo = avocado.groupby('year')[['Small_Bags', 'Large_Bags']].sum()
new_avo
new_avo.loc[2015]
new_avo.iloc[1, 1]
code
32067376/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

avocado = pd.read_csv('../input/avocados/Avocado.csv')
avocado
avocado.tail(3)
code
72104176/cell_21
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_vals = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_values.csv')
train_labels = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_labels.csv')
train_vals.dtypes.value_counts()
train_df = train_vals.merge(train_labels, on='building_id')
corr = train_df.corr()['damage_grade'].sort_values(ascending=False)[1:]
corr = corr[abs(corr.values) > 0.01]
cats = []
for column in train_df.columns:
    if train_df[column].dtype == 'object':
        cats.append(column)
for cat in cats:
    train_df[cat] = train_df[cat].astype('category')
len(cats)
lb_make = LabelEncoder()
for cat in cats:
    train_df[cat] = lb_make.fit_transform(train_df[cat])
test_vals = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/test_values.csv')
for cat in cats:
    test_vals[cat] = lb_make.fit_transform(test_vals[cat])
test_vals['Predicted_damage_grade'].value_counts()  # note: this column is produced by a prediction step not shown in this cell
code
72104176/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_vals = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_values.csv')
train_labels = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_labels.csv')
train_vals.dtypes.value_counts()
train_df = train_vals.merge(train_labels, on='building_id')

numericals = ['count_floors_pre_eq', 'height_percentage', 'area_percentage', 'count_families']
fig, axes = plt.subplots(2, 2, figsize=(10, 10))
for i, feature in enumerate(numericals):
    sns.histplot(data=train_df, x=feature, ax=axes[i % 2, i // 2], color='purple')
plt.show()

corr = train_df.corr()['damage_grade'].sort_values(ascending=False)[1:]
corr = corr[abs(corr.values) > 0.01]
plt.xticks(rotation=90)
cats = []
for column in train_df.columns:
    if train_df[column].dtype == 'object':
        cats.append(column)
for cat in cats:
    train_df[cat] = train_df[cat].astype('category')
len(cats)
sns.set_palette('colorblind')
fig, axes = plt.subplots(4, 2, figsize=(20, 20), sharex=False)
for i, name in enumerate(cats):
    sns.countplot(data=train_df, x=name, ax=axes[i % 4, i // 4])
plt.show()
code
72104176/cell_9
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_vals = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_values.csv')
train_labels = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_labels.csv')
train_vals.dtypes.value_counts()
train_df = train_vals.merge(train_labels, on='building_id')

numericals = ['count_floors_pre_eq', 'height_percentage', 'area_percentage', 'count_families']
fig, axes = plt.subplots(2, 2, figsize=(10, 10))
for i, feature in enumerate(numericals):
    sns.histplot(data=train_df, x=feature, ax=axes[i % 2, i // 2], color='purple')
plt.show()
code
72104176/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_vals = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_values.csv')
train_labels = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_labels.csv')
print(train_vals.head())
print(train_labels.head())
code
72104176/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_vals = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_values.csv')
train_labels = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_labels.csv')
train_vals.info()
code
72104176/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_vals = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_values.csv')
train_labels = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_labels.csv')
train_vals.dtypes.value_counts()
train_df = train_vals.merge(train_labels, on='building_id')

numericals = ['count_floors_pre_eq', 'height_percentage', 'area_percentage', 'count_families']
fig, axes = plt.subplots(2, 2, figsize=(10, 10))
for i, feature in enumerate(numericals):
    sns.histplot(data=train_df, x=feature, ax=axes[i % 2, i // 2], color='purple')
plt.show()

corr = train_df.corr()['damage_grade'].sort_values(ascending=False)[1:]
corr = corr[abs(corr.values) > 0.01]
plt.figure(figsize=(10, 10))
sns.barplot(x=corr.index, y=corr, color='teal')
plt.xticks(rotation=90)
plt.show()
code
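The screen above keeps features whose absolute Pearson correlation with damage_grade exceeds 0.01; the [1:] drops the target's self-correlation, which sorts first at 1.0. The same pattern on synthetic data (the 0.1 threshold here is illustrative):

import numpy as np
import pandas as pd

rng = np.random.default_rng(1)
df = pd.DataFrame({'target': rng.normal(size=500)})
df['signal'] = df['target'] * 0.5 + rng.normal(size=500)  # correlated feature
df['noise'] = rng.normal(size=500)                        # uncorrelated feature

corr = df.corr()['target'].sort_values(ascending=False)[1:]  # drop self-correlation
print(corr[abs(corr.values) > 0.1])                          # keep features above a threshold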
72104176/cell_19
[ "image_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_vals = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_values.csv')
train_labels = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_labels.csv')
train_vals.dtypes.value_counts()
train_df = train_vals.merge(train_labels, on='building_id')
corr = train_df.corr()['damage_grade'].sort_values(ascending=False)[1:]
corr = corr[abs(corr.values) > 0.01]
cats = []
for column in train_df.columns:
    if train_df[column].dtype == 'object':
        cats.append(column)
for cat in cats:
    train_df[cat] = train_df[cat].astype('category')
len(cats)
lb_make = LabelEncoder()
for cat in cats:
    train_df[cat] = lb_make.fit_transform(train_df[cat])
test_vals = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/test_values.csv')
for cat in cats:
    test_vals[cat] = lb_make.fit_transform(test_vals[cat])
test_vals.head()
code
72104176/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
72104176/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_vals = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_values.csv')
train_labels = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_labels.csv')
train_vals.dtypes.value_counts()
code
72104176/cell_18
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_vals = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_values.csv')
train_labels = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_labels.csv')
test_vals = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/test_values.csv')
test_vals.head()
code
72104176/cell_8
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_vals = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_values.csv')
train_labels = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_labels.csv')
train_vals.dtypes.value_counts()
train_df = train_vals.merge(train_labels, on='building_id')
train_df.head()
code
72104176/cell_15
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_vals = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_values.csv')
train_labels = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_labels.csv')
train_vals.dtypes.value_counts()
train_df = train_vals.merge(train_labels, on='building_id')
corr = train_df.corr(numeric_only=True)['damage_grade'].sort_values(ascending=False)[1:]
corr = corr[abs(corr.values) > 0.01]
cats = []
for column in train_df.columns:
    if train_df[column].dtype == 'object':
        cats.append(column)
for cat in cats:
    train_df[cat] = train_df[cat].astype('category')
len(cats)
lb_make = LabelEncoder()
for cat in cats:
    train_df[cat] = lb_make.fit_transform(train_df[cat])
train_df.head()
code
72104176/cell_16
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_vals = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_values.csv')
train_labels = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_labels.csv')
train_vals.dtypes.value_counts()
train_df = train_vals.merge(train_labels, on='building_id')
corr = train_df.corr(numeric_only=True)['damage_grade'].sort_values(ascending=False)[1:]
corr = corr[abs(corr.values) > 0.01]
cats = []
for column in train_df.columns:
    if train_df[column].dtype == 'object':
        cats.append(column)
for cat in cats:
    train_df[cat] = train_df[cat].astype('category')
len(cats)
lb_make = LabelEncoder()
for cat in cats:
    train_df[cat] = lb_make.fit_transform(train_df[cat])
train_df['damage_grade'].value_counts()
code
72104176/cell_3
[ "image_output_1.png" ]
import matplotlib

for cname in matplotlib.colors.cnames:
    print(cname)
code
72104176/cell_17
[ "text_html_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_vals = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_values.csv')
train_labels = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_labels.csv')
train_vals.dtypes.value_counts()
train_df = train_vals.merge(train_labels, on='building_id')
corr = train_df.corr(numeric_only=True)['damage_grade'].sort_values(ascending=False)[1:]
corr = corr[abs(corr.values) > 0.01]
cats = []
for column in train_df.columns:
    if train_df[column].dtype == 'object':
        cats.append(column)
for cat in cats:
    train_df[cat] = train_df[cat].astype('category')
len(cats)
lb_make = LabelEncoder()
for cat in cats:
    train_df[cat] = lb_make.fit_transform(train_df[cat])
logreg = LogisticRegression()
X = train_df.drop('damage_grade', axis=1)
y = train_df['damage_grade']
logreg.fit(X, y)
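# A minimal held-out-evaluation sketch (assuming the encoded train_df above);
# scaling plus a larger max_iter helps the lbfgs solver converge on these
# unscaled count/geo features.
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=42)
pipe = make_pipeline(StandardScaler(), LogisticRegression(max_iter=1000))
pipe.fit(X_tr, y_tr)
print(pipe.score(X_te, y_te))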
code
72104176/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_vals = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_values.csv')
train_labels = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_labels.csv')
train_vals.dtypes.value_counts()
train_df = train_vals.merge(train_labels, on='building_id')
corr = train_df.corr(numeric_only=True)['damage_grade'].sort_values(ascending=False)[1:]
corr = corr[abs(corr.values) > 0.01]
cats = []
for column in train_df.columns:
    if train_df[column].dtype == 'object':
        cats.append(column)
for cat in cats:
    train_df[cat] = train_df[cat].astype('category')
len(cats)
code
72104176/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_vals = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_values.csv')
train_labels = pd.read_csv('../input/richters-predictor-modeling-earthquake-damage/train_labels.csv')
train_vals.head()
code
128034496/cell_4
[ "image_output_1.png" ]
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.read_csv('/kaggle/input/business-decision-research/data_retail.csv', sep=';')
print('Top five rows:')
print(df.head())
print('\nDataset info:')
df.info()  # info() prints its report directly; wrapping it in print() only adds a trailing "None"
code
128034496/cell_6
[ "image_output_1.png" ]
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.read_csv('/kaggle/input/business-decision-research/data_retail.csv', sep=';')
# the raw transaction stamps are unix epochs in milliseconds
df['First_Transaction'] = pd.to_datetime(df['First_Transaction'] / 1000, unit='s', origin='1970-01-01')
df['Last_Transaction'] = pd.to_datetime(df['Last_Transaction'] / 1000, unit='s', origin='1970-01-01')
print('Top five rows:')
print(df.head())
print('\nDataset info:')
df.info()
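# Equivalent sketch: pandas accepts the millisecond epochs directly, so the
# manual division by 1000 (and the explicit default origin) can be dropped:
# df['First_Transaction'] = pd.to_datetime(df['First_Transaction'], unit='ms')
# df['Last_Transaction'] = pd.to_datetime(df['Last_Transaction'], unit='ms')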
code
128034496/cell_18
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np

df = pd.read_csv('/kaggle/input/business-decision-research/data_retail.csv', sep=';')
df['First_Transaction'] = pd.to_datetime(df['First_Transaction'] / 1000, unit='s', origin='1970-01-01')
df['Last_Transaction'] = pd.to_datetime(df['Last_Transaction'] / 1000, unit='s', origin='1970-01-01')
df.loc[df['Last_Transaction'] <= '2018-08-01', 'is_churn'] = True
df.loc[df['Last_Transaction'] > '2018-08-01', 'is_churn'] = False
del df['no']
del df['Row_Num']
df['Year_First_Transaction'] = df['First_Transaction'].dt.year
df['Year_Last_Transaction'] = df['Last_Transaction'].dt.year
df_year = df.groupby(['Year_First_Transaction'])['Customer_ID'].count()
plt.tight_layout()
plt.clf()
df_year = df.groupby(['Year_First_Transaction'])['Count_Transaction'].sum()
plt.tight_layout()
df_piv = df.pivot_table(index='is_churn', columns='Product', values='Customer_ID', aggfunc='count', fill_value=0)
# keep the five products with the most customers overall (column sums of the
# pivot; count() would return the same row count for every column)
plot_product = df_piv.sum().sort_values(ascending=False).head(5).index
df_piv = df_piv.reindex(columns=plot_product)
df_piv.plot.pie(subplots=True, figsize=(10, 7), layout=(-1, 2), autopct='%1.0f%%', title='Proportion Churn by Product')
plt.tight_layout()
plt.show()
code
128034496/cell_8
[ "image_output_1.png" ]
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.read_csv('/kaggle/input/business-decision-research/data_retail.csv', sep=';')
df['First_Transaction'] = pd.to_datetime(df['First_Transaction'] / 1000, unit='s', origin='1970-01-01')
df['Last_Transaction'] = pd.to_datetime(df['Last_Transaction'] / 1000, unit='s', origin='1970-01-01')
print(max(df['Last_Transaction']))
df.loc[df['Last_Transaction'] <= '2018-08-01', 'is_churn'] = True
df.loc[df['Last_Transaction'] > '2018-08-01', 'is_churn'] = False
print('Top five rows:')
print(df.head())
print('\nDataset info:')
df.info()
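# A one-line equivalent sketch of the churn flag: the two .loc assignments
# above reduce to a single boolean comparison against the cutoff date.
# df['is_churn'] = df['Last_Transaction'] <= '2018-08-01'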
code
128034496/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np

df = pd.read_csv('/kaggle/input/business-decision-research/data_retail.csv', sep=';')
df['First_Transaction'] = pd.to_datetime(df['First_Transaction'] / 1000, unit='s', origin='1970-01-01')
df['Last_Transaction'] = pd.to_datetime(df['Last_Transaction'] / 1000, unit='s', origin='1970-01-01')
df.loc[df['Last_Transaction'] <= '2018-08-01', 'is_churn'] = True
df.loc[df['Last_Transaction'] > '2018-08-01', 'is_churn'] = False
del df['no']
del df['Row_Num']
df['Year_First_Transaction'] = df['First_Transaction'].dt.year
df['Year_Last_Transaction'] = df['Last_Transaction'].dt.year
df_year = df.groupby(['Year_First_Transaction'])['Customer_ID'].count()
plt.tight_layout()
plt.clf()
df_year = df.groupby(['Year_First_Transaction'])['Count_Transaction'].sum()
plt.tight_layout()
sns.pointplot(data=df.groupby(['Product', 'Year_First_Transaction']).mean(numeric_only=True).reset_index(),
              x='Year_First_Transaction', y='Average_Transaction_Amount', hue='Product')
plt.tight_layout()
plt.show()
code
128034496/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns

df = pd.read_csv('/kaggle/input/business-decision-research/data_retail.csv', sep=';')
df['First_Transaction'] = pd.to_datetime(df['First_Transaction'] / 1000, unit='s', origin='1970-01-01')
df['Last_Transaction'] = pd.to_datetime(df['Last_Transaction'] / 1000, unit='s', origin='1970-01-01')
df.loc[df['Last_Transaction'] <= '2018-08-01', 'is_churn'] = True
df.loc[df['Last_Transaction'] > '2018-08-01', 'is_churn'] = False
del df['no']
del df['Row_Num']
df['Year_First_Transaction'] = df['First_Transaction'].dt.year
df['Year_Last_Transaction'] = df['Last_Transaction'].dt.year
df_year = df.groupby(['Year_First_Transaction'])['Customer_ID'].count()
plt.tight_layout()
plt.clf()
df_year = df.groupby(['Year_First_Transaction'])['Count_Transaction'].sum()
df_year.plot(x='Year_First_Transaction', y='Count_Transaction', kind='bar', title='Graph of Transaction Customer')
plt.xlabel('Year_First_Transaction')
plt.ylabel('Num_of_Transaction')
plt.tight_layout()
plt.show()
code
128034496/cell_10
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.read_csv('/kaggle/input/business-decision-research/data_retail.csv', sep=';')
df['First_Transaction'] = pd.to_datetime(df['First_Transaction'] / 1000, unit='s', origin='1970-01-01')
df['Last_Transaction'] = pd.to_datetime(df['Last_Transaction'] / 1000, unit='s', origin='1970-01-01')
df.loc[df['Last_Transaction'] <= '2018-08-01', 'is_churn'] = True
df.loc[df['Last_Transaction'] > '2018-08-01', 'is_churn'] = False
del df['no']
del df['Row_Num']
print(df.head())
code
128034496/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns

df = pd.read_csv('/kaggle/input/business-decision-research/data_retail.csv', sep=';')
df['First_Transaction'] = pd.to_datetime(df['First_Transaction'] / 1000, unit='s', origin='1970-01-01')
df['Last_Transaction'] = pd.to_datetime(df['Last_Transaction'] / 1000, unit='s', origin='1970-01-01')
df.loc[df['Last_Transaction'] <= '2018-08-01', 'is_churn'] = True
df.loc[df['Last_Transaction'] > '2018-08-01', 'is_churn'] = False
del df['no']
del df['Row_Num']
df['Year_First_Transaction'] = df['First_Transaction'].dt.year
df['Year_Last_Transaction'] = df['Last_Transaction'].dt.year
df_year = df.groupby(['Year_First_Transaction'])['Customer_ID'].count()
df_year.plot(x='Year_First_Transaction', y='Customer_ID', kind='bar', title='Graph of Customer Acquisition')
plt.xlabel('Year_First_Transaction')
plt.ylabel('Num_of_Customer')
plt.tight_layout()
plt.show()
code
130012258/cell_25
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/midjourney-v5-prompts-and-links/MJ_Part3.csv')
df
df = df.drop(['AuthorID', 'Author', 'Date', 'Reactions', 'Attachments'], axis=1)
df
df = df.dropna()
df
df = df[df['Content'] != 'No Match']
df
cv = CountVectorizer(max_features=5000, stop_words='english')
vectors = cv.fit_transform(df['Content']).toarray()
vectors
cv.get_feature_names_out()
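# A minimal sketch of the most frequent vocabulary terms, pairing the learned
# feature names with their summed counts across all prompts.
import numpy as np
counts = vectors.sum(axis=0)
top = np.argsort(counts)[::-1][:20]
print(list(zip(cv.get_feature_names_out()[top], counts[top])))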
code
130012258/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/midjourney-v5-prompts-and-links/MJ_Part3.csv')
df
code
130012258/cell_23
[ "text_html_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/midjourney-v5-prompts-and-links/MJ_Part3.csv')
df
df = df.drop(['AuthorID', 'Author', 'Date', 'Reactions', 'Attachments'], axis=1)
df
df = df.dropna()
df
df = df[df['Content'] != 'No Match']
df
cv = CountVectorizer(max_features=5000, stop_words='english')
vectors = cv.fit_transform(df['Content']).toarray()
vectors
code
130012258/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/midjourney-v5-prompts-and-links/MJ_Part3.csv')
df
df = df.drop(['AuthorID', 'Author', 'Date', 'Reactions', 'Attachments'], axis=1)
df
code
130012258/cell_19
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/midjourney-v5-prompts-and-links/MJ_Part3.csv')
df
df = df.drop(['AuthorID', 'Author', 'Date', 'Reactions', 'Attachments'], axis=1)
df
df = df.dropna()
df
df = df[df['Content'] != 'No Match']
df
df
code
130012258/cell_18
[ "text_html_output_1.png" ]
import nltk
from nltk.stem.porter import PorterStemmer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/midjourney-v5-prompts-and-links/MJ_Part3.csv')
df
df = df.drop(['AuthorID', 'Author', 'Date', 'Reactions', 'Attachments'], axis=1)
df
df = df.dropna()
df
df = df[df['Content'] != 'No Match']
df

ps = PorterStemmer()

def stem(text):
    y = []
    for i in text.split():
        y.append(ps.stem(i))
    return ' '.join(y)

df['Content'] = df['Content'].apply(stem)
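# Quick usage check of the stem() helper above (hypothetical sample text);
# Porter stems are truncated word forms, e.g. 'run run easili studi'.
print(stem('running runs easily studied'))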
code