column             dtype            range / values
path               stringlengths    13 – 17
screenshot_names   sequencelengths  1 – 873
code               stringlengths    0 – 40.4k
cell_type          stringclasses    1 value
129035264/cell_16
[ "text_plain_output_1.png" ]
import missingno as msno
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
train.shape
train.duplicated().sum()
women = train.loc[train.Sex == 'female']['Survived']
women_sur_rate = sum(women) / len(women)
men = train.loc[train.Sex == 'male']['Survived']
men_sur_rate = sum(men) / len(men)
msno.matrix(train)
train.drop(['PassengerId', 'Ticket', 'Cabin', 'Name'], axis=1, inplace=True)
msno.matrix(train)
train.Age.fillna(train.Age.mean(), inplace=True)
msno.matrix(train)
train.dropna(inplace=True)
msno.matrix(train)
code
129035264/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
train.head()
code
129035264/cell_14
[ "text_plain_output_1.png" ]
import missingno as msno
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
train.shape
train.duplicated().sum()
women = train.loc[train.Sex == 'female']['Survived']
women_sur_rate = sum(women) / len(women)
men = train.loc[train.Sex == 'male']['Survived']
men_sur_rate = sum(men) / len(men)
msno.matrix(train)
train.drop(['PassengerId', 'Ticket', 'Cabin', 'Name'], axis=1, inplace=True)
msno.matrix(train)
code
129035264/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
train.shape
train.duplicated().sum()
code
129035264/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
train.shape
train.duplicated().sum()
women = train.loc[train.Sex == 'female']['Survived']
women_sur_rate = sum(women) / len(women)
print('% of women who survived:', women_sur_rate)
men = train.loc[train.Sex == 'male']['Survived']
men_sur_rate = sum(men) / len(men)
print('% of men who survived:', men_sur_rate)
code
104116914/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/titanic/train.csv')
df1 = pd.read_csv('../input/titanic/train.csv')
df['Age'] = df['Age'].fillna(df['Age'].median())
plt.figure(figsize=(20, 3))
sns.countplot(y='Survived', data=df)
code
104116914/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/titanic/train.csv')
df1 = pd.read_csv('../input/titanic/train.csv')
df['Age'] = df['Age'].fillna(df['Age'].median())
df.head(10)
code
104116914/cell_25
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/titanic/train.csv')
df1 = pd.read_csv('../input/titanic/train.csv')
df['Age'] = df['Age'].fillna(df['Age'].median())
df.dtypes
df['Sex'].value_counts()
fig = plt.figure(figsize=(10, 10))
sns.distplot(df.loc[df['Survived'] == 1]['Sex'])
sns.distplot(df.loc[df['Survived'] == 0]['Sex'])
code
104116914/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/titanic/train.csv')
df1 = pd.read_csv('../input/titanic/train.csv')
df.head(10)
code
104116914/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/titanic/train.csv')
df1 = pd.read_csv('../input/titanic/train.csv')
df['Age'] = df['Age'].fillna(df['Age'].median())
df.dtypes
df['Sex'].value_counts()
# Extract the honorific title from each passenger name and encode it numerically.
l2 = df['Name']
l2_num = []
l5 = []
for i in l2:
    l3 = i.split(',')
    s = l3[-1]
    l4 = s.split('.')
    s1 = l4[0]
    l5.append(s1)
    if s1 == ' Mr':
        l2_num.append(1)
    elif s1 == ' Miss':
        l2_num.append(2)
    elif s1 == ' Mrs':
        l2_num.append(3)
    elif s1 == ' Master':
        l2_num.append(4)
    else:
        l2_num.append(5)
df['Title'] = l5
df['Title_num'] = l2_num
print(df['Title_num'].value_counts())
print(df['Title'].value_counts())
code
104116914/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/titanic/train.csv')
df1 = pd.read_csv('../input/titanic/train.csv')
df['Age'] = df['Age'].fillna(df['Age'].median())
df.dtypes
df.head()
code
104116914/cell_6
[ "text_plain_output_1.png" ]
import missingno
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/titanic/train.csv')
df1 = pd.read_csv('../input/titanic/train.csv')
missingno.matrix(df, figsize=(20, 5))
code
104116914/cell_29
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/titanic/train.csv')
df1 = pd.read_csv('../input/titanic/train.csv')
df['Age'] = df['Age'].fillna(df['Age'].median())
df.dtypes
df['Sex'].value_counts()
fig = plt.figure(figsize=(10, 10))
sns.distplot(df.loc[df['Survived'] == 1]['Sex'])
sns.distplot(df.loc[df['Survived'] == 0]['Sex'])
fig = plt.figure(figsize=(20, 10))
plt.subplot(1, 2, 1)
sns.countplot(y='Fare', data=df)
plt.subplot(1, 2, 2)
sns.distplot(df1.loc[df['Survived'] == 1]['Fare'])
sns.distplot(df1.loc[df['Survived'] == 0]['Fare'])
sns.distplot()  # note: distplot() with no data raises an error; this looks like a stray call
code
104116914/cell_26
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/titanic/train.csv')
df1 = pd.read_csv('../input/titanic/train.csv')
df['Age'] = df['Age'].fillna(df['Age'].median())
df.dtypes
df['Sex'].value_counts()
fig = plt.figure(figsize=(10, 10))
sns.distplot(df.loc[df['Survived'] == 1]['Sex'])
sns.distplot(df.loc[df['Survived'] == 0]['Sex'])
df['Ticket'].value_counts(sort=True, ascending=False)
code
104116914/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import os

import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
104116914/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/titanic/train.csv')
df1 = pd.read_csv('../input/titanic/train.csv')
df['Age'] = df['Age'].fillna(df['Age'].median())
df.head(10)
code
104116914/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/titanic/train.csv')
df1 = pd.read_csv('../input/titanic/train.csv')
df['Age'] = df['Age'].fillna(df['Age'].median())
df.dtypes
df['Embarked'].value_counts()
code
104116914/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/titanic/train.csv')
df1 = pd.read_csv('../input/titanic/train.csv')
df['Age'] = df['Age'].fillna(df['Age'].median())
df.head(10)
code
104116914/cell_18
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/titanic/train.csv')
df1 = pd.read_csv('../input/titanic/train.csv')
df['Age'] = df['Age'].fillna(df['Age'].median())
df.dtypes
# Encode embarkation port as an integer: S -> 1, C -> 2, anything else -> 3.
l = df['Embarked']
l_num = []
for i in l:
    if i == 'S':
        l_num.append(1)
    elif i == 'C':
        l_num.append(2)
    else:
        l_num.append(3)
df['Embarked'] = l_num
df['Embarked'].head()
code
104116914/cell_28
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/titanic/train.csv')
df1 = pd.read_csv('../input/titanic/train.csv')
df['Age'] = df['Age'].fillna(df['Age'].median())
df.dtypes
df['Sex'].value_counts()
fig = plt.figure(figsize=(10, 10))
sns.distplot(df.loc[df['Survived'] == 1]['Sex'])
sns.distplot(df.loc[df['Survived'] == 0]['Sex'])
df.head()
code
104116914/cell_8
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import missingno
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/titanic/train.csv')
df1 = pd.read_csv('../input/titanic/train.csv')
missingno.matrix(df, figsize=(20, 5))
df['Age'] = df['Age'].fillna(df['Age'].median())
missingno.matrix(df, figsize=(20, 5))
code
104116914/cell_15
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/titanic/train.csv')
df1 = pd.read_csv('../input/titanic/train.csv')
df['Age'] = df['Age'].fillna(df['Age'].median())
df.dtypes
code
104116914/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/titanic/train.csv')
df1 = pd.read_csv('../input/titanic/train.csv')
df['Age'] = df['Age'].fillna(df['Age'].median())
df.dtypes
df['Age'].value_counts()
code
104116914/cell_17
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/titanic/train.csv')
df1 = pd.read_csv('../input/titanic/train.csv')
df['Age'] = df['Age'].fillna(df['Age'].median())
df.dtypes
df['Embarked'].value_counts()
code
104116914/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/titanic/train.csv')
df1 = pd.read_csv('../input/titanic/train.csv')
df['Age'] = df['Age'].fillna(df['Age'].median())
df.dtypes
df['Sex'].value_counts()
df.tail(20)
code
104116914/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/titanic/train.csv')
df1 = pd.read_csv('../input/titanic/train.csv')
df['Age'] = df['Age'].fillna(df['Age'].median())
plt.figure(figsize=(20, 3))
sns.countplot(y='Sex', data=df)
code
104116914/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/titanic/train.csv')
df1 = pd.read_csv('../input/titanic/train.csv')
df['Age'] = df['Age'].fillna(df['Age'].median())
df.dtypes
df['Sex'].value_counts()
df.head(10)
code
104116914/cell_12
[ "text_html_output_1.png" ]
import missingno
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/titanic/train.csv')
df1 = pd.read_csv('../input/titanic/train.csv')
missingno.matrix(df, figsize=(20, 5))
df['Age'] = df['Age'].fillna(df['Age'].median())
missingno.matrix(df, figsize=(20, 5))
missingno.matrix(df, figsize=(20, 2))
code
104116914/cell_5
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/titanic/train.csv')
df1 = pd.read_csv('../input/titanic/train.csv')
df.describe()
code
48167508/cell_13
[ "text_html_output_2.png", "text_html_output_1.png" ]
from datetime import timedelta

import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

TARGET = 'Is_CANCELLED'
NA_CATEGORY = 'NA_'
train = pd.read_csv('/kaggle/input/techcom-ai-competition/train.csv')
test = pd.read_csv('/kaggle/input/techcom-ai-competition/test.csv')
sample_sub = pd.read_csv('/kaggle/input/techcom-ai-competition/sample_submission.csv')
train.rename(columns={'MEMBERSHIP_STATUS': 'Is_CANCELLED'}, inplace=True)
train[TARGET] = train[TARGET].map({'INFORCE': 0, 'CANCELLED': 1})
test['END_DATE'] = np.nan

def add_date_features(df: pd.DataFrame):
    df['START_DATE'] = pd.to_datetime(df['START_DATE'], format='%Y%m%d')
    df['START_YEAR'] = df['START_DATE'].dt.year
    df['START_MONTH'] = df['START_DATE'].dt.month
    df['START_YM'] = df['START_YEAR'] * 100 + df['START_MONTH']
    df['END_DATE'] = pd.to_datetime(df['END_DATE'], format='%Y%m%d')
    df['END_YEAR'] = df['END_DATE'].dt.year
    df['END_MONTH'] = df['END_DATE'].dt.month
    df['END_YM'] = df['END_YEAR'] * 100 + df['END_MONTH']
    df['N_DAYS'] = (df['END_DATE'] - df['START_DATE']) / timedelta(days=1)
    return df

train = add_date_features(train)
test = add_date_features(test)
years = list(range(2006, 2014))
plt.figure(figsize=(5, 4.5))
ax1 = sns.lineplot(x='START_YEAR', y='MEMBERSHIP_NUMBER',
                   data=train.groupby('START_YEAR')['MEMBERSHIP_NUMBER'].count().reset_index(),
                   label='START_YEAR')
plt.ylabel('Join')
ax2 = ax1.twinx()
ax2 = sns.lineplot(x='END_YEAR', y='MEMBERSHIP_NUMBER',
                   data=train.groupby('END_YEAR')['MEMBERSHIP_NUMBER'].count().reset_index(),
                   ax=ax2, label='END_YEAR', color='orange', legend=False)
plt.ylabel('Churn')
plt.xlabel('START/END_YEAR')
handler1, label1 = ax1.get_legend_handles_labels()
handler2, label2 = ax2.get_legend_handles_labels()
ax1.legend(handler1 + handler2, label1 + label2, loc=2, borderaxespad=0.0,
           bbox_to_anchor=(1.0, 1.05), frameon=False)
plt.suptitle('Number of rows by year fields')
for year_field in ('START_YEAR', 'END_YEAR'):
    display(train.groupby(year_field)['MEMBERSHIP_NUMBER'].count().sort_index()
            .reset_index().rename(columns={'MEMBERSHIP_NUMBER': 'N_ROWS'}))
code
48167508/cell_20
[ "image_output_1.png" ]
from datetime import timedelta

import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

TARGET = 'Is_CANCELLED'
NA_CATEGORY = 'NA_'
train = pd.read_csv('/kaggle/input/techcom-ai-competition/train.csv')
test = pd.read_csv('/kaggle/input/techcom-ai-competition/test.csv')
sample_sub = pd.read_csv('/kaggle/input/techcom-ai-competition/sample_submission.csv')
train.rename(columns={'MEMBERSHIP_STATUS': 'Is_CANCELLED'}, inplace=True)
train[TARGET] = train[TARGET].map({'INFORCE': 0, 'CANCELLED': 1})
test['END_DATE'] = np.nan

def add_date_features(df: pd.DataFrame):
    df['START_DATE'] = pd.to_datetime(df['START_DATE'], format='%Y%m%d')
    df['START_YEAR'] = df['START_DATE'].dt.year
    df['START_MONTH'] = df['START_DATE'].dt.month
    df['START_YM'] = df['START_YEAR'] * 100 + df['START_MONTH']
    df['END_DATE'] = pd.to_datetime(df['END_DATE'], format='%Y%m%d')
    df['END_YEAR'] = df['END_DATE'].dt.year
    df['END_MONTH'] = df['END_DATE'].dt.month
    df['END_YM'] = df['END_YEAR'] * 100 + df['END_MONTH']
    df['N_DAYS'] = (df['END_DATE'] - df['START_DATE']) / timedelta(days=1)
    return df

train = add_date_features(train)
test = add_date_features(test)
years = list(range(2006, 2014))
plt.figure(figsize=(5, 4.5))
ax1 = sns.lineplot(x='START_YEAR', y='MEMBERSHIP_NUMBER',
                   data=train.groupby('START_YEAR')['MEMBERSHIP_NUMBER'].count().reset_index(),
                   label='START_YEAR')
plt.ylabel('Join')
ax2 = ax1.twinx()
ax2 = sns.lineplot(x='END_YEAR', y='MEMBERSHIP_NUMBER',
                   data=train.groupby('END_YEAR')['MEMBERSHIP_NUMBER'].count().reset_index(),
                   ax=ax2, label='END_YEAR', color='orange', legend=False)
plt.ylabel('Churn')
plt.xlabel('START/END_YEAR')
handler1, label1 = ax1.get_legend_handles_labels()
handler2, label2 = ax2.get_legend_handles_labels()
ax1.legend(handler1 + handler2, label1 + label2, loc=2, borderaxespad=0.0,
           bbox_to_anchor=(1.0, 1.05), frameon=False)
plt.suptitle('Number of rows by year fields')
for year_field in ('START_YEAR', 'END_YEAR'):
    display(train.groupby(year_field)['MEMBERSHIP_NUMBER'].count().sort_index()
            .reset_index().rename(columns={'MEMBERSHIP_NUMBER': 'N_ROWS'}))
df_cancelled = train.query(f'{TARGET} == 1')
# In what year did the members who cancelled originally join?
plt.figure(figsize=(16.5, 8))
# years = np.sort(train.query(f'{TARGET} == 1').END_YEAR.unique())
for i, year in enumerate(years):
    plt.subplot(2, 4, i + 1)
    ax = sns.countplot(x='START_YEAR', data=train.query(f'END_YEAR == {year}'),
                       order=[y for y in years if y <= year])
    ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
    plt.xlabel('')
    plt.ylabel('')
    plt.title(f'END_YEAR = {year}')
    plt.ylim((0, 550))
plt.suptitle('When did cancelled members join the club?')
display(pd.pivot_table(index='START_YEAR', columns='END_YEAR', values=TARGET, data=train,
                       aggfunc=np.sum, fill_value=0, margins=True).astype(int))
sns.countplot(x='START_YEAR', hue='PAYMENT_MODE', data=train)
plt.legend(bbox_to_anchor=(1, 1), loc='upper left', frameon=False)
plt.title('Number of records by PAYMENT_MODE', fontsize=14)
code
48167508/cell_18
[ "text_html_output_2.png", "text_html_output_1.png", "image_output_1.png" ]
from datetime import timedelta

import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

TARGET = 'Is_CANCELLED'
NA_CATEGORY = 'NA_'
train = pd.read_csv('/kaggle/input/techcom-ai-competition/train.csv')
test = pd.read_csv('/kaggle/input/techcom-ai-competition/test.csv')
sample_sub = pd.read_csv('/kaggle/input/techcom-ai-competition/sample_submission.csv')
train.rename(columns={'MEMBERSHIP_STATUS': 'Is_CANCELLED'}, inplace=True)
train[TARGET] = train[TARGET].map({'INFORCE': 0, 'CANCELLED': 1})
test['END_DATE'] = np.nan

def add_date_features(df: pd.DataFrame):
    df['START_DATE'] = pd.to_datetime(df['START_DATE'], format='%Y%m%d')
    df['START_YEAR'] = df['START_DATE'].dt.year
    df['START_MONTH'] = df['START_DATE'].dt.month
    df['START_YM'] = df['START_YEAR'] * 100 + df['START_MONTH']
    df['END_DATE'] = pd.to_datetime(df['END_DATE'], format='%Y%m%d')
    df['END_YEAR'] = df['END_DATE'].dt.year
    df['END_MONTH'] = df['END_DATE'].dt.month
    df['END_YM'] = df['END_YEAR'] * 100 + df['END_MONTH']
    df['N_DAYS'] = (df['END_DATE'] - df['START_DATE']) / timedelta(days=1)
    return df

train = add_date_features(train)
test = add_date_features(test)
years = list(range(2006, 2014))
plt.figure(figsize=(5, 4.5))
ax1 = sns.lineplot(x='START_YEAR', y='MEMBERSHIP_NUMBER',
                   data=train.groupby('START_YEAR')['MEMBERSHIP_NUMBER'].count().reset_index(),
                   label='START_YEAR')
plt.ylabel('Join')
ax2 = ax1.twinx()
ax2 = sns.lineplot(x='END_YEAR', y='MEMBERSHIP_NUMBER',
                   data=train.groupby('END_YEAR')['MEMBERSHIP_NUMBER'].count().reset_index(),
                   ax=ax2, label='END_YEAR', color='orange', legend=False)
plt.ylabel('Churn')
plt.xlabel('START/END_YEAR')
handler1, label1 = ax1.get_legend_handles_labels()
handler2, label2 = ax2.get_legend_handles_labels()
ax1.legend(handler1 + handler2, label1 + label2, loc=2, borderaxespad=0.0,
           bbox_to_anchor=(1.0, 1.05), frameon=False)
plt.suptitle('Number of rows by year fields')
for year_field in ('START_YEAR', 'END_YEAR'):
    display(train.groupby(year_field)['MEMBERSHIP_NUMBER'].count().sort_index()
            .reset_index().rename(columns={'MEMBERSHIP_NUMBER': 'N_ROWS'}))
df_cancelled = train.query(f'{TARGET} == 1')
plt.figure(figsize=(16.5, 8))
for i, year in enumerate(years):
    plt.subplot(2, 4, i + 1)
    ax = sns.countplot(x='START_YEAR', data=train.query(f'END_YEAR == {year}'),
                       order=[y for y in years if y <= year])
    ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
    plt.xlabel('')
    plt.ylabel('')
    plt.title(f'END_YEAR = {year}')
    plt.ylim((0, 550))
plt.suptitle('When did cancelled members join the club?')
display(pd.pivot_table(index='START_YEAR', columns='END_YEAR', values=TARGET, data=train,
                       aggfunc=np.sum, fill_value=0, margins=True).astype(int))
code
48167508/cell_8
[ "text_html_output_1.png", "image_output_1.png" ]
from datetime import timedelta

import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

TARGET = 'Is_CANCELLED'
NA_CATEGORY = 'NA_'
train = pd.read_csv('/kaggle/input/techcom-ai-competition/train.csv')
test = pd.read_csv('/kaggle/input/techcom-ai-competition/test.csv')
sample_sub = pd.read_csv('/kaggle/input/techcom-ai-competition/sample_submission.csv')
train.rename(columns={'MEMBERSHIP_STATUS': 'Is_CANCELLED'}, inplace=True)
train[TARGET] = train[TARGET].map({'INFORCE': 0, 'CANCELLED': 1})
test['END_DATE'] = np.nan

def add_date_features(df: pd.DataFrame):
    df['START_DATE'] = pd.to_datetime(df['START_DATE'], format='%Y%m%d')
    df['START_YEAR'] = df['START_DATE'].dt.year
    df['START_MONTH'] = df['START_DATE'].dt.month
    df['START_YM'] = df['START_YEAR'] * 100 + df['START_MONTH']
    df['END_DATE'] = pd.to_datetime(df['END_DATE'], format='%Y%m%d')
    df['END_YEAR'] = df['END_DATE'].dt.year
    df['END_MONTH'] = df['END_DATE'].dt.month
    df['END_YM'] = df['END_YEAR'] * 100 + df['END_MONTH']
    df['N_DAYS'] = (df['END_DATE'] - df['START_DATE']) / timedelta(days=1)
    return df

train = add_date_features(train)
test = add_date_features(test)
display(train[['START_DATE', 'START_YEAR', 'START_MONTH', 'START_YM',
               'END_DATE', 'END_YEAR', 'END_MONTH', 'END_YM', 'N_DAYS', TARGET]])
display(test[['START_DATE', 'START_YEAR', 'START_MONTH', 'START_YM',
              'END_DATE', 'END_YEAR', 'END_MONTH', 'END_YM']])
code
48167508/cell_16
[ "text_plain_output_1.png" ]
from datetime import timedelta

import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

TARGET = 'Is_CANCELLED'
NA_CATEGORY = 'NA_'
train = pd.read_csv('/kaggle/input/techcom-ai-competition/train.csv')
test = pd.read_csv('/kaggle/input/techcom-ai-competition/test.csv')
sample_sub = pd.read_csv('/kaggle/input/techcom-ai-competition/sample_submission.csv')
train.rename(columns={'MEMBERSHIP_STATUS': 'Is_CANCELLED'}, inplace=True)
train[TARGET] = train[TARGET].map({'INFORCE': 0, 'CANCELLED': 1})
test['END_DATE'] = np.nan

def add_date_features(df: pd.DataFrame):
    df['START_DATE'] = pd.to_datetime(df['START_DATE'], format='%Y%m%d')
    df['START_YEAR'] = df['START_DATE'].dt.year
    df['START_MONTH'] = df['START_DATE'].dt.month
    df['START_YM'] = df['START_YEAR'] * 100 + df['START_MONTH']
    df['END_DATE'] = pd.to_datetime(df['END_DATE'], format='%Y%m%d')
    df['END_YEAR'] = df['END_DATE'].dt.year
    df['END_MONTH'] = df['END_DATE'].dt.month
    df['END_YM'] = df['END_YEAR'] * 100 + df['END_MONTH']
    df['N_DAYS'] = (df['END_DATE'] - df['START_DATE']) / timedelta(days=1)
    return df

train = add_date_features(train)
test = add_date_features(test)
years = list(range(2006, 2014))
plt.figure(figsize=(5, 4.5))
ax1 = sns.lineplot(x='START_YEAR', y='MEMBERSHIP_NUMBER',
                   data=train.groupby('START_YEAR')['MEMBERSHIP_NUMBER'].count().reset_index(),
                   label='START_YEAR')
plt.ylabel('Join')
ax2 = ax1.twinx()
ax2 = sns.lineplot(x='END_YEAR', y='MEMBERSHIP_NUMBER',
                   data=train.groupby('END_YEAR')['MEMBERSHIP_NUMBER'].count().reset_index(),
                   ax=ax2, label='END_YEAR', color='orange', legend=False)
plt.ylabel('Churn')
plt.xlabel('START/END_YEAR')
handler1, label1 = ax1.get_legend_handles_labels()
handler2, label2 = ax2.get_legend_handles_labels()
ax1.legend(handler1 + handler2, label1 + label2, loc=2, borderaxespad=0.0,
           bbox_to_anchor=(1.0, 1.05), frameon=False)
plt.suptitle('Number of rows by year fields')
for year_field in ('START_YEAR', 'END_YEAR'):
    display(train.groupby(year_field)['MEMBERSHIP_NUMBER'].count().sort_index()
            .reset_index().rename(columns={'MEMBERSHIP_NUMBER': 'N_ROWS'}))
df_cancelled = train.query(f'{TARGET} == 1')
plt.figure(figsize=(12, 5))
sns.boxplot(x='START_YEAR', y='N_DAYS', data=df_cancelled)
plt.title('N_DAYS by START_YEAR')
code
48167508/cell_10
[ "text_html_output_2.png", "text_html_output_1.png", "text_html_output_3.png" ]
from datetime import timedelta

import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

TARGET = 'Is_CANCELLED'
NA_CATEGORY = 'NA_'
train = pd.read_csv('/kaggle/input/techcom-ai-competition/train.csv')
test = pd.read_csv('/kaggle/input/techcom-ai-competition/test.csv')
sample_sub = pd.read_csv('/kaggle/input/techcom-ai-competition/sample_submission.csv')
train.rename(columns={'MEMBERSHIP_STATUS': 'Is_CANCELLED'}, inplace=True)
train[TARGET] = train[TARGET].map({'INFORCE': 0, 'CANCELLED': 1})
test['END_DATE'] = np.nan

def add_date_features(df: pd.DataFrame):
    df['START_DATE'] = pd.to_datetime(df['START_DATE'], format='%Y%m%d')
    df['START_YEAR'] = df['START_DATE'].dt.year
    df['START_MONTH'] = df['START_DATE'].dt.month
    df['START_YM'] = df['START_YEAR'] * 100 + df['START_MONTH']
    df['END_DATE'] = pd.to_datetime(df['END_DATE'], format='%Y%m%d')
    df['END_YEAR'] = df['END_DATE'].dt.year
    df['END_MONTH'] = df['END_DATE'].dt.month
    df['END_YM'] = df['END_YEAR'] * 100 + df['END_MONTH']
    df['N_DAYS'] = (df['END_DATE'] - df['START_DATE']) / timedelta(days=1)
    return df

train = add_date_features(train)
test = add_date_features(test)
print(f'Data collection period...\n'
      f'train.csv:\n START_DATE:\n  From. {train.START_DATE.min()} To. {train.START_DATE.max()}\n'
      f' END_DATE:\n  From. {train.END_DATE.min()} To. {train.END_DATE.max()}\n'
      f'test.csv\n START_DATE:\n  From. {test.START_DATE.min()} To. {test.START_DATE.max()}')
code
48167508/cell_5
[ "image_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

TARGET = 'Is_CANCELLED'
NA_CATEGORY = 'NA_'
train = pd.read_csv('/kaggle/input/techcom-ai-competition/train.csv')
test = pd.read_csv('/kaggle/input/techcom-ai-competition/test.csv')
sample_sub = pd.read_csv('/kaggle/input/techcom-ai-competition/sample_submission.csv')
train.rename(columns={'MEMBERSHIP_STATUS': 'Is_CANCELLED'}, inplace=True)
train[TARGET] = train[TARGET].map({'INFORCE': 0, 'CANCELLED': 1})
test['END_DATE'] = np.nan
display(train)
display(test)
display(sample_sub)
code
2041206/cell_42
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/winemag-data_first150k.csv')
df = data.dropna(subset=['description'])  # drop all NaNs
df_sorted = df.sort_values(by='points', ascending=True)  # sort by points
num_of_wines = df_sorted.shape[0]  # number of wines
worst = df_sorted.head(int(0.25 * num_of_wines))  # 25% of worst wines listed
best = df_sorted.tail(int(0.25 * num_of_wines))  # 25% of best wines listed
best_sorted = best.sort_values(by='price', ascending=True)  # sort by price
num_best = best.shape[0]  # number of best wines
cheapestngood = best_sorted.head(int(0.25 * num_of_wines))
cheapngoodest = cheapestngood.sort_values(by='points', ascending=False)
topareas = cheapestngood['region_1'].value_counts().head(10)
topareas
code
2041206/cell_21
[ "text_plain_output_1.png" ]
import operator

import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.feature_extraction.text import TfidfVectorizer

data = pd.read_csv('../input/winemag-data_first150k.csv')
df = data.dropna(subset=['description'])  # drop all NaNs
df_sorted = df.sort_values(by='points', ascending=True)  # sort by points
num_of_wines = df_sorted.shape[0]  # number of wines
worst = df_sorted.head(int(0.25 * num_of_wines))  # 25% of worst wines listed
best = df_sorted.tail(int(0.25 * num_of_wines))  # 25% of best wines listed
vectorizer = TfidfVectorizer(stop_words='english', analyzer='word')
X1 = vectorizer.fit_transform(best['description'])
idf = vectorizer.idf_
goodlist = vectorizer.vocabulary_
X = vectorizer.fit_transform(worst['description'])
idf = vectorizer.idf_
not_so_good_list = vectorizer.vocabulary_
sorted_good = sorted(goodlist.items(), key=operator.itemgetter(0))
sorted_bad = sorted(not_so_good_list.items(), key=operator.itemgetter(1), reverse=True)
sorted_good
code
2041206/cell_9
[ "text_plain_output_1.png" ]
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/winemag-data_first150k.csv')
num_bins = 20
# note: `normed` and `mlab.normpdf` were removed in newer Matplotlib releases
# (use `density=True` and scipy.stats.norm.pdf there); kept as written here.
n, bins, patches = plt.hist(data['points'], num_bins, normed=1, facecolor='blue', alpha=0.5)
plt.title('Distribution of Wine Scores')
plt.xlabel('Score out of 100')
plt.ylabel('Frequency')
mu = 88  # mean of distribution
sigma = 3  # standard deviation of distribution
y = mlab.normpdf(bins, mu, sigma)  # create the y line
plt.plot(bins, y, 'r--')
plt.scatter(data['points'], data['price'])
code
2041206/cell_33
[ "text_plain_output_1.png" ]
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn import linear_model
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder

data = pd.read_csv('../input/winemag-data_first150k.csv')
num_bins = 20
n, bins, patches = plt.hist(data['points'], num_bins, normed=1, facecolor='blue', alpha=0.5)
plt.title('Distribution of Wine Scores')
plt.xlabel('Score out of 100')
plt.ylabel('Frequency')
mu = 88  # mean of distribution
sigma = 3  # standard deviation of distribution
y = mlab.normpdf(bins, mu, sigma)  # create the y line
plt.plot(bins, y, 'r--')
df = data.dropna(subset=['description'])  # drop all NaNs
df_sorted = df.sort_values(by='points', ascending=True)  # sort by points
num_of_wines = df_sorted.shape[0]  # number of wines
worst = df_sorted.head(int(0.25 * num_of_wines))  # 25% of worst wines listed
best = df_sorted.tail(int(0.25 * num_of_wines))  # 25% of best wines listed
vectorizer = TfidfVectorizer(stop_words='english', analyzer='word')
X1 = vectorizer.fit_transform(best['description'])
idf = vectorizer.idf_
goodlist = vectorizer.vocabulary_
X = vectorizer.fit_transform(worst['description'])
idf = vectorizer.idf_
not_so_good_list = vectorizer.vocabulary_
le = LabelEncoder()
y = le.fit_transform(best['variety'])
# x_train/x_test/y_train/y_test for this classifier come from an earlier cell's
# split (not reproduced here), so this block does not run standalone.
clf = LogisticRegression()
clf.fit(x_train, y_train)
pred = clf.predict(x_test)
reg = linear_model.Ridge(alpha=0.5, solver='sag')
y = data['points']
x = vectorizer.fit_transform(data['description'])
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=32)
reg.fit(x_train, y_train)
pred = reg.predict(x_test)
r2_score(y_test, pred)
code
2041206/cell_44
[ "text_html_output_1.png" ]
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import nltk
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn import linear_model
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import LabelEncoder

data = pd.read_csv('../input/winemag-data_first150k.csv')
num_bins = 20
n, bins, patches = plt.hist(data['points'], num_bins, normed=1, facecolor='blue', alpha=0.5)
plt.title('Distribution of Wine Scores')
plt.xlabel('Score out of 100')
plt.ylabel('Frequency')
mu = 88  # mean of distribution
sigma = 3  # standard deviation of distribution
y = mlab.normpdf(bins, mu, sigma)  # create the y line
plt.plot(bins, y, 'r--')
df = data.dropna(subset=['description'])  # drop all NaNs
df_sorted = df.sort_values(by='points', ascending=True)  # sort by points
num_of_wines = df_sorted.shape[0]  # number of wines
worst = df_sorted.head(int(0.25 * num_of_wines))  # 25% of worst wines listed
best = df_sorted.tail(int(0.25 * num_of_wines))  # 25% of best wines listed
vectorizer = TfidfVectorizer(stop_words='english', analyzer='word')
X1 = vectorizer.fit_transform(best['description'])
idf = vectorizer.idf_
goodlist = vectorizer.vocabulary_
X = vectorizer.fit_transform(worst['description'])
idf = vectorizer.idf_
not_so_good_list = vectorizer.vocabulary_
le = LabelEncoder()
y = le.fit_transform(best['variety'])
reg = linear_model.Ridge(alpha=0.5, solver='sag')
y = data['points']
x = vectorizer.fit_transform(data['description'])
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=32)
reg.fit(x_train, y_train)
pd.set_option('display.max_colwidth', 1500)
vectorizer = TfidfVectorizer(stop_words='english', binary=False, max_df=0.95, min_df=0.15,
                             ngram_range=(1, 2), use_idf=False, norm=None)
doc_vectors = vectorizer.fit_transform(data['description'])
print(doc_vectors.shape)
print(vectorizer.get_feature_names())
code
2041206/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
import operator

import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.feature_extraction.text import TfidfVectorizer

data = pd.read_csv('../input/winemag-data_first150k.csv')
df = data.dropna(subset=['description'])  # drop all NaNs
df_sorted = df.sort_values(by='points', ascending=True)  # sort by points
num_of_wines = df_sorted.shape[0]  # number of wines
worst = df_sorted.head(int(0.25 * num_of_wines))  # 25% of worst wines listed
best = df_sorted.tail(int(0.25 * num_of_wines))  # 25% of best wines listed
vectorizer = TfidfVectorizer(stop_words='english', analyzer='word')
X1 = vectorizer.fit_transform(best['description'])
idf = vectorizer.idf_
goodlist = vectorizer.vocabulary_
X = vectorizer.fit_transform(worst['description'])
idf = vectorizer.idf_
not_so_good_list = vectorizer.vocabulary_
sorted_good = sorted(goodlist.items(), key=operator.itemgetter(0))
sorted_bad = sorted(not_so_good_list.items(), key=operator.itemgetter(1), reverse=True)
sorted_bad
code
2041206/cell_39
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/winemag-data_first150k.csv')
df = data.dropna(subset=['description'])  # drop all NaNs
df_sorted = df.sort_values(by='points', ascending=True)  # sort by points
num_of_wines = df_sorted.shape[0]  # number of wines
worst = df_sorted.head(int(0.25 * num_of_wines))  # 25% of worst wines listed
best = df_sorted.tail(int(0.25 * num_of_wines))  # 25% of best wines listed
best_sorted = best.sort_values(by='price', ascending=True)  # sort by price
num_best = best.shape[0]  # number of best wines
cheapestngood = best_sorted.head(int(0.25 * num_of_wines))
cheapngoodest = cheapestngood.sort_values(by='points', ascending=False)
cheapestngood['region_1'].value_counts()
code
2041206/cell_26
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

# x_train/x_test/y_train/y_test are assumed to come from an earlier cell's split.
clf = LogisticRegression()
clf.fit(x_train, y_train)
pred = clf.predict(x_test)
accuracy_score(y_test, pred)
code
2041206/cell_48
[ "text_plain_output_1.png" ]
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import nltk
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn import linear_model
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import LabelEncoder

data = pd.read_csv('../input/winemag-data_first150k.csv')
num_bins = 20
n, bins, patches = plt.hist(data['points'], num_bins, normed=1, facecolor='blue', alpha=0.5)
plt.title('Distribution of Wine Scores')
plt.xlabel('Score out of 100')
plt.ylabel('Frequency')
mu = 88  # mean of distribution
sigma = 3  # standard deviation of distribution
y = mlab.normpdf(bins, mu, sigma)  # create the y line
plt.plot(bins, y, 'r--')
df = data.dropna(subset=['description'])  # drop all NaNs
df_sorted = df.sort_values(by='points', ascending=True)  # sort by points
num_of_wines = df_sorted.shape[0]  # number of wines
worst = df_sorted.head(int(0.25 * num_of_wines))  # 25% of worst wines listed
best = df_sorted.tail(int(0.25 * num_of_wines))  # 25% of best wines listed
vectorizer = TfidfVectorizer(stop_words='english', analyzer='word')
X1 = vectorizer.fit_transform(best['description'])
idf = vectorizer.idf_
goodlist = vectorizer.vocabulary_
X = vectorizer.fit_transform(worst['description'])
idf = vectorizer.idf_
not_so_good_list = vectorizer.vocabulary_
le = LabelEncoder()
y = le.fit_transform(best['variety'])
reg = linear_model.Ridge(alpha=0.5, solver='sag')
y = data['points']
x = vectorizer.fit_transform(data['description'])
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=32)
reg.fit(x_train, y_train)
pd.set_option('display.max_colwidth', 1500)
vectorizer = TfidfVectorizer(stop_words='english', binary=False, max_df=0.95, min_df=0.15,
                             ngram_range=(1, 2), use_idf=False, norm=None)
doc_vectors = vectorizer.fit_transform(data['description'])

def comp_description(query, results_number=20):
    # score the query description against every wine description
    results = []
    q_vector = vectorizer.transform([query])
    results.append(cosine_similarity(q_vector, doc_vectors.toarray()))
    f = 0
    elem_list = []
    for i in results[:10]:
        for elem in i[0]:
            elem_list.append(elem)
            f += 1

comp_description('This wine highlights how the power of Lake County’s Red Hills seamlessly compliments the elegance and aromatic freshness of the High Valley. Aromas of plum, allspice and clove develop into flavors of fresh dark cherry and cedar on the palate. The Red Hills’ fine tannins provide a smoothly textured palate sensation from start to finish. Fresh acidity from the High Valley culminates in a bright finish of cherry with a gentle note of French oak.')
code
2041206/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
from subprocess import check_output

import numpy as np
import pandas as pd

print(check_output(['ls', '../input']).decode('utf8'))
code
2041206/cell_50
[ "text_plain_output_1.png" ]
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import nltk
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn import linear_model
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import LabelEncoder

data = pd.read_csv('../input/winemag-data_first150k.csv')
num_bins = 20
n, bins, patches = plt.hist(data['points'], num_bins, normed=1, facecolor='blue', alpha=0.5)
plt.title('Distribution of Wine Scores')
plt.xlabel('Score out of 100')
plt.ylabel('Frequency')
mu = 88  # mean of distribution
sigma = 3  # standard deviation of distribution
y = mlab.normpdf(bins, mu, sigma)  # create the y line
plt.plot(bins, y, 'r--')
df = data.dropna(subset=['description'])  # drop all NaNs
df_sorted = df.sort_values(by='points', ascending=True)  # sort by points
num_of_wines = df_sorted.shape[0]  # number of wines
worst = df_sorted.head(int(0.25 * num_of_wines))  # 25% of worst wines listed
best = df_sorted.tail(int(0.25 * num_of_wines))  # 25% of best wines listed
vectorizer = TfidfVectorizer(stop_words='english', analyzer='word')
X1 = vectorizer.fit_transform(best['description'])
idf = vectorizer.idf_
goodlist = vectorizer.vocabulary_
X = vectorizer.fit_transform(worst['description'])
idf = vectorizer.idf_
not_so_good_list = vectorizer.vocabulary_
le = LabelEncoder()
y = le.fit_transform(best['variety'])
reg = linear_model.Ridge(alpha=0.5, solver='sag')
y = data['points']
x = vectorizer.fit_transform(data['description'])
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=32)
reg.fit(x_train, y_train)
pd.set_option('display.max_colwidth', 1500)
vectorizer = TfidfVectorizer(stop_words='english', binary=False, max_df=0.95, min_df=0.15,
                             ngram_range=(1, 2), use_idf=False, norm=None)
doc_vectors = vectorizer.fit_transform(data['description'])

def comp_description(query, results_number=20):
    # score the query description against every wine description
    results = []
    q_vector = vectorizer.transform([query])
    results.append(cosine_similarity(q_vector, doc_vectors.toarray()))
    f = 0
    elem_list = []
    for i in results[:10]:
        for elem in i[0]:
            elem_list.append(elem)
            f += 1

comp_description('This wine is very bad, do not drink.')
code
2041206/cell_7
[ "text_plain_output_1.png" ]
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/winemag-data_first150k.csv')
num_bins = 20
n, bins, patches = plt.hist(data['points'], num_bins, normed=1, facecolor='blue', alpha=0.5)
plt.title('Distribution of Wine Scores')
plt.xlabel('Score out of 100')
plt.ylabel('Frequency')
mu = 88  # mean of distribution
sigma = 3  # standard deviation of distribution
y = mlab.normpdf(bins, mu, sigma)  # create the y line
plt.plot(bins, y, 'r--')
code
2041206/cell_49
[ "text_plain_output_1.png" ]
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import nltk
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn import linear_model
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import LabelEncoder

data = pd.read_csv('../input/winemag-data_first150k.csv')
num_bins = 20
n, bins, patches = plt.hist(data['points'], num_bins, normed=1, facecolor='blue', alpha=0.5)
plt.title('Distribution of Wine Scores')
plt.xlabel('Score out of 100')
plt.ylabel('Frequency')
mu = 88  # mean of distribution
sigma = 3  # standard deviation of distribution
y = mlab.normpdf(bins, mu, sigma)  # create the y line
plt.plot(bins, y, 'r--')
df = data.dropna(subset=['description'])  # drop all NaNs
df_sorted = df.sort_values(by='points', ascending=True)  # sort by points
num_of_wines = df_sorted.shape[0]  # number of wines
worst = df_sorted.head(int(0.25 * num_of_wines))  # 25% of worst wines listed
best = df_sorted.tail(int(0.25 * num_of_wines))  # 25% of best wines listed
vectorizer = TfidfVectorizer(stop_words='english', analyzer='word')
X1 = vectorizer.fit_transform(best['description'])
idf = vectorizer.idf_
goodlist = vectorizer.vocabulary_
X = vectorizer.fit_transform(worst['description'])
idf = vectorizer.idf_
not_so_good_list = vectorizer.vocabulary_
le = LabelEncoder()
y = le.fit_transform(best['variety'])
reg = linear_model.Ridge(alpha=0.5, solver='sag')
y = data['points']
x = vectorizer.fit_transform(data['description'])
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=32)
reg.fit(x_train, y_train)
pd.set_option('display.max_colwidth', 1500)
vectorizer = TfidfVectorizer(stop_words='english', binary=False, max_df=0.95, min_df=0.15,
                             ngram_range=(1, 2), use_idf=False, norm=None)
doc_vectors = vectorizer.fit_transform(data['description'])

def comp_description(query, results_number=20):
    # score the query description against every wine description
    results = []
    q_vector = vectorizer.transform([query])
    results.append(cosine_similarity(q_vector, doc_vectors.toarray()))
    f = 0
    elem_list = []
    for i in results[:10]:
        for elem in i[0]:
            elem_list.append(elem)
            f += 1

comp_description('On the nose are those awful love-heart candies, but the palate is nothing but Nesquik strawberry powder. This alcoholic Powerade is what gives box wine a bad name. Pair with BBQ chicken')
code
2041206/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.feature_extraction.text import TfidfVectorizer

data = pd.read_csv('../input/winemag-data_first150k.csv')
df = data.dropna(subset=['description'])  # drop all NaNs
df_sorted = df.sort_values(by='points', ascending=True)  # sort by points
num_of_wines = df_sorted.shape[0]  # number of wines
worst = df_sorted.head(int(0.25 * num_of_wines))  # 25% of worst wines listed
best = df_sorted.tail(int(0.25 * num_of_wines))  # 25% of best wines listed
vectorizer = TfidfVectorizer(stop_words='english', analyzer='word')
X1 = vectorizer.fit_transform(best['description'])
idf = vectorizer.idf_
goodlist = vectorizer.vocabulary_
goodlist
code
2041206/cell_38
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/winemag-data_first150k.csv')
df = data.dropna(subset=['description'])  # drop all NaNs
df_sorted = df.sort_values(by='points', ascending=True)  # sort by points
num_of_wines = df_sorted.shape[0]  # number of wines
worst = df_sorted.head(int(0.25 * num_of_wines))  # 25% of worst wines listed
best = df_sorted.tail(int(0.25 * num_of_wines))  # 25% of best wines listed
best_sorted = best.sort_values(by='price', ascending=True)  # sort by price
num_best = best.shape[0]  # number of best wines
cheapestngood = best_sorted.head(int(0.25 * num_of_wines))
cheapngoodest = cheapestngood.sort_values(by='points', ascending=False)
cheapngoodest.head(10)
code
2041206/cell_47
[ "text_plain_output_1.png" ]
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import nltk
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn import linear_model
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import LabelEncoder

data = pd.read_csv('../input/winemag-data_first150k.csv')
num_bins = 20
n, bins, patches = plt.hist(data['points'], num_bins, normed=1, facecolor='blue', alpha=0.5)
plt.title('Distribution of Wine Scores')
plt.xlabel('Score out of 100')
plt.ylabel('Frequency')
mu = 88  # mean of distribution
sigma = 3  # standard deviation of distribution
y = mlab.normpdf(bins, mu, sigma)  # create the y line
plt.plot(bins, y, 'r--')
df = data.dropna(subset=['description'])  # drop all NaNs
df_sorted = df.sort_values(by='points', ascending=True)  # sort by points
num_of_wines = df_sorted.shape[0]  # number of wines
worst = df_sorted.head(int(0.25 * num_of_wines))  # 25% of worst wines listed
best = df_sorted.tail(int(0.25 * num_of_wines))  # 25% of best wines listed
vectorizer = TfidfVectorizer(stop_words='english', analyzer='word')
X1 = vectorizer.fit_transform(best['description'])
idf = vectorizer.idf_
goodlist = vectorizer.vocabulary_
X = vectorizer.fit_transform(worst['description'])
idf = vectorizer.idf_
not_so_good_list = vectorizer.vocabulary_
le = LabelEncoder()
y = le.fit_transform(best['variety'])
reg = linear_model.Ridge(alpha=0.5, solver='sag')
y = data['points']
x = vectorizer.fit_transform(data['description'])
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=32)
reg.fit(x_train, y_train)
pd.set_option('display.max_colwidth', 1500)
vectorizer = TfidfVectorizer(stop_words='english', binary=False, max_df=0.95, min_df=0.15,
                             ngram_range=(1, 2), use_idf=False, norm=None)
doc_vectors = vectorizer.fit_transform(data['description'])

def comp_description(query, results_number=20):
    # score the query description against every wine description
    results = []
    q_vector = vectorizer.transform([query])
    results.append(cosine_similarity(q_vector, doc_vectors.toarray()))
    f = 0
    elem_list = []
    for i in results[:10]:
        for elem in i[0]:
            elem_list.append(elem)
            f += 1

comp_description('Delicate pink hue with strawberry flavors; easy to drink and very refreshing. Perfect with lighter foods. Serve chilled.')
code
2041206/cell_17
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.feature_extraction.text import TfidfVectorizer

data = pd.read_csv('../input/winemag-data_first150k.csv')
df = data.dropna(subset=['description'])  # drop all NaNs
df_sorted = df.sort_values(by='points', ascending=True)  # sort by points
num_of_wines = df_sorted.shape[0]  # number of wines
worst = df_sorted.head(int(0.25 * num_of_wines))  # 25% of worst wines listed
best = df_sorted.tail(int(0.25 * num_of_wines))  # 25% of best wines listed
vectorizer = TfidfVectorizer(stop_words='english', analyzer='word')
X1 = vectorizer.fit_transform(best['description'])
idf = vectorizer.idf_
goodlist = vectorizer.vocabulary_
X = vectorizer.fit_transform(worst['description'])
idf = vectorizer.idf_
not_so_good_list = vectorizer.vocabulary_
not_so_good_list
code
2041206/cell_31
[ "text_plain_output_1.png" ]
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn import linear_model
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder

data = pd.read_csv('../input/winemag-data_first150k.csv')
num_bins = 20
n, bins, patches = plt.hist(data['points'], num_bins, normed=1, facecolor='blue', alpha=0.5)
plt.title('Distribution of Wine Scores')
plt.xlabel('Score out of 100')
plt.ylabel('Frequency')
mu = 88  # mean of distribution
sigma = 3  # standard deviation of distribution
y = mlab.normpdf(bins, mu, sigma)  # create the y line
plt.plot(bins, y, 'r--')
df = data.dropna(subset=['description'])  # drop all NaNs
df_sorted = df.sort_values(by='points', ascending=True)  # sort by points
num_of_wines = df_sorted.shape[0]  # number of wines
worst = df_sorted.head(int(0.25 * num_of_wines))  # 25% of worst wines listed
best = df_sorted.tail(int(0.25 * num_of_wines))  # 25% of best wines listed
vectorizer = TfidfVectorizer(stop_words='english', analyzer='word')
X1 = vectorizer.fit_transform(best['description'])
idf = vectorizer.idf_
goodlist = vectorizer.vocabulary_
X = vectorizer.fit_transform(worst['description'])
idf = vectorizer.idf_
not_so_good_list = vectorizer.vocabulary_
le = LabelEncoder()
y = le.fit_transform(best['variety'])
reg = linear_model.Ridge(alpha=0.5, solver='sag')
y = data['points']
x = vectorizer.fit_transform(data['description'])
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=32)
reg.fit(x_train, y_train)
code
2041206/cell_46
[ "text_plain_output_1.png" ]
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import nltk
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn import linear_model
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.preprocessing import LabelEncoder

data = pd.read_csv('../input/winemag-data_first150k.csv')
num_bins = 20
n, bins, patches = plt.hist(data['points'], num_bins, normed=1, facecolor='blue', alpha=0.5)
plt.title('Distribution of Wine Scores')
plt.xlabel('Score out of 100')
plt.ylabel('Frequency')
mu = 88  # mean of distribution
sigma = 3  # standard deviation of distribution
y = mlab.normpdf(bins, mu, sigma)  # create the y line
plt.plot(bins, y, 'r--')
df = data.dropna(subset=['description'])  # drop all NaNs
df_sorted = df.sort_values(by='points', ascending=True)  # sort by points
num_of_wines = df_sorted.shape[0]  # number of wines
worst = df_sorted.head(int(0.25 * num_of_wines))  # 25% of worst wines listed
best = df_sorted.tail(int(0.25 * num_of_wines))  # 25% of best wines listed
vectorizer = TfidfVectorizer(stop_words='english', analyzer='word')
X1 = vectorizer.fit_transform(best['description'])
idf = vectorizer.idf_
goodlist = vectorizer.vocabulary_
X = vectorizer.fit_transform(worst['description'])
idf = vectorizer.idf_
not_so_good_list = vectorizer.vocabulary_
le = LabelEncoder()
y = le.fit_transform(best['variety'])
reg = linear_model.Ridge(alpha=0.5, solver='sag')
y = data['points']
x = vectorizer.fit_transform(data['description'])
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=32)
reg.fit(x_train, y_train)
pd.set_option('display.max_colwidth', 1500)
vectorizer = TfidfVectorizer(stop_words='english', binary=False, max_df=0.95, min_df=0.15,
                             ngram_range=(1, 2), use_idf=False, norm=None)
doc_vectors = vectorizer.fit_transform(data['description'])

def comp_description(query, results_number=20):
    # score the query description against every wine description
    results = []
    q_vector = vectorizer.transform([query])
    results.append(cosine_similarity(q_vector, doc_vectors.toarray()))
    f = 0
    elem_list = []
    for i in results[:10]:
        for elem in i[0]:
            elem_list.append(elem)
            f += 1

comp_description('Bright, fresh fruit aromas of cherry, raspberry, and blueberry.Youthfully with lots of sweet fruit on the palate with hints of spice and vanilla.')
code
2041206/cell_37
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/winemag-data_first150k.csv')
df = data.dropna(subset=['description'])  # drop all NaNs
df_sorted = df.sort_values(by='points', ascending=True)  # sort by points
num_of_wines = df_sorted.shape[0]  # number of wines
worst = df_sorted.head(int(0.25 * num_of_wines))  # 25% of worst wines listed
best = df_sorted.tail(int(0.25 * num_of_wines))  # 25% of best wines listed
best_sorted = best.sort_values(by='price', ascending=True)  # sort by price
num_best = best.shape[0]  # number of best wines
cheapestngood = best_sorted.head(int(0.25 * num_of_wines))
cheapngoodest = cheapestngood.sort_values(by='points', ascending=False)
cheapestngood.head(10)
code
2041206/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/winemag-data_first150k.csv')
data.head(5)
code
72077953/cell_25
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from xgboost import plot_tree

# X_train/y_train/X_valid are assumed to come from an earlier cell's split.
model = RandomForestRegressor(random_state=1)
model.fit(X_train, y_train)
preds_valid = model.predict(X_valid)
# note: xgboost.plot_tree expects an XGBoost model, so calling it on a
# scikit-learn RandomForestRegressor fails (consistent with the stderr output).
plot_tree(model, rankdir='LR', num_trees=1)
code
72077953/cell_4
[ "application_vnd.jupyter.stderr_output_9.png", "application_vnd.jupyter.stderr_output_7.png", "text_plain_output_4.png", "text_plain_output_6.png", "application_vnd.jupyter.stderr_output_3.png", "application_vnd.jupyter.stderr_output_5.png", "text_plain_output_8.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
train.head()
code
72077953/cell_34
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
from sklearn.preprocessing import OrdinalEncoder
from xgboost import XGBRegressor
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
features = train.drop(['target'], axis=1)
object_cols = [col for col in features.columns if 'cat' in col]
X = features.copy()
X_test = test.copy()
ordinal_encoder = OrdinalEncoder()
X[object_cols] = ordinal_encoder.fit_transform(X[object_cols])
X_test[object_cols] = ordinal_encoder.transform(X_test[object_cols])
X.isna().sum()
model = XGBRegressor(random_state=42, n_jobs=-1)  # get_booster() below is an XGBoost API, so an XGBRegressor is used here
model.fit(X_train, y_train)
preds_valid = model.predict(X_valid)
trees = model.get_booster().get_dump()
importance_df = pd.DataFrame({'feature': X.columns, 'importance': model.feature_importances_}).sort_values('importance', ascending=False)

def rmse(a, b):
    return mean_squared_error(a, b, squared=False)

def train_and_evaluate(X_train, y_train, X_valid, y_valid, **params):
    model = XGBRegressor(random_state=42, n_jobs=-1, **params)
    model.fit(X_train, y_train)
    train_rmse = rmse(model.predict(X_train), y_train)
    val_rmse = rmse(model.predict(X_valid), y_valid)
    return (model, train_rmse, val_rmse)

kfold = KFold(n_splits=5)
models = []
for train_idxs, val_idxs in kfold.split(X):
    X_train, y_train = (X.iloc[train_idxs], y.iloc[train_idxs])
    X_valid, y_valid = (X.iloc[val_idxs], y.iloc[val_idxs])
    model, train_rmse, val_rmse = train_and_evaluate(X_train, y_train, X_valid, y_valid, max_depth=4, n_estimators=20)
    models.append(model)
    print('Train RMSE: {}, Validation RMSE: {}'.format(train_rmse, val_rmse))
code
72077953/cell_23
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
features = train.drop(['target'], axis=1)
plt.hist(train.target.sample(2000))
code
72077953/cell_30
[ "image_output_1.png" ]
from sklearn.preprocessing import OrdinalEncoder
from xgboost import XGBRegressor
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
features = train.drop(['target'], axis=1)
object_cols = [col for col in features.columns if 'cat' in col]
X = features.copy()
X_test = test.copy()
ordinal_encoder = OrdinalEncoder()
X[object_cols] = ordinal_encoder.fit_transform(X[object_cols])
X_test[object_cols] = ordinal_encoder.transform(X_test[object_cols])
X.isna().sum()
model = XGBRegressor(random_state=42, n_jobs=-1)  # get_booster() below is an XGBoost API, so an XGBRegressor is used here
model.fit(X_train, y_train)
preds_valid = model.predict(X_valid)
trees = model.get_booster().get_dump()
importance_df = pd.DataFrame({'feature': X.columns, 'importance': model.feature_importances_}).sort_values('importance', ascending=False)
plt.figure(figsize=(10, 6))
plt.title('Feature Importance')
sns.barplot(data=importance_df.head(10), x='importance', y='feature')
code
72077953/cell_6
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
import pandas_profiling as pp
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
pp.ProfileReport(train)
code
72077953/cell_29
[ "image_output_1.png" ]
from sklearn.preprocessing import OrdinalEncoder
from xgboost import XGBRegressor
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
features = train.drop(['target'], axis=1)
object_cols = [col for col in features.columns if 'cat' in col]
X = features.copy()
X_test = test.copy()
ordinal_encoder = OrdinalEncoder()
X[object_cols] = ordinal_encoder.fit_transform(X[object_cols])
X_test[object_cols] = ordinal_encoder.transform(X_test[object_cols])
X.isna().sum()
model = XGBRegressor(random_state=42, n_jobs=-1)  # get_booster() below is an XGBoost API, so an XGBRegressor is used here
model.fit(X_train, y_train)
preds_valid = model.predict(X_valid)
trees = model.get_booster().get_dump()
importance_df = pd.DataFrame({'feature': X.columns, 'importance': model.feature_importances_}).sort_values('importance', ascending=False)
importance_df.head()
code
72077953/cell_39
[ "image_output_1.png" ]
model.fit(X, y)
code
72077953/cell_26
[ "image_output_1.png" ]
from xgboost import XGBRegressor, plot_tree

# xgboost's plot_tree only accepts xgboost models, so an XGBRegressor is fitted here;
# the scikit-learn RandomForestRegressor used elsewhere cannot be passed to it
model = XGBRegressor(random_state=42, n_jobs=-1)
model.fit(X_train, y_train)
preds_valid = model.predict(X_valid)
plot_tree(model, rankdir='LR', num_trees=39)  # draw the 40th boosted tree, left to right
code
72077953/cell_41
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import OrdinalEncoder
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
features = train.drop(['target'], axis=1)
object_cols = [col for col in features.columns if 'cat' in col]
X = features.copy()
X_test = test.copy()
ordinal_encoder = OrdinalEncoder()
X[object_cols] = ordinal_encoder.fit_transform(X[object_cols])
X_test[object_cols] = ordinal_encoder.transform(X_test[object_cols])
X_test.head()
code
72077953/cell_11
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from sklearn.preprocessing import OrdinalEncoder
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
features = train.drop(['target'], axis=1)
object_cols = [col for col in features.columns if 'cat' in col]
X = features.copy()
X_test = test.copy()
ordinal_encoder = OrdinalEncoder()
X[object_cols] = ordinal_encoder.fit_transform(X[object_cols])
X_test[object_cols] = ordinal_encoder.transform(X_test[object_cols])
X.isna().sum()
code
72077953/cell_19
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_squared_error
from xgboost import XGBRegressor
model = XGBRegressor(random_state=42, n_jobs=-1, n_estimators=40, max_depth=4, learning_rate=0.5)
model.fit(X_train, y_train)
preds = model.predict(X_valid)
print(mean_squared_error(y_valid, preds, squared=False))  # RMSE on the validation split
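# a sketch, not from the original notebook: xgboost can also stop boosting once the
# validation RMSE stops improving; eval_set and early_stopping_rounds are arguments
# of the scikit-learn wrapper's fit (their exact placement varies by xgboost version,
# so treat this as an assumption)
model_es = XGBRegressor(random_state=42, n_jobs=-1, n_estimators=500, max_depth=4, learning_rate=0.5)
model_es.fit(X_train, y_train, eval_set=[(X_valid, y_valid)], early_stopping_rounds=10, verbose=False)
print(mean_squared_error(y_valid, model_es.predict(X_valid), squared=False))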
code
72077953/cell_18
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
model = RandomForestRegressor(random_state=1)
model.fit(X_train, y_train)
preds_valid = model.predict(X_valid)
print(mean_squared_error(y_valid, preds_valid, squared=False))
code
72077953/cell_8
[ "text_html_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
features = train.drop(['target'], axis=1)
features.head()
code
72077953/cell_24
[ "text_plain_output_1.png" ]
from matplotlib.pylab import rcParams
from xgboost import XGBRegressor, plot_tree

# xgboost's plot_tree only accepts xgboost models, so an XGBRegressor is fitted here;
# the scikit-learn RandomForestRegressor used elsewhere cannot be passed to it
model = XGBRegressor(random_state=42, n_jobs=-1)
model.fit(X_train, y_train)
preds_valid = model.predict(X_valid)
rcParams['figure.figsize'] = (20, 20)
plot_tree(model, rankdir='LR')  # draw the first boosted tree, left to right
code
72077953/cell_10
[ "text_html_output_1.png" ]
from sklearn.preprocessing import OrdinalEncoder
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
features = train.drop(['target'], axis=1)
object_cols = [col for col in features.columns if 'cat' in col]
X = features.copy()
X_test = test.copy()
ordinal_encoder = OrdinalEncoder()
X[object_cols] = ordinal_encoder.fit_transform(X[object_cols])
X_test[object_cols] = ordinal_encoder.transform(X_test[object_cols])
X.head()
code
72077953/cell_27
[ "image_output_1.png" ]
from xgboost import XGBRegressor

# get_booster()/get_dump() are XGBoost APIs, so an XGBRegressor is fitted here
# (a scikit-learn RandomForestRegressor has no get_booster method)
model = XGBRegressor(random_state=42, n_jobs=-1)
model.fit(X_train, y_train)
preds_valid = model.predict(X_valid)
trees = model.get_booster().get_dump()  # one text dump per boosted tree
print(len(trees))
print(trees[0])
code
72077953/cell_36
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
from sklearn.preprocessing import OrdinalEncoder
from xgboost import XGBRegressor
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
features = train.drop(['target'], axis=1)
object_cols = [col for col in features.columns if 'cat' in col]
X = features.copy()
X_test = test.copy()
ordinal_encoder = OrdinalEncoder()
X[object_cols] = ordinal_encoder.fit_transform(X[object_cols])
X_test[object_cols] = ordinal_encoder.transform(X_test[object_cols])
X.isna().sum()
model = XGBRegressor(random_state=42, n_jobs=-1)  # get_booster() below is an XGBoost API, so an XGBRegressor is used here
model.fit(X_train, y_train)
preds_valid = model.predict(X_valid)
trees = model.get_booster().get_dump()
importance_df = pd.DataFrame({'feature': X.columns, 'importance': model.feature_importances_}).sort_values('importance', ascending=False)

def rmse(a, b):
    return mean_squared_error(a, b, squared=False)

def train_and_evaluate(X_train, y_train, X_valid, y_valid, **params):
    model = XGBRegressor(random_state=42, n_jobs=-1, **params)
    model.fit(X_train, y_train)
    train_rmse = rmse(model.predict(X_train), y_train)
    val_rmse = rmse(model.predict(X_valid), y_valid)
    return (model, train_rmse, val_rmse)

kfold = KFold(n_splits=5)
models = []
for train_idxs, val_idxs in kfold.split(X):
    X_train, y_train = (X.iloc[train_idxs], y.iloc[train_idxs])
    X_valid, y_valid = (X.iloc[val_idxs], y.iloc[val_idxs])
    model, train_rmse, val_rmse = train_and_evaluate(X_train, y_train, X_valid, y_valid, max_depth=4, n_estimators=20)
    models.append(model)

def predict_avg(models, inputs):
    # average the predictions of the five fold models
    return np.mean([model.predict(inputs) for model in models], axis=0)

preds = predict_avg(models, X)
preds
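# a sketch, not from the original notebook: the averaged fold models would normally
# score the held-out test set; the 'id'/'target' column names follow the competition's
# sample submission and are an assumption here
test_preds = predict_avg(models, X_test)
submission = pd.DataFrame({'id': test.index, 'target': test_preds})
submission.to_csv('submission.csv', index=False)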
code
1009871/cell_21
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
train = pd.read_csv('train.csv')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
med_age = train.Age.median()
train.Age = train.Age.fillna(med_age)
train.Embarked = train.Embarked.fillna(train.Embarked.mode()[0])  # mode() returns a Series; take its first value
test = pd.read_csv('test.csv')
test.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
test.Age = test.Age.fillna(med_age)
mean_fare0 = test.pivot_table(index='Pclass', values='Fare')
mean_fare0
mean_fare = mean_fare0['Fare']  # mean fare per class, as a Series indexed by Pclass
test.Fare = test[['Fare', 'Pclass']].apply(lambda row: mean_fare[row['Pclass']] if pd.isnull(row['Fare']) else row['Fare'], axis=1)
code
1009871/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.stats import mode
import pandas as pd
train = pd.read_csv('train.csv')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
med_age = train.Age.median()
train.Age = train.Age.fillna(med_age)
train.Embarked = train.Embarked.fillna(train.Embarked.mode()[0])
mode(train.Embarked.tolist())[0][0]
train.Embarked.value_counts()
train.Embarked = train.Embarked.fillna('S')
train['Gender'] = train.Sex.map({'male': 1, 'female': 0})
train['Port'] = train.Embarked.map({'S': 1, 'C': 2, 'Q': 3})
train.drop(['Sex', 'Embarked'], axis=1, inplace=True)
cols = train.columns.tolist()
cols = cols[1:2] + cols[0:1] + cols[2:]  # move Survived to the front
train = train[cols]
train_data = train.values
code
1009871/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.stats import mode
import pandas as pd
train = pd.read_csv('train.csv')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
med_age = train.Age.median()
train.Age = train.Age.fillna(med_age)
train.Embarked = train.Embarked.fillna(train.Embarked.mode()[0])
mode(train.Embarked.tolist())[0][0]
train.Embarked.value_counts()
train.Embarked = train.Embarked.fillna('S')
train.info()
code
1009871/cell_25
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.stats import mode
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
train = pd.read_csv('train.csv')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
med_age = train.Age.median()
train.Age = train.Age.fillna(med_age)
train.Embarked = train.Embarked.fillna(train.Embarked.mode()[0])
mode(train.Embarked.tolist())[0][0]
train.Embarked.value_counts()
train.Embarked = train.Embarked.fillna('S')
train['Gender'] = train.Sex.map({'male': 1, 'female': 0})
train['Port'] = train.Embarked.map({'S': 1, 'C': 2, 'Q': 3})
train.drop(['Sex', 'Embarked'], axis=1, inplace=True)
cols = train.columns.tolist()
cols = cols[1:2] + cols[0:1] + cols[2:]
train = train[cols]
train_data = train.values
model = RandomForestClassifier(n_estimators=100)
model = model.fit(train_data[:, 2:], train_data[:, 0])
test = pd.read_csv('test.csv')
test.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
test.Age = test.Age.fillna(med_age)
mean_fare0 = test.pivot_table(index='Pclass', values='Fare')
mean_fare0
mean_fare = mean_fare0['Fare']  # mean fare per class, as a Series indexed by Pclass
test.Fare = test[['Fare', 'Pclass']].apply(lambda row: mean_fare[row['Pclass']] if pd.isnull(row['Fare']) else row['Fare'], axis=1)
test['Gender'] = test.Sex.map({'male': 1, 'female': 0})
test['Port'] = test.Embarked.map({'S': 1, 'C': 2, 'Q': 3})
test.drop(['Sex', 'Embarked'], axis=1, inplace=True)
test_data = test.values
output = model.predict(test_data[:, 1:])
code
1009871/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
train = pd.read_csv('train.csv')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
train.info()
code
1009871/cell_23
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
train = pd.read_csv('train.csv')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
med_age = train.Age.median()
train.Age = train.Age.fillna(med_age)
train.Embarked = train.Embarked.fillna(train.Embarked.mode()[0])
test = pd.read_csv('test.csv')
test.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
test.Age = test.Age.fillna(med_age)
mean_fare0 = test.pivot_table(index='Pclass', values='Fare')
mean_fare0
mean_fare = mean_fare0['Fare']  # mean fare per class, as a Series indexed by Pclass
test.Fare = test[['Fare', 'Pclass']].apply(lambda row: mean_fare[row['Pclass']] if pd.isnull(row['Fare']) else row['Fare'], axis=1)
test['Gender'] = test.Sex.map({'male': 1, 'female': 0})
test['Port'] = test.Embarked.map({'S': 1, 'C': 2, 'Q': 3})
test.drop(['Sex', 'Embarked'], axis=1, inplace=True)
code
1009871/cell_20
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
train = pd.read_csv('train.csv')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
med_age = train.Age.median()
train.Age = train.Age.fillna(med_age)
train.Embarked = train.Embarked.fillna(train.Embarked.mode()[0])
test = pd.read_csv('test.csv')
test.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
test.Age = test.Age.fillna(med_age)
mean_fare0 = test.pivot_table(index='Pclass', values='Fare')
mean_fare0
code
1009871/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
train = pd.read_csv('train.csv')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
med_age = train.Age.median()
train.Age = train.Age.fillna(med_age)
train.Embarked = train.Embarked.fillna(train.Embarked.mode()[0])
train.info()
code
1009871/cell_29
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.stats import mode
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import pandas as pd
train = pd.read_csv('train.csv')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
med_age = train.Age.median()
train.Age = train.Age.fillna(med_age)
train.Embarked = train.Embarked.fillna(train.Embarked.mode()[0])
mode(train.Embarked.tolist())[0][0]
train.Embarked.value_counts()
train.Embarked = train.Embarked.fillna('S')
train['Gender'] = train.Sex.map({'male': 1, 'female': 0})
train['Port'] = train.Embarked.map({'S': 1, 'C': 2, 'Q': 3})
train.drop(['Sex', 'Embarked'], axis=1, inplace=True)
cols = train.columns.tolist()
cols = cols[1:2] + cols[0:1] + cols[2:]
train = train[cols]
train_data = train.values
model = RandomForestClassifier(n_estimators=100)
model = model.fit(train_data[:, 2:], train_data[:, 0])
test = pd.read_csv('test.csv')
test.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
test.Age = test.Age.fillna(med_age)
mean_fare0 = test.pivot_table(index='Pclass', values='Fare')
mean_fare0
mean_fare = mean_fare0['Fare']  # mean fare per class, as a Series indexed by Pclass
test.Fare = test[['Fare', 'Pclass']].apply(lambda row: mean_fare[row['Pclass']] if pd.isnull(row['Fare']) else row['Fare'], axis=1)
test['Gender'] = test.Sex.map({'male': 1, 'female': 0})
test['Port'] = test.Embarked.map({'S': 1, 'C': 2, 'Q': 3})
test.drop(['Sex', 'Embarked'], axis=1, inplace=True)
test_data = test.values
output = model.predict(test_data[:, 1:])
result = np.c_[test_data[:, 0].astype(int), output.astype(int)]
result_df = pd.DataFrame(result[:, 0:2], columns=['Passenger_id', 'Survived'])
result_df.to_csv('result1.csv')
result_df.shape
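# a sketch, not from the original notebook: a quick 5-fold cross-validation gives a
# less optimistic estimate of the random forest's accuracy than a single fit; as
# above, column 0 of train_data is Survived and columns 2+ are the features
from sklearn.model_selection import cross_val_score
scores = cross_val_score(model, train_data[:, 2:], train_data[:, 0], cv=5)
print('mean CV accuracy:', scores.mean())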
code
1009871/cell_26
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.stats import mode
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import pandas as pd
train = pd.read_csv('train.csv')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
med_age = train.Age.median()
train.Age = train.Age.fillna(med_age)
train.Embarked = train.Embarked.fillna(train.Embarked.mode()[0])
mode(train.Embarked.tolist())[0][0]
train.Embarked.value_counts()
train.Embarked = train.Embarked.fillna('S')
train['Gender'] = train.Sex.map({'male': 1, 'female': 0})
train['Port'] = train.Embarked.map({'S': 1, 'C': 2, 'Q': 3})
train.drop(['Sex', 'Embarked'], axis=1, inplace=True)
cols = train.columns.tolist()
cols = cols[1:2] + cols[0:1] + cols[2:]
train = train[cols]
train_data = train.values
model = RandomForestClassifier(n_estimators=100)
model = model.fit(train_data[:, 2:], train_data[:, 0])
test = pd.read_csv('test.csv')
test.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
test.Age = test.Age.fillna(med_age)
mean_fare0 = test.pivot_table(index='Pclass', values='Fare')
mean_fare0
mean_fare = mean_fare0['Fare']  # mean fare per class, as a Series indexed by Pclass
test.Fare = test[['Fare', 'Pclass']].apply(lambda row: mean_fare[row['Pclass']] if pd.isnull(row['Fare']) else row['Fare'], axis=1)
test['Gender'] = test.Sex.map({'male': 1, 'female': 0})
test['Port'] = test.Embarked.map({'S': 1, 'C': 2, 'Q': 3})
test.drop(['Sex', 'Embarked'], axis=1, inplace=True)
test_data = test.values
output = model.predict(test_data[:, 1:])
result = np.c_[test_data[:, 0].astype(int), output.astype(int)]
code
1009871/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.stats import mode
import pandas as pd
train = pd.read_csv('train.csv')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
med_age = train.Age.median()
train.Age = train.Age.fillna(med_age)
train.Embarked = train.Embarked.fillna(train.Embarked.mode()[0])
mode(train.Embarked.tolist())[0][0]
train.Embarked.value_counts()
train.Embarked = train.Embarked.fillna('S')
train['Gender'] = train.Sex.map({'male': 1, 'female': 0})
train['Port'] = train.Embarked.map({'S': 1, 'C': 2, 'Q': 3})
train.drop(['Sex', 'Embarked'], axis=1, inplace=True)
code
1009871/cell_19
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
train = pd.read_csv('train.csv')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
med_age = train.Age.median()
train.Age = train.Age.fillna(med_age)
train.Embarked = train.Embarked.fillna(train.Embarked.mode()[0])
test = pd.read_csv('test.csv')
test.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
test.Age = test.Age.fillna(med_age)
test.info()
code
1009871/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
train = pd.read_csv('train.csv')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
med_age = train.Age.median()
train.Age = train.Age.fillna(med_age)
train.Embarked = train.Embarked.fillna(train.Embarked.mode()[0])
a = []
for i in range(1, len(train['Fare'])):
    a.append(train['Embarked'][i])
code
1009871/cell_18
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
train = pd.read_csv('train.csv')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
med_age = train.Age.median()
train.Age = train.Age.fillna(med_age)
train.Embarked = train.Embarked.fillna(train.Embarked.mode()[0])
test = pd.read_csv('test.csv')
test.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
test.Age = test.Age.fillna(med_age)
code
1009871/cell_28
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.stats import mode
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import pandas as pd
train = pd.read_csv('train.csv')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
med_age = train.Age.median()
train.Age = train.Age.fillna(med_age)
train.Embarked = train.Embarked.fillna(train.Embarked.mode()[0])
mode(train.Embarked.tolist())[0][0]
train.Embarked.value_counts()
train.Embarked = train.Embarked.fillna('S')
train['Gender'] = train.Sex.map({'male': 1, 'female': 0})
train['Port'] = train.Embarked.map({'S': 1, 'C': 2, 'Q': 3})
train.drop(['Sex', 'Embarked'], axis=1, inplace=True)
cols = train.columns.tolist()
cols = cols[1:2] + cols[0:1] + cols[2:]
train = train[cols]
train_data = train.values
model = RandomForestClassifier(n_estimators=100)
model = model.fit(train_data[:, 2:], train_data[:, 0])
test = pd.read_csv('test.csv')
test.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
test.Age = test.Age.fillna(med_age)
mean_fare0 = test.pivot_table(index='Pclass', values='Fare')
mean_fare0
mean_fare = mean_fare0['Fare']  # mean fare per class, as a Series indexed by Pclass
test.Fare = test[['Fare', 'Pclass']].apply(lambda row: mean_fare[row['Pclass']] if pd.isnull(row['Fare']) else row['Fare'], axis=1)
test['Gender'] = test.Sex.map({'male': 1, 'female': 0})
test['Port'] = test.Embarked.map({'S': 1, 'C': 2, 'Q': 3})
test.drop(['Sex', 'Embarked'], axis=1, inplace=True)
test_data = test.values
output = model.predict(test_data[:, 1:])
result = np.c_[test_data[:, 0].astype(int), output.astype(int)]
result_df = pd.DataFrame(result[:, 0:2], columns=['Passenger_id', 'Survived'])
result_df.to_csv('result1.csv')
code
1009871/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.stats import mode
import pandas as pd
train = pd.read_csv('train.csv')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
med_age = train.Age.median()
train.Age = train.Age.fillna(med_age)
train.Embarked = train.Embarked.fillna(train.Embarked.mode()[0])
mode(train.Embarked.tolist())[0][0]
code
1009871/cell_15
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.stats import mode
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
train = pd.read_csv('train.csv')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
med_age = train.Age.median()
train.Age = train.Age.fillna(med_age)
train.Embarked = train.Embarked.fillna(train.Embarked.mode()[0])
mode(train.Embarked.tolist())[0][0]
train.Embarked.value_counts()
train.Embarked = train.Embarked.fillna('S')
train['Gender'] = train.Sex.map({'male': 1, 'female': 0})
train['Port'] = train.Embarked.map({'S': 1, 'C': 2, 'Q': 3})
train.drop(['Sex', 'Embarked'], axis=1, inplace=True)
cols = train.columns.tolist()
cols = cols[1:2] + cols[0:1] + cols[2:]
train = train[cols]
train_data = train.values
model = RandomForestClassifier(n_estimators=100)
model = model.fit(train_data[:, 2:], train_data[:, 0])
code
1009871/cell_16
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
code
1009871/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
train = pd.read_csv('train.csv')
code
1009871/cell_17
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
test.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
code
1009871/cell_24
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
train = pd.read_csv('train.csv')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
med_age = train.Age.median()
train.Age = train.Age.fillna(med_age)
train.Embarked = train.Embarked.fillna(train.Embarked.mode()[0])
test = pd.read_csv('test.csv')
test.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
test.Age = test.Age.fillna(med_age)
mean_fare0 = test.pivot_table(index='Pclass', values='Fare')
mean_fare0
mean_fare = mean_fare0['Fare']  # mean fare per class, as a Series indexed by Pclass
test.Fare = test[['Fare', 'Pclass']].apply(lambda row: mean_fare[row['Pclass']] if pd.isnull(row['Fare']) else row['Fare'], axis=1)
test['Gender'] = test.Sex.map({'male': 1, 'female': 0})
test['Port'] = test.Embarked.map({'S': 1, 'C': 2, 'Q': 3})
test.drop(['Sex', 'Embarked'], axis=1, inplace=True)
test_data = test.values
code
1009871/cell_22
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
train = pd.read_csv('train.csv')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
med_age = train.Age.median()
train.Age = train.Age.fillna(med_age)
train.Embarked = train.Embarked.fillna(train.Embarked.mode()[0])
test = pd.read_csv('test.csv')
test.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
test.Age = test.Age.fillna(med_age)
mean_fare0 = test.pivot_table(index='Pclass', values='Fare')
mean_fare0
mean_fare = mean_fare0['Fare']  # mean fare per class, as a Series indexed by Pclass
test.Fare = test[['Fare', 'Pclass']].apply(lambda row: mean_fare[row['Pclass']] if pd.isnull(row['Fare']) else row['Fare'], axis=1)
test['Gender'] = test.Sex.map({'male': 1, 'female': 0})
test['Port'] = test.Embarked.map({'S': 1, 'C': 2, 'Q': 3})
test.info()
code
1009871/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.stats import mode
import pandas as pd
train = pd.read_csv('train.csv')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
med_age = train.Age.median()
train.Age = train.Age.fillna(med_age)
train.Embarked = train.Embarked.fillna(train.Embarked.mode()[0])
mode(train.Embarked.tolist())[0][0]
train.Embarked.value_counts()
train.Embarked = train.Embarked.fillna('S')
train['Gender'] = train.Sex.map({'male': 1, 'female': 0})
train['Port'] = train.Embarked.map({'S': 1, 'C': 2, 'Q': 3})
train.info()
code
1009871/cell_27
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.stats import mode
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import pandas as pd
train = pd.read_csv('train.csv')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
med_age = train.Age.median()
train.Age = train.Age.fillna(med_age)
train.Embarked = train.Embarked.fillna(train.Embarked.mode()[0])
mode(train.Embarked.tolist())[0][0]
train.Embarked.value_counts()
train.Embarked = train.Embarked.fillna('S')
train['Gender'] = train.Sex.map({'male': 1, 'female': 0})
train['Port'] = train.Embarked.map({'S': 1, 'C': 2, 'Q': 3})
train.drop(['Sex', 'Embarked'], axis=1, inplace=True)
cols = train.columns.tolist()
cols = cols[1:2] + cols[0:1] + cols[2:]
train = train[cols]
train_data = train.values
model = RandomForestClassifier(n_estimators=100)
model = model.fit(train_data[:, 2:], train_data[:, 0])
test = pd.read_csv('test.csv')
test.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
test.Age = test.Age.fillna(med_age)
mean_fare0 = test.pivot_table(index='Pclass', values='Fare')
mean_fare0
mean_fare = mean_fare0['Fare']  # mean fare per class, as a Series indexed by Pclass
test.Fare = test[['Fare', 'Pclass']].apply(lambda row: mean_fare[row['Pclass']] if pd.isnull(row['Fare']) else row['Fare'], axis=1)
test['Gender'] = test.Sex.map({'male': 1, 'female': 0})
test['Port'] = test.Embarked.map({'S': 1, 'C': 2, 'Q': 3})
test.drop(['Sex', 'Embarked'], axis=1, inplace=True)
test_data = test.values
output = model.predict(test_data[:, 1:])
result = np.c_[test_data[:, 0].astype(int), output.astype(int)]
result_df = pd.DataFrame(result[:, 0:2], columns=['Passenger_id', 'Survived'])
code
1009871/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.stats import mode
import pandas as pd
train = pd.read_csv('train.csv')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
med_age = train.Age.median()
train.Age = train.Age.fillna(med_age)
train.Embarked = train.Embarked.fillna(train.Embarked.mode()[0])
mode(train.Embarked.tolist())[0][0]
train.Embarked.value_counts()
train.Embarked = train.Embarked.fillna('S')
train['Gender'] = train.Sex.map({'male': 1, 'female': 0})
train['Port'] = train.Embarked.map({'S': 1, 'C': 2, 'Q': 3})
train.drop(['Sex', 'Embarked'], axis=1, inplace=True)
cols = train.columns.tolist()
cols = cols[1:2] + cols[0:1] + cols[2:]
train = train[cols]
code