| path (stringlengths 13-17) | screenshot_names (listlengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1 value) |
|---|---|---|---|
128020060/cell_12
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
seasons_stats[(seasons_stats.Age > 40) & (seasons_stats.PTS > 100)]
seasons_stats[seasons_stats.Player == 'LeBron James']
|
code
|
128020060/cell_5
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
players.isnull().sum()
players.info()
|
code
|
129002034/cell_4
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/loan-eligible-dataset/loan-train.csv')
print(df)
|
code
|
129002034/cell_1
|
[
"text_plain_output_1.png"
] |
import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
|
code
|
129002034/cell_10
|
[
"text_plain_output_1.png"
] |
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
models = {'Logistic Regression': LogisticRegression(), 'Decision Tree': DecisionTreeClassifier(), 'Random Forest': RandomForestClassifier(), 'Gradient Boosting': GradientBoostingClassifier()}
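# X_train/X_test/y_train/y_test are assumed to come from an earlier train_test_split cell not captured in this row.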
for model_name, model in models.items():
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    accuracy = accuracy_score(y_test, y_pred)
    print(f'{model_name} Accuracy: {accuracy:.4f}')
|
code
|
18118023/cell_9
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
from pathlib import Path
import os
import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
path = Path('../input')
pd.read_csv(path / 'sample_submission_v2.csv').head(5)
df_tags = pd.read_csv(path / 'train_v2.csv')
df_tags['tags'].value_counts() / len(df_tags)
|
code
|
18118023/cell_4
|
[
"image_output_1.png"
] |
from pathlib import Path
import os
import numpy as np
import pandas as pd
path = Path('../input')
an_image_path = os.listdir(path / 'train-tif-v2')[1]
an_image_path
|
code
|
18118023/cell_6
|
[
"text_plain_output_1.png"
] |
from PIL import Image
from pathlib import Path
import os
import numpy as np
import pandas as pd
path = Path('../input')
Image.open(path / 'train-tif-v2' / 'train_0.tif')
|
code
|
18118023/cell_2
|
[
"text_plain_output_1.png"
] |
from pathlib import Path
import os
import numpy as np
import pandas as pd
path = Path('../input')
print(os.listdir(path))
|
code
|
18118023/cell_1
|
[
"text_plain_output_1.png"
] |
import os
import numpy as np
import pandas as pd
print(os.listdir('../input'))
|
code
|
18118023/cell_8
|
[
"text_plain_output_1.png"
] |
from pathlib import Path
import os
import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
path = Path('../input')
pd.read_csv(path / 'sample_submission_v2.csv').head(5)
df_tags = pd.read_csv(path / 'train_v2.csv')
df_tags.head()
|
code
|
18118023/cell_3
|
[
"text_plain_output_1.png"
] |
from pathlib import Path
import os
import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
path = Path('../input')
pd.read_csv(path / 'sample_submission_v2.csv').head(5)
|
code
|
18118023/cell_10
|
[
"text_plain_output_1.png"
] |
from pathlib import Path
import os
import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
path = Path('../input')
pd.read_csv(path / 'sample_submission_v2.csv').head(5)
df_tags = pd.read_csv(path / 'train_v2.csv')
df_tags['tags'].str.split()
|
code
|
18118023/cell_12
|
[
"text_html_output_1.png"
] |
from pathlib import Path
import numpy as np # linear algebra
import os
import pandas as pd
path = Path('../input')
np.random.seed(42)
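# ImageFileList belongs to the old fastai v1 data-block API; a fastai vision import cell is assumed to precede this one.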
src = ImageFileList.from_folder(path).label_from_csv('train_v2.csv', sep=' ', folder='train-jpg', suffix='.jpg').random_split_by_pct(0.2)
|
code
|
18118023/cell_5
|
[
"text_html_output_1.png"
] |
from pathlib import Path
import os
import numpy as np
import pandas as pd
path = Path('../input')
an_image_path = os.listdir(path / 'train-tif-v2')[1]
an_image_path
|
code
|
1006327/cell_9
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
menu_data = pd.read_csv('../input/menu.csv')
menu_data.shape
type(menu_data['Item'][0])
|
code
|
1006327/cell_4
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
menu_data = pd.read_csv('../input/menu.csv')
menu_data.head()
|
code
|
1006327/cell_6
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
menu_data = pd.read_csv('../input/menu.csv')
menu_data.shape
|
code
|
1006327/cell_2
|
[
"text_plain_output_1.png"
] |
from subprocess import check_output
import numpy as np
import pandas as pd
print(check_output(['ls', '../input']).decode('utf8'))
|
code
|
1006327/cell_11
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
menu_data = pd.read_csv('../input/menu.csv')
menu_data.shape
menu_data.describe()
|
code
|
1006327/cell_8
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
menu_data = pd.read_csv('../input/menu.csv')
menu_data.shape
plt.figure(figsize=(13, 5))
sns.countplot(data=menu_data, x='Category')
|
code
|
1006327/cell_16
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
menu_data = pd.read_csv('../input/menu.csv')
menu_data.shape
Item_data = pd.DataFrame(menu_data['Item'], index=range(len(menu_data['Item'])))
Item_data
|
code
|
1006327/cell_17
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import jieba
|
code
|
1006327/cell_14
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
menu_data = pd.read_csv('../input/menu.csv')
menu_data.shape
y1 = menu_data['Calories'].tolist()
y2 = menu_data['Calories from Fat'].tolist()
y3 = menu_data['Total Fat'].tolist()
plt.figure(figsize=(13, 5))
plt.plot(y1, label='Calories')
plt.plot(y2, label='Calories from Fat')
plt.plot(y3, label='Total Fat')
plt.legend(loc='upper right')
|
code
|
50212750/cell_21
|
[
"text_html_output_1.png"
] |
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', columns='Survived', values='PassengerId', aggfunc='count', margins=True)
t['Title'] = t.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(t['Title'], t['Sex'])
|
code
|
50212750/cell_13
|
[
"text_html_output_1.png"
] |
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
t[['Fare', 'Survived']].groupby(['Fare'], as_index=False).mean().sort_values(by=['Survived'], ascending=False)
|
code
|
50212750/cell_9
|
[
"text_html_output_1.png"
] |
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
t[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean()
|
code
|
50212750/cell_25
|
[
"text_html_output_1.png"
] |
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', columns='Survived', values='PassengerId', aggfunc='count', margins=True)
t['Title'] = t.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(t['Title'], t['Sex'])
t['Title']
|
code
|
50212750/cell_4
|
[
"text_html_output_1.png"
] |
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
t.describe()
|
code
|
50212750/cell_56
|
[
"text_html_output_1.png"
] |
import numpy as np
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', columns='Survived', values='PassengerId', aggfunc='count', margins=True)
t['Title'] = t.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(t['Title'], t['Sex'])
t = t.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
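# Note: 'Sex' is compared to 0/1 below, so it is assumed to have been numerically encoded (e.g. female/male -> 1/0) in an earlier cell not captured in this row.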
guess_ages = np.zeros((2, 3))
guess_ages
for i in range(0, 2):
    for j in range(0, 3):
        guess_df = t[(t['Sex'] == i) & (t['Pclass'] == j + 1)]['Age'].dropna()
        age_guess = guess_df.median()
        guess_ages[i, j] = int(age_guess / 0.5 + 0.5) * 0.5
for i in range(0, 2):
    for j in range(0, 3):
        t.loc[t.Age.isnull() & (t.Sex == i) & (t.Pclass == j + 1), 'Age'] = guess_ages[i, j]
t['Age'] = t['Age'].astype(int)
t = t.drop(['Age'], axis=1)
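# 'Familysize' (and an 'Isalone' column initialized to 0) are assumed to be created in earlier cells not captured in this row.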
t.loc[t['Familysize'] == 1, 'Isalone'] = 1
t = t.drop(columns=['SibSp', 'Parch', 'Familysize'])
t
|
code
|
50212750/cell_33
|
[
"text_html_output_1.png"
] |
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', columns='Survived', values='PassengerId', aggfunc='count', margins=True)
t['Title'] = t.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(t['Title'], t['Sex'])
t = t.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
t.info()
|
code
|
50212750/cell_44
|
[
"text_plain_output_1.png"
] |
import numpy as np
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', columns='Survived', values='PassengerId', aggfunc='count', margins=True)
t['Title'] = t.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(t['Title'], t['Sex'])
t = t.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
guess_ages = np.zeros((2, 3))
guess_ages
for i in range(0, 2):
    for j in range(0, 3):
        guess_df = t[(t['Sex'] == i) & (t['Pclass'] == j + 1)]['Age'].dropna()
        age_guess = guess_df.median()
        guess_ages[i, j] = int(age_guess / 0.5 + 0.5) * 0.5
for i in range(0, 2):
    for j in range(0, 3):
        t.loc[t.Age.isnull() & (t.Sex == i) & (t.Pclass == j + 1), 'Age'] = guess_ages[i, j]
t['Age'] = t['Age'].astype(int)
t = t.drop(['Age'], axis=1)
t
|
code
|
50212750/cell_6
|
[
"text_html_output_1.png"
] |
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
t.describe(include=['O'])
|
code
|
50212750/cell_40
|
[
"text_html_output_1.png"
] |
import numpy as np
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', columns='Survived', values='PassengerId', aggfunc='count', margins=True)
t['Title'] = t.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(t['Title'], t['Sex'])
t = t.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
guess_ages = np.zeros((2, 3))
guess_ages
for i in range(0, 2):
    for j in range(0, 3):
        guess_df = t[(t['Sex'] == i) & (t['Pclass'] == j + 1)]['Age'].dropna()
        age_guess = guess_df.median()
        guess_ages[i, j] = int(age_guess / 0.5 + 0.5) * 0.5
for i in range(0, 2):
    for j in range(0, 3):
        t.loc[t.Age.isnull() & (t.Sex == i) & (t.Pclass == j + 1), 'Age'] = guess_ages[i, j]
t['Age'] = t['Age'].astype(int)
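# 'AgeBand' is assumed to be created in an earlier cell (e.g. with pd.cut on 'Age') that is not captured in this row.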
t[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False).mean()
|
code
|
50212750/cell_29
|
[
"text_html_output_1.png"
] |
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', columns='Survived', values='PassengerId', aggfunc='count', margins=True)
t['Title'] = t.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(t['Title'], t['Sex'])
t = t.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
t
|
code
|
50212750/cell_26
|
[
"text_html_output_1.png"
] |
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', columns='Survived', values='PassengerId', aggfunc='count', margins=True)
t['Title'] = t.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(t['Title'], t['Sex'])
t
|
code
|
50212750/cell_65
|
[
"text_html_output_1.png"
] |
import numpy as np
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', columns='Survived', values='PassengerId', aggfunc='count', margins=True)
t['Title'] = t.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(t['Title'], t['Sex'])
t = t.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
guess_ages = np.zeros((2, 3))
guess_ages
for i in range(0, 2):
    for j in range(0, 3):
        guess_df = t[(t['Sex'] == i) & (t['Pclass'] == j + 1)]['Age'].dropna()
        age_guess = guess_df.median()
        guess_ages[i, j] = int(age_guess / 0.5 + 0.5) * 0.5
for i in range(0, 2):
    for j in range(0, 3):
        t.loc[t.Age.isnull() & (t.Sex == i) & (t.Pclass == j + 1), 'Age'] = guess_ages[i, j]
t['Age'] = t['Age'].astype(int)
t = t.drop(['Age'], axis=1)
t.loc[t['Familysize'] == 1, 'Isalone'] = 1
t = t.drop(columns=['SibSp', 'Parch', 'Familysize'])
ME = t.Embarked.dropna().mode()[0]
t.head()
|
code
|
50212750/cell_48
|
[
"text_html_output_1.png"
] |
import numpy as np
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', columns='Survived', values='PassengerId', aggfunc='count', margins=True)
t['Title'] = t.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(t['Title'], t['Sex'])
t = t.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
guess_ages = np.zeros((2, 3))
guess_ages
for i in range(0, 2):
    for j in range(0, 3):
        guess_df = t[(t['Sex'] == i) & (t['Pclass'] == j + 1)]['Age'].dropna()
        age_guess = guess_df.median()
        guess_ages[i, j] = int(age_guess / 0.5 + 0.5) * 0.5
for i in range(0, 2):
    for j in range(0, 3):
        t.loc[t.Age.isnull() & (t.Sex == i) & (t.Pclass == j + 1), 'Age'] = guess_ages[i, j]
t['Age'] = t['Age'].astype(int)
t = t.drop(['Age'], axis=1)
t[['Familysize', 'Survived']].groupby(['Familysize'], as_index=False).mean().sort_values(by='Survived', ascending=False)
|
code
|
50212750/cell_41
|
[
"text_plain_output_1.png"
] |
import numpy as np
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', columns='Survived', values='PassengerId', aggfunc='count', margins=True)
t['Title'] = t.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(t['Title'], t['Sex'])
t = t.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
guess_ages = np.zeros((2, 3))
guess_ages
for i in range(0, 2):
    for j in range(0, 3):
        guess_df = t[(t['Sex'] == i) & (t['Pclass'] == j + 1)]['Age'].dropna()
        age_guess = guess_df.median()
        guess_ages[i, j] = int(age_guess / 0.5 + 0.5) * 0.5
for i in range(0, 2):
    for j in range(0, 3):
        t.loc[t.Age.isnull() & (t.Sex == i) & (t.Pclass == j + 1), 'Age'] = guess_ages[i, j]
t['Age'] = t['Age'].astype(int)
t
|
code
|
50212750/cell_61
|
[
"text_html_output_1.png"
] |
import numpy as np
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', columns='Survived', values='PassengerId', aggfunc='count', margins=True)
t['Title'] = t.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(t['Title'], t['Sex'])
t = t.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
guess_ages = np.zeros((2, 3))
guess_ages
for i in range(0, 2):
    for j in range(0, 3):
        guess_df = t[(t['Sex'] == i) & (t['Pclass'] == j + 1)]['Age'].dropna()
        age_guess = guess_df.median()
        guess_ages[i, j] = int(age_guess / 0.5 + 0.5) * 0.5
for i in range(0, 2):
    for j in range(0, 3):
        t.loc[t.Age.isnull() & (t.Sex == i) & (t.Pclass == j + 1), 'Age'] = guess_ages[i, j]
t['Age'] = t['Age'].astype(int)
t = t.drop(['Age'], axis=1)
t.loc[t['Familysize'] == 1, 'Isalone'] = 1
t = t.drop(columns=['SibSp', 'Parch', 'Familysize'])
ME = t.Embarked.dropna().mode()[0]
t.describe(include=['O'])
|
code
|
50212750/cell_2
|
[
"text_html_output_1.png"
] |
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
|
code
|
50212750/cell_11
|
[
"text_html_output_1.png"
] |
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
t[['SibSp', 'Survived']].groupby(['SibSp'], as_index=False).mean().sort_values(by=['Survived'], ascending=False)
|
code
|
50212750/cell_19
|
[
"text_html_output_1.png"
] |
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', columns='Survived', values='PassengerId', aggfunc='count', margins=True)
|
code
|
50212750/cell_50
|
[
"text_html_output_1.png"
] |
import numpy as np
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', columns='Survived', values='PassengerId', aggfunc='count', margins=True)
t['Title'] = t.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(t['Title'], t['Sex'])
t = t.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
guess_ages = np.zeros((2, 3))
guess_ages
for i in range(0, 2):
    for j in range(0, 3):
        guess_df = t[(t['Sex'] == i) & (t['Pclass'] == j + 1)]['Age'].dropna()
        age_guess = guess_df.median()
        guess_ages[i, j] = int(age_guess / 0.5 + 0.5) * 0.5
for i in range(0, 2):
    for j in range(0, 3):
        t.loc[t.Age.isnull() & (t.Sex == i) & (t.Pclass == j + 1), 'Age'] = guess_ages[i, j]
t['Age'] = t['Age'].astype(int)
t = t.drop(['Age'], axis=1)
t
|
code
|
50212750/cell_52
|
[
"text_html_output_1.png"
] |
import numpy as np
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', columns='Survived', values='PassengerId', aggfunc='count', margins=True)
t['Title'] = t.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(t['Title'], t['Sex'])
t = t.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
guess_ages = np.zeros((2, 3))
guess_ages
for i in range(0, 2):
    for j in range(0, 3):
        guess_df = t[(t['Sex'] == i) & (t['Pclass'] == j + 1)]['Age'].dropna()
        age_guess = guess_df.median()
        guess_ages[i, j] = int(age_guess / 0.5 + 0.5) * 0.5
for i in range(0, 2):
    for j in range(0, 3):
        t.loc[t.Age.isnull() & (t.Sex == i) & (t.Pclass == j + 1), 'Age'] = guess_ages[i, j]
t['Age'] = t['Age'].astype(int)
t = t.drop(['Age'], axis=1)
t.loc[t['Familysize'] == 1, 'Isalone'] = 1
t
|
code
|
50212750/cell_18
|
[
"text_html_output_1.png"
] |
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', values='Survived', aggfunc='count', margins=True)
|
code
|
50212750/cell_32
|
[
"text_html_output_1.png"
] |
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', columns='Survived', values='PassengerId', aggfunc='count', margins=True)
t['Title'] = t.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(t['Title'], t['Sex'])
t = t.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
t
|
code
|
50212750/cell_62
|
[
"text_plain_output_1.png"
] |
import numpy as np
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', columns='Survived', values='PassengerId', aggfunc='count', margins=True)
t['Title'] = t.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(t['Title'], t['Sex'])
t = t.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
guess_ages = np.zeros((2, 3))
guess_ages
for i in range(0, 2):
    for j in range(0, 3):
        guess_df = t[(t['Sex'] == i) & (t['Pclass'] == j + 1)]['Age'].dropna()
        age_guess = guess_df.median()
        guess_ages[i, j] = int(age_guess / 0.5 + 0.5) * 0.5
for i in range(0, 2):
    for j in range(0, 3):
        t.loc[t.Age.isnull() & (t.Sex == i) & (t.Pclass == j + 1), 'Age'] = guess_ages[i, j]
t['Age'] = t['Age'].astype(int)
t = t.drop(['Age'], axis=1)
t.loc[t['Familysize'] == 1, 'Isalone'] = 1
t = t.drop(columns=['SibSp', 'Parch', 'Familysize'])
ME = t.Embarked.dropna().mode()[0]
t[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean()
|
code
|
50212750/cell_59
|
[
"text_html_output_1.png"
] |
import numpy as np
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', columns='Survived', values='PassengerId', aggfunc='count', margins=True)
t['Title'] = t.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(t['Title'], t['Sex'])
t = t.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
guess_ages = np.zeros((2, 3))
guess_ages
for i in range(0, 2):
    for j in range(0, 3):
        guess_df = t[(t['Sex'] == i) & (t['Pclass'] == j + 1)]['Age'].dropna()
        age_guess = guess_df.median()
        guess_ages[i, j] = int(age_guess / 0.5 + 0.5) * 0.5
for i in range(0, 2):
    for j in range(0, 3):
        t.loc[t.Age.isnull() & (t.Sex == i) & (t.Pclass == j + 1), 'Age'] = guess_ages[i, j]
t['Age'] = t['Age'].astype(int)
t = t.drop(['Age'], axis=1)
t.loc[t['Familysize'] == 1, 'Isalone'] = 1
t = t.drop(columns=['SibSp', 'Parch', 'Familysize'])
ME = t.Embarked.dropna().mode()[0]
ME
|
code
|
50212750/cell_8
|
[
"text_html_output_1.png"
] |
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
t[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean()
|
code
|
50212750/cell_15
|
[
"text_html_output_1.png"
] |
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
t[['Parch', 'Survived']].groupby(['Parch'], as_index=False).sum().sort_values(by=['Parch'], ascending=False)
|
code
|
50212750/cell_16
|
[
"text_html_output_1.png"
] |
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
t[['SibSp', 'Parch', 'Survived']].groupby(['SibSp', 'Parch'], as_index=False).count().sort_values(by=['SibSp', 'Parch', 'Survived'], ascending=True)
|
code
|
50212750/cell_47
|
[
"text_html_output_1.png"
] |
import numpy as np
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', columns='Survived', values='PassengerId', aggfunc='count', margins=True)
t['Title'] = t.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(t['Title'], t['Sex'])
t = t.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
guess_ages = np.zeros((2, 3))
guess_ages
for i in range(0, 2):
    for j in range(0, 3):
        guess_df = t[(t['Sex'] == i) & (t['Pclass'] == j + 1)]['Age'].dropna()
        age_guess = guess_df.median()
        guess_ages[i, j] = int(age_guess / 0.5 + 0.5) * 0.5
for i in range(0, 2):
    for j in range(0, 3):
        t.loc[t.Age.isnull() & (t.Sex == i) & (t.Pclass == j + 1), 'Age'] = guess_ages[i, j]
t['Age'] = t['Age'].astype(int)
t = t.drop(['Age'], axis=1)
t
|
code
|
50212750/cell_17
|
[
"text_html_output_1.png"
] |
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
|
code
|
50212750/cell_35
|
[
"text_plain_output_1.png"
] |
import numpy as np
guess_ages = np.zeros((2, 3))
guess_ages
|
code
|
50212750/cell_14
|
[
"text_html_output_1.png"
] |
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
t[['SibSp', 'Survived']].groupby(['SibSp'], as_index=False).sum().sort_values(by=['Survived'], ascending=False)
|
code
|
50212750/cell_22
|
[
"text_html_output_1.png"
] |
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', columns='Survived', values='PassengerId', aggfunc='count', margins=True)
t['Title'] = t.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(t['Title'], t['Sex'])
t['Title'] = t['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
t['Title'] = t['Title'].replace('Mlle', 'Miss')
t['Title'] = t['Title'].replace('Ms', 'Miss')
t['Title'] = t['Title'].replace('Mme', 'Mrs')
t[['Title', 'Survived']].groupby(['Title'], as_index=False).mean()
|
code
|
50212750/cell_53
|
[
"text_html_output_1.png"
] |
import numpy as np
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', columns='Survived', values='PassengerId', aggfunc='count', margins=True)
t['Title'] = t.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(t['Title'], t['Sex'])
t = t.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
guess_ages = np.zeros((2, 3))
guess_ages
for i in range(0, 2):
    for j in range(0, 3):
        guess_df = t[(t['Sex'] == i) & (t['Pclass'] == j + 1)]['Age'].dropna()
        age_guess = guess_df.median()
        guess_ages[i, j] = int(age_guess / 0.5 + 0.5) * 0.5
for i in range(0, 2):
    for j in range(0, 3):
        t.loc[t.Age.isnull() & (t.Sex == i) & (t.Pclass == j + 1), 'Age'] = guess_ages[i, j]
t['Age'] = t['Age'].astype(int)
t = t.drop(['Age'], axis=1)
t.loc[t['Familysize'] == 1, 'Isalone'] = 1
t[['Isalone', 'Survived']].groupby(['Isalone'], as_index=False).mean()
|
code
|
50212750/cell_10
|
[
"text_html_output_1.png"
] |
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
t[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean()
|
code
|
50212750/cell_37
|
[
"text_html_output_1.png"
] |
import numpy as np
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
pd.pivot_table(t, index='SibSp', columns='Parch', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', values='Survived', aggfunc='count', margins=True)
pd.pivot_table(t, index='SibSp', columns='Survived', values='PassengerId', aggfunc='count', margins=True)
t['Title'] = t.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(t['Title'], t['Sex'])
t = t.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1)
guess_ages = np.zeros((2, 3))
guess_ages
for i in range(0, 2):
    for j in range(0, 3):
        guess_df = t[(t['Sex'] == i) & (t['Pclass'] == j + 1)]['Age'].dropna()
        age_guess = guess_df.median()
        guess_ages[i, j] = int(age_guess / 0.5 + 0.5) * 0.5
for i in range(0, 2):
    for j in range(0, 3):
        t.loc[t.Age.isnull() & (t.Sex == i) & (t.Pclass == j + 1), 'Age'] = guess_ages[i, j]
t['Age'] = t['Age'].astype(int)
t.head()
|
code
|
50212750/cell_12
|
[
"text_plain_output_1.png"
] |
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
t[['Parch', 'Survived']].groupby(['Parch'], as_index=False).mean().sort_values(by=['Survived'], ascending=False)
|
code
|
50212750/cell_5
|
[
"text_html_output_1.png"
] |
import pandas as pd
t = pd.read_csv('../input/titanic/train.csv')
t
t.info()
|
code
|
90116924/cell_13
|
[
"text_plain_output_1.png"
] |
import pandas as pd
train = pd.read_csv('../input/feedback-prize-2021/train.csv')
sample_submission = pd.read_csv('../input/feedback-prize-2021/sample_submission.csv')
train.shape
print(f'Training data consists of {train.shape[0]} annotations')
|
code
|
90116924/cell_25
|
[
"text_html_output_1.png"
] |
import os
import pandas as pd
train = pd.read_csv('../input/feedback-prize-2021/train.csv')
sample_submission = pd.read_csv('../input/feedback-prize-2021/sample_submission.csv')
train.shape
raw_text_files = os.listdir('/kaggle/input/feedback-prize-2021/train')
train[train['id'] == '423A1CA112E2']
|
code
|
90116924/cell_28
|
[
"text_html_output_1.png"
] |
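# texts_df is assumed to be built in a previous cell (see row 90116924/cell_27).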
texts_df.head()
|
code
|
90116924/cell_16
|
[
"text_plain_output_1.png"
] |
import os
import pandas as pd
train = pd.read_csv('../input/feedback-prize-2021/train.csv')
sample_submission = pd.read_csv('../input/feedback-prize-2021/sample_submission.csv')
train.shape
raw_text_files = os.listdir('/kaggle/input/feedback-prize-2021/train')
print(f'Training data consists of {len(raw_text_files)} texts')
print(f'Each essay contains on average {round(train.shape[0] / len(raw_text_files), 1)} annotations.')
|
code
|
90116924/cell_24
|
[
"text_plain_output_1.png"
] |
with open('../input/feedback-prize-2021/train/423A1CA112E2.txt', 'r') as file:
    first_txt = file.read()
print(first_txt)
|
code
|
90116924/cell_22
|
[
"text_plain_output_1.png"
] |
from glob import glob
train_txt = glob('../input/feedback-prize-2021/train/*.txt')
test_txt = glob('../input/feedback-prize-2021/test/*.txt')
train_txt
|
code
|
90116924/cell_27
|
[
"text_plain_output_1.png"
] |
import os
import pandas as pd
raw_text_files = os.listdir('/kaggle/input/feedback-prize-2021/train')
texts = []
for file in raw_text_files:
    with open(f'/kaggle/input/feedback-prize-2021/train/{file}') as f:
        texts.append({'id': file[:-4], 'text': f.read()})
texts_df = pd.DataFrame(texts)
|
code
|
90116924/cell_12
|
[
"text_plain_output_1.png"
] |
import pandas as pd
train = pd.read_csv('../input/feedback-prize-2021/train.csv')
sample_submission = pd.read_csv('../input/feedback-prize-2021/sample_submission.csv')
train.shape
|
code
|
128018806/cell_57
|
[
"text_plain_output_1.png"
] |
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV
from tqdm import notebook
import numpy as np
import pandas as pd
import torch
import transformers as ppb
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
try:
    data = pd.read_csv('https://code.s3.yandex.net/datasets/toxic_comments.csv')
except:
    data = pd.read_csv('/kaggle/input/toxic-commentscsv/toxic_comments.csv')
data = data.drop('Unnamed: 0', axis=1)
model_class, tokenizer_class, pretrained_weights = (ppb.DistilBertModel, ppb.DistilBertTokenizer, 'distilbert-base-uncased')
tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
model = model_class.from_pretrained(pretrained_weights).to(DEVICE)
tokenized = data['text'].apply(lambda x: tokenizer.encode(x, add_special_tokens=True, truncation=True))
max_len = 0
for i in tokenized.values:
    if len(i) > max_len:
        max_len = len(i)
padded = np.array([i + [0] * (max_len - len(i)) for i in tokenized.values])
attention_mask = np.where(padded != 0, 1, 0)
batch_size = 400
embeddings = []
for i in notebook.tqdm(range(padded.shape[0] // batch_size)):
    batch = torch.LongTensor(padded[batch_size * i:batch_size * (i + 1)]).to(DEVICE)
    attention_mask_batch = torch.LongTensor(attention_mask[batch_size * i:batch_size * (i + 1)]).to(DEVICE)
    with torch.no_grad():
        batch_embeddings = model(batch, attention_mask=attention_mask_batch)
    embeddings.append(batch_embeddings[0][:, 0, :].cpu().detach().numpy())
features = np.concatenate(embeddings)
features.shape
target = data['toxic']
target = target[:159200]
params = {'C': [92], 'penalty': ['l2'], 'solver': ['lbfgs']}
lr_clf = LogisticRegression(max_iter=30000)
lr_model = GridSearchCV(lr_clf, param_grid=params, scoring='f1', cv=3)
lr_model.fit(features, target)
lr_model.best_score_
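# features_train/target_train (and the matching test split) are assumed to come from a train_test_split cell not captured in this row.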
f1_score(target_train, lr_model.predict(features_train))
params = {'n_estimators': [10, 40, 100], 'max_depth': [1, 4, 9]}
rfr = RandomForestClassifier()
rfr_model = GridSearchCV(rfr, param_grid=params, scoring='f1', cv=3)
rfr_model.fit(features_train, target_train)
rfr_model.best_score_
|
code
|
128018806/cell_56
|
[
"text_plain_output_1.png"
] |
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV
from tqdm import notebook
import numpy as np
import pandas as pd
import torch
import transformers as ppb
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
try:
    data = pd.read_csv('https://code.s3.yandex.net/datasets/toxic_comments.csv')
except:
    data = pd.read_csv('/kaggle/input/toxic-commentscsv/toxic_comments.csv')
data = data.drop('Unnamed: 0', axis=1)
model_class, tokenizer_class, pretrained_weights = (ppb.DistilBertModel, ppb.DistilBertTokenizer, 'distilbert-base-uncased')
tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
model = model_class.from_pretrained(pretrained_weights).to(DEVICE)
tokenized = data['text'].apply(lambda x: tokenizer.encode(x, add_special_tokens=True, truncation=True))
max_len = 0
for i in tokenized.values:
    if len(i) > max_len:
        max_len = len(i)
padded = np.array([i + [0] * (max_len - len(i)) for i in tokenized.values])
attention_mask = np.where(padded != 0, 1, 0)
batch_size = 400
embeddings = []
for i in notebook.tqdm(range(padded.shape[0] // batch_size)):
    batch = torch.LongTensor(padded[batch_size * i:batch_size * (i + 1)]).to(DEVICE)
    attention_mask_batch = torch.LongTensor(attention_mask[batch_size * i:batch_size * (i + 1)]).to(DEVICE)
    with torch.no_grad():
        batch_embeddings = model(batch, attention_mask=attention_mask_batch)
    embeddings.append(batch_embeddings[0][:, 0, :].cpu().detach().numpy())
features = np.concatenate(embeddings)
features.shape
target = data['toxic']
target = target[:159200]
params = {'C': [92], 'penalty': ['l2'], 'solver': ['lbfgs']}
lr_clf = LogisticRegression(max_iter=30000)
lr_model = GridSearchCV(lr_clf, param_grid=params, scoring='f1', cv=3)
lr_model.fit(features, target)
lr_model.best_score_
f1_score(target_train, lr_model.predict(features_train))
params = {'n_estimators': [10, 40, 100], 'max_depth': [1, 4, 9]}
rfr = RandomForestClassifier()
rfr_model = GridSearchCV(rfr, param_grid=params, scoring='f1', cv=3)
rfr_model.fit(features_train, target_train)
|
code
|
128018806/cell_39
|
[
"text_plain_output_1.png"
] |
from tqdm import notebook
import numpy as np
import pandas as pd
import torch
import transformers as ppb
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
try:
    data = pd.read_csv('https://code.s3.yandex.net/datasets/toxic_comments.csv')
except:
    data = pd.read_csv('/kaggle/input/toxic-commentscsv/toxic_comments.csv')
data = data.drop('Unnamed: 0', axis=1)
model_class, tokenizer_class, pretrained_weights = (ppb.DistilBertModel, ppb.DistilBertTokenizer, 'distilbert-base-uncased')
tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
model = model_class.from_pretrained(pretrained_weights).to(DEVICE)
tokenized = data['text'].apply(lambda x: tokenizer.encode(x, add_special_tokens=True, truncation=True))
max_len = 0
for i in tokenized.values:
    if len(i) > max_len:
        max_len = len(i)
padded = np.array([i + [0] * (max_len - len(i)) for i in tokenized.values])
attention_mask = np.where(padded != 0, 1, 0)
batch_size = 400
embeddings = []
for i in notebook.tqdm(range(padded.shape[0] // batch_size)):
    batch = torch.LongTensor(padded[batch_size * i:batch_size * (i + 1)]).to(DEVICE)
    attention_mask_batch = torch.LongTensor(attention_mask[batch_size * i:batch_size * (i + 1)]).to(DEVICE)
    with torch.no_grad():
        batch_embeddings = model(batch, attention_mask=attention_mask_batch)
    embeddings.append(batch_embeddings[0][:, 0, :].cpu().detach().numpy())
features = np.concatenate(embeddings)
features.shape
|
code
|
128018806/cell_61
|
[
"text_plain_output_1.png"
] |
from lightgbm import LGBMClassifier
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV
lgr = LGBMClassifier()
param_dist = {'learning_rate': [0.1], 'num_leaves': [100], 'n_estimators': [300], 'device': ['gpu']}
LGBM_model = GridSearchCV(lgr, param_grid=param_dist, cv=3, scoring='f1', verbose=5)
LGBM_model.fit(features_train, target_train)
LGBM_model.best_estimator_
|
code
|
128018806/cell_72
|
[
"text_plain_output_1.png"
] |
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV
from tqdm import notebook
import numpy as np
import pandas as pd
import torch
import transformers as ppb
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
try:
    data = pd.read_csv('https://code.s3.yandex.net/datasets/toxic_comments.csv')
except:
    data = pd.read_csv('/kaggle/input/toxic-commentscsv/toxic_comments.csv')
data = data.drop('Unnamed: 0', axis=1)
model_class, tokenizer_class, pretrained_weights = (ppb.DistilBertModel, ppb.DistilBertTokenizer, 'distilbert-base-uncased')
tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
model = model_class.from_pretrained(pretrained_weights).to(DEVICE)
tokenized = data['text'].apply(lambda x: tokenizer.encode(x, add_special_tokens=True, truncation=True))
max_len = 0
for i in tokenized.values:
    if len(i) > max_len:
        max_len = len(i)
padded = np.array([i + [0] * (max_len - len(i)) for i in tokenized.values])
attention_mask = np.where(padded != 0, 1, 0)
batch_size = 400
embeddings = []
for i in notebook.tqdm(range(padded.shape[0] // batch_size)):
    batch = torch.LongTensor(padded[batch_size * i:batch_size * (i + 1)]).to(DEVICE)
    attention_mask_batch = torch.LongTensor(attention_mask[batch_size * i:batch_size * (i + 1)]).to(DEVICE)
    with torch.no_grad():
        batch_embeddings = model(batch, attention_mask=attention_mask_batch)
    embeddings.append(batch_embeddings[0][:, 0, :].cpu().detach().numpy())
features = np.concatenate(embeddings)
features.shape
target = data['toxic']
target = target[:159200]
params = {'C': [92], 'penalty': ['l2'], 'solver': ['lbfgs']}
lr_clf = LogisticRegression(max_iter=30000)
lr_model = GridSearchCV(lr_clf, param_grid=params, scoring='f1', cv=3)
lr_model.fit(features, target)
lr_model.best_score_
f1_score(target_train, lr_model.predict(features_train))
f1_score(target_test, lr_model.predict(features_test))
|
code
|
128018806/cell_50
|
[
"text_plain_output_1.png"
] |
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV
from tqdm import notebook
import numpy as np
import pandas as pd
import torch
import transformers as ppb
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
try:
    data = pd.read_csv('https://code.s3.yandex.net/datasets/toxic_comments.csv')
except:
    data = pd.read_csv('/kaggle/input/toxic-commentscsv/toxic_comments.csv')
data = data.drop('Unnamed: 0', axis=1)
model_class, tokenizer_class, pretrained_weights = (ppb.DistilBertModel, ppb.DistilBertTokenizer, 'distilbert-base-uncased')
tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
model = model_class.from_pretrained(pretrained_weights).to(DEVICE)
tokenized = data['text'].apply(lambda x: tokenizer.encode(x, add_special_tokens=True, truncation=True))
max_len = 0
for i in tokenized.values:
    if len(i) > max_len:
        max_len = len(i)
padded = np.array([i + [0] * (max_len - len(i)) for i in tokenized.values])
attention_mask = np.where(padded != 0, 1, 0)
batch_size = 400
embeddings = []
for i in notebook.tqdm(range(padded.shape[0] // batch_size)):
    batch = torch.LongTensor(padded[batch_size * i:batch_size * (i + 1)]).to(DEVICE)
    attention_mask_batch = torch.LongTensor(attention_mask[batch_size * i:batch_size * (i + 1)]).to(DEVICE)
    with torch.no_grad():
        batch_embeddings = model(batch, attention_mask=attention_mask_batch)
    embeddings.append(batch_embeddings[0][:, 0, :].cpu().detach().numpy())
features = np.concatenate(embeddings)
features.shape
target = data['toxic']
target = target[:159200]
params = {'C': [92], 'penalty': ['l2'], 'solver': ['lbfgs']}
lr_clf = LogisticRegression(max_iter=30000)
lr_model = GridSearchCV(lr_clf, param_grid=params, scoring='f1', cv=3)
lr_model.fit(features, target)
lr_model.best_score_
f1_score(target_train, lr_model.predict(features_train))
|
code
|
128018806/cell_7
|
[
"text_plain_output_1.png"
] |
import numpy as np
import pandas as pd
import torch
import re
import transformers as ppb
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV
from sklearn.metrics import f1_score
from tqdm import notebook
from lightgbm import LGBMClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
|
code
|
128018806/cell_62
|
[
"text_plain_output_2.png",
"text_plain_output_1.png"
] |
from lightgbm import LGBMClassifier
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV
lgr = LGBMClassifier()
param_dist = {'learning_rate': [0.1], 'num_leaves': [100], 'n_estimators': [300], 'device': ['gpu']}
LGBM_model = GridSearchCV(lgr, param_grid=param_dist, cv=3, scoring='f1', verbose=5)
LGBM_model.fit(features_train, target_train)
LGBM_model.best_estimator_
LGBM_model.best_score_
|
code
|
128018806/cell_28
|
[
"text_plain_output_1.png"
] |
import torch
import transformers as ppb
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model_class, tokenizer_class, pretrained_weights = (ppb.DistilBertModel, ppb.DistilBertTokenizer, 'distilbert-base-uncased')
tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
model = model_class.from_pretrained(pretrained_weights).to(DEVICE)
|
code
|
128018806/cell_15
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
import pandas as pd
try:
    data = pd.read_csv('https://code.s3.yandex.net/datasets/toxic_comments.csv')
except:
    data = pd.read_csv('/kaggle/input/toxic-commentscsv/toxic_comments.csv')
data.info()
|
code
|
128018806/cell_66
|
[
"text_plain_output_1.png"
] |
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.model_selection import cross_val_score, train_test_split, GridSearchCV
from sklearn.svm import LinearSVC
from tqdm import notebook
import numpy as np
import pandas as pd
import torch
import transformers as ppb
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
try:
    data = pd.read_csv('https://code.s3.yandex.net/datasets/toxic_comments.csv')
except:
    data = pd.read_csv('/kaggle/input/toxic-commentscsv/toxic_comments.csv')
data = data.drop('Unnamed: 0', axis=1)
model_class, tokenizer_class, pretrained_weights = (ppb.DistilBertModel, ppb.DistilBertTokenizer, 'distilbert-base-uncased')
tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
model = model_class.from_pretrained(pretrained_weights).to(DEVICE)
tokenized = data['text'].apply(lambda x: tokenizer.encode(x, add_special_tokens=True, truncation=True))
max_len = 0
for i in tokenized.values:
    if len(i) > max_len:
        max_len = len(i)
padded = np.array([i + [0] * (max_len - len(i)) for i in tokenized.values])
attention_mask = np.where(padded != 0, 1, 0)
batch_size = 400
embeddings = []
for i in notebook.tqdm(range(padded.shape[0] // batch_size)):
    batch = torch.LongTensor(padded[batch_size * i:batch_size * (i + 1)]).to(DEVICE)
    attention_mask_batch = torch.LongTensor(attention_mask[batch_size * i:batch_size * (i + 1)]).to(DEVICE)
    with torch.no_grad():
        batch_embeddings = model(batch, attention_mask=attention_mask_batch)
    embeddings.append(batch_embeddings[0][:, 0, :].cpu().detach().numpy())
features = np.concatenate(embeddings)
features.shape
target = data['toxic']
target = target[:159200]
params = {'C': [92], 'penalty': ['l2'], 'solver': ['lbfgs']}
lr_clf = LogisticRegression(max_iter=30000)
lr_model = GridSearchCV(lr_clf, param_grid=params, scoring='f1', cv=3)
lr_model.fit(features, target)
lr_model.best_score_
f1_score(target_train, lr_model.predict(features_train))
params = {'n_estimators': [10, 40, 100], 'max_depth': [1, 4, 9]}
rfr = RandomForestClassifier()
rfr_model = GridSearchCV(rfr, param_grid=params, scoring='f1', cv=3)
rfr_model.fit(features_train, target_train)
params = {}
svc = LinearSVC(max_iter=30000)
svc_model = GridSearchCV(svc, param_grid=params, scoring='f1', cv=3)
svc_model.fit(features_train, target_train)
svc_model.best_score_
|
code
|
128018806/cell_35
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
from tqdm import notebook
import numpy as np
import pandas as pd
import torch
import transformers as ppb
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
try:
    data = pd.read_csv('https://code.s3.yandex.net/datasets/toxic_comments.csv')
except:
    data = pd.read_csv('/kaggle/input/toxic-commentscsv/toxic_comments.csv')
data = data.drop('Unnamed: 0', axis=1)
model_class, tokenizer_class, pretrained_weights = (ppb.DistilBertModel, ppb.DistilBertTokenizer, 'distilbert-base-uncased')
tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
model = model_class.from_pretrained(pretrained_weights).to(DEVICE)
tokenized = data['text'].apply(lambda x: tokenizer.encode(x, add_special_tokens=True, truncation=True))
max_len = 0
for i in tokenized.values:
    if len(i) > max_len:
        max_len = len(i)
padded = np.array([i + [0] * (max_len - len(i)) for i in tokenized.values])
attention_mask = np.where(padded != 0, 1, 0)
batch_size = 400
embeddings = []
for i in notebook.tqdm(range(padded.shape[0] // batch_size)):
    batch = torch.LongTensor(padded[batch_size * i:batch_size * (i + 1)]).to(DEVICE)
    attention_mask_batch = torch.LongTensor(attention_mask[batch_size * i:batch_size * (i + 1)]).to(DEVICE)
    with torch.no_grad():
        batch_embeddings = model(batch, attention_mask=attention_mask_batch)
    embeddings.append(batch_embeddings[0][:, 0, :].cpu().detach().numpy())
|
code
|
106201316/cell_13
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = [train, test]
submition_id = test['PassengerId']
def check_missing(data, dtype='object'):
    """Detect the missing values by type and return a data frame with two
    columns: feature name and how many NaN.

    parameters: data -- which data frame you need to check;
    dtype -- 'object' for categorical features, anything else for numerical.
    """
    if dtype == 'object':
        nulls = data.select_dtypes(include='object')
    else:
        nulls = data.select_dtypes(exclude='object')
    nulls = nulls.isnull().sum()
    nulls = nulls.drop(nulls[nulls == 0].index).sort_values(ascending=False)
    return pd.DataFrame(nulls)

def lower_case_features(data):
    """Rename every single column name to lowercase; list(data) contains
    every column title in the data frame.
    """
    data.columns = [col.lower() for col in list(data)]

lower_case_features(train)
lower_case_features(test)
for dataset in [train, test]:
    dataset['agecat'] = pd.cut(dataset['age'], bins=5, labels=['0-16', '16-32', '32-48', '48-64', '64-85'], include_lowest=True)
pd.DataFrame(train[['agecat', 'survived']].value_counts())
|
code
|
106201316/cell_4
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = [train, test]
submition_id = test['PassengerId']
train.head(3)
|
code
|
106201316/cell_23
|
[
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = [train, test]
submition_id = test['PassengerId']
def check_missing(data, dtype='object'):
    """Detect the missing values by type and return a data frame with two
    columns: feature name and how many NaN.

    parameters: data -- which data frame you need to check;
    dtype -- 'object' for categorical features, anything else for numerical.
    """
    if dtype == 'object':
        nulls = data.select_dtypes(include='object')
    else:
        nulls = data.select_dtypes(exclude='object')
    nulls = nulls.isnull().sum()
    nulls = nulls.drop(nulls[nulls == 0].index).sort_values(ascending=False)
    return pd.DataFrame(nulls)

def lower_case_features(data):
    """Rename every single column name to lowercase; list(data) contains
    every column title in the data frame.
    """
    data.columns = [col.lower() for col in list(data)]

lower_case_features(train)
lower_case_features(test)
for dataset in [train, test]:
    dataset['agecat'] = pd.cut(dataset['age'], bins=5, labels=['0-16', '16-32', '32-48', '48-64', '64-85'], include_lowest=True)
pd.DataFrame(train[['agecat', 'survived']].value_counts())
pd.DataFrame(train.embarked.value_counts())
tr = train[['pclass', 'survived']].groupby(['pclass'], as_index=False).mean()
tr.index = [1, 2, 3]
tr
|
code
|
106201316/cell_33
|
[
"text_html_output_1.png",
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = [train, test]
submition_id = test['PassengerId']
def check_missing(data, dtype='object'):
    """Detect the missing values by type and return a data frame with two
    columns: feature name and how many NaN.

    parameters: data -- which data frame you need to check;
    dtype -- 'object' for categorical features, anything else for numerical.
    """
    if dtype == 'object':
        nulls = data.select_dtypes(include='object')
    else:
        nulls = data.select_dtypes(exclude='object')
    nulls = nulls.isnull().sum()
    nulls = nulls.drop(nulls[nulls == 0].index).sort_values(ascending=False)
    return pd.DataFrame(nulls)

def lower_case_features(data):
    """Rename every single column name to lowercase; list(data) contains
    every column title in the data frame.
    """
    data.columns = [col.lower() for col in list(data)]

lower_case_features(train)
lower_case_features(test)
for dataset in [train, test]:
    dataset['agecat'] = pd.cut(dataset['age'], bins=5, labels=['0-16', '16-32', '32-48', '48-64', '64-85'], include_lowest=True)
pd.DataFrame(train[['agecat', 'survived']].value_counts())
pd.DataFrame(train.embarked.value_counts())
for d in all_data:
    d.drop(['passengerid', 'cabin', 'ticket'], axis=1, inplace=True)

def get_title(df):
    """Split the name feature, take the title between the comma and the dot,
    and append it to the data frame as a new 'title' feature; the name
    feature is dropped.
    """
    sliced_name = df['name'].str.split(expand=True)
    sliced_name[1] = sliced_name[1].str.replace('.', '', regex=True)
    df['title'] = sliced_name[1]
    df.drop(['name'], axis=1, inplace=True)
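# get_title(train) and get_title(test) are assumed to be called in an earlier cell not captured in this row.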
train['title'].value_counts()
|
code
|
106201316/cell_44
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = [train, test]
submition_id = test['PassengerId']
def check_missing(data, dtype='object'):
if dtype == 'object':
nulls = data.select_dtypes(include='object')
else:
nulls = data.select_dtypes(exclude='object')
nulls = nulls.isnull().sum()
nulls = nulls.drop(nulls[nulls == 0].index).sort_values(ascending=False)
return pd.DataFrame(nulls)
'\n this function detecting the missing values by the type\n and return data frame contains two columns \n feature name : how many NAN\n ----------\n parameters\n ----------\n data : which data frame you need to check it\n \n dtype : do you looking for numerical featuers or categorical ? \n \n '
def lower_case_features(data):
data.columns = [col.lower() for col in list(data)]
'\n simple function to return evry single column name in lowercase\n list(data) ==> contains every column title in the data frame \n \n '
lower_case_features(train)
lower_case_features(test)
for dataset in [train, test]:
dataset['agecat'] = pd.cut(dataset['age'], bins=5, labels=['0-16', '16-32', '32-48', '48-64', '64-85'], include_lowest=True)
pd.DataFrame(train[['agecat', 'survived']].value_counts())
pd.DataFrame(train.embarked.value_counts())
for d in all_data:
d.drop(['passengerid', 'cabin', 'ticket'], axis=1, inplace=True)
def get_title(df):
sliced_name = df['name'].str.split(expand=True)
sliced_name[1] = sliced_name[1].str.replace('.', '', regex=False)  # literal period, not a regex wildcard
    df['title'] = sliced_name[1]
    df.drop(['name'], axis=1, inplace=True)
'\n this function splits the name feature, takes the title token that follows the comma,\n and appends it as a new feature in the pandas data frame\n \n ----------\n parameters\n ----------\n just your dataframe name\n \n -------\n returns\n -------\n \n your data frame with a new feature called title and without the name feature\n \n '
n_train = train.shape[0]
y = train.survived
df = pd.concat((train, test)).reset_index(drop=True)
df.drop(['survived'], axis=1, inplace=True)
print('numeric missing values are')
check_missing(df, 'ex')
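# hedged aside (not in the original notebook): the same numeric check inline;
# any dtype argument other than 'object' (here 'ex') selects the numeric branch
df.select_dtypes(exclude='object').isnull().sum().loc[lambda s: s > 0]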
|
code
|
106201316/cell_20
|
[
"image_output_1.png"
] |
import matplotlib.pyplot as plt
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
sns.set(rc={'figure.figsize': (30, 18)})
import matplotlib.pyplot as plt
import os
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = [train, test]
submition_id = test['PassengerId']
def check_missing(data, dtype='object'):
if dtype == 'object':
nulls = data.select_dtypes(include='object')
else:
nulls = data.select_dtypes(exclude='object')
nulls = nulls.isnull().sum()
nulls = nulls.drop(nulls[nulls == 0].index).sort_values(ascending=False)
return pd.DataFrame(nulls)
'\n this function detects the missing values by type\n and returns a data frame containing two columns:\n feature name : how many NaNs\n ----------\n parameters\n ----------\n data : which data frame you need to check\n \n dtype : are you looking for numerical features or categorical?\n \n '
def lower_case_features(data):
data.columns = [col.lower() for col in list(data)]
'\n simple function to rewrite every single column name in lowercase, in place\n list(data) ==> contains every column title in the data frame\n \n '
lower_case_features(train)
lower_case_features(test)
for dataset in [train, test]:
dataset['agecat'] = pd.cut(dataset['age'], bins=5, labels=['0-16', '16-32', '32-48', '48-64', '64-85'], include_lowest=True)
pd.DataFrame(train[['agecat', 'survived']].value_counts())
fig = sns.catplot(data=train, x='agecat', y='survived', kind='bar', palette='deep')
fig.set_axis_labels('Ages', 'survival rate', size=15)
fig.fig.suptitle('survival rate per ages', verticalalignment='center', size=15)
plt.show()
fig = sns.catplot(data=train, x='sex', y='survived', kind='bar', palette='deep')
fig.set_axis_labels('Sex', 'Survival rate', size=15)
fig.fig.suptitle('survival rate per gender', verticalalignment='center', size=15)
plt.show()
pd.DataFrame(train.embarked.value_counts())
fig = sns.catplot(data=train, x='embarked', y='survived', kind='bar', palette='deep')
fig.set_axis_labels('embarked', 'survival rate', size=15)
fig.fig.suptitle('survival rate per Port of Embarkation', verticalalignment='bottom', size=14)
plt.show()
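# hedged aside (not in the original notebook): the rates behind the bar chart
train.groupby('embarked')['survived'].mean().sort_values(ascending=False)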
|
code
|
106201316/cell_6
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = [train, test]
submition_id = test['PassengerId']
def check_missing(data, dtype='object'):
if dtype == 'object':
nulls = data.select_dtypes(include='object')
else:
nulls = data.select_dtypes(exclude='object')
nulls = nulls.isnull().sum()
nulls = nulls.drop(nulls[nulls == 0].index).sort_values(ascending=False)
return pd.DataFrame(nulls)
'\n this function detects the missing values by type\n and returns a data frame containing two columns:\n feature name : how many NaNs\n ----------\n parameters\n ----------\n data : which data frame you need to check\n dtype : are you looking for numerical features or categorical?\n '
print('done ===>> check missing data function')
|
code
|
106201316/cell_40
|
[
"text_plain_output_2.png",
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = [train, test]
submition_id = test['PassengerId']
def check_missing(data, dtype='object'):
if dtype == 'object':
nulls = data.select_dtypes(include='object')
else:
nulls = data.select_dtypes(exclude='object')
nulls = nulls.isnull().sum()
nulls = nulls.drop(nulls[nulls == 0].index).sort_values(ascending=False)
return pd.DataFrame(nulls)
'\n this function detects the missing values by type\n and returns a data frame containing two columns:\n feature name : how many NaNs\n ----------\n parameters\n ----------\n data : which data frame you need to check\n \n dtype : are you looking for numerical features or categorical?\n \n '
def lower_case_features(data):
data.columns = [col.lower() for col in list(data)]
'\n simple function to rewrite every single column name in lowercase, in place\n list(data) ==> contains every column title in the data frame\n \n '
lower_case_features(train)
lower_case_features(test)
for dataset in [train, test]:
dataset['agecat'] = pd.cut(dataset['age'], bins=5, labels=['0-16', '16-32', '32-48', '48-64', '64-85'], include_lowest=True)
pd.DataFrame(train[['agecat', 'survived']].value_counts())
pd.DataFrame(train.embarked.value_counts())
for d in all_data:
d.drop(['passengerid', 'cabin', 'ticket'], axis=1, inplace=True)
def get_title(df):
sliced_name = df['name'].str.split(expand=True)
sliced_name[1] = sliced_name[1].str.replace('.', '', regex=False)  # literal period, not a regex wildcard
    df['title'] = sliced_name[1]
    df.drop(['name'], axis=1, inplace=True)
'\n this function splits the name feature, takes the title token that follows the comma,\n and appends it as a new feature in the pandas data frame\n \n ----------\n parameters\n ----------\n just your dataframe name\n \n -------\n returns\n -------\n \n your data frame with a new feature called title and without the name feature\n \n '
# 'family' is not created in the code captured above; a common Titanic definition
# (assumed here) is household size = sibsp + parch + 1
for d in [train, test]:
    d['family'] = d['sibsp'] + d['parch'] + 1
train[['family', 'survived']].groupby(['family'], as_index=False).mean()
|
code
|
106201316/cell_29
|
[
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = [train, test]
submition_id = test['PassengerId']
def check_missing(data, dtype='object'):
if dtype == 'object':
nulls = data.select_dtypes(include='object')
else:
nulls = data.select_dtypes(exclude='object')
nulls = nulls.isnull().sum()
nulls = nulls.drop(nulls[nulls == 0].index).sort_values(ascending=False)
return pd.DataFrame(nulls)
'\n this function detects the missing values by type\n and returns a data frame containing two columns:\n feature name : how many NaNs\n ----------\n parameters\n ----------\n data : which data frame you need to check\n \n dtype : are you looking for numerical features or categorical?\n \n '
def lower_case_features(data):
data.columns = [col.lower() for col in list(data)]
'\n simple function to rewrite every single column name in lowercase, in place\n list(data) ==> contains every column title in the data frame\n \n '
lower_case_features(train)
lower_case_features(test)
for dataset in [train, test]:
dataset['agecat'] = pd.cut(dataset['age'], bins=5, labels=['0-16', '16-32', '32-48', '48-64', '64-85'], include_lowest=True)
pd.DataFrame(train[['agecat', 'survived']].value_counts())
pd.DataFrame(train.embarked.value_counts())
for d in all_data:
d.drop(['passengerid', 'cabin', 'ticket'], axis=1, inplace=True)
train_name_slices = train['name'].str.split(expand=True)
print('name feature slices look like this')
train_name_slices.head(2)
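# hedged aside (not in the original notebook): whitespace splitting only works
# because every name is formatted 'Surname, Title. Given names'; a regex capture
# between the comma and the first period is a sturdier way to pull the titles
train['name'].str.extract(r',\s*([^\.]+)\.', expand=False).str.strip().value_counts().head()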
|
code
|
106201316/cell_39
|
[
"text_html_output_1.png",
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = [train, test]
submition_id = test['PassengerId']
def check_missing(data, dtype='object'):
if dtype == 'object':
nulls = data.select_dtypes(include='object')
else:
nulls = data.select_dtypes(exclude='object')
nulls = nulls.isnull().sum()
nulls = nulls.drop(nulls[nulls == 0].index).sort_values(ascending=False)
return pd.DataFrame(nulls)
'\n this function detects the missing values by type\n and returns a data frame containing two columns:\n feature name : how many NaNs\n ----------\n parameters\n ----------\n data : which data frame you need to check\n \n dtype : are you looking for numerical features or categorical?\n \n '
def lower_case_features(data):
data.columns = [col.lower() for col in list(data)]
'\n simple function to rewrite every single column name in lowercase, in place\n list(data) ==> contains every column title in the data frame\n \n '
lower_case_features(train)
lower_case_features(test)
for dataset in [train, test]:
dataset['agecat'] = pd.cut(dataset['age'], bins=5, labels=['0-16', '16-32', '32-48', '48-64', '64-85'], include_lowest=True)
pd.DataFrame(train[['agecat', 'survived']].value_counts())
pd.DataFrame(train.embarked.value_counts())
for d in all_data:
d.drop(['passengerid', 'cabin', 'ticket'], axis=1, inplace=True)
def get_title(df):
sliced_name = df['name'].str.split(expand=True)
sliced_name[1] = sliced_name[1].str.replace('.', '', regex=False)  # literal period, not a regex wildcard
    df['title'] = sliced_name[1]
    df.drop(['name'], axis=1, inplace=True)
'\n this function splits the name feature, takes the title token that follows the comma,\n and appends it as a new feature in the pandas data frame\n \n ----------\n parameters\n ----------\n just your dataframe name\n \n -------\n returns\n -------\n \n your data frame with a new feature called title and without the name feature\n \n '
print('now data looks like')
train.head(3)
|
code
|
106201316/cell_11
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = [train, test]
submition_id = test['PassengerId']
def check_missing(data, dtype='object'):
if dtype == 'object':
nulls = data.select_dtypes(include='object')
else:
nulls = data.select_dtypes(exclude='object')
nulls = nulls.isnull().sum()
nulls = nulls.drop(nulls[nulls == 0].index).sort_values(ascending=False)
return pd.DataFrame(nulls)
'\n this function detects the missing values by type\n and returns a data frame containing two columns:\n feature name : how many NaNs\n ----------\n parameters\n ----------\n data : which data frame you need to check\n \n dtype : are you looking for numerical features or categorical?\n \n '
def lower_case_features(data):
data.columns = [col.lower() for col in list(data)]
'\n simple function to rewrite every single column name in lowercase, in place\n list(data) ==> contains every column title in the data frame\n \n '
lower_case_features(train)
lower_case_features(test)
train.describe()
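# hedged aside (not in the original notebook): describe() above summarizes only the
# numeric columns; the categorical columns get their own summary with include=
train.describe(include=['object'])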
|
code
|
106201316/cell_19
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = [train, test]
submition_id = test['PassengerId']
def check_missing(data, dtype='object'):
if dtype == 'object':
nulls = data.select_dtypes(include='object')
else:
nulls = data.select_dtypes(exclude='object')
nulls = nulls.isnull().sum()
nulls = nulls.drop(nulls[nulls == 0].index).sort_values(ascending=False)
return pd.DataFrame(nulls)
'\n this function detects the missing values by type\n and returns a data frame containing two columns:\n feature name : how many NaNs\n ----------\n parameters\n ----------\n data : which data frame you need to check\n \n dtype : are you looking for numerical features or categorical?\n \n '
def lower_case_features(data):
data.columns = [col.lower() for col in list(data)]
'\n simple function to rewrite every single column name in lowercase, in place\n list(data) ==> contains every column title in the data frame\n \n '
lower_case_features(train)
lower_case_features(test)
for dataset in [train, test]:
dataset['agecat'] = pd.cut(dataset['age'], bins=5, labels=['0-16', '16-32', '32-48', '48-64', '64-85'], include_lowest=True)
pd.DataFrame(train[['agecat', 'survived']].value_counts())
pd.DataFrame(train.embarked.value_counts())
|
code
|
106201316/cell_50
|
[
"text_html_output_1.png",
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = [train, test]
submition_id = test['PassengerId']
def check_missing(data, dtype='object'):
if dtype == 'object':
nulls = data.select_dtypes(include='object')
else:
nulls = data.select_dtypes(exclude='object')
nulls = nulls.isnull().sum()
nulls = nulls.drop(nulls[nulls == 0].index).sort_values(ascending=False)
return pd.DataFrame(nulls)
'\n this function detects the missing values by type\n and returns a data frame containing two columns:\n feature name : how many NaNs\n ----------\n parameters\n ----------\n data : which data frame you need to check\n \n dtype : are you looking for numerical features or categorical?\n \n '
def lower_case_features(data):
data.columns = [col.lower() for col in list(data)]
'\n simple function to rewrite every single column name in lowercase, in place\n list(data) ==> contains every column title in the data frame\n \n '
lower_case_features(train)
lower_case_features(test)
for dataset in [train, test]:
dataset['agecat'] = pd.cut(dataset['age'], bins=5, labels=['0-16', '16-32', '32-48', '48-64', '64-85'], include_lowest=True)
pd.DataFrame(train[['agecat', 'survived']].value_counts())
pd.DataFrame(train.embarked.value_counts())
for d in all_data:
d.drop(['passengerid', 'cabin', 'ticket'], axis=1, inplace=True)
def get_title(df):
sliced_name = df['name'].str.split(expand=True)
sliced_name[1] = sliced_name[1].str.replace('.', '', regex=False)  # literal period, not a regex wildcard
    df['title'] = sliced_name[1]
    df.drop(['name'], axis=1, inplace=True)
'\n this function splits the name feature, takes the title token that follows the comma,\n and appends it as a new feature in the pandas data frame\n \n ----------\n parameters\n ----------\n just your dataframe name\n \n -------\n returns\n -------\n \n your data frame with a new feature called title and without the name feature\n \n '
n_train = train.shape[0]
y = train.survived
df = pd.concat((train, test)).reset_index(drop=True)
df.drop(['survived'], axis=1, inplace=True)
df['fare'] = df['fare'].fillna(value=df.fare.mean())
df.drop(['agecat'], axis=1, inplace=True)
df[df['age'].isnull()].head(2)
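# hedged aside (not in the original notebook): a common way to fill these ages is a
# group median, e.g. by passenger class, in a single vectorized pass
df.groupby('pclass')['age'].transform('median').head()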
|
code
|
106201316/cell_52
|
[
"text_html_output_1.png",
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = [train, test]
submition_id = test['PassengerId']
def check_missing(data, dtype='object'):
if dtype == 'object':
nulls = data.select_dtypes(include='object')
else:
nulls = data.select_dtypes(exclude='object')
nulls = nulls.isnull().sum()
nulls = nulls.drop(nulls[nulls == 0].index).sort_values(ascending=False)
return pd.DataFrame(nulls)
'\n this function detects the missing values by type\n and returns a data frame containing two columns:\n feature name : how many NaNs\n ----------\n parameters\n ----------\n data : which data frame you need to check\n \n dtype : are you looking for numerical features or categorical?\n \n '
def lower_case_features(data):
data.columns = [col.lower() for col in list(data)]
'\n simple function to rewrite every single column name in lowercase, in place\n list(data) ==> contains every column title in the data frame\n \n '
lower_case_features(train)
lower_case_features(test)
for dataset in [train, test]:
dataset['agecat'] = pd.cut(dataset['age'], bins=5, labels=['0-16', '16-32', '32-48', '48-64', '64-85'], include_lowest=True)
pd.DataFrame(train[['agecat', 'survived']].value_counts())
pd.DataFrame(train.embarked.value_counts())
for d in all_data:
d.drop(['passengerid', 'cabin', 'ticket'], axis=1, inplace=True)
def get_title(df):
sliced_name = df['name'].str.split(expand=True)
sliced_name[1] = sliced_name[1].str.replace('.', '', regex=False)  # literal period, not a regex wildcard
    df['title'] = sliced_name[1]
    df.drop(['name'], axis=1, inplace=True)
'\n this function splits the name feature, takes the title token that follows the comma,\n and appends it as a new feature in the pandas data frame\n \n ----------\n parameters\n ----------\n just your dataframe name\n \n -------\n returns\n -------\n \n your data frame with a new feature called title and without the name feature\n \n '
def get_others(df):
keep = ['Mr', 'Miss', 'Mrs', 'Master']
titles = list(df['title'].values)
others = [i for i in titles if i not in keep]
df['title'] = df['title'].replace(others, 'other')
return df
"\n \n this function takes any value except ('Mr', 'Miss', 'Mrs', 'Master')\n and append it to a list and replace the values in the data frame with other\n \n ----------\n parameters\n ----------\n just the data set\n \n -------\n returns\n -------\n the data frame with title feature with \n "
n_train = train.shape[0]
y = train.survived
df = pd.concat((train, test)).reset_index(drop=True)
df.drop(['survived'], axis=1, inplace=True)
df['fare'] = df['fare'].fillna(value=df.fare.mean())
df.drop(['agecat'], axis=1, inplace=True)
# get_title must run first (as in the companion cell) so 'title' exists on df
get_title(df)
titles = list(df['title'].unique())
for title in titles:
print(df[df['title'] == str(title)]['age'].mode().astype(int), title, '\n')
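# hedged aside (not in the original notebook): the same per-title modes without the
# loop (iat[0] keeps the first mode when there are ties)
df.groupby('title')['age'].agg(lambda s: s.mode().iat[0]).astype(int)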
|
code
|
106201316/cell_1
|
[
"text_plain_output_1.png"
] |
import os
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
sns.set(rc={'figure.figsize': (30, 18)})
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
|
code
|
106201316/cell_32
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = [train, test]
submition_id = test['PassengerId']
def check_missing(data, dtype='object'):
if dtype == 'object':
nulls = data.select_dtypes(include='object')
else:
nulls = data.select_dtypes(exclude='object')
nulls = nulls.isnull().sum()
nulls = nulls.drop(nulls[nulls == 0].index).sort_values(ascending=False)
return pd.DataFrame(nulls)
'\n this function detects the missing values by type\n and returns a data frame containing two columns:\n feature name : how many NaNs\n ----------\n parameters\n ----------\n data : which data frame you need to check\n \n dtype : are you looking for numerical features or categorical?\n \n '
def lower_case_features(data):
data.columns = [col.lower() for col in list(data)]
'\n simple function to rewrite every single column name in lowercase, in place\n list(data) ==> contains every column title in the data frame\n \n '
lower_case_features(train)
lower_case_features(test)
for dataset in [train, test]:
dataset['agecat'] = pd.cut(dataset['age'], bins=5, labels=['0-16', '16-32', '32-48', '48-64', '64-85'], include_lowest=True)
pd.DataFrame(train[['agecat', 'survived']].value_counts())
pd.DataFrame(train.embarked.value_counts())
for d in all_data:
d.drop(['passengerid', 'cabin', 'ticket'], axis=1, inplace=True)
def get_title(df):
sliced_name = df['name'].str.split(expand=True)
sliced_name[1] = sliced_name[1].str.replace('.', '', regex=False)  # literal period, not a regex wildcard
    df['title'] = sliced_name[1]
    df.drop(['name'], axis=1, inplace=True)
'\n this function splits the name feature, takes the title token that follows the comma,\n and appends it as a new feature in the pandas data frame\n \n ----------\n parameters\n ----------\n just your dataframe name\n \n -------\n returns\n -------\n \n your data frame with a new feature called title and without the name feature\n \n '
for DF in [train, test]:
get_title(DF)
print('now data looks like')
train.head(3)
|
code
|
106201316/cell_8
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = [train, test]
submition_id = test['PassengerId']
def check_missing(data, dtype='object'):
if dtype == 'object':
nulls = data.select_dtypes(include='object')
else:
nulls = data.select_dtypes(exclude='object')
nulls = nulls.isnull().sum()
nulls = nulls.drop(nulls[nulls == 0].index).sort_values(ascending=False)
return pd.DataFrame(nulls)
'\n this function detects the missing values by type\n and returns a data frame containing two columns:\n feature name : how many NaNs\n ----------\n parameters\n ----------\n data : which data frame you need to check\n \n dtype : are you looking for numerical features or categorical?\n \n '
def lower_case_features(data):
data.columns = [col.lower() for col in list(data)]
'\n simple function to rewrite every single column name in lowercase, in place\n list(data) ==> contains every column title in the data frame\n '
lower_case_features(train)
lower_case_features(test)
print('now the features are \n', test.columns)
|
code
|
106201316/cell_15
|
[
"text_html_output_1.png"
] |
import matplotlib.pyplot as plt
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import seaborn as sns
sns.set(rc={'figure.figsize': (30, 18)})
import matplotlib.pyplot as plt
import os
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = [train, test]
submition_id = test['PassengerId']
def check_missing(data, dtype='object'):
if dtype == 'object':
nulls = data.select_dtypes(include='object')
else:
nulls = data.select_dtypes(exclude='object')
nulls = nulls.isnull().sum()
nulls = nulls.drop(nulls[nulls == 0].index).sort_values(ascending=False)
return pd.DataFrame(nulls)
'\n this function detects the missing values by type\n and returns a data frame containing two columns:\n feature name : how many NaNs\n ----------\n parameters\n ----------\n data : which data frame you need to check\n \n dtype : are you looking for numerical features or categorical?\n \n '
def lower_case_features(data):
data.columns = [col.lower() for col in list(data)]
'\n simple function to rewrite every single column name in lowercase, in place\n list(data) ==> contains every column title in the data frame\n \n '
lower_case_features(train)
lower_case_features(test)
fig = sns.catplot(data=train, x='agecat', y='survived', kind='bar', palette='deep')
fig.set_axis_labels('Ages', 'survival rate', size=15)
fig.fig.suptitle('survival rate per ages', verticalalignment='center', size=15)
plt.show()
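# hedged aside (not in the original notebook): the rates the bars represent
train.groupby('agecat')['survived'].mean()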
|
code
|