path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses: 1 value)
---|---|---|---|
105192343/cell_12 | [
"text_plain_output_1.png"
] | total_apple = 5890
no_of_people = 70
no_of_apple_to_each = total_apple / no_of_people
total_apple = 5890
no_of_people = 70
no_of_apples_remaining = total_apple % no_of_people
print('no of apples remaining is', no_of_apples_remaining) | code |
17098574/cell_4 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LinearRegression
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
X = df[['OverallQual']].values
y = df['SalePrice'].values
slr = LinearRegression()
slr.fit(X, y) | code |
17098574/cell_11 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
slr = LinearRegression().fit(df[['OverallQual']].values, df['SalePrice'].values)
df_test['SalePrice'] = slr.predict(df_test[['OverallQual']].values)
df_test[['Id', 'SalePrice']].head() | code |
17098574/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
import seaborn as sns
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt | code |
17098574/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_test.head() | code |
17098574/cell_8 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
X = df[['OverallQual']].values
y = df['SalePrice'].values
slr = LinearRegression()
slr.fit(X, y)
df_test = pd.read_csv('../input/test.csv')
X_test = df_test[['OverallQual']].values
y_test_pred = slr.predict(X_test)
y_test_pred | code |
17098574/cell_3 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df.head() | code |
17098574/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_test.head() | code |
17098574/cell_5 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
X = df[['OverallQual']].values
y = df['SalePrice'].values
slr = LinearRegression()
slr.fit(X, y)
plt.scatter(X, y)
plt.plot(X, slr.predict(X), color='red')
plt.show() | code |
327983/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn import datasets, svm, cross_validation, tree, preprocessing, metrics
import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
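# bin passenger ages into decades (0-10, 10-20, ..., 70-80) to summarise survival by age group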
group_by_age = pd.cut(titanic_df['Age'], np.arange(0, 90, 10))
age_grouping = titanic_df.groupby(group_by_age).mean()
titanic_df.count()
titanic_df = titanic_df.drop(['Cabin'], axis=1)
titanic_df = titanic_df.dropna()
titanic_df.count()
def preprocess_titanic_df(df):
processed_df = df.copy()
le = preprocessing.LabelEncoder()
processed_df.Sex = le.fit_transform(processed_df.Sex)
processed_df.Embarked = le.fit_transform(processed_df.Embarked)
processed_df = processed_df.drop(['Name', 'Ticket'], axis=1)
return processed_df
processed_df = preprocess_titanic_df(titanic_df)
processed_df.count()
processed_df
X = processed_df.drop(['Survived'], axis=1).values
Y = processed_df['Survived'].values
print(X) | code |
327983/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
group_by_age = pd.cut(titanic_df['Age'], np.arange(0, 90, 10))
age_grouping = titanic_df.groupby(group_by_age).mean()
age_grouping['Survived'].plot.bar() | code |
327983/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
test_df.head() | code |
327983/cell_20 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn import datasets, svm, cross_validation, tree, preprocessing, metrics
import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
test_df.count()
test_df = test_df.dropna()
test_df.count()
def preprocess_titanic_df(df):
processed_df = df.copy()
le = preprocessing.LabelEncoder()
processed_df.Sex = le.fit_transform(processed_df.Sex)
processed_df.Embarked = le.fit_transform(processed_df.Embarked)
processed_df = processed_df.drop(['Name', 'Ticket'], axis=1)
return processed_df
processed_test_df = preprocess_titanic_df(test_df)
processed_test_df.count()
processed_test_df | code |
327983/cell_6 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean() | code |
327983/cell_29 | [
"text_html_output_1.png"
] | from sklearn import datasets, svm, cross_validation, tree, preprocessing, metrics
import numpy as np
import pandas as pd
import sklearn.ensemble as ske
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
group_by_age = pd.cut(titanic_df['Age'], np.arange(0, 90, 10))
age_grouping = titanic_df.groupby(group_by_age).mean()
titanic_df.count()
titanic_df = titanic_df.drop(['Cabin'], axis=1)
titanic_df = titanic_df.dropna()
titanic_df.count()
def preprocess_titanic_df(df):
processed_df = df.copy()
le = preprocessing.LabelEncoder()
processed_df.Sex = le.fit_transform(processed_df.Sex)
processed_df.Embarked = le.fit_transform(processed_df.Embarked)
processed_df = processed_df.drop(['Name', 'Ticket'], axis=1)
return processed_df
processed_df = preprocess_titanic_df(titanic_df)
processed_df.count()
processed_df
X = processed_df.drop(['Survived'], axis=1).values
Y = processed_df['Survived'].values
x_train, x_test, y_train, y_test = cross_validation.train_test_split(X, Y, test_size=0.2)
clf_dt = tree.DecisionTreeClassifier(max_depth=10)
clf_dt.fit(x_train, y_train)
clf_dt.score(x_test, y_test)
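# 20 random 80/20 splits for repeated hold-out evaluation of each classifier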
shuffle_validator = cross_validation.ShuffleSplit(len(X), n_iter=20, test_size=0.2, random_state=0)
def test_classifier(clf):
    scores = cross_validation.cross_val_score(clf, X, Y, cv=shuffle_validator)
    print('Accuracy: %0.4f (+/- %0.2f)' % (scores.mean(), scores.std()))
clf_rf = ske.RandomForestClassifier(n_estimators=50)
test_classifier(clf_rf)
clf_gb = ske.GradientBoostingClassifier(n_estimators=50)
test_classifier(clf_gb)
eclf = ske.VotingClassifier([('dt', clf_dt), ('rf', clf_rf), ('gb', clf_gb)])
test_classifier(eclf) | code |
327983/cell_26 | [
"text_plain_output_1.png"
] | from sklearn import datasets, svm, cross_validation, tree, preprocessing, metrics
import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
group_by_age = pd.cut(titanic_df['Age'], np.arange(0, 90, 10))
age_grouping = titanic_df.groupby(group_by_age).mean()
titanic_df.count()
titanic_df = titanic_df.drop(['Cabin'], axis=1)
titanic_df = titanic_df.dropna()
titanic_df.count()
def preprocess_titanic_df(df):
processed_df = df.copy()
le = preprocessing.LabelEncoder()
processed_df.Sex = le.fit_transform(processed_df.Sex)
processed_df.Embarked = le.fit_transform(processed_df.Embarked)
processed_df = processed_df.drop(['Name', 'Ticket'], axis=1)
return processed_df
processed_df = preprocess_titanic_df(titanic_df)
processed_df.count()
processed_df
X = processed_df.drop(['Survived'], axis=1).values
Y = processed_df['Survived'].values
x_train, x_test, y_train, y_test = cross_validation.train_test_split(X, Y, test_size=0.2)
clf_dt = tree.DecisionTreeClassifier(max_depth=10)
clf_dt.fit(x_train, y_train)
clf_dt.score(x_test, y_test)
shuffle_validator = cross_validation.ShuffleSplit(len(X), n_iter=20, test_size=0.2, random_state=0)
def test_classifier(clf):
    scores = cross_validation.cross_val_score(clf, X, Y, cv=shuffle_validator)
    print('Accuracy: %0.4f (+/- %0.2f)' % (scores.mean(), scores.std()))
test_classifier(clf_dt) | code |
327983/cell_11 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
test_df.count() | code |
327983/cell_19 | [
"text_plain_output_1.png"
] | from sklearn import datasets, svm, cross_validation, tree, preprocessing, metrics
import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
group_by_age = pd.cut(titanic_df['Age'], np.arange(0, 90, 10))
age_grouping = titanic_df.groupby(group_by_age).mean()
titanic_df.count()
titanic_df = titanic_df.drop(['Cabin'], axis=1)
titanic_df = titanic_df.dropna()
titanic_df.count()
def preprocess_titanic_df(df):
processed_df = df.copy()
le = preprocessing.LabelEncoder()
processed_df.Sex = le.fit_transform(processed_df.Sex)
processed_df.Embarked = le.fit_transform(processed_df.Embarked)
processed_df = processed_df.drop(['Name', 'Ticket'], axis=1)
return processed_df
processed_df = preprocess_titanic_df(titanic_df)
processed_df.count()
processed_df | code |
327983/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import random
import numpy as np
import pandas as pd
from sklearn import datasets, svm, cross_validation, tree, preprocessing, metrics
import sklearn.ensemble as ske
import tensorflow as tf
from tensorflow.contrib import skflow | code |
327983/cell_7 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
print(class_sex_grouping['Survived']) | code |
327983/cell_28 | [
"text_plain_output_1.png"
] | from sklearn import datasets, svm, cross_validation, tree, preprocessing, metrics
import numpy as np
import pandas as pd
import sklearn.ensemble as ske
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
group_by_age = pd.cut(titanic_df['Age'], np.arange(0, 90, 10))
age_grouping = titanic_df.groupby(group_by_age).mean()
titanic_df.count()
titanic_df = titanic_df.drop(['Cabin'], axis=1)
titanic_df = titanic_df.dropna()
titanic_df.count()
def preprocess_titanic_df(df):
processed_df = df.copy()
le = preprocessing.LabelEncoder()
processed_df.Sex = le.fit_transform(processed_df.Sex)
processed_df.Embarked = le.fit_transform(processed_df.Embarked)
processed_df = processed_df.drop(['Name', 'Ticket'], axis=1)
return processed_df
processed_df = preprocess_titanic_df(titanic_df)
processed_df.count()
processed_df
X = processed_df.drop(['Survived'], axis=1).values
Y = processed_df['Survived'].values
shuffle_validator = cross_validation.ShuffleSplit(len(X), n_iter=20, test_size=0.2, random_state=0)
def test_classifier(clf):
    scores = cross_validation.cross_val_score(clf, X, Y, cv=shuffle_validator)
    print('Accuracy: %0.4f (+/- %0.2f)' % (scores.mean(), scores.std()))
clf_rf = ske.RandomForestClassifier(n_estimators=50)
test_classifier(clf_rf)
clf_gb = ske.GradientBoostingClassifier(n_estimators=50)
test_classifier(clf_gb) | code |
327983/cell_8 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
class_sex_grouping['Survived'].plot.bar() | code |
327983/cell_16 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
group_by_age = pd.cut(titanic_df['Age'], np.arange(0, 90, 10))
age_grouping = titanic_df.groupby(group_by_age).mean()
titanic_df.count()
titanic_df = titanic_df.drop(['Cabin'], axis=1)
titanic_df = titanic_df.dropna()
titanic_df.count() | code |
327983/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.head() | code |
327983/cell_17 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
test_df.count()
test_df = test_df.dropna()
test_df.count() | code |
327983/cell_31 | [
"text_plain_output_1.png"
] | from sklearn import datasets, svm, cross_validation, tree, preprocessing, metrics
from tensorflow.contrib import skflow
import numpy as np
import pandas as pd
import tensorflow as tf
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
group_by_age = pd.cut(titanic_df['Age'], np.arange(0, 90, 10))
age_grouping = titanic_df.groupby(group_by_age).mean()
titanic_df.count()
titanic_df = titanic_df.drop(['Cabin'], axis=1)
titanic_df = titanic_df.dropna()
titanic_df.count()
def preprocess_titanic_df(df):
processed_df = df.copy()
le = preprocessing.LabelEncoder()
processed_df.Sex = le.fit_transform(processed_df.Sex)
processed_df.Embarked = le.fit_transform(processed_df.Embarked)
processed_df = processed_df.drop(['Name', 'Ticket'], axis=1)
return processed_df
processed_df = preprocess_titanic_df(titanic_df)
processed_df.count()
processed_df
X = processed_df.drop(['Survived'], axis=1).values
Y = processed_df['Survived'].values
def custom_model(X, Y):
layers = skflow.ops.dnn(X, [20, 40, 20], tf.tanh)
return skflow.models.logistic_regression(layers, Y)
x_train, x_test, y_train, y_test = cross_validation.train_test_split(X, Y, test_size=0.2)
tf_clf_c = skflow.TensorFlowEstimator(model_fn=custom_model, n_classes=2, batch_size=256, steps=1000, learning_rate=0.05)
tf_clf_c.fit(x_train, y_train)
metrics.accuracy_score(y_test, tf_clf_c.predict(x_test)) | code |
327983/cell_24 | [
"text_plain_output_1.png"
] | from sklearn import datasets, svm, cross_validation, tree, preprocessing, metrics
import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
group_by_age = pd.cut(titanic_df['Age'], np.arange(0, 90, 10))
age_grouping = titanic_df.groupby(group_by_age).mean()
titanic_df.count()
titanic_df = titanic_df.drop(['Cabin'], axis=1)
titanic_df = titanic_df.dropna()
titanic_df.count()
def preprocess_titanic_df(df):
processed_df = df.copy()
le = preprocessing.LabelEncoder()
processed_df.Sex = le.fit_transform(processed_df.Sex)
processed_df.Embarked = le.fit_transform(processed_df.Embarked)
processed_df = processed_df.drop(['Name', 'Ticket'], axis=1)
return processed_df
processed_df = preprocess_titanic_df(titanic_df)
processed_df.count()
processed_df
X = processed_df.drop(['Survived'], axis=1).values
Y = processed_df['Survived'].values
x_train, x_test, y_train, y_test = cross_validation.train_test_split(X, Y, test_size=0.2)
clf_dt = tree.DecisionTreeClassifier(max_depth=10)
clf_dt.fit(x_train, y_train)
clf_dt.score(x_test, y_test) | code |
327983/cell_10 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
group_by_age = pd.cut(titanic_df['Age'], np.arange(0, 90, 10))
age_grouping = titanic_df.groupby(group_by_age).mean()
titanic_df.count() | code |
327983/cell_27 | [
"text_plain_output_1.png"
] | from sklearn import datasets, svm, cross_validation, tree, preprocessing, metrics
import numpy as np
import pandas as pd
import sklearn.ensemble as ske
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df.groupby('Pclass').mean()
class_sex_grouping = titanic_df.groupby(['Pclass', 'Sex']).mean()
group_by_age = pd.cut(titanic_df['Age'], np.arange(0, 90, 10))
age_grouping = titanic_df.groupby(group_by_age).mean()
titanic_df.count()
titanic_df = titanic_df.drop(['Cabin'], axis=1)
titanic_df = titanic_df.dropna()
titanic_df.count()
def preprocess_titanic_df(df):
processed_df = df.copy()
le = preprocessing.LabelEncoder()
processed_df.Sex = le.fit_transform(processed_df.Sex)
processed_df.Embarked = le.fit_transform(processed_df.Embarked)
processed_df = processed_df.drop(['Name', 'Ticket'], axis=1)
return processed_df
processed_df = preprocess_titanic_df(titanic_df)
processed_df.count()
processed_df
X = processed_df.drop(['Survived'], axis=1).values
Y = processed_df['Survived'].values
shuffle_validator = cross_validation.ShuffleSplit(len(X), n_iter=20, test_size=0.2, random_state=0)
def test_classifier(clf):
    scores = cross_validation.cross_val_score(clf, X, Y, cv=shuffle_validator)
    print('Accuracy: %0.4f (+/- %0.2f)' % (scores.mean(), scores.std()))
clf_rf = ske.RandomForestClassifier(n_estimators=50)
test_classifier(clf_rf) | code |
327983/cell_5 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
titanic_df = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test_df = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
titanic_df['Survived'].mean() | code |
50242244/cell_13 | [
"text_plain_output_1.png"
] | from lightfm import LightFM
from lightfm.datasets import fetch_movielens
import numpy as np
import numpy as np # linear algebra
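# fetch the MovieLens 100k interactions, keeping only ratings of 3.0 or higher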
data = fetch_movielens(min_rating=3.0)
model_0 = LightFM(loss='warp')
model_0.fit(data['train'], epochs=70, num_threads=4)
def recommendation(model, data, ids):
n_users, n_items = data['test'].shape
for i in ids:
pos = data['item_labels'][data['test'].tocsr()[i].indices]
scores = model.predict(i, np.arange(n_items))
top_items = data['item_labels'][np.argsort(-scores)]
recommendation(model_0, data, [215, 489, 116]) | code |
50242244/cell_9 | [
"text_plain_output_1.png"
] | from lightfm import LightFM
from lightfm.datasets import fetch_movielens
data = fetch_movielens(min_rating=3.0)
model_1 = LightFM(loss='bpr')
model_1.fit(data['train'], epochs=70, num_threads=4) | code |
50242244/cell_6 | [
"text_plain_output_1.png"
] | from lightfm.datasets import fetch_movielens
data = fetch_movielens(min_rating=3.0)
print(repr(data['train']))
print(repr(data['test'])) | code |
50242244/cell_8 | [
"text_plain_output_1.png"
] | from lightfm import LightFM
from lightfm.datasets import fetch_movielens
data = fetch_movielens(min_rating=3.0)
model_0 = LightFM(loss='warp')
model_0.fit(data['train'], epochs=70, num_threads=4) | code |
50242244/cell_10 | [
"text_plain_output_1.png"
] | from lightfm import LightFM
from lightfm.datasets import fetch_movielens
from lightfm.evaluation import precision_at_k,auc_score
data = fetch_movielens(min_rating=3.0)
model_0 = LightFM(loss='warp')
model_0.fit(data['train'], epochs=70, num_threads=4)
model_1 = LightFM(loss='bpr')
model_1.fit(data['train'], epochs=70, num_threads=4)
test_precision_0 = auc_score(model_0, data['test'], data['train']).mean()
test_precision_1 = auc_score(model_1, data['test'], data['train']).mean()
print(test_precision_0, test_precision_1) | code |
50242244/cell_5 | [
"text_plain_output_1.png"
] | from lightfm.datasets import fetch_movielens
data = fetch_movielens(min_rating=3.0)
data | code |
17121162/cell_13 | [
"text_plain_output_1.png"
] | from keras.optimizers import Adam, Nadam
param = {'lr': (0.1, 10, 10), 'batch_size': [32, 64, 128, 256, 512], 'epochs': [10, 20, 50], 'validation_split': [0.1, 0.2, 0.5], 'dropout': [0.1, 0.25, 0.5, 0.8], 'optimizer': [Adam, Nadam], 'loss': ['categorical_crossentropy'], 'last_activation': ['softmax'], 'weight_regulizer': [None]} | code |
17121162/cell_4 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
Accepted = [540, 602, 643, 783, 979, 1300]
Submitted = [1807, 2123, 2145, 2620, 3303, 5160]
Year = [2014, 2015, 2016, 2017, 2018, 2019]
list_of_tuples = list(zip(Year, Accepted, Submitted))
df = pd.DataFrame(list_of_tuples, columns=['Year', 'Accepted', 'Submitted'])
df
train = pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv')
train.AdoptionSpeed.value_counts(normalize=True) | code |
17121162/cell_6 | [
"text_plain_output_1.png"
] | from sklearn.metrics import cohen_kappa_score
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
Accepted = [540, 602, 643, 783, 979, 1300]
Submitted = [1807, 2123, 2145, 2620, 3303, 5160]
Year = [2014, 2015, 2016, 2017, 2018, 2019]
list_of_tuples = list(zip(Year, Accepted, Submitted))
df = pd.DataFrame(list_of_tuples, columns=['Year', 'Accepted', 'Submitted'])
df
train = pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv')
train.AdoptionSpeed.value_counts(normalize=True)
from sklearn.metrics import cohen_kappa_score
from itertools import repeat
import random
distribution = list(reversed(list(train.AdoptionSpeed.value_counts(normalize=True))))
y_true = train['AdoptionSpeed'].tolist()
y_pred = list(np.random.choice([0, 1, 2, 3, 4], p=distribution, size=len(y_true)))
cohen_kappa_score(y_true, y_pred, weights='quadratic')
y_pred = list(np.random.choice([0, 1, 2, 3, 4], p=[0.2, 0.2, 0.2, 0.2, 0.2], size=len(y_true)))
cohen_kappa_score(y_true, y_pred, weights='quadratic') | code |
17121162/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
Accepted = [540, 602, 643, 783, 979, 1300]
Submitted = [1807, 2123, 2145, 2620, 3303, 5160]
Year = [2014, 2015, 2016, 2017, 2018, 2019]
list_of_tuples = list(zip(Year, Accepted, Submitted))
df = pd.DataFrame(list_of_tuples, columns=['Year', 'Accepted', 'Submitted'])
df | code |
17121162/cell_11 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.metrics import cohen_kappa_score
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
Accepted = [540, 602, 643, 783, 979, 1300]
Submitted = [1807, 2123, 2145, 2620, 3303, 5160]
Year = [2014, 2015, 2016, 2017, 2018, 2019]
list_of_tuples = list(zip(Year, Accepted, Submitted))
df = pd.DataFrame(list_of_tuples, columns=['Year', 'Accepted', 'Submitted'])
df
train = pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv')
train.AdoptionSpeed.value_counts(normalize=True)
from sklearn.metrics import cohen_kappa_score
from itertools import repeat
import random
distribution = list(reversed(list(train.AdoptionSpeed.value_counts(normalize=True))))
y_true = train['AdoptionSpeed'].tolist()
y_pred = list(np.random.choice([0, 1, 2, 3, 4], p=distribution, size=len(y_true)))
cohen_kappa_score(y_true, y_pred, weights='quadratic')
train = pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv')
test = pd.read_csv('../input/petfinder-adoption-prediction/test/test.csv')
train['has_photo'] = train['PhotoAmt'].apply(lambda x: True if x > 0 else False)
test['has_photo'] = test['PhotoAmt'].apply(lambda x: True if x > 0 else False)
train[train.has_photo == False].AdoptionSpeed.value_counts() | code |
17121162/cell_19 | [
"text_plain_output_1.png"
] | from sklearn.metrics import cohen_kappa_score
from sklearn.utils import class_weight
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
Accepted = [540, 602, 643, 783, 979, 1300]
Submitted = [1807, 2123, 2145, 2620, 3303, 5160]
Year = [2014, 2015, 2016, 2017, 2018, 2019]
list_of_tuples = list(zip(Year, Accepted, Submitted))
df = pd.DataFrame(list_of_tuples, columns=['Year', 'Accepted', 'Submitted'])
df
train = pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv')
train.AdoptionSpeed.value_counts(normalize=True)
from sklearn.metrics import cohen_kappa_score
from itertools import repeat
import random
distribution = list(reversed(list(train.AdoptionSpeed.value_counts(normalize=True))))
y_true = train['AdoptionSpeed'].tolist()
y_pred = list(np.random.choice([0, 1, 2, 3, 4], p=distribution, size=len(y_true)))
cohen_kappa_score(y_true, y_pred, weights='quadratic')
y_pred = list(np.random.choice([0, 1, 2, 3, 4], p=[0.2, 0.2, 0.2, 0.2, 0.2], size=len(y_true)))
cohen_kappa_score(y_true, y_pred, weights='quadratic')
y_pred = list(np.random.choice([0, 1, 2, 3, 4], p=[0.0, 0.0, 0.0, 0.01, 0.99], size=len(y_true)))
cohen_kappa_score(y_true, y_pred, weights='quadratic')
y_pred = list(np.random.choice([0, 1, 2, 3, 4], p=[0.99, 0.0, 0.0, 0.0, 0.01], size=len(y_true)))
cohen_kappa_score(y_true, y_pred, weights='quadratic')
from sklearn.utils import class_weight
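# rebuild the label vector from its per-class counts, then let scikit-learn derive 'balanced' weights: n_samples / (n_classes * class_count)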
result = []
for x in range(781):
result.append(0)
for x in range(6768):
result.append(1)
for x in range(9949):
result.append(2)
for x in range(9467):
result.append(3)
for x in range(7960):
result.append(4)
result = np.asarray(result)
class_weight.compute_class_weight('balanced', np.unique(result), result) | code |
17121162/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
17121162/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.metrics import cohen_kappa_score
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
Accepted = [540, 602, 643, 783, 979, 1300]
Submitted = [1807, 2123, 2145, 2620, 3303, 5160]
Year = [2014, 2015, 2016, 2017, 2018, 2019]
list_of_tuples = list(zip(Year, Accepted, Submitted))
df = pd.DataFrame(list_of_tuples, columns=['Year', 'Accepted', 'Submitted'])
df
train = pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv')
train.AdoptionSpeed.value_counts(normalize=True)
from sklearn.metrics import cohen_kappa_score
from itertools import repeat
import random
distribution = list(reversed(list(train.AdoptionSpeed.value_counts(normalize=True))))
y_true = train['AdoptionSpeed'].tolist()
y_pred = list(np.random.choice([0, 1, 2, 3, 4], p=distribution, size=len(y_true)))
cohen_kappa_score(y_true, y_pred, weights='quadratic')
y_pred = list(np.random.choice([0, 1, 2, 3, 4], p=[0.2, 0.2, 0.2, 0.2, 0.2], size=len(y_true)))
cohen_kappa_score(y_true, y_pred, weights='quadratic')
y_pred = list(np.random.choice([0, 1, 2, 3, 4], p=[0.0, 0.0, 0.0, 0.01, 0.99], size=len(y_true)))
cohen_kappa_score(y_true, y_pred, weights='quadratic') | code |
17121162/cell_8 | [
"text_plain_output_1.png"
] | from sklearn.metrics import cohen_kappa_score
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
Accepted = [540, 602, 643, 783, 979, 1300]
Submitted = [1807, 2123, 2145, 2620, 3303, 5160]
Year = [2014, 2015, 2016, 2017, 2018, 2019]
list_of_tuples = list(zip(Year, Accepted, Submitted))
df = pd.DataFrame(list_of_tuples, columns=['Year', 'Accepted', 'Submitted'])
df
train = pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv')
train.AdoptionSpeed.value_counts(normalize=True)
from sklearn.metrics import cohen_kappa_score
from itertools import repeat
import random
distribution = list(reversed(list(train.AdoptionSpeed.value_counts(normalize=True))))
y_true = train['AdoptionSpeed'].tolist()
y_pred = list(np.random.choice([0, 1, 2, 3, 4], p=distribution, size=len(y_true)))
cohen_kappa_score(y_true, y_pred, weights='quadratic')
y_pred = list(np.random.choice([0, 1, 2, 3, 4], p=[0.2, 0.2, 0.2, 0.2, 0.2], size=len(y_true)))
cohen_kappa_score(y_true, y_pred, weights='quadratic')
y_pred = list(np.random.choice([0, 1, 2, 3, 4], p=[0.0, 0.0, 0.0, 0.01, 0.99], size=len(y_true)))
cohen_kappa_score(y_true, y_pred, weights='quadratic')
y_pred = list(np.random.choice([0, 1, 2, 3, 4], p=[0.99, 0.0, 0.0, 0.0, 0.01], size=len(y_true)))
cohen_kappa_score(y_true, y_pred, weights='quadratic') | code |
17121162/cell_15 | [
"text_plain_output_1.png"
] | from talos import Reporting
from talos import Reporting
r = Reporting('../input/resnet50-talos-score/resnet50_talos_score.csv')
r.data.sort_values(['val_acc'], ascending=False)
r.best_params()[0] | code |
17121162/cell_16 | [
"text_plain_output_1.png"
] | from talos import Reporting
from talos import Reporting
r = Reporting('../input/resnet50-talos-score/resnet50_talos_score.csv')
r.data.sort_values(['val_acc'], ascending=False)
r.best_params()[0]
r.correlate('val_loss') | code |
17121162/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
Accepted = [540, 602, 643, 783, 979, 1300]
Submitted = [1807, 2123, 2145, 2620, 3303, 5160]
Year = [2014, 2015, 2016, 2017, 2018, 2019]
list_of_tuples = list(zip(Year, Accepted, Submitted))
df = pd.DataFrame(list_of_tuples, columns=['Year', 'Accepted', 'Submitted'])
df
import seaborn as sns
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(20, 10))
df.plot.bar(x='Year', stacked=False, ax=ax)
ax.grid(which='major', axis='y', linestyle='--')
plt.xticks(rotation=0)
plt.ylabel('# of papers')
plt.savefig('submissions.png') | code |
17121162/cell_17 | [
"text_plain_output_1.png"
] | from keras.layers import Dense
from keras.models import Sequential
from keras.models import Sequential
from keras.layers import Dense, Dropout
model = Sequential()
model.add(Dense(units=64, activation='relu', input_dim=100))
model.add(Dropout(0.5))  # assumed dropout rate
model.add(Dense(units=10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5, batch_size=32) | code |
17121162/cell_14 | [
"text_plain_output_1.png"
] | from talos import Reporting
from talos import Reporting
r = Reporting('../input/resnet50-talos-score/resnet50_talos_score.csv')
r.data.sort_values(['val_acc'], ascending=False) | code |
17121162/cell_10 | [
"text_html_output_1.png"
] | from sklearn.metrics import cohen_kappa_score
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
Accepted = [540, 602, 643, 783, 979, 1300]
Submitted = [1807, 2123, 2145, 2620, 3303, 5160]
Year = [2014, 2015, 2016, 2017, 2018, 2019]
list_of_tuples = list(zip(Year, Accepted, Submitted))
df = pd.DataFrame(list_of_tuples, columns=['Year', 'Accepted', 'Submitted'])
df
train = pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv')
train.AdoptionSpeed.value_counts(normalize=True)
from sklearn.metrics import cohen_kappa_score
from itertools import repeat
import random
distribution = list(reversed(list(train.AdoptionSpeed.value_counts(normalize=True))))
y_true = train['AdoptionSpeed'].tolist()
y_pred = list(np.random.choice([0, 1, 2, 3, 4], p=distribution, size=len(y_true)))
cohen_kappa_score(y_true, y_pred, weights='quadratic')
train = pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv')
test = pd.read_csv('../input/petfinder-adoption-prediction/test/test.csv')
train['has_photo'] = train['PhotoAmt'].apply(lambda x: True if x > 0 else False)
test['has_photo'] = test['PhotoAmt'].apply(lambda x: True if x > 0 else False)
print('Missing photos in train set: %d' % train.has_photo.value_counts()[0])
print('Missing photos in test set: %d' % test.has_photo.value_counts()[0])
print('Percent missing in test set: %.2f' % (test.has_photo.value_counts()[0] / test.shape[0] * 100)) | code |
17121162/cell_12 | [
"text_plain_output_1.png"
] | !pip install talos | code |
17121162/cell_5 | [
"text_plain_output_1.png"
] | from sklearn.metrics import cohen_kappa_score
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
Accepted = [540, 602, 643, 783, 979, 1300]
Submitted = [1807, 2123, 2145, 2620, 3303, 5160]
Year = [2014, 2015, 2016, 2017, 2018, 2019]
list_of_tuples = list(zip(Year, Accepted, Submitted))
df = pd.DataFrame(list_of_tuples, columns=['Year', 'Accepted', 'Submitted'])
df
train = pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv')
train.AdoptionSpeed.value_counts(normalize=True)
from sklearn.metrics import cohen_kappa_score
from itertools import repeat
import random
distribution = list(reversed(list(train.AdoptionSpeed.value_counts(normalize=True))))
y_true = train['AdoptionSpeed'].tolist()
y_pred = list(np.random.choice([0, 1, 2, 3, 4], p=distribution, size=len(y_true)))
cohen_kappa_score(y_true, y_pred, weights='quadratic') | code |
18120034/cell_13 | [
"text_plain_output_1.png"
] | from google.cloud import bigquery
from google.cloud import bigquery
client = bigquery.Client()
dataset_ref = client.dataset('openaq', project='bigquery-public-data')
dataset = client.get_dataset(dataset_ref)
tables = list(client.list_tables(dataset))
table_ref = dataset_ref.table('global_air_quality')
table = client.get_table(table_ref)
client.list_rows(table, max_results=5).to_dataframe()
query = "\n SELECT city\n FROM `bigquery-public-data.openaq.global_air_quality`\n WHERE country = 'US'\n"
query_job = client.query(query)
us_cities = query_job.to_dataframe()
us_cities.city.value_counts().head() | code |
18120034/cell_9 | [
"text_html_output_1.png"
] | query = "\n SELECT city\n FROM `bigquery-public-data.openaq.global_air_quality`\n WHERE country = 'US'\n"
query | code |
18120034/cell_4 | [
"text_plain_output_1.png"
] | from google.cloud import bigquery
from google.cloud import bigquery
client = bigquery.Client()
dataset_ref = client.dataset('openaq', project='bigquery-public-data')
dataset = client.get_dataset(dataset_ref)
tables = list(client.list_tables(dataset))
for table in tables:
print(table.table_id) | code |
18120034/cell_6 | [
"text_plain_output_1.png"
] | from google.cloud import bigquery
from google.cloud import bigquery
client = bigquery.Client()
dataset_ref = client.dataset('openaq', project='bigquery-public-data')
dataset = client.get_dataset(dataset_ref)
tables = list(client.list_tables(dataset))
table_ref = dataset_ref.table('global_air_quality')
table = client.get_table(table_ref)
type(table) | code |
18120034/cell_2 | [
"text_plain_output_1.png"
] | from google.cloud import bigquery
from google.cloud import bigquery
client = bigquery.Client()
dataset_ref = client.dataset('openaq', project='bigquery-public-data')
dataset = client.get_dataset(dataset_ref) | code |
18120034/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
18120034/cell_7 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from google.cloud import bigquery
from google.cloud import bigquery
client = bigquery.Client()
dataset_ref = client.dataset('openaq', project='bigquery-public-data')
dataset = client.get_dataset(dataset_ref)
tables = list(client.list_tables(dataset))
table_ref = dataset_ref.table('global_air_quality')
table = client.get_table(table_ref)
client.list_rows(table, max_results=5).to_dataframe() | code |
18120034/cell_15 | [
"text_html_output_1.png"
] | from google.cloud import bigquery
from google.cloud import bigquery
client = bigquery.Client()
dataset_ref = client.dataset('openaq', project='bigquery-public-data')
dataset = client.get_dataset(dataset_ref)
tables = list(client.list_tables(dataset))
table_ref = dataset_ref.table('global_air_quality')
table = client.get_table(table_ref)
client.list_rows(table, max_results=5).to_dataframe()
query = "\n SELECT city\n FROM `bigquery-public-data.openaq.global_air_quality`\n WHERE country = 'US'\n"
query_job = client.query(query)
ONE_MB = 1000 * 1000
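# maximum_bytes_billed makes the query fail up front if it would scan more than the configured limit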
safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=ONE_MB)
safe_query_job = client.query(query, job_config=safe_config)
safe_query_job.to_dataframe().head() | code |
18120034/cell_14 | [
"text_plain_output_1.png"
] | from google.cloud import bigquery
client = bigquery.Client()
query_1 = "\n    SELECT city, country, source_name\n    FROM `bigquery-public-data.openaq.global_air_quality`\n    WHERE country = 'US'\n"
query_job_1 = client.query(query_1)
df_1 = query_job_1.to_dataframe()
df_1.head() | code |
18120034/cell_12 | [
"text_plain_output_1.png"
] | from google.cloud import bigquery
from google.cloud import bigquery
client = bigquery.Client()
dataset_ref = client.dataset('openaq', project='bigquery-public-data')
dataset = client.get_dataset(dataset_ref)
tables = list(client.list_tables(dataset))
table_ref = dataset_ref.table('global_air_quality')
table = client.get_table(table_ref)
client.list_rows(table, max_results=5).to_dataframe()
query = "\n SELECT city\n FROM `bigquery-public-data.openaq.global_air_quality`\n WHERE country = 'US'\n"
query_job = client.query(query)
us_cities = query_job.to_dataframe()
type(us_cities) | code |
16163769/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df
simple_feature_cutting_df = df.drop(['PassengerId', 'Name', 'Ticket', 'Cabin', 'Embarked'], axis=1)
simple_feature_cutting_df = simple_feature_cutting_df.dropna()
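# one-hot encode Sex into Sex_female / Sex_male indicator columns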
simple_feature_cutting_df = pd.get_dummies(simple_feature_cutting_df, columns=['Sex'])
simple_feature_cutting_df.index = range(0, len(simple_feature_cutting_df))
simple_feature_cutting_df | code |
16163769/cell_6 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df
simple_feature_cutting_df = df.drop(['PassengerId', 'Name', 'Ticket', 'Cabin', 'Embarked'], axis=1)
simple_feature_cutting_df = simple_feature_cutting_df.dropna()
simple_feature_cutting_df = pd.get_dummies(simple_feature_cutting_df, columns=['Sex'])
simple_feature_cutting_df.index = range(0, len(simple_feature_cutting_df))
simple_feature_cutting_df
test_data_set = simple_feature_cutting_df[:100]
train_data_set = simple_feature_cutting_df[100:]
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier()
label_data = train_data_set['Survived']
train_data = train_data_set.drop('Survived', axis=1)
model.fit(train_data, label_data) | code |
16163769/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df | code |
16163769/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
16163769/cell_7 | [
"text_html_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df
simple_feature_cutting_df = df.drop(['PassengerId', 'Name', 'Ticket', 'Cabin', 'Embarked'], axis=1)
simple_feature_cutting_df = simple_feature_cutting_df.dropna()
simple_feature_cutting_df = pd.get_dummies(simple_feature_cutting_df, columns=['Sex'])
simple_feature_cutting_df.index = range(0, len(simple_feature_cutting_df))
simple_feature_cutting_df
test_data_set = simple_feature_cutting_df[:100]
train_data_set = simple_feature_cutting_df[100:]
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier()
label_data = train_data_set['Survived']
train_data = train_data_set.drop('Survived', axis=1)
model.fit(train_data, label_data)
result_test_predict = model.predict(test_data_set.drop('Survived', axis=1))
real_test_observations = np.array(test_data_set['Survived'])
result = pd.DataFrame({'predict': result_test_predict, 'real': real_test_observations})
result['Correct'] = result.apply(lambda row: row['predict'] == row['real'], axis=1)
result | code |
16163769/cell_8 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df
simple_feature_cutting_df = df.drop(['PassengerId', 'Name', 'Ticket', 'Cabin', 'Embarked'], axis=1)
simple_feature_cutting_df = simple_feature_cutting_df.dropna()
simple_feature_cutting_df = pd.get_dummies(simple_feature_cutting_df, columns=['Sex'])
simple_feature_cutting_df.index = range(0, len(simple_feature_cutting_df))
simple_feature_cutting_df
test_data_set = simple_feature_cutting_df[:100]
train_data_set = simple_feature_cutting_df[100:]
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier()
label_data = train_data_set['Survived']
train_data = train_data_set.drop('Survived', axis=1)
model.fit(train_data, label_data)
result_test_predict = model.predict(test_data_set.drop('Survived', axis=1))
real_test_observations = np.array(test_data_set['Survived'])
result = pd.DataFrame({'predict': result_test_predict, 'real': real_test_observations})
result['Correct'] = result.apply(lambda row: row['predict'] == row['real'], axis=1)
result
num_correct = len(result[result['Correct']])
num_total = len(result)
num_correct / num_total | code |
16163769/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/train.csv')
df
import matplotlib.pyplot as plt
df['Age'].hist(bins=20) | code |
72069261/cell_21 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/breast-cancer-csv/breastCancer.csv')
df
df.shape
df.columns
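# count how many '?' placeholder entries each column contains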
c = {col: df[df[col] == '?'].shape[0] for col in df.columns}
c
import numpy as np
for i in range(df.shape[1]):
for j in range(df.shape[0]):
if df.iloc[j, i] == '?':
df.iloc[j, i] = np.NaN
df.corr()
fig1 = plt.figure(figsize=(10,8))
sns.heatmap(df.corr(),annot=True,cmap='YlGnBu',vmax=1.0,vmin=-1.0)
fig2 = plt.figure(figsize=(6,6))
sns.pairplot(df.iloc[:,1:],hue='class',palette='Set2')
X_train, X_test, y_train, y_test = train_test_split(df.iloc[:, 1:-1], df.iloc[:, -1])
print(X_train, '\n')
print(X_test, '\n')
print(y_train, '\n')
print(y_test, '\n')
print('The dimension of X_train is : ', X_train.shape, '\n')
print('The dimension of X_test is : ', X_test.shape, '\n')
print('The dimension of y_train is : ', y_train.shape, '\n')
print('The dimension of y_test is : ', y_test.shape, '\n') | code |
72069261/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/breast-cancer-csv/breastCancer.csv')
df
df.shape
df.columns
print(df['class'].value_counts() / 6.99)
df['class'].value_counts() | code |
72069261/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/breast-cancer-csv/breastCancer.csv')
df
df.head(10) | code |
72069261/cell_33 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/breast-cancer-csv/breastCancer.csv')
df
df.shape
df.columns
c = {col: df[df[col] == '?'].shape[0] for col in df.columns}
c
import numpy as np
for i in range(df.shape[1]):
for j in range(df.shape[0]):
if df.iloc[j, i] == '?':
df.iloc[j, i] = np.NaN
df.corr()
fig1 = plt.figure(figsize=(10,8))
sns.heatmap(df.corr(),annot=True,cmap='YlGnBu',vmax=1.0,vmin=-1.0)
fig2 = plt.figure(figsize=(6,6))
sns.pairplot(df.iloc[:,1:],hue='class',palette='Set2')
X_train, X_test, y_train, y_test = train_test_split(df.iloc[:, 1:-1], df.iloc[:, -1])
gaussnb = GaussianNB()
gaussnb.fit(X_train, y_train)
gaussnbpred = gaussnb.predict(X_test)
gaussnbresults = confusion_matrix(y_test, gaussnbpred)
gaussnbacc_score = accuracy_score(y_test, gaussnbpred)
print('The accuracy of the NaiveBayes model is : %0.4f' % gaussnbacc_score)
print('The confusion matrix is :\n', gaussnbresults) | code |
72069261/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/breast-cancer-csv/breastCancer.csv')
df
df.shape
df.describe() | code |
72069261/cell_29 | [
"text_html_output_1.png"
] | from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/breast-cancer-csv/breastCancer.csv')
df
df.shape
df.columns
c = {col: df[df[col] == '?'].shape[0] for col in df.columns}
c
import numpy as np
for i in range(df.shape[1]):
for j in range(df.shape[0]):
if df.iloc[j, i] == '?':
df.iloc[j, i] = np.NaN
df.corr()
fig1 = plt.figure(figsize=(10,8))
sns.heatmap(df.corr(),annot=True,cmap='YlGnBu',vmax=1.0,vmin=-1.0)
fig2 = plt.figure(figsize=(6,6))
sns.pairplot(df.iloc[:,1:],hue='class',palette='Set2')
X_train, X_test, y_train, y_test = train_test_split(df.iloc[:, 1:-1], df.iloc[:, -1])
error_rate = []
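# try k = 1..39 and record the test-set misclassification rate for each, to help pick a good number of neighbours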
for i in range(1, 40):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train, y_train)
pred = knn.predict(X_test)
error_rate.append(np.mean(pred != y_test))
print(accuracy_score(y_test, pred)) | code |
72069261/cell_19 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/breast-cancer-csv/breastCancer.csv')
df
df.shape
df.columns
c = {col: df[df[col] == '?'].shape[0] for col in df.columns}
c
import numpy as np
for i in range(df.shape[1]):
for j in range(df.shape[0]):
if df.iloc[j, i] == '?':
df.iloc[j, i] = np.NaN
df.corr()
fig1 = plt.figure(figsize=(10,8))
sns.heatmap(df.corr(),annot=True,cmap='YlGnBu',vmax=1.0,vmin=-1.0)
fig2 = plt.figure(figsize=(6, 6))
sns.pairplot(df.iloc[:, 1:], hue='class', palette='Set2') | code |
72069261/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
72069261/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/breast-cancer-csv/breastCancer.csv')
df
df.shape
df.info() | code |
72069261/cell_18 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/breast-cancer-csv/breastCancer.csv')
df
df.shape
df.columns
c = {col: df[df[col] == '?'].shape[0] for col in df.columns}
c
import numpy as np
for i in range(df.shape[1]):
for j in range(df.shape[0]):
if df.iloc[j, i] == '?':
df.iloc[j, i] = np.NaN
df.corr()
fig1 = plt.figure(figsize=(10, 8))
sns.heatmap(df.corr(), annot=True, cmap='YlGnBu', vmax=1.0, vmin=-1.0) | code |
72069261/cell_28 | [
"text_plain_output_1.png"
] | from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/breast-cancer-csv/breastCancer.csv')
df
df.shape
df.columns
c = {col: df[df[col] == '?'].shape[0] for col in df.columns}
c
import numpy as np
for i in range(df.shape[1]):
for j in range(df.shape[0]):
if df.iloc[j, i] == '?':
df.iloc[j, i] = np.NaN
df.corr()
fig1 = plt.figure(figsize=(10,8))
sns.heatmap(df.corr(),annot=True,cmap='YlGnBu',vmax=1.0,vmin=-1.0)
fig2 = plt.figure(figsize=(6,6))
sns.pairplot(df.iloc[:,1:],hue='class',palette='Set2')
X_train, X_test, y_train, y_test = train_test_split(df.iloc[:, 1:-1], df.iloc[:, -1])
model1 = KNeighborsClassifier(n_neighbors=4).fit(X_train, y_train)
print(classification_report(model1.predict(X_train), y_train))
print(classification_report(model1.predict(X_test), y_test)) | code |
72069261/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/breast-cancer-csv/breastCancer.csv')
df
df.shape
df.columns | code |
72069261/cell_15 | [
"text_html_output_1.png"
] | import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/breast-cancer-csv/breastCancer.csv')
df
df.shape
df.columns
c = {col: df[df[col] == '?'].shape[0] for col in df.columns}
c
import numpy as np
for i in range(df.shape[1]):
for j in range(df.shape[0]):
if df.iloc[j, i] == '?':
df.iloc[j, i] = np.NaN
list(df['bare_nucleoli'].mode()) | code |
72069261/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/breast-cancer-csv/breastCancer.csv')
df | code |
72069261/cell_17 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/breast-cancer-csv/breastCancer.csv')
df
df.shape
df.columns
c = {col: df[df[col] == '?'].shape[0] for col in df.columns}
c
import numpy as np
for i in range(df.shape[1]):
for j in range(df.shape[0]):
if df.iloc[j, i] == '?':
df.iloc[j, i] = np.NaN
df.corr() | code |
72069261/cell_24 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/breast-cancer-csv/breastCancer.csv')
df
df.shape
df.columns
c = {col: df[df[col] == '?'].shape[0] for col in df.columns}
c
import numpy as np
for i in range(df.shape[1]):
for j in range(df.shape[0]):
if df.iloc[j, i] == '?':
df.iloc[j, i] = np.NaN
df.corr()
fig1 = plt.figure(figsize=(10,8))
sns.heatmap(df.corr(),annot=True,cmap='YlGnBu',vmax=1.0,vmin=-1.0)
fig2 = plt.figure(figsize=(6,6))
sns.pairplot(df.iloc[:,1:],hue='class',palette='Set2')
X_train, X_test, y_train, y_test = train_test_split(df.iloc[:, 1:-1], df.iloc[:, -1])
error_rate = []
for i in range(1, 40):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train, y_train)
pred = knn.predict(X_test)
error_rate.append(np.mean(pred != y_test))
plt.figure(figsize=(10, 6))
plt.plot(range(1, 40), error_rate, 'o--')
plt.ylabel('Error Rate')
plt.xlabel('K') | code |
72069261/cell_27 | [
"text_plain_output_1.png"
] | from sklearn.metrics import plot_confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/breast-cancer-csv/breastCancer.csv')
df
df.shape
df.columns
c = {col: df[df[col] == '?'].shape[0] for col in df.columns}
c
import numpy as np
for i in range(df.shape[1]):
for j in range(df.shape[0]):
if df.iloc[j, i] == '?':
df.iloc[j, i] = np.NaN
df.corr()
fig1 = plt.figure(figsize=(10,8))
sns.heatmap(df.corr(),annot=True,cmap='YlGnBu',vmax=1.0,vmin=-1.0)
fig2 = plt.figure(figsize=(6,6))
sns.pairplot(df.iloc[:,1:],hue='class',palette='Set2')
X_train, X_test, y_train, y_test = train_test_split(df.iloc[:, 1:-1], df.iloc[:, -1])
error_rate = []
for i in range(1, 40):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train, y_train)
pred = knn.predict(X_test)
error_rate.append(np.mean(pred != y_test))
model1 = KNeighborsClassifier(n_neighbors=4).fit(X_train, y_train)
fig3, axs = plt.subplots(figsize=(5, 5))
plot_confusion_matrix(model1, X_test, y_test, ax=axs) | code |
72069261/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/breast-cancer-csv/breastCancer.csv')
df
df.shape
df.columns
c = {col: df[df[col] == '?'].shape[0] for col in df.columns}
c | code |
72069261/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/breast-cancer-csv/breastCancer.csv')
df
df.shape | code |
128033738/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
path = '/kaggle/input/news-headlines/news_summary.csv'
df = pd.read_csv(path)
df.head() | code |
128033738/cell_24 | [
"text_html_output_82.png",
"text_html_output_255.png",
"text_html_output_149.png",
"text_html_output_277.png",
"text_html_output_338.png",
"text_html_output_282.png",
"text_html_output_219.png",
"text_html_output_130.png",
"text_html_output_320.png",
"text_html_output_155.png",
"text_html_output_328.png",
"text_html_output_327.png",
"text_html_output_307.png",
"text_html_output_314.png",
"text_html_output_103.png",
"text_html_output_321.png",
"text_html_output_197.png",
"text_html_output_88.png",
"text_html_output_80.png",
"text_html_output_29.png",
"text_html_output_89.png",
"text_html_output_146.png",
"text_html_output_261.png",
"text_html_output_60.png",
"text_html_output_227.png",
"text_html_output_159.png",
"text_html_output_237.png",
"text_html_output_68.png",
"text_html_output_341.png",
"text_html_output_178.png",
"text_html_output_117.png",
"text_html_output_27.png",
"text_html_output_115.png",
"text_html_output_239.png",
"text_html_output_235.png",
"text_html_output_28.png",
"text_html_output_209.png",
"text_html_output_356.png",
"text_html_output_263.png",
"text_html_output_310.png",
"text_html_output_141.png",
"text_html_output_323.png",
"text_html_output_45.png",
"text_html_output_90.png",
"text_html_output_274.png",
"text_html_output_329.png",
"text_html_output_353.png",
"text_html_output_304.png",
"text_html_output_10.png",
"text_html_output_183.png",
"text_html_output_165.png",
"text_html_output_59.png",
"text_html_output_319.png",
"text_html_output_180.png",
"text_html_output_116.png",
"text_html_output_161.png",
"text_html_output_347.png",
"text_html_output_190.png",
"text_html_output_258.png",
"text_html_output_142.png",
"text_html_output_218.png",
"text_html_output_224.png",
"text_html_output_181.png",
"text_html_output_186.png",
"text_html_output_22.png",
"text_html_output_230.png",
"text_html_output_350.png",
"text_html_output_214.png",
"text_html_output_125.png",
"text_html_output_150.png",
"text_html_output_234.png",
"text_html_output_16.png",
"text_html_output_337.png",
"text_html_output_40.png",
"text_html_output_223.png",
"text_html_output_91.png",
"text_html_output_100.png",
"text_html_output_293.png",
"text_html_output_317.png",
"text_html_output_176.png",
"text_html_output_199.png",
"text_html_output_144.png",
"text_html_output_173.png",
"text_html_output_305.png",
"text_html_output_153.png",
"text_html_output_52.png",
"text_html_output_122.png",
"text_html_output_166.png",
"text_html_output_53.png",
"text_html_output_4.png",
"text_html_output_194.png",
"text_html_output_41.png",
"text_html_output_81.png",
"text_html_output_205.png",
"text_html_output_300.png",
"text_html_output_132.png",
"text_html_output_57.png",
"text_html_output_73.png",
"text_html_output_147.png",
"text_html_output_203.png",
"text_html_output_158.png",
"text_html_output_254.png",
"text_html_output_35.png",
"text_html_output_306.png",
"text_html_output_244.png",
"text_html_output_6.png",
"text_html_output_47.png",
"text_html_output_139.png",
"text_html_output_113.png",
"text_html_output_241.png",
"text_html_output_269.png",
"text_html_output_339.png",
"text_html_output_168.png",
"text_html_output_61.png",
"text_html_output_253.png",
"text_html_output_311.png",
"text_html_output_288.png",
"text_html_output_63.png",
"text_html_output_123.png",
"text_html_output_94.png",
"text_html_output_136.png",
"text_html_output_222.png",
"text_html_output_140.png",
"text_html_output_248.png",
"text_html_output_333.png",
"text_html_output_266.png",
"text_html_output_352.png",
"text_html_output_211.png",
"text_html_output_26.png",
"text_html_output_316.png",
"text_html_output_346.png",
"text_html_output_37.png",
"text_html_output_270.png",
"text_html_output_2.png",
"text_html_output_38.png",
"text_html_output_246.png",
"text_html_output_15.png",
"text_html_output_5.png",
"text_html_output_271.png",
"text_html_output_281.png",
"text_html_output_358.png",
"text_html_output_124.png",
"text_html_output_295.png",
"text_html_output_252.png",
"text_html_output_163.png",
"text_html_output_200.png",
"text_html_output_75.png",
"text_html_output_208.png",
"text_html_output_64.png",
"text_html_output_204.png",
"text_html_output_110.png",
"text_html_output_206.png",
"text_html_output_14.png",
"text_html_output_191.png",
"text_html_output_210.png",
"text_html_output_332.png",
"text_html_output_71.png",
"text_html_output_98.png",
"text_html_output_120.png",
"text_html_output_157.png",
"text_html_output_65.png",
"text_html_output_43.png",
"text_html_output_104.png",
"text_html_output_101.png",
"text_html_output_95.png",
"text_html_output_268.png",
"text_html_output_108.png",
"text_html_output_175.png",
"text_html_output_336.png",
"text_html_output_131.png",
"text_html_output_127.png",
"text_html_output_121.png",
"text_html_output_170.png",
"text_html_output_23.png",
"text_html_output_278.png",
"text_html_output_118.png",
"text_html_output_309.png",
"text_html_output_249.png",
"text_html_output_290.png",
"text_html_output_291.png",
"text_html_output_19.png",
"text_html_output_283.png",
"text_html_output_196.png",
"text_html_output_137.png",
"text_html_output_238.png",
"text_html_output_48.png",
"text_html_output_324.png",
"text_html_output_298.png",
"text_html_output_72.png",
"text_html_output_354.png",
"text_html_output_251.png",
"text_html_output_169.png",
"text_html_output_9.png",
"text_html_output_109.png",
"text_html_output_46.png",
"text_html_output_201.png",
"text_html_output_49.png",
"text_html_output_13.png",
"text_html_output_20.png",
"text_html_output_250.png",
"text_html_output_114.png",
"text_html_output_70.png",
"text_html_output_51.png",
"text_html_output_151.png",
"text_html_output_192.png",
"text_html_output_259.png",
"text_html_output_357.png",
"text_html_output_129.png",
"text_html_output_105.png",
"text_html_output_164.png",
"text_html_output_326.png",
"text_html_output_134.png",
"text_html_output_111.png",
"text_html_output_188.png",
"text_html_output_126.png",
"text_html_output_256.png",
"text_html_output_233.png",
"text_html_output_148.png",
"text_html_output_135.png",
"text_html_output_212.png",
"text_html_output_184.png",
"text_html_output_225.png",
"text_html_output_21.png",
"text_html_output_69.png",
"text_html_output_285.png",
"text_html_output_128.png",
"text_html_output_349.png",
"text_html_output_83.png",
"text_html_output_215.png",
"text_html_output_79.png",
"text_html_output_308.png",
"text_html_output_289.png",
"text_html_output_276.png",
"text_html_output_335.png",
"text_html_output_302.png",
"text_html_output_187.png",
"text_html_output_172.png",
"text_html_output_202.png",
"text_html_output_348.png",
"text_html_output_257.png",
"text_html_output_322.png",
"text_html_output_343.png",
"text_html_output_267.png",
"text_html_output_92.png",
"text_html_output_1.png",
"text_html_output_93.png",
"text_html_output_66.png",
"text_html_output_36.png",
"text_html_output_78.png",
"text_html_output_17.png",
"text_html_output_33.png",
"text_html_output_42.png",
"text_html_output_189.png",
"text_html_output_44.png",
"text_html_output_301.png",
"text_html_output_160.png",
"text_html_output_96.png",
"text_html_output_87.png",
"text_html_output_247.png",
"text_html_output_299.png",
"text_html_output_242.png",
"text_html_output_50.png",
"text_html_output_243.png",
"text_html_output_30.png",
"text_html_output_18.png",
"text_html_output_236.png",
"text_html_output_85.png",
"text_html_output_334.png",
"text_html_output_280.png",
"text_html_output_330.png",
"text_html_output_77.png",
"text_html_output_275.png",
"text_html_output_351.png",
"text_html_output_54.png",
"text_html_output_286.png",
"text_html_output_162.png",
"text_html_output_229.png",
"text_html_output_179.png",
"text_html_output_156.png",
"text_html_output_76.png",
"text_html_output_325.png",
"text_html_output_345.png",
"text_html_output_12.png",
"text_html_output_213.png",
"text_html_output_303.png",
"text_html_output_292.png",
"text_html_output_97.png",
"text_html_output_34.png",
"text_html_output_221.png",
"text_html_output_193.png",
"text_html_output_74.png",
"text_html_output_272.png",
"text_html_output_62.png",
"text_html_output_273.png",
"text_html_output_198.png",
"text_html_output_56.png",
"text_html_output_297.png",
"text_html_output_55.png",
"text_html_output_216.png",
"text_html_output_240.png",
"text_html_output_220.png",
"text_html_output_106.png",
"text_html_output_86.png",
"text_html_output_217.png",
"text_html_output_145.png",
"text_html_output_102.png",
"text_html_output_264.png",
"application_vnd.jupyter.stderr_output_1.png",
"text_html_output_11.png",
"text_html_output_342.png",
"text_html_output_195.png",
"text_html_output_84.png",
"text_html_output_24.png",
"text_html_output_355.png",
"text_html_output_284.png",
"text_html_output_182.png",
"text_html_output_167.png",
"text_html_output_287.png",
"text_html_output_31.png",
"text_html_output_260.png",
"text_html_output_313.png",
"text_html_output_228.png",
"text_html_output_344.png",
"text_html_output_99.png",
"text_html_output_143.png",
"text_html_output_154.png",
"text_html_output_177.png",
"text_html_output_8.png",
"text_html_output_67.png",
"text_html_output_25.png",
"text_html_output_231.png",
"text_html_output_331.png",
"text_html_output_58.png",
"text_html_output_294.png",
"text_html_output_171.png",
"text_html_output_265.png",
"text_html_output_232.png",
"text_html_output_152.png",
"text_html_output_39.png",
"text_html_output_279.png",
"text_html_output_207.png",
"text_html_output_312.png",
"text_html_output_174.png",
"text_html_output_315.png",
"text_html_output_112.png",
"text_html_output_245.png",
"text_html_output_32.png",
"text_html_output_119.png",
"text_html_output_296.png",
"text_html_output_3.png",
"text_html_output_185.png",
"text_html_output_133.png",
"text_html_output_107.png",
"text_html_output_138.png",
"text_html_output_226.png",
"text_html_output_262.png",
"text_html_output_7.png",
"text_html_output_318.png",
"text_html_output_340.png"
] | from rich import box
from rich.console import Console
from rich.table import Column, Table
from torch import cuda
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
from transformers import T5Tokenizer, T5ForConditionalGeneration
import numpy as np
import os
import os
import pandas as pd
import pandas as pd
import torch
import pandas as pd
path = '/kaggle/input/news-headlines/news_summary.csv'
df = pd.read_csv(path)
console = Console(record=True)
def display_df(df):
"""display dataframe in ASCII format"""
console = Console()
table = Table(Column('source_text', justify='center'), Column('target_text', justify='center'), title='Sample Data', pad_edge=False, box=box.ASCII)
    for i, row in enumerate(df.values.tolist()):
        table.add_row(row[0], row[1])
    console.print(table)
training_logger = Table(Column('Epoch', justify='center'), Column('Steps', justify='center'), Column('Loss', justify='center'), title='Training Status', pad_edge=False, box=box.ASCII)
from torch import cuda
device = 'cuda' if cuda.is_available() else 'cpu'
device
class CustomDataSetClass(Dataset):
"""
Creating a custom dataset for reading the dataset and
loading it into the dataloader to pass it to the
transformer for finetuning the model
"""
def __init__(self, dataframe, tokenizer, source_len, target_len, source_text, target_text):
"""
Initializes a Dataset class
Args:
dataframe (pandas.DataFrame): Input dataframe
tokenizer (transformers.tokenizer): Transformers tokenizer
source_len (int): Max length of source text
target_len (int): Max length of target text
source_text (str): column name of source text
target_text (str): column name of target text
"""
self.tokenizer = tokenizer
self.data = dataframe
self.source_len = source_len
self.summ_len = target_len
self.target_text = self.data[target_text]
self.source_text = self.data[source_text]
def __len__(self):
"""returns the length of dataframe"""
return len(self.target_text)
def __getitem__(self, index):
"""return the input ids, attention masks and target ids"""
source_text = str(self.source_text[index])
target_text = str(self.target_text[index])
source_text = ' '.join(source_text.split())
target_text = ' '.join(target_text.split())
source = self.tokenizer.batch_encode_plus([source_text], max_length=self.source_len, pad_to_max_length=True, truncation=True, padding='max_length', return_tensors='pt')
target = self.tokenizer.batch_encode_plus([target_text], max_length=self.summ_len, pad_to_max_length=True, truncation=True, padding='max_length', return_tensors='pt')
source_ids = source['input_ids'].squeeze()
source_mask = source['attention_mask'].squeeze()
target_ids = target['input_ids'].squeeze()
target_mask = target['attention_mask'].squeeze()
return {'source_ids': source_ids.to(dtype=torch.long), 'source_mask': source_mask.to(dtype=torch.long), 'target_ids': target_ids.to(dtype=torch.long), 'target_ids_y': target_ids.to(dtype=torch.long)}
def train(epoch, tokenizer, model, device, loader, optimizer):
"""
Function to be called for training with the parameters passed from main function
"""
model.train()
for _, data in enumerate(loader, 0):
y = data['target_ids'].to(device, dtype=torch.long)
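        # Decoder inputs are the targets minus the last token; labels are shifted left, with pad positions set to -100 so the loss ignores them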
y_ids = y[:, :-1].contiguous()
lm_labels = y[:, 1:].clone().detach()
lm_labels[y[:, 1:] == tokenizer.pad_token_id] = -100
ids = data['source_ids'].to(device, dtype=torch.long)
mask = data['source_mask'].to(device, dtype=torch.long)
outputs = model(input_ids=ids, attention_mask=mask, decoder_input_ids=y_ids, labels=lm_labels)
loss = outputs[0]
if _ % 100 == 0:
training_logger.add_row(str(epoch), str(_), str(loss))
optimizer.zero_grad()
loss.backward()
optimizer.step()
def validate(epoch, tokenizer, model, device, loader):
"""
Function to evaluate model for predictions
"""
model.eval()
predictions = []
actuals = []
with torch.no_grad():
for _, data in enumerate(loader, 0):
y = data['target_ids'].to(device, dtype=torch.long)
ids = data['source_ids'].to(device, dtype=torch.long)
mask = data['source_mask'].to(device, dtype=torch.long)
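            # Decode with beam search (2 beams), repetition and length penalties, capped at 150 tokens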
generated_ids = model.generate(input_ids=ids, attention_mask=mask, max_length=150, num_beams=2, repetition_penalty=2.5, length_penalty=1.0, early_stopping=True)
preds = [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=True) for g in generated_ids]
target = [tokenizer.decode(t, skip_special_tokens=True, clean_up_tokenization_spaces=True) for t in y]
predictions.extend(preds)
actuals.extend(target)
return (predictions, actuals)
def T5Trainer(dataframe, source_text, target_text, model_params, output_dir='/kaggle/working/'):
"""
T5 trainer
"""
torch.manual_seed(model_params['SEED'])
np.random.seed(model_params['SEED'])
torch.backends.cudnn.deterministic = True
console.log(f"[Model]: Loading {model_params['MODEL']}...\n")
tokenizer = T5Tokenizer.from_pretrained(model_params['MODEL'])
model = T5ForConditionalGeneration.from_pretrained(model_params['MODEL'])
model = model.to(device)
console.log(f'[Data]: Reading data...\n')
dataframe = dataframe[[source_text, target_text]]
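    # Seeded 80/20 split of the dataframe into training and validation sets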
train_size = 0.8
train_dataset = dataframe.sample(frac=train_size, random_state=model_params['SEED'])
val_dataset = dataframe.drop(train_dataset.index).reset_index(drop=True)
train_dataset = train_dataset.reset_index(drop=True)
training_set = CustomDataSetClass(train_dataset, tokenizer, model_params['MAX_SOURCE_TEXT_LENGTH'], model_params['MAX_TARGET_TEXT_LENGTH'], source_text, target_text)
val_set = CustomDataSetClass(val_dataset, tokenizer, model_params['MAX_SOURCE_TEXT_LENGTH'], model_params['MAX_TARGET_TEXT_LENGTH'], source_text, target_text)
train_params = {'batch_size': model_params['TRAIN_BATCH_SIZE'], 'shuffle': True, 'num_workers': 0}
val_params = {'batch_size': model_params['VALID_BATCH_SIZE'], 'shuffle': False, 'num_workers': 0}
training_loader = DataLoader(training_set, **train_params)
val_loader = DataLoader(val_set, **val_params)
optimizer = torch.optim.Adam(params=model.parameters(), lr=model_params['LEARNING_RATE'])
console.log(f'[Initiating Fine Tuning]...\n')
for epoch in range(model_params['TRAIN_EPOCHS']):
train(epoch, tokenizer, model, device, training_loader, optimizer)
console.log(f'[Saving Model]...\n')
path = os.path.join(output_dir, 'model_files')
model.save_pretrained(path)
tokenizer.save_pretrained(path)
console.log(f'[Initiating Validation]...\n')
for epoch in range(model_params['VAL_EPOCHS']):
predictions, actuals = validate(epoch, tokenizer, model, device, val_loader)
final_df = pd.DataFrame({'Generated Text': predictions, 'Actual Text': actuals})
final_df.to_csv(os.path.join(output_dir, 'predictions.csv'))
console.save_text(os.path.join(output_dir, 'logs.txt'))
console.log(f'[Validation Completed.]\n')
return final_df
model_params = {'MODEL': 't5-base', 'TRAIN_BATCH_SIZE': 8, 'VALID_BATCH_SIZE': 8, 'TRAIN_EPOCHS': 1, 'VAL_EPOCHS': 1, 'LEARNING_RATE': 0.0001, 'MAX_SOURCE_TEXT_LENGTH': 512, 'MAX_TARGET_TEXT_LENGTH': 50, 'SEED': 42}
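# T5 uses task prefixes, so mark every article with 'summarize: ' before fine-tuning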
df['text'] = 'summarize: ' + df['text']
predictions = T5Trainer(dataframe=df, source_text='text', target_text='headlines', model_params=model_params, output_dir='outputs')
predictions.sample(20) | code |
128033738/cell_22 | [
"text_plain_output_1.png"
] | from rich import box
from rich.console import Console
from rich.table import Column, Table
from torch import cuda
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
from transformers import T5Tokenizer, T5ForConditionalGeneration
import numpy as np
import os
import os
import pandas as pd
import pandas as pd
import torch
import pandas as pd
path = '/kaggle/input/news-headlines/news_summary.csv'
df = pd.read_csv(path)
console = Console(record=True)
def display_df(df):
"""display dataframe in ASCII format"""
console = Console()
table = Table(Column('source_text', justify='center'), Column('target_text', justify='center'), title='Sample Data', pad_edge=False, box=box.ASCII)
    for i, row in enumerate(df.values.tolist()):
        table.add_row(row[0], row[1])
    console.print(table)
training_logger = Table(Column('Epoch', justify='center'), Column('Steps', justify='center'), Column('Loss', justify='center'), title='Training Status', pad_edge=False, box=box.ASCII)
from torch import cuda
device = 'cuda' if cuda.is_available() else 'cpu'
device
class CustomDataSetClass(Dataset):
"""
Creating a custom dataset for reading the dataset and
loading it into the dataloader to pass it to the
transformer for finetuning the model
"""
def __init__(self, dataframe, tokenizer, source_len, target_len, source_text, target_text):
"""
Initializes a Dataset class
Args:
dataframe (pandas.DataFrame): Input dataframe
tokenizer (transformers.tokenizer): Transformers tokenizer
source_len (int): Max length of source text
target_len (int): Max length of target text
source_text (str): column name of source text
target_text (str): column name of target text
"""
self.tokenizer = tokenizer
self.data = dataframe
self.source_len = source_len
self.summ_len = target_len
self.target_text = self.data[target_text]
self.source_text = self.data[source_text]
def __len__(self):
"""returns the length of dataframe"""
return len(self.target_text)
def __getitem__(self, index):
"""return the input ids, attention masks and target ids"""
source_text = str(self.source_text[index])
target_text = str(self.target_text[index])
source_text = ' '.join(source_text.split())
target_text = ' '.join(target_text.split())
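        # Tokenize source and target to fixed lengths, padding/truncating to the configured maxima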
source = self.tokenizer.batch_encode_plus([source_text], max_length=self.source_len, pad_to_max_length=True, truncation=True, padding='max_length', return_tensors='pt')
target = self.tokenizer.batch_encode_plus([target_text], max_length=self.summ_len, pad_to_max_length=True, truncation=True, padding='max_length', return_tensors='pt')
source_ids = source['input_ids'].squeeze()
source_mask = source['attention_mask'].squeeze()
target_ids = target['input_ids'].squeeze()
target_mask = target['attention_mask'].squeeze()
return {'source_ids': source_ids.to(dtype=torch.long), 'source_mask': source_mask.to(dtype=torch.long), 'target_ids': target_ids.to(dtype=torch.long), 'target_ids_y': target_ids.to(dtype=torch.long)}
def train(epoch, tokenizer, model, device, loader, optimizer):
"""
Function to be called for training with the parameters passed from main function
"""
model.train()
for _, data in enumerate(loader, 0):
y = data['target_ids'].to(device, dtype=torch.long)
y_ids = y[:, :-1].contiguous()
lm_labels = y[:, 1:].clone().detach()
lm_labels[y[:, 1:] == tokenizer.pad_token_id] = -100
ids = data['source_ids'].to(device, dtype=torch.long)
mask = data['source_mask'].to(device, dtype=torch.long)
outputs = model(input_ids=ids, attention_mask=mask, decoder_input_ids=y_ids, labels=lm_labels)
loss = outputs[0]
if _ % 100 == 0:
training_logger.add_row(str(epoch), str(_), str(loss))
optimizer.zero_grad()
loss.backward()
optimizer.step()
def validate(epoch, tokenizer, model, device, loader):
"""
Function to evaluate model for predictions
"""
model.eval()
predictions = []
actuals = []
with torch.no_grad():
for _, data in enumerate(loader, 0):
y = data['target_ids'].to(device, dtype=torch.long)
ids = data['source_ids'].to(device, dtype=torch.long)
mask = data['source_mask'].to(device, dtype=torch.long)
generated_ids = model.generate(input_ids=ids, attention_mask=mask, max_length=150, num_beams=2, repetition_penalty=2.5, length_penalty=1.0, early_stopping=True)
preds = [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=True) for g in generated_ids]
target = [tokenizer.decode(t, skip_special_tokens=True, clean_up_tokenization_spaces=True) for t in y]
predictions.extend(preds)
actuals.extend(target)
return (predictions, actuals)
def T5Trainer(dataframe, source_text, target_text, model_params, output_dir='/kaggle/working/'):
"""
T5 trainer
"""
torch.manual_seed(model_params['SEED'])
np.random.seed(model_params['SEED'])
torch.backends.cudnn.deterministic = True
console.log(f"[Model]: Loading {model_params['MODEL']}...\n")
tokenizer = T5Tokenizer.from_pretrained(model_params['MODEL'])
model = T5ForConditionalGeneration.from_pretrained(model_params['MODEL'])
model = model.to(device)
console.log(f'[Data]: Reading data...\n')
dataframe = dataframe[[source_text, target_text]]
train_size = 0.8
train_dataset = dataframe.sample(frac=train_size, random_state=model_params['SEED'])
val_dataset = dataframe.drop(train_dataset.index).reset_index(drop=True)
train_dataset = train_dataset.reset_index(drop=True)
training_set = CustomDataSetClass(train_dataset, tokenizer, model_params['MAX_SOURCE_TEXT_LENGTH'], model_params['MAX_TARGET_TEXT_LENGTH'], source_text, target_text)
val_set = CustomDataSetClass(val_dataset, tokenizer, model_params['MAX_SOURCE_TEXT_LENGTH'], model_params['MAX_TARGET_TEXT_LENGTH'], source_text, target_text)
train_params = {'batch_size': model_params['TRAIN_BATCH_SIZE'], 'shuffle': True, 'num_workers': 0}
val_params = {'batch_size': model_params['VALID_BATCH_SIZE'], 'shuffle': False, 'num_workers': 0}
training_loader = DataLoader(training_set, **train_params)
val_loader = DataLoader(val_set, **val_params)
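    # Plain Adam with the configured learning rate; no LR scheduler is used here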
optimizer = torch.optim.Adam(params=model.parameters(), lr=model_params['LEARNING_RATE'])
console.log(f'[Initiating Fine Tuning]...\n')
for epoch in range(model_params['TRAIN_EPOCHS']):
train(epoch, tokenizer, model, device, training_loader, optimizer)
console.log(f'[Saving Model]...\n')
path = os.path.join(output_dir, 'model_files')
model.save_pretrained(path)
tokenizer.save_pretrained(path)
console.log(f'[Initiating Validation]...\n')
for epoch in range(model_params['VAL_EPOCHS']):
predictions, actuals = validate(epoch, tokenizer, model, device, val_loader)
final_df = pd.DataFrame({'Generated Text': predictions, 'Actual Text': actuals})
final_df.to_csv(os.path.join(output_dir, 'predictions.csv'))
console.save_text(os.path.join(output_dir, 'logs.txt'))
console.log(f'[Validation Completed.]\n')
return final_df
model_params = {'MODEL': 't5-base', 'TRAIN_BATCH_SIZE': 8, 'VALID_BATCH_SIZE': 8, 'TRAIN_EPOCHS': 1, 'VAL_EPOCHS': 1, 'LEARNING_RATE': 0.0001, 'MAX_SOURCE_TEXT_LENGTH': 512, 'MAX_TARGET_TEXT_LENGTH': 50, 'SEED': 42}
df['text'] = 'summarize: ' + df['text']
predictions = T5Trainer(dataframe=df, source_text='text', target_text='headlines', model_params=model_params, output_dir='outputs') | code |
128033738/cell_10 | [
"text_html_output_1.png"
] | from torch import cuda
from torch import cuda
device = 'cuda' if cuda.is_available() else 'cpu'
device | code |
16168139/cell_9 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
structures = pd.read_csv('../input/structures.csv')
M = 8000
fig, ax = plt.subplots(1,3,figsize=(20,5))
colors = ["darkred", "dodgerblue", "mediumseagreen", "gold", "purple"]
atoms = structures.atom.unique()
for n in range(len(atoms)):
ax[0].scatter(structures.loc[structures.atom==atoms[n]].x.values[0:M],
structures.loc[structures.atom==atoms[n]].y.values[0:M],
color=colors[n], s=2, alpha=0.5, label=atoms[n])
ax[0].legend()
ax[0].set_xlabel("x")
    ax[0].set_ylabel("y")
ax[1].scatter(structures.loc[structures.atom==atoms[n]].x.values[0:M],
structures.loc[structures.atom==atoms[n]].z.values[0:M],
color=colors[n], s=2, alpha=0.5, label=atoms[n])
ax[1].legend()
ax[1].set_xlabel("x")
    ax[1].set_ylabel("z")
ax[2].scatter(structures.loc[structures.atom==atoms[n]].y.values[0:M],
structures.loc[structures.atom==atoms[n]].z.values[0:M],
color=colors[n], s=2, alpha=0.5, label=atoms[n])
ax[2].legend()
ax[2].set_xlabel("y")
    ax[2].set_ylabel("z")
M = 200000
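# Same three projections for the first 200k atoms, colored by row index (rows are grouped by molecule, so color roughly tracks molecule identity)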
fig, ax = plt.subplots(1, 3, figsize=(20, 5))
ax[0].scatter(structures.x.values[0:M], structures.y.values[0:M], c=structures.index.values[0:M], s=2, alpha=0.5, cmap='magma')
ax[0].set_xlabel('x')
ax[0].set_ylabel('y')
ax[1].scatter(structures.x.values[0:M], structures.z.values[0:M], c=structures.index.values[0:M], s=2, alpha=0.5, cmap='magma')
ax[1].set_xlabel('x')
ax[1].set_ylabel('z')
ax[2].scatter(structures.y.values[0:M], structures.z.values[0:M], c=structures.index.values[0:M], s=2, alpha=0.5, cmap='magma')
ax[2].set_xlabel('y')
ax[2].set_ylabel('z') | code
16168139/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
structures = pd.read_csv('../input/structures.csv')
structures.head() | code |
16168139/cell_2 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import os
print(os.listdir('../input')) | code |
16168139/cell_7 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
structures = pd.read_csv('../input/structures.csv')
M = 8000
fig, ax = plt.subplots(1, 3, figsize=(20, 5))
colors = ['darkred', 'dodgerblue', 'mediumseagreen', 'gold', 'purple']
atoms = structures.atom.unique()
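# Scatter the first M atoms in the xy, xz and yz planes, one color per element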
for n in range(len(atoms)):
ax[0].scatter(structures.loc[structures.atom == atoms[n]].x.values[0:M], structures.loc[structures.atom == atoms[n]].y.values[0:M], color=colors[n], s=2, alpha=0.5, label=atoms[n])
ax[0].legend()
ax[0].set_xlabel('x')
    ax[0].set_ylabel('y')
ax[1].scatter(structures.loc[structures.atom == atoms[n]].x.values[0:M], structures.loc[structures.atom == atoms[n]].z.values[0:M], color=colors[n], s=2, alpha=0.5, label=atoms[n])
ax[1].legend()
ax[1].set_xlabel('x')
    ax[1].set_ylabel('z')
ax[2].scatter(structures.loc[structures.atom == atoms[n]].y.values[0:M], structures.loc[structures.atom == atoms[n]].z.values[0:M], color=colors[n], s=2, alpha=0.5, label=atoms[n])
ax[2].legend()
ax[2].set_xlabel('y')
    ax[2].set_ylabel('z') | code
106196484/cell_13 | [
"text_plain_output_1.png"
] | from pipelines import pipeline
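# 'pipelines' is assumed to come from the question_generation repo; 'multitask-qa-qg' loads a T5 model that can both generate and answer questions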
nlp = pipeline('multitask-qa-qg') | code |
106196484/cell_2 | [
"text_plain_output_1.png"
] | !pip install Wikipedia-API
import wikipediaapi
wiki_wiki = wikipediaapi.Wikipedia('en')
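# Fetch the English Wikipedia article on Machine Learning; its full text will serve as the source passage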
ml_art = wiki_wiki.page('Machine_Learning')
print("Page - Exists: %s" % ml_art.exists())
print("Page - Title: %s" % ml_art.title)
print("Page - Summary: %s" % ml_art.summary[0:60])
print(ml_art.fullurl)
ml_ftxt = ml_art.text | code |
106196484/cell_7 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | !pip install git+https://github.com/boudinfl/pke.git
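# pke: Python keyphrase extraction toolkit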
import pke | code |