path (string, 13–17 chars) | screenshot_names (sequence, 1–873 items) | code (string, 0–40.4k chars) | cell_type (1 value)
---|---|---|---|
17118075/cell_23 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/heart.csv')
df.shape
df.target.value_counts()
dataset = pd.get_dummies(df, columns=['sex', 'cp', 'fbs', 'restecg', 'exang', 'slope', 'ca', 'thal'])
X = dataset.drop('target', axis=1)
y = df['target']
cross_val_score(LogisticRegression(), X, y) | code |
17118075/cell_33 | [
"text_plain_output_1.png"
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
lr = LogisticRegression()
lr.fit(X_train, y_train)
pred = lr.predict(X_test)
pred
report = classification_report(y_test, pred)
print(report) | code |
17118075/cell_20 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/heart.csv')
df.shape
df.target.value_counts()
dataset = pd.get_dummies(df, columns=['sex', 'cp', 'fbs', 'restecg', 'exang', 'slope', 'ca', 'thal'])
X = dataset.drop('target', axis=1)
y = df['target']
cross_val_score(DecisionTreeClassifier(), X, y) | code |
17118075/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/heart.csv')
df.info() | code |
17118075/cell_29 | [
"text_plain_output_1.png"
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train, y_train)
pred = lr.predict(X_test)
pred | code |
17118075/cell_2 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
import os
print(os.listdir('../input')) | code |
17118075/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/heart.csv')
df.shape
df.target.value_counts()
plt.figure(figsize=(10, 7))
sns.set_style('whitegrid')
sns.countplot('target', data=df) | code |
17118075/cell_19 | [
"text_html_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/heart.csv')
df.shape
df.target.value_counts()
dataset = pd.get_dummies(df, columns=['sex', 'cp', 'fbs', 'restecg', 'exang', 'slope', 'ca', 'thal'])
X = dataset.drop('target', axis=1)
y = df['target']
cross_val_score(RandomForestClassifier(n_estimators=60), X, y) | code |
17118075/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/heart.csv')
df.describe() | code |
17118075/cell_32 | [
"text_plain_output_1.png"
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/heart.csv')
df.shape
df.target.value_counts()
sns.set_style('whitegrid')
dataset = pd.get_dummies(df, columns=['sex', 'cp', 'fbs', 'restecg', 'exang', 'slope', 'ca', 'thal'])
lr = LogisticRegression()
lr.fit(X_train, y_train)
pred = lr.predict(X_test)
pred
cm = pd.crosstab(y_test, pred, rownames=['Actual'], colnames=['Predicted'])  # crosstab confusion matrix (avoids shadowing sklearn's confusion_matrix import)
cm
plt.figure(figsize=(12, 7))
plt.title('Confusion Matrix')
sns.heatmap(cm, annot=True, cmap='coolwarm') | code |
17118075/cell_28 | [
"text_plain_output_1.png"
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train, y_train) | code |
17118075/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/heart.csv')
df.shape | code |
17118075/cell_31 | [
"text_plain_output_1.png"
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/heart.csv')
df.shape
df.target.value_counts()
dataset = pd.get_dummies(df, columns=['sex', 'cp', 'fbs', 'restecg', 'exang', 'slope', 'ca', 'thal'])
lr = LogisticRegression()
lr.fit(X_train, y_train)
pred = lr.predict(X_test)
pred
cm = pd.crosstab(y_test, pred, rownames=['Actual'], colnames=['Predicted'])  # crosstab confusion matrix (avoids shadowing sklearn's confusion_matrix import)
cm | code |
17118075/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/heart.csv')
df.shape
df.target.value_counts()
dataset = pd.get_dummies(df, columns=['sex', 'cp', 'fbs', 'restecg', 'exang', 'slope', 'ca', 'thal'])
dataset.head() | code |
17118075/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import GaussianNB
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/heart.csv')
df.shape
df.target.value_counts()
dataset = pd.get_dummies(df, columns=['sex', 'cp', 'fbs', 'restecg', 'exang', 'slope', 'ca', 'thal'])
X = dataset.drop('target', axis=1)
y = df['target']
cross_val_score(GaussianNB(), X, y) | code |
17118075/cell_10 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/heart.csv')
df.shape
df.target.value_counts()
plt.figure(figsize=(12, 7))
sns.heatmap(df.corr(), cmap='coolwarm', annot=True) | code |
2006619/cell_13 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
nrow_train = train.shape[0]
nrow_test = test.shape[0]
sum = nrow_train + nrow_test
x=train.iloc[:,2:].sum()
#plot
plt.figure(figsize=(8,4))
ax= sns.barplot(x.index, x.values, alpha=0.8)
plt.title("# per class")
plt.ylabel('# of Occurrences', fontsize=12)
plt.xlabel('Type ', fontsize=12)
#adding the text labels
rects = ax.patches
labels = x.values
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height + 5, label, ha='center', va='bottom')
plt.show()
rowsums=train.iloc[:,2:].sum(axis=1)
x=rowsums.value_counts()
#plot
plt.figure(figsize=(8,4))
ax = sns.barplot(x.index, x.values, alpha=0.8, color=sns.color_palette()[2])  # 'color' was undefined; use the default seaborn palette
plt.title("Multiple tags per comment")
plt.ylabel('# of Occurrences', fontsize=12)
plt.xlabel('# of tags ', fontsize=12)
#adding the text labels
rects = ax.patches
labels = x.values
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height + 5, label, ha='center', va='bottom')
plt.show()
print('toxic:')
print(train[train.severe_toxic == 1].iloc[3, 1])
print(train[train.severe_toxic == 1].iloc[5, 1]) | code |
2006619/cell_9 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
nrow_train = train.shape[0]
nrow_test = test.shape[0]
sum = nrow_train + nrow_test
x=train.iloc[:,2:].sum()
#plot
plt.figure(figsize=(8,4))
ax= sns.barplot(x.index, x.values, alpha=0.8)
plt.title("# per class")
plt.ylabel('# of Occurrences', fontsize=12)
plt.xlabel('Type ', fontsize=12)
#adding the text labels
rects = ax.patches
labels = x.values
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height + 5, label, ha='center', va='bottom')
plt.show()
print('Total tags =', x.sum()) | code |
2006619/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
nrow_train = train.shape[0]
nrow_test = test.shape[0]
sum = nrow_train + nrow_test
print(' : train : test')
print('rows :', nrow_train, ':', nrow_test)
print('perc :', round(nrow_train * 100 / sum), ' :', round(nrow_test * 100 / sum)) | code |
2006619/cell_2 | [
"text_plain_output_1.png"
] | #Check the dataset sizes(in MB)
!du -l ../input/* | code |
2006619/cell_11 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
nrow_train = train.shape[0]
nrow_test = test.shape[0]
sum = nrow_train + nrow_test
x=train.iloc[:,2:].sum()
#plot
plt.figure(figsize=(8,4))
ax= sns.barplot(x.index, x.values, alpha=0.8)
plt.title("# per class")
plt.ylabel('# of Occurrences', fontsize=12)
plt.xlabel('Type ', fontsize=12)
#adding the text labels
rects = ax.patches
labels = x.values
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height + 5, label, ha='center', va='bottom')
plt.show()
rowsums = train.iloc[:, 2:].sum(axis=1)
x = rowsums.value_counts()
plt.figure(figsize=(8, 4))
ax = sns.barplot(x.index, x.values, alpha=0.8, color=sns.color_palette()[2])
plt.title('Multiple tags per comment')
plt.ylabel('# of Occurrences', fontsize=12)
plt.xlabel('# of tags ', fontsize=12)
rects = ax.patches
labels = x.values
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2, height + 5, label, ha='center', va='bottom')
plt.show() | code |
2006619/cell_8 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
nrow_train = train.shape[0]
nrow_test = test.shape[0]
sum = nrow_train + nrow_test
x = train.iloc[:, 2:].sum()
plt.figure(figsize=(8, 4))
ax = sns.barplot(x.index, x.values, alpha=0.8)
plt.title('# per class')
plt.ylabel('# of Occurrences', fontsize=12)
plt.xlabel('Type ', fontsize=12)
rects = ax.patches
labels = x.values
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2, height + 5, label, ha='center', va='bottom')
plt.show() | code |
2006619/cell_16 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
nrow_train = train.shape[0]
nrow_test = test.shape[0]
sum = nrow_train + nrow_test
x=train.iloc[:,2:].sum()
#plot
plt.figure(figsize=(8,4))
ax= sns.barplot(x.index, x.values, alpha=0.8)
plt.title("# per class")
plt.ylabel('# of Occurrences', fontsize=12)
plt.xlabel('Type ', fontsize=12)
#adding the text labels
rects = ax.patches
labels = x.values
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height + 5, label, ha='center', va='bottom')
plt.show()
rowsums=train.iloc[:,2:].sum(axis=1)
x=rowsums.value_counts()
#plot
plt.figure(figsize=(8,4))
ax = sns.barplot(x.index, x.values, alpha=0.8, color=sns.color_palette()[2])
plt.title("Multiple tags per comment")
plt.ylabel('# of Occurrences', fontsize=12)
plt.xlabel('# of tags ', fontsize=12)
#adding the text labels
rects = ax.patches
labels = x.values
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height + 5, label, ha='center', va='bottom')
plt.show()
print('Threat:')
print(train[train.threat == 1].iloc[1, 1])
print(train[train.threat == 1].iloc[2, 1]) | code |
2006619/cell_17 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
nrow_train = train.shape[0]
nrow_test = test.shape[0]
sum = nrow_train + nrow_test
x=train.iloc[:,2:].sum()
#plot
plt.figure(figsize=(8,4))
ax= sns.barplot(x.index, x.values, alpha=0.8)
plt.title("# per class")
plt.ylabel('# of Occurrences', fontsize=12)
plt.xlabel('Type ', fontsize=12)
#adding the text labels
rects = ax.patches
labels = x.values
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height + 5, label, ha='center', va='bottom')
plt.show()
rowsums=train.iloc[:,2:].sum(axis=1)
x=rowsums.value_counts()
#plot
plt.figure(figsize=(8,4))
ax = sns.barplot(x.index, x.values, alpha=0.8, color=sns.color_palette()[2])
plt.title("Multiple tags per comment")
plt.ylabel('# of Occurrences', fontsize=12)
plt.xlabel('# of tags ', fontsize=12)
#adding the text labels
rects = ax.patches
labels = x.values
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height + 5, label, ha='center', va='bottom')
plt.show()
print('Obscene:')
print(train[train.obscene == 1].iloc[1, 1])
print(train[train.obscene == 1].iloc[2, 1]) | code |
2006619/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
nrow_train = train.shape[0]
nrow_test = test.shape[0]
sum = nrow_train + nrow_test
x=train.iloc[:,2:].sum()
#plot
plt.figure(figsize=(8,4))
ax= sns.barplot(x.index, x.values, alpha=0.8)
plt.title("# per class")
plt.ylabel('# of Occurrences', fontsize=12)
plt.xlabel('Type ', fontsize=12)
#adding the text labels
rects = ax.patches
labels = x.values
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height + 5, label, ha='center', va='bottom')
plt.show()
rowsums=train.iloc[:,2:].sum(axis=1)
x=rowsums.value_counts()
#plot
plt.figure(figsize=(8,4))
ax = sns.barplot(x.index, x.values, alpha=0.8, color=sns.color_palette()[2])
plt.title("Multiple tags per comment")
plt.ylabel('# of Occurrences', fontsize=12)
plt.xlabel('# of tags ', fontsize=12)
#adding the text labels
rects = ax.patches
labels = x.values
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height + 5, label, ha='center', va='bottom')
plt.show()
print('severe_toxic:')
print(train[train.severe_toxic == 1].iloc[3, 1])
print(train[train.severe_toxic == 1].iloc[4, 1]) | code |
2006619/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.tail(10) | code |
74067198/cell_9 | [
"text_plain_output_1.png"
] | from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
import pandas as pd
train_df = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv')
test_df = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv')
sample_df = pd.read_csv('../input/tabular-playground-series-sep-2021/sample_solution.csv')
X, y = (train_df.iloc[:, 1:-1], train_df.iloc[:, -1])
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=33)
results = []
for n_params in range(1, 100):
pipe = make_pipeline(SimpleImputer(strategy='mean'), StandardScaler(), SelectKBest(f_classif, k=n_params), LogisticRegression(random_state=33))
pipe.fit(X_train, y_train)
score = pipe.score(X_test, y_test)
results.append([n_params, score])
n_params = 60
mpipe = make_pipeline(SimpleImputer(strategy='mean'), StandardScaler(), SelectKBest(f_classif, k=n_params), MLPClassifier(random_state=33, early_stopping=True))
mpipe.fit(X_train, y_train)
print(f'score for {n_params} best params: {mpipe.score(X_test, y_test)}') | code |
74067198/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv')
test_df = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv')
sample_df = pd.read_csv('../input/tabular-playground-series-sep-2021/sample_solution.csv')
train_df.describe() | code |
74067198/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.model_selection import train_test_split
import pandas as pd
train_df = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv')
test_df = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv')
sample_df = pd.read_csv('../input/tabular-playground-series-sep-2021/sample_solution.csv')
X, y = (train_df.iloc[:, 1:-1], train_df.iloc[:, -1])
print(X.shape, y.shape)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=33)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape) | code |
74067198/cell_8 | [
"text_plain_output_1.png"
] | from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
import pandas as pd
import seaborn as sns
train_df = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv')
test_df = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv')
sample_df = pd.read_csv('../input/tabular-playground-series-sep-2021/sample_solution.csv')
X, y = (train_df.iloc[:, 1:-1], train_df.iloc[:, -1])
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=33)
results = []
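# sweep SelectKBest over k = 1..99 features and record the hold-out accuracy of the logistic-regression pipeline for each k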
for n_params in range(1, 100):
pipe = make_pipeline(SimpleImputer(strategy='mean'), StandardScaler(), SelectKBest(f_classif, k=n_params), LogisticRegression(random_state=33))
pipe.fit(X_train, y_train)
score = pipe.score(X_test, y_test)
results.append([n_params, score])
sns.scatterplot(x=[i[0] for i in results], y=[i[1] for i in results]) | code |
74067198/cell_10 | [
"text_html_output_1.png"
] | from sklearn.ensemble import GradientBoostingClassifier
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
import pandas as pd
train_df = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv')
test_df = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv')
sample_df = pd.read_csv('../input/tabular-playground-series-sep-2021/sample_solution.csv')
X, y = (train_df.iloc[:, 1:-1], train_df.iloc[:, -1])
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=33)
results = []
for n_params in range(1, 100):
pipe = make_pipeline(SimpleImputer(strategy='mean'), StandardScaler(), SelectKBest(f_classif, k=n_params), LogisticRegression(random_state=33))
pipe.fit(X_train, y_train)
score = pipe.score(X_test, y_test)
results.append([n_params, score])
n_params = 60
mpipe = make_pipeline(SimpleImputer(strategy='mean'), StandardScaler(), SelectKBest(f_classif, k=n_params), MLPClassifier(random_state=33, early_stopping=True))
mpipe.fit(X_train, y_train)
n_params = 60
pipe = make_pipeline(SimpleImputer(strategy='mean'), StandardScaler(), SelectKBest(f_classif, k=n_params), GradientBoostingClassifier(n_estimators=1000, learning_rate=0.001, max_depth=4, random_state=33))
pipe.fit(X_train, y_train)
print(f'score for {n_params} best params: {pipe.score(X_test, y_test)}') | code |
74067198/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv')
test_df = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv')
sample_df = pd.read_csv('../input/tabular-playground-series-sep-2021/sample_solution.csv')
train_df.isna().sum().describe() | code |
90140085/cell_9 | [
"text_html_output_1.png"
] | from bs4 import BeautifulSoup
import pandas as pd
import requests
url = 'https://info.ctfx.jp/service/market/data_download.html'
res = requests.get(url)
soup = BeautifulSoup(res.text, 'html.parser')
DIR = 'https://info.ctfx.jp/service/market/'
table0 = soup.find_all('a')
table1 = []
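# keep only the anchors whose link text is '日足(CSV)' ("daily data (CSV)") and build their absolute URLs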
for item in table0:
if item.text == '日足(CSV)':
table1 += [DIR + item['href']]
link = pd.DataFrame(columns=['name', 'link'])
names = []
for item in table1:
names += [item.split('/')[-1][3:9]]
link['name'] = names
link['link'] = table1
link
NAMES = names.copy()
LINK = names.copy()
for i, item in enumerate(names):
if 'JPY' in item:
print(i, item) | code |
90140085/cell_6 | [
"text_plain_output_1.png"
] | from bs4 import BeautifulSoup
import requests
url = 'https://info.ctfx.jp/service/market/data_download.html'
res = requests.get(url)
soup = BeautifulSoup(res.text, 'html.parser')
DIR = 'https://info.ctfx.jp/service/market/'
table0 = soup.find_all('a')
table1 = []
for item in table0:
if item.text == '日足(CSV)':
table1 += [DIR + item['href']]
print(table1) | code |
90140085/cell_2 | [
"text_plain_output_1.png"
] | !pip install requests
!pip install BeautifulSoup4 | code |
90140085/cell_11 | [
"text_plain_output_1.png"
] | from bs4 import BeautifulSoup
import pandas as pd
import requests
url = 'https://info.ctfx.jp/service/market/data_download.html'
res = requests.get(url)
soup = BeautifulSoup(res.text, 'html.parser')
DIR = 'https://info.ctfx.jp/service/market/'
table0 = soup.find_all('a')
table1 = []
for item in table0:
if item.text == '日足(CSV)':
table1 += [DIR + item['href']]
link = pd.DataFrame(columns=['name', 'link'])
names = []
for item in table1:
names += [item.split('/')[-1][3:9]]
link['name'] = names
link['link'] = table1
link
NAMES = names.copy()
LINK = names.copy()
for i in range(25):
    LINK[i] = link.iloc[i, 1]  # fill LINK with the CSV URLs (as in the sibling cell) so the file can be downloaded
pd.read_csv(LINK[0], encoding='SHIFT_JIS') | code |
90140085/cell_7 | [
"text_html_output_1.png"
] | from bs4 import BeautifulSoup
import pandas as pd
import requests
url = 'https://info.ctfx.jp/service/market/data_download.html'
res = requests.get(url)
soup = BeautifulSoup(res.text, 'html.parser')
DIR = 'https://info.ctfx.jp/service/market/'
table0 = soup.find_all('a')
table1 = []
for item in table0:
if item.text == '日足(CSV)':
table1 += [DIR + item['href']]
link = pd.DataFrame(columns=['name', 'link'])
names = []
for item in table1:
names += [item.split('/')[-1][3:9]]
link['name'] = names
link['link'] = table1
link | code |
90140085/cell_8 | [
"text_html_output_2.png"
] | from bs4 import BeautifulSoup
import pandas as pd
import requests
url = 'https://info.ctfx.jp/service/market/data_download.html'
res = requests.get(url)
soup = BeautifulSoup(res.text, 'html.parser')
DIR = 'https://info.ctfx.jp/service/market/'
table0 = soup.find_all('a')
table1 = []
for item in table0:
if item.text == '日足(CSV)':
table1 += [DIR + item['href']]
link = pd.DataFrame(columns=['name', 'link'])
names = []
for item in table1:
names += [item.split('/')[-1][3:9]]
link['name'] = names
link['link'] = table1
link
NAMES = names.copy()
LINK = names.copy()
print(LINK) | code |
90140085/cell_15 | [
"text_html_output_1.png"
] | from bs4 import BeautifulSoup
from plotly.subplots import make_subplots
import pandas as pd
import plotly.graph_objects as go
import requests
url = 'https://info.ctfx.jp/service/market/data_download.html'
res = requests.get(url)
soup = BeautifulSoup(res.text, 'html.parser')
DIR = 'https://info.ctfx.jp/service/market/'
table0 = soup.find_all('a')
table1 = []
for item in table0:
if item.text == '日足(CSV)':
table1 += [DIR + item['href']]
link = pd.DataFrame(columns=['name', 'link'])
names = []
for item in table1:
names += [item.split('/')[-1][3:9]]
link['name'] = names
link['link'] = table1
link
NAMES = names.copy()
LINK = names.copy()
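# note: NAMES[i] is assumed to hold the DataFrame loaded from LINK[i] (built in other cells); '日付' is the date column and '終値' the closing price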
fig = make_subplots(specs=[[{'secondary_y': False}]])
for i in [0, 1, 3, 5, 7, 12, 15, 22]:
fig.add_trace(go.Scatter(x=NAMES[i]['日付'], y=NAMES[i]['終値'], name=names[i]), secondary_y=False)
fig.update_layout(autosize=False, width=900, height=600, title_text='Exchange Rate')
fig.update_xaxes(title_text='Date')
fig.update_yaxes(title_text='Yen', secondary_y=False)
fig.show() | code |
90140085/cell_16 | [
"text_plain_output_1.png"
] | from bs4 import BeautifulSoup
from plotly.subplots import make_subplots
import pandas as pd
import plotly.graph_objects as go
import requests
url = 'https://info.ctfx.jp/service/market/data_download.html'
res = requests.get(url)
soup = BeautifulSoup(res.text, 'html.parser')
DIR = 'https://info.ctfx.jp/service/market/'
table0 = soup.find_all('a')
table1 = []
for item in table0:
if item.text == '日足(CSV)':
table1 += [DIR + item['href']]
link = pd.DataFrame(columns=['name', 'link'])
names = []
for item in table1:
names += [item.split('/')[-1][3:9]]
link['name'] = names
link['link'] = table1
link
NAMES = names.copy()
LINK = names.copy()
fig=make_subplots(specs=[[{"secondary_y":False}]])
for i in [0,1,3,5,7,12,15,22]:
fig.add_trace(go.Scatter(x=NAMES[i]['日付'],y=NAMES[i]['終値'],name=names[i]),secondary_y=False,)
fig.update_layout(autosize=False,width=900,height=600,title_text="Exchange Rate")
fig.update_xaxes(title_text="Date")
fig.update_yaxes(title_text="Yen",secondary_y=False)
fig.show()
fig = make_subplots(specs=[[{'secondary_y': False}]])
for i in [0, 1, 3, 5, 7, 12, 15, 22]:
fig.add_trace(go.Scatter(x=NAMES[i]['日付'], y=NAMES[i]['slope'], name=names[i]), secondary_y=False)
fig.update_layout(autosize=False, width=900, height=600, title_text='Slope of Exchange Rate')
fig.update_xaxes(title_text='Date')
fig.update_yaxes(title_text='Slope', secondary_y=False)
fig.show() | code |
90140085/cell_10 | [
"text_plain_output_1.png"
] | from bs4 import BeautifulSoup
import pandas as pd
import requests
url = 'https://info.ctfx.jp/service/market/data_download.html'
res = requests.get(url)
soup = BeautifulSoup(res.text, 'html.parser')
DIR = 'https://info.ctfx.jp/service/market/'
table0 = soup.find_all('a')
table1 = []
for item in table0:
if item.text == '日足(CSV)':
table1 += [DIR + item['href']]
link = pd.DataFrame(columns=['name', 'link'])
names = []
for item in table1:
names += [item.split('/')[-1][3:9]]
link['name'] = names
link['link'] = table1
link
NAMES = names.copy()
LINK = names.copy()
for i in range(25):
LINK[i] = link.iloc[i, 1]
print(LINK) | code |
1009667/cell_2 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from glob import glob
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.DataFrame([dict(path=c_path, image_name=os.path.basename(c_path), type_cat=os.path.basename(os.path.dirname(c_path))) for c_path in glob('../input/train/*/*')])
print('Total Samples', train_df.shape[0])
print('Sample Summary\n', pd.value_counts(train_df['type_cat']))
train_df.sample(3) | code |
1009667/cell_3 | [
"text_html_output_1.png"
] | from glob import glob
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.DataFrame([dict(path=c_path, image_name=os.path.basename(c_path), type_cat=os.path.basename(os.path.dirname(c_path))) for c_path in glob('../input/train/*/*')])
train_df.sample(3)
test_df = pd.DataFrame([dict(path=c_path, image_name=os.path.basename(c_path)) for c_path in glob('../input/test/*')])
test_df.sample(3) | code |
1009667/cell_5 | [
"text_html_output_1.png"
] | from glob import glob
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.DataFrame([dict(path=c_path, image_name=os.path.basename(c_path), type_cat=os.path.basename(os.path.dirname(c_path))) for c_path in glob('../input/train/*/*')])
train_df.sample(3)
test_df = pd.DataFrame([dict(path=c_path, image_name=os.path.basename(c_path)) for c_path in glob('../input/test/*')])
test_df.sample(3)
guess_df = test_df[['image_name']]
w_values = pd.value_counts(train_df['type_cat']) / train_df.shape[0]
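# use each class's share of the training set as a constant probability guess for every test image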
for col, val in sorted(w_values.to_dict().items()):
guess_df[col] = val
guess_df.sample(3) | code |
73080780/cell_9 | [
"application_vnd.jupyter.stderr_output_9.png",
"application_vnd.jupyter.stderr_output_7.png",
"application_vnd.jupyter.stderr_output_11.png",
"text_plain_output_20.png",
"text_plain_output_4.png",
"text_plain_output_14.png",
"text_plain_output_10.png",
"text_plain_output_6.png",
"text_plain_output_21.png",
"text_plain_output_18.png",
"application_vnd.jupyter.stderr_output_19.png",
"application_vnd.jupyter.stderr_output_13.png",
"application_vnd.jupyter.stderr_output_3.png",
"application_vnd.jupyter.stderr_output_5.png",
"text_plain_output_16.png",
"application_vnd.jupyter.stderr_output_15.png",
"text_plain_output_8.png",
"application_vnd.jupyter.stderr_output_17.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"text_plain_output_12.png"
import glob
import random
import numpy as np
import optuna
import pandas as pd
from sklearn import compose
from sklearn import impute
from sklearn import linear_model
from sklearn import metrics
from sklearn import model_selection
from sklearn import pipeline
from sklearn import preprocessing
import lightgbm as lgbm
import xgboost as xgb
GPU_ENABLED = False
FOLDS = 10
df_train = pd.read_csv('../input/30-days-of-ml/train.csv')
kfold = model_selection.KFold(n_splits=FOLDS, shuffle=True, random_state=42)
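# tag every training row with a fold index and persist the split so all models are trained on identical folds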
df_train['fold'] = -1
for fold, (_, valid_idx) in enumerate(kfold.split(df_train)):
df_train.loc[valid_idx, 'fold'] = fold
df_train.to_csv('folds.csv', index=False)
df_train = pd.read_csv('folds.csv')
df_test = pd.read_csv('../input/30-days-of-ml/test.csv')
cont_features = [f for f in df_train.columns.tolist() if f.startswith('cont')]
cat_features = [f for f in df_train.columns.tolist() if f.startswith('cat')]
# build dummies over train and test together, then split them back at len(df_train) so both frames share the same one-hot columns
dummies = pd.get_dummies(pd.concat([df_train[cat_features], df_test[cat_features]], axis=0))
df_train[dummies.columns] = dummies.iloc[:len(df_train), :].values
df_test[dummies.columns] = dummies.iloc[len(df_train):, :].values
class Trainer(object):
def __init__(self, df_train, df_test):
self.df_train = df_train
self.df_test = df_test
self.predictions_valid = None
self.predictions_test = None
def fit(self, models):
model_scores = dict()
self.predictions_valid = dict()
self.predictions_test = dict()
for model_index in range(len(models)):
model_scores.setdefault(model_index, [])
self.predictions_valid.setdefault(model_index, dict())
self.predictions_test.setdefault(model_index, [])
self.preprocessed_data = dict()
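        # cache of transformed train/valid/test arrays keyed by (fold, preprocessor id), so a preprocessor shared by several models is only fitted once per fold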
folds = df_train.fold.max() + 1
for fold in range(folds):
X_train_fold = self.df_train[self.df_train.fold != fold]
X_valid_fold = self.df_train[self.df_train.fold == fold]
valid_idx = X_valid_fold.id.values.tolist()
y_train = X_train_fold.target
y_valid = X_valid_fold.target
for model_index, (preprocessor, features, model_fn) in enumerate(models):
xtrain = X_train_fold[features]
xvalid = X_valid_fold[features]
xtest = self.df_test[features]
preprocessor_id = f'{fold}_{id(preprocessor)}'
if preprocessor_id not in self.preprocessed_data:
self.preprocessed_data[preprocessor_id] = {'X_train': preprocessor.fit_transform(xtrain, y_train), 'X_valid': preprocessor.transform(xvalid), 'X_test': preprocessor.transform(xtest)}
X_train = self.preprocessed_data[preprocessor_id]['X_train']
X_valid = self.preprocessed_data[preprocessor_id]['X_valid']
X_test = self.preprocessed_data[preprocessor_id]['X_test']
model = model_fn(fold)
model.fit(X_train, y_train, early_stopping_rounds=100, eval_set=[(X_valid, y_valid)], verbose=False)
yhat_valid = model.predict(X_valid)
yhat_test = model.predict(X_test)
self.predictions_valid[model_index].update(dict(zip(valid_idx, yhat_valid)))
self.predictions_test[model_index].append(yhat_test)
rmse = metrics.mean_squared_error(y_valid, yhat_valid, squared=False)
model_scores[model_index].append(rmse)
scores = []
for index in range(len(models)):
score = np.mean(model_scores[index])
scores.append(score)
self._save()
return scores
def _save(self):
if self.predictions_valid is None:
return
for index in range(len(self.predictions_valid.keys())):
model_id = random.randint(1000000000, 9999999999)
df_predictions_valid = pd.DataFrame.from_dict(self.predictions_valid[index], orient='index').reset_index()
df_predictions_valid.columns = ['id', f'predictions_{model_id}']
df_predictions_valid.to_csv(f'predictions_train_model_{model_id}.csv', index=False)
target = np.mean(np.column_stack(self.predictions_test[index]), axis=1)
df_predictions_test = pd.DataFrame({'id': self.df_test.id, f'predictions_{model_id}': target})
df_predictions_test.to_csv(f'predictions_test_model_{model_id}.csv', index=False)
numerical_preprocessor = pipeline.Pipeline(steps=[('imputer', impute.SimpleImputer(strategy='mean')), ('scaler', preprocessing.MinMaxScaler())])
categorical_preprocessor = pipeline.Pipeline(steps=[('imputer', impute.SimpleImputer(strategy='most_frequent')), ('ordinal', preprocessing.OrdinalEncoder())])
onehot_preprocessor = preprocessing.OneHotEncoder(handle_unknown='ignore')
preprocessor1 = compose.ColumnTransformer(transformers=[('numerical', numerical_preprocessor, cont_features), ('categorical', categorical_preprocessor, ['cat0', 'cat1', 'cat2', 'cat6', 'cat7', 'cat8', 'cat9']), ('onehot', onehot_preprocessor, ['cat3', 'cat4', 'cat5'])])
preprocessor2 = compose.ColumnTransformer(transformers=[('numerical', numerical_preprocessor, cont_features), ('categorical', categorical_preprocessor, cat_features)])
preprocessor3 = compose.ColumnTransformer(transformers=[('numerical', numerical_preprocessor, cont_features), ('categorical', categorical_preprocessor, ['cat1', 'cat5', 'cat8'])])
def model_xgb1(random_state):
model_params = {'n_estimators': 5000, 'colsample_bytree': 0.14503672812818655, 'learning_rate': 0.026154895111343116, 'max_depth': 6, 'reg_alpha': 33.58185735675347, 'reg_lambda': 0.0006654723943414539, 'subsample': 0.7737874287937573}
if GPU_ENABLED:
model_params['tree_method'] = 'gpu_hist'
model_params['predictor'] = 'gpu_predictor'
else:
model_params['n_jobs'] = 4
return xgb.XGBRegressor(objective='reg:squarederror', random_state=random_state, **model_params)
def model_xgb2(random_state):
model_params = {'n_estimators': 5000, 'colsample_bytree': 0.13404738492192508, 'learning_rate': 0.016208647588692938, 'max_depth': 6, 'reg_alpha': 17.228932163667228, 'reg_lambda': 6.33772225860826e-05, 'subsample': 0.5455951254226656}
if GPU_ENABLED:
model_params['tree_method'] = 'gpu_hist'
model_params['predictor'] = 'gpu_predictor'
else:
model_params['n_jobs'] = 4
return xgb.XGBRegressor(objective='reg:squarederror', random_state=random_state, **model_params)
def model_xgb3(random_state):
model_params = {'n_estimators': 5000, 'colsample_bytree': 0.1260369083045909, 'learning_rate': 0.08194751952715394, 'max_depth': 3, 'reg_alpha': 52.63823514724582, 'reg_lambda': 3.101903064457115, 'subsample': 0.6466414975747573}
if GPU_ENABLED:
model_params['tree_method'] = 'gpu_hist'
model_params['predictor'] = 'gpu_predictor'
else:
model_params['n_jobs'] = 4
return xgb.XGBRegressor(objective='reg:squarederror', random_state=random_state, **model_params)
def model_lgbm(random_state):
model_params = {'n_estimators': 20000, 'bagging_fraction': 0.8974383620638076, 'bagging_freq': 6, 'feature_fraction': 0.44911144741980047, 'lambda_l1': 8.576512805347963e-06, 'lambda_l2': 2.0513310788967636, 'learning_rate': 0.01499785501240656, 'min_child_samples': 92, 'num_leaves': 4}
if GPU_ENABLED:
model_params['device'] = 'gpu'
model_params['gpu_platform_id'] = 0
model_params['gpu_device_id'] = 0
else:
model_params['n_jobs'] = 4
return lgbm.LGBMRegressor(objective='regression', metric='rmse', random_state=random_state, **model_params)
features1 = cont_features + cat_features
features2 = cont_features + ['cat1', 'cat5', 'cat8', 'cat1_A', 'cat3_C', 'cat8_C', 'cat8_E']
trainer = Trainer(df_train, df_test)
trainer.fit([(preprocessor1, features1, model_xgb1), (preprocessor2, features1, model_xgb2), (preprocessor3, features2, model_xgb3), (preprocessor2, features1, model_lgbm)]) | code |
73080780/cell_11 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn import model_selection
import numpy as np
import pandas as pd
import random
FOLDS = 10
df_train = pd.read_csv('../input/30-days-of-ml/train.csv')
kfold = model_selection.KFold(n_splits=FOLDS, shuffle=True, random_state=42)
df_train['fold'] = -1
for fold, (_, valid_idx) in enumerate(kfold.split(df_train)):
df_train.loc[valid_idx, 'fold'] = fold
df_train.to_csv('folds.csv', index=False)
df_train = pd.read_csv('folds.csv')
df_test = pd.read_csv('../input/30-days-of-ml/test.csv')
cont_features = [f for f in df_train.columns.tolist() if f.startswith('cont')]
cat_features = [f for f in df_train.columns.tolist() if f.startswith('cat')]
# build dummies over train and test together, then split them back at len(df_train) so both frames share the same one-hot columns
dummies = pd.get_dummies(pd.concat([df_train[cat_features], df_test[cat_features]], axis=0))
df_train[dummies.columns] = dummies.iloc[:len(df_train), :].values
df_test[dummies.columns] = dummies.iloc[len(df_train):, :].values
class Trainer(object):
def __init__(self, df_train, df_test):
self.df_train = df_train
self.df_test = df_test
self.predictions_valid = None
self.predictions_test = None
def fit(self, models):
model_scores = dict()
self.predictions_valid = dict()
self.predictions_test = dict()
for model_index in range(len(models)):
model_scores.setdefault(model_index, [])
self.predictions_valid.setdefault(model_index, dict())
self.predictions_test.setdefault(model_index, [])
self.preprocessed_data = dict()
folds = df_train.fold.max() + 1
for fold in range(folds):
X_train_fold = self.df_train[self.df_train.fold != fold]
X_valid_fold = self.df_train[self.df_train.fold == fold]
valid_idx = X_valid_fold.id.values.tolist()
y_train = X_train_fold.target
y_valid = X_valid_fold.target
for model_index, (preprocessor, features, model_fn) in enumerate(models):
xtrain = X_train_fold[features]
xvalid = X_valid_fold[features]
xtest = self.df_test[features]
preprocessor_id = f'{fold}_{id(preprocessor)}'
if preprocessor_id not in self.preprocessed_data:
self.preprocessed_data[preprocessor_id] = {'X_train': preprocessor.fit_transform(xtrain, y_train), 'X_valid': preprocessor.transform(xvalid), 'X_test': preprocessor.transform(xtest)}
X_train = self.preprocessed_data[preprocessor_id]['X_train']
X_valid = self.preprocessed_data[preprocessor_id]['X_valid']
X_test = self.preprocessed_data[preprocessor_id]['X_test']
model = model_fn(fold)
model.fit(X_train, y_train, early_stopping_rounds=100, eval_set=[(X_valid, y_valid)], verbose=False)
yhat_valid = model.predict(X_valid)
yhat_test = model.predict(X_test)
self.predictions_valid[model_index].update(dict(zip(valid_idx, yhat_valid)))
self.predictions_test[model_index].append(yhat_test)
rmse = metrics.mean_squared_error(y_valid, yhat_valid, squared=False)
model_scores[model_index].append(rmse)
scores = []
for index in range(len(models)):
score = np.mean(model_scores[index])
scores.append(score)
self._save()
return scores
def _save(self):
if self.predictions_valid is None:
return
for index in range(len(self.predictions_valid.keys())):
model_id = random.randint(1000000000, 9999999999)
df_predictions_valid = pd.DataFrame.from_dict(self.predictions_valid[index], orient='index').reset_index()
df_predictions_valid.columns = ['id', f'predictions_{model_id}']
df_predictions_valid.to_csv(f'predictions_train_model_{model_id}.csv', index=False)
target = np.mean(np.column_stack(self.predictions_test[index]), axis=1)
df_predictions_test = pd.DataFrame({'id': self.df_test.id, f'predictions_{model_id}': target})
df_predictions_test.to_csv(f'predictions_test_model_{model_id}.csv', index=False)
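# note: the Blender class is assumed to be defined in an earlier cell (not shown here); it blends the saved per-model predictions into submission.csv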
blender = Blender(df_train, df_test)
blender.blend('submission.csv') | code |
18159892/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import cv2
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
def display_train_images(df, col=3, row=3):
    tol = 30  # initializing our tolerance level for cropping
    matrix = col * row
    fig = plt.figure(figsize=(10, 10))  # defining the size of our inline plot from matplotlib
for i in range(matrix):
image_path = df.loc[i,'id_code'] # assigning the id_code of each eye image to a certain path
image_id = df.loc[i,'diagnosis']
image = cv2.imread(f'../input/train_images/{image_path}.png')
img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # converting images from a RGB format to a grayscale format
mask = img>tol
img = img[np.ix_(mask.any(1),mask.any(0))] # converting the images into an adjusted or cropped image
img = cv2.resize(img, (img_size, img_size)) # resizing our images all to a single size
        img = cv2.addWeighted(img, 4, cv2.GaussianBlur(img, (0, 0), img_size/10), -4, 128)  # creating a Gaussian blur from the grayscale image
fig.add_subplot(row, col, i+1)
plt.title(image_id)
plt.axis('off')
plt.imshow(img,cmap='gray')
plt.tight_layout()
return df
train_df = display_train_images(train_df) | code |
18159892/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_df.head() | code |
18159892/cell_34 | [
"text_plain_output_1.png"
] | from keras import layers
from keras.applications import DenseNet121
from keras.models import Sequential
from keras.optimizers import Adam
densenet = DenseNet121(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
def build_model():
model = Sequential()
model.add(densenet)
model.add(layers.GlobalAveragePooling2D())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(10, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer=Adam(lr=5e-05), metrics=['accuracy'])
return model
model = build_model()
model.summary() | code |
18159892/cell_1 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | # This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import cv2
import tensorflow as tf
!pip install seaborn
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.metrics import cohen_kappa_score, accuracy_score
from keras.applications import DenseNet121
from keras import layers
from keras.models import Sequential
from keras.optimizers import Adam
from keras.callbacks import Callback, ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
img_size = 512 # setting our image size to a standard value
# Any results you write to the current directory are saved as output. | code |
18159892/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_df['diagnosis'].value_counts().plot.bar()
plt.title('Visualization of DR Trainig Dataset')
plt.xlabel('DR Diagnosis Type')
plt.ylabel('Patient Count') | code |
18159892/cell_35 | [
"text_plain_output_1.png"
] | from keras import layers
from keras.applications import DenseNet121
from keras.callbacks import Callback, ModelCheckpoint
from keras.models import Sequential
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import cohen_kappa_score, accuracy_score
BATCH_SIZE = 32
def create_datagen():
return ImageDataGenerator(zoom_range=0.15, fill_mode='constant', cval=0.0, horizontal_flip=True, vertical_flip=True)
class Metrics(Callback):
def on_train_begin(self, logs={}):
self.val_kappas = []
def on_epoch_end(self, epoch, logs={}):
X_val, y_val = self.validation_data[:2]
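        # labels are assumed to use a cumulative multi-hot encoding, so summing the active units and subtracting 1 recovers the integer grade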
y_val = y_val.sum(axis=1) - 1
y_pred = self.model.predict(X_val) > 0.5
y_pred = y_pred.astype(int).sum(axis=1) - 1
_val_kappa = cohen_kappa_score(y_val, y_pred, weights='quadratic')
self.val_kappas.append(_val_kappa)
if _val_kappa == max(self.val_kappas):
self.model.save('model.h5')
return
densenet = DenseNet121(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
def build_model():
model = Sequential()
model.add(densenet)
model.add(layers.GlobalAveragePooling2D())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(10, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer=Adam(lr=5e-05), metrics=['accuracy'])
return model
model = build_model()
model.summary()
kappa_metrics = Metrics()
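# note: `data_generator` is assumed to be created elsewhere, e.g. create_datagen().flow(x_train, y_train, batch_size=BATCH_SIZE)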
history = model.fit_generator(data_generator, steps_per_epoch=x_train.shape[0] / BATCH_SIZE, epochs=15, validation_data=(x_val, y_val), callbacks=[kappa_metrics]) | code |
18159892/cell_31 | [
"image_output_1.png"
] | from keras.applications import DenseNet121
densenet = DenseNet121(weights='imagenet', include_top=False, input_shape=(224, 224, 3)) | code |
18159892/cell_24 | [
"image_output_1.png"
] | import cv2
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
def display_train_images(df, col=3, row=3):
    tol = 30  # initializing our tolerance level for cropping
    matrix = col * row
    fig = plt.figure(figsize=(10, 10))  # defining the size of our inline plot from matplotlib
for i in range(matrix):
image_path = df.loc[i,'id_code'] # assigning the id_code of each eye image to a certain path
image_id = df.loc[i,'diagnosis']
image = cv2.imread(f'../input/train_images/{image_path}.png')
img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # converting images from a RGB format to a grayscale format
mask = img>tol
img = img[np.ix_(mask.any(1),mask.any(0))] # converting the images into an adjusted or cropped image
img = cv2.resize(img, (img_size, img_size)) # resizing our images all to a single size
        img = cv2.addWeighted(img, 4, cv2.GaussianBlur(img, (0, 0), img_size/10), -4, 128)  # creating a Gaussian blur from the grayscale image
fig.add_subplot(row, col, i+1)
plt.title(image_id)
plt.axis('off')
plt.imshow(img,cmap='gray')
plt.tight_layout()
return df
def display_test_images(df, col=3, row=3):
    tol = 30  # initializing our tolerance level for cropping
    matrix = col * row
    fig = plt.figure(figsize=(10, 10))  # defining the size of our inline plot from matplotlib
for i in range(matrix):
image_path = df.loc[i,'id_code'] # assigning the id_code of each eye image to a certain path
image = cv2.imread(f'../input/test_images/{image_path}.png')
img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # converting images from a RGB format to a grayscale format
mask = img>tol
img = img[np.ix_(mask.any(1),mask.any(0))] # converting the images into an adjusted or cropped image
img = cv2.resize(img, (img_size, img_size)) # resizing our images all to a single size
fig.add_subplot(row, col, i+1)
plt.axis('off')
plt.imshow(img,cmap='gray')
plt.tight_layout()
display_test_images(test_df) | code |
18159892/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_df['diagnosis'].value_counts() | code |
32068582/cell_6 | [
"text_plain_output_1.png"
] | from scipy.optimize import curve_fit
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
popDict = {'Afghanistan_NONE': 34656032, 'Albania_NONE': 2876101, 'Algeria_NONE': 40606052, 'Andorra_NONE': 77281, 'Angola_NONE': 28813463, 'Antigua and Barbuda_NONE': 100963, 'Argentina_NONE': 43847430, 'Armenia_NONE': 2924816, 'Australia_Australian Capital Territory': 24127159, 'Australia_New South Wales': 24127159, 'Australia_Northern Territory': 24127159, 'Australia_Queensland': 24127159, 'Australia_South Australia': 24127159, 'Australia_Tasmania': 24127159, 'Australia_Victoria': 24127159, 'Australia_Western Australia': 24127159, 'Austria_NONE': 8747358, 'Azerbaijan_NONE': 9762274, 'Bahamas_NONE': 391232, 'Bahrain_NONE': 1425171, 'Bangladesh_NONE': 162951560, 'Barbados_NONE': 284996, 'Belarus_NONE': 9507120, 'Belgium_NONE': 11348159, 'Belize_NONE': 201674, 'Benin_NONE': 10872298, 'Bhutan_NONE': 797765, 'Bolivia_NONE': 10887882, 'Bosnia and Herzegovina_NONE': 3516816, 'Botswana_NONE': 2250000, 'Brazil_NONE': 207652865, 'Brunei_NONE': 423196, 'Bulgaria_NONE': 7127822, 'Burkina Faso_NONE': 18646433, 'Burma_NONE': 54000000, 'Burundi_NONE': 11200000, 'Cabo Verde_NONE': 539560, 'Cambodia_NONE': 15762370, 'Cameroon_NONE': 23439189, 'Canada_Alberta': 4413146, 'Canada_British Columbia': 5110917, 'Canada_Manitoba': 1377517, 'Canada_New Brunswick': 779993, 'Canada_Newfoundland and Labrador': 521365, 'Canada_Northwest Territories': 45000, 'Canada_Nova Scotia': 977457, 'Canada_Ontario': 14711827, 'Canada_Prince Edward Island': 158158, 'Canada_Quebec': 8537674, 'Canada_Saskatchewan': 1181666, 'Canada_Yukon': 40000, 'Central African Republic_NONE': 4594621, 'Chad_NONE': 14452543, 'Chile_NONE': 17909754, 'China_Anhui': 62200728, 'China_Beijing': 25215113, 'China_Chongqing': 31791850, 'China_Fujian': 39789666, 'China_Gansu': 26406335, 'China_Guangdong': 13302000, 'China_Guangxi': 50027447, 'China_Guizhou': 35669112, 'China_Hainan': 9645206, 'China_Hebei': 76878832, 'China_Heilongjiang': 38434821, 'China_Henan': 94375586, 'China_Hong Kong': 7478895, 'China_Hubei': 59783978, 'China_Hunan': 69804905, 'China_Inner Mongolia': 25617169, 'China_Jiangsu': 81134214, 'China_Jiangxi': 46784338, 'China_Jilin': 27653275, 'China_Liaoning': 44260682, 'China_Macau': 649335, 'China_Ningxia': 7139502, 'China_Qinghai': 6146631, 'China_Shaanxi': 38379669, 'China_Shandong': 101027107, 'China_Shanghai': 27014899, 'China_Shanxi': 37702752, 'China_Sichuan': 82613096, 'China_Tianjin': 19898557, 'China_Tibet': 3414708, 'China_Xinjiang': 24697132, 'China_Yunnan': 49033220, 'China_Zhejiang': 56292546, 'Colombia_NONE': 48653419, 'Congo (Brazzaville)_NONE': 5125821, 'Congo (Kinshasa)_NONE': 78736153, 'Costa Rica_NONE': 4857274, "Cote d'Ivoire_NONE": 23695919, 'Croatia_NONE': 4170600, 'Cuba_NONE': 11475982, 'Cyprus_NONE': 1170125, 'Czechia_NONE': 10561633, 'Denmark_Faroe Islands': 49117, 'Denmark_Greenland': 56186, 'Denmark_NONE': 5731118, 'Diamond Princess_NONE': 4000, 'Djibouti_NONE': 942333, 'Dominica_NONE': 73543, 'Dominican Republic_NONE': 10648791, 'Ecuador_NONE': 16385068, 'Egypt_NONE': 95688681, 'El Salvador_NONE': 6344722, 'Equatorial Guinea_NONE': 1221490, 'Eritrea_NONE': 3452786, 'Estonia_NONE': 1316481, 'Eswatini_NONE': 1136281, 'Ethiopia_NONE': 102403196, 'Fiji_NONE': 898760, 'Finland_NONE': 5495096, 'France_French Guiana': 275713, 'France_French Polynesia': 202016, 'France_Guadeloupe': 390704, 'France_Martinique': 371246, 'France_Mayotte': 259154, 'France_New Caledonia': 278000, 'France_Reunion': 865826, 'France_Saint Barthelemy': 9625, 'France_Saint Pierre and Miquelon': 6000, 'France_St Martin': 31949, 'France_NONE': 
66896109, 'Gabon_NONE': 1979786, 'Gambia_NONE': 2038501, 'Georgia_NONE': 3719300, 'Germany_NONE': 82667685, 'Ghana_NONE': 28206728, 'Greece_NONE': 10746740, 'Grenada_NONE': 107317, 'Guatemala_NONE': 16582469, 'Guinea_NONE': 12395924, 'Guinea-Bissau_NONE': 1815698, 'Guyana_NONE': 773303, 'Haiti_NONE': 10847334, 'Holy See_NONE': 800, 'Honduras_NONE': 9112867, 'Hungary_NONE': 9817958, 'Iceland_NONE': 334252, 'India_NONE': 1324171354, 'Indonesia_NONE': 261115456, 'Iran_NONE': 80277428, 'Iraq_NONE': 37202572, 'Ireland_NONE': 4773095, 'Israel_NONE': 8547100, 'Italy_NONE': 60600590, 'Jamaica_NONE': 2881355, 'Japan_NONE': 126994511, 'Jordan_NONE': 9455802, 'Kazakhstan_NONE': 17797032, 'Kenya_NONE': 48461567, 'Korea, South_NONE': 51245707, 'Kosovo_NONE': 1800000, 'Kuwait_NONE': 4052584, 'Kyrgyzstan_NONE': 6082700, 'Laos_NONE': 6758353, 'Latvia_NONE': 1960424, 'Lebanon_NONE': 6006668, 'Liberia_NONE': 4613823, 'Libya_NONE': 6293253, 'Liechtenstein_NONE': 37666, 'Lithuania_NONE': 2872298, 'Luxembourg_NONE': 582972, 'Madagascar_NONE': 24894551, 'Malaysia_NONE': 31187265, 'Maldives_NONE': 417492, 'Malawi_NONE': 18400000, 'Mali_NONE': 17994837, 'Malta_NONE': 436947, 'Mauritania_NONE': 4301018, 'Mauritius_NONE': 1263473, 'Mexico_NONE': 127540423, 'Moldova_NONE': 3552000, 'Monaco_NONE': 38499, 'Mongolia_NONE': 3027398, 'Montenegro_NONE': 622781, 'Morocco_NONE': 35276786, 'Mozambique_NONE': 28829476, 'MS Zaandam_NONE': 4000, 'Namibia_NONE': 2479713, 'Nepal_NONE': 28982771, 'Netherlands_Aruba': 17018408, 'Netherlands_Bonaire, Sint Eustatius and Saba': 25000, 'Netherlands_Curacao': 17018408, 'Netherlands_Sint Maarten': 17018408, 'Netherlands_NONE': 17018408, 'New Zealand_NONE': 4692700, 'Nicaragua_NONE': 6149928, 'Niger_NONE': 20672987, 'Nigeria_NONE': 185989640, 'North Macedonia_NONE': 2081206, 'Norway_NONE': 5232929, 'Oman_NONE': 4424762, 'Pakistan_NONE': 193203476, 'Panama_NONE': 4034119, 'Papua New Guinea_NONE': 8084991, 'Paraguay_NONE': 6725308, 'Peru_NONE': 31773839, 'Philippines_NONE': 103320222, 'Poland_NONE': 37948016, 'Portugal_NONE': 10324611, 'Qatar_NONE': 2569804, 'Romania_NONE': 19705301, 'Russia_NONE': 143201676, 'Rwanda_NONE': 11917508, 'Saint Kitts and Nevis_NONE': 54821, 'Saint Lucia_NONE': 178015, 'Saint Vincent and the Grenadines_NONE': 109643, 'San Marino_NONE': 33203, 'Sao Tome and Principe_NONE': 211000, 'Saudi Arabia_NONE': 32275687, 'Senegal_NONE': 15411614, 'Serbia_NONE': 7057412, 'Seychelles_NONE': 94677, 'Sierra Leone_NONE': 7000000, 'Singapore_NONE': 5607283, 'Slovakia_NONE': 5428704, 'Slovenia_NONE': 2064845, 'Somalia_NONE': 14317996, 'South Africa_NONE': 55908865, 'South Sudan_NONE': 11000000, 'Spain_NONE': 46443959, 'Sri Lanka_NONE': 21203000, 'Sudan_NONE': 39578828, 'Suriname_NONE': 558368, 'Sweden_NONE': 9903122, 'Switzerland_NONE': 8372098, 'Syria_NONE': 18430453, 'Taiwan*_NONE': 23780452, 'Tanzania_NONE': 55572201, 'Thailand_NONE': 68863514, 'Timor-Leste_NONE': 1268671, 'Togo_NONE': 7606374, 'Trinidad and Tobago_NONE': 1364962, 'Tunisia_NONE': 11403248, 'Turkey_NONE': 79512426, 'US_Alabama': 4888949, 'US_Alaska': 738068, 'US_Arizona': 7123898, 'US_Arkansas': 3020327, 'US_California': 39776830, 'US_Colorado': 5684203, 'US_Connecticut': 3588683, 'US_Delaware': 971180, 'US_District of Columbia': 703608, 'US_Florida': 21312211, 'US_Georgia': 10545138, 'US_Guam': 162896, 'US_Hawaii': 1426393, 'US_Idaho': 1753860, 'US_Illinois': 12768320, 'US_Indiana': 6699629, 'US_Iowa': 3160553, 'US_Kansas': 2918515, 'US_Kentucky': 4472265, 'US_Louisiana': 4682509, 'US_Maine': 1341582, 
'US_Maryland': 6079602, 'US_Massachusetts': 6895917, 'US_Michigan': 9991177, 'US_Minnesota': 5628162, 'US_Mississippi': 2982785, 'US_Missouri': 6135888, 'US_Montana': 1062330, 'US_Nebraska': 1932549, 'US_Nevada': 3056824, 'US_New Hampshire': 1350575, 'US_New Jersey': 9032872, 'US_New Mexico': 2090708, 'US_New York': 19862512, 'US_North Carolina': 10390149, 'US_North Dakota': 755238, 'US_Ohio': 11694664, 'US_Oklahoma': 3940521, 'US_Oregon': 4199563, 'US_Pennsylvania': 12823989, 'US_Puerto Rico': 3411307, 'US_Rhode Island': 1061712, 'US_South Carolina': 5088916, 'US_South Dakota': 877790, 'US_Tennessee': 6782564, 'US_Texas': 28704330, 'US_Utah': 3159345, 'US_Vermont': 623960, 'US_Virgin Islands': 102951, 'US_Virginia': 8525660, 'US_Washington': 7530552, 'US_West Virginia': 1803077, 'US_Wisconsin': 5818049, 'US_Wyoming': 573720, 'Uganda_NONE': 41487965, 'Ukraine_NONE': 45004645, 'United Arab Emirates_NONE': 9269612, 'United Kingdom_Anguilla': 15000, 'United Kingdom_Bermuda': 65331, 'United Kingdom_British Virgin Islands': 32000, 'United Kingdom_Cayman Islands': 60765, 'United Kingdom_Channel Islands': 164541, 'United Kingdom_Falkland Islands (Malvinas)': 3500, 'United Kingdom_Gibraltar': 34408, 'United Kingdom_Isle of Man': 83737, 'United Kingdom_Montserrat': 5000, 'United Kingdom_NONE': 65637239, 'United Kingdom_Turks and Caicos Islands': 32000, 'Uruguay_NONE': 3201607, 'Uzbekistan_NONE': 31848200, 'Venezuela_NONE': 31568179, 'Vietnam_NONE': 92701100, 'West Bank and Gaza_NONE': 5000000, 'Western Sahara_NONE': 560000, 'Zambia_NONE': 16591390, 'Zimbabwe_NONE': 16150362}
def func0(x, a, b):
return a * x + b
def func1(x, a, b, c):
return a * np.exp(b * (x - c))
def func2(x, a2, b2, c2):
return a2 * 1 / (1 + np.exp(-b2 * (x - c2)))
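# Slice the train/test frames down to one Country_Region / Province_State pair, look up its
# population in popDict, and return the day counts plus the ConfirmedCases and Fatalities series.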
def getDataForLocation(df, k):
cty = dfCtyPrs.iloc[k]['Country_Region']
prs = dfCtyPrs.iloc[k]['Province_State']
pop = popDict[cty + '_' + prs]
dfSelectTrain = dfTrain[(dfTrain['Country_Region'] == cty) & (dfTrain['Province_State'] == prs)]
dfSelectTrain = dfSelectTrain[['dayCount', 'ConfirmedCases', 'Fatalities']]
    dfSelectTest = dfTest[(dfTest['Country_Region'] == cty) & (dfTest['Province_State'] == prs)]  # mask must be built from dfTest, not dfTrain
dfSelectTest = dfSelectTest[['dayCount']]
initPoint = 0
selectX = dfSelectTrain['dayCount'][initPoint:]
selectY1 = dfSelectTrain['ConfirmedCases'][initPoint:]
selectY2 = dfSelectTrain['Fatalities'][initPoint:]
xDataFitCC, yDataFitCC, yDataFitF = (None, None, None)
return (cty, prs, pop, selectX, selectY1, selectY2, xDataFitCC, yDataFitCC, yDataFitF)
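# Fit the logistic curve to ConfirmedCases and then to Fatalities for location k, retrying while
# skipping a growing number of leading near-zero days and keeping the lowest-RMSE fit of each series.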
def getFit(k):
cty, prs, pop, selectX, selectY1, selectY2, xDataFit, yDataFitCC, yDataFitF = getDataForLocation(dfTrain,k)
selectY = selectY1
bestRmse1 = np.inf
bestPopt1 = None
bestFit1 = None
numberOfLeadingZeros = np.where(selectY<np.max(selectY+1)/100)[0].shape[0]+1
for skipStart1 in range(numberOfLeadingZeros):
if cty == 'China':
popt, pcov = curve_fit(func2,selectX[skipStart1:],selectY[skipStart1:],p0=[selectY.to_numpy()[-1],0.3,0],bounds=[[selectY.to_numpy()[-1]-1,0,0],[selectY.to_numpy()[-1]+1,0.4,100]]) #,p0=poptBest
yDataFit01 = func2(xPoints, popt[0], popt[1], popt[2])
elif cty == 'Diamond Princess':
popt, pcov = curve_fit(func2,selectX[skipStart1:],selectY[skipStart1:],p0=[selectY.to_numpy()[-1],0.4,25],bounds=[[selectY.to_numpy()[-1]-1,0.3,20],[selectY.to_numpy()[-1]+1,0.5,30]]) #,p0=poptBest
yDataFit01 = func2(xPoints, popt[0], popt[1], popt[2])
else:
popt, pcov = curve_fit(func2,selectX[skipStart1:],selectY[skipStart1:],p0=[0.01*pop,0.3,50],bounds=[[0.01*pop-1,0,0],[0.01*pop+1,0.4,100]]) #,p0=poptBest
yDataFit01 = func2(xPoints, popt[0], popt[1], popt[2])
rmse = np.sqrt(mean_squared_error(yDataFit01[:len(selectY)],selectY))
if rmse < bestRmse1:
bestPopt1 = popt
bestRmse1 = rmse
bestFit1 = yDataFit01
selectY = selectY2
bestRmse2 = np.inf
bestPopt2 = None
bestFit2 = None
numberOfLeadingZeros = np.where(selectY<np.max(selectY+1)/100)[0].shape[0]
for skipStart2 in range(numberOfLeadingZeros):
mortality = 0.05
if cty == 'China':
popt, pcov = curve_fit(func2,selectX[skipStart2:],selectY[skipStart2:],p0=[selectY.to_numpy()[-1],0.3,10],bounds=[[selectY.to_numpy()[-1]-1,0,0],[selectY.to_numpy()[-1]+1,0.4,100]]) #,p0=poptBest
yDataFit02 = func2(xPoints, popt[0], popt[1], popt[2])
elif cty == 'Diamond Princess':
popt, pcov = curve_fit(func2,selectX[skipStart2:],selectY[skipStart2:],p0=[selectY.to_numpy()[-1],0.1,45],bounds=[[selectY.to_numpy()[-1]-1,0,40],[selectY.to_numpy()[-1]+1,0.4,50]]) #,p0=poptBest
yDataFit02 = func2(xPoints, popt[0], popt[1], popt[2])
else:
popt, pcov = curve_fit(func2,selectX[skipStart2:],selectY[skipStart2:],p0=[mortality*bestFit1[-1],0.3,50],bounds=[[mortality*bestFit1[-1]-1,0,0],[mortality*bestFit1[-1]+1,0.4,100]]) #,p0=poptBest
yDataFit02 = func2(xPoints, popt[0], popt[1], popt[2])
rmse = np.sqrt(mean_squared_error(yDataFit02[:len(selectY)],selectY))
if rmse < bestRmse2:
bestPopt2 = popt
bestRmse2 = rmse
bestFit2 = yDataFit02
print(k, cty, prs, bestPopt1,bestRmse1)
print(k, cty, prs, bestPopt2,bestRmse2)
    if False:  # flip to True to draw per-location diagnostic plots
fig, ax = plt.subplots(1,4, figsize=(20,3))
ax[0].plot(selectX,selectY1,marker='+',linewidth=0,color='red')
ax[0].plot(xPoints,yDataFit01,linewidth=0.5,color='red')
ax[0].text(0,0,str(k)+' '+cty+'/'+prs, fontsize=12)
ax[0].grid(True)
ax[0].set_ylim(0,2*np.max(selectY1));
ax[1].plot(selectX,selectY1,marker='+',linewidth=0,color='red')
ax[1].plot(xPoints,yDataFit01,linewidth=0.5,color='red')
ax[1].text(0,0,pop, fontsize=12)
ax[1].grid(True)
ax[2].plot(selectX,selectY2,marker='+',linewidth=0,color='blue')
ax[2].plot(xPoints,yDataFit02,linewidth=0.5,color='blue')
ax[2].grid(True)
ax[2].set_ylim(0,2*np.max(selectY2));
ax[3].plot(selectX,selectY2,marker='+',linewidth=0,color='blue')
ax[3].plot(xPoints,yDataFit02,linewidth=0.5,color='blue')
ax[3].grid(True)
return cty, prs, pop, selectX, selectY1, selectY2, xPoints, yDataFit01, yDataFit02
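# Assemble the submission arrays: keep the observed values for days 72-81 and append the fitted
# logistic values for days 82-114, for every Country_Region / Province_State pair.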
outCC = np.array([])
outF = np.array([])
dataStorage = []
n = dfCtyPrs.shape[0]
for k in range(n):
cty, prs, pop, selectX, selectY1, selectY2, xDataFit, yDataFitCC, yDataFitF = getFit(k)
outCC = np.concatenate((outCC, np.around(selectY1[72:82]).astype(int)))
outCC = np.concatenate((outCC, np.around(yDataFitCC[82:115]).astype(int)))
outF = np.concatenate((outF, np.around(selectY2[72:82]).astype(int)))
outF = np.concatenate((outF, np.around(yDataFitF[82:115]).astype(int)))
dataStorage.append([selectX, selectY1, selectY2, xDataFit, yDataFitCC, yDataFitF]) | code |
32068582/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
50221500/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/train.csv')
test_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/test.csv')
test_df.head() | code |
50221500/cell_1 | [
"text_plain_output_1.png"
] | import os
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
50221500/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/train.csv')
test_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/test.csv')
gender_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/gender_submission.csv')
gender_df.head() | code |
50221500/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/titanic-machine-learning-from-disaster/train.csv')
train_df.head() | code |
18143939/cell_13 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import bs4
import pandas as pd
import requests
r1 = requests.get('https://www.washingtonpost.com/politics/2019/07/31/transcript-first-night-second-democratic-debate')
r2 = requests.get('https://www.washingtonpost.com/politics/2019/08/01/transcript-night-second-democratic-debate/')
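# Split the transcript paragraphs into speaker-attributed utterances: a leading "NAME:" token starts
# a new utterance, otherwise the paragraph is treated as a continuation of the current speaker.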
def parse_requests(r, night=None):
soup = bs4.BeautifulSoup(r.content)
graphs = soup.find_all('p')
utterances = [x.get_text() for x in graphs if 'data-elm-loc' in x.attrs.keys()]
utterances = utterances[2:]
seq = 0
data = []
for i in utterances:
i = i.replace('DE BLASIO:', 'DEBLASIO:')
graph = i.split()
if graph[0][-1] == ':':
text = ' '.join(graph[1:])
num_words = len(graph) - 1
name = graph[0][:-1]
seq += 1
elif len(graph) > 1 and graph[1] == '(?):':
text = ' '.join(graph[2:])
num_words = len(graph) - 2
name = graph[0]
seq += 1
else:
text = ' '.join(graph)
data.append({'name': name, 'graph': text, 'seq': seq, 'num_words': num_words, 'night': night})
return data
data = parse_requests(r1, night=0) + parse_requests(r2, night=1)
df = pd.DataFrame(data)
df.name.unique()
df = df[df.name != '(UNKNOWN)']
df['name'] = df['name'].apply(lambda x: ''.join([char for char in x if char.isalpha()]))
df = df[df.name != 'PROTESTOR']
df.name.unique()
# Example quick plotting
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
words_freq_plot = df.groupby('name').sum()['num_words'].plot(
kind='bar', figsize=(8, 4)
);
words_freq_plot.set_ylabel('Words Spoken')
words_freq_plot.set_title("Candidate Approx Word Totals");
df['graph'] = df.groupby('seq')['graph'].transform(' '.join)
df = df[['graph', 'seq', 'name', 'night']].drop_duplicates()
df.name.unique() | code |
18143939/cell_9 | [
"image_output_1.png"
] | import bs4
import requests
r1 = requests.get('https://www.washingtonpost.com/politics/2019/07/31/transcript-first-night-second-democratic-debate')
r2 = requests.get('https://www.washingtonpost.com/politics/2019/08/01/transcript-night-second-democratic-debate/')
def parse_requests(r, night=None):
soup = bs4.BeautifulSoup(r.content)
graphs = soup.find_all('p')
utterances = [x.get_text() for x in graphs if 'data-elm-loc' in x.attrs.keys()]
utterances = utterances[2:]
seq = 0
data = []
for i in utterances:
i = i.replace('DE BLASIO:', 'DEBLASIO:')
graph = i.split()
if graph[0][-1] == ':':
text = ' '.join(graph[1:])
num_words = len(graph) - 1
name = graph[0][:-1]
seq += 1
elif len(graph) > 1 and graph[1] == '(?):':
text = ' '.join(graph[2:])
num_words = len(graph) - 2
name = graph[0]
seq += 1
else:
text = ' '.join(graph)
data.append({'name': name, 'graph': text, 'seq': seq, 'num_words': num_words, 'night': night})
return data
data = parse_requests(r1, night=0) + parse_requests(r2, night=1)
data[:5]  # parse_requests returns a plain list, so slice it to preview the first utterances | code |
18143939/cell_20 | [
"text_html_output_1.png"
] | from nltk.stem.porter import PorterStemmer
from wordcloud import WordCloud
import bs4
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import nltk
import pandas as pd
import requests
import sklearn.feature_extraction.text as skt
r1 = requests.get('https://www.washingtonpost.com/politics/2019/07/31/transcript-first-night-second-democratic-debate')
r2 = requests.get('https://www.washingtonpost.com/politics/2019/08/01/transcript-night-second-democratic-debate/')
def parse_requests(r, night=None):
soup = bs4.BeautifulSoup(r.content)
graphs = soup.find_all('p')
utterances = [x.get_text() for x in graphs if 'data-elm-loc' in x.attrs.keys()]
utterances = utterances[2:]
seq = 0
data = []
for i in utterances:
i = i.replace('DE BLASIO:', 'DEBLASIO:')
graph = i.split()
if graph[0][-1] == ':':
text = ' '.join(graph[1:])
num_words = len(graph) - 1
name = graph[0][:-1]
seq += 1
elif len(graph) > 1 and graph[1] == '(?):':
text = ' '.join(graph[2:])
num_words = len(graph) - 2
name = graph[0]
seq += 1
else:
text = ' '.join(graph)
data.append({'name': name, 'graph': text, 'seq': seq, 'num_words': num_words, 'night': night})
return data
data = parse_requests(r1, night=0) + parse_requests(r2, night=1)
df = pd.DataFrame(data)
df.name.unique()
df = df[df.name != '(UNKNOWN)']
df['name'] = df['name'].apply(lambda x: ''.join([char for char in x if char.isalpha()]))
df = df[df.name != 'PROTESTOR']
df.name.unique()
# Example quick plotting
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
words_freq_plot = df.groupby('name').sum()['num_words'].plot(
kind='bar', figsize=(8, 4)
);
words_freq_plot.set_ylabel('Words Spoken')
words_freq_plot.set_title("Candidate Approx Word Totals");
df['graph'] = df.groupby('seq')['graph'].transform(' '.join)
df = df[['graph', 'seq', 'name', 'night']].drop_duplicates()
df.name.unique()
import numpy as np
import sklearn.feature_extraction.text as skt
from wordcloud import WordCloud
import nltk
from nltk import word_tokenize
from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()
def stem_tokens(tokens, stemmer):
stemmed = []
for item in tokens:
stemmed.append(stemmer.stem(item))
return stemmed
def tokenize(text):
tokens = nltk.word_tokenize(text)
return tokens
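# Fit a TF-IDF vectorizer on one candidate's utterances, rank terms by their weight in the first
# document, and render the top-n terms as a word cloud.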
def topwords_candidate(candidate_name, n):
vectorizer = skt.TfidfVectorizer(stop_words='english', tokenizer=tokenize)
X = vectorizer.fit_transform(df[df['name'] == candidate_name]['graph'])
feature_names = vectorizer.get_feature_names()
doc = 0
feature_index = X[doc, :].nonzero()[1]
tfidf_scores = zip(feature_index, [X[doc, x] for x in feature_index])
scored_features = sorted([(feature_names[i], s) for i, s in tfidf_scores], key=lambda x: x[1])
data = scored_features[-n:]
wordcloud = WordCloud().generate(' '.join([x[0] for x in data][::-1]))
import matplotlib.pyplot as plt
plt.axis('off')
return (data, wordcloud)
import matplotlib.pyplot as plt
figs, axs = plt.subplots(4, 5, figsize=(24, 8))
figs.suptitle("""Top TF-IDF Weighted Words in Opening Speeches, by Candidate""",
fontsize=24)
candidates = list(filter(lambda x: x not in ['BASH', 'TAPPER', 'LEMON'], df.name.unique()))
for k in range(4):
for i in range(5):
mod = k*5
axs[k][i].imshow(topwords_candidate(candidates[i+mod], 10)[1])
axs[k][i].axis('off')
axs[k][i].set_title(candidates[i+mod], fontsize=16)
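# Candidate similarity: join each speaker's full text, TF-IDF-vectorize the corpus, and use the
# cosine-similarity matrix (tfidf * tfidf.T) to scatter speakers coloured by moderator / tier.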
def get_all_text(candidate):
all_docs = df[df.name == candidate]['graph']
all_docs = ' '.join(all_docs.values)
return all_docs
corpus = [get_all_text(cand) for cand in df.name.unique()]
vect = skt.TfidfVectorizer(min_df=1)
tfidf = vect.fit_transform(corpus)
distance_matrix = (tfidf * tfidf.T).A
def colorval_name(k):
if k in ['BASH', 'TAPPER', 'LEMON']:
return ('r', 'CNN')
elif k in ['SANDERS', 'WARREN', 'HARRIS', 'BIDEN']:
return ('b', 'Tier 1')
else:
return ('g', 'Tier 2')
drawn_labels = []
for i, name in enumerate(df.name.unique()):
c, label = colorval_name(name)
plt.scatter(distance_matrix[i, 0], distance_matrix[i, 1], c=c, label=label if label not in drawn_labels else '')
drawn_labels.append(label)
plt.title('Candidate Similarity', fontsize=20)
legend = plt.legend(loc='lower right') | code |
18143939/cell_6 | [
"text_plain_output_1.png"
] | import bs4
import pandas as pd
import requests
r1 = requests.get('https://www.washingtonpost.com/politics/2019/07/31/transcript-first-night-second-democratic-debate')
r2 = requests.get('https://www.washingtonpost.com/politics/2019/08/01/transcript-night-second-democratic-debate/')
def parse_requests(r, night=None):
soup = bs4.BeautifulSoup(r.content)
graphs = soup.find_all('p')
utterances = [x.get_text() for x in graphs if 'data-elm-loc' in x.attrs.keys()]
utterances = utterances[2:]
seq = 0
data = []
for i in utterances:
i = i.replace('DE BLASIO:', 'DEBLASIO:')
graph = i.split()
if graph[0][-1] == ':':
text = ' '.join(graph[1:])
num_words = len(graph) - 1
name = graph[0][:-1]
seq += 1
elif len(graph) > 1 and graph[1] == '(?):':
text = ' '.join(graph[2:])
num_words = len(graph) - 2
name = graph[0]
seq += 1
else:
text = ' '.join(graph)
data.append({'name': name, 'graph': text, 'seq': seq, 'num_words': num_words, 'night': night})
return data
data = parse_requests(r1, night=0) + parse_requests(r2, night=1)
df = pd.DataFrame(data)
df.name.unique() | code |
18143939/cell_7 | [
"image_output_1.png"
] | import bs4
import pandas as pd
import requests
r1 = requests.get('https://www.washingtonpost.com/politics/2019/07/31/transcript-first-night-second-democratic-debate')
r2 = requests.get('https://www.washingtonpost.com/politics/2019/08/01/transcript-night-second-democratic-debate/')
def parse_requests(r, night=None):
soup = bs4.BeautifulSoup(r.content)
graphs = soup.find_all('p')
utterances = [x.get_text() for x in graphs if 'data-elm-loc' in x.attrs.keys()]
utterances = utterances[2:]
seq = 0
data = []
for i in utterances:
i = i.replace('DE BLASIO:', 'DEBLASIO:')
graph = i.split()
if graph[0][-1] == ':':
text = ' '.join(graph[1:])
num_words = len(graph) - 1
name = graph[0][:-1]
seq += 1
elif len(graph) > 1 and graph[1] == '(?):':
text = ' '.join(graph[2:])
num_words = len(graph) - 2
name = graph[0]
seq += 1
else:
text = ' '.join(graph)
data.append({'name': name, 'graph': text, 'seq': seq, 'num_words': num_words, 'night': night})
return data
data = parse_requests(r1, night=0) + parse_requests(r2, night=1)
df = pd.DataFrame(data)
df.name.unique()
df = df[df.name != '(UNKNOWN)']
df['name'] = df['name'].apply(lambda x: ''.join([char for char in x if char.isalpha()]))
df = df[df.name != 'PROTESTOR']
df.name.unique() | code |
18143939/cell_17 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from nltk.stem.porter import PorterStemmer
from wordcloud import WordCloud
import bs4
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import nltk
import pandas as pd
import requests
import sklearn.feature_extraction.text as skt
r1 = requests.get('https://www.washingtonpost.com/politics/2019/07/31/transcript-first-night-second-democratic-debate')
r2 = requests.get('https://www.washingtonpost.com/politics/2019/08/01/transcript-night-second-democratic-debate/')
def parse_requests(r, night=None):
soup = bs4.BeautifulSoup(r.content)
graphs = soup.find_all('p')
utterances = [x.get_text() for x in graphs if 'data-elm-loc' in x.attrs.keys()]
utterances = utterances[2:]
seq = 0
data = []
for i in utterances:
i = i.replace('DE BLASIO:', 'DEBLASIO:')
graph = i.split()
if graph[0][-1] == ':':
text = ' '.join(graph[1:])
num_words = len(graph) - 1
name = graph[0][:-1]
seq += 1
elif len(graph) > 1 and graph[1] == '(?):':
text = ' '.join(graph[2:])
num_words = len(graph) - 2
name = graph[0]
seq += 1
else:
text = ' '.join(graph)
data.append({'name': name, 'graph': text, 'seq': seq, 'num_words': num_words, 'night': night})
return data
data = parse_requests(r1, night=0) + parse_requests(r2, night=1)
df = pd.DataFrame(data)
df.name.unique()
df = df[df.name != '(UNKNOWN)']
df['name'] = df['name'].apply(lambda x: ''.join([char for char in x if char.isalpha()]))
df = df[df.name != 'PROTESTOR']
df.name.unique()
# Example quick plotting
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
words_freq_plot = df.groupby('name').sum()['num_words'].plot(
kind='bar', figsize=(8, 4)
);
words_freq_plot.set_ylabel('Words Spoken')
words_freq_plot.set_title("Candidate Approx Word Totals");
df['graph'] = df.groupby('seq')['graph'].transform(' '.join)
df = df[['graph', 'seq', 'name', 'night']].drop_duplicates()
df.name.unique()
import numpy as np
import sklearn.feature_extraction.text as skt
from wordcloud import WordCloud
import nltk
from nltk import word_tokenize
from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()
def stem_tokens(tokens, stemmer):
stemmed = []
for item in tokens:
stemmed.append(stemmer.stem(item))
return stemmed
def tokenize(text):
tokens = nltk.word_tokenize(text)
return tokens
def topwords_candidate(candidate_name, n):
vectorizer = skt.TfidfVectorizer(stop_words='english', tokenizer=tokenize)
X = vectorizer.fit_transform(df[df['name'] == candidate_name]['graph'])
feature_names = vectorizer.get_feature_names()
doc = 0
feature_index = X[doc, :].nonzero()[1]
tfidf_scores = zip(feature_index, [X[doc, x] for x in feature_index])
scored_features = sorted([(feature_names[i], s) for i, s in tfidf_scores], key=lambda x: x[1])
data = scored_features[-n:]
wordcloud = WordCloud().generate(' '.join([x[0] for x in data][::-1]))
import matplotlib.pyplot as plt
plt.axis('off')
return (data, wordcloud)
import matplotlib.pyplot as plt
figs, axs = plt.subplots(4, 5, figsize=(24, 8))
figs.suptitle('Top TF-IDF Weighted Words in Opening Speeches, by Candidate', fontsize=24)
candidates = list(filter(lambda x: x not in ['BASH', 'TAPPER', 'LEMON'], df.name.unique()))
for k in range(4):
for i in range(5):
mod = k * 5
axs[k][i].imshow(topwords_candidate(candidates[i + mod], 10)[1])
axs[k][i].axis('off')
axs[k][i].set_title(candidates[i + mod], fontsize=16) | code |
18143939/cell_10 | [
"text_plain_output_1.png"
] | import bs4
import pandas as pd
import requests
r1 = requests.get('https://www.washingtonpost.com/politics/2019/07/31/transcript-first-night-second-democratic-debate')
r2 = requests.get('https://www.washingtonpost.com/politics/2019/08/01/transcript-night-second-democratic-debate/')
def parse_requests(r, night=None):
soup = bs4.BeautifulSoup(r.content)
graphs = soup.find_all('p')
utterances = [x.get_text() for x in graphs if 'data-elm-loc' in x.attrs.keys()]
utterances = utterances[2:]
seq = 0
data = []
for i in utterances:
i = i.replace('DE BLASIO:', 'DEBLASIO:')
graph = i.split()
if graph[0][-1] == ':':
text = ' '.join(graph[1:])
num_words = len(graph) - 1
name = graph[0][:-1]
seq += 1
elif len(graph) > 1 and graph[1] == '(?):':
text = ' '.join(graph[2:])
num_words = len(graph) - 2
name = graph[0]
seq += 1
else:
text = ' '.join(graph)
data.append({'name': name, 'graph': text, 'seq': seq, 'num_words': num_words, 'night': night})
return data
data = parse_requests(r1, night=0) + parse_requests(r2, night=1)
df = pd.DataFrame(data)
df.name.unique()
df = df[df.name != '(UNKNOWN)']
df['name'] = df['name'].apply(lambda x: ''.join([char for char in x if char.isalpha()]))
df = df[df.name != 'PROTESTOR']
df.name.unique()
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
words_freq_plot = df.groupby('name').sum()['num_words'].plot(kind='bar', figsize=(8, 4))
words_freq_plot.set_ylabel('Words Spoken')
words_freq_plot.set_title('Candidate Approx Word Totals') | code |
18143939/cell_12 | [
"text_plain_output_1.png"
] | import bs4
import pandas as pd
import requests
r1 = requests.get('https://www.washingtonpost.com/politics/2019/07/31/transcript-first-night-second-democratic-debate')
r2 = requests.get('https://www.washingtonpost.com/politics/2019/08/01/transcript-night-second-democratic-debate/')
def parse_requests(r, night=None):
soup = bs4.BeautifulSoup(r.content)
graphs = soup.find_all('p')
utterances = [x.get_text() for x in graphs if 'data-elm-loc' in x.attrs.keys()]
utterances = utterances[2:]
seq = 0
data = []
for i in utterances:
i = i.replace('DE BLASIO:', 'DEBLASIO:')
graph = i.split()
if graph[0][-1] == ':':
text = ' '.join(graph[1:])
num_words = len(graph) - 1
name = graph[0][:-1]
seq += 1
elif len(graph) > 1 and graph[1] == '(?):':
text = ' '.join(graph[2:])
num_words = len(graph) - 2
name = graph[0]
seq += 1
else:
text = ' '.join(graph)
data.append({'name': name, 'graph': text, 'seq': seq, 'num_words': num_words, 'night': night})
return data
data = parse_requests(r1, night=0) + parse_requests(r2, night=1)
df = pd.DataFrame(data)
df.name.unique()
df = df[df.name != '(UNKNOWN)']
df['name'] = df['name'].apply(lambda x: ''.join([char for char in x if char.isalpha()]))
df = df[df.name != 'PROTESTOR']
df.name.unique()
# Example quick plotting
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
words_freq_plot = df.groupby('name').sum()['num_words'].plot(
kind='bar', figsize=(8, 4)
);
words_freq_plot.set_ylabel('Words Spoken')
words_freq_plot.set_title("Candidate Approx Word Totals");
df['graph'] = df.groupby('seq')['graph'].transform(' '.join)
df = df[['graph', 'seq', 'name', 'night']].drop_duplicates()
df.head() | code |
105198632/cell_4 | [
"text_plain_output_1.png"
] | j = 0
for i in range(1, 31):
j = j + i
i = 0
j = 0
while i <= 30:
j = j + i
i = i + 1
print(j) | code |
105198632/cell_2 | [
"text_plain_output_1.png"
] | j = 0
for i in range(1, 31):
j = j + i
print(j) | code |
309674/cell_3 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from subprocess import check_output
comments = pd.read_csv('../input/comment.csv')
likes = pd.read_csv('../input/like.csv')
members = pd.read_csv('../input/member.csv')
posts = pd.read_csv('../input/post.csv')
likeResponse = pd.merge(likes.loc[likes['gid'] == 117291968282998], posts.loc[posts['gid'] == 117291968282998, ['pid', 'name']], left_on='pid', right_on='pid')
result = likeResponse.groupby(['name_y', 'name_x'])['response'].count()
finalResult = pd.DataFrame(result.index.values, columns=['NameCombo'])
finalResult['Weight'] = result.values
finalResult['From'] = finalResult['NameCombo'].map(lambda x: x[0])
finalResult['To'] = finalResult['NameCombo'].map(lambda x: x[1])
del finalResult['NameCombo']
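# Build an undirected graph linking post authors to the members who liked their posts;
# node size in the plot scales with each node's degree.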
g = nx.Graph()
plt.figure()
g.add_edges_from([(row['From'], row['To']) for index, row in finalResult.iterrows()])
d = dict(nx.degree(g))  # materialise as a dict so .keys()/.values() work across networkx versions
spring_pos = nx.spring_layout(g)
plt.axis('off')
nx.draw_networkx(g, spring_pos, with_labels=False, nodelist=d.keys(), node_size=[v * 10 for v in d.values()])
plt.savefig('LIKE_PLOT_GROUP1.png')
plt.clf() | code |
74045780/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib as plt
import os
df = pd.read_csv('../input/amrc-data/Efaecium_AMRC.csv')
df.dtypes
df.shape
df.AMR.value_counts(ascending=True)
df.CRISPR_Cas.value_counts(ascending=True) | code |
74045780/cell_4 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib as plt
import os
df = pd.read_csv('../input/amrc-data/Efaecium_AMRC.csv')
df.dtypes | code |
74045780/cell_6 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib as plt
import os
df = pd.read_csv('../input/amrc-data/Efaecium_AMRC.csv')
df.dtypes
df.shape
df.AMR.plot() | code |
74045780/cell_2 | [
"text_html_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib as plt
import os
df = pd.read_csv('../input/amrc-data/Efaecium_AMRC.csv')
df.head() | code |
74045780/cell_1 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
df = pd.read_csv('../input/amrc-data/Efaecium_AMRC.csv') | code |
74045780/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib as plt
import os
df = pd.read_csv('../input/amrc-data/Efaecium_AMRC.csv')
df.dtypes
df.shape
df.AMR.value_counts(ascending=True) | code |
74045780/cell_8 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib as plt
import os
df = pd.read_csv('../input/amrc-data/Efaecium_AMRC.csv')
df.dtypes
df.shape
df.AMR.value_counts(ascending=True)
df.CRISPR_Cas.plot() | code |
74045780/cell_3 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib as plt
import os
df = pd.read_csv('../input/amrc-data/Efaecium_AMRC.csv')
df.describe() | code |
74045780/cell_10 | [
"text_html_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib as plt
import os
df = pd.read_csv('../input/amrc-data/Efaecium_AMRC.csv')
df.dtypes
df.shape
df.AMR.value_counts(ascending=True)
df.CRISPR_Cas.value_counts(ascending=True)
df.boxplot() | code |
74045780/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib as plt
import os
df = pd.read_csv('../input/amrc-data/Efaecium_AMRC.csv')
df.dtypes
df.shape | code |
128044853/cell_2 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import nltk
import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import nltk
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
import joblib
from spacy.util import minibatch, compounding
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from nltk.tokenize import sent_tokenize
from nltk.sentiment import SentimentIntensityAnalyzer
import multiprocessing
from tqdm import tqdm
from sklearn.model_selection import KFold
le = LabelEncoder()
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
128044853/cell_7 | [
"text_html_output_1.png"
] | from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
import joblib
import lightgbm as lgb
import nltk
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import nltk
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
import joblib
from spacy.util import minibatch, compounding
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from nltk.tokenize import sent_tokenize
from nltk.sentiment import SentimentIntensityAnalyzer
import multiprocessing
from tqdm import tqdm
from sklearn.model_selection import KFold
le = LabelEncoder()
import os
df = pd.read_csv('/kaggle/input/WELFakeAdditional/final (1).csv')
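# Count the nouns, verbs and adjectives in a piece of text from its NLTK part-of-speech tags.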
def get_pos_counts(text):
tokens = nltk.word_tokenize(text)
pos_tags = nltk.pos_tag(tokens)
pos_counts = {'NN': 0, 'VB': 0, 'JJ': 0}
for word, tag in pos_tags:
if tag.startswith('NN'):
pos_counts['NN'] += 1
elif tag.startswith('VB'):
pos_counts['VB'] += 1
elif tag.startswith('JJ'):
pos_counts['JJ'] += 1
return (pos_counts['NN'], pos_counts['VB'], pos_counts['JJ'])
df = pd.read_csv('/kaggle/input/WELFakeAdditional/final (1).csv')
df.drop('sentiment', axis=1, inplace=True)
df.drop('subject', axis=1, inplace=True)
df.drop('date', axis=1, inplace=True)
df.drop('subject.1', axis=1, inplace=True)
df.drop('date.1', axis=1, inplace=True)
df.dropna(inplace=True)
tqdm.pandas(desc='Processing title column')
df[['title_nouns', 'title_verbs', 'title_adjectives']] = df['title'].progress_apply(get_pos_counts).apply(pd.Series)
tqdm.pandas(desc='Processing text column')
df[['text_nouns', 'text_verbs', 'text_adjectives']] = df['text'].progress_apply(get_pos_counts).apply(pd.Series)
df.drop('text', axis=1, inplace=True)
df.drop('title', axis=1, inplace=True)
train_data = df.sample(frac=0.8, random_state=42)
val_data = df.drop(train_data.index)
X = df.drop('is_fake', axis=1)
y = df['is_fake']
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
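# Bagged LightGBM: train one binary classifier per KFold split, then average the ten models'
# predicted probabilities and threshold at 0.5.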
num_models = 10
params = {'objective': 'binary', 'metric': 'binary_logloss', 'num_leaves': 31, 'learning_rate': 0.05, 'min_child_samples': 20, 'feature_fraction': 0.8}
kf = KFold(n_splits=num_models, shuffle=True, random_state=42)
models = []
for i, (train_idx, val_idx) in enumerate(kf.split(X)):
X_train, X_val = (X.iloc[train_idx], X.iloc[val_idx])
y_train, y_val = (y.iloc[train_idx], y.iloc[val_idx])
train_set = lgb.Dataset(X_train, y_train)
val_set = lgb.Dataset(X_val, y_val)
model = lgb.train(params, train_set, num_boost_round=1000, early_stopping_rounds=100, valid_sets=[val_set])
models.append(model)
y_pred = np.zeros((len(X_val), num_models))
for i, model in enumerate(models):
y_pred[:, i] = model.predict(X_val)
y_pred_avg = np.mean(y_pred, axis=1)
y_pred_binary = [1 if pred >= 0.5 else 0 for pred in y_pred_avg]
accuracy = sum(y_pred_binary == y_val) / len(y_val)
joblib.dump(models, 'models.joblib')
joblib.dump(le, 'label_encoder.joblib') | code |
128044853/cell_8 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
import lightgbm as lgb
import nltk
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import nltk
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
import joblib
from spacy.util import minibatch, compounding
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from nltk.tokenize import sent_tokenize
from nltk.sentiment import SentimentIntensityAnalyzer
import multiprocessing
from tqdm import tqdm
from sklearn.model_selection import KFold
le = LabelEncoder()
import os
df = pd.read_csv('/kaggle/input/WELFakeAdditional/final (1).csv')
def get_pos_counts(text):
tokens = nltk.word_tokenize(text)
pos_tags = nltk.pos_tag(tokens)
pos_counts = {'NN': 0, 'VB': 0, 'JJ': 0}
for word, tag in pos_tags:
if tag.startswith('NN'):
pos_counts['NN'] += 1
elif tag.startswith('VB'):
pos_counts['VB'] += 1
elif tag.startswith('JJ'):
pos_counts['JJ'] += 1
return (pos_counts['NN'], pos_counts['VB'], pos_counts['JJ'])
df = pd.read_csv('/kaggle/input/WELFakeAdditional/final (1).csv')
df.drop('sentiment', axis=1, inplace=True)
df.drop('subject', axis=1, inplace=True)
df.drop('date', axis=1, inplace=True)
df.drop('subject.1', axis=1, inplace=True)
df.drop('date.1', axis=1, inplace=True)
df.dropna(inplace=True)
tqdm.pandas(desc='Processing title column')
df[['title_nouns', 'title_verbs', 'title_adjectives']] = df['title'].progress_apply(get_pos_counts).apply(pd.Series)
tqdm.pandas(desc='Processing text column')
df[['text_nouns', 'text_verbs', 'text_adjectives']] = df['text'].progress_apply(get_pos_counts).apply(pd.Series)
df.drop('text', axis=1, inplace=True)
df.drop('title', axis=1, inplace=True)
train_data = df.sample(frac=0.8, random_state=42)
val_data = df.drop(train_data.index)
X = df.drop('is_fake', axis=1)
y = df['is_fake']
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
num_models = 10
params = {'objective': 'binary', 'metric': 'binary_logloss', 'num_leaves': 31, 'learning_rate': 0.05, 'min_child_samples': 20, 'feature_fraction': 0.8}
kf = KFold(n_splits=num_models, shuffle=True, random_state=42)
models = []
for i, (train_idx, val_idx) in enumerate(kf.split(X)):
X_train, X_val = (X.iloc[train_idx], X.iloc[val_idx])
y_train, y_val = (y.iloc[train_idx], y.iloc[val_idx])
train_set = lgb.Dataset(X_train, y_train)
val_set = lgb.Dataset(X_val, y_val)
model = lgb.train(params, train_set, num_boost_round=1000, early_stopping_rounds=100, valid_sets=[val_set])
models.append(model)
y_pred = np.zeros((len(X_val), num_models))
for i, model in enumerate(models):
y_pred[:, i] = model.predict(X_val)
y_pred_avg = np.mean(y_pred, axis=1)
y_pred_binary = [1 if pred >= 0.5 else 0 for pred in y_pred_avg]
accuracy = sum(y_pred_binary == y_val) / len(y_val)
df | code |
128044853/cell_10 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
from nltk.sentiment import SentimentIntensityAnalyzer
from nltk.tokenize import sent_tokenize
from nltk.tokenize import word_tokenize
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
import joblib
import lightgbm as lgb
import nltk
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import nltk
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
# extra resources used by preprocess(): stopwords, NE chunking, VADER sentiment
nltk.download('stopwords')
nltk.download('maxent_ne_chunker')
nltk.download('words')
nltk.download('vader_lexicon')
import joblib
from spacy.util import minibatch, compounding
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from nltk.tokenize import sent_tokenize
from nltk.sentiment import SentimentIntensityAnalyzer
import multiprocessing
from tqdm import tqdm
from sklearn.model_selection import KFold
le = LabelEncoder()
import os
df = pd.read_csv('/kaggle/input/WELFakeAdditional/final (1).csv')
def get_pos_counts(text):
tokens = nltk.word_tokenize(text)
pos_tags = nltk.pos_tag(tokens)
pos_counts = {'NN': 0, 'VB': 0, 'JJ': 0}
for word, tag in pos_tags:
if tag.startswith('NN'):
pos_counts['NN'] += 1
elif tag.startswith('VB'):
pos_counts['VB'] += 1
elif tag.startswith('JJ'):
pos_counts['JJ'] += 1
return (pos_counts['NN'], pos_counts['VB'], pos_counts['JJ'])
df = pd.read_csv('/kaggle/input/WELFakeAdditional/final (1).csv')
df.drop('sentiment', axis=1, inplace=True)
df.drop('subject', axis=1, inplace=True)
df.drop('date', axis=1, inplace=True)
df.drop('subject.1', axis=1, inplace=True)
df.drop('date.1', axis=1, inplace=True)
df.dropna(inplace=True)
tqdm.pandas(desc='Processing title column')
df[['title_nouns', 'title_verbs', 'title_adjectives']] = df['title'].progress_apply(get_pos_counts).apply(pd.Series)
tqdm.pandas(desc='Processing text column')
df[['text_nouns', 'text_verbs', 'text_adjectives']] = df['text'].progress_apply(get_pos_counts).apply(pd.Series)
df.drop('text', axis=1, inplace=True)
df.drop('title', axis=1, inplace=True)
train_data = df.sample(frac=0.8, random_state=42)
val_data = df.drop(train_data.index)
X = df.drop('is_fake', axis=1)
y = df['is_fake']
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
num_models = 10
params = {'objective': 'binary', 'metric': 'binary_logloss', 'num_leaves': 31, 'learning_rate': 0.05, 'min_child_samples': 20, 'feature_fraction': 0.8}
kf = KFold(n_splits=num_models, shuffle=True, random_state=42)
models = []
for i, (train_idx, val_idx) in enumerate(kf.split(X)):
X_train, X_val = (X.iloc[train_idx], X.iloc[val_idx])
y_train, y_val = (y.iloc[train_idx], y.iloc[val_idx])
train_set = lgb.Dataset(X_train, y_train)
val_set = lgb.Dataset(X_val, y_val)
model = lgb.train(params, train_set, num_boost_round=1000, early_stopping_rounds=100, valid_sets=[val_set])
models.append(model)
y_pred = np.zeros((len(X_val), num_models))
for i, model in enumerate(models):
y_pred[:, i] = model.predict(X_val)
y_pred_avg = np.mean(y_pred, axis=1)
y_pred_binary = [1 if pred >= 0.5 else 0 for pred in y_pred_avg]
accuracy = sum(y_pred_binary == y_val) / len(y_val)
joblib.dump(models, 'models.joblib')
joblib.dump(le, 'label_encoder.joblib')
models = joblib.load('models.joblib')
le = joblib.load('label_encoder.joblib')
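# Turn a raw (title, body) pair into the single-row feature frame the models expect:
# sentence/word/stopword/entity counts, average word length, VADER sentiment and POS counts.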
def preprocess(title, body):
sentCnt_tit = len(sent_tokenize(title))
sentCnt = len(sent_tokenize(body))
wrdCnt_tit = len(word_tokenize(title))
wrdCnt = len(word_tokenize(body))
stop_words = set(stopwords.words('english'))
swCnt_tit = len([w for w in word_tokenize(title) if w in stop_words])
swCnt = len([w for w in word_tokenize(body) if w in stop_words])
entCnt = len(nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize(body))).leaves())
avLen_tit = np.mean([len(w) for w in word_tokenize(title)])
avLen = np.mean([len(w) for w in word_tokenize(body)])
sid = SentimentIntensityAnalyzer()
sent_tit = sid.polarity_scores(title)
sent = sid.polarity_scores(body)
neg_tit, neu_tit, pos_tit, compound_tit = (sent_tit['neg'], sent_tit['neu'], sent_tit['pos'], sent_tit['compound'])
neg, neu, pos, compound = (sent['neg'], sent['neu'], sent['pos'], sent['compound'])
title_nouns, title_verbs, title_adjectives = get_pos_counts(title)
text_nouns, text_verbs, text_adjectives = get_pos_counts(body)
features = {'sentCnt_tit': sentCnt_tit, 'wrdCnt_tit': wrdCnt_tit, 'swCnt_tit': swCnt_tit, 'avLen_tit': avLen_tit, 'sentCnt': sentCnt, 'wrdCnt': wrdCnt, 'swCnt': swCnt, 'entCnt': entCnt, 'avLen': avLen, 'neg': neg, 'neu': neu, 'pos': pos, 'compound': compound, 'title_nouns': title_nouns, 'title_verbs': title_verbs, 'title_adjectives': title_adjectives, 'text_nouns': text_nouns, 'text_verbs': text_verbs, 'text_adjectives': text_adjectives}
return pd.DataFrame(features, index=[0])
def get_pos_counts(text):
tokens = nltk.word_tokenize(text)
pos_tags = nltk.pos_tag(tokens)
pos_counts = {'NN': 0, 'VB': 0, 'JJ': 0}
for word, tag in pos_tags:
if tag.startswith('NN'):
pos_counts['NN'] += 1
elif tag.startswith('VB'):
pos_counts['VB'] += 1
elif tag.startswith('JJ'):
pos_counts['JJ'] += 1
return (pos_counts['NN'], pos_counts['VB'], pos_counts['JJ'])
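# Average the probability output of every saved model, threshold at 0.5 for the label,
# and also return the averaged probability as a fake/real dictionary.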
def predict(title, body):
features = preprocess(title, body)
y_pred = np.zeros((len(features), len(models)))
for i, model in enumerate(models):
y_pred[:, i] = model.predict(features)
y_pred_avg = np.mean(y_pred, axis=1)
y_pred_label = [1 if pred >= 0.5 else 0 for pred in y_pred_avg]
proba = {'fake': y_pred_avg[0], 'real': 1 - y_pred_avg[0]}
return (y_pred_label, proba)
title = "Trump says he will not attend Biden's inauguration"
body = "President Trump announced on Friday that he will not attend President-elect Joe Biden's inauguration on Jan. 20. The announcement came in a tweet, ending days of speculation about whether Trump would participate in the centuries-old tradition of a peaceful transfer of power."
predicted_label = predict(title, body)
print(predicted_label) | code |
128044853/cell_5 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
import nltk
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import nltk
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
import joblib
from spacy.util import minibatch, compounding
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from nltk.tokenize import sent_tokenize
from nltk.sentiment import SentimentIntensityAnalyzer
import multiprocessing
from tqdm import tqdm
from sklearn.model_selection import KFold
le = LabelEncoder()
import os
df = pd.read_csv('/kaggle/input/WELFakeAdditional/final (1).csv')
def get_pos_counts(text):
tokens = nltk.word_tokenize(text)
pos_tags = nltk.pos_tag(tokens)
pos_counts = {'NN': 0, 'VB': 0, 'JJ': 0}
for word, tag in pos_tags:
if tag.startswith('NN'):
pos_counts['NN'] += 1
elif tag.startswith('VB'):
pos_counts['VB'] += 1
elif tag.startswith('JJ'):
pos_counts['JJ'] += 1
return (pos_counts['NN'], pos_counts['VB'], pos_counts['JJ'])
df = pd.read_csv('/kaggle/input/WELFakeAdditional/final (1).csv')
df.drop('sentiment', axis=1, inplace=True)
df.drop('subject', axis=1, inplace=True)
df.drop('date', axis=1, inplace=True)
df.drop('subject.1', axis=1, inplace=True)
df.drop('date.1', axis=1, inplace=True)
df.dropna(inplace=True)
tqdm.pandas(desc='Processing title column')
df[['title_nouns', 'title_verbs', 'title_adjectives']] = df['title'].progress_apply(get_pos_counts).apply(pd.Series)
tqdm.pandas(desc='Processing text column')
df[['text_nouns', 'text_verbs', 'text_adjectives']] = df['text'].progress_apply(get_pos_counts).apply(pd.Series)
df.head() | code |
122256136/cell_13 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
pd.set_option('display.max_columns', None)
plt.rcParams['figure.figsize'] = (8, 6)
n_missing = data.isnull().sum().values[:-1]
keys_missing = data.isnull().sum().keys()[:-1]
sns.boxplot(data=data, x='HomePage', y='HomePage_Duration')
plt.xticks(rotation=90)
plt.xlabel('No. of times HomePage was visited')
plt.ylabel('Time spent on Home Page')
plt.show() | code |
122256136/cell_6 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
pd.set_option('display.max_columns', None)
plt.rcParams['figure.figsize'] = (8, 6)
sns.countplot(data=data, x='Made_Purchase')
plt.title('Count of classes in the target variable', c='Blue')
plt.show() | code |
122256136/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
pd.set_option('display.max_columns', None)
data.head() | code |
122256136/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
122256136/cell_18 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
pd.set_option('display.max_columns', None)
plt.rcParams['figure.figsize'] = (8, 6)
n_missing = data.isnull().sum().values[:-1]
keys_missing = data.isnull().sum().keys()[:-1]
plt.xticks(rotation=90)
plt.xticks(rotation=90)
pd.unique(data.Zone)
pd.unique(data.OS)
pd.value_counts(data.OS) | code |
122256136/cell_8 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
pd.set_option('display.max_columns', None)
plt.rcParams['figure.figsize'] = (8, 6)
n_missing = data.isnull().sum().values[:-1]
keys_missing = data.isnull().sum().keys()[:-1]
sns.barplot(y=keys_missing, x=n_missing)
plt.xlabel('Count')
plt.ylabel('Column Names')
plt.title('Count of Missing Values', c='Blue')
plt.show() | code |
122256136/cell_15 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
pd.set_option('display.max_columns', None)
plt.rcParams['figure.figsize'] = (8, 6)
n_missing = data.isnull().sum().values[:-1]
keys_missing = data.isnull().sum().keys()[:-1]
plt.xticks(rotation=90)
plt.xticks(rotation=90)
sns.barplot(x=data.Month_SeasonalPurchase, y=data.HomePage)
plt.xlabel('Month of Seasonal Purchase')
plt.ylabel('No. of times the Home Page was visited')
plt.show() | code |
122256136/cell_16 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
pd.set_option('display.max_columns', None)
plt.rcParams['figure.figsize'] = (8, 6)
n_missing = data.isnull().sum().values[:-1]
keys_missing = data.isnull().sum().keys()[:-1]
plt.xticks(rotation=90)
plt.xticks(rotation=90)
pd.unique(data.Zone) | code |
122256136/cell_17 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
pd.set_option('display.max_columns', None)
plt.rcParams['figure.figsize'] = (8, 6)
n_missing = data.isnull().sum().values[:-1]
keys_missing = data.isnull().sum().keys()[:-1]
plt.xticks(rotation=90)
plt.xticks(rotation=90)
pd.unique(data.Zone)
pd.unique(data.OS) | code |
122256136/cell_14 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
pd.set_option('display.max_columns', None)
plt.rcParams['figure.figsize'] = (8, 6)
n_missing = data.isnull().sum().values[:-1]
keys_missing = data.isnull().sum().keys()[:-1]
plt.xticks(rotation=90)
sns.boxplot(data=data, x='LandingPage', y='LandingPage_Duration')
plt.xticks(rotation=90)
plt.xlabel('No. of times LandingPage was visited')
plt.ylabel('Time spent on Landing Page')
plt.show() | code |
122256136/cell_10 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
pd.set_option('display.max_columns', None)
plt.rcParams['figure.figsize'] = (8, 6)
n_missing = data.isnull().sum().values[:-1]
keys_missing = data.isnull().sum().keys()[:-1]
data.describe() | code |
122256136/cell_12 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/e-commerce-shoppers-behaviour-understanding/train_data_v2.csv')
pd.set_option('display.max_columns', None)
plt.rcParams['figure.figsize'] = (8, 6)
n_missing = data.isnull().sum().values[:-1]
keys_missing = data.isnull().sum().keys()[:-1]
sns.scatterplot(data=data, x='HomePage_Duration', y='LandingPage_Duration', alpha=0.5)
plt.title('Scatterplot between time spent on the Home Page and the Landing Page', c='Blue')
plt.show() | code |
2010673/cell_13 | [
"text_html_output_1.png"
] | from collections import Counter
import matplotlib.pyplot as plt
import nltk
import os
import pandas as pd
path = '../input/state-of-the-union-corpus-1989-2017'
dirs = os.listdir(path)
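# Count how many times `word` occurs in one State of the Union address (lower-cased, NLTK-tokenized).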
def count_words(word, filename):
file = open(path + '/' + filename, encoding='utf8')
text = file.read().lower()
words = nltk.word_tokenize(text)
word_counter = Counter(words)
word_count = word_counter[word]
return word_count
file_dict_list = []
for filename in dirs:
file_dict = {}
job_word_count = count_words('job', filename) + count_words('jobs', filename)
file_dict['year'] = int(filename[-8:-4])
file_dict['job_word_count'] = job_word_count
file_dict_list.append(file_dict)
df = pd.DataFrame(file_dict_list)
df
df.set_index('year', inplace=True)
df
years = df.index
job_word_count = df['job_word_count'].values
jobless_rate = pd.read_csv('../input/usa-unemployment-rate-from-1989-to-2017/unemployment_rate.csv', sep=',')
jobless_rate.set_index('Year', inplace=True)
jobless_rate['Annual'] = jobless_rate.mean(axis=1)
jobless_rate
years = jobless_rate.index
joblessness = jobless_rate['Annual'].values
plt.plot(years, joblessness)
plt.xlabel('Year')
plt.ylabel('Unemployment Rate (%)')
plt.title('Unemployment Rate Trend')
plt.show() | code |