path | screenshot_names | code | cell_type
---|---|---|---|
17111876/cell_26 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
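# NOTE (assumption): train_x is not defined in this cell; it is presumably the
# training images already reshaped to (n, 28, 28, 1).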
plt.imshow(train_x[10][:, :, 0]) | code |
17111876/cell_41 | [
"image_output_1.png"
] | from sklearn.metrics import confusion_matrix
from tensorflow import keras
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import tensorflow as tf
train_imgs = pd.read_csv('../input/train.csv')
test_imgs = pd.read_csv('../input/test.csv')
label_train = train_imgs['label']
label_train = keras.utils.to_categorical(label_train, num_classes=10)
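# NOTE (assumption): train_x/train_y/test_x/test_y used by fit() further below are
# not defined in this cell. They presumably come from scaling the pixel columns,
# reshaping to (n, 28, 28, 1), and splitting against label_train, e.g.:
#   imgs = (img_train / 255.0).values.reshape(-1, 28, 28, 1)
#   train_x, test_x, train_y, test_y = train_test_split(imgs, label_train, test_size=0.2)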
def CNN(n_conv):
"""
Build a Convolutional neural network for n_conv number of convolutional layers.
n_conv: Integer.
"""
model = keras.Sequential()
    for i in range(n_conv):
        # Only the first conv layer needs input_shape; later layers infer their input.
        if i == 0:
            model.add(keras.layers.Conv2D(64, kernel_size=2, activation=tf.nn.relu, input_shape=(28, 28, 1)))
        else:
            model.add(keras.layers.Conv2D(64, kernel_size=2, activation=tf.nn.relu))
model.add(keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(256, kernel_regularizer=keras.regularizers.l2(0.001), activation=tf.nn.relu))
model.add(keras.layers.Dense(10, activation=tf.nn.softmax))
return model
model = CNN(n_conv=3)
model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])
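# The Sequential model above is immediately superseded: `model` is rebound to the
# two-branch functional-API network built below.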
visible = keras.layers.Input(shape=(28, 28, 1))
conv1 = keras.layers.Conv2D(32, kernel_size=4, activation='relu')(visible)
pool1 = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv1)
flat1 = keras.layers.Flatten()(pool1)
conv2 = keras.layers.Conv2D(32, kernel_size=4, activation='relu')(visible)
pool2 = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv2)
flat2 = keras.layers.Flatten()(pool2)
merge = keras.layers.concatenate([flat1, flat2])
hidden1 = keras.layers.Dense(256, activation='relu')(merge)
drop = keras.layers.Dropout(0.5)(hidden1)
output = keras.layers.Dense(10, activation='softmax')(drop)
model = keras.Model(inputs=visible, outputs=output)
model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])
earlystop = keras.callbacks.EarlyStopping(monitor='val_acc', min_delta=0.001, patience=7, mode='max')  # 'max': stop when val accuracy stops improving
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_acc', factor=0.2, patience=5, min_lr=0.001)
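# Caveats: earlystop is never passed to fit() below, and min_lr=0.001 equals Adam's
# default learning rate, so reduce_lr cannot actually lower the rate any further.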
history = model.fit(train_x, train_y, epochs=20, batch_size=200, validation_data=(test_x, test_y), verbose=1, callbacks=[reduce_lr])
# Accuracy
train_accuracy = history.history['acc']
validation_accuracy = history.history['val_acc']
# Loss
train_loss = history.history['loss']
validation_loss = history.history['val_loss']
# Epochs
epoch_range = range(1, len(train_accuracy) + 1)
# Plot
fig, ax = plt.subplots(1, 2, figsize=(12, 5))
ax[0].set_title('Accuracy per Epoch')
sns.lineplot(x=epoch_range, y=train_accuracy, marker='o', ax=ax[0])
sns.lineplot(x=epoch_range, y=validation_accuracy, marker='o', ax=ax[0])
ax[0].legend(['training', 'validation'])
ax[0].set_xlabel('Epoch')
ax[0].set_ylabel('Accuracy')
ax[1].set_title('Loss per Epoch')
sns.lineplot(x=epoch_range, y=train_loss, marker='o', ax=ax[1])
sns.lineplot(x=epoch_range, y=validation_loss, marker='o', ax=ax[1])
ax[1].legend(['training', 'validation'])
ax[1].set_xlabel('Epoch')
ax[1].set_ylabel('Loss')
plt.show()
from sklearn.utils.multiclass import unique_labels
def plot_confusion_matrix(y_true, y_pred, classes,
normalize=False,
title=None,
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
if normalize:
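        # Divide each row by its sum so entries become per-true-class proportions.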
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
title=title,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
return ax
Predict = model.predict(test_x)
Predict_classes = np.argmax(Predict, axis=1)
True_classes = np.argmax(test_y, axis=1)
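# argmax recovers class indices from the softmax outputs and the one-hot labels.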
plot_confusion_matrix(True_classes, Predict_classes, classes=range(10), normalize=False, title='Confusion Matrix') | code |
17111876/cell_11 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_imgs = pd.read_csv('../input/train.csv')
test_imgs = pd.read_csv('../input/test.csv')
img_train = train_imgs.drop(labels='label', axis=1)
del train_imgs
img_train.max().max() | code |
17111876/cell_1 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from tensorflow import keras
import tensorflow as tf
import os
print(os.listdir('../input')) | code |
17111876/cell_32 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from tensorflow import keras
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
train_imgs = pd.read_csv('../input/train.csv')
test_imgs = pd.read_csv('../input/test.csv')
label_train = train_imgs['label']
label_train = keras.utils.to_categorical(label_train, num_classes=10)
def CNN(n_conv):
"""
Build a Convolutional neural network for n_conv number of convolutional layers.
n_conv: Integer.
"""
model = keras.Sequential()
    for i in range(n_conv):
        # Only the first conv layer needs input_shape; later layers infer their input.
        if i == 0:
            model.add(keras.layers.Conv2D(64, kernel_size=2, activation=tf.nn.relu, input_shape=(28, 28, 1)))
        else:
            model.add(keras.layers.Conv2D(64, kernel_size=2, activation=tf.nn.relu))
model.add(keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(256, kernel_regularizer=keras.regularizers.l2(0.001), activation=tf.nn.relu))
model.add(keras.layers.Dense(10, activation=tf.nn.softmax))
return model
model = CNN(n_conv=3)
model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])
visible = keras.layers.Input(shape=(28, 28, 1))
conv1 = keras.layers.Conv2D(32, kernel_size=4, activation='relu')(visible)
pool1 = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv1)
flat1 = keras.layers.Flatten()(pool1)
conv2 = keras.layers.Conv2D(32, kernel_size=4, activation='relu')(visible)
pool2 = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv2)
flat2 = keras.layers.Flatten()(pool2)
merge = keras.layers.concatenate([flat1, flat2])
hidden1 = keras.layers.Dense(256, activation='relu')(merge)
drop = keras.layers.Dropout(0.5)(hidden1)
output = keras.layers.Dense(10, activation='softmax')(drop)
model = keras.Model(inputs=visible, outputs=output)
model.summary()  # summary() prints directly; wrapping it in print() would add a stray 'None'
model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.categorical_crossentropy, metrics=['accuracy']) | code |
17111876/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_imgs = pd.read_csv('../input/train.csv')
test_imgs = pd.read_csv('../input/test.csv')
label_train = train_imgs['label']
sns.countplot(label_train) | code |
17111876/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_imgs = pd.read_csv('../input/train.csv')
test_imgs = pd.read_csv('../input/test.csv')
img_train = train_imgs.drop(labels='label', axis=1)
del train_imgs
img_train.max().max()
test_imgs.max().max()
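# Raw pixel values are in [0, 255]; scale both sets to [0, 1] before training.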
img_train = img_train / 255.0
test_imgs = test_imgs / 255.0
print(img_train.max().max())
print(test_imgs.max().max()) | code |
17111876/cell_38 | [
"text_plain_output_1.png"
] | from tensorflow import keras
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import tensorflow as tf
train_imgs = pd.read_csv('../input/train.csv')
test_imgs = pd.read_csv('../input/test.csv')
label_train = train_imgs['label']
label_train = keras.utils.to_categorical(label_train, num_classes=10)
def CNN(n_conv):
"""
Build a Convolutional neural network for n_conv number of convolutional layers.
n_conv: Integer.
"""
model = keras.Sequential()
    for i in range(n_conv):
        # Only the first conv layer needs input_shape; later layers infer their input.
        if i == 0:
            model.add(keras.layers.Conv2D(64, kernel_size=2, activation=tf.nn.relu, input_shape=(28, 28, 1)))
        else:
            model.add(keras.layers.Conv2D(64, kernel_size=2, activation=tf.nn.relu))
model.add(keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(256, kernel_regularizer=keras.regularizers.l2(0.001), activation=tf.nn.relu))
model.add(keras.layers.Dense(10, activation=tf.nn.softmax))
return model
model = CNN(n_conv=3)
model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])
visible = keras.layers.Input(shape=(28, 28, 1))
conv1 = keras.layers.Conv2D(32, kernel_size=4, activation='relu')(visible)
pool1 = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv1)
flat1 = keras.layers.Flatten()(pool1)
conv2 = keras.layers.Conv2D(32, kernel_size=4, activation='relu')(visible)
pool2 = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv2)
flat2 = keras.layers.Flatten()(pool2)
merge = keras.layers.concatenate([flat1, flat2])
hidden1 = keras.layers.Dense(256, activation='relu')(merge)
drop = keras.layers.Dropout(0.5)(hidden1)
output = keras.layers.Dense(10, activation='softmax')(drop)
model = keras.Model(inputs=visible, outputs=output)
model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])
earlystop = keras.callbacks.EarlyStopping(monitor='val_acc', min_delta=0.001, patience=7, mode='max')  # 'max': stop when val accuracy stops improving
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_acc', factor=0.2, patience=5, min_lr=0.001)
history = model.fit(train_x, train_y, epochs=20, batch_size=200, validation_data=(test_x, test_y), verbose=1, callbacks=[reduce_lr])
train_accuracy = history.history['acc']
validation_accuracy = history.history['val_acc']
train_loss = history.history['loss']
validation_loss = history.history['val_loss']
epoch_range = range(1, len(train_accuracy) + 1)
fig, ax = plt.subplots(1, 2, figsize=(12, 5))
ax[0].set_title('Accuracy per Epoch')
sns.lineplot(x=epoch_range, y=train_accuracy, marker='o', ax=ax[0])
sns.lineplot(x=epoch_range, y=validation_accuracy, marker='o', ax=ax[0])
ax[0].legend(['training', 'validation'])
ax[0].set_xlabel('Epoch')
ax[0].set_ylabel('Accuracy')
ax[1].set_title('Loss per Epoch')
sns.lineplot(x=epoch_range, y=train_loss, marker='o', ax=ax[1])
sns.lineplot(x=epoch_range, y=validation_loss, marker='o', ax=ax[1])
ax[1].legend(['training', 'validation'])
ax[1].set_xlabel('Epoch')
ax[1].set_ylabel('Loss')
plt.show() | code |
17111876/cell_47 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_imgs = pd.read_csv('../input/train.csv')
test_imgs = pd.read_csv('../input/test.csv')
sub = pd.read_csv('../input/sample_submission.csv')
sub.head() | code |
17111876/cell_22 | [
"text_plain_output_1.png"
] | from tensorflow import keras
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_imgs = pd.read_csv('../input/train.csv')
test_imgs = pd.read_csv('../input/test.csv')
label_train = train_imgs['label']
label_train = keras.utils.to_categorical(label_train, num_classes=10)
label_train[0] | code |
17111876/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_imgs = pd.read_csv('../input/train.csv')
test_imgs = pd.read_csv('../input/test.csv')
test_imgs.max().max() | code |
17111876/cell_36 | [
"text_plain_output_1.png"
] | from tensorflow import keras
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
train_imgs = pd.read_csv('../input/train.csv')
test_imgs = pd.read_csv('../input/test.csv')
label_train = train_imgs['label']
label_train = keras.utils.to_categorical(label_train, num_classes=10)
def CNN(n_conv):
"""
Build a Convolutional neural network for n_conv number of convolutional layers.
n_conv: Integer.
"""
model = keras.Sequential()
    for i in range(n_conv):
        # Only the first conv layer needs input_shape; later layers infer their input.
        if i == 0:
            model.add(keras.layers.Conv2D(64, kernel_size=2, activation=tf.nn.relu, input_shape=(28, 28, 1)))
        else:
            model.add(keras.layers.Conv2D(64, kernel_size=2, activation=tf.nn.relu))
model.add(keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(keras.layers.Dropout(0.2))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(256, kernel_regularizer=keras.regularizers.l2(0.001), activation=tf.nn.relu))
model.add(keras.layers.Dense(10, activation=tf.nn.softmax))
return model
model = CNN(n_conv=3)
model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])
visible = keras.layers.Input(shape=(28, 28, 1))
conv1 = keras.layers.Conv2D(32, kernel_size=4, activation='relu')(visible)
pool1 = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv1)
flat1 = keras.layers.Flatten()(pool1)
conv2 = keras.layers.Conv2D(32, kernel_size=4, activation='relu')(visible)
pool2 = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv2)
flat2 = keras.layers.Flatten()(pool2)
merge = keras.layers.concatenate([flat1, flat2])
hidden1 = keras.layers.Dense(256, activation='relu')(merge)
drop = keras.layers.Dropout(0.5)(hidden1)
output = keras.layers.Dense(10, activation='softmax')(drop)
model = keras.Model(inputs=visible, outputs=output)
model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])
earlystop = keras.callbacks.EarlyStopping(monitor='val_acc', min_delta=0.001, patience=7, mode='max')  # 'max': stop when val accuracy stops improving
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_acc', factor=0.2, patience=5, min_lr=0.001)
history = model.fit(train_x, train_y, epochs=20, batch_size=200, validation_data=(test_x, test_y), verbose=1, callbacks=[reduce_lr]) | code |
32068525/cell_11 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
shops_data = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
item_category = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
plt.figure(figsize=(13, 10))
plt.subplot(221)
plt.hist(sales_train.shop_id, bins=25)
plt.xlabel('shop_id')
plt.ylabel('counts')
plt.title('train_data')
plt.subplot(222)
plt.hist(test.shop_id, bins=25)
plt.ylabel('counts')
plt.xlabel('shop_id')
plt.title('test_data')
plt.subplot(223)
plt.hist(sales_train.item_id, bins=25)
plt.xlabel('item_id')
plt.ylabel('counts')
plt.title('train_data')
plt.subplot(224)
plt.hist(test.item_id, bins=25)
plt.ylabel('counts')
plt.xlabel('item_id')
plt.title('test_data')
plt.show() | code |
32068525/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
paths = []  # collect every input file path
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        paths.append(os.path.join(dirname, filename))
paths
32068525/cell_15 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
shops_data = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
item_category = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
sales_train = sales_train[sales_train.shop_id.isin(test.shop_id.unique())]
sales_train = sales_train[sales_train.item_id.isin(test.item_id.unique())]
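# Keep only shop/item ids that occur in the test set, so the train and test
# distributions compared below line up.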
plt.figure(figsize=(13, 5))
plt.subplot(121)
plt.hist(sales_train.shop_id, color='red', label='train', density=True, alpha=0.3)
plt.hist(test.shop_id, color='blue', label='test', density=True, alpha=0.3)
plt.ylabel('counts')
plt.xlabel('shop_id')
plt.legend()
plt.subplot(122)
plt.hist(sales_train.item_id, color='red', label='train', density=True, alpha=0.3)
plt.hist(test.item_id, color='blue', label='test', density=True, alpha=0.3)
plt.ylabel('counts')
plt.xlabel('item_id')
plt.legend()
plt.show() | code |
32068525/cell_10 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
shops_data = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
sales_train = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
sample_submission = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv')
item_category = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
plt.figure(figsize=(13, 5))
plt.subplot(121)
plt.scatter(sales_train.shop_id, sales_train.item_id)
plt.xlabel('shop_id')
plt.ylabel('item_id')
plt.title('train_data')
plt.subplot(122)
plt.scatter(test.shop_id, test.item_id)
plt.xlabel('shop_id')
plt.ylabel('item_id')
plt.title('test_data')
plt.show() | code |
34139025/cell_42 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
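# (pandas treats the multi-character pattern as a regex, so 'words' tokenizes each
# abstract on non-word characters; rows with a missing/empty abstract drop out.)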
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
q5b = abstracts[abstracts['abstract'].str.contains('improve access')]
q5b.shape | code |
34139025/cell_63 | [
"text_plain_output_1.png"
try:
from googlesearch import search
except ImportError:
print('Error/Not found')
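# search() comes from the 'google' package's googlesearch module; tld, num, stop,
# and pause are its standard query options (pause throttles successive requests).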
query4 = 'recommendations for PPE problems'
for j4 in search(query4, tld='co.in', num=10, stop=10, pause=2):
print(j4) | code |
34139025/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
Q1E = abstracts[abstracts['abstract'].str.contains('risk population')]
Q1E.shape | code |
34139025/cell_13 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.head(2) | code |
34139025/cell_25 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
Q2a = abstracts[abstracts['abstract'].str.contains('homeless')]
Q2a.shape | code |
34139025/cell_56 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
Q1A = abstracts[abstracts['abstract'].str.contains('communicating')]
Q1A.shape
Q1B = abstracts[abstracts['abstract'].str.contains('reaching out')]
Q1B
Q1C = abstracts[abstracts['abstract'].str.contains('contacting')]
Q1C.shape
Q1D = abstracts[abstracts['abstract'].str.contains('elderly')]
Q1D.shape
Q1D = abstracts[abstracts['abstract'].str.contains('health care workers')]
Q1D.shape
Q1E = abstracts[abstracts['abstract'].str.contains('risk population')]
Q1E.shape
Question1 = pd.concat([Q1A, Q1B, Q1C, Q1D, Q1E])
Question1.dropna(inplace=True)
Question1.shape
q5a = abstracts[abstracts['abstract'].str.contains('not reach')]
q5a.shape
q5b = abstracts[abstracts['abstract'].str.contains('improve access')]
q5b.shape
q5c = abstracts[abstracts['abstract'].str.contains('access to resource')]
q5c.shape
q5e = abstracts[abstracts['abstract'].str.contains('faulty')]
q5e.shape
q5f = abstracts[abstracts['abstract'].str.contains('meet demand')]
q5f.shape
q5g = abstracts[abstracts['abstract'].str.contains('waste')]
q5g.shape
Question5 = pd.concat([q5a, q5b, q5c, q5e, q5f, q5g])
Question5.dropna(inplace=True)
Question5.shape
StudiesDictionary = {'url': ['https://www.cdc.gov/coronavirus/2019-ncov/covid-data/serology-surveillance/index.html', 'https://www.thelancet.com/journals/lancet/article/PIIS0140-6736(20)30854-0/fulltext', 'https://iussp.org/fr/node/11297', 'https://www.nejm.org/doi/full/10.1056/NEJMp2006761', 'https://www.vox.com/2020/4/24/21229415/coronavirus-antibody-testing-covid-19-california-survey', 'https://www.statnews.com/2020/04/04/cdc-launches-studies-to-get-more-precise-count-of-undetected-covid-19-cases/', 'https://ourworldindata.org/coronavirus', 'https://www.360dx.com/infectious-disease/new-york-california-serology-studies-give-early-estimates-covid-19-prevalence', 'https://www.popcouncil.org/research/responding-to-the-COVID-19-pandemic', 'https://www.nature.com/articles/s41591-020-0883-7']}
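# Manually listed study URLs, turned into a one-column DataFrame below for display.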
StudiesDF = pd.DataFrame.from_dict(StudiesDictionary)
StudiesDF | code |
34139025/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
Q1A = abstracts[abstracts['abstract'].str.contains('communicating')]
Q1A.shape
Q1B = abstracts[abstracts['abstract'].str.contains('reaching out')]
Q1B
Q1C = abstracts[abstracts['abstract'].str.contains('contacting')]
Q1C.shape
Q1D = abstracts[abstracts['abstract'].str.contains('elderly')]
Q1D.shape
Q1D = abstracts[abstracts['abstract'].str.contains('health care workers')]
Q1D.shape
Q1E = abstracts[abstracts['abstract'].str.contains('risk population')]
Q1E.shape
Question1 = pd.concat([Q1A, Q1B, Q1C, Q1D, Q1E])
Question1.dropna(inplace=True)
Question1.shape
Question1 | code |
34139025/cell_30 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
q3a = abstracts[abstracts['abstract'].str.contains('nosocomial')]
q3a.shape | code |
34139025/cell_33 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
q3d = abstracts[abstracts['abstract'].str.contains('nosocomial outbreak')]
q3d.shape | code |
34139025/cell_44 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
q5d = abstracts[abstracts['abstract'].str.contains('outreach')]
q5d.shape | code |
34139025/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
Q1D = abstracts[abstracts['abstract'].str.contains('elderly')]
Q1D.shape
Q1D = abstracts[abstracts['abstract'].str.contains('health care workers')]
Q1D.shape | code |
34139025/cell_26 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
Q2b = abstracts[abstracts['abstract'].str.contains('low income')]
Q2b.shape | code |
34139025/cell_48 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
Q1A = abstracts[abstracts['abstract'].str.contains('communicating')]
Q1A.shape
Q1B = abstracts[abstracts['abstract'].str.contains('reaching out')]
Q1B
Q1C = abstracts[abstracts['abstract'].str.contains('contacting')]
Q1C.shape
Q1D = abstracts[abstracts['abstract'].str.contains('elderly')]
Q1D.shape
Q1D = abstracts[abstracts['abstract'].str.contains('health care workers')]
Q1D.shape
Q1E = abstracts[abstracts['abstract'].str.contains('risk population')]
Q1E.shape
Question1 = pd.concat([Q1A, Q1B, Q1C, Q1D, Q1E])
Question1.dropna(inplace=True)
Question1.shape
q5a = abstracts[abstracts['abstract'].str.contains('not reach')]
q5a.shape
q5b = abstracts[abstracts['abstract'].str.contains('improve access')]
q5b.shape
q5c = abstracts[abstracts['abstract'].str.contains('access to resource')]
q5c.shape
q5e = abstracts[abstracts['abstract'].str.contains('faulty')]
q5e.shape
q5f = abstracts[abstracts['abstract'].str.contains('meet demand')]
q5f.shape
q5g = abstracts[abstracts['abstract'].str.contains('waste')]
q5g.shape
Question5 = pd.concat([q5a, q5b, q5c, q5e, q5f, q5g])
Question5.dropna(inplace=True)
Question5.shape | code |
34139025/cell_41 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
q5a = abstracts[abstracts['abstract'].str.contains('not reach')]
q5a.shape | code |
34139025/cell_61 | [
"text_html_output_1.png"
try:
from googlesearch import search
except ImportError:
print('Error/Not found')
query2 = 'recommendations for COVID 19 resources limits'
for j2 in search(query2, tld='co.in', num=10, stop=10, pause=2):
print(j2) | code |
34139025/cell_54 | [
"text_plain_output_1.png"
try:
from googlesearch import search
except ImportError:
print('Error/Not found')
query = 'COVID 19 population studies'
for j in search(query, tld='co.in', num=10, stop=10, pause=2):
print(j) | code |
34139025/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
Q1D = abstracts[abstracts['abstract'].str.contains('elderly')]
Q1D.shape | code |
34139025/cell_52 | [
"text_plain_output_1.png"
] | pip install beautifulsoup4 | code |
34139025/cell_64 | [
"text_plain_output_1.png"
try:
from googlesearch import search
except ImportError:
print('Error/Not found')
query5 = 'recommendations for improving access to COVID 19 resources'
for j5 in search(query5, tld='co.in', num=10, stop=10, pause=2):
print(j5) | code |
34139025/cell_45 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
q5e = abstracts[abstracts['abstract'].str.contains('faulty')]
q5e.shape | code |
34139025/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
Q1C = abstracts[abstracts['abstract'].str.contains('contacting')]
Q1C.shape | code |
34139025/cell_32 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
q3c = abstracts[abstracts['abstract'].str.contains('hospital patients')]
q3c.shape | code |
34139025/cell_62 | [
"text_plain_output_1.png"
try:
from googlesearch import search
except ImportError:
print('Error/Not found')
query3 = 'recommendations for COVID 19 testing problems'
for j3 in search(query3, tld='co.in', num=10, stop=10, pause=2):
print(j3) | code |
34139025/cell_28 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
Q2d = abstracts[abstracts['abstract'].str.contains('housing')]
Q2d.shape | code |
34139025/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
tablesTable = t[['Question', 'Table Format']]
tablesTable | code |
34139025/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
Q1A = abstracts[abstracts['abstract'].str.contains('communicating')]
Q1A.shape | code |
34139025/cell_38 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
q4D = abstracts[abstracts['abstract'].str.contains('methods to prevent')]
q4D.shape | code |
34139025/cell_47 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
q5g = abstracts[abstracts['abstract'].str.contains('waste')]
q5g.shape | code |
34139025/cell_66 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
Q1A = abstracts[abstracts['abstract'].str.contains('communicating')]
Q1A.shape
Q1B = abstracts[abstracts['abstract'].str.contains('reaching out')]
Q1B
Q1C = abstracts[abstracts['abstract'].str.contains('contacting')]
Q1C.shape
Q1D = abstracts[abstracts['abstract'].str.contains('elderly')]
Q1D.shape
Q1D = abstracts[abstracts['abstract'].str.contains('health care workers')]
Q1D.shape
Q1E = abstracts[abstracts['abstract'].str.contains('risk population')]
Q1E.shape
Question1 = pd.concat([Q1A, Q1B, Q1C, Q1D, Q1E])
Question1.dropna(inplace=True)
Question1.shape
q5a = abstracts[abstracts['abstract'].str.contains('not reach')]
q5a.shape
q5b = abstracts[abstracts['abstract'].str.contains('improve access')]
q5b.shape
q5c = abstracts[abstracts['abstract'].str.contains('access to resource')]
q5c.shape
q5e = abstracts[abstracts['abstract'].str.contains('faulty')]
q5e.shape
q5f = abstracts[abstracts['abstract'].str.contains('meet demand')]
q5f.shape
q5g = abstracts[abstracts['abstract'].str.contains('waste')]
q5g.shape
Question5 = pd.concat([q5a, q5b, q5c, q5e, q5f, q5g])
Question5.dropna(inplace=True)
Question5.shape
StudiesDictionary = {'url': ['https://www.cdc.gov/coronavirus/2019-ncov/covid-data/serology-surveillance/index.html', 'https://www.thelancet.com/journals/lancet/article/PIIS0140-6736(20)30854-0/fulltext', 'https://iussp.org/fr/node/11297', 'https://www.nejm.org/doi/full/10.1056/NEJMp2006761', 'https://www.vox.com/2020/4/24/21229415/coronavirus-antibody-testing-covid-19-california-survey', 'https://www.statnews.com/2020/04/04/cdc-launches-studies-to-get-more-precise-count-of-undetected-covid-19-cases/', 'https://ourworldindata.org/coronavirus', 'https://www.360dx.com/infectious-disease/new-york-california-serology-studies-give-early-estimates-covid-19-prevalence', 'https://www.popcouncil.org/research/responding-to-the-COVID-19-pandemic', 'https://www.nature.com/articles/s41591-020-0883-7']}
StudiesDF = pd.DataFrame.from_dict(StudiesDictionary)
StudiesDF
Q5RecomDictionary = {'url': ['https://www.ama-assn.org/delivering-care/public-health/covid-19-policy-recommendations-oud-pain-harm-reduction', 'https://www.statnews.com/2020/03/31/covid-19-overcoming-testing-challenges/', 'https://apps.who.int/iris/bitstream/handle/10665/331509/WHO-COVID-19-lab_testing-2020.1-eng.pdf', 'https://www.modernhealthcare.com/technology/covid-19-testing-problems-started-early-us-still-playing-behind', 'https://www.modernhealthcare.com/technology/labs-face-challenges-creating-diagnosis-testing-covid-19', 'https://www.ama-assn.org/delivering-care/public-health/covid-19-frequently-asked-questions', 'https://www.vox.com/recode/2020/4/24/21229774/coronavirus-covid-19-testing-social-distancing', 'https://www.vdh.virginia.gov/coronavirus/health-professionals/vdh-updated-guidance-on-testing-for-covid-19/', 'https://www.mckinsey.com/industries/healthcare-systems-and-services/our-insights/major-challenges-remain-in-covid-19-testing', 'https://www.fda.gov/medical-devices/emergency-situations-medical-devices/faqs-testing-sars-cov-2', 'https://www.jointcommission.org/resources/news-and-multimedia/news/2020/03/statement-on-shortages-of-personal-protective-equipment-amid-covid-19-pandemic/', 'https://jamanetwork.com/journals/jama/fullarticle/2764238', 'https://www.ncbi.nlm.nih.gov/books/NBK209587/', 'https://www.cdc.gov/coronavirus/2019-ncov/hcp/ppe-strategy/index.html', 'https://www.cdc.gov/coronavirus/2019-ncov/hcp/ppe-strategy/burn-calculator.html', 'https://www.cdc.gov/coronavirus/2019-ncov/hcp/ppe-strategy/face-masks.html', 'https://www.cdc.gov/coronavirus/2019-ncov/hcp/respirators-strategy/index.html', 'https://www.cdc.gov/coronavirus/2019-ncov/hcp/ppe-strategy/eye-protection.html', 'http://www.infectioncontroltoday.com/personal-protective-equipment/addressing-challenges-ppe-non-compliance', 'https://www.healio.com/gastroenterology/practice-management/news/online/%7B331d768c-91dd-4094-a2fd-6c9b0c07627d%7D/aga-issues-covid-19-recommendations-for-ppe-use-during-gi-procedures', 'https://www.facs.org/covid-19/ppe/additional']}
Q5Recomm = pd.DataFrame.from_dict(Q5RecomDictionary)
Q5Recomm | code |
34139025/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
Q1B = abstracts[abstracts['abstract'].str.contains('reaching out')]
Q1B | code |
34139025/cell_35 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
q4A = abstracts[abstracts['abstract'].str.contains('compliance')]
q4A.shape | code |
34139025/cell_43 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
q5c = abstracts[abstracts['abstract'].str.contains('access to resource')]
q5c.shape | code |
34139025/cell_31 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
q3b = abstracts[abstracts['abstract'].str.contains('hospital spread')]
q3b.shape | code |
34139025/cell_46 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
q5f = abstracts[abstracts['abstract'].str.contains('meet demand')]
q5f.shape | code |
34139025/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape | code |
34139025/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
Q1A = abstracts[abstracts['abstract'].str.contains('communicating')]
Q1A.shape
Q1B = abstracts[abstracts['abstract'].str.contains('reaching out')]
Q1B
Q1C = abstracts[abstracts['abstract'].str.contains('contacting')]
Q1C.shape
Q1D = abstracts[abstracts['abstract'].str.contains('elderly')]
Q1D.shape
Q1D = abstracts[abstracts['abstract'].str.contains('health care workers')]
Q1D.shape
Q1E = abstracts[abstracts['abstract'].str.contains('risk population')]
Q1E.shape
Question1 = pd.concat([Q1A, Q1B, Q1C, Q1D, Q1E])
Question1.dropna(inplace=True)
Question1.shape | code |
34139025/cell_53 | [
"text_plain_output_1.png"
] | pip install google | code |
34139025/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape | code |
34139025/cell_27 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
Q2c = abstracts[abstracts['abstract'].str.contains('poor')]
Q2c.shape | code |
34139025/cell_37 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
q4C = abstracts[abstracts['abstract'].str.contains('prevent spread')]
q4C.shape | code |
34139025/cell_12 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
journals['words'].head() | code |
34139025/cell_36 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.shape
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abstracts.csv')
abstracts.shape
q4B = abstracts[abstracts['abstract'].str.contains('community spread')]
q4B.shape | code |
130008103/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
lung_df = pd.read_csv('/kaggle/input/lung-cancer/survey lung cancer.csv')
lung_df = lung_df.dropna(how='any')
lung_df['LUNG_CANCER'] = lung_df['LUNG_CANCER'].map({'NO': 0, 'YES': 1})
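# With the target encoded as 0/1, barplot's default mean estimator shows the
# observed probability of lung cancer within each gender group.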
sns.barplot(data=lung_df, x='GENDER', y='LUNG_CANCER')
plt.xlabel('Gender')
plt.ylabel('Probability of Lung Cancer')
plt.title('Lung Cancer Presence by Gender')
plt.show() | code |
130008103/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
lung_df = pd.read_csv('/kaggle/input/lung-cancer/survey lung cancer.csv')
lung_df = lung_df.dropna(how='any')
sns.countplot(data=lung_df, x='GENDER')
plt.xlabel('Gender')
plt.ylabel('Count')
plt.title('Distribution of Gender')
plt.show() | code |
130008103/cell_1 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import seaborn as sns
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
130008103/cell_7 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
lung_df = pd.read_csv('/kaggle/input/lung-cancer/survey lung cancer.csv')
print(lung_df.describe())  # print explicitly; only a cell's last expression is auto-displayed
lung_df.info() | code |
130008103/cell_15 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
lung_df = pd.read_csv('/kaggle/input/lung-cancer/survey lung cancer.csv')
lung_df = lung_df.dropna(how='any')
lung_df['LUNG_CANCER'] = lung_df['LUNG_CANCER'].map({'NO': 0, 'YES': 1})
sns.scatterplot(data=lung_df, x='AGE', y='SMOKING')
plt.xlabel('Age')
plt.ylabel('Smoking Status')
plt.title('Age vs. Smoking Status')
plt.show() | code |
130008103/cell_5 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
lung_df = pd.read_csv('/kaggle/input/lung-cancer/survey lung cancer.csv')
lung_df.head(5) | code |
73096402/cell_13 | [
"text_plain_output_1.png"
] | from itertools import product
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tmnist-typeface-mnist/TMNIST_data.csv')
unique_typefaces = df.names.unique()
typeface_map = {}
for i in range(len(unique_typefaces)):
typeface_map[unique_typefaces[i]] = i
df = df.replace({'names': typeface_map})
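# 'names' now holds integer typeface ids (0..N-1) instead of font-name strings.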
df = df.sample(frac=1).reset_index(drop=True)
training_percent = 0.7
validation_percent = 0.2
total_examples = df.shape[0]
train_val_breakpoint = int(training_percent * total_examples)
val_test_breakpoint = int((training_percent + validation_percent) * total_examples)
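# Positional 70/20/10 train/validation/test split; valid because the frame was shuffled above.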
train_df = df.iloc[None:train_val_breakpoint, :]
val_df = df.iloc[train_val_breakpoint:val_test_breakpoint, :]
test_df = df.iloc[val_test_breakpoint:None, :]
typefaces_num = len(typeface_map)
to_ndarr = lambda obj: np.array(list(obj))
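# Sanity check (assumed intent): confirm every typeface id appears in each split.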
for split_df in (train_df, val_df, test_df):
    diff = np.setdiff1d(to_ndarr(typeface_map.values()), to_ndarr(split_df.names))
    ndiff = len(diff)
def dataframeToNumpy(df):
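    # Assumed column layout (matching the slicing below): col 0 = typeface id,
    # col 1 = digit label, cols 2+ = the 784 flattened 28x28 pixel values.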
Xdf = df.iloc[:, 2:None]
ydf = df.iloc[:, 1]
zdf = df.iloc[:, 0]
return (np.array(Xdf), np.array(ydf), np.array(zdf))
Xtrain, ytrain, ztrain = dataframeToNumpy(train_df)
Xval, yval, zval = dataframeToNumpy(val_df)
Xtest, ytest, ztest = dataframeToNumpy(test_df)
names = []
for p in product(['train', 'val', 'test'], ['X', 'y', 'z']):
names.append(''.join([p[1], p[0]]))
for name in names:
print(f'Shape of {name}:', eval(name).shape) | code |
73096402/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tmnist-typeface-mnist/TMNIST_data.csv')
print(df.shape)
df.head() | code |
73096402/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tmnist-typeface-mnist/TMNIST_data.csv')
unique_typefaces = df.names.unique()
typeface_map = {}
for i in range(len(unique_typefaces)):
typeface_map[unique_typefaces[i]] = i
df = df.replace({'names': typeface_map})
df.head() | code |
73096402/cell_11 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tmnist-typeface-mnist/TMNIST_data.csv')
unique_typefaces = df.names.unique()
typeface_map = {}
for i in range(len(unique_typefaces)):
typeface_map[unique_typefaces[i]] = i
df = df.replace({'names': typeface_map})
df = df.sample(frac=1).reset_index(drop=True)
training_percent = 0.7
validation_percent = 0.2
total_examples = df.shape[0]
train_val_breakpoint = int(training_percent * total_examples)
val_test_breakpoint = int((training_percent + validation_percent) * total_examples)
train_df = df.iloc[:train_val_breakpoint, :]
val_df = df.iloc[train_val_breakpoint:val_test_breakpoint, :]
test_df = df.iloc[val_test_breakpoint:, :]
typefaces_num = len(typeface_map)
to_ndarr = lambda obj: np.array(list(obj))
for df_name in ('train_df', 'val_df', 'test_df'):
df = eval(df_name)
diff = np.setdiff1d(to_ndarr(typeface_map.values()), to_ndarr(df.names))
ndiff = len(diff)
print(ndiff) | code |
73096402/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
from itertools import product
from tqdm import tqdm
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
73096402/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tmnist-typeface-mnist/TMNIST_data.csv')
unique_typefaces = df.names.unique()
typeface_map = {}
for i in range(len(unique_typefaces)):
typeface_map[unique_typefaces[i]] = i
df = df.replace({'names': typeface_map})
df = df.sample(frac=1).reset_index(drop=True)
training_percent = 0.7
validation_percent = 0.2
total_examples = df.shape[0]
train_val_breakpoint = int(training_percent * total_examples)
val_test_breakpoint = int((training_percent + validation_percent) * total_examples)
train_df = df.iloc[:train_val_breakpoint, :]
val_df = df.iloc[train_val_breakpoint:val_test_breakpoint, :]
test_df = df.iloc[val_test_breakpoint:, :]
for z in zip(['train_df', 'val_df', 'test_df'], [train_df, val_df, test_df]):
print(f'Shape of {z[0]}:', z[1].shape) | code |
73096402/cell_15 | [
"text_html_output_1.png"
] | from itertools import product
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
df = pd.read_csv('/kaggle/input/tmnist-typeface-mnist/TMNIST_data.csv')
unique_typefaces = df.names.unique()
typeface_map = {}
for i in range(len(unique_typefaces)):
typeface_map[unique_typefaces[i]] = i
df = df.replace({'names': typeface_map})
df = df.sample(frac=1).reset_index(drop=True)
training_percent = 0.7
validation_percent = 0.2
total_examples = df.shape[0]
train_val_breakpoint = int(training_percent * total_examples)
val_test_breakpoint = int((training_percent + validation_percent) * total_examples)
train_df = df.iloc[:train_val_breakpoint, :]
val_df = df.iloc[train_val_breakpoint:val_test_breakpoint, :]
test_df = df.iloc[val_test_breakpoint:, :]
typefaces_num = len(typeface_map)
to_ndarr = lambda obj: np.array(list(obj))
for df_name in ('train_df', 'val_df', 'test_df'):
df = eval(df_name)
diff = np.setdiff1d(to_ndarr(typeface_map.values()), to_ndarr(df.names))
ndiff = len(diff)
def dataframeToNumpy(df):
    Xdf = df.iloc[:, 2:]
ydf = df.iloc[:, 1]
zdf = df.iloc[:, 0]
return (np.array(Xdf), np.array(ydf), np.array(zdf))
Xtrain, ytrain, ztrain = dataframeToNumpy(train_df)
Xval, yval, zval = dataframeToNumpy(val_df)
Xtest, ytest, ztest = dataframeToNumpy(test_df)
names = []
for p in product(['train', 'val', 'test'], ['X', 'y', 'z']):
names.append(''.join([p[1], p[0]]))
img_dim = int(np.sqrt(Xtrain.shape[1]))
# Show 100 random training digits in a 10x10 grid
randIdx = np.random.randint(0, Xtrain.shape[0], 100).reshape(10, 10)
fig, ax = plt.subplots(10, 10)
for i in range(randIdx.shape[0]):
    for j in range(randIdx.shape[1]):
        example = Xtrain[randIdx[i, j], :]
        example = example.reshape((img_dim, img_dim))
        ax[i, j].imshow(example, cmap='gray')
ax[i, j].set_xticks([])
ax[i, j].set_yticks([])
plt.show() | code |
73096402/cell_10 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tmnist-typeface-mnist/TMNIST_data.csv')
unique_typefaces = df.names.unique()
typeface_map = {}
for i in range(len(unique_typefaces)):
typeface_map[unique_typefaces[i]] = i
df = df.replace({'names': typeface_map})
df = df.sample(frac=1).reset_index(drop=True)
training_percent = 0.7
validation_percent = 0.2
total_examples = df.shape[0]
train_val_breakpoint = int(training_percent * total_examples)
val_test_breakpoint = int((training_percent + validation_percent) * total_examples)
train_df = df.iloc[:train_val_breakpoint, :]
val_df = df.iloc[train_val_breakpoint:val_test_breakpoint, :]
test_df = df.iloc[val_test_breakpoint:, :]
df.head() | code |
2010817/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.describe(include='all') | code |
2010817/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf | code |
2010817/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = train.drop(['Cabin', 'Ticket'], axis=1)
test = test.drop(['Cabin', 'Ticket'], axis=1)
train.head() | code |
2010817/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head() | code |
2010817/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = train.drop(['Cabin', 'Ticket'], axis=1)
test = test.drop(['Cabin', 'Ticket'], axis=1)
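# Extract the honorific title (Mr, Mrs, Miss, ...) from each passenger's Name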
train['Name'] = train.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
test['Name'] = test.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
train.head() | code |
2010817/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test.describe(include='all') | code |
73081521/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
import warnings
import warnings
warnings.simplefilter('ignore')
import numpy as np
import pandas as pd
from PIL import Image, ImageFilter, ImageOps
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
import time
from contextlib import contextmanager
import gc
import os
import cv2
from glob import glob
import os
import requests
import io
pd.set_option('display.max_rows', 200)
INPUT_DIR = '../input/shigglecup-1st/DATA/'
train_df = pd.read_csv(f'{INPUT_DIR}train.csv')
test_df = pd.read_csv(f'{INPUT_DIR}test.csv')
train_df['train_id'] = 1
test_df['train_id'] = 0
all_df = pd.concat([train_df, test_df]).reset_index(drop=True)
all_df.isna().sum().T
print(all_df['color_2'].nunique())
print(all_df['type_2'].nunique()) | code |
73081521/cell_20 | [
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import cv2
import matplotlib.pyplot as plt
import os
import os
import pandas as pd
import warnings
import warnings
warnings.simplefilter('ignore')
import numpy as np
import pandas as pd
from PIL import Image, ImageFilter, ImageOps
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
import time
from contextlib import contextmanager
import gc
import os
import cv2
from glob import glob
import os
import requests
import io
pd.set_option('display.max_rows', 200)
INPUT_DIR = '../input/shigglecup-1st/DATA/'
train_df = pd.read_csv(f'{INPUT_DIR}train.csv')
test_df = pd.read_csv(f'{INPUT_DIR}test.csv')
train_df['train_id'] = 1
test_df['train_id'] = 0
all_df = pd.concat([train_df, test_df]).reset_index(drop=True)
all_df.isna().sum().T
def show_images_glob(images, color, figsize=(20, 10), columns=5):
    # Render every existing image in a grid; `image != image` filters out NaN paths
    plt.figure(figsize=figsize)
    plt.suptitle(color)
    for i, image in enumerate(images):
        if image != image:
            continue
        if not os.path.exists(image):
            continue
        img = cv2.imread(image)
        im_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        plt.subplot(len(images) // columns + 1, columns, i + 1)
        plt.imshow(im_rgb)
        plt.axis('off')
    plt.show()
def show_gbc(colors):
img_df = all_df[[colors]].dropna()
cnt = 0
for i in range(len(img_df.dropna()[colors].unique())):
if cnt > 2:
break
color = img_df.dropna()[colors].unique()[i]
color_list = f'{INPUT_DIR}pokemon_images/' + train_df[train_df[colors] == color].reset_index(drop=True)['url_image']
if len(color_list) == 0:
continue
        cnt += 1
        show_images_glob(color_list, color)
poke = all_df.loc[all_df['pokemon'] == 'altaria', :]
poke.T
for colors in ['color_1', 'color_2', 'color_f']:
color = poke[colors].values[0]
color_list = f'{INPUT_DIR}pokemon_images/' + train_df[train_df[colors] == color].reset_index(drop=True)['url_image']
show_images_glob(color_list, color) | code |
73081521/cell_6 | [
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import pandas as pd
import warnings
import warnings
warnings.simplefilter('ignore')
import numpy as np
import pandas as pd
from PIL import Image, ImageFilter, ImageOps
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
import time
from contextlib import contextmanager
import gc
import os
import cv2
from glob import glob
import os
import requests
import io
pd.set_option('display.max_rows', 200)
INPUT_DIR = '../input/shigglecup-1st/DATA/'
train_df = pd.read_csv(f'{INPUT_DIR}train.csv')
test_df = pd.read_csv(f'{INPUT_DIR}test.csv')
train_df['train_id'] = 1
test_df['train_id'] = 0
all_df = pd.concat([train_df, test_df]).reset_index(drop=True)
all_df.isna().sum().T | code |
73081521/cell_11 | [
"text_plain_output_1.png"
] | import cv2
import matplotlib.pyplot as plt
import os
import os
import pandas as pd
import warnings
import warnings
warnings.simplefilter('ignore')
import numpy as np
import pandas as pd
from PIL import Image, ImageFilter, ImageOps
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
import time
from contextlib import contextmanager
import gc
import os
import cv2
from glob import glob
import os
import requests
import io
pd.set_option('display.max_rows', 200)
INPUT_DIR = '../input/shigglecup-1st/DATA/'
train_df = pd.read_csv(f'{INPUT_DIR}train.csv')
test_df = pd.read_csv(f'{INPUT_DIR}test.csv')
train_df['train_id'] = 1
test_df['train_id'] = 0
all_df = pd.concat([train_df, test_df]).reset_index(drop=True)
all_df.isna().sum().T
def show_images_glob(images, color, figsize=(20, 10), columns=5):
    # Render every existing image in a grid; `image != image` filters out NaN paths
    plt.figure(figsize=figsize)
    plt.suptitle(color)
    for i, image in enumerate(images):
        if image != image:
            continue
        if not os.path.exists(image):
            continue
        img = cv2.imread(image)
        im_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        plt.subplot(len(images) // columns + 1, columns, i + 1)
        plt.imshow(im_rgb)
        plt.axis('off')
    plt.show()
def show_gbc(colors):
img_df = all_df[[colors]].dropna()
cnt = 0
for i in range(len(img_df.dropna()[colors].unique())):
if cnt > 2:
break
color = img_df.dropna()[colors].unique()[i]
color_list = f'{INPUT_DIR}pokemon_images/' + train_df[train_df[colors] == color].reset_index(drop=True)['url_image']
if len(color_list) == 0:
continue
        cnt += 1
        show_images_glob(color_list, color)
show_gbc('color_1') | code |
73081521/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd
import warnings
import warnings
warnings.simplefilter('ignore')
import numpy as np
import pandas as pd
from PIL import Image, ImageFilter, ImageOps
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
import time
from contextlib import contextmanager
import gc
import os
import cv2
from glob import glob
import os
import requests
import io
pd.set_option('display.max_rows', 200)
INPUT_DIR = '../input/shigglecup-1st/DATA/'
train_df = pd.read_csv(f'{INPUT_DIR}train.csv')
test_df = pd.read_csv(f'{INPUT_DIR}test.csv')
train_df['train_id'] = 1
test_df['train_id'] = 0
all_df = pd.concat([train_df, test_df]).reset_index(drop=True)
all_df.isna().sum().T
poke = all_df.loc[all_df['pokemon'] == 'altaria', :]
poke.T | code |
73081521/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
import warnings
import warnings
warnings.simplefilter('ignore')
import numpy as np
import pandas as pd
from PIL import Image, ImageFilter, ImageOps
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
import time
from contextlib import contextmanager
import gc
import os
import cv2
from glob import glob
import os
import requests
import io
pd.set_option('display.max_rows', 200)
INPUT_DIR = '../input/shigglecup-1st/DATA/'
train_df = pd.read_csv(f'{INPUT_DIR}train.csv')
test_df = pd.read_csv(f'{INPUT_DIR}test.csv')
train_df['train_id'] = 1
test_df['train_id'] = 0
all_df = pd.concat([train_df, test_df]).reset_index(drop=True)
all_df.isna().sum().T
print(all_df['color_f'].nunique())
print((all_df['type_1'] + all_df['type_2']).nunique()) | code |
73081521/cell_17 | [
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import cv2
import matplotlib.pyplot as plt
import os
import os
import pandas as pd
import warnings
import warnings
warnings.simplefilter('ignore')
import numpy as np
import pandas as pd
from PIL import Image, ImageFilter, ImageOps
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
import time
from contextlib import contextmanager
import gc
import os
import cv2
from glob import glob
import os
import requests
import io
pd.set_option('display.max_rows', 200)
INPUT_DIR = '../input/shigglecup-1st/DATA/'
train_df = pd.read_csv(f'{INPUT_DIR}train.csv')
test_df = pd.read_csv(f'{INPUT_DIR}test.csv')
train_df['train_id'] = 1
test_df['train_id'] = 0
all_df = pd.concat([train_df, test_df]).reset_index(drop=True)
all_df.isna().sum().T
def show_images_glob(images, color, figsize=(20, 10), columns=5):
    # Render every existing image in a grid; `image != image` filters out NaN paths
    plt.figure(figsize=figsize)
    plt.suptitle(color)
    for i, image in enumerate(images):
        if image != image:
            continue
        if not os.path.exists(image):
            continue
        img = cv2.imread(image)
        im_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        plt.subplot(len(images) // columns + 1, columns, i + 1)
        plt.imshow(im_rgb)
        plt.axis('off')
    plt.show()
def show_gbc(colors):
img_df = all_df[[colors]].dropna()
cnt = 0
for i in range(len(img_df.dropna()[colors].unique())):
if cnt > 2:
break
color = img_df.dropna()[colors].unique()[i]
color_list = f'{INPUT_DIR}pokemon_images/' + train_df[train_df[colors] == color].reset_index(drop=True)['url_image']
if len(color_list) == 0:
continue
        cnt += 1
        show_images_glob(color_list, color)
show_gbc('color_f') | code |
73081521/cell_14 | [
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import cv2
import matplotlib.pyplot as plt
import os
import os
import pandas as pd
import warnings
import warnings
warnings.simplefilter('ignore')
import numpy as np
import pandas as pd
from PIL import Image, ImageFilter, ImageOps
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
import time
from contextlib import contextmanager
import gc
import os
import cv2
from glob import glob
import os
import requests
import io
pd.set_option('display.max_rows', 200)
INPUT_DIR = '../input/shigglecup-1st/DATA/'
train_df = pd.read_csv(f'{INPUT_DIR}train.csv')
test_df = pd.read_csv(f'{INPUT_DIR}test.csv')
train_df['train_id'] = 1
test_df['train_id'] = 0
all_df = pd.concat([train_df, test_df]).reset_index(drop=True)
all_df.isna().sum().T
def show_images_glob(images, color, figsize=(20, 10), columns=5):
    # Render every existing image in a grid; `image != image` filters out NaN paths
    plt.figure(figsize=figsize)
    plt.suptitle(color)
    for i, image in enumerate(images):
        if image != image:
            continue
        if not os.path.exists(image):
            continue
        img = cv2.imread(image)
        im_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        plt.subplot(len(images) // columns + 1, columns, i + 1)
        plt.imshow(im_rgb)
        plt.axis('off')
    plt.show()
def show_gbc(colors):
img_df = all_df[[colors]].dropna()
cnt = 0
for i in range(len(img_df.dropna()[colors].unique())):
if cnt > 2:
break
color = img_df.dropna()[colors].unique()[i]
color_list = f'{INPUT_DIR}pokemon_images/' + train_df[train_df[colors] == color].reset_index(drop=True)['url_image']
if len(color_list) == 0:
continue
        cnt += 1
        show_images_glob(color_list, color)
show_gbc('color_2') | code |
73081521/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
import warnings
import warnings
warnings.simplefilter('ignore')
import numpy as np
import pandas as pd
from PIL import Image, ImageFilter, ImageOps
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
import time
from contextlib import contextmanager
import gc
import os
import cv2
from glob import glob
import os
import requests
import io
pd.set_option('display.max_rows', 200)
INPUT_DIR = '../input/shigglecup-1st/DATA/'
train_df = pd.read_csv(f'{INPUT_DIR}train.csv')
test_df = pd.read_csv(f'{INPUT_DIR}test.csv')
train_df['train_id'] = 1
test_df['train_id'] = 0
all_df = pd.concat([train_df, test_df]).reset_index(drop=True)
all_df.isna().sum().T
print(all_df['color_1'].nunique())
print(all_df['type_1'].nunique()) | code |
73081521/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
import warnings
import warnings
warnings.simplefilter('ignore')
import numpy as np
import pandas as pd
from PIL import Image, ImageFilter, ImageOps
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
import time
from contextlib import contextmanager
import gc
import os
import cv2
from glob import glob
import os
import requests
import io
pd.set_option('display.max_rows', 200)
INPUT_DIR = '../input/shigglecup-1st/DATA/'
train_df = pd.read_csv(f'{INPUT_DIR}train.csv')
test_df = pd.read_csv(f'{INPUT_DIR}test.csv')
train_df['train_id'] = 1
test_df['train_id'] = 0
all_df = pd.concat([train_df, test_df]).reset_index(drop=True)
print(train_df.shape)
print(test_df.shape)
print(all_df.shape) | code |
325602/cell_6 | [
"text_plain_output_1.png"
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
"""
Boiler-Plate/Feature-Engineering to get frame into a testable format
"""
df = pd.read_csv('../input/nflplaybyplay2015.csv', low_memory=False)
used_downs = [1, 2, 3]
df = df[df['down'].isin(used_downs)]
valid_plays = ['Pass', 'Run', 'Sack']
df = df[df['PlayType'].isin(valid_plays)]
pass_plays = ['Pass', 'Sack']
df['is_pass'] = df['PlayType'].isin(pass_plays).astype('int')
df = df[['down', 'yrdline100', 'ScoreDiff', 'PosTeamScore', 'DefTeamScore', 'ydstogo', 'TimeSecs', 'ydsnet', 'is_pass', 'Drive']]
X, test = train_test_split(df, test_size=0.2)
y = X.pop('is_pass')
test_y = test.pop('is_pass')
parameters = {}
clf = RandomForestClassifier(n_jobs=-1, oob_score=True, n_estimators=100, min_samples_leaf=12, max_features=0.8)
clf.fit(X, y)
clf.score(test, test_y) | code |
325602/cell_2 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import LabelEncoder
df = pd.read_csv('../input/nflplaybyplay2015.csv', low_memory=False)
df.columns | code |
325602/cell_5 | [
"text_plain_output_1.png"
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
"""
Boiler-Plate/Feature-Engineering to get frame into a testable format
"""
df = pd.read_csv('../input/nflplaybyplay2015.csv', low_memory=False)
used_downs = [1, 2, 3]
df = df[df['down'].isin(used_downs)]
valid_plays = ['Pass', 'Run', 'Sack']
df = df[df['PlayType'].isin(valid_plays)]
pass_plays = ['Pass', 'Sack']
df['is_pass'] = df['PlayType'].isin(pass_plays).astype('int')
df = df[['down', 'yrdline100', 'ScoreDiff', 'PosTeamScore', 'DefTeamScore', 'ydstogo', 'TimeSecs', 'ydsnet', 'is_pass', 'Drive']]
X, test = train_test_split(df, test_size=0.2)
y = X.pop('is_pass')
test_y = test.pop('is_pass')
parameters = {}
clf = RandomForestClassifier(n_jobs=-1, oob_score=True, n_estimators=100, min_samples_leaf=12, max_features=0.8)
clf.fit(X, y) | code |
32068016/cell_9 | [
"text_html_output_2.png",
"text_html_output_1.png"
] | from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt
import numpy as np # linear algebra
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import plotly.graph_objs as go
from plotly.subplots import make_subplots
def RMSLE(pred, actual):
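    # Root Mean Squared Logarithmic Error; the +1 avoids log(0)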
return np.sqrt(np.mean(np.power(np.log(pred + 1) - np.log(actual + 1), 2)))
pd.set_option('mode.chained_assignment', None)
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
train['Province_State'] = train['Province_State'].fillna('')
test['Province_State'] = test['Province_State'].fillna('')
train['Date'] = pd.to_datetime(train['Date'])
test['Date'] = pd.to_datetime(test['Date'])
train = train.sort_values(['Country_Region', 'Province_State', 'Date'])
test = test.sort_values(['Country_Region', 'Province_State', 'Date'])
train.query('Country_Region == "US"')[0:20]
from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt
feature_day = [1, 20, 50, 100, 200, 500, 1000]
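# Each feature counts days elapsed since the region first reached the given case count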
def CreateInput(data):
feature = []
for day in feature_day:
data.loc[:, 'Number day from ' + str(day) + ' case'] = 0
if train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].count() > 0:
fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].max()
else:
fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].min()
for i in range(0, len(data)):
if data['Date'].iloc[i] > fromday:
                day_delta = data['Date'].iloc[i] - fromday
                data['Number day from ' + str(day) + ' case'].iloc[i] = day_delta.days
feature = feature + ['Number day from ' + str(day) + ' case']
return data[feature]
pred_data_all = pd.DataFrame()
for country in train['Country_Region'].unique():
for province in train[train['Country_Region'] == country]['Province_State'].unique():
df_train = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]
df_test = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
X_train = CreateInput(df_train)
y_train_confirmed = df_train['ConfirmedCases'].ravel()
y_train_fatalities = df_train['Fatalities'].ravel()
X_pred = CreateInput(df_test)
for day in sorted(feature_day, reverse=True):
feature_use = 'Number day from ' + str(day) + ' case'
idx = X_train[X_train[feature_use] == 0].shape[0]
if X_train[X_train[feature_use] > 0].shape[0] >= 20:
break
adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
adjusted_y_train_confirmed = y_train_confirmed[idx:]
adjusted_y_train_fatalities = y_train_fatalities[idx:]
idx = X_pred[X_pred[feature_use] == 0].shape[0]
adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1)
pred_data = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
max_train_date = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].max()
min_test_date = pred_data['Date'].min()
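        # Fit Holt's linear-trend smoothing and forecast only the dates beyond the
        # training window; overlapping test dates reuse the actual training values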
model = ExponentialSmoothing(adjusted_y_train_confirmed, trend='additive').fit()
y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
y_train_confirmed = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['Date'] >= min_test_date)]['ConfirmedCases'].values
y_hat_confirmed = np.concatenate((y_train_confirmed, y_hat_confirmed), axis=0)
model = ExponentialSmoothing(adjusted_y_train_fatalities, trend='additive').fit()
y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
y_train_fatalities = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['Date'] >= min_test_date)]['Fatalities'].values
y_hat_fatalities = np.concatenate((y_train_fatalities, y_hat_fatalities), axis=0)
pred_data['ConfirmedCases_hat'] = y_hat_confirmed
pred_data['Fatalities_hat'] = y_hat_fatalities
        pred_data_all = pd.concat([pred_data_all, pred_data])
df_val = pd.merge(pred_data_all, train[['Date', 'Country_Region', 'Province_State', 'ConfirmedCases', 'Fatalities']], on=['Date', 'Country_Region', 'Province_State'], how='left')
df_val.loc[df_val['Fatalities_hat'] < 0, 'Fatalities_hat'] = 0
df_val.loc[df_val['ConfirmedCases_hat'] < 0, 'ConfirmedCases_hat'] = 0
df_val_1 = df_val.copy() | code |
32068016/cell_6 | [
"text_html_output_2.png",
"text_html_output_3.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pd.set_option('mode.chained_assignment', None)
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
train['Province_State'] = train['Province_State'].fillna('')
test['Province_State'] = test['Province_State'].fillna('')
train['Date'] = pd.to_datetime(train['Date'])
test['Date'] = pd.to_datetime(test['Date'])
train = train.sort_values(['Country_Region', 'Province_State', 'Date'])
test = test.sort_values(['Country_Region', 'Province_State', 'Date'])
train.head() | code |
32068016/cell_11 | [
"text_html_output_1.png"
] | from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt
import numpy as np # linear algebra
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import plotly.express as px
import plotly.graph_objs as go
from plotly.subplots import make_subplots
def RMSLE(pred, actual):
return np.sqrt(np.mean(np.power(np.log(pred + 1) - np.log(actual + 1), 2)))
pd.set_option('mode.chained_assignment', None)
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
train['Province_State'] = train['Province_State'].fillna('')
test['Province_State'] = test['Province_State'].fillna('')
train['Date'] = pd.to_datetime(train['Date'])
test['Date'] = pd.to_datetime(test['Date'])
train = train.sort_values(['Country_Region', 'Province_State', 'Date'])
test = test.sort_values(['Country_Region', 'Province_State', 'Date'])
train.query('Country_Region == "US"')[0:20]
from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt
feature_day = [1, 20, 50, 100, 200, 500, 1000]
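# Each feature counts days elapsed since the region first reached the given case count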
def CreateInput(data):
feature = []
for day in feature_day:
data.loc[:, 'Number day from ' + str(day) + ' case'] = 0
if train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].count() > 0:
fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].max()
else:
fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].min()
for i in range(0, len(data)):
if data['Date'].iloc[i] > fromday:
                day_delta = data['Date'].iloc[i] - fromday
                data['Number day from ' + str(day) + ' case'].iloc[i] = day_delta.days
feature = feature + ['Number day from ' + str(day) + ' case']
return data[feature]
pred_data_all = pd.DataFrame()
for country in train['Country_Region'].unique():
for province in train[train['Country_Region'] == country]['Province_State'].unique():
df_train = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]
df_test = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
X_train = CreateInput(df_train)
y_train_confirmed = df_train['ConfirmedCases'].ravel()
y_train_fatalities = df_train['Fatalities'].ravel()
X_pred = CreateInput(df_test)
for day in sorted(feature_day, reverse=True):
feature_use = 'Number day from ' + str(day) + ' case'
idx = X_train[X_train[feature_use] == 0].shape[0]
if X_train[X_train[feature_use] > 0].shape[0] >= 20:
break
adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
adjusted_y_train_confirmed = y_train_confirmed[idx:]
adjusted_y_train_fatalities = y_train_fatalities[idx:]
idx = X_pred[X_pred[feature_use] == 0].shape[0]
adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1)
pred_data = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
max_train_date = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].max()
min_test_date = pred_data['Date'].min()
model = ExponentialSmoothing(adjusted_y_train_confirmed, trend='additive').fit()
y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
y_train_confirmed = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['Date'] >= min_test_date)]['ConfirmedCases'].values
y_hat_confirmed = np.concatenate((y_train_confirmed, y_hat_confirmed), axis=0)
model = ExponentialSmoothing(adjusted_y_train_fatalities, trend='additive').fit()
y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
y_train_fatalities = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['Date'] >= min_test_date)]['Fatalities'].values
y_hat_fatalities = np.concatenate((y_train_fatalities, y_hat_fatalities), axis=0)
pred_data['ConfirmedCases_hat'] = y_hat_confirmed
pred_data['Fatalities_hat'] = y_hat_fatalities
        pred_data_all = pd.concat([pred_data_all, pred_data])
df_val = pd.merge(pred_data_all, train[['Date', 'Country_Region', 'Province_State', 'ConfirmedCases', 'Fatalities']], on=['Date', 'Country_Region', 'Province_State'], how='left')
df_val.loc[df_val['Fatalities_hat'] < 0, 'Fatalities_hat'] = 0
df_val.loc[df_val['ConfirmedCases_hat'] < 0, 'ConfirmedCases_hat'] = 0
df_val_1 = df_val.copy()
country = 'Finland'
df_country = df_val[df_val['Country_Region'] == country].groupby(['Date', 'Country_Region']).sum().reset_index()
idx = df_country[(df_country['ConfirmedCases'].isnull() == False) & (df_country['ConfirmedCases'] > 0)].shape[0]
fig = px.line(df_country, x='Date', y='ConfirmedCases_hat', title='Total Cases of ' + df_country['Country_Region'].values[0])
fig = px.line(df_country, x='Date', y='Fatalities_hat', title='Total Fatalities of ' + df_country['Country_Region'].values[0])
country = 'US'
df_country = df_val[df_val['Country_Region'] == country].groupby(['Date', 'Country_Region']).sum().reset_index()
idx = df_country[(df_country['ConfirmedCases'].isnull() == False) & (df_country['ConfirmedCases'] > 0)].shape[0]
fig = px.line(df_country, x='Date', y='ConfirmedCases_hat', title='Total Cases of ' + df_country['Country_Region'].values[0])
fig.add_scatter(x=df_country['Date'][0:idx], y=df_country['ConfirmedCases'][0:idx], mode='lines', name='Actual', showlegend=False)
fig.show()
fig = px.line(df_country, x='Date', y='Fatalities_hat', title='Total Fatalities of ' + df_country['Country_Region'].values[0])
fig.add_scatter(x=df_country['Date'][0:idx], y=df_country['Fatalities'][0:idx], mode='lines', name='Actual', showlegend=False)
fig.show() | code |
32068016/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
32068016/cell_7 | [
"text_html_output_2.png",
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pd.set_option('mode.chained_assignment', None)
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
train['Province_State'] = train['Province_State'].fillna('')
test['Province_State'] = test['Province_State'].fillna('')
train['Date'] = pd.to_datetime(train['Date'])
test['Date'] = pd.to_datetime(test['Date'])
train = train.sort_values(['Country_Region', 'Province_State', 'Date'])
test = test.sort_values(['Country_Region', 'Province_State', 'Date'])
train.query('Country_Region == "US"')[0:20] | code |
32068016/cell_10 | [
"text_html_output_1.png"
] | from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt
import numpy as np # linear algebra
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import plotly.express as px
import plotly.graph_objs as go
from plotly.subplots import make_subplots
def RMSLE(pred, actual):
return np.sqrt(np.mean(np.power(np.log(pred + 1) - np.log(actual + 1), 2)))
pd.set_option('mode.chained_assignment', None)
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
train['Province_State'] = train['Province_State'].fillna('')
test['Province_State'] = test['Province_State'].fillna('')
train['Date'] = pd.to_datetime(train['Date'])
test['Date'] = pd.to_datetime(test['Date'])
train = train.sort_values(['Country_Region', 'Province_State', 'Date'])
test = test.sort_values(['Country_Region', 'Province_State', 'Date'])
train.query('Country_Region == "US"')[0:20]
from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt
feature_day = [1, 20, 50, 100, 200, 500, 1000]
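# Each feature counts days elapsed since the region first reached the given case count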
def CreateInput(data):
feature = []
for day in feature_day:
data.loc[:, 'Number day from ' + str(day) + ' case'] = 0
if train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].count() > 0:
fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].max()
else:
fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].min()
for i in range(0, len(data)):
if data['Date'].iloc[i] > fromday:
                day_delta = data['Date'].iloc[i] - fromday
                data['Number day from ' + str(day) + ' case'].iloc[i] = day_delta.days
feature = feature + ['Number day from ' + str(day) + ' case']
return data[feature]
pred_data_all = pd.DataFrame()
for country in train['Country_Region'].unique():
for province in train[train['Country_Region'] == country]['Province_State'].unique():
df_train = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]
df_test = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
X_train = CreateInput(df_train)
y_train_confirmed = df_train['ConfirmedCases'].ravel()
y_train_fatalities = df_train['Fatalities'].ravel()
X_pred = CreateInput(df_test)
for day in sorted(feature_day, reverse=True):
feature_use = 'Number day from ' + str(day) + ' case'
idx = X_train[X_train[feature_use] == 0].shape[0]
if X_train[X_train[feature_use] > 0].shape[0] >= 20:
break
adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
adjusted_y_train_confirmed = y_train_confirmed[idx:]
adjusted_y_train_fatalities = y_train_fatalities[idx:]
idx = X_pred[X_pred[feature_use] == 0].shape[0]
adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1)
pred_data = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
max_train_date = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].max()
min_test_date = pred_data['Date'].min()
model = ExponentialSmoothing(adjusted_y_train_confirmed, trend='additive').fit()
y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
y_train_confirmed = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['Date'] >= min_test_date)]['ConfirmedCases'].values
y_hat_confirmed = np.concatenate((y_train_confirmed, y_hat_confirmed), axis=0)
model = ExponentialSmoothing(adjusted_y_train_fatalities, trend='additive').fit()
y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
y_train_fatalities = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['Date'] >= min_test_date)]['Fatalities'].values
y_hat_fatalities = np.concatenate((y_train_fatalities, y_hat_fatalities), axis=0)
pred_data['ConfirmedCases_hat'] = y_hat_confirmed
pred_data['Fatalities_hat'] = y_hat_fatalities
        pred_data_all = pd.concat([pred_data_all, pred_data])
df_val = pd.merge(pred_data_all, train[['Date', 'Country_Region', 'Province_State', 'ConfirmedCases', 'Fatalities']], on=['Date', 'Country_Region', 'Province_State'], how='left')
df_val.loc[df_val['Fatalities_hat'] < 0, 'Fatalities_hat'] = 0
df_val.loc[df_val['ConfirmedCases_hat'] < 0, 'ConfirmedCases_hat'] = 0
df_val_1 = df_val.copy()
country = 'Finland'
df_country = df_val[df_val['Country_Region'] == country].groupby(['Date', 'Country_Region']).sum().reset_index()
idx = df_country[(df_country['ConfirmedCases'].isnull() == False) & (df_country['ConfirmedCases'] > 0)].shape[0]
fig = px.line(df_country, x='Date', y='ConfirmedCases_hat', title='Total Cases of ' + df_country['Country_Region'].values[0])
fig.add_scatter(x=df_country['Date'][0:idx], y=df_country['ConfirmedCases'][0:idx], mode='lines', name='Actual', showlegend=False)
fig.show()
fig = px.line(df_country, x='Date', y='Fatalities_hat', title='Total Fatalities of ' + df_country['Country_Region'].values[0])
fig.add_scatter(x=df_country['Date'][0:idx], y=df_country['Fatalities'][0:idx], mode='lines', name='Actual', showlegend=False)
fig.show() | code |
334762/cell_9 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': str, 'activity_id': str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
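# Join per-person counts of positive and negative outcomes; a missing side means zero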
hstry['positive_counts'] = hstry['positive_counts'].fillna(0).astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna(0).astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10) | code |
334762/cell_25 | [
"application_vnd.jupyter.stderr_output_1.png"
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, AdaBoostRegressor
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': str, 'activity_id': str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='outer')
hstry['positive_counts'] = hstry['positive_counts'].fillna('0').astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna('0').astype(np.int64)
hstry['profit'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
hstry.sort_values(by='negative_counts', ascending=False).head(10)
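# Bin net outcome into 4 ordinal classes: 1 (profit > 5), 2 (1..5), 3 (-5..0), 4 (< -5)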
hstry['prof_label'] = pd.to_numeric(hstry['profit'] < -5).astype(int) * 4 + pd.to_numeric(hstry['profit'].isin(range(-5, 1))).astype(int) * 3 + pd.to_numeric(hstry['profit'].isin(range(1, 6))).astype(int) * 2 + pd.to_numeric(hstry['profit'] > 5).astype(int) * 1
people2 = pd.merge(people, hstry, on='people_id', how='inner')
people2['positive_counts'] = people2['positive_counts'].fillna(0).astype(np.int64)
people2['negative_counts'] = people2['negative_counts'].fillna(0).astype(np.int64)
people2['profit'] = people2['profit'].fillna(0).astype(np.int64)
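# Feature set: all people columns except the id and the engineered outcome columns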
xfeats = list(people2.columns)
xfeats.remove('people_id')
xfeats.remove('profit')
xfeats.remove('prof_label')
xfeats.remove('positive_counts')
xfeats.remove('negative_counts')
X, Y = (people2[xfeats], people2['prof_label'])
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
clf = RandomForestRegressor(n_estimators=50)
clf.fit(X_train, y_train)
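# Rank features by importance and keep the top five (sortedfeats is ascending)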
sortedfeats = sorted(zip(xfeats, clf.feature_importances_), key=lambda x: x[1])
newfeats = []
for i in range(1, 6):
newfeats.append(sortedfeats[len(sortedfeats) - i])
newfeats = [x[0] for x in newfeats]
print(newfeats) | code |
334762/cell_4 | [
"text_plain_output_5.png",
"text_plain_output_9.png",
"application_vnd.jupyter.stderr_output_4.png",
"text_plain_output_10.png",
"text_plain_output_6.png",
"text_plain_output_3.png",
"text_plain_output_7.png",
"text_plain_output_8.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"text_plain_output_11.png",
"text_plain_output_12.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': str, 'activity_id': str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': str, 'activity_id': str, 'outcome': np.int8}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
plt.legend()
plt.show() | code |