path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses, 1 value) |
---|---|---|---|
16136283/cell_14 | [
"text_plain_output_1.png"
] | X_train.shape | code |
16136283/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/Amazon_Unlocked_Mobile.csv')
df.dropna(inplace=True)
df = df[df['Rating'] != 3]
df.head() | code |
16136283/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/Amazon_Unlocked_Mobile.csv')
df.describe() | code |
2016758/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from keras.applications.vgg16 import VGG16
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Input, Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
np.random.seed(7) | code |
2016758/cell_5 | [
"text_plain_output_1.png"
] | from keras.applications.vgg16 import VGG16
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Input, Dense, Dropout, Flatten
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from keras.applications.vgg16 import VGG16
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers import Input, Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
np.random.seed(7)
def make_df(path, mode):
"""
params
--------
path(str): path to json
mode(str): "train" or "test"
outputs
--------
X(np.array): list of images shape=(None, 75, 75, 3)
Y(np.array): list of labels shape=(None,)
df(pd.DataFrame): data frame from json
"""
df = pd.read_json(path)
df.inc_angle = df.inc_angle.replace('na', 0)
X = _get_scaled_imgs(df)
if mode == 'test':
return (X, df)
Y = np.array(df['is_iceberg'])
idx_tr = np.where(df.inc_angle > 0)
X = X[idx_tr[0]]
Y = Y[idx_tr[0], ...]
return (X, Y)
def _get_scaled_imgs(df):
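# Stack each sample's two 75x75 radar bands plus their sum into a 3-channel array, scaling each channel by (x - mean) / (max - min).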
imgs = []
for i, row in df.iterrows():
band_1 = np.array(row['band_1']).reshape(75, 75)
band_2 = np.array(row['band_2']).reshape(75, 75)
band_3 = band_1 + band_2
a = (band_1 - band_1.mean()) / (band_1.max() - band_1.min())
b = (band_2 - band_2.mean()) / (band_2.max() - band_2.min())
c = (band_3 - band_3.mean()) / (band_3.max() - band_3.min())
imgs.append(np.dstack((a, b, c)))
return np.array(imgs)
def SmallCNN():
model = Sequential()
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=(75, 75, 3)))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.3))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
return model
def Vgg16():
input_tensor = Input(shape=(75, 75, 3))
vgg16 = VGG16(include_top=False, weights='imagenet', input_tensor=input_tensor)
top_model = Sequential()
top_model.add(Flatten(input_shape=vgg16.output_shape[1:]))
top_model.add(Dense(512, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(1, activation='sigmoid'))
model = Model(input=vgg16.input, output=top_model(vgg16.output))
for layer in model.layers[:13]:
layer.trainable = False
return model
if __name__ == '__main__':
x, y = make_df('../input/train.json', 'train')
xtr, xval, ytr, yval = train_test_split(x, y, test_size=0.25, random_state=7)
model = SmallCNN()
optimizer = Adam(lr=0.001, decay=0.0)
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
earlyStopping = EarlyStopping(monitor='val_loss', patience=20, verbose=0, mode='min')
ckpt = ModelCheckpoint('.model.hdf5', save_best_only=True, monitor='val_loss', mode='min')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7, verbose=1, epsilon=0.0001, mode='min')
gen = ImageDataGenerator(horizontal_flip=True, vertical_flip=True, width_shift_range=0, height_shift_range=0, channel_shift_range=0, zoom_range=0.2, rotation_range=10)
gen.fit(xtr)
model.fit_generator(gen.flow(xtr, ytr, batch_size=32), steps_per_epoch=len(xtr), epochs=1, callbacks=[earlyStopping, ckpt, reduce_lr_loss], validation_data=(xval, yval))
model.load_weights(filepath='.model.hdf5')
score = model.evaluate(xtr, ytr, verbose=1)
print('Train score:', score[0], 'Train accuracy:', score[1])
xtest, df_test = make_df('../input/test.json', 'test')
pred_test = model.predict(xtest)
pred_test = pred_test.reshape(pred_test.shape[0])
submission = pd.DataFrame({'id': df_test['id'], 'is_iceberg': pred_test})
submission.to_csv('submission.csv', index=False) | code |
32062145/cell_21 | [
"text_plain_output_1.png"
] | from six.moves import xrange
import collections
import math
import numpy as np
import os
import random
import tensorflow as tf
import tensorflow.compat.v1 as tf
folder_dir = '/kaggle/input/108-2-ntut-dl-app-hw2'
filename = 'tag_list.txt'
vocabulary_size = 990
batch_size = 128
embedding_size = 64
skip_window = 1
num_skips = 2
valid_size = 32
valid_window = 200
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
file_path = os.path.join(folder_dir, filename)
with open(file_path, 'r', encoding='utf-8') as f:
words = f.read().split()
word_count = [['UNK', -1]]
word_count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in word_count:
dictionary[word] = len(dictionary)
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0
unk_count += 1
data.append(index)
word_count[0][1] = unk_count
data_index = 0
def generate_batch(batch_size, num_skips, skip_window):
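# Skip-gram batcher: slide a window of span 2*skip_window+1 over data and, for each center word, emit num_skips (center, context) pairs.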
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=batch_size, dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return (batch, labels)
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
with tf.variable_scope('EMBEDDING'):
with tf.device('/cpu:0'):
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
with tf.variable_scope('NCE_WEIGHT'):
nce_weights = tf.Variable(tf.truncated_normal([vocabulary_size, embedding_size], stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
with tf.device('/cpu:0'):
num_sampled = 64
loss = tf.reduce_mean(tf.nn.nce_loss(weights=nce_weights, biases=nce_biases, labels=train_labels, inputs=embed, num_sampled=num_sampled, num_classes=vocabulary_size))
optm = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
siml = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)
sess = tf.Session()
sess.run(tf.initialize_all_variables())
average_loss = 0
num_steps = 10001
for iter in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
_, loss_val = sess.run([optm, loss], feed_dict=feed_dict)
average_loss += loss_val
if iter % 2000 == 0:
average_loss /= 2000
if iter % 10000 == 0:
siml_val = sess.run(siml)
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 6
nearest = (-siml_val[i, :]).argsort()[1:top_k + 1]
log_str = "Nearest to '%s':" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s '%s'," % (log_str, close_word)
final_embeddings = sess.run(normalized_embeddings)
np.savez(filename[0:-4] + '_word2vec_' + str(embedding_size), word_count=word_count, dictionary=dictionary, reverse_dictionary=reverse_dictionary, word_embeddings=final_embeddings)
K = 10
target = 'drunk'
scores = final_embeddings[dictionary[target]].dot(final_embeddings.transpose())
scores = scores / np.linalg.norm(final_embeddings, axis=1)
k_neighbors = (-scores).argsort()[0:K + 1]
print('The nearest neighbors of', target, 'are:')
for k in k_neighbors:
print(reverse_dictionary[k], ' ', scores[k]) | code |
32062145/cell_23 | [
"text_plain_output_1.png"
] | from IPython.display import FileLink
from IPython.display import FileLink
from IPython.display import FileLink
FileLink('meta.tsv') | code |
32062145/cell_26 | [
"text_html_output_1.png"
] | from IPython.display import FileLink
from IPython.display import FileLink
from IPython.display import FileLink
folder_dir = '/kaggle/input/108-2-ntut-dl-app-hw2'
filename = 'tag_list.txt'
vocabulary_size = 990
batch_size = 128
embedding_size = 64
skip_window = 1
num_skips = 2
from IPython.display import FileLink
FileLink(filename[0:-4] + '_word2vec_' + str(embedding_size) + '.npz') | code |
32062145/cell_11 | [
"text_plain_output_1.png"
] | import collections
import os
folder_dir = '/kaggle/input/108-2-ntut-dl-app-hw2'
filename = 'tag_list.txt'
vocabulary_size = 990
file_path = os.path.join(folder_dir, filename)
with open(file_path, 'r', encoding='utf-8') as f:
words = f.read().split()
word_count = [['UNK', -1]]
word_count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
print('Most common words (+UNK) are: %s' % word_count[:10]) | code |
32062145/cell_7 | [
"text_html_output_1.png"
] | import os
folder_dir = '/kaggle/input/108-2-ntut-dl-app-hw2'
filename = 'tag_list.txt'
vocabulary_size = 990
file_path = os.path.join(folder_dir, filename)
with open(file_path, 'r', encoding='utf-8') as f:
words = f.read().split()
words | code |
32062145/cell_18 | [
"text_plain_output_1.png"
] | from six.moves import xrange
from sklearn.manifold import TSNE
import collections
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import random
import tensorflow as tf
import tensorflow.compat.v1 as tf
folder_dir = '/kaggle/input/108-2-ntut-dl-app-hw2'
filename = 'tag_list.txt'
vocabulary_size = 990
batch_size = 128
embedding_size = 64
skip_window = 1
num_skips = 2
valid_size = 32
valid_window = 200
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
file_path = os.path.join(folder_dir, filename)
with open(file_path, 'r', encoding='utf-8') as f:
words = f.read().split()
word_count = [['UNK', -1]]
word_count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in word_count:
dictionary[word] = len(dictionary)
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0
unk_count += 1
data.append(index)
word_count[0][1] = unk_count
data_index = 0
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=batch_size, dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return (batch, labels)
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
with tf.variable_scope('EMBEDDING'):
with tf.device('/cpu:0'):
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
with tf.variable_scope('NCE_WEIGHT'):
nce_weights = tf.Variable(tf.truncated_normal([vocabulary_size, embedding_size], stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
with tf.device('/cpu:0'):
num_sampled = 64
loss = tf.reduce_mean(tf.nn.nce_loss(weights=nce_weights, biases=nce_biases, labels=train_labels, inputs=embed, num_sampled=num_sampled, num_classes=vocabulary_size))
optm = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
siml = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)
sess = tf.Session()
sess.run(tf.initialize_all_variables())
average_loss = 0
num_steps = 10001
for iter in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
_, loss_val = sess.run([optm, loss], feed_dict=feed_dict)
average_loss += loss_val
if iter % 2000 == 0:
average_loss /= 2000
if iter % 10000 == 0:
siml_val = sess.run(siml)
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 6
nearest = (-siml_val[i, :]).argsort()[1:top_k + 1]
log_str = "Nearest to '%s':" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s '%s'," % (log_str, close_word)
final_embeddings = sess.run(normalized_embeddings)
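# Reduce the first num_points normalized embeddings to 2-D with t-SNE and scatter-plot them with their word labels.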
num_points = 100
tsne = TSNE(perplexity=10, n_components=2, init='pca', n_iter=5000)
two_d_embeddings = tsne.fit_transform(final_embeddings[1:num_points + 1, :])
def plot(embeddings, labels):
assert embeddings.shape[0] >= len(labels), 'More labels than embeddings'
plt.figure(figsize=(15, 15))
for i, label in enumerate(labels):
x, y = embeddings[i, :]
plt.scatter(x, y, color=['blue'])
plt.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom')
plt.show()
words = [reverse_dictionary[i] for i in range(1, num_points + 1)]
plot(two_d_embeddings, words) | code |
32062145/cell_8 | [
"text_html_output_1.png"
] | import collections
import os
folder_dir = '/kaggle/input/108-2-ntut-dl-app-hw2'
filename = 'tag_list.txt'
vocabulary_size = 990
file_path = os.path.join(folder_dir, filename)
with open(file_path, 'r', encoding='utf-8') as f:
words = f.read().split()
word_count = [['UNK', -1]]
word_count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
print('%s' % word_count[0:10]) | code |
32062145/cell_16 | [
"text_plain_output_1.png"
] | import math
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf
folder_dir = '/kaggle/input/108-2-ntut-dl-app-hw2'
filename = 'tag_list.txt'
vocabulary_size = 990
batch_size = 128
embedding_size = 64
skip_window = 1
num_skips = 2
valid_size = 32
valid_window = 200
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
with tf.variable_scope('EMBEDDING'):
with tf.device('/cpu:0'):
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
with tf.variable_scope('NCE_WEIGHT'):
nce_weights = tf.Variable(tf.truncated_normal([vocabulary_size, embedding_size], stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
with tf.device('/cpu:0'):
num_sampled = 64
loss = tf.reduce_mean(tf.nn.nce_loss(weights=nce_weights, biases=nce_biases, labels=train_labels, inputs=embed, num_sampled=num_sampled, num_classes=vocabulary_size))
optm = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
siml = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)
print(normalized_embeddings.shape) | code |
32062145/cell_17 | [
"text_plain_output_1.png"
] | from six.moves import xrange
import collections
import math
import numpy as np
import os
import random
import tensorflow as tf
import tensorflow.compat.v1 as tf
folder_dir = '/kaggle/input/108-2-ntut-dl-app-hw2'
filename = 'tag_list.txt'
vocabulary_size = 990
batch_size = 128
embedding_size = 64
skip_window = 1
num_skips = 2
valid_size = 32
valid_window = 200
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
file_path = os.path.join(folder_dir, filename)
with open(file_path, 'r', encoding='utf-8') as f:
words = f.read().split()
word_count = [['UNK', -1]]
word_count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in word_count:
dictionary[word] = len(dictionary)
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0
unk_count += 1
data.append(index)
word_count[0][1] = unk_count
data_index = 0
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=batch_size, dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return (batch, labels)
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
with tf.variable_scope('EMBEDDING'):
with tf.device('/cpu:0'):
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
with tf.variable_scope('NCE_WEIGHT'):
nce_weights = tf.Variable(tf.truncated_normal([vocabulary_size, embedding_size], stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
with tf.device('/cpu:0'):
num_sampled = 64
loss = tf.reduce_mean(tf.nn.nce_loss(weights=nce_weights, biases=nce_biases, labels=train_labels, inputs=embed, num_sampled=num_sampled, num_classes=vocabulary_size))
optm = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
siml = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)
sess = tf.Session()
sess.run(tf.initialize_all_variables())
average_loss = 0
num_steps = 10001
for iter in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
_, loss_val = sess.run([optm, loss], feed_dict=feed_dict)
average_loss += loss_val
if iter % 2000 == 0:
average_loss /= 2000
print('Average loss at step %d is %.3f' % (iter, average_loss))
average_loss = 0  # reset the running sum so each report covers only the last 2000 steps
if iter % 10000 == 0:
siml_val = sess.run(siml)
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 6
nearest = (-siml_val[i, :]).argsort()[1:top_k + 1]
log_str = "Nearest to '%s':" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s '%s'," % (log_str, close_word)
print(log_str)
final_embeddings = sess.run(normalized_embeddings) | code |
32062145/cell_22 | [
"image_output_1.png"
] | from IPython.display import FileLink
from six.moves import xrange
import collections
import math
import numpy as np
import os
import random
import tensorflow as tf
import tensorflow.compat.v1 as tf
folder_dir = '/kaggle/input/108-2-ntut-dl-app-hw2'
filename = 'tag_list.txt'
vocabulary_size = 990
batch_size = 128
embedding_size = 64
skip_window = 1
num_skips = 2
valid_size = 32
valid_window = 200
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
file_path = os.path.join(folder_dir, filename)
with open(file_path, 'r', encoding='utf-8') as f:
words = f.read().split()
word_count = [['UNK', -1]]
word_count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in word_count:
dictionary[word] = len(dictionary)
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0
unk_count += 1
data.append(index)
word_count[0][1] = unk_count
data_index = 0
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=batch_size, dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return (batch, labels)
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
with tf.variable_scope('EMBEDDING'):
with tf.device('/cpu:0'):
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
with tf.variable_scope('NCE_WEIGHT'):
nce_weights = tf.Variable(tf.truncated_normal([vocabulary_size, embedding_size], stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
with tf.device('/cpu:0'):
num_sampled = 64
loss = tf.reduce_mean(tf.nn.nce_loss(weights=nce_weights, biases=nce_biases, labels=train_labels, inputs=embed, num_sampled=num_sampled, num_classes=vocabulary_size))
optm = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
siml = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)
sess = tf.Session()
sess.run(tf.initialize_all_variables())
average_loss = 0
num_steps = 10001
for iter in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
_, loss_val = sess.run([optm, loss], feed_dict=feed_dict)
average_loss += loss_val
if iter % 2000 == 0:
average_loss /= 2000
if iter % 10000 == 0:
siml_val = sess.run(siml)
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 6
nearest = (-siml_val[i, :]).argsort()[1:top_k + 1]
log_str = "Nearest to '%s':" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s '%s'," % (log_str, close_word)
final_embeddings = sess.run(normalized_embeddings)
np.savez(filename[0:-4] + '_word2vec_' + str(embedding_size), word_count=word_count, dictionary=dictionary, reverse_dictionary=reverse_dictionary, word_embeddings=final_embeddings)
K = 10
target = 'drunk'
scores = final_embeddings[dictionary[target]].dot(final_embeddings.transpose())
scores = scores / np.linalg.norm(final_embeddings, axis=1)
k_neighbors = (-scores).argsort()[0:K + 1]
out_v = open('vecs.tsv', 'w', encoding='utf-8')
out_m = open('meta.tsv', 'w', encoding='utf-8')
for num, word in enumerate(dictionary):
vec = final_embeddings[num]
out_m.write(word + '\n')
out_v.write('\t'.join([str(x) for x in vec]) + '\n')
out_v.close()
out_m.close()
from IPython.display import FileLink
FileLink('vecs.tsv') | code |
32062145/cell_12 | [
"text_plain_output_1.png"
] | import collections
import os
folder_dir = '/kaggle/input/108-2-ntut-dl-app-hw2'
filename = 'tag_list.txt'
vocabulary_size = 990
file_path = os.path.join(folder_dir, filename)
with open(file_path, 'r', encoding='utf-8') as f:
words = f.read().split()
word_count = [['UNK', -1]]
word_count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in word_count:
dictionary[word] = len(dictionary)
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0
unk_count += 1
data.append(index)
word_count[0][1] = unk_count
print('Sample data corresponds to\n__________________')
for i in range(10):
print('%d->%s' % (data[i], reverse_dictionary[data[i]])) | code |
32071559/cell_2 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
import pandas as pd
import numpy as np
from keras import backend as K
from keras.models import Model
from keras.callbacks import EarlyStopping
from keras.layers import Input, LSTM, Dense, BatchNormalization, Lambda, Flatten, Reshape
from sklearn.preprocessing import MinMaxScaler
from keras.layers.normalization import BatchNormalization
from keras import backend as K
from keras.layers import Input, Dense, Flatten, Dropout, Lambda, TimeDistributed, Permute, RepeatVector, LSTM, GRU, Add, Concatenate, Reshape, Multiply, merge, Dot, Activation, concatenate, dot, Subtract
from keras.initializers import Identity
from keras.activations import sigmoid
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import Conv1D
from keras.models import Sequential, Model
from keras.optimizers import Adam, SGD, RMSprop
from sklearn.neighbors import KernelDensity
from scipy.stats import ks_2samp, trim_mean, shapiro, normaltest, anderson
from keras.losses import mse, binary_crossentropy, sparse_categorical_crossentropy
from keras import backend as K
import matplotlib.pyplot as plt | code |
32071559/cell_11 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping
from keras.initializers import Identity
from keras.layers import Input, Dense, Flatten, Dropout, Lambda, TimeDistributed, Permute, RepeatVector, LSTM, GRU, Add, Concatenate, Reshape, Multiply, merge, Dot, Activation, concatenate, dot, Subtract
from keras.layers import Input, LSTM, Dense, BatchNormalization, Lambda, Flatten, Reshape
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.models import Sequential, Model
from keras.optimizers import Adam, SGD, RMSprop
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def read_data():
data = pd.read_csv('../input/datatrain4/train4.csv')
data = data.values
return data
data = read_data()
def handle_country_text(data):
stats = list(np.unique(data[:, 2]))
for idx, d in enumerate(data):
country = d[2]
id = stats.index(country)
d[2] = id
return (stats, data)
def create_sequences(data, stats):
sequences = []
to_compute = []
for idx, s in enumerate(stats):
seq = data[data[:, 2] == idx]
if pd.isnull(seq[0, 1]):
seq = np.delete(seq, [1], 1)
else:
to_compute.append(seq)
stats_p = list(np.unique(seq[:, 1]))
for idx2, s2 in enumerate(stats_p):
seqs2 = seq[seq[:, 1] == s2]
seqs2 = np.delete(seqs2, [0, 1, 3], 1)
for idx, value in enumerate(reversed(seqs2[:, 1:])):
if idx + 1 < len(seqs2):
cases = value[0] - seqs2[-(idx + 2), 1]
deaths = value[1] - seqs2[-(idx + 2), 2]
seqs2[-(idx + 1), 1] = cases
seqs2[-(idx + 1), 2] = deaths
offset = float(idx2) / 10
seqs2[:, 0] = seqs2[:, 0] + offset
sequences.append(seqs2)
continue
seq = np.delete(seq, [0, 2], 1)
for idx, value in enumerate(reversed(seq[:, 1:])):
if idx + 1 < len(seq):
cases = value[0] - seq[-(idx + 2), 1]
deaths = value[1] - seq[-(idx + 2), 2]
seq[-(idx + 1), 1] = cases
seq[-(idx + 1), 2] = deaths
sequences.append(seq)
return np.array(sequences)
stats, data = handle_country_text(data)  # map country names to integer ids before grouping into per-country sequences
sequences = create_sequences(data, stats)
sequences = np.array(sequences)
sequences_train = np.delete(sequences, [0], 2)
sequences_train = np.array(sequences_train)
def dain(input):
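# DAIN-style adaptive normalization: learn a per-feature shift (adaptive mean), scale (adaptive std) and sigmoid gate over the time axis; the learned mean and std are returned so outputs can be de-normalized later.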
n_features = 2
mean = Lambda(lambda x: K.mean(input, axis=1))(input)
adaptive_avg = Dense(n_features, kernel_initializer=Identity(gain=1.0), bias=False)(mean)
adaptive_avg = Reshape((1, n_features))(adaptive_avg)
X = Lambda(lambda inputs: inputs[0] - inputs[1])([input, adaptive_avg])
std = Lambda(lambda x: K.mean(x ** 2, axis=1))(X)
std = Lambda(lambda x: K.sqrt(x + 1e-08))(std)
adaptive_std = Dense(n_features, bias=False)(std)
adaptive_std = Reshape((1, n_features))(adaptive_std)
X = Lambda(lambda inputs: inputs[0] / inputs[1])([X, adaptive_std])
avg = Lambda(lambda x: K.mean(x, axis=1))(X)
gate = Dense(n_features, activation='sigmoid', kernel_initializer=Identity(gain=1.0), bias=False)(avg)
gate = Reshape((1, n_features))(gate)
X = Lambda(lambda inputs: inputs[0] * inputs[1])([X, gate])
return (X, adaptive_avg, adaptive_std)
def build_generator(encoder_input_shape, missing_len, verbose=True):
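# Seq2seq forecaster: DAIN-normalized inputs feed an LSTM encoder; a decoder LSTM is unrolled missing_len steps, and each step's case/death outputs are rescaled with the adaptive std and mean before being returned.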
learning_rate = 0.0002
optimizer = Adam(lr=learning_rate)
generator_decoder_type = 'seq2seq'
encoder_inputs = Input(shape=encoder_input_shape)
hidden, avg, std = dain(encoder_inputs)
decoder_outputs = []
encoder = LSTM(128, return_sequences=True, return_state=True)
lstm_outputs, state_h, state_c = encoder(hidden)
if generator_decoder_type == 'seq2seq':
states = [state_h, state_c]
decoder_lstm = LSTM(128, return_sequences=True, return_state=True)
decoder_cases = Dense(1, activation='relu')
decoder_deaths = Dense(1, activation='relu')
all_outputs_c = []
all_outputs_d = []
inputs = lstm_outputs
for idx in range(missing_len):
outputs, state_h, state_c = decoder_lstm(inputs, initial_state=states)
inputs = outputs
outputs = BatchNormalization()(outputs)
outputs = Flatten()(outputs)
outputs_cases = decoder_cases(outputs)
outputs_deaths = decoder_deaths(outputs)
states = [state_h, state_c]
std_c = Lambda(lambda inputs: inputs[:, 0, 0])(std)
avg_c = Lambda(lambda inputs: inputs[:, 0, 0])(avg)
outputs_cases = Multiply()([outputs_cases, std_c])
outputs_cases = Add()([outputs_cases, avg_c])
std_d = Lambda(lambda inputs: inputs[:, 0, 1])(std)
avg_d = Lambda(lambda inputs: inputs[:, 0, 1])(avg)
outputs_deaths = Multiply()([outputs_deaths, std_d])
outputs_deaths = Add()([outputs_deaths, avg_d])
all_outputs_c.append(outputs_cases)
all_outputs_d.append(outputs_deaths)
decoder_outputs_c = Lambda(lambda x: x)(outputs_cases)
decoder_outputs_d = Lambda(lambda x: x)(outputs_deaths)
model = Model(inputs=encoder_inputs, outputs=[decoder_outputs_c, decoder_outputs_d])
model.compile(loss='mean_squared_logarithmic_error', optimizer=optimizer)
return model
given = 80
missing = 1
total_missing = 33
model = build_generator(sequences_train[:, :given, :].shape[1:], missing)
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=30)
history = model.fit(x=sequences_train[:, :given, :], y=[sequences_train[:, given:, 0], sequences_train[:, given:, 1]], epochs=1, validation_split=0.2, shuffle=True, callbacks=[es])
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.savefig('plots/losses.png')
plt.close() | code |
32071559/cell_16 | [
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping
from keras.initializers import Identity
from keras.layers import Input, Dense, Flatten, Dropout, Lambda, TimeDistributed, Permute, RepeatVector, LSTM, GRU, Add, Concatenate, Reshape, Multiply, merge, Dot, Activation, concatenate, dot, Subtract
from keras.layers import Input, LSTM, Dense, BatchNormalization, Lambda, Flatten, Reshape
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.models import Sequential, Model
from keras.optimizers import Adam, SGD, RMSprop
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def read_data():
data = pd.read_csv('../input/datatrain4/train4.csv')
data = data.values
return data
data = read_data()
def handle_country_text(data):
stats = list(np.unique(data[:, 2]))
for idx, d in enumerate(data):
country = d[2]
id = stats.index(country)
d[2] = id
return (stats, data)
def create_sequences(data, stats):
sequences = []
to_compute = []
for idx, s in enumerate(stats):
seq = data[data[:, 2] == idx]
if pd.isnull(seq[0, 1]):
seq = np.delete(seq, [1], 1)
else:
to_compute.append(seq)
stats_p = list(np.unique(seq[:, 1]))
for idx2, s2 in enumerate(stats_p):
seqs2 = seq[seq[:, 1] == s2]
seqs2 = np.delete(seqs2, [0, 1, 3], 1)
for idx, value in enumerate(reversed(seqs2[:, 1:])):
if idx + 1 < len(seqs2):
cases = value[0] - seqs2[-(idx + 2), 1]
deaths = value[1] - seqs2[-(idx + 2), 2]
seqs2[-(idx + 1), 1] = cases
seqs2[-(idx + 1), 2] = deaths
offset = float(idx2) / 10
seqs2[:, 0] = seqs2[:, 0] + offset
sequences.append(seqs2)
continue
seq = np.delete(seq, [0, 2], 1)
for idx, value in enumerate(reversed(seq[:, 1:])):
if idx + 1 < len(seq):
cases = value[0] - seq[-(idx + 2), 1]
deaths = value[1] - seq[-(idx + 2), 2]
seq[-(idx + 1), 1] = cases
seq[-(idx + 1), 2] = deaths
sequences.append(seq)
return np.array(sequences)
stats, data = handle_country_text(data)  # map country names to integer ids before grouping into per-country sequences
sequences = create_sequences(data, stats)
sequences = np.array(sequences)
sequences_train = np.delete(sequences, [0], 2)
sequences_train = np.array(sequences_train)
def dain(input):
n_features = 2
mean = Lambda(lambda x: K.mean(input, axis=1))(input)
adaptive_avg = Dense(n_features, kernel_initializer=Identity(gain=1.0), bias=False)(mean)
adaptive_avg = Reshape((1, n_features))(adaptive_avg)
X = Lambda(lambda inputs: inputs[0] - inputs[1])([input, adaptive_avg])
std = Lambda(lambda x: K.mean(x ** 2, axis=1))(X)
std = Lambda(lambda x: K.sqrt(x + 1e-08))(std)
adaptive_std = Dense(n_features, bias=False)(std)
adaptive_std = Reshape((1, n_features))(adaptive_std)
X = Lambda(lambda inputs: inputs[0] / inputs[1])([X, adaptive_std])
avg = Lambda(lambda x: K.mean(x, axis=1))(X)
gate = Dense(n_features, activation='sigmoid', kernel_initializer=Identity(gain=1.0), bias=False)(avg)
gate = Reshape((1, n_features))(gate)
X = Lambda(lambda inputs: inputs[0] * inputs[1])([X, gate])
return (X, adaptive_avg, adaptive_std)
def build_generator(encoder_input_shape, missing_len, verbose=True):
learning_rate = 0.0002
optimizer = Adam(lr=learning_rate)
generator_decoder_type = 'seq2seq'
encoder_inputs = Input(shape=encoder_input_shape)
hidden, avg, std = dain(encoder_inputs)
decoder_outputs = []
encoder = LSTM(128, return_sequences=True, return_state=True)
lstm_outputs, state_h, state_c = encoder(hidden)
if generator_decoder_type == 'seq2seq':
states = [state_h, state_c]
decoder_lstm = LSTM(128, return_sequences=True, return_state=True)
decoder_cases = Dense(1, activation='relu')
decoder_deaths = Dense(1, activation='relu')
all_outputs_c = []
all_outputs_d = []
inputs = lstm_outputs
for idx in range(missing_len):
outputs, state_h, state_c = decoder_lstm(inputs, initial_state=states)
inputs = outputs
outputs = BatchNormalization()(outputs)
outputs = Flatten()(outputs)
outputs_cases = decoder_cases(outputs)
outputs_deaths = decoder_deaths(outputs)
states = [state_h, state_c]
std_c = Lambda(lambda inputs: inputs[:, 0, 0])(std)
avg_c = Lambda(lambda inputs: inputs[:, 0, 0])(avg)
outputs_cases = Multiply()([outputs_cases, std_c])
outputs_cases = Add()([outputs_cases, avg_c])
std_d = Lambda(lambda inputs: inputs[:, 0, 1])(std)
avg_d = Lambda(lambda inputs: inputs[:, 0, 1])(avg)
outputs_deaths = Multiply()([outputs_deaths, std_d])
outputs_deaths = Add()([outputs_deaths, avg_d])
all_outputs_c.append(outputs_cases)
all_outputs_d.append(outputs_deaths)
decoder_outputs_c = Lambda(lambda x: x)(outputs_cases)
decoder_outputs_d = Lambda(lambda x: x)(outputs_deaths)
model = Model(inputs=encoder_inputs, outputs=[decoder_outputs_c, decoder_outputs_d])
model.compile(loss='mean_squared_logarithmic_error', optimizer=optimizer)
return model
given = 80
missing = 1
total_missing = 33
model = build_generator(sequences_train[:, :given, :].shape[1:], missing)
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=30)
history = model.fit(x=sequences_train[:, :given, :], y=[sequences_train[:, given:, 0], sequences_train[:, given:, 1]], epochs=1, validation_split=0.2, shuffle=True, callbacks=[es])
plt.close()
def backtest2(sequences, model, given, missing):
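# Recursive forecast: predict one step ahead, append the prediction to the input window, and repeat for missing steps; the predicted daily deltas are then cumsum'd back into cumulative totals.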
sequences_test = sequences[:, -given:]
pred_d = []
pred_c = []
for i in range(0, missing):
predictions = model.predict(sequences_test[:, :])
predictions[0][predictions[0] < 0] = 0
predictions[1][predictions[1] < 0] = 0
predictions[1] = np.around(predictions[1].astype(np.double))
predictions[0] = np.around(predictions[0].astype(np.double))
pred = np.concatenate([np.expand_dims(predictions[0], axis=2), np.expand_dims(predictions[1], axis=2)], axis=2)
pred_c.append(pred)
pred_d.append(predictions[1])
sequences_test = np.concatenate([sequences_test[:, 1:], pred], axis=1)
predictions = np.array(pred_c[0])
for i in range(1, len(pred_c)):
predictions = np.concatenate([predictions, pred_c[i]], axis=1)
seq_cases = sequences[:, :, 0]
seq_death = sequences[:, :, 1]
death = np.cumsum(seq_death, axis=1)
cases = np.cumsum(seq_cases, axis=1)
cases = np.around(cases.astype(np.double))
cases[cases < 0] = 0
cases_csv = np.expand_dims(cases[:, -1], axis=1)
predictions[0] = np.around(predictions[0].astype(np.double))
cases_csv = np.concatenate((cases_csv, predictions[:, :, 0]), axis=1)
death = np.around(death.astype(np.double))
death[death < 0] = 0
death_csv = np.expand_dims(death[:, -1], axis=1)
predictions[1] = np.around(predictions[1].astype(np.double))
death_csv = np.concatenate((death_csv, predictions[:, :, 1]), axis=1)
cases_csv = np.cumsum(cases_csv, axis=1)
death_csv = np.cumsum(death_csv, axis=1)
death_csv = death_csv[:, 1:]
cases_csv = cases_csv[:, 1:]
death_csv = np.concatenate((death[:, -11:], death_csv), axis=1)
cases_csv = np.concatenate((cases[:, -11:], cases_csv), axis=1)
csv = []
cases_csv = np.reshape(cases_csv[:, 1:], (-1, 1))
death_csv = np.reshape(death_csv[:, 1:], (-1, 1))
j = 1
for idx, (c, d) in enumerate(zip(cases_csv, death_csv)):
csv.append([j, c, d])
j += 1
sub = pd.read_csv('../input/result2w4/submission.csv', header=None, dtype=np.float32)
sub = pd.DataFrame(sub.values, columns=['ForecastId', 'ConfirmedCases', 'Fatalities'])
sub.ConfirmedCases.astype(np.double)
sub.Fatalities.astype(np.double)
sub.ForecastId = sub.ForecastId.astype(np.int)
sub.to_csv('submission.csv', index=False)
print('done') | code |
32071559/cell_14 | [
"application_vnd.jupyter.stderr_output_3.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | from keras.callbacks import EarlyStopping
from keras.initializers import Identity
from keras.layers import Input, Dense, Flatten, Dropout, Lambda, TimeDistributed, Permute, RepeatVector, LSTM, GRU, Add, Concatenate, Reshape, Multiply, merge, Dot, Activation, concatenate, dot, Subtract
from keras.layers import Input, LSTM, Dense, BatchNormalization, Lambda, Flatten, Reshape
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.models import Sequential, Model
from keras.optimizers import Adam, SGD, RMSprop
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def read_data():
data = pd.read_csv('../input/datatrain4/train4.csv')
data = data.values
return data
data = read_data()
def handle_country_text(data):
stats = list(np.unique(data[:, 2]))
for idx, d in enumerate(data):
country = d[2]
id = stats.index(country)
d[2] = id
return (stats, data)
def create_sequences(data, stats):
sequences = []
to_compute = []
for idx, s in enumerate(stats):
seq = data[data[:, 2] == idx]
if pd.isnull(seq[0, 1]):
seq = np.delete(seq, [1], 1)
else:
to_compute.append(seq)
stats_p = list(np.unique(seq[:, 1]))
for idx2, s2 in enumerate(stats_p):
seqs2 = seq[seq[:, 1] == s2]
seqs2 = np.delete(seqs2, [0, 1, 3], 1)
for idx, value in enumerate(reversed(seqs2[:, 1:])):
if idx + 1 < len(seqs2):
cases = value[0] - seqs2[-(idx + 2), 1]
deaths = value[1] - seqs2[-(idx + 2), 2]
seqs2[-(idx + 1), 1] = cases
seqs2[-(idx + 1), 2] = deaths
offset = float(idx2) / 10
seqs2[:, 0] = seqs2[:, 0] + offset
sequences.append(seqs2)
continue
seq = np.delete(seq, [0, 2], 1)
for idx, value in enumerate(reversed(seq[:, 1:])):
if idx + 1 < len(seq):
cases = value[0] - seq[-(idx + 2), 1]
deaths = value[1] - seq[-(idx + 2), 2]
seq[-(idx + 1), 1] = cases
seq[-(idx + 1), 2] = deaths
sequences.append(seq)
return np.array(sequences)
stats, data = handle_country_text(data)  # map country names to integer ids before grouping into per-country sequences
sequences = create_sequences(data, stats)
sequences = np.array(sequences)
sequences_train = np.delete(sequences, [0], 2)
sequences_train = np.array(sequences_train)
def dain(input):
n_features = 2
mean = Lambda(lambda x: K.mean(input, axis=1))(input)
adaptive_avg = Dense(n_features, kernel_initializer=Identity(gain=1.0), bias=False)(mean)
adaptive_avg = Reshape((1, n_features))(adaptive_avg)
X = Lambda(lambda inputs: inputs[0] - inputs[1])([input, adaptive_avg])
std = Lambda(lambda x: K.mean(x ** 2, axis=1))(X)
std = Lambda(lambda x: K.sqrt(x + 1e-08))(std)
adaptive_std = Dense(n_features, bias=False)(std)
adaptive_std = Reshape((1, n_features))(adaptive_std)
X = Lambda(lambda inputs: inputs[0] / inputs[1])([X, adaptive_std])
avg = Lambda(lambda x: K.mean(x, axis=1))(X)
gate = Dense(n_features, activation='sigmoid', kernel_initializer=Identity(gain=1.0), bias=False)(avg)
gate = Reshape((1, n_features))(gate)
X = Lambda(lambda inputs: inputs[0] * inputs[1])([X, gate])
return (X, adaptive_avg, adaptive_std)
def build_generator(encoder_input_shape, missing_len, verbose=True):
learning_rate = 0.0002
optimizer = Adam(lr=learning_rate)
generator_decoder_type = 'seq2seq'
encoder_inputs = Input(shape=encoder_input_shape)
hidden, avg, std = dain(encoder_inputs)
decoder_outputs = []
encoder = LSTM(128, return_sequences=True, return_state=True)
lstm_outputs, state_h, state_c = encoder(hidden)
if generator_decoder_type == 'seq2seq':
states = [state_h, state_c]
decoder_lstm = LSTM(128, return_sequences=True, return_state=True)
decoder_cases = Dense(1, activation='relu')
decoder_deaths = Dense(1, activation='relu')
all_outputs_c = []
all_outputs_d = []
inputs = lstm_outputs
for idx in range(missing_len):
outputs, state_h, state_c = decoder_lstm(inputs, initial_state=states)
inputs = outputs
outputs = BatchNormalization()(outputs)
outputs = Flatten()(outputs)
outputs_cases = decoder_cases(outputs)
outputs_deaths = decoder_deaths(outputs)
states = [state_h, state_c]
std_c = Lambda(lambda inputs: inputs[:, 0, 0])(std)
avg_c = Lambda(lambda inputs: inputs[:, 0, 0])(avg)
outputs_cases = Multiply()([outputs_cases, std_c])
outputs_cases = Add()([outputs_cases, avg_c])
std_d = Lambda(lambda inputs: inputs[:, 0, 1])(std)
avg_d = Lambda(lambda inputs: inputs[:, 0, 1])(avg)
outputs_deaths = Multiply()([outputs_deaths, std_d])
outputs_deaths = Add()([outputs_deaths, avg_d])
all_outputs_c.append(outputs_cases)
all_outputs_d.append(outputs_deaths)
decoder_outputs_c = Lambda(lambda x: x)(outputs_cases)
decoder_outputs_d = Lambda(lambda x: x)(outputs_deaths)
model = Model(inputs=encoder_inputs, outputs=[decoder_outputs_c, decoder_outputs_d])
model.compile(loss='mean_squared_logarithmic_error', optimizer=optimizer)
return model
given = 80
missing = 1
total_missing = 33
model = build_generator(sequences_train[:, :given, :].shape[1:], missing)
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=30)
history = model.fit(x=sequences_train[:, :given, :], y=[sequences_train[:, given:, 0], sequences_train[:, given:, 1]], epochs=1, validation_split=0.2, shuffle=True, callbacks=[es])
plt.close()
def backtest2(sequences, model, given, missing):
sequences_test = sequences[:, -given:]
pred_d = []
pred_c = []
for i in range(0, missing):
predictions = model.predict(sequences_test[:, :])
predictions[0][predictions[0] < 0] = 0
predictions[1][predictions[1] < 0] = 0
predictions[1] = np.around(predictions[1].astype(np.double))
predictions[0] = np.around(predictions[0].astype(np.double))
pred = np.concatenate([np.expand_dims(predictions[0], axis=2), np.expand_dims(predictions[1], axis=2)], axis=2)
pred_c.append(pred)
pred_d.append(predictions[1])
sequences_test = np.concatenate([sequences_test[:, 1:], pred], axis=1)
predictions = np.array(pred_c[0])
for i in range(1, len(pred_c)):
predictions = np.concatenate([predictions, pred_c[i]], axis=1)
seq_cases = sequences[:, :, 0]
seq_death = sequences[:, :, 1]
death = np.cumsum(seq_death, axis=1)
cases = np.cumsum(seq_cases, axis=1)
cases = np.around(cases.astype(np.double))
cases[cases < 0] = 0
cases_csv = np.expand_dims(cases[:, -1], axis=1)
predictions[0] = np.around(predictions[0].astype(np.double))
cases_csv = np.concatenate((cases_csv, predictions[:, :, 0]), axis=1)
death = np.around(death.astype(np.double))
death[death < 0] = 0
death_csv = np.expand_dims(death[:, -1], axis=1)
predictions[1] = np.around(predictions[1].astype(np.double))
death_csv = np.concatenate((death_csv, predictions[:, :, 1]), axis=1)
cases_csv = np.cumsum(cases_csv, axis=1)
death_csv = np.cumsum(death_csv, axis=1)
death_csv = death_csv[:, 1:]
cases_csv = cases_csv[:, 1:]
death_csv = np.concatenate((death[:, -11:], death_csv), axis=1)
cases_csv = np.concatenate((cases[:, -11:], cases_csv), axis=1)
csv = []
cases_csv = np.reshape(cases_csv[:, 1:], (-1, 1))
death_csv = np.reshape(death_csv[:, 1:], (-1, 1))
j = 1
for idx, (c, d) in enumerate(zip(cases_csv, death_csv)):
csv.append([j, c, d])
j += 1
backtest2(sequences_train, model, given, total_missing) | code |
34122628/cell_13 | [
"text_html_output_1.png"
] | from keras.preprocessing import image
from tqdm import tqdm
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/coronahack-chest-xraydataset/Chest_xray_Corona_Metadata.csv')
train.columns
train_image = []
path = '../input/coronahack-chest-xraydataset/Coronahack-Chest-XRay-Dataset/Coronahack-Chest-XRay-Dataset/'
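# Load every X-ray listed in the metadata, choosing the train/ or test/ subfolder from Dataset_type, resize to 256x256 RGB and scale pixels to [0, 1].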
for i in tqdm(range(train.shape[0])):
put = 'train' if train['Dataset_type'][i] == 'TRAIN' else 'test'
img = image.load_img('../input/coronahack-chest-xraydataset/Coronahack-Chest-XRay-Dataset/Coronahack-Chest-XRay-Dataset/' + put + '/' + train['X_ray_image_name'][i], target_size=(256, 256, 3))
img = image.img_to_array(img)
img = img / 255
train_image.append(img)
X = np.array(train_image)
for i, item in train.iteritems():
print(item.unique()) | code |
34122628/cell_9 | [
"text_plain_output_1.png"
] | from keras.preprocessing import image
from tqdm import tqdm
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/coronahack-chest-xraydataset/Chest_xray_Corona_Metadata.csv')
train.columns
train_image = []
path = '../input/coronahack-chest-xraydataset/Coronahack-Chest-XRay-Dataset/Coronahack-Chest-XRay-Dataset/'
for i in tqdm(range(train.shape[0])):
put = 'train' if train['Dataset_type'][i] == 'TRAIN' else 'test'
img = image.load_img('../input/coronahack-chest-xraydataset/Coronahack-Chest-XRay-Dataset/Coronahack-Chest-XRay-Dataset/' + put + '/' + train['X_ray_image_name'][i], target_size=(256, 256, 3))
img = image.img_to_array(img)
img = img / 255
train_image.append(img)
X = np.array(train_image) | code |
34122628/cell_25 | [
"text_plain_output_1.png"
] | """
model = Sequential()
model.add(Conv2D(filters=64, kernel_size=(5, 5), input_shape=(256, 256, 3), activation='relu'))
model.add(BatchNormalization(axis=3))
model.add(Conv2D(filters=64, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(BatchNormalization(axis=3))
model.add(Dropout(0.1))
model.add(Conv2D(filters=128, kernel_size=(5, 5), activation='relu'))
model.add(BatchNormalization(axis=3))
model.add(Conv2D(filters=128, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(BatchNormalization(axis=3))
model.add(Dropout(0.1))
model.add(Conv2D(filters=256, kernel_size=(5, 5), activation='relu'))
model.add(BatchNormalization(axis=3))
model.add(Conv2D(filters=256, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(BatchNormalization(axis=3))
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(256, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(9, activation='sigmoid'))
""" | code |
34122628/cell_20 | [
"text_plain_output_1.png"
] | from keras.preprocessing import image
from tqdm import tqdm
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/coronahack-chest-xraydataset/Chest_xray_Corona_Metadata.csv')
train.columns
train_image = []
path = '../input/coronahack-chest-xraydataset/Coronahack-Chest-XRay-Dataset/Coronahack-Chest-XRay-Dataset/'
for i in tqdm(range(train.shape[0])):
put = 'train' if train['Dataset_type'][i] == 'TRAIN' else 'test'
img = image.load_img('../input/coronahack-chest-xraydataset/Coronahack-Chest-XRay-Dataset/Coronahack-Chest-XRay-Dataset/' + put + '/' + train['X_ray_image_name'][i], target_size=(256, 256, 3))
img = image.img_to_array(img)
img = img / 255
train_image.append(img)
X = np.array(train_image)
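# Expand the Label, Label_1_Virus_category and Label_2_Virus_category columns into 9 binary indicator columns (multi-label target).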
train['Normal'] = 0
train['Pnemonia'] = 0
train['Virus'] = 0
train['bacteria'] = 0
train['Stress-Smoking'] = 0
train['Streptococcus'] = 0
train['COVID-19'] = 0
train['ARDS'] = 0
train['SARS'] = 0
train.loc[train.Label == 'Normal', 'Normal'] = 1
train.loc[train.Label == 'Pnemonia', 'Pnemonia'] = 1
train.loc[train.Label_2_Virus_category == 'Streptococcus', 'Streptococcus'] = 1
train.loc[train.Label_2_Virus_category == 'COVID-19', 'COVID-19'] = 1
train.loc[train.Label_2_Virus_category == 'ARDS', 'ARDS'] = 1
train.loc[train.Label_2_Virus_category == 'SARS', 'SARS'] = 1
train.loc[train.Label_1_Virus_category == 'Virus', 'Virus'] = 1
train.loc[train.Label_1_Virus_category == 'bacteria', 'bacteria'] = 1
train.loc[train.Label_1_Virus_category == 'Stress-Smoking', 'Stress-Smoking'] = 1
y = np.array(train.drop(['Unnamed: 0', 'X_ray_image_name', 'Dataset_type', 'Label_2_Virus_category', 'Label_1_Virus_category', 'Label'], axis=1))
y.shape | code |
34122628/cell_11 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.preprocessing import image
from tqdm import tqdm
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/coronahack-chest-xraydataset/Chest_xray_Corona_Metadata.csv')
train.columns
train_image = []
path = '../input/coronahack-chest-xraydataset/Coronahack-Chest-XRay-Dataset/Coronahack-Chest-XRay-Dataset/'
for i in tqdm(range(train.shape[0])):
put = 'train' if train['Dataset_type'][i] == 'TRAIN' else 'test'
img = image.load_img('../input/coronahack-chest-xraydataset/Coronahack-Chest-XRay-Dataset/Coronahack-Chest-XRay-Dataset/' + put + '/' + train['X_ray_image_name'][i], target_size=(256, 256, 3))
img = image.img_to_array(img)
img = img / 255
train_image.append(img)
X = np.array(train_image)
X.shape | code |
34122628/cell_1 | [
"text_plain_output_4.png",
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
34122628/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/coronahack-chest-xraydataset/Chest_xray_Corona_Metadata.csv')
train.columns | code |
34122628/cell_18 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.preprocessing import image
from tqdm import tqdm
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/coronahack-chest-xraydataset/Chest_xray_Corona_Metadata.csv')
train.columns
train_image = []
path = '../input/coronahack-chest-xraydataset/Coronahack-Chest-XRay-Dataset/Coronahack-Chest-XRay-Dataset/'
for i in tqdm(range(train.shape[0])):
put = 'train' if train['Dataset_type'][i] == 'TRAIN' else 'test'
img = image.load_img('../input/coronahack-chest-xraydataset/Coronahack-Chest-XRay-Dataset/Coronahack-Chest-XRay-Dataset/' + put + '/' + train['X_ray_image_name'][i], target_size=(256, 256, 3))
img = image.img_to_array(img)
img = img / 255
train_image.append(img)
X = np.array(train_image)
train['Normal'] = 0
train['Pnemonia'] = 0
train['Virus'] = 0
train['bacteria'] = 0
train['Stress-Smoking'] = 0
train['Streptococcus'] = 0
train['COVID-19'] = 0
train['ARDS'] = 0
train['SARS'] = 0
train.loc[train.Label == 'Normal', 'Normal'] = 1
train.loc[train.Label == 'Pnemonia', 'Pnemonia'] = 1
train.loc[train.Label_2_Virus_category == 'Streptococcus', 'Streptococcus'] = 1
train.loc[train.Label_2_Virus_category == 'COVID-19', 'COVID-19'] = 1
train.loc[train.Label_2_Virus_category == 'ARDS', 'ARDS'] = 1
train.loc[train.Label_2_Virus_category == 'SARS', 'SARS'] = 1
train.loc[train.Label_1_Virus_category == 'Virus', 'Virus'] = 1
train.loc[train.Label_1_Virus_category == 'bacteria', 'bacteria'] = 1
train.loc[train.Label_1_Virus_category == 'Stress-Smoking', 'Stress-Smoking'] = 1
train.head() | code |
34122628/cell_15 | [
"text_plain_output_1.png"
] | from keras.preprocessing import image
from tqdm import tqdm
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/coronahack-chest-xraydataset/Chest_xray_Corona_Metadata.csv')
train.columns
train_image = []
path = '../input/coronahack-chest-xraydataset/Coronahack-Chest-XRay-Dataset/Coronahack-Chest-XRay-Dataset/'
for i in tqdm(range(train.shape[0])):
put = 'train' if train['Dataset_type'][i] == 'TRAIN' else 'test'
img = image.load_img('../input/coronahack-chest-xraydataset/Coronahack-Chest-XRay-Dataset/Coronahack-Chest-XRay-Dataset/' + put + '/' + train['X_ray_image_name'][i], target_size=(256, 256, 3))
img = image.img_to_array(img)
img = img / 255
train_image.append(img)
X = np.array(train_image)
train['Normal'] = 0
train['Pnemonia'] = 0
train['Virus'] = 0
train['bacteria'] = 0
train['Stress-Smoking'] = 0
train['Streptococcus'] = 0
train['COVID-19'] = 0
train['ARDS'] = 0
train['SARS'] = 0
train.head() | code |
34122628/cell_3 | [
"text_html_output_1.png"
] | import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import to_categorical
from keras.preprocessing import image
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from tqdm import tqdm | code |
34122628/cell_31 | [
"text_plain_output_1.png"
] | from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, Flatten
from keras.models import Sequential
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(5, 5), activation='relu', input_shape=(256, 256, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(9, activation='sigmoid'))
model.summary()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, validation_data=(X_test, y_test)) | code |
34122628/cell_27 | [
"text_html_output_1.png"
] | from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, Flatten
from keras.models import Sequential
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(5, 5), activation='relu', input_shape=(256, 256, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(9, activation='sigmoid'))
model.summary() | code |
34122628/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/coronahack-chest-xraydataset/Chest_xray_Corona_Metadata.csv')
train.head() | code |
105201902/cell_9 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/chocolate-bar-ratings/chocolate_bars.csv')
df.info() | code |
105201902/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
105201902/cell_18 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/chocolate-bar-ratings/chocolate_bars.csv')
plt.xticks(rotation=90)
y = df['cocoa_percent']
x = df['rating']
correlation = y.corr(x)
plt.xticks(rotation=0)
plt.figure(figsize=(25, 10))
df.groupby('company_location').mean()['rating'].plot(kind='bar', color='tan')
plt.xticks(rotation=90)
plt.xlabel('Company location', fontsize=14)
plt.ylabel("Rating'", fontsize=14)
plt.title('Company location versus rating', fontsize=14)
plt.show() | code |
105201902/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/chocolate-bar-ratings/chocolate_bars.csv')
df.head(-5) | code |
105201902/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/chocolate-bar-ratings/chocolate_bars.csv')
plt.xticks(rotation=90)
y = df['cocoa_percent']
x = df['rating']
correlation = y.corr(x)
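# Pearson correlation between cocoa percentage and rating (Series.corr defaults to Pearson)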
print(correlation)
plt.figure(figsize=(25, 10))
sns.regplot(x='cocoa_percent', y='rating', data=df)
plt.title('Cocoa Percentage vs Rating', fontsize=14)
plt.xticks(rotation=0)
plt.xlabel('Cocoa percentage', fontsize=14)
plt.ylabel('Ratings', fontsize=14)
plt.show() | code |
105201902/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/chocolate-bar-ratings/chocolate_bars.csv')
df.describe() | code |
105201902/cell_12 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/chocolate-bar-ratings/chocolate_bars.csv')
plt.figure(figsize=(10, 25))
sns.catplot(x='bean_origin', y='rating', kind='bar', height=10, aspect=2, data=df.head(2530)).set(title='Bean Origin vs Ratings')
plt.xticks(rotation=90)
plt.xlabel('Bean Origin', fontsize=14)
plt.ylabel('Ratings', fontsize=14)
plt.show() | code |
32069765/cell_21 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/aipowered-literature-review-csvs/kaggle/working/TIE/Proportion of patients who were asymptomatic.csv')
df = df.rename(columns={'Unnamed: 0': 'unnamed', 'Asymptomatic Proportion': 'asymptomatic'})
dfcorr = df.corr()
dfcorr
df1 = pd.read_csv('../input/aipowered-literature-review-csvs/kaggle/working/TIE/Pediatric patients who were asymptomatic.csv')
df1 = df1.rename(columns={'Unnamed: 0': 'unnamed1', 'Asymptomatic Proportion': 'asymptomatic1', 'Age': 'age'})
fig = sns.lmplot(x='asymptomatic1', y='unnamed1', data=df1) | code |
32069765/cell_25 | [
"text_html_output_2.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import seaborn as sns
df = pd.read_csv('../input/aipowered-literature-review-csvs/kaggle/working/TIE/Proportion of patients who were asymptomatic.csv')
df = df.rename(columns={'Unnamed: 0': 'unnamed', 'Asymptomatic Proportion': 'asymptomatic'})
dfcorr = df.corr()
dfcorr
df1 = pd.read_csv('../input/aipowered-literature-review-csvs/kaggle/working/TIE/Pediatric patients who were asymptomatic.csv')
df1 = df1.rename(columns={'Unnamed: 0': 'unnamed1', 'Asymptomatic Proportion': 'asymptomatic1', 'Age': 'age'})
fig = sns.lmplot(x='asymptomatic1', y='unnamed1', data=df1)
df1_age = pd.DataFrame({'Date': df1.Date, 'age': df1.age})
fig = px.line(df1_age, x='Date', y='age', title='Pediatric Asymptomatic Patients ')
fig = px.bar(df1, x='Date', y='age', color_discrete_sequence=['#21bf73'], title='Pediatric Asymptomatic Patients', text='age')
fig.show() | code |
32069765/cell_4 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/aipowered-literature-review-csvs/kaggle/working/TIE/Proportion of patients who were asymptomatic.csv')
df = df.rename(columns={'Unnamed: 0': 'unnamed', 'Asymptomatic Proportion': 'asymptomatic'})
dfcorr = df.corr()
dfcorr
plt.figure(figsize=(10, 4))
sns.heatmap(df.corr(), annot=False, cmap='summer')
plt.show() | code |
32069765/cell_26 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import seaborn as sns
df = pd.read_csv('../input/aipowered-literature-review-csvs/kaggle/working/TIE/Proportion of patients who were asymptomatic.csv')
df = df.rename(columns={'Unnamed: 0': 'unnamed', 'Asymptomatic Proportion': 'asymptomatic'})
dfcorr = df.corr()
dfcorr
df1 = pd.read_csv('../input/aipowered-literature-review-csvs/kaggle/working/TIE/Pediatric patients who were asymptomatic.csv')
df1 = df1.rename(columns={'Unnamed: 0': 'unnamed1', 'Asymptomatic Proportion': 'asymptomatic1', 'Age': 'age'})
fig = sns.lmplot(x='asymptomatic1', y='unnamed1', data=df1)
df1_age = pd.DataFrame({'Date': df1.Date, 'age': df1.age})
fig = px.line(df1_age, x='Date', y='age', title='Pediatric Asymptomatic Patients ')
fig = px.bar(df1,
x='Date', y='age', color_discrete_sequence=['#21bf73'],
title='Pediatric Asymptomatic Patients', text='age')
fig.show()
fig = px.line(df1, x='Date', y='asymptomatic1', color_discrete_sequence=['#ff2e63'], title='Pediatric Asymptomatic Patients', text='asymptomatic1')
fig.show() | code |
32069765/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/aipowered-literature-review-csvs/kaggle/working/TIE/Proportion of patients who were asymptomatic.csv')
df.head() | code |
32069765/cell_19 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/aipowered-literature-review-csvs/kaggle/working/TIE/Proportion of patients who were asymptomatic.csv')
df1 = pd.read_csv('../input/aipowered-literature-review-csvs/kaggle/working/TIE/Pediatric patients who were asymptomatic.csv')
df1.head() | code |
32069765/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objs as go
import plotly.offline as py
import plotly.express as px
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
32069765/cell_18 | [
"image_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/aipowered-literature-review-csvs/kaggle/working/TIE/Proportion of patients who were asymptomatic.csv')
df = df.rename(columns={'Unnamed: 0': 'unnamed', 'Asymptomatic Proportion': 'asymptomatic'})
dfcorr = df.corr()
dfcorr
dfmodel = df.copy()
for col in dfmodel.columns[dfmodel.dtypes == 'object']:
le = LabelEncoder()
dfmodel[col] = dfmodel[col].astype(str)
le.fit(dfmodel[col])
dfmodel[col] = le.transform(dfmodel[col])
df.dtypes | code |
32069765/cell_28 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/aipowered-literature-review-csvs/kaggle/working/TIE/Proportion of patients who were asymptomatic.csv')
df = df.rename(columns={'Unnamed: 0': 'unnamed', 'Asymptomatic Proportion': 'asymptomatic'})
dfcorr = df.corr()
dfcorr
df1 = pd.read_csv('../input/aipowered-literature-review-csvs/kaggle/working/TIE/Pediatric patients who were asymptomatic.csv')
df1 = df1.rename(columns={'Unnamed: 0': 'unnamed1', 'Asymptomatic Proportion': 'asymptomatic1', 'Age': 'age'})
df1_age = pd.DataFrame({'Date': df1.Date, 'age': df1.age})
import networkx as nx
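# Count occurrences of each asymptomatic proportion, then build a graph where each unique value is a self-looped node (edge attribute = count)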
df1 = pd.DataFrame(df1['asymptomatic1']).groupby(['asymptomatic1']).size().reset_index()
G = nx.from_pandas_edgelist(df1, 'asymptomatic1', 'asymptomatic1', [0])
colors = []
for node in G:
if node in df1['asymptomatic1'].unique():
colors.append('green')
else:
colors.append('lightgreen')
labels = df1['asymptomatic1'].value_counts().index
size = df1['asymptomatic1'].value_counts()
colors = ['#ff2e63', '#3F3FBF']
plt.pie(size, labels=labels, colors=colors, shadow=True, autopct='%1.1f%%', startangle=90)
plt.title('Pediatric Asymptomatic Patients', fontsize=20)
plt.legend()
plt.show() | code |
32069765/cell_15 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random
import seaborn as sns
import shap
df = pd.read_csv('../input/aipowered-literature-review-csvs/kaggle/working/TIE/Proportion of patients who were asymptomatic.csv')
df = df.rename(columns={'Unnamed: 0': 'unnamed', 'Asymptomatic Proportion': 'asymptomatic'})
dfcorr = df.corr()
dfcorr
SEED = 99
random.seed(SEED)
np.random.seed(SEED)
dfmodel = df.copy()
for col in dfmodel.columns[dfmodel.dtypes == 'object']:
le = LabelEncoder()
dfmodel[col] = dfmodel[col].astype(str)
le.fit(dfmodel[col])
dfmodel[col] = le.transform(dfmodel[col])
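# Sanitize column names to alphanumerics/underscores (LightGBM can reject special characters in feature names)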
dfmodel.columns = [''.join((c if c.isalnum() else '_' for c in str(x))) for x in dfmodel.columns]
X = dfmodel.drop(['unnamed', 'asymptomatic'], axis=1)
y = dfmodel['unnamed']
lgb_params = {'objective': 'binary', 'metric': 'auc', 'n_jobs': -1, 'learning_rate': 0.005, 'num_leaves': 20, 'max_depth': -1, 'subsample': 0.9, 'n_estimators': 2500, 'seed': SEED, 'early_stopping_rounds': 100}
K = 5
folds = KFold(K, shuffle=True, random_state=SEED)
best_scorecv = 0
best_iteration = 0
for fold, (train_index, test_index) in enumerate(folds.split(X, y)):
X_traincv, X_testcv = (X.iloc[train_index], X.iloc[test_index])
y_traincv, y_testcv = (y.iloc[train_index], y.iloc[test_index])
train_data = lgb.Dataset(X_traincv, y_traincv)
val_data = lgb.Dataset(X_testcv, y_testcv)
LGBM = lgb.train(lgb_params, train_data, valid_sets=[train_data, val_data], verbose_eval=250)
best_scorecv += LGBM.best_score['valid_1']['auc']
best_iteration += LGBM.best_iteration
best_scorecv /= K
best_iteration /= K
lgb_params = {'objective': 'binary', 'metric': 'auc', 'n_jobs': -1, 'learning_rate': 0.05, 'num_leaves': 20, 'max_depth': -1, 'subsample': 0.9, 'n_estimators': round(best_iteration), 'seed': SEED, 'early_stopping_rounds': None}
train_data_final = lgb.Dataset(X, y)
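# Refit with the tuned iteration count; note this trains on the last fold's train_data (train_data_final is created but unused)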
LGBM = lgb.train(lgb_params, train_data)
explainer = shap.TreeExplainer(LGBM)
shap_values = explainer.shap_values(X) | code |
32069765/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random
import seaborn as sns
import shap
df = pd.read_csv('../input/aipowered-literature-review-csvs/kaggle/working/TIE/Proportion of patients who were asymptomatic.csv')
df = df.rename(columns={'Unnamed: 0': 'unnamed', 'Asymptomatic Proportion': 'asymptomatic'})
dfcorr = df.corr()
dfcorr
SEED = 99
random.seed(SEED)
np.random.seed(SEED)
dfmodel = df.copy()
for col in dfmodel.columns[dfmodel.dtypes == 'object']:
le = LabelEncoder()
dfmodel[col] = dfmodel[col].astype(str)
le.fit(dfmodel[col])
dfmodel[col] = le.transform(dfmodel[col])
dfmodel.columns = [''.join((c if c.isalnum() else '_' for c in str(x))) for x in dfmodel.columns]
X = dfmodel.drop(['unnamed', 'asymptomatic'], axis=1)
y = dfmodel['unnamed']
lgb_params = {'objective': 'binary', 'metric': 'auc', 'n_jobs': -1, 'learning_rate': 0.005, 'num_leaves': 20, 'max_depth': -1, 'subsample': 0.9, 'n_estimators': 2500, 'seed': SEED, 'early_stopping_rounds': 100}
K = 5
folds = KFold(K, shuffle=True, random_state=SEED)
best_scorecv = 0
best_iteration = 0
for fold, (train_index, test_index) in enumerate(folds.split(X, y)):
X_traincv, X_testcv = (X.iloc[train_index], X.iloc[test_index])
y_traincv, y_testcv = (y.iloc[train_index], y.iloc[test_index])
train_data = lgb.Dataset(X_traincv, y_traincv)
val_data = lgb.Dataset(X_testcv, y_testcv)
LGBM = lgb.train(lgb_params, train_data, valid_sets=[train_data, val_data], verbose_eval=250)
best_scorecv += LGBM.best_score['valid_1']['auc']
best_iteration += LGBM.best_iteration
best_scorecv /= K
best_iteration /= K
lgb_params = {'objective': 'binary', 'metric': 'auc', 'n_jobs': -1, 'learning_rate': 0.05, 'num_leaves': 20, 'max_depth': -1, 'subsample': 0.9, 'n_estimators': round(best_iteration), 'seed': SEED, 'early_stopping_rounds': None}
train_data_final = lgb.Dataset(X, y)
LGBM = lgb.train(lgb_params, train_data)
explainer = shap.TreeExplainer(LGBM)
shap_values = explainer.shap_values(X)
shap.summary_plot(shap_values[1], X, plot_type='bar') | code |
32069765/cell_17 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random
import seaborn as sns
import shap
df = pd.read_csv('../input/aipowered-literature-review-csvs/kaggle/working/TIE/Proportion of patients who were asymptomatic.csv')
df = df.rename(columns={'Unnamed: 0': 'unnamed', 'Asymptomatic Proportion': 'asymptomatic'})
dfcorr = df.corr()
dfcorr
SEED = 99
random.seed(SEED)
np.random.seed(SEED)
dfmodel = df.copy()
for col in dfmodel.columns[dfmodel.dtypes == 'object']:
le = LabelEncoder()
dfmodel[col] = dfmodel[col].astype(str)
le.fit(dfmodel[col])
dfmodel[col] = le.transform(dfmodel[col])
dfmodel.columns = [''.join((c if c.isalnum() else '_' for c in str(x))) for x in dfmodel.columns]
X = dfmodel.drop(['unnamed', 'asymptomatic'], axis=1)
y = dfmodel['unnamed']
lgb_params = {'objective': 'binary', 'metric': 'auc', 'n_jobs': -1, 'learning_rate': 0.005, 'num_leaves': 20, 'max_depth': -1, 'subsample': 0.9, 'n_estimators': 2500, 'seed': SEED, 'early_stopping_rounds': 100}
K = 5
folds = KFold(K, shuffle=True, random_state=SEED)
best_scorecv = 0
best_iteration = 0
for fold, (train_index, test_index) in enumerate(folds.split(X, y)):
X_traincv, X_testcv = (X.iloc[train_index], X.iloc[test_index])
y_traincv, y_testcv = (y.iloc[train_index], y.iloc[test_index])
train_data = lgb.Dataset(X_traincv, y_traincv)
val_data = lgb.Dataset(X_testcv, y_testcv)
LGBM = lgb.train(lgb_params, train_data, valid_sets=[train_data, val_data], verbose_eval=250)
best_scorecv += LGBM.best_score['valid_1']['auc']
best_iteration += LGBM.best_iteration
best_scorecv /= K
best_iteration /= K
lgb_params = {'objective': 'binary', 'metric': 'auc', 'n_jobs': -1, 'learning_rate': 0.05, 'num_leaves': 20, 'max_depth': -1, 'subsample': 0.9, 'n_estimators': round(best_iteration), 'seed': SEED, 'early_stopping_rounds': None}
train_data_final = lgb.Dataset(X, y)
LGBM = lgb.train(lgb_params, train_data)
explainer = shap.TreeExplainer(LGBM)
shap_values = explainer.shap_values(X)
shap.summary_plot(shap_values[1], X) | code |
32069765/cell_24 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import seaborn as sns
df = pd.read_csv('../input/aipowered-literature-review-csvs/kaggle/working/TIE/Proportion of patients who were asymptomatic.csv')
df = df.rename(columns={'Unnamed: 0': 'unnamed', 'Asymptomatic Proportion': 'asymptomatic'})
dfcorr = df.corr()
dfcorr
df1 = pd.read_csv('../input/aipowered-literature-review-csvs/kaggle/working/TIE/Pediatric patients who were asymptomatic.csv')
df1 = df1.rename(columns={'Unnamed: 0': 'unnamed1', 'Asymptomatic Proportion': 'asymptomatic1', 'Age': 'age'})
fig = sns.lmplot(x='asymptomatic1', y='unnamed1', data=df1)
df1_age = pd.DataFrame({'Date': df1.Date, 'age': df1.age})
fig = px.line(df1_age, x='Date', y='age', title='Pediatric Asymptomatic Patients ')
fig.show() | code |
32069765/cell_14 | [
"image_output_1.png"
] | from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random
import seaborn as sns
df = pd.read_csv('../input/aipowered-literature-review-csvs/kaggle/working/TIE/Proportion of patients who were asymptomatic.csv')
df = df.rename(columns={'Unnamed: 0': 'unnamed', 'Asymptomatic Proportion': 'asymptomatic'})
dfcorr = df.corr()
dfcorr
SEED = 99
random.seed(SEED)
np.random.seed(SEED)
dfmodel = df.copy()
for col in dfmodel.columns[dfmodel.dtypes == 'object']:
le = LabelEncoder()
dfmodel[col] = dfmodel[col].astype(str)
le.fit(dfmodel[col])
dfmodel[col] = le.transform(dfmodel[col])
dfmodel.columns = [''.join((c if c.isalnum() else '_' for c in str(x))) for x in dfmodel.columns]
X = dfmodel.drop(['unnamed', 'asymptomatic'], axis=1)
y = dfmodel['unnamed']
lgb_params = {'objective': 'binary', 'metric': 'auc', 'n_jobs': -1, 'learning_rate': 0.005, 'num_leaves': 20, 'max_depth': -1, 'subsample': 0.9, 'n_estimators': 2500, 'seed': SEED, 'early_stopping_rounds': 100}
K = 5
folds = KFold(K, shuffle=True, random_state=SEED)
best_scorecv = 0
best_iteration = 0
for fold, (train_index, test_index) in enumerate(folds.split(X, y)):
X_traincv, X_testcv = (X.iloc[train_index], X.iloc[test_index])
y_traincv, y_testcv = (y.iloc[train_index], y.iloc[test_index])
train_data = lgb.Dataset(X_traincv, y_traincv)
val_data = lgb.Dataset(X_testcv, y_testcv)
LGBM = lgb.train(lgb_params, train_data, valid_sets=[train_data, val_data], verbose_eval=250)
best_scorecv += LGBM.best_score['valid_1']['auc']
best_iteration += LGBM.best_iteration
best_scorecv /= K
best_iteration /= K
lgb_params = {'objective': 'binary', 'metric': 'auc', 'n_jobs': -1, 'learning_rate': 0.05, 'num_leaves': 20, 'max_depth': -1, 'subsample': 0.9, 'n_estimators': round(best_iteration), 'seed': SEED, 'early_stopping_rounds': None}
train_data_final = lgb.Dataset(X, y)
LGBM = lgb.train(lgb_params, train_data)
print(LGBM) | code |
32069765/cell_27 | [
"text_html_output_1.png"
] | import networkx as nx
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/aipowered-literature-review-csvs/kaggle/working/TIE/Proportion of patients who were asymptomatic.csv')
df1 = pd.read_csv('../input/aipowered-literature-review-csvs/kaggle/working/TIE/Pediatric patients who were asymptomatic.csv')
df1 = df1.rename(columns={'Unnamed: 0': 'unnamed1', 'Asymptomatic Proportion': 'asymptomatic1', 'Age': 'age'})
df1_age = pd.DataFrame({'Date': df1.Date, 'age': df1.age})
import networkx as nx
df1 = pd.DataFrame(df1['asymptomatic1']).groupby(['asymptomatic1']).size().reset_index()
G = nx.from_pandas_edgelist(df1, 'asymptomatic1', 'asymptomatic1', [0])
colors = []
for node in G:
if node in df1['asymptomatic1'].unique():
colors.append('green')
else:
colors.append('lightgreen')
nx.draw(nx.from_pandas_edgelist(df1, 'asymptomatic1', 'asymptomatic1', [0]), with_labels=True, node_color=colors) | code |
32069765/cell_12 | [
"text_html_output_1.png"
] | from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random
import seaborn as sns
df = pd.read_csv('../input/aipowered-literature-review-csvs/kaggle/working/TIE/Proportion of patients who were asymptomatic.csv')
df = df.rename(columns={'Unnamed: 0': 'unnamed', 'Asymptomatic Proportion': 'asymptomatic'})
dfcorr = df.corr()
dfcorr
SEED = 99
random.seed(SEED)
np.random.seed(SEED)
dfmodel = df.copy()
for col in dfmodel.columns[dfmodel.dtypes == 'object']:
le = LabelEncoder()
dfmodel[col] = dfmodel[col].astype(str)
le.fit(dfmodel[col])
dfmodel[col] = le.transform(dfmodel[col])
dfmodel.columns = [''.join((c if c.isalnum() else '_' for c in str(x))) for x in dfmodel.columns]
X = dfmodel.drop(['unnamed', 'asymptomatic'], axis=1)
y = dfmodel['unnamed']
lgb_params = {'objective': 'binary', 'metric': 'auc', 'n_jobs': -1, 'learning_rate': 0.005, 'num_leaves': 20, 'max_depth': -1, 'subsample': 0.9, 'n_estimators': 2500, 'seed': SEED, 'early_stopping_rounds': 100}
K = 5
folds = KFold(K, shuffle=True, random_state=SEED)
best_scorecv = 0
best_iteration = 0
for fold, (train_index, test_index) in enumerate(folds.split(X, y)):
print('Fold:', fold + 1)
X_traincv, X_testcv = (X.iloc[train_index], X.iloc[test_index])
y_traincv, y_testcv = (y.iloc[train_index], y.iloc[test_index])
train_data = lgb.Dataset(X_traincv, y_traincv)
val_data = lgb.Dataset(X_testcv, y_testcv)
LGBM = lgb.train(lgb_params, train_data, valid_sets=[train_data, val_data], verbose_eval=250)
best_scorecv += LGBM.best_score['valid_1']['auc']
best_iteration += LGBM.best_iteration
best_scorecv /= K
best_iteration /= K
print('\n Mean AUC score:', best_scorecv)
print('\n Mean best iteration:', best_iteration) | code |
128004504/cell_13 | [
"text_plain_output_1.png"
] | from keras.callbacks import ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
from vit_keras import vit, utils
import csv
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
import os
import re
X = []
Y = []
input_shape = (96, 96, 3)
path_to_subset = f'../input/apparel-images-dataset/'
for folder in os.listdir(path_to_subset):
for image in os.listdir(os.path.join(path_to_subset, folder)):
path_to_image = os.path.join(path_to_subset, folder, image)
image = cv.imread(path_to_image)
image = cv.resize(image, (input_shape[1], input_shape[0]))
label = re.findall('\\w+\\_\\w+', path_to_image)[0].split('_')
X.append(image)
Y.append(label)
X = np.array(X) / 255.0
Y = np.array(Y)
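# Convert each image's label list (e.g. ['blue', 'dress'] from the folder name) into a multi-hot vector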
mlb = MultiLabelBinarizer()
Y = mlb.fit_transform(Y)
x, test_x, y, test_y = train_test_split(X, Y, test_size=0.1, stratify=Y, shuffle=True, random_state=1)
train_x, val_x, train_y, val_y = train_test_split(x, y, test_size=0.2, stratify=y, shuffle=True, random_state=1)
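# Augment training images with random rotations, shifts, zoom and horizontal flips; reserve 20% as a validation split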
datagen = ImageDataGenerator(rotation_range=45, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.2, horizontal_flip=True, validation_split=0.2)
input_shape = (96, 96, 3)
num_classes = Y.shape[1]
vit_model = vit.vit_b16(image_size=input_shape[0], activation='softmax', pretrained=True, include_top=False, pretrained_top=False, classes=num_classes, weights='imagenet21k')
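# Freeze the pretrained ViT backbone so only the new sigmoid classification head is trained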
for layer in vit_model.layers:
layer.trainable = False
output_layer = Dense(num_classes, activation='sigmoid')(vit_model.output)
model = Model(inputs=vit_model.input, outputs=output_layer)
checkpoint = ModelCheckpoint('../working/visiontransformermodel.tf', save_best_only=True, monitor='val_loss', verbose=1)
batch_size = 64
epochs = 100
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(datagen.flow(train_x, train_y, batch_size=batch_size, subset='training'), validation_data=datagen.flow(train_x, train_y, batch_size=batch_size, subset='validation'), epochs=epochs, callbacks=[checkpoint])
training_acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
filename = '../working/accuracy.csv'
with open(filename, 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(['Epoch', 'Training Accuracy', 'Validation Accuracy'])
for epoch, (train_acc, val_acc) in enumerate(zip(training_acc, val_acc), 1):
csvwriter.writerow([epoch, train_acc, val_acc])
training_loss = history.history['loss']
val_loss = history.history['val_loss']
filename = '../working/loss.csv'
with open(filename, 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(['Epoch', 'loss', 'val_loss'])
for epoch, (loss, val_loss) in enumerate(zip(training_loss, val_loss), 1):
csvwriter.writerow([epoch, loss, val_loss])
filename = "../working/accuracy.csv"
epoch = []
train_acc = []
val_acc = []
with open(filename, 'r') as csvfile:
csvreader = csv.reader(csvfile)
header = next(csvreader)
for row in csvreader:
epoch.append(int(row[0]))
train_acc.append(float(row[1]))
val_acc.append(float(row[2]))
fig = plt.figure(figsize=(20, 7))
plt.subplot(121)
plt.plot(epoch, train_acc, label='acc')
plt.plot(epoch, val_acc, label='val_acc')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.grid()
plt.legend()
plt.show()
filename = '../working/loss.csv'
epoch = []
train_loss = []
val_loss = []
with open(filename, 'r') as csvfile:
csvreader = csv.reader(csvfile)
header = next(csvreader)
for row in csvreader:
epoch.append(int(row[0]))
train_loss.append(float(row[1]))
val_loss.append(float(row[2]))
fig = plt.figure(figsize=(20, 7))
plt.subplot(121)
plt.plot(epoch, train_loss, label='loss')
plt.plot(epoch, val_loss, label='val_loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.grid()
plt.legend()
plt.show() | code |
128004504/cell_9 | [
"image_output_1.png"
] | from keras.callbacks import ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
from vit_keras import vit, utils
import cv2 as cv
import numpy as np
import os
import re
X = []
Y = []
input_shape = (96, 96, 3)
path_to_subset = f'../input/apparel-images-dataset/'
for folder in os.listdir(path_to_subset):
for image in os.listdir(os.path.join(path_to_subset, folder)):
path_to_image = os.path.join(path_to_subset, folder, image)
image = cv.imread(path_to_image)
image = cv.resize(image, (input_shape[1], input_shape[0]))
label = re.findall('\\w+\\_\\w+', path_to_image)[0].split('_')
X.append(image)
Y.append(label)
X = np.array(X) / 255.0
Y = np.array(Y)
mlb = MultiLabelBinarizer()
Y = mlb.fit_transform(Y)
x, test_x, y, test_y = train_test_split(X, Y, test_size=0.1, stratify=Y, shuffle=True, random_state=1)
train_x, val_x, train_y, val_y = train_test_split(x, y, test_size=0.2, stratify=y, shuffle=True, random_state=1)
datagen = ImageDataGenerator(rotation_range=45, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.2, horizontal_flip=True, validation_split=0.2)
input_shape = (96, 96, 3)
num_classes = Y.shape[1]
vit_model = vit.vit_b16(image_size=input_shape[0], activation='softmax', pretrained=True, include_top=False, pretrained_top=False, classes=num_classes, weights='imagenet21k')
for layer in vit_model.layers:
layer.trainable = False
output_layer = Dense(num_classes, activation='sigmoid')(vit_model.output)
model = Model(inputs=vit_model.input, outputs=output_layer)
checkpoint = ModelCheckpoint('../working/visiontransformermodel.tf', save_best_only=True, monitor='val_loss', verbose=1)
batch_size = 64
epochs = 100
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(datagen.flow(train_x, train_y, batch_size=batch_size, subset='training'), validation_data=datagen.flow(train_x, train_y, batch_size=batch_size, subset='validation'), epochs=epochs, callbacks=[checkpoint])
import csv
model.summary()
model.save('../working/ViTm.hdf5') | code |
128004504/cell_4 | [
"text_plain_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer
import cv2 as cv
import numpy as np
import os
import re
X = []
Y = []
input_shape = (96, 96, 3)
path_to_subset = f'../input/apparel-images-dataset/'
for folder in os.listdir(path_to_subset):
for image in os.listdir(os.path.join(path_to_subset, folder)):
path_to_image = os.path.join(path_to_subset, folder, image)
image = cv.imread(path_to_image)
image = cv.resize(image, (input_shape[1], input_shape[0]))
label = re.findall('\\w+\\_\\w+', path_to_image)[0].split('_')
X.append(image)
Y.append(label)
X = np.array(X) / 255.0
Y = np.array(Y)
mlb = MultiLabelBinarizer()
Y = mlb.fit_transform(Y)
x, test_x, y, test_y = train_test_split(X, Y, test_size=0.1, stratify=Y, shuffle=True, random_state=1)
train_x, val_x, train_y, val_y = train_test_split(x, y, test_size=0.2, stratify=y, shuffle=True, random_state=1)
print(x.shape, test_x.shape, y.shape, test_y.shape)
print(train_x.shape, val_x.shape, train_y.shape, val_y.shape)
datagen = ImageDataGenerator(rotation_range=45, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.2, horizontal_flip=True, validation_split=0.2) | code |
128004504/cell_6 | [
"image_output_1.png"
] | from sklearn.preprocessing import MultiLabelBinarizer
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
from vit_keras import vit, utils
import cv2 as cv
import numpy as np
import os
import re
X = []
Y = []
input_shape = (96, 96, 3)
path_to_subset = f'../input/apparel-images-dataset/'
for folder in os.listdir(path_to_subset):
for image in os.listdir(os.path.join(path_to_subset, folder)):
path_to_image = os.path.join(path_to_subset, folder, image)
image = cv.imread(path_to_image)
image = cv.resize(image, (input_shape[1], input_shape[0]))
label = re.findall('\\w+\\_\\w+', path_to_image)[0].split('_')
X.append(image)
Y.append(label)
X = np.array(X) / 255.0
Y = np.array(Y)
mlb = MultiLabelBinarizer()
Y = mlb.fit_transform(Y)
input_shape = (96, 96, 3)
num_classes = Y.shape[1]
vit_model = vit.vit_b16(image_size=input_shape[0], activation='softmax', pretrained=True, include_top=False, pretrained_top=False, classes=num_classes, weights='imagenet21k')
for layer in vit_model.layers:
layer.trainable = False
output_layer = Dense(num_classes, activation='sigmoid')(vit_model.output)
model = Model(inputs=vit_model.input, outputs=output_layer) | code |
128004504/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import matplotlib.pyplot as plt
import os
import cv2 as cv
import re
import requests
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import train_test_split
import tensorflow
from vit_keras import vit, utils
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint | code |
128004504/cell_7 | [
"image_output_1.png"
] | from keras.callbacks import ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
from vit_keras import vit, utils
import cv2 as cv
import numpy as np
import os
import re
X = []
Y = []
input_shape = (96, 96, 3)
path_to_subset = f'../input/apparel-images-dataset/'
for folder in os.listdir(path_to_subset):
for image in os.listdir(os.path.join(path_to_subset, folder)):
path_to_image = os.path.join(path_to_subset, folder, image)
image = cv.imread(path_to_image)
image = cv.resize(image, (input_shape[1], input_shape[0]))
label = re.findall('\\w+\\_\\w+', path_to_image)[0].split('_')
X.append(image)
Y.append(label)
X = np.array(X) / 255.0
Y = np.array(Y)
mlb = MultiLabelBinarizer()
Y = mlb.fit_transform(Y)
x, test_x, y, test_y = train_test_split(X, Y, test_size=0.1, stratify=Y, shuffle=True, random_state=1)
train_x, val_x, train_y, val_y = train_test_split(x, y, test_size=0.2, stratify=y, shuffle=True, random_state=1)
datagen = ImageDataGenerator(rotation_range=45, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.2, horizontal_flip=True, validation_split=0.2)
input_shape = (96, 96, 3)
num_classes = Y.shape[1]
vit_model = vit.vit_b16(image_size=input_shape[0], activation='softmax', pretrained=True, include_top=False, pretrained_top=False, classes=num_classes, weights='imagenet21k')
for layer in vit_model.layers:
layer.trainable = False
output_layer = Dense(num_classes, activation='sigmoid')(vit_model.output)
model = Model(inputs=vit_model.input, outputs=output_layer)
checkpoint = ModelCheckpoint('../working/visiontransformermodel.tf', save_best_only=True, monitor='val_loss', verbose=1)
batch_size = 64
epochs = 100
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(datagen.flow(train_x, train_y, batch_size=batch_size, subset='training'), validation_data=datagen.flow(train_x, train_y, batch_size=batch_size, subset='validation'), epochs=epochs, callbacks=[checkpoint]) | code |
128004504/cell_3 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import MultiLabelBinarizer
import cv2 as cv
import numpy as np
import os
import re
X = []
Y = []
input_shape = (96, 96, 3)
path_to_subset = f'../input/apparel-images-dataset/'
for folder in os.listdir(path_to_subset):
for image in os.listdir(os.path.join(path_to_subset, folder)):
path_to_image = os.path.join(path_to_subset, folder, image)
image = cv.imread(path_to_image)
image = cv.resize(image, (input_shape[1], input_shape[0]))
label = re.findall('\\w+\\_\\w+', path_to_image)[0].split('_')
X.append(image)
Y.append(label)
X = np.array(X) / 255.0
Y = np.array(Y)
mlb = MultiLabelBinarizer()
Y = mlb.fit_transform(Y)
print(mlb.classes_)
print(Y[0]) | code |
128004504/cell_12 | [
"text_plain_output_1.png"
] | from keras.callbacks import ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
from vit_keras import vit, utils
import csv
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
import os
import re
X = []
Y = []
input_shape = (96, 96, 3)
path_to_subset = f'../input/apparel-images-dataset/'
for folder in os.listdir(path_to_subset):
for image in os.listdir(os.path.join(path_to_subset, folder)):
path_to_image = os.path.join(path_to_subset, folder, image)
image = cv.imread(path_to_image)
image = cv.resize(image, (input_shape[1], input_shape[0]))
label = re.findall('\\w+\\_\\w+', path_to_image)[0].split('_')
X.append(image)
Y.append(label)
X = np.array(X) / 255.0
Y = np.array(Y)
mlb = MultiLabelBinarizer()
Y = mlb.fit_transform(Y)
x, test_x, y, test_y = train_test_split(X, Y, test_size=0.1, stratify=Y, shuffle=True, random_state=1)
train_x, val_x, train_y, val_y = train_test_split(x, y, test_size=0.2, stratify=y, shuffle=True, random_state=1)
datagen = ImageDataGenerator(rotation_range=45, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.2, horizontal_flip=True, validation_split=0.2)
input_shape = (96, 96, 3)
num_classes = Y.shape[1]
vit_model = vit.vit_b16(image_size=input_shape[0], activation='softmax', pretrained=True, include_top=False, pretrained_top=False, classes=num_classes, weights='imagenet21k')
for layer in vit_model.layers:
layer.trainable = False
output_layer = Dense(num_classes, activation='sigmoid')(vit_model.output)
model = Model(inputs=vit_model.input, outputs=output_layer)
checkpoint = ModelCheckpoint('../working/visiontransformermodel.tf', save_best_only=True, monitor='val_loss', verbose=1)
batch_size = 64
epochs = 100
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(datagen.flow(train_x, train_y, batch_size=batch_size, subset='training'), validation_data=datagen.flow(train_x, train_y, batch_size=batch_size, subset='validation'), epochs=epochs, callbacks=[checkpoint])
training_acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
filename = '../working/accuracy.csv'
with open(filename, 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(['Epoch', 'Training Accuracy', 'Validation Accuracy'])
for epoch, (train_acc, val_acc) in enumerate(zip(training_acc, val_acc), 1):
csvwriter.writerow([epoch, train_acc, val_acc])
training_loss = history.history['loss']
val_loss = history.history['val_loss']
filename = '../working/loss.csv'
with open(filename, 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(['Epoch', 'loss', 'val_loss'])
for epoch, (loss, val_loss) in enumerate(zip(training_loss, val_loss), 1):
csvwriter.writerow([epoch, loss, val_loss])
filename = '../working/accuracy.csv'
epoch = []
train_acc = []
val_acc = []
with open(filename, 'r') as csvfile:
csvreader = csv.reader(csvfile)
header = next(csvreader)
for row in csvreader:
epoch.append(int(row[0]))
train_acc.append(float(row[1]))
val_acc.append(float(row[2]))
fig = plt.figure(figsize=(20, 7))
plt.subplot(121)
plt.plot(epoch, train_acc, label='acc')
plt.plot(epoch, val_acc, label='val_acc')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.grid()
plt.legend()
plt.show() | code |
128001783/cell_21 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv')
tc = df.sort_values(by='Total Cases', ascending=False)
tc
ta = df.sort_values(by='Active', ascending=False)
ta
td = df.sort_values(by='Discharged', ascending=False)
td
tdh = df.sort_values(by='Deaths', ascending=False)
tdh
tp = df.sort_values(by='Population', ascending=False)
tp | code |
128001783/cell_13 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv')
tc = df.sort_values(by='Total Cases', ascending=False)
tc
plt.xticks(rotation=90)
plt.ticklabel_format(style='plain', axis='y')
ta = df.sort_values(by='Active', ascending=False)
ta
sns.barplot(x='State/UTs', y='Active', data=ta)
plt.xticks(rotation=90)
plt.ticklabel_format(style='plain', axis='y')
plt.title('Active cases state/UTs')
plt.show() | code |
128001783/cell_9 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv')
tc = df.sort_values(by='Total Cases', ascending=False)
tc | code |
128001783/cell_20 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv')
tc = df.sort_values(by='Total Cases', ascending=False)
tc
plt.xticks(rotation=90)
plt.ticklabel_format(style='plain', axis='y')
ta = df.sort_values(by='Active', ascending=False)
ta
plt.xticks(rotation=90)
plt.ticklabel_format(style='plain', axis='y')
td = df.sort_values(by='Discharged', ascending=False)
td
plt.xticks(rotation=90)
plt.ticklabel_format(style='plain', axis='y')
tdh = df.sort_values(by='Deaths', ascending=False)
tdh
plt.xticks(rotation=90)
plt.ticklabel_format(style='plain', axis='y')
plt.pie(x='Deaths', data=tdh[:5], labels=tdh['State/UTs'][:5], autopct='%0.2f%%')
plt.title('top 5 deaths state %')
plt.show() | code |
128001783/cell_6 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv')
df.info() | code |
128001783/cell_2 | [
"image_output_1.png"
] | import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore') | code |
128001783/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv')
tc = df.sort_values(by='Total Cases', ascending=False)
tc
plt.xticks(rotation=90)
plt.ticklabel_format(style='plain', axis='y')
plt.pie(x='Total Cases', data=tc[:5], labels=tc['State/UTs'][:5], autopct='%0.2f%%')
plt.title('top 5 total cases state %')
plt.show() | code |
128001783/cell_19 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv')
tc = df.sort_values(by='Total Cases', ascending=False)
tc
plt.xticks(rotation=90)
plt.ticklabel_format(style='plain', axis='y')
ta = df.sort_values(by='Active', ascending=False)
ta
plt.xticks(rotation=90)
plt.ticklabel_format(style='plain', axis='y')
td = df.sort_values(by='Discharged', ascending=False)
td
plt.xticks(rotation=90)
plt.ticklabel_format(style='plain', axis='y')
tdh = df.sort_values(by='Deaths', ascending=False)
tdh
sns.barplot(x='State/UTs', y='Deaths', data=tdh)
plt.xticks(rotation=90)
plt.ticklabel_format(style='plain', axis='y')
plt.title('Death cases state/UTs')
plt.show() | code |
128001783/cell_7 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv')
df.describe() | code |
128001783/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv')
tc = df.sort_values(by='Total Cases', ascending=False)
tc
ta = df.sort_values(by='Active', ascending=False)
ta
td = df.sort_values(by='Discharged', ascending=False)
td
tdh = df.sort_values(by='Deaths', ascending=False)
tdh | code |
128001783/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv')
for col in df.describe(include='object').columns:
print(col)
print(df[col].unique())
print('--' * 50) | code |
128001783/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv')
tc = df.sort_values(by='Total Cases', ascending=False)
tc
ta = df.sort_values(by='Active', ascending=False)
ta
td = df.sort_values(by='Discharged', ascending=False)
td | code |
128001783/cell_16 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv')
tc = df.sort_values(by='Total Cases', ascending=False)
tc
plt.xticks(rotation=90)
plt.ticklabel_format(style='plain', axis='y')
ta = df.sort_values(by='Active', ascending=False)
ta
plt.xticks(rotation=90)
plt.ticklabel_format(style='plain', axis='y')
td = df.sort_values(by='Discharged', ascending=False)
td
sns.barplot(x='State/UTs', y='Discharged', data=td)
plt.xticks(rotation=90)
plt.ticklabel_format(style='plain', axis='y')
plt.title('Discharged cases state/UTs')
plt.show() | code |
128001783/cell_17 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv')
tc = df.sort_values(by='Total Cases', ascending=False)
tc
plt.xticks(rotation=90)
plt.ticklabel_format(style='plain', axis='y')
ta = df.sort_values(by='Active', ascending=False)
ta
plt.xticks(rotation=90)
plt.ticklabel_format(style='plain', axis='y')
td = df.sort_values(by='Discharged', ascending=False)
td
plt.xticks(rotation=90)
plt.ticklabel_format(style='plain', axis='y')
plt.pie(x='Discharged', data=td[:5], labels=td['State/UTs'][:5], autopct='%0.2f%%')
plt.title('top 5 discharged state %')
plt.show() | code |
128001783/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv')
tc = df.sort_values(by='Total Cases', ascending=False)
tc
plt.xticks(rotation=90)
plt.ticklabel_format(style='plain', axis='y')
ta = df.sort_values(by='Active', ascending=False)
ta
plt.xticks(rotation=90)
plt.ticklabel_format(style='plain', axis='y')
plt.pie(x='Active', data=ta[:5], labels=ta['State/UTs'][:5], autopct='%0.2f%%')
plt.title('top 5 active state %')
plt.show() | code |
128001783/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv')
tc = df.sort_values(by='Total Cases', ascending=False)
tc
sns.barplot(x='State/UTs', y='Total Cases', data=tc)
plt.xticks(rotation=90)
plt.ticklabel_format(style='plain', axis='y')
plt.title('Total cases state/UTs')
plt.show() | code |
128001783/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv')
tc = df.sort_values(by='Total Cases', ascending=False)
tc
ta = df.sort_values(by='Active', ascending=False)
ta | code |
128001783/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv')
df | code |
32064609/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
base = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv', parse_dates=['Last Update'])
df = base[base['Country/Region'] == 'Brazil'].copy()  # copy the slice so the new column can be added without a SettingWithCopyWarning
df['Contamined'] = df['Confirmed'] - df['Deaths'] - df['Recovered'] | code |
32064609/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
base = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv', parse_dates=['Last Update'])
df = base[base['Country/Region'] == 'Brazil']
df[df['Last Update'] == '2020-03-08 05:31:00'] | code |
32064609/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
base = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv', parse_dates=['Last Update'])
df = base[base['Country/Region'] == 'Brazil']
df | code |
322480/cell_9 | [
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
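# Encode Sex numerically: male -> 0, female -> 1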
titanic.loc[titanic['Sex'] == 'male', 'Sex'] = 0
titanic.loc[titanic['Sex'] == 'female', 'Sex'] = 1
print(titanic['Sex']) | code |
322480/cell_4 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
print(titanic.head()) | code |
322480/cell_11 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
titanic.loc[titanic['Sex'] == 'male', 'Sex'] = 0
titanic.loc[titanic['Sex'] == 'female', 'Sex'] = 1
titanic['Embarked'] = titanic['Embarked'].fillna('S') | code |
322480/cell_1 | [
"application_vnd.jupyter.stderr_output_9.png",
"text_plain_output_5.png",
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_6.png",
"text_plain_output_4.png",
"text_plain_output_3.png",
"text_plain_output_7.png",
"text_plain_output_8.png",
"text_plain_output_1.png"
] | from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
titanic = pd.read_csv('../input/train.csv')
print(titanic.describe()) | code |
322480/cell_7 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
titanic.loc[titanic['Sex'] == 'male', 'Sex'] = 0
print(titanic['Sex']) | code |
322480/cell_3 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
print(titanic.describe()) | code |
322480/cell_10 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
titanic.loc[titanic['Sex'] == 'male', 'Sex'] = 0
titanic.loc[titanic['Sex'] == 'female', 'Sex'] = 1
print(titanic['Embarked'].count())
print(titanic['Embarked'].unique()) | code |
322480/cell_12 | [
"text_plain_output_5.png",
"application_vnd.jupyter.stderr_output_4.png",
"text_plain_output_3.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
titanic.loc[titanic['Sex'] == 'male', 'Sex'] = 0
titanic.loc[titanic['Sex'] == 'female', 'Sex'] = 1
print(titanic['Embarked'].unique()) | code |
322480/cell_5 | [
"text_plain_output_5.png",
"application_vnd.jupyter.stderr_output_4.png",
"application_vnd.jupyter.stderr_output_6.png",
"text_plain_output_3.png",
"text_plain_output_7.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
print(titanic['Cabin'].count()) | code |
18161218/cell_21 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/train.csv', dtype={'YearBuilt': 'str', 'YrSold': 'str', 'GarageYrBlt': 'str', 'YearRemodAdd': 'str'})
data.shape
missing_list = data.columns[data.isna().any()].tolist()
data.columns[data.isna().any()].tolist()
data.shape
data_org = data
data_org.shape
data.drop(['Alley', 'MasVnrArea', 'PoolQC', 'Fence', 'MiscFeature'], inplace=True, axis=1)
data.shape
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
categorical = ['object']
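# Impute missing values: numeric columns with the column median, categorical columns with a 'Not_Available' placeholder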
for cols in list(data.select_dtypes(include=numerics).columns.values):
data[cols] = data[cols].replace(np.nan, data[cols].median())
for cols in list(data.select_dtypes(include=categorical).columns.values):
data[cols] = data[cols].replace(np.nan, 'Not_Available')
data.columns[data.isna().any()].tolist()
a = data.select_dtypes(include=numerics)
a.drop(['Id'], inplace=True, axis=1)
df = a.iloc[:, 2:3]
df.shape
a = data.select_dtypes(include=numerics)
df = pd.DataFrame(data=a.iloc[:, 1:2])
import seaborn as sns
import matplotlib.pyplot as plt
for i in range(0, len(data.select_dtypes(include=numerics))):
df = pd.DataFrame(data=data.select_dtypes(include=numerics).iloc[:, i:i + 4])
data.shape | code |
18161218/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/train.csv', dtype={'YearBuilt': 'str', 'YrSold': 'str', 'GarageYrBlt': 'str', 'YearRemodAdd': 'str'})
data.shape
missing_list = data.columns[data.isna().any()].tolist()
data.columns[data.isna().any()].tolist() | code |
18161218/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/train.csv', dtype={'YearBuilt': 'str', 'YrSold': 'str', 'GarageYrBlt': 'str', 'YearRemodAdd': 'str'})
data.shape
data.info(verbose=True) | code |
18161218/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/train.csv', dtype={'YearBuilt': 'str', 'YrSold': 'str', 'GarageYrBlt': 'str', 'YearRemodAdd': 'str'})
data.shape
data.describe() | code |
18161218/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/train.csv', dtype={'YearBuilt': 'str', 'YrSold': 'str', 'GarageYrBlt': 'str', 'YearRemodAdd': 'str'})
data.shape
missing_list = data.columns[data.isna().any()].tolist()
data.columns[data.isna().any()].tolist()
data.shape | code |
18161218/cell_19 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/train.csv', dtype={'YearBuilt': 'str', 'YrSold': 'str', 'GarageYrBlt': 'str', 'YearRemodAdd': 'str'})
data.shape
missing_list = data.columns[data.isna().any()].tolist()
data.columns[data.isna().any()].tolist()
data.shape
data_org = data
data_org.shape
data.drop(['Alley', 'MasVnrArea', 'PoolQC', 'Fence', 'MiscFeature'], inplace=True, axis=1)
data.shape
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
categorical = ['object']
for cols in list(data.select_dtypes(include=numerics).columns.values):
data[cols] = data[cols].replace(np.nan, data[cols].median())
for cols in list(data.select_dtypes(include=categorical).columns.values):
data[cols] = data[cols].replace(np.nan, 'Not_Available')
data.columns[data.isna().any()].tolist()
a = data.select_dtypes(include=numerics)
a.drop(['Id'], inplace=True, axis=1)
df = a.iloc[:, 2:3]
df.shape
a = data.select_dtypes(include=numerics)
df = pd.DataFrame(data=a.iloc[:, 1:2])
import seaborn as sns
import matplotlib.pyplot as plt
for i in range(0, len(data.select_dtypes(include=numerics))):
df = pd.DataFrame(data=data.select_dtypes(include=numerics).iloc[:, i:i + 4])
sns.boxplot(pd.melt(df))
plt.show() | code |