path (stringlengths: 13–17) | screenshot_names (sequencelengths: 1–873) | code (stringlengths: 0–40.4k) | cell_type (stringclasses: 1 value) |
---|---|---|---|
2011179/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
movies = pd.read_csv('../input/movie.csv')
tags = pd.read_csv('../input/tag.csv')
ratings = pd.read_csv('../input/rating.csv')
movies.isnull().values.any() | code |
2011179/cell_22 | [
"text_html_output_1.png"
] | import pandas as pd
movies = pd.read_csv('../input/movie.csv')
tags = pd.read_csv('../input/tag.csv')
ratings = pd.read_csv('../input/rating.csv')
movies.isnull().values.any()
movies = movies.dropna()
ind_animation = 'Animation'
ind_children = 'Children'
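# Boolean masks: flag whether each movie's pipe-separated genre string contains the keyword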
animation1 = movies['genres'].str.contains(ind_animation)
animation0 = ~movies['genres'].str.contains(ind_animation)
children1 = movies['genres'].str.contains(ind_children)
children0 = ~movies['genres'].str.contains(ind_children)
both = movies[animation1 & children1]
just_anim = movies[animation1 & children0]
just_chil = movies[animation0 & children1]
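# Mean rating per release year for the children-only titles (assumes 'rating' and 'year' were merged into movies in an earlier cell)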
just_chil_plt = just_chil[['rating', 'year']]
just_chil_plt = just_chil_plt.groupby(['year'], as_index=False).mean()
just_chil_plt.head(15) | code |
2011179/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
movies = pd.read_csv('../input/movie.csv')
tags = pd.read_csv('../input/tag.csv')
ratings = pd.read_csv('../input/rating.csv')
movies.head() | code |
74064945/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
df.head() | code |
74064945/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
df.quality.hist() | code |
74064945/cell_14 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
fig = plt.figure(figsize=(5, 5))
sns.barplot(x='quality', y='volatile acidity', data=df)
figure = plt.figure(figsize=(10, 10))
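# Pairwise Pearson correlations across the numeric columns, rendered as an annotated heatmap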
correlation = df.corr()
sns.heatmap(correlation, annot=True) | code |
74064945/cell_10 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
fig = plt.figure(figsize=(5, 5))
sns.barplot(x='quality', y='volatile acidity', data=df) | code |
74064945/cell_12 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
fig = plt.figure(figsize=(5, 5))
sns.barplot(x='quality', y='volatile acidity', data=df)
sns.barplot(x='quality', y='citric acid', data=df) | code |
74064945/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
df.describe() | code |
106192728/cell_21 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/heart-disease-detection/heart_disease.csv')
df.columns
df.shape
df.isnull().sum()
df.describe().T
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 6))
sns.countplot(x='disease', hue='gender', data=df, palette='colorblind', edgecolor=sns.color_palette('dark', n_colors=1))
plt.show() | code |
106192728/cell_9 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/heart-disease-detection/heart_disease.csv')
df.columns
df.shape
df.isnull().sum()
df.describe().T
df['disease'].value_counts() | code |
106192728/cell_25 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/heart-disease-detection/heart_disease.csv')
df.columns
df.shape
df.isnull().sum()
df.describe().T
import seaborn as sns
import matplotlib.pyplot as plt
df.corr()
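# Drop the raw 'age' column (presumably redundant with 'age_in_years') before re-examining correlations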
df.drop(columns='age', inplace=True)
plt.figure(figsize=(10, 10))
sns.heatmap(df.corr(), annot=True, fmt='.0%')
plt.show() | code |
106192728/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/heart-disease-detection/heart_disease.csv')
df.columns | code |
106192728/cell_34 | [
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
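# x_train/x_test/y_train/y_test come from a train/test split cell not included in this row.
# Fit the scaler on the training split only, then reuse its statistics to transform the test split.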
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_params = {'n_estimators': [5, 50, 250], 'max_depth': [2, 4, 8, 16, 32, None]}
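# 5-fold cross-validated grid search over all 18 (n_estimators, max_depth) combinations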
cv = GridSearchCV(rf_model, rf_params, cv=5)
cv.fit(x_train, y_train)
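# Refit a standalone model with one configuration (presumably the best found by the search above)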
rf_model = RandomForestClassifier(max_depth=8, n_estimators=50)
rf_model.fit(x_train, y_train)
rf_model.score(x_test, y_test)
rf_pred = rf_model.predict(x_test)
from sklearn.metrics import classification_report
print(classification_report(y_test, rf_pred)) | code |
106192728/cell_23 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/heart-disease-detection/heart_disease.csv')
df.columns
df.shape
df.isnull().sum()
df.describe().T
df.corr() | code |
106192728/cell_30 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_params = {'n_estimators': [5, 50, 250], 'max_depth': [2, 4, 8, 16, 32, None]}
cv = GridSearchCV(rf_model, rf_params, cv=5)
cv.fit(x_train, y_train) | code |
106192728/cell_20 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/heart-disease-detection/heart_disease.csv')
df.columns
df.shape
df.isnull().sum()
df.describe().T
import seaborn as sns
import matplotlib.pyplot as plt
sns.countplot(x=df['gender']) | code |
106192728/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/heart-disease-detection/heart_disease.csv')
df.columns
df.shape
df.isnull().sum() | code |
106192728/cell_19 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/heart-disease-detection/heart_disease.csv')
df.columns
df.shape
df.isnull().sum()
df.describe().T
import seaborn as sns
import matplotlib.pyplot as plt
sns.countplot(x=df['alco']) | code |
106192728/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
106192728/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/heart-disease-detection/heart_disease.csv')
df.columns
df.shape
df.isnull().sum()
df.info() | code |
106192728/cell_18 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/heart-disease-detection/heart_disease.csv')
df.columns
df.shape
df.isnull().sum()
df.describe().T
import seaborn as sns
import matplotlib.pyplot as plt
sns.countplot(x=df['smoke']) | code |
106192728/cell_32 | [
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_params = {'n_estimators': [5, 50, 250], 'max_depth': [2, 4, 8, 16, 32, None]}
cv = GridSearchCV(rf_model, rf_params, cv=5)
cv.fit(x_train, y_train)
rf_model = RandomForestClassifier(max_depth=8, n_estimators=50)
rf_model.fit(x_train, y_train)
rf_model.score(x_test, y_test) | code |
106192728/cell_8 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/heart-disease-detection/heart_disease.csv')
df.columns
df.shape
df.isnull().sum()
df.describe().T | code |
106192728/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/heart-disease-detection/heart_disease.csv')
df.columns
df.shape
df.isnull().sum()
df.describe().T
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 6))
sns.countplot(x='age_in_years', hue='disease', data=df, palette='colorblind', edgecolor=sns.color_palette('dark', n_colors=1))
plt.show() | code |
106192728/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/heart-disease-detection/heart_disease.csv')
df.head() | code |
106192728/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/heart-disease-detection/heart_disease.csv')
df.columns
df.shape
df.isnull().sum()
df.describe().T
df['smoke'].value_counts() | code |
106192728/cell_35 | [
"text_html_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/heart-disease-detection/heart_disease.csv')
df.columns
df.shape
df.isnull().sum()
df.describe().T
import seaborn as sns
import matplotlib.pyplot as plt
df.corr()
df.drop(columns='age', inplace=True)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_params = {'n_estimators': [5, 50, 250], 'max_depth': [2, 4, 8, 16, 32, None]}
cv = GridSearchCV(rf_model, rf_params, cv=5)
cv.fit(x_train, y_train)
rf_model = RandomForestClassifier(max_depth=8, n_estimators=50)
rf_model.fit(x_train, y_train)
rf_model.score(x_test, y_test)
rf_pred = rf_model.predict(x_test)
from sklearn.metrics import confusion_matrix
plt.figure(figsize=(10, 10))
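# Confusion matrix: rows are true labels, columns are predicted labels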
cf_matrix = confusion_matrix(y_test, rf_pred)
sns.heatmap(cf_matrix, annot=True, annot_kws={'size': 25})
plt.show() | code |
106192728/cell_31 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier()
rf_params = {'n_estimators': [5, 50, 250], 'max_depth': [2, 4, 8, 16, 32, None]}
cv = GridSearchCV(rf_model, rf_params, cv=5)
cv.fit(x_train, y_train)
cv.best_estimator_ | code |
106192728/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/heart-disease-detection/heart_disease.csv')
df.columns
df.shape
df.isnull().sum()
df.describe().T
df.head() | code |
106192728/cell_22 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/heart-disease-detection/heart_disease.csv')
df.columns
df.shape
df.isnull().sum()
df.describe().T
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 6))
sns.countplot(x='disease', hue='smoke', data=df, palette='colorblind', edgecolor=sns.color_palette('dark', n_colors=1))
plt.show() | code |
106192728/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/heart-disease-detection/heart_disease.csv')
df.columns
df.shape
df.isnull().sum()
df.describe().T
import seaborn as sns
sns.countplot(x=df['disease']) | code |
106192728/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/heart-disease-detection/heart_disease.csv')
df.columns
df.shape
df.isnull().sum()
df.describe().T
df.head() | code |
106192728/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/heart-disease-detection/heart_disease.csv')
df.columns
df.shape | code |
49129658/cell_21 | [
"text_plain_output_1.png",
"image_output_2.png",
"image_output_1.png"
from tensorflow.keras.layers import Activation, Flatten, Dense, Dropout, Input
from tensorflow.keras import backend
from tensorflow.keras import backend, models, layers, regularizers, optimizers
from tensorflow.keras.applications import InceptionV3
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import BatchNormalization, Concatenate
from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import os
import random
import numpy as np
import pandas as pd
import os
train_directory = '../input/100-bird-species/train'
val_directory = '../input/100-bird-species/valid'
test_directory = '../input/100-bird-species/test'
train_datagen = ImageDataGenerator(rescale=1 / 255)
val_datagen = ImageDataGenerator(rescale=1 / 255)
test_datagen = ImageDataGenerator(rescale=1 / 255)
train_generator = train_datagen.flow_from_directory(train_directory, target_size=(224, 224), batch_size=32, color_mode='rgb', class_mode='sparse')
validation_generator = val_datagen.flow_from_directory(val_directory, target_size=(224, 224), batch_size=32, color_mode='rgb', class_mode='sparse')
test_generator = test_datagen.flow_from_directory(test_directory, target_size=(224, 224), batch_size=32, color_mode='rgb', class_mode='sparse')
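# class_mode='sparse' yields integer class labels, matching the sparse_categorical_crossentropy loss used below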
train_generator.class_indices
num_classes = len(train_generator.class_indices)
def display_random_grid(ncols=5, ds_path=train_directory):
fig, ax = plt.subplots(ncols=ncols, nrows=ncols, figsize=(15, 15))
for i in range(ncols):
for j in range(ncols):
bird_species = random.choice(os.listdir(ds_path))
random_bird_path = random.choice(os.listdir(ds_path + '/'+ bird_species))
random_bird = mpimg.imread(ds_path + '/' + bird_species + '/' + random_bird_path)
ax[i, j].imshow(random_bird)
ax[i, j].set_title(bird_species)
ax[i, j].axis('off')
display_random_grid()
backend.clear_session()
model_base = models.Sequential()
model_base.add(layers.Conv2D(512, (3, 3), activation='relu', input_shape=(224, 224, 3)))
model_base.add(layers.MaxPool2D((2, 2)))
model_base.add(BatchNormalization())
model_base.add(layers.Conv2D(512, (3, 3), activation='relu'))
model_base.add(layers.MaxPool2D((2, 2)))
model_base.add(BatchNormalization())
model_base.add(layers.Conv2D(256, (3, 3), activation='relu'))
model_base.add(layers.MaxPool2D((2, 2)))
model_base.add(BatchNormalization())
model_base.add(layers.Flatten())
model_base.add(layers.Dense(128, activation='relu'))
model_base.add(layers.Dropout(0.5))
model_base.add(layers.Dense(num_classes, activation='softmax'))
model_base.compile(optimizer=optimizers.Adam(learning_rate=0.001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
history = model_base.fit(train_generator, epochs=50, validation_data=validation_generator, validation_steps=20, verbose=1, callbacks=[EarlyStopping(monitor='val_accuracy', patience=5, restore_best_weights=True)])
test_loss, test_acc = model_base.evaluate(test_generator, steps=20)
backend.clear_session()
visible = Input(shape=(224, 224, 3))
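# Inception-style block: four parallel branches over the same input
# (1x1 conv; 1x1 -> 3x3; avg-pool -> 3x3; 1x1 -> 3x3 -> 3x3), each halving the
# spatial resolution, concatenated along the channel axis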
conv_1_1 = Conv2D(64, (1, 1), padding='same', activation='relu', strides=2)(visible)
conv_1_2 = Conv2D(64, (1, 1), padding='same', activation='relu')(visible)
conv_2_2 = Conv2D(64, (3, 3), padding='same', activation='relu', strides=2)(conv_1_2)
conv_1_3 = AveragePooling2D((3, 3), padding='same', strides=2)(visible)
conv_2_3 = Conv2D(64, (3, 3), padding='same', activation='relu')(conv_1_3)
conv_1_4 = Conv2D(64, (1, 1), padding='same', activation='relu')(visible)
conv_2_4 = Conv2D(64, (3, 3), padding='same', activation='relu')(conv_1_4)
conv_3_4 = Conv2D(64, (3, 3), padding='same', activation='relu', strides=2)(conv_2_4)
merge = Concatenate(axis=-1)([conv_1_1, conv_2_2, conv_2_3, conv_3_4])
flat = Flatten()(merge)
hidden = Dense(32, activation='relu')(flat)
drop = Dropout(0.5)(hidden)
output = Dense(num_classes, activation='softmax')(drop)
model_birdcl = Model(inputs=visible, outputs=output)
model_birdcl.compile(optimizer=optimizers.Adam(learning_rate=0.001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
history = model_birdcl.fit(train_generator, steps_per_epoch=1001, epochs=50, validation_data=validation_generator, validation_steps=29, verbose=1, callbacks=[EarlyStopping(monitor='val_accuracy', patience=5, restore_best_weights=True)])
test_loss, test_acc = model_birdcl.evaluate(test_generator, steps=29)
history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
acc_values = history_dict['accuracy']
val_acc_values = history_dict['val_accuracy']
epochs = range(1, len(history_dict['accuracy']) + 1)
backend.clear_session()
incbasemodel4 = InceptionV3(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
incbasemodel4.trainable = False
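# Transfer learning: freeze the ImageNet-pretrained InceptionV3 base so only the new classifier head is trained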
modelinceptionv3_vers4 = models.Sequential()
modelinceptionv3_vers4.add(incbasemodel4)
modelinceptionv3_vers4.add(layers.Conv2D(1024, (3, 3), padding='same', activation='relu'))
modelinceptionv3_vers4.add(BatchNormalization())
modelinceptionv3_vers4.add(layers.Dropout(0.5))
modelinceptionv3_vers4.add(layers.Flatten())
modelinceptionv3_vers4.add(layers.Dense(512, activation='relu'))
modelinceptionv3_vers4.add(layers.Dropout(0.5))
modelinceptionv3_vers4.add(layers.Dense(num_classes, activation='softmax'))
modelinceptionv3_vers4.compile(optimizer=optimizers.Adam(learning_rate=0.001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
history = modelinceptionv3_vers4.fit(train_generator, steps_per_epoch=1001, epochs=50, validation_data=validation_generator, validation_steps=29, verbose=1, callbacks=[EarlyStopping(monitor='val_accuracy', patience=5, restore_best_weights=True)])
test_loss, test_acc = modelinceptionv3_vers4.evaluate(test_generator, steps=29)
print('Using InceptionV3 Model and Adam Optimizer the accuracy is ---', test_acc)
history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
acc_values = history_dict['accuracy']
val_acc_values = history_dict['val_accuracy']
epochs = range(1, len(history_dict['accuracy']) + 1)
plt.plot(epochs, loss_values, 'bo', label='Training loss')
plt.plot(epochs, val_loss_values, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.plot(epochs, acc_values, 'bo', label='Training accuracy')
plt.plot(epochs, val_acc_values, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show() | code |
49129658/cell_20 | [
"image_output_1.png"
from tensorflow.keras.layers import Activation, Flatten, Dense, Dropout, Input
from tensorflow.keras import backend
from tensorflow.keras import backend, models, layers, regularizers, optimizers
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import BatchNormalization, Concatenate
from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import plot_model
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import os
import random
import numpy as np
import pandas as pd
import os
train_directory = '../input/100-bird-species/train'
val_directory = '../input/100-bird-species/valid'
test_directory = '../input/100-bird-species/test'
train_datagen = ImageDataGenerator(rescale=1 / 255)
val_datagen = ImageDataGenerator(rescale=1 / 255)
test_datagen = ImageDataGenerator(rescale=1 / 255)
train_generator = train_datagen.flow_from_directory(train_directory, target_size=(224, 224), batch_size=32, color_mode='rgb', class_mode='sparse')
validation_generator = val_datagen.flow_from_directory(val_directory, target_size=(224, 224), batch_size=32, color_mode='rgb', class_mode='sparse')
test_generator = test_datagen.flow_from_directory(test_directory, target_size=(224, 224), batch_size=32, color_mode='rgb', class_mode='sparse')
train_generator.class_indices
num_classes = len(train_generator.class_indices)
def display_random_grid(ncols=5, ds_path=train_directory):
fig, ax = plt.subplots(ncols=ncols, nrows=ncols, figsize=(15, 15))
for i in range(ncols):
for j in range(ncols):
bird_species = random.choice(os.listdir(ds_path))
random_bird_path = random.choice(os.listdir(ds_path + '/'+ bird_species))
random_bird = mpimg.imread(ds_path + '/' + bird_species + '/' + random_bird_path)
ax[i, j].imshow(random_bird)
ax[i, j].set_title(bird_species)
ax[i, j].axis('off')
display_random_grid()
backend.clear_session()
model_base = models.Sequential()
model_base.add(layers.Conv2D(512, (3, 3), activation='relu', input_shape=(224, 224, 3)))
model_base.add(layers.MaxPool2D((2, 2)))
model_base.add(BatchNormalization())
model_base.add(layers.Conv2D(512, (3, 3), activation='relu'))
model_base.add(layers.MaxPool2D((2, 2)))
model_base.add(BatchNormalization())
model_base.add(layers.Conv2D(256, (3, 3), activation='relu'))
model_base.add(layers.MaxPool2D((2, 2)))
model_base.add(BatchNormalization())
model_base.add(layers.Flatten())
model_base.add(layers.Dense(128, activation='relu'))
model_base.add(layers.Dropout(0.5))
model_base.add(layers.Dense(num_classes, activation='softmax'))
model_base.compile(optimizer=optimizers.Adam(learning_rate=0.001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
history = model_base.fit(train_generator, epochs=50, validation_data=validation_generator, validation_steps=20, verbose=1, callbacks=[EarlyStopping(monitor='val_accuracy', patience=5, restore_best_weights=True)])
test_loss, test_acc = model_base.evaluate(test_generator, steps=20)
backend.clear_session()
visible = Input(shape=(224, 224, 3))
conv_1_1 = Conv2D(64, (1, 1), padding='same', activation='relu', strides=2)(visible)
conv_1_2 = Conv2D(64, (1, 1), padding='same', activation='relu')(visible)
conv_2_2 = Conv2D(64, (3, 3), padding='same', activation='relu', strides=2)(conv_1_2)
conv_1_3 = AveragePooling2D((3, 3), padding='same', strides=2)(visible)
conv_2_3 = Conv2D(64, (3, 3), padding='same', activation='relu')(conv_1_3)
conv_1_4 = Conv2D(64, (1, 1), padding='same', activation='relu')(visible)
conv_2_4 = Conv2D(64, (3, 3), padding='same', activation='relu')(conv_1_4)
conv_3_4 = Conv2D(64, (3, 3), padding='same', activation='relu', strides=2)(conv_2_4)
merge = Concatenate(axis=-1)([conv_1_1, conv_2_2, conv_2_3, conv_3_4])
flat = Flatten()(merge)
hidden = Dense(32, activation='relu')(flat)
drop = Dropout(0.5)(hidden)
output = Dense(num_classes, activation='softmax')(drop)
model_birdcl = Model(inputs=visible, outputs=output)
model_birdcl.compile(optimizer=optimizers.Adam(learning_rate=0.001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
history = model_birdcl.fit(train_generator, steps_per_epoch=1001, epochs=50, validation_data=validation_generator, validation_steps=29, verbose=1, callbacks=[EarlyStopping(monitor='val_accuracy', patience=5, restore_best_weights=True)])
test_loss, test_acc = model_birdcl.evaluate(test_generator, steps=29)
history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
acc_values = history_dict['accuracy']
val_acc_values = history_dict['val_accuracy']
epochs = range(1, len(history_dict['accuracy']) + 1)
plot_model(model_birdcl) | code |
49129658/cell_11 | [
"text_plain_output_1.png"
] | from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_directory = '../input/100-bird-species/train'
val_directory = '../input/100-bird-species/valid'
test_directory = '../input/100-bird-species/test'
train_datagen = ImageDataGenerator(rescale=1 / 255)
val_datagen = ImageDataGenerator(rescale=1 / 255)
test_datagen = ImageDataGenerator(rescale=1 / 255)
train_generator = train_datagen.flow_from_directory(train_directory, target_size=(224, 224), batch_size=32, color_mode='rgb', class_mode='sparse')
validation_generator = val_datagen.flow_from_directory(val_directory, target_size=(224, 224), batch_size=32, color_mode='rgb', class_mode='sparse')
test_generator = test_datagen.flow_from_directory(test_directory, target_size=(224, 224), batch_size=32, color_mode='rgb', class_mode='sparse')
train_generator.class_indices | code |
49129658/cell_1 | [
"text_plain_output_100.png",
"text_plain_output_84.png",
"text_plain_output_56.png",
"text_plain_output_137.png",
"text_plain_output_139.png",
"text_plain_output_35.png",
"text_plain_output_130.png",
"text_plain_output_117.png",
"text_plain_output_98.png",
"text_plain_output_43.png",
"text_plain_output_78.png",
"text_plain_output_143.png",
"text_plain_output_106.png",
"text_plain_output_37.png",
"text_plain_output_138.png",
"text_plain_output_147.png",
"text_plain_output_90.png",
"text_plain_output_79.png",
"text_plain_output_5.png",
"text_plain_output_75.png",
"text_plain_output_48.png",
"text_plain_output_116.png",
"text_plain_output_128.png",
"text_plain_output_30.png",
"text_plain_output_73.png",
"text_plain_output_126.png",
"text_plain_output_115.png",
"text_plain_output_15.png",
"text_plain_output_133.png",
"text_plain_output_114.png",
"text_plain_output_70.png",
"text_plain_output_9.png",
"text_plain_output_44.png",
"text_plain_output_119.png",
"text_plain_output_86.png",
"text_plain_output_118.png",
"text_plain_output_131.png",
"text_plain_output_40.png",
"text_plain_output_123.png",
"text_plain_output_74.png",
"text_plain_output_31.png",
"text_plain_output_20.png",
"text_plain_output_102.png",
"text_plain_output_111.png",
"text_plain_output_101.png",
"text_plain_output_144.png",
"text_plain_output_132.png",
"text_plain_output_60.png",
"text_plain_output_68.png",
"text_plain_output_4.png",
"text_plain_output_65.png",
"text_plain_output_64.png",
"text_plain_output_13.png",
"text_plain_output_107.png",
"text_plain_output_52.png",
"text_plain_output_66.png",
"text_plain_output_45.png",
"text_plain_output_14.png",
"text_plain_output_32.png",
"text_plain_output_88.png",
"text_plain_output_29.png",
"text_plain_output_140.png",
"text_plain_output_129.png",
"text_plain_output_58.png",
"text_plain_output_49.png",
"text_plain_output_63.png",
"text_plain_output_27.png",
"text_plain_output_76.png",
"text_plain_output_108.png",
"text_plain_output_54.png",
"text_plain_output_142.png",
"text_plain_output_10.png",
"text_plain_output_6.png",
"text_plain_output_92.png",
"text_plain_output_57.png",
"text_plain_output_120.png",
"text_plain_output_24.png",
"text_plain_output_21.png",
"text_plain_output_104.png",
"text_plain_output_47.png",
"text_plain_output_121.png",
"text_plain_output_25.png",
"text_plain_output_134.png",
"text_plain_output_77.png",
"text_plain_output_18.png",
"text_plain_output_50.png",
"text_plain_output_36.png",
"text_plain_output_96.png",
"text_plain_output_87.png",
"text_plain_output_3.png",
"text_plain_output_141.png",
"text_plain_output_112.png",
"text_plain_output_113.png",
"text_plain_output_22.png",
"text_plain_output_81.png",
"text_plain_output_69.png",
"text_plain_output_146.png",
"text_plain_output_145.png",
"text_plain_output_125.png",
"text_plain_output_38.png",
"text_plain_output_7.png",
"text_plain_output_91.png",
"text_plain_output_16.png",
"text_plain_output_59.png",
"text_plain_output_103.png",
"text_plain_output_71.png",
"text_plain_output_8.png",
"text_plain_output_122.png",
"text_plain_output_26.png",
"text_plain_output_109.png",
"text_plain_output_41.png",
"text_plain_output_34.png",
"text_plain_output_85.png",
"text_plain_output_42.png",
"text_plain_output_110.png",
"text_plain_output_67.png",
"text_plain_output_53.png",
"text_plain_output_23.png",
"text_plain_output_89.png",
"text_plain_output_51.png",
"text_plain_output_28.png",
"text_plain_output_72.png",
"text_plain_output_99.png",
"text_plain_output_136.png",
"text_plain_output_2.png",
"text_plain_output_127.png",
"text_plain_output_97.png",
"text_plain_output_1.png",
"text_plain_output_33.png",
"text_plain_output_39.png",
"text_plain_output_55.png",
"text_plain_output_82.png",
"text_plain_output_93.png",
"text_plain_output_19.png",
"text_plain_output_105.png",
"text_plain_output_80.png",
"text_plain_output_94.png",
"text_plain_output_124.png",
"text_plain_output_17.png",
"text_plain_output_11.png",
"text_plain_output_12.png",
"text_plain_output_62.png",
"text_plain_output_95.png",
"text_plain_output_61.png",
"text_plain_output_83.png",
"text_plain_output_135.png",
"text_plain_output_46.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
49129658/cell_18 | [
"text_plain_output_1.png",
"image_output_2.png",
"image_output_1.png"
from tensorflow.keras.layers import Activation, Flatten, Dense, Dropout, Input
from tensorflow.keras import backend
from tensorflow.keras import backend, models, layers, regularizers, optimizers
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import BatchNormalization, Concatenate
from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import os
import random
import numpy as np
import pandas as pd
import os
train_directory = '../input/100-bird-species/train'
val_directory = '../input/100-bird-species/valid'
test_directory = '../input/100-bird-species/test'
train_datagen = ImageDataGenerator(rescale=1 / 255)
val_datagen = ImageDataGenerator(rescale=1 / 255)
test_datagen = ImageDataGenerator(rescale=1 / 255)
train_generator = train_datagen.flow_from_directory(train_directory, target_size=(224, 224), batch_size=32, color_mode='rgb', class_mode='sparse')
validation_generator = val_datagen.flow_from_directory(val_directory, target_size=(224, 224), batch_size=32, color_mode='rgb', class_mode='sparse')
test_generator = test_datagen.flow_from_directory(test_directory, target_size=(224, 224), batch_size=32, color_mode='rgb', class_mode='sparse')
train_generator.class_indices
num_classes = len(train_generator.class_indices)
def display_random_grid(ncols=5, ds_path=train_directory):
fig, ax = plt.subplots(ncols=ncols, nrows=ncols, figsize=(15, 15))
for i in range(ncols):
for j in range(ncols):
bird_species = random.choice(os.listdir(ds_path))
random_bird_path = random.choice(os.listdir(ds_path + '/'+ bird_species))
random_bird = mpimg.imread(ds_path + '/' + bird_species + '/' + random_bird_path)
ax[i, j].imshow(random_bird)
ax[i, j].set_title(bird_species)
ax[i, j].axis('off')
display_random_grid()
backend.clear_session()
model_base = models.Sequential()
model_base.add(layers.Conv2D(512, (3, 3), activation='relu', input_shape=(224, 224, 3)))
model_base.add(layers.MaxPool2D((2, 2)))
model_base.add(BatchNormalization())
model_base.add(layers.Conv2D(512, (3, 3), activation='relu'))
model_base.add(layers.MaxPool2D((2, 2)))
model_base.add(BatchNormalization())
model_base.add(layers.Conv2D(256, (3, 3), activation='relu'))
model_base.add(layers.MaxPool2D((2, 2)))
model_base.add(BatchNormalization())
model_base.add(layers.Flatten())
model_base.add(layers.Dense(128, activation='relu'))
model_base.add(layers.Dropout(0.5))
model_base.add(layers.Dense(num_classes, activation='softmax'))
model_base.compile(optimizer=optimizers.Adam(learning_rate=0.001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
history = model_base.fit(train_generator, epochs=50, validation_data=validation_generator, validation_steps=20, verbose=1, callbacks=[EarlyStopping(monitor='val_accuracy', patience=5, restore_best_weights=True)])
test_loss, test_acc = model_base.evaluate(test_generator, steps=20)
backend.clear_session()
visible = Input(shape=(224, 224, 3))
conv_1_1 = Conv2D(64, (1, 1), padding='same', activation='relu', strides=2)(visible)
conv_1_2 = Conv2D(64, (1, 1), padding='same', activation='relu')(visible)
conv_2_2 = Conv2D(64, (3, 3), padding='same', activation='relu', strides=2)(conv_1_2)
conv_1_3 = AveragePooling2D((3, 3), padding='same', strides=2)(visible)
conv_2_3 = Conv2D(64, (3, 3), padding='same', activation='relu')(conv_1_3)
conv_1_4 = Conv2D(64, (1, 1), padding='same', activation='relu')(visible)
conv_2_4 = Conv2D(64, (3, 3), padding='same', activation='relu')(conv_1_4)
conv_3_4 = Conv2D(64, (3, 3), padding='same', activation='relu', strides=2)(conv_2_4)
merge = Concatenate(axis=-1)([conv_1_1, conv_2_2, conv_2_3, conv_3_4])
flat = Flatten()(merge)
hidden = Dense(32, activation='relu')(flat)
drop = Dropout(0.5)(hidden)
output = Dense(num_classes, activation='softmax')(drop)
model_birdcl = Model(inputs=visible, outputs=output)
model_birdcl.compile(optimizer=optimizers.Adam(learning_rate=0.001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
history = model_birdcl.fit(train_generator, steps_per_epoch=1001, epochs=50, validation_data=validation_generator, validation_steps=29, verbose=1, callbacks=[EarlyStopping(monitor='val_accuracy', patience=5, restore_best_weights=True)])
test_loss, test_acc = model_birdcl.evaluate(test_generator, steps=29)
print('Using Functional API and Adam Optimizer the accuracy is ---', test_acc)
history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
acc_values = history_dict['accuracy']
val_acc_values = history_dict['val_accuracy']
epochs = range(1, len(history_dict['accuracy']) + 1)
plt.plot(epochs, loss_values, 'bo', label='Training loss')
plt.plot(epochs, val_loss_values, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.plot(epochs, acc_values, 'bo', label='Training accuracy')
plt.plot(epochs, val_acc_values, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show() | code |
49129658/cell_16 | [
"text_plain_output_1.png"
] | from tensorflow.keras import backend
from tensorflow.keras import backend, models, layers, regularizers, optimizers
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import BatchNormalization, Concatenate
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_directory = '../input/100-bird-species/train'
val_directory = '../input/100-bird-species/valid'
test_directory = '../input/100-bird-species/test'
train_datagen = ImageDataGenerator(rescale=1 / 255)
val_datagen = ImageDataGenerator(rescale=1 / 255)
test_datagen = ImageDataGenerator(rescale=1 / 255)
train_generator = train_datagen.flow_from_directory(train_directory, target_size=(224, 224), batch_size=32, color_mode='rgb', class_mode='sparse')
validation_generator = val_datagen.flow_from_directory(val_directory, target_size=(224, 224), batch_size=32, color_mode='rgb', class_mode='sparse')
test_generator = test_datagen.flow_from_directory(test_directory, target_size=(224, 224), batch_size=32, color_mode='rgb', class_mode='sparse')
train_generator.class_indices
num_classes = len(train_generator.class_indices)
backend.clear_session()
model_base = models.Sequential()
model_base.add(layers.Conv2D(512, (3, 3), activation='relu', input_shape=(224, 224, 3)))
model_base.add(layers.MaxPool2D((2, 2)))
model_base.add(BatchNormalization())
model_base.add(layers.Conv2D(512, (3, 3), activation='relu'))
model_base.add(layers.MaxPool2D((2, 2)))
model_base.add(BatchNormalization())
model_base.add(layers.Conv2D(256, (3, 3), activation='relu'))
model_base.add(layers.MaxPool2D((2, 2)))
model_base.add(BatchNormalization())
model_base.add(layers.Flatten())
model_base.add(layers.Dense(128, activation='relu'))
model_base.add(layers.Dropout(0.5))
model_base.add(layers.Dense(num_classes, activation='softmax'))
model_base.compile(optimizer=optimizers.Adam(learning_rate=0.001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
history = model_base.fit(train_generator, epochs=50, validation_data=validation_generator, validation_steps=20, verbose=1, callbacks=[EarlyStopping(monitor='val_accuracy', patience=5, restore_best_weights=True)])
test_loss, test_acc = model_base.evaluate(test_generator, steps=20)
print('test_acc:', test_acc) | code |
49129658/cell_14 | [
"image_output_1.png"
] | import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import os
import random
import numpy as np
import pandas as pd
import os
train_directory = '../input/100-bird-species/train'
val_directory = '../input/100-bird-species/valid'
test_directory = '../input/100-bird-species/test'
def display_random_grid(ncols=5, ds_path=train_directory):
fig, ax = plt.subplots(ncols=ncols, nrows=ncols, figsize=(15, 15))
for i in range(ncols):
for j in range(ncols):
bird_species = random.choice(os.listdir(ds_path))
random_bird_path = random.choice(os.listdir(ds_path + '/' + bird_species))
random_bird = mpimg.imread(ds_path + '/' + bird_species + '/' + random_bird_path)
ax[i, j].imshow(random_bird)
ax[i, j].set_title(bird_species)
ax[i, j].axis('off')
display_random_grid() | code |
49129658/cell_10 | [
"text_plain_output_1.png"
] | from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_directory = '../input/100-bird-species/train'
val_directory = '../input/100-bird-species/valid'
test_directory = '../input/100-bird-species/test'
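# rescale=1/255 maps pixel intensities from [0, 255] into [0, 1]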
train_datagen = ImageDataGenerator(rescale=1 / 255)
val_datagen = ImageDataGenerator(rescale=1 / 255)
test_datagen = ImageDataGenerator(rescale=1 / 255)
train_generator = train_datagen.flow_from_directory(train_directory, target_size=(224, 224), batch_size=32, color_mode='rgb', class_mode='sparse')
validation_generator = val_datagen.flow_from_directory(val_directory, target_size=(224, 224), batch_size=32, color_mode='rgb', class_mode='sparse')
test_generator = test_datagen.flow_from_directory(test_directory, target_size=(224, 224), batch_size=32, color_mode='rgb', class_mode='sparse') | code |
49129658/cell_12 | [
"text_plain_output_1.png"
] | from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_directory = '../input/100-bird-species/train'
val_directory = '../input/100-bird-species/valid'
test_directory = '../input/100-bird-species/test'
train_datagen = ImageDataGenerator(rescale=1 / 255)
val_datagen = ImageDataGenerator(rescale=1 / 255)
test_datagen = ImageDataGenerator(rescale=1 / 255)
train_generator = train_datagen.flow_from_directory(train_directory, target_size=(224, 224), batch_size=32, color_mode='rgb', class_mode='sparse')
validation_generator = train_datagen.flow_from_directory(val_directory, target_size=(224, 224), batch_size=32, color_mode='rgb', class_mode='sparse')
test_generator = test_datagen.flow_from_directory(test_directory, target_size=(224, 224), batch_size=32, color_mode='rgb', class_mode='sparse')
train_generator.class_indices
num_classes = len(train_generator.class_indices)
print(num_classes) | code |
2034195/cell_9 | [
"text_html_output_1.png"
] | import missingno as msno
import pandas as pd
path = '../input/'
state_ts = pd.read_csv(path + 'State_time_series.csv', parse_dates=['Date'])
msno.bar(state_ts, color='r') | code |
2034195/cell_25 | [
"text_plain_output_1.png"
] | import pandas as pd
import plotly.graph_objs as go
import plotly.offline as py
path = '../input/'
state_ts = pd.read_csv(path + 'State_time_series.csv', parse_dates=['Date'])
missing = state_ts.isnull().sum().sum()
missing * 100 / (state_ts.shape[0] * state_ts.shape[1])
data = [go.Scatter(x=state_ts['Date'], y=state_ts['DaysOnZillow_AllHomes'], line=dict(color='#17BECF'))]
layout = {'title': 'Days On Zillow All Homes', 'font': dict(size=16), 'xaxis': {'range': ['2010-01-01', '2017-08-01']}}
state_vise = state_ts.groupby(['RegionName']).median()
state_vise.shape
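# Resample to monthly frequency, taking the median of each column across all states in the month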
state_month = state_ts.resample('M', on='Date').median()
state_month = state_month.reset_index()
state_month.shape
data = [go.Scatter(x=state_month['Date'], y=state_month['DaysOnZillow_AllHomes'])]
layout = {'title': 'Days On Zillow All Homes', 'font': dict(size=16), 'xaxis': {'range': ['2010-01-01', '2017-09-01']}}
data = [go.Scatter(x=state_month['Date'], y=state_month['InventorySeasonallyAdjusted_AllHomes'], name='Seasonally'), go.Scatter(x=state_month['Date'], y=state_month['InventoryRaw_AllHomes'], name='Raw')]
layout = {'title': 'Inventory of All Homes', 'font': dict(size=16), 'xaxis': {'range': ['2009-01-01', '2017-10-01']}}
data = [go.Scatter(x=state_month['Date'], y=state_month['HomesSoldAsForeclosuresRatio_AllHomes'], name='Sold')]
layout = {'title': 'Home Sold As Foreclosure Ratio of All Homes', 'font': dict(size=16)}
py.iplot({'data': data, 'layout': layout}) | code |
2034195/cell_4 | [
"image_output_1.png"
] | import pandas as pd
path = '../input/'
state_ts = pd.read_csv(path + 'State_time_series.csv', parse_dates=['Date'])
print('Number of rows and columns in state ts:', state_ts.shape) | code |
2034195/cell_23 | [
"text_html_output_1.png"
] | import pandas as pd
import plotly.graph_objs as go
import plotly.offline as py
path = '../input/'
state_ts = pd.read_csv(path + 'State_time_series.csv', parse_dates=['Date'])
missing = state_ts.isnull().sum().sum()
missing * 100 / (state_ts.shape[0] * state_ts.shape[1])
data = [go.Scatter(x=state_ts['Date'], y=state_ts['DaysOnZillow_AllHomes'], line=dict(color='#17BECF'))]
layout = {'title': 'Days On Zillow All Homes', 'font': dict(size=16), 'xaxis': {'range': ['2010-01-01', '2017-08-01']}}
state_vise = state_ts.groupby(['RegionName']).median()
state_vise.shape
state_month = state_ts.resample('M', on='Date').median()
state_month = state_month.reset_index()
state_month.shape
data = [go.Scatter(x=state_month['Date'], y=state_month['DaysOnZillow_AllHomes'])]
layout = {'title': 'Days On Zillow All Homes', 'font': dict(size=16), 'xaxis': {'range': ['2010-01-01', '2017-09-01']}}
data = [go.Scatter(x=state_month['Date'], y=state_month['InventorySeasonallyAdjusted_AllHomes'], name='Seasonally'), go.Scatter(x=state_month['Date'], y=state_month['InventoryRaw_AllHomes'], name='Raw')]
layout = {'title': 'Inventory of All Homes', 'font': dict(size=16), 'xaxis': {'range': ['2009-01-01', '2017-10-01']}}
py.iplot({'data': data, 'layout': layout}) | code |
2034195/cell_30 | [
"image_output_1.png"
] | import pandas as pd
import plotly.graph_objs as go
import plotly.offline as py
path = '../input/'
state_ts = pd.read_csv(path + 'State_time_series.csv', parse_dates=['Date'])
missing = state_ts.isnull().sum().sum()
missing * 100 / (state_ts.shape[0] * state_ts.shape[1])
data = [go.Scatter(x=state_ts['Date'], y=state_ts['DaysOnZillow_AllHomes'], line=dict(color='#17BECF'))]
layout = {'title': 'Days On Zillow All Homes', 'font': dict(size=16), 'xaxis': {'range': ['2010-01-01', '2017-08-01']}}
state_vise = state_ts.groupby(['RegionName']).median()
state_vise.shape
state_month = state_ts.resample('M', on='Date').median()
state_month = state_month.reset_index()
state_month.shape
data = [go.Scatter(x=state_month['Date'], y=state_month['DaysOnZillow_AllHomes'])]
layout = {'title': 'Days On Zillow All Homes', 'font': dict(size=16), 'xaxis': {'range': ['2010-01-01', '2017-09-01']}}
data = [go.Scatter(x=state_month['Date'], y=state_month['InventorySeasonallyAdjusted_AllHomes'], name='Seasonally'), go.Scatter(x=state_month['Date'], y=state_month['InventoryRaw_AllHomes'], name='Raw')]
layout = {'title': 'Inventory of All Homes', 'font': dict(size=16), 'xaxis': {'range': ['2009-01-01', '2017-10-01']}}
data = [go.Scatter(x=state_month['Date'], y=state_month['HomesSoldAsForeclosuresRatio_AllHomes'], name='Sold')]
layout = {'title': 'Home Sold As Foreclosure Ratio of All Homes', 'font': dict(size=16)}
data = [go.Scatter(x=state_month['Date'], y=state_month['MedianListingPricePerSqft_1Bedroom'], name='1 Bedroom'),
        go.Scatter(x=state_month['Date'], y=state_month['MedianListingPricePerSqft_2Bedroom'], name='2 Bedroom'),
        go.Scatter(x=state_month['Date'], y=state_month['MedianListingPricePerSqft_3Bedroom'], name='3 Bedroom'),
        go.Scatter(x=state_month['Date'], y=state_month['MedianListingPricePerSqft_4Bedroom'], name='4 Bedroom'),
        go.Scatter(x=state_month['Date'], y=state_month['MedianListingPricePerSqft_5BedroomOrMore'], name='5 or more Bedroom'),
        go.Scatter(x=state_month['Date'], y=state_month['MedianListingPricePerSqft_CondoCoop'], name='Condo Coop'),
        go.Scatter(x=state_month['Date'], y=state_month['MedianListingPricePerSqft_DuplexTriplex'], name='Duplex Triplex'),
        go.Scatter(x=state_month['Date'], y=state_month['MedianListingPricePerSqft_SingleFamilyResidence'], name='Single Family')]
layout = {'title': 'Median Listing Price$/sqft', 'font': dict(size=16), 'xaxis': {'range': ['2009-01-01', '2017-10-01']}}
py.iplot({'data': data, 'layout': layout}) | code |
2034195/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd
import plotly.graph_objs as go
import plotly.offline as py
path = '../input/'
state_ts = pd.read_csv(path + 'State_time_series.csv', parse_dates=['Date'])
missing = state_ts.isnull().sum().sum()
missing * 100 / (state_ts.shape[0] * state_ts.shape[1])
data = [go.Scatter(x=state_ts['Date'], y=state_ts['DaysOnZillow_AllHomes'], line=dict(color='#17BECF'))]
layout = {'title': 'Days On Zillow All Homes', 'font': dict(size=16), 'xaxis': {'range': ['2010-01-01', '2017-08-01']}}
state_vise = state_ts.groupby(['RegionName']).median()
state_vise.shape
state_month = state_ts.resample('M', on='Date').median()
state_month = state_month.reset_index()
state_month.shape
data = [go.Scatter(x=state_month['Date'], y=state_month['DaysOnZillow_AllHomes'])]
layout = {'title': 'Days On Zillow All Homes', 'font': dict(size=16), 'xaxis': {'range': ['2010-01-01', '2017-09-01']}}
py.iplot({'data': data, 'layout': layout}) | code |
2034195/cell_6 | [
"image_output_1.png"
] | import pandas as pd
path = '../input/'
state_ts = pd.read_csv(path + 'State_time_series.csv', parse_dates=['Date'])
state_ts.info() | code |
2034195/cell_2 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
plt.style.use('fivethirtyeight') | code |
2034195/cell_19 | [
"image_output_1.png"
] | import pandas as pd
path = '../input/'
state_ts = pd.read_csv(path + 'State_time_series.csv', parse_dates=['Date'])
missing = state_ts.isnull().sum().sum()
missing * 100 / (state_ts.shape[0] * state_ts.shape[1])
state_vise = state_ts.groupby(['RegionName']).median()
state_vise.shape
state_month = state_ts.resample('M', on='Date').median()
state_month = state_month.reset_index()
state_month.shape | code |
2034195/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
path = '../input/'
state_ts = pd.read_csv(path + 'State_time_series.csv', parse_dates=['Date'])
state_ts.describe() | code |
2034195/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
import plotly.graph_objs as go
import plotly.offline as py
path = '../input/'
state_ts = pd.read_csv(path + 'State_time_series.csv', parse_dates=['Date'])
missing = state_ts.isnull().sum().sum()
missing * 100 / (state_ts.shape[0] * state_ts.shape[1])
data = [go.Scatter(x=state_ts['Date'], y=state_ts['DaysOnZillow_AllHomes'], line=dict(color='#17BECF'))]
layout = {'title': 'Days On Zillow All Homes', 'font': dict(size=16), 'xaxis': {'range': ['2010-01-01', '2017-08-01']}}
py.iplot({'data': data, 'layout': layout}) | code |
2034195/cell_17 | [
"text_html_output_1.png"
] | import pandas as pd
path = '../input/'
state_ts = pd.read_csv(path + 'State_time_series.csv', parse_dates=['Date'])
missing = state_ts.isnull().sum().sum()
missing * 100 / (state_ts.shape[0] * state_ts.shape[1])
state_vise = state_ts.groupby(['RegionName']).median()
state_vise.shape | code |
2034195/cell_24 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
path = '../input/'
state_ts = pd.read_csv(path + 'State_time_series.csv', parse_dates=['Date'])
missing = state_ts.isnull().sum().sum()
missing * 100 / (state_ts.shape[0] * state_ts.shape[1])
cnt = state_ts['RegionName'].value_counts().to_frame()
state_vise = state_ts.groupby(['RegionName']).median()
state_vise.shape
fig, ax = plt.subplots(1, 2, figsize=(16, 10), sharey='all')
ax1, ax2 = ax.flatten()
sns.barplot(x=state_vise['InventorySeasonallyAdjusted_AllHomes'], y=state_vise.index, palette='magma', ax=ax1)
sns.barplot(x=state_vise['InventoryRaw_AllHomes'], y=state_vise.index, palette='magma', ax=ax2) | code |
2034195/cell_14 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
path = '../input/'
state_ts = pd.read_csv(path + 'State_time_series.csv', parse_dates=['Date'])
missing = state_ts.isnull().sum().sum()
missing * 100 / (state_ts.shape[0] * state_ts.shape[1])
cnt = state_ts['RegionName'].value_counts().to_frame()
print('Number of States', state_ts['RegionName'].nunique())
plt.figure(figsize=(15, 10))
sns.barplot(x=cnt['RegionName'], y=cnt.index) | code |
2034195/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
path = '../input/'
state_ts = pd.read_csv(path + 'State_time_series.csv', parse_dates=['Date'])
missing = state_ts.isnull().sum().sum()
missing * 100 / (state_ts.shape[0] * state_ts.shape[1])
cnt = state_ts['RegionName'].value_counts().to_frame()
state_vise = state_ts.groupby(['RegionName']).median()
state_vise.shape
plt.figure(figsize=(14, 10))
sns.barplot(x=state_vise['DaysOnZillow_AllHomes'], y=state_vise.index, palette='magma') | code |
2034195/cell_27 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
path = '../input/'
state_ts = pd.read_csv(path + 'State_time_series.csv', parse_dates=['Date'])
missing = state_ts.isnull().sum().sum()
missing * 100 / (state_ts.shape[0] * state_ts.shape[1])
cnt = state_ts['RegionName'].value_counts().to_frame()
state_vise = state_ts.groupby(['RegionName']).median()
state_vise.shape
fig, ax = plt.subplots(1, 2, figsize=(16, 10), sharey='all')
ax1, ax2 = ax.flatten()
sns.barplot(x=state_vise['InventorySeasonallyAdjusted_AllHomes'], y=state_vise.index, palette='magma', ax=ax1)
sns.barplot(x=state_vise['InventoryRaw_AllHomes'], y=state_vise.index, palette='magma', ax=ax2)
plt.figure(figsize=(14, 10))
sns.barplot(x=state_vise['HomesSoldAsForeclosuresRatio_AllHomes'], y=state_vise.index, palette='magma') | code |
2034195/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
path = '../input/'
state_ts = pd.read_csv(path + 'State_time_series.csv', parse_dates=['Date'])
missing = state_ts.isnull().sum().sum()
missing * 100 / (state_ts.shape[0] * state_ts.shape[1])
print('Date range:{} to {}'.format(state_ts['Date'].min(), state_ts['Date'].max()))
print('\n', state_ts['Date'].describe()) | code |
2034195/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
path = '../input/'
state_ts = pd.read_csv(path + 'State_time_series.csv', parse_dates=['Date'])
state_ts.head() | code |
32068113/cell_21 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
collision = pd.read_csv('../input/traffic-collision-data-from-2010-to-present/traffic-collision-data-from-2010-to-present.csv')
cols = collision.columns
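# Replace spaces in column names with underscores so columns can be accessed as attributes (e.g. collision.Area_Name)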
cols = cols.map(lambda x: x.replace(' ', '_'))
collision.columns = cols
collision.shape[0]
collision.columns
collision[collision.Area_Name == 'Hollywood']
collision[(collision.Victim_Age == 29.0) & (collision.Victim_Sex == 'F') & (collision.Time_Occurred == 1450)].set_index('Area_Name').loc['Hollywood'].iloc[0]
collision.groupby('Area_Name').size().sort_values(ascending=True).plot.barh() | code |
32068113/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
collision = pd.read_csv('../input/traffic-collision-data-from-2010-to-present/traffic-collision-data-from-2010-to-present.csv')
cols = collision.columns
cols = cols.map(lambda x: x.replace(' ', '_'))
collision.columns = cols
collision.shape[0]
collision.columns
collision[collision.Area_Name == 'Hollywood']
collision[(collision.Victim_Age == 29.0) & (collision.Victim_Sex == 'F') & (collision.Time_Occurred == 1450)].set_index('Area_Name').loc['Hollywood'].iloc[0] | code |
32068113/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
collision = pd.read_csv('../input/traffic-collision-data-from-2010-to-present/traffic-collision-data-from-2010-to-present.csv')
cols = collision.columns
cols = cols.map(lambda x: x.replace(' ', '_'))
collision.columns = cols
collision.shape[0]
collision.columns | code |
32068113/cell_25 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
collision = pd.read_csv('../input/traffic-collision-data-from-2010-to-present/traffic-collision-data-from-2010-to-present.csv')
cols = collision.columns
cols = cols.map(lambda x: x.replace(' ', '_'))
collision.columns = cols
collision.shape[0]
collision.columns
collision[collision.Area_Name == 'Hollywood']
collision[(collision.Victim_Age == 29.0) & (collision.Victim_Sex == 'F') & (collision.Time_Occurred == 1450)].set_index('Area_Name').loc['Hollywood'].iloc[0]
collision.groupby('Area_Name').size().sort_values(ascending=True).plot.barh()
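# Date_Occurred is an ISO 'YYYY-MM-DD' string, so its first four characters give the year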
collision['Year'] = collision['Date_Occurred'].str[:4]
import matplotlib.pyplot as plt
plt.figure(figsize=(15, 6))
collision['Year_Month'] = collision['Date_Occurred'].str[:7]
collision.groupby('Year_Month').size().plot() | code |
32068113/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
collision = pd.read_csv('../input/traffic-collision-data-from-2010-to-present/traffic-collision-data-from-2010-to-present.csv')
cols = collision.columns
cols = cols.map(lambda x: x.replace(' ', '_'))
collision.columns = cols
collision.shape[0]
collision.columns
collision[collision.Area_Name == 'Hollywood']
collision[(collision.Victim_Age == 29.0) & (collision.Victim_Sex == 'F') & (collision.Time_Occurred == 1450)].set_index('Area_Name').loc['Hollywood'].iloc[0]
collision.groupby('Area_Name').size().sort_values(ascending=True).plot.barh()
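# Date_Occurred is an ISO 'YYYY-MM-DD...' string, so its first 4 characters give the year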
collision['Year'] = collision['Date_Occurred'].str[:4]
collision.groupby('Year').size().plot.bar() | code |
32068113/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
collision = pd.read_csv('../input/traffic-collision-data-from-2010-to-present/traffic-collision-data-from-2010-to-present.csv')
cols = collision.columns
cols = cols.map(lambda x: x.replace(' ', '_'))
collision.columns = cols
collision.shape[0]
collision.columns
collision[collision.Area_Name == 'Hollywood'] | code |
32068113/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
collision = pd.read_csv('../input/traffic-collision-data-from-2010-to-present/traffic-collision-data-from-2010-to-present.csv')
cols = collision.columns
cols = cols.map(lambda x: x.replace(' ', '_'))
collision.columns = cols
collision.shape[0] | code |
32068113/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
collision = pd.read_csv('../input/traffic-collision-data-from-2010-to-present/traffic-collision-data-from-2010-to-present.csv')
cols = collision.columns
cols = cols.map(lambda x: x.replace(' ', '_'))
collision.columns = cols
collision.shape[0]
collision.columns
collision[collision.Area_Name == 'Hollywood']
collision[(collision.Victim_Age == 29.0) & (collision.Victim_Sex == 'F') & (collision.Time_Occurred == 1450)].set_index('Area_Name').loc['Hollywood'].iloc[0]
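# note: Time_Occurred is an HHMM integer (e.g. 1450 = 14:50), so its arithmetic mean is only a rough central tendency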
collision[['Time_Occurred', 'Victim_Age']].mean() | code |
32068113/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
32068113/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
collision = pd.read_csv('../input/traffic-collision-data-from-2010-to-present/traffic-collision-data-from-2010-to-present.csv')
cols = collision.columns
cols = cols.map(lambda x: x.replace(' ', '_'))
collision.columns = cols
collision.shape[0]
collision.columns
collision[collision.Area_Name == 'Hollywood']
collision[(collision.Victim_Age == 29.0) & (collision.Victim_Sex == 'F') & (collision.Time_Occurred == 1450)].set_index('Area_Name').loc['Hollywood'].iloc[0]
collision.Time_Occurred.value_counts().iloc[:20].plot.bar()  # 20 most frequent collision times (HHMM); value_counts already sorts by frequency, so sorting beforehand was redundant | code |
32068113/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
collision = pd.read_csv('../input/traffic-collision-data-from-2010-to-present/traffic-collision-data-from-2010-to-present.csv')
cols = collision.columns
cols = cols.map(lambda x: x.replace(' ', '_'))
collision.columns = cols
collision.head() | code |
32071330/cell_34 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt # plotting
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def plotPerColumnDistribution(df, nGraphShown, nGraphPerRow):
nunique = df.nunique()
df = df[[col for col in df if nunique[col] > 1 and nunique[col] < 50]]
nRow, nCol = df.shape
columnNames = list(df)
nGraphRow = (nCol + nGraphPerRow - 1) / nGraphPerRow
for i in range(min(nCol, nGraphShown)):
columnDf = df.iloc[:, i]
if not np.issubdtype(type(columnDf.iloc[0]), np.number):
valueCounts = columnDf.value_counts()
plt.xticks(rotation=90)
plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)
# Correlation matrix
def plotCorrelationMatrix(df, graphWidth):
filename = df.dataframeName
    df = df.dropna(axis='columns') # drop columns with NaN
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
if df.shape[1] < 2:
print(f'No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2')
return
corr = df.corr()
plt.figure(num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum = 1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title(f'Correlation Matrix for {filename}', fontsize=15)
plt.show()
# Scatter and density plots
def plotScatterMatrix(df, plotSize, textSize):
df = df.select_dtypes(include =[np.number]) # keep only numerical columns
# Remove rows and columns that would lead to df being singular
    df = df.dropna(axis='columns')
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
columnNames = list(df)
if len(columnNames) > 10: # reduce the number of columns for matrix inversion of kernel density plots
columnNames = columnNames[:10]
df = df[columnNames]
ax = pd.plotting.scatter_matrix(df, alpha=0.75, figsize=[plotSize, plotSize], diagonal='kde')
corrs = df.corr().values
    for i, j in zip(*np.triu_indices_from(ax, k = 1)):
ax[i, j].annotate('Corr. coef = %.3f' % corrs[i, j], (0.8, 0.2), xycoords='axes fraction', ha='center', va='center', size=textSize)
plt.suptitle('Scatter and Density Plot')
plt.show()
nRowsRead = 1000
df1 = pd.read_csv('/kaggle/input/testingdataworldwide_April_13.csv', delimiter=',', nrows=nRowsRead)
df1.dataframeName = 'testingdataworldwide_April_13.csv'
nRow, nCol = df1.shape
nRowsRead = 1000
df2 = pd.read_csv('/kaggle/input/testingdataworldwide_April_14.csv', delimiter=',', nrows=nRowsRead)
df2.dataframeName = 'testingdataworldwide_April_14.csv'
nRow, nCol = df2.shape
df_worlddata = pd.read_csv('/kaggle/input/testingdataworldwide_April_13.csv', delimiter=',', nrows=nRowsRead)
df_worlddata.dataframeName = 'testingdataworldwide_April_13.csv'
nRow, nCol = df_worlddata.shape  # measure the frame just loaded rather than df1
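# promote the Country column to the index (equivalent to df_worlddata.set_index('Country'))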
df_worlddata.index = df_worlddata['Country']
df_worlddata = df_worlddata.drop(['Country'], axis=1)
df_worlddata.head() | code |
32071330/cell_30 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def plotPerColumnDistribution(df, nGraphShown, nGraphPerRow):
nunique = df.nunique()
df = df[[col for col in df if nunique[col] > 1 and nunique[col] < 50]]
nRow, nCol = df.shape
columnNames = list(df)
nGraphRow = (nCol + nGraphPerRow - 1) / nGraphPerRow
for i in range(min(nCol, nGraphShown)):
columnDf = df.iloc[:, i]
if not np.issubdtype(type(columnDf.iloc[0]), np.number):
valueCounts = columnDf.value_counts()
plt.xticks(rotation=90)
plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)
# Correlation matrix
def plotCorrelationMatrix(df, graphWidth):
filename = df.dataframeName
    df = df.dropna(axis='columns') # drop columns with NaN
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
if df.shape[1] < 2:
print(f'No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2')
return
corr = df.corr()
plt.figure(num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum = 1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title(f'Correlation Matrix for {filename}', fontsize=15)
plt.show()
# Scatter and density plots
def plotScatterMatrix(df, plotSize, textSize):
df = df.select_dtypes(include =[np.number]) # keep only numerical columns
# Remove rows and columns that would lead to df being singular
    df = df.dropna(axis='columns')
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
columnNames = list(df)
if len(columnNames) > 10: # reduce the number of columns for matrix inversion of kernel density plots
columnNames = columnNames[:10]
df = df[columnNames]
ax = pd.plotting.scatter_matrix(df, alpha=0.75, figsize=[plotSize, plotSize], diagonal='kde')
corrs = df.corr().values
    for i, j in zip(*np.triu_indices_from(ax, k = 1)):
ax[i, j].annotate('Corr. coef = %.3f' % corrs[i, j], (0.8, 0.2), xycoords='axes fraction', ha='center', va='center', size=textSize)
plt.suptitle('Scatter and Density Plot')
plt.show()
nRowsRead = 1000
df1 = pd.read_csv('/kaggle/input/testingdataworldwide_April_13.csv', delimiter=',', nrows=nRowsRead)
df1.dataframeName = 'testingdataworldwide_April_13.csv'
nRow, nCol = df1.shape
nRowsRead = 1000
df2 = pd.read_csv('/kaggle/input/testingdataworldwide_April_14.csv', delimiter=',', nrows=nRowsRead)
df2.dataframeName = 'testingdataworldwide_April_14.csv'
nRow, nCol = df2.shape
plotScatterMatrix(df2, 20, 10) | code |
32071330/cell_33 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt # plotting
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def plotPerColumnDistribution(df, nGraphShown, nGraphPerRow):
nunique = df.nunique()
df = df[[col for col in df if nunique[col] > 1 and nunique[col] < 50]]
nRow, nCol = df.shape
columnNames = list(df)
nGraphRow = (nCol + nGraphPerRow - 1) / nGraphPerRow
for i in range(min(nCol, nGraphShown)):
columnDf = df.iloc[:, i]
if not np.issubdtype(type(columnDf.iloc[0]), np.number):
valueCounts = columnDf.value_counts()
plt.xticks(rotation=90)
plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)
# Correlation matrix
def plotCorrelationMatrix(df, graphWidth):
filename = df.dataframeName
df = df.dropna('columns') # drop columns with NaN
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
if df.shape[1] < 2:
print(f'No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2')
return
corr = df.corr()
plt.figure(num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum = 1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title(f'Correlation Matrix for {filename}', fontsize=15)
plt.show()
# Scatter and density plots
def plotScatterMatrix(df, plotSize, textSize):
df = df.select_dtypes(include =[np.number]) # keep only numerical columns
# Remove rows and columns that would lead to df being singular
df = df.dropna('columns')
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
columnNames = list(df)
if len(columnNames) > 10: # reduce the number of columns for matrix inversion of kernel density plots
columnNames = columnNames[:10]
df = df[columnNames]
ax = pd.plotting.scatter_matrix(df, alpha=0.75, figsize=[plotSize, plotSize], diagonal='kde')
corrs = df.corr().values
for i, j in zip(*plt.np.triu_indices_from(ax, k = 1)):
ax[i, j].annotate('Corr. coef = %.3f' % corrs[i, j], (0.8, 0.2), xycoords='axes fraction', ha='center', va='center', size=textSize)
plt.suptitle('Scatter and Density Plot')
plt.show()
nRowsRead = 1000
df1 = pd.read_csv('/kaggle/input/testingdataworldwide_April_13.csv', delimiter=',', nrows=nRowsRead)
df1.dataframeName = 'testingdataworldwide_April_13.csv'
nRow, nCol = df1.shape
nRowsRead = 1000
df2 = pd.read_csv('/kaggle/input/testingdataworldwide_April_14.csv', delimiter=',', nrows=nRowsRead)
df2.dataframeName = 'testingdataworldwide_April_14.csv'
nRow, nCol = df2.shape
df_worlddata = pd.read_csv('/kaggle/input/testingdataworldwide_April_13.csv', delimiter=',', nrows=nRowsRead)
df_worlddata.dataframeName = 'testingdataworldwide_April_13.csv'
nRow, nCol = df_worlddata.shape  # measure the frame just loaded rather than df1
print(f'There are {nRow} rows and {nCol} columns') | code |
32071330/cell_20 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def plotPerColumnDistribution(df, nGraphShown, nGraphPerRow):
nunique = df.nunique()
df = df[[col for col in df if nunique[col] > 1 and nunique[col] < 50]]
nRow, nCol = df.shape
columnNames = list(df)
nGraphRow = (nCol + nGraphPerRow - 1) / nGraphPerRow
for i in range(min(nCol, nGraphShown)):
columnDf = df.iloc[:, i]
if not np.issubdtype(type(columnDf.iloc[0]), np.number):
valueCounts = columnDf.value_counts()
plt.xticks(rotation=90)
plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)
# Correlation matrix
def plotCorrelationMatrix(df, graphWidth):
filename = df.dataframeName
    df = df.dropna(axis='columns') # drop columns with NaN
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
if df.shape[1] < 2:
print(f'No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2')
return
corr = df.corr()
plt.figure(num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum = 1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title(f'Correlation Matrix for {filename}', fontsize=15)
plt.show()
# Scatter and density plots
def plotScatterMatrix(df, plotSize, textSize):
df = df.select_dtypes(include =[np.number]) # keep only numerical columns
# Remove rows and columns that would lead to df being singular
    df = df.dropna(axis='columns')
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
columnNames = list(df)
if len(columnNames) > 10: # reduce the number of columns for matrix inversion of kernel density plots
columnNames = columnNames[:10]
df = df[columnNames]
ax = pd.plotting.scatter_matrix(df, alpha=0.75, figsize=[plotSize, plotSize], diagonal='kde')
corrs = df.corr().values
    for i, j in zip(*np.triu_indices_from(ax, k = 1)):
ax[i, j].annotate('Corr. coef = %.3f' % corrs[i, j], (0.8, 0.2), xycoords='axes fraction', ha='center', va='center', size=textSize)
plt.suptitle('Scatter and Density Plot')
plt.show()
nRowsRead = 1000
df1 = pd.read_csv('/kaggle/input/testingdataworldwide_April_13.csv', delimiter=',', nrows=nRowsRead)
df1.dataframeName = 'testingdataworldwide_April_13.csv'
nRow, nCol = df1.shape
plotScatterMatrix(df1, 20, 10) | code |
32071330/cell_6 | [
"image_output_1.png"
] | !pip install pycountry_convert
!pip install folium
!pip install plotly | code |
32071330/cell_26 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def plotPerColumnDistribution(df, nGraphShown, nGraphPerRow):
nunique = df.nunique()
df = df[[col for col in df if nunique[col] > 1 and nunique[col] < 50]]
nRow, nCol = df.shape
columnNames = list(df)
nGraphRow = (nCol + nGraphPerRow - 1) / nGraphPerRow
for i in range(min(nCol, nGraphShown)):
columnDf = df.iloc[:, i]
if not np.issubdtype(type(columnDf.iloc[0]), np.number):
valueCounts = columnDf.value_counts()
plt.xticks(rotation=90)
plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)
# Correlation matrix
def plotCorrelationMatrix(df, graphWidth):
filename = df.dataframeName
    df = df.dropna(axis='columns') # drop columns with NaN
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
if df.shape[1] < 2:
print(f'No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2')
return
corr = df.corr()
plt.figure(num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum = 1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title(f'Correlation Matrix for {filename}', fontsize=15)
plt.show()
# Scatter and density plots
def plotScatterMatrix(df, plotSize, textSize):
df = df.select_dtypes(include =[np.number]) # keep only numerical columns
# Remove rows and columns that would lead to df being singular
    df = df.dropna(axis='columns')
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
columnNames = list(df)
if len(columnNames) > 10: # reduce the number of columns for matrix inversion of kernel density plots
columnNames = columnNames[:10]
df = df[columnNames]
ax = pd.plotting.scatter_matrix(df, alpha=0.75, figsize=[plotSize, plotSize], diagonal='kde')
corrs = df.corr().values
    for i, j in zip(*np.triu_indices_from(ax, k = 1)):
ax[i, j].annotate('Corr. coef = %.3f' % corrs[i, j], (0.8, 0.2), xycoords='axes fraction', ha='center', va='center', size=textSize)
plt.suptitle('Scatter and Density Plot')
plt.show()
nRowsRead = 1000
df1 = pd.read_csv('/kaggle/input/testingdataworldwide_April_13.csv', delimiter=',', nrows=nRowsRead)
df1.dataframeName = 'testingdataworldwide_April_13.csv'
nRow, nCol = df1.shape
nRowsRead = 1000
df2 = pd.read_csv('/kaggle/input/testingdataworldwide_April_14.csv', delimiter=',', nrows=nRowsRead)
df2.dataframeName = 'testingdataworldwide_April_14.csv'
nRow, nCol = df2.shape
plotPerColumnDistribution(df2, 10, 5) | code |
32071330/cell_18 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def plotPerColumnDistribution(df, nGraphShown, nGraphPerRow):
nunique = df.nunique()
df = df[[col for col in df if nunique[col] > 1 and nunique[col] < 50]]
nRow, nCol = df.shape
columnNames = list(df)
nGraphRow = (nCol + nGraphPerRow - 1) / nGraphPerRow
for i in range(min(nCol, nGraphShown)):
columnDf = df.iloc[:, i]
if not np.issubdtype(type(columnDf.iloc[0]), np.number):
valueCounts = columnDf.value_counts()
plt.xticks(rotation=90)
plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)
# Correlation matrix
def plotCorrelationMatrix(df, graphWidth):
filename = df.dataframeName
    df = df.dropna(axis='columns') # drop columns with NaN
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
if df.shape[1] < 2:
print(f'No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2')
return
corr = df.corr()
plt.figure(num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum = 1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title(f'Correlation Matrix for {filename}', fontsize=15)
plt.show()
# Scatter and density plots
def plotScatterMatrix(df, plotSize, textSize):
df = df.select_dtypes(include =[np.number]) # keep only numerical columns
# Remove rows and columns that would lead to df being singular
    df = df.dropna(axis='columns')
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
columnNames = list(df)
if len(columnNames) > 10: # reduce the number of columns for matrix inversion of kernel density plots
columnNames = columnNames[:10]
df = df[columnNames]
ax = pd.plotting.scatter_matrix(df, alpha=0.75, figsize=[plotSize, plotSize], diagonal='kde')
corrs = df.corr().values
    for i, j in zip(*np.triu_indices_from(ax, k = 1)):
ax[i, j].annotate('Corr. coef = %.3f' % corrs[i, j], (0.8, 0.2), xycoords='axes fraction', ha='center', va='center', size=textSize)
plt.suptitle('Scatter and Density Plot')
plt.show()
nRowsRead = 1000
df1 = pd.read_csv('/kaggle/input/testingdataworldwide_April_13.csv', delimiter=',', nrows=nRowsRead)
df1.dataframeName = 'testingdataworldwide_April_13.csv'
nRow, nCol = df1.shape
plotCorrelationMatrix(df1, 8) | code |
32071330/cell_28 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def plotPerColumnDistribution(df, nGraphShown, nGraphPerRow):
nunique = df.nunique()
df = df[[col for col in df if nunique[col] > 1 and nunique[col] < 50]]
nRow, nCol = df.shape
columnNames = list(df)
nGraphRow = (nCol + nGraphPerRow - 1) / nGraphPerRow
for i in range(min(nCol, nGraphShown)):
columnDf = df.iloc[:, i]
if not np.issubdtype(type(columnDf.iloc[0]), np.number):
valueCounts = columnDf.value_counts()
plt.xticks(rotation=90)
plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)
# Correlation matrix
def plotCorrelationMatrix(df, graphWidth):
filename = df.dataframeName
    df = df.dropna(axis='columns') # drop columns with NaN
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
if df.shape[1] < 2:
print(f'No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2')
return
corr = df.corr()
plt.figure(num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum = 1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title(f'Correlation Matrix for {filename}', fontsize=15)
plt.show()
# Scatter and density plots
def plotScatterMatrix(df, plotSize, textSize):
df = df.select_dtypes(include =[np.number]) # keep only numerical columns
# Remove rows and columns that would lead to df being singular
    df = df.dropna(axis='columns')
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
columnNames = list(df)
if len(columnNames) > 10: # reduce the number of columns for matrix inversion of kernel density plots
columnNames = columnNames[:10]
df = df[columnNames]
ax = pd.plotting.scatter_matrix(df, alpha=0.75, figsize=[plotSize, plotSize], diagonal='kde')
corrs = df.corr().values
    for i, j in zip(*np.triu_indices_from(ax, k = 1)):
ax[i, j].annotate('Corr. coef = %.3f' % corrs[i, j], (0.8, 0.2), xycoords='axes fraction', ha='center', va='center', size=textSize)
plt.suptitle('Scatter and Density Plot')
plt.show()
nRowsRead = 1000
df1 = pd.read_csv('/kaggle/input/testingdataworldwide_April_13.csv', delimiter=',', nrows=nRowsRead)
df1.dataframeName = 'testingdataworldwide_April_13.csv'
nRow, nCol = df1.shape
nRowsRead = 1000
df2 = pd.read_csv('/kaggle/input/testingdataworldwide_April_14.csv', delimiter=',', nrows=nRowsRead)
df2.dataframeName = 'testingdataworldwide_April_14.csv'
nRow, nCol = df2.shape
plotCorrelationMatrix(df2, 8) | code |
32071330/cell_16 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def plotPerColumnDistribution(df, nGraphShown, nGraphPerRow):
nunique = df.nunique()
df = df[[col for col in df if nunique[col] > 1 and nunique[col] < 50]]
nRow, nCol = df.shape
columnNames = list(df)
nGraphRow = (nCol + nGraphPerRow - 1) / nGraphPerRow
for i in range(min(nCol, nGraphShown)):
columnDf = df.iloc[:, i]
if not np.issubdtype(type(columnDf.iloc[0]), np.number):
valueCounts = columnDf.value_counts()
plt.xticks(rotation=90)
plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)
# Correlation matrix
def plotCorrelationMatrix(df, graphWidth):
filename = df.dataframeName
    df = df.dropna(axis='columns') # drop columns with NaN
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
if df.shape[1] < 2:
print(f'No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2')
return
corr = df.corr()
plt.figure(num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum = 1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title(f'Correlation Matrix for {filename}', fontsize=15)
plt.show()
# Scatter and density plots
def plotScatterMatrix(df, plotSize, textSize):
df = df.select_dtypes(include =[np.number]) # keep only numerical columns
# Remove rows and columns that would lead to df being singular
    df = df.dropna(axis='columns')
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
columnNames = list(df)
if len(columnNames) > 10: # reduce the number of columns for matrix inversion of kernel density plots
columnNames = columnNames[:10]
df = df[columnNames]
ax = pd.plotting.scatter_matrix(df, alpha=0.75, figsize=[plotSize, plotSize], diagonal='kde')
corrs = df.corr().values
    for i, j in zip(*np.triu_indices_from(ax, k = 1)):
ax[i, j].annotate('Corr. coef = %.3f' % corrs[i, j], (0.8, 0.2), xycoords='axes fraction', ha='center', va='center', size=textSize)
plt.suptitle('Scatter and Density Plot')
plt.show()
nRowsRead = 1000
df1 = pd.read_csv('/kaggle/input/testingdataworldwide_April_13.csv', delimiter=',', nrows=nRowsRead)
df1.dataframeName = 'testingdataworldwide_April_13.csv'
nRow, nCol = df1.shape
plotPerColumnDistribution(df1, 10, 5) | code |
32071330/cell_24 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def plotPerColumnDistribution(df, nGraphShown, nGraphPerRow):
nunique = df.nunique()
df = df[[col for col in df if nunique[col] > 1 and nunique[col] < 50]]
nRow, nCol = df.shape
columnNames = list(df)
nGraphRow = (nCol + nGraphPerRow - 1) / nGraphPerRow
for i in range(min(nCol, nGraphShown)):
columnDf = df.iloc[:, i]
if not np.issubdtype(type(columnDf.iloc[0]), np.number):
valueCounts = columnDf.value_counts()
plt.xticks(rotation=90)
plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)
# Correlation matrix
def plotCorrelationMatrix(df, graphWidth):
filename = df.dataframeName
    df = df.dropna(axis='columns') # drop columns with NaN
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
if df.shape[1] < 2:
print(f'No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2')
return
corr = df.corr()
plt.figure(num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum = 1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title(f'Correlation Matrix for {filename}', fontsize=15)
plt.show()
# Scatter and density plots
def plotScatterMatrix(df, plotSize, textSize):
df = df.select_dtypes(include =[np.number]) # keep only numerical columns
# Remove rows and columns that would lead to df being singular
    df = df.dropna(axis='columns')
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
columnNames = list(df)
if len(columnNames) > 10: # reduce the number of columns for matrix inversion of kernel density plots
columnNames = columnNames[:10]
df = df[columnNames]
ax = pd.plotting.scatter_matrix(df, alpha=0.75, figsize=[plotSize, plotSize], diagonal='kde')
corrs = df.corr().values
    for i, j in zip(*np.triu_indices_from(ax, k = 1)):
ax[i, j].annotate('Corr. coef = %.3f' % corrs[i, j], (0.8, 0.2), xycoords='axes fraction', ha='center', va='center', size=textSize)
plt.suptitle('Scatter and Density Plot')
plt.show()
nRowsRead = 1000
df1 = pd.read_csv('/kaggle/input/testingdataworldwide_April_13.csv', delimiter=',', nrows=nRowsRead)
df1.dataframeName = 'testingdataworldwide_April_13.csv'
nRow, nCol = df1.shape
nRowsRead = 1000
df2 = pd.read_csv('/kaggle/input/testingdataworldwide_April_14.csv', delimiter=',', nrows=nRowsRead)
df2.dataframeName = 'testingdataworldwide_April_14.csv'
nRow, nCol = df2.shape
df2.head(5) | code |
32071330/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def plotPerColumnDistribution(df, nGraphShown, nGraphPerRow):
nunique = df.nunique()
df = df[[col for col in df if nunique[col] > 1 and nunique[col] < 50]]
nRow, nCol = df.shape
columnNames = list(df)
nGraphRow = (nCol + nGraphPerRow - 1) / nGraphPerRow
for i in range(min(nCol, nGraphShown)):
columnDf = df.iloc[:, i]
if not np.issubdtype(type(columnDf.iloc[0]), np.number):
valueCounts = columnDf.value_counts()
plt.xticks(rotation=90)
plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)
# Correlation matrix
def plotCorrelationMatrix(df, graphWidth):
filename = df.dataframeName
    df = df.dropna(axis='columns') # drop columns with NaN
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
if df.shape[1] < 2:
print(f'No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2')
return
corr = df.corr()
plt.figure(num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum = 1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title(f'Correlation Matrix for {filename}', fontsize=15)
plt.show()
# Scatter and density plots
def plotScatterMatrix(df, plotSize, textSize):
df = df.select_dtypes(include =[np.number]) # keep only numerical columns
# Remove rows and columns that would lead to df being singular
    df = df.dropna(axis='columns')
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
columnNames = list(df)
if len(columnNames) > 10: # reduce the number of columns for matrix inversion of kernel density plots
columnNames = columnNames[:10]
df = df[columnNames]
ax = pd.plotting.scatter_matrix(df, alpha=0.75, figsize=[plotSize, plotSize], diagonal='kde')
corrs = df.corr().values
    for i, j in zip(*np.triu_indices_from(ax, k = 1)):
ax[i, j].annotate('Corr. coef = %.3f' % corrs[i, j], (0.8, 0.2), xycoords='axes fraction', ha='center', va='center', size=textSize)
plt.suptitle('Scatter and Density Plot')
plt.show()
nRowsRead = 1000
df1 = pd.read_csv('/kaggle/input/testingdataworldwide_April_13.csv', delimiter=',', nrows=nRowsRead)
df1.dataframeName = 'testingdataworldwide_April_13.csv'
nRow, nCol = df1.shape
df1.head(5) | code |
32071330/cell_22 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def plotPerColumnDistribution(df, nGraphShown, nGraphPerRow):
nunique = df.nunique()
df = df[[col for col in df if nunique[col] > 1 and nunique[col] < 50]]
nRow, nCol = df.shape
columnNames = list(df)
nGraphRow = (nCol + nGraphPerRow - 1) / nGraphPerRow
for i in range(min(nCol, nGraphShown)):
columnDf = df.iloc[:, i]
if not np.issubdtype(type(columnDf.iloc[0]), np.number):
valueCounts = columnDf.value_counts()
plt.xticks(rotation=90)
plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)
# Correlation matrix
def plotCorrelationMatrix(df, graphWidth):
filename = df.dataframeName
    df = df.dropna(axis='columns') # drop columns with NaN
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
if df.shape[1] < 2:
print(f'No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2')
return
corr = df.corr()
plt.figure(num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum = 1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title(f'Correlation Matrix for {filename}', fontsize=15)
plt.show()
# Scatter and density plots
def plotScatterMatrix(df, plotSize, textSize):
df = df.select_dtypes(include =[np.number]) # keep only numerical columns
# Remove rows and columns that would lead to df being singular
    df = df.dropna(axis='columns')
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
columnNames = list(df)
if len(columnNames) > 10: # reduce the number of columns for matrix inversion of kernel density plots
columnNames = columnNames[:10]
df = df[columnNames]
ax = pd.plotting.scatter_matrix(df, alpha=0.75, figsize=[plotSize, plotSize], diagonal='kde')
corrs = df.corr().values
    for i, j in zip(*np.triu_indices_from(ax, k = 1)):
ax[i, j].annotate('Corr. coef = %.3f' % corrs[i, j], (0.8, 0.2), xycoords='axes fraction', ha='center', va='center', size=textSize)
plt.suptitle('Scatter and Density Plot')
plt.show()
nRowsRead = 1000
df1 = pd.read_csv('/kaggle/input/testingdataworldwide_April_13.csv', delimiter=',', nrows=nRowsRead)
df1.dataframeName = 'testingdataworldwide_April_13.csv'
nRow, nCol = df1.shape
nRowsRead = 1000
df2 = pd.read_csv('/kaggle/input/testingdataworldwide_April_14.csv', delimiter=',', nrows=nRowsRead)
df2.dataframeName = 'testingdataworldwide_April_14.csv'
nRow, nCol = df2.shape
print(f'There are {nRow} rows and {nCol} columns') | code |
32071330/cell_37 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt # plotting
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def plotPerColumnDistribution(df, nGraphShown, nGraphPerRow):
nunique = df.nunique()
df = df[[col for col in df if nunique[col] > 1 and nunique[col] < 50]]
nRow, nCol = df.shape
columnNames = list(df)
nGraphRow = (nCol + nGraphPerRow - 1) / nGraphPerRow
for i in range(min(nCol, nGraphShown)):
columnDf = df.iloc[:, i]
if not np.issubdtype(type(columnDf.iloc[0]), np.number):
valueCounts = columnDf.value_counts()
plt.xticks(rotation=90)
plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)
# Correlation matrix
def plotCorrelationMatrix(df, graphWidth):
filename = df.dataframeName
    df = df.dropna(axis='columns') # drop columns with NaN
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
if df.shape[1] < 2:
print(f'No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2')
return
corr = df.corr()
plt.figure(num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum = 1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title(f'Correlation Matrix for {filename}', fontsize=15)
plt.show()
# Scatter and density plots
def plotScatterMatrix(df, plotSize, textSize):
df = df.select_dtypes(include =[np.number]) # keep only numerical columns
# Remove rows and columns that would lead to df being singular
    df = df.dropna(axis='columns')
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
columnNames = list(df)
if len(columnNames) > 10: # reduce the number of columns for matrix inversion of kernel density plots
columnNames = columnNames[:10]
df = df[columnNames]
ax = pd.plotting.scatter_matrix(df, alpha=0.75, figsize=[plotSize, plotSize], diagonal='kde')
corrs = df.corr().values
    for i, j in zip(*np.triu_indices_from(ax, k = 1)):
ax[i, j].annotate('Corr. coef = %.3f' % corrs[i, j], (0.8, 0.2), xycoords='axes fraction', ha='center', va='center', size=textSize)
plt.suptitle('Scatter and Density Plot')
plt.show()
nRowsRead = 1000
df1 = pd.read_csv('/kaggle/input/testingdataworldwide_April_13.csv', delimiter=',', nrows=nRowsRead)
df1.dataframeName = 'testingdataworldwide_April_13.csv'
nRow, nCol = df1.shape
nRowsRead = 1000
df2 = pd.read_csv('/kaggle/input/testingdataworldwide_April_14.csv', delimiter=',', nrows=nRowsRead)
df2.dataframeName = 'testingdataworldwide_April_14.csv'
nRow, nCol = df2.shape
df_worlddata = pd.read_csv('/kaggle/input/testingdataworldwide_April_13.csv', delimiter=',', nrows=nRowsRead)
df_worlddata.dataframeName = 'testingdataworldwide_April_13.csv'
nRow, nCol = df_worlddata.shape  # measure the frame just loaded rather than df1
df_worlddata.index = df_worlddata['Country']
df_worlddata = df_worlddata.drop(['Country'], axis=1)
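# drop the case/death columns so only the testing metrics remain, then chart the 50 countries with the highest Tests/1M pop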
df_test = df_worlddata.drop(['Total Cases', 'Cases', 'Total Deaths', 'Deaths', 'Total Recovers', 'Active', 'Total Cases/1M pop', 'Deaths/1M pop'], axis=1)
f = plt.figure(figsize=(20, 15))
f.add_subplot(111)
plt.axes(axisbelow=True)
plt.barh(df_test.sort_values('Tests/1M pop')['Tests/1M pop'].index[-50:], df_test.sort_values('Tests/1M pop')['Tests/1M pop'].values[-50:], color='red')
plt.tick_params(size=5, labelsize=13)
plt.xlabel('Tests/1M pop ', fontsize=18)
plt.title('Top Countries (Tests/1M pop )', fontsize=20)
plt.grid(alpha=0.3) | code |
32071330/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def plotPerColumnDistribution(df, nGraphShown, nGraphPerRow):
nunique = df.nunique()
df = df[[col for col in df if nunique[col] > 1 and nunique[col] < 50]]
nRow, nCol = df.shape
columnNames = list(df)
nGraphRow = (nCol + nGraphPerRow - 1) / nGraphPerRow
for i in range(min(nCol, nGraphShown)):
columnDf = df.iloc[:, i]
if not np.issubdtype(type(columnDf.iloc[0]), np.number):
valueCounts = columnDf.value_counts()
plt.xticks(rotation=90)
plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)
# Correlation matrix
def plotCorrelationMatrix(df, graphWidth):
filename = df.dataframeName
    df = df.dropna(axis='columns') # drop columns with NaN
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
if df.shape[1] < 2:
print(f'No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2')
return
corr = df.corr()
plt.figure(num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum = 1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title(f'Correlation Matrix for {filename}', fontsize=15)
plt.show()
# Scatter and density plots
def plotScatterMatrix(df, plotSize, textSize):
df = df.select_dtypes(include =[np.number]) # keep only numerical columns
# Remove rows and columns that would lead to df being singular
    df = df.dropna(axis='columns')
df = df[[col for col in df if df[col].nunique() > 1]] # keep columns where there are more than 1 unique values
columnNames = list(df)
if len(columnNames) > 10: # reduce the number of columns for matrix inversion of kernel density plots
columnNames = columnNames[:10]
df = df[columnNames]
ax = pd.plotting.scatter_matrix(df, alpha=0.75, figsize=[plotSize, plotSize], diagonal='kde')
corrs = df.corr().values
    for i, j in zip(*np.triu_indices_from(ax, k = 1)):
ax[i, j].annotate('Corr. coef = %.3f' % corrs[i, j], (0.8, 0.2), xycoords='axes fraction', ha='center', va='center', size=textSize)
plt.suptitle('Scatter and Density Plot')
plt.show()
nRowsRead = 1000
df1 = pd.read_csv('/kaggle/input/testingdataworldwide_April_13.csv', delimiter=',', nrows=nRowsRead)
df1.dataframeName = 'testingdataworldwide_April_13.csv'
nRow, nCol = df1.shape
print(f'There are {nRow} rows and {nCol} columns') | code |
32071330/cell_5 | [
"image_output_1.png"
] | import os # accessing directory structure
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
73097119/cell_30 | [
"text_plain_output_1.png"
] | from os import listdir
from os.path import isfile, join
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
from tensorflow.keras import backend as K  # K is used below but was never imported
K.clear_session()
img_width = img_height = 224
training_data_dir = '../input/my-dogs-vs-cats/my-dogs-vs-cats/training'
validation_data_dir = '../input/my-dogs-vs-cats/my-dogs-vs-cats/validation'
batch_size = 32
train_datagen = ImageDataGenerator(rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
train_generator = train_datagen.flow_from_directory(training_data_dir, target_size=(img_width, img_height), batch_size=batch_size, class_mode='binary')
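# flow_from_directory labels classes alphabetically by subfolder name; with cat/dog folders (assumed), cat -> 0 and dog -> 1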
valid_datagen = ImageDataGenerator(rescale=1.0 / 255)
validation_generator = valid_datagen.flow_from_directory(validation_data_dir, target_size=(img_width, img_height), batch_size=batch_size, class_mode='binary')
if K.image_data_format() == 'channels_first':
input_shape = (3, img_width, img_height)
else:
input_shape = (img_width, img_height, 3)
def VGG16():
model = Sequential()
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', input_shape=input_shape, activation='relu', kernel_initializer='he_uniform'))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Flatten(name='flatten'))
model.add(Dense(4096, activation='relu', name='fc1'))
model.add(Dense(4096, activation='relu', name='fc2'))
model.add(Dense(1, activation='sigmoid', name='output'))
return model
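# a from-scratch VGG16 (no pretrained weights); the single sigmoid unit makes this a binary classifier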
model = VGG16()
early_stopping = EarlyStopping(monitor='val_accuracy', mode='auto', verbose=1, patience=3)
model_checkpoint = ModelCheckpoint(filepath='./checkpoint.h5', monitor='val_accuracy', save_best_only=True, mode='auto')
callbacks = [early_stopping, model_checkpoint]
opt = SGD(learning_rate=0.001, momentum=0.9)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
model.summary()
epochs = 2
steps_per_epoch = len(train_generator)
validation_steps = len(validation_generator)
history = model.fit(train_generator, validation_data=validation_generator, epochs=epochs, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps, callbacks=callbacks, verbose=1)  # fit_generator is deprecated in TF 2.x; fit accepts generators directly
model = load_model('../input/modelcheckpoint/final_model.h5')
import matplotlib.pyplot as plt
import random
import pandas as pd
from tensorflow.keras.preprocessing import image
df = pd.read_csv('../input/my-dogs-vs-cats/my-dogs-vs-cats/sampleSubmission.csv')
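# helper to stamp a predicted label into the submission row with a matching id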
def updateValue(df: pd.DataFrame, id: int, value: str):
df.loc[df['id'] == id, 'label'] = value
predict_dir_path = '../input/my-dogs-vs-cats/my-dogs-vs-cats/test'
onlyfiles = [f for f in listdir(predict_dir_path) if isfile(join(predict_dir_path, f))]
random.shuffle(onlyfiles)
dog_counter = 0
cat_counter = 0
times = 100  # score only the first 100 shuffled test images
for file in onlyfiles[:times]:
img = image.load_img(predict_dir_path + '/' + file, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
classes = model.predict(images, batch_size=10)
    prob = classes[0][0]  # sigmoid output in (0, 1)
    img_id = int(str(file).split('.')[0])
    if prob < 0.5:  # threshold the probability; an exact comparison to 0 would label almost everything 'dog'
        updateValue(df, img_id, 'cat')
        cat_counter += 1
    else:
        updateValue(df, img_id, 'dog')
        dog_counter += 1
df.to_csv('./sampleSubmission.csv', index=False)
df = pd.read_csv('./sampleSubmission.csv')
print(df.loc[df['label'] == 'dog'].to_string(index=False))
print('-------------------------')
print(df.loc[df['label'] == 'cat'].to_string(index=False)) | code |
73097119/cell_29 | [
"image_output_11.png",
"image_output_98.png",
"image_output_74.png",
"image_output_82.png",
"image_output_24.png",
"image_output_46.png",
"image_output_85.png",
"image_output_25.png",
"image_output_77.png",
"image_output_47.png",
"image_output_78.png",
"image_output_17.png",
"image_output_30.png",
"image_output_73.png",
"image_output_72.png",
"image_output_14.png",
"image_output_59.png",
"image_output_39.png",
"image_output_97.png",
"image_output_28.png",
"image_output_86.png",
"image_output_84.png",
"image_output_81.png",
"image_output_23.png",
"image_output_34.png",
"image_output_64.png",
"image_output_13.png",
"image_output_40.png",
"image_output_5.png",
"image_output_48.png",
"image_output_68.png",
"image_output_75.png",
"image_output_18.png",
"image_output_58.png",
"image_output_92.png",
"image_output_21.png",
"image_output_52.png",
"image_output_60.png",
"image_output_7.png",
"image_output_62.png",
"image_output_96.png",
"image_output_56.png",
"image_output_31.png",
"image_output_65.png",
"image_output_20.png",
"image_output_69.png",
"image_output_32.png",
"image_output_53.png",
"image_output_4.png",
"image_output_51.png",
"image_output_83.png",
"image_output_42.png",
"image_output_35.png",
"image_output_90.png",
"image_output_41.png",
"image_output_57.png",
"image_output_36.png",
"image_output_8.png",
"image_output_37.png",
"image_output_66.png",
"image_output_16.png",
"image_output_91.png",
"image_output_70.png",
"image_output_67.png",
"image_output_27.png",
"image_output_54.png",
"image_output_6.png",
"image_output_45.png",
"image_output_63.png",
"image_output_71.png",
"image_output_80.png",
"image_output_95.png",
"image_output_93.png",
"image_output_12.png",
"image_output_22.png",
"image_output_89.png",
"image_output_55.png",
"text_plain_output_1.png",
"image_output_94.png",
"image_output_3.png",
"image_output_29.png",
"image_output_44.png",
"image_output_43.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"image_output_88.png",
"image_output_33.png",
"image_output_87.png",
"image_output_50.png",
"image_output_15.png",
"image_output_99.png",
"image_output_49.png",
"image_output_100.png",
"image_output_76.png",
"image_output_9.png",
"image_output_19.png",
"image_output_79.png",
"image_output_61.png",
"image_output_38.png",
"image_output_26.png"
] | from os import listdir
from os.path import isfile, join
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
from tensorflow.keras import backend as K  # K is used below but was never imported
K.clear_session()
img_width = img_height = 224
training_data_dir = '../input/my-dogs-vs-cats/my-dogs-vs-cats/training'
validation_data_dir = '../input/my-dogs-vs-cats/my-dogs-vs-cats/validation'
batch_size = 32
train_datagen = ImageDataGenerator(rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
train_generator = train_datagen.flow_from_directory(training_data_dir, target_size=(img_width, img_height), batch_size=batch_size, class_mode='binary')
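# flow_from_directory labels classes alphabetically by subfolder name; with cat/dog folders (assumed), cat -> 0 and dog -> 1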
valid_datagen = ImageDataGenerator(rescale=1.0 / 255)
validation_generator = valid_datagen.flow_from_directory(validation_data_dir, target_size=(img_width, img_height), batch_size=batch_size, class_mode='binary')
if K.image_data_format() == 'channels_first':
input_shape = (3, img_width, img_height)
else:
input_shape = (img_width, img_height, 3)
def VGG16():
model = Sequential()
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', input_shape=input_shape, activation='relu', kernel_initializer='he_uniform'))
model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_uniform'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Flatten(name='flatten'))
model.add(Dense(4096, activation='relu', name='fc1'))
model.add(Dense(4096, activation='relu', name='fc2'))
model.add(Dense(1, activation='sigmoid', name='output'))
return model
model = VGG16()
early_stopping = EarlyStopping(monitor='val_accuracy', mode='auto', verbose=1, patience=3)
model_checkpoint = ModelCheckpoint(filepath='./checkpoint.h5', monitor='val_accuracy', save_best_only=True, mode='auto')
callbacks = [early_stopping, model_checkpoint]
opt = SGD(learning_rate=0.001, momentum=0.9)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
model.summary()
epochs = 2
steps_per_epoch = len(train_generator)
validation_steps = len(validation_generator)
history = model.fit(train_generator, validation_data=validation_generator, epochs=epochs, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps, callbacks=callbacks, verbose=1)  # fit_generator is deprecated in TF 2.x; fit accepts generators directly
model = load_model('../input/modelcheckpoint/final_model.h5')
import matplotlib.pyplot as plt
import random
import pandas as pd
from tensorflow.keras.preprocessing import image
df = pd.read_csv('../input/my-dogs-vs-cats/my-dogs-vs-cats/sampleSubmission.csv')
def updateValue(df: pd.DataFrame, id: int, value: str):
df.loc[df['id'] == id, 'label'] = value
predict_dir_path = '../input/my-dogs-vs-cats/my-dogs-vs-cats/test'
onlyfiles = [f for f in listdir(predict_dir_path) if isfile(join(predict_dir_path, f))]
random.shuffle(onlyfiles)
dog_counter = 0
cat_counter = 0
times = 100  # score only the first 100 shuffled test images
for file in onlyfiles[:times]:
img = image.load_img(predict_dir_path + '/' + file, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
classes = model.predict(images, batch_size=10)
    prob = classes[0][0]  # sigmoid output in (0, 1)
    img_id = int(str(file).split('.')[0])
    if prob < 0.5:  # threshold the probability; an exact comparison to 0 would label almost everything 'dog'
        updateValue(df, img_id, 'cat')
        if cat_counter < 100:
            plt.title('cat')
            plt.imshow(img)
            plt.show()
        cat_counter += 1
    else:
        updateValue(df, img_id, 'dog')
        if dog_counter < 100:
            plt.title('dog')
            plt.imshow(img)
            plt.show()
        dog_counter += 1
df.to_csv('./sampleSubmission.csv', index=False)
print('Total Dogs :', dog_counter)
print('Total Cats :', cat_counter) | code |
121151245/cell_13 | [
"text_plain_output_1.png"
] | code |
|
121151245/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import os
from pathlib import Path  # Path is used below but was never imported
import numpy as np
import pandas as pd
path_validation = Path('/kaggle/input/food-validation/images/')
path_training = Path('/kaggle/input/food-training/images/')
image_files = os.listdir(path_training)
for i in range(2):
image_path = os.path.join(path_training, image_files[i])
print(image_path)
image_pathv = os.path.join(path_validation, image_files[i])
print(image_pathv) | code |
121151245/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
import os
from pathlib import Path  # Path is used below but was never imported
import numpy as np
import pandas as pd
path_validation = Path('/kaggle/input/food-validation/images/')
path_training = Path('/kaggle/input/food-training/images/')
image_files = os.listdir(path_training)
for i in range(2):
image_path = os.path.join(path_training, image_files[i])
image_pathv = os.path.join(path_validation, image_files[i])
path_annotations = Path('/kaggle/input/food-training/annotations.json')
import json
with open(path_annotations) as f:  # assumption: the JSON maps 'dirname/filename' keys to dicts carrying a 'name' label, as label_func below implies
    annotations = json.load(f)
label_func = lambda x: annotations[str(x.parent.name + '/' + x.name)]['name']
sample_image_file = os.listdir(path_training)[0]
print(sample_image_file)
sample_image = Image.open(path_training / sample_image_file)
sample_label = label_func(path_training / sample_image_file)  # label_func expects a path (it reads .parent.name), not a PIL image
print(sample_label) | code |
121151245/cell_3 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | !ls /kaggle/input/food-training | code |
121151245/cell_10 | [
"text_plain_output_1.png"
] | import os
from pathlib import Path  # Path is used below but was never imported
from fastai.vision.all import ImageDataLoaders, Resize, aug_transforms  # assumption: fastai v2 supplies the loader and transforms used below
import numpy as np
import pandas as pd
path_validation = Path('/kaggle/input/food-validation/images/')
path_training = Path('/kaggle/input/food-training/images/')
image_files = os.listdir(path_training)
for i in range(2):
image_path = os.path.join(path_training, image_files[i])
image_pathv = os.path.join(path_validation, image_files[i])
path_annotations = Path('/kaggle/input/food-training/annotations.json')
import json
with open(path_annotations) as f:  # assumption: the JSON maps 'dirname/filename' keys to dicts carrying a 'name' label, as label_func below implies
    annotations = json.load(f)
label_func = lambda x: annotations[str(x.parent.name + '/' + x.name)]['name']
fnames = [path_training / f for f in os.listdir(path_training)]
dls = ImageDataLoaders.from_path_func(path_training, fnames, label_func, item_tfms=Resize(460), batch_tfms=aug_transforms(size=224, min_scale=0.75))  # assumed intent: from_folder takes no valid_path/label_func arguments, while from_path_func applies the custom label_func | code |
18108547/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/data.csv')
list(df.columns)
def clean_d(string):
    last_char = string[-1]
    if last_char == '0':
        return 0
    string = string[1:-1]
    num = float(string)
    if last_char == 'K':
        num = num * 1000
    elif last_char == 'M':
        num = num * 1000000
    return num
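# Worked example with hypothetical FIFA-style strings: clean_d('€110.5M') -> 110500000.0, clean_d('€565K') -> 565000.0, clean_d('€0') -> 0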
df['Wage_Num'] = df.apply(lambda row: clean_d(row['Wage']), axis=1)
df['Value_Num'] = df.apply(lambda row: clean_d(row['Value']), axis=1)
df.head() | code |
18108547/cell_25 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/data.csv')
list(df.columns)
def clean_d(string):
    last_char = string[-1]
    if last_char == '0':
        return 0
    string = string[1:-1]
    num = float(string)
    if last_char == 'K':
        num = num * 1000
    elif last_char == 'M':
        num = num * 1000000
    return num
df['Wage_Num'] = df.apply(lambda row: clean_d(row['Wage']), axis=1)
df['Value_Num'] = df.apply(lambda row: clean_d(row['Value']), axis=1)
df.shape
df.isna().sum()
df = df.dropna(axis=0, subset=['Preferred Foot'])
df.isna().sum()
sns.set()
df['Growth_Left'] = df['Potential'] - df['Overall']
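# plotting Growth_Left against Age should show remaining potential shrinking as players get older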
sns.lineplot(x='Age', y='Growth_Left', data=df) | code |
18108547/cell_34 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/data.csv')
list(df.columns)
def clean_d(string):
    last_char = string[-1]
    if last_char == '0':
        return 0
    string = string[1:-1]
    num = float(string)
    if last_char == 'K':
        num = num * 1000
    elif last_char == 'M':
        num = num * 1000000
    return num
df['Wage_Num'] = df.apply(lambda row: clean_d(row['Wage']), axis=1)
df['Value_Num'] = df.apply(lambda row: clean_d(row['Value']), axis=1)
df.shape
df.isna().sum()
df = df.dropna(axis=0, subset=['Preferred Foot'])
df.isna().sum()
top_100 = df[:100]
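# df[:100] assumes data.csv is sorted by Overall, so these rows are the 100 highest-rated players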
top_100.shape
age_100_plots = top_100['Age'].value_counts()
age_100_plots.plot(kind='bar') | code |
18108547/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/data.csv')
list(df.columns)
def clean_d(string):
    last_char = string[-1]
    if last_char == '0':
        return 0
    string = string[1:-1]
    num = float(string)
    if last_char == 'K':
        num = num * 1000
    elif last_char == 'M':
        num = num * 1000000
    return num
df['Wage_Num'] = df.apply(lambda row: clean_d(row['Wage']), axis=1)
df['Value_Num'] = df.apply(lambda row: clean_d(row['Value']), axis=1)
df.shape
df.isna().sum()
df = df.dropna(axis=0, subset=['Preferred Foot'])
df.isna().sum()
sns.set()
sns.lineplot(x='Overall', y='Wage_Num', data=df) | code |
18108547/cell_30 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/data.csv')
list(df.columns)
def clean_d(string):
    last_char = string[-1]
    if last_char == '0':
        return 0
    string = string[1:-1]
    num = float(string)
    if last_char == 'K':
        num = num * 1000
    elif last_char == 'M':
        num = num * 1000000
    return num
df['Wage_Num'] = df.apply(lambda row: clean_d(row['Wage']), axis=1)
df['Value_Num'] = df.apply(lambda row: clean_d(row['Value']), axis=1)
df.shape
df.isna().sum()
df = df.dropna(axis=0, subset=['Preferred Foot'])
df.isna().sum()
sns.set()
df['Growth_Left'] = df['Potential'] - df['Overall']
sns.lineplot(x='Age', y='Overall', data=df) | code |
18108547/cell_33 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/data.csv')
list(df.columns)
def clean_d(string):
    last_char = string[-1]
    if last_char == '0':
        return 0
    string = string[1:-1]
    num = float(string)
    if last_char == 'K':
        num = num * 1000
    elif last_char == 'M':
        num = num * 1000000
    return num
df['Wage_Num'] = df.apply(lambda row: clean_d(row['Wage']), axis=1)
df['Value_Num'] = df.apply(lambda row: clean_d(row['Value']), axis=1)
df.shape
df.isna().sum()
df = df.dropna(axis=0, subset=['Preferred Foot'])
df.isna().sum()
top_100 = df[:100]
top_100.shape
nationality_100_plots = top_100['Nationality'].value_counts()
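# value_counts gives per-nationality frequencies among the top 100; plot(kind='bar') renders them directly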
nationality_100_plots.plot(kind='bar') | code |