Dataset columns (one row per notebook cell):

  path              string    length 13 to 17      (e.g. "1009871/cell_5")
  screenshot_names  sequence  length 1 to 873      (names of the cell's rendered output images)
  code              string    length 0 to 40.4k    (the cell's source code)
  cell_type         string    1 class ("code")
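A quick way to sanity-check this schema is to load the rows and confirm the ranges above. The snippet below is a minimal sketch, assuming the rows have been exported to a hypothetical cells.jsonl file (one JSON record per row with the four fields listed); the file name and export format are assumptions, not part of the dataset itself.

# Minimal sketch: load and inspect rows that follow the schema above.
# 'cells.jsonl' is a hypothetical export path, not part of the original dataset.
import pandas as pd

df = pd.read_json('cells.jsonl', lines=True)
print(df.dtypes)                                             # path, screenshot_names, code, cell_type
print(df['path'].str.len().agg(['min', 'max']))              # expected to fall in the 13-17 range
print(df['screenshot_names'].str.len().agg(['min', 'max']))  # 1 to 873 screenshots per cell
print(df['code'].str.len().agg(['min', 'max']))              # 0 to roughly 40.4k characters
print(df['cell_type'].unique())                              # single class: 'code'

row = df.iloc[0]
print(row['path'], row['cell_type'])
print(row['code'][:200])                                     # first 200 characters of the cell's source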
1009871/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

train = pd.read_csv('train.csv')
train.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
train.head()
code
105176805/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os
import pandas as pd
import openpyxl
import yfinance as yf
import datetime
import time
import requests
import io

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90128404/cell_13
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense, Conv2D, Flatten, MaxPooling2D, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plot
import numpy as np
import pandas as pd
import seaborn as sb

data = pd.read_csv('../input/hardfakevsrealfaces/data.csv')
height, width = (128, 128)
X = np.empty((data.shape[0], height, width, 3))
for i in range(data.shape[0]):
    img = load_img('../input/hardfakevsrealfaces/{}/{}.jpg'.format(data.loc[i, 'label'], data.loc[i, 'images_id']), target_size=(height, width))
    X[i] = img_to_array(img)
X.shape

def changeLabels(x):
    return labels[x]

labels = data.label.unique()
labels = {labels[i]: i for i in range(labels.size)}
y = data.label.apply(changeLabels)
y[:5]
y = to_categorical(y, len(labels))
y = y.astype(int)
y[:5]

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=8)
(X_train.shape, y_train.shape)

model = Sequential()
model.add(Conv2D(64, kernel_size=3, activation='relu', input_shape=(height, width, 3)))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Conv2D(32, kernel_size=3, activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Conv2D(32, kernel_size=3, activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.45))
model.add(Dense(2, activation='softmax'))

epochs = 8
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
stats = model.fit(X_train, y_train, epochs=epochs, validation_split=0.2)
model.summary()

stats_df = pd.DataFrame(stats.history)
stats_df['epoch'] = list(range(1, epochs + 1))
plot.figure(figsize=(10, 8))
sb.lineplot(y='accuracy', x='epoch', data=stats_df, color='deeppink', linewidth=2.5, label='Training accuracy')
sb.lineplot(y='val_accuracy', x='epoch', data=stats_df, color='darkturquoise', linewidth=2.5, label='Validation accuracy')
plot.grid()
plot.legend()
plot.title('Training and validation accuracy')
code
90128404/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import numpy as np
import pandas as pd

data = pd.read_csv('../input/hardfakevsrealfaces/data.csv')
height, width = (128, 128)
X = np.empty((data.shape[0], height, width, 3))
for i in range(data.shape[0]):
    img = load_img('../input/hardfakevsrealfaces/{}/{}.jpg'.format(data.loc[i, 'label'], data.loc[i, 'images_id']), target_size=(height, width))
    X[i] = img_to_array(img)
X.shape

def changeLabels(x):
    return labels[x]

labels = data.label.unique()
labels = {labels[i]: i for i in range(labels.size)}
y = data.label.apply(changeLabels)
y[:5]
code
90128404/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.utils import to_categorical
import numpy as np
import pandas as pd

data = pd.read_csv('../input/hardfakevsrealfaces/data.csv')
height, width = (128, 128)
X = np.empty((data.shape[0], height, width, 3))
for i in range(data.shape[0]):
    img = load_img('../input/hardfakevsrealfaces/{}/{}.jpg'.format(data.loc[i, 'label'], data.loc[i, 'images_id']), target_size=(height, width))
    X[i] = img_to_array(img)
X.shape

def changeLabels(x):
    return labels[x]

labels = data.label.unique()
labels = {labels[i]: i for i in range(labels.size)}
y = data.label.apply(changeLabels)
y[:5]
y = to_categorical(y, len(labels))
y = y.astype(int)
y[:5]
code
90128404/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/hardfakevsrealfaces/data.csv')
data.head()
code
90128404/cell_11
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense, Conv2D, Flatten, MaxPooling2D, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.utils import to_categorical
import numpy as np
import pandas as pd

data = pd.read_csv('../input/hardfakevsrealfaces/data.csv')
height, width = (128, 128)
X = np.empty((data.shape[0], height, width, 3))
for i in range(data.shape[0]):
    img = load_img('../input/hardfakevsrealfaces/{}/{}.jpg'.format(data.loc[i, 'label'], data.loc[i, 'images_id']), target_size=(height, width))
    X[i] = img_to_array(img)
X.shape

def changeLabels(x):
    return labels[x]

labels = data.label.unique()
labels = {labels[i]: i for i in range(labels.size)}
y = data.label.apply(changeLabels)
y[:5]
y = to_categorical(y, len(labels))
y = y.astype(int)
y[:5]

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=8)
(X_train.shape, y_train.shape)

model = Sequential()
model.add(Conv2D(64, kernel_size=3, activation='relu', input_shape=(height, width, 3)))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Conv2D(32, kernel_size=3, activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Conv2D(32, kernel_size=3, activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.45))
model.add(Dense(2, activation='softmax'))

epochs = 8
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
stats = model.fit(X_train, y_train, epochs=epochs, validation_split=0.2)
model.summary()
code
90128404/cell_8
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.utils import to_categorical
import numpy as np
import pandas as pd

data = pd.read_csv('../input/hardfakevsrealfaces/data.csv')
height, width = (128, 128)
X = np.empty((data.shape[0], height, width, 3))
for i in range(data.shape[0]):
    img = load_img('../input/hardfakevsrealfaces/{}/{}.jpg'.format(data.loc[i, 'label'], data.loc[i, 'images_id']), target_size=(height, width))
    X[i] = img_to_array(img)
X.shape

def changeLabels(x):
    return labels[x]

labels = data.label.unique()
labels = {labels[i]: i for i in range(labels.size)}
y = data.label.apply(changeLabels)
y[:5]
y = to_categorical(y, len(labels))
y = y.astype(int)
y[:5]

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=8)
(X_train.shape, y_train.shape)
code
90128404/cell_3
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import numpy as np
import pandas as pd

data = pd.read_csv('../input/hardfakevsrealfaces/data.csv')
height, width = (128, 128)
X = np.empty((data.shape[0], height, width, 3))
for i in range(data.shape[0]):
    img = load_img('../input/hardfakevsrealfaces/{}/{}.jpg'.format(data.loc[i, 'label'], data.loc[i, 'images_id']), target_size=(height, width))
    X[i] = img_to_array(img)
X.shape
code
90128404/cell_14
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense, Conv2D, Flatten, MaxPooling2D, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.utils import to_categorical
import numpy as np
import pandas as pd

data = pd.read_csv('../input/hardfakevsrealfaces/data.csv')
height, width = (128, 128)
X = np.empty((data.shape[0], height, width, 3))
for i in range(data.shape[0]):
    img = load_img('../input/hardfakevsrealfaces/{}/{}.jpg'.format(data.loc[i, 'label'], data.loc[i, 'images_id']), target_size=(height, width))
    X[i] = img_to_array(img)
X.shape

def changeLabels(x):
    return labels[x]

labels = data.label.unique()
labels = {labels[i]: i for i in range(labels.size)}
y = data.label.apply(changeLabels)
y[:5]
y = to_categorical(y, len(labels))
y = y.astype(int)
y[:5]

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=8)
(X_train.shape, y_train.shape)

model = Sequential()
model.add(Conv2D(64, kernel_size=3, activation='relu', input_shape=(height, width, 3)))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Conv2D(32, kernel_size=3, activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Conv2D(32, kernel_size=3, activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.45))
model.add(Dense(2, activation='softmax'))

epochs = 8
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
stats = model.fit(X_train, y_train, epochs=epochs, validation_split=0.2)
model.summary()

print('Accuracy:')
_, accuracy = model.evaluate(X_test, y_test)
code
90128404/cell_10
[ "text_html_output_1.png" ]
from tensorflow.keras.layers import Dense, Conv2D, Flatten, MaxPooling2D, Dropout
from tensorflow.keras.models import Sequential

height, width = (128, 128)  # added: defined in an earlier cell of this notebook but missing here

model = Sequential()
model.add(Conv2D(64, kernel_size=3, activation='relu', input_shape=(height, width, 3)))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Conv2D(32, kernel_size=3, activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Conv2D(32, kernel_size=3, activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.45))
model.add(Dense(2, activation='softmax'))
code
90128404/cell_12
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense, Conv2D, Flatten, MaxPooling2D, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plot
import numpy as np
import pandas as pd
import seaborn as sb

data = pd.read_csv('../input/hardfakevsrealfaces/data.csv')
height, width = (128, 128)
X = np.empty((data.shape[0], height, width, 3))
for i in range(data.shape[0]):
    img = load_img('../input/hardfakevsrealfaces/{}/{}.jpg'.format(data.loc[i, 'label'], data.loc[i, 'images_id']), target_size=(height, width))
    X[i] = img_to_array(img)
X.shape

def changeLabels(x):
    return labels[x]

labels = data.label.unique()
labels = {labels[i]: i for i in range(labels.size)}
y = data.label.apply(changeLabels)
y[:5]
y = to_categorical(y, len(labels))
y = y.astype(int)
y[:5]

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=8)
(X_train.shape, y_train.shape)

model = Sequential()
model.add(Conv2D(64, kernel_size=3, activation='relu', input_shape=(height, width, 3)))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Conv2D(32, kernel_size=3, activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Conv2D(32, kernel_size=3, activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.45))
model.add(Dense(2, activation='softmax'))

epochs = 8
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
stats = model.fit(X_train, y_train, epochs=epochs, validation_split=0.2)
model.summary()

stats_df = pd.DataFrame(stats.history)
stats_df['epoch'] = list(range(1, epochs + 1))
plot.figure(figsize=(10, 8))
sb.lineplot(y='loss', x='epoch', data=stats_df, color='deeppink', linewidth=2.5, label='Training loss')
sb.lineplot(y='val_loss', x='epoch', data=stats_df, color='darkturquoise', linewidth=2.5, label='Validation loss')
plot.grid()
plot.legend()
plot.title('Training and validation loss')
code
1003217/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
print('Skewness: %f' % train['SalePrice'].skew())
print('Kurtosis: %f' % train['SalePrice'].kurt())
code
1003217/cell_33
[ "text_html_output_1.png" ]
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)

# box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000)

matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
corr = train.select_dtypes(include=['float64', 'int64']).iloc[:, 1:].corr()
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing_data.head(20)
code
1003217/cell_29
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)

# box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000)

corr = train.select_dtypes(include=['float64', 'int64']).iloc[:, 1:].corr()
plt.figure(figsize=(12, 6))
sns.countplot(x='Neighborhood', data=train)
xt = plt.xticks(rotation=45)
code
1003217/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
train['SalePrice'].describe()
code
1003217/cell_28
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)

# box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000)

corr = train.select_dtypes(include=['float64', 'int64']).iloc[:, 1:].corr()
cor_dict = corr['SalePrice'].to_dict()
del cor_dict['SalePrice']
print('List the numerical features descendingly by their correlation with Sale Price:\n')
for ele in sorted(cor_dict.items(), key=lambda x: -abs(x[1])):
    print('{0}: \t{1}'.format(*ele))
code
1003217/cell_15
[ "image_output_1.png" ]
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
sns.distplot(train['SalePrice'])
code
1003217/cell_16
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y='SalePrice', data=data)
fig.axis(ymin=0, ymax=800000)
code
1003217/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)

# box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000)

matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
prices.hist()
code
1003217/cell_31
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression, Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)

# box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000)

matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})

def rmse_cv(model):
    rmse = np.sqrt(-cross_val_score(model, X_train, y, scoring='neg_mean_squared_error', cv=5))
    return rmse

corr = train.select_dtypes(include=['float64', 'int64']).iloc[:, 1:].corr()
xt = plt.xticks(rotation=45)
k = 10
cols = corr.nlargest(k, 'SalePrice')['SalePrice'].index
cm = np.corrcoef(train[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
plt.show()
code
1003217/cell_14
[ "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
data.plot.scatter(x=var, y='SalePrice', ylim=(0, 800000))
code
1003217/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
train.head()
code
1003217/cell_27
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)

# box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000)

corr = train.select_dtypes(include=['float64', 'int64']).iloc[:, 1:].corr()
plt.figure(figsize=(12, 12))
sns.heatmap(corr, vmax=1, square=True)
code
1003217/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
train['SalePrice'].describe()
code
1009496/cell_9
[ "image_output_1.png" ]
from glob import glob
import cv2
import matplotlib.pylab as plt
import numpy as np  # linear algebra
import os

TRAIN_DATA = '../input/train'
type_1_files = glob(os.path.join(TRAIN_DATA, 'Type_1', '*.jpg'))
type_1_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_1')) + 1:-4] for s in type_1_files])
type_2_files = glob(os.path.join(TRAIN_DATA, 'Type_2', '*.jpg'))
type_2_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_2')) + 1:-4] for s in type_2_files])
type_3_files = glob(os.path.join(TRAIN_DATA, 'Type_3', '*.jpg'))
type_3_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_3')) + 1:-4] for s in type_3_files])

def get_filename(image_id, image_type):
    """ Method to get image file path from its id and type """
    try:
        ['Type_1', 'Type_2', 'Type_3'].index(image_type)
    except:
        raise Exception("Image type '%s' is not recognized" % image_type)
    ext = 'jpg'
    data_path = os.path.join(TRAIN_DATA, image_type)
    return os.path.join(data_path, '{}.{}'.format(image_id, ext))

def get_image_data(image_id, image_type):
    """ Method to get image data as np.array specifying image id and type """
    fname = get_filename(image_id, image_type)
    img = cv2.imread(fname)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img

img = get_image_data('497', 'Type_1')
tile_size = (256, 256)
n = 10
m = int(np.floor(len(type_1_ids) / n))
complete_image = np.zeros((m * (tile_size[0] + 2), n * (tile_size[1] + 2), 3), dtype=np.uint8)
train_ids = sorted(type_1_ids)
counter = 0
for i in range(m):
    ys = i * (tile_size[1] + 2)
    ye = ys + tile_size[1]
    for j in range(n):
        xs = j * (tile_size[0] + 2)
        xe = xs + tile_size[0]
        image_id = train_ids[counter]
        counter += 1
        img = get_image_data(image_id, 'Type_1')
        img = cv2.resize(img, dsize=tile_size)
        complete_image[ys:ye, xs:xe] = img[:, :, :]
plt.figure(figsize=(20, 20))  # the original called an undefined helper plt_st(20, 20), presumably setting the figure size
_ = plt.imshow(complete_image)
code
1009496/cell_2
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd

print(check_output(['ls', '../input']).decode('utf8'))
code
1009496/cell_8
[ "text_plain_output_1.png" ]
from glob import glob
import cv2
import numpy as np  # linear algebra
import os

TRAIN_DATA = '../input/train'
type_1_files = glob(os.path.join(TRAIN_DATA, 'Type_1', '*.jpg'))
type_1_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_1')) + 1:-4] for s in type_1_files])
type_2_files = glob(os.path.join(TRAIN_DATA, 'Type_2', '*.jpg'))
type_2_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_2')) + 1:-4] for s in type_2_files])
type_3_files = glob(os.path.join(TRAIN_DATA, 'Type_3', '*.jpg'))
type_3_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_3')) + 1:-4] for s in type_3_files])

def get_filename(image_id, image_type):
    """ Method to get image file path from its id and type """
    try:
        ['Type_1', 'Type_2', 'Type_3'].index(image_type)
    except:
        raise Exception("Image type '%s' is not recognized" % image_type)
    ext = 'jpg'
    data_path = os.path.join(TRAIN_DATA, image_type)
    return os.path.join(data_path, '{}.{}'.format(image_id, ext))

def get_image_data(image_id, image_type):
    """ Method to get image data as np.array specifying image id and type """
    fname = get_filename(image_id, image_type)
    img = cv2.imread(fname)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img

img = get_image_data('497', 'Type_1')
tile_size = (256, 256)
n = 10
m = int(np.floor(len(type_1_ids) / n))
complete_image = np.zeros((m * (tile_size[0] + 2), n * (tile_size[1] + 2), 3), dtype=np.uint8)
train_ids = sorted(type_1_ids)
counter = 0
for i in range(m):
    ys = i * (tile_size[1] + 2)
    ye = ys + tile_size[1]
    for j in range(n):
        xs = j * (tile_size[0] + 2)
        xe = xs + tile_size[0]
        image_id = train_ids[counter]
        counter += 1
        img = get_image_data(image_id, 'Type_1')
        img = cv2.resize(img, dsize=tile_size)
        complete_image[ys:ye, xs:xe] = img[:, :, :]
print(complete_image.shape)
code
1009496/cell_3
[ "text_plain_output_1.png" ]
from glob import glob
import numpy as np  # linear algebra
import os

TRAIN_DATA = '../input/train'
type_1_files = glob(os.path.join(TRAIN_DATA, 'Type_1', '*.jpg'))
type_1_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_1')) + 1:-4] for s in type_1_files])
type_2_files = glob(os.path.join(TRAIN_DATA, 'Type_2', '*.jpg'))
type_2_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_2')) + 1:-4] for s in type_2_files])
type_3_files = glob(os.path.join(TRAIN_DATA, 'Type_3', '*.jpg'))
type_3_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_3')) + 1:-4] for s in type_3_files])
print(len(type_1_files), len(type_2_files), len(type_3_files))
print('Type 1', type_1_ids[:10])
print('Type 2', type_2_ids[:10])
print('Type 3', type_3_ids[:10])
code
1009496/cell_5
[ "text_plain_output_1.png" ]
from glob import glob
import cv2
import numpy as np  # linear algebra
import os

TRAIN_DATA = '../input/train'
type_1_files = glob(os.path.join(TRAIN_DATA, 'Type_1', '*.jpg'))
type_1_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_1')) + 1:-4] for s in type_1_files])
type_2_files = glob(os.path.join(TRAIN_DATA, 'Type_2', '*.jpg'))
type_2_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_2')) + 1:-4] for s in type_2_files])
type_3_files = glob(os.path.join(TRAIN_DATA, 'Type_3', '*.jpg'))
type_3_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_3')) + 1:-4] for s in type_3_files])

def get_filename(image_id, image_type):
    """ Method to get image file path from its id and type """
    try:
        ['Type_1', 'Type_2', 'Type_3'].index(image_type)
    except:
        raise Exception("Image type '%s' is not recognized" % image_type)
    ext = 'jpg'
    data_path = os.path.join(TRAIN_DATA, image_type)
    return os.path.join(data_path, '{}.{}'.format(image_id, ext))

def get_image_data(image_id, image_type):
    """ Method to get image data as np.array specifying image id and type """
    fname = get_filename(image_id, image_type)
    img = cv2.imread(fname)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img

img = get_image_data('497', 'Type_1')
print(img.shape)
code
50230145/cell_30
[ "text_html_output_1.png" ]
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import requests

titles = []
years = []
urls = []
ranks = [i for i in range(1, 1001)]

def JazzStandardsTable(url):
    r = requests.get(url)
    soup = BeautifulSoup(r.content, 'html.parser')
    for i in range(25, 125):
        titles.append(soup.find_all('a')[i].get_text())
    for i in range(25, 125):
        urls.append(soup.find_all('a')[i].get('href'))
    for i in range(0, 100):
        years.append(soup.find_all('tr', class_='JSContentsLine')[i].get_text().split('\xa0')[0][-4:])

url = 'https://www.jazzstandards.com/compositions/index.htm'
JazzStandardsTable(url)
for i in range(2, 11):
    index = url.find('index')
    url = url[:index + 5] + str(i) + '.htm'
    JazzStandardsTable(url)

title = pd.Series(titles, name='Title')
year = pd.Series(years, name='Year')
rank = pd.Series(ranks, name='Rank')
url = pd.Series(urls, name='URL')
JazzStandards = pd.concat([rank, title, year, url], axis=1)

music = []
lyrics = []
for url in urls:
    r = requests.get(url)
    soup = BeautifulSoup(r.content, 'html.parser')
    music.append(soup.find('table', id='table33').find_all('tr', class_='JSQuote')[-2].get_text().strip().split('\n')[2:])
    lyrics.append(soup.find('table', id='table33').find_all('tr', class_='JSQuote')[-1].get_text().strip().split('\n')[2:])

Music = pd.Series(music, name='Composer(s)')
Lyrics = pd.Series(lyrics, name='Lyricist(s)')
Music = Music.apply(lambda x: np.nan if len(x) == 0 else x)
Music.fillna(Lyrics, inplace=True)
JazzStandards = pd.concat([JazzStandards, Music, Lyrics], axis=1)
JazzStandards = JazzStandards[['Rank', 'Title', 'Year', 'Composer(s)', 'Lyricist(s)', 'URL']]
JazzStandards.head(60)
code
50230145/cell_14
[ "text_html_output_1.png" ]
from bs4 import BeautifulSoup
import pandas as pd
import requests

titles = []
years = []
urls = []
ranks = [i for i in range(1, 1001)]

def JazzStandardsTable(url):
    r = requests.get(url)
    soup = BeautifulSoup(r.content, 'html.parser')
    for i in range(25, 125):
        titles.append(soup.find_all('a')[i].get_text())
    for i in range(25, 125):
        urls.append(soup.find_all('a')[i].get('href'))
    for i in range(0, 100):
        years.append(soup.find_all('tr', class_='JSContentsLine')[i].get_text().split('\xa0')[0][-4:])

url = 'https://www.jazzstandards.com/compositions/index.htm'
JazzStandardsTable(url)
for i in range(2, 11):
    index = url.find('index')
    url = url[:index + 5] + str(i) + '.htm'
    JazzStandardsTable(url)

title = pd.Series(titles, name='Title')
year = pd.Series(years, name='Year')
rank = pd.Series(ranks, name='Rank')
url = pd.Series(urls, name='URL')
JazzStandards = pd.concat([rank, title, year, url], axis=1)
JazzStandards
code
73079773/cell_30
[ "image_output_11.png", "image_output_24.png", "image_output_46.png", "image_output_25.png", "image_output_47.png", "image_output_17.png", "image_output_30.png", "image_output_14.png", "image_output_39.png", "image_output_28.png", "image_output_23.png", "image_output_34.png", "image_output_13.png", "image_output_40.png", "image_output_5.png", "image_output_48.png", "image_output_18.png", "image_output_21.png", "image_output_7.png", "image_output_31.png", "image_output_20.png", "image_output_32.png", "image_output_4.png", "image_output_42.png", "image_output_35.png", "image_output_41.png", "image_output_36.png", "image_output_8.png", "image_output_37.png", "image_output_16.png", "image_output_27.png", "image_output_6.png", "image_output_45.png", "image_output_12.png", "image_output_22.png", "image_output_3.png", "image_output_29.png", "image_output_44.png", "image_output_43.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_33.png", "image_output_50.png", "image_output_15.png", "image_output_49.png", "image_output_9.png", "image_output_19.png", "image_output_38.png", "image_output_26.png" ]
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.backend import clear_session
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, Dense, Flatten, InputLayer
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import zipfile
import os      # added: used below but missing from this cell's imports
import cv2     # added: used below but missing from this cell's imports
import random  # added: used below but missing from this cell's imports

input_path = '/kaggle/input/dogs-vs-cats'
work_path = '/kaggle/working/data'
train_path = os.path.join(input_path, 'train.zip')
test_path = os.path.join(input_path, 'test1.zip')
with zipfile.ZipFile(train_path, 'r') as zip_ref:
    zip_ref.extractall(work_path)
with zipfile.ZipFile(test_path, 'r') as zip_ref:
    zip_ref.extractall(work_path)
data_dir = work_path
train_dir = data_dir + '/train'
test_dir = data_dir + '/test1'
df = pd.DataFrame()
fnames = os.listdir(train_dir)
class_name = []
for name in fnames:
    class_name.append(name.split('.')[0])
data = {'filename': fnames, 'class': class_name}
df = pd.DataFrame(data)
df = df.sample(frac=1)
train_datagen = ImageDataGenerator(rescale=1 / 255, rotation_range=20, shear_range=0.2, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True, zoom_range=0.2)
valid_datagen = ImageDataGenerator(rescale=1 / 255)
idx = int(0.8 * len(df))
train_df = df.iloc[:idx]
valid_df = df.iloc[idx:]
target = (224, 224)
train_set = train_datagen.flow_from_dataframe(train_df, directory=train_dir, shuffle=True, target_size=target, batch_size=64, class_mode='binary')
valid_set = valid_datagen.flow_from_dataframe(valid_df, directory=train_dir, shuffle=False, target_size=target, batch_size=32, class_mode='binary')

clear_session()
model = Sequential([
    InputLayer(input_shape=target + (3,)),
    Convolution2D(16, 3, activation='relu'), MaxPooling2D(2),
    Convolution2D(32, 3, activation='relu'), MaxPooling2D(2),
    Convolution2D(64, 3, activation='relu'), MaxPooling2D(2),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(1, activation='sigmoid'),
])
model.summary()
opt = SGD(learning_rate=0.05, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['acc'])
model.optimizer.get_config()

clear_session()
model = VGG16(include_top=False, input_shape=target + (3,))
for layer in model.layers:
    layer.trainable = False
flat1 = Flatten()(model.layers[-1].output)
class1 = Dense(128, activation='relu', kernel_initializer='he_uniform')(flat1)
output = Dense(1, activation='sigmoid')(class1)
model = Model(inputs=model.inputs, outputs=output)
model.summary()
opt = SGD(learning_rate=0.001, momentum=0.9)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['acc'])
model.optimizer.get_config()

checkpoint = ModelCheckpoint('temp_model.h5', monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto')
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=10, verbose=1, mode='auto')
history = model.fit(train_set, validation_data=valid_set, steps_per_epoch=train_set.n // train_set.batch_size, validation_steps=valid_set.n // valid_set.batch_size, epochs=50, callbacks=[checkpoint, early])
legend = ['train', 'validation']
model.save('my_model-3_block-aug-50_epoch.h5')
model = load_model('temp_model.h5')

layer_outputs = []
for layer in model.layers:
    if 'conv' not in layer.name:
        continue
    layer_outputs.append(layer.output)
activation_model = Model(inputs=model.input, outputs=layer_outputs)

def preprocess(img):
    img = cv2.resize(img, target)
    img = img / 255
    return np.array(img)

def predict(img):
    img = preprocess(img)
    img = img.reshape((1,) + img.shape)
    probability = model.predict(img)
    return probability

def getLabel(probability):
    if probability < 0.5:
        probability = 0
    else:
        probability = 1
    return list(train_set.class_indices)[probability]

def visualize(img):
    img = preprocess(img)
    img = img.reshape((1,) + img.shape)
    fmaps = activation_model.predict(img)
    for i in range(len(fmaps)):
        activation = fmaps[i]
        fig = plt.figure(figsize=(20, 15))
        fig.suptitle(layer_outputs[i].name)
        for j in range(min(8 * 8, activation.shape[-1])):
            plt.subplot(8, 8, j + 1)
            plt.imshow(activation[0, :, :, j], cmap='gray')
        plt.show()

WIN_SIZES = []
for i in range(100, 260, 20):
    WIN_SIZES.append(i)

def get_box(img, step=20, win_sizes=WIN_SIZES):
    best_box = None
    best_distance = 1
    raw_prob = predict(img)
    if raw_prob < 0.5:
        raw_prob = 0
    else:
        raw_prob = 1
    for win in win_sizes:
        print("Run with window size:", str(win))
        for top in range(0, img.shape[0] - win + 1, step):
            for left in range(0, img.shape[1] - win + 1, step):
                box = (left, top, left + win, top + win)
                crop = img[box[1]:box[3], box[0]:box[2]]
                prob = predict(crop)
                distance = abs(raw_prob - prob)
                if distance < best_distance:
                    best_box = box
                    best_distance = distance
    return (best_box, best_distance)

test_fnames = os.listdir(test_dir)
random.shuffle(test_fnames)
result = []
nPic = 50
for fnames in test_fnames:
    pred_path = os.path.join(test_dir, fnames)
    img = cv2.imread(pred_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    title = getLabel(predict(img))
    plt.imshow(img)
    plt.title(title)
    plt.show()
    nPic -= 1
    if nPic == 0:
        break
code
73079773/cell_2
[ "image_output_2.png", "image_output_1.png" ]
code
73079773/cell_18
[ "text_plain_output_1.png" ]
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.backend import clear_session
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, Dense, Flatten, InputLayer
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
import pandas as pd
import zipfile
import os  # added: os.path/os.listdir are used below but os was missing from this cell's imports

input_path = '/kaggle/input/dogs-vs-cats'
work_path = '/kaggle/working/data'
train_path = os.path.join(input_path, 'train.zip')
test_path = os.path.join(input_path, 'test1.zip')
with zipfile.ZipFile(train_path, 'r') as zip_ref:
    zip_ref.extractall(work_path)
with zipfile.ZipFile(test_path, 'r') as zip_ref:
    zip_ref.extractall(work_path)
data_dir = work_path
train_dir = data_dir + '/train'
test_dir = data_dir + '/test1'
df = pd.DataFrame()
fnames = os.listdir(train_dir)
class_name = []
for name in fnames:
    class_name.append(name.split('.')[0])
data = {'filename': fnames, 'class': class_name}
df = pd.DataFrame(data)
df = df.sample(frac=1)
train_datagen = ImageDataGenerator(rescale=1 / 255, rotation_range=20, shear_range=0.2, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True, zoom_range=0.2)
valid_datagen = ImageDataGenerator(rescale=1 / 255)
idx = int(0.8 * len(df))
train_df = df.iloc[:idx]
valid_df = df.iloc[idx:]
target = (224, 224)
train_set = train_datagen.flow_from_dataframe(train_df, directory=train_dir, shuffle=True, target_size=target, batch_size=64, class_mode='binary')
valid_set = valid_datagen.flow_from_dataframe(valid_df, directory=train_dir, shuffle=False, target_size=target, batch_size=32, class_mode='binary')

clear_session()
model = Sequential([
    InputLayer(input_shape=target + (3,)),
    Convolution2D(16, 3, activation='relu'), MaxPooling2D(2),
    Convolution2D(32, 3, activation='relu'), MaxPooling2D(2),
    Convolution2D(64, 3, activation='relu'), MaxPooling2D(2),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(1, activation='sigmoid'),
])
model.summary()
opt = SGD(learning_rate=0.05, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['acc'])
model.optimizer.get_config()

clear_session()
model = VGG16(include_top=False, input_shape=target + (3,))
for layer in model.layers:
    layer.trainable = False
flat1 = Flatten()(model.layers[-1].output)
class1 = Dense(128, activation='relu', kernel_initializer='he_uniform')(flat1)
output = Dense(1, activation='sigmoid')(class1)
model = Model(inputs=model.inputs, outputs=output)
model.summary()
opt = SGD(learning_rate=0.001, momentum=0.9)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['acc'])
model.optimizer.get_config()

checkpoint = ModelCheckpoint('temp_model.h5', monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto')
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=10, verbose=1, mode='auto')
history = model.fit(train_set, validation_data=valid_set, steps_per_epoch=train_set.n // train_set.batch_size, validation_steps=valid_set.n // valid_set.batch_size, epochs=50, callbacks=[checkpoint, early])

legend = ['train', 'validation']
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Accuracy')
plt.xlabel('epochs')
plt.ylabel('acc')
plt.legend(legend, loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Binary cross-entropy loss')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend(legend, loc='upper left')
plt.show()
code
73079773/cell_16
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.backend import clear_session
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, Dense, Flatten, InputLayer
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import pandas as pd
import zipfile
import os  # added: os.path/os.listdir are used below but os was missing from this cell's imports

input_path = '/kaggle/input/dogs-vs-cats'
work_path = '/kaggle/working/data'
train_path = os.path.join(input_path, 'train.zip')
test_path = os.path.join(input_path, 'test1.zip')
with zipfile.ZipFile(train_path, 'r') as zip_ref:
    zip_ref.extractall(work_path)
with zipfile.ZipFile(test_path, 'r') as zip_ref:
    zip_ref.extractall(work_path)
data_dir = work_path
train_dir = data_dir + '/train'
test_dir = data_dir + '/test1'
df = pd.DataFrame()
fnames = os.listdir(train_dir)
class_name = []
for name in fnames:
    class_name.append(name.split('.')[0])
data = {'filename': fnames, 'class': class_name}
df = pd.DataFrame(data)
df = df.sample(frac=1)
train_datagen = ImageDataGenerator(rescale=1 / 255, rotation_range=20, shear_range=0.2, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True, zoom_range=0.2)
valid_datagen = ImageDataGenerator(rescale=1 / 255)
idx = int(0.8 * len(df))
train_df = df.iloc[:idx]
valid_df = df.iloc[idx:]
target = (224, 224)
train_set = train_datagen.flow_from_dataframe(train_df, directory=train_dir, shuffle=True, target_size=target, batch_size=64, class_mode='binary')
valid_set = valid_datagen.flow_from_dataframe(valid_df, directory=train_dir, shuffle=False, target_size=target, batch_size=32, class_mode='binary')

clear_session()
model = Sequential([
    InputLayer(input_shape=target + (3,)),
    Convolution2D(16, 3, activation='relu'), MaxPooling2D(2),
    Convolution2D(32, 3, activation='relu'), MaxPooling2D(2),
    Convolution2D(64, 3, activation='relu'), MaxPooling2D(2),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(1, activation='sigmoid'),
])
model.summary()
opt = SGD(learning_rate=0.05, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['acc'])
model.optimizer.get_config()

clear_session()
model = VGG16(include_top=False, input_shape=target + (3,))
for layer in model.layers:
    layer.trainable = False
flat1 = Flatten()(model.layers[-1].output)
class1 = Dense(128, activation='relu', kernel_initializer='he_uniform')(flat1)
output = Dense(1, activation='sigmoid')(class1)
model = Model(inputs=model.inputs, outputs=output)
model.summary()
opt = SGD(learning_rate=0.001, momentum=0.9)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['acc'])
model.optimizer.get_config()

checkpoint = ModelCheckpoint('temp_model.h5', monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto')
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=10, verbose=1, mode='auto')
history = model.fit(train_set, validation_data=valid_set, steps_per_epoch=train_set.n // train_set.batch_size, validation_steps=valid_set.n // valid_set.batch_size, epochs=50, callbacks=[checkpoint, early])
code
73079773/cell_24
[ "text_plain_output_1.png" ]
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.backend import clear_session
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, Dense, Flatten, InputLayer
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import pandas as pd
import zipfile
import os  # added: os.path/os.listdir are used below but os was missing from this cell's imports

input_path = '/kaggle/input/dogs-vs-cats'
work_path = '/kaggle/working/data'
train_path = os.path.join(input_path, 'train.zip')
test_path = os.path.join(input_path, 'test1.zip')
with zipfile.ZipFile(train_path, 'r') as zip_ref:
    zip_ref.extractall(work_path)
with zipfile.ZipFile(test_path, 'r') as zip_ref:
    zip_ref.extractall(work_path)
data_dir = work_path
train_dir = data_dir + '/train'
test_dir = data_dir + '/test1'
df = pd.DataFrame()
fnames = os.listdir(train_dir)
class_name = []
for name in fnames:
    class_name.append(name.split('.')[0])
data = {'filename': fnames, 'class': class_name}
df = pd.DataFrame(data)
df = df.sample(frac=1)
train_datagen = ImageDataGenerator(rescale=1 / 255, rotation_range=20, shear_range=0.2, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True, zoom_range=0.2)
valid_datagen = ImageDataGenerator(rescale=1 / 255)
idx = int(0.8 * len(df))
train_df = df.iloc[:idx]
valid_df = df.iloc[idx:]
target = (224, 224)
train_set = train_datagen.flow_from_dataframe(train_df, directory=train_dir, shuffle=True, target_size=target, batch_size=64, class_mode='binary')
valid_set = valid_datagen.flow_from_dataframe(valid_df, directory=train_dir, shuffle=False, target_size=target, batch_size=32, class_mode='binary')

clear_session()
model = Sequential([
    InputLayer(input_shape=target + (3,)),
    Convolution2D(16, 3, activation='relu'), MaxPooling2D(2),
    Convolution2D(32, 3, activation='relu'), MaxPooling2D(2),
    Convolution2D(64, 3, activation='relu'), MaxPooling2D(2),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(1, activation='sigmoid'),
])
model.summary()
opt = SGD(learning_rate=0.05, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['acc'])
model.optimizer.get_config()

clear_session()
model = VGG16(include_top=False, input_shape=target + (3,))
for layer in model.layers:
    layer.trainable = False
flat1 = Flatten()(model.layers[-1].output)
class1 = Dense(128, activation='relu', kernel_initializer='he_uniform')(flat1)
output = Dense(1, activation='sigmoid')(class1)
model = Model(inputs=model.inputs, outputs=output)
model.summary()
opt = SGD(learning_rate=0.001, momentum=0.9)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['acc'])
model.optimizer.get_config()

checkpoint = ModelCheckpoint('temp_model.h5', monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto')
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=10, verbose=1, mode='auto')
history = model.fit(train_set, validation_data=valid_set, steps_per_epoch=train_set.n // train_set.batch_size, validation_steps=valid_set.n // valid_set.batch_size, epochs=50, callbacks=[checkpoint, early])

model.save('my_model-3_block-aug-50_epoch.h5')
model = load_model('temp_model.h5')
print('Accuracy:', model.evaluate(valid_set))
code
73079773/cell_14
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.backend import clear_session
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, Dense, Flatten, InputLayer
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import pandas as pd
import zipfile
import os  # added: os.path/os.listdir are used below but os was missing from this cell's imports

input_path = '/kaggle/input/dogs-vs-cats'
work_path = '/kaggle/working/data'
train_path = os.path.join(input_path, 'train.zip')
test_path = os.path.join(input_path, 'test1.zip')
with zipfile.ZipFile(train_path, 'r') as zip_ref:
    zip_ref.extractall(work_path)
with zipfile.ZipFile(test_path, 'r') as zip_ref:
    zip_ref.extractall(work_path)
data_dir = work_path
train_dir = data_dir + '/train'
test_dir = data_dir + '/test1'
df = pd.DataFrame()
fnames = os.listdir(train_dir)
class_name = []
for name in fnames:
    class_name.append(name.split('.')[0])
data = {'filename': fnames, 'class': class_name}
df = pd.DataFrame(data)
df = df.sample(frac=1)
train_datagen = ImageDataGenerator(rescale=1 / 255, rotation_range=20, shear_range=0.2, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True, zoom_range=0.2)
valid_datagen = ImageDataGenerator(rescale=1 / 255)
idx = int(0.8 * len(df))
train_df = df.iloc[:idx]
valid_df = df.iloc[idx:]
target = (224, 224)
train_set = train_datagen.flow_from_dataframe(train_df, directory=train_dir, shuffle=True, target_size=target, batch_size=64, class_mode='binary')
valid_set = valid_datagen.flow_from_dataframe(valid_df, directory=train_dir, shuffle=False, target_size=target, batch_size=32, class_mode='binary')

clear_session()
model = Sequential([
    InputLayer(input_shape=target + (3,)),
    Convolution2D(16, 3, activation='relu'), MaxPooling2D(2),
    Convolution2D(32, 3, activation='relu'), MaxPooling2D(2),
    Convolution2D(64, 3, activation='relu'), MaxPooling2D(2),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(1, activation='sigmoid'),
])
model.summary()
opt = SGD(learning_rate=0.05, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['acc'])
model.optimizer.get_config()

clear_session()
model = VGG16(include_top=False, input_shape=target + (3,))
for layer in model.layers:
    layer.trainable = False
flat1 = Flatten()(model.layers[-1].output)
class1 = Dense(128, activation='relu', kernel_initializer='he_uniform')(flat1)
output = Dense(1, activation='sigmoid')(class1)
model = Model(inputs=model.inputs, outputs=output)
model.summary()
opt = SGD(learning_rate=0.001, momentum=0.9)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['acc'])
model.optimizer.get_config()
code
73079773/cell_10
[ "text_plain_output_1.png" ]
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import pandas as pd
import zipfile
import os  # added: os.path/os.listdir are used below but os was missing from this cell's imports

input_path = '/kaggle/input/dogs-vs-cats'
work_path = '/kaggle/working/data'
train_path = os.path.join(input_path, 'train.zip')
test_path = os.path.join(input_path, 'test1.zip')
with zipfile.ZipFile(train_path, 'r') as zip_ref:
    zip_ref.extractall(work_path)
with zipfile.ZipFile(test_path, 'r') as zip_ref:
    zip_ref.extractall(work_path)
data_dir = work_path
train_dir = data_dir + '/train'
test_dir = data_dir + '/test1'
df = pd.DataFrame()
fnames = os.listdir(train_dir)
class_name = []
for name in fnames:
    class_name.append(name.split('.')[0])
data = {'filename': fnames, 'class': class_name}
df = pd.DataFrame(data)
df = df.sample(frac=1)
train_datagen = ImageDataGenerator(rescale=1 / 255, rotation_range=20, shear_range=0.2, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True, zoom_range=0.2)
valid_datagen = ImageDataGenerator(rescale=1 / 255)
idx = int(0.8 * len(df))
train_df = df.iloc[:idx]
valid_df = df.iloc[idx:]
target = (224, 224)
train_set = train_datagen.flow_from_dataframe(train_df, directory=train_dir, shuffle=True, target_size=target, batch_size=64, class_mode='binary')
valid_set = valid_datagen.flow_from_dataframe(valid_df, directory=train_dir, shuffle=False, target_size=target, batch_size=32, class_mode='binary')
code
73079773/cell_12
[ "text_plain_output_1.png" ]
from tensorflow.keras.backend import clear_session
from tensorflow.keras.layers import Convolution2D, MaxPooling2D, Dense, Flatten, InputLayer
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import pandas as pd
import zipfile
import os  # added: os.path/os.listdir are used below but os was missing from this cell's imports

input_path = '/kaggle/input/dogs-vs-cats'
work_path = '/kaggle/working/data'
train_path = os.path.join(input_path, 'train.zip')
test_path = os.path.join(input_path, 'test1.zip')
with zipfile.ZipFile(train_path, 'r') as zip_ref:
    zip_ref.extractall(work_path)
with zipfile.ZipFile(test_path, 'r') as zip_ref:
    zip_ref.extractall(work_path)
data_dir = work_path
train_dir = data_dir + '/train'
test_dir = data_dir + '/test1'
df = pd.DataFrame()
fnames = os.listdir(train_dir)
class_name = []
for name in fnames:
    class_name.append(name.split('.')[0])
data = {'filename': fnames, 'class': class_name}
df = pd.DataFrame(data)
df = df.sample(frac=1)
train_datagen = ImageDataGenerator(rescale=1 / 255, rotation_range=20, shear_range=0.2, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True, zoom_range=0.2)
valid_datagen = ImageDataGenerator(rescale=1 / 255)
idx = int(0.8 * len(df))
train_df = df.iloc[:idx]
valid_df = df.iloc[idx:]
target = (224, 224)
train_set = train_datagen.flow_from_dataframe(train_df, directory=train_dir, shuffle=True, target_size=target, batch_size=64, class_mode='binary')
valid_set = valid_datagen.flow_from_dataframe(valid_df, directory=train_dir, shuffle=False, target_size=target, batch_size=32, class_mode='binary')

clear_session()
model = Sequential([
    InputLayer(input_shape=target + (3,)),
    Convolution2D(16, 3, activation='relu'), MaxPooling2D(2),
    Convolution2D(32, 3, activation='relu'), MaxPooling2D(2),
    Convolution2D(64, 3, activation='relu'), MaxPooling2D(2),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(1, activation='sigmoid'),
])
model.summary()
opt = SGD(learning_rate=0.05, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['acc'])
model.optimizer.get_config()
code
128008433/cell_21
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import pandas as pd

data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data.drop('Loan_ID', axis=1, inplace=True)
data.shape
data = data.dropna()
data.shape
X = data.drop('Loan_Status', axis=1)
y = data['Loan_Status']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
y_train_pred = knn.predict(X_train)
y_test_pred = knn.predict(X_test)
train_acc_knn = accuracy_score(y_train, y_train_pred)
test_acc_knn = accuracy_score(y_test, y_test_pred)

dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)
y_train_pred = dt.predict(X_train)
y_test_pred = dt.predict(X_test)
train_acc_dt = accuracy_score(y_train, y_train_pred)
test_acc_dt = accuracy_score(y_test, y_test_pred)

svm = SVC()
svm.fit(X_train, y_train)
y_train_pred = svm.predict(X_train)
y_test_pred = svm.predict(X_test)
train_acc_svm = accuracy_score(y_train, y_train_pred)
test_acc_svm = accuracy_score(y_test, y_test_pred)

lr = LogisticRegression()
lr.fit(X_train, y_train)
y_train_pred = lr.predict(X_train)
y_test_pred = lr.predict(X_test)
train_acc_lr = accuracy_score(y_train, y_train_pred)
test_acc_lr = accuracy_score(y_test, y_test_pred)

print('Potential of overfitting for KNN: ', train_acc_knn - test_acc_knn)
print('Potential of overfitting for Decision Tree: ', train_acc_dt - test_acc_dt)
print('Potential of overfitting for SVM: ', train_acc_svm - test_acc_svm)
print('Potential of overfitting for Logistic Regression: ', train_acc_lr - test_acc_lr)
code
128008433/cell_13
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd

data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data.drop('Loan_ID', axis=1, inplace=True)
data.shape
data = data.dropna()
data.shape
X = data.drop('Loan_Status', axis=1)
y = data['Loan_Status']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
data
code
128008433/cell_23
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import pandas as pd

data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data.drop('Loan_ID', axis=1, inplace=True)
data.shape
data = data.dropna()
data.shape
X = data.drop('Loan_Status', axis=1)
y = data['Loan_Status']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
y_train_pred = knn.predict(X_train)
y_test_pred = knn.predict(X_test)
train_acc_knn = accuracy_score(y_train, y_train_pred)
test_acc_knn = accuracy_score(y_test, y_test_pred)

dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)
y_train_pred = dt.predict(X_train)
y_test_pred = dt.predict(X_test)
train_acc_dt = accuracy_score(y_train, y_train_pred)
test_acc_dt = accuracy_score(y_test, y_test_pred)

svm = SVC()
svm.fit(X_train, y_train)
y_train_pred = svm.predict(X_train)
y_test_pred = svm.predict(X_test)
train_acc_svm = accuracy_score(y_train, y_train_pred)
test_acc_svm = accuracy_score(y_test, y_test_pred)

lr = LogisticRegression()
lr.fit(X_train, y_train)
y_train_pred = lr.predict(X_train)
y_test_pred = lr.predict(X_test)
train_acc_lr = accuracy_score(y_train, y_train_pred)
test_acc_lr = accuracy_score(y_test, y_test_pred)

# Each confusion matrix is computed from that model's own test predictions;
# the original cell reused the last y_test_pred (logistic regression) for all four.
knn_cm = confusion_matrix(y_test, knn.predict(X_test))
print('KNN confusion matrix:\n', knn_cm)
dt_cm = confusion_matrix(y_test, dt.predict(X_test))
print('Decision Tree confusion matrix:\n', dt_cm)
svm_cm = confusion_matrix(y_test, svm.predict(X_test))
print('SVM confusion matrix:\n', svm_cm)
lr_cm = confusion_matrix(y_test, lr.predict(X_test))
print('Logistic Regression confusion matrix:\n', lr_cm)
code
128008433/cell_30
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler, StandardScaler, LabelEncoder
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data.drop('Loan_ID', axis=1, inplace=True)
data.shape
data = data.dropna()
data.shape
X = data.drop('Loan_Status', axis=1)
y = data['Loan_Status']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
y_train_pred = knn.predict(X_train)
y_test_pred = knn.predict(X_test)
train_acc_knn = accuracy_score(y_train, y_train_pred)
test_acc_knn = accuracy_score(y_test, y_test_pred)
dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)
y_train_pred = dt.predict(X_train)
y_test_pred = dt.predict(X_test)
train_acc_dt = accuracy_score(y_train, y_train_pred)
test_acc_dt = accuracy_score(y_test, y_test_pred)
svm = SVC()
svm.fit(X_train, y_train)
y_train_pred = svm.predict(X_train)
y_test_pred = svm.predict(X_test)
train_acc_svm = accuracy_score(y_train, y_train_pred)
test_acc_svm = accuracy_score(y_test, y_test_pred)
lr = LogisticRegression()
lr.fit(X_train, y_train)
y_train_pred = lr.predict(X_train)
y_test_pred = lr.predict(X_test)
train_acc_lr = accuracy_score(y_train, y_train_pred)
test_acc_lr = accuracy_score(y_test, y_test_pred)
from sklearn.preprocessing import MinMaxScaler, StandardScaler, LabelEncoder
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
knn = KNeighborsClassifier()
knn.fit(X_train_scaled, y_train)
y_train_pred = knn.predict(X_train_scaled)
y_test_pred = knn.predict(X_test_scaled)
train_acc_knn_scaled = accuracy_score(y_train, y_train_pred)
test_acc_knn_scaled = accuracy_score(y_test, y_test_pred)
train_recall_knn_scaled = recall_score(y_train, y_train_pred, pos_label='Y')  # 'Y' (approved) as the positive class
test_recall_knn_scaled = recall_score(y_test, y_test_pred, pos_label='Y')
dt = DecisionTreeClassifier()
dt.fit(X_train_scaled, y_train)
y_train_pred = dt.predict(X_train_scaled)
y_test_pred = dt.predict(X_test_scaled)
train_acc_dt_scaled = accuracy_score(y_train, y_train_pred)
test_acc_dt_scaled = accuracy_score(y_test, y_test_pred)
train_recall_dt_scaled = recall_score(y_train, y_train_pred, pos_label='Y')
test_recall_dt_scaled = recall_score(y_test, y_test_pred, pos_label='Y')
print('Training accuracy of Decision Tree (scaled data): ', train_acc_dt_scaled)
print('Testing accuracy of Decision Tree (scaled data): ', test_acc_dt_scaled)
print('Training recall of Decision Tree (scaled data): ', train_recall_dt_scaled)
print('Testing recall of Decision Tree (scaled data): ', test_recall_dt_scaled)
code
128008433/cell_29
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler, StandardScaler, LabelEncoder
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data.drop('Loan_ID', axis=1, inplace=True)
data.shape
data = data.dropna()
data.shape
X = data.drop('Loan_Status', axis=1)
y = data['Loan_Status']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
y_train_pred = knn.predict(X_train)
y_test_pred = knn.predict(X_test)
train_acc_knn = accuracy_score(y_train, y_train_pred)
test_acc_knn = accuracy_score(y_test, y_test_pred)
dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)
y_train_pred = dt.predict(X_train)
y_test_pred = dt.predict(X_test)
train_acc_dt = accuracy_score(y_train, y_train_pred)
test_acc_dt = accuracy_score(y_test, y_test_pred)
svm = SVC()
svm.fit(X_train, y_train)
y_train_pred = svm.predict(X_train)
y_test_pred = svm.predict(X_test)
train_acc_svm = accuracy_score(y_train, y_train_pred)
test_acc_svm = accuracy_score(y_test, y_test_pred)
lr = LogisticRegression()
lr.fit(X_train, y_train)
y_train_pred = lr.predict(X_train)
y_test_pred = lr.predict(X_test)
train_acc_lr = accuracy_score(y_train, y_train_pred)
test_acc_lr = accuracy_score(y_test, y_test_pred)
from sklearn.preprocessing import MinMaxScaler, StandardScaler, LabelEncoder
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
knn = KNeighborsClassifier()
knn.fit(X_train_scaled, y_train)
y_train_pred = knn.predict(X_train_scaled)
y_test_pred = knn.predict(X_test_scaled)
train_acc_knn_scaled = accuracy_score(y_train, y_train_pred)
test_acc_knn_scaled = accuracy_score(y_test, y_test_pred)
train_recall_knn_scaled = recall_score(y_train, y_train_pred, pos_label='Y')  # 'Y' (approved) as the positive class
test_recall_knn_scaled = recall_score(y_test, y_test_pred, pos_label='Y')
print('Training accuracy of KNN (scaled data): ', train_acc_knn_scaled)
print('Testing accuracy of KNN (scaled data): ', test_acc_knn_scaled)
print('Training recall of KNN (scaled data): ', train_recall_knn_scaled)
print('Testing recall of KNN (scaled data): ', test_recall_knn_scaled)
code
128008433/cell_26
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data.drop('Loan_ID', axis=1, inplace=True)
data.shape
data = data.dropna()
data.shape
X = data.drop('Loan_Status', axis=1)
y = data['Loan_Status']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
y_train_pred = knn.predict(X_train)
y_test_pred = knn.predict(X_test)
train_acc_knn = accuracy_score(y_train, y_train_pred)
test_acc_knn = accuracy_score(y_test, y_test_pred)
dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)
y_train_pred = dt.predict(X_train)
y_test_pred = dt.predict(X_test)
train_acc_dt = accuracy_score(y_train, y_train_pred)
test_acc_dt = accuracy_score(y_test, y_test_pred)
svm = SVC()
svm.fit(X_train, y_train)
y_train_pred = svm.predict(X_train)
y_test_pred = svm.predict(X_test)
train_acc_svm = accuracy_score(y_train, y_train_pred)
test_acc_svm = accuracy_score(y_test, y_test_pred)
lr = LogisticRegression()
lr.fit(X_train, y_train)
y_train_pred = lr.predict(X_train)
y_test_pred = lr.predict(X_test)
train_acc_lr = accuracy_score(y_train, y_train_pred)
test_acc_lr = accuracy_score(y_test, y_test_pred)
lr = LogisticRegression()
lr.fit(X, y)
code
128008433/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data.drop('Loan_ID', axis=1, inplace=True)
data.shape
data = data.dropna()
data.shape
data
code
128008433/cell_19
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data.drop('Loan_ID', axis=1, inplace=True)
data.shape
data = data.dropna()
data.shape
X = data.drop('Loan_Status', axis=1)
y = data['Loan_Status']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
y_train_pred = knn.predict(X_train)
y_test_pred = knn.predict(X_test)
train_acc_knn = accuracy_score(y_train, y_train_pred)
test_acc_knn = accuracy_score(y_test, y_test_pred)
dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)
y_train_pred = dt.predict(X_train)
y_test_pred = dt.predict(X_test)
train_acc_dt = accuracy_score(y_train, y_train_pred)
test_acc_dt = accuracy_score(y_test, y_test_pred)
svm = SVC()
svm.fit(X_train, y_train)
y_train_pred = svm.predict(X_train)
y_test_pred = svm.predict(X_test)
train_acc_svm = accuracy_score(y_train, y_train_pred)
test_acc_svm = accuracy_score(y_test, y_test_pred)
lr = LogisticRegression()
lr.fit(X_train, y_train)
y_train_pred = lr.predict(X_train)
y_test_pred = lr.predict(X_test)
train_acc_lr = accuracy_score(y_train, y_train_pred)
test_acc_lr = accuracy_score(y_test, y_test_pred)
print('Training accuracy of Logistic Regression: ', train_acc_lr)
print('Testing accuracy of Logistic Regression: ', test_acc_lr)
code
128008433/cell_18
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data.drop('Loan_ID', axis=1, inplace=True)
data.shape
data = data.dropna()
data.shape
X = data.drop('Loan_Status', axis=1)
y = data['Loan_Status']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
y_train_pred = knn.predict(X_train)
y_test_pred = knn.predict(X_test)
train_acc_knn = accuracy_score(y_train, y_train_pred)
test_acc_knn = accuracy_score(y_test, y_test_pred)
dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)
y_train_pred = dt.predict(X_train)
y_test_pred = dt.predict(X_test)
train_acc_dt = accuracy_score(y_train, y_train_pred)
test_acc_dt = accuracy_score(y_test, y_test_pred)
svm = SVC()
svm.fit(X_train, y_train)
y_train_pred = svm.predict(X_train)
y_test_pred = svm.predict(X_test)
train_acc_svm = accuracy_score(y_train, y_train_pred)
test_acc_svm = accuracy_score(y_test, y_test_pred)
print('Training accuracy of SVM: ', train_acc_svm)
print('Testing accuracy of SVM: ', test_acc_svm)
code
128008433/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data.drop('Loan_ID', axis=1, inplace=True)
data.shape
data = data.dropna()
data.shape
code
128008433/cell_16
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data.drop('Loan_ID', axis=1, inplace=True)
data.shape
data = data.dropna()
data.shape
X = data.drop('Loan_Status', axis=1)
y = data['Loan_Status']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
y_train_pred = knn.predict(X_train)
y_test_pred = knn.predict(X_test)
train_acc_knn = accuracy_score(y_train, y_train_pred)
test_acc_knn = accuracy_score(y_test, y_test_pred)
print('Training accuracy of KNN: ', train_acc_knn)
print('Testing accuracy of KNN: ', test_acc_knn)
code
128008433/cell_17
[ "text_html_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data.drop('Loan_ID', axis=1, inplace=True)
data.shape
data = data.dropna()
data.shape
X = data.drop('Loan_Status', axis=1)
y = data['Loan_Status']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
y_train_pred = knn.predict(X_train)
y_test_pred = knn.predict(X_test)
train_acc_knn = accuracy_score(y_train, y_train_pred)
test_acc_knn = accuracy_score(y_test, y_test_pred)
dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)
y_train_pred = dt.predict(X_train)
y_test_pred = dt.predict(X_test)
train_acc_dt = accuracy_score(y_train, y_train_pred)
test_acc_dt = accuracy_score(y_test, y_test_pred)
print('Training accuracy of Decision Tree: ', train_acc_dt)
print('Testing accuracy of Decision Tree: ', test_acc_dt)
code
128008433/cell_31
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler, StandardScaler, LabelEncoder
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data.drop('Loan_ID', axis=1, inplace=True)
data.shape
data = data.dropna()
data.shape
X = data.drop('Loan_Status', axis=1)
y = data['Loan_Status']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
y_train_pred = knn.predict(X_train)
y_test_pred = knn.predict(X_test)
train_acc_knn = accuracy_score(y_train, y_train_pred)
test_acc_knn = accuracy_score(y_test, y_test_pred)
dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)
y_train_pred = dt.predict(X_train)
y_test_pred = dt.predict(X_test)
train_acc_dt = accuracy_score(y_train, y_train_pred)
test_acc_dt = accuracy_score(y_test, y_test_pred)
svm = SVC()
svm.fit(X_train, y_train)
y_train_pred = svm.predict(X_train)
y_test_pred = svm.predict(X_test)
train_acc_svm = accuracy_score(y_train, y_train_pred)
test_acc_svm = accuracy_score(y_test, y_test_pred)
lr = LogisticRegression()
lr.fit(X_train, y_train)
y_train_pred = lr.predict(X_train)
y_test_pred = lr.predict(X_test)
train_acc_lr = accuracy_score(y_train, y_train_pred)
test_acc_lr = accuracy_score(y_test, y_test_pred)
from sklearn.preprocessing import MinMaxScaler, StandardScaler, LabelEncoder
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
knn = KNeighborsClassifier()
knn.fit(X_train_scaled, y_train)
y_train_pred = knn.predict(X_train_scaled)
y_test_pred = knn.predict(X_test_scaled)
train_acc_knn_scaled = accuracy_score(y_train, y_train_pred)
test_acc_knn_scaled = accuracy_score(y_test, y_test_pred)
train_recall_knn_scaled = recall_score(y_train, y_train_pred, pos_label='Y')  # 'Y' (approved) as the positive class
test_recall_knn_scaled = recall_score(y_test, y_test_pred, pos_label='Y')
dt = DecisionTreeClassifier()
dt.fit(X_train_scaled, y_train)
y_train_pred = dt.predict(X_train_scaled)
y_test_pred = dt.predict(X_test_scaled)
train_acc_dt_scaled = accuracy_score(y_train, y_train_pred)
test_acc_dt_scaled = accuracy_score(y_test, y_test_pred)
train_recall_dt_scaled = recall_score(y_train, y_train_pred, pos_label='Y')
test_recall_dt_scaled = recall_score(y_test, y_test_pred, pos_label='Y')
svm = SVC()
svm.fit(X_train_scaled, y_train)
y_train_pred = svm.predict(X_train_scaled)
y_test_pred = svm.predict(X_test_scaled)
train_acc_svm_scaled = accuracy_score(y_train, y_train_pred)
test_acc_svm_scaled = accuracy_score(y_test, y_test_pred)
train_recall_svm_scaled = recall_score(y_train, y_train_pred, pos_label='Y')
test_recall_svm_scaled = recall_score(y_test, y_test_pred, pos_label='Y')
print('Training accuracy of SVM (scaled data): ', train_acc_svm_scaled)
print('Testing accuracy of SVM (scaled data): ', test_acc_svm_scaled)
print('Training recall of SVM (scaled data): ', train_recall_svm_scaled)
print('Testing recall of SVM (scaled data): ', test_recall_svm_scaled)
code
128008433/cell_24
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data.drop('Loan_ID', axis=1, inplace=True)
data.shape
data = data.dropna()
data.shape
X = data.drop('Loan_Status', axis=1)
y = data['Loan_Status']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
y_train_pred = knn.predict(X_train)
y_test_pred = knn.predict(X_test)
train_acc_knn = accuracy_score(y_train, y_train_pred)
test_acc_knn = accuracy_score(y_test, y_test_pred)
dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)
y_train_pred = dt.predict(X_train)
y_test_pred = dt.predict(X_test)
train_acc_dt = accuracy_score(y_train, y_train_pred)
test_acc_dt = accuracy_score(y_test, y_test_pred)
svm = SVC()
svm.fit(X_train, y_train)
y_train_pred = svm.predict(X_train)
y_test_pred = svm.predict(X_test)
train_acc_svm = accuracy_score(y_train, y_train_pred)
test_acc_svm = accuracy_score(y_test, y_test_pred)
lr = LogisticRegression()
lr.fit(X_train, y_train)
y_train_pred = lr.predict(X_train)
y_test_pred = lr.predict(X_test)
train_acc_lr = accuracy_score(y_train, y_train_pred)
test_acc_lr = accuracy_score(y_test, y_test_pred)
from sklearn.metrics import recall_score
# each recall is computed from the matching model's test predictions
knn_recall = recall_score(y_test, knn.predict(X_test), pos_label='Y')
print('KNN recall score:', knn_recall)
dt_recall = recall_score(y_test, dt.predict(X_test), pos_label='Y')
print('Decision Tree recall score:', dt_recall)
svm_recall = recall_score(y_test, svm.predict(X_test), pos_label='Y')
print('SVM recall score:', svm_recall)
lr_recall = recall_score(y_test, lr.predict(X_test), pos_label='Y')
print('Logistic Regression recall score:', lr_recall)
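# pos_label='Y' makes 'Y' (loan approved) the positive class; an equivalent sketch
# straight from a confusion matrix cm with sorted labels ['N', 'Y'] would be:
# recall_for_Y = cm[1, 1] / (cm[1, 0] + cm[1, 1])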
code
128008433/cell_12
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data.drop('Loan_ID', axis=1, inplace=True)
data.shape
data = data.dropna()
data.shape
X = data.drop('Loan_Status', axis=1)
y = data['Loan_Status']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
print('Training set shape:', X_train.shape)
print('Testing set shape:', X_test.shape)
code
128008433/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/loan-data-set/loan_data_set.csv')
data.drop('Loan_ID', axis=1, inplace=True)
data.shape
code
128020267/cell_13
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/amazon-fine-food-reviews/Reviews.csv', index_col='Id')
data = data.drop(['ProductId', 'UserId', 'ProfileName', 'HelpfulnessNumerator', 'HelpfulnessDenominator', 'Time', 'Summary'], axis=1)
data.Score = ['positive' if i >= 4 else 'negative' for i in data.Score]
data.head()
code
128020267/cell_2
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128020267/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/amazon-fine-food-reviews/Reviews.csv', index_col='Id')
data.head()
code
128020267/cell_8
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/amazon-fine-food-reviews/Reviews.csv', index_col='Id')
data.describe()
code
128020267/cell_16
[ "text_html_output_1.png" ]
!pip install gensim pandas
import pandas as pd
import gensim
code
128020267/cell_17
[ "text_html_output_1.png" ]
import gensim
import pandas as pd
import pandas as pd
import re
data = pd.read_csv('../input/amazon-fine-food-reviews/Reviews.csv', index_col='Id')
data = data.drop(['ProductId', 'UserId', 'ProfileName', 'HelpfulnessNumerator', 'HelpfulnessDenominator', 'Time', 'Summary'], axis=1)
data.Score = ['positive' if i >= 4 else 'negative' for i in data.Score]
import re
data.Text = [i.lower() for i in data.Text]
data.Text = [re.sub('[^\\w\\s]', '', i) for i in data.Text]
def preprocess_text(text):
    tokens = gensim.utils.simple_preprocess(text)
    return tokens
data['tokens'] = data['Text'].apply(preprocess_text)
data.head(25)
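# gensim.utils.simple_preprocess already tokenises and lowercases on its own, keeping
# alphabetic tokens of 2-15 characters by default (accents are stripped only with
# deacc=True), so the manual lower()/punctuation regex above is largely redundant
# for the 'tokens' column.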
code
128020267/cell_14
[ "text_html_output_1.png" ]
import pandas as pd
import re
data = pd.read_csv('../input/amazon-fine-food-reviews/Reviews.csv', index_col='Id')
data = data.drop(['ProductId', 'UserId', 'ProfileName', 'HelpfulnessNumerator', 'HelpfulnessDenominator', 'Time', 'Summary'], axis=1)
data.Score = ['positive' if i >= 4 else 'negative' for i in data.Score]
import re
data.Text = [i.lower() for i in data.Text]
data.Text = [re.sub('[^\\w\\s]', '', i) for i in data.Text]
data.head()
code
128020267/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/amazon-fine-food-reviews/Reviews.csv', index_col='Id')
data = data.drop(['ProductId', 'UserId', 'ProfileName', 'HelpfulnessNumerator', 'HelpfulnessDenominator', 'Time', 'Summary'], axis=1)
data.head()
code
129012188/cell_20
[ "text_plain_output_1.png" ]
from copy import deepcopy
from copy import deepcopy
from datasets import list_metrics, load_metric
from random import randint
from random import randint, shuffle
from sentence_transformers import SentenceTransformer, util
from sklearn.metrics import confusion_matrix
import numpy as np
import pandas as pd
import pandas as pd
import plotly.express as px
def performance(y_true, y_pred):
    f1_metric = load_metric("f1")
    re_metric = load_metric("recall")
    pre_metric = load_metric("precision")
    # arrays are needed for the element-wise comparisons below
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    type_c_int = list(set(np.concatenate([y_true, y_pred])))
    type_c = [str(i) for i in type_c_int]
    f1_m_list = []
    re_m_list = []
    pre_m_list = []
    for i in type_c_int:
        bi_true = list(y_true == i)
        bi_pred = list(y_pred == i)
        f1_m_results = f1_metric.compute(predictions=bi_pred, references=bi_true, average="macro")
        re_m_results = re_metric.compute(predictions=bi_pred, references=bi_true, average="macro")
        pre_m_results = pre_metric.compute(predictions=bi_pred, references=bi_true, average="macro")
        f1_m_list.append(f1_m_results["f1"])
        re_m_list.append(re_m_results["recall"])
        pre_m_list.append(pre_m_results["precision"])
    data = {'Class_type': type_c_int, 'F1-macro': f1_m_list, 'Recall-macro': re_m_list, 'Precision-macro': pre_m_list}
    df = pd.DataFrame(data)
    display(df)
    z = confusion_matrix(y_true, y_pred)
    x_lab = type_c
    fig = px.imshow(z, text_auto=True, labels=dict(x="True label", y="Predicted label", color="times"), x=x_lab, y=x_lab)
    # fig.show()
    accuracy = sum([1 for i in range(len(y_true)) if y_pred[i] == y_true[i]]) / len(y_true)
    print("Accuracy is", accuracy)
    return (z, fig)
cf_matrix_test, figure_test = performance([1, 3, 1, 2, 2, 1], [2, 3, 1, 3, 3, 2])
def read_and_split_the_excel(QA_path):
    """
    :func: read the question list and answer list from an xlsx file (needs an up-to-date openpyxl)
    :param QA_path: file path
    :return: question list, answer list
    """
    df1 = pd.read_excel(QA_path)
    question_list = df1.iloc[:, 0].tolist()
    answer_list = df1.iloc[:, 1].tolist()
    return (question_list, answer_list)
def read_and_split_the_01(zero_one_path):
    """
    :func: read the original sentence list, the test sentence list and the labels from the 0/1 file
    :param zero_one_path: file path
    :return: sentence-1 list, sentence-2 list, label list
    """
    df1 = pd.read_csv(zero_one_path)
    Sen1_list = df1.iloc[:, 0].tolist()
    Sen2_list = df1.iloc[:, 1].tolist()
    label_list = df1.iloc[:, 2].tolist()
    return (Sen1_list, Sen2_list, label_list)
def shuffle_without_repeated(list_):
    temp_list = deepcopy(list_)
    m = len(temp_list)
    m = m - 1
    for i_current in range(m, 1, -1):
        rest = i_current - 1
        i_replace = randint(0, rest)
        temp_list[i_current], temp_list[i_replace] = (temp_list[i_replace], temp_list[i_current])
    return temp_list
def obtain_shuffle_01(ori_list):
    shuffle_q_list = shuffle_without_repeated(ori_list)
    shuffle_label_list = [0] * len(shuffle_q_list)
    return (ori_list, shuffle_q_list, shuffle_label_list)
question_list = ['The cat sits outside', 'A man is playing guitar', 'The new movie is awesome', 'The new opera is nice']
obtain_shuffle_01(question_list)
def read_qa_and_expand_training_set(QA_path, zero_one_path):
    question_list, answer_list = read_and_split_the_excel(QA_path)
    Sen1_list, Sen2_list, label_list = read_and_split_the_01(zero_one_path)
    return (question_list, answer_list, Sen1_list, Sen2_list, label_list)
QA_path = '../input/uic-cn-admission/CN_QA_dataset_all.xlsx'
zero_one_path = '/kaggle/input/01-uic-rm-dup/df_test.csv'
question_list, answer_list, Sen1_list, Sen2_list, label_list = read_qa_and_expand_training_set(QA_path, zero_one_path)
from sentence_transformers import SentenceTransformer, util
import pandas as pd
from copy import deepcopy
from random import randint
from termcolor import colored
def SBERT_get_reply(model, query, question_list, answer_list, question_list_emb, topk_SBERT, threshold_SBERT):
    queries = [query]
    query_embeddings = model.encode(queries, convert_to_tensor=True)
    index_ranked = []
    tensor_scores = []
    cosine_scores = util.pytorch_cos_sim(query_embeddings, question_list_emb)[0]
    results = zip(range(len(cosine_scores)), cosine_scores)
    results = sorted(results, key=lambda x: x[1], reverse=True)
    for index, tensor_score in results:
        index_ranked.append(question_list[index])
        tensor_scores.append(tensor_score)
    topk_idx_SBERT = index_ranked[:topk_SBERT]
    return (topk_idx_SBERT, tensor_scores)
def use_model_qa(model_path, QA_path, zero_one_path):
    model = SentenceTransformer(model_path, device='cuda')
    topk_SBERT = 3
    threshold_SBERT = 0.6
    question_list, answer_list = read_and_split_the_excel(QA_path)
    question_embeddings = model.encode(question_list, convert_to_tensor=True)
    question_list, answer_list, Sen1_list, Sen2_list, label_list = read_qa_and_expand_training_set(QA_path, zero_one_path)
    predict_result = []
    df = pd.read_csv(zero_one_path)
    match = []
    score = []
    for index, test_query in enumerate(Sen2_list):
        topk_idx_SBERT, tensor_scores = SBERT_get_reply(model, test_query, question_list, answer_list, question_embeddings, topk_SBERT, threshold_SBERT)
        match.append(topk_idx_SBERT[0])
        score.append(tensor_scores[0].cpu().numpy())
        if topk_idx_SBERT[0] == Sen1_list[index]:
            prediction = 1
        else:
            prediction = 0
        predict_result.append(prediction)
    new = pd.DataFrame({'roberta_fine_tune_match': match, 'roberta_fine_tune_score': score, 'roberta_fine_tune_label': predict_result})
    merged_df = pd.concat([df, new], axis=1)
    merged_df.to_excel('UIC问题匹配结果比较roberta.xlsx', index=None)  # "UIC question-matching comparison (roberta)"
    cf_matrix_test, figure_test = performance(label_list, predict_result)
    return cf_matrix_test
def SBERT_QA_test(model_path):
    QA_path = '../input/uic-cn-admission/CN_QA_dataset_all.xlsx'
    zero_one_path = '/kaggle/input/01-uic-rm-dup/df_test.csv'
    # assumption: the evaluation call itself was lost when this cell was flattened;
    # use_model_qa is the only consumer of these paths, so it is restored here
    return use_model_qa(model_path, QA_path, zero_one_path)
model_path = '/kaggle/input/sbert-fine-tune/roberta_fine_tune'
SBERT_QA_test(model_path)
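# The retrieval core above is plain cosine similarity between sentence embeddings; a
# minimal standalone sketch (the checkpoint name here is illustrative, not from this notebook):
# from sentence_transformers import SentenceTransformer, util
# m = SentenceTransformer('paraphrase-multilingual-MiniLM-L12-v2')
# sims = util.pytorch_cos_sim(m.encode(['query'], convert_to_tensor=True),
#                             m.encode(['candidate a', 'candidate b'], convert_to_tensor=True))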
code
129012188/cell_6
[ "text_plain_output_100.png", "text_plain_output_201.png", "text_plain_output_261.png", "text_plain_output_84.png", "text_plain_output_322.png", "text_plain_output_205.png", "text_plain_output_271.png", "text_plain_output_56.png", "text_plain_output_158.png", "text_plain_output_223.png", "text_plain_output_218.png", "text_plain_output_264.png", "text_plain_output_282.png", "text_plain_output_287.png", "text_plain_output_232.png", "text_plain_output_181.png", "text_plain_output_137.png", "text_plain_output_139.png", "text_plain_output_35.png", "text_plain_output_258.png", "text_plain_output_130.png", "text_plain_output_117.png", "text_plain_output_286.png", "text_plain_output_262.png", "text_plain_output_278.png", "text_plain_output_254.png", "text_plain_output_307.png", "text_plain_output_98.png", "text_plain_output_236.png", "text_plain_output_195.png", "text_plain_output_219.png", "text_plain_output_237.png", "text_plain_output_43.png", "text_plain_output_284.png", "text_plain_output_187.png", "text_plain_output_309.png", "text_plain_output_78.png", "text_plain_output_143.png", "text_plain_output_106.png", "text_plain_output_37.png", "text_plain_output_138.png", "text_plain_output_192.png", "text_plain_output_184.png", "text_plain_output_274.png", "text_plain_output_172.png", "text_plain_output_147.png", "text_plain_output_327.png", "text_plain_output_256.png", "text_plain_output_90.png", "text_plain_output_79.png", "text_plain_output_5.png", "text_plain_output_75.png", "text_plain_output_48.png", "text_plain_output_116.png", "text_plain_output_128.png", "text_plain_output_30.png", "text_plain_output_167.png", "text_plain_output_213.png", "text_plain_output_73.png", "text_plain_output_126.png", "text_plain_output_321.png", "text_plain_output_272.png", "text_plain_output_115.png", "text_plain_output_316.png", "text_plain_output_15.png", "text_plain_output_133.png", "text_plain_output_198.png", "text_plain_output_178.png", "text_plain_output_226.png", "text_plain_output_154.png", "text_plain_output_234.png", "text_plain_output_114.png", "text_plain_output_157.png", "text_plain_output_317.png", "text_plain_output_251.png", "text_plain_output_70.png", "text_plain_output_9.png", "text_plain_output_44.png", "text_plain_output_203.png", "text_plain_output_119.png", "text_plain_output_86.png", "text_plain_output_244.png", "text_plain_output_118.png", "text_plain_output_131.png", "text_plain_output_40.png", "text_plain_output_123.png", "text_plain_output_74.png", "text_plain_output_190.png", "text_plain_output_302.png", "text_plain_output_31.png", "text_plain_output_281.png", "text_plain_output_20.png", "text_plain_output_273.png", "text_plain_output_263.png", "text_plain_output_102.png", "text_plain_output_229.png", "text_plain_output_111.png", "text_plain_output_222.png", "text_plain_output_101.png", "text_plain_output_169.png", "text_plain_output_144.png", "text_plain_output_161.png", "text_plain_output_305.png", "text_plain_output_275.png", "text_plain_output_301.png", "text_plain_output_132.png", "text_plain_output_60.png", "text_plain_output_221.png", "text_plain_output_155.png", "text_plain_output_68.png", "text_plain_output_4.png", "text_plain_output_65.png", "text_plain_output_64.png", "text_plain_output_215.png", "text_plain_output_189.png", "text_plain_output_13.png", "text_plain_output_200.png", "text_plain_output_107.png", "text_plain_output_312.png", "text_plain_output_248.png", "text_html_output_2.png", "text_plain_output_318.png", "text_plain_output_52.png", 
"text_plain_output_66.png", "text_plain_output_243.png", "text_plain_output_45.png", "text_plain_output_300.png", "text_plain_output_257.png", "text_plain_output_277.png", "text_plain_output_171.png", "text_plain_output_14.png", "text_plain_output_159.png", "text_plain_output_32.png", "text_plain_output_304.png", "text_plain_output_88.png", "text_plain_output_240.png", "text_plain_output_29.png", "text_plain_output_140.png", "text_plain_output_280.png", "text_plain_output_129.png", "text_plain_output_242.png", "text_plain_output_289.png", "text_plain_output_255.png", "text_plain_output_160.png", "text_plain_output_58.png", "text_plain_output_49.png", "text_plain_output_63.png", "text_plain_output_260.png", "text_plain_output_294.png", "text_plain_output_27.png", "text_plain_output_320.png", "text_plain_output_177.png", "text_plain_output_76.png", "text_plain_output_108.png", "text_plain_output_54.png", "text_plain_output_142.png", "text_plain_output_10.png", "text_plain_output_269.png", "text_plain_output_276.png", "text_plain_output_6.png", "text_plain_output_326.png", "text_plain_output_153.png", "text_plain_output_170.png", "text_plain_output_92.png", "text_plain_output_57.png", "text_plain_output_120.png", "text_plain_output_24.png", "text_plain_output_21.png", "application_vnd.jupyter.stderr_output_325.png", "text_plain_output_104.png", "text_plain_output_270.png", "text_plain_output_47.png", "text_plain_output_121.png", "text_plain_output_25.png", "text_plain_output_134.png", "text_plain_output_77.png", "text_plain_output_288.png", "text_plain_output_18.png", "text_plain_output_183.png", "text_plain_output_266.png", "text_plain_output_149.png", "text_plain_output_208.png", "text_plain_output_50.png", "text_plain_output_36.png", "text_plain_output_207.png", "text_plain_output_96.png", "text_plain_output_87.png", "text_plain_output_3.png", "text_plain_output_217.png", "text_plain_output_180.png", "text_plain_output_141.png", "text_plain_output_210.png", "text_plain_output_112.png", "text_plain_output_152.png", "text_plain_output_225.png", "text_plain_output_191.png", "text_plain_output_259.png", "text_plain_output_290.png", "text_plain_output_283.png", "text_plain_output_247.png", "text_plain_output_113.png", "text_plain_output_324.png", "text_plain_output_22.png", "text_plain_output_188.png", "text_plain_output_81.png", "text_plain_output_69.png", "text_plain_output_175.png", "text_plain_output_165.png", "text_plain_output_146.png", "text_plain_output_145.png", "text_plain_output_125.png", "text_plain_output_197.png", "text_plain_output_315.png", "text_plain_output_38.png", "text_plain_output_7.png", "text_plain_output_214.png", "text_plain_output_166.png", "text_plain_output_314.png", "text_plain_output_91.png", "text_plain_output_308.png", "text_plain_output_245.png", "text_plain_output_16.png", "text_plain_output_174.png", "text_plain_output_212.png", "text_plain_output_230.png", "text_plain_output_265.png", "text_html_output_1.png", "text_plain_output_59.png", "text_plain_output_206.png", "text_plain_output_103.png", "text_plain_output_71.png", "text_plain_output_8.png", "text_plain_output_122.png", "text_plain_output_211.png", "text_plain_output_182.png", "text_plain_output_26.png", "text_plain_output_310.png", "text_plain_output_220.png", "text_plain_output_109.png", "text_plain_output_238.png", "text_plain_output_41.png", "text_plain_output_34.png", "text_plain_output_253.png", "text_plain_output_291.png", "text_plain_output_168.png", "text_plain_output_204.png", 
"text_plain_output_241.png", "text_plain_output_231.png", "text_plain_output_209.png", "text_plain_output_185.png", "text_plain_output_85.png", "text_plain_output_42.png", "text_plain_output_110.png", "text_plain_output_67.png", "text_plain_output_297.png", "text_plain_output_53.png", "text_plain_output_313.png", "text_plain_output_224.png", "text_plain_output_193.png", "text_plain_output_23.png", "text_plain_output_173.png", "text_plain_output_235.png", "text_plain_output_151.png", "text_plain_output_89.png", "text_plain_output_299.png", "text_plain_output_51.png", "text_plain_output_252.png", "text_plain_output_296.png", "text_plain_output_28.png", "text_plain_output_72.png", "text_plain_output_99.png", "text_plain_output_163.png", "text_plain_output_179.png", "text_plain_output_162.png", "text_plain_output_136.png", "text_plain_output_246.png", "text_plain_output_2.png", "text_plain_output_239.png", "text_plain_output_127.png", "text_plain_output_311.png", "text_plain_output_295.png", "text_plain_output_279.png", "text_plain_output_196.png", "text_plain_output_97.png", "text_plain_output_227.png", "text_plain_output_1.png", "text_plain_output_33.png", "text_plain_output_150.png", "text_plain_output_39.png", "text_plain_output_176.png", "text_plain_output_186.png", "text_plain_output_233.png", "text_plain_output_228.png", "text_plain_output_55.png", "text_plain_output_293.png", "text_plain_output_268.png", "text_plain_output_199.png", "text_plain_output_319.png", "text_plain_output_82.png", "text_plain_output_202.png", "text_plain_output_93.png", "text_plain_output_19.png", "text_plain_output_105.png", "text_plain_output_80.png", "text_plain_output_94.png", "text_plain_output_164.png", "text_plain_output_249.png", "text_plain_output_216.png", "text_plain_output_124.png", "text_plain_output_17.png", "text_plain_output_148.png", "text_plain_output_323.png", "text_plain_output_250.png", "text_plain_output_11.png", "text_plain_output_12.png", "text_plain_output_267.png", "text_plain_output_194.png", "text_plain_output_62.png", "text_plain_output_303.png", "text_plain_output_95.png", "text_plain_output_156.png", "text_plain_output_298.png", "text_plain_output_61.png", "text_plain_output_83.png", "text_plain_output_292.png", "text_plain_output_135.png", "text_plain_output_285.png", "text_plain_output_306.png", "text_plain_output_46.png" ]
!pip install -U sentence-transformers
!pip install openpyxl
code
50237666/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os
import os
import numpy as np
import pandas as pd
import os
import os
def mkdir(p):
    if not os.path.exists(p):
        os.mkdir(p)
def link(src, dst):
    if not os.path.exists(dst):
        os.symlink(src, dst, target_is_directory=True)
mkdir('../input/fruits/fruits-360/smallImages')
classes = ['Banana', 'Strawberry', 'Raspberry']
train_from_path = os.path.abspath('../large_files/fruits-360/Training')
code
50237666/cell_3
[ "text_plain_output_1.png" ]
import os
import os
import numpy as np
import pandas as pd
import os
import os
def mkdir(p):
    if not os.path.exists(p):
        os.mkdir(p)
def link(src, dst):
    if not os.path.exists(dst):
        os.symlink(src, dst, target_is_directory=True)
mkdir('../input/fruits/fruits-360/smallImages')
classes = ['Banana', 'Strawberry', 'Raspberry']
train_from_path = os.path.abspath('../large_files/fruits-360/Training')
# os.makedir does not exist; use the guarded helper defined above
mkdir('../input/fruits/fruits-360/smallImages')
code
50237666/cell_5
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from glob import glob
from keras.applications.vgg16 import VGG16
from keras.layers import Input, Lambda, Dense, Flatten
from keras.models import Model
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import confusion_matrix
from utils import plot_confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
import numpy as np  # linear algebra
IMAGE_SIZE = [100, 100]
epochs = 5
batch_size = 32
train_path = '../input/fruits/fruits-360/Training'
valid_path = '../input/fruits/fruits-360/Test'
image_files = glob(train_path + '/*/*.jp*g')
valid_image_files = glob(valid_path + '/*/*.jp*g')
folders = glob(train_path + '/*')
plt.imshow(image.load_img(np.random.choice(image_files)))
plt.show()
vgg = VGG16(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)
for layer in vgg.layers:
    layer.trainable = False
x = Flatten()(vgg.output)
prediction = Dense(len(folders), activation='softmax')(x)
model = Model(inputs=vgg.input, outputs=prediction)
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
gen = ImageDataGenerator(rotation_range=20, width_shift_range=0.1, height_shift_range=0.1, shear_range=0.1, zoom_range=0.2, horizontal_flip=True, vertical_flip=True)
test_gen = gen.flow_from_directory(valid_path, target_size=IMAGE_SIZE)
print(test_gen.class_indices)
labels = [None] * len(test_gen.class_indices)
for k, v in test_gen.class_indices.items():
    labels[v] = k
for x, y in test_gen:
    print('min:', x[0].min(), 'max:', x[0].max())
    plt.title(labels[np.argmax(y[0])])
    plt.imshow(x[0])
    plt.show()
    break
train_generator = gen.flow_from_directory(train_path, target_size=IMAGE_SIZE, shuffle=True, batch_size=batch_size)
valid_generator = gen.flow_from_directory(valid_path, target_size=IMAGE_SIZE, shuffle=True, batch_size=batch_size)
r = model.fit_generator(train_generator, validation_data=valid_generator, epochs=epochs, steps_per_epoch=len(image_files) // batch_size, validation_steps=len(valid_image_files) // batch_size)
def get_confusion_matrix(data_path, N):
    print('Generating Confusion Matrix', N)
    predictions = []
    targets = []
    i = 0
    for x, y in gen.flow_from_directory(data_path, target_size=IMAGE_SIZE, shuffle=False, batch_size=batch_size * 2):
        i += 1
        if i % 50 == 0:
            print(i)
        p = model.predict(x)
        p = np.argmax(p, axis=1)
        y = np.argmax(y, axis=1)
        predictions = np.concatenate((predictions, p))
        targets = np.concatenate((targets, y))
        if len(targets) >= N:
            break
    cm = confusion_matrix(targets, predictions)
    return cm
cm = get_confusion_matrix(train_path, len(image_files))
print(cm)
valid_cm = get_confusion_matrix(valid_path, len(valid_image_files))
print(valid_cm)
plt.plot(r.history['loss'], label='train loss')
plt.plot(r.history['val_loss'], label='val loss')
plt.legend()
plt.show()
plt.plot(r.history['acc'], label='train acc')
plt.plot(r.history['val_acc'], label='val acc')
plt.legend()
plt.show()
from utils import plot_confusion_matrix
plot_confusion_matrix(cm, labels, title='Train Confusion Matrix')
plot_confusion_matrix(valid_cm, labels, title='Validation Confusion Matrix')
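# Transfer-learning shape of the cell above: VGG16(include_top=False) drops the original
# ImageNet classifier head, every convolutional layer is frozen via layer.trainable = False,
# so training only fits the new Flatten -> Dense(len(folders), softmax) head.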
code
128006002/cell_42
[ "text_html_output_2.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import precision_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import precision_score
import xgboost as xgb
def build_random_forest(x_train, y_train, x_test, y_test, n_estimators):
    rndforest = RandomForestClassifier(n_estimators=n_estimators, n_jobs=-1)
    rndforest.fit(x_train, y_train)
    print('Number of estimators:', n_estimators)
    print('TRAINING ACCURACY:', rndforest.score(x_train, y_train))
    mean_score = rndforest.score(x_test, y_test)
    print('Accuracy: ', mean_score)
    print('-------------------------------------')
    precision = precision_score(y_test, rndforest.predict(x_test), average='macro')
    print('Random Forest precision: ', precision)
build_random_forest(x_train, y_train.values.ravel(), x_test, y_test.values.ravel(), n_estimators=250)
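# average='macro' reports the unweighted mean of per-class precisions, so rare classes
# count as much as frequent ones; n_jobs=-1 builds the forest's trees on all CPU cores.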
code
128006002/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
all_data = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
all_data = all_data.drop('salary', axis=1)
all_data = all_data.drop('salary_currency', axis=1)
all_data = all_data.drop('employee_residence', axis=1)
all_data.nunique()
for columna in ['work_year', 'experience_level', 'employment_type', 'company_size', 'remote_ratio', 'job_title']:
    print(all_data[columna].unique())
code
128006002/cell_4
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
all_data = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
all_data
code
128006002/cell_6
[ "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
all_data = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
diferencias = all_data['employee_residence'].compare(all_data['company_location'])
print(diferencias)
code
128006002/cell_29
[ "text_html_output_1.png" ]
from matplotlib import pyplot
from matplotlib import pyplot
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.metrics import r2_score
from sklearn.tree import DecisionTreeRegressor
import xgboost as xgb
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.tree import DecisionTreeClassifier
from matplotlib import pyplot
def evaluate_decision_tree(x_train, y_train, x_test, y_test):
    train_scores = []
    test_scores = []
    train_scores_mse, test_scores_mse = ([], [])
    values = [i for i in range(1, 20)]
    for i in values:
        model = DecisionTreeRegressor(max_depth=i)
        model.fit(x_train, y_train)
        train_predict = model.predict(x_train)
        train_r2 = round(r2_score(y_train, train_predict), 3)
        train_scores.append(train_r2)
        train_mse = round(mean_squared_error(y_train, train_predict), 3)
        train_scores_mse.append(train_mse)
        test_predict = model.predict(x_test)
        test_r2 = round(r2_score(y_test, test_predict), 3)
        test_scores.append(test_r2)
        test_mse = round(mean_squared_error(y_test, test_predict), 3)
        test_scores_mse.append(test_mse)
import xgboost as xgb
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot
def evaluate_xgboost(x_train, y_train, x_test, y_test):
    train_scores = []
    test_scores = []
    train_scores_mse, test_scores_mse = ([], [])
    values = [i for i in range(1, 5)]
    for i in values:
        model = xgb.XGBRegressor(max_depth=i, objective='reg:squarederror')
        model.fit(x_train, y_train)
        train_predict = model.predict(x_train)
        train_r2 = round(r2_score(y_train, train_predict), 3)
        train_scores.append(train_r2)
        train_mse = round(mean_squared_error(y_train, train_predict), 3)
        train_scores_mse.append(train_mse)
        test_predict = model.predict(x_test)
        test_r2 = round(r2_score(y_test, test_predict), 3)
        test_scores.append(test_r2)
        test_mse = round(mean_squared_error(y_test, test_predict), 3)
        test_scores_mse.append(test_mse)
evaluate_xgboost(x_train, y_train, x_test, y_test)
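# Both sweeps collect train/test R2 and MSE per max_depth but never plot or return them;
# a minimal plotting sketch of the assumed intent (variable names as inside the functions):
# pyplot.plot(values, train_scores, label='train R2')
# pyplot.plot(values, test_scores, label='test R2')
# pyplot.legend(); pyplot.show()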
code
128006002/cell_26
[ "text_html_output_1.png" ]
from matplotlib import pyplot
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.tree import DecisionTreeClassifier
from matplotlib import pyplot
def evaluate_decision_tree(x_train, y_train, x_test, y_test):
    train_scores = []
    test_scores = []
    train_scores_mse, test_scores_mse = ([], [])
    values = [i for i in range(1, 20)]
    for i in values:
        model = DecisionTreeRegressor(max_depth=i)
        model.fit(x_train, y_train)
        train_predict = model.predict(x_train)
        train_r2 = round(r2_score(y_train, train_predict), 3)
        train_scores.append(train_r2)
        train_mse = round(mean_squared_error(y_train, train_predict), 3)
        train_scores_mse.append(train_mse)
        test_predict = model.predict(x_test)
        test_r2 = round(r2_score(y_test, test_predict), 3)
        test_scores.append(test_r2)
        test_mse = round(mean_squared_error(y_test, test_predict), 3)
        test_scores_mse.append(test_mse)
evaluate_decision_tree(x_train, y_train, x_test, y_test)
code
128006002/cell_41
[ "text_html_output_1.png" ]
print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
code
128006002/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128006002/cell_18
[ "image_output_1.png" ]
import pandas as pd
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
all_data = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
all_data = all_data.drop('salary', axis=1)
all_data = all_data.drop('salary_currency', axis=1)
all_data = all_data.drop('employee_residence', axis=1)
all_data.nunique()
all_data = all_data.drop('salary_in_usd', axis=1)
df_reg = pd.get_dummies(all_data, drop_first=True, columns=['experience_level', 'employment_type', 'company_size', 'job_title', 'company_location'])
df_reg
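# drop_first=True drops one dummy per categorical column, so e.g. a three-level
# company_size becomes two 0/1 columns; the dropped level is the implicit all-zeros
# baseline, which avoids perfectly collinear dummies in downstream linear models.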
code
128006002/cell_32
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
all_data = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
all_data = all_data.drop('salary', axis=1)
all_data = all_data.drop('salary_currency', axis=1)
all_data = all_data.drop('employee_residence', axis=1)
all_data.nunique()
all_data = all_data.drop('salary_in_usd', axis=1)
df_reg = pd.get_dummies(all_data, drop_first=True, columns=['experience_level', 'employment_type', 'company_size', 'job_title', 'company_location'])
df_reg
aux = df_reg
scaler = MinMaxScaler()
aux = pd.DataFrame(scaler.fit_transform(aux), columns=aux.columns)
df_clas = pd.get_dummies(all_data, drop_first=True, columns=['experience_level', 'employment_type', 'company_size', 'company_location'])
df_clas
code
128006002/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
all_data = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
all_data = all_data.drop('salary', axis=1)
all_data = all_data.drop('salary_currency', axis=1)
all_data = all_data.drop('employee_residence', axis=1)
all_data.nunique()
code
128006002/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
all_data = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
all_data = all_data.drop('salary', axis=1)
all_data = all_data.drop('salary_currency', axis=1)
all_data = all_data.drop('employee_residence', axis=1)
all_data.nunique()
all_data = all_data.drop('salary_in_usd', axis=1)
job_counts = all_data['job_title'].value_counts()
print(job_counts)
code
128006002/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
all_data = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
all_data = all_data.drop('salary', axis=1)
all_data = all_data.drop('salary_currency', axis=1)
all_data = all_data.drop('employee_residence', axis=1)
all_data.nunique()
all_data = all_data.drop('salary_in_usd', axis=1)
sel_jobs = ['Data Engineer', 'Data Scientist', 'Data Analyst', 'Machine Learning Engineer', 'Analytics Engineer']
all_data['job_title'] = all_data['job_title'].apply(lambda x: x if x in sel_jobs else 'Otro')
all_data
code
128006002/cell_38
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
from collections import Counter
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.graph_objects as go
all_data = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
all_data = all_data.drop('salary', axis=1)
all_data = all_data.drop('salary_currency', axis=1)
all_data = all_data.drop('employee_residence', axis=1)
all_data.nunique()
all_data = all_data.drop('salary_in_usd', axis=1)
df_reg = pd.get_dummies(all_data, drop_first=True, columns=['experience_level', 'employment_type', 'company_size', 'job_title', 'company_location'])
df_reg
aux = df_reg
scaler = MinMaxScaler()
aux = pd.DataFrame(scaler.fit_transform(aux), columns=aux.columns)
target = 'salary_in_euro'
X = aux.loc[:, aux.columns != target]
y = aux.loc[:, aux.columns == target]
df_clas = pd.get_dummies(all_data, drop_first=True, columns=['experience_level', 'employment_type', 'company_size', 'company_location'])
df_clas
from collections import Counter
import plotly.graph_objects as go
def plot_class_distribution(df, class_col):
    categories = sorted(df[class_col].unique(), reverse=False)
    hist = Counter(df[class_col])
    fig = go.Figure(layout=go.Layout(height=400, width=600))
    fig.add_trace(go.Bar(x=categories, y=[hist[cat] for cat in categories]))
target = 'job_title'
X = df_clas.loc[:, df_clas.columns != target]
y = df_clas.loc[:, df_clas.columns == target]
plot_class_distribution(y, 'job_title')
code
128006002/cell_35
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
from collections import Counter
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.graph_objects as go
all_data = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
all_data = all_data.drop('salary', axis=1)
all_data = all_data.drop('salary_currency', axis=1)
all_data = all_data.drop('employee_residence', axis=1)
all_data.nunique()
all_data = all_data.drop('salary_in_usd', axis=1)
df_reg = pd.get_dummies(all_data, drop_first=True, columns=['experience_level', 'employment_type', 'company_size', 'job_title', 'company_location'])
df_reg
aux = df_reg
scaler = MinMaxScaler()
aux = pd.DataFrame(scaler.fit_transform(aux), columns=aux.columns)
df_clas = pd.get_dummies(all_data, drop_first=True, columns=['experience_level', 'employment_type', 'company_size', 'company_location'])
df_clas
from collections import Counter
import plotly.graph_objects as go
def plot_class_distribution(df, class_col):
    categories = sorted(df[class_col].unique(), reverse=False)
    hist = Counter(df[class_col])
    fig = go.Figure(layout=go.Layout(height=400, width=600))
    fig.add_trace(go.Bar(x=categories, y=[hist[cat] for cat in categories]))
plot_class_distribution(df_clas, 'job_title')
code
128006002/cell_31
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
all_data = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
all_data = all_data.drop('salary', axis=1)
all_data = all_data.drop('salary_currency', axis=1)
all_data = all_data.drop('employee_residence', axis=1)
all_data.nunique()
all_data = all_data.drop('salary_in_usd', axis=1)
all_data
code
128006002/cell_24
[ "text_plain_output_1.png" ]
print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
code
128006002/cell_22
[ "text_html_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
all_data = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
all_data = all_data.drop('salary', axis=1)
all_data = all_data.drop('salary_currency', axis=1)
all_data = all_data.drop('employee_residence', axis=1)
all_data.nunique()
all_data = all_data.drop('salary_in_usd', axis=1)
df_reg = pd.get_dummies(all_data, drop_first=True, columns=['experience_level', 'employment_type', 'company_size', 'job_title', 'company_location'])
df_reg
aux = df_reg
scaler = MinMaxScaler()
aux = pd.DataFrame(scaler.fit_transform(aux), columns=aux.columns)
target = 'salary_in_euro'
X = aux.loc[:, aux.columns != target]
y = aux.loc[:, aux.columns == target]
y
code
128006002/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import missingno as msno
import missingno as msno
import pandas as pd
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
all_data = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
all_data = all_data.drop('salary', axis=1)
all_data = all_data.drop('salary_currency', axis=1)
all_data = all_data.drop('employee_residence', axis=1)
all_data.nunique()
fig, ax = plt.subplots(figsize=(14, 5))
graph = msno.matrix(all_data, ax=ax, sparkline=False)
ax.set_xticklabels(ax.get_xticklabels(), rotation=90, ha='center')
plt.show()
code
128006002/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
all_data = pd.read_csv('/kaggle/input/data-science-salaries-2023/ds_salaries.csv')
all_data = all_data.drop('salary', axis=1)
all_data = all_data.drop('salary_currency', axis=1)
all_data = all_data.drop('employee_residence', axis=1)
all_data.nunique()
tasa_conversion = 0.9
all_data['salary_in_euro'] = (all_data['salary_in_usd'] * tasa_conversion).round(0).astype(int)
all_data
code
32068608/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data_tests = pd.read_csv('../input/covid19-in-turkey/test_numbers.csv')
data = pd.read_csv('../input/covid19-in-turkey/covid_19_data_tr.csv')
data.rename(columns={'Country/Region': 'Ülke', 'Last_Update': 'Tarih', 'Confirmed': 'Vaka_Sayisi', 'Deaths': 'Vefat_Sayisi', 'Recovered': 'Tedavi_Sayisi'}, inplace=True)
data.drop('Province/State', axis=1, inplace=True)
test_sayisi = data_tests.iloc[0, 4:].values
test_sayisi.sort()
data['Test_Sayisi'] = test_sayisi
data.columns
vaka_artis = [0]
vefat_artis = [0]
iyilesen_artis = [0]
for i in range(len(data) - 1):
    vaka_artis.append(data['Vaka_Sayisi'][i + 1] - data['Vaka_Sayisi'][i])
    vefat_artis.append(data['Vefat_Sayisi'][i + 1] - data['Vefat_Sayisi'][i])
    iyilesen_artis.append(data['Tedavi_Sayisi'][i + 1] - data['Tedavi_Sayisi'][i])
data['Vaka_Artış_Sayısı'] = vaka_artis
data['Vefat_Artış_Sayısı'] = vefat_artis
data['Tedavi_Artış_Sayısı'] = iyilesen_artis
data
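# The accumulation loop above is a manual day-over-day difference; an equivalent
# vectorised sketch for one column (same leading 0) would be:
# data['Vaka_Artış_Sayısı'] = data['Vaka_Sayisi'].diff().fillna(0).astype(int)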
code
32068608/cell_9
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data_tests = pd.read_csv('../input/covid19-in-turkey/test_numbers.csv')
data = pd.read_csv('../input/covid19-in-turkey/covid_19_data_tr.csv')
data.rename(columns={'Country/Region': 'Ülke', 'Last_Update': 'Tarih', 'Confirmed': 'Vaka_Sayisi', 'Deaths': 'Vefat_Sayisi', 'Recovered': 'Tedavi_Sayisi'}, inplace=True)
data.drop('Province/State', axis=1, inplace=True)
test_sayisi = data_tests.iloc[0, 4:].values
test_sayisi.sort()
data['Test_Sayisi'] = test_sayisi
data.columns
vaka_artis = [0]
vefat_artis = [0]
iyilesen_artis = [0]
for i in range(len(data) - 1):
    vaka_artis.append(data['Vaka_Sayisi'][i + 1] - data['Vaka_Sayisi'][i])
    vefat_artis.append(data['Vefat_Sayisi'][i + 1] - data['Vefat_Sayisi'][i])
    iyilesen_artis.append(data['Tedavi_Sayisi'][i + 1] - data['Tedavi_Sayisi'][i])
data['Vaka_Artış_Sayısı'] = vaka_artis
data['Vefat_Artış_Sayısı'] = vefat_artis
data['Tedavi_Artış_Sayısı'] = iyilesen_artis
data.info()
code
32068608/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data_tests = pd.read_csv('../input/covid19-in-turkey/test_numbers.csv')
data = pd.read_csv('../input/covid19-in-turkey/covid_19_data_tr.csv')
data.info()
code
32068608/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data_tests = pd.read_csv('../input/covid19-in-turkey/test_numbers.csv')
data = pd.read_csv('../input/covid19-in-turkey/covid_19_data_tr.csv')
data_tests
code
32068608/cell_2
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32068608/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data_tests = pd.read_csv('../input/covid19-in-turkey/test_numbers.csv')
data = pd.read_csv('../input/covid19-in-turkey/covid_19_data_tr.csv')
data.rename(columns={'Country/Region': 'Ülke', 'Last_Update': 'Tarih', 'Confirmed': 'Vaka_Sayisi', 'Deaths': 'Vefat_Sayisi', 'Recovered': 'Tedavi_Sayisi'}, inplace=True)
data.drop('Province/State', axis=1, inplace=True)
test_sayisi = data_tests.iloc[0, 4:].values
test_sayisi.sort()
data['Test_Sayisi'] = test_sayisi
data.columns
vaka_artis = [0]
vefat_artis = [0]
iyilesen_artis = [0]
for i in range(len(data) - 1):
    vaka_artis.append(data['Vaka_Sayisi'][i + 1] - data['Vaka_Sayisi'][i])
    vefat_artis.append(data['Vefat_Sayisi'][i + 1] - data['Vefat_Sayisi'][i])
    iyilesen_artis.append(data['Tedavi_Sayisi'][i + 1] - data['Tedavi_Sayisi'][i])
data['Vaka_Artış_Sayısı'] = vaka_artis
data['Vefat_Artış_Sayısı'] = vefat_artis
data['Tedavi_Artış_Sayısı'] = iyilesen_artis
data.tail(1)
code
32068608/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data_tests = pd.read_csv('../input/covid19-in-turkey/test_numbers.csv')
data = pd.read_csv('../input/covid19-in-turkey/covid_19_data_tr.csv')
data.rename(columns={'Country/Region': 'Ülke', 'Last_Update': 'Tarih', 'Confirmed': 'Vaka_Sayisi', 'Deaths': 'Vefat_Sayisi', 'Recovered': 'Tedavi_Sayisi'}, inplace=True)
data.drop('Province/State', axis=1, inplace=True)
test_sayisi = data_tests.iloc[0, 4:].values
test_sayisi.sort()
data['Test_Sayisi'] = test_sayisi
data.columns
code
32068608/cell_8
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data_tests = pd.read_csv('../input/covid19-in-turkey/test_numbers.csv')
data = pd.read_csv('../input/covid19-in-turkey/covid_19_data_tr.csv')
data.rename(columns={'Country/Region': 'Ülke', 'Last_Update': 'Tarih', 'Confirmed': 'Vaka_Sayisi', 'Deaths': 'Vefat_Sayisi', 'Recovered': 'Tedavi_Sayisi'}, inplace=True)
data.drop('Province/State', axis=1, inplace=True)
test_sayisi = data_tests.iloc[0, 4:].values
test_sayisi.sort()
data['Test_Sayisi'] = test_sayisi
data.columns
vaka_artis = [0]
vefat_artis = [0]
iyilesen_artis = [0]
for i in range(len(data) - 1):
    vaka_artis.append(data['Vaka_Sayisi'][i + 1] - data['Vaka_Sayisi'][i])
    vefat_artis.append(data['Vefat_Sayisi'][i + 1] - data['Vefat_Sayisi'][i])
    iyilesen_artis.append(data['Tedavi_Sayisi'][i + 1] - data['Tedavi_Sayisi'][i])
data['Vaka_Artış_Sayısı'] = vaka_artis
data['Vefat_Artış_Sayısı'] = vefat_artis
data['Tedavi_Artış_Sayısı'] = iyilesen_artis
data.info()
code
32068608/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data_tests = pd.read_csv('../input/covid19-in-turkey/test_numbers.csv')
data = pd.read_csv('../input/covid19-in-turkey/covid_19_data_tr.csv')
data.rename(columns={'Country/Region': 'Ülke', 'Last_Update': 'Tarih', 'Confirmed': 'Vaka_Sayisi', 'Deaths': 'Vefat_Sayisi', 'Recovered': 'Tedavi_Sayisi'}, inplace=True)
data.drop('Province/State', axis=1, inplace=True)
test_sayisi = data_tests.iloc[0, 4:].values
test_sayisi.sort()
data['Test_Sayisi'] = test_sayisi
data.columns
vaka_artis = [0]
vefat_artis = [0]
iyilesen_artis = [0]
for i in range(len(data) - 1):
    vaka_artis.append(data['Vaka_Sayisi'][i + 1] - data['Vaka_Sayisi'][i])
    vefat_artis.append(data['Vefat_Sayisi'][i + 1] - data['Vefat_Sayisi'][i])
    iyilesen_artis.append(data['Tedavi_Sayisi'][i + 1] - data['Tedavi_Sayisi'][i])
data['Vaka_Artış_Sayısı'] = vaka_artis
data['Vefat_Artış_Sayısı'] = vefat_artis
data['Tedavi_Artış_Sayısı'] = iyilesen_artis
date_x = data.Tarih
vaka_l = data.Vaka_Sayisi
vefat_l = data.Vefat_Sayisi
iyileşen_l = data.Tedavi_Sayisi
test_l = data.Test_Sayisi
fgr = plt.figure(figsize=(20, 10), dpi=150, facecolor='w')
ax = fgr.add_subplot(111)
ax.patch.set_facecolor('w')
ax.patch.set_alpha(1)
plt.plot(date_x, vaka_l, color='orange', linewidth=2, alpha=1, label='VAKA SAYISI')
plt.plot(date_x, vefat_l, color='red', linewidth=2, alpha=1, label='VEFAT SAYISI')
plt.plot(date_x, iyileşen_l, color='blue', linewidth=2, alpha=1, label='İYİLEŞEN SAYISI')
plt.plot(date_x, test_l, color='black', linewidth=0.7, alpha=0.5, label='GÜNLÜK YAPILAN TEST SAYISI')
plt.scatter(date_x, vaka_l, color='orange', linewidth=0.5, alpha=1)
plt.scatter(date_x, vefat_l, color='red', linewidth=0.5, alpha=1)
plt.scatter(date_x, iyileşen_l, color='blue', linewidth=0.5, alpha=1)
plt.scatter(date_x, test_l, color='gray', linewidth=0.1, alpha=0.5)
plt.title("TÜRKİYE'DEKİ GÜNCEL SON DURUM")
plt.xticks(rotation='vertical')
plt.xlabel('TARİH')
plt.ylabel('SAYI')
plt.legend(loc=0)
plt.grid(color='black', linestyle='--', linewidth=0.5, alpha=0.5, dash_joinstyle='bevel')
plt.show()
code
32068608/cell_17
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data_tests = pd.read_csv('../input/covid19-in-turkey/test_numbers.csv')
data = pd.read_csv('../input/covid19-in-turkey/covid_19_data_tr.csv')
data.rename(columns={'Country/Region': 'Ülke', 'Last_Update': 'Tarih', 'Confirmed': 'Vaka_Sayisi', 'Deaths': 'Vefat_Sayisi', 'Recovered': 'Tedavi_Sayisi'}, inplace=True)
data.drop('Province/State', axis=1, inplace=True)
test_sayisi = data_tests.iloc[0, 4:].values
test_sayisi.sort()
data['Test_Sayisi'] = test_sayisi
data.columns
vaka_artis = [0]
vefat_artis = [0]
iyilesen_artis = [0]
for i in range(len(data) - 1):
    vaka_artis.append(data['Vaka_Sayisi'][i + 1] - data['Vaka_Sayisi'][i])
    vefat_artis.append(data['Vefat_Sayisi'][i + 1] - data['Vefat_Sayisi'][i])
    iyilesen_artis.append(data['Tedavi_Sayisi'][i + 1] - data['Tedavi_Sayisi'][i])
data['Vaka_Artış_Sayısı'] = vaka_artis
data['Vefat_Artış_Sayısı'] = vefat_artis
data['Tedavi_Artış_Sayısı'] = iyilesen_artis
date_x = data.Tarih
vaka_l = data.Vaka_Sayisi
vefat_l = data.Vefat_Sayisi
iyileşen_l = data.Tedavi_Sayisi
test_l = data.Test_Sayisi
fgr = plt.figure(figsize=(20, 10), dpi=150, facecolor='w')
ax = fgr.add_subplot(111)
ax.patch.set_facecolor('w')
ax.patch.set_alpha(1)
plt.plot(date_x, vaka_l, color='orange', linewidth=2, alpha=1, label='VAKA SAYISI')
plt.plot(date_x, vefat_l, color='red', linewidth=2, alpha=1, label='VEFAT SAYISI')
plt.plot(date_x, iyileşen_l, color='blue', linewidth=2, alpha=1, label='İYİLEŞEN SAYISI')
plt.plot(date_x, test_l, color='black', linewidth=0.7, alpha=0.5, label='GÜNLÜK YAPILAN TEST SAYISI')
plt.scatter(date_x, vaka_l, color='orange', linewidth=0.5, alpha=1)
plt.scatter(date_x, vefat_l, color='red', linewidth=0.5, alpha=1)
plt.scatter(date_x, iyileşen_l, color='blue', linewidth=0.5, alpha=1)
plt.scatter(date_x, test_l, color='gray', linewidth=0.1, alpha=0.5)
plt.title("TÜRKİYE'DEKİ GÜNCEL SON DURUM")
plt.xticks(rotation='vertical')
plt.xlabel('TARİH')
plt.ylabel('SAYI')
plt.legend(loc=0)
plt.grid(color='black', linestyle='--', linewidth=0.5, alpha=0.5, dash_joinstyle='bevel')
plt.show()
date_x = data.Tarih
vaka_l = data.Vaka_Artış_Sayısı
vefat_l = data.Vefat_Artış_Sayısı
iyileşen_l = data.Tedavi_Artış_Sayısı
test_l = data.Test_Sayisi
fig, ((ax1, ax4), (ax2, ax3)) = plt.subplots(2, 2, dpi=150, figsize=(20, 10), sharex='col')
fig.suptitle('GÜNLÜK ARTIŞ MİKTARLARI')
ax1.plot(date_x, vaka_l, '-o', color='orange', linewidth=2, alpha=1)
ax2.plot(date_x, vefat_l, '-o', color='red', linewidth=2, alpha=1)
ax3.plot(date_x, iyileşen_l, '-o', color='blue', linewidth=2, alpha=1)
ax4.plot(date_x, test_l, '-o', color='blue', linewidth=2, alpha=1)
ax1.set_xticklabels(date_x, rotation=90)
ax2.set_xticklabels(date_x, rotation=90)
ax3.set_xticklabels(date_x, rotation=90)
ax4.set_xticklabels(date_x, rotation=90)
ax1.grid(color='black', linestyle='--', linewidth=1, alpha=0.5, dash_joinstyle='bevel')
ax2.grid(color='black', linestyle='--', linewidth=1, alpha=0.5, dash_joinstyle='bevel')
ax3.grid(color='black', linestyle='--', linewidth=1, alpha=0.5, dash_joinstyle='bevel')
ax4.grid(color='black', linestyle='--', linewidth=1, alpha=0.5, dash_joinstyle='bevel')
ax1.set_title('VAKA SAYISI')
ax2.set_title('VEFAT SAYISI')
ax3.set_title('İYİLEŞEN SAYISI')
ax4.set_title('TEST SAYISI')
plt.show()
code
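cell_17 arranges four daily-increase panels on a 2x2 grid whose columns share an x-axis. A minimal runnable sketch of that layout on placeholder series (the data and the Turkish panel titles here are dummies, not notebook values):

import matplotlib.pyplot as plt

x = list(range(10))
series = {
    'VAKA SAYISI': [3 * v for v in x],
    'TEST SAYISI': [50 * v for v in x],
    'VEFAT SAYISI': [v for v in x],
    'İYİLEŞEN SAYISI': [2 * v for v in x],
}
# axes in the same column share an x-axis, mirroring sharex='col' in cell_17
fig, axes = plt.subplots(2, 2, figsize=(10, 6), sharex='col')
fig.suptitle('GÜNLÜK ARTIŞ MİKTARLARI')
for ax, (title, y) in zip(axes.flat, series.items()):
    ax.plot(x, y, '-o')
    ax.set_title(title)
    ax.grid(linestyle='--', alpha=0.5)
plt.show()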
32068608/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data_tests = pd.read_csv('../input/covid19-in-turkey/test_numbers.csv')
data = pd.read_csv('../input/covid19-in-turkey/covid_19_data_tr.csv')
data.rename(columns={'Country/Region': 'Ülke', 'Last_Update': 'Tarih', 'Confirmed': 'Vaka_Sayisi', 'Deaths': 'Vefat_Sayisi', 'Recovered': 'Tedavi_Sayisi'}, inplace=True)
data.drop('Province/State', axis=1, inplace=True)
data.info()
code