Columns:
  path              string    lengths 13 to 17
  screenshot_names  sequence  lengths 1 to 873
  code              string    lengths 0 to 40.4k
  cell_type         string    1 value ('code')
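Each record below is one row of the dataset: a cell path such as 16148029/cell_22, the list of its rendered output images, the cell's accumulated source code, and the cell_type. A minimal sketch of reading such a dump with the Hugging Face datasets library; the dataset id user/kaggle-notebook-cells is a hypothetical placeholder for wherever this file is actually hosted:

from datasets import load_dataset

# Hypothetical dataset id: substitute the real repository name.
ds = load_dataset('user/kaggle-notebook-cells', split='train')
row = ds[0]
print(row['path'])              # e.g. '16148029/cell_22'
print(row['screenshot_names'])  # names of the cell's rendered outputs
print(row['code'])              # the cell's source code
print(row['cell_type'])         # always 'code' in this dump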
16148029/cell_22
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from __future__ import absolute_import, division, print_function, unicode_literals
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import warnings
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
import warnings
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt
train = pd.read_csv('../input/fashion-mnist_train.csv')
train.shape
test = pd.read_csv('../input/fashion-mnist_test.csv')
test.shape
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
train_images = train.iloc[:, 1:785]
train_labels = train.iloc[:, 0]
test_images = test.iloc[:, 1:785]
test_labels = test.iloc[:, 0]
plt.colorbar()
plt.xticks([])
plt.yticks([])
plt.colorbar()
plt.xticks([])
plt.yticks([])
train_images = train_images / 255.0
test_images = test_images / 255.0
for i in range(25):
    plt.xticks([])
    plt.yticks([])
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(64, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
train_images = train_images.values
train_labels = train_labels.values
test_images = test_images.values
test_labels = test_labels.values
model.fit(train_images, train_labels, epochs=10)
test_loss, test_acc = model.evaluate(test_images, test_labels)
predictions = model.predict(test_images)
np.argmax(predictions[0])
x = np.argmax(predictions[999])
print(x)
print('\n')
print(test_labels[999])
code
16148029/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/fashion-mnist_train.csv')
train.shape
test = pd.read_csv('../input/fashion-mnist_test.csv')
test.shape
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
train_images = train.iloc[:, 1:785]
train_labels = train.iloc[:, 0]
test_images = test.iloc[:, 1:785]
test_labels = test.iloc[:, 0]
plt.colorbar()
plt.xticks([])
plt.yticks([])
plt.figure()
# .as_matrix() was removed in pandas 1.0; .to_numpy() is the modern equivalent
plt.imshow(train_images.iloc[4].as_matrix().reshape(28, 28))
plt.colorbar()
plt.xticks([])
plt.yticks([])
plt.show()
print(class_names[3])
code
16148029/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/fashion-mnist_train.csv')
train.shape
test = pd.read_csv('../input/fashion-mnist_test.csv')
test.shape
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
train_images = train.iloc[:, 1:785]
train_labels = train.iloc[:, 0]
test_images = test.iloc[:, 1:785]
test_labels = test.iloc[:, 0]
plt.colorbar()
plt.xticks([])
plt.yticks([])
plt.colorbar()
plt.xticks([])
plt.yticks([])
train_images = train_images / 255.0
test_images = test_images / 255.0
plt.figure(figsize=(10, 10))
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images.iloc[i].as_matrix().reshape(28, 28))
    plt.xlabel(class_names[train_labels[i]])
plt.show()
code
16148029/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/fashion-mnist_train.csv')
test = pd.read_csv('../input/fashion-mnist_test.csv')
test.head()
code
72077843/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import os
import os
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
import os
import pandas as pd
os.getcwd()
os.chdir('/kaggle/')
os.listdir('/kaggle/input')
df = pd.read_csv('input/titanic/train.csv')
df
df.shape
df.groupby('Sex')[['Survived']].mean()
df.pivot_table('Survived', index='Sex', columns='Pclass')
age = pd.cut(df['Age'], [0, 50, 80])
df.pivot_table('Survived', index=['Sex', age], columns='Pclass')
plt.scatter(df['Fare'], df['Pclass'], color='Orange', label='Passenger Paid')
plt.ylabel('Pclass')
plt.xlabel('Price')
plt.legend()
code
72077843/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import os
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
import os
import pandas as pd
os.getcwd()
os.chdir('/kaggle/')
os.listdir('/kaggle/input')
df = pd.read_csv('input/titanic/train.csv')
df
df.shape
df.groupby('Sex')[['Survived']].mean()
df.pivot_table('Survived', index='Sex', columns='Pclass')
code
72077843/cell_4
[ "text_html_output_1.png" ]
import os
import os
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
import os
import pandas as pd
os.getcwd()
os.chdir('/kaggle/')
os.listdir('/kaggle/input')
df = pd.read_csv('input/titanic/train.csv')
df
df.shape
df.describe()
code
72077843/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import os
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import os
import os
import pandas as pd
os.getcwd()
os.chdir('/kaggle/')
os.listdir('/kaggle/input')
df = pd.read_csv('input/titanic/train.csv')
df
df.shape
import seaborn as sns
sns.countplot(df['Survived'])
code
72077843/cell_2
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import os
import os
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
import os
import pandas as pd
os.getcwd()
os.chdir('/kaggle/')
os.listdir('/kaggle/input')
df = pd.read_csv('input/titanic/train.csv')
df
code
72077843/cell_11
[ "text_plain_output_1.png" ]
import os
import os
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import os
import os
import pandas as pd
os.getcwd()
os.chdir('/kaggle/')
os.listdir('/kaggle/input')
df = pd.read_csv('input/titanic/train.csv')
df
df.shape
import seaborn as sns
df.groupby('Sex')[['Survived']].mean()
df.pivot_table('Survived', index='Sex', columns='Pclass')
sns.barplot(x='Pclass', y='Survived', data=df)
code
72077843/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
72077843/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns  # missing in the original cell; sns is used below
# df (the Titanic training frame) is loaded in an earlier cell of this notebook
cols = ['Sex', 'Pclass', 'Age', 'SibSp']
n_rows = 2
n_cols = 2
fig, axs = plt.subplots(n_rows, n_cols, figsize=(n_cols * 8, n_rows * 8))
for r in range(0, n_rows):
    for c in range(0, n_cols):
        i = r * n_cols + c
        ax = axs[r][c]
        sns.countplot(df[cols[i]], hue=df['Survived'], ax=ax)
        ax.set_title(cols[i])
        ax.legend(title='survived', loc='upper right')
plt.tight_layout()
code
72077843/cell_8
[ "text_html_output_1.png" ]
import os
import os
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
import os
import pandas as pd
os.getcwd()
os.chdir('/kaggle/')
os.listdir('/kaggle/input')
df = pd.read_csv('input/titanic/train.csv')
df
df.shape
df.groupby('Sex')[['Survived']].mean()
code
72077843/cell_3
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import os
import os
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
import os
import pandas as pd
os.getcwd()
os.chdir('/kaggle/')
os.listdir('/kaggle/input')
df = pd.read_csv('input/titanic/train.csv')
df
df.shape
code
72077843/cell_10
[ "text_html_output_1.png" ]
import os
import os
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
import os
import pandas as pd
os.getcwd()
os.chdir('/kaggle/')
os.listdir('/kaggle/input')
df = pd.read_csv('input/titanic/train.csv')
df
df.shape
df.groupby('Sex')[['Survived']].mean()
df.pivot_table('Survived', index='Sex', columns='Pclass')
df.pivot_table('Survived', index='Sex', columns='Pclass').plot()
code
72077843/cell_12
[ "text_html_output_1.png" ]
import os
import os
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
import os
import pandas as pd
os.getcwd()
os.chdir('/kaggle/')
os.listdir('/kaggle/input')
df = pd.read_csv('input/titanic/train.csv')
df
df.shape
df.groupby('Sex')[['Survived']].mean()
df.pivot_table('Survived', index='Sex', columns='Pclass')
age = pd.cut(df['Age'], [0, 50, 80])
df.pivot_table('Survived', index=['Sex', age], columns='Pclass')
code
72077843/cell_5
[ "text_html_output_1.png" ]
import os
import os
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
import os
import pandas as pd
os.getcwd()
os.chdir('/kaggle/')
os.listdir('/kaggle/input')
df = pd.read_csv('input/titanic/train.csv')
df
df.shape
df['Survived'].value_counts()
code
122251959/cell_21
[ "text_plain_output_1.png" ]
import math
math.e
math.pi
a = 3.4
math.ceil(a)
math.floor(a)
b = 5.3145
math.trunc(b)
x = 3
math.exp(x)
math.log(1000)
math.log(1000, 10)
math.sin(math.pi / 2)
degree = 90
math.sin(math.radians(degree))
math.sqrt(4)
math.factorial(5)
code
122251959/cell_13
[ "text_plain_output_1.png" ]
import math
math.e
math.pi
a = 3.4
math.ceil(a)
math.floor(a)
b = 5.3145
math.trunc(b)
x = 3
math.exp(x)
code
122251959/cell_9
[ "text_plain_output_1.png" ]
import math
math.e
math.pi
a = 3.4
math.ceil(a)
math.floor(a)
code
122251959/cell_4
[ "text_plain_output_1.png" ]
import math
math.e
code
122251959/cell_23
[ "text_plain_output_1.png" ]
import math
math.e
math.pi
a = 3.4
math.ceil(a)
math.floor(a)
b = 5.3145
math.trunc(b)
x = 3
math.exp(x)
math.log(1000)
math.log(1000, 10)
math.sin(math.pi / 2)
degree = 90
math.sin(math.radians(degree))
math.sqrt(4)
math.factorial(5)
l = [1.2, 2.3, 3.4, 4.5]
sum(l)
math.fsum(l)
code
122251959/cell_20
[ "text_plain_output_1.png" ]
import math
math.e
math.pi
a = 3.4
math.ceil(a)
math.floor(a)
b = 5.3145
math.trunc(b)
x = 3
math.exp(x)
math.log(1000)
math.log(1000, 10)
math.sin(math.pi / 2)
degree = 90
math.sin(math.radians(degree))
math.sqrt(4)
code
122251959/cell_18
[ "text_plain_output_1.png" ]
import math
math.e
math.pi
a = 3.4
math.ceil(a)
math.floor(a)
b = 5.3145
math.trunc(b)
x = 3
math.exp(x)
math.log(1000)
math.log(1000, 10)
math.sin(math.pi / 2)
degree = 90
math.sin(math.radians(degree))
code
122251959/cell_8
[ "text_plain_output_1.png" ]
import math
math.e
math.pi
a = 3.4
math.ceil(a)
code
122251959/cell_15
[ "text_plain_output_1.png" ]
import math
math.e
math.pi
a = 3.4
math.ceil(a)
math.floor(a)
b = 5.3145
math.trunc(b)
x = 3
math.exp(x)
math.log(1000)
math.log(1000, 10)
code
122251959/cell_17
[ "text_plain_output_1.png" ]
import math
math.e
math.pi
a = 3.4
math.ceil(a)
math.floor(a)
b = 5.3145
math.trunc(b)
x = 3
math.exp(x)
math.log(1000)
math.log(1000, 10)
math.sin(math.pi / 2)
code
122251959/cell_14
[ "text_plain_output_1.png" ]
import math
math.e
math.pi
a = 3.4
math.ceil(a)
math.floor(a)
b = 5.3145
math.trunc(b)
x = 3
math.exp(x)
math.log(1000)
code
122251959/cell_22
[ "text_plain_output_1.png" ]
l = [1.2, 2.3, 3.4, 4.5]
sum(l)
code
122251959/cell_10
[ "text_plain_output_1.png" ]
import math
math.e
math.pi
a = 3.4
math.ceil(a)
math.floor(a)
b = 5.3145
math.trunc(b)
code
122251959/cell_5
[ "text_plain_output_1.png" ]
import math
math.e
math.pi
code
33100906/cell_6
[ "text_html_output_1.png" ]
import pandas as pd
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test['PassengerId']
train.columns
train.head()
code
33100906/cell_2
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import os
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
import seaborn as sns
from collections import Counter
import warnings
warnings.filterwarnings('ignore')
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33100906/cell_7
[ "text_html_output_1.png" ]
import pandas as pd
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test['PassengerId']
train.columns
train.describe()
code
33100906/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test['PassengerId']
train.columns
code
49119627/cell_6
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression(C=10)
# X_train, Y_train, X_test, Y_test come from an earlier cell (train/test split not included here)
logreg.fit(X_train, Y_train)
Y_predict1 = logreg.predict(X_test)
score_logreg = logreg.score(X_test, Y_test)
print(score_logreg)
code
49119627/cell_2
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/breast-cancer-prediction-dataset/Breast_cancer_data.csv')
print('Dataset :', data.shape)
x = data.iloc[:, [0, 1, 2, 3]].values
data.info()
data[0:10]
code
49119627/cell_7
[ "text_plain_output_1.png" ]
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression(C=10)
# X_train, Y_train, X_test, Y_test come from an earlier cell (train/test split not included here)
logreg.fit(X_train, Y_train)
Y_predict1 = logreg.predict(X_test)
score_logreg = logreg.score(X_test, Y_test)
from sklearn.feature_selection import RFE
logreg_2 = LogisticRegression()
rfe = RFE(estimator=logreg_2, n_features_to_select=5, step=1)
rfe = rfe.fit(X_train, Y_train)
print('Chosen best 5 feature by rfe:', X_train.columns[rfe.support_])
X_train_3 = rfe.transform(X_train)
X_test_3 = rfe.transform(X_test)
logreg_2 = LogisticRegression()
logreg_2 = logreg_2.fit(X_train_3, Y_train)
Y_predict2 = logreg.predict(X_test_3)
score_logreg = logreg_2.score(X_test_3, Y_test)
print(score_logreg)
code
49119627/cell_5
[ "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
import seaborn as sns
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression(C=10)
# X_train, Y_train, X_test, Y_test come from an earlier cell (train/test split not included here)
logreg.fit(X_train, Y_train)
Y_predict1 = logreg.predict(X_test)
from sklearn.metrics import confusion_matrix
import seaborn as sns
logreg_cm = confusion_matrix(Y_test, Y_predict1)
f, ax = plt.subplots(figsize=(5, 5))
sns.heatmap(logreg_cm, annot=True, linewidth=0.7, linecolor='red', fmt='g', ax=ax, cmap='BuPu')
plt.title('Logistic Regression Classification Confusion Matrix')
plt.xlabel('Y predict')
plt.ylabel('Y test')
plt.show()
code
105199751/cell_21
[ "text_plain_output_1.png" ]
from datetime import datetime
import json
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
BASE_DIR = '/kaggle/input/youtube-dataset-of-countries/Youtube_data/Countries_data'
LIST_CSV = []
LIST_JSON = []
for file in os.listdir(BASE_DIR):
    if file.endswith('.json'):
        LIST_JSON.append(file)
    else:
        LIST_CSV.append(file)
import json
pd.DataFrame.from_dict(json.loads(open(os.path.join(BASE_DIR, 'IN_category_id.json')).read()))
IN_JSON = json.loads(open(os.path.join(BASE_DIR, 'IN_category_id.json')).read())
IN_JSON['items'][0]['snippet']['title']
Category_info = pd.json_normalize(IN_JSON['items'])
Category_info
IN_CSV = pd.read_csv(os.path.join(BASE_DIR, 'INvideos.csv'))
IN_CSV
Category_info.id = Category_info.id.astype('int64')
IN_CSV = IN_CSV.merge(Category_info, left_on='category_id', right_on='id')
IN_CSV.drop(columns=['kind', 'etag', 'id', 'snippet.channelId', 'snippet.assignable'], axis=1, inplace=True)
IN_CSV.rename(columns={'snippet.title': 'cat_title'}, inplace=True)
IN_CSV.columns
IN_CSV.publish_time
'2020-01-06T00:00:00.000Z'[:-1]

def change_to_datetime(time):
    time = datetime.fromisoformat(time[:-1])
    time.strftime('%Y-%m-%d %H:%M:%S')
    return time

IN_CSV.publish_time = IN_CSV.publish_time.apply(lambda val: change_to_datetime(val))
IN_CSV.publish_time
IN_CSV.trending_date = pd.to_datetime(IN_CSV.trending_date, format='%y.%d.%m')
IN_CSV.trending_date
code
105199751/cell_13
[ "text_plain_output_1.png" ]
import json
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
BASE_DIR = '/kaggle/input/youtube-dataset-of-countries/Youtube_data/Countries_data'
LIST_CSV = []
LIST_JSON = []
for file in os.listdir(BASE_DIR):
    if file.endswith('.json'):
        LIST_JSON.append(file)
    else:
        LIST_CSV.append(file)
import json
pd.DataFrame.from_dict(json.loads(open(os.path.join(BASE_DIR, 'IN_category_id.json')).read()))
IN_JSON = json.loads(open(os.path.join(BASE_DIR, 'IN_category_id.json')).read())
IN_JSON['items'][0]['snippet']['title']
Category_info = pd.json_normalize(IN_JSON['items'])
Category_info
IN_CSV = pd.read_csv(os.path.join(BASE_DIR, 'INvideos.csv'))
IN_CSV
Category_info.id = Category_info.id.astype('int64')
IN_CSV = IN_CSV.merge(Category_info, left_on='category_id', right_on='id')
IN_CSV.drop(columns=['kind', 'etag', 'id', 'snippet.channelId', 'snippet.assignable'], axis=1, inplace=True)
IN_CSV.rename(columns={'snippet.title': 'cat_title'}, inplace=True)
IN_CSV.columns
code
105199751/cell_4
[ "text_plain_output_1.png" ]
import json
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
BASE_DIR = '/kaggle/input/youtube-dataset-of-countries/Youtube_data/Countries_data'
LIST_CSV = []
LIST_JSON = []
for file in os.listdir(BASE_DIR):
    if file.endswith('.json'):
        LIST_JSON.append(file)
    else:
        LIST_CSV.append(file)
import json
pd.DataFrame.from_dict(json.loads(open(os.path.join(BASE_DIR, 'IN_category_id.json')).read()))
IN_JSON = json.loads(open(os.path.join(BASE_DIR, 'IN_category_id.json')).read())
IN_JSON['items'][0]['snippet']['title']
code
105199751/cell_6
[ "text_plain_output_1.png" ]
import json
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
BASE_DIR = '/kaggle/input/youtube-dataset-of-countries/Youtube_data/Countries_data'
LIST_CSV = []
LIST_JSON = []
for file in os.listdir(BASE_DIR):
    if file.endswith('.json'):
        LIST_JSON.append(file)
    else:
        LIST_CSV.append(file)
import json
pd.DataFrame.from_dict(json.loads(open(os.path.join(BASE_DIR, 'IN_category_id.json')).read()))
IN_JSON = json.loads(open(os.path.join(BASE_DIR, 'IN_category_id.json')).read())
IN_JSON['items'][0]['snippet']['title']
Category_info = pd.json_normalize(IN_JSON['items'])
Category_info
code
105199751/cell_2
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
BASE_DIR = '/kaggle/input/youtube-dataset-of-countries/Youtube_data/Countries_data'
LIST_CSV = []
LIST_JSON = []
for file in os.listdir(BASE_DIR):
    if file.endswith('.json'):
        LIST_JSON.append(file)
    else:
        LIST_CSV.append(file)
print(LIST_JSON)
print(LIST_CSV)
code
105199751/cell_19
[ "text_plain_output_1.png" ]
from datetime import datetime
import json
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
BASE_DIR = '/kaggle/input/youtube-dataset-of-countries/Youtube_data/Countries_data'
LIST_CSV = []
LIST_JSON = []
for file in os.listdir(BASE_DIR):
    if file.endswith('.json'):
        LIST_JSON.append(file)
    else:
        LIST_CSV.append(file)
import json
pd.DataFrame.from_dict(json.loads(open(os.path.join(BASE_DIR, 'IN_category_id.json')).read()))
IN_JSON = json.loads(open(os.path.join(BASE_DIR, 'IN_category_id.json')).read())
IN_JSON['items'][0]['snippet']['title']
Category_info = pd.json_normalize(IN_JSON['items'])
Category_info
IN_CSV = pd.read_csv(os.path.join(BASE_DIR, 'INvideos.csv'))
IN_CSV
Category_info.id = Category_info.id.astype('int64')
IN_CSV = IN_CSV.merge(Category_info, left_on='category_id', right_on='id')
IN_CSV.drop(columns=['kind', 'etag', 'id', 'snippet.channelId', 'snippet.assignable'], axis=1, inplace=True)
IN_CSV.rename(columns={'snippet.title': 'cat_title'}, inplace=True)
IN_CSV.columns
IN_CSV.publish_time
'2020-01-06T00:00:00.000Z'[:-1]

def change_to_datetime(time):
    time = datetime.fromisoformat(time[:-1])
    time.strftime('%Y-%m-%d %H:%M:%S')
    return time

IN_CSV.publish_time = IN_CSV.publish_time.apply(lambda val: change_to_datetime(val))
IN_CSV.publish_time
code
105199751/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105199751/cell_7
[ "text_plain_output_1.png" ]
import json
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
BASE_DIR = '/kaggle/input/youtube-dataset-of-countries/Youtube_data/Countries_data'
LIST_CSV = []
LIST_JSON = []
for file in os.listdir(BASE_DIR):
    if file.endswith('.json'):
        LIST_JSON.append(file)
    else:
        LIST_CSV.append(file)
import json
pd.DataFrame.from_dict(json.loads(open(os.path.join(BASE_DIR, 'IN_category_id.json')).read()))
IN_JSON = json.loads(open(os.path.join(BASE_DIR, 'IN_category_id.json')).read())
IN_JSON['items'][0]['snippet']['title']
IN_JSON
code
105199751/cell_8
[ "text_html_output_1.png" ]
import json
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
BASE_DIR = '/kaggle/input/youtube-dataset-of-countries/Youtube_data/Countries_data'
LIST_CSV = []
LIST_JSON = []
for file in os.listdir(BASE_DIR):
    if file.endswith('.json'):
        LIST_JSON.append(file)
    else:
        LIST_CSV.append(file)
import json
pd.DataFrame.from_dict(json.loads(open(os.path.join(BASE_DIR, 'IN_category_id.json')).read()))
IN_JSON = json.loads(open(os.path.join(BASE_DIR, 'IN_category_id.json')).read())
IN_JSON['items'][0]['snippet']['title']
Category_info = pd.json_normalize(IN_JSON['items'])
Category_info
IN_CSV = pd.read_csv(os.path.join(BASE_DIR, 'INvideos.csv'))
IN_CSV
code
105199751/cell_3
[ "text_plain_output_1.png" ]
import json
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
BASE_DIR = '/kaggle/input/youtube-dataset-of-countries/Youtube_data/Countries_data'
LIST_CSV = []
LIST_JSON = []
for file in os.listdir(BASE_DIR):
    if file.endswith('.json'):
        LIST_JSON.append(file)
    else:
        LIST_CSV.append(file)
import json
pd.DataFrame.from_dict(json.loads(open(os.path.join(BASE_DIR, 'IN_category_id.json')).read()))
code
105199751/cell_14
[ "text_html_output_1.png" ]
import json
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
BASE_DIR = '/kaggle/input/youtube-dataset-of-countries/Youtube_data/Countries_data'
LIST_CSV = []
LIST_JSON = []
for file in os.listdir(BASE_DIR):
    if file.endswith('.json'):
        LIST_JSON.append(file)
    else:
        LIST_CSV.append(file)
import json
pd.DataFrame.from_dict(json.loads(open(os.path.join(BASE_DIR, 'IN_category_id.json')).read()))
IN_JSON = json.loads(open(os.path.join(BASE_DIR, 'IN_category_id.json')).read())
IN_JSON['items'][0]['snippet']['title']
Category_info = pd.json_normalize(IN_JSON['items'])
Category_info
IN_CSV = pd.read_csv(os.path.join(BASE_DIR, 'INvideos.csv'))
IN_CSV
Category_info.id = Category_info.id.astype('int64')
IN_CSV = IN_CSV.merge(Category_info, left_on='category_id', right_on='id')
IN_CSV.drop(columns=['kind', 'etag', 'id', 'snippet.channelId', 'snippet.assignable'], axis=1, inplace=True)
IN_CSV.rename(columns={'snippet.title': 'cat_title'}, inplace=True)
IN_CSV.columns
IN_CSV.publish_time
code
105199751/cell_10
[ "text_plain_output_1.png" ]
import json
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
BASE_DIR = '/kaggle/input/youtube-dataset-of-countries/Youtube_data/Countries_data'
LIST_CSV = []
LIST_JSON = []
for file in os.listdir(BASE_DIR):
    if file.endswith('.json'):
        LIST_JSON.append(file)
    else:
        LIST_CSV.append(file)
import json
pd.DataFrame.from_dict(json.loads(open(os.path.join(BASE_DIR, 'IN_category_id.json')).read()))
IN_JSON = json.loads(open(os.path.join(BASE_DIR, 'IN_category_id.json')).read())
IN_JSON['items'][0]['snippet']['title']
Category_info = pd.json_normalize(IN_JSON['items'])
Category_info
IN_CSV = pd.read_csv(os.path.join(BASE_DIR, 'INvideos.csv'))
IN_CSV
Category_info.id = Category_info.id.astype('int64')
IN_CSV = IN_CSV.merge(Category_info, left_on='category_id', right_on='id')
print(IN_CSV.columns)
IN_CSV.drop(columns=['kind', 'etag', 'id', 'snippet.channelId', 'snippet.assignable'], axis=1, inplace=True)
code
105199751/cell_12
[ "text_html_output_1.png" ]
import json
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
BASE_DIR = '/kaggle/input/youtube-dataset-of-countries/Youtube_data/Countries_data'
LIST_CSV = []
LIST_JSON = []
for file in os.listdir(BASE_DIR):
    if file.endswith('.json'):
        LIST_JSON.append(file)
    else:
        LIST_CSV.append(file)
import json
pd.DataFrame.from_dict(json.loads(open(os.path.join(BASE_DIR, 'IN_category_id.json')).read()))
IN_JSON = json.loads(open(os.path.join(BASE_DIR, 'IN_category_id.json')).read())
IN_JSON['items'][0]['snippet']['title']
Category_info = pd.json_normalize(IN_JSON['items'])
Category_info
IN_CSV = pd.read_csv(os.path.join(BASE_DIR, 'INvideos.csv'))
IN_CSV
Category_info.id = Category_info.id.astype('int64')
IN_CSV = IN_CSV.merge(Category_info, left_on='category_id', right_on='id')
IN_CSV.drop(columns=['kind', 'etag', 'id', 'snippet.channelId', 'snippet.assignable'], axis=1, inplace=True)
IN_CSV
code
129022538/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/Parkinson_disease.csv')
df.info()
code
129022538/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import random
import warnings
warnings.filterwarnings('ignore')
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.metrics import r2_score, mean_squared_error, mean_squared_log_error
from sklearn.model_selection import train_test_split, cross_val_score, KFold, GridSearchCV
from sklearn.metrics import confusion_matrix, precision_recall_curve, auc, roc_auc_score, roc_curve, recall_score, classification_report
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.ensemble import GradientBoostingRegressor
from imblearn.over_sampling import SMOTE
code
90111237/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
from pathlib import Path
from pathlib import Path
from pytorch_lightning import LightningModule
from sklearn.manifold import TSNE
from tokenizers import Tokenizer
from torchvision import models
from tqdm import tqdm
import albumentations as A
import cv2
import math
import matplotlib.pyplot as plt
import numpy as np
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torch
import numpy as np
import pandas as pd
from pathlib import Path
import os
BASE_PATH = Path('/kaggle/input/h-and-m-personalized-fashion-recommendations/')
MODEL_PATH = Path('/kaggle/input/image-text-embeddings/ssl_resnet18_1337.ckpt')
TOKENIZER_PATH = Path('/kaggle/input/image-text-embeddings/tokenizer.json')
from tokenizers import Tokenizer
TOKENIZER = Tokenizer.from_file(str(TOKENIZER_PATH))
CLS_IDX = TOKENIZER.token_to_id('[CLS]')
PAD_IDX = TOKENIZER.token_to_id('[PAD]')
SEP_IDX = TOKENIZER.token_to_id('[SEP]')

def tokenize(text: str):
    raw_tokens = TOKENIZER.encode(text)
    return raw_tokens.ids

def pad_list(list_integers, context_size: int=90, pad_val: int=PAD_IDX, mode='right'):
    """
    :param list_integers:
    :param context_size:
    :param pad_val:
    :param mode:
    :return:
    """
    list_integers = list_integers[:context_size]
    if len(list_integers) < context_size:
        if mode == 'left':
            list_integers = [pad_val] * (context_size - len(list_integers)) + list_integers
        else:
            list_integers = list_integers + [pad_val] * (context_size - len(list_integers))
    return list_integers

import random
from pathlib import Path
import cv2
import numpy as np
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
import albumentations as A
SIZE = 128
SCALE = 255.0
RESIZE = A.Compose([A.LongestMaxSize(max_size=SIZE, p=1.0), A.PadIfNeeded(min_height=SIZE, min_width=SIZE, p=1.0)])
NORMALIZE = A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=SCALE)

def read_image(image_path: Path) -> np.ndarray:
    bgr_image = cv2.imread(str(image_path))
    rgb_image = bgr_image[:, :, ::-1]
    return rgb_image

def resize(image: np.ndarray) -> np.ndarray:
    reshaped = RESIZE(image=image)['image']
    return reshaped

def normalize(image: np.ndarray) -> np.ndarray:
    normalized = NORMALIZE(image=image)['image']
    return normalized

def preprocess(image: np.ndarray) -> np.ndarray:
    return normalize(resize(image))

import math
import torch
import torch.nn.functional as F
from pytorch_lightning import LightningModule
from torchvision import models
from transformers import get_cosine_schedule_with_warmup

class PositionalEncoding(torch.nn.Module):

    def __init__(self, d_model: int, dropout: float=0.1, max_len: int=5000):
        super().__init__()
        self.dropout = torch.nn.Dropout(p=dropout)
        position = torch.arange(max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        pe = torch.zeros(1, max_len, d_model)
        pe[0:, :, 0::2] = torch.sin(position * div_term)
        pe[0:, :, 1::2] = torch.cos(position * div_term)
        self.register_buffer('pe', pe)
        self.d_model = d_model

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x: Tensor, shape [seq_len, batch_size, embedding_dim]
        """
        x = x + self.pe[:, :x.size(1)] / math.sqrt(self.d_model)
        return self.dropout(x)

class Cola(LightningModule):

    def __init__(self, lr=0.001, use_pretrained=False, dropout=0.2, d_model=128, n_vocab=30000, smoothing=0.1):
        super().__init__()
        self.dropout = dropout
        self.lr = lr
        self.d_model = d_model
        self.n_vocab = n_vocab
        self.smoothing = smoothing
        self.model = models.resnet18(pretrained=use_pretrained)
        self.model.fc = torch.nn.Linear(self.model.fc.in_features, self.d_model)
        self.item_embeddings = torch.nn.Embedding(self.n_vocab, self.d_model)
        self.pos_encoder = PositionalEncoding(d_model=self.d_model, dropout=self.dropout)
        encoder_layer = torch.nn.TransformerEncoderLayer(d_model=self.d_model, nhead=4, dropout=self.dropout, batch_first=True)
        self.encoder = torch.nn.TransformerEncoder(encoder_layer, num_layers=4)
        self.layer_norm = torch.nn.LayerNorm(normalized_shape=self.d_model)
        self.linear = torch.nn.Linear(self.d_model, self.d_model, bias=False)
        self.do = torch.nn.Dropout(p=self.dropout)
        self.save_hyperparameters()

    def encode_image(self, x):
        x = x.permute(0, 3, 1, 2)
        x = self.do(self.model(x))
        x = torch.tanh(self.layer_norm(x))
        return x

    def encode_text(self, x):
        x = self.item_embeddings(x)
        x = self.pos_encoder(x)
        x = self.encoder(x)
        return x[:, 0, :]

    def forward(self, x):
        image, text = x
        encoded_image = self.encode_image(image)
        encoded_image_w = self.linear(encoded_image)
        encoded_text = self.encode_text(text)
        return (encoded_image_w, encoded_text)

df = pd.read_csv(BASE_PATH / 'articles.csv', nrows=None, dtype={'article_id': str})
df['text'] = df.apply(lambda x: ' '.join([str(x['prod_name']), str(x['product_type_name']), str(x['product_group_name']), str(x['graphical_appearance_name']), str(x['colour_group_name']), str(x['perceived_colour_value_name']), str(x['index_name']), str(x['section_name']), str(x['detail_desc'])]), axis=1)
df['image_path'] = df.article_id.apply(lambda x: BASE_PATH / 'images' / x[:3] / f'{x}.jpg')
df = df.sample(n=5000)
model = Cola(lr=0.0001)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.load_state_dict(torch.load(MODEL_PATH, map_location=device)['state_dict'])
model.to(device)
model.eval()
text_embeddings = []
image_embeddings = []
for image_path, text in tqdm(zip(df.image_path.values, df.text.values), total=len(df)):
    if image_path.is_file():
        image = read_image(image_path)
    else:
        image = np.zeros((128, 128, 3))
    image = preprocess(image)
    image_t = torch.from_numpy(image).unsqueeze(0)
    image_t = image_t.to(device)
    text_t = tokenize(text)
    text_t = torch.tensor(pad_list(text_t), dtype=torch.long, device=device).unsqueeze(0)
    with torch.no_grad():
        text_embed = model.encode_text(text_t)
        image_embed = model.encode_image(image_t)
    text_embed = text_embed.squeeze().cpu().tolist()
    image_embed = image_embed.squeeze().cpu().tolist()
    text_embeddings.append(text_embed)
    image_embeddings.append(image_embed)
text_embeddings = np.array(text_embeddings)
image_embeddings = np.array(image_embeddings)
tsne = TSNE(n_components=2, init='random', random_state=0, learning_rate='auto', n_iter=300)
Y = tsne.fit_transform(image_embeddings)
fig = plt.figure(figsize=(12, 12))
for index_name in df.index_name.unique():
    plt.scatter(Y[df.index_name == index_name, 0], Y[df.index_name == index_name, 1], label=index_name, s=3)
plt.title('Cola Image embeddings by index_name')
plt.legend()
plt.show()
code
90111237/cell_2
[ "image_output_1.png" ]
from pathlib import Path import os import numpy as np import pandas as pd from pathlib import Path import os for dirname, _, filenames in os.walk('../input/image-text-embeddings'): for filename in filenames: print(os.path.join(dirname, filename)) BASE_PATH = Path('/kaggle/input/h-and-m-personalized-fashion-recommendations/') MODEL_PATH = Path('/kaggle/input/image-text-embeddings/ssl_resnet18_1337.ckpt') TOKENIZER_PATH = Path('/kaggle/input/image-text-embeddings/tokenizer.json')
code
90111237/cell_11
[ "text_plain_output_1.png" ]
from pathlib import Path
from pathlib import Path
from pytorch_lightning import LightningModule
from tokenizers import Tokenizer
from torchvision import models
from tqdm import tqdm
import albumentations as A
import cv2
import math
import numpy as np
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torch
import numpy as np
import pandas as pd
from pathlib import Path
import os
BASE_PATH = Path('/kaggle/input/h-and-m-personalized-fashion-recommendations/')
MODEL_PATH = Path('/kaggle/input/image-text-embeddings/ssl_resnet18_1337.ckpt')
TOKENIZER_PATH = Path('/kaggle/input/image-text-embeddings/tokenizer.json')
from tokenizers import Tokenizer
TOKENIZER = Tokenizer.from_file(str(TOKENIZER_PATH))
CLS_IDX = TOKENIZER.token_to_id('[CLS]')
PAD_IDX = TOKENIZER.token_to_id('[PAD]')
SEP_IDX = TOKENIZER.token_to_id('[SEP]')

def tokenize(text: str):
    raw_tokens = TOKENIZER.encode(text)
    return raw_tokens.ids

def pad_list(list_integers, context_size: int=90, pad_val: int=PAD_IDX, mode='right'):
    """
    :param list_integers:
    :param context_size:
    :param pad_val:
    :param mode:
    :return:
    """
    list_integers = list_integers[:context_size]
    if len(list_integers) < context_size:
        if mode == 'left':
            list_integers = [pad_val] * (context_size - len(list_integers)) + list_integers
        else:
            list_integers = list_integers + [pad_val] * (context_size - len(list_integers))
    return list_integers

import random
from pathlib import Path
import cv2
import numpy as np
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
import albumentations as A
SIZE = 128
SCALE = 255.0
RESIZE = A.Compose([A.LongestMaxSize(max_size=SIZE, p=1.0), A.PadIfNeeded(min_height=SIZE, min_width=SIZE, p=1.0)])
NORMALIZE = A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=SCALE)

def read_image(image_path: Path) -> np.ndarray:
    bgr_image = cv2.imread(str(image_path))
    rgb_image = bgr_image[:, :, ::-1]
    return rgb_image

def resize(image: np.ndarray) -> np.ndarray:
    reshaped = RESIZE(image=image)['image']
    return reshaped

def normalize(image: np.ndarray) -> np.ndarray:
    normalized = NORMALIZE(image=image)['image']
    return normalized

def preprocess(image: np.ndarray) -> np.ndarray:
    return normalize(resize(image))

import math
import torch
import torch.nn.functional as F
from pytorch_lightning import LightningModule
from torchvision import models
from transformers import get_cosine_schedule_with_warmup

class PositionalEncoding(torch.nn.Module):

    def __init__(self, d_model: int, dropout: float=0.1, max_len: int=5000):
        super().__init__()
        self.dropout = torch.nn.Dropout(p=dropout)
        position = torch.arange(max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        pe = torch.zeros(1, max_len, d_model)
        pe[0:, :, 0::2] = torch.sin(position * div_term)
        pe[0:, :, 1::2] = torch.cos(position * div_term)
        self.register_buffer('pe', pe)
        self.d_model = d_model

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x: Tensor, shape [seq_len, batch_size, embedding_dim]
        """
        x = x + self.pe[:, :x.size(1)] / math.sqrt(self.d_model)
        return self.dropout(x)

class Cola(LightningModule):

    def __init__(self, lr=0.001, use_pretrained=False, dropout=0.2, d_model=128, n_vocab=30000, smoothing=0.1):
        super().__init__()
        self.dropout = dropout
        self.lr = lr
        self.d_model = d_model
        self.n_vocab = n_vocab
        self.smoothing = smoothing
        self.model = models.resnet18(pretrained=use_pretrained)
        self.model.fc = torch.nn.Linear(self.model.fc.in_features, self.d_model)
        self.item_embeddings = torch.nn.Embedding(self.n_vocab, self.d_model)
        self.pos_encoder = PositionalEncoding(d_model=self.d_model, dropout=self.dropout)
        encoder_layer = torch.nn.TransformerEncoderLayer(d_model=self.d_model, nhead=4, dropout=self.dropout, batch_first=True)
        self.encoder = torch.nn.TransformerEncoder(encoder_layer, num_layers=4)
        self.layer_norm = torch.nn.LayerNorm(normalized_shape=self.d_model)
        self.linear = torch.nn.Linear(self.d_model, self.d_model, bias=False)
        self.do = torch.nn.Dropout(p=self.dropout)
        self.save_hyperparameters()

    def encode_image(self, x):
        x = x.permute(0, 3, 1, 2)
        x = self.do(self.model(x))
        x = torch.tanh(self.layer_norm(x))
        return x

    def encode_text(self, x):
        x = self.item_embeddings(x)
        x = self.pos_encoder(x)
        x = self.encoder(x)
        return x[:, 0, :]

    def forward(self, x):
        image, text = x
        encoded_image = self.encode_image(image)
        encoded_image_w = self.linear(encoded_image)
        encoded_text = self.encode_text(text)
        return (encoded_image_w, encoded_text)

df = pd.read_csv(BASE_PATH / 'articles.csv', nrows=None, dtype={'article_id': str})
df['text'] = df.apply(lambda x: ' '.join([str(x['prod_name']), str(x['product_type_name']), str(x['product_group_name']), str(x['graphical_appearance_name']), str(x['colour_group_name']), str(x['perceived_colour_value_name']), str(x['index_name']), str(x['section_name']), str(x['detail_desc'])]), axis=1)
df['image_path'] = df.article_id.apply(lambda x: BASE_PATH / 'images' / x[:3] / f'{x}.jpg')
df = df.sample(n=5000)
model = Cola(lr=0.0001)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.load_state_dict(torch.load(MODEL_PATH, map_location=device)['state_dict'])
model.to(device)
model.eval()
text_embeddings = []
image_embeddings = []
for image_path, text in tqdm(zip(df.image_path.values, df.text.values), total=len(df)):
    if image_path.is_file():
        image = read_image(image_path)
    else:
        image = np.zeros((128, 128, 3))
    image = preprocess(image)
    image_t = torch.from_numpy(image).unsqueeze(0)
    image_t = image_t.to(device)
    text_t = tokenize(text)
    text_t = torch.tensor(pad_list(text_t), dtype=torch.long, device=device).unsqueeze(0)
    with torch.no_grad():
        text_embed = model.encode_text(text_t)
        image_embed = model.encode_image(image_t)
    text_embed = text_embed.squeeze().cpu().tolist()
    image_embed = image_embed.squeeze().cpu().tolist()
    text_embeddings.append(text_embed)
    image_embeddings.append(image_embed)
text_embeddings = np.array(text_embeddings)
image_embeddings = np.array(image_embeddings)
code
90111237/cell_16
[ "image_output_1.png" ]
from pathlib import Path
from pathlib import Path
from pytorch_lightning import LightningModule
from sklearn.manifold import TSNE
from tokenizers import Tokenizer
from torchvision import models
from tqdm import tqdm
import albumentations as A
import cv2
import math
import matplotlib.pyplot as plt
import numpy as np
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torch
import numpy as np
import pandas as pd
from pathlib import Path
import os
BASE_PATH = Path('/kaggle/input/h-and-m-personalized-fashion-recommendations/')
MODEL_PATH = Path('/kaggle/input/image-text-embeddings/ssl_resnet18_1337.ckpt')
TOKENIZER_PATH = Path('/kaggle/input/image-text-embeddings/tokenizer.json')
from tokenizers import Tokenizer
TOKENIZER = Tokenizer.from_file(str(TOKENIZER_PATH))
CLS_IDX = TOKENIZER.token_to_id('[CLS]')
PAD_IDX = TOKENIZER.token_to_id('[PAD]')
SEP_IDX = TOKENIZER.token_to_id('[SEP]')

def tokenize(text: str):
    raw_tokens = TOKENIZER.encode(text)
    return raw_tokens.ids

def pad_list(list_integers, context_size: int=90, pad_val: int=PAD_IDX, mode='right'):
    """
    :param list_integers:
    :param context_size:
    :param pad_val:
    :param mode:
    :return:
    """
    list_integers = list_integers[:context_size]
    if len(list_integers) < context_size:
        if mode == 'left':
            list_integers = [pad_val] * (context_size - len(list_integers)) + list_integers
        else:
            list_integers = list_integers + [pad_val] * (context_size - len(list_integers))
    return list_integers

import random
from pathlib import Path
import cv2
import numpy as np
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
import albumentations as A
SIZE = 128
SCALE = 255.0
RESIZE = A.Compose([A.LongestMaxSize(max_size=SIZE, p=1.0), A.PadIfNeeded(min_height=SIZE, min_width=SIZE, p=1.0)])
NORMALIZE = A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=SCALE)

def read_image(image_path: Path) -> np.ndarray:
    bgr_image = cv2.imread(str(image_path))
    rgb_image = bgr_image[:, :, ::-1]
    return rgb_image

def resize(image: np.ndarray) -> np.ndarray:
    reshaped = RESIZE(image=image)['image']
    return reshaped

def normalize(image: np.ndarray) -> np.ndarray:
    normalized = NORMALIZE(image=image)['image']
    return normalized

def preprocess(image: np.ndarray) -> np.ndarray:
    return normalize(resize(image))

import math
import torch
import torch.nn.functional as F
from pytorch_lightning import LightningModule
from torchvision import models
from transformers import get_cosine_schedule_with_warmup

class PositionalEncoding(torch.nn.Module):

    def __init__(self, d_model: int, dropout: float=0.1, max_len: int=5000):
        super().__init__()
        self.dropout = torch.nn.Dropout(p=dropout)
        position = torch.arange(max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        pe = torch.zeros(1, max_len, d_model)
        pe[0:, :, 0::2] = torch.sin(position * div_term)
        pe[0:, :, 1::2] = torch.cos(position * div_term)
        self.register_buffer('pe', pe)
        self.d_model = d_model

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x: Tensor, shape [seq_len, batch_size, embedding_dim]
        """
        x = x + self.pe[:, :x.size(1)] / math.sqrt(self.d_model)
        return self.dropout(x)

class Cola(LightningModule):

    def __init__(self, lr=0.001, use_pretrained=False, dropout=0.2, d_model=128, n_vocab=30000, smoothing=0.1):
        super().__init__()
        self.dropout = dropout
        self.lr = lr
        self.d_model = d_model
        self.n_vocab = n_vocab
        self.smoothing = smoothing
        self.model = models.resnet18(pretrained=use_pretrained)
        self.model.fc = torch.nn.Linear(self.model.fc.in_features, self.d_model)
        self.item_embeddings = torch.nn.Embedding(self.n_vocab, self.d_model)
        self.pos_encoder = PositionalEncoding(d_model=self.d_model, dropout=self.dropout)
        encoder_layer = torch.nn.TransformerEncoderLayer(d_model=self.d_model, nhead=4, dropout=self.dropout, batch_first=True)
        self.encoder = torch.nn.TransformerEncoder(encoder_layer, num_layers=4)
        self.layer_norm = torch.nn.LayerNorm(normalized_shape=self.d_model)
        self.linear = torch.nn.Linear(self.d_model, self.d_model, bias=False)
        self.do = torch.nn.Dropout(p=self.dropout)
        self.save_hyperparameters()

    def encode_image(self, x):
        x = x.permute(0, 3, 1, 2)
        x = self.do(self.model(x))
        x = torch.tanh(self.layer_norm(x))
        return x

    def encode_text(self, x):
        x = self.item_embeddings(x)
        x = self.pos_encoder(x)
        x = self.encoder(x)
        return x[:, 0, :]

    def forward(self, x):
        image, text = x
        encoded_image = self.encode_image(image)
        encoded_image_w = self.linear(encoded_image)
        encoded_text = self.encode_text(text)
        return (encoded_image_w, encoded_text)

df = pd.read_csv(BASE_PATH / 'articles.csv', nrows=None, dtype={'article_id': str})
df['text'] = df.apply(lambda x: ' '.join([str(x['prod_name']), str(x['product_type_name']), str(x['product_group_name']), str(x['graphical_appearance_name']), str(x['colour_group_name']), str(x['perceived_colour_value_name']), str(x['index_name']), str(x['section_name']), str(x['detail_desc'])]), axis=1)
df['image_path'] = df.article_id.apply(lambda x: BASE_PATH / 'images' / x[:3] / f'{x}.jpg')
df = df.sample(n=5000)
model = Cola(lr=0.0001)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.load_state_dict(torch.load(MODEL_PATH, map_location=device)['state_dict'])
model.to(device)
model.eval()
text_embeddings = []
image_embeddings = []
for image_path, text in tqdm(zip(df.image_path.values, df.text.values), total=len(df)):
    if image_path.is_file():
        image = read_image(image_path)
    else:
        image = np.zeros((128, 128, 3))
    image = preprocess(image)
    image_t = torch.from_numpy(image).unsqueeze(0)
    image_t = image_t.to(device)
    text_t = tokenize(text)
    text_t = torch.tensor(pad_list(text_t), dtype=torch.long, device=device).unsqueeze(0)
    with torch.no_grad():
        text_embed = model.encode_text(text_t)
        image_embed = model.encode_image(image_t)
    text_embed = text_embed.squeeze().cpu().tolist()
    image_embed = image_embed.squeeze().cpu().tolist()
    text_embeddings.append(text_embed)
    image_embeddings.append(image_embed)
text_embeddings = np.array(text_embeddings)
image_embeddings = np.array(image_embeddings)
tsne = TSNE(
    n_components=2,
    init="random",
    random_state=0,
    learning_rate="auto",
    n_iter=300,
)
Y = tsne.fit_transform(image_embeddings)
fig = plt.figure(figsize=(12, 12))
for index_name in df.index_name.unique():
    plt.scatter(Y[df.index_name == index_name, 0], Y[df.index_name == index_name, 1], label=index_name, s=3)
plt.title("Cola Image embeddings by index_name")
plt.legend()
plt.show()
tsne = TSNE(
    n_components=2,
    init="random",
    random_state=0,
    learning_rate="auto",
    n_iter=300,
)
Y = tsne.fit_transform(text_embeddings)
fig = plt.figure(figsize=(12, 12))
for index_name in df.index_name.unique():
    plt.scatter(Y[df.index_name == index_name, 0], Y[df.index_name == index_name, 1], label=index_name, s=3)
plt.title("Cola Text embeddings by index_name")
plt.legend()
plt.show()
index = 10
most_similar = np.argsort(-image_embeddings @ image_embeddings[index, :].T)[:9].tolist()
_, axs = plt.subplots(3, 3, figsize=(12, 12))
axs = axs.flatten()
for i, ax in zip(most_similar, axs):
    ax.imshow(read_image(df.image_path.values[i]))
    ax.axis('off')
    if i == index:
        ax.title.set_text('Query image')
    else:
        ax.title.set_text('Result Image')
plt.axis('off')
plt.show()
code
90111237/cell_14
[ "image_output_1.png" ]
from pathlib import Path
from pathlib import Path
from pytorch_lightning import LightningModule
from sklearn.manifold import TSNE
from tokenizers import Tokenizer
from torchvision import models
from tqdm import tqdm
import albumentations as A
import cv2
import math
import matplotlib.pyplot as plt
import numpy as np
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torch
import numpy as np
import pandas as pd
from pathlib import Path
import os
BASE_PATH = Path('/kaggle/input/h-and-m-personalized-fashion-recommendations/')
MODEL_PATH = Path('/kaggle/input/image-text-embeddings/ssl_resnet18_1337.ckpt')
TOKENIZER_PATH = Path('/kaggle/input/image-text-embeddings/tokenizer.json')
from tokenizers import Tokenizer
TOKENIZER = Tokenizer.from_file(str(TOKENIZER_PATH))
CLS_IDX = TOKENIZER.token_to_id('[CLS]')
PAD_IDX = TOKENIZER.token_to_id('[PAD]')
SEP_IDX = TOKENIZER.token_to_id('[SEP]')

def tokenize(text: str):
    raw_tokens = TOKENIZER.encode(text)
    return raw_tokens.ids

def pad_list(list_integers, context_size: int=90, pad_val: int=PAD_IDX, mode='right'):
    """
    :param list_integers:
    :param context_size:
    :param pad_val:
    :param mode:
    :return:
    """
    list_integers = list_integers[:context_size]
    if len(list_integers) < context_size:
        if mode == 'left':
            list_integers = [pad_val] * (context_size - len(list_integers)) + list_integers
        else:
            list_integers = list_integers + [pad_val] * (context_size - len(list_integers))
    return list_integers

import random
from pathlib import Path
import cv2
import numpy as np
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
import albumentations as A
SIZE = 128
SCALE = 255.0
RESIZE = A.Compose([A.LongestMaxSize(max_size=SIZE, p=1.0), A.PadIfNeeded(min_height=SIZE, min_width=SIZE, p=1.0)])
NORMALIZE = A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=SCALE)

def read_image(image_path: Path) -> np.ndarray:
    bgr_image = cv2.imread(str(image_path))
    rgb_image = bgr_image[:, :, ::-1]
    return rgb_image

def resize(image: np.ndarray) -> np.ndarray:
    reshaped = RESIZE(image=image)['image']
    return reshaped

def normalize(image: np.ndarray) -> np.ndarray:
    normalized = NORMALIZE(image=image)['image']
    return normalized

def preprocess(image: np.ndarray) -> np.ndarray:
    return normalize(resize(image))

import math
import torch
import torch.nn.functional as F
from pytorch_lightning import LightningModule
from torchvision import models
from transformers import get_cosine_schedule_with_warmup

class PositionalEncoding(torch.nn.Module):

    def __init__(self, d_model: int, dropout: float=0.1, max_len: int=5000):
        super().__init__()
        self.dropout = torch.nn.Dropout(p=dropout)
        position = torch.arange(max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        pe = torch.zeros(1, max_len, d_model)
        pe[0:, :, 0::2] = torch.sin(position * div_term)
        pe[0:, :, 1::2] = torch.cos(position * div_term)
        self.register_buffer('pe', pe)
        self.d_model = d_model

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x: Tensor, shape [seq_len, batch_size, embedding_dim]
        """
        x = x + self.pe[:, :x.size(1)] / math.sqrt(self.d_model)
        return self.dropout(x)

class Cola(LightningModule):

    def __init__(self, lr=0.001, use_pretrained=False, dropout=0.2, d_model=128, n_vocab=30000, smoothing=0.1):
        super().__init__()
        self.dropout = dropout
        self.lr = lr
        self.d_model = d_model
        self.n_vocab = n_vocab
        self.smoothing = smoothing
        self.model = models.resnet18(pretrained=use_pretrained)
        self.model.fc = torch.nn.Linear(self.model.fc.in_features, self.d_model)
        self.item_embeddings = torch.nn.Embedding(self.n_vocab, self.d_model)
        self.pos_encoder = PositionalEncoding(d_model=self.d_model, dropout=self.dropout)
        encoder_layer = torch.nn.TransformerEncoderLayer(d_model=self.d_model, nhead=4, dropout=self.dropout, batch_first=True)
        self.encoder = torch.nn.TransformerEncoder(encoder_layer, num_layers=4)
        self.layer_norm = torch.nn.LayerNorm(normalized_shape=self.d_model)
        self.linear = torch.nn.Linear(self.d_model, self.d_model, bias=False)
        self.do = torch.nn.Dropout(p=self.dropout)
        self.save_hyperparameters()

    def encode_image(self, x):
        x = x.permute(0, 3, 1, 2)
        x = self.do(self.model(x))
        x = torch.tanh(self.layer_norm(x))
        return x

    def encode_text(self, x):
        x = self.item_embeddings(x)
        x = self.pos_encoder(x)
        x = self.encoder(x)
        return x[:, 0, :]

    def forward(self, x):
        image, text = x
        encoded_image = self.encode_image(image)
        encoded_image_w = self.linear(encoded_image)
        encoded_text = self.encode_text(text)
        return (encoded_image_w, encoded_text)

df = pd.read_csv(BASE_PATH / 'articles.csv', nrows=None, dtype={'article_id': str})
df['text'] = df.apply(lambda x: ' '.join([str(x['prod_name']), str(x['product_type_name']), str(x['product_group_name']), str(x['graphical_appearance_name']), str(x['colour_group_name']), str(x['perceived_colour_value_name']), str(x['index_name']), str(x['section_name']), str(x['detail_desc'])]), axis=1)
df['image_path'] = df.article_id.apply(lambda x: BASE_PATH / 'images' / x[:3] / f'{x}.jpg')
df = df.sample(n=5000)
model = Cola(lr=0.0001)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.load_state_dict(torch.load(MODEL_PATH, map_location=device)['state_dict'])
model.to(device)
model.eval()
text_embeddings = []
image_embeddings = []
for image_path, text in tqdm(zip(df.image_path.values, df.text.values), total=len(df)):
    if image_path.is_file():
        image = read_image(image_path)
    else:
        image = np.zeros((128, 128, 3))
    image = preprocess(image)
    image_t = torch.from_numpy(image).unsqueeze(0)
    image_t = image_t.to(device)
    text_t = tokenize(text)
    text_t = torch.tensor(pad_list(text_t), dtype=torch.long, device=device).unsqueeze(0)
    with torch.no_grad():
        text_embed = model.encode_text(text_t)
        image_embed = model.encode_image(image_t)
    text_embed = text_embed.squeeze().cpu().tolist()
    image_embed = image_embed.squeeze().cpu().tolist()
    text_embeddings.append(text_embed)
    image_embeddings.append(image_embed)
text_embeddings = np.array(text_embeddings)
image_embeddings = np.array(image_embeddings)
tsne = TSNE(
    n_components=2,
    init="random",
    random_state=0,
    learning_rate="auto",
    n_iter=300,
)
Y = tsne.fit_transform(image_embeddings)
fig = plt.figure(figsize=(12, 12))
for index_name in df.index_name.unique():
    plt.scatter(Y[df.index_name == index_name, 0], Y[df.index_name == index_name, 1], label=index_name, s=3)
plt.title("Cola Image embeddings by index_name")
plt.legend()
plt.show()
tsne = TSNE(n_components=2, init='random', random_state=0, learning_rate='auto', n_iter=300)
Y = tsne.fit_transform(text_embeddings)
fig = plt.figure(figsize=(12, 12))
for index_name in df.index_name.unique():
    plt.scatter(Y[df.index_name == index_name, 0], Y[df.index_name == index_name, 1], label=index_name, s=3)
plt.title('Cola Text embeddings by index_name')
plt.legend()
plt.show()
code
88083134/cell_4
[ "image_output_1.png" ]
import pandas as pd
drug = pd.read_csv('../input/drug-classification/drug200.csv')
drug.columns
code
88083134/cell_6
[ "image_output_1.png" ]
import pandas as pd
drug = pd.read_csv('../input/drug-classification/drug200.csv')
drug.columns
(drug.size, drug.shape)
drug.info()
code
88083134/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
drug = pd.read_csv('../input/drug-classification/drug200.csv')
drug.columns
(drug.size, drug.shape)
drug.isnull().sum().sum()
code
88083134/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import OneHotEncoder, LabelBinarizer, StandardScaler
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE
plt.style.use('fivethirtyeight')
import warnings
from sklearn.tree import DecisionTreeClassifier, plot_tree, export_text, export_graphviz
import graphviz
from sklearn.metrics import confusion_matrix, plot_confusion_matrix, classification_report, RocCurveDisplay
from sklearn.model_selection import train_test_split, GridSearchCV
warnings.filterwarnings('ignore')
drug = pd.read_csv('../input/drug-classification/drug200.csv')
drug.columns
(drug.size, drug.shape)
drug.isnull().sum().sum()
sns.kdeplot(drug['Age'], hue=drug['Drug'])
code
88083134/cell_8
[ "image_output_1.png" ]
import pandas as pd
drug = pd.read_csv('../input/drug-classification/drug200.csv')
drug.columns
(drug.size, drug.shape)
drug.isnull().sum().sum()
drug.describe()
code
88083134/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import OneHotEncoder, LabelBinarizer, StandardScaler
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE
plt.style.use('fivethirtyeight')
import warnings
from sklearn.tree import DecisionTreeClassifier, plot_tree, export_text, export_graphviz
import graphviz
from sklearn.metrics import confusion_matrix, plot_confusion_matrix, classification_report, RocCurveDisplay
from sklearn.model_selection import train_test_split, GridSearchCV
warnings.filterwarnings('ignore')
drug = pd.read_csv('../input/drug-classification/drug200.csv')
drug.columns
(drug.size, drug.shape)
drug.isnull().sum().sum()
plt.figure(figsize=(12, 5))
plt.subplot(121)
sns.distplot(drug['Na_to_K'], kde=False, color='violet')
plt.subplot(122)
sns.kdeplot(drug['Na_to_K'], shade=True, color='teal')
plt.axvline(drug['Na_to_K'].mean(), color='red', label='mean->' + str(drug['Na_to_K'].mean()))
plt.axvline(drug['Na_to_K'].median(), color='black', label='median->' + str(drug['Na_to_K'].median()))
plt.legend(bbox_to_anchor=(1.3, 1))
plt.suptitle('Distribution of the Sodium to Potassium Ratio Variable', fontsize=20, color='Orangered', fontstyle='italic')
plt.show()
code
88083134/cell_3
[ "image_output_1.png" ]
import pandas as pd
drug = pd.read_csv('../input/drug-classification/drug200.csv')
drug.head()
code
88083134/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import OneHotEncoder, LabelBinarizer, StandardScaler
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE
plt.style.use('fivethirtyeight')
import warnings
from sklearn.tree import DecisionTreeClassifier, plot_tree, export_text, export_graphviz
import graphviz
from sklearn.metrics import confusion_matrix, plot_confusion_matrix, classification_report, RocCurveDisplay
from sklearn.model_selection import train_test_split, GridSearchCV
warnings.filterwarnings('ignore')
drug = pd.read_csv('../input/drug-classification/drug200.csv')
drug.columns
(drug.size, drug.shape)
drug.isnull().sum().sum()
plt.figure(figsize=(12, 5))
plt.subplot(121)
sns.distplot(drug['Age'], kde=False, color='green')
plt.subplot(122)
sns.kdeplot(drug['Age'], shade=True, color='orangered')
plt.axvline(drug['Age'].mean(), color='red', label='mean->' + str(drug['Age'].mean()))
plt.axvline(drug['Age'].median(), color='black', label='median->' + str(drug['Age'].median()))
plt.legend(bbox_to_anchor=(1.2, 1))
plt.suptitle('Distribution of the Age Variable', fontsize=20, color='DarkSlateBlue', fontstyle='italic')
plt.show()
code
88083134/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import OneHotEncoder, LabelBinarizer, StandardScaler
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE
plt.style.use('fivethirtyeight')
import warnings
from sklearn.tree import DecisionTreeClassifier, plot_tree, export_text, export_graphviz
import graphviz
from sklearn.metrics import confusion_matrix, plot_confusion_matrix, classification_report, RocCurveDisplay
from sklearn.model_selection import train_test_split, GridSearchCV
warnings.filterwarnings('ignore')
drug = pd.read_csv('../input/drug-classification/drug200.csv')
drug.columns
(drug.size, drug.shape)
drug.isnull().sum().sum()
sns.countplot(drug['Drug'], orient='v')
plt.show()
code
88083134/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import OneHotEncoder, LabelBinarizer, StandardScaler
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE
plt.style.use('fivethirtyeight')
import warnings
from sklearn.tree import DecisionTreeClassifier, plot_tree, export_text, export_graphviz
import graphviz
from sklearn.metrics import confusion_matrix, plot_confusion_matrix, classification_report, RocCurveDisplay
from sklearn.model_selection import train_test_split, GridSearchCV
warnings.filterwarnings('ignore')
drug = pd.read_csv('../input/drug-classification/drug200.csv')
drug.columns
(drug.size, drug.shape)
drug.isnull().sum().sum()
plt.figure(figsize=(14, 5))
plt.subplot(131)
plt.pie(drug['Sex'].value_counts(), labels=drug['Sex'].value_counts().index, autopct='%0.2f%%', wedgeprops={'width': 0.2}, shadow=True)
plt.title('Gender Composition')
plt.subplot(132)
plt.pie(drug['BP'].value_counts(), labels=drug['BP'].value_counts().index, autopct='%0.2f%%', wedgeprops={'width': 0.3}, shadow=True, colors=['red', 'green', 'orange'], pctdistance=0.4)
plt.title('Blood Pressure Composition')
plt.subplot(133)
plt.pie(drug['Cholesterol'].value_counts(), labels=drug['Cholesterol'].value_counts().index, autopct='%0.2f%%', wedgeprops={'width': 0.2}, shadow=True)
plt.title('Cholesterol Composition')
plt.show()
code
88083134/cell_5
[ "image_output_1.png" ]
import pandas as pd
drug = pd.read_csv('../input/drug-classification/drug200.csv')
drug.columns
(drug.size, drug.shape)
code
122246772/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_test = pd.read_csv('/kaggle/input/titanic/test.csv')
(df_train.shape, df_test.shape)
df = pd.concat([df_train, df_test], axis=0)
df.isnull().sum()
df.drop(['Name', 'Cabin'], axis=1, inplace=True)
df.groupby('Sex').size()
df.drop(['Ticket'], axis=1, inplace=True)
df.isnull().sum()
df.isna().sum()
df_train = df[df['Survived'].isna() == False]
y = df_train.loc[:, 'Survived']
y
X = df_train.drop(['Survived'], axis=1)
X
code
122246772/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_test = pd.read_csv('/kaggle/input/titanic/test.csv')
(df_train.shape, df_test.shape)
df = pd.concat([df_train, df_test], axis=0)
df.isnull().sum()
df.drop(['Name', 'Cabin'], axis=1, inplace=True)
df.info()
code
122246772/cell_4
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_test = pd.read_csv('/kaggle/input/titanic/test.csv')
df_test.head()
code
122246772/cell_34
[ "text_plain_output_1.png" ]
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score, accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import GridSearchCV
from xgboost import XGBClassifier
xgb = XGBClassifier()
model1 = xgb.fit(X_train, y_train)
y_hat = model1.predict(X_test)
f1 = f1_score(y_test, y_hat)
acc = accuracy_score(y_test, y_hat)
(f1, acc)
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier()
model2 = rfc.fit(X_train, y_train)
y_hat2 = model2.predict(X_test)
f1 = f1_score(y_test, y_hat2)
acc = accuracy_score(y_test, y_hat2)
(f1, acc)
parameters = {'booster': ['gbtree', 'gblinear', 'dart'], 'learning_rate': [0.1, 0.3, 0.6, 0.9, 1], 'n_estimators': [0.1, 1, 10, 15]}
grid = GridSearchCV(xgb, param_grid=parameters, cv=5, verbose=0)
grid.fit(X_train, y_train)
xgb = XGBClassifier(booster='gbtree', learning_rate=0.3, n_estimators=10)
model1 = xgb.fit(X_train, y_train)
y_hat = model1.predict(X_test)
f1 = f1_score(y_test, y_hat)
acc = accuracy_score(y_test, y_hat)
(f1, acc)
from sklearn.model_selection import GridSearchCV
rfc = RandomForestClassifier()
parameters = {'criterion': ['gini', 'entropy'], 'max_features': ['auto', 'sqrt', 'log2'], 'n_estimators': [15, 50, 100, 200]}
grid = GridSearchCV(rfc, param_grid=parameters, cv=5, verbose=1)
grid.fit(X_train, y_train)
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(criterion='entropy', max_features='auto', n_estimators=50, random_state=123)
model3 = rfc.fit(X_train, y_train)
y_hat3 = model3.predict(X_test)
f1 = f1_score(y_test, y_hat3)
acc = accuracy_score(y_test, y_hat3)
(f1, acc)
from sklearn.ensemble import GradientBoostingClassifier
gb = GradientBoostingClassifier()
parameters = {'loss': ['deviance', 'exponential'], 'criterion': ['friedman_mse', 'squared_error'], 'learning_rate': [0.1, 0.3, 10], 'n_estimators': [10, 100, 200]}
grid = GridSearchCV(gb, param_grid=parameters, cv=5, verbose=1)
grid.fit(X_train, y_train)
gb = GradientBoostingClassifier(criterion='friedman_mse', learning_rate=0.1, loss='exponential', n_estimators=100, random_state=123)
model = gb.fit(X_train, y_train)
y_hat = model.predict(X_test)
f1 = f1_score(y_test, y_hat)
acc = accuracy_score(y_test, y_hat)
(f1, acc)
code
122246772/cell_23
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import Normalizer
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_test = pd.read_csv('/kaggle/input/titanic/test.csv')
(df_train.shape, df_test.shape)
df = pd.concat([df_train, df_test], axis=0)
df.isnull().sum()
df.drop(['Name', 'Cabin'], axis=1, inplace=True)
df.groupby('Sex').size()
df.drop(['Ticket'], axis=1, inplace=True)
df.isnull().sum()
df.isna().sum()
df_train = df[df['Survived'].isna() == False]
y = df_train.loc[:, 'Survived']
y
X = df_train.drop(['Survived'], axis=1)
X
norm = Normalizer()
X = norm.fit_transform(X)
X
code
122246772/cell_30
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score, accuracy_score
from xgboost import XGBClassifier
xgb = XGBClassifier()
model1 = xgb.fit(X_train, y_train)
y_hat = model1.predict(X_test)
f1 = f1_score(y_test, y_hat)
acc = accuracy_score(y_test, y_hat)
(f1, acc)
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier()
model2 = rfc.fit(X_train, y_train)
y_hat2 = model2.predict(X_test)
f1 = f1_score(y_test, y_hat2)
acc = accuracy_score(y_test, y_hat2)
(f1, acc)
xgb = XGBClassifier(booster='gbtree', learning_rate=0.3, n_estimators=10)
model1 = xgb.fit(X_train, y_train)
y_hat = model1.predict(X_test)
f1 = f1_score(y_test, y_hat)
acc = accuracy_score(y_test, y_hat)
(f1, acc)
code
122246772/cell_33
[ "text_plain_output_1.png" ]
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score, accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import GridSearchCV
from xgboost import XGBClassifier
xgb = XGBClassifier()
model1 = xgb.fit(X_train, y_train)
y_hat = model1.predict(X_test)
f1 = f1_score(y_test, y_hat)
acc = accuracy_score(y_test, y_hat)
(f1, acc)
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier()
model2 = rfc.fit(X_train, y_train)
y_hat2 = model2.predict(X_test)
f1 = f1_score(y_test, y_hat2)
acc = accuracy_score(y_test, y_hat2)
(f1, acc)
parameters = {'booster': ['gbtree', 'gblinear', 'dart'], 'learning_rate': [0.1, 0.3, 0.6, 0.9, 1], 'n_estimators': [0.1, 1, 10, 15]}
grid = GridSearchCV(xgb, param_grid=parameters, cv=5, verbose=0)
grid.fit(X_train, y_train)
from sklearn.model_selection import GridSearchCV
rfc = RandomForestClassifier()
parameters = {'criterion': ['gini', 'entropy'], 'max_features': ['auto', 'sqrt', 'log2'], 'n_estimators': [15, 50, 100, 200]}
grid = GridSearchCV(rfc, param_grid=parameters, cv=5, verbose=1)
grid.fit(X_train, y_train)
from sklearn.ensemble import GradientBoostingClassifier
gb = GradientBoostingClassifier()
parameters = {'loss': ['deviance', 'exponential'], 'criterion': ['friedman_mse', 'squared_error'], 'learning_rate': [0.1, 0.3, 10], 'n_estimators': [10, 100, 200]}
grid = GridSearchCV(gb, param_grid=parameters, cv=5, verbose=1)
grid.fit(X_train, y_train)
print(grid.best_params_)
code
122246772/cell_20
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_test = pd.read_csv('/kaggle/input/titanic/test.csv')
(df_train.shape, df_test.shape)
df = pd.concat([df_train, df_test], axis=0)
df.isnull().sum()
df.drop(['Name', 'Cabin'], axis=1, inplace=True)
df.groupby('Sex').size()
df.drop(['Ticket'], axis=1, inplace=True)
df.isnull().sum()
df.isna().sum()
df_train = df[df['Survived'].isna() == False]
y = df_train.loc[:, 'Survived']
y
code
122246772/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_test = pd.read_csv('/kaggle/input/titanic/test.csv')
(df_train.shape, df_test.shape)
df = pd.concat([df_train, df_test], axis=0)
df.tail()
code
122246772/cell_29
[ "text_plain_output_1.png" ]
from sklearn.metrics import f1_score, accuracy_score
from sklearn.model_selection import GridSearchCV
from xgboost import XGBClassifier
xgb = XGBClassifier()
model1 = xgb.fit(X_train, y_train)
y_hat = model1.predict(X_test)
f1 = f1_score(y_test, y_hat)
acc = accuracy_score(y_test, y_hat)
(f1, acc)
parameters = {'booster': ['gbtree', 'gblinear', 'dart'], 'learning_rate': [0.1, 0.3, 0.6, 0.9, 1], 'n_estimators': [0.1, 1, 10, 15]}
grid = GridSearchCV(xgb, param_grid=parameters, cv=5, verbose=0)
grid.fit(X_train, y_train)
print(grid.best_params_)
code
122246772/cell_26
[ "text_plain_output_1.png" ]
from sklearn.metrics import f1_score, accuracy_score
from xgboost import XGBClassifier
xgb = XGBClassifier()
model1 = xgb.fit(X_train, y_train)
y_hat = model1.predict(X_test)
f1 = f1_score(y_test, y_hat)
acc = accuracy_score(y_test, y_hat)
(f1, acc)
code
122246772/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
122246772/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_test = pd.read_csv('/kaggle/input/titanic/test.csv')
(df_train.shape, df_test.shape)
df = pd.concat([df_train, df_test], axis=0)
df.isnull().sum()
code
122246772/cell_32
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score, accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import GridSearchCV
from xgboost import XGBClassifier
xgb = XGBClassifier()
model1 = xgb.fit(X_train, y_train)
y_hat = model1.predict(X_test)
f1 = f1_score(y_test, y_hat)
acc = accuracy_score(y_test, y_hat)
(f1, acc)
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier()
model2 = rfc.fit(X_train, y_train)
y_hat2 = model2.predict(X_test)
f1 = f1_score(y_test, y_hat2)
acc = accuracy_score(y_test, y_hat2)
(f1, acc)
parameters = {'booster': ['gbtree', 'gblinear', 'dart'], 'learning_rate': [0.1, 0.3, 0.6, 0.9, 1], 'n_estimators': [0.1, 1, 10, 15]}
grid = GridSearchCV(xgb, param_grid=parameters, cv=5, verbose=0)
grid.fit(X_train, y_train)
xgb = XGBClassifier(booster='gbtree', learning_rate=0.3, n_estimators=10)
model1 = xgb.fit(X_train, y_train)
y_hat = model1.predict(X_test)
f1 = f1_score(y_test, y_hat)
acc = accuracy_score(y_test, y_hat)
(f1, acc)
from sklearn.model_selection import GridSearchCV
rfc = RandomForestClassifier()
parameters = {'criterion': ['gini', 'entropy'], 'max_features': ['auto', 'sqrt', 'log2'], 'n_estimators': [15, 50, 100, 200]}
grid = GridSearchCV(rfc, param_grid=parameters, cv=5, verbose=1)
grid.fit(X_train, y_train)
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(criterion='entropy', max_features='auto', n_estimators=50, random_state=123)
model3 = rfc.fit(X_train, y_train)
y_hat3 = model3.predict(X_test)
f1 = f1_score(y_test, y_hat3)
acc = accuracy_score(y_test, y_hat3)
(f1, acc)
code
122246772/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_test = pd.read_csv('/kaggle/input/titanic/test.csv')
(df_train.shape, df_test.shape)
df = pd.concat([df_train, df_test], axis=0)
df.isnull().sum()
df.info()
code
122246772/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_train.head()
code
122246772/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_test = pd.read_csv('/kaggle/input/titanic/test.csv')
(df_train.shape, df_test.shape)
df = pd.concat([df_train, df_test], axis=0)
df.isnull().sum()
df.drop(['Name', 'Cabin'], axis=1, inplace=True)
df.groupby('Sex').size()
df.drop(['Ticket'], axis=1, inplace=True)
df.isnull().sum()
df.isna().sum()
code
122246772/cell_31
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score, accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import GridSearchCV
from xgboost import XGBClassifier
xgb = XGBClassifier()
model1 = xgb.fit(X_train, y_train)
y_hat = model1.predict(X_test)
f1 = f1_score(y_test, y_hat)
acc = accuracy_score(y_test, y_hat)
(f1, acc)
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier()
model2 = rfc.fit(X_train, y_train)
y_hat2 = model2.predict(X_test)
f1 = f1_score(y_test, y_hat2)
acc = accuracy_score(y_test, y_hat2)
(f1, acc)
parameters = {'booster': ['gbtree', 'gblinear', 'dart'], 'learning_rate': [0.1, 0.3, 0.6, 0.9, 1], 'n_estimators': [0.1, 1, 10, 15]}
grid = GridSearchCV(xgb, param_grid=parameters, cv=5, verbose=0)
grid.fit(X_train, y_train)
from sklearn.model_selection import GridSearchCV
rfc = RandomForestClassifier()
parameters = {'criterion': ['gini', 'entropy'], 'max_features': ['auto', 'sqrt', 'log2'], 'n_estimators': [15, 50, 100, 200]}
grid = GridSearchCV(rfc, param_grid=parameters, cv=5, verbose=1)
grid.fit(X_train, y_train)
print(grid.best_params_)
code
122246772/cell_14
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_test = pd.read_csv('/kaggle/input/titanic/test.csv')
(df_train.shape, df_test.shape)
df = pd.concat([df_train, df_test], axis=0)
df.isnull().sum()
df.drop(['Name', 'Cabin'], axis=1, inplace=True)
df.groupby('Sex').size()
df.drop(['Ticket'], axis=1, inplace=True)
df.isnull().sum()
code
122246772/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_test = pd.read_csv('/kaggle/input/titanic/test.csv')
(df_train.shape, df_test.shape)
df = pd.concat([df_train, df_test], axis=0)
df.isnull().sum()
df.drop(['Name', 'Cabin'], axis=1, inplace=True)
df.groupby('Sex').size()
code
122246772/cell_27
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score, accuracy_score
from xgboost import XGBClassifier
xgb = XGBClassifier()
model1 = xgb.fit(X_train, y_train)
y_hat = model1.predict(X_test)
f1 = f1_score(y_test, y_hat)
acc = accuracy_score(y_test, y_hat)
(f1, acc)
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier()
model2 = rfc.fit(X_train, y_train)
y_hat2 = model2.predict(X_test)
f1 = f1_score(y_test, y_hat2)
acc = accuracy_score(y_test, y_hat2)
(f1, acc)
code
122246772/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/titanic/train.csv')
df_test = pd.read_csv('/kaggle/input/titanic/test.csv')
(df_train.shape, df_test.shape)
code
128030195/cell_13
[ "text_plain_output_1.png" ]
from keras.models import Model, load_model
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.layers import Dense, Flatten, GlobalAveragePooling2D, Dropout
from tensorflow.keras.layers import Dense, Input, Conv2D, MaxPool2D, Flatten, Activation, Dropout, BatchNormalization
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import Sequential
import tensorflow as tf
data_dir = '/kaggle/input/rgb-arabic-alphabets-sign-language-dataset-jpg/images_after'
images = tf.keras.utils.image_dataset_from_directory(data_dir, labels='inferred', label_mode='int', class_names=None, color_mode='rgb', batch_size=32, image_size=(256, 256), shuffle=True, seed=123, validation_split=0.2, subset='training', interpolation='bilinear')
data_dir = '/kaggle/input/rgb-arabic-alphabets-sign-language-dataset-jpg/images_after'
images_validation = tf.keras.utils.image_dataset_from_directory(data_dir, labels='inferred', label_mode='int', class_names=None, color_mode='rgb', batch_size=32, image_size=(256, 256), shuffle=True, seed=123, validation_split=0.2, subset='validation', interpolation='bilinear')
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True)
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.models import Model
import tensorflow.keras as keras
resnet = ResNet50(include_top=False, weights='imagenet', input_shape=(256, 256, 3), pooling='max')
output = resnet.layers[-1].output
output = tf.keras.layers.Flatten()(output)
resnet = Model(resnet.input, output)
res_name = []
for layer in resnet.layers:
    res_name.append(layer.name)
set_trainable = False
for layer in resnet.layers:
    if layer.name in res_name[-22:]:
        set_trainable = True
    if set_trainable:
        layer.trainable = True
    else:
        layer.trainable = False
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, GlobalAveragePooling2D, Dropout
model6 = Sequential()
model6.add(resnet)
model6.add(BatchNormalization())
model6.add(Dense(2048, activation='relu'))
model6.add(Dropout(0.2))
model6.add(Dense(1024, activation='relu'))
model6.add(Dropout(0.2))
model6.add(Dense(512, activation='relu'))
model6.add(Dropout(0.2))
model6.add(Dense(256, activation='relu'))
model6.add(Dropout(0.2))
model6.add(Dense(128, activation='relu'))
model6.add(Dropout(0.2))
model6.add(Dense(64, activation='relu'))
model6.add(Dropout(0.2))
model6.add(Dense(31, activation='softmax'))
model6.summary()
code
128030195/cell_4
[ "text_plain_output_5.png", "text_plain_output_9.png", "text_plain_output_4.png", "text_plain_output_13.png", "text_plain_output_14.png", "text_plain_output_10.png", "text_plain_output_6.png", "text_plain_output_3.png", "text_plain_output_7.png", "text_plain_output_8.png", "text_plain_output_2.png", "text_plain_output_1.png", "text_plain_output_11.png", "text_plain_output_12.png" ]
import tensorflow as tf
data_dir = '/kaggle/input/rgb-arabic-alphabets-sign-language-dataset-jpg/images_after'
images = tf.keras.utils.image_dataset_from_directory(data_dir, labels='inferred', label_mode='int', class_names=None, color_mode='rgb', batch_size=32, image_size=(256, 256), shuffle=True, seed=123, validation_split=0.2, subset='training', interpolation='bilinear')
data_dir = '/kaggle/input/rgb-arabic-alphabets-sign-language-dataset-jpg/images_after'
images_validation = tf.keras.utils.image_dataset_from_directory(data_dir, labels='inferred', label_mode='int', class_names=None, color_mode='rgb', batch_size=32, image_size=(256, 256), shuffle=True, seed=123, validation_split=0.2, subset='validation', interpolation='bilinear')
code
128030195/cell_2
[ "text_html_output_2.png" ]
import os
import cv2
import pickle
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from tqdm import tqdm
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import confusion_matrix
from keras.models import Model, load_model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras import activations
from tensorflow.keras.layers import Dense, Input, Conv2D, MaxPool2D, Flatten, Activation, Dropout, BatchNormalization
import glob
import skimage
plt.rc('font', size=14)
plt.rc('axes', labelsize=14, titlesize=14)
plt.rc('legend', fontsize=14)
plt.rc('xtick', labelsize=10)
plt.rc('ytick', labelsize=10)
code
128030195/cell_1
[ "text_plain_output_5.png", "text_plain_output_9.png", "text_plain_output_4.png", "text_plain_output_6.png", "text_plain_output_3.png", "text_plain_output_7.png", "text_plain_output_8.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128030195/cell_7
[ "image_output_1.png" ]
import plotly.express as px
import tensorflow as tf
data_dir = '/kaggle/input/rgb-arabic-alphabets-sign-language-dataset-jpg/images_after'
images = tf.keras.utils.image_dataset_from_directory(data_dir, labels='inferred', label_mode='int', class_names=None, color_mode='rgb', batch_size=32, image_size=(256, 256), shuffle=True, seed=123, validation_split=0.2, subset='training', interpolation='bilinear')
class_names = images.class_names
class_names
px.pie(names=class_names, title='Train').show()
code
128030195/cell_8
[ "text_plain_output_100.png", "text_plain_output_334.png", "text_plain_output_445.png", "text_plain_output_201.png", "text_plain_output_261.png", "text_plain_output_565.png", "text_plain_output_522.png", "text_plain_output_84.png", "text_plain_output_521.png", "text_plain_output_322.png", "text_plain_output_205.png", "text_plain_output_511.png", "text_plain_output_271.png", "text_plain_output_56.png", "text_plain_output_475.png", "text_plain_output_158.png", "text_plain_output_455.png", "text_plain_output_223.png", "text_plain_output_218.png", "text_plain_output_264.png", "text_plain_output_282.png", "text_plain_output_396.png", "text_plain_output_287.png", "text_plain_output_232.png", "text_plain_output_181.png", "text_plain_output_137.png", "text_plain_output_139.png", "text_plain_output_362.png", "text_plain_output_35.png", "text_plain_output_501.png", "text_plain_output_258.png", "text_plain_output_452.png", "text_plain_output_130.png", "text_plain_output_490.png", "text_plain_output_449.png", "text_plain_output_462.png", "text_plain_output_117.png", "text_plain_output_286.png", "text_plain_output_367.png", "text_plain_output_262.png", "text_plain_output_278.png", "text_plain_output_395.png", "text_plain_output_254.png", "text_plain_output_307.png", "text_plain_output_570.png", "text_plain_output_98.png", "text_plain_output_399.png", "text_plain_output_236.png", "text_plain_output_195.png", "text_plain_output_471.png", "text_plain_output_219.png", "text_plain_output_420.png", "text_plain_output_514.png", "text_plain_output_485.png", "text_plain_output_237.png", "text_plain_output_43.png", "text_plain_output_284.png", "text_plain_output_187.png", "text_plain_output_309.png", "text_plain_output_78.png", "text_plain_output_143.png", "text_plain_output_106.png", "text_plain_output_37.png", "text_plain_output_138.png", "text_plain_output_544.png", "text_plain_output_192.png", "text_plain_output_426.png", "text_plain_output_184.png", "text_plain_output_477.png", "text_plain_output_274.png", "text_plain_output_172.png", "text_plain_output_332.png", "text_plain_output_147.png", "text_plain_output_443.png", "text_plain_output_327.png", "text_plain_output_256.png", "text_plain_output_90.png", "text_plain_output_79.png", "text_plain_output_331.png", "text_plain_output_5.png", "text_plain_output_550.png", "text_plain_output_75.png", "text_plain_output_48.png", "text_plain_output_388.png", "text_plain_output_422.png", "text_plain_output_116.png", "text_plain_output_128.png", "text_plain_output_30.png", "text_plain_output_167.png", "text_plain_output_213.png", "text_plain_output_73.png", "text_plain_output_126.png", "text_plain_output_492.png", "text_plain_output_321.png", "text_plain_output_272.png", "text_plain_output_115.png", "text_plain_output_474.png", "text_plain_output_407.png", "text_plain_output_482.png", "text_plain_output_316.png", "text_plain_output_355.png", "text_plain_output_15.png", "text_plain_output_390.png", "text_plain_output_133.png", "text_plain_output_437.png", "text_plain_output_198.png", "text_plain_output_387.png", "text_plain_output_555.png", "text_plain_output_548.png", "text_plain_output_178.png", "text_plain_output_226.png", "text_plain_output_154.png", "text_plain_output_234.png", "text_plain_output_375.png", "text_plain_output_404.png", "text_plain_output_114.png", "text_plain_output_515.png", "text_plain_output_157.png", "text_plain_output_494.png", "text_plain_output_317.png", "text_plain_output_251.png", "text_plain_output_470.png", "text_plain_output_496.png", 
"text_plain_output_423.png", "text_plain_output_70.png", "text_plain_output_9.png", "text_plain_output_484.png", "text_plain_output_44.png", "text_plain_output_325.png", "text_plain_output_203.png", "text_plain_output_505.png", "text_plain_output_119.png", "text_plain_output_546.png", "text_plain_output_540.png", "text_plain_output_373.png", "text_plain_output_504.png", "text_plain_output_86.png", "text_plain_output_244.png", "text_plain_output_118.png", "text_plain_output_551.png", "text_plain_output_131.png", "text_plain_output_40.png", "text_plain_output_343.png", "text_plain_output_123.png", "text_plain_output_74.png", "text_plain_output_190.png", "text_plain_output_302.png", "text_plain_output_31.png", "text_plain_output_340.png", "text_plain_output_379.png", "text_plain_output_281.png", "text_plain_output_20.png", "text_plain_output_557.png", "text_plain_output_273.png", "text_plain_output_263.png", "text_plain_output_102.png", "text_plain_output_229.png", "text_plain_output_111.png", "text_plain_output_414.png", "text_plain_output_461.png", "text_plain_output_510.png", "text_plain_output_222.png", "text_plain_output_101.png", "text_plain_output_530.png", "text_plain_output_169.png", "text_plain_output_531.png", "text_plain_output_144.png", "text_plain_output_161.png", "text_plain_output_489.png", "text_plain_output_305.png", "text_plain_output_275.png", "text_plain_output_301.png", "text_plain_output_132.png", "text_plain_output_60.png", "text_plain_output_467.png", "text_plain_output_502.png", "text_plain_output_221.png", "text_plain_output_564.png", "text_plain_output_552.png", "text_plain_output_330.png", "text_plain_output_155.png", "text_plain_output_434.png", "text_plain_output_68.png", "text_plain_output_4.png", "text_plain_output_65.png", "text_plain_output_64.png", "text_plain_output_419.png", "text_plain_output_215.png", "text_plain_output_532.png", "text_plain_output_189.png", "text_plain_output_415.png", "text_plain_output_13.png", "text_plain_output_200.png", "text_plain_output_107.png", "text_plain_output_567.png", "text_plain_output_398.png", "text_plain_output_312.png", "text_plain_output_248.png", "text_plain_output_318.png", "text_plain_output_417.png", "text_plain_output_52.png", "text_plain_output_545.png", "text_plain_output_393.png", "text_plain_output_572.png", "text_plain_output_66.png", "text_plain_output_446.png", "text_plain_output_243.png", "text_plain_output_45.png", "text_plain_output_380.png", "text_plain_output_442.png", "text_plain_output_300.png", "text_plain_output_257.png", "text_plain_output_405.png", "text_plain_output_353.png", "text_plain_output_476.png", "text_plain_output_277.png", "text_plain_output_457.png", "text_plain_output_361.png", "text_plain_output_171.png", "text_plain_output_518.png", "text_plain_output_561.png", "text_plain_output_431.png", "text_plain_output_14.png", "text_plain_output_159.png", "text_plain_output_32.png", "text_plain_output_516.png", "text_plain_output_304.png", "text_plain_output_88.png", "text_plain_output_240.png", "text_plain_output_29.png", "text_plain_output_359.png", "text_plain_output_529.png", "text_plain_output_347.png", "text_plain_output_140.png", "text_plain_output_376.png", "text_plain_output_280.png", "text_plain_output_129.png", "text_plain_output_349.png", "text_plain_output_242.png", "text_plain_output_483.png", "text_plain_output_460.png", "text_plain_output_363.png", "text_plain_output_289.png", "text_plain_output_255.png", "text_plain_output_160.png", "text_plain_output_58.png", 
"text_plain_output_329.png", "text_plain_output_49.png", "text_plain_output_63.png", "text_plain_output_260.png", "text_plain_output_294.png", "text_plain_output_27.png", "text_plain_output_392.png", "text_plain_output_320.png", "text_plain_output_177.png", "text_plain_output_386.png", "text_plain_output_438.png", "text_plain_output_76.png", "text_plain_output_333.png", "text_plain_output_108.png", "text_plain_output_54.png", "text_plain_output_142.png", "text_plain_output_10.png", "text_plain_output_269.png", "text_plain_output_276.png", "text_plain_output_6.png", "text_plain_output_326.png", "text_plain_output_503.png", "text_plain_output_153.png", "text_plain_output_170.png", "text_plain_output_92.png", "text_plain_output_57.png", "text_plain_output_120.png", "text_plain_output_469.png", "text_plain_output_24.png", "text_plain_output_357.png", "text_plain_output_21.png", "text_plain_output_344.png", "text_plain_output_104.png", "text_plain_output_270.png", "text_plain_output_47.png", "text_plain_output_466.png", "text_plain_output_568.png", "text_plain_output_121.png", "text_plain_output_25.png", "text_plain_output_134.png", "text_plain_output_523.png", "text_plain_output_401.png", "text_plain_output_77.png", "text_plain_output_421.png", "text_plain_output_288.png", "text_plain_output_535.png", "text_plain_output_527.png", "text_plain_output_488.png", "text_plain_output_18.png", "text_plain_output_183.png", "text_plain_output_266.png", "text_plain_output_149.png", "text_plain_output_208.png", "text_plain_output_50.png", "text_plain_output_36.png", "text_plain_output_383.png", "text_plain_output_207.png", "text_plain_output_391.png", "text_plain_output_413.png", "text_plain_output_96.png", "text_plain_output_87.png", "text_plain_output_3.png", "text_plain_output_217.png", "text_plain_output_418.png", "text_plain_output_427.png", "text_plain_output_180.png", "text_plain_output_556.png", "text_plain_output_141.png", "text_plain_output_210.png", "text_plain_output_112.png", "text_plain_output_152.png", "text_plain_output_225.png", "text_plain_output_191.png", "text_plain_output_259.png", "text_plain_output_447.png", "text_plain_output_290.png", "text_plain_output_506.png", "text_plain_output_283.png", "text_plain_output_495.png", "text_plain_output_247.png", "text_plain_output_113.png", "text_plain_output_371.png", "text_plain_output_479.png", "text_plain_output_324.png", "text_plain_output_22.png", "text_plain_output_188.png", "text_plain_output_366.png", "text_plain_output_328.png", "text_plain_output_81.png", "text_plain_output_69.png", "text_plain_output_368.png", "text_plain_output_372.png", "text_plain_output_175.png", "text_plain_output_165.png", "text_plain_output_542.png", "text_plain_output_146.png", "text_plain_output_145.png", "text_plain_output_125.png", "text_plain_output_454.png", "text_plain_output_487.png", "text_plain_output_338.png", "text_plain_output_197.png", "text_plain_output_512.png", "text_plain_output_382.png", "text_plain_output_315.png", "text_plain_output_429.png", "text_plain_output_38.png", "text_plain_output_517.png", "text_plain_output_433.png", "text_plain_output_7.png", "text_plain_output_528.png", "text_plain_output_214.png", "text_plain_output_166.png", "text_plain_output_358.png", "text_plain_output_513.png", "text_plain_output_314.png", "text_plain_output_410.png", "text_plain_output_432.png", "text_plain_output_411.png", "text_plain_output_91.png", "text_plain_output_308.png", "text_plain_output_245.png", "text_plain_output_16.png", 
"text_plain_output_497.png", "text_plain_output_174.png", "text_plain_output_212.png", "text_plain_output_230.png", "text_plain_output_265.png", "text_plain_output_430.png", "text_plain_output_435.png", "text_plain_output_378.png", "text_plain_output_59.png", "text_plain_output_409.png", "text_plain_output_206.png", "text_plain_output_103.png", "text_plain_output_71.png", "text_plain_output_539.png", "text_plain_output_8.png", "text_plain_output_122.png", "text_plain_output_384.png", "text_plain_output_498.png", "text_plain_output_211.png", "text_plain_output_182.png", "text_plain_output_26.png", "text_plain_output_554.png", "text_plain_output_536.png", "text_plain_output_406.png", "text_plain_output_310.png", "text_plain_output_456.png", "text_plain_output_541.png", "text_plain_output_558.png", "text_plain_output_220.png", "text_plain_output_543.png", "text_plain_output_109.png", "text_plain_output_459.png", "text_plain_output_238.png", "text_plain_output_520.png", "text_plain_output_41.png", "text_plain_output_34.png", "text_plain_output_253.png", "text_plain_output_346.png", "text_plain_output_291.png", "text_plain_output_168.png", "text_plain_output_394.png", "text_plain_output_204.png", "text_plain_output_241.png", "text_plain_output_231.png", "text_plain_output_533.png", "text_plain_output_345.png", "text_plain_output_350.png", "text_plain_output_209.png", "text_plain_output_185.png", "text_plain_output_85.png", "text_plain_output_42.png", "text_plain_output_110.png", "text_plain_output_549.png", "text_plain_output_67.png", "text_plain_output_508.png", "text_plain_output_468.png", "text_plain_output_370.png", "text_plain_output_297.png", "text_plain_output_53.png", "text_plain_output_313.png", "text_plain_output_224.png", "text_plain_output_193.png", "text_plain_output_441.png", "text_plain_output_403.png", "text_plain_output_23.png", "text_plain_output_173.png", "text_plain_output_235.png", "text_plain_output_151.png", "text_plain_output_89.png", "text_plain_output_299.png", "text_plain_output_51.png", "text_plain_output_450.png", "text_plain_output_252.png", "text_plain_output_296.png", "text_plain_output_525.png", "text_plain_output_28.png", "text_plain_output_72.png", "text_plain_output_99.png", "text_plain_output_381.png", "text_plain_output_571.png", "text_plain_output_163.png", "text_plain_output_179.png", "text_plain_output_537.png", "text_plain_output_162.png", "text_plain_output_136.png", "text_plain_output_246.png", "text_plain_output_2.png", "text_plain_output_569.png", "text_plain_output_239.png", "text_plain_output_127.png", "text_plain_output_559.png", "text_plain_output_311.png", "text_plain_output_500.png", "text_plain_output_295.png", "text_plain_output_279.png", "text_plain_output_507.png", "text_plain_output_509.png", "text_plain_output_337.png", "text_plain_output_562.png", "text_plain_output_499.png", "text_plain_output_196.png", "text_plain_output_342.png", "text_plain_output_563.png", "text_plain_output_97.png", "text_plain_output_227.png", "text_plain_output_453.png", "text_plain_output_1.png", "text_plain_output_33.png", "text_plain_output_150.png", "text_plain_output_39.png", "text_plain_output_176.png", "text_plain_output_335.png", "text_plain_output_186.png", "text_plain_output_233.png", "text_plain_output_228.png", "text_plain_output_473.png", "text_plain_output_385.png", "text_plain_output_478.png", "text_plain_output_55.png", "text_plain_output_412.png", "text_plain_output_293.png", "text_plain_output_268.png", "text_plain_output_436.png", 
"text_plain_output_199.png", "text_plain_output_354.png", "text_plain_output_463.png", "text_plain_output_360.png", "text_plain_output_319.png", "text_plain_output_82.png", "text_plain_output_356.png", "text_plain_output_202.png", "text_plain_output_93.png", "text_plain_output_336.png", "text_plain_output_19.png", "text_plain_output_439.png", "text_plain_output_341.png", "text_plain_output_105.png", "text_plain_output_465.png", "text_plain_output_80.png", "text_plain_output_491.png", "text_plain_output_94.png", "text_plain_output_164.png", "text_plain_output_249.png", "text_plain_output_534.png", "text_plain_output_444.png", "text_plain_output_216.png", "text_plain_output_124.png", "text_plain_output_17.png", "text_plain_output_148.png", "text_plain_output_323.png", "text_plain_output_402.png", "text_plain_output_424.png", "text_plain_output_486.png", "text_plain_output_250.png", "text_plain_output_11.png", "text_plain_output_481.png", "text_plain_output_560.png", "text_plain_output_526.png", "text_plain_output_400.png", "text_plain_output_524.png", "text_plain_output_538.png", "text_plain_output_12.png", "text_plain_output_267.png", "text_plain_output_553.png", "text_plain_output_408.png", "text_plain_output_425.png", "text_plain_output_428.png", "text_plain_output_416.png", "text_plain_output_194.png", "text_plain_output_519.png", "text_plain_output_62.png", "text_plain_output_480.png", "text_plain_output_303.png", "text_plain_output_377.png", "text_plain_output_440.png", "text_plain_output_95.png", "text_plain_output_339.png", "text_plain_output_458.png", "text_plain_output_464.png", "text_plain_output_156.png", "text_plain_output_547.png", "text_plain_output_298.png", "text_plain_output_369.png", "text_plain_output_348.png", "text_plain_output_448.png", "text_plain_output_364.png", "text_plain_output_365.png", "text_plain_output_61.png", "text_plain_output_352.png", "text_plain_output_83.png", "text_plain_output_374.png", "text_plain_output_472.png", "text_plain_output_566.png", "text_plain_output_397.png", "text_plain_output_389.png", "text_plain_output_292.png", "text_plain_output_351.png", "text_plain_output_135.png", "text_plain_output_285.png", "text_plain_output_306.png", "text_plain_output_493.png", "text_plain_output_46.png" ]
import matplotlib.pyplot as plt
import tensorflow as tf
import os
import cv2
import pickle
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from tqdm import tqdm
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import confusion_matrix
from keras.models import Model, load_model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras import activations
from tensorflow.keras.layers import Dense, Input, Conv2D, MaxPool2D, Flatten, Activation, Dropout, BatchNormalization
import glob
import skimage
plt.rc('font', size=14)
plt.rc('axes', labelsize=14, titlesize=14)
plt.rc('legend', fontsize=14)
plt.rc('xtick', labelsize=10)
plt.rc('ytick', labelsize=10)
data_dir = '/kaggle/input/rgb-arabic-alphabets-sign-language-dataset-jpg/images_after'
images = tf.keras.utils.image_dataset_from_directory(data_dir, labels='inferred', label_mode='int', class_names=None, color_mode='rgb', batch_size=32, image_size=(256, 256), shuffle=True, seed=123, validation_split=0.2, subset='training', interpolation='bilinear')
class_names = images.class_names
class_names
plt.figure(figsize=(10, 10))
for image, label in images.take(1):
    for i in range(25):
        ax = plt.subplot(5, 5, i + 1)
        plt.imshow(image[i].numpy().astype('uint8'))
        plt.title(class_names[label[i]])
        plt.axis('off')
code
128030195/cell_16
[ "text_plain_output_1.png" ]
from keras.models import Model, load_model
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.layers import Dense, Flatten, GlobalAveragePooling2D, Dropout
from tensorflow.keras.layers import Dense, Input, Conv2D, MaxPool2D, Flatten, Activation, Dropout, BatchNormalization
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
import tensorflow as tf
import os
import cv2
import pickle
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from tqdm import tqdm
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import confusion_matrix
from keras.models import Model, load_model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras import activations
from tensorflow.keras.layers import Dense, Input, Conv2D, MaxPool2D, Flatten, Activation, Dropout, BatchNormalization
import glob
import skimage
plt.rc('font', size=14)
plt.rc('axes', labelsize=14, titlesize=14)
plt.rc('legend', fontsize=14)
plt.rc('xtick', labelsize=10)
plt.rc('ytick', labelsize=10)
data_dir = '/kaggle/input/rgb-arabic-alphabets-sign-language-dataset-jpg/images_after'
images = tf.keras.utils.image_dataset_from_directory(data_dir, labels='inferred', label_mode='int', class_names=None, color_mode='rgb', batch_size=32, image_size=(256, 256), shuffle=True, seed=123, validation_split=0.2, subset='training', interpolation='bilinear')
data_dir = '/kaggle/input/rgb-arabic-alphabets-sign-language-dataset-jpg/images_after'
images_validation = tf.keras.utils.image_dataset_from_directory(data_dir, labels='inferred', label_mode='int', class_names=None, color_mode='rgb', batch_size=32, image_size=(256, 256), shuffle=True, seed=123, validation_split=0.2, subset='validation', interpolation='bilinear')
class_names = images.class_names
class_names
plt.figure(figsize=(10, 10))
for image, label in images.take(1):
    for i in range(25):
        ax = plt.subplot(5, 5, i + 1)
        plt.imshow(image[i].numpy().astype("uint8"))
        plt.title(class_names[label[i]])
        plt.axis("off")
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True)
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.models import Model
import tensorflow.keras as keras
resnet = ResNet50(include_top=False, weights='imagenet', input_shape=(256, 256, 3), pooling='max')
output = resnet.layers[-1].output
output = tf.keras.layers.Flatten()(output)
resnet = Model(resnet.input, output)
res_name = []
for layer in resnet.layers:
    res_name.append(layer.name)
set_trainable = False
for layer in resnet.layers:
    if layer.name in res_name[-22:]:
        set_trainable = True
    if set_trainable:
        layer.trainable = True
    else:
        layer.trainable = False
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, GlobalAveragePooling2D, Dropout
model6 = Sequential()
model6.add(resnet)
model6.add(BatchNormalization())
model6.add(Dense(2048, activation='relu'))
model6.add(Dropout(0.2))
model6.add(Dense(1024, activation='relu'))
model6.add(Dropout(0.2))
model6.add(Dense(512, activation='relu'))
model6.add(Dropout(0.2))
model6.add(Dense(256, activation='relu'))
model6.add(Dropout(0.2))
model6.add(Dense(128, activation='relu'))
model6.add(Dropout(0.2))
model6.add(Dense(64, activation='relu'))
model6.add(Dropout(0.2))
model6.add(Dense(31, activation='softmax'))
model6.summary()
model6.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
EPOCHS = 25
BATCH_SIZE = 64
history6 = model6.fit(images, epochs=EPOCHS, batch_size=BATCH_SIZE, verbose=1, validation_data=images_validation, callbacks=[early_stop])
code
128030195/cell_3
[ "image_output_1.png" ]
import tensorflow as tf
data_dir = '/kaggle/input/rgb-arabic-alphabets-sign-language-dataset-jpg/images_after'
images = tf.keras.utils.image_dataset_from_directory(data_dir, labels='inferred', label_mode='int', class_names=None, color_mode='rgb', batch_size=32, image_size=(256, 256), shuffle=True, seed=123, validation_split=0.2, subset='training', interpolation='bilinear')
code