Schema (one record per notebook cell: path, screenshot_names, code, cell_type):
- path: string, 13–17 chars — notebook ID and cell name, e.g. "130003860/cell_49"
- screenshot_names: sequence of strings, 1–873 items — image files captured from the cell's rendered outputs
- code: string, 0–40.4k chars — the cell's source code
- cell_type: string, 1 class — always "code"
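Each record below pairs a Kaggle notebook cell (path, code, cell_type) with the screenshots of that cell's output (screenshot_names). A minimal loading sketch, assuming the dump is distributed as a Hugging Face dataset; the dataset ID used here is a placeholder, not the real identifier:

from datasets import load_dataset

# Hypothetical dataset ID; substitute the real one for this dump.
ds = load_dataset('user/notebook-cells', split='train')
for record in ds.select(range(3)):
    notebook_id, cell_name = record['path'].split('/')  # e.g. '130003860', 'cell_49'
    print(notebook_id, cell_name, record['cell_type'], len(record['code']), record['screenshot_names'])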
130003860/cell_49
[ "text_html_output_1.png" ]
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier
import pandas as pd

data = pd.read_csv('/kaggle/input/icr-identify-age-related-conditions/train.csv')
(x_train.shape, y_train.shape, x_test.shape, y_test.shape)
gb = GradientBoostingClassifier(n_estimators=1000, max_depth=9, subsample=0.8, max_features='log2', min_samples_leaf=9, random_state=42)
gb.fit(x_train, y_train)
y_pred = gb.predict(x_test)
test = pd.read_csv('/kaggle/input/icr-identify-age-related-conditions/test.csv')
ID = test['Id']
test.drop('Id', axis=1, inplace=True)
test.shape
prediction = gb.predict_proba(test)
result = pd.DataFrame(prediction, index=ID, columns=['class_0', 'class_1'])
result
code
130003860/cell_8
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/icr-identify-age-related-conditions/train.csv')
data.shape
data.isnull().sum()
code
130003860/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('/kaggle/input/icr-identify-age-related-conditions/train.csv')
data.shape
data.isnull().sum()
data.dtypes[data.dtypes == 'object']
data.dtypes[data.dtypes == 'object'].isnull()
data.drop('Id', axis=1, inplace=True)
plt.figure(figsize=(12, 10))
plt.subplot(2, 3, 1)
sns.distplot(data['BQ'])
plt.subplot(2, 3, 2)
sns.distplot(data['DU'])
plt.subplot(2, 3, 3)
sns.distplot(data['EL'])
plt.subplot(2, 3, 4)
sns.distplot(data['FC'])
plt.subplot(2, 3, 5)
sns.distplot(data['FL'])
plt.subplot(2, 3, 6)
sns.distplot(data['FS'])
code
130003860/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('/kaggle/input/icr-identify-age-related-conditions/train.csv')
data.shape
data.isnull().sum()
data.dtypes[data.dtypes == 'object']
data.dtypes[data.dtypes == 'object'].isnull()
data.drop('Id', axis=1, inplace=True)
sns.countplot(x=data['EJ'])
code
130003860/cell_31
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/icr-identify-age-related-conditions/train.csv')
data.shape
data.isnull().sum()
data.dtypes[data.dtypes == 'object']
data.dtypes[data.dtypes == 'object'].isnull()
data.drop('Id', axis=1, inplace=True)
data.columns
x = data.iloc[:, :-1]
y = data.iloc[:, -1]
(x.shape, y.shape)
code
130003860/cell_46
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/icr-identify-age-related-conditions/train.csv')
test = pd.read_csv('/kaggle/input/icr-identify-age-related-conditions/test.csv')
test.drop('Id', axis=1, inplace=True)
test.shape
code
130003860/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/icr-identify-age-related-conditions/train.csv')
data.shape
data.isnull().sum()
data.dtypes[data.dtypes == 'object']
data.dtypes[data.dtypes == 'object'].isnull()
data.drop('Id', axis=1, inplace=True)
data[['BQ', 'DU', 'EL', 'FC', 'FL', 'FS', 'GL', 'CB', 'CC']].nunique()
code
130003860/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/icr-identify-age-related-conditions/train.csv')
data.shape
data.isnull().sum()
data.dtypes[data.dtypes == 'object']
code
130003860/cell_27
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/icr-identify-age-related-conditions/train.csv')
data.shape
data.isnull().sum()
data.dtypes[data.dtypes == 'object']
data.dtypes[data.dtypes == 'object'].isnull()
data.drop('Id', axis=1, inplace=True)
data.columns
data['EJ'].head()
code
130003860/cell_37
[ "text_plain_output_1.png" ]
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier

(x_train.shape, y_train.shape, x_test.shape, y_test.shape)
gb = GradientBoostingClassifier(n_estimators=1000, max_depth=9, subsample=0.8, max_features='log2', min_samples_leaf=9, random_state=42)
gb.fit(x_train, y_train)
code
129002850/cell_4
[ "text_plain_output_1.png" ]
from keras.layers import Dense, Activation
from keras.models import Sequential
from keras.optimizers import Adam
from sklearn.metrics import mean_squared_error
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import numpy as np
import random

def perform_operation(a, b, op):
    if op == 0:
        return a + b
    elif op == 1:
        return a - b
    elif op == 2:
        return a * b
    elif op == 3:
        return a / b if b != 0 else 0

train_x = []
train_y = []
for i in range(50000000):
    a = random.randint(0, 10000)
    b = random.randint(0, 10000)
    op = random.randint(0, 3)
    result = perform_operation(a, b, op)
    train_x.append([a, b, op])
    train_y.append(result)
train_x = np.array(train_x)
train_y = np.array(train_y)

model = DecisionTreeRegressor()
model.fit(train_x, train_y)
test_x = np.array([[5, 10, 0], [15, 20, 1], [30, 40, 2], [50, 60, 3]])
predictions = model.predict(test_x)
mse = mean_squared_error(train_y, model.predict(train_x))

def generate_random_polynomial_and_derivative(degree, max_coeff=100):
    coeffs = [random.randint(-max_coeff, max_coeff) for _ in range(degree + 1)]
    derivative_coeffs = [coeffs[i] * i for i in range(1, len(coeffs))]
    return (coeffs, derivative_coeffs)

n = 7
train_x = []
train_y = []
for i in range(50000):
    coeffs, derivative_coeffs = generate_random_polynomial_and_derivative(n)
    train_x.append(coeffs)
    train_y.append(derivative_coeffs)
train_x = np.array(train_x)
train_y = np.array(train_y)

model = Sequential()
model.add(Dense(128, input_dim=n + 1))
model.add(Activation('relu'))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(n))
optimizer = Adam(learning_rate=0.001)
model.compile(optimizer=optimizer, loss='mse')
model.fit(train_x, train_y, epochs=100, batch_size=32)

def test_model_on_polynomial(coeffs):
    test_x = np.array([coeffs])
    predictions = model.predict(test_x)
    return predictions

custom_polynomial = [-9, 1, 5, 2, 8, 5, 4, 1]
predicted_derivative_coeffs = test_model_on_polynomial(custom_polynomial)
print(predicted_derivative_coeffs)
code
129002850/cell_2
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import numpy as np
import random

def perform_operation(a, b, op):
    if op == 0:
        return a + b
    elif op == 1:
        return a - b
    elif op == 2:
        return a * b
    elif op == 3:
        return a / b if b != 0 else 0

train_x = []
train_y = []
for i in range(50000000):
    a = random.randint(0, 10000)
    b = random.randint(0, 10000)
    op = random.randint(0, 3)
    result = perform_operation(a, b, op)
    train_x.append([a, b, op])
    train_y.append(result)
train_x = np.array(train_x)
train_y = np.array(train_y)

model = DecisionTreeRegressor()
model.fit(train_x, train_y)
test_x = np.array([[5, 10, 0], [15, 20, 1], [30, 40, 2], [50, 60, 3]])
predictions = model.predict(test_x)
print(predictions)
mse = mean_squared_error(train_y, model.predict(train_x))
print('Mean squared error: ', mse)
code
90104804/cell_4
[ "text_html_output_1.png" ]
import pandas as pd

graffiti_filepath = '../input/graffiti-incidents/Graffiti_Incidents.csv'
graffiti_data = pd.read_csv(graffiti_filepath, parse_dates=True, low_memory=False)
graffiti_data.head()
code
90104804/cell_1
[ "text_plain_output_1.png" ]
import pandas as pd
pd.plotting.register_matplotlib_converters()
import matplotlib.pyplot as plt
import seaborn as sns

print('Setup Complete')
code
122264476/cell_4
[ "image_output_1.png" ]
import cv2
import matplotlib.pyplot as plt

img = cv2.imread('/kaggle/input/brain-tumor-mri-images-44c/Carcinoma T2/112._big_gallery.jpeg')
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
threshold_value = 70
max_value = 255
threshold_type = cv2.THRESH_BINARY
_, binary_img = cv2.threshold(gray_img, threshold_value, max_value, threshold_type)
plt.figure(figsize=(12, 5))
plt.title('Pixel Value Distribution')
plt.subplot(1, 2, 1)
plt.hist(gray_img.ravel(), 256, [0, 256])
plt.axvline(x=threshold_value, color='r')
plt.title('Grayscale Image')
plt.subplot(1, 2, 2)
plt.hist(binary_img.ravel(), 256, [0, 256])
plt.title('Binarized Image')
plt.suptitle('Pixel Value Distribution', fontsize=20)
plt.show()
code
122264476/cell_3
[ "image_output_1.png" ]
import cv2

img = cv2.imread('/kaggle/input/brain-tumor-mri-images-44c/Carcinoma T2/112._big_gallery.jpeg')
print(img.shape)
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
print(gray_img.shape)
threshold_value = 70
max_value = 255
threshold_type = cv2.THRESH_BINARY
_, binary_img = cv2.threshold(gray_img, threshold_value, max_value, threshold_type)
print(binary_img.shape)
code
122264476/cell_10
[ "text_plain_output_1.png" ]
from matplotlib import animation, rc
import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import os

img = cv2.imread('/kaggle/input/brain-tumor-mri-images-44c/Carcinoma T2/112._big_gallery.jpeg')
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
threshold_value = 70
max_value = 255
threshold_type = cv2.THRESH_BINARY
_, binary_img = cv2.threshold(gray_img, threshold_value, max_value, threshold_type)
img1 = img
img2 = gray_img
img3 = binary_img
fig, axs = plt.subplots(1, 3, figsize=(12, 4))
fig.suptitle('Binarization of Image')
axs[0].imshow(cv2.cvtColor(img1, cv2.COLOR_BGR2RGB))
axs[1].imshow(img2, cmap='gray')
axs[2].imshow(img3)
for ax in axs:
    ax.set_xticks([])
    ax.set_yticks([])
plt.show()

for i in range(0, 255, 5):
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    threshold_value = i
    max_value = 255
    threshold_type = cv2.THRESH_BINARY
    _, binary_img = cv2.threshold(gray_img, threshold_value, max_value, threshold_type)
    mpimg.imsave(str(i).zfill(3) + '.png', binary_img)

rc('animation', html='jshtml')

def create_animation(ims):
    fig = plt.figure(figsize=(4, 4))
    # plt.axis('off')
    im = plt.imshow(cv2.cvtColor(ims[5], cv2.COLOR_BGR2RGB))
    plt.close()

    def animate_func(i):
        im.set_array(cv2.cvtColor(ims[i], cv2.COLOR_BGR2RGB))
        return [im]

    return animation.FuncAnimation(fig, animate_func, frames=len(ims), interval=1000 // 3)

paths = []
for dirname, _, filenames in os.walk('/kaggle/working/'):
    for filename in filenames:
        if filename[-4:] == '.png':
            paths += [os.path.join(dirname, filename)]
paths = sorted(paths)
images = []
for i in range(len(paths)):
    images += [cv2.imread(paths[i])]
create_animation(images)
code
122264476/cell_5
[ "text_html_output_1.png" ]
import cv2
import matplotlib.pyplot as plt

img = cv2.imread('/kaggle/input/brain-tumor-mri-images-44c/Carcinoma T2/112._big_gallery.jpeg')
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
threshold_value = 70
max_value = 255
threshold_type = cv2.THRESH_BINARY
_, binary_img = cv2.threshold(gray_img, threshold_value, max_value, threshold_type)
img1 = img
img2 = gray_img
img3 = binary_img
fig, axs = plt.subplots(1, 3, figsize=(12, 4))
fig.suptitle('Binarization of Image')
axs[0].imshow(cv2.cvtColor(img1, cv2.COLOR_BGR2RGB))
axs[1].imshow(img2, cmap='gray')
axs[2].imshow(img3)
for ax in axs:
    ax.set_xticks([])
    ax.set_yticks([])
plt.show()
code
17141097/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

print(os.listdir('../input'))
code
2003283/cell_2
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

test_corpus = np.load('../input/preprocessing/test_corp.npy')
train_corpus = np.load('../input/preprocessing/train_corp.npy')
glove_table = pd.read_csv('../input/preprocessing/filled_glove_table.csv', index_col=0)
glove_table.describe()
code
2003283/cell_3
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

test_corpus = np.load('../input/preprocessing/test_corp.npy')
train_corpus = np.load('../input/preprocessing/train_corp.npy')
glove_table = pd.read_csv('../input/preprocessing/filled_glove_table.csv', index_col=0)
glove_table.loc[['man', 'woman', 'man']].as_matrix().shape
code
18127333/cell_2
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
train.head()
code
18127333/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

print(os.listdir('../input'))
code
18127333/cell_3
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test.head()
code
122259999/cell_21
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd

df = pd.read_csv('/kaggle/input/review/diabetes.csv')
X = df.drop('Outcome', axis=1).values
y = df['Outcome'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=101)
df = pd.read_csv('/kaggle/input/review/diabetes.csv')
X = df.drop('Outcome', axis=1).values
y = df['Outcome'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=101)
df = pd.read_csv('/kaggle/input/iris-dataset/Iris.csv')
X = df.drop('species', axis=1).values
y = df['species'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=101)
print(df.head())
code
122259999/cell_13
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd

df = pd.read_csv('/kaggle/input/review/diabetes.csv')
X = df.drop('Outcome', axis=1).values
y = df['Outcome'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=101)
df = pd.read_csv('/kaggle/input/review/diabetes.csv')
X = df.drop('Outcome', axis=1).values
y = df['Outcome'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=101)
print(df.head())
code
122259999/cell_9
[ "text_plain_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier
import sklearn.metrics as pm

knn_model = KNeighborsClassifier(n_neighbors=7)
knn_model.fit(X_train, y_train)
y_pred = knn_model.predict(X_test)
pm.confusion_matrix(y_test, y_pred)
tn, fp, fn, tp = pm.confusion_matrix(y_test, y_pred).ravel()
print('Performance measures over testing data set:')
print(' - precision is ', pm.precision_score(y_test, y_pred))
print(' - recall is ', pm.recall_score(y_test, y_pred))
print(' - f-measure is', pm.f1_score(y_test, y_pred))
code
122259999/cell_23
[ "text_plain_output_1.png" ]
from sklearn.naive_bayes import GaussianNB

GNB_model = GaussianNB()
GNB_model.fit(X_train, y_train)
y_pred = GNB_model.predict(X_test)
GNB_model.predict([[5.1, 2.5, 3.0, 1.1]])
code
122259999/cell_6
[ "image_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier
import sklearn.metrics as pm

knn_model = KNeighborsClassifier(n_neighbors=7)
knn_model.fit(X_train, y_train)
y_pred = knn_model.predict(X_test)
pm.confusion_matrix(y_test, y_pred)
code
122259999/cell_11
[ "text_plain_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
import matplotlib.pylab as plt
import numpy as np
import seaborn as sns
import sklearn.metrics as pm

knn_model = KNeighborsClassifier(n_neighbors=7)
knn_model.fit(X_train, y_train)
y_pred = knn_model.predict(X_test)
pm.confusion_matrix(y_test, y_pred)
tn, fp, fn, tp = pm.confusion_matrix(y_test, y_pred).ravel()
predictions = knn_model.predict(X_train)
cf_matrix = pm.confusion_matrix(y_test, y_pred)
ax = sns.heatmap(cf_matrix, annot=True, cmap='Blues', fmt='.2f')
ax.set_title('Confusion Matrix\n\n')
ax.set_xlabel('\nActual Values')
ax.set_ylabel('Predicted Values ')
ax.xaxis.set_ticklabels(['healthy (0)', 'diabetic (1)'])
ax.yaxis.set_ticklabels(['healthy (0)', 'diabetic (1)'])
plt.show()
code
122259999/cell_19
[ "image_output_1.png" ]
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
import matplotlib.pylab as plt
import numpy as np
import seaborn as sns
import sklearn.metrics as pm

knn_model = KNeighborsClassifier(n_neighbors=7)
knn_model.fit(X_train, y_train)
y_pred = knn_model.predict(X_test)
pm.confusion_matrix(y_test, y_pred)
tn, fp, fn, tp = pm.confusion_matrix(y_test, y_pred).ravel()
predictions = knn_model.predict(X_train)

# Show the confusion matrix as a heatmap from the seaborn library.
cf_matrix = pm.confusion_matrix(y_test, y_pred)
ax = sns.heatmap(cf_matrix, annot=True, cmap='Blues', fmt='.2f')
ax.set_title('Confusion Matrix\n\n')
ax.set_xlabel('\nActual Values')
ax.set_ylabel('Predicted Values ')
ax.xaxis.set_ticklabels(['healthy (0)', 'diabetic (1)'])
ax.yaxis.set_ticklabels(['healthy (0)', 'diabetic (1)'])
# Display the visualization of the confusion matrix.
plt.show()

nb_model = MultinomialNB()
nb_model.fit(X_train, y_train)
y_pred = nb_model.predict(X_test)
tn, fp, fn, tp = pm.confusion_matrix(y_test, y_pred).ravel()
predictions = nb_model.predict(X_train)
cf_matrix = pm.confusion_matrix(y_test, y_pred)
ax = sns.heatmap(cf_matrix, annot=True, cmap='Greens', fmt='.2f')
ax.set_title('Confusion Matrix\n\n')
ax.set_xlabel('\nActual Values')
ax.set_ylabel('Predicted Values ')
ax.xaxis.set_ticklabels(['healthy (0)', 'diabetic (1)'])
ax.yaxis.set_ticklabels(['healthy (0)', 'diabetic (1)'])
plt.show()
code
122259999/cell_7
[ "text_plain_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier
import sklearn.metrics as pm

knn_model = KNeighborsClassifier(n_neighbors=7)
knn_model.fit(X_train, y_train)
y_pred = knn_model.predict(X_test)
pm.confusion_matrix(y_test, y_pred)
tn, fp, fn, tp = pm.confusion_matrix(y_test, y_pred).ravel()
print('true negative: ', tn)
print('false positive: ', fp)
print('false negative: ', fn)
print('true positive: ', tp)
code
122259999/cell_18
[ "text_plain_output_1.png" ]
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
import matplotlib.pylab as plt
import numpy as np
import seaborn as sns
import sklearn.metrics as pm

knn_model = KNeighborsClassifier(n_neighbors=7)
knn_model.fit(X_train, y_train)
y_pred = knn_model.predict(X_test)
pm.confusion_matrix(y_test, y_pred)
tn, fp, fn, tp = pm.confusion_matrix(y_test, y_pred).ravel()
predictions = knn_model.predict(X_train)

# Show the confusion matrix as a heatmap from the seaborn library.
cf_matrix = pm.confusion_matrix(y_test, y_pred)
ax = sns.heatmap(cf_matrix, annot=True, cmap='Blues', fmt='.2f')
ax.set_title('Confusion Matrix\n\n')
ax.set_xlabel('\nActual Values')
ax.set_ylabel('Predicted Values ')
ax.xaxis.set_ticklabels(['healthy (0)', 'diabetic (1)'])
ax.yaxis.set_ticklabels(['healthy (0)', 'diabetic (1)'])
# Display the visualization of the confusion matrix.
plt.show()

nb_model = MultinomialNB()
nb_model.fit(X_train, y_train)
y_pred = nb_model.predict(X_test)
tn, fp, fn, tp = pm.confusion_matrix(y_test, y_pred).ravel()
predictions = nb_model.predict(X_train)
print('Performance measures over training data set:')
print(' - precision is ', pm.precision_score(y_train, predictions))
print(' - recall is ', pm.recall_score(y_train, predictions))
print(' - f-measure is', pm.f1_score(y_train, predictions))
code
122259999/cell_8
[ "text_plain_output_1.png" ]
# Precision-, recall- and F1-style ratios computed by hand, presumably from
# the confusion-matrix counts above (60 true positives against 49 and 46
# misclassifications); the third line is the harmonic mean of the first two.
print(60 / (60 + 49))
print(60 / (60 + 46))
print(2 * (60 / (60 + 49) * 60 / (60 + 46)) / (60 / (60 + 49) + 60 / (60 + 46)))
code
122259999/cell_15
[ "text_plain_output_1.png" ]
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier

knn_model = KNeighborsClassifier(n_neighbors=7)
knn_model.fit(X_train, y_train)
y_pred = knn_model.predict(X_test)
nb_model = MultinomialNB()
nb_model.fit(X_train, y_train)
y_pred = nb_model.predict(X_test)
print(y_pred)
print(y_test)
code
122259999/cell_16
[ "text_plain_output_1.png" ]
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
import matplotlib.pylab as plt
import numpy as np
import seaborn as sns
import sklearn.metrics as pm

knn_model = KNeighborsClassifier(n_neighbors=7)
knn_model.fit(X_train, y_train)
y_pred = knn_model.predict(X_test)
pm.confusion_matrix(y_test, y_pred)
tn, fp, fn, tp = pm.confusion_matrix(y_test, y_pred).ravel()
predictions = knn_model.predict(X_train)

# Show the confusion matrix as a heatmap from the seaborn library.
cf_matrix = pm.confusion_matrix(y_test, y_pred)
ax = sns.heatmap(cf_matrix, annot=True, cmap='Blues', fmt='.2f')
ax.set_title('Confusion Matrix\n\n')
ax.set_xlabel('\nActual Values')
ax.set_ylabel('Predicted Values ')
ax.xaxis.set_ticklabels(['healthy (0)', 'diabetic (1)'])
ax.yaxis.set_ticklabels(['healthy (0)', 'diabetic (1)'])
# Display the visualization of the confusion matrix.
plt.show()

nb_model = MultinomialNB()
nb_model.fit(X_train, y_train)
y_pred = nb_model.predict(X_test)
tn, fp, fn, tp = pm.confusion_matrix(y_test, y_pred).ravel()
print('true negative: ', tn)
print('false positive: ', fp)
print('false negative: ', fn)
print('true positive: ', tp)
code
122259999/cell_3
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd

df = pd.read_csv('/kaggle/input/review/diabetes.csv')
X = df.drop('Outcome', axis=1).values
y = df['Outcome'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=101)
print(df.head())
code
122259999/cell_17
[ "text_plain_output_1.png" ]
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
import matplotlib.pylab as plt
import numpy as np
import seaborn as sns
import sklearn.metrics as pm

knn_model = KNeighborsClassifier(n_neighbors=7)
knn_model.fit(X_train, y_train)
y_pred = knn_model.predict(X_test)
pm.confusion_matrix(y_test, y_pred)
tn, fp, fn, tp = pm.confusion_matrix(y_test, y_pred).ravel()
predictions = knn_model.predict(X_train)

# Show the confusion matrix as a heatmap from the seaborn library.
cf_matrix = pm.confusion_matrix(y_test, y_pred)
ax = sns.heatmap(cf_matrix, annot=True, cmap='Blues', fmt='.2f')
ax.set_title('Confusion Matrix\n\n')
ax.set_xlabel('\nActual Values')
ax.set_ylabel('Predicted Values ')
ax.xaxis.set_ticklabels(['healthy (0)', 'diabetic (1)'])
ax.yaxis.set_ticklabels(['healthy (0)', 'diabetic (1)'])
# Display the visualization of the confusion matrix.
plt.show()

nb_model = MultinomialNB()
nb_model.fit(X_train, y_train)
y_pred = nb_model.predict(X_test)
tn, fp, fn, tp = pm.confusion_matrix(y_test, y_pred).ravel()
print('Performance measures over testing data set:')
print(' - precision is ', pm.precision_score(y_test, y_pred))
print(' - recall is ', pm.recall_score(y_test, y_pred))
print(' - f-measure is', pm.f1_score(y_test, y_pred))
code
122259999/cell_24
[ "text_plain_output_1.png" ]
from sklearn.naive_bayes import GaussianNB

GNB_model = GaussianNB()
GNB_model.fit(X_train, y_train)
y_pred = GNB_model.predict(X_test)
GNB_model.predict([[5.1, 2.5, 3.0, 1.1]])
GNB_model.predict([[6.5, 3.0, 5.5, 1.8]])
code
122259999/cell_10
[ "text_plain_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier
import sklearn.metrics as pm

knn_model = KNeighborsClassifier(n_neighbors=7)
knn_model.fit(X_train, y_train)
y_pred = knn_model.predict(X_test)
pm.confusion_matrix(y_test, y_pred)
tn, fp, fn, tp = pm.confusion_matrix(y_test, y_pred).ravel()
predictions = knn_model.predict(X_train)
print('Performance measures over training data set:')
print(' - precision is ', pm.precision_score(y_train, predictions))
print(' - recall is ', pm.recall_score(y_train, predictions))
print(' - f-measure is', pm.f1_score(y_train, predictions))
code
122259999/cell_5
[ "text_plain_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier

knn_model = KNeighborsClassifier(n_neighbors=7)
knn_model.fit(X_train, y_train)
y_pred = knn_model.predict(X_test)
print(y_pred)
print(y_test)
code
2003618/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.tail()
code
2003618/cell_19
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_ = train['comment_text']
test_ = test['comment_text']
alldata = pd.concat([train_, test_], axis=0)
alldata = pd.DataFrame(alldata)
alldata.comment_text.fillna('blllllllllllllllllllllllaaaaaaaaaaaaaaaahhhhhhhh...!!!', inplace=True)
countvec = CountVectorizer(max_features=1500, ngram_range=(1, 2))
countvecdata = countvec.fit_transform(alldata['comment_text'])
countvec_df = pd.DataFrame(countvecdata.todense())
col = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
preds = np.zeros((test.shape[0], len(col)))
subm = pd.read_csv('sample_submission.csv')
submid = pd.DataFrame({'id': subm['id']})
submission = pd.concat([submid, pd.DataFrame(preds, columns=col)], axis=1)
submission.to_csv('submission_001.csv', index=False)
code
2003618/cell_18
[ "text_html_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
import lightgbm as lgbm
import numpy as np
import pandas as pd

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_ = train['comment_text']
test_ = test['comment_text']
alldata = pd.concat([train_, test_], axis=0)
alldata = pd.DataFrame(alldata)
alldata.comment_text.fillna('blllllllllllllllllllllllaaaaaaaaaaaaaaaahhhhhhhh...!!!', inplace=True)
countvec = CountVectorizer(max_features=1500, ngram_range=(1, 2))
countvecdata = countvec.fit_transform(alldata['comment_text'])
countvec_df = pd.DataFrame(countvecdata.todense())
countvec_df_train = countvecdata[:len(train_)]
countvec_df_test = countvecdata[len(train_):]
countvec_df_train_ = countvec_df_train.astype('float32')
countvec_df_test_ = countvec_df_test.astype('float32')
col = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
preds = np.zeros((test.shape[0], len(col)))
params = {'objective': 'binary', 'learning_rate': 0.02, 'num_leaves': 76, 'feature_fraction': 0.64, 'bagging_fraction': 0.8, 'bagging_freq': 1, 'boosting_type': 'gbdt', 'metric': 'binary_logloss'}
for i, j in enumerate(col):
    print('fitting column : ' + j)
    X_train, X_valid, Y_train, Y_valid = train_test_split(countvec_df_train_, train[j], random_state=7, test_size=0.33)
    d_train = lgbm.Dataset(X_train, Y_train)
    d_valid = lgbm.Dataset(X_valid, Y_valid)
    bst = lgbm.train(params, d_train, 5000, valid_sets=[d_valid], verbose_eval=50, early_stopping_rounds=100)
    print('predicting for :' + j)
    preds[:, i] = bst.predict(countvec_df_test_)
print('Finished Training')
code
128011911/cell_6
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords
import pandas as pd
import re
import string

df = pd.read_csv('/kaggle/input/sentiment140/training.1600000.processed.noemoticon.csv', encoding='latin', header=None)
df.columns = ['sentiment', 'id', 'date', 'query', 'user_id', 'text']
df = df.drop(['id', 'date', 'query', 'user_id'], axis=1)
df.sentiment = df.sentiment.map({0: 0, 4: 1})
stop_words = set(stopwords.words('english'))

def data_preprocessing(text):
    text = text.lower()
    text = re.sub('<.*?>', '', text)
    text = ''.join([c for c in text if c not in string.punctuation])
    text = [word for word in text.split() if word not in stop_words]
    text = ' '.join(text)
    return text

df['text'] = df['text'].astype(str).apply(data_preprocessing)
MAX_LEN = max([len(x.split()) for x in df['text']])
print(MAX_LEN)
code
128011911/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128011911/cell_18
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import re
import string
import tensorflow as tf
import torch
import torch.nn as nn

df = pd.read_csv('/kaggle/input/sentiment140/training.1600000.processed.noemoticon.csv', encoding='latin', header=None)
df.columns = ['sentiment', 'id', 'date', 'query', 'user_id', 'text']
df = df.drop(['id', 'date', 'query', 'user_id'], axis=1)
df.sentiment = df.sentiment.map({0: 0, 4: 1})
stop_words = set(stopwords.words('english'))

def data_preprocessing(text):
    text = text.lower()
    text = re.sub('<.*?>', '', text)
    text = ''.join([c for c in text if c not in string.punctuation])
    text = [word for word in text.split() if word not in stop_words]
    text = ' '.join(text)
    return text

df['text'] = df['text'].astype(str).apply(data_preprocessing)
MAX_LEN = max([len(x.split()) for x in df['text']])

class Dataset:
    def __init__(self, text, sentiment):
        self.text = text
        self.sentiment = sentiment

    def __len__(self):
        return len(self.text)

    def __getitem__(self, item):
        text = self.text[item, :]
        target = self.sentiment[item]
        return {'text': torch.tensor(text, dtype=torch.long), 'target': torch.tensor(target, dtype=torch.long)}

def load_vectors(fname):
    fin = open(fname)
    data = {}
    for line in fin:
        tokens = line.split()
        data[tokens[0]] = np.array([float(value) for value in tokens[1:]])
    return data

def create_embedding_matrix(word_index, embedding_dict):
    """
    This function creates the embedding matrix
    :param word_index: a dictionary of word: index_value
    :param embedding_dict:
    :return a numpy array with embedding vectors for all known words
    """
    embedding_matrix = np.zeros((len(word_index) + 1, 300))
    for word, i in word_index.items():
        if word in embedding_dict:
            embedding_matrix[i] = embedding_dict[word]
    return embedding_matrix

class sentimentBiLSTM(nn.Module):
    """
    The RNN model that will be used to perform Sentiment analysis.
    """

    def __init__(self, embedding_matrix, hidden_dim, output_size):
        """
        Initialize the model by setting up the layers.
        """
        super(sentimentBiLSTM, self).__init__()
        self.embedding_matrix = embedding_matrix
        self.hidden_dim = hidden_dim
        num_words = self.embedding_matrix.shape[0]
        embed_dim = self.embedding_matrix.shape[1]
        self.embedding = nn.Embedding(num_embeddings=num_words, embedding_dim=embed_dim)
        self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32))
        self.embedding.weight.requires_grad = False
        self.lstm = nn.LSTM(embed_dim, hidden_dim, bidirectional=True, batch_first=True)
        self.fc = nn.Linear(hidden_dim * 2, output_size)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        batch_size = x.size(0)
        embeds = self.embedding(x)
        lstm_out, _ = self.lstm(embeds)
        lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim * 2)
        out = self.fc(lstm_out)
        sig_out = self.sigmoid(out)
        sig_out = sig_out.view(batch_size, -1)
        sig_out = sig_out[:, -1]
        return sig_out

y = df.sentiment.values
train_df, test_df = train_test_split(df, test_size=0.2, stratify=y)

BATCH_SIZE = 16
tokenizer = tf.keras.preprocessing.text.Tokenizer()
tokenizer.fit_on_texts(df.text.values.tolist())
xtrain = tokenizer.texts_to_sequences(train_df.text.values)
xtest = tokenizer.texts_to_sequences(test_df.text.values)
xtrain = tf.keras.preprocessing.sequence.pad_sequences(xtrain, maxlen=MAX_LEN)
xtest = tf.keras.preprocessing.sequence.pad_sequences(xtest, maxlen=MAX_LEN)
train_dataset = Dataset(text=xtrain, sentiment=train_df.sentiment.values)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE, drop_last=True)
valid_dataset = Dataset(text=xtest, sentiment=test_df.sentiment.values)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=BATCH_SIZE, drop_last=True)

embedding_dict = load_vectors('../input/glove-embeddings/glove.6B.300d.txt')
embedding_matrix = create_embedding_matrix(tokenizer.word_index, embedding_dict)

if torch.cuda.is_available():
    device = torch.device('cuda')
    print('GPU is available')
else:
    device = torch.device('cpu')
    print('GPU not available, CPU used')

hidden_dim = 64
output_size = 1
model = sentimentBiLSTM(embedding_matrix, hidden_dim, output_size)
model = model.to(device)
print(model)
code
128011911/cell_15
[ "text_html_output_1.png" ]
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import re
import string
import tensorflow as tf
import torch
import torch.nn as nn

df = pd.read_csv('/kaggle/input/sentiment140/training.1600000.processed.noemoticon.csv', encoding='latin', header=None)
df.columns = ['sentiment', 'id', 'date', 'query', 'user_id', 'text']
df = df.drop(['id', 'date', 'query', 'user_id'], axis=1)
df.sentiment = df.sentiment.map({0: 0, 4: 1})
stop_words = set(stopwords.words('english'))

def data_preprocessing(text):
    text = text.lower()
    text = re.sub('<.*?>', '', text)
    text = ''.join([c for c in text if c not in string.punctuation])
    text = [word for word in text.split() if word not in stop_words]
    text = ' '.join(text)
    return text

df['text'] = df['text'].astype(str).apply(data_preprocessing)
MAX_LEN = max([len(x.split()) for x in df['text']])

class Dataset:
    def __init__(self, text, sentiment):
        self.text = text
        self.sentiment = sentiment

    def __len__(self):
        return len(self.text)

    def __getitem__(self, item):
        text = self.text[item, :]
        target = self.sentiment[item]
        return {'text': torch.tensor(text, dtype=torch.long), 'target': torch.tensor(target, dtype=torch.long)}

def load_vectors(fname):
    fin = open(fname)
    data = {}
    for line in fin:
        tokens = line.split()
        data[tokens[0]] = np.array([float(value) for value in tokens[1:]])
    return data

def create_embedding_matrix(word_index, embedding_dict):
    """
    This function creates the embedding matrix
    :param word_index: a dictionary of word: index_value
    :param embedding_dict:
    :return a numpy array with embedding vectors for all known words
    """
    embedding_matrix = np.zeros((len(word_index) + 1, 300))
    for word, i in word_index.items():
        if word in embedding_dict:
            embedding_matrix[i] = embedding_dict[word]
    return embedding_matrix

class sentimentBiLSTM(nn.Module):
    """
    The RNN model that will be used to perform Sentiment analysis.
    """

    def __init__(self, embedding_matrix, hidden_dim, output_size):
        """
        Initialize the model by setting up the layers.
        """
        super(sentimentBiLSTM, self).__init__()
        self.embedding_matrix = embedding_matrix
        self.hidden_dim = hidden_dim
        num_words = self.embedding_matrix.shape[0]
        embed_dim = self.embedding_matrix.shape[1]
        self.embedding = nn.Embedding(num_embeddings=num_words, embedding_dim=embed_dim)
        self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32))
        self.embedding.weight.requires_grad = False
        self.lstm = nn.LSTM(embed_dim, hidden_dim, bidirectional=True, batch_first=True)
        self.fc = nn.Linear(hidden_dim * 2, output_size)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        batch_size = x.size(0)
        embeds = self.embedding(x)
        lstm_out, _ = self.lstm(embeds)
        lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim * 2)
        out = self.fc(lstm_out)
        sig_out = self.sigmoid(out)
        sig_out = sig_out.view(batch_size, -1)
        sig_out = sig_out[:, -1]
        return sig_out

y = df.sentiment.values
train_df, test_df = train_test_split(df, test_size=0.2, stratify=y)

BATCH_SIZE = 16
print('Fitting tokenizer')
tokenizer = tf.keras.preprocessing.text.Tokenizer()
tokenizer.fit_on_texts(df.text.values.tolist())
xtrain = tokenizer.texts_to_sequences(train_df.text.values)
xtest = tokenizer.texts_to_sequences(test_df.text.values)
xtrain = tf.keras.preprocessing.sequence.pad_sequences(xtrain, maxlen=MAX_LEN)
xtest = tf.keras.preprocessing.sequence.pad_sequences(xtest, maxlen=MAX_LEN)
train_dataset = Dataset(text=xtrain, sentiment=train_df.sentiment.values)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE, drop_last=True)
valid_dataset = Dataset(text=xtest, sentiment=test_df.sentiment.values)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=BATCH_SIZE, drop_last=True)
code
128011911/cell_16
[ "text_html_output_1.png" ]
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import re
import string
import tensorflow as tf
import torch
import torch.nn as nn

df = pd.read_csv('/kaggle/input/sentiment140/training.1600000.processed.noemoticon.csv', encoding='latin', header=None)
df.columns = ['sentiment', 'id', 'date', 'query', 'user_id', 'text']
df = df.drop(['id', 'date', 'query', 'user_id'], axis=1)
df.sentiment = df.sentiment.map({0: 0, 4: 1})
stop_words = set(stopwords.words('english'))

def data_preprocessing(text):
    text = text.lower()
    text = re.sub('<.*?>', '', text)
    text = ''.join([c for c in text if c not in string.punctuation])
    text = [word for word in text.split() if word not in stop_words]
    text = ' '.join(text)
    return text

df['text'] = df['text'].astype(str).apply(data_preprocessing)
MAX_LEN = max([len(x.split()) for x in df['text']])

class Dataset:
    def __init__(self, text, sentiment):
        self.text = text
        self.sentiment = sentiment

    def __len__(self):
        return len(self.text)

    def __getitem__(self, item):
        text = self.text[item, :]
        target = self.sentiment[item]
        return {'text': torch.tensor(text, dtype=torch.long), 'target': torch.tensor(target, dtype=torch.long)}

def load_vectors(fname):
    fin = open(fname)
    data = {}
    for line in fin:
        tokens = line.split()
        data[tokens[0]] = np.array([float(value) for value in tokens[1:]])
    return data

def create_embedding_matrix(word_index, embedding_dict):
    """
    This function creates the embedding matrix
    :param word_index: a dictionary of word: index_value
    :param embedding_dict:
    :return a numpy array with embedding vectors for all known words
    """
    embedding_matrix = np.zeros((len(word_index) + 1, 300))
    for word, i in word_index.items():
        if word in embedding_dict:
            embedding_matrix[i] = embedding_dict[word]
    return embedding_matrix

class sentimentBiLSTM(nn.Module):
    """
    The RNN model that will be used to perform Sentiment analysis.
    """

    def __init__(self, embedding_matrix, hidden_dim, output_size):
        """
        Initialize the model by setting up the layers.
        """
        super(sentimentBiLSTM, self).__init__()
        self.embedding_matrix = embedding_matrix
        self.hidden_dim = hidden_dim
        num_words = self.embedding_matrix.shape[0]
        embed_dim = self.embedding_matrix.shape[1]
        self.embedding = nn.Embedding(num_embeddings=num_words, embedding_dim=embed_dim)
        self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32))
        self.embedding.weight.requires_grad = False
        self.lstm = nn.LSTM(embed_dim, hidden_dim, bidirectional=True, batch_first=True)
        self.fc = nn.Linear(hidden_dim * 2, output_size)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        batch_size = x.size(0)
        embeds = self.embedding(x)
        lstm_out, _ = self.lstm(embeds)
        lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim * 2)
        out = self.fc(lstm_out)
        sig_out = self.sigmoid(out)
        sig_out = sig_out.view(batch_size, -1)
        sig_out = sig_out[:, -1]
        return sig_out

y = df.sentiment.values
train_df, test_df = train_test_split(df, test_size=0.2, stratify=y)

BATCH_SIZE = 16
tokenizer = tf.keras.preprocessing.text.Tokenizer()
tokenizer.fit_on_texts(df.text.values.tolist())
xtrain = tokenizer.texts_to_sequences(train_df.text.values)
xtest = tokenizer.texts_to_sequences(test_df.text.values)
xtrain = tf.keras.preprocessing.sequence.pad_sequences(xtrain, maxlen=MAX_LEN)
xtest = tf.keras.preprocessing.sequence.pad_sequences(xtest, maxlen=MAX_LEN)
train_dataset = Dataset(text=xtrain, sentiment=train_df.sentiment.values)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE, drop_last=True)
valid_dataset = Dataset(text=xtest, sentiment=test_df.sentiment.values)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=BATCH_SIZE, drop_last=True)

embedding_dict = load_vectors('../input/glove-embeddings/glove.6B.300d.txt')
embedding_matrix = create_embedding_matrix(tokenizer.word_index, embedding_dict)
print('Embeddings loaded')
code
128011911/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/sentiment140/training.1600000.processed.noemoticon.csv', encoding='latin', header=None)
df.columns = ['sentiment', 'id', 'date', 'query', 'user_id', 'text']
df = df.drop(['id', 'date', 'query', 'user_id'], axis=1)
df.head()
code
128011911/cell_5
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords
import pandas as pd
import re
import string

df = pd.read_csv('/kaggle/input/sentiment140/training.1600000.processed.noemoticon.csv', encoding='latin', header=None)
df.columns = ['sentiment', 'id', 'date', 'query', 'user_id', 'text']
df = df.drop(['id', 'date', 'query', 'user_id'], axis=1)
df.sentiment = df.sentiment.map({0: 0, 4: 1})
stop_words = set(stopwords.words('english'))

def data_preprocessing(text):
    text = text.lower()
    text = re.sub('<.*?>', '', text)
    text = ''.join([c for c in text if c not in string.punctuation])
    text = [word for word in text.split() if word not in stop_words]
    text = ' '.join(text)
    return text

df['text'] = df['text'].astype(str).apply(data_preprocessing)
df.head()
code
73089211/cell_2
[ "text_html_output_1.png" ]
import pandas as pd

X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
X_full.describe(include='all')
code
73089211/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
73089211/cell_3
[ "text_html_output_1.png" ]
import pandas as pd

X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
X_full.head()
code
73089211/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
X_full.isna().sum()
code
33115444/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, cross_validate
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
import numpy as np
import os
import pandas as pd

data = pd.read_csv('/kaggle/input/otto-group-product-classification-challenge/train.csv')
data.sample(10)
X = np.asarray(data.iloc[:, 1:-1].dropna(), dtype=np.float32)
Y = np.asarray(data.iloc[:, -1])
X_standard = StandardScaler().fit_transform(X)
X_standard
km = KMeans(n_clusters=9)
cv_results = cross_validate(km, X_standard, Y, cv=5)
cv_results['test_score']
rfc = RandomForestClassifier()
parameters = {'n_estimators': [50, 100, 200]}
clf = GridSearchCV(rfc, parameters, cv=5)
clf.fit(X_standard, Y)
cv_results = clf.cv_results_
cv_results
rfc = RandomForestClassifier()
parameters = {'n_estimators': [50, 100, 200]}
clf = GridSearchCV(rfc, parameters, cv=5)
clf.fit(X, Y)
cv_results = clf.cv_results_
cv_results
code
33115444/cell_4
[ "text_plain_output_1.png" ]
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
import os
import pandas as pd

data = pd.read_csv('/kaggle/input/otto-group-product-classification-challenge/train.csv')
data.sample(10)
X = np.asarray(data.iloc[:, 1:-1].dropna(), dtype=np.float32)
Y = np.asarray(data.iloc[:, -1])
np.sum(data.iloc[:, 1:94] > 40)
code
33115444/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
import numpy as np
import os
import pandas as pd

data = pd.read_csv('/kaggle/input/otto-group-product-classification-challenge/train.csv')
data.sample(10)
X = np.asarray(data.iloc[:, 1:-1].dropna(), dtype=np.float32)
Y = np.asarray(data.iloc[:, -1])
X_standard = StandardScaler().fit_transform(X)
X_standard
code
33115444/cell_2
[ "text_html_output_1.png" ]
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
import os
import pandas as pd

data = pd.read_csv('/kaggle/input/otto-group-product-classification-challenge/train.csv')
data.sample(10)
X = np.asarray(data.iloc[:, 1:-1].dropna(), dtype=np.float32)
print(X.shape)
Y = np.asarray(data.iloc[:, -1])
print(Y, Y.shape, len(np.unique(Y)))
code
33115444/cell_11
[ "text_plain_output_1.png" ]
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, cross_validate
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
import numpy as np
import os
import pandas as pd

data = pd.read_csv('/kaggle/input/otto-group-product-classification-challenge/train.csv')
data.sample(10)
X = np.asarray(data.iloc[:, 1:-1].dropna(), dtype=np.float32)
Y = np.asarray(data.iloc[:, -1])
np.sum(data.iloc[:, 1:94] > 40)
X_standard = StandardScaler().fit_transform(X)
X_standard
km = KMeans(n_clusters=9)
cv_results = cross_validate(km, X_standard, Y, cv=5)
cv_results['test_score']
rfc = RandomForestClassifier()
parameters = {'n_estimators': [50, 100, 200]}
clf = GridSearchCV(rfc, parameters, cv=5)
clf.fit(X_standard, Y)
cv_results = clf.cv_results_
cv_results
rfc = RandomForestClassifier()
parameters = {'n_estimators': [50, 100, 200]}
clf = GridSearchCV(rfc, parameters, cv=5)
clf.fit(X, Y)
cv_results = clf.cv_results_
cv_results
test_data = pd.read_csv('/kaggle/input/otto-group-product-classification-challenge/test.csv')
X_test = np.asarray(test_data.iloc[:, 1:].dropna(), dtype=np.float32)
rfc = RandomForestClassifier(n_estimators=200)
rfc.fit(X, Y)
y = rfc.predict(X_test)
c1 = np.zeros(X_test.shape[0], dtype=int)
c1[y == 'Class_1'] = 1
c2 = np.zeros(X_test.shape[0], dtype=int)
c2[y == 'Class_2'] = 1
c3 = np.zeros(X_test.shape[0], dtype=int)
c3[y == 'Class_3'] = 1
c4 = np.zeros(X_test.shape[0], dtype=int)
c4[y == 'Class_4'] = 1
c5 = np.zeros(X_test.shape[0], dtype=int)
c5[y == 'Class_5'] = 1
c6 = np.zeros(X_test.shape[0], dtype=int)
c6[y == 'Class_6'] = 1
c7 = np.zeros(X_test.shape[0], dtype=int)
c7[y == 'Class_7'] = 1
c8 = np.zeros(X_test.shape[0], dtype=int)
c8[y == 'Class_8'] = 1
c9 = np.zeros(X_test.shape[0], dtype=int)
c9[y == 'Class_9'] = 1
o = test_data[['id']]
o['Class_1'] = pd.Series(c1)
o['Class_2'] = pd.Series(c2)
o['Class_3'] = pd.Series(c3)
o['Class_4'] = pd.Series(c4)
o['Class_5'] = pd.Series(c5)
o['Class_6'] = pd.Series(c6)
o['Class_7'] = pd.Series(c7)
o['Class_8'] = pd.Series(c8)
o['Class_9'] = pd.Series(c9)
o.head()
o.to_csv('submit.csv', index=False)
code
33115444/cell_1
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
import os
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
data = pd.read_csv('/kaggle/input/otto-group-product-classification-challenge/train.csv')
data.sample(10)
code
33115444/cell_7
[ "text_plain_output_1.png" ]
from sklearn.cluster import KMeans
from sklearn.model_selection import GridSearchCV, cross_validate
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
import numpy as np
import os
import pandas as pd

data = pd.read_csv('/kaggle/input/otto-group-product-classification-challenge/train.csv')
data.sample(10)
X = np.asarray(data.iloc[:, 1:-1].dropna(), dtype=np.float32)
Y = np.asarray(data.iloc[:, -1])
X_standard = StandardScaler().fit_transform(X)
X_standard
km = KMeans(n_clusters=9)
cv_results = cross_validate(km, X_standard, Y, cv=5)
cv_results['test_score']
code
33115444/cell_8
[ "text_plain_output_1.png" ]
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, cross_validate
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
import numpy as np
import os
import pandas as pd

data = pd.read_csv('/kaggle/input/otto-group-product-classification-challenge/train.csv')
data.sample(10)
X = np.asarray(data.iloc[:, 1:-1].dropna(), dtype=np.float32)
Y = np.asarray(data.iloc[:, -1])
X_standard = StandardScaler().fit_transform(X)
X_standard
km = KMeans(n_clusters=9)
cv_results = cross_validate(km, X_standard, Y, cv=5)
cv_results['test_score']
rfc = RandomForestClassifier()
parameters = {'n_estimators': [50, 100, 200]}
clf = GridSearchCV(rfc, parameters, cv=5)
clf.fit(X_standard, Y)
cv_results = clf.cv_results_
cv_results
code
33115444/cell_3
[ "text_plain_output_1.png" ]
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
import os
import pandas as pd

data = pd.read_csv('/kaggle/input/otto-group-product-classification-challenge/train.csv')
data.sample(10)
X = np.asarray(data.iloc[:, 1:-1].dropna(), dtype=np.float32)
Y = np.asarray(data.iloc[:, -1])
data.describe()
code
33115444/cell_5
[ "text_plain_output_1.png" ]
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
import os
import pandas as pd

data = pd.read_csv('/kaggle/input/otto-group-product-classification-challenge/train.csv')
data.sample(10)
X = np.asarray(data.iloc[:, 1:-1].dropna(), dtype=np.float32)
Y = np.asarray(data.iloc[:, -1])
np.sum(data.iloc[:, 1:94] > 40)
data.isnull().sum()
code
323155/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.linear_model as sk
from sklearn import preprocessing

full_data_set = pd.read_csv('../input/nflplaybyplay2015.csv', low_memory=False)
Pass_Plays = full_data_set.loc[full_data_set.PlayType == 'Pass']
Sack_Plays = full_data_set.loc[full_data_set.PlayType == 'Sack']
P_S_data = pd.concat([Pass_Plays, Sack_Plays])
good_columns = ['Drive', 'qtr', 'down', 'TimeUnder', 'TimeSecs', 'PlayTimeDiff', 'yrdline100', 'ydstogo']
good_columns += ['ScoreDiff', 'PosTeamScore', 'DefTeamScore']
good_columns += ['Sack']
uncleaned_data = P_S_data[good_columns]
uncleaned_data.qtr.unique()

def quarter_binary(df, name, number):
    df[name] = np.where(df['qtr'] == number, 1, 0)
    return df

for x in [['qt1', 1], ['qt2', 2], ['qt3', 3], ['qt4', 4], ['qt5', 5]]:
    uncleaned_data = quarter_binary(uncleaned_data, x[0], x[1])
del uncleaned_data['qtr']

logreg = sk.LogisticRegressionCV()
logreg.fit(X_all, y_all)
coef_array = np.abs(logreg.coef_)
x = np.arange(1, coef_array.shape[1] + 1, 1)
plt.scatter(x, coef_array, marker='x', color='r')
plt.axhline(0, color='b')
code
323155/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import numpy as np
import sklearn.linear_model as sk
from sklearn import preprocessing
full_data_set = pd.read_csv('../input/nflplaybyplay2015.csv', low_memory=False)
Pass_Plays = full_data_set.loc[full_data_set.PlayType == 'Pass']
Sack_Plays = full_data_set.loc[full_data_set.PlayType == 'Sack']
P_S_data = pd.concat([Pass_Plays, Sack_Plays])
good_columns = ['Drive', 'qtr', 'down', 'TimeUnder', 'TimeSecs', 'PlayTimeDiff', 'yrdline100', 'ydstogo']
good_columns += ['ScoreDiff', 'PosTeamScore', 'DefTeamScore']
good_columns += ['Sack']
uncleaned_data = P_S_data[good_columns]
uncleaned_data.qtr.unique()
code
323155/cell_7
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import sklearn.linear_model as sk
from sklearn import preprocessing
full_data_set = pd.read_csv('../input/nflplaybyplay2015.csv', low_memory=False)
Pass_Plays = full_data_set.loc[full_data_set.PlayType == 'Pass']
Sack_Plays = full_data_set.loc[full_data_set.PlayType == 'Sack']
P_S_data = pd.concat([Pass_Plays, Sack_Plays])
good_columns = ['Drive', 'qtr', 'down', 'TimeUnder', 'TimeSecs', 'PlayTimeDiff', 'yrdline100', 'ydstogo']
good_columns += ['ScoreDiff', 'PosTeamScore', 'DefTeamScore']
good_columns += ['Sack']
uncleaned_data = P_S_data[good_columns]
uncleaned_data.qtr.unique()
def quarter_binary(df, name, number):
    df[name] = np.where(df['qtr'] == number, 1, 0)
    return df
for x in [['qt1', 1], ['qt2', 2], ['qt3', 3], ['qt4', 4], ['qt5', 5]]:
    uncleaned_data = quarter_binary(uncleaned_data, x[0], x[1])
del uncleaned_data['qtr']
code
323155/cell_14
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import sklearn.linear_model as sk
from sklearn import preprocessing
full_data_set = pd.read_csv('../input/nflplaybyplay2015.csv', low_memory=False)
Pass_Plays = full_data_set.loc[full_data_set.PlayType == 'Pass']
Sack_Plays = full_data_set.loc[full_data_set.PlayType == 'Sack']
P_S_data = pd.concat([Pass_Plays, Sack_Plays])
good_columns = ['Drive', 'qtr', 'down', 'TimeUnder', 'TimeSecs', 'PlayTimeDiff', 'yrdline100', 'ydstogo']
good_columns += ['ScoreDiff', 'PosTeamScore', 'DefTeamScore']
good_columns += ['Sack']
uncleaned_data = P_S_data[good_columns]
uncleaned_data.qtr.unique()
def quarter_binary(df, name, number):
    df[name] = np.where(df['qtr'] == number, 1, 0)
    return df
for x in [['qt1', 1], ['qt2', 2], ['qt3', 3], ['qt4', 4], ['qt5', 5]]:
    uncleaned_data = quarter_binary(uncleaned_data, x[0], x[1])
del uncleaned_data['qtr']
cleaned_data = uncleaned_data.dropna()
explanatory_variables = cleaned_data.columns
explanatory_variables[1]
code
89129128/cell_9
[ "image_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
pipeline = Pipeline(steps=[('vect', CountVectorizer()), ('cls', SVC())])
parameters = {'cls__C': (0.001, 0.01, 1, 10), 'cls__kernel': ['linear', 'poly', 'rbf'], 'cls__degree': (2, 3, 4), 'cls__gamma': [0.0001, 0.001, 0.01, 0.1, 1]}
grid_search = GridSearchCV(estimator=pipeline, param_grid=parameters, scoring='roc_auc', verbose=1, n_jobs=-1, cv=10)
grid_search.fit(X_train, y_train)
grid_search.best_params_
code
89129128/cell_4
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/dementiapredictionnlp/Data.csv', sep=';')
len(df)
code
89129128/cell_20
[ "text_html_output_1.png" ]
from sklearn import metrics
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/dementiapredictionnlp/Data.csv', sep=';')
pipeline = Pipeline(steps=[('vect', CountVectorizer()), ('cls', SVC())])
parameters = {'cls__C': (0.001, 0.01, 1, 10), 'cls__kernel': ['linear', 'poly', 'rbf'], 'cls__degree': (2, 3, 4), 'cls__gamma': [0.0001, 0.001, 0.01, 0.1, 1]}
grid_search = GridSearchCV(estimator=pipeline, param_grid=parameters, scoring='roc_auc', verbose=1, n_jobs=-1, cv=10)
grid_search.fit(X_train, y_train)
grid_search.best_params_
best_svm = grid_search.best_estimator_
from sklearn.metrics import accuracy_score
predictions = best_svm.predict(X_test)
confusion_matrix_model = metrics.confusion_matrix(y_test, predictions)
target = ['Control', 'Dementia']
confusion_matrix_df = pd.DataFrame(confusion_matrix_model, index=target, columns=target)
confusion_matrix_df.columns.name = 'Predicted'
confusion_matrix_df.index.name = 'Real'
confusion_matrix_df
pipeline_other = Pipeline(steps=[('vect', CountVectorizer()), ('cls', SVC())])
parameters_other = {'cls__C': [0.01], 'cls__kernel': ['linear'], 'cls__probability': [True], 'cls__class_weight': ['balanced']}
grid_search_other = GridSearchCV(estimator=pipeline_other, param_grid=parameters_other, scoring='roc_auc', verbose=1, n_jobs=-1, cv=10)
grid_search_other.fit(X_train_other, y_train_other)
best_svm_app = grid_search_other.best_estimator_
predictions_other = best_svm_app.predict(X_test_other)
confusion_matrix_model_other = metrics.confusion_matrix(y_test_other, predictions_other)
target_other = ['Control', 'Dementia']
confusion_matrix_df_other = pd.DataFrame(confusion_matrix_model_other, index=target_other, columns=target_other)
confusion_matrix_df_other.columns.name = 'Predicted'
confusion_matrix_df_other.index.name = 'Real'
confusion_matrix_df_other
fpr_lr, tpr_lr, thresholds_lr = metrics.roc_curve(y_test, predictions)
auc_lr = metrics.roc_auc_score(y_test, predictions)
fpr_svm, tpr_svm, thresholds_svm = metrics.roc_curve(y_test_other, predictions_other)
auc_svm = metrics.roc_auc_score(y_test_other, predictions_other)
fig, ax = plt.subplots(figsize=(7, 7))
plt.xticks(np.arange(0, 1, 0.1))
plt.yticks(np.arange(0, 1, 0.1))
plt.plot([0, 1], [0, 1], linestyle='--')
ax.plot(fpr_lr, tpr_lr, marker='.', color='b', label='LinearSVC_one, AUC = ' + str(round(auc_lr, 4)))
ax.plot(fpr_svm, tpr_svm, marker='o', color='r', label='LinearSVC, AUC = ' + str(round(auc_svm, 4)))
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.title('ROC Curve')
plt.grid(True)
legend = ax.legend(loc='lower right', shadow=True, fontsize='medium')
code
89129128/cell_11
[ "text_html_output_1.png" ]
from sklearn import metrics
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
import pandas as pd
df = pd.read_csv('/kaggle/input/dementiapredictionnlp/Data.csv', sep=';')
pipeline = Pipeline(steps=[('vect', CountVectorizer()), ('cls', SVC())])
parameters = {'cls__C': (0.001, 0.01, 1, 10), 'cls__kernel': ['linear', 'poly', 'rbf'], 'cls__degree': (2, 3, 4), 'cls__gamma': [0.0001, 0.001, 0.01, 0.1, 1]}
grid_search = GridSearchCV(estimator=pipeline, param_grid=parameters, scoring='roc_auc', verbose=1, n_jobs=-1, cv=10)
grid_search.fit(X_train, y_train)
grid_search.best_params_
best_svm = grid_search.best_estimator_
from sklearn.metrics import accuracy_score
predictions = best_svm.predict(X_test)
confusion_matrix_model = metrics.confusion_matrix(y_test, predictions)
target = ['Control', 'Dementia']
confusion_matrix_df = pd.DataFrame(confusion_matrix_model, index=target, columns=target)
confusion_matrix_df.columns.name = 'Predicted'
confusion_matrix_df.index.name = 'Real'
confusion_matrix_df
code
89129128/cell_1
[ "text_plain_output_1.png" ]
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
89129128/cell_18
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
import pandas as pd
df = pd.read_csv('/kaggle/input/dementiapredictionnlp/Data.csv', sep=';')
pipeline = Pipeline(steps=[('vect', CountVectorizer()), ('cls', SVC())])
parameters = {'cls__C': (0.001, 0.01, 1, 10), 'cls__kernel': ['linear', 'poly', 'rbf'], 'cls__degree': (2, 3, 4), 'cls__gamma': [0.0001, 0.001, 0.01, 0.1, 1]}
grid_search = GridSearchCV(estimator=pipeline, param_grid=parameters, scoring='roc_auc', verbose=1, n_jobs=-1, cv=10)
grid_search.fit(X_train, y_train)
grid_search.best_params_
best_svm = grid_search.best_estimator_
from sklearn.metrics import accuracy_score
predictions = best_svm.predict(X_test)
confusion_matrix_model = metrics.confusion_matrix(y_test, predictions)
target = ['Control', 'Dementia']
confusion_matrix_df = pd.DataFrame(confusion_matrix_model, index=target, columns=target)
confusion_matrix_df.columns.name = 'Predicted'
confusion_matrix_df.index.name = 'Real'
confusion_matrix_df
pipeline_other = Pipeline(steps=[('vect', CountVectorizer()), ('cls', SVC())])
parameters_other = {'cls__C': [0.01], 'cls__kernel': ['linear'], 'cls__probability': [True], 'cls__class_weight': ['balanced']}
grid_search_other = GridSearchCV(estimator=pipeline_other, param_grid=parameters_other, scoring='roc_auc', verbose=1, n_jobs=-1, cv=10)
grid_search_other.fit(X_train_other, y_train_other)
best_svm_app = grid_search_other.best_estimator_
predictions_other = best_svm_app.predict(X_test_other)
confusion_matrix_model_other = metrics.confusion_matrix(y_test_other, predictions_other)
target_other = ['Control', 'Dementia']
confusion_matrix_df_other = pd.DataFrame(confusion_matrix_model_other, index=target_other, columns=target_other)
confusion_matrix_df_other.columns.name = 'Predicted'
confusion_matrix_df_other.index.name = 'Real'
confusion_matrix_df_other
print('accuracy: ', metrics.accuracy_score(y_test_other, predictions_other))
print('precision: ', metrics.precision_score(y_test_other, predictions_other))
print('recall: ', metrics.recall_score(y_test_other, predictions_other))
print('f1: ', metrics.f1_score(y_test_other, predictions_other))
code
89129128/cell_8
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
pipeline = Pipeline(steps=[('vect', CountVectorizer()), ('cls', SVC())])
parameters = {'cls__C': (0.001, 0.01, 1, 10), 'cls__kernel': ['linear', 'poly', 'rbf'], 'cls__degree': (2, 3, 4), 'cls__gamma': [0.0001, 0.001, 0.01, 0.1, 1]}
grid_search = GridSearchCV(estimator=pipeline, param_grid=parameters, scoring='roc_auc', verbose=1, n_jobs=-1, cv=10)
grid_search.fit(X_train, y_train)
code
89129128/cell_15
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
pipeline_other = Pipeline(steps=[('vect', CountVectorizer()), ('cls', SVC())])
parameters_other = {'cls__C': [0.01], 'cls__kernel': ['linear'], 'cls__probability': [True], 'cls__class_weight': ['balanced']}
grid_search_other = GridSearchCV(estimator=pipeline_other, param_grid=parameters_other, scoring='roc_auc', verbose=1, n_jobs=-1, cv=10)
grid_search_other.fit(X_train_other, y_train_other)
code
89129128/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/dementiapredictionnlp/Data.csv', sep=';')
df.head()
code
89129128/cell_17
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
import pandas as pd
df = pd.read_csv('/kaggle/input/dementiapredictionnlp/Data.csv', sep=';')
pipeline = Pipeline(steps=[('vect', CountVectorizer()), ('cls', SVC())])
parameters = {'cls__C': (0.001, 0.01, 1, 10), 'cls__kernel': ['linear', 'poly', 'rbf'], 'cls__degree': (2, 3, 4), 'cls__gamma': [0.0001, 0.001, 0.01, 0.1, 1]}
grid_search = GridSearchCV(estimator=pipeline, param_grid=parameters, scoring='roc_auc', verbose=1, n_jobs=-1, cv=10)
grid_search.fit(X_train, y_train)
grid_search.best_params_
best_svm = grid_search.best_estimator_
from sklearn.metrics import accuracy_score
predictions = best_svm.predict(X_test)
confusion_matrix_model = metrics.confusion_matrix(y_test, predictions)
target = ['Control', 'Dementia']
confusion_matrix_df = pd.DataFrame(confusion_matrix_model, index=target, columns=target)
confusion_matrix_df.columns.name = 'Predicted'
confusion_matrix_df.index.name = 'Real'
confusion_matrix_df
pipeline_other = Pipeline(steps=[('vect', CountVectorizer()), ('cls', SVC())])
parameters_other = {'cls__C': [0.01], 'cls__kernel': ['linear'], 'cls__probability': [True], 'cls__class_weight': ['balanced']}
grid_search_other = GridSearchCV(estimator=pipeline_other, param_grid=parameters_other, scoring='roc_auc', verbose=1, n_jobs=-1, cv=10)
grid_search_other.fit(X_train_other, y_train_other)
best_svm_app = grid_search_other.best_estimator_
predictions_other = best_svm_app.predict(X_test_other)
confusion_matrix_model_other = metrics.confusion_matrix(y_test_other, predictions_other)
target_other = ['Control', 'Dementia']
confusion_matrix_df_other = pd.DataFrame(confusion_matrix_model_other, index=target_other, columns=target_other)
confusion_matrix_df_other.columns.name = 'Predicted'
confusion_matrix_df_other.index.name = 'Real'
confusion_matrix_df_other
code
89129128/cell_12
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
import pandas as pd
df = pd.read_csv('/kaggle/input/dementiapredictionnlp/Data.csv', sep=';')
pipeline = Pipeline(steps=[('vect', CountVectorizer()), ('cls', SVC())])
parameters = {'cls__C': (0.001, 0.01, 1, 10), 'cls__kernel': ['linear', 'poly', 'rbf'], 'cls__degree': (2, 3, 4), 'cls__gamma': [0.0001, 0.001, 0.01, 0.1, 1]}
grid_search = GridSearchCV(estimator=pipeline, param_grid=parameters, scoring='roc_auc', verbose=1, n_jobs=-1, cv=10)
grid_search.fit(X_train, y_train)
grid_search.best_params_
best_svm = grid_search.best_estimator_
from sklearn.metrics import accuracy_score
predictions = best_svm.predict(X_test)
confusion_matrix_model = metrics.confusion_matrix(y_test, predictions)
target = ['Control', 'Dementia']
confusion_matrix_df = pd.DataFrame(confusion_matrix_model, index=target, columns=target)
confusion_matrix_df.columns.name = 'Predicted'
confusion_matrix_df.index.name = 'Real'
confusion_matrix_df
print('accuracy: ', metrics.accuracy_score(y_test, predictions))
print('precision: ', metrics.precision_score(y_test, predictions))
print('recall: ', metrics.recall_score(y_test, predictions))
print('f1: ', metrics.f1_score(y_test, predictions))
code
89129128/cell_5
[ "text_html_output_1.png" ]
import pandas as pd
import pandas_profiling as pp
df = pd.read_csv('/kaggle/input/dementiapredictionnlp/Data.csv', sep=';')
profile = pp.ProfileReport(df)
profile
code
2036115/cell_4
[ "text_plain_output_1.png" ]
from gensim import corpora, models, similarities
from nltk.corpus import stopwords
from string import punctuation
from subprocess import check_output
import os
import pandas as pd
import tempfile
import gensim
import logging
TEMP_FOLDER = tempfile.gettempdir()
df = pd.DataFrame()
df = pd.read_csv('../input/winemag-data-130k-v2.csv')
df.shape
corpus = []
a = []
for i in range(len(df['description'])):
    a = df['description'][i]
    corpus.append(a)
corpus[0:2]
stoplist = stopwords.words('english') + list(punctuation)
texts = [[word for word in str(document).lower().split() if word not in stoplist] for document in corpus]
dictionary = corpora.Dictionary(texts)
dictionary.save(os.path.join(TEMP_FOLDER, 'wine.dict'))
corpus = [dictionary.doc2bow(text) for text in texts]
corpora.MmCorpus.serialize(os.path.join(TEMP_FOLDER, 'wine.mm'), corpus)
tfidf = models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus]
total_topics = 20
lda = models.LdaModel(corpus, id2word=dictionary, num_topics=total_topics)
corpus_lda = lda[corpus_tfidf]
lda.show_topics(total_topics, 5)
code
2036115/cell_6
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
from collections import OrderedDict
from gensim import corpora, models, similarities
from nltk.corpus import stopwords
from string import punctuation
from subprocess import check_output
import os
import pandas as pd
import tempfile
import gensim
import logging
TEMP_FOLDER = tempfile.gettempdir()
df = pd.DataFrame()
df = pd.read_csv('../input/winemag-data-130k-v2.csv')
df.shape
corpus = []
a = []
for i in range(len(df['description'])):
    a = df['description'][i]
    corpus.append(a)
corpus[0:2]
stoplist = stopwords.words('english') + list(punctuation)
texts = [[word for word in str(document).lower().split() if word not in stoplist] for document in corpus]
dictionary = corpora.Dictionary(texts)
dictionary.save(os.path.join(TEMP_FOLDER, 'wine.dict'))
corpus = [dictionary.doc2bow(text) for text in texts]
corpora.MmCorpus.serialize(os.path.join(TEMP_FOLDER, 'wine.mm'), corpus)
tfidf = models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus]
total_topics = 20
lda = models.LdaModel(corpus, id2word=dictionary, num_topics=total_topics)
corpus_lda = lda[corpus_tfidf]
data_lda = {i: OrderedDict() for i in range(total_topics)}
df_lda = pd.DataFrame(data_lda)
df_lda = df_lda.fillna(0).T
import pyLDAvis.gensim
pyLDAvis.enable_notebook()
panel = pyLDAvis.gensim.prepare(lda, corpus_lda, dictionary, mds='tsne')
panel
code
2036115/cell_2
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from subprocess import check_output
import os
import pandas as pd
import tempfile
from gensim import corpora, models, similarities
import gensim
import logging
TEMP_FOLDER = tempfile.gettempdir()
print('Folder "{}" will be used to save temporary dictionary and corpus.'.format(TEMP_FOLDER))
from nltk.corpus import stopwords
from string import punctuation
print(check_output(['ls', '../input']).decode('utf8'))
df = pd.DataFrame()
df = pd.read_csv('../input/winemag-data-130k-v2.csv')
df.shape
code
2036115/cell_3
[ "text_plain_output_1.png" ]
from subprocess import check_output
import os
import pandas as pd
import tempfile
from gensim import corpora, models, similarities
import gensim
import logging
TEMP_FOLDER = tempfile.gettempdir()
from nltk.corpus import stopwords
from string import punctuation
df = pd.DataFrame()
df = pd.read_csv('../input/winemag-data-130k-v2.csv')
df.shape
corpus = []
a = []
for i in range(len(df['description'])):
    a = df['description'][i]
    corpus.append(a)
corpus[0:2]
code
2036115/cell_5
[ "text_plain_output_1.png" ]
from collections import OrderedDict
from gensim import corpora, models, similarities
from nltk.corpus import stopwords
from string import punctuation
from subprocess import check_output
import os
import pandas as pd
import tempfile
import gensim
import logging
TEMP_FOLDER = tempfile.gettempdir()
df = pd.DataFrame()
df = pd.read_csv('../input/winemag-data-130k-v2.csv')
df.shape
corpus = []
a = []
for i in range(len(df['description'])):
    a = df['description'][i]
    corpus.append(a)
corpus[0:2]
stoplist = stopwords.words('english') + list(punctuation)
texts = [[word for word in str(document).lower().split() if word not in stoplist] for document in corpus]
dictionary = corpora.Dictionary(texts)
dictionary.save(os.path.join(TEMP_FOLDER, 'wine.dict'))
corpus = [dictionary.doc2bow(text) for text in texts]
corpora.MmCorpus.serialize(os.path.join(TEMP_FOLDER, 'wine.mm'), corpus)
tfidf = models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus]
total_topics = 20
lda = models.LdaModel(corpus, id2word=dictionary, num_topics=total_topics)
corpus_lda = lda[corpus_tfidf]
data_lda = {i: OrderedDict(lda.show_topic(i, 25)) for i in range(total_topics)}
df_lda = pd.DataFrame(data_lda)
print(df_lda.shape)
df_lda = df_lda.fillna(0).T
print(df_lda.shape)
code
74046505/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/used-bikes-prices-in-india/Used_Bikes.csv')
df
df.shape
categoric_cols = [categoric for categoric in df.columns if df[categoric].dtype in ['object']]
categoric_cols
numeric_cols = [numeric for numeric in df.columns if df[numeric].dtype in ['float64']]
numeric_cols
df.brand.unique()
df.bike_name.unique()
df.city.nunique()
df_missing = df.isna().sum()
df.describe()
code
74046505/cell_9
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/used-bikes-prices-in-india/Used_Bikes.csv')
df
df.shape
categoric_cols = [categoric for categoric in df.columns if df[categoric].dtype in ['object']]
categoric_cols
numeric_cols = [numeric for numeric in df.columns if df[numeric].dtype in ['float64']]
numeric_cols
df.brand.unique()
df.bike_name.unique()
code
74046505/cell_25
[ "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, StandardScaler, OneHotEncoder, LabelEncoder
numerical_pipeline = Pipeline([('scaling', MinMaxScaler())])
categoric_second_pipeline = Pipeline([('label', OneHotEncoder(sparse=False))])
preprocessing = ColumnTransformer([('numeric', numerical_pipeline, ['kms_driven', 'age', 'power']), ('cat_second', categoric_second_pipeline, ['owner', 'bike_name', 'city', 'brand'])])
pipeline = Pipeline([('algo', preprocessing), ('model', RandomForestRegressor(random_state=42))])
pipeline.fit(X_train, y_train)
pipeline.get_params()
code
74046505/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/used-bikes-prices-in-india/Used_Bikes.csv')
df
df.shape
sns.countplot(df.dtypes.map(str))
code
74046505/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/used-bikes-prices-in-india/Used_Bikes.csv')
df
df.shape
categoric_cols = [categoric for categoric in df.columns if df[categoric].dtype in ['object']]
categoric_cols
numeric_cols = [numeric for numeric in df.columns if df[numeric].dtype in ['float64']]
numeric_cols
code
74046505/cell_2
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/used-bikes-prices-in-india/Used_Bikes.csv')
df
code
74046505/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/used-bikes-prices-in-india/Used_Bikes.csv')
df
df.shape
categoric_cols = [categoric for categoric in df.columns if df[categoric].dtype in ['object']]
categoric_cols
numeric_cols = [numeric for numeric in df.columns if df[numeric].dtype in ['float64']]
numeric_cols
df.brand.unique()
df.bike_name.unique()
df.city.nunique()
code
74046505/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/used-bikes-prices-in-india/Used_Bikes.csv')
df
df.shape
categoric_cols = [categoric for categoric in df.columns if df[categoric].dtype in ['object']]
categoric_cols
numeric_cols = [numeric for numeric in df.columns if df[numeric].dtype in ['float64']]
numeric_cols
sns.countplot(df.owner)
code
74046505/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/used-bikes-prices-in-india/Used_Bikes.csv')
df
df.shape
categoric_cols = [categoric for categoric in df.columns if df[categoric].dtype in ['object']]
categoric_cols
numeric_cols = [numeric for numeric in df.columns if df[numeric].dtype in ['float64']]
numeric_cols
df.brand.unique()
code
74046505/cell_15
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/used-bikes-prices-in-india/Used_Bikes.csv')
df
df.shape
categoric_cols = [categoric for categoric in df.columns if df[categoric].dtype in ['object']]
categoric_cols
numeric_cols = [numeric for numeric in df.columns if df[numeric].dtype in ['float64']]
numeric_cols
df.brand.unique()
df.bike_name.unique()
df.city.nunique()
df_missing = df.isna().sum()
sns.regplot(x='kms_driven', y='price', data=df)
code
74046505/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/used-bikes-prices-in-india/Used_Bikes.csv')
df
df.shape
categoric_cols = [categoric for categoric in df.columns if df[categoric].dtype in ['object']]
categoric_cols
numeric_cols = [numeric for numeric in df.columns if df[numeric].dtype in ['float64']]
numeric_cols
df.brand.unique()
df.bike_name.unique()
df.city.nunique()
df_missing = df.isna().sum()
sns.regplot(x='age', y='price', data=df)
code
74046505/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/used-bikes-prices-in-india/Used_Bikes.csv')
df
df.shape
code
74046505/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/used-bikes-prices-in-india/Used_Bikes.csv')
df
df.shape
categoric_cols = [categoric for categoric in df.columns if df[categoric].dtype in ['object']]
categoric_cols
numeric_cols = [numeric for numeric in df.columns if df[numeric].dtype in ['float64']]
numeric_cols
df.brand.unique()
df.bike_name.unique()
df.city.nunique()
df_missing = df.isna().sum()
sns.regplot(x='power', y='price', data=df)
code
74046505/cell_24
[ "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, StandardScaler, OneHotEncoder, LabelEncoder
numerical_pipeline = Pipeline([('scaling', MinMaxScaler())])
categoric_second_pipeline = Pipeline([('label', OneHotEncoder(sparse=False))])
preprocessing = ColumnTransformer([('numeric', numerical_pipeline, ['kms_driven', 'age', 'power']), ('cat_second', categoric_second_pipeline, ['owner', 'bike_name', 'city', 'brand'])])
pipeline = Pipeline([('algo', preprocessing), ('model', RandomForestRegressor(random_state=42))])
pipeline.fit(X_train, y_train)
code