Dataset schema:
  path             : string (length 13-17)
  screenshot_names : sequence of strings (length 1-873)
  code             : string (length 0-40.4k)
  cell_type        : string class (1 value: "code")
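Each row below pairs a notebook cell's path with its rendered output screenshots and its source code. As a minimal, hypothetical sketch of how rows with this schema could be loaded and inspected (the file name and JSON Lines layout are assumptions, not stated above):

import pandas as pd

# Assumed export format: one JSON object per line with the four schema fields.
rows = pd.read_json('notebook_cells.jsonl', lines=True)  # hypothetical filename
print(rows[['path', 'cell_type']].head())
print(rows.loc[0, 'code'][:200])  # first 200 characters of one cell's source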
17118879/cell_28
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import pathlib
import random
import seaborn as sns
import tensorflow as tf

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sample_sub_df = pd.read_csv('../input/sample_submission.csv')
train_images_path = '../input/train_images'
test_images_path = '../input/test_images'

train_df[train_df.id_code == '5d024177e214']

classes_dist = pd.DataFrame(train_df['diagnosis'].value_counts() / train_df.shape[0]).reset_index()
# barplot
ax = sns.barplot(x="index", y="diagnosis", data=classes_dist)
# Imbalanced dataset with 49% no DR and 8% proliferative (i.e. the most severe DR).
# Model building: need to do oversampling for the minority classes.

root_path = pathlib.Path(train_images_path)
for item in root_path.iterdir():
    break
all_paths = list(root_path.glob('*.png'))
all_paths[0]
all_paths = [str(path) for path in all_paths]
random.shuffle(all_paths)

img = tf.read_file(all_paths[0])  # tf.read_file takes a single path, not a list
img

def preprocess_image(image):
    img_tensor = tf.image.decode_png(image, channels=3)
    img_tensor = tf.cast(img_tensor, tf.float32)
    img_tensor /= 255.0  # normalize to [0, 1]
    return img_tensor

def load_and_preprocess_image(path):
    image = tf.read_file(path)
    return preprocess_image(image)

train_df.columns
train_df['image_path'] = '../input/train_images/' + train_df['id_code'] + '.png'  # full path to each image file
np.array(train_df['diagnosis'])

labels = tf.convert_to_tensor(np.array(train_df['diagnosis']), dtype=tf.int32)
paths = tf.convert_to_tensor(np.array(train_df['image_path']), dtype=tf.string)
image, label = tf.train.slice_input_producer([paths, labels], shuffle=True)

path_ds = tf.data.Dataset.from_tensor_slices(train_df['image_path'])
AUTOTUNE = tf.data.experimental.AUTOTUNE
image_ds = path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
image_ds.take(1)
code
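The comment in the cell above notes the class imbalance (49% no DR vs. 8% proliferative DR) and says the minority classes need oversampling, but the notebook never implements that step. A minimal sketch, assuming the same train_df with a 'diagnosis' label column:

import pandas as pd

def oversample(df: pd.DataFrame, label_col: str = 'diagnosis') -> pd.DataFrame:
    """Naively upsample every class to the size of the largest class."""
    max_count = df[label_col].value_counts().max()
    parts = [
        grp.sample(max_count, replace=True, random_state=0)
        for _, grp in df.groupby(label_col)
    ]
    # Concatenate the upsampled groups and shuffle the rows.
    return pd.concat(parts).sample(frac=1, random_state=0)

# balanced_df = oversample(train_df)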
17118879/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sample_sub_df = pd.read_csv('../input/sample_submission.csv')
train_df.head(4)
code
17118879/cell_15
[ "text_plain_output_1.png" ]
import pathlib

train_images_path = '../input/train_images'
test_images_path = '../input/test_images'
root_path = pathlib.Path(train_images_path)
for item in root_path.iterdir():
    break
all_paths = list(root_path.glob('*.png'))
all_paths[0]
code
17118879/cell_3
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

os.getcwd()
os.listdir()
code
17118879/cell_24
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sample_sub_df = pd.read_csv('../input/sample_submission.csv')

train_df[train_df.id_code == '5d024177e214']

classes_dist = pd.DataFrame(train_df['diagnosis'].value_counts() / train_df.shape[0]).reset_index()
# barplot
ax = sns.barplot(x="index", y="diagnosis", data=classes_dist)
# Imbalanced dataset with 49% no DR and 8% proliferative (i.e. the most severe DR).
# Model building: need to do oversampling for the minority classes.

train_df.columns
train_df['image_path'] = '../input/train_images/' + train_df['id_code'] + '.png'  # full path to each image file
np.array(train_df['diagnosis'])
code
17118879/cell_14
[ "text_html_output_1.png" ]
import pathlib

train_images_path = '../input/train_images'
test_images_path = '../input/test_images'
root_path = pathlib.Path(train_images_path)
for item in root_path.iterdir():
    print(item)
    break
code
17118879/cell_22
[ "text_plain_output_1.png" ]
import pathlib
import random
import tensorflow as tf

train_images_path = '../input/train_images'
test_images_path = '../input/test_images'
root_path = pathlib.Path(train_images_path)
for item in root_path.iterdir():
    break
all_paths = list(root_path.glob('*.png'))
all_paths[0]
all_paths = [str(path) for path in all_paths]
random.shuffle(all_paths)

img = tf.read_file(all_paths[0])  # tf.read_file takes a single path, not a list
img

def preprocess_image(image):
    img_tensor = tf.image.decode_png(image, channels=3)
    img_tensor = tf.cast(img_tensor, tf.float32)
    img_tensor /= 255.0  # normalize to [0, 1]
    return img_tensor

def load_and_preprocess_image(path):
    image = tf.read_file(path)
    return preprocess_image(image)

img_tensor = load_and_preprocess_image(all_paths[0])  # materialize one preprocessed image tensor
print(img_tensor)
code
17118879/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sample_sub_df = pd.read_csv('../input/sample_submission.csv')
train_df[train_df.id_code == '5d024177e214']
code
17118879/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
sample_sub_df = pd.read_csv('../input/sample_submission.csv')
test_df.info()
code
128029150/cell_21
[ "text_plain_output_1.png" ]
from pandas import DataFrame
from sklearn import neighbors
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics import classification_report
from sklearn.metrics import mean_squared_error
import numpy as np
import pandas as pd
import re

data_meta = pd.read_csv('/kaggle/input/content-based/meta_full_csv')
data_meta = data_meta.sample(50000)
data_meta['summaryRev'] = data_meta['summary'] + ' ' + data_meta['reviewText']
data_meta.query("asin == 'B00GIDADP0'")
data_meta.query("asin == 'B002Q46RDW'")
data_meta_1 = data_meta[['asin', 'overall', 'unixReviewTime']]
data_meta_1
data_meta_2 = data_meta[['asin', 'overall', 'summaryRev']]
data_meta_2

regEx = re.compile('[^a-z]+')

def cleanReviews(reviewText):
    reviewText = reviewText.lower()
    reviewText = regEx.sub(' ', reviewText).strip()
    return reviewText

data_meta_2['summaryClean'] = data_meta_2['summaryRev'].apply(cleanReviews)
data_meta_2 = data_meta_2.reset_index()

reviews = data_meta_2['summaryClean']
countVector = CountVectorizer(max_features=300, stop_words='english')
transformedReviews = countVector.fit_transform(reviews)
dfReviews = DataFrame(transformedReviews.A, columns=countVector.get_feature_names())
dfReviews = dfReviews.astype(int)

X = np.array(dfReviews)
tpercent = 0.9
tsize = int(np.floor(tpercent * len(dfReviews)))
dfReviews_train = X[:tsize]
dfReviews_test = X[tsize:]
lentrain = len(dfReviews_train)
lentest = len(dfReviews_test)

df5_train_target = data_meta_2['overall'][:lentrain]
df5_test_target = data_meta_2['overall'][lentrain:lentrain + lentest]
df5_train_target = df5_train_target.astype(int)
df5_test_target = df5_test_target.astype(int)

n_neighbors = 3
knnclf = neighbors.KNeighborsClassifier(n_neighbors, weights='distance')
knnclf.fit(dfReviews_train, df5_train_target)
knnpreds_test = knnclf.predict(dfReviews_test)

df5_train_target = data_meta_2['overall'][:lentrain]
df5_test_target = data_meta_2['overall'][lentrain:lentrain + lentest]
df5_train_target = df5_train_target.astype(int)
df5_test_target = df5_test_target.astype(int)

n_neighbors = 28
knnclf = neighbors.KNeighborsClassifier(n_neighbors, weights='distance')
knnclf.fit(dfReviews_train, df5_train_target)
knnpreds_test = knnclf.predict(dfReviews_test)
print(mean_squared_error(df5_test_target, knnpreds_test))
code
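TfidfTransformer is imported throughout this notebook but never used; the kNN models above run on the raw term counts. A minimal sketch, assuming the same transformedReviews count matrix, of how TF-IDF weighting could be slotted in before the train/test split (this is an assumed improvement, not a step the notebook performs):

from sklearn.feature_extraction.text import TfidfTransformer

# Re-weight raw term counts so frequent, uninformative terms are down-weighted.
tfidf = TfidfTransformer()
tfidf_reviews = tfidf.fit_transform(transformedReviews)  # same shape as the count matrix
X = tfidf_reviews.toarray()  # dense drop-in replacement for np.array(dfReviews)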
128029150/cell_13
[ "text_html_output_1.png" ]
from pandas import DataFrame
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
import numpy as np
import pandas as pd
import re

data_meta = pd.read_csv('/kaggle/input/content-based/meta_full_csv')
data_meta = data_meta.sample(50000)
data_meta['summaryRev'] = data_meta['summary'] + ' ' + data_meta['reviewText']
data_meta.query("asin == 'B00GIDADP0'")
data_meta.query("asin == 'B002Q46RDW'")
data_meta_1 = data_meta[['asin', 'overall', 'unixReviewTime']]
data_meta_1
data_meta_2 = data_meta[['asin', 'overall', 'summaryRev']]
data_meta_2

regEx = re.compile('[^a-z]+')

def cleanReviews(reviewText):
    reviewText = reviewText.lower()
    reviewText = regEx.sub(' ', reviewText).strip()
    return reviewText

data_meta_2['summaryClean'] = data_meta_2['summaryRev'].apply(cleanReviews)
data_meta_2 = data_meta_2.reset_index()

reviews = data_meta_2['summaryClean']
countVector = CountVectorizer(max_features=300, stop_words='english')
transformedReviews = countVector.fit_transform(reviews)
dfReviews = DataFrame(transformedReviews.A, columns=countVector.get_feature_names())
dfReviews = dfReviews.astype(int)

X = np.array(dfReviews)
tpercent = 0.9
tsize = int(np.floor(tpercent * len(dfReviews)))
dfReviews_train = X[:tsize]
dfReviews_test = X[tsize:]
lentrain = len(dfReviews_train)
lentest = len(dfReviews_test)
print(lentrain)
print(lentest)
code
128029150/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
import re

data_meta = pd.read_csv('/kaggle/input/content-based/meta_full_csv')
data_meta = data_meta.sample(50000)
data_meta['summaryRev'] = data_meta['summary'] + ' ' + data_meta['reviewText']
data_meta.query("asin == 'B00GIDADP0'")
data_meta.query("asin == 'B002Q46RDW'")
data_meta_1 = data_meta[['asin', 'overall', 'unixReviewTime']]
data_meta_1
data_meta_2 = data_meta[['asin', 'overall', 'summaryRev']]
data_meta_2

regEx = re.compile('[^a-z]+')

def cleanReviews(reviewText):
    reviewText = reviewText.lower()
    reviewText = regEx.sub(' ', reviewText).strip()
    return reviewText

data_meta_2['summaryClean'] = data_meta_2['summaryRev'].apply(cleanReviews)
data_meta_2 = data_meta_2.reset_index()
data_meta_2
code
128029150/cell_25
[ "text_plain_output_1.png" ]
from pandas import DataFrame
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.neighbors import NearestNeighbors
import numpy as np
import pandas as pd
import re

data_meta = pd.read_csv('/kaggle/input/content-based/meta_full_csv')
data_meta = data_meta.sample(50000)
data_meta['summaryRev'] = data_meta['summary'] + ' ' + data_meta['reviewText']
data_meta.query("asin == 'B00GIDADP0'")
data_meta.query("asin == 'B002Q46RDW'")
data_meta_1 = data_meta[['asin', 'overall', 'unixReviewTime']]
data_meta_1
data_meta_2 = data_meta[['asin', 'overall', 'summaryRev']]
data_meta_2

regEx = re.compile('[^a-z]+')

def cleanReviews(reviewText):
    reviewText = reviewText.lower()
    reviewText = regEx.sub(' ', reviewText).strip()
    return reviewText

data_meta_2['summaryClean'] = data_meta_2['summaryRev'].apply(cleanReviews)
data_meta_2 = data_meta_2.reset_index()

reviews = data_meta_2['summaryClean']
countVector = CountVectorizer(max_features=300, stop_words='english')
transformedReviews = countVector.fit_transform(reviews)
dfReviews = DataFrame(transformedReviews.A, columns=countVector.get_feature_names())
dfReviews = dfReviews.astype(int)

X = np.array(dfReviews)
tpercent = 0.9
tsize = int(np.floor(tpercent * len(dfReviews)))
dfReviews_train = X[:tsize]
dfReviews_test = X[tsize:]
lentrain = len(dfReviews_train)
lentest = len(dfReviews_test)

neighbor = NearestNeighbors(n_neighbors=3, algorithm='ball_tree').fit(dfReviews_train)
distances, indices = neighbor.kneighbors(dfReviews_train)

for i in range(lentest):
    a = neighbor.kneighbors([dfReviews_test[i]])
    related_product_list = a[1]
    first_related_product = [item[0] for item in related_product_list]
    first_related_product = str(first_related_product).strip('[]')
    first_related_product = int(first_related_product)
    second_related_product = [item[1] for item in related_product_list]
    second_related_product = str(second_related_product).strip('[]')
    second_related_product = int(second_related_product)

X = np.array(dfReviews)
tpercent = 0.85
tsize = int(np.floor(tpercent * len(dfReviews)))
dfReviews_train = X[:tsize]
dfReviews_test = X[tsize:]
lentrain = len(dfReviews_train)
lentest = len(dfReviews_test)

neighbor = NearestNeighbors(n_neighbors=3, algorithm='ball_tree').fit(dfReviews_train)
distances, indices = neighbor.kneighbors(dfReviews_train)

for i in range(lentest):
    a = neighbor.kneighbors([dfReviews_test[i]])
    related_product_list = a[1]
    first_related_product = [item[0] for item in related_product_list]
    first_related_product = str(first_related_product).strip('[]')
    first_related_product = int(first_related_product)
    second_related_product = [item[1] for item in related_product_list]
    second_related_product = str(second_related_product).strip('[]')
    second_related_product = int(second_related_product)
    print('Based on product reviews, for ', data_meta_2['asin'][lentrain + i], ' average rating is ', data_meta_2['overall'][lentrain + i])
    print('The first similar product is ', data_meta_2['asin'][first_related_product], ' average rating is ', data_meta_2['overall'][first_related_product])
    print('The second similar product is ', data_meta_2['asin'][second_related_product], ' average rating is ', data_meta_2['overall'][second_related_product])
    print('-----------------------------------------------------------')
code
128029150/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

data_meta = pd.read_csv('/kaggle/input/content-based/meta_full_csv')
data_meta = data_meta.sample(50000)
data_meta['summaryRev'] = data_meta['summary'] + ' ' + data_meta['reviewText']
data_meta.query("asin == 'B00GIDADP0'")
code
128029150/cell_20
[ "text_plain_output_1.png" ]
from pandas import DataFrame
from sklearn import neighbors
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
import numpy as np
import pandas as pd
import re

data_meta = pd.read_csv('/kaggle/input/content-based/meta_full_csv')
data_meta = data_meta.sample(50000)
data_meta['summaryRev'] = data_meta['summary'] + ' ' + data_meta['reviewText']
data_meta.query("asin == 'B00GIDADP0'")
data_meta.query("asin == 'B002Q46RDW'")
data_meta_1 = data_meta[['asin', 'overall', 'unixReviewTime']]
data_meta_1
data_meta_2 = data_meta[['asin', 'overall', 'summaryRev']]
data_meta_2

regEx = re.compile('[^a-z]+')

def cleanReviews(reviewText):
    reviewText = reviewText.lower()
    reviewText = regEx.sub(' ', reviewText).strip()
    return reviewText

data_meta_2['summaryClean'] = data_meta_2['summaryRev'].apply(cleanReviews)
data_meta_2 = data_meta_2.reset_index()

reviews = data_meta_2['summaryClean']
countVector = CountVectorizer(max_features=300, stop_words='english')
transformedReviews = countVector.fit_transform(reviews)
dfReviews = DataFrame(transformedReviews.A, columns=countVector.get_feature_names())
dfReviews = dfReviews.astype(int)

X = np.array(dfReviews)
tpercent = 0.9
tsize = int(np.floor(tpercent * len(dfReviews)))
dfReviews_train = X[:tsize]
dfReviews_test = X[tsize:]
lentrain = len(dfReviews_train)
lentest = len(dfReviews_test)

df5_train_target = data_meta_2['overall'][:lentrain]
df5_test_target = data_meta_2['overall'][lentrain:lentrain + lentest]
df5_train_target = df5_train_target.astype(int)
df5_test_target = df5_test_target.astype(int)

n_neighbors = 3
knnclf = neighbors.KNeighborsClassifier(n_neighbors, weights='distance')
knnclf.fit(dfReviews_train, df5_train_target)
knnpreds_test = knnclf.predict(dfReviews_test)

df5_train_target = data_meta_2['overall'][:lentrain]
df5_test_target = data_meta_2['overall'][lentrain:lentrain + lentest]
df5_train_target = df5_train_target.astype(int)
df5_test_target = df5_test_target.astype(int)

n_neighbors = 28
knnclf = neighbors.KNeighborsClassifier(n_neighbors, weights='distance')
knnclf.fit(dfReviews_train, df5_train_target)
knnpreds_test = knnclf.predict(dfReviews_test)
print(accuracy_score(df5_test_target, knnpreds_test))
code
128029150/cell_26
[ "text_plain_output_1.png" ]
from pandas import DataFrame
from sklearn import neighbors
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics import classification_report
import numpy as np
import pandas as pd
import re

data_meta = pd.read_csv('/kaggle/input/content-based/meta_full_csv')
data_meta = data_meta.sample(50000)
data_meta['summaryRev'] = data_meta['summary'] + ' ' + data_meta['reviewText']
data_meta.query("asin == 'B00GIDADP0'")
data_meta.query("asin == 'B002Q46RDW'")
data_meta_1 = data_meta[['asin', 'overall', 'unixReviewTime']]
data_meta_1
data_meta_2 = data_meta[['asin', 'overall', 'summaryRev']]
data_meta_2

regEx = re.compile('[^a-z]+')

def cleanReviews(reviewText):
    reviewText = reviewText.lower()
    reviewText = regEx.sub(' ', reviewText).strip()
    return reviewText

data_meta_2['summaryClean'] = data_meta_2['summaryRev'].apply(cleanReviews)
data_meta_2 = data_meta_2.reset_index()

reviews = data_meta_2['summaryClean']
countVector = CountVectorizer(max_features=300, stop_words='english')
transformedReviews = countVector.fit_transform(reviews)
dfReviews = DataFrame(transformedReviews.A, columns=countVector.get_feature_names())
dfReviews = dfReviews.astype(int)

X = np.array(dfReviews)
tpercent = 0.9
tsize = int(np.floor(tpercent * len(dfReviews)))
dfReviews_train = X[:tsize]
dfReviews_test = X[tsize:]
lentrain = len(dfReviews_train)
lentest = len(dfReviews_test)

df5_train_target = data_meta_2['overall'][:lentrain]
df5_test_target = data_meta_2['overall'][lentrain:lentrain + lentest]
df5_train_target = df5_train_target.astype(int)
df5_test_target = df5_test_target.astype(int)

n_neighbors = 3
knnclf = neighbors.KNeighborsClassifier(n_neighbors, weights='distance')
knnclf.fit(dfReviews_train, df5_train_target)
knnpreds_test = knnclf.predict(dfReviews_test)

df5_train_target = data_meta_2['overall'][:lentrain]
df5_test_target = data_meta_2['overall'][lentrain:lentrain + lentest]
df5_train_target = df5_train_target.astype(int)
df5_test_target = df5_test_target.astype(int)

n_neighbors = 28
knnclf = neighbors.KNeighborsClassifier(n_neighbors, weights='distance')
knnclf.fit(dfReviews_train, df5_train_target)
knnpreds_test = knnclf.predict(dfReviews_test)

X = np.array(dfReviews)
tpercent = 0.85
tsize = int(np.floor(tpercent * len(dfReviews)))
dfReviews_train = X[:tsize]
dfReviews_test = X[tsize:]
lentrain = len(dfReviews_train)
lentest = len(dfReviews_test)

df5_train_target = data_meta_2['overall'][:lentrain]
df5_test_target = data_meta_2['overall'][lentrain:lentrain + lentest]
df5_train_target = df5_train_target.astype(int)
df5_test_target = df5_test_target.astype(int)

n_neighbors = 7
knnclf = neighbors.KNeighborsClassifier(n_neighbors, weights='distance')
knnclf.fit(dfReviews_train, df5_train_target)
knnpreds_test = knnclf.predict(dfReviews_test)
print(classification_report(df5_test_target, knnpreds_test))
code
128029150/cell_11
[ "text_html_output_1.png" ]
from pandas import DataFrame
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
import pandas as pd
import re

data_meta = pd.read_csv('/kaggle/input/content-based/meta_full_csv')
data_meta = data_meta.sample(50000)
data_meta['summaryRev'] = data_meta['summary'] + ' ' + data_meta['reviewText']
data_meta.query("asin == 'B00GIDADP0'")
data_meta.query("asin == 'B002Q46RDW'")
data_meta_1 = data_meta[['asin', 'overall', 'unixReviewTime']]
data_meta_1
data_meta_2 = data_meta[['asin', 'overall', 'summaryRev']]
data_meta_2

regEx = re.compile('[^a-z]+')

def cleanReviews(reviewText):
    reviewText = reviewText.lower()
    reviewText = regEx.sub(' ', reviewText).strip()
    return reviewText

data_meta_2['summaryClean'] = data_meta_2['summaryRev'].apply(cleanReviews)
data_meta_2 = data_meta_2.reset_index()

reviews = data_meta_2['summaryClean']
countVector = CountVectorizer(max_features=300, stop_words='english')
transformedReviews = countVector.fit_transform(reviews)
dfReviews = DataFrame(transformedReviews.A, columns=countVector.get_feature_names())
dfReviews = dfReviews.astype(int)
dfReviews
code
128029150/cell_19
[ "text_plain_output_1.png" ]
from pandas import DataFrame
from sklearn import neighbors
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics import classification_report
import numpy as np
import pandas as pd
import re

data_meta = pd.read_csv('/kaggle/input/content-based/meta_full_csv')
data_meta = data_meta.sample(50000)
data_meta['summaryRev'] = data_meta['summary'] + ' ' + data_meta['reviewText']
data_meta.query("asin == 'B00GIDADP0'")
data_meta.query("asin == 'B002Q46RDW'")
data_meta_1 = data_meta[['asin', 'overall', 'unixReviewTime']]
data_meta_1
data_meta_2 = data_meta[['asin', 'overall', 'summaryRev']]
data_meta_2

regEx = re.compile('[^a-z]+')

def cleanReviews(reviewText):
    reviewText = reviewText.lower()
    reviewText = regEx.sub(' ', reviewText).strip()
    return reviewText

data_meta_2['summaryClean'] = data_meta_2['summaryRev'].apply(cleanReviews)
data_meta_2 = data_meta_2.reset_index()

reviews = data_meta_2['summaryClean']
countVector = CountVectorizer(max_features=300, stop_words='english')
transformedReviews = countVector.fit_transform(reviews)
dfReviews = DataFrame(transformedReviews.A, columns=countVector.get_feature_names())
dfReviews = dfReviews.astype(int)

X = np.array(dfReviews)
tpercent = 0.9
tsize = int(np.floor(tpercent * len(dfReviews)))
dfReviews_train = X[:tsize]
dfReviews_test = X[tsize:]
lentrain = len(dfReviews_train)
lentest = len(dfReviews_test)

df5_train_target = data_meta_2['overall'][:lentrain]
df5_test_target = data_meta_2['overall'][lentrain:lentrain + lentest]
df5_train_target = df5_train_target.astype(int)
df5_test_target = df5_test_target.astype(int)

n_neighbors = 3
knnclf = neighbors.KNeighborsClassifier(n_neighbors, weights='distance')
knnclf.fit(dfReviews_train, df5_train_target)
knnpreds_test = knnclf.predict(dfReviews_test)

df5_train_target = data_meta_2['overall'][:lentrain]
df5_test_target = data_meta_2['overall'][lentrain:lentrain + lentest]
df5_train_target = df5_train_target.astype(int)
df5_test_target = df5_test_target.astype(int)

n_neighbors = 28
knnclf = neighbors.KNeighborsClassifier(n_neighbors, weights='distance')
knnclf.fit(dfReviews_train, df5_train_target)
knnpreds_test = knnclf.predict(dfReviews_test)
print(classification_report(df5_test_target, knnpreds_test))
code
128029150/cell_18
[ "text_html_output_1.png" ]
from pandas import DataFrame
from sklearn import neighbors
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics import classification_report
from sklearn.metrics import mean_squared_error
import numpy as np
import pandas as pd
import re

data_meta = pd.read_csv('/kaggle/input/content-based/meta_full_csv')
data_meta = data_meta.sample(50000)
data_meta['summaryRev'] = data_meta['summary'] + ' ' + data_meta['reviewText']
data_meta.query("asin == 'B00GIDADP0'")
data_meta.query("asin == 'B002Q46RDW'")
data_meta_1 = data_meta[['asin', 'overall', 'unixReviewTime']]
data_meta_1
data_meta_2 = data_meta[['asin', 'overall', 'summaryRev']]
data_meta_2

regEx = re.compile('[^a-z]+')

def cleanReviews(reviewText):
    reviewText = reviewText.lower()
    reviewText = regEx.sub(' ', reviewText).strip()
    return reviewText

data_meta_2['summaryClean'] = data_meta_2['summaryRev'].apply(cleanReviews)
data_meta_2 = data_meta_2.reset_index()

reviews = data_meta_2['summaryClean']
countVector = CountVectorizer(max_features=300, stop_words='english')
transformedReviews = countVector.fit_transform(reviews)
dfReviews = DataFrame(transformedReviews.A, columns=countVector.get_feature_names())
dfReviews = dfReviews.astype(int)

X = np.array(dfReviews)
tpercent = 0.9
tsize = int(np.floor(tpercent * len(dfReviews)))
dfReviews_train = X[:tsize]
dfReviews_test = X[tsize:]
lentrain = len(dfReviews_train)
lentest = len(dfReviews_test)

df5_train_target = data_meta_2['overall'][:lentrain]
df5_test_target = data_meta_2['overall'][lentrain:lentrain + lentest]
df5_train_target = df5_train_target.astype(int)
df5_test_target = df5_test_target.astype(int)

n_neighbors = 3
knnclf = neighbors.KNeighborsClassifier(n_neighbors, weights='distance')
knnclf.fit(dfReviews_train, df5_train_target)
knnpreds_test = knnclf.predict(dfReviews_test)
print(mean_squared_error(df5_test_target, knnpreds_test))
code
128029150/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
import re

data_meta = pd.read_csv('/kaggle/input/content-based/meta_full_csv')
data_meta = data_meta.sample(50000)
data_meta['summaryRev'] = data_meta['summary'] + ' ' + data_meta['reviewText']
data_meta.query("asin == 'B00GIDADP0'")
data_meta.query("asin == 'B002Q46RDW'")
data_meta_1 = data_meta[['asin', 'overall', 'unixReviewTime']]
data_meta_1
data_meta_2 = data_meta[['asin', 'overall', 'summaryRev']]
data_meta_2

regEx = re.compile('[^a-z]+')

def cleanReviews(reviewText):
    reviewText = reviewText.lower()
    reviewText = regEx.sub(' ', reviewText).strip()
    return reviewText

data_meta_2['summaryClean'] = data_meta_2['summaryRev'].apply(cleanReviews)
data_meta_2 = data_meta_2.reset_index()
code
128029150/cell_15
[ "application_vnd.jupyter.stderr_output_1.png" ]
from pandas import DataFrame
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.neighbors import NearestNeighbors
import numpy as np
import pandas as pd
import re

data_meta = pd.read_csv('/kaggle/input/content-based/meta_full_csv')
data_meta = data_meta.sample(50000)
data_meta['summaryRev'] = data_meta['summary'] + ' ' + data_meta['reviewText']
data_meta.query("asin == 'B00GIDADP0'")
data_meta.query("asin == 'B002Q46RDW'")
data_meta_1 = data_meta[['asin', 'overall', 'unixReviewTime']]
data_meta_1
data_meta_2 = data_meta[['asin', 'overall', 'summaryRev']]
data_meta_2

regEx = re.compile('[^a-z]+')

def cleanReviews(reviewText):
    reviewText = reviewText.lower()
    reviewText = regEx.sub(' ', reviewText).strip()
    return reviewText

data_meta_2['summaryClean'] = data_meta_2['summaryRev'].apply(cleanReviews)
data_meta_2 = data_meta_2.reset_index()

reviews = data_meta_2['summaryClean']
countVector = CountVectorizer(max_features=300, stop_words='english')
transformedReviews = countVector.fit_transform(reviews)
dfReviews = DataFrame(transformedReviews.A, columns=countVector.get_feature_names())
dfReviews = dfReviews.astype(int)

X = np.array(dfReviews)
tpercent = 0.9
tsize = int(np.floor(tpercent * len(dfReviews)))
dfReviews_train = X[:tsize]
dfReviews_test = X[tsize:]
lentrain = len(dfReviews_train)
lentest = len(dfReviews_test)

neighbor = NearestNeighbors(n_neighbors=3, algorithm='ball_tree').fit(dfReviews_train)
distances, indices = neighbor.kneighbors(dfReviews_train)

for i in range(lentest):
    a = neighbor.kneighbors([dfReviews_test[i]])
    related_product_list = a[1]
    first_related_product = [item[0] for item in related_product_list]
    first_related_product = str(first_related_product).strip('[]')
    first_related_product = int(first_related_product)
    second_related_product = [item[1] for item in related_product_list]
    second_related_product = str(second_related_product).strip('[]')
    second_related_product = int(second_related_product)
    print('Based on product reviews, for ', data_meta_2['asin'][lentrain + i], ' average rating is ', data_meta_2['overall'][lentrain + i])
    print('The first similar product is ', data_meta_2['asin'][first_related_product], ' average rating is ', data_meta_2['overall'][first_related_product])
    print('The second similar product is ', data_meta_2['asin'][second_related_product], ' average rating is ', data_meta_2['overall'][second_related_product])
    print('-----------------------------------------------------------')
code
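The string round-trip in the loop above (str(...).strip('[]') followed by int(...)) recovers neighbor indices from the kneighbors output. A simpler equivalent, assuming the same fitted NearestNeighbors model, indexes the returned arrays directly:

# kneighbors returns (distances, indices), each of shape (n_queries, n_neighbors).
distances, indices = neighbor.kneighbors([dfReviews_test[i]])
first_related_product = int(indices[0][0])
second_related_product = int(indices[0][1])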
128029150/cell_16
[ "text_html_output_1.png" ]
from pandas import DataFrame
from sklearn import neighbors
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics import classification_report
import numpy as np
import pandas as pd
import re

data_meta = pd.read_csv('/kaggle/input/content-based/meta_full_csv')
data_meta = data_meta.sample(50000)
data_meta['summaryRev'] = data_meta['summary'] + ' ' + data_meta['reviewText']
data_meta.query("asin == 'B00GIDADP0'")
data_meta.query("asin == 'B002Q46RDW'")
data_meta_1 = data_meta[['asin', 'overall', 'unixReviewTime']]
data_meta_1
data_meta_2 = data_meta[['asin', 'overall', 'summaryRev']]
data_meta_2

regEx = re.compile('[^a-z]+')

def cleanReviews(reviewText):
    reviewText = reviewText.lower()
    reviewText = regEx.sub(' ', reviewText).strip()
    return reviewText

data_meta_2['summaryClean'] = data_meta_2['summaryRev'].apply(cleanReviews)
data_meta_2 = data_meta_2.reset_index()

reviews = data_meta_2['summaryClean']
countVector = CountVectorizer(max_features=300, stop_words='english')
transformedReviews = countVector.fit_transform(reviews)
dfReviews = DataFrame(transformedReviews.A, columns=countVector.get_feature_names())
dfReviews = dfReviews.astype(int)

X = np.array(dfReviews)
tpercent = 0.9
tsize = int(np.floor(tpercent * len(dfReviews)))
dfReviews_train = X[:tsize]
dfReviews_test = X[tsize:]
lentrain = len(dfReviews_train)
lentest = len(dfReviews_test)

df5_train_target = data_meta_2['overall'][:lentrain]
df5_test_target = data_meta_2['overall'][lentrain:lentrain + lentest]
df5_train_target = df5_train_target.astype(int)
df5_test_target = df5_test_target.astype(int)

n_neighbors = 3
knnclf = neighbors.KNeighborsClassifier(n_neighbors, weights='distance')
knnclf.fit(dfReviews_train, df5_train_target)
knnpreds_test = knnclf.predict(dfReviews_test)
print(classification_report(df5_test_target, knnpreds_test))
code
128029150/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

data_meta = pd.read_csv('/kaggle/input/content-based/meta_full_csv')
data_meta = data_meta.sample(50000)
data_meta['summaryRev'] = data_meta['summary'] + ' ' + data_meta['reviewText']
data_meta
code
128029150/cell_17
[ "application_vnd.jupyter.stderr_output_1.png" ]
from pandas import DataFrame
from sklearn import neighbors
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
import numpy as np
import pandas as pd
import re

data_meta = pd.read_csv('/kaggle/input/content-based/meta_full_csv')
data_meta = data_meta.sample(50000)
data_meta['summaryRev'] = data_meta['summary'] + ' ' + data_meta['reviewText']
data_meta.query("asin == 'B00GIDADP0'")
data_meta.query("asin == 'B002Q46RDW'")
data_meta_1 = data_meta[['asin', 'overall', 'unixReviewTime']]
data_meta_1
data_meta_2 = data_meta[['asin', 'overall', 'summaryRev']]
data_meta_2

regEx = re.compile('[^a-z]+')

def cleanReviews(reviewText):
    reviewText = reviewText.lower()
    reviewText = regEx.sub(' ', reviewText).strip()
    return reviewText

data_meta_2['summaryClean'] = data_meta_2['summaryRev'].apply(cleanReviews)
data_meta_2 = data_meta_2.reset_index()

reviews = data_meta_2['summaryClean']
countVector = CountVectorizer(max_features=300, stop_words='english')
transformedReviews = countVector.fit_transform(reviews)
dfReviews = DataFrame(transformedReviews.A, columns=countVector.get_feature_names())
dfReviews = dfReviews.astype(int)

X = np.array(dfReviews)
tpercent = 0.9
tsize = int(np.floor(tpercent * len(dfReviews)))
dfReviews_train = X[:tsize]
dfReviews_test = X[tsize:]
lentrain = len(dfReviews_train)
lentest = len(dfReviews_test)

df5_train_target = data_meta_2['overall'][:lentrain]
df5_test_target = data_meta_2['overall'][lentrain:lentrain + lentest]
df5_train_target = df5_train_target.astype(int)
df5_test_target = df5_test_target.astype(int)

n_neighbors = 3
knnclf = neighbors.KNeighborsClassifier(n_neighbors, weights='distance')
knnclf.fit(dfReviews_train, df5_train_target)
knnpreds_test = knnclf.predict(dfReviews_test)
print(accuracy_score(df5_test_target, knnpreds_test))
code
128029150/cell_10
[ "text_html_output_1.png" ]
from pandas import DataFrame
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
import pandas as pd
import re

data_meta = pd.read_csv('/kaggle/input/content-based/meta_full_csv')
data_meta = data_meta.sample(50000)
data_meta['summaryRev'] = data_meta['summary'] + ' ' + data_meta['reviewText']
data_meta.query("asin == 'B00GIDADP0'")
data_meta.query("asin == 'B002Q46RDW'")
data_meta_1 = data_meta[['asin', 'overall', 'unixReviewTime']]
data_meta_1
data_meta_2 = data_meta[['asin', 'overall', 'summaryRev']]
data_meta_2

regEx = re.compile('[^a-z]+')

def cleanReviews(reviewText):
    reviewText = reviewText.lower()
    reviewText = regEx.sub(' ', reviewText).strip()
    return reviewText

data_meta_2['summaryClean'] = data_meta_2['summaryRev'].apply(cleanReviews)
data_meta_2 = data_meta_2.reset_index()

reviews = data_meta_2['summaryClean']
countVector = CountVectorizer(max_features=300, stop_words='english')
transformedReviews = countVector.fit_transform(reviews)
dfReviews = DataFrame(transformedReviews.A, columns=countVector.get_feature_names())
dfReviews = dfReviews.astype(int)
code
128029150/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

data_meta = pd.read_csv('/kaggle/input/content-based/meta_full_csv')
data_meta = data_meta.sample(50000)
data_meta['summaryRev'] = data_meta['summary'] + ' ' + data_meta['reviewText']
data_meta.query("asin == 'B00GIDADP0'")
data_meta.query("asin == 'B002Q46RDW'")
code
16135360/cell_42
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
df.popmale.sum()
df.popfemale.sum()
df[df.popmale == df.popmale.max()]
df[df.popfemale == df.popfemale.max()]
df.columns
lit_states = df.groupby('state').agg({'litertotal': np.sum})
popstates = df.groupby('state').agg({'poptotal': np.sum})
literate_rate = lit_states.litertotal * 100 / popstates.poptotal
literate_rate = literate_rate.sort_values(ascending=False)
df[['city', 'sexratio']].sort_values(by='sexratio', ascending=False).head(10)
code
16135360/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
population_state = df[['state', 'poptotal']].groupby('state').sum().sort_values(by='poptotal', ascending=False)
population_state.head(10)
code
16135360/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
code
16135360/cell_25
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
df[['city', 'poptotal']].sort_values(by='poptotal', ascending=False).tail(10)
code
16135360/cell_4
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
code
16135360/cell_34
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
df.popmale.sum()
df.popfemale.sum()
df[df.popmale == df.popmale.max()]
df[df.popfemale == df.popfemale.max()]
df.columns
code
16135360/cell_23
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from matplotlib import pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
dc = df[['state', 'dcode']].groupby(['state']).count()
dc = dc.sort_values(by='dcode', ascending=False)
dc
population_state = df[['state', 'poptotal']].groupby('state').sum().sort_values(by='poptotal', ascending=False)

plt.figure(figsize=[14, 10])
sns.barplot(y=population_state.index, x=population_state.poptotal)
plt.title('States according to Total Population', fontsize=25)
plt.xlabel('Population', fontsize=20)
plt.ylabel('State', fontsize=20)
plt.show()
code
16135360/cell_30
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
df.popmale.sum()
df.popfemale.sum()
df[df.popmale == df.popmale.max()]
code
16135360/cell_33
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
df.popmale.sum()
df.popfemale.sum()
df[df.popmale == df.popmale.max()]
df[df.popfemale == df.popfemale.max()]
df[['city', '0-6poptotal']].sort_values(by='0-6poptotal', ascending=False).tail(10)
code
16135360/cell_6
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
code
16135360/cell_40
[ "text_plain_output_1.png" ]
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
dc = df[['state', 'dcode']].groupby(['state']).count()
dc = dc.sort_values(by='dcode', ascending=False)
dc
population_state = df[['state', 'poptotal']].groupby('state').sum().sort_values(by='poptotal', ascending=False)
df.popmale.sum()
df.popfemale.sum()
df[df.popmale == df.popmale.max()]
df[df.popfemale == df.popfemale.max()]
df.columns
lit_states = df.groupby('state').agg({'litertotal': np.sum})
popstates = df.groupby('state').agg({'poptotal': np.sum})
literate_rate = lit_states.litertotal * 100 / popstates.poptotal
literate_rate = literate_rate.sort_values(ascending=False)

plt.figure(figsize=[14, 10])
sns.barplot(x=literate_rate, y=literate_rate.index)
plt.title('States according to literacy rate', fontsize=25)
plt.xlabel('Literacy Rate', fontsize=20)
plt.ylabel('State', fontsize=20)
plt.show()
code
16135360/cell_29
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
df.popmale.sum()
df.popfemale.sum()
df['0-6popfemale'].sum()
code
16135360/cell_39
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
df.popmale.sum()
df.popfemale.sum()
df[df.popmale == df.popmale.max()]
df[df.popfemale == df.popfemale.max()]
df.columns
lit_states = df.groupby('state').agg({'litertotal': np.sum})
popstates = df.groupby('state').agg({'poptotal': np.sum})
literate_rate = lit_states.litertotal * 100 / popstates.poptotal
literate_rate = literate_rate.sort_values(ascending=False)
literate_rate
code
16135360/cell_26
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
df.popmale.sum()
code
16135360/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
code
16135360/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
code
16135360/cell_19
[ "text_html_output_1.png" ]
from matplotlib import pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
dc = df[['state', 'dcode']].groupby(['state']).count()
dc = dc.sort_values(by='dcode', ascending=False)
dc

plt.figure(figsize=[14, 10])
sns.barplot(y=dc.index, x=dc.dcode)
plt.title('Number of Districts in each State Taken', fontsize=25)
plt.ylabel('State', fontsize=20)
plt.xlabel('No. of Districts', fontsize=20)
plt.show()
code
16135360/cell_1
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
import os

print(os.listdir('../input'))
code
16135360/cell_7
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T

city_pop = df['poptotal'].sum()
total_indian_pop = 1247200000
percentage = city_pop / total_indian_pop * 100
print('{0:.1f}% of population living in cities'.format(percentage))
code
16135360/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
dc = df[['state', 'dcode']].groupby(['state']).count()
dc = dc.sort_values(by='dcode', ascending=False)
print('No. of districts present in each state')
dc
code
16135360/cell_32
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
df.popmale.sum()
df.popfemale.sum()
df[df.popmale == df.popmale.max()]
df[df.popfemale == df.popfemale.max()]
df[['city', '0-6poptotal']].sort_values(by='0-6poptotal', ascending=False).head(10)
code
16135360/cell_28
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
df.popmale.sum()
df.popfemale.sum()
df['0-6popmale'].sum()
code
16135360/cell_8
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T

df['gradratio'] = df['totalgrad'] / df['poptotal']
grad_avg = df['gradratio'].mean()
print('Graduate Ratio in Indian Cities: {0:.2f}'.format(grad_avg))
code
16135360/cell_15
[ "text_html_output_1.png" ]
from matplotlib import pyplot as plt
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()

plt.figure(figsize=(12, 8))
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
city_count.plot(kind='barh', fontsize=10, width=1, edgecolor='black')
plt.xlabel('No of cities', fontsize=15)
plt.ylabel('States', fontsize=15)
plt.title('Count of Cities taken from each State', fontsize=20)
plt.show()
code
16135360/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
sc = df[['state', 'scode']].groupby(['state', 'scode']).count()
print('State codes')
sc
code
16135360/cell_43
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
df.popmale.sum()
df.popfemale.sum()
df[df.popmale == df.popmale.max()]
df[df.popfemale == df.popfemale.max()]
df.columns
lit_states = df.groupby('state').agg({'litertotal': np.sum})
popstates = df.groupby('state').agg({'poptotal': np.sum})
literate_rate = lit_states.litertotal * 100 / popstates.poptotal
literate_rate = literate_rate.sort_values(ascending=False)
df[['city', 'sexratio']].sort_values(by='sexratio', ascending=False).tail(10)
code
16135360/cell_31
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
df.popmale.sum()
df.popfemale.sum()
df[df.popmale == df.popmale.max()]
df[df.popfemale == df.popfemale.max()]
code
16135360/cell_24
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
df[['city', 'poptotal']].sort_values(by='poptotal', ascending=False).head(10)
code
16135360/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
code
16135360/cell_22
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
population_state = df[['state', 'poptotal']].groupby('state').sum().sort_values(by='poptotal', ascending=False)
population_state.tail(10)
code
16135360/cell_27
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
df.popmale.sum()
df.popfemale.sum()
code
16135360/cell_37
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
df.popmale.sum()
df.popfemale.sum()
df[df.popmale == df.popmale.max()]
df[df.popfemale == df.popfemale.max()]
df.columns
df[['city', 'elrtotal']].sort_values(by='elrtotal', ascending=False).tail(10)
code
16135360/cell_5
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.info()
code
16135360/cell_36
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cities_r2.csv')
df.head().T
df.columns = ['city', 'scode', 'state', 'dcode', 'poptotal', 'popmale', 'popfemale',
              '0-6poptotal', '0-6popmale', '0-6popfemale', 'litertotal', 'litermale',
              'literfemale', 'sexratio', '0-6sexratio', 'elrtotal', 'elrmale',
              'elrfemale', 'location', 'totalgrad', 'malegrad', 'femalegrad']
df.isnull().sum()
df.describe().T
df = df.drop('location', axis=1)
df.head().T
df.state.nunique()
df.city.count()
city_count = df.groupby('state')['city'].count().sort_values(ascending=True)
df.popmale.sum()
df.popfemale.sum()
df[df.popmale == df.popmale.max()]
df[df.popfemale == df.popfemale.max()]
df.columns
df[['city', 'elrtotal']].sort_values(by='elrtotal', ascending=False).head(10)
code
16166088/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
_data = pd.read_csv('../input/zomato.csv')
data = _data.drop(columns=['url', 'address', 'phone'], axis=1)
columns = data.columns
splist = []
cuisine = []
for i in range(0, data['cuisines'].count()):
    splist = str(data['cuisines'][i]).split(', ')
    for item in splist:
        if item not in cuisine:
            cuisine.append(item)
cuisine
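# The loop above collects the distinct comma-separated cuisine names. An equivalent
# set-based sketch (hypothetical name cuisine_set, not in the original; dropna() skips
# missing rows, whereas the loop records str(nan) as 'nan'):
#   cuisine_set = sorted({c for row in data['cuisines'].dropna() for c in str(row).split(', ')})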
code
16166088/cell_6
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
_data = pd.read_csv('../input/zomato.csv')
data = _data.drop(columns=['url', 'address', 'phone'], axis=1)
columns = data.columns
splist = []
cuisine = []
for i in range(0, data['cuisines'].count()):
    splist = str(data['cuisines'][i]).split(', ')
    for item in splist:
        if item not in cuisine:
            cuisine.append(item)
cuisine
cuisineCount = pd.DataFrame(columns=['cuisines', 'count'])
i = 0
for c in cuisine:
    restaurant = data['cuisines'].str.contains(c, case=False, regex=True, na=False)
    cuisineCount.loc[i] = [c, restaurant[restaurant == True].count()]
    i = i + 1
cuisineCount.sort_values(by='count', axis=0, ascending=False, inplace=True)
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(1, figsize=(30, 15))
data_CuisineCount = cuisineCount.iloc[0:25, :]
plt.subplot(2, 1, 1)
plt.subplots_adjust(hspace=0.5, wspace=0.5)
plt.bar(data_CuisineCount['cuisines'], data_CuisineCount['count'])
plt.subplot(2, 1, 2)
plt.subplots_adjust(hspace=0.5, wspace=0.5)
plt.pie(x=data_CuisineCount['count'], labels=data_CuisineCount['cuisines'])
plt.show()
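# Note: the figure stacks two views of the same top-25 cuisine counts — subplot(2, 1, 1)
# draws the bar chart and subplot(2, 1, 2) the pie chart.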
code
16166088/cell_2
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
_data = pd.read_csv('../input/zomato.csv')
_data.head()
code
16166088/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
code
16166088/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
_data = pd.read_csv('../input/zomato.csv')
print('Original set of columns:{}'.format(_data.columns))
data = _data.drop(columns=['url', 'address', 'phone'], axis=1)
columns = data.columns
print('New columns : {}'.format(columns))
code
16166088/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
_data = pd.read_csv('../input/zomato.csv')
data = _data.drop(columns=['url', 'address', 'phone'], axis=1)
columns = data.columns
splist = []
cuisine = []
for i in range(0, data['cuisines'].count()):
    splist = str(data['cuisines'][i]).split(', ')
    for item in splist:
        if item not in cuisine:
            cuisine.append(item)
cuisine
cuisineCount = pd.DataFrame(columns=['cuisines', 'count'])
i = 0
for c in cuisine:
    restaurant = data['cuisines'].str.contains(c, case=False, regex=True, na=False)
    cuisineCount.loc[i] = [c, restaurant[restaurant == True].count()]
    i = i + 1
cuisineCount.sort_values(by='count', axis=0, ascending=False, inplace=True)
print('The top 25 cuisines sold in bangalore:\n{}'.format(cuisineCount.head(25)))
code
105212943/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
data = '../input/bank-customer-churn-dataset/Bank Customer Churn Prediction.csv'
df = pd.read_csv(data)
df.shape
df.describe
df.dtypes
features = df.keys()
features = features.drop('churn')
subsets = ['credit_score']
df.groupby('churn')[features].mean().style.background_gradient(cmap='ocean')
fig = px.histogram(df, x="age", y="balance", color="churn", marginal="box", hover_data=df.columns)
fig.show()
sizes = [df.churn[df['churn']==1].count(), df.churn[df['churn']==0].count()]
labels = ['Churned', 'Not Churned']
figure, axes = plt.subplots(figsize=(10, 8))
axes.pie(sizes, labels=labels,shadow=True,autopct = '%1.2f%%')
plt.legend()
plt.title("Churned VS Not Churned", size = 15)
plt.show()
churned_french = df.churn[(df.country == 'France') & (df.churn == 1)].count()
count_french = df.churn[df.country == 'France'].count()
churned_german = df.churn[(df.country == 'Germany') & (df.churn == 1)].count()
count_german = df.churn[df.country == 'Germany'].count()
churned_spain = df.churn[(df.country == 'Spain') & (df.churn == 1)].count()
count_spain = df.churn[df.country == 'Spain'].count()
px.histogram(df, x='country', color='churn', barmode='group')
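# Note: each churned_*/count_* pair counts churned vs. total customers for one country;
# the grouped histogram on the last line plots the same country-by-churn split.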
code
105212943/cell_9
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '../input/bank-customer-churn-dataset/Bank Customer Churn Prediction.csv'
df = pd.read_csv(data)
df.shape
df.describe
df.dtypes
features = df.keys()
features = features.drop('churn')
subsets = ['credit_score']
df.groupby('churn')[features].mean().style.background_gradient(cmap='ocean')
code
105212943/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '../input/bank-customer-churn-dataset/Bank Customer Churn Prediction.csv'
df = pd.read_csv(data)
df.shape
code
105212943/cell_2
[ "text_plain_output_1.png" ]
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import missingno as msno
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler,LabelEncoder
import scipy.special
import scipy.stats as stats
from scipy.stats import skew, kurtosis, shapiro
!pip install --pre --quiet pycaret
from pycaret.classification import *
import warnings
warnings.filterwarnings('ignore')
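# Note: the '!pip install' line is IPython shell magic, so this cell runs only inside a
# notebook kernel, not as a plain Python script.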
code
105212943/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
data = '../input/bank-customer-churn-dataset/Bank Customer Churn Prediction.csv'
df = pd.read_csv(data)
df.shape
df.describe
df.dtypes
features = df.keys()
features = features.drop('churn')
subsets = ['credit_score']
df.groupby('churn')[features].mean().style.background_gradient(cmap='ocean')
fig = px.histogram(df, x="age", y="balance", color="churn", marginal="box", hover_data=df.columns)
fig.show()
sizes = [df.churn[df['churn'] == 1].count(), df.churn[df['churn'] == 0].count()]
labels = ['Churned', 'Not Churned']
figure, axes = plt.subplots(figsize=(10, 8))
axes.pie(sizes, labels=labels, shadow=True, autopct='%1.2f%%')
plt.legend()
plt.title('Churned VS Not Churned', size=15)
plt.show()
code
105212943/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105212943/cell_7
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '../input/bank-customer-churn-dataset/Bank Customer Churn Prediction.csv'
df = pd.read_csv(data)
df.shape
df.describe
code
105212943/cell_18
[ "text_html_output_2.png" ]
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import seaborn as sns
data = '../input/bank-customer-churn-dataset/Bank Customer Churn Prediction.csv'
df = pd.read_csv(data)
df.shape
df.describe
df.dtypes
features = df.keys()
features = features.drop('churn')
subsets = ['credit_score']
df.groupby('churn')[features].mean().style.background_gradient(cmap='ocean')
fig = px.histogram(df, x="age", y="balance", color="churn", marginal="box", hover_data=df.columns)
fig.show()
sizes = [df.churn[df['churn']==1].count(), df.churn[df['churn']==0].count()]
labels = ['Churned', 'Not Churned']
figure, axes = plt.subplots(figsize=(10, 8))
axes.pie(sizes, labels=labels,shadow=True,autopct = '%1.2f%%')
plt.legend()
plt.title("Churned VS Not Churned", size = 15)
plt.show()
churned_french = df.churn[(df.country == 'France') & (df.churn == 1)].count()
count_french = df.churn[df.country == 'France'].count()
churned_german = df.churn[(df.country == 'Germany') & (df.churn == 1)].count()
count_german = df.churn[df.country == 'Germany'].count()
churned_spain = df.churn[(df.country == 'Spain') & (df.churn == 1)].count()
count_spain = df.churn[df.country == 'Spain'].count()
from collections import Counter

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        Q1 = np.percentile(df[c], 25)
        Q3 = np.percentile(df[c], 75)
        IQR = Q3 - Q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, v in outlier_indices.items() if v > 2))
    return multiple_outliers

df.loc[detect_outliers(df, ['credit_score', 'balance', 'age', 'estimated_salary'])]
plt.subplots(figsize=(20, 10))
sns.heatmap(df.corr(), annot=True, fmt='.2f', cmap='viridis')
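# Note: detect_outliers returns only the row indices that fall outside the 1.5*IQR fences
# in more than two of the listed columns (v > 2). A hypothetical check, not in the original:
#   toy = pd.DataFrame({'a': [1, 2, 3, 100], 'b': [1, 2, 3, 100], 'c': [1, 2, 3, 100]})
#   detect_outliers(toy, ['a', 'b', 'c'])  # -> [3], extreme in all three columns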
code
105212943/cell_8
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '../input/bank-customer-churn-dataset/Bank Customer Churn Prediction.csv'
df = pd.read_csv(data)
df.shape
df.describe
df.dtypes
code
105212943/cell_17
[ "text_html_output_1.png" ]
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
data = '../input/bank-customer-churn-dataset/Bank Customer Churn Prediction.csv'
df = pd.read_csv(data)
df.shape
df.describe
df.dtypes
features = df.keys()
features = features.drop('churn')
subsets = ['credit_score']
df.groupby('churn')[features].mean().style.background_gradient(cmap='ocean')
fig = px.histogram(df, x="age", y="balance", color="churn", marginal="box", hover_data=df.columns)
fig.show()
sizes = [df.churn[df['churn']==1].count(), df.churn[df['churn']==0].count()]
labels = ['Churned', 'Not Churned']
figure, axes = plt.subplots(figsize=(10, 8))
axes.pie(sizes, labels=labels,shadow=True,autopct = '%1.2f%%')
plt.legend()
plt.title("Churned VS Not Churned", size = 15)
plt.show()
churned_french = df.churn[(df.country == 'France') & (df.churn == 1)].count()
count_french = df.churn[df.country == 'France'].count()
churned_german = df.churn[(df.country == 'Germany') & (df.churn == 1)].count()
count_german = df.churn[df.country == 'Germany'].count()
churned_spain = df.churn[(df.country == 'Spain') & (df.churn == 1)].count()
count_spain = df.churn[df.country == 'Spain'].count()
from collections import Counter

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        Q1 = np.percentile(df[c], 25)
        Q3 = np.percentile(df[c], 75)
        IQR = Q3 - Q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, v in outlier_indices.items() if v > 2))
    return multiple_outliers

df.loc[detect_outliers(df, ['credit_score', 'balance', 'age', 'estimated_salary'])]
code
105212943/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
data = '../input/bank-customer-churn-dataset/Bank Customer Churn Prediction.csv'
df = pd.read_csv(data)
df.shape
df.describe
df.dtypes
features = df.keys()
features = features.drop('churn')
subsets = ['credit_score']
df.groupby('churn')[features].mean().style.background_gradient(cmap='ocean')
fig = px.histogram(df, x="age", y="balance", color="churn", marginal="box", hover_data=df.columns)
fig.show()
sizes = [df.churn[df['churn']==1].count(), df.churn[df['churn']==0].count()]
labels = ['Churned', 'Not Churned']
figure, axes = plt.subplots(figsize=(10, 8))
axes.pie(sizes, labels=labels,shadow=True,autopct = '%1.2f%%')
plt.legend()
plt.title("Churned VS Not Churned", size = 15)
plt.show()
churned_french = df.churn[(df.country == 'France') & (df.churn == 1)].count()
count_french = df.churn[df.country == 'France'].count()
churned_german = df.churn[(df.country == 'Germany') & (df.churn == 1)].count()
count_german = df.churn[df.country == 'Germany'].count()
churned_spain = df.churn[(df.country == 'Spain') & (df.churn == 1)].count()
count_spain = df.churn[df.country == 'Spain'].count()
for col in df[['country', 'gender', 'products_number', 'credit_card', 'active_member', 'churn']]:
    print('******************')
    print(col)
    print('******************')
    print(df[col].value_counts(dropna=False, normalize=True))
    print('_____________________________________________________')
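# Note: iterating over a DataFrame yields its column labels, so col is each column name,
# and value_counts(normalize=True) prints each category's share rather than raw counts.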
code
105212943/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
data = '../input/bank-customer-churn-dataset/Bank Customer Churn Prediction.csv'
df = pd.read_csv(data)
df.shape
df.describe
df.dtypes
features = df.keys()
features = features.drop('churn')
subsets = ['credit_score']
df.groupby('churn')[features].mean().style.background_gradient(cmap='ocean')
fig = px.histogram(df, x='age', y='balance', color='churn', marginal='box', hover_data=df.columns)
fig.show()
code
105212943/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
data = '../input/bank-customer-churn-dataset/Bank Customer Churn Prediction.csv'
df = pd.read_csv(data)
df.shape
df.describe
df.dtypes
features = df.keys()
features = features.drop('churn')
subsets = ['credit_score']
df.groupby('churn')[features].mean().style.background_gradient(cmap='ocean')
fig = px.histogram(df, x="age", y="balance", color="churn", marginal="box", hover_data=df.columns)
fig.show()
sizes = [df.churn[df['churn']==1].count(), df.churn[df['churn']==0].count()]
labels = ['Churned', 'Not Churned']
figure, axes = plt.subplots(figsize=(10, 8))
axes.pie(sizes, labels=labels,shadow=True,autopct = '%1.2f%%')
plt.legend()
plt.title("Churned VS Not Churned", size = 15)
plt.show()
churned_french = df.churn[(df.country == 'France') & (df.churn == 1)].count()
count_french = df.churn[df.country == 'France'].count()
print('Percent of French People Who Churned --->', churned_french * 100 / count_french, '%')
churned_german = df.churn[(df.country == 'Germany') & (df.churn == 1)].count()
count_german = df.churn[df.country == 'Germany'].count()
print('Percent of German People Who Churned --->', churned_german * 100 / count_german, '%')
churned_spain = df.churn[(df.country == 'Spain') & (df.churn == 1)].count()
count_spain = df.churn[df.country == 'Spain'].count()
print('Percent of Spanish People Who Churned --->', churned_spain * 100 / count_spain, '%')
code
105212943/cell_5
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '../input/bank-customer-churn-dataset/Bank Customer Churn Prediction.csv'
df = pd.read_csv(data)
df.head()
code
49129186/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
49129186/cell_3
[ "image_output_2.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/kaggle-survey-2020/kaggle_survey_2020_responses.csv', header=1, na_values='NaN')
data = data.fillna('Not Given')
data.head()
code
49129186/cell_12
[ "text_html_output_1.png" ]
from IPython.display import display,clear_output
from ipywidgets import interact, interactive, fixed, interact_manual,VBox,HBox,Layout
import ipywidgets as widgets
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.graph_objects as go
data = pd.read_csv('/kaggle/input/kaggle-survey-2020/kaggle_survey_2020_responses.csv', header=1, na_values='NaN')
data = data.fillna('Not Given')

def plot_map(grp_data):
    fig = go.Figure(data=go.Choropleth(locations=grp_data['Country'], z=grp_data['people'], text=grp_data['Country'], colorscale='earth', locationmode='country names', autocolorscale=False, reversescale=False, marker_line_color='darkgray', marker_line_width=0.5, colorbar_tickprefix='', colorbar_title='people'))
    fig.update_layout(title_text='People by country', height=500, width=500, geo=dict(showframe=False, showcoastlines=False, projection_type='equirectangular'), annotations=[dict(x=0.55, y=0.1, xref='paper', yref='paper', showarrow=False)])

def map_gender_plot(df):
    gender = data['What is your gender? - Selected Choice'].unique().tolist()
    xvar = widgets.Dropdown(options=gender, description='Gender:')
    def f(xvar_):
        grp_data = data[data['What is your gender? - Selected Choice'] == xvar_].groupby(by='In which country do you currently reside?').size().sort_values(ascending=False).reset_index()
        grp_data.columns = ['Country', 'people']
    a = interactive(f, xvar_=xvar)
    ctrl_map = HBox(a.children[:-1], layout=Layout(flex_flow='row wrap'))
    out_map = a.children[-1]
    return [ctrl_map, out_map]

def map_age_plot(df):
    age = data['What is your age (# years)?'].sort_values().unique().tolist()
    xvar = widgets.Dropdown(options=age, description='Age:')
    def f(xvar_):
        grp_data = data[data['What is your age (# years)?'] == xvar_].groupby(by='In which country do you currently reside?').size().sort_values(ascending=False).reset_index()
        grp_data.columns = ['Country', 'people']
    a = interactive(f, xvar_=xvar)
    ctrl_map = HBox(a.children[:-1], layout=Layout(flex_flow='row wrap'))
    out_map = a.children[-1]
    return [ctrl_map, out_map]

def map_education_plot(df):
    age = data['What is the highest level of formal education that you have attained or plan to attain within the next 2 years?'].sort_values().unique().tolist()
    xvar = widgets.Dropdown(options=age, description='Education:')
    def f(xvar_):
        grp_data = data[data['What is the highest level of formal education that you have attained or plan to attain within the next 2 years?'] == xvar_].groupby(by='In which country do you currently reside?').size().sort_values(ascending=False).reset_index()
        grp_data.columns = ['Country', 'people']
    a = interactive(f, xvar_=xvar)
    ctrl_map = HBox(a.children[:-1], layout=Layout(flex_flow='row wrap'))
    out_map = a.children[-1]
    return [ctrl_map, out_map]

def map_role_plot(df):
    age = data['Select the title most similar to your current role (or most recent title if retired): - Selected Choice'].sort_values().unique().tolist()
    xvar = widgets.Dropdown(options=age, description='Current Role:')
    def f(xvar_):
        grp_data = data[data['Select the title most similar to your current role (or most recent title if retired): - Selected Choice'] == xvar_].groupby(by='In which country do you currently reside?').size().sort_values(ascending=False).reset_index()
        grp_data.columns = ['Country', 'people']
    a = interactive(f, xvar_=xvar)
    ctrl_map = HBox(a.children[:-1], layout=Layout(flex_flow='row wrap'))
    out_map = a.children[-1]
    return [ctrl_map, out_map]

tab1 = HBox(map_gender_plot(data))
tab2 = HBox(map_age_plot(data))
tab3 = HBox(map_education_plot(data))
tab4 = HBox(map_role_plot(data))
tab = widgets.Tab(children=[tab1, tab2, tab3, tab4])
tab.set_title(0, 'Gender')
tab.set_title(1, 'Age')
tab.set_title(2, 'Education')
tab.set_title(3, 'Role')
display(tab)
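# Note: as extracted here, each f(...) callback only builds grp_data; plot_map is defined
# but never called, so the interactive output area under each dropdown stays empty.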
code
18153034/cell_21
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
sns.set(color_codes=True, style='darkgrid')
games = pd.read_csv('../input/games.csv')
games = games[games.rated]
games['mean_rating'] = (games.white_rating + games.black_rating) / 2
games['rating_diff'] = abs(games.white_rating - games.black_rating)
games.victory_status.value_counts()
under_1500 = games[games.mean_rating < 1500]
under_2000 = games[games.mean_rating < 2000]
over_2000 = games[games.mean_rating > 2000]
brackets = [under_1500, under_2000, over_2000]
bracket_titles = ['Under 1500', 'Under 2000', 'Over 2000']
for i, bracket in enumerate(brackets):
    victory_status = bracket.victory_status.value_counts()
mate_games = games[games.victory_status == 'mate']
under_1500 = mate_games[mate_games.mean_rating < 1500]
under_2000 = mate_games[mate_games.mean_rating < 2000]
over_2000 = mate_games[mate_games.mean_rating > 2000]
m_brackets = [under_1500, under_2000, over_2000]
turn_means = [b.turns.mean() for b in m_brackets]
plt.ylim(0, 100)
plt.figure(figsize=(10, 5))
plt.scatter(mate_games.mean_rating, mate_games.turns)
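# Note: plt.ylim(0, 100) runs before plt.figure(), so the limit applies to the previous
# implicit figure, not to the scatter plot created on the next two lines.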
code
18153034/cell_9
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
sns.set(color_codes=True, style='darkgrid')
games = pd.read_csv('../input/games.csv')
games = games[games.rated]
games['mean_rating'] = (games.white_rating + games.black_rating) / 2
games['rating_diff'] = abs(games.white_rating - games.black_rating)
plt.figure(figsize=(10, 5))
sns.distplot(games['mean_rating'])
code
18153034/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
sns.set(color_codes=True, style='darkgrid')
games = pd.read_csv('../input/games.csv')
games = games[games.rated]
games['mean_rating'] = (games.white_rating + games.black_rating) / 2
games['rating_diff'] = abs(games.white_rating - games.black_rating)
games.victory_status.value_counts()
under_1500 = games[games.mean_rating < 1500]
under_2000 = games[games.mean_rating < 2000]
over_2000 = games[games.mean_rating > 2000]
brackets = [under_1500, under_2000, over_2000]
bracket_titles = ['Under 1500', 'Under 2000', 'Over 2000']
for i, bracket in enumerate(brackets):
    victory_status = bracket.victory_status.value_counts()
mate_games = games[games.victory_status == 'mate']
under_1500 = mate_games[mate_games.mean_rating < 1500]
under_2000 = mate_games[mate_games.mean_rating < 2000]
over_2000 = mate_games[mate_games.mean_rating > 2000]
m_brackets = [under_1500, under_2000, over_2000]
turn_means = [b.turns.mean() for b in m_brackets]
plt.ylim(0, 100)
mate_games.loc[mate_games['turns'].idxmax()]
code
18153034/cell_30
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
sns.set(color_codes=True, style='darkgrid')
games = pd.read_csv('../input/games.csv')
games = games[games.rated]
games['mean_rating'] = (games.white_rating + games.black_rating) / 2
games['rating_diff'] = abs(games.white_rating - games.black_rating)
games.victory_status.value_counts()
under_1500 = games[games.mean_rating < 1500]
under_2000 = games[games.mean_rating < 2000]
over_2000 = games[games.mean_rating > 2000]
brackets = [under_1500, under_2000, over_2000]
bracket_titles = ['Under 1500', 'Under 2000', 'Over 2000']
mate_games = games[games.victory_status == 'mate']
under_1500 = mate_games[mate_games.mean_rating < 1500]
under_2000 = mate_games[mate_games.mean_rating < 2000]
over_2000 = mate_games[mate_games.mean_rating > 2000]
m_brackets = [under_1500, under_2000, over_2000]
white_upsets = games[(games.winner == 'white') & (games.white_rating < games.black_rating)]
black_upsets = games[(games.winner == 'black') & (games.black_rating < games.white_rating)]
upsets = pd.concat([white_upsets, black_upsets])
THRESHOLD = 900
STEP = 50
u_percentages = []
print(f'Rating difference : Percentage of wins by weaker player')
for i in range(0 + STEP, THRESHOLD, STEP):
    th_upsets = upsets[upsets.rating_diff > i]
    th_games = games[games.rating_diff > i]
    upsets_percentage = th_upsets.shape[0] / th_games.shape[0] * 100
    u_percentages.append([i, upsets_percentage])
    print(f'{str(i).ljust(18)}: {upsets_percentage:.2f}%')
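# Note: range(0 + STEP, THRESHOLD, STEP) walks the thresholds 50, 100, ..., 850, so each
# u_percentages entry is [rating-difference threshold, share of those games won by the
# lower-rated player].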
code
18153034/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
sns.set(color_codes=True, style='darkgrid')
games = pd.read_csv('../input/games.csv')
games = games[games.rated]
games['mean_rating'] = (games.white_rating + games.black_rating) / 2
games['rating_diff'] = abs(games.white_rating - games.black_rating)
games.victory_status.value_counts()
under_1500 = games[games.mean_rating < 1500]
under_2000 = games[games.mean_rating < 2000]
over_2000 = games[games.mean_rating > 2000]
brackets = [under_1500, under_2000, over_2000]
bracket_titles = ['Under 1500', 'Under 2000', 'Over 2000']
for i, bracket in enumerate(brackets):
    victory_status = bracket.victory_status.value_counts()
mate_games = games[games.victory_status == 'mate']
under_1500 = mate_games[mate_games.mean_rating < 1500]
under_2000 = mate_games[mate_games.mean_rating < 2000]
over_2000 = mate_games[mate_games.mean_rating > 2000]
m_brackets = [under_1500, under_2000, over_2000]
turn_means = [b.turns.mean() for b in m_brackets]
plt.figure(figsize=(10, 5))
plt.ylim(0, 100)
plt.title('Number of turns until mate')
plt.plot(bracket_titles, turn_means, 'o-', color='r')
code
18153034/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
games = pd.read_csv('../input/games.csv')
games.head(2)
code
18153034/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
sns.set(color_codes=True, style='darkgrid')
games = pd.read_csv('../input/games.csv')
games = games[games.rated]
games['mean_rating'] = (games.white_rating + games.black_rating) / 2
games['rating_diff'] = abs(games.white_rating - games.black_rating)
games.victory_status.value_counts()
under_1500 = games[games.mean_rating < 1500]
under_2000 = games[games.mean_rating < 2000]
over_2000 = games[games.mean_rating > 2000]
brackets = [under_1500, under_2000, over_2000]
bracket_titles = ['Under 1500', 'Under 2000', 'Over 2000']
for i, bracket in enumerate(brackets):
    victory_status = bracket.victory_status.value_counts()
mate_games = games[games.victory_status == 'mate']
under_1500 = mate_games[mate_games.mean_rating < 1500]
under_2000 = mate_games[mate_games.mean_rating < 2000]
over_2000 = mate_games[mate_games.mean_rating > 2000]
m_brackets = [under_1500, under_2000, over_2000]
turn_means = [b.turns.mean() for b in m_brackets]
plt.ylim(0, 100)
mate_games.loc[mate_games['turns'].idxmax()]
scholar_mates = mate_games[mate_games.turns == 4]
scholar_mates
code
18153034/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
sns.set(color_codes=True, style='darkgrid')
games = pd.read_csv('../input/games.csv')
games = games[games.rated]
games['mean_rating'] = (games.white_rating + games.black_rating) / 2
games['rating_diff'] = abs(games.white_rating - games.black_rating)
plt.figure(figsize=(10, 5))
sns.distplot(games.turns)
code
18153034/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
sns.set(color_codes=True, style='darkgrid')
games = pd.read_csv('../input/games.csv')
games = games[games.rated]
games['mean_rating'] = (games.white_rating + games.black_rating) / 2
games['rating_diff'] = abs(games.white_rating - games.black_rating)
games.victory_status.value_counts()
under_1500 = games[games.mean_rating < 1500]
under_2000 = games[games.mean_rating < 2000]
over_2000 = games[games.mean_rating > 2000]
brackets = [under_1500, under_2000, over_2000]
bracket_titles = ['Under 1500', 'Under 2000', 'Over 2000']
plt.figure(figsize=(15, 11))
for i, bracket in enumerate(brackets):
    victory_status = bracket.victory_status.value_counts()
    plt.subplot(1, 4, i + 1)
    plt.title(bracket_titles[i])
    plt.pie(victory_status, labels=victory_status.index)
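# Note: plt.subplot(1, 4, i + 1) lays out four slots, but the loop draws only three pies,
# so the fourth slot stays blank.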
code
18153034/cell_31
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
sns.set(color_codes=True, style='darkgrid')
games = pd.read_csv('../input/games.csv')
games = games[games.rated]
games['mean_rating'] = (games.white_rating + games.black_rating) / 2
games['rating_diff'] = abs(games.white_rating - games.black_rating)
games.victory_status.value_counts()
under_1500 = games[games.mean_rating < 1500]
under_2000 = games[games.mean_rating < 2000]
over_2000 = games[games.mean_rating > 2000]
brackets = [under_1500, under_2000, over_2000]
bracket_titles = ['Under 1500', 'Under 2000', 'Over 2000']
for i, bracket in enumerate(brackets):
    victory_status = bracket.victory_status.value_counts()
mate_games = games[games.victory_status == 'mate']
under_1500 = mate_games[mate_games.mean_rating < 1500]
under_2000 = mate_games[mate_games.mean_rating < 2000]
over_2000 = mate_games[mate_games.mean_rating > 2000]
m_brackets = [under_1500, under_2000, over_2000]
turn_means = [b.turns.mean() for b in m_brackets]
plt.ylim(0, 100)
white_upsets = games[(games.winner == 'white') & (games.white_rating < games.black_rating)]
black_upsets = games[(games.winner == 'black') & (games.black_rating < games.white_rating)]
upsets = pd.concat([white_upsets, black_upsets])
THRESHOLD = 900
STEP = 50
u_percentages = []
for i in range(0 + STEP, THRESHOLD, STEP):
    th_upsets = upsets[upsets.rating_diff > i]
    th_games = games[games.rating_diff > i]
    upsets_percentage = th_upsets.shape[0] / th_games.shape[0] * 100
    u_percentages.append([i, upsets_percentage])
plt.figure(figsize=(10, 5))
plt.plot(*zip(*u_percentages))
plt.xlabel('rating difference')
plt.ylabel('upsets percentage')
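# Note: zip(*u_percentages) unpacks the [threshold, percentage] pairs into the x and y
# sequences that plt.plot expects.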
code
18153034/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
sns.set(color_codes=True, style='darkgrid')
games = pd.read_csv('../input/games.csv')
games = games[games.rated]
games['mean_rating'] = (games.white_rating + games.black_rating) / 2
games['rating_diff'] = abs(games.white_rating - games.black_rating)
games.victory_status.value_counts()
code
122262213/cell_9
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeClassifier, plot_tree, export_text
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
prediction = clf.predict(X_test)
accuracy_score(y_test, prediction)
print(classification_report(y_test, prediction))
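# Note: X_train, X_test, y_train and y_test are not defined in this cell; they come from
# an earlier cell of the source notebook (presumably a train/test split of IRIS.csv) that
# is not included in this extract.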
code
122262213/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/iris-flower-dataset/IRIS.csv')
df.columns
df.head()
code
122262213/cell_6
[ "text_plain_output_1.png" ]
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeClassifier, plot_tree, export_text
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
code
122262213/cell_7
[ "text_html_output_1.png" ]
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeClassifier, plot_tree, export_text
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
prediction = clf.predict(X_test)
accuracy_score(y_test, prediction)
code
122262213/cell_8
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeClassifier, plot_tree, export_text
import pandas as pd
df = pd.read_csv('/kaggle/input/iris-flower-dataset/IRIS.csv')
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
prediction = clf.predict(X_test)
accuracy_score(y_test, prediction)
pd.crosstab(y_test, prediction, rownames=['Real'], colnames=['Prediction'])
code
122262213/cell_3
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/iris-flower-dataset/IRIS.csv')
df.columns
code