Columns: path (string, 13-17 chars), screenshot_names (sequence, 1-873 items), code (string, 0-40.4k chars), cell_type (string, 1 class: "code")
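A minimal sketch of iterating the records described above, assuming the dump has been exported to a pandas-readable file (the file name 'cells.parquet' is hypothetical):

import pandas as pd

df = pd.read_parquet('cells.parquet')  # columns: path, screenshot_names, code, cell_type
for _, row in df.head(3).iterrows():
    notebook_id, cell_name = row['path'].split('/')  # e.g. '18159957/cell_12'
    print(notebook_id, cell_name, row['cell_type'], len(row['code']), list(row['screenshot_names']))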
18159957/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt

labels_house = ['yes', 'no', 'unknown']
sizes_house = [2175, 1839, 105]
colors_house = ['#ff6666', '#ffcc99', '#ffb3e6']
labels_loan = ['yes', 'no', 'unknown']
sizes_loan = [665, 3349, 105]
colors_loan = ['#c2c2f0', '#ffb3e6', '#66b3ff']
labels_contact = ['cellular', 'telephone']
sizes_contact = [2652, 1467]
colors_contact = ['#ff9999', '#ffcc99']
labels_default = ['no', 'unknown', 'yes']
sizes_default = [3523, 454, 142]
colors_default = ['#99ff99', '#66b3ff', '#ff6666']
plt.rcParams.update({'font.size': 15})
plt.figure(0)
plt.pie(sizes_house, labels=labels_house, colors=colors_house, autopct='%1.1f%%', startangle=90, pctdistance=0.8)
plt.title('Housing Loan')
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
plt.show()
plt.figure(1)
plt.pie(sizes_loan, labels=labels_loan, colors=colors_loan, autopct='%1.1f%%', startangle=90, pctdistance=0.8)
plt.title('Personal Loan')
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
plt.show()
plt.figure(2)
plt.pie(sizes_contact, labels=labels_contact, colors=colors_contact, autopct='%1.1f%%', startangle=90, pctdistance=0.8)
plt.title('Contact Method')
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
plt.show()
plt.figure(3)
plt.pie(sizes_default, labels=labels_default, colors=colors_default, autopct='%1.1f%%', startangle=90, pctdistance=0.8)
plt.title('default')
centre_circle = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=0)
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
plt.show()
code
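The cell above repeats the same donut-chart recipe four times; a loop over (labels, sizes, colors, title) tuples would express it once. A minimal sketch with the same data and the same matplotlib calls:

import matplotlib.pyplot as plt

charts = [
    (['yes', 'no', 'unknown'], [2175, 1839, 105], ['#ff6666', '#ffcc99', '#ffb3e6'], 'Housing Loan'),
    (['yes', 'no', 'unknown'], [665, 3349, 105], ['#c2c2f0', '#ffb3e6', '#66b3ff'], 'Personal Loan'),
    (['cellular', 'telephone'], [2652, 1467], ['#ff9999', '#ffcc99'], 'Contact Method'),
    (['no', 'unknown', 'yes'], [3523, 454, 142], ['#99ff99', '#66b3ff', '#ff6666'], 'Default'),
]
for labels, sizes, colors, title in charts:
    plt.figure()
    plt.pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%', startangle=90, pctdistance=0.8)
    plt.title(title)
    # Punch a white hole in the middle to turn the pie into a donut chart.
    plt.gca().add_artist(plt.Circle((0, 0), 0.5, fc='white', linewidth=0))
    plt.show()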
16122877/cell_21
[ "text_html_output_1.png" ]
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from nltk.corpus import stopwords
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer

df = pd.read_csv('../input/taylor_swift_lyrics.csv', encoding='latin-1')
pd.options.display.max_colwidth = 5000
songs = df.groupby('track_title').agg({'lyric': lambda x: ' '.join(x), 'year': 'mean'}).reset_index()
stop_words = stopwords.words('english')
# Extend the stop list with lyric filler words before vectorizing.
stop_words.extend(['back', 'said', 'come', 'things', 'get', 'oh', 'one', 'yeah', 'place', 'would', 'like', 'know', 'stay', 'go', 'let', 'cause', 'could', 'wanna', 'gonna'])
vectorizer = TfidfVectorizer(stop_words=stop_words, min_df=0.1)
tfidf = vectorizer.fit_transform(songs['lyric'])
nmf = NMF(n_components=6)
topic_values = nmf.fit_transform(tfidf)
for topic_num, topic in enumerate(nmf.components_):
    message = 'Topic #{}: '.format(topic_num + 1)
    message += ' '.join([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-11:-1]])
topic_labels = ['love/beauty', 'growing up', 'home', 'bad/remorse', 'hope/better', 'party/dance']
df_topics = pd.DataFrame(topic_values, columns=topic_labels)
songs = songs.join(df_topics)
for topic in topic_labels:
    songs.loc[songs[topic] >= 0.1, topic] = 1
    songs.loc[songs[topic] < 0.1, topic] = 0
year_topics = songs.groupby('year').sum().reset_index()
year_topics
matplotlib.rcParams.update({'font.size': 30, 'lines.linewidth': 8})
plt.figure(figsize=(30, 15))
plt.grid(True)
for topic in topic_labels:
    plt.plot(year_topics['year'], year_topics[topic], label=topic, linewidth=7.0)
plt.legend()
plt.xlabel('year')
plt.ylabel('# of songs per topic')
plt.title("Topic modeling of Taylor Swift's lyrics")
plt.show()
code
16122877/cell_4
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/taylor_swift_lyrics.csv', encoding='latin-1')
songs = df.groupby('track_title').agg({'lyric': lambda x: ' '.join(x), 'year': 'mean'}).reset_index()
len(songs)
code
16122877/cell_20
[ "text_html_output_1.png" ]
import pandas as pd
from nltk.corpus import stopwords
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer

df = pd.read_csv('../input/taylor_swift_lyrics.csv', encoding='latin-1')
pd.options.display.max_colwidth = 5000
songs = df.groupby('track_title').agg({'lyric': lambda x: ' '.join(x), 'year': 'mean'}).reset_index()
stop_words = stopwords.words('english')
stop_words.extend(['back', 'said', 'come', 'things', 'get', 'oh', 'one', 'yeah', 'place', 'would', 'like', 'know', 'stay', 'go', 'let', 'cause', 'could', 'wanna', 'gonna'])
vectorizer = TfidfVectorizer(stop_words=stop_words, min_df=0.1)
tfidf = vectorizer.fit_transform(songs['lyric'])
nmf = NMF(n_components=6)
topic_values = nmf.fit_transform(tfidf)
for topic_num, topic in enumerate(nmf.components_):
    message = 'Topic #{}: '.format(topic_num + 1)
    message += ' '.join([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-11:-1]])
topic_labels = ['love/beauty', 'growing up', 'home', 'bad/remorse', 'hope/better', 'party/dance']
df_topics = pd.DataFrame(topic_values, columns=topic_labels)
songs = songs.join(df_topics)
for topic in topic_labels:
    songs.loc[songs[topic] >= 0.1, topic] = 1
    songs.loc[songs[topic] < 0.1, topic] = 0
year_topics = songs.groupby('year').sum().reset_index()
year_topics
code
16122877/cell_19
[ "text_html_output_1.png" ]
import pandas as pd
from nltk.corpus import stopwords
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer

df = pd.read_csv('../input/taylor_swift_lyrics.csv', encoding='latin-1')
pd.options.display.max_colwidth = 5000
songs = df.groupby('track_title').agg({'lyric': lambda x: ' '.join(x), 'year': 'mean'}).reset_index()
stop_words = stopwords.words('english')
stop_words.extend(['back', 'said', 'come', 'things', 'get', 'oh', 'one', 'yeah', 'place', 'would', 'like', 'know', 'stay', 'go', 'let', 'cause', 'could', 'wanna', 'gonna'])
vectorizer = TfidfVectorizer(stop_words=stop_words, min_df=0.1)
tfidf = vectorizer.fit_transform(songs['lyric'])
nmf = NMF(n_components=6)
topic_values = nmf.fit_transform(tfidf)
for topic_num, topic in enumerate(nmf.components_):
    message = 'Topic #{}: '.format(topic_num + 1)
    message += ' '.join([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-11:-1]])
topic_labels = ['love/beauty', 'growing up', 'home', 'bad/remorse', 'hope/better', 'party/dance']
df_topics = pd.DataFrame(topic_values, columns=topic_labels)
songs = songs.join(df_topics)
for topic in topic_labels:
    songs.loc[songs[topic] >= 0.1, topic] = 1
    songs.loc[songs[topic] < 0.1, topic] = 0
songs.head()
code
16122877/cell_1
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/taylor_swift_lyrics.csv', encoding='latin-1')
df.head()
code
16122877/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
from nltk.corpus import stopwords
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer

df = pd.read_csv('../input/taylor_swift_lyrics.csv', encoding='latin-1')
pd.options.display.max_colwidth = 5000
songs = df.groupby('track_title').agg({'lyric': lambda x: ' '.join(x), 'year': 'mean'}).reset_index()
stop_words = stopwords.words('english')
stop_words.extend(['back', 'said', 'come', 'things', 'get', 'oh', 'one', 'yeah', 'place', 'would', 'like', 'know', 'stay', 'go', 'let', 'cause', 'could', 'wanna', 'gonna'])
vectorizer = TfidfVectorizer(stop_words=stop_words, min_df=0.1)
tfidf = vectorizer.fit_transform(songs['lyric'])
nmf = NMF(n_components=6)
topic_values = nmf.fit_transform(tfidf)
topic_labels = ['love/beauty', 'growing up', 'home', 'bad/remorse', 'hope/better', 'party/dance']
df_topics = pd.DataFrame(topic_values, columns=topic_labels)
df_topics.head()
code
16122877/cell_3
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/taylor_swift_lyrics.csv', encoding='latin-1')
songs = df.groupby('track_title').agg({'lyric': lambda x: ' '.join(x), 'year': 'mean'}).reset_index()
songs.head()
code
16122877/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd
from nltk.corpus import stopwords
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer

df = pd.read_csv('../input/taylor_swift_lyrics.csv', encoding='latin-1')
pd.options.display.max_colwidth = 5000
songs = df.groupby('track_title').agg({'lyric': lambda x: ' '.join(x), 'year': 'mean'}).reset_index()
stop_words = stopwords.words('english')
stop_words.extend(['back', 'said', 'come', 'things', 'get', 'oh', 'one', 'yeah', 'place', 'would', 'like', 'know', 'stay', 'go', 'let', 'cause', 'could', 'wanna', 'gonna'])
vectorizer = TfidfVectorizer(stop_words=stop_words, min_df=0.1)
tfidf = vectorizer.fit_transform(songs['lyric'])
nmf = NMF(n_components=6)
topic_values = nmf.fit_transform(tfidf)
topic_labels = ['love/beauty', 'growing up', 'home', 'bad/remorse', 'hope/better', 'party/dance']
df_topics = pd.DataFrame(topic_values, columns=topic_labels)
songs = songs.join(df_topics)
songs.head()
code
16122877/cell_14
[ "text_html_output_1.png" ]
import pandas as pd
from nltk.corpus import stopwords
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer

df = pd.read_csv('../input/taylor_swift_lyrics.csv', encoding='latin-1')
songs = df.groupby('track_title').agg({'lyric': lambda x: ' '.join(x), 'year': 'mean'}).reset_index()
stop_words = stopwords.words('english')
stop_words.extend(['back', 'said', 'come', 'things', 'get', 'oh', 'one', 'yeah', 'place', 'would', 'like', 'know', 'stay', 'go', 'let', 'cause', 'could', 'wanna', 'gonna'])
vectorizer = TfidfVectorizer(stop_words=stop_words, min_df=0.1)
tfidf = vectorizer.fit_transform(songs['lyric'])
nmf = NMF(n_components=6)
topic_values = nmf.fit_transform(tfidf)
for topic_num, topic in enumerate(nmf.components_):
    message = 'Topic #{}: '.format(topic_num + 1)
    message += ' '.join([vectorizer.get_feature_names()[i] for i in topic.argsort()[:-11:-1]])
    print(message)
code
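The topic.argsort()[:-11:-1] slice in the cells above selects the indices of the ten largest topic weights in descending order; a tiny self-contained check with made-up numbers:

import numpy as np

topic = np.array([0.1, 0.9, 0.3, 0.7, 0.2])
# argsort is ascending; the reversed slice walks it backwards, yielding the top-3 indices.
top3 = topic.argsort()[:-4:-1]
print(top3)  # [1 3 2] -> weights 0.9, 0.7, 0.3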
327861/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf

train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
f, axarr = plt.subplots(10, 10)
for row in range(10):
    for column in range(10):
        # DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the replacement.
        entry = train_data[train_data['label'] == column].iloc[row].drop('label').to_numpy()
        axarr[row, column].imshow(entry.reshape([28, 28]))
        axarr[row, column].get_xaxis().set_visible(False)
        axarr[row, column].get_yaxis().set_visible(False)
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
train_val_ratio = 0.7
train_data_size = len(train_data)
train_set = train_data[:int(train_data_size * train_val_ratio)]
val_set = train_data[int(train_data_size * train_val_ratio) + 1:]
# tf.initialize_all_variables() is deprecated; global_variables_initializer() is equivalent.
init = tf.global_variables_initializer()
saver = tf.train.Saver()
sess = tf.Session()
sess.run(init)
train_eval_list = []
val_eval_list = []
# The validation matrices never change, so build them once outside the loop.
val_xs = val_set.drop('label', axis=1).to_numpy() / 255.0
val_ys = pd.get_dummies(val_set['label']).to_numpy()
for i in range(1000):
    batch = train_set.sample(frac=0.1)
    batch_xs = batch.drop('label', axis=1).to_numpy() / 255.0
    batch_ys = pd.get_dummies(batch['label']).to_numpy()
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
    train_eval = sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys})
    val_eval = sess.run(accuracy, feed_dict={x: val_xs, y_: val_ys})
    train_eval_list.append(train_eval)
    val_eval_list.append(val_eval)
saver.save(sess, 'logistic_regression.ckpt')
sess.close()
plt.plot(train_eval_list, label='Train set')
plt.plot(val_eval_list, label='Validation set')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(loc=4)
code
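The loss in the cell above is categorical cross-entropy, the mean over the batch of -sum(y * log(y_hat)); a quick NumPy check on a single example with made-up values:

import numpy as np

y_hat = np.array([0.7, 0.2, 0.1])  # softmax output
y = np.array([1.0, 0.0, 0.0])      # one-hot label
loss = -np.sum(y * np.log(y_hat))
print(loss)  # -log(0.7), roughly 0.357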
327861/cell_3
[ "image_output_1.png" ]
import pandas as pd

train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
print(train_data.shape)
print(test_data.shape)
code
327861/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
f, axarr = plt.subplots(10, 10)
for row in range(10):
    for column in range(10):
        # as_matrix() was removed in pandas 1.0; to_numpy() is the replacement.
        entry = train_data[train_data['label'] == column].iloc[row].drop('label').to_numpy()
        axarr[row, column].imshow(entry.reshape([28, 28]))
        axarr[row, column].get_xaxis().set_visible(False)
        axarr[row, column].get_yaxis().set_visible(False)
code
33095970/cell_9
[ "text_plain_output_1.png" ]
import re
import warnings

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.decomposition import LatentDirichletAllocation as LDA
from sklearn.feature_extraction.text import CountVectorizer

papers = pd.read_csv('/kaggle/input/nips-papers/papers.csv')
papers_2010 = papers.loc[papers['year'] == 2010].copy(deep=False)
papers_2017 = papers.loc[papers['year'] == 2017].copy(deep=False)
# Strip punctuation and lowercase the titles.
papers_2010['title_processed'] = papers_2010['title'].map(lambda x: re.sub('[,\\.!?]', '', x))
papers_2017['title_processed'] = papers_2017['title'].map(lambda x: re.sub('[,\\.!?]', '', x))
papers_2010['title_processed'] = papers_2010['title_processed'].map(lambda x: x.lower())
papers_2017['title_processed'] = papers_2017['title_processed'].map(lambda x: x.lower())

def plot_10_most_common_words(count_data, count_vectorizer):
    words = count_vectorizer.get_feature_names()
    total_counts = np.zeros(len(words))
    for t in count_data:
        total_counts += t.toarray()[0]
    count_dict = zip(words, total_counts)
    count_dict = sorted(count_dict, key=lambda x: x[1], reverse=True)[1:51]
    words = [w[0] for w in count_dict]
    counts = [w[1] for w in count_dict]
    x_pos = np.arange(len(words))
    plt.xticks(x_pos, words, rotation=90)

count_vectorizer = CountVectorizer(stop_words='english')
count_data = count_vectorizer.fit_transform(papers_2010['title_processed'])
dict = [count_data, count_vectorizer]  # note: shadows the built-in dict
warnings.simplefilter('ignore', DeprecationWarning)

def print_topics(model, count_vectorizer, n_top_words):
    words = count_vectorizer.get_feature_names()
    for topic_idx, topic in enumerate(model.components_):
        print('\nTopic #%d:' % topic_idx)
        print(' '.join([words[i] for i in topic.argsort()[:-n_top_words - 1:-1]]))

number_topics = 5
number_words = 6
lda = LDA(n_components=number_topics)
lda.fit(count_data)
print('Topics found via LDA:')
print_topics(lda, count_vectorizer, number_words)
code
33095970/cell_2
[ "image_output_1.png" ]
import pandas as pd

papers = pd.read_csv('/kaggle/input/nips-papers/papers.csv')
print(type(papers))
code
33095970/cell_11
[ "text_plain_output_1.png", "image_output_1.png" ]
import re
import warnings

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.decomposition import LatentDirichletAllocation as LDA
from sklearn.feature_extraction.text import CountVectorizer

papers = pd.read_csv('/kaggle/input/nips-papers/papers.csv')
papers_2010 = papers.loc[papers['year'] == 2010].copy(deep=False)
papers_2017 = papers.loc[papers['year'] == 2017].copy(deep=False)
papers_2010['title_processed'] = papers_2010['title'].map(lambda x: re.sub('[,\\.!?]', '', x))
papers_2017['title_processed'] = papers_2017['title'].map(lambda x: re.sub('[,\\.!?]', '', x))
papers_2010['title_processed'] = papers_2010['title_processed'].map(lambda x: x.lower())
papers_2017['title_processed'] = papers_2017['title_processed'].map(lambda x: x.lower())

def plot_10_most_common_words(count_data, count_vectorizer):
    words = count_vectorizer.get_feature_names()
    total_counts = np.zeros(len(words))
    for t in count_data:
        total_counts += t.toarray()[0]
    count_dict = zip(words, total_counts)
    count_dict = sorted(count_dict, key=lambda x: x[1], reverse=True)[1:51]
    words = [w[0] for w in count_dict]
    counts = [w[1] for w in count_dict]
    x_pos = np.arange(len(words))
    plt.xticks(x_pos, words, rotation=90)

count_vectorizer = CountVectorizer(stop_words='english')
count_data = count_vectorizer.fit_transform(papers_2010['title_processed'])
dict = [count_data, count_vectorizer]
warnings.simplefilter('ignore', DeprecationWarning)

def print_topics(model, count_vectorizer, n_top_words):
    words = count_vectorizer.get_feature_names()

number_topics = 5
number_words = 6
lda = LDA(n_components=number_topics)
lda.fit(count_data)

def plot_50_most_common_words(count_data, count_vectorizer):
    words = count_vectorizer.get_feature_names()
    total_counts = np.zeros(len(words))
    for t in count_data:
        total_counts += t.toarray()[0]
    count_dict = zip(words, total_counts)
    count_dict = sorted(count_dict, key=lambda x: x[1], reverse=True)[1:21]
    words = [w[0] for w in count_dict]
    counts = [w[1] for w in count_dict]
    x_pos = np.arange(len(words))
    plt.xticks(x_pos, words, rotation=90)

count_vectorizer = CountVectorizer(stop_words='english')
count_data = count_vectorizer.fit_transform(papers_2017['title_processed'])
count_data.shape

def print_topics(model, count_vectorizer, n_top_words):
    words = count_vectorizer.get_feature_names()
    for topic_idx, topic in enumerate(model.components_):
        print('\nTopic #%d:' % topic_idx)
        print(' '.join([words[i] for i in topic.argsort()[:-n_top_words - 1:-1]]))

number_topics = 5
number_words = 6
lda = LDA(n_components=number_topics)
lda.fit(count_data)
print('Topics found via LDA:')
print_topics(lda, count_vectorizer, number_words)
code
33095970/cell_1
[ "text_plain_output_1.png" ]
import os

import nltk
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33095970/cell_7
[ "text_plain_output_1.png" ]
import re

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer

papers = pd.read_csv('/kaggle/input/nips-papers/papers.csv')
papers_2010 = papers.loc[papers['year'] == 2010].copy(deep=False)
papers_2017 = papers.loc[papers['year'] == 2017].copy(deep=False)
papers_2010['title_processed'] = papers_2010['title'].map(lambda x: re.sub('[,\\.!?]', '', x))
papers_2017['title_processed'] = papers_2017['title'].map(lambda x: re.sub('[,\\.!?]', '', x))
papers_2010['title_processed'] = papers_2010['title_processed'].map(lambda x: x.lower())
papers_2017['title_processed'] = papers_2017['title_processed'].map(lambda x: x.lower())

def plot_10_most_common_words(count_data, count_vectorizer):
    words = count_vectorizer.get_feature_names()
    total_counts = np.zeros(len(words))
    for t in count_data:
        total_counts += t.toarray()[0]
    count_dict = zip(words, total_counts)
    count_dict = sorted(count_dict, key=lambda x: x[1], reverse=True)[1:51]
    words = [w[0] for w in count_dict]
    counts = [w[1] for w in count_dict]
    x_pos = np.arange(len(words))
    plt.bar(x_pos, counts, align='center')
    plt.xticks(x_pos, words, rotation=90)
    plt.xlabel('words')
    plt.ylabel('counts')
    plt.title('20 most common words')
    plt.show()

count_vectorizer = CountVectorizer(stop_words='english')
count_data = count_vectorizer.fit_transform(papers_2010['title_processed'])
plot_10_most_common_words(count_data, count_vectorizer)
dict = [count_data, count_vectorizer]
code
33095970/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import re

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer

papers = pd.read_csv('/kaggle/input/nips-papers/papers.csv')
papers_2010 = papers.loc[papers['year'] == 2010].copy(deep=False)
papers_2017 = papers.loc[papers['year'] == 2017].copy(deep=False)
papers_2010['title_processed'] = papers_2010['title'].map(lambda x: re.sub('[,\\.!?]', '', x))
papers_2017['title_processed'] = papers_2017['title'].map(lambda x: re.sub('[,\\.!?]', '', x))
papers_2010['title_processed'] = papers_2010['title_processed'].map(lambda x: x.lower())
papers_2017['title_processed'] = papers_2017['title_processed'].map(lambda x: x.lower())

def plot_10_most_common_words(count_data, count_vectorizer):
    words = count_vectorizer.get_feature_names()
    total_counts = np.zeros(len(words))
    for t in count_data:
        total_counts += t.toarray()[0]
    count_dict = zip(words, total_counts)
    count_dict = sorted(count_dict, key=lambda x: x[1], reverse=True)[1:51]
    words = [w[0] for w in count_dict]
    counts = [w[1] for w in count_dict]
    x_pos = np.arange(len(words))
    plt.xticks(x_pos, words, rotation=90)

count_vectorizer = CountVectorizer(stop_words='english')
count_data = count_vectorizer.fit_transform(papers_2010['title_processed'])
dict = [count_data, count_vectorizer]
dict
code
33095970/cell_3
[ "text_plain_output_1.png" ]
import matplotlib.pyplot
import pandas as pd

# `papers` comes from the earlier cells of this notebook; reloaded here so the cell runs standalone.
papers = pd.read_csv('/kaggle/input/nips-papers/papers.csv')
groups = papers.groupby('year')
counts = groups.size()
counts.plot()
code
33095970/cell_10
[ "text_plain_output_1.png" ]
import re
import warnings

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.decomposition import LatentDirichletAllocation as LDA
from sklearn.feature_extraction.text import CountVectorizer

papers = pd.read_csv('/kaggle/input/nips-papers/papers.csv')
papers_2010 = papers.loc[papers['year'] == 2010].copy(deep=False)
papers_2017 = papers.loc[papers['year'] == 2017].copy(deep=False)
papers_2010['title_processed'] = papers_2010['title'].map(lambda x: re.sub('[,\\.!?]', '', x))
papers_2017['title_processed'] = papers_2017['title'].map(lambda x: re.sub('[,\\.!?]', '', x))
papers_2010['title_processed'] = papers_2010['title_processed'].map(lambda x: x.lower())
papers_2017['title_processed'] = papers_2017['title_processed'].map(lambda x: x.lower())

def plot_10_most_common_words(count_data, count_vectorizer):
    words = count_vectorizer.get_feature_names()
    total_counts = np.zeros(len(words))
    for t in count_data:
        total_counts += t.toarray()[0]
    count_dict = zip(words, total_counts)
    count_dict = sorted(count_dict, key=lambda x: x[1], reverse=True)[1:51]
    words = [w[0] for w in count_dict]
    counts = [w[1] for w in count_dict]
    x_pos = np.arange(len(words))
    plt.xticks(x_pos, words, rotation=90)

count_vectorizer = CountVectorizer(stop_words='english')
count_data = count_vectorizer.fit_transform(papers_2010['title_processed'])
dict = [count_data, count_vectorizer]
warnings.simplefilter('ignore', DeprecationWarning)

def print_topics(model, count_vectorizer, n_top_words):
    words = count_vectorizer.get_feature_names()

number_topics = 5
number_words = 6
lda = LDA(n_components=number_topics)
lda.fit(count_data)

def plot_50_most_common_words(count_data, count_vectorizer):
    words = count_vectorizer.get_feature_names()
    total_counts = np.zeros(len(words))
    for t in count_data:
        total_counts += t.toarray()[0]
    count_dict = zip(words, total_counts)
    count_dict = sorted(count_dict, key=lambda x: x[1], reverse=True)[1:21]
    words = [w[0] for w in count_dict]
    counts = [w[1] for w in count_dict]
    x_pos = np.arange(len(words))
    plt.bar(x_pos, counts, align='center')
    plt.xticks(x_pos, words, rotation=90)
    plt.xlabel('words')
    plt.ylabel('counts')
    plt.title('50 most common words')
    plt.show()

count_vectorizer = CountVectorizer(stop_words='english')
count_data = count_vectorizer.fit_transform(papers_2017['title_processed'])
plot_50_most_common_words(count_data, count_vectorizer)
count_data.shape
code
122261632/cell_63
[ "text_plain_output_1.png" ]
print('Shape of X_test', X_test.shape)
code
122261632/cell_57
[ "text_plain_output_1.png" ]
import numpy as np
from imblearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder, StandardScaler

cat_pipe = Pipeline([
    ('imputer', SimpleImputer(strategy='most_frequent', missing_values=np.nan)),
    ('encoder', OneHotEncoder(handle_unknown='ignore', sparse=False)),
    ('scaler', StandardScaler()),
])
num_pipe = Pipeline([
    ('imputer', SimpleImputer(strategy='most_frequent', missing_values=np.nan)),
    ('scaler', StandardScaler()),
])
print(num_pipe)
code
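The num_pipe above first imputes missing entries with the column's most frequent value and then standardizes; a tiny demonstration on a toy frame (the frame and its values are made up):

import numpy as np
import pandas as pd

toy = pd.DataFrame({'a': [1.0, np.nan, 3.0, 1.0]})
# num_pipe as defined in the cell above: most_frequent imputation, then z-scoring.
print(num_pipe.fit_transform(toy).ravel())  # NaN -> 1.0, then the column is standardized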
122261632/cell_33
[ "text_plain_output_1.png" ]
import pandas as pd

train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
train_df = pd.merge(train_df_sample, train_label_df, how='inner', on=['customer_ID'])
train_df.drop(axis=1, columns=['customer_ID', 'S_2'], inplace=True)
if any(train_df.duplicated()):
    print('Yes')
else:
    print('No')
code
122261632/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd

train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
print('Shape of dataset is:', train_label_df.shape)
train_label_df.info()
code
122261632/cell_55
[ "text_plain_output_1.png" ]
import numpy as np
from imblearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder, StandardScaler

cat_pipe = Pipeline([
    ('imputer', SimpleImputer(strategy='most_frequent', missing_values=np.nan)),
    ('encoder', OneHotEncoder(handle_unknown='ignore', sparse=False)),
    ('scaler', StandardScaler()),
])
print(cat_pipe)
code
122261632/cell_6
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import classification_report
from sklearn.metrics import make_scorer
from sklearn.metrics import confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import cross_validate
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import validation_curve
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from yellowbrick.model_selection import ValidationCurve
code
122261632/cell_39
[ "text_plain_output_1.png" ]
import pandas as pd

train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
test_df.drop(axis=1, columns=['S_2'], inplace=True)
if any(test_df.isna().sum()):
    print('Yes')
else:
    print('No')
code
122261632/cell_65
[ "text_plain_output_1.png" ]
print('Shape of y_test', y_test.shape)
code
122261632/cell_50
[ "text_plain_output_1.png" ]
import pandas as pd

train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
train_df = pd.merge(train_df_sample, train_label_df, how='inner', on=['customer_ID'])
train_df.drop(axis=1, columns=['customer_ID', 'S_2'], inplace=True)
test_df.drop(axis=1, columns=['S_2'], inplace=True)
i = 0
for col in train_df.columns:
    if train_df[col].isnull().sum() / len(train_df[col]) * 100 >= 75:
        train_df.drop(labels=col, axis=1, inplace=True)
        i = i + 1
i = 0
for col in test_df.columns:
    if test_df[col].isnull().sum() / len(test_df[col]) * 100 >= 75:
        test_df.drop(labels=col, axis=1, inplace=True)
        i = i + 1
train_df = train_df.astype({'B_30': 'str', 'B_38': 'str'})
test_df = test_df.astype({'B_30': 'str', 'B_38': 'str'})
train_df = train_df.astype({'D_114': 'str', 'D_116': 'str', 'D_117': 'str', 'D_120': 'str', 'D_126': 'str', 'D_68': 'str'})
test_df = test_df.astype({'D_114': 'str', 'D_116': 'str', 'D_117': 'str', 'D_120': 'str', 'D_126': 'str', 'D_68': 'str'})
X = train_df.drop(columns='target')
y = train_df['target']
print('Shape of X', X.shape)
code
122261632/cell_64
[ "text_plain_output_1.png" ]
print('Shape of y_train', y_train.shape)
code
122261632/cell_45
[ "text_plain_output_1.png" ]
import pandas as pd

train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
train_df = pd.merge(train_df_sample, train_label_df, how='inner', on=['customer_ID'])
train_df.drop(axis=1, columns=['customer_ID', 'S_2'], inplace=True)
test_df.drop(axis=1, columns=['S_2'], inplace=True)
i = 0
for col in train_df.columns:
    if train_df[col].isnull().sum() / len(train_df[col]) * 100 >= 75:
        train_df.drop(labels=col, axis=1, inplace=True)
        i = i + 1
i = 0
for col in test_df.columns:
    if test_df[col].isnull().sum() / len(test_df[col]) * 100 >= 75:
        print('Dropping column', col)
        test_df.drop(labels=col, axis=1, inplace=True)
        i = i + 1
print('Total number of columns dropped in test dataframe', i)
code
122261632/cell_51
[ "text_plain_output_1.png" ]
import pandas as pd

train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
train_df = pd.merge(train_df_sample, train_label_df, how='inner', on=['customer_ID'])
train_df.drop(axis=1, columns=['customer_ID', 'S_2'], inplace=True)
test_df.drop(axis=1, columns=['S_2'], inplace=True)
i = 0
for col in train_df.columns:
    if train_df[col].isnull().sum() / len(train_df[col]) * 100 >= 75:
        train_df.drop(labels=col, axis=1, inplace=True)
        i = i + 1
i = 0
for col in test_df.columns:
    if test_df[col].isnull().sum() / len(test_df[col]) * 100 >= 75:
        test_df.drop(labels=col, axis=1, inplace=True)
        i = i + 1
train_df = train_df.astype({'B_30': 'str', 'B_38': 'str'})
test_df = test_df.astype({'B_30': 'str', 'B_38': 'str'})
train_df = train_df.astype({'D_114': 'str', 'D_116': 'str', 'D_117': 'str', 'D_120': 'str', 'D_126': 'str', 'D_68': 'str'})
test_df = test_df.astype({'D_114': 'str', 'D_116': 'str', 'D_117': 'str', 'D_120': 'str', 'D_126': 'str', 'D_68': 'str'})
X = train_df.drop(columns='target')
y = train_df['target']
print('Shape of y', y.shape)
code
122261632/cell_62
[ "text_plain_output_1.png" ]
print('Shape of X_train', X_train.shape)
code
122261632/cell_59
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
from imblearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder, StandardScaler

train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
train_df = pd.merge(train_df_sample, train_label_df, how='inner', on=['customer_ID'])
train_df.drop(axis=1, columns=['customer_ID', 'S_2'], inplace=True)
test_df.drop(axis=1, columns=['S_2'], inplace=True)
i = 0
for col in train_df.columns:
    if train_df[col].isnull().sum() / len(train_df[col]) * 100 >= 75:
        train_df.drop(labels=col, axis=1, inplace=True)
        i = i + 1
i = 0
for col in test_df.columns:
    if test_df[col].isnull().sum() / len(test_df[col]) * 100 >= 75:
        test_df.drop(labels=col, axis=1, inplace=True)
        i = i + 1
train_df = train_df.astype({'B_30': 'str', 'B_38': 'str'})
test_df = test_df.astype({'B_30': 'str', 'B_38': 'str'})
train_df = train_df.astype({'D_114': 'str', 'D_116': 'str', 'D_117': 'str', 'D_120': 'str', 'D_126': 'str', 'D_68': 'str'})
test_df = test_df.astype({'D_114': 'str', 'D_116': 'str', 'D_117': 'str', 'D_120': 'str', 'D_126': 'str', 'D_68': 'str'})
X = train_df.drop(columns='target')
y = train_df['target']
categorical = list(X.select_dtypes('object').columns)
numerical = list(X.select_dtypes('number').columns)
cat_pipe = Pipeline([
    ('imputer', SimpleImputer(strategy='most_frequent', missing_values=np.nan)),
    ('encoder', OneHotEncoder(handle_unknown='ignore', sparse=False)),
    ('scaler', StandardScaler()),
])
num_pipe = Pipeline([
    ('imputer', SimpleImputer(strategy='most_frequent', missing_values=np.nan)),
    ('scaler', StandardScaler()),
])
preprocess = ColumnTransformer([('cat', cat_pipe, categorical), ('num', num_pipe, numerical)])
print(preprocess)
code
122261632/cell_28
[ "text_plain_output_1.png" ]
import pandas as pd

train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
train_df = pd.merge(train_df_sample, train_label_df, how='inner', on=['customer_ID'])
train_df.info()
code
122261632/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd

train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
print('Shape of dataset is:', train_df_sample.shape)
train_df_sample.info()
code
122261632/cell_75
[ "text_plain_output_1.png" ]
import pandas as pd

train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
train_df = pd.merge(train_df_sample, train_label_df, how='inner', on=['customer_ID'])

def amex_metric(y_true: pd.DataFrame, y_pred: pd.DataFrame) -> float:

    def top_four_percent_captured(y_true: pd.DataFrame, y_pred: pd.DataFrame) -> float:
        df = pd.concat([y_true, y_pred], axis='columns').sort_values('prediction', ascending=False)
        df['weight'] = df['target'].apply(lambda x: 20 if x == 0 else 1)
        four_pct_cutoff = int(0.04 * df['weight'].sum())
        df['weight_cumsum'] = df['weight'].cumsum()
        df_cutoff = df.loc[df['weight_cumsum'] <= four_pct_cutoff]
        return (df_cutoff['target'] == 1).sum() / (df['target'] == 1).sum()

    def weighted_gini(y_true: pd.DataFrame, y_pred: pd.DataFrame) -> float:
        df = pd.concat([y_true, y_pred], axis='columns').sort_values('prediction', ascending=False)
        df['weight'] = df['target'].apply(lambda x: 20 if x == 0 else 1)
        df['random'] = (df['weight'] / df['weight'].sum()).cumsum()
        total_pos = (df['target'] * df['weight']).sum()
        df['cum_pos_found'] = (df['target'] * df['weight']).cumsum()
        df['lorentz'] = df['cum_pos_found'] / total_pos
        df['gini'] = (df['lorentz'] - df['random']) * df['weight']
        return df['gini'].sum()

    def normalized_weighted_gini(y_true: pd.DataFrame, y_pred: pd.DataFrame) -> float:
        y_true_pred = y_true.rename(columns={'target': 'prediction'})
        return weighted_gini(y_true, y_pred) / weighted_gini(y_true, y_true_pred)

    g = normalized_weighted_gini(y_true, y_pred)
    d = top_four_percent_captured(y_true, y_pred)
    return 0.5 * (g + d)

def model_score(model_name):
    # pipe, X_train and y_train come from earlier cells of this notebook.
    model = pipe.fit(X_train, y_train)

model_score('RandomForestClassifier')
code
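amex_metric above expects two single-column DataFrames, one with a 'target' column and one with a 'prediction' column; a minimal call sketch with toy values (not competition data):

import pandas as pd

y_true = pd.DataFrame({'target': [0, 1, 0, 1]})
y_pred = pd.DataFrame({'prediction': [0.1, 0.9, 0.4, 0.6]})
# 0.5 * (normalized weighted Gini + top-4% default capture rate), per the definition above.
print(amex_metric(y_true, y_pred))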
122261632/cell_35
[ "text_plain_output_1.png" ]
import pandas as pd

train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
test_df.drop(axis=1, columns=['S_2'], inplace=True)
if any(test_df.duplicated()):
    print('Yes')
else:
    print('No')
code
122261632/cell_43
[ "text_plain_output_1.png" ]
import pandas as pd

train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
train_df = pd.merge(train_df_sample, train_label_df, how='inner', on=['customer_ID'])
train_df.drop(axis=1, columns=['customer_ID', 'S_2'], inplace=True)
i = 0
for col in train_df.columns:
    if train_df[col].isnull().sum() / len(train_df[col]) * 100 >= 75:
        print('Dropping column', col)
        train_df.drop(labels=col, axis=1, inplace=True)
        i = i + 1
print('Total number of columns dropped in train dataframe', i)
code
122261632/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd

train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
print('Shape of dataset is:', test_df.shape)
test_df.info()
code
122261632/cell_53
[ "text_plain_output_1.png" ]
import pandas as pd

train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
train_df = pd.merge(train_df_sample, train_label_df, how='inner', on=['customer_ID'])
train_df.drop(axis=1, columns=['customer_ID', 'S_2'], inplace=True)
test_df.drop(axis=1, columns=['S_2'], inplace=True)
i = 0
for col in train_df.columns:
    if train_df[col].isnull().sum() / len(train_df[col]) * 100 >= 75:
        train_df.drop(labels=col, axis=1, inplace=True)
        i = i + 1
i = 0
for col in test_df.columns:
    if test_df[col].isnull().sum() / len(test_df[col]) * 100 >= 75:
        test_df.drop(labels=col, axis=1, inplace=True)
        i = i + 1
train_df = train_df.astype({'B_30': 'str', 'B_38': 'str'})
test_df = test_df.astype({'B_30': 'str', 'B_38': 'str'})
train_df = train_df.astype({'D_114': 'str', 'D_116': 'str', 'D_117': 'str', 'D_120': 'str', 'D_126': 'str', 'D_68': 'str'})
test_df = test_df.astype({'D_114': 'str', 'D_116': 'str', 'D_117': 'str', 'D_120': 'str', 'D_126': 'str', 'D_68': 'str'})
X = train_df.drop(columns='target')
y = train_df['target']
categorical = list(X.select_dtypes('object').columns)
print(f'Categorical variables (columns) are: {categorical}')
numerical = list(X.select_dtypes('number').columns)
print(f'Numerical variables (columns) are: {numerical}')
code
122261632/cell_37
[ "text_plain_output_1.png" ]
import pandas as pd

train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
train_df = pd.merge(train_df_sample, train_label_df, how='inner', on=['customer_ID'])
train_df.drop(axis=1, columns=['customer_ID', 'S_2'], inplace=True)
if any(train_df.isna().sum()):
    print('Yes')
else:
    print('No')
code
88095734/cell_25
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.describe().columns
df_num = train[['Age', 'SibSp', 'Parch', 'Fare']]
df_cat = train[['Survived', 'Pclass', 'Sex', 'Ticket', 'Cabin', 'Embarked']]
df_cat.Cabin
train['cabin_multiple'] = train.Cabin.apply(lambda x: 0 if pd.isna(x) else len(x.split(' ')))
train['cabin_multiple'].value_counts()
pd.pivot_table(train, index='Survived', columns='cabin_multiple', values='Ticket', aggfunc='count')
train['cabin_adv'] = train.Cabin.apply(lambda x: str(x)[0])
train['numeric_ticket'] = train.Ticket.apply(lambda x: 1 if x.isnumeric() else 0)
train['ticket_letters'] = train.Ticket.apply(lambda x: ''.join(x.split(' ')[:-1]).replace('.', '').replace('/', '').lower() if len(x.split(' ')[:-1]) > 0 else 0)
train['numeric_ticket'].value_counts()
# 'max_rows' alone is ambiguous in recent pandas; the full option key is 'display.max_rows'.
pd.set_option('display.max_rows', None)
train['ticket_letters'].value_counts()
pd.pivot_table(train, index='Survived', columns='numeric_ticket', values='Ticket', aggfunc='count')
pd.pivot_table(train, index='Survived', columns='ticket_letters', values='Ticket', aggfunc='count')
train['name_title'] = train.Name.apply(lambda x: x.split(',')[1].split('.')[0].strip())
train['name_title'].value_counts()
data['cabin_multiple'] = data.Cabin.apply(lambda x: 0 if pd.isna(x) else len(x.split(' ')))
data['cabin_adv'] = data.Cabin.apply(lambda x: str(x)[0])
data['numeric_ticket'] = data.Ticket.apply(lambda x: 1 if x.isnumeric() else 0)
data['ticket_letters'] = data.Ticket.apply(lambda x: ''.join(x.split(' ')[:-1]).replace('.', '').replace('/', '').lower() if len(x.split(' ')[:-1]) > 0 else 0)
data['name_title'] = data.Name.apply(lambda x: x.split(',')[1].split('.')[0].strip())
data.dropna(subset=['Embarked'], inplace=True)
original_age = data.Age
original_fare = data.Fare
data['norm_sibsp'] = np.log(data.SibSp + 1)
data['norm_fare'] = np.log(data.Fare + 1)
data.Pclass = data.Pclass.astype(str)
all_dummies = pd.get_dummies(data[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'norm_fare', 'Embarked', 'cabin_adv', 'cabin_multiple', 'numeric_ticket', 'name_title', 'train_test']])
X_train = all_dummies[all_dummies.train_test == 1].drop(['train_test'], axis=1)
X_test = all_dummies[all_dummies.train_test == 0].drop(['train_test'], axis=1)
y_train = data[data.train_test == 1].Survived
y_train.shape
scale = StandardScaler()
all_dummies_scaled = all_dummies.copy()
all_dummies_scaled[['Age', 'SibSp', 'Parch', 'norm_fare']] = scale.fit_transform(all_dummies_scaled[['Age', 'SibSp', 'Parch', 'norm_fare']])
all_dummies_scaled
X_train_scaled = all_dummies_scaled[all_dummies_scaled.train_test == 1].drop(['train_test'], axis=1)
X_test_scaled = all_dummies_scaled[all_dummies_scaled.train_test == 0].drop(['train_test'], axis=1)
y_train = data[data.train_test == 1].Survived
hgb = HistGradientBoostingClassifier()
cv = cross_val_score(hgb, X_train_scaled, y_train, cv=5)
print(cv)
print(f'{cv.mean()} +/-{cv.std():.2f}')
code
88095734/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.info()
code
88095734/cell_20
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.describe().columns
df_num = train[['Age', 'SibSp', 'Parch', 'Fare']]
df_cat = train[['Survived', 'Pclass', 'Sex', 'Ticket', 'Cabin', 'Embarked']]
df_cat.Cabin
train['cabin_multiple'] = train.Cabin.apply(lambda x: 0 if pd.isna(x) else len(x.split(' ')))
train['cabin_multiple'].value_counts()
pd.pivot_table(train, index='Survived', columns='cabin_multiple', values='Ticket', aggfunc='count')
train['cabin_adv'] = train.Cabin.apply(lambda x: str(x)[0])
train['numeric_ticket'] = train.Ticket.apply(lambda x: 1 if x.isnumeric() else 0)
train['ticket_letters'] = train.Ticket.apply(lambda x: ''.join(x.split(' ')[:-1]).replace('.', '').replace('/', '').lower() if len(x.split(' ')[:-1]) > 0 else 0)
train['numeric_ticket'].value_counts()
# 'max_rows' alone is ambiguous in recent pandas; the full option key is 'display.max_rows'.
pd.set_option('display.max_rows', None)
train['ticket_letters'].value_counts()
pd.pivot_table(train, index='Survived', columns='numeric_ticket', values='Ticket', aggfunc='count')
pd.pivot_table(train, index='Survived', columns='ticket_letters', values='Ticket', aggfunc='count')
train['name_title'] = train.Name.apply(lambda x: x.split(',')[1].split('.')[0].strip())
train['name_title'].value_counts()
data['cabin_multiple'] = data.Cabin.apply(lambda x: 0 if pd.isna(x) else len(x.split(' ')))
data['cabin_adv'] = data.Cabin.apply(lambda x: str(x)[0])
data['numeric_ticket'] = data.Ticket.apply(lambda x: 1 if x.isnumeric() else 0)
data['ticket_letters'] = data.Ticket.apply(lambda x: ''.join(x.split(' ')[:-1]).replace('.', '').replace('/', '').lower() if len(x.split(' ')[:-1]) > 0 else 0)
data['name_title'] = data.Name.apply(lambda x: x.split(',')[1].split('.')[0].strip())
data.dropna(subset=['Embarked'], inplace=True)
original_age = data.Age
original_fare = data.Fare
data['norm_sibsp'] = np.log(data.SibSp + 1)
data['norm_sibsp'].hist()
data['norm_fare'] = np.log(data.Fare + 1)
data['norm_fare'].hist()
data.Pclass = data.Pclass.astype(str)
all_dummies = pd.get_dummies(data[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'norm_fare', 'Embarked', 'cabin_adv', 'cabin_multiple', 'numeric_ticket', 'name_title', 'train_test']])
X_train = all_dummies[all_dummies.train_test == 1].drop(['train_test'], axis=1)
X_test = all_dummies[all_dummies.train_test == 0].drop(['train_test'], axis=1)
y_train = data[data.train_test == 1].Survived
y_train.shape
code
88095734/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.describe().columns
code
88095734/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.describe().columns
df_num = train[['Age', 'SibSp', 'Parch', 'Fare']]
df_cat = train[['Survived', 'Pclass', 'Sex', 'Ticket', 'Cabin', 'Embarked']]
df_cat.Cabin
train['cabin_multiple'] = train.Cabin.apply(lambda x: 0 if pd.isna(x) else len(x.split(' ')))
train['cabin_multiple'].value_counts()
pd.pivot_table(train, index='Survived', columns='cabin_multiple', values='Ticket', aggfunc='count')
code
88095734/cell_1
[ "text_plain_output_1.png" ]
import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
88095734/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.describe().columns
df_num = train[['Age', 'SibSp', 'Parch', 'Fare']]
df_cat = train[['Survived', 'Pclass', 'Sex', 'Ticket', 'Cabin', 'Embarked']]
df_cat.Cabin
train['cabin_multiple'] = train.Cabin.apply(lambda x: 0 if pd.isna(x) else len(x.split(' ')))
train['cabin_multiple'].value_counts()
train['cabin_adv'] = train.Cabin.apply(lambda x: str(x)[0])
train['numeric_ticket'] = train.Ticket.apply(lambda x: 1 if x.isnumeric() else 0)
train['ticket_letters'] = train.Ticket.apply(lambda x: ''.join(x.split(' ')[:-1]).replace('.', '').replace('/', '').lower() if len(x.split(' ')[:-1]) > 0 else 0)
train['numeric_ticket'].value_counts()
train.Name.head(50)
train['name_title'] = train.Name.apply(lambda x: x.split(',')[1].split('.')[0].strip())
train['name_title'].value_counts()
code
88095734/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.describe().columns
df_num = train[['Age', 'SibSp', 'Parch', 'Fare']]
df_cat = train[['Survived', 'Pclass', 'Sex', 'Ticket', 'Cabin', 'Embarked']]
df_cat.Cabin
train['cabin_multiple'] = train.Cabin.apply(lambda x: 0 if pd.isna(x) else len(x.split(' ')))
train['cabin_multiple'].value_counts()
pd.pivot_table(train, index='Survived', columns='cabin_multiple', values='Ticket', aggfunc='count')
train['cabin_adv'] = train.Cabin.apply(lambda x: str(x)[0])
train['numeric_ticket'] = train.Ticket.apply(lambda x: 1 if x.isnumeric() else 0)
train['ticket_letters'] = train.Ticket.apply(lambda x: ''.join(x.split(' ')[:-1]).replace('.', '').replace('/', '').lower() if len(x.split(' ')[:-1]) > 0 else 0)
train['numeric_ticket'].value_counts()
# 'max_rows' alone is ambiguous in recent pandas; the full option key is 'display.max_rows'.
pd.set_option('display.max_rows', None)
train['ticket_letters'].value_counts()
code
88095734/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.describe().columns
df_num = train[['Age', 'SibSp', 'Parch', 'Fare']]
df_cat = train[['Survived', 'Pclass', 'Sex', 'Ticket', 'Cabin', 'Embarked']]
df_cat.Cabin
train['cabin_multiple'] = train.Cabin.apply(lambda x: 0 if pd.isna(x) else len(x.split(' ')))
train['cabin_multiple'].value_counts()
pd.pivot_table(train, index='Survived', columns='cabin_multiple', values='Ticket', aggfunc='count')
train['cabin_adv'] = train.Cabin.apply(lambda x: str(x)[0])
train['numeric_ticket'] = train.Ticket.apply(lambda x: 1 if x.isnumeric() else 0)
train['ticket_letters'] = train.Ticket.apply(lambda x: ''.join(x.split(' ')[:-1]).replace('.', '').replace('/', '').lower() if len(x.split(' ')[:-1]) > 0 else 0)
train['numeric_ticket'].value_counts()
# 'max_rows' alone is ambiguous in recent pandas; the full option key is 'display.max_rows'.
pd.set_option('display.max_rows', None)
train['ticket_letters'].value_counts()
pd.pivot_table(train, index='Survived', columns='numeric_ticket', values='Ticket', aggfunc='count')
code
88095734/cell_3
[ "text_plain_output_1.png" ]
train['train_test'] = 1
test['train_test'] = 0
test['Survived'] = np.NaN
data = pd.concat([train, test])
data.columns
code
88095734/cell_17
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.describe().columns
df_num = train[['Age', 'SibSp', 'Parch', 'Fare']]
df_cat = train[['Survived', 'Pclass', 'Sex', 'Ticket', 'Cabin', 'Embarked']]
df_cat.Cabin
train['cabin_multiple'] = train.Cabin.apply(lambda x: 0 if pd.isna(x) else len(x.split(' ')))
train['cabin_multiple'].value_counts()
pd.pivot_table(train, index='Survived', columns='cabin_multiple', values='Ticket', aggfunc='count')
train['cabin_adv'] = train.Cabin.apply(lambda x: str(x)[0])
train['numeric_ticket'] = train.Ticket.apply(lambda x: 1 if x.isnumeric() else 0)
train['ticket_letters'] = train.Ticket.apply(lambda x: ''.join(x.split(' ')[:-1]).replace('.', '').replace('/', '').lower() if len(x.split(' ')[:-1]) > 0 else 0)
train['numeric_ticket'].value_counts()
# 'max_rows' alone is ambiguous in recent pandas; the full option key is 'display.max_rows'.
pd.set_option('display.max_rows', None)
train['ticket_letters'].value_counts()
pd.pivot_table(train, index='Survived', columns='numeric_ticket', values='Ticket', aggfunc='count')
pd.pivot_table(train, index='Survived', columns='ticket_letters', values='Ticket', aggfunc='count')
code
88095734/cell_14
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.describe().columns
df_num = train[['Age', 'SibSp', 'Parch', 'Fare']]
df_cat = train[['Survived', 'Pclass', 'Sex', 'Ticket', 'Cabin', 'Embarked']]
df_cat.Cabin
train['cabin_multiple'] = train.Cabin.apply(lambda x: 0 if pd.isna(x) else len(x.split(' ')))
train['cabin_multiple'].value_counts()
train['cabin_adv'] = train.Cabin.apply(lambda x: str(x)[0])
train['numeric_ticket'] = train.Ticket.apply(lambda x: 1 if x.isnumeric() else 0)
train['ticket_letters'] = train.Ticket.apply(lambda x: ''.join(x.split(' ')[:-1]).replace('.', '').replace('/', '').lower() if len(x.split(' ')[:-1]) > 0 else 0)
train['numeric_ticket'].value_counts()
code
88095734/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.describe().columns
df_num = train[['Age', 'SibSp', 'Parch', 'Fare']]
df_cat = train[['Survived', 'Pclass', 'Sex', 'Ticket', 'Cabin', 'Embarked']]
df_cat.Cabin
train['cabin_multiple'] = train.Cabin.apply(lambda x: 0 if pd.isna(x) else len(x.split(' ')))
train['cabin_multiple'].value_counts()
code
88095734/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.describe()
code
128029410/cell_4
[ "text_plain_output_1.png" ]
!pip --version
code
128029410/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import json
import os
import shutil
import tempfile
import time
from functools import partial

import matplotlib.pyplot as plt
import nibabel as nib
import numpy as np
import torch
from monai import data, transforms
from monai.config import print_config
from monai.data import decollate_batch
from monai.inferers import sliding_window_inference
from monai.losses import DiceLoss
from monai.metrics import DiceMetric
from monai.networks.nets import SwinUNETR
from monai.transforms import Activations, AsDiscrete
from monai.utils.enums import MetricReduction

print_config()
code
128029410/cell_2
[ "text_plain_output_1.png" ]
!nvidia-smi
code
128029410/cell_18
[ "text_plain_output_1.png" ]
import json
import os
import tempfile

import matplotlib.pyplot as plt
import nibabel as nib
import numpy as np
import torch
from monai import data, transforms

directory = os.environ.get('MONAI_DATA_DIRECTORY')
root_dir = tempfile.mkdtemp() if directory is None else directory

class AverageMeter(object):

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = np.where(self.count > 0, self.sum / self.count, self.sum)

def datafold_read(datalist, basedir, fold=0, key='training'):
    with open(datalist) as f:
        json_data = json.load(f)
    json_data = json_data[key]
    for d in json_data:
        for k, v in d.items():
            if isinstance(d[k], list):
                d[k] = [os.path.join(basedir, iv) for iv in d[k]]
            elif isinstance(d[k], str):
                d[k] = os.path.join(basedir, d[k]) if len(d[k]) > 0 else d[k]
    tr = []
    val = []
    for d in json_data:
        if 'fold' in d and d['fold'] == fold:
            val.append(d)
        else:
            tr.append(d)
    return (tr, val)

def save_checkpoint(model, epoch, filename='model.pt', best_acc=0, dir_add=root_dir):
    state_dict = model.state_dict()
    save_dict = {'epoch': epoch, 'best_acc': best_acc, 'state_dict': state_dict}
    filename = os.path.join(dir_add, filename)
    torch.save(save_dict, filename)

def get_loader(batch_size, data_dir, json_list, fold, roi):
    data_dir = data_dir
    datalist_json = json_list
    train_files, validation_files = datafold_read(datalist=datalist_json, basedir=data_dir, fold=fold)
    train_transform = transforms.Compose([
        transforms.LoadImaged(keys=['image', 'label']),
        transforms.ConvertToMultiChannelBasedOnBratsClassesd(keys='label'),
        transforms.CropForegroundd(keys=['image', 'label'], source_key='image', k_divisible=[roi[0], roi[1], roi[2]]),
        transforms.RandSpatialCropd(keys=['image', 'label'], roi_size=[roi[0], roi[1], roi[2]], random_size=False),
        transforms.RandFlipd(keys=['image', 'label'], prob=0.5, spatial_axis=0),
        transforms.RandFlipd(keys=['image', 'label'], prob=0.5, spatial_axis=1),
        transforms.RandFlipd(keys=['image', 'label'], prob=0.5, spatial_axis=2),
        transforms.NormalizeIntensityd(keys='image', nonzero=True, channel_wise=True),
        transforms.RandScaleIntensityd(keys='image', factors=0.1, prob=1.0),
        transforms.RandShiftIntensityd(keys='image', offsets=0.1, prob=1.0),
    ])
    val_transform = transforms.Compose([
        transforms.LoadImaged(keys=['image', 'label']),
        transforms.ConvertToMultiChannelBasedOnBratsClassesd(keys='label'),
        transforms.NormalizeIntensityd(keys='image', nonzero=True, channel_wise=True),
    ])
    train_ds = data.Dataset(data=train_files, transform=train_transform)
    train_loader = data.DataLoader(train_ds, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)
    val_ds = data.Dataset(data=validation_files, transform=val_transform)
    val_loader = data.DataLoader(val_ds, batch_size=1, shuffle=False, num_workers=4, pin_memory=True)
    return (train_loader, val_loader)

data_dir = '/kaggle/input/brats2021-training-data-son/BraTS2021_Training_Data_Son'
json_list = '/kaggle/input/dataSwinUnet/brats21_folds_d.json'
roi = (128, 128, 128)
batch_size = 1
sw_batch_size = 1
fold = 4
infer_overlap = 0.5
max_epochs = 150
val_every = 10
train_loader, val_loader = get_loader(batch_size, data_dir, json_list, fold, roi)
img_add = os.path.join(data_dir, 'BraTS2021_00006/BraTS2021_00006_flair.nii')
label_add = os.path.join(data_dir, 'BraTS2021_00006/BraTS2021_00006_seg.nii')
img = nib.load(img_add).get_fdata()
label = nib.load(label_add).get_fdata()
print(f'image shape: {img.shape}, label shape: {label.shape}')
plt.figure('image', (18, 6))
plt.subplot(1, 2, 1)
plt.title('image')
plt.imshow(img[:, :, 78], cmap='gray')
plt.subplot(1, 2, 2)
plt.title('label')
plt.imshow(label[:, :, 78])
plt.show()
code
128029410/cell_8
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import os
import tempfile
directory = os.environ.get('MONAI_DATA_DIRECTORY')
root_dir = tempfile.mkdtemp() if directory is None else directory
print(root_dir)
code
128029410/cell_15
[ "text_plain_output_1.png" ]
from monai import data
from monai import transforms
import json
import numpy as np
import os
import tempfile
import torch
directory = os.environ.get('MONAI_DATA_DIRECTORY')
root_dir = tempfile.mkdtemp() if directory is None else directory

class AverageMeter(object):

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = np.where(self.count > 0, self.sum / self.count, self.sum)

def datafold_read(datalist, basedir, fold=0, key='training'):
    with open(datalist) as f:
        json_data = json.load(f)
    json_data = json_data[key]
    for d in json_data:
        for k, v in d.items():
            if isinstance(d[k], list):
                d[k] = [os.path.join(basedir, iv) for iv in d[k]]
            elif isinstance(d[k], str):
                d[k] = os.path.join(basedir, d[k]) if len(d[k]) > 0 else d[k]
    tr = []
    val = []
    for d in json_data:
        if 'fold' in d and d['fold'] == fold:
            val.append(d)
        else:
            tr.append(d)
    return (tr, val)

def save_checkpoint(model, epoch, filename='model.pt', best_acc=0, dir_add=root_dir):
    state_dict = model.state_dict()
    save_dict = {'epoch': epoch, 'best_acc': best_acc, 'state_dict': state_dict}
    filename = os.path.join(dir_add, filename)
    torch.save(save_dict, filename)

def get_loader(batch_size, data_dir, json_list, fold, roi):
    data_dir = data_dir
    datalist_json = json_list
    train_files, validation_files = datafold_read(datalist=datalist_json, basedir=data_dir, fold=fold)
    train_transform = transforms.Compose([transforms.LoadImaged(keys=['image', 'label']), transforms.ConvertToMultiChannelBasedOnBratsClassesd(keys='label'), transforms.CropForegroundd(keys=['image', 'label'], source_key='image', k_divisible=[roi[0], roi[1], roi[2]]), transforms.RandSpatialCropd(keys=['image', 'label'], roi_size=[roi[0], roi[1], roi[2]], random_size=False), transforms.RandFlipd(keys=['image', 'label'], prob=0.5, spatial_axis=0), transforms.RandFlipd(keys=['image', 'label'], prob=0.5, spatial_axis=1), transforms.RandFlipd(keys=['image', 'label'], prob=0.5, spatial_axis=2), transforms.NormalizeIntensityd(keys='image', nonzero=True, channel_wise=True), transforms.RandScaleIntensityd(keys='image', factors=0.1, prob=1.0), transforms.RandShiftIntensityd(keys='image', offsets=0.1, prob=1.0)])
    val_transform = transforms.Compose([transforms.LoadImaged(keys=['image', 'label']), transforms.ConvertToMultiChannelBasedOnBratsClassesd(keys='label'), transforms.NormalizeIntensityd(keys='image', nonzero=True, channel_wise=True)])
    train_ds = data.Dataset(data=train_files, transform=train_transform)
    train_loader = data.DataLoader(train_ds, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)
    val_ds = data.Dataset(data=validation_files, transform=val_transform)
    val_loader = data.DataLoader(val_ds, batch_size=1, shuffle=False, num_workers=4, pin_memory=True)
    return (train_loader, val_loader)
# Build the loaders before printing them; this cell otherwise referenced
# train_loader/val_loader without defining them. Configuration mirrors the training cell.
data_dir = '/kaggle/input/brats2021-training-data-son/BraTS2021_Training_Data_Son'
json_list = '/kaggle/input/dataSwinUnet/brats21_folds_d.json'
roi = (128, 128, 128)
batch_size = 1
fold = 4
train_loader, val_loader = get_loader(batch_size, data_dir, json_list, fold, roi)
print(train_loader, val_loader)
code
128029410/cell_16
[ "text_plain_output_1.png" ]
from monai import data
from monai import transforms
import json
import numpy as np
import os
import tempfile
import torch
directory = os.environ.get('MONAI_DATA_DIRECTORY')
root_dir = tempfile.mkdtemp() if directory is None else directory

class AverageMeter(object):

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = np.where(self.count > 0, self.sum / self.count, self.sum)

def datafold_read(datalist, basedir, fold=0, key='training'):
    with open(datalist) as f:
        json_data = json.load(f)
    json_data = json_data[key]
    for d in json_data:
        for k, v in d.items():
            if isinstance(d[k], list):
                d[k] = [os.path.join(basedir, iv) for iv in d[k]]
            elif isinstance(d[k], str):
                d[k] = os.path.join(basedir, d[k]) if len(d[k]) > 0 else d[k]
    tr = []
    val = []
    for d in json_data:
        if 'fold' in d and d['fold'] == fold:
            val.append(d)
        else:
            tr.append(d)
    return (tr, val)

def save_checkpoint(model, epoch, filename='model.pt', best_acc=0, dir_add=root_dir):
    state_dict = model.state_dict()
    save_dict = {'epoch': epoch, 'best_acc': best_acc, 'state_dict': state_dict}
    filename = os.path.join(dir_add, filename)
    torch.save(save_dict, filename)

def get_loader(batch_size, data_dir, json_list, fold, roi):
    data_dir = data_dir
    datalist_json = json_list
    train_files, validation_files = datafold_read(datalist=datalist_json, basedir=data_dir, fold=fold)
    train_transform = transforms.Compose([transforms.LoadImaged(keys=['image', 'label']), transforms.ConvertToMultiChannelBasedOnBratsClassesd(keys='label'), transforms.CropForegroundd(keys=['image', 'label'], source_key='image', k_divisible=[roi[0], roi[1], roi[2]]), transforms.RandSpatialCropd(keys=['image', 'label'], roi_size=[roi[0], roi[1], roi[2]], random_size=False), transforms.RandFlipd(keys=['image', 'label'], prob=0.5, spatial_axis=0), transforms.RandFlipd(keys=['image', 'label'], prob=0.5, spatial_axis=1), transforms.RandFlipd(keys=['image', 'label'], prob=0.5, spatial_axis=2), transforms.NormalizeIntensityd(keys='image', nonzero=True, channel_wise=True), transforms.RandScaleIntensityd(keys='image', factors=0.1, prob=1.0), transforms.RandShiftIntensityd(keys='image', offsets=0.1, prob=1.0)])
    val_transform = transforms.Compose([transforms.LoadImaged(keys=['image', 'label']), transforms.ConvertToMultiChannelBasedOnBratsClassesd(keys='label'), transforms.NormalizeIntensityd(keys='image', nonzero=True, channel_wise=True)])
    train_ds = data.Dataset(data=train_files, transform=train_transform)
    train_loader = data.DataLoader(train_ds, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)
    val_ds = data.Dataset(data=validation_files, transform=val_transform)
    val_loader = data.DataLoader(val_ds, batch_size=1, shuffle=False, num_workers=4, pin_memory=True)
    return (train_loader, val_loader)
# Recreate the loaders (same configuration as the training cell) so this cell runs standalone.
data_dir = '/kaggle/input/brats2021-training-data-son/BraTS2021_Training_Data_Son'
json_list = '/kaggle/input/dataSwinUnet/brats21_folds_d.json'
roi = (128, 128, 128)
batch_size = 1
fold = 4
train_loader, val_loader = get_loader(batch_size, data_dir, json_list, fold, roi)
x = next(iter(train_loader))
y = x['image']  # batches are dicts keyed by 'image'/'label'; x.get(0) would return None
print(f'y shape={y.shape} dtype={y.dtype}')
code
128029410/cell_3
[ "text_plain_output_1.png" ]
!pip install "monai[einops]"
code
128029410/cell_14
[ "text_plain_output_1.png" ]
from monai import data
from monai import transforms
import json
import numpy as np
import os
import tempfile
import torch
directory = os.environ.get('MONAI_DATA_DIRECTORY')
root_dir = tempfile.mkdtemp() if directory is None else directory

class AverageMeter(object):

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = np.where(self.count > 0, self.sum / self.count, self.sum)

def datafold_read(datalist, basedir, fold=0, key='training'):
    with open(datalist) as f:
        json_data = json.load(f)
    json_data = json_data[key]
    for d in json_data:
        for k, v in d.items():
            if isinstance(d[k], list):
                d[k] = [os.path.join(basedir, iv) for iv in d[k]]
            elif isinstance(d[k], str):
                d[k] = os.path.join(basedir, d[k]) if len(d[k]) > 0 else d[k]
    tr = []
    val = []
    for d in json_data:
        if 'fold' in d and d['fold'] == fold:
            val.append(d)
        else:
            tr.append(d)
    return (tr, val)

def save_checkpoint(model, epoch, filename='model.pt', best_acc=0, dir_add=root_dir):
    state_dict = model.state_dict()
    save_dict = {'epoch': epoch, 'best_acc': best_acc, 'state_dict': state_dict}
    filename = os.path.join(dir_add, filename)
    torch.save(save_dict, filename)

def get_loader(batch_size, data_dir, json_list, fold, roi):
    data_dir = data_dir
    datalist_json = json_list
    train_files, validation_files = datafold_read(datalist=datalist_json, basedir=data_dir, fold=fold)
    train_transform = transforms.Compose([transforms.LoadImaged(keys=['image', 'label']), transforms.ConvertToMultiChannelBasedOnBratsClassesd(keys='label'), transforms.CropForegroundd(keys=['image', 'label'], source_key='image', k_divisible=[roi[0], roi[1], roi[2]]), transforms.RandSpatialCropd(keys=['image', 'label'], roi_size=[roi[0], roi[1], roi[2]], random_size=False), transforms.RandFlipd(keys=['image', 'label'], prob=0.5, spatial_axis=0), transforms.RandFlipd(keys=['image', 'label'], prob=0.5, spatial_axis=1), transforms.RandFlipd(keys=['image', 'label'], prob=0.5, spatial_axis=2), transforms.NormalizeIntensityd(keys='image', nonzero=True, channel_wise=True), transforms.RandScaleIntensityd(keys='image', factors=0.1, prob=1.0), transforms.RandShiftIntensityd(keys='image', offsets=0.1, prob=1.0)])
    val_transform = transforms.Compose([transforms.LoadImaged(keys=['image', 'label']), transforms.ConvertToMultiChannelBasedOnBratsClassesd(keys='label'), transforms.NormalizeIntensityd(keys='image', nonzero=True, channel_wise=True)])
    train_ds = data.Dataset(data=train_files, transform=train_transform)
    train_loader = data.DataLoader(train_ds, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)
    val_ds = data.Dataset(data=validation_files, transform=val_transform)
    val_loader = data.DataLoader(val_ds, batch_size=1, shuffle=False, num_workers=4, pin_memory=True)
    return (train_loader, val_loader)
data_dir = '/kaggle/input/brats2021-training-data-son/BraTS2021_Training_Data_Son'
json_list = '/kaggle/input/dataSwinUnet/brats21_folds_d.json'
roi = (128, 128, 128)
batch_size = 1
sw_batch_size = 1
fold = 4
infer_overlap = 0.5
max_epochs = 150
val_every = 10
train_loader, val_loader = get_loader(batch_size, data_dir, json_list, fold, roi)
code
18149087/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np, pandas as pd, os
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import VarianceThreshold
from tqdm import tqdm
from sklearn.covariance import EmpiricalCovariance
from sklearn.covariance import GraphicalLasso
from sklearn.metrics import roc_auc_score
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.mixture import GaussianMixture
from sklearn.svm import NuSVC
from sklearn import svm, neighbors, linear_model, neural_network
from sklearn.ensemble import RandomForestClassifier
from tqdm import tqdm_notebook
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head()
code
18149087/cell_6
[ "text_plain_output_1.png" ]
from sklearn.covariance import GraphicalLasso
from sklearn.feature_selection import VarianceThreshold
from sklearn.metrics import roc_auc_score
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import StratifiedKFold
from tqdm import tqdm
import numpy as np, pandas as pd, os
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import VarianceThreshold
from tqdm import tqdm
from sklearn.covariance import EmpiricalCovariance
from sklearn.covariance import GraphicalLasso
from sklearn.metrics import roc_auc_score
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.mixture import GaussianMixture
from sklearn.svm import NuSVC
from sklearn import svm, neighbors, linear_model, neural_network
from sklearn.ensemble import RandomForestClassifier
from tqdm import tqdm_notebook
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')

def get_mean_cov(x, y):
    model = GraphicalLasso()
    ones = (y == 1).astype(bool)
    x2 = x[ones]
    model.fit(x2)
    p1 = model.precision_
    m1 = model.location_
    onesb = (y == 0).astype(bool)
    x2b = x[onesb]
    model.fit(x2b)
    p2 = model.precision_
    m2 = model.location_
    ms = np.stack([m1, m2])
    ps = np.stack([p1, p2])
    return (ms, ps)
cols = [c for c in train.columns if c not in ['id', 'target']]
cols.remove('wheezy-copper-turtle-magic')
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for i in tqdm(range(512)):
    train2 = train[train['wheezy-copper-turtle-magic'] == i]
    test2 = test[test['wheezy-copper-turtle-magic'] == i]
    idx1 = train2.index
    idx2 = test2.index
    train2.reset_index(drop=True, inplace=True)
    sel = VarianceThreshold(threshold=1.5).fit(train2[cols])
    train3 = sel.transform(train2[cols])
    test3 = sel.transform(test2[cols])
    skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
    for train_index, test_index in skf.split(train3, train2['target']):
        ms, ps = get_mean_cov(train3[train_index, :], train2.loc[train_index]['target'].values)
        gm = GaussianMixture(n_components=2, init_params='random', covariance_type='full', tol=0.001, reg_covar=0.001, max_iter=100, n_init=1, means_init=ms, precisions_init=ps)
        gm.fit(np.concatenate([train3, test3], axis=0))
        oof[idx1[test_index]] = gm.predict_proba(train3[test_index, :])[:, 0]
        preds[idx2] += gm.predict_proba(test3)[:, 0] / skf.n_splits
auc = roc_auc_score(train['target'], oof)
print('QDA scores CV =', round(auc, 5))
code
18149087/cell_8
[ "text_plain_output_1.png" ]
from sklearn.covariance import GraphicalLasso
from sklearn.feature_selection import VarianceThreshold
from sklearn.metrics import roc_auc_score
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import StratifiedKFold
from tqdm import tqdm
import numpy as np, pandas as pd, os
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import VarianceThreshold
from tqdm import tqdm
from sklearn.covariance import EmpiricalCovariance
from sklearn.covariance import GraphicalLasso
from sklearn.metrics import roc_auc_score
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.mixture import GaussianMixture
from sklearn.svm import NuSVC
from sklearn import svm, neighbors, linear_model, neural_network
from sklearn.ensemble import RandomForestClassifier
from tqdm import tqdm_notebook
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')

def get_mean_cov(x, y):
    model = GraphicalLasso()
    ones = (y == 1).astype(bool)
    x2 = x[ones]
    model.fit(x2)
    p1 = model.precision_
    m1 = model.location_
    onesb = (y == 0).astype(bool)
    x2b = x[onesb]
    model.fit(x2b)
    p2 = model.precision_
    m2 = model.location_
    ms = np.stack([m1, m2])
    ps = np.stack([p1, p2])
    return (ms, ps)
cols = [c for c in train.columns if c not in ['id', 'target']]
cols.remove('wheezy-copper-turtle-magic')
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for i in tqdm(range(512)):
    train2 = train[train['wheezy-copper-turtle-magic'] == i]
    test2 = test[test['wheezy-copper-turtle-magic'] == i]
    idx1 = train2.index
    idx2 = test2.index
    train2.reset_index(drop=True, inplace=True)
    sel = VarianceThreshold(threshold=1.5).fit(train2[cols])
    train3 = sel.transform(train2[cols])
    test3 = sel.transform(test2[cols])
    skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
    for train_index, test_index in skf.split(train3, train2['target']):
        ms, ps = get_mean_cov(train3[train_index, :], train2.loc[train_index]['target'].values)
        gm = GaussianMixture(n_components=2, init_params='random', covariance_type='full', tol=0.001, reg_covar=0.001, max_iter=100, n_init=1, means_init=ms, precisions_init=ps)
        gm.fit(np.concatenate([train3, test3], axis=0))
        oof[idx1[test_index]] = gm.predict_proba(train3[test_index, :])[:, 0]
        preds[idx2] += gm.predict_proba(test3)[:, 0] / skf.n_splits
auc = roc_auc_score(train['target'], oof)
cat_dict = dict()
cols = [c for c in train.columns if c not in ['id', 'target']]
cols.remove('wheezy-copper-turtle-magic')
for i in range(512):
    train2 = train[train['wheezy-copper-turtle-magic'] == i]
    test2 = test[test['wheezy-copper-turtle-magic'] == i]
    idx1 = train2.index
    idx2 = test2.index
    train2.reset_index(drop=True, inplace=True)
    sel = VarianceThreshold(threshold=1.5).fit(train2[cols])
    train3 = sel.transform(train2[cols])
    test3 = sel.transform(test2[cols])
    cat_dict[i] = train3.shape[1]
pd.DataFrame(list(cat_dict.items()))[1].value_counts().plot.barh()
code
18149087/cell_10
[ "text_html_output_1.png" ]
from sklearn import svm, neighbors, linear_model, neural_network
from sklearn.covariance import GraphicalLasso
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import VarianceThreshold
from sklearn.metrics import roc_auc_score
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import NuSVC
from tqdm import tqdm
import numpy as np, pandas as pd, os
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import VarianceThreshold
from tqdm import tqdm
from sklearn.covariance import EmpiricalCovariance
from sklearn.covariance import GraphicalLasso
from sklearn.metrics import roc_auc_score
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.mixture import GaussianMixture
from sklearn.svm import NuSVC
from sklearn import svm, neighbors, linear_model, neural_network
from sklearn.ensemble import RandomForestClassifier
from tqdm import tqdm_notebook
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')

def get_mean_cov(x, y):
    model = GraphicalLasso()
    ones = (y == 1).astype(bool)
    x2 = x[ones]
    model.fit(x2)
    p1 = model.precision_
    m1 = model.location_
    onesb = (y == 0).astype(bool)
    x2b = x[onesb]
    model.fit(x2b)
    p2 = model.precision_
    m2 = model.location_
    ms = np.stack([m1, m2])
    ps = np.stack([p1, p2])
    return (ms, ps)
cols = [c for c in train.columns if c not in ['id', 'target']]
cols.remove('wheezy-copper-turtle-magic')
oof = np.zeros(len(train))
preds = np.zeros(len(test))
for i in tqdm(range(512)):
    train2 = train[train['wheezy-copper-turtle-magic'] == i]
    test2 = test[test['wheezy-copper-turtle-magic'] == i]
    idx1 = train2.index
    idx2 = test2.index
    train2.reset_index(drop=True, inplace=True)
    sel = VarianceThreshold(threshold=1.5).fit(train2[cols])
    train3 = sel.transform(train2[cols])
    test3 = sel.transform(test2[cols])
    skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
    for train_index, test_index in skf.split(train3, train2['target']):
        ms, ps = get_mean_cov(train3[train_index, :], train2.loc[train_index]['target'].values)
        gm = GaussianMixture(n_components=2, init_params='random', covariance_type='full', tol=0.001, reg_covar=0.001, max_iter=100, n_init=1, means_init=ms, precisions_init=ps)
        gm.fit(np.concatenate([train3, test3], axis=0))
        oof[idx1[test_index]] = gm.predict_proba(train3[test_index, :])[:, 0]
        preds[idx2] += gm.predict_proba(test3)[:, 0] / skf.n_splits
auc = roc_auc_score(train['target'], oof)
cat_dict = dict()
cols = [c for c in train.columns if c not in ['id', 'target']]
cols.remove('wheezy-copper-turtle-magic')
for i in range(512):
    train2 = train[train['wheezy-copper-turtle-magic'] == i]
    test2 = test[test['wheezy-copper-turtle-magic'] == i]
    idx1 = train2.index
    idx2 = test2.index
    train2.reset_index(drop=True, inplace=True)
    sel = VarianceThreshold(threshold=1.5).fit(train2[cols])
    train3 = sel.transform(train2[cols])
    test3 = sel.transform(test2[cols])
    cat_dict[i] = train3.shape[1]
pd.DataFrame(list(cat_dict.items()))[1].value_counts().plot.barh()
test['target'] = preds
oof_qda = np.zeros(len(train))
preds_qda = np.zeros(len(test))
oof_knn = np.zeros(len(train))
preds_knn = np.zeros(len(test))
oof_svnu = np.zeros(len(train))
preds_svnu = np.zeros(len(test))
oof_svc = np.zeros(len(train))
preds_svc = np.zeros(len(test))
oof_rf = np.zeros(len(train))
preds_rf = np.zeros(len(test))
oof_mlp = np.zeros(len(train))
preds_mlp = np.zeros(len(test))
for k in range(512):
    train2 = train[train['wheezy-copper-turtle-magic'] == k]
    train2p = train2.copy()
    idx1 = train2.index
    test2 = test[test['wheezy-copper-turtle-magic'] == k]
    test2p = test2[(test2['target'] <= 0.01) | (test2['target'] >= 0.99)].copy()
    test2p.loc[test2p['target'] >= 0.5, 'target'] = 1
    test2p.loc[test2p['target'] < 0.5, 'target'] = 0
    train2p = pd.concat([train2p, test2p], axis=0)
    train2p.reset_index(drop=True, inplace=True)
    pca = PCA(n_components=cat_dict[k], random_state=1234)
    pca.fit(train2p[cols])
    train3p = pca.transform(train2p[cols])
    train3 = pca.transform(train2[cols])
    test3 = pca.transform(test2[cols])
    skf = StratifiedKFold(n_splits=11, random_state=42, shuffle=True)
    for train_index, test_index in skf.split(train3p, train2p['target']):
        test_index3 = test_index[test_index < len(train3)]
        clf = QuadraticDiscriminantAnalysis(reg_param=0.5)
        clf.fit(train3p[train_index, :], train2p.loc[train_index]['target'])
        oof_qda[idx1[test_index3]] = clf.predict_proba(train3[test_index3, :])[:, 1]
        preds_qda[test2.index] += clf.predict_proba(test3)[:, 1] / skf.n_splits
        clf = neighbors.KNeighborsClassifier(n_neighbors=17, p=2.9)
        clf.fit(train3p[train_index, :], train2p.loc[train_index]['target'])
        oof_knn[idx1[test_index3]] = clf.predict_proba(train3[test_index3, :])[:, 1]
        preds_knn[test2.index] += clf.predict_proba(test3)[:, 1] / skf.n_splits
        clf = NuSVC(probability=True, kernel='poly', degree=4, gamma='auto', random_state=4, nu=0.59, coef0=0.053)
        clf.fit(train3p[train_index, :], train2p.loc[train_index]['target'])
        oof_svnu[idx1[test_index3]] = clf.predict_proba(train3[test_index3, :])[:, 1]
        preds_svnu[test2.index] += clf.predict_proba(test3)[:, 1] / skf.n_splits
        clf = svm.SVC(probability=True, kernel='poly', degree=4, gamma='auto', random_state=42)
        clf.fit(train3p[train_index, :], train2p.loc[train_index]['target'])
        oof_svc[idx1[test_index3]] = clf.predict_proba(train3[test_index3, :])[:, 1]
        preds_svc[test2.index] += clf.predict_proba(test3)[:, 1] / skf.n_splits
        clf = RandomForestClassifier(n_estimators=100, random_state=1)
        clf.fit(train3p[train_index, :], train2p.loc[train_index]['target'])
        oof_rf[idx1[test_index3]] = clf.predict_proba(train3[test_index3, :])[:, 1]
        preds_rf[test2.index] += clf.predict_proba(test3)[:, 1] / skf.n_splits
        clf = neural_network.MLPClassifier(random_state=3, activation='relu', solver='lbfgs', tol=1e-06, hidden_layer_sizes=(250,))
        clf.fit(train3p[train_index, :], train2p.loc[train_index]['target'])
        oof_mlp[idx1[test_index3]] = clf.predict_proba(train3[test_index3, :])[:, 1]
        preds_mlp[test2.index] += clf.predict_proba(test3)[:, 1] / skf.n_splits
    if k % 32 == 0:
        print(k)
auc = roc_auc_score(train['target'], oof_qda)
print('Pseudo Labeled QDA scores CV =', round(auc, 5))
auc = roc_auc_score(train['target'], oof_knn)
print('Pseudo Labeled KNN scores CV =', round(auc, 5))
auc = roc_auc_score(train['target'], oof_svnu)
print('Pseudo Labeled SVNU scores CV =', round(auc, 5))
auc = roc_auc_score(train['target'], oof_svc)
print('Pseudo Labeled SVC scores CV =', round(auc, 5))
auc = roc_auc_score(train['target'], oof_rf)
print('Pseudo Labeled RF scores CV =', round(auc, 5))
auc = roc_auc_score(train['target'], oof_mlp)
print('Pseudo Labeled MLP scores CV =', round(auc, 5))
code
2003574/cell_4
[ "text_plain_output_1.png" ]
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(train_X, train_y)
from sklearn.ensemble import GradientBoostingClassifier
clf = GradientBoostingClassifier()
clf.fit(train_X, train_y)
print('The training score is: {}\n'.format(clf.score(train_X, train_y)))
print('The test score is: {}\n'.format(clf.score(test_X, test_y)))
code
2003574/cell_6
[ "text_plain_output_1.png" ]
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(train_X, train_y)
from sklearn.ensemble import GradientBoostingClassifier
clf = GradientBoostingClassifier()
clf.fit(train_X, train_y)
from sklearn.svm import SVC
clf = SVC(gamma=1)
clf.fit(train_X, train_y)
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
clf.fit(train_X, train_y)
print('The training score is: {:.3f}\n'.format(clf.score(train_X, train_y)))
print('The test score is: {:.3f}\n'.format(clf.score(test_X, test_y)))
code
2003574/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
2003574/cell_3
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(train_X, train_y)
print('The training score is: {}\n'.format(clf.score(train_X, train_y)))
print('The test score is: {}\n'.format(clf.score(test_X, test_y)))
code
2003574/cell_5
[ "text_plain_output_1.png" ]
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(train_X, train_y)
from sklearn.ensemble import GradientBoostingClassifier
clf = GradientBoostingClassifier()
clf.fit(train_X, train_y)
from sklearn.svm import SVC
clf = SVC(gamma=1)
clf.fit(train_X, train_y)
print('The training score is: {:.3f}\n'.format(clf.score(train_X, train_y)))
print('The test score is: {:.3f}\n'.format(clf.score(test_X, test_y)))
code
128024415/cell_21
[ "text_html_output_1.png" ]
from json import loads, dumps
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
price = []
for i in js['Abohar']['restaurants'].keys():
    if 'North Indian' in js['Abohar']['restaurants'][i]['cuisine']:
        price.append(int(js['Abohar']['restaurants'][i]['cost'].split(' ')[-1]))
print(sum(price) // len(price))
code
128024415/cell_25
[ "text_plain_output_1.png" ]
from json import loads, dumps
import pandas as pd
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
cuisines = []
for i in js['Abohar']['restaurants'].keys():
    cuisines += js['Abohar']['restaurants'][i]['cuisine'].split(',')
cuisines = list(set(cuisines))
pop_cui = []
for cuisine in cuisines:
    c = 0
    for i in js['Abohar']['restaurants'].keys():
        if cuisine in js['Abohar']['restaurants'][i]['cuisine']:
            c += 1
    pop_cui.append([cuisine, c])
df = pd.DataFrame(pop_cui, columns=['item', 'freq'])
df[df['freq'] == max(df['freq'])]
pop_cui = []
for cuisine in cuisines:
    c = 0
    for i in js['Abohar']['restaurants'].keys():
        if cuisine in js['Abohar']['restaurants'][i]['cuisine']:
            c += 1
    pop_cui.append([cuisine, c])
df = pd.DataFrame(pop_cui, columns=['item', 'freq'])
df.sort_values(by='freq', ascending=False).head(5)
price = []
for i in js['Abohar']['restaurants'].keys():
    if 'North Indian' in js['Abohar']['restaurants'][i]['cuisine']:
        price.append(int(js['Abohar']['restaurants'][i]['cost'].split(' ')[-1]))
avg_cuisine = []
for cuisine in cuisines:
    price = []
    for i in js['Abohar']['restaurants'].keys():
        if cuisine in js['Abohar']['restaurants'][i]['cuisine']:
            price.append(int(js['Abohar']['restaurants'][i]['cost'].split(' ')[-1]))
    avg_cuisine.append([cuisine, sum(price) // len(price)])
avg_cuisine = pd.DataFrame(avg_cuisine, columns=['cuisine', 'avg_cost'])
avg_cuisine.sort_values(by='avg_cost', ascending=False).head()
code
128024415/cell_23
[ "text_plain_output_1.png" ]
from json import loads, dumps
import pandas as pd
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
cuisines = []
for i in js['Abohar']['restaurants'].keys():
    cuisines += js['Abohar']['restaurants'][i]['cuisine'].split(',')
cuisines = list(set(cuisines))
pop_cui = []
for cuisine in cuisines:
    c = 0
    for i in js['Abohar']['restaurants'].keys():
        if cuisine in js['Abohar']['restaurants'][i]['cuisine']:
            c += 1
    pop_cui.append([cuisine, c])
df = pd.DataFrame(pop_cui, columns=['item', 'freq'])
df[df['freq'] == max(df['freq'])]
pop_cui = []
for cuisine in cuisines:
    c = 0
    for i in js['Abohar']['restaurants'].keys():
        if cuisine in js['Abohar']['restaurants'][i]['cuisine']:
            c += 1
    pop_cui.append([cuisine, c])
df = pd.DataFrame(pop_cui, columns=['item', 'freq'])
df.sort_values(by='freq', ascending=False).head(5)
price = []
for i in js['Abohar']['restaurants'].keys():
    if 'North Indian' in js['Abohar']['restaurants'][i]['cuisine']:
        price.append(int(js['Abohar']['restaurants'][i]['cost'].split(' ')[-1]))
avg_cuisine = []
for cuisine in cuisines:
    price = []
    for i in js['Abohar']['restaurants'].keys():
        if cuisine in js['Abohar']['restaurants'][i]['cuisine']:
            price.append(int(js['Abohar']['restaurants'][i]['cost'].split(' ')[-1]))
    avg_cuisine.append([cuisine, sum(price) // len(price)])
avg_cuisine = pd.DataFrame(avg_cuisine, columns=['cuisine', 'avg_cost'])
avg_cuisine.head()
code
128024415/cell_33
[ "text_html_output_1.png" ]
from json import loads, dumps
import numpy as np
import pandas as pd
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
cuisines = []
for i in js['Abohar']['restaurants'].keys():
    cuisines += js['Abohar']['restaurants'][i]['cuisine'].split(',')
cuisines = list(set(cuisines))
pop_cui = []
for cuisine in cuisines:
    c = 0
    for i in js['Abohar']['restaurants'].keys():
        if cuisine in js['Abohar']['restaurants'][i]['cuisine']:
            c += 1
    pop_cui.append([cuisine, c])
df = pd.DataFrame(pop_cui, columns=['item', 'freq'])
df[df['freq'] == max(df['freq'])]
pop_cui = []
for cuisine in cuisines:
    c = 0
    for i in js['Abohar']['restaurants'].keys():
        if cuisine in js['Abohar']['restaurants'][i]['cuisine']:
            c += 1
    pop_cui.append([cuisine, c])
df = pd.DataFrame(pop_cui, columns=['item', 'freq'])
df.sort_values(by='freq', ascending=False).head(5)
price = []
for i in js['Abohar']['restaurants'].keys():
    if 'North Indian' in js['Abohar']['restaurants'][i]['cuisine']:
        price.append(int(js['Abohar']['restaurants'][i]['cost'].split(' ')[-1]))
avg_cuisine = []
for cuisine in cuisines:
    price = []
    for i in js['Abohar']['restaurants'].keys():
        if cuisine in js['Abohar']['restaurants'][i]['cuisine']:
            price.append(int(js['Abohar']['restaurants'][i]['cost'].split(' ')[-1]))
    avg_cuisine.append([cuisine, sum(price) // len(price)])
avg_cuisine = pd.DataFrame(avg_cuisine, columns=['cuisine', 'avg_cost'])
rest_city = []
for i in js.keys():
    c = 0
    if 'restaurants' in js[i].keys():
        c = len(js[i]['restaurants'])
    else:
        for region in js[i].keys():
            if 'restaurants' in js[i][region].keys():
                c = len(js[i][region]['restaurants'])
    rest_city.append([i, c])
rest_city = pd.DataFrame(rest_city, columns=['city', 'total_restaurants'])
c = 0
arr = []
for city in js.keys():
    if 'restaurants' in js[city].keys():
        for rest in js[city]['restaurants'].keys():
            if 'menu' in js[city]['restaurants'][rest].keys():
                if len(js[city]['restaurants'][rest]['menu'].keys()) == 0:
                    c += 1
                    arr.append([rest])
            else:
                c += 1
                arr.append([rest])
    else:
        for regions in js[city].keys():
            if 'restaurants' in js[city][regions].keys():
                for rest in js[city][regions]['restaurants'].keys():
                    if 'menu' in js[city][regions]['restaurants'][rest].keys():
                        if len(js[city][regions]['restaurants'][rest]['menu'].keys()) == 0:
                            c += 1
                            arr.append([rest])
                    else:
                        c += 1
                        arr.append([rest])
print(c)
arr = np.array(arr)
np.save('incompleted_rest_data.npy', arr)
code
128024415/cell_6
[ "text_html_output_1.png" ]
from json import loads, dumps
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
print(len(js.keys()))
code
128024415/cell_19
[ "text_html_output_1.png" ]
from json import loads, dumps
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
cost = []
for i in js['Abohar']['restaurants'].keys():
    cost.append(int(js['Abohar']['restaurants'][i]['cost'].split(' ')[-1]))
avg_cost = round(sum(cost) / len(cost), 2)
print('Average cost of eating outside in Abohar is: Rs. ' + str(avg_cost))
code
128024415/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
from json import loads, dumps
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128024415/cell_8
[ "text_html_output_1.png" ]
from json import loads, dumps
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
print(len(js['Abohar']['restaurants'].keys()))
code
128024415/cell_16
[ "text_plain_output_1.png" ]
from json import loads, dumps
import pandas as pd
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
cuisines = []
for i in js['Abohar']['restaurants'].keys():
    cuisines += js['Abohar']['restaurants'][i]['cuisine'].split(',')
cuisines = list(set(cuisines))
pop_cui = []
for cuisine in cuisines:
    c = 0
    for i in js['Abohar']['restaurants'].keys():
        if cuisine in js['Abohar']['restaurants'][i]['cuisine']:
            c += 1
    pop_cui.append([cuisine, c])
df = pd.DataFrame(pop_cui, columns=['item', 'freq'])
df[df['freq'] == max(df['freq'])]
code
128024415/cell_17
[ "text_plain_output_1.png" ]
from json import loads, dumps
import pandas as pd
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
cuisines = []
for i in js['Abohar']['restaurants'].keys():
    cuisines += js['Abohar']['restaurants'][i]['cuisine'].split(',')
cuisines = list(set(cuisines))
pop_cui = []
for cuisine in cuisines:
    c = 0
    for i in js['Abohar']['restaurants'].keys():
        if cuisine in js['Abohar']['restaurants'][i]['cuisine']:
            c += 1
    pop_cui.append([cuisine, c])
df = pd.DataFrame(pop_cui, columns=['item', 'freq'])
df[df['freq'] == max(df['freq'])]
pop_cui = []
for cuisine in cuisines:
    c = 0
    for i in js['Abohar']['restaurants'].keys():
        if cuisine in js['Abohar']['restaurants'][i]['cuisine']:
            c += 1
    pop_cui.append([cuisine, c])
df = pd.DataFrame(pop_cui, columns=['item', 'freq'])
df.sort_values(by='freq', ascending=False).head(5)
code
128024415/cell_35
[ "text_html_output_1.png" ]
from json import loads, dumps
import numpy as np
import pandas as pd
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
cuisines = []
for i in js['Abohar']['restaurants'].keys():
    cuisines += js['Abohar']['restaurants'][i]['cuisine'].split(',')
cuisines = list(set(cuisines))
pop_cui = []
for cuisine in cuisines:
    c = 0
    for i in js['Abohar']['restaurants'].keys():
        if cuisine in js['Abohar']['restaurants'][i]['cuisine']:
            c += 1
    pop_cui.append([cuisine, c])
df = pd.DataFrame(pop_cui, columns=['item', 'freq'])
df[df['freq'] == max(df['freq'])]
pop_cui = []
for cuisine in cuisines:
    c = 0
    for i in js['Abohar']['restaurants'].keys():
        if cuisine in js['Abohar']['restaurants'][i]['cuisine']:
            c += 1
    pop_cui.append([cuisine, c])
df = pd.DataFrame(pop_cui, columns=['item', 'freq'])
df.sort_values(by='freq', ascending=False).head(5)
cost = []
for i in js['Abohar']['restaurants'].keys():
    cost.append(int(js['Abohar']['restaurants'][i]['cost'].split(' ')[-1]))
avg_cost = round(sum(cost) / len(cost), 2)
price = []
for i in js['Abohar']['restaurants'].keys():
    if 'North Indian' in js['Abohar']['restaurants'][i]['cuisine']:
        price.append(int(js['Abohar']['restaurants'][i]['cost'].split(' ')[-1]))
avg_cuisine = []
for cuisine in cuisines:
    price = []
    for i in js['Abohar']['restaurants'].keys():
        if cuisine in js['Abohar']['restaurants'][i]['cuisine']:
            price.append(int(js['Abohar']['restaurants'][i]['cost'].split(' ')[-1]))
    avg_cuisine.append([cuisine, sum(price) // len(price)])
avg_cuisine = pd.DataFrame(avg_cuisine, columns=['cuisine', 'avg_cost'])
rest_city = []
for i in js.keys():
    c = 0
    if 'restaurants' in js[i].keys():
        c = len(js[i]['restaurants'])
    else:
        for region in js[i].keys():
            if 'restaurants' in js[i][region].keys():
                c = len(js[i][region]['restaurants'])
    rest_city.append([i, c])
rest_city = pd.DataFrame(rest_city, columns=['city', 'total_restaurants'])
c = 0
arr = []
for city in js.keys():
    if 'restaurants' in js[city].keys():
        for rest in js[city]['restaurants'].keys():
            if 'menu' in js[city]['restaurants'][rest].keys():
                if len(js[city]['restaurants'][rest]['menu'].keys()) == 0:
                    c += 1
                    arr.append([rest])
            else:
                c += 1
                arr.append([rest])
    else:
        for regions in js[city].keys():
            if 'restaurants' in js[city][regions].keys():
                for rest in js[city][regions]['restaurants'].keys():
                    if 'menu' in js[city][regions]['restaurants'][rest].keys():
                        if len(js[city][regions]['restaurants'][rest]['menu'].keys()) == 0:
                            c += 1
                            arr.append([rest])
                    else:
                        c += 1
                        arr.append([rest])
arr = np.array(arr)
np.save('incompleted_rest_data.npy', arr)
avg_cost = []
for city in js.keys():
    cost = []
    if 'restaurants' in js[city].keys():
        for rest in js[city]['restaurants'].keys():
            try:
                cost.append(int(js[city]['restaurants'][rest]['cost'].split(' ')[-1]))
            except:
                pass
    else:
        for region in js[city].keys():
            if 'restaurants' in js[city][region].keys():
                for rest in js[city][region]['restaurants'].keys():
                    try:
                        cost.append(int(js[city][region]['restaurants'][rest]['cost'].split(' ')[-1]))
                    except:
                        pass
    try:
        avg_cost.append([city, sum(cost) // len(cost)])
    except:
        pass
df_ = pd.DataFrame(avg_cost, columns=['city', 'avg_cost'])
df_.sort_values(by='avg_cost', ascending=False).head(5)
code
128024415/cell_31
[ "text_html_output_1.png" ]
from json import loads, dumps
import pandas as pd
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
cuisines = []
for i in js['Abohar']['restaurants'].keys():
    cuisines += js['Abohar']['restaurants'][i]['cuisine'].split(',')
cuisines = list(set(cuisines))
pop_cui = []
for cuisine in cuisines:
    c = 0
    for i in js['Abohar']['restaurants'].keys():
        if cuisine in js['Abohar']['restaurants'][i]['cuisine']:
            c += 1
    pop_cui.append([cuisine, c])
df = pd.DataFrame(pop_cui, columns=['item', 'freq'])
df[df['freq'] == max(df['freq'])]
pop_cui = []
for cuisine in cuisines:
    c = 0
    for i in js['Abohar']['restaurants'].keys():
        if cuisine in js['Abohar']['restaurants'][i]['cuisine']:
            c += 1
    pop_cui.append([cuisine, c])
df = pd.DataFrame(pop_cui, columns=['item', 'freq'])
df.sort_values(by='freq', ascending=False).head(5)
price = []
for i in js['Abohar']['restaurants'].keys():
    if 'North Indian' in js['Abohar']['restaurants'][i]['cuisine']:
        price.append(int(js['Abohar']['restaurants'][i]['cost'].split(' ')[-1]))
avg_cuisine = []
for cuisine in cuisines:
    price = []
    for i in js['Abohar']['restaurants'].keys():
        if cuisine in js['Abohar']['restaurants'][i]['cuisine']:
            price.append(int(js['Abohar']['restaurants'][i]['cost'].split(' ')[-1]))
    avg_cuisine.append([cuisine, sum(price) // len(price)])
avg_cuisine = pd.DataFrame(avg_cuisine, columns=['cuisine', 'avg_cost'])
rest_city = []
for i in js.keys():
    c = 0
    if 'restaurants' in js[i].keys():
        c = len(js[i]['restaurants'])
    else:
        for region in js[i].keys():
            if 'restaurants' in js[i][region].keys():
                c = len(js[i][region]['restaurants'])
    rest_city.append([i, c])
rest_city = pd.DataFrame(rest_city, columns=['city', 'total_restaurants'])
rest_city.sort_values(by='total_restaurants', ascending=False).head()
code
128024415/cell_14
[ "text_plain_output_1.png" ]
from json import loads, dumps
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
cuisines = []
for i in js['Abohar']['restaurants'].keys():
    cuisines += js['Abohar']['restaurants'][i]['cuisine'].split(',')
cuisines = list(set(cuisines))
print(len(cuisines))
code
128024415/cell_10
[ "text_plain_output_1.png" ]
from json import loads, dumps
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
for i in js['Abohar']['restaurants'].keys():
    print(js['Abohar']['restaurants'][i]['name'], '|', len(js['Abohar']['restaurants'][i]['menu'].keys()))
code
128024415/cell_27
[ "text_html_output_1.png" ]
from json import loads, dumps
import pandas as pd
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
cuisines = []
for i in js['Abohar']['restaurants'].keys():
    cuisines += js['Abohar']['restaurants'][i]['cuisine'].split(',')
cuisines = list(set(cuisines))
pop_cui = []
for cuisine in cuisines:
    c = 0
    for i in js['Abohar']['restaurants'].keys():
        if cuisine in js['Abohar']['restaurants'][i]['cuisine']:
            c += 1
    pop_cui.append([cuisine, c])
df = pd.DataFrame(pop_cui, columns=['item', 'freq'])
df[df['freq'] == max(df['freq'])]
pop_cui = []
for cuisine in cuisines:
    c = 0
    for i in js['Abohar']['restaurants'].keys():
        if cuisine in js['Abohar']['restaurants'][i]['cuisine']:
            c += 1
    pop_cui.append([cuisine, c])
df = pd.DataFrame(pop_cui, columns=['item', 'freq'])
df.sort_values(by='freq', ascending=False).head(5)
price = []
for i in js['Abohar']['restaurants'].keys():
    if 'North Indian' in js['Abohar']['restaurants'][i]['cuisine']:
        price.append(int(js['Abohar']['restaurants'][i]['cost'].split(' ')[-1]))
avg_cuisine = []
for cuisine in cuisines:
    price = []
    for i in js['Abohar']['restaurants'].keys():
        if cuisine in js['Abohar']['restaurants'][i]['cuisine']:
            price.append(int(js['Abohar']['restaurants'][i]['cost'].split(' ')[-1]))
    avg_cuisine.append([cuisine, sum(price) // len(price)])
avg_cuisine = pd.DataFrame(avg_cuisine, columns=['cuisine', 'avg_cost'])
avg_cuisine.sort_values(by='avg_cost', ascending=False).head()
avg_cuisine.sort_values(by='avg_cost', ascending=True).head()
code
128024415/cell_12
[ "text_plain_output_1.png" ]
from json import loads, dumps
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
for i in js['Abohar']['restaurants'].keys():
    if len(js['Abohar']['restaurants'][i]['menu']) == 0:
        print(js['Abohar']['restaurants'][i]['name'], '|', i)
code
105190732/cell_13
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
PATH = '../input/titanic/'
df_train = pd.read_csv(f'{PATH}/train.csv', low_memory=False)
df_test = pd.read_csv(f'{PATH}/test.csv', low_memory=False)
df_train.dtypes
label = df_train['Survived']
label.unique()
if label.isnull().sum() == 0:
    print('No missing values')
else:
    print(label.isnull().sum(), 'missing values found in dataset')
code
105190732/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
PATH = '../input/titanic/'
df_train = pd.read_csv(f'{PATH}/train.csv', low_memory=False)
df_test = pd.read_csv(f'{PATH}/test.csv', low_memory=False)
df_train.dtypes
code
105190732/cell_30
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
PATH = '../input/titanic/'
df_train = pd.read_csv(f'{PATH}/train.csv', low_memory=False)
df_test = pd.read_csv(f'{PATH}/test.csv', low_memory=False)
df_train.dtypes
df_train['Age_NA'] = np.where(df_train.Age.isnull(), 1, 0)
df_test['Age_NA'] = np.where(df_test.Age.isnull(), 1, 0)
print(df_train['Age_NA'].value_counts())
sns.factorplot('Age_NA', 'Survived', data=df_train)
code
105190732/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
PATH = '../input/titanic/'
df_train = pd.read_csv(f'{PATH}/train.csv', low_memory=False)
df_test = pd.read_csv(f'{PATH}/test.csv', low_memory=False)
df_train.dtypes
df_train.Age.describe()
code
105190732/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
PATH = '../input/titanic/'
df_train = pd.read_csv(f'{PATH}/train.csv', low_memory=False)
df_test = pd.read_csv(f'{PATH}/test.csv', low_memory=False)
df_train.dtypes
for column in df_train.columns:
    print(column, len(df_train[column].unique()))
code
105190732/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
from scipy import stats
from scipy.cluster import hierarchy as hc
import sklearn
import IPython
import matplotlib.pyplot as plt
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, mean_squared_error
from sklearn.preprocessing import OneHotEncoder
from pandas.api.types import is_string_dtype, is_numeric_dtype, is_categorical_dtype
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import seaborn as sns
import string
import math
import sys
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105190732/cell_7
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
PATH = '../input/titanic/'
df_train = pd.read_csv(f'{PATH}/train.csv', low_memory=False)
df_test = pd.read_csv(f'{PATH}/test.csv', low_memory=False)
print('Train Shape', df_train.shape)
print('Test Shape', df_test.shape)
code
105190732/cell_32
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
PATH = '../input/titanic/'
df_train = pd.read_csv(f'{PATH}/train.csv', low_memory=False)
df_test = pd.read_csv(f'{PATH}/test.csv', low_memory=False)
df_train.dtypes
df_train['Age_NA'] = np.where(df_train.Age.isnull(), 1, 0)
df_test['Age_NA'] = np.where(df_test.Age.isnull(), 1, 0)
a = sns.FacetGrid(df_train, hue='Survived', aspect=4)
a.map(sns.kdeplot, 'Age', shade=True)
a.set(xlim=(0, df_train['Age'].max()))
a.add_legend()
print('Skew for train data:', df_train.Age.skew())
code
105190732/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
PATH = '../input/titanic/'
df_train = pd.read_csv(f'{PATH}/train.csv', low_memory=False)
df_test = pd.read_csv(f'{PATH}/test.csv', low_memory=False)
df_train.head().transpose()
code