Columns:
  path              string    lengths 13 to 17
  screenshot_names  sequence  lengths 1 to 873
  code              string    lengths 0 to 40.4k
  cell_type         string    1 class value (all rows: code)
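Each record below pairs a notebook cell identifier (path, e.g. 89129165/cell_11) with the names of its rendered outputs (screenshot_names), the accumulated source code executed up to that cell (code), and its cell_type. As a minimal sketch of how such records might be loaded and grouped by notebook, assuming the rows were exported to a JSON Lines file (the filename cells.jsonl and the loader below are hypothetical, not part of this dump):

import json
from collections import Counter

# Hypothetical export path; this dump does not specify its on-disk format.
RECORDS_PATH = "cells.jsonl"

def load_records(path):
    """Yield one dict per cell with keys: path, screenshot_names, code, cell_type."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            yield json.loads(line)

# Count cells per notebook: the notebook id is the prefix before "/cell_NN".
cells_per_notebook = Counter(rec["path"].split("/")[0] for rec in load_records(RECORDS_PATH))
print(cells_per_notebook.most_common(5))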
89129165/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd import seaborn as sns import warnings import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS pd.set_option('display.max_colwidth', None) import warnings warnings.filterwarnings('ignore') custom_colors = ['#74a09e', '#86c1b2', '#98e2c6', '#f3c969', '#f2a553', '#d96548', '#c14953'] sns.set_style('whitegrid') sns.despine(left=True, bottom=True) books = pd.read_csv('/kaggle/input/goodreadsbooks/books.csv', error_bad_lines=False) books[:10] books.set_index('bookID', inplace=True) books.index.rename('BookID') books = books.drop(columns=['isbn', 'isbn13']) books.columns
code
89129165/cell_7
[ "text_html_output_1.png" ]
import pandas as pd import seaborn as sns import warnings import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS pd.set_option('display.max_colwidth', None) import warnings warnings.filterwarnings('ignore') custom_colors = ['#74a09e', '#86c1b2', '#98e2c6', '#f3c969', '#f2a553', '#d96548', '#c14953'] sns.set_style('whitegrid') sns.despine(left=True, bottom=True) books = pd.read_csv('/kaggle/input/goodreadsbooks/books.csv', error_bad_lines=False) books[:10]
code
89129165/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd import seaborn as sns import warnings import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS pd.set_option('display.max_colwidth', None) import warnings warnings.filterwarnings('ignore') custom_colors = ['#74a09e', '#86c1b2', '#98e2c6', '#f3c969', '#f2a553', '#d96548', '#c14953'] sns.set_style('whitegrid') sns.despine(left=True, bottom=True) books = pd.read_csv('/kaggle/input/goodreadsbooks/books.csv', error_bad_lines=False) books[:10] books.set_index('bookID', inplace=True) books.index.rename('BookID') books = books.drop(columns=['isbn', 'isbn13']) books.columns books.columns = ['Title', 'Authors', 'Avg_Rating', 'Lang_Code', '#Pages', '#Ratings', '#Text_Reviews', 'Publication_Date', 'Publisher'] books.shape books.info()
code
89129165/cell_28
[ "text_plain_output_1.png" ]
import pandas as pd import seaborn as sns import warnings import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS pd.set_option('display.max_colwidth', None) import warnings warnings.filterwarnings('ignore') custom_colors = ['#74a09e', '#86c1b2', '#98e2c6', '#f3c969', '#f2a553', '#d96548', '#c14953'] sns.set_style('whitegrid') sns.despine(left=True, bottom=True) books = pd.read_csv('/kaggle/input/goodreadsbooks/books.csv', error_bad_lines=False) books[:10] books.set_index('bookID', inplace=True) books.index.rename('BookID') books = books.drop(columns=['isbn', 'isbn13']) books.columns books.columns = ['Title', 'Authors', 'Avg_Rating', 'Lang_Code', '#Pages', '#Ratings', '#Text_Reviews', 'Publication_Date', 'Publisher'] books.shape books.Lang_Code.unique() books.Lang_Code.describe()
code
89129165/cell_8
[ "text_html_output_1.png" ]
import pandas as pd import seaborn as sns import warnings import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS pd.set_option('display.max_colwidth', None) import warnings warnings.filterwarnings('ignore') custom_colors = ['#74a09e', '#86c1b2', '#98e2c6', '#f3c969', '#f2a553', '#d96548', '#c14953'] sns.set_style('whitegrid') sns.despine(left=True, bottom=True) books = pd.read_csv('/kaggle/input/goodreadsbooks/books.csv', error_bad_lines=False) books[:10] books.set_index('bookID', inplace=True) books.index.rename('BookID')
code
89129165/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd import seaborn as sns import warnings import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS pd.set_option('display.max_colwidth', None) import warnings warnings.filterwarnings('ignore') custom_colors = ['#74a09e', '#86c1b2', '#98e2c6', '#f3c969', '#f2a553', '#d96548', '#c14953'] sns.set_style('whitegrid') sns.despine(left=True, bottom=True) books = pd.read_csv('/kaggle/input/goodreadsbooks/books.csv', error_bad_lines=False) books[:10] books.set_index('bookID', inplace=True) books.index.rename('BookID') books = books.drop(columns=['isbn', 'isbn13']) books.columns books.columns = ['Title', 'Authors', 'Avg_Rating', 'Lang_Code', '#Pages', '#Ratings', '#Text_Reviews', 'Publication_Date', 'Publisher'] books.shape
code
89129165/cell_16
[ "text_html_output_1.png" ]
import pandas as pd import seaborn as sns import warnings import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS pd.set_option('display.max_colwidth', None) import warnings warnings.filterwarnings('ignore') custom_colors = ['#74a09e', '#86c1b2', '#98e2c6', '#f3c969', '#f2a553', '#d96548', '#c14953'] sns.set_style('whitegrid') sns.despine(left=True, bottom=True) books = pd.read_csv('/kaggle/input/goodreadsbooks/books.csv', error_bad_lines=False) books[:10] books.set_index('bookID', inplace=True) books.index.rename('BookID') books = books.drop(columns=['isbn', 'isbn13']) books.columns books.columns = ['Title', 'Authors', 'Avg_Rating', 'Lang_Code', '#Pages', '#Ratings', '#Text_Reviews', 'Publication_Date', 'Publisher'] books.shape books.describe()
code
89129165/cell_31
[ "text_plain_output_1.png" ]
from wordcloud import WordCloud,ImageColorGenerator,STOPWORDS import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import warnings import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS pd.set_option('display.max_colwidth', None) import warnings warnings.filterwarnings('ignore') custom_colors = ['#74a09e', '#86c1b2', '#98e2c6', '#f3c969', '#f2a553', '#d96548', '#c14953'] sns.set_style('whitegrid') sns.despine(left=True, bottom=True) books = pd.read_csv('/kaggle/input/goodreadsbooks/books.csv', error_bad_lines=False) books[:10] books.set_index('bookID', inplace=True) books.index.rename('BookID') books = books.drop(columns=['isbn', 'isbn13']) books.columns books.columns = ['Title', 'Authors', 'Avg_Rating', 'Lang_Code', '#Pages', '#Ratings', '#Text_Reviews', 'Publication_Date', 'Publisher'] books.shape all_books = ' '.join((token for token in books['Title'])) stopwords = set(STOPWORDS) font_path = '../input/newghanesfont/NewGhanesFont.otf' wordcloud = WordCloud(stopwords=stopwords, font_path=font_path, max_words=500, max_font_size=350, random_state=42, width=2500, height=1000, colormap='twilight_shifted_r') wordcloud.generate(all_books) plt.axis('off') books.Lang_Code.unique() books.Lang_Code.nunique() sns.set_style('whitegrid') plt.figure(figsize=(12, 4)) sns.distplot(books.Lang_Code, bins=30, norm_hist=False, color='Purple')
code
89129165/cell_14
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd import seaborn as sns import warnings import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS pd.set_option('display.max_colwidth', None) import warnings warnings.filterwarnings('ignore') custom_colors = ['#74a09e', '#86c1b2', '#98e2c6', '#f3c969', '#f2a553', '#d96548', '#c14953'] sns.set_style('whitegrid') sns.despine(left=True, bottom=True) books = pd.read_csv('/kaggle/input/goodreadsbooks/books.csv', error_bad_lines=False) books[:10] books.set_index('bookID', inplace=True) books.index.rename('BookID') books = books.drop(columns=['isbn', 'isbn13']) books.columns books.columns = ['Title', 'Authors', 'Avg_Rating', 'Lang_Code', '#Pages', '#Ratings', '#Text_Reviews', 'Publication_Date', 'Publisher'] books.head()
code
89129165/cell_10
[ "image_output_1.png" ]
import pandas as pd import seaborn as sns import warnings import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS pd.set_option('display.max_colwidth', None) import warnings warnings.filterwarnings('ignore') custom_colors = ['#74a09e', '#86c1b2', '#98e2c6', '#f3c969', '#f2a553', '#d96548', '#c14953'] sns.set_style('whitegrid') sns.despine(left=True, bottom=True) books = pd.read_csv('/kaggle/input/goodreadsbooks/books.csv', error_bad_lines=False) books[:10] books.set_index('bookID', inplace=True) books.index.rename('BookID') books = books.drop(columns=['isbn', 'isbn13']) books.head()
code
89129165/cell_27
[ "text_html_output_1.png" ]
import pandas as pd import seaborn as sns import warnings import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS pd.set_option('display.max_colwidth', None) import warnings warnings.filterwarnings('ignore') custom_colors = ['#74a09e', '#86c1b2', '#98e2c6', '#f3c969', '#f2a553', '#d96548', '#c14953'] sns.set_style('whitegrid') sns.despine(left=True, bottom=True) books = pd.read_csv('/kaggle/input/goodreadsbooks/books.csv', error_bad_lines=False) books[:10] books.set_index('bookID', inplace=True) books.index.rename('BookID') books = books.drop(columns=['isbn', 'isbn13']) books.columns books.columns = ['Title', 'Authors', 'Avg_Rating', 'Lang_Code', '#Pages', '#Ratings', '#Text_Reviews', 'Publication_Date', 'Publisher'] books.shape books.Lang_Code.unique()
code
89129165/cell_5
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd import seaborn as sns import warnings import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS pd.set_option('display.max_colwidth', None) import warnings warnings.filterwarnings('ignore') custom_colors = ['#74a09e', '#86c1b2', '#98e2c6', '#f3c969', '#f2a553', '#d96548', '#c14953'] sns.palplot(sns.color_palette(custom_colors)) sns.set_style('whitegrid') sns.despine(left=True, bottom=True)
code
89129165/cell_36
[ "text_plain_output_1.png" ]
from wordcloud import WordCloud,ImageColorGenerator,STOPWORDS import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import warnings import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS pd.set_option('display.max_colwidth', None) import warnings warnings.filterwarnings('ignore') custom_colors = ['#74a09e', '#86c1b2', '#98e2c6', '#f3c969', '#f2a553', '#d96548', '#c14953'] sns.set_style('whitegrid') sns.despine(left=True, bottom=True) books = pd.read_csv('/kaggle/input/goodreadsbooks/books.csv', error_bad_lines=False) books[:10] books.set_index('bookID', inplace=True) books.index.rename('BookID') books = books.drop(columns=['isbn', 'isbn13']) books.columns books.columns = ['Title', 'Authors', 'Avg_Rating', 'Lang_Code', '#Pages', '#Ratings', '#Text_Reviews', 'Publication_Date', 'Publisher'] books.shape all_books = ' '.join((token for token in books['Title'])) stopwords = set(STOPWORDS) font_path = '../input/newghanesfont/NewGhanesFont.otf' wordcloud = WordCloud(stopwords=stopwords, font_path=font_path, max_words=500, max_font_size=350, random_state=42, width=2500, height=1000, colormap='twilight_shifted_r') wordcloud.generate(all_books) plt.axis('off') books.Lang_Code.unique() books.Lang_Code.nunique() sns.set_style('whitegrid') sns.set_style('whitegrid') top_rated_books = books.sort_values(by=['Avg_Rating'], ascending=False) top_rated_books
code
17134171/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import plotly import os import numpy as np import pandas as pd import re from datetime import datetime import seaborn as sns import matplotlib.pyplot as plt import plotly import plotly.plotly as py import plotly.graph_objs as go import colorlover as cl plotly.offline.init_notebook_mode() from sklearn.model_selection import StratifiedKFold from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.utils.multiclass import unique_labels from sklearn.decomposition import PCA from keras.models import Model from keras.optimizers import Adamax from sklearn.metrics import log_loss from keras.utils import to_categorical from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.layers import Input, Embedding, Dense, Bidirectional, CuDNNGRU, GlobalMaxPooling1D import seaborn as sns import matplotlib.pyplot as plt
code
17134171/cell_8
[ "text_html_output_1.png" ]
from datetime import datetime from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.text import Tokenizer import numpy as np import os import pandas as pd import plotly import plotly.graph_objs as go import re import os import numpy as np import pandas as pd import re from datetime import datetime import seaborn as sns import matplotlib.pyplot as plt import plotly import plotly.plotly as py import plotly.graph_objs as go import colorlover as cl plotly.offline.init_notebook_mode() from sklearn.model_selection import StratifiedKFold from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.utils.multiclass import unique_labels from sklearn.decomposition import PCA from keras.models import Model from keras.optimizers import Adamax from sklearn.metrics import log_loss from keras.utils import to_categorical from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.layers import Input, Embedding, Dense, Bidirectional, CuDNNGRU, GlobalMaxPooling1D import seaborn as sns import matplotlib.pyplot as plt def clean(text): text = re.sub('#\\S*', ' ', text) text = re.sub('@\\S*', ' ', text) text = re.sub('http\\S*', ' ', text) for ch in ['\\', '`', '*', '_', '{', '}', '[', ']', '(', ')', '>', '+', '-', '.', '!', "'", '\\”', '"', '\\β€œ', '\\’', '?', ':', '-', ',', '//t', '&amp;', '/', "'", "'", '…', '-', '’', '\\β€”', 'β€”', '–', 'β€œ', '”']: if ch in text: text = text.replace(ch, ' ') return text def clean_tweet(tweet): return ' '.join(clean(tweet.lower()).split()) def print_table(header_values, content, colors): data = go.Table(header=dict(values=header_values, line=dict(color='rgb(70,130,180)'), fill=dict(color='rgb(70,130,180)'), align='center', font=dict(color='black', size=9)), cells=dict(values=content, fill=colors, align='center', font=dict(color='black', size=9), height=40)) def load_glove(word_index): EMBEDDING_FILE = '../input/glove840b300dtxt/glove.840B.300d.txt' def get_coefs(word, *arr): return (word, np.asarray(arr, dtype='float32')) embeddings_index = dict((get_coefs(*o.split(' ')) for o in open(EMBEDDING_FILE))) all_embs = np.stack(embeddings_index.values()) emb_mean, emb_std = (all_embs.mean(), all_embs.std()) embed_size = all_embs.shape[1] nb_words = min(18000, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size)) for word, i in word_index.items(): if i >= 18000: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix def load_data(X, y, train_index, test_index): Xtr = X.iloc[train_index] Xte = X.iloc[test_index] tokenizer = Tokenizer(num_words=18000) tokenizer.fit_on_texts(list(Xtr)) Xtr = tokenizer.texts_to_sequences(Xtr) Xte = tokenizer.texts_to_sequences(Xte) Xtr = pad_sequences(Xtr, maxlen=50) Xte = pad_sequences(Xte, maxlen=50) ytr = y.iloc[train_index] yte = y.iloc[test_index] word_index = tokenizer.word_index return (Xtr, ytr, Xte, yte, word_index) twitter_files = os.listdir('../input/2020-united-states-presidential-election/twitter') twitter_users_files = os.listdir('../input/2020-united-states-presidential-election/twitter_users') pic_files = os.listdir('../input/2020-united-states-presidential-election/pics') metadata = pd.read_csv('../input/2020-united-states-presidential-election/candidates_info.csv') metadata['filename'] = metadata['handle'].apply(lambda x: x[1:]) metadata['age'] = ((datetime.today() - 
pd.to_datetime(metadata['born'])).dt.days / 365).astype(int) metadata = metadata.sort_values('filename') metadata.reset_index(inplace=True) dataset = pd.DataFrame() for index, row in metadata.iterrows(): df = pd.read_csv('../input/2020-united-states-presidential-election/twitter/%s.csv' % row['filename']) dataset = pd.concat([dataset, df], ignore_index=True) dataset['clean tweet'] = dataset['Text'].apply(clean_tweet) dataset['number of characters'] = dataset['clean tweet'].str.len() metadata['number of all tweets'] = dataset.groupby(['Screen Name']).count()['Tweet Id'].values metadata['tweets in english'] = dataset[dataset['Language'] == 'English'].groupby(['Screen Name']).count()['Tweet Id'].values metadata['tweets not in english'] = dataset[dataset['Language'] != 'English'].groupby(['Screen Name']).count()['Tweet Id'].values metadata['tweets only'] = dataset[dataset['Tweet Type'] == 'Tweet'].groupby(['Screen Name']).count()['Tweet Id'].values metadata['retweets only'] = dataset[dataset['Tweet Type'] == 'Retweet'].groupby(['Screen Name']).count()['Tweet Id'].values metadata['replies only'] = dataset[dataset['Tweet Type'] == 'Reply'].groupby(['Screen Name']).count()['Tweet Id'].values metadata['after 2019'] = dataset[dataset['Created At'].astype('datetime64').dt.year > 2018].groupby(['Screen Name']).count()['Tweet Id'].values metadata['before 2019'] = metadata['number of all tweets'] - metadata['after 2019'] metadata['more than 40 characters'] = dataset[dataset['number of characters'] > 39].groupby(['Screen Name']).count()['Tweet Id'].values metadata['less than 40 characters'] = dataset[dataset['number of characters'] < 40].groupby(['Screen Name']).count()['Tweet Id'].values dataset = dataset[dataset['Created At'].astype('datetime64').dt.year > 2018] dataset = dataset[dataset['number of characters'] > 39] dataset = dataset[dataset['Tweet Type'] == 'Tweet'] dataset = dataset[dataset['Language'] == 'English'] X = dataset['clean tweet'] y = dataset['Name'] metadata['useful tweets'] = dataset.groupby(['Screen Name']).count()['Tweet Id'].values columns = ['name', 'number of all tweets', 'tweets in english', 'tweets not in english', 'tweets only', 'retweets only', 'replies only', 'after 2019', 'before 2019', 'more than 40 characters', 'less than 40 characters', 'useful tweets'] header_values = ['<b>%s</b>' % x for x in columns] content = metadata.sort_values('useful tweets', ascending=False)[columns].T colors = dict() print_table(header_values, content, colors)
code
17134171/cell_14
[ "text_plain_output_1.png" ]
from datetime import datetime from keras.layers import Input, Embedding, Dense, Bidirectional, CuDNNGRU, GlobalMaxPooling1D from keras.models import Model from keras.optimizers import Adamax from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.text import Tokenizer from sklearn.decomposition import PCA from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.model_selection import StratifiedKFold import matplotlib.pyplot as plt import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import plotly import plotly.graph_objs as go import re import seaborn as sns import seaborn as sns import warnings import os import numpy as np import pandas as pd import re from datetime import datetime import seaborn as sns import matplotlib.pyplot as plt import plotly import plotly.plotly as py import plotly.graph_objs as go import colorlover as cl plotly.offline.init_notebook_mode() from sklearn.model_selection import StratifiedKFold from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.utils.multiclass import unique_labels from sklearn.decomposition import PCA from keras.models import Model from keras.optimizers import Adamax from sklearn.metrics import log_loss from keras.utils import to_categorical from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.layers import Input, Embedding, Dense, Bidirectional, CuDNNGRU, GlobalMaxPooling1D import seaborn as sns import matplotlib.pyplot as plt def clean(text): text = re.sub('#\\S*', ' ', text) text = re.sub('@\\S*', ' ', text) text = re.sub('http\\S*', ' ', text) for ch in ['\\', '`', '*', '_', '{', '}', '[', ']', '(', ')', '>', '+', '-', '.', '!', "'", '\\”', '"', '\\β€œ', '\\’', '?', ':', '-', ',', '//t', '&amp;', '/', "'", "'", '…', '-', '’', '\\β€”', 'β€”', '–', 'β€œ', '”']: if ch in text: text = text.replace(ch, ' ') return text def clean_tweet(tweet): return ' '.join(clean(tweet.lower()).split()) def print_table(header_values, content, colors): data = go.Table(header=dict(values=header_values, line=dict(color='rgb(70,130,180)'), fill=dict(color='rgb(70,130,180)'), align='center', font=dict(color='black', size=9)), cells=dict(values=content, fill=colors, align='center', font=dict(color='black', size=9), height=40)) def load_glove(word_index): EMBEDDING_FILE = '../input/glove840b300dtxt/glove.840B.300d.txt' def get_coefs(word, *arr): return (word, np.asarray(arr, dtype='float32')) embeddings_index = dict((get_coefs(*o.split(' ')) for o in open(EMBEDDING_FILE))) all_embs = np.stack(embeddings_index.values()) emb_mean, emb_std = (all_embs.mean(), all_embs.std()) embed_size = all_embs.shape[1] nb_words = min(18000, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size)) for word, i in word_index.items(): if i >= 18000: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix def load_data(X, y, train_index, test_index): Xtr = X.iloc[train_index] Xte = X.iloc[test_index] tokenizer = Tokenizer(num_words=18000) tokenizer.fit_on_texts(list(Xtr)) Xtr = tokenizer.texts_to_sequences(Xtr) Xte = tokenizer.texts_to_sequences(Xte) Xtr = pad_sequences(Xtr, maxlen=50) Xte = pad_sequences(Xte, maxlen=50) ytr = y.iloc[train_index] yte = y.iloc[test_index] word_index = tokenizer.word_index return (Xtr, ytr, Xte, yte, word_index) twitter_files = 
os.listdir('../input/2020-united-states-presidential-election/twitter') twitter_users_files = os.listdir('../input/2020-united-states-presidential-election/twitter_users') pic_files = os.listdir('../input/2020-united-states-presidential-election/pics') metadata = pd.read_csv('../input/2020-united-states-presidential-election/candidates_info.csv') metadata['filename'] = metadata['handle'].apply(lambda x: x[1:]) metadata['age'] = ((datetime.today() - pd.to_datetime(metadata['born'])).dt.days / 365).astype(int) metadata = metadata.sort_values('filename') metadata.reset_index(inplace=True) dataset = pd.DataFrame() for index, row in metadata.iterrows(): df = pd.read_csv('../input/2020-united-states-presidential-election/twitter/%s.csv' % row['filename']) dataset = pd.concat([dataset, df], ignore_index=True) dataset['clean tweet'] = dataset['Text'].apply(clean_tweet) dataset['number of characters'] = dataset['clean tweet'].str.len() metadata['number of all tweets'] = dataset.groupby(['Screen Name']).count()['Tweet Id'].values metadata['tweets in english'] = dataset[dataset['Language'] == 'English'].groupby(['Screen Name']).count()['Tweet Id'].values metadata['tweets not in english'] = dataset[dataset['Language'] != 'English'].groupby(['Screen Name']).count()['Tweet Id'].values metadata['tweets only'] = dataset[dataset['Tweet Type'] == 'Tweet'].groupby(['Screen Name']).count()['Tweet Id'].values metadata['retweets only'] = dataset[dataset['Tweet Type'] == 'Retweet'].groupby(['Screen Name']).count()['Tweet Id'].values metadata['replies only'] = dataset[dataset['Tweet Type'] == 'Reply'].groupby(['Screen Name']).count()['Tweet Id'].values metadata['after 2019'] = dataset[dataset['Created At'].astype('datetime64').dt.year > 2018].groupby(['Screen Name']).count()['Tweet Id'].values metadata['before 2019'] = metadata['number of all tweets'] - metadata['after 2019'] metadata['more than 40 characters'] = dataset[dataset['number of characters'] > 39].groupby(['Screen Name']).count()['Tweet Id'].values metadata['less than 40 characters'] = dataset[dataset['number of characters'] < 40].groupby(['Screen Name']).count()['Tweet Id'].values dataset = dataset[dataset['Created At'].astype('datetime64').dt.year > 2018] dataset = dataset[dataset['number of characters'] > 39] dataset = dataset[dataset['Tweet Type'] == 'Tweet'] dataset = dataset[dataset['Language'] == 'English'] X = dataset['clean tweet'] y = dataset['Name'] metadata['useful tweets'] = dataset.groupby(['Screen Name']).count()['Tweet Id'].values import warnings warnings.simplefilter(action='ignore', category=FutureWarning) kfold = StratifiedKFold(n_splits=10, random_state=0, shuffle=True) scores = [] test_list = [] predict_list = [] test_df = pd.DataFrame() pred_df = pd.DataFrame() for train_index, test_index in kfold.split(X, y): yoh = pd.get_dummies(y) train_X, train_y, test_X, test_y, word_index = load_data(X, yoh, train_index, test_index) embedding = load_glove(word_index) inp = Input(shape=(50,)) x = Embedding(18000, 300, weights=[embedding])(inp) x = Bidirectional(CuDNNGRU(256, return_sequences=True))(x) x = Bidirectional(CuDNNGRU(128, return_sequences=True))(x) x = GlobalMaxPooling1D()(x) x = Dense(train_y.shape[1], activation='softmax')(x) model = Model(inputs=inp, outputs=x) model.compile(loss='categorical_crossentropy', optimizer=Adamax(lr=0.002), metrics=['accuracy']) model.fit(train_X, train_y, batch_size=1024, epochs=15, validation_data=(test_X, test_y), verbose=0) pred_y = model.predict(test_X) test_df = pd.concat([test_df, test_y], 
axis=0, ignore_index=True) pred_df = pd.concat([pred_df, pd.DataFrame(pred_y)], axis=0, ignore_index=True) pred_df.columns = test_df.columns.values test_label = test_df.idxmax(axis=1) pred_label = pred_df.idxmax(axis=1) cm = confusion_matrix(test_label, pred_label, test_df.columns.values) cm = ((cm.astype('float')*100 / cm.sum(axis=1)[:, np.newaxis] + 0.5).astype('int'))/100 f, ax = plt.subplots(figsize=(14, 12)) sns.set(font_scale=1.4)#for label size sns.heatmap(cm, annot=True,annot_kws={"size": 8}, xticklabels=test_df.columns.values, yticklabels=test_df.columns.values) progressive_candidates = ['Bernie Sanders', 'Elizabeth Warren', 'Kamala Harris', 'Cory Booker', 'Kirsten Gillibrand'] republican = ['Donald J. Trump', 'Mike Pence', 'Gov. Bill Weld'] all_others = ['Amy Klobuchar', 'Andrew Yang', "Beto O'Rourke", 'Bill de Blasio', 'Eric Swalwell', 'Jay Inslee', 'John Delaney', 'John Hickenlooper', 'JuliΓ‘n Castro', 'Marianne Williamson', 'Michael Bennet', 'Sen. Mike Gravel', 'Seth Moulton', 'Steve Bullock', 'Tim Ryan', 'Tulsi Gabbard', 'Wayne Messam', 'Pete Buttigieg', 'Joe Biden'] pred_df['true_label'] = test_label average_pred = pred_df.groupby(['true_label']).sum() / pred_df.groupby(['true_label']).count() pca = PCA(n_components=2) pca.fit(average_pred) boiled_down = pd.DataFrame(data=pca.transform(average_pred), index=test_df.columns.values, columns=['a', 'b']) fig = {'data': [{'x': boiled_down.loc[progressive_candidates].a, 'y': boiled_down.loc[progressive_candidates].b, 'text': boiled_down.loc[progressive_candidates].index, 'marker': {'color': 'rgb(251,169,46)', 'size': 6}, 'mode': 'markers', 'name': 'Progressive Senators'}, {'x': boiled_down.loc[all_others].a, 'y': boiled_down.loc[all_others].b, 'text': boiled_down.loc[all_others].index, 'marker': {'color': 'rgb(0,138,147)', 'size': 6}, 'mode': 'markers', 'name': 'All Others'}, {'x': boiled_down.loc[republican].a, 'y': boiled_down.loc[republican].b, 'text': boiled_down.loc[republican].index, 'marker': {'color': 'rgb(143,26,29)', 'size': 6}, 'mode': 'markers', 'name': 'Republican'}]} plotly.offline.iplot(fig)
code
17134171/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
from datetime import datetime from keras.layers import Input, Embedding, Dense, Bidirectional, CuDNNGRU, GlobalMaxPooling1D from keras.models import Model from keras.optimizers import Adamax from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.text import Tokenizer from sklearn.metrics import accuracy_score from sklearn.model_selection import StratifiedKFold import numpy as np import os import pandas as pd import plotly import plotly.graph_objs as go import re import warnings import os import numpy as np import pandas as pd import re from datetime import datetime import seaborn as sns import matplotlib.pyplot as plt import plotly import plotly.plotly as py import plotly.graph_objs as go import colorlover as cl plotly.offline.init_notebook_mode() from sklearn.model_selection import StratifiedKFold from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.utils.multiclass import unique_labels from sklearn.decomposition import PCA from keras.models import Model from keras.optimizers import Adamax from sklearn.metrics import log_loss from keras.utils import to_categorical from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.layers import Input, Embedding, Dense, Bidirectional, CuDNNGRU, GlobalMaxPooling1D import seaborn as sns import matplotlib.pyplot as plt def clean(text): text = re.sub('#\\S*', ' ', text) text = re.sub('@\\S*', ' ', text) text = re.sub('http\\S*', ' ', text) for ch in ['\\', '`', '*', '_', '{', '}', '[', ']', '(', ')', '>', '+', '-', '.', '!', "'", '\\”', '"', '\\β€œ', '\\’', '?', ':', '-', ',', '//t', '&amp;', '/', "'", "'", '…', '-', '’', '\\β€”', 'β€”', '–', 'β€œ', '”']: if ch in text: text = text.replace(ch, ' ') return text def clean_tweet(tweet): return ' '.join(clean(tweet.lower()).split()) def print_table(header_values, content, colors): data = go.Table(header=dict(values=header_values, line=dict(color='rgb(70,130,180)'), fill=dict(color='rgb(70,130,180)'), align='center', font=dict(color='black', size=9)), cells=dict(values=content, fill=colors, align='center', font=dict(color='black', size=9), height=40)) def load_glove(word_index): EMBEDDING_FILE = '../input/glove840b300dtxt/glove.840B.300d.txt' def get_coefs(word, *arr): return (word, np.asarray(arr, dtype='float32')) embeddings_index = dict((get_coefs(*o.split(' ')) for o in open(EMBEDDING_FILE))) all_embs = np.stack(embeddings_index.values()) emb_mean, emb_std = (all_embs.mean(), all_embs.std()) embed_size = all_embs.shape[1] nb_words = min(18000, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size)) for word, i in word_index.items(): if i >= 18000: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix def load_data(X, y, train_index, test_index): Xtr = X.iloc[train_index] Xte = X.iloc[test_index] tokenizer = Tokenizer(num_words=18000) tokenizer.fit_on_texts(list(Xtr)) Xtr = tokenizer.texts_to_sequences(Xtr) Xte = tokenizer.texts_to_sequences(Xte) Xtr = pad_sequences(Xtr, maxlen=50) Xte = pad_sequences(Xte, maxlen=50) ytr = y.iloc[train_index] yte = y.iloc[test_index] word_index = tokenizer.word_index return (Xtr, ytr, Xte, yte, word_index) twitter_files = os.listdir('../input/2020-united-states-presidential-election/twitter') twitter_users_files = os.listdir('../input/2020-united-states-presidential-election/twitter_users') pic_files = 
os.listdir('../input/2020-united-states-presidential-election/pics') metadata = pd.read_csv('../input/2020-united-states-presidential-election/candidates_info.csv') metadata['filename'] = metadata['handle'].apply(lambda x: x[1:]) metadata['age'] = ((datetime.today() - pd.to_datetime(metadata['born'])).dt.days / 365).astype(int) metadata = metadata.sort_values('filename') metadata.reset_index(inplace=True) dataset = pd.DataFrame() for index, row in metadata.iterrows(): df = pd.read_csv('../input/2020-united-states-presidential-election/twitter/%s.csv' % row['filename']) dataset = pd.concat([dataset, df], ignore_index=True) dataset['clean tweet'] = dataset['Text'].apply(clean_tweet) dataset['number of characters'] = dataset['clean tweet'].str.len() metadata['number of all tweets'] = dataset.groupby(['Screen Name']).count()['Tweet Id'].values metadata['tweets in english'] = dataset[dataset['Language'] == 'English'].groupby(['Screen Name']).count()['Tweet Id'].values metadata['tweets not in english'] = dataset[dataset['Language'] != 'English'].groupby(['Screen Name']).count()['Tweet Id'].values metadata['tweets only'] = dataset[dataset['Tweet Type'] == 'Tweet'].groupby(['Screen Name']).count()['Tweet Id'].values metadata['retweets only'] = dataset[dataset['Tweet Type'] == 'Retweet'].groupby(['Screen Name']).count()['Tweet Id'].values metadata['replies only'] = dataset[dataset['Tweet Type'] == 'Reply'].groupby(['Screen Name']).count()['Tweet Id'].values metadata['after 2019'] = dataset[dataset['Created At'].astype('datetime64').dt.year > 2018].groupby(['Screen Name']).count()['Tweet Id'].values metadata['before 2019'] = metadata['number of all tweets'] - metadata['after 2019'] metadata['more than 40 characters'] = dataset[dataset['number of characters'] > 39].groupby(['Screen Name']).count()['Tweet Id'].values metadata['less than 40 characters'] = dataset[dataset['number of characters'] < 40].groupby(['Screen Name']).count()['Tweet Id'].values dataset = dataset[dataset['Created At'].astype('datetime64').dt.year > 2018] dataset = dataset[dataset['number of characters'] > 39] dataset = dataset[dataset['Tweet Type'] == 'Tweet'] dataset = dataset[dataset['Language'] == 'English'] X = dataset['clean tweet'] y = dataset['Name'] metadata['useful tweets'] = dataset.groupby(['Screen Name']).count()['Tweet Id'].values import warnings warnings.simplefilter(action='ignore', category=FutureWarning) kfold = StratifiedKFold(n_splits=10, random_state=0, shuffle=True) scores = [] test_list = [] predict_list = [] test_df = pd.DataFrame() pred_df = pd.DataFrame() for train_index, test_index in kfold.split(X, y): yoh = pd.get_dummies(y) train_X, train_y, test_X, test_y, word_index = load_data(X, yoh, train_index, test_index) embedding = load_glove(word_index) inp = Input(shape=(50,)) x = Embedding(18000, 300, weights=[embedding])(inp) x = Bidirectional(CuDNNGRU(256, return_sequences=True))(x) x = Bidirectional(CuDNNGRU(128, return_sequences=True))(x) x = GlobalMaxPooling1D()(x) x = Dense(train_y.shape[1], activation='softmax')(x) model = Model(inputs=inp, outputs=x) model.compile(loss='categorical_crossentropy', optimizer=Adamax(lr=0.002), metrics=['accuracy']) model.fit(train_X, train_y, batch_size=1024, epochs=15, validation_data=(test_X, test_y), verbose=0) pred_y = model.predict(test_X) test_df = pd.concat([test_df, test_y], axis=0, ignore_index=True) pred_df = pd.concat([pred_df, pd.DataFrame(pred_y)], axis=0, ignore_index=True) pred_df.columns = test_df.columns.values test_label = test_df.idxmax(axis=1) 
pred_label = pred_df.idxmax(axis=1) print('Accuracy:', accuracy_score(test_label, pred_label))
code
17134171/cell_12
[ "text_html_output_1.png" ]
from datetime import datetime from keras.layers import Input, Embedding, Dense, Bidirectional, CuDNNGRU, GlobalMaxPooling1D from keras.models import Model from keras.optimizers import Adamax from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.text import Tokenizer from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.model_selection import StratifiedKFold import matplotlib.pyplot as plt import matplotlib.pyplot as plt import numpy as np import os import pandas as pd import plotly import plotly.graph_objs as go import re import seaborn as sns import seaborn as sns import warnings import os import numpy as np import pandas as pd import re from datetime import datetime import seaborn as sns import matplotlib.pyplot as plt import plotly import plotly.plotly as py import plotly.graph_objs as go import colorlover as cl plotly.offline.init_notebook_mode() from sklearn.model_selection import StratifiedKFold from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.utils.multiclass import unique_labels from sklearn.decomposition import PCA from keras.models import Model from keras.optimizers import Adamax from sklearn.metrics import log_loss from keras.utils import to_categorical from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.layers import Input, Embedding, Dense, Bidirectional, CuDNNGRU, GlobalMaxPooling1D import seaborn as sns import matplotlib.pyplot as plt def clean(text): text = re.sub('#\\S*', ' ', text) text = re.sub('@\\S*', ' ', text) text = re.sub('http\\S*', ' ', text) for ch in ['\\', '`', '*', '_', '{', '}', '[', ']', '(', ')', '>', '+', '-', '.', '!', "'", '\\”', '"', '\\β€œ', '\\’', '?', ':', '-', ',', '//t', '&amp;', '/', "'", "'", '…', '-', '’', '\\β€”', 'β€”', '–', 'β€œ', '”']: if ch in text: text = text.replace(ch, ' ') return text def clean_tweet(tweet): return ' '.join(clean(tweet.lower()).split()) def print_table(header_values, content, colors): data = go.Table(header=dict(values=header_values, line=dict(color='rgb(70,130,180)'), fill=dict(color='rgb(70,130,180)'), align='center', font=dict(color='black', size=9)), cells=dict(values=content, fill=colors, align='center', font=dict(color='black', size=9), height=40)) def load_glove(word_index): EMBEDDING_FILE = '../input/glove840b300dtxt/glove.840B.300d.txt' def get_coefs(word, *arr): return (word, np.asarray(arr, dtype='float32')) embeddings_index = dict((get_coefs(*o.split(' ')) for o in open(EMBEDDING_FILE))) all_embs = np.stack(embeddings_index.values()) emb_mean, emb_std = (all_embs.mean(), all_embs.std()) embed_size = all_embs.shape[1] nb_words = min(18000, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size)) for word, i in word_index.items(): if i >= 18000: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix def load_data(X, y, train_index, test_index): Xtr = X.iloc[train_index] Xte = X.iloc[test_index] tokenizer = Tokenizer(num_words=18000) tokenizer.fit_on_texts(list(Xtr)) Xtr = tokenizer.texts_to_sequences(Xtr) Xte = tokenizer.texts_to_sequences(Xte) Xtr = pad_sequences(Xtr, maxlen=50) Xte = pad_sequences(Xte, maxlen=50) ytr = y.iloc[train_index] yte = y.iloc[test_index] word_index = tokenizer.word_index return (Xtr, ytr, Xte, yte, word_index) twitter_files = 
os.listdir('../input/2020-united-states-presidential-election/twitter') twitter_users_files = os.listdir('../input/2020-united-states-presidential-election/twitter_users') pic_files = os.listdir('../input/2020-united-states-presidential-election/pics') metadata = pd.read_csv('../input/2020-united-states-presidential-election/candidates_info.csv') metadata['filename'] = metadata['handle'].apply(lambda x: x[1:]) metadata['age'] = ((datetime.today() - pd.to_datetime(metadata['born'])).dt.days / 365).astype(int) metadata = metadata.sort_values('filename') metadata.reset_index(inplace=True) dataset = pd.DataFrame() for index, row in metadata.iterrows(): df = pd.read_csv('../input/2020-united-states-presidential-election/twitter/%s.csv' % row['filename']) dataset = pd.concat([dataset, df], ignore_index=True) dataset['clean tweet'] = dataset['Text'].apply(clean_tweet) dataset['number of characters'] = dataset['clean tweet'].str.len() metadata['number of all tweets'] = dataset.groupby(['Screen Name']).count()['Tweet Id'].values metadata['tweets in english'] = dataset[dataset['Language'] == 'English'].groupby(['Screen Name']).count()['Tweet Id'].values metadata['tweets not in english'] = dataset[dataset['Language'] != 'English'].groupby(['Screen Name']).count()['Tweet Id'].values metadata['tweets only'] = dataset[dataset['Tweet Type'] == 'Tweet'].groupby(['Screen Name']).count()['Tweet Id'].values metadata['retweets only'] = dataset[dataset['Tweet Type'] == 'Retweet'].groupby(['Screen Name']).count()['Tweet Id'].values metadata['replies only'] = dataset[dataset['Tweet Type'] == 'Reply'].groupby(['Screen Name']).count()['Tweet Id'].values metadata['after 2019'] = dataset[dataset['Created At'].astype('datetime64').dt.year > 2018].groupby(['Screen Name']).count()['Tweet Id'].values metadata['before 2019'] = metadata['number of all tweets'] - metadata['after 2019'] metadata['more than 40 characters'] = dataset[dataset['number of characters'] > 39].groupby(['Screen Name']).count()['Tweet Id'].values metadata['less than 40 characters'] = dataset[dataset['number of characters'] < 40].groupby(['Screen Name']).count()['Tweet Id'].values dataset = dataset[dataset['Created At'].astype('datetime64').dt.year > 2018] dataset = dataset[dataset['number of characters'] > 39] dataset = dataset[dataset['Tweet Type'] == 'Tweet'] dataset = dataset[dataset['Language'] == 'English'] X = dataset['clean tweet'] y = dataset['Name'] metadata['useful tweets'] = dataset.groupby(['Screen Name']).count()['Tweet Id'].values import warnings warnings.simplefilter(action='ignore', category=FutureWarning) kfold = StratifiedKFold(n_splits=10, random_state=0, shuffle=True) scores = [] test_list = [] predict_list = [] test_df = pd.DataFrame() pred_df = pd.DataFrame() for train_index, test_index in kfold.split(X, y): yoh = pd.get_dummies(y) train_X, train_y, test_X, test_y, word_index = load_data(X, yoh, train_index, test_index) embedding = load_glove(word_index) inp = Input(shape=(50,)) x = Embedding(18000, 300, weights=[embedding])(inp) x = Bidirectional(CuDNNGRU(256, return_sequences=True))(x) x = Bidirectional(CuDNNGRU(128, return_sequences=True))(x) x = GlobalMaxPooling1D()(x) x = Dense(train_y.shape[1], activation='softmax')(x) model = Model(inputs=inp, outputs=x) model.compile(loss='categorical_crossentropy', optimizer=Adamax(lr=0.002), metrics=['accuracy']) model.fit(train_X, train_y, batch_size=1024, epochs=15, validation_data=(test_X, test_y), verbose=0) pred_y = model.predict(test_X) test_df = pd.concat([test_df, test_y], 
axis=0, ignore_index=True) pred_df = pd.concat([pred_df, pd.DataFrame(pred_y)], axis=0, ignore_index=True) pred_df.columns = test_df.columns.values test_label = test_df.idxmax(axis=1) pred_label = pred_df.idxmax(axis=1) cm = confusion_matrix(test_label, pred_label, test_df.columns.values) cm = (cm.astype('float') * 100 / cm.sum(axis=1)[:, np.newaxis] + 0.5).astype('int') / 100 f, ax = plt.subplots(figsize=(14, 12)) sns.set(font_scale=1.4) sns.heatmap(cm, annot=True, annot_kws={'size': 8}, xticklabels=test_df.columns.values, yticklabels=test_df.columns.values)
code
88102651/cell_21
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import OneHotEncoder import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/income/train.csv') data.shape all_columns = list(data.columns) all_columns data.isna().sum() cat_columns = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'gender', 'native-country'] def event_rate_analysis(column, target): temp = (data.groupby(column)[target].sum() / data[target].sum()).to_frame().reset_index() temp['Volume'] = (data.groupby(column)[target].count() / data[target].count()).to_list() temp = temp.sort_values(target, ascending=False) from sklearn.preprocessing import OneHotEncoder ohe = OneHotEncoder(sparse=False) data_ohe = ohe.fit_transform(data[['race', 'gender', 'relationship', 'occupation', 'workclass', 'native-country', 'marital-status', 'education']]) cat_columns = [] for cat in ohe.categories_: cat_columns.extend(cat) cat_df = pd.DataFrame(data_ohe, columns=cat_columns) cat_df.head()
code
88102651/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/income/train.csv') data.shape all_columns = list(data.columns) all_columns data.isna().sum() def event_rate_analysis(column, target): temp = (data.groupby(column)[target].sum() / data[target].sum()).to_frame().reset_index() temp['Volume'] = (data.groupby(column)[target].count() / data[target].count()).to_list() temp = temp.sort_values(target, ascending=False) event_rate_analysis('occupation', 'income_>50K')
code
88102651/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/income/train.csv') data.shape all_columns = list(data.columns) all_columns data.isna().sum() cat_columns = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'gender', 'native-country'] for col in cat_columns: print('Unique values for ', col, 'is : ', data[col].nunique())
code
88102651/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/income/train.csv') data.shape all_columns = list(data.columns) all_columns data.info()
code
88102651/cell_34
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import OneHotEncoder import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/income/train.csv') data.shape all_columns = list(data.columns) all_columns data.isna().sum() cat_columns = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'gender', 'native-country'] def event_rate_analysis(column, target): temp = (data.groupby(column)[target].sum() / data[target].sum()).to_frame().reset_index() temp['Volume'] = (data.groupby(column)[target].count() / data[target].count()).to_list() temp = temp.sort_values(target, ascending=False) from sklearn.preprocessing import OneHotEncoder ohe = OneHotEncoder(sparse=False) data_ohe = ohe.fit_transform(data[['race', 'gender', 'relationship', 'occupation', 'workclass', 'native-country', 'marital-status', 'education']]) cat_columns = [] for cat in ohe.categories_: cat_columns.extend(cat) cat_df = pd.DataFrame(data_ohe, columns=cat_columns) final_df = pd.concat([data.drop(['race', 'gender', 'relationship', 'occupation', 'workclass', 'native-country', 'marital-status', 'education'], axis=1), cat_df], axis=1) final_df.shape X = final_df.drop('income_>50K', axis=1) y = final_df['income_>50K'] data1 = X_train[X_train['Husband'] == 1] data2 = X_train[X_train['Married-civ-spouse'] == 1] data3 = X_train.loc[(X_train['Sales'] == 1) | (X_train['Prof-specialty'] == 1) | (X_train['Exec-managerial'] == 1)] data4 = X_train.loc[~((X_train['Husband'] == 1) | (X_train['Married-civ-spouse'] == 1) | ((X_train['Sales'] == 1) | (X_train['Prof-specialty'] == 1) | (X_train['Exec-managerial'] == 1)))] label1 = y.iloc[list(data1.index)] label2 = y.iloc[list(data2.index)] label3 = y.iloc[list(data3.index)] label4 = y.iloc[list(data4.index)] print(label1.shape, label2.shape, label3.shape, label4.shape)
code
88102651/cell_23
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import OneHotEncoder import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/income/train.csv') data.shape all_columns = list(data.columns) all_columns data.isna().sum() cat_columns = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'gender', 'native-country'] def event_rate_analysis(column, target): temp = (data.groupby(column)[target].sum() / data[target].sum()).to_frame().reset_index() temp['Volume'] = (data.groupby(column)[target].count() / data[target].count()).to_list() temp = temp.sort_values(target, ascending=False) from sklearn.preprocessing import OneHotEncoder ohe = OneHotEncoder(sparse=False) data_ohe = ohe.fit_transform(data[['race', 'gender', 'relationship', 'occupation', 'workclass', 'native-country', 'marital-status', 'education']]) cat_columns = [] for cat in ohe.categories_: cat_columns.extend(cat) cat_df = pd.DataFrame(data_ohe, columns=cat_columns) final_df = pd.concat([data.drop(['race', 'gender', 'relationship', 'occupation', 'workclass', 'native-country', 'marital-status', 'education'], axis=1), cat_df], axis=1) final_df.shape final_df.head()
code
88102651/cell_30
[ "text_plain_output_1.png" ]
from sklearn.metrics import confusion_matrix,recall_score, accuracy_score , classification_report, balanced_accuracy_score from sklearn.tree import DecisionTreeClassifier y_train.value_counts(normalize=True) from sklearn.tree import DecisionTreeClassifier clf = DecisionTreeClassifier(max_depth=20, criterion='entropy', random_state=42) clf.fit(X_train, y_train) from sklearn.metrics import confusion_matrix, recall_score, accuracy_score, classification_report, balanced_accuracy_score def print_performance(text, y_pred): pass y_pred = clf.predict(X_test) print_performance('Single Decision Tree', y_pred)
code
88102651/cell_33
[ "text_html_output_1.png" ]
data1 = X_train[X_train['Husband'] == 1] data2 = X_train[X_train['Married-civ-spouse'] == 1] data3 = X_train.loc[(X_train['Sales'] == 1) | (X_train['Prof-specialty'] == 1) | (X_train['Exec-managerial'] == 1)] data4 = X_train.loc[~((X_train['Husband'] == 1) | (X_train['Married-civ-spouse'] == 1) | ((X_train['Sales'] == 1) | (X_train['Prof-specialty'] == 1) | (X_train['Exec-managerial'] == 1)))] print(data1.shape, data2.shape, data3.shape, data4.shape)
code
88102651/cell_20
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import OneHotEncoder import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/income/train.csv') data.shape all_columns = list(data.columns) all_columns data.isna().sum() cat_columns = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'gender', 'native-country'] def event_rate_analysis(column, target): temp = (data.groupby(column)[target].sum() / data[target].sum()).to_frame().reset_index() temp['Volume'] = (data.groupby(column)[target].count() / data[target].count()).to_list() temp = temp.sort_values(target, ascending=False) from sklearn.preprocessing import OneHotEncoder ohe = OneHotEncoder(sparse=False) data_ohe = ohe.fit_transform(data[['race', 'gender', 'relationship', 'occupation', 'workclass', 'native-country', 'marital-status', 'education']]) cat_columns = [] for cat in ohe.categories_: cat_columns.extend(cat) print(cat_columns)
code
88102651/cell_26
[ "text_plain_output_1.png" ]
y_train.value_counts(normalize=True)
code
88102651/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/income/train.csv') data.shape
code
88102651/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/income/train.csv') data.shape all_columns = list(data.columns) all_columns data.isna().sum() def event_rate_analysis(column, target): temp = (data.groupby(column)[target].sum() / data[target].sum()).to_frame().reset_index() temp['Volume'] = (data.groupby(column)[target].count() / data[target].count()).to_list() temp = temp.sort_values(target, ascending=False) event_rate_analysis('gender', 'income_>50K')
code
88102651/cell_19
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import OneHotEncoder import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/income/train.csv') data.shape all_columns = list(data.columns) all_columns data.isna().sum() def event_rate_analysis(column, target): temp = (data.groupby(column)[target].sum() / data[target].sum()).to_frame().reset_index() temp['Volume'] = (data.groupby(column)[target].count() / data[target].count()).to_list() temp = temp.sort_values(target, ascending=False) from sklearn.preprocessing import OneHotEncoder ohe = OneHotEncoder(sparse=False) print(data.shape) data_ohe = ohe.fit_transform(data[['race', 'gender', 'relationship', 'occupation', 'workclass', 'native-country', 'marital-status', 'education']]) print(data_ohe.shape)
code
88102651/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
88102651/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/income/train.csv') data.shape all_columns = list(data.columns) all_columns data.isna().sum() def event_rate_analysis(column, target): temp = (data.groupby(column)[target].sum() / data[target].sum()).to_frame().reset_index() temp['Volume'] = (data.groupby(column)[target].count() / data[target].count()).to_list() temp = temp.sort_values(target, ascending=False) event_rate_analysis('native-country', 'income_>50K')
code
88102651/cell_28
[ "text_plain_output_1.png" ]
from sklearn.tree import DecisionTreeClassifier y_train.value_counts(normalize=True) from sklearn.tree import DecisionTreeClassifier clf = DecisionTreeClassifier(max_depth=20, criterion='entropy', random_state=42) clf.fit(X_train, y_train)
code
88102651/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/income/train.csv') data.shape all_columns = list(data.columns) all_columns data.isna().sum()
code
88102651/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/income/train.csv') data.shape all_columns = list(data.columns) all_columns data.isna().sum() def event_rate_analysis(column, target): temp = (data.groupby(column)[target].sum() / data[target].sum()).to_frame().reset_index() temp['Volume'] = (data.groupby(column)[target].count() / data[target].count()).to_list() temp = temp.sort_values(target, ascending=False) event_rate_analysis('relationship', 'income_>50K')
code
88102651/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/income/train.csv') data.shape all_columns = list(data.columns) all_columns data.isna().sum() def event_rate_analysis(column, target): temp = (data.groupby(column)[target].sum() / data[target].sum()).to_frame().reset_index() temp['Volume'] = (data.groupby(column)[target].count() / data[target].count()).to_list() temp = temp.sort_values(target, ascending=False) event_rate_analysis('marital-status', 'income_>50K')
code
88102651/cell_3
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/income/train.csv') data.shape all_columns = list(data.columns) all_columns
code
88102651/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/income/train.csv') data.shape all_columns = list(data.columns) all_columns data.isna().sum() def event_rate_analysis(column, target): temp = (data.groupby(column)[target].sum() / data[target].sum()).to_frame().reset_index() temp['Volume'] = (data.groupby(column)[target].count() / data[target].count()).to_list() temp = temp.sort_values(target, ascending=False) event_rate_analysis('education', 'income_>50K')
code
88102651/cell_35
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import OneHotEncoder from sklearn.tree import DecisionTreeClassifier import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/income/train.csv') data.shape all_columns = list(data.columns) all_columns data.isna().sum() cat_columns = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'gender', 'native-country'] def event_rate_analysis(column, target): temp = (data.groupby(column)[target].sum() / data[target].sum()).to_frame().reset_index() temp['Volume'] = (data.groupby(column)[target].count() / data[target].count()).to_list() temp = temp.sort_values(target, ascending=False) from sklearn.preprocessing import OneHotEncoder ohe = OneHotEncoder(sparse=False) data_ohe = ohe.fit_transform(data[['race', 'gender', 'relationship', 'occupation', 'workclass', 'native-country', 'marital-status', 'education']]) cat_columns = [] for cat in ohe.categories_: cat_columns.extend(cat) cat_df = pd.DataFrame(data_ohe, columns=cat_columns) final_df = pd.concat([data.drop(['race', 'gender', 'relationship', 'occupation', 'workclass', 'native-country', 'marital-status', 'education'], axis=1), cat_df], axis=1) final_df.shape X = final_df.drop('income_>50K', axis=1) y = final_df['income_>50K'] dt1 = DecisionTreeClassifier(max_depth=20, criterion='entropy', random_state=42) dt2 = DecisionTreeClassifier(max_depth=20, criterion='entropy', random_state=42) dt3 = DecisionTreeClassifier(max_depth=20, criterion='entropy', random_state=42) dt4 = DecisionTreeClassifier(max_depth=20, criterion='entropy', random_state=42) data1 = X_train[X_train['Husband'] == 1] data2 = X_train[X_train['Married-civ-spouse'] == 1] data3 = X_train.loc[(X_train['Sales'] == 1) | (X_train['Prof-specialty'] == 1) | (X_train['Exec-managerial'] == 1)] data4 = X_train.loc[~((X_train['Husband'] == 1) | (X_train['Married-civ-spouse'] == 1) | ((X_train['Sales'] == 1) | (X_train['Prof-specialty'] == 1) | (X_train['Exec-managerial'] == 1)))] label1 = y.iloc[list(data1.index)] label2 = y.iloc[list(data2.index)] label3 = y.iloc[list(data3.index)] label4 = y.iloc[list(data4.index)] dt1.fit(data1, label1) dt2.fit(data2, label2) dt3.fit(data3, label3) dt4.fit(data4, label4)
code
88102651/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/income/train.csv')
data.shape
all_columns = list(data.columns)
all_columns
data.isna().sum()
def event_rate_analysis(column, target):
    temp = (data.groupby(column)[target].sum() / data[target].sum()).to_frame().reset_index()
    temp['Volume'] = (data.groupby(column)[target].count() / data[target].count()).to_list()
    temp = temp.sort_values(target, ascending=False)
event_rate_analysis('workclass', 'income_>50K')
code
88102651/cell_22
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import OneHotEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/income/train.csv')
data.shape
all_columns = list(data.columns)
all_columns
data.isna().sum()
cat_columns = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'gender', 'native-country']
def event_rate_analysis(column, target):
    temp = (data.groupby(column)[target].sum() / data[target].sum()).to_frame().reset_index()
    temp['Volume'] = (data.groupby(column)[target].count() / data[target].count()).to_list()
    temp = temp.sort_values(target, ascending=False)
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder(sparse=False)
data_ohe = ohe.fit_transform(data[['race', 'gender', 'relationship', 'occupation', 'workclass', 'native-country', 'marital-status', 'education']])
cat_columns = []
for cat in ohe.categories_:
    cat_columns.extend(cat)
cat_df = pd.DataFrame(data_ohe, columns=cat_columns)
final_df = pd.concat([data.drop(['race', 'gender', 'relationship', 'occupation', 'workclass', 'native-country', 'marital-status', 'education'], axis=1), cat_df], axis=1)
final_df.shape
code
88102651/cell_37
[ "text_plain_output_1.png" ]
from sklearn.metrics import confusion_matrix,recall_score, accuracy_score , classification_report, balanced_accuracy_score
from sklearn.preprocessing import OneHotEncoder
from sklearn.tree import DecisionTreeClassifier
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/income/train.csv')
data.shape
all_columns = list(data.columns)
all_columns
data.isna().sum()
cat_columns = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'gender', 'native-country']
def event_rate_analysis(column, target):
    temp = (data.groupby(column)[target].sum() / data[target].sum()).to_frame().reset_index()
    temp['Volume'] = (data.groupby(column)[target].count() / data[target].count()).to_list()
    temp = temp.sort_values(target, ascending=False)
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder(sparse=False)
data_ohe = ohe.fit_transform(data[['race', 'gender', 'relationship', 'occupation', 'workclass', 'native-country', 'marital-status', 'education']])
cat_columns = []
for cat in ohe.categories_:
    cat_columns.extend(cat)
cat_df = pd.DataFrame(data_ohe, columns=cat_columns)
final_df = pd.concat([data.drop(['race', 'gender', 'relationship', 'occupation', 'workclass', 'native-country', 'marital-status', 'education'], axis=1), cat_df], axis=1)
final_df.shape
X = final_df.drop('income_>50K', axis=1)
y = final_df['income_>50K']
y_train.value_counts(normalize=True)
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier(max_depth=20, criterion='entropy', random_state=42)
clf.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix, recall_score, accuracy_score, classification_report, balanced_accuracy_score
def print_performance(text, y_pred):
    pass
y_pred = clf.predict(X_test)
dt1 = DecisionTreeClassifier(max_depth=20, criterion='entropy', random_state=42)
dt2 = DecisionTreeClassifier(max_depth=20, criterion='entropy', random_state=42)
dt3 = DecisionTreeClassifier(max_depth=20, criterion='entropy', random_state=42)
dt4 = DecisionTreeClassifier(max_depth=20, criterion='entropy', random_state=42)
data1 = X_train[X_train['Husband'] == 1]
data2 = X_train[X_train['Married-civ-spouse'] == 1]
data3 = X_train.loc[(X_train['Sales'] == 1) | (X_train['Prof-specialty'] == 1) | (X_train['Exec-managerial'] == 1)]
data4 = X_train.loc[~((X_train['Husband'] == 1) | (X_train['Married-civ-spouse'] == 1) | ((X_train['Sales'] == 1) | (X_train['Prof-specialty'] == 1) | (X_train['Exec-managerial'] == 1)))]
label1 = y.iloc[list(data1.index)]
label2 = y.iloc[list(data2.index)]
label3 = y.iloc[list(data3.index)]
label4 = y.iloc[list(data4.index)]
dt1.fit(data1, label1)
dt2.fit(data2, label2)
dt3.fit(data3, label3)
dt4.fit(data4, label4)
y_pred = []
for index, row in X_test.iterrows():
    if row['Husband'] == 1:
        pred = dt1.predict([row])[0]
        if pred == 1:
            y_pred.append(pred)
            continue
    if row['Married-civ-spouse'] == 1:
        pred = dt2.predict([row])[0]
        if pred == 1:
            y_pred.append(pred)
            continue
    if row['Sales'] == 1 or row['Exec-managerial'] == 1 or row['Prof-specialty'] == 1:
        pred = dt2.predict([row])[0]
        if pred == 1:
            y_pred.append(pred)
            continue
    pred = dt3.predict([row])[0]
    if pred == 1:
        y_pred.append(pred)
    else:
        y_pred.append(0)
print_performance('Segmented Decision Tree', y_pred)
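# Note: each test row is routed to a segment-specific tree (Husband, Married-civ-spouse, selected occupations,
# then a fallback); a positive prediction short-circuits via continue. dt4 is fitted but never used in this loop,
# and the loop/if indentation above is reconstructed from context.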
code
88102651/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/income/train.csv')
data.shape
all_columns = list(data.columns)
all_columns
data.isna().sum()
def event_rate_analysis(column, target):
    temp = (data.groupby(column)[target].sum() / data[target].sum()).to_frame().reset_index()
    temp['Volume'] = (data.groupby(column)[target].count() / data[target].count()).to_list()
    temp = temp.sort_values(target, ascending=False)
event_rate_analysis('race', 'income_>50K')
code
88102651/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/income/train.csv')
data.shape
all_columns = list(data.columns)
all_columns
print('Unique Occupation :', data['occupation'].nunique())
print(data['occupation'].unique())
print('Unique workclass :', data['workclass'].nunique())
print(data['workclass'].unique())
print('Unique native-country :', data['native-country'].nunique())
print(data['native-country'].unique())
code
33116081/cell_42
[ "text_html_output_1.png" ]
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
import seaborn as sns
plt.style.use('seaborn-colorblind')
plt.style.use('seaborn-whitegrid')
import os
plt.style.available
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_passengerId = test_df['PassengerId']
def bar_plot(variable):
    """
    input: varible exmple: "sex"
    output: bar plot& value count
    """
    var = train_df[variable]
    varValue = var.value_counts()
    plt.xticks(varValue.index, varValue.index.values)
def plot_hist(variable):
    pass
def detectOutliers(df, features):
    outlier_indices = []
    for i in features:
        Q1 = np.percentile(df[i], 25)
        Q3 = np.percentile(df[i], 75)
        IQR = Q3 - Q1
        outlierStep = IQR * 1.5
        outlier_listCol = df[(df[i] < Q1 - outlierStep) | (df[i] > Q3 + outlierStep)].index
        outlier_indices.extend(outlier_listCol)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, k in outlier_indices.items() if k > 2))
    return multiple_outliers
train_df.loc[detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)
train_df
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
list1 = ['SibSp', 'Parch', 'Age', 'Fare', 'Survived']
sns.heatmap(train_df[list1].corr(), annot=True, fmt='.3f')
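# Note: detectOutliers applies Tukey's IQR rule per feature and returns the indices of rows that are
# outliers in more than two of the listed columns; those rows are dropped before train and test are concatenated.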
code
33116081/cell_21
[ "text_html_output_1.png" ]
import pandas as pd
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_passengerId = test_df['PassengerId']
train_df[['Parch', 'Survived']].groupby(['Parch'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
33116081/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_passengerId = test_df['PassengerId']
category2 = ['Cabin', 'Name', 'Ticket']
for c in category2:
    print('{} \n'.format(train_df[c].value_counts()))
code
33116081/cell_34
[ "text_plain_output_1.png" ]
from collections import Counter
import numpy as np
import pandas as pd
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_passengerId = test_df['PassengerId']
def detectOutliers(df, features):
    outlier_indices = []
    for i in features:
        Q1 = np.percentile(df[i], 25)
        Q3 = np.percentile(df[i], 75)
        IQR = Q3 - Q1
        outlierStep = IQR * 1.5
        outlier_listCol = df[(df[i] < Q1 - outlierStep) | (df[i] > Q3 + outlierStep)].index
        outlier_indices.extend(outlier_listCol)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, k in outlier_indices.items() if k > 2))
    return multiple_outliers
train_df.loc[detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)
train_df
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
train_df[train_df['Embarked'].isnull()]
code
33116081/cell_23
[ "text_html_output_1.png" ]
import pandas as pd
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_passengerId = test_df['PassengerId']
train_df[['Fare', 'Survived']].groupby(['Fare'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
33116081/cell_20
[ "text_html_output_1.png" ]
import pandas as pd
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_passengerId = test_df['PassengerId']
train_df[['SibSp', 'Survived']].groupby(['SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
33116081/cell_29
[ "text_html_output_1.png" ]
from collections import Counter
import numpy as np
import pandas as pd
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_passengerId = test_df['PassengerId']
def detectOutliers(df, features):
    outlier_indices = []
    for i in features:
        Q1 = np.percentile(df[i], 25)
        Q3 = np.percentile(df[i], 75)
        IQR = Q3 - Q1
        outlierStep = IQR * 1.5
        outlier_listCol = df[(df[i] < Q1 - outlierStep) | (df[i] > Q3 + outlierStep)].index
        outlier_indices.extend(outlier_listCol)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, k in outlier_indices.items() if k > 2))
    return multiple_outliers
train_df.loc[detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)
train_df
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
code
33116081/cell_39
[ "text_html_output_1.png" ]
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
import seaborn as sns
plt.style.use('seaborn-colorblind')
plt.style.use('seaborn-whitegrid')
import os
plt.style.available
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_passengerId = test_df['PassengerId']
def bar_plot(variable):
    """
    input: varible exmple: "sex"
    output: bar plot& value count
    """
    var = train_df[variable]
    varValue = var.value_counts()
    plt.xticks(varValue.index, varValue.index.values)
def plot_hist(variable):
    pass
def detectOutliers(df, features):
    outlier_indices = []
    for i in features:
        Q1 = np.percentile(df[i], 25)
        Q3 = np.percentile(df[i], 75)
        IQR = Q3 - Q1
        outlierStep = IQR * 1.5
        outlier_listCol = df[(df[i] < Q1 - outlierStep) | (df[i] > Q3 + outlierStep)].index
        outlier_indices.extend(outlier_listCol)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, k in outlier_indices.items() if k > 2))
    return multiple_outliers
train_df.loc[detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)
train_df
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
train_df[train_df['Fare'].isnull()]
code
33116081/cell_48
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
import seaborn as sns
plt.style.use('seaborn-colorblind')
plt.style.use('seaborn-whitegrid')
import os
plt.style.available
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_passengerId = test_df['PassengerId']
def bar_plot(variable):
    """
    input: varible exmple: "sex"
    output: bar plot& value count
    """
    var = train_df[variable]
    varValue = var.value_counts()
    plt.xticks(varValue.index, varValue.index.values)
def plot_hist(variable):
    pass
def detectOutliers(df, features):
    outlier_indices = []
    for i in features:
        Q1 = np.percentile(df[i], 25)
        Q3 = np.percentile(df[i], 75)
        IQR = Q3 - Q1
        outlierStep = IQR * 1.5
        outlier_listCol = df[(df[i] < Q1 - outlierStep) | (df[i] > Q3 + outlierStep)].index
        outlier_indices.extend(outlier_listCol)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, k in outlier_indices.items() if k > 2))
    return multiple_outliers
train_df.loc[detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)
train_df
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
list1 = ['SibSp', 'Parch', 'Age', 'Fare', 'Survived']
g = sns.factorplot(x="SibSp",y = "Survived",data = train_df,kind = "bar",size = 6)
g.set_ylabels("Survived Probabilty")
plt.show()
g = sns.factorplot(x='Parch', y='Survived', data=train_df, kind='bar', size=5)
g.set_ylabels('Survied Probability')
plt.show()
code
33116081/cell_2
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
import seaborn as sns
plt.style.use('seaborn-colorblind')
plt.style.use('seaborn-whitegrid')
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33116081/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_passengerId = test_df['PassengerId']
train_df[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
33116081/cell_7
[ "image_output_1.png" ]
import pandas as pd
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_passengerId = test_df['PassengerId']
train_df.info()
code
33116081/cell_45
[ "text_plain_output_1.png", "image_output_1.png" ]
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
import seaborn as sns
plt.style.use('seaborn-colorblind')
plt.style.use('seaborn-whitegrid')
import os
plt.style.available
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_passengerId = test_df['PassengerId']
def bar_plot(variable):
    """
    input: varible exmple: "sex"
    output: bar plot& value count
    """
    var = train_df[variable]
    varValue = var.value_counts()
    plt.xticks(varValue.index, varValue.index.values)
def plot_hist(variable):
    pass
def detectOutliers(df, features):
    outlier_indices = []
    for i in features:
        Q1 = np.percentile(df[i], 25)
        Q3 = np.percentile(df[i], 75)
        IQR = Q3 - Q1
        outlierStep = IQR * 1.5
        outlier_listCol = df[(df[i] < Q1 - outlierStep) | (df[i] > Q3 + outlierStep)].index
        outlier_indices.extend(outlier_listCol)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, k in outlier_indices.items() if k > 2))
    return multiple_outliers
train_df.loc[detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)
train_df
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
list1 = ['SibSp', 'Parch', 'Age', 'Fare', 'Survived']
g = sns.factorplot(x='SibSp', y='Survived', data=train_df, kind='bar', size=6)
g.set_ylabels('Survived Probabilty')
plt.show()
code
33116081/cell_18
[ "text_plain_output_5.png", "text_plain_output_4.png", "image_output_5.png", "text_plain_output_6.png", "text_plain_output_3.png", "image_output_4.png", "image_output_6.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_passengerId = test_df['PassengerId']
train_df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
33116081/cell_32
[ "text_plain_output_1.png" ]
from collections import Counter
import numpy as np
import pandas as pd
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_passengerId = test_df['PassengerId']
def detectOutliers(df, features):
    outlier_indices = []
    for i in features:
        Q1 = np.percentile(df[i], 25)
        Q3 = np.percentile(df[i], 75)
        IQR = Q3 - Q1
        outlierStep = IQR * 1.5
        outlier_listCol = df[(df[i] < Q1 - outlierStep) | (df[i] > Q3 + outlierStep)].index
        outlier_indices.extend(outlier_listCol)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, k in outlier_indices.items() if k > 2))
    return multiple_outliers
train_df.loc[detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)
train_df
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
code
33116081/cell_51
[ "image_output_1.png" ]
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
import seaborn as sns
plt.style.use('seaborn-colorblind')
plt.style.use('seaborn-whitegrid')
import os
plt.style.available
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_passengerId = test_df['PassengerId']
def bar_plot(variable):
    """
    input: varible exmple: "sex"
    output: bar plot& value count
    """
    var = train_df[variable]
    varValue = var.value_counts()
    plt.xticks(varValue.index, varValue.index.values)
def plot_hist(variable):
    pass
def detectOutliers(df, features):
    outlier_indices = []
    for i in features:
        Q1 = np.percentile(df[i], 25)
        Q3 = np.percentile(df[i], 75)
        IQR = Q3 - Q1
        outlierStep = IQR * 1.5
        outlier_listCol = df[(df[i] < Q1 - outlierStep) | (df[i] > Q3 + outlierStep)].index
        outlier_indices.extend(outlier_listCol)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, k in outlier_indices.items() if k > 2))
    return multiple_outliers
train_df.loc[detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)
train_df
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
list1 = ['SibSp', 'Parch', 'Age', 'Fare', 'Survived']
g = sns.factorplot(x="SibSp",y = "Survived",data = train_df,kind = "bar",size = 6)
g.set_ylabels("Survived Probabilty")
plt.show()
g = sns.factorplot(x= "Parch",y= "Survived",data = train_df,kind="bar",size=5)
g.set_ylabels("Survied Probability")
plt.show()
g = sns.factorplot(x='Pclass', y='Survived', data=train_df, kind='bar', size=5)
g.set_ylabels('Survived Probability')
plt.show()
code
33116081/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import os
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
import seaborn as sns
plt.style.use('seaborn-colorblind')
plt.style.use('seaborn-whitegrid')
import os
plt.style.available
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_passengerId = test_df['PassengerId']
def bar_plot(variable):
    """
    input: varible exmple: "sex"
    output: bar plot& value count
    """
    var = train_df[variable]
    varValue = var.value_counts()
    plt.xticks(varValue.index, varValue.index.values)
def plot_hist(variable):
    pass
numVar = ['Fare', 'Age', 'PassengerId']
for n in numVar:
    plot_hist(n)
code
33116081/cell_3
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
import seaborn as sns
plt.style.use('seaborn-colorblind')
plt.style.use('seaborn-whitegrid')
import os
plt.style.available
code
33116081/cell_35
[ "text_html_output_1.png" ]
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
import seaborn as sns
plt.style.use('seaborn-colorblind')
plt.style.use('seaborn-whitegrid')
import os
plt.style.available
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_passengerId = test_df['PassengerId']
def bar_plot(variable):
    """
    input: varible exmple: "sex"
    output: bar plot& value count
    """
    var = train_df[variable]
    varValue = var.value_counts()
    plt.xticks(varValue.index, varValue.index.values)
def plot_hist(variable):
    pass
def detectOutliers(df, features):
    outlier_indices = []
    for i in features:
        Q1 = np.percentile(df[i], 25)
        Q3 = np.percentile(df[i], 75)
        IQR = Q3 - Q1
        outlierStep = IQR * 1.5
        outlier_listCol = df[(df[i] < Q1 - outlierStep) | (df[i] > Q3 + outlierStep)].index
        outlier_indices.extend(outlier_listCol)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, k in outlier_indices.items() if k > 2))
    return multiple_outliers
train_df.loc[detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)
train_df
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
train_df.boxplot(column='Fare', by='Embarked')
plt.show()
code
33116081/cell_31
[ "application_vnd.jupyter.stderr_output_1.png" ]
from collections import Counter
import numpy as np
import pandas as pd
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_passengerId = test_df['PassengerId']
def detectOutliers(df, features):
    outlier_indices = []
    for i in features:
        Q1 = np.percentile(df[i], 25)
        Q3 = np.percentile(df[i], 75)
        IQR = Q3 - Q1
        outlierStep = IQR * 1.5
        outlier_listCol = df[(df[i] < Q1 - outlierStep) | (df[i] > Q3 + outlierStep)].index
        outlier_indices.extend(outlier_listCol)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, k in outlier_indices.items() if k > 2))
    return multiple_outliers
train_df.loc[detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)
train_df
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
code
33116081/cell_24
[ "text_html_output_1.png" ]
import pandas as pd
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_passengerId = test_df['PassengerId']
train_df
code
33116081/cell_22
[ "text_html_output_1.png" ]
import pandas as pd
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_passengerId = test_df['PassengerId']
train_df[['Parch', 'SibSp', 'Survived']].groupby(['Parch', 'SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
33116081/cell_27
[ "text_html_output_1.png" ]
from collections import Counter
import numpy as np
import pandas as pd
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_passengerId = test_df['PassengerId']
def detectOutliers(df, features):
    outlier_indices = []
    for i in features:
        Q1 = np.percentile(df[i], 25)
        Q3 = np.percentile(df[i], 75)
        IQR = Q3 - Q1
        outlierStep = IQR * 1.5
        outlier_listCol = df[(df[i] < Q1 - outlierStep) | (df[i] > Q3 + outlierStep)].index
        outlier_indices.extend(outlier_listCol)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, k in outlier_indices.items() if k > 2))
    return multiple_outliers
train_df.loc[detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)
train_df
code
33116081/cell_37
[ "image_output_1.png" ]
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
import seaborn as sns
plt.style.use('seaborn-colorblind')
plt.style.use('seaborn-whitegrid')
import os
plt.style.available
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_passengerId = test_df['PassengerId']
def bar_plot(variable):
    """
    input: varible exmple: "sex"
    output: bar plot& value count
    """
    var = train_df[variable]
    varValue = var.value_counts()
    plt.xticks(varValue.index, varValue.index.values)
def plot_hist(variable):
    pass
def detectOutliers(df, features):
    outlier_indices = []
    for i in features:
        Q1 = np.percentile(df[i], 25)
        Q3 = np.percentile(df[i], 75)
        IQR = Q3 - Q1
        outlierStep = IQR * 1.5
        outlier_listCol = df[(df[i] < Q1 - outlierStep) | (df[i] > Q3 + outlierStep)].index
        outlier_indices.extend(outlier_listCol)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, k in outlier_indices.items() if k > 2))
    return multiple_outliers
train_df.loc[detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare'])]
train_df = train_df.drop(detectOutliers(train_df, ['Age', 'SibSp', 'Parch', 'Fare']), axis=0).reset_index(drop=True)
train_df
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
train_df[train_df['Fare'].isnull()]
code
33116081/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import os
import pandas as pd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
import seaborn as sns
plt.style.use('seaborn-colorblind')
plt.style.use('seaborn-whitegrid')
import os
plt.style.available
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_passengerId = test_df['PassengerId']
def bar_plot(variable):
    """
    input: varible exmple: "sex"
    output: bar plot& value count
    """
    var = train_df[variable]
    varValue = var.value_counts()
    plt.xticks(varValue.index, varValue.index.values)
category1 = ['Survived', 'Sex', 'Pclass', 'Embarked', 'SibSp', 'Parch']
for c in category1:
    bar_plot(c)
code
88096014/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/dataset-for-practicing-classification/exercises-logistic-regression-exercise-1/nba_logreg.csv')
data.describe().T
data.columns.to_list()
cols_pred = []
col_target = 'TARGET_5Yrs'
for col in data.columns.to_list():
    if col not in ['Name', 'TARGET_5Yrs']:
        cols_pred.append(col)
data[cols_pred].dtypes
code
88096014/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/dataset-for-practicing-classification/exercises-logistic-regression-exercise-1/nba_logreg.csv')
data.describe().T
code
88096014/cell_2
[ "text_plain_output_1.png" ]
!pip install skorecard optbinning
code
88096014/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('/kaggle/input/dataset-for-practicing-classification/exercises-logistic-regression-exercise-1/nba_logreg.csv')
data.describe().T
data.columns.to_list()
cols_pred = []
col_target = 'TARGET_5Yrs'
for col in data.columns.to_list():
    if col not in ['Name', 'TARGET_5Yrs']:
        cols_pred.append(col)
sns.countplot(x=col_target, data=data)
plt.show()
code
88096014/cell_7
[ "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/dataset-for-practicing-classification/exercises-logistic-regression-exercise-1/nba_logreg.csv')
data.describe().T
data.columns.to_list()
code
88096014/cell_8
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/dataset-for-practicing-classification/exercises-logistic-regression-exercise-1/nba_logreg.csv')
data.describe().T
data.columns.to_list()
cols_pred = []
col_target = 'TARGET_5Yrs'
for col in data.columns.to_list():
    if col not in ['Name', 'TARGET_5Yrs']:
        cols_pred.append(col)
print(cols_pred)
code
88096014/cell_16
[ "text_html_output_1.png" ]
from sklearn.pipeline import make_pipeline
from skorecard.bucketers import DecisionTreeBucketer, OptimalBucketer
from skorecard.pipeline import BucketingProcess
from skorecard.preprocessing import WoeEncoder
import pandas as pd
data = pd.read_csv('/kaggle/input/dataset-for-practicing-classification/exercises-logistic-regression-exercise-1/nba_logreg.csv')
data.describe().T
data.columns.to_list()
cols_pred = []
col_target = 'TARGET_5Yrs'
for col in data.columns.to_list():
    if col not in ['Name', 'TARGET_5Yrs']:
        cols_pred.append(col)
from skorecard.bucketers import DecisionTreeBucketer, OptimalBucketer
from skorecard.preprocessing import WoeEncoder
from skorecard.pipeline import BucketingProcess
from sklearn.pipeline import make_pipeline
bucketing_process = BucketingProcess(prebucketing_pipeline=make_pipeline(DecisionTreeBucketer(variables=cols_pred, max_n_bins=100, min_bin_size=0.05)), bucketing_pipeline=make_pipeline(OptimalBucketer(variables=cols_pred, max_n_bins=10, min_bin_size=0.05)))
woe_pipe = make_pipeline(bucketing_process, WoeEncoder())
x_train_woe = woe_pipe.fit_transform(x_train, y_train)
x_test_woe = woe_pipe.transform(x_test)
bucketing_process.summary()
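# Note: BucketingProcess pre-bins each predictor with DecisionTreeBucketer, refines the bins with OptimalBucketer,
# and WoeEncoder then maps each bin to its weight of evidence; x_train/x_test/y_train are assumed to come from an earlier split cell.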
code
88096014/cell_17
[ "text_plain_output_1.png" ]
data_woe
code
88096014/cell_10
[ "text_plain_output_1.png" ]
import missingno as msno
msno.matrix(data[cols_pred + [col_target]])
code
88096014/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('/kaggle/input/dataset-for-practicing-classification/exercises-logistic-regression-exercise-1/nba_logreg.csv')
data.head()
code
128026152/cell_21
[ "text_plain_output_1.png" ]
name
code
128026152/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import ipywidgets as widgets
widgets.FloatSlider(value=7.5, min=0, max=10.0, step=0.1, description='Test:', disabled=False, continuous_update=False, orientation='vertical', readout=True, readout_format='.1f')
widgets.IntRangeSlider(value=[5, 7], min=0, max=10, step=1, description='Test:', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='d')
widgets.IntProgress(value=7, min=0, max=10, description='Loading:', bar_style='', orientation='horizontal')
widgets.BoundedIntText(value=7, min=0, max=10, step=1, description='Text:', disabled=False)
widgets.IntText(value=7, description='Any:', disabled=False)
btn = widgets.ToggleButton(value=False, description='Click me', disabled=False, button_style='success', tooltip='Description', icon='check')
btn
code
128026152/cell_17
[ "text_plain_output_1.png" ]
import ipywidgets as widgets
widgets.FloatSlider(value=7.5, min=0, max=10.0, step=0.1, description='Test:', disabled=False, continuous_update=False, orientation='vertical', readout=True, readout_format='.1f')
widgets.IntRangeSlider(value=[5, 7], min=0, max=10, step=1, description='Test:', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='d')
widgets.IntProgress(value=7, min=0, max=10, description='Loading:', bar_style='', orientation='horizontal')
widgets.BoundedIntText(value=7, min=0, max=10, step=1, description='Text:', disabled=False)
widgets.IntText(value=7, description='Any:', disabled=False)
btn = widgets.ToggleButton(value=False, description='Click me', disabled=False, button_style='success', tooltip='Description', icon='check')
btn
widgets.Checkbox(value=False, description='Check me', disabled=False, indent=False)
widgets.Valid(value=True, description='Valid!')
widgets.Dropdown(options=['1', '2', '3'], value='2', description='Number:', disabled=False)
ddd = widgets.Dropdown(options=[('One', 1), ('Two', 2), ('Three', 3)], value=1, description='Number:')
ddd
widgets.RadioButtons(options=['pepperoni', 'pineapple', 'anchovies'], description='Pizza topping:', disabled=False)
widgets.Box([widgets.Label(value='Pizza topping with a very long label:'), widgets.RadioButtons(options=['pepperoni', 'pineapple', 'anchovies', 'and the long name that will fit fine and the long name that will fit fine and the long name that will fit fine '], layout={'width': 'max-content'})])
code
128026152/cell_10
[ "text_plain_output_1.png" ]
import ipywidgets as widgets
widgets.FloatSlider(value=7.5, min=0, max=10.0, step=0.1, description='Test:', disabled=False, continuous_update=False, orientation='vertical', readout=True, readout_format='.1f')
widgets.IntRangeSlider(value=[5, 7], min=0, max=10, step=1, description='Test:', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='d')
widgets.IntProgress(value=7, min=0, max=10, description='Loading:', bar_style='', orientation='horizontal')
widgets.BoundedIntText(value=7, min=0, max=10, step=1, description='Text:', disabled=False)
widgets.IntText(value=7, description='Any:', disabled=False)
btn = widgets.ToggleButton(value=False, description='Click me', disabled=False, button_style='success', tooltip='Description', icon='check')
btn
btn.value
code
17130389/cell_6
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
quartet = pd.read_csv('../input/quartet.csv', index_col='id')
print(quartet)
code
17130389/cell_8
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
quartet = pd.read_csv('../input/quartet.csv', index_col='id')
quartet.describe()
code
17130389/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
quartet = pd.read_csv('../input/quartet.csv', index_col='id')
quartet.groupby('dataset').agg(['mean', 'std'])
code
325211/cell_4
[ "image_output_1.png" ]
import pandas as pd
import seaborn as sns
act_df = pd.read_csv('../input/act_train.csv', sep=',')
sns.countplot(x='activity_category', data=act_df, hue='outcome')
sns.plt.show()
code
325211/cell_6
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
act_df = pd.read_csv('../input/act_train.csv', sep=',')
fig, ax = plt.subplots()
fig.set_size_inches(30, 20)
h = sns.countplot(x='char_1',data=act_df,hue='outcome',ax=ax)
h.set_xticklabels(h.get_xticklabels(),rotation=50)
sns.plt.show()
fig, ax = plt.subplots()
fig.set_size_inches(30, 20)
people_df = pd.read_csv('../input/people.csv', sep=',')
group_based_ppl_count = people_df.groupby(['group_1']).count().sort_values(by='people_id', ascending=[0])
group_based_ppl_count = group_based_ppl_count.reset_index()
group_based_ppl_count = group_based_ppl_count.ix[:20,]
g = sns.barplot(x='group_1', y='people_id', data=group_based_ppl_count, ax=ax)
code
325211/cell_8
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
act_df = pd.read_csv('../input/act_train.csv', sep=',')
fig, ax = plt.subplots()
fig.set_size_inches(30, 20)
h = sns.countplot(x='char_1',data=act_df,hue='outcome',ax=ax)
h.set_xticklabels(h.get_xticklabels(),rotation=50)
sns.plt.show()
fig, ax = plt.subplots()
fig.set_size_inches(30, 20)
people_df = pd.read_csv('../input/people.csv',sep=',')
group_based_ppl_count = people_df.groupby(['group_1']).count().sort_values(by='people_id',ascending=[0])
group_based_ppl_count = group_based_ppl_count.reset_index()
group_based_ppl_count = group_based_ppl_count.ix[:20,]
g = sns.barplot(x='group_1',y='people_id',data=group_based_ppl_count,ax=ax)
people_df = pd.read_csv('../input/people.csv', sep=',', parse_dates=['date'])
activity_df = pd.read_csv('../input/act_train.csv', sep=',', parse_dates=['date'])
def sanitizepeople():
    sn_fileds = ['char_1', 'group_1', 'char_2', 'date', 'char_3', 'char_4', 'char_5', 'char_6', 'char_7', 'char_8', 'char_9']
    for filed in sn_fileds:
        if 'group' in filed:
            people_df[filed] = people_df[filed].str.lstrip('group ').astype(np.float)
        elif 'char_' in filed:
            people_df[filed] = people_df[filed].fillna('-999')
            people_df[filed] = people_df[filed].str.lstrip('type ').astype(np.float)
        else:
            people_df['year'] = people_df[filed].dt.year
            people_df['month'] = people_df[filed].dt.month
            people_df['day'] = people_df[filed].dt.day
    people_df1 = people_df.drop(['date'], axis=1)
    return people_df1
def sanitizeactivity():
    sn_fileds = ['date', 'activity_category', 'char_1', 'char_2', 'char_3', 'char_4', 'char_5', 'char_6', 'char_7', 'char_8', 'char_9', 'char_10']
    for filed in sn_fileds:
        if 'char_' in filed or 'activity' in filed:
            activity_df[filed] = activity_df[filed].fillna('-999')
            activity_df[filed] = activity_df[filed].str.lstrip('type ').astype(np.float)
        else:
            activity_df['year'] = activity_df[filed].dt.year
            activity_df['month'] = activity_df[filed].dt.month
            activity_df['day'] = activity_df[filed].dt.day
    activity_df1 = activity_df.drop(['date'], axis=1)
    return activity_df1
people_nrm_df = sanitizepeople()
activity_nrm_df = sanitizeactivity()
j_df = pd.merge(people_nrm_df, activity_nrm_df, how='left', on='people_id', left_index='True')
fig, ax = plt.subplots()
fig.set_size_inches(30, 20)
j_top20grp_grpby = j_df.groupby(['group_1']).sum().sort_values(by='outcome', ascending=[0])
j_top20grp_grpby = j_top20grp_grpby.reset_index()
top20group = j_top20grp_grpby['group_1'].astype(np.int).tolist()
top20group = top20group[:50]
j_top20grp_df = j_df.loc[j_df['group_1'].isin(top20group)]
j_top20grp_df = j_top20grp_df[['group_1', 'outcome']]
h = sns.countplot(x='group_1', data=j_top20grp_df, hue='outcome', ax=ax)
h.set_xticklabels(h.get_xticklabels(), rotation=50)
sns.plt.show()
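# Note: sanitizepeople/sanitizeactivity strip the 'group '/'type ' prefixes to numeric codes, fill missing char_* values
# with -999, and expand the date column into year/month/day before dropping it; placing the final drop/return outside
# the for loop is inferred from context.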
code
325211/cell_3
[ "image_output_1.png" ]
import pandas as pd
import seaborn as sns
act_df = pd.read_csv('../input/act_train.csv', sep=',')
sns.countplot(x='outcome', data=act_df)
sns.plt.show()
code
325211/cell_5
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
act_df = pd.read_csv('../input/act_train.csv', sep=',')
fig, ax = plt.subplots()
fig.set_size_inches(30, 20)
h = sns.countplot(x='char_1', data=act_df, hue='outcome', ax=ax)
h.set_xticklabels(h.get_xticklabels(), rotation=50)
sns.plt.show()
code
128020060/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
players.isnull().sum()
code
128020060/cell_6
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
players.isnull().sum()
players.describe()
code
128020060/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
seasons_stats[seasons_stats.Age > 40][seasons_stats.PTS > 100]
seasons_stats[seasons_stats.Player == 'LeBron James'].plot(x='Year', y='PTS', figsize=(12, 8))
code
128020060/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128020060/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
players.isnull().sum()
print('Tallest player : {0} - {1} cm'.format(players['height'].idxmax(), players['height'].max()))
print('Smallest player: {0} - {1} cm'.format(players['height'].idxmin(), players['height'].min()))
print()
print('Heaviest player: {0} - {1} kg'.format(players['weight'].idxmax(), players['weight'].max()))
print('Lightest player: {0} - {1} kg'.format(players['weight'].idxmin(), players['weight'].min()))
print()
print('Height average of players: ', players['height'].mean())
print('Weight average of players: ', players['weight'].mean())
code
128020060/cell_8
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
players.isnull().sum()
players.plot(x='height', y='weight', kind='scatter', figsize=(12, 8))
code
128020060/cell_3
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
len(players)
code
128020060/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
seasons_stats[seasons_stats.Age > 40][seasons_stats.PTS > 100]
code