path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1)
---|---|---|---|
2010673/cell_9 | [
"image_output_1.png"
] | from collections import Counter
import nltk
import os
import pandas as pd
path = '../input/state-of-the-union-corpus-1989-2017'
dirs = os.listdir(path)
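# count_words: read one speech, lowercase it, tokenize it with NLTK, and
# return how often `word` occurs in it, e.g.
#   count_words('economy', 'Bush_2003.txt')  # -> count of the token 'economy'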
def count_words(word, filename):
file = open(path + '/' + filename, encoding='utf8')
text = file.read().lower()
words = nltk.word_tokenize(text)
word_counter = Counter(words)
word_count = word_counter[word]
return word_count
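# Filenames follow '<President>_<year>.txt', so filename[-8:-4] extracts the
# year; both 'job' and 'jobs' are counted per address.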
file_dict_list = []
for filename in dirs:
file_dict = {}
job_word_count = count_words('job', filename) + count_words('jobs', filename)
file_dict['year'] = int(filename[-8:-4])
file_dict['job_word_count'] = job_word_count
file_dict_list.append(file_dict)
df = pd.DataFrame(file_dict_list)
df | code |
2010673/cell_6 | [
"image_output_1.png"
] | import os
path = '../input/state-of-the-union-corpus-1989-2017'
dirs = os.listdir(path)
dirs | code |
2010673/cell_11 | [
"text_plain_output_1.png"
] | from collections import Counter
import matplotlib.pyplot as plt
import nltk
import os
import pandas as pd
path = '../input/state-of-the-union-corpus-1989-2017'
dirs = os.listdir(path)
def count_words(word, filename):
file = open(path + '/' + filename, encoding='utf8')
text = file.read().lower()
words = nltk.word_tokenize(text)
word_counter = Counter(words)
word_count = word_counter[word]
return word_count
file_dict_list = []
for filename in dirs:
file_dict = {}
job_word_count = count_words('job', filename) + count_words('jobs', filename)
file_dict['year'] = int(filename[-8:-4])
file_dict['job_word_count'] = job_word_count
file_dict_list.append(file_dict)
df = pd.DataFrame(file_dict_list)
df
df.set_index('year', inplace=True)
df
years = df.index
job_word_count = df['job_word_count'].values
plt.bar(years, job_word_count)
plt.show() | code |
2010673/cell_8 | [
"text_plain_output_1.png"
] | from collections import Counter
import nltk
import os
path = '../input/state-of-the-union-corpus-1989-2017'
dirs = os.listdir(path)
def count_words(word, filename):
file = open(path + '/' + filename, encoding='utf8')
text = file.read().lower()
words = nltk.word_tokenize(text)
word_counter = Counter(words)
word_count = word_counter[word]
return word_count
file_dict_list = []
for filename in dirs:
file_dict = {}
job_word_count = count_words('job', filename) + count_words('jobs', filename)
file_dict['year'] = int(filename[-8:-4])
file_dict['job_word_count'] = job_word_count
file_dict_list.append(file_dict)
print(file_dict_list) | code |
2010673/cell_15 | [
"image_output_1.png"
] | from collections import Counter
import matplotlib.pyplot as plt
import nltk
import os
import pandas as pd
path = '../input/state-of-the-union-corpus-1989-2017'
dirs = os.listdir(path)
def count_words(word, filename):
file = open(path + '/' + filename, encoding='utf8')
text = file.read().lower()
words = nltk.word_tokenize(text)
word_counter = Counter(words)
word_count = word_counter[word]
return word_count
file_dict_list = []
for filename in dirs:
file_dict = {}
job_word_count = count_words('job', filename) + count_words('jobs', filename)
file_dict['year'] = int(filename[-8:-4])
file_dict['job_word_count'] = job_word_count
file_dict_list.append(file_dict)
df = pd.DataFrame(file_dict_list)
df
df.set_index('year', inplace=True)
df
years = df.index
job_word_count = df['job_word_count'].values
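# The unemployment CSV appears to hold one column per month; the row-wise
# mean gives an annual rate, and merging on the year index lines it up with
# the per-speech word counts.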
jobless_rate = pd.read_csv('../input/usa-unemployment-rate-from-1989-to-2017/unemployment_rate.csv', sep=',')
jobless_rate.set_index('Year', inplace=True)
jobless_rate['Annual'] = jobless_rate.mean(axis=1)
jobless_rate
years = jobless_rate.index
joblessness = jobless_rate['Annual'].values
final_df = pd.merge(jobless_rate, df, left_index=True, right_index=True)
final_df
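# Dual-axis figure: word-count bars on ax1, the unemployment line on a
# twinned right-hand axis, with both legends merged into one.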
fig, ax1 = plt.subplots(figsize=(8, 5))
final_df['job_word_count'].plot(kind='bar', stacked=False, ax=ax1, label='No. of times "job" is mentioned')
ax2 = ax1.twinx()
ax2.plot(ax1.get_xticks(), final_df['Annual'].values, linestyle='-', marker='o', color='k', linewidth=1.0, label='Unemployment Rate (%)')
lines, labels = ax1.get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
ax1.legend(lines + lines2, labels + labels2, loc='best')
ax1.set_title('How State of the Union Addresses Reflect the Reality', fontweight='bold', size=15)
ax1.set_ylabel('"Job" Word Count', fontsize=12)
ax1.set_xlabel('Year', fontsize=12)
ax2.set_ylabel('Unemployment Rate (%)', fontsize=12)
plt.show() | code |
2010673/cell_16 | [
"text_html_output_1.png"
] | from collections import Counter
import nltk
import os
import string
path = '../input/state-of-the-union-corpus-1989-2017'
dirs = os.listdir(path)
def count_words(word, filename):
file = open(path + '/' + filename, encoding='utf8')
text = file.read().lower()
words = nltk.word_tokenize(text)
word_counter = Counter(words)
word_count = word_counter[word]
return word_count
bush_2003_filename = 'Bush_2003.txt'
bush_2003_file = open(path + '/' + 'Bush_2003.txt', encoding='utf8')
bush_2003_text = bush_2003_file.read().lower()
bush_2003_words = nltk.word_tokenize(bush_2003_text)
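# Filter out NLTK English stopwords, punctuation, and a few corpus-specific
# filler terms (tokens like "'s" and the em dash survive word_tokenize).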
useless_words = nltk.corpus.stopwords.words('english') + list(string.punctuation) + ['will', 'americans', 'america', 'american', '—', "'s"]
bush_2003_words_filtered = [word for word in bush_2003_words if word not in useless_words]
bush_2003_word_counter = Counter(bush_2003_words_filtered)
most_common_words_2003 = bush_2003_word_counter.most_common()
most_common_words_2003 | code |
2010673/cell_17 | [
"image_output_1.png"
] | from collections import Counter
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import nltk
import os
import pandas as pd
import string
path = '../input/state-of-the-union-corpus-1989-2017'
dirs = os.listdir(path)
def count_words(word, filename):
file = open(path + '/' + filename, encoding='utf8')
text = file.read().lower()
words = nltk.word_tokenize(text)
word_counter = Counter(words)
word_count = word_counter[word]
return word_count
file_dict_list = []
for filename in dirs:
file_dict = {}
job_word_count = count_words('job', filename) + count_words('jobs', filename)
file_dict['year'] = int(filename[-8:-4])
file_dict['job_word_count'] = job_word_count
file_dict_list.append(file_dict)
df = pd.DataFrame(file_dict_list)
df
df.set_index('year', inplace=True)
df
years = df.index
job_word_count = df['job_word_count'].values
jobless_rate = pd.read_csv('../input/usa-unemployment-rate-from-1989-to-2017/unemployment_rate.csv', sep=',')
jobless_rate.set_index('Year', inplace=True)
jobless_rate['Annual'] = jobless_rate.mean(axis=1)
jobless_rate
years = jobless_rate.index
joblessness = jobless_rate['Annual'].values
final_df = pd.merge(jobless_rate, df, left_index=True, right_index=True)
final_df
#Plotting the two trends in the same plot.
fig, ax1 = plt.subplots(figsize=(8,5))
final_df['job_word_count'].plot(kind='bar', stacked=False, ax=ax1, label='No. of times "job" is mentioned')
ax2 = ax1.twinx()
ax2.plot(ax1.get_xticks(), final_df['Annual'].values, linestyle='-', marker='o', color='k', linewidth=1.0, label='Unemployment Rate (%)')
lines, labels = ax1.get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
ax1.legend(lines + lines2, labels + labels2, loc='best')
ax1.set_title('How State of the Union Addresses Reflect the Reality',fontweight="bold", size=15)
ax1.set_ylabel('"Job" Word Count', fontsize=12)
ax1.set_xlabel("Year", fontsize=12)
ax2.set_ylabel("Unemployment Rate (%)", fontsize=12)
plt.show()
bush_2003_filename = 'Bush_2003.txt'
bush_2003_file = open(path + '/' + 'Bush_2003.txt', encoding='utf8')
bush_2003_text = bush_2003_file.read().lower()
bush_2003_words = nltk.word_tokenize(bush_2003_text)
useless_words = nltk.corpus.stopwords.words('english') + list(string.punctuation) + ['will', 'americans', 'america', 'american', '—', "'s"]
bush_2003_words_filtered = [word for word in bush_2003_words if word not in useless_words]
bush_2003_word_counter = Counter(bush_2003_words_filtered)
most_common_words_2003 = bush_2003_word_counter.most_common()
most_common_words_2003
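# Build the word cloud from the raw 2003 text, reusing the same stopwords
# (WordCloud takes a set for its `stopwords` argument).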
useless_word_set = set(useless_words)
word_cloud = WordCloud(background_color='white', stopwords=useless_word_set)
word_cloud.generate(bush_2003_text)
plt.figure(figsize=(20, 10))
plt.imshow(word_cloud, interpolation='bilinear')
plt.axis('off')
plt.show() | code |
2010673/cell_14 | [
"text_html_output_1.png"
] | from collections import Counter
import matplotlib.pyplot as plt
import nltk
import os
import pandas as pd
path = '../input/state-of-the-union-corpus-1989-2017'
dirs = os.listdir(path)
def count_words(word, filename):
file = open(path + '/' + filename, encoding='utf8')
text = file.read().lower()
words = nltk.word_tokenize(text)
word_counter = Counter(words)
word_count = word_counter[word]
return word_count
file_dict_list = []
for filename in dirs:
file_dict = {}
job_word_count = count_words('job', filename) + count_words('jobs', filename)
file_dict['year'] = int(filename[-8:-4])
file_dict['job_word_count'] = job_word_count
file_dict_list.append(file_dict)
df = pd.DataFrame(file_dict_list)
df
df.set_index('year', inplace=True)
df
years = df.index
job_word_count = df['job_word_count'].values
jobless_rate = pd.read_csv('../input/usa-unemployment-rate-from-1989-to-2017/unemployment_rate.csv', sep=',')
jobless_rate.set_index('Year', inplace=True)
jobless_rate['Annual'] = jobless_rate.mean(axis=1)
jobless_rate
years = jobless_rate.index
joblessness = jobless_rate['Annual'].values
final_df = pd.merge(jobless_rate, df, left_index=True, right_index=True)
final_df | code |
2010673/cell_10 | [
"text_plain_output_1.png"
] | from collections import Counter
import nltk
import os
import pandas as pd
path = '../input/state-of-the-union-corpus-1989-2017'
dirs = os.listdir(path)
def count_words(word, filename):
file = open(path + '/' + filename, encoding='utf8')
text = file.read().lower()
words = nltk.word_tokenize(text)
word_counter = Counter(words)
word_count = word_counter[word]
return word_count
file_dict_list = []
for filename in dirs:
file_dict = {}
job_word_count = count_words('job', filename) + count_words('jobs', filename)
file_dict['year'] = int(filename[-8:-4])
file_dict['job_word_count'] = job_word_count
file_dict_list.append(file_dict)
df = pd.DataFrame(file_dict_list)
df
df.set_index('year', inplace=True)
df | code |
2010673/cell_12 | [
"text_plain_output_1.png"
] | from collections import Counter
import nltk
import os
import pandas as pd
path = '../input/state-of-the-union-corpus-1989-2017'
dirs = os.listdir(path)
def count_words(word, filename):
file = open(path + '/' + filename, encoding='utf8')
text = file.read().lower()
words = nltk.word_tokenize(text)
word_counter = Counter(words)
word_count = word_counter[word]
return word_count
file_dict_list = []
for filename in dirs:
file_dict = {}
job_word_count = count_words('job', filename) + count_words('jobs', filename)
file_dict['year'] = int(filename[-8:-4])
file_dict['job_word_count'] = job_word_count
file_dict_list.append(file_dict)
df = pd.DataFrame(file_dict_list)
df
jobless_rate = pd.read_csv('../input/usa-unemployment-rate-from-1989-to-2017/unemployment_rate.csv', sep=',')
jobless_rate.set_index('Year', inplace=True)
jobless_rate['Annual'] = jobless_rate.mean(axis=1)
jobless_rate | code |
2010673/cell_5 | [
"text_html_output_1.png"
] | import os
path = '../input/state-of-the-union-corpus-1989-2017'
dirs = os.listdir(path)
path1 = '../input'
dirs1 = os.listdir(path1)
dirs1 | code |
128033920/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.shape
data.columns
data.nunique()
data.dtypes
data['ca'].value_counts() | code |
128033920/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.shape
data.columns
data.nunique() | code |
128033920/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.head() | code |
128033920/cell_30 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.shape
data.columns
data.nunique()
data.dtypes
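# In this dataset 'ca' ranges 0-3 and 'thal' 1-3, so ca==4 and thal==0 are
# treated as invalid codes and recoded as missing.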
data.loc[data['ca'] == 4, 'ca'] = np.NaN
data.loc[data['thal'] == 0, 'thal'] = np.NaN
data.isnull().sum()
data.isnull().sum()
data.duplicated().sum() | code |
128033920/cell_33 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.shape
data.columns
data.nunique()
data.dtypes
data.loc[data['ca'] == 4, 'ca'] = np.NaN
data.loc[data['thal'] == 0, 'thal'] = np.NaN
data.isnull().sum()
data.isnull().sum()
data.duplicated().sum()
data[data.duplicated()]
data = data.drop_duplicates(keep='first')
data
data.duplicated().sum() | code |
128033920/cell_20 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.shape
data.columns
data.nunique()
data.dtypes
data.loc[data['ca'] == 4, 'ca'] = np.NaN
data['thal'].value_counts() | code |
128033920/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.shape
data.columns | code |
128033920/cell_39 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import seaborn as sns
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.shape
data.columns
data.nunique()
data.dtypes
data.loc[data['ca'] == 4, 'ca'] = np.NaN
data.loc[data['thal'] == 0, 'thal'] = np.NaN
data.isnull().sum()
data.isnull().sum()
data.duplicated().sum()
data[data.duplicated()]
data = data.drop_duplicates(keep='first')
data
data.duplicated().sum()
sns.boxplot(data, x='target', y='oldpeak', color='deeppink') | code |
128033920/cell_26 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.shape
data.columns
data.nunique()
data.dtypes
data.loc[data['ca'] == 4, 'ca'] = np.NaN
data.loc[data['thal'] == 0, 'thal'] = np.NaN
data.isnull().sum() | code |
128033920/cell_41 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.shape
data.columns
data.nunique()
data.dtypes
data.loc[data['ca'] == 4, 'ca'] = np.NaN
data.loc[data['thal'] == 0, 'thal'] = np.NaN
data.isnull().sum()
data.isnull().sum()
data.duplicated().sum()
data[data.duplicated()]
data = data.drop_duplicates(keep='first')
data
data.duplicated().sum()
continous_features = ['age', 'trestbps', 'chol', 'thalach', 'oldpeak']
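# IQR rule: a value is an outlier if it lies outside
# [Q1 - 1.5*IQR, Q3 + 1.5*IQR]; with drop=True those rows are removed.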
def outliers(data_out, drop=False):
for each_feature in data_out.columns:
feature_data = data_out[each_feature]
Q1 = np.percentile(feature_data, 25.0)
Q3 = np.percentile(feature_data, 75.0)
IQR = Q3 - Q1
outlier_step = IQR * 1.5
outliers = feature_data[~((feature_data >= Q1 - outlier_step) & (feature_data <= Q3 + outlier_step))].index.tolist()
if not drop:
print('For the feature {}, No of Outliers is {}'.format(each_feature, len(outliers)))
if drop:
data.drop(outliers, inplace=True, errors='ignore')
print('Outliers from {} feature removed'.format(each_feature))
outliers(data[continous_features]) | code |
128033920/cell_19 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.shape
data.columns
data.nunique()
data.dtypes
data.loc[data['ca'] == 4, 'ca'] = np.NaN
data['thal'].nunique() | code |
128033920/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.shape
data.columns
data.info() | code |
128033920/cell_32 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.shape
data.columns
data.nunique()
data.dtypes
data.loc[data['ca'] == 4, 'ca'] = np.NaN
data.loc[data['thal'] == 0, 'thal'] = np.NaN
data.isnull().sum()
data.isnull().sum()
data.duplicated().sum()
data[data.duplicated()]
data = data.drop_duplicates(keep='first')
data | code |
128033920/cell_28 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.shape
data.columns
data.nunique()
data.dtypes
data.loc[data['ca'] == 4, 'ca'] = np.NaN
data.loc[data['thal'] == 0, 'thal'] = np.NaN
data.isnull().sum()
data.isnull().sum() | code |
128033920/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.shape
data.columns
data.nunique()
data.dtypes
data[data['ca'] == 4] | code |
128033920/cell_38 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import seaborn as sns
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.shape
data.columns
data.nunique()
data.dtypes
data.loc[data['ca'] == 4, 'ca'] = np.NaN
data.loc[data['thal'] == 0, 'thal'] = np.NaN
data.isnull().sum()
data.isnull().sum()
data.duplicated().sum()
data[data.duplicated()]
data = data.drop_duplicates(keep='first')
data
data.duplicated().sum()
sns.boxplot(data, x='target', y='chol', color='deeppink') | code |
128033920/cell_3 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import math
import random
import seaborn as sns
import matplotlib.pyplot as plt | code |
128033920/cell_17 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.shape
data.columns
data.nunique()
data.dtypes
data.loc[data['ca'] == 4, 'ca'] = np.NaN
data['ca'].value_counts() | code |
128033920/cell_31 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.shape
data.columns
data.nunique()
data.dtypes
data.loc[data['ca'] == 4, 'ca'] = np.NaN
data.loc[data['thal'] == 0, 'thal'] = np.NaN
data.isnull().sum()
data.isnull().sum()
data.duplicated().sum()
data[data.duplicated()] | code |
128033920/cell_24 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.shape
data.columns
data.nunique()
data.dtypes
data.loc[data['ca'] == 4, 'ca'] = np.NaN
data.loc[data['thal'] == 0, 'thal'] = np.NaN
data['thal'].value_counts() | code |
128033920/cell_22 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.shape
data.columns
data.nunique()
data.dtypes
data.loc[data['ca'] == 4, 'ca'] = np.NaN
data[data['thal'] == 0] | code |
128033920/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.shape
data.columns
data.nunique()
data.dtypes | code |
128033920/cell_37 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.shape
data.columns
data.nunique()
data.dtypes
data.loc[data['ca'] == 4, 'ca'] = np.NaN
data.loc[data['thal'] == 0, 'thal'] = np.NaN
data.isnull().sum()
data.isnull().sum()
data.duplicated().sum()
data[data.duplicated()]
data = data.drop_duplicates(keep='first')
data
data.duplicated().sum()
data.plot(kind='box', subplots=True, layout=(2, 7), sharex=False, sharey=False, figsize=(20, 10), color='deeppink') | code |
128033920/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.shape
data.columns
data.nunique()
data.dtypes
data['ca'].nunique() | code |
128033920/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import os
data = pd.read_csv('/kaggle/input/heart-disease/heart.csv')
data.shape | code |
34149133/cell_21 | [
"image_output_1.png"
] | from matplotlib.pylab import rcParams
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/playoff_shots.csv')
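# shooting_summary: for each group given by `by` (e.g. player_name or team),
# compute attempts and make-percentages overall and per shot type
# (3PT / 2PT / FT), plus each group's share of all shot attempts
# ('TS Proportion').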
def shooting_summary(df, by):
df_summary = df.copy()
df_3p = df_summary[df_summary['shot_type'] == '3PT']
df_2p = df_summary[df_summary['shot_type'] == '2PT']
df_ft = df_summary[df_summary['shot_type'] == 'FT']
result = pd.DataFrame()
all_make = df_summary[df_summary['result'] == 'make'].groupby(by).count()['game_id']
all_miss = df_summary[df_summary['result'] == 'miss'].groupby(by).count()['game_id']
result['Shot Attempts'] = all_make + all_miss
result['Shot %'] = all_make / result['Shot Attempts'] * 100
make_3p = df_3p[df_3p['result'] == 'make'].groupby(by).count()['game_id']
miss_3p = df_3p[df_3p['result'] == 'miss'].groupby(by).count()['game_id']
result['3p Attempts'] = make_3p + miss_3p
result['3p %'] = make_3p / result['3p Attempts'] * 100
make_2p = df_2p[df_2p['result'] == 'make'].groupby(by).count()['game_id']
miss_2p = df_2p[df_2p['result'] == 'miss'].groupby(by).count()['game_id']
result['2p Attempts'] = make_2p + miss_2p
result['2p %'] = make_2p / result['2p Attempts'] * 100
make_ft = df_ft[df_ft['result'] == 'make'].groupby(by).count()['game_id']
miss_ft = df_ft[df_ft['result'] == 'miss'].groupby(by).count()['game_id']
result['FT Attempts'] = make_ft + miss_ft
result['FT %'] = make_ft / result['FT Attempts'] * 100
result['TS Proportion'] = result['Shot Attempts'] / np.sum(result['Shot Attempts']) * 100
return result
### Shooting summary of GSW
rcParams['figure.figsize'] = [15,5]
plt.style.use('fivethirtyeight')
sns.set_style('whitegrid')
grid = gridspec.GridSpec(1,2)
# Plot prep
gsw = df[df['team']=='Golden State Warriors']
df_plot = shooting_summary(gsw, 'player_name').fillna(0).sort_values('Shot Attempts', ascending=False).head(5)
# Plot shooting percentage
ax1 = plt.subplot(grid[0])
sns.heatmap(df_plot[['Shot %', '3p %', '2p %', 'FT %', 'TS Proportion']], annot=True, ax=ax1, cmap='Pastel1_r', linewidths=2)
ax1.set_title('GSW Shooting Percentage', fontname='Monospace', fontsize='20', fontweight='bold')
# Plot shooting attempts
ax2 = plt.subplot(grid[1])
sns.heatmap(df_plot[['3p Attempts', '2p Attempts', 'FT Attempts']], annot=True, fmt='.2f', cmap='Pastel1_r', ax=ax2, linewidths=2)
ax2.set_title('GSW Shooting Attempts', fontname='Monospace', fontsize='20', fontweight='bold')
plt.tight_layout()
rcParams['figure.figsize'] = [10, 5]
plt.style.use('fivethirtyeight')
sns.set_style('whitegrid')
kevin_durant_prop = 26.2757
result = pd.DataFrame()
for team in df['team'].unique():
df_team = df[df['team'] == team]
summary = shooting_summary(df_team, 'player_name')
bool_team_shot = summary['TS Proportion'] >= kevin_durant_prop
result = pd.concat([result, summary[bool_team_shot]])
cmap = sns.color_palette('Pastel1_r')
result['TS Proportion'].sort_values().plot(kind='barh', color=cmap)
list_x = list(np.round(result['TS Proportion'].sort_values(), 2))
list_y = list(np.arange(-0.1, 5.9, 1))
for x, y in zip(list_x, list_y):
plt.text(x + 0.5, y, str(x), fontweight='bold', fontsize=14, fontname='Monospace')
plt.text(np.max(list_x) + 0.5, 0.4, 'Threshold', fontname='Monospace', fontsize=14)
plt.xlim(0, np.max(list_x) * 1.2)
def highlight_max(s):
"""
highlight the maximum
"""
is_max = s == s.max()
return ['background-color:#f85a40' if v else '' for v in is_max]
result[result.index.isin(['LeBron James', 'Kevin Durant'])].T.style.apply(highlight_max, axis=1)
df_final = df[df['game_description'].str.contains('CLE v GS')]
result = shooting_summary(df_final, 'player_name')
result[result.index.isin(['LeBron James', 'Kevin Durant'])].T.style.apply(highlight_max, axis=1)
rcParams['figure.figsize'] = [10, 5]
plt.style.use('fivethirtyeight')
sns.set_style('whitegrid')
list_var = ['Shot %', '3p %', '2p %', 'FT %']
team = 'Golden State Warriors'
result = pd.DataFrame()
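# Rounds are recovered by position in the ordered game_description list;
# 'CLE v IND, G7' is moved from the semifinal slice back into the first round.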
first_round_desc = list(df['game_description'].unique())[:44]
first_round_desc.append('CLE v IND, G7')
first_round = df[df['game_description'].isin(first_round_desc)]
summary_first_round = shooting_summary(first_round, 'team')[list_var]
for var in list_var:
summary_first_round[var] = summary_first_round[var].rank(ascending=False)
team_summary = summary_first_round[summary_first_round.index == team]
team_summary.index = ['First Round (16)']
result = pd.concat([result, team_summary])
conf_semi_desc = list(df['game_description'].unique())[44:64]
conf_semi_desc.remove('CLE v IND, G7')
conf_semi = df[df['game_description'].isin(conf_semi_desc)]
summary_conf_semi = shooting_summary(conf_semi, 'team')[list_var]
for var in list_var:
summary_conf_semi[var] = summary_conf_semi[var].rank(ascending=False, method='min')
team_summary = summary_conf_semi[summary_conf_semi.index == team]
team_summary.index = ['Conf. Semifinals (8)']
result = pd.concat([result, team_summary])
conf_finals_desc = list(df['game_description'].unique())[64:78]
conf_finals = df[df['game_description'].isin(conf_finals_desc)]
summary_conf_finals = shooting_summary(conf_finals, 'team')[list_var]
for var in list_var:
summary_conf_finals[var] = summary_conf_finals[var].rank(ascending=False)
team_summary = summary_conf_finals[summary_conf_finals.index == team]
team_summary.index = ['Conf. Finals (4)']
result = pd.concat([result, team_summary])
finals_desc = list(df['game_description'].unique())[78:]
finals = df[df['game_description'].isin(finals_desc)]
summary_finals = shooting_summary(finals, 'team')[list_var]
for var in list_var:
summary_finals[var] = summary_finals[var].rank(ascending=False)
team_summary = summary_finals[summary_finals.index == team]
team_summary.index = ['Finals (2)']
result = pd.concat([result, team_summary])
first_round_desc = list(df['game_description'].unique())[:44]
first_round_desc.append('CLE v IND, G7')
first_round = df[df['game_description'].isin(first_round_desc)]
shooting_summary(first_round, 'team').sort_values('2p %', ascending=False).style.apply(highlight_max) | code |
34149133/cell_13 | [
"text_plain_output_1.png"
] | from matplotlib.pylab import rcParams
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/playoff_shots.csv')
def shooting_summary(df, by):
df_summary = df.copy()
df_3p = df_summary[df_summary['shot_type'] == '3PT']
df_2p = df_summary[df_summary['shot_type'] == '2PT']
df_ft = df_summary[df_summary['shot_type'] == 'FT']
result = pd.DataFrame()
all_make = df_summary[df_summary['result'] == 'make'].groupby(by).count()['game_id']
all_miss = df_summary[df_summary['result'] == 'miss'].groupby(by).count()['game_id']
result['Shot Attempts'] = all_make + all_miss
result['Shot %'] = all_make / result['Shot Attempts'] * 100
make_3p = df_3p[df_3p['result'] == 'make'].groupby(by).count()['game_id']
miss_3p = df_3p[df_3p['result'] == 'miss'].groupby(by).count()['game_id']
result['3p Attempts'] = make_3p + miss_3p
result['3p %'] = make_3p / result['3p Attempts'] * 100
make_2p = df_2p[df_2p['result'] == 'make'].groupby(by).count()['game_id']
miss_2p = df_2p[df_2p['result'] == 'miss'].groupby(by).count()['game_id']
result['2p Attempts'] = make_2p + miss_2p
result['2p %'] = make_2p / result['2p Attempts'] * 100
make_ft = df_ft[df_ft['result'] == 'make'].groupby(by).count()['game_id']
miss_ft = df_ft[df_ft['result'] == 'miss'].groupby(by).count()['game_id']
result['FT Attempts'] = make_ft + miss_ft
result['FT %'] = make_ft / result['FT Attempts'] * 100
result['TS Proportion'] = result['Shot Attempts'] / np.sum(result['Shot Attempts']) * 100
return result
### Shooting summary of GSW
rcParams['figure.figsize'] = [15,5]
plt.style.use('fivethirtyeight')
sns.set_style('whitegrid')
grid = gridspec.GridSpec(1,2)
# Plot prep
gsw = df[df['team']=='Golden State Warriors']
df_plot = shooting_summary(gsw, 'player_name').fillna(0).sort_values('Shot Attempts', ascending=False).head(5)
# Plot shooting percentage
ax1 = plt.subplot(grid[0])
sns.heatmap(df_plot[['Shot %', '3p %', '2p %', 'FT %', 'TS Proportion']], annot=True, ax=ax1, cmap='Pastel1_r', linewidths=2)
ax1.set_title('GSW Shooting Percentage', fontname='Monospace', fontsize='20', fontweight='bold')
# Plot shooting attempts
ax2 = plt.subplot(grid[1])
sns.heatmap(df_plot[['3p Attempts', '2p Attempts', 'FT Attempts']], annot=True, fmt='.2f', cmap='Pastel1_r', ax=ax2, linewidths=2)
ax2.set_title('GSW Shooting Attempts', fontname='Monospace', fontsize='20', fontweight='bold')
plt.tight_layout()
rcParams['figure.figsize'] = [10, 5]
plt.style.use('fivethirtyeight')
sns.set_style('whitegrid')
kevin_durant_prop = 26.2757
result = pd.DataFrame()
for team in df['team'].unique():
df_team = df[df['team'] == team]
summary = shooting_summary(df_team, 'player_name')
bool_team_shot = summary['TS Proportion'] >= kevin_durant_prop
result = pd.concat([result, summary[bool_team_shot]])
cmap = sns.color_palette('Pastel1_r')
result['TS Proportion'].sort_values().plot(kind='barh', color=cmap)
list_x = list(np.round(result['TS Proportion'].sort_values(), 2))
list_y = list(np.arange(-0.1, 5.9, 1))
for x, y in zip(list_x, list_y):
plt.text(x + 0.5, y, str(x), fontweight='bold', fontsize=14, fontname='Monospace')
plt.text(np.max(list_x) + 0.5, 0.4, 'Threshold', fontname='Monospace', fontsize=14)
plt.xlim(0, np.max(list_x) * 1.2)
def highlight_max(s):
"""
highlight the maximum
"""
is_max = s == s.max()
return ['background-color:#f85a40' if v else '' for v in is_max]
result[result.index.isin(['LeBron James', 'Kevin Durant'])].T.style.apply(highlight_max, axis=1) | code |
34149133/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from matplotlib.pylab import rcParams
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/playoff_shots.csv')
def shooting_summary(df, by):
df_summary = df.copy()
df_3p = df_summary[df_summary['shot_type'] == '3PT']
df_2p = df_summary[df_summary['shot_type'] == '2PT']
df_ft = df_summary[df_summary['shot_type'] == 'FT']
result = pd.DataFrame()
all_make = df_summary[df_summary['result'] == 'make'].groupby(by).count()['game_id']
all_miss = df_summary[df_summary['result'] == 'miss'].groupby(by).count()['game_id']
result['Shot Attempts'] = all_make + all_miss
result['Shot %'] = all_make / result['Shot Attempts'] * 100
make_3p = df_3p[df_3p['result'] == 'make'].groupby(by).count()['game_id']
miss_3p = df_3p[df_3p['result'] == 'miss'].groupby(by).count()['game_id']
result['3p Attempts'] = make_3p + miss_3p
result['3p %'] = make_3p / result['3p Attempts'] * 100
make_2p = df_2p[df_2p['result'] == 'make'].groupby(by).count()['game_id']
miss_2p = df_2p[df_2p['result'] == 'miss'].groupby(by).count()['game_id']
result['2p Attempts'] = make_2p + miss_2p
result['2p %'] = make_2p / result['2p Attempts'] * 100
make_ft = df_ft[df_ft['result'] == 'make'].groupby(by).count()['game_id']
miss_ft = df_ft[df_ft['result'] == 'miss'].groupby(by).count()['game_id']
result['FT Attempts'] = make_ft + miss_ft
result['FT %'] = make_ft / result['FT Attempts'] * 100
result['TS Proportion'] = result['Shot Attempts'] / np.sum(result['Shot Attempts']) * 100
return result
rcParams['figure.figsize'] = [15, 5]
plt.style.use('fivethirtyeight')
sns.set_style('whitegrid')
grid = gridspec.GridSpec(1, 2)
gsw = df[df['team'] == 'Golden State Warriors']
df_plot = shooting_summary(gsw, 'player_name').fillna(0).sort_values('Shot Attempts', ascending=False).head(5)
ax1 = plt.subplot(grid[0])
sns.heatmap(df_plot[['Shot %', '3p %', '2p %', 'FT %', 'TS Proportion']], annot=True, ax=ax1, cmap='Pastel1_r', linewidths=2)
ax1.set_title('GSW Shooting Percentage', fontname='Monospace', fontsize='20', fontweight='bold')
ax2 = plt.subplot(grid[1])
sns.heatmap(df_plot[['3p Attempts', '2p Attempts', 'FT Attempts']], annot=True, fmt='.2f', cmap='Pastel1_r', ax=ax2, linewidths=2)
ax2.set_title('GSW Shooting Attempts', fontname='Monospace', fontsize='20', fontweight='bold')
plt.tight_layout() | code |
34149133/cell_25 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/playoff_shots.csv')
def shooting_summary(df, by):
df_summary = df.copy()
df_3p = df_summary[df_summary['shot_type'] == '3PT']
df_2p = df_summary[df_summary['shot_type'] == '2PT']
df_ft = df_summary[df_summary['shot_type'] == 'FT']
result = pd.DataFrame()
all_make = df_summary[df_summary['result'] == 'make'].groupby(by).count()['game_id']
all_miss = df_summary[df_summary['result'] == 'miss'].groupby(by).count()['game_id']
result['Shot Attempts'] = all_make + all_miss
result['Shot %'] = all_make / result['Shot Attempts'] * 100
make_3p = df_3p[df_3p['result'] == 'make'].groupby(by).count()['game_id']
miss_3p = df_3p[df_3p['result'] == 'miss'].groupby(by).count()['game_id']
result['3p Attempts'] = make_3p + miss_3p
result['3p %'] = make_3p / result['3p Attempts'] * 100
make_2p = df_2p[df_2p['result'] == 'make'].groupby(by).count()['game_id']
miss_2p = df_2p[df_2p['result'] == 'miss'].groupby(by).count()['game_id']
result['2p Attempts'] = make_2p + miss_2p
result['2p %'] = make_2p / result['2p Attempts'] * 100
make_ft = df_ft[df_ft['result'] == 'make'].groupby(by).count()['game_id']
miss_ft = df_ft[df_ft['result'] == 'miss'].groupby(by).count()['game_id']
result['FT Attempts'] = make_ft + miss_ft
result['FT %'] = make_ft / result['FT Attempts'] * 100
result['TS Proportion'] = result['Shot Attempts'] / np.sum(result['Shot Attempts']) * 100
return result
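# Order shots chronologically per player and shot type; period_time is
# sorted descending, consistent with a countdown game clock.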
sort_order = ['player_id', 'shot_type', 'game_id', 'period', 'period_time']
ascending = [True, True, True, True, False]
df_ordered = df.sort_values(sort_order, ascending=ascending).reset_index(drop=True) | code |
34149133/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/playoff_shots.csv')
df.head(10) | code |
34149133/cell_20 | [
"text_html_output_1.png"
] | a | code |
34149133/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/playoff_shots.csv')
dict_player = {}
for team in df['team'].unique():
dict_player[team] = df[df['team'] == team]['player_name'].unique()
display(dict_player['Golden State Warriors']) | code |
34149133/cell_11 | [
"text_html_output_1.png"
] | from matplotlib.pylab import rcParams
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/playoff_shots.csv')
def shooting_summary(df, by):
df_summary = df.copy()
df_3p = df_summary[df_summary['shot_type'] == '3PT']
df_2p = df_summary[df_summary['shot_type'] == '2PT']
df_ft = df_summary[df_summary['shot_type'] == 'FT']
result = pd.DataFrame()
all_make = df_summary[df_summary['result'] == 'make'].groupby(by).count()['game_id']
all_miss = df_summary[df_summary['result'] == 'miss'].groupby(by).count()['game_id']
result['Shot Attempts'] = all_make + all_miss
result['Shot %'] = all_make / result['Shot Attempts'] * 100
make_3p = df_3p[df_3p['result'] == 'make'].groupby(by).count()['game_id']
miss_3p = df_3p[df_3p['result'] == 'miss'].groupby(by).count()['game_id']
result['3p Attempts'] = make_3p + miss_3p
result['3p %'] = make_3p / result['3p Attempts'] * 100
make_2p = df_2p[df_2p['result'] == 'make'].groupby(by).count()['game_id']
miss_2p = df_2p[df_2p['result'] == 'miss'].groupby(by).count()['game_id']
result['2p Attempts'] = make_2p + miss_2p
result['2p %'] = make_2p / result['2p Attempts'] * 100
make_ft = df_ft[df_ft['result'] == 'make'].groupby(by).count()['game_id']
miss_ft = df_ft[df_ft['result'] == 'miss'].groupby(by).count()['game_id']
result['FT Attempts'] = make_ft + miss_ft
result['FT %'] = make_ft / result['FT Attempts'] * 100
result['TS Proportion'] = result['Shot Attempts'] / np.sum(result['Shot Attempts']) * 100
return result
### Shooting summary of GSW
rcParams['figure.figsize'] = [15,5]
plt.style.use('fivethirtyeight')
sns.set_style('whitegrid')
grid = gridspec.GridSpec(1,2)
# Plot prep
gsw = df[df['team']=='Golden State Warriors']
df_plot = shooting_summary(gsw, 'player_name').fillna(0).sort_values('Shot Attempts', ascending=False).head(5)
# Plot shooting percentage
ax1 = plt.subplot(grid[0])
sns.heatmap(df_plot[['Shot %', '3p %', '2p %', 'FT %', 'TS Proportion']], annot=True, ax=ax1, cmap='Pastel1_r', linewidths=2)
ax1.set_title('GSW Shooting Percentage', fontname='Monospace', fontsize='20', fontweight='bold')
# Plot shooting attempts
ax2 = plt.subplot(grid[1])
sns.heatmap(df_plot[['3p Attempts', '2p Attempts', 'FT Attempts']], annot=True, fmt='.2f', cmap='Pastel1_r', ax=ax2, linewidths=2)
ax2.set_title('GSW Shooting Attempts', fontname='Monospace', fontsize='20', fontweight='bold')
plt.tight_layout()
rcParams['figure.figsize'] = [10, 5]
plt.style.use('fivethirtyeight')
sns.set_style('whitegrid')
kevin_durant_prop = 26.2757
result = pd.DataFrame()
for team in df['team'].unique():
df_team = df[df['team'] == team]
summary = shooting_summary(df_team, 'player_name')
bool_team_shot = summary['TS Proportion'] >= kevin_durant_prop
result = pd.concat([result, summary[bool_team_shot]])
cmap = sns.color_palette('Pastel1_r')
result['TS Proportion'].sort_values().plot(kind='barh', color=cmap)
list_x = list(np.round(result['TS Proportion'].sort_values(), 2))
list_y = list(np.arange(-0.1, 5.9, 1))
for x, y in zip(list_x, list_y):
plt.text(x + 0.5, y, str(x), fontweight='bold', fontsize=14, fontname='Monospace')
plt.plot([0, np.max(list_x)], [0.5, 0.5], '--', color='grey')
plt.text(np.max(list_x) + 0.5, 0.4, 'Threshold', fontname='Monospace', fontsize=14)
plt.xlabel('Team Shot Proportion')
plt.title('Top Player based on TS Proportion', fontname='Monospace', fontsize=20, fontweight='bold')
plt.xlim(0, np.max(list_x) * 1.2) | code |
34149133/cell_19 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.read_csv('/kaggle/input/playoff_shots.csv')
def shooting_summary(df, by):
df_summary = df.copy()
df_3p = df_summary[df_summary['shot_type'] == '3PT']
df_2p = df_summary[df_summary['shot_type'] == '2PT']
df_ft = df_summary[df_summary['shot_type'] == 'FT']
result = pd.DataFrame()
all_make = df_summary[df_summary['result'] == 'make'].groupby(by).count()['game_id']
all_miss = df_summary[df_summary['result'] == 'miss'].groupby(by).count()['game_id']
result['Shot Attempts'] = all_make + all_miss
result['Shot %'] = all_make / result['Shot Attempts'] * 100
make_3p = df_3p[df_3p['result'] == 'make'].groupby(by).count()['game_id']
miss_3p = df_3p[df_3p['result'] == 'miss'].groupby(by).count()['game_id']
result['3p Attempts'] = make_3p + miss_3p
result['3p %'] = make_3p / result['3p Attempts'] * 100
make_2p = df_2p[df_2p['result'] == 'make'].groupby(by).count()['game_id']
miss_2p = df_2p[df_2p['result'] == 'miss'].groupby(by).count()['game_id']
result['2p Attempts'] = make_2p + miss_2p
result['2p %'] = make_2p / result['2p Attempts'] * 100
make_ft = df_ft[df_ft['result'] == 'make'].groupby(by).count()['game_id']
miss_ft = df_ft[df_ft['result'] == 'miss'].groupby(by).count()['game_id']
result['FT Attempts'] = make_ft + miss_ft
result['FT %'] = make_ft / result['FT Attempts'] * 100
result['TS Proportion'] = result['Shot Attempts'] / np.sum(result['Shot Attempts']) * 100
return result
list(df['game_description'].unique())[78:] | code |
34149133/cell_1 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas_profiling as pp
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns
from tqdm import tqdm_notebook as tqdm
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = [10, 5]
plt.style.use('fivethirtyeight')
sns.set_style('whitegrid')
import warnings
warnings.filterwarnings('ignore')
from tqdm import tqdm
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 200)
pd.options.display.float_format = '{:.5f}'.format
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
34149133/cell_15 | [
"image_output_1.png"
] | from matplotlib.pylab import rcParams
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/playoff_shots.csv')
def shooting_summary(df, by):
df_summary = df.copy()
df_3p = df_summary[df_summary['shot_type'] == '3PT']
df_2p = df_summary[df_summary['shot_type'] == '2PT']
df_ft = df_summary[df_summary['shot_type'] == 'FT']
result = pd.DataFrame()
all_make = df_summary[df_summary['result'] == 'make'].groupby(by).count()['game_id']
all_miss = df_summary[df_summary['result'] == 'miss'].groupby(by).count()['game_id']
result['Shot Attempts'] = all_make + all_miss
result['Shot %'] = all_make / result['Shot Attempts'] * 100
make_3p = df_3p[df_3p['result'] == 'make'].groupby(by).count()['game_id']
miss_3p = df_3p[df_3p['result'] == 'miss'].groupby(by).count()['game_id']
result['3p Attempts'] = make_3p + miss_3p
result['3p %'] = make_3p / result['3p Attempts'] * 100
make_2p = df_2p[df_2p['result'] == 'make'].groupby(by).count()['game_id']
miss_2p = df_2p[df_2p['result'] == 'miss'].groupby(by).count()['game_id']
result['2p Attempts'] = make_2p + miss_2p
result['2p %'] = make_2p / result['2p Attempts'] * 100
make_ft = df_ft[df_ft['result'] == 'make'].groupby(by).count()['game_id']
miss_ft = df_ft[df_ft['result'] == 'miss'].groupby(by).count()['game_id']
result['FT Attempts'] = make_ft + miss_ft
result['FT %'] = make_ft / result['FT Attempts'] * 100
result['TS Proportion'] = result['Shot Attempts'] / np.sum(result['Shot Attempts']) * 100
return result
### Shooting summary of GSW
rcParams['figure.figsize'] = [15,5]
plt.style.use('fivethirtyeight')
sns.set_style('whitegrid')
grid = gridspec.GridSpec(1,2)
# Plot prep
gsw = df[df['team']=='Golden State Warriors']
df_plot = shooting_summary(gsw, 'player_name').fillna(0).sort_values('Shot Attempts', ascending=False).head(5)
# Plot shooting percentage
ax1 = plt.subplot(grid[0])
sns.heatmap(df_plot[['Shot %', '3p %', '2p %', 'FT %', 'TS Proportion']], annot=True, ax=ax1, cmap='Pastel1_r', linewidths=2)
ax1.set_title('GSW Shooting Percentage', fontname='Monospace', fontsize='20', fontweight='bold')
# Plot shooting attempts
ax2 = plt.subplot(grid[1])
sns.heatmap(df_plot[['3p Attempts', '2p Attempts', 'FT Attempts']], annot=True, fmt='.2f', cmap='Pastel1_r', ax=ax2, linewidths=2)
ax2.set_title('GSW Shooting Attempts', fontname='Monospace', fontsize='20', fontweight='bold')
plt.tight_layout()
rcParams['figure.figsize'] = [10, 5]
plt.style.use('fivethirtyeight')
sns.set_style('whitegrid')
kevin_durant_prop = 26.2757
result = pd.DataFrame()
for team in df['team'].unique():
df_team = df[df['team'] == team]
summary = shooting_summary(df_team, 'player_name')
bool_team_shot = summary['TS Proportion'] >= kevin_durant_prop
result = pd.concat([result, summary[bool_team_shot]])
cmap = sns.color_palette('Pastel1_r')
result['TS Proportion'].sort_values().plot(kind='barh', color=cmap)
list_x = list(np.round(result['TS Proportion'].sort_values(), 2))
list_y = list(np.arange(-0.1, 5.9, 1))
for x, y in zip(list_x, list_y):
plt.text(x + 0.5, y, str(x), fontweight='bold', fontsize=14, fontname='Monospace')
plt.text(np.max(list_x) + 0.5, 0.4, 'Threshold', fontname='Monospace', fontsize=14)
plt.xlim(0, np.max(list_x) * 1.2)
def highlight_max(s):
"""
highlight the maximum
"""
is_max = s == s.max()
return ['background-color:#f85a40' if v else '' for v in is_max]
result[result.index.isin(['LeBron James', 'Kevin Durant'])].T.style.apply(highlight_max, axis=1)
df_final = df[df['game_description'].str.contains('CLE v GS')]
result = shooting_summary(df_final, 'player_name')
result[result.index.isin(['LeBron James', 'Kevin Durant'])].T.style.apply(highlight_max, axis=1) | code |
34149133/cell_17 | [
"image_output_1.png"
] | from matplotlib.pylab import rcParams
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/playoff_shots.csv')
def shooting_summary(df, by):
df_summary = df.copy()
df_3p = df_summary[df_summary['shot_type'] == '3PT']
df_2p = df_summary[df_summary['shot_type'] == '2PT']
df_ft = df_summary[df_summary['shot_type'] == 'FT']
result = pd.DataFrame()
all_make = df_summary[df_summary['result'] == 'make'].groupby(by).count()['game_id']
all_miss = df_summary[df_summary['result'] == 'miss'].groupby(by).count()['game_id']
result['Shot Attempts'] = all_make + all_miss
result['Shot %'] = all_make / result['Shot Attempts'] * 100
make_3p = df_3p[df_3p['result'] == 'make'].groupby(by).count()['game_id']
miss_3p = df_3p[df_3p['result'] == 'miss'].groupby(by).count()['game_id']
result['3p Attempts'] = make_3p + miss_3p
result['3p %'] = make_3p / result['3p Attempts'] * 100
make_2p = df_2p[df_2p['result'] == 'make'].groupby(by).count()['game_id']
miss_2p = df_2p[df_2p['result'] == 'miss'].groupby(by).count()['game_id']
result['2p Attempts'] = make_2p + miss_2p
result['2p %'] = make_2p / result['2p Attempts'] * 100
make_ft = df_ft[df_ft['result'] == 'make'].groupby(by).count()['game_id']
miss_ft = df_ft[df_ft['result'] == 'miss'].groupby(by).count()['game_id']
result['FT Attempts'] = make_ft + miss_ft
result['FT %'] = make_ft / result['FT Attempts'] * 100
result['TS Proportion'] = result['Shot Attempts'] / np.sum(result['Shot Attempts']) * 100
return result
### Shooting summary of GSW
rcParams['figure.figsize'] = [15,5]
plt.style.use('fivethirtyeight')
sns.set_style('whitegrid')
grid = gridspec.GridSpec(1,2)
# Plot prep
gsw = df[df['team']=='Golden State Warriors']
df_plot = shooting_summary(gsw, 'player_name').fillna(0).sort_values('Shot Attempts', ascending=False).head(5)
# Plot shooting percentage
ax1 = plt.subplot(grid[0])
sns.heatmap(df_plot[['Shot %', '3p %', '2p %', 'FT %', 'TS Proportion']], annot=True, ax=ax1, cmap='Pastel1_r', linewidths=2)
ax1.set_title('GSW Shooting Percentage', fontname='Monospace', fontsize='20', fontweight='bold')
# Plot shooting attempts
ax2 = plt.subplot(grid[1])
sns.heatmap(df_plot[['3p Attempts', '2p Attempts', 'FT Attempts']], annot=True, fmt='.2f', cmap='Pastel1_r', ax=ax2, linewidths=2)
ax2.set_title('GSW Shooting Attempts', fontname='Monospace', fontsize='20', fontweight='bold')
plt.tight_layout()
rcParams['figure.figsize'] = [10, 5]
plt.style.use('fivethirtyeight')
sns.set_style('whitegrid')
kevin_durant_prop = 26.2757
result = pd.DataFrame()
for team in df['team'].unique():
df_team = df[df['team'] == team]
summary = shooting_summary(df_team, 'player_name')
bool_team_shot = summary['TS Proportion'] >= kevin_durant_prop
result = pd.concat([result, summary[bool_team_shot]])
cmap = sns.color_palette('Pastel1_r')
result['TS Proportion'].sort_values().plot(kind='barh', color=cmap)
list_x = list(np.round(result['TS Proportion'].sort_values(), 2))
list_y = list(np.arange(-0.1, 5.9, 1))
for x, y in zip(list_x, list_y):
plt.text(x + 0.5, y, str(x), fontweight='bold', fontsize=14, fontname='Monospace')
plt.text(np.max(list_x) + 0.5, 0.4, 'Threshold', fontname='Monospace', fontsize=14)
plt.xlim(0, np.max(list_x) * 1.2)
def highlight_max(s):
"""
highlight the maximum
"""
is_max = s == s.max()
return ['background-color:#f85a40' if v else '' for v in is_max]
result[result.index.isin(['LeBron James', 'Kevin Durant'])].T.style.apply(highlight_max, axis=1)
df_final = df[df['game_description'].str.contains('CLE v GS')]
result = player_shooting_summary(df_final)
result[result.index.isin(['LeBron James', 'Kevin Durant'])].T.style.apply(highlight_max, axis=1)
rcParams['figure.figsize'] = [10, 5]
plt.style.use('fivethirtyeight')
sns.set_style('whitegrid')
list_var = ['Shot %', '3p %', '2p %', 'FT %']
team = 'Golden State Warriors'
result = pd.DataFrame()
first_round_desc = list(df['game_description'].unique())[:44]
first_round_desc.append('CLE v IND, G7')
first_round = df[df['game_description'].isin(first_round_desc)]
summary_first_round = shooting_summary(first_round, 'team')[list_var]
for var in list_var:
summary_first_round[var] = summary_first_round[var].rank(ascending=False)
team_summary = summary_first_round[summary_first_round.index == team]
team_summary.index = ['First Round (16)']
result = pd.concat([result, team_summary])
conf_semi_desc = list(df['game_description'].unique())[44:64]
conf_semi_desc.remove('CLE v IND, G7')
conf_semi = df[df['game_description'].isin(conf_semi_desc)]
summary_conf_semi = shooting_summary(conf_semi, 'team')[list_var]
for var in list_var:
summary_conf_semi[var] = summary_conf_semi[var].rank(ascending=False, method='min')
team_summary = summary_conf_semi[summary_conf_semi.index == team]
team_summary.index = ['Conf. Semifinals (8)']
result = pd.concat([result, team_summary])
conf_finals_desc = list(df['game_description'].unique())[64:78]
conf_finals = df[df['game_description'].isin(conf_finals_desc)]
summary_conf_finals = shooting_summary(conf_finals, 'team')[list_var]
for var in list_var:
summary_conf_finals[var] = summary_conf_finals[var].rank(ascending=False)
team_summary = summary_conf_finals[summary_conf_finals.index == team]
team_summary.index = ['Conf. Finals (4)']
result = pd.concat([result, team_summary])
finals_desc = list(df['game_description'].unique())[78:]
finals = df[df['game_description'].isin(finals_desc)]
summary_finals = shooting_summary(finals, 'team')[list_var]
for var in list_var:
summary_finals[var] = summary_finals[var].rank(ascending=False)
team_summary = summary_finals[summary_finals.index == team]
team_summary.index = ['Finals (2)']
result = pd.concat([result, team_summary])
sns.heatmap(result, annot=True, cmap='Pastel1', linewidths=2)
plt.title('GSW Shooting % Rank', fontname='Monospace', fontsize='20', fontweight='bold') | code |
34149133/cell_24 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/playoff_shots.csv')
dict_player = {}
for team in df['team'].unique():
dict_player[team] = df[df['team'] == team]['player_name'].unique()
display(dict_player['Cleveland Cavaliers']) | code |
106201341/cell_8 | [
"image_output_1.png"
] | import json
import matplotlib.pyplot as plt
import pandas as pd
import requests
import seaborn as sns
color = sns.color_palette()
token_address = '0xd7efb00d12c2c13131fd319336fdf952525da2af'
base_url = 'https://api.ethplorer.io'
url = base_url + f'/getTokenInfo/{token_address}?apiKey=freekey'
response = requests.get(url)
if response.status_code == 200:
token_info_response = json.loads(response.text)
token_info_response
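# getTopTokenHolders returns up to `limit` holders with their percentage
# share; summing the sorted shares gives top-N holder concentration.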
url = base_url + f'/getTopTokenHolders/{token_address}?apiKey=freekey&limit=100'
response = requests.get(url)
if response.status_code == 200:
token_holders_response = json.loads(response.text)
token_holders_df = pd.DataFrame(token_holders_response['holders'])
n_top_holders_list = ['1', '2', '3', '5', '10', '25', '100']
shares_list = [round(token_holders_df['share'].values[:int(n)].sum(), 2) for n in n_top_holders_list]
plt.figure(figsize=(12, 6))
ax = sns.barplot(x=n_top_holders_list, y=shares_list, alpha=0.8, color=color[1])
ax.bar_label(ax.containers[0])
plt.xlabel('Top N Wallets', fontsize=12)
plt.ylabel('Cumulative percentage of Token share', fontsize=12)
plt.title('Percentage of tokens hodl by top N wallets', fontsize=16)
plt.show() | code |
106201341/cell_3 | [
"image_output_1.png"
] | import json
import time
import requests
import datetime
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
from plotly import tools
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
pd.options.mode.chained_assignment = None
pd.options.display.max_columns = 999 | code |
106201341/cell_17 | [
"image_output_1.png"
] | import datetime
import json
import matplotlib.pyplot as plt
import pandas as pd
import requests
import seaborn as sns
color = sns.color_palette()
import time
token_address = '0xd7efb00d12c2c13131fd319336fdf952525da2af'
base_url = 'https://api.ethplorer.io'
url = base_url + f'/getTokenInfo/{token_address}?apiKey=freekey'
response = requests.get(url)
if response.status_code == 200:
token_info_response = json.loads(response.text)
token_info_response
url = base_url + f"/getTopTokenHolders/{token_address}?apiKey=freekey&limit=100"
response = requests.get(url)
if response.status_code == 200:
token_holders_response = json.loads(response.text)
token_holders_df = pd.DataFrame(token_holders_response["holders"])
n_top_holders_list = ['1', '2', '3', '5', '10', '25', '100']
shares_list = [round(token_holders_df["share"].values[:int(n)].sum(),2) for n in n_top_holders_list]
plt.figure(figsize=(12,6))
ax = sns.barplot(x=n_top_holders_list, y=shares_list, alpha=0.8, color=color[1])
ax.bar_label(ax.containers[0])
# plt.xticks(rotation='vertical')
plt.xlabel('Top N Wallets', fontsize=12)
plt.ylabel('Cumulative percentage of Token share', fontsize=12)
plt.title('Percentage of tokens hodl by top N wallets', fontsize=16)
plt.show()
url = base_url + f"/getTokenPriceHistoryGrouped/{token_address}?apiKey=freekey&period=90"
response = requests.get(url)
if response.status_code == 200:
token_price_history_response = json.loads(response.text)
def get_date_from_id(value):
return datetime.date(value["year"], value["month"], value["day"])
token_price_history_df = pd.DataFrame(token_price_history_response["history"]["countTxs"])
token_price_history_df["date"] = token_price_history_df["_id"].apply(lambda x: get_date_from_id(x))
plt.figure(figsize=(25,6))
ax = sns.barplot(x=token_price_history_df["date"], y=token_price_history_df["cnt"], alpha=0.8, color=color[1])
ax.bar_label(ax.containers[0])
plt.xticks(rotation='vertical')
plt.xlabel('Date', fontsize=12)
plt.ylabel('Number of transactions', fontsize=12)
plt.title('Number of transactions in the last 90 days', fontsize=16)
plt.show()
url = base_url + f"/getTokenHistory/{token_address}?apiKey=freekey&limit=100"
response = requests.get(url)
if response.status_code == 200:
token_transaction_history_response = json.loads(response.text)
token_transaction_history_df = pd.DataFrame(token_transaction_history_response["operations"])
token_transaction_history_df["date"] = token_transaction_history_df["timestamp"].apply(lambda x: datetime.datetime.fromtimestamp(x).date())
cutoff_date = (datetime.datetime.today() - datetime.timedelta(days=5)).date()
# print(cutoff_date)
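# getTokenHistory returns at most 100 operations per call; page backwards by
# passing the oldest timestamp seen minus one until the 5-day cutoff is
# crossed.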
for _ in range(100):
min_df_date = token_transaction_history_df["date"].min()
offset_time = token_transaction_history_df["timestamp"].values[-1] - 1
# print(min_df_date, offset_time)
if min_df_date < cutoff_date:
break
else:
time.sleep(3) # Honor the free limit
url = base_url + f"/getTokenHistory/{token_address}?apiKey=freekey&limit=100×tamp={offset_time}"
response = requests.get(url)
if response.status_code == 200:
temp_token_transaction_history_response = json.loads(response.text)
temp_token_transaction_history_df = pd.DataFrame(temp_token_transaction_history_response["operations"])
temp_token_transaction_history_df["date"] = temp_token_transaction_history_df["timestamp"].apply(lambda x: datetime.datetime.fromtimestamp(x).date())
token_transaction_history_df = pd.concat([token_transaction_history_df, temp_token_transaction_history_df])
tth_df = token_transaction_history_df[token_transaction_history_df["date"] >= cutoff_date]
tth_df = tth_df[tth_df["date"] < datetime.date.today()].reset_index(drop=True)
tth_df["value"] = tth_df["value"].astype(int)
temp_df = tth_df.groupby("date")["value"].sum().reset_index()
decimal_value = int(tth_df["tokenInfo"].values[0]["decimals"])
temp_df["value"] = temp_df["value"] / (10**decimal_value)
plt.figure(figsize=(12,6))
ax = sns.barplot(x=temp_df["date"], y=temp_df["value"], alpha=0.8, color=color[1])
ax.bar_label(ax.containers[0])
plt.xticks(rotation='vertical')
plt.xlabel('Date', fontsize=12)
plt.ylabel('Number of Tokens Transferred', fontsize=12)
plt.title('Number of Tokens Transferred in the last 5 days', fontsize=16)
plt.show()
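# Unique wallets per day: the union of 'from' and 'to' addresses among that
# day's transfer operations.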
date_value_list = []
total_addresses_list = []
for day_value in range(1, 6):
date_value = datetime.date.today() - datetime.timedelta(days=day_value)
temp_df = tth_df[tth_df['date'] == date_value]
from_list = list(temp_df['from'].values)
to_list = list(temp_df['to'].values)
total_addresses = len(set(from_list + to_list))
date_value_list.append(date_value)
total_addresses_list.append(total_addresses)
plt.figure(figsize=(12, 6))
ax = sns.barplot(x=date_value_list[::-1], y=total_addresses_list[::-1], alpha=0.8, color=color[1])
ax.bar_label(ax.containers[0])
plt.xticks(rotation='vertical')
plt.xlabel('Date', fontsize=12)
plt.ylabel('Number of unique addresses involved in transfers', fontsize=12)
plt.title('Number of unique addresses in the last 5 days', fontsize=16)
plt.show() | code |
106201341/cell_14 | [
"text_plain_output_1.png"
] | import datetime
import json
import matplotlib.pyplot as plt
import pandas as pd
import requests
import seaborn as sns
import time
color = sns.color_palette()  # plot palette (the bar plots below index color[1]); sns.color_palette() is a reasonable default
token_address = '0xd7efb00d12c2c13131fd319336fdf952525da2af'
base_url = 'https://api.ethplorer.io'
url = base_url + f'/getTokenInfo/{token_address}?apiKey=freekey'
response = requests.get(url)
if response.status_code == 200:
token_info_response = json.loads(response.text)
token_info_response
url = base_url + f"/getTopTokenHolders/{token_address}?apiKey=freekey&limit=100"
response = requests.get(url)
if response.status_code == 200:
token_holders_response = json.loads(response.text)
token_holders_df = pd.DataFrame(token_holders_response["holders"])
n_top_holders_list = ['1', '2', '3', '5', '10', '25', '100']
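# Cumulative token share held by the top 1, 2, 3, ... wallets (this assumes the API returns holders sorted by descending share)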
shares_list = [round(token_holders_df["share"].values[:int(n)].sum(),2) for n in n_top_holders_list]
plt.figure(figsize=(12,6))
ax = sns.barplot(x=n_top_holders_list, y=shares_list, alpha=0.8, color=color[1])
ax.bar_label(ax.containers[0])
# plt.xticks(rotation='vertical')
plt.xlabel('Top N Wallets', fontsize=12)
plt.ylabel('Cumulative percentage of Token share', fontsize=12)
plt.title('Percentage of tokens hodl by top N wallets', fontsize=16)
plt.show()
url = base_url + f"/getTokenPriceHistoryGrouped/{token_address}?apiKey=freekey&period=90"
response = requests.get(url)
if response.status_code == 200:
token_price_history_response = json.loads(response.text)
def get_date_from_id(value):
return datetime.date(value["year"], value["month"], value["day"])
token_price_history_df = pd.DataFrame(token_price_history_response["history"]["countTxs"])
token_price_history_df["date"] = token_price_history_df["_id"].apply(lambda x: get_date_from_id(x))
plt.figure(figsize=(25,6))
ax = sns.barplot(x=token_price_history_df["date"], y=token_price_history_df["cnt"], alpha=0.8, color=color[1])
ax.bar_label(ax.containers[0])
plt.xticks(rotation='vertical')
plt.xlabel('Date', fontsize=12)
plt.ylabel('Number of transactions', fontsize=12)
plt.title('Number of transactions in the last 90 days', fontsize=16)
plt.show()
url = base_url + f'/getTokenHistory/{token_address}?apiKey=freekey&limit=100'
response = requests.get(url)
if response.status_code == 200:
token_transaction_history_response = json.loads(response.text)
token_transaction_history_df = pd.DataFrame(token_transaction_history_response['operations'])
token_transaction_history_df['date'] = token_transaction_history_df['timestamp'].apply(lambda x: datetime.datetime.fromtimestamp(x).date())
cutoff_date = (datetime.datetime.today() - datetime.timedelta(days=5)).date()
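# Same backwards pagination as above: keep fetching pages of 100 older operations until the 5-day cutoff is crossed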
for _ in range(100):
min_df_date = token_transaction_history_df['date'].min()
offset_time = token_transaction_history_df['timestamp'].values[-1] - 1
if min_df_date < cutoff_date:
break
else:
time.sleep(3)
        url = base_url + f'/getTokenHistory/{token_address}?apiKey=freekey&limit=100&timestamp={offset_time}'
response = requests.get(url)
if response.status_code == 200:
temp_token_transaction_history_response = json.loads(response.text)
temp_token_transaction_history_df = pd.DataFrame(temp_token_transaction_history_response['operations'])
temp_token_transaction_history_df['date'] = temp_token_transaction_history_df['timestamp'].apply(lambda x: datetime.datetime.fromtimestamp(x).date())
token_transaction_history_df = pd.concat([token_transaction_history_df, temp_token_transaction_history_df])
tth_df = token_transaction_history_df[token_transaction_history_df['date'] >= cutoff_date]
tth_df = tth_df[tth_df['date'] < datetime.date.today()].reset_index(drop=True)
tth_df['value'] = tth_df['value'].astype(int)
temp_df = tth_df.groupby('date')['value'].sum().reset_index()
decimal_value = int(tth_df['tokenInfo'].values[0]['decimals'])
temp_df['value'] = temp_df['value'] / 10 ** decimal_value
plt.figure(figsize=(12, 6))
ax = sns.barplot(x=temp_df['date'], y=temp_df['value'], alpha=0.8, color=color[1])
ax.bar_label(ax.containers[0])
plt.xticks(rotation='vertical')
plt.xlabel('Date', fontsize=12)
plt.ylabel('Number of Tokens Transferred', fontsize=12)
plt.title('Number of Tokens Transferred in the last 5 days', fontsize=16)
plt.show() | code |
106201341/cell_5 | [
"image_output_1.png"
] | import json
import requests
token_address = '0xd7efb00d12c2c13131fd319336fdf952525da2af'
base_url = 'https://api.ethplorer.io'
url = base_url + f'/getTokenInfo/{token_address}?apiKey=freekey'
response = requests.get(url)
if response.status_code == 200:
token_info_response = json.loads(response.text)
token_info_response | code |
18100844/cell_21 | [
"text_plain_output_1.png"
] | from IPython.display import Image
import os
!ls ../input/
Image("../input/charts1/chart2.png") | code |
18100844/cell_13 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from warnings import simplefilter
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/classification/data.csv')
data.drop(['id', 'Unnamed: 32'], axis=1, inplace=True)
data.diagnosis = [1 if each == 'M' else 0 for each in data.diagnosis]
y = data.diagnosis.values
x_data = data.drop(['diagnosis'], axis=1)
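# Min-max scale every feature to [0, 1]; KNN is distance-based, so features on large scales would otherwise dominate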
x = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data))
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=8)
knn.fit(x_train, y_train)
prediction = knn.predict(x_test)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=1)
from sklearn.svm import SVC
from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
svm = SVC(random_state=1)
svm.fit(x_train, y_train)
from sklearn.naive_bayes import GaussianNB
nb = GaussianNB()
nb.fit(x_train, y_train)
print('accuracy of naive bayes algorithm: ', nb.score(x_test, y_test)) | code
18100844/cell_9 | [
"image_output_1.png"
] | from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/classification/data.csv')
data.drop(['id', 'Unnamed: 32'], axis=1, inplace=True)
M = data[data['diagnosis'] == 'M']
B = data[data['diagnosis'] == 'B']
score_list = []
for each in range(1, 15):
knn2 = KNeighborsClassifier(n_neighbors=each)
knn2.fit(x_train, y_train)
current_score = knn2.score(x_test, y_test)
score_list.append(current_score)
knn3 = KNeighborsClassifier(n_neighbors=4)
knn3.fit(x_train, y_train)
current_score = knn3.score(x_test, y_test)
current_score | code |
18100844/cell_34 | [
"text_plain_output_1.png"
] | from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/classification/data.csv')
data.drop(['id', 'Unnamed: 32'], axis=1, inplace=True)
M = data[data['diagnosis'] == 'M']
B = data[data['diagnosis'] == 'B']
data.diagnosis = [1 if each == 'M' else 0 for each in data.diagnosis]
y = data.diagnosis.values
x_data = data.drop(['diagnosis'], axis=1)
x = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data))
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=8)
knn.fit(x_train, y_train)
prediction = knn.predict(x_test)
score_list = []
for each in range(1, 15):
knn2 = KNeighborsClassifier(n_neighbors=each)
knn2.fit(x_train, y_train)
current_score = knn2.score(x_test, y_test)
score_list.append(current_score)
pima = pd.read_csv('../input/pima-indians-diabetes-database/diabetes.csv')
pima['Pregnancies'] = pima['Pregnancies'].astype('float')
feature_cols = ['pregnant', 'insulin', 'bmi', 'age', 'glucose', 'bp', 'pedigree']
y = pima.Outcome
X = pima.drop(['Outcome'], axis=1)
clf = DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
bc = load_breast_cancer()
X = bc.data
y = bc.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
decision_tree = DecisionTreeClassifier()
random_forest = RandomForestClassifier(n_estimators=100)
decision_tree.fit(X_train, y_train)
random_forest.fit(X_train, y_train)
dt_pred = decision_tree.predict(X_test)
rf_pred = random_forest.predict(X_test)
dt_cm = confusion_matrix(y_test, dt_pred)
rf_cm = confusion_matrix(y_test, rf_pred)
data = pd.read_csv('../input/classification/data.csv')
data.drop(['id', 'Unnamed: 32'], axis=1, inplace=True)
data.diagnosis = [1 if each == 'M' else 0 for each in data.diagnosis]
y = data.diagnosis.values
x_data = data.drop(['diagnosis'], axis=1)
import numpy as np
x = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data))
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.15, random_state=42)
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=100, random_state=1)
rf.fit(x_train, y_train)
y_pred = rf.predict(x_test)
y_true = y_test
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_true, y_pred)
import seaborn as sns
import matplotlib.pyplot as plt
f,ax=plt.subplots(figsize=(5,5))
sns.heatmap(cm,annot=True,linewidth=0.5,linecolor='red',fmt='.0f',ax=ax)
plt.xlabel('y_pred')
plt.ylabel('y_true')
plt.show()
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
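# Generate three 2-D Gaussian blobs (1000 points each) as toy data for k-means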
x1 = np.random.normal(25, 5, 1000)
y1 = np.random.normal(25, 5, 1000)
x2 = np.random.normal(55, 5, 1000)
y2 = np.random.normal(60, 5, 1000)
x3 = np.random.normal(55, 5, 1000)
y3 = np.random.normal(15, 5, 1000)
x = np.concatenate((x1, x2, x3), axis=0)
y = np.concatenate((y1, y2, y3), axis=0)
dictionary = {'x': x, 'y': y}
data = pd.DataFrame(dictionary)
plt.scatter(x1, y1, color='black')
plt.scatter(x2, y2, color='black')
plt.scatter(x3, y3, color='black')
plt.show() | code |
18100844/cell_30 | [
"text_html_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import confusion_matrix
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/classification/data.csv')
data.drop(['id', 'Unnamed: 32'], axis=1, inplace=True)
M = data[data['diagnosis'] == 'M']
B = data[data['diagnosis'] == 'B']
score_list = []
for each in range(1, 15):
knn2 = KNeighborsClassifier(n_neighbors=each)
knn2.fit(x_train, y_train)
current_score = knn2.score(x_test, y_test)
score_list.append(current_score)
clf = DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=100, random_state=1)
rf.fit(x_train, y_train)
y_pred = rf.predict(x_test)
y_true = y_test
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_true, y_pred)
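# Confusion matrix: rows are true labels, columns are predictions; visualized as a heatmap below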
import seaborn as sns
import matplotlib.pyplot as plt
f, ax = plt.subplots(figsize=(5, 5))
sns.heatmap(cm, annot=True, linewidth=0.5, linecolor='red', fmt='.0f', ax=ax)
plt.xlabel('y_pred')
plt.ylabel('y_true')
plt.show() | code |
18100844/cell_20 | [
"text_plain_output_1.png"
] | clf = DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print('Accuracy:', metrics.accuracy_score(y_test, y_pred)) | code |
18100844/cell_6 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/classification/data.csv')
data.drop(['id', 'Unnamed: 32'], axis=1, inplace=True)
M = data[data['diagnosis'] == 'M']
B = data[data['diagnosis'] == 'B']
plt.scatter(M.radius_mean, M.texture_mean, color='red', label='Malignant')
plt.scatter(B.radius_mean, B.texture_mean, color='green', label='Benign')
plt.xlabel('radius mean')
plt.ylabel('texture mean')
plt.legend()
plt.show() | code |
18100844/cell_2 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import os
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
print(os.listdir('../input')) | code |
18100844/cell_1 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from IPython.display import Image
import os
!ls ../input/
Image("../input/charts1/MachL.png") | code |
18100844/cell_7 | [
"image_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/classification/data.csv')
data.drop(['id', 'Unnamed: 32'], axis=1, inplace=True)
data.diagnosis = [1 if each == 'M' else 0 for each in data.diagnosis]
y = data.diagnosis.values
x_data = data.drop(['diagnosis'], axis=1)
x = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data))
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=8)
knn.fit(x_train, y_train)
prediction = knn.predict(x_test)
print('{} nn score: {}'.format(8, knn.score(x_test, y_test))) | code |
18100844/cell_28 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=100, random_state=1)
rf.fit(x_train, y_train)
print('Random Forest Classification score: ', rf.score(x_test, y_test))
y_pred = rf.predict(x_test)
y_true = y_test | code |
18100844/cell_8 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/classification/data.csv')
data.drop(['id', 'Unnamed: 32'], axis=1, inplace=True)
M = data[data['diagnosis'] == 'M']
B = data[data['diagnosis'] == 'B']
score_list = []
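# Sweep k from 1 to 14 and record test accuracy to pick the number of neighbors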
for each in range(1, 15):
knn2 = KNeighborsClassifier(n_neighbors=each)
knn2.fit(x_train, y_train)
current_score = knn2.score(x_test, y_test)
score_list.append(current_score)
plt.plot(range(1, 15), score_list)
plt.xlabel('k values')
plt.ylabel('accuracy') | code |
18100844/cell_38 | [
"text_plain_output_1.png"
] | from sklearn.cluster import KMeans
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/classification/data.csv')
data.drop(['id', 'Unnamed: 32'], axis=1, inplace=True)
M = data[data['diagnosis'] == 'M']
B = data[data['diagnosis'] == 'B']
data.diagnosis = [1 if each == 'M' else 0 for each in data.diagnosis]
y = data.diagnosis.values
x_data = data.drop(['diagnosis'], axis=1)
x = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data))
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=8)
knn.fit(x_train, y_train)
prediction = knn.predict(x_test)
score_list = []
for each in range(1, 15):
knn2 = KNeighborsClassifier(n_neighbors=each)
knn2.fit(x_train, y_train)
current_score = knn2.score(x_test, y_test)
score_list.append(current_score)
pima = pd.read_csv('../input/pima-indians-diabetes-database/diabetes.csv')
pima['Pregnancies'] = pima['Pregnancies'].astype('float')
feature_cols = ['pregnant', 'insulin', 'bmi', 'age', 'glucose', 'bp', 'pedigree']
y = pima.Outcome
X = pima.drop(['Outcome'], axis=1)
clf = DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
bc = load_breast_cancer()
X = bc.data
y = bc.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
decision_tree = DecisionTreeClassifier()
random_forest = RandomForestClassifier(n_estimators=100)
decision_tree.fit(X_train, y_train)
random_forest.fit(X_train, y_train)
dt_pred = decision_tree.predict(X_test)
rf_pred = random_forest.predict(X_test)
dt_cm = confusion_matrix(y_test, dt_pred)
rf_cm = confusion_matrix(y_test, rf_pred)
data = pd.read_csv('../input/classification/data.csv')
data.drop(['id', 'Unnamed: 32'], axis=1, inplace=True)
data.diagnosis = [1 if each == 'M' else 0 for each in data.diagnosis]
y = data.diagnosis.values
x_data = data.drop(['diagnosis'], axis=1)
import numpy as np
x = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data))
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.15, random_state=42)
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=100, random_state=1)
rf.fit(x_train, y_train)
y_pred = rf.predict(x_test)
y_true = y_test
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_true, y_pred)
import seaborn as sns
import matplotlib.pyplot as plt
f,ax=plt.subplots(figsize=(5,5))
sns.heatmap(cm,annot=True,linewidth=0.5,linecolor='red',fmt='.0f',ax=ax)
plt.xlabel('y_pred')
plt.ylabel('y_true')
plt.show()
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
x1 = np.random.normal(25, 5, 1000)
y1 = np.random.normal(25, 5, 1000)
x2 = np.random.normal(55, 5, 1000)
y2 = np.random.normal(60, 5, 1000)
x3 = np.random.normal(55, 5, 1000)
y3 = np.random.normal(15, 5, 1000)
x = np.concatenate((x1, x2, x3), axis=0)
y = np.concatenate((y1, y2, y3), axis=0)
dictionary = {'x': x, 'y': y}
data = pd.DataFrame(dictionary)
from sklearn.cluster import KMeans
wcss = []
for k in range(1, 15):
kmeans = KMeans(n_clusters=k)
kmeans.fit(data)
wcss.append(kmeans.inertia_)
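# Fit the final model with k = 3 (matching the three generated blobs) and assign each point a cluster label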
kmeans2 = KMeans(n_clusters=3)
clusters = kmeans2.fit_predict(data)
print(clusters[:20])
data['label'] = clusters
plt.scatter(data.x[data.label == 0], data.y[data.label == 0], color='red')
plt.scatter(data.x[data.label == 1], data.y[data.label == 1], color='blue')
plt.scatter(data.x[data.label == 2], data.y[data.label == 2], color='green')
plt.scatter(kmeans2.cluster_centers_[:, 0], kmeans2.cluster_centers_[:, 1], color='yellow') | code |
18100844/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/classification/data.csv')
data.drop(['id', 'Unnamed: 32'], axis=1, inplace=True)
pima = pd.read_csv('../input/pima-indians-diabetes-database/diabetes.csv')
pima['Pregnancies'] = pima['Pregnancies'].astype('float')
pima.head() | code |
18100844/cell_35 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/classification/data.csv')
data.drop(['id', 'Unnamed: 32'], axis=1, inplace=True)
M = data[data['diagnosis'] == 'M']
B = data[data['diagnosis'] == 'B']
data.diagnosis = [1 if each == 'M' else 0 for each in data.diagnosis]
y = data.diagnosis.values
x_data = data.drop(['diagnosis'], axis=1)
x = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data))
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=8)
knn.fit(x_train, y_train)
prediction = knn.predict(x_test)
score_list = []
for each in range(1, 15):
knn2 = KNeighborsClassifier(n_neighbors=each)
knn2.fit(x_train, y_train)
current_score = knn2.score(x_test, y_test)
score_list.append(current_score)
pima = pd.read_csv('../input/pima-indians-diabetes-database/diabetes.csv')
pima['Pregnancies'] = pima['Pregnancies'].astype('float')
feature_cols = ['pregnant', 'insulin', 'bmi', 'age', 'glucose', 'bp', 'pedigree']
y = pima.Outcome
X = pima.drop(['Outcome'], axis=1)
clf = DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
bc = load_breast_cancer()
X = bc.data
y = bc.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
decision_tree = DecisionTreeClassifier()
random_forest = RandomForestClassifier(n_estimators=100)
decision_tree.fit(X_train, y_train)
random_forest.fit(X_train, y_train)
dt_pred = decision_tree.predict(X_test)
rf_pred = random_forest.predict(X_test)
dt_cm = confusion_matrix(y_test, dt_pred)
rf_cm = confusion_matrix(y_test, rf_pred)
data = pd.read_csv('../input/classification/data.csv')
data.drop(['id', 'Unnamed: 32'], axis=1, inplace=True)
data.diagnosis = [1 if each == 'M' else 0 for each in data.diagnosis]
y = data.diagnosis.values
x_data = data.drop(['diagnosis'], axis=1)
import numpy as np
x = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data))
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.15, random_state=42)
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=100, random_state=1)
rf.fit(x_train, y_train)
y_pred = rf.predict(x_test)
y_true = y_test
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_true, y_pred)
import seaborn as sns
import matplotlib.pyplot as plt
f,ax=plt.subplots(figsize=(5,5))
sns.heatmap(cm,annot=True,linewidth=0.5,linecolor='red',fmt='.0f',ax=ax)
plt.xlabel('y_pred')
plt.ylabel('y_true')
plt.show()
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
x1 = np.random.normal(25, 5, 1000)
y1 = np.random.normal(25, 5, 1000)
x2 = np.random.normal(55, 5, 1000)
y2 = np.random.normal(60, 5, 1000)
x3 = np.random.normal(55, 5, 1000)
y3 = np.random.normal(15, 5, 1000)
x = np.concatenate((x1, x2, x3), axis=0)
y = np.concatenate((y1, y2, y3), axis=0)
dictionary = {'x': x, 'y': y}
data = pd.DataFrame(dictionary)
data.head() | code |
18100844/cell_24 | [
"text_plain_output_1.png"
] | from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/classification/data.csv')
data.drop(['id', 'Unnamed: 32'], axis=1, inplace=True)
data.diagnosis = [1 if each == 'M' else 0 for each in data.diagnosis]
y = data.diagnosis.values
x_data = data.drop(['diagnosis'], axis=1)
x = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data))
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=8)
knn.fit(x_train, y_train)
prediction = knn.predict(x_test)
pima = pd.read_csv('../input/pima-indians-diabetes-database/diabetes.csv')
pima['Pregnancies'] = pima['Pregnancies'].astype('float')
feature_cols = ['pregnant', 'insulin', 'bmi', 'age', 'glucose', 'bp', 'pedigree']
y = pima.Outcome
X = pima.drop(['Outcome'], axis=1)
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
bc = load_breast_cancer()
X = bc.data
y = bc.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
decision_tree = DecisionTreeClassifier()
random_forest = RandomForestClassifier(n_estimators=100)
decision_tree.fit(X_train, y_train)
random_forest.fit(X_train, y_train)
dt_pred = decision_tree.predict(X_test)
rf_pred = random_forest.predict(X_test)
print('Decision Tree Model')
print(classification_report(y_test, dt_pred, target_names=bc.target_names))
print('Random Forest Model')
print(classification_report(y_test, rf_pred, target_names=bc.target_names))
dt_cm = confusion_matrix(y_test, dt_pred)
rf_cm = confusion_matrix(y_test, rf_pred) | code |
18100844/cell_14 | [
"image_output_1.png"
] | from IPython.display import Image
import os
!ls ../input/
Image("../input/charts1/chart.png") | code |
18100844/cell_12 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from warnings import simplefilter
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/classification/data.csv')
data.drop(['id', 'Unnamed: 32'], axis=1, inplace=True)
data.diagnosis = [1 if each == 'M' else 0 for each in data.diagnosis]
y = data.diagnosis.values
x_data = data.drop(['diagnosis'], axis=1)
x = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data))
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=8)
knn.fit(x_train, y_train)
prediction = knn.predict(x_test)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=1)
from sklearn.svm import SVC
from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
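# SVC defaults to the RBF kernel; the FutureWarning filter above silences scikit-learn's notice about gamma's changing default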
svm = SVC(random_state=1)
svm.fit(x_train, y_train)
print('accuracy of svm algorithm: ', svm.score(x_test, y_test)) | code |
18100844/cell_5 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/classification/data.csv')
data.drop(['id', 'Unnamed: 32'], axis=1, inplace=True)
data.head() | code |
18100844/cell_36 | [
"text_plain_output_1.png"
] | from sklearn.cluster import KMeans
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/classification/data.csv')
data.drop(['id', 'Unnamed: 32'], axis=1, inplace=True)
M = data[data['diagnosis'] == 'M']
B = data[data['diagnosis'] == 'B']
data.diagnosis = [1 if each == 'M' else 0 for each in data.diagnosis]
y = data.diagnosis.values
x_data = data.drop(['diagnosis'], axis=1)
x = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data))
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=8)
knn.fit(x_train, y_train)
prediction = knn.predict(x_test)
score_list = []
for each in range(1, 15):
knn2 = KNeighborsClassifier(n_neighbors=each)
knn2.fit(x_train, y_train)
current_score = knn2.score(x_test, y_test)
score_list.append(current_score)
pima = pd.read_csv('../input/pima-indians-diabetes-database/diabetes.csv')
pima['Pregnancies'] = pima['Pregnancies'].astype('float')
feature_cols = ['pregnant', 'insulin', 'bmi', 'age', 'glucose', 'bp', 'pedigree']
y = pima.Outcome
X = pima.drop(['Outcome'], axis=1)
clf = DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
bc = load_breast_cancer()
X = bc.data
y = bc.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
decision_tree = DecisionTreeClassifier()
random_forest = RandomForestClassifier(n_estimators=100)
decision_tree.fit(X_train, y_train)
random_forest.fit(X_train, y_train)
dt_pred = decision_tree.predict(X_test)
rf_pred = random_forest.predict(X_test)
dt_cm = confusion_matrix(y_test, dt_pred)
rf_cm = confusion_matrix(y_test, rf_pred)
data = pd.read_csv('../input/classification/data.csv')
data.drop(['id', 'Unnamed: 32'], axis=1, inplace=True)
data.diagnosis = [1 if each == 'M' else 0 for each in data.diagnosis]
y = data.diagnosis.values
x_data = data.drop(['diagnosis'], axis=1)
import numpy as np
x = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data))
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.15, random_state=42)
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=100, random_state=1)
rf.fit(x_train, y_train)
y_pred = rf.predict(x_test)
y_true = y_test
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_true, y_pred)
import seaborn as sns
import matplotlib.pyplot as plt
f,ax=plt.subplots(figsize=(5,5))
sns.heatmap(cm,annot=True,linewidth=0.5,linecolor='red',fmt='.0f',ax=ax)
plt.xlabel('y_pred')
plt.ylabel('y_true')
plt.show()
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
x1 = np.random.normal(25, 5, 1000)
y1 = np.random.normal(25, 5, 1000)
x2 = np.random.normal(55, 5, 1000)
y2 = np.random.normal(60, 5, 1000)
x3 = np.random.normal(55, 5, 1000)
y3 = np.random.normal(15, 5, 1000)
x = np.concatenate((x1, x2, x3), axis=0)
y = np.concatenate((y1, y2, y3), axis=0)
dictionary = {'x': x, 'y': y}
data = pd.DataFrame(dictionary)
from sklearn.cluster import KMeans
wcss = []
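# Elbow method: record the within-cluster sum of squares (inertia) for k = 1..14 and look for the bend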
for k in range(1, 15):
kmeans = KMeans(n_clusters=k)
kmeans.fit(data)
wcss.append(kmeans.inertia_)
plt.plot(range(1, 15), wcss)
plt.xlabel('number of k(cluster value)')
plt.ylabel('wcss')
plt.show() | code |
33101395/cell_13 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
tests = '../input/students-performance-in-exams/StudentsPerformance.csv'
test1 = pd.read_csv(tests, sep=',')
testscores = pd.DataFrame(test1)
testscores.info
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.rcParams.update({'font.size': 14})
plt.figure(figsize=(10, 10))
plt.title('Reading Score Avg by Test Preparation Course')
plt.rcParams.update({'font.size': 14})
sns.barplot(testscores['test preparation course'], testscores['reading score'], data=testscores) | code |
33101395/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
tests = '../input/students-performance-in-exams/StudentsPerformance.csv'
test1 = pd.read_csv(tests, sep=',')
testscores = pd.DataFrame(test1)
testscores.info
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.figure(figsize=(10, 10))
plt.xticks(rotation=90)
plt.rcParams.update({'font.size': 14})
plt.title('Reading Scores for race/ethnicity')
sns.barplot(testscores['race/ethnicity'], testscores['reading score'], data=testscores) | code |
33101395/cell_4 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
tests = '../input/students-performance-in-exams/StudentsPerformance.csv'
test1 = pd.read_csv(tests, sep=',')
testscores = pd.DataFrame(test1)
testscores.info
type(testscores) | code |
33101395/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
tests = '../input/students-performance-in-exams/StudentsPerformance.csv'
test1 = pd.read_csv(tests, sep=',')
testscores = pd.DataFrame(test1)
testscores.info
plt.figure(figsize=(15, 2))
plt.xticks(rotation=90)
plt.title('Math Male to Female Testing')
sns.scatterplot(testscores['math score'], testscores['gender'])
plt.show() | code |
33101395/cell_11 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
tests = '../input/students-performance-in-exams/StudentsPerformance.csv'
test1 = pd.read_csv(tests, sep=',')
testscores = pd.DataFrame(test1)
testscores.info
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.rcParams.update({'font.size': 14})
sns.catplot('gender', 'reading score', 'race/ethnicity', kind='bar', data=testscores) | code |
33101395/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib as mlp
import matplotlib.pyplot as plt
import seaborn as sns
import glob
from sklearn import tree
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
33101395/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
tests = '../input/students-performance-in-exams/StudentsPerformance.csv'
test1 = pd.read_csv(tests, sep=',')
testscores = pd.DataFrame(test1)
testscores.info
plt.xticks(rotation=90)
plt.figure(figsize=(15, 2))
plt.xticks(rotation=90)
plt.title('Reading Male to Female Testing')
sns.scatterplot(testscores['reading score'], testscores['gender'])
plt.show() | code |
33101395/cell_8 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
tests = '../input/students-performance-in-exams/StudentsPerformance.csv'
test1 = pd.read_csv(tests, sep=',')
testscores = pd.DataFrame(test1)
testscores.info
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.figure(figsize=(15, 2))
plt.xticks(rotation=90)
plt.title('Writing Male to female testing')
sns.scatterplot(testscores['writing score'], testscores['gender'])
plt.show() | code |
33101395/cell_15 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
tests = '../input/students-performance-in-exams/StudentsPerformance.csv'
test1 = pd.read_csv(tests, sep=',')
testscores = pd.DataFrame(test1)
testscores.info
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.rcParams.update({'font.size': 14})
plt.rcParams.update({'font.size': 14})
plt.rcParams.update({'font.size': 14})
plt.figure(figsize=(10, 10))
plt.title('Writing Score Avg by Test Preparation Course')
plt.rcParams.update({'font.size': 14})
sns.barplot(testscores['test preparation course'], testscores['writing score'], data=testscores) | code |
33101395/cell_16 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
tests = '../input/students-performance-in-exams/StudentsPerformance.csv'
test1 = pd.read_csv(tests, sep=',')
testscores = pd.DataFrame(test1)
testscores.info
testscores_lunch_free = testscores.loc[testscores['lunch'] == 'free/reduced']
testscores_lunch_free | code |
33101395/cell_3 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
tests = '../input/students-performance-in-exams/StudentsPerformance.csv'
test1 = pd.read_csv(tests, sep=',')
testscores = pd.DataFrame(test1)
testscores.info | code |
33101395/cell_14 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
tests = '../input/students-performance-in-exams/StudentsPerformance.csv'
test1 = pd.read_csv(tests, sep=',')
testscores = pd.DataFrame(test1)
testscores.info
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.rcParams.update({'font.size': 14})
plt.rcParams.update({'font.size': 14})
plt.figure(figsize=(10, 10))
plt.title('Math Score Avg by Test Preparation Course')
plt.rcParams.update({'font.size': 14})
sns.barplot(testscores['test preparation course'], testscores['math score'], data=testscores) | code |
33101395/cell_10 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
tests = '../input/students-performance-in-exams/StudentsPerformance.csv'
test1 = pd.read_csv(tests, sep=',')
testscores = pd.DataFrame(test1)
testscores.info
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.rcParams.update({'font.size': 14})
sns.catplot('gender', 'math score', 'race/ethnicity', kind='bar', data=testscores) | code |
33101395/cell_12 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
tests = '../input/students-performance-in-exams/StudentsPerformance.csv'
test1 = pd.read_csv(tests, sep=',')
testscores = pd.DataFrame(test1)
testscores.info
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
plt.rcParams.update({'font.size': 14})
sns.catplot('gender', 'writing score', 'race/ethnicity', kind='bar', data=testscores) | code |
33101395/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
tests = '../input/students-performance-in-exams/StudentsPerformance.csv'
test1 = pd.read_csv(tests, sep=',')
testscores = pd.DataFrame(test1)
testscores.info
testscores | code |
18111545/cell_21 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression
pd.set_option('max_rows', 1200)
pd.set_option('max_columns', 1000)
cr = pd.read_csv('../input/Loan payments data.csv')
cr.profile_report(style={'full_width': True})
cr.fillna('0', axis=1, inplace=True)
cr.sample(10)
cr.head().append(cr.tail()).append(cr.sample(10)) | code |
18111545/cell_13 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression
pd.set_option('max_rows', 1200)
pd.set_option('max_columns', 1000)
cr = pd.read_csv('../input/Loan payments data.csv')
cr.profile_report(style={'full_width': True})
cr.fillna('0', axis=1, inplace=True)
cr.sample(10)
plt.figure(figsize=(60, 20))
sns.factorplot(data=cr, x='loan_status', y='age', hue='Gender', col='education', kind='box', order=['PAIDOFF', 'COLLECTION_PAIDOFF', 'COLLECTION'], aspect=1.5)
plt.show() | code |
18111545/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression
pd.set_option('max_rows', 1200)
pd.set_option('max_columns', 1000)
cr = pd.read_csv('../input/Loan payments data.csv')
cr.profile_report(style={'full_width': True})
cr.fillna('0', axis=1, inplace=True)
cr.sample(10)
print(cr['Gender'].value_counts())
print(cr['education'].value_counts())
print(cr['Principal'].value_counts())
print(cr['loan_status'].value_counts())
print(cr['terms'].value_counts()) | code |
18111545/cell_25 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression
pd.set_option('max_rows', 1200)
pd.set_option('max_columns', 1000)
cr = pd.read_csv('../input/Loan payments data.csv')
cr.profile_report(style={'full_width': True})
cr.fillna('0', axis=1, inplace=True)
cr.sample(10)
pd.crosstab(cr['Gender'], cr['past_due_days'], rownames=['gender'], colnames=['Loan paidafter due date'])
pd.crosstab(cr['Gender'], cr['loan_status'], rownames=['gender'], colnames=['loan status'])
pd.crosstab(cr['terms'], cr['loan_status'], rownames=['terms'], colnames=['loan status'])
pd.crosstab(cr['Principal'], cr['loan_status'], rownames=['principal'], colnames=['loan status'])
pd.crosstab(cr['education'], cr['loan_status'], rownames=['education'], colnames=['loan status'])
pd.crosstab(cr['education'], cr['Gender'], rownames=['education'], colnames=['gender'])
pd.crosstab(cr['Gender'], cr['age'], rownames=['gender'], colnames=['age'])
cr['loan_status'].replace('PAIDOFF', 0, inplace=True)
cr['loan_status'].replace('COLLECTION_PAIDOFF', 1, inplace=True)
cr['loan_status'].replace('COLLECTION', 2, inplace=True)
cr.sample(20)
education_dummies = pd.get_dummies(cr.education, prefix='education')
education_dummies.sample(4)
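# Drop the first dummy column to avoid the dummy-variable trap (it is fully determined by the others)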
education_dummies.drop(education_dummies.columns[0], axis=1, inplace=True)
education_dummies.head(5) | code |
18111545/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression
pd.set_option('max_rows', 1200)
pd.set_option('max_columns', 1000)
cr = pd.read_csv('../input/Loan payments data.csv')
cr.profile_report(style={'full_width': True}) | code |
18111545/cell_23 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression
pd.set_option('max_rows', 1200)
pd.set_option('max_columns', 1000)
cr = pd.read_csv('../input/Loan payments data.csv')
cr.profile_report(style={'full_width': True})
cr.fillna('0', axis=1, inplace=True)
cr.sample(10)
pd.crosstab(cr['Gender'], cr['past_due_days'], rownames=['gender'], colnames=['Loan paidafter due date'])
pd.crosstab(cr['Gender'], cr['loan_status'], rownames=['gender'], colnames=['loan status'])
pd.crosstab(cr['terms'], cr['loan_status'], rownames=['terms'], colnames=['loan status'])
pd.crosstab(cr['Principal'], cr['loan_status'], rownames=['principal'], colnames=['loan status'])
pd.crosstab(cr['education'], cr['loan_status'], rownames=['education'], colnames=['loan status'])
pd.crosstab(cr['education'], cr['Gender'], rownames=['education'], colnames=['gender'])
pd.crosstab(cr['Gender'], cr['age'], rownames=['gender'], colnames=['age'])
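# Encode the target ordinally: 0 = paid off on time, 1 = paid off after going to collection, 2 = still in collection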
cr['loan_status'].replace('PAIDOFF', 0, inplace=True)
cr['loan_status'].replace('COLLECTION_PAIDOFF', 1, inplace=True)
cr['loan_status'].replace('COLLECTION', 2, inplace=True)
cr.sample(20)
education_dummies = pd.get_dummies(cr.education, prefix='education')
education_dummies.sample(4) | code |
18111545/cell_20 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression
pd.set_option('max_rows', 1200)
pd.set_option('max_columns', 1000)
cr = pd.read_csv('../input/Loan payments data.csv')
cr.profile_report(style={'full_width': True})
cr.fillna('0', axis=1, inplace=True)
cr.sample(10)
pd.crosstab(cr['Gender'], cr['past_due_days'], rownames=['gender'], colnames=['Loan paidafter due date'])
pd.crosstab(cr['Gender'], cr['loan_status'], rownames=['gender'], colnames=['loan status'])
pd.crosstab(cr['terms'], cr['loan_status'], rownames=['terms'], colnames=['loan status'])
pd.crosstab(cr['Principal'], cr['loan_status'], rownames=['principal'], colnames=['loan status'])
pd.crosstab(cr['education'], cr['loan_status'], rownames=['education'], colnames=['loan status'])
pd.crosstab(cr['education'], cr['Gender'], rownames=['education'], colnames=['gender'])
pd.crosstab(cr['Gender'], cr['age'], rownames=['gender'], colnames=['age']) | code |
18111545/cell_26 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression
pd.set_option('max_rows', 1200)
pd.set_option('max_columns', 1000)
cr = pd.read_csv('../input/Loan payments data.csv')
cr.profile_report(style={'full_width': True})
cr.fillna('0', axis=1, inplace=True)
cr.sample(10)
pd.crosstab(cr['Gender'], cr['past_due_days'], rownames=['gender'], colnames=['Loan paidafter due date'])
pd.crosstab(cr['Gender'], cr['loan_status'], rownames=['gender'], colnames=['loan status'])
pd.crosstab(cr['terms'], cr['loan_status'], rownames=['terms'], colnames=['loan status'])
pd.crosstab(cr['Principal'], cr['loan_status'], rownames=['principal'], colnames=['loan status'])
pd.crosstab(cr['education'], cr['loan_status'], rownames=['education'], colnames=['loan status'])
pd.crosstab(cr['education'], cr['Gender'], rownames=['education'], colnames=['gender'])
pd.crosstab(cr['Gender'], cr['age'], rownames=['gender'], colnames=['age'])
cr['loan_status'].replace('PAIDOFF', 0, inplace=True)
cr['loan_status'].replace('COLLECTION_PAIDOFF', 1, inplace=True)
cr['loan_status'].replace('COLLECTION', 2, inplace=True)
cr.sample(20)
education_dummies = pd.get_dummies(cr.education, prefix='education')
education_dummies.sample(4)
education_dummies.drop(education_dummies.columns[0], axis=1, inplace=True)
cr = pd.concat([cr, education_dummies], axis=1)
cr.head(5) | code |
18111545/cell_11 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression
pd.set_option('max_rows', 1200)
pd.set_option('max_columns', 1000)
cr = pd.read_csv('../input/Loan payments data.csv')
cr.profile_report(style={'full_width': True})
cr.fillna('0', axis=1, inplace=True)
cr.sample(10)
plt.figure(figsize=(15, 10))
sns.boxplot(data=cr, x='loan_status', y='age', hue='education', linewidth=2, order=['PAIDOFF', 'COLLECTION_PAIDOFF', 'COLLECTION'])
plt.show() | code |
18111545/cell_19 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression
pd.set_option('max_rows', 1200)
pd.set_option('max_columns', 1000)
cr = pd.read_csv('../input/Loan payments data.csv')
cr.profile_report(style={'full_width': True})
cr.fillna('0', axis=1, inplace=True)
cr.sample(10)
pd.crosstab(cr['Gender'], cr['past_due_days'], rownames=['gender'], colnames=['Loan paidafter due date'])
pd.crosstab(cr['Gender'], cr['loan_status'], rownames=['gender'], colnames=['loan status'])
pd.crosstab(cr['terms'], cr['loan_status'], rownames=['terms'], colnames=['loan status'])
pd.crosstab(cr['Principal'], cr['loan_status'], rownames=['principal'], colnames=['loan status'])
pd.crosstab(cr['education'], cr['loan_status'], rownames=['education'], colnames=['loan status'])
pd.crosstab(cr['education'], cr['Gender'], rownames=['education'], colnames=['gender']) | code |
18111545/cell_7 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import seaborn as sns
corr = cr.corr()
sns.heatmap(corr, annot=True) | code |