Columns:
path: string (length 13 to 17)
screenshot_names: sequence (length 1 to 873)
code: string (length 0 to 40.4k)
cell_type: string (1 class)
1007485/cell_7
[ "image_output_1.png" ]
import matplotlib.pyplot as plt

data.shape
Color_Count = data.color.value_counts()
idx = range(2)
labels = ['Color', 'Black & White']
plt.xticks(idx, labels)

Director = data.director_name.value_counts()
D_Name = Director.head(n=10).index
New_D = data[data['director_name'].isin(D_Name)]
New_D.pivot_table(index=['director_name', 'imdb_score'], aggfunc='mean')

plt.figure(1, figsize=(12, 6))
plt.subplot(1, 2, 1)
Director.head(n=10).sort_index().plot(kind='bar')
plt.title('Top 10 directors with the most movies')
plt.subplot(1, 2, 2)
New_D.groupby(['director_name'])['imdb_score'].mean().plot(kind='bar')
plt.xlabel('')
plt.title("Top 10 directors' average IMDB scores")
plt.show()

Language = data.language.value_counts()
Language.head(n=10).plot(kind='bar')
plt.title('Top 10 movie languages')
plt.show()
code
1007485/cell_8
[ "image_output_1.png" ]
import matplotlib.pyplot as plt

data.shape
Color_Count = data.color.value_counts()
idx = range(2)
labels = ['Color', 'Black & White']
plt.xticks(idx, labels)

Director = data.director_name.value_counts()
D_Name = Director.head(n=10).index
New_D = data[data['director_name'].isin(D_Name)]
New_D.pivot_table(index=['director_name', 'imdb_score'], aggfunc='mean')

plt.figure(1, figsize=(12, 6))
plt.subplot(1, 2, 1)
Director.head(n=10).sort_index().plot(kind='bar')
plt.title('Top 10 directors with the most movies')
plt.subplot(1, 2, 2)
New_D.groupby(['director_name'])['imdb_score'].mean().plot(kind='bar')
plt.xlabel('')
plt.title("Top 10 directors' average IMDB scores")
plt.show()

Language = data.language.value_counts()
Country = data.country.value_counts()
Country.head(n=10).plot(kind='barh')
plt.title('Top 10 countries that produce movies')
plt.show()
code
1007485/cell_3
[ "image_output_1.png" ]
data.shape
code
1007485/cell_10
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt

data.shape
Color_Count = data.color.value_counts()
idx = range(2)
labels = ['Color', 'Black & White']
plt.xticks(idx, labels)

Director = data.director_name.value_counts()
D_Name = Director.head(n=10).index
New_D = data[data['director_name'].isin(D_Name)]
New_D.pivot_table(index=['director_name', 'imdb_score'], aggfunc='mean')

plt.figure(1, figsize=(12, 6))
plt.subplot(1, 2, 1)
Director.head(n=10).sort_index().plot(kind='bar')
plt.title('Top 10 directors with the most movies')
plt.subplot(1, 2, 2)
New_D.groupby(['director_name'])['imdb_score'].mean().plot(kind='bar')
plt.xlabel('')
plt.title("Top 10 directors' average IMDB scores")
plt.show()

Language = data.language.value_counts()
Country = data.country.value_counts()
score_by_content = data.pivot_table(index=['content_rating'], values='imdb_score', aggfunc='mean')
Contents = data.content_rating.value_counts().sort_index()

Year = data.title_year.value_counts().sort_index().tail(50)
year = range(50)
plt.figure(1, figsize=(12, 6))
loc = range(3, 49, 5)
ticks = range(1970, 2017, 5)
plt.bar(year, Year)
plt.xticks(loc, ticks)
plt.xlabel('Year')
plt.title('Number of movies released in the last 50 years', fontsize=15)
plt.show()
code
1007485/cell_5
[ "image_output_1.png" ]
import matplotlib.pyplot as plt

data.shape
Color_Count = data.color.value_counts()
plt.figure(1, figsize=(6, 6))
idx = range(2)
labels = ['Color', 'Black & White']
plt.bar(idx, Color_Count, width=0.3)
plt.xticks(idx, labels)
plt.show()
code
18104028/cell_4
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/market_data_02.csv')
df.columns

fig, ax = plt.subplots(figsize=(16, 7))
df['descricao'].value_counts().sort_values(ascending=False).head(20).plot.bar(width=0.5, edgecolor='k', align='center', linewidth=1)
plt.xlabel('Product Item', fontsize=20)
plt.ylabel('Number of transactions', fontsize=17)
ax.tick_params(labelsize=20)
plt.title('20 Most Sold Items', fontsize=20)
plt.grid()
plt.ioff()
code
18104028/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from mlxtend.frequent_patterns import apriori
from mlxtend.frequent_patterns import association_rules

df = pd.read_csv('../input/market_data_02.csv')
df.columns

# Pivot to one row per invoice and one column per product description
hot_encoded_df = df.groupby(['nota_fiscal_id', 'descricao'])['descricao'].count().unstack().reset_index().fillna(0).set_index('nota_fiscal_id')
hot_encoded_df.head()
code
18104028/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/market_data_02.csv')
df.head()
df.info()
df.columns
code
18104028/cell_11
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
from mlxtend.frequent_patterns import apriori
from mlxtend.frequent_patterns import association_rules

df = pd.read_csv('../input/market_data_02.csv')
df.columns

# Print the most sold items by transaction
fig, ax = plt.subplots(figsize=(16, 7))
df['descricao'].value_counts().sort_values(ascending=False).head(20).plot.bar(width=0.5, edgecolor='k', align='center', linewidth=1)
plt.xlabel('Product Item', fontsize=20)
plt.ylabel('Number of transactions', fontsize=17)
ax.tick_params(labelsize=20)
plt.title('20 Most Sold Items', fontsize=20)
plt.grid()
plt.ioff()

hot_encoded_df = df.groupby(['nota_fiscal_id', 'descricao'])['descricao'].count().unstack().reset_index().fillna(0).set_index('nota_fiscal_id')

def encode_units(x):
    if x <= 0:
        return 0
    if x >= 1:
        return 1

hot_encoded_df = hot_encoded_df.applymap(encode_units)

frequent_itemsets = apriori(hot_encoded_df, min_support=0.01, use_colnames=True)
rules = association_rules(frequent_itemsets, metric='lift', min_threshold=1)
rules.to_csv('market_data_out_all_results.csv')

# DataFrame.as_matrix() was removed in pandas 1.0; use .values instead
support = rules['support'].values
confidence = rules['confidence'].values

plt.title('Association Rules')
plt.xlabel('support')
plt.ylabel('confidence')
sns.regplot(x=support, y=confidence, fit_reg=False)
code
18104028/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import warnings
import seaborn as sns
import datetime
import matplotlib.pyplot as plt

print(os.listdir('../input'))
code
18104028/cell_8
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from mlxtend.frequent_patterns import apriori
from mlxtend.frequent_patterns import association_rules

df = pd.read_csv('../input/market_data_02.csv')
df.columns

hot_encoded_df = df.groupby(['nota_fiscal_id', 'descricao'])['descricao'].count().unstack().reset_index().fillna(0).set_index('nota_fiscal_id')

def encode_units(x):
    if x <= 0:
        return 0
    if x >= 1:
        return 1

hot_encoded_df = hot_encoded_df.applymap(encode_units)

frequent_itemsets = apriori(hot_encoded_df, min_support=0.01, use_colnames=True)
rules = association_rules(frequent_itemsets, metric='lift', min_threshold=1)
rules.head(10)
code
18104028/cell_3
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/market_data_02.csv')
df.columns
print('Unique products: ' + str(len(df['cod_prod'].unique())))
code
1004531/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd
from IPython.display import display

nRecords = 200000
snRecords = 1000
maindf = pd.read_csv('../input/database.csv')
maindf.drop(['Year', 'Month', 'Incident', 'City', 'Agency Name', 'Agency Type', 'Record Source'], axis=1, inplace=True)
df = maindf[maindf['Record ID'] < nRecords]
# Parenthesize both comparisons: & binds more tightly than >
sdf = df[(maindf['Record ID'] < snRecords) & (df['Victim Count'] > 0)]

df['Crime Solved'].replace('No', 0, inplace=True)
df['Crime Solved'].replace('Yes', 1, inplace=True)
print(pd.value_counts(df['Perpetrator Count']))

# Binarize: 0 = single perpetrator, 1 = multiple perpetrators
df.loc[df['Perpetrator Count'] <= 1, 'Perpetrator Count'] = 0
df.loc[df['Perpetrator Count'] > 1, 'Perpetrator Count'] = 1

multiple_caught = pd.value_counts(df['Perpetrator Count'] * df['Crime Solved'], sort=False)[1]
one_escape = pd.value_counts(df['Perpetrator Count'] | df['Crime Solved'], sort=False)[0]
one_caught = pd.value_counts(df['Perpetrator Count'] < df['Crime Solved'], sort=False)[True]
multiple_escape = pd.value_counts(df['Perpetrator Count'] > df['Crime Solved'], sort=False)[True]
res = np.matrix([[one_caught, multiple_caught], [one_escape, multiple_escape]])
code
1004531/cell_4
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd

nRecords = 200000
snRecords = 1000
maindf = pd.read_csv('../input/database.csv')
maindf.drop(['Year', 'Month', 'Incident', 'City', 'Agency Name', 'Agency Type', 'Record Source'], axis=1, inplace=True)
df = maindf[maindf['Record ID'] < nRecords]
sdf = df[(maindf['Record ID'] < snRecords) & (df['Victim Count'] > 0)]
code
1004531/cell_6
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

nRecords = 200000
snRecords = 1000
maindf = pd.read_csv('../input/database.csv')
maindf.drop(['Year', 'Month', 'Incident', 'City', 'Agency Name', 'Agency Type', 'Record Source'], axis=1, inplace=True)
df = maindf[maindf['Record ID'] < nRecords]
sdf = df[(maindf['Record ID'] < snRecords) & (df['Victim Count'] > 0)]

races = df['Perpetrator Race'].unique()
sns.jointplot(x='Perpetrator Count', y='Victim Count', data=df)
plt.show()
code
1004531/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing
# sklearn.cross_validation was removed; cross_val_score now lives in model_selection
from sklearn.model_selection import cross_val_score
from sklearn import tree
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
code
1004531/cell_8
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

nRecords = 200000
snRecords = 1000
maindf = pd.read_csv('../input/database.csv')
maindf.drop(['Year', 'Month', 'Incident', 'City', 'Agency Name', 'Agency Type', 'Record Source'], axis=1, inplace=True)
df = maindf[maindf['Record ID'] < nRecords]
sdf = df[(maindf['Record ID'] < snRecords) & (df['Victim Count'] > 0)]

races = df['Perpetrator Race'].unique()
sns.countplot(x=df['Perpetrator Race'], hue=df['Crime Solved'], palette=sns.color_palette('Paired', len(races)), data=df)
plt.show()
sns.countplot(x=df['Victim Race'], hue=df['Crime Solved'], palette=sns.color_palette('Paired', len(races)), data=df)
plt.show()
sns.countplot(x=df['Victim Age'], hue=df['Crime Solved'], palette=sns.color_palette('Paired', len(races)), data=df)
plt.show()
code
1004531/cell_3
[ "image_output_1.png" ]
import pandas as pd

nRecords = 200000
snRecords = 1000
maindf = pd.read_csv('../input/database.csv')
code
1004531/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

nRecords = 200000
snRecords = 1000
maindf = pd.read_csv('../input/database.csv')
maindf.drop(['Year', 'Month', 'Incident', 'City', 'Agency Name', 'Agency Type', 'Record Source'], axis=1, inplace=True)
df = maindf[maindf['Record ID'] < nRecords]
sdf = df[(maindf['Record ID'] < snRecords) & (df['Victim Count'] > 0)]

races = df['Perpetrator Race'].unique()
sns.swarmplot(x=sdf['Weapon'], y=sdf['Victim Count'].astype(float), data=sdf)
plt.show()
sns.countplot(x=df['Victim Sex'], hue=df['Crime Solved'], palette=sns.color_palette('Paired', len(races)), data=df)
plt.show()
sns.countplot(x=df['Weapon'], hue=df['Crime Solved'], palette=sns.color_palette('Paired', len(races)), data=df)
plt.show()
code
50234570/cell_20
[ "text_plain_output_1.png" ]
import os
import warnings
from collections import Counter

import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

plt.style.use('seaborn-whitegrid')
warnings.filterwarnings('ignore')

suicide_rates = pd.read_csv('/kaggle/input/suicide-rates-overview-1985-to-2016/master.csv')
suicide_rates.columns

def detect_outliers(df, columnNames):
    outlier_indices = []
    for c in columnNames:
        Q1 = np.percentile(df[c], 25)
        Q3 = np.percentile(df[c], 75)
        IQR = Q3 - Q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    # Keep only rows that are outliers in more than one column
    multiple_outliers = list(i for i, v in outlier_indices.items() if v > 1)
    return multiple_outliers

suicide_rates.loc[detect_outliers(suicide_rates, ['population', 'gdp_per_capita ($)', 'suicides/100k pop'])]
suicide_rates = suicide_rates.drop(detect_outliers(suicide_rates, ['population', 'gdp_per_capita ($)', 'suicides/100k pop']), axis=0).reset_index(drop=True)

year_list = list(suicide_rates.year.unique())
year_suicide_ratio = []
for i in year_list:
    a = suicide_rates[suicide_rates['year'] == i]
    year_suicide_rate = sum(a.index) / len(a)
    year_suicide_ratio.append(year_suicide_rate)

data = pd.DataFrame({'year_list': year_list, 'year_suicide_ratio': year_suicide_ratio})
new_index = data['year_suicide_ratio'].sort_values(ascending=True).index.values
sorted_data = data.reindex(new_index)

plt.figure(figsize=(15, 10))
sns.barplot(x=sorted_data['year_list'], y=sorted_data['year_suicide_ratio'])
plt.xticks(rotation=90)
plt.xlabel('Years')
plt.ylabel('Suicides')
plt.title('Which year had the most suicides?')
code
50234570/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

suicide_rates = pd.read_csv('/kaggle/input/suicide-rates-overview-1985-to-2016/master.csv')
suicide_rates.columns
code
50234570/cell_2
[ "text_html_output_1.png" ]
import os
import warnings
from collections import Counter

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

plt.style.use('seaborn-whitegrid')
warnings.filterwarnings('ignore')

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
50234570/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

suicide_rates = pd.read_csv('/kaggle/input/suicide-rates-overview-1985-to-2016/master.csv')
suicide_rates.columns
suicide_rates.info()
code
50234570/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

suicide_rates = pd.read_csv('/kaggle/input/suicide-rates-overview-1985-to-2016/master.csv')
suicide_rates.columns
suicide_rates.describe()
code
50234570/cell_18
[ "text_plain_output_1.png" ]
from collections import Counter

import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

suicide_rates = pd.read_csv('/kaggle/input/suicide-rates-overview-1985-to-2016/master.csv')
suicide_rates.columns

def detect_outliers(df, columnNames):
    outlier_indices = []
    for c in columnNames:
        Q1 = np.percentile(df[c], 25)
        Q3 = np.percentile(df[c], 75)
        IQR = Q3 - Q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list(i for i, v in outlier_indices.items() if v > 1)
    return multiple_outliers

suicide_rates.loc[detect_outliers(suicide_rates, ['population', 'gdp_per_capita ($)', 'suicides/100k pop'])]
suicide_rates = suicide_rates.drop(detect_outliers(suicide_rates, ['population', 'gdp_per_capita ($)', 'suicides/100k pop']), axis=0).reset_index(drop=True)
suicide_rates.head()
code
50234570/cell_15
[ "text_html_output_1.png" ]
from collections import Counter

import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

suicide_rates = pd.read_csv('/kaggle/input/suicide-rates-overview-1985-to-2016/master.csv')
suicide_rates.columns

def detect_outliers(df, columnNames):
    outlier_indices = []
    for c in columnNames:
        Q1 = np.percentile(df[c], 25)
        Q3 = np.percentile(df[c], 75)
        IQR = Q3 - Q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < Q1 - outlier_step) | (df[c] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list(i for i, v in outlier_indices.items() if v > 1)
    return multiple_outliers

suicide_rates.loc[detect_outliers(suicide_rates, ['population', 'gdp_per_capita ($)', 'suicides/100k pop'])]
code
50234570/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

suicide_rates = pd.read_csv('/kaggle/input/suicide-rates-overview-1985-to-2016/master.csv')
suicide_rates.head()
code
17121464/cell_9
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra

correlations = df1.corr()
names = ['hair', 'feathers', 'eggs', 'milk', 'airborne', 'aquatic', 'predator', 'toothed', 'backbone', 'breathes', 'venomous', 'fins', 'legs', 'tail', 'domestic', 'catsize']

fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(correlations, vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = np.arange(0, 15, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(names)
ax.set_yticklabels(names)
plt.show()
code
17121464/cell_4
[ "text_plain_output_1.png" ]
type(df)
code
17121464/cell_6
[ "image_output_1.png" ]
print('Row: ', df1.shape[0])
print('Column: ', df1.shape[1])
code
17121464/cell_2
[ "text_plain_output_1.png" ]
df.head()
code
17121464/cell_7
[ "image_output_1.png" ]
import matplotlib.pyplot as plt

df.plot(kind='density', subplots=False, layout=(3, 3), sharex=False)
plt.show()
code
17121464/cell_8
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt

df.hist()
plt.show()
code
17121464/cell_3
[ "text_plain_output_1.png" ]
df1.head()
code
17121464/cell_10
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
from sklearn.neighbors import KNeighborsClassifier

correlations = df1.corr()
names = ['hair', 'feathers', 'eggs', 'milk', 'airborne', 'aquatic', 'predator', 'toothed', 'backbone', 'breathes', 'venomous', 'fins', 'legs', 'tail', 'domestic', 'catsize']

# plot correlation matrix
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(correlations, vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = np.arange(0, 15, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(names)
ax.set_yticklabels(names)
plt.show()

knn = KNeighborsClassifier(n_neighbors=5, p=2)
knn.fit(df1[['predator', 'toothed']], df1.backbone)
code
17121464/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
from matplotlib.colors import ListedColormap
from sklearn.neighbors import KNeighborsClassifier

correlations = df1.corr()
names = ['hair', 'feathers', 'eggs', 'milk', 'airborne', 'aquatic', 'predator', 'toothed', 'backbone', 'breathes', 'venomous', 'fins', 'legs', 'tail', 'domestic', 'catsize']

# plot correlation matrix
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(correlations, vmin=-1, vmax=1)
fig.colorbar(cax)
ticks = np.arange(0, 15, 1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(names)
ax.set_yticklabels(names)
plt.show()

knn = KNeighborsClassifier(n_neighbors=5, p=2)
knn.fit(df1[['predator', 'toothed']], df1.backbone)

def plotMesh():
    h = 100
    cmap_light = ListedColormap(['#ffffb3', '#ff9999', '#d6d6f5', '#ccffdd'])
    colormap = np.array(['black', 'yellow', 'red', 'blue', 'green'])
    x_min, x_max = (df1.predator.min() - 1000, df1.toothed.max() + 1000)
    y_min, y_max = (df1.predator.min() - 1000, df1.toothed.max() + 1000)
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    Z = knn.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    fig = plt.figure()
    plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
    plt.scatter(df1.predator, df1.toothed, c=colormap[df1.backbone], s=120)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.title('4-Class classification \n(k = %i)\n Loan 1 - Yellow, Loan 2 - Red, Loan 3 - Blue, Loan 4 - green' % 5)
    ax = fig.add_subplot(111)

plotMesh()
code
17121464/cell_5
[ "image_output_1.png" ]
print('Row: ', df.shape[0])
print('Column: ', df.shape[1])
code
106214047/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import math
import re
from datetime import datetime, timedelta, date

import numpy as np
import pandas as pd
import requests
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
import mysql.connector

sns.set()
plt.rcParams['axes.labelsize'] = 10
plt.rcParams['xtick.labelsize'] = 10
plt.rcParams['ytick.labelsize'] = 10

from scipy.stats import stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV, StratifiedKFold, RepeatedStratifiedKFold
import xgboost
from xgboost import XGBClassifier
from sklearn.linear_model import LogisticRegression, LassoCV, RidgeClassifierCV, LinearRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.impute import KNNImputer, SimpleImputer
from sklearn.preprocessing import MinMaxScaler, StandardScaler, LabelEncoder, OneHotEncoder
from sklearn import preprocessing
from imblearn.over_sampling import SMOTE
from imblearn.combine import SMOTETomek
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score, auc, confusion_matrix, f1_score, precision_score, recall_score, roc_curve
from sklearn.metrics import ConfusionMatrixDisplay
code
128047933/cell_6
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
def estimateF(img_1, img_2):
    img1 = cv2.cvtColor(img_1, cv2.COLOR_BGR2GRAY)
    img2 = cv2.cvtColor(img_2, cv2.COLOR_BGR2GRAY)

    # Detect and describe SIFT keypoints in both images
    sift = cv2.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    # Brute-force matching with cross-check
    bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
    cv_matches = bf.match(des1, des2)
    cur_kp_1 = ArrayFromCvKps(kp1)
    cur_kp_2 = ArrayFromCvKps(kp2)
    matches = np.array([[m.queryIdx, m.trainIdx] for m in cv_matches])

    im_matches = DrawMatches(img_1, img_2, cur_kp_1, cur_kp_2, matches)
    fig = plt.figure(figsize=(25, 25))
    plt.title('Matches before RANSAC')
    plt.imshow(im_matches)
    plt.axis('off')
    plt.show()

    # Estimate the fundamental matrix with MAGSAC++ and keep only inlier matches
    F, inlier_mask = cv2.findFundamentalMat(cur_kp_1[matches[:, 0]], cur_kp_2[matches[:, 1]], cv2.USAC_MAGSAC, ransacReprojThreshold=0.25, confidence=0.99999, maxIters=10000)
    matches_after_ransac = np.array([match for match, is_inlier in zip(matches, inlier_mask) if is_inlier])
    im_inliers = DrawMatches(img_1, img_2, cur_kp_1, cur_kp_2, matches_after_ransac)
    fig = plt.figure(figsize=(25, 25))
    plt.title('Matches after RANSAC')
    plt.imshow(im_inliers)
    plt.axis('off')
    plt.show()

    scaling_dict = pd.read_csv(train_csv)
    inlier_kp_1 = ArrayFromCvKps([kp for i, kp in enumerate(kp1) if i in matches_after_ransac[:, 0]])
    inlier_kp_2 = ArrayFromCvKps([kp for i, kp in enumerate(kp2) if i in matches_after_ransac[:, 1]])

    # Recover relative pose from the estimated F and the known intrinsics
    E, R, T = ComputeEssentialMatrix(F, df['K'][0], df['K'][1], inlier_kp_1, inlier_kp_2)
    q = QuaternionFromMatrix(R)
    T = T.flatten()

    # Ground-truth relative pose between the two cameras
    R1_gt, T1_gt = (df['R'][0], df['T'][0].reshape((3, 1)))
    R2_gt, T2_gt = (df['R'][1], df['T'][1].reshape((3, 1)))
    dR_gt = np.dot(R2_gt, R1_gt.T)
    dT_gt = (T2_gt - np.dot(dR_gt, T1_gt)).flatten()
    q_gt = QuaternionFromMatrix(dR_gt)
    q_gt = q_gt / (np.linalg.norm(q_gt) + eps)

    err_q, err_t = ComputeErrorForOneExample(q_gt, dT_gt, q, T, scaling_dict['scaling_factor'][0])
    print(f'rotation_error={err_q:.02f} (deg), translation_error={err_t:.02f} (m)', flush=True)

estimateF(df['imgs'][0], df['imgs'][1])
code
128047933/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import cv2 import matplotlib.pyplot as plt import numpy as np # linear algebra import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import scipy import math import numpy as np import pandas as pd import cv2 import matplotlib.pyplot as plt import os eps = 1e-15 train_calibration_csvs = [] train_pair_covisibility_csvs = [] test_calibration_csvs = [] test_pair_covisibility_csvs = [] for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: if 'train' in dirname and 'pair_covisibility.csv' in filename: train_pair_covisibility_csvs.append(os.path.join(dirname, filename)) if 'train' in dirname and 'calibration.csv' in filename: train_calibration_csvs.append(os.path.join(dirname, filename)) if 'test' in dirname and 'pair_covisibility.csv' in filename: test_pair_covisibility_csvs.append(os.path.join(dirname, filename)) if 'test' in dirname and 'calibration.csv' in filename: test_calibration_csvs.append(os.path.join(dirname, filename)) test_csv = '/kaggle/input/image-matching-challenge-2022/test.csv' train_csv = '/kaggle/input/image-matching-challenge-2022/train/scaling_factors.csv' def getImagesFromCSV(train_csvf, calibration_csvs, pair_covisibility_csvs, category_index, pair_index): train_csv = pd.read_csv(train_csvf) category_csv = pd.read_csv(calibration_csvs[category_index]) pair_covisibility = pd.read_csv(pair_covisibility_csvs[category_index]) pair_covisibility = pair_covisibility[pair_covisibility['covisibility'] >= 0.1] imgs = pair_covisibility['pair'][pair_index].split('-') F = pair_covisibility['fundamental_matrix'][pair_index] p_new = [] R = [] T = [] K = [] for img in imgs: image_loc = '/kaggle/input/image-matching-challenge-2022/train/' + str(train_csv['scene'][category_index]) + '/images/' + str(img) + '.jpg' p_new.append(cv2.imread(image_loc)) R.append(np.array(np.array(category_csv[category_csv['image_id'] == img]['rotation_matrix'])[0].split(' '), dtype=np.float32).reshape((3, 3))) T.append(np.array(np.array(category_csv[category_csv['image_id'] == img]['translation_vector'])[0].split(' '), dtype=np.float32).reshape((3,))) K.append(np.array(np.array(category_csv[category_csv['image_id'] == img]['camera_intrinsics'])[0].split(' '), dtype=np.float32).reshape((3, 3))) df = pd.DataFrame({'imgs': p_new, 'R': R, 'T': T, 'K': K, 'F': F}) return df df = getImagesFromCSV(train_csv, train_calibration_csvs, train_pair_covisibility_csvs, 0, 0) def extract_sift(img, step_size=1): """ Extract SIFT features for a given grayscale image. Instead of detecting keypoints, we will set the keypoints to be uniformly distanced pixels. Feel free to use OpenCV functions. Note: Check sift.compute and cv2.KeyPoint Args: img: Grayscale image of shape (H, W) step_size: Size of the step between keypoints. Return: descriptors: numpy array of shape (int(img.shape[0]/step_size) * int(img.shape[1]/step_size), 128) contains sift feature. 
""" sift = cv2.SIFT_create() descriptors = np.zeros((int(img.shape[0] / step_size) * int(img.shape[1] / step_size), 128)) keypoints = [cv2.KeyPoint(x, y, step_size) for y in range(0, img.shape[0], step_size) for x in range(0, img.shape[1], step_size)] _, descriptors = sift.compute(img, keypoints) return descriptors def extract_sift_for_dataset(data, step_size=1): all_features = [] for i in range(len(data)): img = data[i] img = cv2.cvtColor(np.uint8(img), cv2.COLOR_BGR2GRAY) descriptors = extract_sift(img, step_size) all_features.append(descriptors) # Distribution of keypoint responses def distributionKeypointResponses(keypoints): responses = [] for keypoint in keypoints: responses.append(keypoint.response) n, bins, patches = plt.hist(responses, 100, density = 1, color ='green', alpha = 0.7) plt.xlabel('Keypoint Responses') plt.ylabel('Count') plt.title('Distribution of Keypoint Response Intensity', fontweight ="bold") plt.show() def decodeFundamental(f_matrix): F = np.zeros((3,3)) for index, value in enumerate(f_matrix.split(" ")): F[int(np.floor(index/3))][index%3] = float(value) return F def encodeFundamental(f_matrix): F = np.zeros((9,)) for index, value in enumerate(f_matrix.ravel()): F[index] = value return F def NormalizeKeypoints(keypoints, K): C_x = K[0, 2] C_y = K[1, 2] f_x = K[0, 0] f_y = K[1, 1] keypoints = (keypoints - np.array([[C_x, C_y]])) / np.array([[f_x, f_y]]) return keypoints def ComputeEssentialMatrix(F, K1, K2, kp1, kp2): '''Compute the Essential matrix from the Fundamental matrix, given the calibration matrices. Note that we ask participants to estimate F, i.e., without relying on known intrinsics.''' # Warning! Old versions of OpenCV's RANSAC could return multiple F matrices, encoded as a single matrix size 6x3 or 9x3, rather than 3x3. # We do not account for this here, as the modern RANSACs do not do this: # https://opencv.org/evaluating-opencvs-new-ransacs assert F.shape[0] == 3, 'Malformed F?' # Use OpenCV's recoverPose to solve the cheirality check: # https://docs.opencv.org/4.5.4/d9/d0c/group__calib3d.html#gadb7d2dfcc184c1d2f496d8639f4371c0 E = np.matmul(np.matmul(K2.T, F), K1).astype(np.float64) kp1n = NormalizeKeypoints(kp1, K1) kp2n = NormalizeKeypoints(kp2, K2) num_inliers, R, T, mask = cv2.recoverPose(E, kp1n, kp2n) return E, R, T def QuaternionFromMatrix(matrix): '''Transform a rotation matrix into a quaternion.''' M = np.array(matrix, dtype=np.float64, copy=False)[:4, :4] m00 = M[0, 0] m01 = M[0, 1] m02 = M[0, 2] m10 = M[1, 0] m11 = M[1, 1] m12 = M[1, 2] m20 = M[2, 0] m21 = M[2, 1] m22 = M[2, 2] K = np.array([[m00 - m11 - m22, 0.0, 0.0, 0.0], [m01 + m10, m11 - m00 - m22, 0.0, 0.0], [m02 + m20, m12 + m21, m22 - m00 - m11, 0.0], [m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22]]) K /= 3.0 # The quaternion is the eigenvector of K that corresponds to the largest eigenvalue. w, V = np.linalg.eigh(K) q = V[[3, 0, 1, 2], np.argmax(w)] if q[0] < 0: np.negative(q, q) return q def ComputeErrorForOneExample(q_gt, T_gt, q, T, scale): '''Compute the error metric for a single example. The function returns two errors, over rotation and translation. These are combined at different thresholds by ComputeMaa in order to compute the mean Average Accuracy.''' q_gt_norm = q_gt / (np.linalg.norm(q_gt) + eps) q_norm = q / (np.linalg.norm(q) + eps) loss_q = np.maximum(eps, (1.0 - np.sum(q_norm * q_gt_norm)**2)) err_q = np.arccos(1 - 2 * loss_q) # Apply the scaling factor for this scene. 
T_gt_scaled = T_gt * scale T_scaled = T * np.linalg.norm(T_gt) * scale / (np.linalg.norm(T) + eps) err_t = min(np.linalg.norm(T_gt_scaled - T_scaled), np.linalg.norm(T_gt_scaled + T_scaled)) return err_q * 180 / np.pi, err_t def BuildCompositeImage(im1, im2, axis=1, margin=0, background=1): '''Convenience function to stack two images with different sizes.''' if background != 0 and background != 1: background = 1 if axis != 0 and axis != 1: raise RuntimeError('Axis must be 0 (vertical) or 1 (horizontal') h1, w1, _ = im1.shape h2, w2, _ = im2.shape if axis == 1: composite = np.zeros((max(h1, h2), w1 + w2 + margin, 3), dtype=np.uint8) + 255 * background if h1 > h2: voff1, voff2 = 0, (h1 - h2) // 2 else: voff1, voff2 = (h2 - h1) // 2, 0 hoff1, hoff2 = 0, w1 + margin else: composite = np.zeros((h1 + h2 + margin, max(w1, w2), 3), dtype=np.uint8) + 255 * background if w1 > w2: hoff1, hoff2 = 0, (w1 - w2) // 2 else: hoff1, hoff2 = (w2 - w1) // 2, 0 voff1, voff2 = 0, h1 + margin composite[voff1:voff1 + h1, hoff1:hoff1 + w1, :] = im1 composite[voff2:voff2 + h2, hoff2:hoff2 + w2, :] = im2 return (composite, (voff1, voff2), (hoff1, hoff2)) def DrawMatches(im1, im2, kp1, kp2, matches, axis=1, margin=0, background=0, linewidth=2): '''Draw keypoints and matches.''' composite, v_offset, h_offset = BuildCompositeImage(im1, im2, axis, margin, background) # Draw all keypoints. for coord_a, coord_b in zip(kp1, kp2): composite = cv2.drawMarker(composite, (int(coord_a[0] + h_offset[0]), int(coord_a[1] + v_offset[0])), color=(255, 0, 0), markerType=cv2.MARKER_CROSS, markerSize=5, thickness=1) composite = cv2.drawMarker(composite, (int(coord_b[0] + h_offset[1]), int(coord_b[1] + v_offset[1])), color=(255, 0, 0), markerType=cv2.MARKER_CROSS, markerSize=5, thickness=1) # Draw matches, and highlight keypoints used in matches. for idx_a, idx_b in matches: composite = cv2.drawMarker(composite, (int(kp1[idx_a, 0] + h_offset[0]), int(kp1[idx_a, 1] + v_offset[0])), color=(0, 0, 255), markerType=cv2.MARKER_CROSS, markerSize=12, thickness=1) composite = cv2.drawMarker(composite, (int(kp2[idx_b, 0] + h_offset[1]), int(kp2[idx_b, 1] + v_offset[1])), color=(0, 0, 255), markerType=cv2.MARKER_CROSS, markerSize=12, thickness=1) composite = cv2.line(composite, tuple([int(kp1[idx_a][0] + h_offset[0]), int(kp1[idx_a][1] + v_offset[0])]), tuple([int(kp2[idx_b][0] + h_offset[1]), int(kp2[idx_b][1] + v_offset[1])]), color=(0, 0, 255), thickness=1) return composite def ArrayFromCvKps(kps): '''Convenience function to convert OpenCV keypoints into a simple numpy array.''' return np.array([kp.pt for kp in kps]) k = np.min(decodeFundamental(df['F'][0])) print(encodeFundamental(F * k).reshape(9)) print(decodeFundamental(df['F'][0]).reshape((9,))) print('Absolute Diff') print(np.sum(np.absolute(k * encodeFundamental(F).reshape((3, 3)) - decodeFundamental(df['F'][0])))) plt.clf() plt.bar(np.linspace(1, 9, 9), encodeFundamental(F * k) - decodeFundamental(df['F'][0]).reshape((9,))) plt.show()
code
128047933/cell_3
[ "text_plain_output_1.png" ]
import math
import os

import cv2
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import scipy

eps = 1e-15

# Collect the calibration and pair-covisibility CSVs for the train and test splits
train_calibration_csvs = []
train_pair_covisibility_csvs = []
test_calibration_csvs = []
test_pair_covisibility_csvs = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        if 'train' in dirname and 'pair_covisibility.csv' in filename:
            train_pair_covisibility_csvs.append(os.path.join(dirname, filename))
        if 'train' in dirname and 'calibration.csv' in filename:
            train_calibration_csvs.append(os.path.join(dirname, filename))
        if 'test' in dirname and 'pair_covisibility.csv' in filename:
            test_pair_covisibility_csvs.append(os.path.join(dirname, filename))
        if 'test' in dirname and 'calibration.csv' in filename:
            test_calibration_csvs.append(os.path.join(dirname, filename))

test_csv = '/kaggle/input/image-matching-challenge-2022/test.csv'
train_csv = '/kaggle/input/image-matching-challenge-2022/train/scaling_factors.csv'

def getImagesFromCSV(train_csvf, calibration_csvs, pair_covisibility_csvs, category_index, pair_index):
    train_csv = pd.read_csv(train_csvf)
    category_csv = pd.read_csv(calibration_csvs[category_index])
    pair_covisibility = pd.read_csv(pair_covisibility_csvs[category_index])
    # Keep only pairs with at least 10% covisibility
    pair_covisibility = pair_covisibility[pair_covisibility['covisibility'] >= 0.1]
    imgs = pair_covisibility['pair'][pair_index].split('-')
    F = pair_covisibility['fundamental_matrix'][pair_index]
    p_new = []
    R = []
    T = []
    K = []
    for img in imgs:
        image_loc = '/kaggle/input/image-matching-challenge-2022/train/' + str(train_csv['scene'][category_index]) + '/images/' + str(img) + '.jpg'
        p_new.append(cv2.imread(image_loc))
        R.append(np.array(np.array(category_csv[category_csv['image_id'] == img]['rotation_matrix'])[0].split(' '), dtype=np.float32).reshape((3, 3)))
        T.append(np.array(np.array(category_csv[category_csv['image_id'] == img]['translation_vector'])[0].split(' '), dtype=np.float32).reshape((3,)))
        K.append(np.array(np.array(category_csv[category_csv['image_id'] == img]['camera_intrinsics'])[0].split(' '), dtype=np.float32).reshape((3, 3)))
    df = pd.DataFrame({'imgs': p_new, 'R': R, 'T': T, 'K': K, 'F': F})
    return df

df = getImagesFromCSV(train_csv, train_calibration_csvs, train_pair_covisibility_csvs, 0, 0)
print(df['imgs'][0].shape)
code
90140147/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import data_utils

holiday_df = pd.read_csv('../input/singapore-holiday/holiday.csv')
df = data_utils.sg_holiday_feature(holiday_df=holiday_df.copy(), startDate='20140101', endDate='20211231', holiday_dummy=False)
df, dist = data_utils.set_label(df=df, label_column='Holiday')
df = data_utils.get_date_dummy(df, date_column='DATE')
df.set_index('DATE', inplace=True)
df = df.drop(columns=['Day'])
df = df['2014-01-01':'2021-12-31']

train_data = data_utils.switch_y_column(df=df.copy(), column_name='Holiday')
X_train_seq, y_train_seq, X_val_seq, y_val_seq = data_utils.split_sequence(train_data.values, look_back=look_back, look_forward=look_forward, split_val=True, print_shape=True)
n_features = X_train_seq.shape[2]
code
90140147/cell_2
[ "text_html_output_1.png" ]
import pandas as pd
import data_utils

holiday_df = pd.read_csv('../input/singapore-holiday/holiday.csv')
df = data_utils.sg_holiday_feature(holiday_df=holiday_df.copy(), startDate='20140101', endDate='20211231', holiday_dummy=False)
df, dist = data_utils.set_label(df=df, label_column='Holiday')
df = data_utils.get_date_dummy(df, date_column='DATE')
df.set_index('DATE', inplace=True)
df = df.drop(columns=['Day'])
df = df['2014-01-01':'2021-12-31']
df.tail()
code
90139583/cell_21
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

housing_data = pd.read_csv('/kaggle/input/california-housing-prices/housing.csv')
data = housing_data.copy()
housing_data.shape
housing_data.duplicated().values.any()
housing_data.isnull().sum()

def outlier_check(data_check):
    q1 = data_check.quantile(0.25)
    q3 = data_check.quantile(0.75)
    iqr = q3 - q1
    lower_limit = q1 - 1.5 * iqr
    upper_limit = q3 + 1.5 * iqr
    lower_outlier = data_check < lower_limit
    upper_outlier = data_check > upper_limit
    return data_check[lower_outlier | upper_outlier]

for col_name, values in housing_data.items():
    if housing_data[col_name].dtype == 'float64':
        percntge = len(outlier_check(housing_data[col_name])) / len(housing_data) * 100

plt.figure(figsize=(18, 18))
sns.heatmap(housing_data.corr(), annot=True, cmap='RdYlGn')
plt.show()
code
90139583/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

housing_data = pd.read_csv('/kaggle/input/california-housing-prices/housing.csv')
data = housing_data.copy()
housing_data.shape
housing_data.describe()
code
90139583/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

housing_data = pd.read_csv('/kaggle/input/california-housing-prices/housing.csv')
data = housing_data.copy()
housing_data.shape
code
90139583/cell_26
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

housing_data = pd.read_csv('/kaggle/input/california-housing-prices/housing.csv')
data = housing_data.copy()
housing_data.shape
housing_data.duplicated().values.any()
housing_data.isnull().sum()

def outlier_check(data_check):
    q1 = data_check.quantile(0.25)
    q3 = data_check.quantile(0.75)
    iqr = q3 - q1
    lower_limit = q1 - 1.5 * iqr
    upper_limit = q3 + 1.5 * iqr
    lower_outlier = data_check < lower_limit
    upper_outlier = data_check > upper_limit
    return data_check[lower_outlier | upper_outlier]

for col_name, values in housing_data.items():
    if housing_data[col_name].dtype == 'float64':
        percntge = len(outlier_check(housing_data[col_name])) / len(housing_data) * 100

corr_matrix = housing_data.corr()
corr_matrix
corr_matrix['median_house_value'].sort_values(ascending=False)

housing_data.plot(kind='scatter', x='longitude', y='latitude', alpha=0.4, s=housing_data['population'] / 100, label='population', figsize=(10, 7), c='median_house_value', cmap=plt.get_cmap('jet'), colorbar=True, title='Visualizing Geographical Data')
code
90139583/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

housing_data = pd.read_csv('/kaggle/input/california-housing-prices/housing.csv')
data = housing_data.copy()
housing_data.shape
housing_data.duplicated().values.any()
code
90139583/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90139583/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

housing_data = pd.read_csv('/kaggle/input/california-housing-prices/housing.csv')
data = housing_data.copy()
housing_data.shape
housing_data.head()
code
90139583/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

housing_data = pd.read_csv('/kaggle/input/california-housing-prices/housing.csv')
data = housing_data.copy()
housing_data.shape
housing_data.duplicated().values.any()
housing_data.isnull().sum()

def outlier_check(data_check):
    q1 = data_check.quantile(0.25)
    q3 = data_check.quantile(0.75)
    iqr = q3 - q1
    lower_limit = q1 - 1.5 * iqr
    upper_limit = q3 + 1.5 * iqr
    lower_outlier = data_check < lower_limit
    upper_outlier = data_check > upper_limit
    return data_check[lower_outlier | upper_outlier]

for col_name, values in housing_data.items():
    if housing_data[col_name].dtype == 'float64':
        percntge = len(outlier_check(housing_data[col_name])) / len(housing_data) * 100

housing_data.hist(bins=70, figsize=(20, 15))
plt.show()
code
90139583/cell_28
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

housing_data = pd.read_csv('/kaggle/input/california-housing-prices/housing.csv')
data = housing_data.copy()
housing_data.shape
housing_data.duplicated().values.any()
housing_data.isnull().sum()

def outlier_check(data_check):
    q1 = data_check.quantile(0.25)
    q3 = data_check.quantile(0.75)
    iqr = q3 - q1
    lower_limit = q1 - 1.5 * iqr
    upper_limit = q3 + 1.5 * iqr
    lower_outlier = data_check < lower_limit
    upper_outlier = data_check > upper_limit
    return data_check[lower_outlier | upper_outlier]

for col_name, values in housing_data.items():
    if housing_data[col_name].dtype == 'float64':
        percntge = len(outlier_check(housing_data[col_name])) / len(housing_data) * 100

corr_matrix = housing_data.corr()
corr_matrix
corr_matrix['median_house_value'].sort_values(ascending=False)

plt.figure(figsize=(10, 7))
plt.title('median_house_value vs ocean_proximity')
sns.stripplot(data=housing_data, x='ocean_proximity', y='median_house_value', jitter=0.2)
code
90139583/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

housing_data = pd.read_csv('/kaggle/input/california-housing-prices/housing.csv')
data = housing_data.copy()
housing_data.shape
housing_data.duplicated().values.any()
housing_data.isnull().sum()

housing_data.plot(kind='box', figsize=(20, 15), subplots=True, layout=(3, 3))
plt.show()
code
90139583/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

housing_data = pd.read_csv('/kaggle/input/california-housing-prices/housing.csv')
data = housing_data.copy()
housing_data.shape
housing_data.duplicated().values.any()
housing_data.isnull().sum()

def outlier_check(data_check):
    q1 = data_check.quantile(0.25)
    q3 = data_check.quantile(0.75)
    iqr = q3 - q1
    lower_limit = q1 - 1.5 * iqr
    upper_limit = q3 + 1.5 * iqr
    lower_outlier = data_check < lower_limit
    upper_outlier = data_check > upper_limit
    return data_check[lower_outlier | upper_outlier]

for col_name, values in housing_data.items():
    if housing_data[col_name].dtype == 'float64':
        percntge = len(outlier_check(housing_data[col_name])) / len(housing_data) * 100
        print(col_name, ':', percntge, '%')
code
90139583/cell_24
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

housing_data = pd.read_csv('/kaggle/input/california-housing-prices/housing.csv')
data = housing_data.copy()
housing_data.shape
housing_data.duplicated().values.any()
housing_data.isnull().sum()

def outlier_check(data_check):
    q1 = data_check.quantile(0.25)
    q3 = data_check.quantile(0.75)
    iqr = q3 - q1
    lower_limit = q1 - 1.5 * iqr
    upper_limit = q3 + 1.5 * iqr
    lower_outlier = data_check < lower_limit
    upper_outlier = data_check > upper_limit
    return data_check[lower_outlier | upper_outlier]

for col_name, values in housing_data.items():
    if housing_data[col_name].dtype == 'float64':
        percntge = len(outlier_check(housing_data[col_name])) / len(housing_data) * 100

corr_matrix = housing_data.corr()
corr_matrix
corr_matrix['median_house_value'].sort_values(ascending=False)
code
90139583/cell_22
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

housing_data = pd.read_csv('/kaggle/input/california-housing-prices/housing.csv')
data = housing_data.copy()
housing_data.shape
housing_data.duplicated().values.any()
housing_data.isnull().sum()

def outlier_check(data_check):
    q1 = data_check.quantile(0.25)
    q3 = data_check.quantile(0.75)
    iqr = q3 - q1
    lower_limit = q1 - 1.5 * iqr
    upper_limit = q3 + 1.5 * iqr
    lower_outlier = data_check < lower_limit
    upper_outlier = data_check > upper_limit
    return data_check[lower_outlier | upper_outlier]

for col_name, values in housing_data.items():
    if housing_data[col_name].dtype == 'float64':
        percntge = len(outlier_check(housing_data[col_name])) / len(housing_data) * 100

sns.pairplot(housing_data, diag_kind='kde')
code
90139583/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

housing_data = pd.read_csv('/kaggle/input/california-housing-prices/housing.csv')
data = housing_data.copy()
housing_data.shape
housing_data.info()
code
90139583/cell_12
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

housing_data = pd.read_csv('/kaggle/input/california-housing-prices/housing.csv')
data = housing_data.copy()
housing_data.shape
housing_data.duplicated().values.any()
housing_data.isnull().sum()
code
32062272/cell_21
[ "text_plain_output_1.png" ]
from time import time import inverness import pandas as pd import re import inverness model = inverness.Model('/kaggle/input/cord-19-inverness-all-v7/').load(['fun', 'meta', 'phraser', 'dictionary', 'tfidf', 'lsi', 'dense_ann']) pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv', nrows=3) meta_by_sha = {} meta_by_pmc = {} t0 = time() COLS = ['cord_uid', 'sha', 'pmcid', 'publish_time', 'journal', 'url', 'title', 'authors'] df = pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv') selected = df[df['full_text_file'] != ''][COLS] rows = selected.iterrows() for _, r in rows: if type(r['sha']) is str: for sha in r['sha'].split(';'): sha = sha.strip() meta = {k: r[k] for k in COLS} meta_by_sha[sha] = meta if type(r['pmcid']) is str: pmc = r['pmcid'] meta = {k: r[k] for k in COLS} meta_by_pmc[pmc] = meta def score_text(text, criteria): """""" total = 0 value = 1 for c in criteria: if type(c) in (int, float): value = c else: c = c.replace('_', '\\b') matches = re.findall(c, text, re.I) cnt = len(matches) total += value * cnt_to_score(cnt) return total def cnt_to_score(cnt): return min(2, cnt) def score_results(i_d_lists, criteria): """""" results = [] for i, d in zip(*i_d_lists): score = 0 paper_id = model.meta[i]['paper_id'] if paper_id in meta_by_sha: meta = meta_by_sha[paper_id] else: meta = meta_by_pmc[paper_id] score += score_text(meta['title'], criteria) doc = model.get_doc(i) text = model.doc_to_text(doc).replace('\n', ' ').replace('\r', ' ') html = highlight(text, criteria, style_by_group_id, default_style) score += score_text(text, criteria) rec = (score, d, i, html, meta) results += [rec] results.sort(key=lambda x: (-x[0], x[1])) return results def score_queries(queries, criteria, K=50): """""" by_score = [] for query in queries: q = model.text_to_dense(query) i_d = model.dense_ann_query(q, K) results = score_results(i_d, criteria) score = agg_results(results) by_score += [(score, query)] by_score.sort() return by_score def highlight(text, criteria, styles={}, default='w=bold'): """""" group_id = 0 for c in criteria: if type(c) in (int, float): group_id += 1 else: c = c.replace('_', '\\b') c = f'({c}\\w*)' style = styles.get(group_id, default) style_props = [] for prop in style.split(','): k, _, v = prop.partition('=') if k == 'w': style_props += [f'font-weight:{v}'] if k == 'fg': style_props += [f'color:{v}'] if k == 'bg': style_props += [f'background-color:{v}'] before = f'''<span style="{';'.join(style_props)}">''' after = '</span>' text = re.sub(c, before + '\\1' + after, text, flags=re.I) return text def agg_results(results): """""" scores = [x[0] for x in results] return sum([x * x for x in scores]) ** 0.5 def plot_results(results, title=''): """""" scores = [x[0] for x in results] scores.sort(reverse=True) score = agg_results(results) plt.figtext(0.4, 1, f'total L2 score: {score:.02f}') criteria = [100, 'mechanical', 'ventilat', 20, 'adjust', '_age', '_years', '_old', 'elder', 'young', 2, '_surviv', 'discharge', 'extubate', 'alive', 2, 'nonsurviv', 'non-surviv', '_died', 'dead', 'death', 'mortality', 'complication', 10, 'Kaplan.Meier', 'APACHE', 'SOFA', 'RIFLE', 'Glasgow.Coma', 'GCS', 'SAPS', '_RESP_', 'RSBI', '1000.person_', 10, 'figure \\d+', '_fig[.]\\s*\\d+', '_table \\d+', 2, 'outcome', 'result', 'occurr', 'cohort', 'median', '_n\\s*=\\s*\\d+', '(?<=[ (])\\d+ patients', '(?<=[ (])\\d+ cases', 150, 'covid|sars-cov|cov-2|cov2|wuhan'] style_by_group_id = {1: 'w=bold', 2: 'bg=#FFFF00', 3: 'bg=#00FF00', 4: 'bg=#FFAAAA', 5: 'bg=#FFCC00', 6: 
'bg=#FFAAFF', 7: 'bg=#00FFFF', 9: 'w=bold,fg=#FF0000'} default_style = '' K = 50 queries = ['Outcomes data for COVID-19 after mechanical ventilation adjusted for age', 'Outcomes data for COVID-19 / SARS-CoV-2 after mechanical ventilation adjusted for age', 'Results for COVID-19 / SARS-CoV-2 after mechanical ventilation adjusted for age', 'COVID-19 SARS-CoV-2 results after mechnical ventilation discharged dead died', 'COVID-19 SARS-CoV-2 results after mechnical ventilation discharged dead died survived survivors adjusted age years old', 'COVID-19 SARS-CoV-2 results after mechnical ventilation discharged died survived survivors extubated adjusted', 'COVID-19 SARS-CoV-2 results after mechnical ventilation discharged dead died survived survivors adjusted age years old', 'COVID-19 SARS-CoV-2 results after mechnical ventilation discharged died survived extubated adjusted', 'COVID-19 SARS-CoV-2 results after mechnical ventilation discharged died survived extubated adjusted age', 'COVID-19 SARS-CoV-2 outcomes after mechnical ventilation discharged died survived extubated adjusted', 'COVID-19 SARS-CoV-2 results outcomes after mechnical ventilation discharged died survived extubated adjusted age', 'COVID-19 SARS-CoV-2 results outcomes after mechnical ventilation discharged died survived extubated', 'COVID-19 SARS-CoV-2 results outcomes mechnical ventilation discharged died survived extubated', 'COVID-19 results outcomes after mechnical ventilation discharged died survived extubated adjusted', 'Covid-19 results outcomes after mechnical ventilation discharged died survived extubated adjusted', 'COVID-19 covid-19 SARS-CoV-2 results outcomes after mechnical ventilation discharged died survived extubated adjusted', 'COVID-19 SARS-CoV-2 results outcomes after mechnical ventilation discharged died survived extubated adjusted', 'COVID-19 COVID-2019 SARS-CoV-2 SARS-CoV2 results outcomes after mechnical ventilation discharged died survived extubated adjusted', 'COVID-19 SARS-CoV-2 results outcmes data after mechnical ventilation discharged died survived extubated adjusted age'] query = 'Outcomes data for COVID-19 after mechanical ventilation adjusted for age' K = 500 q = model.text_to_dense(query) i_d_lists = model.dense_ann_query(q, K) results = score_results(i_d_lists, criteria) plot_results(results, title='Query result score by rank (descencing scores)')
code
32062272/cell_13
[ "text_html_output_1.png" ]
from pprint import pprint
from time import time

import pandas as pd

pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv', nrows=3)

meta_by_sha = {}
meta_by_pmc = {}
t0 = time()
COLS = ['cord_uid', 'sha', 'pmcid', 'publish_time', 'journal', 'url', 'title', 'authors']
df = pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv')
selected = df[df['full_text_file'] != ''][COLS]
rows = selected.iterrows()
for _, r in rows:
    if type(r['sha']) is str:
        for sha in r['sha'].split(';'):
            sha = sha.strip()
            meta = {k: r[k] for k in COLS}
            meta_by_sha[sha] = meta
    if type(r['pmcid']) is str:
        pmc = r['pmcid']
        meta = {k: r[k] for k in COLS}
        meta_by_pmc[pmc] = meta

print('Paper metadata sample:\n')
for sha in meta_by_sha:
    pprint(meta_by_sha[sha])
    break
code
32062272/cell_23
[ "text_plain_output_1.png" ]
from IPython.core.display import display, HTML from time import time import inverness import pandas as pd import re import inverness model = inverness.Model('/kaggle/input/cord-19-inverness-all-v7/').load(['fun', 'meta', 'phraser', 'dictionary', 'tfidf', 'lsi', 'dense_ann']) pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv', nrows=3) meta_by_sha = {} meta_by_pmc = {} t0 = time() COLS = ['cord_uid', 'sha', 'pmcid', 'publish_time', 'journal', 'url', 'title', 'authors'] df = pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv') selected = df[df['full_text_file'] != ''][COLS] rows = selected.iterrows() for _, r in rows: if type(r['sha']) is str: for sha in r['sha'].split(';'): sha = sha.strip() meta = {k: r[k] for k in COLS} meta_by_sha[sha] = meta if type(r['pmcid']) is str: pmc = r['pmcid'] meta = {k: r[k] for k in COLS} meta_by_pmc[pmc] = meta def score_text(text, criteria): """""" total = 0 value = 1 for c in criteria: if type(c) in (int, float): value = c else: c = c.replace('_', '\\b') matches = re.findall(c, text, re.I) cnt = len(matches) total += value * cnt_to_score(cnt) return total def cnt_to_score(cnt): return min(2, cnt) def score_results(i_d_lists, criteria): """""" results = [] for i, d in zip(*i_d_lists): score = 0 paper_id = model.meta[i]['paper_id'] if paper_id in meta_by_sha: meta = meta_by_sha[paper_id] else: meta = meta_by_pmc[paper_id] score += score_text(meta['title'], criteria) doc = model.get_doc(i) text = model.doc_to_text(doc).replace('\n', ' ').replace('\r', ' ') html = highlight(text, criteria, style_by_group_id, default_style) score += score_text(text, criteria) rec = (score, d, i, html, meta) results += [rec] results.sort(key=lambda x: (-x[0], x[1])) return results def score_queries(queries, criteria, K=50): """""" by_score = [] for query in queries: q = model.text_to_dense(query) i_d = model.dense_ann_query(q, K) results = score_results(i_d, criteria) score = agg_results(results) by_score += [(score, query)] by_score.sort() return by_score def highlight(text, criteria, styles={}, default='w=bold'): """""" group_id = 0 for c in criteria: if type(c) in (int, float): group_id += 1 else: c = c.replace('_', '\\b') c = f'({c}\\w*)' style = styles.get(group_id, default) style_props = [] for prop in style.split(','): k, _, v = prop.partition('=') if k == 'w': style_props += [f'font-weight:{v}'] if k == 'fg': style_props += [f'color:{v}'] if k == 'bg': style_props += [f'background-color:{v}'] before = f'''<span style="{';'.join(style_props)}">''' after = '</span>' text = re.sub(c, before + '\\1' + after, text, flags=re.I) return text def agg_results(results): """""" scores = [x[0] for x in results] return sum([x * x for x in scores]) ** 0.5 def plot_results(results, title=''): """""" scores = [x[0] for x in results] scores.sort(reverse=True) score = agg_results(results) plt.figtext(0.4, 1, f'total L2 score: {score:.02f}') criteria = [100, 'mechanical', 'ventilat', 20, 'adjust', '_age', '_years', '_old', 'elder', 'young', 2, '_surviv', 'discharge', 'extubate', 'alive', 2, 'nonsurviv', 'non-surviv', '_died', 'dead', 'death', 'mortality', 'complication', 10, 'Kaplan.Meier', 'APACHE', 'SOFA', 'RIFLE', 'Glasgow.Coma', 'GCS', 'SAPS', '_RESP_', 'RSBI', '1000.person_', 10, 'figure \\d+', '_fig[.]\\s*\\d+', '_table \\d+', 2, 'outcome', 'result', 'occurr', 'cohort', 'median', '_n\\s*=\\s*\\d+', '(?<=[ (])\\d+ patients', '(?<=[ (])\\d+ cases', 150, 'covid|sars-cov|cov-2|cov2|wuhan'] style_by_group_id = {1: 'w=bold', 2: 'bg=#FFFF00', 3: 'bg=#00FF00', 
4: 'bg=#FFAAAA', 5: 'bg=#FFCC00', 6: 'bg=#FFAAFF', 7: 'bg=#00FFFF', 9: 'w=bold,fg=#FF0000'} default_style = '' K = 50 queries = ['Outcomes data for COVID-19 after mechanical ventilation adjusted for age', 'Outcomes data for COVID-19 / SARS-CoV-2 after mechanical ventilation adjusted for age', 'Results for COVID-19 / SARS-CoV-2 after mechanical ventilation adjusted for age', 'COVID-19 SARS-CoV-2 results after mechnical ventilation discharged dead died', 'COVID-19 SARS-CoV-2 results after mechnical ventilation discharged dead died survived survivors adjusted age years old', 'COVID-19 SARS-CoV-2 results after mechnical ventilation discharged died survived survivors extubated adjusted', 'COVID-19 SARS-CoV-2 results after mechnical ventilation discharged dead died survived survivors adjusted age years old', 'COVID-19 SARS-CoV-2 results after mechnical ventilation discharged died survived extubated adjusted', 'COVID-19 SARS-CoV-2 results after mechnical ventilation discharged died survived extubated adjusted age', 'COVID-19 SARS-CoV-2 outcomes after mechnical ventilation discharged died survived extubated adjusted', 'COVID-19 SARS-CoV-2 results outcomes after mechnical ventilation discharged died survived extubated adjusted age', 'COVID-19 SARS-CoV-2 results outcomes after mechnical ventilation discharged died survived extubated', 'COVID-19 SARS-CoV-2 results outcomes mechnical ventilation discharged died survived extubated', 'COVID-19 results outcomes after mechnical ventilation discharged died survived extubated adjusted', 'Covid-19 results outcomes after mechnical ventilation discharged died survived extubated adjusted', 'COVID-19 covid-19 SARS-CoV-2 results outcomes after mechnical ventilation discharged died survived extubated adjusted', 'COVID-19 SARS-CoV-2 results outcomes after mechnical ventilation discharged died survived extubated adjusted', 'COVID-19 COVID-2019 SARS-CoV-2 SARS-CoV2 results outcomes after mechnical ventilation discharged died survived extubated adjusted', 'COVID-19 SARS-CoV-2 results outcmes data after mechnical ventilation discharged died survived extubated adjusted age'] query = 'Outcomes data for COVID-19 after mechanical ventilation adjusted for age' K = 500 q = model.text_to_dense(query) i_d_lists = model.dense_ann_query(q, K) results = score_results(i_d_lists, criteria) N = 20 for score, dist, i, html, meta in results[:N]: display(HTML(f'''\n <h3>{meta['title']}</h3>\n <p>{meta['publish_time']} -- {meta['journal']} -- <a href="{meta['url']}">link</a></p>\n <p style="color:#AAAAAA">score:{score} -- dist:{dist:.03f} -- cord_uid:{meta['cord_uid']} -- paragraph_id:{i}</p>\n {html}\n '''))
code
32062272/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv', nrows=3)
code
32062272/cell_19
[ "text_plain_output_1.png" ]
from time import time import inverness import pandas as pd import re import inverness model = inverness.Model('/kaggle/input/cord-19-inverness-all-v7/').load(['fun', 'meta', 'phraser', 'dictionary', 'tfidf', 'lsi', 'dense_ann']) pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv', nrows=3) meta_by_sha = {} meta_by_pmc = {} t0 = time() COLS = ['cord_uid', 'sha', 'pmcid', 'publish_time', 'journal', 'url', 'title', 'authors'] df = pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv') selected = df[df['full_text_file'] != ''][COLS] rows = selected.iterrows() for _, r in rows: if type(r['sha']) is str: for sha in r['sha'].split(';'): sha = sha.strip() meta = {k: r[k] for k in COLS} meta_by_sha[sha] = meta if type(r['pmcid']) is str: pmc = r['pmcid'] meta = {k: r[k] for k in COLS} meta_by_pmc[pmc] = meta def score_text(text, criteria): """""" total = 0 value = 1 for c in criteria: if type(c) in (int, float): value = c else: c = c.replace('_', '\\b') matches = re.findall(c, text, re.I) cnt = len(matches) total += value * cnt_to_score(cnt) return total def cnt_to_score(cnt): return min(2, cnt) def score_results(i_d_lists, criteria): """""" results = [] for i, d in zip(*i_d_lists): score = 0 paper_id = model.meta[i]['paper_id'] if paper_id in meta_by_sha: meta = meta_by_sha[paper_id] else: meta = meta_by_pmc[paper_id] score += score_text(meta['title'], criteria) doc = model.get_doc(i) text = model.doc_to_text(doc).replace('\n', ' ').replace('\r', ' ') html = highlight(text, criteria, style_by_group_id, default_style) score += score_text(text, criteria) rec = (score, d, i, html, meta) results += [rec] results.sort(key=lambda x: (-x[0], x[1])) return results def score_queries(queries, criteria, K=50): """""" by_score = [] for query in queries: q = model.text_to_dense(query) i_d = model.dense_ann_query(q, K) results = score_results(i_d, criteria) score = agg_results(results) by_score += [(score, query)] by_score.sort() return by_score def highlight(text, criteria, styles={}, default='w=bold'): """""" group_id = 0 for c in criteria: if type(c) in (int, float): group_id += 1 else: c = c.replace('_', '\\b') c = f'({c}\\w*)' style = styles.get(group_id, default) style_props = [] for prop in style.split(','): k, _, v = prop.partition('=') if k == 'w': style_props += [f'font-weight:{v}'] if k == 'fg': style_props += [f'color:{v}'] if k == 'bg': style_props += [f'background-color:{v}'] before = f'''<span style="{';'.join(style_props)}">''' after = '</span>' text = re.sub(c, before + '\\1' + after, text, flags=re.I) return text def agg_results(results): """""" scores = [x[0] for x in results] return sum([x * x for x in scores]) ** 0.5 def plot_results(results, title=''): """""" scores = [x[0] for x in results] scores.sort(reverse=True) score = agg_results(results) plt.figtext(0.4, 1, f'total L2 score: {score:.02f}') criteria = [100, 'mechanical', 'ventilat', 20, 'adjust', '_age', '_years', '_old', 'elder', 'young', 2, '_surviv', 'discharge', 'extubate', 'alive', 2, 'nonsurviv', 'non-surviv', '_died', 'dead', 'death', 'mortality', 'complication', 10, 'Kaplan.Meier', 'APACHE', 'SOFA', 'RIFLE', 'Glasgow.Coma', 'GCS', 'SAPS', '_RESP_', 'RSBI', '1000.person_', 10, 'figure \\d+', '_fig[.]\\s*\\d+', '_table \\d+', 2, 'outcome', 'result', 'occurr', 'cohort', 'median', '_n\\s*=\\s*\\d+', '(?<=[ (])\\d+ patients', '(?<=[ (])\\d+ cases', 150, 'covid|sars-cov|cov-2|cov2|wuhan'] style_by_group_id = {1: 'w=bold', 2: 'bg=#FFFF00', 3: 'bg=#00FF00', 4: 'bg=#FFAAAA', 5: 'bg=#FFCC00', 6: 
'bg=#FFAAFF', 7: 'bg=#00FFFF', 9: 'w=bold,fg=#FF0000'} default_style = '' K = 50 queries = ['Outcomes data for COVID-19 after mechanical ventilation adjusted for age', 'Outcomes data for COVID-19 / SARS-CoV-2 after mechanical ventilation adjusted for age', 'Results for COVID-19 / SARS-CoV-2 after mechanical ventilation adjusted for age', 'COVID-19 SARS-CoV-2 results after mechnical ventilation discharged dead died', 'COVID-19 SARS-CoV-2 results after mechnical ventilation discharged dead died survived survivors adjusted age years old', 'COVID-19 SARS-CoV-2 results after mechnical ventilation discharged died survived survivors extubated adjusted', 'COVID-19 SARS-CoV-2 results after mechnical ventilation discharged dead died survived survivors adjusted age years old', 'COVID-19 SARS-CoV-2 results after mechnical ventilation discharged died survived extubated adjusted', 'COVID-19 SARS-CoV-2 results after mechnical ventilation discharged died survived extubated adjusted age', 'COVID-19 SARS-CoV-2 outcomes after mechnical ventilation discharged died survived extubated adjusted', 'COVID-19 SARS-CoV-2 results outcomes after mechnical ventilation discharged died survived extubated adjusted age', 'COVID-19 SARS-CoV-2 results outcomes after mechnical ventilation discharged died survived extubated', 'COVID-19 SARS-CoV-2 results outcomes mechnical ventilation discharged died survived extubated', 'COVID-19 results outcomes after mechnical ventilation discharged died survived extubated adjusted', 'Covid-19 results outcomes after mechnical ventilation discharged died survived extubated adjusted', 'COVID-19 covid-19 SARS-CoV-2 results outcomes after mechnical ventilation discharged died survived extubated adjusted', 'COVID-19 SARS-CoV-2 results outcomes after mechnical ventilation discharged died survived extubated adjusted', 'COVID-19 COVID-2019 SARS-CoV-2 SARS-CoV2 results outcomes after mechnical ventilation discharged died survived extubated adjusted', 'COVID-19 SARS-CoV-2 results outcmes data after mechnical ventilation discharged died survived extubated adjusted age'] for score, query in score_queries(queries, criteria, K): print(f'{score:10.02f} -- {query}')
code
32062272/cell_7
[ "text_plain_output_1.png" ]
import inverness
import inverness
model = inverness.Model('/kaggle/input/cord-19-inverness-all-v7/').load(['fun', 'meta', 'phraser', 'dictionary', 'tfidf', 'lsi', 'dense_ann'])
code
32062272/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
from time import time
import pandas as pd
pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv', nrows=3)
meta_by_sha = {}
meta_by_pmc = {}
t0 = time()
COLS = ['cord_uid', 'sha', 'pmcid', 'publish_time', 'journal', 'url', 'title', 'authors']
df = pd.read_csv('/kaggle/input/CORD-19-research-challenge/metadata.csv')
selected = df[df['full_text_file'] != ''][COLS]
rows = selected.iterrows()
for _, r in rows:
    if type(r['sha']) is str:
        for sha in r['sha'].split(';'):
            sha = sha.strip()
            meta = {k: r[k] for k in COLS}
            meta_by_sha[sha] = meta
    if type(r['pmcid']) is str:
        pmc = r['pmcid']
        meta = {k: r[k] for k in COLS}
        meta_by_pmc[pmc] = meta
print(f'done in {time() - t0:.01f} seconds')
code
32062272/cell_5
[ "text_html_output_10.png", "text_html_output_16.png", "text_html_output_4.png", "text_html_output_6.png", "text_html_output_2.png", "text_html_output_15.png", "text_html_output_5.png", "text_html_output_14.png", "text_html_output_19.png", "text_html_output_9.png", "text_html_output_13.png", "text_html_output_20.png", "text_html_output_1.png", "text_html_output_17.png", "text_html_output_18.png", "text_html_output_12.png", "text_html_output_11.png", "text_html_output_8.png", "text_html_output_3.png", "text_html_output_7.png" ]
!pip install inverness
code
105171993/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
inex = pd.read_csv('../input/incomeexpendmerged/MergedNew.csv')
inex.dtypes
code
105171993/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
inex = pd.read_csv('../input/incomeexpendmerged/MergedNew.csv')
inex.dtypes
leagues5 = inex.groupby(by=['League', 'Year'])['Expenditure', 'Arrivals', 'Income', 'Depatures', 'Balance'].sum()
code
105171993/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105171993/cell_7
[ "text_html_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
inex = pd.read_csv('../input/incomeexpendmerged/MergedNew.csv')
inex.dtypes
leagues5 = inex.groupby(by=['League', 'Year'])['Expenditure', 'Arrivals', 'Income', 'Depatures', 'Balance'].sum()
leagues11 = inex[(inex['League'] == 'LaLiga') | (inex['League'] == 'Premier League') | (inex['League'] == 'Serie A') | (inex['League'] == 'Ligue 1') | (inex['League'] == 'Bundesliga')]
leagues11
code
105171993/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import seaborn as sns
inex = pd.read_csv('../input/incomeexpendmerged/MergedNew.csv')
inex.dtypes
leagues5 = inex.groupby(by=['League', 'Year'])['Expenditure', 'Arrivals', 'Income', 'Depatures', 'Balance'].sum()
leagues11 = inex[(inex['League'] == 'LaLiga') | (inex['League'] == 'Premier League') | (inex['League'] == 'Serie A') | (inex['League'] == 'Ligue 1') | (inex['League'] == 'Bundesliga')]
leagues11
#Number of Players Transfered per league last 12 years
f, ax1 = plt.subplots(figsize=(15, 8))
sns.set_style('whitegrid')
ax1 = sns.barplot(y = 'Arrivals' , x = 'Year',hue = 'League', data = leagues11,estimator = sum, ci = False, palette = sns.color_palette("husl",5) )
ax1.bar_label(ax1.containers[2], color = 'black', size = 15)
#ax1.bar_label(ax1.containers[3], color = 'black', size = 9)
ax1.tick_params(labelsize = 15)
plt.xticks(rotation = 0, size = 15)
plt.yticks(size = 15)
plt.xlabel('Year', size = 18)
plt.ylabel('Number of Transfers', size = 18)
plt.title('Total Number of Players Transfered Per League [2011 to 2022]', size = 20)
#Bar Plot
import seaborn as sns
#palette = sns.color_palette(['darkblue','red','green','yellow','pink'])
sns.set(rc={'figure.figsize':(15,8)})
sns.set_style('whitegrid')
plot = sns.barplot(y = 'Expenditure' , x = 'Year', data = leagues11, estimator = sum, hue = 'League' ,ci = False, palette = sns.color_palette("husl", 5))
plot1 = plot.get_figure()
plt.xlabel('Year', size = 15)
plt.ylabel('Transfer Value (Million Euros)', size = 18)
plt.title('Money Spent by Europes Top 5 Leagues [2011 to 2022]', size = 20)
plot1.savefig("BarPlot.png")
sns.set(rc={'figure.figsize': (15, 8)})
sns.set_style('white')
sns.set(rc={'figure.figsize': (15, 8)})
sns.set_style('whitegrid')
sns.histplot(y='Expenditure', x='League', hue='League', data=leagues5, palette=sns.color_palette(['olive', 'teal', 'darkorchid', 'lightcoral', 'seagreen']))
plt.xlabel('Leagues', size=15)
plt.ylabel('Transfer Value(Million Euros)', size=18)
plt.xticks(rotation=0, size=15)
plt.yticks(size=15)
plt.savefig('histplotClubs3.jpg')
code
105171993/cell_16
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import seaborn as sns inex = pd.read_csv('../input/incomeexpendmerged/MergedNew.csv') inex.dtypes leagues5 = inex.groupby(by=['League', 'Year'])['Expenditure', 'Arrivals', 'Income', 'Depatures', 'Balance'].sum() leagues11 = inex[(inex['League'] == 'LaLiga') | (inex['League'] == 'Premier League') | (inex['League'] == 'Serie A') | (inex['League'] == 'Ligue 1') | (inex['League'] == 'Bundesliga')] leagues11 #Number of Players Transfered per league last 12 years f, ax1 = plt.subplots(figsize=(15, 8)) sns.set_style('whitegrid') ax1 = sns.barplot(y = 'Arrivals' , x = 'Year',hue = 'League', data = leagues11,estimator = sum, ci = False, palette = sns.color_palette("husl",5) ) ax1.bar_label(ax1.containers[2], color = 'black', size = 15) #ax1.bar_label(ax1.containers[3], color = 'black', size = 9) ax1.tick_params(labelsize = 15) plt.xticks(rotation = 0, size = 15) plt.yticks(size = 15) plt.xlabel('Year', size = 18) plt.ylabel('Number of Transfers', size = 18) plt.title('Total Number of Players Transfered Per League [2011 to 2022]', size = 20) #Bar Plot import seaborn as sns #palette = sns.color_palette(['darkblue','red','green','yellow','pink']) sns.set(rc={'figure.figsize':(15,8)}) sns.set_style('whitegrid') plot = sns.barplot(y = 'Expenditure' , x = 'Year', data = leagues11, estimator = sum, hue = 'League' ,ci = False, palette = sns.color_palette("husl", 5)) plot1 = plot.get_figure() plt.xlabel('Year', size = 15) plt.ylabel('Transfer Value (Million Euros)', size = 18) plt.title('Money Spent by Europes Top 5 Leagues [2011 to 2022]', size = 20) plot1.savefig("BarPlot.png") sns.set(rc={'figure.figsize': (15, 8)}) sns.set_style('white') sns.set(rc={'figure.figsize': (15, 8)}) sns.set_style('whitegrid') plt.xticks(rotation=0, size=15) plt.yticks(size=15) sns.set_style('whitegrid') sns.set(rc={'figure.figsize': (15, 8)}) sns.relplot(y='Expenditure', x='Year', ci=None, hue='League', estimator=sum, palette=sns.color_palette('husl', 5), kind='line', data=leagues11[leagues11.League.isin(['Serie A', 'Premier League', 'LaLiga', 'Bundesliga', 'Ligue 1'])])
code
105171993/cell_17
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import seaborn as sns inex = pd.read_csv('../input/incomeexpendmerged/MergedNew.csv') inex.dtypes leagues5 = inex.groupby(by=['League', 'Year'])['Expenditure', 'Arrivals', 'Income', 'Depatures', 'Balance'].sum() leagues11 = inex[(inex['League'] == 'LaLiga') | (inex['League'] == 'Premier League') | (inex['League'] == 'Serie A') | (inex['League'] == 'Ligue 1') | (inex['League'] == 'Bundesliga')] leagues11 #Number of Players Transfered per league last 12 years f, ax1 = plt.subplots(figsize=(15, 8)) sns.set_style('whitegrid') ax1 = sns.barplot(y = 'Arrivals' , x = 'Year',hue = 'League', data = leagues11,estimator = sum, ci = False, palette = sns.color_palette("husl",5) ) ax1.bar_label(ax1.containers[2], color = 'black', size = 15) #ax1.bar_label(ax1.containers[3], color = 'black', size = 9) ax1.tick_params(labelsize = 15) plt.xticks(rotation = 0, size = 15) plt.yticks(size = 15) plt.xlabel('Year', size = 18) plt.ylabel('Number of Transfers', size = 18) plt.title('Total Number of Players Transfered Per League [2011 to 2022]', size = 20) #Bar Plot import seaborn as sns #palette = sns.color_palette(['darkblue','red','green','yellow','pink']) sns.set(rc={'figure.figsize':(15,8)}) sns.set_style('whitegrid') plot = sns.barplot(y = 'Expenditure' , x = 'Year', data = leagues11, estimator = sum, hue = 'League' ,ci = False, palette = sns.color_palette("husl", 5)) plot1 = plot.get_figure() plt.xlabel('Year', size = 15) plt.ylabel('Transfer Value (Million Euros)', size = 18) plt.title('Money Spent by Europes Top 5 Leagues [2011 to 2022]', size = 20) plot1.savefig("BarPlot.png") sns.set(rc={'figure.figsize': (15, 8)}) sns.set_style('white') sns.set(rc={'figure.figsize': (15, 8)}) sns.set_style('whitegrid') plt.xticks(rotation=0, size=15) plt.yticks(size=15) sns.set_style('whitegrid') sns.set(rc={'figure.figsize': (15, 8)}) sns.set(rc={'figure.figsize': (18, 8)}) sns.set_style('whitegrid') sns.boxplot(data=leagues11, y='Expenditure', x='League', palette=sns.color_palette('husl', 5)) plt.xticks(rotation=0, size=15) plt.yticks(size=15) plt.xlabel('Leagues', size=15) plt.ylabel('Transfer Value (Million Euros)', size=18) plt.title('Box Plot : Money Spent by Different Leagues[2011 - 2022]', size=20)
code
105171993/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import seaborn as sns inex = pd.read_csv('../input/incomeexpendmerged/MergedNew.csv') inex.dtypes leagues5 = inex.groupby(by=['League', 'Year'])['Expenditure', 'Arrivals', 'Income', 'Depatures', 'Balance'].sum() leagues11 = inex[(inex['League'] == 'LaLiga') | (inex['League'] == 'Premier League') | (inex['League'] == 'Serie A') | (inex['League'] == 'Ligue 1') | (inex['League'] == 'Bundesliga')] leagues11 #Number of Players Transfered per league last 12 years f, ax1 = plt.subplots(figsize=(15, 8)) sns.set_style('whitegrid') ax1 = sns.barplot(y = 'Arrivals' , x = 'Year',hue = 'League', data = leagues11,estimator = sum, ci = False, palette = sns.color_palette("husl",5) ) ax1.bar_label(ax1.containers[2], color = 'black', size = 15) #ax1.bar_label(ax1.containers[3], color = 'black', size = 9) ax1.tick_params(labelsize = 15) plt.xticks(rotation = 0, size = 15) plt.yticks(size = 15) plt.xlabel('Year', size = 18) plt.ylabel('Number of Transfers', size = 18) plt.title('Total Number of Players Transfered Per League [2011 to 2022]', size = 20) #Bar Plot import seaborn as sns #palette = sns.color_palette(['darkblue','red','green','yellow','pink']) sns.set(rc={'figure.figsize':(15,8)}) sns.set_style('whitegrid') plot = sns.barplot(y = 'Expenditure' , x = 'Year', data = leagues11, estimator = sum, hue = 'League' ,ci = False, palette = sns.color_palette("husl", 5)) plot1 = plot.get_figure() plt.xlabel('Year', size = 15) plt.ylabel('Transfer Value (Million Euros)', size = 18) plt.title('Money Spent by Europes Top 5 Leagues [2011 to 2022]', size = 20) plot1.savefig("BarPlot.png") sns.set(rc={'figure.figsize': (15, 8)}) sns.set_style('white') sns.lmplot(y='Expenditure', x='Year', ci=None, data=leagues11[leagues11.League.isin(['Serie A', 'Premier League', 'LaLiga', 'Bundesliga', 'Ligue 1'])], hue='League', palette=sns.color_palette('husl', 5), col='League', line_kws={'color': 'black', 'lw': 5}, scatter_kws={'s': 200, 'edgecolor': 'black', 'alpha': 0.4}) plt.savefig('LMPlot.jpg')
code
105171993/cell_10
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
inex = pd.read_csv('../input/incomeexpendmerged/MergedNew.csv')
inex.dtypes
leagues5 = inex.groupby(by=['League', 'Year'])['Expenditure', 'Arrivals', 'Income', 'Depatures', 'Balance'].sum()
leagues11 = inex[(inex['League'] == 'LaLiga') | (inex['League'] == 'Premier League') | (inex['League'] == 'Serie A') | (inex['League'] == 'Ligue 1') | (inex['League'] == 'Bundesliga')]
leagues11
f, ax1 = plt.subplots(figsize=(15, 8))
sns.set_style('whitegrid')
ax1 = sns.barplot(y='Arrivals', x='Year', hue='League', data=leagues11, estimator=sum, ci=False, palette=sns.color_palette('husl', 5))
ax1.bar_label(ax1.containers[2], color='black', size=15)
ax1.tick_params(labelsize=15)
plt.xticks(rotation=0, size=15)
plt.yticks(size=15)
plt.xlabel('Year', size=18)
plt.ylabel('Number of Transfers', size=18)
plt.title('Total Number of Players Transfered Per League [2011 to 2022]', size=20)
code
105171993/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import seaborn as sns
inex = pd.read_csv('../input/incomeexpendmerged/MergedNew.csv')
inex.dtypes
leagues5 = inex.groupby(by=['League', 'Year'])['Expenditure', 'Arrivals', 'Income', 'Depatures', 'Balance'].sum()
leagues11 = inex[(inex['League'] == 'LaLiga') | (inex['League'] == 'Premier League') | (inex['League'] == 'Serie A') | (inex['League'] == 'Ligue 1') | (inex['League'] == 'Bundesliga')]
leagues11
#Number of Players Transfered per league last 12 years
f, ax1 = plt.subplots(figsize=(15, 8))
sns.set_style('whitegrid')
ax1 = sns.barplot(y = 'Arrivals' , x = 'Year',hue = 'League', data = leagues11,estimator = sum, ci = False, palette = sns.color_palette("husl",5) )
ax1.bar_label(ax1.containers[2], color = 'black', size = 15)
#ax1.bar_label(ax1.containers[3], color = 'black', size = 9)
ax1.tick_params(labelsize = 15)
plt.xticks(rotation = 0, size = 15)
plt.yticks(size = 15)
plt.xlabel('Year', size = 18)
plt.ylabel('Number of Transfers', size = 18)
plt.title('Total Number of Players Transfered Per League [2011 to 2022]', size = 20)
import seaborn as sns
sns.set(rc={'figure.figsize': (15, 8)})
sns.set_style('whitegrid')
plot = sns.barplot(y='Expenditure', x='Year', data=leagues11, estimator=sum, hue='League', ci=False, palette=sns.color_palette('husl', 5))
plot1 = plot.get_figure()
plt.xlabel('Year', size=15)
plt.ylabel('Transfer Value (Million Euros)', size=18)
plt.title('Money Spent by Europes Top 5 Leagues [2011 to 2022]', size=20)
plot1.savefig('BarPlot.png')
code
18132466/cell_21
[ "text_plain_output_1.png" ]
from scipy.linalg import eigh from sklearn import decomposition from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sn import numpy as np import pandas as pd import matplotlib.pyplot as plt d0 = pd.read_csv('../input/train.csv') l = d0['label'] d = d0.drop('label', axis=1) idx = 150 grid_data = d.iloc[idx].as_matrix().reshape(28, 28) #Pick first 15k data-points to work on for time-efficiency. #Exercise: Perform the same analysis on all of 42K data-point labels = l.head(15000) data = d.head(15000) print("the shape of sample data = ", data.shape) from sklearn.preprocessing import StandardScaler standardized_data = StandardScaler().fit_transform(data) sample_data = standardized_data covar_matrix = np.matmul(sample_data.T, sample_data) from scipy.linalg import eigh values, vectors = eigh(covar_matrix, eigvals=(782, 783)) vectors = vectors.T import matplotlib.pyplot as plt new_coordinates = np.matmul(vectors, sample_data.T) import pandas as pd new_coordinates = np.vstack((new_coordinates, labels)).T dataframe = pd.DataFrame(data=new_coordinates, columns=('1st_principal', '2nd_principal', 'label')) import seaborn as sn sn.FacetGrid(dataframe, hue='label', size=6).map(plt.scatter, '1st_principal', '2nd_principal').add_legend() from sklearn import decomposition pca = decomposition.PCA() pca.n_components = 2 pca_data = pca.fit_transform(sample_data) pca_data = np.vstack((pca_data.T, labels)).T pca_df = pd.DataFrame(data=pca_data, columns=('1st_principal', '2nd_principal', 'label')) sn.FacetGrid(pca_df, hue='label', size=6).map(plt.scatter, '1st_principal', '2nd_principal').add_legend() pca.n_components = 784 pca_data = pca.fit_transform(sample_data) percentage_var_explained = pca.explained_variance_ / np.sum(pca.explained_variance_) cum_var_explained = np.cumsum(percentage_var_explained) plt.figure(1, figsize=(6, 4)) plt.clf() plt.plot(cum_var_explained, linewidth=2) plt.axis('tight') plt.grid() plt.xlabel('n_components') plt.ylabel('Cumulative_explained_variance') plt.show()
code
18132466/cell_13
[ "text_plain_output_1.png" ]
from scipy.linalg import eigh
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
d0 = pd.read_csv('../input/train.csv')
l = d0['label']
d = d0.drop('label', axis=1)
idx = 150
grid_data = d.iloc[idx].as_matrix().reshape(28, 28)
#Pick first 15k data-points to work on for time-efficiency.
#Exercise: Perform the same analysis on all of 42K data-point
labels = l.head(15000)
data = d.head(15000)
print("the shape of sample data = ", data.shape)
from sklearn.preprocessing import StandardScaler
standardized_data = StandardScaler().fit_transform(data)
sample_data = standardized_data
covar_matrix = np.matmul(sample_data.T, sample_data)
from scipy.linalg import eigh
values, vectors = eigh(covar_matrix, eigvals=(782, 783))
vectors = vectors.T
import matplotlib.pyplot as plt
new_coordinates = np.matmul(vectors, sample_data.T)
print('resultant new data points shape ', vectors.shape, 'X', sample_data.shape)
code
18132466/cell_9
[ "image_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
d0 = pd.read_csv('../input/train.csv')
l = d0['label']
d = d0.drop('label', axis=1)
idx = 150
grid_data = d.iloc[idx].as_matrix().reshape(28, 28)
#Pick first 15k data-points to work on for time-efficiency.
#Exercise: Perform the same analysis on all of 42K data-point
labels = l.head(15000)
data = d.head(15000)
print("the shape of sample data = ", data.shape)
from sklearn.preprocessing import StandardScaler
standardized_data = StandardScaler().fit_transform(data)
print(standardized_data.shape)
code
18132466/cell_4
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
d0 = pd.read_csv('../input/train.csv')
l = d0['label']
d = d0.drop('label', axis=1)
print(d.shape)
print(l.shape)
code
18132466/cell_2
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
code
18132466/cell_19
[ "text_plain_output_1.png" ]
from scipy.linalg import eigh from sklearn import decomposition from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sn import numpy as np import pandas as pd import matplotlib.pyplot as plt d0 = pd.read_csv('../input/train.csv') l = d0['label'] d = d0.drop('label', axis=1) idx = 150 grid_data = d.iloc[idx].as_matrix().reshape(28, 28) #Pick first 15k data-points to work on for time-efficiency. #Exercise: Perform the same analysis on all of 42K data-point labels = l.head(15000) data = d.head(15000) print("the shape of sample data = ", data.shape) from sklearn.preprocessing import StandardScaler standardized_data = StandardScaler().fit_transform(data) sample_data = standardized_data covar_matrix = np.matmul(sample_data.T, sample_data) from scipy.linalg import eigh values, vectors = eigh(covar_matrix, eigvals=(782, 783)) vectors = vectors.T import matplotlib.pyplot as plt new_coordinates = np.matmul(vectors, sample_data.T) import pandas as pd new_coordinates = np.vstack((new_coordinates, labels)).T dataframe = pd.DataFrame(data=new_coordinates, columns=('1st_principal', '2nd_principal', 'label')) import seaborn as sn sn.FacetGrid(dataframe, hue='label', size=6).map(plt.scatter, '1st_principal', '2nd_principal').add_legend() from sklearn import decomposition pca = decomposition.PCA() pca.n_components = 2 pca_data = pca.fit_transform(sample_data) pca_data = np.vstack((pca_data.T, labels)).T pca_df = pd.DataFrame(data=pca_data, columns=('1st_principal', '2nd_principal', 'label')) sn.FacetGrid(pca_df, hue='label', size=6).map(plt.scatter, '1st_principal', '2nd_principal').add_legend() plt.show()
code
18132466/cell_7
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
d0 = pd.read_csv('../input/train.csv')
l = d0['label']
d = d0.drop('label', axis=1)
idx = 150
grid_data = d.iloc[idx].as_matrix().reshape(28, 28)
labels = l.head(15000)
data = d.head(15000)
print('the shape of sample data = ', data.shape)
code
18132466/cell_18
[ "text_plain_output_1.png" ]
from scipy.linalg import eigh from sklearn import decomposition from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import matplotlib.pyplot as plt d0 = pd.read_csv('../input/train.csv') l = d0['label'] d = d0.drop('label', axis=1) idx = 150 grid_data = d.iloc[idx].as_matrix().reshape(28, 28) #Pick first 15k data-points to work on for time-efficiency. #Exercise: Perform the same analysis on all of 42K data-point labels = l.head(15000) data = d.head(15000) print("the shape of sample data = ", data.shape) from sklearn.preprocessing import StandardScaler standardized_data = StandardScaler().fit_transform(data) sample_data = standardized_data covar_matrix = np.matmul(sample_data.T, sample_data) from scipy.linalg import eigh values, vectors = eigh(covar_matrix, eigvals=(782, 783)) vectors = vectors.T import matplotlib.pyplot as plt new_coordinates = np.matmul(vectors, sample_data.T) from sklearn import decomposition pca = decomposition.PCA() pca.n_components = 2 pca_data = pca.fit_transform(sample_data) print('shape of pca_reduced.shape= ', pca_data.shape)
code
18132466/cell_15
[ "text_plain_output_1.png" ]
from scipy.linalg import eigh from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sn import numpy as np import pandas as pd import matplotlib.pyplot as plt d0 = pd.read_csv('../input/train.csv') l = d0['label'] d = d0.drop('label', axis=1) idx = 150 grid_data = d.iloc[idx].as_matrix().reshape(28, 28) #Pick first 15k data-points to work on for time-efficiency. #Exercise: Perform the same analysis on all of 42K data-point labels = l.head(15000) data = d.head(15000) print("the shape of sample data = ", data.shape) from sklearn.preprocessing import StandardScaler standardized_data = StandardScaler().fit_transform(data) sample_data = standardized_data covar_matrix = np.matmul(sample_data.T, sample_data) from scipy.linalg import eigh values, vectors = eigh(covar_matrix, eigvals=(782, 783)) vectors = vectors.T import matplotlib.pyplot as plt new_coordinates = np.matmul(vectors, sample_data.T) import pandas as pd new_coordinates = np.vstack((new_coordinates, labels)).T dataframe = pd.DataFrame(data=new_coordinates, columns=('1st_principal', '2nd_principal', 'label')) import seaborn as sn sn.FacetGrid(dataframe, hue='label', size=6).map(plt.scatter, '1st_principal', '2nd_principal').add_legend() plt.show()
code
18132466/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
d0 = pd.read_csv('../input/train.csv')
print(d0.head(5))
code
18132466/cell_14
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from scipy.linalg import eigh from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import matplotlib.pyplot as plt d0 = pd.read_csv('../input/train.csv') l = d0['label'] d = d0.drop('label', axis=1) idx = 150 grid_data = d.iloc[idx].as_matrix().reshape(28, 28) #Pick first 15k data-points to work on for time-efficiency. #Exercise: Perform the same analysis on all of 42K data-point labels = l.head(15000) data = d.head(15000) print("the shape of sample data = ", data.shape) from sklearn.preprocessing import StandardScaler standardized_data = StandardScaler().fit_transform(data) sample_data = standardized_data covar_matrix = np.matmul(sample_data.T, sample_data) from scipy.linalg import eigh values, vectors = eigh(covar_matrix, eigvals=(782, 783)) vectors = vectors.T import matplotlib.pyplot as plt new_coordinates = np.matmul(vectors, sample_data.T) import pandas as pd new_coordinates = np.vstack((new_coordinates, labels)).T dataframe = pd.DataFrame(data=new_coordinates, columns=('1st_principal', '2nd_principal', 'label')) print(dataframe.head())
code
18132466/cell_10
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
d0 = pd.read_csv('../input/train.csv')
l = d0['label']
d = d0.drop('label', axis=1)
idx = 150
grid_data = d.iloc[idx].as_matrix().reshape(28, 28)
#Pick first 15k data-points to work on for time-efficiency.
#Exercise: Perform the same analysis on all of 42K data-point
labels = l.head(15000)
data = d.head(15000)
print("the shape of sample data = ", data.shape)
from sklearn.preprocessing import StandardScaler
standardized_data = StandardScaler().fit_transform(data)
sample_data = standardized_data
covar_matrix = np.matmul(sample_data.T, sample_data)
print('The shape of variance matrix = ', covar_matrix.shape)
code
18132466/cell_12
[ "text_plain_output_1.png" ]
from scipy.linalg import eigh
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
d0 = pd.read_csv('../input/train.csv')
l = d0['label']
d = d0.drop('label', axis=1)
idx = 150
grid_data = d.iloc[idx].as_matrix().reshape(28, 28)
#Pick first 15k data-points to work on for time-efficiency.
#Exercise: Perform the same analysis on all of 42K data-point
labels = l.head(15000)
data = d.head(15000)
print("the shape of sample data = ", data.shape)
from sklearn.preprocessing import StandardScaler
standardized_data = StandardScaler().fit_transform(data)
sample_data = standardized_data
covar_matrix = np.matmul(sample_data.T, sample_data)
from scipy.linalg import eigh
values, vectors = eigh(covar_matrix, eigvals=(782, 783))
print('Shape of eigen vectors = ', vectors.shape)
vectors = vectors.T
print('Updated shape of eigen vectors =', vectors.shape)
code
18132466/cell_5
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
d0 = pd.read_csv('../input/train.csv')
l = d0['label']
d = d0.drop('label', axis=1)
plt.figure(figsize=(7, 7))
idx = 150
grid_data = d.iloc[idx].as_matrix().reshape(28, 28)
plt.imshow(grid_data, interpolation='none', cmap='gray')
plt.show()
print(l[idx])
code
34119712/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/iris/Iris.csv')
df.head()
code
34119712/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/iris/Iris.csv')
df.describe()
code
34119712/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
34119712/cell_5
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/iris/Iris.csv')
df.info()
code
18118019/cell_2
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from keras.applications.vgg19 import VGG19
from keras.layers import GlobalAveragePooling2D, Dropout, Dense, Conv2D
from keras.layers import MaxPooling2D, Flatten, Dense
from keras.models import Model, Sequential
from keras.applications.vgg19 import VGG19
from keras.preprocessing import image
from keras.preprocessing.image import img_to_array
from keras.applications.vgg19 import preprocess_input
from keras.models import Model, Sequential
from keras.layers import GlobalAveragePooling2D, Dropout, Dense, Conv2D
from keras.layers import MaxPooling2D, Flatten, Dense
vgg19 = VGG19(weights='../input/vgg19/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5', include_top=False, input_shape=(224, 224, 3))
for l in vgg19.layers:
    if l is not None:
        l.trainable = False
x = vgg19.output
x = Conv2D(filters=128, kernel_size=(3, 3), activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Flatten()(x)
x = Dense(256, activation='relu')(x)
predictions = Dense(5, activation='softmax')(x)
model = Model(inputs=vgg19.input, outputs=predictions)
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
code
18118019/cell_3
[ "text_plain_output_1.png" ]
from PIL import Image from keras.applications.vgg19 import VGG19 from keras.applications.vgg19 import preprocess_input from keras.layers import GlobalAveragePooling2D, Dropout, Dense, Conv2D from keras.layers import MaxPooling2D, Flatten, Dense from keras.models import Model, Sequential from keras.preprocessing import image from keras.preprocessing.image import img_to_array import numpy as np import pandas as pd import numpy as np from PIL import Image import pandas as pd import os train = pd.read_csv('../input/aptos2019-blindness-detection/train.csv') test = pd.read_csv('../input/aptos2019-blindness-detection/test.csv') submit = pd.read_csv('../input/aptos2019-blindness-detection/sample_submission.csv') diagnosis_encoded = pd.get_dummies(train.diagnosis) from keras.applications.vgg19 import VGG19 from keras.preprocessing import image from keras.preprocessing.image import img_to_array from keras.applications.vgg19 import preprocess_input from keras.models import Model, Sequential from keras.layers import GlobalAveragePooling2D, Dropout, Dense, Conv2D from keras.layers import MaxPooling2D, Flatten, Dense vgg19 = VGG19(weights='../input/vgg19/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5', include_top=False, input_shape=(224, 224, 3)) for l in vgg19.layers: if l is not None: l.trainable = False x = vgg19.output x = Conv2D(filters=128, kernel_size=(3, 3), activation='relu')(x) x = MaxPooling2D(pool_size=(2, 2))(x) x = Flatten()(x) x = Dense(256, activation='relu')(x) predictions = Dense(5, activation='softmax')(x) model = Model(inputs=vgg19.input, outputs=predictions) model.summary() model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) x_train_list = [] y_train_list = [] for index, row in train.iterrows(): my_pic_name = row.id_code im = Image.open('../input/aptos2019-blindness-detection/train_images/' + my_pic_name + '.png') im_224 = im.resize((224, 224), Image.ANTIALIAS) image = img_to_array(im_224) image = preprocess_input(image) x_train_list.append(image) y_train_list.append(diagnosis_encoded.loc[index]) x_train_raw = np.array(x_train_list, np.float32) / 255.0 y_train_raw = np.array(y_train_list, np.uint8) if len(x_train_list) % 200 == 0: model.train_on_batch(x_train_raw, y_train_raw) x_train_list = [] y_train_list = [] print('train on batch ...')
code
2010222/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
total = merged_dataset.isnull().sum().sort_values(ascending=False)
percent = (merged_dataset.isnull().sum() / merged_dataset.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
print(missing_data)
code
2010222/cell_11
[ "text_plain_output_1.png" ]
from scipy.stats import skew from sklearn.linear_model import LinearRegression import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) total = merged_dataset.isnull().sum().sort_values(ascending=False) percent = (merged_dataset.isnull().sum() / merged_dataset.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) corrmat = merged_dataset.corr()['SalePrice'] corrmat = corrmat.sort_values(axis=0, ascending=False) corrmat[corrmat > 0.5] merged_dataset.select_dtypes(include=['int64', 'float64']).columns merged_dataset['Alley'].fillna('None', inplace=True) merged_dataset['BsmtQual'].fillna(value='None', inplace=True) merged_dataset['BsmtCond'].fillna(value='None', inplace=True) merged_dataset['BsmtExposure'].fillna(value='None', inplace=True) merged_dataset['BsmtFinType1'].fillna(value='None', inplace=True) merged_dataset['BsmtFinType2'].fillna(value='None', inplace=True) merged_dataset['BsmtFinSF1'].fillna(value=0, inplace=True) merged_dataset['BsmtFinSF2'].fillna(value=0, inplace=True) merged_dataset['BsmtFullBath'].fillna(value=0, inplace=True) merged_dataset['BsmtHalfBath'].fillna(value=0, inplace=True) merged_dataset['BsmtUnfSF'].fillna(value=0, inplace=True) merged_dataset['TotalBsmtSF'].fillna(value=0, inplace=True) merged_dataset['Electrical'].fillna(value='SBrkr', inplace=True) merged_dataset['FireplaceQu'].fillna(value='None', inplace=True) merged_dataset['GarageType'].fillna(value='None', inplace=True) merged_dataset['GarageYrBlt'].fillna(value='None', inplace=True) merged_dataset['GarageFinish'].fillna(value='None', inplace=True) merged_dataset['GarageQual'].fillna(value='None', inplace=True) merged_dataset['GarageCond'].fillna(value='None', inplace=True) merged_dataset['GarageArea'].fillna(value=0, inplace=True) merged_dataset['GarageCars'].fillna(value=0, inplace=True) merged_dataset['PoolQC'].fillna(value='None', inplace=True) merged_dataset['LotFrontage'].fillna(value=0, inplace=True) merged_dataset['MiscFeature'].fillna(value='None', inplace=True) merged_dataset['Exterior1st'].fillna(value='None', inplace=True) merged_dataset['Exterior2nd'].fillna(value='None', inplace=True) merged_dataset['Functional'].fillna(value='None', inplace=True) merged_dataset['KitchenQual'].fillna(value='None', inplace=True) merged_dataset['MSZoning'].fillna(value='None', inplace=True) merged_dataset['SaleType'].fillna(value='None', inplace=True) merged_dataset['Utilities'].fillna(value='None', inplace=True) merged_dataset['MasVnrType'].fillna(value='None', inplace=True) merged_dataset['MasVnrArea'].fillna(value=0, inplace=True) merged_dataset['Fence'].fillna(value='None', inplace=True) merged_dataset['SalePrice'] = np.log1p(merged_dataset['SalePrice']) numeric_feats = merged_dataset.dtypes[merged_dataset.dtypes != 'object'].index skewed_feats = merged_dataset[numeric_feats].apply(lambda x: skew(x.dropna())) skewed_feats = skewed_feats[skewed_feats > 0.75] skewed_feats = skewed_feats.index merged_dataset[skewed_feats] = np.log1p(merged_dataset[skewed_feats]) new_train = merged_dataset[:1460] X_train = new_train.drop('SalePrice', axis=1) y_train = new_train['SalePrice'] new_test = merged_dataset[1460:] X_test = new_test.drop('SalePrice', axis=1) lr = LinearRegression().fit(X_train, y_train) prediction = np.expm1(lr.predict(X_test)) print(prediction)
code
2010222/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import skew
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.linear_model import LinearRegression
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
merged_dataset = pd.concat([train_data, test_data], axis=0)
code
2010222/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
total = merged_dataset.isnull().sum().sort_values(ascending=False)
percent = (merged_dataset.isnull().sum() / merged_dataset.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
corrmat = merged_dataset.corr()['SalePrice']
corrmat = corrmat.sort_values(axis=0, ascending=False)
corrmat[corrmat > 0.5]
code