path: string (length 13 to 17)
screenshot_names: list (length 1 to 873)
code: string (length 0 to 40.4k)
cell_type: string (1 class: "code")
72121245/cell_8
[ "text_html_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/30-days-of-ml/train.csv') test = pd.read_csv('../input/30-days-of-ml/test.csv') sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv') train.dtypes cat_cols = [col for col in train.columns if train[col].dtype == 'object'] cat_cols cont_cols = [col for col in train.columns if train[col].dtype != 'object' and col not in ('id', 'target')] cont_cols train['target'].hist()
code
72121245/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/30-days-of-ml/train.csv') test = pd.read_csv('../input/30-days-of-ml/test.csv') sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv') test.isna().sum()[test.isna().sum() > 0]
code
72121245/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/30-days-of-ml/train.csv') test = pd.read_csv('../input/30-days-of-ml/test.csv') sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv') print(f'Train Shape: {train.shape}\nTest Shape: {test.shape}')
code
72121245/cell_24
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/30-days-of-ml/train.csv') test = pd.read_csv('../input/30-days-of-ml/test.csv') sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv') sample_submission.head()
code
72121245/cell_14
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd train = pd.read_csv('../input/30-days-of-ml/train.csv') test = pd.read_csv('../input/30-days-of-ml/test.csv') sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv') train.dtypes cat_cols = [col for col in train.columns if train[col].dtype == 'object'] cat_cols cont_cols = [col for col in train.columns if train[col].dtype != 'object' and col not in ('id', 'target')] cont_cols mean = train['target'].mean() std = train['target'].std() cut_off = std * 3 lower, upper = (mean - cut_off, mean + cut_off) outliers = train[(train['target'] > upper) | (train['target'] < lower)] train.drop(outliers.index.to_list(), inplace=True) train.shape q25, q75 = (np.percentile(train['target'], 25), np.percentile(train['target'], 75)) iqr = q75 - q25 cut_off = iqr * 1.5 lower, upper = (q25 - cut_off, q75 + cut_off) cut_off = iqr * 1.5 lower, upper = (q25 - cut_off, q75 + cut_off) outliers = train[(train['target'] > upper) | (train['target'] < lower)] train.drop(outliers.index.to_list(), inplace=True) train.shape train.isna().sum()[train.isna().sum() > 0]
code
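The 72121245 cells above trim target outliers twice, first with a 3-standard-deviation cut and then with a 1.5 x IQR fence. A minimal sketch of the IQR step as a reusable helper, run on made-up data rather than the competition's train frame:

import numpy as np
import pandas as pd

def drop_outliers_iqr(df, col, k=1.5):
    # Keep only rows whose `col` value lies inside the k * IQR fence.
    q25, q75 = np.percentile(df[col], [25, 75])
    iqr = q75 - q25
    lower, upper = q25 - k * iqr, q75 + k * iqr
    return df[(df[col] >= lower) & (df[col] <= upper)]

# Hypothetical usage; the notebook applies the same fence to train['target'].
toy = pd.DataFrame({'target': [1.0, 1.2, 0.9, 1.1, 15.0]})
print(drop_outliers_iqr(toy, 'target').shape)  # (4, 1): the extreme 15.0 row is dropped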
72121245/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/30-days-of-ml/train.csv') test = pd.read_csv('../input/30-days-of-ml/test.csv') sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv') train.dtypes cat_cols = [col for col in train.columns if train[col].dtype == 'object'] cat_cols cont_cols = [col for col in train.columns if train[col].dtype != 'object' and col not in ('id', 'target')] cont_cols mean = train['target'].mean() std = train['target'].std() cut_off = std * 3 lower, upper = (mean - cut_off, mean + cut_off) outliers = train[(train['target'] > upper) | (train['target'] < lower)] train.drop(outliers.index.to_list(), inplace=True) train.shape
code
72121245/cell_12
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd train = pd.read_csv('../input/30-days-of-ml/train.csv') test = pd.read_csv('../input/30-days-of-ml/test.csv') sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv') train.dtypes cat_cols = [col for col in train.columns if train[col].dtype == 'object'] cat_cols cont_cols = [col for col in train.columns if train[col].dtype != 'object' and col not in ('id', 'target')] cont_cols mean = train['target'].mean() std = train['target'].std() cut_off = std * 3 lower, upper = (mean - cut_off, mean + cut_off) outliers = train[(train['target'] > upper) | (train['target'] < lower)] train.drop(outliers.index.to_list(), inplace=True) train.shape q25, q75 = (np.percentile(train['target'], 25), np.percentile(train['target'], 75)) iqr = q75 - q25 cut_off = iqr * 1.5 lower, upper = (q25 - cut_off, q75 + cut_off) cut_off = iqr * 1.5 lower, upper = (q25 - cut_off, q75 + cut_off) outliers = train[(train['target'] > upper) | (train['target'] < lower)] train.drop(outliers.index.to_list(), inplace=True) train.shape train['target'].hist()
code
72121245/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/30-days-of-ml/train.csv') test = pd.read_csv('../input/30-days-of-ml/test.csv') sample_submission = pd.read_csv('../input/30-days-of-ml/sample_submission.csv') train.dtypes cat_cols = [col for col in train.columns if train[col].dtype == 'object'] cat_cols
code
1002958/cell_21
[ "image_output_1.png" ]
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)

#box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);

matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
data.plot.scatter(x=var, y='SalePrice', ylim=(0, 800000))
code
1002958/cell_13
[ "image_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition'])) var = 'GrLivArea' data = pd.concat([train['SalePrice'], train[var]], axis=1) data.plot.scatter(x=var, y='SalePrice', ylim=(0, 800000))
code
1002958/cell_34
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)

#box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);

train.sort_values(by='GrLivArea', ascending=False)[:2]
train = train.drop(train[train['Id'] == 1299].index)
train = train.drop(train[train['Id'] == 524].index)
corr = train.select_dtypes(include=['float64', 'int64']).iloc[:, 1:].corr()
plt.figure(figsize=(12, 12))
sns.heatmap(corr, vmax=1, square=True)
code
1002958/cell_23
[ "image_output_1.png" ]
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)

#box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);

matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
train.sort_values(by='GrLivArea', ascending=False)[:2]
train = train.drop(train[train['Id'] == 1299].index)
train = train.drop(train[train['Id'] == 524].index)
var = 'TotalBsmtSF'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
data.plot.scatter(x=var, y='SalePrice', ylim=(0, 800000))
code
1002958/cell_33
[ "application_vnd.jupyter.stderr_output_1.png" ]
# corr is the numeric-feature correlation matrix built in an earlier cell of this notebook
cor_dict = corr['SalePrice'].to_dict()
del cor_dict['SalePrice']
print('List the numerical features descendingly by their correlation with Sale Price:\n')
for ele in sorted(cor_dict.items(), key=lambda x: -abs(x[1])):
    print('{0}: \t{1}'.format(*ele))
code
1002958/cell_39
[ "image_output_1.png" ]
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler  # StandardScaler is used below but was not imported in this cell
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)

#box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);

matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
saleprice_scaled = StandardScaler().fit_transform(train['SalePrice'][:, np.newaxis])
low_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][:10]
high_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][-10:]
train.sort_values(by='GrLivArea', ascending=False)[:2]
train = train.drop(train[train['Id'] == 1299].index)
train = train.drop(train[train['Id'] == 524].index)

from sklearn.linear_model import LinearRegression, Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score

def rmse_cv(model):
    # X_train and y are defined in a cell that is not captured in this record
    rmse = np.sqrt(-cross_val_score(model, X_train, y, scoring='neg_mean_squared_error', cv=5))
    return rmse

corr = train.select_dtypes(include=['float64', 'int64']).iloc[:, 1:].corr()
xt = plt.xticks(rotation=45)

#saleprice correlation matrix
k = 10 #number of variables for heatmap
cols = corr.nlargest(k, 'SalePrice')['SalePrice'].index
cm = np.corrcoef(train[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
plt.show()

sns.set()
cols = ['SalePrice', 'OverallQual', 'GrLivArea', 'GarageCars', 'TotalBsmtSF', 'FullBath', 'YearBuilt']
sns.pairplot(train[cols], size=2.5)
plt.show()
code
1002958/cell_41
[ "text_html_output_1.png" ]
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)

#box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);

matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
train.sort_values(by='GrLivArea', ascending=False)[:2]
train = train.drop(train[train['Id'] == 1299].index)
train = train.drop(train[train['Id'] == 524].index)
var = 'TotalBsmtSF'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
corr = train.select_dtypes(include=['float64', 'int64']).iloc[:, 1:].corr()
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing_data.head(20)
code
1002958/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition'])) train['SalePrice'].describe()
code
1002958/cell_19
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.preprocessing import StandardScaler  # StandardScaler is used below but was not imported in this cell

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)

#box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);

matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
saleprice_scaled = StandardScaler().fit_transform(train['SalePrice'][:, np.newaxis])
low_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][:10]
high_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][-10:]
print('outer range (low) of the distribution:')
print(low_range)
print('\nouter range (high) of the distribution:')
print(high_range)
code
1002958/cell_15
[ "text_html_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition'])) train.head()
code
1002958/cell_16
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition'])) var = 'GrLivArea' data = pd.concat([train['SalePrice'], train[var]], axis=1) var = 'OverallQual' data = pd.concat([train['SalePrice'], train[var]], axis=1) f, ax = plt.subplots(figsize=(8, 6)) fig = sns.boxplot(x=var, y='SalePrice', data=data) fig.axis(ymin=0, ymax=800000)
code
1002958/cell_38
[ "image_output_1.png" ]
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler  # StandardScaler is used below but was not imported in this cell
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)

#box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);

matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
saleprice_scaled = StandardScaler().fit_transform(train['SalePrice'][:, np.newaxis])
low_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][:10]
high_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][-10:]
train.sort_values(by='GrLivArea', ascending=False)[:2]
train = train.drop(train[train['Id'] == 1299].index)
train = train.drop(train[train['Id'] == 524].index)

from sklearn.linear_model import LinearRegression, Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score

def rmse_cv(model):
    # X_train and y are defined in a cell that is not captured in this record
    rmse = np.sqrt(-cross_val_score(model, X_train, y, scoring='neg_mean_squared_error', cv=5))
    return rmse

corr = train.select_dtypes(include=['float64', 'int64']).iloc[:, 1:].corr()
xt = plt.xticks(rotation=45)
k = 10
cols = corr.nlargest(k, 'SalePrice')['SalePrice'].index
cm = np.corrcoef(train[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
plt.show()
code
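The cell_38/cell_39 heatmaps above pick the ten variables most correlated with SalePrice via corr.nlargest before plotting. The selection step in isolation, sketched on a small random frame (the column names are borrowed from the notebook, the data is synthetic):

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df = pd.DataFrame(rng.normal(size=(100, 4)),
                  columns=['SalePrice', 'OverallQual', 'GrLivArea', 'YearBuilt'])

k = 3  # number of variables to keep, SalePrice included
corr = df.corr()
top_cols = corr.nlargest(k, 'SalePrice')['SalePrice'].index
cm = np.corrcoef(df[top_cols].values.T)  # k x k matrix that feeds sns.heatmap
print(list(top_cols))
print(cm.round(2))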
1002958/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)

#box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);

matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
prices.hist()
code
1002958/cell_35
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)

#box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);

train.sort_values(by='GrLivArea', ascending=False)[:2]
train = train.drop(train[train['Id'] == 1299].index)
train = train.drop(train[train['Id'] == 524].index)
corr = train.select_dtypes(include=['float64', 'int64']).iloc[:, 1:].corr()
plt.figure(figsize=(12, 6))
sns.countplot(x='Neighborhood', data=train)
xt = plt.xticks(rotation=45)
code
1002958/cell_14
[ "image_output_1.png" ]
import pandas as pd import seaborn as sns train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition'])) sns.distplot(train['SalePrice'])
code
1002958/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition'])) train['SalePrice'].describe()
code
1002958/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition'])) print('Skewness: %f' % train['SalePrice'].skew()) print('Kurtosis: %f' % train['SalePrice'].kurt())
code
1002958/cell_36
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
var = 'GrLivArea'
data = pd.concat([train['SalePrice'], train[var]], axis=1)

#box plot overallqual/saleprice
var = 'OverallQual'
data = pd.concat([train['SalePrice'], train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);

train.sort_values(by='GrLivArea', ascending=False)[:2]
train = train.drop(train[train['Id'] == 1299].index)
train = train.drop(train[train['Id'] == 524].index)
corr = train.select_dtypes(include=['float64', 'int64']).iloc[:, 1:].corr()
xt = plt.xticks(rotation=45)
sns.regplot(x='OverallQual', y='SalePrice', data=train, color='green')
code
121148265/cell_9
[ "text_plain_output_1.png" ]
from transformers import BartTokenizer, BartModel, BartForConditionalGeneration from transformers import PegasusForConditionalGeneration, PegasusTokenizer tokenizer_model_1 = PegasusTokenizer.from_pretrained('google/pegasus-large') loaded_model_1 = PegasusForConditionalGeneration.from_pretrained('google/pegasus-large') tokenizer_model_2 = BartTokenizer.from_pretrained('facebook/bart-large') model = BartForConditionalGeneration.from_pretrained('facebook/bart-large') def generate_summary_google(text): pred = [] r = len(text) for i in range(r): texts = text[i] tokens = tokenizer_model_1(texts, truncation=True, padding='longest', return_tensors='pt') summary = loaded_model_1.generate(**tokens) prediction = tokenizer_model_1.decode(summary[0]) prediction = tokenizer_model_1.decode(summary[0]).replace('<pad>', '').replace('</s>', '') pred.append(prediction) return pred def generate_summary_facebook(text): pred = [] for i in range(len(text)): texts = text[i] tokens = tokenizer_model_2.batch_encode_plus([texts], return_tensors='pt') summary = model.generate(tokens['input_ids'], max_length=100, early_stopping=True) prediction = tokenizer_model_2.decode(summary[0], skip_special_tokens=True) pred.append(prediction) return pred if __name__ == '__main__': user_input = input('Enter text to summarize: ') text = [user_input] summaries_google = generate_summary_google(text) summaries_facebook = generate_summary_facebook(text) print('Google Pegasus Summary:') print(summaries_google[0])
code
121148265/cell_7
[ "text_plain_output_1.png" ]
from transformers import BartTokenizer, BartModel, BartForConditionalGeneration from transformers import PegasusForConditionalGeneration, PegasusTokenizer tokenizer_model_1 = PegasusTokenizer.from_pretrained('google/pegasus-large') loaded_model_1 = PegasusForConditionalGeneration.from_pretrained('google/pegasus-large') tokenizer_model_2 = BartTokenizer.from_pretrained('facebook/bart-large') model = BartForConditionalGeneration.from_pretrained('facebook/bart-large') def generate_summary_google(text): pred = [] r = len(text) for i in range(r): texts = text[i] tokens = tokenizer_model_1(texts, truncation=True, padding='longest', return_tensors='pt') summary = loaded_model_1.generate(**tokens) prediction = tokenizer_model_1.decode(summary[0]) prediction = tokenizer_model_1.decode(summary[0]).replace('<pad>', '').replace('</s>', '') pred.append(prediction) return pred def generate_summary_facebook(text): pred = [] for i in range(len(text)): texts = text[i] tokens = tokenizer_model_2.batch_encode_plus([texts], return_tensors='pt') summary = model.generate(tokens['input_ids'], max_length=100, early_stopping=True) prediction = tokenizer_model_2.decode(summary[0], skip_special_tokens=True) pred.append(prediction) return pred if __name__ == '__main__': user_input = input('Enter text to summarize: ') text = [user_input] summaries_google = generate_summary_google(text) summaries_facebook = generate_summary_facebook(text)
code
121148265/cell_8
[ "text_plain_output_1.png" ]
from transformers import BartTokenizer, BartModel, BartForConditionalGeneration from transformers import PegasusForConditionalGeneration, PegasusTokenizer tokenizer_model_1 = PegasusTokenizer.from_pretrained('google/pegasus-large') loaded_model_1 = PegasusForConditionalGeneration.from_pretrained('google/pegasus-large') tokenizer_model_2 = BartTokenizer.from_pretrained('facebook/bart-large') model = BartForConditionalGeneration.from_pretrained('facebook/bart-large') def generate_summary_google(text): pred = [] r = len(text) for i in range(r): texts = text[i] tokens = tokenizer_model_1(texts, truncation=True, padding='longest', return_tensors='pt') summary = loaded_model_1.generate(**tokens) prediction = tokenizer_model_1.decode(summary[0]) prediction = tokenizer_model_1.decode(summary[0]).replace('<pad>', '').replace('</s>', '') pred.append(prediction) return pred def generate_summary_facebook(text): pred = [] for i in range(len(text)): texts = text[i] tokens = tokenizer_model_2.batch_encode_plus([texts], return_tensors='pt') summary = model.generate(tokens['input_ids'], max_length=100, early_stopping=True) prediction = tokenizer_model_2.decode(summary[0], skip_special_tokens=True) pred.append(prediction) return pred if __name__ == '__main__': user_input = input('Enter text to summarize: ') text = [user_input] summaries_google = generate_summary_google(text) summaries_facebook = generate_summary_facebook(text) text
code
121148265/cell_3
[ "text_plain_output_5.png", "text_plain_output_4.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from transformers import PegasusForConditionalGeneration, PegasusTokenizer tokenizer_model_1 = PegasusTokenizer.from_pretrained('google/pegasus-large') loaded_model_1 = PegasusForConditionalGeneration.from_pretrained('google/pegasus-large')
code
121148265/cell_10
[ "text_plain_output_5.png", "text_plain_output_4.png", "text_plain_output_6.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from transformers import BartTokenizer, BartModel, BartForConditionalGeneration from transformers import PegasusForConditionalGeneration, PegasusTokenizer tokenizer_model_1 = PegasusTokenizer.from_pretrained('google/pegasus-large') loaded_model_1 = PegasusForConditionalGeneration.from_pretrained('google/pegasus-large') tokenizer_model_2 = BartTokenizer.from_pretrained('facebook/bart-large') model = BartForConditionalGeneration.from_pretrained('facebook/bart-large') def generate_summary_google(text): pred = [] r = len(text) for i in range(r): texts = text[i] tokens = tokenizer_model_1(texts, truncation=True, padding='longest', return_tensors='pt') summary = loaded_model_1.generate(**tokens) prediction = tokenizer_model_1.decode(summary[0]) prediction = tokenizer_model_1.decode(summary[0]).replace('<pad>', '').replace('</s>', '') pred.append(prediction) return pred def generate_summary_facebook(text): pred = [] for i in range(len(text)): texts = text[i] tokens = tokenizer_model_2.batch_encode_plus([texts], return_tensors='pt') summary = model.generate(tokens['input_ids'], max_length=100, early_stopping=True) prediction = tokenizer_model_2.decode(summary[0], skip_special_tokens=True) pred.append(prediction) return pred if __name__ == '__main__': user_input = input('Enter text to summarize: ') text = [user_input] summaries_google = generate_summary_google(text) summaries_facebook = generate_summary_facebook(text) print('\nFacebook BART Summary:') print(summaries_facebook[0])
code
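The 121148265 cells above drive google/pegasus-large and facebook/bart-large through their tokenizers and generate() by hand. For a single model, the transformers pipeline API is a shorter route to the same kind of output; a hedged sketch with an illustrative checkpoint and length settings, not the notebook's own configuration:

from transformers import pipeline

# Any seq2seq summarization checkpoint can be plugged in here.
summarizer = pipeline('summarization', model='facebook/bart-large-cnn')

text = ('Transformer sequence-to-sequence models can compress a long passage '
        'into a few sentences while keeping its main points.')
result = summarizer(text, max_length=60, min_length=10, do_sample=False)
print(result[0]['summary_text'])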
129035267/cell_6
[ "image_output_1.png" ]
import cv2 import matplotlib.pyplot as plt path = '/kaggle/input/sports-image-dataset/data/badminton/00000052.jpg' img = cv2.imread(path) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) plt.axis('off') gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) plt.imshow(gray) plt.axis('off') plt.show()
code
129035267/cell_7
[ "image_output_1.png" ]
import cv2 import matplotlib.pyplot as plt path = '/kaggle/input/sports-image-dataset/data/badminton/00000052.jpg' img = cv2.imread(path) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) plt.axis('off') gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) plt.axis('off') canny = cv2.Canny(gray, 100, 200) plt.imshow(canny) plt.axis('off') plt.show()
code
129035267/cell_8
[ "image_output_1.png" ]
import cv2 import matplotlib.pyplot as plt import numpy as np path = '/kaggle/input/sports-image-dataset/data/badminton/00000052.jpg' img = cv2.imread(path) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) plt.axis('off') gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) plt.axis('off') canny = cv2.Canny(gray, 100, 200) plt.axis('off') edges = cv2.Canny(gray, 100, 200, apertureSize=3) lines = cv2.HoughLines(edges, rho=1, theta=np.pi / 180, threshold=150) for line in lines: rho, theta = line[0] a = np.cos(theta) b = np.sin(theta) x0 = a * rho y0 = b * rho x1 = int(x0 + 1000 * -b) y1 = int(y0 + 1000 * a) x2 = int(x0 - 1000 * -b) y2 = int(y0 - 1000 * a) cv2.line(img, (x1, y1), (x2, y2), (255, 255, 0), 3) plt.imshow(img) plt.axis('off') plt.show()
code
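cell_8 above uses cv2.HoughLines, which returns (rho, theta) pairs and needs the manual endpoint arithmetic shown there. The probabilistic variant cv2.HoughLinesP returns segment endpoints directly; a small sketch on a synthetic image (the input here is generated, not the badminton photo from the dataset):

import cv2
import numpy as np

# Synthetic grayscale image containing one bright line.
img = np.zeros((200, 200), dtype=np.uint8)
cv2.line(img, (20, 30), (180, 160), 255, 2)

edges = cv2.Canny(img, 100, 200)
segments = cv2.HoughLinesP(edges, rho=1, theta=np.pi / 180, threshold=50,
                           minLineLength=40, maxLineGap=5)
if segments is not None:
    for seg in segments:
        x1, y1, x2, y2 = seg[0]
        print('segment from', (x1, y1), 'to', (x2, y2))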
129035267/cell_5
[ "image_output_1.png" ]
import cv2 import matplotlib.pyplot as plt path = '/kaggle/input/sports-image-dataset/data/badminton/00000052.jpg' img = cv2.imread(path) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) plt.imshow(img) plt.axis('off') plt.show()
code
129023258/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd datasets_to_combine = [('/kaggle/input/uci-bag-of-words/docword.enron.txt', '/kaggle/input/uci-bag-of-words/vocab.enron.txt'), ('/kaggle/input/uci-bag-of-words/docword.kos.txt', '/kaggle/input/uci-bag-of-words/vocab.kos.txt'), ('/kaggle/input/uci-bag-of-words/docword.nips.txt', '/kaggle/input/uci-bag-of-words/vocab.nips.txt')] dataset_column_names = ['documentID', 'wordID', 'count'] merged_docwords = [] for dataset_tuple in datasets_to_combine: docword = pd.read_csv(dataset_tuple[0], delim_whitespace=True, header=None, skiprows=3, names=dataset_column_names) vocab = pd.read_csv(dataset_tuple[1], delim_whitespace=True, header=None, names=['word']) merged = pd.merge(docword, vocab.reset_index(), how='inner', left_on='wordID', right_on='index').drop('index', axis=1) merged_docwords.append(merged) corpus = pd.concat(merged_docwords, axis=0, ignore_index=True) corpus_sampled = corpus.sample(150000, random_state=7) wdm = pd.pivot_table(corpus_sampled, values='count', index='word', columns='documentID', fill_value=0) wdm.head()
code
129023258/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd datasets_to_combine = [('/kaggle/input/uci-bag-of-words/docword.enron.txt', '/kaggle/input/uci-bag-of-words/vocab.enron.txt'), ('/kaggle/input/uci-bag-of-words/docword.kos.txt', '/kaggle/input/uci-bag-of-words/vocab.kos.txt'), ('/kaggle/input/uci-bag-of-words/docword.nips.txt', '/kaggle/input/uci-bag-of-words/vocab.nips.txt')] dataset_column_names = ['documentID', 'wordID', 'count'] merged_docwords = [] for dataset_tuple in datasets_to_combine: docword = pd.read_csv(dataset_tuple[0], delim_whitespace=True, header=None, skiprows=3, names=dataset_column_names) vocab = pd.read_csv(dataset_tuple[1], delim_whitespace=True, header=None, names=['word']) merged = pd.merge(docword, vocab.reset_index(), how='inner', left_on='wordID', right_on='index').drop('index', axis=1) merged_docwords.append(merged) corpus = pd.concat(merged_docwords, axis=0, ignore_index=True) print(len(corpus)) corpus_sampled = corpus.sample(150000, random_state=7) print(len(corpus_sampled))
code
129023258/cell_19
[ "text_html_output_1.png" ]
from sklearn.decomposition import TruncatedSVD
import pandas as pd

datasets_to_combine = [('/kaggle/input/uci-bag-of-words/docword.enron.txt', '/kaggle/input/uci-bag-of-words/vocab.enron.txt'), ('/kaggle/input/uci-bag-of-words/docword.kos.txt', '/kaggle/input/uci-bag-of-words/vocab.kos.txt'), ('/kaggle/input/uci-bag-of-words/docword.nips.txt', '/kaggle/input/uci-bag-of-words/vocab.nips.txt')]
dataset_column_names = ['documentID', 'wordID', 'count']
merged_docwords = []
for dataset_tuple in datasets_to_combine:
    docword = pd.read_csv(dataset_tuple[0], delim_whitespace=True, header=None, skiprows=3, names=dataset_column_names)
    vocab = pd.read_csv(dataset_tuple[1], delim_whitespace=True, header=None, names=['word'])
    merged = pd.merge(docword, vocab.reset_index(), how='inner', left_on='wordID', right_on='index').drop('index', axis=1)
    merged_docwords.append(merged)
corpus = pd.concat(merged_docwords, axis=0, ignore_index=True)
corpus_sampled = corpus.sample(150000, random_state=7)
wdm = pd.pivot_table(corpus_sampled, values='count', index='word', columns='documentID', fill_value=0)
svd = TruncatedSVD(n_components=100, random_state=7)
svd.fit(wdm)
wdm_transformed = pd.DataFrame(svd.transform(wdm))  # transform() needs the matrix being projected
wdm_transformed
code
129023258/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd datasets_to_combine = [('/kaggle/input/uci-bag-of-words/docword.enron.txt', '/kaggle/input/uci-bag-of-words/vocab.enron.txt'), ('/kaggle/input/uci-bag-of-words/docword.kos.txt', '/kaggle/input/uci-bag-of-words/vocab.kos.txt'), ('/kaggle/input/uci-bag-of-words/docword.nips.txt', '/kaggle/input/uci-bag-of-words/vocab.nips.txt')] dataset_column_names = ['documentID', 'wordID', 'count'] merged_docwords = [] for dataset_tuple in datasets_to_combine: docword = pd.read_csv(dataset_tuple[0], delim_whitespace=True, header=None, skiprows=3, names=dataset_column_names) vocab = pd.read_csv(dataset_tuple[1], delim_whitespace=True, header=None, names=['word']) merged = pd.merge(docword, vocab.reset_index(), how='inner', left_on='wordID', right_on='index').drop('index', axis=1) merged_docwords.append(merged) corpus = pd.concat(merged_docwords, axis=0, ignore_index=True) print(corpus[:30]) print(corpus[-30:]) print(corpus.info())
code
129023258/cell_8
[ "text_html_output_1.png" ]
import pandas as pd datasets_to_combine = [('/kaggle/input/uci-bag-of-words/docword.enron.txt', '/kaggle/input/uci-bag-of-words/vocab.enron.txt'), ('/kaggle/input/uci-bag-of-words/docword.kos.txt', '/kaggle/input/uci-bag-of-words/vocab.kos.txt'), ('/kaggle/input/uci-bag-of-words/docword.nips.txt', '/kaggle/input/uci-bag-of-words/vocab.nips.txt')] dataset_column_names = ['documentID', 'wordID', 'count'] merged_docwords = [] for dataset_tuple in datasets_to_combine: docword = pd.read_csv(dataset_tuple[0], delim_whitespace=True, header=None, skiprows=3, names=dataset_column_names) vocab = pd.read_csv(dataset_tuple[1], delim_whitespace=True, header=None, names=['word']) merged = pd.merge(docword, vocab.reset_index(), how='inner', left_on='wordID', right_on='index').drop('index', axis=1) merged_docwords.append(merged) corpus = pd.concat(merged_docwords, axis=0, ignore_index=True) print(len(corpus.word.unique()))
code
129023258/cell_15
[ "text_plain_output_1.png" ]
from sklearn.decomposition import TruncatedSVD import pandas as pd datasets_to_combine = [('/kaggle/input/uci-bag-of-words/docword.enron.txt', '/kaggle/input/uci-bag-of-words/vocab.enron.txt'), ('/kaggle/input/uci-bag-of-words/docword.kos.txt', '/kaggle/input/uci-bag-of-words/vocab.kos.txt'), ('/kaggle/input/uci-bag-of-words/docword.nips.txt', '/kaggle/input/uci-bag-of-words/vocab.nips.txt')] dataset_column_names = ['documentID', 'wordID', 'count'] merged_docwords = [] for dataset_tuple in datasets_to_combine: docword = pd.read_csv(dataset_tuple[0], delim_whitespace=True, header=None, skiprows=3, names=dataset_column_names) vocab = pd.read_csv(dataset_tuple[1], delim_whitespace=True, header=None, names=['word']) merged = pd.merge(docword, vocab.reset_index(), how='inner', left_on='wordID', right_on='index').drop('index', axis=1) merged_docwords.append(merged) corpus = pd.concat(merged_docwords, axis=0, ignore_index=True) corpus_sampled = corpus.sample(150000, random_state=7) wdm = pd.pivot_table(corpus_sampled, values='count', index='word', columns='documentID', fill_value=0) svd = TruncatedSVD(n_components=100, random_state=7) svd.fit(wdm)
code
129023258/cell_16
[ "text_plain_output_1.png" ]
from sklearn.decomposition import TruncatedSVD import pandas as pd datasets_to_combine = [('/kaggle/input/uci-bag-of-words/docword.enron.txt', '/kaggle/input/uci-bag-of-words/vocab.enron.txt'), ('/kaggle/input/uci-bag-of-words/docword.kos.txt', '/kaggle/input/uci-bag-of-words/vocab.kos.txt'), ('/kaggle/input/uci-bag-of-words/docword.nips.txt', '/kaggle/input/uci-bag-of-words/vocab.nips.txt')] dataset_column_names = ['documentID', 'wordID', 'count'] merged_docwords = [] for dataset_tuple in datasets_to_combine: docword = pd.read_csv(dataset_tuple[0], delim_whitespace=True, header=None, skiprows=3, names=dataset_column_names) vocab = pd.read_csv(dataset_tuple[1], delim_whitespace=True, header=None, names=['word']) merged = pd.merge(docword, vocab.reset_index(), how='inner', left_on='wordID', right_on='index').drop('index', axis=1) merged_docwords.append(merged) corpus = pd.concat(merged_docwords, axis=0, ignore_index=True) corpus_sampled = corpus.sample(150000, random_state=7) wdm = pd.pivot_table(corpus_sampled, values='count', index='word', columns='documentID', fill_value=0) svd = TruncatedSVD(n_components=100, random_state=7) svd.fit(wdm) print(svd.explained_variance_ratio_)
code
129023258/cell_17
[ "text_html_output_1.png" ]
from sklearn.decomposition import TruncatedSVD import pandas as pd datasets_to_combine = [('/kaggle/input/uci-bag-of-words/docword.enron.txt', '/kaggle/input/uci-bag-of-words/vocab.enron.txt'), ('/kaggle/input/uci-bag-of-words/docword.kos.txt', '/kaggle/input/uci-bag-of-words/vocab.kos.txt'), ('/kaggle/input/uci-bag-of-words/docword.nips.txt', '/kaggle/input/uci-bag-of-words/vocab.nips.txt')] dataset_column_names = ['documentID', 'wordID', 'count'] merged_docwords = [] for dataset_tuple in datasets_to_combine: docword = pd.read_csv(dataset_tuple[0], delim_whitespace=True, header=None, skiprows=3, names=dataset_column_names) vocab = pd.read_csv(dataset_tuple[1], delim_whitespace=True, header=None, names=['word']) merged = pd.merge(docword, vocab.reset_index(), how='inner', left_on='wordID', right_on='index').drop('index', axis=1) merged_docwords.append(merged) corpus = pd.concat(merged_docwords, axis=0, ignore_index=True) corpus_sampled = corpus.sample(150000, random_state=7) wdm = pd.pivot_table(corpus_sampled, values='count', index='word', columns='documentID', fill_value=0) svd = TruncatedSVD(n_components=100, random_state=7) svd.fit(wdm) print(svd.singular_values_)
code
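cell_16 and cell_17 above inspect explained_variance_ratio_ and singular_values_ after fitting TruncatedSVD with a fixed n_components=100. The cumulative explained-variance curve is a common way to judge whether that number is enough; a minimal sketch on a synthetic count matrix standing in for the word-document matrix:

import numpy as np
from sklearn.decomposition import TruncatedSVD

rng = np.random.default_rng(7)
X = rng.poisson(1.0, size=(500, 300)).astype(float)  # synthetic stand-in for wdm

svd = TruncatedSVD(n_components=100, random_state=7)
svd.fit(X)

cumulative = np.cumsum(svd.explained_variance_ratio_)
# Variance retained by the first 10, 50, and 100 components; keep the smallest
# count that clears whatever retention target the analysis needs.
for k in (10, 50, 100):
    print(k, round(float(cumulative[k - 1]), 3))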
129023258/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd datasets_to_combine = [('/kaggle/input/uci-bag-of-words/docword.enron.txt', '/kaggle/input/uci-bag-of-words/vocab.enron.txt'), ('/kaggle/input/uci-bag-of-words/docword.kos.txt', '/kaggle/input/uci-bag-of-words/vocab.kos.txt'), ('/kaggle/input/uci-bag-of-words/docword.nips.txt', '/kaggle/input/uci-bag-of-words/vocab.nips.txt')] dataset_column_names = ['documentID', 'wordID', 'count'] merged_docwords = [] for dataset_tuple in datasets_to_combine: docword = pd.read_csv(dataset_tuple[0], delim_whitespace=True, header=None, skiprows=3, names=dataset_column_names) vocab = pd.read_csv(dataset_tuple[1], delim_whitespace=True, header=None, names=['word']) merged = pd.merge(docword, vocab.reset_index(), how='inner', left_on='wordID', right_on='index').drop('index', axis=1) merged_docwords.append(merged) print(merged_docwords)
code
106205317/cell_21
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns mapdata1 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP-Flatten.csv') mapdata2 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP.csv') mapdata1.shape bl_col = mapdata1.select_dtypes(include='boolean') int_col = mapdata1.select_dtypes(include='int').columns str_col = mapdata1.select_dtypes(include='object').columns sh = mapdata1.select_dtypes(exclude='boolean') sh
code
106205317/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd mapdata1 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP-Flatten.csv') mapdata2 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP.csv') mapdata1.shape mapdata1.info()
code
106205317/cell_25
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns mapdata1 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP-Flatten.csv') mapdata2 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP.csv') mapdata1.shape bl_col = mapdata1.select_dtypes(include='boolean') int_col = mapdata1.select_dtypes(include='int').columns str_col = mapdata1.select_dtypes(include='object').columns sh = mapdata1.select_dtypes(exclude='boolean') sh x_data = sh.drop(['Temperature', 'Cold Waves'], axis='columns') x_data.corr()
code
106205317/cell_30
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns mapdata1 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP-Flatten.csv') mapdata2 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP.csv') mapdata1.shape bl_col = mapdata1.select_dtypes(include='boolean') int_col = mapdata1.select_dtypes(include='int').columns str_col = mapdata1.select_dtypes(include='object').columns sh = mapdata1.select_dtypes(exclude='boolean') sh x_data = sh.drop(['Temperature', 'Cold Waves'], axis='columns') x_data.corr() def correlation(x_data, threshold): col_corr = set() corr_matrix = x_data.corr() for i in range(len(corr_matrix.columns)): for j in range(i): if corr_matrix.iloc[i, j] >= threshold and corr_matrix.columns[j] not in col_corr: colname = corr_matrix.columns[i] col_corr.add(colname) if colname in x_data.columns: del x_data[colname] x_data.shape sns.pairplot(x_data)
code
106205317/cell_44
[ "text_plain_output_1.png" ]
from sklearn import tree model = tree.DecisionTreeClassifier(criterion='entropy', random_state=0) model.fit(x_train, y_train)
code
106205317/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd mapdata1 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP-Flatten.csv') mapdata2 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP.csv') mapdata1.shape bl_col = mapdata1.select_dtypes(include='boolean') int_col = mapdata1.select_dtypes(include='int').columns str_col = mapdata1.select_dtypes(include='object').columns (bl_col.columns, int_col, str_col)
code
106205317/cell_40
[ "text_plain_output_1.png" ]
"""confusion = metrics.confusion_matrix(x_test, y_test) confusion"""
code
106205317/cell_29
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns mapdata1 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP-Flatten.csv') mapdata2 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP.csv') mapdata1.shape bl_col = mapdata1.select_dtypes(include='boolean') int_col = mapdata1.select_dtypes(include='int').columns str_col = mapdata1.select_dtypes(include='object').columns sh = mapdata1.select_dtypes(exclude='boolean') sh x_data = sh.drop(['Temperature', 'Cold Waves'], axis='columns') x_data.corr() def correlation(x_data, threshold): col_corr = set() corr_matrix = x_data.corr() for i in range(len(corr_matrix.columns)): for j in range(i): if corr_matrix.iloc[i, j] >= threshold and corr_matrix.columns[j] not in col_corr: colname = corr_matrix.columns[i] col_corr.add(colname) if colname in x_data.columns: del x_data[colname] x_data.shape
code
106205317/cell_39
[ "text_plain_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier(n_neighbors=5) knn.fit(x_train, y_train) predict = knn.predict(x_test) predict knn.predict_proba(x_test)
code
106205317/cell_26
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns mapdata1 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP-Flatten.csv') mapdata2 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP.csv') mapdata1.shape bl_col = mapdata1.select_dtypes(include='boolean') int_col = mapdata1.select_dtypes(include='int').columns str_col = mapdata1.select_dtypes(include='object').columns sh = mapdata1.select_dtypes(exclude='boolean') sh x_data = sh.drop(['Temperature', 'Cold Waves'], axis='columns') x_data.corr() plt.figure(figsize=(18, 16)) sns.heatmap(x_data.corr(), annot=True, cmap=plt.cm.CMRmap_r) plt.show()
code
106205317/cell_19
[ "image_output_4.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns mapdata1 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP-Flatten.csv') mapdata2 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP.csv') mapdata1.shape bl_col = mapdata1.select_dtypes(include='boolean') int_col = mapdata1.select_dtypes(include='int').columns str_col = mapdata1.select_dtypes(include='object').columns sns.jointplot(data=mapdata1, x='Latitude', y='Temperature') sns.jointplot(data=mapdata1, x='Longitude', y='Temperature') sns.jointplot(data=mapdata1, x='Named Location', y='Temperature') sns.jointplot(data=mapdata1, x='Map Name', y='Temperature')
code
106205317/cell_45
[ "text_plain_output_1.png" ]
from sklearn import tree from sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier(n_neighbors=5) knn.fit(x_train, y_train) predict = knn.predict(x_test) predict model = tree.DecisionTreeClassifier(criterion='entropy', random_state=0) model.fit(x_train, y_train) predict = model.predict(x_test) predict
code
106205317/cell_18
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns mapdata1 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP-Flatten.csv') mapdata2 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP.csv') mapdata1.shape bl_col = mapdata1.select_dtypes(include='boolean') int_col = mapdata1.select_dtypes(include='int').columns str_col = mapdata1.select_dtypes(include='object').columns mapdata1.hist(figsize=(18, 10)) plt.show()
code
106205317/cell_38
[ "text_plain_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier(n_neighbors=5) knn.fit(x_train, y_train) predict = knn.predict(x_test) predict
code
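The 106205317 classifier cells (KNN here, the decision tree at cell_44/cell_45) fit on x_train, y_train and score on x_test, y_test, which are created in a cell that is not part of these records. A minimal sketch of the assumed split-and-evaluate flow on synthetic data (the dataset and split parameters are placeholders):

from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics

# Synthetic stand-in for the Mars map features and a binary label.
X, y = make_classification(n_samples=300, n_features=8, random_state=0)
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(x_train, y_train)
predict = knn.predict(x_test)
print(metrics.accuracy_score(y_test, predict))
print(metrics.confusion_matrix(y_test, predict))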
106205317/cell_17
[ "image_output_11.png", "image_output_24.png", "image_output_46.png", "image_output_25.png", "image_output_47.png", "image_output_17.png", "image_output_30.png", "image_output_14.png", "image_output_59.png", "image_output_39.png", "image_output_28.png", "image_output_23.png", "image_output_34.png", "image_output_13.png", "image_output_40.png", "image_output_5.png", "image_output_48.png", "image_output_18.png", "image_output_58.png", "image_output_21.png", "image_output_52.png", "image_output_60.png", "image_output_7.png", "image_output_56.png", "image_output_31.png", "image_output_20.png", "image_output_32.png", "image_output_53.png", "image_output_4.png", "image_output_51.png", "image_output_42.png", "image_output_35.png", "image_output_41.png", "image_output_57.png", "image_output_36.png", "image_output_8.png", "image_output_37.png", "image_output_16.png", "image_output_27.png", "image_output_54.png", "image_output_6.png", "image_output_45.png", "image_output_12.png", "image_output_22.png", "image_output_55.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_3.png", "image_output_29.png", "image_output_44.png", "image_output_43.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_33.png", "image_output_50.png", "image_output_15.png", "image_output_49.png", "image_output_9.png", "image_output_19.png", "image_output_61.png", "image_output_38.png", "image_output_26.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns mapdata1 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP-Flatten.csv') mapdata2 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP.csv') mapdata1.shape bl_col = mapdata1.select_dtypes(include='boolean') int_col = mapdata1.select_dtypes(include='int').columns str_col = mapdata1.select_dtypes(include='object').columns for i, col in enumerate(bl_col): plt.figure(i) sns.countplot(x=col, data=bl_col)
code
106205317/cell_31
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns mapdata1 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP-Flatten.csv') mapdata2 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP.csv') mapdata1.shape bl_col = mapdata1.select_dtypes(include='boolean') int_col = mapdata1.select_dtypes(include='int').columns str_col = mapdata1.select_dtypes(include='object').columns sh = mapdata1.select_dtypes(exclude='boolean') sh x_data = sh.drop(['Temperature', 'Cold Waves'], axis='columns') y_data = sh[['Temperature', 'Cold Waves']] x_data.corr() def correlation(x_data, threshold): col_corr = set() corr_matrix = x_data.corr() for i in range(len(corr_matrix.columns)): for j in range(i): if corr_matrix.iloc[i, j] >= threshold and corr_matrix.columns[j] not in col_corr: colname = corr_matrix.columns[i] col_corr.add(colname) if colname in x_data.columns: del x_data[colname] x_data.shape sns.pairplot(y_data)
code
106205317/cell_14
[ "text_html_output_1.png" ]
import pandas as pd mapdata1 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP-Flatten.csv') mapdata2 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP.csv') mapdata1.shape mapdata1.head()
code
106205317/cell_22
[ "text_plain_output_1.png" ]
"""for col in mapdata1: if mapdata1.values.type == boolean: mapdata1.drop(axis=1, inplace=True)"""
code
106205317/cell_27
[ "text_plain_output_1.png" ]
"""def correlation(x_data, threshold): col_corr = set() corr_matrix = x_data.corr() for i in range(len(corr_matrix.columns)): for j in range(i): if abs(corr_matrix.iloc[i, j] > threshold: colname = corr_matrix.columns[i] col_corr.add(colname) return col_corr"""
code
106205317/cell_37
[ "text_plain_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier(n_neighbors=5) knn.fit(x_train, y_train)
code
106205317/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd mapdata1 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP-Flatten.csv') mapdata2 = pd.read_csv('../input/surviving-mars-maps/MapData-Evans-GP.csv') mapdata1.shape
code
328714/cell_2
[ "text_plain_output_5.png", "text_plain_output_9.png", "application_vnd.jupyter.stderr_output_4.png", "application_vnd.jupyter.stderr_output_6.png", "application_vnd.jupyter.stderr_output_8.png", "application_vnd.jupyter.stderr_output_10.png", "text_plain_output_3.png", "text_plain_output_7.png", "text_plain_output_2.png", "text_plain_output_1.png", "text_plain_output_11.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd

names_data = pd.read_csv('../input/NationalNames.csv')
frequent_names = names_data[names_data['Count'] > 1000]
indexed_names = frequent_names.set_index(['Year', 'Name'])['Count']

def ambiguity_measure(grouped_frame):
    return 2 * (1 - grouped_frame.max() / grouped_frame.sum())

ambiguity_data = ambiguity_measure(indexed_names.groupby(level=['Year', 'Name']))
yearly_ambiguity = ambiguity_data.groupby(level='Year')
print('Average ambiguity: %s\n' % str(ambiguity_data.mean()))
print('Average by year: %s\n' % str(yearly_ambiguity.mean()))
print('Most ambiguous by year: %s' % str(yearly_ambiguity.idxmax().apply(lambda x: x[1])))
code
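ambiguity_measure above maps each name-year group of counts to 2 * (1 - max / sum): 0 when a single sex accounts for every occurrence, approaching 1 as the split becomes even. A tiny worked example with made-up counts (the names and numbers are illustrative, not from NationalNames.csv):

import pandas as pd

def ambiguity_measure(grouped_frame):
    return 2 * (1 - grouped_frame.max() / grouped_frame.sum())

# 'Jordan' in 1990: 6000 vs 4000 -> 2 * (1 - 0.6) = 0.8
# 'Emma' in 1990: 9900 vs 100  -> 2 * (1 - 0.99) = 0.02
counts = pd.Series(
    [6000, 4000, 9900, 100],
    index=pd.MultiIndex.from_tuples(
        [(1990, 'Jordan'), (1990, 'Jordan'), (1990, 'Emma'), (1990, 'Emma')],
        names=['Year', 'Name']))
print(ambiguity_measure(counts.groupby(level=['Year', 'Name'])))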
328714/cell_1
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd

names_data = pd.read_csv('../input/NationalNames.csv')
frequent_names = names_data[names_data['Count'] > 1000]
indexed_names = frequent_names.set_index(['Year', 'Name'])['Count']

def ambiguity_measure(grouped_frame):
    return 2 * (1 - grouped_frame.max() / grouped_frame.sum())

ambiguity_data = ambiguity_measure(indexed_names.groupby(level=['Year', 'Name']))
yearly_ambiguity = ambiguity_data.groupby(level='Year')
code
50234024/cell_4
[ "text_plain_output_1.png" ]
import os  # os is used below but was not imported in this cell

WORK_DIR = '../input/ranzcr-clip-catheter-line-classification'
os.listdir(WORK_DIR)
print('Train images: %d' % len(os.listdir(os.path.join(WORK_DIR, 'train'))))
code
50234024/cell_6
[ "text_plain_output_1.png" ]
import os  # os is used below but was not imported in this cell
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

WORK_DIR = '../input/ranzcr-clip-catheter-line-classification'
os.listdir(WORK_DIR)
train = pd.read_csv(os.path.join(WORK_DIR, 'train.csv'))
train_images = '../input/ranzcr-clip-catheter-line-classification' + '/train/' + train['StudyInstanceUID'] + '.jpg'
ss = pd.read_csv(os.path.join(WORK_DIR, 'sample_submission.csv'))
test_images = '../input/ranzcr-clip-catheter-line-classification' + '/test/' + ss['StudyInstanceUID'] + '.jpg'
label_cols = ss.columns[1:]
labels = train[label_cols].values
train_annot = pd.read_csv(os.path.join(WORK_DIR, 'train_annotations.csv'))

sns.set_style('whitegrid')
fig = plt.figure(figsize=(15, 12), dpi=300)
plt.suptitle('Labels count', fontfamily='serif', size=15)
for ind, i in enumerate(label_cols):
    fig.add_subplot(4, 3, ind + 1)
    sns.countplot(train[i], edgecolor='black', palette=reversed(sns.color_palette('viridis', 2)))
    plt.xlabel('')
    plt.ylabel('')
    plt.xticks(fontfamily='serif', size=10)
    plt.yticks(fontfamily='serif', size=10)
    plt.title(i, fontfamily='serif', size=10)
plt.show()
code
50234024/cell_2
[ "image_output_1.png" ]
import tensorflow as tf try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() print(f'Running on TPU {tpu.master()}') except ValueError: tpu = None if tpu: tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu) else: strategy = tf.distribute.get_strategy() AUTO = tf.data.experimental.AUTOTUNE REPLICAS = strategy.num_replicas_in_sync print(f'REPLICAS: {REPLICAS}')
code
50234024/cell_7
[ "text_html_output_1.png" ]
import os   # os is used below but was not imported in this cell
import cv2  # cv2 is used below but was not imported in this cell
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

WORK_DIR = '../input/ranzcr-clip-catheter-line-classification'
os.listdir(WORK_DIR)
train = pd.read_csv(os.path.join(WORK_DIR, 'train.csv'))
train_images = '../input/ranzcr-clip-catheter-line-classification' + '/train/' + train['StudyInstanceUID'] + '.jpg'
ss = pd.read_csv(os.path.join(WORK_DIR, 'sample_submission.csv'))
test_images = '../input/ranzcr-clip-catheter-line-classification' + '/test/' + ss['StudyInstanceUID'] + '.jpg'
label_cols = ss.columns[1:]
labels = train[label_cols].values
train_annot = pd.read_csv(os.path.join(WORK_DIR, 'train_annotations.csv'))

sns.set_style("whitegrid")
fig = plt.figure(figsize = (15, 12), dpi = 300)
plt.suptitle('Labels count', fontfamily = 'serif', size = 15)
for ind, i in enumerate(label_cols):
    fig.add_subplot(4, 3, ind + 1)
    sns.countplot(train[i], edgecolor = 'black', palette = reversed(sns.color_palette('viridis', 2)))
    plt.xlabel('')
    plt.ylabel('')
    plt.xticks(fontfamily = 'serif', size = 10)
    plt.yticks(fontfamily = 'serif', size = 10)
    plt.title(i, fontfamily = 'serif', size = 10)
plt.show()

sample = train.sample(9)
plt.figure(figsize=(10, 7), dpi=300)
for ind, image_id in enumerate(sample.StudyInstanceUID):
    plt.subplot(3, 3, ind + 1)
    image = image_id + '.jpg'
    img = cv2.imread(os.path.join(WORK_DIR, 'train', image))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    plt.imshow(img)
    plt.title('Shape: {}'.format(img.shape[:2]))
    plt.axis('off')
plt.show()
code
50234024/cell_16
[ "text_plain_output_1.png" ]
from tensorflow.keras import models, layers
from tensorflow.keras.applications import Xception
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import tensorflow as tf
import os   # os is used below but was not imported in this cell
import cv2  # cv2 is used below but was not imported in this cell

try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
    tpu = None
if tpu:
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
    strategy = tf.distribute.get_strategy()
AUTO = tf.data.experimental.AUTOTUNE
REPLICAS = strategy.num_replicas_in_sync

WORK_DIR = '../input/ranzcr-clip-catheter-line-classification'
os.listdir(WORK_DIR)
train = pd.read_csv(os.path.join(WORK_DIR, 'train.csv'))
train_images = '../input/ranzcr-clip-catheter-line-classification' + '/train/' + train['StudyInstanceUID'] + '.jpg'
ss = pd.read_csv(os.path.join(WORK_DIR, 'sample_submission.csv'))
test_images = '../input/ranzcr-clip-catheter-line-classification' + '/test/' + ss['StudyInstanceUID'] + '.jpg'
label_cols = ss.columns[1:]
labels = train[label_cols].values
train_annot = pd.read_csv(os.path.join(WORK_DIR, 'train_annotations.csv'))

sns.set_style("whitegrid")
fig = plt.figure(figsize = (15, 12), dpi = 300)
plt.suptitle('Labels count', fontfamily = 'serif', size = 15)
for ind, i in enumerate(label_cols):
    fig.add_subplot(4, 3, ind + 1)
    sns.countplot(train[i], edgecolor = 'black', palette = reversed(sns.color_palette('viridis', 2)))
    plt.xlabel('')
    plt.ylabel('')
    plt.xticks(fontfamily = 'serif', size = 10)
    plt.yticks(fontfamily = 'serif', size = 10)
    plt.title(i, fontfamily = 'serif', size = 10)
plt.show()

sample = train.sample(9)
for ind, image_id in enumerate(sample.StudyInstanceUID):
    image = image_id + '.jpg'
    img = cv2.imread(os.path.join(WORK_DIR, 'train', image))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    plt.axis('off')

BATCH_SIZE = 8 * REPLICAS
STEPS_PER_EPOCH = len(train) * 0.8 / BATCH_SIZE
VALIDATION_STEPS = len(train) * 0.2 / BATCH_SIZE
EPOCHS = 30
TARGET_SIZE = 750

def build_decoder(with_labels=True, target_size=(TARGET_SIZE, TARGET_SIZE), ext='jpg'):
    def decode(path):
        file_bytes = tf.io.read_file(path)
        if ext == 'png':
            img = tf.image.decode_png(file_bytes, channels=3)
        elif ext in ['jpg', 'jpeg']:
            img = tf.image.decode_jpeg(file_bytes, channels=3)
        else:
            raise ValueError('Image extension not supported')
        img = tf.cast(img, tf.float32) / 255.0
        img = tf.image.resize(img, target_size)
        return img
    def decode_with_labels(path, label):
        return (decode(path), label)
    return decode_with_labels if with_labels else decode

def build_augmenter(with_labels=True):
    def augment(img):
        img = tf.image.random_flip_left_right(img)
        img = tf.image.random_flip_up_down(img)
        img = tf.image.adjust_brightness(img, 0.1)
        return img
    def augment_with_labels(img, label):
        return (augment(img), label)
    return augment_with_labels if with_labels else augment

def build_dataset(paths, labels=None, bsize=32, cache=True, decode_fn=None, augment_fn=None, augment=True, repeat=True, shuffle=1024, cache_dir=''):
    if cache_dir != '' and cache is True:
        os.makedirs(cache_dir, exist_ok=True)
    if decode_fn is None:
        decode_fn = build_decoder(labels is not None)
    if augment_fn is None:
        augment_fn = build_augmenter(labels is not None)
    AUTO = tf.data.experimental.AUTOTUNE
    slices = paths if labels is None else (paths, labels)
    dset = tf.data.Dataset.from_tensor_slices(slices)
    dset = dset.map(decode_fn, num_parallel_calls=AUTO)
    dset = dset.cache(cache_dir) if cache else dset
    dset = dset.map(augment_fn, num_parallel_calls=AUTO) if augment else dset
    dset = dset.repeat() if repeat else dset
    dset = dset.shuffle(shuffle) if shuffle else dset
    dset = dset.batch(bsize).prefetch(AUTO)
    return dset

# train_img, train_labels, valid_img and valid_labels come from a split cell that is not captured in this record
train_df = build_dataset(train_img, train_labels, bsize=BATCH_SIZE, cache=True)
valid_df = build_dataset(valid_img, valid_labels, bsize=BATCH_SIZE, repeat=False, shuffle=False, augment=False, cache=True)
test_df = build_dataset(test_images, bsize=BATCH_SIZE, repeat=False, shuffle=False, augment=False, cache=False)

def create_model():
    conv_base = Xception(include_top=False, weights='imagenet', input_shape=(TARGET_SIZE, TARGET_SIZE, 3))
    model = conv_base.output
    model = layers.GlobalAveragePooling2D()(model)
    model = layers.Dropout(0.2)(model)
    model = layers.Dense(11, activation='sigmoid')(model)
    model = models.Model(conv_base.input, model)
    model.compile(optimizer=Adam(lr=0.001), loss='binary_crossentropy', metrics=[tf.keras.metrics.AUC(multi_label=True)])
    return model

with strategy.scope():
    model = create_model()
model.summary()
model.load_weights('../input/ranzcr-xception-tpu-baseline/Xception_750_TPU.h5')
ss[label_cols] = model.predict(test_df, verbose=1)
code
50234024/cell_3
[ "image_output_1.png" ]
WORK_DIR = '../input/ranzcr-clip-catheter-line-classification'
os.listdir(WORK_DIR)
code
50234024/cell_17
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd
WORK_DIR = '../input/ranzcr-clip-catheter-line-classification'
os.listdir(WORK_DIR)
train = pd.read_csv(os.path.join(WORK_DIR, 'train.csv'))
train_images = '../input/ranzcr-clip-catheter-line-classification' + '/train/' + train['StudyInstanceUID'] + '.jpg'
ss = pd.read_csv(os.path.join(WORK_DIR, 'sample_submission.csv'))
test_images = '../input/ranzcr-clip-catheter-line-classification' + '/test/' + ss['StudyInstanceUID'] + '.jpg'
label_cols = ss.columns[1:]
labels = train[label_cols].values
train_annot = pd.read_csv(os.path.join(WORK_DIR, 'train_annotations.csv'))
ss.head()
code
50234024/cell_14
[ "text_plain_output_1.png" ]
from tensorflow.keras import models, layers
from tensorflow.keras.applications import Xception
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import tensorflow as tf
try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
    tpu = None
if tpu:
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
    strategy = tf.distribute.get_strategy()
AUTO = tf.data.experimental.AUTOTUNE
REPLICAS = strategy.num_replicas_in_sync
WORK_DIR = '../input/ranzcr-clip-catheter-line-classification'
os.listdir(WORK_DIR)
train = pd.read_csv(os.path.join(WORK_DIR, 'train.csv'))
train_images = '../input/ranzcr-clip-catheter-line-classification' + '/train/' + train['StudyInstanceUID'] + '.jpg'
ss = pd.read_csv(os.path.join(WORK_DIR, 'sample_submission.csv'))
test_images = '../input/ranzcr-clip-catheter-line-classification' + '/test/' + ss['StudyInstanceUID'] + '.jpg'
label_cols = ss.columns[1:]
labels = train[label_cols].values
train_annot = pd.read_csv(os.path.join(WORK_DIR, 'train_annotations.csv'))
sns.set_style("whitegrid")
fig = plt.figure(figsize = (15, 12), dpi = 300)
plt.suptitle('Labels count', fontfamily = 'serif', size = 15)
for ind, i in enumerate(label_cols):
    fig.add_subplot(4, 3, ind + 1)
    sns.countplot(train[i], edgecolor = 'black', palette = reversed(sns.color_palette('viridis', 2)))
    plt.xlabel('')
    plt.ylabel('')
    plt.xticks(fontfamily = 'serif', size = 10)
    plt.yticks(fontfamily = 'serif', size = 10)
    plt.title(i, fontfamily = 'serif', size = 10)
plt.show()
sample = train.sample(9)
for ind, image_id in enumerate(sample.StudyInstanceUID):
    image = image_id + '.jpg'
    img = cv2.imread(os.path.join(WORK_DIR, 'train', image))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    plt.axis('off')
BATCH_SIZE = 8 * REPLICAS
STEPS_PER_EPOCH = len(train) * 0.8 / BATCH_SIZE
VALIDATION_STEPS = len(train) * 0.2 / BATCH_SIZE
EPOCHS = 30
TARGET_SIZE = 750
def build_decoder(with_labels=True, target_size=(TARGET_SIZE, TARGET_SIZE), ext='jpg'):
    def decode(path):
        file_bytes = tf.io.read_file(path)
        if ext == 'png':
            img = tf.image.decode_png(file_bytes, channels=3)
        elif ext in ['jpg', 'jpeg']:
            img = tf.image.decode_jpeg(file_bytes, channels=3)
        else:
            raise ValueError('Image extension not supported')
        img = tf.cast(img, tf.float32) / 255.0
        img = tf.image.resize(img, target_size)
        return img
    def decode_with_labels(path, label):
        return (decode(path), label)
    return decode_with_labels if with_labels else decode
def build_augmenter(with_labels=True):
    def augment(img):
        img = tf.image.random_flip_left_right(img)
        img = tf.image.random_flip_up_down(img)
        img = tf.image.adjust_brightness(img, 0.1)
        return img
    def augment_with_labels(img, label):
        return (augment(img), label)
    return augment_with_labels if with_labels else augment
def build_dataset(paths, labels=None, bsize=32, cache=True, decode_fn=None, augment_fn=None, augment=True, repeat=True, shuffle=1024, cache_dir=''):
    if cache_dir != '' and cache is True:
        os.makedirs(cache_dir, exist_ok=True)
    if decode_fn is None:
        decode_fn = build_decoder(labels is not None)
    if augment_fn is None:
        augment_fn = build_augmenter(labels is not None)
    AUTO = tf.data.experimental.AUTOTUNE
    slices = paths if labels is None else (paths, labels)
    dset = tf.data.Dataset.from_tensor_slices(slices)
    dset = dset.map(decode_fn, num_parallel_calls=AUTO)
    dset = dset.cache(cache_dir) if cache else dset
    dset = dset.map(augment_fn, num_parallel_calls=AUTO) if augment else dset
    dset = dset.repeat() if repeat else dset
    dset = dset.shuffle(shuffle) if shuffle else dset
    dset = dset.batch(bsize).prefetch(AUTO)
    return dset
def create_model():
    conv_base = Xception(include_top=False, weights='imagenet', input_shape=(TARGET_SIZE, TARGET_SIZE, 3))
    model = conv_base.output
    model = layers.GlobalAveragePooling2D()(model)
    model = layers.Dropout(0.2)(model)
    model = layers.Dense(11, activation='sigmoid')(model)
    model = models.Model(conv_base.input, model)
    model.compile(optimizer=Adam(lr=0.001), loss='binary_crossentropy', metrics=[tf.keras.metrics.AUC(multi_label=True)])
    return model
with strategy.scope():
    model = create_model()
model.summary()
code
50234024/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import tensorflow as tf
try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
    tpu = None
if tpu:
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
    strategy = tf.distribute.get_strategy()
AUTO = tf.data.experimental.AUTOTUNE
REPLICAS = strategy.num_replicas_in_sync
WORK_DIR = '../input/ranzcr-clip-catheter-line-classification'
os.listdir(WORK_DIR)
train = pd.read_csv(os.path.join(WORK_DIR, 'train.csv'))
train_images = '../input/ranzcr-clip-catheter-line-classification' + '/train/' + train['StudyInstanceUID'] + '.jpg'
ss = pd.read_csv(os.path.join(WORK_DIR, 'sample_submission.csv'))
test_images = '../input/ranzcr-clip-catheter-line-classification' + '/test/' + ss['StudyInstanceUID'] + '.jpg'
label_cols = ss.columns[1:]
labels = train[label_cols].values
train_annot = pd.read_csv(os.path.join(WORK_DIR, 'train_annotations.csv'))
sns.set_style("whitegrid")
fig = plt.figure(figsize = (15, 12), dpi = 300)
plt.suptitle('Labels count', fontfamily = 'serif', size = 15)
for ind, i in enumerate(label_cols):
    fig.add_subplot(4, 3, ind + 1)
    sns.countplot(train[i], edgecolor = 'black', palette = reversed(sns.color_palette('viridis', 2)))
    plt.xlabel('')
    plt.ylabel('')
    plt.xticks(fontfamily = 'serif', size = 10)
    plt.yticks(fontfamily = 'serif', size = 10)
    plt.title(i, fontfamily = 'serif', size = 10)
plt.show()
sample = train.sample(9)
for ind, image_id in enumerate(sample.StudyInstanceUID):
    image = image_id + '.jpg'
    img = cv2.imread(os.path.join(WORK_DIR, 'train', image))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    plt.axis('off')
BATCH_SIZE = 8 * REPLICAS
STEPS_PER_EPOCH = len(train) * 0.8 / BATCH_SIZE
VALIDATION_STEPS = len(train) * 0.2 / BATCH_SIZE
EPOCHS = 30
TARGET_SIZE = 750
def build_decoder(with_labels=True, target_size=(TARGET_SIZE, TARGET_SIZE), ext='jpg'):
    def decode(path):
        file_bytes = tf.io.read_file(path)
        if ext == 'png':
            img = tf.image.decode_png(file_bytes, channels=3)
        elif ext in ['jpg', 'jpeg']:
            img = tf.image.decode_jpeg(file_bytes, channels=3)
        else:
            raise ValueError('Image extension not supported')
        img = tf.cast(img, tf.float32) / 255.0
        img = tf.image.resize(img, target_size)
        return img
    def decode_with_labels(path, label):
        return (decode(path), label)
    return decode_with_labels if with_labels else decode
def build_augmenter(with_labels=True):
    def augment(img):
        img = tf.image.random_flip_left_right(img)
        img = tf.image.random_flip_up_down(img)
        img = tf.image.adjust_brightness(img, 0.1)
        return img
    def augment_with_labels(img, label):
        return (augment(img), label)
    return augment_with_labels if with_labels else augment
def build_dataset(paths, labels=None, bsize=32, cache=True, decode_fn=None, augment_fn=None, augment=True, repeat=True, shuffle=1024, cache_dir=''):
    if cache_dir != '' and cache is True:
        os.makedirs(cache_dir, exist_ok=True)
    if decode_fn is None:
        decode_fn = build_decoder(labels is not None)
    if augment_fn is None:
        augment_fn = build_augmenter(labels is not None)
    AUTO = tf.data.experimental.AUTOTUNE
    slices = paths if labels is None else (paths, labels)
    dset = tf.data.Dataset.from_tensor_slices(slices)
    dset = dset.map(decode_fn, num_parallel_calls=AUTO)
    dset = dset.cache(cache_dir) if cache else dset
    dset = dset.map(augment_fn, num_parallel_calls=AUTO) if augment else dset
    dset = dset.repeat() if repeat else dset
    dset = dset.shuffle(shuffle) if shuffle else dset
    dset = dset.batch(bsize).prefetch(AUTO)
    return dset
train_df = build_dataset(train_img, train_labels, bsize=BATCH_SIZE, cache=True)
valid_df = build_dataset(valid_img, valid_labels, bsize=BATCH_SIZE, repeat=False, shuffle=False, augment=False, cache=True)
test_df = build_dataset(test_images, bsize=BATCH_SIZE, repeat=False, shuffle=False, augment=False, cache=False)
train_df
code
50234024/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
WORK_DIR = '../input/ranzcr-clip-catheter-line-classification'
os.listdir(WORK_DIR)
train = pd.read_csv(os.path.join(WORK_DIR, 'train.csv'))
train_images = '../input/ranzcr-clip-catheter-line-classification' + '/train/' + train['StudyInstanceUID'] + '.jpg'
ss = pd.read_csv(os.path.join(WORK_DIR, 'sample_submission.csv'))
test_images = '../input/ranzcr-clip-catheter-line-classification' + '/test/' + ss['StudyInstanceUID'] + '.jpg'
label_cols = ss.columns[1:]
labels = train[label_cols].values
train_annot = pd.read_csv(os.path.join(WORK_DIR, 'train_annotations.csv'))
print('Labels:\n', '*' * 20, '\n', label_cols.values)
print('*' * 50)
train.head()
code
333909/cell_4
[ "text_plain_output_5.png", "text_plain_output_4.png", "text_plain_output_6.png", "text_plain_output_3.png", "text_plain_output_7.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from keras.layers.core import Dense, Dropout, Activation, Flatten, MaxoutDense
from keras.models import Sequential
from keras.optimizers import Adam, RMSprop, Adadelta, SGD
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
import pandas as pd
import numpy as np
import pandas as pd
from keras.models import Model
from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D, ZeroPadding2D
from keras.optimizers import Adam, RMSprop, Adadelta, SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping
from keras.layers.normalization import BatchNormalization
from keras import backend as K
from keras.layers.core import Dense, Dropout, Activation, Flatten, MaxoutDense
from keras.layers.advanced_activations import PReLU, ELU
act_train = pd.read_csv('../input/act_train.csv')
act_test = pd.read_csv('../input/act_test.csv')
people = pd.read_csv('../input/people.csv')
test_ids = act_test['activity_id']
def preprocess_acts(data, train_set=True):
    data = data.drop(['date', 'activity_id'], axis=1)
    if train_set:
        data = data.drop(['outcome'], axis=1)
    data['people_id'] = data['people_id'].apply(lambda x: x.split('_')[1])
    data['people_id'] = pd.to_numeric(data['people_id']).astype(int)
    columns = list(data.columns)
    for col in columns[1:]:
        data[col] = data[col].fillna('type 0')
        data[col] = data[col].apply(lambda x: x.split(' ')[1])
        data[col] = pd.to_numeric(data[col]).astype(int)
    return data
def preprocess_people(data):
    data = data.drop(['date'], axis=1)
    data['people_id'] = data['people_id'].apply(lambda x: x.split('_')[1])
    data['people_id'] = pd.to_numeric(data['people_id']).astype(int)
    columns = list(data.columns)
    bools = columns[11:]
    strings = columns[1:11]
    for col in bools:
        data[col] = pd.to_numeric(data[col]).astype(int)
    for col in strings:
        data[col] = data[col].fillna('type 0')
        data[col] = data[col].apply(lambda x: x.split(' ')[1])
        data[col] = pd.to_numeric(data[col]).astype(int)
    return data
peeps = preprocess_people(people)
actions_train = preprocess_acts(act_train)
actions_test = preprocess_acts(act_test, train_set=False)
features = actions_train.merge(peeps, how='left', on='people_id')
labels = act_train['outcome']
test = actions_test.merge(peeps, how='left', on='people_id')
features.sample(10)
from keras.models import Sequential
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping
def create_model_v1(input_dim):
    nb_classes = 1
    model = Sequential()
    model.add(Dense(100, input_dim=input_dim, activation='relu'))
    model.add(Dense(100, activation='relu'))
    model.add(Dense(nb_classes))
    model.add(Activation('sigmoid'))
    sgd = SGD(lr=0.05, decay=0, momentum=0.95, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
features = features.as_matrix()
scaler = preprocessing.StandardScaler().fit(features)
features = scaler.transform(features)
num_test = 0.2
X_train, X_test, y_train, y_test = train_test_split(features, labels.as_matrix(), test_size=num_test, random_state=1337)
print(features[0, :])
code
333909/cell_1
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np
import pandas as pd
from keras.models import Model
from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D, ZeroPadding2D
from keras.optimizers import Adam, RMSprop, Adadelta, SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping
from keras.layers.normalization import BatchNormalization
from keras import backend as K
from keras.layers.core import Dense, Dropout, Activation, Flatten, MaxoutDense
from keras.layers.advanced_activations import PReLU, ELU
act_train = pd.read_csv('../input/act_train.csv')
act_test = pd.read_csv('../input/act_test.csv')
people = pd.read_csv('../input/people.csv')
test_ids = act_test['activity_id']
def preprocess_acts(data, train_set=True):
    data = data.drop(['date', 'activity_id'], axis=1)
    if train_set:
        data = data.drop(['outcome'], axis=1)
    data['people_id'] = data['people_id'].apply(lambda x: x.split('_')[1])
    data['people_id'] = pd.to_numeric(data['people_id']).astype(int)
    columns = list(data.columns)
    for col in columns[1:]:
        data[col] = data[col].fillna('type 0')
        data[col] = data[col].apply(lambda x: x.split(' ')[1])
        data[col] = pd.to_numeric(data[col]).astype(int)
    return data
def preprocess_people(data):
    data = data.drop(['date'], axis=1)
    data['people_id'] = data['people_id'].apply(lambda x: x.split('_')[1])
    data['people_id'] = pd.to_numeric(data['people_id']).astype(int)
    columns = list(data.columns)
    bools = columns[11:]
    strings = columns[1:11]
    for col in bools:
        data[col] = pd.to_numeric(data[col]).astype(int)
    for col in strings:
        data[col] = data[col].fillna('type 0')
        data[col] = data[col].apply(lambda x: x.split(' ')[1])
        data[col] = pd.to_numeric(data[col]).astype(int)
    return data
peeps = preprocess_people(people)
actions_train = preprocess_acts(act_train)
actions_test = preprocess_acts(act_test, train_set=False)
features = actions_train.merge(peeps, how='left', on='people_id')
labels = act_train['outcome']
test = actions_test.merge(peeps, how='left', on='people_id')
features.sample(10)
code
333909/cell_3
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_3.png", "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers.core import Dense, Dropout, Activation, Flatten, MaxoutDense
from keras.models import Sequential
from keras.optimizers import Adam, RMSprop, Adadelta, SGD
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
import pandas as pd
import numpy as np
import pandas as pd
from keras.models import Model
from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D, ZeroPadding2D
from keras.optimizers import Adam, RMSprop, Adadelta, SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping
from keras.layers.normalization import BatchNormalization
from keras import backend as K
from keras.layers.core import Dense, Dropout, Activation, Flatten, MaxoutDense
from keras.layers.advanced_activations import PReLU, ELU
act_train = pd.read_csv('../input/act_train.csv')
act_test = pd.read_csv('../input/act_test.csv')
people = pd.read_csv('../input/people.csv')
test_ids = act_test['activity_id']
def preprocess_acts(data, train_set=True):
    data = data.drop(['date', 'activity_id'], axis=1)
    if train_set:
        data = data.drop(['outcome'], axis=1)
    data['people_id'] = data['people_id'].apply(lambda x: x.split('_')[1])
    data['people_id'] = pd.to_numeric(data['people_id']).astype(int)
    columns = list(data.columns)
    for col in columns[1:]:
        data[col] = data[col].fillna('type 0')
        data[col] = data[col].apply(lambda x: x.split(' ')[1])
        data[col] = pd.to_numeric(data[col]).astype(int)
    return data
def preprocess_people(data):
    data = data.drop(['date'], axis=1)
    data['people_id'] = data['people_id'].apply(lambda x: x.split('_')[1])
    data['people_id'] = pd.to_numeric(data['people_id']).astype(int)
    columns = list(data.columns)
    bools = columns[11:]
    strings = columns[1:11]
    for col in bools:
        data[col] = pd.to_numeric(data[col]).astype(int)
    for col in strings:
        data[col] = data[col].fillna('type 0')
        data[col] = data[col].apply(lambda x: x.split(' ')[1])
        data[col] = pd.to_numeric(data[col]).astype(int)
    return data
peeps = preprocess_people(people)
actions_train = preprocess_acts(act_train)
actions_test = preprocess_acts(act_test, train_set=False)
features = actions_train.merge(peeps, how='left', on='people_id')
labels = act_train['outcome']
test = actions_test.merge(peeps, how='left', on='people_id')
features.sample(10)
from keras.models import Sequential
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping
def create_model_v1(input_dim):
    nb_classes = 1
    model = Sequential()
    model.add(Dense(100, input_dim=input_dim, activation='relu'))
    model.add(Dense(100, activation='relu'))
    model.add(Dense(nb_classes))
    model.add(Activation('sigmoid'))
    sgd = SGD(lr=0.05, decay=0, momentum=0.95, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
features = features.as_matrix()
scaler = preprocessing.StandardScaler().fit(features)
features = scaler.transform(features)
num_test = 0.2
X_train, X_test, y_train, y_test = train_test_split(features, labels.as_matrix(), test_size=num_test, random_state=1337)
code
129009155/cell_42
[ "text_plain_output_1.png" ]
from gensim.models import KeyedVectors
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
import numpy as np
import pandas as pd
import spacy
folder_path_train = '/content/drive/MyDrive/HMD_project/train.jsonl'
folder_path_dev = '/content/drive/MyDrive/HMD_project/dev.jsonl'
df_train = pd.read_json(folder_path_train, lines=True)
df_dev = pd.read_json(folder_path_dev, lines=True)
with open('/content/drive/MyDrive/HMD_project/new/text_aug_norm.txt', 'r') as f:
    lines = f.readlines()
file_names = []
for i in lines:
    file_names.append(i[:i.find('\n')])
for i in range(len(file_names)):
    df_train.loc[len(df_train.index)] = [1, 'img/1', 1, file_names[i]]
data = df_train
from gensim.models import KeyedVectors
wv = KeyedVectors.load('/content/drive/MyDrive/HMD_project/glove-twitter-200')
import spacy.cli
spacy.cli.download('en_core_web_sm')
nlp = spacy.load('en_core_web_sm')
def preprocess(text):
    doc = nlp(text)
    filtered_token = []
    for token in doc:
        if token.is_punct or token.is_space or token.is_bracket or token.is_stop:
            continue
        else:
            token = token.lemma_
            filtered_token.append(token)
    return filtered_token
df_dev['processed_text_val'] = df_dev['text'].apply(lambda x: preprocess(x))
data['processed_text'] = data['text'].apply(lambda x: preprocess(x))
import numpy as np
def gensim_vector(token):
    vec_size = wv.vector_size
    wv_final = np.zeros(vec_size)
    count = 1
    for t in token:
        if t in wv:
            count += 1
            wv_final += wv[t]
    return wv_final / count
data['text_vector'] = data['processed_text'].apply(gensim_vector)
df_dev['text_vector_val'] = df_dev['processed_text_val'].apply(gensim_vector)
len(data.text_vector.iloc[0])
vectorizer = TfidfVectorizer(stop_words='english')
svd = TruncatedSVD(n_components=1000)
processed_text = data['processed_text'].apply(lambda x: ' '.join(x))
text_vec_tfidf = vectorizer.fit_transform(processed_text)
print('CountVectorizer shape_training dataset', text_vec_tfidf.shape)
processed_text_val = df_dev['processed_text_val'].apply(lambda x: ' '.join(x))
text_vec_tfidf_val = vectorizer.transform(processed_text_val)
print('CountVectorizer shape_val dataset', text_vec_tfidf_val.shape)
lsa_text = svd.fit_transform(text_vec_tfidf)
print('\nvariance_captured_by 1000 components', svd.explained_variance_ratio_.sum())
lsa_text_val = svd.transform(text_vec_tfidf_val)
print('\n', lsa_text.shape)
print(lsa_text_val.shape)
code
129009155/cell_21
[ "text_plain_output_1.png" ]
from gensim.models import KeyedVectors
from gensim.models import KeyedVectors
wv = KeyedVectors.load('/content/drive/MyDrive/HMD_project/glove-twitter-200')
w = wv['hate']
print(w)
print('\n\nlength of word vector', len(w))
print('\n\n type of word vector model ', type(wv))
print('\n\n word vector type', type(w))
code
129009155/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
folder_path_train = '/content/drive/MyDrive/HMD_project/train.jsonl'
folder_path_dev = '/content/drive/MyDrive/HMD_project/dev.jsonl'
df_train = pd.read_json(folder_path_train, lines=True)
df_dev = pd.read_json(folder_path_dev, lines=True)
print(df_train.isna().sum())
print('\n\n', df_dev.isna().sum())
code
129009155/cell_25
[ "text_plain_output_1.png" ]
import spacy
import spacy.cli
spacy.cli.download('en_core_web_sm')
nlp = spacy.load('en_core_web_sm')
code
129009155/cell_29
[ "text_plain_output_1.png" ]
import spacy
import spacy.cli
spacy.cli.download('en_core_web_sm')
nlp = spacy.load('en_core_web_sm')
def preprocess(text):
    doc = nlp(text)
    filtered_token = []
    for token in doc:
        if token.is_punct or token.is_space or token.is_bracket or token.is_stop:
            continue
        else:
            token = token.lemma_
            filtered_token.append(token)
    return filtered_token
tokens = preprocess('My best friend Anu, (who is three months older than me) is coming to my house tonight!!!.')
tokens
code
129009155/cell_48
[ "text_plain_output_1.png" ]
from gensim.models import KeyedVectors
from sklearn.decomposition import NMF
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
import numpy as np
import pandas as pd
import spacy
folder_path_train = '/content/drive/MyDrive/HMD_project/train.jsonl'
folder_path_dev = '/content/drive/MyDrive/HMD_project/dev.jsonl'
df_train = pd.read_json(folder_path_train, lines=True)
df_dev = pd.read_json(folder_path_dev, lines=True)
with open('/content/drive/MyDrive/HMD_project/new/text_aug_norm.txt', 'r') as f:
    lines = f.readlines()
file_names = []
for i in lines:
    file_names.append(i[:i.find('\n')])
for i in range(len(file_names)):
    df_train.loc[len(df_train.index)] = [1, 'img/1', 1, file_names[i]]
data = df_train
from gensim.models import KeyedVectors
wv = KeyedVectors.load('/content/drive/MyDrive/HMD_project/glove-twitter-200')
import spacy.cli
spacy.cli.download('en_core_web_sm')
nlp = spacy.load('en_core_web_sm')
def preprocess(text):
    doc = nlp(text)
    filtered_token = []
    for token in doc:
        if token.is_punct or token.is_space or token.is_bracket or token.is_stop:
            continue
        else:
            token = token.lemma_
            filtered_token.append(token)
    return filtered_token
df_dev['processed_text_val'] = df_dev['text'].apply(lambda x: preprocess(x))
data['processed_text'] = data['text'].apply(lambda x: preprocess(x))
import numpy as np
def gensim_vector(token):
    vec_size = wv.vector_size
    wv_final = np.zeros(vec_size)
    count = 1
    for t in token:
        if t in wv:
            count += 1
            wv_final += wv[t]
    return wv_final / count
data['text_vector'] = data['processed_text'].apply(gensim_vector)
df_dev['text_vector_val'] = df_dev['processed_text_val'].apply(gensim_vector)
len(data.text_vector.iloc[0])
text_vector = data['text_vector']
text_vector = np.stack(text_vector)
np.save('/content/drive/MyDrive/HMD_project/new/twitter_embedding_train_text.npy', text_vector)
text_vector_val = df_dev['text_vector_val']
text_vector_val = np.stack(text_vector_val)
np.save('/content/drive/MyDrive/HMD_project/new/twitter_embedding_val_text.npy', text_vector_val)
vectorizer = TfidfVectorizer(stop_words='english')
svd = TruncatedSVD(n_components=1000)
processed_text = data['processed_text'].apply(lambda x: ' '.join(x))
text_vec_tfidf = vectorizer.fit_transform(processed_text)
processed_text_val = df_dev['processed_text_val'].apply(lambda x: ' '.join(x))
text_vec_tfidf_val = vectorizer.transform(processed_text_val)
lsa_text = svd.fit_transform(text_vec_tfidf)
lsa_text_val = svd.transform(text_vec_tfidf_val)
np.save('/content/drive/MyDrive/HMD_project/new/lsa_tfidf_train_text.npy', lsa_text)
np.save('/content/drive/MyDrive/HMD_project/new/lsa_tfidf_val_text.npy', lsa_text_val)
vectorizer = CountVectorizer(stop_words='english')
svd2 = TruncatedSVD(n_components=1000)
processed_text = data['processed_text'].apply(lambda x: ' '.join(x))
text_vec_bow = vectorizer.fit_transform(processed_text)
processed_text_val = df_dev['processed_text_val'].apply(lambda x: ' '.join(x))
text_vec_bow_val = vectorizer.transform(processed_text_val)
lsa_bow_text = svd2.fit_transform(text_vec_bow)
lsa_bow_text_val = svd2.transform(text_vec_bow_val)
np.save('/content/drive/MyDrive/HMD_project/new/lsa_bow_train_text.npy', lsa_bow_text)
np.save('/content/drive/MyDrive/HMD_project/new/lsa_bow_val_text.npy', lsa_bow_text_val)
from sklearn.decomposition import NMF
vectorizer = TfidfVectorizer()
nmf = NMF(n_components=100)
processed_text = data['processed_text'].apply(lambda x: ' '.join(x))
text_vec_tfidf = vectorizer.fit_transform(processed_text)
print('CountVectorizer shape_training dataset', text_vec_tfidf.shape)
processed_text_val = df_dev['processed_text_val'].apply(lambda x: ' '.join(x))
text_vec_tfidf_val = vectorizer.transform(processed_text_val)
print('CountVectorizer shape_val dataset', text_vec_tfidf_val.shape)
nmf_text = nmf.fit_transform(text_vec_tfidf)
nmf_text_val = nmf.transform(text_vec_tfidf_val)
print(nmf_text.shape)
print(nmf_text_val.shape)
code
129009155/cell_11
[ "text_plain_output_1.png" ]
with open('/content/drive/MyDrive/HMD_project/new/text_aug_norm.txt', 'r') as f:
    lines = f.readlines()
file_names = []
for i in lines:
    file_names.append(i[:i.find('\n')])
print(type(file_names[0]))
code
129009155/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
print('success')
code
129009155/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
folder_path_train = '/content/drive/MyDrive/HMD_project/train.jsonl'
folder_path_dev = '/content/drive/MyDrive/HMD_project/dev.jsonl'
df_train = pd.read_json(folder_path_train, lines=True)
df_dev = pd.read_json(folder_path_dev, lines=True)
df_train['label'].value_counts().plot(kind='bar', figsize=(6, 6), width=0.2, title='Training data')
print('Distribution of training dataset\n', df_train.label.value_counts(), '\n')
print('Distribution of validation dataset\n', df_dev.label.value_counts())
code
129009155/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
import gensim.downloader
print(list(gensim.downloader.info()['models'].keys()))
code
129009155/cell_38
[ "text_html_output_1.png" ]
from gensim.models import KeyedVectors
import numpy as np
import numpy as np
import pandas as pd
import spacy
folder_path_train = '/content/drive/MyDrive/HMD_project/train.jsonl'
folder_path_dev = '/content/drive/MyDrive/HMD_project/dev.jsonl'
df_train = pd.read_json(folder_path_train, lines=True)
df_dev = pd.read_json(folder_path_dev, lines=True)
with open('/content/drive/MyDrive/HMD_project/new/text_aug_norm.txt', 'r') as f:
    lines = f.readlines()
file_names = []
for i in lines:
    file_names.append(i[:i.find('\n')])
for i in range(len(file_names)):
    df_train.loc[len(df_train.index)] = [1, 'img/1', 1, file_names[i]]
data = df_train
from gensim.models import KeyedVectors
wv = KeyedVectors.load('/content/drive/MyDrive/HMD_project/glove-twitter-200')
import spacy.cli
spacy.cli.download('en_core_web_sm')
nlp = spacy.load('en_core_web_sm')
def preprocess(text):
    doc = nlp(text)
    filtered_token = []
    for token in doc:
        if token.is_punct or token.is_space or token.is_bracket or token.is_stop:
            continue
        else:
            token = token.lemma_
            filtered_token.append(token)
    return filtered_token
df_dev['processed_text_val'] = df_dev['text'].apply(lambda x: preprocess(x))
data['processed_text'] = data['text'].apply(lambda x: preprocess(x))
import numpy as np
def gensim_vector(token):
    vec_size = wv.vector_size
    wv_final = np.zeros(vec_size)
    count = 1
    for t in token:
        if t in wv:
            count += 1
            wv_final += wv[t]
    return wv_final / count
data['text_vector'] = data['processed_text'].apply(gensim_vector)
df_dev['text_vector_val'] = df_dev['processed_text_val'].apply(gensim_vector)
len(data.text_vector.iloc[0])
text_vector = data['text_vector']
text_vector = np.stack(text_vector)
print(text_vector.shape)
print(text_vector[0].shape)
np.save('/content/drive/MyDrive/HMD_project/new/twitter_embedding_train_text.npy', text_vector)
text_vector_val = df_dev['text_vector_val']
text_vector_val = np.stack(text_vector_val)
print(text_vector_val.shape)
print(text_vector_val[0].shape)
np.save('/content/drive/MyDrive/HMD_project/new/twitter_embedding_val_text.npy', text_vector_val)
code
129009155/cell_3
[ "text_plain_output_1.png" ]
from google.colab import drive
from google.colab import drive
drive.mount('/content/drive')
code
129009155/cell_35
[ "text_plain_output_1.png" ]
from gensim.models import KeyedVectors
import numpy as np
import numpy as np
import pandas as pd
import spacy
folder_path_train = '/content/drive/MyDrive/HMD_project/train.jsonl'
folder_path_dev = '/content/drive/MyDrive/HMD_project/dev.jsonl'
df_train = pd.read_json(folder_path_train, lines=True)
df_dev = pd.read_json(folder_path_dev, lines=True)
with open('/content/drive/MyDrive/HMD_project/new/text_aug_norm.txt', 'r') as f:
    lines = f.readlines()
file_names = []
for i in lines:
    file_names.append(i[:i.find('\n')])
for i in range(len(file_names)):
    df_train.loc[len(df_train.index)] = [1, 'img/1', 1, file_names[i]]
data = df_train
from gensim.models import KeyedVectors
wv = KeyedVectors.load('/content/drive/MyDrive/HMD_project/glove-twitter-200')
import spacy.cli
spacy.cli.download('en_core_web_sm')
nlp = spacy.load('en_core_web_sm')
def preprocess(text):
    doc = nlp(text)
    filtered_token = []
    for token in doc:
        if token.is_punct or token.is_space or token.is_bracket or token.is_stop:
            continue
        else:
            token = token.lemma_
            filtered_token.append(token)
    return filtered_token
df_dev['processed_text_val'] = df_dev['text'].apply(lambda x: preprocess(x))
data['processed_text'] = data['text'].apply(lambda x: preprocess(x))
import numpy as np
def gensim_vector(token):
    vec_size = wv.vector_size
    wv_final = np.zeros(vec_size)
    count = 1
    for t in token:
        if t in wv:
            count += 1
            wv_final += wv[t]
    return wv_final / count
data['text_vector'] = data['processed_text'].apply(gensim_vector)
df_dev['text_vector_val'] = df_dev['processed_text_val'].apply(gensim_vector)
print(data.head(), '\n\n')
print(df_dev.head())
code
129009155/cell_31
[ "text_plain_output_1.png" ]
import pandas as pd
import spacy
folder_path_train = '/content/drive/MyDrive/HMD_project/train.jsonl'
folder_path_dev = '/content/drive/MyDrive/HMD_project/dev.jsonl'
df_train = pd.read_json(folder_path_train, lines=True)
df_dev = pd.read_json(folder_path_dev, lines=True)
with open('/content/drive/MyDrive/HMD_project/new/text_aug_norm.txt', 'r') as f:
    lines = f.readlines()
file_names = []
for i in lines:
    file_names.append(i[:i.find('\n')])
for i in range(len(file_names)):
    df_train.loc[len(df_train.index)] = [1, 'img/1', 1, file_names[i]]
data = df_train
import spacy.cli
spacy.cli.download('en_core_web_sm')
nlp = spacy.load('en_core_web_sm')
def preprocess(text):
    doc = nlp(text)
    filtered_token = []
    for token in doc:
        if token.is_punct or token.is_space or token.is_bracket or token.is_stop:
            continue
        else:
            token = token.lemma_
            filtered_token.append(token)
    return filtered_token
df_dev['processed_text_val'] = df_dev['text'].apply(lambda x: preprocess(x))
data['processed_text'] = data['text'].apply(lambda x: preprocess(x))
data.head()
code
129009155/cell_46
[ "text_plain_output_1.png" ]
from gensim.models import KeyedVectors
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
import numpy as np
import pandas as pd
import spacy
folder_path_train = '/content/drive/MyDrive/HMD_project/train.jsonl'
folder_path_dev = '/content/drive/MyDrive/HMD_project/dev.jsonl'
df_train = pd.read_json(folder_path_train, lines=True)
df_dev = pd.read_json(folder_path_dev, lines=True)
with open('/content/drive/MyDrive/HMD_project/new/text_aug_norm.txt', 'r') as f:
    lines = f.readlines()
file_names = []
for i in lines:
    file_names.append(i[:i.find('\n')])
for i in range(len(file_names)):
    df_train.loc[len(df_train.index)] = [1, 'img/1', 1, file_names[i]]
data = df_train
from gensim.models import KeyedVectors
wv = KeyedVectors.load('/content/drive/MyDrive/HMD_project/glove-twitter-200')
import spacy.cli
spacy.cli.download('en_core_web_sm')
nlp = spacy.load('en_core_web_sm')
def preprocess(text):
    doc = nlp(text)
    filtered_token = []
    for token in doc:
        if token.is_punct or token.is_space or token.is_bracket or token.is_stop:
            continue
        else:
            token = token.lemma_
            filtered_token.append(token)
    return filtered_token
df_dev['processed_text_val'] = df_dev['text'].apply(lambda x: preprocess(x))
data['processed_text'] = data['text'].apply(lambda x: preprocess(x))
import numpy as np
def gensim_vector(token):
    vec_size = wv.vector_size
    wv_final = np.zeros(vec_size)
    count = 1
    for t in token:
        if t in wv:
            count += 1
            wv_final += wv[t]
    return wv_final / count
data['text_vector'] = data['processed_text'].apply(gensim_vector)
df_dev['text_vector_val'] = df_dev['processed_text_val'].apply(gensim_vector)
len(data.text_vector.iloc[0])
text_vector = data['text_vector']
text_vector = np.stack(text_vector)
np.save('/content/drive/MyDrive/HMD_project/new/twitter_embedding_train_text.npy', text_vector)
text_vector_val = df_dev['text_vector_val']
text_vector_val = np.stack(text_vector_val)
np.save('/content/drive/MyDrive/HMD_project/new/twitter_embedding_val_text.npy', text_vector_val)
vectorizer = TfidfVectorizer(stop_words='english')
svd = TruncatedSVD(n_components=1000)
processed_text = data['processed_text'].apply(lambda x: ' '.join(x))
text_vec_tfidf = vectorizer.fit_transform(processed_text)
processed_text_val = df_dev['processed_text_val'].apply(lambda x: ' '.join(x))
text_vec_tfidf_val = vectorizer.transform(processed_text_val)
lsa_text = svd.fit_transform(text_vec_tfidf)
lsa_text_val = svd.transform(text_vec_tfidf_val)
np.save('/content/drive/MyDrive/HMD_project/new/lsa_tfidf_train_text.npy', lsa_text)
np.save('/content/drive/MyDrive/HMD_project/new/lsa_tfidf_val_text.npy', lsa_text_val)
vectorizer = CountVectorizer(stop_words='english')
svd2 = TruncatedSVD(n_components=1000)
processed_text = data['processed_text'].apply(lambda x: ' '.join(x))
text_vec_bow = vectorizer.fit_transform(processed_text)
print('CountVectorizer shape_training dataset', text_vec_bow.shape)
processed_text_val = df_dev['processed_text_val'].apply(lambda x: ' '.join(x))
text_vec_bow_val = vectorizer.transform(processed_text_val)
print('CountVectorizer shape_val dataset', text_vec_bow_val.shape)
lsa_bow_text = svd2.fit_transform(text_vec_bow)
print('\nvariance_captured_by 1000 components', svd2.explained_variance_ratio_.sum())
lsa_bow_text_val = svd2.transform(text_vec_bow_val)
print('\n', lsa_bow_text.shape)
print(lsa_bow_text_val.shape)
np.save('/content/drive/MyDrive/HMD_project/new/lsa_bow_train_text.npy', lsa_bow_text)
np.save('/content/drive/MyDrive/HMD_project/new/lsa_bow_val_text.npy', lsa_bow_text_val)
code
129009155/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
folder_path_train = '/content/drive/MyDrive/HMD_project/train.jsonl'
folder_path_dev = '/content/drive/MyDrive/HMD_project/dev.jsonl'
df_train = pd.read_json(folder_path_train, lines=True)
df_dev = pd.read_json(folder_path_dev, lines=True)
with open('/content/drive/MyDrive/HMD_project/new/text_aug_norm.txt', 'r') as f:
    lines = f.readlines()
file_names = []
for i in lines:
    file_names.append(i[:i.find('\n')])
for i in range(len(file_names)):
    df_train.loc[len(df_train.index)] = [1, 'img/1', 1, file_names[i]]
data = df_train
print('Distribution of training dataset after augmentation\n', data.label.value_counts())
code
129009155/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
folder_path_train = '/content/drive/MyDrive/HMD_project/train.jsonl'
folder_path_dev = '/content/drive/MyDrive/HMD_project/dev.jsonl'
df_train = pd.read_json(folder_path_train, lines=True)
df_dev = pd.read_json(folder_path_dev, lines=True)
print(df_dev.tail())
code
129009155/cell_36
[ "text_plain_output_1.png" ]
from gensim.models import KeyedVectors
import numpy as np
import numpy as np
import pandas as pd
import spacy
folder_path_train = '/content/drive/MyDrive/HMD_project/train.jsonl'
folder_path_dev = '/content/drive/MyDrive/HMD_project/dev.jsonl'
df_train = pd.read_json(folder_path_train, lines=True)
df_dev = pd.read_json(folder_path_dev, lines=True)
with open('/content/drive/MyDrive/HMD_project/new/text_aug_norm.txt', 'r') as f:
    lines = f.readlines()
file_names = []
for i in lines:
    file_names.append(i[:i.find('\n')])
for i in range(len(file_names)):
    df_train.loc[len(df_train.index)] = [1, 'img/1', 1, file_names[i]]
data = df_train
from gensim.models import KeyedVectors
wv = KeyedVectors.load('/content/drive/MyDrive/HMD_project/glove-twitter-200')
import spacy.cli
spacy.cli.download('en_core_web_sm')
nlp = spacy.load('en_core_web_sm')
def preprocess(text):
    doc = nlp(text)
    filtered_token = []
    for token in doc:
        if token.is_punct or token.is_space or token.is_bracket or token.is_stop:
            continue
        else:
            token = token.lemma_
            filtered_token.append(token)
    return filtered_token
df_dev['processed_text_val'] = df_dev['text'].apply(lambda x: preprocess(x))
data['processed_text'] = data['text'].apply(lambda x: preprocess(x))
import numpy as np
def gensim_vector(token):
    vec_size = wv.vector_size
    wv_final = np.zeros(vec_size)
    count = 1
    for t in token:
        if t in wv:
            count += 1
            wv_final += wv[t]
    return wv_final / count
data['text_vector'] = data['processed_text'].apply(gensim_vector)
df_dev['text_vector_val'] = df_dev['processed_text_val'].apply(gensim_vector)
len(data.text_vector.iloc[0])
code
48165025/cell_9
[ "image_output_1.png" ]
import pandas as pd
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
train = pd.read_csv('../input/titanic/train.csv', index_col='PassengerId')
test = pd.read_csv('../input/titanic/test.csv', index_col='PassengerId')
train.describe()
code
48165025/cell_25
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
train = pd.read_csv('../input/titanic/train.csv', index_col='PassengerId')
test = pd.read_csv('../input/titanic/test.csv', index_col='PassengerId')
train.isnull().sum()
sns.set(font_scale=1.4)
sns.set_style('whitegrid')
plt.figure(figsize=(10, 6))
sns.set_style('whitegrid')
sns.countplot(x='Survived', hue='Sex', data=train, palette='RdBu_r')
plt.title('Survived/not survived by sex')
plt.show()
code