path (string, 13-17 chars) | screenshot_names (sequence, 1-873 items) | code (string, 0-40.4k chars) | cell_type (1 class: "code") |
---|---|---|---|
17136778/cell_11 | [
"image_output_1.png"
] | test = CustomImageList.from_csv_custom(path=path, csv_name='test.csv', imgIdx=0)
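# One chained call: 80/20 random split, labels from the 'label' column, attach the test
# set, apply default transforms without flips, batch into a DataBunch (bs=128), and
# normalise with ImageNet statistics.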
data = CustomImageList.from_csv_custom(path=path, csv_name='train.csv', imgIdx=1).split_by_rand_pct(0.2).label_from_df(cols='label').add_test(test, label=0).transform(get_transforms(do_flip=False)).databunch(bs=128, num_workers=0).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet18, metrics=[accuracy], model_dir='/kaggle/working/models')
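# lr_find runs a short mock training sweep over learning rates; recorder.plot(suggestion=True)
# marks a suggested max_lr near the point of steepest loss descent.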
learn.lr_find()
learn.recorder.plot(suggestion=True) | code |
17136778/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | test = CustomImageList.from_csv_custom(path=path, csv_name='test.csv', imgIdx=0)
data = CustomImageList.from_csv_custom(path=path, csv_name='train.csv', imgIdx=1).split_by_rand_pct(0.2).label_from_df(cols='label').add_test(test, label=0).transform(get_transforms(do_flip=False)).databunch(bs=128, num_workers=0).normalize(imagenet_stats)
data.show_batch(rows=3, figsize=(5, 5)) | code |
17136778/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | test = CustomImageList.from_csv_custom(path=path, csv_name='test.csv', imgIdx=0)
data = CustomImageList.from_csv_custom(path=path, csv_name='train.csv', imgIdx=1).split_by_rand_pct(0.2).label_from_df(cols='label').add_test(test, label=0).transform(get_transforms(do_flip=False)).databunch(bs=128, num_workers=0).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet18, metrics=[accuracy], model_dir='/kaggle/working/models')
learn.lr_find()
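# Standard fine-tuning recipe: train the head, unfreeze the backbone, re-run lr_find,
# then train the whole network with discriminative learning rates via slice().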
learn.fit_one_cycle(4, max_lr=0.01)
learn.unfreeze()
learn.lr_find()
learn.fit_one_cycle(10, max_lr=slice(1e-06, 0.0001))
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix() | code |
17136778/cell_14 | [
"text_html_output_1.png"
] | test = CustomImageList.from_csv_custom(path=path, csv_name='test.csv', imgIdx=0)
data = CustomImageList.from_csv_custom(path=path, csv_name='train.csv', imgIdx=1).split_by_rand_pct(0.2).label_from_df(cols='label').add_test(test, label=0).transform(get_transforms(do_flip=False)).databunch(bs=128, num_workers=0).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet18, metrics=[accuracy], model_dir='/kaggle/working/models')
learn.lr_find()
learn.fit_one_cycle(4, max_lr=0.01)
learn.unfreeze()
learn.lr_find()
learn.fit_one_cycle(10, max_lr=slice(1e-06, 0.0001)) | code |
17136778/cell_10 | [
"text_plain_output_1.png"
] | test = CustomImageList.from_csv_custom(path=path, csv_name='test.csv', imgIdx=0)
data = CustomImageList.from_csv_custom(path=path, csv_name='train.csv', imgIdx=1).split_by_rand_pct(0.2).label_from_df(cols='label').add_test(test, label=0).transform(get_transforms(do_flip=False)).databunch(bs=128, num_workers=0).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet18, metrics=[accuracy], model_dir='/kaggle/working/models') | code |
17136778/cell_12 | [
"application_vnd.jupyter.stderr_output_1.png"
] | test = CustomImageList.from_csv_custom(path=path, csv_name='test.csv', imgIdx=0)
data = CustomImageList.from_csv_custom(path=path, csv_name='train.csv', imgIdx=1).split_by_rand_pct(0.2).label_from_df(cols='label').add_test(test, label=0).transform(get_transforms(do_flip=False)).databunch(bs=128, num_workers=0).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet18, metrics=[accuracy], model_dir='/kaggle/working/models')
learn.lr_find()
learn.fit_one_cycle(4, max_lr=0.01) | code |
89130914/cell_42 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/bharti-airtel-stock-proce/BHARTIARTL.NS.csv')
df.shape
df.columns
df.sample(5)
df_1 = df.set_index('Date')
df_1.sample(5)
df_1['Rolling 7: 7Days Rolling'] = df_1.High.rolling(7).mean()
df_1['Rolling 30: 30Days Rolling'] = df_1.High.rolling(30).mean()
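# Note: the date-string xlim below assumes the 'Date' index is datetime-like;
# pd.to_datetime(df_1.index) would make that explicit.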
df_1['Close'].plot(xlim=['2017-12-31', '2018-12-31'], figsize=(20, 5), color='r')
plt.title('Airtel in the 2018 Crash', fontsize=18) | code |
89130914/cell_34 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/bharti-airtel-stock-proce/BHARTIARTL.NS.csv')
df.shape
df.columns
df.sample(5)
df_1 = df.set_index('Date')
df_1.sample(5)
df_1['Rolling 7: 7Days Rolling'] = df_1.High.rolling(7).mean()
df_1['Rolling 30: 30Days Rolling'] = df_1.High.rolling(30).mean()
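# Overlay the closing price with the trailing 7-day and 30-day means computed above.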
df_1[['Close', 'Rolling 30: 30Days Rolling', 'Rolling 7: 7Days Rolling']].plot(figsize=(20, 9), color=['green', 'blue', 'orange'])
plt.title('AIRTEL Stock Price - 5Y (7-day and 30-day rolling)', fontsize=18)
plt.plot() | code |
89130914/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/bharti-airtel-stock-proce/BHARTIARTL.NS.csv')
df.shape
df.columns
df.sample(5)
df.info() | code |
89130914/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/bharti-airtel-stock-proce/BHARTIARTL.NS.csv')
df.shape
df.columns
df.sample(5)
df.info() | code |
89130914/cell_26 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/bharti-airtel-stock-proce/BHARTIARTL.NS.csv')
df.shape
df.columns
df.sample(5)
df_1 = df.set_index('Date')
df_1.sample(5) | code |
89130914/cell_54 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/bharti-airtel-stock-proce/BHARTIARTL.NS.csv')
df.shape
df.columns
df.sample(5)
df_1 = df.set_index('Date')
df_1.sample(5)
df_1['Rolling 7: 7Days Rolling'] = df_1.High.rolling(7).mean()
df_1['Rolling 30: 30Days Rolling'] = df_1.High.rolling(30).mean()
df_1['Close'].plot(xlim=['2021-10-01', '2021-11-30'], figsize=(20, 5), color='r')
plt.title('Effect During the Plan Price Hike', fontsize=18) | code |
89130914/cell_50 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/bharti-airtel-stock-proce/BHARTIARTL.NS.csv')
df.shape
df.columns
df.sample(5)
df_1 = df.set_index('Date')
df_1.sample(5)
df_1['Rolling 7: 7Days Rolling'] = df_1.High.rolling(7).mean()
df_1['Rolling 30: 30Days Rolling'] = df_1.High.rolling(30).mean()
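# resample(rule='W').max() downsamples the daily series to weekly bars, keeping each week's maximum.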
df_1.resample(rule='W').max()['Close'].plot(xlim=['2020-02-20', '2020-04-07'], figsize=(12, 5), color='Orange', ls='dashed')
plt.ylabel('Closing Price')
plt.title('Airtel Stock Price in 2020 Market Crash (Weekly)', fontsize=15) | code |
89130914/cell_45 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/bharti-airtel-stock-proce/BHARTIARTL.NS.csv')
df.shape
df.columns
df.sample(5)
df_1 = df.set_index('Date')
df_1.sample(5)
df_1['Rolling 7: 7Days Rolling'] = df_1.High.rolling(7).mean()
df_1['Rolling 30: 30Days Rolling'] = df_1.High.rolling(30).mean()
df_1['Close'].plot(xlim=['2018-12-31', '2019-12-31'], figsize=(20, 5), color='r', ls='dashed')
plt.title('Airtel After the 2018 Crash (next 12 months)', fontsize=18) | code |
89130914/cell_49 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/bharti-airtel-stock-proce/BHARTIARTL.NS.csv')
df.shape
df.columns
df.sample(5)
df_1 = df.set_index('Date')
df_1.sample(5)
df_1['Rolling 7: 7Days Rolling'] = df_1.High.rolling(7).mean()
df_1['Rolling 30: 30Days Rolling'] = df_1.High.rolling(30).mean()
df_1['Close'].plot(xlim=['2020-02-20', '2020-04-07'], figsize=(12, 5), color='r')
plt.ylabel('Closing Price')
plt.title('Airtel Stock Price in 2020 Market Crash', fontsize=15) | code |
89130914/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/bharti-airtel-stock-proce/BHARTIARTL.NS.csv')
df.shape
df.columns
df.sample(5)
df.tail(2) | code |
89130914/cell_32 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/bharti-airtel-stock-proce/BHARTIARTL.NS.csv')
df.shape
df.columns
df.sample(5)
df_1 = df.set_index('Date')
df_1.sample(5)
df_1['Close'].plot(figsize=(20, 5), color='g')
plt.title('AIRTEL Stock Price - 5Y', fontsize=20) | code |
89130914/cell_28 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/bharti-airtel-stock-proce/BHARTIARTL.NS.csv')
df.shape
df.columns
df.sample(5)
df_1 = df.set_index('Date')
df_1.sample(5)
df_1.plot() | code |
89130914/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/bharti-airtel-stock-proce/BHARTIARTL.NS.csv')
df.shape
df.columns
df.head(2) | code |
89130914/cell_17 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/bharti-airtel-stock-proce/BHARTIARTL.NS.csv')
df.shape
df.columns
df.sample(5) | code |
89130914/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/bharti-airtel-stock-proce/BHARTIARTL.NS.csv')
df.shape
df.columns | code |
89130914/cell_37 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/bharti-airtel-stock-proce/BHARTIARTL.NS.csv')
df.shape
df.columns
df.sample(5)
df_1 = df.set_index('Date')
df_1.sample(5)
df_1['Rolling 7: 7Days Rolling'] = df_1.High.rolling(7).mean()
df_1['Rolling 30: 30Days Rolling'] = df_1.High.rolling(30).mean()
df_1['Close'].plot(xlim=['2022-02-05', '2022-02-25'], figsize=(12, 5), color='g')
plt.title('Airtel Stock Price in the Last 20 Days', fontsize=17) | code |
89130914/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/bharti-airtel-stock-proce/BHARTIARTL.NS.csv')
df.shape | code |
74056813/cell_6 | [
"image_output_1.png"
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
sns.set()
data = pd.read_csv('../input/boston-housing-dataset/HousingData.csv')
data = data.dropna()
data
X = data.drop('MEDV', axis=1).values
Y = data['MEDV'].values
X
Room_number = X[:, 5]
Room_number = Room_number.reshape(-1, 1)
Y = Y.reshape(-1, 1)
from sklearn.linear_model import LinearRegression
regression = LinearRegression()
regression.fit(Room_number, Y)
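# Evenly spaced room counts between the observed min and max give x-values for drawing the fitted line.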
regression_line = np.linspace(min(Room_number), max(Room_number))
plt.scatter(Room_number, Y, color='green')
plt.xlabel('Average Room number')
plt.ylabel('Average price (x1000 $)')
plt.title('The relationship between the number of rooms and the price of the house')
plt.plot(regression_line, regression.predict(regression_line), color='black', linewidth=3)
plt.show() | code |
74056813/cell_2 | [
"text_html_output_1.png"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
data = pd.read_csv('../input/boston-housing-dataset/HousingData.csv')
data = data.dropna()
data | code |
74056813/cell_7 | [
"text_plain_output_1.png"
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
data = pd.read_csv('../input/boston-housing-dataset/HousingData.csv')
data = data.dropna()
data
X = data.drop('MEDV', axis=1).values
Y = data['MEDV'].values
X
Room_number = X[:, 5]
Room_number = Room_number.reshape(-1, 1)
Y = Y.reshape(-1, 1)
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=30)
regression_all = LinearRegression()
regression_all.fit(X_train, Y_train)
Y_prediction = regression_all.predict(X_test)
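# score() on the held-out split returns the coefficient of determination, R^2.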
print('R^2: {}'.format(regression_all.score(X_test, Y_test))) | code |
74056813/cell_8 | [
"text_plain_output_1.png"
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
data = pd.read_csv('../input/boston-housing-dataset/HousingData.csv')
data = data.dropna()
data
X = data.drop('MEDV', axis=1).values
Y = data['MEDV'].values
X
Room_number = X[:, 5]
Room_number = Room_number.reshape(-1, 1)
Y = Y.reshape(-1, 1)
from sklearn.linear_model import LinearRegression
regression = LinearRegression()
regression.fit(Room_number, Y)
regression_line = np.linspace(min(Room_number), max(Room_number))
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=30)
regression_all = LinearRegression()
regression_all.fit(X_train, Y_train)
Y_prediction = regression_all.predict(X_test)
from sklearn.metrics import mean_squared_error
Y_prediction = regression_all.predict(X_test)
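# RMSE: the square root of sklearn's mean_squared_error on the held-out predictions.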
error = np.sqrt(mean_squared_error(Y_test, Y_prediction))
print('error: {}'.format(error)) | code |
74056813/cell_3 | [
"text_plain_output_1.png"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
data = pd.read_csv('../input/boston-housing-dataset/HousingData.csv')
data = data.dropna()
data
X = data.drop('MEDV', axis=1).values
Y = data['MEDV'].values
X | code |
74056813/cell_5 | [
"image_output_1.png"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
data = pd.read_csv('../input/boston-housing-dataset/HousingData.csv')
data = data.dropna()
data
X = data.drop('MEDV', axis=1).values
Y = data['MEDV'].values
X
Room_number = X[:, 5]
Room_number = Room_number.reshape(-1, 1)
Y = Y.reshape(-1, 1)
plt.scatter(Room_number, Y)
plt.xlabel('Average Room number')
plt.ylabel('Average price (x1000 $)')
plt.title('The relationship between the number of rooms and the price of the house')
plt.show() | code |
128041288/cell_13 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder, StandardScaler
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/student-performance-in-mathematics/exams.csv')
df
df.isnull().sum()
df.describe().T
fig, axis = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
sns.boxplot(data=df[['math score']], ax=axis[0])
sns.boxplot(data=df[['reading score']], ax=axis[1])
sns.boxplot(data=df[['writing score']], ax=axis[2])
df.skew()
df.corr()
from sklearn.preprocessing import LabelEncoder, StandardScaler
le = LabelEncoder()
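# LabelEncoder maps each two-level categorical column to 0/1 in place.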
df['gender'] = le.fit_transform(df['gender'])
df['lunch'] = le.fit_transform(df['lunch'])
df['test preparation course'] = le.fit_transform(df['test preparation course'])
df.head() | code |
128041288/cell_9 | [
"text_plain_output_1.png"
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/student-performance-in-mathematics/exams.csv')
df
df.isnull().sum()
df.describe().T
df.skew() | code |
128041288/cell_4 | [
"text_html_output_1.png"
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/student-performance-in-mathematics/exams.csv')
df
df.isnull().sum() | code |
128041288/cell_6 | [
"text_html_output_1.png"
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/student-performance-in-mathematics/exams.csv')
df
df.isnull().sum()
df.describe().T | code |
128041288/cell_11 | [
"text_plain_output_1.png"
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/student-performance-in-mathematics/exams.csv')
df
df.isnull().sum()
df.describe().T
df.skew()
df.corr() | code |
128041288/cell_1 | [
"text_plain_output_1.png"
import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
128041288/cell_7 | [
"text_html_output_1.png"
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/student-performance-in-mathematics/exams.csv')
df
df.isnull().sum()
df.describe().T
df.hist(figsize=(16, 10), color='green') | code |
128041288/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.preprocessing import LabelEncoder, StandardScaler
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/student-performance-in-mathematics/exams.csv')
df
df.isnull().sum()
df.describe().T
fig, axis = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
sns.boxplot(data=df[['math score']], ax=axis[0])
sns.boxplot(data=df[['reading score']], ax=axis[1])
sns.boxplot(data=df[['writing score']], ax=axis[2])
df.skew()
df.corr()
from sklearn.preprocessing import LabelEncoder, StandardScaler
le = LabelEncoder()
df['gender'] = le.fit_transform(df['gender'])
df['lunch'] = le.fit_transform(df['lunch'])
df['test preparation course'] = le.fit_transform(df['test preparation course'])
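# One-hot encode the multi-class columns with get_dummies, concatenate, and drop the originals.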
race_dummies = pd.get_dummies(df['race/ethnicity'], prefix='race')
df = pd.concat([df, race_dummies], axis=1)
df = df.drop('race/ethnicity', axis=1)
parental_dummies = pd.get_dummies(df['parental level of education'], prefix='LOE')
df = pd.concat([df, parental_dummies], axis=1)
df = df.drop('parental level of education', axis=1)
X = df.drop(['math score'], axis=1)
Y = df['math score']
print(X.shape)
print(Y.shape) | code |
128041288/cell_8 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/student-performance-in-mathematics/exams.csv')
df
df.isnull().sum()
df.describe().T
fig, axis = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
sns.boxplot(data=df[['math score']], ax=axis[0])
sns.boxplot(data=df[['reading score']], ax=axis[1])
sns.boxplot(data=df[['writing score']], ax=axis[2]) | code |
128041288/cell_15 | [
"image_output_1.png"
] | from sklearn.preprocessing import LabelEncoder, StandardScaler
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/student-performance-in-mathematics/exams.csv')
df
df.isnull().sum()
df.describe().T
fig, axis = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
sns.boxplot(data=df[['math score']], ax=axis[0])
sns.boxplot(data=df[['reading score']], ax=axis[1])
sns.boxplot(data=df[['writing score']], ax=axis[2])
df.skew()
df.corr()
from sklearn.preprocessing import LabelEncoder, StandardScaler
le = LabelEncoder()
df['gender'] = le.fit_transform(df['gender'])
df['lunch'] = le.fit_transform(df['lunch'])
df['test preparation course'] = le.fit_transform(df['test preparation course'])
race_dummies = pd.get_dummies(df['race/ethnicity'], prefix='race')
df = pd.concat([df, race_dummies], axis=1)
df = df.drop('race/ethnicity', axis=1)
parental_dummies = pd.get_dummies(df['parental level of education'], prefix='LOE')
df = pd.concat([df, parental_dummies], axis=1)
df = df.drop('parental level of education', axis=1)
df.head() | code |
128041288/cell_3 | [
"image_output_1.png"
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/student-performance-in-mathematics/exams.csv')
df | code |
128041288/cell_17 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.preprocessing import LabelEncoder, StandardScaler
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/student-performance-in-mathematics/exams.csv')
df
df.isnull().sum()
df.describe().T
fig, axis = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
sns.boxplot(data=df[['math score']], ax=axis[0])
sns.boxplot(data=df[['reading score']], ax=axis[1])
sns.boxplot(data=df[['writing score']], ax=axis[2])
df.skew()
df.corr()
from sklearn.preprocessing import LabelEncoder, StandardScaler
le = LabelEncoder()
df['gender'] = le.fit_transform(df['gender'])
df['lunch'] = le.fit_transform(df['lunch'])
df['test preparation course'] = le.fit_transform(df['test preparation course'])
race_dummies = pd.get_dummies(df['race/ethnicity'], prefix='race')
df = pd.concat([df, race_dummies], axis=1)
df = df.drop('race/ethnicity', axis=1)
parental_dummies = pd.get_dummies(df['parental level of education'], prefix='LOE')
df = pd.concat([df, parental_dummies], axis=1)
df = df.drop('parental level of education', axis=1)
df.head() | code |
128041288/cell_14 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.preprocessing import LabelEncoder, StandardScaler
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/student-performance-in-mathematics/exams.csv')
df
df.isnull().sum()
df.describe().T
fig, axis = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
sns.boxplot(data=df[['math score']], ax=axis[0])
sns.boxplot(data=df[['reading score']], ax=axis[1])
sns.boxplot(data=df[['writing score']], ax=axis[2])
df.skew()
df.corr()
from sklearn.preprocessing import LabelEncoder, StandardScaler
le = LabelEncoder()
df['gender'] = le.fit_transform(df['gender'])
df['lunch'] = le.fit_transform(df['lunch'])
df['test preparation course'] = le.fit_transform(df['test preparation course'])
race_dummies = pd.get_dummies(df['race/ethnicity'], prefix='race')
df = pd.concat([df, race_dummies], axis=1)
df = df.drop('race/ethnicity', axis=1)
df.head() | code |
128041288/cell_10 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/student-performance-in-mathematics/exams.csv')
df
df.isnull().sum()
df.describe().T
fig, axis = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
sns.boxplot(data=df[['math score']], ax=axis[0])
sns.boxplot(data=df[['reading score']], ax=axis[1])
sns.boxplot(data=df[['writing score']], ax=axis[2])
df.skew()
sns.pairplot(df, hue='gender') | code |
128041288/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/student-performance-in-mathematics/exams.csv')
df
df.isnull().sum()
df.describe().T
fig, axis = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
sns.boxplot(data=df[['math score']], ax=axis[0])
sns.boxplot(data=df[['reading score']], ax=axis[1])
sns.boxplot(data=df[['writing score']], ax=axis[2])
df.skew()
df.corr()
plt.figure(figsize=(16, 9))
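# annot=True writes each correlation coefficient inside its heatmap cell.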
sns.heatmap(df.corr(), annot=True) | code |
128041288/cell_5 | [
"text_html_output_1.png"
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/student-performance-in-mathematics/exams.csv')
df
df.isnull().sum()
df.info() | code |
32068524/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv', index_col='PassengerId')
cols_with_missing = [col for col in train_data.columns if train_data[col].isnull().any()]
cols_with_missing | code |
32068524/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv', index_col='PassengerId')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv', index_col='PassengerId')
test_data.head() | code |
32068524/cell_4 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestClassifier
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
32068524/cell_34 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv', index_col='PassengerId')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv', index_col='PassengerId')
cols_with_missing = [col for col in train_data.columns if train_data[col].isnull().any()]
cols_with_missing
cat = train_data.dtypes == 'object'
object_cols = list(cat[cat].index)
object_cols
test_cols_with_missing = [col for col in test_data.columns if test_data[col].isnull().any()]
test_cols_with_missing
test_cat = test_data.dtypes == 'object'
test_object_cols = list(test_cat[test_cat].index)
test_object_cols
object_cols = [col for col in train_data.columns if train_data[col].dtype == 'object']
good_label_cols = [col for col in object_cols if set(train_data[col]) == set(test_data[col])]
bad_label_cols = list(set(object_cols) - set(good_label_cols))
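# NOTE: X_train and X_valid here are assumed to come from an earlier train_test_split
# over the training data.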
cat = X_train.dtypes == 'object'
object_cols = list(cat[cat].index)
object_cols
object_cols = [col for col in X_train.columns if X_train[col].dtype == 'object']
good_label_cols = [col for col in object_cols if set(X_train[col]) == set(X_valid[col])]
bad_label_cols = list(set(object_cols) - set(good_label_cols))
print('Categorical columns that will be label encoded:', good_label_cols)
print('\nCategorical columns that will be dropped from the dataset:', bad_label_cols) | code |
32068524/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv', index_col='PassengerId')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv', index_col='PassengerId')
cols_with_missing = [col for col in train_data.columns if train_data[col].isnull().any()]
cols_with_missing
cat = train_data.dtypes == 'object'
object_cols = list(cat[cat].index)
object_cols
test_cols_with_missing = [col for col in test_data.columns if test_data[col].isnull().any()]
test_cols_with_missing
test_cat = test_data.dtypes == 'object'
test_object_cols = list(test_cat[test_cat].index)
test_object_cols
object_nunique = list(map(lambda col: train_data[col].nunique(), object_cols))
d = dict(zip(object_cols, object_nunique))
object_nunique_test = list(map(lambda col: test_data[col].nunique(), test_object_cols))
d_test = dict(zip(test_object_cols, object_nunique_test))
sorted(d.items(), key=lambda x: x[1])
sorted(d_test.items(), key=lambda x: x[1]) | code |
32068524/cell_44 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv', index_col='PassengerId')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv', index_col='PassengerId')
cols_with_missing = [col for col in train_data.columns if train_data[col].isnull().any()]
cols_with_missing
cat = train_data.dtypes == 'object'
object_cols = list(cat[cat].index)
object_cols
test_cols_with_missing = [col for col in test_data.columns if test_data[col].isnull().any()]
test_cols_with_missing
test_cat = test_data.dtypes == 'object'
test_object_cols = list(test_cat[test_cat].index)
test_object_cols
object_cols = [col for col in train_data.columns if train_data[col].dtype == 'object']
good_label_cols = [col for col in object_cols if set(train_data[col]) == set(test_data[col])]
bad_label_cols = list(set(object_cols) - set(good_label_cols))
X = train_data
X_test = test_data
X.dropna(axis=0, subset=['Survived'], inplace=True)
y = X.Survived
X.drop(['Survived'], axis=1, inplace=True)
def score_dataset(X_train, X_valid, y_train, y_valid):
model = RandomForestRegressor(n_estimators=100, random_state=0)
model.fit(X_train, y_train)
preds = model.predict(X_valid)
return mean_absolute_error(y_valid, preds)
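# NOTE: X_train/X_valid below are assumed to come from a train_test_split like the one
# later in this cell.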
cat = X_train.dtypes == 'object'
object_cols = list(cat[cat].index)
object_cols
object_cols = [col for col in X_train.columns if X_train[col].dtype == 'object']
good_label_cols = [col for col in object_cols if set(X_train[col]) == set(X_valid[col])]
bad_label_cols = list(set(object_cols) - set(good_label_cols))
from sklearn.preprocessing import OneHotEncoder
label_X_train = X_train.copy()
label_X_valid = X_valid.copy()
OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
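# Fit the encoder on the training columns only, then reuse the fitted mapping on the
# validation columns; handle_unknown='ignore' covers categories unseen in training.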
OH_col_train = pd.DataFrame(OH_encoder.fit_transform(X_train[object_cols]))
OH_col_valid = pd.DataFrame(OH_encoder.transform(X_valid[object_cols]))
OH_col_train.index = X_train.index
OH_col_valid.index = X_valid.index
num_X_train = X_train.drop(object_cols, axis=1)
num_X_valid = X_valid.drop(object_cols, axis=1)
OH_X_train = pd.concat([num_X_train, OH_col_train], axis=1)
OH_X_valid = pd.concat([num_X_valid, OH_col_valid], axis=1)
import pandas as pd
from sklearn.model_selection import train_test_split
X = train_data
X_test = test_data
X.dropna(axis=0, subset=['Survived'], inplace=True)
y = X.Survived
X.drop(['Survived'], axis=1, inplace=True)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
categorical_cols = [cname for cname in X_train.columns if X_train[cname].nunique() < 10 and X_train[cname].dtype == 'object']
numerical_cols = [cname for cname in X_train.columns if X_train[cname].dtype in ['int64', 'float64']]
my_cols = categorical_cols + numerical_cols
X_train = X_train[my_cols].copy()
X_valid = X_valid[my_cols].copy()
X_test = X_test[my_cols].copy()
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
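# Impute numeric columns with a constant, impute categoricals with the mode and one-hot
# encode them, then route each column group through a single ColumnTransformer.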
numerical_transformer = SimpleImputer(strategy='constant')
categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='most_frequent')), ('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(transformers=[('num', numerical_transformer, numerical_cols), ('cat', categorical_transformer, categorical_cols)])
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor(n_estimators=100, random_state=0)
from sklearn.metrics import mean_absolute_error
my_pipeline = Pipeline(steps=[('preprocessor', preprocessor), ('model', model)])
my_pipeline.fit(X_train, y_train)
preds = my_pipeline.predict(X_valid)
preds_test = my_pipeline.predict(X_test)
score = mean_absolute_error(y_valid, preds)
print('MAE:', score) | code |
32068524/cell_20 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv', index_col='PassengerId')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv', index_col='PassengerId')
test_cols_with_missing = [col for col in test_data.columns if test_data[col].isnull().any()]
test_cols_with_missing
test_cat = test_data.dtypes == 'object'
test_object_cols = list(test_cat[test_cat].index)
test_object_cols | code |
32068524/cell_40 | [
"application_vnd.jupyter.stderr_output_1.png"
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv', index_col='PassengerId')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv', index_col='PassengerId')
cols_with_missing = [col for col in train_data.columns if train_data[col].isnull().any()]
cols_with_missing
cat = train_data.dtypes == 'object'
object_cols = list(cat[cat].index)
object_cols
test_cols_with_missing = [col for col in test_data.columns if test_data[col].isnull().any()]
test_cols_with_missing
test_cat = test_data.dtypes == 'object'
test_object_cols = list(test_cat[test_cat].index)
test_object_cols
object_cols = [col for col in train_data.columns if train_data[col].dtype == 'object']
good_label_cols = [col for col in object_cols if set(train_data[col]) == set(test_data[col])]
bad_label_cols = list(set(object_cols) - set(good_label_cols))
X = train_data
X_test = test_data
X.dropna(axis=0, subset=['Survived'], inplace=True)
y = X.Survived
X.drop(['Survived'], axis=1, inplace=True)
cat = X_train.dtypes == 'object'
object_cols = list(cat[cat].index)
object_cols
object_cols = [col for col in X_train.columns if X_train[col].dtype == 'object']
good_label_cols = [col for col in object_cols if set(X_train[col]) == set(X_valid[col])]
bad_label_cols = list(set(object_cols) - set(good_label_cols))
from sklearn.preprocessing import OneHotEncoder
label_X_train = X_train.copy()
label_X_valid = X_valid.copy()
OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_col_train = pd.DataFrame(OH_encoder.fit_transform(X_train[object_cols]))
OH_col_valid = pd.DataFrame(OH_encoder.transform(X_valid[object_cols]))
OH_col_train.index = X_train.index
OH_col_valid.index = X_valid.index
num_X_train = X_train.drop(object_cols, axis=1)
num_X_valid = X_valid.drop(object_cols, axis=1)
OH_X_train = pd.concat([num_X_train, OH_col_train], axis=1)
OH_X_valid = pd.concat([num_X_valid, OH_col_valid], axis=1)
import pandas as pd
from sklearn.model_selection import train_test_split
X = train_data
X_test = test_data
X.dropna(axis=0, subset=['Survived'], inplace=True)
y = X.Survived
X.drop(['Survived'], axis=1, inplace=True)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
categorical_cols = [cname for cname in X_train.columns if X_train[cname].nunique() < 10 and X_train[cname].dtype == 'object']
numerical_cols = [cname for cname in X_train.columns if X_train[cname].dtype in ['int64', 'float64']]
my_cols = categorical_cols + numerical_cols
X_train = X_train[my_cols].copy()
X_valid = X_valid[my_cols].copy()
X_test = X_test[my_cols].copy()
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
numerical_transformer = SimpleImputer(strategy='constant')
categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='most_frequent')), ('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(transformers=[('num', numerical_transformer, numerical_cols), ('cat', categorical_transformer, categorical_cols)]) | code |
32068524/cell_26 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv', index_col='PassengerId')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv', index_col='PassengerId')
cols_with_missing = [col for col in train_data.columns if train_data[col].isnull().any()]
cols_with_missing
cat = train_data.dtypes == 'object'
object_cols = list(cat[cat].index)
object_cols
test_cols_with_missing = [col for col in test_data.columns if test_data[col].isnull().any()]
test_cols_with_missing
test_cat = test_data.dtypes == 'object'
test_object_cols = list(test_cat[test_cat].index)
test_object_cols
object_cols = [col for col in train_data.columns if train_data[col].dtype == 'object']
good_label_cols = [col for col in object_cols if set(train_data[col]) == set(test_data[col])]
bad_label_cols = list(set(object_cols) - set(good_label_cols))
print('Categorical columns that will be label encoded:', good_label_cols)
print('\nCategorical columns that will be dropped from the dataset:', bad_label_cols) | code |
32068524/cell_19 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv', index_col='PassengerId')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv', index_col='PassengerId')
test_cols_with_missing = [col for col in test_data.columns if test_data[col].isnull().any()]
test_cols_with_missing | code |
32068524/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv', index_col='PassengerId')
train_data.head() | code |
32068524/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv', index_col='PassengerId')
train_data.describe() | code |
32068524/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv', index_col='PassengerId')
cols_with_missing = [col for col in train_data.columns if train_data[col].isnull().any()]
cols_with_missing
cat = train_data.dtypes == 'object'
object_cols = list(cat[cat].index)
object_cols | code |
32068524/cell_38 | [
"text_plain_output_1.png"
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv', index_col='PassengerId')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv', index_col='PassengerId')
cols_with_missing = [col for col in train_data.columns if train_data[col].isnull().any()]
cols_with_missing
cat = train_data.dtypes == 'object'
object_cols = list(cat[cat].index)
object_cols
test_cols_with_missing = [col for col in test_data.columns if test_data[col].isnull().any()]
test_cols_with_missing
test_cat = test_data.dtypes == 'object'
test_object_cols = list(test_cat[test_cat].index)
test_object_cols
object_cols = [col for col in train_data.columns if train_data[col].dtype == 'object']
good_label_cols = [col for col in object_cols if set(train_data[col]) == set(test_data[col])]
bad_label_cols = list(set(object_cols) - set(good_label_cols))
X = train_data
X_test = test_data
X.dropna(axis=0, subset=['Survived'], inplace=True)
y = X.Survived
X.drop(['Survived'], axis=1, inplace=True)
cat = X_train.dtypes == 'object'
object_cols = list(cat[cat].index)
object_cols
object_cols = [col for col in X_train.columns if X_train[col].dtype == 'object']
good_label_cols = [col for col in object_cols if set(X_train[col]) == set(X_valid[col])]
bad_label_cols = list(set(object_cols) - set(good_label_cols))
from sklearn.preprocessing import OneHotEncoder
label_X_train = X_train.copy()
label_X_valid = X_valid.copy()
OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_col_train = pd.DataFrame(OH_encoder.fit_transform(X_train[object_cols]))
OH_col_valid = pd.DataFrame(OH_encoder.transform(X_valid[object_cols]))
OH_col_train.index = X_train.index
OH_col_valid.index = X_valid.index
num_X_train = X_train.drop(object_cols, axis=1)
num_X_valid = X_valid.drop(object_cols, axis=1)
OH_X_train = pd.concat([num_X_train, OH_col_train], axis=1)
OH_X_valid = pd.concat([num_X_valid, OH_col_valid], axis=1)
import pandas as pd
from sklearn.model_selection import train_test_split
X = train_data
X_test = test_data
X.dropna(axis=0, subset=['Survived'], inplace=True)
y = X.Survived
X.drop(['Survived'], axis=1, inplace=True)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
categorical_cols = [cname for cname in X_train.columns if X_train[cname].nunique() < 10 and X_train[cname].dtype == 'object']
numerical_cols = [cname for cname in X_train.columns if X_train[cname].dtype in ['int64', 'float64']]
my_cols = categorical_cols + numerical_cols
X_train = X_train[my_cols].copy()
X_valid = X_valid[my_cols].copy()
X_test = X_test[my_cols].copy() | code |
32068524/cell_35 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv', index_col='PassengerId')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv', index_col='PassengerId')
cols_with_missing = [col for col in train_data.columns if train_data[col].isnull().any()]
cols_with_missing
cat = train_data.dtypes == 'object'
object_cols = list(cat[cat].index)
object_cols
test_cols_with_missing = [col for col in test_data.columns if test_data[col].isnull().any()]
test_cols_with_missing
test_cat = test_data.dtypes == 'object'
test_object_cols = list(test_cat[test_cat].index)
test_object_cols
object_cols = [col for col in train_data.columns if train_data[col].dtype == 'object']
good_label_cols = [col for col in object_cols if set(train_data[col]) == set(test_data[col])]
bad_label_cols = list(set(object_cols) - set(good_label_cols))
cat = X_train.dtypes == 'object'
object_cols = list(cat[cat].index)
object_cols
object_cols = [col for col in X_train.columns if X_train[col].dtype == 'object']
good_label_cols = [col for col in object_cols if set(X_train[col]) == set(X_valid[col])]
bad_label_cols = list(set(object_cols) - set(good_label_cols))
from sklearn.preprocessing import OneHotEncoder
label_X_train = X_train.copy()
label_X_valid = X_valid.copy()
OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_col_train = pd.DataFrame(OH_encoder.fit_transform(X_train[object_cols]))
OH_col_valid = pd.DataFrame(OH_encoder.transform(X_valid[object_cols]))
OH_col_train.index = X_train.index
OH_col_valid.index = X_valid.index
num_X_train = X_train.drop(object_cols, axis=1)
num_X_valid = X_valid.drop(object_cols, axis=1)
OH_X_train = pd.concat([num_X_train, OH_col_train], axis=1)
OH_X_valid = pd.concat([num_X_valid, OH_col_valid], axis=1) | code |
32068524/cell_24 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv', index_col='PassengerId')
cols_with_missing = [col for col in train_data.columns if train_data[col].isnull().any()]
cols_with_missing
cat = train_data.dtypes == 'object'
object_cols = list(cat[cat].index)
object_cols
low_cardinality_cols = [col for col in object_cols if train_data[col].nunique() < 10]
high_cardinality_cols = list(set(object_cols) - set(low_cardinality_cols))
print('Categorical columns that will be one-hot encoded:', low_cardinality_cols)
print('\nCategorical columns that will be dropped from the dataset:', high_cardinality_cols) | code |
32068524/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv', index_col='PassengerId')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv', index_col='PassengerId')
cols_with_missing = [col for col in train_data.columns if train_data[col].isnull().any()]
cols_with_missing
cat = train_data.dtypes == 'object'
object_cols = list(cat[cat].index)
object_cols
test_cols_with_missing = [col for col in test_data.columns if test_data[col].isnull().any()]
test_cols_with_missing
test_cat = test_data.dtypes == 'object'
test_object_cols = list(test_cat[test_cat].index)
test_object_cols
object_nunique = list(map(lambda col: train_data[col].nunique(), object_cols))
d = dict(zip(object_cols, object_nunique))
object_nunique_test = list(map(lambda col: test_data[col].nunique(), test_object_cols))
d_test = dict(zip(test_object_cols, object_nunique_test))
sorted(d.items(), key=lambda x: x[1]) | code |
32068524/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv', index_col='PassengerId')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv', index_col='PassengerId')
test_data.describe() | code |
32068524/cell_36 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import OneHotEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv', index_col='PassengerId')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv', index_col='PassengerId')
cols_with_missing = [col for col in train_data.columns if train_data[col].isnull().any()]
cols_with_missing
cat = train_data.dtypes == 'object'
object_cols = list(cat[cat].index)
object_cols
test_cols_with_missing = [col for col in test_data.columns if test_data[col].isnull().any()]
test_cols_with_missing
test_cat = test_data.dtypes == 'object'
test_object_cols = list(test_cat[test_cat].index)
test_object_cols
object_cols = [col for col in train_data.columns if train_data[col].dtype == 'object']
good_label_cols = [col for col in object_cols if set(train_data[col]) == set(test_data[col])]
bad_label_cols = list(set(object_cols) - set(good_label_cols))
def score_dataset(X_train, X_valid, y_train, y_valid):
model = RandomForestRegressor(n_estimators=100, random_state=0)
model.fit(X_train, y_train)
preds = model.predict(X_valid)
return mean_absolute_error(y_valid, preds)
cat = X_train.dtypes == 'object'
object_cols = list(cat[cat].index)
object_cols
object_cols = [col for col in X_train.columns if X_train[col].dtype == 'object']
good_label_cols = [col for col in object_cols if set(X_train[col]) == set(X_valid[col])]
bad_label_cols = list(set(object_cols) - set(good_label_cols))
from sklearn.preprocessing import OneHotEncoder
label_X_train = X_train.copy()
label_X_valid = X_valid.copy()
OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_col_train = pd.DataFrame(OH_encoder.fit_transform(X_train[object_cols]))
OH_col_valid = pd.DataFrame(OH_encoder.transform(X_valid[object_cols]))
OH_col_train.index = X_train.index
OH_col_valid.index = X_valid.index
num_X_train = X_train.drop(object_cols, axis=1)
num_X_valid = X_valid.drop(object_cols, axis=1)
OH_X_train = pd.concat([num_X_train, OH_col_train], axis=1)
OH_X_valid = pd.concat([num_X_valid, OH_col_valid], axis=1)
print('MAE from one-hot encoding:')
print(score_dataset(OH_X_train, OH_X_valid, y_train, y_valid)) | code |
90128354/cell_4 | [
"text_html_output_1.png",
"image_output_1.png"
] | import os
import pandas as pd
data_path = '/kaggle/input/covidx9a/'
images_path = '/kaggle/input/covidx-cxr2/train'
data_file = 'train_COVIDx9A.txt'
train = pd.read_csv(os.path.join(data_path, data_file), header=None, sep=' ')
train.columns = ['patient id', 'filename', 'class', 'data source']
print('Training data shape:', train.shape)
display(train.head()) | code |
90128354/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns
data_path = '/kaggle/input/covidx9a/'
images_path = '/kaggle/input/covidx-cxr2/train'
data_file = 'train_COVIDx9A.txt'
train = pd.read_csv(os.path.join(data_path, data_file), header=None, sep=' ')
train.columns = ['patient id', 'filename', 'class', 'data source']
plt.figure(figsize=(8, 6))
sns.histplot(data=train, x='data source', hue='class')
plt.show()
data_classes = train['class'].unique()
df_summary_count = pd.DataFrame()
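# Tally per-source image counts for each class; the branches handle 2- vs 3-class label sets.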
for dataset in ['cohen', 'fig1', 'actmed', 'sirm', 'ricord', 'rsna', 'stonybrook', 'bimcv', 'rnsa']:
num_negative = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[0]), 'filename'].count()
if len(data_classes) == 2:
num_positive = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[1]), 'filename'].count()
df_new = pd.DataFrame({'Dataset': [dataset], 'Covid': [num_positive], 'Negative': [num_negative]})
elif len(data_classes) == 3:
num_pneumonia = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[1]), 'filename'].count()
num_positive = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[2]), 'filename'].count()
df_new = pd.DataFrame({'Dataset': [dataset], 'Covid': [num_positive], 'Pneumonia': [num_pneumonia], 'Negative': [num_negative]})
else:
        print(f'Error! Not accounting for {len(data_classes)} classes.')
df_summary_count = pd.concat([df_summary_count, df_new])
display(df_summary_count) | code |
90128354/cell_11 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
data_path = '/kaggle/input/covidx9a/'
images_path = '/kaggle/input/covidx-cxr2/train'
data_file = 'train_COVIDx9A.txt'
train = pd.read_csv(os.path.join(data_path, data_file), header=None, sep=' ')
train.columns = ['patient id', 'filename', 'class', 'data source']
data_classes = train['class'].unique()
df_summary_count = pd.DataFrame()
for dataset in ['cohen', 'fig1', 'actmed', 'sirm', 'ricord', 'rsna', 'stonybrook', 'bimcv', 'rnsa']:
num_negative = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[0]), 'filename'].count()
if len(data_classes) == 2:
num_positive = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[1]), 'filename'].count()
df_new = pd.DataFrame({'Dataset': [dataset], 'Covid': [num_positive], 'Negative': [num_negative]})
elif len(data_classes) == 3:
num_pneumonia = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[1]), 'filename'].count()
num_positive = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[2]), 'filename'].count()
df_new = pd.DataFrame({'Dataset': [dataset], 'Covid': [num_positive], 'Pneumonia': [num_pneumonia], 'Negative': [num_negative]})
df_summary_count = pd.concat([df_summary_count, df_new])
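# Count images per (patient, source, class), then count unique patients per source and per class.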
patient_distribution = train.groupby(['patient id', 'data source', 'class']).count().reset_index()
patient_distribution.rename(columns={'filename': 'num_patients'}, inplace=True)
num_patients_bydata = patient_distribution[['data source', 'num_patients']].groupby(['data source']).count()
num_patients_byclass = patient_distribution[['class', 'num_patients']].groupby(['class']).count()
print('Images are saved at:', images_path)
fig, axs = plt.subplots(3, 3, figsize=(18, 14))
for i in range(3):
for j in range(3):
if j == 0:
file_name, class_label = train[train['class'] == data_classes[0]].iloc[i, [1, 2]]
elif j == 1:
file_name, class_label = train[train['class'] == data_classes[1]].iloc[i, [1, 2]]
elif j == 2 and len(data_classes) == 3:
file_name, class_label = train[train['class'] == data_classes[2]].iloc[i, [1, 2]]
else:
print('Out of bounds')
image_file = os.path.join(images_path, file_name)
img = Image.open(image_file)
print('Original:', 3 * i + j, np.asarray(img).shape)
img = img.convert('L')
axs[i, j].set_title(f'Class: {class_label} - Image Size: {np.asarray(img).shape}')
axs[i, j].axis('off')
axs[i, j].imshow(img, cmap='gray')
plt.show() | code |
90128354/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns
data_path = '/kaggle/input/covidx9a/'
images_path = '/kaggle/input/covidx-cxr2/train'
data_file = 'train_COVIDx9A.txt'
train = pd.read_csv(os.path.join(data_path, data_file), header=None, sep=' ')
train.columns = ['patient id', 'filename', 'class', 'data source']
data_classes = train['class'].unique()
df_summary_count = pd.DataFrame()
for dataset in ['cohen', 'fig1', 'actmed', 'sirm', 'ricord', 'rsna', 'stonybrook', 'bimcv', 'rnsa']:
num_negative = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[0]), 'filename'].count()
if len(data_classes) == 2:
num_positive = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[1]), 'filename'].count()
df_new = pd.DataFrame({'Dataset': [dataset], 'Covid': [num_positive], 'Negative': [num_negative]})
elif len(data_classes) == 3:
num_pneumonia = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[1]), 'filename'].count()
num_positive = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[2]), 'filename'].count()
df_new = pd.DataFrame({'Dataset': [dataset], 'Covid': [num_positive], 'Pneumonia': [num_pneumonia], 'Negative': [num_negative]})
df_summary_count = pd.concat([df_summary_count, df_new])
patient_distribution = train.groupby(['patient id', 'data source', 'class']).count().reset_index()
patient_distribution.rename(columns={'filename': 'num_patients'}, inplace=True)
print('No. of unique patients by data source:')
num_patients_bydata = patient_distribution[['data source', 'num_patients']].groupby(['data source']).count()
display(num_patients_bydata)
print('No. of unique patients by class:')
num_patients_byclass = patient_distribution[['class', 'num_patients']].groupby(['class']).count()
display(num_patients_byclass) | code |
90128354/cell_12 | [
"text_plain_output_1.png"
] | from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
data_path = '/kaggle/input/covidx9a/'
images_path = '/kaggle/input/covidx-cxr2/train'
data_file = 'train_COVIDx9A.txt'
train = pd.read_csv(os.path.join(data_path, data_file), header=None, sep=' ')
train.columns = ['patient id', 'filename', 'class', 'data source']
data_classes = train['class'].unique()
df_summary_count = pd.DataFrame()
for dataset in ['cohen', 'fig1', 'actmed', 'sirm', 'ricord', 'rsna', 'stonybrook', 'bimcv', 'rnsa']:
num_negative = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[0]), 'filename'].count()
if len(data_classes) == 2:
num_positive = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[1]), 'filename'].count()
df_new = pd.DataFrame({'Dataset': [dataset], 'Covid': [num_positive], 'Negative': [num_negative]})
elif len(data_classes) == 3:
num_pneumonia = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[1]), 'filename'].count()
num_positive = train.loc[(train['data source'] == dataset) & (train['class'] == data_classes[2]), 'filename'].count()
df_new = pd.DataFrame({'Dataset': [dataset], 'Covid': [num_positive], 'Pneumonia': [num_pneumonia], 'Negative': [num_negative]})
df_summary_count = pd.concat([df_summary_count, df_new])
patient_distribution = train.groupby(['patient id', 'data source', 'class']).count().reset_index()
patient_distribution.rename(columns={'filename': 'num_patients'}, inplace=True)
num_patients_bydata = patient_distribution[['data source', 'num_patients']].groupby(['data source']).count()
num_patients_byclass = patient_distribution[['class', 'num_patients']].groupby(['class']).count()
def crop_resize_image(gray_img, final_size=224):
""" Set the new dimensions so the cropped image is a square
"""
width, height = gray_img.size
diff = abs(width - height)
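    # Split the width/height difference across the two crop margins; the +0.5 branch
    # keeps the crop box integral when the difference is odd.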
left, right, top, bottom = (0, 0, 0, 0)
if diff % 2 == 0:
if width > height:
bottom = height
left = diff / 2
right = width - left
elif height > width:
top = diff / 2
bottom = height - top
right = width
elif width > height:
bottom = height
left = diff / 2 + 0.5
right = width - left + 1
elif height > width:
top = diff / 2 + 0.5
bottom = height - top + 1
right = width
img_cropped = gray_img.crop((left, top, right, bottom))
img_final = img_cropped.resize((final_size, final_size))
return img_final
### Look at a few images to explore:
# a) what do the scans look like for each class?
# b) what is the image resolution?
# c) is there anything noticeable across classes / images?
# Kaggle dataset
print('Images are saved at:', images_path)
fig, axs = plt.subplots(3, 3, figsize=(18, 14))
for i in range(3):
    for j in range(3):
        if j == 0:
            file_name, class_label = train[train['class'] == data_classes[0]].iloc[i, [1, 2]]
        elif j == 1:
            file_name, class_label = train[train['class'] == data_classes[1]].iloc[i, [1, 2]]
        elif j == 2 and len(data_classes) == 3:
            file_name, class_label = train[train['class'] == data_classes[2]].iloc[i, [1, 2]]
        else:
            print('Out of bounds')
            continue
        image_file = os.path.join(images_path, file_name)
        img = Image.open(image_file)
        print('Original:', 3 * i + j, np.asarray(img).shape)
        # Convert to greyscale
        img = img.convert('L')
        axs[i, j].set_title(f'Class: {class_label} - Image Size: {np.asarray(img).shape}')
        axs[i, j].axis('off')
        axs[i, j].imshow(img, cmap='gray')
plt.show()
final_size = 224
fig, axs = plt.subplots(3, 3, figsize=(18, 14))
for i in range(3):
    for j in range(3):
        if j == 0:
            file_name, class_label = train[train['class'] == data_classes[0]].iloc[i, [1, 2]]
        elif j == 1:
            file_name, class_label = train[train['class'] == data_classes[1]].iloc[i, [1, 2]]
        elif j == 2 and len(data_classes) == 3:
            file_name, class_label = train[train['class'] == data_classes[2]].iloc[i, [1, 2]]
        else:
            print('Out of bounds')
            continue
        image_file = os.path.join(images_path, file_name)
        img = Image.open(image_file)
        img = img.convert('L')
        img_final = crop_resize_image(img, final_size=final_size)
        axs[i, j].set_title(f'Class: {class_label} - Image Size: {np.asarray(img_final).shape}')
        axs[i, j].axis('off')
        axs[i, j].imshow(img_final, cmap='gray')
plt.show() | code
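Pillow also provides a one-call version of this crop-and-resize; a sketch, assuming the grayscale img opened above (ImageOps.fit center-crops to the target aspect ratio before resizing, so it is roughly equivalent to crop_resize_image):

from PIL import ImageOps

# Center-crop to a square and resize in one step (roughly equivalent to crop_resize_image(img, 224))
img_final = ImageOps.fit(img, (224, 224), centering=(0.5, 0.5))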
90128354/cell_5 | [
"text_html_output_2.png",
"text_html_output_1.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import os
import pandas as pd
data_path = '/kaggle/input/covidx9a/'
images_path = '/kaggle/input/covidx-cxr2/train'
data_file = 'train_COVIDx9A.txt'
train = pd.read_csv(os.path.join(data_path, data_file), header=None, sep=' ')
train.columns = ['patient id', 'filename', 'class', 'data source']
print('Classes:\n', train['class'].unique())
print('Data sources:\n', train['data source'].unique())
print('---------------------------------')
print('No. of unique patients:', train['patient id'].nunique(), 'out of', train.shape[0], 'images.') | code |
32071200/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
team_stats = pd.read_csv('/kaggle/input/college-basketball-dataset/cbb.csv')
team_stats.groupby('YEAR').size()
team_stats.groupby('TEAM').size()[team_stats.groupby('TEAM').size() == 1]
team_stats['ADJOE'].idxmax()
team_stats.loc[1, 'POSTSEASON'] | code
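The two statements above can be chained so the lookup uses the computed index rather than a hard-coded row label; a short sketch:

# Postseason finish of the team with the highest adjusted offensive efficiency
best_offense_idx = team_stats['ADJOE'].idxmax()
print(team_stats.loc[best_offense_idx, ['TEAM', 'YEAR', 'POSTSEASON']])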
32071200/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
team_stats = pd.read_csv('/kaggle/input/college-basketball-dataset/cbb.csv')
team_stats.head(5) | code |
32071200/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
team_stats = pd.read_csv('/kaggle/input/college-basketball-dataset/cbb.csv')
team_stats.groupby('YEAR').size()
team_stats.groupby('TEAM').size()[team_stats.groupby('TEAM').size() == 1] | code |
32071200/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
team_stats = pd.read_csv('/kaggle/input/college-basketball-dataset/cbb.csv')
team_stats.groupby('YEAR').size()
team_stats.groupby('TEAM').size()[team_stats.groupby('TEAM').size() == 1]
avg_off = team_stats['ADJOE'].mean()
avg_def = team_stats['ADJDE'].mean()
avg_def - team_stats[team_stats['POSTSEASON'] == 'Champions']['ADJDE'].mean() | code |
32071200/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
team_stats = pd.read_csv('/kaggle/input/college-basketball-dataset/cbb.csv')
team_stats.groupby('YEAR').size() | code |
32071200/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
team_stats = pd.read_csv('/kaggle/input/college-basketball-dataset/cbb.csv')
team_stats.groupby('YEAR').size()
team_stats.groupby('TEAM').size()[team_stats.groupby('TEAM').size() == 1]
avg_off = team_stats['ADJOE'].mean()
avg_def = team_stats['ADJDE'].mean()
team_stats[team_stats['POSTSEASON'] == 'Champions']['ADJOE'].mean() - avg_off | code |
72074805/cell_13 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/bike-sharing-demand/train.csv')
test_df = pd.read_csv('/kaggle/input/bike-sharing-demand/test.csv')
train_df = train_df[np.abs(train_df['count'] - train_df['count'].mean()) <= 3 * train_df['count'].std()]
train_df.columns | code |
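The three-sigma filter used above generalizes to a small helper; a sketch, with an IQR-based variant shown as an alternative (not what the notebook itself applies):

import numpy as np
import pandas as pd

def drop_sigma_outliers(df: pd.DataFrame, col: str, k: float = 3.0) -> pd.DataFrame:
    # Keep rows within k standard deviations of the column mean
    return df[np.abs(df[col] - df[col].mean()) <= k * df[col].std()]

def drop_iqr_outliers(df: pd.DataFrame, col: str, k: float = 1.5) -> pd.DataFrame:
    # Keep rows within k interquartile ranges of the quartiles (more robust to skew)
    q1, q3 = df[col].quantile([0.25, 0.75])
    iqr = q3 - q1
    return df[df[col].between(q1 - k * iqr, q3 + k * iqr)]

train_df = drop_sigma_outliers(train_df, 'count')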
72074805/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/bike-sharing-demand/train.csv')
test_df = pd.read_csv('/kaggle/input/bike-sharing-demand/test.csv')
train_df.info() | code |
72074805/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/bike-sharing-demand/train.csv')
test_df = pd.read_csv('/kaggle/input/bike-sharing-demand/test.csv')
submission_df = pd.read_csv('/kaggle/input/bike-sharing-demand/sampleSubmission.csv')
submission_df.head() | code |
72074805/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/bike-sharing-demand/train.csv')
test_df = pd.read_csv('/kaggle/input/bike-sharing-demand/test.csv')
print(train_df.isnull().sum())
print('*' * 50)
print(test_df.isnull().sum()) | code |
72074805/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/bike-sharing-demand/train.csv')
train_df.head() | code |
72074805/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/bike-sharing-demand/train.csv')
test_df = pd.read_csv('/kaggle/input/bike-sharing-demand/test.csv')
import matplotlib.pyplot as plt
import seaborn as sns
# Derive an 'hour' column from the datetime string (assumed from an earlier feature-engineering cell) so the hourly boxplot below runs
train_df['hour'] = pd.to_datetime(train_df['datetime']).dt.hour
fig, axes = plt.subplots(nrows=2, ncols=2)
fig.set_size_inches(12, 10)
sns.boxplot(data=train_df, y='count', orient='v', ax=axes[0][0])
sns.boxplot(data=train_df, y='count', x='season', orient='v', ax=axes[0][1])
sns.boxplot(data=train_df, y='count', x='hour', orient='v', ax=axes[1][0])
sns.boxplot(data=train_df, y='count', x='workingday', orient='v', ax=axes[1][1])
axes[0][0].set(ylabel='Count', title='Box Plot On Count')
axes[0][1].set(xlabel='Season', ylabel='Count', title='Box Plot On Count Across Season')
axes[1][0].set(xlabel='Hour Of The Day', ylabel='Count', title='Box Plot On Count Across Hour Of The Day')
axes[1][1].set(xlabel='Working Day', ylabel='Count', title='Box Plot On Count Across Working Day') | code |
72074805/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
72074805/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/bike-sharing-demand/train.csv')
test_df = pd.read_csv('/kaggle/input/bike-sharing-demand/test.csv')
train_df.describe() | code |
72074805/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/bike-sharing-demand/train.csv')
test_df = pd.read_csv('/kaggle/input/bike-sharing-demand/test.csv')
import matplotlib.pyplot as plt
import seaborn as sns
# Derive an 'hour' column from the datetime string (assumed from an earlier feature-engineering cell) so the hourly boxplot below runs
train_df['hour'] = pd.to_datetime(train_df['datetime']).dt.hour
fig, axes = plt.subplots(nrows=2, ncols=2)
fig.set_size_inches(12, 10)
sns.boxplot(data=train_df, y='count', orient='v', ax=axes[0][0])
sns.boxplot(data=train_df, y='count', x='season', orient='v', ax=axes[0][1])
sns.boxplot(data=train_df, y='count', x='hour', orient='v', ax=axes[1][0])
sns.boxplot(data=train_df, y='count', x='workingday', orient='v', ax=axes[1][1])
axes[0][0].set(ylabel='Count', title='Box Plot On Count')
axes[0][1].set(xlabel='Season', ylabel='Count', title='Box Plot On Count Across Season')
axes[1][0].set(xlabel='Hour Of The Day', ylabel='Count', title='Box Plot On Count Across Hour Of The Day')
axes[1][1].set(xlabel='Working Day', ylabel='Count', title='Box Plot On Count Across Working Day')
train_df = train_df[np.abs(train_df['count'] - train_df['count'].mean()) <= 3 * train_df['count'].std()]
train_df.columns
train_df = train_df.drop(['datetime'], axis=1)
fig, ax = plt.subplots(figsize=(15, 15))
sns.heatmap(train_df.corr(), annot=True, ax=ax) | code |
72074805/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/bike-sharing-demand/train.csv')
test_df = pd.read_csv('/kaggle/input/bike-sharing-demand/test.csv')
test_df.head() | code |
72074805/cell_12 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/bike-sharing-demand/train.csv')
test_df = pd.read_csv('/kaggle/input/bike-sharing-demand/test.csv')
print('shape with outliers: ', train_df.shape)
train_df = train_df[np.abs(train_df['count'] - train_df['count'].mean()) <= 3 * train_df['count'].std()]
print('shape without outliers: ', train_df.shape) | code |
72074805/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/bike-sharing-demand/train.csv')
test_df = pd.read_csv('/kaggle/input/bike-sharing-demand/test.csv')
submission_df = pd.read_csv('/kaggle/input/bike-sharing-demand/sampleSubmission.csv')
submission_df['count'].value_counts() | code |
32068402/cell_42 | [
"text_plain_output_1.png"
] | from matplotlib import pylab
from sklearn.manifold import TSNE
from sklearn.preprocessing import normalize
import numpy as np
import os
fasttext_model_dir = '../input/fasttext-no-subwords-trigrams'
num_points = 400
first_line = True
index_to_word = []
with open(os.path.join(fasttext_model_dir, 'word-vectors-100d.txt'), 'r') as f:
for line_num, line in enumerate(f):
if first_line:
dim = int(line.strip().split()[1])
word_vecs = np.zeros((num_points, dim), dtype=float)
first_line = False
continue
line = line.strip()
word = line.split()[0]
vec = word_vecs[line_num - 1]
for index, vec_val in enumerate(line.split()[1:]):
vec[index] = float(vec_val)
index_to_word.append(word)
if line_num >= num_points:
break
word_vecs = normalize(word_vecs, copy=False, return_norm=False)
tsne = TSNE(perplexity=40, n_components=2, init='pca', n_iter=10000)
two_d_embeddings = tsne.fit_transform(word_vecs[:num_points])
labels = index_to_word[:num_points]
def plot(embeddings, labels):
pylab.figure(figsize=(20, 20))
for i, label in enumerate(labels):
x, y = embeddings[i, :]
pylab.scatter(x, y)
pylab.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom')
pylab.show()
plot(two_d_embeddings, labels) | code |
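Since the rows of word_vecs were L2-normalized above, cosine similarity between two loaded words reduces to a dot product; a sketch reusing the arrays built in this cell:

import numpy as np

def cosine(word_a: str, word_b: str) -> float:
    # Rows of word_vecs are unit-norm, so the dot product is the cosine similarity
    i, j = index_to_word.index(word_a), index_to_word.index(word_b)
    return float(np.dot(word_vecs[i], word_vecs[j]))

# Works for any two of the first 400 words loaded above
print(cosine(index_to_word[0], index_to_word[1]))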
32068402/cell_56 | [
"text_plain_output_1.png"
] | from gensim.models.phrases import Phraser
from pprint import pprint
from sklearn.preprocessing import normalize
import gensim.models.keyedvectors as word2vec
import numpy as np
import os
import pandas as pd
sentences_df = pd.read_csv('../input/covid19sentencesmetadata/sentences_with_metadata.csv')
bigram_model = Phraser.load('../input/covid19phrasesmodels/covid_bigram_model_v0.pkl')
trigram_model = Phraser.load('../input/covid19phrasesmodels/covid_trigram_model_v0.pkl')
fasttext_model_dir = '../input/fasttext-no-subwords-trigrams'
num_points = 400
first_line = True
index_to_word = []
with open(os.path.join(fasttext_model_dir, 'word-vectors-100d.txt'), 'r') as f:
for line_num, line in enumerate(f):
if first_line:
dim = int(line.strip().split()[1])
word_vecs = np.zeros((num_points, dim), dtype=float)
first_line = False
continue
line = line.strip()
word = line.split()[0]
vec = word_vecs[line_num - 1]
for index, vec_val in enumerate(line.split()[1:]):
vec[index] = float(vec_val)
index_to_word.append(word)
if line_num >= num_points:
break
word_vecs = normalize(word_vecs, copy=False, return_norm=False)
from pprint import pprint
import gensim.models.keyedvectors as word2vec
fasttext_model = word2vec.KeyedVectors.load_word2vec_format(os.path.join(fasttext_model_dir, 'word-vectors-100d.txt'))
def print_most_similar(search_term):
    synonyms = fasttext_model.most_similar(search_term)
    print(f"Terms most similar to '{search_term}':")
    pprint(synonyms)
def create_articles_metadata_mapping(sentences_df: pd.DataFrame) -> dict:
sentence_id_to_metadata = {}
for row_count, row in sentences_df.iterrows():
sentence_id_to_metadata[row_count] = dict(paper_id=row['paper_id'], cord_uid=row['cord_uid'], source=row['source'], url=row['url'], publish_time=row['publish_time'], authors=row['authors'], section=row['section'], sentence=row['sentence'])
return sentence_id_to_metadata
sentence_id_to_metadata = create_articles_metadata_mapping(sentences_df)
search_engine = SearchEngine(sentence_id_to_metadata, sentences_df, bigram_model, trigram_model, fasttext_model) | code |
32068402/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd
sentences_df = pd.read_csv('../input/covid19sentencesmetadata/sentences_with_metadata.csv')
sentences_df.head() | code |
32068402/cell_33 | [
"text_plain_output_1.png"
] | from gensim.models.phrases import Phraser
from typing import List
import contractions
import ftfy
import re
import spacy
import string
import spacy
import scispacy
nlp = spacy.load('../input/scispacymodels/en_core_sci_sm/en_core_sci_sm-0.2.4')
nlp.max_length = 2000000
import re
CURRENCIES = {'$': 'USD', 'zł': 'PLN', '£': 'GBP', '¥': 'JPY', '฿': 'THB', '₡': 'CRC', '₦': 'NGN', '₩': 'KRW', '₪': 'ILS', '₫': 'VND', '€': 'EUR', '₱': 'PHP', '₲': 'PYG', '₴': 'UAH', '₹': 'INR'}
RE_NUMBER = re.compile('(?:^|(?<=[^\\w,.]))[+–-]?(([1-9]\\d{0,2}(,\\d{3})+(\\.\\d*)?)|([1-9]\\d{0,2}([ .]\\d{3})+(,\\d*)?)|(\\d*?[.,]\\d+)|\\d+)(?:$|(?=\\b))')
RE_URL = re.compile('((http://www\\.|https://www\\.|http://|https://)?' + '[a-z0-9]+([\\-.][a-z0-9]+)*\\.[a-z]{2,5}(:[0-9]{1,5})?(/.*)?)')
STOP_WORDS = {'a', 'an', 'and', 'are', 'as', 'at', 'be', 'but', 'by', 'for', 'if', 'in', 'into', 'is', 'it', 'no', 'not', 'of', 'on', 'or', 'such', 'that', 'the', 'their', 'then', 'there', 'these', 'they', 'this', 'to', 'was', 'will', 'with'}
import string
from typing import List
import ftfy
import contractions
def clean_tokenized_sentence(tokens: List[str], unicode_normalization='NFC', unpack_contractions=False, replace_currency_symbols=False, remove_punct=True, remove_numbers=False, lowercase=True, remove_urls=True, remove_stop_words=True) -> str:
if remove_stop_words:
tokens = [token for token in tokens if token not in STOP_WORDS]
sentence = ' '.join(tokens)
if unicode_normalization:
sentence = ftfy.fix_text(sentence, normalization=unicode_normalization)
if unpack_contractions:
sentence = contractions.fix(sentence, slang=False)
if replace_currency_symbols:
for currency_sign, currency_tok in CURRENCIES.items():
sentence = sentence.replace(currency_sign, f'{currency_tok} ')
if remove_urls:
sentence = RE_URL.sub('_URL_', sentence)
if remove_punct:
sentence = sentence.translate(str.maketrans('', '', string.punctuation))
sentence = re.sub(' +', ' ', sentence)
if remove_numbers:
sentence = RE_NUMBER.sub('_NUMBER_', sentence)
if lowercase:
sentence = sentence.lower()
return sentence
def clean_sentence(sentence) -> str:
doc = nlp(sentence)
tokens = [str(token) for token in doc]
return clean_tokenized_sentence(tokens)
bigram_model = Phraser.load('../input/covid19phrasesmodels/covid_bigram_model_v0.pkl')
trigram_model = Phraser.load('../input/covid19phrasesmodels/covid_trigram_model_v0.pkl')
def clean_sentence(sentence, bigram_model=None, trigram_model=None) -> str:
doc = nlp(sentence)
tokens = [str(token) for token in doc]
cleaned_sentence = clean_tokenized_sentence(tokens)
if bigram_model and trigram_model:
sentence_with_bigrams = bigram_model[cleaned_sentence.split(' ')]
sentence_with_trigrams = trigram_model[sentence_with_bigrams]
return ' '.join(sentence_with_trigrams)
return cleaned_sentence
print(clean_sentence('On 23 January 2020, the Coalition for Epidemic Preparedness Innovations (CEPI) announced that they will fund vaccine development programmes with Inovio', bigram_model, trigram_model)) | code |
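The bigram and trigram Phraser models loaded above were trained offline; a hedged sketch of how such models are typically built with gensim (the tokenized_sentences corpus and the min_count/threshold values are assumptions, since the training cell is not shown):

from gensim.models.phrases import Phrases, Phraser

# tokenized_sentences: iterable of token lists from the cleaned corpus (assumed to exist)
bigram_phrases = Phrases(tokenized_sentences, min_count=5, threshold=10.0)
bigram_model = Phraser(bigram_phrases)  # frozen, lighter model for fast phrase lookup
trigram_phrases = Phrases(bigram_model[tokenized_sentences], min_count=5, threshold=10.0)
trigram_model = Phraser(trigram_phrases)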
32068402/cell_65 | [
"text_plain_output_1.png"
] | bart_summarizer = BartSummarizer() | code |
32068402/cell_48 | [
"text_plain_output_1.png"
] | from pprint import pprint
from sklearn.preprocessing import normalize
import gensim.models.keyedvectors as word2vec
import numpy as np
import os
fasttext_model_dir = '../input/fasttext-no-subwords-trigrams'
num_points = 400
first_line = True
index_to_word = []
with open(os.path.join(fasttext_model_dir, 'word-vectors-100d.txt'), 'r') as f:
for line_num, line in enumerate(f):
if first_line:
dim = int(line.strip().split()[1])
word_vecs = np.zeros((num_points, dim), dtype=float)
first_line = False
continue
line = line.strip()
word = line.split()[0]
vec = word_vecs[line_num - 1]
for index, vec_val in enumerate(line.split()[1:]):
vec[index] = float(vec_val)
index_to_word.append(word)
if line_num >= num_points:
break
word_vecs = normalize(word_vecs, copy=False, return_norm=False)
from pprint import pprint
import gensim.models.keyedvectors as word2vec
fasttext_model = word2vec.KeyedVectors.load_word2vec_format(os.path.join(fasttext_model_dir, 'word-vectors-100d.txt'))
def print_most_similar(search_term):
    synonyms = fasttext_model.most_similar(search_term)
    print(f"Terms most similar to '{search_term}':")
    pprint(synonyms)
print_most_similar('pathogen') | code |
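most_similar also accepts combined queries over the same embedding space; a sketch (the query terms are illustrative and must exist in the model's vocabulary):

# Nearest neighbours of the averaged query vector for two related terms
pprint(fasttext_model.most_similar(positive=['virus', 'transmission'], topn=5))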
32068402/cell_73 | [
"text_plain_output_1.png"
] | from IPython.display import display, HTML
from datetime import datetime
from gensim.models.phrases import Phraser
from pprint import pprint
from sklearn.preprocessing import normalize
from transformers import BartTokenizer, BartForConditionalGeneration
from typing import List
import contractions
import ftfy
import gensim.models.keyedvectors as word2vec
import json
import numpy as np
import operator
import os
import pandas as pd
import re
import string
import torch
import re
CURRENCIES = {'$': 'USD', 'zł': 'PLN', '£': 'GBP', '¥': 'JPY', '฿': 'THB', '₡': 'CRC', '₦': 'NGN', '₩': 'KRW', '₪': 'ILS', '₫': 'VND', '€': 'EUR', '₱': 'PHP', '₲': 'PYG', '₴': 'UAH', '₹': 'INR'}
RE_NUMBER = re.compile('(?:^|(?<=[^\\w,.]))[+–-]?(([1-9]\\d{0,2}(,\\d{3})+(\\.\\d*)?)|([1-9]\\d{0,2}([ .]\\d{3})+(,\\d*)?)|(\\d*?[.,]\\d+)|\\d+)(?:$|(?=\\b))')
RE_URL = re.compile('((http://www\\.|https://www\\.|http://|https://)?' + '[a-z0-9]+([\\-.][a-z0-9]+)*\\.[a-z]{2,5}(:[0-9]{1,5})?(/.*)?)')
STOP_WORDS = {'a', 'an', 'and', 'are', 'as', 'at', 'be', 'but', 'by', 'for', 'if', 'in', 'into', 'is', 'it', 'no', 'not', 'of', 'on', 'or', 'such', 'that', 'the', 'their', 'then', 'there', 'these', 'they', 'this', 'to', 'was', 'will', 'with'}
import string
from typing import List
import ftfy
import contractions
def clean_tokenized_sentence(tokens: List[str], unicode_normalization='NFC', unpack_contractions=False, replace_currency_symbols=False, remove_punct=True, remove_numbers=False, lowercase=True, remove_urls=True, remove_stop_words=True) -> str:
if remove_stop_words:
tokens = [token for token in tokens if token not in STOP_WORDS]
sentence = ' '.join(tokens)
if unicode_normalization:
sentence = ftfy.fix_text(sentence, normalization=unicode_normalization)
if unpack_contractions:
sentence = contractions.fix(sentence, slang=False)
if replace_currency_symbols:
for currency_sign, currency_tok in CURRENCIES.items():
sentence = sentence.replace(currency_sign, f'{currency_tok} ')
if remove_urls:
sentence = RE_URL.sub('_URL_', sentence)
if remove_punct:
sentence = sentence.translate(str.maketrans('', '', string.punctuation))
sentence = re.sub(' +', ' ', sentence)
if remove_numbers:
sentence = RE_NUMBER.sub('_NUMBER_', sentence)
if lowercase:
sentence = sentence.lower()
return sentence
sentences_df = pd.read_csv('../input/covid19sentencesmetadata/sentences_with_metadata.csv')
bigram_model = Phraser.load('../input/covid19phrasesmodels/covid_bigram_model_v0.pkl')
trigram_model = Phraser.load('../input/covid19phrasesmodels/covid_trigram_model_v0.pkl')
fasttext_model_dir = '../input/fasttext-no-subwords-trigrams'
num_points = 400
first_line = True
index_to_word = []
with open(os.path.join(fasttext_model_dir, 'word-vectors-100d.txt'), 'r') as f:
for line_num, line in enumerate(f):
if first_line:
dim = int(line.strip().split()[1])
word_vecs = np.zeros((num_points, dim), dtype=float)
first_line = False
continue
line = line.strip()
word = line.split()[0]
vec = word_vecs[line_num - 1]
for index, vec_val in enumerate(line.split()[1:]):
vec[index] = float(vec_val)
index_to_word.append(word)
if line_num >= num_points:
break
word_vecs = normalize(word_vecs, copy=False, return_norm=False)
from pprint import pprint
import gensim.models.keyedvectors as word2vec
fasttext_model = word2vec.KeyedVectors.load_word2vec_format(os.path.join(fasttext_model_dir, 'word-vectors-100d.txt'))
def print_most_similar(search_term):
    synonyms = fasttext_model.most_similar(search_term)
    print(f"Terms most similar to '{search_term}':")
    pprint(synonyms)
def create_articles_metadata_mapping(sentences_df: pd.DataFrame) -> dict:
sentence_id_to_metadata = {}
for row_count, row in sentences_df.iterrows():
sentence_id_to_metadata[row_count] = dict(paper_id=row['paper_id'], cord_uid=row['cord_uid'], source=row['source'], url=row['url'], publish_time=row['publish_time'], authors=row['authors'], section=row['section'], sentence=row['sentence'])
return sentence_id_to_metadata
sentence_id_to_metadata = create_articles_metadata_mapping(sentences_df)
import operator
from datetime import datetime
class SearchEngine:
def __init__(self, sentence_id_to_metadata: dict, sentences_df: pd.DataFrame, bigram_model, trigram_model, fasttext_model):
self.sentence_id_to_metadata = sentence_id_to_metadata
self.cleaned_sentences = sentences_df['cleaned_sentence'].tolist()
self.bigram_model = bigram_model
self.trigram_model = trigram_model
self.fasttext_model = fasttext_model
def _get_search_terms(self, keywords, synonyms_threshold):
cleaned_terms = [clean_tokenized_sentence(keyword.split(' ')) for keyword in keywords]
cleaned_terms = [term for term in cleaned_terms if term]
terms_with_bigrams = self.bigram_model[' '.join(cleaned_terms).split(' ')]
terms_with_trigrams = self.trigram_model[terms_with_bigrams]
search_terms = [self.fasttext_model.most_similar(token) for token in terms_with_trigrams]
search_terms = [synonym[0] for synonyms in search_terms for synonym in synonyms if synonym[1] >= synonyms_threshold]
search_terms = list(terms_with_trigrams) + search_terms
return search_terms
def search(self, keywords: List[str], optional_keywords=None, top_n: int=10, synonyms_threshold=0.7, keyword_weight: float=3.0, optional_keyword_weight: float=0.5) -> List[dict]:
if optional_keywords is None:
optional_keywords = []
search_terms = self._get_search_terms(keywords, synonyms_threshold)
optional_search_terms = self._get_search_terms(optional_keywords, synonyms_threshold) if optional_keywords else []
date_today = datetime.today()
indexes = []
match_counts = []
days_diffs = []
for sentence_index, sentence in enumerate(self.cleaned_sentences):
sentence_tokens = sentence.split(' ')
sentence_tokens_set = set(sentence_tokens)
match_count = sum([keyword_weight if keyword in sentence_tokens_set else 0 for keyword in search_terms])
if match_count > 0:
indexes.append(sentence_index)
if optional_search_terms:
match_count += sum([optional_keyword_weight if keyword in sentence_tokens_set else 0 for keyword in optional_search_terms])
match_counts.append(match_count)
article_date = self.sentence_id_to_metadata[sentence_index]['publish_time']
if article_date == '2020':
article_date = '2020-01-01'
article_date = datetime.strptime(article_date, '%Y-%m-%d')
days_diff = (date_today - article_date).days
days_diffs.append(days_diff)
match_counts = [float(match_count) / sum(match_counts) for match_count in match_counts]
days_diffs = [max(days_diffs) - days_diff for days_diff in days_diffs]
days_diffs = [float(days_diff) / sum(days_diffs) for days_diff in days_diffs]
index_to_score = {}
for index, match_count, days_diff in zip(indexes, match_counts, days_diffs):
index_to_score[index] = 0.7 * match_count + 0.3 * days_diff
sorted_indexes = sorted(index_to_score.items(), key=operator.itemgetter(1), reverse=True)
sorted_indexes = [item[0] for item in sorted_indexes]
sorted_indexes = sorted_indexes[0:min(top_n, len(sorted_indexes))]
results = []
for index in sorted_indexes:
results.append(self.sentence_id_to_metadata[index])
return results
task_id = 2
import json
with open(f'../input/covid19seedsentences/{task_id}.json') as in_fp:
seed_sentences_json = json.load(in_fp)
import torch
from transformers import BartTokenizer, BartForConditionalGeneration
class BartSummarizer:
def __init__(self):
self.torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
model_name = 'bart-large-cnn'
self.tokenizer_summarize = BartTokenizer.from_pretrained(model_name)
self.model_summarize = BartForConditionalGeneration.from_pretrained(model_name)
self.model_summarize.to(self.torch_device)
self.model_summarize.eval()
def create_summary(self, text: str, repetition_penalty=1.0) -> str:
text_input_ids = self.tokenizer_summarize.batch_encode_plus([text], return_tensors='pt', max_length=1024)['input_ids'].to(self.torch_device)
summary_ids = self.model_summarize.generate(text_input_ids, num_beams=4, max_length=1024, min_length=256, no_repeat_ngram_size=4, repetition_penalty=repetition_penalty)
summary = self.tokenizer_summarize.decode(summary_ids.squeeze(), skip_special_tokens=True)
return summary
bart_summarizer = BartSummarizer()
with open(f'../input/covid19seedsentences/{task_id}_relevant_sentences.json') as in_fp:
relevant_sentences_json = json.load(in_fp)
answers_results = []
for idx, sub_task_json in enumerate(relevant_sentences_json['subTasks']):
sub_task_description = sub_task_json['sub_task_description']
best_sentences = seed_sentences_json['subTasks'][idx]['bestSentences']
relevant_sentences = sub_task_json['relevant_sentences']
relevant_sentences_texts = [result['sentence'] for result in relevant_sentences]
sub_task_summary = bart_summarizer.create_summary(' '.join(best_sentences + relevant_sentences_texts))
answers_results.append(dict(sub_task_description=sub_task_description, relevant_sentences=relevant_sentences, sub_task_summary=sub_task_summary))
from IPython.display import display, HTML
pd.set_option('display.max_colwidth', 0)
def display_summary(summary: str):
    # Render the generated summary as simple HTML (layout is illustrative; the original body was a bare return)
    display(HTML(f'<h3>Summary:</h3><p>{summary}</p>'))

def display_sub_task_description(sub_task_description):
    display(HTML(f'<h4>{sub_task_description}</h4>'))

def display_task_name(task_name):
    display(HTML(f'<h2>{task_name}</h2>'))

def visualize_output(sub_task_json):
    """
    Displays the output for each sub-task: its description, generated summary, and a table of relevant sentences
    """
    display_sub_task_description(sub_task_json['sub_task_description'])
    display_summary(sub_task_json['sub_task_summary'])
    sentence_output = pd.DataFrame(sub_task_json.get('relevant_sentences'))
    sentence_output.rename(columns={'sentence': 'Relevant Sentence', 'cord_uid': 'CORD UID', 'publish_time': 'Publish Time', 'url': 'URL', 'source': 'Source'}, inplace=True)
    display(sentence_output)
display_task_name(seed_sentences_json['taskName'])
for sub_task_json in answers_results:
visualize_output(sub_task_json) | code |
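With the engine built, a query is a plain keyword list; a usage sketch against the search signature defined above (the keywords are illustrative):

# Rank sentences by weighted keyword/synonym matches plus recency; return the top 5 with metadata
results = search_engine.search(keywords=['covid-19', 'transmission'], optional_keywords=['incubation'], top_n=5)
for result in results:
    print(result['publish_time'], '-', result['sentence'])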
32068402/cell_61 | [
"text_plain_output_1.png"
] | import json
task_id = 2
import json
with open(f'../input/covid19seedsentences/{task_id}.json') as in_fp:
seed_sentences_json = json.load(in_fp)
print(seed_sentences_json['taskName']) | code |
32068402/cell_11 | [
"text_plain_output_1.png"
] | # Install scispacy package
!pip install scispacy | code |
32068402/cell_19 | [
"text_plain_output_1.png"
] | from typing import List
import contractions
import ftfy
import re
import spacy
import string
import spacy
import scispacy
nlp = spacy.load('../input/scispacymodels/en_core_sci_sm/en_core_sci_sm-0.2.4')
nlp.max_length = 2000000
import re
CURRENCIES = {'$': 'USD', 'zł': 'PLN', '£': 'GBP', '¥': 'JPY', '฿': 'THB', '₡': 'CRC', '₦': 'NGN', '₩': 'KRW', '₪': 'ILS', '₫': 'VND', '€': 'EUR', '₱': 'PHP', '₲': 'PYG', '₴': 'UAH', '₹': 'INR'}
RE_NUMBER = re.compile('(?:^|(?<=[^\\w,.]))[+–-]?(([1-9]\\d{0,2}(,\\d{3})+(\\.\\d*)?)|([1-9]\\d{0,2}([ .]\\d{3})+(,\\d*)?)|(\\d*?[.,]\\d+)|\\d+)(?:$|(?=\\b))')
RE_URL = re.compile('((http://www\\.|https://www\\.|http://|https://)?' + '[a-z0-9]+([\\-.][a-z0-9]+)*\\.[a-z]{2,5}(:[0-9]{1,5})?(/.*)?)')
STOP_WORDS = {'a', 'an', 'and', 'are', 'as', 'at', 'be', 'but', 'by', 'for', 'if', 'in', 'into', 'is', 'it', 'no', 'not', 'of', 'on', 'or', 'such', 'that', 'the', 'their', 'then', 'there', 'these', 'they', 'this', 'to', 'was', 'will', 'with'}
import string
from typing import List
import ftfy
import contractions
def clean_tokenized_sentence(tokens: List[str], unicode_normalization='NFC', unpack_contractions=False, replace_currency_symbols=False, remove_punct=True, remove_numbers=False, lowercase=True, remove_urls=True, remove_stop_words=True) -> str:
if remove_stop_words:
tokens = [token for token in tokens if token not in STOP_WORDS]
sentence = ' '.join(tokens)
if unicode_normalization:
sentence = ftfy.fix_text(sentence, normalization=unicode_normalization)
if unpack_contractions:
sentence = contractions.fix(sentence, slang=False)
if replace_currency_symbols:
for currency_sign, currency_tok in CURRENCIES.items():
sentence = sentence.replace(currency_sign, f'{currency_tok} ')
if remove_urls:
sentence = RE_URL.sub('_URL_', sentence)
if remove_punct:
sentence = sentence.translate(str.maketrans('', '', string.punctuation))
sentence = re.sub(' +', ' ', sentence)
if remove_numbers:
sentence = RE_NUMBER.sub('_NUMBER_', sentence)
if lowercase:
sentence = sentence.lower()
return sentence
def clean_sentence(sentence) -> str:
doc = nlp(sentence)
tokens = [str(token) for token in doc]
return clean_tokenized_sentence(tokens)
print(clean_sentence("Let's clean this sentence!")) | code |
32068402/cell_50 | [
"text_plain_output_1.png"
] | [(0, '0.079*"•" + 0.019*"blood" + 0.015*"associated" + 0.013*"cells" + 0.012*"ace2" + 0.012*"protein" + 0.011*"important" + 0.011*"levels" + 0.010*"diseases" + 0.010*"cell"'), (1, '0.110*"who" + 0.088*"it" + 0.056*"response" + 0.043*"could" + 0.036*"under" + 0.035*"available" + 0.032*"major" + 0.032*"as" + 0.030*"without" + 0.024*"muscle"'), (2, '0.173*"■" + 0.020*"some" + 0.013*"drugs" + 0.010*"transmission" + 0.009*"surgery" + 0.009*"must" + 0.009*"drug" + 0.009*"there" + 0.008*"increased" + 0.008*"high"'), (3, '0.071*"de" + 0.036*"were" + 0.025*"patient" + 0.023*"1" + 0.022*"after" + 0.018*"a" + 0.018*"more" + 0.015*"all" + 0.015*"when" + 0.014*"cause"'), (4, '0.044*"the" + 0.035*"from" + 0.028*"should" + 0.019*"other" + 0.018*"risk" + 0.017*"oral" + 0.017*"which" + 0.017*"in" + 0.013*"use" + 0.013*"cases"'), (5, '0.069*"may" + 0.033*"can" + 0.031*"have" + 0.029*"disease" + 0.028*"dental" + 0.022*"also" + 0.020*"has" + 0.020*"been" + 0.018*"health" + 0.016*"virus"'), (6, '0.051*"la" + 0.031*"en" + 0.025*"2" + 0.023*"3" + 0.016*"que" + 0.016*"el" + 0.016*"y" + 0.014*"los" + 0.014*"4" + 0.013*"les"'), (7, '0.045*"s" + 0.041*"et" + 0.031*"during" + 0.023*"al" + 0.022*"had" + 0.021*"people" + 0.020*"à" + 0.018*"local" + 0.017*"days" + 0.016*"2020"'), (8, '0.062*"patients" + 0.030*"treatment" + 0.028*"care" + 0.020*"used" + 0.014*"clinical" + 0.014*"infection" + 0.013*"common" + 0.013*"severe" + 0.013*"respiratory" + 0.012*"dentistry"'), (9, '0.030*"using" + 0.020*"areas" + 0.018*"ct" + 0.014*"described" + 0.014*"performed" + 0.013*"lesions" + 0.013*"above" + 0.012*"day" + 0.011*"learning" + 0.011*"reactions"')] | code
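Topic lists in this weight*"word" form are what gensim's LdaModel emits; a hedged sketch of how such topics are produced (tokenized_docs and the hyperparameters are assumptions, since the training cell is not shown):

from gensim.corpora import Dictionary
from gensim.models import LdaModel

# tokenized_docs: list of token lists from the cleaned sentences (assumed to exist)
dictionary = Dictionary(tokenized_docs)
corpus = [dictionary.doc2bow(doc) for doc in tokenized_docs]
lda = LdaModel(corpus=corpus, id2word=dictionary, num_topics=10, passes=5)
print(lda.print_topics(num_topics=10, num_words=10))  # (topic_id, 'weight*"word" + ...') pairs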
32068402/cell_68 | [
"text_plain_output_5.png",
"text_plain_output_9.png",
"text_plain_output_4.png",
"text_plain_output_6.png",
"text_plain_output_8.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import json
task_id = 2
import json
with open(f'../input/covid19seedsentences/{task_id}.json') as in_fp:
seed_sentences_json = json.load(in_fp)
bart_summarizer = BartSummarizer()
with open(f'../input/covid19seedsentences/{task_id}_relevant_sentences.json') as in_fp:
relevant_sentences_json = json.load(in_fp)
answers_results = []
for idx, sub_task_json in enumerate(relevant_sentences_json['subTasks']):
sub_task_description = sub_task_json['sub_task_description']
print(f'Working on task: {sub_task_description}')
best_sentences = seed_sentences_json['subTasks'][idx]['bestSentences']
relevant_sentences = sub_task_json['relevant_sentences']
relevant_sentences_texts = [result['sentence'] for result in relevant_sentences]
sub_task_summary = bart_summarizer.create_summary(' '.join(best_sentences + relevant_sentences_texts))
answers_results.append(dict(sub_task_description=sub_task_description, relevant_sentences=relevant_sentences, sub_task_summary=sub_task_summary)) | code |
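One caveat in the loop above: batch_encode_plus caps input at 1024 tokens, so very long sentence lists are silently truncated before summarization. A hedged sketch of a chunked alternative (the character budget is an illustrative stand-in for a proper token count):

def summarize_in_chunks(summarizer, sentences, chunk_chars=4000):
    # Summarize long inputs piece by piece, then summarize the concatenated partial summaries
    chunks, current = [], ''
    for sentence in sentences:
        if current and len(current) + len(sentence) > chunk_chars:
            chunks.append(current)
            current = ''
        current = (current + ' ' + sentence).strip()
    if current:
        chunks.append(current)
    partial = [summarizer.create_summary(chunk) for chunk in chunks]
    return partial[0] if len(partial) == 1 else summarizer.create_summary(' '.join(partial))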