path (string, 13 to 17 chars) | screenshot_names (sequence, 1 to 873 items) | code (string, 0 to 40.4k chars) | cell_type (string, 1 class) |
---|---|---|---|
33098715/cell_10 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv', index_col=0)
data.isnull().sum()
plt.tight_layout()
plt.xticks(rotation=90)
plt.figure(figsize=(18, 8))
sns.countplot(data['model'])
plt.tight_layout()
plt.xticks(rotation=90)
plt.xlabel('Car Models')
plt.show() | code |
33098715/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv', index_col=0)
data.isnull().sum()
plt.tight_layout()
plt.xticks(rotation=90)
plt.tight_layout()
plt.xticks(rotation=90)
plt.figure(figsize=(18, 8))
data.groupby('brand')['price'].mean().sort_values(ascending=False).plot.bar()
plt.xticks(rotation=90)
plt.ylabel('Mean Price')
plt.xlabel('Car Brands')
plt.tight_layout()
plt.show() | code |
33098715/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv('/kaggle/input/usa-cers-dataset/USA_cars_datasets.csv', index_col=0)
print(f'This dataset has {data.shape[0]} rows')
print(f'This dataset has {data.shape[1]} columns') | code |
34134065/cell_42 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
q5d = abstracts[abstracts['abstract'].str.contains('distribution')]
q5d.shape | code |
34134065/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
Q1E = abstracts[abstracts['abstract'].str.contains('risk population')]
Q1E.shape | code |
34134065/cell_13 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.head(3) | code |
34134065/cell_25 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
Q2c = abstracts[abstracts['abstract'].str.contains('poor')]
Q2c.shape | code |
34134065/cell_34 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
q4B = abstracts[abstracts['abstract'].str.contains('community spread')]
q4B.shape | code |
34134065/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
Q2a = abstracts[abstracts['abstract'].str.contains('homeless')]
Q2a.shape | code |
34134065/cell_30 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
q3c = abstracts[abstracts['abstract'].str.contains('hospital patients')]
q3c.shape | code |
34134065/cell_33 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
q4A = abstracts[abstracts['abstract'].str.contains('compliance')]
q4A.shape | code |
34134065/cell_44 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
q5b = abstracts[abstracts['abstract'].str.contains('improve access')]
q5b.shape
q5c = abstracts[abstracts['abstract'].str.contains('access to')]
q5c.shape
Question5 = pd.concat([q5c, q5b])
Question5.dropna(inplace=True)
Question5.shape
Question5 | code |
34134065/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
Q1D = abstracts[abstracts['abstract'].str.contains('elderly')]
Q1D.shape
Q1D = abstracts[abstracts['abstract'].str.contains('health care workers')]
Q1D.shape | code |
34134065/cell_40 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
q5b = abstracts[abstracts['abstract'].str.contains('improve access')]
q5b.shape | code |
34134065/cell_29 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
q3b = abstracts[abstracts['abstract'].str.contains('hospital spread')]
q3b.shape | code |
34134065/cell_39 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
q5a = abstracts[abstracts['abstract'].str.contains('resources')]
q5a.shape | code |
34134065/cell_26 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
Q2d = abstracts[abstracts['abstract'].str.contains('housing')]
Q2d.shape | code |
34134065/cell_48 | [
"text_plain_output_1.png"
] | pip install google | code |
34134065/cell_41 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
q5c = abstracts[abstracts['abstract'].str.contains('access to')]
q5c.shape | code |
34134065/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
Q1D = abstracts[abstracts['abstract'].str.contains('elderly')]
Q1D.shape | code |
34134065/cell_50 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
q5b = abstracts[abstracts['abstract'].str.contains('improve access')]
q5b.shape
q5c = abstracts[abstracts['abstract'].str.contains('access to')]
q5c.shape
Question5 = pd.concat([q5c, q5b])
Question5.dropna(inplace=True)
Question5.shape
PopStudies = pd.DataFrame(j, columns=['url'])
PopStudies | code |
34134065/cell_49 | [
"text_plain_output_1.png"
] | from googlesearch import search
try:
from googlesearch import search
except ImportError:
print('Error/Not found')
query = 'COVID 19 population studies'
for j in search(query, tld='co.in', num=10, stop=10, pause=2):
print(j) | code |
34134065/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
Q1C = abstracts[abstracts['abstract'].str.contains('contacting')]
Q1C.shape | code |
34134065/cell_51 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from googlesearch import search
from googlesearch import search
try:
from googlesearch import search
except ImportError:
print('Error/Not found')
query2 = 'COVID 19 resources failure'
for j2 in search(query2, tld='co.in', num=10, stop=10, pause=2):
print(j2) | code |
34134065/cell_28 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
q3a = abstracts[abstracts['abstract'].str.contains('nosocomial')]
q3a.shape | code |
34134065/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
tablesTable = t[['Question', 'Table Format']]
tablesTable | code |
34134065/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
Q1A = abstracts[abstracts['abstract'].str.contains('communicating')]
Q1A.shape | code |
34134065/cell_47 | [
"text_html_output_1.png"
] | pip install beautifulsoup4 | code |
34134065/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
Q1B = abstracts[abstracts['abstract'].str.contains('reaching out')]
Q1B | code |
34134065/cell_35 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
q4C = abstracts[abstracts['abstract'].str.contains('prevent spread')]
q4C.shape | code |
34134065/cell_43 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
q5b = abstracts[abstracts['abstract'].str.contains('improve access')]
q5b.shape
q5c = abstracts[abstracts['abstract'].str.contains('access to')]
q5c.shape
Question5 = pd.concat([q5c, q5b])
Question5.dropna(inplace=True)
Question5.shape | code |
34134065/cell_31 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
q3d = abstracts[abstracts['abstract'].str.contains('nosocomial outbreak')]
q3d.shape | code |
34134065/cell_24 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
Q2b = abstracts[abstracts['abstract'].str.contains('low income')]
Q2b.shape | code |
34134065/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape | code |
34134065/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
df1.head(3) | code |
34134065/cell_12 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
journals['words'].head() | code |
34134065/cell_36 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TABLEFORMAT = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/list_of_tables_and_table_formats.csv')
t = TABLEFORMAT[0:5]
df1 = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv')
journals = df1[['title', 'abstract', 'publish_time', 'url', 'authors']]
journals['words'] = journals.abstract.str.strip().str.split('[\\W_]+')
abstracts = journals[journals.words.str.len() > 0]
abstracts.to_csv('COVID19_Journal_Abrstracts.csv')
abstracts.shape
q4D = abstracts[abstracts['abstract'].str.contains('methods to prevent')]
q4D.shape | code |
2002739/cell_13 | [
"image_output_1.png"
] | from pandas.plotting import autocorrelation_plot, lag_plot
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
cityTable = pd.read_csv('../input/city_attributes.csv')
temperatureDF = pd.read_csv('../input/temperature.csv', index_col=0)
temperatureDF.index = pd.to_datetime(temperatureDF.index)
cityTable
#%% show several temperature plots to get a feel for the dataset
citiesToShow = ['Los Angeles','Chicago','Montreal','Houston']
t0 = temperatureDF.index
t1 = pd.date_range(pd.to_datetime('1/7/2015',dayfirst=True),pd.to_datetime('1/10/2016',dayfirst=True),freq='H')
t2 = pd.date_range(pd.to_datetime('1/7/2015',dayfirst=True),pd.to_datetime('1/9/2015' ,dayfirst=True),freq='H')
t3 = pd.date_range(pd.to_datetime('1/7/2015',dayfirst=True),pd.to_datetime('21/7/2015',dayfirst=True),freq='H')
fig, ax = plt.subplots(nrows=4,ncols=1,figsize=(20,15))
temperatureDF.loc[t0,citiesToShow].plot(ax=ax[0]);
temperatureDF.loc[t1,citiesToShow].plot(ax=ax[1],legend=False);
temperatureDF.loc[t2,citiesToShow].plot(ax=ax[2],legend=False);
temperatureDF.loc[t3,citiesToShow].plot(ax=ax[3],legend=False);
ax[0].legend(loc='upper left',fontsize=20,bbox_to_anchor=(0.02,1.3), ncol=len(citiesToShow))
for i in range(len(ax)): ax[i].set_ylabel('Temperature [$^\circ$K]', fontsize=15)
plt.tight_layout()
#%% show autocorr and lag plots
cityToShow = 'Los Angeles'
selectedLagPoints = [1,3,6,9,12,24,36,48,60]
maxLagDays = 7
originalSignal = temperatureDF[cityToShow]
# set grid spec of the subplots
plt.figure(figsize=(12,6))
gs = gridspec.GridSpec(2, len(selectedLagPoints))
axTopRow = plt.subplot(gs[0, :])
axBottomRow = []
for i in range(len(selectedLagPoints)):
axBottomRow.append(plt.subplot(gs[1, i]))
# plot autocorr
allTimeLags = np.arange(1,maxLagDays*24)
autoCorr = [originalSignal.autocorr(lag=dt) for dt in allTimeLags]
axTopRow.plot(allTimeLags,autoCorr); axTopRow.set_title('Autocorrelation Plot of Temperature Signal')
axTopRow.set_xlabel('time lag [hours]'); axTopRow.set_ylabel('correlation coefficient')
selectedAutoCorr = [originalSignal.autocorr(lag=dt) for dt in selectedLagPoints]
axTopRow.scatter(x=selectedLagPoints, y=selectedAutoCorr, s=50, c='r')
# plot scatter plot of selected points
for i in range(len(selectedLagPoints)):
lag_plot(originalSignal, lag=selectedLagPoints[i], s=5, ax=axBottomRow[i])
if i >= 1:
axBottomRow[i].set_yticks([],[])
plt.tight_layout()
#%% zoom in and out on the autocorr plot
fig, ax = plt.subplots(nrows=4,ncols=1, figsize=(13,11))
timeLags = np.arange(1,25*24*30)
autoCorr = [originalSignal.autocorr(lag=dt) for dt in timeLags]
ax[0].plot(1.0/(24*30)*timeLags, autoCorr); ax[0].set_title('Autocorrelation Plot')
ax[0].set_xlabel('time lag [months]'); ax[0].set_ylabel('correlation coeff')
timeLags = np.arange(1,20*24*7)
autoCorr = [originalSignal.autocorr(lag=dt) for dt in timeLags]
ax[1].plot(1.0/(24*7)*timeLags, autoCorr);
ax[1].set_xlabel('time lag [weeks]'); ax[1].set_ylabel('correlation coeff')
timeLags = np.arange(1,20*24)
autoCorr = [originalSignal.autocorr(lag=dt) for dt in timeLags]
ax[2].plot(1.0/24*timeLags, autoCorr);
ax[2].set_xlabel('time lag [days]'); ax[2].set_ylabel('correlation coeff')
timeLags = np.arange(1,3*24)
autoCorr = [originalSignal.autocorr(lag=dt) for dt in timeLags]
ax[3].plot(timeLags, autoCorr);
ax[3].set_xlabel('time lag [hours]'); ax[3].set_ylabel('correlation coeff')
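#%% apply rolling mean and plot the signal (low pass filter)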
windowSize = 5 * 24
lowPassFilteredSignal = originalSignal.rolling(windowSize, center=True).mean()
t0 = temperatureDF.index
t1 = pd.date_range(pd.to_datetime('1/7/2015', dayfirst=True), pd.to_datetime('1/10/2016', dayfirst=True), freq='H')
t2 = pd.date_range(pd.to_datetime('1/7/2015', dayfirst=True), pd.to_datetime('1/9/2015', dayfirst=True), freq='H')
t3 = pd.date_range(pd.to_datetime('1/7/2015', dayfirst=True), pd.to_datetime('21/7/2015', dayfirst=True), freq='H')
fig, ax = plt.subplots(nrows=4, ncols=1, figsize=(20, 15))
ax[0].plot(t0, originalSignal)
ax[0].plot(t0, lowPassFilteredSignal)
ax[1].plot(t1, originalSignal[t1])
ax[1].plot(t1, lowPassFilteredSignal[t1])
ax[2].plot(t2, originalSignal[t2])
ax[2].plot(t2, lowPassFilteredSignal[t2])
ax[3].plot(t3, originalSignal[t3])
ax[3].plot(t3, lowPassFilteredSignal[t3])
ax[0].legend(['original', 'filtered'], fontsize=20, loc='upper left', bbox_to_anchor=(0.02, 1.3), ncol=len(citiesToShow))
for i in range(len(ax)):
ax[i].set_ylabel('Temperature [$^\\circ$K]') | code |
2002739/cell_4 | [
"image_output_1.png"
] | import pandas as pd
cityTable = pd.read_csv('../input/city_attributes.csv')
temperatureDF = pd.read_csv('../input/temperature.csv', index_col=0)
temperatureDF.index = pd.to_datetime(temperatureDF.index)
cityTable | code |
2002739/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
cityTable = pd.read_csv('../input/city_attributes.csv')
temperatureDF = pd.read_csv('../input/temperature.csv', index_col=0)
temperatureDF.index = pd.to_datetime(temperatureDF.index)
cityTable
citiesToShow = ['Los Angeles', 'Chicago', 'Montreal', 'Houston']
t0 = temperatureDF.index
t1 = pd.date_range(pd.to_datetime('1/7/2015', dayfirst=True), pd.to_datetime('1/10/2016', dayfirst=True), freq='H')
t2 = pd.date_range(pd.to_datetime('1/7/2015', dayfirst=True), pd.to_datetime('1/9/2015', dayfirst=True), freq='H')
t3 = pd.date_range(pd.to_datetime('1/7/2015', dayfirst=True), pd.to_datetime('21/7/2015', dayfirst=True), freq='H')
fig, ax = plt.subplots(nrows=4, ncols=1, figsize=(20, 15))
temperatureDF.loc[t0, citiesToShow].plot(ax=ax[0])
temperatureDF.loc[t1, citiesToShow].plot(ax=ax[1], legend=False)
temperatureDF.loc[t2, citiesToShow].plot(ax=ax[2], legend=False)
temperatureDF.loc[t3, citiesToShow].plot(ax=ax[3], legend=False)
ax[0].legend(loc='upper left', fontsize=20, bbox_to_anchor=(0.02, 1.3), ncol=len(citiesToShow))
for i in range(len(ax)):
ax[i].set_ylabel('Temperature [$^\\circ$K]', fontsize=15)
plt.tight_layout() | code |
2002739/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from pandas.plotting import autocorrelation_plot, lag_plot
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
cityTable = pd.read_csv('../input/city_attributes.csv')
temperatureDF = pd.read_csv('../input/temperature.csv', index_col=0)
temperatureDF.index = pd.to_datetime(temperatureDF.index)
cityTable
#%% show several temperature plots to get a feel for the dataset
citiesToShow = ['Los Angeles','Chicago','Montreal','Houston']
t0 = temperatureDF.index
t1 = pd.date_range(pd.to_datetime('1/7/2015',dayfirst=True),pd.to_datetime('1/10/2016',dayfirst=True),freq='H')
t2 = pd.date_range(pd.to_datetime('1/7/2015',dayfirst=True),pd.to_datetime('1/9/2015' ,dayfirst=True),freq='H')
t3 = pd.date_range(pd.to_datetime('1/7/2015',dayfirst=True),pd.to_datetime('21/7/2015',dayfirst=True),freq='H')
fig, ax = plt.subplots(nrows=4,ncols=1,figsize=(20,15))
temperatureDF.loc[t0,citiesToShow].plot(ax=ax[0]);
temperatureDF.loc[t1,citiesToShow].plot(ax=ax[1],legend=False);
temperatureDF.loc[t2,citiesToShow].plot(ax=ax[2],legend=False);
temperatureDF.loc[t3,citiesToShow].plot(ax=ax[3],legend=False);
ax[0].legend(loc='upper left',fontsize=20,bbox_to_anchor=(0.02,1.3), ncol=len(citiesToShow))
for i in range(len(ax)): ax[i].set_ylabel('Temperature [$^\circ$K]', fontsize=15)
plt.tight_layout()
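#%% show autocorr and lag plots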
cityToShow = 'Los Angeles'
selectedLagPoints = [1, 3, 6, 9, 12, 24, 36, 48, 60]
maxLagDays = 7
originalSignal = temperatureDF[cityToShow]
plt.figure(figsize=(12, 6))
gs = gridspec.GridSpec(2, len(selectedLagPoints))
axTopRow = plt.subplot(gs[0, :])
axBottomRow = []
for i in range(len(selectedLagPoints)):
axBottomRow.append(plt.subplot(gs[1, i]))
allTimeLags = np.arange(1, maxLagDays * 24)
autoCorr = [originalSignal.autocorr(lag=dt) for dt in allTimeLags]
axTopRow.plot(allTimeLags, autoCorr)
axTopRow.set_title('Autocorrelation Plot of Temperature Signal')
axTopRow.set_xlabel('time lag [hours]')
axTopRow.set_ylabel('correlation coefficient')
selectedAutoCorr = [originalSignal.autocorr(lag=dt) for dt in selectedLagPoints]
axTopRow.scatter(x=selectedLagPoints, y=selectedAutoCorr, s=50, c='r')
for i in range(len(selectedLagPoints)):
lag_plot(originalSignal, lag=selectedLagPoints[i], s=5, ax=axBottomRow[i])
if i >= 1:
axBottomRow[i].set_yticks([], [])
plt.tight_layout() | code |
2002739/cell_15 | [
"image_output_1.png"
] | from pandas.plotting import autocorrelation_plot, lag_plot
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
cityTable = pd.read_csv('../input/city_attributes.csv')
temperatureDF = pd.read_csv('../input/temperature.csv', index_col=0)
temperatureDF.index = pd.to_datetime(temperatureDF.index)
cityTable
#%% show several temperature plots to get a feel for the dataset
citiesToShow = ['Los Angeles','Chicago','Montreal','Houston']
t0 = temperatureDF.index
t1 = pd.date_range(pd.to_datetime('1/7/2015',dayfirst=True),pd.to_datetime('1/10/2016',dayfirst=True),freq='H')
t2 = pd.date_range(pd.to_datetime('1/7/2015',dayfirst=True),pd.to_datetime('1/9/2015' ,dayfirst=True),freq='H')
t3 = pd.date_range(pd.to_datetime('1/7/2015',dayfirst=True),pd.to_datetime('21/7/2015',dayfirst=True),freq='H')
fig, ax = plt.subplots(nrows=4,ncols=1,figsize=(20,15))
temperatureDF.loc[t0,citiesToShow].plot(ax=ax[0]);
temperatureDF.loc[t1,citiesToShow].plot(ax=ax[1],legend=False);
temperatureDF.loc[t2,citiesToShow].plot(ax=ax[2],legend=False);
temperatureDF.loc[t3,citiesToShow].plot(ax=ax[3],legend=False);
ax[0].legend(loc='upper left',fontsize=20,bbox_to_anchor=(0.02,1.3), ncol=len(citiesToShow))
for i in range(len(ax)): ax[i].set_ylabel('Temperature [$^\circ$K]', fontsize=15)
plt.tight_layout()
#%% show autocorr and lag plots
cityToShow = 'Los Angeles'
selectedLagPoints = [1,3,6,9,12,24,36,48,60]
maxLagDays = 7
originalSignal = temperatureDF[cityToShow]
# set grid spec of the subplots
plt.figure(figsize=(12,6))
gs = gridspec.GridSpec(2, len(selectedLagPoints))
axTopRow = plt.subplot(gs[0, :])
axBottomRow = []
for i in range(len(selectedLagPoints)):
axBottomRow.append(plt.subplot(gs[1, i]))
# plot autocorr
allTimeLags = np.arange(1,maxLagDays*24)
autoCorr = [originalSignal.autocorr(lag=dt) for dt in allTimeLags]
axTopRow.plot(allTimeLags,autoCorr); axTopRow.set_title('Autocorrelation Plot of Temperature Signal')
axTopRow.set_xlabel('time lag [hours]'); axTopRow.set_ylabel('correlation coefficient')
selectedAutoCorr = [originalSignal.autocorr(lag=dt) for dt in selectedLagPoints]
axTopRow.scatter(x=selectedLagPoints, y=selectedAutoCorr, s=50, c='r')
# plot scatter plot of selected points
for i in range(len(selectedLagPoints)):
lag_plot(originalSignal, lag=selectedLagPoints[i], s=5, ax=axBottomRow[i])
if i >= 1:
axBottomRow[i].set_yticks([],[])
plt.tight_layout()
#%% zoom in and out on the autocorr plot
fig, ax = plt.subplots(nrows=4,ncols=1, figsize=(13,11))
timeLags = np.arange(1,25*24*30)
autoCorr = [originalSignal.autocorr(lag=dt) for dt in timeLags]
ax[0].plot(1.0/(24*30)*timeLags, autoCorr); ax[0].set_title('Autocorrelation Plot')
ax[0].set_xlabel('time lag [months]'); ax[0].set_ylabel('correlation coeff')
timeLags = np.arange(1,20*24*7)
autoCorr = [originalSignal.autocorr(lag=dt) for dt in timeLags]
ax[1].plot(1.0/(24*7)*timeLags, autoCorr);
ax[1].set_xlabel('time lag [weeks]'); ax[1].set_ylabel('correlation coeff')
timeLags = np.arange(1,20*24)
autoCorr = [originalSignal.autocorr(lag=dt) for dt in timeLags]
ax[2].plot(1.0/24*timeLags, autoCorr);
ax[2].set_xlabel('time lag [days]'); ax[2].set_ylabel('correlation coeff')
timeLags = np.arange(1,3*24)
autoCorr = [originalSignal.autocorr(lag=dt) for dt in timeLags]
ax[3].plot(timeLags, autoCorr);
ax[3].set_xlabel('time lag [hours]'); ax[3].set_ylabel('correlation coeff')
#%% apply rolling mean and plot the signal (low pass filter)
windowSize = 5*24
lowPassFilteredSignal = originalSignal.rolling(windowSize, center=True).mean()
t0 = temperatureDF.index
t1 = pd.date_range(pd.to_datetime('1/7/2015',dayfirst=True),pd.to_datetime('1/10/2016',dayfirst=True),freq='H')
t2 = pd.date_range(pd.to_datetime('1/7/2015',dayfirst=True),pd.to_datetime('1/9/2015' ,dayfirst=True),freq='H')
t3 = pd.date_range(pd.to_datetime('1/7/2015',dayfirst=True),pd.to_datetime('21/7/2015',dayfirst=True),freq='H')
fig, ax = plt.subplots(nrows=4,ncols=1,figsize=(20,15))
ax[0].plot(t0,originalSignal)
ax[0].plot(t0,lowPassFilteredSignal)
ax[1].plot(t1,originalSignal[t1])
ax[1].plot(t1,lowPassFilteredSignal[t1])
ax[2].plot(t2,originalSignal[t2])
ax[2].plot(t2,lowPassFilteredSignal[t2])
ax[3].plot(t3,originalSignal[t3])
ax[3].plot(t3,lowPassFilteredSignal[t3])
ax[0].legend(['original','filtered'],fontsize=20,loc='upper left',bbox_to_anchor=(0.02,1.3), ncol=len(citiesToShow))
for i in range(len(ax)): ax[i].set_ylabel('Temperature [$^\circ$K]')
highPassFilteredSignal = originalSignal - lowPassFilteredSignal
fig, ax = plt.subplots(nrows=4, ncols=1, figsize=(20, 15))
ax[0].plot(t0, highPassFilteredSignal)
ax[1].plot(t1, highPassFilteredSignal[t1])
ax[2].plot(t2, highPassFilteredSignal[t2])
ax[3].plot(t3, highPassFilteredSignal[t3])
ax[0].set_title('deflection of temperature from local mean', fontsize=20)
for i in range(len(ax)):
ax[i].set_ylabel('$\\Delta$ Temperature [$^\\circ$K]') | code |
2002739/cell_17 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from pandas.plotting import autocorrelation_plot, lag_plot
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
cityTable = pd.read_csv('../input/city_attributes.csv')
temperatureDF = pd.read_csv('../input/temperature.csv', index_col=0)
temperatureDF.index = pd.to_datetime(temperatureDF.index)
cityTable
#%% show several temperature plots to get a feel for the dataset
citiesToShow = ['Los Angeles','Chicago','Montreal','Houston']
t0 = temperatureDF.index
t1 = pd.date_range(pd.to_datetime('1/7/2015',dayfirst=True),pd.to_datetime('1/10/2016',dayfirst=True),freq='H')
t2 = pd.date_range(pd.to_datetime('1/7/2015',dayfirst=True),pd.to_datetime('1/9/2015' ,dayfirst=True),freq='H')
t3 = pd.date_range(pd.to_datetime('1/7/2015',dayfirst=True),pd.to_datetime('21/7/2015',dayfirst=True),freq='H')
fig, ax = plt.subplots(nrows=4,ncols=1,figsize=(20,15))
temperatureDF.loc[t0,citiesToShow].plot(ax=ax[0]);
temperatureDF.loc[t1,citiesToShow].plot(ax=ax[1],legend=False);
temperatureDF.loc[t2,citiesToShow].plot(ax=ax[2],legend=False);
temperatureDF.loc[t3,citiesToShow].plot(ax=ax[3],legend=False);
ax[0].legend(loc='upper left',fontsize=20,bbox_to_anchor=(0.02,1.3), ncol=len(citiesToShow))
for i in range(len(ax)): ax[i].set_ylabel('Temperature [$^\circ$K]', fontsize=15)
plt.tight_layout()
#%% show autocorr and lag plots
cityToShow = 'Los Angeles'
selectedLagPoints = [1,3,6,9,12,24,36,48,60]
maxLagDays = 7
originalSignal = temperatureDF[cityToShow]
# set grid spec of the subplots
plt.figure(figsize=(12,6))
gs = gridspec.GridSpec(2, len(selectedLagPoints))
axTopRow = plt.subplot(gs[0, :])
axBottomRow = []
for i in range(len(selectedLagPoints)):
axBottomRow.append(plt.subplot(gs[1, i]))
# plot autocorr
allTimeLags = np.arange(1,maxLagDays*24)
autoCorr = [originalSignal.autocorr(lag=dt) for dt in allTimeLags]
axTopRow.plot(allTimeLags,autoCorr); axTopRow.set_title('Autocorrelation Plot of Temperature Signal')
axTopRow.set_xlabel('time lag [hours]'); axTopRow.set_ylabel('correlation coefficient')
selectedAutoCorr = [originalSignal.autocorr(lag=dt) for dt in selectedLagPoints]
axTopRow.scatter(x=selectedLagPoints, y=selectedAutoCorr, s=50, c='r')
# plot scatter plot of selected points
for i in range(len(selectedLagPoints)):
lag_plot(originalSignal, lag=selectedLagPoints[i], s=5, ax=axBottomRow[i])
if i >= 1:
axBottomRow[i].set_yticks([],[])
plt.tight_layout()
#%% zoom in and out on the autocorr plot
fig, ax = plt.subplots(nrows=4,ncols=1, figsize=(13,11))
timeLags = np.arange(1,25*24*30)
autoCorr = [originalSignal.autocorr(lag=dt) for dt in timeLags]
ax[0].plot(1.0/(24*30)*timeLags, autoCorr); ax[0].set_title('Autocorrelation Plot')
ax[0].set_xlabel('time lag [months]'); ax[0].set_ylabel('correlation coeff')
timeLags = np.arange(1,20*24*7)
autoCorr = [originalSignal.autocorr(lag=dt) for dt in timeLags]
ax[1].plot(1.0/(24*7)*timeLags, autoCorr);
ax[1].set_xlabel('time lag [weeks]'); ax[1].set_ylabel('correlation coeff')
timeLags = np.arange(1,20*24)
autoCorr = [originalSignal.autocorr(lag=dt) for dt in timeLags]
ax[2].plot(1.0/24*timeLags, autoCorr);
ax[2].set_xlabel('time lag [days]'); ax[2].set_ylabel('correlation coeff')
timeLags = np.arange(1,3*24)
autoCorr = [originalSignal.autocorr(lag=dt) for dt in timeLags]
ax[3].plot(timeLags, autoCorr);
ax[3].set_xlabel('time lag [hours]'); ax[3].set_ylabel('correlation coeff')
#%% apply rolling mean and plot the signal (low pass filter)
windowSize = 5*24
lowPassFilteredSignal = originalSignal.rolling(windowSize, center=True).mean()
t0 = temperatureDF.index
t1 = pd.date_range(pd.to_datetime('1/7/2015',dayfirst=True),pd.to_datetime('1/10/2016',dayfirst=True),freq='H')
t2 = pd.date_range(pd.to_datetime('1/7/2015',dayfirst=True),pd.to_datetime('1/9/2015' ,dayfirst=True),freq='H')
t3 = pd.date_range(pd.to_datetime('1/7/2015',dayfirst=True),pd.to_datetime('21/7/2015',dayfirst=True),freq='H')
fig, ax = plt.subplots(nrows=4,ncols=1,figsize=(20,15))
ax[0].plot(t0,originalSignal)
ax[0].plot(t0,lowPassFilteredSignal)
ax[1].plot(t1,originalSignal[t1])
ax[1].plot(t1,lowPassFilteredSignal[t1])
ax[2].plot(t2,originalSignal[t2])
ax[2].plot(t2,lowPassFilteredSignal[t2])
ax[3].plot(t3,originalSignal[t3])
ax[3].plot(t3,lowPassFilteredSignal[t3])
ax[0].legend(['original','filtered'],fontsize=20,loc='upper left',bbox_to_anchor=(0.02,1.3), ncol=len(citiesToShow))
for i in range(len(ax)): ax[i].set_ylabel('Temperature [$^\circ$K]')
#%% subtract the low pass filtered singal from the original to get high pass filtered signal
highPassFilteredSignal = originalSignal - lowPassFilteredSignal
fig, ax = plt.subplots(nrows=4,ncols=1,figsize=(20,15))
ax[0].plot(t0,highPassFilteredSignal)
ax[1].plot(t1,highPassFilteredSignal[t1])
ax[2].plot(t2,highPassFilteredSignal[t2])
ax[3].plot(t3,highPassFilteredSignal[t3])
ax[0].set_title('deflection of temperature from local mean',fontsize=20)
for i in range(len(ax)): ax[i].set_ylabel('$\Delta$ Temperature [$^\circ$K]')
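#%% autocorrelation of the low pass filtered signal at monthly, weekly, daily and hourly lags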
fig, ax = plt.subplots(nrows=4, ncols=1, figsize=(13, 11))
timeLags = np.arange(1, 25 * 24 * 30)
autoCorr = [lowPassFilteredSignal.autocorr(lag=dt) for dt in timeLags]
ax[0].plot(1.0 / (24 * 30) * timeLags, autoCorr)
ax[0].set_title('Autocorrelation Plot of low Pass Filtered Signal')
ax[0].set_xlabel('time lag [months]')
ax[0].set_ylabel('correlation coeff')
timeLags = np.arange(1, 20 * 24 * 7)
autoCorr = [lowPassFilteredSignal.autocorr(lag=dt) for dt in timeLags]
ax[1].plot(1.0 / (24 * 7) * timeLags, autoCorr)
ax[1].set_xlabel('time lag [weeks]')
ax[1].set_ylabel('correlation coeff')
timeLags = np.arange(1, 20 * 24)
autoCorr = [lowPassFilteredSignal.autocorr(lag=dt) for dt in timeLags]
ax[2].plot(1.0 / 24 * timeLags, autoCorr)
ax[2].set_xlabel('time lag [days]')
ax[2].set_ylabel('correlation coeff')
timeLags = np.arange(1, 3 * 24)
autoCorr = [lowPassFilteredSignal.autocorr(lag=dt) for dt in timeLags]
ax[3].plot(timeLags, autoCorr)
ax[3].set_xlabel('time lag [hours]')
ax[3].set_ylabel('correlation coeff') | code |
2002739/cell_10 | [
"text_html_output_1.png"
] | from pandas.plotting import autocorrelation_plot, lag_plot
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
cityTable = pd.read_csv('../input/city_attributes.csv')
temperatureDF = pd.read_csv('../input/temperature.csv', index_col=0)
temperatureDF.index = pd.to_datetime(temperatureDF.index)
cityTable
#%% show several temperature plots to get a feel for the dataset
citiesToShow = ['Los Angeles','Chicago','Montreal','Houston']
t0 = temperatureDF.index
t1 = pd.date_range(pd.to_datetime('1/7/2015',dayfirst=True),pd.to_datetime('1/10/2016',dayfirst=True),freq='H')
t2 = pd.date_range(pd.to_datetime('1/7/2015',dayfirst=True),pd.to_datetime('1/9/2015' ,dayfirst=True),freq='H')
t3 = pd.date_range(pd.to_datetime('1/7/2015',dayfirst=True),pd.to_datetime('21/7/2015',dayfirst=True),freq='H')
fig, ax = plt.subplots(nrows=4,ncols=1,figsize=(20,15))
temperatureDF.loc[t0,citiesToShow].plot(ax=ax[0]);
temperatureDF.loc[t1,citiesToShow].plot(ax=ax[1],legend=False);
temperatureDF.loc[t2,citiesToShow].plot(ax=ax[2],legend=False);
temperatureDF.loc[t3,citiesToShow].plot(ax=ax[3],legend=False);
ax[0].legend(loc='upper left',fontsize=20,bbox_to_anchor=(0.02,1.3), ncol=len(citiesToShow))
for i in range(len(ax)): ax[i].set_ylabel('Temperature [$^\circ$K]', fontsize=15)
plt.tight_layout()
#%% show autocorr and lag plots
cityToShow = 'Los Angeles'
selectedLagPoints = [1,3,6,9,12,24,36,48,60]
maxLagDays = 7
originalSignal = temperatureDF[cityToShow]
# set grid spec of the subplots
plt.figure(figsize=(12,6))
gs = gridspec.GridSpec(2, len(selectedLagPoints))
axTopRow = plt.subplot(gs[0, :])
axBottomRow = []
for i in range(len(selectedLagPoints)):
axBottomRow.append(plt.subplot(gs[1, i]))
# plot autocorr
allTimeLags = np.arange(1,maxLagDays*24)
autoCorr = [originalSignal.autocorr(lag=dt) for dt in allTimeLags]
axTopRow.plot(allTimeLags,autoCorr); axTopRow.set_title('Autocorrelation Plot of Temperature Signal')
axTopRow.set_xlabel('time lag [hours]'); axTopRow.set_ylabel('correlation coefficient')
selectedAutoCorr = [originalSignal.autocorr(lag=dt) for dt in selectedLagPoints]
axTopRow.scatter(x=selectedLagPoints, y=selectedAutoCorr, s=50, c='r')
# plot scatter plot of selected points
for i in range(len(selectedLagPoints)):
lag_plot(originalSignal, lag=selectedLagPoints[i], s=5, ax=axBottomRow[i])
if i >= 1:
axBottomRow[i].set_yticks([],[])
plt.tight_layout()
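#%% zoom in and out on the autocorr plot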
fig, ax = plt.subplots(nrows=4, ncols=1, figsize=(13, 11))
timeLags = np.arange(1, 25 * 24 * 30)
autoCorr = [originalSignal.autocorr(lag=dt) for dt in timeLags]
ax[0].plot(1.0 / (24 * 30) * timeLags, autoCorr)
ax[0].set_title('Autocorrelation Plot')
ax[0].set_xlabel('time lag [months]')
ax[0].set_ylabel('correlation coeff')
timeLags = np.arange(1, 20 * 24 * 7)
autoCorr = [originalSignal.autocorr(lag=dt) for dt in timeLags]
ax[1].plot(1.0 / (24 * 7) * timeLags, autoCorr)
ax[1].set_xlabel('time lag [weeks]')
ax[1].set_ylabel('correlation coeff')
timeLags = np.arange(1, 20 * 24)
autoCorr = [originalSignal.autocorr(lag=dt) for dt in timeLags]
ax[2].plot(1.0 / 24 * timeLags, autoCorr)
ax[2].set_xlabel('time lag [days]')
ax[2].set_ylabel('correlation coeff')
timeLags = np.arange(1, 3 * 24)
autoCorr = [originalSignal.autocorr(lag=dt) for dt in timeLags]
ax[3].plot(timeLags, autoCorr)
ax[3].set_xlabel('time lag [hours]')
ax[3].set_ylabel('correlation coeff') | code |
88100476/cell_19 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import ImageDataGenerator
val_batch = 10
train_batch = 32
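# rescale and augment the training images; hold out 30% of the directory as a validation subset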
train_datagen = ImageDataGenerator(rescale=1.0 / 255, shear_range=0.4, zoom_range=0.3, validation_split=0.3, horizontal_flip=True)
train_generator = train_datagen.flow_from_directory('/kaggle/working/train', target_size=(130, 130), batch_size=train_batch, class_mode='binary', subset='training', color_mode='rgb', shuffle=True)
validation_generator = train_datagen.flow_from_directory('/kaggle/working/train', target_size=(130, 130), batch_size=val_batch, class_mode='binary', subset='validation', color_mode='rgb', shuffle=True) | code |
88100476/cell_31 | [
"text_plain_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_generator = test_datagen.flow_from_directory('/kaggle/input/dogsvscatsmytestdata/training_set/', target_size=(130, 130), batch_size=32, class_mode='binary', color_mode='rgb') | code |
88099900/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
raw_data = pd.read_csv('/kaggle/input/titanic/train.csv')
raw_test = pd.read_csv('/kaggle/input/titanic/test.csv')
train_copy = raw_data.copy()
train_copy.set_index('PassengerId', inplace=True, drop=True)
test_copy = raw_test.copy()
test_copy.set_index('PassengerId', inplace=True, drop=True)
train_copy.isnull().sum() | code |
88099900/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
raw_data = pd.read_csv('/kaggle/input/titanic/train.csv')
raw_test = pd.read_csv('/kaggle/input/titanic/test.csv')
train_copy = raw_data.copy()
train_copy.set_index('PassengerId', inplace=True, drop=True)
test_copy = raw_test.copy()
test_copy.set_index('PassengerId', inplace=True, drop=True)
train_copy.info() | code |
88099900/cell_19 | [
"text_html_output_1.png"
] | list(prefixes) | code |
88099900/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
88099900/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
raw_data = pd.read_csv('/kaggle/input/titanic/train.csv')
raw_test = pd.read_csv('/kaggle/input/titanic/test.csv')
train_copy = raw_data.copy()
train_copy.set_index('PassengerId', inplace=True, drop=True)
test_copy = raw_test.copy()
test_copy.set_index('PassengerId', inplace=True, drop=True)
train_copy.isnull().sum()
test_data = test_copy.copy()
train_data = train_copy.copy()
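# extract the title prefix from each Name (the token between the comma and the period)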
train_data['modified_name'] = train_data.Name.str.split(',', expand=True)[1]
train_data['prefix'] = train_data['modified_name'].str.split('.', expand=True)[0]
test_data['modified_name'] = test_data.Name.str.split(',', expand=True)[1]
test_data['prefix'] = test_data['modified_name'].str.split('.', expand=True)[0]
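# mean age and count per title prefix and Pclass, restricted to prefixes that occur with a missing Age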
fill = train_data[train_data.Age.isnull()]['prefix'].unique()
criteria = train_data[train_data['prefix'].isin(fill)]
train_age_summary = criteria.groupby(['prefix', 'Pclass'])['Age'].agg(['mean', 'count'])
train_age_summary
fillt = test_data[test_data.Age.isnull()]['prefix'].unique()
criteria1 = test_data[test_data['prefix'].isin(fillt)]
test_age_summary = criteria1.groupby(['prefix', 'Pclass'])['Age'].agg(['mean', 'count'])
test_age_summary | code |
88099900/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
raw_data = pd.read_csv('/kaggle/input/titanic/train.csv')
raw_test = pd.read_csv('/kaggle/input/titanic/test.csv')
train_copy = raw_data.copy()
train_copy.set_index('PassengerId', inplace=True, drop=True)
test_copy = raw_test.copy()
test_copy.set_index('PassengerId', inplace=True, drop=True)
print('Train Df- ', train_copy.shape)
print('Test Df- ', test_copy.shape) | code |
88099900/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
raw_data = pd.read_csv('/kaggle/input/titanic/train.csv')
raw_test = pd.read_csv('/kaggle/input/titanic/test.csv')
train_copy = raw_data.copy()
train_copy.set_index('PassengerId', inplace=True, drop=True)
test_copy = raw_test.copy()
test_copy.set_index('PassengerId', inplace=True, drop=True)
train_copy.isnull().sum()
test_data = test_copy.copy()
train_data = train_copy.copy()
train_data['modified_name'] = train_data.Name.str.split(',', expand=True)[1]
train_data['prefix'] = train_data['modified_name'].str.split('.', expand=True)[0]
fill = train_data[train_data.Age.isnull()]['prefix'].unique()
criteria = train_data[train_data['prefix'].isin(fill)]
train_age_summary = criteria.groupby(['prefix', 'Pclass'])['Age'].agg(['mean', 'count'])
train_age_summary | code |
88099900/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
raw_data = pd.read_csv('/kaggle/input/titanic/train.csv')
raw_test = pd.read_csv('/kaggle/input/titanic/test.csv')
train_copy = raw_data.copy()
train_copy.set_index('PassengerId', inplace=True, drop=True)
test_copy = raw_test.copy()
test_copy.set_index('PassengerId', inplace=True, drop=True)
train_copy.describe() | code |
88099900/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
raw_data = pd.read_csv('/kaggle/input/titanic/train.csv')
raw_test = pd.read_csv('/kaggle/input/titanic/test.csv')
raw_data.head(10) | code |
130002559/cell_9 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | from PIL import Image
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report, ConfusionMatrixDisplay, confusion_matrix, roc_curve, auc
from torch import nn
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
import os
import pandas as pd
import timm
import torch
import numpy as np
import pandas as pd
import os
import cv2
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from torch.optim import Adam
import glob
from tqdm.notebook import tqdm
from PIL import Image
import torchvision
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report, ConfusionMatrixDisplay, confusion_matrix, roc_curve, auc
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
class BreastCancerDataset(Dataset):
def __init__(self, data_path_file, train=True):
super(Dataset, self).__init__()
self.data = pd.read_csv(data_path_file, index_col=0)
self.global_path = '/kaggle/input/meta-data/Data_image'
if train:
self.transform = transforms.Compose([transforms.Grayscale(num_output_channels=3), transforms.Resize(size=(224, 224)), transforms.RandomHorizontalFlip(), transforms.RandomRotation(10), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
else:
self.transform = transforms.Compose([transforms.Grayscale(num_output_channels=3), transforms.Resize(size=(224, 224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
row = self.data.iloc[idx]
label = row['status']
img_path = os.path.join(self.global_path, row['Path'])
labels = torch.tensor(label)
image = Image.open(img_path)
if self.transform:
image = self.transform(image)
return (image, labels)
path_train_fold_0 = '/kaggle/input/data-ddsm-cdd-mias/DDSM/DDSM/DDSM_Fold_4/train_DDSM_fold_3.csv'
path_val_fold_0 = '/kaggle/input/data-ddsm-cdd-mias/DDSM/DDSM/DDSM_Fold_4/valid_DDSM_fold_3.csv'
path_test_DDSM = '/kaggle/input/data-ddsm-cdd-mias/Data_Test/Data_Test/Test_ddsm.csv'
train_dataset = BreastCancerDataset(path_train_fold_0, train=True)
val_dataset = BreastCancerDataset(path_val_fold_0, train=False)
test_dataset_DDSM = BreastCancerDataset(path_test_DDSM, train=False)
batch_size = 64
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True)
test_loader_DDSM = DataLoader(test_dataset_DDSM, batch_size=batch_size, shuffle=True)
def save_model(epochs, model, optimizer, criterion, pretrained='True'):
"""
Function to save the trained model to disk.
"""
torch.save({'epoch': epochs, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'loss': criterion}, f'/kaggle/working/model_pretrained_{pretrained}_{epochs}.pth')
def save_plots(train_acc, valid_acc, train_loss, valid_loss, pretrained='True'):
"""
Function to save the loss and accuracy plots to disk.
"""
import timm
model = timm.create_model('hrnet_w18', pretrained=True, num_classes=3)
criterion = nn.CrossEntropyLoss()
optimizer = Adam(model.parameters(), lr=0.001)
list_acc_train, list_acc_val, list_loss_train, list_loss_val = ([], [], [], [])
num_epochs = 100
best_f1 = 0.0
print('Begin Training')
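# main loop: one training pass and one validation pass per epoch; checkpoint whenever validation macro-F1 improves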
for epoch in tqdm(range(1, num_epochs + 1)):
model.train()
train_loss = 0.0
train_acc = 0.0
total = 0
for inputs, labels in tqdm(train_loader):
optimizer.zero_grad()
model.to(device)
inputs, labels = (inputs.to(device), labels.to(device))
outputs = model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
train_loss += loss.item()
predicted = torch.max(outputs, 1)[1].to(device)
total += labels.size(0)
train_acc += (predicted == labels).sum().item()
train_acc /= total
train_loss /= len(train_loader)
list_loss_train.append(train_loss)
list_acc_train.append(train_acc)
model.eval()
with torch.no_grad():
val_loss = 0.0
val_acc = 0.0
f1_scr = 0.0
total = 0.0
label_list, label_pred_list = ([], [])
for inputs, labels in val_loader:
inputs, labels = (inputs.to(device), labels.to(device))
outputs = model(inputs)
loss = criterion(outputs, labels)
val_loss += loss.item()
predicted = torch.max(outputs, 1)[1].to(device)
total += labels.size(0)
val_acc += (predicted == labels).sum().item()
label_list.append(labels)
label_pred_list.append(predicted)
val_acc = val_acc / total
label_list, label_pred_list = (torch.cat(label_list, 0), torch.cat(label_pred_list, 0))
f1_scr = f1_score(label_list.cpu(), label_pred_list.cpu(), average='macro')
val_loss /= len(val_loader)
list_loss_val.append(val_loss)
list_acc_val.append(val_acc)
print(f'Epoch {epoch:2d}/{num_epochs}: train_loss = {train_loss:.3f}, train_acc = {train_acc:.3f}, val_loss = {val_loss:.3f}, val_acc = {val_acc:.3f}')
if f1_scr > best_f1:
best_f1 = f1_scr
save_model(epoch, model, optimizer, criterion)
history = {'loss_train': list_loss_train, 'acc_train': list_acc_train, 'loss_val': list_loss_val, 'acc_val': list_acc_val}
save_plots(list_acc_train, list_acc_val, list_loss_train, list_loss_val)
df_history = pd.DataFrame(history)
df_history.to_csv('/kaggle/working/history.csv') | code |
130002559/cell_1 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import torch
import numpy as np
import pandas as pd
import os
import cv2
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from torch.optim import Adam
import glob
from tqdm.notebook import tqdm
from PIL import Image
import torchvision
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report, ConfusionMatrixDisplay, confusion_matrix, roc_curve, auc
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('The model will be running on', device, 'device') | code |
130002559/cell_8 | [
"text_plain_output_100.png",
"text_plain_output_84.png",
"text_plain_output_56.png",
"text_plain_output_35.png",
"text_plain_output_98.png",
"text_plain_output_43.png",
"text_plain_output_78.png",
"text_plain_output_37.png",
"text_plain_output_90.png",
"text_plain_output_79.png",
"text_plain_output_5.png",
"text_plain_output_75.png",
"text_plain_output_48.png",
"text_plain_output_30.png",
"text_plain_output_73.png",
"text_plain_output_15.png",
"text_plain_output_70.png",
"text_plain_output_9.png",
"text_plain_output_44.png",
"text_plain_output_86.png",
"text_plain_output_40.png",
"text_plain_output_74.png",
"text_plain_output_31.png",
"text_plain_output_20.png",
"text_plain_output_101.png",
"text_plain_output_60.png",
"text_plain_output_68.png",
"text_plain_output_4.png",
"text_plain_output_65.png",
"text_plain_output_64.png",
"text_plain_output_13.png",
"text_plain_output_52.png",
"text_plain_output_66.png",
"text_plain_output_45.png",
"text_plain_output_14.png",
"text_plain_output_32.png",
"text_plain_output_88.png",
"text_plain_output_29.png",
"text_plain_output_58.png",
"text_plain_output_49.png",
"text_plain_output_63.png",
"text_plain_output_27.png",
"text_plain_output_76.png",
"text_plain_output_54.png",
"text_plain_output_10.png",
"text_plain_output_6.png",
"text_plain_output_92.png",
"text_plain_output_57.png",
"text_plain_output_24.png",
"text_plain_output_21.png",
"text_plain_output_47.png",
"text_plain_output_25.png",
"text_plain_output_77.png",
"text_plain_output_18.png",
"text_plain_output_50.png",
"text_plain_output_36.png",
"text_plain_output_96.png",
"text_plain_output_87.png",
"text_plain_output_3.png",
"text_plain_output_22.png",
"text_plain_output_81.png",
"text_plain_output_69.png",
"text_plain_output_38.png",
"text_plain_output_7.png",
"text_plain_output_91.png",
"text_plain_output_16.png",
"text_plain_output_59.png",
"text_plain_output_71.png",
"text_plain_output_8.png",
"text_plain_output_26.png",
"text_plain_output_41.png",
"text_plain_output_34.png",
"text_plain_output_85.png",
"text_plain_output_42.png",
"text_plain_output_67.png",
"text_plain_output_53.png",
"text_plain_output_23.png",
"text_plain_output_89.png",
"text_plain_output_51.png",
"text_plain_output_28.png",
"text_plain_output_72.png",
"text_plain_output_99.png",
"text_plain_output_2.png",
"text_plain_output_97.png",
"text_plain_output_1.png",
"text_plain_output_33.png",
"text_plain_output_39.png",
"text_plain_output_55.png",
"text_plain_output_82.png",
"text_plain_output_93.png",
"text_plain_output_19.png",
"image_output_2.png",
"image_output_1.png",
"text_plain_output_80.png",
"text_plain_output_94.png",
"text_plain_output_17.png",
"text_plain_output_11.png",
"text_plain_output_12.png",
"text_plain_output_62.png",
"text_plain_output_95.png",
"text_plain_output_61.png",
"text_plain_output_83.png",
"text_plain_output_46.png"
] | from PIL import Image
from torch import nn
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import os
import pandas as pd
import timm
import torch
import numpy as np
import pandas as pd
import os
import cv2
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from torch.optim import Adam
import glob
from tqdm.notebook import tqdm
from PIL import Image
import torchvision
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report, ConfusionMatrixDisplay, confusion_matrix, roc_curve, auc
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
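# Dataset wrapper: reads (Path, status) rows from a CSV, loads the image from /kaggle/input/meta-data/Data_image and returns a normalized 224x224 3-channel tensor plus its label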
class BreastCancerDataset(Dataset):
def __init__(self, data_path_file, train=True):
super(Dataset, self).__init__()
self.data = pd.read_csv(data_path_file, index_col=0)
self.global_path = '/kaggle/input/meta-data/Data_image'
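        # The training split gets horizontal-flip and small-rotation augmentation; evaluation splits are only resized and normalized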
if train:
self.transform = transforms.Compose([transforms.Grayscale(num_output_channels=3), transforms.Resize(size=(224, 224)), transforms.RandomHorizontalFlip(), transforms.RandomRotation(10), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
else:
self.transform = transforms.Compose([transforms.Grayscale(num_output_channels=3), transforms.Resize(size=(224, 224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
row = self.data.iloc[idx]
label = row['status']
img_path = os.path.join(self.global_path, row['Path'])
labels = torch.tensor(label)
image = Image.open(img_path)
if self.transform:
image = self.transform(image)
return (image, labels)
def save_model(epochs, model, optimizer, criterion, pretrained='True'):
"""
Function to save the trained model to disk.
"""
torch.save({'epoch': epochs, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'loss': criterion}, f'/kaggle/working/model_pretrained_{pretrained}_{epochs}.pth')
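# HRNet-W18 backbone from timm, ImageNet-pretrained, with a 3-class head (the classes are displayed as normal / benign / malignant in the evaluation cell), trained with cross-entropy and Adam (lr=0.001)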
import timm
model = timm.create_model('hrnet_w18', pretrained=True, num_classes=3)
criterion = nn.CrossEntropyLoss()
optimizer = Adam(model.parameters(), lr=0.001) | code |
130002559/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report, ConfusionMatrixDisplay, confusion_matrix, roc_curve, auc
from sklearn.metrics import confusion_matrix, roc_curve, auc
from torch import nn
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
import os
import pandas as pd
import timm
import torch
import numpy as np
import pandas as pd
import os
import cv2
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from torch.optim import Adam
import glob
from tqdm.notebook import tqdm
from PIL import Image
import torchvision
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report, ConfusionMatrixDisplay, confusion_matrix, roc_curve, auc
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
class BreastCancerDataset(Dataset):
def __init__(self, data_path_file, train=True):
super(Dataset, self).__init__()
self.data = pd.read_csv(data_path_file, index_col=0)
self.global_path = '/kaggle/input/meta-data/Data_image'
if train:
self.transform = transforms.Compose([transforms.Grayscale(num_output_channels=3), transforms.Resize(size=(224, 224)), transforms.RandomHorizontalFlip(), transforms.RandomRotation(10), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
else:
self.transform = transforms.Compose([transforms.Grayscale(num_output_channels=3), transforms.Resize(size=(224, 224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
row = self.data.iloc[idx]
label = row['status']
img_path = os.path.join(self.global_path, row['Path'])
labels = torch.tensor(label)
image = Image.open(img_path)
if self.transform:
image = self.transform(image)
return (image, labels)
path_train_fold_0 = '/kaggle/input/data-ddsm-cdd-mias/DDSM/DDSM/DDSM_Fold_4/train_DDSM_fold_3.csv'
path_val_fold_0 = '/kaggle/input/data-ddsm-cdd-mias/DDSM/DDSM/DDSM_Fold_4/valid_DDSM_fold_3.csv'
path_test_DDSM = '/kaggle/input/data-ddsm-cdd-mias/Data_Test/Data_Test/Test_ddsm.csv'
train_dataset = BreastCancerDataset(path_train_fold_0, train=True)
val_dataset = BreastCancerDataset(path_val_fold_0, train=False)
test_dataset_DDSM = BreastCancerDataset(path_test_DDSM, train=False)
batch_size = 64
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True)
test_loader_DDSM = DataLoader(test_dataset_DDSM, batch_size=batch_size, shuffle=True)
def save_model(epochs, model, optimizer, criterion, pretrained='True'):
"""
Function to save the trained model to disk.
"""
torch.save({'epoch': epochs, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'loss': criterion}, f'/kaggle/working/model_pretrained_{pretrained}_{epochs}.pth')
def save_plots(train_acc, valid_acc, train_loss, valid_loss, pretrained='True'):
"""
Function to save the loss and accuracy plots to disk.
"""
import timm
model = timm.create_model('hrnet_w18', pretrained=True, num_classes=3)
criterion = nn.CrossEntropyLoss()
optimizer = Adam(model.parameters(), lr=0.001)
list_acc_train, list_acc_val, list_loss_train, list_loss_val = ([], [], [], [])
num_epochs = 100
best_f1 = 0.0
for epoch in tqdm(range(1, num_epochs + 1)):
model.train()
train_loss = 0.0
train_acc = 0.0
total = 0
for inputs, labels in tqdm(train_loader):
optimizer.zero_grad()
model.to(device)
inputs, labels = (inputs.to(device), labels.to(device))
outputs = model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
train_loss += loss.item()
predicted = torch.max(outputs, 1)[1].to(device)
total += labels.size(0)
train_acc += (predicted == labels).sum().item()
train_acc /= total
train_loss /= len(train_loader)
list_loss_train.append(train_loss)
list_acc_train.append(train_acc)
model.eval()
with torch.no_grad():
val_loss = 0.0
val_acc = 0.0
f1_scr = 0.0
total = 0.0
label_list, label_pred_list = ([], [])
for inputs, labels in val_loader:
inputs, labels = (inputs.to(device), labels.to(device))
outputs = model(inputs)
loss = criterion(outputs, labels)
val_loss += loss.item()
predicted = torch.max(outputs, 1)[1].to(device)
total += labels.size(0)
val_acc += (predicted == labels).sum().item()
label_list.append(labels)
label_pred_list.append(predicted)
val_acc = val_acc / total
label_list, label_pred_list = (torch.cat(label_list, 0), torch.cat(label_pred_list, 0))
f1_scr = f1_score(label_list.cpu(), label_pred_list.cpu(), average='macro')
val_loss /= len(val_loader)
list_loss_val.append(val_loss)
list_acc_val.append(val_acc)
if f1_scr > best_f1:
best_f1 = f1_scr
save_model(epoch, model, optimizer, criterion)
history = {'loss_train': list_loss_train, 'acc_train': list_acc_train, 'loss_val': list_loss_val, 'acc_val': list_acc_val}
df_history = pd.DataFrame(history)
df_history.to_csv('/kaggle/working/history.csv')
from sklearn.metrics import confusion_matrix, roc_curve, auc
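# Evaluate on the DDSM test loader with the model as it stands after training in this cell; the best-F1 checkpoint written above is not reloaded here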
with torch.no_grad():
y_true = []
y_pred = []
for inputs, labels in tqdm(test_loader_DDSM):
inputs = inputs.to(device)
labels = labels.to(device)
model.to(device)
outputs = model(inputs)
predicted = torch.max(outputs, dim=1)[1]
y_true += labels.tolist()
y_pred += predicted.tolist()
accuracy = accuracy_score(y_true, y_pred)
precision = precision_score(y_true, y_pred, average='weighted')
recall = recall_score(y_true, y_pred, average='weighted')
f1 = f1_score(y_true, y_pred, average='weighted')
    # Labels are 0=normal, 1=benign, 2=malignant (see display_labels below); pos_label=2 treats 'malignant'
    # as the positive class. This is a coarse ROC built from hard class predictions, not probabilities.
    fpr, tpr, thresholds = roc_curve(y_true, y_pred, pos_label=2)
    auc_score = auc(fpr, tpr)
print(f'Accuracy: {accuracy:.2f}')
print(f'Precision: {precision:.2f}')
print(f'Recall: {recall:.2f}')
print(f'F1 score: {f1:.2f}')
    print(f'AUC score : {auc_score:.2f}')
cm = confusion_matrix(y_true, y_pred)
cr = classification_report(y_true, y_pred)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=['normal', 'benign', 'malignant'])
disp.plot()
plt.savefig('confusion_matrix_DDSM.png')
plt.show() | code |
2003266/cell_42 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from itertools import chain
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Embedding, Dropout, LSTM, Bidirectional, GlobalMaxPool1D, InputLayer, BatchNormalization, Activation
from keras.models import Sequential
from keras.preprocessing import text, sequence
from nltk.tokenize import wordpunct_tokenize
from sklearn.metrics import confusion_matrix, log_loss
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
train = train.fillna('nan')  # fillna returns a copy, so assign it back
test = pd.read_csv('../input/test.csv')
test = test.fillna('nan')
targets = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = np.array(train[targets])
texts = np.array(train['comment_text'])
texts_test = np.array(test['comment_text'])
label_mapping = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 1, 0], [0, 0, 1, 0, 1, 1], [0, 0, 1, 1, 0, 1], [0, 0, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 0], [0, 1, 0, 1, 0, 1], [0, 1, 0, 1, 1, 0], [0, 1, 0, 1, 1, 1], [0, 1, 1, 0, 0, 0], [0, 1, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0], [0, 1, 1, 0, 1, 1], [0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1], [0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1], [1, 0, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1], [1, 0, 0, 1, 0, 0], [1, 0, 0, 1, 0, 1], [1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 1, 1], [1, 0, 1, 0, 0, 0], [1, 0, 1, 0, 0, 1], [1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 1], [1, 0, 1, 1, 0, 0], [1, 0, 1, 1, 0, 1], [1, 0, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 1], [1, 1, 0, 0, 1, 0], [1, 1, 0, 0, 1, 1], [1, 1, 0, 1, 0, 0], [1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 0], [1, 1, 0, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 1], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1]])
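# Collapse each row's 6 binary labels into a single class id so train_test_split can stratify on the full label combination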
y_converted = np.zeros([len(y)])
for i in range(len(label_mapping)):
idx = (y == label_mapping[i]).sum(axis=1) == 6
y_converted[idx] = i
train_indices, val_indices, _, _ = train_test_split(np.fromiter(range(len(y)), dtype=np.int32), y_converted, test_size=0.1, stratify=y_converted)
# Materialize the train/validation splits referenced below from the stratified indices
texts_train, texts_val = (texts[train_indices], texts[val_indices])
y_train, y_val = (y[train_indices], y[val_indices])
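# Dump the training comments in fastText's labelled text format; embedding-model.bin (used by the fasttext CLI cell further below) is presumably trained from this file outside this cell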
with open('fasttext-embedding-train.txt', 'w', encoding='utf-8') as target:
for text in texts_train:
target.write('__label__0\t{0}\n'.format(text.strip()))
train_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
test_texts_tokenized = map(wordpunct_tokenize, test['comment_text'])
train_text_tokens = set(chain(*train_texts_tokenized))
test_text_tokens = set(chain(*test_texts_tokenized))
text_tokens = sorted(train_text_tokens | test_text_tokens)
with open('fasttext-words.txt', 'w', encoding='utf-8') as target:
for word in text_tokens:
target.write('{0}\n'.format(word.strip()))
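# Read the per-word vectors back from fasttext-vectors.txt into an embedding matrix and build the word -> row lookup (one extra zero row is allocated at the end)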
embedding_matrix = np.zeros([len(text_tokens) + 1, 100])
word2index = {}
with open('fasttext-vectors.txt', 'r', encoding='utf-8') as src:
for i, line in enumerate(src):
parts = line.strip().split(' ')
word = parts[0]
vector = map(float, parts[1:])
word2index[word] = len(word2index)
embedding_matrix[i] = np.fromiter(vector, dtype=np.float)
def text2sequence(text):
return list(map(lambda token: word2index.get(token, len(word2index) - 1), wordpunct_tokenize(str(text))))
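# Map every comment to embedding indices and pad/truncate to a fixed length of 100 tokens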
X_train = sequence.pad_sequences(list(map(text2sequence, texts_train)), maxlen=100)
X_val = sequence.pad_sequences(list(map(text2sequence, texts_val)), maxlen=100)
X_test = sequence.pad_sequences(list(map(text2sequence, texts_test)), maxlen=100)
embed_size = 100
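# Architecture: frozen fastText embeddings -> BiLSTM(50) -> global max pooling -> dense head with 6 sigmoid outputs, one per toxicity label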
model = Sequential([InputLayer(input_shape=(100,), dtype='int32'), Embedding(len(embedding_matrix), embed_size), Bidirectional(LSTM(50, return_sequences=True)), GlobalMaxPool1D(), Dropout(0.3), Dense(50, activation='relu'), Dropout(0.3), Dense(6, activation='sigmoid')])
embedding = model.layers[1]
embedding.set_weights([embedding_matrix])
embedding.trainable = False
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train, batch_size=64, epochs=10, validation_data=(X_val, y_val), verbose=True, callbacks=[ModelCheckpoint('model.h5', save_best_only=True), EarlyStopping(patience=3)])
model.load_weights('model.h5')
test_prediction = model.predict(X_test, verbose=True)
val_prediction = model.predict(X_val, verbose=True)
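# Per-label validation log loss; column 2 of the targets list is 'obscene'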
log_loss(y_val[:, 2], val_prediction[:, 2]) | code |
2003266/cell_25 | [
"text_html_output_1.png"
] | from itertools import chain
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Embedding, Dropout, LSTM, Bidirectional, GlobalMaxPool1D, InputLayer, BatchNormalization, Activation
from keras.models import Sequential
from keras.preprocessing import text, sequence
from nltk.tokenize import wordpunct_tokenize
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
train = train.fillna('nan')
test = pd.read_csv('../input/test.csv')
test = test.fillna('nan')
targets = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = np.array(train[targets])
texts = np.array(train['comment_text'])
texts_test = np.array(test['comment_text'])
label_mapping = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 1, 0], [0, 0, 1, 0, 1, 1], [0, 0, 1, 1, 0, 1], [0, 0, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 0], [0, 1, 0, 1, 0, 1], [0, 1, 0, 1, 1, 0], [0, 1, 0, 1, 1, 1], [0, 1, 1, 0, 0, 0], [0, 1, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0], [0, 1, 1, 0, 1, 1], [0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1], [0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1], [1, 0, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1], [1, 0, 0, 1, 0, 0], [1, 0, 0, 1, 0, 1], [1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 1, 1], [1, 0, 1, 0, 0, 0], [1, 0, 1, 0, 0, 1], [1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 1], [1, 0, 1, 1, 0, 0], [1, 0, 1, 1, 0, 1], [1, 0, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 1], [1, 1, 0, 0, 1, 0], [1, 1, 0, 0, 1, 1], [1, 1, 0, 1, 0, 0], [1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 0], [1, 1, 0, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 1], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1]])
y_converted = np.zeros([len(y)])
for i in range(len(label_mapping)):
idx = (y == label_mapping[i]).sum(axis=1) == 6
y_converted[idx] = i
train_indices, val_indices, _, _ = train_test_split(np.fromiter(range(len(y)), dtype=np.int32), y_converted, test_size=0.1, stratify=y_converted)
# Materialize the train/validation splits referenced below from the stratified indices
texts_train, texts_val = (texts[train_indices], texts[val_indices])
y_train, y_val = (y[train_indices], y[val_indices])
with open('fasttext-embedding-train.txt', 'w', encoding='utf-8') as target:
for text in texts_train:
target.write('__label__0\t{0}\n'.format(text.strip()))
train_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
test_texts_tokenized = map(wordpunct_tokenize, test['comment_text'])
train_text_tokens = set(chain(*train_texts_tokenized))
test_text_tokens = set(chain(*test_texts_tokenized))
text_tokens = sorted(train_text_tokens | test_text_tokens)
with open('fasttext-words.txt', 'w', encoding='utf-8') as target:
for word in text_tokens:
target.write('{0}\n'.format(word.strip()))
embedding_matrix = np.zeros([len(text_tokens) + 1, 100])
word2index = {}
with open('fasttext-vectors.txt', 'r', encoding='utf-8') as src:
for i, line in enumerate(src):
parts = line.strip().split(' ')
word = parts[0]
vector = map(float, parts[1:])
word2index[word] = len(word2index)
embedding_matrix[i] = np.fromiter(vector, dtype=np.float)
def text2sequence(text):
return list(map(lambda token: word2index.get(token, len(word2index) - 1), wordpunct_tokenize(str(text))))
X_train = sequence.pad_sequences(list(map(text2sequence, texts_train)), maxlen=100)
X_val = sequence.pad_sequences(list(map(text2sequence, texts_val)), maxlen=100)
X_test = sequence.pad_sequences(list(map(text2sequence, texts_test)), maxlen=100)
embed_size = 100
model = Sequential([InputLayer(input_shape=(100,), dtype='int32'), Embedding(len(embedding_matrix), embed_size), Bidirectional(LSTM(50, return_sequences=True)), GlobalMaxPool1D(), Dropout(0.3), Dense(50, activation='relu'), Dropout(0.3), Dense(6, activation='sigmoid')])
embedding = model.layers[1]
embedding.set_weights([embedding_matrix])
embedding.trainable = False
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train, batch_size=64, epochs=10, validation_data=(X_val, y_val), verbose=True, callbacks=[ModelCheckpoint('model.h5', save_best_only=True), EarlyStopping(patience=3)]) | code |
2003266/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
train = train.fillna('nan')
test = pd.read_csv('../input/test.csv')
test = test.fillna('nan')
test.head() | code |
2003266/cell_30 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
train = train.fillna('nan')
test = pd.read_csv('../input/test.csv')
test = test.fillna('nan')
submission = pd.read_csv('../input/sample_submission.csv')
submission.head() | code |
2003266/cell_33 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from itertools import chain
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Embedding, Dropout, LSTM, Bidirectional, GlobalMaxPool1D, InputLayer, BatchNormalization, Activation
from keras.models import Sequential
from keras.preprocessing import text, sequence
from nltk.tokenize import wordpunct_tokenize
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
train = train.fillna('nan')
test = pd.read_csv('../input/test.csv')
test = test.fillna('nan')
targets = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = np.array(train[targets])
texts = np.array(train['comment_text'])
texts_test = np.array(test['comment_text'])
label_mapping = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 1, 0], [0, 0, 1, 0, 1, 1], [0, 0, 1, 1, 0, 1], [0, 0, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 0], [0, 1, 0, 1, 0, 1], [0, 1, 0, 1, 1, 0], [0, 1, 0, 1, 1, 1], [0, 1, 1, 0, 0, 0], [0, 1, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0], [0, 1, 1, 0, 1, 1], [0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1], [0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1], [1, 0, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1], [1, 0, 0, 1, 0, 0], [1, 0, 0, 1, 0, 1], [1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 1, 1], [1, 0, 1, 0, 0, 0], [1, 0, 1, 0, 0, 1], [1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 1], [1, 0, 1, 1, 0, 0], [1, 0, 1, 1, 0, 1], [1, 0, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 1], [1, 1, 0, 0, 1, 0], [1, 1, 0, 0, 1, 1], [1, 1, 0, 1, 0, 0], [1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 0], [1, 1, 0, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 1], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1]])
y_converted = np.zeros([len(y)])
for i in range(len(label_mapping)):
idx = (y == label_mapping[i]).sum(axis=1) == 6
y_converted[idx] = i
train_indices, val_indices, _, _ = train_test_split(np.fromiter(range(len(y)), dtype=np.int32), y_converted, test_size=0.1, stratify=y_converted)
# Materialize the train/validation splits referenced below from the stratified indices
texts_train, texts_val = (texts[train_indices], texts[val_indices])
y_train, y_val = (y[train_indices], y[val_indices])
with open('fasttext-embedding-train.txt', 'w', encoding='utf-8') as target:
for text in texts_train:
target.write('__label__0\t{0}\n'.format(text.strip()))
train_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
test_texts_tokenized = map(wordpunct_tokenize, test['comment_text'])
train_text_tokens = set(chain(*train_texts_tokenized))
test_text_tokens = set(chain(*test_texts_tokenized))
text_tokens = sorted(train_text_tokens | test_text_tokens)
with open('fasttext-words.txt', 'w', encoding='utf-8') as target:
for word in text_tokens:
target.write('{0}\n'.format(word.strip()))
embedding_matrix = np.zeros([len(text_tokens) + 1, 100])
word2index = {}
with open('fasttext-vectors.txt', 'r', encoding='utf-8') as src:
for i, line in enumerate(src):
parts = line.strip().split(' ')
word = parts[0]
vector = map(float, parts[1:])
word2index[word] = len(word2index)
embedding_matrix[i] = np.fromiter(vector, dtype=np.float)
def text2sequence(text):
return list(map(lambda token: word2index.get(token, len(word2index) - 1), wordpunct_tokenize(str(text))))
X_train = sequence.pad_sequences(list(map(text2sequence, texts_train)), maxlen=100)
X_val = sequence.pad_sequences(list(map(text2sequence, texts_val)), maxlen=100)
X_test = sequence.pad_sequences(list(map(text2sequence, texts_test)), maxlen=100)
embed_size = 100
model = Sequential([InputLayer(input_shape=(100,), dtype='int32'), Embedding(len(embedding_matrix), embed_size), Bidirectional(LSTM(50, return_sequences=True)), GlobalMaxPool1D(), Dropout(0.3), Dense(50, activation='relu'), Dropout(0.3), Dense(6, activation='sigmoid')])
embedding = model.layers[1]
embedding.set_weights([embedding_matrix])
embedding.trainable = False
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train, batch_size=64, epochs=10, validation_data=(X_val, y_val), verbose=True, callbacks=[ModelCheckpoint('model.h5', save_best_only=True), EarlyStopping(patience=3)])
model.load_weights('model.h5')
test_prediction = model.predict(X_test, verbose=True)
val_prediction = model.predict(X_val, verbose=True) | code |
2003266/cell_40 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from collections import OrderedDict
from itertools import chain
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Embedding, Dropout, LSTM, Bidirectional, GlobalMaxPool1D, InputLayer, BatchNormalization, Activation
from keras.models import Sequential
from keras.preprocessing import text, sequence
from nltk.tokenize import wordpunct_tokenize
from sklearn.metrics import confusion_matrix, log_loss
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
train = train.fillna('nan')
test = pd.read_csv('../input/test.csv')
test = test.fillna('nan')
submission = pd.read_csv('../input/sample_submission.csv')
targets = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = np.array(train[targets])
texts = np.array(train['comment_text'])
texts_test = np.array(test['comment_text'])
label_mapping = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 1, 0], [0, 0, 1, 0, 1, 1], [0, 0, 1, 1, 0, 1], [0, 0, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 0], [0, 1, 0, 1, 0, 1], [0, 1, 0, 1, 1, 0], [0, 1, 0, 1, 1, 1], [0, 1, 1, 0, 0, 0], [0, 1, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0], [0, 1, 1, 0, 1, 1], [0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1], [0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1], [1, 0, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1], [1, 0, 0, 1, 0, 0], [1, 0, 0, 1, 0, 1], [1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 1, 1], [1, 0, 1, 0, 0, 0], [1, 0, 1, 0, 0, 1], [1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 1], [1, 0, 1, 1, 0, 0], [1, 0, 1, 1, 0, 1], [1, 0, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 1], [1, 1, 0, 0, 1, 0], [1, 1, 0, 0, 1, 1], [1, 1, 0, 1, 0, 0], [1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 0], [1, 1, 0, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 1], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1]])
y_converted = np.zeros([len(y)])
for i in range(len(label_mapping)):
idx = (y == label_mapping[i]).sum(axis=1) == 6
y_converted[idx] = i
train_indices, val_indices, _, _ = train_test_split(np.fromiter(range(len(y)), dtype=np.int32), y_converted, test_size=0.1, stratify=y_converted)
# Materialize the train/validation splits referenced below from the stratified indices
texts_train, texts_val = (texts[train_indices], texts[val_indices])
y_train, y_val = (y[train_indices], y[val_indices])
with open('fasttext-embedding-train.txt', 'w', encoding='utf-8') as target:
for text in texts_train:
target.write('__label__0\t{0}\n'.format(text.strip()))
train_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
test_texts_tokenized = map(wordpunct_tokenize, test['comment_text'])
train_text_tokens = set(chain(*train_texts_tokenized))
test_text_tokens = set(chain(*test_texts_tokenized))
text_tokens = sorted(train_text_tokens | test_text_tokens)
with open('fasttext-words.txt', 'w', encoding='utf-8') as target:
for word in text_tokens:
target.write('{0}\n'.format(word.strip()))
embedding_matrix = np.zeros([len(text_tokens) + 1, 100])
word2index = {}
with open('fasttext-vectors.txt', 'r', encoding='utf-8') as src:
for i, line in enumerate(src):
parts = line.strip().split(' ')
word = parts[0]
vector = map(float, parts[1:])
word2index[word] = len(word2index)
embedding_matrix[i] = np.fromiter(vector, dtype=np.float)
def text2sequence(text):
return list(map(lambda token: word2index.get(token, len(word2index) - 1), wordpunct_tokenize(str(text))))
X_train = sequence.pad_sequences(list(map(text2sequence, texts_train)), maxlen=100)
X_val = sequence.pad_sequences(list(map(text2sequence, texts_val)), maxlen=100)
X_test = sequence.pad_sequences(list(map(text2sequence, texts_test)), maxlen=100)
embed_size = 100
model = Sequential([InputLayer(input_shape=(100,), dtype='int32'), Embedding(len(embedding_matrix), embed_size), Bidirectional(LSTM(50, return_sequences=True)), GlobalMaxPool1D(), Dropout(0.3), Dense(50, activation='relu'), Dropout(0.3), Dense(6, activation='sigmoid')])
embedding = model.layers[1]
embedding.set_weights([embedding_matrix])
embedding.trainable = False
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train, batch_size=64, epochs=10, validation_data=(X_val, y_val), verbose=True, callbacks=[ModelCheckpoint('model.h5', save_best_only=True), EarlyStopping(patience=3)])
model.load_weights('model.h5')
test_prediction = model.predict(X_test, verbose=True)
val_prediction = model.predict(X_val, verbose=True)
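# Helper: summarize one label's thresholded predictions as a 2x2 confusion-matrix DataFrame (the call below uses column 1, 'severe_toxic')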
def show_confustion_matrix(y_true, y_pred):
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
df = pd.DataFrame(OrderedDict([('true-class', ['negative', 'positive']), ('negative-classified', [tn, fn]), ('positive-classified', [fp, tp])]))
return df.set_index('true-class')
show_confustion_matrix(y_val[:, 1], val_prediction[:, 1] > 0.5) | code |
2003266/cell_29 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from itertools import chain
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Embedding, Dropout, LSTM, Bidirectional, GlobalMaxPool1D, InputLayer, BatchNormalization, Activation
from keras.models import Sequential
from keras.preprocessing import text, sequence
from nltk.tokenize import wordpunct_tokenize
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
train = train.fillna('nan')
test = pd.read_csv('../input/test.csv')
test = test.fillna('nan')
submission = pd.read_csv('../input/sample_submission.csv')
targets = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = np.array(train[targets])
texts = np.array(train['comment_text'])
texts_test = np.array(test['comment_text'])
label_mapping = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 1, 0], [0, 0, 1, 0, 1, 1], [0, 0, 1, 1, 0, 1], [0, 0, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 0], [0, 1, 0, 1, 0, 1], [0, 1, 0, 1, 1, 0], [0, 1, 0, 1, 1, 1], [0, 1, 1, 0, 0, 0], [0, 1, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0], [0, 1, 1, 0, 1, 1], [0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1], [0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1], [1, 0, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1], [1, 0, 0, 1, 0, 0], [1, 0, 0, 1, 0, 1], [1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 1, 1], [1, 0, 1, 0, 0, 0], [1, 0, 1, 0, 0, 1], [1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 1], [1, 0, 1, 1, 0, 0], [1, 0, 1, 1, 0, 1], [1, 0, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 1], [1, 1, 0, 0, 1, 0], [1, 1, 0, 0, 1, 1], [1, 1, 0, 1, 0, 0], [1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 0], [1, 1, 0, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 1], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1]])
y_converted = np.zeros([len(y)])
for i in range(len(label_mapping)):
idx = (y == label_mapping[i]).sum(axis=1) == 6
y_converted[idx] = i
train_indices, val_indices, _, _ = train_test_split(np.fromiter(range(len(y)), dtype=np.int32), y_converted, test_size=0.1, stratify=y_converted)
# Materialize the train/validation splits referenced below from the stratified indices
texts_train, texts_val = (texts[train_indices], texts[val_indices])
y_train, y_val = (y[train_indices], y[val_indices])
with open('fasttext-embedding-train.txt', 'w', encoding='utf-8') as target:
for text in texts_train:
target.write('__label__0\t{0}\n'.format(text.strip()))
train_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
test_texts_tokenized = map(wordpunct_tokenize, test['comment_text'])
train_text_tokens = set(chain(*train_texts_tokenized))
test_text_tokens = set(chain(*test_texts_tokenized))
text_tokens = sorted(train_text_tokens | test_text_tokens)
with open('fasttext-words.txt', 'w', encoding='utf-8') as target:
for word in text_tokens:
target.write('{0}\n'.format(word.strip()))
embedding_matrix = np.zeros([len(text_tokens) + 1, 100])
word2index = {}
with open('fasttext-vectors.txt', 'r', encoding='utf-8') as src:
for i, line in enumerate(src):
parts = line.strip().split(' ')
word = parts[0]
vector = map(float, parts[1:])
word2index[word] = len(word2index)
embedding_matrix[i] = np.fromiter(vector, dtype=np.float)
def text2sequence(text):
return list(map(lambda token: word2index.get(token, len(word2index) - 1), wordpunct_tokenize(str(text))))
X_train = sequence.pad_sequences(list(map(text2sequence, texts_train)), maxlen=100)
X_val = sequence.pad_sequences(list(map(text2sequence, texts_val)), maxlen=100)
X_test = sequence.pad_sequences(list(map(text2sequence, texts_test)), maxlen=100)
embed_size = 100
model = Sequential([InputLayer(input_shape=(100,), dtype='int32'), Embedding(len(embedding_matrix), embed_size), Bidirectional(LSTM(50, return_sequences=True)), GlobalMaxPool1D(), Dropout(0.3), Dense(50, activation='relu'), Dropout(0.3), Dense(6, activation='sigmoid')])
embedding = model.layers[1]
embedding.set_weights([embedding_matrix])
embedding.trainable = False
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train, batch_size=64, epochs=10, validation_data=(X_val, y_val), verbose=True, callbacks=[ModelCheckpoint('model.h5', save_best_only=True), EarlyStopping(patience=3)])
model.load_weights('model.h5')
test_prediction = model.predict(X_test, verbose=True)
for i, label in enumerate(['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']):
submission[label] = test_prediction[:, i] | code |
2003266/cell_39 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from itertools import chain
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Embedding, Dropout, LSTM, Bidirectional, GlobalMaxPool1D, InputLayer, BatchNormalization, Activation
from keras.models import Sequential
from keras.preprocessing import text, sequence
from nltk.tokenize import wordpunct_tokenize
from sklearn.metrics import confusion_matrix, log_loss
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
train = train.fillna('nan')
test = pd.read_csv('../input/test.csv')
test = test.fillna('nan')
targets = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = np.array(train[targets])
texts = np.array(train['comment_text'])
texts_test = np.array(test['comment_text'])
label_mapping = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 1, 0], [0, 0, 1, 0, 1, 1], [0, 0, 1, 1, 0, 1], [0, 0, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 0], [0, 1, 0, 1, 0, 1], [0, 1, 0, 1, 1, 0], [0, 1, 0, 1, 1, 1], [0, 1, 1, 0, 0, 0], [0, 1, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0], [0, 1, 1, 0, 1, 1], [0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1], [0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1], [1, 0, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1], [1, 0, 0, 1, 0, 0], [1, 0, 0, 1, 0, 1], [1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 1, 1], [1, 0, 1, 0, 0, 0], [1, 0, 1, 0, 0, 1], [1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 1], [1, 0, 1, 1, 0, 0], [1, 0, 1, 1, 0, 1], [1, 0, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 1], [1, 1, 0, 0, 1, 0], [1, 1, 0, 0, 1, 1], [1, 1, 0, 1, 0, 0], [1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 0], [1, 1, 0, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 1], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1]])
y_converted = np.zeros([len(y)])
for i in range(len(label_mapping)):
idx = (y == label_mapping[i]).sum(axis=1) == 6
y_converted[idx] = i
train_indices, val_indices, _, _ = train_test_split(np.fromiter(range(len(y)), dtype=np.int32), y_converted, test_size=0.1, stratify=y_converted)
# Materialize the train/validation splits referenced below from the stratified indices
texts_train, texts_val = (texts[train_indices], texts[val_indices])
y_train, y_val = (y[train_indices], y[val_indices])
with open('fasttext-embedding-train.txt', 'w', encoding='utf-8') as target:
for text in texts_train:
target.write('__label__0\t{0}\n'.format(text.strip()))
train_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
test_texts_tokenized = map(wordpunct_tokenize, test['comment_text'])
train_text_tokens = set(chain(*train_texts_tokenized))
test_text_tokens = set(chain(*test_texts_tokenized))
text_tokens = sorted(train_text_tokens | test_text_tokens)
with open('fasttext-words.txt', 'w', encoding='utf-8') as target:
for word in text_tokens:
target.write('{0}\n'.format(word.strip()))
embedding_matrix = np.zeros([len(text_tokens) + 1, 100])
word2index = {}
with open('fasttext-vectors.txt', 'r', encoding='utf-8') as src:
for i, line in enumerate(src):
parts = line.strip().split(' ')
word = parts[0]
vector = map(float, parts[1:])
word2index[word] = len(word2index)
embedding_matrix[i] = np.fromiter(vector, dtype=np.float)
def text2sequence(text):
return list(map(lambda token: word2index.get(token, len(word2index) - 1), wordpunct_tokenize(str(text))))
X_train = sequence.pad_sequences(list(map(text2sequence, texts_train)), maxlen=100)
X_val = sequence.pad_sequences(list(map(text2sequence, texts_val)), maxlen=100)
X_test = sequence.pad_sequences(list(map(text2sequence, texts_test)), maxlen=100)
embed_size = 100
model = Sequential([InputLayer(input_shape=(100,), dtype='int32'), Embedding(len(embedding_matrix), embed_size), Bidirectional(LSTM(50, return_sequences=True)), GlobalMaxPool1D(), Dropout(0.3), Dense(50, activation='relu'), Dropout(0.3), Dense(6, activation='sigmoid')])
embedding = model.layers[1]
embedding.set_weights([embedding_matrix])
embedding.trainable = False
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train, batch_size=64, epochs=10, validation_data=(X_val, y_val), verbose=True, callbacks=[ModelCheckpoint('model.h5', save_best_only=True), EarlyStopping(patience=3)])
model.load_weights('model.h5')
test_prediction = model.predict(X_test, verbose=True)
val_prediction = model.predict(X_val, verbose=True)
log_loss(y_val[:, 1], val_prediction[:, 1]) | code |
2003266/cell_26 | [
"text_plain_output_1.png"
] | from itertools import chain
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Embedding, Dropout, LSTM, Bidirectional, GlobalMaxPool1D, InputLayer, BatchNormalization, Activation
from keras.models import Sequential
from keras.preprocessing import text, sequence
from nltk.tokenize import wordpunct_tokenize
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
train = train.fillna('nan')
test = pd.read_csv('../input/test.csv')
test = test.fillna('nan')
targets = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = np.array(train[targets])
texts = np.array(train['comment_text'])
texts_test = np.array(test['comment_text'])
label_mapping = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 1, 0], [0, 0, 1, 0, 1, 1], [0, 0, 1, 1, 0, 1], [0, 0, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 0], [0, 1, 0, 1, 0, 1], [0, 1, 0, 1, 1, 0], [0, 1, 0, 1, 1, 1], [0, 1, 1, 0, 0, 0], [0, 1, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0], [0, 1, 1, 0, 1, 1], [0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1], [0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1], [1, 0, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1], [1, 0, 0, 1, 0, 0], [1, 0, 0, 1, 0, 1], [1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 1, 1], [1, 0, 1, 0, 0, 0], [1, 0, 1, 0, 0, 1], [1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 1], [1, 0, 1, 1, 0, 0], [1, 0, 1, 1, 0, 1], [1, 0, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 1], [1, 1, 0, 0, 1, 0], [1, 1, 0, 0, 1, 1], [1, 1, 0, 1, 0, 0], [1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 0], [1, 1, 0, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 1], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1]])
y_converted = np.zeros([len(y)])
for i in range(len(label_mapping)):
idx = (y == label_mapping[i]).sum(axis=1) == 6
y_converted[idx] = i
train_indices, val_indices, _, _ = train_test_split(np.fromiter(range(len(y)), dtype=np.int32), y_converted, test_size=0.1, stratify=y_converted)
# Materialize the train/validation splits referenced below from the stratified indices
texts_train, texts_val = (texts[train_indices], texts[val_indices])
y_train, y_val = (y[train_indices], y[val_indices])
with open('fasttext-embedding-train.txt', 'w', encoding='utf-8') as target:
for text in texts_train:
target.write('__label__0\t{0}\n'.format(text.strip()))
train_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
test_texts_tokenized = map(wordpunct_tokenize, test['comment_text'])
train_text_tokens = set(chain(*train_texts_tokenized))
test_text_tokens = set(chain(*test_texts_tokenized))
text_tokens = sorted(train_text_tokens | test_text_tokens)
with open('fasttext-words.txt', 'w', encoding='utf-8') as target:
for word in text_tokens:
target.write('{0}\n'.format(word.strip()))
embedding_matrix = np.zeros([len(text_tokens) + 1, 100])
word2index = {}
with open('fasttext-vectors.txt', 'r', encoding='utf-8') as src:
for i, line in enumerate(src):
parts = line.strip().split(' ')
word = parts[0]
vector = map(float, parts[1:])
word2index[word] = len(word2index)
embedding_matrix[i] = np.fromiter(vector, dtype=np.float)
def text2sequence(text):
return list(map(lambda token: word2index.get(token, len(word2index) - 1), wordpunct_tokenize(str(text))))
X_train = sequence.pad_sequences(list(map(text2sequence, texts_train)), maxlen=100)
X_val = sequence.pad_sequences(list(map(text2sequence, texts_val)), maxlen=100)
X_test = sequence.pad_sequences(list(map(text2sequence, texts_test)), maxlen=100)
embed_size = 100
model = Sequential([InputLayer(input_shape=(100,), dtype='int32'), Embedding(len(embedding_matrix), embed_size), Bidirectional(LSTM(50, return_sequences=True)), GlobalMaxPool1D(), Dropout(0.3), Dense(50, activation='relu'), Dropout(0.3), Dense(6, activation='sigmoid')])
embedding = model.layers[1]
embedding.set_weights([embedding_matrix])
embedding.trainable = False
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train, batch_size=64, epochs=10, validation_data=(X_val, y_val), verbose=True, callbacks=[ModelCheckpoint('model.h5', save_best_only=True), EarlyStopping(patience=3)])
model.load_weights('model.h5') | code |
2003266/cell_48 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from itertools import chain
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Embedding, Dropout, LSTM, Bidirectional, GlobalMaxPool1D, InputLayer, BatchNormalization, Activation
from keras.models import Sequential
from keras.preprocessing import text, sequence
from nltk.tokenize import wordpunct_tokenize
from sklearn.metrics import confusion_matrix, log_loss
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
train = train.fillna('nan')
test = pd.read_csv('../input/test.csv')
test = test.fillna('nan')
targets = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = np.array(train[targets])
texts = np.array(train['comment_text'])
texts_test = np.array(test['comment_text'])
label_mapping = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 1, 0], [0, 0, 1, 0, 1, 1], [0, 0, 1, 1, 0, 1], [0, 0, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 0], [0, 1, 0, 1, 0, 1], [0, 1, 0, 1, 1, 0], [0, 1, 0, 1, 1, 1], [0, 1, 1, 0, 0, 0], [0, 1, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0], [0, 1, 1, 0, 1, 1], [0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1], [0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1], [1, 0, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1], [1, 0, 0, 1, 0, 0], [1, 0, 0, 1, 0, 1], [1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 1, 1], [1, 0, 1, 0, 0, 0], [1, 0, 1, 0, 0, 1], [1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 1], [1, 0, 1, 1, 0, 0], [1, 0, 1, 1, 0, 1], [1, 0, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 1], [1, 1, 0, 0, 1, 0], [1, 1, 0, 0, 1, 1], [1, 1, 0, 1, 0, 0], [1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 0], [1, 1, 0, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 1], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1]])
y_converted = np.zeros([len(y)])
for i in range(len(label_mapping)):
idx = (y == label_mapping[i]).sum(axis=1) == 6
y_converted[idx] = i
train_indices, val_indices, _, _ = train_test_split(np.fromiter(range(len(y)), dtype=np.int32), y_converted, test_size=0.1, stratify=y_converted)
# Materialize the train/validation splits referenced below from the stratified indices
texts_train, texts_val = (texts[train_indices], texts[val_indices])
y_train, y_val = (y[train_indices], y[val_indices])
with open('fasttext-embedding-train.txt', 'w', encoding='utf-8') as target:
for text in texts_train:
target.write('__label__0\t{0}\n'.format(text.strip()))
train_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
test_texts_tokenized = map(wordpunct_tokenize, test['comment_text'])
train_text_tokens = set(chain(*train_texts_tokenized))
test_text_tokens = set(chain(*test_texts_tokenized))
text_tokens = sorted(train_text_tokens | test_text_tokens)
with open('fasttext-words.txt', 'w', encoding='utf-8') as target:
for word in text_tokens:
target.write('{0}\n'.format(word.strip()))
embedding_matrix = np.zeros([len(text_tokens) + 1, 100])
word2index = {}
with open('fasttext-vectors.txt', 'r', encoding='utf-8') as src:
for i, line in enumerate(src):
parts = line.strip().split(' ')
word = parts[0]
vector = map(float, parts[1:])
word2index[word] = len(word2index)
embedding_matrix[i] = np.fromiter(vector, dtype=np.float)
def text2sequence(text):
return list(map(lambda token: word2index.get(token, len(word2index) - 1), wordpunct_tokenize(str(text))))
X_train = sequence.pad_sequences(list(map(text2sequence, texts_train)), maxlen=100)
X_val = sequence.pad_sequences(list(map(text2sequence, texts_val)), maxlen=100)
X_test = sequence.pad_sequences(list(map(text2sequence, texts_test)), maxlen=100)
embed_size = 100
model = Sequential([InputLayer(input_shape=(100,), dtype='int32'), Embedding(len(embedding_matrix), embed_size), Bidirectional(LSTM(50, return_sequences=True)), GlobalMaxPool1D(), Dropout(0.3), Dense(50, activation='relu'), Dropout(0.3), Dense(6, activation='sigmoid')])
embedding = model.layers[1]
embedding.set_weights([embedding_matrix])
embedding.trainable = False
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train, batch_size=64, epochs=10, validation_data=(X_val, y_val), verbose=True, callbacks=[ModelCheckpoint('model.h5', save_best_only=True), EarlyStopping(patience=3)])
model.load_weights('model.h5')
test_prediction = model.predict(X_test, verbose=True)
val_prediction = model.predict(X_val, verbose=True)
log_loss(y_val[:, 4], val_prediction[:, 4]) | code |
2003266/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd
import numpy as np
from itertools import chain
from nltk.tokenize import wordpunct_tokenize
from keras.preprocessing import text, sequence
from keras.layers import Dense, Embedding, Dropout, LSTM, Bidirectional, GlobalMaxPool1D, InputLayer, BatchNormalization, Activation
from keras.models import Sequential
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.model_selection import train_test_split
from subprocess import call
from sklearn.utils import compute_sample_weight
from sklearn.metrics import confusion_matrix, log_loss
from collections import OrderedDict | code |
2003266/cell_19 | [
"application_vnd.jupyter.stderr_output_1.png"
] | !fasttext print-word-vectors embedding-model.bin < fasttext-words.txt > fasttext-vectors.txt | code |
2003266/cell_52 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from collections import OrderedDict
from itertools import chain
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Embedding, Dropout, LSTM, Bidirectional, GlobalMaxPool1D, InputLayer, BatchNormalization, Activation
from keras.models import Sequential
from keras.preprocessing import text, sequence
from nltk.tokenize import wordpunct_tokenize
from sklearn.metrics import confusion_matrix, log_loss
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
train = train.fillna('nan')
test = pd.read_csv('../input/test.csv')
test = test.fillna('nan')
submission = pd.read_csv('../input/sample_submission.csv')
targets = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = np.array(train[targets])
texts = np.array(train['comment_text'])
texts_test = np.array(test['comment_text'])
label_mapping = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 1, 0], [0, 0, 1, 0, 1, 1], [0, 0, 1, 1, 0, 1], [0, 0, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 0], [0, 1, 0, 1, 0, 1], [0, 1, 0, 1, 1, 0], [0, 1, 0, 1, 1, 1], [0, 1, 1, 0, 0, 0], [0, 1, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0], [0, 1, 1, 0, 1, 1], [0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1], [0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1], [1, 0, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1], [1, 0, 0, 1, 0, 0], [1, 0, 0, 1, 0, 1], [1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 1, 1], [1, 0, 1, 0, 0, 0], [1, 0, 1, 0, 0, 1], [1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 1], [1, 0, 1, 1, 0, 0], [1, 0, 1, 1, 0, 1], [1, 0, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 1], [1, 1, 0, 0, 1, 0], [1, 1, 0, 0, 1, 1], [1, 1, 0, 1, 0, 0], [1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 0], [1, 1, 0, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 1], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1]])
y_converted = np.zeros([len(y)])
for i in range(len(label_mapping)):
idx = (y == label_mapping[i]).sum(axis=1) == 6
y_converted[idx] = i
train_indices, val_indices, _, _ = train_test_split(np.fromiter(range(len(y)), dtype=np.int32), y_converted, test_size=0.1, stratify=y_converted)
# Materialize the train/validation splits referenced below from the stratified indices
texts_train, texts_val = (texts[train_indices], texts[val_indices])
y_train, y_val = (y[train_indices], y[val_indices])
with open('fasttext-embedding-train.txt', 'w', encoding='utf-8') as target:
for text in texts_train:
target.write('__label__0\t{0}\n'.format(text.strip()))
train_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
test_texts_tokenized = map(wordpunct_tokenize, test['comment_text'])
train_text_tokens = set(chain(*train_texts_tokenized))
test_text_tokens = set(chain(*test_texts_tokenized))
text_tokens = sorted(train_text_tokens | test_text_tokens)
with open('fasttext-words.txt', 'w', encoding='utf-8') as target:
for word in text_tokens:
target.write('{0}\n'.format(word.strip()))
embedding_matrix = np.zeros([len(text_tokens) + 1, 100])
word2index = {}
with open('fasttext-vectors.txt', 'r', encoding='utf-8') as src:
for i, line in enumerate(src):
parts = line.strip().split(' ')
word = parts[0]
vector = map(float, parts[1:])
word2index[word] = len(word2index)
embedding_matrix[i] = np.fromiter(vector, dtype=np.float)
def text2sequence(text):
return list(map(lambda token: word2index.get(token, len(word2index) - 1), wordpunct_tokenize(str(text))))
X_train = sequence.pad_sequences(list(map(text2sequence, texts_train)), maxlen=100)
X_val = sequence.pad_sequences(list(map(text2sequence, texts_val)), maxlen=100)
X_test = sequence.pad_sequences(list(map(text2sequence, texts_test)), maxlen=100)
embed_size = 100
model = Sequential([InputLayer(input_shape=(100,), dtype='int32'), Embedding(len(embedding_matrix), embed_size), Bidirectional(LSTM(50, return_sequences=True)), GlobalMaxPool1D(), Dropout(0.3), Dense(50, activation='relu'), Dropout(0.3), Dense(6, activation='sigmoid')])
embedding = model.layers[1]
embedding.set_weights([embedding_matrix])
embedding.trainable = False
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train, batch_size=64, epochs=10, validation_data=(X_val, y_val), verbose=True, callbacks=[ModelCheckpoint('model.h5', save_best_only=True), EarlyStopping(patience=3)])
model.load_weights('model.h5')
test_prediction = model.predict(X_test, verbose=True)
val_prediction = model.predict(X_val, verbose=True)
def show_confustion_matrix(y_true, y_pred):
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
df = pd.DataFrame(OrderedDict([('true-class', ['negative', 'positive']), ('negative-classified', [tn, fn]), ('positive-classified', [fp, tp])]))
return df.set_index('true-class')
show_confustion_matrix(y_val[:, 5], val_prediction[:, 5] > 0.5) | code |
2003266/cell_45 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from itertools import chain
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Embedding, Dropout, LSTM, Bidirectional, GlobalMaxPool1D, InputLayer, BatchNormalization, Activation
from keras.models import Sequential
from keras.preprocessing import text, sequence
from nltk.tokenize import wordpunct_tokenize
from sklearn.metrics import confusion_matrix, log_loss
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
train = train.fillna('nan')
test = pd.read_csv('../input/test.csv')
test = test.fillna('nan')
targets = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = np.array(train[targets])
texts = np.array(train['comment_text'])
texts_test = np.array(test['comment_text'])
label_mapping = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 1, 0], [0, 0, 1, 0, 1, 1], [0, 0, 1, 1, 0, 1], [0, 0, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 0], [0, 1, 0, 1, 0, 1], [0, 1, 0, 1, 1, 0], [0, 1, 0, 1, 1, 1], [0, 1, 1, 0, 0, 0], [0, 1, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0], [0, 1, 1, 0, 1, 1], [0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1], [0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1], [1, 0, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1], [1, 0, 0, 1, 0, 0], [1, 0, 0, 1, 0, 1], [1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 1, 1], [1, 0, 1, 0, 0, 0], [1, 0, 1, 0, 0, 1], [1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 1], [1, 0, 1, 1, 0, 0], [1, 0, 1, 1, 0, 1], [1, 0, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 1], [1, 1, 0, 0, 1, 0], [1, 1, 0, 0, 1, 1], [1, 1, 0, 1, 0, 0], [1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 0], [1, 1, 0, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 1], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1]])
y_converted = np.zeros([len(y)])
for i in range(len(label_mapping)):
idx = (y == label_mapping[i]).sum(axis=1) == 6
y_converted[idx] = i
train_indices, val_indices, _, _ = train_test_split(np.fromiter(range(len(y)), dtype=np.int32), y_converted, test_size=0.1, stratify=y_converted)
with open('fasttext-embedding-train.txt', 'w', encoding='utf-8') as target:
for text in texts_train:
target.write('__label__0\t{0}\n'.format(text.strip()))
train_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
test_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
train_text_tokens = set(chain(*train_texts_tokenized))
test_text_tokens = set(chain(*test_texts_tokenized))
text_tokens = sorted(train_text_tokens | test_text_tokens)
with open('fasttext-words.txt', 'w', encoding='utf-8') as target:
for word in text_tokens:
target.write('{0}\n'.format(word.strip()))
embedding_matrix = np.zeros([len(text_tokens) + 1, 100])
word2index = {}
with open('fasttext-vectors.txt', 'r', encoding='utf-8') as src:
for i, line in enumerate(src):
parts = line.strip().split(' ')
word = parts[0]
vector = map(float, parts[1:])
word2index[word] = len(word2index)
embedding_matrix[i] = np.fromiter(vector, dtype=np.float)
def text2sequence(text):
return list(map(lambda token: word2index.get(token, len(word2index) - 1), wordpunct_tokenize(str(text))))
X_train = sequence.pad_sequences(list(map(text2sequence, texts_train)), maxlen=100)
X_val = sequence.pad_sequences(list(map(text2sequence, texts_val)), maxlen=100)
X_test = sequence.pad_sequences(list(map(text2sequence, texts_test)), maxlen=100)
embed_size = 100
model = Sequential([InputLayer(input_shape=(100,), dtype='int32'), Embedding(len(embedding_matrix), embed_size), Bidirectional(LSTM(50, return_sequences=True)), GlobalMaxPool1D(), Dropout(0.3), Dense(50, activation='relu'), Dropout(0.3), Dense(6, activation='sigmoid')])
embedding = model.layers[1]
embedding.set_weights([embedding_matrix])
embedding.trainable = False
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train, batch_size=64, epochs=10, validation_data=(X_val, y_val), verbose=True, callbacks=[ModelCheckpoint('model.h5', save_best_only=True), EarlyStopping(patience=3)])
model.load_weights('model.h5')
test_prediction = model.predict(X_test, verbose=True)
val_prediction = model.predict(X_val, verbose=True)
log_loss(y_val[:, 3], val_prediction[:, 3]) | code |
2003266/cell_49 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from collections import OrderedDict
from itertools import chain
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Embedding, Dropout, LSTM, Bidirectional, GlobalMaxPool1D, InputLayer, BatchNormalization, Activation
from keras.models import Sequential
from keras.preprocessing import text, sequence
from nltk.tokenize import wordpunct_tokenize
from sklearn.metrics import confusion_matrix, log_loss
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
train.fillna('nan')
test = pd.read_csv('../input/test.csv')
test.fillna('nan')
submission = pd.read_csv('../input/sample_submission.csv')
targets = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = np.array(train[targets])
texts = np.array(train['comment_text'])
texts_test = np.array(test['comment_text'])
label_mapping = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 1, 0], [0, 0, 1, 0, 1, 1], [0, 0, 1, 1, 0, 1], [0, 0, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 0], [0, 1, 0, 1, 0, 1], [0, 1, 0, 1, 1, 0], [0, 1, 0, 1, 1, 1], [0, 1, 1, 0, 0, 0], [0, 1, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0], [0, 1, 1, 0, 1, 1], [0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1], [0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1], [1, 0, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1], [1, 0, 0, 1, 0, 0], [1, 0, 0, 1, 0, 1], [1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 1, 1], [1, 0, 1, 0, 0, 0], [1, 0, 1, 0, 0, 1], [1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 1], [1, 0, 1, 1, 0, 0], [1, 0, 1, 1, 0, 1], [1, 0, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 1], [1, 1, 0, 0, 1, 0], [1, 1, 0, 0, 1, 1], [1, 1, 0, 1, 0, 0], [1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 0], [1, 1, 0, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 1], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1]])
y_converted = np.zeros([len(y)])
for i in range(len(label_mapping)):
idx = (y == label_mapping[i]).sum(axis=1) == 6
y_converted[idx] = i
train_indices, val_indices, _, _ = train_test_split(np.fromiter(range(len(y)), dtype=np.int32), y_converted, test_size=0.1, stratify=y_converted)
with open('fasttext-embedding-train.txt', 'w', encoding='utf-8') as target:
for text in texts_train:
target.write('__label__0\t{0}\n'.format(text.strip()))
train_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
test_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
train_text_tokens = set(chain(*train_texts_tokenized))
test_text_tokens = set(chain(*test_texts_tokenized))
text_tokens = sorted(train_text_tokens | test_text_tokens)
with open('fasttext-words.txt', 'w', encoding='utf-8') as target:
for word in text_tokens:
target.write('{0}\n'.format(word.strip()))
embedding_matrix = np.zeros([len(text_tokens) + 1, 100])
word2index = {}
with open('fasttext-vectors.txt', 'r', encoding='utf-8') as src:
for i, line in enumerate(src):
parts = line.strip().split(' ')
word = parts[0]
vector = map(float, parts[1:])
word2index[word] = len(word2index)
embedding_matrix[i] = np.fromiter(vector, dtype=np.float)
def text2sequence(text):
return list(map(lambda token: word2index.get(token, len(word2index) - 1), wordpunct_tokenize(str(text))))
X_train = sequence.pad_sequences(list(map(text2sequence, texts_train)), maxlen=100)
X_val = sequence.pad_sequences(list(map(text2sequence, texts_val)), maxlen=100)
X_test = sequence.pad_sequences(list(map(text2sequence, texts_test)), maxlen=100)
embed_size = 100
model = Sequential([InputLayer(input_shape=(100,), dtype='int32'), Embedding(len(embedding_matrix), embed_size), Bidirectional(LSTM(50, return_sequences=True)), GlobalMaxPool1D(), Dropout(0.3), Dense(50, activation='relu'), Dropout(0.3), Dense(6, activation='sigmoid')])
embedding = model.layers[1]
embedding.set_weights([embedding_matrix])
embedding.trainable = False
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train, batch_size=64, epochs=10, validation_data=(X_val, y_val), verbose=True, callbacks=[ModelCheckpoint('model.h5', save_best_only=True), EarlyStopping(patience=3)])
model.load_weights('model.h5')
test_prediction = model.predict(X_test, verbose=True)
val_prediction = model.predict(X_val, verbose=True)
def show_confustion_matrix(y_true, y_pred):
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
df = pd.DataFrame(OrderedDict([('true-class', ['negative', 'positive']), ('negative-classified', [tn, fn]), ('positive-classified', [fp, tp])]))
return df.set_index('true-class')
show_confustion_matrix(y_val[:, 4], val_prediction[:, 4] > 0.5) | code |
2003266/cell_51 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from itertools import chain
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Embedding, Dropout, LSTM, Bidirectional, GlobalMaxPool1D, InputLayer, BatchNormalization, Activation
from keras.models import Sequential
from keras.preprocessing import text, sequence
from nltk.tokenize import wordpunct_tokenize
from sklearn.metrics import confusion_matrix, log_loss
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
train.fillna('nan')
test = pd.read_csv('../input/test.csv')
test.fillna('nan')
targets = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = np.array(train[targets])
texts = np.array(train['comment_text'])
texts_test = np.array(test['comment_text'])
label_mapping = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 1, 0], [0, 0, 1, 0, 1, 1], [0, 0, 1, 1, 0, 1], [0, 0, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 0], [0, 1, 0, 1, 0, 1], [0, 1, 0, 1, 1, 0], [0, 1, 0, 1, 1, 1], [0, 1, 1, 0, 0, 0], [0, 1, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0], [0, 1, 1, 0, 1, 1], [0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1], [0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1], [1, 0, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1], [1, 0, 0, 1, 0, 0], [1, 0, 0, 1, 0, 1], [1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 1, 1], [1, 0, 1, 0, 0, 0], [1, 0, 1, 0, 0, 1], [1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 1], [1, 0, 1, 1, 0, 0], [1, 0, 1, 1, 0, 1], [1, 0, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 1], [1, 1, 0, 0, 1, 0], [1, 1, 0, 0, 1, 1], [1, 1, 0, 1, 0, 0], [1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 0], [1, 1, 0, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 1], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1]])
y_converted = np.zeros([len(y)])
for i in range(len(label_mapping)):
idx = (y == label_mapping[i]).sum(axis=1) == 6
y_converted[idx] = i
train_indices, val_indices, _, _ = train_test_split(np.fromiter(range(len(y)), dtype=np.int32), y_converted, test_size=0.1, stratify=y_converted)
with open('fasttext-embedding-train.txt', 'w', encoding='utf-8') as target:
for text in texts_train:
target.write('__label__0\t{0}\n'.format(text.strip()))
train_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
test_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
train_text_tokens = set(chain(*train_texts_tokenized))
test_text_tokens = set(chain(*test_texts_tokenized))
text_tokens = sorted(train_text_tokens | test_text_tokens)
with open('fasttext-words.txt', 'w', encoding='utf-8') as target:
for word in text_tokens:
target.write('{0}\n'.format(word.strip()))
embedding_matrix = np.zeros([len(text_tokens) + 1, 100])
word2index = {}
with open('fasttext-vectors.txt', 'r', encoding='utf-8') as src:
for i, line in enumerate(src):
parts = line.strip().split(' ')
word = parts[0]
vector = map(float, parts[1:])
word2index[word] = len(word2index)
embedding_matrix[i] = np.fromiter(vector, dtype=np.float)
def text2sequence(text):
return list(map(lambda token: word2index.get(token, len(word2index) - 1), wordpunct_tokenize(str(text))))
X_train = sequence.pad_sequences(list(map(text2sequence, texts_train)), maxlen=100)
X_val = sequence.pad_sequences(list(map(text2sequence, texts_val)), maxlen=100)
X_test = sequence.pad_sequences(list(map(text2sequence, texts_test)), maxlen=100)
embed_size = 100
model = Sequential([InputLayer(input_shape=(100,), dtype='int32'), Embedding(len(embedding_matrix), embed_size), Bidirectional(LSTM(50, return_sequences=True)), GlobalMaxPool1D(), Dropout(0.3), Dense(50, activation='relu'), Dropout(0.3), Dense(6, activation='sigmoid')])
embedding = model.layers[1]
embedding.set_weights([embedding_matrix])
embedding.trainable = False
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train, batch_size=64, epochs=10, validation_data=(X_val, y_val), verbose=True, callbacks=[ModelCheckpoint('model.h5', save_best_only=True), EarlyStopping(patience=3)])
model.load_weights('model.h5')
test_prediction = model.predict(X_test, verbose=True)
val_prediction = model.predict(X_val, verbose=True)
log_loss(y_val[:, 5], val_prediction[:, 5]) | code |
2003266/cell_28 | [
"text_plain_output_1.png"
] | from itertools import chain
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Embedding, Dropout, LSTM, Bidirectional, GlobalMaxPool1D, InputLayer, BatchNormalization, Activation
from keras.models import Sequential
from keras.preprocessing import text, sequence
from nltk.tokenize import wordpunct_tokenize
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
train.fillna('nan')
test = pd.read_csv('../input/test.csv')
test.fillna('nan')
targets = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = np.array(train[targets])
texts = np.array(train['comment_text'])
texts_test = np.array(test['comment_text'])
label_mapping = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 1, 0], [0, 0, 1, 0, 1, 1], [0, 0, 1, 1, 0, 1], [0, 0, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 0], [0, 1, 0, 1, 0, 1], [0, 1, 0, 1, 1, 0], [0, 1, 0, 1, 1, 1], [0, 1, 1, 0, 0, 0], [0, 1, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0], [0, 1, 1, 0, 1, 1], [0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1], [0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1], [1, 0, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1], [1, 0, 0, 1, 0, 0], [1, 0, 0, 1, 0, 1], [1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 1, 1], [1, 0, 1, 0, 0, 0], [1, 0, 1, 0, 0, 1], [1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 1], [1, 0, 1, 1, 0, 0], [1, 0, 1, 1, 0, 1], [1, 0, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 1], [1, 1, 0, 0, 1, 0], [1, 1, 0, 0, 1, 1], [1, 1, 0, 1, 0, 0], [1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 0], [1, 1, 0, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 1], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1]])
y_converted = np.zeros([len(y)])
for i in range(len(label_mapping)):
idx = (y == label_mapping[i]).sum(axis=1) == 6
y_converted[idx] = i
train_indices, val_indices, _, _ = train_test_split(np.fromiter(range(len(y)), dtype=np.int32), y_converted, test_size=0.1, stratify=y_converted)
with open('fasttext-embedding-train.txt', 'w', encoding='utf-8') as target:
for text in texts_train:
target.write('__label__0\t{0}\n'.format(text.strip()))
train_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
test_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
train_text_tokens = set(chain(*train_texts_tokenized))
test_text_tokens = set(chain(*test_texts_tokenized))
text_tokens = sorted(train_text_tokens | test_text_tokens)
with open('fasttext-words.txt', 'w', encoding='utf-8') as target:
for word in text_tokens:
target.write('{0}\n'.format(word.strip()))
embedding_matrix = np.zeros([len(text_tokens) + 1, 100])
word2index = {}
with open('fasttext-vectors.txt', 'r', encoding='utf-8') as src:
for i, line in enumerate(src):
parts = line.strip().split(' ')
word = parts[0]
vector = map(float, parts[1:])
word2index[word] = len(word2index)
embedding_matrix[i] = np.fromiter(vector, dtype=np.float)
def text2sequence(text):
return list(map(lambda token: word2index.get(token, len(word2index) - 1), wordpunct_tokenize(str(text))))
X_train = sequence.pad_sequences(list(map(text2sequence, texts_train)), maxlen=100)
X_val = sequence.pad_sequences(list(map(text2sequence, texts_val)), maxlen=100)
X_test = sequence.pad_sequences(list(map(text2sequence, texts_test)), maxlen=100)
embed_size = 100
model = Sequential([InputLayer(input_shape=(100,), dtype='int32'), Embedding(len(embedding_matrix), embed_size), Bidirectional(LSTM(50, return_sequences=True)), GlobalMaxPool1D(), Dropout(0.3), Dense(50, activation='relu'), Dropout(0.3), Dense(6, activation='sigmoid')])
embedding = model.layers[1]
embedding.set_weights([embedding_matrix])
embedding.trainable = False
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train, batch_size=64, epochs=10, validation_data=(X_val, y_val), verbose=True, callbacks=[ModelCheckpoint('model.h5', save_best_only=True), EarlyStopping(patience=3)])
model.load_weights('model.h5')
test_prediction = model.predict(X_test, verbose=True) | code |
2003266/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
train.fillna('nan')
train.head() | code |
2003266/cell_43 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from collections import OrderedDict
from itertools import chain
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Embedding, Dropout, LSTM, Bidirectional, GlobalMaxPool1D, InputLayer, BatchNormalization, Activation
from keras.models import Sequential
from keras.preprocessing import text, sequence
from nltk.tokenize import wordpunct_tokenize
from sklearn.metrics import confusion_matrix, log_loss
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
train.fillna('nan')
test = pd.read_csv('../input/test.csv')
test.fillna('nan')
submission = pd.read_csv('../input/sample_submission.csv')
targets = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = np.array(train[targets])
texts = np.array(train['comment_text'])
texts_test = np.array(test['comment_text'])
label_mapping = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 1, 0], [0, 0, 1, 0, 1, 1], [0, 0, 1, 1, 0, 1], [0, 0, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 0], [0, 1, 0, 1, 0, 1], [0, 1, 0, 1, 1, 0], [0, 1, 0, 1, 1, 1], [0, 1, 1, 0, 0, 0], [0, 1, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0], [0, 1, 1, 0, 1, 1], [0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1], [0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1], [1, 0, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1], [1, 0, 0, 1, 0, 0], [1, 0, 0, 1, 0, 1], [1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 1, 1], [1, 0, 1, 0, 0, 0], [1, 0, 1, 0, 0, 1], [1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 1], [1, 0, 1, 1, 0, 0], [1, 0, 1, 1, 0, 1], [1, 0, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 1], [1, 1, 0, 0, 1, 0], [1, 1, 0, 0, 1, 1], [1, 1, 0, 1, 0, 0], [1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 0], [1, 1, 0, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 1], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1]])
y_converted = np.zeros([len(y)])
for i in range(len(label_mapping)):
idx = (y == label_mapping[i]).sum(axis=1) == 6
y_converted[idx] = i
train_indices, val_indices, _, _ = train_test_split(np.fromiter(range(len(y)), dtype=np.int32), y_converted, test_size=0.1, stratify=y_converted)
with open('fasttext-embedding-train.txt', 'w', encoding='utf-8') as target:
for text in texts_train:
target.write('__label__0\t{0}\n'.format(text.strip()))
train_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
test_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
train_text_tokens = set(chain(*train_texts_tokenized))
test_text_tokens = set(chain(*test_texts_tokenized))
text_tokens = sorted(train_text_tokens | test_text_tokens)
with open('fasttext-words.txt', 'w', encoding='utf-8') as target:
for word in text_tokens:
target.write('{0}\n'.format(word.strip()))
embedding_matrix = np.zeros([len(text_tokens) + 1, 100])
word2index = {}
with open('fasttext-vectors.txt', 'r', encoding='utf-8') as src:
for i, line in enumerate(src):
parts = line.strip().split(' ')
word = parts[0]
vector = map(float, parts[1:])
word2index[word] = len(word2index)
embedding_matrix[i] = np.fromiter(vector, dtype=np.float)
def text2sequence(text):
return list(map(lambda token: word2index.get(token, len(word2index) - 1), wordpunct_tokenize(str(text))))
X_train = sequence.pad_sequences(list(map(text2sequence, texts_train)), maxlen=100)
X_val = sequence.pad_sequences(list(map(text2sequence, texts_val)), maxlen=100)
X_test = sequence.pad_sequences(list(map(text2sequence, texts_test)), maxlen=100)
embed_size = 100
model = Sequential([InputLayer(input_shape=(100,), dtype='int32'), Embedding(len(embedding_matrix), embed_size), Bidirectional(LSTM(50, return_sequences=True)), GlobalMaxPool1D(), Dropout(0.3), Dense(50, activation='relu'), Dropout(0.3), Dense(6, activation='sigmoid')])
embedding = model.layers[1]
embedding.set_weights([embedding_matrix])
embedding.trainable = False
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train, batch_size=64, epochs=10, validation_data=(X_val, y_val), verbose=True, callbacks=[ModelCheckpoint('model.h5', save_best_only=True), EarlyStopping(patience=3)])
model.load_weights('model.h5')
test_prediction = model.predict(X_test, verbose=True)
val_prediction = model.predict(X_val, verbose=True)
def show_confustion_matrix(y_true, y_pred):
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
df = pd.DataFrame(OrderedDict([('true-class', ['negative', 'positive']), ('negative-classified', [tn, fn]), ('positive-classified', [fp, tp])]))
return df.set_index('true-class')
show_confustion_matrix(y_val[:, 2], val_prediction[:, 2] > 0.5) | code |
2003266/cell_46 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from collections import OrderedDict
from itertools import chain
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Embedding, Dropout, LSTM, Bidirectional, GlobalMaxPool1D, InputLayer, BatchNormalization, Activation
from keras.models import Sequential
from keras.preprocessing import text, sequence
from nltk.tokenize import wordpunct_tokenize
from sklearn.metrics import confusion_matrix, log_loss
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
train.fillna('nan')
test = pd.read_csv('../input/test.csv')
test.fillna('nan')
submission = pd.read_csv('../input/sample_submission.csv')
targets = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = np.array(train[targets])
texts = np.array(train['comment_text'])
texts_test = np.array(test['comment_text'])
label_mapping = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 1, 0], [0, 0, 1, 0, 1, 1], [0, 0, 1, 1, 0, 1], [0, 0, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 0], [0, 1, 0, 1, 0, 1], [0, 1, 0, 1, 1, 0], [0, 1, 0, 1, 1, 1], [0, 1, 1, 0, 0, 0], [0, 1, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0], [0, 1, 1, 0, 1, 1], [0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1], [0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1], [1, 0, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1], [1, 0, 0, 1, 0, 0], [1, 0, 0, 1, 0, 1], [1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 1, 1], [1, 0, 1, 0, 0, 0], [1, 0, 1, 0, 0, 1], [1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 1], [1, 0, 1, 1, 0, 0], [1, 0, 1, 1, 0, 1], [1, 0, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 1], [1, 1, 0, 0, 1, 0], [1, 1, 0, 0, 1, 1], [1, 1, 0, 1, 0, 0], [1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 0], [1, 1, 0, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 1], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1]])
y_converted = np.zeros([len(y)])
for i in range(len(label_mapping)):
idx = (y == label_mapping[i]).sum(axis=1) == 6
y_converted[idx] = i
train_indices, val_indices, _, _ = train_test_split(np.fromiter(range(len(y)), dtype=np.int32), y_converted, test_size=0.1, stratify=y_converted)
with open('fasttext-embedding-train.txt', 'w', encoding='utf-8') as target:
for text in texts_train:
target.write('__label__0\t{0}\n'.format(text.strip()))
train_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
test_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
train_text_tokens = set(chain(*train_texts_tokenized))
test_text_tokens = set(chain(*test_texts_tokenized))
text_tokens = sorted(train_text_tokens | test_text_tokens)
with open('fasttext-words.txt', 'w', encoding='utf-8') as target:
for word in text_tokens:
target.write('{0}\n'.format(word.strip()))
embedding_matrix = np.zeros([len(text_tokens) + 1, 100])
word2index = {}
with open('fasttext-vectors.txt', 'r', encoding='utf-8') as src:
for i, line in enumerate(src):
parts = line.strip().split(' ')
word = parts[0]
vector = map(float, parts[1:])
word2index[word] = len(word2index)
embedding_matrix[i] = np.fromiter(vector, dtype=np.float)
def text2sequence(text):
return list(map(lambda token: word2index.get(token, len(word2index) - 1), wordpunct_tokenize(str(text))))
X_train = sequence.pad_sequences(list(map(text2sequence, texts_train)), maxlen=100)
X_val = sequence.pad_sequences(list(map(text2sequence, texts_val)), maxlen=100)
X_test = sequence.pad_sequences(list(map(text2sequence, texts_test)), maxlen=100)
embed_size = 100
model = Sequential([InputLayer(input_shape=(100,), dtype='int32'), Embedding(len(embedding_matrix), embed_size), Bidirectional(LSTM(50, return_sequences=True)), GlobalMaxPool1D(), Dropout(0.3), Dense(50, activation='relu'), Dropout(0.3), Dense(6, activation='sigmoid')])
embedding = model.layers[1]
embedding.set_weights([embedding_matrix])
embedding.trainable = False
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train, batch_size=64, epochs=10, validation_data=(X_val, y_val), verbose=True, callbacks=[ModelCheckpoint('model.h5', save_best_only=True), EarlyStopping(patience=3)])
model.load_weights('model.h5')
test_prediction = model.predict(X_test, verbose=True)
val_prediction = model.predict(X_val, verbose=True)
def show_confustion_matrix(y_true, y_pred):
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
df = pd.DataFrame(OrderedDict([('true-class', ['negative', 'positive']), ('negative-classified', [tn, fn]), ('positive-classified', [fp, tp])]))
return df.set_index('true-class')
show_confustion_matrix(y_val[:, 3], val_prediction[:, 3] > 0.5) | code |
2003266/cell_24 | [
"text_html_output_1.png"
] | from itertools import chain
from keras.layers import Dense, Embedding, Dropout, LSTM, Bidirectional, GlobalMaxPool1D, InputLayer, BatchNormalization, Activation
from keras.models import Sequential
from keras.preprocessing import text, sequence
from nltk.tokenize import wordpunct_tokenize
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
train.fillna('nan')
test = pd.read_csv('../input/test.csv')
test.fillna('nan')
targets = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = np.array(train[targets])
texts = np.array(train['comment_text'])
texts_test = np.array(test['comment_text'])
label_mapping = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 1, 0], [0, 0, 1, 0, 1, 1], [0, 0, 1, 1, 0, 1], [0, 0, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 0], [0, 1, 0, 1, 0, 1], [0, 1, 0, 1, 1, 0], [0, 1, 0, 1, 1, 1], [0, 1, 1, 0, 0, 0], [0, 1, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0], [0, 1, 1, 0, 1, 1], [0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1], [0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1], [1, 0, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1], [1, 0, 0, 1, 0, 0], [1, 0, 0, 1, 0, 1], [1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 1, 1], [1, 0, 1, 0, 0, 0], [1, 0, 1, 0, 0, 1], [1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 1], [1, 0, 1, 1, 0, 0], [1, 0, 1, 1, 0, 1], [1, 0, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 1], [1, 1, 0, 0, 1, 0], [1, 1, 0, 0, 1, 1], [1, 1, 0, 1, 0, 0], [1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 0], [1, 1, 0, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 1], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1]])
y_converted = np.zeros([len(y)])
for i in range(len(label_mapping)):
idx = (y == label_mapping[i]).sum(axis=1) == 6
y_converted[idx] = i
train_indices, val_indices, _, _ = train_test_split(np.fromiter(range(len(y)), dtype=np.int32), y_converted, test_size=0.1, stratify=y_converted)
with open('fasttext-embedding-train.txt', 'w', encoding='utf-8') as target:
for text in texts_train:
target.write('__label__0\t{0}\n'.format(text.strip()))
train_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
test_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
train_text_tokens = set(chain(*train_texts_tokenized))
test_text_tokens = set(chain(*test_texts_tokenized))
text_tokens = sorted(train_text_tokens | test_text_tokens)
with open('fasttext-words.txt', 'w', encoding='utf-8') as target:
for word in text_tokens:
target.write('{0}\n'.format(word.strip()))
embedding_matrix = np.zeros([len(text_tokens) + 1, 100])
word2index = {}
with open('fasttext-vectors.txt', 'r', encoding='utf-8') as src:
for i, line in enumerate(src):
parts = line.strip().split(' ')
word = parts[0]
vector = map(float, parts[1:])
word2index[word] = len(word2index)
embedding_matrix[i] = np.fromiter(vector, dtype=np.float)
embed_size = 100
model = Sequential([InputLayer(input_shape=(100,), dtype='int32'), Embedding(len(embedding_matrix), embed_size), Bidirectional(LSTM(50, return_sequences=True)), GlobalMaxPool1D(), Dropout(0.3), Dense(50, activation='relu'), Dropout(0.3), Dense(6, activation='sigmoid')])
embedding = model.layers[1]
embedding.set_weights([embedding_matrix])
embedding.trainable = False
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary() | code |
2003266/cell_37 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from collections import OrderedDict
from itertools import chain
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Embedding, Dropout, LSTM, Bidirectional, GlobalMaxPool1D, InputLayer, BatchNormalization, Activation
from keras.models import Sequential
from keras.preprocessing import text, sequence
from nltk.tokenize import wordpunct_tokenize
from sklearn.metrics import confusion_matrix, log_loss
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
train.fillna('nan')
test = pd.read_csv('../input/test.csv')
test.fillna('nan')
submission = pd.read_csv('../input/sample_submission.csv')
targets = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = np.array(train[targets])
texts = np.array(train['comment_text'])
texts_test = np.array(test['comment_text'])
label_mapping = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 1, 0], [0, 0, 1, 0, 1, 1], [0, 0, 1, 1, 0, 1], [0, 0, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 0], [0, 1, 0, 1, 0, 1], [0, 1, 0, 1, 1, 0], [0, 1, 0, 1, 1, 1], [0, 1, 1, 0, 0, 0], [0, 1, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0], [0, 1, 1, 0, 1, 1], [0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1], [0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1], [1, 0, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1], [1, 0, 0, 1, 0, 0], [1, 0, 0, 1, 0, 1], [1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 1, 1], [1, 0, 1, 0, 0, 0], [1, 0, 1, 0, 0, 1], [1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 1], [1, 0, 1, 1, 0, 0], [1, 0, 1, 1, 0, 1], [1, 0, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 1], [1, 1, 0, 0, 1, 0], [1, 1, 0, 0, 1, 1], [1, 1, 0, 1, 0, 0], [1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 0], [1, 1, 0, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 1], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1]])
y_converted = np.zeros([len(y)])
for i in range(len(label_mapping)):
idx = (y == label_mapping[i]).sum(axis=1) == 6
y_converted[idx] = i
train_indices, val_indices, _, _ = train_test_split(np.fromiter(range(len(y)), dtype=np.int32), y_converted, test_size=0.1, stratify=y_converted)
with open('fasttext-embedding-train.txt', 'w', encoding='utf-8') as target:
for text in texts_train:
target.write('__label__0\t{0}\n'.format(text.strip()))
train_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
test_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
train_text_tokens = set(chain(*train_texts_tokenized))
test_text_tokens = set(chain(*test_texts_tokenized))
text_tokens = sorted(train_text_tokens | test_text_tokens)
with open('fasttext-words.txt', 'w', encoding='utf-8') as target:
for word in text_tokens:
target.write('{0}\n'.format(word.strip()))
embedding_matrix = np.zeros([len(text_tokens) + 1, 100])
word2index = {}
with open('fasttext-vectors.txt', 'r', encoding='utf-8') as src:
for i, line in enumerate(src):
parts = line.strip().split(' ')
word = parts[0]
vector = map(float, parts[1:])
word2index[word] = len(word2index)
embedding_matrix[i] = np.fromiter(vector, dtype=np.float)
def text2sequence(text):
return list(map(lambda token: word2index.get(token, len(word2index) - 1), wordpunct_tokenize(str(text))))
X_train = sequence.pad_sequences(list(map(text2sequence, texts_train)), maxlen=100)
X_val = sequence.pad_sequences(list(map(text2sequence, texts_val)), maxlen=100)
X_test = sequence.pad_sequences(list(map(text2sequence, texts_test)), maxlen=100)
embed_size = 100
model = Sequential([InputLayer(input_shape=(100,), dtype='int32'), Embedding(len(embedding_matrix), embed_size), Bidirectional(LSTM(50, return_sequences=True)), GlobalMaxPool1D(), Dropout(0.3), Dense(50, activation='relu'), Dropout(0.3), Dense(6, activation='sigmoid')])
embedding = model.layers[1]
embedding.set_weights([embedding_matrix])
embedding.trainable = False
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train, batch_size=64, epochs=10, validation_data=(X_val, y_val), verbose=True, callbacks=[ModelCheckpoint('model.h5', save_best_only=True), EarlyStopping(patience=3)])
model.load_weights('model.h5')
test_prediction = model.predict(X_test, verbose=True)
val_prediction = model.predict(X_val, verbose=True)
def show_confustion_matrix(y_true, y_pred):
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
df = pd.DataFrame(OrderedDict([('true-class', ['negative', 'positive']), ('negative-classified', [tn, fn]), ('positive-classified', [fp, tp])]))
return df.set_index('true-class')
show_confustion_matrix(y_val[:, 0], val_prediction[:, 0] > 0.5) | code |
2003266/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
train.fillna('nan')
test = pd.read_csv('../input/test.csv')
test.fillna('nan')
submission = pd.read_csv('../input/sample_submission.csv')
submission.head() | code |
2003266/cell_36 | [
"text_html_output_1.png"
] | from itertools import chain
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Embedding, Dropout, LSTM, Bidirectional, GlobalMaxPool1D, InputLayer, BatchNormalization, Activation
from keras.models import Sequential
from keras.preprocessing import text, sequence
from nltk.tokenize import wordpunct_tokenize
from sklearn.metrics import confusion_matrix, log_loss
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
train.fillna('nan')
test = pd.read_csv('../input/test.csv')
test.fillna('nan')
targets = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
y = np.array(train[targets])
texts = np.array(train['comment_text'])
texts_test = np.array(test['comment_text'])
label_mapping = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 1], [0, 0, 0, 1, 1, 0], [0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 1, 0], [0, 0, 1, 0, 1, 1], [0, 0, 1, 1, 0, 1], [0, 0, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1], [0, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 0], [0, 1, 0, 1, 0, 1], [0, 1, 0, 1, 1, 0], [0, 1, 0, 1, 1, 1], [0, 1, 1, 0, 0, 0], [0, 1, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0], [0, 1, 1, 0, 1, 1], [0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 1], [0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1], [1, 0, 0, 0, 1, 0], [1, 0, 0, 0, 1, 1], [1, 0, 0, 1, 0, 0], [1, 0, 0, 1, 0, 1], [1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 1, 1], [1, 0, 1, 0, 0, 0], [1, 0, 1, 0, 0, 1], [1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 1], [1, 0, 1, 1, 0, 0], [1, 0, 1, 1, 0, 1], [1, 0, 1, 1, 1, 0], [1, 0, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 1], [1, 1, 0, 0, 1, 0], [1, 1, 0, 0, 1, 1], [1, 1, 0, 1, 0, 0], [1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 1, 0], [1, 1, 0, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 1], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1]])
y_converted = np.zeros([len(y)])
for i in range(len(label_mapping)):
idx = (y == label_mapping[i]).sum(axis=1) == 6
y_converted[idx] = i
train_indices, val_indices, _, _ = train_test_split(np.fromiter(range(len(y)), dtype=np.int32), y_converted, test_size=0.1, stratify=y_converted)
with open('fasttext-embedding-train.txt', 'w', encoding='utf-8') as target:
for text in texts_train:
target.write('__label__0\t{0}\n'.format(text.strip()))
train_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
test_texts_tokenized = map(wordpunct_tokenize, train['comment_text'])
train_text_tokens = set(chain(*train_texts_tokenized))
test_text_tokens = set(chain(*test_texts_tokenized))
text_tokens = sorted(train_text_tokens | test_text_tokens)
with open('fasttext-words.txt', 'w', encoding='utf-8') as target:
for word in text_tokens:
target.write('{0}\n'.format(word.strip()))
embedding_matrix = np.zeros([len(text_tokens) + 1, 100])
word2index = {}
with open('fasttext-vectors.txt', 'r', encoding='utf-8') as src:
for i, line in enumerate(src):
parts = line.strip().split(' ')
word = parts[0]
vector = map(float, parts[1:])
word2index[word] = len(word2index)
embedding_matrix[i] = np.fromiter(vector, dtype=np.float)
def text2sequence(text):
return list(map(lambda token: word2index.get(token, len(word2index) - 1), wordpunct_tokenize(str(text))))
X_train = sequence.pad_sequences(list(map(text2sequence, texts_train)), maxlen=100)
X_val = sequence.pad_sequences(list(map(text2sequence, texts_val)), maxlen=100)
X_test = sequence.pad_sequences(list(map(text2sequence, texts_test)), maxlen=100)
embed_size = 100
model = Sequential([InputLayer(input_shape=(100,), dtype='int32'), Embedding(len(embedding_matrix), embed_size), Bidirectional(LSTM(50, return_sequences=True)), GlobalMaxPool1D(), Dropout(0.3), Dense(50, activation='relu'), Dropout(0.3), Dense(6, activation='sigmoid')])
embedding = model.layers[1]
embedding.set_weights([embedding_matrix])
embedding.trainable = False
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train, batch_size=64, epochs=10, validation_data=(X_val, y_val), verbose=True, callbacks=[ModelCheckpoint('model.h5', save_best_only=True), EarlyStopping(patience=3)])
model.load_weights('model.h5')
test_prediction = model.predict(X_test, verbose=True)
val_prediction = model.predict(X_val, verbose=True)
log_loss(y_val[:, 0], val_prediction[:, 0]) | code |
90123657/cell_13 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
df.shape
df = df.drop('company', axis=1)
ind = df['adr'].idxmax()
ind
name = df.iloc[ind]['name']
amount = df.iloc[ind]['adr']
np.around(df['adr'].mean(), 2)
np.around((df['stays_in_week_nights'] + df['stays_in_weekend_nights']).mean(), 2) | code |
90123657/cell_9 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
df.shape
df = df.drop('company', axis=1)
ind = df['adr'].idxmax()
ind
name = df.iloc[ind]['name']
amount = df.iloc[ind]['adr']
print(f'name: {name}\namount = {amount}') | code |
90123657/cell_25 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
df.shape
df = df.drop('company', axis=1)
ind = df['adr'].idxmax()
ind
name = df.iloc[ind]['name']
amount = df.iloc[ind]['adr']
df['last name'] = df['name'].apply(lambda x: x[x.index(' ') + 1:])
idx = (df['children'] + df['babies']).idxmax()
maximum = (df['children'] + df['babies']).iloc[idx]
df['phone-number'].apply(lambda x: x[:3]).value_counts()[:5] | code |
90123657/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
df.shape | code |
90123657/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
df.shape
df = df.drop('company', axis=1)
ind = df['adr'].idxmax()
ind
name = df.iloc[ind]['name']
amount = df.iloc[ind]['adr']
idx = (df['children'] + df['babies']).idxmax()
maximum = (df['children'] + df['babies']).iloc[idx]
maximum | code |
90123657/cell_6 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
df.shape
df = df.drop('company', axis=1)
df['country'].value_counts()[:5] | code |
90123657/cell_29 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme(style='whitegrid')
df = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
df.shape
df = df.drop('company', axis=1)
ind = df['adr'].idxmax()
ind
name = df.iloc[ind]['name']
amount = df.iloc[ind]['adr']
idx = (df['children'] + df['babies']).idxmax()
maximum = (df['children'] + df['babies']).iloc[idx]
fig = plt.figure ( figsize = (10,5))
ax1 = fig.add_axes ([0,0,1,1])
ax2 = fig.add_axes ([0.75,0,1,1])
sns.countplot(data=df, x='arrival_date_month', ax=ax1,
hue='arrival_date_year', order=df['arrival_date_month'].value_counts().index, palette="Set1")
years = dict(df['arrival_date_year'].value_counts())
ax2.pie(x=list(years.values()), labels=list(years.keys()), explode=[0.03]*3, autopct='%1.1f', shadow=True);
plt.figure(figsize=(13, 8))
sns.countplot(data=df, x='agent', order=df['agent'].value_counts().keys(), palette='Set1').set_xlim(0, 9)
plt.show() | code |
90123657/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
df.head() | code |
90123657/cell_11 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
df.shape
df = df.drop('company', axis=1)
ind = df['adr'].idxmax()
ind
name = df.iloc[ind]['name']
amount = df.iloc[ind]['adr']
np.around(df['adr'].mean(), 2) | code |
90123657/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
df.shape
df = df.drop('company', axis=1)
ind = df['adr'].idxmax()
ind
name = df.iloc[ind]['name']
amount = df.iloc[ind]['adr']
df['last name'].value_counts()[:5] | code |
90123657/cell_7 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
df.shape
df = df.drop('company', axis=1)
ind = df['adr'].idxmax()
ind | code |
90123657/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
df.shape
df = df.drop('company', axis=1)
ind = df['adr'].idxmax()
ind
name = df.iloc[ind]['name']
amount = df.iloc[ind]['adr']
df.head() | code |
90123657/cell_28 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme(style='whitegrid')
df = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
df.shape
df = df.drop('company', axis=1)
ind = df['adr'].idxmax()
ind
name = df.iloc[ind]['name']
amount = df.iloc[ind]['adr']
idx = (df['children'] + df['babies']).idxmax()
maximum = (df['children'] + df['babies']).iloc[idx]
fig = plt.figure ( figsize = (10,5))
ax1 = fig.add_axes ([0,0,1,1])
ax2 = fig.add_axes ([0.75,0,1,1])
sns.countplot(data=df, x='arrival_date_month', ax=ax1,
hue='arrival_date_year', order=df['arrival_date_month'].value_counts().index, palette="Set1")
years = dict(df['arrival_date_year'].value_counts())
ax2.pie(x=list(years.values()), labels=list(years.keys()), explode=[0.03]*3, autopct='%1.1f', shadow=True);
plt.figure(figsize=(13, 8))
sns.countplot(data=df, x='country', order=df['country'].value_counts().keys(), palette='Set2').set_xlim(0, 9)
plt.show() | code |
90123657/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
df.shape
df = df.drop('company', axis=1)
ind = df['adr'].idxmax()
ind
name = df.iloc[ind]['name']
amount = df.iloc[ind]['adr']
df[df['total_of_special_requests'] == 5][['name', 'email']] | code |
90123657/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
df.info() | code |
90123657/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
df.shape
df = df.drop('company', axis=1)
ind = df['adr'].idxmax()
ind
name = df.iloc[ind]['name']
amount = df.iloc[ind]['adr']
idx = (df['children'] + df['babies']).idxmax()
maximum = (df['children'] + df['babies']).iloc[idx]
for i in range(idx, 119390):
if df.iloc[i]['babies'] + df.iloc[i]['children'] == maximum:
print(f"name: {df.iloc[i]['name']}\nindex = {i}\n") | code |
90123657/cell_27 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme(style='whitegrid')
df = pd.read_csv('../input/hotel-booking/hotel_booking.csv')
df.shape
df = df.drop('company', axis=1)
ind = df['adr'].idxmax()
ind
name = df.iloc[ind]['name']
amount = df.iloc[ind]['adr']
idx = (df['children'] + df['babies']).idxmax()
maximum = (df['children'] + df['babies']).iloc[idx]
fig = plt.figure(figsize=(10, 5))
ax1 = fig.add_axes([0, 0, 1, 1])
ax2 = fig.add_axes([0.75, 0, 1, 1])
sns.countplot(data=df, x='arrival_date_month', ax=ax1, hue='arrival_date_year', order=df['arrival_date_month'].value_counts().index, palette='Set1')
years = dict(df['arrival_date_year'].value_counts())
ax2.pie(x=list(years.values()), labels=list(years.keys()), explode=[0.03] * 3, autopct='%1.1f', shadow=True) | code |