path (string, lengths 13-17) | screenshot_names (sequence, lengths 1-873) | code (string, lengths 0-40.4k) | cell_type (1 class)
---|---|---|---|
32062628/cell_11 | ["text_html_output_1.png"] | from IPython.core.display import display, HTML
from copy import deepcopy
from spacy.matcher import Matcher
from spacy.matcher import PhraseMatcher
import gc
import json
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
import spacy
import time
SAMPLE_SIZE_BIORXIV = 100
SAMPLE_SIZE_COMM = 100
SAMPLE_SIZE_NON_COMM = 100
SAMPLE_SIZE_CUSTOM_LICENSE = 100
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import json
from pprint import pprint
from copy import deepcopy
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import spacy
from spacy.matcher import PhraseMatcher
from spacy.matcher import Matcher
nlp = spacy.load('en_core_web_sm')
import torch
import scipy.spatial
import time
import gc
import plotly.express as px
from IPython.core.display import display, HTML
RUN_MODE = "SUBSET" #"ALL"
RUN_SAMPLES = 100
BIORXIV = "biorxiv"
COMM = "comm"
NON_COMM = "non_comm"
CUSTOM_LICENSE = "custom_license"
FILE_BIORXIV_ARTICLES_INFO = "biorxiv_article_info.txt"
FILE_COMM_ARTICLES_INFO = "comm_article_info.txt"
FILE_NON_COMM_ARTICLES_INFO = "non_comm_article_info.txt"
FILE_CUSTOM_LICENSE_ARTICLES_INFO = "custom_license_article_info.txt"
lst_url_exclusions = ['//github.com', 'https://doi.org','https://doi.org/10','perpetuity.is', 'https://doi.org/10.1101/2020.03', 'https://doi.org/10.1101/2020.04']
## Functions to parse the JSON files into clean DataFrames; four types of datasets (biorxiv, comm, non_comm, custom_license) are available here.
def save_article_info(obj, filename):
with open(filename, 'a') as the_file:
the_file.write("# PAPER_ID ----- : " + obj.paper_id + "\n")
the_file.write("# TITLE -----------: " + obj.title + "\n")
the_file.write("# RELEVANT SENTENCES ----------:")
the_file.write("\n")
for item in obj.lst_sentences:
the_file.write("\n ==>")
the_file.write("%s " % item)
the_file.write("\n")
if (len(obj.lst_rapid_assessment_sentences) > 0):
the_file.write("# ASSESSMENT RELATED SENTENCES ----------:")
the_file.write("\n")
for item in obj.lst_rapid_assessment_sentences:
the_file.write("\n ==>")
the_file.write("%s " % item)
the_file.write("\n")
if (len(obj.lst_rapid_design_sentences) > 0):
the_file.write("# DESIGN RELATED SENTENCES ----------:")
the_file.write("\n")
for item in obj.lst_rapid_design_sentences:
the_file.write("\n ==>")
the_file.write("%s " % item)
the_file.write("\n")
if (len(obj.lst_design_experiments_sentences) > 0):
the_file.write("# EXPERIMENT RELATED SENTENCES ----------:")
the_file.write("\n")
for item in obj.lst_design_experiments_sentences:
the_file.write("\n ==>")
the_file.write("%s " % item)
the_file.write("\n")
the_file.write("# URL -------------:")
for item in obj.lst_urls:
the_file.write("\n ==>")
the_file.write("%s " % item)
if (len(obj.lst_urls)==0):
the_file.write("No urls found.")
the_file.write("\n")
author_out = obj.authors
if (obj.authors.strip() == ""):
author_out = "NOT_FOUND"
the_file.write("\n")
the_file.write("# AUTHORS -----------: " + obj.authors + "\n")
the_file.write("# SCORE -----------: " + str(obj.score) + "\n")
the_file.write("# =========================================================: " + "\n")
def format_name(author):
middle_name = " ".join(author['middle'])
if author['middle']:
return " ".join([author['first'], middle_name, author['last']])
else:
return " ".join([author['first'], author['last']])
def format_affiliation(affiliation):
text = []
location = affiliation.get('location')
if location:
text.extend(list(affiliation['location'].values()))
institution = affiliation.get('institution')
if institution:
text = [institution] + text
return ", ".join(text)
def format_authors(authors, with_affiliation=False):
name_ls = []
for author in authors:
name = format_name(author)
if with_affiliation:
affiliation = format_affiliation(author['affiliation'])
if affiliation:
name_ls.append(f"{name} ({affiliation})")
else:
name_ls.append(name)
else:
name_ls.append(name)
return ", ".join(name_ls)
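# Concatenate the body_text entries into a single string, grouping text by section name (sections with empty names are skipped) and prefixing each block with its upper-cased section title.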
def format_body(body_text):
texts = [(di['section'], di['text']) for di in body_text]
texts_di = {di['section']: "" for di in body_text}
for section, text in texts:
texts_di[section] += text
body = ""
for section, text in texts_di.items():
if (section.strip() != ""):
body += section.upper()
body += " : "
body += text
body += "."
return body
def format_bib(bibs):
if type(bibs) == dict:
bibs = list(bibs.values())
bibs = deepcopy(bibs)
formatted = []
for bib in bibs:
bib['authors'] = format_authors(
bib['authors'],
with_affiliation=False
)
formatted_ls = [str(bib[k]) for k in ['title', 'authors', 'venue', 'year']]
formatted.append(", ".join(formatted_ls))
return "; ".join(formatted)
def load_files(dirname, SAMPLE_SIZE = 50):
filenames = os.listdir(dirname)
lst_orig_count = len(filenames)
raw_files = []
if (RUN_MODE == "SUBSET"):
filenames = filenames[0: SAMPLE_SIZE]
for filename in (filenames):
filename = dirname + filename
file = json.load(open(filename, 'rb'))
raw_files.append(file)
return (raw_files, lst_orig_count)
def generate_clean_df(all_files):
cleaned_files = []
for file in (all_files):
features = [
file['paper_id'],
file['metadata']['title'],
format_authors(file['metadata']['authors']),
format_authors(file['metadata']['authors'],
with_affiliation=True),
format_body(file['abstract']),
format_body(file['body_text']),
format_bib(file['bib_entries']),
file['metadata']['authors'],
file['bib_entries']
]
cleaned_files.append(features)
col_names = ['paper_id', 'title', 'authors',
'affiliations', 'abstract', 'text',
'bibliography','raw_authors','raw_bibliography']
clean_df = pd.DataFrame(cleaned_files, columns=col_names)
clean_df.head()
return clean_df
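# Rule-based title matcher: looks for "non pharmaceutical intervention(s)", optionally with punctuation between the tokens, and returns the match count plus snippets (a few tokens of context around each hit) joined with '| '.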
def find_phrases_in_title_npi(doc , span_start = 5 , span_end = 5):
matcher = Matcher(nlp.vocab)
pattern1= [{'LOWER': 'non'}, {'LOWER': 'pharmaceutical'}, {'LOWER': 'intervention'}]
pattern2 = [{'LOWER': 'non'}, {'LOWER': 'pharmaceutical'}, {'LOWER': 'interventions'}]
pattern3 = [{'LOWER': 'non'}, {'IS_PUNCT': True, 'OP' : '*'} , {'LOWER': 'pharmaceutical'}, {'IS_PUNCT': True, 'OP' : '*'}, {'LOWER': 'interventions'}]
pattern4 = [{'LOWER': 'non'}, {'IS_PUNCT': True, 'OP' : '*'} , {'LOWER': 'pharmaceutical'}, {'IS_PUNCT': True, 'OP' : '*'}, {'LOWER': 'intervention'}]
lst_spans = []
#matcher.add('titlematcher', None, *phrase_patterns)
matcher.add('titlematcher', None, pattern1, pattern2, pattern3, pattern4)
found_matches = matcher(doc)
find_count = len(found_matches)
for match_id, start, end in found_matches:
string_id = nlp.vocab.strings[match_id]
end = min(end + span_end, len(doc))
start = max(start - span_start,0)
span = doc[start:end]
lst_spans.append(span.text)
snippets = '| '.join([lst for lst in lst_spans])
return find_count, snippets
def prepare_dataframe_for_nlp(df, nlp):
df.fillna('', inplace=True)
return(df)
def get_sents_from_snippets(lst_snippets, nlpdoc, paper_id):
"""
Finding full sentences when snippets are passed to this function.
"""
phrase_patterns = [nlp(text) for text in lst_snippets]
matcher = PhraseMatcher(nlp.vocab)
matcher.add('xyz', None, *phrase_patterns)
sentences = nlpdoc
res_sentences = []
for sent in sentences.sents:
found_matches = matcher(nlp(sent.text))
find_count = len(found_matches)
if len(found_matches) > 0:
res_sentences.append(sent.text)
res_sentences = list(set(res_sentences))
return(res_sentences)
def limit_text_size(text):
# if (len(text) > (10000)):
text = text[0:40000]
return(text)
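# PhraseMatcher over an arbitrary keyword list: returns the raw match count and a de-duplicated list of context snippets (span_start/span_end tokens around each hit).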
def find_phrases_in_text(doc , phrase_list, span_start = 5 , span_end = 5):
matcher = PhraseMatcher(nlp.vocab)
#print(phrase_list)
lst_spans = []
phrase_patterns = [nlp(text) for text in phrase_list]
matcher.add('covidmatcher', None, *phrase_patterns)
found_matches = matcher(doc)
find_count = len(found_matches)
for match_id, start, end in found_matches:
string_id = nlp.vocab.strings[match_id]
end = min(end + span_end, len(doc) - 1)
start = max(start - span_start,0)
span = doc[start:end]
lst_spans.append(span.text)
#print("found a match.", span.text)
snippets = '| '.join([lst for lst in lst_spans])
ret_list = list(set(lst_spans))
return(find_count, ret_list)
def generate_data(dir_path, SAMPLE_SIZE = 50):
_files, count_files_orig = load_files(dir_path, SAMPLE_SIZE)
df = generate_clean_df(_files)
return(df, count_files_orig)
def add_lists(lst1, lst2, lst3, lst4):
lst_final = list(lst1) + list(lst2) + list(lst3) + list(lst4)
return(lst_final)
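# Scoring: title matches are weighted 30x and explicit "NPI" mentions 10x; the raw counts for "intervention", "non" and "pharmaceutical" are only added when all three occur in the text.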
def do_scoring_npi(title_find_count
, text_find_count_in
, text_find_count_ph
, text_find_count_non
, text_find_count_npi):
if ((text_find_count_in > 0) & (text_find_count_ph > 0) & (text_find_count_non > 0)):
ret = 30 * title_find_count + 10 * text_find_count_npi + text_find_count_in + text_find_count_non + text_find_count_ph
else:
ret = 30 * title_find_count + 10 * text_find_count_npi
return(ret)
def process_url(url):
ret = url
#print(url in lst_url_exclusions)
if url in lst_url_exclusions:
ret = ''
return(ret)
def is_main_url(d):
if d.startswith('https://doi.org/'): # Could use /10.1101
return (True)
else:
return (False)
def find_url_in_text(doc):
main_url = "NOT FOUND"
lst_urls = []
matcher = Matcher(nlp.vocab)
pattern = [{'LIKE_URL': True}]
matcher.add('url', None, pattern)
found_matches = matcher(doc)
#print(found_matches)
for match_id, start, end in found_matches:
url = doc[start:end]
url = process_url(url.text)
#print(url)
if (url != ""):
lst_urls.append(url)
if is_main_url(url):
main_url = url
return(main_url , list(set(lst_urls)))
def get_summary_row_for_df(processed_articles, count_find, count_fund_infra, count_cost_benefits, module):
dict_row ={"Module":module, "Processed": processed_articles, "Found": count_find
, "Found Funding and Infrastructure": count_fund_infra
, "Count Cost benefits": count_cost_benefits}
return(dict_row)
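# End-to-end processing for one corpus subset: load a sample of JSON files, build the clean DataFrame, run the title and keyword matchers on the first 40k characters of each article, then score and sort the results.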
def process_a_module(path, SAMPLE_SIZE = 50, MODULE = "provide-module"):
df_data, count_orig_files = generate_data(path, SAMPLE_SIZE)
df_master = df_data.copy()[["paper_id", "title"]]
df_data = prepare_dataframe_for_nlp(df_data, nlp)
df_data['small_text'] = list(map(limit_text_size, (df_data['text'])))
df_data['nlp_title'] = list(map(nlp, (df_data['title'])))
with nlp.disable_pipes("tagger", "parser", "ner"):
df_data['nlp_snall_text'] = list(map(nlp, (df_data['small_text'])))
df_master['title_find_count'], df_master['title_found_snippets'] = zip(*df_data['nlp_title'].apply(lambda title: find_phrases_in_title_npi((title))))
phrase_list = [u"intervention"]
df_master['text_find_count_in'], df_master['text_found_snippets_in'] = zip(*df_data['nlp_snall_text'].apply(lambda nlptext: find_phrases_in_text((nlptext), phrase_list)))
phrase_list = [u"pharmaceutical"]
df_master['text_find_count_ph'], df_master['text_found_snippets_ph'] = zip(*df_data['nlp_snall_text'].apply(lambda nlptext: find_phrases_in_text((nlptext), phrase_list)))
phrase_list = [u"non"]
df_master['text_find_count_non'], df_master['text_found_snippets_non'] = zip(*df_data['nlp_snall_text'].apply(lambda nlptext: find_phrases_in_text((nlptext), phrase_list)))
phrase_list = [u"NPI"]
df_master['text_find_count_npi'], df_master['text_found_snippets_npi'] = zip(*df_data['nlp_snall_text'].apply(lambda nlptext: find_phrases_in_text((nlptext), phrase_list)))
df_master['lst_snippets'] = list(map(add_lists
, (df_master['text_found_snippets_ph'])
, (df_master['text_found_snippets_npi'])
, (df_master['text_found_snippets_non'])
, (df_master['text_found_snippets_in'])
) )
df_master["score"] = list(map(do_scoring_npi
, df_master['title_find_count']
, df_master['text_find_count_in']
, df_master['text_find_count_ph']
, df_master['text_find_count_non']
, df_master['text_find_count_npi']
))
df_master = df_master.sort_values('score', ascending = False)
df_master['module'] = MODULE
df_data['module'] = MODULE
_ = gc.collect()
return(df_master, df_data, count_orig_files)
def get_paper_info(paper_id, journal):
if (journal == BIORXIV):
df = df_biorxiv[df_biorxiv['paper_id'] == paper_id]
if (journal == COMM):
df = df_comm[df_comm['paper_id'] == paper_id]
if (journal == NON_COMM):
df = df_non_comm[df_non_comm['paper_id'] == paper_id]
if (journal == CUSTOM_LICENSE):
df = df_custom_license[df_custom_license['paper_id'] == paper_id]
text = df.iloc[0]['text']#[0:5000]
title = df.iloc[0]['title']
authors = df.iloc[0]['authors']
return(text, title, authors)
def print_list(lst, number_to_print = 5, shuffle = True):
if len(lst) < number_to_print:
number_to_print = len(lst)
for i in range(-1*number_to_print, 0):
print( lst[i])
def get_stats_from_articles(lst_articles):
count_articles = 0
count_cost_benefits = 0
count_fund_infra = 0
lst_cost_benefits = []
lst_fund_infra = []
for obj in lst_articles:
count_articles = count_articles + 1
if len(obj.lst_cost_benefits_sentences) > 0:
count_cost_benefits = count_cost_benefits + 1
lst_cost_benefits.append((obj.title, obj.lst_urls, obj.score))
if len(obj.lst_funding_infra_sentences) > 0:
count_fund_infra = count_fund_infra + 1
lst_fund_infra.append((obj.title, obj.lst_urls, obj.score))
return(count_articles, count_cost_benefits, count_fund_infra, lst_cost_benefits)
def create_file(filename):
with open(filename, 'w') as the_file:
the_file.close()
def write_to_file(filename, Text):
with open(filename, 'a') as the_file:
the_file.write(Text)
the_file.close()
def get_nlp_text_for_paper_id(paper_id, module):
text, x, y = get_paper_info(paper_id, module)
with nlp.disable_pipes("tagger", "parser", "ner"):
return(nlp(text))
def get_td_string():
tdstring = '<td style="text-align: left; vertical-align: middle; font-size:1.2em;">'
return(tdstring)
def get_sentence_tr(sent):
row = get_td_string() + f'{sent}</td></tr>'
return(row)
#return( f'<tr>' + f'<td align = "left">{sent}</td>' + '<td> </td></tr>')
def display_article(serial , title, url , sentences, score , lst_other_keywords
, lst_cost_benefits, lst_funding_infra_sentences
, lst_all_urls, authors, publish_date, npi_count, paper_id):
if (publish_date == NOT_FOUND):
publish_date = "N/A"
if (url != "NOT FOUND"):
link_text = f'<a href="{url}" target="_blank">{url}</a>'
else:
link_text = "N/A"
text = f'<h3>{serial}: {title}</h3><table border = "1">'
tdstring = get_td_string() #'<td style="text-align: left; vertical-align: middle;">'
text_info = f' <b>Score:</b> {score} <b>Date:</b> {publish_date} NPI Count:{npi_count}'
text_1 = '<tr>' + tdstring + '<b>URL:</b>' + link_text + f'{text_info}</td>' + '</tr>'
text_paper = '<tr>' + tdstring + '<b>Paper ID:</b>'+ f'{paper_id}</td>' + '</tr>'
text_author = '<tr>' + tdstring + f'<b>Author(s): </b>{authors}</td></tr>'
text = text + text_1 + text_paper + text_author
#text += ''.join([f'<td><b>{col}</b></td>' for col in df.columns.values]) + '</tr>'
i = 0
if (len(sentences) > 0):
#text += f'<tr><td align ="left"><b>Relevant Sentences</b></td></tr>'
text += tdstring + '<b>Relevant Sentences</b></td></tr>'
for sent in sentences:
i = i + 1
text += get_sentence_tr(sent)
if (len(lst_other_keywords) > 0):
text += tdstring + '<b>Sentences containing keywords - "rapid", "design", "experiments", "assessment" (and/or)</b></td></tr>'
for sent in lst_other_keywords:
i = i + 1
text += get_sentence_tr(sent)
if (len(lst_cost_benefits) > 0):
text += tdstring + '<b>Sentences containing keywords - "cost", "benefits" (and/or)</b></td></tr>'
for sent in lst_cost_benefits:
i = i + 1
text += get_sentence_tr(sent)
if (len(lst_funding_infra_sentences) > 0):
text += tdstring + '<b>Sentences containing keywords - "funding", "infra","authorities" (and/or) </b></td></tr>'
for sent in lst_funding_infra_sentences:
i = i + 1
text += get_sentence_tr(sent)
if (len(lst_all_urls) > 0):
text += tdstring + '<b>All urls which appear in the article</b></td></tr>'
str_urls = '<br> '.join([u for u in lst_all_urls])
text += get_sentence_tr(str_urls)
text += '</table>'
display(HTML(text))
def get_df_from_article_list(lst_articles):
lst = []
serial = 0
for l in lst_articles:
serial +=1
str_rel = "Low"
url = l.main_url
if (l.main_url == "NOT FOUND"):
url = "N/A"
if (l.npi_count > 0):
str_rel = "High"
dict_row = {"Serial": serial, "Title":l.title, "URL": url, "Score": l.score , "Relevance": str_rel, "PaperID": l.paper_id}
lst.append(dict_row)
#print(len(lst_articles), len(lst))
return(pd.DataFrame(lst))
def get_processing_flag(module):
retval = False
if (module == BIORXIV):
if (SAMPLE_SIZE_BIORXIV != -1): retval = True
if (module == COMM):
if (SAMPLE_SIZE_COMM != -1): retval = True
if (module == NON_COMM):
if (SAMPLE_SIZE_NON_COMM != -1): retval = True
if (module == CUSTOM_LICENSE):
if (SAMPLE_SIZE_CUSTOM_LICENSE != -1): retval = True
return(retval)
def print_user_message(mess = "Please provide"):
m = f'<font size="4" color="grey">{mess}</font>'
display(HTML(m))
def display_dataframe(df, title = ""):
#tdstring = f'<td style="text-align: left; vertical-align: middle; font-size:1.2em;">{v}</td>'
if (title != ""):
text = f'<h2>{title}</h2><table><tr>'
else:
text = '<table><tr>'
text += ''.join([f'<td style="text-align: left; vertical-align: middle; font-size:1.2em;"><b>{col}</b></td>' for col in df.columns.values]) + '</tr>'
for row in df.itertuples():
#text += '<tr>' + ''.join([f'<td valign="top">{v}</td>' for v in row[1:]]) + '</tr>'
text += '<tr>' + ''.join([ f'<td style="text-align: left; vertical-align: middle; font-size:1.1em;">{v}</td>' for v in row[1:]]) + '</tr>'
text += '</table>'
display(HTML(text))
def start_td():
tdstring = '<td style="text-align: center; vertical-align: middle; font-size:1.2em;">'
return(tdstring)
def end_td():
tdstring = '</td>'
return(tdstring)
def get_bolded(tstr):
tdstring = '<b>'+ tstr + '</b>'
return(tdstring)
def get_sentence_tr_vs(sent):
row = get_td_string() + f'{sent}</td></tr>'
return(row)
#return( f'<tr>' + f'<td align = "left">{sent}</td>' + '<td> </td></tr>')
def display_data_processing_info():
text = f'<h3>Table: Data Processing Information</h3><table border = "1">'
td_header_1 = start_td() + get_bolded("Module") + end_td()
td_header_2 = start_td() + get_bolded("Total articles") + end_td()
td_header_3 = start_td() + get_bolded("Processed articles") + end_td()
td_header_4 = start_td() + get_bolded("Number of articles of interest") + end_td()
td_header_5 = start_td() + get_bolded("Excerpts of interest") + end_td()
text_header = "\n<tr>" + td_header_1 + td_header_2 + td_header_3 + td_header_4 + td_header_5+ "</tr>\n"
#text_header = text_header + "<tr>" + start_td() + get_bolded("total articles") + end_td() + "</tr>\n"
text = text + text_header
if get_processing_flag(BIORXIV):
td_data_1 = start_td() + "Biorxiv/Medrxiv" + end_td()
td_data_2 = start_td() + str(count_biorxiv_orig) + end_td()
td_data_3 = start_td() + str(df_biorxiv.shape[0]) + end_td()
td_data_4 = start_td() + str(df_biorxiv_filter.shape[0]) + end_td()
td_data_5 = start_td() + str(sum_bio_sents) + end_td()
text_row = "\n<tr>" + td_data_1 + td_data_2 + td_data_3 + td_data_4 + td_data_5 + "</tr>\n"
text = text + text_row
if get_processing_flag(NON_COMM):
td_data_1 = start_td() + "Non Comm" + end_td()
td_data_2 = start_td() + str(count_non_comm_orig) + end_td()
td_data_3 = start_td() + str(df_non_comm.shape[0]) + end_td()
td_data_4 = start_td() + str(df_non_comm_filter.shape[0]) + end_td()
td_data_5 = start_td() + str(sum_non_comm_sents) + end_td()
text_row = "\n<tr>" + td_data_1 + td_data_2 + td_data_3 + td_data_4 + td_data_5 + "</tr>\n"
text = text + text_row
if get_processing_flag(COMM):
td_data_1 = start_td() + "Comm" + end_td()
td_data_2 = start_td() + str(count_comm_orig) + end_td()
td_data_3 = start_td() + str(df_comm.shape[0]) + end_td()
td_data_4 = start_td() + str(df_comm_filter.shape[0]) + end_td()
td_data_5 = start_td() + str(sum_comm_sents) + end_td()
text_row = "\n<tr>" + td_data_1 + td_data_2 + td_data_3 + td_data_4 + td_data_5 + "</tr>\n"
text = text + text_row
if get_processing_flag(CUSTOM_LICENSE):
td_data_1 = start_td() + "Custom License" + end_td()
td_data_2 = start_td() + str(count_custom_license_orig) + end_td()
td_data_3 = start_td() + str(df_custom_license.shape[0]) + end_td()
td_data_4 = start_td() + str(df_custom_license_filter.shape[0]) + end_td()
td_data_5 = start_td() + str(sum_cl_sents) + end_td()
text_row = "\n<tr>" + td_data_1 + td_data_2 + td_data_3 + td_data_4 + td_data_5 + "</tr>\n"
text = text + text_row
text += '\n</table>'
display(HTML(text))
NOT_FOUND = "<not found>"
def add_list(*lsts):
retlist = []
for l in lsts:
retlist =retlist + l
return(list(set(retlist)))
def get_date(paper_url):
retval = NOT_FOUND
if paper_url.startswith('https://doi.org/'): # Could use /10.1101
retval = paper_url.replace('https://doi.org/10.1101/', '')[0:10]
return(retval)
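# Holds everything extracted for a single scored paper: the full nlp doc, its URLs, the publish date parsed from the DOI URL, and the sentences matching the NPI, funding/infrastructure, cost/benefit and other keyword groups.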
class article():
def __init__(self, paper_id, score, journal, lst_snippets, npi_count):
self.publish_date = ""
self.npi_count = npi_count
self.paper_id = paper_id
self.main_url = ""
self.score = score
self.journal = journal
self.lst_sentences = []
self.lst_snippets = lst_snippets
self.nlp_text = None
self.text = None
self.lst_urls = []
self.title = None
self.authors = None
self.lst_funding_infra_snippets = []
self.lst_funding_infra_sentences = []
self.lst_cost_benefits_snippets =[]
self.lst_cost_benefits_sentences = []
self.lst_all_sentences = []
self.lst_other_keywords_snippets = []
self.lst_other_keywords_sentences = []
self.count_sentences = 0
self.initialize()
self.consolidate_all_sentences()
def consolidate_all_sentences(self):
self.lst_all_sentences = add_list(self.lst_sentences
, self.lst_funding_infra_sentences
, self.lst_cost_benefits_sentences
, self.lst_other_keywords_sentences)
self.count_sentences = len(self.lst_all_sentences)
def save_biorxiv_all_info(self):
write_to_file(FILE_BIORXIV_ARTICLES_INFO, "===================== START ===========================\n")
write_to_file(FILE_BIORXIV_ARTICLES_INFO, "TITLE:" + self.title + "\n")
write_to_file(FILE_BIORXIV_ARTICLES_INFO, "SENTENCES:" + ' \n'.join(self.lst_sentences))
write_to_file(FILE_BIORXIV_ARTICLES_INFO, "===================== END ===========================\n")
def find_url_in_text(self):
self.main_url , self.lst_urls = find_url_in_text(self.nlp_text)
def initialize(self):
self.text, self.title, self.authors = get_paper_info(self.paper_id, self.journal)
self.nlp_text = nlp(self.text)
self.find_url_in_text()
self.get_sents_from_snippets()
self.get_cost_benefits_info()
self.get_funding_infra_info()
self.get_other_keywords_info()
self.publish_date = get_date(self.main_url)
def get_sents_from_snippets(self):
self.lst_sentences = []
self.lst_sentences = get_sents_from_snippets(self.lst_snippets, self.nlp_text, self.paper_id)
def get_funding_infra_info(self):
phrase_list = [ u"funding", u"fund"
, u"authorities"
, u"infrastructure"]
count, snippets = find_phrases_in_text(self.nlp_text, phrase_list, span_start = 1, span_end = 1 )
self.lst_funding_infra_snippets = snippets
if (count > 0):
self.lst_funding_infra_sentences = get_sents_from_snippets(self.lst_funding_infra_snippets, self.nlp_text, self.paper_id)
def get_other_keywords_info(self):
phrase_list = [ u"experiment", u"rapid", u"assessment", u"design"]
count, snippets = find_phrases_in_text(self.nlp_text, phrase_list, span_start = 1, span_end = 1 )
self.lst_other_keywords_snippets = snippets
if (count > 0):
self.lst_other_keywords_sentences = get_sents_from_snippets(snippets, self.nlp_text, self.paper_id)
def get_cost_benefits_info(self):
phrase_list = [u"cost", u"benefit"]
count, snippets = find_phrases_in_text(self.nlp_text, phrase_list, span_start = 1, span_end = 1 )
self.lst_cost_benefits_snippets = snippets
if (count > 0):
self.lst_cost_benefits_sentences = get_sents_from_snippets(self.lst_cost_benefits_snippets, self.nlp_text, self.paper_id)
def info_cost_benefits(self):
if ((len(self.lst_cost_benefits_sentences) > 0) & (len(self.lst_cost_benefits_sentences) < 10)):
self.print_header()
print("Cost Benefits Information:", self.lst_cost_benefits_sentences)
print("Number of cost benefits sentences found:", len(self.lst_cost_benefits_sentences))
self.print_footer()
def print_header(self):
strformat = "================== START ===========================\n TITLE: {} \n".format(self.title)
print(strformat)
def print_footer(self):
strformat = "RELEVANT URLS:\n {} \n PAPER ID {}".format(self.lst_urls, self.paper_id)
print(strformat)
print("PaperID: ", self.paper_id , " Score:" , self.score)
print("======================= END ==========================================\n")
def print_1_basic_article_information(self):
self.print_header()
print(" -------------- PRINTING SOME EXTRACTED SENTENCES (MAX 5) Related to NPI -------------- ")
if len(self.lst_sentences) > 5:
print_list(self.lst_sentences[0:5])
#print(self.lst_sentences[0:5])
else:
print_list(self.lst_sentences)
self.print_footer()
def get_objectlist_from_df(df):
lst_objs = list(map(article,
(df['paper_id']) ,
df['score'],
df['module'],
df['lst_snippets'],
df['text_find_count_npi']))
#print("sorting")
#lst_objs.sort(key=lambda x: x.score, reverse=True)
return (lst_objs)
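# Driver: for every subset with a configured sample size, build the scored DataFrame, keep papers with score > 0, wrap them in article objects and sort by score.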
if get_processing_flag(BIORXIV):
path = '/kaggle/input/CORD-19-research-challenge/biorxiv_medrxiv/biorxiv_medrxiv/pdf_json/'
start_time = time.time()
df_biorxiv_master, df_biorxiv, count_biorxiv_orig = process_a_module(path, SAMPLE_SIZE=SAMPLE_SIZE_BIORXIV, MODULE=BIORXIV)
df_biorxiv_filter = df_biorxiv_master[df_biorxiv_master['score'] > 0].reset_index()
lst_obj_biorxiv = get_objectlist_from_df(df_biorxiv_filter)
lst_obj_biorxiv.sort(key=lambda x: x.score, reverse=True)
sum_bio_sents = sum((c.count_sentences for c in lst_obj_biorxiv))
if get_processing_flag(COMM):
path = '/kaggle/input/CORD-19-research-challenge/comm_use_subset/comm_use_subset/pdf_json/'
start_time = time.time()
df_comm_master, df_comm, count_comm_orig = process_a_module(path, SAMPLE_SIZE_COMM, COMM)
df_comm_filter = df_comm_master[df_comm_master['score'] > 0].reset_index()
lst_obj_comm = get_objectlist_from_df(df_comm_filter)
lst_obj_comm.sort(key=lambda x: x.score, reverse=True)
sum_comm_sents = sum((c.count_sentences for c in lst_obj_comm))
if get_processing_flag(NON_COMM):
path = '/kaggle/input/CORD-19-research-challenge/noncomm_use_subset/noncomm_use_subset/pdf_json/'
start_time = time.time()
df_non_comm_master, df_non_comm, count_non_comm_orig = process_a_module(path, SAMPLE_SIZE_NON_COMM, NON_COMM)
df_non_comm_filter = df_non_comm_master[df_non_comm_master['score'] > 0].reset_index()
lst_obj_non_comm = get_objectlist_from_df(df_non_comm_filter)
lst_obj_non_comm.sort(key=lambda x: x.score, reverse=True)
sum_non_comm_sents = sum((c.count_sentences for c in lst_obj_non_comm))
if get_processing_flag(CUSTOM_LICENSE):
path = '/kaggle/input/CORD-19-research-challenge/custom_license/custom_license/pdf_json/'
start_time = time.time()
df_custom_license_master, df_custom_license, count_custom_license_orig = process_a_module(path, SAMPLE_SIZE_CUSTOM_LICENSE, CUSTOM_LICENSE)
df_custom_license_filter = df_custom_license_master[df_custom_license_master['score'] > 0].reset_index()
lst_obj_custom_license = get_objectlist_from_df(df_custom_license_filter)
lst_obj_custom_license.sort(key=lambda x: x.score, reverse=True)
sum_cl_sents = sum((c.count_sentences for c in lst_obj_custom_license))
lst = []
lst_hit_ratio = []
if get_processing_flag(BIORXIV):
dict_hit_ratio = {'module': 'biorxiv', 'ratio': df_biorxiv_filter.shape[0] / df_biorxiv.shape[0], 'type': 'article_hit_ratio'}
lst_hit_ratio.append(dict_hit_ratio)
dict_hit_ratio = {'module': 'biorxiv', 'ratio': sum_bio_sents / df_biorxiv.shape[0], 'type': 'snippets_hit_ratio'}
lst_hit_ratio.append(dict_hit_ratio)
dict_row = {'count': count_biorxiv_orig, 'module': 'biorxiv', 'type': 'total'}
lst.append(dict_row)
dict_row = {'count': df_biorxiv.shape[0], 'module': 'biorxiv', 'type': 'processed'}
lst.append(dict_row)
dict_row = {'count': df_biorxiv_filter.shape[0], 'module': 'biorxiv', 'type': 'found'}
lst.append(dict_row)
dict_row = {'count': sum_bio_sents, 'module': 'biorxiv', 'type': 'excerpts'}
lst.append(dict_row)
if get_processing_flag(NON_COMM):
dict_hit_ratio = {'module': 'non_comm', 'ratio': df_non_comm_filter.shape[0] / df_non_comm.shape[0], 'type': 'article_hit_ratio'}
lst_hit_ratio.append(dict_hit_ratio)
dict_hit_ratio = {'module': 'non_comm', 'ratio': sum_non_comm_sents / df_non_comm.shape[0], 'type': 'snippets_hit_ratio'}
lst_hit_ratio.append(dict_hit_ratio)
dict_row = {'count': count_non_comm_orig, 'module': 'non_comm', 'type': 'total'}
lst.append(dict_row)
dict_row = {'count': df_non_comm.shape[0], 'module': 'non_comm', 'type': 'processed'}
lst.append(dict_row)
dict_row = {'count': df_non_comm_filter.shape[0], 'module': 'non_comm', 'type': 'found'}
lst.append(dict_row)
dict_row = {'count': sum_non_comm_sents, 'module': 'non_comm', 'type': 'excerpts'}
lst.append(dict_row)
if get_processing_flag(COMM):
dict_hit_ratio = {'module': 'comm', 'ratio': df_comm_filter.shape[0] / df_comm.shape[0], 'type': 'article_hit_ratio'}
lst_hit_ratio.append(dict_hit_ratio)
dict_hit_ratio = {'module': 'comm', 'ratio': sum_comm_sents / df_comm.shape[0], 'type': 'snippets_hit_ratio'}
lst_hit_ratio.append(dict_hit_ratio)
dict_row = {'count': count_comm_orig, 'module': 'comm', 'type': 'total'}
lst.append(dict_row)
dict_row = {'count': df_comm.shape[0], 'module': 'comm', 'type': 'processed'}
lst.append(dict_row)
dict_row = {'count': df_comm_filter.shape[0], 'module': 'comm', 'type': 'found'}
lst.append(dict_row)
dict_row = {'count': sum_comm_sents, 'module': 'comm', 'type': 'excerpts'}
lst.append(dict_row)
if get_processing_flag(CUSTOM_LICENSE):
dict_hit_ratio = {'module': 'custom_license', 'ratio': df_custom_license_filter.shape[0] / df_custom_license.shape[0], 'type': 'article_hit_ratio'}
lst_hit_ratio.append(dict_hit_ratio)
dict_hit_ratio = {'module': 'custom_license', 'ratio': sum_cl_sents / df_custom_license.shape[0], 'type': 'snippets_hit_ratio'}
lst_hit_ratio.append(dict_hit_ratio)
dict_row = {'count': count_custom_license_orig, 'module': 'custom_license', 'type': 'total'}
lst.append(dict_row)
dict_row = {'count': df_custom_license.shape[0], 'module': 'custom_license', 'type': 'processed'}
lst.append(dict_row)
dict_row = {'count': df_custom_license_filter.shape[0], 'module': 'custom_license', 'type': 'found'}
lst.append(dict_row)
dict_row = {'count': sum_cl_sents, 'module': 'custom_license', 'type': 'excerpts'}
lst.append(dict_row)
df_data = pd.DataFrame(lst)
df_hit_ratio = pd.DataFrame(lst_hit_ratio)
fig = px.bar(df_hit_ratio, x='module', y='ratio', color='type', barmode='group', title='Hit Ratio of various modules', template='plotly_dark')
fig.show() | code |
32062628/cell_8 | ["text_html_output_1.png"] | from IPython.core.display import display, HTML
from copy import deepcopy
from spacy.matcher import Matcher
from spacy.matcher import PhraseMatcher
import gc
import json
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import spacy
import time
SAMPLE_SIZE_BIORXIV = 100
SAMPLE_SIZE_COMM = 100
SAMPLE_SIZE_NON_COMM = 100
SAMPLE_SIZE_CUSTOM_LICENSE = 100
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import json
from pprint import pprint
from copy import deepcopy
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import spacy
from spacy.matcher import PhraseMatcher
from spacy.matcher import Matcher
nlp = spacy.load('en_core_web_sm')
import torch
import scipy.spatial
import time
import gc
import plotly.express as px
from IPython.core.display import display, HTML
RUN_MODE = "SUBSET" #"ALL"
RUN_SAMPLES = 100
BIORXIV = "biorxiv"
COMM = "comm"
NON_COMM = "non_comm"
CUSTOM_LICENSE = "custom_license"
FILE_BIORXIV_ARTICLES_INFO = "biorxiv_article_info.txt"
FILE_COMM_ARTICLES_INFO = "comm_article_info.txt"
FILE_NON_COMM_ARTICLES_INFO = "non_comm_article_info.txt"
FILE_CUSTOM_LICENSE_ARTICLES_INFO = "custom_license_article_info.txt"
lst_url_exclusions = ['//github.com', 'https://doi.org','https://doi.org/10','perpetuity.is', 'https://doi.org/10.1101/2020.03', 'https://doi.org/10.1101/2020.04']
## Functions to parse the JSON files into clean DataFrames; four types of datasets (biorxiv, comm, non_comm, custom_license) are available here.
def save_article_info(obj, filename):
with open(filename, 'a') as the_file:
the_file.write("# PAPER_ID ----- : " + obj.paper_id + "\n")
the_file.write("# TITLE -----------: " + obj.title + "\n")
the_file.write("# RELEVANT SENTENCES ----------:")
the_file.write("\n")
for item in obj.lst_sentences:
the_file.write("\n ==>")
the_file.write("%s " % item)
the_file.write("\n")
if (len(obj.lst_rapid_assessment_sentences) > 0):
the_file.write("# ASSESSMENT RELATED SENTENCES ----------:")
the_file.write("\n")
for item in obj.lst_rapid_assessment_sentences:
the_file.write("\n ==>")
the_file.write("%s " % item)
the_file.write("\n")
if (len(obj.lst_rapid_design_sentences) > 0):
the_file.write("# DESIGN RELATED SENTENCES ----------:")
the_file.write("\n")
for item in obj.lst_rapid_design_sentences:
the_file.write("\n ==>")
the_file.write("%s " % item)
the_file.write("\n")
if (len(obj.lst_design_experiments_sentences) > 0):
the_file.write("# EXPERIMENT RELATED SENTENCES ----------:")
the_file.write("\n")
for item in obj.lst_design_experiments_sentences:
the_file.write("\n ==>")
the_file.write("%s " % item)
the_file.write("\n")
the_file.write("# URL -------------:")
for item in obj.lst_urls:
the_file.write("\n ==>")
the_file.write("%s " % item)
if (len(obj.lst_urls)==0):
the_file.write("No urls found.")
the_file.write("\n")
author_out = obj.authors
if (obj.authors.strip() == ""):
author_out = "NOT_FOUND"
the_file.write("\n")
the_file.write("# AUTHORS -----------: " + obj.authors + "\n")
the_file.write("# SCORE -----------: " + str(obj.score) + "\n")
the_file.write("# =========================================================: " + "\n")
def format_name(author):
middle_name = " ".join(author['middle'])
if author['middle']:
return " ".join([author['first'], middle_name, author['last']])
else:
return " ".join([author['first'], author['last']])
def format_affiliation(affiliation):
text = []
location = affiliation.get('location')
if location:
text.extend(list(affiliation['location'].values()))
institution = affiliation.get('institution')
if institution:
text = [institution] + text
return ", ".join(text)
def format_authors(authors, with_affiliation=False):
name_ls = []
for author in authors:
name = format_name(author)
if with_affiliation:
affiliation = format_affiliation(author['affiliation'])
if affiliation:
name_ls.append(f"{name} ({affiliation})")
else:
name_ls.append(name)
else:
name_ls.append(name)
return ", ".join(name_ls)
def format_body(body_text):
texts = [(di['section'], di['text']) for di in body_text]
texts_di = {di['section']: "" for di in body_text}
for section, text in texts:
texts_di[section] += text
body = ""
for section, text in texts_di.items():
if (section.strip() != ""):
body += section.upper()
body += " : "
body += text
body += "."
return body
def format_bib(bibs):
if type(bibs) == dict:
bibs = list(bibs.values())
bibs = deepcopy(bibs)
formatted = []
for bib in bibs:
bib['authors'] = format_authors(
bib['authors'],
with_affiliation=False
)
formatted_ls = [str(bib[k]) for k in ['title', 'authors', 'venue', 'year']]
formatted.append(", ".join(formatted_ls))
return "; ".join(formatted)
def load_files(dirname, SAMPLE_SIZE = 50):
filenames = os.listdir(dirname)
lst_orig_count = len(filenames)
raw_files = []
if (RUN_MODE == "SUBSET"):
filenames = filenames[0: SAMPLE_SIZE]
for filename in (filenames):
filename = dirname + filename
file = json.load(open(filename, 'rb'))
raw_files.append(file)
return (raw_files, lst_orig_count)
def generate_clean_df(all_files):
cleaned_files = []
for file in (all_files):
features = [
file['paper_id'],
file['metadata']['title'],
format_authors(file['metadata']['authors']),
format_authors(file['metadata']['authors'],
with_affiliation=True),
format_body(file['abstract']),
format_body(file['body_text']),
format_bib(file['bib_entries']),
file['metadata']['authors'],
file['bib_entries']
]
cleaned_files.append(features)
col_names = ['paper_id', 'title', 'authors',
'affiliations', 'abstract', 'text',
'bibliography','raw_authors','raw_bibliography']
clean_df = pd.DataFrame(cleaned_files, columns=col_names)
clean_df.head()
return clean_df
def find_phrases_in_title_npi(doc , span_start = 5 , span_end = 5):
matcher = Matcher(nlp.vocab)
pattern1= [{'LOWER': 'non'}, {'LOWER': 'pharmaceutical'}, {'LOWER': 'intervention'}]
pattern2 = [{'LOWER': 'non'}, {'LOWER': 'pharmaceutical'}, {'LOWER': 'interventions'}]
pattern3 = [{'LOWER': 'non'}, {'IS_PUNCT': True, 'OP' : '*'} , {'LOWER': 'pharmaceutical'}, {'IS_PUNCT': True, 'OP' : '*'}, {'LOWER': 'interventions'}]
pattern4 = [{'LOWER': 'non'}, {'IS_PUNCT': True, 'OP' : '*'} , {'LOWER': 'pharmaceutical'}, {'IS_PUNCT': True, 'OP' : '*'}, {'LOWER': 'intervention'}]
lst_spans = []
#matcher.add('titlematcher', None, *phrase_patterns)
matcher.add('titlematcher', None, pattern1, pattern2, pattern3, pattern4)
found_matches = matcher(doc)
find_count = len(found_matches)
for match_id, start, end in found_matches:
string_id = nlp.vocab.strings[match_id]
end = min(end + span_end, len(doc))
start = max(start - span_start,0)
span = doc[start:end]
lst_spans.append(span.text)
snippets = '| '.join([lst for lst in lst_spans])
return find_count, snippets
def prepare_dataframe_for_nlp(df, nlp):
df.fillna('', inplace=True)
return(df)
def get_sents_from_snippets(lst_snippets, nlpdoc, paper_id):
"""
Finding full sentences when snippets are passed to this function.
"""
phrase_patterns = [nlp(text) for text in lst_snippets]
matcher = PhraseMatcher(nlp.vocab)
matcher.add('xyz', None, *phrase_patterns)
sentences = nlpdoc
res_sentences = []
for sent in sentences.sents:
found_matches = matcher(nlp(sent.text))
find_count = len(found_matches)
if len(found_matches) > 0:
res_sentences.append(sent.text)
res_sentences = list(set(res_sentences))
return(res_sentences)
def limit_text_size(text):
# if (len(text) > (10000)):
text = text[0:40000]
return(text)
def find_phrases_in_text(doc , phrase_list, span_start = 5 , span_end = 5):
matcher = PhraseMatcher(nlp.vocab)
#print(phrase_list)
lst_spans = []
phrase_patterns = [nlp(text) for text in phrase_list]
matcher.add('covidmatcher', None, *phrase_patterns)
found_matches = matcher(doc)
find_count = len(found_matches)
for match_id, start, end in found_matches:
string_id = nlp.vocab.strings[match_id]
end = min(end + span_end, len(doc) - 1)
start = max(start - span_start,0)
span = doc[start:end]
lst_spans.append(span.text)
#print("found a match.", span.text)
snippets = '| '.join([lst for lst in lst_spans])
ret_list = list(set(lst_spans))
return(find_count, ret_list)
def generate_data(dir_path, SAMPLE_SIZE = 50):
_files, count_files_orig = load_files(dir_path, SAMPLE_SIZE)
df = generate_clean_df(_files)
return(df, count_files_orig)
def add_lists(lst1, lst2, lst3, lst4):
lst_final = list(lst1) + list(lst2) + list(lst3) + list(lst4)
return(lst_final)
def do_scoring_npi(title_find_count
, text_find_count_in
, text_find_count_ph
, text_find_count_non
, text_find_count_npi):
if ((text_find_count_in > 0) & (text_find_count_ph > 0) & (text_find_count_non > 0)):
ret = 30 * title_find_count + 10 * text_find_count_npi + text_find_count_in + text_find_count_non + text_find_count_ph
else:
ret = 30 * title_find_count + 10 * text_find_count_npi
return(ret)
def process_url(url):
ret = url
#print(url in lst_url_exclusions)
if url in lst_url_exclusions:
ret = ''
return(ret)
def is_main_url(d):
if d.startswith('https://doi.org/'): # Could use /10.1101
return (True)
else:
return (False)
def find_url_in_text(doc):
main_url = "NOT FOUND"
lst_urls = []
matcher = Matcher(nlp.vocab)
pattern = [{'LIKE_URL': True}]
matcher.add('url', None, pattern)
found_matches = matcher(doc)
#print(found_matches)
for match_id, start, end in found_matches:
url = doc[start:end]
url = process_url(url.text)
#print(url)
if (url != ""):
lst_urls.append(url)
if is_main_url(url):
main_url = url
return(main_url , list(set(lst_urls)))
def get_summary_row_for_df(processed_articles, count_find, count_fund_infra, count_cost_benefits, module):
dict_row ={"Module":module, "Processed": processed_articles, "Found": count_find
, "Found Funding and Infrastructure": count_fund_infra
, "Count Cost benefits": count_cost_benefits}
return(dict_row)
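# Per-subset pipeline: sample the JSON files, run the NPI title matcher and the keyword matchers over the truncated article text, then score and sort the papers.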
def process_a_module(path, SAMPLE_SIZE = 50, MODULE = "provide-module"):
df_data, count_orig_files = generate_data(path, SAMPLE_SIZE)
df_master = df_data.copy()[["paper_id", "title"]]
df_data = prepare_dataframe_for_nlp(df_data, nlp)
df_data['small_text'] = list(map(limit_text_size, (df_data['text'])))
df_data['nlp_title'] = list(map(nlp, (df_data['title'])))
with nlp.disable_pipes("tagger", "parser", "ner"):
df_data['nlp_snall_text'] = list(map(nlp, (df_data['small_text'])))
df_master['title_find_count'], df_master['title_found_snippets'] = zip(*df_data['nlp_title'].apply(lambda title: find_phrases_in_title_npi((title))))
phrase_list = [u"intervention"]
df_master['text_find_count_in'], df_master['text_found_snippets_in'] = zip(*df_data['nlp_snall_text'].apply(lambda nlptext: find_phrases_in_text((nlptext), phrase_list)))
phrase_list = [u"pharmaceutical"]
df_master['text_find_count_ph'], df_master['text_found_snippets_ph'] = zip(*df_data['nlp_snall_text'].apply(lambda nlptext: find_phrases_in_text((nlptext), phrase_list)))
phrase_list = [u"non"]
df_master['text_find_count_non'], df_master['text_found_snippets_non'] = zip(*df_data['nlp_snall_text'].apply(lambda nlptext: find_phrases_in_text((nlptext), phrase_list)))
phrase_list = [u"NPI"]
df_master['text_find_count_npi'], df_master['text_found_snippets_npi'] = zip(*df_data['nlp_snall_text'].apply(lambda nlptext: find_phrases_in_text((nlptext), phrase_list)))
df_master['lst_snippets'] = list(map(add_lists
, (df_master['text_found_snippets_ph'])
, (df_master['text_found_snippets_npi'])
, (df_master['text_found_snippets_non'])
, (df_master['text_found_snippets_in'])
) )
df_master["score"] = list(map(do_scoring_npi
, df_master['title_find_count']
, df_master['text_find_count_in']
, df_master['text_find_count_ph']
, df_master['text_find_count_non']
, df_master['text_find_count_npi']
))
df_master = df_master.sort_values('score', ascending = False)
df_master['module'] = MODULE
df_data['module'] = MODULE
_ = gc.collect()
return(df_master, df_data, count_orig_files)
def get_paper_info(paper_id, journal):
if (journal == BIORXIV):
df = df_biorxiv[df_biorxiv['paper_id'] == paper_id]
if (journal == COMM):
df = df_comm[df_comm['paper_id'] == paper_id]
if (journal == NON_COMM):
df = df_non_comm[df_non_comm['paper_id'] == paper_id]
if (journal == CUSTOM_LICENSE):
df = df_custom_license[df_custom_license['paper_id'] == paper_id]
text = df.iloc[0]['text']#[0:5000]
title = df.iloc[0]['title']
authors = df.iloc[0]['authors']
return(text, title, authors)
def print_list(lst, number_to_print = 5, shuffle = True):
if len(lst) < number_to_print:
number_to_print = len(lst)
for i in range(-1*number_to_print, 0):
print( lst[i])
def get_stats_from_articles(lst_articles):
count_articles = 0
count_cost_benefits = 0
count_fund_infra = 0
lst_cost_benefits = []
lst_fund_infra = []
for obj in lst_articles:
count_articles = count_articles + 1
if len(obj.lst_cost_benefits_sentences) > 0:
count_cost_benefits = count_cost_benefits + 1
lst_cost_benefits.append((obj.title, obj.lst_urls, obj.score))
if len(obj.lst_funding_infra_sentences) > 0:
count_fund_infra = count_fund_infra + 1
lst_fund_infra.append((obj.title, obj.lst_urls, obj.score))
return(count_articles, count_cost_benefits, count_fund_infra, lst_cost_benefits)
def create_file(filename):
with open(filename, 'w') as the_file:
the_file.close()
def write_to_file(filename, Text):
with open(filename, 'a') as the_file:
the_file.write(Text)
the_file.close()
def get_nlp_text_for_paper_id(paper_id, module):
text, x, y = get_paper_info(paper_id, module)
with nlp.disable_pipes("tagger", "parser", "ner"):
return(nlp(text))
def get_td_string():
tdstring = '<td style="text-align: left; vertical-align: middle; font-size:1.2em;">'
return(tdstring)
def get_sentence_tr(sent):
row = get_td_string() + f'{sent}</td></tr>'
return(row)
#return( f'<tr>' + f'<td align = "left">{sent}</td>' + '<td> </td></tr>')
def display_article(serial , title, url , sentences, score , lst_other_keywords
, lst_cost_benefits, lst_funding_infra_sentences
, lst_all_urls, authors, publish_date, npi_count, paper_id):
if (publish_date == NOT_FOUND):
publish_date = "N/A"
if (url != "NOT FOUND"):
link_text = f'<a href="{url}" target="_blank">{url}</a>'
else:
link_text = "N/A"
text = f'<h3>{serial}: {title}</h3><table border = "1">'
tdstring = get_td_string() #'<td style="text-align: left; vertical-align: middle;">'
text_info = f' <b>Score:</b> {score} <b>Date:</b> {publish_date} NPI Count:{npi_count}'
text_1 = '<tr>' + tdstring + '<b>URL:</b>' + link_text + f'{text_info}</td>' + '</tr>'
text_paper = '<tr>' + tdstring + '<b>Paper ID:</b>'+ f'{paper_id}</td>' + '</tr>'
text_author = '<tr>' + tdstring + f'<b>Author(s): </b>{authors}</td></tr>'
text = text + text_1 + text_paper + text_author
#text += ''.join([f'<td><b>{col}</b></td>' for col in df.columns.values]) + '</tr>'
i = 0
if (len(sentences) > 0):
#text += f'<tr><td align ="left"><b>Relevant Sentences</b></td></tr>'
text += tdstring + '<b>Relevant Sentences</b></td></tr>'
for sent in sentences:
i = i + 1
text += get_sentence_tr(sent)
if (len(lst_other_keywords) > 0):
text += tdstring + '<b>Sentences containing keywords - "rapid", "design", "experiments", "assessment" (and/or)</b></td></tr>'
for sent in lst_other_keywords:
i = i + 1
text += get_sentence_tr(sent)
if (len(lst_cost_benefits) > 0):
text += tdstring + '<b>Sentences containing keywords - "cost", "benefits" (and/or)</b></td></tr>'
for sent in lst_cost_benefits:
i = i + 1
text += get_sentence_tr(sent)
if (len(lst_funding_infra_sentences) > 0):
text += tdstring + '<b>Sentences containing keywords - "funding", "infra","authorities" (and/or) </b></td></tr>'
for sent in lst_funding_infra_sentences:
i = i + 1
text += get_sentence_tr(sent)
if (len(lst_all_urls) > 0):
text += tdstring + '<b>All urls which appear in the article</b></td></tr>'
str_urls = '<br> '.join([u for u in lst_all_urls])
text += get_sentence_tr(str_urls)
text += '</table>'
display(HTML(text))
def get_df_from_article_list(lst_articles):
lst = []
serial = 0
for l in lst_articles:
serial +=1
str_rel = "Low"
url = l.main_url
if (l.main_url == "NOT FOUND"):
url = "N/A"
if (l.npi_count > 0):
str_rel = "High"
dict_row = {"Serial": serial, "Title":l.title, "URL": url, "Score": l.score , "Relevance": str_rel, "PaperID": l.paper_id}
lst.append(dict_row)
#print(len(lst_articles), len(lst))
return(pd.DataFrame(lst))
def get_processing_flag(module):
retval = False
if (module == BIORXIV):
if (SAMPLE_SIZE_BIORXIV != -1): retval = True
if (module == COMM):
if (SAMPLE_SIZE_COMM != -1): retval = True
if (module == NON_COMM):
if (SAMPLE_SIZE_NON_COMM != -1): retval = True
if (module == CUSTOM_LICENSE):
if (SAMPLE_SIZE_CUSTOM_LICENSE != -1): retval = True
return(retval)
def print_user_message(mess = "Please provide"):
m = f'<font size="4" color="grey">{mess}</font>'
display(HTML(m))
def display_dataframe(df, title = ""):
#tdstring = f'<td style="text-align: left; vertical-align: middle; font-size:1.2em;">{v}</td>'
if (title != ""):
text = f'<h2>{title}</h2><table><tr>'
else:
text = '<table><tr>'
text += ''.join([f'<td style="text-align: left; vertical-align: middle; font-size:1.2em;"><b>{col}</b></td>' for col in df.columns.values]) + '</tr>'
for row in df.itertuples():
#text += '<tr>' + ''.join([f'<td valign="top">{v}</td>' for v in row[1:]]) + '</tr>'
text += '<tr>' + ''.join([ f'<td style="text-align: left; vertical-align: middle; font-size:1.1em;">{v}</td>' for v in row[1:]]) + '</tr>'
text += '</table>'
display(HTML(text))
def start_td():
tdstring = '<td style="text-align: center; vertical-align: middle; font-size:1.2em;">'
return(tdstring)
def end_td():
tdstring = '</td>'
return(tdstring)
def get_bolded(tstr):
tdstring = '<b>'+ tstr + '</b>'
return(tdstring)
def get_sentence_tr_vs(sent):
row = get_td_string() + f'{sent}</td></tr>'
return(row)
#return( f'<tr>' + f'<td align = "left">{sent}</td>' + '<td> </td></tr>')
def display_data_processing_info():
text = f'<h3>Table: Data Processing Information</h3><table border = "1">'
td_header_1 = start_td() + get_bolded("Module") + end_td()
td_header_2 = start_td() + get_bolded("Total articles") + end_td()
td_header_3 = start_td() + get_bolded("Processed articles") + end_td()
td_header_4 = start_td() + get_bolded("Number of articles of interest") + end_td()
td_header_5 = start_td() + get_bolded("Excerpts of interest") + end_td()
text_header = "\n<tr>" + td_header_1 + td_header_2 + td_header_3 + td_header_4 + td_header_5+ "</tr>\n"
#text_header = text_header + "<tr>" + start_td() + get_bolded("total articles") + end_td() + "</tr>\n"
text = text + text_header
if get_processing_flag(BIORXIV):
td_data_1 = start_td() + "Biorxiv/Medrxiv" + end_td()
td_data_2 = start_td() + str(count_biorxiv_orig) + end_td()
td_data_3 = start_td() + str(df_biorxiv.shape[0]) + end_td()
td_data_4 = start_td() + str(df_biorxiv_filter.shape[0]) + end_td()
td_data_5 = start_td() + str(sum_bio_sents) + end_td()
text_row = "\n<tr>" + td_data_1 + td_data_2 + td_data_3 + td_data_4 + td_data_5 + "</tr>\n"
text = text + text_row
if get_processing_flag(NON_COMM):
td_data_1 = start_td() + "Non Comm" + end_td()
td_data_2 = start_td() + str(count_non_comm_orig) + end_td()
td_data_3 = start_td() + str(df_non_comm.shape[0]) + end_td()
td_data_4 = start_td() + str(df_non_comm_filter.shape[0]) + end_td()
td_data_5 = start_td() + str(sum_non_comm_sents) + end_td()
text_row = "\n<tr>" + td_data_1 + td_data_2 + td_data_3 + td_data_4 + td_data_5 + "</tr>\n"
text = text + text_row
if get_processing_flag(COMM):
td_data_1 = start_td() + "Comm" + end_td()
td_data_2 = start_td() + str(count_comm_orig) + end_td()
td_data_3 = start_td() + str(df_comm.shape[0]) + end_td()
td_data_4 = start_td() + str(df_comm_filter.shape[0]) + end_td()
td_data_5 = start_td() + str(sum_comm_sents) + end_td()
text_row = "\n<tr>" + td_data_1 + td_data_2 + td_data_3 + td_data_4 + td_data_5 + "</tr>\n"
text = text + text_row
if get_processing_flag(CUSTOM_LICENSE):
td_data_1 = start_td() + "Custom License" + end_td()
td_data_2 = start_td() + str(count_custom_license_orig) + end_td()
td_data_3 = start_td() + str(df_custom_license.shape[0]) + end_td()
td_data_4 = start_td() + str(df_custom_license_filter.shape[0]) + end_td()
td_data_5 = start_td() + str(sum_cl_sents) + end_td()
text_row = "\n<tr>" + td_data_1 + td_data_2 + td_data_3 + td_data_4 + td_data_5 + "</tr>\n"
text = text + text_row
text += '\n</table>'
display(HTML(text))
NOT_FOUND = "<not found>"
def add_list(*lsts):
retlist = []
for l in lsts:
retlist =retlist + l
return(list(set(retlist)))
def get_date(paper_url):
retval = NOT_FOUND
if paper_url.startswith('https://doi.org/'): # Could use /10.1101
retval = paper_url.replace('https://doi.org/10.1101/', '')[0:10]
return(retval)
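# Container for one matched paper: its text, URLs, publish date and the keyword-grouped sentences that feed the summary table below.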
class article():
def __init__(self, paper_id, score, journal, lst_snippets, npi_count):
self.publish_date = ""
self.npi_count = npi_count
self.paper_id = paper_id
self.main_url = ""
self.score = score
self.journal = journal
self.lst_sentences = []
self.lst_snippets = lst_snippets
self.nlp_text = None
self.text = None
self.lst_urls = []
self.title = None
self.authors = None
self.lst_funding_infra_snippets = []
self.lst_funding_infra_sentences = []
self.lst_cost_benefits_snippets =[]
self.lst_cost_benefits_sentences = []
self.lst_all_sentences = []
self.lst_other_keywords_snippets = []
self.lst_other_keywords_sentences = []
self.count_sentences = 0
self.initialize()
self.consolidate_all_sentences()
def consolidate_all_sentences(self):
self.lst_all_sentences = add_list(self.lst_sentences
, self.lst_funding_infra_sentences
, self.lst_cost_benefits_sentences
, self.lst_other_keywords_sentences)
self.count_sentences = len(self.lst_all_sentences)
def save_biorxiv_all_info(self):
write_to_file(FILE_BIORXIV_ARTICLES_INFO, "===================== START ===========================\n")
write_to_file(FILE_BIORXIV_ARTICLES_INFO, "TITLE:" + self.title + "\n")
write_to_file(FILE_BIORXIV_ARTICLES_INFO, "SENTENCES:" + ' \n'.join(self.lst_sentences))
write_to_file(FILE_BIORXIV_ARTICLES_INFO, "===================== END ===========================\n")
def find_url_in_text(self):
self.main_url , self.lst_urls = find_url_in_text(self.nlp_text)
def initialize(self):
self.text, self.title, self.authors = get_paper_info(self.paper_id, self.journal)
self.nlp_text = nlp(self.text)
self.find_url_in_text()
self.get_sents_from_snippets()
self.get_cost_benefits_info()
self.get_funding_infra_info()
self.get_other_keywords_info()
self.publish_date = get_date(self.main_url)
def get_sents_from_snippets(self):
self.lst_sentences = []
self.lst_sentences = get_sents_from_snippets(self.lst_snippets, self.nlp_text, self.paper_id)
def get_funding_infra_info(self):
phrase_list = [ u"funding", u"fund"
, u"authorities"
, u"infrastructure"]
count, snippets = find_phrases_in_text(self.nlp_text, phrase_list, span_start = 1, span_end = 1 )
self.lst_funding_infra_snippets = snippets
if (count > 0):
self.lst_funding_infra_sentences = get_sents_from_snippets(self.lst_funding_infra_snippets, self.nlp_text, self.paper_id)
def get_other_keywords_info(self):
phrase_list = [ u"experiment", u"rapid", u"assessment", u"design"]
count, snippets = find_phrases_in_text(self.nlp_text, phrase_list, span_start = 1, span_end = 1 )
self.lst_other_keywords_snippets = snippets
if (count > 0):
self.lst_other_keywords_sentences = get_sents_from_snippets(snippets, self.nlp_text, self.paper_id)
def get_cost_benefits_info(self):
phrase_list = [u"cost", u"benefit"]
count, snippets = find_phrases_in_text(self.nlp_text, phrase_list, span_start = 1, span_end = 1 )
self.lst_cost_benefits_snippets = snippets
if (count > 0):
self.lst_cost_benefits_sentences = get_sents_from_snippets(self.lst_cost_benefits_snippets, self.nlp_text, self.paper_id)
def info_cost_benefits(self):
if ((len(self.lst_cost_benefits_sentences) > 0) & (len(self.lst_cost_benefits_sentences) < 10)):
self.print_header()
print("Cost Benefits Information:", self.lst_cost_benefits_sentences)
print("Number of cost benefits sentences found:", len(self.lst_cost_benefits_sentences))
self.print_footer()
def print_header(self):
strformat = "================== START ===========================\n TITLE: {} \n".format(self.title)
print(strformat)
def print_footer(self):
strformat = "RELEVANT URLS:\n {} \n PAPER ID {}".format(self.lst_urls, self.paper_id)
print(strformat)
print("PaperID: ", self.paper_id , " Score:" , self.score)
print("======================= END ==========================================\n")
def print_1_basic_article_information(self):
self.print_header()
print(" -------------- PRINTING SOME EXTRACTED SENTENCES (MAX 5) Related to NPI -------------- ")
if len(self.lst_sentences) > 5:
print_list(self.lst_sentences[0:5])
#print(self.lst_sentences[0:5])
else:
print_list(self.lst_sentences)
self.print_footer()
def get_objectlist_from_df(df):
lst_objs = list(map(article,
(df['paper_id']) ,
df['score'],
df['module'],
df['lst_snippets'],
df['text_find_count_npi']))
#print("sorting")
#lst_objs.sort(key=lambda x: x.score, reverse=True)
return (lst_objs)
if get_processing_flag(BIORXIV):
path = '/kaggle/input/CORD-19-research-challenge/biorxiv_medrxiv/biorxiv_medrxiv/pdf_json/'
start_time = time.time()
df_biorxiv_master, df_biorxiv, count_biorxiv_orig = process_a_module(path, SAMPLE_SIZE=SAMPLE_SIZE_BIORXIV, MODULE=BIORXIV)
df_biorxiv_filter = df_biorxiv_master[df_biorxiv_master['score'] > 0].reset_index()
lst_obj_biorxiv = get_objectlist_from_df(df_biorxiv_filter)
lst_obj_biorxiv.sort(key=lambda x: x.score, reverse=True)
sum_bio_sents = sum((c.count_sentences for c in lst_obj_biorxiv))
if get_processing_flag(COMM):
path = '/kaggle/input/CORD-19-research-challenge/comm_use_subset/comm_use_subset/pdf_json/'
start_time = time.time()
df_comm_master, df_comm, count_comm_orig = process_a_module(path, SAMPLE_SIZE_COMM, COMM)
df_comm_filter = df_comm_master[df_comm_master['score'] > 0].reset_index()
lst_obj_comm = get_objectlist_from_df(df_comm_filter)
lst_obj_comm.sort(key=lambda x: x.score, reverse=True)
sum_comm_sents = sum((c.count_sentences for c in lst_obj_comm))
if get_processing_flag(NON_COMM):
path = '/kaggle/input/CORD-19-research-challenge/noncomm_use_subset/noncomm_use_subset/pdf_json/'
start_time = time.time()
df_non_comm_master, df_non_comm, count_non_comm_orig = process_a_module(path, SAMPLE_SIZE_NON_COMM, NON_COMM)
df_non_comm_filter = df_non_comm_master[df_non_comm_master['score'] > 0].reset_index()
lst_obj_non_comm = get_objectlist_from_df(df_non_comm_filter)
lst_obj_non_comm.sort(key=lambda x: x.score, reverse=True)
sum_non_comm_sents = sum((c.count_sentences for c in lst_obj_non_comm))
if get_processing_flag(CUSTOM_LICENSE):
path = '/kaggle/input/CORD-19-research-challenge/custom_license/custom_license/pdf_json/'
start_time = time.time()
df_custom_license_master, df_custom_license, count_custom_license_orig = process_a_module(path, SAMPLE_SIZE_CUSTOM_LICENSE, CUSTOM_LICENSE)
df_custom_license_filter = df_custom_license_master[df_custom_license_master['score'] > 0].reset_index()
lst_obj_custom_license = get_objectlist_from_df(df_custom_license_filter)
lst_obj_custom_license.sort(key=lambda x: x.score, reverse=True)
sum_cl_sents = sum((c.count_sentences for c in lst_obj_custom_license))
display_data_processing_info() | code |
1010130/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random
import random
import tensorflow as tf
import tensorflow as tf
features = ['Pclass_1', 'Pclass_2', 'Pclass_3', 'Sex_female', 'Age', 'SibSp', 'Parch', 'Fare']
df = pd.read_csv('../input/train.csv')
t = pd.DataFrame({'Validation': list(map(lambda x: random.random() < 0.3, range(891)))})
x_preprocess = pd.DataFrame({'Pclass_1': (df['Pclass'] == 1) * 1, 'Pclass_2': (df['Pclass'] == 2) * 1, 'Pclass_3': (df['Pclass'] == 3) * 1, 'Sex_female': (df['Sex'] == 'female') * 1, 'Age': df['Age'] / pd.Series.std(df['Age']), 'SibSp': df['SibSp'], 'Parch': df['Parch'], 'Fare': df['Fare'] / pd.Series.std(df['Fare'])})
x_preprocess.fillna(0, inplace=True)
y = pd.DataFrame({'Survived': df['Survived']})
y_train = y[t['Validation'] == False]
x_train = x_preprocess[t['Validation'] == False]
y_validation = y[t['Validation']]
x_validation = x_preprocess[t['Validation']]
x = tf.placeholder(dtype=tf.float32, shape=[None, len(features)])
y = tf.placeholder(dtype=tf.float32, shape=[None, 1])
h1_feature = 32
h2_feature = 32
epic_max = 1000
mini_batch = 64
W1 = tf.Variable(tf.truncated_normal(shape=[len(features), h1_feature], stddev=0.1))
W2 = tf.Variable(tf.truncated_normal(shape=[h1_feature, h2_feature], stddev=0.1))
W3 = tf.Variable(tf.truncated_normal(shape=[h2_feature, 1], stddev=0.1))
b1 = tf.Variable(tf.ones(shape=[1, h1_feature]))
b2 = tf.Variable(tf.ones(shape=[1, h2_feature]))
b3 = tf.Variable(tf.ones(shape=[1, 1]))
init = tf.global_variables_initializer()
x_dropout = tf.nn.dropout(x, 0.5)
h1_relu = tf.nn.sigmoid(tf.matmul(x, W1) + b1)
h2_relu = tf.nn.sigmoid(tf.matmul(h1_relu, W2) + b2)
h3_logits = tf.matmul(h2_relu, W3) + b3
h3_sigmoid = tf.nn.sigmoid(h3_logits)
# labels must be the ground-truth targets and logits the pre-sigmoid outputs
cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=h3_logits))
train_step = tf.train.GradientDescentOptimizer(learning_rate=0.5).minimize(cross_entropy)
# round the sigmoid output to 0/1 before comparing with the labels
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.round(h3_sigmoid), y), tf.float32))
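# Full-batch training: 100 x 100 gradient-descent steps over the entire training set (mini_batch is defined above but not used).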
with tf.Session() as sess:
sess.run(init)
feed_train = {x: x_train[features].values, y: y_train[['Survived']].values}
feed_validation = {x: x_validation[features].values, y: y_validation[['Survived']].values}
for j in range(100):
for i in range(100):
sess.run(train_step, feed_dict=feed_train)
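# Second model: same features, but tanh hidden layers, zero-initialized biases, and the Adam optimizer.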
features = ['Pclass_1', 'Pclass_2', 'Pclass_3', 'Sex_female', 'Age', 'SibSp', 'Parch', 'Fare']
df = pd.read_csv('../input/train.csv')
t = pd.DataFrame({'Validation': list(map(lambda x: random.random() < 0.3, range(891)))})
x = pd.DataFrame({'Pclass_1': (df['Pclass'] == 1) * 1, 'Pclass_2': (df['Pclass'] == 2) * 1, 'Pclass_3': (df['Pclass'] == 3) * 1, 'Sex_female': (df['Sex'] == 'female') * 1, 'Age': df['Age'] / pd.Series.std(df['Age']), 'SibSp': df['SibSp'], 'Parch': df['Parch'], 'Fare': df['Fare'] / pd.Series.std(df['Fare'])})
x.fillna(0, inplace=True)
y = pd.DataFrame({'Survived': df['Survived']})
y_train = y[t['Validation'] == False]
x_train = x[t['Validation'] == False]
y_validation = y[t['Validation']]
x_validation = x[t['Validation']]
x = tf.placeholder(dtype=tf.float32, shape=[None, len(features)])
y = tf.placeholder(dtype=tf.float32, shape=[None, 1])
h1_feature = 32
h2_feature = 32
epic_max = 1000
mini_batch = 64
W1 = tf.Variable(tf.truncated_normal(shape=[len(features), h1_feature], stddev=0.1))
W2 = tf.Variable(tf.truncated_normal(shape=[h1_feature, h2_feature], stddev=0.1))
W3 = tf.Variable(tf.truncated_normal(shape=[h2_feature, 1], stddev=0.01))
b1 = tf.Variable(tf.zeros(shape=[1, h1_feature]))
b2 = tf.Variable(tf.zeros(shape=[1, h2_feature]))
b3 = tf.Variable(tf.zeros(shape=[1, 1]))
x_dropout = tf.nn.dropout(x, 0.5)
h1_tanh = tf.nn.tanh(tf.matmul(x, W1) + b1)
h2_tanh = tf.nn.tanh(tf.matmul(h1_tanh, W2) + b2)
h3_logits = tf.matmul(h2_tanh, W3) + b3
h3_sigmoid = tf.nn.sigmoid(h3_logits)
# minimize the (already positive) cross-entropy against the true labels; no leading minus
cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=h3_logits))
train_step = tf.train.AdamOptimizer(learning_rate=0.5).minimize(cross_entropy)
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.round(h3_sigmoid), y), tf.float32))
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
feed_train = {x: x_train[features].values, y: y_train[['Survived']].values}
feed_validation = {x: x_validation[features].values, y: y_validation[['Survived']].values}
print(sess.run([cross_entropy, accuracy], feed_dict=feed_train))
print(sess.run([cross_entropy, accuracy], feed_dict=feed_validation))
for j in range(10):
for i in range(1000):
sess.run(train_step, feed_dict=feed_train)
print(sess.run([cross_entropy, accuracy], feed_dict=feed_train))
print(sess.run([cross_entropy, accuracy], feed_dict=feed_validation)) | code |
1010130/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random
import tensorflow as tf
features = ['Pclass_1', 'Pclass_2', 'Pclass_3', 'Sex_female', 'Age', 'SibSp', 'Parch', 'Fare']
df = pd.read_csv('../input/train.csv')
t = pd.DataFrame({'Validation': list(map(lambda x: random.random() < 0.3, range(891)))})
x_preprocess = pd.DataFrame({'Pclass_1': (df['Pclass'] == 1) * 1, 'Pclass_2': (df['Pclass'] == 2) * 1, 'Pclass_3': (df['Pclass'] == 3) * 1, 'Sex_female': (df['Sex'] == 'female') * 1, 'Age': df['Age'] / pd.Series.std(df['Age']), 'SibSp': df['SibSp'], 'Parch': df['Parch'], 'Fare': df['Fare'] / pd.Series.std(df['Fare'])})
x_preprocess.fillna(0, inplace=True)
y = pd.DataFrame({'Survived': df['Survived']})
y_train = y[t['Validation'] == False]
x_train = x_preprocess[t['Validation'] == False]
y_validation = y[t['Validation']]
x_validation = x_preprocess[t['Validation']]
x = tf.placeholder(dtype=tf.float32, shape=[None, len(features)])
y = tf.placeholder(dtype=tf.float32, shape=[None, 1])
h1_feature = 32
h2_feature = 32
epic_max = 1000
mini_batch = 64
W1 = tf.Variable(tf.truncated_normal(shape=[len(features), h1_feature], stddev=0.1))
W2 = tf.Variable(tf.truncated_normal(shape=[h1_feature, h2_feature], stddev=0.1))
W3 = tf.Variable(tf.truncated_normal(shape=[h2_feature, 1], stddev=0.1))
b1 = tf.Variable(tf.ones(shape=[1, h1_feature]))
b2 = tf.Variable(tf.ones(shape=[1, h2_feature]))
b3 = tf.Variable(tf.ones(shape=[1, 1]))
init = tf.global_variables_initializer()
x_dropout = tf.nn.dropout(x, 0.5)
h1_relu = tf.nn.sigmoid(tf.matmul(x, W1) + b1)
h2_relu = tf.nn.sigmoid(tf.matmul(h1_relu, W2) + b2)
h3_logits = tf.matmul(h2_relu, W3) + b3
h3_sigmoid = tf.nn.sigmoid(h3_logits)
# labels must be the ground-truth targets and logits the pre-sigmoid outputs
cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=h3_logits))
train_step = tf.train.GradientDescentOptimizer(learning_rate=0.5).minimize(cross_entropy)
# round the sigmoid output to 0/1 before comparing with the labels
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.round(h3_sigmoid), y), tf.float32))
with tf.Session() as sess:
sess.run(init)
feed_train = {x: x_train[features].values, y: y_train[['Survived']].values}
feed_validation = {x: x_validation[features].values, y: y_validation[['Survived']].values}
for j in range(100):
for i in range(100):
sess.run(train_step, feed_dict=feed_train)
print(sess.run(cross_entropy, feed_dict=feed_train))
print(sess.run(cross_entropy, feed_dict=feed_validation)) | code |
1010130/cell_2 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
import tensorflow as tf
import random
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
1010130/cell_7 | [
"text_plain_output_1.png"
] | from subprocess import check_output
from subprocess import check_output
import numpy as np
import pandas as pd
import tensorflow as tf
import random
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
1010130/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random
import random
import tensorflow as tf
import tensorflow as tf
features = ['Pclass_1', 'Pclass_2', 'Pclass_3', 'Sex_female', 'Age', 'SibSp', 'Parch', 'Fare']
df = pd.read_csv('../input/train.csv')
t = pd.DataFrame({'Validation': list(map(lambda x: random.random() < 0.3, range(891)))})
x_preprocess = pd.DataFrame({'Pclass_1': (df['Pclass'] == 1) * 1, 'Pclass_2': (df['Pclass'] == 2) * 1, 'Pclass_3': (df['Pclass'] == 3) * 1, 'Sex_female': (df['Sex'] == 'female') * 1, 'Age': df['Age'] / pd.Series.std(df['Age']), 'SibSp': df['SibSp'], 'Parch': df['Parch'], 'Fare': df['Fare'] / pd.Series.std(df['Fare'])})
x_preprocess.fillna(0, inplace=True)
y = pd.DataFrame({'Survived': df['Survived']})
y_train = y[t['Validation'] == False]
x_train = x_preprocess[t['Validation'] == False]
y_validation = y[t['Validation']]
x_validation = x_preprocess[t['Validation']]
x = tf.placeholder(dtype=tf.float32, shape=[None, len(features)])
y = tf.placeholder(dtype=tf.float32, shape=[None, 1])
h1_feature = 32
h2_feature = 32
epic_max = 1000
mini_batch = 64
W1 = tf.Variable(tf.truncated_normal(shape=[len(features), h1_feature], stddev=0.1))
W2 = tf.Variable(tf.truncated_normal(shape=[h1_feature, h2_feature], stddev=0.1))
W3 = tf.Variable(tf.truncated_normal(shape=[h2_feature, 1], stddev=0.1))
b1 = tf.Variable(tf.ones(shape=[1, h1_feature]))
b2 = tf.Variable(tf.ones(shape=[1, h2_feature]))
b3 = tf.Variable(tf.ones(shape=[1, 1]))
init = tf.global_variables_initializer()
x_dropout = tf.nn.dropout(x, 0.5)
h1_relu = tf.nn.sigmoid(tf.matmul(x, W1) + b1)
h2_relu = tf.nn.sigmoid(tf.matmul(h1_relu, W2) + b2)
h3_logits = tf.matmul(h2_relu, W3) + b3
h3_sigmoid = tf.nn.sigmoid(h3_logits)
# labels must be the ground-truth targets and logits the pre-sigmoid outputs
cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=h3_logits))
train_step = tf.train.GradientDescentOptimizer(learning_rate=0.5).minimize(cross_entropy)
# round the sigmoid output to 0/1 before comparing with the labels
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.round(h3_sigmoid), y), tf.float32))
with tf.Session() as sess:
sess.run(init)
feed_train = {x: x_train[features].values, y: y_train[['Survived']].values}
feed_validation = {x: x_validation[features].values, y: y_validation[['Survived']].values}
for j in range(100):
for i in range(100):
sess.run(train_step, feed_dict=feed_train)
features = ['Pclass_1', 'Pclass_2', 'Pclass_3', 'Sex_female', 'Age', 'SibSp', 'Parch', 'Fare']
df = pd.read_csv('../input/train.csv')
t = pd.DataFrame({'Validation': list(map(lambda x: random.random() < 0.3, range(891)))})
x = pd.DataFrame({'Pclass_1': (df['Pclass'] == 1) * 1, 'Pclass_2': (df['Pclass'] == 2) * 1, 'Pclass_3': (df['Pclass'] == 3) * 1, 'Sex_female': (df['Sex'] == 'female') * 1, 'Age': df['Age'] / pd.Series.std(df['Age']), 'SibSp': df['SibSp'], 'Parch': df['Parch'], 'Fare': df['Fare'] / pd.Series.std(df['Fare'])})
x.fillna(0, inplace=True)
y = pd.DataFrame({'Survived': df['Survived']})
y_train = y[t['Validation'] == False]
x_train = x[t['Validation'] == False]
y_validation = y[t['Validation']]
x_validation = x[t['Validation']]
print(x_train[features].values) | code |
1010130/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random
features = ['Pclass_1', 'Pclass_2', 'Pclass_3', 'Sex_female', 'Age', 'SibSp', 'Parch', 'Fare']
df = pd.read_csv('../input/train.csv')
t = pd.DataFrame({'Validation': list(map(lambda x: random.random() < 0.3, range(891)))})
x_preprocess = pd.DataFrame({'Pclass_1': (df['Pclass'] == 1) * 1, 'Pclass_2': (df['Pclass'] == 2) * 1, 'Pclass_3': (df['Pclass'] == 3) * 1, 'Sex_female': (df['Sex'] == 'female') * 1, 'Age': df['Age'] / pd.Series.std(df['Age']), 'SibSp': df['SibSp'], 'Parch': df['Parch'], 'Fare': df['Fare'] / pd.Series.std(df['Fare'])})
x_preprocess.fillna(0, inplace=True)
y = pd.DataFrame({'Survived': df['Survived']})
y_train = y[t['Validation'] == False]
x_train = x_preprocess[t['Validation'] == False]
y_validation = y[t['Validation']]
x_validation = x_preprocess[t['Validation']]
print(y_train[['Survived']].values) | code |
129029982/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/suicide-attempts-in-shandong-china/SuicideChina.csv')
data.info() | code |
129029982/cell_1 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
129029982/cell_7 | [
"text_plain_output_5.png",
"text_plain_output_9.png",
"text_plain_output_4.png",
"image_output_5.png",
"text_plain_output_6.png",
"image_output_7.png",
"text_plain_output_3.png",
"image_output_4.png",
"text_plain_output_7.png",
"image_output_8.png",
"text_plain_output_8.png",
"image_output_6.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"image_output_9.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/suicide-attempts-in-shandong-china/SuicideChina.csv')
data = data.drop(columns=['Unnamed: 0', 'Person_ID'])
data.head() | code |
129029982/cell_8 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/suicide-attempts-in-shandong-china/SuicideChina.csv')
data = data.drop(columns=['Unnamed: 0', 'Person_ID'])
data.Age.describe() | code |
129029982/cell_10 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/suicide-attempts-in-shandong-china/SuicideChina.csv')
data = data.drop(columns=['Unnamed: 0', 'Person_ID'])
def bar_plot(variable):
"""
input: variable ex: "Sex"
output: bar plot & Value count.
"""
var = data[variable]
varValue = var.value_counts()
plt.figure(figsize=(9, 3))
plt.bar(varValue.index, varValue)
plt.xticks(varValue.index, varValue.index.values, rotation=45)
plt.ylabel('Frequency')
plt.title(variable)
plt.grid()
plt.show()
print('{}: \n {}'.format(variable, varValue))
category1 = ['Hospitalised', 'Died', 'Urban', 'Sex', 'Education', 'Occupation', 'method', 'Year', 'Month']
for c in category1:
bar_plot(c) | code |
129029982/cell_12 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/suicide-attempts-in-shandong-china/SuicideChina.csv')
data = data.drop(columns=['Unnamed: 0', 'Person_ID'])
def bar_plot(variable):
"""
input: variable ex: "Sex"
output: bar plot & Value count.
"""
var = data[variable]
varValue = var.value_counts()
plt.xticks(varValue.index, varValue.index.values, rotation=45)
category1 = ['Hospitalised', 'Died', 'Urban', 'Sex', 'Education', 'Occupation', 'method', 'Year', 'Month']
plt.figure(figsize=(9, 3))
plt.hist(data['Age'], bins=50)
plt.xlabel('Age')
plt.ylabel('Frequency')
plt.title('Age distribution of suicide attempts')
plt.grid()
plt.show() | code |
129029982/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/suicide-attempts-in-shandong-china/SuicideChina.csv')
data.head() | code |
2005813/cell_9 | [
"text_html_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from tqdm import tqdm
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = datagen.flow_from_directory('../input/train/', batch_size=1, class_mode='categorical')
x, y = train_generator.next()
X_data, Y_data = ([], [])
for _ in tqdm(range(2750)):
x, y = train_generator.next()
X_data.append(x[0])
Y_data.append(y[0])
X_data = np.asarray(X_data)
Y_data = np.asarray(Y_data)
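# Load the test images listed in the sample submission, resized to 256x256 to match the size produced by the training generator.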
X_test = []
sub = pd.read_csv('../input/sample_submission.csv')
for fname in tqdm(sub['fname']):
filepath = '../input/test/' + fname
X_test.append(img_to_array(load_img(filepath, target_size=(256, 256))))
X_test = np.asarray(X_test) | code |
2005813/cell_4 | [
"text_plain_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
import matplotlib.pyplot as plt
import numpy as np
datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = datagen.flow_from_directory('../input/train/', batch_size=1, class_mode='categorical')
x, y = train_generator.next()
plt.imshow((x[0] * 255).astype('uint8'))
print(list(train_generator.class_indices.keys())[np.argmax(y)]) | code |
2005813/cell_2 | [
"text_plain_output_1.png",
"image_output_1.png"
] | !ls ../input/train/ | code |
2005813/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import pandas as pd
from glob import glob
from tqdm import tqdm
import matplotlib.pyplot as plt
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.applications.vgg16 import VGG16
from keras.applications.inception_v3 import InceptionV3
from keras.callbacks import EarlyStopping
from keras.utils import plot_model
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from sklearn.model_selection import train_test_split
from tqdm import tqdm | code |
2005813/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | def get_model():
input_img = Input((256, 256, 3))
X = BatchNormalization()(input_img)
X = Convolution2D(16, (3, 3), activation='relu')(X)
X = BatchNormalization()(X)
X = Convolution2D(16, (3, 3), activation='relu')(X)
X = MaxPooling2D()(X)
X = Convolution2D(32, (3, 3), activation='relu')(X)
X = BatchNormalization()(X)
X = Convolution2D(32, (3, 3), activation='relu')(X)
X = GlobalMaxPooling2D()(X)
X = BatchNormalization()(X)
X = Dense(512, activation='relu')(X)
X = Dropout(0.2)(X)
X = Dense(10, activation='softmax')(X)
model = Model(inputs=input_img, outputs=X)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
model.summary()
return model
model = get_model() | code |
2005813/cell_8 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from tqdm import tqdm
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = datagen.flow_from_directory('../input/train/', batch_size=1, class_mode='categorical')
x, y = train_generator.next()
X_data, Y_data = ([], [])
for _ in tqdm(range(2750)):
x, y = train_generator.next()
X_data.append(x[0])
Y_data.append(y[0])
X_data = np.asarray(X_data)
Y_data = np.asarray(Y_data)
def get_model():
input_img = Input((256, 256, 3))
X = BatchNormalization()(input_img)
X = Convolution2D(16, (3, 3), activation='relu')(X)
X = BatchNormalization()(X)
X = Convolution2D(16, (3, 3), activation='relu')(X)
X = MaxPooling2D()(X)
X = Convolution2D(32, (3, 3), activation='relu')(X)
X = BatchNormalization()(X)
X = Convolution2D(32, (3, 3), activation='relu')(X)
X = GlobalMaxPooling2D()(X)
X = BatchNormalization()(X)
X = Dense(512, activation='relu')(X)
X = Dropout(0.2)(X)
X = Dense(10, activation='softmax')(X)
model = Model(inputs=input_img, outputs=X)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
model.summary()
return model
model = get_model()
model_history = model.fit(X_data, Y_data, batch_size=10, epochs=3, validation_split=0.2, callbacks=[EarlyStopping(monitor='val_acc', patience=3, verbose=1)]) | code |
2005813/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = datagen.flow_from_directory('../input/train/', batch_size=1, class_mode='categorical') | code |
2005813/cell_10 | [
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from tqdm import tqdm
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = datagen.flow_from_directory('../input/train/', batch_size=1, class_mode='categorical')
x, y = train_generator.next()
X_data, Y_data = ([], [])
for _ in tqdm(range(2750)):
x, y = train_generator.next()
X_data.append(x[0])
Y_data.append(y[0])
X_data = np.asarray(X_data)
Y_data = np.asarray(Y_data)
def get_model():
input_img = Input((256, 256, 3))
X = BatchNormalization()(input_img)
X = Convolution2D(16, (3, 3), activation='relu')(X)
X = BatchNormalization()(X)
X = Convolution2D(16, (3, 3), activation='relu')(X)
X = MaxPooling2D()(X)
X = Convolution2D(32, (3, 3), activation='relu')(X)
X = BatchNormalization()(X)
X = Convolution2D(32, (3, 3), activation='relu')(X)
X = GlobalMaxPooling2D()(X)
X = BatchNormalization()(X)
X = Dense(512, activation='relu')(X)
X = Dropout(0.2)(X)
X = Dense(10, activation='softmax')(X)
model = Model(inputs=input_img, outputs=X)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
model.summary()
return model
model = get_model()
model_history = model.fit(X_data, Y_data, batch_size=10, epochs=3, validation_split=0.2, callbacks=[EarlyStopping(monitor='val_acc', patience=3, verbose=1)])
X_test = []
sub = pd.read_csv('../input/sample_submission.csv')
for fname in tqdm(sub['fname']):
filepath = '../input/test/' + fname
X_test.append(img_to_array(load_img(filepath, target_size=(256, 256))))
X_test = np.asarray(X_test)
preds = model.predict(X_test, verbose=1)
preds = np.argmax(preds, axis=1)
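# Map each predicted class index back to its camera-model folder name from the training generator.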
preds = [list(train_generator.class_indices.keys())[p] for p in tqdm(preds)] | code |
2005813/cell_12 | [
"text_plain_output_1.png"
] | from keras.callbacks import EarlyStopping
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from tqdm import tqdm
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = datagen.flow_from_directory('../input/train/', batch_size=1, class_mode='categorical')
x, y = train_generator.next()
X_data, Y_data = ([], [])
for _ in tqdm(range(2750)):
x, y = train_generator.next()
X_data.append(x[0])
Y_data.append(y[0])
X_data = np.asarray(X_data)
Y_data = np.asarray(Y_data)
def get_model():
input_img = Input((256, 256, 3))
X = BatchNormalization()(input_img)
X = Convolution2D(16, (3, 3), activation='relu')(X)
X = BatchNormalization()(X)
X = Convolution2D(16, (3, 3), activation='relu')(X)
X = MaxPooling2D()(X)
X = Convolution2D(32, (3, 3), activation='relu')(X)
X = BatchNormalization()(X)
X = Convolution2D(32, (3, 3), activation='relu')(X)
X = GlobalMaxPooling2D()(X)
X = BatchNormalization()(X)
X = Dense(512, activation='relu')(X)
X = Dropout(0.2)(X)
X = Dense(10, activation='softmax')(X)
model = Model(inputs=input_img, outputs=X)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
model.summary()
return model
model = get_model()
model_history = model.fit(X_data, Y_data, batch_size=10, epochs=3, validation_split=0.2, callbacks=[EarlyStopping(monitor='val_acc', patience=3, verbose=1)])
X_test = []
sub = pd.read_csv('../input/sample_submission.csv')
for fname in tqdm(sub['fname']):
filepath = '../input/test/' + fname
X_test.append(img_to_array(load_img(filepath, target_size=(256, 256))))
X_test = np.asarray(X_test)
preds = model.predict(X_test, verbose=1)
preds = np.argmax(preds, axis=1)
preds = [list(train_generator.class_indices.keys())[p] for p in tqdm(preds)]
sub['camera'] = preds
sub.to_csv('sub.csv', index=False)
sub.head() | code |
2005813/cell_5 | [
"text_plain_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from tqdm import tqdm
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = datagen.flow_from_directory('../input/train/', batch_size=1, class_mode='categorical')
x, y = train_generator.next()
X_data, Y_data = ([], [])
for _ in tqdm(range(2750)):
x, y = train_generator.next()
X_data.append(x[0])
Y_data.append(y[0])
X_data = np.asarray(X_data)
Y_data = np.asarray(Y_data) | code |
333447/cell_9 | [
"text_plain_output_1.png"
] | from sklearn import cross_validation
from sklearn.cross_validation import KFold
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
titanic_test = pd.read_csv('../input/test.csv')
titanic['Age'] = titanic['Age'].fillna(titanic['Age'].median())
titanic.loc[titanic['Sex'] == 'male', 'Sex'] = 0
titanic.loc[titanic['Sex'] == 'female', 'Sex'] = 1
titanic['Embarked'] = titanic['Embarked'].fillna('S')
titanic.loc[titanic['Embarked'] == 'S', 'Embarked'] = 0
titanic.loc[titanic['Embarked'] == 'C', 'Embarked'] = 1
titanic.loc[titanic['Embarked'] == 'Q', 'Embarked'] = 2
from sklearn.linear_model import LinearRegression
from sklearn.cross_validation import KFold
predictors = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
alg = LinearRegression()
kf = KFold(titanic.shape[0], n_folds=3, random_state=1)
predictions = []
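# Manual 3-fold cross-validation: fit linear regression on two folds, predict the held-out fold, and collect the out-of-fold predictions.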
for train, test in kf:
train_predictors = titanic[predictors].iloc[train, :]
train_target = titanic['Survived'].iloc[train]
alg.fit(train_predictors, train_target)
test_predictions = alg.predict(titanic[predictors].iloc[test, :])
predictions.append(test_predictions)
predictions = np.concatenate(predictions, axis=0)
predictions[predictions > 0.5] = 1
predictions[predictions <= 0.5] = 0
# accuracy = fraction of out-of-fold predictions that match the true labels
accuracy = sum(predictions == titanic['Survived']) / len(predictions)
from sklearn.linear_model import LogisticRegression
from sklearn import cross_validation
alg = LogisticRegression(random_state=1)
scores = cross_validation.cross_val_score(alg, titanic[predictors], titanic['Survived'], cv=3)
titanic_test['Age'] = titanic_test['Age'].fillna(titanic['Age'].median())
titanic_test.loc[titanic_test['Sex'] == 'male', 'Sex'] = 0
titanic_test.loc[titanic_test['Sex'] == 'female', 'Sex'] = 1
titanic_test['Embarked'] = titanic_test['Embarked'].fillna('S')
titanic_test.loc[titanic_test['Embarked'] == 'S', 'Embarked'] = 0
titanic_test.loc[titanic_test['Embarked'] == 'C', 'Embarked'] = 1
titanic_test.loc[titanic_test['Embarked'] == 'Q', 'Embarked'] = 2
titanic_test['Fare'] = titanic_test['Fare'].fillna(titanic_test['Fare'].median())
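# Retrain logistic regression on the full training set, predict the test set, and build the submission DataFrame.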
alg = LogisticRegression(random_state=1)
alg.fit(titanic[predictors], titanic['Survived'])
predictions = alg.predict(titanic_test[predictors])
submission = pd.DataFrame({'PassengerId': titanic_test['PassengerId'], 'Survived': predictions})
print(submission) | code |
333447/cell_4 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
titanic_test = pd.read_csv('../input/test.csv')
titanic['Age'] = titanic['Age'].fillna(titanic['Age'].median())
titanic.loc[titanic['Sex'] == 'male', 'Sex'] = 0
titanic.loc[titanic['Sex'] == 'female', 'Sex'] = 1
titanic['Embarked'] = titanic['Embarked'].fillna('S')
titanic.loc[titanic['Embarked'] == 'S', 'Embarked'] = 0
titanic.loc[titanic['Embarked'] == 'C', 'Embarked'] = 1
titanic.loc[titanic['Embarked'] == 'Q', 'Embarked'] = 2
print(titanic['Embarked'].unique()) | code |
333447/cell_6 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from sklearn.cross_validation import KFold
from sklearn.linear_model import LinearRegression
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
titanic_test = pd.read_csv('../input/test.csv')
titanic['Age'] = titanic['Age'].fillna(titanic['Age'].median())
titanic.loc[titanic['Sex'] == 'male', 'Sex'] = 0
titanic.loc[titanic['Sex'] == 'female', 'Sex'] = 1
titanic['Embarked'] = titanic['Embarked'].fillna('S')
titanic.loc[titanic['Embarked'] == 'S', 'Embarked'] = 0
titanic.loc[titanic['Embarked'] == 'C', 'Embarked'] = 1
titanic.loc[titanic['Embarked'] == 'Q', 'Embarked'] = 2
from sklearn.linear_model import LinearRegression
from sklearn.cross_validation import KFold
predictors = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
alg = LinearRegression()
kf = KFold(titanic.shape[0], n_folds=3, random_state=1)
predictions = []
for train, test in kf:
train_predictors = titanic[predictors].iloc[train, :]
train_target = titanic['Survived'].iloc[train]
alg.fit(train_predictors, train_target)
test_predictions = alg.predict(titanic[predictors].iloc[test, :])
predictions.append(test_predictions)
predictions = np.concatenate(predictions, axis=0)
predictions[predictions > 0.5] = 1
predictions[predictions <= 0.5] = 0
# accuracy = fraction of out-of-fold predictions that match the true labels
accuracy = sum(predictions == titanic['Survived']) / len(predictions)
print(accuracy) | code |
333447/cell_2 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
titanic_test = pd.read_csv('../input/test.csv')
titanic.describe() | code |
333447/cell_1 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
titanic = pd.read_csv('../input/train.csv')
titanic_test = pd.read_csv('../input/test.csv')
titanic.head() | code |
333447/cell_7 | [
"text_plain_output_1.png"
] | from sklearn import cross_validation
from sklearn.cross_validation import KFold
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
titanic_test = pd.read_csv('../input/test.csv')
titanic['Age'] = titanic['Age'].fillna(titanic['Age'].median())
titanic.loc[titanic['Sex'] == 'male', 'Sex'] = 0
titanic.loc[titanic['Sex'] == 'female', 'Sex'] = 1
titanic['Embarked'] = titanic['Embarked'].fillna('S')
titanic.loc[titanic['Embarked'] == 'S', 'Embarked'] = 0
titanic.loc[titanic['Embarked'] == 'C', 'Embarked'] = 1
titanic.loc[titanic['Embarked'] == 'Q', 'Embarked'] = 2
from sklearn.linear_model import LinearRegression
from sklearn.cross_validation import KFold
predictors = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
alg = LinearRegression()
kf = KFold(titanic.shape[0], n_folds=3, random_state=1)
predictions = []
for train, test in kf:
train_predictors = titanic[predictors].iloc[train, :]
train_target = titanic['Survived'].iloc[train]
alg.fit(train_predictors, train_target)
test_predictions = alg.predict(titanic[predictors].iloc[test, :])
predictions.append(test_predictions)
from sklearn.linear_model import LogisticRegression
from sklearn import cross_validation
alg = LogisticRegression(random_state=1)
scores = cross_validation.cross_val_score(alg, titanic[predictors], titanic['Survived'], cv=3)
print(scores.mean()) | code |
333447/cell_3 | [
"text_html_output_1.png"
] | from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
titanic_test = pd.read_csv('../input/test.csv')
titanic['Age'] = titanic['Age'].fillna(titanic['Age'].median())
titanic.describe() | code |
333447/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.cross_validation import KFold
from sklearn.linear_model import LinearRegression
from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from subprocess import check_output
titanic = pd.read_csv('../input/train.csv')
titanic_test = pd.read_csv('../input/test.csv')
titanic['Age'] = titanic['Age'].fillna(titanic['Age'].median())
titanic.loc[titanic['Sex'] == 'male', 'Sex'] = 0
titanic.loc[titanic['Sex'] == 'female', 'Sex'] = 1
titanic['Embarked'] = titanic['Embarked'].fillna('S')
titanic.loc[titanic['Embarked'] == 'S', 'Embarked'] = 0
titanic.loc[titanic['Embarked'] == 'C', 'Embarked'] = 1
titanic.loc[titanic['Embarked'] == 'Q', 'Embarked'] = 2
from sklearn.linear_model import LinearRegression
from sklearn.cross_validation import KFold
predictors = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
alg = LinearRegression()
kf = KFold(titanic.shape[0], n_folds=3, random_state=1)
predictions = []
for train, test in kf:
train_predictors = titanic[predictors].iloc[train, :]
train_target = titanic['Survived'].iloc[train]
alg.fit(train_predictors, train_target)
test_predictions = alg.predict(titanic[predictors].iloc[test, :])
predictions.append(test_predictions) | code |
18157867/cell_13 | [
"text_plain_output_1.png"
] | import torch
t1 = torch.tensor(4.0)
t1
t2 = torch.tensor([1.0, 2, 3, 4])
t2
t2.shape | code |
18157867/cell_9 | [
"text_plain_output_1.png"
] | import torch
t1 = torch.tensor(4.0)
t1
t2 = torch.tensor([1.0, 2, 3, 4])
t2
t3 = torch.tensor([[5.0, 6], [7, 8], [9, 10]])
t3 | code |
18157867/cell_4 | [
"text_plain_output_1.png"
] | import torch
t1 = torch.tensor(4.0)
t1 | code |
18157867/cell_23 | [
"text_plain_output_1.png"
] | import torch
t1 = torch.tensor(4.0)
t1
t2 = torch.tensor([1.0, 2, 3, 4])
t2
t3 = torch.tensor([[5.0, 6], [7, 8], [9, 10]])
t3
t4 = torch.tensor([[[11, 12, 13], [13, 14, 15]], [[15, 16, 17], [17, 18, 19.0]]])
t4
x = torch.tensor(3.0)
w = torch.tensor(4.0, requires_grad=True)
b = torch.tensor(5.0, requires_grad=True)
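# Note: no forward pass or backward() call is made in this cell, so all three .grad attributes are still None.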
print('dy/dx:', x.grad)
print('dy/dw:', w.grad)
print('dy/db:', b.grad) | code |
18157867/cell_30 | [
"text_plain_output_1.png"
] | import numpy as np
import torch
t1 = torch.tensor(4.0)
t1
t2 = torch.tensor([1.0, 2, 3, 4])
t2
t3 = torch.tensor([[5.0, 6], [7, 8], [9, 10]])
t3
t4 = torch.tensor([[[11, 12, 13], [13, 14, 15]], [[15, 16, 17], [17, 18, 19.0]]])
t4
x = torch.tensor(3.0)
w = torch.tensor(4.0, requires_grad=True)
b = torch.tensor(5.0, requires_grad=True)
y = w * x + b
y
y.backward()
import numpy as np
x = np.array([[1, 2], [3, 4]])
x
y = torch.from_numpy(x)
y
(x.dtype, y.dtype) | code |
18157867/cell_6 | [
"text_plain_output_1.png"
] | import torch
t1 = torch.tensor(4.0)
t1
t1.dtype | code |
18157867/cell_26 | [
"text_plain_output_1.png"
] | import numpy as np
import torch
t1 = torch.tensor(4.0)
t1
t2 = torch.tensor([1.0, 2, 3, 4])
t2
t3 = torch.tensor([[5.0, 6], [7, 8], [9, 10]])
t3
t4 = torch.tensor([[[11, 12, 13], [13, 14, 15]], [[15, 16, 17], [17, 18, 19.0]]])
t4
x = torch.tensor(3.0)
w = torch.tensor(4.0, requires_grad=True)
b = torch.tensor(5.0, requires_grad=True)
import numpy as np
x = np.array([[1, 2], [3, 4]])
x | code |
18157867/cell_19 | [
"text_plain_output_1.png"
] | import torch
t1 = torch.tensor(4.0)
t1
t2 = torch.tensor([1.0, 2, 3, 4])
t2
t3 = torch.tensor([[5.0, 6], [7, 8], [9, 10]])
t3
t4 = torch.tensor([[[11, 12, 13], [13, 14, 15]], [[15, 16, 17], [17, 18, 19.0]]])
t4
x = torch.tensor(3.0)
w = torch.tensor(4.0, requires_grad=True)
b = torch.tensor(5.0, requires_grad=True)
y = w * x + b
y | code |
18157867/cell_32 | [
"text_plain_output_1.png"
] | import numpy as np
import torch
t1 = torch.tensor(4.0)
t1
t2 = torch.tensor([1.0, 2, 3, 4])
t2
t3 = torch.tensor([[5.0, 6], [7, 8], [9, 10]])
t3
t4 = torch.tensor([[[11, 12, 13], [13, 14, 15]], [[15, 16, 17], [17, 18, 19.0]]])
t4
x = torch.tensor(3.0)
w = torch.tensor(4.0, requires_grad=True)
b = torch.tensor(5.0, requires_grad=True)
y = w * x + b
y
y.backward()
import numpy as np
x = np.array([[1, 2], [3, 4]])
x
y = torch.from_numpy(x)
y
(x.dtype, y.dtype)
z = y.numpy()
z | code |
18157867/cell_28 | [
"text_plain_output_1.png"
] | import numpy as np
import torch
t1 = torch.tensor(4.0)
t1
t2 = torch.tensor([1.0, 2, 3, 4])
t2
t3 = torch.tensor([[5.0, 6], [7, 8], [9, 10]])
t3
t4 = torch.tensor([[[11, 12, 13], [13, 14, 15]], [[15, 16, 17], [17, 18, 19.0]]])
t4
x = torch.tensor(3.0)
w = torch.tensor(4.0, requires_grad=True)
b = torch.tensor(5.0, requires_grad=True)
y = w * x + b
y
y.backward()
import numpy as np
x = np.array([[1, 2], [3, 4]])
x
y = torch.from_numpy(x)
y | code |
18157867/cell_8 | [
"text_plain_output_1.png"
] | import torch
t1 = torch.tensor(4.0)
t1
t2 = torch.tensor([1.0, 2, 3, 4])
t2 | code |
18157867/cell_15 | [
"text_plain_output_1.png"
] | import torch
t1 = torch.tensor(4.0)
t1
t2 = torch.tensor([1.0, 2, 3, 4])
t2
t3 = torch.tensor([[5.0, 6], [7, 8], [9, 10]])
t3
t4 = torch.tensor([[[11, 12, 13], [13, 14, 15]], [[15, 16, 17], [17, 18, 19.0]]])
t4
t4.shape | code |
18157867/cell_35 | [
"text_plain_output_1.png"
] | import jovian
import jovian
jovian.commit() | code |
18157867/cell_14 | [
"text_plain_output_1.png"
] | import torch
t1 = torch.tensor(4.0)
t1
t2 = torch.tensor([1.0, 2, 3, 4])
t2
t3 = torch.tensor([[5.0, 6], [7, 8], [9, 10]])
t3
t3.shape | code |
18157867/cell_10 | [
"text_plain_output_1.png"
] | import torch
t1 = torch.tensor(4.0)
t1
t2 = torch.tensor([1.0, 2, 3, 4])
t2
t3 = torch.tensor([[5.0, 6], [7, 8], [9, 10]])
t3
t4 = torch.tensor([[[11, 12, 13], [13, 14, 15]], [[15, 16, 17], [17, 18, 19.0]]])
t4 | code |
18157867/cell_12 | [
"text_plain_output_1.png"
] | import torch
t1 = torch.tensor(4.0)
t1
t1.dtype
t1.shape | code |
34144897/cell_4 | [
"text_plain_output_1.png"
] | from torchvision import datasets, models, transforms
from torchvision.datasets import ImageFolder
import torch
from torchvision import datasets, models, transforms
import urllib
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
from collections import OrderedDict
from torch.utils.data import Dataset, DataLoader
import cv2
from os import listdir
from torchvision.datasets import ImageFolder
import sys
import time
import torchvision
preprocess = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
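# The same resize + ImageNet mean/std normalization is applied to both the training and testing image folders.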
ImageFolder('training', transform=preprocess)
ImageFolder('testing', transform=preprocess) | code |
73060659/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
wine = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
wine.columns
#getting the histograms of the data
fig = plt.figure(figsize=(30, 20))
plt.suptitle('Histograms of the respective columns', fontsize=20)
for i in range(wine.shape[1]):
plt.subplot(6, 3, i + 1)
f = plt.gca()
f.set_title(wine.columns.values[i])
vals = np.size(wine.iloc[:, i].unique())
if vals >= 100:
vals = 100
plt.hist(wine.iloc[:, i], bins=vals, color='#5D2A4C')
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
wine.isna().any()
sns.set(style='white')
corr = wine.corr()
#generation of mask
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# matplotlib figure
f, ax = plt.subplots(figsize=(18, 15))
# set up custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
X = wine.drop('quality', axis=1)
y = wine['quality']
wine.columns[:11]
features_label = wine.columns[:11]
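# Fit a random forest on the full dataset to rank the 11 physico-chemical features by importance.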
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators=200, criterion='entropy', random_state=0)
classifier.fit(X, y)
importances = classifier.feature_importances_
indices = np.argsort(importances)[::-1]
for i in range(X.shape[1]):
    # pair each rank with the feature name at the sorted position, not the raw column order
    print('%2d) %-*s %f' % (i + 1, 30, features_label[indices[i]], importances[indices[i]]))
73060659/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
wine = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
wine.columns
wine[wine.columns[:11]].describe() | code |
73060659/cell_25 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.decomposition import PCA
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
wine = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
from sklearn.preprocessing import LabelEncoder
bins = (2, 6.5, 8)
group_names = ['bad', 'good']
wine['quality'] = pd.cut(wine['quality'], bins=bins, labels=group_names)
label_quality = LabelEncoder()
wine['quality'] = label_quality.fit_transform(wine['quality'])
wine['quality'].value_counts()
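# Assumes X_train and X_test were created by a train_test_split in an earlier cell that is not reproduced here.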
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train2 = pd.DataFrame(sc.fit_transform(X_train))
X_test2 = pd.DataFrame(sc.transform(X_test))
X_train2.columns = X_train.columns.values
X_test2.columns = X_test.columns.values
X_train2.index = X_train.index.values
X_test2.index = X_test.index.values
X_train = X_train2
X_test = X_test2
from sklearn.decomposition import PCA
pca = PCA(n_components=4)
X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)
explained_variance = pca.explained_variance_ratio_
print(pd.DataFrame(explained_variance)) | code |
73060659/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
wine = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
wine.head() | code |
73060659/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
wine = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
sns.countplot(wine['quality']) | code |
73060659/cell_26 | [
"text_html_output_1.png"
] | from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, precision_score, recall_score
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
wine = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
from sklearn.preprocessing import LabelEncoder
bins = (2, 6.5, 8)
group_names = ['bad', 'good']
wine['quality'] = pd.cut(wine['quality'], bins=bins, labels=group_names)
label_quality = LabelEncoder()
wine['quality'] = label_quality.fit_transform(wine['quality'])
wine['quality'].value_counts()
wine.columns
#getting the histograms of the data
fig = plt.figure(figsize=(30, 20))
plt.suptitle('Histograms of the respective columns', fontsize=20)
for i in range(wine.shape[1]):
plt.subplot(6, 3, i + 1)
f = plt.gca()
f.set_title(wine.columns.values[i])
vals = np.size(wine.iloc[:, i].unique())
if vals >= 100:
vals = 100
plt.hist(wine.iloc[:, i], bins=vals, color='#5D2A4C')
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
wine.isna().any()
sns.set(style='white')
corr = wine.corr()
#generation of mask
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# matplotlib figure
f, ax = plt.subplots(figsize=(18, 15))
# set up custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
X = wine.drop('quality', axis=1)
y = wine['quality']
wine.columns[:11]
features_label = wine.columns[:11]
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators=200, criterion='entropy', random_state=0)
classifier.fit(X, y)
importances = classifier.feature_importances_
indices = np.argsort(importances)[::-1]
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train2 = pd.DataFrame(sc.fit_transform(X_train))
X_test2 = pd.DataFrame(sc.transform(X_test))
X_train2.columns = X_train.columns.values
X_test2.columns = X_test.columns.values
X_train2.index = X_train.index.values
X_test2.index = X_test.index.values
X_train = X_train2
X_test = X_test2
from sklearn.decomposition import PCA
pca = PCA(n_components=4)
X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)
explained_variance = pca.explained_variance_ratio_
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0, penalty='l1', solver='liblinear')
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, precision_score, recall_score
acc = accuracy_score(y_test, y_pred)
prec = precision_score(y_test, y_pred)
rec = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
results = pd.DataFrame([['Logistic Regression', acc, prec, rec, f1]], columns=['Model', 'Accuracy', 'Precision', 'Recall', 'F1 Score'])
print(results) | code |
73060659/cell_11 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
wine = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
wine.columns
#getting the histograms of the data
fig = plt.figure(figsize=(30, 20))
plt.suptitle('Histograms of the respective columns', fontsize=20)
for i in range(wine.shape[1]):
plt.subplot(6, 3, i + 1)
f = plt.gca()
f.set_title(wine.columns.values[i])
vals = np.size(wine.iloc[:, i].unique())
if vals >= 100:
vals = 100
plt.hist(wine.iloc[:, i], bins=vals, color='#5D2A4C')
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
wine.isna().any() | code |
73060659/cell_19 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
wine = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
wine.columns
#getting the histograms of the data
fig = plt.figure(figsize=(30, 20))
plt.suptitle('Histograms of the respective columns', fontsize=20)
for i in range(wine.shape[1]):
plt.subplot(6, 3, i + 1)
f = plt.gca()
f.set_title(wine.columns.values[i])
vals = np.size(wine.iloc[:, i].unique())
if vals >= 100:
vals = 100
plt.hist(wine.iloc[:, i], bins=vals, color='#5D2A4C')
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
wine.isna().any()
sns.set(style='white')
corr = wine.corr()
X = wine.drop('quality', axis=1)
y = wine['quality']
wine.columns[:11] | code |
73060659/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
73060659/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
wine = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
wine.columns | code |
73060659/cell_18 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
wine = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
wine.columns
#getting the histograms of the data
fig = plt.figure(figsize=(30, 20))
plt.suptitle('Histograms of the respective columns', fontsize=20)
for i in range(wine.shape[1]):
plt.subplot(6, 3, i + 1)
f = plt.gca()
f.set_title(wine.columns.values[i])
vals = np.size(wine.iloc[:, i].unique())
if vals >= 100:
vals = 100
plt.hist(wine.iloc[:, i], bins=vals, color='#5D2A4C')
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
wine.isna().any()
sns.set(style='white')
corr = wine.corr()
X = wine.drop('quality', axis=1)
y = wine['quality']
y.head() | code |
73060659/cell_8 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
wine = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
wine.columns
sns.pairplot(wine) | code |
73060659/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
wine = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
wine.columns
#getting the histograms of the data
fig = plt.figure(figsize=(30, 20))
plt.suptitle('Histograms of the respective columns', fontsize=20)
for i in range(wine.shape[1]):
plt.subplot(6, 3, i + 1)
f = plt.gca()
f.set_title(wine.columns.values[i])
vals = np.size(wine.iloc[:, i].unique())
if vals >= 100:
vals = 100
plt.hist(wine.iloc[:, i], bins=vals, color='#5D2A4C')
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
wine.isna().any()
sns.set(style='white')
corr = wine.corr()
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
f, ax = plt.subplots(figsize=(18, 15))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=0.3, center=0, square=True, linewidths=0.5, cbar_kws={'shrink': 0.5}) | code |
73060659/cell_17 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
wine = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
wine.columns
#getting the histograms of the data
fig = plt.figure(figsize=(30, 20))
plt.suptitle('Histograms of the respective columns', fontsize=20)
for i in range(wine.shape[1]):
plt.subplot(6, 3, i + 1)
f = plt.gca()
f.set_title(wine.columns.values[i])
vals = np.size(wine.iloc[:, i].unique())
if vals >= 100:
vals = 100
plt.hist(wine.iloc[:, i], bins=vals, color='#5D2A4C')
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
wine.isna().any()
sns.set(style='white')
corr = wine.corr()
X = wine.drop('quality', axis=1)
y = wine['quality']
X.head() | code |
73060659/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
wine = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
wine.columns
#getting the histograms of the data
fig = plt.figure(figsize=(30, 20))
plt.suptitle('Histograms of the respective columns', fontsize=20)
for i in range(wine.shape[1]):
plt.subplot(6, 3, i + 1)
f = plt.gca()
f.set_title(wine.columns.values[i])
vals = np.size(wine.iloc[:, i].unique())
if vals >= 100:
vals = 100
plt.hist(wine.iloc[:, i], bins=vals, color='#5D2A4C')
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
wine.isna().any()
sns.set(style='white')
corr = wine.corr()
corr.head() | code |
73060659/cell_22 | [
"text_html_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
wine = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
wine.columns
#getting the histograms of the data
fig = plt.figure(figsize=(30, 20))
plt.suptitle('Histograms of the respective columns', fontsize=20)
for i in range(wine.shape[1]):
plt.subplot(6, 3, i + 1)
f = plt.gca()
f.set_title(wine.columns.values[i])
vals = np.size(wine.iloc[:, i].unique())
if vals >= 100:
vals = 100
plt.hist(wine.iloc[:, i], bins=vals, color='#5D2A4C')
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
wine.isna().any()
sns.set(style='white')
corr = wine.corr()
#generation of mask
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# matplotlib figure
f, ax = plt.subplots(figsize=(18, 15))
# set up custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
X = wine.drop('quality', axis=1)
y = wine['quality']
wine.columns[:11]
features_label = wine.columns[:11]
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators=200, criterion='entropy', random_state=0)
classifier.fit(X, y)
importances = classifier.feature_importances_
indices = np.argsort(importances)[::-1]
plt.title('Feature Importances')
plt.bar(range(X.shape[1]), importances[indices], color='blue', align='edge')
# reorder the tick labels to match the sorted importances
plt.xticks(range(X.shape[1]), features_label[indices], rotation=90)
plt.xlim([-1, X.shape[1]])
plt.show() | code |
73060659/cell_10 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
wine = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
wine.columns
fig = plt.figure(figsize=(30, 20))
plt.suptitle('Histograms of the respective columns', fontsize=20)
for i in range(wine.shape[1]):
plt.subplot(6, 3, i + 1)
f = plt.gca()
f.set_title(wine.columns.values[i])
vals = np.size(wine.iloc[:, i].unique())
if vals >= 100:
vals = 100
plt.hist(wine.iloc[:, i], bins=vals, color='#5D2A4C')
plt.tight_layout(rect=[0, 0.03, 1, 0.95]) | code |
73060659/cell_27 | [
"text_plain_output_1.png"
] | from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, precision_score, recall_score
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
wine = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
from sklearn.preprocessing import LabelEncoder
bins = (2, 6.5, 8)
group_names = ['bad', 'good']
wine['quality'] = pd.cut(wine['quality'], bins=bins, labels=group_names)
label_quality = LabelEncoder()
wine['quality'] = label_quality.fit_transform(wine['quality'])
wine['quality'].value_counts()
wine.columns
#getting the histograms of the data
fig = plt.figure(figsize=(30, 20))
plt.suptitle('Histograms of the respective columns', fontsize=20)
for i in range(wine.shape[1]):
plt.subplot(6, 3, i + 1)
f = plt.gca()
f.set_title(wine.columns.values[i])
vals = np.size(wine.iloc[:, i].unique())
if vals >= 100:
vals = 100
plt.hist(wine.iloc[:, i], bins=vals, color='#5D2A4C')
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
wine.isna().any()
sns.set(style='white')
corr = wine.corr()
#generation of mask
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# matplotlib figure
f, ax = plt.subplots(figsize=(18, 15))
# set up ustom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
X = wine.drop('quality', axis=1)
y = wine['quality']
wine.columns[:11]
features_label = wine.columns[:11]
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators=200, criterion='entropy', random_state=0)
classifier.fit(X, y)
importances = classifier.feature_importances_
indices = np.argsort(importances)[::-1]
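# added step (assumption): this extracted cell uses X_train/X_test/y_train/y_test below but never
# creates them, so a train/test split is sketched here; the 80/20 ratio and random_state are guesses
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)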
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train2 = pd.DataFrame(sc.fit_transform(X_train))
X_test2 = pd.DataFrame(sc.transform(X_test))
X_train2.columns = X_train.columns.values
X_test2.columns = X_test.columns.values
X_train2.index = X_train.index.values
X_test2.index = X_test.index.values
X_train = X_train2
X_test = X_test2
from sklearn.decomposition import PCA
pca = PCA(n_components=4)
X_train = pca.fit_transform(X_train)
X_test = pca.transform(X_test)
explained_variance = pca.explained_variance_ratio_
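# added check (illustrative): how much variance the 4 retained components explain in total
print('explained variance ratio: {}, cumulative: {:.3f}'.format(explained_variance, explained_variance.sum()))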
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0, penalty='l1', solver='liblinear')
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, precision_score, recall_score
acc = accuracy_score(y_test, y_pred)
prec = precision_score(y_test, y_pred)
rec = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
results = pd.DataFrame([['Logistic Regression', acc, prec, rec, f1]], columns=['Model', 'Accuracy', 'Precision', 'Recall', 'F1 Score'])
from sklearn.svm import SVC
classifier = SVC(random_state=0, kernel='linear')
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
acc = accuracy_score(y_test, y_pred)
prec = precision_score(y_test, y_pred)
rec = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
model_results = pd.DataFrame([['SVM (Linear)', acc, prec, rec, f1]], columns=['Model', 'Accuracy', 'Precision', 'Recall', 'F1 Score'])
results = pd.concat([results, model_results], ignore_index=True)
print(results) | code |
73060659/cell_12 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
wine = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
wine.columns
#getting the histograms of the data
fig = plt.figure(figsize=(30, 20))
plt.suptitle('Histograms of the respective columns', fontsize=20)
for i in range(wine.shape[1]):
plt.subplot(6, 3, i + 1)
f = plt.gca()
f.set_title(wine.columns.values[i])
vals = np.size(wine.iloc[:, i].unique())
if vals >= 100:
vals = 100
plt.hist(wine.iloc[:, i], bins=vals, color='#5D2A4C')
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
wine.isna().any()
wine.corrwith(wine.quality).plot.bar(figsize=(20, 10), title='Correlation with quality', fontsize=15, rot=45, grid=True) | code |
73060659/cell_5 | [
"image_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
wine = pd.read_csv('../input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
from sklearn.preprocessing import LabelEncoder
bins = (2, 6.5, 8)
group_names = ['bad', 'good']
wine['quality'] = pd.cut(wine['quality'], bins=bins, labels=group_names)
label_quality = LabelEncoder()
wine['quality'] = label_quality.fit_transform(wine['quality'])
wine['quality'].value_counts() | code |
130002580/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import tensorflow as tf
training_inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
training_outputs = np.array([[0], [1], [1], [1]], dtype=np.float32)
i = tf.keras.Input(2)
x = tf.keras.layers.Dense(8, activation='relu')(i)
x = tf.keras.layers.Dense(16, activation='relu')(x)
x = tf.keras.layers.Dense(32, activation='tanh')(x)
x = tf.keras.layers.Dense(8, activation='tanh')(x)
x = tf.keras.layers.Dense(1, activation='sigmoid')(x)
model = tf.keras.models.Model(i, x)
model.summary()
model.compile(loss='mean_squared_error', optimizer='adam', metrics='accuracy')
r = model.fit(training_inputs, training_outputs, epochs=100)
test_input = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
predictions = model.predict(test_input)
for i in range(len(test_input)):
print('Input: {} Predicted Output: {}'.format(test_input[i], predictions[i].round())) | code |
130002580/cell_2 | [
"text_plain_output_1.png"
] | def or_gate(a, b):
return a or b
print('A\tB\tOutput')
print('-' * 25)
for a in [False, True]:
for b in [False, True]:
output = or_gate(a, b)
print(f'{a}\t{b}\t{output}') | code |
130002580/cell_11 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
training_inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
training_outputs = np.array([[0], [1], [1], [1]], dtype=np.float32)
i = tf.keras.Input(2)
x = tf.keras.layers.Dense(8, activation='relu')(i)
x = tf.keras.layers.Dense(16, activation='relu')(x)
x = tf.keras.layers.Dense(32, activation='tanh')(x)
x = tf.keras.layers.Dense(8, activation='tanh')(x)
x = tf.keras.layers.Dense(1, activation='sigmoid')(x)
model = tf.keras.models.Model(i, x)
model.summary()
model.compile(loss='mean_squared_error', optimizer='adam', metrics='accuracy')
r = model.fit(training_inputs, training_outputs, epochs=100)
plt.plot(r.history['loss'], label='loss')
plt.xlabel('epoch')
plt.legend() | code |
130002580/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt | code |
130002580/cell_7 | [
"text_plain_output_1.png"
] | import numpy as np
import tensorflow as tf
training_inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
training_outputs = np.array([[0], [1], [1], [1]], dtype=np.float32)
i = tf.keras.Input(2)
x = tf.keras.layers.Dense(8, activation='relu')(i)
x = tf.keras.layers.Dense(16, activation='relu')(x)
x = tf.keras.layers.Dense(32, activation='tanh')(x)
x = tf.keras.layers.Dense(8, activation='tanh')(x)
x = tf.keras.layers.Dense(1, activation='sigmoid')(x)
model = tf.keras.models.Model(i, x)
model.summary()
model.compile(loss='mean_squared_error', optimizer='adam', metrics='accuracy')
r = model.fit(training_inputs, training_outputs, epochs=100) | code |
130002580/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import tensorflow as tf
training_inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
training_outputs = np.array([[0], [1], [1], [1]], dtype=np.float32)
i = tf.keras.Input(2)
x = tf.keras.layers.Dense(8, activation='relu')(i)
x = tf.keras.layers.Dense(16, activation='relu')(x)
x = tf.keras.layers.Dense(32, activation='tanh')(x)
x = tf.keras.layers.Dense(8, activation='tanh')(x)
x = tf.keras.layers.Dense(1, activation='sigmoid')(x)
model = tf.keras.models.Model(i, x)
model.summary()
model.compile(loss='mean_squared_error', optimizer='adam', metrics='accuracy')
r = model.fit(training_inputs, training_outputs, epochs=100)
test_input = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
predictions = model.predict(test_input) | code |
130002580/cell_10 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
training_inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
training_outputs = np.array([[0], [1], [1], [1]], dtype=np.float32)
i = tf.keras.Input(2)
x = tf.keras.layers.Dense(8, activation='relu')(i)
x = tf.keras.layers.Dense(16, activation='relu')(x)
x = tf.keras.layers.Dense(32, activation='tanh')(x)
x = tf.keras.layers.Dense(8, activation='tanh')(x)
x = tf.keras.layers.Dense(1, activation='sigmoid')(x)
model = tf.keras.models.Model(i, x)
model.summary()
model.compile(loss='mean_squared_error', optimizer='adam', metrics='accuracy')
r = model.fit(training_inputs, training_outputs, epochs=100)
plt.plot(r.history['accuracy'], label='accuracy')
plt.xlabel('epoch')
plt.legend() | code |
130002580/cell_5 | [
"text_plain_output_1.png"
] | import tensorflow as tf
i = tf.keras.Input(2)
x = tf.keras.layers.Dense(8, activation='relu')(i)
x = tf.keras.layers.Dense(16, activation='relu')(x)
x = tf.keras.layers.Dense(32, activation='tanh')(x)
x = tf.keras.layers.Dense(8, activation='tanh')(x)
x = tf.keras.layers.Dense(1, activation='sigmoid')(x)
model = tf.keras.models.Model(i, x)
model.summary() | code |
329567/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import trueskill as ts
def cleanResults(raceName,raceColumns,dfResultsTemp,dfResults):
for raceCol in raceColumns:
        dfResultsTemp.index = dfResultsTemp.index.str.replace(r"(\w)([A-Z])", r"\1 \2", regex=True)
dfResultsTemp.index = dfResultsTemp.index.str.title()
raceIndex = raceName + '-' + raceCol
dfResultsTemp[raceCol] = dfResultsTemp[raceCol].astype(str)
        dfResultsTemp[raceCol] = dfResultsTemp[raceCol].str.replace(r'\(|\)|DNF-|RET-|SCP-|RDG-|RCT-|DNS-[0-9]*|DNC-[0-9]*|OCS-[0-9]*', '', regex=True)
dfResultsTemp[raceCol] = dfResultsTemp[raceCol].str.replace('DNF',str(len(dfResults)+1))
dfResultsTemp[raceCol] = pd.to_numeric(dfResultsTemp[raceCol])
dfResultsTemp[raceIndex] = dfResultsTemp[raceCol]
del(dfResultsTemp[raceCol])
dfResults = pd.merge(dfResults,dfResultsTemp[[raceIndex]],left_index=True,right_index=True,how='outer')
return dfResults
def doRating(numRaces, dfResults, dfRatings):
for raceCol in range(1, numRaces + 1):
competed = dfRatings['Name'].isin(dfResults['Name'][dfResults['R' + str(raceCol)].notnull()])
rating_group = list(zip(dfRatings['Rating'][competed].T.values.tolist()))
dfRatings['Rating'][competed] = ts.rate(rating_group, ranks=dfResults['R' + str(raceCol)][competed].T.values.tolist())
return pd.DataFrame(dfRatings)
dfResults = pd.DataFrame()
dfResultsTemp = pd.read_csv('../input/20160323-LaVentana-HydrofoilProTour.csv')
dfResultsTemp = dfResultsTemp.set_index(dfResultsTemp['Name'] + ' ' + dfResultsTemp['LastName'])
raceColumns = ['R1', 'R2', 'R3', 'R4', 'R5', 'R6']
dfResults = cleanResults('20160323-LaVentana-HydrofoilProTour', raceColumns, dfResultsTemp, dfResults)
dfResultsTemp = pd.read_csv('../input/20160807-SanFracisco-HydrofoilProTour.csv')
dfResultsTemp = dfResultsTemp.set_index(dfResultsTemp['Name'])
raceColumns = ['R1', 'R2', 'R3', 'R4', 'R5', 'R6', 'R7', 'R8', 'R9', 'R10', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16']
dfResults = cleanResults('20160807-SanFracisco-HydrofoilProTour', raceColumns, dfResultsTemp, dfResults)
dfResults | code |
329567/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | dfRatings.index = dfRatings['mu_minus_3sigma'].rank(ascending=False)
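# note (added): dfRatings and its 'mu_minus_3sigma' column are not created in this extracted cell;
# a typical construction (assumption) is the conservative TrueSkill estimate mu - 3*sigma, e.g.
# dfRatings['mu_minus_3sigma'] = dfRatings['Rating'].apply(lambda r: r[0].mu - 3 * r[0].sigma)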
dfRatings.sort_values('mu_minus_3sigma', ascending=False) | code
34140243/cell_25 | [
"text_plain_output_1.png"
] | from bs4 import BeautifulSoup
import requests #library used to download web pages.
G = 6.674 * 10 ** (-11)
M_e = 5.97 * 10 ** 24
R_e = 6.37 * 10 ** 6
def escape_velocity():
pass
import requests
URL = 'https://nssdc.gsfc.nasa.gov/planetary/factsheet/planet_table_ratio.html'
page = requests.get(URL)
page.status_code
HTMLstr = page.text
soup = BeautifulSoup(HTMLstr, 'html.parser')
soup.title
soup.a
all_links = soup.find_all('a')
all_tables = soup.find_all('table')
planet_table = soup.find('table')
A = []
B = []
C = []
D = []
E = []
F = []
G = []
H = []
I = []
J = []
K = []
for row in planet_table.findAll('tr'):
body = row.findAll('td')
cells = row.findAll('td')
if len(cells) == 11:
A.append(cells[0].find(text=True))
B.append(cells[1].find(text=True))
C.append(cells[2].find(text=True))
D.append(cells[3].find(text=True))
E.append(cells[4].find(text=True))
F.append(cells[5].find(text=True))
G.append(cells[6].find(text=True))
H.append(cells[7].find(text=True))
I.append(cells[8].find(text=True))
J.append(cells[9].find(text=True))
K.append(cells[10].find(text=True))
A | code |
34140243/cell_33 | [
"text_html_output_1.png"
] | from bs4 import BeautifulSoup
import pandas as pd
import requests #library used to download web pages.
G = 6.674 * 10 ** (-11)
M_e = 5.97 * 10 ** 24
R_e = 6.37 * 10 ** 6
def escape_velocity():
pass
import requests
URL = 'https://nssdc.gsfc.nasa.gov/planetary/factsheet/planet_table_ratio.html'
page = requests.get(URL)
page.status_code
HTMLstr = page.text
soup = BeautifulSoup(HTMLstr, 'html.parser')
soup.title
soup.a
all_links = soup.find_all('a')
all_tables = soup.find_all('table')
planet_table = soup.find('table')
A = []
B = []
C = []
D = []
E = []
F = []
G = []
H = []
I = []
J = []
K = []
for row in planet_table.findAll('tr'):
body = row.findAll('td')
cells = row.findAll('td')
if len(cells) == 11:
A.append(cells[0].find(text=True))
B.append(cells[1].find(text=True))
C.append(cells[2].find(text=True))
D.append(cells[3].find(text=True))
E.append(cells[4].find(text=True))
F.append(cells[5].find(text=True))
G.append(cells[6].find(text=True))
H.append(cells[7].find(text=True))
I.append(cells[8].find(text=True))
J.append(cells[9].find(text=True))
K.append(cells[10].find(text=True))
import pandas as pd
df = pd.DataFrame(A, columns=['Physical_Measurement'])
df['Mercury'] = B
df['Venus'] = C
df['Earth'] = D
df['Moon'] = E
df['Mars'] = F
df['Jupiter'] = G
df['Saturn'] = H
df['Uranus'] = I
df['Neptune'] = J
df['Pluto'] = K
df = df.fillna(0)
df = df.replace(to_replace='Unknown*', value=0)
df
df = df.drop(df.index[0])
df = df.drop(df.index[-1])
df
df.dtypes | code |
34140243/cell_29 | [
"text_plain_output_1.png"
] | from bs4 import BeautifulSoup
import pandas as pd
import requests #library used to download web pages.
G = 6.674 * 10 ** (-11)
M_e = 5.97 * 10 ** 24
R_e = 6.37 * 10 ** 6
def escape_velocity():
pass
import requests
URL = 'https://nssdc.gsfc.nasa.gov/planetary/factsheet/planet_table_ratio.html'
page = requests.get(URL)
page.status_code
HTMLstr = page.text
soup = BeautifulSoup(HTMLstr, 'html.parser')
soup.title
soup.a
all_links = soup.find_all('a')
all_tables = soup.find_all('table')
planet_table = soup.find('table')
A = []
B = []
C = []
D = []
E = []
F = []
G = []
H = []
I = []
J = []
K = []
for row in planet_table.findAll('tr'):
body = row.findAll('td')
cells = row.findAll('td')
if len(cells) == 11:
A.append(cells[0].find(text=True))
B.append(cells[1].find(text=True))
C.append(cells[2].find(text=True))
D.append(cells[3].find(text=True))
E.append(cells[4].find(text=True))
F.append(cells[5].find(text=True))
G.append(cells[6].find(text=True))
H.append(cells[7].find(text=True))
I.append(cells[8].find(text=True))
J.append(cells[9].find(text=True))
K.append(cells[10].find(text=True))
import pandas as pd
df = pd.DataFrame(A, columns=['Physical_Measurement'])
df['Mercury'] = B
df['Venus'] = C
df['Earth'] = D
df['Moon'] = E
df['Mars'] = F
df['Jupiter'] = G
df['Saturn'] = H
df['Uranus'] = I
df['Neptune'] = J
df['Pluto'] = K
df = df.fillna(0)
df = df.replace(to_replace='Unknown*', value=0)
df | code |
34140243/cell_11 | [
"text_plain_output_1.png"
] | import requests #library used to download web pages.
import requests
URL = 'https://nssdc.gsfc.nasa.gov/planetary/factsheet/planet_table_ratio.html'
page = requests.get(URL)
page.status_code | code |
34140243/cell_19 | [
"text_plain_output_1.png"
] | from bs4 import BeautifulSoup
import requests #library used to download web pages.
import requests
URL = 'https://nssdc.gsfc.nasa.gov/planetary/factsheet/planet_table_ratio.html'
page = requests.get(URL)
page.status_code
HTMLstr = page.text
soup = BeautifulSoup(HTMLstr, 'html.parser')
soup.title
soup.a
all_links = soup.find_all('a')
for link in all_links[0:20]:
print(link.get('href')) | code |
34140243/cell_16 | [
"text_plain_output_1.png"
] | from bs4 import BeautifulSoup
import requests #library used to download web pages.
import requests
URL = 'https://nssdc.gsfc.nasa.gov/planetary/factsheet/planet_table_ratio.html'
page = requests.get(URL)
page.status_code
HTMLstr = page.text
soup = BeautifulSoup(HTMLstr, 'html.parser')
soup.title | code |
34140243/cell_17 | [
"text_plain_output_1.png"
] | from bs4 import BeautifulSoup
import requests #library used to download web pages.
import requests
URL = 'https://nssdc.gsfc.nasa.gov/planetary/factsheet/planet_table_ratio.html'
page = requests.get(URL)
page.status_code
HTMLstr = page.text
soup = BeautifulSoup(HTMLstr, 'html.parser')
soup.title
soup.a | code |
34140243/cell_35 | [
"text_plain_output_1.png"
] | from bs4 import BeautifulSoup
import pandas as pd
import requests #library used to download web pages.
G = 6.674 * 10 ** (-11)
M_e = 5.97 * 10 ** 24
R_e = 6.37 * 10 ** 6
def escape_velocity():
pass
import requests
URL = 'https://nssdc.gsfc.nasa.gov/planetary/factsheet/planet_table_ratio.html'
page = requests.get(URL)
page.status_code
HTMLstr = page.text
soup = BeautifulSoup(HTMLstr, 'html.parser')
soup.title
soup.a
all_links = soup.find_all('a')
all_tables = soup.find_all('table')
planet_table = soup.find('table')
A = []
B = []
C = []
D = []
E = []
F = []
G = []
H = []
I = []
J = []
K = []
for row in planet_table.findAll('tr'):
body = row.findAll('td')
cells = row.findAll('td')
if len(cells) == 11:
A.append(cells[0].find(text=True))
B.append(cells[1].find(text=True))
C.append(cells[2].find(text=True))
D.append(cells[3].find(text=True))
E.append(cells[4].find(text=True))
F.append(cells[5].find(text=True))
G.append(cells[6].find(text=True))
H.append(cells[7].find(text=True))
I.append(cells[8].find(text=True))
J.append(cells[9].find(text=True))
K.append(cells[10].find(text=True))
import pandas as pd
df = pd.DataFrame(A, columns=['Physical_Measurement'])
df['Mercury'] = B
df['Venus'] = C
df['Earth'] = D
df['Moon'] = E
df['Mars'] = F
df['Jupiter'] = G
df['Saturn'] = H
df['Uranus'] = I
df['Neptune'] = J
df['Pluto'] = K
df = df.fillna(0)
df = df.replace(to_replace='Unknown*', value=0)
df
df = df.drop(df.index[0])
df = df.drop(df.index[-1])
df
df.dtypes
df = df.applymap(lambda x: x.strip('*') if isinstance(x, str) else x)
df | code |
34140243/cell_31 | [
"text_html_output_1.png"
] | from bs4 import BeautifulSoup
import pandas as pd
import requests #library used to download web pages.
G = 6.674 * 10 ** (-11)
M_e = 5.97 * 10 ** 24
R_e = 6.37 * 10 ** 6
def escape_velocity():
pass
import requests
URL = 'https://nssdc.gsfc.nasa.gov/planetary/factsheet/planet_table_ratio.html'
page = requests.get(URL)
page.status_code
HTMLstr = page.text
soup = BeautifulSoup(HTMLstr, 'html.parser')
soup.title
soup.a
all_links = soup.find_all('a')
all_tables = soup.find_all('table')
planet_table = soup.find('table')
A = []
B = []
C = []
D = []
E = []
F = []
G = []
H = []
I = []
J = []
K = []
for row in planet_table.findAll('tr'):
body = row.findAll('td')
cells = row.findAll('td')
if len(cells) == 11:
A.append(cells[0].find(text=True))
B.append(cells[1].find(text=True))
C.append(cells[2].find(text=True))
D.append(cells[3].find(text=True))
E.append(cells[4].find(text=True))
F.append(cells[5].find(text=True))
G.append(cells[6].find(text=True))
H.append(cells[7].find(text=True))
I.append(cells[8].find(text=True))
J.append(cells[9].find(text=True))
K.append(cells[10].find(text=True))
import pandas as pd
df = pd.DataFrame(A, columns=['Physical_Measurement'])
df['Mercury'] = B
df['Venus'] = C
df['Earth'] = D
df['Moon'] = E
df['Mars'] = F
df['Jupiter'] = G
df['Saturn'] = H
df['Uranus'] = I
df['Neptune'] = J
df['Pluto'] = K
df = df.fillna(0)
df = df.replace(to_replace='Unknown*', value=0)
df
df = df.drop(df.index[0])
df = df.drop(df.index[-1])
df | code |
34140243/cell_10 | [
"text_plain_output_1.png"
] | import requests #library used to download web pages.
import requests
URL = 'https://nssdc.gsfc.nasa.gov/planetary/factsheet/planet_table_ratio.html'
page = requests.get(URL)
type(page) | code |
34140243/cell_37 | [
"text_html_output_1.png"
] | from bs4 import BeautifulSoup
import pandas as pd
import requests #library used to download web pages.
G = 6.674 * 10 ** (-11)
M_e = 5.97 * 10 ** 24
R_e = 6.37 * 10 ** 6
def escape_velocity():
pass
import requests
URL = 'https://nssdc.gsfc.nasa.gov/planetary/factsheet/planet_table_ratio.html'
page = requests.get(URL)
page.status_code
HTMLstr = page.text
soup = BeautifulSoup(HTMLstr, 'html.parser')
soup.title
soup.a
all_links = soup.find_all('a')
all_tables = soup.find_all('table')
planet_table = soup.find('table')
A = []
B = []
C = []
D = []
E = []
F = []
G = []
H = []
I = []
J = []
K = []
for row in planet_table.findAll('tr'):
body = row.findAll('td')
cells = row.findAll('td')
if len(cells) == 11:
A.append(cells[0].find(text=True))
B.append(cells[1].find(text=True))
C.append(cells[2].find(text=True))
D.append(cells[3].find(text=True))
E.append(cells[4].find(text=True))
F.append(cells[5].find(text=True))
G.append(cells[6].find(text=True))
H.append(cells[7].find(text=True))
I.append(cells[8].find(text=True))
J.append(cells[9].find(text=True))
K.append(cells[10].find(text=True))
import pandas as pd
df = pd.DataFrame(A, columns=['Physical_Measurement'])
df['Mercury'] = B
df['Venus'] = C
df['Earth'] = D
df['Moon'] = E
df['Mars'] = F
df['Jupiter'] = G
df['Saturn'] = H
df['Uranus'] = I
df['Neptune'] = J
df['Pluto'] = K
df = df.fillna(0)
df = df.replace(to_replace='Unknown*', value=0)
df
df = df.drop(df.index[0])
df = df.drop(df.index[-1])
df
df.dtypes
df = df.applymap(lambda x: x.strip('*') if isinstance(x, str) else x)
df
df = df.replace(to_replace=['No', 'Yes', 'Unknown'], value=[0, 1, 2])
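# likely next step (added, assumption): with the '*' flags and Yes/No text cleaned up, the planet
# columns could be converted from strings to numbers for analysis, e.g.
# df[df.columns[1:]] = df[df.columns[1:]].apply(pd.to_numeric, errors='coerce')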
df | code |
34140243/cell_12 | [
"text_plain_output_1.png"
] | import requests #library used to download web pages.
import requests
URL = 'https://nssdc.gsfc.nasa.gov/planetary/factsheet/planet_table_ratio.html'
page = requests.get(URL)
page.status_code
HTMLstr = page.text
print(HTMLstr[:1000]) | code |
34140243/cell_5 | [
"text_html_output_1.png"
] | G = 6.674 * 10 ** (-11)
M_e = 5.97 * 10 ** 24
R_e = 6.37 * 10 ** 6
def escape_velocity(M=M_e, R=R_e):
    # escape velocity from a body of mass M and radius R: v = sqrt(2*G*M/R)
    return (2 * G * M / R) ** 0.5
escape_velocity() | code |
121148449/cell_13 | [
"text_plain_output_1.png"
] | from keras.layers import Conv2D, MaxPooling2D, Activation, Dropout, Flatten, Dense
from keras.models import Sequential
from keras.utils import normalize
from keras.utils import to_categorical
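# added note (assumption): x_train/x_test/y_train/y_test are not built in this extracted cell; they are
# presumably a split of the image arrays assembled earlier (names 'dataset' and 'label' assumed), e.g.
# from sklearn.model_selection import train_test_split
# x_train, x_test, y_train, y_test = train_test_split(np.array(dataset), np.array(label), test_size=0.2, random_state=0)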
(x_train.shape, y_train.shape)
(x_test.shape, y_test.shape)
x_train = normalize(x_train, axis=1)
x_test = normalize(x_test, axis=1)
Y_train = to_categorical(y_train, num_classes=2)
Y_test = to_categorical(y_test, num_classes=2)
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(64, 64, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3), kernel_initializer='he_uniform'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), kernel_initializer='he_uniform'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(2))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, Y_train, batch_size=16, verbose=True, epochs=10, validation_data=(x_test, Y_test), shuffle=False) | code |
121148449/cell_4 | [
"text_plain_output_1.png"
] | from PIL import Image
import cv2
import numpy as np
import os
image_dir = 'datasets/'
no_tumour = os.listdir(image_dir + 'no/')
yes_tumour = os.listdir(image_dir + 'yes/')
dataset = []
label = []
for i, image_name in enumerate(no_tumour):
if image_name.split('.')[1] == 'jpg':
image = cv2.imread(image_dir + 'no/' + image_name)
image = Image.fromarray(image, 'RGB')
image = image.resize((64, 64))
dataset.append(np.array(image))
label.append(0)
for i, image_name in enumerate(yes_tumour):
if image_name.split('.')[1] == 'jpg':
image = cv2.imread(image_dir + 'yes/' + image_name)
image = Image.fromarray(image, 'RGB')
image = image.resize((64, 64))
dataset.append(np.array(image))
label.append(1)
print(dataset) | code |
121148449/cell_7 | [
"text_plain_output_1.png"
] | (x_train.shape, y_train.shape) | code |
121148449/cell_8 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | (x_test.shape, y_test.shape) | code |
90108519/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/chinas-population-by-gender-and-urbanrural/Chinas Population En.csv')
df.columns = ['year', 'total', 'male', 'female', 'urban', 'rural']
df.sort_values(by='year', ignore_index=True, inplace=True)
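# added illustration: the same reconciliation can be done for the urban/rural breakdown
print('Cases of nonconformity by residence: {}'.format(sum(df['total'] - df['urban'] - df['rural'])))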
print('Cases of nonconformity by gender: {}'.format(sum(df['total'] - df['male'] - df['female']))) | code |
90108519/cell_9 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/chinas-population-by-gender-and-urbanrural/Chinas Population En.csv')
df.columns = ['year', 'total', 'male', 'female', 'urban', 'rural']
df.sort_values(by='year', ignore_index=True, inplace=True)
df.head() | code |
90108519/cell_23 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/chinas-population-by-gender-and-urbanrural/Chinas Population En.csv')
df.columns = ['year', 'total', 'male', 'female', 'urban', 'rural']
df.sort_values(by='year', ignore_index=True, inplace=True)
tmp_mask = df['total'] - df['male'] - df['female'] != 0
df[tmp_mask]
df.loc[tmp_mask, 'total'] = df.loc[tmp_mask, 'male'] + df.loc[tmp_mask, 'female']
plt.figure(figsize=(12, 5))
plt.title("Characteristics of China's population over the period of 70 years", fontweight='bold', fontsize=12)
plt.plot(df['year'], df['female'], linewidth=3, label='female')
plt.plot(df['year'], df['male'], linewidth=3, label='male')
plt.plot(df['year'], df['urban'], linewidth=3, linestyle='--', label='urban')
plt.plot(df['year'], df['rural'], linewidth=3, linestyle='--', label='rural')
plt.xlabel('year', fontweight='bold', fontsize=10)
plt.ylabel('population', fontweight='bold', fontsize=10)
plt.grid(axis='x', color='0.95')
plt.legend(title='Features:')
plt.show() | code |