path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1 value) |
---|---|---|---|
128020295/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
128020295/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
players.isnull().sum()
# idxmax()/idxmin() return row labels, not names; look the name up in the 'Player' column
print('Tallest player : {0} - {1} cm'.format(players.loc[players['height'].idxmax(), 'Player'], players['height'].max()))
print('Shortest player: {0} - {1} cm'.format(players.loc[players['height'].idxmin(), 'Player'], players['height'].min()))
print()
print('Heaviest player: {0} - {1} kg'.format(players.loc[players['weight'].idxmax(), 'Player'], players['weight'].max()))
print('Lightest player: {0} - {1} kg'.format(players.loc[players['weight'].idxmin(), 'Player'], players['weight'].min()))
print()
print('Average height of players: ', players['height'].mean())
print('Average weight of players: ', players['weight'].mean()) | code |
128020295/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
seasons_stats[(seasons_stats.Age > 40) & (seasons_stats.PTS > 100)]
seasons_stats.Pos.value_counts().iloc[0:5]
seasons_stats[seasons_stats.Player == 'LeBron James']
seasons_stats[seasons_stats.PTS > 2500].plot(x='Player', y='PTS', kind='bar', figsize=(12, 8)) | code |
128020295/cell_28 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
seasons_stats[(seasons_stats.Age > 40) & (seasons_stats.PTS > 100)]
seasons_stats.Pos.value_counts().iloc[0:5]
seasons_stats[seasons_stats.Player == 'LeBron James']
triples = seasons_stats.groupby('Year')['3P'].sum()
seasons_stats.sort_values('3P', ascending=False)[:25]
seasons_stats[seasons_stats.GS < 41].sort_values('MP', ascending=False)[:25]
seasons_stats[(seasons_stats.GS > 50) & (seasons_stats.MP < 1000)]
real_players = seasons_stats[seasons_stats.MP > 1000]
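# Note: on newer pandas versions the .mean() aggregation below may need numeric_only=True to skip string columns such as 'Player' and 'Tm'.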
real_players.groupby('Pos').mean().sort_values('STL', ascending=False)['STL'].plot.pie() | code |
128020295/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
players.isnull().sum()
players[players['height'] == players['height'].max()] | code |
128020295/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
seasons_stats[(seasons_stats.Age > 40) & (seasons_stats.PTS > 100)]
seasons_stats.Pos.value_counts().iloc[0:5] | code |
128020295/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
seasons_stats[(seasons_stats.Age > 40) & (seasons_stats.PTS > 100)]
seasons_stats.Pos.value_counts().iloc[0:5]
seasons_stats[seasons_stats.Player == 'LeBron James'] | code |
128020295/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
len(players) | code |
128020295/cell_17 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
seasons_stats[(seasons_stats.Age > 40) & (seasons_stats.PTS > 100)]
seasons_stats.Pos.value_counts().iloc[0:5]
seasons_stats[seasons_stats.Player == 'LeBron James']
seasons_stats[seasons_stats.Player == 'LeBron James'].plot(x='Year', y='PTS', figsize=(12, 8)) | code |
128020295/cell_24 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
seasons_stats[(seasons_stats.Age > 40) & (seasons_stats.PTS > 100)]
seasons_stats.Pos.value_counts().iloc[0:5]
seasons_stats[seasons_stats.Player == 'LeBron James']
triples = seasons_stats.groupby('Year')['3P'].sum()
seasons_stats.sort_values('3P', ascending=False)[:25]
seasons_stats[seasons_stats.GS < 41].sort_values('MP', ascending=False)[:25] | code |
128020295/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
seasons_stats[(seasons_stats.Age > 40) & (seasons_stats.PTS > 100)] | code |
128020295/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
seasons_stats[(seasons_stats.Age > 40) & (seasons_stats.PTS > 100)]
seasons_stats.Pos.value_counts().iloc[0:5]
seasons_stats[seasons_stats.Player == 'LeBron James']
triples = seasons_stats.groupby('Year')['3P'].sum()
seasons_stats.sort_values('3P', ascending=False)[:25]
seasons_stats['G'].corr(seasons_stats['MP']) | code |
128020295/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
players.isnull().sum()
players[players['weight'] == players['weight'].max()] | code |
128020295/cell_27 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
seasons_stats[(seasons_stats.Age > 40) & (seasons_stats.PTS > 100)]
seasons_stats.Pos.value_counts().iloc[0:5]
seasons_stats[seasons_stats.Player == 'LeBron James']
triples = seasons_stats.groupby('Year')['3P'].sum()
seasons_stats.sort_values('3P', ascending=False)[:25]
seasons_stats[seasons_stats.GS < 41].sort_values('MP', ascending=False)[:25]
seasons_stats[(seasons_stats.GS > 50) & (seasons_stats.MP < 1000)]
real_players = seasons_stats[seasons_stats.MP > 1000]
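# Note: on newer pandas versions the .mean() aggregation below may need numeric_only=True to skip string columns such as 'Player' and 'Tm'.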
real_players.groupby('Pos').mean().sort_values('BLK', ascending=False)['BLK'].plot.pie() | code |
128020295/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
players.isnull().sum()
players.plot(x='height', y='weight', kind='scatter', figsize=(12, 8)) | code |
128020295/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
players = pd.read_csv('/kaggle/input/nba-players-stats/Players.csv')
seasons_stats = pd.read_csv('/kaggle/input/nba-players-stats/Seasons_Stats.csv')
player_data = pd.read_csv('/kaggle/input/nba-players-stats/player_data.csv')
players.isnull().sum()
players.info() | code |
32068455/cell_13 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from tqdm import tqdm
import glob
import json
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.expand_frame_repr', False)
pd.options.mode.chained_assignment = None
tqdm.pandas()
def filepath(*args):
if len(args) < 1:
return None
elif len(args) == 1:
return args[0]
else:
return f'{args[0]}/{filepath(*args[1:])}'
def addtimebar(L, threshold=1000):
if len(L) > threshold:
return tqdm(L)
else:
return L
class FileReader:
def __init__(self, file_path):
with open(file_path) as file:
content = json.load(file)
self.paper_id = content['paper_id']
self.abstract = []
self.body_text = []
try:
for entry in content['abstract']:
self.abstract.append(entry['text'])
except KeyError:
pass
try:
for entry in content['body_text']:
self.body_text.append(entry['text'])
except KeyError:
pass
self.abstract = '\n'.join(self.abstract)
self.body_text = '\n'.join(self.body_text)
def __repr__(self):
return f'{self.paper_id}: {self.abstract[:200]}... {self.body_text[:200]}...'
def get_breaks(content, length):
data = ''
words = content.split(' ')
total_chars = 0
for i in range(len(words)):
total_chars += len(words[i])
if total_chars > length:
data = data + '<br>' + words[i]
total_chars = 0
else:
data = data + ' ' + words[i]
return data
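# compose(f, g, h) composes right to left: the returned callable applies h first, then g, then f.
# As written it assumes at least two functions are passed (the unpacking needs 'penultimate' and 'last').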
def compose(*funcs):
*funcs, penultimate, last = funcs
if funcs:
penultimate = compose(*funcs, penultimate)
return lambda *args: penultimate(last(*args))
path = '../input/CORD-19-research-challenge'
meta = 'metadata.csv'
all_jsons = glob.glob(filepath(path, '**', '*.json'), recursive=True)
meta_df = pd.read_csv(filepath(path, meta), dtype={'pubmed_id': str, 'Microsoft Academic Paper ID': str, 'doi': str, 'journal': str}, low_memory=False)
dict_ = {'paper_id': [], 'abstract': [], 'body_text': [], 'authors': [], 'title': [], 'publish_time': [], 'journal': [], 'abstract_summary': []}
for entry in all_jsons[:10]:
content = FileReader(entry)
print(content.abstract)
meta_data = meta_df.loc[meta_df['sha'] == content.paper_id]
if len(meta_data) == 0:
continue
dict_['paper_id'].append(content.paper_id)
dict_['abstract'].append(content.abstract)
dict_['body_text'].append(content.body_text)
print(content.paper_id)
if len(content.abstract) == 0:
dict_['abstract_summary'].append('Not provided.')
elif len(content.abstract.split(' ')) > 100:
info = content.abstract.split(' ')[:100]
summary = get_breaks(' '.join(info), 40)
dict_['abstract_summary'].append(summary + '...')
else:
summary = get_breaks(content.abstract, 40)
dict_['abstract_summary'].append(summary)
meta_data = meta_df.loc[meta_df['sha'] == content.paper_id]
try:
authors = meta_data['authors'].values[0].split(';')
if len(authors) > 2:
dict_['authors'].append('. '.join(authors[:2]) + '...')
else:
dict_['authors'].append('. '.join(authors))
except Exception as e:
dict_['authors'].append(meta_data['authors'].values[0])
try:
title = get_breaks(meta_data['title'].values[0], 40)
dict_['title'].append(title)
except Exception as e:
dict_['title'].append(meta_data['title'].values[0])
try:
publish_time = get_breaks(meta_data['publish_time'].values[0], 40)
dict_['publish_time'].append(publish_time)
except Exception as e:
dict_['publish_time'].append(meta_data['publish_time'].values[0])
dict_['journal'].append(meta_data['journal'].values[0])
df_covid = pd.DataFrame(dict_, columns=['paper_id', 'abstract', 'body_text', 'authors', 'title', 'journal', 'publish_time', 'abstract_summary'])
df_covid.head() | code |
32068455/cell_4 | [
"text_html_output_1.png"
] | from pandarallel import pandarallel
import nltk
import spacy
import numpy as np
import pandas as pd
import glob
import json
import re
import itertools
from tqdm import tqdm
import nltk
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
from nltk import tokenize
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from collections import defaultdict
from nltk.corpus import wordnet as wn
from langdetect import detect
from nltk.corpus import stopwords
import contractions
import inflect
from nltk.stem import PorterStemmer
from nltk.stem import LancasterStemmer
from nltk.stem import WordNetLemmatizer
from pandarallel import pandarallel
import pickle
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
import seaborn as sns
from sentence_transformers import SentenceTransformer
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import cosine_similarity
import re
import bs4
import requests
import spacy
from spacy import displacy
nlp = spacy.load('en_core_web_sm')
from spacy.matcher import Matcher
from spacy.tokens import Span
import networkx as nx
pandarallel.initialize(use_memory_fs=False, nb_workers=8) | code |
32068455/cell_33 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from langdetect import detect
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
from os import path
from pandarallel import pandarallel
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from tqdm import tqdm
import contractions
import glob
import json
import nltk
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import re
import seaborn as sns
import seaborn as sns
import spacy
import numpy as np
import pandas as pd
import glob
import json
import re
import itertools
from tqdm import tqdm
import nltk
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
from nltk import tokenize
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from collections import defaultdict
from nltk.corpus import wordnet as wn
from langdetect import detect
from nltk.corpus import stopwords
import contractions
import inflect
from nltk.stem import PorterStemmer
from nltk.stem import LancasterStemmer
from nltk.stem import WordNetLemmatizer
from pandarallel import pandarallel
import pickle
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
import seaborn as sns
from sentence_transformers import SentenceTransformer
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import cosine_similarity
import re
import bs4
import requests
import spacy
from spacy import displacy
nlp = spacy.load('en_core_web_sm')
from spacy.matcher import Matcher
from spacy.tokens import Span
import networkx as nx
pandarallel.initialize(use_memory_fs=False, nb_workers=8)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.expand_frame_repr', False)
pd.options.mode.chained_assignment = None
tqdm.pandas()
def filepath(*args):
if len(args) < 1:
return None
elif len(args) == 1:
return args[0]
else:
return f'{args[0]}/{filepath(*args[1:])}'
def addtimebar(L, threshold=1000):
if len(L) > threshold:
return tqdm(L)
else:
return L
class FileReader:
def __init__(self, file_path):
with open(file_path) as file:
content = json.load(file)
self.paper_id = content['paper_id']
self.abstract = []
self.body_text = []
try:
for entry in content['abstract']:
self.abstract.append(entry['text'])
except KeyError:
pass
try:
for entry in content['body_text']:
self.body_text.append(entry['text'])
except KeyError:
pass
self.abstract = '\n'.join(self.abstract)
self.body_text = '\n'.join(self.body_text)
def __repr__(self):
return f'{self.paper_id}: {self.abstract[:200]}... {self.body_text[:200]}...'
def get_breaks(content, length):
data = ''
words = content.split(' ')
total_chars = 0
for i in range(len(words)):
total_chars += len(words[i])
if total_chars > length:
data = data + '<br>' + words[i]
total_chars = 0
else:
data = data + ' ' + words[i]
return data
def compose(*funcs):
*funcs, penultimate, last = funcs
if funcs:
penultimate = compose(*funcs, penultimate)
return lambda *args: penultimate(last(*args))
path = '../input/CORD-19-research-challenge'
meta = 'metadata.csv'
all_jsons = glob.glob(filepath(path, '**', '*.json'), recursive=True)
meta_df = pd.read_csv(filepath(path, meta), dtype={'pubmed_id': str, 'Microsoft Academic Paper ID': str, 'doi': str, 'journal': str}, low_memory=False)
dict_ = {'paper_id': [], 'abstract': [], 'body_text': [], 'authors': [], 'title': [], 'publish_time': [], 'journal': [], 'abstract_summary': []}
for entry in all_jsons[:10]:
content = FileReader(entry)
meta_data = meta_df.loc[meta_df['sha'] == content.paper_id]
if len(meta_data) == 0:
continue
dict_['paper_id'].append(content.paper_id)
dict_['abstract'].append(content.abstract)
dict_['body_text'].append(content.body_text)
if len(content.abstract) == 0:
dict_['abstract_summary'].append('Not provided.')
elif len(content.abstract.split(' ')) > 100:
info = content.abstract.split(' ')[:100]
summary = get_breaks(' '.join(info), 40)
dict_['abstract_summary'].append(summary + '...')
else:
summary = get_breaks(content.abstract, 40)
dict_['abstract_summary'].append(summary)
meta_data = meta_df.loc[meta_df['sha'] == content.paper_id]
try:
authors = meta_data['authors'].values[0].split(';')
if len(authors) > 2:
dict_['authors'].append('. '.join(authors[:2]) + '...')
else:
dict_['authors'].append('. '.join(authors))
except Exception as e:
dict_['authors'].append(meta_data['authors'].values[0])
try:
title = get_breaks(meta_data['title'].values[0], 40)
dict_['title'].append(title)
except Exception as e:
dict_['title'].append(meta_data['title'].values[0])
try:
publish_time = get_breaks(meta_data['publish_time'].values[0], 40)
dict_['publish_time'].append(publish_time)
except Exception as e:
dict_['publish_time'].append(meta_data['publish_time'].values[0])
dict_['journal'].append(meta_data['journal'].values[0])
df_covid = pd.DataFrame(dict_, columns=['paper_id', 'abstract', 'body_text', 'authors', 'title', 'journal', 'publish_time', 'abstract_summary'])
def is_lang(row, item, lang, dropNA=True):
    if row[item] is not None and row[item] != '' and (row[item] != 'None') and isinstance(row[item], str):
try:
return detect(row[item]) == lang
except Exception as e:
return False
else:
return not dropNA
def select_article_lang_multi(df, basedon='abstract', lang='en'):
return df[df.parallel_apply(lambda text: is_lang(text, basedon, lang), axis=1)]
df_covid_eng = select_article_lang_multi(df_covid)
def replace_brackets_with_whitespace(text):
text = text.replace('(', '')
text = text.replace(')', '')
text = text.replace('[', '')
text = text.replace(']', '')
return text
def replace_contractions(text):
return contractions.fix(text)
def strip_characters(text):
    t = re.sub('\\(|\\)|:|,|;|\\.|’|”|“|\\?|%|>|<', '', text)
t = re.sub('/', ' ', t)
t = t.replace("'", '')
return t
def to_lowercase(word):
return word.lower()
def do_stemming(stemmer):
return lambda word: stemmer.stem(word)
def do_lemmatizing(lemmatizer):
return lambda word: lemmatizer.lemmatize(word, pos='v')
def is_stopword(word):
return word in stopwords.words('english')
def process_word_by(word_cleanner, uniqueYN):
def cond(word):
return len(word) > 1 and (not is_stopword(word)) and (not word.isnumeric()) and word.isalnum() and (word != len(word) * word[0])
def clean_byword(text):
return list(take_unique(uniqueYN)((word_cleanner(word) for word in text if cond(word))))
return clean_byword
def take_unique(YN):
return set if YN else lambda x: x
text_processor = compose(replace_brackets_with_whitespace, replace_contractions, strip_characters)
word_processor = compose(to_lowercase, do_lemmatizing(WordNetLemmatizer()), do_stemming(PorterStemmer()))
def pre_processing(df, text_tools, word_tools):
def inner(col, uniqueYN=False):
return df[col].parallel_apply(text_tools).parallel_apply(nltk.word_tokenize).parallel_apply(process_word_by(word_tools, uniqueYN=uniqueYN))
return inner
def get_top_nK_words(corpus, K=1, n=None):
vec1 = CountVectorizer(max_df=0.7, stop_words=stopwords.words('english'), ngram_range=(K, K), max_features=2000).fit(corpus)
bag_of_words = vec1.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [(word, sum_words[0, idx]) for word, idx in vec1.vocabulary_.items()]
words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
return words_freq[:n]
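# NOTE: 'corpus' is not defined in this cell; it is assumed to be built in an earlier (omitted) cell, e.g. a collection of pre-processed abstract strings.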
#Convert most freq words to dataframe for plotting bar plot
top_words = get_top_nK_words(corpus, K=1, n=20)
top_df = pd.DataFrame(top_words)
top_df.columns=["Word", "Freq"]
#Barplot of most freq words
sns.set(rc={'figure.figsize':(13,8)})
g = sns.barplot(x="Word", y="Freq", data=top_df)
g.set_xticklabels(g.get_xticklabels(), rotation=30)
top2_words = get_top_nK_words(corpus, K=2, n=20)
top2_df = pd.DataFrame(top2_words)
top2_df.columns = ['Bi-gram', 'Freq']
print(top2_df)
import seaborn as sns
sns.set(rc={'figure.figsize': (13, 8)})
h = sns.barplot(x='Bi-gram', y='Freq', data=top2_df)
h.set_xticklabels(h.get_xticklabels(), rotation=45)
fig = h.get_figure() | code |
32068455/cell_39 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from langdetect import detect
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
from os import path
from pandarallel import pandarallel
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from tqdm import tqdm
import contractions
import glob
import json
import nltk
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pickle
import re
import re
import seaborn as sns
import seaborn as sns
import seaborn as sns
import spacy
import numpy as np
import pandas as pd
import glob
import json
import re
import itertools
from tqdm import tqdm
import nltk
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
from nltk import tokenize
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from collections import defaultdict
from nltk.corpus import wordnet as wn
from langdetect import detect
from nltk.corpus import stopwords
import contractions
import inflect
from nltk.stem import PorterStemmer
from nltk.stem import LancasterStemmer
from nltk.stem import WordNetLemmatizer
from pandarallel import pandarallel
import pickle
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
import seaborn as sns
from sentence_transformers import SentenceTransformer
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import cosine_similarity
import re
import bs4
import requests
import spacy
from spacy import displacy
nlp = spacy.load('en_core_web_sm')
from spacy.matcher import Matcher
from spacy.tokens import Span
import networkx as nx
pandarallel.initialize(use_memory_fs=False, nb_workers=8)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.expand_frame_repr', False)
pd.options.mode.chained_assignment = None
tqdm.pandas()
def filepath(*args):
if len(args) < 1:
return None
elif len(args) == 1:
return args[0]
else:
return f'{args[0]}/{filepath(*args[1:])}'
def addtimebar(L, threshold=1000):
if len(L) > threshold:
return tqdm(L)
else:
return L
class FileReader:
def __init__(self, file_path):
with open(file_path) as file:
content = json.load(file)
self.paper_id = content['paper_id']
self.abstract = []
self.body_text = []
try:
for entry in content['abstract']:
self.abstract.append(entry['text'])
except KeyError:
pass
try:
for entry in content['body_text']:
self.body_text.append(entry['text'])
except KeyError:
pass
self.abstract = '\n'.join(self.abstract)
self.body_text = '\n'.join(self.body_text)
def __repr__(self):
return f'{self.paper_id}: {self.abstract[:200]}... {self.body_text[:200]}...'
def get_breaks(content, length):
data = ''
words = content.split(' ')
total_chars = 0
for i in range(len(words)):
total_chars += len(words[i])
if total_chars > length:
data = data + '<br>' + words[i]
total_chars = 0
else:
data = data + ' ' + words[i]
return data
def compose(*funcs):
*funcs, penultimate, last = funcs
if funcs:
penultimate = compose(*funcs, penultimate)
return lambda *args: penultimate(last(*args))
path = '../input/CORD-19-research-challenge'
meta = 'metadata.csv'
all_jsons = glob.glob(filepath(path, '**', '*.json'), recursive=True)
meta_df = pd.read_csv(filepath(path, meta), dtype={'pubmed_id': str, 'Microsoft Academic Paper ID': str, 'doi': str, 'journal': str}, low_memory=False)
dict_ = {'paper_id': [], 'abstract': [], 'body_text': [], 'authors': [], 'title': [], 'publish_time': [], 'journal': [], 'abstract_summary': []}
for entry in all_jsons[:10]:
content = FileReader(entry)
meta_data = meta_df.loc[meta_df['sha'] == content.paper_id]
if len(meta_data) == 0:
continue
dict_['paper_id'].append(content.paper_id)
dict_['abstract'].append(content.abstract)
dict_['body_text'].append(content.body_text)
if len(content.abstract) == 0:
dict_['abstract_summary'].append('Not provided.')
elif len(content.abstract.split(' ')) > 100:
info = content.abstract.split(' ')[:100]
summary = get_breaks(' '.join(info), 40)
dict_['abstract_summary'].append(summary + '...')
else:
summary = get_breaks(content.abstract, 40)
dict_['abstract_summary'].append(summary)
meta_data = meta_df.loc[meta_df['sha'] == content.paper_id]
try:
authors = meta_data['authors'].values[0].split(';')
if len(authors) > 2:
dict_['authors'].append('. '.join(authors[:2]) + '...')
else:
dict_['authors'].append('. '.join(authors))
except Exception as e:
dict_['authors'].append(meta_data['authors'].values[0])
try:
title = get_breaks(meta_data['title'].values[0], 40)
dict_['title'].append(title)
except Exception as e:
dict_['title'].append(meta_data['title'].values[0])
try:
publish_time = get_breaks(meta_data['publish_time'].values[0], 40)
dict_['publish_time'].append(publish_time)
except Exception as e:
dict_['publish_time'].append(meta_data['publish_time'].values[0])
dict_['journal'].append(meta_data['journal'].values[0])
df_covid = pd.DataFrame(dict_, columns=['paper_id', 'abstract', 'body_text', 'authors', 'title', 'journal', 'publish_time', 'abstract_summary'])
def is_lang(row, item, lang, dropNA=True):
    if row[item] is not None and row[item] != '' and (row[item] != 'None') and isinstance(row[item], str):
try:
return detect(row[item]) == lang
except Exception as e:
return False
else:
return not dropNA
def select_article_lang_multi(df, basedon='abstract', lang='en'):
return df[df.parallel_apply(lambda text: is_lang(text, basedon, lang), axis=1)]
df_covid_eng = select_article_lang_multi(df_covid)
def replace_brackets_with_whitespace(text):
text = text.replace('(', '')
text = text.replace(')', '')
text = text.replace('[', '')
text = text.replace(']', '')
return text
def replace_contractions(text):
return contractions.fix(text)
def strip_characters(text):
    t = re.sub('\\(|\\)|:|,|;|\\.|’|”|“|\\?|%|>|<', '', text)
t = re.sub('/', ' ', t)
t = t.replace("'", '')
return t
def to_lowercase(word):
return word.lower()
def do_stemming(stemmer):
return lambda word: stemmer.stem(word)
def do_lemmatizing(lemmatizer):
return lambda word: lemmatizer.lemmatize(word, pos='v')
def is_stopword(word):
return word in stopwords.words('english')
def process_word_by(word_cleanner, uniqueYN):
def cond(word):
return len(word) > 1 and (not is_stopword(word)) and (not word.isnumeric()) and word.isalnum() and (word != len(word) * word[0])
def clean_byword(text):
return list(take_unique(uniqueYN)((word_cleanner(word) for word in text if cond(word))))
return clean_byword
def take_unique(YN):
return set if YN else lambda x: x
text_processor = compose(replace_brackets_with_whitespace, replace_contractions, strip_characters)
word_processor = compose(to_lowercase, do_lemmatizing(WordNetLemmatizer()), do_stemming(PorterStemmer()))
def pre_processing(df, text_tools, word_tools):
def inner(col, uniqueYN=False):
return df[col].parallel_apply(text_tools).parallel_apply(nltk.word_tokenize).parallel_apply(process_word_by(word_tools, uniqueYN=uniqueYN))
return inner
tokenized_df = df_covid_eng.sort_values(by='publish_time', ascending=False)
processor = pre_processing(tokenized_df, text_processor, word_processor)
tokenized_df['abstract_token'] = processor('abstract')
tokenized_df = tokenized_df.reset_index(drop=True)
tokenized_df.head()['abstract_token']
with open('../data/df_kaggle_all_eng_tokenized.pkl', 'rb') as fp:
tokenized_df = pickle.load(fp)
def get_top_nK_words(corpus, K=1, n=None):
vec1 = CountVectorizer(max_df=0.7, stop_words=stopwords.words('english'), ngram_range=(K, K), max_features=2000).fit(corpus)
bag_of_words = vec1.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [(word, sum_words[0, idx]) for word, idx in vec1.vocabulary_.items()]
words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
return words_freq[:n]
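# NOTE: 'corpus' is not defined in this cell; it is assumed to be built in an earlier (omitted) cell, e.g. a collection of pre-processed abstract strings.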
#Convert most freq words to dataframe for plotting bar plot
top_words = get_top_nK_words(corpus, K=1, n=20)
top_df = pd.DataFrame(top_words)
top_df.columns=["Word", "Freq"]
#Barplot of most freq words
sns.set(rc={'figure.figsize':(13,8)})
g = sns.barplot(x="Word", y="Freq", data=top_df)
g.set_xticklabels(g.get_xticklabels(), rotation=30)
# Top bi-grams
top2_words = get_top_nK_words(corpus, K=2, n=20)
top2_df = pd.DataFrame(top2_words)
top2_df.columns=["Bi-gram", "Freq"]
print(top2_df)
#Barplot of most freq Bi-grams
import seaborn as sns
sns.set(rc={'figure.figsize':(13,8)})
h=sns.barplot(x="Bi-gram", y="Freq", data=top2_df)
h.set_xticklabels(h.get_xticklabels(), rotation=45)
fig = h.get_figure()
top3_words = get_top_nK_words(corpus, K=3, n=20)
top3_df = pd.DataFrame(top3_words)
top3_df.columns=["Tri-gram", "Freq"]
print(top3_df)
#Barplot of most freq Tri-grams
import seaborn as sns
sns.set(rc={'figure.figsize':(13,8)})
j=sns.barplot(x="Tri-gram", y="Freq", data=top3_df)
j.set_xticklabels(j.get_xticklabels(), rotation=45)
fig = j.get_figure()
def tfidf_(df):
myvectorizer = TfidfVectorizer()
vectors = myvectorizer.fit_transform(df['abstract_token'].parallel_apply(lambda x: ' '.join(x))).toarray()
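    # get_feature_names() was deprecated in scikit-learn 1.0 and removed in 1.2; newer versions use get_feature_names_out()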
feature_names = myvectorizer.get_feature_names()
veclist = vectors.tolist()
out_tfidf = pd.DataFrame(veclist, columns=feature_names)
return out_tfidf
tfidf_(tokenized_df[:5000]).head() | code |
32068455/cell_2 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | # install packages
!pip install nltk --user
!pip install owlready2 --user
!pip install pronto --user
!pip install ipynb-py-convert --user
!pip install langdetect --user
!pip install contractions --user
!pip install inflect --user
!pip install num2words --user
!pip install tables --user
!pip install h5py --user
!pip install sentence-transformers --user
!pip install pandas --user
!pip install tqdm --user
!pip install seaborn --user
!pip install numpy --user
!pip install scipy --user
!pip install matplotlib --user
!pip install bottleneck --user
!pip install pandarallel --user
!pip install wordcloud --user
!pip install --user spacy
!pip install --user https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-2.2.0/en_core_web_sm-2.2.0.tar.gz | code |
32068455/cell_11 | [
"text_plain_output_1.png"
] | from tqdm import tqdm
import glob
import json
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.expand_frame_repr', False)
pd.options.mode.chained_assignment = None
tqdm.pandas()
def filepath(*args):
if len(args) < 1:
return None
elif len(args) == 1:
return args[0]
else:
return f'{args[0]}/{filepath(*args[1:])}'
def addtimebar(L, threshold=1000):
if len(L) > threshold:
return tqdm(L)
else:
return L
class FileReader:
def __init__(self, file_path):
with open(file_path) as file:
content = json.load(file)
self.paper_id = content['paper_id']
self.abstract = []
self.body_text = []
try:
for entry in content['abstract']:
self.abstract.append(entry['text'])
except KeyError:
pass
try:
for entry in content['body_text']:
self.body_text.append(entry['text'])
except KeyError:
pass
self.abstract = '\n'.join(self.abstract)
self.body_text = '\n'.join(self.body_text)
def __repr__(self):
return f'{self.paper_id}: {self.abstract[:200]}... {self.body_text[:200]}...'
def get_breaks(content, length):
data = ''
words = content.split(' ')
total_chars = 0
for i in range(len(words)):
total_chars += len(words[i])
if total_chars > length:
data = data + '<br>' + words[i]
total_chars = 0
else:
data = data + ' ' + words[i]
return data
def compose(*funcs):
*funcs, penultimate, last = funcs
if funcs:
penultimate = compose(*funcs, penultimate)
return lambda *args: penultimate(last(*args))
path = '../input/CORD-19-research-challenge'
meta = 'metadata.csv'
all_jsons = glob.glob(filepath(path, '**', '*.json'), recursive=True)
first_row = FileReader(all_jsons[0])
print(first_row) | code |
32068455/cell_16 | [
"text_plain_output_1.png"
] | from langdetect import detect
from tqdm import tqdm
import glob
import json
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.expand_frame_repr', False)
pd.options.mode.chained_assignment = None
tqdm.pandas()
def filepath(*args):
if len(args) < 1:
return None
elif len(args) == 1:
return args[0]
else:
return f'{args[0]}/{filepath(*args[1:])}'
def addtimebar(L, threshold=1000):
if len(L) > threshold:
return tqdm(L)
else:
return L
class FileReader:
def __init__(self, file_path):
with open(file_path) as file:
content = json.load(file)
self.paper_id = content['paper_id']
self.abstract = []
self.body_text = []
try:
for entry in content['abstract']:
self.abstract.append(entry['text'])
except KeyError:
pass
try:
for entry in content['body_text']:
self.body_text.append(entry['text'])
except KeyError:
pass
self.abstract = '\n'.join(self.abstract)
self.body_text = '\n'.join(self.body_text)
def __repr__(self):
return f'{self.paper_id}: {self.abstract[:200]}... {self.body_text[:200]}...'
def get_breaks(content, length):
data = ''
words = content.split(' ')
total_chars = 0
for i in range(len(words)):
total_chars += len(words[i])
if total_chars > length:
data = data + '<br>' + words[i]
total_chars = 0
else:
data = data + ' ' + words[i]
return data
def compose(*funcs):
*funcs, penultimate, last = funcs
if funcs:
penultimate = compose(*funcs, penultimate)
return lambda *args: penultimate(last(*args))
path = '../input/CORD-19-research-challenge'
meta = 'metadata.csv'
all_jsons = glob.glob(filepath(path, '**', '*.json'), recursive=True)
meta_df = pd.read_csv(filepath(path, meta), dtype={'pubmed_id': str, 'Microsoft Academic Paper ID': str, 'doi': str, 'journal': str}, low_memory=False)
dict_ = {'paper_id': [], 'abstract': [], 'body_text': [], 'authors': [], 'title': [], 'publish_time': [], 'journal': [], 'abstract_summary': []}
for entry in all_jsons[:10]:
content = FileReader(entry)
meta_data = meta_df.loc[meta_df['sha'] == content.paper_id]
if len(meta_data) == 0:
continue
dict_['paper_id'].append(content.paper_id)
dict_['abstract'].append(content.abstract)
dict_['body_text'].append(content.body_text)
if len(content.abstract) == 0:
dict_['abstract_summary'].append('Not provided.')
elif len(content.abstract.split(' ')) > 100:
info = content.abstract.split(' ')[:100]
summary = get_breaks(' '.join(info), 40)
dict_['abstract_summary'].append(summary + '...')
else:
summary = get_breaks(content.abstract, 40)
dict_['abstract_summary'].append(summary)
meta_data = meta_df.loc[meta_df['sha'] == content.paper_id]
try:
authors = meta_data['authors'].values[0].split(';')
if len(authors) > 2:
dict_['authors'].append('. '.join(authors[:2]) + '...')
else:
dict_['authors'].append('. '.join(authors))
except Exception as e:
dict_['authors'].append(meta_data['authors'].values[0])
try:
title = get_breaks(meta_data['title'].values[0], 40)
dict_['title'].append(title)
except Exception as e:
dict_['title'].append(meta_data['title'].values[0])
try:
publish_time = get_breaks(meta_data['publish_time'].values[0], 40)
dict_['publish_time'].append(publish_time)
except Exception as e:
dict_['publish_time'].append(meta_data['publish_time'].values[0])
dict_['journal'].append(meta_data['journal'].values[0])
df_covid = pd.DataFrame(dict_, columns=['paper_id', 'abstract', 'body_text', 'authors', 'title', 'journal', 'publish_time', 'abstract_summary'])
def is_lang(row, item, lang, dropNA=True):
    if row[item] is not None and row[item] != '' and (row[item] != 'None') and isinstance(row[item], str):
try:
return detect(row[item]) == lang
except Exception as e:
return False
else:
return not dropNA
def select_article_lang_multi(df, basedon='abstract', lang='en'):
return df[df.parallel_apply(lambda text: is_lang(text, basedon, lang), axis=1)]
df_covid_eng = select_article_lang_multi(df_covid)
print('Number of English Articles: {}/{}'.format(len(df_covid_eng), len(df_covid)))
df_covid_eng.head(n=2) | code |
32068455/cell_35 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from langdetect import detect
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
from os import path
from pandarallel import pandarallel
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from tqdm import tqdm
import contractions
import glob
import json
import nltk
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import re
import seaborn as sns
import seaborn as sns
import seaborn as sns
import spacy
import numpy as np
import pandas as pd
import glob
import json
import re
import itertools
from tqdm import tqdm
import nltk
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
from nltk import tokenize
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from collections import defaultdict
from nltk.corpus import wordnet as wn
from langdetect import detect
from nltk.corpus import stopwords
import contractions
import inflect
from nltk.stem import PorterStemmer
from nltk.stem import LancasterStemmer
from nltk.stem import WordNetLemmatizer
from pandarallel import pandarallel
import pickle
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
import seaborn as sns
from sentence_transformers import SentenceTransformer
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import cosine_similarity
import re
import bs4
import requests
import spacy
from spacy import displacy
nlp = spacy.load('en_core_web_sm')
from spacy.matcher import Matcher
from spacy.tokens import Span
import networkx as nx
pandarallel.initialize(use_memory_fs=False, nb_workers=8)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.expand_frame_repr', False)
pd.options.mode.chained_assignment = None
tqdm.pandas()
def filepath(*args):
if len(args) < 1:
return None
elif len(args) == 1:
return args[0]
else:
return f'{args[0]}/{filepath(*args[1:])}'
def addtimebar(L, threshold=1000):
if len(L) > threshold:
return tqdm(L)
else:
return L
class FileReader:
def __init__(self, file_path):
with open(file_path) as file:
content = json.load(file)
self.paper_id = content['paper_id']
self.abstract = []
self.body_text = []
try:
for entry in content['abstract']:
self.abstract.append(entry['text'])
except KeyError:
pass
try:
for entry in content['body_text']:
self.body_text.append(entry['text'])
except KeyError:
pass
self.abstract = '\n'.join(self.abstract)
self.body_text = '\n'.join(self.body_text)
def __repr__(self):
return f'{self.paper_id}: {self.abstract[:200]}... {self.body_text[:200]}...'
def get_breaks(content, length):
data = ''
words = content.split(' ')
total_chars = 0
for i in range(len(words)):
total_chars += len(words[i])
if total_chars > length:
data = data + '<br>' + words[i]
total_chars = 0
else:
data = data + ' ' + words[i]
return data
def compose(*funcs):
*funcs, penultimate, last = funcs
if funcs:
penultimate = compose(*funcs, penultimate)
return lambda *args: penultimate(last(*args))
path = '../input/CORD-19-research-challenge'
meta = 'metadata.csv'
all_jsons = glob.glob(filepath(path, '**', '*.json'), recursive=True)
meta_df = pd.read_csv(filepath(path, meta), dtype={'pubmed_id': str, 'Microsoft Academic Paper ID': str, 'doi': str, 'journal': str}, low_memory=False)
dict_ = {'paper_id': [], 'abstract': [], 'body_text': [], 'authors': [], 'title': [], 'publish_time': [], 'journal': [], 'abstract_summary': []}
for entry in all_jsons[:10]:
content = FileReader(entry)
meta_data = meta_df.loc[meta_df['sha'] == content.paper_id]
if len(meta_data) == 0:
continue
dict_['paper_id'].append(content.paper_id)
dict_['abstract'].append(content.abstract)
dict_['body_text'].append(content.body_text)
if len(content.abstract) == 0:
dict_['abstract_summary'].append('Not provided.')
elif len(content.abstract.split(' ')) > 100:
info = content.abstract.split(' ')[:100]
summary = get_breaks(' '.join(info), 40)
dict_['abstract_summary'].append(summary + '...')
else:
summary = get_breaks(content.abstract, 40)
dict_['abstract_summary'].append(summary)
meta_data = meta_df.loc[meta_df['sha'] == content.paper_id]
try:
authors = meta_data['authors'].values[0].split(';')
if len(authors) > 2:
dict_['authors'].append('. '.join(authors[:2]) + '...')
else:
dict_['authors'].append('. '.join(authors))
except Exception as e:
dict_['authors'].append(meta_data['authors'].values[0])
try:
title = get_breaks(meta_data['title'].values[0], 40)
dict_['title'].append(title)
except Exception as e:
dict_['title'].append(meta_data['title'].values[0])
try:
publish_time = get_breaks(meta_data['publish_time'].values[0], 40)
dict_['publish_time'].append(publish_time)
except Exception as e:
dict_['publish_time'].append(meta_data['publish_time'].values[0])
dict_['journal'].append(meta_data['journal'].values[0])
df_covid = pd.DataFrame(dict_, columns=['paper_id', 'abstract', 'body_text', 'authors', 'title', 'journal', 'publish_time', 'abstract_summary'])
def is_lang(row, item, lang, dropNA=True):
    if row[item] is not None and row[item] != '' and (row[item] != 'None') and isinstance(row[item], str):
try:
return detect(row[item]) == lang
except Exception as e:
return False
else:
return not dropNA
def select_article_lang_multi(df, basedon='abstract', lang='en'):
return df[df.parallel_apply(lambda text: is_lang(text, basedon, lang), axis=1)]
df_covid_eng = select_article_lang_multi(df_covid)
def replace_brackets_with_whitespace(text):
text = text.replace('(', '')
text = text.replace(')', '')
text = text.replace('[', '')
text = text.replace(']', '')
return text
def replace_contractions(text):
return contractions.fix(text)
def strip_characters(text):
    t = re.sub('\\(|\\)|:|,|;|\\.|’|”|“|\\?|%|>|<', '', text)
t = re.sub('/', ' ', t)
t = t.replace("'", '')
return t
def to_lowercase(word):
return word.lower()
def do_stemming(stemmer):
return lambda word: stemmer.stem(word)
def do_lemmatizing(lemmatizer):
return lambda word: lemmatizer.lemmatize(word, pos='v')
def is_stopword(word):
return word in stopwords.words('english')
def process_word_by(word_cleanner, uniqueYN):
def cond(word):
return len(word) > 1 and (not is_stopword(word)) and (not word.isnumeric()) and word.isalnum() and (word != len(word) * word[0])
def clean_byword(text):
return list(take_unique(uniqueYN)((word_cleanner(word) for word in text if cond(word))))
return clean_byword
def take_unique(YN):
return set if YN else lambda x: x
text_processor = compose(replace_brackets_with_whitespace, replace_contractions, strip_characters)
word_processor = compose(to_lowercase, do_lemmatizing(WordNetLemmatizer()), do_stemming(PorterStemmer()))
def pre_processing(df, text_tools, word_tools):
def inner(col, uniqueYN=False):
return df[col].parallel_apply(text_tools).parallel_apply(nltk.word_tokenize).parallel_apply(process_word_by(word_tools, uniqueYN=uniqueYN))
return inner
def get_top_nK_words(corpus, K=1, n=None):
vec1 = CountVectorizer(max_df=0.7, stop_words=stopwords.words('english'), ngram_range=(K, K), max_features=2000).fit(corpus)
bag_of_words = vec1.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [(word, sum_words[0, idx]) for word, idx in vec1.vocabulary_.items()]
words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
return words_freq[:n]
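# NOTE: 'corpus' is not defined in this cell; it is assumed to be built in an earlier (omitted) cell, e.g. a collection of pre-processed abstract strings.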
#Convert most freq words to dataframe for plotting bar plot
top_words = get_top_nK_words(corpus, K=1, n=20)
top_df = pd.DataFrame(top_words)
top_df.columns=["Word", "Freq"]
#Barplot of most freq words
sns.set(rc={'figure.figsize':(13,8)})
g = sns.barplot(x="Word", y="Freq", data=top_df)
g.set_xticklabels(g.get_xticklabels(), rotation=30)
# Top bi-grams
top2_words = get_top_nK_words(corpus, K=2, n=20)
top2_df = pd.DataFrame(top2_words)
top2_df.columns=["Bi-gram", "Freq"]
print(top2_df)
#Barplot of most freq Bi-grams
import seaborn as sns
sns.set(rc={'figure.figsize':(13,8)})
h=sns.barplot(x="Bi-gram", y="Freq", data=top2_df)
h.set_xticklabels(h.get_xticklabels(), rotation=45)
fig = h.get_figure()
top3_words = get_top_nK_words(corpus, K=3, n=20)
top3_df = pd.DataFrame(top3_words)
top3_df.columns = ['Tri-gram', 'Freq']
print(top3_df)
import seaborn as sns
sns.set(rc={'figure.figsize': (13, 8)})
j = sns.barplot(x='Tri-gram', y='Freq', data=top3_df)
j.set_xticklabels(j.get_xticklabels(), rotation=45)
fig = j.get_figure() | code |
32068455/cell_31 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from langdetect import detect
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
from os import path
from pandarallel import pandarallel
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from tqdm import tqdm
import contractions
import glob
import json
import nltk
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import re
import seaborn as sns
import spacy
import numpy as np
import pandas as pd
import glob
import json
import re
import itertools
from tqdm import tqdm
import nltk
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
from nltk import tokenize
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from collections import defaultdict
from nltk.corpus import wordnet as wn
from langdetect import detect
from nltk.corpus import stopwords
import contractions
import inflect
from nltk.stem import PorterStemmer
from nltk.stem import LancasterStemmer
from nltk.stem import WordNetLemmatizer
from pandarallel import pandarallel
import pickle
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
import seaborn as sns
from sentence_transformers import SentenceTransformer
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import cosine_similarity
import re
import bs4
import requests
import spacy
from spacy import displacy
nlp = spacy.load('en_core_web_sm')
from spacy.matcher import Matcher
from spacy.tokens import Span
import networkx as nx
pandarallel.initialize(use_memory_fs=False, nb_workers=8)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.expand_frame_repr', False)
pd.options.mode.chained_assignment = None
tqdm.pandas()
def filepath(*args):
if len(args) < 1:
return None
elif len(args) == 1:
return args[0]
else:
return f'{args[0]}/{filepath(*args[1:])}'
def addtimebar(L, threshold=1000):
if len(L) > threshold:
return tqdm(L)
else:
return L
class FileReader:
def __init__(self, file_path):
with open(file_path) as file:
content = json.load(file)
self.paper_id = content['paper_id']
self.abstract = []
self.body_text = []
try:
for entry in content['abstract']:
self.abstract.append(entry['text'])
except KeyError:
pass
try:
for entry in content['body_text']:
self.body_text.append(entry['text'])
except KeyError:
pass
self.abstract = '\n'.join(self.abstract)
self.body_text = '\n'.join(self.body_text)
def __repr__(self):
return f'{self.paper_id}: {self.abstract[:200]}... {self.body_text[:200]}...'
def get_breaks(content, length):
data = ''
words = content.split(' ')
total_chars = 0
for i in range(len(words)):
total_chars += len(words[i])
if total_chars > length:
data = data + '<br>' + words[i]
total_chars = 0
else:
data = data + ' ' + words[i]
return data
def compose(*funcs):
*funcs, penultimate, last = funcs
if funcs:
penultimate = compose(*funcs, penultimate)
return lambda *args: penultimate(last(*args))
path = '../input/CORD-19-research-challenge'
meta = 'metadata.csv'
all_jsons = glob.glob(filepath(path, '**', '*.json'), recursive=True)
meta_df = pd.read_csv(filepath(path, meta), dtype={'pubmed_id': str, 'Microsoft Academic Paper ID': str, 'doi': str, 'journal': str}, low_memory=False)
dict_ = {'paper_id': [], 'abstract': [], 'body_text': [], 'authors': [], 'title': [], 'publish_time': [], 'journal': [], 'abstract_summary': []}
for entry in all_jsons[:10]:
content = FileReader(entry)
meta_data = meta_df.loc[meta_df['sha'] == content.paper_id]
if len(meta_data) == 0:
continue
dict_['paper_id'].append(content.paper_id)
dict_['abstract'].append(content.abstract)
dict_['body_text'].append(content.body_text)
if len(content.abstract) == 0:
dict_['abstract_summary'].append('Not provided.')
elif len(content.abstract.split(' ')) > 100:
info = content.abstract.split(' ')[:100]
summary = get_breaks(' '.join(info), 40)
dict_['abstract_summary'].append(summary + '...')
else:
summary = get_breaks(content.abstract, 40)
dict_['abstract_summary'].append(summary)
meta_data = meta_df.loc[meta_df['sha'] == content.paper_id]
try:
authors = meta_data['authors'].values[0].split(';')
if len(authors) > 2:
dict_['authors'].append('. '.join(authors[:2]) + '...')
else:
dict_['authors'].append('. '.join(authors))
except Exception as e:
dict_['authors'].append(meta_data['authors'].values[0])
try:
title = get_breaks(meta_data['title'].values[0], 40)
dict_['title'].append(title)
except Exception as e:
dict_['title'].append(meta_data['title'].values[0])
try:
publish_time = get_breaks(meta_data['publish_time'].values[0], 40)
dict_['publish_time'].append(publish_time)
except Exception as e:
dict_['publish_time'].append(meta_data['publish_time'].values[0])
dict_['journal'].append(meta_data['journal'].values[0])
df_covid = pd.DataFrame(dict_, columns=['paper_id', 'abstract', 'body_text', 'authors', 'title', 'journal', 'publish_time', 'abstract_summary'])
def is_lang(row, item, lang, dropNA=True):
    if row[item] is not None and row[item] != '' and (row[item] != 'None') and isinstance(row[item], str):
try:
return detect(row[item]) == lang
except Exception as e:
return False
else:
return not dropNA
def select_article_lang_multi(df, basedon='abstract', lang='en'):
return df[df.parallel_apply(lambda text: is_lang(text, basedon, lang), axis=1)]
df_covid_eng = select_article_lang_multi(df_covid)
def replace_brackets_with_whitespace(text):
text = text.replace('(', '')
text = text.replace(')', '')
text = text.replace('[', '')
text = text.replace(']', '')
return text
def replace_contractions(text):
return contractions.fix(text)
def strip_characters(text):
    t = re.sub('\\(|\\)|:|,|;|\\.|’|”|“|\\?|%|>|<', '', text)
t = re.sub('/', ' ', t)
t = t.replace("'", '')
return t
def to_lowercase(word):
return word.lower()
def do_stemming(stemmer):
return lambda word: stemmer.stem(word)
def do_lemmatizing(lemmatizer):
return lambda word: lemmatizer.lemmatize(word, pos='v')
def is_stopword(word):
return word in stopwords.words('english')
def process_word_by(word_cleanner, uniqueYN):
def cond(word):
return len(word) > 1 and (not is_stopword(word)) and (not word.isnumeric()) and word.isalnum() and (word != len(word) * word[0])
def clean_byword(text):
return list(take_unique(uniqueYN)((word_cleanner(word) for word in text if cond(word))))
return clean_byword
def take_unique(YN):
return set if YN else lambda x: x
text_processor = compose(replace_brackets_with_whitespace, replace_contractions, strip_characters)
word_processor = compose(to_lowercase, do_lemmatizing(WordNetLemmatizer()), do_stemming(PorterStemmer()))
def pre_processing(df, text_tools, word_tools):
def inner(col, uniqueYN=False):
return df[col].parallel_apply(text_tools).parallel_apply(nltk.word_tokenize).parallel_apply(process_word_by(word_tools, uniqueYN=uniqueYN))
return inner
def get_top_nK_words(corpus, K=1, n=None):
vec1 = CountVectorizer(max_df=0.7, stop_words=stopwords.words('english'), ngram_range=(K, K), max_features=2000).fit(corpus)
bag_of_words = vec1.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [(word, sum_words[0, idx]) for word, idx in vec1.vocabulary_.items()]
words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
return words_freq[:n]
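# NOTE: 'corpus' is not defined in this cell; it is assumed to be built in an earlier (omitted) cell, e.g. a collection of pre-processed abstract strings.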
top_words = get_top_nK_words(corpus, K=1, n=20)
top_df = pd.DataFrame(top_words)
top_df.columns = ['Word', 'Freq']
sns.set(rc={'figure.figsize': (13, 8)})
g = sns.barplot(x='Word', y='Freq', data=top_df)
g.set_xticklabels(g.get_xticklabels(), rotation=30) | code |
32068455/cell_24 | [
"text_html_output_1.png"
] | from langdetect import detect
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
from pandarallel import pandarallel
from tqdm import tqdm
import contractions
import glob
import json
import nltk
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import re
import spacy
import numpy as np
import pandas as pd
import glob
import json
import re
import itertools
from tqdm import tqdm
import nltk
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
from nltk import tokenize
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from collections import defaultdict
from nltk.corpus import wordnet as wn
from langdetect import detect
from nltk.corpus import stopwords
import contractions
import inflect
from nltk.stem import PorterStemmer
from nltk.stem import LancasterStemmer
from nltk.stem import WordNetLemmatizer
from pandarallel import pandarallel
import pickle
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
import seaborn as sns
from sentence_transformers import SentenceTransformer
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import cosine_similarity
import re
import bs4
import requests
import spacy
from spacy import displacy
nlp = spacy.load('en_core_web_sm')
from spacy.matcher import Matcher
from spacy.tokens import Span
import networkx as nx
pandarallel.initialize(use_memory_fs=False, nb_workers=8)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.expand_frame_repr', False)
pd.options.mode.chained_assignment = None
tqdm.pandas()
def filepath(*args):
if len(args) < 1:
return None
elif len(args) == 1:
return args[0]
else:
return f'{args[0]}/{filepath(*args[1:])}'
def addtimebar(L, threshold=1000):
if len(L) > threshold:
return tqdm(L)
else:
return L
class FileReader:
def __init__(self, file_path):
with open(file_path) as file:
content = json.load(file)
self.paper_id = content['paper_id']
self.abstract = []
self.body_text = []
try:
for entry in content['abstract']:
self.abstract.append(entry['text'])
except KeyError:
pass
try:
for entry in content['body_text']:
self.body_text.append(entry['text'])
except KeyError:
pass
self.abstract = '\n'.join(self.abstract)
self.body_text = '\n'.join(self.body_text)
def __repr__(self):
return f'{self.paper_id}: {self.abstract[:200]}... {self.body_text[:200]}...'
def get_breaks(content, length):
data = ''
words = content.split(' ')
total_chars = 0
for i in range(len(words)):
total_chars += len(words[i])
if total_chars > length:
data = data + '<br>' + words[i]
total_chars = 0
else:
data = data + ' ' + words[i]
return data
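# compose(f, g, h)(x) evaluates f(g(h(x))): the right-most function is applied first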
def compose(*funcs):
*funcs, penultimate, last = funcs
if funcs:
penultimate = compose(*funcs, penultimate)
return lambda *args: penultimate(last(*args))
path = '../input/CORD-19-research-challenge'
meta = 'metadata.csv'
all_jsons = glob.glob(filepath(path, '**', '*.json'), recursive=True)
meta_df = pd.read_csv(filepath(path, meta), dtype={'pubmed_id': str, 'Microsoft Academic Paper ID': str, 'doi': str, 'journal': str}, low_memory=False)
dict_ = {'paper_id': [], 'abstract': [], 'body_text': [], 'authors': [], 'title': [], 'publish_time': [], 'journal': [], 'abstract_summary': []}
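# parse the first 10 full-text JSON files and attach their metadata rows, matched on the paper sha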
for entry in all_jsons[:10]:
content = FileReader(entry)
meta_data = meta_df.loc[meta_df['sha'] == content.paper_id]
if len(meta_data) == 0:
continue
dict_['paper_id'].append(content.paper_id)
dict_['abstract'].append(content.abstract)
dict_['body_text'].append(content.body_text)
if len(content.abstract) == 0:
dict_['abstract_summary'].append('Not provided.')
elif len(content.abstract.split(' ')) > 100:
info = content.abstract.split(' ')[:100]
summary = get_breaks(' '.join(info), 40)
dict_['abstract_summary'].append(summary + '...')
else:
summary = get_breaks(content.abstract, 40)
dict_['abstract_summary'].append(summary)
meta_data = meta_df.loc[meta_df['sha'] == content.paper_id]
try:
authors = meta_data['authors'].values[0].split(';')
if len(authors) > 2:
dict_['authors'].append('. '.join(authors[:2]) + '...')
else:
dict_['authors'].append('. '.join(authors))
except Exception as e:
dict_['authors'].append(meta_data['authors'].values[0])
try:
title = get_breaks(meta_data['title'].values[0], 40)
dict_['title'].append(title)
except Exception as e:
dict_['title'].append(meta_data['title'].values[0])
try:
publish_time = get_breaks(meta_data['publish_time'].values[0], 40)
dict_['publish_time'].append(publish_time)
except Exception as e:
dict_['publish_time'].append(meta_data['publish_time'].values[0])
dict_['journal'].append(meta_data['journal'].values[0])
df_covid = pd.DataFrame(dict_, columns=['paper_id', 'abstract', 'body_text', 'authors', 'title', 'journal', 'publish_time', 'abstract_summary'])
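# keep only articles whose abstract langdetect classifies as English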
def is_lang(row, item, lang, dropNA=True):
    if row[item] is not None and row[item] != '' and (row[item] != 'None') and isinstance(row[item], str):
try:
return detect(row[item]) == lang
except Exception as e:
return False
else:
return not dropNA
def select_article_lang_multi(df, basedon='abstract', lang='en'):
return df[df.parallel_apply(lambda text: is_lang(text, basedon, lang), axis=1)]
df_covid_eng = select_article_lang_multi(df_covid)
def replace_brackets_with_whitespace(text):
text = text.replace('(', '')
text = text.replace(')', '')
text = text.replace('[', '')
text = text.replace(']', '')
return text
def replace_contractions(text):
return contractions.fix(text)
def strip_characters(text):
    t = re.sub('\\(|\\)|:|,|;|\\.|’|“|\\?|%|>|<', '', text)
t = re.sub('/', ' ', t)
t = t.replace("'", '')
return t
def to_lowercase(word):
return word.lower()
def do_stemming(stemmer):
return lambda word: stemmer.stem(word)
def do_lemmatizing(lemmatizer):
return lambda word: lemmatizer.lemmatize(word, pos='v')
def is_stopword(word):
return word in stopwords.words('english')
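# keep tokens that are alphanumeric, longer than one character, not stopwords, not numbers, and not a single repeated character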
def process_word_by(word_cleanner, uniqueYN):
def cond(word):
return len(word) > 1 and (not is_stopword(word)) and (not word.isnumeric()) and word.isalnum() and (word != len(word) * word[0])
def clean_byword(text):
return list(take_unique(uniqueYN)((word_cleanner(word) for word in text if cond(word))))
return clean_byword
def take_unique(YN):
return set if YN else lambda x: x
text_processor = compose(replace_brackets_with_whitespace, replace_contractions, strip_characters)
word_processor = compose(to_lowercase, do_lemmatizing(WordNetLemmatizer()), do_stemming(PorterStemmer()))
def pre_processing(df, text_tools, word_tools):
def inner(col, uniqueYN=False):
return df[col].parallel_apply(text_tools).parallel_apply(nltk.word_tokenize).parallel_apply(process_word_by(word_tools, uniqueYN=uniqueYN))
return inner
tokenized_df = df_covid_eng.sort_values(by='publish_time', ascending=False)
processor = pre_processing(tokenized_df, text_processor, word_processor)
tokenized_df['abstract_token'] = processor('abstract')
tokenized_df = tokenized_df.reset_index(drop=True)
tokenized_df.head()['abstract_token'] | code |
32068455/cell_22 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from langdetect import detect
from tqdm import tqdm
import glob
import json
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.expand_frame_repr', False)
pd.options.mode.chained_assignment = None
tqdm.pandas()
def filepath(*args):
if len(args) < 1:
return None
elif len(args) == 1:
return args[0]
else:
return f'{args[0]}/{filepath(*args[1:])}'
def addtimebar(L, threshold=1000):
if len(L) > threshold:
return tqdm(L)
else:
return L
class FileReader:
def __init__(self, file_path):
with open(file_path) as file:
content = json.load(file)
self.paper_id = content['paper_id']
self.abstract = []
self.body_text = []
try:
for entry in content['abstract']:
self.abstract.append(entry['text'])
except KeyError:
pass
try:
for entry in content['body_text']:
self.body_text.append(entry['text'])
except KeyError:
pass
self.abstract = '\n'.join(self.abstract)
self.body_text = '\n'.join(self.body_text)
def __repr__(self):
return f'{self.paper_id}: {self.abstract[:200]}... {self.body_text[:200]}...'
def get_breaks(content, length):
data = ''
words = content.split(' ')
total_chars = 0
for i in range(len(words)):
total_chars += len(words[i])
if total_chars > length:
data = data + '<br>' + words[i]
total_chars = 0
else:
data = data + ' ' + words[i]
return data
def compose(*funcs):
*funcs, penultimate, last = funcs
if funcs:
penultimate = compose(*funcs, penultimate)
return lambda *args: penultimate(last(*args))
path = '../input/CORD-19-research-challenge'
meta = 'metadata.csv'
all_jsons = glob.glob(filepath(path, '**', '*.json'), recursive=True)
meta_df = pd.read_csv(filepath(path, meta), dtype={'pubmed_id': str, 'Microsoft Academic Paper ID': str, 'doi': str, 'journal': str}, low_memory=False)
dict_ = {'paper_id': [], 'abstract': [], 'body_text': [], 'authors': [], 'title': [], 'publish_time': [], 'journal': [], 'abstract_summary': []}
for entry in all_jsons[:10]:
content = FileReader(entry)
meta_data = meta_df.loc[meta_df['sha'] == content.paper_id]
if len(meta_data) == 0:
continue
dict_['paper_id'].append(content.paper_id)
dict_['abstract'].append(content.abstract)
dict_['body_text'].append(content.body_text)
if len(content.abstract) == 0:
dict_['abstract_summary'].append('Not provided.')
elif len(content.abstract.split(' ')) > 100:
info = content.abstract.split(' ')[:100]
summary = get_breaks(' '.join(info), 40)
dict_['abstract_summary'].append(summary + '...')
else:
summary = get_breaks(content.abstract, 40)
dict_['abstract_summary'].append(summary)
meta_data = meta_df.loc[meta_df['sha'] == content.paper_id]
try:
authors = meta_data['authors'].values[0].split(';')
if len(authors) > 2:
dict_['authors'].append('. '.join(authors[:2]) + '...')
else:
dict_['authors'].append('. '.join(authors))
except Exception as e:
dict_['authors'].append(meta_data['authors'].values[0])
try:
title = get_breaks(meta_data['title'].values[0], 40)
dict_['title'].append(title)
except Exception as e:
dict_['title'].append(meta_data['title'].values[0])
try:
publish_time = get_breaks(meta_data['publish_time'].values[0], 40)
dict_['publish_time'].append(publish_time)
except Exception as e:
dict_['publish_time'].append(meta_data['publish_time'].values[0])
dict_['journal'].append(meta_data['journal'].values[0])
df_covid = pd.DataFrame(dict_, columns=['paper_id', 'abstract', 'body_text', 'authors', 'title', 'journal', 'publish_time', 'abstract_summary'])
def is_lang(row, item, lang, dropNA=True):
    if row[item] is not None and row[item] != '' and (row[item] != 'None') and isinstance(row[item], str):
try:
return detect(row[item]) == lang
except Exception as e:
return False
else:
return not dropNA
def select_article_lang_multi(df, basedon='abstract', lang='en'):
return df[df.parallel_apply(lambda text: is_lang(text, basedon, lang), axis=1)]
df_covid_eng = select_article_lang_multi(df_covid)
tokenized_df = df_covid_eng.sort_values(by='publish_time', ascending=False)
tokenized_df.head(n=3) | code |
32068455/cell_10 | [
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from tqdm import tqdm
import glob
import json
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.expand_frame_repr', False)
pd.options.mode.chained_assignment = None
tqdm.pandas()
def filepath(*args):
if len(args) < 1:
return None
elif len(args) == 1:
return args[0]
else:
return f'{args[0]}/{filepath(*args[1:])}'
def addtimebar(L, threshold=1000):
if len(L) > threshold:
return tqdm(L)
else:
return L
class FileReader:
def __init__(self, file_path):
with open(file_path) as file:
content = json.load(file)
self.paper_id = content['paper_id']
self.abstract = []
self.body_text = []
try:
for entry in content['abstract']:
self.abstract.append(entry['text'])
except KeyError:
pass
try:
for entry in content['body_text']:
self.body_text.append(entry['text'])
except KeyError:
pass
self.abstract = '\n'.join(self.abstract)
self.body_text = '\n'.join(self.body_text)
def __repr__(self):
return f'{self.paper_id}: {self.abstract[:200]}... {self.body_text[:200]}...'
def get_breaks(content, length):
data = ''
words = content.split(' ')
total_chars = 0
for i in range(len(words)):
total_chars += len(words[i])
if total_chars > length:
data = data + '<br>' + words[i]
total_chars = 0
else:
data = data + ' ' + words[i]
return data
def compose(*funcs):
*funcs, penultimate, last = funcs
if funcs:
penultimate = compose(*funcs, penultimate)
return lambda *args: penultimate(last(*args))
path = '../input/CORD-19-research-challenge'
meta = 'metadata.csv'
all_jsons = glob.glob(filepath(path, '**', '*.json'), recursive=True)
meta_df = pd.read_csv(filepath(path, meta), dtype={'pubmed_id': str, 'Microsoft Academic Paper ID': str, 'doi': str, 'journal': str}, low_memory=False)
print(len(meta_df))
meta_df.head(n=2) | code |
32068455/cell_27 | [
"text_plain_output_1.png"
] | tokenized_df['abstract_corpus'] = tokenized_df['abstract_token'].apply(lambda tokens: ' '.join(tokens))
corpus = tokenized_df['abstract_corpus'].tolist()
from os import path
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
wordcloud = WordCloud(background_color='white', stopwords=stopwords.words('english'), max_words=100, max_font_size=50, random_state=42).generate(' '.join(corpus))
print(wordcloud)
fig = plt.figure(1)
plt.imshow(wordcloud)
plt.axis('off')
plt.show() | code |
128029297/cell_20 | [
"text_plain_output_1.png"
] | from pathlib import Path
from pathlib import Path
from skimage.io import imread
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from tensorflow.keras.preprocessing import image
import cv2 as cv
import numpy as np
image_height = 128
image_width = 128
DATA = '/kaggle/input/malimg-dataset9010/dataset_9010/dataset_9010/malimg_dataset'
TRAIN_DATA = DATA + '/train'
VALIDATION_DATA = DATA + '/validation'
from pathlib import Path
train_image_dir = Path(TRAIN_DATA)
train_folders = [directory for directory in train_image_dir.iterdir() if directory.is_dir()]
classes = [fo.name for fo in train_folders]
from pathlib import Path
val_image_dir = Path(VALIDATION_DATA)
val_folders = [directory for directory in val_image_dir.iterdir() if directory.is_dir()]
from skimage.io import imread
from tensorflow.keras.preprocessing import image
import cv2 as cv
train_img = []
y_train = []
for i, direc in enumerate(train_folders):
for file in direc.iterdir():
img = imread(file)
img_pred = cv.resize(img, (50, 50), interpolation=cv.INTER_AREA)
img_pred = image.img_to_array(img_pred)
img_pred = img_pred / 255
train_img.append(img_pred)
y_train.append(i)
val_img = []
y_val = []
for i, direc in enumerate(val_folders):
for file in direc.iterdir():
img = imread(file)
img_pred = cv.resize(img, (50, 50), interpolation=cv.INTER_AREA)
img_pred = image.img_to_array(img_pred)
img_pred = img_pred / 255
val_img.append(img_pred)
y_val.append(i)
import numpy as np
X_train = np.array(train_img)
X_val = np.array(val_img)
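# flatten each resized image into a 1D feature vector so it can be fed to the random forest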
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=100)
hist = model.fit(X_train, y_train)
score = model.score(X_val, y_val)
Y_pred = model.predict(X_val)
from sklearn.metrics import classification_report
print(classification_report(y_val, Y_pred, target_names=classes)) | code |
128029297/cell_7 | [
"text_plain_output_1.png"
] | from pathlib import Path
from pathlib import Path
from skimage.io import imread
from tensorflow.keras.preprocessing import image
import cv2 as cv
image_height = 128
image_width = 128
DATA = '/kaggle/input/malimg-dataset9010/dataset_9010/dataset_9010/malimg_dataset'
TRAIN_DATA = DATA + '/train'
VALIDATION_DATA = DATA + '/validation'
from pathlib import Path
train_image_dir = Path(TRAIN_DATA)
train_folders = [directory for directory in train_image_dir.iterdir() if directory.is_dir()]
classes = [fo.name for fo in train_folders]
from skimage.io import imread
from tensorflow.keras.preprocessing import image
import cv2 as cv
train_img = []
y_train = []
for i, direc in enumerate(train_folders):
for file in direc.iterdir():
img = imread(file)
img_pred = cv.resize(img, (50, 50), interpolation=cv.INTER_AREA)
img_pred = image.img_to_array(img_pred)
img_pred = img_pred / 255
train_img.append(img_pred)
y_train.append(i) | code |
128029297/cell_18 | [
"text_plain_output_1.png"
] | from pathlib import Path
from pathlib import Path
from skimage.io import imread
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from tensorflow.keras.preprocessing import image
import cv2 as cv
import numpy as np
image_height = 128
image_width = 128
DATA = '/kaggle/input/malimg-dataset9010/dataset_9010/dataset_9010/malimg_dataset'
TRAIN_DATA = DATA + '/train'
VALIDATION_DATA = DATA + '/validation'
from pathlib import Path
train_image_dir = Path(TRAIN_DATA)
train_folders = [directory for directory in train_image_dir.iterdir() if directory.is_dir()]
classes = [fo.name for fo in train_folders]
from pathlib import Path
val_image_dir = Path(VALIDATION_DATA)
val_folders = [directory for directory in val_image_dir.iterdir() if directory.is_dir()]
from skimage.io import imread
from tensorflow.keras.preprocessing import image
import cv2 as cv
train_img = []
y_train = []
for i, direc in enumerate(train_folders):
for file in direc.iterdir():
img = imread(file)
img_pred = cv.resize(img, (50, 50), interpolation=cv.INTER_AREA)
img_pred = image.img_to_array(img_pred)
img_pred = img_pred / 255
train_img.append(img_pred)
y_train.append(i)
val_img = []
y_val = []
for i, direc in enumerate(val_folders):
for file in direc.iterdir():
img = imread(file)
img_pred = cv.resize(img, (50, 50), interpolation=cv.INTER_AREA)
img_pred = image.img_to_array(img_pred)
img_pred = img_pred / 255
val_img.append(img_pred)
y_val.append(i)
import numpy as np
X_train = np.array(train_img)
X_val = np.array(val_img)
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=100)
hist = model.fit(X_train, y_train)
score = model.score(X_val, y_val)
Y_pred = model.predict(X_val)
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_true=y_val, y_pred=Y_pred)) | code |
128029297/cell_14 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from pathlib import Path
from pathlib import Path
from skimage.io import imread
from sklearn.ensemble import RandomForestClassifier
from tensorflow.keras.preprocessing import image
import cv2 as cv
import numpy as np
image_height = 128
image_width = 128
DATA = '/kaggle/input/malimg-dataset9010/dataset_9010/dataset_9010/malimg_dataset'
TRAIN_DATA = DATA + '/train'
VALIDATION_DATA = DATA + '/validation'
from pathlib import Path
train_image_dir = Path(TRAIN_DATA)
train_folders = [directory for directory in train_image_dir.iterdir() if directory.is_dir()]
classes = [fo.name for fo in train_folders]
from pathlib import Path
val_image_dir = Path(VALIDATION_DATA)
val_folders = [directory for directory in val_image_dir.iterdir() if directory.is_dir()]
from skimage.io import imread
from tensorflow.keras.preprocessing import image
import cv2 as cv
train_img = []
y_train = []
for i, direc in enumerate(train_folders):
for file in direc.iterdir():
img = imread(file)
img_pred = cv.resize(img, (50, 50), interpolation=cv.INTER_AREA)
img_pred = image.img_to_array(img_pred)
img_pred = img_pred / 255
train_img.append(img_pred)
y_train.append(i)
val_img = []
y_val = []
for i, direc in enumerate(val_folders):
for file in direc.iterdir():
img = imread(file)
img_pred = cv.resize(img, (50, 50), interpolation=cv.INTER_AREA)
img_pred = image.img_to_array(img_pred)
img_pred = img_pred / 255
val_img.append(img_pred)
y_val.append(i)
import numpy as np
X_train = np.array(train_img)
X_val = np.array(val_img)
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=100)
hist = model.fit(X_train, y_train)
score = model.score(X_val, y_val)
print('Accuracy:', score) | code |
32070571/cell_13 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
path = '../input/CORD-19-research-challenge/'
all_sources = pd.read_csv(path + 'metadata.csv')
all_sources.isna().sum()
headline_length = all_sources['title'].str.len()
headline_length = all_sources['abstract'].str.len()
plt.hist(headline_length)
plt.show() | code |
32070571/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
path = '../input/CORD-19-research-challenge/'
all_sources = pd.read_csv(path + 'metadata.csv')
all_sources.isna().sum() | code |
32070571/cell_44 | [
"text_plain_output_1.png"
] | from collections import Counter
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer,PorterStemmer
from sklearn.cluster import DBSCAN
from sklearn.metrics.pairwise import cosine_similarity
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import spacy
path = '../input/CORD-19-research-challenge/'
all_sources = pd.read_csv(path + 'metadata.csv')
all_sources.isna().sum()
headline_length = all_sources['title'].str.len()
headline_length = all_sources['abstract'].str.len()
stop = set(stopwords.words('english'))
def build_list(df, col='title'):
corpus = []
lem = WordNetLemmatizer()
stop = set(stopwords.words('english'))
new = df[col].dropna().str.split()
new = new.values.tolist()
corpus = [lem.lemmatize(word.lower()) for i in new for word in i if word not in stop]
return corpus
corpus = build_list(all_sources)
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
corpus = build_list(all_sources, 'abstract')
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
def prepare_similarity(vectors):
similarity = cosine_similarity(vectors)
return similarity
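# argsort the query's similarity row and return the topN most similar titles (the query itself ranks first)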
def get_top_similar(sentence, sentence_list, similarity_matrix, topN):
index = sentence_list.index(sentence)
similarity_row = np.array(similarity_matrix[index, :])
indices = similarity_row.argsort()[-topN:][::-1]
return [(i, sentence_list[i]) for i in indices]
nlp = spacy.load('en_core_web_sm')
sent_vecs = {}
docs = []
for i in tqdm(all_sources['title'].fillna('unknown')[:1000]):
doc = nlp(i)
docs.append(doc)
sent_vecs.update({i: doc.vector})
sentences = list(sent_vecs.keys())
vectors = list(sent_vecs.values())
x = np.array(vectors)
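# cluster the spaCy title vectors with cosine-distance DBSCAN; label -1 marks titles left unclustered as noise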
dbscan = DBSCAN(eps=0.08, min_samples=2, metric='cosine').fit(x)
df_cluster = pd.DataFrame({'sentences': sentences, 'label': dbscan.labels_})
df_cluster.label.unique() | code |
32070571/cell_55 | [
"text_html_output_1.png"
] | from collections import Counter
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer,PorterStemmer
from sklearn.cluster import DBSCAN
from sklearn.metrics.pairwise import cosine_similarity
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import spacy
path = '../input/CORD-19-research-challenge/'
all_sources = pd.read_csv(path + 'metadata.csv')
all_sources.isna().sum()
headline_length = all_sources['title'].str.len()
headline_length = all_sources['abstract'].str.len()
stop = set(stopwords.words('english'))
def build_list(df, col='title'):
corpus = []
lem = WordNetLemmatizer()
stop = set(stopwords.words('english'))
new = df[col].dropna().str.split()
new = new.values.tolist()
corpus = [lem.lemmatize(word.lower()) for i in new for word in i if word not in stop]
return corpus
corpus = build_list(all_sources)
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
corpus = build_list(all_sources, 'abstract')
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
def prepare_similarity(vectors):
similarity = cosine_similarity(vectors)
return similarity
def get_top_similar(sentence, sentence_list, similarity_matrix, topN):
index = sentence_list.index(sentence)
similarity_row = np.array(similarity_matrix[index, :])
indices = similarity_row.argsort()[-topN:][::-1]
return [(i, sentence_list[i]) for i in indices]
nlp = spacy.load('en_core_web_sm')
sent_vecs = {}
docs = []
for i in tqdm(all_sources['title'].fillna('unknown')[:1000]):
doc = nlp(i)
docs.append(doc)
sent_vecs.update({i: doc.vector})
sentences = list(sent_vecs.keys())
vectors = list(sent_vecs.values())
x = np.array(vectors)
dbscan = DBSCAN(eps=0.08, min_samples=2, metric='cosine').fit(x)
df_cluster = pd.DataFrame({'sentences': sentences, 'label': dbscan.labels_})
path = '../input/cord-19-eda-parse-json-and-generate-clean-csv/'
clean_comm = pd.read_csv(path + 'clean_comm_use.csv', nrows=5000)
clean_comm['source'] = 'clean_comm'
biox = pd.read_csv(path + 'biorxiv_clean.csv')
biox['source'] = 'biorx'
all_articles = pd.concat([biox, clean_comm])
all_articles.shape | code |
32070571/cell_6 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from nltk.stem import WordNetLemmatizer, PorterStemmer
from nltk.tokenize import word_tokenize
from sklearn.cluster import DBSCAN
from nltk.corpus import stopwords
from spacy.matcher import Matcher
from collections import Counter
import matplotlib.pyplot as plt
from spacy.tokens import Span
import tensorflow_hub as hub
from rake_nltk import Rake
import tensorflow as tf
import pyLDAvis.gensim
from tqdm import tqdm
import seaborn as sns
import networkx as nx
import pandas as pd
import numpy as np
import pyLDAvis
import gensim
import spacy
import os
import gc | code |
32070571/cell_29 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from collections import Counter
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer,PorterStemmer
from nltk.tokenize import word_tokenize
import gensim
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
path = '../input/CORD-19-research-challenge/'
all_sources = pd.read_csv(path + 'metadata.csv')
all_sources.isna().sum()
headline_length = all_sources['title'].str.len()
headline_length = all_sources['abstract'].str.len()
stop = set(stopwords.words('english'))
def build_list(df, col='title'):
corpus = []
lem = WordNetLemmatizer()
stop = set(stopwords.words('english'))
new = df[col].dropna().str.split()
new = new.values.tolist()
corpus = [lem.lemmatize(word.lower()) for i in new for word in i if word not in stop]
return corpus
corpus = build_list(all_sources)
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
corpus = build_list(all_sources, 'abstract')
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
def preprocess_news(df):
corpus = []
stem = PorterStemmer()
lem = WordNetLemmatizer()
for news in df['title'].dropna()[:5000]:
words = [w for w in word_tokenize(news) if w not in stop]
words = [lem.lemmatize(w) for w in words if len(w) > 2]
corpus.append(words)
return corpus
corpus = preprocess_news(all_sources)
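# build a gensim dictionary and bag-of-words corpus from the tokenized titles, then fit a 4-topic LDA model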
dic = gensim.corpora.Dictionary(corpus)
bow_corpus = [dic.doc2bow(doc) for doc in corpus]
lda_model = gensim.models.LdaMulticore(bow_corpus, num_topics=4, id2word=dic, passes=10, workers=2)
lda_model.show_topics() | code |
32070571/cell_39 | [
"text_plain_output_1.png"
] | import gc
del corpus, top_n_bigrams, lda_model, bow_corpus, top_tri_grams
gc.collect()
del embed_vectors, sentence_list, similarity_matrix
gc.collect() | code |
32070571/cell_48 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from collections import Counter
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer,PorterStemmer
from sklearn.cluster import DBSCAN
from sklearn.metrics.pairwise import cosine_similarity
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import spacy
path = '../input/CORD-19-research-challenge/'
all_sources = pd.read_csv(path + 'metadata.csv')
all_sources.isna().sum()
headline_length = all_sources['title'].str.len()
headline_length = all_sources['abstract'].str.len()
stop = set(stopwords.words('english'))
def build_list(df, col='title'):
corpus = []
lem = WordNetLemmatizer()
stop = set(stopwords.words('english'))
new = df[col].dropna().str.split()
new = new.values.tolist()
corpus = [lem.lemmatize(word.lower()) for i in new for word in i if word not in stop]
return corpus
corpus = build_list(all_sources)
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
corpus = build_list(all_sources, 'abstract')
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
def prepare_similarity(vectors):
similarity = cosine_similarity(vectors)
return similarity
def get_top_similar(sentence, sentence_list, similarity_matrix, topN):
index = sentence_list.index(sentence)
similarity_row = np.array(similarity_matrix[index, :])
indices = similarity_row.argsort()[-topN:][::-1]
return [(i, sentence_list[i]) for i in indices]
nlp = spacy.load('en_core_web_sm')
sent_vecs = {}
docs = []
for i in tqdm(all_sources['title'].fillna('unknown')[:1000]):
doc = nlp(i)
docs.append(doc)
sent_vecs.update({i: doc.vector})
sentences = list(sent_vecs.keys())
vectors = list(sent_vecs.values())
x = np.array(vectors)
dbscan = DBSCAN(eps=0.08, min_samples=2, metric='cosine').fit(x)
df_cluster = pd.DataFrame({'sentences': sentences, 'label': dbscan.labels_})
df_cluster.label.unique()
df_cluster[df_cluster['label'] == 1].head() | code |
32070571/cell_73 | [
"text_plain_output_1.png"
] | !pip install python-rake | code |
32070571/cell_41 | [
"text_plain_output_1.png"
] | from tqdm import tqdm
import pandas as pd
import spacy
path = '../input/CORD-19-research-challenge/'
all_sources = pd.read_csv(path + 'metadata.csv')
all_sources.isna().sum()
nlp = spacy.load('en_core_web_sm')
sent_vecs = {}
docs = []
for i in tqdm(all_sources['title'].fillna('unknown')[:1000]):
doc = nlp(i)
docs.append(doc)
sent_vecs.update({i: doc.vector}) | code |
32070571/cell_54 | [
"text_html_output_1.png"
] | import gc
del corpus, top_n_bigrams, lda_model, bow_corpus, top_tri_grams
gc.collect()
del embed_vectors, sentence_list, similarity_matrix
gc.collect()
del biox, clean_comm
gc.collect() | code |
32070571/cell_11 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
path = '../input/CORD-19-research-challenge/'
all_sources = pd.read_csv(path + 'metadata.csv')
all_sources.isna().sum()
headline_length = all_sources['title'].str.len()
sns.distplot(headline_length)
plt.show() | code |
32070571/cell_60 | [
"text_html_output_1.png"
] | from collections import Counter
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer,PorterStemmer
from sklearn.cluster import DBSCAN
from sklearn.metrics.pairwise import cosine_similarity
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import spacy
path = '../input/CORD-19-research-challenge/'
all_sources = pd.read_csv(path + 'metadata.csv')
all_sources.isna().sum()
headline_length = all_sources['title'].str.len()
headline_length = all_sources['abstract'].str.len()
stop = set(stopwords.words('english'))
def build_list(df, col='title'):
corpus = []
lem = WordNetLemmatizer()
stop = set(stopwords.words('english'))
new = df[col].dropna().str.split()
new = new.values.tolist()
corpus = [lem.lemmatize(word.lower()) for i in new for word in i if word not in stop]
return corpus
corpus = build_list(all_sources)
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
corpus = build_list(all_sources, 'abstract')
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
def prepare_similarity(vectors):
similarity = cosine_similarity(vectors)
return similarity
def get_top_similar(sentence, sentence_list, similarity_matrix, topN):
index = sentence_list.index(sentence)
similarity_row = np.array(similarity_matrix[index, :])
indices = similarity_row.argsort()[-topN:][::-1]
return [(i, sentence_list[i]) for i in indices]
nlp = spacy.load('en_core_web_sm')
sent_vecs = {}
docs = []
for i in tqdm(all_sources['title'].fillna('unknown')[:1000]):
doc = nlp(i)
docs.append(doc)
sent_vecs.update({i: doc.vector})
sentences = list(sent_vecs.keys())
vectors = list(sent_vecs.values())
x = np.array(vectors)
dbscan = DBSCAN(eps=0.08, min_samples=2, metric='cosine').fit(x)
df_cluster = pd.DataFrame({'sentences': sentences, 'label': dbscan.labels_})
path = '../input/cord-19-eda-parse-json-and-generate-clean-csv/'
clean_comm = pd.read_csv(path + 'clean_comm_use.csv', nrows=5000)
clean_comm['source'] = 'clean_comm'
biox = pd.read_csv(path + 'biorxiv_clean.csv')
biox['source'] = 'biorx'
all_articles = pd.concat([biox, clean_comm])
all_articles.shape
tasks = ['What is known about transmission, incubation, and environmental stability', 'What do we know about COVID-19 risk factors', 'What do we know about virus genetics, origin, and evolution', 'What do we know about vaccines and therapeutics', 'What do we know about non-pharmaceutical interventions', 'What do we know about diagnostics and surveillance', 'What has been published about ethical and social science considerations', 'Role of the environment in transmission', 'Range of incubation periods for the disease in humans', 'Prevalence of asymptomatic shedding and transmission', 'Seasonality of transmission', 'Persistence of virus on surfaces of different materials (e.g., copper, stainless steel, plastic)', 'Susceptibility of populations', 'Public health mitigation measures that could be effective for control', 'Transmission dynamics of the virus', 'Evidence that livestock could be infected', 'Socioeconomic and behavioral risk factors for this spill-over', 'Sustainable risk reduction strategies']
task_df = pd.DataFrame({'title': tasks, 'source': 'task'})
all_articles = pd.concat([all_articles, task_df])
all_articles.fillna('Unknown', inplace=True) | code |
32070571/cell_19 | [
"image_output_1.png"
] | from collections import Counter
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer,PorterStemmer
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
path = '../input/CORD-19-research-challenge/'
all_sources = pd.read_csv(path + 'metadata.csv')
all_sources.isna().sum()
headline_length = all_sources['title'].str.len()
headline_length = all_sources['abstract'].str.len()
stop = set(stopwords.words('english'))
def build_list(df, col='title'):
corpus = []
lem = WordNetLemmatizer()
stop = set(stopwords.words('english'))
new = df[col].dropna().str.split()
new = new.values.tolist()
corpus = [lem.lemmatize(word.lower()) for i in new for word in i if word not in stop]
return corpus
corpus = build_list(all_sources)
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
corpus = build_list(all_sources, 'abstract')
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
plt.figure(figsize=(9, 7))
sns.barplot(x=y, y=x) | code |
32070571/cell_64 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from collections import Counter
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer,PorterStemmer
from sklearn.cluster import DBSCAN
from sklearn.metrics.pairwise import cosine_similarity
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import spacy
import tensorflow_hub as hub
path = '../input/CORD-19-research-challenge/'
all_sources = pd.read_csv(path + 'metadata.csv')
all_sources.isna().sum()
headline_length = all_sources['title'].str.len()
headline_length = all_sources['abstract'].str.len()
stop = set(stopwords.words('english'))
def build_list(df, col='title'):
corpus = []
lem = WordNetLemmatizer()
stop = set(stopwords.words('english'))
new = df[col].dropna().str.split()
new = new.values.tolist()
corpus = [lem.lemmatize(word.lower()) for i in new for word in i if word not in stop]
return corpus
corpus = build_list(all_sources)
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
corpus = build_list(all_sources, 'abstract')
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
def prepare_similarity(vectors):
similarity = cosine_similarity(vectors)
return similarity
def get_top_similar(sentence, sentence_list, similarity_matrix, topN):
index = sentence_list.index(sentence)
similarity_row = np.array(similarity_matrix[index, :])
indices = similarity_row.argsort()[-topN:][::-1]
return [(i, sentence_list[i]) for i in indices]
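# load a locally stored Universal Sentence Encoder model and use it to embed article titles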
module_url = '../input/universalsentenceencoderlarge4'
embed = hub.load(module_url)
titles = all_sources['title'].fillna('Unknown')
embed_vectors = embed(titles[:100].values)['outputs'].numpy()
sentence_list = titles.values.tolist()
sentence = titles.iloc[5]
similarity_matrix = prepare_similarity(embed_vectors)
similar = get_top_similar(sentence, sentence_list, similarity_matrix, 6)
nlp = spacy.load('en_core_web_sm')
sent_vecs = {}
docs = []
for i in tqdm(all_sources['title'].fillna('unknown')[:1000]):
doc = nlp(i)
docs.append(doc)
sent_vecs.update({i: doc.vector})
sentences = list(sent_vecs.keys())
vectors = list(sent_vecs.values())
x = np.array(vectors)
dbscan = DBSCAN(eps=0.08, min_samples=2, metric='cosine').fit(x)
df_cluster = pd.DataFrame({'sentences': sentences, 'label': dbscan.labels_})
path = '../input/cord-19-eda-parse-json-and-generate-clean-csv/'
clean_comm = pd.read_csv(path + 'clean_comm_use.csv', nrows=5000)
clean_comm['source'] = 'clean_comm'
biox = pd.read_csv(path + 'biorxiv_clean.csv')
biox['source'] = 'biorx'
all_articles = pd.concat([biox, clean_comm])
all_articles.shape
tasks = ['What is known about transmission, incubation, and environmental stability', 'What do we know about COVID-19 risk factors', 'What do we know about virus genetics, origin, and evolution', 'What do we know about vaccines and therapeutics', 'What do we know about non-pharmaceutical interventions', 'What do we know about diagnostics and surveillance', 'What has been published about ethical and social science considerations', 'Role of the environment in transmission', 'Range of incubation periods for the disease in humans', 'Prevalence of asymptomatic shedding and transmission', 'Seasonality of transmission', 'Persistence of virus on surfaces of different materials (e.g., copper, stainless steel, plastic)', 'Susceptibility of populations', 'Public health mitigation measures that could be effective for control', 'Transmission dynamics of the virus', 'Evidence that livestock could be infected', 'Socioeconomic and behavioral risk factors for this spill-over', 'Sustainable risk reduction strategies']
task_df = pd.DataFrame({'title': tasks, 'source': 'task'})
all_articles = pd.concat([all_articles, task_df])
all_articles.fillna('Unknown', inplace=True)
sentence_list = all_articles.title.values.tolist()
embed_vectors = embed(sentence_list)['outputs'].numpy()
similarity_matrix = prepare_similarity(embed_vectors)
sentence = 'Role of the environment in transmission'
similar = get_top_similar(sentence, sentence_list, similarity_matrix, 10)
for sent in similar:
print(sent[1]) | code |
32070571/cell_32 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import gc
del corpus, top_n_bigrams, lda_model, bow_corpus, top_tri_grams
gc.collect() | code |
32070571/cell_59 | [
"text_plain_output_1.png"
] | from collections import Counter
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer,PorterStemmer
from sklearn.cluster import DBSCAN
from sklearn.metrics.pairwise import cosine_similarity
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import spacy
path = '../input/CORD-19-research-challenge/'
all_sources = pd.read_csv(path + 'metadata.csv')
all_sources.isna().sum()
headline_length = all_sources['title'].str.len()
headline_length = all_sources['abstract'].str.len()
stop = set(stopwords.words('english'))
def build_list(df, col='title'):
corpus = []
lem = WordNetLemmatizer()
stop = set(stopwords.words('english'))
new = df[col].dropna().str.split()
new = new.values.tolist()
corpus = [lem.lemmatize(word.lower()) for i in new for word in i if word not in stop]
return corpus
corpus = build_list(all_sources)
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
corpus = build_list(all_sources, 'abstract')
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
def prepare_similarity(vectors):
similarity = cosine_similarity(vectors)
return similarity
def get_top_similar(sentence, sentence_list, similarity_matrix, topN):
index = sentence_list.index(sentence)
similarity_row = np.array(similarity_matrix[index, :])
indices = similarity_row.argsort()[-topN:][::-1]
return [(i, sentence_list[i]) for i in indices]
nlp = spacy.load('en_core_web_sm')
sent_vecs = {}
docs = []
for i in tqdm(all_sources['title'].fillna('unknown')[:1000]):
doc = nlp(i)
docs.append(doc)
sent_vecs.update({i: doc.vector})
sentences = list(sent_vecs.keys())
vectors = list(sent_vecs.values())
x = np.array(vectors)
dbscan = DBSCAN(eps=0.08, min_samples=2, metric='cosine').fit(x)
df_cluster = pd.DataFrame({'sentences': sentences, 'label': dbscan.labels_})
path = '../input/cord-19-eda-parse-json-and-generate-clean-csv/'
clean_comm = pd.read_csv(path + 'clean_comm_use.csv', nrows=5000)
clean_comm['source'] = 'clean_comm'
biox = pd.read_csv(path + 'biorxiv_clean.csv')
biox['source'] = 'biorx'
all_articles = pd.concat([biox, clean_comm])
tasks = ['What is known about transmission, incubation, and environmental stability', 'What do we know about COVID-19 risk factors', 'What do we know about virus genetics, origin, and evolution', 'What do we know about vaccines and therapeutics', 'What do we know about non-pharmaceutical interventions', 'What do we know about diagnostics and surveillance', 'What has been published about ethical and social science considerations', 'Role of the environment in transmission', 'Range of incubation periods for the disease in humans', 'Prevalence of asymptomatic shedding and transmission', 'Seasonality of transmission', 'Persistence of virus on surfaces of different materials (e.g., copper, stainless steel, plastic)', 'Susceptibility of populations', 'Public health mitigation measures that could be effective for control', 'Transmission dynamics of the virus', 'Evidence that livestock could be infected', 'Socioeconomic and behavioral risk factors for this spill-over', 'Sustainable risk reduction strategies']
task_df = pd.DataFrame({'title': tasks, 'source': 'task'})
task_df.head() | code |
32070571/cell_38 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import pandas as pd
import tensorflow_hub as hub
path = '../input/CORD-19-research-challenge/'
all_sources = pd.read_csv(path + 'metadata.csv')
all_sources.isna().sum()
def prepare_similarity(vectors):
similarity = cosine_similarity(vectors)
return similarity
def get_top_similar(sentence, sentence_list, similarity_matrix, topN):
index = sentence_list.index(sentence)
similarity_row = np.array(similarity_matrix[index, :])
indices = similarity_row.argsort()[-topN:][::-1]
return [(i, sentence_list[i]) for i in indices]
module_url = '../input/universalsentenceencoderlarge4'
embed = hub.load(module_url)
titles = all_sources['title'].fillna('Unknown')
embed_vectors = embed(titles[:100].values)['outputs'].numpy()
sentence_list = titles.values.tolist()
sentence = titles.iloc[5]
similarity_matrix = prepare_similarity(embed_vectors)
similar = get_top_similar(sentence, sentence_list, similarity_matrix, 6)
for sentence in similar:
print(sentence)
print('\n') | code |
32070571/cell_75 | [
"text_plain_output_1.png"
] | from collections import Counter
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer,PorterStemmer
from nltk.tokenize import word_tokenize
from sklearn.cluster import DBSCAN
from sklearn.metrics.pairwise import cosine_similarity
from tqdm import tqdm
import RAKE
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
import seaborn as sns
import spacy
import tensorflow_hub as hub
path = '../input/CORD-19-research-challenge/'
all_sources = pd.read_csv(path + 'metadata.csv')
all_sources.isna().sum()
headline_length = all_sources['title'].str.len()
headline_length = all_sources['abstract'].str.len()
stop = set(stopwords.words('english'))
def build_list(df, col='title'):
corpus = []
lem = WordNetLemmatizer()
stop = set(stopwords.words('english'))
new = df[col].dropna().str.split()
new = new.values.tolist()
corpus = [lem.lemmatize(word.lower()) for i in new for word in i if word not in stop]
return corpus
corpus = build_list(all_sources)
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
corpus = build_list(all_sources, 'abstract')
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
def preprocess_news(df):
corpus = []
stem = PorterStemmer()
lem = WordNetLemmatizer()
for news in df['title'].dropna()[:5000]:
words = [w for w in word_tokenize(news) if w not in stop]
words = [lem.lemmatize(w) for w in words if len(w) > 2]
corpus.append(words)
return corpus
def prepare_similarity(vectors):
similarity = cosine_similarity(vectors)
return similarity
def get_top_similar(sentence, sentence_list, similarity_matrix, topN):
index = sentence_list.index(sentence)
similarity_row = np.array(similarity_matrix[index, :])
indices = similarity_row.argsort()[-topN:][::-1]
return [(i, sentence_list[i]) for i in indices]
module_url = '../input/universalsentenceencoderlarge4'
embed = hub.load(module_url)
titles = all_sources['title'].fillna('Unknown')
embed_vectors = embed(titles[:100].values)['outputs'].numpy()
sentence_list = titles.values.tolist()
sentence = titles.iloc[5]
similarity_matrix = prepare_similarity(embed_vectors)
similar = get_top_similar(sentence, sentence_list, similarity_matrix, 6)
nlp = spacy.load('en_core_web_sm')
sent_vecs = {}
docs = []
for i in tqdm(all_sources['title'].fillna('unknown')[:1000]):
doc = nlp(i)
docs.append(doc)
sent_vecs.update({i: doc.vector})
sentences = list(sent_vecs.keys())
vectors = list(sent_vecs.values())
x = np.array(vectors)
dbscan = DBSCAN(eps=0.08, min_samples=2, metric='cosine').fit(x)
df_cluster = pd.DataFrame({'sentences': sentences, 'label': dbscan.labels_})
path = '../input/cord-19-eda-parse-json-and-generate-clean-csv/'
clean_comm = pd.read_csv(path + 'clean_comm_use.csv', nrows=5000)
clean_comm['source'] = 'clean_comm'
biox = pd.read_csv(path + 'biorxiv_clean.csv')
biox['source'] = 'biorx'
all_articles = pd.concat([biox, clean_comm])
all_articles.shape
tasks = ['What is known about transmission, incubation, and environmental stability', 'What do we know about COVID-19 risk factors', 'What do we know about virus genetics, origin, and evolution', 'What do we know about vaccines and therapeutics', 'What do we know about non-pharmaceutical interventions', 'What do we know about diagnostics and surveillance', 'What has been published about ethical and social science considerations', 'Role of the environment in transmission', 'Range of incubation periods for the disease in humans', 'Prevalence of asymptomatic shedding and transmission', 'Seasonality of transmission', 'Persistence of virus on surfaces of different materials (e.g., copper, stainless steel, plastic)', 'Susceptibility of populations', 'Public health mitigation measures that could be effective for control', 'Transmission dynamics of the virus', 'Evidence that livestock could be infected', 'Socioeconomic and behavioral risk factors for this spill-over', 'Sustainable risk reduction strategies']
task_df = pd.DataFrame({'title': tasks, 'source': 'task'})
all_articles = pd.concat([all_articles, task_df])
all_articles.fillna('Unknown', inplace=True)
sentence_list = all_articles.title.values.tolist()
embed_vectors = embed(sentence_list)['outputs'].numpy()
similarity_matrix = prepare_similarity(embed_vectors)
sentence = 'Role of the environment in transmission'
similar = get_top_similar(sentence, sentence_list, similarity_matrix, 10)
ind, title = list(map(list, zip(*similar)))
titles = []
texts = []
for i in ind:
titles.append(all_articles.iloc[i]['title'])
texts.append(all_articles.iloc[i]['abstract'])
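# strip newlines, parenthesised asides, and doi links from the retrieved abstracts before keyword extraction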
import re
def clean(txt):
txt = re.sub('\\n', '', txt)
txt = re.sub('\\([^()]*\\)', '', txt)
txt = re.sub('https?:\\S+\\sdoi', '', txt)
return txt
texts = list(map(clean, texts))
text_list = ' '.join(texts)
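# score candidate keyphrases in the concatenated abstracts with RAKE, using the SMART stopword list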
import RAKE
import operator
stop_dir = '../input/stopwordsforrake/SmartStoplist.txt'
rake_object = RAKE.Rake(stop_dir)
keywords = rake_object.run(text_list)
words, score = list(map(list, zip(*keywords)))
for word in words[:10]:
print(word) | code |
32070571/cell_47 | [
"text_plain_output_1.png"
] | from collections import Counter
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer,PorterStemmer
from sklearn.cluster import DBSCAN
from sklearn.metrics.pairwise import cosine_similarity
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import spacy
path = '../input/CORD-19-research-challenge/'
all_sources = pd.read_csv(path + 'metadata.csv')
all_sources.isna().sum()
headline_length = all_sources['title'].str.len()
headline_length = all_sources['abstract'].str.len()
stop = set(stopwords.words('english'))
def build_list(df, col='title'):
corpus = []
lem = WordNetLemmatizer()
stop = set(stopwords.words('english'))
new = df[col].dropna().str.split()
new = new.values.tolist()
corpus = [lem.lemmatize(word.lower()) for i in new for word in i if word not in stop]
return corpus
corpus = build_list(all_sources)
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
corpus = build_list(all_sources, 'abstract')
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
def prepare_similarity(vectors):
similarity = cosine_similarity(vectors)
return similarity
def get_top_similar(sentence, sentence_list, similarity_matrix, topN):
index = sentence_list.index(sentence)
similarity_row = np.array(similarity_matrix[index, :])
indices = similarity_row.argsort()[-topN:][::-1]
return [(i, sentence_list[i]) for i in indices]
nlp = spacy.load('en_core_web_sm')
sent_vecs = {}
docs = []
for i in tqdm(all_sources['title'].fillna('unknown')[:1000]):
doc = nlp(i)
docs.append(doc)
sent_vecs.update({i: doc.vector})
sentences = list(sent_vecs.keys())
vectors = list(sent_vecs.values())
x = np.array(vectors)
dbscan = DBSCAN(eps=0.08, min_samples=2, metric='cosine').fit(x)
df_cluster = pd.DataFrame({'sentences': sentences, 'label': dbscan.labels_})
df_cluster.label.unique()
df_cluster[df_cluster['label'] == 0].head() | code |
32070571/cell_17 | [
"text_plain_output_1.png"
] | from collections import Counter
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer,PorterStemmer
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
path = '../input/CORD-19-research-challenge/'
all_sources = pd.read_csv(path + 'metadata.csv')
all_sources.isna().sum()
headline_length = all_sources['title'].str.len()
headline_length = all_sources['abstract'].str.len()
stop = set(stopwords.words('english'))
def build_list(df, col='title'):
corpus = []
lem = WordNetLemmatizer()
stop = set(stopwords.words('english'))
new = df[col].dropna().str.split()
new = new.values.tolist()
corpus = [lem.lemmatize(word.lower()) for i in new for word in i if word not in stop]
return corpus
corpus = build_list(all_sources)
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
plt.figure(figsize=(9, 7))
sns.barplot(x=y, y=x) | code |
32070571/cell_77 | [
"text_plain_output_1.png"
] | !pip install pytextrank | code |
32070571/cell_31 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from collections import Counter
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer,PorterStemmer
from nltk.tokenize import word_tokenize
import gensim
import matplotlib.pyplot as plt
import pandas as pd
import pyLDAvis
import seaborn as sns
path = '../input/CORD-19-research-challenge/'
all_sources = pd.read_csv(path + 'metadata.csv')
all_sources.isna().sum()
headline_length = all_sources['title'].str.len()
headline_length = all_sources['abstract'].str.len()
stop = set(stopwords.words('english'))
def build_list(df, col='title'):
corpus = []
lem = WordNetLemmatizer()
stop = set(stopwords.words('english'))
new = df[col].dropna().str.split()
new = new.values.tolist()
corpus = [lem.lemmatize(word.lower()) for i in new for word in i if word not in stop]
return corpus
corpus = build_list(all_sources)
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
corpus = build_list(all_sources, 'abstract')
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
def preprocess_news(df):
corpus = []
stem = PorterStemmer()
lem = WordNetLemmatizer()
for news in df['title'].dropna()[:5000]:
words = [w for w in word_tokenize(news) if w not in stop]
words = [lem.lemmatize(w) for w in words if len(w) > 2]
corpus.append(words)
return corpus
corpus = preprocess_news(all_sources)
dic = gensim.corpora.Dictionary(corpus)
bow_corpus = [dic.doc2bow(doc) for doc in corpus]
lda_model = gensim.models.LdaMulticore(bow_corpus, num_topics=4, id2word=dic, passes=10, workers=2)
pyLDAvis.enable_notebook()
vis = pyLDAvis.gensim.prepare(lda_model, bow_corpus, dic)
vis | code |
32070571/cell_24 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from collections import Counter
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer,PorterStemmer
from sklearn.feature_extraction.text import CountVectorizer
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
path = '../input/CORD-19-research-challenge/'
all_sources = pd.read_csv(path + 'metadata.csv')
all_sources.isna().sum()
headline_length = all_sources['title'].str.len()
headline_length = all_sources['abstract'].str.len()
stop = set(stopwords.words('english'))
def build_list(df, col='title'):
corpus = []
lem = WordNetLemmatizer()
stop = set(stopwords.words('english'))
new = df[col].dropna().str.split()
new = new.values.tolist()
corpus = [lem.lemmatize(word.lower()) for i in new for word in i if word not in stop]
return corpus
corpus = build_list(all_sources)
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
corpus = build_list(all_sources, 'abstract')
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
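# count n-grams with CountVectorizer and return the 10 most frequent (n-gram, count) pairs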
def get_top_ngram(corpus, n=None):
vec = CountVectorizer(ngram_range=(n, n)).fit(corpus)
bag_of_words = vec.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]
words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
return words_freq[:10]
top_n_bigrams = get_top_ngram(all_sources['title'].dropna(), 2)[:10]
x, y = map(list, zip(*top_n_bigrams))
top_tri_grams = get_top_ngram(all_sources['title'].dropna(), n=3)
x, y = map(list, zip(*top_tri_grams))
plt.figure(figsize=(9, 7))
sns.barplot(x=y, y=x) | code |
32070571/cell_22 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | from collections import Counter
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer,PorterStemmer
from sklearn.feature_extraction.text import CountVectorizer
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
path = '../input/CORD-19-research-challenge/'
all_sources = pd.read_csv(path + 'metadata.csv')
all_sources.isna().sum()
headline_length = all_sources['title'].str.len()
headline_length = all_sources['abstract'].str.len()
stop = set(stopwords.words('english'))
def build_list(df, col='title'):
corpus = []
lem = WordNetLemmatizer()
stop = set(stopwords.words('english'))
new = df[col].dropna().str.split()
new = new.values.tolist()
corpus = [lem.lemmatize(word.lower()) for i in new for word in i if word not in stop]
return corpus
corpus = build_list(all_sources)
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
corpus = build_list(all_sources, 'abstract')
counter = Counter(corpus)
most = counter.most_common()
x = []
y = []
for word, count in most[:10]:
if word not in stop:
x.append(word)
y.append(count)
def get_top_ngram(corpus, n=None):
vec = CountVectorizer(ngram_range=(n, n)).fit(corpus)
bag_of_words = vec.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]
words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
return words_freq[:10]
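# Plot the ten most common bigrams in the paper titles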
top_n_bigrams = get_top_ngram(all_sources['title'].dropna(), 2)[:10]
x, y = map(list, zip(*top_n_bigrams))
plt.figure(figsize=(9, 7))
sns.barplot(x=y, y=x) | code |
32070571/cell_37 | [
"text_plain_output_1.png"
] | from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import pandas as pd
import tensorflow_hub as hub
path = '../input/CORD-19-research-challenge/'
all_sources = pd.read_csv(path + 'metadata.csv')
all_sources.isna().sum()
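# Pairwise cosine-similarity matrix between sentence embeddings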
def prepare_similarity(vectors):
similarity = cosine_similarity(vectors)
return similarity
def get_top_similar(sentence, sentence_list, similarity_matrix, topN):
index = sentence_list.index(sentence)
similarity_row = np.array(similarity_matrix[index, :])
indices = similarity_row.argsort()[-topN:][::-1]
return [(i, sentence_list[i]) for i in indices]
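# Load the Universal Sentence Encoder from the attached Kaggle dataset and embed the first 100 titles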
module_url = '../input/universalsentenceencoderlarge4'
embed = hub.load(module_url)
titles = all_sources['title'].fillna('Unknown')
embed_vectors = embed(titles[:100].values)['outputs'].numpy()
sentence_list = titles.values.tolist()
sentence = titles.iloc[5]
print('Find similar research papers for :')
print(sentence)
similarity_matrix = prepare_similarity(embed_vectors)
similar = get_top_similar(sentence, sentence_list, similarity_matrix, 6) | code |
32070571/cell_5 | [
"text_plain_output_1.png"
] | !pip install rake-nltk | code |
74065110/cell_42 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
elec_meter_data_all = pd.read_csv('../input/buildingdatagenomeproject2/electricity.csv', index_col='timestamp', parse_dates=True)
site_data_example = elec_meter_data_all.loc[:, elec_meter_data_all.columns.str.contains('Panther') & elec_meter_data_all.columns.str.contains('office')]
site_data_example_2017 = site_data_example.truncate(before='2017-01-01')
weather_data = pd.read_csv('../input/buildingdatagenomeproject2/weather.csv', index_col='timestamp', parse_dates=True)
weather_data_site = weather_data[weather_data.site_id == 'Panther']
sample_meter = pd.DataFrame(site_data_example_2017['Panther_office_Lavinia'])
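# Mask readings at or below 10 as outliers, then forward-fill the resulting gaps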
sample_meter_nooutlier = sample_meter[sample_meter > 10]
sample_meter_nooutlier_nogaps = sample_meter_nooutlier.fillna(method='ffill')
temp_data = pd.DataFrame(weather_data_site['airTemperature'].truncate(before='01-01-2017'))
comparison = pd.concat([temp_data, sample_meter_nooutlier_nogaps], axis=1)
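# Daily-averaged scatter plot of electricity use against outdoor air temperature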
comparison.resample('D').mean().plot(kind='scatter', x='airTemperature', y='Panther_office_Lavinia', figsize=(10, 10)) | code |
74065110/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
elec_meter_data_all = pd.read_csv('../input/buildingdatagenomeproject2/electricity.csv', index_col='timestamp', parse_dates=True)
weather_data = pd.read_csv('../input/buildingdatagenomeproject2/weather.csv', index_col='timestamp', parse_dates=True)
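# Keep only the weather observations recorded for the Panther site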
weather_data_site = weather_data[weather_data.site_id == 'Panther']
weather_data_site.head() | code |
74065110/cell_25 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
elec_meter_data_all = pd.read_csv('../input/buildingdatagenomeproject2/electricity.csv', index_col='timestamp', parse_dates=True)
site_data_example = elec_meter_data_all.loc[:, elec_meter_data_all.columns.str.contains('Panther') & elec_meter_data_all.columns.str.contains('office')]
site_data_example_2017 = site_data_example.truncate(before='2017-01-01')
weather_data = pd.read_csv('../input/buildingdatagenomeproject2/weather.csv', index_col='timestamp', parse_dates=True)
sample_meter = pd.DataFrame(site_data_example_2017['Panther_office_Lavinia'])
sample_meter_nooutlier = sample_meter[sample_meter > 10]
sample_meter_nooutlier.info() | code |
74065110/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
elec_meter_data_all = pd.read_csv('../input/buildingdatagenomeproject2/electricity.csv', index_col='timestamp', parse_dates=True)
elec_meter_data_all.info() | code |
74065110/cell_34 | [
"text_plain_output_1.png"
] | import pandas as pd
elec_meter_data_all = pd.read_csv('../input/buildingdatagenomeproject2/electricity.csv', index_col='timestamp', parse_dates=True)
site_data_example = elec_meter_data_all.loc[:, elec_meter_data_all.columns.str.contains('Panther') & elec_meter_data_all.columns.str.contains('office')]
site_data_example_2017 = site_data_example.truncate(before='2017-01-01')
weather_data = pd.read_csv('../input/buildingdatagenomeproject2/weather.csv', index_col='timestamp', parse_dates=True)
weather_data_site = weather_data[weather_data.site_id == 'Panther']
sample_meter = pd.DataFrame(site_data_example_2017['Panther_office_Lavinia'])
sample_meter_nooutlier = sample_meter[sample_meter > 10]
sample_meter_nooutlier_nogaps = sample_meter_nooutlier.fillna(method='ffill')
temp_data = pd.DataFrame(weather_data_site['airTemperature'].truncate(before='01-01-2017'))
comparison = pd.concat([temp_data, sample_meter_nooutlier_nogaps], axis=1)
comparison.info() | code |
74065110/cell_23 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
elec_meter_data_all = pd.read_csv('../input/buildingdatagenomeproject2/electricity.csv', index_col='timestamp', parse_dates=True)
site_data_example = elec_meter_data_all.loc[:, elec_meter_data_all.columns.str.contains('Panther') & elec_meter_data_all.columns.str.contains('office')]
site_data_example_2017 = site_data_example.truncate(before='2017-01-01')
weather_data = pd.read_csv('../input/buildingdatagenomeproject2/weather.csv', index_col='timestamp', parse_dates=True)
sample_meter = pd.DataFrame(site_data_example_2017['Panther_office_Lavinia'])
sample_meter_nooutlier = sample_meter[sample_meter > 10]
sample_meter_nooutlier.truncate(before='02-10-2017', after='02-20-2017').plot(figsize=(10, 4)) | code |
74065110/cell_20 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
elec_meter_data_all = pd.read_csv('../input/buildingdatagenomeproject2/electricity.csv', index_col='timestamp', parse_dates=True)
site_data_example = elec_meter_data_all.loc[:, elec_meter_data_all.columns.str.contains('Panther') & elec_meter_data_all.columns.str.contains('office')]
site_data_example_2017 = site_data_example.truncate(before='2017-01-01')
weather_data = pd.read_csv('../input/buildingdatagenomeproject2/weather.csv', index_col='timestamp', parse_dates=True)
sample_meter = pd.DataFrame(site_data_example_2017['Panther_office_Lavinia'])
sample_meter.truncate(before='02-10-2017', after='02-20-2017').plot(figsize=(10, 4)) | code |
74065110/cell_39 | [
"text_plain_output_1.png"
] | import pandas as pd
elec_meter_data_all = pd.read_csv('../input/buildingdatagenomeproject2/electricity.csv', index_col='timestamp', parse_dates=True)
site_data_example = elec_meter_data_all.loc[:, elec_meter_data_all.columns.str.contains('Panther') & elec_meter_data_all.columns.str.contains('office')]
site_data_example_2017 = site_data_example.truncate(before='2017-01-01')
weather_data = pd.read_csv('../input/buildingdatagenomeproject2/weather.csv', index_col='timestamp', parse_dates=True)
weather_data_site = weather_data[weather_data.site_id == 'Panther']
sample_meter = pd.DataFrame(site_data_example_2017['Panther_office_Lavinia'])
sample_meter_nooutlier = sample_meter[sample_meter > 10]
sample_meter_nooutlier_nogaps = sample_meter_nooutlier.fillna(method='ffill')
temp_data = pd.DataFrame(weather_data_site['airTemperature'].truncate(before='01-01-2017'))
comparison = pd.concat([temp_data, sample_meter_nooutlier_nogaps], axis=1)
comparison.plot(figsize=(20, 10), subplots=True) | code |
74065110/cell_41 | [
"text_plain_output_1.png"
] | import pandas as pd
elec_meter_data_all = pd.read_csv('../input/buildingdatagenomeproject2/electricity.csv', index_col='timestamp', parse_dates=True)
site_data_example = elec_meter_data_all.loc[:, elec_meter_data_all.columns.str.contains('Panther') & elec_meter_data_all.columns.str.contains('office')]
site_data_example_2017 = site_data_example.truncate(before='2017-01-01')
weather_data = pd.read_csv('../input/buildingdatagenomeproject2/weather.csv', index_col='timestamp', parse_dates=True)
weather_data_site = weather_data[weather_data.site_id == 'Panther']
sample_meter = pd.DataFrame(site_data_example_2017['Panther_office_Lavinia'])
sample_meter_nooutlier = sample_meter[sample_meter > 10]
sample_meter_nooutlier_nogaps = sample_meter_nooutlier.fillna(method='ffill')
temp_data = pd.DataFrame(weather_data_site['airTemperature'].truncate(before='01-01-2017'))
comparison = pd.concat([temp_data, sample_meter_nooutlier_nogaps], axis=1)
comparison.plot(kind='scatter', x='airTemperature', y='Panther_office_Lavinia', figsize=(10, 10)) | code |
74065110/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
elec_meter_data_all = pd.read_csv('../input/buildingdatagenomeproject2/electricity.csv', index_col='timestamp', parse_dates=True)
weather_data = pd.read_csv('../input/buildingdatagenomeproject2/weather.csv', index_col='timestamp', parse_dates=True)
weather_data.info() | code |
74065110/cell_19 | [
"text_html_output_1.png"
] | import pandas as pd
elec_meter_data_all = pd.read_csv('../input/buildingdatagenomeproject2/electricity.csv', index_col='timestamp', parse_dates=True)
site_data_example = elec_meter_data_all.loc[:, elec_meter_data_all.columns.str.contains('Panther') & elec_meter_data_all.columns.str.contains('office')]
site_data_example_2017 = site_data_example.truncate(before='2017-01-01')
weather_data = pd.read_csv('../input/buildingdatagenomeproject2/weather.csv', index_col='timestamp', parse_dates=True)
sample_meter = pd.DataFrame(site_data_example_2017['Panther_office_Lavinia'])
sample_meter.plot(figsize=(10, 4)) | code |
74065110/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
elec_meter_data_all = pd.read_csv('../input/buildingdatagenomeproject2/electricity.csv', index_col='timestamp', parse_dates=True)
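# Select the Panther-site office buildings and keep data from 2017 onwards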
site_data_example = elec_meter_data_all.loc[:, elec_meter_data_all.columns.str.contains('Panther') & elec_meter_data_all.columns.str.contains('office')]
site_data_example_2017 = site_data_example.truncate(before='2017-01-01')
site_data_example_2017.info() | code |
74065110/cell_45 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
elec_meter_data_all = pd.read_csv('../input/buildingdatagenomeproject2/electricity.csv', index_col='timestamp', parse_dates=True)
site_data_example = elec_meter_data_all.loc[:, elec_meter_data_all.columns.str.contains('Panther') & elec_meter_data_all.columns.str.contains('office')]
site_data_example_2017 = site_data_example.truncate(before='2017-01-01')
weather_data = pd.read_csv('../input/buildingdatagenomeproject2/weather.csv', index_col='timestamp', parse_dates=True)
weather_data_site = weather_data[weather_data.site_id == 'Panther']
sample_meter = pd.DataFrame(site_data_example_2017['Panther_office_Lavinia'])
sample_meter_nooutlier = sample_meter[sample_meter > 10]
sample_meter_nooutlier_nogaps = sample_meter_nooutlier.fillna(method='ffill')
temp_data = pd.DataFrame(weather_data_site['airTemperature'].truncate(before='01-01-2017'))
comparison = pd.concat([temp_data, sample_meter_nooutlier_nogaps], axis=1)
comparison.info() | code |
74065110/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
elec_meter_data_all = pd.read_csv('../input/buildingdatagenomeproject2/electricity.csv', index_col='timestamp', parse_dates=True)
site_data_example = elec_meter_data_all.loc[:, elec_meter_data_all.columns.str.contains('Panther') & elec_meter_data_all.columns.str.contains('office')]
site_data_example_2017 = site_data_example.truncate(before='2017-01-01')
weather_data = pd.read_csv('../input/buildingdatagenomeproject2/weather.csv', index_col='timestamp', parse_dates=True)
sample_meter = pd.DataFrame(site_data_example_2017['Panther_office_Lavinia'])
sample_meter.head() | code |
74065110/cell_28 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
elec_meter_data_all = pd.read_csv('../input/buildingdatagenomeproject2/electricity.csv', index_col='timestamp', parse_dates=True)
site_data_example = elec_meter_data_all.loc[:, elec_meter_data_all.columns.str.contains('Panther') & elec_meter_data_all.columns.str.contains('office')]
site_data_example_2017 = site_data_example.truncate(before='2017-01-01')
weather_data = pd.read_csv('../input/buildingdatagenomeproject2/weather.csv', index_col='timestamp', parse_dates=True)
sample_meter = pd.DataFrame(site_data_example_2017['Panther_office_Lavinia'])
sample_meter_nooutlier = sample_meter[sample_meter > 10]
sample_meter_nooutlier_nogaps = sample_meter_nooutlier.fillna(method='ffill')
sample_meter_nooutlier_nogaps.truncate(before='02-10-2017', after='02-20-2017').plot(figsize=(10, 4)) | code |
74065110/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
elec_meter_data_all = pd.read_csv('../input/buildingdatagenomeproject2/electricity.csv', index_col='timestamp', parse_dates=True)
site_data_example = elec_meter_data_all.loc[:, elec_meter_data_all.columns.str.contains('Panther') & elec_meter_data_all.columns.str.contains('office')]
site_data_example_2017 = site_data_example.truncate(before='2017-01-01')
site_data_example_2017.plot(figsize=(15, 50), subplots=True) | code |
74065110/cell_15 | [
"image_output_1.png"
] | import pandas as pd
elec_meter_data_all = pd.read_csv('../input/buildingdatagenomeproject2/electricity.csv', index_col='timestamp', parse_dates=True)
weather_data = pd.read_csv('../input/buildingdatagenomeproject2/weather.csv', index_col='timestamp', parse_dates=True)
weather_data_site = weather_data[weather_data.site_id == 'Panther']
weather_data_site['airTemperature'].plot(figsize=(20, 4)) | code |
74065110/cell_38 | [
"text_plain_output_1.png"
] | import pandas as pd
elec_meter_data_all = pd.read_csv('../input/buildingdatagenomeproject2/electricity.csv', index_col='timestamp', parse_dates=True)
site_data_example = elec_meter_data_all.loc[:, elec_meter_data_all.columns.str.contains('Panther') & elec_meter_data_all.columns.str.contains('office')]
site_data_example_2017 = site_data_example.truncate(before='2017-01-01')
weather_data = pd.read_csv('../input/buildingdatagenomeproject2/weather.csv', index_col='timestamp', parse_dates=True)
weather_data_site = weather_data[weather_data.site_id == 'Panther']
sample_meter = pd.DataFrame(site_data_example_2017['Panther_office_Lavinia'])
sample_meter_nooutlier = sample_meter[sample_meter > 10]
sample_meter_nooutlier_nogaps = sample_meter_nooutlier.fillna(method='ffill')
temp_data = pd.DataFrame(weather_data_site['airTemperature'].truncate(before='01-01-2017'))
comparison = pd.concat([temp_data, sample_meter_nooutlier_nogaps], axis=1)
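# The same join expressed with pd.merge on the timestamp index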
comparison_merged = pd.merge(temp_data, sample_meter_nooutlier_nogaps, left_index=True, right_index=True, how='outer')
comparison_merged.info() | code |
74065110/cell_35 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
elec_meter_data_all = pd.read_csv('../input/buildingdatagenomeproject2/electricity.csv', index_col='timestamp', parse_dates=True)
site_data_example = elec_meter_data_all.loc[:, elec_meter_data_all.columns.str.contains('Panther') & elec_meter_data_all.columns.str.contains('office')]
site_data_example_2017 = site_data_example.truncate(before='2017-01-01')
weather_data = pd.read_csv('../input/buildingdatagenomeproject2/weather.csv', index_col='timestamp', parse_dates=True)
weather_data_site = weather_data[weather_data.site_id == 'Panther']
sample_meter = pd.DataFrame(site_data_example_2017['Panther_office_Lavinia'])
sample_meter_nooutlier = sample_meter[sample_meter > 10]
sample_meter_nooutlier_nogaps = sample_meter_nooutlier.fillna(method='ffill')
temp_data = pd.DataFrame(weather_data_site['airTemperature'].truncate(before='01-01-2017'))
comparison = pd.concat([temp_data, sample_meter_nooutlier_nogaps], axis=1)
comparison.head() | code |
74065110/cell_31 | [
"text_plain_output_1.png"
] | import pandas as pd
elec_meter_data_all = pd.read_csv('../input/buildingdatagenomeproject2/electricity.csv', index_col='timestamp', parse_dates=True)
site_data_example = elec_meter_data_all.loc[:, elec_meter_data_all.columns.str.contains('Panther') & elec_meter_data_all.columns.str.contains('office')]
site_data_example_2017 = site_data_example.truncate(before='2017-01-01')
weather_data = pd.read_csv('../input/buildingdatagenomeproject2/weather.csv', index_col='timestamp', parse_dates=True)
weather_data_site = weather_data[weather_data.site_id == 'Panther']
sample_meter = pd.DataFrame(site_data_example_2017['Panther_office_Lavinia'])
temp_data = pd.DataFrame(weather_data_site['airTemperature'].truncate(before='01-01-2017'))
temp_data.info() | code |
74065110/cell_22 | [
"text_html_output_1.png"
] | import pandas as pd
elec_meter_data_all = pd.read_csv('../input/buildingdatagenomeproject2/electricity.csv', index_col='timestamp', parse_dates=True)
site_data_example = elec_meter_data_all.loc[:, elec_meter_data_all.columns.str.contains('Panther') & elec_meter_data_all.columns.str.contains('office')]
site_data_example_2017 = site_data_example.truncate(before='2017-01-01')
weather_data = pd.read_csv('../input/buildingdatagenomeproject2/weather.csv', index_col='timestamp', parse_dates=True)
sample_meter = pd.DataFrame(site_data_example_2017['Panther_office_Lavinia'])
sample_meter_nooutlier = sample_meter[sample_meter > 10]
sample_meter_nooutlier.plot(figsize=(10, 4)) | code |
74065110/cell_27 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
elec_meter_data_all = pd.read_csv('../input/buildingdatagenomeproject2/electricity.csv', index_col='timestamp', parse_dates=True)
site_data_example = elec_meter_data_all.loc[:, elec_meter_data_all.columns.str.contains('Panther') & elec_meter_data_all.columns.str.contains('office')]
site_data_example_2017 = site_data_example.truncate(before='2017-01-01')
weather_data = pd.read_csv('../input/buildingdatagenomeproject2/weather.csv', index_col='timestamp', parse_dates=True)
sample_meter = pd.DataFrame(site_data_example_2017['Panther_office_Lavinia'])
sample_meter_nooutlier = sample_meter[sample_meter > 10]
sample_meter_nooutlier_nogaps = sample_meter_nooutlier.fillna(method='ffill')
sample_meter_nooutlier_nogaps.info() | code |
73078429/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Train.csv')
test_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Test.csv')
train_df.rename(columns={'labels': 'churn'}, inplace=True)
train_df.describe().T | code |
73078429/cell_9 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Train.csv')
test_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Test.csv')
print(train_df.shape)
print(test_df.shape)
print(train_df.size)
print(test_df.size) | code |
73078429/cell_25 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Train.csv')
test_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Test.csv')
train_df.rename(columns={'labels': 'churn'}, inplace=True)
train_df.describe().T
features = list(train_df.columns)
features.remove('churn')
features
float_features = [i for i in train_df.columns if train_df[i].dtype == 'float64']
float_features | code |
73078429/cell_34 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Train.csv')
test_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Test.csv')
train_df.rename(columns={'labels': 'churn'}, inplace=True)
train_df.describe().T
def summary(df):
Types = df.dtypes
Counts = df.apply(lambda x: x.count())
Uniques = df.apply(lambda x: x.unique().shape[0])
cols = ['Types', 'Counts', 'Uniques']
str = pd.concat([Types, Counts, Uniques], axis=1, sort=True)
str.columns = cols
summary(df=train_df)
sizes = [29941, 3967]
labels = ('NO', 'YES')
explode = (0, 0.1)
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, autopct='%1.1f%%', shadow=True, startangle=75)
ax1.axis('equal')
ax1.set_title('Client Churn Distribution')
ax1.legend(labels)
plt.show()
#ratio of those who churn and those who don't
def show_correlations(df, show_chart = True):
fig = plt.figure(figsize = (20,10))
corr = df.corr()
if show_chart == True:
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values,
annot=True)
return corr
correlation_df = show_correlations(train_df,show_chart=True)
#Get Correlation of "churn" with other variables:
features = list(train_df.columns)
features.remove('churn')
features
float_features = [i for i in train_df.columns if train_df[i].dtype == 'float64']
float_features
int_features = [i for i in train_df.columns if train_df[i].dtype == 'int64']
int_features.remove('churn')
int_features
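# Box plots of the float-valued features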
fig, ax = plt.subplots(4, 2, figsize = (15, 10))
ax = ax.flatten()
for i, c in enumerate(float_features):
sns.boxplot(x = train_df[c], ax = ax[i], palette = 'Set3')
plt.suptitle('Box Plot', fontsize = 25)
fig.tight_layout()
#Box plot of float features
def plot_hist(variable):
pass
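# Box plots of the integer-valued features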
fig, ax = plt.subplots(5, 2, figsize = (15, 10))
ax = ax.flatten()
for i, c in enumerate(int_features):
sns.boxplot(x = train_df[c], ax = ax[i], palette = 'Set3')
plt.suptitle('Box Plot', fontsize = 25)
fig.tight_layout()
#Box plot of integer features
def bar_plot(variable):
"""
input: variable
output: bar plot & value count
"""
var = train_df[variable]
varValue = var.value_counts()
plt.xticks(varValue.index, varValue.index.values)
for c in int_features:
bar_plot(c) | code |
73078429/cell_30 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Train.csv')
test_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Test.csv')
train_df.rename(columns={'labels': 'churn'}, inplace=True)
train_df.describe().T
def summary(df):
Types = df.dtypes
Counts = df.apply(lambda x: x.count())
Uniques = df.apply(lambda x: x.unique().shape[0])
cols = ['Types', 'Counts', 'Uniques']
str = pd.concat([Types, Counts, Uniques], axis=1, sort=True)
str.columns = cols
summary(df=train_df)
sizes = [29941, 3967]
labels = ('NO', 'YES')
explode = (0, 0.1)
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, autopct='%1.1f%%', shadow=True, startangle=75)
ax1.axis('equal')
ax1.set_title('Client Churn Distribution')
ax1.legend(labels)
plt.show()
#ratio of those who churn and those who don't
def show_correlations(df, show_chart = True):
fig = plt.figure(figsize = (20,10))
corr = df.corr()
if show_chart == True:
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values,
annot=True)
return corr
correlation_df = show_correlations(train_df,show_chart=True)
#Get Correlation of "churn" with other variables:
features = list(train_df.columns)
features.remove('churn')
features
float_features = [i for i in train_df.columns if train_df[i].dtype == 'float64']
float_features
int_features = [i for i in train_df.columns if train_df[i].dtype == 'int64']
int_features.remove('churn')
int_features
fig, ax = plt.subplots(4, 2, figsize = (15, 10))
ax = ax.flatten()
for i, c in enumerate(float_features):
sns.boxplot(x = train_df[c], ax = ax[i], palette = 'Set3')
plt.suptitle('Box Plot', fontsize = 25)
fig.tight_layout()
#Box plot of float features
def plot_hist(variable):
pass
for n in float_features:
plot_hist(n) | code |
73078429/cell_20 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
sizes = [29941, 3967]
labels = ('NO', 'YES')
explode = (0, 0.1)
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, autopct='%1.1f%%', shadow=True, startangle=75)
ax1.axis('equal')
ax1.set_title('Client Churn Distribution')
ax1.legend(labels)
plt.show() | code |
73078429/cell_6 | [
"text_plain_output_5.png",
"text_plain_output_9.png",
"text_plain_output_4.png",
"image_output_5.png",
"text_plain_output_6.png",
"image_output_7.png",
"text_plain_output_3.png",
"image_output_4.png",
"text_plain_output_7.png",
"image_output_8.png",
"text_plain_output_8.png",
"image_output_6.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png",
"image_output_9.png"
] | import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
import warnings
warnings.filterwarnings('ignore')
from time import time, strftime, gmtime
start = time()
import datetime
print(str(datetime.datetime.now())) | code |
73078429/cell_26 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Train.csv')
test_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Test.csv')
train_df.rename(columns={'labels': 'churn'}, inplace=True)
train_df.describe().T
features = list(train_df.columns)
features.remove('churn')
features
float_features = [i for i in train_df.columns if train_df[i].dtype == 'float64']
float_features
int_features = [i for i in train_df.columns if train_df[i].dtype == 'int64']
int_features.remove('churn')
int_features | code |
73078429/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Train.csv')
test_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Test.csv')
test_df.head() | code |
73078429/cell_19 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Train.csv')
test_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Test.csv')
train_df.rename(columns={'labels': 'churn'}, inplace=True)
train_df.describe().T
dataset = train_df['churn'].value_counts()
dataset | code |
73078429/cell_32 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Train.csv')
test_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Test.csv')
train_df.rename(columns={'labels': 'churn'}, inplace=True)
train_df.describe().T
def summary(df):
Types = df.dtypes
Counts = df.apply(lambda x: x.count())
Uniques = df.apply(lambda x: x.unique().shape[0])
cols = ['Types', 'Counts', 'Uniques']
str = pd.concat([Types, Counts, Uniques], axis=1, sort=True)
str.columns = cols
summary(df=train_df)
sizes = [29941, 3967]
labels = ('NO', 'YES')
explode = (0, 0.1)
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, autopct='%1.1f%%', shadow=True, startangle=75)
ax1.axis('equal')
ax1.set_title('Client Churn Distribution')
ax1.legend(labels)
plt.show()
#ratio of those who churn and those who don't
def show_correlations(df, show_chart = True):
fig = plt.figure(figsize = (20,10))
corr = df.corr()
if show_chart == True:
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values,
annot=True)
return corr
correlation_df = show_correlations(train_df,show_chart=True)
#Get Correlation of "churn" with other variables:
features = list(train_df.columns)
features.remove('churn')
features
float_features = [i for i in train_df.columns if train_df[i].dtype == 'float64']
float_features
int_features = [i for i in train_df.columns if train_df[i].dtype == 'int64']
int_features.remove('churn')
int_features
fig, ax = plt.subplots(4, 2, figsize = (15, 10))
ax = ax.flatten()
for i, c in enumerate(float_features):
sns.boxplot(x = train_df[c], ax = ax[i], palette = 'Set3')
plt.suptitle('Box Plot', fontsize = 25)
fig.tight_layout()
#Box plot of float features
def plot_hist(variable):
pass
fig, ax = plt.subplots(5, 2, figsize=(15, 10))
ax = ax.flatten()
for i, c in enumerate(int_features):
sns.boxplot(x=train_df[c], ax=ax[i], palette='Set3')
plt.suptitle('Box Plot', fontsize=25)
fig.tight_layout() | code |
73078429/cell_28 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Train.csv')
test_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Test.csv')
train_df.rename(columns={'labels': 'churn'}, inplace=True)
train_df.describe().T
def summary(df):
Types = df.dtypes
Counts = df.apply(lambda x: x.count())
Uniques = df.apply(lambda x: x.unique().shape[0])
cols = ['Types', 'Counts', 'Uniques']
str = pd.concat([Types, Counts, Uniques], axis=1, sort=True)
str.columns = cols
summary(df=train_df)
sizes = [29941, 3967]
labels = ('NO', 'YES')
explode = (0, 0.1)
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, autopct='%1.1f%%', shadow=True, startangle=75)
ax1.axis('equal')
ax1.set_title('Client Churn Distribution')
ax1.legend(labels)
plt.show()
#ratio of those who churn and those who don't
def show_correlations(df, show_chart = True):
fig = plt.figure(figsize = (20,10))
corr = df.corr()
if show_chart == True:
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values,
annot=True)
return corr
correlation_df = show_correlations(train_df,show_chart=True)
#Get Correlation of "churn" with other variables:
features = list(train_df.columns)
features.remove('churn')
features
float_features = [i for i in train_df.columns if train_df[i].dtype == 'float64']
float_features
int_features = [i for i in train_df.columns if train_df[i].dtype == 'int64']
int_features.remove('churn')
int_features
fig, ax = plt.subplots(4, 2, figsize=(15, 10))
ax = ax.flatten()
for i, c in enumerate(float_features):
sns.boxplot(x=train_df[c], ax=ax[i], palette='Set3')
plt.suptitle('Box Plot', fontsize=25)
fig.tight_layout() | code |
73078429/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Train.csv')
test_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Test.csv')
train_df.rename(columns={'labels': 'churn'}, inplace=True)
train_df.describe().T
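# Quick data-quality overview: dtype, non-null count and unique-value count for each column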
def summary(df):
Types = df.dtypes
Counts = df.apply(lambda x: x.count())
Uniques = df.apply(lambda x: x.unique().shape[0])
cols = ['Types', 'Counts', 'Uniques']
str = pd.concat([Types, Counts, Uniques], axis=1, sort=True)
str.columns = cols
display(str.sort_values(by='Uniques', ascending=False))
print('__________Data Types__________\n')
print(str.Types.value_counts())
summary(df=train_df) | code |
73078429/cell_17 | [
"text_html_output_1.png"
] | import missingno as msno
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Train.csv')
test_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Test.csv')
train_df.rename(columns={'labels': 'churn'}, inplace=True)
train_df.describe().T
import missingno as msno
msno.matrix(train_df) | code |
73078429/cell_24 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Train.csv')
test_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Test.csv')
train_df.rename(columns={'labels': 'churn'}, inplace=True)
train_df.describe().T
features = list(train_df.columns)
features.remove('churn')
features | code |
73078429/cell_22 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Train.csv')
test_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Test.csv')
train_df.rename(columns={'labels': 'churn'}, inplace=True)
train_df.describe().T
def summary(df):
Types = df.dtypes
Counts = df.apply(lambda x: x.count())
Uniques = df.apply(lambda x: x.unique().shape[0])
cols = ['Types', 'Counts', 'Uniques']
str = pd.concat([Types, Counts, Uniques], axis=1, sort=True)
str.columns = cols
summary(df=train_df)
sizes = [29941, 3967]
labels = ('NO', 'YES')
explode = (0, 0.1)
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, autopct='%1.1f%%', shadow=True, startangle=75)
ax1.axis('equal')
ax1.set_title('Client Churn Distribution')
ax1.legend(labels)
plt.show()
#ratio of those who churn and those who don't
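# Correlation heatmap of all numeric columns (including churn)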
def show_correlations(df, show_chart=True):
fig = plt.figure(figsize=(20, 10))
corr = df.corr()
if show_chart == True:
sns.heatmap(corr, xticklabels=corr.columns.values, yticklabels=corr.columns.values, annot=True)
return corr
correlation_df = show_correlations(train_df, show_chart=True) | code |
73078429/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Train.csv')
test_df = pd.read_csv('/kaggle/input/insurance-churn-prediction-weekend-hackathon/Insurance_Churn_ParticipantsData/Test.csv')
train_df.head() | code |
73078429/cell_5 | [
"image_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |