import time

import requests
from bs4 import BeautifulSoup
import streamlit as st

# Pages to scan for column-style lists of health-related articles.
urls = ['https://en.wikipedia.org/wiki/Health_care',
        'https://en.wikipedia.org/wiki/Health_information_on_the_Internet',
        'https://www.who.int/health-topics/coronavirus#tab=tab_1']
def scrape_wikipedia(url):
    """Fetch a page and return its URL, the request's response time, and the
    <li> items found in the first 'div-col columns column-width' column list."""
    try:
        start_time = time.time()
        response = requests.get(url)
        end_time = time.time()
        soup = BeautifulSoup(response.content, 'html.parser')
        div_element = soup.find('div', {'class': 'div-col columns column-width'})
        if div_element is not None:
            articles_list = div_element.find_all('li')
        else:
            articles_list = []
        return {'url': url, 'response_time': end_time - start_time, 'articles': articles_list}
    except Exception:
        # Any request or parsing failure yields an empty result instead of crashing the app.
        return {'url': url, 'response_time': None, 'articles': []}
def main():
    st.title("List of Articles on Health Care")
    data = []
    for url in urls:
        st.write(f"Scraping {url}...")
        scraped_data = scrape_wikipedia(url)
        st.write(f"Response time: {scraped_data['response_time']}")
        for article in scraped_data['articles']:
            data.append({'url': scraped_data['url'], 'article': article.text})

    st.write('## Dataset')
    st.dataframe(data)

    st.write('## Grid')
    st.write('url', 'article')
    for d in data:
        st.write(d['url'], d['article'])


if __name__ == '__main__':
    main()
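
# Usage note: a minimal sketch of how to launch this app locally, assuming the
# file is saved as app.py and the streamlit, requests, and beautifulsoup4
# packages are installed in the current environment:
#
#     streamlit run app.py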