File size: 1,183 Bytes
e7120e9
 
 
 
bee76fe
 
 
 
f025397
 
 
 
 
 
 
 
 
 
 
 
 
bee76fe
b4e1b44
bee76fe
 
 
f025397
 
 
 
bee76fe
 
 
 
 
 
 
 
e7120e9
b4e1b44
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
import requests
from bs4 import BeautifulSoup
import streamlit as st

# Pages to scrape for article links.
# NOTE(review): scrape_wikipedia() matches a Wikipedia-specific
# 'div-col columns column-width' div, so the WHO URL will most likely
# contribute no rows — confirm whether it is intentionally included.
urls = ['https://en.wikipedia.org/wiki/Health_care',
        'https://en.wikipedia.org/wiki/Health_information_on_the_Internet',
        'https://www.who.int/health-topics/coronavirus#tab=tab_1']

def scrape_wikipedia(url):
    """Fetch *url* and return the <li> items of its column-list div.

    Looks for the first ``<div class="div-col columns column-width">``
    (the layout Wikipedia uses for columnar link lists) and returns all
    ``<li>`` elements inside it.

    Parameters
    ----------
    url : str
        Page URL to fetch.

    Returns
    -------
    list
        BeautifulSoup ``<li>`` Tag objects; ``[]`` when the div is
        missing or the request fails (an error is written to the page).
    """
    try:
        # Timeout so a stalled server cannot hang the Streamlit app.
        response = requests.get(url, timeout=10)
        # Surface HTTP errors (404, 500, ...) instead of silently
        # parsing an error page.
        response.raise_for_status()
    except requests.RequestException:
        # Narrow except: only network/HTTP failures are expected here.
        # The original bare `except:` also swallowed KeyboardInterrupt
        # and programming errors, hiding real bugs.
        st.write(f"Error scraping {url}")
        return []

    soup = BeautifulSoup(response.content, 'html.parser')
    div_element = soup.find('div', {'class': 'div-col columns column-width'})
    if div_element is None:
        # Pages without this div (e.g. non-Wikipedia URLs) yield nothing.
        return []
    return div_element.find_all('li')

def main():
    """Render the scraped article lists as a Streamlit page.

    Scrapes every URL in the module-level ``urls`` list, collects each
    article title with the index of its source URL, then shows the rows
    both as a dataframe and as a simple line-by-line grid.
    """
    st.title("List of Articles on Health Care")

    data = []
    # enumerate() provides the source index directly; the original
    # `urls.index(url)` performed an O(n) list search on every row and
    # would mislabel rows if `urls` ever contained duplicates.
    for idx, url in enumerate(urls):
        for article in scrape_wikipedia(url):
            data.append({'url': idx, 'article': article.text})

    st.write('## Dataset')
    st.dataframe(data)

    st.write('## Grid')
    st.write('url', 'article')
    for d in data:
        st.write(d['url'], d['article'])

if __name__ == '__main__':
    main()