File size: 1,245 Bytes
e7120e9
4508b3b
e7120e9
8ddf073
e7120e9
bee76fe
 
 
 
f025397
 
8ddf073
f025397
8ddf073
c485d4d
f025397
c485d4d
bee76fe
b4e1b44
bee76fe
 
f025397
8ddf073
 
 
4508b3b
 
 
 
c485d4d
4508b3b
e7120e9
b4e1b44
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
import requests
from bs4 import BeautifulSoup
import streamlit as st
import time

# Pages fetched by main(): two Wikipedia health articles plus one WHO topic
# page. NOTE: despite the helper's name (scrape_wikipedia), the WHO URL is
# fetched through exactly the same code path.
urls = ['https://en.wikipedia.org/wiki/Health_care',
        'https://en.wikipedia.org/wiki/Health_information_on_the_Internet',
        'https://www.who.int/health-topics/coronavirus#tab=tab_1']

def scrape_wikipedia(url):
    """Fetch *url* and time the HTTP request.

    Works for any HTTP(S) URL, not only Wikipedia, despite the name
    (kept for backward compatibility with existing callers).

    Parameters
    ----------
    url : str
        Address to fetch.

    Returns
    -------
    dict
        ``{'url': str, 'response_time': float | None, 'content': bytes}``.
        On any request failure ``response_time`` is ``None`` and
        ``content`` is ``b""`` (bytes, matching the success path, so
        callers can apply bytes operations uniformly — the original
        returned a ``str`` here, which made ``b'\r\n' in content``
        raise TypeError downstream).
    """
    try:
        start_time = time.time()
        # Timeout keeps the Streamlit app from hanging forever on a
        # dead or slow host; 10 s is generous for these pages.
        response = requests.get(url, timeout=10)
        elapsed = time.time() - start_time
        return {'url': url, 'response_time': elapsed, 'content': response.content}
    except requests.RequestException:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # and programming errors are no longer silently swallowed.
        return {'url': url, 'response_time': None, 'content': b""}

def main():
    """Streamlit page: fetch each URL in *urls*, show timing and content.

    For each URL: reports the response time, pretty-prints the body via
    BeautifulSoup when it does not already contain CRLF line endings,
    and renders it in a scrollable text area.
    """
    st.title("List of Articles on Health Care")

    for url in urls:
        st.write(f"Scraping {url}...")
        scraped_data = scrape_wikipedia(url)
        st.write(f"Response time: {scraped_data['response_time']}")
        content = scraped_data['content']
        # Skip prettifying when the fetch failed (empty payload) or the
        # body already uses CRLF line endings (heuristic from original).
        if content and b'\r\n' not in content:
            soup = BeautifulSoup(content, 'html.parser')
            content = soup.prettify().encode()
        st.write(f"Content: ")
        # Fixes the original call, which passed non-existent kwargs
        # (`return_streamlit`) and `value=None` alongside a positional
        # value — a TypeError on every iteration. `key=url` gives each
        # widget a unique key, avoiding Streamlit's DuplicateWidgetID
        # error; errors='replace' guards against non-UTF-8 payloads.
        st.text_area("", content.decode(errors='replace'), height=200, key=url)

if __name__ == '__main__':
    main()