awacke1 committed
Commit 8ddf073 · 1 Parent(s): f025397

Update app.py

Files changed (1): app.py (+10 -6)
app.py CHANGED
@@ -1,6 +1,7 @@
 import requests
 from bs4 import BeautifulSoup
 import streamlit as st
+import time
 
 urls = ['https://en.wikipedia.org/wiki/Health_care',
         'https://en.wikipedia.org/wiki/Health_information_on_the_Internet',
@@ -8,26 +9,29 @@ urls = ['https://en.wikipedia.org/wiki/Health_care',
 
 def scrape_wikipedia(url):
     try:
+        start_time = time.time()
         response = requests.get(url)
+        end_time = time.time()
         soup = BeautifulSoup(response.content, 'html.parser')
         div_element = soup.find('div', {'class': 'div-col columns column-width'})
         if div_element is not None:
             articles_list = div_element.find_all('li')
         else:
             articles_list = []
-        return articles_list
+        return {'url': url, 'response_time': end_time - start_time, 'articles': articles_list}
     except:
-        st.write(f"Error scraping {url}")
-        return []
+        return {'url': url, 'response_time': None, 'articles': []}
 
 def main():
     st.title("List of Articles on Health Care")
 
     data = []
     for url in urls:
-        articles_list = scrape_wikipedia(url)
-        for article in articles_list:
-            data.append({'url': urls.index(url), 'article': article.text})
+        st.write(f"Scraping {url}...")
+        scraped_data = scrape_wikipedia(url)
+        st.write(f"Response time: {scraped_data['response_time']}")
+        for article in scraped_data['articles']:
+            data.append({'url': scraped_data['url'], 'article': article.text})
 
     st.write('## Dataset')
     st.dataframe(data)
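
For context, a minimal sketch of how the updated scrape_wikipedia() return value can be consumed outside Streamlit. The function body mirrors the new version in the diff above; the narrower requests.RequestException handler and the final print loop are illustrative additions, not part of the commit.

import time

import requests
from bs4 import BeautifulSoup

# The first two URLs from app.py; the third entry is truncated in the diff view.
urls = ['https://en.wikipedia.org/wiki/Health_care',
        'https://en.wikipedia.org/wiki/Health_information_on_the_Internet']

def scrape_wikipedia(url):
    try:
        # Time the HTTP request, as the committed version does.
        start_time = time.time()
        response = requests.get(url)
        end_time = time.time()
        soup = BeautifulSoup(response.content, 'html.parser')
        div_element = soup.find('div', {'class': 'div-col columns column-width'})
        articles_list = div_element.find_all('li') if div_element is not None else []
        return {'url': url, 'response_time': end_time - start_time, 'articles': articles_list}
    except requests.RequestException:
        # Failures return the same dict shape with no timing and no articles.
        return {'url': url, 'response_time': None, 'articles': []}

if __name__ == '__main__':
    for url in urls:
        result = scrape_wikipedia(url)
        elapsed = result['response_time']
        timing = 'request failed' if elapsed is None else f"{elapsed:.2f}s"
        print(f"{result['url']}: {timing}, {len(result['articles'])} list items")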