awacke1 committed
Commit a1dc890 · verified · 1 Parent(s): 6b8e2bc

Update app.py

Files changed (1)
  1. app.py +16 -66
app.py CHANGED
@@ -1,62 +1,27 @@
 import requests
 from bs4 import BeautifulSoup
 import streamlit as st
-from urllib.parse import quote
-
-@st.cache_resource
-def display_glossary_entity(k):
-    search_urls = {
-        "🚀🌌ArXiv": lambda k: f"https://arxiv.org/search/?query={quote(k)}&searchtype=all&source=header",
-        "📖Wiki": lambda k: f"https://en.wikipedia.org/wiki/{quote(k)}",
-        "🔍Google": lambda k: f"https://www.google.com/search?q={quote(k)}",
-        "🔎Bing": lambda k: f"https://www.bing.com/search?q={quote(k)}",
-        "🎥YouTube": lambda k: f"https://www.youtube.com/results?search_query={quote(k)}",
-        "🐦Twitter": lambda k: f"https://twitter.com/search?q={quote(k)}",
-    }
-    links_md = ' '.join([f"[{emoji}]({url(k)})" for emoji, url in search_urls.items()])
-    st.markdown(f"**{k}** <small>{links_md}</small>", unsafe_allow_html=True)
 
 def perform_search(search_query, search_engine):
     if search_engine == "Google":
         url = f"https://www.google.com/search?q={search_query}"
-        response = requests.get(url)
-        soup = BeautifulSoup(response.text, "html.parser")
-        search_results = soup.select(".g")
-        results = []
-        for result in search_results:
-            title = result.select_one(".r").text
-            link = result.select_one(".r a")["href"]
-            results.append({"title": title, "link": link})
     elif search_engine == "Bing":
         url = f"https://www.bing.com/search?q={search_query}"
-        response = requests.get(url)
-        soup = BeautifulSoup(response.text, "html.parser")
+
+    response = requests.get(url)
+    soup = BeautifulSoup(response.text, "html.parser")
+
+    # Extract search results based on the search engine
+    if search_engine == "Google":
+        search_results = soup.select(".yuRUbf a")
+    elif search_engine == "Bing":
         search_results = soup.select(".b_algo h2 a")
-        results = []
-        for result in search_results:
-            title = result.text
-            link = result["href"]
-            results.append({"title": title, "link": link})
-    elif search_engine == "Wikipedia":
-        url = f"https://en.wikipedia.org/w/index.php?search={search_query}"
-        response = requests.get(url)
-        soup = BeautifulSoup(response.text, "html.parser")
-        search_results = soup.select(".mw-search-result-heading a")
-        results = []
-        for result in search_results:
-            title = result.text
-            link = result["href"]
-            results.append({"title": title, "link": link})
-    elif search_engine == "Twitter":
-        url = f"https://twitter.com/search?q={search_query}"
-        response = requests.get(url)
-        soup = BeautifulSoup(response.text, "html.parser")
-        search_results = soup.select(".css-1dbjc4n.r-1loqt21.r-18u37iz.r-1ny4l3l.r-1udh08x.r-1qhn6m8.r-i023vh.r-o7ynqc.r-6416eg")
-        results = []
-        for result in search_results:
-            title = result.select_one(".css-901oao.css-16my406.r-poiln3.r-bcqeeo.r-qvutc0").text
-            link = f"https://twitter.com{result.select_one('.css-4rbku5')['href']}"
-            results.append({"title": title, "link": link})
+
+    results = []
+    for result in search_results:
+        title = result.text
+        link = result["href"]
+        results.append({"title": title, "link": link})
 
     return results
 
@@ -64,12 +29,11 @@ def main():
     st.set_page_config(page_title="Web Search App", page_icon=":mag:", layout="wide")
 
     st.title("Web Search App")
-    st.write("Search Google, Bing, Wikipedia, and Twitter simultaneously!")
+    st.write("Search Google and Bing simultaneously!")
 
     search_query = st.text_input("Enter your search query")
-    display_glossary_entity(search_query)
 
-    col1, col2, col3, col4 = st.columns(4)
+    col1, col2 = st.columns(2)
 
     with col1:
         st.header("Google Search Results")
@@ -84,20 +48,6 @@ def main():
             bing_results = perform_search(search_query, "Bing")
             for result in bing_results:
                 st.write(f"[{result['title']}]({result['link']})")
-
-    with col3:
-        st.header("Wikipedia Search Results")
-        if st.button("Search Wikipedia"):
-            wikipedia_results = perform_search(search_query, "Wikipedia")
-            for result in wikipedia_results:
-                st.write(f"[{result['title']}]({result['link']})")
-
-    with col4:
-        st.header("Twitter Search Results")
-        if st.button("Search Twitter"):
-            twitter_results = perform_search(search_query, "Twitter")
-            for result in twitter_results:
-                st.write(f"[{result['title']}]({result['link']})")
 
 if __name__ == "__main__":
     main()
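
A minimal sketch of how the consolidated perform_search could be exercised outside Streamlit, assuming app.py is importable from the working directory and that the query string "streamlit" is just an arbitrary example. The .yuRUbf and .b_algo selectors depend on the current Google/Bing result markup, and both sites may serve different HTML (or block plain requests clients), so an empty result list here does not necessarily mean the code is broken.

# Hypothetical local check, not part of the commit itself.
from app import perform_search

if __name__ == "__main__":
    for engine in ("Google", "Bing"):
        # perform_search returns a list of {"title": ..., "link": ...} dicts
        hits = perform_search("streamlit", engine)
        print(f"{engine}: {len(hits)} results")
        for hit in hits[:3]:
            print(f"  - {hit['title']}: {hit['link']}")

Keeping this check in a separate script (rather than in app.py) avoids mixing command-line testing with the Streamlit UI defined in main().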