import requests
from bs4 import BeautifulSoup
import streamlit as st
from urllib.parse import quote


@st.cache_resource
def display_glossary_entity(k):
    # Render the query as a bold label followed by quick-search links to
    # several reference and search sites.
    search_urls = {
        "🚀🌌ArXiv": lambda k: f"https://arxiv.org/search/?query={quote(k)}&searchtype=all&source=header",
        "📖Wiki": lambda k: f"https://en.wikipedia.org/wiki/{quote(k)}",
        "🔍Google": lambda k: f"https://www.google.com/search?q={quote(k)}",
        "🔎Bing": lambda k: f"https://www.bing.com/search?q={quote(k)}",
        "🎥YouTube": lambda k: f"https://www.youtube.com/results?search_query={quote(k)}",
        "🐦Twitter": lambda k: f"https://twitter.com/search?q={quote(k)}",
    }
    links_md = " ".join(f"[{emoji}]({url(k)})" for emoji, url in search_urls.items())
    st.markdown(f"**{k}** <small>{links_md}</small>", unsafe_allow_html=True)
def perform_search(search_query, search_engine):
    # Scrape results from the chosen engine. A browser-like User-Agent is sent
    # because these sites often block the default requests client, and the query
    # is URL-encoded so spaces and special characters survive the request.
    headers = {"User-Agent": "Mozilla/5.0 (compatible; WebSearchApp/1.0)"}
    encoded_query = quote(search_query)
    results = []
    if search_engine == "Google":
        url = f"https://www.google.com/search?q={encoded_query}"
        response = requests.get(url, headers=headers)
        soup = BeautifulSoup(response.text, "html.parser")
        search_results = soup.select(".g")
        results = [result.get_text(strip=True) for result in search_results]
    elif search_engine == "Bing":
        url = f"https://www.bing.com/search?q={encoded_query}"
        response = requests.get(url, headers=headers)
        soup = BeautifulSoup(response.text, "html.parser")
        search_results = soup.select(".b_algo")
        for result in search_results:
            title_el = result.select_one("h2")
            link_el = result.select_one("h2 a")
            snippet_el = result.select_one(".b_caption p")
            if not (title_el and link_el):
                continue  # Skip entries that do not match the expected markup.
            results.append({
                "title": title_el.text,
                "link": link_el["href"],
                "snippet": snippet_el.text if snippet_el else "",
            })
    elif search_engine == "Wikipedia":
        url = f"https://en.wikipedia.org/w/index.php?search={encoded_query}"
        response = requests.get(url, headers=headers)
        soup = BeautifulSoup(response.text, "html.parser")
        search_results = soup.select(".mw-search-result")
        for result in search_results:
            heading = result.select_one(".mw-search-result-heading a")
            snippet_el = result.select_one(".searchresult")
            if heading is None:
                continue
            results.append({
                "title": heading.text,
                "link": f"https://en.wikipedia.org{heading['href']}",
                "snippet": snippet_el.text.strip() if snippet_el else "",
            })
    elif search_engine == "Twitter":
        # Twitter's search page is rendered client-side, so a plain HTTP request
        # usually returns no ".tweet" elements; an API or headless browser is
        # needed for reliable results.
        url = f"https://twitter.com/search?q={encoded_query}"
        response = requests.get(url, headers=headers)
        soup = BeautifulSoup(response.text, "html.parser")
        search_results = soup.select(".tweet")
        results = [result.get_text(strip=True) for result in search_results]
    return results
def main():
    st.set_page_config(page_title="Web Search App", page_icon=":mag:", layout="wide")
    st.title("Web Search App")
    st.write("Search Google, Bing, Wikipedia, and Twitter simultaneously!")

    search_query = st.text_input("Enter your search query")
    if search_query:
        display_glossary_entity(search_query)

    col1, col2, col3, col4 = st.columns(4)

    with col1:
        st.header("Google Search Results")
        if st.button("Search Google"):
            google_results = perform_search(search_query, "Google")
            for result in google_results:
                st.write(result)

    with col2:
        st.header("Bing Search Results")
        if st.button("Search Bing"):
            bing_results = perform_search(search_query, "Bing")
            for result in bing_results:
                st.write(f"**{result['title']}**")
                st.write(result["snippet"])
                st.write(f"[Link]({result['link']})")
                st.write("---")

    with col3:
        st.header("Wikipedia Search Results")
        if st.button("Search Wikipedia"):
            wikipedia_results = perform_search(search_query, "Wikipedia")
            for result in wikipedia_results:
                st.write(f"**{result['title']}**")
                st.write(result["snippet"])
                st.write(f"[Link]({result['link']})")
                st.write("---")

    with col4:
        st.header("Twitter Search Results")
        if st.button("Search Twitter"):
            twitter_results = perform_search(search_query, "Twitter")
            for result in twitter_results:
                st.write(result)


if __name__ == "__main__":
    main()
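
# To try the app locally (assuming this file is saved as app.py and the
# dependencies streamlit, requests, and beautifulsoup4 are installed), run:
#   streamlit run app.py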