File size: 3,728 Bytes
914042b
 
 
 
 
 
 
 
97e73ab
914042b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
import requests
from bs4 import BeautifulSoup
import streamlit as st
from urllib.parse import quote

def display_glossary_entity(k):
    """Render the term *k* in bold followed by a row of quick-search links.

    Emits a single ``st.markdown`` line linking the term to ArXiv, Wikipedia,
    Google, Bing, YouTube, and Twitter searches. Does nothing when *k* is
    empty (avoids rendering a useless link row for a blank query).

    NOTE: the original decorated this with ``@st.cache_resource``; that
    decorator is meant for shared resources (DB connections, models), not
    for functions whose purpose is a UI side effect — caching a None-returning
    render function can suppress re-rendering. Removed.
    """
    if not k:
        return
    # Label -> URL builder; each builder URL-encodes the term via quote().
    search_urls = {
        "🚀🌌ArXiv": lambda k: f"https://arxiv.org/search/?query={quote(k)}&searchtype=all&source=header",
        "📖Wiki": lambda k: f"https://en.wikipedia.org/wiki/{quote(k)}",
        "🔍Google": lambda k: f"https://www.google.com/search?q={quote(k)}",
        "🔎Bing": lambda k: f"https://www.bing.com/search?q={quote(k)}",
        "🎥YouTube": lambda k: f"https://www.youtube.com/results?search_query={quote(k)}",
        "🐦Twitter": lambda k: f"https://twitter.com/search?q={quote(k)}",
    }
    links_md = ' '.join(f"[{emoji}]({url(k)})" for emoji, url in search_urls.items())
    st.markdown(f"**{k}** <small>{links_md}</small>", unsafe_allow_html=True)

def perform_search(search_query, search_engine):
    """Scrape the first page of results from one search engine.

    Parameters
    ----------
    search_query : str
        Raw user query; URL-encoded here before being placed in the URL.
    search_engine : str
        One of "Google", "Bing", "Wikipedia", "Twitter". Any other value
        returns an empty list (the original raised NameError on an unknown
        engine because ``search_results`` was never bound).

    Returns
    -------
    list[dict]
        Dicts with "title" and "link" keys, one per scraped result anchor.
        CSS selectors are tied to each engine's current HTML and may break
        when the markup changes.
    """
    q = quote(search_query)  # fix: the original interpolated the raw query (breaks on spaces, '&', '#')
    # engine -> (results-page URL, CSS selector for result anchors)
    engines = {
        "Google": (f"https://www.google.com/search?q={q}", ".yuRUbf a"),
        "Bing": (f"https://www.bing.com/search?q={q}", ".b_algo h2 a"),
        "Wikipedia": (f"https://en.wikipedia.org/w/index.php?search={q}", ".mw-search-result-heading a"),
        "Twitter": (f"https://twitter.com/search?q={q}", ".css-4rbku5"),
    }
    if search_engine not in engines:
        return []

    url, selector = engines[search_engine]
    # Browser-like UA: without one, Google/Bing serve bot pages whose markup
    # doesn't match the selectors above. Timeout so the app can't hang forever.
    response = requests.get(
        url,
        headers={"User-Agent": "Mozilla/5.0"},
        timeout=10,
    )
    soup = BeautifulSoup(response.text, "html.parser")

    results = []
    for result in soup.select(selector):
        title = result.text
        link = result["href"]
        if search_engine == "Twitter":
            # Twitter anchors are site-relative paths.
            link = f"https://twitter.com{link}"
        results.append({"title": title, "link": link})

    return results

def main():
    """Streamlit entry point: one query box, four side-by-side engine columns.

    Each column has its own button; clicking it scrapes that engine via
    ``perform_search`` and renders the results as markdown links.
    """
    st.set_page_config(page_title="Web Search App", page_icon=":mag:", layout="wide")

    st.title("Web Search App")
    st.write("Search Google, Bing, Wikipedia, and Twitter simultaneously!")

    search_query = st.text_input("Enter your search query")
    # Fix: the original rendered the glossary row even for an empty query.
    if search_query:
        display_glossary_entity(search_query)

    # One column per engine; the copy-pasted per-engine blocks of the
    # original are collapsed into a loop (labels are byte-identical).
    engines = ("Google", "Bing", "Wikipedia", "Twitter")
    for column, engine in zip(st.columns(4), engines):
        with column:
            st.header(f"{engine} Search Results")
            if st.button(f"Search {engine}"):
                for result in perform_search(search_query, engine):
                    st.write(f"[{result['title']}]({result['link']})")

if __name__ == "__main__":
    main()