import gradio as gr
import requests
import time
import random
from bs4 import BeautifulSoup
import trafilatura

USER_AGENTS = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Mozilla/5.0 (iPhone; CPU iPhone OS 14_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Mobile/15E148 Safari/604.1'
]


def get_random_user_agent():
    return random.choice(USER_AGENTS)


def extract_content_bs4(url):
    try:
        response = requests.get(url, headers={'User-Agent': get_random_user_agent()}, timeout=10)
        soup = BeautifulSoup(response.content, 'html.parser')
        paragraphs = soup.find_all('p')
        content = ' '.join([p.text for p in paragraphs])
        return content[:1000] + "..." if len(content) > 1000 else content
    except Exception as e:
        return f"Error extracting content: {str(e)}"


def extract_content_trafilatura(url):
    try:
        # trafilatura.fetch_url() does not accept custom headers, so the URL is
        # passed on its own and trafilatura's own download settings are used.
        downloaded = trafilatura.fetch_url(url)
        content = trafilatura.extract(downloaded, include_comments=False, include_tables=False)
        if not content:
            return "No content extracted."
        return content[:1000] + "..." if len(content) > 1000 else content
    except Exception as e:
        return f"Error extracting content: {str(e)}"


def search_searx(query, instance_url='https://searx.org', categories='general', max_retries=3, num_results=10,
                 use_trafilatura=False, time_range='', language='en', safesearch=0):
    """
    Perform a search using the SearXNG API with advanced options.

    Note: the chosen instance must have the JSON output format enabled,
    otherwise the request with `format=json` will be rejected.
    """
    search_endpoint = f"{instance_url}/search"
    params = {
        'q': query,
        'format': 'json',
        'categories': categories,
        'pageno': 1,
        'time_range': time_range,
        'language': language,
        'safesearch': safesearch,
        'results': str(num_results)
    }
    headers = {
        'User-Agent': get_random_user_agent(),
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Accept-Language': 'en-US,en;q=0.5',
        'Referer': instance_url,
        'DNT': '1',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1'
    }
    for attempt in range(max_retries):
        try:
            response = requests.get(search_endpoint, params=params, headers=headers, timeout=10)
            response.raise_for_status()
            data = response.json()
            if 'results' not in data or not data['results']:
                return "No results found."
            formatted_results = ""
            for idx, result in enumerate(data['results'][:num_results], start=1):
                title = result.get('title', 'No Title')
                url = result.get('url', 'No URL')
                if use_trafilatura:
                    content = extract_content_trafilatura(url)
                else:
                    content = extract_content_bs4(url)
                formatted_results += f"**{idx}. {title}**\n[{url}]({url})\n{content}\n\n"
            return formatted_results
        except requests.exceptions.RequestException as e:
            # Back off and retry on rate limiting; `e.response` is used here because
            # `response` may be unbound if the request itself failed.
            if e.response is not None and e.response.status_code == 429:
                wait_time = 2 ** attempt + random.uniform(0, 1)
                time.sleep(wait_time)
            else:
                return f"An error occurred while searching: {e}"
    return "Max retries reached. Please try again later."


def create_gradio_interface():
    """
    Creates and returns the Gradio interface with advanced SearXNG options.
    """
    with gr.Blocks() as demo:
        gr.Markdown("# 🕵️‍♂️ Advanced SearXNG Search with Content Extraction")
        gr.Markdown(
            "This application allows you to perform private searches using SearXNG with advanced options and content extraction."
        )
        with gr.Row():
            with gr.Column():
                query = gr.Textbox(
                    label="Search Query",
                    placeholder="Enter your search query here...",
                    lines=1
                )
                instance_url = gr.Textbox(
                    label="SearXNG Instance URL",
                    value="https://searx.org",
                    placeholder="https://searx.instance.url",
                    lines=1
                )
                categories = gr.Textbox(
                    label="Categories",
                    value="general",
                    placeholder="e.g., general, news, science",
                    lines=1
                )
                num_results = gr.Slider(
                    minimum=1,
                    maximum=20,
                    value=10,
                    step=1,
                    label="Number of Results"
                )
                use_trafilatura = gr.Checkbox(label="Use Trafilatura for extraction (instead of BeautifulSoup)")
                time_range = gr.Dropdown(
                    choices=["", "day", "week", "month", "year"],
                    value="",
                    label="Time Range"
                )
                language = gr.Textbox(
                    label="Language",
                    value="en",
                    placeholder="e.g., en, fr, de",
                    lines=1
                )
                safesearch = gr.Slider(
                    minimum=0,
                    maximum=2,
                    value=0,
                    step=1,
                    label="SafeSearch (0: Off, 1: Moderate, 2: Strict)"
                )
                search_button = gr.Button("Search")
            with gr.Column():
                results = gr.Markdown("### Search Results will appear here...")

        def perform_search(q, url, cats, num, use_traf, t_range, lang, safe):
            return search_searx(q, instance_url=url, categories=cats, num_results=int(num),
                                use_trafilatura=use_traf, time_range=t_range, language=lang, safesearch=int(safe))

        search_button.click(
            perform_search,
            inputs=[query, instance_url, categories, num_results, use_trafilatura, time_range, language, safesearch],
            outputs=results
        )
        gr.Markdown(
            """
            ---
            **Note:** This application uses SearXNG to fetch results from multiple sources while preserving your privacy.
            It then attempts to extract content from the original sources, which may be subject to the terms of service of those websites.
            """
        )
    return demo


iface = create_gradio_interface()

if __name__ == "__main__":
    iface.launch()
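
# launch() also accepts the usual Gradio options if a different setup is needed,
# e.g. iface.launch(server_name="0.0.0.0", server_port=7860, share=True) to serve
# on all interfaces or request a temporary public link (values shown are examples).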