broadfield-dev committed on
Commit
2e45e88
·
verified ·
1 Parent(s): 085aa8e

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +207 -0
app.py ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ os.system("playwright install")
3
+ import re
4
+ import urllib.parse
5
+ import asyncio
6
+ from typing import Dict
7
+
8
+ import gradio as gr
9
+ from bs4 import BeautifulSoup, NavigableString
10
+ from playwright.async_api import async_playwright
11
+
12
+ # --- 1. GLOBAL RESOURCES & CONFIGURATION ---
13
+
14
+ # This dictionary will hold the long-lived Playwright and Browser objects
15
+ PLAYWRIGHT_STATE: Dict = {}
16
+
17
+ # EXPANDED: A comprehensive list of search engines
18
+ SEARCH_ENGINES = {
19
+ # Original
20
+ "DuckDuckGo": "https://duckduckgo.com/html/?q={query}",
21
+ "Google": "https://www.google.com/search?q={query}",
22
+ "Bing": "https://www.bing.com/search?q={query}",
23
+ "Brave": "https://search.brave.com/search?q={query}",
24
+ "Ecosia": "https://www.ecosia.org/search?q={query}",
25
+ # 10 More Added
26
+ "Yahoo": "https://search.yahoo.com/search?p={query}",
27
+ "Startpage": "https://www.startpage.com/sp/search?q={query}",
28
+ "Qwant": "https://www.qwant.com/?q={query}",
29
+ "Swisscows": "https://swisscows.com/web?query={query}",
30
+ "You.com": "https://you.com/search?q={query}",
31
+ "SearXNG": "https://searx.be/search?q={query}",
32
+ "MetaGer": "https://metager.org/meta/meta.ger-en?eingabe={query}",
33
+ "Yandex": "https://yandex.com/search/?text={query}",
34
+ "Baidu": "https://www.baidu.com/s?wd={query}",
35
+ "Perplexity": "https://www.perplexity.ai/search?q={query}"
36
+ }
37
+
38
+
39
+ # --- 2. ADVANCED HTML-TO-MARKDOWN CONVERTER (Unchanged) ---
40
+ class HTML_TO_MARKDOWN_CONVERTER:
41
+ # ... [The class code is identical to the previous version and remains unchanged] ...
42
+ def __init__(self, soup: BeautifulSoup, base_url: str):
43
+ self.soup = soup
44
+ self.base_url = base_url
45
+
46
+ def _cleanup_html(self):
47
+ selectors_to_remove = [
48
+ 'nav', 'footer', 'header', 'aside', 'form', 'script', 'style', 'svg', 'button', 'input', 'textarea',
49
+ '[role="navigation"]', '[role="search"]', '[id*="comment"]', '[class*="comment-"]',
50
+ '[id*="sidebar"]', '[class*="sidebar"]', '[id*="related"]', '[class*="related"]',
51
+ '[id*="share"]', '[class*="share"]', '[id*="social"]', '[class*="social"]',
52
+ '[id*="cookie"]', '[class*="cookie"]'
53
+ ]
54
+ for selector in selectors_to_remove:
55
+ for element in self.soup.select(selector):
56
+ element.decompose()
57
+
58
+ def convert(self):
59
+ self._cleanup_html()
60
+ content_node = self.soup.find('main') or self.soup.find('article') or self.soup.find('body')
61
+ if not content_node:
62
+ return "Could not find main content."
63
+ md = self._process_node(content_node)
64
+ return re.sub(r'\n{3,}', '\n\n', md).strip()
65
+
66
+ def _process_node(self, element):
67
+ if isinstance(element, NavigableString): return re.sub(r'\s+', ' ', element.strip())
68
+ if element.name is None or not element.name: return ''
69
+ inner_md = " ".join(self._process_node(child) for child in element.children).strip()
70
+ if element.name in ['p', 'div', 'section']: return f"\n\n{inner_md}\n\n"
71
+ if element.name == 'h1': return f"\n\n# {inner_md}\n\n"
72
+ if element.name == 'h2': return f"\n\n## {inner_md}\n\n"
73
+ if element.name == 'h3': return f"\n\n### {inner_md}\n\n"
74
+ if element.name in ['h4', 'h5', 'h6']: return f"\n\n#### {inner_md}\n\n"
75
+ if element.name == 'li': return f"* {inner_md}\n"
76
+ if element.name in ['ul', 'ol']: return f"\n{inner_md}\n"
77
+ if element.name == 'blockquote': return f"> {inner_md.replace(chr(10), chr(10) + '> ')}\n\n"
78
+ if element.name == 'hr': return "\n\n---\n\n"
79
+ if element.name == 'table':
80
+ header = " | ".join(f"**{th.get_text(strip=True)}**" for th in element.select('thead th, tr th'))
81
+ separator = " | ".join(['---'] * len(header.split('|')))
82
+ rows = [" | ".join(td.get_text(strip=True) for td in tr.find_all('td')) for tr in element.select('tbody tr')]
83
+ return f"\n\n{header}\n{separator}\n" + "\n".join(rows) + "\n\n"
84
+ if element.name == 'pre': return f"\n```\n{element.get_text(strip=True)}\n```\n\n"
85
+ if element.name == 'code': return f"`{inner_md}`"
86
+ if element.name in ['strong', 'b']: return f"**{inner_md}**"
87
+ if element.name in ['em', 'i']: return f"*{inner_md}*"
88
+ if element.name == 'a':
89
+ href = element.get('href', '')
90
+ full_href = urllib.parse.urljoin(self.base_url, href)
91
+ return f"[{inner_md}]({full_href})"
92
+ if element.name == 'img':
93
+ src = element.get('src', '')
94
+ alt = element.get('alt', 'Image').strip()
95
+ full_src = urllib.parse.urljoin(self.base_url, src)
96
+ return f"\n\n![{alt}]({full_src})\n\n"
97
+ return inner_md
98
+
99
+
100
+ # --- 3. CORE API FUNCTION ---
101
+
102
+ async def initialize_playwright():
103
+ """Launches Playwright and browser instances if they don't already exist."""
104
+ if "playwright" not in PLAYWRIGHT_STATE:
105
+ print("🚀 First request received, starting up Playwright...")
106
+ p = await async_playwright().start()
107
+ PLAYWRIGHT_STATE["playwright"] = p
108
+ PLAYWRIGHT_STATE["chromium"] = await p.chromium.launch(headless=True)
109
+ PLAYWRIGHT_STATE["firefox"] = await p.firefox.launch(headless=True)
110
+ PLAYWRIGHT_STATE["webkit"] = await p.webkit.launch(headless=True)
111
+ print("✅ Playwright and browsers are ready.")
112
+
113
+ async def perform_web_browse(query: str, browser_name: str, search_engine: str):
114
+ """
115
+ A stateless function that takes a query, browser, and search engine,
116
+ then returns the parsed content of the resulting page.
117
+ """
118
+ await initialize_playwright()
119
+
120
+ # Determine if the query is a URL or a search term
121
+ is_url = urllib.parse.urlparse(query).scheme in ['http', 'https']
122
+ if is_url:
123
+ url = query
124
+ else:
125
+ search_url_template = SEARCH_ENGINES.get(search_engine)
126
+ if not search_url_template:
127
+ return {"error": f"Invalid search engine: '{search_engine}'. Please choose from the provided list."}
128
+ url = search_url_template.format(query=urllib.parse.quote_plus(query))
129
+
130
+ browser_instance = PLAYWRIGHT_STATE.get(browser_name.lower())
131
+ if not browser_instance:
132
+ return {"error": f"Invalid browser: '{browser_name}'. Use 'chromium', 'firefox', or 'webkit'."}
133
+
134
+ context = await browser_instance.new_context(user_agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36')
135
+ page = await context.new_page()
136
+
137
+ try:
138
+ print(f"Navigating to: {url} using {browser_name}...")
139
+ await page.goto(url, wait_until='domcontentloaded', timeout=30000)
140
+
141
+ final_url = page.url
142
+ title = await page.title() or "No Title"
143
+ print(f"Arrived at: {final_url}")
144
+
145
+ html_content = await page.content()
146
+ soup = BeautifulSoup(html_content, 'lxml')
147
+
148
+ converter = HTML_TO_MARKDOWN_CONVERTER(soup, base_url=final_url)
149
+ markdown_text = converter.convert()
150
+
151
+ print("Content parsed successfully.")
152
+ return {
153
+ "status": "success",
154
+ "query": query,
155
+ "final_url": final_url,
156
+ "page_title": title,
157
+ "markdown_content": markdown_text,
158
+ }
159
+ except Exception as e:
160
+ error_message = str(e).splitlines()[0]
161
+ print(f"An error occurred: {error_message}")
162
+ return {"status": "error", "query": query, "error_message": error_message}
163
+ finally:
164
+ if page: await page.close()
165
+ if context: await context.close()
166
+ print("Session context closed.")
167
+
168
+
169
+ # --- 4. GRADIO INTERFACE & API LAUNCH ---
170
+
171
+ with gr.Blocks(title="Web Browse API", theme=gr.themes.Soft()) as demo:
172
+ gr.Markdown("# Web Browse API")
173
+ gr.Markdown(
174
+ "This interface exposes a stateless API endpoint (`/api/web_browse`) to fetch and parse web content."
175
+ )
176
+
177
+ query_input = gr.Textbox(
178
+ label="URL or Search Query",
179
+ placeholder="e.g., https://openai.com or 'history of artificial intelligence'"
180
+ )
181
+
182
+ with gr.Row():
183
+ browser_input = gr.Dropdown(
184
+ label="Browser",
185
+ choices=["firefox", "chromium", "webkit"],
186
+ value="firefox",
187
+ scale=1
188
+ )
189
+ search_engine_input = gr.Dropdown(
190
+ label="Search Engine (for non-URL queries)",
191
+ choices=sorted(list(SEARCH_ENGINES.keys())),
192
+ value="DuckDuckGo",
193
+ scale=2
194
+ )
195
+
196
+ submit_button = gr.Button("Browse", variant="primary")
197
+ output_json = gr.JSON(label="API Result")
198
+
199
+ submit_button.click(
200
+ fn=perform_web_browse,
201
+ inputs=[query_input, browser_input, search_engine_input],
202
+ outputs=output_json,
203
+ api_name="web_browse" # Creates the POST /api/web_browse endpoint
204
+ )
205
+
206
+ if __name__ == "__main__":
207
+ demo.launch()