# File: main/app.py # Purpose: One Space that offers three tools/tabs: # 1) Fetch — extract relevant page content (title, metadata, clean text, hyperlinks) # 2) DuckDuckGo Search — compact JSONL search output (short keys to minimize tokens) # 3) Python Code Executor — run Python code and capture stdout/errors from __future__ import annotations import re import json import sys from io import StringIO from typing import List, Dict, Tuple import gradio as gr import requests from bs4 import BeautifulSoup from readability import Document from urllib.parse import urljoin, urldefrag, urlparse from duckduckgo_search import DDGS # ============================== # Fetch: HTTP + extraction utils # ============================== def _http_get(url: str) -> requests.Response: """ Download the page politely with a short timeout and realistic headers. (Layman's terms: grab the web page like a normal browser would, but quickly.) """ headers = { "User-Agent": "Mozilla/5.0 (compatible; WebMCP/1.0; +https://example.com)", "Accept-Language": "en-US,en;q=0.9", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", } return requests.get(url, headers=headers, timeout=15) def _normalize_whitespace(text: str) -> str: """ Squeeze extra spaces and blank lines to keep things compact. (Layman's terms: tidy up the text so it’s not full of weird spacing.) """ text = re.sub(r"[ \t\u00A0]+", " ", text) text = re.sub(r"\n\s*\n\s*\n+", "\n\n", text.strip()) return text.strip() def _truncate(text: str, max_chars: int) -> Tuple[str, bool]: """ Cut text if it gets too long; return the text and whether we trimmed. (Layman's terms: shorten long text and tell us if we had to cut it.) """ if max_chars is None or max_chars <= 0 or len(text) <= max_chars: return text, False return text[:max_chars].rstrip() + " …", True def _shorten(text: str, limit: int) -> str: """ Hard cap a string with an ellipsis to keep tokens small. (Layman's terms: force a string to a max length with an ellipsis.) """ if limit <= 0 or len(text) <= limit: return text return text[: max(0, limit - 1)].rstrip() + "…" def _domain_of(url: str) -> str: """ Show a friendly site name like "example.com". (Layman's terms: pull the website's domain.) """ try: return urlparse(url).netloc or "" except Exception: return "" def _meta(soup: BeautifulSoup, name: str) -> str | None: tag = soup.find("meta", attrs={"name": name}) return tag.get("content") if tag and tag.has_attr("content") else None def _og(soup: BeautifulSoup, prop: str) -> str | None: tag = soup.find("meta", attrs={"property": prop}) return tag.get("content") if tag and tag.has_attr("content") else None def _extract_metadata(soup: BeautifulSoup, final_url: str) -> Dict[str, str]: """ Pull the useful bits: title, description, site name, canonical URL, language, etc. (Layman's terms: gather page basics like title/description/address.) """ meta: Dict[str, str] = {} # Title preference: > og:title > twitter:title title_candidates = [ (soup.title.string if soup.title and soup.title.string else None), _og(soup, "og:title"), _meta(soup, "twitter:title"), ] meta["title"] = next((t.strip() for t in title_candidates if t and t.strip()), "") # Description preference: description > og:description > twitter:description desc_candidates = [ _meta(soup, "description"), _og(soup, "og:description"), _meta(soup, "twitter:description"), ] meta["description"] = next((d.strip() for d in desc_candidates if d and d.strip()), "") # Canonical link (helps dedupe) link_canonical = soup.find("link", rel=lambda v: v and "canonical" in v) meta["canonical"] = (link_canonical.get("href") or "").strip() if link_canonical else "" # Site name + language info if present meta["site_name"] = (_og(soup, "og:site_name") or "").strip() html_tag = soup.find("html") meta["lang"] = (html_tag.get("lang") or "").strip() if html_tag else "" # Final URL + domain meta["fetched_url"] = final_url meta["domain"] = _domain_of(final_url) return meta def _extract_main_text(html: str) -> Tuple[str, BeautifulSoup]: """ Use Readability to isolate the main article and turn it into clean text. Returns (clean_text, soup_of_readable_html). (Layman's terms: find the real article text and clean it.) """ # Simplified article HTML from Readability doc = Document(html) readable_html = doc.summary(html_partial=True) # Parse simplified HTML s = BeautifulSoup(readable_html, "lxml") # Remove noisy tags for sel in ["script", "style", "noscript", "iframe", "svg"]: for tag in s.select(sel): tag.decompose() # Keep paragraphs, list items, and subheadings for structure without bloat text_parts: List[str] = [] for p in s.find_all(["p", "li", "h2", "h3", "h4", "blockquote"]): chunk = p.get_text(" ", strip=True) if chunk: text_parts.append(chunk) clean_text = _normalize_whitespace("\n\n".join(text_parts)) return clean_text, s def _extract_links(readable_soup: BeautifulSoup, base_url: str, max_links: int) -> List[Tuple[str, str]]: """ Collect clean, unique, absolute links from the readable section only. (Layman's terms: pull a tidy list of links from the article body.) """ seen = set() links: List[Tuple[str, str]] = [] for a in readable_soup.find_all("a", href=True): href = a.get("href").strip() # Skip junk links we can't use if not href or href.startswith("#") or href.startswith("mailto:") or href.startswith("javascript:"): continue # Resolve relative URLs, strip fragments (#…) absolute = urljoin(base_url, href) absolute, _ = urldefrag(absolute) if absolute in seen: continue seen.add(absolute) text = a.get_text(" ", strip=True) if len(text) > 120: text = text[:117] + "…" links.append((text or absolute, absolute)) if len(links) >= max_links > 0: break return links def _format_markdown( meta: Dict[str, str], body: str, body_truncated: bool, links: List[Tuple[str, str]], include_text: bool, include_metadata: bool, include_links: bool, verbosity: str, ) -> str: """ Assemble a compact Markdown summary with optional sections. (Layman's terms: build the final markdown output with options.) """ lines: List[str] = [] # Title header title = meta.get("title") or meta.get("domain") or "Untitled" lines.append(f"# {title}") # Metadata section (only show what exists) if include_metadata: md: List[str] = [] if meta.get("description"): md.append(f"- **Description:** {meta['description']}") if meta.get("site_name"): md.append(f"- **Site:** {meta['site_name']}") if meta.get("canonical"): md.append(f"- **Canonical:** {meta['canonical']}") if meta.get("lang"): md.append(f"- **Language:** {meta['lang']}") if meta.get("fetched_url"): md.append(f"- **Fetched From:** {meta['fetched_url']}") if md: lines.append("## Metadata") lines.extend(md) # Body text if include_text and body: if verbosity == "Brief": brief, was_more = _truncate(body, 800) lines.append("## Text") lines.append(brief) if was_more or body_truncated: lines.append("\n> (Trimmed for brevity)") else: lines.append("## Text") lines.append(body) if body_truncated: lines.append("\n> (Trimmed for brevity)") # Links section if include_links and links: lines.append(f"## Links ({len(links)})") for text, url in links: lines.append(f"- [{text}]({url})") return "\n\n".join(lines).strip() def Fetch_Webpage( # <-- MCP tool #1 (Fetch) url: str, verbosity: str = "Standard", include_metadata: bool = True, include_text: bool = True, include_links: bool = True, max_chars: int = 3000, max_links: int = 20, ) -> str: """ Fetch a web page and return a compact Markdown summary containing title, key metadata, readable main text, and outbound links. This function powers the MCP tool "Fetch_Webpage". Descriptions below are provided explicitly for MCP clients that surface parameter help. Args: url (str): The absolute URL to fetch. Must return HTML. Example: "https://example.com/article". verbosity (str, optional): Controls how much body text to include. One of: - "Brief" (about 800–1200 chars) - "Standard" (about 3000 chars) - "Full" (no practical cap apart from max_chars) Default: "Standard". include_metadata (bool, optional): If True, include a Metadata section with description, site name, canonical URL, language, and fetched URL. Default: True. include_text (bool, optional): If True, include the readable main text extracted with Readability. Default: True. include_links (bool, optional): If True, include outbound links discovered in the readable section of the page. Default: True. max_chars (int, optional): Hard maximum number of characters for the body text, applied after the verbosity preset. Use 0 or a negative value to disable the cap (the verbosity preset will still apply). Default: 3000. max_links (int, optional): Maximum number of links to include. Set 0 to omit links even if include_links=True. Default: 20. Returns: str: Markdown that may contain the following sections: - Title (H1) - Metadata (optional) - Text (optional, may be trimmed) - Links (optional, deduped and absolute) Notes: - Only HTML content types are supported. Non-HTML responses return a short message indicating the unsupported content type. - Link text is shortened to keep tokens small; URLs are absolute and fragments are removed. """ if not url or not url.strip(): return "Please enter a valid URL." try: resp = _http_get(url) resp.raise_for_status() except requests.exceptions.RequestException as e: return f"An error occurred: {e}" final_url = str(resp.url) ctype = resp.headers.get("Content-Type", "") if "html" not in ctype.lower(): return f"Unsupported content type for extraction: {ctype or 'unknown'}" # Decode to text resp.encoding = resp.encoding or resp.apparent_encoding html = resp.text # Full-page soup for metadata full_soup = BeautifulSoup(html, "lxml") meta = _extract_metadata(full_soup, final_url) # Readable content body_text, readable_soup = _extract_main_text(html) if not body_text: # Fallback to "whole-page text" if Readability found nothing fallback_text = full_soup.get_text(" ", strip=True) body_text = _normalize_whitespace(fallback_text) # Verbosity presets (we keep the smaller of preset vs. user cap) preset_caps = {"Brief": 1200, "Standard": 3000, "Full": 999_999} target_cap = preset_caps.get(verbosity, 3000) cap = min(max_chars if max_chars > 0 else target_cap, target_cap) body_text, truncated = _truncate(body_text, cap) if include_text else ("", False) # Extract links from the simplified content only links = _extract_links(readable_soup, final_url, max_links=max_links if include_links else 0) # Final compact Markdown md = _format_markdown( meta=meta, body=body_text, body_truncated=truncated, links=links, include_text=include_text, include_metadata=include_metadata, include_links=include_links, verbosity=verbosity, ) return md or "No content could be extracted." # =============================== # DuckDuckGo Search (JSONL lines) # =============================== # ============================================ # Concise DDG: ultra-succinct JSONL for tokens # ============================================ def Search_DuckDuckGo( # <-- MCP tool #2 (DDG Search) query: str, max_results: int = 5, include_snippets: bool = False, max_snippet_chars: int = 80, dedupe_domains: bool = True, title_chars: int = 80, ) -> str: """ Run a DuckDuckGo search and return ultra-compact JSONL with short keys to minimize tokens. This function powers the MCP tool "Search_DuckDuckGo". Each line in the output is a JSON object using short keys to reduce token usage. Args: query (str): The search query. Examples: "langchain streaming" or "vector database site:docs.pinecone.io". max_results (int, optional): Maximum number of results to return. Range: 1–20. Default: 5. include_snippets (bool, optional): If True, includes a short snippet for each result (key "s"). This increases tokens. Default: False. max_snippet_chars (int, optional): Character cap applied to each snippet when include_snippets=True. Default: 80. dedupe_domains (bool, optional): If True, only the first result from each domain is kept. Default: True. title_chars (int, optional): Character cap applied to titles. Default: 80. Returns: str: Newline-delimited JSON (JSONL). Each line has: {"t": "title", "u": "url"[, "s": "snippet"]} Error Handling: On search errors, returns a single JSON object with an "error" key. """ if not query or not query.strip(): return "" try: with DDGS() as ddgs: raw = ddgs.text(query, max_results=max_results) except Exception as e: return json.dumps({"error": str(e)[:120]}, ensure_ascii=False, separators=(",", ":")) seen_domains = set() lines: List[str] = [] for r in raw or []: title = _shorten((r.get("title") or "").strip(), title_chars) url = (r.get("href") or r.get("link") or "").strip() body = (r.get("body") or r.get("snippet") or "").strip() if not url: continue if dedupe_domains: dom = _domain_of(url) if dom in seen_domains: continue seen_domains.add(dom) obj = {"t": title or _domain_of(url), "u": url} if include_snippets and body: obj["s"] = _shorten(body, max_snippet_chars) # Emit most compact JSON possible (no spaces) lines.append(json.dumps(obj, ensure_ascii=False, separators=(",", ":"))) # Join as JSONL (each result on its own line) return "\n".join(lines) # ====================================== # Code Execution: Python (MCP tool #6) # ====================================== def Execute_Python(code: str) -> str: """ Execute arbitrary Python code and return captured stdout or an error message. This function powers the MCP tool "Execute_Python" and mirrors the standalone code interpreter behavior used in the UI tab. Args: code (str): The Python source code to run. The code executes in a single call with a fresh global scope. Prints are captured and returned. Returns: str: Combined stdout produced by the code, or the exception text if execution failed. Security: The code executes within the current Python process. Do not run untrusted code in environments where it could access sensitive data. """ if code is None: return "No code provided." old_stdout = sys.stdout redirected_output = sys.stdout = StringIO() try: exec(code) return redirected_output.getvalue() except Exception as e: return str(e) finally: sys.stdout = old_stdout # ====================== # UI: three-tab interface # ====================== # --- Fetch tab (compact controllable extraction) --- fetch_interface = gr.Interface( fn=Fetch_Webpage, # connect the function to the UI inputs=[ gr.Textbox(label="URL", placeholder="https://example.com/article"), gr.Dropdown(label="Verbosity", choices=["Brief", "Standard", "Full"], value="Standard"), gr.Checkbox(value=True, label="Include Metadata"), gr.Checkbox(value=True, label="Include Main Text"), gr.Checkbox(value=True, label="Include Links"), gr.Slider(400, 12000, value=3000, step=100, label="Max Characters (body text)"), gr.Slider(0, 100, value=20, step=1, label="Max Links"), ], outputs=gr.Markdown(label="Extracted Summary"), title="Fetch Webpage", description=( "<div style=\"text-align:center\">Extract title, key metadata, readable text, and links. No noisy HTML.</div>" ), api_description=( "Fetch a web page and return a compact Markdown summary with title, key " "metadata, readable body text, and outbound links. Parameters let you " "control verbosity, whether to include metadata/text/links, and limits " "for characters and number of links." ), allow_flagging="never", theme="Nymbo/Nymbo_Theme", ) # --- Concise DDG tab (JSONL with short keys, minimal tokens) --- concise_interface = gr.Interface( fn=Search_DuckDuckGo, inputs=[ gr.Textbox(label="Query", placeholder="topic OR site:example.com"), gr.Slider(minimum=1, maximum=20, value=5, step=1, label="Max results"), gr.Checkbox(value=False, label="Include snippets (adds tokens)"), gr.Slider(minimum=20, maximum=200, value=80, step=5, label="Max snippet chars"), gr.Checkbox(value=True, label="Dedupe by domain"), gr.Slider(minimum=20, maximum=120, value=80, step=5, label="Max title chars"), ], outputs=gr.Textbox(label="Results (JSONL)", interactive=False), title="DuckDuckGo Search", description=( "<div style=\"text-align:center\">Emits JSONL with short keys (t,u[,s]). Defaults avoid snippets and duplicate domains.</div>" ), api_description=( "Run a DuckDuckGo search and return newline-delimited JSON with short keys: " "t=title, u=url, optional s=snippet. Options control result count, " "snippet inclusion and length, domain deduping, and title length." ), allow_flagging="never", theme="Nymbo/Nymbo_Theme", submit_btn="Search", ) ## Removed Structured, Raw, and Sitemap tabs # --- Execute Python tab (simple code interpreter) --- code_interface = gr.Interface( fn=Execute_Python, inputs=gr.Code(label="Python Code", language="python"), outputs=gr.Textbox(label="Output"), title="Python Code Executor", description=( "<div style=\"text-align:center\">Execute Python code and see the output. This app is also an MCP server for LLMs.</div>" ), api_description=( "Execute arbitrary Python code and return captured stdout or an error message.\n\n" "Parameters:\n" "- code (string): The Python source code to run.\n\n" "Returns:\n" "- string: Combined stdout produced by the code, or the exception text if execution failed." ), allow_flagging="never", theme="Nymbo/Nymbo_Theme", ) # --- Combine all into a single app with tabs --- demo = gr.TabbedInterface( interface_list=[fetch_interface, concise_interface, code_interface], tab_names=[ "Fetch Webpage", "DuckDuckGo Search", "Python Code Executor", ], title="Tools MCP", theme="Nymbo/Nymbo_Theme", css=""" .gradio-container h1 { text-align: center; } .gradio-container h1::after { content: "Fetch Webpage | Search DuckDuckGo | Code Interpreter"; display: block; font-size: 1rem; font-weight: 500; opacity: 0.9; margin-top: 6px; } """, ) # Launch the UI and expose all functions as MCP tools in one server if __name__ == "__main__": demo.launch(mcp_server=True)