from fastapi import FastAPI, HTTPException, Query
from pydantic import BaseModel
from playwright.async_api import async_playwright
import base64
import logging
from typing import List, Optional

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI(title="Playwright Web Scraper", description="A simple web scraper using Playwright")


class LinkInfo(BaseModel):
    text: str
    href: str


class ScrapeResponse(BaseModel):
    body_content: Optional[str] = None
    screenshot: Optional[str] = None
    links: Optional[List[LinkInfo]] = None
    page_title: Optional[str] = None
    meta_description: Optional[str] = None


@app.get("/")
async def root():
    return {
        "message": "Playwright Web Scraper API - Body, Links & Screenshots",
        "endpoints": {
            "/scrape": "Scrape webpage body content, links, and take screenshot",
            "/docs": "API documentation"
        },
        "example": "/scrape?url=https://example.com&screenshot=true&get_links=true&get_body=true",
        "features": [
            "Extract body tag content (clean text)",
            "Get all links with text and URLs",
            "Take full page screenshot",
            "Extract page title and meta description"
        ]
    }


@app.get("/scrape")
async def scrape_page(
    url: str = Query(..., description="URL to scrape"),
    screenshot: bool = Query(True, description="Take a full page screenshot"),
    get_links: bool = Query(True, description="Extract all links from the page"),
    get_body: bool = Query(True, description="Extract body tag content")
):
    logger.info(f"Starting scrape for URL: {url}")
    try:
        async with async_playwright() as p:
            logger.info("Launching browser...")
            browser = await p.chromium.launch(
                headless=True,
                args=[
                    '--no-sandbox',
                    '--disable-setuid-sandbox',
                    '--disable-dev-shm-usage',
                    '--disable-accelerated-2d-canvas',
                    '--no-first-run',
                    '--no-zygote',
                    '--disable-gpu'
                ]
            )
            page = await browser.new_page()

            try:
                logger.info(f"Navigating to {url}...")
                await page.goto(url, wait_until="networkidle")
                response = ScrapeResponse()

                logger.info("Getting page metadata...")
                response.page_title = await page.title()

                meta_desc = await page.evaluate("""
                    () => {
                        const meta = document.querySelector('meta[name="description"]');
                        return meta ? meta.getAttribute('content') : null;
                    }
                """)
                response.meta_description = meta_desc

                if get_body:
                    logger.info("Extracting body content...")
                    body_content = await page.evaluate("""
                        () => {
                            const body = document.querySelector('body');
                            if (!body) return null;

                            // Remove script and style elements
                            const scripts = body.querySelectorAll('script, style, noscript');
                            scripts.forEach(el => el.remove());

                            // Get clean text content
                            return body.innerText.trim();
                        }
                    """)
                    response.body_content = body_content

                if screenshot:
                    logger.info("Taking full page screenshot...")
                    screenshot_bytes = await page.screenshot(full_page=True)
                    response.screenshot = base64.b64encode(screenshot_bytes).decode('utf-8')

                if get_links:
                    logger.info("Extracting links...")
                    links = await page.evaluate("""
                        () => {
                            return Array.from(document.querySelectorAll('a[href]')).map(a => {
                                const text = a.innerText.trim();
                                const href = a.href;

                                // Only include links with meaningful text and valid URLs
                                if (text && href && href.startsWith('http')) {
                                    return {
                                        text: text.substring(0, 200), // Limit text length
                                        href: href
                                    };
                                }
                                return null;
                            }).filter(link => link !== null);
                        }
                    """)
                    response.links = [LinkInfo(**link) for link in links]

                await browser.close()
                logger.info("Scraping completed successfully")
                return response

            except Exception as e:
                logger.error(f"Error during scraping: {str(e)}")
                await browser.close()
                raise HTTPException(status_code=500, detail=f"Scraping error: {str(e)}")

    except HTTPException:
        # Re-raise HTTP errors from the inner handler unchanged instead of
        # wrapping them again in a misleading "Browser launch error" response.
        raise
    except Exception as e:
        logger.error(f"Error launching browser: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Browser launch error: {str(e)}")
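

# Optional local entry point: a minimal sketch for running the API directly,
# assuming uvicorn is installed alongside FastAPI. In most deployments the app
# is served via `uvicorn <module>:app` instead, so this block is only a
# convenience and can be removed if unneeded.
# Example request once running:
#   http://localhost:8000/scrape?url=https://example.com&screenshot=true
if __name__ == "__main__":
    import uvicorn

    # Bind to all interfaces on port 8000; adjust host/port as needed.
    uvicorn.run(app, host="0.0.0.0", port=8000)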