Create duckopds/main.py
duckopds/main.py · ADDED (+81 −0)
# duckopds/main.py

import re
import xml.etree.ElementTree as ET
from datetime import datetime, timezone
from urllib.parse import quote
from xml.sax.saxutils import escape

import requests
from bs4 import BeautifulSoup
from fastapi import FastAPI, Query, Response

app = FastAPI()

# ========== FB2 Generator ==========
def html_to_fb2(title: str, body: str) -> str:
    # Strip the markup, then escape both the title and the text so they
    # cannot break the FB2 document's own XML.
    clean_text = escape(BeautifulSoup(body, "html.parser").get_text())
    safe_title = escape(title)
    fb2 = f"""<?xml version='1.0' encoding='utf-8'?>
<FictionBook xmlns:xlink='http://www.w3.org/1999/xlink'>
<description>
<title-info>
<genre>nonfiction</genre>
<author><first-name>OPDS</first-name><last-name>DuckScraper</last-name></author>
<book-title>{safe_title}</book-title>
<lang>en</lang>
</title-info>
</description>
<body>
<section><title><p>{safe_title}</p></title><p>{clean_text}</p></section>
</body>
</FictionBook>
"""
    return fb2

# ========== DuckDuckGo Search ==========
def duckduckgo_search(query: str):
    # The HTML endpoint takes the query as a URL parameter on GET requests.
    res = requests.get(
        "https://html.duckduckgo.com/html/",
        params={"q": query},
        headers={"User-Agent": "Mozilla/5.0"},
        timeout=10,
    )
    soup = BeautifulSoup(res.text, "html.parser")
    results = []
    for a in soup.select("a.result__a"):
        href = a.get("href")
        title = a.get_text()
        if href and title:
            results.append((title.strip(), href))
        if len(results) >= 10:
            break
    return results

# ========== OPDS Feed ==========
def generate_opds(query: str, results):
    ns = "http://www.w3.org/2005/Atom"
    ET.register_namespace("", ns)
    feed = ET.Element("feed", xmlns=ns)
    updated = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    # Atom requires id/updated on the feed as well as on each entry.
    ET.SubElement(feed, "title").text = f"Search results for '{query}'"
    ET.SubElement(feed, "id").text = f"urn:duckopds:search:{quote(query)}"
    ET.SubElement(feed, "updated").text = updated
    for title, url in results:
        entry = ET.SubElement(feed, "entry")
        ET.SubElement(entry, "title").text = title
        ET.SubElement(entry, "id").text = url
        ET.SubElement(entry, "updated").text = updated
        ET.SubElement(entry, "link", {
            "rel": "http://opds-spec.org/acquisition",
            "href": f"/download?url={quote(url)}",
            "type": "application/fb2+xml",
        })
    return ET.tostring(feed, encoding="utf-8", xml_declaration=True)

@app.get("/opds")
def opds_catalog(q: str = Query(..., description="Search query")):
    results = duckduckgo_search(q)
    xml_data = generate_opds(q, results)
    return Response(content=xml_data, media_type="application/atom+xml")

@app.get("/download")
def download_fb2(url: str):
    try:
        res = requests.get(url, headers={"User-Agent": "Mozilla/5.0"}, timeout=10)
        soup = BeautifulSoup(res.text, "html.parser")
        # Both soup.title and soup.body can be None on non-HTML responses.
        title = soup.title.string if soup.title and soup.title.string else "No Title"
        body = str(soup.body) if soup.body else res.text
        fb2 = html_to_fb2(title, body)
        filename = re.sub(r"[^a-zA-Z0-9]+", "_", title)[:30]
        return Response(
            content=fb2,
            media_type="application/fb2+xml",
            headers={"Content-Disposition": f"attachment; filename={filename}.fb2"},
        )
    except Exception as e:
        return {"error": str(e)}
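
A quick way to see what html_to_fb2 produces is to call it directly. The sketch below uses only names defined in the file above; it assumes duckopds is importable as a package (run it from the repository root, adding an empty duckopds/__init__.py if needed).

# fb2_check.py -- standalone sketch; assumes duckopds.main is importable.
from duckopds.main import html_to_fb2

fb2 = html_to_fb2("A <Test> & Title", "<html><body><p>Hello, world.</p></body></html>")
assert fb2.startswith("<?xml")               # FB2 is plain XML
assert "&lt;Test&gt; &amp; Title" in fb2     # markup in titles is escaped
assert "Hello, world." in fb2                # body text survives the tag strip
print(fb2)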
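
For an end-to-end check, FastAPI's TestClient can drive both routes in-process; a minimal smoke-test sketch follows. It assumes httpx is installed (TestClient depends on it), and since both handlers fetch live pages, it needs network access. In normal use the app would be served with something like uvicorn duckopds.main:app and an OPDS-capable reader pointed at /opds?q=<terms>.

# smoke_test.py -- minimal sketch; exercises both endpoints in-process.
from fastapi.testclient import TestClient

from duckopds.main import app

client = TestClient(app)

# The catalog endpoint should return an Atom/OPDS feed.
feed = client.get("/opds", params={"q": "project gutenberg"})
assert feed.status_code == 200
assert feed.headers["content-type"].startswith("application/atom+xml")
assert b"<feed" in feed.content

# The download endpoint should wrap an arbitrary page as FB2.
book = client.get("/download", params={"url": "https://example.com/"})
assert book.status_code == 200
assert b"<FictionBook" in book.content
print("both endpoints responded as expected")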