Update main.py
Browse files
main.py
CHANGED
@@ -58,27 +58,31 @@ def duckduckgo_search(query: str):
|
|
58 |
# ========== OPDS Feed Generator ==========
|
59 |
def create_feed(entries: list, q: Optional[str]) -> bytes:
|
60 |
ns = "http://www.w3.org/2005/Atom"
|
|
|
61 |
ET.register_namespace("", ns)
|
|
|
|
|
62 |
feed = ET.Element("feed", xmlns=ns)
|
63 |
ET.SubElement(feed, "id").text = "urn:uuid:duckopds-catalog"
|
64 |
ET.SubElement(feed, "title").text = "DuckDuckGo OPDS Catalog"
|
65 |
ET.SubElement(feed, "updated").text = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
|
66 |
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
# Add entries
|
76 |
for entry_info in entries:
|
77 |
entry = ET.SubElement(feed, "entry")
|
78 |
ET.SubElement(entry, "id").text = entry_info['id']
|
79 |
ET.SubElement(entry, "title").text = entry_info['title']
|
80 |
ET.SubElement(entry, "updated").text = entry_info['updated']
|
81 |
ET.SubElement(entry, "link", entry_info['link'])
|
|
|
|
|
82 |
|
83 |
return ET.tostring(feed, encoding="utf-8", xml_declaration=True)
|
84 |
|
@@ -86,7 +90,7 @@ def create_feed(entries: list, q: Optional[str]) -> bytes:
|
|
86 |
@app.get("/opds")
|
87 |
def opds(q: Optional[str] = Query(None, description="Search query")) -> Response:
|
88 |
entries = []
|
89 |
-
kind = "
|
90 |
if q:
|
91 |
results = duckduckgo_search(q)
|
92 |
for title, url in results:
|
@@ -100,16 +104,14 @@ def opds(q: Optional[str] = Query(None, description="Search query")) -> Response
|
|
100 |
'type': 'application/fb2+xml'
|
101 |
}
|
102 |
})
|
103 |
-
kind = "acquisition"
|
104 |
xml_data = create_feed(entries, q)
|
105 |
return Response(content=xml_data,
|
106 |
-
media_type=
|
107 |
|
108 |
@app.get("/download")
|
109 |
def download_fb2(url: str) -> Response:
|
110 |
res = requests.get(url, headers={"User-Agent": "Mozilla/5.0"}, timeout=10)
|
111 |
res.raise_for_status()
|
112 |
-
from bs4 import BeautifulSoup
|
113 |
soup = BeautifulSoup(res.text, "html.parser")
|
114 |
title = soup.title.string.strip() if soup.title and soup.title.string else "article"
|
115 |
fb2 = html_to_fb2(title, str(soup.body))
|
|
|
58 |
# ========== OPDS Feed Generator ==========
def create_feed(entries: list, q: Optional[str]) -> bytes:
    """Build an OPDS (Atom) catalog feed as serialized XML bytes.

    Parameters
    ----------
    entries : list
        Entry dicts with keys ``'id'``, ``'title'``, ``'updated'`` (str values)
        and ``'link'`` (a dict of Atom <link> attributes: rel/href/type).
    q : Optional[str]
        The search query this feed answers. When falsy (root catalog view),
        an OpenSearch-style templated search link is advertised instead.

    Returns
    -------
    bytes
        UTF-8 encoded Atom XML document with an XML declaration.
    """
    ns = "http://www.w3.org/2005/Atom"
    opds_ns = "http://opds-spec.org/2010/catalog"
    # Register the Atom namespace as the default so output tags are unprefixed;
    # the OPDS namespace keeps its "opds" prefix.
    ET.register_namespace("", ns)
    ET.register_namespace("opds", opds_ns)

    feed = ET.Element("feed", xmlns=ns)
    ET.SubElement(feed, "id").text = "urn:uuid:duckopds-catalog"
    ET.SubElement(feed, "title").text = "DuckDuckGo OPDS Catalog"
    # NOTE(review): datetime.utcnow() is deprecated since Python 3.12 —
    # consider datetime.now(timezone.utc) once the module imports timezone.
    ET.SubElement(feed, "updated").text = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")

    if not q:
        # Root catalog: advertise the search endpoint so OPDS clients can
        # substitute {searchTerms} into the href template.
        ET.SubElement(feed, "link", {
            "rel": "search",
            "type": "application/atom+xml",
            "href": "/opds?q={searchTerms}",
            "templated": "true"
        })

    for entry_info in entries:
        entry = ET.SubElement(feed, "entry")
        ET.SubElement(entry, "id").text = entry_info['id']
        ET.SubElement(entry, "title").text = entry_info['title']
        ET.SubElement(entry, "updated").text = entry_info['updated']
        # entry_info['link'] is a dict of attributes for the Atom <link>.
        ET.SubElement(entry, "link", entry_info['link'])
        ET.SubElement(entry, "content", {"type": "text"}).text = entry_info['title']
        # BUGFIX: the previous code did ET.Element("name", text="DuckOPDS"),
        # which sets an XML *attribute* text="DuckOPDS" and leaves the
        # element's text content empty. Set .text explicitly instead.
        author = ET.SubElement(entry, "author")
        ET.SubElement(author, "name").text = "DuckOPDS"

    return ET.tostring(feed, encoding="utf-8", xml_declaration=True)
|
88 |
|
|
|
90 |
@app.get("/opds")
|
91 |
def opds(q: Optional[str] = Query(None, description="Search query")) -> Response:
|
92 |
entries = []
|
93 |
+
kind = "acquisition"
|
94 |
if q:
|
95 |
results = duckduckgo_search(q)
|
96 |
for title, url in results:
|
|
|
104 |
'type': 'application/fb2+xml'
|
105 |
}
|
106 |
})
|
|
|
107 |
xml_data = create_feed(entries, q)
|
108 |
return Response(content=xml_data,
|
109 |
+
media_type="application/atom+xml;charset=utf-8")
|
110 |
|
111 |
@app.get("/download")
|
112 |
def download_fb2(url: str) -> Response:
|
113 |
res = requests.get(url, headers={"User-Agent": "Mozilla/5.0"}, timeout=10)
|
114 |
res.raise_for_status()
|
|
|
115 |
soup = BeautifulSoup(res.text, "html.parser")
|
116 |
title = soup.title.string.strip() if soup.title and soup.title.string else "article"
|
117 |
fb2 = html_to_fb2(title, str(soup.body))
|