from typing import List, AsyncIterable, Optional
from urllib.parse import urlparse, urljoin, quote_plus

from aiohttp import ClientResponse
from bs4 import BeautifulSoup

from plugins.client import MangaClient, MangaCard, MangaChapter, LastChapter

class NineMangaClient(MangaClient):

    base_url = urlparse("https://www.ninemanga.com/")
    search_param = 'wd'
    # Query-string flag that skips the site's content-warning interstitial
    # (the "waring" misspelling is the site's own).
    query_param = 'waring=1'

    pre_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0',
        'Accept-Language': 'en-US,en;q=0.5',
    }

    def __init__(self, *args, name="NineManga", language=None, **kwargs):
        # NineManga serves one sub-domain per language; the default (www)
        # host serves English.
        if language is None:
            language = 'en'
        else:
            self.base_url = urlparse(f"https://{language}.ninemanga.com/")
        self.search_url = urljoin(self.base_url.geturl(), 'search/')
        self.updates_url = self.base_url.geturl()
        super().__init__(*args, name=name, headers=self.pre_headers, **kwargs)
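
    # The parser below assumes search-result markup roughly shaped like this
    # sketch; it is inferred from the selectors used, not from a documented
    # API, and may drift if NineManga changes its templates:
    #
    #   <ul class="direlist">
    #     <li>
    #       <img src="...cover.jpg">
    #       <a class="bookname" href=".../manga/Title.html">Title</a>
    #     </li>
    #     ...
    #   </ul>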

    def mangas_from_page(self, page: bytes):
        bs = BeautifulSoup(page, "html.parser")
        container = bs.find("ul", {"class": "direlist"})
        cards = container.find_all("li")
        mangas = [card.find_next('a', {'class': 'bookname'}) for card in cards]
        names = [manga.string.strip().title() for manga in mangas]
        urls = [manga.get("href") for manga in mangas]
        images = [card.find_next("img").get("src") for card in cards]
        return [MangaCard(self, *tup) for tup in zip(names, urls, images)]

    def chapters_from_page(self, page: bytes, manga: MangaCard = None):
        bs = BeautifulSoup(page, "html.parser")
        container = bs.find("div", {"class": "chapterbox"})
        lis = container.find_all("li")
        items = [li.find_next('a') for li in lis]
        links = [item.get("href") for item in items]
        texts = [item.get("title").strip() for item in items]
        return [MangaChapter(self, text, link, manga, []) for text, link in zip(texts, links)]

    def updates_from_page(self, content):
        bs = BeautifulSoup(content, "html.parser")
        container = bs.find("ul", {"class": "homeupdate"})
        manga_items = container.find_all("li")
        # Map each manga URL to the URL of its most recent chapter, keeping
        # only the first (newest) entry seen per manga.
        urls = dict()
        for manga_item in manga_items[:20]:
            manga_url = manga_item.find_next("a").get("href")
            if manga_url in urls:
                continue
            chapter_url = manga_item.find_next("dl").find_next("a").get("href")
            urls[manga_url] = chapter_url
        return urls

    async def pictures_from_chapters(self, content: bytes, response: Optional[ClientResponse] = None):
        bs = BeautifulSoup(content, "html.parser")
        # The page selector lists one <option> per single image page; the
        # reader also serves "-10-N.html" views with up to 10 images each.
        # `response` is required here (despite the Optional default) to
        # reconstruct those view URLs from the chapter URL.
        container = bs.find("select", {"id": "page"})
        options = container.find_all("option")
        count = 10
        total = len(options)
        # Ceiling division; "(total - 1) // count" dropped the last partial
        # page of images (and fetched nothing at all for total <= 10).
        pages = (total + count - 1) // count
        images_url = []
        for page in range(pages):
            url = f'{str(response.url)[:-5]}-{count}-{page + 1}.html'
            content = await self.get_url(url)
            bs = BeautifulSoup(content, "html.parser")
            images_url += [img.get("src") for img in bs.find_all("img", {"class": "manga_pic"})]
        return images_url
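
    # Worked example for the chunking above: a chapter at ".../123456.html"
    # whose selector lists 25 single pages needs ceil(25 / 10) = 3 fetches,
    # ".../123456-10-1.html" through ".../123456-10-3.html", yielding
    # 10 + 10 + 5 images.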

    async def search(self, query: str = "", page: int = 1) -> List[MangaCard]:
        query = quote_plus(query)
        request_url = self.search_url
        if query:
            request_url += f'?{self.search_param}={query}'
        content = await self.get_url(request_url)
        return self.mangas_from_page(content)

    async def get_chapters(self, manga_card: MangaCard, page: int = 1) -> List[MangaChapter]:
        request_url = f'{manga_card.url}?{self.query_param}'
        content = await self.get_url(request_url)
        # The full chapter list is fetched once and sliced into pages of 20.
        return self.chapters_from_page(content, manga_card)[(page - 1) * 20:page * 20]

    async def iter_chapters(self, manga_url: str, manga_name) -> AsyncIterable[MangaChapter]:
        manga_card = MangaCard(self, manga_name, manga_url, '')
        request_url = f'{manga_card.url}?{self.query_param}'
        content = await self.get_url(request_url)
        for chapter in self.chapters_from_page(content, manga_card):
            yield chapter

    async def contains_url(self, url: str):
        return url.startswith(self.base_url.geturl())

    @staticmethod
    def get_chapter_number_from_url(url):
        # Normalize a chapter URL down to its last path segment, e.g.
        # ".../manga/Title/123456.html" -> "123456".
        if url.endswith('/'):
            url = url[:-1]
        if url.endswith('.html'):
            url = url[:-5]
        return url.split('/')[-1]

    async def check_updated_urls(self, last_chapters: List[LastChapter]):
        content = await self.get_url(self.updates_url)
        updates = self.updates_from_page(content)
        # A manga counts as updated when it appears on the updates page with
        # a chapter identifier different from the last one we recorded.
        updated = [lc.url for lc in last_chapters if updates.get(lc.url) and
                   self.get_chapter_number_from_url(updates.get(lc.url)) !=
                   self.get_chapter_number_from_url(lc.chapter_url)]
        not_updated = [lc.url for lc in last_chapters if not updates.get(lc.url) or
                       self.get_chapter_number_from_url(updates.get(lc.url)) ==
                       self.get_chapter_number_from_url(lc.chapter_url)]
        return updated, not_updated
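

if __name__ == "__main__":
    # Minimal smoke-test sketch, not part of the plugin. It assumes the
    # MangaClient base class can be constructed with no extra positional
    # arguments, provides the async get_url() used above, and that MangaCard
    # exposes .name and .url matching its constructor arguments; adjust to
    # the real plugins.client API if those assumptions do not hold.
    import asyncio

    async def demo():
        client = NineMangaClient(language="en")
        cards = await client.search("one piece")
        for card in cards[:3]:
            print(card.name, card.url)
        if cards:
            chapters = await client.get_chapters(cards[0])
            print(f"{len(chapters)} chapters on page 1")

    asyncio.run(demo())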