import re
from dataclasses import dataclass
from typing import List, AsyncIterable
from urllib.parse import urlparse, urljoin, quote_plus

from bs4 import BeautifulSoup

from models import LastChapter
from plugins.client import MangaClient, MangaCard, MangaChapter
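

# Card subclass that keeps two URLs: the inherited `url` field points at
# MangaBuddy's JSON/HTML chapter API, while `read_url` is the human-facing
# reader page. (Assumes the base MangaCard fields are roughly
# (client, name, url, picture_url), as suggested by how cards are built below.)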
@dataclass
class MangaBuddyCard(MangaCard):
    read_url: str

    def get_url(self):
        return self.read_url
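

# Scraper client for https://mangabuddy.com/. All parsing is tied to the
# site's current markup (`.book-item` cards, `#chapter-list`,
# `.container__left`), so these selectors may need updating if the site
# layout changes.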
class MangaBuddyClient(MangaClient):
    base_url = urlparse("https://mangabuddy.com/")
    search_url = urljoin(base_url.geturl(), "search")
    search_param = 'q'
    home_page = urljoin(base_url.geturl(), "home-page")
    img_server = "https://s1.mbbcdnv1.xyz/file/img-mbuddy/manga/"

    pre_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0'
    }

    def __init__(self, *args, name="MangaBuddy", **kwargs):
        super().__init__(*args, name=name, headers=self.pre_headers, **kwargs)
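
    # Parse a search/listing page into cards. Each `.book-item` links to the
    # manga page; its href doubles as the path segment of the chapter API
    # endpoint used throughout this client.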
    def mangas_from_page(self, page: bytes):
        bs = BeautifulSoup(page, "html.parser")
        cards = bs.find_all("div", {"class": "book-item"})
        mangas = [card.a for card in cards if card.a is not None]
        names = [manga.get("title").strip() for manga in mangas]
        read_url = [urljoin(self.base_url.geturl(), manga.get('href').strip()) for manga in mangas]
        url = [f'https://mangabuddy.com/api/manga{manga.get("href").strip()}/chapters?source=detail'
               for manga in mangas]
        images = [manga.find("img").get('data-src').strip() for manga in mangas]
        return [MangaBuddyCard(self, *tup) for tup in zip(names, url, images, read_url)]
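
    # The chapter API returns markup containing a <ul id="chapter-list">;
    # each <li> carries the chapter link and a <strong class="chapter-title">
    # label.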
    def chapters_from_page(self, page: bytes, manga: MangaCard = None):
        bs = BeautifulSoup(page, "html.parser")
        ul = bs.find('ul', {'id': 'chapter-list'})
        # Guard against layout changes or empty responses rather than
        # raising AttributeError on a missing list.
        if ul is None:
            return []
        a_elems = [li.find('a') for li in ul.find_all('li')]
        links = [urljoin(self.base_url.geturl(), a.get('href')) for a in a_elems]
        texts = [a.find_next('strong', {'class': 'chapter-title'}).text.strip() for a in a_elems]
        return [MangaChapter(self, text, link, manga, []) for text, link in zip(texts, links)]
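
    # Scrape the home page's update column into a mapping of
    # chapter-API URL -> newest chapter URL. Only the first chapter entry
    # seen per manga is kept.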
    def updates_from_page(self, page: bytes):
        bs = BeautifulSoup(page, "html.parser")
        div = bs.find('div', {'class': 'container__left'})
        manga_items = div.find_all('div', {'class': 'book-item'})
        urls = dict()
        for manga_item in manga_items:
            manga_url_part = manga_item.find_next('a').get('href')
            manga_url = f'https://mangabuddy.com/api/manga{manga_url_part}/chapters?source=detail'
            chapter_item = manga_item.find_next("div", {"class": "chap-item"})
            if not chapter_item or not chapter_item.a:
                continue
            chapter_url = urljoin(self.base_url.geturl(), chapter_item.a.get('href'))
            if manga_url not in urls:
                urls[manga_url] = chapter_url
        return urls
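
    # Page image URLs are embedded in the chapter page as a comma-separated
    # string: var chapImages = 'url1,url2,...'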
    async def pictures_from_chapters(self, content: bytes, response=None):
        # re.search instead of findall[0]: return no pictures rather than
        # raising IndexError when the script variable is absent.
        match = re.search(rb"var chapImages = '(.*)'", content)
        if not match:
            return []
        return match.group(1).decode().split(',')
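
    # Pagination is emulated client-side with 20-card slices; the search
    # request itself is not paginated.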
    async def search(self, query: str = "", page: int = 1) -> List[MangaCard]:
        request_url = self.search_url
        if query:
            request_url = f'{request_url}?{self.search_param}={quote_plus(query)}'
        content = await self.get_url(request_url)
        return self.mangas_from_page(content)[(page - 1) * 20:page * 20]

    async def get_chapters(self, manga_card: MangaCard, page: int = 1) -> List[MangaChapter]:
        content = await self.get_url(manga_card.url)
        return self.chapters_from_page(content, manga_card)[(page - 1) * 20:page * 20]
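
    # Yield every chapter for a manga URL without pagination. A throwaway
    # MangaCard is built so chapters_from_page can attach chapters to it.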
    async def iter_chapters(self, manga_url: str, manga_name) -> AsyncIterable[MangaChapter]:
        manga_card = MangaCard(self, manga_name, manga_url, '')
        content = await self.get_url(manga_card.url)
        for chapter in self.chapters_from_page(content, manga_card):
            yield chapter

    async def contains_url(self, url: str):
        return url.startswith(self.base_url.geturl())
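
    # Split tracked entries into (updated, not_updated) by comparing each
    # stored chapter URL against the newest chapter seen on the home page.
    # Entries missing from the home page are treated as not updated.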
    async def check_updated_urls(self, last_chapters: List[LastChapter]):
        content = await self.get_url(self.home_page)
        updates = self.updates_from_page(content)
        updated = [lc.url for lc in last_chapters
                   if updates.get(lc.url) and updates.get(lc.url) != lc.chapter_url]
        not_updated = [lc.url for lc in last_chapters
                       if not updates.get(lc.url) or updates.get(lc.url) == lc.chapter_url]
        return updated, not_updated
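
    # The image host appears to require a site Referer (hotlink protection),
    # so both download helpers add one on top of the default headers.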
    async def get_cover(self, manga_card: MangaCard, *args, **kwargs):
        headers = {**self.pre_headers, 'Referer': self.base_url.geturl()}
        return await super().get_cover(manga_card, *args, headers=headers, **kwargs)

    async def get_picture(self, manga_chapter: MangaChapter, url, *args, **kwargs):
        headers = {**self.pre_headers, 'Referer': self.base_url.geturl()}
        return await super().get_picture(manga_chapter, url, *args, headers=headers, **kwargs)