import json
from typing import List, AsyncIterable
from urllib.parse import urlparse, urljoin, quote

from aiohttp import ClientResponse
from bs4 import BeautifulSoup
from bs4.element import PageElement

from plugins.client import MangaClient, MangaCard, MangaChapter, LastChapter


class MangatigreClient(MangaClient):
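    """Client for mangatigre.net: searches via the site's JSON search endpoint
    and scrapes chapter lists and page images from the HTML."""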

    base_url = urlparse("https://www.mangatigre.net/")
    search_url = urljoin(base_url.geturl(), 'mangas/search')
    manga_url = urljoin(base_url.geturl(), 'manga')
    img_url = urlparse("https://i2.mtcdn.xyz/")
    cover_url = urljoin(img_url.geturl(), "mangas")
    search_param = 'query'

    pre_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:97.0) Gecko/20100101 Firefox/97.0'
    }

    def __init__(self, *args, name="Mangatigre", **kwargs):
        super().__init__(*args, name=name, headers=self.pre_headers, **kwargs)

    def mangas_from_page(self, page: bytes):
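        """Build MangaCard objects from the JSON body returned by the search endpoint."""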
        dt = json.loads(page)
        mangas = dt['result']

        names = [manga.get('name') for manga in mangas]
        urls = [f"{self.manga_url}/{manga.get('slug')}" for manga in mangas]
        images = [f"{self.cover_url}/{manga.get('image')}" for manga in mangas]

        mangas = [MangaCard(self, name, url, image) for name, url, image in zip(names, urls, images)]

        return mangas

    def chapters_from_page(self, page: bytes, manga: MangaCard = None):
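        """Parse a manga page's chapter list into MangaChapter objects."""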
        bs = BeautifulSoup(page, "html.parser")

        ul = bs.find('ul', {'class': 'list-unstyled'})
        lis = ul.find_all("li")

        items = [li.find_next('a') for li in lis]

        links = [item.get('href') for item in items]
        # The title attribute reads "<chapter>: <name>"; keep only the chapter part.
        texts = [item.get('title').split(':')[0] for item in items]

        return [MangaChapter(self, text, link, manga, []) for text, link in zip(texts, links)]

    def updates_from_page(self, page: bytes):
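        """Map manga URLs to their latest chapter URL, scraped from the front page."""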
        bs = BeautifulSoup(page, "html.parser")

        manga_items: List[PageElement] = bs.find_all("article", {"class": "chapter-block"})

        urls = {}

        for manga_item in manga_items:
            manga_url = manga_item.find_next('a').get('href')

            chapter_item = manga_item.find_next("div", {"class": "chapter"})
            chapter_url = chapter_item.find_next("a").get('href')

            urls[manga_url] = chapter_url

        return urls

    async def pictures_from_chapters(self, content: bytes, response: ClientResponse = None):
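        """Return the chapter's page image URLs, first switching the reader to
        the mode that renders every page in one document when needed."""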
        bs = BeautifulSoup(content, "html.parser")

        # BeautifulSoup compares attribute values as strings, so the filter must
        # be '2' rather than the int 2, which would never match.
        btn = bs.find('button', {'data-read-type': '2'})
        if btn:
            token = btn.get('data-token')

            data = {
                '_method': 'patch',
                '_token': token,
                'read_type': '2'
            }

            content = await self.get_url(f'{response.url}/read-type', data=data, method='post')
            bs = BeautifulSoup(content, "html.parser")

        zone = bs.find("div", {"class": "display-zone"})

        # Image sources are protocol-relative and may be lazy-loaded via data-src.
        images = zone.find_all('img')
        images = [f"https:{img.get('data-src') or img.get('src')}" for img in images]

        # Percent-encode unsafe characters while keeping the scheme, slashes and
        # existing escapes intact.
        images_url = [quote(img, safe=':/%') for img in images]

        return images_url

    async def search(self, query: str = "", page: int = 1) -> List[MangaCard]:
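        """Run a search by scraping a CSRF token from the home page, POSTing the
        query, and slicing the JSON result into pages of 20 cards."""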
        main_page = await self.get_url(self.base_url.geturl())

        bs = BeautifulSoup(main_page, "html.parser")
        div = bs.find('div', {'class': 'input-group'})
        token = div.find('input').get('data-csrf')

        request_url = self.search_url

        data = {
            self.search_param: query,
            '_token': token
        }

        content = await self.get_url(request_url, data=data, method='post')

        return self.mangas_from_page(content)[(page - 1) * 20:page * 20]

    async def get_chapters(self, manga_card: MangaCard, page: int = 1) -> List[MangaChapter]:
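        """Return one 20-item page of the manga's chapter list."""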
        chapters = [x async for x in self.iter_chapters(manga_card.url, manga_card.name)]
        return chapters[(page - 1) * 20:page * 20]

    async def iter_chapters(self, manga_url: str, manga_name: str) -> AsyncIterable[MangaChapter]:
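        """Yield all chapters; the full list is only served after POSTing the
        'load more' button's CSRF token back to the manga page."""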
        manga_card = MangaCard(self, manga_name, manga_url, '')

        request_url = manga_card.url

        chapter_page = await self.get_url(request_url)

        bs = BeautifulSoup(chapter_page, "html.parser")
        btn = bs.find('button', {'class': 'btn-load-more-chapters'})
        token = btn.get('data-token')

        data = {'_token': token}

        content = await self.get_url(request_url, data=data, method='post')

        chapters = self.chapters_from_page(content, manga_card)

        for chapter in chapters:
            yield chapter

    async def contains_url(self, url: str):
        return url.startswith(self.base_url.geturl())

    async def check_updated_urls(self, last_chapters: List[LastChapter]):
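        """Split tracked chapters into (updated, not_updated) URL lists by
        comparing them against the front page's latest chapters."""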

        content = await self.get_url(self.base_url.geturl())

        updates = self.updates_from_page(content)

        updated = [lc.url for lc in last_chapters if updates.get(lc.url) and updates.get(lc.url) != lc.chapter_url]
        not_updated = [lc.url for lc in last_chapters if not updates.get(lc.url) or updates.get(lc.url) == lc.chapter_url]

        return updated, not_updated