Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
mikf
GitHub Repository: mikf/gallery-dl
Path: blob/master/gallery_dl/extractor/batoto.py
5399 views
1
# -*- coding: utf-8 -*-
2
3
# This program is free software; you can redistribute it and/or modify
4
# it under the terms of the GNU General Public License version 2 as
5
# published by the Free Software Foundation.
6
7
"""Extractors for https://bato.to/"""
8
9
from .common import Extractor, ChapterExtractor, MangaExtractor
10
from .. import text, util
11
from ..cache import memcache
12
13
# Matches every known bato.to mirror domain; the single capture group holds
# the domain itself, which BatotoBase._init_root() reuses to pick self.root.
BASE_PATTERN = (r"(?:https?://)?("
                r"(?:ba|d|f|h|j|m|w)to\.to|"
                r"(?:(?:manga|read)toto|batocomic|[xz]bato)\.(?:com|net|org)|"
                r"comiko\.(?:net|org)|"
                r"bat(?:otoo|o?two)\.com)")
18
19
# https://rentry.co/batoto
# Currently active mirror domains
DOMAINS = {
    "dto.to",
    "fto.to",
    "hto.to",
    "jto.to",
    "mto.to",
    "wto.to",
    "xbato.com",
    "xbato.net",
    "xbato.org",
    "zbato.com",
    "zbato.net",
    "zbato.org",
    "readtoto.com",
    "readtoto.net",
    "readtoto.org",
    "batocomic.com",
    "batocomic.net",
    "batocomic.org",
    "batotoo.com",
    "batotwo.com",
    "comiko.net",
    "comiko.org",
    "battwo.com",
}
# Older domains; using one triggers a one-time warning in _init_root()
LEGACY_DOMAINS = {
    "bato.to",
    "mangatoto.com",
    "mangatoto.net",
    "mangatoto.org",
}
51
52
53
class BatotoBase():
    """Base class for batoto extractors"""
    category = "batoto"
    root = "https://xbato.org"
    # class-level flag so the legacy-domain warning is emitted only once
    _warn_legacy = True

    def _init_root(self):
        """Set self.root according to the 'domain' config option.

        Accepted values: None/"auto"/"url" (use the URL's domain, warn
        once if it is a legacy one), "nolegacy" (use the URL's domain but
        silently swap legacy domains for xbato.org), "nowarn" (use the
        URL's domain as-is), or an explicit domain name.
        """
        choice = self.config("domain")

        if choice == "nowarn":
            # take the domain from the matched URL, never warn
            host = self.groups[0]
        elif choice == "nolegacy":
            # take the domain from the matched URL, but replace legacy ones
            host = self.groups[0]
            if host in LEGACY_DOMAINS:
                host = "xbato.org"
        elif choice is None or choice in ("auto", "url"):
            # take the domain from the matched URL; warn once about legacy ones
            host = self.groups[0]
            if host in LEGACY_DOMAINS and self._warn_legacy:
                BatotoBase._warn_legacy = False
                self.log.warning("Legacy domain '%s'", host)
        else:
            # an explicit domain name was configured
            host = choice

        self.root = "https://" + host

    def request(self, url, **kwargs):
        """Send an HTTP request, forcing UTF-8 decoding of the response."""
        kwargs["encoding"] = "utf-8"
        return Extractor.request(self, url, **kwargs)
78
79
80
class BatotoChapterExtractor(BatotoBase, ChapterExtractor):
    """Extractor for batoto manga chapters"""
    archive_fmt = "{chapter_id}_{page}"
    pattern = BASE_PATTERN + r"/(?:title/[^/?#]+|chapter)/(\d+)"
    example = "https://xbato.org/title/12345-MANGA/54321"

    def __init__(self, match):
        ChapterExtractor.__init__(self, match, False)
        self._init_root()
        # group 1 of the URL pattern is the numeric chapter ID
        self.chapter_id = self.groups[1]
        # the manga ID segment is ignored by the site, so '0' works here
        self.page_url = f"{self.root}/title/0/{self.chapter_id}"

    def metadata(self, page):
        """Extract chapter metadata from a chapter's HTML page.

        NOTE: the extr() calls consume the page sequentially and must stay
        in this exact order.
        """
        extr = text.extract_from(page)
        try:
            # <title> is "<manga> - <chapter info> - <site>"; if the manga
            # name itself contains " - ", the unpack fails and we fall back
            # to the page body below
            manga, info, _ = extr("<title>", "<").rsplit(" - ", 3)
        except ValueError:
            manga = info = None

        # numeric manga ID taken from the canonical URL
        manga_id = text.extr(
            extr('rel="canonical" href="', '"'), "/title/", "/")

        if not manga:
            # fallback: read manga name and chapter info from page links
            manga = extr('link-hover">', "<")
            info = text.remove_html(extr('link-hover">', "</"))
        info = text.unescape(info)

        # split "Volume 2 Chapter 13.5"-style strings into their components
        match = util.re(
            r"(?i)(?:(?:Volume|S(?:eason)?)\s*(\d+)\s+)?"
            r"(?:Chapter|Episode)\s*(\d+)([\w.]*)").match(info)
        if match:
            volume, chapter, minor = match.groups()
        else:
            volume = chapter = 0
            minor = ""

        return {
            # merge in cached per-manga metadata
            **_manga_info(self, manga_id),
            "chapter_url" : extr(self.chapter_id + "-ch_", '"'),
            "title" : text.unescape(text.remove_html(extr(
                "selected>", "</option")).partition(" : ")[2]),
            "volume" : text.parse_int(volume),
            "chapter" : text.parse_int(chapter),
            "chapter_minor" : minor,
            "chapter_string": info,
            "chapter_id" : text.parse_int(self.chapter_id),
            # strip milliseconds from the epoch value before parsing
            "date" : text.parse_timestamp(extr(' time="', '"')[:-3]),
        }

    def images(self, page):
        """Return the chapter's page images as (URL, None) tuples."""
        # image URLs live in an escaped JSON attribute next to 'pageOpts'
        images_container = text.extr(page, 'pageOpts', ':[0,0]}"')
        images_container = text.unescape(images_container)
        return [
            (url, None)
            for url in text.extract_iter(images_container, r"\"", r"\"")
        ]
136
137
138
class BatotoMangaExtractor(BatotoBase, MangaExtractor):
    """Extractor for batoto manga"""
    reverse = False
    chapterclass = BatotoChapterExtractor
    pattern = (BASE_PATTERN +
               r"/(?:title/(\d+)[^/?#]*|series/(\d+)(?:/[^/?#]*)?)/?$")
    example = "https://xbato.org/title/12345-MANGA/"

    def __init__(self, match):
        MangaExtractor.__init__(self, match, False)
        self._init_root()
        # manga ID comes from either the '/title/' or the legacy '/series/'
        # pattern group, whichever matched
        self.manga_id = self.groups[1] or self.groups[2]
        self.page_url = f"{self.root}/title/{self.manga_id}"

    def chapters(self, page):
        """Collect (chapter URL, metadata) pairs from a manga overview page.

        NOTE: the extr() calls consume the page sequentially and must stay
        in this exact order.
        """
        extr = text.extract_from(page)
        # surface any site warning banner to the user
        if warning := extr(' class="alert alert-warning">', "</div>"):
            self.log.warning("'%s'", text.remove_html(warning))
        # advance past the page header to the chapter-list container
        extr('<div data-hk="0-0-0-0"', "")
        # per-manga metadata shared by every chapter entry
        data = _manga_info(self, self.manga_id, page)

        results = []
        while True:
            href = extr('<a href="/title/', '"')
            if not href:
                break

            # the chapter number is encoded in the URL slug after '-ch_'
            chapter = href.rpartition("-ch_")[2]
            chapter, sep, minor = chapter.partition(".")

            data["chapter"] = text.parse_int(chapter)
            data["chapter_minor"] = sep + minor
            data["date"] = text.parse_datetime(
                extr('time="', '"'), "%Y-%m-%dT%H:%M:%S.%fZ")

            url = f"{self.root}/title/{href}"
            # copy 'data' since it is mutated on every iteration
            results.append((url, data.copy()))
        return results
176
177
178
@memcache(keyarg=1)
def _manga_info(self, manga_id, page=None):
    """Fetch and parse metadata for a manga.

    Results are memoized per 'manga_id'. If 'page' is not given, the
    manga's overview page is requested first. The page embeds its state
    as an HTML-escaped JSON 'props' attribute, where each field is a
    (hash, value) pair - hence the recurring '[1]' indexing.
    """
    if page is None:
        url = f"{self.root}/title/{manga_id}"
        page = self.request(url).text

    # locate the props attribute after the ' prefix="r20" ' marker
    props = text.extract(page, 'props="', '"', page.find(' prefix="r20" '))[0]
    data = util.json_loads(text.unescape(props))["data"][1]

    return {
        "manga" : data["name"][1],
        "manga_id" : text.parse_int(manga_id),
        "manga_slug" : data["slug"][1],
        # timestamps are in milliseconds; use integer division for both
        # (previously 'dateUpdate' used '/', yielding an inconsistent float)
        "manga_date" : text.parse_timestamp(
            data["dateCreate"][1] // 1000),
        "manga_date_updated": text.parse_timestamp(
            data["dateUpdate"][1] // 1000),
        "author" : json_list(data["authors"]),
        "artist" : json_list(data["artists"]),
        "genre" : json_list(data["genres"]),
        "lang" : data["tranLang"][1],
        "lang_orig" : data["origLang"][1],
        "status" : data["originalStatus"][1],
        "published" : data["originalPubFrom"][1],
        "description": data["summary"][1]["code"][1],
        "cover" : data["urlCoverOri"][1],
        "uploader" : data["userId"][1],
        "score" : data["stat_score_avg"][1],
    }
207
208
209
def json_list(value):
    """Decode an escaped JSON-array field into a list of display strings.

    'value' is a (hash, raw) pair whose raw JSON string carries escaped
    quotes; each decoded entry is itself a (hash, name) pair, and
    underscores in names are replaced by spaces.
    """
    raw = value[1].replace('\\"', '"')
    names = []
    for entry in util.json_loads(raw):
        names.append(entry[1].replace("_", " "))
    return names
214
215