Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
mikf
GitHub Repository: mikf/gallery-dl
Path: blob/master/gallery_dl/extractor/danbooru.py
8932 views
1
# -*- coding: utf-8 -*-
2
3
# Copyright 2014-2025 Mike Fährmann
4
#
5
# This program is free software; you can redistribute it and/or modify
6
# it under the terms of the GNU General Public License version 2 as
7
# published by the Free Software Foundation.
8
9
"""Extractors for https://danbooru.donmai.us/ and other Danbooru instances"""
10
11
from .common import BaseExtractor, Message
12
from .. import text, util, dt
13
14
15
class DanbooruExtractor(BaseExtractor):
    """Base class for danbooru extractors"""
    basecategory = "Danbooru"
    # NOTE(review): "(unknown)" looks like a viewer-rendering artifact of a
    # format-string field (e.g. "{md5}") — confirm against upstream
    filename_fmt = "{category}_{id}_(unknown).{extension}"
    page_limit = 1000       # page-number pagination is capped by the API
    page_start = None       # first page to request; set by skip()
    per_page = 200          # maximum number of posts per API request
    useragent = util.USERAGENT_GALLERYDL
    request_interval = (0.5, 1.5)

    def _init(self):
        """Read extractor options and set up HTTP Basic Auth"""
        self.ugoira = self.config("ugoira", False)
        self.external = self.config("external", False)
        self.includes = False

        threshold = self.config("threshold")
        if isinstance(threshold, int):
            # clamp a user-supplied threshold to at least 1
            self.threshold = 1 if threshold < 1 else threshold
        else:
            # a page with fewer results than this is treated as the last one
            self.threshold = self.per_page - 20

        username, api_key = self._get_auth_info()
        if username:
            self.log.debug("Using HTTP Basic Auth for user '%s'", username)
            self.session.auth = util.HTTPBasicAuth(username, api_key)

    def skip(self, num):
        """Skip 'num' posts by advancing the start page;
        return the number of posts actually skipped"""
        pages = num // self.per_page
        if pages >= self.page_limit:
            pages = self.page_limit - 1
        self.page_start = pages + 1
        return pages * self.per_page

    def items(self):
        """Yield Directory/Url/Queue messages for all posts"""
        # 'includes' initialization must be done here and not in '_init()'
        # or it'll cause an exception with e621 when 'metadata' is enabled
        if includes := self.config("metadata"):
            if isinstance(includes, (list, tuple)):
                includes = ",".join(includes)
            elif not isinstance(includes, str):
                includes = "artist_commentary,children,notes,parent,uploader"
            self.includes = includes + ",id"

        data = self.metadata()
        for post in self.posts():

            try:
                url = post["file_url"]
            except KeyError:
                # no file URL: optionally hand the external source URL
                # over to another extractor as a queue item
                if self.external and post["source"]:
                    post.update(data)
                    yield Message.Directory, "", post
                    yield Message.Queue, post["source"], post
                continue

            text.nameext_from_url(url, post)
            post["date"] = dt.parse_iso(post["created_at"])

            # split the space-separated tag strings into lists
            post["tags"] = (
                post["tag_string"].split(" ")
                if post["tag_string"] else ())
            post["tags_artist"] = (
                post["tag_string_artist"].split(" ")
                if post["tag_string_artist"] else ())
            post["tags_character"] = (
                post["tag_string_character"].split(" ")
                if post["tag_string_character"] else ())
            post["tags_copyright"] = (
                post["tag_string_copyright"].split(" ")
                if post["tag_string_copyright"] else ())
            post["tags_general"] = (
                post["tag_string_general"].split(" ")
                if post["tag_string_general"] else ())
            post["tags_meta"] = (
                post["tag_string_meta"].split(" ")
                if post["tag_string_meta"] else ())

            if post["extension"] == "zip":
                if self.ugoira:
                    # keep the original ZIP and attach frame data
                    post["_ugoira_original"] = False
                    post["_ugoira_frame_data"] = post["frames"] = \
                        self._ugoira_frames(post)
                    post["_http_adjust_extension"] = False
                else:
                    # download the server-side WebM conversion instead
                    url = post["large_file_url"]
                    post["extension"] = "webm"

            if url[0] == "/":
                # expand protocol-relative ("//…") and root-relative URLs
                if url[1] == "/":
                    url = "https:" + url
                else:
                    url = self.root + url

            post.update(data)
            yield Message.Directory, "", post
            yield Message.Url, url, post

    def items_artists(self):
        """Yield one tag-search queue item per artist from artists()"""
        for artist in self.artists():
            artist["_extractor"] = DanbooruTagExtractor
            url = f"{self.root}/posts?tags={text.quote(artist['name'])}"
            yield Message.Queue, url, artist

    def metadata(self):
        """Return general metadata (overridden by subclasses)"""
        return ()

    def posts(self):
        """Return an iterable of posts (overridden by subclasses)"""
        return ()

    def _pagination(self, endpoint, params, prefix=None):
        """Yield posts from 'endpoint' across all result pages.

        'prefix' selects ID-based pagination ("a<id>" ascending /
        "b<id>" descending); without it, numeric pages are used.
        """
        url = self.root + endpoint
        params["limit"] = self.per_page
        params["page"] = self.page_start

        first = True
        while True:
            posts = self.request_json(url, params=params)
            if isinstance(posts, dict):
                posts = posts["posts"]

            if posts:
                if self.includes:
                    # fetch extra metadata for the whole batch in one request
                    params_meta = {
                        "only" : self.includes,
                        "limit": len(posts),
                        "tags" : "id:" + ",".join(str(p["id"]) for p in posts),
                    }
                    data = {
                        meta["id"]: meta
                        for meta in self.request_json(url, params=params_meta)
                    }
                    for post in posts:
                        post.update(data[post["id"]])

                # "a<id>" pages arrive in reverse order after the first one
                if prefix == "a" and not first:
                    posts.reverse()

                yield from posts

            if len(posts) < self.threshold:
                return

            # advance to the next page
            if prefix:
                params["page"] = prefix + str(posts[-1]["id"])
            elif params["page"]:
                params["page"] += 1
            else:
                params["page"] = 2
            first = False

    def _ugoira_frames(self, post):
        """Fetch the frame list (file name + delay) of an ugoira post"""
        data = self.request_json(
            f"{self.root}/posts/{post['id']}.json?only=media_metadata"
        )["media_metadata"]["metadata"]

        if "Ugoira:FrameMimeType" in data:
            ext = data["Ugoira:FrameMimeType"].rpartition("/")[2]
            if ext == "jpeg":
                ext = "jpg"
        else:
            # fall back to the extension of the ZIP file name
            ext = data["ZIP:ZipFileName"].rpartition(".")[2]

        delays = data["Ugoira:FrameDelays"]
        return [{"file": f"{index:>06}.{ext}", "delay": delay}
                for index, delay in enumerate(delays)]

    def _collection_posts(self, cid, ctype):
        """Return the posts of a collection, honoring 'order-posts'"""
        reverse = prefix = None

        order = self.config("order-posts")
        if not order or order in {"asc", "pool", "pool_asc", "asc_pool"}:
            params = {"tags": f"ord{ctype}:{cid}"}
        elif order in {"id", "desc_id", "id_desc"}:
            params = {"tags": f"{ctype}:{cid}"}
            prefix = "b"
        elif order in {"desc", "desc_pool", "pool_desc"}:
            params = {"tags": f"ord{ctype}:{cid}"}
            reverse = True
        elif order in {"asc_id", "id_asc"}:
            params = {"tags": f"{ctype}:{cid}"}
            reverse = True

        posts = self._pagination("/posts.json", params, prefix)
        if reverse:
            self.log.info("Collecting posts of %s %s", ctype, cid)
            return self._collection_enumerate_reverse(posts)
        else:
            return self._collection_enumerate(posts)

    def _collection_metadata(self, cid, ctype, cname=None):
        """Fetch collection info and remember its 'post_ids'"""
        url = f"{self.root}/{cname or ctype}s/{cid}.json"
        collection = self.request_json(url)
        collection["name"] = collection["name"].replace("_", " ")
        self.post_ids = collection.pop("post_ids", ())
        return {ctype: collection}

    def _collection_enumerate(self, posts):
        """Add a 1-based 'num' index (position in 'post_ids') to each post"""
        pid_to_num = {pid: num for num, pid in enumerate(self.post_ids, 1)}
        for post in posts:
            post["num"] = pid_to_num[post["id"]]
            yield post

    def _collection_enumerate_reverse(self, posts):
        """Materialize 'posts', reverse them, and add a 1-based 'num' index"""
        posts = list(posts)
        posts.reverse()

        pid_to_num = {pid: num for num, pid in enumerate(self.post_ids, 1)}
        for post in posts:
            post["num"] = pid_to_num[post["id"]]
        return posts
225
226
227
# Register the supported danbooru instances and build the shared URL pattern
BASE_PATTERN = DanbooruExtractor.update({
    "danbooru": {
        "root": None,
        "pattern": r"(?:(?:danbooru|hijiribe|sonohara|safebooru)\.donmai\.us"
                   r"|donmai\.moe)",
    },
    "atfbooru": {
        "root": "https://booru.allthefallen.moe",
        "pattern": r"booru\.allthefallen\.moe",
    },
    "aibooru": {
        "root": None,
        "pattern": r"(?:safe\.|general\.)?aibooru\.(?:online|download)",
    },
    "booruvar": {
        "root": "https://booru.borvar.art",
        "pattern": r"booru\.borvar\.art",
    },
})
246
247
248
class DanbooruTagExtractor(DanbooruExtractor):
    """Extractor for danbooru posts from tag searches"""
    subcategory = "tag"
    directory_fmt = ("{category}", "{search_tags}")
    archive_fmt = "t_{search_tags}_{id}"
    pattern = BASE_PATTERN + r"/posts\?(?:[^&#]*&)*tags=([^&#]*)"
    example = "https://danbooru.donmai.us/posts?tags=TAG"

    def metadata(self):
        """Decode the search tags from the URL query"""
        self.tags = text.unquote(self.groups[-1].replace("+", " "))
        return {"search_tags": self.tags}

    def posts(self):
        # choose an ID-pagination prefix compatible with the search's
        # 'order:' tag; special tags force numeric pagination
        prefix = "b"
        for tag in self.tags.split():
            if tag.startswith("order:"):
                if tag == "order:id" or tag == "order:id_asc":
                    prefix = "a"
                elif tag == "order:id_desc":
                    prefix = "b"
                else:
                    prefix = None
            elif tag.startswith(
                    ("id:", "md5:", "ordfav:", "ordfavgroup:", "ordpool:")):
                prefix = None
                break

        return self._pagination("/posts.json", {"tags": self.tags}, prefix)
276
277
278
class DanbooruRandomExtractor(DanbooruTagExtractor):
    """Extractor for a random danbooru post"""
    subcategory = "random"
    pattern = BASE_PATTERN + r"/posts/random(?:\?(?:[^&#]*&)*tags=([^&#]*))?"
    example = "https://danbooru.donmai.us/posts/random?tags=TAG"

    def metadata(self):
        # the tags group is optional for this URL pattern
        tags = self.groups[-1] or ""
        self.tags = text.unquote(tags.replace("+", " "))
        return {"search_tags": self.tags}

    def posts(self):
        posts = self.request_json(self.root + "/posts/random.json",
                                  params={"tags": self.tags or None})
        # the endpoint returns a single object or a list of posts
        return (posts,) if isinstance(posts, dict) else posts
293
294
295
class DanbooruPoolExtractor(DanbooruExtractor):
    """Extractor for Danbooru pools"""
    subcategory = "pool"
    directory_fmt = ("{category}", "pool", "{pool[id]} {pool[name]}")
    # NOTE(review): "(unknown)" may be a rendering artifact of a format
    # field — confirm against upstream
    filename_fmt = "{num:>04}_{id}_(unknown).{extension}"
    archive_fmt = "p_{pool[id]}_{id}"
    pattern = BASE_PATTERN + r"/pool(?:s|/show)/(\d+)"
    example = "https://danbooru.donmai.us/pools/12345"

    def metadata(self):
        """Fetch pool info; stores the pool ID for posts()"""
        self.pool_id = self.groups[-1]
        return self._collection_metadata(self.pool_id, "pool")

    def posts(self):
        return self._collection_posts(self.pool_id, "pool")
310
311
312
class DanbooruFavgroupExtractor(DanbooruExtractor):
    """Extractor for Danbooru favorite groups"""
    subcategory = "favgroup"
    directory_fmt = ("{category}", "Favorite Groups",
                     "{favgroup[id]} {favgroup[name]}")
    # NOTE(review): "(unknown)" may be a rendering artifact of a format
    # field — confirm against upstream
    filename_fmt = "{num:>04}_{id}_(unknown).{extension}"
    archive_fmt = "fg_{favgroup[id]}_{id}"
    pattern = BASE_PATTERN + r"/favorite_group(?:s|/show)/(\d+)"
    example = "https://danbooru.donmai.us/favorite_groups/12345"

    def metadata(self):
        # API endpoint name ("favorite_group") differs from the
        # metadata key ("favgroup")
        return self._collection_metadata(
            self.groups[-1], "favgroup", "favorite_group")

    def posts(self):
        return self._collection_posts(self.groups[-1], "favgroup")
328
329
330
class DanbooruPostExtractor(DanbooruExtractor):
    """Extractor for single danbooru posts"""
    subcategory = "post"
    archive_fmt = "{id}"
    pattern = BASE_PATTERN + r"/post(?:s|/show)/(\d+)"
    example = "https://danbooru.donmai.us/posts/12345"

    def posts(self):
        url = f"{self.root}/posts/{self.groups[-1]}.json"
        post = self.request_json(url)
        if self.includes:
            # second request to merge the extra metadata fields
            params = {"only": self.includes}
            post.update(self.request_json(url, params=params))
        return (post,)
344
345
346
class DanbooruMediaassetExtractor(DanbooruExtractor):
    """Extractor for a danbooru media asset"""
    subcategory = "media-asset"
    # NOTE(review): "(unknown)" may be a rendering artifact of a format
    # field — confirm against upstream
    filename_fmt = "{category}_ma{id}_(unknown).{extension}"
    archive_fmt = "m{id}"
    pattern = BASE_PATTERN + r"/media_assets/(\d+)"
    example = "https://danbooru.donmai.us/media_assets/12345"

    def posts(self):
        url = f"{self.root}/media_assets/{self.groups[-1]}.json"
        asset = self.request_json(url)

        # use the highest-quality variant and stub out the tag fields
        # that items() expects on every post object
        asset["file_url"] = asset["variants"][-1]["url"]
        asset["tag_string"] = \
            asset["tag_string_artist"] = \
            asset["tag_string_character"] = \
            asset["tag_string_copyright"] = \
            asset["tag_string_general"] = \
            asset["tag_string_meta"] = ""

        if self.includes:
            params = {"only": self.includes}
            asset.update(self.request_json(url, params=params))
        return (asset,)
370
371
372
class DanbooruPopularExtractor(DanbooruExtractor):
    """Extractor for popular images from danbooru"""
    subcategory = "popular"
    directory_fmt = ("{category}", "popular", "{scale}", "{date}")
    archive_fmt = "P_{scale[0]}_{date}_{id}"
    pattern = BASE_PATTERN + r"/(?:explore/posts/)?popular(?:\?([^#]*))?"
    example = "https://danbooru.donmai.us/explore/posts/popular"

    def metadata(self):
        """Normalize 'scale' and 'date' from the URL query"""
        self.params = params = text.parse_query(self.groups[-1])
        scale = params.get("scale", "day")
        date = params.get("date") or dt.date.today().isoformat()

        if scale == "week":
            # snap to the Monday that starts the week
            date = dt.date.fromisoformat(date)
            date = (date - dt.timedelta(days=date.weekday())).isoformat()
        elif scale == "month":
            # keep only the "YYYY-MM" part
            date = date[:-3]

        return {"date": date, "scale": scale}

    def posts(self):
        return self._pagination("/explore/posts/popular.json", self.params)
395
396
397
class DanbooruArtistExtractor(DanbooruExtractor):
    """Extractor for danbooru artists"""
    subcategory = "artist"
    pattern = BASE_PATTERN + r"/artists/(\d+)"
    example = "https://danbooru.donmai.us/artists/12345"

    # delegate item generation to the shared artist handler
    items = DanbooruExtractor.items_artists

    def artists(self):
        url = f"{self.root}/artists/{self.groups[-1]}.json"
        return (self.request_json(url),)
408
409
410
class DanbooruArtistSearchExtractor(DanbooruExtractor):
    """Extractor for danbooru artist searches"""
    subcategory = "artist-search"
    pattern = BASE_PATTERN + r"/artists/?\?([^#]+)"
    example = "https://danbooru.donmai.us/artists?QUERY"

    # delegate item generation to the shared artist handler
    items = DanbooruExtractor.items_artists

    def artists(self):
        """Yield artist objects across all result pages"""
        url = self.root + "/artists.json"
        params = text.parse_query(self.groups[-1])
        params["page"] = text.parse_int(params.get("page"), 1)

        while True:
            artists = self.request_json(url, params=params)

            yield from artists

            # fewer than 20 results: this was the last page
            if len(artists) < 20:
                return
            params["page"] += 1
431
432