GitHub Repository: mikf/gallery-dl
Path: blob/master/gallery_dl/extractor/e621.py
# -*- coding: utf-8 -*-

# Copyright 2014-2025 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extractors for https://e621.net/ and other e621 instances"""

from .common import Extractor, Message
from . import danbooru
from ..cache import memcache
from .. import text, util


class E621Extractor(danbooru.DanbooruExtractor):
    """Base class for e621 extractors"""
    basecategory = "E621"
    page_limit = 750
    page_start = None
    per_page = 320
    useragent = util.USERAGENT + " (by mikf)"
    request_interval_min = 1.0

    def items(self):
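        # the 'metadata' option selects which additional resources
        # ("notes", "pools") get fetched for every post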
        if includes := self.config("metadata") or ():
            if isinstance(includes, str):
                includes = includes.split(",")
            elif not isinstance(includes, (list, tuple)):
                includes = ("notes", "pools")

        notes = ("notes" in includes)
        pools = ("pools" in includes)

        data = self.metadata()
        for post in self.posts():
            file = post["file"]

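            # some posts are returned without a file URL (e.g. when they
            # are hidden from anonymous users); rebuild the static URL
            # from the file's MD5 hash and extension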
            if not file["url"]:
                md5 = file["md5"]
                file["url"] = (f"https://static1.{self.root[8:]}/data"
                               f"/{md5[0:2]}/{md5[2:4]}/{md5}.{file['ext']}")

            if notes and post.get("has_notes"):
                post["notes"] = self._get_notes(post["id"])

            if pools and post["pools"]:
                post["pools"] = self._get_pools(
                    ",".join(map(str, post["pools"])))

            post["filename"] = file["md5"]
            post["extension"] = file["ext"]
            post["date"] = text.parse_datetime(
                post["created_at"], "%Y-%m-%dT%H:%M:%S.%f%z")

            post.update(data)
            yield Message.Directory, post
            yield Message.Url, file["url"], post

    def _get_notes(self, id):
        return self.request_json(
            f"{self.root}/notes.json?search[post_id]={id}")

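    # memoize pool metadata per ID string, so posts sharing the
    # same pool(s) cause only one additional API request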
    @memcache(keyarg=1)
    def _get_pools(self, ids):
        pools = self.request_json(
            f"{self.root}/pools.json?search[id]={ids}")
        for pool in pools:
            pool["name"] = pool["name"].replace("_", " ")
        return pools

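# register the supported e621 instances (root URL and domain pattern
# per category) and build the URL pattern prefix shared by the
# extractor classes below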
BASE_PATTERN = E621Extractor.update({
    "e621": {
        "root": "https://e621.net",
        "pattern": r"e621\.(?:net|cc)",
    },
    "e926": {
        "root": "https://e926.net",
        "pattern": r"e926\.net",
    },
    "e6ai": {
        "root": "https://e6ai.net",
        "pattern": r"e6ai\.net",
    },
})


class E621TagExtractor(E621Extractor, danbooru.DanbooruTagExtractor):
    """Extractor for e621 posts from tag searches"""
    pattern = BASE_PATTERN + r"/posts?(?:\?[^#]*?tags=|/index/\d+/)([^&#]*)"
    example = "https://e621.net/posts?tags=TAG"


class E621PoolExtractor(E621Extractor, danbooru.DanbooruPoolExtractor):
    """Extractor for e621 pools"""
    pattern = BASE_PATTERN + r"/pool(?:s|/show)/(\d+)"
    example = "https://e621.net/pools/12345"

    def posts(self):
        self.log.info("Collecting posts of pool %s", self.pool_id)

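        # fetch all posts of this pool with a single "pool:ID" tag search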
        id_to_post = {
            post["id"]: post
            for post in self._pagination(
                "/posts.json", {"tags": "pool:" + self.pool_id})
        }

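        # restore the pool's original post order and number each post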
        posts = []
        for num, pid in enumerate(self.post_ids, 1):
            if pid in id_to_post:
                post = id_to_post[pid]
                post["num"] = num
                posts.append(post)
            else:
                self.log.warning("Post %s is unavailable", pid)
        return posts


class E621PostExtractor(E621Extractor, danbooru.DanbooruPostExtractor):
    """Extractor for single e621 posts"""
    pattern = BASE_PATTERN + r"/post(?:s|/show)/(\d+)"
    example = "https://e621.net/posts/12345"

    def posts(self):
        url = f"{self.root}/posts/{self.groups[-1]}.json"
        return (self.request_json(url)["post"],)


class E621PopularExtractor(E621Extractor, danbooru.DanbooruPopularExtractor):
    """Extractor for popular images from e621"""
    pattern = BASE_PATTERN + r"/explore/posts/popular(?:\?([^#]*))?"
    example = "https://e621.net/explore/posts/popular"

    def posts(self):
        return self._pagination("/popular.json", self.params)


class E621FavoriteExtractor(E621Extractor):
    """Extractor for e621 favorites"""
    subcategory = "favorite"
    directory_fmt = ("{category}", "Favorites", "{user_id}")
    archive_fmt = "f_{user_id}_{id}"
    pattern = BASE_PATTERN + r"/favorites(?:\?([^#]*))?"
    example = "https://e621.net/favorites"

    def metadata(self):
        self.query = text.parse_query(self.groups[-1])
        return {"user_id": self.query.get("user_id", "")}

    def posts(self):
        return self._pagination("/favorites.json", self.query)


class E621FrontendExtractor(Extractor):
    """Extractor for alternative e621 frontends"""
    basecategory = "E621"
    category = "e621"
    subcategory = "frontend"
    pattern = r"(?:https?://)?e621\.(?:cc/\?tags|anthro\.fr/\?q)=([^&#]*)"
    example = "https://e621.cc/?tags=TAG"

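    # no network or session setup of its own is needed; this extractor
    # only hands the canonical URL off to E621TagExtractor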
    def initialize(self):
        pass

    def items(self):
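        # translate the frontend URL into a canonical e621.net tag search
        # and enqueue it for E621TagExtractor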
url = "https://e621.net/posts?tags=" + self.groups[0]
169
data = {"_extractor": E621TagExtractor}
170
yield Message.Queue, url, data
171
172