GitHub Repository: mikf/gallery-dl
Path: blob/master/gallery_dl/job.py
# -*- coding: utf-8 -*-

# Copyright 2015-2025 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

import sys
import errno
import logging
import functools
import collections

from . import (
    extractor,
    downloader,
    postprocessor,
    archive,
    config,
    exception,
    formatter,
    output,
    path,
    text,
    util,
    version,
)
from .extractor.message import Message
stdout_write = output.stdout_write
FLAGS = util.FLAGS
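
# Usage sketch (illustrative; the real CLI driver lives elsewhere in
# gallery_dl and also handles option parsing, config loading, etc.):
#
#     from gallery_dl import job
#     status = job.DownloadJob("https://example.org/gallery/123").run()
#
# Each Job wraps an extractor, consumes its Message stream via dispatch(),
# and run() returns a bit-field status code (0 on full success).
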
class Job():
    """Base class for Job types"""
    ulog = None
    _logger_adapter = output.LoggerAdapter

    def __init__(self, extr, parent=None):
        if isinstance(extr, str):
            extr = extractor.find(extr)
        if not extr:
            raise exception.NoExtractorError()

        self.extractor = extr
        self.pathfmt = None
        self.status = 0
        self.kwdict = {}
        self.kwdict_eval = False

        if cfgpath := self._build_config_path(parent):
            if isinstance(cfgpath, list):
                extr.config = extr._config_shared
                extr.config_accumulate = extr._config_shared_accumulate
            extr._cfgpath = cfgpath

        if actions := extr.config("actions"):
            from .actions import LoggerAdapter, parse_logging
            self._logger_adapter = LoggerAdapter
            self._logger_actions = parse_logging(actions)

        path_proxy = output.PathfmtProxy(self)
        self._logger_extra = {
            "job"      : self,
            "extractor": extr,
            "path"     : path_proxy,
            "keywords" : output.KwdictProxy(self),
        }
        extr.log = self._wrap_logger(extr.log)
        extr.log.debug("Using %s for '%s'", extr.__class__.__name__, extr.url)

        self.metadata_url = extr.config2("metadata-url", "url-metadata")
        self.metadata_http = extr.config2("metadata-http", "http-metadata")
        metadata_path = extr.config2("metadata-path", "path-metadata")
        metadata_version = extr.config2("metadata-version", "version-metadata")
        metadata_extractor = extr.config2(
            "metadata-extractor", "extractor-metadata")

        if metadata_path:
            self.kwdict[metadata_path] = path_proxy
        if metadata_extractor:
            self.kwdict[metadata_extractor] = extr
        if metadata_version:
            self.kwdict[metadata_version] = {
                "version"         : version.__version__,
                "is_executable"   : util.EXECUTABLE,
                "current_git_head": util.git_head()
            }
        # user-supplied metadata
        if kwdict := extr.config("keywords"):
            if extr.config("keywords-eval"):
                self.kwdict_eval = []
                for key, value in kwdict.items():
                    if isinstance(value, str):
                        fmt = formatter.parse(value, None, util.identity)
                        self.kwdict_eval.append((key, fmt.format_map))
                    else:
                        self.kwdict[key] = value
            else:
                self.kwdict.update(kwdict)
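
    # Illustrative "keywords" config (key/value names are examples). With
    # "keywords-eval" enabled, string values are compiled as format strings
    # and evaluated against each file's metadata in update_kwdict():
    #
    #     "keywords": {"source": "scraped", "label": "{category}_{filename}"}
    #
    # Without "keywords-eval", all values are copied into kwdict verbatim.
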
    def _build_config_path(self, parent):
        extr = self.extractor
        cfgpath = []

        if parent:
            pextr = parent.extractor
            if extr.category == pextr.category or \
                    extr.category in parent.parents:
                parents = parent.parents
            else:
                parents = parent.parents + (pextr.category,)
            self.parents = parents

            if pextr.config("category-transfer", pextr.categorytransfer):
                extr.category = pextr.category
                extr.subcategory = pextr.subcategory
                return pextr._cfgpath

            if parents:
                sub = extr.subcategory
                for category in parents:
                    cat = f"{category}>{extr.category}"
                    cfgpath.append((cat, sub))
                    cfgpath.append((category + ">*", sub))
                cfgpath.append((extr.category, sub))
        else:
            self.parents = ()

        if extr.basecategory:
            if not cfgpath:
                cfgpath.append((extr.category, extr.subcategory))
            cfgpath.append((extr.basecategory, extr.subcategory))

        return cfgpath
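
    # Shape of the returned config path (hypothetical categories): a child
    # "imgur" extractor spawned by a "reddit" parent yields
    #
    #     [("reddit>imgur", sub), ("reddit>*", sub), ("imgur", sub)]
    #
    # so "parent>child" config sections take precedence over plain ones.
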
    def run(self):
        """Execute or run the job"""
        extractor = self.extractor
        log = extractor.log
        msg = None

        self._init()

        # sleep before extractor start
        sleep = util.build_duration_func(
            extractor.config("sleep-extractor"))
        if sleep:
            extractor.sleep(sleep(), "extractor")

        try:
            for msg in extractor:
                self.dispatch(msg)
        except exception.StopExtraction as exc:
            if exc.depth > 1 and \
                    exc.target != extractor.__class__.subcategory:
                exc.depth -= 1
                raise
        except exception.AbortExtraction as exc:
            log.error(exc.message)
            self.status |= exc.code
        except (exception.TerminateExtraction, exception.RestartExtraction):
            raise
        except exception.GalleryDLException as exc:
            log.error("%s: %s", exc.__class__.__name__, exc)
            log.debug("", exc_info=exc)
            self.status |= exc.code
        except OSError as exc:
            log.debug("", exc_info=exc)
            name = exc.__class__.__name__
            if name == "JSONDecodeError":
                log.error("Failed to parse JSON data: %s: %s", name, exc)
                self.status |= 1
            else:  # regular OSError
                log.error("Unable to download data: %s: %s", name, exc)
                self.status |= 128
        except Exception as exc:
            log.error(("An unexpected error occurred: %s - %s. "
                       "Please run gallery-dl again with the --verbose flag, "
                       "copy its output and report this issue on "
                       "https://github.com/mikf/gallery-dl/issues ."),
                      exc.__class__.__name__, exc)
            log.debug("", exc_info=exc)
            self.status |= 1
        except BaseException:
            self.status |= 1
            raise
        else:
            if msg is None:
                log.info("No results for %s", extractor.url)
        finally:
            self.handle_finalize()
            extractor.finalize()

        if s := extractor.status:
            self.status |= s
        return self.status
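
    # Status bits set above: 1 for unexpected/data errors, 128 for OSError
    # while downloading; GalleryDLException subclasses contribute their own
    # 'code' values, and DownloadJob.handle_url() adds 4 for failed files.
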
    def dispatch(self, msg):
        """Call the appropriate message handler"""
        if msg[0] == Message.Url:
            _, url, kwdict = msg
            if self.metadata_url:
                kwdict[self.metadata_url] = url
            if self.pred_url(url, kwdict):
                self.update_kwdict(kwdict)
                self.handle_url(url, kwdict)
            if FLAGS.FILE is not None:
                FLAGS.process("FILE")

        elif msg[0] == Message.Directory:
            self.update_kwdict(msg[1])
            self.handle_directory(msg[1])

        elif msg[0] == Message.Queue:
            _, url, kwdict = msg
            if self.metadata_url:
                kwdict[self.metadata_url] = url
            if self.pred_queue(url, kwdict):
                self.update_kwdict(kwdict)
                self.handle_queue(url, kwdict)
            if FLAGS.CHILD is not None:
                FLAGS.process("CHILD")
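
    # Extractor messages are plain tuples:
    #
    #     (Message.Url, url, kwdict)       one downloadable resource
    #     (Message.Directory, kwdict)      metadata for the following files
    #     (Message.Queue, url, kwdict)     a URL for a child extractor job
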
    def handle_url(self, url, kwdict):
        """Handle Message.Url"""

    def handle_directory(self, kwdict):
        """Handle Message.Directory"""

    def handle_queue(self, url, kwdict):
        """Handle Message.Queue"""

    def handle_finalize(self):
        """Handle job finalization"""

    def update_kwdict(self, kwdict):
        """Update 'kwdict' with additional metadata"""
        extr = self.extractor
        kwdict["category"] = extr.category
        kwdict["subcategory"] = extr.subcategory
        if self.metadata_http:
            kwdict.pop(self.metadata_http, None)
        if extr.kwdict:
            kwdict.update(extr.kwdict)
        if self.kwdict:
            kwdict.update(self.kwdict)
        if self.kwdict_eval:
            for key, valuegen in self.kwdict_eval:
                kwdict[key] = valuegen(kwdict)

    def _init(self):
        self.extractor.initialize()
        self.pred_url = self._prepare_predicates("image", True)
        self.pred_queue = self._prepare_predicates("chapter", False)

    def _prepare_predicates(self, target, skip=True):
        predicates = []

        if self.extractor.config(target + "-unique"):
            predicates.append(util.UniquePredicate())

        if pfilter := self.extractor.config(target + "-filter"):
            try:
                pred = util.FilterPredicate(pfilter, target)
            except (SyntaxError, ValueError, TypeError) as exc:
                self.extractor.log.warning(exc)
            else:
                predicates.append(pred)

        if prange := self.extractor.config(target + "-range"):
            try:
                pred = util.RangePredicate(prange)
            except ValueError as exc:
                self.extractor.log.warning(
                    "invalid %s range: %s", target, exc)
            else:
                if skip and pred.lower > 1 and not pfilter:
                    pred.index += self.extractor.skip(pred.lower - 1)
                predicates.append(pred)

        return util.build_predicate(predicates)
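
    # Illustrative config driving these predicates (option names follow the
    # 'target' prefix used above; values are examples):
    #
    #     "image-range" : "1-20",
    #     "image-filter": "width >= 1000",
    #     "image-unique": true
    #
    # Queue messages use the "chapter-" prefix instead. When a range starts
    # past 1 and no filter is set, extractor.skip() fast-forwards instead
    # of fetching and discarding the skipped items.
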
    def get_logger(self, name):
        return self._wrap_logger(logging.getLogger(name))

    def _wrap_logger(self, logger):
        return self._logger_adapter(logger, self)

    def _write_unsupported(self, url):
        if self.ulog:
            self.ulog.info(url)


class DownloadJob(Job):
    """Download images into appropriate directory/filename locations"""

    def __init__(self, url, parent=None):
        Job.__init__(self, url, parent)
        self.log = self.get_logger("download")
        self.fallback = None
        self.archive = None
        self.sleep = None
        self.hooks = ()
        self.downloaders = {}
        self.out = output.select()
        self.visited = parent.visited if parent else set()
        self._extractor_filter = None
        self._skipcnt = 0

    def handle_url(self, url, kwdict):
        """Download the resource specified in 'url'"""
        hooks = self.hooks
        pathfmt = self.pathfmt
        archive = self.archive

        # prepare download
        pathfmt.set_filename(kwdict)

        if "prepare" in hooks:
            for callback in hooks["prepare"]:
                callback(pathfmt)

        if archive and archive.check(kwdict):
            pathfmt.fix_extension()
            self.handle_skip()
            return

        if pathfmt.extension and not self.metadata_http:
            pathfmt.build_path()

            if pathfmt.exists():
                if archive and self._archive_write_skip:
                    archive.add(kwdict)
                self.handle_skip()
                return

        if "prepare-after" in hooks:
            for callback in hooks["prepare-after"]:
                callback(pathfmt)

        if kwdict.pop("_file_recheck", False) and pathfmt.exists():
            if archive and self._archive_write_skip:
                archive.add(kwdict)
            self.handle_skip()
            return

        if self.sleep:
            self.extractor.sleep(self.sleep(), "download")

        # download from URL
        if not self.download(url):

            # use fallback URLs if available/enabled
            fallback = kwdict.get("_fallback", ()) if self.fallback else ()
            for num, url in enumerate(fallback, 1):
                util.remove_file(pathfmt.temppath)
                self.log.info("Trying fallback URL #%d", num)
                if self.download(url):
                    break
            else:
                # download failed
                self.status |= 4
                self.log.error("Failed to download %s",
                               pathfmt.filename or url)
                if "error" in hooks:
                    for callback in hooks["error"]:
                        callback(pathfmt)
                return

        if not pathfmt.temppath:
            if archive and self._archive_write_skip:
                archive.add(kwdict)
            self.handle_skip()
            return

        # run post processors
        if "file" in hooks:
            for callback in hooks["file"]:
                callback(pathfmt)

        # download succeeded
        pathfmt.finalize()
        self.out.success(pathfmt.path)
        self._skipcnt = 0
        if archive and self._archive_write_file:
            archive.add(kwdict)
        if "after" in hooks:
            for callback in hooks["after"]:
                callback(pathfmt)
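
    # Hook events fired by this class: "prepare", "prepare-after", "file",
    # "after", "error", and "skip" per file; "post" and "post-after" per
    # directory; "init", "finalize", "finalize-success", "finalize-error"
    # per job. Postprocessors subscribe to them via register_hooks().
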
    def handle_directory(self, kwdict):
        """Set and create the target directory for downloads"""
        if not self.pathfmt:
            self.initialize(kwdict)
        else:
            if "post-after" in self.hooks:
                for callback in self.hooks["post-after"]:
                    callback(self.pathfmt)
            if FLAGS.POST is not None:
                FLAGS.process("POST")
            self.pathfmt.set_directory(kwdict)
        if "post" in self.hooks:
            for callback in self.hooks["post"]:
                callback(self.pathfmt)

    def handle_queue(self, url, kwdict):
        if url in self.visited:
            return
        self.visited.add(url)

        if cls := kwdict.get("_extractor"):
            extr = cls.from_url(url)
        else:
            if extr := extractor.find(url):
                if self._extractor_filter is None:
                    self._extractor_filter = self._build_extractor_filter()
                if not self._extractor_filter(extr):
                    extr = None

        if extr:
            job = self.__class__(extr, self)
            pfmt = self.pathfmt
            pextr = self.extractor

            if pfmt and pextr.config("parent-directory"):
                extr._parentdir = pfmt.directory
            else:
                extr._parentdir = pextr._parentdir

            if pmeta := pextr.config2("parent-metadata", "metadata-parent"):
                if isinstance(pmeta, str):
                    data = self.kwdict.copy()
                    if kwdict:
                        data.update(kwdict)
                    job.kwdict[pmeta] = data
                else:
                    if self.kwdict:
                        job.kwdict.update(self.kwdict)
                    if kwdict:
                        job.kwdict.update(kwdict)

            while True:
                try:
                    if pextr.config("parent-skip"):
                        job._skipcnt = self._skipcnt
                        status = job.run()
                        self._skipcnt = job._skipcnt
                    else:
                        status = job.run()

                    if status:
                        self.status |= status
                        if (status & 95 and  # not FormatError or OSError
                                "_fallback" in kwdict and self.fallback):
                            fallback = kwdict["_fallback"] = \
                                iter(kwdict["_fallback"])
                            try:
                                url = next(fallback)
                            except StopIteration:
                                pass
                            else:
                                pextr.log.info("Downloading fallback URL")
                                text.nameext_from_url(url, kwdict)
                                if kwdict["filename"].startswith((
                                        "HLS", "DASH")):
                                    kwdict["filename"] = url.rsplit("/", 2)[-2]
                                if url.startswith("ytdl:"):
                                    kwdict["extension"] = "mp4"
                                self.handle_url(url, kwdict)
                    break
                except exception.RestartExtraction:
                    pass

        else:
            self._write_unsupported(url)

    def handle_finalize(self):
        if self.archive:
            if not self.status:
                self.archive.finalize()
            self.archive.close()

        if pathfmt := self.pathfmt:
            hooks = self.hooks
            if "post-after" in hooks:
                for callback in hooks["post-after"]:
                    callback(pathfmt)

            self.extractor.cookies_store()

            if "finalize" in hooks:
                for callback in hooks["finalize"]:
                    callback(pathfmt)
            if self.status:
                if "finalize-error" in hooks:
                    for callback in hooks["finalize-error"]:
                        callback(pathfmt)
            else:
                if "finalize-success" in hooks:
                    for callback in hooks["finalize-success"]:
                        callback(pathfmt)

    def handle_skip(self):
        pathfmt = self.pathfmt
        if "skip" in self.hooks:
            for callback in self.hooks["skip"]:
                callback(pathfmt)
        self.out.skip(pathfmt.path)

        if self._skipexc:
            if not self._skipftr or self._skipftr(pathfmt.kwdict):
                self._skipcnt += 1
                if self._skipcnt >= self._skipmax:
                    raise self._skipexc
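
    # Illustrative "skip" settings handled here and in initialize()
    # (option names are real, values are examples):
    #
    #     "skip": "abort:3"      stop this extractor after 3 consecutive
    #                            already-existing files
    #     "skip": "terminate"    end the whole job chain instead
    #     "skip": "enumerate"    never skip; enumerate duplicate filenames
    #     "skip": false          re-download existing files
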
    def download(self, url):
        """Download 'url'"""
        if downloader := self.get_downloader(url[:url.find(":")]):
            try:
                return downloader.download(url, self.pathfmt)
            except OSError as exc:
                if exc.errno == errno.ENOSPC:
                    raise
                self.log.warning("%s: %s", exc.__class__.__name__, exc)
                return False
        self._write_unsupported(url)
        return False

    def get_downloader(self, scheme):
        """Return a downloader suitable for 'scheme'"""
        try:
            return self.downloaders[scheme]
        except KeyError:
            pass

        cls = downloader.find(scheme)
        if cls and config.get(("downloader", cls.scheme), "enabled", True):
            instance = cls(self)
        else:
            instance = None
            self.log.error("'%s:' URLs are not supported/enabled", scheme)

        if cls and cls.scheme == "http":
            self.downloaders["http"] = self.downloaders["https"] = instance
        else:
            self.downloaders[scheme] = instance
        return instance
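
    # Downloaders are cached per URL scheme (the text before the first ':');
    # "http" and "https" share one instance. A scheme can be disabled via
    # config, e.g. (illustrative):
    #
    #     {"downloader": {"ytdl": {"enabled": false}}}
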
    def initialize(self, kwdict=None):
        """Delayed initialization of PathFormat, etc."""
        extr = self.extractor
        cfg = extr.config

        pathfmt = self.pathfmt = path.PathFormat(extr)
        if kwdict:
            pathfmt.set_directory(kwdict)

        self.sleep = util.build_duration_func(cfg("sleep"))
        self.fallback = cfg("fallback", True)
        if not cfg("download", True):
            # monkey-patch method to do nothing and always return True
            self.download = pathfmt.fix_extension

        if archive_path := cfg("archive"):
            archive_table = cfg("archive-table")
            archive_prefix = cfg("archive-prefix")
            if archive_prefix is None:
                archive_prefix = extr.category if archive_table is None else ""

            archive_format = cfg("archive-format")
            if archive_format is None:
                archive_format = extr.archive_fmt

            try:
                self.archive = archive.connect(
                    archive_path,
                    archive_prefix,
                    archive_format,
                    archive_table,
                    cfg("archive-mode"),
                    cfg("archive-pragma"),
                    kwdict,
                )
            except Exception as exc:
                extr.log.warning(
                    "Failed to open download archive at '%s' (%s: %s)",
                    archive_path, exc.__class__.__name__, exc)
            else:
                extr.log.debug("Using download archive '%s'", archive_path)

                events = cfg("archive-event")
                if events is None:
                    self._archive_write_file = True
                    self._archive_write_skip = False
                else:
                    if isinstance(events, str):
                        events = events.split(",")
                    self._archive_write_file = ("file" in events)
                    self._archive_write_skip = ("skip" in events)

        if skip := cfg("skip", True):
            self._skipexc = None
            if skip == "enumerate":
                pathfmt.check_file = pathfmt._enum_file
            elif isinstance(skip, str):
                skip, _, smax = skip.partition(":")
                if skip == "abort":
                    smax, _, sarg = smax.partition(":")
                    self._skipexc = exception.StopExtraction(sarg or None)
                elif skip == "terminate":
                    self._skipexc = exception.TerminateExtraction
                elif skip == "exit":
                    self._skipexc = SystemExit
                self._skipmax = text.parse_int(smax)

                if skip_filter := cfg("skip-filter"):
                    self._skipftr = util.compile_filter(skip_filter)
                else:
                    self._skipftr = None
        else:
            # monkey-patch methods to always return False
            pathfmt.exists = lambda x=None: False
            if self.archive:
                self.archive.check = pathfmt.exists

        if not cfg("postprocess", True):
            return

        if postprocessors := extr.config_accumulate("postprocessors"):
            self.hooks = collections.defaultdict(list)

            pp_log = self.get_logger("postprocessor")
            pp_conf = config.get((), "postprocessor") or {}
            pp_opts = cfg("postprocessor-options")
            pp_list = []

            for pp_dict in postprocessors:
                if isinstance(pp_dict, str):
                    pp_dict = pp_conf.get(pp_dict) or {"name": pp_dict}
                elif "type" in pp_dict:
                    pp_type = pp_dict["type"]
                    if pp_type in pp_conf:
                        pp = pp_conf[pp_type].copy()
                        pp.update(pp_dict)
                        pp_dict = pp
                    if "name" not in pp_dict:
                        pp_dict["name"] = pp_type
                if pp_opts:
                    pp_dict = pp_dict.copy()
                    pp_dict.update(pp_opts)

                clist = pp_dict.get("whitelist")
                if clist is not None:
                    negate = False
                else:
                    clist = pp_dict.get("blacklist")
                    negate = True
                if clist and not util.build_extractor_filter(
                        clist, negate)(extr):
                    continue

                name = pp_dict.get("name", "")
                if "__init__" not in pp_dict:
                    name, sep, event = name.rpartition("@")
                    if sep:
                        pp_dict["name"] = name
                        if "event" not in pp_dict:
                            pp_dict["event"] = event
                    else:
                        name = event

                    name, sep, mode = name.rpartition("/")
                    if sep:
                        pp_dict["name"] = name
                        if "mode" not in pp_dict:
                            pp_dict["mode"] = mode
                    else:
                        name = mode

                    pp_dict["__init__"] = None

                pp_cls = postprocessor.find(name)
                if not pp_cls:
                    pp_log.warning("module '%s' not found", name)
                    continue
                try:
                    pp_obj = pp_cls(self, pp_dict)
                except Exception as exc:
                    pp_log.error("'%s' initialization failed: %s: %s",
                                 name, exc.__class__.__name__, exc)
                    pp_log.debug("", exc_info=exc)
                else:
                    pp_list.append(pp_obj)

            if pp_list:
                extr.log.debug("Active postprocessor modules: %s", pp_list)
                if "init" in self.hooks:
                    for callback in self.hooks["init"]:
                        callback(pathfmt)
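
    # Illustrative "postprocessors" entries (the "metadata" module name is
    # real; values are examples):
    #
    #     "postprocessors": ["metadata/json@after", {"name": "metadata"}]
    #
    # A plain string is first looked up as a preset in the global
    # "postprocessor" config section; the "name/mode" and "name@event"
    # suffixes are split apart by the shorthand parsing above.
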
    def register_hooks(self, hooks, options=None):
        expr = options.get("filter") if options else None

        if expr:
            condition = util.compile_filter(expr)
            for hook, callback in hooks.items():
                self.hooks[hook].append(functools.partial(
                    self._call_hook, callback, condition))
        else:
            for hook, callback in hooks.items():
                self.hooks[hook].append(callback)

    def _call_hook(self, callback, condition, pathfmt):
        if condition(pathfmt.kwdict):
            callback(pathfmt)

    def _build_extractor_filter(self):
        clist = self.extractor.config("whitelist")
        if clist is not None:
            negate = False
            special = None
        else:
            clist = self.extractor.config("blacklist")
            negate = True
            special = util.SPECIAL_EXTRACTORS
            if clist is None:
                clist = (self.extractor.category,)

        return util.build_extractor_filter(clist, negate, special)


class SimulationJob(DownloadJob):
    """Simulate the extraction process without downloading anything"""

    def handle_url(self, url, kwdict):
        ext = kwdict["extension"] or "jpg"
        kwdict["extension"] = self.pathfmt.extension_map(ext, ext)
        if self.sleep:
            self.extractor.sleep(self.sleep(), "download")
        if self.archive and self._archive_write_skip:
            self.archive.add(kwdict)
        self.out.skip(self.pathfmt.build_filename(kwdict))

    def handle_directory(self, kwdict):
        if not self.pathfmt:
            self.initialize()


class KeywordJob(Job):
    """Print available keywords"""

    def __init__(self, url, parent=None):
        Job.__init__(self, url, parent)
        self.private = config.get(("output",), "private")

    def handle_url(self, url, kwdict):
        stdout_write("\nKeywords for filenames and --filter:\n"
                     "------------------------------------\n")

        if self.metadata_http and url.startswith("http"):
            kwdict[self.metadata_http] = util.extract_headers(
                self.extractor.request(url, method="HEAD"))

        self.print_kwdict(kwdict)
        raise exception.StopExtraction()

    def handle_directory(self, kwdict):
        stdout_write("Keywords for directory names:\n"
                     "-----------------------------\n")
        self.print_kwdict(kwdict)

    def handle_queue(self, url, kwdict):
        extr = None
        if "_extractor" in kwdict:
            extr = kwdict["_extractor"].from_url(url)

        if not util.filter_dict(kwdict):
            self.extractor.log.info(
                "This extractor only spawns other extractors "
                "and does not provide any metadata on its own.")

            if extr:
                self.extractor.log.info(
                    "Showing results for '%s' instead:\n", url)
                KeywordJob(extr, self).run()
            else:
                self.extractor.log.info(
                    "Try 'gallery-dl -K \"%s\"' instead.", url)
        else:
            stdout_write("Keywords for --chapter-filter:\n"
                         "------------------------------\n")
            self.print_kwdict(kwdict)
            if extr or self.extractor.categorytransfer:
                stdout_write("\n")
                KeywordJob(extr or url, self).run()
        raise exception.StopExtraction()

    def print_kwdict(self, kwdict, prefix="", markers=None):
        """Print key-value pairs in 'kwdict' with formatting"""
        write = sys.stdout.write
        suffix = "']" if prefix else ""

        markerid = id(kwdict)
        if markers is None:
            markers = {markerid}
        elif markerid in markers:
            write(f"{prefix[:-2]}\n  <circular reference>\n")
            return  # ignore circular reference
        else:
            markers.add(markerid)

        for key, value in sorted(kwdict.items()):
            if key[0] == "_" and not self.private:
                continue
            key = prefix + key + suffix

            if isinstance(value, dict):
                self.print_kwdict(value, key + "['", markers)

            elif isinstance(value, list):
                if not value:
                    pass
                elif isinstance(value[0], dict):
                    self.print_kwdict(value[0], key + "[N]['", markers)
                else:
                    fmt = ("  {:>%s} {}\n" % len(str(len(value)))).format
                    write(key + "[N]\n")
                    for idx, val in enumerate(value, 0):
                        write(fmt(idx, val))

            else:
                # string or number
                write(f"{key}\n  {value}\n")

        markers.remove(markerid)
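
    # Example print_kwdict() output shape (hypothetical values):
    #
    #     category
    #       example
    #     tags[N]
    #       0 sample
    #       1 sketch
    #     user['name']
    #       anonymous
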
class UrlJob(Job):
    """Print download urls"""
    maxdepth = 1

    def __init__(self, url, parent=None, depth=1):
        Job.__init__(self, url, parent)
        self.depth = depth
        if depth >= self.maxdepth:
            self.handle_queue = self.handle_url

    def handle_url(self, url, _):
        stdout_write(url + "\n")

    def handle_url_fallback(self, url, kwdict):
        stdout_write(url + "\n")
        if "_fallback" in kwdict:
            for url in kwdict["_fallback"]:
                stdout_write(f"| {url}\n")

    def handle_queue(self, url, kwdict):
        if cls := kwdict.get("_extractor"):
            extr = cls.from_url(url)
        else:
            extr = extractor.find(url)

        if extr:
            self.status |= self.__class__(extr, self, self.depth + 1).run()
        else:
            self._write_unsupported(url)
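
    # handle_url_fallback is not referenced in this module; presumably the
    # CLI layer assigns it over handle_url when fallback URLs should be
    # printed as well (each one prefixed with "| ").
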
class InfoJob(Job):
    """Print extractor defaults and settings"""

    def run(self):
        ex = self.extractor
        pm = self._print_multi
        pc = self._print_config

        if ex.basecategory:
            pm("Category / Subcategory / Basecategory",
               ex.category, ex.subcategory, ex.basecategory)
        else:
            pm("Category / Subcategory", ex.category, ex.subcategory)

        pc("Filename format", "filename", ex.filename_fmt)
        pc("Directory format", "directory", ex.directory_fmt)
        pc("Archive format", "archive-format", ex.archive_fmt)
        pc("Request interval", "sleep-request", ex.request_interval)

        return 0

    def _print_multi(self, title, *values):
        stdout_write(
            f"{title}\n  {' / '.join(map(util.json_dumps, values))}\n\n")

    def _print_config(self, title, optname, value):
        optval = self.extractor.config(optname, util.SENTINEL)
        if optval is not util.SENTINEL:
            stdout_write(
                f"{title} (custom):\n  {util.json_dumps(optval)}\n"
                f"{title} (default):\n  {util.json_dumps(value)}\n\n")
        elif value:
            stdout_write(
                f"{title} (default):\n  {util.json_dumps(value)}\n\n")


class DataJob(Job):
    """Collect extractor results and dump them"""
    resolve = False

    def __init__(self, url, parent=None, file=sys.stdout, ensure_ascii=True,
                 resolve=False):
        Job.__init__(self, url, parent)
        self.file = file
        self.data = []
        self.data_urls = []
        self.data_post = []
        self.data_meta = []
        self.exception = None
        self.ascii = config.get(("output",), "ascii", ensure_ascii)
        self.resolve = 128 if resolve is True else (resolve or self.resolve)

        private = config.get(("output",), "private")
        self.filter = dict.copy if private else util.filter_dict

        if self.resolve > 0:
            self.handle_queue = self.handle_queue_resolve

    def run(self):
        self._init()

        extractor = self.extractor
        sleep = util.build_duration_func(
            extractor.config("sleep-extractor"))
        if sleep:
            extractor.sleep(sleep(), "extractor")

        # collect data
        try:
            for msg in extractor:
                self.dispatch(msg)
        except exception.StopExtraction:
            pass
        except Exception as exc:
            self.exception = exc
            self.data.append((-1, {
                "error"  : exc.__class__.__name__,
                "message": str(exc),
            }))
        except BaseException:
            pass

        # convert numbers to string
        if config.get(("output",), "num-to-str", False):
            for msg in self.data:
                util.transform_dict(msg[-1], util.number_to_string)

        if self.file:
            # dump to 'file'
            try:
                util.dump_json(self.data, self.file, self.ascii, 2)
                self.file.flush()
            except Exception:
                pass

        return 0

    def handle_url(self, url, kwdict):
        kwdict = self.filter(kwdict)
        self.data_urls.append(url)
        self.data_meta.append(kwdict)
        self.data.append((Message.Url, url, kwdict))

    def handle_directory(self, kwdict):
        kwdict = self.filter(kwdict)
        self.data_post.append(kwdict)
        self.data.append((Message.Directory, kwdict))

    def handle_queue(self, url, kwdict):
        kwdict = self.filter(kwdict)
        self.data_urls.append(url)
        self.data_meta.append(kwdict)
        self.data.append((Message.Queue, url, kwdict))

    def handle_queue_resolve(self, url, kwdict):
        if cls := kwdict.get("_extractor"):
            extr = cls.from_url(url)
        else:
            extr = extractor.find(url)

        if not extr:
            kwdict = self.filter(kwdict)
            self.data_urls.append(url)
            self.data_meta.append(kwdict)
            return self.data.append((Message.Queue, url, kwdict))

        job = self.__class__(extr, self, None, self.ascii, self.resolve - 1)
        job.data = self.data
        job.data_urls = self.data_urls
        job.data_post = self.data_post
        job.data_meta = self.data_meta
        job.run()