GitHub Repository: automatic1111/stable-diffusion-webui
Path: blob/master/modules/initialize.py
import importlib
import logging
import os
import sys
import warnings
from threading import Thread

from modules.timer import startup_timer

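# Note: heavyweight libraries (torch, pytorch_lightning, gradio, ldm, sgm, ...) are
# imported inside the functions below rather than at module level, so each import can
# be timed separately and code paths that never call imports() stay fast.
# startup_timer.record(label) logs the time elapsed since the previous record under
# the given label (see modules/timer.py).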
def imports():
    logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR)  # sshh...
    logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())

    import torch  # noqa: F401
    startup_timer.record("import torch")
    import pytorch_lightning  # noqa: F401
    startup_timer.record("import pytorch_lightning")
    warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning")
    warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision")

    os.environ.setdefault('GRADIO_ANALYTICS_ENABLED', 'False')
    import gradio  # noqa: F401
    startup_timer.record("import gradio")

    from modules import paths, timer, import_hook, errors  # noqa: F401
    startup_timer.record("setup paths")

    import ldm.modules.encoders.modules  # noqa: F401
    startup_timer.record("import ldm")

    import sgm.modules.encoders.modules  # noqa: F401
    startup_timer.record("import sgm")

    from modules import shared_init
    shared_init.initialize()
    startup_timer.record("initialize shared")

    from modules import processing, gradio_extensons, ui  # noqa: F401
    startup_timer.record("other imports")

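# check_versions() delegates to modules.errors.check_versions(), which in this
# revision warns when the installed torch/xformers versions differ from the ones the
# webui expects; passing --skip-version-check suppresses the check.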
def check_versions():
    from modules.shared_cmd_options import cmd_opts

    if not cmd_opts.skip_version_check:
        from modules import errors
        errors.check_versions()

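# initialize() runs once at server startup, before the Gradio UI is built: it applies
# environment/compatibility fixes, sets up the face-restoration models (CodeFormer,
# GFPGAN), and then defers to initialize_rest() for everything that also has to
# happen again when the UI is reloaded.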
def initialize():
    from modules import initialize_util
    initialize_util.fix_torch_version()
    initialize_util.fix_pytorch_lightning()
    initialize_util.fix_asyncio_event_loop_policy()
    initialize_util.validate_tls_options()
    initialize_util.configure_sigint_handler()
    initialize_util.configure_opts_onchange()

    from modules import sd_models
    sd_models.setup_model()
    startup_timer.record("setup SD model")

    from modules.shared_cmd_options import cmd_opts

    from modules import codeformer_model
    warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision.transforms.functional_tensor")
    codeformer_model.setup_model(cmd_opts.codeformer_models_path)
    startup_timer.record("setup codeformer")

    from modules import gfpgan_model
    gfpgan_model.setup_model(cmd_opts.gfpgan_models_path)
    startup_timer.record("setup gfpgan")

    initialize_rest(reload_script_modules=False)

def initialize_rest(*, reload_script_modules=False):
    """
    Called both from initialize() and when reloading the webui.
    """
    from modules.shared_cmd_options import cmd_opts

    from modules import sd_samplers
    sd_samplers.set_samplers()
    startup_timer.record("set samplers")

    from modules import extensions
    extensions.list_extensions()
    startup_timer.record("list extensions")

    from modules import initialize_util
    initialize_util.restore_config_state_file()
    startup_timer.record("restore config state file")

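    # --ui-debug-mode launches the interface without enumerating or loading any
    # Stable Diffusion checkpoints: only the built-in Lanczos upscaler is registered
    # and scripts are still loaded, so the UI itself can be developed and tested
    # quickly.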
    from modules import shared, upscaler, scripts
    if cmd_opts.ui_debug_mode:
        shared.sd_upscalers = upscaler.UpscalerLanczos().scalers
        scripts.load_scripts()
        return

    from modules import sd_models
    sd_models.list_models()
    startup_timer.record("list SD models")

    from modules import localization
    localization.list_localizations(cmd_opts.localizations_dir)
    startup_timer.record("list localizations")

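    # Script loading is timed as a subcategory so the startup log can attribute time
    # to individual scripts and extensions. On a UI reload (reload_script_modules is
    # True), the modules.ui* Python modules are re-imported as well, provided the
    # enable_reloading_ui_scripts option is enabled.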
    with startup_timer.subcategory("load scripts"):
        scripts.load_scripts()

    if reload_script_modules and shared.opts.enable_reloading_ui_scripts:
        for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]:
            importlib.reload(module)
        startup_timer.record("reload script modules")

    from modules import modelloader
    modelloader.load_upscalers()
    startup_timer.record("load upscalers")

    from modules import sd_vae
    sd_vae.refresh_vae_list()
    startup_timer.record("refresh VAE")

    from modules import textual_inversion
    textual_inversion.textual_inversion.list_textual_inversion_templates()
    startup_timer.record("refresh textual inversion templates")

    from modules import script_callbacks, sd_hijack_optimizations, sd_hijack
    script_callbacks.on_list_optimizers(sd_hijack_optimizations.list_optimizers)
    sd_hijack.list_optimizers()
    startup_timer.record("scripts list_optimizers")

    from modules import sd_unet
    sd_unet.list_unets()
    startup_timer.record("scripts list_unets")

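    # load_model() is defined as a closure and normally started on a background
    # thread, so the server and Gradio UI become responsive while the (potentially
    # multi-gigabyte) checkpoint is still being read; the first access to
    # shared.sd_model is what actually triggers the load.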
    def load_model():
        """
        Accesses the shared.sd_model property to load the model.
        If some extension has already loaded the model before this access, its
        optimization may be None because the list of optimizers had not been filled
        by that time, so we apply optimizations again.
        """
        from modules import devices
        devices.torch_npu_set_device()

        shared.sd_model  # noqa: B018

        if sd_hijack.current_optimizer is None:
            sd_hijack.apply_optimizations()

        devices.first_time_calculation()
    if not shared.cmd_opts.skip_load_model_at_start:
        Thread(target=load_model).start()

    from modules import shared_items
    shared_items.reload_hypernetworks()
    startup_timer.record("reload hypernetworks")

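    # Register the built-in "extra networks" handlers and their UI pages (textual
    # inversion embeddings, hypernetworks, checkpoints); extensions such as the
    # built-in Lora extension register additional pages later via script callbacks.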
    from modules import ui_extra_networks
    ui_extra_networks.initialize()
    ui_extra_networks.register_default_pages()

    from modules import extra_networks
    extra_networks.initialize()
    extra_networks.register_default_extra_networks()
    startup_timer.record("initialize extra networks")