Path: blob/master/extensions-builtin/Lora/networks.py
from __future__ import annotations
import gradio as gr
import logging
import os
import re

import lora_patches
import network
import network_lora
import network_glora
import network_hada
import network_ia3
import network_lokr
import network_full
import network_norm
import network_oft

import torch
from typing import Union

from modules import shared, devices, sd_models, errors, scripts, sd_hijack
import modules.textual_inversion.textual_inversion as textual_inversion
import modules.models.sd3.mmdit

from lora_logger import logger

module_types = [
    network_lora.ModuleTypeLora(),
    network_hada.ModuleTypeHada(),
    network_ia3.ModuleTypeIa3(),
    network_lokr.ModuleTypeLokr(),
    network_full.ModuleTypeFull(),
    network_norm.ModuleTypeNorm(),
    network_glora.ModuleTypeGLora(),
    network_oft.ModuleTypeOFT(),
]


re_digits = re.compile(r"\d+")
re_x_proj = re.compile(r"(.*)_([qkv]_proj)$")
re_compiled = {}

suffix_conversion = {
    "attentions": {},
    "resnets": {
        "conv1": "in_layers_2",
        "conv2": "out_layers_3",
        "norm1": "in_layers_0",
        "norm2": "out_layers_0",
        "time_emb_proj": "emb_layers_1",
        "conv_shortcut": "skip_connection",
    }
}


def convert_diffusers_name_to_compvis(key, is_sd2):
    def match(match_list, regex_text):
        regex = re_compiled.get(regex_text)
        if regex is None:
            regex = re.compile(regex_text)
            re_compiled[regex_text] = regex

        r = re.match(regex, key)
        if not r:
            return False

        match_list.clear()
        match_list.extend([int(x) if re.match(re_digits, x) else x for x in r.groups()])
        return True

    m = []

    if match(m, r"lora_unet_conv_in(.*)"):
        return f'diffusion_model_input_blocks_0_0{m[0]}'

    if match(m, r"lora_unet_conv_out(.*)"):
        return f'diffusion_model_out_2{m[0]}'

    if match(m, r"lora_unet_time_embedding_linear_(\d+)(.*)"):
        return f"diffusion_model_time_embed_{m[0] * 2 - 2}{m[1]}"

    if match(m, r"lora_unet_down_blocks_(\d+)_(attentions|resnets)_(\d+)_(.+)"):
        suffix = suffix_conversion.get(m[1], {}).get(m[3], m[3])
        return f"diffusion_model_input_blocks_{1 + m[0] * 3 + m[2]}_{1 if m[1] == 'attentions' else 0}_{suffix}"

    if match(m, r"lora_unet_mid_block_(attentions|resnets)_(\d+)_(.+)"):
        suffix = suffix_conversion.get(m[0], {}).get(m[2], m[2])
        return f"diffusion_model_middle_block_{1 if m[0] == 'attentions' else m[1] * 2}_{suffix}"

    if match(m, r"lora_unet_up_blocks_(\d+)_(attentions|resnets)_(\d+)_(.+)"):
        suffix = suffix_conversion.get(m[1], {}).get(m[3], m[3])
        return f"diffusion_model_output_blocks_{m[0] * 3 + m[2]}_{1 if m[1] == 'attentions' else 0}_{suffix}"

    if match(m, r"lora_unet_down_blocks_(\d+)_downsamplers_0_conv"):
        return f"diffusion_model_input_blocks_{3 + m[0] * 3}_0_op"

    if match(m, r"lora_unet_up_blocks_(\d+)_upsamplers_0_conv"):
        return f"diffusion_model_output_blocks_{2 + m[0] * 3}_{2 if m[0]>0 else 1}_conv"

    if match(m, r"lora_te_text_model_encoder_layers_(\d+)_(.+)"):
        if is_sd2:
            if 'mlp_fc1' in m[1]:
                return f"model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc1', 'mlp_c_fc')}"
            elif 'mlp_fc2' in m[1]:
                return f"model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc2', 'mlp_c_proj')}"
            else:
                return f"model_transformer_resblocks_{m[0]}_{m[1].replace('self_attn', 'attn')}"

        return f"transformer_text_model_encoder_layers_{m[0]}_{m[1]}"

    if match(m, r"lora_te2_text_model_encoder_layers_(\d+)_(.+)"):
        if 'mlp_fc1' in m[1]:
            return f"1_model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc1', 'mlp_c_fc')}"
        elif 'mlp_fc2' in m[1]:
            return f"1_model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc2', 'mlp_c_proj')}"
        else:
            return f"1_model_transformer_resblocks_{m[0]}_{m[1].replace('self_attn', 'attn')}"

    return key


def assign_network_names_to_compvis_modules(sd_model):
    network_layer_mapping = {}

    if shared.sd_model.is_sdxl:
        for i, embedder in enumerate(shared.sd_model.conditioner.embedders):
            if not hasattr(embedder, 'wrapped'):
                continue

            for name, module in embedder.wrapped.named_modules():
                network_name = f'{i}_{name.replace(".", "_")}'
                network_layer_mapping[network_name] = module
                module.network_layer_name = network_name
    else:
        cond_stage_model = getattr(shared.sd_model.cond_stage_model, 'wrapped', shared.sd_model.cond_stage_model)

        for name, module in cond_stage_model.named_modules():
            network_name = name.replace(".", "_")
            network_layer_mapping[network_name] = module
            module.network_layer_name = network_name

    for name, module in shared.sd_model.model.named_modules():
        network_name = name.replace(".", "_")
        network_layer_mapping[network_name] = module
        module.network_layer_name = network_name

    sd_model.network_layer_mapping = network_layer_mapping


class BundledTIHash(str):
    def __init__(self, hash_str):
        self.hash = hash_str

    def __str__(self):
        return self.hash if shared.opts.lora_bundled_ti_to_infotext else ''


def load_network(name, network_on_disk):
    net = network.Network(name, network_on_disk)
    net.mtime = os.path.getmtime(network_on_disk.filename)

    sd = sd_models.read_state_dict(network_on_disk.filename)

    # this should not be needed but is here as an emergency fix for an unknown error people are experiencing in 1.2.0
    if not hasattr(shared.sd_model, 'network_layer_mapping'):
        assign_network_names_to_compvis_modules(shared.sd_model)

    keys_failed_to_match = {}
    is_sd2 = 'model_transformer_resblocks' in shared.sd_model.network_layer_mapping

    if hasattr(shared.sd_model, 'diffusers_weight_map'):
        diffusers_weight_map = shared.sd_model.diffusers_weight_map
    elif hasattr(shared.sd_model, 'diffusers_weight_mapping'):
        diffusers_weight_map = {}
        for k, v in shared.sd_model.diffusers_weight_mapping():
            diffusers_weight_map[k] = v
        shared.sd_model.diffusers_weight_map = diffusers_weight_map
    else:
        diffusers_weight_map = None

    matched_networks = {}
    bundle_embeddings = {}

    for key_network, weight in sd.items():

        if diffusers_weight_map:
            key_network_without_network_parts, network_name, network_weight = key_network.rsplit(".", 2)
            network_part = network_name + '.' + network_weight
        else:
            key_network_without_network_parts, _, network_part = key_network.partition(".")

        if key_network_without_network_parts == "bundle_emb":
            emb_name, vec_name = network_part.split(".", 1)
            emb_dict = bundle_embeddings.get(emb_name, {})
            if vec_name.split('.')[0] == 'string_to_param':
                _, k2 = vec_name.split('.', 1)
                emb_dict['string_to_param'] = {k2: weight}
            else:
                emb_dict[vec_name] = weight
            bundle_embeddings[emb_name] = emb_dict

        if diffusers_weight_map:
            key = diffusers_weight_map.get(key_network_without_network_parts, key_network_without_network_parts)
        else:
            key = convert_diffusers_name_to_compvis(key_network_without_network_parts, is_sd2)

        sd_module = shared.sd_model.network_layer_mapping.get(key, None)

        if sd_module is None:
            m = re_x_proj.match(key)
            if m:
                sd_module = shared.sd_model.network_layer_mapping.get(m.group(1), None)

        # SDXL loras seem to already have correct compvis keys, so only need to replace "lora_unet" with "diffusion_model"
        if sd_module is None and "lora_unet" in key_network_without_network_parts:
            key = key_network_without_network_parts.replace("lora_unet", "diffusion_model")
            sd_module = shared.sd_model.network_layer_mapping.get(key, None)
        elif sd_module is None and "lora_te1_text_model" in key_network_without_network_parts:
            key = key_network_without_network_parts.replace("lora_te1_text_model", "0_transformer_text_model")
            sd_module = shared.sd_model.network_layer_mapping.get(key, None)

            # some SD1 Loras also have correct compvis keys
            if sd_module is None:
                key = key_network_without_network_parts.replace("lora_te1_text_model", "transformer_text_model")
                sd_module = shared.sd_model.network_layer_mapping.get(key, None)

        # kohya_ss OFT module
        elif sd_module is None and "oft_unet" in key_network_without_network_parts:
            key = key_network_without_network_parts.replace("oft_unet", "diffusion_model")
            sd_module = shared.sd_model.network_layer_mapping.get(key, None)

        # KohakuBlueLeaf OFT module
        if sd_module is None and "oft_diag" in key:
            key = key_network_without_network_parts.replace("lora_unet", "diffusion_model")
            key = key_network_without_network_parts.replace("lora_te1_text_model", "0_transformer_text_model")
            sd_module = shared.sd_model.network_layer_mapping.get(key, None)

        if sd_module is None:
            keys_failed_to_match[key_network] = key
            continue

        if key not in matched_networks:
            matched_networks[key] = network.NetworkWeights(network_key=key_network, sd_key=key, w={}, sd_module=sd_module)

        matched_networks[key].w[network_part] = weight

    for key, weights in matched_networks.items():
        net_module = None
        for nettype in module_types:
            net_module = nettype.create_module(net, weights)
            if net_module is not None:
                break

        if net_module is None:
            raise AssertionError(f"Could not find a module type (out of {', '.join([x.__class__.__name__ for x in module_types])}) that would accept those keys: {', '.join(weights.w)}")

        net.modules[key] = net_module

    embeddings = {}
    for emb_name, data in bundle_embeddings.items():
        embedding = textual_inversion.create_embedding_from_data(data, emb_name, filename=network_on_disk.filename + "/" + emb_name)
        embedding.loaded = None
        embedding.shorthash = BundledTIHash(name)
        embeddings[emb_name] = embedding

    net.bundle_embeddings = embeddings

    if keys_failed_to_match:
        logging.debug(f"Network {network_on_disk.filename} didn't match keys: {keys_failed_to_match}")

    return net


def purge_networks_from_memory():
    while len(networks_in_memory) > shared.opts.lora_in_memory_limit and len(networks_in_memory) > 0:
        name = next(iter(networks_in_memory))
        networks_in_memory.pop(name, None)

    devices.torch_gc()


def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):
    emb_db = sd_hijack.model_hijack.embedding_db
    already_loaded = {}

    for net in loaded_networks:
        if net.name in names:
            already_loaded[net.name] = net
        for emb_name, embedding in net.bundle_embeddings.items():
            if embedding.loaded:
                emb_db.register_embedding_by_name(None, shared.sd_model, emb_name)

    loaded_networks.clear()

    unavailable_networks = []
    for name in names:
        if name.lower() in forbidden_network_aliases and available_networks.get(name) is None:
            unavailable_networks.append(name)
        elif available_network_aliases.get(name) is None:
            unavailable_networks.append(name)

    if unavailable_networks:
        update_available_networks_by_names(unavailable_networks)

    networks_on_disk = [available_networks.get(name, None) if name.lower() in forbidden_network_aliases else available_network_aliases.get(name, None) for name in names]
    if any(x is None for x in networks_on_disk):
        list_available_networks()

        networks_on_disk = [available_networks.get(name, None) if name.lower() in forbidden_network_aliases else available_network_aliases.get(name, None) for name in names]

    failed_to_load_networks = []

    for i, (network_on_disk, name) in enumerate(zip(networks_on_disk, names)):
        net = already_loaded.get(name, None)

        if network_on_disk is not None:
            if net is None:
                net = networks_in_memory.get(name)

            if net is None or os.path.getmtime(network_on_disk.filename) > net.mtime:
                try:
                    net = load_network(name, network_on_disk)

                    networks_in_memory.pop(name, None)
                    networks_in_memory[name] = net
                except Exception as e:
                    errors.display(e, f"loading network {network_on_disk.filename}")
                    continue

            net.mentioned_name = name

            network_on_disk.read_hash()

        if net is None:
            failed_to_load_networks.append(name)
            logging.info(f"Couldn't find network with name {name}")
            continue

        net.te_multiplier = te_multipliers[i] if te_multipliers else 1.0
        net.unet_multiplier = unet_multipliers[i] if unet_multipliers else 1.0
        net.dyn_dim = dyn_dims[i] if dyn_dims else 1.0
        loaded_networks.append(net)

        for emb_name, embedding in net.bundle_embeddings.items():
            if embedding.loaded is None and emb_name in emb_db.word_embeddings:
                logger.warning(
                    f'Skip bundle embedding: "{emb_name}"'
                    ' as it was already loaded from embeddings folder'
                )
                continue

            embedding.loaded = False
            if emb_db.expected_shape == -1 or emb_db.expected_shape == embedding.shape:
                embedding.loaded = True
                emb_db.register_embedding(embedding, shared.sd_model)
            else:
                emb_db.skipped_embeddings[name] = embedding

    if failed_to_load_networks:
        lora_not_found_message = f'Lora not found: {", ".join(failed_to_load_networks)}'
        sd_hijack.model_hijack.comments.append(lora_not_found_message)
        if shared.opts.lora_not_found_warning_console:
            print(f'\n{lora_not_found_message}\n')
        if shared.opts.lora_not_found_gradio_warning:
            gr.Warning(lora_not_found_message)

    purge_networks_from_memory()


def allowed_layer_without_weight(layer):
    if isinstance(layer, torch.nn.LayerNorm) and not layer.elementwise_affine:
        return True

    return False


def store_weights_backup(weight):
    if weight is None:
        return None

    return weight.to(devices.cpu, copy=True)


def restore_weights_backup(obj, field, weight):
    if weight is None:
        setattr(obj, field, None)
        return

    getattr(obj, field).copy_(weight)


def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]):
    weights_backup = getattr(self, "network_weights_backup", None)
    bias_backup = getattr(self, "network_bias_backup", None)

    if weights_backup is None and bias_backup is None:
        return

    if weights_backup is not None:
        if isinstance(self, torch.nn.MultiheadAttention):
            restore_weights_backup(self, 'in_proj_weight', weights_backup[0])
            restore_weights_backup(self.out_proj, 'weight', weights_backup[1])
        else:
            restore_weights_backup(self, 'weight', weights_backup)

    if isinstance(self, torch.nn.MultiheadAttention):
        restore_weights_backup(self.out_proj, 'bias', bias_backup)
    else:
        restore_weights_backup(self, 'bias', bias_backup)


def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]):
    """
    Applies the currently selected set of networks to the weights of torch layer self.
    If weights already have this particular set of networks applied, does nothing.
    If not, restores original weights from backup and alters weights according to networks.
    """

    network_layer_name = getattr(self, 'network_layer_name', None)
    if network_layer_name is None:
        return

    current_names = getattr(self, "network_current_names", ())
    wanted_names = tuple((x.name, x.te_multiplier, x.unet_multiplier, x.dyn_dim) for x in loaded_networks)

    weights_backup = getattr(self, "network_weights_backup", None)
    if weights_backup is None and wanted_names != ():
        if current_names != () and not allowed_layer_without_weight(self):
            raise RuntimeError(f"{network_layer_name} - no backup weights found and current weights are not unchanged")

        if isinstance(self, torch.nn.MultiheadAttention):
            weights_backup = (store_weights_backup(self.in_proj_weight), store_weights_backup(self.out_proj.weight))
        else:
            weights_backup = store_weights_backup(self.weight)

        self.network_weights_backup = weights_backup

    bias_backup = getattr(self, "network_bias_backup", None)
    if bias_backup is None and wanted_names != ():
        if isinstance(self, torch.nn.MultiheadAttention) and self.out_proj.bias is not None:
            bias_backup = store_weights_backup(self.out_proj.bias)
        elif getattr(self, 'bias', None) is not None:
            bias_backup = store_weights_backup(self.bias)
        else:
            bias_backup = None

        # Unlike weight which always has value, some modules don't have bias.
        # Only report if bias is not None and current bias are not unchanged.
        if bias_backup is not None and current_names != ():
            raise RuntimeError("no backup bias found and current bias are not unchanged")

        self.network_bias_backup = bias_backup

    if current_names != wanted_names:
        network_restore_weights_from_backup(self)

        for net in loaded_networks:
            module = net.modules.get(network_layer_name, None)
            if module is not None and hasattr(self, 'weight') and not isinstance(module, modules.models.sd3.mmdit.QkvLinear):
                try:
                    with torch.no_grad():
                        if getattr(self, 'fp16_weight', None) is None:
                            weight = self.weight
                            bias = self.bias
                        else:
                            weight = self.fp16_weight.clone().to(self.weight.device)
                            bias = getattr(self, 'fp16_bias', None)
                            if bias is not None:
                                bias = bias.clone().to(self.bias.device)
                        updown, ex_bias = module.calc_updown(weight)

                        if len(weight.shape) == 4 and weight.shape[1] == 9:
                            # inpainting model. zero pad updown to make channel[1] 4 to 9
                            updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5))

                        self.weight.copy_((weight.to(dtype=updown.dtype) + updown).to(dtype=self.weight.dtype))
                        if ex_bias is not None and hasattr(self, 'bias'):
                            if self.bias is None:
                                self.bias = torch.nn.Parameter(ex_bias).to(self.weight.dtype)
                            else:
                                self.bias.copy_((bias + ex_bias).to(dtype=self.bias.dtype))
                except RuntimeError as e:
                    logging.debug(f"Network {net.name} layer {network_layer_name}: {e}")
                    extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1

                continue

            module_q = net.modules.get(network_layer_name + "_q_proj", None)
            module_k = net.modules.get(network_layer_name + "_k_proj", None)
            module_v = net.modules.get(network_layer_name + "_v_proj", None)
            module_out = net.modules.get(network_layer_name + "_out_proj", None)

            if isinstance(self, torch.nn.MultiheadAttention) and module_q and module_k and module_v and module_out:
                try:
                    with torch.no_grad():
                        # Send "real" orig_weight into MHA's lora module
                        qw, kw, vw = self.in_proj_weight.chunk(3, 0)
                        updown_q, _ = module_q.calc_updown(qw)
                        updown_k, _ = module_k.calc_updown(kw)
                        updown_v, _ = module_v.calc_updown(vw)
                        del qw, kw, vw
                        updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
                        updown_out, ex_bias = module_out.calc_updown(self.out_proj.weight)

                        self.in_proj_weight += updown_qkv
                        self.out_proj.weight += updown_out
                        if ex_bias is not None:
                            if self.out_proj.bias is None:
                                self.out_proj.bias = torch.nn.Parameter(ex_bias)
                            else:
                                self.out_proj.bias += ex_bias

                except RuntimeError as e:
                    logging.debug(f"Network {net.name} layer {network_layer_name}: {e}")
                    extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1

                continue

            if isinstance(self, modules.models.sd3.mmdit.QkvLinear) and module_q and module_k and module_v:
                try:
                    with torch.no_grad():
                        # Send "real" orig_weight into MHA's lora module
                        qw, kw, vw = self.weight.chunk(3, 0)
                        updown_q, _ = module_q.calc_updown(qw)
                        updown_k, _ = module_k.calc_updown(kw)
                        updown_v, _ = module_v.calc_updown(vw)
                        del qw, kw, vw
                        updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
                        self.weight += updown_qkv

                except RuntimeError as e:
                    logging.debug(f"Network {net.name} layer {network_layer_name}: {e}")
                    extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1

                continue

            if module is None:
                continue

            logging.debug(f"Network {net.name} layer {network_layer_name}: couldn't find supported operation")
            extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1

        self.network_current_names = wanted_names


def network_forward(org_module, input, original_forward):
    """
    Old way of applying Lora by executing operations during layer's forward.
    Stacking many loras this way results in big performance degradation.
    """

    if len(loaded_networks) == 0:
        return original_forward(org_module, input)

    input = devices.cond_cast_unet(input)

    network_restore_weights_from_backup(org_module)
    network_reset_cached_weight(org_module)

    y = original_forward(org_module, input)

    network_layer_name = getattr(org_module, 'network_layer_name', None)
    for lora in loaded_networks:
        module = lora.modules.get(network_layer_name, None)
        if module is None:
            continue

        y = module.forward(input, y)

    return y


def network_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
    self.network_current_names = ()
    self.network_weights_backup = None
    self.network_bias_backup = None


def network_Linear_forward(self, input):
    if shared.opts.lora_functional:
        return network_forward(self, input, originals.Linear_forward)

    network_apply_weights(self)

    return originals.Linear_forward(self, input)


def network_Linear_load_state_dict(self, *args, **kwargs):
    network_reset_cached_weight(self)

    return originals.Linear_load_state_dict(self, *args, **kwargs)


def network_Conv2d_forward(self, input):
    if shared.opts.lora_functional:
        return network_forward(self, input, originals.Conv2d_forward)

    network_apply_weights(self)

    return originals.Conv2d_forward(self, input)


def network_Conv2d_load_state_dict(self, *args, **kwargs):
    network_reset_cached_weight(self)

    return originals.Conv2d_load_state_dict(self, *args, **kwargs)


def network_GroupNorm_forward(self, input):
    if shared.opts.lora_functional:
        return network_forward(self, input, originals.GroupNorm_forward)

    network_apply_weights(self)

    return originals.GroupNorm_forward(self, input)


def network_GroupNorm_load_state_dict(self, *args, **kwargs):
    network_reset_cached_weight(self)

    return originals.GroupNorm_load_state_dict(self, *args, **kwargs)


def network_LayerNorm_forward(self, input):
    if shared.opts.lora_functional:
        return network_forward(self, input, originals.LayerNorm_forward)

    network_apply_weights(self)

    return originals.LayerNorm_forward(self, input)


def network_LayerNorm_load_state_dict(self, *args, **kwargs):
    network_reset_cached_weight(self)

    return originals.LayerNorm_load_state_dict(self, *args, **kwargs)


def network_MultiheadAttention_forward(self, *args, **kwargs):
    network_apply_weights(self)

    return originals.MultiheadAttention_forward(self, *args, **kwargs)


def network_MultiheadAttention_load_state_dict(self, *args, **kwargs):
    network_reset_cached_weight(self)

    return originals.MultiheadAttention_load_state_dict(self, *args, **kwargs)


def process_network_files(names: list[str] | None = None):
    candidates = list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
    candidates += list(shared.walk_files(shared.cmd_opts.lyco_dir_backcompat, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
    for filename in candidates:
        if os.path.isdir(filename):
            continue
        name = os.path.splitext(os.path.basename(filename))[0]
        # if names is provided, only load networks with names in the list
        if names and name not in names:
            continue
        try:
            entry = network.NetworkOnDisk(name, filename)
        except OSError:  # should catch FileNotFoundError and PermissionError etc.
            errors.report(f"Failed to load network {name} from {filename}", exc_info=True)
            continue

        available_networks[name] = entry

        if entry.alias in available_network_aliases:
            forbidden_network_aliases[entry.alias.lower()] = 1

        available_network_aliases[name] = entry
        available_network_aliases[entry.alias] = entry


def update_available_networks_by_names(names: list[str]):
    process_network_files(names)


def list_available_networks():
    available_networks.clear()
    available_network_aliases.clear()
    forbidden_network_aliases.clear()
    available_network_hash_lookup.clear()
    forbidden_network_aliases.update({"none": 1, "Addams": 1})

    os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)

    process_network_files()


re_network_name = re.compile(r"(.*)\s*\([0-9a-fA-F]+\)")


def infotext_pasted(infotext, params):
    if "AddNet Module 1" in [x[1] for x in scripts.scripts_txt2img.infotext_fields]:
        return  # if the other extension is active, it will handle those fields, no need to do anything

    added = []

    for k in params:
        if not k.startswith("AddNet Model "):
            continue

        num = k[13:]

        if params.get("AddNet Module " + num) != "LoRA":
            continue

        name = params.get("AddNet Model " + num)
        if name is None:
            continue

        m = re_network_name.match(name)
        if m:
            name = m.group(1)

        multiplier = params.get("AddNet Weight A " + num, "1.0")

        added.append(f"<lora:{name}:{multiplier}>")

    if added:
        params["Prompt"] += "\n" + "".join(added)


originals: lora_patches.LoraPatches = None

extra_network_lora = None

available_networks = {}
available_network_aliases = {}
loaded_networks = []
loaded_bundle_embeddings = {}
networks_in_memory = {}
available_network_hash_lookup = {}
forbidden_network_aliases = {}

list_available_networks()
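
# Illustrative usage sketch, kept as comments so the module stays valid Python.
# The caller name and multiplier values below are assumptions for illustration,
# not part of this file: a prompt handler for <lora:name:multiplier> tags would
# activate networks roughly like this:
#
#     load_networks(["some_lora"], te_multipliers=[0.8], unet_multipliers=[0.8])
#
# Afterwards, the patched layer forwards defined above (network_Linear_forward,
# network_Conv2d_forward, etc.) call network_apply_weights(self), which backs up
# the original weights and bakes the LoRA deltas into each mapped layer before
# delegating to the original forward stored in `originals`.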