GitHub Repository: automatic1111/stable-diffusion-webui
Path: blob/master/modules/models/diffusion/uni_pc/sampler.py
"""SAMPLING ONLY."""

import torch

from .uni_pc import NoiseScheduleVP, model_wrapper, UniPC
from modules import shared, devices


class UniPCSampler(object):
    def __init__(self, model, **kwargs):
        super().__init__()
        self.model = model
        to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device)
        # hooks are optional; set_hooks() installs them before sampling
        self.before_sample = None
        self.after_sample = None
        self.after_update = None
        self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod))

    def register_buffer(self, name, attr):
        # move tensors onto the configured device before storing them as attributes
        if isinstance(attr, torch.Tensor):
            if attr.device != devices.device:
                attr = attr.to(devices.device)
        setattr(self, name, attr)

    def set_hooks(self, before_sample, after_sample, after_update):
        self.before_sample = before_sample
        self.after_sample = after_sample
        self.after_update = after_update

    @torch.no_grad()
    def sample(self,
               S,
               batch_size,
               shape,
               conditioning=None,
               callback=None,
               normals_sequence=None,
               img_callback=None,
               quantize_x0=False,
               eta=0.,
               mask=None,
               x0=None,
               temperature=1.,
               noise_dropout=0.,
               score_corrector=None,
               corrector_kwargs=None,
               verbose=True,
               x_T=None,
               log_every_t=100,
               unconditional_guidance_scale=1.,
               unconditional_conditioning=None,
               # this has to come in the same format as the conditioning, e.g. as encoded tokens, ...
               **kwargs
               ):
        # sanity-check that the conditioning batch matches the requested batch size
        if conditioning is not None:
            if isinstance(conditioning, dict):
                ctmp = conditioning[list(conditioning.keys())[0]]
                while isinstance(ctmp, list):
                    ctmp = ctmp[0]
                cbs = ctmp.shape[0]
                if cbs != batch_size:
                    print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")

            elif isinstance(conditioning, list):
                for ctmp in conditioning:
                    if ctmp.shape[0] != batch_size:
                        print(f"Warning: Got {ctmp.shape[0]} conditionings but batch-size is {batch_size}")

            else:
                if conditioning.shape[0] != batch_size:
                    print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")

        # sampling
        C, H, W = shape
        size = (batch_size, C, H, W)
        # print(f'Data shape for UniPC sampling is {size}')

        device = self.model.betas.device
        if x_T is None:
            img = torch.randn(size, device=device)
        else:
            img = x_T

        # wrap the model's discrete alphas_cumprod in a VP noise schedule
        ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)

        # SD 1.X is "noise", SD 2.X is "v"
        model_type = "v" if self.model.parameterization == "v" else "noise"

        model_fn = model_wrapper(
            lambda x, t, c: self.model.apply_model(x, t, c),
            ns,
            model_type=model_type,
            guidance_type="classifier-free",
            #condition=conditioning,
            #unconditional_condition=unconditional_conditioning,
            guidance_scale=unconditional_guidance_scale,
        )

        uni_pc = UniPC(
            model_fn,
            ns,
            predict_x0=True,
            thresholding=False,
            variant=shared.opts.uni_pc_variant,
            condition=conditioning,
            unconditional_condition=unconditional_conditioning,
            before_sample=self.before_sample,
            after_sample=self.after_sample,
            after_update=self.after_update,
        )
        x = uni_pc.sample(
            img,
            steps=S,
            skip_type=shared.opts.uni_pc_skip_type,
            method="multistep",
            order=shared.opts.uni_pc_order,
            lower_order_final=shared.opts.uni_pc_lower_order_final,
        )

        return x.to(device), None
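For orientation, a minimal driver sketch (not part of the repository file): it assumes an already-loaded LatentDiffusion model `sd_model` exposing `apply_model`, `alphas_cumprod`, `betas`, and `parameterization`, plus conditioning tensors `cond` and `uncond` prepared elsewhere. The hook signatures shown are assumptions about how uni_pc.py invokes them; in the webui the real callbacks are installed by the hijacked CompVis sampler.

# Hypothetical driver, for illustration only: `sd_model`, `cond`, and `uncond`
# are assumed to exist and are not defined in this file.
sampler = UniPCSampler(sd_model)

# Pass-through hooks; the argument lists are assumptions about the call sites in uni_pc.py.
sampler.set_hooks(
    before_sample=lambda x, t, c, uc: (x, t, c, uc),
    after_sample=lambda x, t, c, uc, res: (x, t, c, uc, res),
    after_update=lambda x, model_x: None,
)

# Two samples in the 4x64x64 SD latent space, 20 UniPC steps, CFG scale 7.
samples, _ = sampler.sample(
    20,                # S: number of sampling steps
    2,                 # batch_size
    (4, 64, 64),       # shape: (C, H, W) of the latent
    conditioning=cond,
    unconditional_conditioning=uncond,
    unconditional_guidance_scale=7.0,
)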