GitHub Repository: AUTOMATIC1111/stable-diffusion-webui
Path: blob/master/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py
1
# This script is copied from the compvis/stable-diffusion repo (aka the SD V1 repo)
2
# Original filename: ldm/models/diffusion/ddpm.py
3
# The purpose is to reinstate the old DDPM logic, which works with VQ, whereas the V2 one doesn't
4
# Some models such as LDSR require VQ to work correctly
5
# The classes are suffixed with "V1" and added back to the "ldm.models.diffusion.ddpm" module
6
7
import torch
8
import torch.nn as nn
9
import numpy as np
10
import pytorch_lightning as pl
11
from torch.optim.lr_scheduler import LambdaLR
12
from einops import rearrange, repeat
13
from contextlib import contextmanager
14
from functools import partial
15
from tqdm import tqdm
16
from torchvision.utils import make_grid
17
from pytorch_lightning.utilities.distributed import rank_zero_only
18
19
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
20
from ldm.modules.ema import LitEma
21
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
22
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
23
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
24
from ldm.models.diffusion.ddim import DDIMSampler
25
26
import ldm.models.diffusion.ddpm
27
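# Maps a conditioning mode to the keyword argument that DiffusionWrapperV1.forward
# expects for that mode ('concat' -> c_concat, 'crossattn' -> c_crossattn, 'adm' -> y).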
28
__conditioning_keys__ = {'concat': 'c_concat',
29
'crossattn': 'c_crossattn',
30
'adm': 'y'}
31
32
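# Assigned over .train() of frozen submodules (see instantiate_first_stage /
# instantiate_cond_stage below) so that calling .train() on them is a no-op.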
33
def disabled_train(self, mode=True):
34
"""Overwrite model.train with this function to make sure train/eval mode
35
does not change anymore."""
36
return self
37
38
39
def uniform_on_device(r1, r2, shape, device):
40
return (r1 - r2) * torch.rand(*shape, device=device) + r2
41
42
43
class DDPMV1(pl.LightningModule):
44
# classic DDPM with Gaussian diffusion, in image space
45
def __init__(self,
46
unet_config,
47
timesteps=1000,
48
beta_schedule="linear",
49
loss_type="l2",
50
ckpt_path=None,
51
ignore_keys=None,
52
load_only_unet=False,
53
monitor="val/loss",
54
use_ema=True,
55
first_stage_key="image",
56
image_size=256,
57
channels=3,
58
log_every_t=100,
59
clip_denoised=True,
60
linear_start=1e-4,
61
linear_end=2e-2,
62
cosine_s=8e-3,
63
given_betas=None,
64
original_elbo_weight=0.,
65
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
66
l_simple_weight=1.,
67
conditioning_key=None,
68
parameterization="eps", # all assuming fixed variance schedules
69
scheduler_config=None,
70
use_positional_encodings=False,
71
learn_logvar=False,
72
logvar_init=0.,
73
):
74
super().__init__()
75
assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"'
76
self.parameterization = parameterization
77
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
78
self.cond_stage_model = None
79
self.clip_denoised = clip_denoised
80
self.log_every_t = log_every_t
81
self.first_stage_key = first_stage_key
82
self.image_size = image_size # try conv?
83
self.channels = channels
84
self.use_positional_encodings = use_positional_encodings
85
self.model = DiffusionWrapperV1(unet_config, conditioning_key)
86
count_params(self.model, verbose=True)
87
self.use_ema = use_ema
88
if self.use_ema:
89
self.model_ema = LitEma(self.model)
90
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
91
92
self.use_scheduler = scheduler_config is not None
93
if self.use_scheduler:
94
self.scheduler_config = scheduler_config
95
96
self.v_posterior = v_posterior
97
self.original_elbo_weight = original_elbo_weight
98
self.l_simple_weight = l_simple_weight
99
100
if monitor is not None:
101
self.monitor = monitor
102
if ckpt_path is not None:
103
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [], only_model=load_only_unet)
104
105
self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
106
linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
107
108
self.loss_type = loss_type
109
110
self.learn_logvar = learn_logvar
111
self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
112
if self.learn_logvar:
113
self.logvar = nn.Parameter(self.logvar, requires_grad=True)
114
115
116
def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
117
linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
118
if exists(given_betas):
119
betas = given_betas
120
else:
121
betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
122
cosine_s=cosine_s)
123
alphas = 1. - betas
124
alphas_cumprod = np.cumprod(alphas, axis=0)
125
alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
126
127
timesteps, = betas.shape
128
self.num_timesteps = int(timesteps)
129
self.linear_start = linear_start
130
self.linear_end = linear_end
131
assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
132
133
to_torch = partial(torch.tensor, dtype=torch.float32)
134
135
self.register_buffer('betas', to_torch(betas))
136
self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
137
self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
138
139
# calculations for diffusion q(x_t | x_{t-1}) and others
140
self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
141
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
142
self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
143
self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
144
self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
145
146
# calculations for posterior q(x_{t-1} | x_t, x_0)
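        # with v_posterior == 0 this reduces to the standard DDPM posterior variance
        # beta_tilde_t = beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)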
147
posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
148
1. - alphas_cumprod) + self.v_posterior * betas
149
# above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
150
self.register_buffer('posterior_variance', to_torch(posterior_variance))
151
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
152
self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
153
self.register_buffer('posterior_mean_coef1', to_torch(
154
betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
155
self.register_buffer('posterior_mean_coef2', to_torch(
156
(1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
157
158
if self.parameterization == "eps":
159
lvlb_weights = self.betas ** 2 / (
160
2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
161
elif self.parameterization == "x0":
162
lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
163
else:
164
raise NotImplementedError("mu not supported")
165
# TODO how to choose this term
166
lvlb_weights[0] = lvlb_weights[1]
167
self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
168
assert not torch.isnan(self.lvlb_weights).all()
169
170
@contextmanager
171
def ema_scope(self, context=None):
172
if self.use_ema:
173
self.model_ema.store(self.model.parameters())
174
self.model_ema.copy_to(self.model)
175
if context is not None:
176
print(f"{context}: Switched to EMA weights")
177
try:
178
yield None
179
finally:
180
if self.use_ema:
181
self.model_ema.restore(self.model.parameters())
182
if context is not None:
183
print(f"{context}: Restored training weights")
184
185
def init_from_ckpt(self, path, ignore_keys=None, only_model=False):
186
sd = torch.load(path, map_location="cpu")
187
if "state_dict" in list(sd.keys()):
188
sd = sd["state_dict"]
189
keys = list(sd.keys())
190
for k in keys:
191
for ik in ignore_keys or []:
192
if k.startswith(ik):
193
print("Deleting key {} from state_dict.".format(k))
194
del sd[k]
195
missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
196
sd, strict=False)
197
print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
198
if missing:
199
print(f"Missing Keys: {missing}")
200
if unexpected:
201
print(f"Unexpected Keys: {unexpected}")
202
203
def q_mean_variance(self, x_start, t):
204
"""
205
Get the distribution q(x_t | x_0).
206
:param x_start: the [N x C x ...] tensor of noiseless inputs.
207
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
208
:return: A tuple (mean, variance, log_variance), all of x_start's shape.
209
"""
210
mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
211
variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
212
log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
213
return mean, variance, log_variance
214
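    # Recover x_0 from a noisy sample:
    # x_0 = sqrt(1 / alpha_bar_t) * x_t - sqrt(1 / alpha_bar_t - 1) * eps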
215
def predict_start_from_noise(self, x_t, t, noise):
216
return (
217
extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
218
extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
219
)
220
221
def q_posterior(self, x_start, x_t, t):
222
posterior_mean = (
223
extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
224
extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
225
)
226
posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
227
posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
228
return posterior_mean, posterior_variance, posterior_log_variance_clipped
229
230
def p_mean_variance(self, x, t, clip_denoised: bool):
231
model_out = self.model(x, t)
232
if self.parameterization == "eps":
233
x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
234
elif self.parameterization == "x0":
235
x_recon = model_out
236
if clip_denoised:
237
x_recon.clamp_(-1., 1.)
238
239
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
240
return model_mean, posterior_variance, posterior_log_variance
241
242
@torch.no_grad()
243
def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
244
b, *_, device = *x.shape, x.device
245
model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
246
noise = noise_like(x.shape, device, repeat_noise)
247
# no noise when t == 0
248
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
249
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
250
251
@torch.no_grad()
252
def p_sample_loop(self, shape, return_intermediates=False):
253
device = self.betas.device
254
b = shape[0]
255
img = torch.randn(shape, device=device)
256
intermediates = [img]
257
for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
258
img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
259
clip_denoised=self.clip_denoised)
260
if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
261
intermediates.append(img)
262
if return_intermediates:
263
return img, intermediates
264
return img
265
266
@torch.no_grad()
267
def sample(self, batch_size=16, return_intermediates=False):
268
image_size = self.image_size
269
channels = self.channels
270
return self.p_sample_loop((batch_size, channels, image_size, image_size),
271
return_intermediates=return_intermediates)
272
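    # Forward diffusion step:
    # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise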
273
def q_sample(self, x_start, t, noise=None):
274
noise = default(noise, lambda: torch.randn_like(x_start))
275
return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
276
extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
277
278
def get_loss(self, pred, target, mean=True):
279
if self.loss_type == 'l1':
280
loss = (target - pred).abs()
281
if mean:
282
loss = loss.mean()
283
elif self.loss_type == 'l2':
284
if mean:
285
loss = torch.nn.functional.mse_loss(target, pred)
286
else:
287
loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
288
else:
289
raise NotImplementedError("unknown loss type '{loss_type}'")
290
291
return loss
292
293
def p_losses(self, x_start, t, noise=None):
294
noise = default(noise, lambda: torch.randn_like(x_start))
295
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
296
model_out = self.model(x_noisy, t)
297
298
loss_dict = {}
299
if self.parameterization == "eps":
300
target = noise
301
elif self.parameterization == "x0":
302
target = x_start
303
else:
304
raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported")
305
306
loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
307
308
log_prefix = 'train' if self.training else 'val'
309
310
loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
311
loss_simple = loss.mean() * self.l_simple_weight
312
313
loss_vlb = (self.lvlb_weights[t] * loss).mean()
314
loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})
315
316
loss = loss_simple + self.original_elbo_weight * loss_vlb
317
318
loss_dict.update({f'{log_prefix}/loss': loss})
319
320
return loss, loss_dict
321
322
def forward(self, x, *args, **kwargs):
323
# b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
324
# assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
325
t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
326
return self.p_losses(x, t, *args, **kwargs)
327
328
def get_input(self, batch, k):
329
x = batch[k]
330
if len(x.shape) == 3:
331
x = x[..., None]
332
x = rearrange(x, 'b h w c -> b c h w')
333
x = x.to(memory_format=torch.contiguous_format).float()
334
return x
335
336
def shared_step(self, batch):
337
x = self.get_input(batch, self.first_stage_key)
338
loss, loss_dict = self(x)
339
return loss, loss_dict
340
341
def training_step(self, batch, batch_idx):
342
loss, loss_dict = self.shared_step(batch)
343
344
self.log_dict(loss_dict, prog_bar=True,
345
logger=True, on_step=True, on_epoch=True)
346
347
self.log("global_step", self.global_step,
348
prog_bar=True, logger=True, on_step=True, on_epoch=False)
349
350
if self.use_scheduler:
351
lr = self.optimizers().param_groups[0]['lr']
352
self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)
353
354
return loss
355
356
@torch.no_grad()
357
def validation_step(self, batch, batch_idx):
358
_, loss_dict_no_ema = self.shared_step(batch)
359
with self.ema_scope():
360
_, loss_dict_ema = self.shared_step(batch)
361
loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
362
self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
363
self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
364
365
def on_train_batch_end(self, *args, **kwargs):
366
if self.use_ema:
367
self.model_ema(self.model)
368
369
def _get_rows_from_list(self, samples):
370
n_imgs_per_row = len(samples)
371
denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
372
denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
373
denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
374
return denoise_grid
375
376
@torch.no_grad()
377
def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
378
log = {}
379
x = self.get_input(batch, self.first_stage_key)
380
N = min(x.shape[0], N)
381
n_row = min(x.shape[0], n_row)
382
x = x.to(self.device)[:N]
383
log["inputs"] = x
384
385
# get diffusion row
386
diffusion_row = []
387
x_start = x[:n_row]
388
389
for t in range(self.num_timesteps):
390
if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
391
t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
392
t = t.to(self.device).long()
393
noise = torch.randn_like(x_start)
394
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
395
diffusion_row.append(x_noisy)
396
397
log["diffusion_row"] = self._get_rows_from_list(diffusion_row)
398
399
if sample:
400
# get denoise row
401
with self.ema_scope("Plotting"):
402
samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)
403
404
log["samples"] = samples
405
log["denoise_row"] = self._get_rows_from_list(denoise_row)
406
407
if return_keys:
408
if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
409
return log
410
else:
411
return {key: log[key] for key in return_keys}
412
return log
413
414
def configure_optimizers(self):
415
lr = self.learning_rate
416
params = list(self.model.parameters())
417
if self.learn_logvar:
418
params = params + [self.logvar]
419
opt = torch.optim.AdamW(params, lr=lr)
420
return opt
421
422
423
class LatentDiffusionV1(DDPMV1):
424
"""main class"""
425
def __init__(self,
426
first_stage_config,
427
cond_stage_config,
428
num_timesteps_cond=None,
429
cond_stage_key="image",
430
cond_stage_trainable=False,
431
concat_mode=True,
432
cond_stage_forward=None,
433
conditioning_key=None,
434
scale_factor=1.0,
435
scale_by_std=False,
436
*args, **kwargs):
437
self.num_timesteps_cond = default(num_timesteps_cond, 1)
438
self.scale_by_std = scale_by_std
439
assert self.num_timesteps_cond <= kwargs['timesteps']
440
# for backwards compatibility after implementation of DiffusionWrapper
441
if conditioning_key is None:
442
conditioning_key = 'concat' if concat_mode else 'crossattn'
443
if cond_stage_config == '__is_unconditional__':
444
conditioning_key = None
445
ckpt_path = kwargs.pop("ckpt_path", None)
446
ignore_keys = kwargs.pop("ignore_keys", [])
447
super().__init__(*args, conditioning_key=conditioning_key, **kwargs)
448
self.concat_mode = concat_mode
449
self.cond_stage_trainable = cond_stage_trainable
450
self.cond_stage_key = cond_stage_key
451
try:
452
self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
453
except Exception:
454
self.num_downs = 0
455
if not scale_by_std:
456
self.scale_factor = scale_factor
457
else:
458
self.register_buffer('scale_factor', torch.tensor(scale_factor))
459
self.instantiate_first_stage(first_stage_config)
460
self.instantiate_cond_stage(cond_stage_config)
461
self.cond_stage_forward = cond_stage_forward
462
self.clip_denoised = False
463
self.bbox_tokenizer = None
464
465
self.restarted_from_ckpt = False
466
if ckpt_path is not None:
467
self.init_from_ckpt(ckpt_path, ignore_keys)
468
self.restarted_from_ckpt = True
469
470
def make_cond_schedule(self, ):
471
self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
472
ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
473
self.cond_ids[:self.num_timesteps_cond] = ids
474
475
@rank_zero_only
476
@torch.no_grad()
477
def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
478
# only for very first batch
479
if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
480
assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
481
# set rescale weight to 1./std of encodings
482
print("### USING STD-RESCALING ###")
483
x = super().get_input(batch, self.first_stage_key)
484
x = x.to(self.device)
485
encoder_posterior = self.encode_first_stage(x)
486
z = self.get_first_stage_encoding(encoder_posterior).detach()
487
del self.scale_factor
488
self.register_buffer('scale_factor', 1. / z.flatten().std())
489
print(f"setting self.scale_factor to {self.scale_factor}")
490
print("### USING STD-RESCALING ###")
491
492
def register_schedule(self,
493
given_betas=None, beta_schedule="linear", timesteps=1000,
494
linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
495
super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)
496
497
self.shorten_cond_schedule = self.num_timesteps_cond > 1
498
if self.shorten_cond_schedule:
499
self.make_cond_schedule()
500
501
def instantiate_first_stage(self, config):
502
model = instantiate_from_config(config)
503
self.first_stage_model = model.eval()
504
self.first_stage_model.train = disabled_train
505
for param in self.first_stage_model.parameters():
506
param.requires_grad = False
507
508
def instantiate_cond_stage(self, config):
509
if not self.cond_stage_trainable:
510
if config == "__is_first_stage__":
511
print("Using first stage also as cond stage.")
512
self.cond_stage_model = self.first_stage_model
513
elif config == "__is_unconditional__":
514
print(f"Training {self.__class__.__name__} as an unconditional model.")
515
self.cond_stage_model = None
516
# self.be_unconditional = True
517
else:
518
model = instantiate_from_config(config)
519
self.cond_stage_model = model.eval()
520
self.cond_stage_model.train = disabled_train
521
for param in self.cond_stage_model.parameters():
522
param.requires_grad = False
523
else:
524
assert config != '__is_first_stage__'
525
assert config != '__is_unconditional__'
526
model = instantiate_from_config(config)
527
self.cond_stage_model = model
528
529
def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
530
denoise_row = []
531
for zd in tqdm(samples, desc=desc):
532
denoise_row.append(self.decode_first_stage(zd.to(self.device),
533
force_not_quantize=force_no_decoder_quantization))
534
n_imgs_per_row = len(denoise_row)
535
denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W
536
denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
537
denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
538
denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
539
return denoise_grid
540
541
def get_first_stage_encoding(self, encoder_posterior):
542
if isinstance(encoder_posterior, DiagonalGaussianDistribution):
543
z = encoder_posterior.sample()
544
elif isinstance(encoder_posterior, torch.Tensor):
545
z = encoder_posterior
546
else:
547
raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
548
return self.scale_factor * z
549
550
def get_learned_conditioning(self, c):
551
if self.cond_stage_forward is None:
552
if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
553
c = self.cond_stage_model.encode(c)
554
if isinstance(c, DiagonalGaussianDistribution):
555
c = c.mode()
556
else:
557
c = self.cond_stage_model(c)
558
else:
559
assert hasattr(self.cond_stage_model, self.cond_stage_forward)
560
c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
561
return c
562
563
def meshgrid(self, h, w):
564
y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
565
x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
566
567
arr = torch.cat([y, x], dim=-1)
568
return arr
569
570
def delta_border(self, h, w):
571
"""
572
:param h: height
573
:param w: width
574
:return: normalized distance to image border,
575
with min distance = 0 at border and max dist = 0.5 at image center
576
"""
577
lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
578
arr = self.meshgrid(h, w) / lower_right_corner
579
dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
580
dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
581
edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
582
return edge_dist
583
584
def get_weighting(self, h, w, Ly, Lx, device):
585
weighting = self.delta_border(h, w)
586
weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
587
self.split_input_params["clip_max_weight"], )
588
weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
589
590
if self.split_input_params["tie_braker"]:
591
L_weighting = self.delta_border(Ly, Lx)
592
L_weighting = torch.clip(L_weighting,
593
self.split_input_params["clip_min_tie_weight"],
594
self.split_input_params["clip_max_tie_weight"])
595
596
L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
597
weighting = weighting * L_weighting
598
return weighting
599
600
def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code
601
"""
602
:param x: img of size (bs, c, h, w)
603
:return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
604
"""
605
bs, nc, h, w = x.shape
606
607
# number of crops in image
608
Ly = (h - kernel_size[0]) // stride[0] + 1
609
Lx = (w - kernel_size[1]) // stride[1] + 1
610
611
if uf == 1 and df == 1:
612
fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
613
unfold = torch.nn.Unfold(**fold_params)
614
615
fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)
616
617
weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
618
normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap
619
weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))
620
621
elif uf > 1 and df == 1:
622
fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
623
unfold = torch.nn.Unfold(**fold_params)
624
625
fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
626
dilation=1, padding=0,
627
stride=(stride[0] * uf, stride[1] * uf))
628
fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)
629
630
weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
631
normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap
632
weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))
633
634
elif df > 1 and uf == 1:
635
fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
636
unfold = torch.nn.Unfold(**fold_params)
637
638
fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
639
dilation=1, padding=0,
640
stride=(stride[0] // df, stride[1] // df))
641
fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)
642
643
weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
644
normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap
645
weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))
646
647
else:
648
raise NotImplementedError
649
650
return fold, unfold, normalization, weighting
651
652
@torch.no_grad()
653
def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
654
cond_key=None, return_original_cond=False, bs=None):
655
x = super().get_input(batch, k)
656
if bs is not None:
657
x = x[:bs]
658
x = x.to(self.device)
659
encoder_posterior = self.encode_first_stage(x)
660
z = self.get_first_stage_encoding(encoder_posterior).detach()
661
662
if self.model.conditioning_key is not None:
663
if cond_key is None:
664
cond_key = self.cond_stage_key
665
if cond_key != self.first_stage_key:
666
if cond_key in ['caption', 'coordinates_bbox']:
667
xc = batch[cond_key]
668
elif cond_key == 'class_label':
669
xc = batch
670
else:
671
xc = super().get_input(batch, cond_key).to(self.device)
672
else:
673
xc = x
674
if not self.cond_stage_trainable or force_c_encode:
675
if isinstance(xc, dict) or isinstance(xc, list):
676
# import pudb; pudb.set_trace()
677
c = self.get_learned_conditioning(xc)
678
else:
679
c = self.get_learned_conditioning(xc.to(self.device))
680
else:
681
c = xc
682
if bs is not None:
683
c = c[:bs]
684
685
if self.use_positional_encodings:
686
pos_x, pos_y = self.compute_latent_shifts(batch)
687
ckey = __conditioning_keys__[self.model.conditioning_key]
688
c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}
689
690
else:
691
c = None
692
xc = None
693
if self.use_positional_encodings:
694
pos_x, pos_y = self.compute_latent_shifts(batch)
695
c = {'pos_x': pos_x, 'pos_y': pos_y}
696
out = [z, c]
697
if return_first_stage_outputs:
698
xrec = self.decode_first_stage(z)
699
out.extend([x, xrec])
700
if return_original_cond:
701
out.append(xc)
702
return out
703
704
@torch.no_grad()
705
def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
706
if predict_cids:
707
if z.dim() == 4:
708
z = torch.argmax(z.exp(), dim=1).long()
709
z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
710
z = rearrange(z, 'b h w c -> b c h w').contiguous()
711
712
z = 1. / self.scale_factor * z
713
714
if hasattr(self, "split_input_params"):
715
if self.split_input_params["patch_distributed_vq"]:
716
ks = self.split_input_params["ks"] # eg. (128, 128)
717
stride = self.split_input_params["stride"] # eg. (64, 64)
718
uf = self.split_input_params["vqf"]
719
bs, nc, h, w = z.shape
720
if ks[0] > h or ks[1] > w:
721
ks = (min(ks[0], h), min(ks[1], w))
722
print("reducing Kernel")
723
724
if stride[0] > h or stride[1] > w:
725
stride = (min(stride[0], h), min(stride[1], w))
726
print("reducing stride")
727
728
fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
729
730
z = unfold(z) # (bn, nc * prod(**ks), L)
731
# 1. Reshape to img shape
732
z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
733
734
# 2. apply model loop over last dim
735
if isinstance(self.first_stage_model, VQModelInterface):
736
output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
737
force_not_quantize=predict_cids or force_not_quantize)
738
for i in range(z.shape[-1])]
739
else:
740
741
output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
742
for i in range(z.shape[-1])]
743
744
o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L)
745
o = o * weighting
746
# Reverse 1. reshape to img shape
747
o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
748
# stitch crops together
749
decoded = fold(o)
750
decoded = decoded / normalization # norm is shape (1, 1, h, w)
751
return decoded
752
else:
753
if isinstance(self.first_stage_model, VQModelInterface):
754
return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
755
else:
756
return self.first_stage_model.decode(z)
757
758
else:
759
if isinstance(self.first_stage_model, VQModelInterface):
760
return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
761
else:
762
return self.first_stage_model.decode(z)
763
764
# same as above but without decorator
765
def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
766
if predict_cids:
767
if z.dim() == 4:
768
z = torch.argmax(z.exp(), dim=1).long()
769
z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
770
z = rearrange(z, 'b h w c -> b c h w').contiguous()
771
772
z = 1. / self.scale_factor * z
773
774
if hasattr(self, "split_input_params"):
775
if self.split_input_params["patch_distributed_vq"]:
776
ks = self.split_input_params["ks"] # eg. (128, 128)
777
stride = self.split_input_params["stride"] # eg. (64, 64)
778
uf = self.split_input_params["vqf"]
779
bs, nc, h, w = z.shape
780
if ks[0] > h or ks[1] > w:
781
ks = (min(ks[0], h), min(ks[1], w))
782
print("reducing Kernel")
783
784
if stride[0] > h or stride[1] > w:
785
stride = (min(stride[0], h), min(stride[1], w))
786
print("reducing stride")
787
788
fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
789
790
z = unfold(z) # (bn, nc * prod(**ks), L)
791
# 1. Reshape to img shape
792
z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
793
794
# 2. apply model loop over last dim
795
if isinstance(self.first_stage_model, VQModelInterface):
796
output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
797
force_not_quantize=predict_cids or force_not_quantize)
798
for i in range(z.shape[-1])]
799
else:
800
801
output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
802
for i in range(z.shape[-1])]
803
804
o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L)
805
o = o * weighting
806
# Reverse 1. reshape to img shape
807
o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
808
# stitch crops together
809
decoded = fold(o)
810
decoded = decoded / normalization # norm is shape (1, 1, h, w)
811
return decoded
812
else:
813
if isinstance(self.first_stage_model, VQModelInterface):
814
return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
815
else:
816
return self.first_stage_model.decode(z)
817
818
else:
819
if isinstance(self.first_stage_model, VQModelInterface):
820
return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
821
else:
822
return self.first_stage_model.decode(z)
823
824
@torch.no_grad()
825
def encode_first_stage(self, x):
826
if hasattr(self, "split_input_params"):
827
if self.split_input_params["patch_distributed_vq"]:
828
ks = self.split_input_params["ks"] # eg. (128, 128)
829
stride = self.split_input_params["stride"] # eg. (64, 64)
830
df = self.split_input_params["vqf"]
831
self.split_input_params['original_image_size'] = x.shape[-2:]
832
bs, nc, h, w = x.shape
833
if ks[0] > h or ks[1] > w:
834
ks = (min(ks[0], h), min(ks[1], w))
835
print("reducing Kernel")
836
837
if stride[0] > h or stride[1] > w:
838
stride = (min(stride[0], h), min(stride[1], w))
839
print("reducing stride")
840
841
fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df)
842
z = unfold(x) # (bn, nc * prod(**ks), L)
843
# Reshape to img shape
844
z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
845
846
output_list = [self.first_stage_model.encode(z[:, :, :, :, i])
847
for i in range(z.shape[-1])]
848
849
o = torch.stack(output_list, axis=-1)
850
o = o * weighting
851
852
# Reverse reshape to img shape
853
o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
854
# stitch crops together
855
decoded = fold(o)
856
decoded = decoded / normalization
857
return decoded
858
859
else:
860
return self.first_stage_model.encode(x)
861
else:
862
return self.first_stage_model.encode(x)
863
864
def shared_step(self, batch, **kwargs):
865
x, c = self.get_input(batch, self.first_stage_key)
866
loss = self(x, c)
867
return loss
868
869
def forward(self, x, c, *args, **kwargs):
870
t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
871
if self.model.conditioning_key is not None:
872
assert c is not None
873
if self.cond_stage_trainable:
874
c = self.get_learned_conditioning(c)
875
if self.shorten_cond_schedule: # TODO: drop this option
876
tc = self.cond_ids[t].to(self.device)
877
c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
878
return self.p_losses(x, c, t, *args, **kwargs)
879
880
def apply_model(self, x_noisy, t, cond, return_ids=False):
881
882
if isinstance(cond, dict):
883
# hybrid case, cond is expected to be a dict
884
pass
885
else:
886
if not isinstance(cond, list):
887
cond = [cond]
888
key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
889
cond = {key: cond}
890
891
if hasattr(self, "split_input_params"):
892
assert len(cond) == 1 # todo can only deal with one conditioning atm
893
assert not return_ids
894
ks = self.split_input_params["ks"] # eg. (128, 128)
895
stride = self.split_input_params["stride"] # eg. (64, 64)
896
897
h, w = x_noisy.shape[-2:]
898
899
fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride)
900
901
z = unfold(x_noisy) # (bn, nc * prod(**ks), L)
902
# Reshape to img shape
903
z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
904
z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]
905
906
if self.cond_stage_key in ["image", "LR_image", "segmentation",
907
'bbox_img'] and self.model.conditioning_key: # todo check for completeness
908
c_key = next(iter(cond.keys())) # get key
909
c = next(iter(cond.values())) # get value
910
assert (len(c) == 1) # todo extend to list with more than one elem
911
c = c[0] # get element
912
913
c = unfold(c)
914
c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L )
915
916
cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]
917
918
elif self.cond_stage_key == 'coordinates_bbox':
919
assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size'
920
921
# assuming padding of unfold is always 0 and its dilation is always 1
922
n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
923
full_img_h, full_img_w = self.split_input_params['original_image_size']
924
# as we are operating on latents, we need the factor from the original image size to the
925
# spatial latent size to properly rescale the crops for regenerating the bbox annotations
926
num_downs = self.first_stage_model.encoder.num_resolutions - 1
927
rescale_latent = 2 ** (num_downs)
928
929
                # get top-left positions of patches in the form expected by the bbox tokenizer, therefore we
930
# need to rescale the tl patch coordinates to be in between (0,1)
931
tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
932
rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
933
for patch_nr in range(z.shape[-1])]
934
935
# patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
936
patch_limits = [(x_tl, y_tl,
937
rescale_latent * ks[0] / full_img_w,
938
rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates]
939
# patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]
940
941
# tokenize crop coordinates for the bounding boxes of the respective patches
942
patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device)
943
for bbox in patch_limits] # list of length l with tensors of shape (1, 2)
944
print(patch_limits_tknzd[0].shape)
945
# cut tknzd crop position from conditioning
946
assert isinstance(cond, dict), 'cond must be dict to be fed into model'
947
cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
948
print(cut_cond.shape)
949
950
adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd])
951
adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
952
print(adapted_cond.shape)
953
adapted_cond = self.get_learned_conditioning(adapted_cond)
954
print(adapted_cond.shape)
955
adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1])
956
print(adapted_cond.shape)
957
958
cond_list = [{'c_crossattn': [e]} for e in adapted_cond]
959
960
else:
961
cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient
962
963
# apply model by loop over crops
964
output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])]
965
assert not isinstance(output_list[0],
966
tuple) # todo cant deal with multiple model outputs check this never happens
967
968
o = torch.stack(output_list, axis=-1)
969
o = o * weighting
970
# Reverse reshape to img shape
971
o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
972
# stitch crops together
973
x_recon = fold(o) / normalization
974
975
else:
976
x_recon = self.model(x_noisy, t, **cond)
977
978
if isinstance(x_recon, tuple) and not return_ids:
979
return x_recon[0]
980
else:
981
return x_recon
982
983
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
984
return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
985
extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
986
987
def _prior_bpd(self, x_start):
988
"""
989
Get the prior KL term for the variational lower-bound, measured in
990
bits-per-dim.
991
This term can't be optimized, as it only depends on the encoder.
992
:param x_start: the [N x C x ...] tensor of inputs.
993
:return: a batch of [N] KL values (in bits), one per batch element.
994
"""
995
batch_size = x_start.shape[0]
996
t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
997
qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
998
kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
999
return mean_flat(kl_prior) / np.log(2.0)
1000
1001
def p_losses(self, x_start, cond, t, noise=None):
1002
noise = default(noise, lambda: torch.randn_like(x_start))
1003
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
1004
model_output = self.apply_model(x_noisy, t, cond)
1005
1006
loss_dict = {}
1007
prefix = 'train' if self.training else 'val'
1008
1009
if self.parameterization == "x0":
1010
target = x_start
1011
elif self.parameterization == "eps":
1012
target = noise
1013
else:
1014
raise NotImplementedError()
1015
1016
loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
1017
loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
1018
1019
logvar_t = self.logvar[t].to(self.device)
1020
loss = loss_simple / torch.exp(logvar_t) + logvar_t
1021
# loss = loss_simple / torch.exp(self.logvar) + self.logvar
1022
if self.learn_logvar:
1023
loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
1024
loss_dict.update({'logvar': self.logvar.data.mean()})
1025
1026
loss = self.l_simple_weight * loss.mean()
1027
1028
loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
1029
loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
1030
loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
1031
loss += (self.original_elbo_weight * loss_vlb)
1032
loss_dict.update({f'{prefix}/loss': loss})
1033
1034
return loss, loss_dict
1035
1036
def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
1037
return_x0=False, score_corrector=None, corrector_kwargs=None):
1038
t_in = t
1039
model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
1040
1041
if score_corrector is not None:
1042
assert self.parameterization == "eps"
1043
model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
1044
1045
if return_codebook_ids:
1046
model_out, logits = model_out
1047
1048
if self.parameterization == "eps":
1049
x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
1050
elif self.parameterization == "x0":
1051
x_recon = model_out
1052
else:
1053
raise NotImplementedError()
1054
1055
if clip_denoised:
1056
x_recon.clamp_(-1., 1.)
1057
if quantize_denoised:
1058
x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
1059
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
1060
if return_codebook_ids:
1061
return model_mean, posterior_variance, posterior_log_variance, logits
1062
elif return_x0:
1063
return model_mean, posterior_variance, posterior_log_variance, x_recon
1064
else:
1065
return model_mean, posterior_variance, posterior_log_variance
1066
1067
@torch.no_grad()
1068
def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
1069
return_codebook_ids=False, quantize_denoised=False, return_x0=False,
1070
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
1071
b, *_, device = *x.shape, x.device
1072
outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
1073
return_codebook_ids=return_codebook_ids,
1074
quantize_denoised=quantize_denoised,
1075
return_x0=return_x0,
1076
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
1077
if return_codebook_ids:
1078
raise DeprecationWarning("Support dropped.")
1079
model_mean, _, model_log_variance, logits = outputs
1080
elif return_x0:
1081
model_mean, _, model_log_variance, x0 = outputs
1082
else:
1083
model_mean, _, model_log_variance = outputs
1084
1085
noise = noise_like(x.shape, device, repeat_noise) * temperature
1086
if noise_dropout > 0.:
1087
noise = torch.nn.functional.dropout(noise, p=noise_dropout)
1088
# no noise when t == 0
1089
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
1090
1091
if return_codebook_ids:
1092
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
1093
if return_x0:
1094
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
1095
else:
1096
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
1097
1098
@torch.no_grad()
1099
def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
1100
img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
1101
score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
1102
log_every_t=None):
1103
if not log_every_t:
1104
log_every_t = self.log_every_t
1105
timesteps = self.num_timesteps
1106
if batch_size is not None:
1107
b = batch_size if batch_size is not None else shape[0]
1108
shape = [batch_size] + list(shape)
1109
else:
1110
b = batch_size = shape[0]
1111
if x_T is None:
1112
img = torch.randn(shape, device=self.device)
1113
else:
1114
img = x_T
1115
intermediates = []
1116
if cond is not None:
1117
if isinstance(cond, dict):
1118
cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
1119
[x[:batch_size] for x in cond[key]] for key in cond}
1120
else:
1121
cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
1122
1123
if start_T is not None:
1124
timesteps = min(timesteps, start_T)
1125
iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
1126
total=timesteps) if verbose else reversed(
1127
range(0, timesteps))
1128
if type(temperature) == float:
1129
temperature = [temperature] * timesteps
1130
1131
for i in iterator:
1132
ts = torch.full((b,), i, device=self.device, dtype=torch.long)
1133
if self.shorten_cond_schedule:
1134
assert self.model.conditioning_key != 'hybrid'
1135
tc = self.cond_ids[ts].to(cond.device)
1136
cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
1137
1138
img, x0_partial = self.p_sample(img, cond, ts,
1139
clip_denoised=self.clip_denoised,
1140
quantize_denoised=quantize_denoised, return_x0=True,
1141
temperature=temperature[i], noise_dropout=noise_dropout,
1142
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
1143
if mask is not None:
1144
assert x0 is not None
1145
img_orig = self.q_sample(x0, ts)
1146
img = img_orig * mask + (1. - mask) * img
1147
1148
if i % log_every_t == 0 or i == timesteps - 1:
1149
intermediates.append(x0_partial)
1150
if callback:
1151
callback(i)
1152
if img_callback:
1153
img_callback(img, i)
1154
return img, intermediates
1155
1156
@torch.no_grad()
1157
def p_sample_loop(self, cond, shape, return_intermediates=False,
1158
x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
1159
mask=None, x0=None, img_callback=None, start_T=None,
1160
log_every_t=None):
1161
1162
if not log_every_t:
1163
log_every_t = self.log_every_t
1164
device = self.betas.device
1165
b = shape[0]
1166
if x_T is None:
1167
img = torch.randn(shape, device=device)
1168
else:
1169
img = x_T
1170
1171
intermediates = [img]
1172
if timesteps is None:
1173
timesteps = self.num_timesteps
1174
1175
if start_T is not None:
1176
timesteps = min(timesteps, start_T)
1177
iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
1178
range(0, timesteps))
1179
1180
if mask is not None:
1181
assert x0 is not None
1182
assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match
1183
1184
for i in iterator:
1185
ts = torch.full((b,), i, device=device, dtype=torch.long)
1186
if self.shorten_cond_schedule:
1187
assert self.model.conditioning_key != 'hybrid'
1188
tc = self.cond_ids[ts].to(cond.device)
1189
cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
1190
1191
img = self.p_sample(img, cond, ts,
1192
clip_denoised=self.clip_denoised,
1193
quantize_denoised=quantize_denoised)
1194
if mask is not None:
1195
img_orig = self.q_sample(x0, ts)
1196
img = img_orig * mask + (1. - mask) * img
1197
1198
if i % log_every_t == 0 or i == timesteps - 1:
1199
intermediates.append(img)
1200
if callback:
1201
callback(i)
1202
if img_callback:
1203
img_callback(img, i)
1204
1205
if return_intermediates:
1206
return img, intermediates
1207
return img
1208
1209
@torch.no_grad()
1210
def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
1211
verbose=True, timesteps=None, quantize_denoised=False,
1212
mask=None, x0=None, shape=None,**kwargs):
1213
if shape is None:
1214
shape = (batch_size, self.channels, self.image_size, self.image_size)
1215
if cond is not None:
1216
if isinstance(cond, dict):
1217
cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
1218
[x[:batch_size] for x in cond[key]] for key in cond}
1219
else:
1220
cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
1221
return self.p_sample_loop(cond,
1222
shape,
1223
return_intermediates=return_intermediates, x_T=x_T,
1224
verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
1225
mask=mask, x0=x0)
1226
1227
@torch.no_grad()
1228
def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs):
1229
1230
if ddim:
1231
ddim_sampler = DDIMSampler(self)
1232
shape = (self.channels, self.image_size, self.image_size)
1233
samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size,
1234
shape,cond,verbose=False,**kwargs)
1235
1236
else:
1237
samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
1238
return_intermediates=True,**kwargs)
1239
1240
return samples, intermediates
1241
1242
1243
@torch.no_grad()
1244
def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
1245
quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
1246
plot_diffusion_rows=True, **kwargs):
1247
1248
use_ddim = ddim_steps is not None
1249
1250
log = {}
1251
z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
1252
return_first_stage_outputs=True,
1253
force_c_encode=True,
1254
return_original_cond=True,
1255
bs=N)
1256
N = min(x.shape[0], N)
1257
n_row = min(x.shape[0], n_row)
1258
log["inputs"] = x
1259
log["reconstruction"] = xrec
1260
if self.model.conditioning_key is not None:
1261
if hasattr(self.cond_stage_model, "decode"):
1262
xc = self.cond_stage_model.decode(c)
1263
log["conditioning"] = xc
1264
elif self.cond_stage_key in ["caption"]:
1265
xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
1266
log["conditioning"] = xc
1267
elif self.cond_stage_key == 'class_label':
1268
xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
1269
log['conditioning'] = xc
1270
elif isimage(xc):
1271
log["conditioning"] = xc
1272
if ismap(xc):
1273
log["original_conditioning"] = self.to_rgb(xc)
1274
1275
if plot_diffusion_rows:
1276
# get diffusion row
1277
diffusion_row = []
1278
z_start = z[:n_row]
1279
for t in range(self.num_timesteps):
1280
if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
1281
t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
1282
t = t.to(self.device).long()
1283
noise = torch.randn_like(z_start)
1284
z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
1285
diffusion_row.append(self.decode_first_stage(z_noisy))
1286
1287
diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
1288
diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
1289
diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
1290
diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
1291
log["diffusion_row"] = diffusion_grid
1292
1293
if sample:
1294
# get denoise row
1295
with self.ema_scope("Plotting"):
1296
samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
1297
ddim_steps=ddim_steps,eta=ddim_eta)
1298
# samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
1299
x_samples = self.decode_first_stage(samples)
1300
log["samples"] = x_samples
1301
if plot_denoise_rows:
1302
denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
1303
log["denoise_row"] = denoise_grid
1304
1305
if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
1306
self.first_stage_model, IdentityFirstStage):
1307
# also display when quantizing x0 while sampling
1308
with self.ema_scope("Plotting Quantized Denoised"):
1309
samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
1310
ddim_steps=ddim_steps,eta=ddim_eta,
1311
quantize_denoised=True)
1312
# samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
1313
# quantize_denoised=True)
1314
x_samples = self.decode_first_stage(samples.to(self.device))
1315
log["samples_x0_quantized"] = x_samples
1316
1317
if inpaint:
1318
# make a simple center square
1319
h, w = z.shape[2], z.shape[3]
1320
mask = torch.ones(N, h, w).to(self.device)
1321
# zeros will be filled in
1322
mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
1323
mask = mask[:, None, ...]
1324
with self.ema_scope("Plotting Inpaint"):
1325
1326
samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta,
1327
ddim_steps=ddim_steps, x0=z[:N], mask=mask)
1328
x_samples = self.decode_first_stage(samples.to(self.device))
1329
log["samples_inpainting"] = x_samples
1330
log["mask"] = mask
1331
1332
# outpaint
1333
with self.ema_scope("Plotting Outpaint"):
1334
samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta,
1335
ddim_steps=ddim_steps, x0=z[:N], mask=mask)
1336
x_samples = self.decode_first_stage(samples.to(self.device))
1337
log["samples_outpainting"] = x_samples
1338
1339
if plot_progressive_rows:
1340
with self.ema_scope("Plotting Progressives"):
1341
img, progressives = self.progressive_denoising(c,
1342
shape=(self.channels, self.image_size, self.image_size),
1343
batch_size=N)
1344
prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
1345
log["progressive_row"] = prog_row
1346
1347
if return_keys:
1348
if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
1349
return log
1350
else:
1351
return {key: log[key] for key in return_keys}
1352
return log
1353
1354
def configure_optimizers(self):
1355
lr = self.learning_rate
1356
params = list(self.model.parameters())
1357
if self.cond_stage_trainable:
1358
print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
1359
params = params + list(self.cond_stage_model.parameters())
1360
if self.learn_logvar:
1361
print('Diffusion model optimizing logvar')
1362
params.append(self.logvar)
1363
opt = torch.optim.AdamW(params, lr=lr)
1364
if self.use_scheduler:
1365
assert 'target' in self.scheduler_config
1366
scheduler = instantiate_from_config(self.scheduler_config)
1367
1368
print("Setting up LambdaLR scheduler...")
1369
scheduler = [
1370
{
1371
'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
1372
'interval': 'step',
1373
'frequency': 1
1374
}]
1375
return [opt], scheduler
1376
return opt
1377
1378
@torch.no_grad()
1379
def to_rgb(self, x):
1380
x = x.float()
1381
if not hasattr(self, "colorize"):
1382
self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
1383
x = nn.functional.conv2d(x, weight=self.colorize)
1384
x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
1385
return x
1386
1387
1388
class DiffusionWrapperV1(pl.LightningModule):
1389
def __init__(self, diff_model_config, conditioning_key):
1390
super().__init__()
1391
self.diffusion_model = instantiate_from_config(diff_model_config)
1392
self.conditioning_key = conditioning_key
1393
assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm']
1394
1395
def forward(self, x, t, c_concat: list = None, c_crossattn: list = None):
1396
if self.conditioning_key is None:
1397
out = self.diffusion_model(x, t)
1398
elif self.conditioning_key == 'concat':
1399
xc = torch.cat([x] + c_concat, dim=1)
1400
out = self.diffusion_model(xc, t)
1401
elif self.conditioning_key == 'crossattn':
1402
cc = torch.cat(c_crossattn, 1)
1403
out = self.diffusion_model(x, t, context=cc)
1404
elif self.conditioning_key == 'hybrid':
1405
xc = torch.cat([x] + c_concat, dim=1)
1406
cc = torch.cat(c_crossattn, 1)
1407
out = self.diffusion_model(xc, t, context=cc)
1408
elif self.conditioning_key == 'adm':
1409
cc = c_crossattn[0]
1410
out = self.diffusion_model(x, t, y=cc)
1411
else:
1412
raise NotImplementedError()
1413
1414
return out
1415
1416
1417
class Layout2ImgDiffusionV1(LatentDiffusionV1):
1418
# TODO: move all layout-specific hacks to this class
1419
def __init__(self, cond_stage_key, *args, **kwargs):
1420
assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"'
1421
super().__init__(*args, cond_stage_key=cond_stage_key, **kwargs)
1422
1423
def log_images(self, batch, N=8, *args, **kwargs):
1424
logs = super().log_images(*args, batch=batch, N=N, **kwargs)
1425
1426
key = 'train' if self.training else 'validation'
1427
dset = self.trainer.datamodule.datasets[key]
1428
mapper = dset.conditional_builders[self.cond_stage_key]
1429
1430
bbox_imgs = []
1431
map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno))
1432
for tknzd_bbox in batch[self.cond_stage_key][:N]:
1433
bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256))
1434
bbox_imgs.append(bboximg)
1435
1436
cond_img = torch.stack(bbox_imgs, dim=0)
1437
logs['bbox_image'] = cond_img
1438
return logs
1439
1440
ldm.models.diffusion.ddpm.DDPMV1 = DDPMV1
1441
ldm.models.diffusion.ddpm.LatentDiffusionV1 = LatentDiffusionV1
1442
ldm.models.diffusion.ddpm.DiffusionWrapperV1 = DiffusionWrapperV1
1443
ldm.models.diffusion.ddpm.Layout2ImgDiffusionV1 = Layout2ImgDiffusionV1
1444
1445
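# --- Added note (not part of the upstream file) ---
# After the assignments above, the reinstated V1 classes are reachable through the
# patched ldm.models.diffusion.ddpm namespace, so callers can resolve them by name.
# A minimal, purely illustrative sketch (the config dict below is hypothetical):
#
#   import ldm.models.diffusion.ddpm as ddpm_module
#   assert ddpm_module.LatentDiffusionV1 is LatentDiffusionV1
#   # model = instantiate_from_config({
#   #     "target": "ldm.models.diffusion.ddpm.LatentDiffusionV1",
#   #     "params": {...},  # unet_config, first_stage_config, cond_stage_config, ...
#   # })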