# Source: automatic1111/stable-diffusion-webui
# Path: configs/alt-diffusion-m18-inference.yaml
# Inference config for AltDiffusion-m18: a LatentDiffusion model whose text
# encoder is a multilingual XLM-R based transformer (see cond_stage_config).
# Structure reconstructed from the mangled paste: viewer line numbers removed,
# YAML indentation (2 spaces) restored, booleans normalized to lowercase.
model:
  base_learning_rate: 1.0e-04
  # Top-level diffusion wrapper class to instantiate.
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    # Noise-schedule endpoints (linear beta schedule).
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false  # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: false

    scheduler_config:  # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ]  # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    # Denoising U-Net backbone.
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32  # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_head_channels: 64
        use_spatial_transformer: true
        use_linear_in_transformer: true
        transformer_depth: 1
        context_dim: 1024
        use_checkpoint: false
        legacy: false

    # VAE used to map between pixel space and the 4-channel latent space.
    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
            - 1
            - 2
            - 4
            - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          # No reconstruction loss needed at inference time.
          target: torch.nn.Identity

    # Multilingual text encoder (XLM-R based) producing cross-attention context.
    cond_stage_config:
      target: modules.xlmr_m18.BertSeriesModelWithTransformation
      params:
        name: "XLMR-Large"