# Path: configs/alt-diffusion-inference.yaml
# Inference configuration for AltDiffusion: a LatentDiffusion model with an
# XLM-R text encoder (multilingual) in place of the usual CLIP text encoder.
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    # Linear noise schedule endpoints for the 1000-step DDPM.
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: false

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: true
        transformer_depth: 1
        # 768-dim cross-attention context from the text encoder below.
        context_dim: 768
        use_checkpoint: false
        legacy: false

    first_stage_config:
      # KL-regularized autoencoder mapping 256px RGB images to a 4-channel latent.
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
            - 1
            - 2
            - 4
            - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          # Inference-only config: no reconstruction loss is needed.
          target: torch.nn.Identity

    cond_stage_config:
      # Multilingual XLM-R text encoder (replaces CLIP for AltDiffusion).
      target: modules.xlmr.BertSeriesModelWithTransformation
      params:
        name: "XLMR-Large"