# Path: blob/master/configs/alt-diffusion-m18-inference.yaml
# 3048 views
# AltDiffusion-m18 inference configuration.
# Defines a LatentDiffusion model conditioned on multilingual text via an
# XLM-R-based encoder (18-language variant), with a KL autoencoder first stage.
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    # Linear noise schedule endpoints over 1000 diffusion timesteps.
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    # Batch keys: images under "jpg", captions under "txt".
    first_stage_key: "jpg"
    cond_stage_key: "txt"
    # Latent-space spatial size and channel count (64x64x4 latents).
    image_size: 64
    channels: 4
    cond_stage_trainable: false # Note: different from the one we trained before
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: false

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 10000 ]
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    # Denoising U-Net operating on the 4-channel latents.
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_head_channels: 64
        use_spatial_transformer: true
        use_linear_in_transformer: true
        transformer_depth: 1
        # Cross-attention context width; must match the text encoder's
        # projected embedding dimension.
        context_dim: 1024
        use_checkpoint: false
        legacy: false

    # KL-regularized autoencoder mapping 256x256x3 images <-> 64x64x4 latents.
    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
            - 1
            - 2
            - 4
            - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        # Inference-only: no reconstruction loss needed.
        lossconfig:
          target: torch.nn.Identity

    # Multilingual text encoder (XLM-R large, 18-language AltDiffusion variant).
    cond_stage_config:
      target: modules.xlmr_m18.BertSeriesModelWithTransformation
      params:
        name: "XLMR-Large"