GitHub Repository: TencentARC/GFPGAN
Path: blob/master/options/train_gfpgan_v1_simple.yml
# general settings
name: train_GFPGANv1_512_simple
model_type: GFPGANModel
num_gpu: auto  # officially, we use 4 GPUs
manual_seed: 0
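
# Note: this config is consumed by the BasicSR-style training entry point of the
# repository. A typical multi-GPU launch (adapted from the repository README;
# verify the exact command, script path, and port for your setup) looks like:
#   python -m torch.distributed.launch --nproc_per_node=4 --master_port=22021 \
#       gfpgan/train.py -opt options/train_gfpgan_v1_simple.yml --launcher pytorch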

# dataset and data loader settings
datasets:
  train:
    name: FFHQ
    type: FFHQDegradationDataset
    # dataroot_gt: datasets/ffhq/ffhq_512.lmdb
    dataroot_gt: datasets/ffhq/ffhq_512
    io_backend:
      # type: lmdb
      type: disk

    use_hflip: true
    mean: [0.5, 0.5, 0.5]
    std: [0.5, 0.5, 0.5]
    out_size: 512

    blur_kernel_size: 41
    kernel_list: ['iso', 'aniso']
    kernel_prob: [0.5, 0.5]
    blur_sigma: [0.1, 10]
    downsample_range: [0.8, 8]
    noise_range: [0, 20]
    jpeg_range: [60, 100]
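    # Note: FFHQDegradationDataset synthesizes low-quality inputs on the fly from
    # the HQ faces, roughly in this order: blur with an isotropic/anisotropic
    # Gaussian kernel (sigma drawn from blur_sigma), downsample by a random factor
    # in downsample_range, add Gaussian noise (noise_range), apply JPEG compression
    # (jpeg_range), then resize back to out_size (degradation model as described in
    # the GFPGAN paper).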

    # color jitter and gray
    color_jitter_prob: 0.3
    color_jitter_shift: 20
    color_jitter_pt_prob: 0.3
    gray_prob: 0.01

    # If you do not want colorization, please set
    # color_jitter_prob: ~
    # color_jitter_pt_prob: ~
    # gray_prob: 0.01
    # gt_gray: True

    # data loader
    use_shuffle: true
    num_worker_per_gpu: 6
    batch_size_per_gpu: 3
    dataset_enlarge_ratio: 1
    prefetch_mode: ~
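    # Note: with the official 4-GPU setup, the effective batch size is
    # 4 GPUs x 3 images per GPU = 12 images per iteration.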

  val:
    # Modify this block to point at your own validation set,
    # or comment out the val block if you do not need validation during training
    name: validation
    type: PairedImageDataset
    dataroot_lq: datasets/faces/validation/input
    dataroot_gt: datasets/faces/validation/reference
    io_backend:
      type: disk
    mean: [0.5, 0.5, 0.5]
    std: [0.5, 0.5, 0.5]
    scale: 1

# network structures
network_g:
  type: GFPGANv1
  out_size: 512
  num_style_feat: 512
  channel_multiplier: 1
  resample_kernel: [1, 3, 3, 1]
  decoder_load_path: experiments/pretrained_models/StyleGAN2_512_Cmul1_FFHQ_B12G4_scratch_800k.pth
  fix_decoder: true
  num_mlp: 8
  lr_mlp: 0.01
  input_is_latent: true
  different_w: true
  narrow: 1
  sft_half: true
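  # Note: the decoder is a pretrained 512x512 StyleGAN2 loaded from
  # decoder_load_path and kept frozen (fix_decoder: true) to serve as the
  # generative face prior; different_w: true predicts a separate latent code for
  # each StyleGAN layer, and sft_half: true applies the spatial feature transform
  # to only half of the channels (the channel-split SFT described in the GFPGAN
  # paper).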

network_d:
  type: StyleGAN2Discriminator
  out_size: 512
  channel_multiplier: 1
  resample_kernel: [1, 3, 3, 1]

# path
path:
  pretrain_network_g: ~
  param_key_g: params_ema
  strict_load_g: ~
  pretrain_network_d: ~
  resume_state: ~
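  # Note: "~" is YAML null, so no generator or discriminator weights are loaded
  # here and no training state is resumed; only the StyleGAN2 decoder above is
  # initialized from a pretrained checkpoint.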

# training settings
train:
  optim_g:
    type: Adam
    lr: !!float 2e-3
  optim_d:
    type: Adam
    lr: !!float 2e-3
  optim_component:
    type: Adam
    lr: !!float 2e-3

  scheduler:
    type: MultiStepLR
    milestones: [600000, 700000]
    gamma: 0.5

  total_iter: 800000
  warmup_iter: -1  # no warm up
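
  # Note: MultiStepLR halves the learning rate (gamma: 0.5) at 600k iterations and
  # again at 700k, over a total of 800k training iterations.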

  # losses
  # pixel loss
  pixel_opt:
    type: L1Loss
    loss_weight: !!float 1e-1
    reduction: mean
  # L1 loss used in pyramid loss, component style loss and identity loss
  L1_opt:
    type: L1Loss
    loss_weight: 1
    reduction: mean

  # image pyramid loss
  pyramid_loss_weight: 1
  remove_pyramid_loss: 50000
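  # Note: the pyramid loss supervises the generator's intermediate,
  # lower-resolution outputs with an L1 term; after remove_pyramid_loss
  # iterations (50k) it is effectively disabled, so later training is driven by
  # the remaining losses.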
  # perceptual loss (content and style losses)
  perceptual_opt:
    type: PerceptualLoss
    layer_weights:
      # before relu
      'conv1_2': 0.1
      'conv2_2': 0.1
      'conv3_4': 1
      'conv4_4': 1
      'conv5_4': 1
    vgg_type: vgg19
    use_input_norm: true
    perceptual_weight: !!float 1
    style_weight: 50
    range_norm: true
    criterion: l1
  # gan loss
  gan_opt:
    type: GANLoss
    gan_type: wgan_softplus
    loss_weight: !!float 1e-1
  # r1 regularization for discriminator
  r1_reg_weight: 10

  net_d_iters: 1
  net_d_init_iters: 0
  net_d_reg_every: 16
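  # Note: following the StyleGAN2 training recipe, R1 regularization
  # (r1_reg_weight: 10) is applied lazily, i.e. only every
  # net_d_reg_every = 16 discriminator updates.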

# validation settings
val:
  val_freq: !!float 5e3
  save_img: true

  metrics:
    psnr: # metric name
      type: calculate_psnr
      crop_border: 0
      test_y_channel: false

# logging settings
logger:
  print_freq: 100
  save_checkpoint_freq: !!float 5e3
  use_tb_logger: true
  wandb:
    project: ~
    resume_id: ~

# dist training settings
dist_params:
  backend: nccl
  port: 29500

find_unused_parameters: true
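# Note: find_unused_parameters is enabled for PyTorch DistributedDataParallel,
# which is needed when some registered parameters do not receive gradients in
# every forward/backward pass (as can happen with the alternating
# generator/discriminator updates above).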