GitHub Repository: TencentARC/GFPGAN
Path: scripts/convert_gfpganv_to_clean.py
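Illustrative usage (the checkpoint paths below are placeholders, not files shipped with the repository):

    python scripts/convert_gfpganv_to_clean.py \
        --ori_path /path/to/GFPGANv1.pth \
        --save_path /path/to/GFPGANv1_clean.pth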
import argparse
import math
import torch

from gfpgan.archs.gfpganv1_clean_arch import GFPGANv1Clean


def modify_checkpoint(checkpoint_bilinear, checkpoint_clean):
    """Remap the original (bilinear) GFPGANv1 weights onto the GFPGANv1Clean key layout.

    The clean architecture uses plain Linear/Conv2d layers, so the equalized-lr scale
    (1 / sqrt(fan_in), times lr_mul where applicable) and the sqrt(2) gain of
    FusedLeakyReLU are baked directly into the stored weights and biases.
    """
    for ori_k, ori_v in checkpoint_bilinear.items():
        if 'stylegan_decoder' in ori_k:
            if 'style_mlp' in ori_k:  # style MLP layers
                lr_mul = 0.01
                prefix, name, idx, var = ori_k.split('.')
                # remap the layer index: the clean style MLP interleaves Linear and activation modules
                idx = (int(idx) * 2) - 1
                crt_k = f'{prefix}.{name}.{idx}.{var}'
                if var == 'weight':
                    _, c_in = ori_v.size()
                    scale = (1 / math.sqrt(c_in)) * lr_mul
                    crt_v = ori_v * scale * 2**0.5
                else:
                    crt_v = ori_v * lr_mul * 2**0.5
                checkpoint_clean[crt_k] = crt_v
            elif 'modulation' in ori_k:  # modulation in StyleConv
                lr_mul = 1
                crt_k = ori_k
                var = ori_k.split('.')[-1]
                if var == 'weight':
                    _, c_in = ori_v.size()
                    scale = (1 / math.sqrt(c_in)) * lr_mul
                    crt_v = ori_v * scale
                else:
                    crt_v = ori_v * lr_mul
                checkpoint_clean[crt_k] = crt_v
            elif 'style_conv' in ori_k:
                # StyleConv in style_conv1 and style_convs
                if 'activate' in ori_k:  # FusedLeakyReLU
                    # e.g. style_conv1.activate.bias
                    # e.g. style_convs.13.activate.bias
                    split_rlt = ori_k.split('.')
                    if len(split_rlt) == 4:
                        prefix, name, _, var = split_rlt
                        crt_k = f'{prefix}.{name}.{var}'
                    elif len(split_rlt) == 5:
                        prefix, name, idx, _, var = split_rlt
                        crt_k = f'{prefix}.{name}.{idx}.{var}'
                    crt_v = ori_v * 2**0.5  # 2**0.5 used in FusedLeakyReLU
                    c = crt_v.size(0)
                    checkpoint_clean[crt_k] = crt_v.view(1, c, 1, 1)
                elif 'modulated_conv' in ori_k:
                    # e.g. style_conv1.modulated_conv.weight
                    # e.g. style_convs.13.modulated_conv.weight
                    _, c_out, c_in, k1, k2 = ori_v.size()
                    scale = 1 / math.sqrt(c_in * k1 * k2)
                    crt_k = ori_k
                    checkpoint_clean[crt_k] = ori_v * scale
                elif 'weight' in ori_k:
                    crt_k = ori_k
                    checkpoint_clean[crt_k] = ori_v * 2**0.5
            elif 'to_rgb' in ori_k:  # StyleConv in to_rgb1 and to_rgbs
                if 'modulated_conv' in ori_k:
                    # e.g. to_rgb1.modulated_conv.weight
                    # e.g. to_rgbs.5.modulated_conv.weight
                    _, c_out, c_in, k1, k2 = ori_v.size()
                    scale = 1 / math.sqrt(c_in * k1 * k2)
                    crt_k = ori_k
                    checkpoint_clean[crt_k] = ori_v * scale
                else:
                    crt_k = ori_k
                    checkpoint_clean[crt_k] = ori_v
            else:
                crt_k = ori_k
                checkpoint_clean[crt_k] = ori_v
            # end of 'stylegan_decoder'
        elif 'conv_body_first' in ori_k or 'final_conv' in ori_k:
            # key name
            name, _, var = ori_k.split('.')
            crt_k = f'{name}.{var}'
            # weight and bias
            if var == 'weight':
                c_out, c_in, k1, k2 = ori_v.size()
                scale = 1 / math.sqrt(c_in * k1 * k2)
                checkpoint_clean[crt_k] = ori_v * scale * 2**0.5
            else:
                checkpoint_clean[crt_k] = ori_v * 2**0.5
        elif 'conv_body' in ori_k:
            if 'conv_body_up' in ori_k:
                ori_k = ori_k.replace('conv2.weight', 'conv2.1.weight')
                ori_k = ori_k.replace('skip.weight', 'skip.1.weight')
            name1, idx1, name2, _, var = ori_k.split('.')
            crt_k = f'{name1}.{idx1}.{name2}.{var}'
            if name2 == 'skip':
                c_out, c_in, k1, k2 = ori_v.size()
                scale = 1 / math.sqrt(c_in * k1 * k2)
                checkpoint_clean[crt_k] = ori_v * scale / 2**0.5
            else:
                if var == 'weight':
                    c_out, c_in, k1, k2 = ori_v.size()
                    scale = 1 / math.sqrt(c_in * k1 * k2)
                    checkpoint_clean[crt_k] = ori_v * scale
                else:
                    checkpoint_clean[crt_k] = ori_v
            if 'conv1' in ori_k:
                checkpoint_clean[crt_k] *= 2**0.5
        elif 'toRGB' in ori_k:
            crt_k = ori_k
            if 'weight' in ori_k:
                c_out, c_in, k1, k2 = ori_v.size()
                scale = 1 / math.sqrt(c_in * k1 * k2)
                checkpoint_clean[crt_k] = ori_v * scale
            else:
                checkpoint_clean[crt_k] = ori_v
        elif 'final_linear' in ori_k:
            crt_k = ori_k
            if 'weight' in ori_k:
                _, c_in = ori_v.size()
                scale = 1 / math.sqrt(c_in)
                checkpoint_clean[crt_k] = ori_v * scale
            else:
                checkpoint_clean[crt_k] = ori_v
        elif 'condition' in ori_k:
            crt_k = ori_k
            if '0.weight' in ori_k:
                c_out, c_in, k1, k2 = ori_v.size()
                scale = 1 / math.sqrt(c_in * k1 * k2)
                checkpoint_clean[crt_k] = ori_v * scale * 2**0.5
            elif '0.bias' in ori_k:
                checkpoint_clean[crt_k] = ori_v * 2**0.5
            elif '2.weight' in ori_k:
                c_out, c_in, k1, k2 = ori_v.size()
                scale = 1 / math.sqrt(c_in * k1 * k2)
                checkpoint_clean[crt_k] = ori_v * scale
            elif '2.bias' in ori_k:
                checkpoint_clean[crt_k] = ori_v

    return checkpoint_clean
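

# Optional sanity check (an added sketch, not part of the original conversion flow): compare
# the converted dict against a freshly constructed GFPGANv1Clean state_dict to surface any
# key that was remapped to a name the clean model does not have, or to a wrong tensor shape.
def check_converted_keys(converted, reference):
    """Return a list of (key, reason) pairs describing mismatches against `reference`."""
    problems = []
    for k, v in converted.items():
        if k not in reference:
            problems.append((k, 'key missing in the clean state_dict'))
        elif tuple(v.shape) != tuple(reference[k].shape):
            problems.append((k, f'shape {tuple(v.shape)} vs {tuple(reference[k].shape)}'))
    return problems
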
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--ori_path', type=str, help='Path to the original model')
    parser.add_argument('--narrow', type=float, default=1)
    parser.add_argument('--channel_multiplier', type=float, default=2)
    parser.add_argument('--save_path', type=str)
    args = parser.parse_args()

    ori_ckpt = torch.load(args.ori_path)['params_ema']

    net = GFPGANv1Clean(
        512,
        num_style_feat=512,
        channel_multiplier=args.channel_multiplier,
        decoder_load_path=None,
        fix_decoder=False,
        # for stylegan decoder
        num_mlp=8,
        input_is_latent=True,
        different_w=True,
        narrow=args.narrow,
        sft_half=True)
    crt_ckpt = net.state_dict()

    crt_ckpt = modify_checkpoint(ori_ckpt, crt_ckpt)
    print(f'Save to {args.save_path}.')
    torch.save(dict(params_ema=crt_ckpt), args.save_path, _use_new_zipfile_serialization=False)
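
# Illustrative follow-up (kept as a comment so the conversion script itself is unchanged):
# a strict load of the saved checkpoint into the same GFPGANv1Clean instance will raise if
# any key was remapped to a name or shape the clean architecture does not expect, e.g.
#
#     net.load_state_dict(torch.load(args.save_path)['params_ema'], strict=True)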