Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
hackassin
GitHub Repository: hackassin/learnopencv
Path: blob/master/FBAMatting/networks/models.py
3119 views
1
import networks.layers_WS as L
2
import networks.resnet_bn as resnet_bn
3
import networks.resnet_GN_WS as resnet_GN_WS
4
import torch
5
import torch.nn as nn
6
7
8
def build_model(args):
    """Build the full FBA matting network from command-line args.

    Args:
        args: namespace with `encoder` (architecture name), `decoder`
            (architecture name) and `weights` (checkpoint path, or the
            literal string "default" to skip loading).

    Returns:
        A `MattingModule` wrapping the encoder/decoder pair.
    """
    builder = ModelBuilder()
    net_encoder = builder.build_encoder(arch=args.encoder)

    # BN-based encoders need a BN-based decoder; everything else uses GroupNorm.
    batch_norm = "BN" in args.encoder
    net_decoder = builder.build_decoder(arch=args.decoder, batch_norm=batch_norm)

    model = MattingModule(net_encoder, net_decoder)

    if args.weights != "default":
        # map_location="cpu" so GPU-saved checkpoints also load on CPU-only
        # machines; load_state_dict moves tensors onto the model's device.
        sd = torch.load(args.weights, map_location="cpu")
        model.load_state_dict(sd, strict=True)

    return model
25
26
27
class MattingModule(nn.Module):
    """End-to-end matting network: an encoder/decoder pair run in sequence."""

    def __init__(self, net_enc, net_dec):
        super(MattingModule, self).__init__()
        self.encoder = net_enc
        self.decoder = net_dec

    def forward(self, image, two_chan_trimap, image_n, trimap_transformed):
        # Stack the normalised image with both trimap encodings along channels.
        encoder_input = torch.cat((image_n, trimap_transformed, two_chan_trimap), 1)
        features, pool_indices = self.encoder(encoder_input, return_feature_maps=True)
        return self.decoder(features, image, pool_indices, two_chan_trimap)
37
38
39
class ModelBuilder:
    """Factory for the FBA encoder and decoder networks."""

    def build_encoder(self, arch="resnet50_GN"):
        """Build a dilated ResNet-50 encoder and widen its first conv.

        Args:
            arch: "resnet50_GN_WS" or "resnet50_BN". (NOTE(review): the
                default "resnet50_GN" is not handled and raises — confirm
                whether it should map to the GN_WS variant.)

        Returns:
            The encoder module, with `conv1` widened to 11 input channels.

        Raises:
            Exception: if `arch` is not a recognised architecture name.
        """
        if arch == "resnet50_GN_WS":
            orig_resnet = resnet_GN_WS.__dict__["l_resnet50"]()
            net_encoder = ResnetDilated(orig_resnet, dilate_scale=8)
        elif arch == "resnet50_BN":
            orig_resnet = resnet_bn.__dict__["l_resnet50"]()
            net_encoder = ResnetDilatedBN(orig_resnet, dilate_scale=8)
        else:
            raise Exception("Architecture undefined!")

        # 3 RGB + 6 transformed-trimap + 2 two-channel-trimap input planes.
        num_channels = 3 + 6 + 2

        if num_channels > 3:
            print(f"modifying input layer to accept {num_channels} channels")
            conv1_weights = net_encoder.state_dict()["conv1.weight"]

            c_out, c_in, h, w = conv1_weights.size()
            # Copy pretrained RGB filters; zero-init the extra input planes.
            conv1_mod = torch.zeros(c_out, num_channels, h, w)
            conv1_mod[:, :3, :, :] = conv1_weights

            # Patch the existing conv in place. (Was: aliased reassignment of
            # the same object plus a redundant second load_state_dict pass.)
            net_encoder.conv1.in_channels = num_channels
            net_encoder.conv1.weight = torch.nn.Parameter(conv1_mod)

        return net_encoder

    def build_decoder(self, arch="fba_decoder", batch_norm=False):
        """Build the FBA decoder.

        Raises:
            Exception: if `arch` is not recognised. (Bug fix: previously
            fell through and raised NameError on the unbound local.)
        """
        if arch == "fba_decoder":
            return fba_decoder(batch_norm=batch_norm)
        raise Exception("Architecture undefined!")
78
79
80
class ResnetDilatedBN(nn.Module):
    """Dilated ResNet backbone (BatchNorm stem variant).

    Converts the strides of the last stage(s) of a pretrained ResNet into
    dilations so the output stride is 8 (or 16), and exposes intermediate
    feature maps plus the max-pool indices for the decoder.
    """

    def __init__(self, orig_resnet, dilate_scale=8):
        super(ResnetDilatedBN, self).__init__()
        from functools import partial

        # Replace strides with dilations in the deepest stage(s).
        if dilate_scale == 8:
            orig_resnet.layer3.apply(partial(self._nostride_dilate, dilate=2))
            orig_resnet.layer4.apply(partial(self._nostride_dilate, dilate=4))
        elif dilate_scale == 16:
            orig_resnet.layer4.apply(partial(self._nostride_dilate, dilate=2))

        # Adopt every pretrained stage except the avg-pool/FC head.
        for name in (
            "conv1", "bn1", "relu1", "conv2", "bn2", "relu2",
            "conv3", "bn3", "relu3", "maxpool",
            "layer1", "layer2", "layer3", "layer4",
        ):
            setattr(self, name, getattr(orig_resnet, name))

    def _nostride_dilate(self, m, dilate):
        """Turn a strided conv into a stride-1 dilated conv (in place)."""
        if m.__class__.__name__.find("Conv") == -1:
            return
        if m.stride == (2, 2):
            # Strided conv: drop the stride, dilate 3x3 kernels at half rate.
            m.stride = (1, 1)
            if m.kernel_size == (3, 3):
                m.dilation = (dilate // 2, dilate // 2)
                m.padding = (dilate // 2, dilate // 2)
        elif m.kernel_size == (3, 3):
            # Other 3x3 convs get the full dilation rate.
            m.dilation = (dilate, dilate)
            m.padding = (dilate, dilate)

    def forward(self, x, return_feature_maps=False):
        conv_out = [x]
        x = self.relu1(self.bn1(self.conv1(x)))
        x = self.relu2(self.bn2(self.conv2(x)))
        x = self.relu3(self.bn3(self.conv3(x)))
        conv_out.append(x)
        # maxpool is expected to return (output, indices) — see decoder use.
        x, indices = self.maxpool(x)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
            conv_out.append(x)

        if return_feature_maps:
            return conv_out, indices
        return [x]
141
142
143
class Resnet(nn.Module):
    """Undilated ResNet feature extractor (avg-pool/FC head removed).

    Unlike the dilated variants, `forward` does not return the max-pool
    indices — only the list of intermediate feature maps.
    """

    def __init__(self, orig_resnet):
        super(Resnet, self).__init__()

        # Adopt every pretrained stage except the avg-pool/FC head.
        for name in (
            "conv1", "bn1", "relu1", "conv2", "bn2", "relu2",
            "conv3", "bn3", "relu3", "maxpool",
            "layer1", "layer2", "layer3", "layer4",
        ):
            setattr(self, name, getattr(orig_resnet, name))

    def forward(self, x, return_feature_maps=False):
        conv_out = []

        x = self.relu1(self.bn1(self.conv1(x)))
        x = self.relu2(self.bn2(self.conv2(x)))
        x = self.relu3(self.bn3(self.conv3(x)))
        conv_out.append(x)
        # maxpool is expected to return (output, indices); indices unused here.
        x, _indices = self.maxpool(x)

        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
            conv_out.append(x)

        if return_feature_maps:
            return conv_out
        return [x]
184
185
186
class ResnetDilated(nn.Module):
    """Dilated ResNet backbone (GroupNorm/WS stem variant: single conv1/relu).

    Converts the strides of the last stage(s) of a pretrained ResNet into
    dilations so the output stride is 8 (or 16), and exposes intermediate
    feature maps plus the max-pool indices for the decoder.
    """

    def __init__(self, orig_resnet, dilate_scale=8):
        super(ResnetDilated, self).__init__()
        from functools import partial

        # Replace strides with dilations in the deepest stage(s).
        if dilate_scale == 8:
            orig_resnet.layer3.apply(partial(self._nostride_dilate, dilate=2))
            orig_resnet.layer4.apply(partial(self._nostride_dilate, dilate=4))
        elif dilate_scale == 16:
            orig_resnet.layer4.apply(partial(self._nostride_dilate, dilate=2))

        # Adopt every pretrained stage except the avg-pool/FC head.
        for name in (
            "conv1", "bn1", "relu", "maxpool",
            "layer1", "layer2", "layer3", "layer4",
        ):
            setattr(self, name, getattr(orig_resnet, name))

    def _nostride_dilate(self, m, dilate):
        """Turn a strided conv into a stride-1 dilated conv (in place)."""
        if m.__class__.__name__.find("Conv") == -1:
            return
        if m.stride == (2, 2):
            # Strided conv: drop the stride, dilate 3x3 kernels at half rate.
            m.stride = (1, 1)
            if m.kernel_size == (3, 3):
                m.dilation = (dilate // 2, dilate // 2)
                m.padding = (dilate // 2, dilate // 2)
        elif m.kernel_size == (3, 3):
            # Other 3x3 convs get the full dilation rate.
            m.dilation = (dilate, dilate)
            m.padding = (dilate, dilate)

    def forward(self, x, return_feature_maps=False):
        conv_out = [x]
        x = self.relu(self.bn1(self.conv1(x)))
        conv_out.append(x)
        # maxpool is expected to return (output, indices) — see decoder use.
        x, indices = self.maxpool(x)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
            conv_out.append(x)

        if return_feature_maps:
            return conv_out, indices
        return [x]
239
240
241
def norm(dim, bn=False):
    """Return the decoder's normalisation layer for `dim` channels.

    GroupNorm with 32 groups by default; BatchNorm2d when `bn` is truthy.
    (Was `if bn is False` — an identity comparison that misclassifies falsy
    non-bool values such as 0; replaced with plain truthiness.)
    """
    return nn.BatchNorm2d(dim) if bn else nn.GroupNorm(32, dim)
246
247
248
def fba_fusion(alpha, img, F, B):
    """Jointly refine alpha, foreground F and background B so they satisfy
    the compositing equation img = alpha*F + (1-alpha)*B.

    All tensors are assumed to lie in [0, 1]; alpha has one channel, F/B/img
    three. Returns the refined (alpha, F, B) tuple, each clamped to [0, 1].
    """
    # Sequential update: the B update deliberately uses the refined F.
    F_new = alpha * img + (1 - alpha ** 2) * F - alpha * (1 - alpha) * B
    B_new = (1 - alpha) * img + (2 * alpha - alpha ** 2) * B - alpha * (1 - alpha) * F_new

    F = F_new.clamp(0, 1)
    B = B_new.clamp(0, 1)

    # Closed-form alpha refinement, regularised towards the current alpha.
    lam = 0.1
    diff = F - B
    numer = alpha * lam + ((img - B) * diff).sum(1, keepdim=True)
    denom = (diff * diff).sum(1, keepdim=True) + lam
    alpha = (numer / denom).clamp(0, 1)
    return alpha, F, B
260
261
262
class fba_decoder(nn.Module):
    """FBA decoder: pyramid pooling over the deepest encoder features, then
    three bilinear-upsampling stages with encoder skip connections, ending in
    a 7-channel prediction (1 alpha + 3 foreground + 3 background)."""

    def __init__(self, batch_norm=False):
        super(fba_decoder, self).__init__()
        pool_scales = (1, 2, 3, 6)
        self.batch_norm = batch_norm

        # Pyramid pooling module: one branch per scale, each 2048 -> 256.
        self.ppm = nn.ModuleList(
            [
                nn.Sequential(
                    nn.AdaptiveAvgPool2d(scale),
                    L.Conv2d(2048, 256, kernel_size=1, bias=True),
                    norm(256, self.batch_norm),
                    nn.LeakyReLU(),
                )
                for scale in pool_scales
            ]
        )

        # Fuse conv5 with the pooled pyramid: 2048 + 4*256 channels in.
        self.conv_up1 = nn.Sequential(
            L.Conv2d(
                2048 + len(pool_scales) * 256, 256, kernel_size=3, padding=1, bias=True,
            ),
            norm(256, self.batch_norm),
            nn.LeakyReLU(),
            L.Conv2d(256, 256, kernel_size=3, padding=1),
            norm(256, self.batch_norm),
            nn.LeakyReLU(),
        )

        self.conv_up2 = nn.Sequential(
            L.Conv2d(256 + 256, 256, kernel_size=3, padding=1, bias=True),
            norm(256, self.batch_norm),
            nn.LeakyReLU(),
        )

        # Skip-feature width differs between backbones: 128 for the BN
        # variant, 64 for the GN/WS variant.
        d_up3 = 128 if self.batch_norm else 64
        self.conv_up3 = nn.Sequential(
            L.Conv2d(256 + d_up3, 64, kernel_size=3, padding=1, bias=True),
            norm(64, self.batch_norm),
            nn.LeakyReLU(),
        )

        # NOTE(review): not used by forward(); kept so the module layout
        # (and any checkpoints) stay unchanged.
        self.unpool = nn.MaxUnpool2d(2, stride=2)

        # Prediction head over upsampled features + raw image + trimap.
        self.conv_up4 = nn.Sequential(
            nn.Conv2d(64 + 3 + 3 + 2, 32, kernel_size=3, padding=1, bias=True),
            nn.LeakyReLU(),
            nn.Conv2d(32, 16, kernel_size=3, padding=1, bias=True),
            nn.LeakyReLU(),
            nn.Conv2d(16, 7, kernel_size=1, padding=0, bias=True),
        )

    def forward(self, conv_out, img, indices, two_chan_trimap):
        deepest = conv_out[-1]
        size = deepest.size()

        # Pyramid pooling: pool at each scale, project, upsample back.
        pooled = [deepest]
        for branch in self.ppm:
            pooled.append(
                nn.functional.interpolate(
                    branch(deepest),
                    (size[2], size[3]),
                    mode="bilinear",
                    align_corners=False,
                )
            )
        x = self.conv_up1(torch.cat(pooled, 1))

        # Three upsample-and-fuse stages with progressively shallower skips.
        x = torch.nn.functional.interpolate(
            x, scale_factor=2, mode="bilinear", align_corners=False,
        )
        x = self.conv_up2(torch.cat((x, conv_out[-4]), 1))

        x = torch.nn.functional.interpolate(
            x, scale_factor=2, mode="bilinear", align_corners=False,
        )
        x = self.conv_up3(torch.cat((x, conv_out[-5]), 1))

        x = torch.nn.functional.interpolate(
            x, scale_factor=2, mode="bilinear", align_corners=False,
        )
        # Final stage also sees the first 3 channels of the raw encoder input,
        # the original image and the two-channel trimap.
        x = torch.cat((x, conv_out[-6][:, :3], img, two_chan_trimap), 1)

        output = self.conv_up4(x)

        # Split the 7-channel head: alpha (clamped), F and B (sigmoid).
        alpha = torch.clamp(output[:, 0][:, None], 0, 1)
        F = torch.sigmoid(output[:, 1:4])
        B = torch.sigmoid(output[:, 4:7])

        # FBA Fusion: enforce consistency with the compositing equation.
        alpha, F, B = fba_fusion(alpha, img, F, B)

        return torch.cat((alpha, F, B), 1)
365
366