GitHub Repository: hackassin/learnopencv
Path: blob/master/Face-Recognition-with-ArcFace/align/get_nets.py

import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
import numpy as np


class Flatten(nn.Module):

    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, x):
        """
        Arguments:
            x: a float tensor with shape [batch_size, c, h, w].
        Returns:
            a float tensor with shape [batch_size, c*h*w].
        """

        # without this, the pretrained model doesn't work
        x = x.transpose(3, 2).contiguous()
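        # (presumably the converted weights assume a width-first flatten
        # order, hence the H/W swap before flattening)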

        return x.view(x.size(0), -1)


class PNet(nn.Module):

    def __init__(self):

        super(PNet, self).__init__()

        # suppose we have an input of size HxW, then
        # after the first conv: H - 2,
        # after pool: ceil((H - 2)/2),
        # after the second conv: ceil((H - 2)/2) - 2,
        # after the last conv: ceil((H - 2)/2) - 4,
        # and the same for W
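        # for example, a 12x12 input gives 12 -> 10 -> 5 -> 3 -> 1,
        # i.e. a 1x1 output map with 32 channels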

        self.features = nn.Sequential(OrderedDict([
            ('conv1', nn.Conv2d(3, 10, 3, 1)),
            ('prelu1', nn.PReLU(10)),
            ('pool1', nn.MaxPool2d(2, 2, ceil_mode=True)),

            ('conv2', nn.Conv2d(10, 16, 3, 1)),
            ('prelu2', nn.PReLU(16)),

            ('conv3', nn.Conv2d(16, 32, 3, 1)),
            ('prelu3', nn.PReLU(32))
        ]))

        self.conv4_1 = nn.Conv2d(32, 2, 1, 1)
        self.conv4_2 = nn.Conv2d(32, 4, 1, 1)
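        # conv4_1 scores face vs. non-face, conv4_2 regresses the four
        # bounding-box offsets (see the output shapes in forward below)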

        weights = np.load("align/pnet.npy", allow_pickle=True)[()]
        for n, p in self.named_parameters():
            p.data = torch.FloatTensor(weights[n])
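        # note: the .npy checkpoint stores a dict of numpy arrays keyed by
        # parameter name; the "align/..." path is resolved relative to the
        # current working directory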

    def forward(self, x):
        """
        Arguments:
            x: a float tensor with shape [batch_size, 3, h, w].
        Returns:
            b: a float tensor with shape [batch_size, 4, h', w'].
            a: a float tensor with shape [batch_size, 2, h', w'].
        """
        x = self.features(x)
        a = self.conv4_1(x)
        b = self.conv4_2(x)
        a = F.softmax(a, dim=1)  # explicit dim avoids the implicit-dim deprecation warning
        return b, a


class RNet(nn.Module):

    def __init__(self):

        super(RNet, self).__init__()

        self.features = nn.Sequential(OrderedDict([
            ('conv1', nn.Conv2d(3, 28, 3, 1)),
            ('prelu1', nn.PReLU(28)),
            ('pool1', nn.MaxPool2d(3, 2, ceil_mode=True)),

            ('conv2', nn.Conv2d(28, 48, 3, 1)),
            ('prelu2', nn.PReLU(48)),
            ('pool2', nn.MaxPool2d(3, 2, ceil_mode=True)),

            ('conv3', nn.Conv2d(48, 64, 2, 1)),
            ('prelu3', nn.PReLU(64)),

            ('flatten', Flatten()),
            ('conv4', nn.Linear(576, 128)),
            ('prelu4', nn.PReLU(128))
        ]))

        self.conv5_1 = nn.Linear(128, 2)
        self.conv5_2 = nn.Linear(128, 4)

        weights = np.load("align/rnet.npy", allow_pickle=True)[()]
        for n, p in self.named_parameters():
            p.data = torch.FloatTensor(weights[n])

    def forward(self, x):
        """
        Arguments:
            x: a float tensor with shape [batch_size, 3, h, w].
        Returns:
            b: a float tensor with shape [batch_size, 4].
            a: a float tensor with shape [batch_size, 2].
        """
        x = self.features(x)
        a = self.conv5_1(x)
        b = self.conv5_2(x)
        a = F.softmax(a, dim=1)
        return b, a


class ONet(nn.Module):

    def __init__(self):

        super(ONet, self).__init__()

        self.features = nn.Sequential(OrderedDict([
            ('conv1', nn.Conv2d(3, 32, 3, 1)),
            ('prelu1', nn.PReLU(32)),
            ('pool1', nn.MaxPool2d(3, 2, ceil_mode=True)),

            ('conv2', nn.Conv2d(32, 64, 3, 1)),
            ('prelu2', nn.PReLU(64)),
            ('pool2', nn.MaxPool2d(3, 2, ceil_mode=True)),

            ('conv3', nn.Conv2d(64, 64, 3, 1)),
            ('prelu3', nn.PReLU(64)),
            ('pool3', nn.MaxPool2d(2, 2, ceil_mode=True)),

            ('conv4', nn.Conv2d(64, 128, 2, 1)),
            ('prelu4', nn.PReLU(128)),

            ('flatten', Flatten()),
            ('conv5', nn.Linear(1152, 256)),
            ('drop5', nn.Dropout(0.25)),
            ('prelu5', nn.PReLU(256)),
        ]))

        self.conv6_1 = nn.Linear(256, 2)
        self.conv6_2 = nn.Linear(256, 4)
        self.conv6_3 = nn.Linear(256, 10)
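        # three heads: face/non-face score (2), box offsets (4) and what are
        # presumably 5 facial landmark (x, y) coordinates (10)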

        weights = np.load("align/onet.npy", allow_pickle=True)[()]
        for n, p in self.named_parameters():
            p.data = torch.FloatTensor(weights[n])

    def forward(self, x):
        """
        Arguments:
            x: a float tensor with shape [batch_size, 3, h, w].
        Returns:
            c: a float tensor with shape [batch_size, 10].
            b: a float tensor with shape [batch_size, 4].
            a: a float tensor with shape [batch_size, 2].
        """
        x = self.features(x)
        a = self.conv6_1(x)
        b = self.conv6_2(x)
        c = self.conv6_3(x)
        a = F.softmax(a, dim=1)
        return c, b, a
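

# A minimal usage sketch, assuming the align/pnet.npy, align/rnet.npy and
# align/onet.npy weight files are present (they are loaded in the
# constructors above). PNet is fully convolutional; RNet and ONet expect
# fixed-size 24x24 and 48x48 crops, which is what makes the flattened
# features match the 576- and 1152-dimensional linear layers.
if __name__ == "__main__":

    pnet, rnet, onet = PNet(), RNet(), ONet()
    for net in (pnet, rnet, onet):
        net.eval()

    with torch.no_grad():
        # PNet: box offsets and face probabilities for every sliding window
        offsets, probs = pnet(torch.randn(1, 3, 120, 160))
        print(offsets.shape, probs.shape)   # [1, 4, h', w'], [1, 2, h', w']

        # RNet: one offset/probability vector per 24x24 candidate crop
        offsets, probs = rnet(torch.randn(5, 3, 24, 24))
        print(offsets.shape, probs.shape)   # [5, 4], [5, 2]

        # ONet: additionally predicts 10 landmark values per 48x48 crop
        landmarks, offsets, probs = onet(torch.randn(5, 3, 48, 48))
        print(landmarks.shape)              # [5, 10]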