// File: modules/dnn/src/layers/scale_layer.cpp (OpenCV DNN module)
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

/*
Implementation of Scale layer.
*/
#include "../precomp.hpp"
13
#include "layers_common.hpp"
14
#include "../op_halide.hpp"
15
#include "../op_inf_engine.hpp"
16
#include <opencv2/dnn/shape_utils.hpp>
17
18
namespace cv
19
{
20
namespace dnn
21
{
22
23
class ScaleLayerImpl CV_FINAL : public ScaleLayer
24
{
25
public:
26
ScaleLayerImpl(const LayerParams& params)
27
{
28
setParamsFrom(params);
29
hasBias = params.get<bool>("bias_term", false);
30
axis = params.get<int>("axis", 1);
31
hasWeights = false;
32
}
33
34
bool getMemoryShapes(const std::vector<MatShape> &inputs,
35
const int requiredOutputs,
36
std::vector<MatShape> &outputs,
37
std::vector<MatShape> &internals) const CV_OVERRIDE
38
{
39
outputs.assign(1, inputs[0]);
40
return true;
41
}
42
43
virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
44
{
45
std::vector<Mat> inputs;
46
inputs_arr.getMatVector(inputs);
47
hasWeights = blobs.size() == 2 || (blobs.size() == 1 && !hasBias);
48
CV_Assert(inputs.size() == 2 && blobs.empty() || blobs.size() == (int)hasWeights + (int)hasBias);
49
}
50
51
virtual bool supportBackend(int backendId) CV_OVERRIDE
52
{
53
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
54
backendId == DNN_BACKEND_INFERENCE_ENGINE && axis == 1;
55
}
56
57
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
58
{
59
CV_TRACE_FUNCTION();
60
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
61
62
if (inputs_arr.depth() == CV_16S)
63
{
64
forward_fallback(inputs_arr, outputs_arr, internals_arr);
65
return;
66
}
67
68
std::vector<Mat> inputs, outputs;
69
inputs_arr.getMatVector(inputs);
70
outputs_arr.getMatVector(outputs);
71
72
CV_Assert_N(outputs.size() == 1, !blobs.empty() || inputs.size() == 2);
73
74
Mat &inpBlob = inputs[0];
75
Mat &outBlob = outputs[0];
76
// There is a mode when we multiply a first blob by a second one
77
// instead of trainable weights.
78
Mat weights = blobs.empty() ? inputs[1] : (hasWeights ? blobs[0] : Mat());
79
Mat bias = hasBias ? blobs.back().reshape(1, 1) : Mat();
80
if (!weights.empty())
81
weights = weights.reshape(1, 1);
82
MatShape inpShape = shape(inpBlob);
83
const int numWeights = !weights.empty() ? weights.total() : bias.total();
84
CV_Assert(numWeights != 0);
85
if (hasWeights && hasBias)
86
CV_CheckEQ(weights.total(), bias.total(), "Incompatible weights/bias blobs");
87
88
int endAxis;
89
for (endAxis = axis + 1; endAxis <= inpBlob.dims; ++endAxis)
90
{
91
if (total(inpShape, axis, endAxis) == numWeights)
92
break;
93
}
94
CV_Assert(total(inpShape, axis, endAxis) == numWeights);
95
CV_Assert(!hasBias || numWeights == bias.total());
96
CV_CheckTypeEQ(inpBlob.type(), CV_32FC1, ""); CV_CheckTypeEQ(outBlob.type(), CV_32FC1, "");
97
98
int numSlices = total(inpShape, 0, axis);
99
float* inpData = (float*)inpBlob.data;
100
float* outData = (float*)outBlob.data;
101
102
if (endAxis != inpBlob.dims)
103
{
104
float* weightsData = !weights.empty() ? (float*)weights.data : 0;
105
float* biasesData = hasBias ? (float*)bias.data : 0;
106
int spatialSize = total(inpShape, endAxis); // spatialSize != 1
107
for (int i = 0; i < numSlices; ++i)
108
{
109
for (int j = 0; j < numWeights; ++j)
110
{
111
float w = weightsData ? weightsData[j] : 1;
112
float b = biasesData ? biasesData[j] : 0;
113
Mat inpSlice(1, spatialSize, CV_32F, inpData);
114
Mat outSlice(1, spatialSize, CV_32F, outData);
115
inpSlice.convertTo(outSlice, CV_32F, w, b);
116
inpData += spatialSize;
117
outData += spatialSize;
118
}
119
}
120
}
121
else
122
{
123
for (int i = 0; i < numSlices; ++i)
124
{
125
Mat inpSlice(1, numWeights, CV_32F, inpData);
126
Mat outSlice(1, numWeights, CV_32F, outData);
127
if (!weights.empty())
128
{
129
multiply(inpSlice, weights, outSlice);
130
if (hasBias)
131
add(outSlice, bias, outSlice);
132
}
133
else if (hasBias)
134
add(inpSlice, bias, outSlice);
135
inpData += numWeights;
136
outData += numWeights;
137
}
138
}
139
}
140
141
virtual Ptr<BackendNode> tryAttach(const Ptr<BackendNode>& node) CV_OVERRIDE
142
{
143
switch (node->backendId)
144
{
145
case DNN_BACKEND_HALIDE:
146
{
147
#ifdef HAVE_HALIDE
148
auto base = node.dynamicCast<HalideBackendNode>();
149
Halide::Func& input = base->funcs.back();
150
Halide::Var x("x"), y("y"), c("c"), n("n");
151
Halide::Func top = attachHalide(input(x, y, c, n));
152
return Ptr<BackendNode>(new HalideBackendNode(base, top));
153
#endif // HAVE_HALIDE
154
break;
155
}
156
}
157
return Ptr<BackendNode>();
158
}
159
160
virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
161
{
162
#ifdef HAVE_HALIDE
163
Halide::Buffer<float> input = halideBuffer(inputs[0]);
164
Halide::Var x("x"), y("y"), c("c"), n("n");
165
Halide::Func top = attachHalide(input(x, y, c, n));
166
return Ptr<BackendNode>(new HalideBackendNode(top));
167
#endif // HAVE_HALIDE
168
return Ptr<BackendNode>();
169
}
170
171
#ifdef HAVE_HALIDE
172
// attachHalide can work both with Halide::Buffer and Halide::Func. In the
173
// second case it will be a fusion.
174
Halide::Func attachHalide(const Halide::Expr& input)
175
{
176
Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
177
Halide::Var x("x"), y("y"), c("c"), n("n");
178
179
const int numChannels = blobs[0].total();
180
181
Halide::Expr topExpr = input;
182
if (hasWeights)
183
{
184
auto weights = wrapToHalideBuffer(blobs[0], {numChannels});
185
topExpr *= weights(c);
186
}
187
if (hasBias)
188
{
189
auto bias = wrapToHalideBuffer(blobs.back(), {numChannels});
190
topExpr += bias(c);
191
}
192
top(x, y, c, n) = topExpr;
193
return top;
194
}
195
#endif // HAVE_HALIDE
196
197
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
198
{
199
#ifdef HAVE_INF_ENGINE
200
InferenceEngine::LayerParams lp;
201
lp.name = name;
202
lp.type = "ScaleShift";
203
lp.precision = InferenceEngine::Precision::FP32;
204
std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
205
206
CV_Assert(!blobs.empty());
207
const size_t numChannels = blobs[0].total();
208
if (hasWeights)
209
{
210
ieLayer->_weights = wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C);
211
}
212
else
213
{
214
auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
215
{numChannels});
216
weights->allocate();
217
218
std::vector<float> ones(numChannels, 1);
219
weights->set(ones);
220
ieLayer->_weights = weights;
221
}
222
if (hasBias)
223
ieLayer->_biases = wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C);
224
225
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
226
#endif // HAVE_INF_ENGINE
227
return Ptr<BackendNode>();
228
}
229
230
void getScaleShift(Mat& scale, Mat& shift) const CV_OVERRIDE
231
{
232
scale = hasWeights ? blobs[0] : Mat();
233
shift = hasBias ? blobs.back() : Mat();
234
}
235
236
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
237
const std::vector<MatShape> &outputs) const CV_OVERRIDE
238
{
239
CV_UNUSED(outputs); // suppress unused variable warning
240
long flops = 0;
241
for(int i = 0; i < inputs.size(); i++)
242
{
243
flops += 2*total(inputs[i]);
244
}
245
return flops;
246
}
247
248
private:
249
bool hasWeights;
250
};
251
252
253
Ptr<ScaleLayer> ScaleLayer::create(const LayerParams& params)
254
{
255
return Ptr<ScaleLayer>(new ScaleLayerImpl(params));
256
}
257
258
Ptr<Layer> ShiftLayer::create(const LayerParams& params)
259
{
260
LayerParams scaleParams;
261
scaleParams.name = params.name;
262
scaleParams.type = "Scale";
263
scaleParams.blobs = params.blobs;
264
scaleParams.set("bias_term", true);
265
scaleParams.set("axis", 0);
266
return Ptr<ScaleLayer>(new ScaleLayerImpl(scaleParams));
267
}
268
}  // namespace dnn
}  // namespace cv