Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Tetragramm
GitHub Repository: Tetragramm/opencv
Path: blob/master/modules/dnn/src/layers/normalize_bbox_layer.cpp
16337 views
1
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
44
#include "layers_common.hpp"
45
#include "../op_inf_engine.hpp"
46
47
namespace cv { namespace dnn {
48
49
// L_p normalization layer (SSD-style "NormalizeBBox"): for each sample it divides a
// group of values by their L_p norm, optionally followed by a learned per-channel
// (or shared) scale stored in blobs[0].
class NormalizeBBoxLayerImpl CV_FINAL : public NormalizeBBoxLayer
{
public:
    NormalizeBBoxLayerImpl(const LayerParams& params)
    {
        setParamsFrom(params);
        // p of the L_p norm (p=2 -> Euclidean). eps is added to the reduced sum
        // to avoid division by zero / overflow for all-zero inputs.
        pnorm = params.get<float>("p", 2);
        epsilon = params.get<float>("eps", 1e-10f);
        acrossSpatial = params.get<bool>("across_spatial", true);
        startAxis = params.get<int>("start_axis", 1);
        // "across_spatial" and "end_axis" are two alternative ways to define the
        // normalized extent; specifying both would be ambiguous.
        CV_Assert(!params.has("across_spatial") || !params.has("end_axis"));
        // -1 means "up to the last axis" and is resolved in finalize().
        endAxis = params.get<int>("end_axis", acrossSpatial ? -1 : startAxis);
        CV_Assert(pnorm > 0);
    }

    virtual bool supportBackend(int backendId) CV_OVERRIDE
    {
        if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
        {
            // IE's Normalize/GRN primitives only implement the L2 norm.
            if (pnorm != 2)
                return false;
            if (!blobs.empty())
                return true;
            if (preferableTarget == DNN_TARGET_MYRIAD)
                return !acrossSpatial;
            return startAxis == 1 && (!acrossSpatial || endAxis > 1);
        }
        else
            return backendId == DNN_BACKEND_OPENCV;
    }

    bool getMemoryShapes(const std::vector<MatShape> &inputs,
                         const int requiredOutputs,
                         std::vector<MatShape> &outputs,
                         std::vector<MatShape> &internals) const CV_OVERRIDE
    {
        CV_Assert(inputs.size() == 1);
        Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
        // One scratch buffer shaped like a single sample of the input.
        internals.resize(1, inputs[0]);
        internals[0][0] = 1;  // Batch size.
        return true;
    }

    void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
    {
        std::vector<Mat> inputs;
        inputs_arr.getMatVector(inputs);
        CV_Assert(inputs.size() == 1);
        // Resolve -1 sentinels now that the input rank is known, and recompute
        // acrossSpatial so it is consistent with the resolved axis range.
        endAxis = endAxis == -1 ? (inputs[0].dims - 1) : endAxis;
        startAxis = startAxis == -1 ? (inputs[0].dims - 1) : startAxis;
        acrossSpatial = (startAxis == 1 && endAxis == inputs[0].dims - 1);
    }

#ifdef HAVE_OPENCL
    bool forward_ocl(InputArrayOfArrays inputs_, OutputArrayOfArrays outputs_, OutputArrayOfArrays internals_)
    {
        std::vector<UMat> inputs;
        std::vector<UMat> outputs;
        std::vector<UMat> internals;

        // FP16 is not handled here; fall back to the generic path.
        if (inputs_.depth() == CV_16S)
            return false;

        inputs_.getUMatVector(inputs);
        outputs_.getUMatVector(outputs);
        internals_.getUMatVector(internals);

        CV_Assert(inputs.size() == 1 && outputs.size() == 1);
        CV_Assert(inputs[0].total() == outputs[0].total());

        const UMat& inp0 = inputs[0];
        UMat& buffer = internals[0];
        startAxis = clamp(startAxis, inp0.dims);
        endAxis = clamp(endAxis, inp0.dims);

        // num:       independent samples (axes before startAxis)
        // numPlanes: channels reduced by the norm (startAxis..endAxis)
        // planeSize: spatial extent each channel contributes per position
        size_t num = total(shape(inp0.size), 0, startAxis);
        size_t numPlanes = total(shape(inp0.size), startAxis, endAxis + 1);
        CV_Assert(num * numPlanes != 0);  // guard the division below (matches CPU path)
        size_t planeSize = inp0.total() / (num * numPlanes);
        MatShape s = shape(1, inputs[0].total());
        UMat inp = inputs[0].reshape(1, s.size(), &s[0]).reshape(1, num);
        UMat out = outputs[0].reshape(1, s.size(), &s[0]).reshape(1, num);
        for (size_t i = 0; i < num; ++i)
        {
            s = shape(numPlanes, planeSize);
            UMat src = inp.row(i).reshape(1, s.size(), &s[0]);
            UMat dst = out.row(i).reshape(1, s.size(), &s[0]);

            UMat abs_mat;
            absdiff(src, cv::Scalar::all(0), abs_mat);  // |src|
            pow(abs_mat, pnorm, buffer);

            if (planeSize == 1)
            {
                // Whole sample reduces to a single norm value.
                // add eps to avoid overflow
                float absSum = (float)sum(buffer)[0] + epsilon;
                float norm = pow(absSum, 1.0f / pnorm);
                multiply(src, 1.0f / norm, dst);
            }
            else
            {
                // Per-position norm across channels (rows of the plane matrix).
                Mat norm;
                reduce(buffer, norm, 0, REDUCE_SUM);
                norm += epsilon;

                // compute inverted norm to call multiply instead divide
                cv::pow(norm, -1.0f / pnorm, norm);

                repeat(norm, numPlanes, 1, buffer);
                multiply(src, buffer, dst);
            }

            if (!blobs.empty())
            {
                // scale the output
                Mat scale = blobs[0];
                if (scale.total() == 1)
                {
                    // _scale: 1 x 1
                    multiply(dst, scale.at<float>(0, 0), dst);
                }
                else
                {
                    // _scale: _channels x 1
                    CV_Assert(scale.total() == numPlanes);
                    repeat(scale, 1, dst.cols, buffer);
                    multiply(dst, buffer, dst);
                }
            }
        }
        return true;
    }
#endif

    void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
    {
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());

        CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
                   forward_ocl(inputs_arr, outputs_arr, internals_arr))

        // FP16 inputs are converted and re-dispatched by the generic fallback.
        if (inputs_arr.depth() == CV_16S)
        {
            forward_fallback(inputs_arr, outputs_arr, internals_arr);
            return;
        }

        std::vector<Mat> inputs, outputs, internals;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);
        internals_arr.getMatVector(internals);

        CV_Assert(inputs.size() == 1 && outputs.size() == 1);
        CV_Assert(inputs[0].total() == outputs[0].total());

        const Mat& inp0 = inputs[0];
        Mat& buffer = internals[0];
        startAxis = clamp(startAxis, inp0.dims);
        endAxis = clamp(endAxis, inp0.dims);

        const float* inpData = inp0.ptr<float>();
        float* outData = outputs[0].ptr<float>();

        // Same num/numPlanes/planeSize decomposition as forward_ocl above.
        size_t num = total(shape(inp0.size), 0, startAxis);
        size_t numPlanes = total(shape(inp0.size), startAxis, endAxis + 1);
        CV_Assert(num * numPlanes != 0);
        size_t planeSize = inp0.total() / (num * numPlanes);
        for (size_t n = 0; n < num; ++n)
        {
            // Zero-copy views over the current sample.
            Mat src = Mat(numPlanes, planeSize, CV_32F, (void*)inpData);
            Mat dst = Mat(numPlanes, planeSize, CV_32F, (void*)outData);
            cv::pow(abs(src), pnorm, buffer);

            if (planeSize == 1)
            {
                // add eps to avoid overflow
                float absSum = (float)sum(buffer)[0] + epsilon;
                float norm = pow(absSum, 1.0f / pnorm);
                multiply(src, 1.0f / norm, dst);
            }
            else
            {
                Mat norm;
                reduce(buffer, norm, 0, REDUCE_SUM);
                norm += epsilon;

                // compute inverted norm to call multiply instead divide
                cv::pow(norm, -1.0f / pnorm, norm);

                repeat(norm, numPlanes, 1, buffer);
                multiply(src, buffer, dst);
            }

            if (!blobs.empty())
            {
                // scale the output
                Mat scale = blobs[0];
                if (scale.total() == 1)
                {
                    // _scale: 1 x 1
                    dst *= scale.at<float>(0, 0);
                }
                else
                {
                    // _scale: _channels x 1
                    CV_Assert(scale.total() == numPlanes);
                    repeat(scale, 1, dst.cols, buffer);
                    multiply(dst, buffer, dst);
                }
            }
            inpData += numPlanes * planeSize;
            outData += numPlanes * planeSize;
        }
    }

    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
    {
#ifdef HAVE_INF_ENGINE
        InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);

        InferenceEngine::LayerParams lp;
        lp.name = name;
        lp.precision = InferenceEngine::Precision::FP32;

        if (input->dims.size() == 4)
        {
            // 4D input -> map to IE's "Normalize" primitive.
            const int numChannels = input->dims[2];  // NOTE: input->dims are reversed (whcn)

            lp.type = "Normalize";
            std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
            if (blobs.empty())
            {
                // IE's Normalize always expects per-channel weights; supply ones
                // when the OpenCV layer has no trained scale.
                auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
                                                                        InferenceEngine::Layout::C,
                                                                        {(size_t)numChannels});
                weights->allocate();
                std::vector<float> ones(numChannels, 1);
                weights->set(ones);
                ieLayer->blobs["weights"] = weights;
                ieLayer->params["channel_shared"] = "0";
            }
            else
            {
                // Explicit cast avoids a signed/unsigned comparison.
                CV_Assert((size_t)numChannels == blobs[0].total());
                ieLayer->blobs["weights"] = wrapToInfEngineBlob(blobs[0], {(size_t)numChannels}, InferenceEngine::Layout::C);
                ieLayer->params["channel_shared"] = blobs[0].total() == 1 ? "1" : "0";
            }
            ieLayer->params["eps"] = format("%f", epsilon);
            ieLayer->params["across_spatial"] = acrossSpatial ? "1" : "0";
            return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
        }
        else
        {
            // Non-4D input -> IE's Global Response Normalization layer.
            InferenceEngine::LayerParams lp;
            lp.name = name;
            lp.type = "GRN";
            lp.precision = InferenceEngine::Precision::FP32;
            std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
            ieLayer->params["bias"] = format("%f", epsilon);
            return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
        }
#endif  // HAVE_INF_ENGINE
        return Ptr<BackendNode>();
    }

private:
    int startAxis, endAxis;  // resolved (non-negative) axis range of the norm reduction
};
317
318
319
// Factory entry point: builds the default OpenCV implementation of NormalizeBBox.
Ptr<NormalizeBBoxLayer> NormalizeBBoxLayer::create(const LayerParams &params)
{
    Ptr<NormalizeBBoxLayer> layer(new NormalizeBBoxLayerImpl(params));
    return layer;
}
323
324
}
325
}
326
327