Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Tetragramm
GitHub Repository: Tetragramm/opencv
Path: blob/master/modules/dnn/src/layers/mvn_layer.cpp
16337 views
1
/*M///////////////////////////////////////////////////////////////////////////////////////
2
//
3
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4
//
5
// By downloading, copying, installing or using the software you agree to this license.
6
// If you do not agree to this license, do not download, install,
7
// copy or use the software.
8
//
9
//
10
// License Agreement
11
// For Open Source Computer Vision Library
12
//
13
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
14
// Copyright (C) 2017, Intel Corporation, all rights reserved.
15
// Third party copyrights are property of their respective owners.
16
//
17
// Redistribution and use in source and binary forms, with or without modification,
18
// are permitted provided that the following conditions are met:
19
//
20
// * Redistribution's of source code must retain the above copyright notice,
21
// this list of conditions and the following disclaimer.
22
//
23
// * Redistribution's in binary form must reproduce the above copyright notice,
24
// this list of conditions and the following disclaimer in the documentation
25
// and/or other materials provided with the distribution.
26
//
27
// * The name of the copyright holders may not be used to endorse or promote products
28
// derived from this software without specific prior written permission.
29
//
30
// This software is provided by the copyright holders and contributors "as is" and
31
// any express or implied warranties, including, but not limited to, the implied
32
// warranties of merchantability and fitness for a particular purpose are disclaimed.
33
// In no event shall the Intel Corporation or contributors be liable for any direct,
34
// indirect, incidental, special, exemplary, or consequential damages
35
// (including, but not limited to, procurement of substitute goods or services;
36
// loss of use, data, or profits; or business interruption) however caused
37
// and on any theory of liability, whether in contract, strict liability,
38
// or tort (including negligence or otherwise) arising in any way out of
39
// the use of this software, even if advised of the possibility of such damage.
40
//
41
//M*/
42
43
#include "../precomp.hpp"
44
#include "layers_common.hpp"
45
#include "../op_inf_engine.hpp"
46
#include <opencv2/dnn/shape_utils.hpp>
47
48
#ifdef HAVE_OPENCL
49
#include "../ocl4dnn/include/math_functions.hpp"
50
#include "opencl_kernels_dnn.hpp"
51
#endif
52
53
namespace cv
54
{
55
namespace dnn
56
{
57
58
// Mean-Variance Normalization (MVN) layer: for each "row" (a batch item, or a
// batch item x channel slice, depending on across_channels) subtracts the mean
// and optionally divides by the standard deviation. Optionally fuses a
// following scale/shift (batch-norm) and/or ReLU activation.
class MVNLayerImpl CV_FINAL : public MVNLayer
{
public:
    MVNLayerImpl(const LayerParams& params)
    {
        setParamsFrom(params);
        normVariance = params.get<bool>("normalize_variance", true);
        acrossChannels = params.get<bool>("across_channels", false);
        eps = params.get<double>("eps", 1e-9);
        fuse_batch_norm = false;
        fuse_relu = false;
        relu_slope = 0.f;
        zeroDev = false;
    }

    // Fused scale/shift coefficients taken from a following batch-norm-like layer
    // (valid only when fuse_batch_norm is true).
    Mat scale, shift;
#ifdef HAVE_OPENCL
    // Device-side copies of scale/shift, lazily uploaded on first OpenCL forward.
    UMat umat_scale, umat_shift;
#endif
    bool fuse_batch_norm;

    Ptr<ReLULayer> activ_relu;
    float relu_slope;
    bool fuse_relu;
    bool zeroDev; // row length is 1, so stddev is always 0. TODO: not taken into account in the Inference Engine backend.

    // Try to fuse a following activation layer into this one.
    // First call fuses a scale/shift layer; a subsequent call on the OpenCL
    // target may additionally fuse a ReLU. Returns true if the layer was fused.
    bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
    {
        if (!layer.empty() && !fuse_relu && !fuse_batch_norm)
        {
            layer->getScaleShift(scale, shift);
            fuse_batch_norm = !scale.empty() || !shift.empty();
            return fuse_batch_norm;
        }

        if (!layer.empty() && preferableTarget == DNN_TARGET_OPENCL)
        {
            activ_relu = layer.dynamicCast<ReLULayer>();
            if( !activ_relu.empty() )
                relu_slope = activ_relu->negativeSlope;
        }
        fuse_relu = !activ_relu.empty();
        return fuse_relu;
    }

    // Precompute the zero-deviation flag from the first input's shape and drop
    // any stale device-side scale/shift buffers.
    void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
    {
        std::vector<Mat> inputs;
        inputs_arr.getMatVector(inputs);
        int splitDim = (acrossChannels) ? 1 : 2;
        int i, newRows = 1;
        for( i = 0; i < splitDim; i++ )
            newRows *= inputs[0].size[i];
        // One element per row => per-row stddev is identically zero.
        zeroDev = inputs[0].total() == (size_t)newRows;
#ifdef HAVE_OPENCL
        umat_scale.release();
        umat_shift.release();
#endif
    }

    virtual bool supportBackend(int backendId) CV_OVERRIDE
    {
        if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
            return !zeroDev && (preferableTarget == DNN_TARGET_CPU || eps <= 1e-7f);
        else
            return backendId == DNN_BACKEND_OPENCV;
    }

#ifdef HAVE_OPENCL
    // Vectorized OpenCL path used when variance is normalized and both the row
    // count and the plane size are multiples of 4. Returns false to let the
    // caller fall back to another implementation.
    bool fast_forward_ocl(std::vector<UMat> &inputs, std::vector<UMat> &outputs)
    {
        // Lazily upload fused scale/shift to the device.
        if (umat_scale.empty() && !scale.empty())
            scale.copyTo(umat_scale);
        if (umat_shift.empty() && !shift.empty())
            shift.copyTo(umat_shift);
        UMat& bnorm_weight = umat_scale;
        UMat& bnorm_bias = umat_shift;

        bool use_half = (inputs[0].depth() == CV_16S);
        String opts = format(" -DT=%s -DT4=%s -Dconvert_T=%s", use_half ? "half" : "float",
                             use_half ? "half4" : "float4", use_half ? "convert_half4" : "convert_float4");

        int splitDim = (acrossChannels) ? 1 : 2;
        for (size_t inpIdx = 0; inpIdx < inputs.size(); inpIdx++)
        {
            UMat &inpMat = inputs[inpIdx];
            UMat &outMat = outputs[inpIdx];
            int newRows = total(shape(inpMat), 0, splitDim);

            // View the blob as a 2D matrix: one normalization group per row.
            MatShape s = shape(newRows, inpMat.total() / newRows);
            UMat meanMat = UMat(s[0], 1, (use_half) ? CV_16S : CV_32F);
            UMat tmpMat = UMat(s[0], s[1], CV_32F);
            float alpha = 1.0f / s[1];  // 1/N for computing the mean

            // Pass 1: per-row means (and squared values into tmpMat).
            String buildopt = "-DNUM=4" + opts;
            ocl::Kernel k("mean_fuse4", ocl::dnn::mvn_oclsrc, buildopt);
            size_t localsize[] = { 128 };
            size_t globalsize[] = { (size_t)s[0] / 4 * localsize[0] };

            int argId = 0;
            k.set(argId++, ocl::KernelArg::PtrReadOnly(inpMat));
            k.set(argId++, (int)s[1]);
            k.set(argId++, alpha);
            k.set(argId++, ocl::KernelArg::PtrWriteOnly(meanMat));
            k.set(argId++, ocl::KernelArg::PtrWriteOnly(tmpMat));
            k.set(argId++, NULL, localsize[0] * sizeof(cl_float4));  // local scratch
            bool ret = k.run(1, globalsize, localsize, false);
            if (!ret)
                return false;

            // Pass 2: normalize, with optional fused batch-norm / ReLU.
            buildopt += format(" %s %s", (fuse_batch_norm) ? "-DFUSE_BATCH_NORM" : "",
                               (fuse_relu) ? "-DFUSE_RELU" : "");

            ocl::Kernel k1("mvn_fuse4", ocl::dnn::mvn_oclsrc, buildopt);
            argId = 0;
            k1.set(argId++, ocl::KernelArg::PtrReadOnly(tmpMat));
            k1.set(argId++, ocl::KernelArg::PtrReadOnly(inpMat));
            k1.set(argId++, ocl::KernelArg::PtrReadOnly(meanMat));
            k1.set(argId++, (int)s[1]);
            k1.set(argId++, (float)alpha);
            k1.set(argId++, (float)eps);
            k1.set(argId++, (float)relu_slope);
            k1.set(argId++, ocl::KernelArg::PtrReadOnly(bnorm_weight));
            k1.set(argId++, ocl::KernelArg::PtrReadOnly(bnorm_bias));
            k1.set(argId++, ocl::KernelArg::PtrWriteOnly(outMat));
            k1.set(argId++, NULL, localsize[0] * sizeof(cl_float4));  // local scratch
            ret = k1.run(1, globalsize, localsize, false);
            if (!ret)
                return false;
        }
        return true;
    }

    // General OpenCL path: mean (and optionally variance) via GEMV reductions,
    // then a normalization kernel. Returns false to fall back to the CPU path.
    bool forward_ocl(InputArrayOfArrays inputs_, OutputArrayOfArrays outputs_, OutputArrayOfArrays internals_)
    {
        if (umat_scale.empty() && !scale.empty())
            scale.copyTo(umat_scale);
        if (umat_shift.empty() && !shift.empty())
            shift.copyTo(umat_shift);
        UMat& bnorm_weight = umat_scale;
        UMat& bnorm_bias = umat_shift;

        std::vector<UMat> inputs;
        std::vector<UMat> outputs;

        inputs_.getUMatVector(inputs);
        outputs_.getUMatVector(outputs);

        int splitDim = (acrossChannels) ? 1 : 2;
        int row_size = total(shape(inputs[0]), 0, splitDim);
        int plane_size = total(shape(inputs[0]), splitDim);
        // Prefer the vectorized kernels when the shape allows it.
        if (normVariance && (row_size % 4 == 0) && (plane_size % 4 == 0))
            return fast_forward_ocl(inputs, outputs);

        // This generic path only handles fp32 inputs.
        if (inputs[0].depth() == CV_16S)
            return false;

        String opts = format(" -DT=float -DT4=float4 -Dconvert_T=convert_float4");

        for (size_t inpIdx = 0; inpIdx < inputs.size(); inpIdx++)
        {
            UMat &inpMat = inputs[inpIdx];
            UMat &outMat = outputs[inpIdx];
            int newRows = total(shape(inpMat), 0, splitDim);

            MatShape s = shape(newRows, inpMat.total() / newRows);
            UMat oneMat = UMat::ones(s[1], 1, CV_32F);  // all-ones vector for GEMV row sums
            UMat meanMat = UMat(s[0], 1, CV_32F);
            UMat devMat = UMat(s[0], 1, CV_32F);
            UMat tmpMat = UMat(s[0], s[1], CV_32F);
            float alpha = 1.0f / s[1];

            // meanMat = alpha * inpMat * ones => per-row mean.
            bool ret = ocl4dnn::ocl4dnnGEMV<float>(ocl4dnn::CblasNoTrans, s[0], s[1], alpha,
                                                   inpMat, 0, oneMat, 0, 0.0f, meanMat, 0);
            if (!ret)
                return false;

            // Widest vectorization that divides the row length.
            int number = (s[1] % 8 == 0) ? 8 : ((s[1] % 4 == 0) ? 4 : 1);
            size_t global[] = { (size_t)s[0], (size_t)(s[1] / number) };
            String buildopt = format("-DNUM=%d", number) + opts;
            if (normVariance)
            {
                // tmpMat = squared centered values; then reduce to devMat.
                String kname = format("calc_mean%d", number);
                ocl::Kernel kernel(kname.c_str(), ocl::dnn::mvn_oclsrc, buildopt);
                if (kernel.empty())
                    return false;

                kernel.set(0, ocl::KernelArg::PtrReadOnly(inpMat));
                kernel.set(1, (int)s[0]);
                kernel.set(2, (int)s[1]);
                kernel.set(3, ocl::KernelArg::PtrReadOnly(meanMat));
                kernel.set(4, ocl::KernelArg::PtrWriteOnly(tmpMat));
                ret = kernel.run(2, global, NULL, false);
                if (!ret)
                    return false;

                ret = ocl4dnn::ocl4dnnGEMV<float>(ocl4dnn::CblasNoTrans, s[0], s[1], alpha,
                                                  tmpMat, 0, oneMat, 0, 0.0f, devMat, 0);
                if (!ret)
                    return false;
            }

            // Final normalization (+ optional fused batch-norm / ReLU).
            String kname = format("mvn%d", number);
            buildopt += format("%s%s%s", (normVariance) ? " -DNORM_VARIANCE" : "",
                               (fuse_batch_norm) ? " -DFUSE_BATCH_NORM" : "",
                               (fuse_relu) ? " -DFUSE_RELU" : "");
            ocl::Kernel kernel1(kname.c_str(), ocl::dnn::mvn_oclsrc, buildopt);
            if (kernel1.empty())
                return false;
            kernel1.set(0, ocl::KernelArg::PtrReadOnly(inpMat));
            kernel1.set(1, (int)s[0]);
            kernel1.set(2, (int)s[1]);
            kernel1.set(3, (float)eps);
            kernel1.set(4, ocl::KernelArg::PtrReadOnly(meanMat));
            kernel1.set(5, ocl::KernelArg::PtrReadOnly(devMat));
            kernel1.set(6, ocl::KernelArg::PtrReadOnly(bnorm_weight));
            kernel1.set(7, ocl::KernelArg::PtrReadOnly(bnorm_bias));
            kernel1.set(8, (int)inpMat.size[1]);
            kernel1.set(9, (float)relu_slope);
            kernel1.set(10, ocl::KernelArg::PtrWriteOnly(outMat));
            ret = kernel1.run(2, global, NULL, false);
            if (!ret)
                return false;
        }
        return true;
    }
#endif

    // CPU reference implementation: per-row mean subtraction and optional
    // variance normalization via cv::meanStdDev, with fused scale/shift applied
    // through Mat::convertTo.
    void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
    {
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());

        CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
                   forward_ocl(inputs_arr, outputs_arr, internals_arr))

        if (inputs_arr.depth() == CV_16S)
        {
            forward_fallback(inputs_arr, outputs_arr, internals_arr);
            return;
        }

        std::vector<Mat> inputs, outputs, internals;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);
        internals_arr.getMatVector(internals);

        for (size_t inpIdx = 0; inpIdx < inputs.size(); inpIdx++)
        {
            Mat &inpBlob = inputs[inpIdx];
            Mat &outBlob = outputs[inpIdx];

            int splitDim = (acrossChannels) ? 1 : 2;
            int i, newRows = 1;
            for( i = 0; i < splitDim; i++ )
                newRows *= inpBlob.size[i];

            Mat inpMat = inpBlob.reshape(1, newRows);
            Mat outMat = outBlob.reshape(1, newRows);

            if ( inpBlob.total() == (size_t)newRows )
            {
                // MVN is applied to single values at an every row: x - mean(x) == 0,
                // so the result is just the fused shift (or zero).
                if (shift.empty())
                {
                    outBlob.setTo(0);
                }
                else
                {
                    for ( i = 0; i < newRows; i++ )
                    {
                        outMat.row(i).setTo(((float*)shift.data)[i]);
                    }
                }
                // Fixed: was `return;`, which skipped any remaining inputs of a
                // multi-input layer. Move on to the next input instead.
                continue;
            }

            Scalar mean, dev;
            for ( i = 0; i < newRows; i++)
            {
                Mat inpRow = inpMat.row(i);
                Mat outRow = outMat.row(i);
                float weight = 1.f;
                float bias = 0.f;
                if (fuse_batch_norm)
                {
                    // Rows beyond the fused coefficients keep identity scale/shift.
                    weight = i < scale.cols ? ((float*)scale.data)[i] : weight;
                    bias = i < shift.cols ? ((float*)shift.data)[i] : bias;
                }
                cv::meanStdDev(inpRow, mean, (normVariance) ? dev : noArray());
                double alpha = (normVariance) ? 1/(eps + dev[0]) : 1;
                double normalizationScale = 1.0;
                double normalizationShift = 0.0;
                if (fuse_batch_norm)
                {
                    normalizationScale = alpha * weight;
                    normalizationShift = -mean[0] * normalizationScale + bias;
                }
                else
                {
                    normalizationScale = alpha;
                    normalizationShift = -mean[0] * alpha;
                }
                // out = in * scale + shift, done in one fused pass.
                inpRow.convertTo(outRow, outRow.type(), normalizationScale, normalizationShift);
            }
        }
    }

    // Build the Inference Engine (MVN) node mirroring this layer's parameters.
    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
    {
#ifdef HAVE_INF_ENGINE
        InferenceEngine::LayerParams lp;
        lp.name = name;
        lp.type = "MVN";
        lp.precision = InferenceEngine::Precision::FP32;
        std::shared_ptr<InferenceEngine::MVNLayer> ieLayer(new InferenceEngine::MVNLayer(lp));
        ieLayer->params["across_channels"] = acrossChannels ? "1" : "0";
        ieLayer->params["normalize_variance"] = normVariance ? "1" : "0";
        ieLayer->params["eps"] = format("%f", eps);
        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif  // HAVE_INF_ENGINE
        return Ptr<BackendNode>();
    }

    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const CV_OVERRIDE
    {
        CV_UNUSED(outputs); // suppress unused variable warning
        // Fixed: accumulate into int64 (long is 32-bit on LLP64 platforms) and
        // use size_t to avoid the signed/unsigned loop comparison.
        int64 flops = 0;
        for (size_t i = 0; i < inputs.size(); i++)
        {
            flops += 6*total(inputs[i]) + 3*total(inputs[i], 0, normVariance ? 2 : 1);
        }
        return flops;
    }
};
393
394
// Factory: wrap the MVN implementation in a reference-counted layer handle.
Ptr<MVNLayer> MVNLayer::create(const LayerParams& params)
{
    return makePtr<MVNLayerImpl>(params);
}
398
399
}
400
}
401
402