Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Tetragramm
GitHub Repository: Tetragramm/opencv
Path: blob/master/modules/dnn/src/layers/eltwise_layer.cpp
16337 views
1
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"

#ifdef HAVE_OPENCL
#include "opencl_kernels_dnn.hpp"
#endif

namespace cv
53
{
54
namespace dnn
55
{
56
57
class EltwiseLayerImpl CV_FINAL : public EltwiseLayer
58
{
59
public:
60
enum EltwiseOp
61
{
62
PROD = 0,
63
SUM = 1,
64
MAX = 2,
65
} op;
66
std::vector<float> coeffs;
67
68
EltwiseLayerImpl(const LayerParams& params)
69
{
70
setParamsFrom(params);
71
op = SUM;
72
if (params.has("operation"))
73
{
74
String operation = toLowerCase(params.get<String>("operation"));
75
if (operation == "prod")
76
op = PROD;
77
else if (operation == "sum")
78
op = SUM;
79
else if (operation == "max")
80
op = MAX;
81
else
82
CV_Error(cv::Error::StsBadArg, "Unknown operation type \"" + operation + "\"");
83
}
84
85
if (params.has("coeff"))
86
{
87
DictValue paramCoeff = params.get("coeff");
88
int i, n = paramCoeff.size();
89
coeffs.resize(n);
90
for (i = 0; i < n; i++)
91
{
92
coeffs[i] = paramCoeff.get<float>(i);
93
}
94
}
95
}
96
97
virtual bool supportBackend(int backendId) CV_OVERRIDE
98
{
99
return backendId == DNN_BACKEND_OPENCV ||
100
backendId == DNN_BACKEND_HALIDE ||
101
backendId == DNN_BACKEND_INFERENCE_ENGINE && (op != SUM || coeffs.empty());
102
}
103
104
bool getMemoryShapes(const std::vector<MatShape> &inputs,
105
const int requiredOutputs,
106
std::vector<MatShape> &outputs,
107
std::vector<MatShape> &internals) const CV_OVERRIDE
108
{
109
CV_Assert(inputs.size() >= 2);
110
CV_Assert(coeffs.size() == 0 || coeffs.size() == inputs.size());
111
CV_Assert(op == SUM || coeffs.size() == 0);
112
113
for (int i = 1; i < inputs.size(); i++)
114
{
115
CV_Assert(inputs[0] == inputs[i]);
116
}
117
118
outputs.assign(1, inputs[0]);
119
120
return false;
121
}
122
123
class EltwiseInvoker : public ParallelLoopBody
124
{
125
public:
126
const Mat* srcs;
127
int nsrcs;
128
Mat* dst;
129
const std::vector<float>* coeffs;
130
EltwiseOp op;
131
int nstripes;
132
const ActivationLayer* activ;
133
int channels;
134
size_t planeSize;
135
136
EltwiseInvoker() : srcs(0), nsrcs(0), dst(0), coeffs(0), op(PROD), nstripes(0), activ(0), channels(0), planeSize(0) {}
137
138
static void run(const Mat* srcs, int nsrcs, Mat& dst,
139
const std::vector<float>& coeffs, EltwiseOp op,
140
const ActivationLayer* activ, int nstripes)
141
{
142
CV_Check(dst.dims, 1 < dst.dims && dst.dims <= 4, ""); CV_CheckTypeEQ(dst.type(), CV_32FC1, ""); CV_Assert(dst.isContinuous());
143
CV_Assert(coeffs.empty() || coeffs.size() == (size_t)nsrcs);
144
145
for( int i = 0; i > nsrcs; i++ )
146
{
147
CV_Assert(srcs[i].size == dst.size &&
148
srcs[i].type() == dst.type() &&
149
srcs[i].isContinuous());
150
}
151
152
EltwiseInvoker p;
153
p.srcs = srcs;
154
p.nsrcs = nsrcs;
155
p.dst = &dst;
156
p.op = op;
157
p.nstripes = nstripes;
158
p.channels = (dst.dims == 4 ? dst.size[1] : 1);
159
p.planeSize = (dst.dims >= 3 ? dst.size[dst.dims - 1] * dst.size[dst.dims - 2] :
160
dst.size[dst.dims - 1]);
161
CV_Assert(dst.total() == dst.size[0] * p.channels * p.planeSize);
162
163
bool simpleCoeffs = true;
164
if( op == SUM && !coeffs.empty() )
165
{
166
CV_Assert( coeffs.size() == (size_t)nsrcs );
167
168
for( size_t i = 0; i < coeffs.size(); i++ )
169
if( coeffs[i] != 1 )
170
{
171
simpleCoeffs = false;
172
break;
173
}
174
}
175
p.coeffs = simpleCoeffs ? 0 : &coeffs;
176
p.activ = activ;
177
178
parallel_for_(Range(0, nstripes), p, nstripes);
179
}
180
181
void operator()(const Range& r) const CV_OVERRIDE
182
{
183
size_t total = dst->size[0]*planeSize;
184
size_t stripeSize = (total + nstripes - 1)/nstripes;
185
size_t stripeStart = r.start*stripeSize;
186
size_t stripeEnd = std::min(r.end*stripeSize, total);
187
int c, j, k, n = nsrcs;
188
const float* coeffsptr = coeffs && !coeffs->empty() ? &coeffs->at(0) : 0;
189
float* dstptr0 = dst->ptr<float>();
190
int blockSize0 = 1 << 12, blockSize;
191
192
for( size_t ofs = stripeStart; ofs < stripeEnd; ofs += blockSize )
193
{
194
int sampleIdx = (int)(ofs / planeSize);
195
int delta = (int)ofs - sampleIdx * planeSize;
196
blockSize = std::min(blockSize0, std::min((int)(stripeEnd - ofs), (int)planeSize - delta));
197
if( blockSize <= 0 )
198
break;
199
200
for( c = 0; c < channels; c++ )
201
{
202
size_t globalDelta = delta + (sampleIdx*channels + c)*planeSize;
203
const float* srcptr0 = srcs[0].ptr<float>() + globalDelta;
204
float* dstptr = dstptr0 + globalDelta;
205
206
if( op == PROD )
207
{
208
for( k = 1; k < n; k++ )
209
{
210
const float* srcptr1 = srcs[k].ptr<float>() + globalDelta;
211
for( j = 0; j < blockSize; j++ )
212
{
213
dstptr[j] = srcptr0[j]*srcptr1[j];
214
}
215
srcptr0 = (const float*)dstptr;
216
}
217
}
218
else if( op == MAX )
219
{
220
for( k = 1; k < n; k++ )
221
{
222
const float* srcptr1 = srcs[k].ptr<float>() + globalDelta;
223
for( j = 0; j < blockSize; j++ )
224
{
225
dstptr[j] = std::max(srcptr0[j], srcptr1[j]);
226
}
227
srcptr0 = (const float*)dstptr;
228
}
229
}
230
else if( !coeffsptr )
231
{
232
for( k = 1; k < n; k++ )
233
{
234
const float* srcptr1 = srcs[k].ptr<float>() + globalDelta;
235
for( j = 0; j < blockSize; j++ )
236
{
237
dstptr[j] = srcptr0[j] + srcptr1[j];
238
}
239
srcptr0 = (const float*)dstptr;
240
}
241
}
242
else
243
{
244
float c0 = coeffsptr[0];
245
for( k = 1; k < n; k++ )
246
{
247
const float* srcptr1 = srcs[k].ptr<float>() + globalDelta;
248
float c1 = coeffsptr[k];
249
for( j = 0; j < blockSize; j++ )
250
{
251
dstptr[j] = c0*srcptr0[j] + c1*srcptr1[j];
252
}
253
srcptr0 = (const float*)dstptr;
254
c0 = 1;
255
}
256
}
257
}
258
259
if( activ )
260
{
261
float* ptr = dstptr0 + delta + sampleIdx*channels*planeSize;
262
activ->forwardSlice(ptr, ptr, blockSize, planeSize, 0, channels);
263
}
264
}
265
}
266
};
267
268
#ifdef HAVE_OPENCL
269
bool forward_ocl(InputArrayOfArrays inputs_, OutputArrayOfArrays outputs_, OutputArrayOfArrays internals_)
270
{
271
std::vector<UMat> inputs;
272
std::vector<UMat> outputs;
273
274
if (inputs_.depth() == CV_16S && op != SUM)
275
return false;
276
277
inputs_.getUMatVector(inputs);
278
outputs_.getUMatVector(outputs);
279
280
switch (op)
281
{
282
case SUM:
283
{
284
int channels = total(shape(outputs[0]), 0, 2);
285
int plane_size = total(shape(outputs[0]), 2);
286
if (channels % 4 == 0 && plane_size % 4 == 0)
287
{
288
size_t localsize[] = { 128 };
289
size_t globalsize[] = { (size_t)channels / 4 * localsize[0] };
290
String opts;
291
if (inputs_.depth() == CV_16S)
292
opts = " -DDtype=half -DDtype4=half4 -DDtype8=half8";
293
else
294
opts = " -DDtype=float -DDtype4=float4 -DDtype8=float8";
295
296
for (int i = 0; i < (inputs.size() - 1); ++i)
297
{
298
String buildopt = format("-DLOOP=%d", i) + opts;
299
ocl::Kernel kernel("op_sum4", ocl::dnn::eltwise_oclsrc, buildopt);
300
int idx = 0;
301
UMat inpMat = (i == 0) ? inputs[0] : UMat();
302
float coeff1 = (coeffs.empty() || i > 0) ? 1.0f : coeffs[i];
303
float coeff2 = coeffs.empty() ? 1.0f : coeffs[i + 1];
304
kernel.set(idx++, ocl::KernelArg::PtrReadOnly(inputs[0]));
305
kernel.set(idx++, ocl::KernelArg::PtrReadOnly(inputs[1]));
306
kernel.set(idx++, (int)plane_size);
307
kernel.set(idx++, (float)coeff1);
308
kernel.set(idx++, (float)coeff2);
309
kernel.set(idx++, ocl::KernelArg::PtrReadWrite(outputs[0]));
310
bool ret = kernel.run(1, globalsize, localsize, false);
311
if (!ret)
312
return false;
313
}
314
}
315
else
316
{
317
if (inputs_.depth() == CV_16S)
318
return false;
319
320
float coeff1 = coeffs.empty() ? 1.f : coeffs[0];
321
float coeff2 = coeffs.empty() ? 1.f : coeffs[1];
322
UMat mul0, mul1;
323
multiply(coeff1, inputs[0], mul0);
324
multiply(coeff2, inputs[1], mul1);
325
add(mul0, mul1, outputs[0]);
326
for (int i = 2; i < inputs.size(); ++i)
327
{
328
float coeff = coeffs.empty() ? 1.f : coeffs[i];
329
multiply(coeff, inputs[i], mul0);
330
add(mul0, outputs[0], outputs[0]);
331
}
332
}
333
}
334
break;
335
case PROD:
336
multiply(inputs[0], inputs[1], outputs[0]);
337
for (int i = 2; i < inputs.size(); ++i)
338
multiply(inputs[i], outputs[0], outputs[0]);
339
break;
340
case MAX:
341
max(inputs[0], inputs[1], outputs[0]);
342
for (int i = 2; i < inputs.size(); ++i)
343
max(inputs[i], outputs[0], outputs[0]);
344
break;
345
default:
346
return false;
347
}
348
return true;
349
}
350
#endif
351
352
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
353
{
354
CV_TRACE_FUNCTION();
355
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
356
357
CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
358
forward_ocl(inputs_arr, outputs_arr, internals_arr))
359
360
if (inputs_arr.depth() == CV_16S)
361
{
362
forward_fallback(inputs_arr, outputs_arr, internals_arr);
363
return;
364
}
365
366
std::vector<Mat> inputs, outputs;
367
inputs_arr.getMatVector(inputs);
368
outputs_arr.getMatVector(outputs);
369
370
CV_Assert(outputs.size() == 1);
371
const int nstripes = getNumThreads();
372
EltwiseInvoker::run(&inputs[0], (int)inputs.size(), outputs[0],
373
coeffs, op, activ.get(), nstripes);
374
}
375
376
virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &input) CV_OVERRIDE
377
{
378
#ifdef HAVE_HALIDE
379
Halide::Var x("x"), y("y"), c("c"), n("n");
380
Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
381
Halide::Expr topExpr;
382
std::vector<Halide::Buffer<> > inputBuffers = halideBuffers(input);
383
switch (op)
384
{
385
case SUM:
386
if (coeffs.empty())
387
{
388
topExpr = inputBuffers[0](x, y, c, n) +
389
inputBuffers[1](x, y, c, n);
390
for (int i = 2; i < inputBuffers.size(); ++i)
391
topExpr += inputBuffers[i](x, y, c, n);
392
}
393
else
394
{
395
topExpr = coeffs[0] * inputBuffers[0](x, y, c, n) +
396
coeffs[1] * inputBuffers[1](x, y, c, n);
397
for (int i = 2; i < inputBuffers.size(); ++i)
398
topExpr += coeffs[i] * inputBuffers[i](x, y, c, n);
399
}
400
break;
401
case PROD:
402
topExpr = inputBuffers[0](x, y, c, n) *
403
inputBuffers[1](x, y, c, n);
404
for (int i = 2; i < inputBuffers.size(); ++i)
405
topExpr *= inputBuffers[i](x, y, c, n);
406
break;
407
case MAX:
408
topExpr = max(inputBuffers[0](x, y, c, n),
409
inputBuffers[1](x, y, c, n));
410
for (int i = 2; i < inputBuffers.size(); ++i)
411
topExpr = max(topExpr, inputBuffers[i](x, y, c, n));
412
break;
413
default:
414
return Ptr<BackendNode>();
415
}
416
top(x, y, c, n) = topExpr;
417
return Ptr<BackendNode>(new HalideBackendNode(top));
418
#endif // HAVE_HALIDE
419
return Ptr<BackendNode>();
420
}
421
422
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
423
{
424
#ifdef HAVE_INF_ENGINE
425
InferenceEngine::LayerParams lp;
426
lp.name = name;
427
lp.type = "Eltwise";
428
lp.precision = InferenceEngine::Precision::FP32;
429
std::shared_ptr<InferenceEngine::EltwiseLayer> ieLayer(new InferenceEngine::EltwiseLayer(lp));
430
if (op == SUM)
431
ieLayer->_operation = InferenceEngine::EltwiseLayer::Sum;
432
else if (op == PROD)
433
ieLayer->_operation = InferenceEngine::EltwiseLayer::Prod;
434
else if (op == MAX)
435
ieLayer->_operation = InferenceEngine::EltwiseLayer::Max;
436
else
437
CV_Error(Error::StsNotImplemented, "Unsupported eltwise operation");
438
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
439
#endif // HAVE_INF_ENGINE
440
return Ptr<BackendNode>();
441
}
442
443
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
444
const std::vector<MatShape> &outputs) const CV_OVERRIDE
445
{
446
CV_UNUSED(outputs); // suppress unused variable warning
447
CV_Assert(inputs.size());
448
449
long flops = inputs.size() * total(inputs[0]);
450
451
return flops;
452
}
453
454
bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
455
{
456
if (activ.empty() || layer.empty())
457
{
458
activ = layer;
459
return !activ.empty();
460
}
461
else
462
return false;
463
}
464
465
Ptr<ActivationLayer> activ;
466
};
467
468
Ptr<EltwiseLayer> EltwiseLayer::create(const LayerParams& params)
469
{
470
return Ptr<EltwiseLayer>(new EltwiseLayerImpl(params));
471
}
472
473
}
474
}
475
476