// Path: modules/dnn/src/layers/concat_layer.cpp
/*M///////////////////////////////////////////////////////////////////////////////////////1//2// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.3//4// By downloading, copying, installing or using the software you agree to this license.5// If you do not agree to this license, do not download, install,6// copy or use the software.7//8//9// License Agreement10// For Open Source Computer Vision Library11//12// Copyright (C) 2013, OpenCV Foundation, all rights reserved.13// Copyright (C) 2017, Intel Corporation, all rights reserved.14// Third party copyrights are property of their respective owners.15//16// Redistribution and use in source and binary forms, with or without modification,17// are permitted provided that the following conditions are met:18//19// * Redistribution's of source code must retain the above copyright notice,20// this list of conditions and the following disclaimer.21//22// * Redistribution's in binary form must reproduce the above copyright notice,23// this list of conditions and the following disclaimer in the documentation24// and/or other materials provided with the distribution.25//26// * The name of the copyright holders may not be used to endorse or promote products27// derived from this software without specific prior written permission.28//29// This software is provided by the copyright holders and contributors "as is" and30// any express or implied warranties, including, but not limited to, the implied31// warranties of merchantability and fitness for a particular purpose are disclaimed.32// In no event shall the Intel Corporation or contributors be liable for any direct,33// indirect, incidental, special, exemplary, or consequential damages34// (including, but not limited to, procurement of substitute goods or services;35// loss of use, data, or profits; or business interruption) however caused36// and on any theory of liability, whether in contract, strict liability,37// or tort (including negligence or otherwise) 
arising in any way out of38// the use of this software, even if advised of the possibility of such damage.39//40//M*/4142#include "../precomp.hpp"43#include "layers_common.hpp"44#include "../op_halide.hpp"45#include "../op_inf_engine.hpp"46#include "../op_vkcom.hpp"4748#ifdef HAVE_OPENCL49#include "opencl_kernels_dnn.hpp"50#endif5152namespace cv53{54namespace dnn55{5657class ConcatLayerImpl CV_FINAL : public ConcatLayer58{59public:60ConcatLayerImpl(const LayerParams& params)61{62setParamsFrom(params);63axis = params.get<int>("axis", 1);64padding = params.get<bool>("padding", false);65}6667virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,68const int requiredOutputs,69std::vector<MatShape> &outputs,70std::vector<MatShape> &internals) const CV_OVERRIDE71{72CV_Assert(inputs.size() > 0);73outputs.resize(1, inputs[0]);74int cAxis = clamp(axis, inputs[0]);7576int axisSum = 0;77for (size_t i = 0; i < inputs.size(); i++)78{79MatShape curShape = inputs[i];8081if (padding)82{83for (int curAxis = 0; curAxis < outputs[0].size(); curAxis++)84{85outputs[0][curAxis] = std::max(outputs[0][curAxis], curShape[curAxis]);86}87}88else89{90CV_Assert(curShape.size() == outputs[0].size());91for (int curAxis = 0; curAxis < outputs[0].size(); curAxis++)92{93if (curAxis != cAxis && outputs[0][curAxis] != curShape[curAxis])94CV_Error(Error::StsBadSize, "Inconsistent shape for ConcatLayer");95}96}9798axisSum += curShape[cAxis];99}100outputs[0][cAxis] = axisSum;101return false;102}103104virtual bool supportBackend(int backendId) CV_OVERRIDE105{106return backendId == DNN_BACKEND_OPENCV ||107backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 && !padding || // By channels108backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && !padding ||109backendId == DNN_BACKEND_VKCOM && haveVulkan() && !padding;110}111112class ChannelConcatInvoker : public ParallelLoopBody113{114public:115std::vector<Mat>* inputs;116Mat* output;117int nstripes;118std::vector<const float*> 
chptrs;119120static void run(std::vector<Mat>& inputs, Mat& output, int nstripes)121{122ChannelConcatInvoker cc;123cc.inputs = &inputs;124cc.output = &output;125cc.nstripes = nstripes;126127size_t i, ninputs = inputs.size();128int nchannels = 0, batchsz = output.size[0];129for( i = 0; i < ninputs; i++ )130{131Mat& inp = inputs[i];132CV_Assert( inp.isContinuous() && (inp.type() == CV_32F || inp.type() == CV_16S) &&133inp.dims == 4 && inp.size[0] == output.size[0] &&134inp.size[2] == output.size[2] &&135inp.size[3] == output.size[3] );136nchannels += inp.size[1];137}138CV_Assert( nchannels == output.size[1] );139CV_Assert( output.isContinuous() && (output.type() == CV_32F || output.type() == CV_16S) );140141cc.chptrs.resize(nchannels*batchsz);142143int ofs = 0;144for( i = 0; i < ninputs; i++)145{146Mat& inp = inputs[i];147for( int j = 0; j < batchsz; j++ )148for( int k = 0; k < inp.size[1]; k++ )149{150const float* ptr = inp.ptr<float>(j, k);151cc.chptrs[ofs + j*nchannels + k] = ptr;152}153ofs += inp.size[1];154}155156parallel_for_(Range(0, nstripes), cc, nstripes);157}158159ChannelConcatInvoker() : inputs(0), output(0), nstripes(0) {}160161void operator()(const Range& r) const CV_OVERRIDE162{163size_t planeSize = (size_t)output->size[2]*output->size[3];164size_t nch = chptrs.size();165size_t total = nch*planeSize;166size_t stripeSize = (total + nstripes - 1)/nstripes;167size_t stripeStart = r.start*stripeSize;168size_t stripeEnd = std::min(total, r.end*stripeSize);169const float** ptrs = (const float**)&chptrs[0];170float* outptr = output->ptr<float>();171size_t blockSize0 = 1 << 16;172173for( size_t ofs0 = stripeStart; ofs0 < stripeEnd; )174{175size_t ch = ofs0/planeSize;176size_t ofs = ofs0 - ch*planeSize;177size_t blockSize = std::min(blockSize0, planeSize - ofs);178memcpy(outptr + ofs0, ptrs[ch] + ofs, blockSize*sizeof(outptr[0]));179ofs0 += blockSize;180}181}182};183184#ifdef HAVE_OPENCL185bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, 
OutputArrayOfArrays internals)186{187std::vector<UMat> inputs;188std::vector<UMat> outputs;189190bool use_half = (inps.depth() == CV_16S);191inps.getUMatVector(inputs);192outs.getUMatVector(outputs);193194int cAxis = clamp(axis, inputs[0].dims);195if (padding)196return false;197198int bottom_concat_axis;199int concat_size = total(shape(inputs[0]), cAxis + 1);200int top_concat_axis = outputs[0].size[cAxis];201int num_concats = total(shape(inputs[0]), 0, cAxis);202int offset_concat_axis = 0;203UMat& outMat = outputs[0];204String buildopt = format(" -DDtype=%s", (use_half) ? "half" : "float");205String kname = format("concat_%s", use_half ? "half" : "float");206207for (size_t i = 0; i < inputs.size(); i++)208{209ocl::Kernel kernel(kname.c_str(), ocl::dnn::concat_oclsrc, buildopt);210if (kernel.empty())211return false;212213UMat& inpMat = inputs[i];214bottom_concat_axis = inputs[i].size[cAxis];215size_t nthreads = inputs[i].total();216217kernel.set(0, (int)nthreads);218kernel.set(1, ocl::KernelArg::PtrReadOnly(inpMat));219kernel.set(2, (int)num_concats);220kernel.set(3, (int)concat_size);221kernel.set(4, (int)top_concat_axis);222kernel.set(5, (int)bottom_concat_axis);223kernel.set(6, (int)offset_concat_axis);224kernel.set(7, ocl::KernelArg::PtrWriteOnly(outMat));225226if (!kernel.run(1, &nthreads, NULL, false))227return false;228229offset_concat_axis += bottom_concat_axis;230}231232return true;233}234#endif235236void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE237{238CV_TRACE_FUNCTION();239CV_TRACE_ARG_VALUE(name, "name", name.c_str());240241CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),242forward_ocl(inputs_arr, outputs_arr, internals_arr))243244std::vector<Mat> inputs, outputs;245inputs_arr.getMatVector(inputs);246outputs_arr.getMatVector(outputs);247248int cAxis = clamp(axis, inputs[0].dims);249Mat& outMat = outputs[0];250251if (padding)252outMat.setTo(0);253254if( cAxis == 1 && 
outMat.dims == 4 && !padding)255{256int nstripes = getNumThreads();257ChannelConcatInvoker::run(inputs, outMat, nstripes);258}259else260{261std::vector<Range> ranges(outputs[0].dims, Range::all());262263ranges[cAxis].start = 0;264for (size_t i = 0; i < inputs.size(); i++)265{266ranges[cAxis].end = ranges[cAxis].start + inputs[i].size[cAxis];267for (int j = 0; j < outMat.dims; ++j)268{269if (j == cAxis) continue;270ranges[j].start = (outMat.size[j] - inputs[i].size[j]) / 2;271ranges[j].end = ranges[j].start + inputs[i].size[j];272}273inputs[i].copyTo(outMat(&ranges[0]));274ranges[cAxis].start = ranges[cAxis].end;275}276}277}278virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &input) CV_OVERRIDE279{280#ifdef HAVE_VULKAN281vkcom::Tensor in = VkComTensor(input[0]);282int cAxis = clamp(axis, in.dimNum());283std::shared_ptr<vkcom::OpBase> op(new vkcom::OpConcat(cAxis));284return Ptr<BackendNode>(new VkComBackendNode(input, op));285#endif // HAVE_VULKAN286return Ptr<BackendNode>();287}288289virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &input) CV_OVERRIDE290{291#ifdef HAVE_HALIDE292std::vector<Halide::Buffer<> > inputBuffers = halideBuffers(input);293294Halide::Var x("x"), y("y"), c("c"), n("n");295Halide::Func top = (name.empty() ? 
Halide::Func() : Halide::Func(name));296int offset = inputBuffers[0].channels();297Halide::Expr topExpr = select(c < offset,298inputBuffers[0](x, y, c, n),299inputBuffers[1](x, y, c - offset, n));300for (int i = 2; i < input.size(); ++i)301{302offset += inputBuffers[i - 1].channels();303topExpr = select(c < offset, topExpr,304inputBuffers[i](x, y, c - offset, n));305}306top(x, y, c, n) = topExpr;307return Ptr<BackendNode>(new HalideBackendNode(top));308#endif // HAVE_HALIDE309return Ptr<BackendNode>();310}311312virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE313{314#ifdef HAVE_INF_ENGINE315InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);316InferenceEngine::LayerParams lp;317lp.name = name;318lp.type = "Concat";319lp.precision = InferenceEngine::Precision::FP32;320std::shared_ptr<InferenceEngine::ConcatLayer> ieLayer(new InferenceEngine::ConcatLayer(lp));321ieLayer->_axis = clamp(axis, input->dims.size());322return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));323#endif // HAVE_INF_ENGINE324return Ptr<BackendNode>();325}326};327328Ptr<ConcatLayer> ConcatLayer::create(const LayerParams& params)329{330return Ptr<ConcatLayer>(new ConcatLayerImpl(params));331}332333}334}335336337