// Path: blob/master/modules/dnn/src/layers/mvn_layer.cpp
// 16337 views
/*M///////////////////////////////////////////////////////////////////////////////////////1//2// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.3//4// By downloading, copying, installing or using the software you agree to this license.5// If you do not agree to this license, do not download, install,6// copy or use the software.7//8//9// License Agreement10// For Open Source Computer Vision Library11//12// Copyright (C) 2013, OpenCV Foundation, all rights reserved.13// Copyright (C) 2017, Intel Corporation, all rights reserved.14// Third party copyrights are property of their respective owners.15//16// Redistribution and use in source and binary forms, with or without modification,17// are permitted provided that the following conditions are met:18//19// * Redistribution's of source code must retain the above copyright notice,20// this list of conditions and the following disclaimer.21//22// * Redistribution's in binary form must reproduce the above copyright notice,23// this list of conditions and the following disclaimer in the documentation24// and/or other materials provided with the distribution.25//26// * The name of the copyright holders may not be used to endorse or promote products27// derived from this software without specific prior written permission.28//29// This software is provided by the copyright holders and contributors "as is" and30// any express or implied warranties, including, but not limited to, the implied31// warranties of merchantability and fitness for a particular purpose are disclaimed.32// In no event shall the Intel Corporation or contributors be liable for any direct,33// indirect, incidental, special, exemplary, or consequential damages34// (including, but not limited to, procurement of substitute goods or services;35// loss of use, data, or profits; or business interruption) however caused36// and on any theory of liability, whether in contract, strict liability,37// or tort (including negligence or otherwise) 
arising in any way out of38// the use of this software, even if advised of the possibility of such damage.39//40//M*/4142#include "../precomp.hpp"43#include "layers_common.hpp"44#include "../op_inf_engine.hpp"45#include <opencv2/dnn/shape_utils.hpp>4647#ifdef HAVE_OPENCL48#include "../ocl4dnn/include/math_functions.hpp"49#include "opencl_kernels_dnn.hpp"50#endif5152namespace cv53{54namespace dnn55{5657class MVNLayerImpl CV_FINAL : public MVNLayer58{59public:60MVNLayerImpl(const LayerParams& params)61{62setParamsFrom(params);63normVariance = params.get<bool>("normalize_variance", true);64acrossChannels = params.get<bool>("across_channels", false);65eps = params.get<double>("eps", 1e-9);66fuse_batch_norm = false;67fuse_relu = false;68relu_slope = 0.f;69zeroDev = false;70}7172Mat scale, shift;73#ifdef HAVE_OPENCL74UMat umat_scale, umat_shift;75#endif76bool fuse_batch_norm;7778Ptr<ReLULayer> activ_relu;79float relu_slope;80bool fuse_relu;81bool zeroDev; // TODO: Doesn't considered in Intel's Inference Engine backend.82bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE83{84if (!layer.empty() && !fuse_relu && !fuse_batch_norm)85{86layer->getScaleShift(scale, shift);87fuse_batch_norm = !scale.empty() || !shift.empty();88return fuse_batch_norm;89}9091if (!layer.empty() && preferableTarget == DNN_TARGET_OPENCL)92{93activ_relu = layer.dynamicCast<ReLULayer>();94if( !activ_relu.empty() )95relu_slope = activ_relu->negativeSlope;96}97fuse_relu = !activ_relu.empty();98return fuse_relu;99}100101void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE102{103std::vector<Mat> inputs;104inputs_arr.getMatVector(inputs);105int splitDim = (acrossChannels) ? 
1 : 2;106int i, newRows = 1;107for( i = 0; i < splitDim; i++ )108newRows *= inputs[0].size[i];109zeroDev = inputs[0].total() == newRows;110#ifdef HAVE_OPENCL111umat_scale.release();112umat_shift.release();113#endif114}115116virtual bool supportBackend(int backendId) CV_OVERRIDE117{118if (backendId == DNN_BACKEND_INFERENCE_ENGINE)119return !zeroDev && (preferableTarget == DNN_TARGET_CPU || eps <= 1e-7f);120else121return backendId == DNN_BACKEND_OPENCV;122}123124#ifdef HAVE_OPENCL125bool fast_forward_ocl(std::vector<UMat> &inputs, std::vector<UMat> &outputs)126{127if (umat_scale.empty() && !scale.empty())128scale.copyTo(umat_scale);129if (umat_shift.empty() && !shift.empty())130shift.copyTo(umat_shift);131UMat& bnorm_weight = umat_scale;132UMat& bnorm_bias = umat_shift;133134bool use_half = (inputs[0].depth() == CV_16S);135String opts = format(" -DT=%s -DT4=%s -Dconvert_T=%s", use_half ? "half" : "float",136use_half ? "half4" : "float4", use_half ? "convert_half4" : "convert_float4");137138int splitDim = (acrossChannels) ? 1 : 2;139for (size_t inpIdx = 0; inpIdx < inputs.size(); inpIdx++)140{141UMat &inpMat = inputs[inpIdx];142UMat &outMat = outputs[inpIdx];143int newRows = total(shape(inpMat), 0, splitDim);144145MatShape s = shape(newRows, inpMat.total() / newRows);146UMat meanMat = UMat(s[0], 1, (use_half) ? 
CV_16S : CV_32F);147UMat tmpMat = UMat(s[0], s[1], CV_32F);148float alpha = 1.0f / s[1];149150String buildopt = "-DNUM=4" + opts;151ocl::Kernel k("mean_fuse4", ocl::dnn::mvn_oclsrc, buildopt);152size_t localsize[] = { 128 };153size_t globalsize[] = { (size_t)s[0] / 4 * localsize[0] };154155int argId = 0;156k.set(argId++, ocl::KernelArg::PtrReadOnly(inpMat));157k.set(argId++, (int)s[1]);158k.set(argId++, alpha);159k.set(argId++, ocl::KernelArg::PtrWriteOnly(meanMat));160k.set(argId++, ocl::KernelArg::PtrWriteOnly(tmpMat));161k.set(argId++, NULL, localsize[0] * sizeof(cl_float4));162bool ret = k.run(1, globalsize, localsize, false);163if (!ret)164return false;165166buildopt += format(" %s %s", (fuse_batch_norm) ? "-DFUSE_BATCH_NORM" : "",167(fuse_relu) ? "-DFUSE_RELU" : "");168169ocl::Kernel k1("mvn_fuse4", ocl::dnn::mvn_oclsrc, buildopt);170argId = 0;171k1.set(argId++, ocl::KernelArg::PtrReadOnly(tmpMat));172k1.set(argId++, ocl::KernelArg::PtrReadOnly(inpMat));173k1.set(argId++, ocl::KernelArg::PtrReadOnly(meanMat));174k1.set(argId++, (int)s[1]);175k1.set(argId++, (float)alpha);176k1.set(argId++, (float)eps);177k1.set(argId++, (float)relu_slope);178k1.set(argId++, ocl::KernelArg::PtrReadOnly(bnorm_weight));179k1.set(argId++, ocl::KernelArg::PtrReadOnly(bnorm_bias));180k1.set(argId++, ocl::KernelArg::PtrWriteOnly(outMat));181k1.set(argId++, NULL, localsize[0] * sizeof(cl_float4));182ret = k1.run(1, globalsize, localsize, false);183if (!ret)184return false;185}186return true;187}188189bool forward_ocl(InputArrayOfArrays inputs_, OutputArrayOfArrays outputs_, OutputArrayOfArrays internals_)190{191if (umat_scale.empty() && !scale.empty())192scale.copyTo(umat_scale);193if (umat_shift.empty() && !shift.empty())194shift.copyTo(umat_shift);195UMat& bnorm_weight = umat_scale;196UMat& bnorm_bias = umat_shift;197198std::vector<UMat> inputs;199std::vector<UMat> outputs;200201inputs_.getUMatVector(inputs);202outputs_.getUMatVector(outputs);203204int splitDim = (acrossChannels) ? 
1 : 2;205int row_size = total(shape(inputs[0]), 0, splitDim);206int plane_size = total(shape(inputs[0]), splitDim);207if (normVariance && (row_size % 4 == 0) && (plane_size % 4 == 0))208return fast_forward_ocl(inputs, outputs);209210if (inputs[0].depth() == CV_16S)211return false;212213String opts = format(" -DT=float -DT4=float4 -Dconvert_T=convert_float4");214215for (size_t inpIdx = 0; inpIdx < inputs.size(); inpIdx++)216{217UMat &inpMat = inputs[inpIdx];218UMat &outMat = outputs[inpIdx];219int newRows = total(shape(inpMat), 0, splitDim);220221MatShape s = shape(newRows, inpMat.total() / newRows);222UMat oneMat = UMat::ones(s[1], 1, CV_32F);223UMat meanMat = UMat(s[0], 1, CV_32F);224UMat devMat = UMat(s[0], 1, CV_32F);225UMat tmpMat = UMat(s[0], s[1], CV_32F);226float alpha = 1.0f / s[1];227228bool ret = ocl4dnn::ocl4dnnGEMV<float>(ocl4dnn::CblasNoTrans, s[0], s[1], alpha,229inpMat, 0, oneMat, 0, 0.0f, meanMat, 0);230if (!ret)231return false;232233int number = (s[1] % 8 == 0) ? 8 : ((s[1] % 4 == 0) ? 4 : 1);234size_t global[] = { (size_t)s[0], (size_t)(s[1] / number) };235String buildopt = format("-DNUM=%d", number) + opts;236if (normVariance)237{238String kname = format("calc_mean%d", number);239ocl::Kernel kernel(kname.c_str(), ocl::dnn::mvn_oclsrc, buildopt);240if (kernel.empty())241return false;242243kernel.set(0, ocl::KernelArg::PtrReadOnly(inpMat));244kernel.set(1, (int)s[0]);245kernel.set(2, (int)s[1]);246kernel.set(3, ocl::KernelArg::PtrReadOnly(meanMat));247kernel.set(4, ocl::KernelArg::PtrWriteOnly(tmpMat));248ret = kernel.run(2, global, NULL, false);249if (!ret)250return false;251252ret = ocl4dnn::ocl4dnnGEMV<float>(ocl4dnn::CblasNoTrans, s[0], s[1], alpha,253tmpMat, 0, oneMat, 0, 0.0f, devMat, 0);254if (!ret)255return false;256}257258String kname = format("mvn%d", number);259buildopt += format("%s%s%s", (normVariance) ? " -DNORM_VARIANCE" : "",260(fuse_batch_norm) ? " -DFUSE_BATCH_NORM" : "",261(fuse_relu) ? 
" -DFUSE_RELU" : "");262ocl::Kernel kernel1(kname.c_str(), ocl::dnn::mvn_oclsrc, buildopt);263if (kernel1.empty())264return false;265kernel1.set(0, ocl::KernelArg::PtrReadOnly(inpMat));266kernel1.set(1, (int)s[0]);267kernel1.set(2, (int)s[1]);268kernel1.set(3, (float)eps);269kernel1.set(4, ocl::KernelArg::PtrReadOnly(meanMat));270kernel1.set(5, ocl::KernelArg::PtrReadOnly(devMat));271kernel1.set(6, ocl::KernelArg::PtrReadOnly(bnorm_weight));272kernel1.set(7, ocl::KernelArg::PtrReadOnly(bnorm_bias));273kernel1.set(8, (int)inpMat.size[1]);274kernel1.set(9, (float)relu_slope);275kernel1.set(10, ocl::KernelArg::PtrWriteOnly(outMat));276ret = kernel1.run(2, global, NULL, false);277if (!ret)278return false;279}280return true;281}282#endif283284void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE285{286CV_TRACE_FUNCTION();287CV_TRACE_ARG_VALUE(name, "name", name.c_str());288289CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),290forward_ocl(inputs_arr, outputs_arr, internals_arr))291292if (inputs_arr.depth() == CV_16S)293{294forward_fallback(inputs_arr, outputs_arr, internals_arr);295return;296}297298std::vector<Mat> inputs, outputs, internals;299inputs_arr.getMatVector(inputs);300outputs_arr.getMatVector(outputs);301internals_arr.getMatVector(internals);302303for (size_t inpIdx = 0; inpIdx < inputs.size(); inpIdx++)304{305Mat &inpBlob = inputs[inpIdx];306Mat &outBlob = outputs[inpIdx];307308int splitDim = (acrossChannels) ? 
1 : 2;309int i, newRows = 1;310for( i = 0; i < splitDim; i++ )311newRows *= inpBlob.size[i];312313Mat inpMat = inpBlob.reshape(1, newRows);314Mat outMat = outBlob.reshape(1, newRows);315316if ( inpBlob.total() == newRows )317{318// MVN is applied to single values at an every row.319if (shift.empty())320{321outBlob.setTo(0);322}323else324{325for ( i = 0; i < newRows; i++ )326{327outMat.row(i).setTo(((float*)shift.data)[i]);328}329}330return;331}332333Scalar mean, dev;334for ( i = 0; i < newRows; i++)335{336Mat inpRow = inpMat.row(i);337Mat outRow = outMat.row(i);338float weight = 1.f;339float bias = 0.f;340if (fuse_batch_norm)341{342weight = i < scale.cols ? ((float*)scale.data)[i] : weight;343bias = i < shift.cols ? ((float*)shift.data)[i] : bias;344}345cv::meanStdDev(inpRow, mean, (normVariance) ? dev : noArray());346double alpha = (normVariance) ? 1/(eps + dev[0]) : 1;347double normalizationScale = 1.0;348double normalizationShift = 0.0;349if (fuse_batch_norm)350{351normalizationScale = alpha * weight;352normalizationShift = -mean[0] * normalizationScale + bias;353}354else355{356normalizationScale = alpha;357normalizationShift = -mean[0] * alpha;358}359inpRow.convertTo(outRow, outRow.type(), normalizationScale, normalizationShift);360}361}362}363364virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE365{366#ifdef HAVE_INF_ENGINE367InferenceEngine::LayerParams lp;368lp.name = name;369lp.type = "MVN";370lp.precision = InferenceEngine::Precision::FP32;371std::shared_ptr<InferenceEngine::MVNLayer> ieLayer(new InferenceEngine::MVNLayer(lp));372ieLayer->params["across_channels"] = acrossChannels ? "1" : "0";373ieLayer->params["normalize_variance"] = normVariance ? 
"1" : "0";374ieLayer->params["eps"] = format("%f", eps);375return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));376#endif // HAVE_INF_ENGINE377return Ptr<BackendNode>();378}379380virtual int64 getFLOPS(const std::vector<MatShape> &inputs,381const std::vector<MatShape> &outputs) const CV_OVERRIDE382{383CV_UNUSED(outputs); // suppress unused variable warning384long flops = 0;385for(int i = 0; i < inputs.size(); i++)386{387flops += 6*total(inputs[i]) + 3*total(inputs[i], 0, normVariance ? 2 : 1);388}389return flops;390}391};392393Ptr<MVNLayer> MVNLayer::create(const LayerParams& params)394{395return Ptr<MVNLayer>(new MVNLayerImpl(params));396}397398}399}400401402