Path: blob/master/modules/dnn/src/layers/scale_layer.cpp
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

/*
Implementation of Scale layer.
*/

#include "../precomp.hpp"
#include "layers_common.hpp"
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"
#include <opencv2/dnn/shape_utils.hpp>

namespace cv
{
namespace dnn
{

class ScaleLayerImpl CV_FINAL : public ScaleLayer
{
public:
    ScaleLayerImpl(const LayerParams& params)
    {
        setParamsFrom(params);
        hasBias = params.get<bool>("bias_term", false);
        axis = params.get<int>("axis", 1);
        hasWeights = false;
    }

    bool getMemoryShapes(const std::vector<MatShape> &inputs,
                         const int requiredOutputs,
                         std::vector<MatShape> &outputs,
                         std::vector<MatShape> &internals) const CV_OVERRIDE
    {
        outputs.assign(1, inputs[0]);
        return true;
    }

    virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
    {
        std::vector<Mat> inputs;
        inputs_arr.getMatVector(inputs);
        hasWeights = blobs.size() == 2 || (blobs.size() == 1 && !hasBias);
        CV_Assert((inputs.size() == 2 && blobs.empty()) || blobs.size() == (int)hasWeights + (int)hasBias);
    }

    virtual bool supportBackend(int backendId) CV_OVERRIDE
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
               (backendId == DNN_BACKEND_INFERENCE_ENGINE && axis == 1);
    }

    void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
    {
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());

        if (inputs_arr.depth() == CV_16S)
        {
            forward_fallback(inputs_arr, outputs_arr, internals_arr);
            return;
        }

        std::vector<Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);

        CV_Assert_N(outputs.size() == 1, !blobs.empty() || inputs.size() == 2);

        Mat &inpBlob = inputs[0];
        Mat &outBlob = outputs[0];
        // There is a mode in which the first input blob is multiplied by the
        // second one instead of by trainable weights.
        Mat weights = blobs.empty() ? inputs[1] : (hasWeights ? blobs[0] : Mat());
        Mat bias = hasBias ? blobs.back().reshape(1, 1) : Mat();
        if (!weights.empty())
            weights = weights.reshape(1, 1);
        MatShape inpShape = shape(inpBlob);
        const int numWeights = !weights.empty() ? weights.total() : bias.total();
        CV_Assert(numWeights != 0);
        if (hasWeights && hasBias)
            CV_CheckEQ(weights.total(), bias.total(), "Incompatible weights/bias blobs");

        // Find the axis range [axis, endAxis) whose total size matches the number of weights.
        int endAxis;
        for (endAxis = axis + 1; endAxis <= inpBlob.dims; ++endAxis)
        {
            if (total(inpShape, axis, endAxis) == numWeights)
                break;
        }
        CV_Assert(total(inpShape, axis, endAxis) == numWeights);
        CV_Assert(!hasBias || numWeights == bias.total());
        CV_CheckTypeEQ(inpBlob.type(), CV_32FC1, ""); CV_CheckTypeEQ(outBlob.type(), CV_32FC1, "");

        int numSlices = total(inpShape, 0, axis);
        float* inpData = (float*)inpBlob.data;
        float* outData = (float*)outBlob.data;

        if (endAxis != inpBlob.dims)
        {
            float* weightsData = !weights.empty() ? (float*)weights.data : 0;
            float* biasesData = hasBias ? (float*)bias.data : 0;
            int spatialSize = total(inpShape, endAxis);  // spatialSize != 1
            for (int i = 0; i < numSlices; ++i)
            {
                for (int j = 0; j < numWeights; ++j)
                {
                    float w = weightsData ? weightsData[j] : 1;
                    float b = biasesData ? biasesData[j] : 0;
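                    // Scale and shift the whole spatial slice in one call:
                    // convertTo computes outSlice = inpSlice*w + b (alpha=w, beta=b).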
                    Mat inpSlice(1, spatialSize, CV_32F, inpData);
                    Mat outSlice(1, spatialSize, CV_32F, outData);
                    inpSlice.convertTo(outSlice, CV_32F, w, b);
                    inpData += spatialSize;
                    outData += spatialSize;
                }
            }
        }
        else
        {
            for (int i = 0; i < numSlices; ++i)
            {
                Mat inpSlice(1, numWeights, CV_32F, inpData);
                Mat outSlice(1, numWeights, CV_32F, outData);
                if (!weights.empty())
                {
                    multiply(inpSlice, weights, outSlice);
                    if (hasBias)
                        add(outSlice, bias, outSlice);
                }
                else if (hasBias)
                    add(inpSlice, bias, outSlice);
                inpData += numWeights;
                outData += numWeights;
            }
        }
    }

    virtual Ptr<BackendNode> tryAttach(const Ptr<BackendNode>& node) CV_OVERRIDE
    {
        switch (node->backendId)
        {
            case DNN_BACKEND_HALIDE:
            {
#ifdef HAVE_HALIDE
                auto base = node.dynamicCast<HalideBackendNode>();
                Halide::Func& input = base->funcs.back();
                Halide::Var x("x"), y("y"), c("c"), n("n");
                Halide::Func top = attachHalide(input(x, y, c, n));
                return Ptr<BackendNode>(new HalideBackendNode(base, top));
#endif  // HAVE_HALIDE
                break;
            }
        }
        return Ptr<BackendNode>();
    }

    virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
    {
#ifdef HAVE_HALIDE
        Halide::Buffer<float> input = halideBuffer(inputs[0]);
        Halide::Var x("x"), y("y"), c("c"), n("n");
        Halide::Func top = attachHalide(input(x, y, c, n));
        return Ptr<BackendNode>(new HalideBackendNode(top));
#endif  // HAVE_HALIDE
        return Ptr<BackendNode>();
    }

#ifdef HAVE_HALIDE
    // attachHalide can work both with Halide::Buffer and Halide::Func. In the
    // second case it will be a fusion.
    Halide::Func attachHalide(const Halide::Expr& input)
    {
        Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
        Halide::Var x("x"), y("y"), c("c"), n("n");

        const int numChannels = blobs[0].total();

        Halide::Expr topExpr = input;
        if (hasWeights)
        {
            auto weights = wrapToHalideBuffer(blobs[0], {numChannels});
            topExpr *= weights(c);
        }
        if (hasBias)
        {
            auto bias = wrapToHalideBuffer(blobs.back(), {numChannels});
            topExpr += bias(c);
        }
        top(x, y, c, n) = topExpr;
        return top;
    }
#endif  // HAVE_HALIDE

    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
    {
#ifdef HAVE_INF_ENGINE
        InferenceEngine::LayerParams lp;
        lp.name = name;
        lp.type = "ScaleShift";
        lp.precision = InferenceEngine::Precision::FP32;
        std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));

        CV_Assert(!blobs.empty());
        const size_t numChannels = blobs[0].total();
        if (hasWeights)
        {
            ieLayer->_weights = wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C);
        }
        else
        {
            auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
                                                                    {numChannels});
            weights->allocate();

            std::vector<float> ones(numChannels, 1);
            weights->set(ones);
            ieLayer->_weights = weights;
        }
        if (hasBias)
            ieLayer->_biases = wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C);

        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif  // HAVE_INF_ENGINE
        return Ptr<BackendNode>();
    }

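    // Exposes the channel-wise scale/shift blobs (empty Mats when absent) so the
    // DNN graph fusion can fold this layer into a preceding layer such as a convolution.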
    void getScaleShift(Mat& scale, Mat& shift) const CV_OVERRIDE
    {
        scale = hasWeights ? blobs[0] : Mat();
        shift = hasBias ? blobs.back() : Mat();
    }

    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const CV_OVERRIDE
    {
        CV_UNUSED(outputs);  // suppress unused variable warning
        long flops = 0;
        for (int i = 0; i < inputs.size(); i++)
        {
            flops += 2*total(inputs[i]);
        }
        return flops;
    }

private:
    bool hasWeights;
};


Ptr<ScaleLayer> ScaleLayer::create(const LayerParams& params)
{
    return Ptr<ScaleLayer>(new ScaleLayerImpl(params));
}

Ptr<Layer> ShiftLayer::create(const LayerParams& params)
{
    LayerParams scaleParams;
    scaleParams.name = params.name;
    scaleParams.type = "Scale";
    scaleParams.blobs = params.blobs;
    scaleParams.set("bias_term", true);
    scaleParams.set("axis", 0);
    return Ptr<ScaleLayer>(new ScaleLayerImpl(scaleParams));
}

}  // namespace dnn
}  // namespace cv