Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Tetragramm
GitHub Repository: Tetragramm/opencv
Path: blob/master/modules/dnn/test/test_ie_models.cpp
16354 views
1
// This file is part of OpenCV project.
2
// It is subject to the license terms in the LICENSE file found in the top-level directory
3
// of this distribution and at http://opencv.org/license.html.
4
//
5
// Copyright (C) 2018, Intel Corporation, all rights reserved.
6
// Third party copyrights are property of their respective owners.
7
#include "test_precomp.hpp"
8
9
#ifdef HAVE_INF_ENGINE
10
#include <opencv2/core/utils/filesystem.hpp>
11
12
#include <inference_engine.hpp>
13
#include <ie_icnn_network.hpp>
14
#include <ie_extension.h>
15
16
namespace opencv_test { namespace {
17
18
static void initDLDTDataPath()
19
{
20
#ifndef WINRT
21
static bool initialized = false;
22
if (!initialized)
23
{
24
const char* dldtTestDataPath = getenv("INTEL_CVSDK_DIR");
25
if (dldtTestDataPath)
26
cvtest::addDataSearchPath(cv::utils::fs::join(dldtTestDataPath, "deployment_tools"));
27
initialized = true;
28
}
29
#endif
30
}
31
32
using namespace cv;
33
using namespace cv::dnn;
34
using namespace InferenceEngine;
35
36
static inline void genData(const std::vector<size_t>& dims, Mat& m, Blob::Ptr& dataPtr)
37
{
38
std::vector<int> reversedDims(dims.begin(), dims.end());
39
std::reverse(reversedDims.begin(), reversedDims.end());
40
41
m.create(reversedDims, CV_32F);
42
randu(m, -1, 1);
43
44
dataPtr = make_shared_blob<float>(Precision::FP32, dims, (float*)m.data);
45
}
46
47
// Runs inference with the native Inference Engine API on the IR model given by
// xmlPath/binPath. Generates random inputs (returned in inputsMap so the same
// data can be replayed through OpenCV) and stores the resulting outputs in
// outputsMap. Raises cv::Exception if the IE backend cannot be initialized.
void runIE(Target target, const std::string& xmlPath, const std::string& binPath,
           std::map<std::string, cv::Mat>& inputsMap, std::map<std::string, cv::Mat>& outputsMap)
{
    // Parse the IR topology and attach the binary weights.
    CNNNetReader reader;
    reader.ReadNetwork(xmlPath);
    reader.ReadWeights(binPath);

    CNNNetwork net = reader.getNetwork();

    InferenceEnginePluginPtr enginePtr;
    InferencePlugin plugin;
    ExecutableNetwork netExec;
    InferRequest infRequest;
    TargetDevice targetDevice;
    // Map OpenCV DNN targets onto IE device enums. Both OpenCL targets share
    // the GPU plugin; precision is decided by the IR itself (FP16 vs FP32).
    switch (target)
    {
    case DNN_TARGET_CPU:
        targetDevice = TargetDevice::eCPU;
        break;
    case DNN_TARGET_OPENCL:
    case DNN_TARGET_OPENCL_FP16:
        targetDevice = TargetDevice::eGPU;
        break;
    case DNN_TARGET_MYRIAD:
        targetDevice = TargetDevice::eMYRIAD;
        break;
    default:
        CV_Error(Error::StsNotImplemented, "Unknown target");
    };

    try
    {
        enginePtr = PluginDispatcher({""}).getSuitablePlugin(targetDevice);

        if (targetDevice == TargetDevice::eCPU)
        {
            // Try to load the CPU extensions library (extra layer kernels),
            // preferring the most optimized variant the host CPU supports.
            std::string suffixes[] = {"_avx2", "_sse4", ""};
            bool haveFeature[] = {
                checkHardwareSupport(CPU_AVX2),
                checkHardwareSupport(CPU_SSE4_2),
                true
            };
            for (int i = 0; i < 3; ++i)
            {
                if (!haveFeature[i])
                    continue;
#ifdef _WIN32
                std::string libName = "cpu_extension" + suffixes[i] + ".dll";
#else
                std::string libName = "libcpu_extension" + suffixes[i] + ".so";
#endif  // _WIN32
                try
                {
                    IExtensionPtr extension = make_so_pointer<IExtension>(libName);
                    enginePtr->AddExtension(extension, 0);
                    break;  // first variant that loads wins
                }
                catch(...) {}  // deliberately best-effort: fall through to next variant
            }
            // Some of networks can work without a library of extra layers.
        }
        plugin = InferencePlugin(enginePtr);

        netExec = plugin.LoadNetwork(net, {});
        infRequest = netExec.CreateInferRequest();
    }
    catch (const std::exception& ex)
    {
        // Convert IE initialization failures into a test assertion error.
        CV_Error(Error::StsAssert, format("Failed to initialize Inference Engine backend: %s", ex.what()));
    }

    // Fill input blobs.
    inputsMap.clear();
    BlobMap inputBlobs;
    for (auto& it : net.getInputsInfo())
    {
        // genData creates a random Mat in inputsMap and an IE blob sharing it.
        genData(it.second->getDims(), inputsMap[it.first], inputBlobs[it.first]);
    }
    infRequest.SetInput(inputBlobs);

    // Fill output blobs.
    outputsMap.clear();
    BlobMap outputBlobs;
    for (auto& it : net.getOutputsInfo())
    {
        // Output Mats alias the IE output blobs, so after Infer() the results
        // are visible directly through outputsMap.
        genData(it.second->dims, outputsMap[it.first], outputBlobs[it.first]);
    }
    infRequest.SetOutput(outputBlobs);

    infRequest.Infer();
}
138
139
std::vector<String> getOutputsNames(const Net& net)
140
{
141
std::vector<String> names;
142
if (names.empty())
143
{
144
std::vector<int> outLayers = net.getUnconnectedOutLayers();
145
std::vector<String> layersNames = net.getLayerNames();
146
names.resize(outLayers.size());
147
for (size_t i = 0; i < outLayers.size(); ++i)
148
names[i] = layersNames[outLayers[i] - 1];
149
}
150
return names;
151
}
152
153
void runCV(Target target, const std::string& xmlPath, const std::string& binPath,
154
const std::map<std::string, cv::Mat>& inputsMap,
155
std::map<std::string, cv::Mat>& outputsMap)
156
{
157
Net net = readNet(xmlPath, binPath);
158
for (auto& it : inputsMap)
159
net.setInput(it.second, it.first);
160
net.setPreferableTarget(target);
161
162
std::vector<String> outNames = getOutputsNames(net);
163
std::vector<Mat> outs;
164
net.forward(outs, outNames);
165
166
outputsMap.clear();
167
EXPECT_EQ(outs.size(), outNames.size());
168
for (int i = 0; i < outs.size(); ++i)
169
{
170
EXPECT_TRUE(outputsMap.insert({outNames[i], outs[i]}).second);
171
}
172
}
173
174
// Parameterized over (compute target, model name).
typedef TestWithParam<tuple<Target, String> > DNNTestOpenVINO;

// Runs each OpenVINO model through the native Inference Engine (runIE) and
// through OpenCV's DNN module (runCV) with identical random inputs, then
// requires the two sets of outputs to match bit-exactly.
TEST_P(DNNTestOpenVINO, models)
{
    Target target = (dnn::Target)(int)get<0>(GetParam());
    std::string modelName = get<1>(GetParam());

    // Known-failing model/target combinations are skipped, not failed.
    if (target == DNN_TARGET_MYRIAD && (modelName == "landmarks-regression-retail-0001" ||
                                        modelName == "semantic-segmentation-adas-0001" ||
                                        modelName == "face-reidentification-retail-0001"))
        throw SkipTestException("");

    // Half-precision targets use the FP16 IR variant of each model.
    std::string precision = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? "FP16" : "FP32";
    // Layout: intel_models/<model>/<precision>/<model>.{xml,bin}
    std::string prefix = utils::fs::join("intel_models",
                         utils::fs::join(modelName,
                         utils::fs::join(precision, modelName)));
    std::string xmlPath = findDataFile(prefix + ".xml");
    std::string binPath = findDataFile(prefix + ".bin");

    std::map<std::string, cv::Mat> inputsMap;
    std::map<std::string, cv::Mat> ieOutputsMap, cvOutputsMap;
    // Single Myriad device cannot be shared across multiple processes.
    resetMyriadDevice();
    runIE(target, xmlPath, binPath, inputsMap, ieOutputsMap);
    runCV(target, xmlPath, binPath, inputsMap, cvOutputsMap);

    EXPECT_EQ(ieOutputsMap.size(), cvOutputsMap.size());
    for (auto& srcIt : ieOutputsMap)
    {
        auto dstIt = cvOutputsMap.find(srcIt.first);
        CV_Assert(dstIt != cvOutputsMap.end());
        // Both paths ran identical inputs on the same engine, so the outputs
        // are expected to agree bit-exactly (inf-norm of the diff is zero).
        double normInf = cvtest::norm(srcIt.second, dstIt->second, cv::NORM_INF);
        EXPECT_EQ(normInf, 0);
    }
}
208
209
// Enumerates the model directories shipped under the OpenVINO "intel_models"
// test-data directory. Produces an empty generator (after logging an error)
// when the directory cannot be located; asserts that at least one model
// directory exists once the directory itself is found.
static testing::internal::ParamGenerator<String> intelModels()
{
    initDLDTDataPath();
    std::vector<String> modelsNames;

    std::string path;
    try
    {
        path = findDataDirectory("intel_models", false);
    }
    catch (...)
    {
        std::cerr << "ERROR: Can't find OpenVINO models. Check INTEL_CVSDK_DIR environment variable (run setup.sh)" << std::endl;
        return ValuesIn(modelsNames); // empty list
    }

    std::vector<String> entries;
    cv::utils::fs::glob_relative(path, "", entries, false, true);

    // Each model lives in its own folder; keep only directory entries.
    for (const String& entry : entries)
    {
        if (utils::fs::isDirectory(utils::fs::join(path, entry)))
            modelsNames.push_back(entry);
    }
    CV_Assert(!modelsNames.empty());

    return ValuesIn(modelsNames);
}
236
237
// Builds the list of DNN targets usable through the Inference Engine backend
// on this machine: CPU always; Intel OpenCL GPU when available; Myriad when
// a device is attached.
static testing::internal::ParamGenerator<Target> dnnDLIETargets()
{
    std::vector<Target> availableTargets = { DNN_TARGET_CPU };
#ifdef HAVE_OPENCL
    // The IE GPU plugin only works on Intel OpenCL devices.
    if (cv::ocl::useOpenCL() && ocl::Device::getDefault().isIntel())
    {
        availableTargets.push_back(DNN_TARGET_OPENCL);
        availableTargets.push_back(DNN_TARGET_OPENCL_FP16);
    }
#endif
    if (checkMyriadTarget())
        availableTargets.push_back(DNN_TARGET_MYRIAD);
    return testing::ValuesIn(availableTargets);
}
252
253
// Instantiate the test over the Cartesian product of available IE targets
// and the OpenVINO models discovered on disk.
INSTANTIATE_TEST_CASE_P(/**/, DNNTestOpenVINO, Combine(
    dnnDLIETargets(), intelModels()
));
256
257
}}
258
#endif // HAVE_INF_ENGINE
259
260