Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Tetragramm
GitHub Repository: Tetragramm/opencv
Path: blob/master/modules/dnn/test/test_tf_importer.cpp
16339 views
1
// This file is part of OpenCV project.
2
// It is subject to the license terms in the LICENSE file found in the top-level directory
3
// of this distribution and at http://opencv.org/license.html.
4
5
// Copyright (C) 2017, Intel Corporation, all rights reserved.
6
// Third party copyrights are property of their respective owners.
7
8
/*
9
Test for Tensorflow models loading
10
*/
11
12
#include "test_precomp.hpp"
13
#include "npy_blob.hpp"
14
15
#include <opencv2/dnn/layer.details.hpp> // CV_DNN_REGISTER_LAYER_CLASS
16
17
namespace opencv_test
18
{
19
20
using namespace cv;
21
using namespace cv::dnn;
22
23
template<typename TString>
24
static std::string _tf(TString filename)
25
{
26
return (getOpenCVExtraDir() + "/dnn/") + filename;
27
}
28
29
// Smoke test: the frozen Inception graph can be parsed and run end-to-end on
// the OpenCV backend. No reference values are compared; only successful
// loading and a forward pass are checked (the output dims are printed).
TEST(Test_TensorFlow, read_inception)
{
    Net net;
    {
        const string model = findDataFile("dnn/tensorflow_inception_graph.pb", false);
        net = readNetFromTensorflow(model);
        ASSERT_FALSE(net.empty());
    }
    net.setPreferableBackend(DNN_BACKEND_OPENCV);

    Mat sample = imread(_tf("grace_hopper_227.png"));
    ASSERT_TRUE(!sample.empty());
    Mat input;
    resize(sample, input, Size(224, 224));
    input -= 128; // mean sub

    Mat inputBlob = blobFromImage(input);

    net.setInput(inputBlob, "input");
    Mat out = net.forward("softmax2");

    std::cout << out.dims << std::endl;
}
52
53
// Accuracy test: Inception output must match the stored NumPy reference.
TEST(Test_TensorFlow, inception_accuracy)
{
    Net net;
    {
        const string model = findDataFile("dnn/tensorflow_inception_graph.pb", false);
        net = readNetFromTensorflow(model);
        ASSERT_FALSE(net.empty());
    }
    net.setPreferableBackend(DNN_BACKEND_OPENCV);

    Mat img = imread(_tf("grace_hopper_227.png"));
    ASSERT_TRUE(!img.empty());

    // Resize to the 224x224 network input and swap BGR -> RGB while blobbing.
    Mat blob = blobFromImage(img, 1.0, Size(224, 224), Scalar(), /*swapRB*/true);
    net.setInput(blob, "input");
    Mat out = net.forward("softmax2");

    Mat ref = blobFromNPY(_tf("tf_inception_prob.npy"));
    normAssert(ref, out);
}
74
75
static std::string path(const std::string& file)
76
{
77
return findDataFile("dnn/tensorflow/" + file, false);
78
}
79
80
// Parameterized fixture for single-layer TensorFlow graphs; backend/target
// come from the test parameter (see INSTANTIATE_TEST_CASE_P below).
class Test_TensorFlow_layers : public DNNTestLayer
{
public:
    // Runs <prefix>_net.pb on <prefix>_in.npy and compares the result with
    // <prefix>_out.npy.
    //   hasText    - also pass <prefix>_net.pbtxt as the text graph config.
    //   l1, lInf   - custom tolerances; 0.0 falls back to the fixture defaults.
    //   memoryLoad - exercise the from-memory overload of readNetFromTensorflow.
    void runTensorFlowNet(const std::string& prefix, bool hasText = false,
                          double l1 = 0.0, double lInf = 0.0, bool memoryLoad = false)
    {
        std::string netPath = path(prefix + "_net.pb");
        std::string netConfig = (hasText ? path(prefix + "_net.pbtxt") : "");
        std::string inpPath = path(prefix + "_in.npy");
        std::string outPath = path(prefix + "_out.npy");

        cv::Mat input = blobFromNPY(inpPath);
        cv::Mat ref = blobFromNPY(outPath);
        // May skip the test when the backend/target cannot handle these blobs.
        checkBackend(&input, &ref);

        Net net;
        if (memoryLoad)
        {
            // Load files into a memory buffers
            string dataModel;
            ASSERT_TRUE(readFileInMemory(netPath, dataModel));

            // dataConfig stays empty (size 0) when there is no text graph.
            string dataConfig;
            if (hasText)
                ASSERT_TRUE(readFileInMemory(netConfig, dataConfig));

            net = readNetFromTensorflow(dataModel.c_str(), dataModel.size(),
                                        dataConfig.c_str(), dataConfig.size());
        }
        else
            net = readNetFromTensorflow(netPath, netConfig);

        ASSERT_FALSE(net.empty());

        net.setPreferableBackend(backend);
        net.setPreferableTarget(target);
        net.setInput(input);
        cv::Mat output = net.forward();
        normAssert(ref, output, "", l1 ? l1 : default_l1, lInf ? lInf : default_lInf);
    }
};
121
122
// Convolution variants: plain, atrous (dilated), depthwise, and NCHW layout.
TEST_P(Test_TensorFlow_layers, conv)
{
    runTensorFlowNet("single_conv");
    runTensorFlowNet("atrous_conv2d_valid");
    runTensorFlowNet("atrous_conv2d_same");
    runTensorFlowNet("depthwise_conv2d");
    runTensorFlowNet("keras_atrous_conv2d_same");
    runTensorFlowNet("conv_pool_nchw");
}
131
132
// Padding graphs: SAME/VALID convolution padding and an explicit spatial pad.
TEST_P(Test_TensorFlow_layers, padding)
{
    runTensorFlowNet("padding_same");
    runTensorFlowNet("padding_valid");
    runTensorFlowNet("spatial_padding");
}
138
139
// Element-wise add/mul graph against its stored reference output.
TEST_P(Test_TensorFlow_layers, eltwise_add_mul)
{
    runTensorFlowNet("eltwise_add_mul");
}
143
144
// Pad followed by concat; skipped on Myriad with OpenVINO older than 2018R3.
TEST_P(Test_TensorFlow_layers, pad_and_concat)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        throw SkipTestException("Test is enabled starts from OpenVINO 2018R3");
#endif
    runTensorFlowNet("pad_and_concat");
}
152
153
// Concatenation along the channels axis.
TEST_P(Test_TensorFlow_layers, concat_axis_1)
{
    runTensorFlowNet("concat_axis_1");
}
157
158
// Batch normalization variants: fused/unfused, text graph, no-gamma,
// and MVN-based forms. Entries with a trailing `true` additionally
// exercise loading the model from an in-memory buffer.
TEST_P(Test_TensorFlow_layers, batch_norm)
{
    runTensorFlowNet("batch_norm");
    runTensorFlowNet("batch_norm", false, 0.0, 0.0, true);  // memory-load path
    runTensorFlowNet("fused_batch_norm");
    runTensorFlowNet("fused_batch_norm", false, 0.0, 0.0, true);  // memory-load path
    runTensorFlowNet("batch_norm_text", true);
    runTensorFlowNet("batch_norm_text", true, 0.0, 0.0, true);  // memory-load path
    runTensorFlowNet("unfused_batch_norm");
    runTensorFlowNet("fused_batch_norm_no_gamma");
    runTensorFlowNet("unfused_batch_norm_no_gamma");
    runTensorFlowNet("mvn_batch_norm");
    runTensorFlowNet("mvn_batch_norm_1x1");
}
172
173
// Max pooling with even/odd input sizes plus a reduce_mean graph.
TEST_P(Test_TensorFlow_layers, pooling)
{
    runTensorFlowNet("max_pool_even");
    runTensorFlowNet("max_pool_odd_valid");
    runTensorFlowNet("max_pool_odd_same");
    runTensorFlowNet("reduce_mean"); // an average pooling over all spatial dimensions.
}
180
181
// TODO: fix tests and replace to pooling
182
// Average pooling with SAME padding; kept separate from the pooling test
// because it needs a Myriad skip on OpenVINO older than 2018R3.
TEST_P(Test_TensorFlow_layers, ave_pool_same)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        throw SkipTestException("Test is enabled starts from OpenVINO 2018R3");
#endif
    runTensorFlowNet("ave_pool_same");
}
190
191
// Transposed convolution variants: padding modes, strides, adjusted pads,
// and Keras-generated graphs.
TEST_P(Test_TensorFlow_layers, deconvolution)
{
    runTensorFlowNet("deconvolution");
    runTensorFlowNet("deconvolution_same");
    runTensorFlowNet("deconvolution_stride_2_same");
    runTensorFlowNet("deconvolution_adj_pad_valid");
    runTensorFlowNet("deconvolution_adj_pad_same");
    runTensorFlowNet("keras_deconv_valid");
    runTensorFlowNet("keras_deconv_same");
}
201
202
// MatMul graphs, including ones preceded by NHWC reshape/transpose.
// Skipped on the OpenCV backend with an FP16 OpenCL target.
TEST_P(Test_TensorFlow_layers, matmul)
{
    if (target == DNN_TARGET_OPENCL_FP16 && backend == DNN_BACKEND_OPENCV)
        throw SkipTestException("");
    runTensorFlowNet("matmul");
    runTensorFlowNet("nhwc_reshape_matmul");
    runTensorFlowNet("nhwc_transpose_reshape_matmul");
}
210
211
// Reshape graphs; not supported on the Inference Engine backend.
TEST_P(Test_TensorFlow_layers, reshape)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
        throw SkipTestException("");
    runTensorFlowNet("shift_reshape_no_reorder");
    runTensorFlowNet("reshape_no_reorder");
    runTensorFlowNet("reshape_reduce");
    runTensorFlowNet("reshape_as_shape");
}
220
221
// Flatten via a text graph; skipped on IE with non-CPU targets.
TEST_P(Test_TensorFlow_layers, flatten)
{
    const bool nonCpuTarget = target == DNN_TARGET_OPENCL ||
                              target == DNN_TARGET_OPENCL_FP16 ||
                              target == DNN_TARGET_MYRIAD;
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && nonCpuTarget)
        throw SkipTestException("");
    runTensorFlowNet("flatten", true);
}
228
229
// Flatten expressed with primitive ops; skipped on IE OpenCL targets.
TEST_P(Test_TensorFlow_layers, unfused_flatten)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
        (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
        throw SkipTestException("");
    runTensorFlowNet("unfused_flatten");
    runTensorFlowNet("unfused_flatten_unknown_batch");
}
237
238
// Leaky ReLU expressed with different operand orderings in the graph.
TEST_P(Test_TensorFlow_layers, leaky_relu)
{
    runTensorFlowNet("leaky_relu_order1");
    runTensorFlowNet("leaky_relu_order2");
    runTensorFlowNet("leaky_relu_order3");
}
244
245
// L2 normalization graph.
TEST_P(Test_TensorFlow_layers, l2_normalize)
{
    runTensorFlowNet("l2_normalize");
}
249
250
// TODO: fix it and add to l2_normalize
251
// 3D L2 normalization; on IE only the CPU target is supported.
TEST_P(Test_TensorFlow_layers, l2_normalize_3d)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU)
        throw SkipTestException("");
    runTensorFlowNet("l2_normalize_3d");
}
257
258
// Parameterized fixture for whole-network tests; backend/target from GetParam().
class Test_TensorFlow_nets : public DNNTestLayer {};
259
260
// MobileNet-SSD (COCO): compares the raw "concat"/"concat_1" tensors and the
// final detection output against stored NumPy references.
TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
{
    checkBackend();
    if ((backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU) ||
        (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
        throw SkipTestException("");

    std::string netPath = findDataFile("dnn/ssd_mobilenet_v1_coco.pb", false);
    std::string netConfig = findDataFile("dnn/ssd_mobilenet_v1_coco.pbtxt", false);
    std::string imgPath = findDataFile("dnn/street.png", false);

    // 300x300 input, scaled to [-1, 1] with BGR -> RGB swap.
    Mat inp;
    resize(imread(imgPath), inp, Size(300, 300));
    inp = blobFromImage(inp, 1.0f / 127.5, Size(), Scalar(127.5, 127.5, 127.5), true);

    std::vector<String> outNames(3);
    outNames[0] = "concat";
    outNames[1] = "concat_1";
    outNames[2] = "detection_out";

    std::vector<Mat> refs(outNames.size());
    // size_t index avoids the signed/unsigned comparison with outNames.size().
    for (size_t i = 0; i < outNames.size(); ++i)
    {
        std::string path = findDataFile("dnn/tensorflow/ssd_mobilenet_v1_coco." + outNames[i] + ".npy", false);
        refs[i] = blobFromNPY(path);
    }

    Net net = readNetFromTensorflow(netPath, netConfig);
    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    net.setInput(inp);

    std::vector<Mat> output;
    net.forward(output, outNames);

    normAssert(refs[0].reshape(1, 1), output[0].reshape(1, 1), "", 1e-5, 1.5e-4);
    normAssert(refs[1].reshape(1, 1), output[1].reshape(1, 1), "", 1e-5, 3e-4);
    normAssertDetections(refs[2], output[2], "", 0.2);
}
300
301
// SSD with an Inception-v2 backbone: detections are compared with
// hard-coded reference boxes; tolerances are relaxed for FP16/Myriad.
TEST_P(Test_TensorFlow_nets, Inception_v2_SSD)
{
    checkBackend();
    const std::string cfgPath = findDataFile("dnn/ssd_inception_v2_coco_2017_11_17.pbtxt", false);
    const std::string modelPath = findDataFile("dnn/ssd_inception_v2_coco_2017_11_17.pb", false);

    Net net = readNetFromTensorflow(modelPath, cfgPath);
    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    Mat img = imread(findDataFile("dnn/street.png", false));
    Mat blob = blobFromImage(img, 1.0f, Size(300, 300), Scalar(), true, false);
    net.setInput(blob);

    // Output has shape 1x1xNx7 where N - number of detections.
    // An every detection is a vector of values [id, classId, confidence, left, top, right, bottom]
    Mat out = net.forward();

    Mat ref = (Mat_<float>(5, 7) << 0, 1, 0.90176028, 0.19872092, 0.36311883, 0.26461923, 0.63498729,
                                    0, 3, 0.93569964, 0.64865261, 0.45906419, 0.80675775, 0.65708131,
                                    0, 3, 0.75838411, 0.44668293, 0.45907149, 0.49459291, 0.52197015,
                                    0, 10, 0.95932811, 0.38349164, 0.32528657, 0.40387636, 0.39165527,
                                    0, 10, 0.93973452, 0.66561931, 0.37841269, 0.68074018, 0.42907384);
    const bool lowPrecision = target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD;
    double scoreDiff = lowPrecision ? 0.0097 : default_l1;
    double iouDiff = lowPrecision ? 0.09 : default_lInf;
    normAssertDetections(ref, out, "", 0.5, scoreDiff, iouDiff);
}
327
328
// MobileNet-v1 SSD (2017_11_17): detection output is compared against a
// stored NumPy reference; tolerances are relaxed for FP16/Myriad targets.
TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD)
{
    checkBackend();

    const std::string modelPath = findDataFile("dnn/ssd_mobilenet_v1_coco_2017_11_17.pb", false);
    const std::string cfgPath = findDataFile("dnn/ssd_mobilenet_v1_coco_2017_11_17.pbtxt", false);

    Net net = readNetFromTensorflow(modelPath, cfgPath);
    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    Mat img = imread(findDataFile("dnn/dog416.png", false));
    Mat blob = blobFromImage(img, 1.0f, Size(300, 300), Scalar(), true, false);
    net.setInput(blob);
    Mat out = net.forward();

    Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/ssd_mobilenet_v1_coco_2017_11_17.detection_out.npy"));
    const bool lowPrecision = target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD;
    float scoreDiff = lowPrecision ? 7e-3 : 1e-5;
    float iouDiff = lowPrecision ? 0.0098 : 1e-3;
    normAssertDetections(ref, out, "", 0.3, scoreDiff, iouDiff);
}
350
351
// Faster R-CNN detection models, compared against stored NumPy references.
TEST_P(Test_TensorFlow_nets, Faster_RCNN)
{
    static std::string names[] = {"faster_rcnn_inception_v2_coco_2018_01_28",
                                  "faster_rcnn_resnet50_coco_2018_01_28"};

    checkBackend();
    if ((backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU) ||
        (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
        throw SkipTestException("");

    // NOTE(review): the loop starts at i = 1, so only the resnet50 model runs
    // and names[0] (inception_v2) is silently skipped -- confirm whether this
    // is a deliberate disable or should read `i = 0`.
    for (int i = 1; i < 2; ++i)
    {
        std::string proto = findDataFile("dnn/" + names[i] + ".pbtxt", false);
        std::string model = findDataFile("dnn/" + names[i] + ".pb", false);

        Net net = readNetFromTensorflow(model, proto);
        net.setPreferableBackend(backend);
        net.setPreferableTarget(target);
        Mat img = imread(findDataFile("dnn/dog416.png", false));
        Mat blob = blobFromImage(img, 1.0f, Size(800, 600), Scalar(), true, false);

        net.setInput(blob);
        Mat out = net.forward();

        Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/" + names[i] + ".detection_out.npy"));
        normAssertDetections(ref, out, names[i].c_str(), 0.3);
    }
}
379
380
// MobileNet-v1 SSD with PPN feature extractor: detections are compared
// against a stored NumPy reference with looser FP16/Myriad tolerances.
TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD_PPN)
{
    checkBackend();
    const std::string cfgPath = findDataFile("dnn/ssd_mobilenet_v1_ppn_coco.pbtxt", false);
    const std::string modelPath = findDataFile("dnn/ssd_mobilenet_v1_ppn_coco.pb", false);

    Mat img = imread(findDataFile("dnn/dog416.png", false));
    Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/ssd_mobilenet_v1_ppn_coco.detection_out.npy", false));
    Mat blob = blobFromImage(img, 1.0f, Size(300, 300), Scalar(), true, false);

    Net net = readNetFromTensorflow(modelPath, cfgPath);
    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    net.setInput(blob);
    Mat out = net.forward();

    const bool lowPrecision = target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD;
    double scoreDiff = lowPrecision ? 0.011 : default_l1;
    double iouDiff = lowPrecision ? 0.021 : default_lInf;
    normAssertDetections(ref, out, "", 0.4, scoreDiff, iouDiff);
}
401
402
// Quantized (uint8 weights) OpenCV face detector; detections are compared
// with references taken from the Caffe-model test.
TEST_P(Test_TensorFlow_nets, opencv_face_detector_uint8)
{
    checkBackend();
    std::string proto = findDataFile("dnn/opencv_face_detector.pbtxt", false);
    std::string model = findDataFile("dnn/opencv_face_detector_uint8.pb", false);

    Net net = readNetFromTensorflow(model, proto);
    Mat img = imread(findDataFile("gpu/lbpcascade/er.png", false));
    // Mean subtraction only; no scaling, no channel swap, no crop.
    Mat blob = blobFromImage(img, 1.0, Size(), Scalar(104.0, 177.0, 123.0), false, false);

    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);
    net.setInput(blob);
    // Output has shape 1x1xNx7 where N - number of detections.
    // An every detection is a vector of values [id, classId, confidence, left, top, right, bottom]
    Mat out = net.forward();

    // References are from test for Caffe model.
    Mat ref = (Mat_<float>(6, 7) << 0, 1, 0.99520785, 0.80997437, 0.16379407, 0.87996572, 0.26685631,
                                    0, 1, 0.9934696, 0.2831718, 0.50738752, 0.345781, 0.5985168,
                                    0, 1, 0.99096733, 0.13629119, 0.24892329, 0.19756334, 0.3310290,
                                    0, 1, 0.98977017, 0.23901358, 0.09084064, 0.29902688, 0.1769477,
                                    0, 1, 0.97203469, 0.67965847, 0.06876482, 0.73999709, 0.1513494,
                                    0, 1, 0.95097077, 0.51901293, 0.45863652, 0.5777427, 0.5347801);
    // Looser tolerances for reduced-precision targets.
    double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 4e-3 : 3.4e-3;
    double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.024 : 1e-2;
    normAssertDetections(ref, out, "", 0.9, scoreDiff, iouDiff);
}
430
431
// inp = cv.imread('opencv_extra/testdata/cv/ximgproc/sources/08.png')
432
// inp = inp[:,:,[2, 1, 0]].astype(np.float32).reshape(1, 512, 512, 3)
433
// outs = sess.run([sess.graph.get_tensor_by_name('feature_fusion/Conv_7/Sigmoid:0'),
434
// sess.graph.get_tensor_by_name('feature_fusion/concat_3:0')],
435
// feed_dict={'input_images:0': inp})
436
// scores = np.ascontiguousarray(outs[0].transpose(0, 3, 1, 2))
437
// geometry = np.ascontiguousarray(outs[1].transpose(0, 3, 1, 2))
438
// np.save('east_text_detection.scores.npy', scores)
439
// np.save('east_text_detection.geometry.npy', geometry)
440
// EAST text detector: compares the score and geometry output maps against
// NumPy references generated by the Python snippet in the comment above.
TEST_P(Test_TensorFlow_nets, EAST_text_detection)
{
    checkBackend();
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        throw SkipTestException("Test is enabled starts from OpenVINO 2018R3");
#endif

    std::string netPath = findDataFile("dnn/frozen_east_text_detection.pb", false);
    std::string imgPath = findDataFile("cv/ximgproc/sources/08.png", false);
    std::string refScoresPath = findDataFile("dnn/east_text_detection.scores.npy", false);
    std::string refGeometryPath = findDataFile("dnn/east_text_detection.geometry.npy", false);

    // Reuse the already resolved model path instead of calling findDataFile
    // a second time with the same file name.
    Net net = readNet(netPath);

    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    Mat img = imread(imgPath);
    Mat inp = blobFromImage(img, 1.0, Size(), Scalar(123.68, 116.78, 103.94), true, false);
    net.setInput(inp);

    std::vector<Mat> outs;
    std::vector<String> outNames(2);
    outNames[0] = "feature_fusion/Conv_7/Sigmoid";
    outNames[1] = "feature_fusion/concat_3";
    net.forward(outs, outNames);

    Mat scores = outs[0];
    Mat geometry = outs[1];

    // Scores are in range [0, 1]. Geometry values are in range [-0.23, 290]
    double l1_scores = default_l1, lInf_scores = default_lInf;
    double l1_geometry = default_l1, lInf_geometry = default_lInf;
    if (target == DNN_TARGET_OPENCL_FP16)
    {
        lInf_scores = 0.11;
        l1_geometry = 0.28; lInf_geometry = 5.94;
    }
    else if (target == DNN_TARGET_MYRIAD)
    {
        lInf_scores = 0.214;
        l1_geometry = 0.47; lInf_geometry = 15.34;
    }
    else
    {
        l1_geometry = 1e-4, lInf_geometry = 3e-3;
    }
    normAssert(scores, blobFromNPY(refScoresPath), "scores", l1_scores, lInf_scores);
    normAssert(geometry, blobFromNPY(refGeometryPath), "geometry", l1_geometry, lInf_geometry);
}
491
492
INSTANTIATE_TEST_CASE_P(/**/, Test_TensorFlow_nets, dnnBackendsAndTargets());
493
494
// Graphs stored with half-precision weights; tolerances are widened to
// account for the fp16 quantization error.
TEST_P(Test_TensorFlow_layers, fp16_weights)
{
    const float l1 = 0.00071;
    const float lInf = 0.012;
    runTensorFlowNet("fp16_single_conv", false, l1, lInf);
    runTensorFlowNet("fp16_deconvolution", false, l1, lInf);
    runTensorFlowNet("fp16_max_pool_odd_same", false, l1, lInf);
    runTensorFlowNet("fp16_padding_valid", false, l1, lInf);
    runTensorFlowNet("fp16_eltwise_add_mul", false, l1, lInf);
    runTensorFlowNet("fp16_max_pool_odd_valid", false, l1, lInf);
    runTensorFlowNet("fp16_max_pool_even", false, l1, lInf);
    runTensorFlowNet("fp16_padding_same", false, l1, lInf);
}
507
508
// TODO: fix pad_and_concat and add this test case to fp16_weights
509
// fp16 pad+concat graph; separate from fp16_weights because of the Myriad skip.
TEST_P(Test_TensorFlow_layers, fp16_pad_and_concat)
{
    const float l1 = 0.00071;
    const float lInf = 0.012;
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        throw SkipTestException("Test is enabled starts from OpenVINO 2018R3");
#endif
    runTensorFlowNet("fp16_pad_and_concat", false, l1, lInf);
}
519
520
// Graph that wraps dropout in a TensorFlow Defun.
TEST_P(Test_TensorFlow_layers, defun)
{
    runTensorFlowNet("defun_dropout");
}
524
525
// Convolution graph stored with uint8-quantized weights.
TEST_P(Test_TensorFlow_layers, quantized)
{
    runTensorFlowNet("uint8_single_conv");
}
529
530
// LSTM graph loaded via text config; also exercised through the
// in-memory loading path. Skipped on IE and on OpenCV's FP16 OpenCL target.
TEST_P(Test_TensorFlow_layers, lstm)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE ||
        (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
        throw SkipTestException("");
    runTensorFlowNet("lstm", true);
    runTensorFlowNet("lstm", true, 0.0, 0.0, true);  // memory-load path
}
538
539
// Split into equal parts; not supported on the IE backend.
TEST_P(Test_TensorFlow_layers, split)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
        throw SkipTestException("");
    runTensorFlowNet("split_equals");
}
545
546
// Nearest-neighbor resize; on IE only the Myriad target is supported here.
TEST_P(Test_TensorFlow_layers, resize_nearest_neighbor)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_MYRIAD)
        throw SkipTestException("");
    runTensorFlowNet("resize_nearest_neighbor");
    runTensorFlowNet("keras_upsampling2d");
}
553
554
// 4D slice; skipped on IE OpenCL targets.
TEST_P(Test_TensorFlow_layers, slice)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
        (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
        throw SkipTestException("");
    runTensorFlowNet("slice_4d");
}
561
562
// Keras-generated softmax graph.
TEST_P(Test_TensorFlow_layers, softmax)
{
    runTensorFlowNet("keras_softmax");
}
566
567
// Keras ReLU6, loaded from both the binary graph and the text config.
TEST_P(Test_TensorFlow_layers, relu6)
{
    runTensorFlowNet("keras_relu6");
    runTensorFlowNet("keras_relu6", /*hasText*/ true);
}
572
573
// Head of a Keras MobileNet model.
TEST_P(Test_TensorFlow_layers, keras_mobilenet_head)
{
    runTensorFlowNet("keras_mobilenet_head");
}
577
578
// Bilinear resize, with target sizes given explicitly and via a scale factor.
TEST_P(Test_TensorFlow_layers, resize_bilinear)
{
    runTensorFlowNet("resize_bilinear");
    runTensorFlowNet("resize_bilinear_factor");
}
583
584
INSTANTIATE_TEST_CASE_P(/**/, Test_TensorFlow_layers, dnnBackendsAndTargets());
585
586
// Text graph with two external inputs; the network output must equal their
// element-wise sum.
TEST(Test_TensorFlow, two_inputs)
{
    Net net = readNet(path("two_inputs_net.pbtxt"));
    net.setPreferableBackend(DNN_BACKEND_OPENCV);

    Mat inputA(2, 3, CV_32FC1);
    Mat inputB(2, 3, CV_32FC1);
    randu(inputA, -1, 1);
    randu(inputB, -1, 1);

    net.setInput(inputA, "first_input");
    net.setInput(inputB, "second_input");
    Mat out = net.forward();

    normAssert(out, inputA + inputB);
}
601
602
// Mask R-CNN: checks the detection output against a stored reference, then
// checks the per-detection segmentation masks for the detected classes.
TEST(Test_TensorFlow, Mask_RCNN)
{
    std::string proto = findDataFile("dnn/mask_rcnn_inception_v2_coco_2018_01_28.pbtxt", false);
    std::string model = findDataFile("dnn/mask_rcnn_inception_v2_coco_2018_01_28.pb", false);

    Net net = readNetFromTensorflow(model, proto);
    Mat img = imread(findDataFile("dnn/street.png", false));
    Mat refDetections = blobFromNPY(path("mask_rcnn_inception_v2_coco_2018_01_28.detection_out.npy"));
    Mat refMasks = blobFromNPY(path("mask_rcnn_inception_v2_coco_2018_01_28.detection_masks.npy"));
    Mat blob = blobFromImage(img, 1.0f, Size(800, 800), Scalar(), true, false);

    net.setPreferableBackend(DNN_BACKEND_OPENCV);

    net.setInput(blob);

    // Mask-RCNN predicts bounding boxes and segmentation masks.
    std::vector<String> outNames(2);
    outNames[0] = "detection_out_final";
    outNames[1] = "detection_masks";

    std::vector<Mat> outs;
    net.forward(outs, outNames);

    Mat outDetections = outs[0];
    Mat outMasks = outs[1];
    normAssertDetections(refDetections, outDetections, "", /*threshold for zero confidence*/1e-5);

    // Output size of masks is NxCxHxW where
    // N - number of detected boxes
    // C - number of classes (excluding background)
    // HxW - segmentation shape
    const int numDetections = outDetections.size[2];

    // Destination: one mask per detection (class channel already selected).
    int masksSize[] = {1, numDetections, outMasks.size[2], outMasks.size[3]};
    Mat masks(4, &masksSize[0], CV_32F);

    std::vector<cv::Range> srcRanges(4, cv::Range::all());
    std::vector<cv::Range> dstRanges(4, cv::Range::all());

    // Flatten to N rows of 7 values so each detection is addressable by row.
    outDetections = outDetections.reshape(1, outDetections.total() / 7);
    for (int i = 0; i < numDetections; ++i)
    {
        // Get a class id for this bounding box and copy mask only for that class.
        int classId = static_cast<int>(outDetections.at<float>(i, 1));
        srcRanges[0] = dstRanges[1] = cv::Range(i, i + 1);
        srcRanges[1] = cv::Range(classId, classId + 1);
        outMasks(srcRanges).copyTo(masks(dstRanges));
    }
    // Compare only the masks for the first numDetections reference boxes.
    cv::Range topRefMasks[] = {Range::all(), Range(0, numDetections), Range::all(), Range::all()};
    normAssert(masks, refMasks(&topRefMasks[0]));
}
653
654
}
655
656