Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Tetragramm
GitHub Repository: Tetragramm/opencv
Path: blob/master/modules/ts/src/ts_func.cpp
16337 views
1
#include "precomp.hpp"
2
#include <float.h>
3
#include <limits.h>
4
#include "opencv2/imgproc/types_c.h"
5
6
using namespace cv;
7
8
namespace cvtest
9
{
10
11
const char* getTypeName( int type )
12
{
13
static const char* type_names[] = { "8u", "8s", "16u", "16s", "32s", "32f", "64f", "ptr" };
14
return type_names[CV_MAT_DEPTH(type)];
15
}
16
17
int typeByName( const char* name )
18
{
19
int i;
20
for( i = 0; i < CV_DEPTH_MAX; i++ )
21
if( strcmp(name, getTypeName(i)) == 0 )
22
return i;
23
return -1;
24
}
25
26
string vec2str( const string& sep, const int* v, size_t nelems )
{
    // Join `nelems` integers with `sep`, e.g. {1,2,3} + "x" -> "1x2x3".
    char buf[32];
    string result;
    for( size_t i = 0; i < nelems; i++ )
    {
        // snprintf instead of sprintf: bounded write, cannot overflow buf.
        snprintf(buf, sizeof(buf), "%d", v[i]);
        result += buf;
        if( i + 1 < nelems )  // no trailing separator
            result += sep;
    }
    return result;
}
39
40
41
Size randomSize(RNG& rng, double maxSizeLog)
{
    // Draw log-uniform dimensions whose combined log does not exceed maxSizeLog,
    // so the total area stays bounded.
    double wlog = rng.uniform(0., maxSizeLog);
    double hlog = rng.uniform(0., maxSizeLog - wlog);
    // Random swap makes width and height identically distributed.
    if( ((unsigned)rng % 2) != 0 )
        std::swap(wlog, hlog);
    Size sz;
    sz.width = cvRound(exp(wlog));
    sz.height = cvRound(exp(hlog));
    return sz;
}
52
53
void randomSize(RNG& rng, int minDims, int maxDims, double maxSizeLog, vector<int>& sz)
{
    // N-dimensional variant: pick a dimensionality in [minDims, maxDims] and
    // split the log-size budget across the dimensions.
    int dims = rng.uniform(minDims, maxDims+1);
    sz.resize(dims);
    for( int i = 0; i < dims; i++ )
    {
        double v = rng.uniform(0., maxSizeLog);
        maxSizeLog -= v;  // remaining budget shrinks with each dimension
        sz[i] = cvRound(exp(v));
    }
    // Random transpositions so the large dimensions can land anywhere.
    for( int i = 0; i < dims; i++ )
    {
        int a = rng.uniform(0, dims);
        int b = rng.uniform(0, dims);
        std::swap(sz[a], sz[b]);
    }
}
70
71
int randomType(RNG& rng, _OutputArray::DepthMask typeMask, int minChannels, int maxChannels)
{
    // Random CV type: channels uniform in [minChannels, maxChannels],
    // depth rejection-sampled from the depths allowed by typeMask.
    int channels = rng.uniform(minChannels, maxChannels+1);
    CV_Assert((typeMask & _OutputArray::DEPTH_MASK_ALL_16F) != 0);
    int depth;
    do
    {
        depth = rng.uniform(CV_8U, CV_16F+1);
    }
    while( ((1 << depth) & typeMask) == 0 );
    return CV_MAKETYPE(depth, channels);
}
84
85
double getMinVal(int depth)
86
{
87
depth = CV_MAT_DEPTH(depth);
88
double val = depth == CV_8U ? 0 : depth == CV_8S ? SCHAR_MIN : depth == CV_16U ? 0 :
89
depth == CV_16S ? SHRT_MIN : depth == CV_32S ? INT_MIN :
90
depth == CV_32F ? -FLT_MAX : depth == CV_64F ? -DBL_MAX : -1;
91
CV_Assert(val != -1);
92
return val;
93
}
94
95
double getMaxVal(int depth)
96
{
97
depth = CV_MAT_DEPTH(depth);
98
double val = depth == CV_8U ? UCHAR_MAX : depth == CV_8S ? SCHAR_MAX : depth == CV_16U ? USHRT_MAX :
99
depth == CV_16S ? SHRT_MAX : depth == CV_32S ? INT_MAX :
100
depth == CV_32F ? FLT_MAX : depth == CV_64F ? DBL_MAX : -1;
101
CV_Assert(val != -1);
102
return val;
103
}
104
105
Mat randomMat(RNG& rng, Size size, int type, double minVal, double maxVal, bool useRoi)
{
    // Uniform-random matrix; when useRoi is set, allocate a slightly larger
    // buffer and return a centered submatrix so the result is a non-continuous ROI.
    Size full = size;
    if( useRoi )
    {
        full.width += std::max(rng.uniform(0, 10) - 5, 0);
        full.height += std::max(rng.uniform(0, 10) - 5, 0);
    }

    Mat m(full, type);
    rng.fill(m, RNG::UNIFORM, minVal, maxVal);
    if( full == size )
        return m;
    // Centered sub-rectangle of the requested size.
    return m(Rect((full.width - size.width)/2, (full.height - size.height)/2,
                  size.width, size.height));
}
121
122
Mat randomMat(RNG& rng, const vector<int>& size, int type, double minVal, double maxVal, bool useRoi)
{
    // N-dimensional counterpart: optionally over-allocate each dimension and
    // return a centered sub-volume of the requested size.
    int dims = (int)size.size();
    vector<int> fullsz(dims);
    vector<Range> roi(dims);
    bool exact = true;
    for( int i = 0; i < dims; i++ )
    {
        fullsz[i] = size[i];
        roi[i] = Range::all();
        if( useRoi )
        {
            fullsz[i] += std::max(rng.uniform(0, 5) - 2, 0);
            roi[i] = Range((fullsz[i] - size[i])/2, (fullsz[i] - size[i])/2 + size[i]);
        }
        exact = exact && size[i] == fullsz[i];
    }

    Mat m(dims, &fullsz[0], type);

    rng.fill(m, RNG::UNIFORM, minVal, maxVal);
    if( exact )
        return m;
    return m(&roi[0]);
}
147
148
void add(const Mat& _a, double alpha, const Mat& _b, double beta,
149
Scalar gamma, Mat& c, int ctype, bool calcAbs)
150
{
151
Mat a = _a, b = _b;
152
if( a.empty() || alpha == 0 )
153
{
154
// both alpha and beta can be 0, but at least one of a and b must be non-empty array,
155
// otherwise we do not know the size of the output (and may be type of the output, when ctype<0)
156
CV_Assert( !a.empty() || !b.empty() );
157
if( !b.empty() )
158
{
159
a = b;
160
alpha = beta;
161
b = Mat();
162
beta = 0;
163
}
164
}
165
if( b.empty() || beta == 0 )
166
{
167
b = Mat();
168
beta = 0;
169
}
170
else
171
CV_Assert(a.size == b.size);
172
173
if( ctype < 0 )
174
ctype = a.depth();
175
ctype = CV_MAKETYPE(CV_MAT_DEPTH(ctype), a.channels());
176
c.create(a.dims, &a.size[0], ctype);
177
const Mat *arrays[] = {&a, &b, &c, 0};
178
Mat planes[3], buf[3];
179
180
NAryMatIterator it(arrays, planes);
181
size_t i, nplanes = it.nplanes;
182
int cn=a.channels();
183
int total = (int)planes[0].total(), maxsize = std::min(12*12*std::max(12/cn, 1), total);
184
185
CV_Assert(planes[0].rows == 1);
186
buf[0].create(1, maxsize, CV_64FC(cn));
187
if(!b.empty())
188
buf[1].create(1, maxsize, CV_64FC(cn));
189
buf[2].create(1, maxsize, CV_64FC(cn));
190
scalarToRawData(gamma, buf[2].ptr(), CV_64FC(cn), (int)(maxsize*cn));
191
192
for( i = 0; i < nplanes; i++, ++it)
193
{
194
for( int j = 0; j < total; j += maxsize )
195
{
196
int j2 = std::min(j + maxsize, total);
197
Mat apart0 = planes[0].colRange(j, j2);
198
Mat cpart0 = planes[2].colRange(j, j2);
199
Mat apart = buf[0].colRange(0, j2 - j);
200
201
apart0.convertTo(apart, apart.type(), alpha);
202
size_t k, n = (j2 - j)*cn;
203
double* aptr = apart.ptr<double>();
204
const double* gptr = buf[2].ptr<double>();
205
206
if( b.empty() )
207
{
208
for( k = 0; k < n; k++ )
209
aptr[k] += gptr[k];
210
}
211
else
212
{
213
Mat bpart0 = planes[1].colRange((int)j, (int)j2);
214
Mat bpart = buf[1].colRange(0, (int)(j2 - j));
215
bpart0.convertTo(bpart, bpart.type(), beta);
216
const double* bptr = bpart.ptr<double>();
217
218
for( k = 0; k < n; k++ )
219
aptr[k] += bptr[k] + gptr[k];
220
}
221
if( calcAbs )
222
for( k = 0; k < n; k++ )
223
aptr[k] = fabs(aptr[k]);
224
apart.convertTo(cpart0, cpart0.type(), 1, 0);
225
}
226
}
227
}
228
229
230
template<typename _Tp1, typename _Tp2> inline void
231
convert_(const _Tp1* src, _Tp2* dst, size_t total, double alpha, double beta)
232
{
233
size_t i;
234
if( alpha == 1 && beta == 0 )
235
for( i = 0; i < total; i++ )
236
dst[i] = saturate_cast<_Tp2>(src[i]);
237
else if( beta == 0 )
238
for( i = 0; i < total; i++ )
239
dst[i] = saturate_cast<_Tp2>(src[i]*alpha);
240
else
241
for( i = 0; i < total; i++ )
242
dst[i] = saturate_cast<_Tp2>(src[i]*alpha + beta);
243
}
244
245
template<typename _Tp> inline void
246
convertTo(const _Tp* src, void* dst, int dtype, size_t total, double alpha, double beta)
247
{
248
switch( CV_MAT_DEPTH(dtype) )
249
{
250
case CV_8U:
251
convert_(src, (uchar*)dst, total, alpha, beta);
252
break;
253
case CV_8S:
254
convert_(src, (schar*)dst, total, alpha, beta);
255
break;
256
case CV_16U:
257
convert_(src, (ushort*)dst, total, alpha, beta);
258
break;
259
case CV_16S:
260
convert_(src, (short*)dst, total, alpha, beta);
261
break;
262
case CV_32S:
263
convert_(src, (int*)dst, total, alpha, beta);
264
break;
265
case CV_32F:
266
convert_(src, (float*)dst, total, alpha, beta);
267
break;
268
case CV_64F:
269
convert_(src, (double*)dst, total, alpha, beta);
270
break;
271
default:
272
CV_Assert(0);
273
}
274
}
275
276
void convert(const Mat& src, cv::OutputArray _dst, int dtype, double alpha, double beta)
277
{
278
if (dtype < 0) dtype = _dst.depth();
279
280
dtype = CV_MAKETYPE(CV_MAT_DEPTH(dtype), src.channels());
281
_dst.create(src.dims, &src.size[0], dtype);
282
Mat dst = _dst.getMat();
283
if( alpha == 0 )
284
{
285
set( dst, Scalar::all(beta) );
286
return;
287
}
288
if( dtype == src.type() && alpha == 1 && beta == 0 )
289
{
290
copy( src, dst );
291
return;
292
}
293
294
const Mat *arrays[]={&src, &dst, 0};
295
Mat planes[2];
296
297
NAryMatIterator it(arrays, planes);
298
size_t total = planes[0].total()*planes[0].channels();
299
size_t i, nplanes = it.nplanes;
300
301
for( i = 0; i < nplanes; i++, ++it)
302
{
303
const uchar* sptr = planes[0].ptr();
304
uchar* dptr = planes[1].ptr();
305
306
switch( src.depth() )
307
{
308
case CV_8U:
309
convertTo((const uchar*)sptr, dptr, dtype, total, alpha, beta);
310
break;
311
case CV_8S:
312
convertTo((const schar*)sptr, dptr, dtype, total, alpha, beta);
313
break;
314
case CV_16U:
315
convertTo((const ushort*)sptr, dptr, dtype, total, alpha, beta);
316
break;
317
case CV_16S:
318
convertTo((const short*)sptr, dptr, dtype, total, alpha, beta);
319
break;
320
case CV_32S:
321
convertTo((const int*)sptr, dptr, dtype, total, alpha, beta);
322
break;
323
case CV_32F:
324
convertTo((const float*)sptr, dptr, dtype, total, alpha, beta);
325
break;
326
case CV_64F:
327
convertTo((const double*)sptr, dptr, dtype, total, alpha, beta);
328
break;
329
}
330
}
331
}
332
333
334
void copy(const Mat& src, Mat& dst, const Mat& mask, bool invertMask)
335
{
336
dst.create(src.dims, &src.size[0], src.type());
337
338
if(mask.empty())
339
{
340
const Mat* arrays[] = {&src, &dst, 0};
341
Mat planes[2];
342
NAryMatIterator it(arrays, planes);
343
size_t i, nplanes = it.nplanes;
344
size_t planeSize = planes[0].total()*src.elemSize();
345
346
for( i = 0; i < nplanes; i++, ++it )
347
memcpy(planes[1].ptr(), planes[0].ptr(), planeSize);
348
349
return;
350
}
351
352
int mcn = mask.channels();
353
CV_Assert( src.size == mask.size && mask.depth() == CV_8U
354
&& (mcn == 1 || mcn == src.channels()) );
355
356
const Mat *arrays[]={&src, &dst, &mask, 0};
357
Mat planes[3];
358
359
NAryMatIterator it(arrays, planes);
360
size_t j, k, elemSize = src.elemSize(), maskElemSize = mask.elemSize(), total = planes[0].total();
361
size_t i, nplanes = it.nplanes;
362
size_t elemSize1 = src.elemSize1();
363
364
for( i = 0; i < nplanes; i++, ++it)
365
{
366
const uchar* sptr = planes[0].ptr();
367
uchar* dptr = planes[1].ptr();
368
const uchar* mptr = planes[2].ptr();
369
for( j = 0; j < total; j++, sptr += elemSize, dptr += elemSize, mptr += maskElemSize )
370
{
371
if( mcn == 1)
372
{
373
if( (mptr[0] != 0) ^ invertMask )
374
for( k = 0; k < elemSize; k++ )
375
dptr[k] = sptr[k];
376
}
377
else
378
{
379
for( int c = 0; c < mcn; c++ )
380
if( (mptr[c] != 0) ^ invertMask )
381
for( k = 0; k < elemSize1; k++ )
382
dptr[k + c * elemSize1] = sptr[k + c * elemSize1];
383
}
384
}
385
}
386
}
387
388
389
void set(Mat& dst, const Scalar& gamma, const Mat& mask)
{
    // Reference masked set: dst = gamma wherever the mask allows.
    // buf holds the raw byte pattern of gamma in dst's element type
    // (at most 4 doubles are needed; 12 leaves generous slack).
    double buf[12];
    scalarToRawData(gamma, &buf, dst.type(), dst.channels());
    const uchar* gptr = (const uchar*)&buf[0];

    if(mask.empty())
    {
        const Mat* arrays[] = {&dst, 0};
        Mat plane;
        NAryMatIterator it(arrays, &plane);
        size_t nplanes = it.nplanes;
        size_t elemSize = dst.elemSize(), planeSize = plane.total()*elemSize;

        // If every byte of the element pattern is identical, memset suffices.
        size_t k;
        for( k = 1; k < elemSize; k++ )
            if( gptr[k] != gptr[0] )
                break;
        bool uniform = k >= elemSize;

        for( size_t i = 0; i < nplanes; i++, ++it )
        {
            uchar* dptr = plane.ptr();
            if( uniform )
                memset( dptr, gptr[0], planeSize );
            else if( i == 0 )
            {
                // Fill the first plane element by element...
                for( size_t j = 0; j < planeSize; j += elemSize, dptr += elemSize )
                    for( k = 0; k < elemSize; k++ )
                        dptr[k] = gptr[k];
            }
            else
                // ...then replicate it into the remaining planes.
                memcpy(dptr, dst.ptr(), planeSize);
        }
        return;
    }

    int cn = dst.channels(), mcn = mask.channels();
    CV_Assert( dst.size == mask.size && (mcn == 1 || mcn == cn) );

    const Mat *arrays[]={&dst, &mask, 0};
    Mat planes[2];

    NAryMatIterator it(arrays, planes);
    size_t elemSize = dst.elemSize(), maskElemSize = mask.elemSize(), total = planes[0].total();
    size_t nplanes = it.nplanes;
    size_t elemSize1 = dst.elemSize1();

    for( size_t i = 0; i < nplanes; i++, ++it)
    {
        uchar* dptr = planes[0].ptr();
        const uchar* mptr = planes[1].ptr();

        for( size_t j = 0; j < total; j++, dptr += elemSize, mptr += maskElemSize )
        {
            if( mcn == 1)
            {
                // Single-channel mask gates the whole element.
                if( mptr[0] )
                    for( size_t k = 0; k < elemSize; k++ )
                        dptr[k] = gptr[k];
            }
            else
            {
                // Multi-channel mask gates each channel separately.
                for( int c = 0; c < mcn; c++ )
                    if( mptr[c] )
                        for( size_t k = 0; k < elemSize1; k++ )
                            dptr[k + c * elemSize1] = gptr[k + c * elemSize1];
            }
        }
    }
}
459
460
461
void insert(const Mat& src, Mat& dst, int coi)
462
{
463
CV_Assert( dst.size == src.size && src.depth() == dst.depth() &&
464
0 <= coi && coi < dst.channels() );
465
466
const Mat* arrays[] = {&src, &dst, 0};
467
Mat planes[2];
468
NAryMatIterator it(arrays, planes);
469
size_t i, nplanes = it.nplanes;
470
size_t j, k, size0 = src.elemSize(), size1 = dst.elemSize(), total = planes[0].total();
471
472
for( i = 0; i < nplanes; i++, ++it )
473
{
474
const uchar* sptr = planes[0].ptr();
475
uchar* dptr = planes[1].ptr() + coi*size0;
476
477
for( j = 0; j < total; j++, sptr += size0, dptr += size1 )
478
{
479
for( k = 0; k < size0; k++ )
480
dptr[k] = sptr[k];
481
}
482
}
483
}
484
485
486
void extract(const Mat& src, Mat& dst, int coi)
487
{
488
dst.create( src.dims, &src.size[0], src.depth() );
489
CV_Assert( 0 <= coi && coi < src.channels() );
490
491
const Mat* arrays[] = {&src, &dst, 0};
492
Mat planes[2];
493
NAryMatIterator it(arrays, planes);
494
size_t i, nplanes = it.nplanes;
495
size_t j, k, size0 = src.elemSize(), size1 = dst.elemSize(), total = planes[0].total();
496
497
for( i = 0; i < nplanes; i++, ++it )
498
{
499
const uchar* sptr = planes[0].ptr() + coi*size1;
500
uchar* dptr = planes[1].ptr();
501
502
for( j = 0; j < total; j++, sptr += size0, dptr += size1 )
503
{
504
for( k = 0; k < size1; k++ )
505
dptr[k] = sptr[k];
506
}
507
}
508
}
509
510
511
void transpose(const Mat& src, Mat& dst)
512
{
513
CV_Assert(src.data != dst.data && "Inplace is not support in cvtest::transpose");
514
CV_Assert(src.dims == 2);
515
dst.create(src.cols, src.rows, src.type());
516
int i, j, k, esz = (int)src.elemSize();
517
518
for( i = 0; i < dst.rows; i++ )
519
{
520
const uchar* sptr = src.ptr(0) + i*esz;
521
uchar* dptr = dst.ptr(i);
522
523
for( j = 0; j < dst.cols; j++, sptr += src.step[0], dptr += esz )
524
{
525
for( k = 0; k < esz; k++ )
526
dptr[k] = sptr[k];
527
}
528
}
529
}
530
531
532
template<typename _Tp> static void
533
randUniInt_(RNG& rng, _Tp* data, size_t total, int cn, const Scalar& scale, const Scalar& delta)
534
{
535
for( size_t i = 0; i < total; i += cn )
536
for( int k = 0; k < cn; k++ )
537
{
538
int val = cvFloor( randInt(rng)*scale[k] + delta[k] );
539
data[i + k] = saturate_cast<_Tp>(val);
540
}
541
}
542
543
544
template<typename _Tp> static void
545
randUniFlt_(RNG& rng, _Tp* data, size_t total, int cn, const Scalar& scale, const Scalar& delta)
546
{
547
for( size_t i = 0; i < total; i += cn )
548
for( int k = 0; k < cn; k++ )
549
{
550
double val = randReal(rng)*scale[k] + delta[k];
551
data[i + k] = saturate_cast<_Tp>(val);
552
}
553
}
554
555
556
void randUni( RNG& rng, Mat& a, const Scalar& param0, const Scalar& param1 )
{
    // Fill `a` with uniform values spanning the range [param0, param1]
    // (either order of the bounds is accepted).
    Scalar scale = param0;
    Scalar delta = param1;
    // Integer depths feed raw 32-bit randInt values into the helpers, so the
    // scale is pre-divided by 2^32 to map them back into the target range.
    double C = a.depth() < CV_32F ? 1./(65536.*65536.) : 1.;

    for( int k = 0; k < 4; k++ )
    {
        // Normalize so delta is the lower bound and scale the (non-negative) width.
        double s = scale.val[k] - delta.val[k];
        if( s >= 0 )
            scale.val[k] = s;
        else
        {
            delta.val[k] = scale.val[k];
            scale.val[k] = -s;
        }
        scale.val[k] *= C;
    }

    const Mat *arrays[]={&a, 0};
    Mat plane;

    NAryMatIterator it(arrays, &plane);
    size_t nplanes = it.nplanes;
    int depth = a.depth(), cn = a.channels();
    size_t total = plane.total()*cn;

    for( size_t i = 0; i < nplanes; i++, ++it )
    {
        switch( depth )
        {
        case CV_8U:
            randUniInt_(rng, plane.ptr<uchar>(), total, cn, scale, delta);
            break;
        case CV_8S:
            randUniInt_(rng, plane.ptr<schar>(), total, cn, scale, delta);
            break;
        case CV_16U:
            randUniInt_(rng, plane.ptr<ushort>(), total, cn, scale, delta);
            break;
        case CV_16S:
            randUniInt_(rng, plane.ptr<short>(), total, cn, scale, delta);
            break;
        case CV_32S:
            randUniInt_(rng, plane.ptr<int>(), total, cn, scale, delta);
            break;
        case CV_32F:
            randUniFlt_(rng, plane.ptr<float>(), total, cn, scale, delta);
            break;
        case CV_64F:
            randUniFlt_(rng, plane.ptr<double>(), total, cn, scale, delta);
            break;
        default:
            CV_Assert(0);
        }
    }
}
613
614
615
template<typename _Tp> static void
616
erode_(const Mat& src, Mat& dst, const vector<int>& ofsvec)
617
{
618
int width = dst.cols*src.channels(), n = (int)ofsvec.size();
619
const int* ofs = &ofsvec[0];
620
621
for( int y = 0; y < dst.rows; y++ )
622
{
623
const _Tp* sptr = src.ptr<_Tp>(y);
624
_Tp* dptr = dst.ptr<_Tp>(y);
625
626
for( int x = 0; x < width; x++ )
627
{
628
_Tp result = sptr[x + ofs[0]];
629
for( int i = 1; i < n; i++ )
630
result = std::min(result, sptr[x + ofs[i]]);
631
dptr[x] = result;
632
}
633
}
634
}
635
636
637
template<typename _Tp> static void
638
dilate_(const Mat& src, Mat& dst, const vector<int>& ofsvec)
639
{
640
int width = dst.cols*src.channels(), n = (int)ofsvec.size();
641
const int* ofs = &ofsvec[0];
642
643
for( int y = 0; y < dst.rows; y++ )
644
{
645
const _Tp* sptr = src.ptr<_Tp>(y);
646
_Tp* dptr = dst.ptr<_Tp>(y);
647
648
for( int x = 0; x < width; x++ )
649
{
650
_Tp result = sptr[x + ofs[0]];
651
for( int i = 1; i < n; i++ )
652
result = std::max(result, sptr[x + ofs[i]]);
653
dptr[x] = result;
654
}
655
}
656
}
657
658
659
void erode(const Mat& _src, Mat& dst, const Mat& _kernel, Point anchor,
660
int borderType, const Scalar& _borderValue)
661
{
662
//if( _src.type() == CV_16UC3 && _src.size() == Size(1, 2) )
663
// putchar('*');
664
Mat kernel = _kernel, src;
665
Scalar borderValue = _borderValue;
666
if( kernel.empty() )
667
kernel = Mat::ones(3, 3, CV_8U);
668
else
669
{
670
CV_Assert( kernel.type() == CV_8U );
671
}
672
if( anchor == Point(-1,-1) )
673
anchor = Point(kernel.cols/2, kernel.rows/2);
674
if( borderType == BORDER_CONSTANT )
675
borderValue = getMaxVal(src.depth());
676
copyMakeBorder(_src, src, anchor.y, kernel.rows - anchor.y - 1,
677
anchor.x, kernel.cols - anchor.x - 1,
678
borderType, borderValue);
679
dst.create( _src.size(), src.type() );
680
681
vector<int> ofs;
682
int step = (int)(src.step/src.elemSize1()), cn = src.channels();
683
for( int i = 0; i < kernel.rows; i++ )
684
for( int j = 0; j < kernel.cols; j++ )
685
if( kernel.at<uchar>(i, j) != 0 )
686
ofs.push_back(i*step + j*cn);
687
if( ofs.empty() )
688
ofs.push_back(anchor.y*step + anchor.x*cn);
689
690
switch( src.depth() )
691
{
692
case CV_8U:
693
erode_<uchar>(src, dst, ofs);
694
break;
695
case CV_8S:
696
erode_<schar>(src, dst, ofs);
697
break;
698
case CV_16U:
699
erode_<ushort>(src, dst, ofs);
700
break;
701
case CV_16S:
702
erode_<short>(src, dst, ofs);
703
break;
704
case CV_32S:
705
erode_<int>(src, dst, ofs);
706
break;
707
case CV_32F:
708
erode_<float>(src, dst, ofs);
709
break;
710
case CV_64F:
711
erode_<double>(src, dst, ofs);
712
break;
713
default:
714
CV_Assert(0);
715
}
716
}
717
718
void dilate(const Mat& _src, Mat& dst, const Mat& _kernel, Point anchor,
719
int borderType, const Scalar& _borderValue)
720
{
721
Mat kernel = _kernel, src;
722
Scalar borderValue = _borderValue;
723
if( kernel.empty() )
724
kernel = Mat::ones(3, 3, CV_8U);
725
else
726
{
727
CV_Assert( kernel.type() == CV_8U );
728
}
729
if( anchor == Point(-1,-1) )
730
anchor = Point(kernel.cols/2, kernel.rows/2);
731
if( borderType == BORDER_CONSTANT )
732
borderValue = getMinVal(src.depth());
733
copyMakeBorder(_src, src, anchor.y, kernel.rows - anchor.y - 1,
734
anchor.x, kernel.cols - anchor.x - 1,
735
borderType, borderValue);
736
dst.create( _src.size(), src.type() );
737
738
vector<int> ofs;
739
int step = (int)(src.step/src.elemSize1()), cn = src.channels();
740
for( int i = 0; i < kernel.rows; i++ )
741
for( int j = 0; j < kernel.cols; j++ )
742
if( kernel.at<uchar>(i, j) != 0 )
743
ofs.push_back(i*step + j*cn);
744
if( ofs.empty() )
745
ofs.push_back(anchor.y*step + anchor.x*cn);
746
747
switch( src.depth() )
748
{
749
case CV_8U:
750
dilate_<uchar>(src, dst, ofs);
751
break;
752
case CV_8S:
753
dilate_<schar>(src, dst, ofs);
754
break;
755
case CV_16U:
756
dilate_<ushort>(src, dst, ofs);
757
break;
758
case CV_16S:
759
dilate_<short>(src, dst, ofs);
760
break;
761
case CV_32S:
762
dilate_<int>(src, dst, ofs);
763
break;
764
case CV_32F:
765
dilate_<float>(src, dst, ofs);
766
break;
767
case CV_64F:
768
dilate_<double>(src, dst, ofs);
769
break;
770
default:
771
CV_Assert(0);
772
}
773
}
774
775
776
template<typename _Tp> static void
777
filter2D_(const Mat& src, Mat& dst, const vector<int>& ofsvec, const vector<double>& coeffvec)
778
{
779
const int* ofs = &ofsvec[0];
780
const double* coeff = &coeffvec[0];
781
int width = dst.cols*dst.channels(), ncoeffs = (int)ofsvec.size();
782
783
for( int y = 0; y < dst.rows; y++ )
784
{
785
const _Tp* sptr = src.ptr<_Tp>(y);
786
double* dptr = dst.ptr<double>(y);
787
788
for( int x = 0; x < width; x++ )
789
{
790
double s = 0;
791
for( int i = 0; i < ncoeffs; i++ )
792
s += sptr[x + ofs[i]]*coeff[i];
793
dptr[x] = s;
794
}
795
}
796
}
797
798
799
void filter2D(const Mat& _src, Mat& dst, int ddepth, const Mat& kernel,
800
Point anchor, double delta, int borderType, const Scalar& _borderValue)
801
{
802
Mat src, _dst;
803
Scalar borderValue = _borderValue;
804
CV_Assert( kernel.type() == CV_32F || kernel.type() == CV_64F );
805
if( anchor == Point(-1,-1) )
806
anchor = Point(kernel.cols/2, kernel.rows/2);
807
if( borderType == BORDER_CONSTANT )
808
borderValue = getMinVal(src.depth());
809
copyMakeBorder(_src, src, anchor.y, kernel.rows - anchor.y - 1,
810
anchor.x, kernel.cols - anchor.x - 1,
811
borderType, borderValue);
812
_dst.create( _src.size(), CV_MAKETYPE(CV_64F, src.channels()) );
813
814
vector<int> ofs;
815
vector<double> coeff(kernel.rows*kernel.cols);
816
Mat cmat(kernel.rows, kernel.cols, CV_64F, &coeff[0]);
817
convert(kernel, cmat, cmat.type());
818
819
int step = (int)(src.step/src.elemSize1()), cn = src.channels();
820
for( int i = 0; i < kernel.rows; i++ )
821
for( int j = 0; j < kernel.cols; j++ )
822
ofs.push_back(i*step + j*cn);
823
824
switch( src.depth() )
825
{
826
case CV_8U:
827
filter2D_<uchar>(src, _dst, ofs, coeff);
828
break;
829
case CV_8S:
830
filter2D_<schar>(src, _dst, ofs, coeff);
831
break;
832
case CV_16U:
833
filter2D_<ushort>(src, _dst, ofs, coeff);
834
break;
835
case CV_16S:
836
filter2D_<short>(src, _dst, ofs, coeff);
837
break;
838
case CV_32S:
839
filter2D_<int>(src, _dst, ofs, coeff);
840
break;
841
case CV_32F:
842
filter2D_<float>(src, _dst, ofs, coeff);
843
break;
844
case CV_64F:
845
filter2D_<double>(src, _dst, ofs, coeff);
846
break;
847
default:
848
CV_Assert(0);
849
}
850
851
convert(_dst, dst, ddepth, 1, delta);
852
}
853
854
855
static int borderInterpolate( int p, int len, int borderType )
856
{
857
if( (unsigned)p < (unsigned)len )
858
;
859
else if( borderType == BORDER_REPLICATE )
860
p = p < 0 ? 0 : len - 1;
861
else if( borderType == BORDER_REFLECT || borderType == BORDER_REFLECT_101 )
862
{
863
int delta = borderType == BORDER_REFLECT_101;
864
if( len == 1 )
865
return 0;
866
do
867
{
868
if( p < 0 )
869
p = -p - 1 + delta;
870
else
871
p = len - 1 - (p - len) - delta;
872
}
873
while( (unsigned)p >= (unsigned)len );
874
}
875
else if( borderType == BORDER_WRAP )
876
{
877
if( p < 0 )
878
p -= ((p-len+1)/len)*len;
879
if( p >= len )
880
p %= len;
881
}
882
else if( borderType == BORDER_CONSTANT )
883
p = -1;
884
else
885
CV_Error( Error::StsBadArg, "Unknown/unsupported border type" );
886
return p;
887
}
888
889
890
void copyMakeBorder(const Mat& src, Mat& dst, int top, int bottom, int left, int right,
891
int borderType, const Scalar& borderValue)
892
{
893
dst.create(src.rows + top + bottom, src.cols + left + right, src.type());
894
int i, j, k, esz = (int)src.elemSize();
895
int width = src.cols*esz, width1 = dst.cols*esz;
896
897
if( borderType == BORDER_CONSTANT )
898
{
899
vector<uchar> valvec((src.cols + left + right)*esz);
900
uchar* val = &valvec[0];
901
scalarToRawData(borderValue, val, src.type(), (src.cols + left + right)*src.channels());
902
903
left *= esz;
904
right *= esz;
905
for( i = 0; i < src.rows; i++ )
906
{
907
const uchar* sptr = src.ptr(i);
908
uchar* dptr = dst.ptr(i + top) + left;
909
for( j = 0; j < left; j++ )
910
dptr[j - left] = val[j];
911
if( dptr != sptr )
912
for( j = 0; j < width; j++ )
913
dptr[j] = sptr[j];
914
for( j = 0; j < right; j++ )
915
dptr[j + width] = val[j];
916
}
917
918
for( i = 0; i < top; i++ )
919
{
920
uchar* dptr = dst.ptr(i);
921
for( j = 0; j < width1; j++ )
922
dptr[j] = val[j];
923
}
924
925
for( i = 0; i < bottom; i++ )
926
{
927
uchar* dptr = dst.ptr(i + top + src.rows);
928
for( j = 0; j < width1; j++ )
929
dptr[j] = val[j];
930
}
931
}
932
else
933
{
934
vector<int> tabvec((left + right)*esz + 1);
935
int* ltab = &tabvec[0];
936
int* rtab = &tabvec[left*esz];
937
for( i = 0; i < left; i++ )
938
{
939
j = borderInterpolate(i - left, src.cols, borderType)*esz;
940
for( k = 0; k < esz; k++ )
941
ltab[i*esz + k] = j + k;
942
}
943
for( i = 0; i < right; i++ )
944
{
945
j = borderInterpolate(src.cols + i, src.cols, borderType)*esz;
946
for( k = 0; k < esz; k++ )
947
rtab[i*esz + k] = j + k;
948
}
949
950
left *= esz;
951
right *= esz;
952
for( i = 0; i < src.rows; i++ )
953
{
954
const uchar* sptr = src.ptr(i);
955
uchar* dptr = dst.ptr(i + top);
956
957
for( j = 0; j < left; j++ )
958
dptr[j] = sptr[ltab[j]];
959
if( dptr + left != sptr )
960
{
961
for( j = 0; j < width; j++ )
962
dptr[j + left] = sptr[j];
963
}
964
for( j = 0; j < right; j++ )
965
dptr[j + left + width] = sptr[rtab[j]];
966
}
967
968
for( i = 0; i < top; i++ )
969
{
970
j = borderInterpolate(i - top, src.rows, borderType);
971
const uchar* sptr = dst.ptr(j + top);
972
uchar* dptr = dst.ptr(i);
973
974
for( k = 0; k < width1; k++ )
975
dptr[k] = sptr[k];
976
}
977
978
for( i = 0; i < bottom; i++ )
979
{
980
j = borderInterpolate(i + src.rows, src.rows, borderType);
981
const uchar* sptr = dst.ptr(j + top);
982
uchar* dptr = dst.ptr(i + top + src.rows);
983
984
for( k = 0; k < width1; k++ )
985
dptr[k] = sptr[k];
986
}
987
}
988
}
989
990
991
template<typename _Tp> static void
992
minMaxLoc_(const _Tp* src, size_t total, size_t startidx,
993
double* _minval, double* _maxval,
994
size_t* _minpos, size_t* _maxpos,
995
const uchar* mask)
996
{
997
_Tp maxval = saturate_cast<_Tp>(*_maxval), minval = saturate_cast<_Tp>(*_minval);
998
size_t minpos = *_minpos, maxpos = *_maxpos;
999
1000
if( !mask )
1001
{
1002
for( size_t i = 0; i < total; i++ )
1003
{
1004
_Tp val = src[i];
1005
if( minval > val || !minpos )
1006
{
1007
minval = val;
1008
minpos = startidx + i;
1009
}
1010
if( maxval < val || !maxpos )
1011
{
1012
maxval = val;
1013
maxpos = startidx + i;
1014
}
1015
}
1016
}
1017
else
1018
{
1019
for( size_t i = 0; i < total; i++ )
1020
{
1021
_Tp val = src[i];
1022
if( (minval > val || !minpos) && mask[i] )
1023
{
1024
minval = val;
1025
minpos = startidx + i;
1026
}
1027
if( (maxval < val || !maxpos) && mask[i] )
1028
{
1029
maxval = val;
1030
maxpos = startidx + i;
1031
}
1032
}
1033
}
1034
1035
*_maxval = maxval;
1036
*_minval = minval;
1037
*_maxpos = maxpos;
1038
*_minpos = minpos;
1039
}
1040
1041
1042
static void setpos( const Mat& mtx, vector<int>& pos, size_t idx )
1043
{
1044
pos.resize(mtx.dims);
1045
if( idx > 0 )
1046
{
1047
idx--;
1048
for( int i = mtx.dims-1; i >= 0; i-- )
1049
{
1050
int sz = mtx.size[i]*(i == mtx.dims-1 ? mtx.channels() : 1);
1051
pos[i] = (int)(idx % sz);
1052
idx /= sz;
1053
}
1054
}
1055
else
1056
{
1057
for( int i = mtx.dims-1; i >= 0; i-- )
1058
pos[i] = -1;
1059
}
1060
}
1061
1062
void minMaxLoc(const Mat& src, double* _minval, double* _maxval,
1063
vector<int>* _minloc, vector<int>* _maxloc,
1064
const Mat& mask)
1065
{
1066
CV_Assert( src.channels() == 1 );
1067
const Mat *arrays[]={&src, &mask, 0};
1068
Mat planes[2];
1069
1070
NAryMatIterator it(arrays, planes);
1071
size_t startidx = 1, total = planes[0].total();
1072
size_t i, nplanes = it.nplanes;
1073
int depth = src.depth();
1074
double minval = 0;
1075
double maxval = 0;
1076
size_t maxidx = 0, minidx = 0;
1077
1078
for( i = 0; i < nplanes; i++, ++it, startidx += total )
1079
{
1080
const uchar* sptr = planes[0].ptr();
1081
const uchar* mptr = planes[1].ptr();
1082
1083
switch( depth )
1084
{
1085
case CV_8U:
1086
minMaxLoc_((const uchar*)sptr, total, startidx,
1087
&minval, &maxval, &minidx, &maxidx, mptr);
1088
break;
1089
case CV_8S:
1090
minMaxLoc_((const schar*)sptr, total, startidx,
1091
&minval, &maxval, &minidx, &maxidx, mptr);
1092
break;
1093
case CV_16U:
1094
minMaxLoc_((const ushort*)sptr, total, startidx,
1095
&minval, &maxval, &minidx, &maxidx, mptr);
1096
break;
1097
case CV_16S:
1098
minMaxLoc_((const short*)sptr, total, startidx,
1099
&minval, &maxval, &minidx, &maxidx, mptr);
1100
break;
1101
case CV_32S:
1102
minMaxLoc_((const int*)sptr, total, startidx,
1103
&minval, &maxval, &minidx, &maxidx, mptr);
1104
break;
1105
case CV_32F:
1106
minMaxLoc_((const float*)sptr, total, startidx,
1107
&minval, &maxval, &minidx, &maxidx, mptr);
1108
break;
1109
case CV_64F:
1110
minMaxLoc_((const double*)sptr, total, startidx,
1111
&minval, &maxval, &minidx, &maxidx, mptr);
1112
break;
1113
default:
1114
CV_Assert(0);
1115
}
1116
}
1117
1118
if( _maxval )
1119
*_maxval = maxval;
1120
if( _minval )
1121
*_minval = minval;
1122
if( _maxloc )
1123
setpos( src, *_maxloc, maxidx );
1124
if( _minloc )
1125
setpos( src, *_minloc, minidx );
1126
}
1127
1128
1129
static int
1130
normHamming(const uchar* src, size_t total, int cellSize)
1131
{
1132
int result = 0;
1133
int mask = cellSize == 1 ? 1 : cellSize == 2 ? 3 : cellSize == 4 ? 15 : -1;
1134
CV_Assert( mask >= 0 );
1135
1136
for( size_t i = 0; i < total; i++ )
1137
{
1138
unsigned a = src[i];
1139
for( ; a != 0; a >>= cellSize )
1140
result += (a & mask) != 0;
1141
}
1142
return result;
1143
}
1144
1145
1146
template<typename _Tp> static double
1147
norm_(const _Tp* src, size_t total, int cn, int normType, double startval, const uchar* mask)
1148
{
1149
size_t i;
1150
double result = startval;
1151
if( !mask )
1152
total *= cn;
1153
1154
if( normType == NORM_INF )
1155
{
1156
if( !mask )
1157
for( i = 0; i < total; i++ )
1158
result = std::max(result, (double)std::abs(0+src[i]));// trick with 0 used to quiet gcc warning
1159
else
1160
for( int c = 0; c < cn; c++ )
1161
{
1162
for( i = 0; i < total; i++ )
1163
if( mask[i] )
1164
result = std::max(result, (double)std::abs(0+src[i*cn + c]));
1165
}
1166
}
1167
else if( normType == NORM_L1 )
1168
{
1169
if( !mask )
1170
for( i = 0; i < total; i++ )
1171
result += std::abs(0+src[i]);
1172
else
1173
for( int c = 0; c < cn; c++ )
1174
{
1175
for( i = 0; i < total; i++ )
1176
if( mask[i] )
1177
result += std::abs(0+src[i*cn + c]);
1178
}
1179
}
1180
else
1181
{
1182
if( !mask )
1183
for( i = 0; i < total; i++ )
1184
{
1185
double v = src[i];
1186
result += v*v;
1187
}
1188
else
1189
for( int c = 0; c < cn; c++ )
1190
{
1191
for( i = 0; i < total; i++ )
1192
if( mask[i] )
1193
{
1194
double v = src[i*cn + c];
1195
result += v*v;
1196
}
1197
}
1198
}
1199
return result;
1200
}
1201
1202
1203
template<typename _Tp> static double
1204
norm_(const _Tp* src1, const _Tp* src2, size_t total, int cn, int normType, double startval, const uchar* mask)
1205
{
1206
size_t i;
1207
double result = startval;
1208
if( !mask )
1209
total *= cn;
1210
1211
if( normType == NORM_INF )
1212
{
1213
if( !mask )
1214
for( i = 0; i < total; i++ )
1215
result = std::max(result, (double)std::abs(src1[i] - src2[i]));
1216
else
1217
for( int c = 0; c < cn; c++ )
1218
{
1219
for( i = 0; i < total; i++ )
1220
if( mask[i] )
1221
result = std::max(result, (double)std::abs(src1[i*cn + c] - src2[i*cn + c]));
1222
}
1223
}
1224
else if( normType == NORM_L1 )
1225
{
1226
if( !mask )
1227
for( i = 0; i < total; i++ )
1228
result += std::abs(src1[i] - src2[i]);
1229
else
1230
for( int c = 0; c < cn; c++ )
1231
{
1232
for( i = 0; i < total; i++ )
1233
if( mask[i] )
1234
result += std::abs(src1[i*cn + c] - src2[i*cn + c]);
1235
}
1236
}
1237
else
1238
{
1239
if( !mask )
1240
for( i = 0; i < total; i++ )
1241
{
1242
double v = src1[i] - src2[i];
1243
result += v*v;
1244
}
1245
else
1246
for( int c = 0; c < cn; c++ )
1247
{
1248
for( i = 0; i < total; i++ )
1249
if( mask[i] )
1250
{
1251
double v = src1[i*cn + c] - src2[i*cn + c];
1252
result += v*v;
1253
}
1254
}
1255
}
1256
return result;
1257
}
1258
1259
1260
// Reference implementation of cv::norm for the test suite.
// Supports NORM_INF/L1/L2/L2SQR/HAMMING/HAMMING2 with an optional 8U mask.
// CV_16F input is promoted to CV_32F and reprocessed recursively.
double norm(InputArray _src, int normType, InputArray _mask)
{
    Mat src = _src.getMat(), mask = _mask.getMat();
    if( src.depth() == CV_16F )
    {
        Mat src32f;
        src.convertTo(src32f, CV_32F);
        return cvtest::norm(src32f, normType, _mask);
    }

    if( normType == NORM_HAMMING || normType == NORM_HAMMING2 )
    {
        // a mask simply zeroes out the ignored bytes before counting bits
        if( !mask.empty() )
        {
            Mat temp;
            bitwise_and(src, mask, temp);
            return cvtest::norm(temp, normType, Mat());
        }

        CV_Assert( src.depth() == CV_8U );

        const Mat *arrays[]={&src, 0};
        Mat planes[1];

        NAryMatIterator it(arrays, planes);
        size_t total = planes[0].total();
        size_t i, nplanes = it.nplanes;
        double result = 0;
        int cellSize = normType == NORM_HAMMING ? 1 : 2;

        for( i = 0; i < nplanes; i++, ++it )
            result += normHamming(planes[0].ptr(), total, cellSize);
        return result;
    }
    // NORM_L2SQR is accumulated like NORM_L2 but without the final sqrt
    int normType0 = normType;
    normType = normType == NORM_L2SQR ? NORM_L2 : normType;

    CV_Assert( mask.empty() || (src.size == mask.size && mask.type() == CV_8U) );
    CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );

    const Mat *arrays[]={&src, &mask, 0};
    Mat planes[2];

    NAryMatIterator it(arrays, planes);
    size_t total = planes[0].total();
    size_t i, nplanes = it.nplanes;
    int depth = src.depth(), cn = planes[0].channels();
    double result = 0;

    for( i = 0; i < nplanes; i++, ++it )
    {
        const uchar* sptr = planes[0].ptr();
        const uchar* mptr = planes[1].ptr();

        switch( depth )
        {
        case CV_8U:
            result = norm_((const uchar*)sptr, total, cn, normType, result, mptr);
            break;
        case CV_8S:
            result = norm_((const schar*)sptr, total, cn, normType, result, mptr);
            break;
        case CV_16U:
            result = norm_((const ushort*)sptr, total, cn, normType, result, mptr);
            break;
        case CV_16S:
            result = norm_((const short*)sptr, total, cn, normType, result, mptr);
            break;
        case CV_32S:
            result = norm_((const int*)sptr, total, cn, normType, result, mptr);
            break;
        case CV_32F:
            result = norm_((const float*)sptr, total, cn, normType, result, mptr);
            break;
        case CV_64F:
            result = norm_((const double*)sptr, total, cn, normType, result, mptr);
            break;
        default:
            CV_Error(Error::StsUnsupportedFormat, "");
        };
    }
    if( normType0 == NORM_L2 )
        result = sqrt(result);
    return result;
}

// Reference implementation of the two-array (difference) norm.
// Supports the same norm types as the single-array overload plus the
// NORM_RELATIVE flag, which divides by norm(src2) at the end.
// CV_16F inputs are promoted to CV_32F and reprocessed recursively.
double norm(InputArray _src1, InputArray _src2, int normType, InputArray _mask)
{
    Mat src1 = _src1.getMat(), src2 = _src2.getMat(), mask = _mask.getMat();
    if( src1.depth() == CV_16F )
    {
        Mat src1_32f, src2_32f;
        src1.convertTo(src1_32f, CV_32F);
        src2.convertTo(src2_32f, CV_32F);
        return cvtest::norm(src1_32f, src2_32f, normType, _mask);
    }

    bool isRelative = (normType & NORM_RELATIVE) != 0;
    normType &= ~NORM_RELATIVE;

    if( normType == NORM_HAMMING || normType == NORM_HAMMING2 )
    {
        // Hamming distance of two arrays == Hamming norm of their XOR
        Mat temp;
        bitwise_xor(src1, src2, temp);
        if( !mask.empty() )
            bitwise_and(temp, mask, temp);

        CV_Assert( temp.depth() == CV_8U );

        const Mat *arrays[]={&temp, 0};
        Mat planes[1];

        NAryMatIterator it(arrays, planes);
        size_t total = planes[0].total();
        size_t i, nplanes = it.nplanes;
        double result = 0;
        int cellSize = normType == NORM_HAMMING ? 1 : 2;

        for( i = 0; i < nplanes; i++, ++it )
            result += normHamming(planes[0].ptr(), total, cellSize);
        return result;
    }
    int normType0 = normType;
    normType = normType == NORM_L2SQR ? NORM_L2 : normType;

    CV_Assert( src1.type() == src2.type() && src1.size == src2.size );
    CV_Assert( mask.empty() || (src1.size == mask.size && mask.type() == CV_8U) );
    CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );
    const Mat *arrays[]={&src1, &src2, &mask, 0};
    Mat planes[3];

    NAryMatIterator it(arrays, planes);
    size_t total = planes[0].total();
    size_t i, nplanes = it.nplanes;
    int depth = src1.depth(), cn = planes[0].channels();
    double result = 0;

    for( i = 0; i < nplanes; i++, ++it )
    {
        const uchar* sptr1 = planes[0].ptr();
        const uchar* sptr2 = planes[1].ptr();
        const uchar* mptr = planes[2].ptr();

        switch( depth )
        {
        case CV_8U:
            result = norm_((const uchar*)sptr1, (const uchar*)sptr2, total, cn, normType, result, mptr);
            break;
        case CV_8S:
            result = norm_((const schar*)sptr1, (const schar*)sptr2, total, cn, normType, result, mptr);
            break;
        case CV_16U:
            result = norm_((const ushort*)sptr1, (const ushort*)sptr2, total, cn, normType, result, mptr);
            break;
        case CV_16S:
            result = norm_((const short*)sptr1, (const short*)sptr2, total, cn, normType, result, mptr);
            break;
        case CV_32S:
            result = norm_((const int*)sptr1, (const int*)sptr2, total, cn, normType, result, mptr);
            break;
        case CV_32F:
            result = norm_((const float*)sptr1, (const float*)sptr2, total, cn, normType, result, mptr);
            break;
        case CV_64F:
            result = norm_((const double*)sptr1, (const double*)sptr2, total, cn, normType, result, mptr);
            break;
        default:
            CV_Error(Error::StsUnsupportedFormat, "");
        };
    }
    if( normType0 == NORM_L2 )
        result = sqrt(result);
    // DBL_EPSILON guards against division by zero when src2 is all zeros
    return isRelative ? result / (cvtest::norm(src2, normType) + DBL_EPSILON) : result;
}

// Reference PSNR (peak signal-to-noise ratio) in dB for 8-bit images:
// 20*log10(255/RMSE). DBL_EPSILON avoids log10 of infinity when the
// images are identical (diff == 0).
double PSNR(InputArray _src1, InputArray _src2)
{
    CV_Assert( _src1.depth() == CV_8U );
    double diff = std::sqrt(cvtest::norm(_src1, _src2, NORM_L2SQR)/(_src1.total()*_src1.channels()));
    return 20*log10(255./(diff+DBL_EPSILON));
}

// Dot product of two flat buffers, accumulated in double to avoid
// overflow/precision loss for integer and float element types.
template<typename _Tp> static double
crossCorr_(const _Tp* src1, const _Tp* src2, size_t total)
{
    double result = 0;
    for( size_t i = 0; i < total; i++ )
        result += (double)src1[i]*src2[i];
    return result;
}

double crossCorr(const Mat& src1, const Mat& src2)
1453
{
1454
CV_Assert( src1.size == src2.size && src1.type() == src2.type() );
1455
const Mat *arrays[]={&src1, &src2, 0};
1456
Mat planes[2];
1457
1458
NAryMatIterator it(arrays, planes);
1459
size_t total = planes[0].total()*planes[0].channels();
1460
size_t i, nplanes = it.nplanes;
1461
int depth = src1.depth();
1462
double result = 0;
1463
1464
for( i = 0; i < nplanes; i++, ++it )
1465
{
1466
const uchar* sptr1 = planes[0].ptr();
1467
const uchar* sptr2 = planes[1].ptr();
1468
1469
switch( depth )
1470
{
1471
case CV_8U:
1472
result += crossCorr_((const uchar*)sptr1, (const uchar*)sptr2, total);
1473
break;
1474
case CV_8S:
1475
result += crossCorr_((const schar*)sptr1, (const schar*)sptr2, total);
1476
break;
1477
case CV_16U:
1478
result += crossCorr_((const ushort*)sptr1, (const ushort*)sptr2, total);
1479
break;
1480
case CV_16S:
1481
result += crossCorr_((const short*)sptr1, (const short*)sptr2, total);
1482
break;
1483
case CV_32S:
1484
result += crossCorr_((const int*)sptr1, (const int*)sptr2, total);
1485
break;
1486
case CV_32F:
1487
result += crossCorr_((const float*)sptr1, (const float*)sptr2, total);
1488
break;
1489
case CV_64F:
1490
result += crossCorr_((const double*)sptr1, (const double*)sptr2, total);
1491
break;
1492
default:
1493
CV_Error(Error::StsUnsupportedFormat, "");
1494
};
1495
}
1496
return result;
1497
}
1498
1499
1500
static void
1501
logicOp_(const uchar* src1, const uchar* src2, uchar* dst, size_t total, char c)
1502
{
1503
size_t i;
1504
if( c == '&' )
1505
for( i = 0; i < total; i++ )
1506
dst[i] = src1[i] & src2[i];
1507
else if( c == '|' )
1508
for( i = 0; i < total; i++ )
1509
dst[i] = src1[i] | src2[i];
1510
else
1511
for( i = 0; i < total; i++ )
1512
dst[i] = src1[i] ^ src2[i];
1513
}
1514
1515
static void
1516
logicOpS_(const uchar* src, const uchar* scalar, uchar* dst, size_t total, char c)
1517
{
1518
const size_t blockSize = 96;
1519
size_t i, j;
1520
if( c == '&' )
1521
for( i = 0; i < total; i += blockSize, dst += blockSize, src += blockSize )
1522
{
1523
size_t sz = MIN(total - i, blockSize);
1524
for( j = 0; j < sz; j++ )
1525
dst[j] = src[j] & scalar[j];
1526
}
1527
else if( c == '|' )
1528
for( i = 0; i < total; i += blockSize, dst += blockSize, src += blockSize )
1529
{
1530
size_t sz = MIN(total - i, blockSize);
1531
for( j = 0; j < sz; j++ )
1532
dst[j] = src[j] | scalar[j];
1533
}
1534
else if( c == '^' )
1535
{
1536
for( i = 0; i < total; i += blockSize, dst += blockSize, src += blockSize )
1537
{
1538
size_t sz = MIN(total - i, blockSize);
1539
for( j = 0; j < sz; j++ )
1540
dst[j] = src[j] ^ scalar[j];
1541
}
1542
}
1543
else
1544
for( i = 0; i < total; i++ )
1545
dst[i] = ~src[i];
1546
}
1547
1548
1549
void logicOp( const Mat& src1, const Mat& src2, Mat& dst, char op )
1550
{
1551
CV_Assert( op == '&' || op == '|' || op == '^' );
1552
CV_Assert( src1.type() == src2.type() && src1.size == src2.size );
1553
dst.create( src1.dims, &src1.size[0], src1.type() );
1554
const Mat *arrays[]={&src1, &src2, &dst, 0};
1555
Mat planes[3];
1556
1557
NAryMatIterator it(arrays, planes);
1558
size_t total = planes[0].total()*planes[0].elemSize();
1559
size_t i, nplanes = it.nplanes;
1560
1561
for( i = 0; i < nplanes; i++, ++it )
1562
{
1563
const uchar* sptr1 = planes[0].ptr();
1564
const uchar* sptr2 = planes[1].ptr();
1565
uchar* dptr = planes[2].ptr();
1566
1567
logicOp_(sptr1, sptr2, dptr, total, op);
1568
}
1569
}
1570
1571
1572
void logicOp(const Mat& src, const Scalar& s, Mat& dst, char op)
1573
{
1574
CV_Assert( op == '&' || op == '|' || op == '^' || op == '~' );
1575
dst.create( src.dims, &src.size[0], src.type() );
1576
const Mat *arrays[]={&src, &dst, 0};
1577
Mat planes[2];
1578
1579
NAryMatIterator it(arrays, planes);
1580
size_t total = planes[0].total()*planes[0].elemSize();
1581
size_t i, nplanes = it.nplanes;
1582
double buf[12];
1583
scalarToRawData(s, buf, src.type(), (int)(96/planes[0].elemSize1()));
1584
1585
for( i = 0; i < nplanes; i++, ++it )
1586
{
1587
const uchar* sptr = planes[0].ptr();
1588
uchar* dptr = planes[1].ptr();
1589
1590
logicOpS_(sptr, (uchar*)&buf[0], dptr, total, op);
1591
}
1592
}
1593
1594
1595
template<typename _Tp> static void
1596
compare_(const _Tp* src1, const _Tp* src2, uchar* dst, size_t total, int cmpop)
1597
{
1598
size_t i;
1599
switch( cmpop )
1600
{
1601
case CMP_LT:
1602
for( i = 0; i < total; i++ )
1603
dst[i] = src1[i] < src2[i] ? 255 : 0;
1604
break;
1605
case CMP_LE:
1606
for( i = 0; i < total; i++ )
1607
dst[i] = src1[i] <= src2[i] ? 255 : 0;
1608
break;
1609
case CMP_EQ:
1610
for( i = 0; i < total; i++ )
1611
dst[i] = src1[i] == src2[i] ? 255 : 0;
1612
break;
1613
case CMP_NE:
1614
for( i = 0; i < total; i++ )
1615
dst[i] = src1[i] != src2[i] ? 255 : 0;
1616
break;
1617
case CMP_GE:
1618
for( i = 0; i < total; i++ )
1619
dst[i] = src1[i] >= src2[i] ? 255 : 0;
1620
break;
1621
case CMP_GT:
1622
for( i = 0; i < total; i++ )
1623
dst[i] = src1[i] > src2[i] ? 255 : 0;
1624
break;
1625
default:
1626
CV_Error(Error::StsBadArg, "Unknown comparison operation");
1627
}
1628
}
1629
1630
1631
template<typename _Tp, typename _WTp> static void
1632
compareS_(const _Tp* src1, _WTp value, uchar* dst, size_t total, int cmpop)
1633
{
1634
size_t i;
1635
switch( cmpop )
1636
{
1637
case CMP_LT:
1638
for( i = 0; i < total; i++ )
1639
dst[i] = src1[i] < value ? 255 : 0;
1640
break;
1641
case CMP_LE:
1642
for( i = 0; i < total; i++ )
1643
dst[i] = src1[i] <= value ? 255 : 0;
1644
break;
1645
case CMP_EQ:
1646
for( i = 0; i < total; i++ )
1647
dst[i] = src1[i] == value ? 255 : 0;
1648
break;
1649
case CMP_NE:
1650
for( i = 0; i < total; i++ )
1651
dst[i] = src1[i] != value ? 255 : 0;
1652
break;
1653
case CMP_GE:
1654
for( i = 0; i < total; i++ )
1655
dst[i] = src1[i] >= value ? 255 : 0;
1656
break;
1657
case CMP_GT:
1658
for( i = 0; i < total; i++ )
1659
dst[i] = src1[i] > value ? 255 : 0;
1660
break;
1661
default:
1662
CV_Error(Error::StsBadArg, "Unknown comparison operation");
1663
}
1664
}
1665
1666
1667
void compare(const Mat& src1, const Mat& src2, Mat& dst, int cmpop)
1668
{
1669
CV_Assert( src1.type() == src2.type() && src1.channels() == 1 && src1.size == src2.size );
1670
dst.create( src1.dims, &src1.size[0], CV_8U );
1671
const Mat *arrays[]={&src1, &src2, &dst, 0};
1672
Mat planes[3];
1673
1674
NAryMatIterator it(arrays, planes);
1675
size_t total = planes[0].total();
1676
size_t i, nplanes = it.nplanes;
1677
int depth = src1.depth();
1678
1679
for( i = 0; i < nplanes; i++, ++it )
1680
{
1681
const uchar* sptr1 = planes[0].ptr();
1682
const uchar* sptr2 = planes[1].ptr();
1683
uchar* dptr = planes[2].ptr();
1684
1685
switch( depth )
1686
{
1687
case CV_8U:
1688
compare_((const uchar*)sptr1, (const uchar*)sptr2, dptr, total, cmpop);
1689
break;
1690
case CV_8S:
1691
compare_((const schar*)sptr1, (const schar*)sptr2, dptr, total, cmpop);
1692
break;
1693
case CV_16U:
1694
compare_((const ushort*)sptr1, (const ushort*)sptr2, dptr, total, cmpop);
1695
break;
1696
case CV_16S:
1697
compare_((const short*)sptr1, (const short*)sptr2, dptr, total, cmpop);
1698
break;
1699
case CV_32S:
1700
compare_((const int*)sptr1, (const int*)sptr2, dptr, total, cmpop);
1701
break;
1702
case CV_32F:
1703
compare_((const float*)sptr1, (const float*)sptr2, dptr, total, cmpop);
1704
break;
1705
case CV_64F:
1706
compare_((const double*)sptr1, (const double*)sptr2, dptr, total, cmpop);
1707
break;
1708
default:
1709
CV_Error(Error::StsUnsupportedFormat, "");
1710
}
1711
}
1712
}
1713
1714
void compare(const Mat& src, double value, Mat& dst, int cmpop)
1715
{
1716
CV_Assert( src.channels() == 1 );
1717
dst.create( src.dims, &src.size[0], CV_8U );
1718
const Mat *arrays[]={&src, &dst, 0};
1719
Mat planes[2];
1720
1721
NAryMatIterator it(arrays, planes);
1722
size_t total = planes[0].total();
1723
size_t i, nplanes = it.nplanes;
1724
int depth = src.depth();
1725
int ivalue = saturate_cast<int>(value);
1726
1727
for( i = 0; i < nplanes; i++, ++it )
1728
{
1729
const uchar* sptr = planes[0].ptr();
1730
uchar* dptr = planes[1].ptr();
1731
1732
switch( depth )
1733
{
1734
case CV_8U:
1735
compareS_((const uchar*)sptr, ivalue, dptr, total, cmpop);
1736
break;
1737
case CV_8S:
1738
compareS_((const schar*)sptr, ivalue, dptr, total, cmpop);
1739
break;
1740
case CV_16U:
1741
compareS_((const ushort*)sptr, ivalue, dptr, total, cmpop);
1742
break;
1743
case CV_16S:
1744
compareS_((const short*)sptr, ivalue, dptr, total, cmpop);
1745
break;
1746
case CV_32S:
1747
compareS_((const int*)sptr, ivalue, dptr, total, cmpop);
1748
break;
1749
case CV_32F:
1750
compareS_((const float*)sptr, value, dptr, total, cmpop);
1751
break;
1752
case CV_64F:
1753
compareS_((const double*)sptr, value, dptr, total, cmpop);
1754
break;
1755
default:
1756
CV_Error(Error::StsUnsupportedFormat, "");
1757
}
1758
}
1759
}
1760
1761
1762
// Finds the maximum absolute element-wise difference between two integer
// buffers. If a difference exceeds imaxdiff and idx is still 0, records
// its 1-based global position (i + startidx) in idx.
template<typename _Tp> double
cmpUlpsInt_(const _Tp* src1, const _Tp* src2, size_t total, int imaxdiff,
           size_t startidx, size_t& idx)
{
    size_t i;
    int realmaxdiff = 0;
    for( i = 0; i < total; i++ )
    {
        int diff = std::abs(src1[i] - src2[i]);
        if( realmaxdiff < diff )
        {
            realmaxdiff = diff;
            if( diff > imaxdiff && idx == 0 )
                idx = i + startidx;
        }
    }
    return realmaxdiff;
}

// Specialization for int: the difference of two 32-bit ints can overflow
// int, so it is computed in double instead.
template<> double cmpUlpsInt_<int>(const int* src1, const int* src2,
                                  size_t total, int imaxdiff,
                                  size_t startidx, size_t& idx)
{
    size_t i;
    double realmaxdiff = 0;
    for( i = 0; i < total; i++ )
    {
        double diff = fabs((double)src1[i] - (double)src2[i]);
        if( realmaxdiff < diff )
        {
            realmaxdiff = diff;
            if( diff > imaxdiff && idx == 0 )
                idx = i + startidx;
        }
    }
    return realmaxdiff;
}

// ULP comparison of two float buffers reinterpreted as int bit patterns.
// XOR-ing negative values with 0x7fffffff maps the IEEE-754 sign-magnitude
// representation to a monotonic integer scale, so |a - b| counts ULPs.
// Records the first position exceeding imaxdiff in idx (1-based + startidx).
static double
cmpUlpsFlt_(const int* src1, const int* src2, size_t total, int imaxdiff, size_t startidx, size_t& idx)
{
    const int C = 0x7fffffff;
    int realmaxdiff = 0;
    size_t i;
    for( i = 0; i < total; i++ )
    {
        int a = src1[i], b = src2[i];
        if( a < 0 ) a ^= C;
        if( b < 0 ) b ^= C;
        int diff = std::abs(a - b);
        if( realmaxdiff < diff )
        {
            realmaxdiff = diff;
            if( diff > imaxdiff && idx == 0 )
                idx = i + startidx;
        }
    }
    return realmaxdiff;
}

static double
1826
cmpUlpsFlt_(const int64* src1, const int64* src2, size_t total, int imaxdiff, size_t startidx, size_t& idx)
1827
{
1828
const int64 C = CV_BIG_INT(0x7fffffffffffffff);
1829
double realmaxdiff = 0;
1830
size_t i;
1831
for( i = 0; i < total; i++ )
1832
{
1833
int64 a = src1[i], b = src2[i];
1834
if( a < 0 ) a ^= C;
1835
if( b < 0 ) b ^= C;
1836
double diff = fabs((double)a - (double)b);
1837
if( realmaxdiff < diff )
1838
{
1839
realmaxdiff = diff;
1840
if( diff > imaxdiff && idx == 0 )
1841
idx = i + startidx;
1842
}
1843
}
1844
return realmaxdiff;
1845
}
1846
1847
bool cmpUlps(const Mat& src1, const Mat& src2, int imaxDiff, double* _realmaxdiff, vector<int>* loc)
1848
{
1849
CV_Assert( src1.type() == src2.type() && src1.size == src2.size );
1850
const Mat *arrays[]={&src1, &src2, 0};
1851
Mat planes[2];
1852
NAryMatIterator it(arrays, planes);
1853
size_t total = planes[0].total()*planes[0].channels();
1854
size_t i, nplanes = it.nplanes;
1855
int depth = src1.depth();
1856
size_t startidx = 1, idx = 0;
1857
if(_realmaxdiff)
1858
*_realmaxdiff = 0;
1859
1860
for( i = 0; i < nplanes; i++, ++it, startidx += total )
1861
{
1862
const uchar* sptr1 = planes[0].ptr();
1863
const uchar* sptr2 = planes[1].ptr();
1864
double realmaxdiff = 0;
1865
1866
switch( depth )
1867
{
1868
case CV_8U:
1869
realmaxdiff = cmpUlpsInt_((const uchar*)sptr1, (const uchar*)sptr2, total, imaxDiff, startidx, idx);
1870
break;
1871
case CV_8S:
1872
realmaxdiff = cmpUlpsInt_((const schar*)sptr1, (const schar*)sptr2, total, imaxDiff, startidx, idx);
1873
break;
1874
case CV_16U:
1875
realmaxdiff = cmpUlpsInt_((const ushort*)sptr1, (const ushort*)sptr2, total, imaxDiff, startidx, idx);
1876
break;
1877
case CV_16S:
1878
realmaxdiff = cmpUlpsInt_((const short*)sptr1, (const short*)sptr2, total, imaxDiff, startidx, idx);
1879
break;
1880
case CV_32S:
1881
realmaxdiff = cmpUlpsInt_((const int*)sptr1, (const int*)sptr2, total, imaxDiff, startidx, idx);
1882
break;
1883
case CV_32F:
1884
realmaxdiff = cmpUlpsFlt_((const int*)sptr1, (const int*)sptr2, total, imaxDiff, startidx, idx);
1885
break;
1886
case CV_64F:
1887
realmaxdiff = cmpUlpsFlt_((const int64*)sptr1, (const int64*)sptr2, total, imaxDiff, startidx, idx);
1888
break;
1889
default:
1890
CV_Error(Error::StsUnsupportedFormat, "");
1891
}
1892
1893
if(_realmaxdiff)
1894
*_realmaxdiff = std::max(*_realmaxdiff, realmaxdiff);
1895
}
1896
if(idx > 0 && loc)
1897
setpos(src1, *loc, idx);
1898
return idx == 0;
1899
}
1900
1901
1902
// Scans an integer buffer for the first value outside [imin, imax]
// (inclusive); stores its 1-based global position (i + startidx) in idx.
template<typename _Tp> static void
checkInt_(const _Tp* a, size_t total, int imin, int imax, size_t startidx, size_t& idx)
{
    for( size_t i = 0; i < total; i++ )
    {
        int val = a[i];
        if( val < imin || val > imax )
        {
            idx = i + startidx;
            break;
        }
    }
}

template<typename _Tp> static void
1918
checkFlt_(const _Tp* a, size_t total, double fmin, double fmax, size_t startidx, size_t& idx)
1919
{
1920
for( size_t i = 0; i < total; i++ )
1921
{
1922
double val = a[i];
1923
if( cvIsNaN(val) || cvIsInf(val) || val < fmin || val > fmax )
1924
{
1925
idx = i + startidx;
1926
break;
1927
}
1928
}
1929
}
1930
1931
1932
// checks that the array does not have NaNs and/or Infs and all the elements are
1933
// within [min_val,max_val). idx is the index of the first "bad" element.
1934
int check( const Mat& a, double fmin, double fmax, vector<int>* _idx )
1935
{
1936
const Mat *arrays[]={&a, 0};
1937
Mat plane;
1938
NAryMatIterator it(arrays, &plane);
1939
size_t total = plane.total()*plane.channels();
1940
size_t i, nplanes = it.nplanes;
1941
int depth = a.depth();
1942
size_t startidx = 1, idx = 0;
1943
int imin = 0, imax = 0;
1944
1945
if( depth <= CV_32S )
1946
{
1947
imin = cvCeil(fmin);
1948
imax = cvFloor(fmax);
1949
}
1950
1951
for( i = 0; i < nplanes; i++, ++it, startidx += total )
1952
{
1953
const uchar* aptr = plane.ptr();
1954
1955
switch( depth )
1956
{
1957
case CV_8U:
1958
checkInt_((const uchar*)aptr, total, imin, imax, startidx, idx);
1959
break;
1960
case CV_8S:
1961
checkInt_((const schar*)aptr, total, imin, imax, startidx, idx);
1962
break;
1963
case CV_16U:
1964
checkInt_((const ushort*)aptr, total, imin, imax, startidx, idx);
1965
break;
1966
case CV_16S:
1967
checkInt_((const short*)aptr, total, imin, imax, startidx, idx);
1968
break;
1969
case CV_32S:
1970
checkInt_((const int*)aptr, total, imin, imax, startidx, idx);
1971
break;
1972
case CV_32F:
1973
checkFlt_((const float*)aptr, total, fmin, fmax, startidx, idx);
1974
break;
1975
case CV_64F:
1976
checkFlt_((const double*)aptr, total, fmin, fmax, startidx, idx);
1977
break;
1978
default:
1979
CV_Error(Error::StsUnsupportedFormat, "");
1980
}
1981
1982
if( idx != 0 )
1983
break;
1984
}
1985
1986
if(idx != 0 && _idx)
1987
setpos(a, *_idx, idx);
1988
return idx == 0 ? 0 : -1;
1989
}
1990
1991
#define CMP_EPS_OK 0
1992
#define CMP_EPS_BIG_DIFF -1
1993
#define CMP_EPS_INVALID_TEST_DATA -2 // there is NaN or Inf value in test data
1994
#define CMP_EPS_INVALID_REF_DATA -3 // there is NaN or Inf value in reference data
1995
1996
// compares two arrays. max_diff is the maximum actual difference,
1997
// success_err_level is maximum allowed difference, idx is the index of the first
1998
// element for which difference is >success_err_level
1999
// (or index of element with the maximum difference)
2000
int cmpEps( const Mat& arr_, const Mat& refarr_, double* _realmaxdiff,
2001
double success_err_level, vector<int>* _idx,
2002
bool element_wise_relative_error )
2003
{
2004
Mat arr = arr_, refarr = refarr_;
2005
CV_Assert( arr.type() == refarr.type() && arr.size == refarr.size );
2006
if( arr.depth() == CV_16F )
2007
{
2008
Mat arr32f, refarr32f;
2009
arr.convertTo(arr32f, CV_32F);
2010
refarr.convertTo(refarr32f, CV_32F);
2011
arr = arr32f;
2012
refarr = refarr32f;
2013
}
2014
2015
int ilevel = refarr.depth() <= CV_32S ? cvFloor(success_err_level) : 0;
2016
int result = CMP_EPS_OK;
2017
2018
const Mat *arrays[]={&arr, &refarr, 0};
2019
Mat planes[2];
2020
NAryMatIterator it(arrays, planes);
2021
size_t total = planes[0].total()*planes[0].channels(), j = total;
2022
size_t i, nplanes = it.nplanes;
2023
int depth = arr.depth();
2024
size_t startidx = 1, idx = 0;
2025
double realmaxdiff = 0, maxval = 0;
2026
2027
if(_realmaxdiff)
2028
*_realmaxdiff = 0;
2029
2030
if( refarr.depth() >= CV_32F && !element_wise_relative_error )
2031
{
2032
maxval = cvtest::norm( refarr, NORM_INF );
2033
maxval = MAX(maxval, 1.);
2034
}
2035
2036
for( i = 0; i < nplanes; i++, ++it, startidx += total )
2037
{
2038
const uchar* sptr1 = planes[0].ptr();
2039
const uchar* sptr2 = planes[1].ptr();
2040
2041
switch( depth )
2042
{
2043
case CV_8U:
2044
realmaxdiff = cmpUlpsInt_((const uchar*)sptr1, (const uchar*)sptr2, total, ilevel, startidx, idx);
2045
break;
2046
case CV_8S:
2047
realmaxdiff = cmpUlpsInt_((const schar*)sptr1, (const schar*)sptr2, total, ilevel, startidx, idx);
2048
break;
2049
case CV_16U:
2050
realmaxdiff = cmpUlpsInt_((const ushort*)sptr1, (const ushort*)sptr2, total, ilevel, startidx, idx);
2051
break;
2052
case CV_16S:
2053
realmaxdiff = cmpUlpsInt_((const short*)sptr1, (const short*)sptr2, total, ilevel, startidx, idx);
2054
break;
2055
case CV_32S:
2056
realmaxdiff = cmpUlpsInt_((const int*)sptr1, (const int*)sptr2, total, ilevel, startidx, idx);
2057
break;
2058
case CV_32F:
2059
for( j = 0; j < total; j++ )
2060
{
2061
double a_val = ((float*)sptr1)[j];
2062
double b_val = ((float*)sptr2)[j];
2063
double threshold;
2064
if( ((int*)sptr1)[j] == ((int*)sptr2)[j] )
2065
continue;
2066
if( cvIsNaN(a_val) || cvIsInf(a_val) )
2067
{
2068
result = CMP_EPS_INVALID_TEST_DATA;
2069
idx = startidx + j;
2070
break;
2071
}
2072
if( cvIsNaN(b_val) || cvIsInf(b_val) )
2073
{
2074
result = CMP_EPS_INVALID_REF_DATA;
2075
idx = startidx + j;
2076
break;
2077
}
2078
a_val = fabs(a_val - b_val);
2079
threshold = element_wise_relative_error ? fabs(b_val) + 1 : maxval;
2080
if( a_val > threshold*success_err_level )
2081
{
2082
realmaxdiff = a_val/threshold;
2083
if( idx == 0 )
2084
idx = startidx + j;
2085
break;
2086
}
2087
}
2088
break;
2089
case CV_64F:
2090
for( j = 0; j < total; j++ )
2091
{
2092
double a_val = ((double*)sptr1)[j];
2093
double b_val = ((double*)sptr2)[j];
2094
double threshold;
2095
if( ((int64*)sptr1)[j] == ((int64*)sptr2)[j] )
2096
continue;
2097
if( cvIsNaN(a_val) || cvIsInf(a_val) )
2098
{
2099
result = CMP_EPS_INVALID_TEST_DATA;
2100
idx = startidx + j;
2101
break;
2102
}
2103
if( cvIsNaN(b_val) || cvIsInf(b_val) )
2104
{
2105
result = CMP_EPS_INVALID_REF_DATA;
2106
idx = startidx + j;
2107
break;
2108
}
2109
a_val = fabs(a_val - b_val);
2110
threshold = element_wise_relative_error ? fabs(b_val) + 1 : maxval;
2111
if( a_val > threshold*success_err_level )
2112
{
2113
realmaxdiff = a_val/threshold;
2114
idx = startidx + j;
2115
break;
2116
}
2117
}
2118
break;
2119
default:
2120
assert(0);
2121
return CMP_EPS_BIG_DIFF;
2122
}
2123
if(_realmaxdiff)
2124
*_realmaxdiff = MAX(*_realmaxdiff, realmaxdiff);
2125
if( idx != 0 )
2126
break;
2127
}
2128
2129
if( result == 0 && idx != 0 )
2130
result = CMP_EPS_BIG_DIFF;
2131
2132
if( result < -1 && _realmaxdiff )
2133
*_realmaxdiff = exp(1000.);
2134
if(idx > 0 && _idx)
2135
setpos(arr, *_idx, idx);
2136
2137
return result;
2138
}
2139
2140
2141
int cmpEps2( TS* ts, const Mat& a, const Mat& b, double success_err_level,
2142
bool element_wise_relative_error, const char* desc )
2143
{
2144
char msg[100];
2145
double diff = 0;
2146
vector<int> idx;
2147
int code = cmpEps( a, b, &diff, success_err_level, &idx, element_wise_relative_error );
2148
2149
switch( code )
2150
{
2151
case CMP_EPS_BIG_DIFF:
2152
sprintf( msg, "%s: Too big difference (=%g > %g)", desc, diff, success_err_level );
2153
code = TS::FAIL_BAD_ACCURACY;
2154
break;
2155
case CMP_EPS_INVALID_TEST_DATA:
2156
sprintf( msg, "%s: Invalid output", desc );
2157
code = TS::FAIL_INVALID_OUTPUT;
2158
break;
2159
case CMP_EPS_INVALID_REF_DATA:
2160
sprintf( msg, "%s: Invalid reference output", desc );
2161
code = TS::FAIL_INVALID_OUTPUT;
2162
break;
2163
default:
2164
;
2165
}
2166
2167
if( code < 0 )
2168
{
2169
if( a.total() == 1 )
2170
{
2171
ts->printf( TS::LOG, "%s\n", msg );
2172
}
2173
else if( a.dims == 2 && (a.rows == 1 || a.cols == 1) )
2174
{
2175
ts->printf( TS::LOG, "%s at element %d\n", msg, idx[0] + idx[1] );
2176
}
2177
else
2178
{
2179
string idxstr = vec2str(", ", &idx[0], idx.size());
2180
ts->printf( TS::LOG, "%s at (%s)\n", msg, idxstr.c_str() );
2181
}
2182
}
2183
2184
return code;
2185
}
2186
2187
2188
int cmpEps2_64f( TS* ts, const double* val, const double* refval, int len,
2189
double eps, const char* param_name )
2190
{
2191
Mat _val(1, len, CV_64F, (void*)val);
2192
Mat _refval(1, len, CV_64F, (void*)refval);
2193
2194
return cmpEps2( ts, _val, _refval, eps, true, param_name );
2195
}
2196
2197
2198
// Reference generalized matrix multiply: d = alpha*a*b + beta*c.
// Strides are in elements; *_step advances one row, *_delta one column,
// so the caller implements transposition by swapping step/delta.
// cn == 1 is the real case, cn == 2 treats elements as complex pairs.
// c_data0 may be NULL, in which case the beta term is omitted.
template<typename _Tp> static void
GEMM_(const _Tp* a_data0, int a_step, int a_delta,
      const _Tp* b_data0, int b_step, int b_delta,
      const _Tp* c_data0, int c_step, int c_delta,
      _Tp* d_data, int d_step,
      int d_rows, int d_cols, int a_cols, int cn,
      double alpha, double beta)
{
    for( int i = 0; i < d_rows; i++, d_data += d_step, c_data0 += c_step, a_data0 += a_step )
    {
        for( int j = 0; j < d_cols; j++ )
        {
            const _Tp* a_data = a_data0;
            const _Tp* b_data = b_data0 + j*b_delta;
            const _Tp* c_data = c_data0 + j*c_delta;

            if( cn == 1 )
            {
                double s = 0;
                for( int k = 0; k < a_cols; k++ )
                {
                    s += ((double)a_data[0])*b_data[0];
                    a_data += a_delta;
                    b_data += b_step;
                }
                d_data[j] = (_Tp)(s*alpha + (c_data ? c_data[0]*beta : 0));
            }
            else
            {
                // complex multiply-accumulate: (re, im) interleaved pairs
                double s_re = 0, s_im = 0;

                for( int k = 0; k < a_cols; k++ )
                {
                    s_re += ((double)a_data[0])*b_data[0] - ((double)a_data[1])*b_data[1];
                    s_im += ((double)a_data[0])*b_data[1] + ((double)a_data[1])*b_data[0];
                    a_data += a_delta;
                    b_data += b_step;
                }

                s_re *= alpha;
                s_im *= alpha;

                if( c_data )
                {
                    s_re += c_data[0]*beta;
                    s_im += c_data[1]*beta;
                }

                d_data[j*2] = (_Tp)s_re;
                d_data[j*2+1] = (_Tp)s_im;
            }
        }
    }
}

void gemm( const Mat& _a, const Mat& _b, double alpha,
2255
const Mat& _c, double beta, Mat& d, int flags )
2256
{
2257
Mat a = _a, b = _b, c = _c;
2258
2259
if( a.data == d.data )
2260
a = a.clone();
2261
2262
if( b.data == d.data )
2263
b = b.clone();
2264
2265
if( !c.empty() && c.data == d.data && (flags & cv::GEMM_3_T) )
2266
c = c.clone();
2267
2268
int a_rows = a.rows, a_cols = a.cols, b_rows = b.rows, b_cols = b.cols;
2269
int cn = a.channels();
2270
int a_step = (int)a.step1(), a_delta = cn;
2271
int b_step = (int)b.step1(), b_delta = cn;
2272
int c_rows = 0, c_cols = 0, c_step = 0, c_delta = 0;
2273
2274
CV_Assert( a.type() == b.type() && a.dims == 2 && b.dims == 2 && cn <= 2 );
2275
2276
if( flags & cv::GEMM_1_T )
2277
{
2278
std::swap( a_rows, a_cols );
2279
std::swap( a_step, a_delta );
2280
}
2281
2282
if( flags & cv::GEMM_2_T )
2283
{
2284
std::swap( b_rows, b_cols );
2285
std::swap( b_step, b_delta );
2286
}
2287
2288
if( !c.empty() )
2289
{
2290
c_rows = c.rows;
2291
c_cols = c.cols;
2292
c_step = (int)c.step1();
2293
c_delta = cn;
2294
2295
if( flags & cv::GEMM_3_T )
2296
{
2297
std::swap( c_rows, c_cols );
2298
std::swap( c_step, c_delta );
2299
}
2300
2301
CV_Assert( c.dims == 2 && c.type() == a.type() && c_rows == a_rows && c_cols == b_cols );
2302
}
2303
2304
d.create(a_rows, b_cols, a.type());
2305
2306
if( a.depth() == CV_32F )
2307
GEMM_(a.ptr<float>(), a_step, a_delta, b.ptr<float>(), b_step, b_delta,
2308
!c.empty() ? c.ptr<float>() : 0, c_step, c_delta, d.ptr<float>(),
2309
(int)d.step1(), a_rows, b_cols, a_cols, cn, alpha, beta );
2310
else
2311
GEMM_(a.ptr<double>(), a_step, a_delta, b.ptr<double>(), b_step, b_delta,
2312
!c.empty() ? c.ptr<double>() : 0, c_step, c_delta, d.ptr<double>(),
2313
(int)d.step1(), a_rows, b_cols, a_cols, cn, alpha, beta );
2314
}
2315
2316
2317
template<typename _Tp> static void
2318
transform_(const _Tp* sptr, _Tp* dptr, size_t total, int scn, int dcn, const double* mat)
2319
{
2320
for( size_t i = 0; i < total; i++, sptr += scn, dptr += dcn )
2321
{
2322
for( int j = 0; j < dcn; j++ )
2323
{
2324
double s = mat[j*(scn + 1) + scn];
2325
for( int k = 0; k < scn; k++ )
2326
s += mat[j*(scn + 1) + k]*sptr[k];
2327
dptr[j] = saturate_cast<_Tp>(s);
2328
}
2329
}
2330
}
2331
2332
2333
void transform( const Mat& src, Mat& dst, const Mat& transmat, const Mat& _shift )
{
    // Reference implementation of cv::transform: every pixel of src
    // (scn channels) is multiplied by `transmat` and optionally shifted by
    // `_shift`, producing dcn channels in dst.
    // NOTE(review): dst is not created here -- it must already be allocated
    // with the desired channel count; confirm at the call sites.
    // mat[] holds the packed dcn x (scn+1) matrix; 20 = 4 * (4 + 1) is
    // enough because scn, dcn <= 4 (asserted below).
    double mat[20];

    int scn = src.channels();
    int dcn = dst.channels();
    int depth = src.depth();
    int mattype = transmat.depth();
    Mat shift = _shift.reshape(1, 0);
    bool haveShift = !shift.empty();

    CV_Assert( scn <= 4 && dcn <= 4 &&
               (mattype == CV_32F || mattype == CV_64F) &&
               (!haveShift || (shift.type() == mattype && (shift.rows == 1 || shift.cols == 1))) );

    // prepare cn x (cn + 1) transform matrix: copy transmat into the first
    // scn columns of each row and put the shift (or 0) in the last column.
    if( mattype == CV_32F )
    {
        for( int i = 0; i < transmat.rows; i++ )
        {
            mat[i*(scn+1)+scn] = 0.;
            for( int j = 0; j < transmat.cols; j++ )
                mat[i*(scn+1)+j] = transmat.at<float>(i,j);
            if( haveShift )
                mat[i*(scn+1)+scn] = shift.at<float>(i);
        }
    }
    else
    {
        for( int i = 0; i < transmat.rows; i++ )
        {
            mat[i*(scn+1)+scn] = 0.;
            for( int j = 0; j < transmat.cols; j++ )
                mat[i*(scn+1)+j] = transmat.at<double>(i,j);
            if( haveShift )
                mat[i*(scn+1)+scn] = shift.at<double>(i);
        }
    }

    // Iterate plane by plane so non-continuous / multi-dimensional arrays work.
    const Mat *arrays[]={&src, &dst, 0};
    Mat planes[2];
    NAryMatIterator it(arrays, planes);
    size_t total = planes[0].total();
    size_t i, nplanes = it.nplanes;

    for( i = 0; i < nplanes; i++, ++it )
    {
        const uchar* sptr = planes[0].ptr();
        uchar* dptr = planes[1].ptr();

        // Dispatch on the element depth to the typed worker above.
        switch( depth )
        {
        case CV_8U:
            transform_((const uchar*)sptr, (uchar*)dptr, total, scn, dcn, mat);
            break;
        case CV_8S:
            transform_((const schar*)sptr, (schar*)dptr, total, scn, dcn, mat);
            break;
        case CV_16U:
            transform_((const ushort*)sptr, (ushort*)dptr, total, scn, dcn, mat);
            break;
        case CV_16S:
            transform_((const short*)sptr, (short*)dptr, total, scn, dcn, mat);
            break;
        case CV_32S:
            transform_((const int*)sptr, (int*)dptr, total, scn, dcn, mat);
            break;
        case CV_32F:
            transform_((const float*)sptr, (float*)dptr, total, scn, dcn, mat);
            break;
        case CV_64F:
            transform_((const double*)sptr, (double*)dptr, total, scn, dcn, mat);
            break;
        default:
            CV_Error(Error::StsUnsupportedFormat, "");
        }
    }
}
2411
2412
template<typename _Tp> static void
minmax_(const _Tp* src1, const _Tp* src2, _Tp* dst, size_t total, char op)
{
    // Element-wise combination of two arrays: op == 'M' selects the
    // per-element maximum, any other op selects the minimum.
    const bool takeMax = (op == 'M');
    for( size_t i = 0; i < total; i++ )
        dst[i] = takeMax ? std::max(src1[i], src2[i]) : std::min(src1[i], src2[i]);
}
2422
2423
static void minmax(const Mat& src1, const Mat& src2, Mat& dst, char op)
{
    // Reference per-element min ('m') / max ('M') of two arrays of equal
    // type and size; the result is (re)allocated into dst.
    dst.create(src1.dims, src1.size, src1.type());
    CV_Assert( src1.type() == src2.type() && src1.size == src2.size );
    const Mat *arrays[]={&src1, &src2, &dst, 0};
    Mat planes[3];

    // Plane-wise iteration handles non-continuous and multi-dim arrays.
    NAryMatIterator it(arrays, planes);
    // Channels are folded into `total` -- min/max is channel-agnostic.
    size_t total = planes[0].total()*planes[0].channels();
    size_t i, nplanes = it.nplanes, depth = src1.depth();

    for( i = 0; i < nplanes; i++, ++it )
    {
        const uchar* sptr1 = planes[0].ptr();
        const uchar* sptr2 = planes[1].ptr();
        uchar* dptr = planes[2].ptr();

        // Dispatch on element depth to the typed worker above.
        switch( depth )
        {
        case CV_8U:
            minmax_((const uchar*)sptr1, (const uchar*)sptr2, (uchar*)dptr, total, op);
            break;
        case CV_8S:
            minmax_((const schar*)sptr1, (const schar*)sptr2, (schar*)dptr, total, op);
            break;
        case CV_16U:
            minmax_((const ushort*)sptr1, (const ushort*)sptr2, (ushort*)dptr, total, op);
            break;
        case CV_16S:
            minmax_((const short*)sptr1, (const short*)sptr2, (short*)dptr, total, op);
            break;
        case CV_32S:
            minmax_((const int*)sptr1, (const int*)sptr2, (int*)dptr, total, op);
            break;
        case CV_32F:
            minmax_((const float*)sptr1, (const float*)sptr2, (float*)dptr, total, op);
            break;
        case CV_64F:
            minmax_((const double*)sptr1, (const double*)sptr2, (double*)dptr, total, op);
            break;
        default:
            CV_Error(Error::StsUnsupportedFormat, "");
        }
    }
}
2468
2469
2470
void min(const Mat& src1, const Mat& src2, Mat& dst)
2471
{
2472
minmax( src1, src2, dst, 'm' );
2473
}
2474
2475
void max(const Mat& src1, const Mat& src2, Mat& dst)
2476
{
2477
minmax( src1, src2, dst, 'M' );
2478
}
2479
2480
2481
template<typename _Tp> static void
minmax_(const _Tp* src1, _Tp val, _Tp* dst, size_t total, char op)
{
    // Element-wise min/max against a constant: 'M' -> maximum,
    // anything else -> minimum.
    if( op == 'M' )
    {
        for( size_t i = 0; i < total; i++ )
            dst[i] = std::max(src1[i], val);
    }
    else
    {
        for( size_t i = 0; i < total; i++ )
            dst[i] = std::min(src1[i], val);
    }
}
2491
2492
static void minmax(const Mat& src1, double val, Mat& dst, char op)
{
    // Reference per-element min ('m') / max ('M') of an array and a scalar.
    dst.create(src1.dims, src1.size, src1.type());
    const Mat *arrays[]={&src1, &dst, 0};
    Mat planes[2];

    NAryMatIterator it(arrays, planes);
    // Channels are folded into `total` -- the operation is channel-agnostic.
    size_t total = planes[0].total()*planes[0].channels();
    size_t i, nplanes = it.nplanes, depth = src1.depth();
    // Integer depths compare against val rounded/saturated to int first;
    // the float/double branches below use the original `val`.
    int ival = saturate_cast<int>(val);

    for( i = 0; i < nplanes; i++, ++it )
    {
        const uchar* sptr1 = planes[0].ptr();
        uchar* dptr = planes[1].ptr();

        switch( depth )
        {
        case CV_8U:
            minmax_((const uchar*)sptr1, saturate_cast<uchar>(ival), (uchar*)dptr, total, op);
            break;
        case CV_8S:
            minmax_((const schar*)sptr1, saturate_cast<schar>(ival), (schar*)dptr, total, op);
            break;
        case CV_16U:
            minmax_((const ushort*)sptr1, saturate_cast<ushort>(ival), (ushort*)dptr, total, op);
            break;
        case CV_16S:
            minmax_((const short*)sptr1, saturate_cast<short>(ival), (short*)dptr, total, op);
            break;
        case CV_32S:
            minmax_((const int*)sptr1, saturate_cast<int>(ival), (int*)dptr, total, op);
            break;
        case CV_32F:
            minmax_((const float*)sptr1, saturate_cast<float>(val), (float*)dptr, total, op);
            break;
        case CV_64F:
            minmax_((const double*)sptr1, saturate_cast<double>(val), (double*)dptr, total, op);
            break;
        default:
            CV_Error(Error::StsUnsupportedFormat, "");
        }
    }
}
2536
2537
2538
void min(const Mat& src1, double val, Mat& dst)
2539
{
2540
minmax( src1, val, dst, 'm' );
2541
}
2542
2543
void max(const Mat& src1, double val, Mat& dst)
2544
{
2545
minmax( src1, val, dst, 'M' );
2546
}
2547
2548
2549
template<typename _Tp> static void
2550
muldiv_(const _Tp* src1, const _Tp* src2, _Tp* dst, size_t total, double scale, char op)
2551
{
2552
if( op == '*' )
2553
for( size_t i = 0; i < total; i++ )
2554
dst[i] = saturate_cast<_Tp>((scale*src1[i])*src2[i]);
2555
else if( src1 )
2556
for( size_t i = 0; i < total; i++ )
2557
dst[i] = src2[i] ? saturate_cast<_Tp>((scale*src1[i])/src2[i]) : 0;
2558
else
2559
for( size_t i = 0; i < total; i++ )
2560
dst[i] = src2[i] ? saturate_cast<_Tp>(scale/src2[i]) : 0;
2561
}
2562
2563
static void muldiv(const Mat& src1, const Mat& src2, Mat& dst, double scale, char op)
{
    // Reference per-element multiply ('*') / divide (other op) with a
    // scale factor. src1 may be empty, in which case the typed worker
    // computes scale/src2 (reciprocal mode) -- hence dst is sized and
    // `total` is taken from src2, not src1.
    dst.create(src2.dims, src2.size, src2.type());
    CV_Assert( src1.empty() || (src1.type() == src2.type() && src1.size == src2.size) );
    const Mat *arrays[]={&src1, &src2, &dst, 0};
    Mat planes[3];

    NAryMatIterator it(arrays, planes);
    size_t total = planes[1].total()*planes[1].channels();
    size_t i, nplanes = it.nplanes, depth = src2.depth();

    for( i = 0; i < nplanes; i++, ++it )
    {
        // sptr1 is null when src1 is empty (reciprocal mode).
        const uchar* sptr1 = planes[0].ptr();
        const uchar* sptr2 = planes[1].ptr();
        uchar* dptr = planes[2].ptr();

        switch( depth )
        {
        case CV_8U:
            muldiv_((const uchar*)sptr1, (const uchar*)sptr2, (uchar*)dptr, total, scale, op);
            break;
        case CV_8S:
            muldiv_((const schar*)sptr1, (const schar*)sptr2, (schar*)dptr, total, scale, op);
            break;
        case CV_16U:
            muldiv_((const ushort*)sptr1, (const ushort*)sptr2, (ushort*)dptr, total, scale, op);
            break;
        case CV_16S:
            muldiv_((const short*)sptr1, (const short*)sptr2, (short*)dptr, total, scale, op);
            break;
        case CV_32S:
            muldiv_((const int*)sptr1, (const int*)sptr2, (int*)dptr, total, scale, op);
            break;
        case CV_32F:
            muldiv_((const float*)sptr1, (const float*)sptr2, (float*)dptr, total, scale, op);
            break;
        case CV_64F:
            muldiv_((const double*)sptr1, (const double*)sptr2, (double*)dptr, total, scale, op);
            break;
        default:
            CV_Error(Error::StsUnsupportedFormat, "");
        }
    }
}
2608
2609
2610
void multiply(const Mat& src1, const Mat& src2, Mat& dst, double scale)
2611
{
2612
muldiv( src1, src2, dst, scale, '*' );
2613
}
2614
2615
void divide(const Mat& src1, const Mat& src2, Mat& dst, double scale)
2616
{
2617
muldiv( src1, src2, dst, scale, '/' );
2618
}
2619
2620
2621
template<typename _Tp> static void
2622
mean_(const _Tp* src, const uchar* mask, size_t total, int cn, Scalar& sum, int& nz)
2623
{
2624
if( !mask )
2625
{
2626
nz += (int)total;
2627
total *= cn;
2628
for( size_t i = 0; i < total; i += cn )
2629
{
2630
for( int c = 0; c < cn; c++ )
2631
sum[c] += src[i + c];
2632
}
2633
}
2634
else
2635
{
2636
for( size_t i = 0; i < total; i++ )
2637
if( mask[i] )
2638
{
2639
nz++;
2640
for( int c = 0; c < cn; c++ )
2641
sum[c] += src[i*cn + c];
2642
}
2643
}
2644
}
2645
2646
Scalar mean(const Mat& src, const Mat& mask)
{
    // Reference implementation of cv::mean: per-channel average of the
    // pixels where mask != 0, or of all pixels when mask is empty.
    CV_Assert(mask.empty() || (mask.type() == CV_8U && mask.size == src.size));
    Scalar sum;
    int nz = 0;  // number of pixels accumulated into `sum`

    const Mat *arrays[]={&src, &mask, 0};
    Mat planes[2];

    NAryMatIterator it(arrays, planes);
    size_t total = planes[0].total();
    size_t i, nplanes = it.nplanes;
    int depth = src.depth(), cn = src.channels();

    for( i = 0; i < nplanes; i++, ++it )
    {
        const uchar* sptr = planes[0].ptr();
        const uchar* mptr = planes[1].ptr();

        switch( depth )
        {
        case CV_8U:
            mean_((const uchar*)sptr, mptr, total, cn, sum, nz);
            break;
        case CV_8S:
            mean_((const schar*)sptr, mptr, total, cn, sum, nz);
            break;
        case CV_16U:
            mean_((const ushort*)sptr, mptr, total, cn, sum, nz);
            break;
        case CV_16S:
            mean_((const short*)sptr, mptr, total, cn, sum, nz);
            break;
        case CV_32S:
            mean_((const int*)sptr, mptr, total, cn, sum, nz);
            break;
        case CV_32F:
            mean_((const float*)sptr, mptr, total, cn, sum, nz);
            break;
        case CV_64F:
            mean_((const double*)sptr, mptr, total, cn, sum, nz);
            break;
        default:
            CV_Error(Error::StsUnsupportedFormat, "");
        }
    }

    // max(nz, 1) guards against an all-zero mask: the sum is then 0,
    // so the result is a Scalar of zeros rather than a division by zero.
    return sum * (1./std::max(nz, 1));
}
2695
2696
2697
void patchZeros( Mat& mat, double level )
{
    // Adds 1 to every element whose magnitude is below `level`, so that
    // subsequent element-wise divisions cannot hit (near-)zero values.
    // Only CV_32F and CV_64F matrices are supported.
    int ncols = mat.cols * mat.channels();
    int depth = mat.depth();
    CV_Assert( depth == CV_32F || depth == CV_64F );

    for( int i = 0; i < mat.rows; i++ )
    {
        if( depth == CV_32F )
        {
            float* row = mat.ptr<float>(i);
            for( int j = 0; j < ncols; j++ )
            {
                if( fabs(row[j]) < level )
                    row[j] += 1;
            }
        }
        else
        {
            double* row = mat.ptr<double>(i);
            for( int j = 0; j < ncols; j++ )
            {
                if( fabs(row[j]) < level )
                    row[j] += 1;
            }
        }
    }
}
2721
2722
2723
static void calcSobelKernel1D( int order, int _aperture_size, int size, vector<int>& kernel )
{
    // Builds the 1D kernel of a Sobel derivative of the given order:
    // repeated [1 1] smoothing passes followed by `order` passes of the
    // [-1 1] difference. Only the first `size` entries are meaningful;
    // the vector is one element longer as scratch space.
    kernel.resize(size + 1);

    if( _aperture_size < 0 )
    {
        // Negative aperture selects the fixed 3-tap Scharr kernels.
        static const int scharr[] = { 3, 10, 3, -1, 0, 1 };
        assert( size == 3 );
        for( int i = 0; i < size; i++ )
            kernel[i] = scharr[order*3 + i];
        return;
    }

    // Start from the identity kernel [1, 0, 0, ...].
    kernel[0] = 1;
    for( int i = 1; i <= size; i++ )
        kernel[i] = 0;

    // Smoothing passes: each convolution with [1 1] grows the binomial
    // coefficients by one tap.
    for( int i = 0; i < size - order - 1; i++ )
    {
        int prev = kernel[0];
        for( int j = 1; j <= size; j++ )
        {
            int cur = kernel[j] + kernel[j-1];
            kernel[j-1] = prev;
            prev = cur;
        }
    }

    // Differentiation passes: convolution with [-1 1], applied `order` times.
    for( int i = 0; i < order; i++ )
    {
        int prev = -kernel[0];
        for( int j = 1; j <= size; j++ )
        {
            int cur = kernel[j-1] - kernel[j];
            kernel[j-1] = prev;
            prev = cur;
        }
    }
}
2763
2764
2765
Mat calcSobelKernel2D( int dx, int dy, int _aperture_size, int origin )
{
    // Builds the full 2D Sobel/Scharr kernel as the outer product of the
    // 1D kernels of derivative orders dx (horizontal) and dy (vertical).
    CV_Assert( (_aperture_size == -1 || (_aperture_size >= 1 && _aperture_size % 2 == 1)) &&
               dx >= 0 && dy >= 0 && dx + dy <= 3 );

    // aperture -1 -> 3x3 Scharr; aperture 1 -> degenerate 3x1 / 1x3 kernel.
    Size ksize;
    if( _aperture_size == -1 )
        ksize = Size(3, 3);
    else if( _aperture_size > 1 )
        ksize = Size(_aperture_size, _aperture_size);
    else
        ksize = dx > 0 ? Size(3, 1) : Size(1, 3);

    vector<int> kx, ky;
    calcSobelKernel1D( dx, _aperture_size, ksize.width, kx );
    calcSobelKernel1D( dy, _aperture_size, ksize.height, ky );

    Mat kernel(ksize, CV_32F);
    for( int i = 0; i < kernel.rows; i++ )
    {
        // A non-zero origin flips the sign of odd vertical derivatives
        // (bottom-up image layout).
        float ay = (float)ky[i]*(origin && (dy & 1) ? -1 : 1);
        for( int j = 0; j < kernel.cols; j++ )
            kernel.at<float>(i, j) = kx[j]*ay;
    }

    return kernel;
}
2787
2788
2789
Mat calcLaplaceKernel2D( int aperture_size )
{
    // Laplacian kernel = d2/dx2 + d2/dy2, assembled from separable 1D
    // kernels: kernel(i,j) = kx[j]*ky[i] + kx[i]*ky[j].
    int ksize = aperture_size == 1 ? 3 : aperture_size;
    vector<int> kx, ky;

    // Second-derivative kernel along one axis...
    calcSobelKernel1D( 2, aperture_size, ksize, kx );
    // ...and a smoothing kernel along the other: [0 1 0] for the special
    // aperture-1 (3x3) case, a binomial kernel otherwise.
    if( aperture_size > 1 )
        calcSobelKernel1D( 0, aperture_size, ksize, ky );
    else
    {
        ky.resize(3);
        ky[0] = ky[2] = 0; ky[1] = 1;
    }

    Mat kernel(ksize, ksize, CV_32F);
    for( int i = 0; i < ksize; i++ )
    {
        for( int j = 0; j < ksize; j++ )
            kernel.at<float>(i, j) = (float)(kx[j]*ky[i] + kx[i]*ky[j]);
    }

    return kernel;
}
2811
2812
2813
void initUndistortMap( const Mat& _a0, const Mat& _k0, Size sz, Mat& _mapx, Mat& _mapy )
{
    // Builds remap tables for lens distortion: for every pixel (u, v) of
    // the undistorted image, _mapx/_mapy give the corresponding position
    // in the distorted source image.
    _mapx.create(sz, CV_32F);
    _mapy.create(sz, CV_32F);

    // a: 3x3 camera matrix; k: up to 5 distortion coefficients.
    // Presumably the OpenCV ordering [k1 k2 p1 p2 k3] -- the radial terms
    // below use k[0], k[1], k[4], the tangential ones k[2], k[3].
    double a[9], k[5]={0,0,0,0,0};
    Mat _a(3, 3, CV_64F, a);
    Mat _k(_k0.rows,_k0.cols, CV_MAKETYPE(CV_64F,_k0.channels()),k);
    double fx, fy, cx, cy, ifx, ify, cxn, cyn;

    _a0.convertTo(_a, CV_64F);
    _k0.convertTo(_k, CV_64F);
    // Focal lengths and principal point from the camera matrix.
    fx = a[0]; fy = a[4]; cx = a[2]; cy = a[5];
    ifx = 1./fx; ify = 1./fy;
    cxn = cx;
    cyn = cy;

    for( int v = 0; v < sz.height; v++ )
    {
        for( int u = 0; u < sz.width; u++ )
        {
            // Normalized camera coordinates of the (u, v) pixel.
            double x = (u - cxn)*ifx;
            double y = (v - cyn)*ify;
            double x2 = x*x, y2 = y*y;
            double r2 = x2 + y2;
            // Radial distortion factor 1 + k1*r^2 + k2*r^4 + k3*r^6 ...
            double cdist = 1 + (k[0] + (k[1] + k[4]*r2)*r2)*r2;
            // ...combined with the tangential distortion terms.
            double x1 = x*cdist + k[2]*2*x*y + k[3]*(r2 + 2*x2);
            double y1 = y*cdist + k[3]*2*x*y + k[2]*(r2 + 2*y2);

            // Back to pixel coordinates of the distorted image.
            _mapy.at<float>(v, u) = (float)(y1*fy + cy);
            _mapx.at<float>(v, u) = (float)(x1*fx + cx);
        }
    }
}
2847
2848
2849
std::ostream& operator << (std::ostream& out, const MatInfo& m)
2850
{
2851
if( !m.m || m.m->empty() )
2852
out << "<Empty>";
2853
else
2854
{
2855
static const char* depthstr[] = {"8u", "8s", "16u", "16s", "32s", "32f", "64f", "?"};
2856
out << depthstr[m.m->depth()] << "C" << m.m->channels() << " " << m.m->dims << "-dim (";
2857
for( int i = 0; i < m.m->dims; i++ )
2858
out << m.m->size[i] << (i < m.m->dims-1 ? " x " : ")");
2859
}
2860
return out;
2861
}
2862
2863
2864
static Mat getSubArray(const Mat& m, int border, vector<int>& ofs0, vector<int>& ofs)
{
    // Returns the neighborhood of the element at position ofs0, extending
    // `border` elements in every direction (clipped to the array bounds).
    // On output `ofs` holds the position of that element inside the
    // returned sub-array. A negative border means "return m itself".
    ofs.resize(ofs0.size());
    if( border < 0 )
    {
        std::copy(ofs0.begin(), ofs0.end(), ofs.begin());
        return m;
    }
    int i, d = m.dims;
    CV_Assert(d == (int)ofs.size());
    vector<Range> r(d);
    for( i = 0; i < d; i++ )
    {
        // Clip the requested window to the array boundaries.
        r[i].start = std::max(0, ofs0[i] - border);
        r[i].end = std::min(ofs0[i] + 1 + border, m.size[i]);
        // Position of the original element inside the clipped window.
        ofs[i] = std::min(ofs0[i], border);
    }
    return m(&r[0]);
}
2883
2884
template<typename _Tp, typename _WTp> static void
writeElems(std::ostream& out, const void* data, int nelems, int starpos)
{
    // Prints nelems values of type _Tp, widened to _WTp for printing (so
    // 8-bit types come out as numbers, not characters), comma-separated.
    // The element at index starpos, if any, is wrapped in '*' markers.
    const _Tp* p = (const _Tp*)data;
    for( int i = 0; i < nelems; i++ )
    {
        const bool marked = (i == starpos);
        if( marked )
            out << "*";
        out << (_WTp)p[i];
        if( marked )
            out << "*";
        if( i + 1 < nelems )
            out << ", ";
    }
}
2897
2898
2899
static void writeElems(std::ostream& out, const void* data, int nelems, int depth, int starpos)
2900
{
2901
if(depth == CV_8U)
2902
writeElems<uchar, int>(out, data, nelems, starpos);
2903
else if(depth == CV_8S)
2904
writeElems<schar, int>(out, data, nelems, starpos);
2905
else if(depth == CV_16U)
2906
writeElems<ushort, int>(out, data, nelems, starpos);
2907
else if(depth == CV_16S)
2908
writeElems<short, int>(out, data, nelems, starpos);
2909
else if(depth == CV_32S)
2910
writeElems<int, int>(out, data, nelems, starpos);
2911
else if(depth == CV_32F)
2912
{
2913
std::streamsize pp = out.precision();
2914
out.precision(8);
2915
writeElems<float, float>(out, data, nelems, starpos);
2916
out.precision(pp);
2917
}
2918
else if(depth == CV_64F)
2919
{
2920
std::streamsize pp = out.precision();
2921
out.precision(16);
2922
writeElems<double, double>(out, data, nelems, starpos);
2923
out.precision(pp);
2924
}
2925
else
2926
CV_Error(Error::StsUnsupportedFormat, "");
2927
}
2928
2929
2930
// Helper used by MatComparator's failure report: bundles a matrix with an
// optional highlight position so operator<< below can print the matrix
// with one element marked. Holds non-owning pointers only.
struct MatPart
{
    MatPart(const Mat& _m, const vector<int>* _loc)
    : m(&_m), loc(_loc) {}
    const Mat* m;            // matrix to print (not owned)
    const vector<int>* loc;  // optional highlight position, or NULL (not owned)
};
2937
2938
static std::ostream& operator << (std::ostream& out, const MatPart& m)
{
    // Prints the wrapped matrix. Without a highlight location the regular
    // Mat printer is used; with one (2D matrices only), each row is
    // printed via writeElems, marking the element at (loc[0], loc[1]).
    CV_Assert( !m.loc || ((int)m.loc->size() == m.m->dims && m.m->dims <= 2) );
    if( !m.loc )
        out << *m.m;
    else
    {
        int i, depth = m.m->depth(), cn = m.m->channels(), width = m.m->cols*cn;
        for( i = 0; i < m.m->rows; i++ )
        {
            // Only the row containing the highlight gets a star position;
            // all other rows pass -1 (no marker).
            writeElems(out, m.m->ptr(i), width, depth, i == (*m.loc)[0] ? (*m.loc)[1] : -1);
            out << (i < m.m->rows-1 ? ";\n" : "");
        }
    }
    return out;
}
2954
2955
// maxdiff: maximum allowed difference (as interpreted by cmpEps);
// context: how many neighboring elements to print around a mismatch.
// realmaxdiff starts at DBL_MAX and is filled in by operator().
MatComparator::MatComparator(double _maxdiff, int _context)
: maxdiff(_maxdiff), realmaxdiff(DBL_MAX), context(_context) {}
2957
2958
::testing::AssertionResult
MatComparator::operator()(const char* expr1, const char* expr2,
                          const Mat& m1, const Mat& m2)
{
    // GTest predicate-formatter: succeeds when m1 and m2 have identical
    // type/size and cmpEps finds their difference within `maxdiff`;
    // otherwise builds a failure report showing the neighborhood
    // (`context` elements around) of the first offending element.
    if( m1.type() != m2.type() || m1.size != m2.size )
        return ::testing::AssertionFailure()
        << "The reference and the actual output arrays have different type or size:\n"
        << expr1 << " ~ " << MatInfo(m1) << "\n"
        << expr2 << " ~ " << MatInfo(m2) << "\n";

    //bool ok = cvtest::cmpUlps(m1, m2, maxdiff, &realmaxdiff, &loc0);
    int code = cmpEps( m1, m2, &realmaxdiff, maxdiff, &loc0, true);

    if(code >= 0)
        return ::testing::AssertionSuccess();

    // Switch to single-channel views so loc0 addresses individual scalars.
    Mat m[] = {m1.reshape(1,0), m2.reshape(1,0)};
    int dims = m[0].dims;
    vector<int> loc;
    int border = dims <= 2 ? context : 0;  // context printing only for <= 2D

    Mat m1part, m2part;
    if( border == 0 )
    {
        // No context requested (or >2 dims): print only the single
        // differing element itself.
        loc = loc0;
        m1part = Mat(1, 1, m[0].depth(), m[0].ptr(&loc[0]));
        m2part = Mat(1, 1, m[1].depth(), m[1].ptr(&loc[0]));
    }
    else
    {
        m1part = getSubArray(m[0], border, loc0, loc);
        m2part = getSubArray(m[1], border, loc0, loc);
    }

    return ::testing::AssertionFailure()
    << "too big relative difference (" << realmaxdiff << " > "
    << maxdiff << ") between "
    << MatInfo(m1) << " '" << expr1 << "' and '" << expr2 << "' at " << Mat(loc0).t() << ".\n"
    << "- " << expr1 << ":\n" << MatPart(m1part, border > 0 ? &loc : 0) << ".\n"
    << "- " << expr2 << ":\n" << MatPart(m2part, border > 0 ? &loc : 0) << ".\n";
}
2999
3000
void threshold( const Mat& _src, Mat& _dst,
3001
double thresh, double maxval, int thresh_type )
3002
{
3003
int i, j;
3004
int depth = _src.depth(), cn = _src.channels();
3005
int width_n = _src.cols*cn, height = _src.rows;
3006
int ithresh = cvFloor(thresh);
3007
int imaxval, ithresh2;
3008
3009
if( depth == CV_8U )
3010
{
3011
ithresh2 = saturate_cast<uchar>(ithresh);
3012
imaxval = saturate_cast<uchar>(maxval);
3013
}
3014
else if( depth == CV_16S )
3015
{
3016
ithresh2 = saturate_cast<short>(ithresh);
3017
imaxval = saturate_cast<short>(maxval);
3018
}
3019
else
3020
{
3021
ithresh2 = cvRound(ithresh);
3022
imaxval = cvRound(maxval);
3023
}
3024
3025
assert( depth == CV_8U || depth == CV_16S || depth == CV_32F );
3026
3027
switch( thresh_type )
3028
{
3029
case CV_THRESH_BINARY:
3030
for( i = 0; i < height; i++ )
3031
{
3032
if( depth == CV_8U )
3033
{
3034
const uchar* src = _src.ptr<uchar>(i);
3035
uchar* dst = _dst.ptr<uchar>(i);
3036
for( j = 0; j < width_n; j++ )
3037
dst[j] = (uchar)(src[j] > ithresh ? imaxval : 0);
3038
}
3039
else if( depth == CV_16S )
3040
{
3041
const short* src = _src.ptr<short>(i);
3042
short* dst = _dst.ptr<short>(i);
3043
for( j = 0; j < width_n; j++ )
3044
dst[j] = (short)(src[j] > ithresh ? imaxval : 0);
3045
}
3046
else
3047
{
3048
const float* src = _src.ptr<float>(i);
3049
float* dst = _dst.ptr<float>(i);
3050
for( j = 0; j < width_n; j++ )
3051
dst[j] = (float)((double)src[j] > thresh ? maxval : 0.f);
3052
}
3053
}
3054
break;
3055
case CV_THRESH_BINARY_INV:
3056
for( i = 0; i < height; i++ )
3057
{
3058
if( depth == CV_8U )
3059
{
3060
const uchar* src = _src.ptr<uchar>(i);
3061
uchar* dst = _dst.ptr<uchar>(i);
3062
for( j = 0; j < width_n; j++ )
3063
dst[j] = (uchar)(src[j] > ithresh ? 0 : imaxval);
3064
}
3065
else if( depth == CV_16S )
3066
{
3067
const short* src = _src.ptr<short>(i);
3068
short* dst = _dst.ptr<short>(i);
3069
for( j = 0; j < width_n; j++ )
3070
dst[j] = (short)(src[j] > ithresh ? 0 : imaxval);
3071
}
3072
else
3073
{
3074
const float* src = _src.ptr<float>(i);
3075
float* dst = _dst.ptr<float>(i);
3076
for( j = 0; j < width_n; j++ )
3077
dst[j] = (float)((double)src[j] > thresh ? 0.f : maxval);
3078
}
3079
}
3080
break;
3081
case CV_THRESH_TRUNC:
3082
for( i = 0; i < height; i++ )
3083
{
3084
if( depth == CV_8U )
3085
{
3086
const uchar* src = _src.ptr<uchar>(i);
3087
uchar* dst = _dst.ptr<uchar>(i);
3088
for( j = 0; j < width_n; j++ )
3089
{
3090
int s = src[j];
3091
dst[j] = (uchar)(s > ithresh ? ithresh2 : s);
3092
}
3093
}
3094
else if( depth == CV_16S )
3095
{
3096
const short* src = _src.ptr<short>(i);
3097
short* dst = _dst.ptr<short>(i);
3098
for( j = 0; j < width_n; j++ )
3099
{
3100
int s = src[j];
3101
dst[j] = (short)(s > ithresh ? ithresh2 : s);
3102
}
3103
}
3104
else
3105
{
3106
const float* src = _src.ptr<float>(i);
3107
float* dst = _dst.ptr<float>(i);
3108
for( j = 0; j < width_n; j++ )
3109
{
3110
double s = src[j];
3111
dst[j] = (float)(s > thresh ? thresh : s);
3112
}
3113
}
3114
}
3115
break;
3116
case CV_THRESH_TOZERO:
3117
for( i = 0; i < height; i++ )
3118
{
3119
if( depth == CV_8U )
3120
{
3121
const uchar* src = _src.ptr<uchar>(i);
3122
uchar* dst = _dst.ptr<uchar>(i);
3123
for( j = 0; j < width_n; j++ )
3124
{
3125
int s = src[j];
3126
dst[j] = (uchar)(s > ithresh ? s : 0);
3127
}
3128
}
3129
else if( depth == CV_16S )
3130
{
3131
const short* src = _src.ptr<short>(i);
3132
short* dst = _dst.ptr<short>(i);
3133
for( j = 0; j < width_n; j++ )
3134
{
3135
int s = src[j];
3136
dst[j] = (short)(s > ithresh ? s : 0);
3137
}
3138
}
3139
else
3140
{
3141
const float* src = _src.ptr<float>(i);
3142
float* dst = _dst.ptr<float>(i);
3143
for( j = 0; j < width_n; j++ )
3144
{
3145
float s = src[j];
3146
dst[j] = s > thresh ? s : 0.f;
3147
}
3148
}
3149
}
3150
break;
3151
case CV_THRESH_TOZERO_INV:
3152
for( i = 0; i < height; i++ )
3153
{
3154
if( depth == CV_8U )
3155
{
3156
const uchar* src = _src.ptr<uchar>(i);
3157
uchar* dst = _dst.ptr<uchar>(i);
3158
for( j = 0; j < width_n; j++ )
3159
{
3160
int s = src[j];
3161
dst[j] = (uchar)(s > ithresh ? 0 : s);
3162
}
3163
}
3164
else if( depth == CV_16S )
3165
{
3166
const short* src = _src.ptr<short>(i);
3167
short* dst = _dst.ptr<short>(i);
3168
for( j = 0; j < width_n; j++ )
3169
{
3170
int s = src[j];
3171
dst[j] = (short)(s > ithresh ? 0 : s);
3172
}
3173
}
3174
else
3175
{
3176
const float* src = _src.ptr<float>(i);
3177
float* dst = _dst.ptr<float>(i);
3178
for( j = 0; j < width_n; j++ )
3179
{
3180
float s = src[j];
3181
dst[j] = s > thresh ? 0.f : s;
3182
}
3183
}
3184
}
3185
break;
3186
default:
3187
assert(0);
3188
}
3189
}
3190
3191
3192
static void
_minMaxIdx( const float* src, const uchar* mask, double* _minVal, double* _maxVal,
            size_t* _minIdx, size_t* _maxIdx, int len, size_t startIdx )
{
    // Scans `len` floats for the smallest and largest value, optionally
    // restricted to positions where mask != 0, and reports both values and
    // their linear positions counted from startIdx. Any output pointer may
    // be NULL. If the mask rejects every element, the outputs keep their
    // initial values: FLT_MAX / -FLT_MAX and index 0.
    double minVal = FLT_MAX, maxVal = -FLT_MAX;
    size_t minIdx = 0, maxIdx = 0;

    if( !mask )
    {
        for( int i = 0; i < len; i++ )
        {
            float val = src[i];
            if( val < minVal )
            {
                minVal = val;
                minIdx = startIdx + i;
            }
            if( val > maxVal )
            {
                maxVal = val;
                maxIdx = startIdx + i;
            }
        }
    }
    else
    {
        for( int i = 0; i < len; i++ )
        {
            float val = src[i];
            if( mask[i] && val < minVal )
            {
                minVal = val;
                minIdx = startIdx + i;
            }
            if( mask[i] && val > maxVal )
            {
                maxVal = val;
                maxIdx = startIdx + i;
            }
        }
    }

    if (_minIdx)
        *_minIdx = minIdx;
    if (_maxIdx)
        *_maxIdx = maxIdx;
    if (_minVal)
        *_minVal = minVal;
    if (_maxVal)
        *_maxVal = maxVal;
}
3243
3244
3245
void minMaxIdx( InputArray _img, double* minVal, double* maxVal,
                Point* minLoc, Point* maxLoc, InputArray _mask )
{
    // Thin wrapper over _minMaxIdx above.
    // NOTE(review): the data is always read as float, so this assumes
    // _img is CV_32F (single channel) -- confirm at the call sites.
    // NOTE(review): minLoc/maxLoc are reinterpreted as size_t* and receive
    // a single 1-based LINEAR index (startIdx == 1), not an (x, y) pair.
    // This relies on sizeof(Point) matching sizeof(size_t) and on callers
    // decoding the value -- verify before reusing this helper elsewhere.
    Mat img = _img.getMat();
    Mat mask = _mask.getMat();
    CV_Assert(img.dims <= 2);

    _minMaxIdx((const float*)img.data, mask.data, minVal, maxVal, (size_t*)minLoc, (size_t*)maxLoc, (int)img.total(),1);
    // Swap the Point fields so the index ends up in the other coordinate;
    // presumably compensating for the aliasing above -- confirm intent.
    if( minLoc )
        std::swap(minLoc->x, minLoc->y);
    if( maxLoc )
        std::swap(maxLoc->x, maxLoc->y);
}
3258
3259
}
3260
3261