Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Tetragramm
GitHub Repository: Tetragramm/opencv
Path: blob/master/modules/core/src/copy.cpp
16337 views
1
/*M///////////////////////////////////////////////////////////////////////////////////////
2
//
3
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4
//
5
// By downloading, copying, installing or using the software you agree to this license.
6
// If you do not agree to this license, do not download, install,
7
// copy or use the software.
8
//
9
// License Agreement
10
// For Open Source Computer Vision Library
11
//
12
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
13
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
14
// Copyright (C) 2014, Itseez Inc., all rights reserved.
15
// Third party copyrights are property of their respective owners.
16
//
17
// Redistribution and use in source and binary forms, with or without modification,
18
// are permitted provided that the following conditions are met:
19
//
20
// * Redistribution's of source code must retain the above copyright notice,
21
// this list of conditions and the following disclaimer.
22
//
23
// * Redistribution's in binary form must reproduce the above copyright notice,
24
// this list of conditions and the following disclaimer in the documentation
25
// and/or other materials provided with the distribution.
26
//
27
// * The name of the copyright holders may not be used to endorse or promote products
28
// derived from this software without specific prior written permission.
29
//
30
// This software is provided by the copyright holders and contributors "as is" and
31
// any express or implied warranties, including, but not limited to, the implied
32
// warranties of merchantability and fitness for a particular purpose are disclaimed.
33
// In no event shall the Intel Corporation or contributors be liable for any direct,
34
// indirect, incidental, special, exemplary, or consequential damages
35
// (including, but not limited to, procurement of substitute goods or services;
36
// loss of use, data, or profits; or business interruption) however caused
37
// and on any theory of liability, whether in contract, strict liability,
38
// or tort (including negligence or otherwise) arising in any way out of
39
// the use of this software, even if advised of the possibility of such damage.
40
//
41
//M*/
42
43
/* ////////////////////////////////////////////////////////////////////
44
//
45
// Mat basic operations: Copy, Set
46
//
47
// */
48
49
#include "precomp.hpp"
50
#include "opencl_kernels_core.hpp"
51
52
53
namespace cv
54
{
55
56
template<typename T> static void
copyMask_(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size)
{
    // Generic scalar masked copy for element type T: per row, dst[x] = src[x]
    // wherever mask[x] != 0. Steps are byte strides; width is in elements.
    for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep )
    {
        const T* src = (const T*)_src;
        T* dst = (T*)_dst;
        int x = 0;
#if CV_ENABLE_UNROLLED
        // Manual 4x unrolling of the masked-copy loop.
        for( ; x <= size.width - 4; x += 4 )
        {
            if( mask[x] )
                dst[x] = src[x];
            if( mask[x+1] )
                dst[x+1] = src[x+1];
            if( mask[x+2] )
                dst[x+2] = src[x+2];
            if( mask[x+3] )
                dst[x+3] = src[x+3];
        }
#endif
        // Tail loop for the remaining elements.
        for( ; x < size.width; x++ )
            if( mask[x] )
                dst[x] = src[x];
    }
}
82
83
template<> void
copyMask_<uchar>(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size)
{
    // 8-bit specialization: try the IPP masked-copy primitive first (returns
    // from the function on success), then a SIMD select loop, then scalar tail.
    CV_IPP_RUN_FAST(CV_INSTRUMENT_FUN_IPP(ippiCopy_8u_C1MR, _src, (int)sstep, _dst, (int)dstep, ippiSize(size), mask, (int)mstep) >= 0)

    for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep )
    {
        const uchar* src = (const uchar*)_src;
        uchar* dst = (uchar*)_dst;
        int x = 0;
#if CV_SIMD
        {
            v_uint8 v_zero = vx_setzero_u8();

            // Vector select: where (mask == 0) keep dst, otherwise take src.
            for( ; x <= size.width - v_uint8::nlanes; x += v_uint8::nlanes )
            {
                v_uint8 v_src = vx_load(src + x),
                        v_dst = vx_load(dst + x),
                        v_nmask = vx_load(mask + x) == v_zero;  // inverted mask

                v_dst = v_select(v_nmask, v_dst, v_src);
                v_store(dst + x, v_dst);
            }
        }
        vx_cleanup();
#endif
        // Scalar tail for the remaining elements.
        for( ; x < size.width; x++ )
            if( mask[x] )
                dst[x] = src[x];
    }
}
114
115
template<> void
copyMask_<ushort>(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size)
{
    // 16-bit specialization: IPP fast path first, then a SIMD loop that widens
    // the 8-bit mask to 16 bits via zip before the select, then scalar tail.
    CV_IPP_RUN_FAST(CV_INSTRUMENT_FUN_IPP(ippiCopy_16u_C1MR, (const Ipp16u *)_src, (int)sstep, (Ipp16u *)_dst, (int)dstep, ippiSize(size), mask, (int)mstep) >= 0)

    for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep )
    {
        const ushort* src = (const ushort*)_src;
        ushort* dst = (ushort*)_dst;
        int x = 0;
#if CV_SIMD
        {
            v_uint8 v_zero = vx_setzero_u8();

            // One v_uint8 of mask covers two v_uint16 vectors of data.
            for( ; x <= size.width - v_uint8::nlanes; x += v_uint8::nlanes )
            {
                v_uint16 v_src1 = vx_load(src + x), v_src2 = vx_load(src + x + v_uint16::nlanes),
                         v_dst1 = vx_load(dst + x), v_dst2 = vx_load(dst + x + v_uint16::nlanes);

                v_uint8 v_nmask1, v_nmask2;
                v_uint8 v_nmask = vx_load(mask + x) == v_zero;  // inverted mask
                // Duplicate each mask byte into a 16-bit lane pair.
                v_zip(v_nmask, v_nmask, v_nmask1, v_nmask2);

                v_dst1 = v_select(v_reinterpret_as_u16(v_nmask1), v_dst1, v_src1);
                v_dst2 = v_select(v_reinterpret_as_u16(v_nmask2), v_dst2, v_src2);
                v_store(dst + x, v_dst1);
                v_store(dst + x + v_uint16::nlanes, v_dst2);
            }
        }
        vx_cleanup();
#endif
        // Scalar tail for the remaining elements.
        for( ; x < size.width; x++ )
            if( mask[x] )
                dst[x] = src[x];
    }
}
151
152
static void
153
copyMaskGeneric(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size, void* _esz)
154
{
155
size_t k, esz = *(size_t*)_esz;
156
for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep )
157
{
158
const uchar* src = _src;
159
uchar* dst = _dst;
160
int x = 0;
161
for( ; x < size.width; x++, src += esz, dst += esz )
162
{
163
if( !mask[x] )
164
continue;
165
for( k = 0; k < esz; k++ )
166
dst[k] = src[k];
167
}
168
}
169
}
170
171
172
// Instantiate a masked-copy kernel for one element type using the generic
// scalar/SIMD template implementation above.
#define DEF_COPY_MASK(suffix, type) \
static void copyMask##suffix(const uchar* src, size_t sstep, const uchar* mask, size_t mstep, \
                             uchar* dst, size_t dstep, Size size, void*) \
{ \
    copyMask_<type>(src, sstep, mask, mstep, dst, dstep, size); \
}

#if defined HAVE_IPP
// Same, but first try the matching IPP masked-copy primitive; fall through to
// the template implementation if IPP declines or fails.
#define DEF_COPY_MASK_F(suffix, type, ippfavor, ipptype) \
static void copyMask##suffix(const uchar* src, size_t sstep, const uchar* mask, size_t mstep, \
                             uchar* dst, size_t dstep, Size size, void*) \
{ \
    CV_IPP_RUN_FAST(CV_INSTRUMENT_FUN_IPP(ippiCopy_##ippfavor, (const ipptype *)src, (int)sstep, (ipptype *)dst, (int)dstep, ippiSize(size), (const Ipp8u *)mask, (int)mstep) >= 0)\
    copyMask_<type>(src, sstep, mask, mstep, dst, dstep, size); \
}
#else
// Without IPP the "fast" variant degenerates to the plain template call.
#define DEF_COPY_MASK_F(suffix, type, ippfavor, ipptype) \
static void copyMask##suffix(const uchar* src, size_t sstep, const uchar* mask, size_t mstep, \
                             uchar* dst, size_t dstep, Size size, void*) \
{ \
    copyMask_<type>(src, sstep, mask, mstep, dst, dstep, size); \
}
#endif

#if IPP_VERSION_X100 == 901 // bug in IPP 9.0.1
// Avoid the buggy IPP 3-channel masked-copy routines in this exact version.
DEF_COPY_MASK(32sC3, Vec3i)
DEF_COPY_MASK(8uC3, Vec3b)
#else
DEF_COPY_MASK_F(8uC3, Vec3b, 8u_C3MR, Ipp8u)
DEF_COPY_MASK_F(32sC3, Vec3i, 32s_C3MR, Ipp32s)
#endif
DEF_COPY_MASK(8u, uchar)
DEF_COPY_MASK(16u, ushort)
DEF_COPY_MASK_F(32s, int, 32s_C1MR, Ipp32s)
DEF_COPY_MASK_F(16uC3, Vec3s, 16u_C3MR, Ipp16u)
DEF_COPY_MASK(32sC2, Vec2i)
DEF_COPY_MASK_F(32sC4, Vec4i, 32s_C4MR, Ipp32s)
DEF_COPY_MASK(32sC6, Vec6i)
DEF_COPY_MASK(32sC8, Vec8i)
211
212
// Dispatch table of specialized masked-copy kernels, indexed by element size
// in bytes (valid indices 0..32). Null entries have no specialized kernel and
// fall back to copyMaskGeneric.
BinaryFunc copyMaskTab[] =
{
    0,
    copyMask8u,
    copyMask16u,
    copyMask8uC3,
    copyMask32s,
    0,
    copyMask16uC3,
    0,
    copyMask32sC2,
    0, 0, 0,
    copyMask32sC3,
    0, 0, 0,
    copyMask32sC4,
    0, 0, 0, 0, 0, 0, 0,
    copyMask32sC6,
    0, 0, 0, 0, 0, 0, 0,
    copyMask32sC8
};

// Return the masked-copy kernel for the given element size (in bytes).
BinaryFunc getCopyMaskFunc(size_t esz)
{
    return esz <= 32 && copyMaskTab[esz] ? copyMaskTab[esz] : copyMaskGeneric;
}
237
238
/* dst = src */
239
void Mat::copyTo( OutputArray _dst ) const
{
    // Unmasked deep copy of this matrix into _dst, dispatching on the
    // destination kind (GpuMat / UMat / Mat) and dimensionality.
    CV_INSTRUMENT_REGION();

#ifdef HAVE_CUDA
    // GpuMat destination: the copy is an upload to device memory.
    if (_dst.isGpuMat())
    {
        _dst.getGpuMat().upload(*this);
        return;
    }
#endif

    int dtype = _dst.type();
    // A fixed-type destination of a different type gets a conversion instead
    // of a raw copy; only the channel count must match.
    if( _dst.fixedType() && dtype != type() )
    {
        CV_Assert( channels() == CV_MAT_CN(dtype) );
        convertTo( _dst, dtype );
        return;
    }

    // Copying an empty matrix releases the destination.
    if( empty() )
    {
        _dst.release();
        return;
    }

    if( _dst.isUMat() )
    {
        // UMat destination: upload through the destination's allocator.
        _dst.create( dims, size.p, type() );
        UMat dst = _dst.getUMat();
        CV_Assert(dst.u != NULL);
        size_t i, sz[CV_MAX_DIM] = {0}, dstofs[CV_MAX_DIM], esz = elemSize();
        CV_Assert(dims > 0 && dims < CV_MAX_DIM);
        for( i = 0; i < (size_t)dims; i++ )
            sz[i] = size.p[i];
        sz[dims-1] *= esz;      // innermost dimension expressed in bytes
        dst.ndoffset(dstofs);
        dstofs[dims-1] *= esz;
        dst.u->currAllocator->upload(dst.u, data, dims, sz, dstofs, dst.step.p, step.p);
        return;
    }

    if( dims <= 2 )
    {
        _dst.create( rows, cols, type() );
        Mat dst = _dst.getMat();
        if( data == dst.data )
            return;  // self-copy is a no-op

        if( rows > 0 && cols > 0 )
        {
            // For some cases (with vector) dst.size != src.size, so force to column-based form
            // It prevents memory corruption in case of column-based src
            if (_dst.isVector())
                dst = dst.reshape(0, (int)dst.total());

            const uchar* sptr = data;
            uchar* dptr = dst.data;

#if IPP_VERSION_X100 >= 201700
            // IPP fast path: bulk byte-copy of the 2D region.
            CV_IPP_RUN_FAST(CV_INSTRUMENT_FUN_IPP(ippiCopy_8u_C1R_L, sptr, (int)step, dptr, (int)dst.step, ippiSizeL((int)(cols*elemSize()), rows)) >= 0)
#endif

            // Merge contiguous rows where possible, then memcpy row by row.
            Size sz = getContinuousSize(*this, dst);
            size_t len = sz.width*elemSize();

            for( ; sz.height--; sptr += step, dptr += dst.step )
                memcpy( dptr, sptr, len );
        }
        return;
    }

    // N-dimensional case: iterate over contiguous planes, memcpy each one.
    _dst.create( dims, size, type() );
    Mat dst = _dst.getMat();
    if( data == dst.data )
        return;

    if( total() != 0 )
    {
        const Mat* arrays[] = { this, &dst };
        uchar* ptrs[2] = {};
        NAryMatIterator it(arrays, ptrs, 2);
        size_t sz = it.size*elemSize();

        for( size_t i = 0; i < it.nplanes; i++, ++it )
            memcpy(ptrs[1], ptrs[0], sz);
    }
}
327
328
#ifdef HAVE_IPP
329
static bool ipp_copyTo(const Mat &src, Mat &dst, const Mat &mask)
330
{
331
#ifdef HAVE_IPP_IW_LL
332
CV_INSTRUMENT_REGION_IPP();
333
334
if(mask.channels() > 1 || mask.depth() != CV_8U)
335
return false;
336
337
if (src.dims <= 2)
338
{
339
IppiSize size = ippiSize(src.size());
340
return CV_INSTRUMENT_FUN_IPP(llwiCopyMask, src.ptr(), (int)src.step, dst.ptr(), (int)dst.step, size, (int)src.elemSize1(), src.channels(), mask.ptr(), (int)mask.step) >= 0;
341
}
342
else
343
{
344
const Mat *arrays[] = {&src, &dst, &mask, NULL};
345
uchar *ptrs[3] = {NULL};
346
NAryMatIterator it(arrays, ptrs);
347
348
IppiSize size = ippiSize(it.size, 1);
349
350
for (size_t i = 0; i < it.nplanes; i++, ++it)
351
{
352
if(CV_INSTRUMENT_FUN_IPP(llwiCopyMask, ptrs[0], 0, ptrs[1], 0, size, (int)src.elemSize1(), src.channels(), ptrs[2], 0) < 0)
353
return false;
354
}
355
return true;
356
}
357
#else
358
CV_UNUSED(src); CV_UNUSED(dst); CV_UNUSED(mask);
359
return false;
360
#endif
361
}
362
#endif
363
364
void Mat::copyTo( OutputArray _dst, InputArray _mask ) const
{
    // Masked copy: dst elements are overwritten only where mask is non-zero.
    CV_INSTRUMENT_REGION();

    Mat mask = _mask.getMat();
    // An empty mask degenerates to the plain unmasked copy.
    if( !mask.data )
    {
        copyTo(_dst);
        return;
    }

    int cn = channels(), mcn = mask.channels();
    // Mask must be 8-bit and either single-channel or per-channel.
    CV_Assert( mask.depth() == CV_8U && (mcn == 1 || mcn == cn) );
    bool colorMask = mcn > 1;
    if( dims <= 2 )
    {
        CV_Assert( size() == mask.size() );
    }

    Mat dst;
    {
        Mat dst0 = _dst.getMat();
        _dst.create(dims, size, type()); // TODO Prohibit 'dst' re-creation, user should pass it explicitly with correct size/type or empty
        dst = _dst.getMat();

        if (dst.data != dst0.data) // re-allocation happened
        {
#ifdef OPENCV_FUTURE
            CV_Assert(dst0.empty() &&
                "copyTo(): dst size/type mismatch (looks like a bug) - use dst.release() before copyTo() call to suppress this message");
#endif
            dst = Scalar(0); // do not leave dst uninitialized
        }
    }

    CV_IPP_RUN_FAST(ipp_copyTo(*this, dst, mask))

    // With a per-channel mask the copy unit is one channel, not a full pixel.
    size_t esz = colorMask ? elemSize1() : elemSize();
    BinaryFunc copymask = getCopyMaskFunc(esz);

    if( dims <= 2 )
    {
        Size sz = getContinuousSize(*this, dst, mask, mcn);
        copymask(data, step, mask.data, mask.step, dst.data, dst.step, sz, &esz);
        return;
    }

    // N-dimensional case: apply the kernel to each contiguous plane.
    const Mat* arrays[] = { this, &dst, &mask, 0 };
    uchar* ptrs[3] = {};
    NAryMatIterator it(arrays, ptrs);
    Size sz((int)(it.size*mcn), 1);

    for( size_t i = 0; i < it.nplanes; i++, ++it )
        copymask(ptrs[0], 0, ptrs[2], 0, ptrs[1], 0, sz, &esz);
}
419
420
Mat& Mat::operator = (const Scalar& s)
{
    // Fill the whole matrix with scalar s (no mask).
    CV_INSTRUMENT_REGION();

    if (this->empty())
        return *this;

    const Mat* arrays[] = { this };
    uchar* dptr;
    NAryMatIterator it(arrays, &dptr, 1);
    size_t elsize = it.size*elemSize();
    // View the four double lanes of the scalar as raw int64 bit patterns so
    // an all-zero scalar (+0.0 everywhere) can be detected with integer
    // compares and handled with memset. (-0.0 has a non-zero pattern and
    // simply takes the general path, which is still correct.)
    const int64* is = (const int64*)&s.val[0];

    if( is[0] == 0 && is[1] == 0 && is[2] == 0 && is[3] == 0 )
    {
        // Zero fill: memset each contiguous plane.
        for( size_t i = 0; i < it.nplanes; i++, ++it )
            memset( dptr, 0, elsize );
    }
    else
    {
        if( it.nplanes > 0 )
        {
            // Unroll the scalar into a 12-channel raw byte pattern and tile
            // the first plane with it.
            double scalar[12];
            scalarToRawData(s, scalar, type(), 12);
            size_t blockSize = 12*elemSize1();

            for( size_t j = 0; j < elsize; j += blockSize )
            {
                size_t sz = MIN(blockSize, elsize - j);
                CV_Assert(sz <= sizeof(scalar));
                memcpy( dptr + j, scalar, sz );
            }
        }

        // Remaining planes are copies of the fully initialized first plane.
        for( size_t i = 1; i < it.nplanes; i++ )
        {
            ++it;
            memcpy( dptr, data, elsize );
        }
    }
    return *this;
}
462
463
#ifdef HAVE_IPP
464
static bool ipp_Mat_setTo_Mat(Mat &dst, Mat &_val, Mat &mask)
465
{
466
#ifdef HAVE_IPP_IW_LL
467
CV_INSTRUMENT_REGION_IPP();
468
469
if(mask.empty())
470
return false;
471
472
if(mask.depth() != CV_8U || mask.channels() > 1)
473
return false;
474
475
if(dst.channels() > 4)
476
return false;
477
478
if (dst.depth() == CV_32F)
479
{
480
for (int i = 0; i < (int)(_val.total()); i++)
481
{
482
float v = (float)(_val.at<double>(i)); // cast to float
483
if (cvIsNaN(v) || cvIsInf(v)) // accept finite numbers only
484
return false;
485
}
486
}
487
488
if(dst.dims <= 2)
489
{
490
IppiSize size = ippiSize(dst.size());
491
IppDataType dataType = ippiGetDataType(dst.depth());
492
::ipp::IwValueFloat s;
493
convertAndUnrollScalar(_val, CV_MAKETYPE(CV_64F, dst.channels()), (uchar*)((Ipp64f*)s), 1);
494
495
return CV_INSTRUMENT_FUN_IPP(llwiSetMask, s, dst.ptr(), (int)dst.step, size, dataType, dst.channels(), mask.ptr(), (int)mask.step) >= 0;
496
}
497
else
498
{
499
const Mat *arrays[] = {&dst, mask.empty()?NULL:&mask, NULL};
500
uchar *ptrs[2] = {NULL};
501
NAryMatIterator it(arrays, ptrs);
502
503
IppiSize size = {(int)it.size, 1};
504
IppDataType dataType = ippiGetDataType(dst.depth());
505
::ipp::IwValueFloat s;
506
convertAndUnrollScalar(_val, CV_MAKETYPE(CV_64F, dst.channels()), (uchar*)((Ipp64f*)s), 1);
507
508
for( size_t i = 0; i < it.nplanes; i++, ++it)
509
{
510
if(CV_INSTRUMENT_FUN_IPP(llwiSetMask, s, ptrs[0], 0, size, dataType, dst.channels(), ptrs[1], 0) < 0)
511
return false;
512
}
513
return true;
514
}
515
#else
516
CV_UNUSED(dst); CV_UNUSED(_val); CV_UNUSED(mask);
517
return false;
518
#endif
519
}
520
#endif
521
522
Mat& Mat::setTo(InputArray _value, InputArray _mask)
{
    // Set every element (or every masked element) of this matrix to _value.
    CV_INSTRUMENT_REGION();

    if( empty() )
        return *this;

    Mat value = _value.getMat(), mask = _mask.getMat();

    CV_Assert( checkScalar(value, type(), _value.kind(), _InputArray::MAT ));
    int cn = channels(), mcn = mask.channels();
    CV_Assert( mask.empty() || (mask.depth() == CV_8U && (mcn == 1 || mcn == cn) && size == mask.size) );

    CV_IPP_RUN_FAST(ipp_Mat_setTo_Mat(*this, value, mask), *this)

    // With a per-channel mask the copy unit is one channel, not a full pixel.
    size_t esz = mcn > 1 ? elemSize1() : elemSize();
    BinaryFunc copymask = getCopyMaskFunc(esz);

    const Mat* arrays[] = { this, !mask.empty() ? &mask : 0, 0 };
    uchar* ptrs[2]={0,0};
    NAryMatIterator it(arrays, ptrs);
    int totalsz = (int)it.size*mcn;
    // Fill in blocks of up to BLOCK_SIZE bytes from a reusable scalar buffer.
    int blockSize0 = std::min(totalsz, (int)((BLOCK_SIZE + esz-1)/esz));
    blockSize0 -= blockSize0 % mcn; // must be divisible without remainder for unrolling and advancing
    AutoBuffer<uchar> _scbuf(blockSize0*esz + 32);
    uchar* scbuf = alignPtr((uchar*)_scbuf.data(), (int)sizeof(double));
    // Replicate the scalar across the whole block once, up front.
    convertAndUnrollScalar( value, type(), scbuf, blockSize0/mcn );

    for( size_t i = 0; i < it.nplanes; i++, ++it )
    {
        for( int j = 0; j < totalsz; j += blockSize0 )
        {
            Size sz(std::min(blockSize0, totalsz - j), 1);
            size_t blockSize = sz.width*esz;
            if( ptrs[1] )
            {
                // Masked fill: reuse the masked-copy kernel with the scalar
                // buffer as the source.
                copymask(scbuf, 0, ptrs[1], 0, ptrs[0], 0, sz, &esz);
                ptrs[1] += sz.width;
            }
            else
                memcpy(ptrs[0], scbuf, blockSize);  // unmasked: bulk copy
            ptrs[0] += blockSize;
        }
    }
    return *this;
}
568
569
570
static void
571
flipHoriz( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size size, size_t esz )
572
{
573
int i, j, limit = (int)(((size.width + 1)/2)*esz);
574
AutoBuffer<int> _tab(size.width*esz);
575
int* tab = _tab.data();
576
577
for( i = 0; i < size.width; i++ )
578
for( size_t k = 0; k < esz; k++ )
579
tab[i*esz + k] = (int)((size.width - i - 1)*esz + k);
580
581
for( ; size.height--; src += sstep, dst += dstep )
582
{
583
for( i = 0; i < limit; i++ )
584
{
585
j = tab[i];
586
uchar t0 = src[i], t1 = src[j];
587
dst[i] = t1; dst[j] = t0;
588
}
589
}
590
}
591
592
static void
flipVert( const uchar* src0, size_t sstep, uchar* dst0, size_t dstep, Size size, size_t esz )
{
    // Vertical (up-down) flip: walk rows from both ends towards the middle and
    // swap them. Values are read into temporaries before either row is
    // written, so the routine also works in-place.
    const uchar* src1 = src0 + (size.height - 1)*sstep;
    uchar* dst1 = dst0 + (size.height - 1)*dstep;
    size.width *= (int)esz;  // from here on, width is measured in bytes

    for( int y = 0; y < (size.height + 1)/2; y++, src0 += sstep, src1 -= sstep,
                                                  dst0 += dstep, dst1 -= dstep )
    {
        int i = 0;
        // Swap int-sized chunks when all four row pointers are int-aligned.
        if( ((size_t)src0|(size_t)dst0|(size_t)src1|(size_t)dst1) % sizeof(int) == 0 )
        {
            // 16-byte (4 x int) unrolled swap.
            for( ; i <= size.width - 16; i += 16 )
            {
                int t0 = ((int*)(src0 + i))[0];
                int t1 = ((int*)(src1 + i))[0];

                ((int*)(dst0 + i))[0] = t1;
                ((int*)(dst1 + i))[0] = t0;

                t0 = ((int*)(src0 + i))[1];
                t1 = ((int*)(src1 + i))[1];

                ((int*)(dst0 + i))[1] = t1;
                ((int*)(dst1 + i))[1] = t0;

                t0 = ((int*)(src0 + i))[2];
                t1 = ((int*)(src1 + i))[2];

                ((int*)(dst0 + i))[2] = t1;
                ((int*)(dst1 + i))[2] = t0;

                t0 = ((int*)(src0 + i))[3];
                t1 = ((int*)(src1 + i))[3];

                ((int*)(dst0 + i))[3] = t1;
                ((int*)(dst1 + i))[3] = t0;
            }

            // Remaining whole ints.
            for( ; i <= size.width - 4; i += 4 )
            {
                int t0 = ((int*)(src0 + i))[0];
                int t1 = ((int*)(src1 + i))[0];

                ((int*)(dst0 + i))[0] = t1;
                ((int*)(dst1 + i))[0] = t0;
            }
        }

        // Byte-wise tail (also covers the unaligned case entirely).
        for( ; i < size.width; i++ )
        {
            uchar t0 = src0[i];
            uchar t1 = src1[i];

            dst0[i] = t1;
            dst1[i] = t0;
        }
    }
}
652
653
#ifdef HAVE_OPENCL
654
655
enum { FLIP_COLS = 1 << 0, FLIP_ROWS = 1 << 1, FLIP_BOTH = FLIP_ROWS | FLIP_COLS };
656
657
static bool ocl_flip(InputArray _src, OutputArray _dst, int flipCode )
658
{
659
CV_Assert(flipCode >= -1 && flipCode <= 1);
660
661
const ocl::Device & dev = ocl::Device::getDefault();
662
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type),
663
flipType, kercn = std::min(ocl::predictOptimalVectorWidth(_src, _dst), 4);
664
665
bool doubleSupport = dev.doubleFPConfig() > 0;
666
if (!doubleSupport && depth == CV_64F)
667
kercn = cn;
668
669
if (cn > 4)
670
return false;
671
672
const char * kernelName;
673
if (flipCode == 0)
674
kernelName = "arithm_flip_rows", flipType = FLIP_ROWS;
675
else if (flipCode > 0)
676
kernelName = "arithm_flip_cols", flipType = FLIP_COLS;
677
else
678
kernelName = "arithm_flip_rows_cols", flipType = FLIP_BOTH;
679
680
int pxPerWIy = (dev.isIntel() && (dev.type() & ocl::Device::TYPE_GPU)) ? 4 : 1;
681
kercn = (cn!=3 || flipType == FLIP_ROWS) ? std::max(kercn, cn) : cn;
682
683
ocl::Kernel k(kernelName, ocl::core::flip_oclsrc,
684
format( "-D T=%s -D T1=%s -D cn=%d -D PIX_PER_WI_Y=%d -D kercn=%d",
685
kercn != cn ? ocl::typeToStr(CV_MAKE_TYPE(depth, kercn)) : ocl::vecopTypeToStr(CV_MAKE_TYPE(depth, kercn)),
686
kercn != cn ? ocl::typeToStr(depth) : ocl::vecopTypeToStr(depth), cn, pxPerWIy, kercn));
687
if (k.empty())
688
return false;
689
690
Size size = _src.size();
691
_dst.create(size, type);
692
UMat src = _src.getUMat(), dst = _dst.getUMat();
693
694
int cols = size.width * cn / kercn, rows = size.height;
695
cols = flipType == FLIP_COLS ? (cols + 1) >> 1 : cols;
696
rows = flipType & FLIP_ROWS ? (rows + 1) >> 1 : rows;
697
698
k.args(ocl::KernelArg::ReadOnlyNoSize(src),
699
ocl::KernelArg::WriteOnly(dst, cn, kercn), rows, cols);
700
701
size_t maxWorkGroupSize = dev.maxWorkGroupSize();
702
CV_Assert(maxWorkGroupSize % 4 == 0);
703
704
size_t globalsize[2] = { (size_t)cols, ((size_t)rows + pxPerWIy - 1) / pxPerWIy },
705
localsize[2] = { maxWorkGroupSize / 4, 4 };
706
return k.run(2, globalsize, (flipType == FLIP_COLS) && !dev.isIntel() ? localsize : NULL, false);
707
}
708
709
#endif
710
711
#if defined HAVE_IPP
// IPP-backed flip. Returns false when IPP-IW is unavailable or the mirror
// operation throws, so the caller can fall back to the plain implementation.
static bool ipp_flip(Mat &src, Mat &dst, int flip_mode)
{
#ifdef HAVE_IPP_IW
    CV_INSTRUMENT_REGION_IPP();

    // Translate OpenCV's flip code into the corresponding IPP mirror axis:
    // negative -> both axes, zero -> horizontal axis, positive -> vertical.
    const IppiAxis ippMode = (flip_mode < 0) ? ippAxsBoth
                           : (flip_mode == 0) ? ippAxsHorizontal
                                              : ippAxsVertical;

    try
    {
        ::ipp::IwiImage iwSrc = ippiGetImage(src);
        ::ipp::IwiImage iwDst = ippiGetImage(dst);

        CV_INSTRUMENT_FUN_IPP(::ipp::iwiMirror, iwSrc, iwDst, ippMode);
    }
    catch(const ::ipp::IwException &)
    {
        return false;
    }

    return true;
#else
    CV_UNUSED(src); CV_UNUSED(dst); CV_UNUSED(flip_mode);
    return false;
#endif
}
#endif
744
745
746
void flip( InputArray _src, OutputArray _dst, int flip_mode )
{
    // Flip a 2D array around the vertical axis (flip_mode > 0), horizontal
    // axis (flip_mode == 0), or both (flip_mode < 0).
    CV_INSTRUMENT_REGION();

    CV_Assert( _src.dims() <= 2 );
    Size size = _src.size();

    // Flipping a single column about the vertical axis (or a single row about
    // the horizontal axis) is the identity, so normalize "both" down to the
    // single remaining meaningful axis.
    if (flip_mode < 0)
    {
        if (size.width == 1)
            flip_mode = 0;
        if (size.height == 1)
            flip_mode = 1;
    }

    // Fully degenerate cases reduce to a plain copy.
    if ((size.width == 1 && flip_mode > 0) ||
        (size.height == 1 && flip_mode == 0) ||
        (size.height == 1 && size.width == 1 && flip_mode < 0))
    {
        return _src.copyTo(_dst);
    }

    CV_OCL_RUN( _dst.isUMat(), ocl_flip(_src, _dst, flip_mode))

    Mat src = _src.getMat();
    int type = src.type();
    _dst.create( size, type );
    Mat dst = _dst.getMat();

    CV_IPP_RUN_FAST(ipp_flip(src, dst, flip_mode));

    size_t esz = CV_ELEM_SIZE(type);

    // <= 0: vertical flip first (covers both "horizontal axis" and "both").
    if( flip_mode <= 0 )
        flipVert( src.ptr(), src.step, dst.ptr(), dst.step, src.size(), esz );
    else
        flipHoriz( src.ptr(), src.step, dst.ptr(), dst.step, src.size(), esz );

    // "Both axes": finish with an in-place horizontal flip of dst.
    if( flip_mode < 0 )
        flipHoriz( dst.ptr(), dst.step, dst.ptr(), dst.step, dst.size(), esz );
}
787
788
#ifdef HAVE_OPENCL

// OpenCL path of cv::rotate: every 90-degree-multiple rotation is expressed
// through transpose and/or flip, both of which have OpenCL implementations.
// Unknown modes are silently ignored; always reports success.
static bool ocl_rotate(InputArray _src, OutputArray _dst, int rotateMode)
{
    if (rotateMode == ROTATE_90_CLOCKWISE)
    {
        transpose(_src, _dst);
        flip(_dst, _dst, 1);
    }
    else if (rotateMode == ROTATE_180)
    {
        flip(_src, _dst, -1);
    }
    else if (rotateMode == ROTATE_90_COUNTERCLOCKWISE)
    {
        transpose(_src, _dst);
        flip(_dst, _dst, 0);
    }
    return true;
}
#endif
811
812
void rotate(InputArray _src, OutputArray _dst, int rotateMode)
{
    // Rotate a 2D array by a multiple of 90 degrees, implemented as a
    // combination of transpose and flip. Unknown modes are silently ignored.
    CV_Assert(_src.dims() <= 2);

    CV_OCL_RUN(_dst.isUMat(), ocl_rotate(_src, _dst, rotateMode))

    if (rotateMode == ROTATE_90_CLOCKWISE)
    {
        transpose(_src, _dst);
        flip(_dst, _dst, 1);
    }
    else if (rotateMode == ROTATE_180)
    {
        flip(_src, _dst, -1);
    }
    else if (rotateMode == ROTATE_90_COUNTERCLOCKWISE)
    {
        transpose(_src, _dst);
        flip(_dst, _dst, 0);
    }
}
835
836
#if defined HAVE_OPENCL && !defined __APPLE__
837
838
static bool ocl_repeat(InputArray _src, int ny, int nx, OutputArray _dst)
839
{
840
if (ny == 1 && nx == 1)
841
{
842
_src.copyTo(_dst);
843
return true;
844
}
845
846
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type),
847
rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1,
848
kercn = ocl::predictOptimalVectorWidth(_src, _dst);
849
850
ocl::Kernel k("repeat", ocl::core::repeat_oclsrc,
851
format("-D T=%s -D nx=%d -D ny=%d -D rowsPerWI=%d -D cn=%d",
852
ocl::memopTypeToStr(CV_MAKE_TYPE(depth, kercn)),
853
nx, ny, rowsPerWI, kercn));
854
if (k.empty())
855
return false;
856
857
UMat src = _src.getUMat(), dst = _dst.getUMat();
858
k.args(ocl::KernelArg::ReadOnly(src, cn, kercn), ocl::KernelArg::WriteOnlyNoSize(dst));
859
860
size_t globalsize[] = { (size_t)src.cols * cn / kercn, ((size_t)src.rows + rowsPerWI - 1) / rowsPerWI };
861
return k.run(2, globalsize, NULL, false);
862
}
863
864
#endif
865
866
void repeat(InputArray _src, int ny, int nx, OutputArray _dst)
{
    // Tile _src ny times vertically and nx times horizontally into _dst.
    CV_INSTRUMENT_REGION();

    CV_Assert(_src.getObj() != _dst.getObj());  // in-place tiling unsupported
    CV_Assert( _src.dims() <= 2 );
    CV_Assert( ny > 0 && nx > 0 );

    Size ssize = _src.size();
    _dst.create(ssize.height*ny, ssize.width*nx, _src.type());

#if !defined __APPLE__
    CV_OCL_RUN(_dst.isUMat(),
        ocl_repeat(_src, ny, nx, _dst))
#endif

    Mat src = _src.getMat(), dst = _dst.getMat();
    Size dsize = dst.size();
    int esz = (int)src.elemSize();
    int x, y;
    // Widths are measured in bytes from here on.
    ssize.width *= esz; dsize.width *= esz;

    // First band: tile each source row horizontally across the destination.
    for( y = 0; y < ssize.height; y++ )
    {
        for( x = 0; x < dsize.width; x += ssize.width )
            memcpy( dst.ptr(y) + x, src.ptr(y), ssize.width );
    }

    // Remaining rows replicate the row one source-height above, which is
    // already fully tiled.
    for( ; y < dsize.height; y++ )
        memcpy( dst.ptr(y), dst.ptr(y - ssize.height), dsize.width );
}
897
898
Mat repeat(const Mat& src, int ny, int nx)
{
    // Convenience overload returning the tiled matrix by value.
    // A 1x1 tiling is the identity: return a header sharing the source data.
    if( ny == 1 && nx == 1 )
        return src;

    Mat tiled;
    repeat(src, ny, nx, tiled);
    return tiled;
}
906
907
908
} // cv
909
910
911
/*
912
Various border types, image boundaries are denoted with '|'
913
914
* BORDER_REPLICATE: aaaaaa|abcdefgh|hhhhhhh
915
* BORDER_REFLECT: fedcba|abcdefgh|hgfedcb
916
* BORDER_REFLECT_101: gfedcb|abcdefgh|gfedcba
917
* BORDER_WRAP: cdefgh|abcdefgh|abcdefg
918
* BORDER_CONSTANT: iiiiii|abcdefgh|iiiiiii with some specified 'i'
919
*/
920
int cv::borderInterpolate( int p, int len, int borderType )
{
    // Map an out-of-range coordinate p onto [0, len) according to borderType;
    // returns -1 for BORDER_CONSTANT (caller substitutes the constant value).
    CV_TRACE_FUNCTION_VERBOSE();

    // Fast path: p already inside [0, len) (unsigned compare covers p < 0).
    if( (unsigned)p < (unsigned)len )
        ;
    else if( borderType == BORDER_REPLICATE )
        p = p < 0 ? 0 : len - 1;  // clamp to the nearest edge pixel
    else if( borderType == BORDER_REFLECT || borderType == BORDER_REFLECT_101 )
    {
        // delta == 1 for REFLECT_101: the edge pixel itself is not repeated.
        int delta = borderType == BORDER_REFLECT_101;
        if( len == 1 )
            return 0;
        do
        {
            if( p < 0 )
                p = -p - 1 + delta;
            else
                p = len - 1 - (p - len) - delta;
        }
        while( (unsigned)p >= (unsigned)len );  // iterate for very distant p
    }
    else if( borderType == BORDER_WRAP )
    {
        CV_Assert(len > 0);
        // Wrap negative coordinates up into range, then reduce modulo len.
        if( p < 0 )
            p -= ((p-len+1)/len)*len;
        if( p >= len )
            p %= len;
    }
    else if( borderType == BORDER_CONSTANT )
        p = -1;  // sentinel: no source pixel, use the constant border value
    else
        CV_Error( CV_StsBadArg, "Unknown/unsupported border type" );
    return p;
}
956
957
namespace
958
{
959
960
void copyMakeBorder_8u( const uchar* src, size_t srcstep, cv::Size srcroi,
                        uchar* dst, size_t dststep, cv::Size dstroi,
                        int top, int left, int cn, int borderType )
{
    // Byte-level copyMakeBorder for all non-constant border types. 'cn' is
    // the pixel size in bytes; borders are synthesized via borderInterpolate.
    const int isz = (int)sizeof(int);
    int i, j, k, elemSize = 1;
    bool intMode = false;

    // If channel count, strides and both pointers are all int-aligned,
    // process int-sized units instead of single bytes.
    if( (cn | srcstep | dststep | (size_t)src | (size_t)dst) % isz == 0 )
    {
        cn /= isz;
        elemSize = isz;
        intMode = true;
    }

    // Precompute, for every left/right border column, the source index to
    // replicate from.
    cv::AutoBuffer<int> _tab((dstroi.width - srcroi.width)*cn);
    int* tab = _tab.data();
    int right = dstroi.width - srcroi.width - left;
    int bottom = dstroi.height - srcroi.height - top;

    for( i = 0; i < left; i++ )
    {
        j = cv::borderInterpolate(i - left, srcroi.width, borderType)*cn;
        for( k = 0; k < cn; k++ )
            tab[i*cn + k] = j + k;
    }

    for( i = 0; i < right; i++ )
    {
        j = cv::borderInterpolate(srcroi.width + i, srcroi.width, borderType)*cn;
        for( k = 0; k < cn; k++ )
            tab[(i+left)*cn + k] = j + k;
    }

    // Switch to per-unit (byte or int) measurements.
    srcroi.width *= cn;
    dstroi.width *= cn;
    left *= cn;
    right *= cn;

    uchar* dstInner = dst + dststep*top + left*elemSize;

    // Fill the interior and the left/right borders row by row.
    for( i = 0; i < srcroi.height; i++, dstInner += dststep, src += srcstep )
    {
        if( dstInner != src )
            memcpy(dstInner, src, srcroi.width*elemSize);

        if( intMode )
        {
            const int* isrc = (int*)src;
            int* idstInner = (int*)dstInner;
            for( j = 0; j < left; j++ )
                idstInner[j - left] = isrc[tab[j]];
            for( j = 0; j < right; j++ )
                idstInner[j + srcroi.width] = isrc[tab[j + left]];
        }
        else
        {
            for( j = 0; j < left; j++ )
                dstInner[j - left] = src[tab[j]];
            for( j = 0; j < right; j++ )
                dstInner[j + srcroi.width] = src[tab[j + left]];
        }
    }

    // Top/bottom borders are whole-row copies of already-completed rows.
    dstroi.width *= elemSize;
    dst += dststep*top;

    for( i = 0; i < top; i++ )
    {
        j = cv::borderInterpolate(i - top, srcroi.height, borderType);
        memcpy(dst + (i - top)*dststep, dst + j*dststep, dstroi.width);
    }

    for( i = 0; i < bottom; i++ )
    {
        j = cv::borderInterpolate(i + srcroi.height, srcroi.height, borderType);
        memcpy(dst + (i + srcroi.height)*dststep, dst + j*dststep, dstroi.width);
    }
}
1039
1040
1041
void copyMakeConstBorder_8u( const uchar* src, size_t srcstep, cv::Size srcroi,
                             uchar* dst, size_t dststep, cv::Size dstroi,
                             int top, int left, int cn, const uchar* value )
{
    // Byte-level copyMakeBorder for BORDER_CONSTANT. 'cn' is the pixel size
    // in bytes; 'value' is the cn-byte pattern for the border.
    int i, j;
    // Build one full destination row of the constant pattern; it serves both
    // as left/right filler and as a complete top/bottom border row.
    cv::AutoBuffer<uchar> _constBuf(dstroi.width*cn);
    uchar* constBuf = _constBuf.data();
    int right = dstroi.width - srcroi.width - left;
    int bottom = dstroi.height - srcroi.height - top;

    for( i = 0; i < dstroi.width; i++ )
    {
        for( j = 0; j < cn; j++ )
            constBuf[i*cn + j] = value[j];
    }

    // Widths measured in bytes from here on.
    srcroi.width *= cn;
    dstroi.width *= cn;
    left *= cn;
    right *= cn;

    uchar* dstInner = dst + dststep*top + left;

    // Interior rows: copy the source, pad both sides with the constant.
    for( i = 0; i < srcroi.height; i++, dstInner += dststep, src += srcstep )
    {
        if( dstInner != src )
            memcpy( dstInner, src, srcroi.width );
        memcpy( dstInner - left, constBuf, left );
        memcpy( dstInner + srcroi.width, constBuf, right );
    }

    dst += dststep*top;

    // Top and bottom borders are entire constant rows.
    for( i = 0; i < top; i++ )
        memcpy(dst + (i - top)*dststep, constBuf, dstroi.width);

    for( i = 0; i < bottom; i++ )
        memcpy(dst + (i + srcroi.height)*dststep, constBuf, dstroi.width);
}
1080
1081
}
1082
1083
#ifdef HAVE_OPENCL
1084
1085
namespace cv {
1086
1087
static bool ocl_copyMakeBorder( InputArray _src, OutputArray _dst, int top, int bottom,
                                int left, int right, int borderType, const Scalar& value )
{
    // OpenCL path for copyMakeBorder. Returns false when the request cannot be
    // handled here (unsupported border mode, more than 4 channels, kernel build
    // failure), letting the caller fall back to the CPU implementation.
    const int type = _src.type();
    const int cn = CV_MAT_CN(type);
    const int depth = CV_MAT_DEPTH(type);
    const int rowsPerWI = ocl::Device::getDefault().isIntel() ? 4 : 1;

    const bool isolated = (borderType & BORDER_ISOLATED) != 0;
    borderType &= ~cv::BORDER_ISOLATED;

    const bool supportedMode = borderType == BORDER_CONSTANT || borderType == BORDER_REPLICATE ||
                               borderType == BORDER_REFLECT || borderType == BORDER_WRAP ||
                               borderType == BORDER_REFLECT_101;
    if (!supportedMode || cn > 4)
        return false;

    // Map the border enum onto the matching OpenCL preprocessor define.
    const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", "BORDER_WRAP", "BORDER_REFLECT_101" };
    const int scalarcn = cn == 3 ? 4 : cn;  // 3-channel scalars are padded to 4 on the device
    const int sctype = CV_MAKETYPE(depth, scalarcn);
    String buildOptions = format("-D T=%s -D %s -D T1=%s -D cn=%d -D ST=%s -D rowsPerWI=%d",
                                 ocl::memopTypeToStr(type), borderMap[borderType],
                                 ocl::memopTypeToStr(depth), cn,
                                 ocl::memopTypeToStr(sctype), rowsPerWI);

    ocl::Kernel k("copyMakeBorder", ocl::core::copymakeborder_oclsrc, buildOptions);
    if (k.empty())
        return false;

    UMat src = _src.getUMat();
    if( src.isSubmatrix() && !isolated )
    {
        // The ROI sits inside a larger matrix: grow it over the real
        // neighbouring pixels where possible and shrink the synthetic
        // border by the same amount.
        Size wholeSize;
        Point ofs;
        src.locateROI(wholeSize, ofs);
        const int dtop = std::min(ofs.y, top);
        const int dbottom = std::min(wholeSize.height - src.rows - ofs.y, bottom);
        const int dleft = std::min(ofs.x, left);
        const int dright = std::min(wholeSize.width - src.cols - ofs.x, right);
        src.adjustROI(dtop, dbottom, dleft, dright);
        top -= dtop;
        bottom -= dbottom;
        left -= dleft;
        right -= dright;
    }

    _dst.create(src.rows + top + bottom, src.cols + left + right, type);
    UMat dst = _dst.getUMat();

    if (top == 0 && left == 0 && bottom == 0 && right == 0)
    {
        // Nothing left to pad: a plain copy suffices (skipped when the
        // buffers already alias each other with identical layout).
        if(src.u != dst.u || src.step != dst.step)
            src.copyTo(dst);
        return true;
    }

    k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst),
           top, left, ocl::KernelArg::Constant(Mat(1, 1, sctype, value)));

    size_t globalsize[2] = { (size_t)dst.cols, ((size_t)dst.rows + rowsPerWI - 1) / rowsPerWI };
    return k.run(2, globalsize, NULL, false);
}
1145
1146
}
1147
#endif
1148
1149
#ifdef HAVE_IPP
1150
namespace cv {
1151
1152
static bool ipp_copyMakeBorder( Mat &_src, Mat &_dst, int top, int bottom,
                                int left, int right, int _borderType, const Scalar& value )
{
#if defined HAVE_IPP_IW_LL && !IPP_DISABLE_PERF_COPYMAKE
    CV_INSTRUMENT_REGION_IPP();

    // The IPP path handles 2D matrices only.
    if(_src.dims > 2)
        return false;

    IppiBorderType borderType = ippiGetBorderType(_borderType);
    if((int)borderType == -1)  // border mode has no IPP equivalent
        return false;

    ::ipp::IwiBorderSize borderSize(left, top, right, bottom);
    ::ipp::IwiSize size(_src.cols, _src.rows);
    IppDataType dataType = ippiGetDataType(_src.depth());

    // View onto the destination interior, i.e. the region the source is
    // copied into; IPP fills the surrounding border itself.
    Rect dstRect(borderSize.left, borderSize.top,
                 _dst.cols - borderSize.right - borderSize.left,
                 _dst.rows - borderSize.bottom - borderSize.top);
    Mat subDst = Mat(_dst, dstRect);
    Mat *pSrc = &_src;

    return CV_INSTRUMENT_FUN_IPP(llwiCopyMakeBorder, pSrc->ptr(), pSrc->step, subDst.ptr(), subDst.step, size, dataType, _src.channels(), borderSize, borderType, &value[0]) >= 0;
#else
    CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(top); CV_UNUSED(bottom); CV_UNUSED(left); CV_UNUSED(right);
    CV_UNUSED(_borderType); CV_UNUSED(value);
    return false;
#endif
}
1181
}
1182
#endif
1183
1184
void cv::copyMakeBorder( InputArray _src, OutputArray _dst, int top, int bottom,
                         int left, int right, int borderType, const Scalar& value )
{
    CV_INSTRUMENT_REGION();

    CV_Assert( top >= 0 && bottom >= 0 && left >= 0 && right >= 0 );

    // Prefer the OpenCL path when the destination is a UMat.
    CV_OCL_RUN(_dst.isUMat() && _src.dims() <= 2,
               ocl_copyMakeBorder(_src, _dst, top, bottom, left, right, borderType, value))

    Mat src = _src.getMat();
    int type = src.type();

    if( src.isSubmatrix() && (borderType & BORDER_ISOLATED) == 0 )
    {
        // The ROI lives inside a larger matrix: expand it over the real
        // neighbouring pixels and reduce the synthetic border accordingly.
        Size wholeSize;
        Point ofs;
        src.locateROI(wholeSize, ofs);
        int dtop = std::min(ofs.y, top);
        int dbottom = std::min(wholeSize.height - src.rows - ofs.y, bottom);
        int dleft = std::min(ofs.x, left);
        int dright = std::min(wholeSize.width - src.cols - ofs.x, right);
        src.adjustROI(dtop, dbottom, dleft, dright);
        top -= dtop;
        bottom -= dbottom;
        left -= dleft;
        right -= dright;
    }

    _dst.create( src.rows + top + bottom, src.cols + left + right, type );
    Mat dst = _dst.getMat();

    // Degenerate case: the ROI expansion consumed the whole border.
    if(top == 0 && left == 0 && bottom == 0 && right == 0)
    {
        if(src.data != dst.data || src.step != dst.step)
            src.copyTo(dst);
        return;
    }

    borderType &= ~BORDER_ISOLATED;

    CV_IPP_RUN_FAST(ipp_copyMakeBorder(src, dst, top, bottom, left, right, borderType, value))

    if( borderType == BORDER_CONSTANT )
    {
        // Convert the scalar into raw pixel bytes once, then tile it.
        int cn = src.channels(), cn1 = cn;
        AutoBuffer<double> buf(cn);
        if( cn > 4 )
        {
            // With more than 4 channels only a uniform border value is supported.
            CV_Assert( value[0] == value[1] && value[0] == value[2] && value[0] == value[3] );
            cn1 = 1;
        }
        scalarToRawData(value, buf.data(), CV_MAKETYPE(src.depth(), cn1), cn);
        copyMakeConstBorder_8u( src.ptr(), src.step, src.size(),
                                dst.ptr(), dst.step, dst.size(),
                                top, left, (int)src.elemSize(), (uchar*)buf.data() );
    }
    else
        copyMakeBorder_8u( src.ptr(), src.step, src.size(),
                           dst.ptr(), dst.step, dst.size(),
                           top, left, (int)src.elemSize(), borderType );
}
1246
1247
/* dst = src */
1248
CV_IMPL void
cvCopy( const void* srcarr, void* dstarr, const void* maskarr )
{
    if( CV_IS_SPARSE_MAT(srcarr) && CV_IS_SPARSE_MAT(dstarr))
    {
        // Sparse -> sparse: rebuild the destination's node heap and hash
        // table directly from the source nodes.
        CV_Assert( maskarr == 0 );  // masks are not supported for sparse matrices

        CvSparseMat* from = (CvSparseMat*)srcarr;
        CvSparseMat* to = (CvSparseMat*)dstarr;
        CvSparseMatIterator iterator;
        CvSparseNode* node;

        to->dims = from->dims;
        memcpy( to->size, from->size, from->dims*sizeof(from->size[0]));
        to->valoffset = from->valoffset;
        to->idxoffset = from->idxoffset;
        cvClearSet( to->heap );

        // Reallocate the hash table when the source is too dense for the
        // destination's current table size.
        if( from->heap->active_count >= to->hashsize*CV_SPARSE_HASH_RATIO )
        {
            cvFree( &to->hashtable );
            to->hashsize = from->hashsize;
            to->hashtable = (void**)cvAlloc( to->hashsize*sizeof(to->hashtable[0]));
        }

        memset( to->hashtable, 0, to->hashsize*sizeof(to->hashtable[0]));

        for( node = cvInitSparseMatIterator( from, &iterator );
             node != 0; node = cvGetNextSparseNode( &iterator ))
        {
            // Clone each node and push it onto the front of its hash bucket.
            CvSparseNode* node_copy = (CvSparseNode*)cvSetNew( to->heap );
            int tabidx = node->hashval & (to->hashsize - 1);
            memcpy( node_copy, node, to->heap->elem_size );
            node_copy->next = (CvSparseNode*)to->hashtable[tabidx];
            to->hashtable[tabidx] = node_copy;
        }
        return;
    }

    cv::Mat src = cv::cvarrToMat(srcarr, false, true, 1), dst = cv::cvarrToMat(dstarr, false, true, 1);
    CV_Assert( src.depth() == dst.depth() && src.size == dst.size );

    // Honour a COI (channel of interest) set on either IplImage argument.
    int coi1 = 0, coi2 = 0;
    if( CV_IS_IMAGE(srcarr) )
        coi1 = cvGetImageCOI((const IplImage*)srcarr);
    if( CV_IS_IMAGE(dstarr) )
        coi2 = cvGetImageCOI((const IplImage*)dstarr);

    if( coi1 || coi2 )
    {
        // When a COI is set on one side, the other side must either have a
        // COI of its own or be single-channel.
        CV_Assert( (coi1 != 0 || src.channels() == 1) &&
                   (coi2 != 0 || dst.channels() == 1) );

        int pair[] = { std::max(coi1-1, 0), std::max(coi2-1, 0) };
        cv::mixChannels( &src, 1, &dst, 1, pair, 1 );
        return;
    }

    CV_Assert( src.channels() == dst.channels() );

    if( maskarr )
        src.copyTo(dst, cv::cvarrToMat(maskarr));
    else
        src.copyTo(dst);
}
1312
1313
CV_IMPL void
cvSet( void* arr, CvScalar value, const void* maskarr )
{
    // Fill the whole array with `value`, optionally restricted by a mask.
    cv::Mat m = cv::cvarrToMat(arr);
    if( maskarr )
        m.setTo(cv::Scalar(value), cv::cvarrToMat(maskarr));
    else
        m = value;
}
1322
1323
CV_IMPL void
cvSetZero( CvArr* arr )
{
    if( CV_IS_SPARSE_MAT(arr) )
    {
        // A sparse matrix is zeroed by dropping all of its nodes.
        CvSparseMat* sparse = (CvSparseMat*)arr;
        cvClearSet( sparse->heap );
        if( sparse->hashtable )
            memset( sparse->hashtable, 0, sparse->hashsize*sizeof(sparse->hashtable[0]));
    }
    else
    {
        cv::Mat m = cv::cvarrToMat(arr);
        m = cv::Scalar(0);
    }
}
1337
1338
CV_IMPL void
cvFlip( const CvArr* srcarr, CvArr* dstarr, int flip_mode )
{
    cv::Mat src = cv::cvarrToMat(srcarr);
    // A null destination means the flip is performed in place.
    cv::Mat dst = dstarr ? cv::cvarrToMat(dstarr) : src;

    CV_Assert( src.type() == dst.type() && src.size() == dst.size() );
    cv::flip( src, dst, flip_mode );
}
1352
1353
CV_IMPL void
cvRepeat( const CvArr* srcarr, CvArr* dstarr )
{
    // Tile the source over the destination; the destination size must be an
    // exact multiple of the source size in both dimensions.
    cv::Mat tile = cv::cvarrToMat(srcarr);
    cv::Mat out = cv::cvarrToMat(dstarr);
    CV_Assert( tile.type() == out.type() &&
               out.rows % tile.rows == 0 && out.cols % tile.cols == 0 );
    cv::repeat(tile, out.rows/tile.rows, out.cols/tile.cols, out);
}
1361
1362
/* End of file. */
1363
1364