Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Tetragramm
GitHub Repository: Tetragramm/opencv
Path: blob/master/modules/core/src/matrix.cpp
16337 views
1
// This file is part of OpenCV project.
2
// It is subject to the license terms in the LICENSE file found in the top-level directory
3
// of this distribution and at http://opencv.org/license.html
4
5
#include "precomp.hpp"
6
#include "bufferpool.impl.hpp"
7
8
namespace cv {
9
10
// Default no-op: a plain host-memory allocator has nothing to map; the data
// pointer in UMatData is already directly accessible.
void MatAllocator::map(UMatData*, AccessFlag) const
{
}
13
14
// Default unmap: once neither the UMat side (urefcount) nor the Mat side
// (refcount) holds a reference, the buffer is no longer needed and is freed.
void MatAllocator::unmap(UMatData* u) const
{
    const bool unreferenced = u->urefcount == 0 && u->refcount == 0;
    if( unreferenced )
        deallocate(u);
}
21
22
// Default host "download": copy the n-D region described by sz/srcofs/srcstep
// out of u->data into the user buffer dstptr (with strides dststep).
// Both sides are wrapped as CV_8U Mats and copied plane-by-plane with
// NAryMatIterator, so arbitrary (non-contiguous) strides are handled.
void MatAllocator::download(UMatData* u, void* dstptr,
                            int dims, const size_t sz[],
                            const size_t srcofs[], const size_t srcstep[],
                            const size_t dststep[]) const
{
    if(!u)
        return;
    int isz[CV_MAX_DIM];
    uchar* srcptr = u->data;
    for( int i = 0; i < dims; i++ )
    {
        CV_Assert( sz[i] <= (size_t)INT_MAX );
        if( sz[i] == 0 )
            return;  // empty region: nothing to copy
        if( srcofs )
            // innermost dimension is addressed in bytes (step == 1);
            // outer dimensions use the provided strides
            srcptr += srcofs[i]*(i <= dims-2 ? srcstep[i] : 1);
        isz[i] = (int)sz[i];
    }

    Mat src(dims, isz, CV_8U, srcptr, srcstep);
    Mat dst(dims, isz, CV_8U, dstptr, dststep);

    const Mat* arrays[] = { &src, &dst };
    uchar* ptrs[2];
    NAryMatIterator it(arrays, ptrs, 2);
    size_t planesz = it.size;

    for( size_t j = 0; j < it.nplanes; j++, ++it )
        memcpy(ptrs[1], ptrs[0], planesz);
}
52
53
54
// Default host "upload": counterpart of download(); copy from the user buffer
// srcptr (strides srcstep) into the n-D region of u->data described by
// sz/dstofs/dststep, plane-by-plane through NAryMatIterator.
void MatAllocator::upload(UMatData* u, const void* srcptr, int dims, const size_t sz[],
                          const size_t dstofs[], const size_t dststep[],
                          const size_t srcstep[]) const
{
    if(!u)
        return;
    int isz[CV_MAX_DIM];
    uchar* dstptr = u->data;
    for( int i = 0; i < dims; i++ )
    {
        CV_Assert( sz[i] <= (size_t)INT_MAX );
        if( sz[i] == 0 )
            return;  // empty region: nothing to copy
        if( dstofs )
            // innermost dimension is addressed in bytes (step == 1)
            dstptr += dstofs[i]*(i <= dims-2 ? dststep[i] : 1);
        isz[i] = (int)sz[i];
    }

    Mat src(dims, isz, CV_8U, (void*)srcptr, srcstep);
    Mat dst(dims, isz, CV_8U, dstptr, dststep);

    const Mat* arrays[] = { &src, &dst };
    uchar* ptrs[2];
    NAryMatIterator it(arrays, ptrs, 2);
    size_t planesz = it.size;

    for( size_t j = 0; j < it.nplanes; j++, ++it )
        memcpy(ptrs[1], ptrs[0], planesz);
}
83
84
// Default host-to-host copy between two UMatData buffers: copies the n-D
// region described by sz with independent source/destination offsets and
// strides. The sync flag is unused here since host memory needs no sync.
void MatAllocator::copy(UMatData* usrc, UMatData* udst, int dims, const size_t sz[],
                        const size_t srcofs[], const size_t srcstep[],
                        const size_t dstofs[], const size_t dststep[], bool /*sync*/) const
{
    CV_INSTRUMENT_REGION();

    if(!usrc || !udst)
        return;
    int isz[CV_MAX_DIM];
    uchar* srcptr = usrc->data;
    uchar* dstptr = udst->data;
    for( int i = 0; i < dims; i++ )
    {
        CV_Assert( sz[i] <= (size_t)INT_MAX );
        if( sz[i] == 0 )
            return;  // empty region: nothing to copy
        if( srcofs )
            // innermost dimension is addressed in bytes (step == 1)
            srcptr += srcofs[i]*(i <= dims-2 ? srcstep[i] : 1);
        if( dstofs )
            dstptr += dstofs[i]*(i <= dims-2 ? dststep[i] : 1);
        isz[i] = (int)sz[i];
    }

    Mat src(dims, isz, CV_8U, srcptr, srcstep);
    Mat dst(dims, isz, CV_8U, dstptr, dststep);

    const Mat* arrays[] = { &src, &dst };
    uchar* ptrs[2];
    NAryMatIterator it(arrays, ptrs, 2);
    size_t planesz = it.size;

    for( size_t j = 0; j < it.nplanes; j++, ++it )
        memcpy(ptrs[1], ptrs[0], planesz);
}
118
119
// Default allocators have no real buffer pool. A single shared do-nothing
// controller is handed back so callers never need to null-check the result.
BufferPoolController* MatAllocator::getBufferPoolController(const char* id) const
{
    CV_UNUSED(id);
    static DummyBufferPoolController sharedDummyController;
    return &sharedDummyController;
}
125
126
// Plain CPU allocator: backs Mat data with fastMalloc'ed memory (or adopts a
// user-provided pointer without taking ownership) and tracks the buffer
// through a heap-allocated UMatData record.
class StdMatAllocator CV_FINAL : public MatAllocator
{
public:
    // Computes the total byte size from sizes/steps (filling in auto steps),
    // allocates or adopts the data pointer, and returns a fresh UMatData.
    UMatData* allocate(int dims, const int* sizes, int type,
                       void* data0, size_t* step, AccessFlag /*flags*/, UMatUsageFlags /*usageFlags*/) const CV_OVERRIDE
    {
        size_t total = CV_ELEM_SIZE(type);
        for( int i = dims-1; i >= 0; i-- )
        {
            if( step )
            {
                if( data0 && step[i] != CV_AUTOSTEP )
                {
                    CV_Assert(total <= step[i]);
                    total = step[i];  // honor the caller-provided stride
                }
                else
                    step[i] = total;  // dense layout: stride = size so far
            }
            // checked multiply: a silent size_t wrap here would allocate an
            // undersized buffer and corrupt the heap on first write
            // (same guard as the autoSteps path of setSize())
            int64 total_ = (int64)total*sizes[i];
            CV_Assert( total_ >= 0 && (uint64)total_ == (size_t)total_ );
            total = (size_t)total_;
        }
        uchar* data = data0 ? (uchar*)data0 : (uchar*)fastMalloc(total);
        UMatData* u = new UMatData(this);
        u->data = u->origdata = data;
        u->size = total;
        if(data0)
            u->flags |= UMatData::USER_ALLOCATED;  // never free memory we don't own

        return u;
    }

    // Host memory needs no extra mapping work; just validate the handle.
    bool allocate(UMatData* u, AccessFlag /*accessFlags*/, UMatUsageFlags /*usageFlags*/) const CV_OVERRIDE
    {
        if(!u) return false;
        return true;
    }

    // Frees the buffer (unless user-owned) and the UMatData record itself.
    // Both reference counts must already be zero.
    void deallocate(UMatData* u) const CV_OVERRIDE
    {
        if(!u)
            return;

        CV_Assert(u->urefcount == 0);
        CV_Assert(u->refcount == 0);
        if( !(u->flags & UMatData::USER_ALLOCATED) )
        {
            fastFree(u->origdata);
            u->origdata = 0;
        }
        delete u;
    }
};
178
179
namespace
{
    // Process-wide default Mat allocator; lazily installed by
    // Mat::getDefaultAllocator() under the global initialization mutex.
    MatAllocator* volatile g_matAllocator = NULL;
}
183
184
// Returns the process-wide default allocator, creating it on first use.
// Uses a check / lock / re-check sequence so the common (already-initialized)
// path takes no lock.
MatAllocator* Mat::getDefaultAllocator()
{
    if (g_matAllocator == NULL)
    {
        cv::AutoLock lock(cv::getInitializationMutex());
        if (g_matAllocator == NULL)  // re-check under the lock
        {
            g_matAllocator = getStdAllocator();
        }
    }
    return g_matAllocator;
}
196
// Installs a user-supplied allocator as the process-wide default.
// NOTE(review): the store is not taken under the initialization mutex —
// callers are presumably expected to set this before heavy concurrent use.
void Mat::setDefaultAllocator(MatAllocator* allocator)
{
    g_matAllocator = allocator;
}
200
// Lazily constructed singleton instance of StdMatAllocator (the plain
// fastMalloc-backed CPU allocator).
MatAllocator* Mat::getStdAllocator()
{
    CV_SINGLETON_LAZY_INIT(MatAllocator, new StdMatAllocator())
}
204
205
//==================================================================================================
206
207
// (Re)configure m's dimensionality, per-axis sizes and steps.
// _sz may be NULL to set only the dimension count. _steps, when given,
// supplies explicit byte strides for all but the innermost axis; otherwise
// autoSteps computes dense strides. 1-D input is stored as an Nx1 2-D matrix.
void setSize( Mat& m, int _dims, const int* _sz, const size_t* _steps, bool autoSteps)
{
    CV_Assert( 0 <= _dims && _dims <= CV_MAX_DIM );
    if( m.dims != _dims )
    {
        // release any previously heap-allocated size/step arrays and fall
        // back to the inline buffers
        if( m.step.p != m.step.buf )
        {
            fastFree(m.step.p);
            m.step.p = m.step.buf;
            m.size.p = &m.rows;
        }
        if( _dims > 2 )
        {
            // one heap block holds: _dims steps, then [dim count, sizes...]
            m.step.p = (size_t*)fastMalloc(_dims*sizeof(m.step.p[0]) + (_dims+1)*sizeof(m.size.p[0]));
            m.size.p = (int*)(m.step.p + _dims) + 1;
            m.size.p[-1] = _dims;  // the slot before the sizes stores ndims
            m.rows = m.cols = -1;
        }
    }

    m.dims = _dims;
    if( !_sz )
        return;

    size_t esz = CV_ELEM_SIZE(m.flags), esz1 = CV_ELEM_SIZE1(m.flags), total = esz;
    for( int i = _dims-1; i >= 0; i-- )
    {
        int s = _sz[i];
        CV_Assert( s >= 0 );
        m.size.p[i] = s;

        if( _steps )
        {
            // steps must be expressible in whole channel elements
            if (_steps[i] % esz1 != 0)
            {
                CV_Error(Error::BadStep, "Step must be a multiple of esz1");
            }

            // the innermost step is always the element size, never the
            // caller-provided value
            m.step.p[i] = i < _dims-1 ? _steps[i] : esz;
        }
        else if( autoSteps )
        {
            m.step.p[i] = total;
            // guard against the running byte total overflowing size_t
            int64 total1 = (int64)total*s;
            if( (uint64)total1 != (size_t)total1 )
                CV_Error( CV_StsOutOfRange, "The total matrix size does not fit to \"size_t\" type" );
            total = (size_t)total1;
        }
    }

    if( _dims == 1 )
    {
        // represent 1-D data as an Nx1 column matrix
        m.dims = 2;
        m.cols = 1;
        m.step[1] = esz;
    }
}
264
265
// Decide whether the matrix data forms one contiguous block: ignoring leading
// size<=1 axes, each outer step must exactly equal the span of the axes inside
// it, and the total element count must fit in an int. Returns flags with
// CONTINUOUS_FLAG set or cleared accordingly.
int updateContinuityFlag(int flags, int dims, const int* size, const size_t* step)
{
    int i, j;
    for( i = 0; i < dims; i++ )
    {
        // skip leading degenerate axes — they cannot introduce gaps
        if( size[i] > 1 )
            break;
    }

    uint64 t = (uint64)size[std::min(i, dims-1)]*CV_MAT_CN(flags);
    for( j = dims-1; j > i; j-- )
    {
        t *= size[j];
        if( step[j]*size[j] < step[j-1] )
            break;  // padding between slices of axis j-1 => not continuous
    }

    // continuous only if no gap was found and the total fits in int
    if( j <= i && t == (uint64)(int)t )
        return flags | Mat::CONTINUOUS_FLAG;
    return flags & ~Mat::CONTINUOUS_FLAG;
}
286
287
// Recompute this matrix's CONTINUOUS_FLAG from its current sizes and steps.
void Mat::updateContinuityFlag()
{
    flags = cv::updateContinuityFlag(flags, dims, size.p, step.p);
}
291
292
// Finish initializing a Mat header after size/step/data have been set:
// refresh the continuity flag, sync the data pointers with UMatData (if any),
// and derive datalimit/dataend from the sizes and steps.
void finalizeHdr(Mat& m)
{
    m.updateContinuityFlag();
    int d = m.dims;
    if( d > 2 )
        m.rows = m.cols = -1;  // rows/cols are not meaningful for n-D matrices
    if(m.u)
        m.datastart = m.data = m.u->data;
    if( m.data )
    {
        // datalimit: end of the buffer as implied by the outermost dimension
        m.datalimit = m.datastart + m.size[0]*m.step[0];
        if( m.size[0] > 0 )
        {
            // dataend: one past the last actually addressable byte,
            // accounting for per-axis padding
            m.dataend = m.ptr() + m.size[d-1]*m.step[d-1];
            for( int i = 0; i < d-1; i++ )
                m.dataend += (m.size[i] - 1)*m.step[i];
        }
        else
            m.dataend = m.datalimit;
    }
    else
        m.dataend = m.datalimit = 0;
}
315
316
//==================================================================================================
317
318
// Allocate (or reuse) storage for a d-dimensional matrix of the given type.
// If the current allocation already matches the requested shape and type this
// is a no-op. If a custom allocator throws, the default allocator is tried
// before propagating the failure.
void Mat::create(int d, const int* _sizes, int _type)
{
    int i;
    CV_Assert(0 <= d && d <= CV_MAX_DIM && _sizes);
    _type = CV_MAT_TYPE(_type);

    // fast path: shape and type unchanged -> keep the current buffer
    if( data && (d == dims || (d == 1 && dims <= 2)) && _type == type() )
    {
        if( d == 2 && rows == _sizes[0] && cols == _sizes[1] )
            return;
        for( i = 0; i < d; i++ )
            if( size[i] != _sizes[i] )
                break;
        if( i == d && (d > 1 || size[1] == 1))
            return;
    }

    // _sizes may alias this->size.p, which release() below invalidates,
    // so snapshot it first (#5991)
    int _sizes_backup[CV_MAX_DIM]; // #5991
    if (_sizes == (this->size.p))
    {
        for(i = 0; i < d; i++ )
            _sizes_backup[i] = _sizes[i];
        _sizes = _sizes_backup;
    }

    release();
    if( d == 0 )
        return;
    flags = (_type & CV_MAT_TYPE_MASK) | MAGIC_VAL;
    setSize(*this, d, _sizes, 0, true);

    if( total() > 0 )
    {
        MatAllocator *a = allocator, *a0 = getDefaultAllocator();
#ifdef HAVE_TGPU
        if( !a || a == tegra::getAllocator() )
            a = tegra::getAllocator(d, _sizes, _type);
#endif
        if(!a)
            a = a0;
        try
        {
            u = a->allocate(dims, size, _type, 0, step.p, ACCESS_RW /* ignored */, USAGE_DEFAULT);
            CV_Assert(u != 0);
        }
        catch (...)
        {
            // custom allocator failed: fall back to the default allocator
            if (a == a0)
                throw;
            u = a0->allocate(dims, size, _type, 0, step.p, ACCESS_RW /* ignored */, USAGE_DEFAULT);
            CV_Assert(u != 0);
        }
        // the allocator must have produced a dense innermost step
        CV_Assert( step[dims-1] == (size_t)CV_ELEM_SIZE(flags) );
    }

    addref();
    finalizeHdr(*this);
}
376
377
void Mat::create(const std::vector<int>& _sizes, int _type)
378
{
379
create((int)_sizes.size(), _sizes.data(), _type);
380
}
381
382
void Mat::copySize(const Mat& m)
383
{
384
setSize(*this, m.dims, 0, 0);
385
for( int i = 0; i < dims; i++ )
386
{
387
size[i] = m.size[i];
388
step[i] = m.step[i];
389
}
390
}
391
392
// Detach this Mat from its UMatData and hand the buffer back to whichever
// allocator owns it: currAllocator if set, else this Mat's allocator, else
// the process default. unmap() frees the data once all references are gone.
void Mat::deallocate()
{
    if(u)
    {
        UMatData* u_ = u;
        u = NULL;  // clear the member first so the Mat no longer points at the buffer
        (u_->currAllocator ? u_->currAllocator : allocator ? allocator : getDefaultAllocator())->unmap(u_);
    }
}
401
402
// Submatrix constructor: a view over the given row/column ranges of m,
// sharing m's data (no copy). For n-D matrices it delegates to the
// Range-array operator(). If range validation throws, the partially built
// header releases its shared reference before rethrowing.
Mat::Mat(const Mat& m, const Range& _rowRange, const Range& _colRange)
    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
      datalimit(0), allocator(0), u(0), size(&rows)
{
    CV_Assert( m.dims >= 2 );
    if( m.dims > 2 )
    {
        AutoBuffer<Range> rs(m.dims);
        rs[0] = _rowRange;
        rs[1] = _colRange;
        for( int i = 2; i < m.dims; i++ )
            rs[i] = Range::all();  // remaining axes taken in full
        *this = m(rs.data());
        return;
    }

    *this = m;
    CV_TRY
    {
        if( _rowRange != Range::all() && _rowRange != Range(0,rows) )
        {
            CV_Assert( 0 <= _rowRange.start && _rowRange.start <= _rowRange.end
                       && _rowRange.end <= m.rows );
            rows = _rowRange.size();
            data += step*_rowRange.start;  // advance by whole rows
            flags |= SUBMATRIX_FLAG;
        }

        if( _colRange != Range::all() && _colRange != Range(0,cols) )
        {
            CV_Assert( 0 <= _colRange.start && _colRange.start <= _colRange.end
                       && _colRange.end <= m.cols );
            cols = _colRange.size();
            data += _colRange.start*elemSize();  // advance within a row
            flags |= SUBMATRIX_FLAG;
        }
    }
    CV_CATCH_ALL
    {
        release();  // don't leak the shared reference taken by *this = m
        CV_RETHROW();
    }

    updateContinuityFlag();

    if( rows <= 0 || cols <= 0 )
    {
        // empty selection degenerates to an empty matrix
        release();
        rows = cols = 0;
    }
}
453
454
455
// Submatrix constructor: a 2-D view over rectangle roi of m, sharing m's
// data. Takes an extra reference on the shared UMatData.
Mat::Mat(const Mat& m, const Rect& roi)
    : flags(m.flags), dims(2), rows(roi.height), cols(roi.width),
      data(m.data + roi.y*m.step[0]),
      datastart(m.datastart), dataend(m.dataend), datalimit(m.datalimit),
      allocator(m.allocator), u(m.u), size(&rows)
{
    CV_Assert( m.dims <= 2 );

    size_t esz = CV_ELEM_SIZE(flags);
    data += roi.x*esz;  // column offset (row offset applied in the init list)
    CV_Assert( 0 <= roi.x && 0 <= roi.width && roi.x + roi.width <= m.cols &&
               0 <= roi.y && 0 <= roi.height && roi.y + roi.height <= m.rows );
    if( u )
        CV_XADD(&u->refcount, 1);  // this view holds its own reference
    if( roi.width < m.cols || roi.height < m.rows )
        flags |= SUBMATRIX_FLAG;

    step[0] = m.step[0]; step[1] = esz;
    updateContinuityFlag();

    if( rows <= 0 || cols <= 0 )
    {
        // empty rectangle degenerates to an empty matrix
        release();
        rows = cols = 0;
    }
}
481
482
483
// Wrap user-provided external data as an n-D matrix header — no copy and no
// ownership taken (u stays 0, so release() never frees _data).
// _steps == NULL means dense packing is assumed.
Mat::Mat(int _dims, const int* _sizes, int _type, void* _data, const size_t* _steps)
    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
      datalimit(0), allocator(0), u(0), size(&rows)
{
    flags |= CV_MAT_TYPE(_type);
    datastart = data = (uchar*)_data;
    setSize(*this, _dims, _sizes, _steps, true);
    finalizeHdr(*this);
}
492
493
494
// Same as the pointer/length external-data constructor, with the sizes given
// as a vector. No copy, no ownership of _data.
Mat::Mat(const std::vector<int>& _sizes, int _type, void* _data, const size_t* _steps)
    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
      datalimit(0), allocator(0), u(0), size(&rows)
{
    flags |= CV_MAT_TYPE(_type);
    datastart = data = (uchar*)_data;
    setSize(*this, (int)_sizes.size(), _sizes.data(), _steps, true);
    finalizeHdr(*this);
}
503
504
505
// Submatrix constructor: an n-D view selected by one Range per dimension of
// m (data shared, no copy). All ranges are validated before any state is
// modified.
Mat::Mat(const Mat& m, const Range* ranges)
    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
      datalimit(0), allocator(0), u(0), size(&rows)
{
    int d = m.dims;

    CV_Assert(ranges);
    for( int i = 0; i < d; i++ )
    {
        Range r = ranges[i];
        CV_Assert( r == Range::all() || (0 <= r.start && r.start < r.end && r.end <= m.size[i]) );
    }
    *this = m;
    for( int i = 0; i < d; i++ )
    {
        Range r = ranges[i];
        if( r != Range::all() && r != Range(0, size.p[i]))
        {
            // narrow this axis and advance the data pointer to its start
            size.p[i] = r.end - r.start;
            data += r.start*step.p[i];
            flags |= SUBMATRIX_FLAG;
        }
    }
    updateContinuityFlag();
}
530
531
// Same as the Range-pointer submatrix constructor, but the ranges are given
// as a vector whose length must equal m.dims.
Mat::Mat(const Mat& m, const std::vector<Range>& ranges)
    : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
      datalimit(0), allocator(0), u(0), size(&rows)
{
    int d = m.dims;

    CV_Assert((int)ranges.size() == d);
    for (int i = 0; i < d; i++)
    {
        Range r = ranges[i];
        CV_Assert(r == Range::all() || (0 <= r.start && r.start < r.end && r.end <= m.size[i]));
    }
    *this = m;
    for (int i = 0; i < d; i++)
    {
        Range r = ranges[i];
        if (r != Range::all() && r != Range(0, size.p[i]))
        {
            // narrow this axis and advance the data pointer to its start
            size.p[i] = r.end - r.start;
            data += r.start*step.p[i];
            flags |= SUBMATRIX_FLAG;
        }
    }
    updateContinuityFlag();
}
556
557
558
// Return a single-column view of the d-th diagonal (d > 0: above the main
// diagonal, d < 0: below). The view shares data with this matrix.
Mat Mat::diag(int d) const
{
    CV_Assert( dims <= 2 );
    Mat m = *this;
    size_t esz = elemSize();
    int len;

    if( d >= 0 )
    {
        len = std::min(cols - d, rows);
        m.data += esz*d;       // start d columns to the right
    }
    else
    {
        len = std::min(rows + d, cols);
        m.data -= step[0]*d;   // d is negative: start -d rows down
    }
    CV_DbgAssert( len > 0 );

    m.size[0] = m.rows = len;
    m.size[1] = m.cols = 1;
    // stepping one row plus one column at a time walks along the diagonal
    m.step[0] += (len > 1 ? esz : 0);

    m.updateContinuityFlag();

    if( size() != Size(1,1) )
        m.flags |= SUBMATRIX_FLAG;

    return m;
}
588
589
590
// Remove the last nelems rows (entries along the outermost dimension).
void Mat::pop_back(size_t nelems)
{
    CV_Assert( nelems <= (size_t)size.p[0] );

    if( !isSubmatrix() )
    {
        // we own the full buffer: just shrink the bookkeeping
        size.p[0] -= (int)nelems;
        dataend -= nelems*step.p[0];
    }
    else
        *this = rowRange(0, size.p[0] - (int)nelems);
}
602
603
604
// Append one element (elemSize() raw bytes) as a new row, growing the buffer
// by roughly 1.5x when capacity is exhausted.
void Mat::push_back_(const void* elem)
{
    size_t r = size.p[0];
    if( isSubmatrix() || dataend + step.p[0] > datalimit )
        reserve( std::max(r + 1, (r*3+1)/2) );  // amortized growth

    size_t esz = elemSize();
    memcpy(data + r*step.p[0], elem, esz);
    size.p[0] = int(r + 1);
    dataend += step.p[0];
    // drop the continuity flag if rows are padded or the total element
    // count no longer fits in an int
    uint64 tsz = size.p[0];
    for( int i = 1; i < dims; i++ )
        tsz *= size.p[i];
    if( esz < step.p[0] || tsz != (uint64)(int)tsz )
        flags &= ~CONTINUOUS_FLAG;
}
620
621
622
// Ensure capacity for at least nelems rows, reallocating and copying the
// existing rows when needed. The visible row count is left unchanged.
void Mat::reserve(size_t nelems)
{
    const size_t MIN_SIZE = 64;

    CV_Assert( (int)nelems >= 0 );
    if( !isSubmatrix() && data + step.p[0]*nelems <= datalimit )
        return;  // current buffer already big enough

    int r = size.p[0];

    if( (size_t)r >= nelems )
        return;

    // temporarily set the requested row count to compute the new byte size
    size.p[0] = std::max((int)nelems, 1);
    size_t newsize = total()*elemSize();

    // for tiny elements, over-allocate up to MIN_SIZE bytes worth of rows
    if( newsize < MIN_SIZE )
        size.p[0] = (int)((MIN_SIZE + newsize - 1)*nelems/newsize);

    Mat m(dims, size.p, type());
    size.p[0] = r;  // restore the real row count
    if( r > 0 )
    {
        Mat mpart = m.rowRange(0, r);
        copyTo(mpart);  // preserve the existing rows
    }

    *this = m;
    size.p[0] = r;
    dataend = data + step.p[0]*r;
}
653
654
655
// Ensure the underlying buffer holds at least nbytes, reallocating as a
// newrows x newcols matrix of the current type (CV_8UC1 if empty).
// Contents are NOT preserved — create() is called directly.
void Mat::reserveBuffer(size_t nbytes)
{
    size_t esz = 1;
    int mtype = CV_8UC1;
    if (!empty())
    {
        if (!isSubmatrix() && data + nbytes <= dataend)//Should it be datalimit?
            return;
        esz = elemSize();
        mtype = type();
    }

    // round nbytes up to a whole number of elements
    size_t nelems = (nbytes - 1) / esz + 1;

#if SIZE_MAX > UINT_MAX
    CV_Assert(nelems <= size_t(INT_MAX)*size_t(INT_MAX));
    // split the element count into rows x cols so each dimension fits in int
    int newrows = nelems > size_t(INT_MAX) ? nelems > 0x400*size_t(INT_MAX) ? nelems > 0x100000 * size_t(INT_MAX) ? nelems > 0x40000000 * size_t(INT_MAX) ?
                  size_t(INT_MAX) : 0x40000000 : 0x100000 : 0x400 : 1;
#else
    int newrows = nelems > size_t(INT_MAX) ? 2 : 1;
#endif
    int newcols = (int)((nelems - 1) / newrows + 1);

    create(newrows, newcols, mtype);
}
680
681
682
// Change the number of rows to nelems, growing the buffer if necessary.
// Newly exposed rows are left uninitialized.
void Mat::resize(size_t nelems)
{
    int saveRows = size.p[0];
    if( saveRows == (int)nelems )
        return;
    CV_Assert( (int)nelems >= 0 );

    if( isSubmatrix() || data + step.p[0]*nelems > datalimit )
        reserve(nelems);  // reallocate, preserving the existing rows

    size.p[0] = (int)nelems;
    dataend += (size.p[0] - saveRows)*step.p[0];

    //updateContinuityFlag(*this);
}
697
698
699
// Resize to nelems rows; any newly added rows are filled with s.
void Mat::resize(size_t nelems, const Scalar& s)
{
    const int oldRows = size.p[0];
    resize(nelems);

    const int newRows = size.p[0];
    if( newRows > oldRows )
    {
        // initialize only the appended tail
        Mat appended = rowRange(oldRows, newRows);
        appended = s;
    }
}
710
711
// Append the rows of elems to this matrix. Self-append is handled via a
// temporary snapshot; the row shape (all dims except the first) and type
// must match.
void Mat::push_back(const Mat& elems)
{
    size_t r = size.p[0];
    size_t delta = elems.size.p[0];
    if( delta == 0 )
        return;
    if( this == &elems )
    {
        // appending a matrix to itself: snapshot it first, since reserve()
        // may reallocate our (shared) buffer
        Mat tmp = elems;
        push_back(tmp);
        return;
    }
    if( !data )
    {
        *this = elems.clone();
        return;
    }

    // compare the non-row shape by temporarily matching the row counts
    size.p[0] = elems.size.p[0];
    bool eq = size == elems.size;
    size.p[0] = int(r);
    if( !eq )
        CV_Error(CV_StsUnmatchedSizes, "Pushed vector length is not equal to matrix row length");
    if( type() != elems.type() )
        CV_Error(CV_StsUnmatchedFormats, "Pushed vector type is not the same as matrix type");

    if( isSubmatrix() || dataend + step.p[0]*delta > datalimit )
        reserve( std::max(r + delta, (r*3+1)/2) );  // amortized growth

    size.p[0] += int(delta);
    dataend += step.p[0]*delta;

    //updateContinuityFlag(*this);

    if( isContinuous() && elems.isContinuous() )
        memcpy(data + r*step.p[0], elems.data, elems.total()*elems.elemSize());
    else
    {
        // strided copy through a row-range view
        Mat part = rowRange(int(r), int(r + delta));
        elems.copyTo(part);
    }
}
753
754
755
// Recover the size of the whole parent matrix and this submatrix's offset
// inside it, using only the data/datastart/dataend pointers and steps.
void Mat::locateROI( Size& wholeSize, Point& ofs ) const
{
    CV_Assert( dims <= 2 && step[0] > 0 );
    size_t esz = elemSize(), minstep;
    ptrdiff_t delta1 = data - datastart, delta2 = dataend - datastart;

    if( delta1 == 0 )
        ofs.x = ofs.y = 0;
    else
    {
        // decompose the byte offset of 'data' into (row, column)
        ofs.y = (int)(delta1/step[0]);
        ofs.x = (int)((delta1 - step[0]*ofs.y)/esz);
        CV_DbgAssert( data == datastart + ofs.y*step[0] + ofs.x*esz );
    }
    // derive the parent extent from the total byte span
    minstep = (ofs.x + cols)*esz;
    wholeSize.height = (int)((delta2 - minstep)/step[0] + 1);
    wholeSize.height = std::max(wholeSize.height, ofs.y + rows);
    wholeSize.width = (int)((delta2 - step*(wholeSize.height-1))/esz);
    wholeSize.width = std::max(wholeSize.width, ofs.x + cols);
}
775
776
// Grow (positive deltas) or shrink (negative deltas) this submatrix's ROI in
// place, clamped to the bounds of the parent matrix found via locateROI().
Mat& Mat::adjustROI( int dtop, int dbottom, int dleft, int dright )
{
    CV_Assert( dims <= 2 && step[0] > 0 );
    Size wholeSize; Point ofs;
    size_t esz = elemSize();
    locateROI( wholeSize, ofs );
    // new bounds, each clamped to [0, whole extent]
    int row1 = std::min(std::max(ofs.y - dtop, 0), wholeSize.height), row2 = std::max(0, std::min(ofs.y + rows + dbottom, wholeSize.height));
    int col1 = std::min(std::max(ofs.x - dleft, 0), wholeSize.width), col2 = std::max(0, std::min(ofs.x + cols + dright, wholeSize.width));
    if(row1 > row2)
        std::swap(row1, row2);
    if(col1 > col2)
        std::swap(col1, col2);

    // move the data pointer relative to the current offset
    data += (row1 - ofs.y)*step + (col1 - ofs.x)*esz;
    rows = row2 - row1; cols = col2 - col1;
    size.p[0] = rows; size.p[1] = cols;
    updateContinuityFlag();
    return *this;
}
795
796
// Return a header with a different channel count and/or row count over the
// same data (no copy). new_cn == 0 keeps the channel count; new_rows == 0
// keeps the row count. Changing the row count requires a continuous matrix.
Mat Mat::reshape(int new_cn, int new_rows) const
{
    int cn = channels();
    Mat hdr = *this;

    if( dims > 2 )
    {
        // n-D case: only channel repacking along the last axis is supported
        if( new_rows == 0 && new_cn != 0 && size[dims-1]*cn % new_cn == 0 )
        {
            hdr.flags = (hdr.flags & ~CV_MAT_CN_MASK) | ((new_cn-1) << CV_CN_SHIFT);
            hdr.step[dims-1] = CV_ELEM_SIZE(hdr.flags);
            hdr.size[dims-1] = hdr.size[dims-1]*cn / new_cn;
            return hdr;
        }
        if( new_rows > 0 )
        {
            // flatten to 2-D with the requested row count
            int sz[] = { new_rows, (int)(total()/new_rows) };
            return reshape(new_cn, 2, sz);
        }
    }

    CV_Assert( dims <= 2 );

    if( new_cn == 0 )
        new_cn = cn;

    int total_width = cols * cn;

    // if the width can't be evenly repacked, infer the row count instead
    if( (new_cn > total_width || total_width % new_cn != 0) && new_rows == 0 )
        new_rows = rows * total_width / new_cn;

    if( new_rows != 0 && new_rows != rows )
    {
        int total_size = total_width * rows;
        if( !isContinuous() )
            CV_Error( CV_BadStep,
                      "The matrix is not continuous, thus its number of rows can not be changed" );

        if( (unsigned)new_rows > (unsigned)total_size )
            CV_Error( CV_StsOutOfRange, "Bad new number of rows" );

        total_width = total_size / new_rows;

        if( total_width * new_rows != total_size )
            CV_Error( CV_StsBadArg, "The total number of matrix elements "
                                    "is not divisible by the new number of rows" );

        hdr.rows = new_rows;
        hdr.step[0] = total_width * elemSize1();
    }

    int new_width = total_width / new_cn;

    if( new_width * new_cn != total_width )
        CV_Error( CV_BadNumChannels,
                  "The total width is not divisible by the new number of channels" );

    hdr.cols = new_width;
    hdr.flags = (hdr.flags & ~CV_MAT_CN_MASK) | ((new_cn-1) << CV_CN_SHIFT);
    hdr.step[1] = CV_ELEM_SIZE(hdr.flags);
    return hdr;
}
858
859
// Reshape into an arbitrary n-D shape over the same data (no copy).
// A zero in _newsz keeps the corresponding source dimension; the total
// element count must be preserved. Only continuous matrices are supported
// in the general n-D case.
Mat Mat::reshape(int _cn, int _newndims, const int* _newsz) const
{
    if(_newndims == dims)
    {
        // same dimensionality: delegate to the simpler 2-D overloads
        if(_newsz == 0)
            return reshape(_cn);
        if(_newndims == 2)
            return reshape(_cn, _newsz[0]);
    }

    if (isContinuous())
    {
        CV_Assert(_cn >= 0 && _newndims > 0 && _newndims <= CV_MAX_DIM && _newsz);

        if (_cn == 0)
            _cn = this->channels();
        else
            CV_Assert(_cn <= CV_CN_MAX);

        size_t total_elem1_ref = this->total() * this->channels();
        size_t total_elem1 = _cn;

        AutoBuffer<int, 4> newsz_buf( (size_t)_newndims );

        for (int i = 0; i < _newndims; i++)
        {
            CV_Assert(_newsz[i] >= 0);

            if (_newsz[i] > 0)
                newsz_buf[i] = _newsz[i];
            else if (i < dims)
                newsz_buf[i] = this->size[i];  // 0 means "keep this axis"
            else
                CV_Error(CV_StsOutOfRange, "Copy dimension (which has zero size) is not present in source matrix");

            total_elem1 *= (size_t)newsz_buf[i];
        }

        // the reshape must neither drop nor invent elements
        if (total_elem1 != total_elem1_ref)
            CV_Error(CV_StsUnmatchedSizes, "Requested and source matrices have different count of elements");

        Mat hdr = *this;
        hdr.flags = (hdr.flags & ~CV_MAT_CN_MASK) | ((_cn-1) << CV_CN_SHIFT);
        setSize(hdr, _newndims, newsz_buf.data(), NULL, true);

        return hdr;
    }

    CV_Error(CV_StsNotImplemented, "Reshaping of n-dimensional non-continuous matrices is not supported yet");
    // TBD
}
910
911
// Reshape using a target shape given as a vector. An empty shape is valid
// only for an empty matrix, which is returned unchanged.
Mat Mat::reshape(int _cn, const std::vector<int>& _newshape) const
{
    if(_newshape.empty())
    {
        CV_Assert(empty());
        return *this;
    }

    // data() instead of &_newshape[0]: idiomatic, and consistent with
    // Mat::create(const std::vector<int>&, int) above
    return reshape(_cn, (int)_newshape.size(), _newshape.data());
}
921
922
// Build a square len x len matrix with the vector d (a single row or single
// column) on the main diagonal and zeros elsewhere.
Mat Mat::diag(const Mat& d)
{
    CV_Assert( d.cols == 1 || d.rows == 1 );
    int len = d.rows + d.cols - 1;  // length of the 1-D vector
    Mat m(len, len, d.type(), Scalar(0));
    Mat md = m.diag();
    if( d.cols == 1 )
        d.copyTo(md);
    else
        transpose(d, md);  // row vector: transpose into the column-shaped diag view
    return m;
}
934
935
// Return the number of _elemChannels-element items if this matrix can be
// interpreted as a 1-D vector of such items (optionally requiring a specific
// depth and/or continuity); return -1 otherwise. Accepts 2-D row/column
// vectors (items packed as channels or as columns) and 3-D 1xN/Nx1 layouts.
int Mat::checkVector(int _elemChannels, int _depth, bool _requireContinuous) const
{
    return data && (depth() == _depth || _depth <= 0) &&
           (isContinuous() || !_requireContinuous) &&
           ((dims == 2 && (((rows == 1 || cols == 1) && channels() == _elemChannels) ||
                           (cols == _elemChannels && channels() == 1))) ||
            (dims == 3 && channels() == 1 && size.p[2] == _elemChannels && (size.p[0] == 1 || size.p[1] == 1) &&
             (isContinuous() || step.p[1] == step.p[2]*size.p[2])))
           ? (int)(total()*channels()/_elemChannels) : -1;
}
945
946
} // cv::
947
948