Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Tetragramm
GitHub Repository: Tetragramm/opencv
Path: blob/master/modules/core/src/lapack.cpp
16337 views
1
/*M///////////////////////////////////////////////////////////////////////////////////////
2
//
3
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4
//
5
// By downloading, copying, installing or using the software you agree to this license.
6
// If you do not agree to this license, do not download, install,
7
// copy or use the software.
8
//
9
//
10
// License Agreement
11
// For Open Source Computer Vision Library
12
//
13
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15
// Third party copyrights are property of their respective owners.
16
//
17
// Redistribution and use in source and binary forms, with or without modification,
18
// are permitted provided that the following conditions are met:
19
//
20
// * Redistribution's of source code must retain the above copyright notice,
21
// this list of conditions and the following disclaimer.
22
//
23
// * Redistribution's in binary form must reproduce the above copyright notice,
24
// this list of conditions and the following disclaimer in the documentation
25
// and/or other materials provided with the distribution.
26
//
27
// * The name of the copyright holders may not be used to endorse or promote products
28
// derived from this software without specific prior written permission.
29
//
30
// This software is provided by the copyright holders and contributors "as is" and
31
// any express or implied warranties, including, but not limited to, the implied
32
// warranties of merchantability and fitness for a particular purpose are disclaimed.
33
// In no event shall the Intel Corporation or contributors be liable for any direct,
34
// indirect, incidental, special, exemplary, or consequential damages
35
// (including, but not limited to, procurement of substitute goods or services;
36
// loss of use, data, or profits; or business interruption) however caused
37
// and on any theory of liability, whether in contract, strict liability,
38
// or tort (including negligence or otherwise) arising in any way out of
39
// the use of this software, even if advised of the possibility of such damage.
40
//
41
//M*/
42
43
#include "precomp.hpp"
44
#include <limits>
45
46
#ifdef HAVE_EIGEN
47
# if defined(_MSC_VER)
48
# pragma warning(push)
49
# pragma warning(disable:4701) // potentially uninitialized local variable
50
# pragma warning(disable:4702) // unreachable code
51
# pragma warning(disable:4714) // const marked as __forceinline not inlined
52
# endif
53
# include <Eigen/Core>
54
# include <Eigen/Eigenvalues>
55
# if defined(_MSC_VER)
56
# pragma warning(pop)
57
# endif
58
# include "opencv2/core/eigen.hpp"
59
#endif
60
61
#if defined _M_IX86 && defined _MSC_VER && _MSC_VER < 1700
62
#pragma float_control(precise, on)
63
#endif
64
65
namespace cv
66
{
67
68
int LU(float* A, size_t astep, int m, float* b, size_t bstep, int n)
69
{
70
CV_INSTRUMENT_REGION();
71
72
return hal::LU32f(A, astep, m, b, bstep, n);
73
}
74
75
int LU(double* A, size_t astep, int m, double* b, size_t bstep, int n)
76
{
77
CV_INSTRUMENT_REGION();
78
79
return hal::LU64f(A, astep, m, b, bstep, n);
80
}
81
82
bool Cholesky(float* A, size_t astep, int m, float* b, size_t bstep, int n)
83
{
84
CV_INSTRUMENT_REGION();
85
86
return hal::Cholesky32f(A, astep, m, b, bstep, n);
87
}
88
89
bool Cholesky(double* A, size_t astep, int m, double* b, size_t bstep, int n)
90
{
91
CV_INSTRUMENT_REGION();
92
93
return hal::Cholesky64f(A, astep, m, b, bstep, n);
94
}
95
96
// Overflow/underflow-safe sqrt(a*a + b*b): the magnitudes are ordered and
// the smaller one is divided by the larger before squaring, so the
// intermediate never overflows even when a or b is near the type's limits.
// Returns 0 when both inputs are zero.
template<typename _Tp> static inline _Tp hypot(_Tp a, _Tp b)
{
    _Tp big = std::abs(a), small = std::abs(b);
    if( big < small )
        std::swap(big, small);
    if( big == 0 )
        return 0;
    _Tp r = small/big;
    return big*std::sqrt(1 + r*r);
}
112
113
114
// Classic Jacobi eigenvalue algorithm for a symmetric n x n matrix A.
// On return W holds the eigenvalues in descending order and, if V is
// non-NULL, the rows of V hold the corresponding eigenvectors.
// astep/vstep are byte steps on entry and are converted to element strides.
// buf must provide space for 2*n ints (pivot bookkeeping); A is destroyed.
// Note: only the upper triangle of A is read/updated.
template<typename _Tp> bool
JacobiImpl_( _Tp* A, size_t astep, _Tp* W, _Tp* V, size_t vstep, int n, uchar* buf )
{
    const _Tp eps = std::numeric_limits<_Tp>::epsilon();
    int i, j, k, m;

    astep /= sizeof(A[0]);
    if( V )
    {
        // start eigenvector matrix as identity; rotations accumulate into it
        vstep /= sizeof(V[0]);
        for( i = 0; i < n; i++ )
        {
            for( j = 0; j < n; j++ )
                V[i*vstep + j] = (_Tp)0;
            V[i*vstep + i] = (_Tp)1;
        }
    }

    int iters, maxIters = n*n*30;

    // indR[k] / indC[k] cache the position of the largest off-diagonal
    // element in row k (right of diagonal) / column k (above diagonal),
    // so pivot search per sweep is O(n) instead of O(n^2).
    int* indR = (int*)alignPtr(buf, sizeof(int));
    int* indC = indR + n;
    _Tp mv = (_Tp)0;

    for( k = 0; k < n; k++ )
    {
        W[k] = A[(astep + 1)*k];  // diagonal entry, running eigenvalue estimate
        if( k < n - 1 )
        {
            for( m = k+1, mv = std::abs(A[astep*k + m]), i = k+2; i < n; i++ )
            {
                _Tp val = std::abs(A[astep*k+i]);
                if( mv < val )
                    mv = val, m = i;
            }
            indR[k] = m;
        }
        if( k > 0 )
        {
            for( m = 0, mv = std::abs(A[k]), i = 1; i < k; i++ )
            {
                _Tp val = std::abs(A[astep*i+k]);
                if( mv < val )
                    mv = val, m = i;
            }
            indC[k] = m;
        }
    }

    if( n > 1 ) for( iters = 0; iters < maxIters; iters++ )
    {
        // find index (k,l) of pivot p
        for( k = 0, mv = std::abs(A[indR[0]]), i = 1; i < n-1; i++ )
        {
            _Tp val = std::abs(A[astep*i + indR[i]]);
            if( mv < val )
                mv = val, k = i;
        }
        int l = indR[k];
        for( i = 1; i < n; i++ )
        {
            _Tp val = std::abs(A[astep*indC[i] + i]);
            if( mv < val )
                mv = val, k = indC[i], l = i;
        }

        _Tp p = A[astep*k + l];
        if( std::abs(p) <= eps )
            break;  // off-diagonal is negligible: converged
        // compute the Jacobi rotation (c, s) that annihilates A[k][l]
        _Tp y = (_Tp)((W[l] - W[k])*0.5);
        _Tp t = std::abs(y) + hypot(p, y);
        _Tp s = hypot(p, t);
        _Tp c = t/s;
        s = p/s; t = (p/t)*p;
        if( y < 0 )
            s = -s, t = -t;
        A[astep*k + l] = 0;

        W[k] -= t;
        W[l] += t;

        _Tp a0, b0;

#undef rotate
#define rotate(v0, v1) a0 = v0, b0 = v1, v0 = a0*c - b0*s, v1 = a0*s + b0*c

        // rotate rows and columns k and l
        for( i = 0; i < k; i++ )
            rotate(A[astep*i+k], A[astep*i+l]);
        for( i = k+1; i < l; i++ )
            rotate(A[astep*k+i], A[astep*i+l]);
        for( i = l+1; i < n; i++ )
            rotate(A[astep*k+i], A[astep*l+i]);

        // rotate eigenvectors
        if( V )
            for( i = 0; i < n; i++ )
                rotate(V[vstep*k+i], V[vstep*l+i]);

#undef rotate

        // refresh the cached row/column maxima for the two rows/cols touched
        for( j = 0; j < 2; j++ )
        {
            int idx = j == 0 ? k : l;
            if( idx < n - 1 )
            {
                for( m = idx+1, mv = std::abs(A[astep*idx + m]), i = idx+2; i < n; i++ )
                {
                    _Tp val = std::abs(A[astep*idx+i]);
                    if( mv < val )
                        mv = val, m = i;
                }
                indR[idx] = m;
            }
            if( idx > 0 )
            {
                for( m = 0, mv = std::abs(A[idx]), i = 1; i < idx; i++ )
                {
                    _Tp val = std::abs(A[astep*i+idx]);
                    if( mv < val )
                        mv = val, m = i;
                }
                indC[idx] = m;
            }
        }
    }

    // sort eigenvalues & eigenvectors (selection sort, descending)
    for( k = 0; k < n-1; k++ )
    {
        m = k;
        for( i = k+1; i < n; i++ )
        {
            if( W[m] < W[i] )
                m = i;
        }
        if( k != m )
        {
            std::swap(W[m], W[k]);
            if( V )
                for( i = 0; i < n; i++ )
                    std::swap(V[vstep*m + i], V[vstep*k + i]);
        }
    }

    return true;
}
261
262
static bool Jacobi( float* S, size_t sstep, float* e, float* E, size_t estep, int n, uchar* buf )
263
{
264
return JacobiImpl_(S, sstep, e, E, estep, n, buf);
265
}
266
267
static bool Jacobi( double* S, size_t sstep, double* e, double* E, size_t estep, int n, uchar* buf )
268
{
269
return JacobiImpl_(S, sstep, e, E, estep, n, buf);
270
}
271
272
273
// Tiny vectorized-BLAS facade used by the Jacobi SVD below. Each method
// returns the number of elements it processed; this generic version
// processes none (returns 0), so callers fall through to plain scalar
// loops. The CV_SIMD specializations below handle the bulk of the data
// with universal intrinsics and return how far they got.
template<typename T> struct VBLAS
{
    int dot(const T*, const T*, int, T*) const { return 0; }
    int givens(T*, T*, int, T, T) const { return 0; }
    int givensx(T*, T*, int, T, T, T*, T*) const { return 0; }
};
279
280
#if CV_SIMD
281
// SIMD dot product of a and b; stores the partial sum over the first k
// (multiple of vector width) elements into *result and returns k.
// Returns 0 (doing nothing) when n is too short to be worth vectorizing.
template<> inline int VBLAS<float>::dot(const float* a, const float* b, int n, float* result) const
{
    if( n < 2*v_float32::nlanes )
        return 0;
    int k = 0;
    v_float32 s0 = vx_setzero_f32();
    for( ; k <= n - v_float32::nlanes; k += v_float32::nlanes )
    {
        v_float32 a0 = vx_load(a + k);
        v_float32 b0 = vx_load(b + k);

        s0 += a0 * b0;
    }
    *result = v_reduce_sum(s0);
    vx_cleanup();
    return k;
}
298
299
300
// Apply a Givens rotation (c, s) in-place to the vector pair (a, b):
//   a' = c*a + s*b,  b' = c*b - s*a
// for the first k elements (k a multiple of the vector width); returns k.
// The caller finishes the tail [k, n) with scalar code.
template<> inline int VBLAS<float>::givens(float* a, float* b, int n, float c, float s) const
{
    if( n < v_float32::nlanes)
        return 0;
    int k = 0;
    v_float32 c4 = vx_setall_f32(c), s4 = vx_setall_f32(s);
    for( ; k <= n - v_float32::nlanes; k += v_float32::nlanes )
    {
        v_float32 a0 = vx_load(a + k);
        v_float32 b0 = vx_load(b + k);
        v_float32 t0 = (a0 * c4) + (b0 * s4);
        v_float32 t1 = (b0 * c4) - (a0 * s4);
        v_store(a + k, t0);
        v_store(b + k, t1);
    }
    vx_cleanup();
    return k;
}
318
319
320
// Apply a Givens rotation (c, s) in-place to (a, b) like givens(), and
// additionally accumulate the squared norms of the rotated prefixes into
// *anorm / *bnorm. Returns the number of elements processed (multiple of
// the vector width); the caller handles the remaining tail.
// BUGFIX: the accumulators previously used "t0 + t0" / "t1 + t1", which
// sums 2*t instead of t^2 — inconsistent with the squared-norm contract
// implemented by the VBLAS<double>::givensx specialization below.
template<> inline int VBLAS<float>::givensx(float* a, float* b, int n, float c, float s,
                                            float* anorm, float* bnorm) const
{
    if( n < v_float32::nlanes)
        return 0;
    int k = 0;
    v_float32 c4 = vx_setall_f32(c), s4 = vx_setall_f32(s);
    v_float32 sa = vx_setzero_f32(), sb = vx_setzero_f32();
    for( ; k <= n - v_float32::nlanes; k += v_float32::nlanes )
    {
        v_float32 a0 = vx_load(a + k);
        v_float32 b0 = vx_load(b + k);
        v_float32 t0 = (a0 * c4) + (b0 * s4);
        v_float32 t1 = (b0 * c4) - (a0 * s4);
        v_store(a + k, t0);
        v_store(b + k, t1);
        sa += t0 * t0;   // was t0 + t0 (wrong: accumulated 2*t0, not t0^2)
        sb += t1 * t1;   // was t1 + t1
    }
    *anorm = v_reduce_sum(sa);
    *bnorm = v_reduce_sum(sb);
    vx_cleanup();
    return k;
}
344
345
#if CV_SIMD_64F
346
// Double-precision SIMD dot product; same contract as the float version:
// stores the partial sum over the first k elements into *result, returns k,
// or returns 0 when n is too short to vectorize.
template<> inline int VBLAS<double>::dot(const double* a, const double* b, int n, double* result) const
{
    if( n < 2*v_float64::nlanes )
        return 0;
    int k = 0;
    v_float64 s0 = vx_setzero_f64();
    for( ; k <= n - v_float64::nlanes; k += v_float64::nlanes )
    {
        v_float64 a0 = vx_load(a + k);
        v_float64 b0 = vx_load(b + k);

        s0 += a0 * b0;
    }
    // horizontal reduce via a 2-element spill (128-bit f64 vector assumed here)
    double sbuf[2];
    v_store(sbuf, s0);
    *result = sbuf[0] + sbuf[1];
    vx_cleanup();
    return k;
}
365
366
367
// Double-precision Givens rotation, same contract as the float version.
// NOTE(review): unlike the float specialization there is no early-out for
// n < nlanes; the loop simply doesn't execute and 0 is returned, which is
// behaviorally equivalent.
template<> inline int VBLAS<double>::givens(double* a, double* b, int n, double c, double s) const
{
    int k = 0;
    v_float64 c2 = vx_setall_f64(c), s2 = vx_setall_f64(s);
    for( ; k <= n - v_float64::nlanes; k += v_float64::nlanes )
    {
        v_float64 a0 = vx_load(a + k);
        v_float64 b0 = vx_load(b + k);
        v_float64 t0 = (a0 * c2) + (b0 * s2);
        v_float64 t1 = (b0 * c2) - (a0 * s2);
        v_store(a + k, t0);
        v_store(b + k, t1);
    }
    vx_cleanup();
    return k;
}
383
384
385
// Double-precision Givens rotation that also accumulates the squared norms
// of the rotated prefixes into *anorm / *bnorm. Returns the number of
// elements processed; the caller finishes the tail.
template<> inline int VBLAS<double>::givensx(double* a, double* b, int n, double c, double s,
                                             double* anorm, double* bnorm) const
{
    int k = 0;
    v_float64 c2 = vx_setall_f64(c), s2 = vx_setall_f64(s);
    v_float64 sa = vx_setzero_f64(), sb = vx_setzero_f64();
    for( ; k <= n - v_float64::nlanes; k += v_float64::nlanes )
    {
        v_float64 a0 = vx_load(a + k);
        v_float64 b0 = vx_load(b + k);
        v_float64 t0 = (a0 * c2) + (b0 * s2);
        v_float64 t1 = (b0 * c2) - (a0 * s2);
        v_store(a + k, t0);
        v_store(b + k, t1);
        sa += t0 * t0;  // squared-norm accumulation
        sb += t1 * t1;
    }
    // horizontal reduce via a 2-element spill (128-bit f64 vector assumed here)
    double abuf[2], bbuf[2];
    v_store(abuf, sa);
    v_store(bbuf, sb);
    *anorm = abuf[0] + abuf[1];
    *bnorm = bbuf[0] + bbuf[1];
    return k;
}
409
#endif //CV_SIMD_64F
410
#endif //CV_SIMD
411
412
// One-sided Jacobi SVD of an m x n matrix given as its transpose At
// (n rows of length m). On return:
//   - _W receives the n singular values in descending order,
//   - the rows of At are overwritten with the left singular vectors U^T,
//   - if Vt is non-NULL, its rows receive the right singular vectors V^T
//     (n1 rows of U^T are produced; extra rows beyond rank are filled with
//     orthonormal vectors generated from random starts).
// astep/vstep are byte steps on entry, converted to element strides.
// minval/eps are the precision thresholds supplied per float/double type.
template<typename _Tp> void
JacobiSVDImpl_(_Tp* At, size_t astep, _Tp* _W, _Tp* Vt, size_t vstep,
               int m, int n, int n1, double minval, _Tp eps)
{
    VBLAS<_Tp> vblas;
    AutoBuffer<double> Wbuf(n);
    double* W = Wbuf.data();  // squared column norms, accumulated in double
    int i, j, k, iter, max_iter = std::max(m, 30);
    _Tp c, s;
    double sd;
    astep /= sizeof(At[0]);
    vstep /= sizeof(Vt[0]);

    // initialize W with squared row norms of At and Vt with the identity
    for( i = 0; i < n; i++ )
    {
        for( k = 0, sd = 0; k < m; k++ )
        {
            _Tp t = At[i*astep + k];
            sd += (double)t*t;
        }
        W[i] = sd;

        if( Vt )
        {
            for( k = 0; k < n; k++ )
                Vt[i*vstep + k] = 0;
            Vt[i*vstep + i] = 1;
        }
    }

    // sweep all row pairs (i, j), rotating each pair to orthogonality,
    // until a full sweep produces no rotation
    for( iter = 0; iter < max_iter; iter++ )
    {
        bool changed = false;

        for( i = 0; i < n-1; i++ )
            for( j = i+1; j < n; j++ )
            {
                _Tp *Ai = At + i*astep, *Aj = At + j*astep;
                double a = W[i], p = 0, b = W[j];

                for( k = 0; k < m; k++ )
                    p += (double)Ai[k]*Aj[k];

                // skip pairs that are already numerically orthogonal
                if( std::abs(p) <= eps*std::sqrt((double)a*b) )
                    continue;

                // compute the rotation (c, s) that zeroes the off-diagonal
                // entry of the 2x2 Gram matrix [[a, p/2], [p/2, b]]
                p *= 2;
                double beta = a - b, gamma = hypot((double)p, beta);
                if( beta < 0 )
                {
                    double delta = (gamma - beta)*0.5;
                    s = (_Tp)std::sqrt(delta/gamma);
                    c = (_Tp)(p/(gamma*s*2));
                }
                else
                {
                    c = (_Tp)std::sqrt((gamma + beta)/(gamma*2));
                    s = (_Tp)(p/(gamma*c*2));
                }

                // apply the rotation and recompute both squared norms exactly
                a = b = 0;
                for( k = 0; k < m; k++ )
                {
                    _Tp t0 = c*Ai[k] + s*Aj[k];
                    _Tp t1 = -s*Ai[k] + c*Aj[k];
                    Ai[k] = t0; Aj[k] = t1;

                    a += (double)t0*t0; b += (double)t1*t1;
                }
                W[i] = a; W[j] = b;

                changed = true;

                if( Vt )
                {
                    // same rotation on V^T; SIMD path first, scalar tail after
                    _Tp *Vi = Vt + i*vstep, *Vj = Vt + j*vstep;
                    k = vblas.givens(Vi, Vj, n, c, s);

                    for( ; k < n; k++ )
                    {
                        _Tp t0 = c*Vi[k] + s*Vj[k];
                        _Tp t1 = -s*Vi[k] + c*Vj[k];
                        Vi[k] = t0; Vj[k] = t1;
                    }
                }
            }
        if( !changed )
            break;
    }

    // singular values = final row norms of At
    for( i = 0; i < n; i++ )
    {
        for( k = 0, sd = 0; k < m; k++ )
        {
            _Tp t = At[i*astep + k];
            sd += (double)t*t;
        }
        W[i] = std::sqrt(sd);
    }

    // selection-sort singular values descending, permuting At/Vt rows along
    for( i = 0; i < n-1; i++ )
    {
        j = i;
        for( k = i+1; k < n; k++ )
        {
            if( W[j] < W[k] )
                j = k;
        }
        if( i != j )
        {
            std::swap(W[i], W[j]);
            if( Vt )
            {
                for( k = 0; k < m; k++ )
                    std::swap(At[i*astep + k], At[j*astep + k]);

                for( k = 0; k < n; k++ )
                    std::swap(Vt[i*vstep + k], Vt[j*vstep + k]);
            }
        }
    }

    for( i = 0; i < n; i++ )
        _W[i] = (_Tp)W[i];

    if( !Vt )
        return;

    // normalize the rows of At into unit left singular vectors; for zero
    // singular values (or the extra rows i >= n when n1 > n), synthesize
    // orthonormal vectors from a fixed-seed random start
    RNG rng(0x12345678);
    for( i = 0; i < n1; i++ )
    {
        sd = i < n ? W[i] : 0;

        for( int ii = 0; ii < 100 && sd <= minval; ii++ )
        {
            // if we got a zero singular value, then in order to get the corresponding left singular vector
            // we generate a random vector, project it to the previously computed left singular vectors,
            // subtract the projection and normalize the difference.
            const _Tp val0 = (_Tp)(1./m);
            for( k = 0; k < m; k++ )
            {
                _Tp val = (rng.next() & 256) != 0 ? val0 : -val0;
                At[i*astep + k] = val;
            }
            // two Gram-Schmidt passes for numerical stability
            for( iter = 0; iter < 2; iter++ )
            {
                for( j = 0; j < i; j++ )
                {
                    sd = 0;
                    for( k = 0; k < m; k++ )
                        sd += At[i*astep + k]*At[j*astep + k];
                    _Tp asum = 0;
                    for( k = 0; k < m; k++ )
                    {
                        _Tp t = (_Tp)(At[i*astep + k] - sd*At[j*astep + k]);
                        At[i*astep + k] = t;
                        asum += std::abs(t);
                    }
                    asum = asum > eps*100 ? 1/asum : 0;
                    for( k = 0; k < m; k++ )
                        At[i*astep + k] *= asum;
                }
            }
            sd = 0;
            for( k = 0; k < m; k++ )
            {
                _Tp t = At[i*astep + k];
                sd += (double)t*t;
            }
            sd = std::sqrt(sd);
        }

        s = (_Tp)(sd > minval ? 1/sd : 0.);
        for( k = 0; k < m; k++ )
            At[i*astep + k] *= s;
    }
}
589
590
591
// float convenience wrapper: SVD of At (transposed input) with no separate
// U output; routed through the HAL entry point so accelerated backends run.
static void JacobiSVD(float* At, size_t astep, float* W, float* Vt, size_t vstep, int m, int n, int n1=-1)
{
    hal::SVD32f(At, astep, W, NULL, astep, Vt, vstep, m, n, n1);
}
595
596
// double convenience wrapper: SVD of At (transposed input) with no separate
// U output; routed through the HAL entry point so accelerated backends run.
static void JacobiSVD(double* At, size_t astep, double* W, double* Vt, size_t vstep, int m, int n, int n1=-1)
{
    hal::SVD64f(At, astep, W, NULL, astep, Vt, vstep, m, n, n1);
}
600
601
template <typename fptype> static inline int
602
decodeSVDParameters(const fptype* U, const fptype* Vt, int m, int n, int n1)
603
{
604
int halSVDFlag = 0;
605
if(Vt == NULL)
606
halSVDFlag = CV_HAL_SVD_NO_UV;
607
else if(n1 <= 0 || n1 == n)
608
{
609
halSVDFlag = CV_HAL_SVD_SHORT_UV;
610
if(U == NULL)
611
halSVDFlag |= CV_HAL_SVD_MODIFY_A;
612
}
613
else if(n1 == m)
614
{
615
halSVDFlag = CV_HAL_SVD_FULL_UV;
616
if(U == NULL)
617
halSVDFlag |= CV_HAL_SVD_MODIFY_A;
618
}
619
return halSVDFlag;
620
}
621
622
// Public HAL dispatch for single-precision SVD. CALL_HAL returns from this
// function if an external implementation handles the call; otherwise we
// fall back to the built-in one-sided Jacobi SVD. n1 < 0 means "produce n
// left singular vectors"; a NULL Vt disables U/Vt computation entirely.
void hal::SVD32f(float* At, size_t astep, float* W, float* U, size_t ustep, float* Vt, size_t vstep, int m, int n, int n1)
{
    CALL_HAL(SVD32f, cv_hal_SVD32f, At, astep, W, U, ustep, Vt, vstep, m, n, decodeSVDParameters(U, Vt, m, n, n1))
    JacobiSVDImpl_(At, astep, W, Vt, vstep, m, n, !Vt ? 0 : n1 < 0 ? n : n1, FLT_MIN, FLT_EPSILON*2);
}
627
628
// Public HAL dispatch for double-precision SVD; see SVD32f above. Note the
// looser convergence epsilon (DBL_EPSILON*10 vs FLT_EPSILON*2).
void hal::SVD64f(double* At, size_t astep, double* W, double* U, size_t ustep, double* Vt, size_t vstep, int m, int n, int n1)
{
    CALL_HAL(SVD64f, cv_hal_SVD64f, At, astep, W, U, ustep, Vt, vstep, m, n, decodeSVDParameters(U, Vt, m, n, n1))
    JacobiSVDImpl_(At, astep, W, Vt, vstep, m, n, !Vt ? 0 : n1 < 0 ? n : n1, DBL_MIN, DBL_EPSILON*10);
}
633
634
/* y[0:m,0:n] += diag(a[0:1,0:m]) * x[0:m,0:n] */
635
/* y[0:m,0:n] += diag(a[0:1,0:m]) * x[0:m,0:n] */
// Row r of x is scaled by a[r*inca] and added into row r of y.
// dx/dy are row strides (in elements) of x and y; a stride of 0 reuses the
// same row, which SVBkSbImpl_ exploits to accumulate into a single vector.
template<typename T1, typename T2, typename T3> static void
MatrAXPY( int m, int n, const T1* x, int dx,
          const T2* a, int inca, T3* y, int dy )
{
    for( int row = 0; row < m; row++, x += dx, y += dy )
    {
        const T2 scale = a[row*inca];
        int col = 0;
#if CV_ENABLE_UNROLLED
        // manual 4-way unroll of the inner axpy loop
        for( ; col <= n - 4; col += 4 )
        {
            T3 u0 = (T3)(y[col] + scale*x[col]);
            T3 u1 = (T3)(y[col+1] + scale*x[col+1]);
            y[col] = u0;
            y[col+1] = u1;
            u0 = (T3)(y[col+2] + scale*x[col+2]);
            u1 = (T3)(y[col+3] + scale*x[col+3]);
            y[col+2] = u0;
            y[col+3] = u1;
        }
#endif
        for( ; col < n; col++ )
            y[col] = (T3)(y[col] + scale*x[col]);
    }
}
661
662
// SVD back-substitution: computes x = V * diag(1/w) * U^T * b, i.e. the
// least-squares / pseudo-inverse solve given a precomputed SVD (u, w, v).
// Singular values below eps * sum(w) are treated as zero (their components
// are skipped). When b is NULL the identity is implied, producing the
// pseudo-inverse itself. uT/vT indicate whether u/v are stored transposed;
// ldu/ldv/ldb/ldx are element strides, incw the stride of w, nb the number
// of right-hand-side columns. buffer must hold nb doubles.
template<typename T> static void
SVBkSbImpl_( int m, int n, const T* w, int incw,
             const T* u, int ldu, bool uT,
             const T* v, int ldv, bool vT,
             const T* b, int ldb, int nb,
             T* x, int ldx, double* buffer, T eps )
{
    double threshold = 0;
    // delta0 walks to the next singular vector, delta1 within a vector
    int udelta0 = uT ? ldu : 1, udelta1 = uT ? 1 : ldu;
    int vdelta0 = vT ? ldv : 1, vdelta1 = vT ? 1 : ldv;
    int i, j, nm = std::min(m, n);

    if( !b )
        nb = m;  // no RHS: solve against the m x m identity

    for( i = 0; i < n; i++ )
        for( j = 0; j < nb; j++ )
            x[i*ldx + j] = 0;

    // rank cutoff relative to the sum of all singular values
    for( i = 0; i < nm; i++ )
        threshold += w[i*incw];
    threshold *= eps;

    // v * inv(w) * uT * b
    for( i = 0; i < nm; i++, u += udelta0, v += vdelta0 )
    {
        double wi = w[i*incw];
        if( (double)std::abs(wi) <= threshold )
            continue;  // treat as zero singular value
        wi = 1/wi;

        if( nb == 1 )
        {
            // single-column fast path: scalar dot product, no scratch buffer
            double s = 0;
            if( b )
                for( j = 0; j < m; j++ )
                    s += u[j*udelta1]*b[j*ldb];
            else
                s = u[0];
            s *= wi;

            for( j = 0; j < n; j++ )
                x[j*ldx] = (T)(x[j*ldx] + s*v[j*vdelta1]);
        }
        else
        {
            if( b )
            {
                // buffer = (u_i^T * b) / w_i, accumulated row-by-row
                for( j = 0; j < nb; j++ )
                    buffer[j] = 0;
                MatrAXPY( m, nb, b, ldb, u, udelta1, buffer, 0 );
                for( j = 0; j < nb; j++ )
                    buffer[j] *= wi;
            }
            else
            {
                // b == identity: the needed row of u, scaled
                for( j = 0; j < nb; j++ )
                    buffer[j] = u[j*udelta1]*wi;
            }
            // x += v_i * buffer (outer-product accumulation)
            MatrAXPY( n, nb, buffer, 0, v, vdelta1, x, ldx );
        }
    }
}
725
726
static void
727
SVBkSb( int m, int n, const float* w, size_t wstep,
728
const float* u, size_t ustep, bool uT,
729
const float* v, size_t vstep, bool vT,
730
const float* b, size_t bstep, int nb,
731
float* x, size_t xstep, uchar* buffer )
732
{
733
SVBkSbImpl_(m, n, w, wstep ? (int)(wstep/sizeof(w[0])) : 1,
734
u, (int)(ustep/sizeof(u[0])), uT,
735
v, (int)(vstep/sizeof(v[0])), vT,
736
b, (int)(bstep/sizeof(b[0])), nb,
737
x, (int)(xstep/sizeof(x[0])),
738
(double*)alignPtr(buffer, sizeof(double)), (float)(DBL_EPSILON*2) );
739
}
740
741
static void
742
SVBkSb( int m, int n, const double* w, size_t wstep,
743
const double* u, size_t ustep, bool uT,
744
const double* v, size_t vstep, bool vT,
745
const double* b, size_t bstep, int nb,
746
double* x, size_t xstep, uchar* buffer )
747
{
748
SVBkSbImpl_(m, n, w, wstep ? (int)(wstep/sizeof(w[0])) : 1,
749
u, (int)(ustep/sizeof(u[0])), uT,
750
v, (int)(vstep/sizeof(v[0])), vT,
751
b, (int)(bstep/sizeof(b[0])), nb,
752
x, (int)(xstep/sizeof(x[0])),
753
(double*)alignPtr(buffer, sizeof(double)), DBL_EPSILON*2 );
754
}
755
756
}
757
758
/****************************************************************************************\
759
* Determinant of the matrix *
760
\****************************************************************************************/
761
762
#define det2(m) ((double)m(0,0)*m(1,1) - (double)m(0,1)*m(1,0))
763
#define det3(m) (m(0,0)*((double)m(1,1)*m(2,2) - (double)m(1,2)*m(2,1)) - \
764
m(0,1)*((double)m(1,0)*m(2,2) - (double)m(1,2)*m(2,0)) + \
765
m(0,2)*((double)m(1,0)*m(2,1) - (double)m(1,1)*m(2,0)))
766
767
// Determinant of a square CV_32F/CV_64F matrix. Sizes 1-3 use closed-form
// cofactor expansion (det2/det3 macros); larger matrices are copied and
// LU-factorized, with the determinant taken as the product of the diagonal
// times the HAL LU result (which is 0 for a singular matrix; otherwise it
// supplies the permutation's contribution — presumably the sign; confirm
// against the HAL LU contract).
double cv::determinant( InputArray _mat )
{
    CV_INSTRUMENT_REGION();

    Mat mat = _mat.getMat();
    double result = 0;
    int type = mat.type(), rows = mat.rows;
    size_t step = mat.step;
    const uchar* m = mat.ptr();

    CV_Assert( !mat.empty() );
    CV_Assert( mat.rows == mat.cols && (type == CV_32F || type == CV_64F));

// element accessors over the raw byte pointer; #undef'd before returning
#define Mf(y, x) ((float*)(m + y*step))[x]
#define Md(y, x) ((double*)(m + y*step))[x]

    if( type == CV_32F )
    {
        if( rows == 2 )
            result = det2(Mf);
        else if( rows == 3 )
            result = det3(Mf);
        else if( rows == 1 )
            result = Mf(0,0);
        else
        {
            // general case: LU on a scratch copy (input left untouched)
            size_t bufSize = rows*rows*sizeof(float);
            AutoBuffer<uchar> buffer(bufSize);
            Mat a(rows, rows, CV_32F, buffer.data());
            mat.copyTo(a);

            result = hal::LU32f(a.ptr<float>(), a.step, rows, 0, 0, 0);
            if( result )
            {
                for( int i = 0; i < rows; i++ )
                    result *= a.at<float>(i,i);
            }
        }
    }
    else
    {
        if( rows == 2 )
            result = det2(Md);
        else if( rows == 3 )
            result = det3(Md);
        else if( rows == 1 )
            result = Md(0,0);
        else
        {
            // general case: LU on a scratch copy (input left untouched)
            size_t bufSize = rows*rows*sizeof(double);
            AutoBuffer<uchar> buffer(bufSize);
            Mat a(rows, rows, CV_64F, buffer.data());
            mat.copyTo(a);

            result = hal::LU64f(a.ptr<double>(), a.step, rows, 0, 0, 0);
            if( result )
            {
                for( int i = 0; i < rows; i++ )
                    result *= a.at<double>(i,i);
            }
        }
    }

#undef Mf
#undef Md

    return result;
}
835
836
/****************************************************************************************\
837
* Inverse (or pseudo-inverse) of a matrix *
838
\****************************************************************************************/
839
840
#define Sf( y, x ) ((float*)(srcdata + y*srcstep))[x]
841
#define Sd( y, x ) ((double*)(srcdata + y*srcstep))[x]
842
#define Df( y, x ) ((float*)(dstdata + y*dststep))[x]
843
#define Dd( y, x ) ((double*)(dstdata + y*dststep))[x]
844
845
// Inverse (or pseudo-inverse) of a CV_32F/CV_64F matrix.
//   DECOMP_SVD      : pseudo-inverse of a possibly non-square matrix;
//                     returns the inverse condition number (w_min/w_max),
//                     or 0 if the matrix is numerically singular.
//   DECOMP_EIG      : like SVD but via symmetric eigendecomposition
//                     (matrix must be symmetric; square is asserted).
//   DECOMP_LU/CHOLESKY: exact inverse of a square matrix; returns 1 on
//                     success and 0 if singular (dst is zeroed then).
// Sizes 1-3 under LU/Cholesky use closed-form adjugate formulas, with
// optional SIMD paths for the 2x2 case.
double cv::invert( InputArray _src, OutputArray _dst, int method )
{
    CV_INSTRUMENT_REGION();

    bool result = false;
    Mat src = _src.getMat();
    int type = src.type();

    CV_Assert(type == CV_32F || type == CV_64F);

    size_t esz = CV_ELEM_SIZE(type);
    int m = src.rows, n = src.cols;

    if( method == DECOMP_SVD )
    {
        int nm = std::min(m, n);

        // single aligned scratch allocation partitioned into u, w, vt
        AutoBuffer<uchar> _buf((m*nm + nm + nm*n)*esz + sizeof(double));
        uchar* buf = alignPtr((uchar*)_buf.data(), (int)esz);
        Mat u(m, nm, type, buf);
        Mat w(nm, 1, type, u.ptr() + m*nm*esz);
        Mat vt(nm, n, type, w.ptr() + nm*esz);

        SVD::compute(src, w, u, vt);
        SVD::backSubst(w, u, vt, Mat(), _dst);
        // inverse condition number: smallest/largest singular value
        // NOTE(review): indexes w with n-1, which assumes m >= n here —
        // confirm for wide matrices.
        return type == CV_32F ?
            (w.ptr<float>()[0] >= FLT_EPSILON ?
             w.ptr<float>()[n-1]/w.ptr<float>()[0] : 0) :
            (w.ptr<double>()[0] >= DBL_EPSILON ?
             w.ptr<double>()[n-1]/w.ptr<double>()[0] : 0);
    }

    CV_Assert( m == n );

    if( method == DECOMP_EIG )
    {
        AutoBuffer<uchar> _buf((n*n*2 + n)*esz + sizeof(double));
        uchar* buf = alignPtr((uchar*)_buf.data(), (int)esz);
        Mat u(n, n, type, buf);
        Mat w(n, 1, type, u.ptr() + n*n*esz);
        Mat vt(n, n, type, w.ptr() + n*esz);

        eigen(src, w, vt);
        transpose(vt, u);  // eigen returns row vectors; backSubst wants u = v^T
        SVD::backSubst(w, u, vt, Mat(), _dst);
        return type == CV_32F ?
            (w.ptr<float>()[0] >= FLT_EPSILON ?
             w.ptr<float>()[n-1]/w.ptr<float>()[0] : 0) :
            (w.ptr<double>()[0] >= DBL_EPSILON ?
             w.ptr<double>()[n-1]/w.ptr<double>()[0] : 0);
    }

    CV_Assert( method == DECOMP_LU || method == DECOMP_CHOLESKY );

    _dst.create( n, n, type );
    Mat dst = _dst.getMat();

    if( n <= 3 )
    {
        // closed-form adjugate/determinant inverse for tiny matrices
        const uchar* srcdata = src.ptr();
        uchar* dstdata = dst.ptr();
        size_t srcstep = src.step;
        size_t dststep = dst.step;

        if( n == 2 )
        {
            if( type == CV_32FC1 )
            {
                double d = det2(Sf);
                if( d != 0. )
                {
                    result = true;
                    d = 1./d;
#if CV_SIMD128
                    // vectorized [[d, -b], [-c, a]] / det via sign-mask XOR
                    // and lane shuffles
                    static const float CV_DECL_ALIGNED(16) inv[4] = { 0.f,-0.f,-0.f,0.f };
                    v_float32x4 s0 = (v_load_halves((const float*)srcdata, (const float*)(srcdata + srcstep)) * v_setall_f32((float)d)) ^ v_load((const float *)inv);//0123//3120
                    s0 = v_extract<3>(s0, v_combine_low(v_rotate_right<1>(s0), s0));
                    v_store_low((float*)dstdata, s0);
                    v_store_high((float*)(dstdata + dststep), s0);
#else
                    double t0, t1;
                    t0 = Sf(0,0)*d;
                    t1 = Sf(1,1)*d;
                    Df(1,1) = (float)t0;
                    Df(0,0) = (float)t1;
                    t0 = -Sf(0,1)*d;
                    t1 = -Sf(1,0)*d;
                    Df(0,1) = (float)t0;
                    Df(1,0) = (float)t1;
#endif
                }
            }
            else
            {
                double d = det2(Sd);
                if( d != 0. )
                {
                    result = true;
                    d = 1./d;
#if CV_SIMD128_64F
                    v_float64x2 det = v_setall_f64(d);
                    v_float64x2 s0 = v_load((const double*)srcdata) * det;
                    v_float64x2 s1 = v_load((const double*)(srcdata+srcstep)) * det;
                    v_float64x2 sm = v_extract<1>(s1, s0);//30
                    v_float64x2 ss = v_extract<1>(s0, s1) ^ v_setall_f64(-0.);//12
                    v_store((double*)dstdata, v_combine_low(sm, ss));//31
                    v_store((double*)(dstdata + dststep), v_combine_high(ss, sm));//20
#else
                    double t0, t1;
                    t0 = Sd(0,0)*d;
                    t1 = Sd(1,1)*d;
                    Dd(1,1) = t0;
                    Dd(0,0) = t1;
                    t0 = -Sd(0,1)*d;
                    t1 = -Sd(1,0)*d;
                    Dd(0,1) = t0;
                    Dd(1,0) = t1;
#endif
                }
            }
        }
        else if( n == 3 )
        {
            if( type == CV_32FC1 )
            {
                double d = det3(Sf);

                if( d != 0. )
                {
                    // cofactor matrix scaled by 1/det, computed in double
                    double t[12];

                    result = true;
                    d = 1./d;
                    t[0] = (((double)Sf(1,1) * Sf(2,2) - (double)Sf(1,2) * Sf(2,1)) * d);
                    t[1] = (((double)Sf(0,2) * Sf(2,1) - (double)Sf(0,1) * Sf(2,2)) * d);
                    t[2] = (((double)Sf(0,1) * Sf(1,2) - (double)Sf(0,2) * Sf(1,1)) * d);

                    t[3] = (((double)Sf(1,2) * Sf(2,0) - (double)Sf(1,0) * Sf(2,2)) * d);
                    t[4] = (((double)Sf(0,0) * Sf(2,2) - (double)Sf(0,2) * Sf(2,0)) * d);
                    t[5] = (((double)Sf(0,2) * Sf(1,0) - (double)Sf(0,0) * Sf(1,2)) * d);

                    t[6] = (((double)Sf(1,0) * Sf(2,1) - (double)Sf(1,1) * Sf(2,0)) * d);
                    t[7] = (((double)Sf(0,1) * Sf(2,0) - (double)Sf(0,0) * Sf(2,1)) * d);
                    t[8] = (((double)Sf(0,0) * Sf(1,1) - (double)Sf(0,1) * Sf(1,0)) * d);

                    Df(0,0) = (float)t[0]; Df(0,1) = (float)t[1]; Df(0,2) = (float)t[2];
                    Df(1,0) = (float)t[3]; Df(1,1) = (float)t[4]; Df(1,2) = (float)t[5];
                    Df(2,0) = (float)t[6]; Df(2,1) = (float)t[7]; Df(2,2) = (float)t[8];
                }
            }
            else
            {
                double d = det3(Sd);
                if( d != 0. )
                {
                    result = true;
                    d = 1./d;
                    double t[9];

                    t[0] = (Sd(1,1) * Sd(2,2) - Sd(1,2) * Sd(2,1)) * d;
                    t[1] = (Sd(0,2) * Sd(2,1) - Sd(0,1) * Sd(2,2)) * d;
                    t[2] = (Sd(0,1) * Sd(1,2) - Sd(0,2) * Sd(1,1)) * d;

                    t[3] = (Sd(1,2) * Sd(2,0) - Sd(1,0) * Sd(2,2)) * d;
                    t[4] = (Sd(0,0) * Sd(2,2) - Sd(0,2) * Sd(2,0)) * d;
                    t[5] = (Sd(0,2) * Sd(1,0) - Sd(0,0) * Sd(1,2)) * d;

                    t[6] = (Sd(1,0) * Sd(2,1) - Sd(1,1) * Sd(2,0)) * d;
                    t[7] = (Sd(0,1) * Sd(2,0) - Sd(0,0) * Sd(2,1)) * d;
                    t[8] = (Sd(0,0) * Sd(1,1) - Sd(0,1) * Sd(1,0)) * d;

                    Dd(0,0) = t[0]; Dd(0,1) = t[1]; Dd(0,2) = t[2];
                    Dd(1,0) = t[3]; Dd(1,1) = t[4]; Dd(1,2) = t[5];
                    Dd(2,0) = t[6]; Dd(2,1) = t[7]; Dd(2,2) = t[8];
                }
            }
        }
        else
        {
            assert( n == 1 );

            if( type == CV_32FC1 )
            {
                double d = Sf(0,0);
                if( d != 0. )
                {
                    result = true;
                    Df(0,0) = (float)(1./d);
                }
            }
            else
            {
                double d = Sd(0,0);
                if( d != 0. )
                {
                    result = true;
                    Dd(0,0) = 1./d;
                }
            }
        }
        if( !result )
            dst = Scalar(0);
        return result;
    }

    // general case: solve A * X = I in-place on a scratch copy of src
    int elem_size = CV_ELEM_SIZE(type);
    AutoBuffer<uchar> buf(n*n*elem_size);
    Mat src1(n, n, type, buf.data());
    src.copyTo(src1);
    setIdentity(dst);

    if( method == DECOMP_LU && type == CV_32F )
        result = hal::LU32f(src1.ptr<float>(), src1.step, n, dst.ptr<float>(), dst.step, n) != 0;
    else if( method == DECOMP_LU && type == CV_64F )
        result = hal::LU64f(src1.ptr<double>(), src1.step, n, dst.ptr<double>(), dst.step, n) != 0;
    else if( method == DECOMP_CHOLESKY && type == CV_32F )
        result = hal::Cholesky32f(src1.ptr<float>(), src1.step, n, dst.ptr<float>(), dst.step, n);
    else
        result = hal::Cholesky64f(src1.ptr<double>(), src1.step, n, dst.ptr<double>(), dst.step, n);

    if( !result )
        dst = Scalar(0);

    return result;
}
1070
1071
1072
1073
/****************************************************************************************\
1074
* Solving a linear system *
1075
\****************************************************************************************/
1076
1077
bool cv::solve( InputArray _src, InputArray _src2arg, OutputArray _dst, int method )
1078
{
1079
CV_INSTRUMENT_REGION();
1080
1081
bool result = true;
1082
Mat src = _src.getMat(), _src2 = _src2arg.getMat();
1083
int type = src.type();
1084
bool is_normal = (method & DECOMP_NORMAL) != 0;
1085
1086
CV_Assert( type == _src2.type() && (type == CV_32F || type == CV_64F) );
1087
1088
method &= ~DECOMP_NORMAL;
1089
CV_Check(method, method == DECOMP_LU || method == DECOMP_SVD || method == DECOMP_EIG ||
1090
method == DECOMP_CHOLESKY || method == DECOMP_QR,
1091
"Unsupported method, see #DecompTypes");
1092
CV_Assert( (method != DECOMP_LU && method != DECOMP_CHOLESKY) ||
1093
is_normal || src.rows == src.cols );
1094
1095
// check case of a single equation and small matrix
1096
if( (method == DECOMP_LU || method == DECOMP_CHOLESKY) && !is_normal &&
1097
src.rows <= 3 && src.rows == src.cols && _src2.cols == 1 )
1098
{
1099
_dst.create( src.cols, _src2.cols, src.type() );
1100
Mat dst = _dst.getMat();
1101
1102
#define bf(y) ((float*)(bdata + y*src2step))[0]
1103
#define bd(y) ((double*)(bdata + y*src2step))[0]
1104
1105
const uchar* srcdata = src.ptr();
1106
const uchar* bdata = _src2.ptr();
1107
uchar* dstdata = dst.ptr();
1108
size_t srcstep = src.step;
1109
size_t src2step = _src2.step;
1110
size_t dststep = dst.step;
1111
1112
if( src.rows == 2 )
1113
{
1114
if( type == CV_32FC1 )
1115
{
1116
double d = det2(Sf);
1117
if( d != 0. )
1118
{
1119
double t;
1120
d = 1./d;
1121
t = (float)(((double)bf(0)*Sf(1,1) - (double)bf(1)*Sf(0,1))*d);
1122
Df(1,0) = (float)(((double)bf(1)*Sf(0,0) - (double)bf(0)*Sf(1,0))*d);
1123
Df(0,0) = (float)t;
1124
}
1125
else
1126
result = false;
1127
}
1128
else
1129
{
1130
double d = det2(Sd);
1131
if( d != 0. )
1132
{
1133
double t;
1134
d = 1./d;
1135
t = (bd(0)*Sd(1,1) - bd(1)*Sd(0,1))*d;
1136
Dd(1,0) = (bd(1)*Sd(0,0) - bd(0)*Sd(1,0))*d;
1137
Dd(0,0) = t;
1138
}
1139
else
1140
result = false;
1141
}
1142
}
1143
else if( src.rows == 3 )
1144
{
1145
if( type == CV_32FC1 )
1146
{
1147
double d = det3(Sf);
1148
if( d != 0. )
1149
{
1150
float t[3];
1151
d = 1./d;
1152
1153
t[0] = (float)(d*
1154
(bf(0)*((double)Sf(1,1)*Sf(2,2) - (double)Sf(1,2)*Sf(2,1)) -
1155
Sf(0,1)*((double)bf(1)*Sf(2,2) - (double)Sf(1,2)*bf(2)) +
1156
Sf(0,2)*((double)bf(1)*Sf(2,1) - (double)Sf(1,1)*bf(2))));
1157
1158
t[1] = (float)(d*
1159
(Sf(0,0)*(double)(bf(1)*Sf(2,2) - (double)Sf(1,2)*bf(2)) -
1160
bf(0)*((double)Sf(1,0)*Sf(2,2) - (double)Sf(1,2)*Sf(2,0)) +
1161
Sf(0,2)*((double)Sf(1,0)*bf(2) - (double)bf(1)*Sf(2,0))));
1162
1163
t[2] = (float)(d*
1164
(Sf(0,0)*((double)Sf(1,1)*bf(2) - (double)bf(1)*Sf(2,1)) -
1165
Sf(0,1)*((double)Sf(1,0)*bf(2) - (double)bf(1)*Sf(2,0)) +
1166
bf(0)*((double)Sf(1,0)*Sf(2,1) - (double)Sf(1,1)*Sf(2,0))));
1167
1168
Df(0,0) = t[0];
1169
Df(1,0) = t[1];
1170
Df(2,0) = t[2];
1171
}
1172
else
1173
result = false;
1174
}
1175
else
1176
{
1177
double d = det3(Sd);
1178
if( d != 0. )
1179
{
1180
double t[9];
1181
1182
d = 1./d;
1183
1184
t[0] = ((Sd(1,1) * Sd(2,2) - Sd(1,2) * Sd(2,1))*bd(0) +
1185
(Sd(0,2) * Sd(2,1) - Sd(0,1) * Sd(2,2))*bd(1) +
1186
(Sd(0,1) * Sd(1,2) - Sd(0,2) * Sd(1,1))*bd(2))*d;
1187
1188
t[1] = ((Sd(1,2) * Sd(2,0) - Sd(1,0) * Sd(2,2))*bd(0) +
1189
(Sd(0,0) * Sd(2,2) - Sd(0,2) * Sd(2,0))*bd(1) +
1190
(Sd(0,2) * Sd(1,0) - Sd(0,0) * Sd(1,2))*bd(2))*d;
1191
1192
t[2] = ((Sd(1,0) * Sd(2,1) - Sd(1,1) * Sd(2,0))*bd(0) +
1193
(Sd(0,1) * Sd(2,0) - Sd(0,0) * Sd(2,1))*bd(1) +
1194
(Sd(0,0) * Sd(1,1) - Sd(0,1) * Sd(1,0))*bd(2))*d;
1195
1196
Dd(0,0) = t[0];
1197
Dd(1,0) = t[1];
1198
Dd(2,0) = t[2];
1199
}
1200
else
1201
result = false;
1202
}
1203
}
1204
else
1205
{
1206
assert( src.rows == 1 );
1207
1208
if( type == CV_32FC1 )
1209
{
1210
double d = Sf(0,0);
1211
if( d != 0. )
1212
Df(0,0) = (float)(bf(0)/d);
1213
else
1214
result = false;
1215
}
1216
else
1217
{
1218
double d = Sd(0,0);
1219
if( d != 0. )
1220
Dd(0,0) = (bd(0)/d);
1221
else
1222
result = false;
1223
}
1224
}
1225
return result;
1226
}
1227
1228
int m = src.rows, m_ = m, n = src.cols, nb = _src2.cols;
1229
size_t esz = CV_ELEM_SIZE(type), bufsize = 0;
1230
size_t vstep = alignSize(n*esz, 16);
1231
size_t astep = method == DECOMP_SVD && !is_normal ? alignSize(m*esz, 16) : vstep;
1232
AutoBuffer<uchar> buffer;
1233
1234
Mat src2 = _src2;
1235
_dst.create( src.cols, src2.cols, src.type() );
1236
Mat dst = _dst.getMat();
1237
1238
if( m < n )
1239
CV_Error(CV_StsBadArg, "The function can not solve under-determined linear systems" );
1240
1241
if( m == n )
1242
is_normal = false;
1243
else if( is_normal )
1244
{
1245
m_ = n;
1246
if( method == DECOMP_SVD )
1247
method = DECOMP_EIG;
1248
}
1249
1250
size_t asize = astep*(method == DECOMP_SVD || is_normal ? n : m);
1251
bufsize += asize + 32;
1252
1253
if( is_normal )
1254
bufsize += n*nb*esz;
1255
if( method == DECOMP_SVD || method == DECOMP_EIG )
1256
bufsize += n*5*esz + n*vstep + nb*sizeof(double) + 32;
1257
1258
buffer.allocate(bufsize);
1259
uchar* ptr = alignPtr(buffer.data(), 16);
1260
1261
Mat a(m_, n, type, ptr, astep);
1262
1263
if( is_normal )
1264
mulTransposed(src, a, true);
1265
else if( method != DECOMP_SVD )
1266
src.copyTo(a);
1267
else
1268
{
1269
a = Mat(n, m_, type, ptr, astep);
1270
transpose(src, a);
1271
}
1272
ptr += asize;
1273
1274
if( !is_normal )
1275
{
1276
if( method == DECOMP_LU || method == DECOMP_CHOLESKY )
1277
src2.copyTo(dst);
1278
}
1279
else
1280
{
1281
// a'*b
1282
if( method == DECOMP_LU || method == DECOMP_CHOLESKY )
1283
gemm( src, src2, 1, Mat(), 0, dst, GEMM_1_T );
1284
else
1285
{
1286
Mat tmp(n, nb, type, ptr);
1287
ptr += n*nb*esz;
1288
gemm( src, src2, 1, Mat(), 0, tmp, GEMM_1_T );
1289
src2 = tmp;
1290
}
1291
}
1292
1293
if( method == DECOMP_LU )
1294
{
1295
if( type == CV_32F )
1296
result = hal::LU32f(a.ptr<float>(), a.step, n, dst.ptr<float>(), dst.step, nb) != 0;
1297
else
1298
result = hal::LU64f(a.ptr<double>(), a.step, n, dst.ptr<double>(), dst.step, nb) != 0;
1299
}
1300
else if( method == DECOMP_CHOLESKY )
1301
{
1302
if( type == CV_32F )
1303
result = hal::Cholesky32f(a.ptr<float>(), a.step, n, dst.ptr<float>(), dst.step, nb);
1304
else
1305
result = hal::Cholesky64f(a.ptr<double>(), a.step, n, dst.ptr<double>(), dst.step, nb);
1306
}
1307
else if( method == DECOMP_QR )
1308
{
1309
Mat rhsMat;
1310
if( is_normal || m == n )
1311
{
1312
src2.copyTo(dst);
1313
rhsMat = dst;
1314
}
1315
else
1316
{
1317
rhsMat = Mat(m, nb, type);
1318
src2.copyTo(rhsMat);
1319
}
1320
1321
if( type == CV_32F )
1322
result = hal::QR32f(a.ptr<float>(), a.step, a.rows, a.cols, rhsMat.cols, rhsMat.ptr<float>(), rhsMat.step, NULL) != 0;
1323
else
1324
result = hal::QR64f(a.ptr<double>(), a.step, a.rows, a.cols, rhsMat.cols, rhsMat.ptr<double>(), rhsMat.step, NULL) != 0;
1325
1326
if (rhsMat.rows != dst.rows)
1327
rhsMat.rowRange(0, dst.rows).copyTo(dst);
1328
}
1329
else
1330
{
1331
ptr = alignPtr(ptr, 16);
1332
Mat v(n, n, type, ptr, vstep), w(n, 1, type, ptr + vstep*n), u;
1333
ptr += n*(vstep + esz);
1334
1335
if( method == DECOMP_EIG )
1336
{
1337
if( type == CV_32F )
1338
Jacobi(a.ptr<float>(), a.step, w.ptr<float>(), v.ptr<float>(), v.step, n, ptr);
1339
else
1340
Jacobi(a.ptr<double>(), a.step, w.ptr<double>(), v.ptr<double>(), v.step, n, ptr);
1341
u = v;
1342
}
1343
else
1344
{
1345
if( type == CV_32F )
1346
JacobiSVD(a.ptr<float>(), a.step, w.ptr<float>(), v.ptr<float>(), v.step, m_, n);
1347
else
1348
JacobiSVD(a.ptr<double>(), a.step, w.ptr<double>(), v.ptr<double>(), v.step, m_, n);
1349
u = a;
1350
}
1351
1352
if( type == CV_32F )
1353
{
1354
SVBkSb(m_, n, w.ptr<float>(), 0, u.ptr<float>(), u.step, true,
1355
v.ptr<float>(), v.step, true, src2.ptr<float>(),
1356
src2.step, nb, dst.ptr<float>(), dst.step, ptr);
1357
}
1358
else
1359
{
1360
SVBkSb(m_, n, w.ptr<double>(), 0, u.ptr<double>(), u.step, true,
1361
v.ptr<double>(), v.step, true, src2.ptr<double>(),
1362
src2.step, nb, dst.ptr<double>(), dst.step, ptr);
1363
}
1364
result = true;
1365
}
1366
1367
if( !result )
1368
dst = Scalar(0);
1369
1370
return result;
1371
}
1372
1373
1374
/////////////////// finding eigenvalues and eigenvectors of a symmetric matrix ///////////////
1375
1376
/**
 * Computes eigenvalues and, optionally, eigenvectors of a symmetric matrix.
 *
 * @param _src     square, symmetric CV_32F or CV_64F matrix (symmetry is
 *                 assumed, not verified).
 * @param _evals   output n x 1 vector of eigenvalues.
 * @param _evects  optional output n x n matrix of eigenvectors (one per row).
 * @return true on success, false if the solver failed.
 */
bool cv::eigen( InputArray _src, OutputArray _evals, OutputArray _evects )
{
    CV_INSTRUMENT_REGION();

    Mat src = _src.getMat();
    int type = src.type();
    int n = src.rows;

    CV_Assert( src.rows == src.cols );
    CV_Assert (type == CV_32F || type == CV_64F);

    Mat v;
    if( _evects.needed() )
    {
        _evects.create(n, n, type);
        v = _evects.getMat();
    }

#ifdef HAVE_EIGEN
    const bool evecNeeded = _evects.needed();
    const int esOptions = evecNeeded ? Eigen::ComputeEigenvectors : Eigen::EigenvaluesOnly;
    _evals.create(n, 1, type);
    cv::Mat evals = _evals.getMat();
    if ( type == CV_64F )
    {
        Eigen::MatrixXd src_eig;
        cv::cv2eigen(src, src_eig);

        Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es;
        es.compute(src_eig, esOptions);
        if ( es.info() == Eigen::Success )
        {
            // Eigen sorts eigenvalues in increasing order; reverse() converts
            // to the descending order this function returns.
            cv::eigen2cv(es.eigenvalues().reverse().eval(), evals);
            if ( evecNeeded )
            {
                cv::eigen2cv(es.eigenvectors().rowwise().reverse().transpose().eval(), v);
            }
            return true;
        }
    } else { // CV_32F
        Eigen::MatrixXf src_eig;
        cv::cv2eigen(src, src_eig);

        Eigen::SelfAdjointEigenSolver<Eigen::MatrixXf> es;
        es.compute(src_eig, esOptions);
        if ( es.info() == Eigen::Success )
        {
            cv::eigen2cv(es.eigenvalues().reverse().eval(), evals);
            if ( evecNeeded )
            {
                cv::eigen2cv(es.eigenvectors().rowwise().reverse().transpose().eval(), v);
            }
            return true;
        }
    }
    return false;
#else

    // Fallback path: Jacobi rotations on a 16-byte-aligned working copy.
    // Buffer layout: [a: n*astep][w: n*elemSize][scratch for Jacobi].
    size_t elemSize = src.elemSize(), astep = alignSize(n*elemSize, 16);
    AutoBuffer<uchar> buf(n*astep + n*5*elemSize + 32);
    uchar* ptr = alignPtr(buf.data(), 16);
    Mat a(n, n, type, ptr, astep), w(n, 1, type, ptr + astep*n);
    ptr += astep*n + elemSize*n;
    src.copyTo(a);  // Jacobi works in place, so protect the caller's data
    bool ok = type == CV_32F ?
        Jacobi(a.ptr<float>(), a.step, w.ptr<float>(), v.ptr<float>(), v.step, n, ptr) :
        Jacobi(a.ptr<double>(), a.step, w.ptr<double>(), v.ptr<double>(), v.step, n, ptr);

    w.copyTo(_evals);
    return ok;
#endif
}
1449
1450
namespace cv
1451
{
1452
1453
// Shared SVD worker behind SVD::compute / SVD::operator().
// Computes singular values into _w and optionally the orthogonal factors
// into _u / _vt. Internally arranges the problem so that m >= n (swapping
// the roles of u and vt on output when the input was wide), and runs
// JacobiSVD on a single aligned scratch buffer.
static void _SVDcompute( InputArray _aarr, OutputArray _w,
                         OutputArray _u, OutputArray _vt, int flags )
{
    Mat src = _aarr.getMat();
    int m = src.rows, n = src.cols;
    int type = src.type();
    bool compute_uv = _u.needed() || _vt.needed();
    bool full_uv = (flags & SVD::FULL_UV) != 0;

    CV_Assert( type == CV_32F || type == CV_64F );

    // NO_UV overrides any requested outputs: values only.
    if( flags & SVD::NO_UV )
    {
        _u.release();
        _vt.release();
        compute_uv = full_uv = false;
    }

    // Normalize to a "tall" problem (m >= n); `at` records that the input
    // was implicitly transposed, which is compensated for at the end.
    bool at = false;
    if( m < n )
    {
        std::swap(m, n);
        at = true;
    }

    int urows = full_uv ? m : n;
    // One aligned allocation holding all temporaries:
    //   [buf .. +urows*astep)      temp_u (its first n rows alias temp_a)
    //   [+urows*astep .. +n*esz)   temp_w
    //   [aligned .. +n*vstep)      temp_v (only when compute_uv)
    size_t esz = src.elemSize(), astep = alignSize(m*esz, 16), vstep = alignSize(n*esz, 16);
    AutoBuffer<uchar> _buf(urows*astep + n*vstep + n*esz + 32);
    uchar* buf = alignPtr(_buf.data(), 16);
    Mat temp_a(n, m, type, buf, astep);
    Mat temp_w(n, 1, type, buf + urows*astep);
    Mat temp_u(urows, m, type, buf, astep), temp_v;

    if( compute_uv )
        temp_v = Mat(n, n, type, alignPtr(buf + urows*astep + n*esz, 16), vstep);

    // FULL_UV asks for more u rows than JacobiSVD fills; pre-zero them.
    if( urows > n )
        temp_u = Scalar::all(0);

    // temp_a is n x m, i.e. the transpose of a tall input — hence the
    // transpose() here; a wide input is already in that shape.
    if( !at )
        transpose(src, temp_a);
    else
        src.copyTo(temp_a);

    if( type == CV_32F )
    {
        JacobiSVD(temp_a.ptr<float>(), temp_u.step, temp_w.ptr<float>(),
                  temp_v.ptr<float>(), temp_v.step, m, n, compute_uv ? urows : 0);
    }
    else
    {
        JacobiSVD(temp_a.ptr<double>(), temp_u.step, temp_w.ptr<double>(),
                  temp_v.ptr<double>(), temp_v.step, m, n, compute_uv ? urows : 0);
    }
    temp_w.copyTo(_w);
    if( compute_uv )
    {
        if( !at )
        {
            // Tall input: temp_u holds u transposed, temp_v is vt directly.
            if( _u.needed() )
                transpose(temp_u, _u);
            if( _vt.needed() )
                temp_v.copyTo(_vt);
        }
        else
        {
            // Wide input (implicitly transposed): u and vt swap roles.
            if( _u.needed() )
                transpose(temp_v, _u);
            if( _vt.needed() )
                temp_u.copyTo(_vt);
        }
    }
}
1526
1527
1528
// Public entry point: full decomposition (singular values plus the
// orthogonal factors u and vt) delegated to the shared worker.
void SVD::compute( InputArray a, OutputArray w, OutputArray u, OutputArray vt, int flags )
{
    CV_INSTRUMENT_REGION();

    _SVDcompute(a, w, u, vt, flags);
}
1534
1535
// Values-only overload: empty outputs make the worker skip u/vt entirely.
void SVD::compute( InputArray a, OutputArray w, int flags )
{
    CV_INSTRUMENT_REGION();

    _SVDcompute(a, w, noArray(), noArray(), flags);
}
1541
1542
// Static back-substitution: given a precomputed SVD (w, u, vt) of an m x n
// matrix, computes dst so that it (approximately) solves A*dst = rhs using
// the singular-value decomposition. When rhs has no data, nb falls back to m.
void SVD::backSubst( InputArray _w, InputArray _u, InputArray _vt,
                     InputArray _rhs, OutputArray _dst )
{
    Mat w = _w.getMat(), u = _u.getMat(), vt = _vt.getMat(), rhs = _rhs.getMat();
    int type = w.type(), esz = (int)w.elemSize();
    int m = u.rows, n = vt.cols, nb = rhs.data ? rhs.cols : m, nm = std::min(m, n);
    // w may be stored as a row vector, a column vector, or a full diagonal
    // matrix; wstep is the byte stride that walks the singular values in
    // each layout (step + esz steps along the main diagonal).
    size_t wstep = w.rows == 1 ? (size_t)esz : w.cols == 1 ? (size_t)w.step : (size_t)w.step + esz;
    AutoBuffer<uchar> buffer(nb*sizeof(double) + 16);  // scratch row for SVBkSb
    CV_Assert( w.type() == u.type() && u.type() == vt.type() && u.data && vt.data && w.data );
    CV_Assert( u.cols >= nm && vt.rows >= nm &&
               (w.size() == Size(nm, 1) || w.size() == Size(1, nm) || w.size() == Size(vt.rows, u.cols)) );
    CV_Assert( rhs.data == 0 || (rhs.type() == type && rhs.rows == m) );

    _dst.create( n, nb, type );
    Mat dst = _dst.getMat();
    if( type == CV_32F )
        SVBkSb(m, n, w.ptr<float>(), wstep, u.ptr<float>(), u.step, false,
               vt.ptr<float>(), vt.step, true, rhs.ptr<float>(), rhs.step, nb,
               dst.ptr<float>(), dst.step, buffer.data());
    else if( type == CV_64F )
        SVBkSb(m, n, w.ptr<double>(), wstep, u.ptr<double>(), u.step, false,
               vt.ptr<double>(), vt.step, true, rhs.ptr<double>(), rhs.step, nb,
               dst.ptr<double>(), dst.step, buffer.data());
    else
        CV_Error( CV_StsUnsupportedFormat, "" );
}
1568
1569
1570
// Decomposes `a` into this object's own w/u/vt members; returns *this so
// calls can be chained.
SVD& SVD::operator ()(InputArray a, int flags)
{
    _SVDcompute(a, w, u, vt, flags);
    return *this;
}
1575
1576
1577
// Instance back-substitution: reuses the factors stored by a previous
// compute()/operator() call on this object.
void SVD::backSubst( InputArray rhs, OutputArray dst ) const
{
    backSubst( w, u, vt, rhs, dst );
}
1581
1582
}
1583
1584
1585
// Procedural wrapper over the SVD class interface.
void cv::SVDecomp(InputArray src, OutputArray w, OutputArray u, OutputArray vt, int flags)
{
    CV_INSTRUMENT_REGION();

    SVD::compute(src, w, u, vt, flags);
}
1591
1592
// Procedural wrapper over SVD::backSubst.
void cv::SVBackSubst(InputArray w, InputArray u, InputArray vt, InputArray rhs, OutputArray dst)
{
    CV_INSTRUMENT_REGION();

    SVD::backSubst(w, u, vt, rhs, dst);
}
1598
1599
1600
// Legacy C API: determinant of a square matrix.
// Fast path: 2x2 and 3x3 single-channel float/double CvMat determinants are
// expanded in closed form; everything else (including 1x1) falls through to
// cv::determinant.
CV_IMPL double
cvDet( const CvArr* arr )
{
    if( CV_IS_MAT(arr) && ((CvMat*)arr)->rows <= 3 )
    {
        CvMat* mat = (CvMat*)arr;
        int type = CV_MAT_TYPE(mat->type);
        int rows = mat->rows;
        uchar* m = mat->data.ptr;
        int step = mat->step;
        CV_Assert( rows == mat->cols );

        // Element accessors used by the det2/det3 closed-form macros.
        #define Mf(y, x) ((float*)(m + y*step))[x]
        #define Md(y, x) ((double*)(m + y*step))[x]

        if( type == CV_32F )
        {
            if( rows == 2 )
                return det2(Mf);
            if( rows == 3 )
                return det3(Mf);
        }
        else if( type == CV_64F )
        {
            if( rows == 2 )
                return det2(Md);
            if( rows == 3 )
                return det3(Md);
        }

        // Keep the helper macros local to this function instead of leaking
        // them into the rest of the translation unit.
        #undef Mf
        #undef Md
    }
    return cv::determinant(cv::cvarrToMat(arr));
}
1632
1633
1634
// Legacy C API: matrix inversion. Maps the old CV_* method codes onto the
// cv::DECOMP_* flags and forwards to cv::invert.
CV_IMPL double
cvInvert( const CvArr* srcarr, CvArr* dstarr, int method )
{
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);

    CV_Assert( src.type() == dst.type() && src.rows == dst.cols && src.cols == dst.rows );

    int flags;
    if( method == CV_CHOLESKY )
        flags = cv::DECOMP_CHOLESKY;
    else if( method == CV_SVD )
        flags = cv::DECOMP_SVD;
    else if( method == CV_SVD_SYM )
        flags = cv::DECOMP_EIG;
    else
        flags = cv::DECOMP_LU;

    return cv::invert( src, dst, flags );
}
1644
1645
1646
// Legacy C API: linear-system solver. Splits the old method code into the
// base decomposition and the optional CV_NORMAL (normal equations) modifier,
// then forwards to cv::solve.
CV_IMPL int
cvSolve( const CvArr* Aarr, const CvArr* barr, CvArr* xarr, int method )
{
    cv::Mat A = cv::cvarrToMat(Aarr), b = cv::cvarrToMat(barr), x = cv::cvarrToMat(xarr);

    CV_Assert( A.type() == x.type() && A.cols == x.rows && x.cols == b.cols );

    bool is_normal = (method & CV_NORMAL) != 0;
    method &= ~CV_NORMAL;

    int flags;
    if( method == CV_CHOLESKY )
        flags = cv::DECOMP_CHOLESKY;
    else if( method == CV_SVD )
        flags = cv::DECOMP_SVD;
    else if( method == CV_SVD_SYM )
        flags = cv::DECOMP_EIG;
    else
        // Default: QR for over-determined systems, LU for square ones.
        flags = A.rows > A.cols ? cv::DECOMP_QR : cv::DECOMP_LU;

    if( is_normal )
        flags += cv::DECOMP_NORMAL;

    return cv::solve( A, b, x, flags );
}
1659
1660
1661
// Legacy C API: eigenvalues/eigenvectors of a symmetric matrix.
// The unnamed double/int/int parameters are obsolete (epsilon and index
// range in the old API) and are ignored. The header wrappers around the
// caller's arrays may be reallocated by eigen() when type/shape differ, so
// results are converted back into the caller's original storage afterwards;
// the pointer asserts verify that storage was indeed preserved.
CV_IMPL void
cvEigenVV( CvArr* srcarr, CvArr* evectsarr, CvArr* evalsarr, double,
           int, int )
{
    cv::Mat src = cv::cvarrToMat(srcarr), evals0 = cv::cvarrToMat(evalsarr), evals = evals0;
    if( evectsarr )
    {
        cv::Mat evects0 = cv::cvarrToMat(evectsarr), evects = evects0;
        eigen(src, evals, evects);
        // eigen() may have replaced the header's buffer; copy/convert the
        // result back into the caller-provided array.
        if( evects0.data != evects.data )
        {
            const uchar* p = evects0.ptr();
            evects.convertTo(evects0, evects0.type());
            CV_Assert( p == evects0.ptr() );
        }
    }
    else
        eigen(src, evals);
    if( evals0.data != evals.data )
    {
        const uchar* p = evals0.ptr();
        // The caller may pass the eigenvalues as a row or a column vector
        // and in a different depth; handle every combination.
        if( evals0.size() == evals.size() )
            evals.convertTo(evals0, evals0.type());
        else if( evals0.type() == evals.type() )
            cv::transpose(evals, evals0);
        else
            cv::Mat(evals.t()).convertTo(evals0, evals0.type());
        CV_Assert( p == evals0.ptr() );
    }
}
1691
1692
1693
// Legacy C API: singular value decomposition.
// Bridges the old cvSVD conventions (w possibly stored as a diagonal matrix,
// CV_SVD_U_T/CV_SVD_V_T transposition flags, optional outputs) onto the
// cv::SVD class, trying to write directly into the caller's arrays when the
// layouts match and copying/transposing otherwise.
CV_IMPL void
cvSVD( CvArr* aarr, CvArr* warr, CvArr* uarr, CvArr* varr, int flags )
{
    cv::Mat a = cv::cvarrToMat(aarr), w = cv::cvarrToMat(warr), u, v;
    int m = a.rows, n = a.cols, type = a.type(), mn = std::max(m, n), nm = std::min(m, n);

    // w may be a min(m,n) vector (row or column), a square diagonal matrix,
    // or a full m x n diagonal matrix.
    CV_Assert( w.type() == type &&
               (w.size() == cv::Size(nm,1) || w.size() == cv::Size(1, nm) ||
                w.size() == cv::Size(nm, nm) || w.size() == cv::Size(n, m)) );

    cv::SVD svd;

    // Let SVD write singular values straight into the caller's buffer when
    // its layout is compatible.
    if( w.size() == cv::Size(nm, 1) )
        svd.w = cv::Mat(nm, 1, type, w.ptr() );
    else if( w.isContinuous() )
        svd.w = w;

    if( uarr )
    {
        u = cv::cvarrToMat(uarr);
        CV_Assert( u.type() == type );
        svd.u = u;
    }

    if( varr )
    {
        v = cv::cvarrToMat(varr);
        CV_Assert( v.type() == type );
        svd.vt = v;
    }

    // FULL_UV is requested only when the caller supplied square mn x mn
    // output arrays for a non-square input.
    svd(a, ((flags & CV_SVD_MODIFY_A) ? cv::SVD::MODIFY_A : 0) |
        ((!svd.u.data && !svd.vt.data) ? cv::SVD::NO_UV : 0) |
        ((m != n && (svd.u.size() == cv::Size(mn, mn) ||
                     svd.vt.size() == cv::Size(mn, mn))) ? cv::SVD::FULL_UV : 0));

    // Fix up u: the old API may want it transposed, or the class may have
    // reallocated instead of writing in place.
    if( !u.empty() )
    {
        if( flags & CV_SVD_U_T )
            cv::transpose( svd.u, u );
        else if( u.data != svd.u.data )
        {
            CV_Assert( u.size() == svd.u.size() );
            svd.u.copyTo(u);
        }
    }

    // Same for v; note the class stores vt, so "not transposed" in the old
    // API means a transpose is needed here.
    if( !v.empty() )
    {
        if( !(flags & CV_SVD_V_T) )
            cv::transpose( svd.vt, v );
        else if( v.data != svd.vt.data )
        {
            CV_Assert( v.size() == svd.vt.size() );
            svd.vt.copyTo(v);
        }
    }

    // If the caller's w buffer was not written in place, copy the values
    // back — onto the diagonal when the caller passed a matrix-shaped w.
    if( w.data != svd.w.data )
    {
        if( w.size() == svd.w.size() )
            svd.w.copyTo(w);
        else
        {
            w = cv::Scalar(0);
            cv::Mat wd = w.diag();
            svd.w.copyTo(wd);
        }
    }
}
1763
1764
1765
// Legacy C API: SVD back substitution.
// Normalizes the caller's u/v layout (the old API lets either factor be
// passed transposed, selected by CV_SVD_U_T / CV_SVD_V_T) into the u / vt
// form that cv::SVD::backSubst expects, then forwards the call.
CV_IMPL void
cvSVBkSb( const CvArr* warr, const CvArr* uarr,
          const CvArr* varr, const CvArr* rhsarr,
          CvArr* dstarr, int flags )
{
    cv::Mat w = cv::cvarrToMat(warr);
    cv::Mat u = cv::cvarrToMat(uarr);
    cv::Mat v = cv::cvarrToMat(varr);
    cv::Mat rhs;
    // dst0 remembers the caller's buffer so we can verify that backSubst
    // wrote the solution in place.
    cv::Mat dst = cv::cvarrToMat(dstarr), dst0 = dst;

    if( (flags & CV_SVD_U_T) != 0 )
    {
        cv::Mat ut;
        transpose(u, ut);
        u = ut;
    }
    if( (flags & CV_SVD_V_T) == 0 )
    {
        cv::Mat vt;
        transpose(v, vt);
        v = vt;
    }
    if( rhsarr )
        rhs = cv::cvarrToMat(rhsarr);

    cv::SVD::backSubst(w, u, v, rhs, dst);
    CV_Assert( dst.data == dst0.data );
}
1791
1792