Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Tetragramm
GitHub Repository: Tetragramm/opencv
Path: blob/master/modules/core/src/lda.cpp
16337 views
1
/*
2
* Copyright (c) 2011. Philipp Wagner <bytefish[at]gmx[dot]de>.
3
* Released to public domain under terms of the BSD Simplified license.
4
*
5
* Redistribution and use in source and binary forms, with or without
6
* modification, are permitted provided that the following conditions are met:
7
* * Redistributions of source code must retain the above copyright
8
* notice, this list of conditions and the following disclaimer.
9
* * Redistributions in binary form must reproduce the above copyright
10
* notice, this list of conditions and the following disclaimer in the
11
* documentation and/or other materials provided with the distribution.
12
* * Neither the name of the organization nor the names of its contributors
13
* may be used to endorse or promote products derived from this software
14
* without specific prior written permission.
15
*
16
* See <http://www.opensource.org/licenses/bsd-license>
17
*/
18
19
#include "precomp.hpp"
20
#include <iostream>
21
#include <map>
22
#include <set>
23
24
namespace cv
25
{
26
27
// Removes duplicate elements in a given vector.
//
// The returned elements are unique and sorted ascending, since they are
// produced by iterating a std::set (same contract as the original
// two-loop implementation, just expressed with range constructors).
template<typename _Tp>
inline std::vector<_Tp> remove_dups(const std::vector<_Tp>& src) {
    // std::set deduplicates and orders in one pass.
    std::set<_Tp> set_elems(src.begin(), src.end());
    return std::vector<_Tp>(set_elems.begin(), set_elems.end());
}
40
41
// Returns the indices that would sort the given vector, either ascending
// (default) or descending. The input must be a single row or single column.
static Mat argsort(InputArray _src, bool ascending=true)
{
    Mat src = _src.getMat();
    // Sorting indices only makes sense for vector-shaped input.
    if (src.rows != 1 && src.cols != 1) {
        CV_Error(Error::StsBadArg, "Wrong shape of input matrix! Expected a matrix with one row or column.");
    }
    const int sortFlags = (ascending ? SORT_ASCENDING : SORT_DESCENDING) | SORT_EVERY_ROW;
    Mat indices;
    // Flatten to a single row so SORT_EVERY_ROW sorts the whole vector.
    sortIdx(src.reshape(1, 1), indices, sortFlags);
    return indices;
}
53
54
// Stacks the n input samples as the rows of a single n x d matrix of type
// rtype, converting each sample with the given alpha/beta scaling.
// Returns an empty Mat when no samples are given.
static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double beta=0) {
    // Only vector-of-Mat, array-of-Mat, or vector-of-vector input is accepted.
    const int kind = src.kind();
    if (kind != _InputArray::STD_VECTOR_MAT && kind != _InputArray::STD_ARRAY_MAT &&
        kind != _InputArray::STD_VECTOR_VECTOR) {
        CV_Error(Error::StsBadArg, "The data is expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< std::vector<...> >).");
    }
    const size_t n = src.total();       // number of samples
    if (n == 0)
        return Mat();
    // Dimensionality is taken from the first (reshaped) sample.
    const size_t d = src.getMat(0).total();
    Mat data((int)n, (int)d, rtype);
    for (int i = 0; i < (int)n; i++) {
        Mat sample = src.getMat(i);
        // Every sample must carry exactly d elements to be reshapeable.
        if (sample.total() != d) {
            String error_message = format("Wrong number of elements in matrix #%d! Expected %d was %d.", i, (int)d, (int)sample.total());
            CV_Error(Error::StsBadArg, error_message);
        }
        Mat xi = data.row(i);
        // reshape() requires continuous data, so clone when necessary.
        if (sample.isContinuous()) {
            sample.reshape(1, 1).convertTo(xi, rtype, alpha, beta);
        } else {
            sample.clone().reshape(1, 1).convertTo(xi, rtype, alpha, beta);
        }
    }
    return data;
}
88
89
// Copies the columns of _src into _dst, reordered so that destination
// column i holds source column _indices[i]. Indices must be CV_32SC1.
static void sortMatrixColumnsByIndices(InputArray _src, InputArray _indices, OutputArray _dst) {
    if (_indices.getMat().type() != CV_32SC1) {
        CV_Error(Error::StsUnsupportedFormat, "cv::sortColumnsByIndices only works on integer indices!");
    }
    Mat src = _src.getMat();
    std::vector<int> indices = _indices.getMat();
    // Output has the same geometry and type as the input.
    _dst.create(src.rows, src.cols, src.type());
    Mat dst = _dst.getMat();
    for (int i = 0; i < (int)indices.size(); i++) {
        Mat target = dst.col(i);
        src.col(indices[i]).copyTo(target);
    }
}
103
104
// Convenience overload: returns the column-reordered matrix by value.
static Mat sortMatrixColumnsByIndices(InputArray src, InputArray indices) {
    Mat sorted;
    sortMatrixColumnsByIndices(src, indices, sorted);
    return sorted;
}
109
110
111
template<typename _Tp> static bool
112
isSymmetric_(InputArray src) {
113
Mat _src = src.getMat();
114
if(_src.cols != _src.rows)
115
return false;
116
for (int i = 0; i < _src.rows; i++) {
117
for (int j = 0; j < _src.cols; j++) {
118
_Tp a = _src.at<_Tp> (i, j);
119
_Tp b = _src.at<_Tp> (j, i);
120
if (a != b) {
121
return false;
122
}
123
}
124
}
125
return true;
126
}
127
128
template<typename _Tp> static bool
129
isSymmetric_(InputArray src, double eps) {
130
Mat _src = src.getMat();
131
if(_src.cols != _src.rows)
132
return false;
133
for (int i = 0; i < _src.rows; i++) {
134
for (int j = 0; j < _src.cols; j++) {
135
_Tp a = _src.at<_Tp> (i, j);
136
_Tp b = _src.at<_Tp> (j, i);
137
if (std::abs(a - b) > eps) {
138
return false;
139
}
140
}
141
}
142
return true;
143
}
144
145
// Dispatches the symmetry test on the matrix element type: exact comparison
// for integral types, eps-tolerant comparison for float/double. Unsupported
// types are reported as not symmetric.
static bool isSymmetric(InputArray src, double eps=1e-16)
{
    Mat m = src.getMat();
    switch (m.type()) {
    case CV_8SC1:  return isSymmetric_<char>(m);
    case CV_8UC1:  return isSymmetric_<unsigned char>(m);
    case CV_16SC1: return isSymmetric_<short>(m);
    case CV_16UC1: return isSymmetric_<unsigned short>(m);
    case CV_32SC1: return isSymmetric_<int>(m);
    case CV_32FC1: return isSymmetric_<float>(m, eps);
    case CV_64FC1: return isSymmetric_<double>(m, eps);
    default:       return false;
    }
}
167
168
169
//------------------------------------------------------------------------------
170
// cv::subspaceProject
171
//------------------------------------------------------------------------------
172
// Projects the row-aligned samples in _src onto the subspace spanned by the
// columns of _W, optionally centering by _mean first: Y = (X - mean) * W.
// An empty mean skips the centering step.
Mat LDA::subspaceProject(InputArray _W, InputArray _mean, InputArray _src) {
    Mat W = _W.getMat();
    Mat mean = _mean.getMat();
    Mat src = _src.getMat();
    const int n = src.rows;   // number of samples
    const int d = src.cols;   // sample dimensionality
    // W must map d-dimensional rows, i.e. have d rows itself.
    if (W.rows != d) {
        String error_message = format("Wrong shapes for given matrices. Was size(src) = (%d,%d), size(W) = (%d,%d).", src.rows, src.cols, W.rows, W.cols);
        CV_Error(Error::StsBadArg, error_message);
    }
    // A non-empty mean must have one entry per input dimension.
    if (!mean.empty() && (mean.total() != (size_t) d)) {
        String error_message = format("Wrong mean shape for the given data matrix. Expected %d, but was %zu.", d, mean.total());
        CV_Error(Error::StsBadArg, error_message);
    }
    // Work in W's type so the gemm below is well-defined.
    Mat X;
    src.convertTo(X, W.type());
    if (!mean.empty()) {
        // Safe after the shape assertion above.
        Mat meanRow = mean.reshape(1, 1);
        for (int i = 0; i < n; i++) {
            Mat r_i = X.row(i);
            subtract(r_i, meanRow, r_i);
        }
    }
    // Y = (X - mean) * W
    Mat Y;
    gemm(X, W, 1.0, Mat(), 0.0, Y);
    return Y;
}
205
206
//------------------------------------------------------------------------------
207
// cv::subspaceReconstruct
208
//------------------------------------------------------------------------------
209
// Reconstructs samples from their subspace projections: X = Y * W^T (+ mean).
//
// _src holds row-aligned projection coefficients (n x d where d == W.cols),
// _W the eigenvector basis, and _mean the optional mean to re-add after the
// back-projection; an empty mean skips that step.
//
// Fix: the mean-shape check requires mean.total() == W.rows, but the error
// message previously reported W.cols as the expected size, which was
// misleading when it fired. The message now reports W.rows.
Mat LDA::subspaceReconstruct(InputArray _W, InputArray _mean, InputArray _src)
{
    // get data matrices
    Mat W = _W.getMat();
    Mat mean = _mean.getMat();
    Mat src = _src.getMat();
    // get number of samples and dimension
    int n = src.rows;
    int d = src.cols;
    // make sure the data has the correct shape: projections have one
    // coefficient per basis vector (column of W)
    if(W.cols != d) {
        String error_message = format("Wrong shapes for given matrices. Was size(src) = (%d,%d), size(W) = (%d,%d).", src.rows, src.cols, W.rows, W.cols);
        CV_Error(Error::StsBadArg, error_message);
    }
    // make sure mean is correct if not empty: the reconstruction lives in the
    // original W.rows-dimensional space
    if(!mean.empty() && (mean.total() != (size_t) W.rows)) {
        String error_message = format("Wrong mean shape for the given eigenvector matrix. Expected %d, but was %zu.", W.rows, mean.total());
        CV_Error(Error::StsBadArg, error_message);
    }
    // initialize temporary matrices
    Mat X, Y;
    // copy data & make sure we are using the correct type
    src.convertTo(Y, W.type());
    // calculate the reconstruction: X = Y * W^T
    gemm(Y, W, 1.0, Mat(), 0.0, X, GEMM_2_T);
    // safe to do because of above assertion
    if(!mean.empty()) {
        for(int i=0; i<n; i++) {
            Mat r_i = X.row(i);
            add(r_i, mean.reshape(1,1), r_i);
        }
    }
    return X;
}
243
244
245
// Eigenvalue decomposition of a general (possibly non-symmetric) real square
// matrix. This is a C++ port of the EigenvalueSolver in JAMA (public domain,
// The MathWorks / NIST), which itself derives from the Algol/EISPACK routines
// orthes (Hessenberg reduction) and hqr2 (real Schur form via QR iteration).
//
// NOTE(review): the class manages its working memory with raw new[]/delete[]
// freed inside compute(); the destructor intentionally frees nothing — see
// the comments on release() and ~EigenvalueDecomposition().
class EigenvalueDecomposition {
private:

    // Holds the data dimension.
    int n;

    // Stores real/imag part of a complex division.
    // (Out-params of cdiv(); written there, read by hqr2().)
    double cdivr, cdivi;

    // Pointer to internal memory.
    // d/e: real and imaginary parts of the eigenvalues (length n).
    // ort: working storage for the Householder reduction (length n).
    // V: accumulated transformations (eigenvectors), H: working copy of the
    // input matrix, reduced in place; both are n x n row arrays.
    double *d, *e, *ort;
    double **V, **H;

    // Holds the computed eigenvalues.
    Mat _eigenvalues;

    // Holds the computed eigenvectors.
    Mat _eigenvectors;

    // Allocates an uninitialized array of m elements.
    template<typename _Tp>
    _Tp *alloc_1d(int m) {
        return new _Tp[m];
    }

    // Allocates an array of m elements, each initialized to val.
    template<typename _Tp>
    _Tp *alloc_1d(int m, _Tp val) {
        _Tp *arr = alloc_1d<_Tp> (m);
        for (int i = 0; i < m; i++)
            arr[i] = val;
        return arr;
    }

    // Allocates an uninitialized m x _n row-array matrix.
    template<typename _Tp>
    _Tp **alloc_2d(int m, int _n) {
        _Tp **arr = new _Tp*[m];
        for (int i = 0; i < m; i++)
            arr[i] = new _Tp[_n];
        return arr;
    }

    // Allocates an m x _n matrix with every element initialized to val.
    template<typename _Tp>
    _Tp **alloc_2d(int m, int _n, _Tp val) {
        _Tp **arr = alloc_2d<_Tp> (m, _n);
        for (int i = 0; i < m; i++) {
            for (int j = 0; j < _n; j++) {
                arr[i][j] = val;
            }
        }
        return arr;
    }

    // Complex scalar division (xr + i*xi) / (yr + i*yi); the result is left
    // in cdivr/cdivi. Branches on the larger denominator component to avoid
    // overflow/underflow (standard Smith's algorithm form).
    void cdiv(double xr, double xi, double yr, double yi) {
        double r, dv;
        if (std::abs(yr) > std::abs(yi)) {
            r = yi / yr;
            dv = yr + r * yi;
            cdivr = (xr + r * xi) / dv;
            cdivi = (xi - r * xr) / dv;
        } else {
            r = yr / yi;
            dv = yi + r * yr;
            cdivr = (r * xr + xi) / dv;
            cdivi = (r * xi - xr) / dv;
        }
    }

    // Nonsymmetric reduction from Hessenberg to real Schur form.
    // On entry H is upper Hessenberg (from orthes()); on exit d/e hold the
    // real/imaginary eigenvalue parts and V the eigenvectors.
    // NOTE(review): statement order here is load-bearing (p/q/r/s/w/x/y/z are
    // reused across sections) — do not reorder.
    void hqr2() {

        // This is derived from the Algol procedure hqr2,
        // by Martin and Wilkinson, Handbook for Auto. Comp.,
        // Vol.ii-Linear Algebra, and the corresponding
        // Fortran subroutine in EISPACK.

        // Initialize
        int nn = this->n;
        int n1 = nn - 1;
        int low = 0;
        int high = nn - 1;
        double eps = std::pow(2.0, -52.0);  // double machine epsilon
        double exshift = 0.0;
        double p = 0, q = 0, r = 0, s = 0, z = 0, t, w, x, y;

        // Store roots isolated by balanc and compute matrix norm

        double norm = 0.0;
        for (int i = 0; i < nn; i++) {
            // With low == 0 and high == nn-1 this branch is dead, but it is
            // kept for fidelity with the EISPACK original (balancing).
            if (i < low || i > high) {
                d[i] = H[i][i];
                e[i] = 0.0;
            }
            for (int j = std::max(i - 1, 0); j < nn; j++) {
                norm = norm + std::abs(H[i][j]);
            }
        }

        // Outer loop over eigenvalue index
        int iter = 0;
        while (n1 >= low) {

            // Look for single small sub-diagonal element
            int l = n1;
            while (l > low) {
                // Guard against division issues for (near-)zero matrices.
                if (norm < FLT_EPSILON) {
                    break;
                }
                s = std::abs(H[l - 1][l - 1]) + std::abs(H[l][l]);
                if (s == 0.0) {
                    s = norm;
                }
                if (std::abs(H[l][l - 1]) < eps * s) {
                    break;
                }
                l--;
            }

            // Check for convergence
            // One root found

            if (l == n1) {
                H[n1][n1] = H[n1][n1] + exshift;
                d[n1] = H[n1][n1];
                e[n1] = 0.0;
                n1--;
                iter = 0;

                // Two roots found

            } else if (l == n1 - 1) {
                w = H[n1][n1 - 1] * H[n1 - 1][n1];
                p = (H[n1 - 1][n1 - 1] - H[n1][n1]) / 2.0;
                q = p * p + w;
                z = std::sqrt(std::abs(q));
                H[n1][n1] = H[n1][n1] + exshift;
                H[n1 - 1][n1 - 1] = H[n1 - 1][n1 - 1] + exshift;
                x = H[n1][n1];

                // Real pair

                if (q >= 0) {
                    if (p >= 0) {
                        z = p + z;
                    } else {
                        z = p - z;
                    }
                    d[n1 - 1] = x + z;
                    d[n1] = d[n1 - 1];
                    if (z != 0.0) {
                        d[n1] = x - w / z;
                    }
                    e[n1 - 1] = 0.0;
                    e[n1] = 0.0;
                    x = H[n1][n1 - 1];
                    s = std::abs(x) + std::abs(z);
                    p = x / s;
                    q = z / s;
                    r = std::sqrt(p * p + q * q);
                    p = p / r;
                    q = q / r;

                    // Row modification

                    for (int j = n1 - 1; j < nn; j++) {
                        z = H[n1 - 1][j];
                        H[n1 - 1][j] = q * z + p * H[n1][j];
                        H[n1][j] = q * H[n1][j] - p * z;
                    }

                    // Column modification

                    for (int i = 0; i <= n1; i++) {
                        z = H[i][n1 - 1];
                        H[i][n1 - 1] = q * z + p * H[i][n1];
                        H[i][n1] = q * H[i][n1] - p * z;
                    }

                    // Accumulate transformations

                    for (int i = low; i <= high; i++) {
                        z = V[i][n1 - 1];
                        V[i][n1 - 1] = q * z + p * V[i][n1];
                        V[i][n1] = q * V[i][n1] - p * z;
                    }

                    // Complex pair

                } else {
                    d[n1 - 1] = x + p;
                    d[n1] = x + p;
                    e[n1 - 1] = z;
                    e[n1] = -z;
                }
                n1 = n1 - 2;
                iter = 0;

                // No convergence yet

            } else {

                // Form shift

                x = H[n1][n1];
                y = 0.0;
                w = 0.0;
                if (l < n1) {
                    y = H[n1 - 1][n1 - 1];
                    w = H[n1][n1 - 1] * H[n1 - 1][n1];
                }

                // Wilkinson's original ad hoc shift

                if (iter == 10) {
                    exshift += x;
                    for (int i = low; i <= n1; i++) {
                        H[i][i] -= x;
                    }
                    s = std::abs(H[n1][n1 - 1]) + std::abs(H[n1 - 1][n1 - 2]);
                    x = y = 0.75 * s;
                    w = -0.4375 * s * s;
                }

                // MATLAB's new ad hoc shift

                if (iter == 30) {
                    s = (y - x) / 2.0;
                    s = s * s + w;
                    if (s > 0) {
                        s = std::sqrt(s);
                        if (y < x) {
                            s = -s;
                        }
                        s = x - w / ((y - x) / 2.0 + s);
                        for (int i = low; i <= n1; i++) {
                            H[i][i] -= s;
                        }
                        exshift += s;
                        x = y = w = 0.964;
                    }
                }

                iter = iter + 1; // (Could check iteration count here.)

                // Look for two consecutive small sub-diagonal elements
                int m = n1 - 2;
                while (m >= l) {
                    z = H[m][m];
                    r = x - z;
                    s = y - z;
                    p = (r * s - w) / H[m + 1][m] + H[m][m + 1];
                    q = H[m + 1][m + 1] - z - r - s;
                    r = H[m + 2][m + 1];
                    s = std::abs(p) + std::abs(q) + std::abs(r);
                    p = p / s;
                    q = q / s;
                    r = r / s;
                    if (m == l) {
                        break;
                    }
                    if (std::abs(H[m][m - 1]) * (std::abs(q) + std::abs(r)) < eps * (std::abs(p)
                            * (std::abs(H[m - 1][m - 1]) + std::abs(z) + std::abs(
                                    H[m + 1][m + 1])))) {
                        break;
                    }
                    m--;
                }

                // Zero out entries that the coming QR step treats as zero.
                for (int i = m + 2; i <= n1; i++) {
                    H[i][i - 2] = 0.0;
                    if (i > m + 2) {
                        H[i][i - 3] = 0.0;
                    }
                }

                // Double QR step involving rows l:n and columns m:n

                for (int k = m; k < n1; k++) {
                    bool notlast = (k != n1 - 1);
                    if (k != m) {
                        p = H[k][k - 1];
                        q = H[k + 1][k - 1];
                        r = (notlast ? H[k + 2][k - 1] : 0.0);
                        x = std::abs(p) + std::abs(q) + std::abs(r);
                        if (x != 0.0) {
                            p = p / x;
                            q = q / x;
                            r = r / x;
                        }
                    }
                    if (x == 0.0) {
                        break;
                    }
                    s = std::sqrt(p * p + q * q + r * r);
                    if (p < 0) {
                        s = -s;
                    }
                    if (s != 0) {
                        if (k != m) {
                            H[k][k - 1] = -s * x;
                        } else if (l != m) {
                            H[k][k - 1] = -H[k][k - 1];
                        }
                        p = p + s;
                        x = p / s;
                        y = q / s;
                        z = r / s;
                        q = q / p;
                        r = r / p;

                        // Row modification

                        for (int j = k; j < nn; j++) {
                            p = H[k][j] + q * H[k + 1][j];
                            if (notlast) {
                                p = p + r * H[k + 2][j];
                                H[k + 2][j] = H[k + 2][j] - p * z;
                            }
                            H[k][j] = H[k][j] - p * x;
                            H[k + 1][j] = H[k + 1][j] - p * y;
                        }

                        // Column modification

                        for (int i = 0; i <= std::min(n1, k + 3); i++) {
                            p = x * H[i][k] + y * H[i][k + 1];
                            if (notlast) {
                                p = p + z * H[i][k + 2];
                                H[i][k + 2] = H[i][k + 2] - p * r;
                            }
                            H[i][k] = H[i][k] - p;
                            H[i][k + 1] = H[i][k + 1] - p * q;
                        }

                        // Accumulate transformations

                        for (int i = low; i <= high; i++) {
                            p = x * V[i][k] + y * V[i][k + 1];
                            if (notlast) {
                                p = p + z * V[i][k + 2];
                                V[i][k + 2] = V[i][k + 2] - p * r;
                            }
                            V[i][k] = V[i][k] - p;
                            V[i][k + 1] = V[i][k + 1] - p * q;
                        }
                    } // (s != 0)
                } // k loop
            } // check convergence
        } // while (n1 >= low)

        // Backsubstitute to find vectors of upper triangular form

        if (norm < FLT_EPSILON) {
            return;
        }

        for (n1 = nn - 1; n1 >= 0; n1--) {
            p = d[n1];
            q = e[n1];

            // Real vector

            if (q == 0) {
                int l = n1;
                H[n1][n1] = 1.0;
                for (int i = n1 - 1; i >= 0; i--) {
                    w = H[i][i] - p;
                    r = 0.0;
                    for (int j = l; j <= n1; j++) {
                        r = r + H[i][j] * H[j][n1];
                    }
                    if (e[i] < 0.0) {
                        z = w;
                        s = r;
                    } else {
                        l = i;
                        if (e[i] == 0.0) {
                            if (w != 0.0) {
                                H[i][n1] = -r / w;
                            } else {
                                // Perturb a zero pivot to avoid division by zero.
                                H[i][n1] = -r / (eps * norm);
                            }

                            // Solve real equations

                        } else {
                            x = H[i][i + 1];
                            y = H[i + 1][i];
                            q = (d[i] - p) * (d[i] - p) + e[i] * e[i];
                            t = (x * s - z * r) / q;
                            H[i][n1] = t;
                            if (std::abs(x) > std::abs(z)) {
                                H[i + 1][n1] = (-r - w * t) / x;
                            } else {
                                H[i + 1][n1] = (-s - y * t) / z;
                            }
                        }

                        // Overflow control

                        t = std::abs(H[i][n1]);
                        if ((eps * t) * t > 1) {
                            for (int j = i; j <= n1; j++) {
                                H[j][n1] = H[j][n1] / t;
                            }
                        }
                    }
                }
                // Complex vector
            } else if (q < 0) {
                int l = n1 - 1;

                // Last vector component imaginary so matrix is triangular

                if (std::abs(H[n1][n1 - 1]) > std::abs(H[n1 - 1][n1])) {
                    H[n1 - 1][n1 - 1] = q / H[n1][n1 - 1];
                    H[n1 - 1][n1] = -(H[n1][n1] - p) / H[n1][n1 - 1];
                } else {
                    cdiv(0.0, -H[n1 - 1][n1], H[n1 - 1][n1 - 1] - p, q);
                    H[n1 - 1][n1 - 1] = cdivr;
                    H[n1 - 1][n1] = cdivi;
                }
                H[n1][n1 - 1] = 0.0;
                H[n1][n1] = 1.0;
                for (int i = n1 - 2; i >= 0; i--) {
                    double ra, sa, vr, vi;
                    ra = 0.0;
                    sa = 0.0;
                    for (int j = l; j <= n1; j++) {
                        ra = ra + H[i][j] * H[j][n1 - 1];
                        sa = sa + H[i][j] * H[j][n1];
                    }
                    w = H[i][i] - p;

                    if (e[i] < 0.0) {
                        z = w;
                        r = ra;
                        s = sa;
                    } else {
                        l = i;
                        if (e[i] == 0) {
                            cdiv(-ra, -sa, w, q);
                            H[i][n1 - 1] = cdivr;
                            H[i][n1] = cdivi;
                        } else {

                            // Solve complex equations

                            x = H[i][i + 1];
                            y = H[i + 1][i];
                            vr = (d[i] - p) * (d[i] - p) + e[i] * e[i] - q * q;
                            vi = (d[i] - p) * 2.0 * q;
                            if (vr == 0.0 && vi == 0.0) {
                                // Perturb a vanishing denominator, as above.
                                vr = eps * norm * (std::abs(w) + std::abs(q) + std::abs(x)
                                        + std::abs(y) + std::abs(z));
                            }
                            cdiv(x * r - z * ra + q * sa,
                                    x * s - z * sa - q * ra, vr, vi);
                            H[i][n1 - 1] = cdivr;
                            H[i][n1] = cdivi;
                            if (std::abs(x) > (std::abs(z) + std::abs(q))) {
                                H[i + 1][n1 - 1] = (-ra - w * H[i][n1 - 1] + q
                                        * H[i][n1]) / x;
                                H[i + 1][n1] = (-sa - w * H[i][n1] - q * H[i][n1
                                        - 1]) / x;
                            } else {
                                cdiv(-r - y * H[i][n1 - 1], -s - y * H[i][n1], z,
                                        q);
                                H[i + 1][n1 - 1] = cdivr;
                                H[i + 1][n1] = cdivi;
                            }
                        }

                        // Overflow control

                        t = std::max(std::abs(H[i][n1 - 1]), std::abs(H[i][n1]));
                        if ((eps * t) * t > 1) {
                            for (int j = i; j <= n1; j++) {
                                H[j][n1 - 1] = H[j][n1 - 1] / t;
                                H[j][n1] = H[j][n1] / t;
                            }
                        }
                    }
                }
            }
        }

        // Vectors of isolated roots

        for (int i = 0; i < nn; i++) {
            if (i < low || i > high) {
                for (int j = i; j < nn; j++) {
                    V[i][j] = H[i][j];
                }
            }
        }

        // Back transformation to get eigenvectors of original matrix

        for (int j = nn - 1; j >= low; j--) {
            for (int i = low; i <= high; i++) {
                z = 0.0;
                for (int k = low; k <= std::min(j, high); k++) {
                    z = z + V[i][k] * H[k][j];
                }
                V[i][j] = z;
            }
        }
    }

    // Nonsymmetric reduction to Hessenberg form.
    // Reduces H in place via Householder similarity transformations and
    // initializes V with the accumulated transformations (Algol's ortran).
    void orthes() {
        // This is derived from the Algol procedures orthes and ortran,
        // by Martin and Wilkinson, Handbook for Auto. Comp.,
        // Vol.ii-Linear Algebra, and the corresponding
        // Fortran subroutines in EISPACK.
        int low = 0;
        int high = n - 1;

        for (int m = low + 1; m < high; m++) {

            // Scale column.

            double scale = 0.0;
            for (int i = m; i <= high; i++) {
                scale = scale + std::abs(H[i][m - 1]);
            }
            if (scale != 0.0) {

                // Compute Householder transformation.

                double h = 0.0;
                for (int i = high; i >= m; i--) {
                    ort[i] = H[i][m - 1] / scale;
                    h += ort[i] * ort[i];
                }
                double g = std::sqrt(h);
                if (ort[m] > 0) {
                    g = -g;
                }
                h = h - ort[m] * g;
                ort[m] = ort[m] - g;

                // Apply Householder similarity transformation
                // H = (I-u*u'/h)*H*(I-u*u')/h)

                for (int j = m; j < n; j++) {
                    double f = 0.0;
                    for (int i = high; i >= m; i--) {
                        f += ort[i] * H[i][j];
                    }
                    f = f / h;
                    for (int i = m; i <= high; i++) {
                        H[i][j] -= f * ort[i];
                    }
                }

                for (int i = 0; i <= high; i++) {
                    double f = 0.0;
                    for (int j = high; j >= m; j--) {
                        f += ort[j] * H[i][j];
                    }
                    f = f / h;
                    for (int j = m; j <= high; j++) {
                        H[i][j] -= f * ort[j];
                    }
                }
                ort[m] = scale * ort[m];
                H[m][m - 1] = scale * g;
            }
        }

        // Accumulate transformations (Algol's ortran).

        // Start from the identity ...
        for (int i = 0; i < n; i++) {
            for (int j = 0; j < n; j++) {
                V[i][j] = (i == j ? 1.0 : 0.0);
            }
        }

        // ... and apply the stored Householder vectors in reverse order.
        for (int m = high - 1; m > low; m--) {
            if (H[m][m - 1] != 0.0) {
                for (int i = m + 1; i <= high; i++) {
                    ort[i] = H[i][m - 1];
                }
                for (int j = m; j <= high; j++) {
                    double g = 0.0;
                    for (int i = m; i <= high; i++) {
                        g += ort[i] * V[i][j];
                    }
                    // Double division avoids possible underflow
                    g = (g / ort[m]) / H[m][m - 1];
                    for (int i = m; i <= high; i++) {
                        V[i][j] += g * ort[i];
                    }
                }
            }
        }
    }

    // Releases all internal working memory.
    // Must only be called after compute() allocated d/e/ort/H/V; compute()
    // calls it on both the success and the exception path.
    void release() {
        // releases the working data
        delete[] d;
        delete[] e;
        delete[] ort;
        for (int i = 0; i < n; i++) {
            delete[] H[i];
            delete[] V[i];
        }
        delete[] H;
        delete[] V;
    }

    // Computes the Eigenvalue Decomposition for a matrix given in H.
    // Precondition: n is set and H holds the n x n input (see the public
    // compute() overload). Results land in _eigenvalues/_eigenvectors.
    void compute() {
        // Allocate memory for the working data.
        V = alloc_2d<double> (n, n, 0.0);
        d = alloc_1d<double> (n);
        e = alloc_1d<double> (n);
        ort = alloc_1d<double> (n);
        CV_TRY {
            // Reduce to Hessenberg form.
            orthes();
            // Reduce Hessenberg to real Schur form.
            hqr2();
            // Copy eigenvalues to OpenCV Matrix.
            _eigenvalues.create(1, n, CV_64FC1);
            for (int i = 0; i < n; i++) {
                _eigenvalues.at<double> (0, i) = d[i];
            }
            // Copy eigenvectors to OpenCV Matrix.
            _eigenvectors.create(n, n, CV_64FC1);
            for (int i = 0; i < n; i++)
                for (int j = 0; j < n; j++)
                    _eigenvectors.at<double> (i, j) = V[i][j];
            // Deallocate the memory by releasing all internal working data.
            release();
        }
        CV_CATCH_ALL
        {
            // Free the working buffers before propagating the error.
            release();
            CV_RETHROW();
        }
    }

public:
    // Initializes & computes the Eigenvalue Decomposition for a general matrix
    // given in src. This function is a port of the EigenvalueSolver in JAMA,
    // which has been released to public domain by The MathWorks and the
    // National Institute of Standards and Technology (NIST).
    EigenvalueDecomposition(InputArray src, bool fallbackSymmetric = true) {
        compute(src, fallbackSymmetric);
    }

    // This function computes the Eigenvalue Decomposition for a general matrix
    // given in src. This function is a port of the EigenvalueSolver in JAMA,
    // which has been released to public domain by The MathWorks and the
    // National Institute of Standards and Technology (NIST).
    // If fallbackSymmetric is true and src is (numerically) symmetric, the
    // decomposition is delegated to cv::eigen instead.
    void compute(InputArray src, bool fallbackSymmetric)
    {
        CV_INSTRUMENT_REGION();

        if(fallbackSymmetric && isSymmetric(src)) {
            // Fall back to OpenCV for a symmetric matrix!
            cv::eigen(src, _eigenvalues, _eigenvectors);
        } else {
            Mat tmp;
            // Convert the given input matrix to double. Is there any way to
            // prevent allocating the temporary memory? Only used for copying
            // into working memory and deallocated after.
            src.getMat().convertTo(tmp, CV_64FC1);
            // Get dimension of the matrix.
            this->n = tmp.cols;
            // Allocate the matrix data to work on.
            this->H = alloc_2d<double> (n, n);
            // Now safely copy the data.
            for (int i = 0; i < tmp.rows; i++) {
                for (int j = 0; j < tmp.cols; j++) {
                    this->H[i][j] = tmp.at<double>(i, j);
                }
            }
            // Deallocates the temporary matrix before computing.
            tmp.release();
            // Performs the eigenvalue decomposition of H.
            compute();
        }
    }

    // Working memory is freed by release() inside compute(), so there is
    // nothing left to free here.
    ~EigenvalueDecomposition() {}

    // Returns the eigenvalues of the Eigenvalue Decomposition.
    Mat eigenvalues() const { return _eigenvalues; }
    // Returns the eigenvectors of the Eigenvalue Decomposition.
    Mat eigenvectors() const { return _eigenvectors; }
};
944
945
// Computes eigenvalues and (optionally) eigenvectors of a general, possibly
// non-symmetric real square matrix (CV_32F or CV_64F), sorted descending by
// eigenvalue. Results are converted back to the input type.
//
// Fix: the CV_32F branch converted into `src64f` with CV_32FC1, contradicting
// the variable's documented intent (the solver works in CV_64F internally);
// it now converts to CV_64FC1.
void eigenNonSymmetric(InputArray _src, OutputArray _evals, OutputArray _evects)
{
    CV_INSTRUMENT_REGION();

    Mat src = _src.getMat();
    int type = src.type();
    size_t n = (size_t)src.rows;

    CV_Assert(src.rows == src.cols);
    CV_Assert(type == CV_32F || type == CV_64F);

    // Promote to double precision up front.
    Mat src64f;
    if (type == CV_32F)
        src.convertTo(src64f, CV_64FC1);
    else
        src64f = src;

    // fallbackSymmetric=false: callers explicitly asked for the general
    // (non-symmetric) decomposition.
    EigenvalueDecomposition eigensystem(src64f, false);

    // EigenvalueDecomposition returns transposed and non-sorted eigenvalues
    std::vector<double> eigenvalues64f;
    eigensystem.eigenvalues().copyTo(eigenvalues64f);
    CV_Assert(eigenvalues64f.size() == n);

    // Determine the descending order of the eigenvalues ...
    std::vector<int> sort_indexes(n);
    cv::sortIdx(eigenvalues64f, sort_indexes, SORT_EVERY_ROW | SORT_DESCENDING);

    // ... and apply it.
    std::vector<double> sorted_eigenvalues64f(n);
    for (size_t i = 0; i < n; i++) sorted_eigenvalues64f[i] = eigenvalues64f[sort_indexes[i]];

    Mat(sorted_eigenvalues64f).convertTo(_evals, type);

    if( _evects.needed() )
    {
        // Eigenvectors come back column-wise; transpose to row-wise and
        // reorder with the same permutation as the eigenvalues.
        Mat eigenvectors64f = eigensystem.eigenvectors().t(); // transpose
        CV_Assert((size_t)eigenvectors64f.rows == n);
        CV_Assert((size_t)eigenvectors64f.cols == n);
        Mat_<double> sorted_eigenvectors64f((int)n, (int)n, CV_64FC1);
        for (size_t i = 0; i < n; i++)
        {
            double* pDst = sorted_eigenvectors64f.ptr<double>((int)i);
            double* pSrc = eigenvectors64f.ptr<double>(sort_indexes[(int)i]);
            CV_Assert(pSrc != NULL);
            memcpy(pDst, pSrc, n * sizeof(double));
        }
        sorted_eigenvectors64f.convertTo(_evects, type);
    }
}
993
994
995
//------------------------------------------------------------------------------
996
// Linear Discriminant Analysis implementation
997
//------------------------------------------------------------------------------
998
999
// Initializes a LDA that keeps num_components discriminants; the actual
// decomposition happens later, in compute().
LDA::LDA(int num_components) : _num_components(num_components) { }
1000
1001
// Initializes and immediately performs the Discriminant Analysis on the
// given samples and class labels, keeping num_components discriminants.
LDA::LDA(InputArrayOfArrays src, InputArray labels, int num_components) : _num_components(num_components)
{
    this->compute(src, labels); //! compute eigenvectors and eigenvalues
}
1005
1006
// Nothing to free manually: the Mat members release themselves.
LDA::~LDA() {}
1007
1008
void LDA::save(const String& filename) const
1009
{
1010
FileStorage fs(filename, FileStorage::WRITE);
1011
if (!fs.isOpened()) {
1012
CV_Error(Error::StsError, "File can't be opened for writing!");
1013
}
1014
this->save(fs);
1015
fs.release();
1016
}
1017
1018
// Deserializes this object from a given filename.
1019
void LDA::load(const String& filename) {
1020
FileStorage fs(filename, FileStorage::READ);
1021
if (!fs.isOpened())
1022
CV_Error(Error::StsError, "File can't be opened for reading!");
1023
this->load(fs);
1024
fs.release();
1025
}
1026
1027
// Serializes this object to a given FileStorage.
1028
void LDA::save(FileStorage& fs) const {
1029
// write matrices
1030
fs << "num_components" << _num_components;
1031
fs << "eigenvalues" << _eigenvalues;
1032
fs << "eigenvectors" << _eigenvectors;
1033
}
1034
1035
// Deserializes this object from a given FileStorage.
// Reads back the three fields written by save(FileStorage&).
void LDA::load(const FileStorage& fs) {
    //read matrices
    fs["num_components"] >> _num_components;
    fs["eigenvalues"] >> _eigenvalues;
    fs["eigenvectors"] >> _eigenvectors;
}
1042
1043
// Performs the actual Linear Discriminant Analysis on a row-sample matrix.
//
// _src:  N x D matrix with one sample per row (converted to CV_64F here).
// _lbls: N integer class labels (CV_32S expected; read via at<int>).
//
// Computes the within-class scatter Sw and between-class scatter Sb, then
// the eigen-decomposition of inv(Sw)*Sb; the _num_components eigenvectors
// with the largest eigenvalues are kept in _eigenvectors (column-wise),
// with the matching eigenvalues in _eigenvalues.
//
// NOTE(review): `data` is mutated in place (rows are centered on their class
// means) before Sw is formed — the statement order below is intentional.
void LDA::lda(InputArrayOfArrays _src, InputArray _lbls) {
    // get data
    Mat src = _src.getMat();
    std::vector<int> labels;
    // safely copy the labels
    {
        Mat tmp = _lbls.getMat();
        for(unsigned int i = 0; i < tmp.total(); i++) {
            labels.push_back(tmp.at<int>(i));
        }
    }
    // turn into row sampled matrix
    Mat data;
    // ensure working matrix is double precision
    src.convertTo(data, CV_64FC1);
    // maps the labels, so they're ascending: [0,1,...,C]
    std::vector<int> mapped_labels(labels.size());
    std::vector<int> num2label = remove_dups(labels);
    std::map<int, int> label2num;
    for (int i = 0; i < (int)num2label.size(); i++)
        label2num[num2label[i]] = i;
    for (size_t i = 0; i < labels.size(); i++)
        mapped_labels[i] = label2num[labels[i]];
    // get sample size, dimension
    int N = data.rows;
    int D = data.cols;
    // number of unique labels
    int C = (int)num2label.size();
    // we can't do a LDA on one class, what do you
    // want to separate from each other then?
    if(C == 1) {
        String error_message = "At least two classes are needed to perform a LDA. Reason: Only one class was given!";
        CV_Error(Error::StsBadArg, error_message);
    }
    // throw error if less labels, than samples
    if (labels.size() != static_cast<size_t>(N)) {
        String error_message = format("The number of samples must equal the number of labels. Given %zu labels, %d samples. ", labels.size(), N);
        CV_Error(Error::StsBadArg, error_message);
    }
    // warn if within-classes scatter matrix becomes singular
    if (N < D) {
        std::cout << "Warning: Less observations than feature dimension given!"
                  << "Computation will probably fail."
                  << std::endl;
    }
    // clip number of components to be a valid number: at most C-1
    // discriminants carry information
    if ((_num_components <= 0) || (_num_components >= C)) {
        _num_components = (C - 1);
    }
    // holds the mean over all classes
    Mat meanTotal = Mat::zeros(1, D, data.type());
    // holds the mean for each class
    std::vector<Mat> meanClass(C);
    std::vector<int> numClass(C);
    // initialize
    for (int i = 0; i < C; i++) {
        numClass[i] = 0;
        meanClass[i] = Mat::zeros(1, D, data.type()); //! Dx1 image vector
    }
    // calculate sums (per-class and total)
    for (int i = 0; i < N; i++) {
        Mat instance = data.row(i);
        int classIdx = mapped_labels[i];
        add(meanTotal, instance, meanTotal);
        add(meanClass[classIdx], instance, meanClass[classIdx]);
        numClass[classIdx]++;
    }
    // calculate total mean
    meanTotal.convertTo(meanTotal, meanTotal.type(), 1.0 / static_cast<double> (N));
    // calculate class means
    for (int i = 0; i < C; i++) {
        meanClass[i].convertTo(meanClass[i], meanClass[i].type(), 1.0 / static_cast<double> (numClass[i]));
    }
    // subtract class means (in-place centering of data; feeds Sw below)
    for (int i = 0; i < N; i++) {
        int classIdx = mapped_labels[i];
        Mat instance = data.row(i);
        subtract(instance, meanClass[classIdx], instance);
    }
    // calculate within-classes scatter: Sw = data' * data on centered rows
    Mat Sw = Mat::zeros(D, D, data.type());
    mulTransposed(data, Sw, true);
    // calculate between-classes scatter from the class-mean deviations
    Mat Sb = Mat::zeros(D, D, data.type());
    for (int i = 0; i < C; i++) {
        Mat tmp;
        subtract(meanClass[i], meanTotal, tmp);
        mulTransposed(tmp, tmp, true);
        add(Sb, tmp, Sb);
    }
    // invert Sw
    Mat Swi = Sw.inv();
    // M = inv(Sw)*Sb
    Mat M;
    gemm(Swi, Sb, 1.0, Mat(), 0.0, M);
    // M is generally non-symmetric, hence the general solver
    EigenvalueDecomposition es(M);
    _eigenvalues = es.eigenvalues();
    _eigenvectors = es.eigenvectors();
    // reshape eigenvalues, so they are stored by column
    _eigenvalues = _eigenvalues.reshape(1, 1);
    // get sorted indices descending by their eigenvalue
    std::vector<int> sorted_indices = argsort(_eigenvalues, false);
    // now sort eigenvalues and eigenvectors accordingly
    _eigenvalues = sortMatrixColumnsByIndices(_eigenvalues, sorted_indices);
    _eigenvectors = sortMatrixColumnsByIndices(_eigenvectors, sorted_indices);
    // and now take only the num_components and we're out!
    _eigenvalues = Mat(_eigenvalues, Range::all(), Range(0, _num_components));
    _eigenvectors = Mat(_eigenvectors, Range::all(), Range(0, _num_components));
}
1152
1153
// Entry point of the Discriminant Analysis: accepts either a collection of
// matrices/vectors (stacked into rows first) or a single row-sample matrix,
// then delegates to lda(). Other input kinds are rejected.
void LDA::compute(InputArrayOfArrays _src, InputArray _lbls) {
    const int kind = _src.kind();
    if (kind == _InputArray::STD_VECTOR_MAT || kind == _InputArray::STD_ARRAY_MAT) {
        // One sample per container element; flatten each into a row.
        lda(asRowMatrix(_src, CV_64FC1), _lbls);
    } else if (kind == _InputArray::MAT) {
        // Already one sample per row.
        lda(_src.getMat(), _lbls);
    } else {
        String error_message = format("InputArray Datatype %d is not supported.", kind);
        CV_Error(Error::StsBadArg, error_message);
    }
}
1168
1169
// Projects one or more row aligned samples into the LDA subspace.
// Uses an empty mean, so this is plain src * _eigenvectors (no centering).
Mat LDA::project(InputArray src) {
    return subspaceProject(_eigenvectors, Mat(), src);
}
1173
1174
// Reconstructs projections from the LDA subspace from one or more row aligned samples.
// Uses an empty mean, so this is plain src * _eigenvectors^T (no mean re-added).
Mat LDA::reconstruct(InputArray src) {
    return subspaceReconstruct(_eigenvectors, Mat(), src);
}
1178
1179
}
1180
1181