Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Tetragramm
GitHub Repository: Tetragramm/opencv
Path: blob/master/modules/calib3d/src/fundam.cpp
16354 views
1
/*M///////////////////////////////////////////////////////////////////////////////////////
2
//
3
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4
//
5
// By downloading, copying, installing or using the software you agree to this license.
6
// If you do not agree to this license, do not download, install,
7
// copy or use the software.
8
//
9
//
10
// License Agreement
11
// For Open Source Computer Vision Library
12
//
13
// Copyright (C) 2000, Intel Corporation, all rights reserved.
14
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
15
// Third party copyrights are property of their respective owners.
16
//
17
// Redistribution and use in source and binary forms, with or without modification,
18
// are permitted provided that the following conditions are met:
19
//
20
// * Redistribution's of source code must retain the above copyright notice,
21
// this list of conditions and the following disclaimer.
22
//
23
// * Redistribution's in binary form must reproduce the above copyright notice,
24
// this list of conditions and the following disclaimer in the documentation
25
// and/or other materials provided with the distribution.
26
//
27
// * The name of the copyright holders may not be used to endorse or promote products
28
// derived from this software without specific prior written permission.
29
//
30
// This software is provided by the copyright holders and contributors "as is" and
31
// any express or implied warranties, including, but not limited to, the implied
32
// warranties of merchantability and fitness for a particular purpose are disclaimed.
33
// In no event shall the Intel Corporation or contributors be liable for any direct,
34
// indirect, incidental, special, exemplary, or consequential damages
35
// (including, but not limited to, procurement of substitute goods or services;
36
// loss of use, data, or profits; or business interruption) however caused
37
// and on any theory of liability, whether in contract, strict liability,
38
// or tort (including negligence or otherwise) arising in any way out of
39
// the use of this software, even if advised of the possibility of such damage.
40
//
41
//M*/
42
43
#include "precomp.hpp"
44
#include "rho.h"
45
#include <iostream>
46
47
namespace cv
48
{
49
50
/**
 * This class estimates a homography \f$H\in \mathbb{R}^{3\times 3}\f$
 * between \f$\mathbf{x} \in \mathbb{R}^3\f$ and
 * \f$\mathbf{X} \in \mathbb{R}^3\f$ using DLT (direct linear transform)
 * with algebraic distance.
 *
 * \f[
 * \lambda \mathbf{x} = H \mathbf{X}
 * \f]
 * where \f$\lambda \in \mathbb{R} \f$.
 *
 */
class HomographyEstimatorCallback CV_FINAL : public PointSetRegistrator::Callback
{
public:
    /**
     * Check whether a minimal sample is usable for homography estimation.
     *
     * Rejects the sample if either point set contains 3 collinear points,
     * and, for the minimal 4-point case, if the two point sets do not have
     * a consistent orientation (see the paper reference below).
     */
    bool checkSubset( InputArray _ms1, InputArray _ms2, int count ) const CV_OVERRIDE
    {
        Mat ms1 = _ms1.getMat(), ms2 = _ms2.getMat();
        if( haveCollinearPoints(ms1, count) || haveCollinearPoints(ms2, count) )
            return false;

        // We check whether the minimal set of points for the homography estimation
        // are geometrically consistent. We check if every 3 correspondences sets
        // fulfills the constraint.
        //
        // The usefulness of this constraint is explained in the paper:
        //
        // "Speeding-up homography estimation in mobile devices"
        // Journal of Real-Time Image Processing. 2013. DOI: 10.1007/s11554-012-0314-1
        // Pablo Marquez-Neila, Javier Lopez-Alberca, Jose M. Buenaposada, Luis Baumela
        if( count == 4 )
        {
            static const int tt[][3] = {{0, 1, 2}, {1, 2, 3}, {0, 2, 3}, {0, 1, 3}};
            const Point2f* src = ms1.ptr<Point2f>();
            const Point2f* dst = ms2.ptr<Point2f>();
            int negative = 0;

            for( int i = 0; i < 4; i++ )
            {
                const int* t = tt[i];
                Matx33d A(src[t[0]].x, src[t[0]].y, 1., src[t[1]].x, src[t[1]].y, 1., src[t[2]].x, src[t[2]].y, 1.);
                Matx33d B(dst[t[0]].x, dst[t[0]].y, 1., dst[t[1]].x, dst[t[1]].y, 1., dst[t[2]].x, dst[t[2]].y, 1.);

                // The determinant sign encodes the orientation of a point triple;
                // a negative product means this triple flips orientation.
                negative += determinant(A)*determinant(B) < 0;
            }
            // Either all triples keep their orientation, or all flip it;
            // a mix cannot be produced by a single homography.
            if( negative != 0 && negative != 4 )
                return false;
        }

        return true;
    }

    /**
     * Estimate the homography from the given correspondences using DLT.
     *
     * Normalization method:
     * - $x$ and $y$ coordinates are normalized independently
     * - first the coordinates are shifted so that the average coordinate is \f$(0,0)\f$
     * - then the coordinates are scaled so that the average L1 norm is 1, i.e,
     *   the average L1 norm of the \f$x\f$ coordinates is 1 and the average
     *   L1 norm of the \f$y\f$ coordinates is also 1.
     *
     * @param _m1 source points containing (X,Y), depth is CV_32F with 1 column 2 channels or
     *            2 columns 1 channel
     * @param _m2 destination points containing (x,y), depth is CV_32F with 1 column 2 channels or
     *            2 columns 1 channel
     * @param _model output homography, CV_64FC1, 3x3, normalized, i.e., the last element is 1
     * @return 1 on success, 0 if the points are degenerate (zero spread in x or y)
     */
    int runKernel( InputArray _m1, InputArray _m2, OutputArray _model ) const CV_OVERRIDE
    {
        Mat m1 = _m1.getMat(), m2 = _m2.getMat();
        int i, count = m1.checkVector(2);
        const Point2f* M = m1.ptr<Point2f>();
        const Point2f* m = m2.ptr<Point2f>();

        double LtL[9][9], W[9][1], V[9][9];
        Mat _LtL( 9, 9, CV_64F, &LtL[0][0] );
        Mat matW( 9, 1, CV_64F, W );
        Mat matV( 9, 9, CV_64F, V );
        // _H0 aliases the last eigenvector row (smallest eigenvalue),
        // _Htemp aliases the second-to-last row as scratch space.
        Mat _H0( 3, 3, CV_64F, V[8] );
        Mat _Htemp( 3, 3, CV_64F, V[7] );
        Point2d cM(0,0), cm(0,0), sM(0,0), sm(0,0);

        // Centroids of both point sets.
        for( i = 0; i < count; i++ )
        {
            cm.x += m[i].x; cm.y += m[i].y;
            cM.x += M[i].x; cM.y += M[i].y;
        }

        cm.x /= count;
        cm.y /= count;
        cM.x /= count;
        cM.y /= count;

        // Average L1 spread around the centroid, per coordinate.
        for( i = 0; i < count; i++ )
        {
            sm.x += fabs(m[i].x - cm.x);
            sm.y += fabs(m[i].y - cm.y);
            sM.x += fabs(M[i].x - cM.x);
            sM.y += fabs(M[i].y - cM.y);
        }

        // Degenerate configuration: all points share an x or y coordinate.
        if( fabs(sm.x) < DBL_EPSILON || fabs(sm.y) < DBL_EPSILON ||
            fabs(sM.x) < DBL_EPSILON || fabs(sM.y) < DBL_EPSILON )
            return 0;
        sm.x = count/sm.x; sm.y = count/sm.y;
        sM.x = count/sM.x; sM.y = count/sM.y;

        // Inverse normalization for the destination side and forward
        // normalization for the source side; used to denormalize H below.
        double invHnorm[9] = { 1./sm.x, 0, cm.x, 0, 1./sm.y, cm.y, 0, 0, 1 };
        double Hnorm2[9] = { sM.x, 0, -cM.x*sM.x, 0, sM.y, -cM.y*sM.y, 0, 0, 1 };
        Mat _invHnorm( 3, 3, CV_64FC1, invHnorm );
        Mat _Hnorm2( 3, 3, CV_64FC1, Hnorm2 );

        _LtL.setTo(Scalar::all(0));
        // Accumulate the normal matrix L'L of the DLT system; only the
        // upper triangle is filled here, completeSymm() mirrors it below.
        for( i = 0; i < count; i++ )
        {
            double x = (m[i].x - cm.x)*sm.x, y = (m[i].y - cm.y)*sm.y;
            double X = (M[i].x - cM.x)*sM.x, Y = (M[i].y - cM.y)*sM.y;
            double Lx[] = { X, Y, 1, 0, 0, 0, -x*X, -x*Y, -x };
            double Ly[] = { 0, 0, 0, X, Y, 1, -y*X, -y*Y, -y };
            int j, k;
            for( j = 0; j < 9; j++ )
                for( k = j; k < 9; k++ )
                    LtL[j][k] += Lx[j]*Lx[k] + Ly[j]*Ly[k];
        }
        completeSymm( _LtL );

        // The solution is the eigenvector for the smallest eigenvalue (V[8]).
        eigen( _LtL, matW, matV );
        // Denormalize: H = invHnorm * H0 * Hnorm2.
        _Htemp = _invHnorm*_H0;
        _H0 = _Htemp*_Hnorm2;
        // Scale so that H(2,2) == 1.
        _H0.convertTo(_model, _H0.type(), 1./_H0.at<double>(2,2) );

        return 1;
    }

    /**
     * Compute the reprojection error.
     * m2 = H*m1
     * @param _m1 depth CV_32F, 1-channel with 2 columns or 2-channel with 1 column
     * @param _m2 depth CV_32F, 1-channel with 2 columns or 2-channel with 1 column
     * @param _model CV_64FC1, 3x3
     * @param _err output, CV_32FC1, square of the L2 norm of the reprojection
     *             residual for each correspondence
     */
    void computeError( InputArray _m1, InputArray _m2, InputArray _model, OutputArray _err ) const CV_OVERRIDE
    {
        Mat m1 = _m1.getMat(), m2 = _m2.getMat(), model = _model.getMat();
        int i, count = m1.checkVector(2);
        const Point2f* M = m1.ptr<Point2f>();
        const Point2f* m = m2.ptr<Point2f>();
        const double* H = model.ptr<double>();
        // Only 8 entries are cached: H[8] is assumed to be 1 (model is normalized).
        float Hf[] = { (float)H[0], (float)H[1], (float)H[2], (float)H[3], (float)H[4], (float)H[5], (float)H[6], (float)H[7] };

        _err.create(count, 1, CV_32F);
        float* err = _err.getMat().ptr<float>();

        for( i = 0; i < count; i++ )
        {
            // Project M[i] through H (perspective division by ww) and
            // take the squared distance to the observed point m[i].
            float ww = 1.f/(Hf[6]*M[i].x + Hf[7]*M[i].y + 1.f);
            float dx = (Hf[0]*M[i].x + Hf[1]*M[i].y + Hf[2])*ww - m[i].x;
            float dy = (Hf[3]*M[i].x + Hf[4]*M[i].y + Hf[5])*ww - m[i].y;
            err[i] = dx*dx + dy*dy;
        }
    }
};
212
213
214
/**
 * Levenberg-Marquardt callback for refining a homography.
 *
 * The homography is parameterized by its first 8 elements (H(2,2) is fixed
 * at 1).  compute() evaluates, for each correspondence, the reprojection
 * residual of src through H against dst, and optionally the 2x8 Jacobian
 * of that residual with respect to the 8 parameters.
 */
class HomographyRefineCallback CV_FINAL : public LMSolver::Callback
{
public:
    HomographyRefineCallback(InputArray _src, InputArray _dst)
    {
        src = _src.getMat();
        dst = _dst.getMat();
    }

    // @param _param 8x1 CV_64F homography parameters (row-major, H(2,2)=1)
    // @param _err   output (2*count)x1 CV_64F residuals (x then y, per point)
    // @param _Jac   optional output (2*count)x8 CV_64F Jacobian
    bool compute(InputArray _param, OutputArray _err, OutputArray _Jac) const CV_OVERRIDE
    {
        int i, count = src.checkVector(2);
        Mat param = _param.getMat();
        _err.create(count*2, 1, CV_64F);
        Mat err = _err.getMat(), J;
        if( _Jac.needed())
        {
            _Jac.create(count*2, param.rows, CV_64F);
            J = _Jac.getMat();
            // Raw-pointer writes below assume a continuous 8-column layout.
            CV_Assert( J.isContinuous() && J.cols == 8 );
        }

        const Point2f* M = src.ptr<Point2f>();
        const Point2f* m = dst.ptr<Point2f>();
        const double* h = param.ptr<double>();
        double* errptr = err.ptr<double>();
        double* Jptr = J.data ? J.ptr<double>() : 0;

        for( i = 0; i < count; i++ )
        {
            double Mx = M[i].x, My = M[i].y;
            double ww = h[6]*Mx + h[7]*My + 1.;
            // Guard against points on (or extremely close to) the line at
            // infinity of the current H: use 0 instead of a huge 1/ww.
            ww = fabs(ww) > DBL_EPSILON ? 1./ww : 0;
            double xi = (h[0]*Mx + h[1]*My + h[2])*ww;
            double yi = (h[3]*Mx + h[4]*My + h[5])*ww;
            errptr[i*2] = xi - m[i].x;
            errptr[i*2+1] = yi - m[i].y;

            if( Jptr )
            {
                // Two Jacobian rows per point (d(xi)/dh and d(yi)/dh),
                // written as 16 consecutive doubles.
                Jptr[0] = Mx*ww; Jptr[1] = My*ww; Jptr[2] = ww;
                Jptr[3] = Jptr[4] = Jptr[5] = 0.;
                Jptr[6] = -Mx*ww*xi; Jptr[7] = -My*ww*xi;
                Jptr[8] = Jptr[9] = Jptr[10] = 0.;
                Jptr[11] = Mx*ww; Jptr[12] = My*ww; Jptr[13] = ww;
                Jptr[14] = -Mx*ww*yi; Jptr[15] = -My*ww*yi;

                Jptr += 16;
            }
        }

        return true;
    }

    Mat src, dst;  // stored copies of the correspondences being refined
};
270
} // end namespace cv
271
272
namespace cv{
273
static bool createAndRunRHORegistrator(double confidence,
274
int maxIters,
275
double ransacReprojThreshold,
276
int npoints,
277
InputArray _src,
278
InputArray _dst,
279
OutputArray _H,
280
OutputArray _tempMask){
281
Mat src = _src.getMat();
282
Mat dst = _dst.getMat();
283
Mat tempMask;
284
bool result;
285
double beta = 0.35;/* 0.35 is a value that often works. */
286
287
/* Create temporary output matrix (RHO outputs a single-precision H only). */
288
Mat tmpH = Mat(3, 3, CV_32FC1);
289
290
/* Create output mask. */
291
tempMask = Mat(npoints, 1, CV_8U);
292
293
/**
294
* Make use of the RHO estimator API.
295
*
296
* This is where the math happens. A homography estimation context is
297
* initialized, used, then finalized.
298
*/
299
300
Ptr<RHO_HEST> p = rhoInit();
301
302
/**
303
* Optional. Ideally, the context would survive across calls to
304
* findHomography(), but no clean way appears to exit to do so. The price
305
* to pay is marginally more computational work than strictly needed.
306
*/
307
308
rhoEnsureCapacity(p, npoints, beta);
309
310
/**
311
* The critical call. All parameters are heavily documented in rho.h.
312
*
313
* Currently, NR (Non-Randomness criterion) and Final Refinement (with
314
* internal, optimized Levenberg-Marquardt method) are enabled. However,
315
* while refinement seems to correctly smooth jitter most of the time, when
316
* refinement fails it tends to make the estimate visually very much worse.
317
* It may be necessary to remove the refinement flags in a future commit if
318
* this behaviour is too problematic.
319
*/
320
321
result = !!rhoHest(p,
322
(const float*)src.data,
323
(const float*)dst.data,
324
(char*) tempMask.data,
325
(unsigned) npoints,
326
(float) ransacReprojThreshold,
327
(unsigned) maxIters,
328
(unsigned) maxIters,
329
confidence,
330
4U,
331
beta,
332
RHO_FLAG_ENABLE_NR | RHO_FLAG_ENABLE_FINAL_REFINEMENT,
333
NULL,
334
(float*)tmpH.data);
335
336
/* Convert float homography to double precision. */
337
tmpH.convertTo(_H, CV_64FC1);
338
339
/* Maps non-zero mask elements to 1, for the sake of the test case. */
340
for(int k=0;k<npoints;k++){
341
tempMask.data[k] = !!tempMask.data[k];
342
}
343
tempMask.copyTo(_tempMask);
344
345
return result;
346
}
347
}
348
349
350
/**
 * Find a perspective transformation (homography) between two point sets.
 *
 * @param _points1 source points, a 2D or 3D (homogeneous) point set
 * @param _points2 destination points, same count and layout as _points1
 * @param method   0 (least-squares on all points), RANSAC, LMEDS or RHO
 * @param ransacReprojThreshold maximum reprojection error for inliers;
 *                 values <= 0 fall back to a default of 3
 * @param _mask    optional output inlier mask (CV_8U, one byte per point)
 * @param maxIters maximum number of robust-estimation iterations
 * @param confidence desired confidence level of the robust estimators
 * @return the 3x3 CV_64F homography, or an empty Mat on failure
 */
cv::Mat cv::findHomography( InputArray _points1, InputArray _points2,
                            int method, double ransacReprojThreshold, OutputArray _mask,
                            const int maxIters, const double confidence)
{
    CV_INSTRUMENT_REGION();

    const double defaultRANSACReprojThreshold = 3;
    bool result = false;

    Mat points1 = _points1.getMat(), points2 = _points2.getMat();
    Mat src, dst, H, tempMask;
    int npoints = -1;

    // Bring both point sets into a canonical Nx1 CV_32FC2 layout,
    // converting 3D (homogeneous) input to 2D first.
    for( int i = 1; i <= 2; i++ )
    {
        Mat& p = i == 1 ? points1 : points2;
        Mat& m = i == 1 ? src : dst;
        npoints = p.checkVector(2, -1, false);
        if( npoints < 0 )
        {
            npoints = p.checkVector(3, -1, false);
            if( npoints < 0 )
                CV_Error(Error::StsBadArg, "The input arrays should be 2D or 3D point sets");
            if( npoints == 0 )
                return Mat();
            convertPointsFromHomogeneous(p, p);
        }
        p.reshape(2, npoints).convertTo(m, CV_32F);
    }

    CV_Assert( src.checkVector(2) == dst.checkVector(2) );

    if( ransacReprojThreshold <= 0 )
        ransacReprojThreshold = defaultRANSACReprojThreshold;

    Ptr<PointSetRegistrator::Callback> cb = makePtr<HomographyEstimatorCallback>();

    if( method == 0 || npoints == 4 )
    {
        // Exactly-determined or plain least-squares case: run the kernel
        // directly on all points; every point is considered an inlier.
        tempMask = Mat::ones(npoints, 1, CV_8U);
        result = cb->runKernel(src, dst, H) > 0;
    }
    else if( method == RANSAC )
        result = createRANSACPointSetRegistrator(cb, 4, ransacReprojThreshold, confidence, maxIters)->run(src, dst, H, tempMask);
    else if( method == LMEDS )
        result = createLMeDSPointSetRegistrator(cb, 4, confidence, maxIters)->run(src, dst, H, tempMask);
    else if( method == RHO )
        result = createAndRunRHORegistrator(confidence, maxIters, ransacReprojThreshold, npoints, src, dst, H, tempMask);
    else
        CV_Error(Error::StsBadArg, "Unknown estimation method");

    // Polish the estimate on the inliers only (RHO refines internally).
    if( result && npoints > 4 && method != RHO)
    {
        // Compact src/dst in place so that the first npoints entries are
        // the inliers according to tempMask.
        compressElems( src.ptr<Point2f>(), tempMask.ptr<uchar>(), 1, npoints );
        npoints = compressElems( dst.ptr<Point2f>(), tempMask.ptr<uchar>(), 1, npoints );
        if( npoints > 0 )
        {
            Mat src1 = src.rowRange(0, npoints);
            Mat dst1 = dst.rowRange(0, npoints);
            src = src1;
            dst = dst1;
            if( method == RANSAC || method == LMEDS )
                cb->runKernel( src, dst, H );
            // Refine the first 8 elements of H (H(2,2) stays 1) with LM.
            // H8 aliases H's data, so H is updated in place.
            Mat H8(8, 1, CV_64F, H.ptr<double>());
            createLMSolver(makePtr<HomographyRefineCallback>(src, dst), 10)->run(H8);
        }
    }

    if( result )
    {
        if( _mask.needed() )
            tempMask.copyTo(_mask);
    }
    else
    {
        // On failure return an empty H and (if requested) an all-zero mask.
        H.release();
        if(_mask.needed() ) {
            tempMask = Mat::zeros(npoints >= 0 ? npoints : 0, 1, CV_8U);
            tempMask.copyTo(_mask);
        }
    }

    return H;
}
434
435
/**
 * Backward-compatible overload: forwards to the main findHomography()
 * implementation, which supplies the default iteration count and
 * confidence level.
 */
cv::Mat cv::findHomography( InputArray _points1, InputArray _points2,
                            OutputArray _mask, int method, double ransacReprojThreshold )
{
    Mat H = cv::findHomography(_points1, _points2, method, ransacReprojThreshold, _mask);
    return H;
}
440
441
442
443
/* Estimation of Fundamental Matrix from point correspondences.
444
The original code has been written by Valery Mosyagin */
445
446
/* The algorithms (except for RANSAC) and the notation have been taken from
447
Zhengyou Zhang's research report
448
"Determining the Epipolar Geometry and its Uncertainty: A Review"
449
that can be found at http://www-sop.inria.fr/robotvis/personnel/zzhang/zzhang-eng.html */
450
451
/************************************** 7-point algorithm *******************************/
452
namespace cv
453
{
454
455
/**
 * Compute the fundamental matrix using the 7-point algorithm.
 *
 * \f[
 * (\mathrm{m2}_i,1)^T \mathrm{fmatrix} (\mathrm{m1}_i,1) = 0
 * \f]
 *
 * @param _m1 Contain points in the reference view. Depth CV_32F with 2-channel
 *            1 column or 1-channel 2 columns. It has 7 rows.
 * @param _m2 Contain points in the other view. Depth CV_32F with 2-channel
 *            1 column or 1-channel 2 columns. It has 7 rows.
 * @param _fmatrix Output fundamental matrix (or matrices) of type CV_64FC1.
 *                 The user is responsible for allocating the memory before calling
 *                 this function.
 * @return Number of fundamental matrices. Valid values are 1, 2 or 3.
 *         For each solution k (0-based), rows 3k to 3k+2 of _fmatrix hold
 *         one valid fundamental matrix:
 *         - 1 solution: rows 0..2
 *         - 2 solutions: rows 0..2 and 3..5
 *         - 3 solutions: rows 0..2, 3..5 and 6..8
 *
 * Note that the computed fundamental matrix is normalized, i.e.,
 * the last element \f$F_{33}\f$ is 1 (when numerically possible).
 */
static int run7Point( const Mat& _m1, const Mat& _m2, Mat& _fmatrix )
{
    double a[7*9], w[7], u[9*9], v[9*9], c[4], r[3] = {0};
    double* f1, *f2;
    double t0, t1, t2;
    Mat A( 7, 9, CV_64F, a );
    Mat U( 7, 9, CV_64F, u );
    Mat Vt( 9, 9, CV_64F, v );
    Mat W( 7, 1, CV_64F, w );
    Mat coeffs( 1, 4, CV_64F, c );
    Mat roots( 1, 3, CV_64F, r );
    const Point2f* m1 = _m1.ptr<Point2f>();
    const Point2f* m2 = _m2.ptr<Point2f>();
    double* fmatrix = _fmatrix.ptr<double>();
    int i, k, n;

    // form a linear system: i-th row of A(=a) represents
    // the equation: (m2[i], 1)'*F*(m1[i], 1) = 0
    for( i = 0; i < 7; i++ )
    {
        double x0 = m1[i].x, y0 = m1[i].y;
        double x1 = m2[i].x, y1 = m2[i].y;

        a[i*9+0] = x1*x0;
        a[i*9+1] = x1*y0;
        a[i*9+2] = x1;
        a[i*9+3] = y1*x0;
        a[i*9+4] = y1*y0;
        a[i*9+5] = y1;
        a[i*9+6] = x0;
        a[i*9+7] = y0;
        a[i*9+8] = 1;
    }

    // A*(f11 f12 ... f33)' = 0 is singular (7 equations for 9 variables), so
    // the solution is linear subspace of dimensionality 2.
    // => use the last two singular vectors as a basis of the space
    // (according to SVD properties)
    SVDecomp( A, W, U, Vt, SVD::MODIFY_A + SVD::FULL_UV );
    f1 = v + 7*9;
    f2 = v + 8*9;

    // f1, f2 is a basis => lambda*f1 + mu*f2 is an arbitrary fundamental matrix,
    // as it is determined up to a scale, normalize lambda & mu (lambda + mu = 1),
    // so f ~ lambda*f1 + (1 - lambda)*f2.
    // use the additional constraint det(f) = det(lambda*f1 + (1-lambda)*f2) to find lambda.
    // it will be a cubic equation.
    // find c - polynomial coefficients.
    // After this loop f1 holds (f1 - f2), so f = lambda*f1 + f2.
    for( i = 0; i < 9; i++ )
        f1[i] -= f2[i];

    // c[3]..c[0]: cubic coefficients of det(lambda*f1 + f2) in lambda,
    // computed via cofactor expansion.
    t0 = f2[4]*f2[8] - f2[5]*f2[7];
    t1 = f2[3]*f2[8] - f2[5]*f2[6];
    t2 = f2[3]*f2[7] - f2[4]*f2[6];

    c[3] = f2[0]*t0 - f2[1]*t1 + f2[2]*t2;

    c[2] = f1[0]*t0 - f1[1]*t1 + f1[2]*t2 -
           f1[3]*(f2[1]*f2[8] - f2[2]*f2[7]) +
           f1[4]*(f2[0]*f2[8] - f2[2]*f2[6]) -
           f1[5]*(f2[0]*f2[7] - f2[1]*f2[6]) +
           f1[6]*(f2[1]*f2[5] - f2[2]*f2[4]) -
           f1[7]*(f2[0]*f2[5] - f2[2]*f2[3]) +
           f1[8]*(f2[0]*f2[4] - f2[1]*f2[3]);

    t0 = f1[4]*f1[8] - f1[5]*f1[7];
    t1 = f1[3]*f1[8] - f1[5]*f1[6];
    t2 = f1[3]*f1[7] - f1[4]*f1[6];

    c[1] = f2[0]*t0 - f2[1]*t1 + f2[2]*t2 -
           f2[3]*(f1[1]*f1[8] - f1[2]*f1[7]) +
           f2[4]*(f1[0]*f1[8] - f1[2]*f1[6]) -
           f2[5]*(f1[0]*f1[7] - f1[1]*f1[6]) +
           f2[6]*(f1[1]*f1[5] - f1[2]*f1[4]) -
           f2[7]*(f1[0]*f1[5] - f1[2]*f1[3]) +
           f2[8]*(f1[0]*f1[4] - f1[1]*f1[3]);

    c[0] = f1[0]*t0 - f1[1]*t1 + f1[2]*t2;

    // solve the cubic equation; there can be 1 to 3 roots ...
    n = solveCubic( coeffs, roots );

    if( n < 1 || n > 3 )
        return n;

    for( k = 0; k < n; k++, fmatrix += 9 )
    {
        // for each root form the fundamental matrix
        double lambda = r[k], mu = 1.;
        double s = f1[8]*r[k] + f2[8];

        // normalize each matrix, so that F(3,3) (~fmatrix[8]) == 1
        if( fabs(s) > DBL_EPSILON )
        {
            mu = 1./s;
            lambda *= mu;
            fmatrix[8] = 1.;
        }
        else
            fmatrix[8] = 0.;

        for( i = 0; i < 8; i++ )
            fmatrix[i] = f1[i]*lambda + f2[i]*mu;
    }

    return n;
}
584
585
/**
 * Compute the fundamental matrix using the 8-point algorithm.
 *
 * \f[
 * (\mathrm{m2}_i,1)^T \mathrm{fmatrix} (\mathrm{m1}_i,1) = 0
 * \f]
 *
 * @param _m1 Contain points in the reference view. Depth CV_32F with 2-channel
 *            1 column or 1-channel 2 columns. It has 8 rows.
 * @param _m2 Contain points in the other view. Depth CV_32F with 2-channel
 *            1 column or 1-channel 2 columns. It has 8 rows.
 * @param _fmatrix Output fundamental matrix (or matrices) of type CV_64FC1.
 *                 The user is responsible for allocating the memory before calling
 *                 this function.
 * @return 1 on success, 0 on failure.
 *
 * Note that the computed fundamental matrix is normalized, i.e.,
 * the last element \f$F_{33}\f$ is 1 (when numerically possible).
 */
static int run8Point( const Mat& _m1, const Mat& _m2, Mat& _fmatrix )
{
    Point2d m1c(0,0), m2c(0,0);
    double t, scale1 = 0, scale2 = 0;

    const Point2f* m1 = _m1.ptr<Point2f>();
    const Point2f* m2 = _m2.ptr<Point2f>();
    CV_Assert( (_m1.cols == 1 || _m1.rows == 1) && _m1.size() == _m2.size());
    int i, count = _m1.checkVector(2);

    // compute centers and average distances for each of the two point sets
    for( i = 0; i < count; i++ )
    {
        m1c += Point2d(m1[i]);
        m2c += Point2d(m2[i]);
    }

    // calculate the normalizing transformations for each of the point sets:
    // after the transformation each set will have the mass center at the coordinate origin
    // and the average distance from the origin will be ~sqrt(2).
    t = 1./count;
    m1c *= t;
    m2c *= t;

    for( i = 0; i < count; i++ )
    {
        scale1 += norm(Point2d(m1[i].x - m1c.x, m1[i].y - m1c.y));
        scale2 += norm(Point2d(m2[i].x - m2c.x, m2[i].y - m2c.y));
    }

    scale1 *= t;
    scale2 *= t;

    // Degenerate input: all points of a set coincide.
    if( scale1 < FLT_EPSILON || scale2 < FLT_EPSILON )
        return 0;

    scale1 = std::sqrt(2.)/scale1;
    scale2 = std::sqrt(2.)/scale2;

    Matx<double, 9, 9> A;

    // form a linear system Ax=0: for each selected pair of points m1 & m2,
    // the row of A(=a) represents the coefficients of equation: (m2, 1)'*F*(m1, 1) = 0
    // to save computation time, we compute (At*A) instead of A and then solve (At*A)x=0.
    for( i = 0; i < count; i++ )
    {
        double x1 = (m1[i].x - m1c.x)*scale1;
        double y1 = (m1[i].y - m1c.y)*scale1;
        double x2 = (m2[i].x - m2c.x)*scale2;
        double y2 = (m2[i].y - m2c.y)*scale2;
        Vec<double, 9> r( x2*x1, x2*y1, x2, y2*x1, y2*y1, y2, x1, y1, 1 );
        A += r*r.t();
    }

    Vec<double, 9> W;
    Matx<double, 9, 9> V;

    eigen(A, W, V);

    // Find the first (near-)zero eigenvalue; eigen() returns them in
    // descending order, so everything from index i on is ~zero.
    for( i = 0; i < 9; i++ )
    {
        if( fabs(W[i]) < DBL_EPSILON )
            break;
    }

    // If more than one eigenvalue is ~zero, the solution is not unique.
    if( i < 8 )
        return 0;

    Matx33d F0( V.val + 9*8 ); // take the last column of v as a solution of Af = 0

    // make F0 singular (of rank 2) by decomposing it with SVD,
    // zeroing the last diagonal element of W and then composing the matrices back.

    Vec3d w;
    Matx33d U;
    Matx33d Vt;

    SVD::compute( F0, w, U, Vt);
    w[2] = 0.;

    F0 = U*Matx33d::diag(w)*Vt;

    // apply the transformation that is inverse
    // to what we used to normalize the point coordinates
    Matx33d T1( scale1, 0, -scale1*m1c.x, 0, scale1, -scale1*m1c.y, 0, 0, 1 );
    Matx33d T2( scale2, 0, -scale2*m2c.x, 0, scale2, -scale2*m2c.y, 0, 0, 1 );

    F0 = T2.t()*F0*T1;

    // make F(3,3) = 1
    if( fabs(F0(2,2)) > FLT_EPSILON )
        F0 *= 1./F0(2,2);

    Mat(F0).copyTo(_fmatrix);

    return 1;
}
701
702
703
/**
 * PointSetRegistrator callback for fundamental-matrix estimation.
 *
 * Dispatches to the 7-point algorithm for minimal samples and to the
 * 8-point algorithm otherwise, and scores correspondences by their
 * squared distance to the corresponding epipolar lines.
 */
class FMEstimatorCallback CV_FINAL : public PointSetRegistrator::Callback
{
public:
    // A sample is degenerate if either point set has 3 collinear points.
    bool checkSubset( InputArray _ms1, InputArray _ms2, int count ) const CV_OVERRIDE
    {
        Mat ms1 = _ms1.getMat(), ms2 = _ms2.getMat();
        return !haveCollinearPoints(ms1, count) && !haveCollinearPoints(ms2, count);
    }

    /**
     * Estimate F from the given correspondences.
     *
     * Uses run7Point() for exactly 7 points (up to 3 solutions, stacked
     * vertically as 3x3 blocks) and run8Point() otherwise (1 solution).
     *
     * @return the number of solutions written to _model (0 on failure).
     */
    int runKernel( InputArray _m1, InputArray _m2, OutputArray _model ) const CV_OVERRIDE
    {
        // Enough stack storage for up to 3 stacked 3x3 matrices.
        double f[9*3];
        Mat m1 = _m1.getMat(), m2 = _m2.getMat();
        int count = m1.checkVector(2);
        Mat F(count == 7 ? 9 : 3, 3, CV_64F, f);
        int n = count == 7 ? run7Point(m1, m2, F) : run8Point(m1, m2, F);

        if( n == 0 )
            _model.release();
        else
            F.rowRange(0, n*3).copyTo(_model);

        return n;
    }

    /**
     * Compute, for each correspondence, the squared distance of each point
     * to the epipolar line induced by the other point, and store the larger
     * of the two in _err (CV_32FC1, one value per correspondence).
     */
    void computeError( InputArray _m1, InputArray _m2, InputArray _model, OutputArray _err ) const CV_OVERRIDE
    {
        Mat __m1 = _m1.getMat(), __m2 = _m2.getMat(), __model = _model.getMat();
        int i, count = __m1.checkVector(2);
        const Point2f* m1 = __m1.ptr<Point2f>();
        const Point2f* m2 = __m2.ptr<Point2f>();
        const double* F = __model.ptr<double>();
        _err.create(count, 1, CV_32F);
        float* err = _err.getMat().ptr<float>();

        for( i = 0; i < count; i++ )
        {
            double a, b, c, d1, d2, s1, s2;

            // Epipolar line F*(m1,1) in the second image.
            a = F[0]*m1[i].x + F[1]*m1[i].y + F[2];
            b = F[3]*m1[i].x + F[4]*m1[i].y + F[5];
            c = F[6]*m1[i].x + F[7]*m1[i].y + F[8];

            // d2^2 * s2 is the squared point-line distance of m2 to it.
            s2 = 1./(a*a + b*b);
            d2 = m2[i].x*a + m2[i].y*b + c;

            // Epipolar line F'*(m2,1) in the first image.
            a = F[0]*m2[i].x + F[3]*m2[i].y + F[6];
            b = F[1]*m2[i].x + F[4]*m2[i].y + F[7];
            c = F[2]*m2[i].x + F[5]*m2[i].y + F[8];

            s1 = 1./(a*a + b*b);
            d1 = m1[i].x*a + m1[i].y*b + c;

            err[i] = (float)std::max(d1*d1*s1, d2*d2*s2);
        }
    }
};
760
761
}
762
763
/**
 * Estimate the fundamental matrix from two sets of corresponding points.
 *
 * @param _points1 points in the first image (2D, or 3D homogeneous)
 * @param _points2 points in the second image, same count as _points1
 * @param method   estimation method (7-point, 8-point, RANSAC or LMedS flags)
 * @param ransacReprojThreshold inlier threshold for RANSAC; <= 0 selects 3
 * @param confidence desired confidence; out-of-range values select 0.99
 * @param _mask    optional output inlier mask
 * @return a 3x3 fundamental matrix (or 9x3 for up to 3 stacked 7-point
 *         solutions), or an empty Mat on failure
 */
cv::Mat cv::findFundamentalMat( InputArray _points1, InputArray _points2,
                                int method, double ransacReprojThreshold, double confidence,
                                OutputArray _mask )
{
    CV_INSTRUMENT_REGION();

    Mat points1 = _points1.getMat(), points2 = _points2.getMat();
    Mat m1, m2, F;
    int npoints = -1;

    // Bring both point sets into a canonical Nx1 CV_32FC2 layout,
    // converting 3D (homogeneous) input to 2D first.
    for( int i = 1; i <= 2; i++ )
    {
        Mat& p = i == 1 ? points1 : points2;
        Mat& m = i == 1 ? m1 : m2;
        npoints = p.checkVector(2, -1, false);
        if( npoints < 0 )
        {
            npoints = p.checkVector(3, -1, false);
            if( npoints < 0 )
                CV_Error(Error::StsBadArg, "The input arrays should be 2D or 3D point sets");
            if( npoints == 0 )
                return Mat();
            convertPointsFromHomogeneous(p, p);
        }
        p.reshape(2, npoints).convertTo(m, CV_32F);
    }

    CV_Assert( m1.checkVector(2) == m2.checkVector(2) );

    // The 7-point algorithm is the minimal solver; fewer points cannot work.
    if( npoints < 7 )
        return Mat();

    Ptr<PointSetRegistrator::Callback> cb = makePtr<FMEstimatorCallback>();
    int result;

    if( npoints == 7 || method == FM_8POINT )
    {
        // Direct (non-robust) estimation on all points; every point is
        // reported as an inlier in the mask.
        result = cb->runKernel(m1, m2, F);
        if( _mask.needed() )
        {
            _mask.create(npoints, 1, CV_8U, -1, true);
            Mat mask = _mask.getMat();
            CV_Assert( (mask.cols == 1 || mask.rows == 1) && (int)mask.total() == npoints );
            mask.setTo(Scalar::all(1));
        }
    }
    else
    {
        if( ransacReprojThreshold <= 0 )
            ransacReprojThreshold = 3;
        if( confidence < DBL_EPSILON || confidence > 1 - DBL_EPSILON )
            confidence = 0.99;

        // NOTE(review): `method & ~3` appears to mask off the low two bits so
        // that combined flags (robust method | point-count method) still
        // select RANSAC — confirm against the FM_* enum values.
        // LMedS also needs enough points (>= 15 here) to be meaningful.
        if( (method & ~3) == FM_RANSAC && npoints >= 15 )
            result = createRANSACPointSetRegistrator(cb, 7, ransacReprojThreshold, confidence)->run(m1, m2, F, _mask);
        else
            result = createLMeDSPointSetRegistrator(cb, 7, confidence)->run(m1, m2, F, _mask);
    }

    if( result <= 0 )
        return Mat();

    return F;
}
827
828
/**
 * Backward-compatible overload: forwards to the main findFundamentalMat()
 * implementation with the arguments reordered.
 */
cv::Mat cv::findFundamentalMat( InputArray _points1, InputArray _points2,
                                OutputArray _mask, int method,
                                double ransacReprojThreshold , double confidence)
{
    Mat F = cv::findFundamentalMat(_points1, _points2, method, ransacReprojThreshold, confidence, _mask);
    return F;
}
834
835
836
/**
 * For each point in one image, compute the corresponding epipolar line in
 * the other image: l = F*(x,1) when whichImage == 1, l = F'*(x,1) when
 * whichImage == 2.  Each output line (a,b,c) is normalized so that
 * a^2 + b^2 = 1 (unless a = b = 0).
 *
 * @param _points     input 2D point set (or 3D homogeneous, converted first);
 *                    depth CV_32S, CV_32F or CV_64F
 * @param whichImage  index (1 or 2) of the image containing the points
 * @param _Fmat       3x3 fundamental matrix
 * @param _lines      output Nx1 array of 3-channel lines (a,b,c);
 *                    CV_32FC3 for integer/float input, CV_64FC3 for double
 */
void cv::computeCorrespondEpilines( InputArray _points, int whichImage,
                                    InputArray _Fmat, OutputArray _lines )
{
    CV_INSTRUMENT_REGION();

    // f holds the (possibly transposed) fundamental matrix as a flat array;
    // tempF aliases it so OpenCV operations below update f in place.
    double f[9] = {0};
    Mat tempF(3, 3, CV_64F, f);
    Mat points = _points.getMat(), F = _Fmat.getMat();

    // Pointer-based iteration below requires continuous storage.
    if( !points.isContinuous() )
        points = points.clone();

    int npoints = points.checkVector(2);
    if( npoints < 0 )
    {
        npoints = points.checkVector(3);
        if( npoints < 0 )
            CV_Error( Error::StsBadArg, "The input should be a 2D or 3D point set");
        Mat temp;
        convertPointsFromHomogeneous(points, temp);
        points = temp;
    }
    int depth = points.depth();
    CV_Assert( depth == CV_32F || depth == CV_32S || depth == CV_64F );

    CV_Assert(F.size() == Size(3,3));
    F.convertTo(tempF, CV_64F);
    // Lines for points in the second image are given by F transposed.
    if( whichImage == 2 )
        transpose(tempF, tempF);

    // Integer points still produce floating-point lines.
    int ltype = CV_MAKETYPE(MAX(depth, CV_32F), 3);
    _lines.create(npoints, 1, ltype);
    Mat lines = _lines.getMat();
    if( !lines.isContinuous() )
    {
        // Recreate from scratch to obtain a continuous buffer.
        _lines.release();
        _lines.create(npoints, 1, ltype);
        lines = _lines.getMat();
    }
    CV_Assert( lines.isContinuous());

    if( depth == CV_32S || depth == CV_32F )
    {
        const Point* ptsi = points.ptr<Point>();
        const Point2f* ptsf = points.ptr<Point2f>();
        Point3f* dstf = lines.ptr<Point3f>();
        for( int i = 0; i < npoints; i++ )
        {
            Point2f pt = depth == CV_32F ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
            double a = f[0]*pt.x + f[1]*pt.y + f[2];
            double b = f[3]*pt.x + f[4]*pt.y + f[5];
            double c = f[6]*pt.x + f[7]*pt.y + f[8];
            // Normalize so that (a,b) is a unit normal of the line.
            double nu = a*a + b*b;
            nu = nu ? 1./std::sqrt(nu) : 1.;
            a *= nu; b *= nu; c *= nu;
            dstf[i] = Point3f((float)a, (float)b, (float)c);
        }
    }
    else
    {
        const Point2d* ptsd = points.ptr<Point2d>();
        Point3d* dstd = lines.ptr<Point3d>();
        for( int i = 0; i < npoints; i++ )
        {
            Point2d pt = ptsd[i];
            double a = f[0]*pt.x + f[1]*pt.y + f[2];
            double b = f[3]*pt.x + f[4]*pt.y + f[5];
            double c = f[6]*pt.x + f[7]*pt.y + f[8];
            double nu = a*a + b*b;
            nu = nu ? 1./std::sqrt(nu) : 1.;
            a *= nu; b *= nu; c *= nu;
            dstd[i] = Point3d(a, b, c);
        }
    }
}
911
912
/**
 * Convert points from homogeneous to Euclidean coordinates by perspective
 * division: (x, y, w) -> (x/w, y/w) and (x, y, z, w) -> (x/w, y/w, z/w).
 *
 * A zero last coordinate is left unscaled (scale factor 1) rather than
 * producing infinities.
 *
 * @param _src Nx1 (or 1xN) array of 3- or 4-channel points; depth CV_32S,
 *             CV_32F or CV_64F
 * @param _dst output Nx1 array with one channel fewer; CV_32S input is
 *             promoted to CV_32F, floating-point depth is preserved
 */
void cv::convertPointsFromHomogeneous( InputArray _src, OutputArray _dst )
{
    CV_INSTRUMENT_REGION();

    Mat src = _src.getMat();
    // Pointer-based iteration below requires continuous storage.
    if( !src.isContinuous() )
        src = src.clone();
    int i, npoints = src.checkVector(3), depth = src.depth(), cn = 3;
    if( npoints < 0 )
    {
        npoints = src.checkVector(4);
        CV_Assert(npoints >= 0);
        cn = 4;
    }
    CV_Assert( npoints >= 0 && (depth == CV_32S || depth == CV_32F || depth == CV_64F));

    // Output keeps a floating-point depth; CV_32S input produces CV_32F.
    int dtype = CV_MAKETYPE(depth <= CV_32F ? CV_32F : CV_64F, cn-1);
    _dst.create(npoints, 1, dtype);
    Mat dst = _dst.getMat();
    if( !dst.isContinuous() )
    {
        // Recreate from scratch to obtain a continuous buffer.
        _dst.release();
        _dst.create(npoints, 1, dtype);
        dst = _dst.getMat();
    }
    CV_Assert( dst.isContinuous() );

    if( depth == CV_32S )
    {
        if( cn == 3 )
        {
            const Point3i* sptr = src.ptr<Point3i>();
            Point2f* dptr = dst.ptr<Point2f>();
            for( i = 0; i < npoints; i++ )
            {
                float scale = sptr[i].z != 0 ? 1.f/sptr[i].z : 1.f;
                dptr[i] = Point2f(sptr[i].x*scale, sptr[i].y*scale);
            }
        }
        else
        {
            const Vec4i* sptr = src.ptr<Vec4i>();
            Point3f* dptr = dst.ptr<Point3f>();
            for( i = 0; i < npoints; i++ )
            {
                float scale = sptr[i][3] != 0 ? 1.f/sptr[i][3] : 1.f;
                dptr[i] = Point3f(sptr[i][0]*scale, sptr[i][1]*scale, sptr[i][2]*scale);
            }
        }
    }
    else if( depth == CV_32F )
    {
        if( cn == 3 )
        {
            const Point3f* sptr = src.ptr<Point3f>();
            Point2f* dptr = dst.ptr<Point2f>();
            for( i = 0; i < npoints; i++ )
            {
                float scale = sptr[i].z != 0.f ? 1.f/sptr[i].z : 1.f;
                dptr[i] = Point2f(sptr[i].x*scale, sptr[i].y*scale);
            }
        }
        else
        {
            const Vec4f* sptr = src.ptr<Vec4f>();
            Point3f* dptr = dst.ptr<Point3f>();
            for( i = 0; i < npoints; i++ )
            {
                float scale = sptr[i][3] != 0.f ? 1.f/sptr[i][3] : 1.f;
                dptr[i] = Point3f(sptr[i][0]*scale, sptr[i][1]*scale, sptr[i][2]*scale);
            }
        }
    }
    else if( depth == CV_64F )
    {
        if( cn == 3 )
        {
            const Point3d* sptr = src.ptr<Point3d>();
            Point2d* dptr = dst.ptr<Point2d>();
            for( i = 0; i < npoints; i++ )
            {
                double scale = sptr[i].z != 0. ? 1./sptr[i].z : 1.;
                dptr[i] = Point2d(sptr[i].x*scale, sptr[i].y*scale);
            }
        }
        else
        {
            const Vec4d* sptr = src.ptr<Vec4d>();
            Point3d* dptr = dst.ptr<Point3d>();
            for( i = 0; i < npoints; i++ )
            {
                // Fixed: compare against a double literal (was `0.f`) for
                // consistency with the 3-channel CV_64F branch; the old
                // float literal was promoted to double, so behavior is the
                // same, but the mixed literal was misleading.
                double scale = sptr[i][3] != 0. ? 1./sptr[i][3] : 1.;
                dptr[i] = Point3d(sptr[i][0]*scale, sptr[i][1]*scale, sptr[i][2]*scale);
            }
        }
    }
    else
        CV_Error(Error::StsUnsupportedFormat, "");
}
1011
1012
1013
/**
 * Convert points to homogeneous coordinates by appending a unit coordinate:
 * (x, y) -> (x, y, 1) and (x, y, z) -> (x, y, z, 1).
 *
 * @param _src Nx1 (or 1xN) array of 2- or 3-channel points; depth CV_32S,
 *             CV_32F or CV_64F
 * @param _dst output Nx1 array of the same depth with one extra channel
 */
void cv::convertPointsToHomogeneous( InputArray _src, OutputArray _dst )
{
    CV_INSTRUMENT_REGION();

    // Pointer-based iteration below requires continuous storage.
    Mat src = _src.getMat();
    if( !src.isContinuous() )
        src = src.clone();

    // Accept either 2D (cn == 2) or 3D (cn == 3) input points.
    int cn = 2;
    int npoints = src.checkVector(2);
    const int depth = src.depth();
    if( npoints < 0 )
    {
        npoints = src.checkVector(3);
        CV_Assert(npoints >= 0);
        cn = 3;
    }
    CV_Assert( npoints >= 0 && (depth == CV_32S || depth == CV_32F || depth == CV_64F));

    // The output gains one channel (the appended homogeneous coordinate).
    const int dtype = CV_MAKETYPE(depth, cn+1);
    _dst.create(npoints, 1, dtype);
    Mat dst = _dst.getMat();
    if( !dst.isContinuous() )
    {
        // Recreate from scratch to obtain a continuous buffer.
        _dst.release();
        _dst.create(npoints, 1, dtype);
        dst = _dst.getMat();
    }
    CV_Assert( dst.isContinuous() );

    if( depth == CV_32S )
    {
        if( cn == 2 )
        {
            const Point2i* from = src.ptr<Point2i>();
            Point3i* to = dst.ptr<Point3i>();
            for( int i = 0; i < npoints; i++ )
                to[i] = Point3i(from[i].x, from[i].y, 1);
        }
        else
        {
            const Point3i* from = src.ptr<Point3i>();
            Vec4i* to = dst.ptr<Vec4i>();
            for( int i = 0; i < npoints; i++ )
                to[i] = Vec4i(from[i].x, from[i].y, from[i].z, 1);
        }
    }
    else if( depth == CV_32F )
    {
        if( cn == 2 )
        {
            const Point2f* from = src.ptr<Point2f>();
            Point3f* to = dst.ptr<Point3f>();
            for( int i = 0; i < npoints; i++ )
                to[i] = Point3f(from[i].x, from[i].y, 1.f);
        }
        else
        {
            const Point3f* from = src.ptr<Point3f>();
            Vec4f* to = dst.ptr<Vec4f>();
            for( int i = 0; i < npoints; i++ )
                to[i] = Vec4f(from[i].x, from[i].y, from[i].z, 1.f);
        }
    }
    else if( depth == CV_64F )
    {
        if( cn == 2 )
        {
            const Point2d* from = src.ptr<Point2d>();
            Point3d* to = dst.ptr<Point3d>();
            for( int i = 0; i < npoints; i++ )
                to[i] = Point3d(from[i].x, from[i].y, 1.);
        }
        else
        {
            const Point3d* from = src.ptr<Point3d>();
            Vec4d* to = dst.ptr<Vec4d>();
            for( int i = 0; i < npoints; i++ )
                to[i] = Vec4d(from[i].x, from[i].y, from[i].z, 1.);
        }
    }
    else
        CV_Error(Error::StsUnsupportedFormat, "");
}
1094
1095
1096
/**
 * Dispatch between the two homogeneous conversions based on channel counts.
 *
 * The destination must have a fixed type; if it has fewer channels than the
 * source, the homogeneous coordinate is dropped (perspective division),
 * otherwise one is appended.
 */
void cv::convertPointsHomogeneous( InputArray _src, OutputArray _dst )
{
    CV_INSTRUMENT_REGION();

    const int scn = CV_MAT_CN(_src.type());
    const int dcn = CV_MAT_CN(_dst.type());
    CV_Assert( _dst.fixedType() );

    if( scn > dcn )
        convertPointsFromHomogeneous(_src, _dst);
    else
        convertPointsToHomogeneous(_src, _dst);
}
1109
/**
 * Sampson distance for a single correspondence:
 *
 * \f[
 *   \frac{(\mathbf{x_2}^T F \mathbf{x_1})^2}
 *        {(F\mathbf{x_1})_1^2 + (F\mathbf{x_1})_2^2 +
 *         (F^T\mathbf{x_2})_1^2 + (F^T\mathbf{x_2})_2^2}
 * \f]
 *
 * @param _pt1 first point as a 3x1 CV_64F homogeneous vector
 * @param _pt2 second point as a 3x1 CV_64F homogeneous vector
 * @param _F   3x3 CV_64F fundamental matrix
 */
double cv::sampsonDistance(InputArray _pt1, InputArray _pt2, InputArray _F)
{
    CV_INSTRUMENT_REGION();

    CV_Assert(_pt1.type() == CV_64F && _pt2.type() == CV_64F && _F.type() == CV_64F);
    CV_DbgAssert(_pt1.rows() == 3 && _F.size() == Size(3, 3) && _pt1.rows() == _pt2.rows());

    Mat pt1 = _pt1.getMat();
    Mat pt2 = _pt2.getMat();
    Mat F = _F.getMat();

    const Matx33d& fm = *F.ptr<Matx33d>();
    const Vec3d& x1 = *pt1.ptr<Vec3d>();
    const Vec3d& x2 = *pt2.ptr<Vec3d>();

    // Epipolar lines associated with each point.
    const Vec3d line2 = fm * x1;       // F * x1
    const Vec3d line1 = fm.t() * x2;   // F^T * x2

    // Algebraic epipolar residual x2^T * F * x1.
    const double num = x2.dot(line2);

    // Denominator: sum of squares of the first two coefficients of both lines.
    const double denom = line2[0]*line2[0] + line2[1]*line2[1] +
                         line1[0]*line1[0] + line1[1]*line1[1];

    return num*num / denom;
}
1131
1132
/* End of file. */
1133
1134