Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Tetragramm
GitHub Repository: Tetragramm/opencv
Path: blob/master/modules/calib3d/src/calibration.cpp
16354 views
1
/*M///////////////////////////////////////////////////////////////////////////////////////
2
//
3
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4
//
5
// By downloading, copying, installing or using the software you agree to this license.
6
// If you do not agree to this license, do not download, install,
7
// copy or use the software.
8
//
9
//
10
// License Agreement
11
// For Open Source Computer Vision Library
12
//
13
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15
// Third party copyrights are property of their respective owners.
16
//
17
// Redistribution and use in source and binary forms, with or without modification,
18
// are permitted provided that the following conditions are met:
19
//
20
// * Redistribution's of source code must retain the above copyright notice,
21
// this list of conditions and the following disclaimer.
22
//
23
// * Redistribution's in binary form must reproduce the above copyright notice,
24
// this list of conditions and the following disclaimer in the documentation
25
// and/or other materials provided with the distribution.
26
//
27
// * The name of the copyright holders may not be used to endorse or promote products
28
// derived from this software without specific prior written permission.
29
//
30
// This software is provided by the copyright holders and contributors "as is" and
31
// any express or implied warranties, including, but not limited to, the implied
32
// warranties of merchantability and fitness for a particular purpose are disclaimed.
33
// In no event shall the Intel Corporation or contributors be liable for any direct,
34
// indirect, incidental, special, exemplary, or consequential damages
35
// (including, but not limited to, procurement of substitute goods or services;
36
// loss of use, data, or profits; or business interruption) however caused
37
// and on any theory of liability, whether in contract, strict liability,
38
// or tort (including negligence or otherwise) arising in any way out of
39
// the use of this software, even if advised of the possibility of such damage.
40
//
41
//M*/
42
43
#include "precomp.hpp"
44
#include "opencv2/imgproc/imgproc_c.h"
45
#include "distortion_model.hpp"
46
#include "opencv2/calib3d/calib3d_c.h"
47
#include <stdio.h>
48
#include <iterator>
49
50
/*
51
    This is a straightforward port (v3) of the Matlab calibration engine by Jean-Yves Bouguet
52
that is (in a large extent) based on the paper:
53
Z. Zhang. "A flexible new technique for camera calibration".
54
IEEE Transactions on Pattern Analysis and Machine Intelligence, 22(11):1330-1334, 2000.
55
The 1st initial port was done by Valery Mosyagin.
56
*/
57
58
using namespace cv;
59
60
// reimplementation of dAB.m
61
CV_IMPL void cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB )
{
    // Jacobians of the matrix product C = A*B with respect to the (row-major
    // flattened) elements of A and of B.  With A being MxL and B being LxN:
    //   dABdA is (M*N) x (M*L):  dC(r,c)/dA(r,j) = B(j,c), zero elsewhere
    //   dABdB is (M*N) x (L*N):  dC(r,c)/dB(j,c) = A(r,j), zero elsewhere
    // Either Jacobian pointer may be NULL to skip that output.
    CV_Assert( CV_IS_MAT(A) && CV_IS_MAT(B) );
    CV_Assert( CV_ARE_TYPES_EQ(A, B) &&
        (CV_MAT_TYPE(A->type) == CV_32F || CV_MAT_TYPE(A->type) == CV_64F) );
    CV_Assert( A->cols == B->rows );

    const int M = A->rows;
    const int L = A->cols;
    const int N = B->cols;
    // element (not byte) stride between consecutive rows of B
    const int bstep = B->step/CV_ELEM_SIZE(B->type);

    if( dABdA )
    {
        CV_Assert( CV_ARE_TYPES_EQ(A, dABdA) &&
            dABdA->rows == A->rows*B->cols && dABdA->cols == A->rows*A->cols );
    }

    if( dABdB )
    {
        CV_Assert( CV_ARE_TYPES_EQ(A, dABdB) &&
            dABdB->rows == A->rows*B->cols && dABdB->cols == B->rows*B->cols );
    }

    if( CV_MAT_TYPE(A->type) == CV_32F )
    {
        // single-precision variant; one Jacobian row per output element C(row,col)
        for( int idx = 0; idx < M*N; idx++ )
        {
            const int row = idx / N, col = idx % N;

            if( dABdA )
            {
                float* grad = (float*)(dABdA->data.ptr + dABdA->step*idx);
                const float* bcol = (const float*)B->data.ptr + col;
                int j;

                for( j = 0; j < M*L; j++ )
                    grad[j] = 0;
                // only row `row` of A influences C(row,col)
                for( j = 0; j < L; j++ )
                    grad[row*L + j] = bcol[j*bstep];
            }

            if( dABdB )
            {
                float* grad = (float*)(dABdB->data.ptr + dABdB->step*idx);
                const float* arow = (const float*)(A->data.ptr + A->step*row);
                int j;

                for( j = 0; j < L*N; j++ )
                    grad[j] = 0;
                // only column `col` of B influences C(row,col)
                for( j = 0; j < L; j++ )
                    grad[j*N + col] = arow[j];
            }
        }
    }
    else
    {
        // double-precision variant (identical layout to the 32f branch above)
        for( int idx = 0; idx < M*N; idx++ )
        {
            const int row = idx / N, col = idx % N;

            if( dABdA )
            {
                double* grad = (double*)(dABdA->data.ptr + dABdA->step*idx);
                const double* bcol = (const double*)B->data.ptr + col;
                int j;

                for( j = 0; j < M*L; j++ )
                    grad[j] = 0;
                for( j = 0; j < L; j++ )
                    grad[row*L + j] = bcol[j*bstep];
            }

            if( dABdB )
            {
                double* grad = (double*)(dABdB->data.ptr + dABdB->step*idx);
                const double* arow = (const double*)(A->data.ptr + A->step*row);
                int j;

                for( j = 0; j < L*N; j++ )
                    grad[j] = 0;
                for( j = 0; j < L; j++ )
                    grad[j*N + col] = arow[j];
            }
        }
    }
}
147
148
// reimplementation of compose_motion.m
149
// Composes two rigid-body motions, each given as a Rodrigues rotation vector
// plus a translation vector:
//   R3 = R2*R1,   t3 = R2*t1 + t2
// and optionally outputs the derivatives of (rvec3, tvec3) with respect to
// each of the four inputs.  Any output pointer may be NULL to skip it.
CV_IMPL void cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1,
             const CvMat* _rvec2, const CvMat* _tvec2,
             CvMat* _rvec3, CvMat* _tvec3,
             CvMat* dr3dr1, CvMat* dr3dt1,
             CvMat* dr3dr2, CvMat* dr3dt2,
             CvMat* dt3dr1, CvMat* dt3dt1,
             CvMat* dt3dr2, CvMat* dt3dt2 )
{
    // Stack storage for the 3x1 rotation vectors, the 3x3 rotation matrices
    // and the 9x3 Jacobians dR/dr produced by cvRodrigues2.
    double _r1[3], _r2[3];
    double _R1[9], _d1[9*3], _R2[9], _d2[9*3];
    CvMat r1 = cvMat(3,1,CV_64F,_r1), r2 = cvMat(3,1,CV_64F,_r2);
    CvMat R1 = cvMat(3,3,CV_64F,_R1), R2 = cvMat(3,3,CV_64F,_R2);
    CvMat dR1dr1 = cvMat(9,3,CV_64F,_d1), dR2dr2 = cvMat(9,3,CV_64F,_d2);

    CV_Assert( CV_IS_MAT(_rvec1) && CV_IS_MAT(_rvec2) );

    CV_Assert( CV_MAT_TYPE(_rvec1->type) == CV_32F ||
               CV_MAT_TYPE(_rvec1->type) == CV_64F );

    CV_Assert( _rvec1->rows == 3 && _rvec1->cols == 1 && CV_ARE_SIZES_EQ(_rvec1, _rvec2) );

    cvConvert( _rvec1, &r1 );
    cvConvert( _rvec2, &r2 );

    // Convert both rotation vectors to matrices, keeping dR/dr for chain rule.
    cvRodrigues2( &r1, &R1, &dR1dr1 );
    cvRodrigues2( &r2, &R2, &dR2dr2 );

    if( _rvec3 || dr3dr1 || dr3dr2 )
    {
        double _r3[3], _R3[9], _dR3dR1[9*9], _dR3dR2[9*9], _dr3dR3[9*3];
        double _W1[9*3], _W2[3*3];
        CvMat r3 = cvMat(3,1,CV_64F,_r3), R3 = cvMat(3,3,CV_64F,_R3);
        CvMat dR3dR1 = cvMat(9,9,CV_64F,_dR3dR1), dR3dR2 = cvMat(9,9,CV_64F,_dR3dR2);
        CvMat dr3dR3 = cvMat(3,9,CV_64F,_dr3dR3);
        CvMat W1 = cvMat(3,9,CV_64F,_W1), W2 = cvMat(3,3,CV_64F,_W2);

        // R3 = R2*R1 and the Jacobians of the product w.r.t. R2 and R1.
        cvMatMul( &R2, &R1, &R3 );
        cvCalcMatMulDeriv( &R2, &R1, &dR3dR2, &dR3dR1 );

        // Back to a rotation vector; dr3dR3 is the 3x9 Jacobian of that step.
        cvRodrigues2( &R3, &r3, &dr3dR3 );

        if( _rvec3 )
            cvConvert( &r3, _rvec3 );

        if( dr3dr1 )
        {
            // Chain rule: dr3/dr1 = dr3/dR3 * dR3/dR1 * dR1/dr1 (3x3 result).
            cvMatMul( &dr3dR3, &dR3dR1, &W1 );
            cvMatMul( &W1, &dR1dr1, &W2 );
            cvConvert( &W2, dr3dr1 );
        }

        if( dr3dr2 )
        {
            // Chain rule: dr3/dr2 = dr3/dR3 * dR3/dR2 * dR2/dr2 (3x3 result).
            cvMatMul( &dr3dR3, &dR3dR2, &W1 );
            cvMatMul( &W1, &dR2dr2, &W2 );
            cvConvert( &W2, dr3dr2 );
        }
    }

    // The composed rotation does not depend on either translation.
    if( dr3dt1 )
        cvZero( dr3dt1 );
    if( dr3dt2 )
        cvZero( dr3dt2 );

    if( _tvec3 || dt3dr2 || dt3dt1 )
    {
        double _t1[3], _t2[3], _t3[3], _dxdR2[3*9], _dxdt1[3*3], _W3[3*3];
        CvMat t1 = cvMat(3,1,CV_64F,_t1), t2 = cvMat(3,1,CV_64F,_t2);
        CvMat t3 = cvMat(3,1,CV_64F,_t3);
        CvMat dxdR2 = cvMat(3, 9, CV_64F, _dxdR2);
        CvMat dxdt1 = cvMat(3, 3, CV_64F, _dxdt1);
        CvMat W3 = cvMat(3, 3, CV_64F, _W3);

        CV_Assert( CV_IS_MAT(_tvec1) && CV_IS_MAT(_tvec2) );
        CV_Assert( CV_ARE_SIZES_EQ(_tvec1, _tvec2) && CV_ARE_SIZES_EQ(_tvec1, _rvec1) );

        cvConvert( _tvec1, &t1 );
        cvConvert( _tvec2, &t2 );
        // t3 = R2*t1 + t2
        cvMatMulAdd( &R2, &t1, &t2, &t3 );

        if( _tvec3 )
            cvConvert( &t3, _tvec3 );

        if( dt3dr2 || dt3dt1 )
        {
            // Jacobians of x = R2*t1 w.r.t. R2 (3x9) and t1 (3x3).
            cvCalcMatMulDeriv( &R2, &t1, &dxdR2, &dxdt1 );
            if( dt3dr2 )
            {
                // dt3/dr2 = d(R2*t1)/dR2 * dR2/dr2
                cvMatMul( &dxdR2, &dR2dr2, &W3 );
                cvConvert( &W3, dt3dr2 );
            }
            if( dt3dt1 )
                // dt3/dt1 = R2 (returned by cvCalcMatMulDeriv as dxdt1)
                cvConvert( &dxdt1, dt3dt1 );
        }
    }

    // t3 = R2*t1 + t2: identity in t2, independent of r1.
    if( dt3dt2 )
        cvSetIdentity( dt3dt2 );
    if( dt3dr1 )
        cvZero( dt3dr1 );
}
250
251
// Converts a rotation vector (1x3 or 3x1) to a 3x3 rotation matrix using the
// Rodrigues formula, or a 3x3 rotation matrix back to a rotation vector,
// optionally computing the 3x9 / 9x3 Jacobian of the transform.
// Returns 1 on success; returns 0 (and zeroes the outputs) when a 3x3 input
// contains values outside [-100, 100].
CV_IMPL int cvRodrigues2( const CvMat* src, CvMat* dst, CvMat* jacobian )
{
    int depth, elem_size;
    int i, k;
    // J holds the Jacobian as 3x9 (row-major); transposed on output if needed.
    double J[27] = {0};
    CvMat matJ = cvMat( 3, 9, CV_64F, J );

    if( !CV_IS_MAT(src) )
        CV_Error( !src ? CV_StsNullPtr : CV_StsBadArg, "Input argument is not a valid matrix" );

    if( !CV_IS_MAT(dst) )
        CV_Error( !dst ? CV_StsNullPtr : CV_StsBadArg,
        "The first output argument is not a valid matrix" );

    depth = CV_MAT_DEPTH(src->type);
    elem_size = CV_ELEM_SIZE(depth);

    if( depth != CV_32F && depth != CV_64F )
        CV_Error( CV_StsUnsupportedFormat, "The matrices must have 32f or 64f data type" );

    if( !CV_ARE_DEPTHS_EQ(src, dst) )
        CV_Error( CV_StsUnmatchedFormats, "All the matrices must have the same data type" );

    if( jacobian )
    {
        if( !CV_IS_MAT(jacobian) )
            CV_Error( CV_StsBadArg, "Jacobian is not a valid matrix" );

        if( !CV_ARE_DEPTHS_EQ(src, jacobian) || CV_MAT_CN(jacobian->type) != 1 )
            CV_Error( CV_StsUnmatchedFormats, "Jacobian must have 32fC1 or 64fC1 datatype" );

        if( (jacobian->rows != 9 || jacobian->cols != 3) &&
            (jacobian->rows != 3 || jacobian->cols != 9))
            CV_Error( CV_StsBadSize, "Jacobian must be 3x9 or 9x3" );
    }

    if( src->cols == 1 || src->rows == 1 )
    {
        // --- vector -> matrix direction ---
        // element stride between the 3 components (handles non-contiguous 3x1)
        int step = src->rows > 1 ? src->step / elem_size : 1;

        if( src->rows + src->cols*CV_MAT_CN(src->type) - 1 != 3 )
            CV_Error( CV_StsBadSize, "Input matrix must be 1x3, 3x1 or 3x3" );

        if( dst->rows != 3 || dst->cols != 3 || CV_MAT_CN(dst->type) != 1 )
            CV_Error( CV_StsBadSize, "Output matrix must be 3x3, single-channel floating point matrix" );

        Point3d r;
        if( depth == CV_32F )
        {
            r.x = src->data.fl[0];
            r.y = src->data.fl[step];
            r.z = src->data.fl[step*2];
        }
        else
        {
            r.x = src->data.db[0];
            r.y = src->data.db[step];
            r.z = src->data.db[step*2];
        }

        // rotation angle = length of the rotation vector
        double theta = norm(r);

        if( theta < DBL_EPSILON )
        {
            // Near-zero rotation: R = I; Jacobian of R w.r.t. r at r = 0.
            cvSetIdentity( dst );

            if( jacobian )
            {
                memset( J, 0, sizeof(J) );
                J[5] = J[15] = J[19] = -1;
                J[7] = J[11] = J[21] = 1;
            }
        }
        else
        {
            double c = cos(theta);
            double s = sin(theta);
            double c1 = 1. - c;
            double itheta = theta ? 1./theta : 0.;

            // normalize to the unit rotation axis
            r *= itheta;

            // rrt = r*r^T (outer product), r_x = skew-symmetric cross matrix
            Matx33d rrt( r.x*r.x, r.x*r.y, r.x*r.z, r.x*r.y, r.y*r.y, r.y*r.z, r.x*r.z, r.y*r.z, r.z*r.z );
            Matx33d r_x(    0, -r.z,  r.y,
                          r.z,    0, -r.x,
                         -r.y,  r.x,    0 );

            // R = cos(theta)*I + (1 - cos(theta))*r*rT + sin(theta)*[r_x]
            Matx33d R = c*Matx33d::eye() + c1*rrt + s*r_x;

            Mat(R).convertTo(cvarrToMat(dst), dst->type);

            if( jacobian )
            {
                const double I[] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };
                // d(rrt)/d(r_i) and d(r_x)/d(r_i), flattened 9 values per axis
                double drrt[] = { r.x+r.x, r.y, r.z, r.y, 0, 0, r.z, 0, 0,
                                  0, r.x, 0, r.x, r.y+r.y, r.z, 0, r.z, 0,
                                  0, 0, r.x, 0, 0, r.y, r.x, r.y, r.z+r.z };
                double d_r_x_[] = { 0, 0, 0, 0, 0, -1, 0, 1, 0,
                                    0, 0, 1, 0, 0, 0, -1, 0, 0,
                                    0, -1, 0, 1, 0, 0, 0, 0, 0 };
                // dR/dr_i combines the derivative through theta and through
                // the normalized axis, using the Rodrigues formula terms.
                for( i = 0; i < 3; i++ )
                {
                    double ri = i == 0 ? r.x : i == 1 ? r.y : r.z;
                    double a0 = -s*ri, a1 = (s - 2*c1*itheta)*ri, a2 = c1*itheta;
                    double a3 = (c - s*itheta)*ri, a4 = s*itheta;
                    for( k = 0; k < 9; k++ )
                        J[i*9+k] = a0*I[k] + a1*rrt.val[k] + a2*drrt[i*9+k] +
                                   a3*r_x.val[k] + a4*d_r_x_[i*9+k];
                }
            }
        }
    }
    else if( src->cols == 3 && src->rows == 3 )
    {
        // --- matrix -> vector direction ---
        Matx33d U, Vt;
        Vec3d W;
        double theta, s, c;
        int step = dst->rows > 1 ? dst->step / elem_size : 1;

        if( (dst->rows != 1 || dst->cols*CV_MAT_CN(dst->type) != 3) &&
            (dst->rows != 3 || dst->cols != 1 || CV_MAT_CN(dst->type) != 1))
            CV_Error( CV_StsBadSize, "Output matrix must be 1x3 or 3x1" );

        Matx33d R = cvarrToMat(src);

        if( !checkRange(R, true, NULL, -100, 100) )
        {
            // reject NaN/inf/absurd values instead of producing garbage
            cvZero(dst);
            if( jacobian )
                cvZero(jacobian);
            return 0;
        }

        // Project the input onto the closest true rotation matrix via SVD.
        SVD::compute(R, W, U, Vt);
        R = U*Vt;

        // r ~ 2*sin(theta)*axis, extracted from the antisymmetric part of R
        Point3d r(R(2, 1) - R(1, 2), R(0, 2) - R(2, 0), R(1, 0) - R(0, 1));

        s = std::sqrt((r.x*r.x + r.y*r.y + r.z*r.z)*0.25);
        c = (R(0, 0) + R(1, 1) + R(2, 2) - 1)*0.5;
        c = c > 1. ? 1. : c < -1. ? -1. : c;
        theta = acos(c);

        if( s < 1e-5 )
        {
            // Degenerate case: theta near 0 (c > 0) or near pi (c <= 0).
            double t;

            if( c > 0 )
                r = Point3d(0, 0, 0);
            else
            {
                // theta ~ pi: recover |axis| components from the diagonal,
                // signs from the off-diagonal entries.
                t = (R(0, 0) + 1)*0.5;
                r.x = std::sqrt(MAX(t,0.));
                t = (R(1, 1) + 1)*0.5;
                r.y = std::sqrt(MAX(t,0.))*(R(0, 1) < 0 ? -1. : 1.);
                t = (R(2, 2) + 1)*0.5;
                r.z = std::sqrt(MAX(t,0.))*(R(0, 2) < 0 ? -1. : 1.);
                // resolve the remaining sign ambiguity of r.z
                if( fabs(r.x) < fabs(r.y) && fabs(r.x) < fabs(r.z) && (R(1, 2) > 0) != (r.y*r.z > 0) )
                    r.z = -r.z;
                // rescale so that |r| == theta
                theta /= norm(r);
                r *= theta;
            }

            if( jacobian )
            {
                memset( J, 0, sizeof(J) );
                if( c > 0 )
                {
                    J[5] = J[15] = J[19] = -0.5;
                    J[7] = J[11] = J[21] = 0.5;
                }
                // NOTE(review): for the theta ~ pi branch the Jacobian is left
                // all-zero — presumably because it is not well defined there.
            }
        }
        else
        {
            double vth = 1/(2*s);

            if( jacobian )
            {
                double t, dtheta_dtr = -1./s;
                // var1 = [vth;theta]
                // var = [om1;var1] = [om1;vth;theta]
                double dvth_dtheta = -vth*c/s;
                double d1 = 0.5*dvth_dtheta*dtheta_dtr;
                double d2 = 0.5*dtheta_dtr;
                // dvar1/dR = dvar1/dtheta*dtheta/dR = [dvth/dtheta; 1] * dtheta/dtr * dtr/dR
                double dvardR[5*9] =
                {
                    0, 0, 0, 0, 0, 1, 0, -1, 0,
                    0, 0, -1, 0, 0, 0, 1, 0, 0,
                    0, 1, 0, -1, 0, 0, 0, 0, 0,
                    d1, 0, 0, 0, d1, 0, 0, 0, d1,
                    d2, 0, 0, 0, d2, 0, 0, 0, d2
                };
                // var2 = [om;theta]
                double dvar2dvar[] =
                {
                    vth, 0, 0, r.x, 0,
                    0, vth, 0, r.y, 0,
                    0, 0, vth, r.z, 0,
                    0, 0, 0, 0, 1
                };
                double domegadvar2[] =
                {
                    theta, 0, 0, r.x*vth,
                    0, theta, 0, r.y*vth,
                    0, 0, theta, r.z*vth
                };

                CvMat _dvardR = cvMat( 5, 9, CV_64FC1, dvardR );
                CvMat _dvar2dvar = cvMat( 4, 5, CV_64FC1, dvar2dvar );
                CvMat _domegadvar2 = cvMat( 3, 4, CV_64FC1, domegadvar2 );
                double t0[3*5];
                CvMat _t0 = cvMat( 3, 5, CV_64FC1, t0 );

                // chain rule: domega/dR = domega/dvar2 * dvar2/dvar * dvar/dR
                cvMatMul( &_domegadvar2, &_dvar2dvar, &_t0 );
                cvMatMul( &_t0, &_dvardR, &matJ );

                // transpose every row of matJ (treat the rows as 3x3 matrices)
                CV_SWAP(J[1], J[3], t); CV_SWAP(J[2], J[6], t); CV_SWAP(J[5], J[7], t);
                CV_SWAP(J[10], J[12], t); CV_SWAP(J[11], J[15], t); CV_SWAP(J[14], J[16], t);
                CV_SWAP(J[19], J[21], t); CV_SWAP(J[20], J[24], t); CV_SWAP(J[23], J[25], t);
            }

            // scale r (currently 2*s*axis) to theta*axis
            vth *= theta;
            r *= vth;
        }

        if( depth == CV_32F )
        {
            dst->data.fl[0] = (float)r.x;
            dst->data.fl[step] = (float)r.y;
            dst->data.fl[step*2] = (float)r.z;
        }
        else
        {
            dst->data.db[0] = r.x;
            dst->data.db[step] = r.y;
            dst->data.db[step*2] = r.z;
        }
    }
    // NOTE(review): a src that is neither a 3-element vector nor 3x3 falls
    // through both branches silently — dst is left untouched and 1 is
    // returned; confirm whether an explicit size error was intended.

    if( jacobian )
    {
        // J was computed as 3x9; convert/transpose to the caller's layout.
        if( depth == CV_32F )
        {
            if( jacobian->rows == matJ.rows )
                cvConvert( &matJ, jacobian );
            else
            {
                float Jf[3*9];
                CvMat _Jf = cvMat( matJ.rows, matJ.cols, CV_32FC1, Jf );
                cvConvert( &matJ, &_Jf );
                cvTranspose( &_Jf, jacobian );
            }
        }
        else if( jacobian->rows == matJ.rows )
            cvCopy( &matJ, jacobian );
        else
            cvTranspose( &matJ, jacobian );
    }

    return 1;
}
516
517
518
// Shared error message for every distortion-coefficient shape check below.
static const char* cvDistCoeffErr = "Distortion coefficients must be 1x4, 4x1, 1x5, 5x1, 1x8, 8x1, 1x12, 12x1, 1x14 or 14x1 floating-point vector";
519
520
static void cvProjectPoints2Internal( const CvMat* objectPoints,
521
const CvMat* r_vec,
522
const CvMat* t_vec,
523
const CvMat* A,
524
const CvMat* distCoeffs,
525
CvMat* imagePoints, CvMat* dpdr CV_DEFAULT(NULL),
526
CvMat* dpdt CV_DEFAULT(NULL), CvMat* dpdf CV_DEFAULT(NULL),
527
CvMat* dpdc CV_DEFAULT(NULL), CvMat* dpdk CV_DEFAULT(NULL),
528
CvMat* dpdo CV_DEFAULT(NULL),
529
double aspectRatio CV_DEFAULT(0) )
530
{
531
Ptr<CvMat> matM, _m;
532
Ptr<CvMat> _dpdr, _dpdt, _dpdc, _dpdf, _dpdk;
533
Ptr<CvMat> _dpdo;
534
535
int i, j, count;
536
int calc_derivatives;
537
const CvPoint3D64f* M;
538
CvPoint2D64f* m;
539
double r[3], R[9], dRdr[27], t[3], a[9], k[14] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0}, fx, fy, cx, cy;
540
Matx33d matTilt = Matx33d::eye();
541
Matx33d dMatTiltdTauX(0,0,0,0,0,0,0,-1,0);
542
Matx33d dMatTiltdTauY(0,0,0,0,0,0,1,0,0);
543
CvMat _r, _t, _a = cvMat( 3, 3, CV_64F, a ), _k;
544
CvMat matR = cvMat( 3, 3, CV_64F, R ), _dRdr = cvMat( 3, 9, CV_64F, dRdr );
545
double *dpdr_p = 0, *dpdt_p = 0, *dpdk_p = 0, *dpdf_p = 0, *dpdc_p = 0;
546
double* dpdo_p = 0;
547
int dpdr_step = 0, dpdt_step = 0, dpdk_step = 0, dpdf_step = 0, dpdc_step = 0;
548
int dpdo_step = 0;
549
bool fixedAspectRatio = aspectRatio > FLT_EPSILON;
550
551
if( !CV_IS_MAT(objectPoints) || !CV_IS_MAT(r_vec) ||
552
!CV_IS_MAT(t_vec) || !CV_IS_MAT(A) ||
553
/*!CV_IS_MAT(distCoeffs) ||*/ !CV_IS_MAT(imagePoints) )
554
CV_Error( CV_StsBadArg, "One of required arguments is not a valid matrix" );
555
556
int total = objectPoints->rows * objectPoints->cols * CV_MAT_CN(objectPoints->type);
557
if(total % 3 != 0)
558
{
559
//we have stopped support of homogeneous coordinates because it cause ambiguity in interpretation of the input data
560
CV_Error( CV_StsBadArg, "Homogeneous coordinates are not supported" );
561
}
562
count = total / 3;
563
564
if( CV_IS_CONT_MAT(objectPoints->type) &&
565
(CV_MAT_DEPTH(objectPoints->type) == CV_32F || CV_MAT_DEPTH(objectPoints->type) == CV_64F)&&
566
((objectPoints->rows == 1 && CV_MAT_CN(objectPoints->type) == 3) ||
567
(objectPoints->rows == count && CV_MAT_CN(objectPoints->type)*objectPoints->cols == 3) ||
568
(objectPoints->rows == 3 && CV_MAT_CN(objectPoints->type) == 1 && objectPoints->cols == count)))
569
{
570
matM.reset(cvCreateMat( objectPoints->rows, objectPoints->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(objectPoints->type)) ));
571
cvConvert(objectPoints, matM);
572
}
573
else
574
{
575
// matM = cvCreateMat( 1, count, CV_64FC3 );
576
// cvConvertPointsHomogeneous( objectPoints, matM );
577
CV_Error( CV_StsBadArg, "Homogeneous coordinates are not supported" );
578
}
579
580
if( CV_IS_CONT_MAT(imagePoints->type) &&
581
(CV_MAT_DEPTH(imagePoints->type) == CV_32F || CV_MAT_DEPTH(imagePoints->type) == CV_64F) &&
582
((imagePoints->rows == 1 && CV_MAT_CN(imagePoints->type) == 2) ||
583
(imagePoints->rows == count && CV_MAT_CN(imagePoints->type)*imagePoints->cols == 2) ||
584
(imagePoints->rows == 2 && CV_MAT_CN(imagePoints->type) == 1 && imagePoints->cols == count)))
585
{
586
_m.reset(cvCreateMat( imagePoints->rows, imagePoints->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(imagePoints->type)) ));
587
cvConvert(imagePoints, _m);
588
}
589
else
590
{
591
// _m = cvCreateMat( 1, count, CV_64FC2 );
592
CV_Error( CV_StsBadArg, "Homogeneous coordinates are not supported" );
593
}
594
595
M = (CvPoint3D64f*)matM->data.db;
596
m = (CvPoint2D64f*)_m->data.db;
597
598
if( (CV_MAT_DEPTH(r_vec->type) != CV_64F && CV_MAT_DEPTH(r_vec->type) != CV_32F) ||
599
(((r_vec->rows != 1 && r_vec->cols != 1) ||
600
r_vec->rows*r_vec->cols*CV_MAT_CN(r_vec->type) != 3) &&
601
((r_vec->rows != 3 && r_vec->cols != 3) || CV_MAT_CN(r_vec->type) != 1)))
602
CV_Error( CV_StsBadArg, "Rotation must be represented by 1x3 or 3x1 "
603
"floating-point rotation vector, or 3x3 rotation matrix" );
604
605
if( r_vec->rows == 3 && r_vec->cols == 3 )
606
{
607
_r = cvMat( 3, 1, CV_64FC1, r );
608
cvRodrigues2( r_vec, &_r );
609
cvRodrigues2( &_r, &matR, &_dRdr );
610
cvCopy( r_vec, &matR );
611
}
612
else
613
{
614
_r = cvMat( r_vec->rows, r_vec->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(r_vec->type)), r );
615
cvConvert( r_vec, &_r );
616
cvRodrigues2( &_r, &matR, &_dRdr );
617
}
618
619
if( (CV_MAT_DEPTH(t_vec->type) != CV_64F && CV_MAT_DEPTH(t_vec->type) != CV_32F) ||
620
(t_vec->rows != 1 && t_vec->cols != 1) ||
621
t_vec->rows*t_vec->cols*CV_MAT_CN(t_vec->type) != 3 )
622
CV_Error( CV_StsBadArg,
623
"Translation vector must be 1x3 or 3x1 floating-point vector" );
624
625
_t = cvMat( t_vec->rows, t_vec->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(t_vec->type)), t );
626
cvConvert( t_vec, &_t );
627
628
if( (CV_MAT_TYPE(A->type) != CV_64FC1 && CV_MAT_TYPE(A->type) != CV_32FC1) ||
629
A->rows != 3 || A->cols != 3 )
630
CV_Error( CV_StsBadArg, "Instrinsic parameters must be 3x3 floating-point matrix" );
631
632
cvConvert( A, &_a );
633
fx = a[0]; fy = a[4];
634
cx = a[2]; cy = a[5];
635
636
if( fixedAspectRatio )
637
fx = fy*aspectRatio;
638
639
if( distCoeffs )
640
{
641
if( !CV_IS_MAT(distCoeffs) ||
642
(CV_MAT_DEPTH(distCoeffs->type) != CV_64F &&
643
CV_MAT_DEPTH(distCoeffs->type) != CV_32F) ||
644
(distCoeffs->rows != 1 && distCoeffs->cols != 1) ||
645
(distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 4 &&
646
distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 5 &&
647
distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 8 &&
648
distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 12 &&
649
distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 14) )
650
CV_Error( CV_StsBadArg, cvDistCoeffErr );
651
652
_k = cvMat( distCoeffs->rows, distCoeffs->cols,
653
CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), k );
654
cvConvert( distCoeffs, &_k );
655
if(k[12] != 0 || k[13] != 0)
656
{
657
detail::computeTiltProjectionMatrix(k[12], k[13],
658
&matTilt, &dMatTiltdTauX, &dMatTiltdTauY);
659
}
660
}
661
662
if( dpdr )
663
{
664
if( !CV_IS_MAT(dpdr) ||
665
(CV_MAT_TYPE(dpdr->type) != CV_32FC1 &&
666
CV_MAT_TYPE(dpdr->type) != CV_64FC1) ||
667
dpdr->rows != count*2 || dpdr->cols != 3 )
668
CV_Error( CV_StsBadArg, "dp/drot must be 2Nx3 floating-point matrix" );
669
670
if( CV_MAT_TYPE(dpdr->type) == CV_64FC1 )
671
{
672
_dpdr.reset(cvCloneMat(dpdr));
673
}
674
else
675
_dpdr.reset(cvCreateMat( 2*count, 3, CV_64FC1 ));
676
dpdr_p = _dpdr->data.db;
677
dpdr_step = _dpdr->step/sizeof(dpdr_p[0]);
678
}
679
680
if( dpdt )
681
{
682
if( !CV_IS_MAT(dpdt) ||
683
(CV_MAT_TYPE(dpdt->type) != CV_32FC1 &&
684
CV_MAT_TYPE(dpdt->type) != CV_64FC1) ||
685
dpdt->rows != count*2 || dpdt->cols != 3 )
686
CV_Error( CV_StsBadArg, "dp/dT must be 2Nx3 floating-point matrix" );
687
688
if( CV_MAT_TYPE(dpdt->type) == CV_64FC1 )
689
{
690
_dpdt.reset(cvCloneMat(dpdt));
691
}
692
else
693
_dpdt.reset(cvCreateMat( 2*count, 3, CV_64FC1 ));
694
dpdt_p = _dpdt->data.db;
695
dpdt_step = _dpdt->step/sizeof(dpdt_p[0]);
696
}
697
698
if( dpdf )
699
{
700
if( !CV_IS_MAT(dpdf) ||
701
(CV_MAT_TYPE(dpdf->type) != CV_32FC1 && CV_MAT_TYPE(dpdf->type) != CV_64FC1) ||
702
dpdf->rows != count*2 || dpdf->cols != 2 )
703
CV_Error( CV_StsBadArg, "dp/df must be 2Nx2 floating-point matrix" );
704
705
if( CV_MAT_TYPE(dpdf->type) == CV_64FC1 )
706
{
707
_dpdf.reset(cvCloneMat(dpdf));
708
}
709
else
710
_dpdf.reset(cvCreateMat( 2*count, 2, CV_64FC1 ));
711
dpdf_p = _dpdf->data.db;
712
dpdf_step = _dpdf->step/sizeof(dpdf_p[0]);
713
}
714
715
if( dpdc )
716
{
717
if( !CV_IS_MAT(dpdc) ||
718
(CV_MAT_TYPE(dpdc->type) != CV_32FC1 && CV_MAT_TYPE(dpdc->type) != CV_64FC1) ||
719
dpdc->rows != count*2 || dpdc->cols != 2 )
720
CV_Error( CV_StsBadArg, "dp/dc must be 2Nx2 floating-point matrix" );
721
722
if( CV_MAT_TYPE(dpdc->type) == CV_64FC1 )
723
{
724
_dpdc.reset(cvCloneMat(dpdc));
725
}
726
else
727
_dpdc.reset(cvCreateMat( 2*count, 2, CV_64FC1 ));
728
dpdc_p = _dpdc->data.db;
729
dpdc_step = _dpdc->step/sizeof(dpdc_p[0]);
730
}
731
732
if( dpdk )
733
{
734
if( !CV_IS_MAT(dpdk) ||
735
(CV_MAT_TYPE(dpdk->type) != CV_32FC1 && CV_MAT_TYPE(dpdk->type) != CV_64FC1) ||
736
dpdk->rows != count*2 || (dpdk->cols != 14 && dpdk->cols != 12 && dpdk->cols != 8 && dpdk->cols != 5 && dpdk->cols != 4 && dpdk->cols != 2) )
737
CV_Error( CV_StsBadArg, "dp/df must be 2Nx14, 2Nx12, 2Nx8, 2Nx5, 2Nx4 or 2Nx2 floating-point matrix" );
738
739
if( !distCoeffs )
740
CV_Error( CV_StsNullPtr, "distCoeffs is NULL while dpdk is not" );
741
742
if( CV_MAT_TYPE(dpdk->type) == CV_64FC1 )
743
{
744
_dpdk.reset(cvCloneMat(dpdk));
745
}
746
else
747
_dpdk.reset(cvCreateMat( dpdk->rows, dpdk->cols, CV_64FC1 ));
748
dpdk_p = _dpdk->data.db;
749
dpdk_step = _dpdk->step/sizeof(dpdk_p[0]);
750
}
751
752
if( dpdo )
753
{
754
if( !CV_IS_MAT( dpdo ) || ( CV_MAT_TYPE( dpdo->type ) != CV_32FC1
755
&& CV_MAT_TYPE( dpdo->type ) != CV_64FC1 )
756
|| dpdo->rows != count * 2 || dpdo->cols != count * 3 )
757
CV_Error( CV_StsBadArg, "dp/do must be 2Nx3N floating-point matrix" );
758
759
if( CV_MAT_TYPE( dpdo->type ) == CV_64FC1 )
760
{
761
_dpdo.reset( cvCloneMat( dpdo ) );
762
}
763
else
764
_dpdo.reset( cvCreateMat( 2 * count, 3 * count, CV_64FC1 ) );
765
cvZero(_dpdo);
766
dpdo_p = _dpdo->data.db;
767
dpdo_step = _dpdo->step / sizeof( dpdo_p[0] );
768
}
769
770
calc_derivatives = dpdr || dpdt || dpdf || dpdc || dpdk || dpdo;
771
772
for( i = 0; i < count; i++ )
773
{
774
double X = M[i].x, Y = M[i].y, Z = M[i].z;
775
double x = R[0]*X + R[1]*Y + R[2]*Z + t[0];
776
double y = R[3]*X + R[4]*Y + R[5]*Z + t[1];
777
double z = R[6]*X + R[7]*Y + R[8]*Z + t[2];
778
double r2, r4, r6, a1, a2, a3, cdist, icdist2;
779
double xd, yd, xd0, yd0, invProj;
780
Vec3d vecTilt;
781
Vec3d dVecTilt;
782
Matx22d dMatTilt;
783
Vec2d dXdYd;
784
785
double z0 = z;
786
z = z ? 1./z : 1;
787
x *= z; y *= z;
788
789
r2 = x*x + y*y;
790
r4 = r2*r2;
791
r6 = r4*r2;
792
a1 = 2*x*y;
793
a2 = r2 + 2*x*x;
794
a3 = r2 + 2*y*y;
795
cdist = 1 + k[0]*r2 + k[1]*r4 + k[4]*r6;
796
icdist2 = 1./(1 + k[5]*r2 + k[6]*r4 + k[7]*r6);
797
xd0 = x*cdist*icdist2 + k[2]*a1 + k[3]*a2 + k[8]*r2+k[9]*r4;
798
yd0 = y*cdist*icdist2 + k[2]*a3 + k[3]*a1 + k[10]*r2+k[11]*r4;
799
800
// additional distortion by projecting onto a tilt plane
801
vecTilt = matTilt*Vec3d(xd0, yd0, 1);
802
invProj = vecTilt(2) ? 1./vecTilt(2) : 1;
803
xd = invProj * vecTilt(0);
804
yd = invProj * vecTilt(1);
805
806
m[i].x = xd*fx + cx;
807
m[i].y = yd*fy + cy;
808
809
if( calc_derivatives )
810
{
811
if( dpdc_p )
812
{
813
dpdc_p[0] = 1; dpdc_p[1] = 0; // dp_xdc_x; dp_xdc_y
814
dpdc_p[dpdc_step] = 0;
815
dpdc_p[dpdc_step+1] = 1;
816
dpdc_p += dpdc_step*2;
817
}
818
819
if( dpdf_p )
820
{
821
if( fixedAspectRatio )
822
{
823
dpdf_p[0] = 0; dpdf_p[1] = xd*aspectRatio; // dp_xdf_x; dp_xdf_y
824
dpdf_p[dpdf_step] = 0;
825
dpdf_p[dpdf_step+1] = yd;
826
}
827
else
828
{
829
dpdf_p[0] = xd; dpdf_p[1] = 0;
830
dpdf_p[dpdf_step] = 0;
831
dpdf_p[dpdf_step+1] = yd;
832
}
833
dpdf_p += dpdf_step*2;
834
}
835
for (int row = 0; row < 2; ++row)
836
for (int col = 0; col < 2; ++col)
837
dMatTilt(row,col) = matTilt(row,col)*vecTilt(2)
838
- matTilt(2,col)*vecTilt(row);
839
double invProjSquare = (invProj*invProj);
840
dMatTilt *= invProjSquare;
841
if( dpdk_p )
842
{
843
dXdYd = dMatTilt*Vec2d(x*icdist2*r2, y*icdist2*r2);
844
dpdk_p[0] = fx*dXdYd(0);
845
dpdk_p[dpdk_step] = fy*dXdYd(1);
846
dXdYd = dMatTilt*Vec2d(x*icdist2*r4, y*icdist2*r4);
847
dpdk_p[1] = fx*dXdYd(0);
848
dpdk_p[dpdk_step+1] = fy*dXdYd(1);
849
if( _dpdk->cols > 2 )
850
{
851
dXdYd = dMatTilt*Vec2d(a1, a3);
852
dpdk_p[2] = fx*dXdYd(0);
853
dpdk_p[dpdk_step+2] = fy*dXdYd(1);
854
dXdYd = dMatTilt*Vec2d(a2, a1);
855
dpdk_p[3] = fx*dXdYd(0);
856
dpdk_p[dpdk_step+3] = fy*dXdYd(1);
857
if( _dpdk->cols > 4 )
858
{
859
dXdYd = dMatTilt*Vec2d(x*icdist2*r6, y*icdist2*r6);
860
dpdk_p[4] = fx*dXdYd(0);
861
dpdk_p[dpdk_step+4] = fy*dXdYd(1);
862
863
if( _dpdk->cols > 5 )
864
{
865
dXdYd = dMatTilt*Vec2d(
866
x*cdist*(-icdist2)*icdist2*r2, y*cdist*(-icdist2)*icdist2*r2);
867
dpdk_p[5] = fx*dXdYd(0);
868
dpdk_p[dpdk_step+5] = fy*dXdYd(1);
869
dXdYd = dMatTilt*Vec2d(
870
x*cdist*(-icdist2)*icdist2*r4, y*cdist*(-icdist2)*icdist2*r4);
871
dpdk_p[6] = fx*dXdYd(0);
872
dpdk_p[dpdk_step+6] = fy*dXdYd(1);
873
dXdYd = dMatTilt*Vec2d(
874
x*cdist*(-icdist2)*icdist2*r6, y*cdist*(-icdist2)*icdist2*r6);
875
dpdk_p[7] = fx*dXdYd(0);
876
dpdk_p[dpdk_step+7] = fy*dXdYd(1);
877
if( _dpdk->cols > 8 )
878
{
879
dXdYd = dMatTilt*Vec2d(r2, 0);
880
dpdk_p[8] = fx*dXdYd(0); //s1
881
dpdk_p[dpdk_step+8] = fy*dXdYd(1); //s1
882
dXdYd = dMatTilt*Vec2d(r4, 0);
883
dpdk_p[9] = fx*dXdYd(0); //s2
884
dpdk_p[dpdk_step+9] = fy*dXdYd(1); //s2
885
dXdYd = dMatTilt*Vec2d(0, r2);
886
dpdk_p[10] = fx*dXdYd(0);//s3
887
dpdk_p[dpdk_step+10] = fy*dXdYd(1); //s3
888
dXdYd = dMatTilt*Vec2d(0, r4);
889
dpdk_p[11] = fx*dXdYd(0);//s4
890
dpdk_p[dpdk_step+11] = fy*dXdYd(1); //s4
891
if( _dpdk->cols > 12 )
892
{
893
dVecTilt = dMatTiltdTauX * Vec3d(xd0, yd0, 1);
894
dpdk_p[12] = fx * invProjSquare * (
895
dVecTilt(0) * vecTilt(2) - dVecTilt(2) * vecTilt(0));
896
dpdk_p[dpdk_step+12] = fy*invProjSquare * (
897
dVecTilt(1) * vecTilt(2) - dVecTilt(2) * vecTilt(1));
898
dVecTilt = dMatTiltdTauY * Vec3d(xd0, yd0, 1);
899
dpdk_p[13] = fx * invProjSquare * (
900
dVecTilt(0) * vecTilt(2) - dVecTilt(2) * vecTilt(0));
901
dpdk_p[dpdk_step+13] = fy * invProjSquare * (
902
dVecTilt(1) * vecTilt(2) - dVecTilt(2) * vecTilt(1));
903
}
904
}
905
}
906
}
907
}
908
dpdk_p += dpdk_step*2;
909
}
910
911
if( dpdt_p )
912
{
913
double dxdt[] = { z, 0, -x*z }, dydt[] = { 0, z, -y*z };
914
for( j = 0; j < 3; j++ )
915
{
916
double dr2dt = 2*x*dxdt[j] + 2*y*dydt[j];
917
double dcdist_dt = k[0]*dr2dt + 2*k[1]*r2*dr2dt + 3*k[4]*r4*dr2dt;
918
double dicdist2_dt = -icdist2*icdist2*(k[5]*dr2dt + 2*k[6]*r2*dr2dt + 3*k[7]*r4*dr2dt);
919
double da1dt = 2*(x*dydt[j] + y*dxdt[j]);
920
double dmxdt = (dxdt[j]*cdist*icdist2 + x*dcdist_dt*icdist2 + x*cdist*dicdist2_dt +
921
k[2]*da1dt + k[3]*(dr2dt + 4*x*dxdt[j]) + k[8]*dr2dt + 2*r2*k[9]*dr2dt);
922
double dmydt = (dydt[j]*cdist*icdist2 + y*dcdist_dt*icdist2 + y*cdist*dicdist2_dt +
923
k[2]*(dr2dt + 4*y*dydt[j]) + k[3]*da1dt + k[10]*dr2dt + 2*r2*k[11]*dr2dt);
924
dXdYd = dMatTilt*Vec2d(dmxdt, dmydt);
925
dpdt_p[j] = fx*dXdYd(0);
926
dpdt_p[dpdt_step+j] = fy*dXdYd(1);
927
}
928
dpdt_p += dpdt_step*2;
929
}
930
931
if( dpdr_p )
932
{
933
double dx0dr[] =
934
{
935
X*dRdr[0] + Y*dRdr[1] + Z*dRdr[2],
936
X*dRdr[9] + Y*dRdr[10] + Z*dRdr[11],
937
X*dRdr[18] + Y*dRdr[19] + Z*dRdr[20]
938
};
939
double dy0dr[] =
940
{
941
X*dRdr[3] + Y*dRdr[4] + Z*dRdr[5],
942
X*dRdr[12] + Y*dRdr[13] + Z*dRdr[14],
943
X*dRdr[21] + Y*dRdr[22] + Z*dRdr[23]
944
};
945
double dz0dr[] =
946
{
947
X*dRdr[6] + Y*dRdr[7] + Z*dRdr[8],
948
X*dRdr[15] + Y*dRdr[16] + Z*dRdr[17],
949
X*dRdr[24] + Y*dRdr[25] + Z*dRdr[26]
950
};
951
for( j = 0; j < 3; j++ )
952
{
953
double dxdr = z*(dx0dr[j] - x*dz0dr[j]);
954
double dydr = z*(dy0dr[j] - y*dz0dr[j]);
955
double dr2dr = 2*x*dxdr + 2*y*dydr;
956
double dcdist_dr = (k[0] + 2*k[1]*r2 + 3*k[4]*r4)*dr2dr;
957
double dicdist2_dr = -icdist2*icdist2*(k[5] + 2*k[6]*r2 + 3*k[7]*r4)*dr2dr;
958
double da1dr = 2*(x*dydr + y*dxdr);
959
double dmxdr = (dxdr*cdist*icdist2 + x*dcdist_dr*icdist2 + x*cdist*dicdist2_dr +
960
k[2]*da1dr + k[3]*(dr2dr + 4*x*dxdr) + (k[8] + 2*r2*k[9])*dr2dr);
961
double dmydr = (dydr*cdist*icdist2 + y*dcdist_dr*icdist2 + y*cdist*dicdist2_dr +
962
k[2]*(dr2dr + 4*y*dydr) + k[3]*da1dr + (k[10] + 2*r2*k[11])*dr2dr);
963
dXdYd = dMatTilt*Vec2d(dmxdr, dmydr);
964
dpdr_p[j] = fx*dXdYd(0);
965
dpdr_p[dpdr_step+j] = fy*dXdYd(1);
966
}
967
dpdr_p += dpdr_step*2;
968
}
969
970
if( dpdo_p )
971
{
972
double dxdo[] = { z * ( R[0] - x * z * z0 * R[6] ),
973
z * ( R[1] - x * z * z0 * R[7] ),
974
z * ( R[2] - x * z * z0 * R[8] ) };
975
double dydo[] = { z * ( R[3] - y * z * z0 * R[6] ),
976
z * ( R[4] - y * z * z0 * R[7] ),
977
z * ( R[5] - y * z * z0 * R[8] ) };
978
for( j = 0; j < 3; j++ )
979
{
980
double dr2do = 2 * x * dxdo[j] + 2 * y * dydo[j];
981
double dr4do = 2 * r2 * dr2do;
982
double dr6do = 3 * r4 * dr2do;
983
double da1do = 2 * y * dxdo[j] + 2 * x * dydo[j];
984
double da2do = dr2do + 4 * x * dxdo[j];
985
double da3do = dr2do + 4 * y * dydo[j];
986
double dcdist_do
987
= k[0] * dr2do + k[1] * dr4do + k[4] * dr6do;
988
double dicdist2_do = -icdist2 * icdist2
989
* ( k[5] * dr2do + k[6] * dr4do + k[7] * dr6do );
990
double dxd0_do = cdist * icdist2 * dxdo[j]
991
+ x * icdist2 * dcdist_do + x * cdist * dicdist2_do
992
+ k[2] * da1do + k[3] * da2do + k[8] * dr2do
993
+ k[9] * dr4do;
994
double dyd0_do = cdist * icdist2 * dydo[j]
995
+ y * icdist2 * dcdist_do + y * cdist * dicdist2_do
996
+ k[2] * da3do + k[3] * da1do + k[10] * dr2do
997
+ k[11] * dr4do;
998
dXdYd = dMatTilt * Vec2d( dxd0_do, dyd0_do );
999
dpdo_p[i * 3 + j] = fx * dXdYd( 0 );
1000
dpdo_p[dpdo_step + i * 3 + j] = fy * dXdYd( 1 );
1001
}
1002
dpdo_p += dpdo_step * 2;
1003
}
1004
}
1005
}
1006
1007
if( _m != imagePoints )
1008
cvConvert( _m, imagePoints );
1009
1010
if( _dpdr != dpdr )
1011
cvConvert( _dpdr, dpdr );
1012
1013
if( _dpdt != dpdt )
1014
cvConvert( _dpdt, dpdt );
1015
1016
if( _dpdf != dpdf )
1017
cvConvert( _dpdf, dpdf );
1018
1019
if( _dpdc != dpdc )
1020
cvConvert( _dpdc, dpdc );
1021
1022
if( _dpdk != dpdk )
1023
cvConvert( _dpdk, dpdk );
1024
1025
if( _dpdo != dpdo )
1026
cvConvert( _dpdo, dpdo );
1027
}
1028
1029
/* Thin C-API wrapper: projects 3D object points into the image plane and
   optionally fills the Jacobians of the projections w.r.t. the rotation
   vector, translation, focal lengths, principal point and distortion
   coefficients.  Simply delegates to the internal worker, requesting no
   Jacobian w.r.t. the object points themselves (dpdo = NULL). */
CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
                               const CvMat* r_vec,
                               const CvMat* t_vec,
                               const CvMat* A,
                               const CvMat* distCoeffs,
                               CvMat* imagePoints, CvMat* dpdr,
                               CvMat* dpdt, CvMat* dpdf,
                               CvMat* dpdc, CvMat* dpdk,
                               double aspectRatio )
{
    cvProjectPoints2Internal( objectPoints, r_vec, t_vec, A, distCoeffs,
                              imagePoints, dpdr, dpdt, dpdf, dpdc, dpdk,
                              /*dpdo=*/NULL, aspectRatio );
}
1042
1043
/* Estimates the extrinsic camera parameters — rotation vector `rvec` and
   translation vector `tvec` — that map the 3D `objectPoints` onto the
   observed 2D `imagePoints`, given the intrinsic matrix `A` and the
   (optional) distortion coefficients `distCoeffs`.
   If useExtrinsicGuess != 0, the pose in rvec/tvec is used as the starting
   point; otherwise an initial pose is computed from a homography (planar
   point sets) or by DLT (non-planar sets).  The pose is then refined by
   Levenberg-Marquardt minimization of the reprojection error. */
CV_IMPL void cvFindExtrinsicCameraParams2( const CvMat* objectPoints,
                  const CvMat* imagePoints, const CvMat* A,
                  const CvMat* distCoeffs, CvMat* rvec, CvMat* tvec,
                  int useExtrinsicGuess )
{
    const int max_iter = 20;    // iteration cap for the LM pose refinement
    Ptr<CvMat> matM, _Mxy, _m, _mn, matL;

    int i, count;
    // a: intrinsics; ar: identity ("ideal") intrinsics used after
    // undistortion; R: 3x3 rotation matrix
    double a[9], ar[9]={1,0,0,0,1,0,0,0,1}, R[9];
    double MM[9], U[9], V[9], W[3];
    cv::Scalar Mc;                      // centroid of the object points
    double param[6];                    // packed pose: param[0..2]=rvec, param[3..5]=tvec
    CvMat matA = cvMat( 3, 3, CV_64F, a );
    CvMat _Ar = cvMat( 3, 3, CV_64F, ar );
    CvMat matR = cvMat( 3, 3, CV_64F, R );
    CvMat _r = cvMat( 3, 1, CV_64F, param );       // view of param[0..2]
    CvMat _t = cvMat( 3, 1, CV_64F, param + 3 );   // view of param[3..5]
    CvMat _Mc = cvMat( 1, 3, CV_64F, Mc.val );
    CvMat _MM = cvMat( 3, 3, CV_64F, MM );
    CvMat matU = cvMat( 3, 3, CV_64F, U );
    CvMat matV = cvMat( 3, 3, CV_64F, V );
    CvMat matW = cvMat( 3, 1, CV_64F, W );
    CvMat _param = cvMat( 6, 1, CV_64F, param );
    CvMat _dpdr, _dpdt;

    CV_Assert( CV_IS_MAT(objectPoints) && CV_IS_MAT(imagePoints) &&
        CV_IS_MAT(A) && CV_IS_MAT(rvec) && CV_IS_MAT(tvec) );

    count = MAX(objectPoints->cols, objectPoints->rows);
    matM.reset(cvCreateMat( 1, count, CV_64FC3 ));
    _m.reset(cvCreateMat( 1, count, CV_64FC2 ));

    cvConvertPointsHomogeneous( objectPoints, matM );
    cvConvertPointsHomogeneous( imagePoints, _m );
    cvConvert( A, &matA );

    // rvec/tvec must be 3-element float/double vectors (any 1xN/Nx1 layout)
    CV_Assert( (CV_MAT_DEPTH(rvec->type) == CV_64F || CV_MAT_DEPTH(rvec->type) == CV_32F) &&
        (rvec->rows == 1 || rvec->cols == 1) && rvec->rows*rvec->cols*CV_MAT_CN(rvec->type) == 3 );

    CV_Assert( (CV_MAT_DEPTH(tvec->type) == CV_64F || CV_MAT_DEPTH(tvec->type) == CV_32F) &&
        (tvec->rows == 1 || tvec->cols == 1) && tvec->rows*tvec->cols*CV_MAT_CN(tvec->type) == 3 );

    CV_Assert((count >= 4) || (count == 3 && useExtrinsicGuess)); // it is unsafe to call LM optimisation without an extrinsic guess in the case of 3 points. This is because there is no guarantee that it will converge on the correct solution.

    _mn.reset(cvCreateMat( 1, count, CV_64FC2 ));
    _Mxy.reset(cvCreateMat( 1, count, CV_64FC2 ));

    // normalize image points
    // (unapply the intrinsic matrix transformation and distortion)
    cvUndistortPoints( _m, _mn, &matA, distCoeffs, 0, &_Ar );

    if( useExtrinsicGuess )
    {
        // start the refinement from the caller-supplied pose
        CvMat _r_temp = cvMat(rvec->rows, rvec->cols,
            CV_MAKETYPE(CV_64F,CV_MAT_CN(rvec->type)), param );
        CvMat _t_temp = cvMat(tvec->rows, tvec->cols,
            CV_MAKETYPE(CV_64F,CV_MAT_CN(tvec->type)), param + 3);
        cvConvert( rvec, &_r_temp );
        cvConvert( tvec, &_t_temp );
    }
    else
    {
        Mc = cvAvg(matM);
        cvReshape( matM, matM, 1, count );
        // second-moment matrix of the (centered) object points; its singular
        // values reveal whether the points are (nearly) coplanar
        cvMulTransposed( matM, &_MM, 1, &_Mc );
        cvSVD( &_MM, &matW, 0, &matV, CV_SVD_MODIFY_A + CV_SVD_V_T );

        // initialize extrinsic parameters
        if( W[2]/W[1] < 1e-3)
        {
            // a planar structure case (all M's lie in the same plane)
            double tt[3], h[9], h1_norm, h2_norm;
            CvMat* R_transform = &matV;
            CvMat T_transform = cvMat( 3, 1, CV_64F, tt );
            CvMat matH = cvMat( 3, 3, CV_64F, h );
            CvMat _h1, _h2, _h3;

            if( V[2]*V[2] + V[5]*V[5] < 1e-10 )
                cvSetIdentity( R_transform );

            if( cvDet(R_transform) < 0 )
                cvScale( R_transform, R_transform, -1 );

            cvGEMM( R_transform, &_Mc, -1, 0, 0, &T_transform, CV_GEMM_B_T );

            // rotate/translate the object points into the fitted plane so
            // they become 2D (x, y) points for homography estimation
            for( i = 0; i < count; i++ )
            {
                const double* Rp = R_transform->data.db;
                const double* Tp = T_transform.data.db;
                const double* src = matM->data.db + i*3;
                double* dst = _Mxy->data.db + i*2;

                dst[0] = Rp[0]*src[0] + Rp[1]*src[1] + Rp[2]*src[2] + Tp[0];
                dst[1] = Rp[3]*src[0] + Rp[4]*src[1] + Rp[5]*src[2] + Tp[1];
            }

            cvFindHomography( _Mxy, _mn, &matH );

            if( cvCheckArr(&matH, CV_CHECK_QUIET) )
            {
                // decompose the homography into a rotation and translation:
                // normalize the first two columns, take their cross product
                // as the third, and snap to a valid rotation via Rodrigues
                cvGetCol( &matH, &_h1, 0 );
                _h2 = _h1; _h2.data.db++;
                _h3 = _h2; _h3.data.db++;
                h1_norm = std::sqrt(h[0]*h[0] + h[3]*h[3] + h[6]*h[6]);
                h2_norm = std::sqrt(h[1]*h[1] + h[4]*h[4] + h[7]*h[7]);

                cvScale( &_h1, &_h1, 1./MAX(h1_norm, DBL_EPSILON) );
                cvScale( &_h2, &_h2, 1./MAX(h2_norm, DBL_EPSILON) );
                cvScale( &_h3, &_t, 2./MAX(h1_norm + h2_norm, DBL_EPSILON));
                cvCrossProduct( &_h1, &_h2, &_h3 );

                cvRodrigues2( &matH, &_r );
                cvRodrigues2( &_r, &matH );
                cvMatMulAdd( &matH, &T_transform, &_t, &_t );
                cvMatMul( &matH, R_transform, &matR );
            }
            else
            {
                // homography estimation produced invalid values: fall back
                // to an identity pose and let LM refinement do the work
                cvSetIdentity( &matR );
                cvZero( &_t );
            }

            cvRodrigues2( &matR, &_r );
        }
        else
        {
            // non-planar structure. Use DLT method
            double* L;
            double LL[12*12], LW[12], LV[12*12], sc;
            CvMat _LL = cvMat( 12, 12, CV_64F, LL );
            CvMat _LW = cvMat( 12, 1, CV_64F, LW );
            CvMat _LV = cvMat( 12, 12, CV_64F, LV );
            CvMat _RRt, _RR, _tt;
            CvPoint3D64f* M = (CvPoint3D64f*)matM->data.db;
            CvPoint2D64f* mn = (CvPoint2D64f*)_mn->data.db;

            matL.reset(cvCreateMat( 2*count, 12, CV_64F ));
            L = matL->data.db;

            // build the 2*count x 12 DLT system: two rows per point
            for( i = 0; i < count; i++, L += 24 )
            {
                double x = -mn[i].x, y = -mn[i].y;
                L[0] = L[16] = M[i].x;
                L[1] = L[17] = M[i].y;
                L[2] = L[18] = M[i].z;
                L[3] = L[19] = 1.;
                L[4] = L[5] = L[6] = L[7] = 0.;
                L[12] = L[13] = L[14] = L[15] = 0.;
                L[8] = x*M[i].x;
                L[9] = x*M[i].y;
                L[10] = x*M[i].z;
                L[11] = x;
                L[20] = y*M[i].x;
                L[21] = y*M[i].y;
                L[22] = y*M[i].z;
                L[23] = y;
            }

            // the solution is the right singular vector of L with the
            // smallest singular value (last row of V^T of L^T L)
            cvMulTransposed( matL, &_LL, 1 );
            cvSVD( &_LL, &_LW, 0, &_LV, CV_SVD_MODIFY_A + CV_SVD_V_T );
            _RRt = cvMat( 3, 4, CV_64F, LV + 11*12 );
            cvGetCols( &_RRt, &_RR, 0, 3 );
            cvGetCol( &_RRt, &_tt, 3 );
            if( cvDet(&_RR) < 0 )
                cvScale( &_RRt, &_RRt, -1 );
            sc = cvNorm(&_RR);
            CV_Assert(fabs(sc) > DBL_EPSILON);
            // project the estimated 3x3 block onto the rotation group and
            // rescale the translation accordingly
            cvSVD( &_RR, &matW, &matU, &matV, CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T );
            cvGEMM( &matU, &matV, 1, 0, 0, &matR, CV_GEMM_A_T );
            cvScale( &_tt, &_t, cvNorm(&matR)/sc );
            cvRodrigues2( &matR, &_r );
        }
    }

    cvReshape( matM, matM, 3, 1 );
    cvReshape( _mn, _mn, 2, 1 );

    // refine extrinsic parameters using iterative algorithm
    CvLevMarq solver( 6, count*2, cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,max_iter,FLT_EPSILON), true);
    cvCopy( &_param, solver.param );

    for(;;)
    {
        CvMat *matJ = 0, *_err = 0;
        const CvMat *__param = 0;
        bool proceed = solver.update( __param, matJ, _err );
        cvCopy( __param, &_param );
        if( !proceed || !_err )
            break;
        cvReshape( _err, _err, 2, 1 );
        if( matJ )
        {
            // the solver asked for the Jacobian: columns 0-2 are d/d(rvec),
            // columns 3-5 are d/d(tvec)
            cvGetCols( matJ, &_dpdr, 0, 3 );
            cvGetCols( matJ, &_dpdt, 3, 6 );
            cvProjectPoints2( matM, &_r, &_t, &matA, distCoeffs,
                              _err, &_dpdr, &_dpdt, 0, 0, 0 );
        }
        else
        {
            cvProjectPoints2( matM, &_r, &_t, &matA, distCoeffs,
                              _err, 0, 0, 0, 0, 0 );
        }
        // residual = projected points - observed points
        cvSub(_err, _m, _err);
        cvReshape( _err, _err, 1, 2*count );
    }
    cvCopy( solver.param, &_param );

    // write the refined pose back in the caller's layout and depth
    _r = cvMat( rvec->rows, rvec->cols,
        CV_MAKETYPE(CV_64F,CV_MAT_CN(rvec->type)), param );
    _t = cvMat( tvec->rows, tvec->cols,
        CV_MAKETYPE(CV_64F,CV_MAT_CN(tvec->type)), param + 3 );

    cvConvert( &_r, rvec );
    cvConvert( &_t, tvec );
}
1259
1260
/* Computes an initial camera matrix from several views of a planar
   calibration pattern.  For each view a homography is estimated; after
   shifting its origin to the assumed principal point (image center), each
   view contributes two linear constraints on (1/fx^2, 1/fy^2), which are
   stacked and solved by least squares.  If aspectRatio != 0, fx and fy are
   afterwards adjusted so that fx/fy == aspectRatio. */
CV_IMPL void cvInitIntrinsicParams2D( const CvMat* objectPoints,
                         const CvMat* imagePoints, const CvMat* npoints,
                         CvSize imageSize, CvMat* cameraMatrix,
                         double aspectRatio )
{
    Ptr<CvMat> matA, _b, _allH;

    int i, j, pos, nimages, ni = 0;
    double a[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 1 };  // output intrinsics (row-major 3x3)
    double H[9] = {0}, f[2] = {0};                 // per-view homography; f = (1/fx^2, 1/fy^2)
    CvMat _a = cvMat( 3, 3, CV_64F, a );
    CvMat matH = cvMat( 3, 3, CV_64F, H );
    CvMat _f = cvMat( 2, 1, CV_64F, f );

    assert( CV_MAT_TYPE(npoints->type) == CV_32SC1 &&
            CV_IS_MAT_CONT(npoints->type) );
    nimages = npoints->rows + npoints->cols - 1;

    if( (CV_MAT_TYPE(objectPoints->type) != CV_32FC3 &&
        CV_MAT_TYPE(objectPoints->type) != CV_64FC3) ||
        (CV_MAT_TYPE(imagePoints->type) != CV_32FC2 &&
        CV_MAT_TYPE(imagePoints->type) != CV_64FC2) )
        CV_Error( CV_StsUnsupportedFormat, "Both object points and image points must be 2D" );

    if( objectPoints->rows != 1 || imagePoints->rows != 1 )
        CV_Error( CV_StsBadSize, "object points and image points must be a single-row matrices" );

    matA.reset(cvCreateMat( 2*nimages, 2, CV_64F ));
    _b.reset(cvCreateMat( 2*nimages, 1, CV_64F ));
    // the principal point is fixed at the image center
    a[2] = (!imageSize.width) ? 0.5 : (imageSize.width)*0.5;
    a[5] = (!imageSize.height) ? 0.5 : (imageSize.height)*0.5;
    _allH.reset(cvCreateMat( nimages, 9, CV_64F ));

    // extract vanishing points in order to obtain initial value for the focal length
    for( i = 0, pos = 0; i < nimages; i++, pos += ni )
    {
        double* Ap = matA->data.db + i*4;   // this view's two rows of A
        double* bp = _b->data.db + i*2;     // this view's two entries of b
        ni = npoints->data.i[i];
        double h[3], v[3], d1[3], d2[3];
        double n[4] = {0,0,0,0};            // squared norms for normalization
        CvMat _m, matM;
        cvGetCols( objectPoints, &matM, pos, pos + ni );
        cvGetCols( imagePoints, &_m, pos, pos + ni );

        cvFindHomography( &matM, &_m, &matH );
        memcpy( _allH->data.db + i*9, H, sizeof(H) );

        // shift the homography so the assumed principal point maps to the origin
        H[0] -= H[6]*a[2]; H[1] -= H[7]*a[2]; H[2] -= H[8]*a[2];
        H[3] -= H[6]*a[5]; H[4] -= H[7]*a[5]; H[5] -= H[8]*a[5];

        // h, v: first and second columns of the shifted homography;
        // d1, d2: their normalized sum and difference (the "diagonals")
        for( j = 0; j < 3; j++ )
        {
            double t0 = H[j*3], t1 = H[j*3+1];
            h[j] = t0; v[j] = t1;
            d1[j] = (t0 + t1)*0.5;
            d2[j] = (t0 - t1)*0.5;
            n[0] += t0*t0; n[1] += t1*t1;
            n[2] += d1[j]*d1[j]; n[3] += d2[j]*d2[j];
        }

        for( j = 0; j < 4; j++ )
            n[j] = 1./std::sqrt(n[j]);

        for( j = 0; j < 3; j++ )
        {
            h[j] *= n[0]; v[j] *= n[1];
            d1[j] *= n[2]; d2[j] *= n[3];
        }

        // two orthogonality constraints per view -> two rows of A*f = b
        Ap[0] = h[0]*v[0]; Ap[1] = h[1]*v[1];
        Ap[2] = d1[0]*d2[0]; Ap[3] = d1[1]*d2[1];
        bp[0] = -h[2]*v[2]; bp[1] = -d1[2]*d2[2];
    }

    // least-squares solve for (1/fx^2, 1/fy^2), then recover fx, fy
    cvSolve( matA, _b, &_f, CV_NORMAL + CV_SVD );
    a[0] = std::sqrt(fabs(1./f[0]));   // fx
    a[4] = std::sqrt(fabs(1./f[1]));   // fy
    if( aspectRatio != 0 )
    {
        // enforce the requested fx/fy ratio while keeping fx + fy unchanged
        double tf = (a[0] + a[4])/(aspectRatio + 1.);
        a[0] = aspectRatio*tf;
        a[4] = tf;
    }

    cvConvert( &_a, cameraMatrix );
}
1347
1348
// Copies into `dst` (CV_64FC1) the sub-matrix of `src` formed by the columns
// and rows whose corresponding entries in `cols` / `rows` are non-zero.
static void subMatrix(const cv::Mat& src, cv::Mat& dst, const std::vector<uchar>& cols,
                      const std::vector<uchar>& rows) {
    const int keptCols = cv::countNonZero(cols);
    cv::Mat colFiltered(src.rows, keptCols, CV_64FC1);

    // First pass: keep only the selected columns.
    int outCol = 0;
    for (int inCol = 0; inCol < (int)cols.size(); inCol++)
    {
        if (cols[inCol])
            src.col(inCol).copyTo(colFiltered.col(outCol++));
    }

    // Second pass: keep only the selected rows of the column-filtered matrix.
    const int keptRows = cv::countNonZero(rows);
    dst.create(keptRows, keptCols, CV_64FC1);
    int outRow = 0;
    for (int inRow = 0; inRow < (int)rows.size(); inRow++)
    {
        if (rows[inRow])
            colFiltered.row(inRow).copyTo(dst.row(outRow++));
    }
}
1371
1372
/* Core of cvCalibrateCamera2 / cvCalibrateCamera4: estimates the camera
   intrinsics (cameraMatrix + distCoeffs) and per-view extrinsics
   (rvecs/tvecs) from several views of a calibration pattern by
   Levenberg-Marquardt minimization of the total reprojection error.
   When iFixedPoint selects an interior point of the first view
   ("object-releasing" calibration), the object point coordinates themselves
   are refined as well and written to newObjPoints.  Optionally fills
   stdDevs (per-parameter standard deviations, from the inverse of JtJ) and
   perViewErrors (per-view RMS reprojection error).
   Returns the overall RMS reprojection error. */
static double cvCalibrateCamera2Internal( const CvMat* objectPoints,
                    const CvMat* imagePoints, const CvMat* npoints,
                    CvSize imageSize, int iFixedPoint, CvMat* cameraMatrix, CvMat* distCoeffs,
                    CvMat* rvecs, CvMat* tvecs, CvMat* newObjPoints, CvMat* stdDevs,
                    CvMat* perViewErrors, int flags, CvTermCriteria termCrit )
{
    const int NINTRINSIC = CV_CALIB_NINTRINSIC;
    double reprojErr = 0;

    Matx33d A;              // camera intrinsic matrix
    double k[14] = {0};     // distortion coefficients (max 14, tilted model)
    CvMat matA = cvMat(3, 3, CV_64F, A.val), _k;
    int i, nimages, maxPoints = 0, ni = 0, pos, total = 0, nparams, npstep, cn;
    double aspectRatio = 0.;

    // 0. check the parameters & allocate buffers
    if( !CV_IS_MAT(objectPoints) || !CV_IS_MAT(imagePoints) ||
        !CV_IS_MAT(npoints) || !CV_IS_MAT(cameraMatrix) || !CV_IS_MAT(distCoeffs) )
        CV_Error( CV_StsBadArg, "One of required vector arguments is not a valid matrix" );

    if( imageSize.width <= 0 || imageSize.height <= 0 )
        CV_Error( CV_StsOutOfRange, "image width and height must be positive" );

    if( CV_MAT_TYPE(npoints->type) != CV_32SC1 ||
        (npoints->rows != 1 && npoints->cols != 1) )
        CV_Error( CV_StsUnsupportedFormat,
            "the array of point counters must be 1-dimensional integer vector" );
    if(flags & CALIB_TILTED_MODEL)
    {
        //when the tilted sensor model is used the distortion coefficients matrix must have 14 parameters
        if (distCoeffs->cols*distCoeffs->rows != 14)
            CV_Error( CV_StsBadArg, "The tilted sensor model must have 14 parameters in the distortion matrix" );
    }
    else
    {
        //when the thin prism model is used the distortion coefficients matrix must have 12 parameters
        if(flags & CALIB_THIN_PRISM_MODEL)
            if (distCoeffs->cols*distCoeffs->rows != 12)
                CV_Error( CV_StsBadArg, "Thin prism model must have 12 parameters in the distortion matrix" );
    }

    nimages = npoints->rows*npoints->cols;
    npstep = npoints->rows == 1 ? 1 : npoints->step/CV_ELEM_SIZE(npoints->type);

    if( rvecs )
    {
        cn = CV_MAT_CN(rvecs->type);
        if( !CV_IS_MAT(rvecs) ||
            (CV_MAT_DEPTH(rvecs->type) != CV_32F && CV_MAT_DEPTH(rvecs->type) != CV_64F) ||
            ((rvecs->rows != nimages || (rvecs->cols*cn != 3 && rvecs->cols*cn != 9)) &&
            (rvecs->rows != 1 || rvecs->cols != nimages || cn != 3)) )
            CV_Error( CV_StsBadArg, "the output array of rotation vectors must be 3-channel "
                "1xn or nx1 array or 1-channel nx3 or nx9 array, where n is the number of views" );
    }

    if( tvecs )
    {
        cn = CV_MAT_CN(tvecs->type);
        if( !CV_IS_MAT(tvecs) ||
            (CV_MAT_DEPTH(tvecs->type) != CV_32F && CV_MAT_DEPTH(tvecs->type) != CV_64F) ||
            ((tvecs->rows != nimages || tvecs->cols*cn != 3) &&
            (tvecs->rows != 1 || tvecs->cols != nimages || cn != 3)) )
            CV_Error( CV_StsBadArg, "the output array of translation vectors must be 3-channel "
                "1xn or nx1 array or 1-channel nx3 array, where n is the number of views" );
    }

    // object-releasing mode: the fixed point index must be strictly inside
    // the first view's point list
    bool releaseObject = iFixedPoint > 0 && iFixedPoint < npoints->data.i[0] - 1;

    if( stdDevs && !releaseObject )
    {
        cn = CV_MAT_CN(stdDevs->type);
        if( !CV_IS_MAT(stdDevs) ||
            (CV_MAT_DEPTH(stdDevs->type) != CV_32F && CV_MAT_DEPTH(stdDevs->type) != CV_64F) ||
            ((stdDevs->rows != (nimages*6 + NINTRINSIC) || stdDevs->cols*cn != 1) &&
            (stdDevs->rows != 1 || stdDevs->cols != (nimages*6 + NINTRINSIC) || cn != 1)) )
#define STR__(x) #x
#define STR_(x) STR__(x)
            CV_Error( CV_StsBadArg, "the output array of standard deviations vectors must be 1-channel "
                "1x(n*6 + NINTRINSIC) or (n*6 + NINTRINSIC)x1 array, where n is the number of views,"
                " NINTRINSIC = " STR_(CV_CALIB_NINTRINSIC));
    }

    if( (CV_MAT_TYPE(cameraMatrix->type) != CV_32FC1 &&
        CV_MAT_TYPE(cameraMatrix->type) != CV_64FC1) ||
        cameraMatrix->rows != 3 || cameraMatrix->cols != 3 )
        CV_Error( CV_StsBadArg,
            "Intrinsic parameters must be 3x3 floating-point matrix" );

    if( (CV_MAT_TYPE(distCoeffs->type) != CV_32FC1 &&
        CV_MAT_TYPE(distCoeffs->type) != CV_64FC1) ||
        (distCoeffs->cols != 1 && distCoeffs->rows != 1) ||
        (distCoeffs->cols*distCoeffs->rows != 4 &&
        distCoeffs->cols*distCoeffs->rows != 5 &&
        distCoeffs->cols*distCoeffs->rows != 8 &&
        distCoeffs->cols*distCoeffs->rows != 12 &&
        distCoeffs->cols*distCoeffs->rows != 14) )
        CV_Error( CV_StsBadArg, cvDistCoeffErr );

    // count the points and find the biggest view
    for( i = 0; i < nimages; i++ )
    {
        ni = npoints->data.i[i*npstep];
        if( ni < 4 )
        {
            CV_Error_( CV_StsOutOfRange, ("The number of points in the view #%d is < 4", i));
        }
        maxPoints = MAX( maxPoints, ni );
        total += ni;
    }

    if( newObjPoints )
    {
        cn = CV_MAT_CN(newObjPoints->type);
        if( !CV_IS_MAT(newObjPoints) ||
            (CV_MAT_DEPTH(newObjPoints->type) != CV_32F && CV_MAT_DEPTH(newObjPoints->type) != CV_64F) ||
            ((newObjPoints->rows != maxPoints || newObjPoints->cols*cn != 3) &&
            (newObjPoints->rows != 1 || newObjPoints->cols != maxPoints || cn != 3)) )
            CV_Error( CV_StsBadArg, "the output array of refined object points must be 3-channel "
                "1xn or nx1 array or 1-channel nx3 array, where n is the number of object points per view" );
    }

    if( stdDevs && releaseObject )
    {
        cn = CV_MAT_CN(stdDevs->type);
        if( !CV_IS_MAT(stdDevs) ||
            (CV_MAT_DEPTH(stdDevs->type) != CV_32F && CV_MAT_DEPTH(stdDevs->type) != CV_64F) ||
            ((stdDevs->rows != (nimages*6 + NINTRINSIC + maxPoints*3) || stdDevs->cols*cn != 1) &&
            (stdDevs->rows != 1 || stdDevs->cols != (nimages*6 + NINTRINSIC + maxPoints*3) || cn != 1)) )
            CV_Error( CV_StsBadArg, "the output array of standard deviations vectors must be 1-channel "
                "1x(n*6 + NINTRINSIC + m*3) or (n*6 + NINTRINSIC + m*3)x1 array, where n is the number of views,"
                " NINTRINSIC = " STR_(CV_CALIB_NINTRINSIC) ", m is the number of object points per view");
    }

    Mat matM( 1, total, CV_64FC3 );     // all object points, concatenated
    Mat _m( 1, total, CV_64FC2 );       // all image points, concatenated
    Mat allErrors(1, total, CV_64FC2);  // per-point residuals (for stdDevs/perViewErrors)

    if(CV_MAT_CN(objectPoints->type) == 3) {
        cvarrToMat(objectPoints).convertTo(matM, CV_64F);
    } else {
        convertPointsHomogeneous(cvarrToMat(objectPoints), matM);
    }

    if(CV_MAT_CN(imagePoints->type) == 2) {
        cvarrToMat(imagePoints).convertTo(_m, CV_64F);
    } else {
        convertPointsHomogeneous(cvarrToMat(imagePoints), _m);
    }

    // parameter vector layout: [NINTRINSIC intrinsics | 6 per view | 3 per
    // object point (object-releasing mode only)]
    nparams = NINTRINSIC + nimages*6;
    if( releaseObject )
        nparams += maxPoints * 3;
    Mat _Ji( maxPoints*2, NINTRINSIC, CV_64FC1, Scalar(0));   // Jacobian w.r.t. intrinsics
    Mat _Je( maxPoints*2, 6, CV_64FC1 );                      // Jacobian w.r.t. extrinsics
    Mat _Jo( maxPoints*2, maxPoints*3, CV_64FC1, Scalar(0) ); // Jacobian w.r.t. object points
    Mat _err( maxPoints*2, 1, CV_64FC1 );

    _k = cvMat( distCoeffs->rows, distCoeffs->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), k);
    // fix higher-order coefficients the caller's distCoeffs cannot hold
    if( distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) < 8 )
    {
        if( distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) < 5 )
            flags |= CALIB_FIX_K3;
        flags |= CALIB_FIX_K4 | CALIB_FIX_K5 | CALIB_FIX_K6;
    }
    const double minValidAspectRatio = 0.01;
    const double maxValidAspectRatio = 100.0;

    // 1. initialize intrinsic parameters & LM solver
    if( flags & CALIB_USE_INTRINSIC_GUESS )
    {
        cvConvert( cameraMatrix, &matA );
        if( A(0, 0) <= 0 || A(1, 1) <= 0 )
            CV_Error( CV_StsOutOfRange, "Focal length (fx and fy) must be positive" );
        if( A(0, 2) < 0 || A(0, 2) >= imageSize.width ||
            A(1, 2) < 0 || A(1, 2) >= imageSize.height )
            CV_Error( CV_StsOutOfRange, "Principal point must be within the image" );
        if( fabs(A(0, 1)) > 1e-5 )
            CV_Error( CV_StsOutOfRange, "Non-zero skew is not supported by the function" );
        if( fabs(A(1, 0)) > 1e-5 || fabs(A(2, 0)) > 1e-5 ||
            fabs(A(2, 1)) > 1e-5 || fabs(A(2,2)-1) > 1e-5 )
            CV_Error( CV_StsOutOfRange,
                "The intrinsic matrix must have [fx 0 cx; 0 fy cy; 0 0 1] shape" );
        A(0, 1) = A(1, 0) = A(2, 0) = A(2, 1) = 0.;
        A(2, 2) = 1.;

        if( flags & CALIB_FIX_ASPECT_RATIO )
        {
            aspectRatio = A(0, 0)/A(1, 1);

            if( aspectRatio < minValidAspectRatio || aspectRatio > maxValidAspectRatio )
                CV_Error( CV_StsOutOfRange,
                    "The specified aspect ratio (= cameraMatrix[0][0] / cameraMatrix[1][1]) is incorrect" );
        }
        cvConvert( distCoeffs, &_k );
    }
    else
    {
        // no guess supplied: the rig must be planar, so z must be ~0 everywhere
        Scalar mean, sdv;
        meanStdDev(matM, mean, sdv);
        if( fabs(mean[2]) > 1e-5 || fabs(sdv[2]) > 1e-5 )
            CV_Error( CV_StsBadArg,
            "For non-planar calibration rigs the initial intrinsic matrix must be specified" );
        for( i = 0; i < total; i++ )
            matM.at<Point3d>(i).z = 0.;

        if( flags & CALIB_FIX_ASPECT_RATIO )
        {
            aspectRatio = cvmGet(cameraMatrix,0,0);
            aspectRatio /= cvmGet(cameraMatrix,1,1);
            if( aspectRatio < minValidAspectRatio || aspectRatio > maxValidAspectRatio )
                CV_Error( CV_StsOutOfRange,
                    "The specified aspect ratio (= cameraMatrix[0][0] / cameraMatrix[1][1]) is incorrect" );
        }
        CvMat _matM = cvMat(matM), m = cvMat(_m);
        cvInitIntrinsicParams2D( &_matM, &m, npoints, imageSize, &matA, aspectRatio );
    }

    CvLevMarq solver( nparams, 0, termCrit );

    if(flags & CALIB_USE_LU) {
        solver.solveMethod = DECOMP_LU;
    }
    else if(flags & CALIB_USE_QR) {
        solver.solveMethod = DECOMP_QR;
    }

    {
        // seed the solver's parameter vector and mark fixed parameters in
        // its mask (mask[j] == 0 means parameter j is not optimized)
        double* param = solver.param->data.db;
        uchar* mask = solver.mask->data.ptr;

        param[0] = A(0, 0); param[1] = A(1, 1); param[2] = A(0, 2); param[3] = A(1, 2);
        std::copy(k, k + 14, param + 4);

        if(flags & CALIB_FIX_ASPECT_RATIO)
            mask[0] = 0;
        if( flags & CALIB_FIX_FOCAL_LENGTH )
            mask[0] = mask[1] = 0;
        if( flags & CALIB_FIX_PRINCIPAL_POINT )
            mask[2] = mask[3] = 0;
        if( flags & CALIB_ZERO_TANGENT_DIST )
        {
            param[6] = param[7] = 0;
            mask[6] = mask[7] = 0;
        }
        if( !(flags & CALIB_RATIONAL_MODEL) )
            flags |= CALIB_FIX_K4 + CALIB_FIX_K5 + CALIB_FIX_K6;
        if( !(flags & CALIB_THIN_PRISM_MODEL))
            flags |= CALIB_FIX_S1_S2_S3_S4;
        if( !(flags & CALIB_TILTED_MODEL))
            flags |= CALIB_FIX_TAUX_TAUY;

        mask[ 4] = !(flags & CALIB_FIX_K1);
        mask[ 5] = !(flags & CALIB_FIX_K2);
        if( flags & CALIB_FIX_TANGENT_DIST )
        {
            mask[6] = mask[7] = 0;
        }
        mask[ 8] = !(flags & CALIB_FIX_K3);
        mask[ 9] = !(flags & CALIB_FIX_K4);
        mask[10] = !(flags & CALIB_FIX_K5);
        mask[11] = !(flags & CALIB_FIX_K6);

        if(flags & CALIB_FIX_S1_S2_S3_S4)
        {
            mask[12] = 0;
            mask[13] = 0;
            mask[14] = 0;
            mask[15] = 0;
        }
        if(flags & CALIB_FIX_TAUX_TAUY)
        {
            mask[16] = 0;
            mask[17] = 0;
        }

        if(releaseObject)
        {
            // copy object points
            std::copy( matM.ptr<double>(), matM.ptr<double>( 0, maxPoints - 1 ) + 3,
                       param + NINTRINSIC + nimages * 6 );
            // fix points
            mask[NINTRINSIC + nimages * 6] = 0;
            mask[NINTRINSIC + nimages * 6 + 1] = 0;
            mask[NINTRINSIC + nimages * 6 + 2] = 0;
            mask[NINTRINSIC + nimages * 6 + iFixedPoint * 3] = 0;
            mask[NINTRINSIC + nimages * 6 + iFixedPoint * 3 + 1] = 0;
            mask[NINTRINSIC + nimages * 6 + iFixedPoint * 3 + 2] = 0;
            mask[nparams - 1] = 0;
        }
    }

    // 2. initialize extrinsic parameters
    for( i = 0, pos = 0; i < nimages; i++, pos += ni )
    {
        CvMat _ri, _ti;
        ni = npoints->data.i[i*npstep];

        cvGetRows( solver.param, &_ri, NINTRINSIC + i*6, NINTRINSIC + i*6 + 3 );
        cvGetRows( solver.param, &_ti, NINTRINSIC + i*6 + 3, NINTRINSIC + i*6 + 6 );

        CvMat _Mi = cvMat(matM.colRange(pos, pos + ni));
        CvMat _mi = cvMat(_m.colRange(pos, pos + ni));

        cvFindExtrinsicCameraParams2( &_Mi, &_mi, &matA, &_k, &_ri, &_ti );
    }

    // 3. run the optimization
    for(;;)
    {
        const CvMat* _param = 0;
        CvMat *_JtJ = 0, *_JtErr = 0;
        double* _errNorm = 0;
        bool proceed = solver.updateAlt( _param, _JtJ, _JtErr, _errNorm );
        double *param = solver.param->data.db, *pparam = solver.prevParam->data.db;
        // the Jacobian is also needed once after convergence when stdDevs
        // are requested (to build the final JtJ)
        bool calcJ = solver.state == CvLevMarq::CALC_J || (!proceed && stdDevs);

        if( flags & CALIB_FIX_ASPECT_RATIO )
        {
            param[0] = param[1]*aspectRatio;
            pparam[0] = pparam[1]*aspectRatio;
        }

        A(0, 0) = param[0]; A(1, 1) = param[1]; A(0, 2) = param[2]; A(1, 2) = param[3];
        std::copy(param + 4, param + 4 + 14, k);

        if ( !proceed && !stdDevs && !perViewErrors )
            break;
        else if ( !proceed && stdDevs )
            cvZero(_JtJ);

        reprojErr = 0;

        for( i = 0, pos = 0; i < nimages; i++, pos += ni )
        {
            CvMat _ri, _ti;
            ni = npoints->data.i[i*npstep];

            cvGetRows( solver.param, &_ri, NINTRINSIC + i*6, NINTRINSIC + i*6 + 3 );
            cvGetRows( solver.param, &_ti, NINTRINSIC + i*6 + 3, NINTRINSIC + i*6 + 6 );

            CvMat _Mi = cvMat(matM.colRange(pos, pos + ni));
            if( releaseObject )
            {
                // use the current estimates of the object points instead
                cvGetRows( solver.param, &_Mi, NINTRINSIC + nimages * 6,
                           NINTRINSIC + nimages * 6 + ni * 3 );
                cvReshape( &_Mi, &_Mi, 3, 1 );
            }
            CvMat _mi = cvMat(_m.colRange(pos, pos + ni));
            CvMat _me = cvMat(allErrors.colRange(pos, pos + ni));

            _Je.resize(ni*2); _Ji.resize(ni*2); _err.resize(ni*2);
            _Jo.resize(ni*2);
            CvMat _dpdr = cvMat(_Je.colRange(0, 3));
            CvMat _dpdt = cvMat(_Je.colRange(3, 6));
            CvMat _dpdf = cvMat(_Ji.colRange(0, 2));
            CvMat _dpdc = cvMat(_Ji.colRange(2, 4));
            CvMat _dpdk = cvMat(_Ji.colRange(4, NINTRINSIC));
            CvMat _dpdo = cvMat(_Jo.colRange(0, ni * 3));
            CvMat _mp = cvMat(_err.reshape(2, 1));

            if( calcJ )
            {
                cvProjectPoints2Internal( &_Mi, &_ri, &_ti, &matA, &_k, &_mp, &_dpdr, &_dpdt,
                                  (flags & CALIB_FIX_FOCAL_LENGTH) ? 0 : &_dpdf,
                                  (flags & CALIB_FIX_PRINCIPAL_POINT) ? 0 : &_dpdc, &_dpdk,
                                  &_dpdo,
                                  (flags & CALIB_FIX_ASPECT_RATIO) ? aspectRatio : 0);
            }
            else
                cvProjectPoints2( &_Mi, &_ri, &_ti, &matA, &_k, &_mp );

            // residual = projected - observed
            cvSub( &_mp, &_mi, &_mp );
            if (perViewErrors || stdDevs)
                cvCopy(&_mp, &_me);

            if( calcJ )
            {
                Mat JtJ(cvarrToMat(_JtJ)), JtErr(cvarrToMat(_JtErr));

                // see HZ: (A6.14) for details on the structure of the Jacobian
                JtJ(Rect(0, 0, NINTRINSIC, NINTRINSIC)) += _Ji.t() * _Ji;
                JtJ(Rect(NINTRINSIC + i * 6, NINTRINSIC + i * 6, 6, 6)) = _Je.t() * _Je;
                JtJ(Rect(NINTRINSIC + i * 6, 0, 6, NINTRINSIC)) = _Ji.t() * _Je;
                if( releaseObject )
                {
                    JtJ(Rect(NINTRINSIC + nimages * 6, 0, maxPoints * 3, NINTRINSIC)) += _Ji.t() * _Jo;
                    JtJ(Rect(NINTRINSIC + nimages * 6, NINTRINSIC + i * 6, maxPoints * 3, 6))
                        += _Je.t() * _Jo;
                    JtJ(Rect(NINTRINSIC + nimages * 6, NINTRINSIC + nimages * 6, maxPoints * 3, maxPoints * 3))
                        += _Jo.t() * _Jo;
                }

                JtErr.rowRange(0, NINTRINSIC) += _Ji.t() * _err;
                JtErr.rowRange(NINTRINSIC + i * 6, NINTRINSIC + (i + 1) * 6) = _Je.t() * _err;
                if( releaseObject )
                {
                    JtErr.rowRange(NINTRINSIC + nimages * 6, nparams) += _Jo.t() * _err;
                }
            }

            double viewErr = norm(_err, NORM_L2SQR);

            if( perViewErrors )
                perViewErrors->data.db[i] = std::sqrt(viewErr / ni);

            reprojErr += viewErr;
        }
        if( _errNorm )
            *_errNorm = reprojErr;

        if( !proceed )
        {
            if( stdDevs )
            {
                // estimate parameter standard deviations from the diagonal of
                // the (pseudo-)inverse of JtJ restricted to the free parameters
                Mat mask = cvarrToMat(solver.mask);
                int nparams_nz = countNonZero(mask);
                Mat JtJinv, JtJN;
                JtJN.create(nparams_nz, nparams_nz, CV_64F);
                subMatrix(cvarrToMat(_JtJ), JtJN, mask, mask);
                completeSymm(JtJN, false);
                cv::invert(JtJN, JtJinv, DECOMP_SVD);
                //sigma2 is deviation of the noise
                //see any papers about variance of the least squares estimator for
                //detailed description of the variance estimation methods
                double sigma2 = norm(allErrors, NORM_L2SQR) / (total - nparams_nz);
                Mat stdDevsM = cvarrToMat(stdDevs);
                int j = 0;
                for ( int s = 0; s < nparams; s++ )
                    if( mask.data[s] )
                    {
                        stdDevsM.at<double>(s) = std::sqrt(JtJinv.at<double>(j,j) * sigma2);
                        j++;
                    }
                    else
                        stdDevsM.at<double>(s) = 0.;    // fixed parameters get zero deviation
            }
            break;
        }
    }

    // 4. store the results
    cvConvert( &matA, cameraMatrix );
    cvConvert( &_k, distCoeffs );
    if( newObjPoints && releaseObject )
    {
        CvMat _Mi;
        cvGetRows( solver.param, &_Mi, NINTRINSIC + nimages * 6,
                   NINTRINSIC + nimages * 6 + maxPoints * 3 );
        cvReshape( &_Mi, &_Mi, 3, 1 );
        cvConvert( &_Mi, newObjPoints );
    }

    for( i = 0, pos = 0; i < nimages; i++ )
    {
        CvMat src, dst;

        if( rvecs )
        {
            src = cvMat( 3, 1, CV_64F, solver.param->data.db + NINTRINSIC + i*6 );
            if( rvecs->rows == nimages && rvecs->cols*CV_MAT_CN(rvecs->type) == 9 )
            {
                // caller wants full 3x3 rotation matrices per view
                dst = cvMat( 3, 3, CV_MAT_DEPTH(rvecs->type),
                    rvecs->data.ptr + rvecs->step*i );
                cvRodrigues2( &src, &matA );
                cvConvert( &matA, &dst );
            }
            else
            {
                dst = cvMat( 3, 1, CV_MAT_DEPTH(rvecs->type), rvecs->rows == 1 ?
                    rvecs->data.ptr + i*CV_ELEM_SIZE(rvecs->type) :
                    rvecs->data.ptr + rvecs->step*i );
                cvConvert( &src, &dst );
            }
        }
        if( tvecs )
        {
            src = cvMat( 3, 1, CV_64F, solver.param->data.db + NINTRINSIC + i*6 + 3 );
            dst = cvMat( 3, 1, CV_MAT_DEPTH(tvecs->type), tvecs->rows == 1 ?
                tvecs->data.ptr + i*CV_ELEM_SIZE(tvecs->type) :
                tvecs->data.ptr + tvecs->step*i );
            cvConvert( &src, &dst );
        }
    }

    return std::sqrt(reprojErr/total);
}
1857
1858
1859
/* finds intrinsic and extrinsic camera parameters
1860
from a few views of known calibration pattern */
1861
CV_IMPL double cvCalibrateCamera2( const CvMat* objectPoints,
1862
const CvMat* imagePoints, const CvMat* npoints,
1863
CvSize imageSize, CvMat* cameraMatrix, CvMat* distCoeffs,
1864
CvMat* rvecs, CvMat* tvecs, int flags, CvTermCriteria termCrit )
1865
{
1866
return cvCalibrateCamera2Internal(objectPoints, imagePoints, npoints, imageSize, -1, cameraMatrix,
1867
distCoeffs, rvecs, tvecs, NULL, NULL, NULL, flags, termCrit);
1868
}
1869
1870
/* Camera calibration entry point that additionally supports the
   object-releasing method: when iFixedPoint selects an interior point of the
   first view, the object point coordinates are refined during calibration
   and the result is written to newObjPoints.  Validates the inputs required
   by that mode, then delegates to the internal calibration routine. */
CV_IMPL double cvCalibrateCamera4( const CvMat* objectPoints,
                    const CvMat* imagePoints, const CvMat* npoints,
                    CvSize imageSize, int iFixedPoint, CvMat* cameraMatrix, CvMat* distCoeffs,
                    CvMat* rvecs, CvMat* tvecs, CvMat* newObjPoints, int flags, CvTermCriteria termCrit )
{
    // The per-view point counter vector must be a valid 1-D CV_32SC1 matrix.
    if( !CV_IS_MAT(npoints) )
        CV_Error( CV_StsBadArg, "npoints is not a valid matrix" );
    if( CV_MAT_TYPE(npoints->type) != CV_32SC1 ||
        (npoints->rows != 1 && npoints->cols != 1) )
        CV_Error( CV_StsUnsupportedFormat,
            "the array of point counters must be 1-dimensional integer vector" );

    // Object-releasing calibration is requested when the fixed-point index
    // lies strictly inside the first view's point list.
    bool releaseObject = iFixedPoint > 0 && iFixedPoint < npoints->data.i[0] - 1;

    // check object points. If not qualified, report errors.
    if( releaseObject )
    {
        // In this mode every view must observe exactly the same object
        // points, so verify both the counts and the coordinates.
        if( !CV_IS_MAT(objectPoints) )
            CV_Error( CV_StsBadArg, "objectPoints is not a valid matrix" );

        Mat allPts;
        if(CV_MAT_CN(objectPoints->type) == 3) {
            allPts = cvarrToMat(objectPoints);
        } else {
            convertPointsHomogeneous(cvarrToMat(objectPoints), allPts);
        }
        allPts = allPts.reshape(3, 1);

        const int viewCount = npoints->rows * npoints->cols;
        const int counterStep = npoints->rows == 1 ? 1 : npoints->step / CV_ELEM_SIZE(npoints->type);
        const int ptsPerView = npoints->data.i[0];

        for( int view = 1; view < viewCount; view++ )
        {
            if( npoints->data.i[view * counterStep] != ptsPerView )
            {
                CV_Error( CV_StsBadArg, "All objectPoints[i].size() should be equal when "
                                        "object-releasing method is requested." );
            }
            Mat diff = allPts.colRange(ptsPerView * view, ptsPerView * view + ptsPerView)
                       != allPts.colRange(0, ptsPerView);
            diff = diff.reshape(1);
            if( countNonZero(diff) )
            {
                CV_Error( CV_StsBadArg, "All objectPoints[i] should be identical when object-releasing"
                                        " method is requested." );
            }
        }
    }

    return cvCalibrateCamera2Internal(objectPoints, imagePoints, npoints, imageSize, iFixedPoint,
                                      cameraMatrix, distCoeffs, rvecs, tvecs, newObjPoints, NULL,
                                      NULL, flags, termCrit);
}
1921
1922
void cvCalibrationMatrixValues( const CvMat *calibMatr, CvSize imgSize,
1923
double apertureWidth, double apertureHeight, double *fovx, double *fovy,
1924
double *focalLength, CvPoint2D64f *principalPoint, double *pasp )
1925
{
1926
/* Validate parameters. */
1927
if(calibMatr == 0)
1928
CV_Error(CV_StsNullPtr, "Some of parameters is a NULL pointer!");
1929
1930
if(!CV_IS_MAT(calibMatr))
1931
CV_Error(CV_StsUnsupportedFormat, "Input parameters must be a matrices!");
1932
1933
double dummy = .0;
1934
Point2d pp;
1935
cv::calibrationMatrixValues(cvarrToMat(calibMatr), imgSize, apertureWidth, apertureHeight,
1936
fovx ? *fovx : dummy,
1937
fovy ? *fovy : dummy,
1938
focalLength ? *focalLength : dummy,
1939
pp,
1940
pasp ? *pasp : dummy);
1941
1942
if(principalPoint)
1943
*principalPoint = cvPoint2D64f(pp.x, pp.y);
1944
}
1945
1946
1947
//////////////////////////////// Stereo Calibration ///////////////////////////////////
1948
1949
// qsort comparator for doubles: returns negative / zero / positive for
// less-than / equal / greater-than (0 for any NaN operand, matching the
// original branchless (a > b) - (a < b) form).
static int dbCmp( const void* _a, const void* _b )
{
    const double lhs = *(const double*)_a;
    const double rhs = *(const double*)_b;

    if( lhs < rhs )
        return -1;
    if( lhs > rhs )
        return 1;
    return 0;
}
1956
1957
/* Full stereo calibration: jointly refines the inter-camera pose (R, T),
   the per-view pose of the left camera, and (unless CALIB_FIX_INTRINSIC)
   both cameras' intrinsics, by Levenberg-Marquardt minimization of the
   re-projection error over all views.
   Outputs: matR/matT (relative pose), optional matE/matF (essential /
   fundamental matrices), optional perViewErr (nimages x 2 RMS per view and
   camera).  Returns the overall RMS re-projection error. */
static double cvStereoCalibrateImpl( const CvMat* _objectPoints, const CvMat* _imagePoints1,
                        const CvMat* _imagePoints2, const CvMat* _npoints,
                        CvMat* _cameraMatrix1, CvMat* _distCoeffs1,
                        CvMat* _cameraMatrix2, CvMat* _distCoeffs2,
                        CvSize imageSize, CvMat* matR, CvMat* matT,
                        CvMat* matE, CvMat* matF,
                        CvMat* perViewErr, int flags,
                        CvTermCriteria termCrit )
{
    // 18 intrinsic parameters per camera: fx, fy, cx, cy, k1, k2, p1, p2,
    // k3, k4, k5, k6, s1..s4, taux, tauy.
    const int NINTRINSIC = 18;
    Ptr<CvMat> npoints, imagePoints[2], objectPoints, RT0;
    double reprojErr = 0;

    double A[2][9], dk[2][14]={{0}}, rlr[9];
    CvMat K[2], Dist[2], om_LR, T_LR;
    CvMat R_LR = cvMat(3, 3, CV_64F, rlr);
    int i, k, p, ni = 0, ofs, nimages, pointsTotal, maxPoints = 0;
    int nparams;
    bool recomputeIntrinsics = false;
    double aspectRatio[2] = {0};

    CV_Assert( CV_IS_MAT(_imagePoints1) && CV_IS_MAT(_imagePoints2) &&
               CV_IS_MAT(_objectPoints) && CV_IS_MAT(_npoints) &&
               CV_IS_MAT(matR) && CV_IS_MAT(matT) );

    CV_Assert( CV_ARE_TYPES_EQ(_imagePoints1, _imagePoints2) &&
               CV_ARE_DEPTHS_EQ(_imagePoints1, _objectPoints) );

    CV_Assert( (_npoints->cols == 1 || _npoints->rows == 1) &&
               CV_MAT_TYPE(_npoints->type) == CV_32SC1 );

    nimages = _npoints->cols + _npoints->rows - 1;
    npoints.reset(cvCreateMat( _npoints->rows, _npoints->cols, _npoints->type ));
    cvCopy( _npoints, npoints );

    // Total and maximum per-view point counts size the Jacobian buffers below.
    for( i = 0, pointsTotal = 0; i < nimages; i++ )
    {
        maxPoints = MAX(maxPoints, npoints->data.i[i]);
        pointsTotal += npoints->data.i[i];
    }

    // Work on CV_64F copies of the input point arrays, reshaped to 1 row.
    objectPoints.reset(cvCreateMat( _objectPoints->rows, _objectPoints->cols,
                                    CV_64FC(CV_MAT_CN(_objectPoints->type))));
    cvConvert( _objectPoints, objectPoints );
    cvReshape( objectPoints, objectPoints, 3, 1 );

    for( k = 0; k < 2; k++ )
    {
        const CvMat* points = k == 0 ? _imagePoints1 : _imagePoints2;
        const CvMat* cameraMatrix = k == 0 ? _cameraMatrix1 : _cameraMatrix2;
        const CvMat* distCoeffs = k == 0 ? _distCoeffs1 : _distCoeffs2;

        int cn = CV_MAT_CN(_imagePoints1->type);
        CV_Assert( (CV_MAT_DEPTH(_imagePoints1->type) == CV_32F ||
                    CV_MAT_DEPTH(_imagePoints1->type) == CV_64F) &&
                   ((_imagePoints1->rows == pointsTotal && _imagePoints1->cols*cn == 2) ||
                    (_imagePoints1->rows == 1 && _imagePoints1->cols == pointsTotal && cn == 2)) );

        // K[k]/Dist[k] are headers over the local A/dk working buffers.
        K[k] = cvMat(3,3,CV_64F,A[k]);
        Dist[k] = cvMat(1,14,CV_64F,dk[k]);

        imagePoints[k].reset(cvCreateMat( points->rows, points->cols, CV_64FC(CV_MAT_CN(points->type))));
        cvConvert( points, imagePoints[k] );
        cvReshape( imagePoints[k], imagePoints[k], 2, 1 );

        // Seed intrinsics from the caller when any flag implies they are given.
        if( flags & (CALIB_FIX_INTRINSIC|CALIB_USE_INTRINSIC_GUESS|
            CALIB_FIX_ASPECT_RATIO|CALIB_FIX_FOCAL_LENGTH) )
            cvConvert( cameraMatrix, &K[k] );

        if( flags & (CALIB_FIX_INTRINSIC|CALIB_USE_INTRINSIC_GUESS|
            CALIB_FIX_K1|CALIB_FIX_K2|CALIB_FIX_K3|CALIB_FIX_K4|CALIB_FIX_K5|CALIB_FIX_K6|CALIB_FIX_TANGENT_DIST) )
        {
            CvMat tdist = cvMat( distCoeffs->rows, distCoeffs->cols,
                CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), Dist[k].data.db );
            cvConvert( distCoeffs, &tdist );
        }

        // Otherwise bootstrap each camera with a monocular calibration.
        if( !(flags & (CALIB_FIX_INTRINSIC|CALIB_USE_INTRINSIC_GUESS)))
        {
            cvCalibrateCamera2( objectPoints, imagePoints[k],
                npoints, imageSize, &K[k], &Dist[k], NULL, NULL, flags );
        }
    }

    if( flags & CALIB_SAME_FOCAL_LENGTH )
    {
        // Average fx, fy, cx, cy (indices 0, 4, 2, 5 of the row-major 3x3 K).
        static const int avg_idx[] = { 0, 4, 2, 5, -1 };
        for( k = 0; avg_idx[k] >= 0; k++ )
            A[0][avg_idx[k]] = A[1][avg_idx[k]] = (A[0][avg_idx[k]] + A[1][avg_idx[k]])*0.5;
    }

    if( flags & CALIB_FIX_ASPECT_RATIO )
    {
        for( k = 0; k < 2; k++ )
            aspectRatio[k] = A[k][0]/A[k][4];
    }

    recomputeIntrinsics = (flags & CALIB_FIX_INTRINSIC) == 0;

    // Per-view residual and Jacobian blocks, sized for the largest view.
    Mat err( maxPoints*2, 1, CV_64F );
    Mat Je( maxPoints*2, 6, CV_64F );
    Mat J_LR( maxPoints*2, 6, CV_64F );
    Mat Ji( maxPoints*2, NINTRINSIC, CV_64F, Scalar(0) );

    // we optimize for the inter-camera R(3),t(3), then, optionally,
    // for intrinsic parameters of each camera ((fx,fy,cx,cy,k1,k2,p1,p2) ~ 8 parameters).
    nparams = 6*(nimages+1) + (recomputeIntrinsics ? NINTRINSIC*2 : 0);

    CvLevMarq solver( nparams, 0, termCrit );

    if(flags & CALIB_USE_LU) {
        solver.solveMethod = DECOMP_LU;
    }

    if( recomputeIntrinsics )
    {
        // Zero a mask entry to freeze the corresponding intrinsic parameter;
        // the two cameras' intrinsics occupy the last 2*NINTRINSIC slots.
        uchar* imask = solver.mask->data.ptr + nparams - NINTRINSIC*2;
        if( !(flags & CALIB_RATIONAL_MODEL) )
            flags |= CALIB_FIX_K4 | CALIB_FIX_K5 | CALIB_FIX_K6;
        if( !(flags & CALIB_THIN_PRISM_MODEL) )
            flags |= CALIB_FIX_S1_S2_S3_S4;
        if( !(flags & CALIB_TILTED_MODEL) )
            flags |= CALIB_FIX_TAUX_TAUY;
        if( flags & CALIB_FIX_ASPECT_RATIO )
            imask[0] = imask[NINTRINSIC] = 0;
        if( flags & CALIB_FIX_FOCAL_LENGTH )
            imask[0] = imask[1] = imask[NINTRINSIC] = imask[NINTRINSIC+1] = 0;
        if( flags & CALIB_FIX_PRINCIPAL_POINT )
            imask[2] = imask[3] = imask[NINTRINSIC+2] = imask[NINTRINSIC+3] = 0;
        if( flags & (CALIB_ZERO_TANGENT_DIST|CALIB_FIX_TANGENT_DIST) )
            imask[6] = imask[7] = imask[NINTRINSIC+6] = imask[NINTRINSIC+7] = 0;
        if( flags & CALIB_FIX_K1 )
            imask[4] = imask[NINTRINSIC+4] = 0;
        if( flags & CALIB_FIX_K2 )
            imask[5] = imask[NINTRINSIC+5] = 0;
        if( flags & CALIB_FIX_K3 )
            imask[8] = imask[NINTRINSIC+8] = 0;
        if( flags & CALIB_FIX_K4 )
            imask[9] = imask[NINTRINSIC+9] = 0;
        if( flags & CALIB_FIX_K5 )
            imask[10] = imask[NINTRINSIC+10] = 0;
        if( flags & CALIB_FIX_K6 )
            imask[11] = imask[NINTRINSIC+11] = 0;
        if( flags & CALIB_FIX_S1_S2_S3_S4 )
        {
            imask[12] = imask[NINTRINSIC+12] = 0;
            imask[13] = imask[NINTRINSIC+13] = 0;
            imask[14] = imask[NINTRINSIC+14] = 0;
            imask[15] = imask[NINTRINSIC+15] = 0;
        }
        if( flags & CALIB_FIX_TAUX_TAUY )
        {
            imask[16] = imask[NINTRINSIC+16] = 0;
            imask[17] = imask[NINTRINSIC+17] = 0;
        }
    }

    // storage for initial [om(R){i}|t{i}] (in order to compute the median for each component)
    RT0.reset(cvCreateMat( 6, nimages, CV_64F ));
    /*
       Compute initial estimate of pose
       For each image, compute:
          R(om) is the rotation matrix of om
          om(R) is the rotation vector of R
          R_ref = R(om_right) * R(om_left)'
          T_ref_list = [T_ref_list; T_right - R_ref * T_left]
          om_ref_list = [om_ref_list; om(R_ref)]
       om = median(om_ref_list)
       T = median(T_ref_list)
    */
    for( i = ofs = 0; i < nimages; ofs += ni, i++ )
    {
        ni = npoints->data.i[i];
        CvMat objpt_i;
        double _om[2][3], r[2][9], t[2][3];
        CvMat om[2], R[2], T[2], imgpt_i[2];

        objpt_i = cvMat(1, ni, CV_64FC3, objectPoints->data.db + ofs*3);
        for( k = 0; k < 2; k++ )
        {
            imgpt_i[k] = cvMat(1, ni, CV_64FC2, imagePoints[k]->data.db + ofs*2);
            om[k] = cvMat(3, 1, CV_64F, _om[k]);
            R[k] = cvMat(3, 3, CV_64F, r[k]);
            T[k] = cvMat(3, 1, CV_64F, t[k]);

            // Per-view pose of each camera from a PnP solve.
            cvFindExtrinsicCameraParams2( &objpt_i, &imgpt_i[k], &K[k], &Dist[k], &om[k], &T[k] );
            cvRodrigues2( &om[k], &R[k] );
            if( k == 0 )
            {
                // save initial om_left and T_left
                solver.param->data.db[(i+1)*6] = _om[0][0];
                solver.param->data.db[(i+1)*6 + 1] = _om[0][1];
                solver.param->data.db[(i+1)*6 + 2] = _om[0][2];
                solver.param->data.db[(i+1)*6 + 3] = t[0][0];
                solver.param->data.db[(i+1)*6 + 4] = t[0][1];
                solver.param->data.db[(i+1)*6 + 5] = t[0][2];
            }
        }
        // R[0] <- R_right * R_left^T ; T[1] <- T_right - R_ref * T_left ;
        // T[0] (reused) <- om(R_ref).  Buffers are reused to avoid temporaries.
        cvGEMM( &R[1], &R[0], 1, 0, 0, &R[0], CV_GEMM_B_T );
        cvGEMM( &R[0], &T[0], -1, &T[1], 1, &T[1] );
        cvRodrigues2( &R[0], &T[0] );
        // RT0 stores each of the 6 pose components contiguously per row so the
        // medians can be taken with qsort below.
        RT0->data.db[i] = t[0][0];
        RT0->data.db[i + nimages] = t[0][1];
        RT0->data.db[i + nimages*2] = t[0][2];
        RT0->data.db[i + nimages*3] = t[1][0];
        RT0->data.db[i + nimages*4] = t[1][1];
        RT0->data.db[i + nimages*5] = t[1][2];
    }

    if(flags & CALIB_USE_EXTRINSIC_GUESS)
    {
        // Caller-supplied initial relative pose; matR may be a 3x3 matrix or
        // a rotation vector.
        Vec3d R, T;
        cvarrToMat(matT).convertTo(T, CV_64F);

        if( matR->rows == 3 && matR->cols == 3 )
            Rodrigues(cvarrToMat(matR), R);
        else
            cvarrToMat(matR).convertTo(R, CV_64F);

        solver.param->data.db[0] = R[0];
        solver.param->data.db[1] = R[1];
        solver.param->data.db[2] = R[2];
        solver.param->data.db[3] = T[0];
        solver.param->data.db[4] = T[1];
        solver.param->data.db[5] = T[2];
    }
    else
    {
        // find the medians and save the first 6 parameters
        for( i = 0; i < 6; i++ )
        {
            qsort( RT0->data.db + i*nimages, nimages, CV_ELEM_SIZE(RT0->type), dbCmp );
            solver.param->data.db[i] = nimages % 2 != 0 ? RT0->data.db[i*nimages + nimages/2] :
                (RT0->data.db[i*nimages + nimages/2 - 1] + RT0->data.db[i*nimages + nimages/2])*0.5;
        }
    }

    if( recomputeIntrinsics )
        for( k = 0; k < 2; k++ )
        {
            // Pack each camera's intrinsics into the tail of the parameter vector.
            double* iparam = solver.param->data.db + (nimages+1)*6 + k*NINTRINSIC;
            if( flags & CALIB_ZERO_TANGENT_DIST )
                dk[k][2] = dk[k][3] = 0;
            iparam[0] = A[k][0]; iparam[1] = A[k][4]; iparam[2] = A[k][2]; iparam[3] = A[k][5];
            iparam[4] = dk[k][0]; iparam[5] = dk[k][1]; iparam[6] = dk[k][2];
            iparam[7] = dk[k][3]; iparam[8] = dk[k][4]; iparam[9] = dk[k][5];
            iparam[10] = dk[k][6]; iparam[11] = dk[k][7];
            iparam[12] = dk[k][8];
            iparam[13] = dk[k][9];
            iparam[14] = dk[k][10];
            iparam[15] = dk[k][11];
            iparam[16] = dk[k][12];
            iparam[17] = dk[k][13];
        }

    // Headers over the first 6 parameters: the inter-camera rotation vector
    // and translation being optimized.
    om_LR = cvMat(3, 1, CV_64F, solver.param->data.db);
    T_LR = cvMat(3, 1, CV_64F, solver.param->data.db + 3);

    // Main Levenberg-Marquardt loop; updateAlt() drives the iterations.
    for(;;)
    {
        const CvMat* param = 0;
        CvMat *JtJ = 0, *JtErr = 0;
        double *_errNorm = 0;
        double _omR[3], _tR[3];
        double _dr3dr1[9], _dr3dr2[9], /*_dt3dr1[9],*/ _dt3dr2[9], _dt3dt1[9], _dt3dt2[9];
        CvMat dr3dr1 = cvMat(3, 3, CV_64F, _dr3dr1);
        CvMat dr3dr2 = cvMat(3, 3, CV_64F, _dr3dr2);
        //CvMat dt3dr1 = cvMat(3, 3, CV_64F, _dt3dr1);
        CvMat dt3dr2 = cvMat(3, 3, CV_64F, _dt3dr2);
        CvMat dt3dt1 = cvMat(3, 3, CV_64F, _dt3dt1);
        CvMat dt3dt2 = cvMat(3, 3, CV_64F, _dt3dt2);
        CvMat om[2], T[2], imgpt_i[2];

        if( !solver.updateAlt( param, JtJ, JtErr, _errNorm ))
            break;
        reprojErr = 0;

        cvRodrigues2( &om_LR, &R_LR );
        om[1] = cvMat(3,1,CV_64F,_omR);
        T[1] = cvMat(3,1,CV_64F,_tR);

        if( recomputeIntrinsics )
        {
            double* iparam = solver.param->data.db + (nimages+1)*6;
            double* ipparam = solver.prevParam->data.db + (nimages+1)*6;

            // Enforce coupled-parameter constraints on both current and
            // previous parameter vectors before unpacking.
            if( flags & CALIB_SAME_FOCAL_LENGTH )
            {
                iparam[NINTRINSIC] = iparam[0];
                iparam[NINTRINSIC+1] = iparam[1];
                ipparam[NINTRINSIC] = ipparam[0];
                ipparam[NINTRINSIC+1] = ipparam[1];
            }
            if( flags & CALIB_FIX_ASPECT_RATIO )
            {
                iparam[0] = iparam[1]*aspectRatio[0];
                iparam[NINTRINSIC] = iparam[NINTRINSIC+1]*aspectRatio[1];
                ipparam[0] = ipparam[1]*aspectRatio[0];
                ipparam[NINTRINSIC] = ipparam[NINTRINSIC+1]*aspectRatio[1];
            }
            // Unpack the current intrinsics back into the K / dist buffers.
            for( k = 0; k < 2; k++ )
            {
                A[k][0] = iparam[k*NINTRINSIC+0];
                A[k][4] = iparam[k*NINTRINSIC+1];
                A[k][2] = iparam[k*NINTRINSIC+2];
                A[k][5] = iparam[k*NINTRINSIC+3];
                dk[k][0] = iparam[k*NINTRINSIC+4];
                dk[k][1] = iparam[k*NINTRINSIC+5];
                dk[k][2] = iparam[k*NINTRINSIC+6];
                dk[k][3] = iparam[k*NINTRINSIC+7];
                dk[k][4] = iparam[k*NINTRINSIC+8];
                dk[k][5] = iparam[k*NINTRINSIC+9];
                dk[k][6] = iparam[k*NINTRINSIC+10];
                dk[k][7] = iparam[k*NINTRINSIC+11];
                dk[k][8] = iparam[k*NINTRINSIC+12];
                dk[k][9] = iparam[k*NINTRINSIC+13];
                dk[k][10] = iparam[k*NINTRINSIC+14];
                dk[k][11] = iparam[k*NINTRINSIC+15];
                dk[k][12] = iparam[k*NINTRINSIC+16];
                dk[k][13] = iparam[k*NINTRINSIC+17];
            }
        }

        for( i = ofs = 0; i < nimages; ofs += ni, i++ )
        {
            ni = npoints->data.i[i];
            CvMat objpt_i;

            om[0] = cvMat(3,1,CV_64F,solver.param->data.db+(i+1)*6);
            T[0] = cvMat(3,1,CV_64F,solver.param->data.db+(i+1)*6+3);

            // Right-camera pose = left pose composed with the stereo pose;
            // derivatives are needed only when accumulating the Jacobian.
            if( JtJ || JtErr )
                cvComposeRT( &om[0], &T[0], &om_LR, &T_LR, &om[1], &T[1], &dr3dr1, 0,
                             &dr3dr2, 0, 0, &dt3dt1, &dt3dr2, &dt3dt2 );
            else
                cvComposeRT( &om[0], &T[0], &om_LR, &T_LR, &om[1], &T[1] );

            objpt_i = cvMat(1, ni, CV_64FC3, objectPoints->data.db + ofs*3);
            err.resize(ni*2); Je.resize(ni*2); J_LR.resize(ni*2); Ji.resize(ni*2);

            // C headers over the Mat buffers, partitioned by parameter group.
            CvMat tmpimagePoints = cvMat(err.reshape(2, 1));
            CvMat dpdf = cvMat(Ji.colRange(0, 2));
            CvMat dpdc = cvMat(Ji.colRange(2, 4));
            CvMat dpdk = cvMat(Ji.colRange(4, NINTRINSIC));
            CvMat dpdrot = cvMat(Je.colRange(0, 3));
            CvMat dpdt = cvMat(Je.colRange(3, 6));

            for( k = 0; k < 2; k++ )
            {
                imgpt_i[k] = cvMat(1, ni, CV_64FC2, imagePoints[k]->data.db + ofs*2);

                if( JtJ || JtErr )
                    cvProjectPoints2( &objpt_i, &om[k], &T[k], &K[k], &Dist[k],
                            &tmpimagePoints, &dpdrot, &dpdt, &dpdf, &dpdc, &dpdk,
                            (flags & CALIB_FIX_ASPECT_RATIO) ? aspectRatio[k] : 0);
                else
                    cvProjectPoints2( &objpt_i, &om[k], &T[k], &K[k], &Dist[k], &tmpimagePoints );
                // err <- projected - observed
                cvSub( &tmpimagePoints, &imgpt_i[k], &tmpimagePoints );

                if( solver.state == CvLevMarq::CALC_J )
                {
                    int iofs = (nimages+1)*6 + k*NINTRINSIC, eofs = (i+1)*6;
                    assert( JtJ && JtErr );

                    Mat _JtJ(cvarrToMat(JtJ)), _JtErr(cvarrToMat(JtErr));

                    if( k == 1 )
                    {
                        // d(err_{x|y}R) ~ de3
                        // convert de3/{dr3,dt3} => de3{dr1,dt1} & de3{dr2,dt2}
                        // via the chain rule with the cvComposeRT derivatives.
                        for( p = 0; p < ni*2; p++ )
                        {
                            CvMat de3dr3 = cvMat( 1, 3, CV_64F, Je.ptr(p));
                            CvMat de3dt3 = cvMat( 1, 3, CV_64F, de3dr3.data.db + 3 );
                            CvMat de3dr2 = cvMat( 1, 3, CV_64F, J_LR.ptr(p) );
                            CvMat de3dt2 = cvMat( 1, 3, CV_64F, de3dr2.data.db + 3 );
                            double _de3dr1[3], _de3dt1[3];
                            CvMat de3dr1 = cvMat( 1, 3, CV_64F, _de3dr1 );
                            CvMat de3dt1 = cvMat( 1, 3, CV_64F, _de3dt1 );

                            cvMatMul( &de3dr3, &dr3dr1, &de3dr1 );
                            cvMatMul( &de3dt3, &dt3dt1, &de3dt1 );

                            cvMatMul( &de3dr3, &dr3dr2, &de3dr2 );
                            cvMatMulAdd( &de3dt3, &dt3dr2, &de3dr2, &de3dr2 );

                            cvMatMul( &de3dt3, &dt3dt2, &de3dt2 );

                            cvCopy( &de3dr1, &de3dr3 );
                            cvCopy( &de3dt1, &de3dt3 );
                        }

                        // Accumulate the stereo-pose blocks of the normal equations.
                        _JtJ(Rect(0, 0, 6, 6)) += J_LR.t()*J_LR;
                        _JtJ(Rect(eofs, 0, 6, 6)) = J_LR.t()*Je;
                        _JtErr.rowRange(0, 6) += J_LR.t()*err;
                    }

                    // Per-view pose blocks.
                    _JtJ(Rect(eofs, eofs, 6, 6)) += Je.t()*Je;
                    _JtErr.rowRange(eofs, eofs + 6) += Je.t()*err;

                    if( recomputeIntrinsics )
                    {
                        // Intrinsics blocks and their cross terms.
                        _JtJ(Rect(iofs, iofs, NINTRINSIC, NINTRINSIC)) += Ji.t()*Ji;
                        _JtJ(Rect(iofs, eofs, NINTRINSIC, 6)) += Je.t()*Ji;
                        if( k == 1 )
                        {
                            _JtJ(Rect(iofs, 0, NINTRINSIC, 6)) += J_LR.t()*Ji;
                        }
                        _JtErr.rowRange(iofs, iofs + NINTRINSIC) += Ji.t()*err;
                    }
                }

                double viewErr = norm(err, NORM_L2SQR);

                if(perViewErr)
                    perViewErr->data.db[i*2 + k] = std::sqrt(viewErr/ni);

                reprojErr += viewErr;
            }
        }
        if(_errNorm)
            *_errNorm = reprojErr;
    }

    // Export the optimized relative pose; match the caller's matR shape
    // (rotation vector if 1xN/Nx1, rotation matrix otherwise).
    cvRodrigues2( &om_LR, &R_LR );
    if( matR->rows == 1 || matR->cols == 1 )
        cvConvert( &om_LR, matR );
    else
        cvConvert( &R_LR, matR );
    cvConvert( &T_LR, matT );

    if( recomputeIntrinsics )
    {
        cvConvert( &K[0], _cameraMatrix1 );
        cvConvert( &K[1], _cameraMatrix2 );

        for( k = 0; k < 2; k++ )
        {
            CvMat* distCoeffs = k == 0 ? _distCoeffs1 : _distCoeffs2;
            CvMat tdist = cvMat( distCoeffs->rows, distCoeffs->cols,
                CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), Dist[k].data.db );
            cvConvert( &tdist, distCoeffs );
        }
    }

    if( matE || matF )
    {
        // E = [T]_x * R ;  F = K2^-T * E * K1^-1, normalized so F[2][2] = 1
        // when possible.
        double* t = T_LR.data.db;
        double tx[] =
        {
            0, -t[2], t[1],
            t[2], 0, -t[0],
            -t[1], t[0], 0
        };
        CvMat Tx = cvMat(3, 3, CV_64F, tx);
        double e[9], f[9];
        CvMat E = cvMat(3, 3, CV_64F, e);
        CvMat F = cvMat(3, 3, CV_64F, f);
        cvMatMul( &Tx, &R_LR, &E );
        if( matE )
            cvConvert( &E, matE );
        if( matF )
        {
            double ik[9];
            CvMat iK = cvMat(3, 3, CV_64F, ik);
            cvInvert(&K[1], &iK);
            cvGEMM( &iK, &E, 1, 0, 0, &E, CV_GEMM_A_T );
            cvInvert(&K[0], &iK);
            cvMatMul(&E, &iK, &F);
            cvConvertScale( &F, matF, fabs(f[8]) > 0 ? 1./f[8] : 1 );
        }
    }

    return std::sqrt(reprojErr/(pointsTotal*2));
}
2432
/* Public C-API entry point for stereo calibration; forwards to the
   implementation with per-view error reporting disabled (perViewErr = NULL).
   Returns the overall RMS re-projection error. */
double cvStereoCalibrate( const CvMat* _objectPoints, const CvMat* _imagePoints1,
                        const CvMat* _imagePoints2, const CvMat* _npoints,
                        CvMat* _cameraMatrix1, CvMat* _distCoeffs1,
                        CvMat* _cameraMatrix2, CvMat* _distCoeffs2,
                        CvSize imageSize, CvMat* matR, CvMat* matT,
                        CvMat* matE, CvMat* matF,
                        int flags,
                        CvTermCriteria termCrit )
{
    return cvStereoCalibrateImpl(_objectPoints, _imagePoints1, _imagePoints2, _npoints, _cameraMatrix1,
                                 _distCoeffs1, _cameraMatrix2, _distCoeffs2, imageSize, matR, matT, matE,
                                 matF, NULL, flags, termCrit);
}
2445
2446
static void
2447
icvGetRectangles( const CvMat* cameraMatrix, const CvMat* distCoeffs,
2448
const CvMat* R, const CvMat* newCameraMatrix, CvSize imgSize,
2449
cv::Rect_<float>& inner, cv::Rect_<float>& outer )
2450
{
2451
const int N = 9;
2452
int x, y, k;
2453
cv::Ptr<CvMat> _pts(cvCreateMat(1, N*N, CV_32FC2));
2454
CvPoint2D32f* pts = (CvPoint2D32f*)(_pts->data.ptr);
2455
2456
for( y = k = 0; y < N; y++ )
2457
for( x = 0; x < N; x++ )
2458
pts[k++] = cvPoint2D32f((float)x*imgSize.width/(N-1),
2459
(float)y*imgSize.height/(N-1));
2460
2461
cvUndistortPoints(_pts, _pts, cameraMatrix, distCoeffs, R, newCameraMatrix);
2462
2463
float iX0=-FLT_MAX, iX1=FLT_MAX, iY0=-FLT_MAX, iY1=FLT_MAX;
2464
float oX0=FLT_MAX, oX1=-FLT_MAX, oY0=FLT_MAX, oY1=-FLT_MAX;
2465
// find the inscribed rectangle.
2466
// the code will likely not work with extreme rotation matrices (R) (>45%)
2467
for( y = k = 0; y < N; y++ )
2468
for( x = 0; x < N; x++ )
2469
{
2470
CvPoint2D32f p = pts[k++];
2471
oX0 = MIN(oX0, p.x);
2472
oX1 = MAX(oX1, p.x);
2473
oY0 = MIN(oY0, p.y);
2474
oY1 = MAX(oY1, p.y);
2475
2476
if( x == 0 )
2477
iX0 = MAX(iX0, p.x);
2478
if( x == N-1 )
2479
iX1 = MIN(iX1, p.x);
2480
if( y == 0 )
2481
iY0 = MAX(iY0, p.y);
2482
if( y == N-1 )
2483
iY1 = MIN(iY1, p.y);
2484
}
2485
inner = cv::Rect_<float>(iX0, iY0, iX1-iX0, iY1-iY0);
2486
outer = cv::Rect_<float>(oX0, oY0, oX1-oX0, oY1-oY0);
2487
}
2488
2489
2490
/* Computes rectification transforms (R1, R2), projection matrices (P1, P2)
   and optionally the disparity-to-depth matrix Q for a calibrated stereo
   pair.  alpha in [0, 1] blends between cropping to valid pixels (0) and
   keeping all source pixels (1); a negative alpha keeps the default scaling.
   roi1/roi2, when given, receive the valid-pixel regions in the rectified
   images. */
void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2,
                      const CvMat* _distCoeffs1, const CvMat* _distCoeffs2,
                      CvSize imageSize, const CvMat* matR, const CvMat* matT,
                      CvMat* _R1, CvMat* _R2, CvMat* _P1, CvMat* _P2,
                      CvMat* matQ, int flags, double alpha, CvSize newImgSize,
                      CvRect* roi1, CvRect* roi2 )
{
    double _om[3], _t[3] = {0}, _uu[3]={0,0,0}, _r_r[3][3], _pp[3][4];
    double _ww[3], _wr[3][3], _z[3] = {0,0,0}, _ri[3][3], _w3[3];
    cv::Rect_<float> inner1, inner2, outer1, outer2;

    CvMat om  = cvMat(3, 1, CV_64F, _om);
    CvMat t   = cvMat(3, 1, CV_64F, _t);
    CvMat uu  = cvMat(3, 1, CV_64F, _uu);
    CvMat r_r = cvMat(3, 3, CV_64F, _r_r);
    CvMat pp  = cvMat(3, 4, CV_64F, _pp);
    CvMat ww  = cvMat(3, 1, CV_64F, _ww); // temps
    CvMat w3  = cvMat(3, 1, CV_64F, _w3); // temps
    CvMat wR  = cvMat(3, 3, CV_64F, _wr);
    CvMat Z   = cvMat(3, 1, CV_64F, _z);
    CvMat Ri  = cvMat(3, 3, CV_64F, _ri);
    double nx = imageSize.width, ny = imageSize.height;
    int i, k;
    double nt, nw;

    if( matR->rows == 3 && matR->cols == 3 )
        cvRodrigues2(matR, &om);          // get vector rotation
    else
        cvConvert(matR, &om); // it's already a rotation vector
    cvConvertScale(&om, &om, -0.5); // get average rotation
    cvRodrigues2(&om, &r_r);        // rotate cameras to same orientation by averaging
    cvMatMul(&r_r, matT, &t);

    // idx == 0: baseline is mostly horizontal; idx == 1: mostly vertical.
    int idx = fabs(_t[0]) > fabs(_t[1]) ? 0 : 1;

    // if idx == 0
    //   e1 = T / ||T||
    //   e2 = e1 x [0,0,1]

    // if idx == 1
    //   e2 = T / ||T||
    //   e1 = e2 x [0,0,1]

    // e3 = e1 x e2

    _uu[2] = 1;
    cvCrossProduct(&uu, &t, &ww);
    nt = cvNorm(&t, 0, CV_L2);
    CV_Assert(fabs(nt) > 0);
    nw = cvNorm(&ww, 0, CV_L2);
    CV_Assert(fabs(nw) > 0);
    cvConvertScale(&ww, &ww, 1 / nw);
    cvCrossProduct(&t, &ww, &w3);
    nw = cvNorm(&w3, 0, CV_L2);
    CV_Assert(fabs(nw) > 0);
    cvConvertScale(&w3, &w3, 1 / nw);
    _uu[2] = 0;

    // Assemble the rectifying basis into the rows of wR.
    for (i = 0; i < 3; ++i)
    {
        _wr[idx][i] = -_t[i] / nt;
        _wr[idx ^ 1][i] = -_ww[i];
        _wr[2][i] = _w3[i] * (1 - 2 * idx); // if idx == 1 -> opposite direction
    }

    // apply to both views
    cvGEMM(&wR, &r_r, 1, 0, 0, &Ri, CV_GEMM_B_T);
    cvConvert( &Ri, _R1 );
    cvGEMM(&wR, &r_r, 1, 0, 0, &Ri, 0);
    cvConvert( &Ri, _R2 );
    // t now holds the baseline expressed in the rectified frame.
    cvMatMul(&Ri, matT, &t);

    // calculate projection/camera matrices
    // these contain the relevant rectified image internal params (fx, fy=fx, cx, cy)
    double fc_new = DBL_MAX;
    CvPoint2D64f cc_new[2] = {};

    newImgSize = newImgSize.width * newImgSize.height != 0 ? newImgSize : imageSize;
    const double ratio_x = (double)newImgSize.width / imageSize.width / 2;
    const double ratio_y = (double)newImgSize.height / imageSize.height / 2;
    const double ratio = idx == 1 ? ratio_x : ratio_y;
    // New focal length: average of the two focal lengths along the axis
    // perpendicular to the baseline, scaled for the output size.
    fc_new = (cvmGet(_cameraMatrix1, idx ^ 1, idx ^ 1) + cvmGet(_cameraMatrix2, idx ^ 1, idx ^ 1)) * ratio;

    for( k = 0; k < 2; k++ )
    {
        const CvMat* A = k == 0 ? _cameraMatrix1 : _cameraMatrix2;
        const CvMat* Dk = k == 0 ? _distCoeffs1 : _distCoeffs2;
        CvPoint2D32f _pts[4] = {};
        CvPoint3D32f _pts_3[4] = {};
        CvMat pts = cvMat(1, 4, CV_32FC2, _pts);
        CvMat pts_3 = cvMat(1, 4, CV_32FC3, _pts_3);

        // The four image corners, used to center the principal point.
        for( i = 0; i < 4; i++ )
        {
            int j = (i<2) ? 0 : 1;
            _pts[i].x = (float)((i % 2)*(nx));
            _pts[i].y = (float)(j*(ny));
        }
        cvUndistortPoints( &pts, &pts, A, Dk, 0, 0 );
        cvConvertPointsHomogeneous( &pts, &pts_3 );

        //Change camera matrix to have cc=[0,0] and fc = fc_new
        double _a_tmp[3][3];
        CvMat A_tmp = cvMat(3, 3, CV_64F, _a_tmp);
        _a_tmp[0][0]=fc_new;
        _a_tmp[1][1]=fc_new;
        _a_tmp[0][2]=0.0;
        _a_tmp[1][2]=0.0;
        cvProjectPoints2( &pts_3, k == 0 ? _R1 : _R2, &Z, &A_tmp, 0, &pts );
        CvScalar avg = cvAvg(&pts);
        // Place the principal point so the projected corners are centered.
        cc_new[k].x = (nx)/2 - avg.val[0];
        cc_new[k].y = (ny)/2 - avg.val[1];
    }

    // vertical focal length must be the same for both images to keep the epipolar constraint
    // (for horizontal epipolar lines -- TBD: check for vertical epipolar lines)
    // use fy for fx also, for simplicity

    // For simplicity, set the principal points for both cameras to be the average
    // of the two principal points (either one of or both x- and y- coordinates)
    if( flags & CALIB_ZERO_DISPARITY )
    {
        cc_new[0].x = cc_new[1].x = (cc_new[0].x + cc_new[1].x)*0.5;
        cc_new[0].y = cc_new[1].y = (cc_new[0].y + cc_new[1].y)*0.5;
    }
    else if( idx == 0 ) // horizontal stereo
        cc_new[0].y = cc_new[1].y = (cc_new[0].y + cc_new[1].y)*0.5;
    else // vertical stereo
        cc_new[0].x = cc_new[1].x = (cc_new[0].x + cc_new[1].x)*0.5;

    cvZero( &pp );
    _pp[0][0] = _pp[1][1] = fc_new;
    _pp[0][2] = cc_new[0].x;
    _pp[1][2] = cc_new[0].y;
    _pp[2][2] = 1;
    cvConvert(&pp, _P1);

    // P2 reuses the same buffer; only the principal point and the
    // translation column differ.
    _pp[0][2] = cc_new[1].x;
    _pp[1][2] = cc_new[1].y;
    _pp[idx][3] = _t[idx]*fc_new; // baseline * focal length
    cvConvert(&pp, _P2);

    alpha = MIN(alpha, 1.);

    icvGetRectangles( _cameraMatrix1, _distCoeffs1, _R1, _P1, imageSize, inner1, outer1 );
    icvGetRectangles( _cameraMatrix2, _distCoeffs2, _R2, _P2, imageSize, inner2, outer2 );

    {
        newImgSize = newImgSize.width*newImgSize.height != 0 ? newImgSize : imageSize;
        double cx1_0 = cc_new[0].x;
        double cy1_0 = cc_new[0].y;
        double cx2_0 = cc_new[1].x;
        double cy2_0 = cc_new[1].y;
        double cx1 = newImgSize.width*cx1_0/imageSize.width;
        double cy1 = newImgSize.height*cy1_0/imageSize.height;
        double cx2 = newImgSize.width*cx2_0/imageSize.width;
        double cy2 = newImgSize.height*cy2_0/imageSize.height;
        double s = 1.;

        if( alpha >= 0 )
        {
            // s0: smallest zoom at which the inner (all-valid) rectangles
            // fill the output; s1: largest zoom that keeps the outer
            // rectangles inside the output.  Blend by alpha.
            double s0 = std::max(std::max(std::max((double)cx1/(cx1_0 - inner1.x), (double)cy1/(cy1_0 - inner1.y)),
                                          (double)(newImgSize.width - cx1)/(inner1.x + inner1.width - cx1_0)),
                                 (double)(newImgSize.height - cy1)/(inner1.y + inner1.height - cy1_0));
            s0 = std::max(std::max(std::max(std::max((double)cx2/(cx2_0 - inner2.x), (double)cy2/(cy2_0 - inner2.y)),
                                            (double)(newImgSize.width - cx2)/(inner2.x + inner2.width - cx2_0)),
                                   (double)(newImgSize.height - cy2)/(inner2.y + inner2.height - cy2_0)),
                          s0);

            double s1 = std::min(std::min(std::min((double)cx1/(cx1_0 - outer1.x), (double)cy1/(cy1_0 - outer1.y)),
                                          (double)(newImgSize.width - cx1)/(outer1.x + outer1.width - cx1_0)),
                                 (double)(newImgSize.height - cy1)/(outer1.y + outer1.height - cy1_0));
            s1 = std::min(std::min(std::min(std::min((double)cx2/(cx2_0 - outer2.x), (double)cy2/(cy2_0 - outer2.y)),
                                            (double)(newImgSize.width - cx2)/(outer2.x + outer2.width - cx2_0)),
                                   (double)(newImgSize.height - cy2)/(outer2.y + outer2.height - cy2_0)),
                          s1);

            s = s0*(1 - alpha) + s1*alpha;
        }

        fc_new *= s;
        cc_new[0] = cvPoint2D64f(cx1, cy1);
        cc_new[1] = cvPoint2D64f(cx2, cy2);

        cvmSet(_P1, 0, 0, fc_new);
        cvmSet(_P1, 1, 1, fc_new);
        cvmSet(_P1, 0, 2, cx1);
        cvmSet(_P1, 1, 2, cy1);

        cvmSet(_P2, 0, 0, fc_new);
        cvmSet(_P2, 1, 1, fc_new);
        cvmSet(_P2, 0, 2, cx2);
        cvmSet(_P2, 1, 2, cy2);
        // Rescale the baseline term along with the focal length.
        cvmSet(_P2, idx, 3, s*cvmGet(_P2, idx, 3));

        if(roi1)
        {
            // Valid-pixel ROI: inner rectangle mapped into the new image,
            // clipped to the output bounds.
            *roi1 = cvRect(
                cv::Rect(cvCeil((inner1.x - cx1_0)*s + cx1),
                         cvCeil((inner1.y - cy1_0)*s + cy1),
                         cvFloor(inner1.width*s), cvFloor(inner1.height*s))
                & cv::Rect(0, 0, newImgSize.width, newImgSize.height)
            );
        }

        if(roi2)
        {
            *roi2 = cvRect(
                cv::Rect(cvCeil((inner2.x - cx2_0)*s + cx2),
                         cvCeil((inner2.y - cy2_0)*s + cy2),
                         cvFloor(inner2.width*s), cvFloor(inner2.height*s))
                & cv::Rect(0, 0, newImgSize.width, newImgSize.height)
            );
        }
    }

    if( matQ )
    {
        // 4x4 disparity-to-depth mapping for cvReprojectImageTo3D.
        double q[] =
        {
            1, 0, 0, -cc_new[0].x,
            0, 1, 0, -cc_new[0].y,
            0, 0, 0, fc_new,
            0, 0, -1./_t[idx],
            (idx == 0 ? cc_new[0].x - cc_new[1].x : cc_new[0].y - cc_new[1].y)/_t[idx]
        };
        CvMat Q = cvMat(4, 4, CV_64F, q);
        cvConvert( &Q, matQ );
    }
}
2720
2721
2722
/* Computes a new camera matrix for undistortion.  alpha in [0, 1] blends
   between keeping only valid (undistorted) pixels (0) and retaining all
   source pixels (1).  With centerPrincipalPoint the principal point is
   forced to the center of newImgSize and only the focal length is scaled.
   validPixROI, when given, receives the all-valid-pixels rectangle. */
void cvGetOptimalNewCameraMatrix( const CvMat* cameraMatrix, const CvMat* distCoeffs,
                                  CvSize imgSize, double alpha,
                                  CvMat* newCameraMatrix, CvSize newImgSize,
                                  CvRect* validPixROI, int centerPrincipalPoint )
{
    cv::Rect_<float> inner, outer;
    newImgSize = newImgSize.width*newImgSize.height != 0 ? newImgSize : imgSize;

    double M[3][3];
    CvMat matM = cvMat(3, 3, CV_64F, M);
    cvConvert(cameraMatrix, &matM);

    if( centerPrincipalPoint )
    {
        double cx0 = M[0][2];
        double cy0 = M[1][2];
        double cx = (newImgSize.width)*0.5;
        double cy = (newImgSize.height)*0.5;

        icvGetRectangles( cameraMatrix, distCoeffs, 0, cameraMatrix, imgSize, inner, outer );
        // s0: zoom at which the inner rectangle fills the view;
        // s1: zoom at which the outer rectangle still fits; blend by alpha.
        double s0 = std::max(std::max(std::max((double)cx/(cx0 - inner.x), (double)cy/(cy0 - inner.y)),
                                      (double)cx/(inner.x + inner.width - cx0)),
                             (double)cy/(inner.y + inner.height - cy0));
        double s1 = std::min(std::min(std::min((double)cx/(cx0 - outer.x), (double)cy/(cy0 - outer.y)),
                                      (double)cx/(outer.x + outer.width - cx0)),
                             (double)cy/(outer.y + outer.height - cy0));
        double s = s0*(1 - alpha) + s1*alpha;

        M[0][0] *= s;
        M[1][1] *= s;
        M[0][2] = cx;
        M[1][2] = cy;

        if( validPixROI )
        {
            // Map the inner rectangle through the zoom/recentering, then
            // clip to the output image bounds.
            inner = cv::Rect_<float>((float)((inner.x - cx0)*s + cx),
                                     (float)((inner.y - cy0)*s + cy),
                                     (float)(inner.width*s),
                                     (float)(inner.height*s));
            cv::Rect r(cvCeil(inner.x), cvCeil(inner.y), cvFloor(inner.width), cvFloor(inner.height));
            r &= cv::Rect(0, 0, newImgSize.width, newImgSize.height);
            *validPixROI = cvRect(r);
        }
    }
    else
    {
        // Get inscribed and circumscribed rectangles in normalized
        // (independent of camera matrix) coordinates
        icvGetRectangles( cameraMatrix, distCoeffs, 0, 0, imgSize, inner, outer );

        // Projection mapping inner rectangle to viewport
        double fx0 = (newImgSize.width) / inner.width;
        double fy0 = (newImgSize.height) / inner.height;
        double cx0 = -fx0 * inner.x;
        double cy0 = -fy0 * inner.y;

        // Projection mapping outer rectangle to viewport
        double fx1 = (newImgSize.width) / outer.width;
        double fy1 = (newImgSize.height) / outer.height;
        double cx1 = -fx1 * outer.x;
        double cy1 = -fy1 * outer.y;

        // Interpolate between the two optimal projections
        M[0][0] = fx0*(1 - alpha) + fx1*alpha;
        M[1][1] = fy0*(1 - alpha) + fy1*alpha;
        M[0][2] = cx0*(1 - alpha) + cx1*alpha;
        M[1][2] = cy0*(1 - alpha) + cy1*alpha;

        if( validPixROI )
        {
            // Recompute the valid region under the interpolated matrix.
            icvGetRectangles( cameraMatrix, distCoeffs, 0, &matM, imgSize, inner, outer );
            cv::Rect r = inner;
            r &= cv::Rect(0, 0, newImgSize.width, newImgSize.height);
            *validPixROI = cvRect(r);
        }
    }

    cvConvert(&matM, newCameraMatrix);
}
2801
2802
2803
/* Computes rectification homographies H1, H2 from point correspondences and a
   fundamental matrix, without knowing the camera intrinsics (Hartley's method).
   Returns 1 on success, 0 if all correspondences are rejected as outliers.
   F0 is forced to rank 2 via SVD; H2 maps the second image so its epipole goes
   to infinity, and H1 is chosen to minimize horizontal disparity of the
   transformed points in a least-squares sense. */
CV_IMPL int cvStereoRectifyUncalibrated(
    const CvMat* _points1, const CvMat* _points2,
    const CvMat* F0, CvSize imgSize,
    CvMat* _H1, CvMat* _H2, double threshold )
{
    Ptr<CvMat> _m1, _m2, _lines1, _lines2;

    int i, j, npoints;
    double cx, cy;
    // Fixed 3x3 / 3x1 work buffers shared by the CvMat headers below.
    double u[9], v[9], w[9], f[9], h1[9], h2[9], h0[9], e2[3] = {0};
    CvMat E2 = cvMat( 3, 1, CV_64F, e2 );
    CvMat U = cvMat( 3, 3, CV_64F, u );
    CvMat V = cvMat( 3, 3, CV_64F, v );
    CvMat W = cvMat( 3, 3, CV_64F, w );
    CvMat F = cvMat( 3, 3, CV_64F, f );
    CvMat H1 = cvMat( 3, 3, CV_64F, h1 );
    CvMat H2 = cvMat( 3, 3, CV_64F, h2 );
    CvMat H0 = cvMat( 3, 3, CV_64F, h0 );

    CvPoint2D64f* m1;
    CvPoint2D64f* m2;
    CvPoint3D64f* lines1;
    CvPoint3D64f* lines2;

    CV_Assert( CV_IS_MAT(_points1) && CV_IS_MAT(_points2) &&
               CV_ARE_SIZES_EQ(_points1, _points2) );

    // Each correspondence occupies 2 scalars (x, y) regardless of layout.
    npoints = _points1->rows * _points1->cols * CV_MAT_CN(_points1->type) / 2;

    _m1.reset(cvCreateMat( _points1->rows, _points1->cols, CV_64FC(CV_MAT_CN(_points1->type)) ));
    _m2.reset(cvCreateMat( _points2->rows, _points2->cols, CV_64FC(CV_MAT_CN(_points2->type)) ));
    _lines1.reset(cvCreateMat( 1, npoints, CV_64FC3 ));
    _lines2.reset(cvCreateMat( 1, npoints, CV_64FC3 ));

    cvConvert( F0, &F );

    // Enforce rank-2 on F: zero the smallest singular value and recompose.
    cvSVD( (CvMat*)&F, &W, &U, &V, CV_SVD_U_T + CV_SVD_V_T );
    W.data.db[8] = 0.;
    cvGEMM( &U, &W, 1, 0, 0, &W, CV_GEMM_A_T );
    cvMatMul( &W, &V, &F );

    // Image center used as the origin of the rectifying transforms.
    cx = cvRound( (imgSize.width)*0.5 );
    cy = cvRound( (imgSize.height)*0.5 );

    cvZero( _H1 );
    cvZero( _H2 );

    cvConvert( _points1, _m1 );
    cvConvert( _points2, _m2 );
    cvReshape( _m1, _m1, 2, 1 );
    cvReshape( _m2, _m2, 2, 1 );

    m1 = (CvPoint2D64f*)_m1->data.ptr;
    m2 = (CvPoint2D64f*)_m2->data.ptr;
    lines1 = (CvPoint3D64f*)_lines1->data.ptr;
    lines2 = (CvPoint3D64f*)_lines2->data.ptr;

    if( threshold > 0 )
    {
        cvComputeCorrespondEpilines( _m1, 1, &F, _lines1 );
        cvComputeCorrespondEpilines( _m2, 2, &F, _lines2 );

        // measure distance from points to the corresponding epilines, mark outliers
        // (inliers are compacted to the front of m1/m2 in place)
        for( i = j = 0; i < npoints; i++ )
        {
            if( fabs(m1[i].x*lines2[i].x +
                     m1[i].y*lines2[i].y +
                     lines2[i].z) <= threshold &&
                fabs(m2[i].x*lines1[i].x +
                     m2[i].y*lines1[i].y +
                     lines1[i].z) <= threshold )
            {
                if( j < i )
                {
                    m1[j] = m1[i];
                    m2[j] = m2[i];
                }
                j++;
            }
        }

        npoints = j;
        if( npoints == 0 )
            return 0;  // every correspondence was an outlier
    }

    _m1->cols = _m2->cols = npoints;
    // e2 = third row of U = left null vector of F = epipole in the second image.
    memcpy( E2.data.db, U.data.db + 6, sizeof(e2));
    cvScale( &E2, &E2, e2[2] > 0 ? 1 : -1 );  // normalize sign so e2[2] >= 0

    // T: translate the image center to the origin.
    double t[] =
    {
        1, 0, -cx,
        0, 1, -cy,
        0, 0, 1
    };
    CvMat T = cvMat(3, 3, CV_64F, t);
    cvMatMul( &T, &E2, &E2 );

    int mirror = e2[0] < 0;  // epipole on the left: final result must be mirrored back
    // R: rotate so the (translated) epipole lies on the positive x axis.
    double d = MAX(std::sqrt(e2[0]*e2[0] + e2[1]*e2[1]),DBL_EPSILON);
    double alpha = e2[0]/d;
    double beta = e2[1]/d;
    double r[] =
    {
        alpha, beta, 0,
        -beta, alpha, 0,
        0, 0, 1
    };
    CvMat R = cvMat(3, 3, CV_64F, r);
    cvMatMul( &R, &T, &T );
    cvMatMul( &R, &E2, &E2 );
    // K: projective map sending the epipole (now on the x axis) to infinity.
    double invf = fabs(e2[2]) < 1e-6*fabs(e2[0]) ? 0 : -e2[2]/e2[0];
    double k[] =
    {
        1, 0, 0,
        0, 1, 0,
        invf, 0, 1
    };
    CvMat K = cvMat(3, 3, CV_64F, k);
    cvMatMul( &K, &T, &H2 );
    cvMatMul( &K, &E2, &E2 );

    // iT: translate the origin back to the image center; H2 = iT*K*R*T.
    double it[] =
    {
        1, 0, cx,
        0, 1, cy,
        0, 0, 1
    };
    CvMat iT = cvMat( 3, 3, CV_64F, it );
    cvMatMul( &iT, &H2, &H2 );

    // Reload the un-transformed epipole for building H0 below.
    memcpy( E2.data.db, U.data.db + 6, sizeof(e2));
    cvScale( &E2, &E2, e2[2] > 0 ? 1 : -1 );

    // H0 = H2 * ( [e2]_x * F + e2 * [1 1 1] ): a projective transform compatible
    // with H2 for the first image (matching epipolar geometry).
    double e2_x[] =
    {
        0, -e2[2], e2[1],
        e2[2], 0, -e2[0],
        -e2[1], e2[0], 0
    };
    double e2_111[] =
    {
        e2[0], e2[0], e2[0],
        e2[1], e2[1], e2[1],
        e2[2], e2[2], e2[2],
    };
    CvMat E2_x = cvMat(3, 3, CV_64F, e2_x);
    CvMat E2_111 = cvMat(3, 3, CV_64F, e2_111);
    cvMatMulAdd(&E2_x, &F, &E2_111, &H0 );
    cvMatMul(&H2, &H0, &H0);
    CvMat E1=cvMat(3, 1, CV_64F, V.data.db+6);  // epipole in the first image (last row of V)
    cvMatMul(&H0, &E1, &E1);

    // Transform the (inlier) points and solve the least-squares shear/scale
    // [x0 x1 x2] that aligns first-image x coordinates with the second image.
    cvPerspectiveTransform( _m1, _m1, &H0 );
    cvPerspectiveTransform( _m2, _m2, &H2 );
    CvMat A = cvMat( 1, npoints, CV_64FC3, lines1 ), BxBy, B;
    double x[3] = {0};
    CvMat X = cvMat( 3, 1, CV_64F, x );
    cvConvertPointsHomogeneous( _m1, &A );
    cvReshape( &A, &A, 1, npoints );
    cvReshape( _m2, &BxBy, 1, npoints );
    cvGetCol( &BxBy, &B, 0 );
    cvSolve( &A, &B, &X, CV_SVD );

    // Ha applies the solved x-alignment only; y and w rows stay identity.
    double ha[] =
    {
        x[0], x[1], x[2],
        0, 1, 0,
        0, 0, 1
    };
    CvMat Ha = cvMat(3, 3, CV_64F, ha);
    cvMatMul( &Ha, &H0, &H1 );
    cvPerspectiveTransform( _m1, _m1, &Ha );

    if( mirror )
    {
        // Flip both rectified images around the image center so they stay upright.
        double mm[] = { -1, 0, cx*2, 0, -1, cy*2, 0, 0, 1 };
        CvMat MM = cvMat(3, 3, CV_64F, mm);
        cvMatMul( &MM, &H1, &H1 );
        cvMatMul( &MM, &H2, &H2 );
    }

    cvConvert( &H1, _H1 );
    cvConvert( &H2, _H2 );

    return 1;
}

/* Reprojects a disparity map to a 3-channel 3D-point image using the 4x4
   reprojection matrix Q: for each pixel (x, y, d) computes Q*(x, y, d, 1)^T
   and de-homogenizes. If handleMissingValues is set, pixels whose disparity
   equals the global minimum are treated as invalid and get Z = 10000. */
void cv::reprojectImageTo3D( InputArray _disparity,
                             OutputArray __3dImage, InputArray _Qmat,
                             bool handleMissingValues, int dtype )
{
    CV_INSTRUMENT_REGION();

    Mat disparity = _disparity.getMat(), Q = _Qmat.getMat();
    int stype = disparity.type();

    CV_Assert( stype == CV_8UC1 || stype == CV_16SC1 ||
               stype == CV_32SC1 || stype == CV_32FC1 );
    CV_Assert( Q.size() == Size(4,4) );

    if( dtype < 0 )
        dtype = CV_32FC3;  // default output type
    else
    {
        // Force 3 channels; only 16S/32S/32F depths are supported.
        dtype = CV_MAKETYPE(CV_MAT_DEPTH(dtype), 3);
        CV_Assert( dtype == CV_16SC3 || dtype == CV_32SC3 || dtype == CV_32FC3 );
    }

    __3dImage.create(disparity.size(), CV_MAKETYPE(dtype, 3));
    Mat _3dImage = __3dImage.getMat();

    const float bigZ = 10000.f;  // Z assigned to "missing" disparity pixels
    Matx44d _Q;
    Q.convertTo(_Q, CV_64F);

    int x, cols = disparity.cols;
    CV_Assert( cols >= 0 );

    // Per-row scratch buffers: sbuf holds the disparity row promoted to float,
    // dbuf holds the computed 3D points before any integer conversion.
    std::vector<float> _sbuf(cols);
    std::vector<Vec3f> _dbuf(cols);
    float* sbuf = &_sbuf[0];
    Vec3f* dbuf = &_dbuf[0];
    double minDisparity = FLT_MAX;

    // NOTE: here we quietly assume that at least one pixel in the disparity map is not defined.
    // and we set the corresponding Z's to some fixed big value.
    if( handleMissingValues )
        cv::minMaxIdx( disparity, &minDisparity, 0, 0, 0 );

    for( int y = 0; y < disparity.rows; y++ )
    {
        float* sptr = sbuf;
        Vec3f* dptr = dbuf;

        // Promote the source row to float (or alias it directly when already float).
        if( stype == CV_8UC1 )
        {
            const uchar* sptr0 = disparity.ptr<uchar>(y);
            for( x = 0; x < cols; x++ )
                sptr[x] = (float)sptr0[x];
        }
        else if( stype == CV_16SC1 )
        {
            const short* sptr0 = disparity.ptr<short>(y);
            for( x = 0; x < cols; x++ )
                sptr[x] = (float)sptr0[x];
        }
        else if( stype == CV_32SC1 )
        {
            const int* sptr0 = disparity.ptr<int>(y);
            for( x = 0; x < cols; x++ )
                sptr[x] = (float)sptr0[x];
        }
        else
            sptr = disparity.ptr<float>(y);

        // For float output, write straight into the destination row.
        if( dtype == CV_32FC3 )
            dptr = _3dImage.ptr<Vec3f>(y);

        for( x = 0; x < cols; x++)
        {
            double d = sptr[x];
            Vec4d homg_pt = _Q*Vec4d(x, y, d, 1.0);
            dptr[x] = Vec3d(homg_pt.val);
            dptr[x] /= homg_pt[3];  // de-homogenize; w==0 yields inf components

            if( fabs(d-minDisparity) <= FLT_EPSILON )
                dptr[x][2] = bigZ;  // mark missing-disparity pixel
        }

        // Integer outputs: convert the float scratch row with saturation.
        if( dtype == CV_16SC3 )
        {
            Vec3s* dptr0 = _3dImage.ptr<Vec3s>(y);
            for( x = 0; x < cols; x++ )
            {
                dptr0[x] = dptr[x];
            }
        }
        else if( dtype == CV_32SC3 )
        {
            Vec3i* dptr0 = _3dImage.ptr<Vec3i>(y);
            for( x = 0; x < cols; x++ )
            {
                dptr0[x] = dptr[x];
            }
        }
    }
}

void cvReprojectImageTo3D( const CvArr* disparityImage,
3096
CvArr* _3dImage, const CvMat* matQ,
3097
int handleMissingValues )
3098
{
3099
cv::Mat disp = cv::cvarrToMat(disparityImage);
3100
cv::Mat _3dimg = cv::cvarrToMat(_3dImage);
3101
cv::Mat mq = cv::cvarrToMat(matQ);
3102
CV_Assert( disp.size() == _3dimg.size() );
3103
int dtype = _3dimg.type();
3104
CV_Assert( dtype == CV_16SC3 || dtype == CV_32SC3 || dtype == CV_32FC3 );
3105
3106
cv::reprojectImageTo3D(disp, _3dimg, mq, handleMissingValues != 0, dtype );
3107
}
3108
3109
3110
/* RQ decomposition of a 3x3 matrix M into an upper-triangular R and an
   orthogonal Q, via three Givens rotations (around x, y, z). Optionally
   outputs the individual rotations Qx, Qy, Qz and the Euler angles (degrees).
   The decomposition is disambiguated so that R's first two diagonal entries
   are non-negative. */
CV_IMPL void
cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ,
               CvMat *matrixQx, CvMat *matrixQy, CvMat *matrixQz,
               CvPoint3D64f *eulerAngles)
{
    double matM[3][3], matR[3][3], matQ[3][3];
    CvMat M = cvMat(3, 3, CV_64F, matM);
    CvMat R = cvMat(3, 3, CV_64F, matR);
    CvMat Q = cvMat(3, 3, CV_64F, matQ);
    double z, c, s;

    /* Validate parameters. */
    CV_Assert( CV_IS_MAT(matrixM) && CV_IS_MAT(matrixR) && CV_IS_MAT(matrixQ) &&
               matrixM->cols == 3 && matrixM->rows == 3 &&
               CV_ARE_SIZES_EQ(matrixM, matrixR) && CV_ARE_SIZES_EQ(matrixM, matrixQ));

    cvConvert(matrixM, &M);

    /* Find Givens rotation Q_x for x axis (left multiplication). */
    /*
         ( 1  0  0 )
    Qx = ( 0  c  s ), c = m33/sqrt(m32^2 + m33^2), s = m32/sqrt(m32^2 + m33^2)
         ( 0 -s  c )
    */
    s = matM[2][1];
    c = matM[2][2];
    z = 1./std::sqrt(c * c + s * s + DBL_EPSILON);  // DBL_EPSILON guards the degenerate all-zero case
    c *= z;
    s *= z;

    double _Qx[3][3] = { {1, 0, 0}, {0, c, s}, {0, -s, c} };
    CvMat Qx = cvMat(3, 3, CV_64F, _Qx);

    cvMatMul(&M, &Qx, &R);
    assert(fabs(matR[2][1]) < FLT_EPSILON);
    matR[2][1] = 0;  // clean up numerical residue of the annihilated entry

    /* Find Givens rotation for y axis. */
    /*
         ( c  0 -s )
    Qy = ( 0  1  0 ), c = m33/sqrt(m31^2 + m33^2), s = -m31/sqrt(m31^2 + m33^2)
         ( s  0  c )
    */
    s = -matR[2][0];
    c = matR[2][2];
    z = 1./std::sqrt(c * c + s * s + DBL_EPSILON);
    c *= z;
    s *= z;

    double _Qy[3][3] = { {c, 0, -s}, {0, 1, 0}, {s, 0, c} };
    CvMat Qy = cvMat(3, 3, CV_64F, _Qy);
    cvMatMul(&R, &Qy, &M);

    assert(fabs(matM[2][0]) < FLT_EPSILON);
    matM[2][0] = 0;

    /* Find Givens rotation for z axis. */
    /*
         ( c  s  0 )
    Qz = (-s  c  0 ), c = m22/sqrt(m21^2 + m22^2), s = m21/sqrt(m21^2 + m22^2)
         ( 0  0  1 )
    */

    s = matM[1][0];
    c = matM[1][1];
    z = 1./std::sqrt(c * c + s * s + DBL_EPSILON);
    c *= z;
    s *= z;

    double _Qz[3][3] = { {c, s, 0}, {-s, c, 0}, {0, 0, 1} };
    CvMat Qz = cvMat(3, 3, CV_64F, _Qz);

    cvMatMul(&M, &Qz, &R);
    assert(fabs(matR[1][0]) < FLT_EPSILON);
    matR[1][0] = 0;

    // Solve the decomposition ambiguity.
    // Diagonal entries of R, except the last one, shall be positive.
    // Further rotate R by 180 degree if necessary
    if( matR[0][0] < 0 )
    {
        if( matR[1][1] < 0 )
        {
            // rotate around z for 180 degree, i.e. a rotation matrix of
            // [-1,  0,  0],
            // [ 0, -1,  0],
            // [ 0,  0,  1]
            matR[0][0] *= -1;
            matR[0][1] *= -1;
            matR[1][1] *= -1;

            _Qz[0][0] *= -1;
            _Qz[0][1] *= -1;
            _Qz[1][0] *= -1;
            _Qz[1][1] *= -1;
        }
        else
        {
            // rotate around y for 180 degree, i.e. a rotation matrix of
            // [-1,  0,  0],
            // [ 0,  1,  0],
            // [ 0,  0, -1]
            matR[0][0] *= -1;
            matR[0][2] *= -1;
            matR[1][2] *= -1;
            matR[2][2] *= -1;

            cvTranspose( &Qz, &Qz );

            _Qy[0][0] *= -1;
            _Qy[0][2] *= -1;
            _Qy[2][0] *= -1;
            _Qy[2][2] *= -1;
        }
    }
    else if( matR[1][1] < 0 )
    {
        // ??? for some reason, we never get here ???

        // rotate around x for 180 degree, i.e. a rotation matrix of
        // [ 1,  0,  0],
        // [ 0, -1,  0],
        // [ 0,  0, -1]
        matR[0][1] *= -1;
        matR[0][2] *= -1;
        matR[1][1] *= -1;
        matR[1][2] *= -1;
        matR[2][2] *= -1;

        cvTranspose( &Qz, &Qz );
        cvTranspose( &Qy, &Qy );

        _Qx[1][1] *= -1;
        _Qx[1][2] *= -1;
        _Qx[2][1] *= -1;
        _Qx[2][2] *= -1;
    }

    // calculate the euler angle
    if( eulerAngles )
    {
        // Sign of the off-diagonal entry picks the rotation direction.
        eulerAngles->x = acos(_Qx[1][1]) * (_Qx[1][2] >= 0 ? 1 : -1) * (180.0 / CV_PI);
        eulerAngles->y = acos(_Qy[0][0]) * (_Qy[2][0] >= 0 ? 1 : -1) * (180.0 / CV_PI);
        eulerAngles->z = acos(_Qz[0][0]) * (_Qz[0][1] >= 0 ? 1 : -1) * (180.0 / CV_PI);
    }

    /* Calculate orthogonal matrix. */
    /*
        Q = QzT * QyT * QxT
    */
    cvGEMM( &Qz, &Qy, 1, 0, 0, &M, CV_GEMM_A_T + CV_GEMM_B_T );
    cvGEMM( &M, &Qx, 1, 0, 0, &Q, CV_GEMM_B_T );

    /* Save R and Q matrices. */
    cvConvert( &R, matrixR );
    cvConvert( &Q, matrixQ );

    if( matrixQx )
        cvConvert(&Qx, matrixQx);
    if( matrixQy )
        cvConvert(&Qy, matrixQy);
    if( matrixQz )
        cvConvert(&Qz, matrixQz);
}

/* Decomposes a 3x4 projection matrix P into an intrinsic (calibration) matrix,
   a rotation matrix, and the homogeneous camera position (4x1). The position is
   the right null vector of P (found via SVD); intrinsics and rotation come from
   the RQ decomposition of P's left 3x3 submatrix. Optionally outputs the
   per-axis rotations and Euler angles (forwarded to cvRQDecomp3x3). */
CV_IMPL void
cvDecomposeProjectionMatrix( const CvMat *projMatr, CvMat *calibMatr,
                             CvMat *rotMatr, CvMat *posVect,
                             CvMat *rotMatrX, CvMat *rotMatrY,
                             CvMat *rotMatrZ, CvPoint3D64f *eulerAngles)
{
    double tmpProjMatrData[16], tmpMatrixDData[16], tmpMatrixVData[16];
    CvMat tmpProjMatr = cvMat(4, 4, CV_64F, tmpProjMatrData);
    CvMat tmpMatrixD = cvMat(4, 4, CV_64F, tmpMatrixDData);
    CvMat tmpMatrixV = cvMat(4, 4, CV_64F, tmpMatrixVData);
    CvMat tmpMatrixM;

    /* Validate parameters. */
    if(projMatr == 0 || calibMatr == 0 || rotMatr == 0 || posVect == 0)
        CV_Error(CV_StsNullPtr, "Some of parameters is a NULL pointer!");

    if(!CV_IS_MAT(projMatr) || !CV_IS_MAT(calibMatr) || !CV_IS_MAT(rotMatr) || !CV_IS_MAT(posVect))
        CV_Error(CV_StsUnsupportedFormat, "Input parameters must be a matrices!");

    if(projMatr->cols != 4 || projMatr->rows != 3)
        CV_Error(CV_StsUnmatchedSizes, "Size of projection matrix must be 3x4!");

    if(calibMatr->cols != 3 || calibMatr->rows != 3 || rotMatr->cols != 3 || rotMatr->rows != 3)
        CV_Error(CV_StsUnmatchedSizes, "Size of calibration and rotation matrices must be 3x3!");

    if(posVect->cols != 1 || posVect->rows != 4)
        CV_Error(CV_StsUnmatchedSizes, "Size of position vector must be 4x1!");

    /* Compute position vector. */
    cvSetZero(&tmpProjMatr); // Add zero row to make matrix square.
    int i, k;
    for(i = 0; i < 3; i++)
        for(k = 0; k < 4; k++)
            cvmSet(&tmpProjMatr, i, k, cvmGet(projMatr, i, k));

    cvSVD(&tmpProjMatr, &tmpMatrixD, NULL, &tmpMatrixV, CV_SVD_MODIFY_A + CV_SVD_V_T);

    /* Save position vector. */
    for(i = 0; i < 4; i++)
        cvmSet(posVect, i, 0, cvmGet(&tmpMatrixV, 3, i)); // Solution is last row of V.

    /* Compute calibration and rotation matrices via RQ decomposition. */
    cvGetCols(projMatr, &tmpMatrixM, 0, 3); // M is first square matrix of P.

    CV_Assert(cvDet(&tmpMatrixM) != 0.0); // So far only finite cameras could be decomposed, so M has to be nonsingular [det(M) != 0].

    cvRQDecomp3x3(&tmpMatrixM, calibMatr, rotMatr, rotMatrX, rotMatrY, rotMatrZ, eulerAngles);
}

namespace cv
{

/* Flattens per-view object/image point vectors into single continuous rows:
   objPtMat (1 x total, CV_32FC3), imgPtMat1 / imgPtMat2 (1 x total, CV_32FC2),
   and npoints (1 x nimages, CV_32S) holding each view's point count.
   iFixedPoint > 0 requests the object-releasing calibration method, in which
   case every view must have the same, identical object points. */
static void collectCalibrationData( InputArrayOfArrays objectPoints,
                                    InputArrayOfArrays imagePoints1,
                                    InputArrayOfArrays imagePoints2,
                                    int iFixedPoint,
                                    Mat& objPtMat, Mat& imgPtMat1, Mat* imgPtMat2,
                                    Mat& npoints )
{
    int nimages = (int)objectPoints.total();
    int i, j = 0, ni = 0, total = 0;
    CV_Assert(nimages > 0 && nimages == (int)imagePoints1.total() &&
              (!imgPtMat2 || nimages == (int)imagePoints2.total()));

    // First pass: validate each view and count the total number of points.
    for( i = 0; i < nimages; i++ )
    {
        ni = objectPoints.getMat(i).checkVector(3, CV_32F);
        if( ni <= 0 )
            CV_Error(CV_StsUnsupportedFormat, "objectPoints should contain vector of vectors of points of type Point3f");
        int ni1 = imagePoints1.getMat(i).checkVector(2, CV_32F);
        if( ni1 <= 0 )
            CV_Error(CV_StsUnsupportedFormat, "imagePoints1 should contain vector of vectors of points of type Point2f");
        CV_Assert( ni == ni1 );

        total += ni;
    }

    npoints.create(1, (int)nimages, CV_32S);
    objPtMat.create(1, (int)total, CV_32FC3);
    imgPtMat1.create(1, (int)total, CV_32FC2);
    Point2f* imgPtData2 = 0;

    if( imgPtMat2 )
    {
        imgPtMat2->create(1, (int)total, CV_32FC2);
        imgPtData2 = imgPtMat2->ptr<Point2f>();
    }

    Point3f* objPtData = objPtMat.ptr<Point3f>();
    Point2f* imgPtData1 = imgPtMat1.ptr<Point2f>();

    // Second pass: copy each view's points into the flattened buffers.
    for( i = 0; i < nimages; i++, j += ni )
    {
        Mat objpt = objectPoints.getMat(i);
        Mat imgpt1 = imagePoints1.getMat(i);
        ni = objpt.checkVector(3, CV_32F);
        npoints.at<int>(i) = ni;
        for (int n = 0; n < ni; ++n)
        {
            objPtData[j + n] = objpt.ptr<Point3f>()[n];
            imgPtData1[j + n] = imgpt1.ptr<Point2f>()[n];
        }

        if( imgPtData2 )
        {
            Mat imgpt2 = imagePoints2.getMat(i);
            int ni2 = imgpt2.checkVector(2, CV_32F);
            CV_Assert( ni == ni2 );
            for (int n = 0; n < ni2; ++n)
            {
                imgPtData2[j + n] = imgpt2.ptr<Point2f>()[n];
            }
        }
    }

    ni = npoints.at<int>(0);
    bool releaseObject = iFixedPoint > 0 && iFixedPoint < ni - 1;
    // check object points. If not qualified, report errors.
    if( releaseObject )
    {
        for( i = 1; i < nimages; i++ )
        {
            if( npoints.at<int>(i) != ni )
            {
                CV_Error( CV_StsBadArg, "All objectPoints[i].size() should be equal when "
                                        "object-releasing method is requested." );
            }
            // Element-wise compare view i's object points against view 0's.
            Mat ocmp = objPtMat.colRange(ni * i, ni * i + ni) != objPtMat.colRange(0, ni);
            ocmp = ocmp.reshape(1);
            if( countNonZero(ocmp) )
            {
                CV_Error( CV_StsBadArg, "All objectPoints[i] should be identical when object-releasing"
                                        " method is requested." );
            }
        }
    }
}

/* Convenience overload without the object-releasing option (iFixedPoint = -1). */
static void collectCalibrationData( InputArrayOfArrays objectPoints,
                                    InputArrayOfArrays imagePoints1,
                                    InputArrayOfArrays imagePoints2,
                                    Mat& objPtMat, Mat& imgPtMat1, Mat* imgPtMat2,
                                    Mat& npoints )
{
    collectCalibrationData( objectPoints, imagePoints1, imagePoints2, -1, objPtMat, imgPtMat1,
                            imgPtMat2, npoints );
}

/* Returns a 3x3 camera matrix of the requested type: a converted copy of
   cameraMatrix0 when it is already 3x3, otherwise the identity. */
static Mat prepareCameraMatrix(Mat& cameraMatrix0, int rtype)
{
    Mat cameraMatrix = Mat::eye(3, 3, rtype);
    if( cameraMatrix0.size() == cameraMatrix.size() )
        cameraMatrix0.convertTo(cameraMatrix, rtype);
    return cameraMatrix;
}

/* Returns a zero-padded distortion-coefficient vector of length outputSize
   (matching distCoeffs0's row/column orientation). The leading entries are
   copied from distCoeffs0 when its size is one of the supported counts
   (4, 5, 8, 12, 14); otherwise the result stays all-zero. */
static Mat prepareDistCoeffs(Mat& distCoeffs0, int rtype, int outputSize = 14)
{
    CV_Assert((int)distCoeffs0.total() <= outputSize);
    Mat distCoeffs = Mat::zeros(distCoeffs0.cols == 1 ? Size(1, outputSize) : Size(outputSize, 1), rtype);
    if( distCoeffs0.size() == Size(1, 4) ||
        distCoeffs0.size() == Size(1, 5) ||
        distCoeffs0.size() == Size(1, 8) ||
        distCoeffs0.size() == Size(1, 12) ||
        distCoeffs0.size() == Size(1, 14) ||
        distCoeffs0.size() == Size(4, 1) ||
        distCoeffs0.size() == Size(5, 1) ||
        distCoeffs0.size() == Size(8, 1) ||
        distCoeffs0.size() == Size(12, 1) ||
        distCoeffs0.size() == Size(14, 1) )
    {
        Mat dstCoeffs(distCoeffs, Rect(0, 0, distCoeffs0.cols, distCoeffs0.rows));
        distCoeffs0.convertTo(dstCoeffs, rtype);
    }
    return distCoeffs;
}

} // namespace cv

/* Converts between a rotation vector and a rotation matrix (and back),
   optionally computing the Jacobian of the transform. A 1xN / Nx1 source is
   treated as a rotation vector (output 3x3); otherwise a matrix is assumed
   (output 3x1). Delegates to the C implementation cvRodrigues2; on failure
   the destination is zero-filled. */
void cv::Rodrigues(InputArray _src, OutputArray _dst, OutputArray _jacobian)
{
    CV_INSTRUMENT_REGION();

    Mat src = _src.getMat();
    const bool vecToMat = (src.cols == 1 || src.rows == 1);
    const bool needJacobian = _jacobian.needed();

    // Vector input -> 3x3 matrix; matrix input -> 3x1 vector.
    _dst.create(3, vecToMat ? 3 : 1, src.depth());
    Mat dst = _dst.getMat();

    CvMat _csrc = cvMat(src), _cdst = cvMat(dst), _cjacobian;
    if( needJacobian )
    {
        // Jacobian shape depends on the conversion direction.
        _jacobian.create(vecToMat ? Size(9, 3) : Size(3, 9), src.depth());
        _cjacobian = cvMat(_jacobian.getMat());
    }

    if( cvRodrigues2(&_csrc, &_cdst, needJacobian ? &_cjacobian : 0) <= 0 )
        dst = Scalar(0);  // conversion failed: zero the output
}

/* Computes the partial derivatives of the matrix product A*B with respect to
   the elements of A and of B. dABdA is (A.rows*B.cols) x (A.rows*A.cols) and
   dABdB is (A.rows*B.cols) x (B.rows*B.cols). Delegates to cvCalcMatMulDeriv. */
void cv::matMulDeriv( InputArray _Amat, InputArray _Bmat,
                      OutputArray _dABdA, OutputArray _dABdB )
{
    CV_INSTRUMENT_REGION();

    Mat A = _Amat.getMat();
    Mat B = _Bmat.getMat();

    const int prodElems = A.rows * B.cols;  // number of elements of A*B
    _dABdA.create(prodElems, A.rows * A.cols, A.type());
    _dABdB.create(prodElems, B.rows * B.cols, A.type());

    Mat dABdA = _dABdA.getMat();
    Mat dABdB = _dABdB.getMat();

    CvMat matA = cvMat(A), matB = cvMat(B);
    CvMat c_dABdA = cvMat(dABdA), c_dABdB = cvMat(dABdB);
    cvCalcMatMulDeriv(&matA, &matB, &c_dABdA, &c_dABdB);
}

/* Composes two rigid transforms given as rotation vectors + translation
   vectors: (rvec3, tvec3) = (rvec2, tvec2) o (rvec1, tvec1). Optionally
   outputs the eight 3x3 Jacobians of (rvec3, tvec3) w.r.t. each input.
   Delegates to the C implementation cvComposeRT. */
void cv::composeRT( InputArray _rvec1, InputArray _tvec1,
                    InputArray _rvec2, InputArray _tvec2,
                    OutputArray _rvec3, OutputArray _tvec3,
                    OutputArray _dr3dr1, OutputArray _dr3dt1,
                    OutputArray _dr3dr2, OutputArray _dr3dt2,
                    OutputArray _dt3dr1, OutputArray _dt3dt1,
                    OutputArray _dt3dr2, OutputArray _dt3dt2 )
{
    Mat rvec1 = _rvec1.getMat(), tvec1 = _tvec1.getMat();
    Mat rvec2 = _rvec2.getMat(), tvec2 = _tvec2.getMat();
    int rtype = rvec1.type();
    _rvec3.create(rvec1.size(), rtype);
    _tvec3.create(tvec1.size(), rtype);
    Mat rvec3 = _rvec3.getMat(), tvec3 = _tvec3.getMat();

    CvMat c_rvec1 = cvMat(rvec1), c_tvec1 = cvMat(tvec1), c_rvec2 = cvMat(rvec2),
          c_tvec2 = cvMat(tvec2), c_rvec3 = cvMat(rvec3), c_tvec3 = cvMat(tvec3);
    CvMat c_dr3dr1, c_dr3dt1, c_dr3dr2, c_dr3dt2, c_dt3dr1, c_dt3dt1, c_dt3dr2, c_dt3dt2;
    CvMat *p_dr3dr1=0, *p_dr3dt1=0, *p_dr3dr2=0, *p_dr3dt2=0, *p_dt3dr1=0, *p_dt3dt1=0, *p_dt3dr2=0, *p_dt3dt2=0;
// For each requested Jacobian output "name": allocate a 3x3 Mat, build the
// corresponding CvMat header c_<name>, and point p_<name> at it (otherwise
// p_<name> stays NULL and cvComposeRT skips that derivative).
#define CV_COMPOSE_RT_PARAM(name) \
    Mat name; \
    if (_ ## name.needed())\
    { \
        _ ## name.create(3, 3, rtype); \
        name = _ ## name.getMat(); \
        p_ ## name = &(c_ ## name = cvMat(name)); \
    }

    CV_COMPOSE_RT_PARAM(dr3dr1); CV_COMPOSE_RT_PARAM(dr3dt1);
    CV_COMPOSE_RT_PARAM(dr3dr2); CV_COMPOSE_RT_PARAM(dr3dt2);
    CV_COMPOSE_RT_PARAM(dt3dr1); CV_COMPOSE_RT_PARAM(dt3dt1);
    CV_COMPOSE_RT_PARAM(dt3dr2); CV_COMPOSE_RT_PARAM(dt3dt2);
#undef CV_COMPOSE_RT_PARAM

    cvComposeRT(&c_rvec1, &c_tvec1, &c_rvec2, &c_tvec2, &c_rvec3, &c_tvec3,
                p_dr3dr1, p_dr3dt1, p_dr3dr2, p_dr3dt2,
                p_dt3dr1, p_dt3dt1, p_dt3dr2, p_dt3dt2);
}

/* Projects 3D object points to the image plane given a pose (rvec, tvec),
   camera matrix, and distortion coefficients, optionally computing the
   Jacobian w.r.t. rotation, translation, focal lengths, principal point and
   distortion. Delegates to the C implementation cvProjectPoints2. */
void cv::projectPoints( InputArray _opoints,
                        InputArray _rvec,
                        InputArray _tvec,
                        InputArray _cameraMatrix,
                        InputArray _distCoeffs,
                        OutputArray _ipoints,
                        OutputArray _jacobian,
                        double aspectRatio )
{
    Mat opoints = _opoints.getMat();
    int npoints = opoints.checkVector(3), depth = opoints.depth();
    CV_Assert(npoints >= 0 && (depth == CV_32F || depth == CV_64F));

    // Jacobian sub-block headers; the p* pointers stay NULL unless requested.
    CvMat dpdrot, dpdt, dpdf, dpdc, dpddist;
    CvMat *pdpdrot=0, *pdpdt=0, *pdpdf=0, *pdpdc=0, *pdpddist=0;

    _ipoints.create(npoints, 1, CV_MAKETYPE(depth, 2), -1, true);
    Mat imagePoints = _ipoints.getMat();
    CvMat c_imagePoints = cvMat(imagePoints);
    CvMat c_objectPoints = cvMat(opoints);
    Mat cameraMatrix = _cameraMatrix.getMat();

    Mat rvec = _rvec.getMat(), tvec = _tvec.getMat();
    CvMat c_cameraMatrix = cvMat(cameraMatrix);
    CvMat c_rvec = cvMat(rvec), c_tvec = cvMat(tvec);

    // Empty distortion input -> use five zero coefficients (stack-backed).
    double dc0buf[5]={0};
    Mat dc0(5,1,CV_64F,dc0buf);
    Mat distCoeffs = _distCoeffs.getMat();
    if( distCoeffs.empty() )
        distCoeffs = dc0;
    CvMat c_distCoeffs = cvMat(distCoeffs);
    int ndistCoeffs = distCoeffs.rows + distCoeffs.cols - 1;  // count for a 1xN or Nx1 vector

    Mat jacobian;
    if( _jacobian.needed() )
    {
        // Columns: 3 rot + 3 trans + 2 focal + 2 principal point + distortion.
        _jacobian.create(npoints*2, 3+3+2+2+ndistCoeffs, CV_64F);
        jacobian = _jacobian.getMat();
        pdpdrot = &(dpdrot = cvMat(jacobian.colRange(0, 3)));
        pdpdt = &(dpdt = cvMat(jacobian.colRange(3, 6)));
        pdpdf = &(dpdf = cvMat(jacobian.colRange(6, 8)));
        pdpdc = &(dpdc = cvMat(jacobian.colRange(8, 10)));
        pdpddist = &(dpddist = cvMat(jacobian.colRange(10, 10+ndistCoeffs)));
    }

    cvProjectPoints2( &c_objectPoints, &c_rvec, &c_tvec, &c_cameraMatrix, &c_distCoeffs,
                      &c_imagePoints, pdpdrot, pdpdt, pdpdf, pdpdc, pdpddist, aspectRatio );
}

/* Estimates an initial 3x3 camera matrix from per-view object/image point
   correspondences. Flattens the input views and delegates to the C
   implementation cvInitIntrinsicParams2D. */
cv::Mat cv::initCameraMatrix2D( InputArrayOfArrays objectPoints,
                                InputArrayOfArrays imagePoints,
                                Size imageSize, double aspectRatio )
{
    CV_INSTRUMENT_REGION();

    Mat cameraMatrix(3, 3, CV_64F);
    Mat objPt, imgPt, npoints;

    // Flatten the per-view vectors into continuous buffers.
    collectCalibrationData( objectPoints, imagePoints, noArray(),
                            objPt, imgPt, 0, npoints );

    CvMat _objPt = cvMat(objPt);
    CvMat _imgPt = cvMat(imgPt);
    CvMat _npoints = cvMat(npoints);
    CvMat _cameraMatrix = cvMat(cameraMatrix);
    cvInitIntrinsicParams2D( &_objPt, &_imgPt, &_npoints,
                             cvSize(imageSize), &_cameraMatrix, aspectRatio );

    return cameraMatrix;
}

double cv::calibrateCamera( InputArrayOfArrays _objectPoints,
3599
InputArrayOfArrays _imagePoints,
3600
Size imageSize, InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs,
3601
OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, int flags, TermCriteria criteria )
3602
{
3603
CV_INSTRUMENT_REGION();
3604
3605
return calibrateCamera(_objectPoints, _imagePoints, imageSize, _cameraMatrix, _distCoeffs,
3606
_rvecs, _tvecs, noArray(), noArray(), noArray(), flags, criteria);
3607
}
3608
3609
double cv::calibrateCamera(InputArrayOfArrays _objectPoints,
3610
InputArrayOfArrays _imagePoints,
3611
Size imageSize, InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs,
3612
OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs,
3613
OutputArray stdDeviationsIntrinsics,
3614
OutputArray stdDeviationsExtrinsics,
3615
OutputArray _perViewErrors, int flags, TermCriteria criteria )
3616
{
3617
CV_INSTRUMENT_REGION();
3618
3619
return calibrateCameraRO(_objectPoints, _imagePoints, imageSize, -1, _cameraMatrix, _distCoeffs,
3620
_rvecs, _tvecs, noArray(), stdDeviationsIntrinsics, stdDeviationsExtrinsics,
3621
noArray(), _perViewErrors, flags, criteria);
3622
}
3623
3624
double cv::calibrateCameraRO(InputArrayOfArrays _objectPoints,
3625
InputArrayOfArrays _imagePoints,
3626
Size imageSize, int iFixedPoint, InputOutputArray _cameraMatrix,
3627
InputOutputArray _distCoeffs,
3628
OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs,
3629
OutputArray newObjPoints,
3630
int flags, TermCriteria criteria)
3631
{
3632
CV_INSTRUMENT_REGION();
3633
3634
return calibrateCameraRO(_objectPoints, _imagePoints, imageSize, iFixedPoint, _cameraMatrix,
3635
_distCoeffs, _rvecs, _tvecs, newObjPoints, noArray(), noArray(),
3636
noArray(), noArray(), flags, criteria);
3637
}
3638
3639
/* Full camera calibration, optionally with the object-releasing method
   (iFixedPoint selects the fixed object point; > 0 and < N-1 enables it).
   Prepares/normalizes the camera matrix and distortion coefficients, flattens
   the input point sets, delegates to cvCalibrateCamera2Internal, and scatters
   the packed results (rvecs/tvecs, refined object points, standard deviations,
   per-view errors) into the caller's output arrays.
   Returns the final RMS reprojection error. */
double cv::calibrateCameraRO(InputArrayOfArrays _objectPoints,
                             InputArrayOfArrays _imagePoints,
                             Size imageSize, int iFixedPoint, InputOutputArray _cameraMatrix,
                             InputOutputArray _distCoeffs,
                             OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs,
                             OutputArray newObjPoints,
                             OutputArray stdDeviationsIntrinsics,
                             OutputArray stdDeviationsExtrinsics,
                             OutputArray stdDeviationsObjPoints,
                             OutputArray _perViewErrors, int flags, TermCriteria criteria )
{
    CV_INSTRUMENT_REGION();

    int rtype = CV_64F;
    Mat cameraMatrix = _cameraMatrix.getMat();
    cameraMatrix = prepareCameraMatrix(cameraMatrix, rtype);
    Mat distCoeffs = _distCoeffs.getMat();
    // Thin-prism-only model uses 12 coefficients; otherwise reserve the full 14.
    distCoeffs = (flags & CALIB_THIN_PRISM_MODEL) && !(flags & CALIB_TILTED_MODEL) ? prepareDistCoeffs(distCoeffs, rtype, 12) :
                                                    prepareDistCoeffs(distCoeffs, rtype);
    // Plain 5-coefficient model when no extended model flag is set.
    if( !(flags & CALIB_RATIONAL_MODEL) &&
        (!(flags & CALIB_THIN_PRISM_MODEL)) &&
        (!(flags & CALIB_TILTED_MODEL)))
        distCoeffs = distCoeffs.rows == 1 ? distCoeffs.colRange(0, 5) : distCoeffs.rowRange(0, 5);

    int nimages = int(_objectPoints.total());
    CV_Assert( nimages > 0 );
    Mat objPt, imgPt, npoints, rvecM, tvecM, stdDeviationsM, errorsM;

    bool rvecs_needed = _rvecs.needed(), tvecs_needed = _tvecs.needed(),
         stddev_needed = stdDeviationsIntrinsics.needed(), errors_needed = _perViewErrors.needed(),
         stddev_ext_needed = stdDeviationsExtrinsics.needed();
    bool newobj_needed = newObjPoints.needed();
    bool stddev_obj_needed = stdDeviationsObjPoints.needed();

    // vector<Mat> outputs need a temporary packed matrix plus a final scatter loop.
    bool rvecs_mat_vec = _rvecs.isMatVector();
    bool tvecs_mat_vec = _tvecs.isMatVector();

    if( rvecs_needed )
    {
        _rvecs.create(nimages, 1, CV_64FC3);

        if(rvecs_mat_vec)
            rvecM.create(nimages, 3, CV_64F);
        else
            rvecM = _rvecs.getMat();
    }

    if( tvecs_needed )
    {
        _tvecs.create(nimages, 1, CV_64FC3);

        if(tvecs_mat_vec)
            tvecM.create(nimages, 3, CV_64F);
        else
            tvecM = _tvecs.getMat();
    }

    collectCalibrationData( _objectPoints, _imagePoints, noArray(), iFixedPoint,
                            objPt, imgPt, 0, npoints );
    bool releaseObject = iFixedPoint > 0 && iFixedPoint < npoints.at<int>(0) - 1;

    // Refined object points only exist when the object-releasing method runs.
    newobj_needed = newobj_needed && releaseObject;
    int np = npoints.at<int>( 0 );
    Mat newObjPt;
    if( newobj_needed ) {
        newObjPoints.create( 1, np, CV_32FC3 );
        newObjPt = newObjPoints.getMat();
    }

    stddev_obj_needed = stddev_obj_needed && releaseObject;
    bool stddev_any_needed = stddev_needed || stddev_ext_needed || stddev_obj_needed;
    if( stddev_any_needed )
    {
        // Packed layout: [intrinsics | 6 extrinsics per view | 3 per object point].
        if( releaseObject )
            stdDeviationsM.create(nimages*6 + CV_CALIB_NINTRINSIC + np * 3, 1, CV_64F);
        else
            stdDeviationsM.create(nimages*6 + CV_CALIB_NINTRINSIC, 1, CV_64F);
    }

    if( errors_needed )
    {
        _perViewErrors.create(nimages, 1, CV_64F);
        errorsM = _perViewErrors.getMat();
    }

    CvMat c_objPt = cvMat(objPt), c_imgPt = cvMat(imgPt), c_npoints = cvMat(npoints);
    CvMat c_cameraMatrix = cvMat(cameraMatrix), c_distCoeffs = cvMat(distCoeffs);
    CvMat c_rvecM = cvMat(rvecM), c_tvecM = cvMat(tvecM), c_stdDev = cvMat(stdDeviationsM), c_errors = cvMat(errorsM);
    CvMat c_newObjPt = cvMat( newObjPt );

    double reprojErr = cvCalibrateCamera2Internal(&c_objPt, &c_imgPt, &c_npoints, cvSize(imageSize),
                                                  iFixedPoint,
                                                  &c_cameraMatrix, &c_distCoeffs,
                                                  rvecs_needed ? &c_rvecM : NULL,
                                                  tvecs_needed ? &c_tvecM : NULL,
                                                  newobj_needed ? &c_newObjPt : NULL,
                                                  stddev_any_needed ? &c_stdDev : NULL,
                                                  errors_needed ? &c_errors : NULL, flags, cvTermCriteria(criteria));

    if( newobj_needed )
        newObjPt.copyTo(newObjPoints);

    // Unpack the requested slices of the packed std-deviation vector.
    // (Offsets into .ptr() are in bytes, hence the sizeof(double) factors.)
    if( stddev_needed )
    {
        stdDeviationsIntrinsics.create(CV_CALIB_NINTRINSIC, 1, CV_64F);
        Mat stdDeviationsIntrinsicsMat = stdDeviationsIntrinsics.getMat();
        std::memcpy(stdDeviationsIntrinsicsMat.ptr(), stdDeviationsM.ptr(),
                    CV_CALIB_NINTRINSIC*sizeof(double));
    }

    if ( stddev_ext_needed )
    {
        stdDeviationsExtrinsics.create(nimages*6, 1, CV_64F);
        Mat stdDeviationsExtrinsicsMat = stdDeviationsExtrinsics.getMat();
        std::memcpy(stdDeviationsExtrinsicsMat.ptr(),
                    stdDeviationsM.ptr() + CV_CALIB_NINTRINSIC*sizeof(double),
                    nimages*6*sizeof(double));
    }

    if( stddev_obj_needed )
    {
        stdDeviationsObjPoints.create( np * 3, 1, CV_64F );
        Mat stdDeviationsObjPointsMat = stdDeviationsObjPoints.getMat();
        std::memcpy( stdDeviationsObjPointsMat.ptr(), stdDeviationsM.ptr()
                         + ( CV_CALIB_NINTRINSIC + nimages * 6 ) * sizeof( double ),
                     np * 3 * sizeof( double ) );
    }

    // overly complicated and inefficient rvec/ tvec handling to support vector<Mat>
    for(int i = 0; i < nimages; i++ )
    {
        if( rvecs_needed && rvecs_mat_vec)
        {
            _rvecs.create(3, 1, CV_64F, i, true);
            Mat rv = _rvecs.getMat(i);
            memcpy(rv.ptr(), rvecM.ptr(i), 3*sizeof(double));
        }
        if( tvecs_needed && tvecs_mat_vec)
        {
            _tvecs.create(3, 1, CV_64F, i, true);
            Mat tv = _tvecs.getMat(i);
            memcpy(tv.ptr(), tvecM.ptr(i), 3*sizeof(double));
        }
    }

    // Write back the (possibly re-typed) refined intrinsics.
    cameraMatrix.copyTo(_cameraMatrix);
    distCoeffs.copyTo(_distCoeffs);

    return reprojErr;
}

void cv::calibrationMatrixValues( InputArray _cameraMatrix, Size imageSize,
3792
double apertureWidth, double apertureHeight,
3793
double& fovx, double& fovy, double& focalLength,
3794
Point2d& principalPoint, double& aspectRatio )
3795
{
3796
CV_INSTRUMENT_REGION();
3797
3798
if(_cameraMatrix.size() != Size(3, 3))
3799
CV_Error(CV_StsUnmatchedSizes, "Size of cameraMatrix must be 3x3!");
3800
3801
Matx33d K = _cameraMatrix.getMat();
3802
3803
CV_DbgAssert(imageSize.width != 0 && imageSize.height != 0 && K(0, 0) != 0.0 && K(1, 1) != 0.0);
3804
3805
/* Calculate pixel aspect ratio. */
3806
aspectRatio = K(1, 1) / K(0, 0);
3807
3808
/* Calculate number of pixel per realworld unit. */
3809
double mx, my;
3810
if(apertureWidth != 0.0 && apertureHeight != 0.0) {
3811
mx = imageSize.width / apertureWidth;
3812
my = imageSize.height / apertureHeight;
3813
} else {
3814
mx = 1.0;
3815
my = aspectRatio;
3816
}
3817
3818
/* Calculate fovx and fovy. */
3819
fovx = atan2(K(0, 2), K(0, 0)) + atan2(imageSize.width - K(0, 2), K(0, 0));
3820
fovy = atan2(K(1, 2), K(1, 1)) + atan2(imageSize.height - K(1, 2), K(1, 1));
3821
fovx *= 180.0 / CV_PI;
3822
fovy *= 180.0 / CV_PI;
3823
3824
/* Calculate focal length. */
3825
focalLength = K(0, 0) / mx;
3826
3827
/* Calculate principle point. */
3828
principalPoint = Point2d(K(0, 2) / mx, K(1, 2) / my);
3829
}
3830
3831
double cv::stereoCalibrate( InputArrayOfArrays _objectPoints,
3832
InputArrayOfArrays _imagePoints1,
3833
InputArrayOfArrays _imagePoints2,
3834
InputOutputArray _cameraMatrix1, InputOutputArray _distCoeffs1,
3835
InputOutputArray _cameraMatrix2, InputOutputArray _distCoeffs2,
3836
Size imageSize, OutputArray _Rmat, OutputArray _Tmat,
3837
OutputArray _Emat, OutputArray _Fmat, int flags,
3838
TermCriteria criteria)
3839
{
3840
if (flags & CALIB_USE_EXTRINSIC_GUESS)
3841
CV_Error(Error::StsBadFlag, "stereoCalibrate does not support CALIB_USE_EXTRINSIC_GUESS.");
3842
3843
Mat Rmat, Tmat;
3844
double ret = stereoCalibrate(_objectPoints, _imagePoints1, _imagePoints2, _cameraMatrix1, _distCoeffs1,
3845
_cameraMatrix2, _distCoeffs2, imageSize, Rmat, Tmat, _Emat, _Fmat,
3846
noArray(), flags, criteria);
3847
Rmat.copyTo(_Rmat);
3848
Tmat.copyTo(_Tmat);
3849
return ret;
3850
}
3851
3852
/** Full stereo-calibration overload: refines both cameras' intrinsics (unless
 *  fixed by flags) and estimates the inter-camera rotation R and translation T,
 *  optionally producing the essential matrix E, fundamental matrix F and
 *  per-view re-projection errors. R/T may carry an initial guess when
 *  CALIB_USE_EXTRINSIC_GUESS is set. Returns the final re-projection error.
 *  Implemented as a thin wrapper around the legacy cvStereoCalibrateImpl.
 */
double cv::stereoCalibrate( InputArrayOfArrays _objectPoints,
                            InputArrayOfArrays _imagePoints1,
                            InputArrayOfArrays _imagePoints2,
                            InputOutputArray _cameraMatrix1, InputOutputArray _distCoeffs1,
                            InputOutputArray _cameraMatrix2, InputOutputArray _distCoeffs2,
                            Size imageSize, InputOutputArray _Rmat, InputOutputArray _Tmat,
                            OutputArray _Emat, OutputArray _Fmat,
                            OutputArray _perViewErrors, int flags ,
                            TermCriteria criteria)
{
    int rtype = CV_64F;
    // Normalize the user-supplied intrinsics to 3x3 / distortion-vector CV_64F form.
    Mat cameraMatrix1 = _cameraMatrix1.getMat();
    Mat cameraMatrix2 = _cameraMatrix2.getMat();
    Mat distCoeffs1 = _distCoeffs1.getMat();
    Mat distCoeffs2 = _distCoeffs2.getMat();
    cameraMatrix1 = prepareCameraMatrix(cameraMatrix1, rtype);
    cameraMatrix2 = prepareCameraMatrix(cameraMatrix2, rtype);
    distCoeffs1 = prepareDistCoeffs(distCoeffs1, rtype);
    distCoeffs2 = prepareDistCoeffs(distCoeffs2, rtype);

    // Without any extended distortion model, only the first 5 coefficients
    // (k1,k2,p1,p2,k3) are optimized — trim the vectors accordingly.
    if( !(flags & CALIB_RATIONAL_MODEL) &&
        (!(flags & CALIB_THIN_PRISM_MODEL)) &&
        (!(flags & CALIB_TILTED_MODEL)))
    {
        distCoeffs1 = distCoeffs1.rows == 1 ? distCoeffs1.colRange(0, 5) : distCoeffs1.rowRange(0, 5);
        distCoeffs2 = distCoeffs2.rows == 1 ? distCoeffs2.colRange(0, 5) : distCoeffs2.rowRange(0, 5);
    }

    // R/T are pure outputs unless the caller provided an extrinsic guess.
    if((flags & CALIB_USE_EXTRINSIC_GUESS) == 0)
    {
        _Rmat.create(3, 3, rtype);
        _Tmat.create(3, 1, rtype);
    }

    Mat objPt, imgPt, imgPt2, npoints;

    // Flatten the per-view point lists into single matrices plus a per-view
    // point-count vector, as required by the legacy implementation.
    collectCalibrationData( _objectPoints, _imagePoints1, _imagePoints2,
                            objPt, imgPt, &imgPt2, npoints );
    CvMat c_objPt = cvMat(objPt), c_imgPt = cvMat(imgPt), c_imgPt2 = cvMat(imgPt2), c_npoints = cvMat(npoints);
    CvMat c_cameraMatrix1 = cvMat(cameraMatrix1), c_distCoeffs1 = cvMat(distCoeffs1);
    CvMat c_cameraMatrix2 = cvMat(cameraMatrix2), c_distCoeffs2 = cvMat(distCoeffs2);
    Mat matR_ = _Rmat.getMat(), matT_ = _Tmat.getMat();
    CvMat c_matR = cvMat(matR_), c_matT = cvMat(matT_), c_matE, c_matF, c_matErr;

    bool E_needed = _Emat.needed(), F_needed = _Fmat.needed(), errors_needed = _perViewErrors.needed();

    // E, F and the per-view error matrix are optional outputs: only wrap
    // CvMat headers for the ones the caller actually requested.
    Mat matE_, matF_, matErr_;
    if( E_needed )
    {
        _Emat.create(3, 3, rtype);
        matE_ = _Emat.getMat();
        c_matE = cvMat(matE_);
    }
    if( F_needed )
    {
        _Fmat.create(3, 3, rtype);
        matF_ = _Fmat.getMat();
        c_matF = cvMat(matF_);
    }

    if( errors_needed )
    {
        // One row per view, one column per camera.
        int nimages = int(_objectPoints.total());
        _perViewErrors.create(nimages, 2, CV_64F);
        matErr_ = _perViewErrors.getMat();
        c_matErr = cvMat(matErr_);
    }

    double err = cvStereoCalibrateImpl(&c_objPt, &c_imgPt, &c_imgPt2, &c_npoints, &c_cameraMatrix1,
                                       &c_distCoeffs1, &c_cameraMatrix2, &c_distCoeffs2, cvSize(imageSize), &c_matR,
                                       &c_matT, E_needed ? &c_matE : NULL, F_needed ? &c_matF : NULL,
                                       errors_needed ? &c_matErr : NULL, flags, cvTermCriteria(criteria));

    // prepare*() may have returned copies, so write the refined intrinsics
    // back into the caller's arrays explicitly.
    cameraMatrix1.copyTo(_cameraMatrix1);
    cameraMatrix2.copyTo(_cameraMatrix2);
    distCoeffs1.copyTo(_distCoeffs1);
    distCoeffs2.copyTo(_distCoeffs2);

    return err;
}
3932
3933
3934
void cv::stereoRectify( InputArray _cameraMatrix1, InputArray _distCoeffs1,
3935
InputArray _cameraMatrix2, InputArray _distCoeffs2,
3936
Size imageSize, InputArray _Rmat, InputArray _Tmat,
3937
OutputArray _Rmat1, OutputArray _Rmat2,
3938
OutputArray _Pmat1, OutputArray _Pmat2,
3939
OutputArray _Qmat, int flags,
3940
double alpha, Size newImageSize,
3941
Rect* validPixROI1, Rect* validPixROI2 )
3942
{
3943
Mat cameraMatrix1 = _cameraMatrix1.getMat(), cameraMatrix2 = _cameraMatrix2.getMat();
3944
Mat distCoeffs1 = _distCoeffs1.getMat(), distCoeffs2 = _distCoeffs2.getMat();
3945
Mat Rmat = _Rmat.getMat(), Tmat = _Tmat.getMat();
3946
CvMat c_cameraMatrix1 = cvMat(cameraMatrix1);
3947
CvMat c_cameraMatrix2 = cvMat(cameraMatrix2);
3948
CvMat c_distCoeffs1 = cvMat(distCoeffs1);
3949
CvMat c_distCoeffs2 = cvMat(distCoeffs2);
3950
CvMat c_R = cvMat(Rmat), c_T = cvMat(Tmat);
3951
3952
int rtype = CV_64F;
3953
_Rmat1.create(3, 3, rtype);
3954
_Rmat2.create(3, 3, rtype);
3955
_Pmat1.create(3, 4, rtype);
3956
_Pmat2.create(3, 4, rtype);
3957
Mat R1 = _Rmat1.getMat(), R2 = _Rmat2.getMat(), P1 = _Pmat1.getMat(), P2 = _Pmat2.getMat(), Q;
3958
CvMat c_R1 = cvMat(R1), c_R2 = cvMat(R2), c_P1 = cvMat(P1), c_P2 = cvMat(P2);
3959
CvMat c_Q, *p_Q = 0;
3960
3961
if( _Qmat.needed() )
3962
{
3963
_Qmat.create(4, 4, rtype);
3964
p_Q = &(c_Q = cvMat(Q = _Qmat.getMat()));
3965
}
3966
3967
CvMat *p_distCoeffs1 = distCoeffs1.empty() ? NULL : &c_distCoeffs1;
3968
CvMat *p_distCoeffs2 = distCoeffs2.empty() ? NULL : &c_distCoeffs2;
3969
cvStereoRectify( &c_cameraMatrix1, &c_cameraMatrix2, p_distCoeffs1, p_distCoeffs2,
3970
cvSize(imageSize), &c_R, &c_T, &c_R1, &c_R2, &c_P1, &c_P2, p_Q, flags, alpha,
3971
cvSize(newImageSize), (CvRect*)validPixROI1, (CvRect*)validPixROI2);
3972
}
3973
3974
/** Computes rectification homographies H1/H2 for an uncalibrated stereo pair
 *  from point correspondences and (optionally) a 3x3 fundamental matrix.
 *  Returns true when the underlying legacy routine reports success.
 */
bool cv::stereoRectifyUncalibrated( InputArray _points1, InputArray _points2,
                                    InputArray _Fmat, Size imgSize,
                                    OutputArray _Hmat1, OutputArray _Hmat2, double threshold )
{
    CV_INSTRUMENT_REGION();

    const int rtype = CV_64F;
    _Hmat1.create(3, 3, rtype);
    _Hmat2.create(3, 3, rtype);

    Mat F = _Fmat.getMat();
    Mat pts1 = _points1.getMat(), pts2 = _points2.getMat();
    CvMat cPts1 = cvMat(pts1), cPts2 = cvMat(pts2);

    Mat H1 = _Hmat1.getMat(), H2 = _Hmat2.getMat();
    CvMat cH1 = cvMat(H1), cH2 = cvMat(H2);

    // F is optional: it is only forwarded when it actually is a 3x3 matrix.
    CvMat cF;
    CvMat* pF = NULL;
    if( F.size() == Size(3, 3) )
    {
        cF = cvMat(F);
        pF = &cF;
    }

    return cvStereoRectifyUncalibrated(&cPts1, &cPts2, pF, cvSize(imgSize), &cH1, &cH2, threshold) > 0;
}
3992
3993
/** Returns a new camera matrix tuned by the free-scaling parameter alpha
 *  (0: all valid pixels, 1: all source pixels retained), optionally reporting
 *  the valid-pixel ROI. Thin wrapper over cvGetOptimalNewCameraMatrix.
 */
cv::Mat cv::getOptimalNewCameraMatrix( InputArray _cameraMatrix,
                                       InputArray _distCoeffs,
                                       Size imgSize, double alpha, Size newImgSize,
                                       Rect* validPixROI, bool centerPrincipalPoint )
{
    CV_INSTRUMENT_REGION();

    Mat K = _cameraMatrix.getMat(), D = _distCoeffs.getMat();
    CvMat cK = cvMat(K), cD = cvMat(D);

    // Result has the same depth as the input camera matrix.
    Mat newK(3, 3, CV_MAT_TYPE(cK.type));
    CvMat cNewK = cvMat(newK);

    cvGetOptimalNewCameraMatrix(&cK, &cD, cvSize(imgSize),
                                alpha, &cNewK,
                                cvSize(newImgSize), (CvRect*)validPixROI, (int)centerPrincipalPoint);
    return newK;
}
4011
4012
4013
/** RQ decomposition of a 3x3 matrix M into an upper-triangular R and an
 *  orthogonal Q, optionally returning the three elementary rotations
 *  Qx, Qy, Qz. Returns the Euler angles written by the legacy routine.
 *  Wrapper over cvRQDecomp3x3.
 */
cv::Vec3d cv::RQDecomp3x3( InputArray _Mmat,
                           OutputArray _Rmat,
                           OutputArray _Qmat,
                           OutputArray _Qx,
                           OutputArray _Qy,
                           OutputArray _Qz )
{
    CV_INSTRUMENT_REGION();

    Mat M = _Mmat.getMat();
    _Rmat.create(3, 3, M.type());
    _Qmat.create(3, 3, M.type());
    Mat Rmat = _Rmat.getMat();
    Mat Qmat = _Qmat.getMat();
    Vec3d eulerAngles;

    CvMat matM = cvMat(M), matR = cvMat(Rmat), matQ = cvMat(Qmat);
// Local helper macro: for each optional output _Qx/_Qy/_Qz, allocate a 3x3
// matrix and set up a CvMat header + pointer only when the caller asked for it
// (the pointer stays NULL otherwise). No comments may appear inside the macro
// body — they would break the line continuations.
#define CV_RQDecomp3x3_PARAM(name) \
    Mat name; \
    CvMat c_ ## name, *p ## name = NULL; \
    if( _ ## name.needed() ) \
    { \
        _ ## name.create(3, 3, M.type()); \
        name = _ ## name.getMat(); \
        c_ ## name = cvMat(name); p ## name = &c_ ## name; \
    }

    CV_RQDecomp3x3_PARAM(Qx);
    CV_RQDecomp3x3_PARAM(Qy);
    CV_RQDecomp3x3_PARAM(Qz);
#undef CV_RQDecomp3x3_PARAM
    // eulerAngles is a Vec3d, reinterpreted as the CvPoint3D64f the C API expects.
    cvRQDecomp3x3(&matM, &matR, &matQ, pQx, pQy, pQz, (CvPoint3D64f*)&eulerAngles[0]);
    return eulerAngles;
}
4047
4048
4049
/** Decomposes a 3x4 projection matrix into a camera (intrinsic) matrix, a
 *  rotation matrix and the homogeneous camera position, optionally producing
 *  the three elementary rotations and the Euler angles. Wrapper over
 *  cvDecomposeProjectionMatrix.
 */
void cv::decomposeProjectionMatrix( InputArray _projMatrix, OutputArray _cameraMatrix,
                                    OutputArray _rotMatrix, OutputArray _transVect,
                                    OutputArray _rotMatrixX, OutputArray _rotMatrixY,
                                    OutputArray _rotMatrixZ, OutputArray _eulerAngles )
{
    CV_INSTRUMENT_REGION();

    Mat projMatrix = _projMatrix.getMat();
    // All mandatory outputs share the input's element type.
    int type = projMatrix.type();
    _cameraMatrix.create(3, 3, type);
    _rotMatrix.create(3, 3, type);
    _transVect.create(4, 1, type);
    Mat cameraMatrix = _cameraMatrix.getMat();
    Mat rotMatrix = _rotMatrix.getMat();
    Mat transVect = _transVect.getMat();
    CvMat c_projMatrix = cvMat(projMatrix), c_cameraMatrix = cvMat(cameraMatrix);
    CvMat c_rotMatrix = cvMat(rotMatrix), c_transVect = cvMat(transVect);
    CvPoint3D64f *p_eulerAngles = 0;

// Local helper macro: for each optional rotation output, allocate a 3x3
// matrix and set up a CvMat header + pointer only when requested (pointer
// stays NULL otherwise). Comments cannot go inside the macro body — they
// would break the line continuations.
#define CV_decomposeProjectionMatrix_PARAM(name) \
    Mat name; \
    CvMat c_ ## name, *p_ ## name = NULL; \
    if( _ ## name.needed() ) \
    { \
        _ ## name.create(3, 3, type); \
        name = _ ## name.getMat(); \
        c_ ## name = cvMat(name); p_ ## name = &c_ ## name; \
    }

    CV_decomposeProjectionMatrix_PARAM(rotMatrixX);
    CV_decomposeProjectionMatrix_PARAM(rotMatrixY);
    CV_decomposeProjectionMatrix_PARAM(rotMatrixZ);
#undef CV_decomposeProjectionMatrix_PARAM

    if( _eulerAngles.needed() )
    {
        // Euler angles are always CV_64F; reinterpret the 3x1 buffer as the
        // CvPoint3D64f the C API writes into.
        _eulerAngles.create(3, 1, CV_64F, -1, true);
        p_eulerAngles = _eulerAngles.getMat().ptr<CvPoint3D64f>();
    }

    cvDecomposeProjectionMatrix(&c_projMatrix, &c_cameraMatrix, &c_rotMatrix,
                                &c_transVect, p_rotMatrixX, p_rotMatrixY,
                                p_rotMatrixZ, p_eulerAngles);
}
4093
4094
4095
namespace cv
{

// Adjusts the third camera's projection matrix P3 so that, after
// rectification, features observed by camera 3 line up vertically with the
// same features observed by camera 1 (used by rectify3Collinear).
//
// The per-view image points are undistorted/rectified with each camera's
// (R, P), then a 1-D least-squares line y2 = a*y1 + b is fitted between the
// rectified y-coordinates of camera 3 (y1) and camera 1 (y2); a and b are
// folded into P3's focal lengths, principal point and translation column.
//
// NOTE(review): if no point pairs are collected (n == 0) or all y1 values are
// identical, the divisions below produce NaN/Inf — callers appear to
// guarantee non-empty, non-degenerate input; confirm before reusing.
static void adjust3rdMatrix(InputArrayOfArrays _imgpt1_0,
                            InputArrayOfArrays _imgpt3_0,
                            const Mat& cameraMatrix1, const Mat& distCoeffs1,
                            const Mat& cameraMatrix3, const Mat& distCoeffs3,
                            const Mat& R1, const Mat& R3, const Mat& P1, Mat& P3 )
{
    size_t n1 = _imgpt1_0.total(), n3 = _imgpt3_0.total();
    std::vector<Point2f> imgpt1, imgpt3;

    // Concatenate matching views; each view must contain the same number of
    // 2-channel float points in both cameras.
    for( int i = 0; i < (int)std::min(n1, n3); i++ )
    {
        Mat pt1 = _imgpt1_0.getMat(i), pt3 = _imgpt3_0.getMat(i);
        int ni1 = pt1.checkVector(2, CV_32F), ni3 = pt3.checkVector(2, CV_32F);
        CV_Assert( ni1 > 0 && ni1 == ni3 );
        const Point2f* pt1data = pt1.ptr<Point2f>();
        const Point2f* pt3data = pt3.ptr<Point2f>();
        std::copy(pt1data, pt1data + ni1, std::back_inserter(imgpt1));
        std::copy(pt3data, pt3data + ni3, std::back_inserter(imgpt3));
    }

    // Map the raw image points into the rectified coordinate frames.
    undistortPoints(imgpt1, imgpt1, cameraMatrix1, distCoeffs1, R1, P1);
    undistortPoints(imgpt3, imgpt3, cameraMatrix3, distCoeffs3, R3, P3);

    // Accumulate first and second moments of the rectified y-coordinates.
    double y1_ = 0, y2_ = 0, y1y1_ = 0, y1y2_ = 0;
    size_t n = imgpt1.size();

    for( size_t i = 0; i < n; i++ )
    {
        double y1 = imgpt3[i].y, y2 = imgpt1[i].y;

        y1_ += y1; y2_ += y2;
        y1y1_ += y1*y1; y1y2_ += y1*y2;
    }

    y1_ /= n;
    y2_ /= n;
    y1y1_ /= n;
    y1y2_ /= n;

    // Least-squares fit: slope a from the covariance/variance ratio,
    // intercept b from the means.
    double a = (y1y2_ - y1_*y2_)/(y1y1_ - y1_*y1_);
    double b = y2_ - a*y1_;

    // Apply scale a (and vertical offset b) to P3's entries.
    P3.at<double>(0,0) *= a;
    P3.at<double>(1,1) *= a;
    P3.at<double>(0,2) = P3.at<double>(0,2)*a;
    P3.at<double>(1,2) = P3.at<double>(1,2)*a + b;
    P3.at<double>(0,3) *= a;
    P3.at<double>(1,3) *= a;
}

}
4149
4150
/** Rectification for a horizontal trinocular (3-camera, collinear) rig.
 *  Cameras 1 and 2 are rectified with the standard stereo pipeline; camera 3
 *  is then rotated so its optical axis becomes parallel to the other two, and
 *  its projection matrix is aligned using the provided image points (when
 *  given). Returns the disparity ratio between the 1-3 and 1-2 baselines.
 */
float cv::rectify3Collinear( InputArray _cameraMatrix1, InputArray _distCoeffs1,
                             InputArray _cameraMatrix2, InputArray _distCoeffs2,
                             InputArray _cameraMatrix3, InputArray _distCoeffs3,
                             InputArrayOfArrays _imgpt1,
                             InputArrayOfArrays _imgpt3,
                             Size imageSize, InputArray _Rmat12, InputArray _Tmat12,
                             InputArray _Rmat13, InputArray _Tmat13,
                             OutputArray _Rmat1, OutputArray _Rmat2, OutputArray _Rmat3,
                             OutputArray _Pmat1, OutputArray _Pmat2, OutputArray _Pmat3,
                             OutputArray _Qmat,
                             double alpha, Size newImgSize,
                             Rect* roi1, Rect* roi2, int flags )
{
    // first, rectify the 1-2 stereo pair
    stereoRectify( _cameraMatrix1, _distCoeffs1, _cameraMatrix2, _distCoeffs2,
                   imageSize, _Rmat12, _Tmat12, _Rmat1, _Rmat2, _Pmat1, _Pmat2, _Qmat,
                   flags, alpha, newImgSize, roi1, roi2 );

    Mat R12 = _Rmat12.getMat(), R13 = _Rmat13.getMat(), T12 = _Tmat12.getMat(), T13 = _Tmat13.getMat();

    _Rmat3.create(3, 3, CV_64F);
    _Pmat3.create(3, 4, CV_64F);

    Mat P1 = _Pmat1.getMat(), P2 = _Pmat2.getMat();
    Mat R3 = _Rmat3.getMat(), P3 = _Pmat3.getMat();

    // recompute rectification transforms for cameras 1 & 2.
    Mat om, r_r, r_r13;

    // Normalize R13 to a 3x3 rotation matrix (it may arrive as a Rodrigues vector).
    if( R13.size() != Size(3,3) )
        Rodrigues(R13, r_r13);
    else
        R13.copyTo(r_r13);

    // Normalize R12 to a Rodrigues rotation vector `om`.
    if( R12.size() == Size(3,3) )
        Rodrigues(R12, om);
    else
        R12.copyTo(om);

    // Half of the inverse 1-2 rotation brings both cameras to a common
    // "average" orientation.
    om *= -0.5;
    Rodrigues(om, r_r); // rotate cameras to same orientation by averaging
    Mat_<double> t12 = r_r * T12;

    // Dominant baseline axis: 0 = horizontal rig, 1 = vertical rig.
    int idx = fabs(t12(0,0)) > fabs(t12(1,0)) ? 0 : 1;
    double c = t12(idx,0), nt = norm(t12, CV_L2);
    CV_Assert(fabs(nt) > 0);
    Mat_<double> uu = Mat_<double>::zeros(3,1);
    uu(idx, 0) = c > 0 ? 1 : -1;

    // calculate global Z rotation
    Mat_<double> ww = t12.cross(uu), wR;
    double nw = norm(ww, CV_L2);
    CV_Assert(fabs(nw) > 0);
    ww *= acos(fabs(c)/nt)/nw;
    Rodrigues(ww, wR);

    // now rotate camera 3 to make its optical axis parallel to cameras 1 and 2.
    // (R3 is the 3x3 header of _Rmat3; the expression below writes in place.)
    R3 = wR*r_r.t()*r_r13.t();
    Mat_<double> t13 = R3 * T13;

    // P3 starts as a copy of P2, with camera 3's rotated translation folded
    // into the last column (scaled by the focal lengths).
    P2.copyTo(P3);
    Mat t = P3.col(3);
    t13.copyTo(t);
    P3.at<double>(0,3) *= P3.at<double>(0,0);
    P3.at<double>(1,3) *= P3.at<double>(1,1);

    // Fine-align P3 against observed points when the caller supplied them.
    if( !_imgpt1.empty() && !_imgpt3.empty() )
        adjust3rdMatrix(_imgpt1, _imgpt3, _cameraMatrix1.getMat(), _distCoeffs1.getMat(),
                        _cameraMatrix3.getMat(), _distCoeffs3.getMat(), _Rmat1.getMat(), R3, P1, P3);

    // Ratio of normalized baselines along the dominant axis (1-3 vs 1-2).
    return (float)((P3.at<double>(idx,3)/P3.at<double>(idx,idx))/
                   (P2.at<double>(idx,3)/P2.at<double>(idx,idx)));
}
4223
4224
4225
/* End of file. */
4226
4227