Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Tetragramm
GitHub Repository: Tetragramm/opencv
Path: blob/master/modules/calib3d/src/fisheye.cpp
16344 views
1
/*M///////////////////////////////////////////////////////////////////////////////////////
2
//
3
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4
//
5
// By downloading, copying, installing or using the software you agree to this license.
6
// If you do not agree to this license, do not download, install,
7
// copy or use the software.
8
//
9
//
10
// License Agreement
11
// For Open Source Computer Vision Library
12
//
13
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
15
// Third party copyrights are property of their respective owners.
16
//
17
// Redistribution and use in source and binary forms, with or without modification,
18
// are permitted provided that the following conditions are met:
19
//
20
// * Redistribution's of source code must retain the above copyright notice,
21
// this list of conditions and the following disclaimer.
22
//
23
// * Redistribution's in binary form must reproduce the above copyright notice,
24
// this list of conditions and the following disclaimer in the documentation
25
// and/or other materials provided with the distribution.
26
//
27
// * The name of the copyright holders may not be used to endorse or promote products
28
// derived from this software without specific prior written permission.
29
//
30
// This software is provided by the copyright holders and contributors "as is" and
31
// any express or implied warranties, including, but not limited to, the implied
32
// warranties of merchantability and fitness for a particular purpose are disclaimed.
33
// In no event shall the Intel Corporation or contributors be liable for any direct,
34
// indirect, incidental, special, exemplary, or consequential damages
35
// (including, but not limited to, procurement of substitute goods or services;
36
// loss of use, data, or profits; or business interruption) however caused
37
// and on any theory of liability, whether in contract, strict liability,
38
// or tort (including negligence or otherwise) arising in any way out of
39
// the use of this software, even if advised of the possibility of such damage.
40
//
41
//M*/
42
43
#include "precomp.hpp"
44
#include "fisheye.hpp"
45
#include <limits>
46
47
namespace cv { namespace
{
    // One pair of rows of the per-point projection Jacobian produced by
    // cv::fisheye::projectPoints. The field order defines the column layout
    // of the 2N x 15 CV_64F Jacobian matrix (the code reinterprets each
    // matrix row pair as one JacobianRow).
    struct JacobianRow
    {
        Vec2d df, dc;      // d(proj)/d(fx,fy), d(proj)/d(cx,cy)
        Vec4d dk;          // d(proj)/d(k1..k4) distortion coefficients
        Vec3d dom, dT;     // d(proj)/d(rotation rvec), d(proj)/d(translation)
        double dalpha;     // d(proj)/d(skew)
    };

    // Forward declaration; defined elsewhere in this module.
    // Presumably copies the sub-matrix of `src` selected by the nonzero
    // entries of `rows`/`cols` into `dst` — TODO confirm against definition.
    void subMatrix(const Mat& src, Mat& dst, const std::vector<uchar>& cols, const std::vector<uchar>& rows);
}}
59
60
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
61
/// cv::fisheye::projectPoints
62
63
// Convenience overload: unpacks the rigid transform from an Affine3d and
// forwards to the rvec/tvec-based implementation below. All validation and
// the actual projection happen in the delegate.
void cv::fisheye::projectPoints(InputArray objectPoints, OutputArray imagePoints, const Affine3d& affine,
    InputArray K, InputArray D, double alpha, OutputArray jacobian)
{
    CV_INSTRUMENT_REGION();

    projectPoints(objectPoints, imagePoints, affine.rvec(), affine.translation(), K, D, alpha, jacobian);
}
70
71
// Projects 3D object points through the fisheye (equidistant + polynomial)
// camera model, and optionally computes the analytic Jacobian of each image
// point w.r.t. all 15 parameters (f, c, k1..k4, alpha, om, T).
//
// objectPoints : CV_32FC3 or CV_64FC3 points in the object frame.
// imagePoints  : created with the same depth as objectPoints, 2 channels.
// _rvec, _tvec : 3-element rotation (Rodrigues) and translation.
// _K, _D       : 3x3 camera matrix and 4 distortion coefficients (same type).
// alpha        : skew coefficient.
// jacobian     : optional 2N x 15 CV_64F matrix; each pair of rows is laid
//                out per the JacobianRow struct above.
void cv::fisheye::projectPoints(InputArray objectPoints, OutputArray imagePoints, InputArray _rvec,
    InputArray _tvec, InputArray _K, InputArray _D, double alpha, OutputArray jacobian)
{
    CV_INSTRUMENT_REGION();

    // will support only 3-channel data now for points
    CV_Assert(objectPoints.type() == CV_32FC3 || objectPoints.type() == CV_64FC3);
    imagePoints.create(objectPoints.size(), CV_MAKETYPE(objectPoints.depth(), 2));
    size_t n = objectPoints.total();

    CV_Assert(_rvec.total() * _rvec.channels() == 3 && (_rvec.depth() == CV_32F || _rvec.depth() == CV_64F));
    CV_Assert(_tvec.total() * _tvec.channels() == 3 && (_tvec.depth() == CV_32F || _tvec.depth() == CV_64F));
    // Continuity is required because the vectors are read via raw ptr<> below.
    CV_Assert(_tvec.getMat().isContinuous() && _rvec.getMat().isContinuous());

    Vec3d om = _rvec.depth() == CV_32F ? (Vec3d)*_rvec.getMat().ptr<Vec3f>() : *_rvec.getMat().ptr<Vec3d>();
    Vec3d T = _tvec.depth() == CV_32F ? (Vec3d)*_tvec.getMat().ptr<Vec3f>() : *_tvec.getMat().ptr<Vec3d>();

    CV_Assert(_K.size() == Size(3,3) && (_K.type() == CV_32F || _K.type() == CV_64F) && _D.type() == _K.type() && _D.total() == 4);

    // Extract focal lengths and principal point, promoting to double.
    cv::Vec2d f, c;
    if (_K.depth() == CV_32F)
    {
        Matx33f K = _K.getMat();
        f = Vec2f(K(0, 0), K(1, 1));
        c = Vec2f(K(0, 2), K(1, 2));
    }
    else
    {
        Matx33d K = _K.getMat();
        f = Vec2d(K(0, 0), K(1, 1));
        c = Vec2d(K(0, 2), K(1, 2));
    }

    Vec4d k = _D.depth() == CV_32F ? (Vec4d)*_D.getMat().ptr<Vec4f>(): *_D.getMat().ptr<Vec4d>();

    // Jn aliases the output Jacobian rows as JacobianRow pairs (see struct).
    JacobianRow *Jn = 0;
    if (jacobian.needed())
    {
        int nvars = 2 + 2 + 1 + 4 + 3 + 3; // f, c, alpha, k, om, T,
        jacobian.create(2*(int)n, nvars, CV_64F);
        Jn = jacobian.getMat().ptr<JacobianRow>(0);
    }

    Matx33d R;
    Matx<double, 3, 9> dRdom;            // derivative of R w.r.t. the Rodrigues vector
    Rodrigues(om, R, dRdom);
    Affine3d aff(om, T);

    const Vec3f* Xf = objectPoints.getMat().ptr<Vec3f>();
    const Vec3d* Xd = objectPoints.getMat().ptr<Vec3d>();
    Vec2f *xpf = imagePoints.getMat().ptr<Vec2f>();
    Vec2d *xpd = imagePoints.getMat().ptr<Vec2d>();

    for(size_t i = 0; i < n; ++i)
    {
        Vec3d Xi = objectPoints.depth() == CV_32F ? (Vec3d)Xf[i] : Xd[i];
        Vec3d Y = aff*Xi;                // transform into the camera frame
        if (fabs(Y[2]) < DBL_MIN)        // avoid division by (near) zero depth
            Y[2] = 1;
        Vec2d x(Y[0]/Y[2], Y[1]/Y[2]);   // pinhole normalization

        double r2 = x.dot(x);
        double r = std::sqrt(r2);

        // Angle of the incoming ray:
        double theta = atan(r);

        double theta2 = theta*theta, theta3 = theta2*theta, theta4 = theta2*theta2, theta5 = theta4*theta,
                theta6 = theta3*theta3, theta7 = theta6*theta, theta8 = theta4*theta4, theta9 = theta8*theta;

        // Fisheye distortion: odd polynomial in theta.
        double theta_d = theta + k[0]*theta3 + k[1]*theta5 + k[2]*theta7 + k[3]*theta9;

        // For r ~ 0, theta_d/r -> 1; guard the division explicitly.
        double inv_r = r > 1e-8 ? 1.0/r : 1;
        double cdist = r > 1e-8 ? theta_d * inv_r : 1;

        Vec2d xd1 = x * cdist;
        Vec2d xd3(xd1[0] + alpha*xd1[1], xd1[1]);                 // apply skew
        Vec2d final_point(xd3[0] * f[0] + c[0], xd3[1] * f[1] + c[1]);

        if (objectPoints.depth() == CV_32F)
            xpf[i] = final_point;
        else
            xpd[i] = final_point;

        // Chain-rule derivatives, one stage per intermediate quantity above.
        if (jacobian.needed())
        {
            //Vec3d Xi = pdepth == CV_32F ? (Vec3d)Xf[i] : Xd[i];
            //Vec3d Y = aff*Xi;
            double dYdR[] = { Xi[0], Xi[1], Xi[2], 0, 0, 0, 0, 0, 0,
                              0, 0, 0, Xi[0], Xi[1], Xi[2], 0, 0, 0,
                              0, 0, 0, 0, 0, 0, Xi[0], Xi[1], Xi[2] };

            Matx33d dYdom_data = Matx<double, 3, 9>(dYdR) * dRdom.t();
            const Vec3d *dYdom = (Vec3d*)dYdom_data.val;

            Matx33d dYdT_data = Matx33d::eye();
            const Vec3d *dYdT = (Vec3d*)dYdT_data.val;

            //Vec2d x(Y[0]/Y[2], Y[1]/Y[2]);
            Vec3d dxdom[2];
            dxdom[0] = (1.0/Y[2]) * dYdom[0] - x[0]/Y[2] * dYdom[2];
            dxdom[1] = (1.0/Y[2]) * dYdom[1] - x[1]/Y[2] * dYdom[2];

            Vec3d dxdT[2];
            dxdT[0]  = (1.0/Y[2]) * dYdT[0] - x[0]/Y[2] * dYdT[2];
            dxdT[1]  = (1.0/Y[2]) * dYdT[1] - x[1]/Y[2] * dYdT[2];

            //double r2 = x.dot(x);
            Vec3d dr2dom = 2 * x[0] * dxdom[0] + 2 * x[1] * dxdom[1];
            Vec3d dr2dT  = 2 * x[0] *  dxdT[0] + 2 * x[1] *  dxdT[1];

            //double r = std::sqrt(r2);
            double drdr2 = r > 1e-8 ? 1.0/(2*r) : 1;
            Vec3d drdom = drdr2 * dr2dom;
            Vec3d drdT  = drdr2 * dr2dT;

            // Angle of the incoming ray:
            //double theta = atan(r);
            double dthetadr = 1.0/(1+r2);
            Vec3d dthetadom = dthetadr * drdom;
            Vec3d dthetadT  = dthetadr *  drdT;

            //double theta_d = theta + k[0]*theta3 + k[1]*theta5 + k[2]*theta7 + k[3]*theta9;
            double dtheta_ddtheta = 1 + 3*k[0]*theta2 + 5*k[1]*theta4 + 7*k[2]*theta6 + 9*k[3]*theta8;
            Vec3d dtheta_ddom = dtheta_ddtheta * dthetadom;
            Vec3d dtheta_ddT  = dtheta_ddtheta * dthetadT;
            Vec4d dtheta_ddk  = Vec4d(theta3, theta5, theta7, theta9);

            //double inv_r = r > 1e-8 ? 1.0/r : 1;
            //double cdist = r > 1e-8 ? theta_d / r : 1;
            Vec3d dcdistdom = inv_r * (dtheta_ddom - cdist*drdom);
            Vec3d dcdistdT  = inv_r * (dtheta_ddT  - cdist*drdT);
            Vec4d dcdistdk  = inv_r *  dtheta_ddk;

            //Vec2d xd1 = x * cdist;
            Vec4d dxd1dk[2];
            Vec3d dxd1dom[2], dxd1dT[2];
            dxd1dom[0] = x[0] * dcdistdom + cdist * dxdom[0];
            dxd1dom[1] = x[1] * dcdistdom + cdist * dxdom[1];
            dxd1dT[0]  = x[0] * dcdistdT  + cdist * dxdT[0];
            dxd1dT[1]  = x[1] * dcdistdT  + cdist * dxdT[1];
            dxd1dk[0]  = x[0] * dcdistdk;
            dxd1dk[1]  = x[1] * dcdistdk;

            //Vec2d xd3(xd1[0] + alpha*xd1[1], xd1[1]);
            Vec4d dxd3dk[2];
            Vec3d dxd3dom[2], dxd3dT[2];
            dxd3dom[0] = dxd1dom[0] + alpha * dxd1dom[1];
            dxd3dom[1] = dxd1dom[1];
            dxd3dT[0]  = dxd1dT[0]  + alpha * dxd1dT[1];
            dxd3dT[1]  = dxd1dT[1];
            dxd3dk[0]  = dxd1dk[0]  + alpha * dxd1dk[1];
            dxd3dk[1]  = dxd1dk[1];

            Vec2d dxd3dalpha(xd1[1], 0);

            //final jacobian
            Jn[0].dom = f[0] * dxd3dom[0];
            Jn[1].dom = f[1] * dxd3dom[1];

            Jn[0].dT = f[0] * dxd3dT[0];
            Jn[1].dT = f[1] * dxd3dT[1];

            Jn[0].dk = f[0] * dxd3dk[0];
            Jn[1].dk = f[1] * dxd3dk[1];

            Jn[0].dalpha = f[0] * dxd3dalpha[0];
            Jn[1].dalpha = 0; //f[1] * dxd3dalpha[1];

            Jn[0].df = Vec2d(xd3[0], 0);
            Jn[1].df = Vec2d(0, xd3[1]);

            Jn[0].dc = Vec2d(1, 0);
            Jn[1].dc = Vec2d(0, 1);

            //step to jacobian rows for next point
            Jn += 2;
        }
    }
}
252
253
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
254
/// cv::fisheye::distortPoints
255
256
// Applies the fisheye distortion model to ideal (undistorted, normalized)
// points and maps them into pixel coordinates using K and the skew alpha.
// Accepts 2-channel float or double points; output matches the input type.
void cv::fisheye::distortPoints(InputArray undistorted, OutputArray distorted, InputArray K, InputArray D, double alpha)
{
    CV_INSTRUMENT_REGION();

    // will support only 2-channel data now for points
    CV_Assert(undistorted.type() == CV_32FC2 || undistorted.type() == CV_64FC2);
    distorted.create(undistorted.size(), undistorted.type());
    size_t npoints = undistorted.total();

    CV_Assert(K.size() == Size(3,3) && (K.type() == CV_32F || K.type() == CV_64F) && D.total() == 4);

    // Pull focal lengths / principal point out of K, promoted to double.
    cv::Vec2d f, c;
    if (K.depth() == CV_32F)
    {
        Matx33f Kf = K.getMat();
        f = Vec2f(Kf(0, 0), Kf(1, 1));
        c = Vec2f(Kf(0, 2), Kf(1, 2));
    }
    else
    {
        Matx33d Kd = K.getMat();
        f = Vec2d(Kd(0, 0), Kd(1, 1));
        c = Vec2d(Kd(0, 2), Kd(1, 2));
    }

    Vec4d k = D.depth() == CV_32F ? (Vec4d)*D.getMat().ptr<Vec4f>() : *D.getMat().ptr<Vec4d>();

    const bool isFloat = (undistorted.depth() == CV_32F);
    const Vec2f* srcf = undistorted.getMat().ptr<Vec2f>();
    const Vec2d* srcd = undistorted.getMat().ptr<Vec2d>();
    Vec2f* dstf = distorted.getMat().ptr<Vec2f>();
    Vec2d* dstd = distorted.getMat().ptr<Vec2d>();

    for (size_t idx = 0; idx < npoints; ++idx)
    {
        Vec2d x = isFloat ? (Vec2d)srcf[idx] : srcd[idx];

        double r2 = x.dot(x);
        double r = std::sqrt(r2);

        // Angle of the incoming ray:
        double theta = atan(r);

        double theta2 = theta*theta, theta3 = theta2*theta, theta4 = theta2*theta2, theta5 = theta4*theta,
                theta6 = theta3*theta3, theta7 = theta6*theta, theta8 = theta4*theta4, theta9 = theta8*theta;

        // Fisheye distortion: odd polynomial in the ray angle.
        double theta_d = theta + k[0]*theta3 + k[1]*theta5 + k[2]*theta7 + k[3]*theta9;

        // theta_d/r -> 1 as r -> 0; guard the division explicitly.
        double inv_r = r > 1e-8 ? 1.0/r : 1;
        double cdist = r > 1e-8 ? theta_d * inv_r : 1;

        Vec2d xd1 = x * cdist;
        Vec2d xd3(xd1[0] + alpha*xd1[1], xd1[1]);   // apply skew
        Vec2d final_point(xd3[0] * f[0] + c[0], xd3[1] * f[1] + c[1]);

        if (isFloat)
            dstf[idx] = final_point;
        else
            dstd[idx] = final_point;
    }
}
316
317
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
318
/// cv::fisheye::undistortPoints
319
320
// Inverts the fisheye distortion for a set of image points, optionally
// applying a rectification rotation R and a new projection matrix P.
//
// distorted   : CV_32FC2 or CV_64FC2 pixel coordinates.
// undistorted : created with the same type; normalized coordinates if P is
//               empty, otherwise re-projected through P.
// K, D        : 3x3 camera matrix and 4 distortion coefficients.
// R           : optional 3x3 rotation or 3-element Rodrigues vector.
// P           : optional 3x3 or 3x4 new camera/projection matrix.
void cv::fisheye::undistortPoints( InputArray distorted, OutputArray undistorted, InputArray K, InputArray D, InputArray R, InputArray P)
{
    CV_INSTRUMENT_REGION();

    // will support only 2-channel data now for points
    CV_Assert(distorted.type() == CV_32FC2 || distorted.type() == CV_64FC2);
    undistorted.create(distorted.size(), distorted.type());

    CV_Assert(P.empty() || P.size() == Size(3, 3) || P.size() == Size(4, 3));
    CV_Assert(R.empty() || R.size() == Size(3, 3) || R.total() * R.channels() == 3);
    CV_Assert(D.total() == 4 && K.size() == Size(3, 3) && (K.depth() == CV_32F || K.depth() == CV_64F));

    // Extract intrinsics in double precision.
    cv::Vec2d f, c;
    if (K.depth() == CV_32F)
    {
        Matx33f camMat = K.getMat();
        f = Vec2f(camMat(0, 0), camMat(1, 1));
        c = Vec2f(camMat(0, 2), camMat(1, 2));
    }
    else
    {
        Matx33d camMat = K.getMat();
        f = Vec2d(camMat(0, 0), camMat(1, 1));
        c = Vec2d(camMat(0, 2), camMat(1, 2));
    }

    Vec4d k = D.depth() == CV_32F ? (Vec4d)*D.getMat().ptr<Vec4f>(): *D.getMat().ptr<Vec4d>();

    // Fold the optional rotation (vector or matrix form) and the optional
    // new projection matrix into a single 3x3 transform RR = P(:,0:3) * R.
    cv::Matx33d RR = cv::Matx33d::eye();
    if (!R.empty() && R.total() * R.channels() == 3)
    {
        cv::Vec3d rvec;
        R.getMat().convertTo(rvec, CV_64F);
        RR = cv::Affine3d(rvec).rotation();
    }
    else if (!R.empty() && R.size() == Size(3, 3))
        R.getMat().convertTo(RR, CV_64F);

    if(!P.empty())
    {
        cv::Matx33d PP;
        P.getMat().colRange(0, 3).convertTo(PP, CV_64F);
        RR = PP * RR;
    }

    // start undistorting
    const cv::Vec2f* srcf = distorted.getMat().ptr<cv::Vec2f>();
    const cv::Vec2d* srcd = distorted.getMat().ptr<cv::Vec2d>();
    cv::Vec2f* dstf = undistorted.getMat().ptr<cv::Vec2f>();
    cv::Vec2d* dstd = undistorted.getMat().ptr<cv::Vec2d>();

    size_t n = distorted.total();
    int sdepth = distorted.depth();

    for(size_t i = 0; i < n; i++ )
    {
        Vec2d pi = sdepth == CV_32F ? (Vec2d)srcf[i] : srcd[i]; // image point
        Vec2d pw((pi[0] - c[0])/f[0], (pi[1] - c[1])/f[1]); // world point

        double scale = 1.0;

        // In the distorted normalized plane the radius IS theta_d.
        double theta_d = sqrt(pw[0]*pw[0] + pw[1]*pw[1]);

        // the current camera model is only valid up to 180 FOV
        // for larger FOV the loop below does not converge
        // clip values so we still get plausible results for super fisheye images > 180 grad
        theta_d = min(max(-CV_PI/2., theta_d), CV_PI/2.);

        if (theta_d > 1e-8)
        {
            // compensate distortion iteratively: Newton's method on
            // f0(theta) = theta*(1 + k1 th^2 + ... + k4 th^8) - theta_d.
            double theta = theta_d;

            const double EPS = 1e-8; // or std::numeric_limits<double>::epsilon();
            for (int j = 0; j < 10; j++)
            {
                double theta2 = theta*theta, theta4 = theta2*theta2, theta6 = theta4*theta2, theta8 = theta6*theta2;
                double k0_theta2 = k[0] * theta2, k1_theta4 = k[1] * theta4, k2_theta6 = k[2] * theta6, k3_theta8 = k[3] * theta8;
                /* new_theta = theta - theta_fix, theta_fix = f0(theta) / f0'(theta) */
                double theta_fix = (theta * (1 + k0_theta2 + k1_theta4 + k2_theta6 + k3_theta8) - theta_d) /
                                   (1 + 3*k0_theta2 + 5*k1_theta4 + 7*k2_theta6 + 9*k3_theta8);
                theta = theta - theta_fix;
                if (fabs(theta_fix) < EPS)
                    break;
            }

            // Undo the equidistant mapping: r = tan(theta).
            scale = std::tan(theta) / theta_d;
        }

        Vec2d pu = pw * scale; //undistorted point

        // reproject
        Vec3d pr = RR * Vec3d(pu[0], pu[1], 1.0); // rotated point optionally multiplied by new camera matrix
        Vec2d fi(pr[0]/pr[2], pr[1]/pr[2]);       // final

        if( sdepth == CV_32F )
            dstf[i] = fi;
        else
            dstd[i] = fi;
    }
}
421
422
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
423
/// cv::fisheye::undistortPoints
424
425
// Builds remap() lookup tables that undistort and (optionally) rectify a
// fisheye image. For each destination pixel it back-projects through
// inv(P*R), applies the fisheye forward model, and stores the source
// coordinate either as packed fixed-point CV_16SC2/CV_16UC1 maps or as two
// CV_32F maps, depending on m1type.
void cv::fisheye::initUndistortRectifyMap( InputArray K, InputArray D, InputArray R, InputArray P,
    const cv::Size& size, int m1type, OutputArray map1, OutputArray map2 )
{
    CV_INSTRUMENT_REGION();

    CV_Assert( m1type == CV_16SC2 || m1type == CV_32F || m1type <=0 );
    map1.create( size, m1type <= 0 ? CV_16SC2 : m1type );
    map2.create( size, map1.type() == CV_16SC2 ? CV_16UC1 : CV_32F );

    CV_Assert((K.depth() == CV_32F || K.depth() == CV_64F) && (D.depth() == CV_32F || D.depth() == CV_64F));
    CV_Assert((P.empty() || P.depth() == CV_32F || P.depth() == CV_64F) && (R.empty() || R.depth() == CV_32F || R.depth() == CV_64F));
    CV_Assert(K.size() == Size(3, 3) && (D.empty() || D.total() == 4));
    CV_Assert(R.empty() || R.size() == Size(3, 3) || R.total() * R.channels() == 3);
    CV_Assert(P.empty() || P.size() == Size(3, 3) || P.size() == Size(4, 3));

    // Extract intrinsics in double precision.
    cv::Vec2d f, c;
    if (K.depth() == CV_32F)
    {
        Matx33f camMat = K.getMat();
        f = Vec2f(camMat(0, 0), camMat(1, 1));
        c = Vec2f(camMat(0, 2), camMat(1, 2));
    }
    else
    {
        Matx33d camMat = K.getMat();
        f = Vec2d(camMat(0, 0), camMat(1, 1));
        c = Vec2d(camMat(0, 2), camMat(1, 2));
    }

    // Empty D means zero distortion.
    Vec4d k = Vec4d::all(0);
    if (!D.empty())
        k = D.depth() == CV_32F ? (Vec4d)*D.getMat().ptr<Vec4f>(): *D.getMat().ptr<Vec4d>();

    // Rotation may be given as a Rodrigues vector or a 3x3 matrix.
    cv::Matx33d RR = cv::Matx33d::eye();
    if (!R.empty() && R.total() * R.channels() == 3)
    {
        cv::Vec3d rvec;
        R.getMat().convertTo(rvec, CV_64F);
        RR = Affine3d(rvec).rotation();
    }
    else if (!R.empty() && R.size() == Size(3, 3))
        R.getMat().convertTo(RR, CV_64F);

    cv::Matx33d PP = cv::Matx33d::eye();
    if (!P.empty())
        P.getMat().colRange(0, 3).convertTo(PP, CV_64F);

    // Map from destination pixels back to the rectified camera ray.
    cv::Matx33d iR = (PP * RR).inv(cv::DECOMP_SVD);

    for( int i = 0; i < size.height; ++i)
    {
        float* m1f = map1.getMat().ptr<float>(i);
        float* m2f = map2.getMat().ptr<float>(i);
        // Fixed-point aliases of the same rows, used when m1type == CV_16SC2.
        short* m1 = (short*)m1f;
        ushort* m2 = (ushort*)m2f;

        // Homogeneous ray for pixel (0, i); incrementally updated per column.
        double _x = i*iR(0, 1) + iR(0, 2),
               _y = i*iR(1, 1) + iR(1, 2),
               _w = i*iR(2, 1) + iR(2, 2);

        for( int j = 0; j < size.width; ++j)
        {
            double u, v;
            if( _w <= 0)
            {
                // Ray points behind the camera: push the sample off-image.
                u = (_x > 0) ? -std::numeric_limits<double>::infinity() : std::numeric_limits<double>::infinity();
                v = (_y > 0) ? -std::numeric_limits<double>::infinity() : std::numeric_limits<double>::infinity();
            }
            else
            {
                double x = _x/_w, y = _y/_w;

                // Forward fisheye model: ideal point -> distorted pixel.
                double r = sqrt(x*x + y*y);
                double theta = atan(r);

                double theta2 = theta*theta, theta4 = theta2*theta2, theta6 = theta4*theta2, theta8 = theta4*theta4;
                double theta_d = theta * (1 + k[0]*theta2 + k[1]*theta4 + k[2]*theta6 + k[3]*theta8);

                double scale = (r == 0) ? 1.0 : theta_d / r;
                u = f[0]*x*scale + c[0];
                v = f[1]*y*scale + c[1];
            }

            if( m1type == CV_16SC2 )
            {
                // Pack integer part into map1 and the INTER_TAB_SIZE
                // sub-pixel fraction into map2 (remap's fixed-point format).
                int iu = cv::saturate_cast<int>(u*cv::INTER_TAB_SIZE);
                int iv = cv::saturate_cast<int>(v*cv::INTER_TAB_SIZE);
                m1[j*2+0] = (short)(iu >> cv::INTER_BITS);
                m1[j*2+1] = (short)(iv >> cv::INTER_BITS);
                m2[j] = (ushort)((iv & (cv::INTER_TAB_SIZE-1))*cv::INTER_TAB_SIZE + (iu & (cv::INTER_TAB_SIZE-1)));
            }
            else if( m1type == CV_32FC1 )
            {
                m1f[j] = (float)u;
                m2f[j] = (float)v;
            }

            // Advance the homogeneous ray by one column.
            _x += iR(0, 0);
            _y += iR(1, 0);
            _w += iR(2, 0);
        }
    }
}
528
529
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
530
/// cv::fisheye::undistortImage
531
532
// Undistorts a whole fisheye image by generating undistortion maps for the
// (optionally different) camera matrix Knew and remapping with bilinear
// interpolation. An empty new_size falls back to the input image size.
void cv::fisheye::undistortImage(InputArray distorted, OutputArray undistorted,
        InputArray K, InputArray D, InputArray Knew, const Size& new_size)
{
    CV_INSTRUMENT_REGION();

    // Choose the output resolution: explicit request wins, else keep input size.
    const Size out_size = !new_size.empty() ? new_size : distorted.size();

    // Build the lookup maps once (identity rectification), then warp.
    cv::Mat mapx, mapy;
    fisheye::initUndistortRectifyMap(K, D, cv::Matx33d::eye(), Knew, out_size, CV_16SC2, mapx, mapy);
    cv::remap(distorted, undistorted, mapx, mapy, INTER_LINEAR, BORDER_CONSTANT);
}
543
544
545
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
546
/// cv::fisheye::estimateNewCameraMatrixForUndistortRectify
547
548
void cv::fisheye::estimateNewCameraMatrixForUndistortRectify(InputArray K, InputArray D, const Size &image_size, InputArray R,
549
OutputArray P, double balance, const Size& new_size, double fov_scale)
550
{
551
CV_INSTRUMENT_REGION();
552
553
CV_Assert( K.size() == Size(3, 3) && (K.depth() == CV_32F || K.depth() == CV_64F));
554
CV_Assert(D.empty() || ((D.total() == 4) && (D.depth() == CV_32F || D.depth() == CV_64F)));
555
556
int w = image_size.width, h = image_size.height;
557
balance = std::min(std::max(balance, 0.0), 1.0);
558
559
cv::Mat points(1, 4, CV_64FC2);
560
Vec2d* pptr = points.ptr<Vec2d>();
561
pptr[0] = Vec2d(w/2, 0);
562
pptr[1] = Vec2d(w, h/2);
563
pptr[2] = Vec2d(w/2, h);
564
pptr[3] = Vec2d(0, h/2);
565
566
fisheye::undistortPoints(points, points, K, D, R);
567
cv::Scalar center_mass = mean(points);
568
cv::Vec2d cn(center_mass.val);
569
570
double aspect_ratio = (K.depth() == CV_32F) ? K.getMat().at<float >(0,0)/K.getMat().at<float> (1,1)
571
: K.getMat().at<double>(0,0)/K.getMat().at<double>(1,1);
572
573
// convert to identity ratio
574
cn[0] *= aspect_ratio;
575
for(size_t i = 0; i < points.total(); ++i)
576
pptr[i][1] *= aspect_ratio;
577
578
double minx = DBL_MAX, miny = DBL_MAX, maxx = -DBL_MAX, maxy = -DBL_MAX;
579
for(size_t i = 0; i < points.total(); ++i)
580
{
581
miny = std::min(miny, pptr[i][1]);
582
maxy = std::max(maxy, pptr[i][1]);
583
minx = std::min(minx, pptr[i][0]);
584
maxx = std::max(maxx, pptr[i][0]);
585
}
586
587
double f1 = w * 0.5/(cn[0] - minx);
588
double f2 = w * 0.5/(maxx - cn[0]);
589
double f3 = h * 0.5 * aspect_ratio/(cn[1] - miny);
590
double f4 = h * 0.5 * aspect_ratio/(maxy - cn[1]);
591
592
double fmin = std::min(f1, std::min(f2, std::min(f3, f4)));
593
double fmax = std::max(f1, std::max(f2, std::max(f3, f4)));
594
595
double f = balance * fmin + (1.0 - balance) * fmax;
596
f *= fov_scale > 0 ? 1.0/fov_scale : 1.0;
597
598
cv::Vec2d new_f(f, f), new_c = -cn * f + Vec2d(w, h * aspect_ratio) * 0.5;
599
600
// restore aspect ratio
601
new_f[1] /= aspect_ratio;
602
new_c[1] /= aspect_ratio;
603
604
if (!new_size.empty())
605
{
606
double rx = new_size.width /(double)image_size.width;
607
double ry = new_size.height/(double)image_size.height;
608
609
new_f[0] *= rx; new_f[1] *= ry;
610
new_c[0] *= rx; new_c[1] *= ry;
611
}
612
613
Mat(Matx33d(new_f[0], 0, new_c[0],
614
0, new_f[1], new_c[1],
615
0, 0, 1)).convertTo(P, P.empty() ? K.type() : P.type());
616
}
617
618
619
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
620
/// cv::fisheye::stereoRectify
621
622
// Computes rectification rotations (R1, R2) and projection matrices
// (P1, P2, and optionally the disparity-to-depth matrix Q) for a calibrated
// fisheye stereo pair with relative pose (_R, _tvec).
//
// Approach: split the relative rotation evenly between the two cameras,
// then add a global rotation that aligns the baseline with the x axis, so
// that epipolar lines become horizontal. The rectified intrinsics come from
// estimateNewCameraMatrixForUndistortRectify on each camera.
//
// Fix vs. original: removed the dead local `aaa` (a reshape of _tvec that
// was never read).
void cv::fisheye::stereoRectify( InputArray K1, InputArray D1, InputArray K2, InputArray D2, const Size& imageSize,
        InputArray _R, InputArray _tvec, OutputArray R1, OutputArray R2, OutputArray P1, OutputArray P2,
        OutputArray Q, int flags, const Size& newImageSize, double balance, double fov_scale)
{
    CV_INSTRUMENT_REGION();

    CV_Assert((_R.size() == Size(3, 3) || _R.total() * _R.channels() == 3) && (_R.depth() == CV_32F || _R.depth() == CV_64F));
    CV_Assert(_tvec.total() * _tvec.channels() == 3 && (_tvec.depth() == CV_32F || _tvec.depth() == CV_64F));

    // Accept the relative rotation as a matrix or a Rodrigues vector.
    Vec3d rvec; // Rodrigues vector
    if (_R.size() == Size(3, 3))
    {
        cv::Matx33d rmat;
        _R.getMat().convertTo(rmat, CV_64F);
        rvec = Affine3d(rmat).rvec();
    }
    else if (_R.total() * _R.channels() == 3)
        _R.getMat().convertTo(rvec, CV_64F);

    Vec3d tvec;
    _tvec.getMat().convertTo(tvec, CV_64F);

    // rectification algorithm
    rvec *= -0.5;              // get average rotation

    Matx33d r_r;
    Rodrigues(rvec, r_r);      // rotate cameras to same orientation by averaging

    Vec3d t = r_r * tvec;
    Vec3d uu(t[0] > 0 ? 1 : -1, 0, 0);   // desired baseline direction (+/- x)

    // calculate global Z rotation that brings the baseline onto the x axis
    Vec3d ww = t.cross(uu);
    double nw = norm(ww);
    if (nw > 0.0)
        ww *= acos(fabs(t[0])/cv::norm(t))/nw;

    Matx33d wr;
    Rodrigues(ww, wr);

    // apply to both views
    Matx33d ri1 = wr * r_r.t();
    Mat(ri1, false).convertTo(R1, R1.empty() ? CV_64F : R1.type());
    Matx33d ri2 = wr * r_r;
    Mat(ri2, false).convertTo(R2, R2.empty() ? CV_64F : R2.type());
    Vec3d tnew = ri2 * tvec;   // baseline in the rectified frame

    // calculate projection/camera matrices. these contain the relevant rectified image internal params (fx, fy=fx, cx, cy)
    Matx33d newK1, newK2;
    estimateNewCameraMatrixForUndistortRectify(K1, D1, imageSize, R1, newK1, balance, newImageSize, fov_scale);
    estimateNewCameraMatrixForUndistortRectify(K2, D2, imageSize, R2, newK2, balance, newImageSize, fov_scale);

    double fc_new = std::min(newK1(1,1), newK2(1,1));
    Point2d cc_new[2] = { Vec2d(newK1(0, 2), newK1(1, 2)), Vec2d(newK2(0, 2), newK2(1, 2)) };

    // Vertical focal length must be the same for both images to keep the epipolar constraint use fy for fx also.
    // For simplicity, set the principal points for both cameras to be the average
    // of the two principal points (either one of or both x- and y- coordinates)
    if( flags & cv::CALIB_ZERO_DISPARITY )
        cc_new[0] = cc_new[1] = (cc_new[0] + cc_new[1]) * 0.5;
    else
        cc_new[0].y = cc_new[1].y = (cc_new[0].y + cc_new[1].y)*0.5;

    Mat(Matx34d(fc_new, 0, cc_new[0].x, 0,
                0, fc_new, cc_new[0].y, 0,
                0,      0,           1, 0), false).convertTo(P1, P1.empty() ? CV_64F : P1.type());

    Mat(Matx34d(fc_new, 0, cc_new[1].x, tnew[0]*fc_new, // baseline * focal length;,
                0, fc_new, cc_new[1].y, 0,
                0,      0,           1, 0), false).convertTo(P2, P2.empty() ? CV_64F : P2.type());

    // Disparity-to-depth mapping matrix for reprojectImageTo3D.
    if (Q.needed())
        Mat(Matx44d(1, 0, 0,           -cc_new[0].x,
                    0, 1, 0,           -cc_new[0].y,
                    0, 0, 0,            fc_new,
                    0, 0, -1./tnew[0], (cc_new[0].x - cc_new[1].x)/tnew[0]), false).convertTo(Q, Q.empty() ? CV_64F : Q.depth());
}
702
703
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
704
/// cv::fisheye::calibrate
705
706
// Calibrates a single fisheye camera from object/image point correspondences.
// Initializes intrinsics (from K/D when CALIB_USE_INTRINSIC_GUESS is set,
// otherwise from the image size), estimates per-view extrinsics, then runs a
// damped Gauss-Newton refinement until the TermCriteria is met.
// Returns the RMS reprojection error; writes K, D and per-view rvecs/tvecs.
// Note: CalibrateExtrinsics / ComputeJacobians / EstimateUncertainties are
// cv::internal helpers defined elsewhere in this module.
double cv::fisheye::calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, const Size& image_size,
        InputOutputArray K, InputOutputArray D, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
        int flags , cv::TermCriteria criteria)
{
    CV_INSTRUMENT_REGION();

    CV_Assert(!objectPoints.empty() && !imagePoints.empty() && objectPoints.total() == imagePoints.total());
    CV_Assert(objectPoints.type() == CV_32FC3 || objectPoints.type() == CV_64FC3);
    CV_Assert(imagePoints.type() == CV_32FC2 || imagePoints.type() == CV_64FC2);
    CV_Assert(K.empty() || (K.size() == Size(3,3)));
    CV_Assert(D.empty() || (D.total() == 4));
    CV_Assert(rvecs.empty() || (rvecs.channels() == 3));
    CV_Assert(tvecs.empty() || (tvecs.channels() == 3));

    // An intrinsic guess requires both K and D to be supplied.
    CV_Assert((!K.empty() && !D.empty()) || !(flags & CALIB_USE_INTRINSIC_GUESS));

    using namespace cv::internal;
    //-------------------------------Initialization
    IntrinsicParams finalParam;
    IntrinsicParams currentParam;
    IntrinsicParams errors;

    // isEstimate flags: [0..1]=f, [2..3]=c, [4]=alpha (skew), [5..8]=k1..k4.
    finalParam.isEstimate[0] = 1;
    finalParam.isEstimate[1] = 1;
    finalParam.isEstimate[2] = flags & CALIB_FIX_PRINCIPAL_POINT ? 0 : 1;
    finalParam.isEstimate[3] = flags & CALIB_FIX_PRINCIPAL_POINT ? 0 : 1;
    finalParam.isEstimate[4] = flags & CALIB_FIX_SKEW ? 0 : 1;
    finalParam.isEstimate[5] = flags & CALIB_FIX_K1 ? 0 : 1;
    finalParam.isEstimate[6] = flags & CALIB_FIX_K2 ? 0 : 1;
    finalParam.isEstimate[7] = flags & CALIB_FIX_K3 ? 0 : 1;
    finalParam.isEstimate[8] = flags & CALIB_FIX_K4 ? 0 : 1;

    const int recompute_extrinsic = flags & CALIB_RECOMPUTE_EXTRINSIC ? 1: 0;
    const int check_cond = flags & CALIB_CHECK_COND ? 1 : 0;

    const double alpha_smooth = 0.4;   // damping factor for the update step
    const double thresh_cond = 1e6;    // condition-number threshold for CALIB_CHECK_COND
    double change = 1;
    Vec2d err_std;

    Matx33d _K;
    Vec4d _D;
    if (flags & CALIB_USE_INTRINSIC_GUESS)
    {
        K.getMat().convertTo(_K, CV_64FC1);
        D.getMat().convertTo(_D, CV_64FC1);
        // Fixed distortion coefficients start (and stay) at zero.
        finalParam.Init(Vec2d(_K(0,0), _K(1, 1)),
                        Vec2d(_K(0,2), _K(1, 2)),
                        Vec4d(flags & CALIB_FIX_K1 ? 0 : _D[0],
                              flags & CALIB_FIX_K2 ? 0 : _D[1],
                              flags & CALIB_FIX_K3 ? 0 : _D[2],
                              flags & CALIB_FIX_K4 ? 0 : _D[3]),
                        _K(0, 1) / _K(0, 0));
    }
    else
    {
        // Default guess: focal from image size (~180 deg FOV assumption),
        // principal point at the image center.
        finalParam.Init(Vec2d(max(image_size.width, image_size.height) / CV_PI, max(image_size.width, image_size.height) / CV_PI),
                        Vec2d(image_size.width / 2.0 - 0.5, image_size.height / 2.0 - 0.5));
    }

    errors.isEstimate = finalParam.isEstimate;

    std::vector<Vec3d> omc(objectPoints.total()), Tc(objectPoints.total());

    // Initial per-view extrinsics from the current intrinsic guess.
    CalibrateExtrinsics(objectPoints, imagePoints, finalParam, check_cond, thresh_cond, omc, Tc);


    //-------------------------------Optimization
    for(int iter = 0; iter < std::numeric_limits<int>::max(); ++iter)
    {
        // TermCriteria::type: 1 = COUNT, 2 = EPS, 3 = COUNT + EPS.
        if ((criteria.type == 1 && iter >= criteria.maxCount) ||
            (criteria.type == 2 && change <= criteria.epsilon) ||
            (criteria.type == 3 && (change <= criteria.epsilon || iter >= criteria.maxCount)))
            break;

        // Damping ramps toward 1 as iterations proceed.
        double alpha_smooth2 = 1 - std::pow(1 - alpha_smooth, iter + 1.0);

        Mat JJ2, ex3;
        ComputeJacobians(objectPoints, imagePoints, finalParam, omc, Tc, check_cond,thresh_cond, JJ2, ex3);

        // Gauss-Newton step: solve the normal equations for the update G.
        Mat G;
        solve(JJ2, ex3, G);
        currentParam = finalParam + alpha_smooth2*G;

        // Relative change of (f, c) — the convergence measure.
        change = norm(Vec4d(currentParam.f[0], currentParam.f[1], currentParam.c[0], currentParam.c[1]) -
                Vec4d(finalParam.f[0], finalParam.f[1], finalParam.c[0], finalParam.c[1]))
                / norm(Vec4d(currentParam.f[0], currentParam.f[1], currentParam.c[0], currentParam.c[1]));

        finalParam = currentParam;

        if (recompute_extrinsic)
        {
            CalibrateExtrinsics(objectPoints, imagePoints, finalParam, check_cond,
                                    thresh_cond, omc, Tc);
        }
    }

    //-------------------------------Validation
    double rms;
    EstimateUncertainties(objectPoints, imagePoints, finalParam, omc, Tc, errors, err_std, thresh_cond,
                              check_cond, rms);

    //------------------------------- Pack the refined intrinsics back into K.
    _K = Matx33d(finalParam.f[0], finalParam.f[0] * finalParam.alpha, finalParam.c[0],
                 0,               finalParam.f[1],                    finalParam.c[1],
                 0,               0,                                  1);

    if (K.needed()) cv::Mat(_K).convertTo(K, K.empty() ? CV_64FC1 : K.type());
    if (D.needed()) cv::Mat(finalParam.k).convertTo(D, D.empty() ? CV_64FC1 : D.type());
    if (rvecs.isMatVector())
    {
        // Caller passed a vector<Mat>: fill one 3x1 CV_64F Mat per view.
        int N = (int)objectPoints.total();

        if(rvecs.empty())
            rvecs.create(N, 1, CV_64FC3);

        if(tvecs.empty())
            tvecs.create(N, 1, CV_64FC3);

        for(int i = 0; i < N; i++ )
        {
            rvecs.create(3, 1, CV_64F, i, true);
            tvecs.create(3, 1, CV_64F, i, true);
            memcpy(rvecs.getMat(i).ptr(), omc[i].val, sizeof(Vec3d));
            memcpy(tvecs.getMat(i).ptr(), Tc[i].val, sizeof(Vec3d));
        }
    }
    else
    {
        // Caller passed single Mats: one CV_64FC3 row per view.
        if (rvecs.needed()) cv::Mat(omc).convertTo(rvecs, rvecs.empty() ? CV_64FC3 : rvecs.type());
        if (tvecs.needed()) cv::Mat(Tc).convertTo(tvecs, tvecs.empty() ? CV_64FC3 : tvecs.type());
    }

    return rms;
}
841
842
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
843
/// cv::fisheye::stereoCalibrate
844
845
double cv::fisheye::stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
                                    InputOutputArray K1, InputOutputArray D1, InputOutputArray K2, InputOutputArray D2, Size imageSize,
                                    OutputArray R, OutputArray T, int flags, TermCriteria criteria)
{
    CV_INSTRUMENT_REGION();

    CV_Assert(!objectPoints.empty() && !imagePoints1.empty() && !imagePoints2.empty());
    // All three lists must describe the same number of views.
    // Fixed: this used to be '||', which let objectPoints.total() disagree with
    // the image point lists even though the loops below index all three by the
    // same view count.
    CV_Assert(objectPoints.total() == imagePoints1.total() && imagePoints1.total() == imagePoints2.total());
    CV_Assert(objectPoints.type() == CV_32FC3 || objectPoints.type() == CV_64FC3);
    CV_Assert(imagePoints1.type() == CV_32FC2 || imagePoints1.type() == CV_64FC2);
    CV_Assert(imagePoints2.type() == CV_32FC2 || imagePoints2.type() == CV_64FC2);

    CV_Assert(K1.empty() || (K1.size() == Size(3,3)));
    CV_Assert(D1.empty() || (D1.total() == 4));
    // Fixed copy/paste bug: the two checks below validated K1/D1 a second time
    // instead of K2/D2.
    CV_Assert(K2.empty() || (K2.size() == Size(3,3)));
    CV_Assert(D2.empty() || (D2.total() == 4));

    // If the intrinsics are fixed, the caller must supply all of them.
    CV_Assert((!K1.empty() && !K2.empty() && !D1.empty() && !D2.empty()) || !(flags & CALIB_FIX_INTRINSIC));

    //-------------------------------Initialization

    const int threshold = 50;        // max tolerated reprojection residual (px) before a pair is rejected
    const double thresh_cond = 1e6;  // Jacobian condition-number limit
    const int check_cond = 1;

    int n_points = (int)objectPoints.getMat(0).total();
    int n_images = (int)objectPoints.total();

    double change = 1;

    cv::internal::IntrinsicParams intrinsicLeft;
    cv::internal::IntrinsicParams intrinsicRight;

    cv::internal::IntrinsicParams intrinsicLeft_errors;
    cv::internal::IntrinsicParams intrinsicRight_errors;

    Matx33d _K1, _K2;
    Vec4d _D1, _D2;
    if (!K1.empty()) K1.getMat().convertTo(_K1, CV_64FC1);
    if (!D1.empty()) D1.getMat().convertTo(_D1, CV_64FC1);
    if (!K2.empty()) K2.getMat().convertTo(_K2, CV_64FC1);
    if (!D2.empty()) D2.getMat().convertTo(_D2, CV_64FC1);

    std::vector<Vec3d> rvecs1(n_images), tvecs1(n_images), rvecs2(n_images), tvecs2(n_images);

    // Monocular calibration of each camera; also yields per-view poses used to
    // seed the relative pose below.
    if (!(flags & CALIB_FIX_INTRINSIC))
    {
        calibrate(objectPoints, imagePoints1, imageSize, _K1, _D1, rvecs1, tvecs1, flags, TermCriteria(3, 20, 1e-6));
        calibrate(objectPoints, imagePoints2, imageSize, _K2, _D2, rvecs2, tvecs2, flags, TermCriteria(3, 20, 1e-6));
    }

    // Pack K/D into the internal (f, c, k, alpha) representation; alpha is the
    // skew expressed relative to fx.
    intrinsicLeft.Init(Vec2d(_K1(0,0), _K1(1, 1)), Vec2d(_K1(0,2), _K1(1, 2)),
                       Vec4d(_D1[0], _D1[1], _D1[2], _D1[3]), _K1(0, 1) / _K1(0, 0));

    intrinsicRight.Init(Vec2d(_K2(0,0), _K2(1, 1)), Vec2d(_K2(0,2), _K2(1, 2)),
                        Vec4d(_D2[0], _D2[1], _D2[2], _D2[3]), _K2(0, 1) / _K2(0, 0));

    // With fixed intrinsics, the per-view poses still have to be estimated.
    if ((flags & CALIB_FIX_INTRINSIC))
    {
        cv::internal::CalibrateExtrinsics(objectPoints, imagePoints1, intrinsicLeft, check_cond, thresh_cond, rvecs1, tvecs1);
        cv::internal::CalibrateExtrinsics(objectPoints, imagePoints2, intrinsicRight, check_cond, thresh_cond, rvecs2, tvecs2);
    }

    // Per-camera parameter slots: 0..3 = fx, fy, cx, cy; 4 = skew; 5..8 = k1..k4.
    intrinsicLeft.isEstimate[0] = flags & CALIB_FIX_INTRINSIC ? 0 : 1;
    intrinsicLeft.isEstimate[1] = flags & CALIB_FIX_INTRINSIC ? 0 : 1;
    intrinsicLeft.isEstimate[2] = flags & CALIB_FIX_INTRINSIC ? 0 : 1;
    intrinsicLeft.isEstimate[3] = flags & CALIB_FIX_INTRINSIC ? 0 : 1;
    intrinsicLeft.isEstimate[4] = flags & (CALIB_FIX_SKEW | CALIB_FIX_INTRINSIC) ? 0 : 1;
    intrinsicLeft.isEstimate[5] = flags & (CALIB_FIX_K1 | CALIB_FIX_INTRINSIC) ? 0 : 1;
    intrinsicLeft.isEstimate[6] = flags & (CALIB_FIX_K2 | CALIB_FIX_INTRINSIC) ? 0 : 1;
    intrinsicLeft.isEstimate[7] = flags & (CALIB_FIX_K3 | CALIB_FIX_INTRINSIC) ? 0 : 1;
    intrinsicLeft.isEstimate[8] = flags & (CALIB_FIX_K4 | CALIB_FIX_INTRINSIC) ? 0 : 1;

    intrinsicRight.isEstimate[0] = flags & CALIB_FIX_INTRINSIC ? 0 : 1;
    intrinsicRight.isEstimate[1] = flags & CALIB_FIX_INTRINSIC ? 0 : 1;
    intrinsicRight.isEstimate[2] = flags & CALIB_FIX_INTRINSIC ? 0 : 1;
    intrinsicRight.isEstimate[3] = flags & CALIB_FIX_INTRINSIC ? 0 : 1;
    intrinsicRight.isEstimate[4] = flags & (CALIB_FIX_SKEW | CALIB_FIX_INTRINSIC) ? 0 : 1;
    intrinsicRight.isEstimate[5] = flags & (CALIB_FIX_K1 | CALIB_FIX_INTRINSIC) ? 0 : 1;
    intrinsicRight.isEstimate[6] = flags & (CALIB_FIX_K2 | CALIB_FIX_INTRINSIC) ? 0 : 1;
    intrinsicRight.isEstimate[7] = flags & (CALIB_FIX_K3 | CALIB_FIX_INTRINSIC) ? 0 : 1;
    intrinsicRight.isEstimate[8] = flags & (CALIB_FIX_K4 | CALIB_FIX_INTRINSIC) ? 0 : 1;

    intrinsicLeft_errors.isEstimate = intrinsicLeft.isEstimate;
    intrinsicRight_errors.isEstimate = intrinsicRight.isEstimate;

    // Column selector for the big Jacobian: 9 left intrinsics + 9 right
    // intrinsics + 6 relative-pose params + 6 pose params per view (all poses
    // are always estimated).
    std::vector<uchar> selectedParams;
    std::vector<uchar> tmp(6 * (n_images + 1), 1);
    selectedParams.insert(selectedParams.end(), intrinsicLeft.isEstimate.begin(), intrinsicLeft.isEstimate.end());
    selectedParams.insert(selectedParams.end(), intrinsicRight.isEstimate.begin(), intrinsicRight.isEstimate.end());
    selectedParams.insert(selectedParams.end(), tmp.begin(), tmp.end());

    //Init values for rotation and translation between two views: per-view
    //relative motions, robustly combined with a component-wise median.
    cv::Mat om_list(1, n_images, CV_64FC3), T_list(1, n_images, CV_64FC3);
    cv::Mat om_ref, R_ref, T_ref, R1, R2;
    for (int image_idx = 0; image_idx < n_images; ++image_idx)
    {
        cv::Rodrigues(rvecs1[image_idx], R1);
        cv::Rodrigues(rvecs2[image_idx], R2);
        R_ref = R2 * R1.t();
        T_ref = cv::Mat(tvecs2[image_idx]) - R_ref * cv::Mat(tvecs1[image_idx]);
        cv::Rodrigues(R_ref, om_ref);
        om_ref.reshape(3, 1).copyTo(om_list.col(image_idx));
        T_ref.reshape(3, 1).copyTo(T_list.col(image_idx));
    }
    cv::Vec3d omcur = cv::internal::median3d(om_list);
    cv::Vec3d Tcur = cv::internal::median3d(T_list);

    // Full Jacobian (4 residual rows per point: 2 left + 2 right) and residual.
    cv::Mat J = cv::Mat::zeros(4 * n_points * n_images, 18 + 6 * (n_images + 1), CV_64FC1),
            e = cv::Mat::zeros(4 * n_points * n_images, 1, CV_64FC1), Jkk, ekk;

    for(int iter = 0; ; ++iter)
    {
        // TermCriteria::type is a bit mask: 1 = COUNT, 2 = EPS, 3 = COUNT+EPS.
        if ((criteria.type == 1 && iter >= criteria.maxCount) ||
            (criteria.type == 2 && change <= criteria.epsilon) ||
            (criteria.type == 3 && (change <= criteria.epsilon || iter >= criteria.maxCount)))
            break;

        J.create(4 * n_points * n_images, 18 + 6 * (n_images + 1), CV_64FC1);
        e.create(4 * n_points * n_images, 1, CV_64FC1);
        Jkk.create(4 * n_points, 18 + 6 * (n_images + 1), CV_64FC1);
        ekk.create(4 * n_points, 1, CV_64FC1);

        cv::Mat omr, Tr, domrdomckk, domrdTckk, domrdom, domrdT, dTrdomckk, dTrdTckk, dTrdom, dTrdT;

        for (int image_idx = 0; image_idx < n_images; ++image_idx)
        {
            Jkk = cv::Mat::zeros(4 * n_points, 18 + 6 * (n_images + 1), CV_64FC1);

            cv::Mat object = objectPoints.getMat(image_idx).clone();
            cv::Mat imageLeft = imagePoints1.getMat(image_idx).clone();
            cv::Mat imageRight = imagePoints2.getMat(image_idx).clone();
            cv::Mat jacobians, projected;

            //left camera jacobian
            cv::Mat rvec = cv::Mat(rvecs1[image_idx]);
            cv::Mat tvec = cv::Mat(tvecs1[image_idx]);
            cv::internal::projectPoints(object, projected, rvec, tvec, intrinsicLeft, jacobians);
            cv::Mat(cv::Mat((imageLeft - projected).t()).reshape(1, 1).t()).copyTo(ekk.rowRange(0, 2 * n_points));
            // Columns 8..13 of the projection Jacobian are d/d(om), d/d(T) of
            // this view's pose; 0..7 and 14 are the intrinsics (f, c, k, alpha).
            jacobians.colRange(8, 11).copyTo(Jkk.colRange(24 + image_idx * 6, 27 + image_idx * 6).rowRange(0, 2 * n_points));
            jacobians.colRange(11, 14).copyTo(Jkk.colRange(27 + image_idx * 6, 30 + image_idx * 6).rowRange(0, 2 * n_points));
            jacobians.colRange(0, 2).copyTo(Jkk.colRange(0, 2).rowRange(0, 2 * n_points));
            jacobians.colRange(2, 4).copyTo(Jkk.colRange(2, 4).rowRange(0, 2 * n_points));
            jacobians.colRange(4, 8).copyTo(Jkk.colRange(5, 9).rowRange(0, 2 * n_points));
            jacobians.col(14).copyTo(Jkk.col(4).rowRange(0, 2 * n_points));

            //right camera jacobian: the right pose is the left pose composed
            //with the relative motion (omcur, Tcur); chain-rule the derivatives.
            cv::internal::compose_motion(rvec, tvec, omcur, Tcur, omr, Tr, domrdomckk, domrdTckk, domrdom, domrdT, dTrdomckk, dTrdTckk, dTrdom, dTrdT);
            rvec = cv::Mat(rvecs2[image_idx]);
            tvec = cv::Mat(tvecs2[image_idx]);

            cv::internal::projectPoints(object, projected, omr, Tr, intrinsicRight, jacobians);
            cv::Mat(cv::Mat((imageRight - projected).t()).reshape(1, 1).t()).copyTo(ekk.rowRange(2 * n_points, 4 * n_points));
            cv::Mat dxrdom = jacobians.colRange(8, 11) * domrdom + jacobians.colRange(11, 14) * dTrdom;
            cv::Mat dxrdT = jacobians.colRange(8, 11) * domrdT + jacobians.colRange(11, 14)* dTrdT;
            cv::Mat dxrdomckk = jacobians.colRange(8, 11) * domrdomckk + jacobians.colRange(11, 14) * dTrdomckk;
            cv::Mat dxrdTckk = jacobians.colRange(8, 11) * domrdTckk + jacobians.colRange(11, 14) * dTrdTckk;

            dxrdom.copyTo(Jkk.colRange(18, 21).rowRange(2 * n_points, 4 * n_points));
            dxrdT.copyTo(Jkk.colRange(21, 24).rowRange(2 * n_points, 4 * n_points));
            dxrdomckk.copyTo(Jkk.colRange(24 + image_idx * 6, 27 + image_idx * 6).rowRange(2 * n_points, 4 * n_points));
            dxrdTckk.copyTo(Jkk.colRange(27 + image_idx * 6, 30 + image_idx * 6).rowRange(2 * n_points, 4 * n_points));
            jacobians.colRange(0, 2).copyTo(Jkk.colRange(9 + 0, 9 + 2).rowRange(2 * n_points, 4 * n_points));
            jacobians.colRange(2, 4).copyTo(Jkk.colRange(9 + 2, 9 + 4).rowRange(2 * n_points, 4 * n_points));
            jacobians.colRange(4, 8).copyTo(Jkk.colRange(9 + 5, 9 + 9).rowRange(2 * n_points, 4 * n_points));
            jacobians.col(14).copyTo(Jkk.col(9 + 4).rowRange(2 * n_points, 4 * n_points));

            //check goodness of stereopair
            double abs_max = 0;
            for (int i = 0; i < 4 * n_points; i++)
            {
                if (fabs(ekk.at<double>(i)) > abs_max)
                {
                    abs_max = fabs(ekk.at<double>(i));
                }
            }

            CV_Assert(abs_max < threshold); // bad stereo pair

            Jkk.copyTo(J.rowRange(image_idx * 4 * n_points, (image_idx + 1) * 4 * n_points));
            ekk.copyTo(e.rowRange(image_idx * 4 * n_points, (image_idx + 1) * 4 * n_points));
        }

        cv::Vec6d oldTom(Tcur[0], Tcur[1], Tcur[2], omcur[0], omcur[1], omcur[2]);

        //update all parameters: drop the columns of frozen intrinsics and take
        //one Gauss-Newton step on the normal equations.
        cv::subMatrix(J, J, selectedParams, std::vector<uchar>(J.rows, 1));
        int a = cv::countNonZero(intrinsicLeft.isEstimate);
        int b = cv::countNonZero(intrinsicRight.isEstimate);
        cv::Mat deltas;
        solve(J.t() * J, J.t()*e, deltas);
        if (a > 0)
            intrinsicLeft = intrinsicLeft + deltas.rowRange(0, a);
        if (b > 0)
            intrinsicRight = intrinsicRight + deltas.rowRange(a, a + b);
        omcur = omcur + cv::Vec3d(deltas.rowRange(a + b, a + b + 3));
        Tcur = Tcur + cv::Vec3d(deltas.rowRange(a + b + 3, a + b + 6));
        // Only the left poses are free parameters; right poses follow from
        // (omcur, Tcur) via compose_motion above.
        for (int image_idx = 0; image_idx < n_images; ++image_idx)
        {
            rvecs1[image_idx] = cv::Mat(cv::Mat(rvecs1[image_idx]) + deltas.rowRange(a + b + 6 + image_idx * 6, a + b + 9 + image_idx * 6));
            tvecs1[image_idx] = cv::Mat(cv::Mat(tvecs1[image_idx]) + deltas.rowRange(a + b + 9 + image_idx * 6, a + b + 12 + image_idx * 6));
        }

        cv::Vec6d newTom(Tcur[0], Tcur[1], Tcur[2], omcur[0], omcur[1], omcur[2]);
        change = cv::norm(newTom - oldTom) / cv::norm(newTom);
    }

    // RMS reprojection error over all residuals (treated as 2-D point errors).
    double rms = 0;
    const Vec2d* ptr_e = e.ptr<Vec2d>();
    for (size_t i = 0; i < e.total() / 2; i++)
    {
        rms += ptr_e[i][0] * ptr_e[i][0] + ptr_e[i][1] * ptr_e[i][1];
    }

    rms /= ((double)e.total() / 2.0);
    rms = sqrt(rms);

    // Repack the refined intrinsics into 3x3 camera matrices.
    _K1 = Matx33d(intrinsicLeft.f[0], intrinsicLeft.f[0] * intrinsicLeft.alpha, intrinsicLeft.c[0],
                  0,                  intrinsicLeft.f[1],                       intrinsicLeft.c[1],
                  0,                  0,                                        1);

    _K2 = Matx33d(intrinsicRight.f[0], intrinsicRight.f[0] * intrinsicRight.alpha, intrinsicRight.c[0],
                  0,                   intrinsicRight.f[1],                        intrinsicRight.c[1],
                  0,                   0,                                          1);

    Mat _R;
    Rodrigues(omcur, _R);

    if (K1.needed()) cv::Mat(_K1).convertTo(K1, K1.empty() ? CV_64FC1 : K1.type());
    if (K2.needed()) cv::Mat(_K2).convertTo(K2, K2.empty() ? CV_64FC1 : K2.type());
    if (D1.needed()) cv::Mat(intrinsicLeft.k).convertTo(D1, D1.empty() ? CV_64FC1 : D1.type());
    if (D2.needed()) cv::Mat(intrinsicRight.k).convertTo(D2, D2.empty() ? CV_64FC1 : D2.type());
    if (R.needed()) _R.convertTo(R, R.empty() ? CV_64FC1 : R.type());
    if (T.needed()) cv::Mat(Tcur).convertTo(T, T.empty() ? CV_64FC1 : T.type());

    return rms;
}
1082
1083
namespace cv{ namespace {
1084
void subMatrix(const Mat& src, Mat& dst, const std::vector<uchar>& cols, const std::vector<uchar>& rows)
1085
{
1086
CV_Assert(src.channels() == 1);
1087
1088
int nonzeros_cols = cv::countNonZero(cols);
1089
Mat tmp(src.rows, nonzeros_cols, CV_64F);
1090
1091
for (int i = 0, j = 0; i < (int)cols.size(); i++)
1092
{
1093
if (cols[i])
1094
{
1095
src.col(i).copyTo(tmp.col(j++));
1096
}
1097
}
1098
1099
int nonzeros_rows = cv::countNonZero(rows);
1100
dst.create(nonzeros_rows, nonzeros_cols, CV_64F);
1101
for (int i = 0, j = 0; i < (int)rows.size(); i++)
1102
{
1103
if (rows[i])
1104
{
1105
tmp.row(i).copyTo(dst.row(j++));
1106
}
1107
}
1108
}
1109
1110
}}
1111
1112
// Default constructor: all intrinsics zeroed and all 9 parameter slots
// (fx, fy, cx, cy, alpha, k1..k4) marked as not estimated.
cv::internal::IntrinsicParams::IntrinsicParams():
    f(Vec2d::all(0)), c(Vec2d::all(0)), k(Vec4d::all(0)), alpha(0), isEstimate(9,0)
{
}
1116
1117
// Construct from explicit focal lengths _f, principal point _c, fisheye
// distortion coefficients _k and skew _alpha; all estimation flags start at 0.
cv::internal::IntrinsicParams::IntrinsicParams(Vec2d _f, Vec2d _c, Vec4d _k, double _alpha):
    f(_f), c(_c), k(_k), alpha(_alpha), isEstimate(9,0)
{
}
1121
1122
// Apply a packed update vector: `a` holds one delta per *estimated* parameter,
// in slot order fx, fy, cx, cy, alpha, k1..k4. Slots whose isEstimate flag is
// off receive a zero increment. Returns the updated copy (flags preserved).
cv::internal::IntrinsicParams cv::internal::IntrinsicParams::operator+(const Mat& a)
{
    CV_Assert(a.type() == CV_64FC1);
    const double* delta = a.ptr<double>();

    // Expand the packed deltas into one increment per slot.
    double upd[9];
    int pos = 0;
    for (int slot = 0; slot < 9; ++slot)
        upd[slot] = isEstimate[slot] ? delta[pos++] : 0.0;

    IntrinsicParams res;
    res.f     = Vec2d(f[0] + upd[0], f[1] + upd[1]);
    res.c     = Vec2d(c[0] + upd[2], c[1] + upd[3]);
    res.alpha = alpha + upd[4];
    res.k     = Vec4d(k[0] + upd[5], k[1] + upd[6], k[2] + upd[7], k[3] + upd[8]);
    res.isEstimate = isEstimate;
    return res;
}
1142
1143
// Overwrite from a packed vector: `a` holds one value per *estimated* slot
// (order fx, fy, cx, cy, alpha, k1..k4); non-estimated slots are reset to 0.
// The isEstimate flags themselves are left untouched.
cv::internal::IntrinsicParams& cv::internal::IntrinsicParams::operator =(const Mat& a)
{
    CV_Assert(a.type() == CV_64FC1);
    const double* src = a.ptr<double>();

    // Unpack: estimated slots consume the next value, frozen slots become 0.
    double vals[9];
    int pos = 0;
    for (int slot = 0; slot < 9; ++slot)
        vals[slot] = isEstimate[slot] ? src[pos++] : 0.0;

    f     = Vec2d(vals[0], vals[1]);
    c     = Vec2d(vals[2], vals[3]);
    alpha = vals[4];
    k     = Vec4d(vals[5], vals[6], vals[7], vals[8]);

    return *this;
}
1162
1163
// Plain setter: overwrite every intrinsic field; the isEstimate flags are not
// modified.
void cv::internal::IntrinsicParams::Init(const cv::Vec2d& _f, const cv::Vec2d& _c, const cv::Vec4d& _k, const double& _alpha)
{
    f = _f;
    c = _c;
    k = _k;
    alpha = _alpha;
}
1170
1171
void cv::internal::projectPoints(cv::InputArray objectPoints, cv::OutputArray imagePoints,
1172
cv::InputArray _rvec,cv::InputArray _tvec,
1173
const IntrinsicParams& param, cv::OutputArray jacobian)
1174
{
1175
CV_INSTRUMENT_REGION();
1176
1177
CV_Assert(!objectPoints.empty() && (objectPoints.type() == CV_32FC3 || objectPoints.type() == CV_64FC3));
1178
Matx33d K(param.f[0], param.f[0] * param.alpha, param.c[0],
1179
0, param.f[1], param.c[1],
1180
0, 0, 1);
1181
fisheye::projectPoints(objectPoints, imagePoints, _rvec, _tvec, K, param.k, param.alpha, jacobian);
1182
}
1183
1184
// Gauss-Newton refinement of one view's pose (rvec, tvec), minimizing the
// fisheye reprojection error of objectPoints against imagePoints.
// On return, J holds the last pose Jacobian (columns 8..13 of the full
// projection Jacobian, i.e. d(proj)/d(om), d(proj)/d(T)). The loop stops after
// MaxIter iterations, when the relative update falls below 1e-10, or
// immediately when the Jacobian's condition number exceeds thresh_cond.
void cv::internal::ComputeExtrinsicRefine(const Mat& imagePoints, const Mat& objectPoints, Mat& rvec,
                            Mat& tvec, Mat& J, const int MaxIter,
                            const IntrinsicParams& param, const double thresh_cond)
{
    CV_Assert(!objectPoints.empty() && objectPoints.type() == CV_64FC3);
    CV_Assert(!imagePoints.empty() && imagePoints.type() == CV_64FC2);
    CV_Assert(rvec.total() > 2 && tvec.total() > 2);
    // Pose packed as a 6-vector: rotation (Rodrigues) then translation.
    Vec6d extrinsics(rvec.at<double>(0), rvec.at<double>(1), rvec.at<double>(2),
                    tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2));
    double change = 1;
    int iter = 0;

    while (change > 1e-10 && iter < MaxIter)
    {
        std::vector<Point2d> x;
        Mat jacobians;
        projectPoints(objectPoints, x, rvec, tvec, param, jacobians);

        // Residual between the measured and projected image points.
        Mat ex = imagePoints - Mat(x).t();
        ex = ex.reshape(1, 2);

        // Keep only the pose part of the projection Jacobian.
        J = jacobians.colRange(8, 14).clone();

        SVD svd(J, SVD::NO_UV);
        // Ratio of largest to smallest singular value.
        double condJJ = svd.w.at<double>(0)/svd.w.at<double>(5);

        // Ill-conditioned system: stop without updating (change = 0 ends loop).
        if (condJJ > thresh_cond)
            change = 0;
        else
        {
            Vec6d param_innov;
            // Least-squares step via normal equations + SVD.
            solve(J, ex.reshape(1, (int)ex.total()), param_innov, DECOMP_SVD + DECOMP_NORMAL);

            Vec6d param_up = extrinsics + param_innov;
            // Relative magnitude of the update, used as the convergence test.
            change = norm(param_innov)/norm(param_up);
            extrinsics = param_up;
            iter = iter + 1;

            // Write the updated pose back into the caller-visible matrices.
            rvec = Mat(Vec3d(extrinsics.val));
            tvec = Mat(Vec3d(extrinsics.val+3));
        }
    }
}
1227
1228
// Estimate the 3x3 homography H mapping planar model points M onto image
// points m (columns are points; 2xN inputs are promoted to homogeneous 3xN):
//  1. scale both inputs so the homogeneous row equals 1;
//  2. precondition m with an affine normalization Hnorm (centroid shift +
//     mean-absolute-deviation scaling) for numerical stability;
//  3. solve the DLT system L*h = 0 via SVD (normal equations L'L when Np > 4);
//  4. if Np > 4, polish the 8 free entries of H (H(2,2) pinned to 1) with
//     10 Gauss-Newton iterations on the reprojection residual.
// m and M are taken by value because they are modified in place.
cv::Mat cv::internal::ComputeHomography(Mat m, Mat M)
{
    CV_INSTRUMENT_REGION();

    int Np = m.cols;  // number of correspondences

    // Promote 2xN inputs to homogeneous coordinates.
    if (m.rows < 3)
    {
        vconcat(m, Mat::ones(1, Np, CV_64FC1), m);
    }
    if (M.rows < 3)
    {
        vconcat(M, Mat::ones(1, Np, CV_64FC1), M);
    }

    // Normalize so the third (homogeneous) row is exactly 1.
    divide(m, Mat::ones(3, 1, CV_64FC1) * m.row(2), m);
    divide(M, Mat::ones(3, 1, CV_64FC1) * M.row(2), M);

    Mat ax = m.row(0).clone();
    Mat ay = m.row(1).clone();

    // Centroid of the image points.
    double mxx = mean(ax)[0];
    double myy = mean(ay)[0];

    ax = ax - mxx;
    ay = ay - myy;

    // Mean absolute deviation per axis, used as the normalization scale.
    double scxx = mean(abs(ax))[0];
    double scyy = mean(abs(ay))[0];

    Mat Hnorm (Matx33d( 1/scxx, 0.0, -mxx/scxx,
                        0.0, 1/scyy, -myy/scyy,
                        0.0, 0.0, 1.0 ));

    Mat inv_Hnorm (Matx33d( scxx, 0, mxx,
                            0, scyy, myy,
                            0, 0, 1 ));
    Mat mn = Hnorm * m;  // normalized image points

    // Build the 2Nx9 DLT matrix (two rows per correspondence).
    Mat L = Mat::zeros(2*Np, 9, CV_64FC1);

    for (int i = 0; i < Np; ++i)
    {
        for (int j = 0; j < 3; j++)
        {
            L.at<double>(2 * i, j) = M.at<double>(j, i);
            L.at<double>(2 * i + 1, j + 3) = M.at<double>(j, i);
            L.at<double>(2 * i, j + 6) = -mn.at<double>(0,i) * M.at<double>(j, i);
            L.at<double>(2 * i + 1, j + 6) = -mn.at<double>(1,i) * M.at<double>(j, i);
        }
    }

    // Overdetermined: solve the (9x9) normal equations instead.
    if (Np > 4) L = L.t() * L;
    SVD svd(L);
    // Null-space vector, rescaled so the last entry (H(2,2)) is 1.
    Mat hh = svd.vt.row(8) / svd.vt.row(8).at<double>(8);
    Mat Hrem = hh.reshape(1, 3);
    // Undo the image-point normalization.
    Mat H = inv_Hnorm * Hrem;

    if (Np > 4)
    {
        // 8 free parameters of H (everything but the fixed H(2,2)).
        Mat hhv = H.reshape(1, 9)(Rect(0, 0, 1, 8)).clone();
        for (int iter = 0; iter < 10; iter++)
        {
            Mat mrep = H * M;  // current (homogeneous) reprojection
            Mat J = Mat::zeros(2 * Np, 8, CV_64FC1);
            Mat MMM;
            divide(M, Mat::ones(3, 1, CV_64FC1) * mrep(Rect(0, 2, mrep.cols, 1)), MMM);
            divide(mrep, Mat::ones(3, 1, CV_64FC1) * mrep(Rect(0, 2, mrep.cols, 1)), mrep);
            // Residual between measured and reprojected inhomogeneous points,
            // flattened to a single column.
            Mat m_err = m(Rect(0,0, m.cols, 2)) - mrep(Rect(0,0, mrep.cols, 2));
            m_err = Mat(m_err.t()).reshape(1, m_err.cols * m_err.rows);
            Mat MMM2, MMM3;
            multiply(Mat::ones(3, 1, CV_64FC1) * mrep(Rect(0, 0, mrep.cols, 1)), MMM, MMM2);
            multiply(Mat::ones(3, 1, CV_64FC1) * mrep(Rect(0, 1, mrep.cols, 1)), MMM, MMM3);

            // Jacobian of the residual w.r.t. the 8 homography parameters.
            for (int i = 0; i < Np; ++i)
            {
                for (int j = 0; j < 3; ++j)
                {
                    J.at<double>(2 * i, j) = -MMM.at<double>(j, i);
                    J.at<double>(2 * i + 1, j + 3) = -MMM.at<double>(j, i);
                }

                for (int j = 0; j < 2; ++j)
                {
                    J.at<double>(2 * i, j + 6) = MMM2.at<double>(j, i);
                    J.at<double>(2 * i + 1, j + 6) = MMM3.at<double>(j, i);
                }
            }
            divide(M, Mat::ones(3, 1, CV_64FC1) * mrep(Rect(0,2,mrep.cols,1)), MMM);
            // Gauss-Newton step: (J'J)^-1 J' * residual.
            Mat hh_innov = (J.t() * J).inv() * (J.t()) * m_err;
            Mat hhv_up = hhv - hh_innov;
            Mat tmp;
            // Re-append the fixed H(2,2) = 1 and reshape back to 3x3.
            vconcat(hhv_up, Mat::ones(1,1,CV_64FC1), tmp);
            Mat H_up = tmp.reshape(1,3);
            hhv = hhv_up;
            H = H_up;
        }
    }
    return H;
}
1328
1329
// Convert raw pixel coordinates into undistorted normalized image coordinates:
// subtract the principal point, divide by the focal lengths, remove the skew
// contribution, then invert the fisheye distortion model (identity K, so the
// result stays in normalized coordinates).
cv::Mat cv::internal::NormalizePixels(const Mat& imagePoints, const IntrinsicParams& param)
{
    CV_INSTRUMENT_REGION();

    CV_Assert(!imagePoints.empty() && imagePoints.type() == CV_64FC2);

    const size_t count = imagePoints.total();
    Mat distorted((int)count, 1, CV_64FC2);
    const Vec2d* src = imagePoints.ptr<Vec2d>();
    Vec2d* dst = distorted.ptr<Vec2d>();
    const Vec2d inv_f(1.0 / param.f[0], 1.0 / param.f[1]);
    for (size_t i = 0; i < count; ++i)
    {
        // Center, scale by 1/f, then remove skew from the x coordinate.
        Vec2d p = (src[i] - param.c).mul(inv_f);
        p[0] -= param.alpha * p[1];
        dst[i] = p;
    }
    Mat undistorted;
    cv::fisheye::undistortPoints(distorted, undistorted, Matx33d::eye(), param.k);
    return undistorted;
}
1346
1347
// Initial pose estimate for one view (omckk = Rodrigues rotation vector,
// Tckk = translation) from image/model correspondences:
//  - fit a plane to the model points (SVD of their covariance) and re-express
//    the model in the plane frame;
//  - compute the plane-to-image homography of the re-expressed 2-D points;
//  - orthonormalize the first two homography columns (Gram-Schmidt) into a
//    rotation, take the third column as the translation;
//  - compose with the plane-alignment transform to obtain the final pose.
void cv::internal::InitExtrinsics(const Mat& _imagePoints, const Mat& _objectPoints, const IntrinsicParams& param, Mat& omckk, Mat& Tckk)
{
    CV_Assert(!_objectPoints.empty() && _objectPoints.type() == CV_64FC3);
    CV_Assert(!_imagePoints.empty() && _imagePoints.type() == CV_64FC2);

    Mat imagePointsNormalized = NormalizePixels(_imagePoints, param).reshape(1).t();
    Mat objectPoints = _objectPoints.reshape(1).t();
    Mat objectPointsMean, covObjectPoints;
    Mat Rckk;
    int Np = imagePointsNormalized.cols;
    calcCovarMatrix(objectPoints, covObjectPoints, objectPointsMean, COVAR_NORMAL | COVAR_COLS);
    SVD svd(covObjectPoints);
    Mat R(svd.vt);
    // Model already (nearly) lies in the XY plane: use the identity frame.
    if (norm(R(Rect(2, 0, 1, 2))) < 1e-6)
        R = Mat::eye(3,3, CV_64FC1);
    // Keep a proper rotation (det = +1).
    if (determinant(R) < 0)
        R = -R;
    // Rigid transform moving the model into its best-fit plane frame.
    Mat T = -R * objectPointsMean;
    Mat X_new = R * objectPoints + T * Mat::ones(1, Np, CV_64FC1);
    Mat H = ComputeHomography(imagePointsNormalized, X_new(Rect(0,0,X_new.cols,2)));
    // Normalize so the first two columns have (on average) unit norm.
    double sc = .5 * (norm(H.col(0)) + norm(H.col(1)));
    H = H / sc;
    Mat u1 = H.col(0).clone();
    double norm_u1 = norm(u1);
    CV_Assert(fabs(norm_u1) > 0);
    u1 = u1 / norm_u1;
    // Gram-Schmidt: u2 orthogonal to u1, u3 completes the right-handed basis.
    Mat u2 = H.col(1).clone() - u1.dot(H.col(1).clone()) * u1;
    double norm_u2 = norm(u2);
    CV_Assert(fabs(norm_u2) > 0);
    u2 = u2 / norm_u2;
    Mat u3 = u1.cross(u2);
    Mat RRR;
    hconcat(u1, u2, RRR);
    hconcat(RRR, u3, RRR);
    Rodrigues(RRR, omckk);
    Rodrigues(omckk, Rckk);
    Tckk = H.col(2).clone();
    // Undo the plane-alignment transform applied to the model points above.
    Tckk = Tckk + Rckk * T;
    Rckk = Rckk * R;
    Rodrigues(Rckk, omckk);
}
1388
1389
// Estimate the pose of every view with the current intrinsics: initialize via
// InitExtrinsics, refine with up to 20 Gauss-Newton iterations, and, when
// check_cond is set, fail on views whose pose Jacobian is ill-conditioned.
// Results are written column-by-column into omc (rotations) and Tc
// (translations), each 1xN CV_64FC3.
void cv::internal::CalibrateExtrinsics(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints,
                         const IntrinsicParams& param, const int check_cond,
                         const double thresh_cond, InputOutputArray omc, InputOutputArray Tc)
{
    CV_Assert(!objectPoints.empty() && (objectPoints.type() == CV_32FC3 || objectPoints.type() == CV_64FC3));
    CV_Assert(!imagePoints.empty() && (imagePoints.type() == CV_32FC2 || imagePoints.type() == CV_64FC2));
    CV_Assert(omc.type() == CV_64FC3 || Tc.type() == CV_64FC3);

    if (omc.empty()) omc.create(1, (int)objectPoints.total(), CV_64FC3);
    if (Tc.empty()) Tc.create(1, (int)objectPoints.total(), CV_64FC3);

    const int maxIter = 20;  // Gauss-Newton iteration cap for the pose refinement

    for(int image_idx = 0; image_idx < (int)imagePoints.total(); ++image_idx)
    {
        Mat omckk, Tckk, JJ_kk;
        Mat image, object;

        objectPoints.getMat(image_idx).convertTo(object, CV_64FC3);
        imagePoints.getMat (image_idx).convertTo(image, CV_64FC2);

        // Points may come as a row or a column; detect the layout so each
        // helper receives the orientation it expects.
        bool imT = image.rows < image.cols;
        bool obT = object.rows < object.cols;

        InitExtrinsics(imT ? image.t() : image, obT ? object.t() : object, param, omckk, Tckk);

        // Note: the refinement expects the opposite orientation from Init.
        ComputeExtrinsicRefine(!imT ? image.t() : image, !obT ? object.t() : object, omckk, Tckk, JJ_kk, maxIter, param, thresh_cond);
        if (check_cond)
        {
            SVD svd(JJ_kk, SVD::NO_UV);
            // Condition number = largest / smallest singular value.
            if(svd.w.at<double>(0) / svd.w.at<double>((int)svd.w.total() - 1) > thresh_cond )
                CV_Error( cv::Error::StsInternal, format("CALIB_CHECK_COND - Ill-conditioned matrix for input array %d",image_idx));
        }
        omckk.reshape(3,1).copyTo(omc.getMat().col(image_idx));
        Tckk.reshape(3,1).copyTo(Tc.getMat().col(image_idx));
    }
}
1426
1427
// Assemble the normal equations of the monocular bundle adjustment in block
// form:  JJ2 = J^T * J  and  ex3 = J^T * e, where the parameter vector is the
// 9 intrinsics (fx, fy, cx, cy, alpha, k1..k4) followed by 6 pose parameters
// per view. Rows/columns of parameters not being estimated (param.isEstimate)
// are stripped at the end via subMatrix. With check_cond set, a view whose
// pose Jacobian is ill-conditioned triggers an assertion failure.
void cv::internal::ComputeJacobians(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints,
                      const IntrinsicParams& param, InputArray omc, InputArray Tc,
                      const int& check_cond, const double& thresh_cond, Mat& JJ2, Mat& ex3)
{
    CV_Assert(!objectPoints.empty() && (objectPoints.type() == CV_32FC3 || objectPoints.type() == CV_64FC3));
    CV_Assert(!imagePoints.empty() && (imagePoints.type() == CV_32FC2 || imagePoints.type() == CV_64FC2));

    CV_Assert(!omc.empty() && omc.type() == CV_64FC3);
    CV_Assert(!Tc.empty() && Tc.type() == CV_64FC3);

    int n = (int)objectPoints.total();  // number of views

    JJ2 = Mat::zeros(9 + 6 * n, 9 + 6 * n, CV_64FC1);
    ex3 = Mat::zeros(9 + 6 * n, 1, CV_64FC1 );

    for (int image_idx = 0; image_idx < n; ++image_idx)
    {
        Mat image, object;
        objectPoints.getMat(image_idx).convertTo(object, CV_64FC3);
        imagePoints.getMat (image_idx).convertTo(image, CV_64FC2);

        // Points may come as a row or a column; normalize orientation below.
        bool imT = image.rows < image.cols;
        Mat om(omc.getMat().col(image_idx)), T(Tc.getMat().col(image_idx));

        std::vector<Point2d> x;
        Mat jacobians;
        projectPoints(object, x, om, T, param, jacobians);
        // Reprojection residual for this view.
        Mat exkk = (imT ? image.t() : image) - Mat(x);

        // A = intrinsics block, reordered to (f, c, alpha, k): Jacobian
        // columns 0..3 are f and c, column 14 is alpha, columns 4..7 are k.
        Mat A(jacobians.rows, 9, CV_64FC1);
        jacobians.colRange(0, 4).copyTo(A.colRange(0, 4));
        jacobians.col(14).copyTo(A.col(4));
        jacobians.colRange(4, 8).copyTo(A.colRange(5, 9));

        A = A.t();

        // B = pose block (columns 8..13: d/d(om), d/d(T)).
        Mat B = jacobians.colRange(8, 14).clone();
        B = B.t();

        // Accumulate the block structure of J^T J: intrinsics x intrinsics,
        // per-view pose x pose (block diagonal), and the cross terms.
        JJ2(Rect(0, 0, 9, 9)) += A * A.t();
        JJ2(Rect(9 + 6 * image_idx, 9 + 6 * image_idx, 6, 6)) = B * B.t();

        JJ2(Rect(9 + 6 * image_idx, 0, 6, 9)) = A * B.t();
        JJ2(Rect(0, 9 + 6 * image_idx, 9, 6)) = JJ2(Rect(9 + 6 * image_idx, 0, 6, 9)).t();

        // Corresponding blocks of J^T e.
        ex3.rowRange(0, 9) += A * exkk.reshape(1, 2 * exkk.rows);
        ex3.rowRange(9 + 6 * image_idx, 9 + 6 * (image_idx + 1)) = B * exkk.reshape(1, 2 * exkk.rows);

        if (check_cond)
        {
            Mat JJ_kk = B.t();
            SVD svd(JJ_kk, SVD::NO_UV);
            CV_Assert(svd.w.at<double>(0) / svd.w.at<double>(svd.w.rows - 1) < thresh_cond);
        }
    }

    // Drop rows/columns of non-estimated intrinsics; all pose params stay.
    std::vector<uchar> idxs(param.isEstimate);
    idxs.insert(idxs.end(), 6 * n, 1);

    subMatrix(JJ2, JJ2, idxs, idxs);
    subMatrix(ex3, ex3, std::vector<uchar>(1, 1), idxs);
}
1489
1490
// Calibration quality statistics:
//  - std_err: per-axis standard deviation of the reprojection error,
//    bias-corrected (N/(N-1));
//  - errors: 3-sigma uncertainty of each estimated intrinsic parameter,
//    from the diagonal of the inverse normal-equation matrix;
//  - rms: root-mean-square reprojection error over all points.
void cv::internal::EstimateUncertainties(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints,
                           const IntrinsicParams& params, InputArray omc, InputArray Tc,
                           IntrinsicParams& errors, Vec2d& std_err, double thresh_cond, int check_cond, double& rms)
{
    CV_Assert(!objectPoints.empty() && (objectPoints.type() == CV_32FC3 || objectPoints.type() == CV_64FC3));
    CV_Assert(!imagePoints.empty() && (imagePoints.type() == CV_32FC2 || imagePoints.type() == CV_64FC2));

    CV_Assert(!omc.empty() && omc.type() == CV_64FC3);
    CV_Assert(!Tc.empty() && Tc.type() == CV_64FC3);

    // Total number of points across all views (views may differ in size).
    int total_ex = 0;
    for (int image_idx = 0; image_idx < (int)objectPoints.total(); ++image_idx)
    {
        total_ex += (int)objectPoints.getMat(image_idx).total();
    }
    Mat ex(total_ex, 1, CV_64FC2);
    int insert_idx = 0;
    for (int image_idx = 0; image_idx < (int)objectPoints.total(); ++image_idx)
    {
        Mat image, object;
        objectPoints.getMat(image_idx).convertTo(object, CV_64FC3);
        imagePoints.getMat (image_idx).convertTo(image, CV_64FC2);

        // Points may come as a row or a column; normalize orientation below.
        bool imT = image.rows < image.cols;

        Mat om(omc.getMat().col(image_idx)), T(Tc.getMat().col(image_idx));

        std::vector<Point2d> x;
        projectPoints(object, x, om, T, params, noArray());
        // Stack this view's residuals into the global residual vector.
        Mat ex_ = (imT ? image.t() : image) - Mat(x);
        ex_.copyTo(ex.rowRange(insert_idx, insert_idx + ex_.rows));
        insert_idx += ex_.rows;
    }

    meanStdDev(ex, noArray(), std_err);
    // Bias correction: sample standard deviation.
    std_err *= sqrt((double)ex.total()/((double)ex.total() - 1.0));

    // Scalar residual sigma over both coordinates, also bias-corrected.
    Vec<double, 1> sigma_x;
    meanStdDev(ex.reshape(1, 1), noArray(), sigma_x);
    sigma_x *= sqrt(2.0 * (double)ex.total()/(2.0 * (double)ex.total() - 1.0));

    Mat JJ2, ex3;
    ComputeJacobians(objectPoints, imagePoints, params, omc, Tc, check_cond, thresh_cond, JJ2, ex3);

    // Element-wise sqrt of the parameter covariance (J^T J)^-1.
    sqrt(JJ2.inv(), JJ2);

    // 3-sigma confidence interval per estimated intrinsic.
    errors = 3 * sigma_x(0) * JJ2.diag();
    rms = sqrt(norm(ex, NORM_L2SQR)/ex.total());
}
1539
1540
// Jacobians of the matrix product AB with respect to the entries of A and B,
// using column-major (MATLAB-style) vectorization. For A (p x n) and
// B (n x q): dABdA is (p*q) x (p*n) with d(AB)(j,i)/dA(j,k) = B(k,i), and
// dABdB is (p*q) x (n*q), block-diagonal with q copies of A.
void cv::internal::dAB(InputArray A, InputArray B, OutputArray dABdA, OutputArray dABdB)
{
    CV_Assert(A.getMat().cols == B.getMat().rows);
    CV_Assert(A.type() == CV_64FC1 && B.type() == CV_64FC1);

    int p = A.getMat().rows;
    int n = A.getMat().cols;
    int q = B.getMat().cols;

    dABdA.create(p * q, p * n, CV_64FC1);
    dABdB.create(p * q, q * n, CV_64FC1);

    dABdA.getMat() = Mat::zeros(p * q, p * n, CV_64FC1);
    dABdB.getMat() = Mat::zeros(p * q, q * n, CV_64FC1);

    // Row ij of dABdA corresponds to output entry (j, i) in column-major
    // order; column kj corresponds to A entry (j, k).
    for (int i = 0; i < q; ++i)
    {
        for (int j = 0; j < p; ++j)
        {
            int ij = j + i * p;
            for (int k = 0; k < n; ++k)
            {
                int kj = j + k * p;
                dABdA.getMat().at<double>(ij, kj) = B.getMat().at<double>(k, i);
            }
        }
    }

    // dABdB: one copy of A per output column of AB, along the block diagonal.
    for (int i = 0; i < q; ++i)
    {
        A.getMat().copyTo(dABdB.getMat().rowRange(i * p, i * p + p).colRange(i * n, i * n + n));
    }
}
1573
1574
void cv::internal::JRodriguesMatlab(const Mat& src, Mat& dst)
1575
{
1576
Mat tmp(src.cols, src.rows, src.type());
1577
if (src.rows == 9)
1578
{
1579
Mat(src.row(0).t()).copyTo(tmp.col(0));
1580
Mat(src.row(1).t()).copyTo(tmp.col(3));
1581
Mat(src.row(2).t()).copyTo(tmp.col(6));
1582
Mat(src.row(3).t()).copyTo(tmp.col(1));
1583
Mat(src.row(4).t()).copyTo(tmp.col(4));
1584
Mat(src.row(5).t()).copyTo(tmp.col(7));
1585
Mat(src.row(6).t()).copyTo(tmp.col(2));
1586
Mat(src.row(7).t()).copyTo(tmp.col(5));
1587
Mat(src.row(8).t()).copyTo(tmp.col(8));
1588
}
1589
else
1590
{
1591
Mat(src.col(0).t()).copyTo(tmp.row(0));
1592
Mat(src.col(1).t()).copyTo(tmp.row(3));
1593
Mat(src.col(2).t()).copyTo(tmp.row(6));
1594
Mat(src.col(3).t()).copyTo(tmp.row(1));
1595
Mat(src.col(4).t()).copyTo(tmp.row(4));
1596
Mat(src.col(5).t()).copyTo(tmp.row(7));
1597
Mat(src.col(6).t()).copyTo(tmp.row(2));
1598
Mat(src.col(7).t()).copyTo(tmp.row(5));
1599
Mat(src.col(8).t()).copyTo(tmp.row(8));
1600
}
1601
dst = tmp.clone();
1602
}
1603
1604
// Compose two rigid motions given as Rodrigues vectors + translations:
//   R3 = R2 * R1,   T3 = R2 * T1 + T2
// (motion 1 applied first, then motion 2). Outputs the composed motion
// (om3, T3) and the partial derivatives of om3/T3 with respect to each input,
// obtained by chaining the Rodrigues Jacobians (reordered to MATLAB layout via
// JRodriguesMatlab) with the matrix-product derivatives from dAB.
void cv::internal::compose_motion(InputArray _om1, InputArray _T1, InputArray _om2, InputArray _T2,
                    Mat& om3, Mat& T3, Mat& dom3dom1, Mat& dom3dT1, Mat& dom3dom2,
                    Mat& dom3dT2, Mat& dT3dom1, Mat& dT3dT1, Mat& dT3dom2, Mat& dT3dT2)
{
    Mat om1 = _om1.getMat();
    Mat om2 = _om2.getMat();
    Mat T1 = _T1.getMat().reshape(1, 3);
    Mat T2 = _T2.getMat().reshape(1, 3);

    //% Rotations:
    Mat R1, R2, R3, dR1dom1(9, 3, CV_64FC1), dR2dom2;
    Rodrigues(om1, R1, dR1dom1);
    Rodrigues(om2, R2, dR2dom2);
    // Reorder the Rodrigues Jacobians to the column-major (MATLAB) convention
    // used by dAB below.
    JRodriguesMatlab(dR1dom1, dR1dom1);
    JRodriguesMatlab(dR2dom2, dR2dom2);
    R3 = R2 * R1;
    Mat dR3dR2, dR3dR1;
    dAB(R2, R1, dR3dR2, dR3dR1);
    Mat dom3dR3;
    Rodrigues(R3, om3, dom3dR3);
    JRodriguesMatlab(dom3dR3, dom3dR3);
    // Chain rule: om3 depends on om1/om2 through R3.
    dom3dom1 = dom3dR3 * dR3dR1 * dR1dom1;
    dom3dom2 = dom3dR3 * dR3dR2 * dR2dom2;
    // Rotation part does not depend on either translation.
    dom3dT1 = Mat::zeros(3, 3, CV_64FC1);
    dom3dT2 = Mat::zeros(3, 3, CV_64FC1);

    //% Translations:
    Mat T3t = R2 * T1;
    Mat dT3tdR2, dT3tdT1;
    dAB(R2, T1, dT3tdR2, dT3tdT1);
    Mat dT3tdom2 = dT3tdR2 * dR2dom2;
    T3 = T3t + T2;
    dT3dT1 = dT3tdT1;
    dT3dT2 = Mat::eye(3, 3, CV_64FC1);
    dT3dom2 = dT3tdom2;
    // T3 = R2*T1 + T2 has no dependence on om1.
    dT3dom1 = Mat::zeros(3, 3, CV_64FC1);
}
1641
1642
double cv::internal::median(const Mat& row)
1643
{
1644
CV_Assert(row.type() == CV_64FC1);
1645
CV_Assert(!row.empty() && row.rows == 1);
1646
Mat tmp = row.clone();
1647
sort(tmp, tmp, 0);
1648
if ((int)tmp.total() % 2) return tmp.at<double>((int)tmp.total() / 2);
1649
else return 0.5 *(tmp.at<double>((int)tmp.total() / 2) + tmp.at<double>((int)tmp.total() / 2 - 1));
1650
}
1651
1652
// Component-wise median of a 1xN 3-channel (CV_64F) vector list: split the
// channels into three rows and take the scalar median of each.
cv::Vec3d cv::internal::median3d(InputArray m)
{
    CV_Assert(m.depth() == CV_64F && m.getMat().rows == 1);
    Mat channelRows = Mat(m.getMat().t()).reshape(1).t();
    return Vec3d(median(channelRows.row(0)),
                 median(channelRows.row(1)),
                 median(channelRows.row(2)));
}
1658
1659