Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Tetragramm
GitHub Repository: Tetragramm/opencv
Path: blob/master/modules/calib3d/test/test_cameracalibration_tilt.cpp
16337 views
1
/*M///////////////////////////////////////////////////////////////////////////////////////
2
//
3
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4
//
5
// By downloading, copying, installing or using the software you agree to this license.
6
// If you do not agree to this license, do not download, install,
7
// copy or use the software.
8
//
9
//
10
// License Agreement
11
// For Open Source Computer Vision Library
12
//
13
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
15
// Third party copyrights are property of their respective owners.
16
//
17
// Redistribution and use in source and binary forms, with or without modification,
18
// are permitted provided that the following conditions are met:
19
//
20
// * Redistribution's of source code must retain the above copyright notice,
21
// this list of conditions and the following disclaimer.
22
//
23
// * Redistribution's in binary form must reproduce the above copyright notice,
24
// this list of conditions and the following disclaimer in the documentation
25
// and/or other materials provided with the distribution.
26
//
27
// * The name of the copyright holders may not be used to endorse or promote products
28
// derived from this software without specific prior written permission.
29
//
30
// This software is provided by the copyright holders and contributors "as is" and
31
// any express or implied warranties, including, but not limited to, the implied
32
// warranties of merchantability and fitness for a particular purpose are disclaimed.
33
// In no event shall the Intel Corporation or contributors be liable for any direct,
34
// indirect, incidental, special, exemplary, or consequential damages
35
// (including, but not limited to, procurement of substitute goods or services;
36
// loss of use, data, or profits; or business interruption) however caused
37
// and on any theory of liability, whether in contract, strict liability,
38
// or tort (including negligence or otherwise) arising in any way out of
39
// the use of this software, even if advised of the possibility of such damage.
40
//
41
//M*/
42
43
#include "test_precomp.hpp"
44
#include "opencv2/ts/cuda_test.hpp" // EXPECT_MAT_NEAR
45
46
namespace opencv_test { namespace {
47
48
#define NUM_DIST_COEFF_TILT 14
49
50
/**
51
Some conventions:
52
- the first camera determines the world coordinate system
53
- y points down, hence top means minimal y value (negative) and
54
bottom means maximal y value (positive)
55
- the field of view plane is tilted around x such that it
56
intersects the xy-plane in a line with a large (positive)
57
y-value
58
- image sensor and object are both modelled in the halfspace
59
z > 0
60
61
62
**/
63
class cameraCalibrationTiltTest : public ::testing::Test {

protected:
    cameraCalibrationTiltTest()
        : m_toRadian(acos(-1.0)/180.0)  // pi/180
        , m_toDegree(180.0/acos(-1.0))  // 180/pi
    {}
    virtual void SetUp();

protected:
    /** sensor size in pixels */
    static const cv::Size m_imageSize;
    static const double m_pixelSize;
    static const double m_circleConfusionPixel;
    static const double m_lensFocalLength;
    static const double m_lensFNumber;
    static const double m_objectDistance;
    static const double m_planeTiltDegree;
    static const double m_pointTargetDist;
    static const int m_pointTargetNum;

    /** image distance corresponding to working distance */
    double m_imageDistance;
    /** image tilt angle corresponding to the tilt of the object plane */
    double m_imageTiltDegree;
    /** center of the field of view, near and far plane */
    std::vector<cv::Vec3d> m_fovCenter;
    /** normal of the field of view, near and far plane */
    std::vector<cv::Vec3d> m_fovNormal;
    /** points on a plane calibration target */
    std::vector<cv::Point3d> m_pointTarget;
    /** rotations for the calibration target */
    std::vector<cv::Vec3d> m_pointTargetRvec;
    /** translations for the calibration target */
    std::vector<cv::Vec3d> m_pointTargetTvec;
    /** camera matrix */
    cv::Matx33d m_cameraMatrix;
    /** distortion coefficients */
    cv::Vec<double, NUM_DIST_COEFF_TILT> m_distortionCoeff;

    /** random generator */
    cv::RNG m_rng;
    /** degree to radian conversion factor */
    const double m_toRadian;
    /** radian to degree conversion factor */
    const double m_toDegree;

    /**
    computes for a given distance of an image or object point
    the distance of the corresponding object or image point
    (conjugate distance of the thin-lens equation)
    */
    double opticalMap(double dist) {
        return m_lensFocalLength*dist/(dist - m_lensFocalLength);
    }

    /** magnification of the optical map */
    double magnification(double dist) {
        return m_lensFocalLength/(dist - m_lensFocalLength);
    }

    /**
    Changes given distortion coefficients randomly by adding
    a uniformly distributed random variable in [-max max]
    \param coeff input/output coefficient vector, perturbed in place
    \param max limits for the random variables
    */
    void randomDistortionCoeff(
        cv::Vec<double, NUM_DIST_COEFF_TILT>& coeff,
        const cv::Vec<double, NUM_DIST_COEFF_TILT>& max)
    {
        for (int i = 0; i < coeff.rows; ++i)
            coeff(i) += m_rng.uniform(-max(i), max(i));
    }

    /** numerical (central difference) jacobian of cv::projectPoints
        w.r.t. rvec, tvec, fx, fy, cx, cy and the distortion coefficients */
    void numericalDerivative(
        cv::Mat& jac,
        double eps,
        const std::vector<cv::Point3d>& obj,
        const cv::Vec3d& rvec,
        const cv::Vec3d& tvec,
        const cv::Matx33d& camera,
        const cv::Vec<double, NUM_DIST_COEFF_TILT>& distor);

    /** remove points with projection outside the sensor array */
    void removeInvalidPoints(
        std::vector<cv::Point2d>& imagePoints,
        std::vector<cv::Point3d>& objectPoints);

    /** add uniformly distributed noise in [-halfWidthNoise, halfWidthNoise]
        to the image points and remove out of range points */
    void addNoiseRemoveInvalidPoints(
        std::vector<cv::Point2f>& imagePoints,
        std::vector<cv::Point3f>& objectPoints,
        std::vector<cv::Point2f>& noisyImagePoints,
        double halfWidthNoise);
};
159
160
/** Number of pixels of the sensor */
const cv::Size cameraCalibrationTiltTest::m_imageSize(1600, 1200);
/** Size of a pixel in mm */
const double cameraCalibrationTiltTest::m_pixelSize(.005);
/** Diameter of the circle of confusion (in pixels) */
const double cameraCalibrationTiltTest::m_circleConfusionPixel(3);
/** Focal length of the lens (same length unit as m_pixelSize, i.e. mm) */
const double cameraCalibrationTiltTest::m_lensFocalLength(16.4);
/** F-Number */
const double cameraCalibrationTiltTest::m_lensFNumber(8);
/** Working distance (same length unit as m_lensFocalLength) */
const double cameraCalibrationTiltTest::m_objectDistance(200);
/** Angle between optical axis and object plane normal (degrees) */
const double cameraCalibrationTiltTest::m_planeTiltDegree(55);
/** the calibration target are points on a square grid with this side length */
const double cameraCalibrationTiltTest::m_pointTargetDist(5);
/** the calibration target has (2*n + 1) x (2*n + 1) points */
const int cameraCalibrationTiltTest::m_pointTargetNum(15);
178
179
180
void cameraCalibrationTiltTest::SetUp()
{
    // image distance conjugate to the working distance
    m_imageDistance = opticalMap(m_objectDistance);
    // sensor tilt that keeps the tilted object plane in focus:
    // tan(imageTilt) = (m_imageDistance/m_objectDistance) * tan(planeTilt)
    m_imageTiltDegree = m_toDegree * atan2(
        m_imageDistance * tan(m_toRadian * m_planeTiltDegree),
        m_objectDistance);
    // half sensor height, projected by the sensor tilt
    double tmp = .5 * (m_imageSize.height - 1) * m_pixelSize
        * cos(m_toRadian * m_imageTiltDegree);
    // y-Value of tilted sensor (top and bottom edge)
    double yImage[2] = {tmp, -tmp};
    // change in z because of the tilt
    tmp *= sin(m_toRadian * m_imageTiltDegree);
    // z-values of the sensor lower and upper corner
    double zImage[2] = {
        m_imageDistance + tmp,
        m_imageDistance - tmp};
    // circle of confusion, in length units
    double circleConfusion = m_circleConfusionPixel*m_pixelSize;
    // aperture of the lens
    double aperture = m_lensFocalLength/m_lensFNumber;
    // near and far factor on the image side
    double nearFarFactorImage[2] = {
        aperture/(aperture - circleConfusion),
        aperture/(aperture + circleConfusion)};
    // on the object side - points that determine the field of
    // view; layout: {bottom, bottom-near, bottom-far,
    //                top,    top-near,    top-far}
    std::vector<cv::Vec3d> fovBottomTop(6);
    std::vector<cv::Vec3d>::iterator itFov = fovBottomTop.begin();
    for (size_t iBottomTop = 0; iBottomTop < 2; ++iBottomTop)
    {
        // mapping sensor to field of view
        *itFov = cv::Vec3d(0,yImage[iBottomTop],zImage[iBottomTop]);
        *itFov *= magnification((*itFov)(2));
        ++itFov;
        for (size_t iNearFar = 0; iNearFar < 2; ++iNearFar, ++itFov)
        {
            // scaling to the near and far distance on the
            // image side
            *itFov = cv::Vec3d(0,yImage[iBottomTop],zImage[iBottomTop]) *
                nearFarFactorImage[iNearFar];
            // scaling to the object side
            *itFov *= magnification((*itFov)(2));
        }
    }
    // field-of-view center and normal for the focus, near and far plane
    m_fovCenter.resize(3);
    m_fovNormal.resize(3);
    for (size_t i = 0; i < 3; ++i)
    {
        m_fovCenter[i] = .5*(fovBottomTop[i] + fovBottomTop[i+3]);
        m_fovNormal[i] = fovBottomTop[i+3] - fovBottomTop[i];
        m_fovNormal[i] = cv::normalize(m_fovNormal[i]);
        // rotate the bottom-to-top direction 90 degrees about x
        // ((x,y,z) -> (x,-z,y)) to obtain the plane normal
        m_fovNormal[i] = cv::Vec3d(
            m_fovNormal[i](0),
            -m_fovNormal[i](2),
            m_fovNormal[i](1));
        // one target position in each plane
        m_pointTargetTvec.push_back(m_fovCenter[i]);
        // rotation vector that turns the z-axis onto the plane normal
        cv::Vec3d rvec = cv::Vec3d(0,0,1).cross(m_fovNormal[i]);
        rvec = cv::normalize(rvec);
        rvec *= acos(m_fovNormal[i](2));
        m_pointTargetRvec.push_back(rvec);
    }
    // calibration target: square grid of (2*n+1) x (2*n+1) points
    // in the z = 0 plane
    size_t num = 2*m_pointTargetNum + 1;
    m_pointTarget.resize(num*num);
    std::vector<cv::Point3d>::iterator itTarget = m_pointTarget.begin();
    for (int iY = -m_pointTargetNum; iY <= m_pointTargetNum; ++iY)
    {
        for (int iX = -m_pointTargetNum; iX <= m_pointTargetNum; ++iX, ++itTarget)
        {
            *itTarget = cv::Point3d(iX, iY, 0) * m_pointTargetDist;
        }
    }
    // oblique target positions
    // approximate distance to the near and far plane
    double dist = std::max(
        std::abs(m_fovNormal[0].dot(m_fovCenter[0] - m_fovCenter[1])),
        std::abs(m_fovNormal[0].dot(m_fovCenter[0] - m_fovCenter[2])));
    // maximal angle such that target border "reaches" near and far plane
    double maxAngle = atan2(dist, m_pointTargetNum*m_pointTargetDist);
    std::vector<double> angle;
    angle.push_back(-maxAngle);
    angle.push_back(maxAngle);
    cv::Matx33d baseMatrix;
    cv::Rodrigues(m_pointTargetRvec.front(), baseMatrix);
    // four more poses: base orientation additionally rotated by
    // +/- maxAngle around the x and the y axis
    for (std::vector<double>::const_iterator itAngle = angle.begin(); itAngle != angle.end(); ++itAngle)
    {
        cv::Matx33d rmat;
        for (int i = 0; i < 2; ++i)
        {
            cv::Vec3d rvec(0,0,0);
            rvec(i) = *itAngle;
            cv::Rodrigues(rvec, rmat);
            rmat = baseMatrix*rmat;
            cv::Rodrigues(rmat, rvec);
            m_pointTargetTvec.push_back(m_fovCenter.front());
            m_pointTargetRvec.push_back(rvec);
        }
    }
    // camera matrix: square pixels, principal point at the sensor center,
    // focal length converted to pixels
    double cx = .5 * (m_imageSize.width - 1);
    double cy = .5 * (m_imageSize.height - 1);
    double f = m_imageDistance/m_pixelSize;
    m_cameraMatrix = cv::Matx33d(
        f,0,cx,
        0,f,cy,
        0,0,1);
    // distortion coefficients
    m_distortionCoeff = cv::Vec<double, NUM_DIST_COEFF_TILT>::all(0);
    // tauX models the sensor tilt computed above
    m_distortionCoeff(12) = -m_toRadian*m_imageTiltDegree;

}
294
295
void cameraCalibrationTiltTest::numericalDerivative(
296
cv::Mat& jac,
297
double eps,
298
const std::vector<cv::Point3d>& obj,
299
const cv::Vec3d& rvec,
300
const cv::Vec3d& tvec,
301
const cv::Matx33d& camera,
302
const cv::Vec<double, NUM_DIST_COEFF_TILT>& distor)
303
{
304
cv::Vec3d r(rvec);
305
cv::Vec3d t(tvec);
306
cv::Matx33d cm(camera);
307
cv::Vec<double, NUM_DIST_COEFF_TILT> dc(distor);
308
double* param[10+NUM_DIST_COEFF_TILT] = {
309
&r(0), &r(1), &r(2),
310
&t(0), &t(1), &t(2),
311
&cm(0,0), &cm(1,1), &cm(0,2), &cm(1,2),
312
&dc(0), &dc(1), &dc(2), &dc(3), &dc(4), &dc(5), &dc(6),
313
&dc(7), &dc(8), &dc(9), &dc(10), &dc(11), &dc(12), &dc(13)};
314
std::vector<cv::Point2d> pix0, pix1;
315
double invEps = .5/eps;
316
317
for (int col = 0; col < 10+NUM_DIST_COEFF_TILT; ++col)
318
{
319
double save = *(param[col]);
320
*(param[col]) = save + eps;
321
cv::projectPoints(obj, r, t, cm, dc, pix0);
322
*(param[col]) = save - eps;
323
cv::projectPoints(obj, r, t, cm, dc, pix1);
324
*(param[col]) = save;
325
326
std::vector<cv::Point2d>::const_iterator it0 = pix0.begin();
327
std::vector<cv::Point2d>::const_iterator it1 = pix1.begin();
328
int row = 0;
329
for (;it0 != pix0.end(); ++it0, ++it1)
330
{
331
cv::Point2d d = invEps*(*it0 - *it1);
332
jac.at<double>(row, col) = d.x;
333
++row;
334
jac.at<double>(row, col) = d.y;
335
++row;
336
}
337
}
338
}
339
340
void cameraCalibrationTiltTest::removeInvalidPoints(
341
std::vector<cv::Point2d>& imagePoints,
342
std::vector<cv::Point3d>& objectPoints)
343
{
344
// remove object and imgage points out of range
345
std::vector<cv::Point2d>::iterator itImg = imagePoints.begin();
346
std::vector<cv::Point3d>::iterator itObj = objectPoints.begin();
347
while (itImg != imagePoints.end())
348
{
349
bool ok =
350
itImg->x >= 0 &&
351
itImg->x <= m_imageSize.width - 1.0 &&
352
itImg->y >= 0 &&
353
itImg->y <= m_imageSize.height - 1.0;
354
if (ok)
355
{
356
++itImg;
357
++itObj;
358
}
359
else
360
{
361
itImg = imagePoints.erase(itImg);
362
itObj = objectPoints.erase(itObj);
363
}
364
}
365
}
366
367
void cameraCalibrationTiltTest::addNoiseRemoveInvalidPoints(
368
std::vector<cv::Point2f>& imagePoints,
369
std::vector<cv::Point3f>& objectPoints,
370
std::vector<cv::Point2f>& noisyImagePoints,
371
double halfWidthNoise)
372
{
373
std::vector<cv::Point2f>::iterator itImg = imagePoints.begin();
374
std::vector<cv::Point3f>::iterator itObj = objectPoints.begin();
375
noisyImagePoints.clear();
376
noisyImagePoints.reserve(imagePoints.size());
377
while (itImg != imagePoints.end())
378
{
379
cv::Point2f pix = *itImg + cv::Point2f(
380
(float)m_rng.uniform(-halfWidthNoise, halfWidthNoise),
381
(float)m_rng.uniform(-halfWidthNoise, halfWidthNoise));
382
bool ok =
383
pix.x >= 0 &&
384
pix.x <= m_imageSize.width - 1.0 &&
385
pix.y >= 0 &&
386
pix.y <= m_imageSize.height - 1.0;
387
if (ok)
388
{
389
noisyImagePoints.push_back(pix);
390
++itImg;
391
++itObj;
392
}
393
else
394
{
395
itImg = imagePoints.erase(itImg);
396
itObj = objectPoints.erase(itObj);
397
}
398
}
399
}
400
401
402
/**
For the first (frontal) target pose: compare the analytic jacobian
returned by cv::projectPoints with a central-difference numerical
jacobian, for several random tilted-model distortion coefficients.
*/
TEST_F(cameraCalibrationTiltTest, projectPoints)
{
    std::vector<cv::Point2d> imagePoints;
    std::vector<cv::Point3d> objectPoints = m_pointTarget;
    cv::Vec3d rvec = m_pointTargetRvec.front();
    cv::Vec3d tvec = m_pointTargetTvec.front();

    // half width of the uniform perturbation per coefficient
    cv::Vec<double, NUM_DIST_COEFF_TILT> coeffNoiseHalfWidth(
        .1, .1, // k1 k2
        .01, .01, // p1 p2
        .001, .001, .001, .001, // k3 k4 k5 k6
        .001, .001, .001, .001, // s1 s2 s3 s4
        .01, .01); // tauX tauY
    for (size_t numTest = 0; numTest < 10; ++numTest)
    {
        // create random distortion coefficients
        cv::Vec<double, NUM_DIST_COEFF_TILT> distortionCoeff = m_distortionCoeff;
        randomDistortionCoeff(distortionCoeff, coeffNoiseHalfWidth);

        // projection
        cv::projectPoints(
            objectPoints,
            rvec,
            tvec,
            m_cameraMatrix,
            distortionCoeff,
            imagePoints);

        // remove object and image points out of range
        removeInvalidPoints(imagePoints, objectPoints);

        // 10 pose/intrinsic parameters + distortion coefficients,
        // two jacobian rows (x and y) per point
        int numPoints = (int)imagePoints.size();
        int numParams = 10 + distortionCoeff.rows;
        cv::Mat jacobian(2*numPoints, numParams, CV_64FC1);

        // projection and jacobian
        cv::projectPoints(
            objectPoints,
            rvec,
            tvec,
            m_cameraMatrix,
            distortionCoeff,
            imagePoints,
            jacobian);

        // numerical derivatives
        cv::Mat numericJacobian(2*numPoints, numParams, CV_64FC1);
        double eps = 1e-7;
        numericalDerivative(
            numericJacobian,
            eps,
            objectPoints,
            rvec,
            tvec,
            m_cameraMatrix,
            distortionCoeff);

#if 0
        // debug output: entry-wise comparison of the two jacobians
        for (size_t row = 0; row < 2; ++row)
        {
            std::cout << "------ Row = " << row << " ------\n";
            for (size_t i = 0; i < 10+NUM_DIST_COEFF_TILT; ++i)
            {
                std::cout << i
                    << " jac = " << jacobian.at<double>(row,i)
                    << " num = " << numericJacobian.at<double>(row,i)
                    << " rel. diff = " << abs(numericJacobian.at<double>(row,i) - jacobian.at<double>(row,i))/abs(numericJacobian.at<double>(row,i))
                    << "\n";
            }
        }
#endif
        // relative difference for large values (rvec and tvec)
        cv::Mat check = abs(jacobian(cv::Range::all(), cv::Range(0,6)) - numericJacobian(cv::Range::all(), cv::Range(0,6)))/
            (1 + abs(jacobian(cv::Range::all(), cv::Range(0,6))));
        double minVal, maxVal;
        cv::minMaxIdx(check, &minVal, &maxVal);
        EXPECT_LE(maxVal, .01);
        // absolute difference for distortion and camera matrix
        EXPECT_MAT_NEAR(jacobian(cv::Range::all(), cv::Range(6,numParams)), numericJacobian(cv::Range::all(), cv::Range(6,numParams)), 1e-5);
    }
}
483
484
/**
Round trip: undistort a grid of pixel positions, lift the normalized
points to the z = 1 plane, project them again with the same model and
expect the original pixel positions back.
*/
TEST_F(cameraCalibrationTiltTest, undistortPoints)
{
    // half width of the uniform perturbation per coefficient
    cv::Vec<double, NUM_DIST_COEFF_TILT> coeffNoiseHalfWidth(
        .2, .1, // k1 k2
        .01, .01, // p1 p2
        .01, .01, .01, .01, // k3 k4 k5 k6
        .001, .001, .001, .001, // s1 s2 s3 s4
        .001, .001); // tauX tauY
    // step between sampled pixel positions
    double step = 99;
    double toleranceBackProjection = 1e-5;

    for (size_t numTest = 0; numTest < 10; ++numTest)
    {
        cv::Vec<double, NUM_DIST_COEFF_TILT> distortionCoeff = m_distortionCoeff;
        randomDistortionCoeff(distortionCoeff, coeffNoiseHalfWidth);

        // distorted points: regular grid covering the sensor
        std::vector<cv::Point2d> distorted;
        for (double x = 0; x <= m_imageSize.width-1; x += step)
            for (double y = 0; y <= m_imageSize.height-1; y += step)
                distorted.push_back(cv::Point2d(x,y));
        std::vector<cv::Point2d> normalizedUndistorted;

        // undistort
        cv::undistortPoints(distorted,
            normalizedUndistorted,
            m_cameraMatrix,
            distortionCoeff);

        // copy normalized points to 3D (lift to the z = 1 plane)
        std::vector<cv::Point3d> objectPoints;
        for (std::vector<cv::Point2d>::const_iterator itPnt = normalizedUndistorted.begin();
            itPnt != normalizedUndistorted.end(); ++itPnt)
            objectPoints.push_back(cv::Point3d(itPnt->x, itPnt->y, 1));

        // project with identity pose; must reproduce the input pixels
        std::vector<cv::Point2d> imagePoints(objectPoints.size());
        cv::projectPoints(objectPoints,
            cv::Vec3d(0,0,0),
            cv::Vec3d(0,0,0),
            m_cameraMatrix,
            distortionCoeff,
            imagePoints);

        EXPECT_MAT_NEAR(distorted, imagePoints, toleranceBackProjection);
    }
}
531
532
template <typename INPUT, typename ESTIMATE>
void show(const std::string& name, const INPUT in, const ESTIMATE est)
{
    // Report an estimated parameter next to the value used for the
    // simulation, together with the estimation error.
    std::cout << name;
    std::cout << " = " << est;
    std::cout << " (init = " << in;
    std::cout << ", diff = " << est - in;
    std::cout << ")\n";
}
538
539
template <typename INPUT>
540
void showVec(const std::string& name, const INPUT& in, const cv::Mat& est)
541
{
542
543
for (size_t i = 0; i < in.channels; ++i)
544
{
545
std::stringstream ss;
546
ss << name << "[" << i << "]";
547
show(ss.str(), in(i), est.at<double>(i));
548
}
549
}
550
551
/**
552
For given camera matrix and distortion coefficients
553
- project point target in different positions onto the sensor
554
- add pixel noise
555
- estimate camera model with noisy measurements
556
- compare result with initial model parameter
557
558
Parameter are differently affected by the noise
559
*/
560
TEST_F(cameraCalibrationTiltTest, calibrateCamera)
{
    // half width of the uniform perturbation per distortion coefficient
    cv::Vec<double, NUM_DIST_COEFF_TILT> coeffNoiseHalfWidth(
        .2, .1, // k1 k2
        .01, .01, // p1 p2
        0, 0, 0, 0, // k3 k4 k5 k6
        .001, .001, .001, .001, // s1 s2 s3 s4
        .001, .001); // tauX tauY
    double pixelNoiseHalfWidth = .5;
    // single precision copy of the calibration target
    std::vector<cv::Point3f> pointTarget;
    pointTarget.reserve(m_pointTarget.size());
    for (std::vector<cv::Point3d>::const_iterator it = m_pointTarget.begin(); it != m_pointTarget.end(); ++it)
        pointTarget.push_back(cv::Point3f(
            (float)(it->x),
            (float)(it->y),
            (float)(it->z)));

    // repeat with the pixel noise reduced by a factor 4 per iteration
    for (size_t numTest = 0; numTest < 5; ++numTest)
    {
        // create random distortion coefficients
        cv::Vec<double, NUM_DIST_COEFF_TILT> distortionCoeff = m_distortionCoeff;
        randomDistortionCoeff(distortionCoeff, coeffNoiseHalfWidth);

        // container for calibration data
        std::vector<std::vector<cv::Point3f> > viewsObjectPoints;
        std::vector<std::vector<cv::Point2f> > viewsImagePoints;
        std::vector<std::vector<cv::Point2f> > viewsNoisyImagePoints;

        // simulate calibration data with projectPoints
        std::vector<cv::Vec3d>::const_iterator itRvec = m_pointTargetRvec.begin();
        std::vector<cv::Vec3d>::const_iterator itTvec = m_pointTargetTvec.begin();
        // loop over different views
        for (;itRvec != m_pointTargetRvec.end(); ++ itRvec, ++itTvec)
        {
            std::vector<cv::Point3f> objectPoints(pointTarget);
            std::vector<cv::Point2f> imagePoints;
            std::vector<cv::Point2f> noisyImagePoints;
            // project calibration target to sensor
            cv::projectPoints(
                objectPoints,
                *itRvec,
                *itTvec,
                m_cameraMatrix,
                distortionCoeff,
                imagePoints);
            // add pixel noise and remove invisible points
            addNoiseRemoveInvalidPoints(
                imagePoints,
                objectPoints,
                noisyImagePoints,
                pixelNoiseHalfWidth);
            // add data for view
            viewsNoisyImagePoints.push_back(noisyImagePoints);
            viewsImagePoints.push_back(imagePoints);
            viewsObjectPoints.push_back(objectPoints);
        }

        // Output
        std::vector<cv::Mat> outRvecs, outTvecs;
        cv::Mat outCameraMatrix(3, 3, CV_64F, cv::Scalar::all(1)), outDistCoeff;

        // Stopping criteria
        cv::TermCriteria stop(
            cv::TermCriteria::COUNT+cv::TermCriteria::EPS,
            50000,
            1e-14);
        // model choice
        int flag =
            cv::CALIB_FIX_ASPECT_RATIO |
            // cv::CALIB_RATIONAL_MODEL |
            cv::CALIB_FIX_K3 |
            // cv::CALIB_FIX_K6 |
            cv::CALIB_THIN_PRISM_MODEL |
            cv::CALIB_TILTED_MODEL;
        // estimate
        double backProjErr = cv::calibrateCamera(
            viewsObjectPoints,
            viewsNoisyImagePoints,
            m_imageSize,
            outCameraMatrix,
            outDistCoeff,
            outRvecs,
            outTvecs,
            flag,
            stop);

        // returned reprojection error must stay within the noise level
        EXPECT_LE(backProjErr, pixelNoiseHalfWidth);

#if 0
        // debug output: estimated parameters vs. simulation input
        std::cout << "------ estimate ------\n";
        std::cout << "back projection error = " << backProjErr << "\n";
        std::cout << "points per view = {" << viewsObjectPoints.front().size();
        for (size_t i = 1; i < viewsObjectPoints.size(); ++i)
            std::cout << ", " << viewsObjectPoints[i].size();
        std::cout << "}\n";
        show("fx", m_cameraMatrix(0,0), outCameraMatrix.at<double>(0,0));
        show("fy", m_cameraMatrix(1,1), outCameraMatrix.at<double>(1,1));
        show("cx", m_cameraMatrix(0,2), outCameraMatrix.at<double>(0,2));
        show("cy", m_cameraMatrix(1,2), outCameraMatrix.at<double>(1,2));
        showVec("distor", distortionCoeff, outDistCoeff);
#endif
        if (pixelNoiseHalfWidth > 0)
        {
            // pose tolerances scale with the injected pixel noise
            double tolRvec = pixelNoiseHalfWidth;
            double tolTvec = m_objectDistance * tolRvec;
            // compare estimated poses and back projection per view
            for (size_t i = 0; i < viewsNoisyImagePoints.size(); ++i)
            {
                double dRvec = cv::norm(m_pointTargetRvec[i],
                    cv::Vec3d(outRvecs[i].at<double>(0), outRvecs[i].at<double>(1), outRvecs[i].at<double>(2))
                    );
                EXPECT_LE(dRvec, tolRvec);
                double dTvec = cv::norm(m_pointTargetTvec[i],
                    cv::Vec3d(outTvecs[i].at<double>(0), outTvecs[i].at<double>(1), outTvecs[i].at<double>(2))
                    );
                EXPECT_LE(dTvec, tolTvec);

                std::vector<cv::Point2f> backProjection;
                cv::projectPoints(
                    viewsObjectPoints[i],
                    outRvecs[i],
                    outTvecs[i],
                    outCameraMatrix,
                    outDistCoeff,
                    backProjection);
                EXPECT_MAT_NEAR(backProjection, viewsNoisyImagePoints[i], 1.5*pixelNoiseHalfWidth);
                EXPECT_MAT_NEAR(backProjection, viewsImagePoints[i], 1.5*pixelNoiseHalfWidth);
            }
        }
        pixelNoiseHalfWidth *= .25;
    }
}
692
693
}} // namespace
694
695