// global_motion.cpp — OpenCV videostab module
// Path: modules/videostab/src/global_motion.cpp
// (mirrored from GitHub repository Tetragramm/opencv, blob/master)
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/videostab/global_motion.hpp"
#include "opencv2/videostab/ring_buffer.hpp"
#include "opencv2/videostab/outlier_rejection.hpp"
#include "opencv2/opencv_modules.hpp"
#include "clp.hpp"

#include "opencv2/core/private.cuda.hpp"

#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW)
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)

namespace cv { namespace cuda {
    // CUDA support was compiled out: any call is a configuration error.
    static void compactPoints(GpuMat&, GpuMat&, const GpuMat&) { throw_no_cuda(); }
}}

#else

namespace cv { namespace cuda { namespace device { namespace globmotion {
    // Implemented in the companion CUDA kernel file; returns the number of
    // point pairs kept after compaction.
    int compactPoints(int N, float *points0, float *points1, const uchar *mask);
}}}}

namespace cv { namespace cuda {

// Drops the point pairs whose mask entry is zero, compacting both 1xN
// point arrays in place on the device; columns shrink to the kept count.
static void compactPoints(GpuMat &points0, GpuMat &points1, const GpuMat &mask)
{
    CV_Assert(points0.rows == 1 && points1.rows == 1 && mask.rows == 1);
    CV_Assert(points0.type() == CV_32FC2 && points1.type() == CV_32FC2 && mask.type() == CV_8U);
    CV_Assert(points0.cols == mask.cols && points1.cols == mask.cols);

    const int total = points0.cols;
    const int kept = cv::cuda::device::globmotion::compactPoints(
            total, (float*)points0.data, (float*)points1.data, mask.data);

    points0 = points0.colRange(0, kept);
    points1 = points1.colRange(0, kept);
}

}}

#endif
#endif

namespace cv
80
{
81
namespace videostab
82
{
83
84
// does isotropic normalization
85
static Mat normalizePoints(int npoints, Point2f *points)
86
{
87
float cx = 0.f, cy = 0.f;
88
for (int i = 0; i < npoints; ++i)
89
{
90
cx += points[i].x;
91
cy += points[i].y;
92
}
93
cx /= npoints;
94
cy /= npoints;
95
96
float d = 0.f;
97
for (int i = 0; i < npoints; ++i)
98
{
99
points[i].x -= cx;
100
points[i].y -= cy;
101
d += std::sqrt(sqr(points[i].x) + sqr(points[i].y));
102
}
103
d /= npoints;
104
105
float s = std::sqrt(2.f) / d;
106
for (int i = 0; i < npoints; ++i)
107
{
108
points[i].x *= s;
109
points[i].y *= s;
110
}
111
112
Mat_<float> T = Mat::eye(3, 3, CV_32F);
113
T(0,0) = T(1,1) = s;
114
T(0,2) = -cx*s;
115
T(1,2) = -cy*s;
116
return T;
117
}
118
119
120
// Estimates a pure translation as the mean displacement between the two
// point sets; optionally reports the RMSE of the fit through *rmse.
static Mat estimateGlobMotionLeastSquaresTranslation(
        int npoints, Point2f *points0, Point2f *points1, float *rmse)
{
    Mat_<float> M = Mat::eye(3, 3, CV_32F);

    // The least-squares translation is the average displacement vector.
    for (int i = 0; i < npoints; ++i)
    {
        M(0,2) += points1[i].x - points0[i].x;
        M(1,2) += points1[i].y - points0[i].y;
    }
    M(0,2) /= npoints;
    M(1,2) /= npoints;

    if (rmse)
    {
        float err = 0.f;
        for (int i = 0; i < npoints; ++i)
            err += sqr(points1[i].x - points0[i].x - M(0,2)) +
                   sqr(points1[i].y - points0[i].y - M(1,2));
        *rmse = std::sqrt(err / npoints);
    }

    return M;
}

// Estimates uniform scale s and translation (tx, ty) minimizing
// ||s*p0 + t - p1||^2, solved as a linear system via normal equations
// on isotropically normalized points.
static Mat estimateGlobMotionLeastSquaresTranslationAndScale(
        int npoints, Point2f *points0, Point2f *points1, float *rmse)
{
    // Normalize both sets for numerical conditioning; undone at the end.
    Mat_<float> T0 = normalizePoints(npoints, points0);
    Mat_<float> T1 = normalizePoints(npoints, points1);

    // Each correspondence contributes two rows over (s, tx, ty):
    // one for the x equation, one for the y equation.
    Mat_<float> A(2*npoints, 3), b(2*npoints, 1);
    for (int i = 0; i < npoints; ++i)
    {
        const Point2f &p0 = points0[i];
        const Point2f &p1 = points1[i];
        float *rx = A[2*i];
        float *ry = A[2*i+1];
        rx[0] = p0.x; rx[1] = 1; rx[2] = 0;
        ry[0] = p0.y; ry[1] = 0; ry[2] = 1;
        b(2*i,0) = p1.x;
        b(2*i+1,0) = p1.y;
    }

    Mat_<float> sol;
    solve(A, b, sol, DECOMP_NORMAL | DECOMP_LU);

    if (rmse)
        *rmse = static_cast<float>(norm(A*sol, b, NORM_L2) / std::sqrt(static_cast<double>(npoints)));

    Mat_<float> M = Mat::eye(3, 3, CV_32F);
    M(0,0) = M(1,1) = sol(0,0);
    M(0,2) = sol(1,0);
    M(1,2) = sol(2,0);

    // Map the normalized-space motion back to the original coordinates.
    return T1.inv() * M * T0;
}

// Estimates a pure rotation about the origin minimizing the L2 error.
// The optimal angle alpha satisfies A*sin(alpha) + B*cos(alpha) = 0
// with the accumulated sums below.
static Mat estimateGlobMotionLeastSquaresRotation(
        int npoints, Point2f *points0, Point2f *points1, float *rmse)
{
    float A = 0.f, B = 0.f;
    for (int i = 0; i < npoints; ++i)
    {
        const Point2f &p0 = points0[i];
        const Point2f &p1 = points1[i];
        A += p0.x*p1.x + p0.y*p1.y;
        B += p0.x*p1.y - p1.x*p0.y;
    }

    // A*sin(alpha) + B*cos(alpha) = 0
    const float C = std::sqrt(A*A + B*B);
    Mat_<float> M = Mat::eye(3, 3, CV_32F);
    if (C != 0)  // C == 0 only when both sums vanish; keep identity then
    {
        const float sinAlpha = -B / C;
        const float cosAlpha = A / C;

        M(0,0) = cosAlpha;
        M(1,1) = M(0,0);
        M(0,1) = sinAlpha;
        M(1,0) = -M(0,1);
    }

    if (rmse)
    {
        float err = 0.f;
        for (int i = 0; i < npoints; ++i)
        {
            const Point2f &p0 = points0[i];
            const Point2f &p1 = points1[i];
            err += sqr(p1.x - M(0,0)*p0.x - M(0,1)*p0.y) +
                   sqr(p1.y - M(1,0)*p0.x - M(1,1)*p0.y);
        }
        *rmse = std::sqrt(err / npoints);
    }

    return M;
}

// Estimates a rigid motion (rotation + translation) with the classic
// Procrustes approach: the rotation comes from the SVD of the
// cross-covariance of the centered point sets, the translation from the
// centroid difference.
static Mat estimateGlobMotionLeastSquaresRigid(
        int npoints, Point2f *points0, Point2f *points1, float *rmse)
{
    Point2f mean0(0.f, 0.f), mean1(0.f, 0.f);
    for (int i = 0; i < npoints; ++i)
    {
        mean0 += points0[i];
        mean1 += points1[i];
    }
    mean0 *= 1.f / npoints;
    mean1 *= 1.f / npoints;

    // Cross-covariance of the centered point sets.
    Mat_<float> A = Mat::zeros(2, 2, CV_32F);
    for (int i = 0; i < npoints; ++i)
    {
        const Point2f q0 = points0[i] - mean0;
        const Point2f q1 = points1[i] - mean1;
        A(0,0) += q1.x * q0.x;
        A(0,1) += q1.x * q0.y;
        A(1,0) += q1.y * q0.x;
        A(1,1) += q1.y * q0.y;
    }

    Mat_<float> M = Mat::eye(3, 3, CV_32F);

    // NOTE(review): no det(U*Vt) sign correction is applied, so for
    // degenerate configurations a reflection could be returned — confirm
    // whether that matters for the callers before changing.
    SVD svd(A);
    Mat_<float> R = svd.u * svd.vt;
    Mat top(M(Rect(0,0,2,2)));
    R.copyTo(top);

    M(0,2) = mean1.x - R(0,0)*mean0.x - R(0,1)*mean0.y;
    M(1,2) = mean1.y - R(1,0)*mean0.x - R(1,1)*mean0.y;

    if (rmse)
    {
        float err = 0.f;
        for (int i = 0; i < npoints; ++i)
        {
            const Point2f &p0 = points0[i];
            const Point2f &p1 = points1[i];
            err += sqr(p1.x - M(0,0)*p0.x - M(0,1)*p0.y - M(0,2)) +
                   sqr(p1.y - M(1,0)*p0.x - M(1,1)*p0.y - M(1,2));
        }
        *rmse = std::sqrt(err / npoints);
    }

    return M;
}

// Estimates a similarity transform (rotation + uniform scale + translation)
// parameterized with a linear part [a b; -b a] plus (tx, ty), solved by
// linear least squares on isotropically normalized points.
static Mat estimateGlobMotionLeastSquaresSimilarity(
        int npoints, Point2f *points0, Point2f *points1, float *rmse)
{
    Mat_<float> T0 = normalizePoints(npoints, points0);
    Mat_<float> T1 = normalizePoints(npoints, points1);

    // Two rows per correspondence over the unknowns (a, b, tx, ty).
    Mat_<float> A(2*npoints, 4), b(2*npoints, 1);
    for (int i = 0; i < npoints; ++i)
    {
        const Point2f &p0 = points0[i];
        const Point2f &p1 = points1[i];
        float *rx = A[2*i];
        float *ry = A[2*i+1];
        rx[0] = p0.x; rx[1] = p0.y;  rx[2] = 1; rx[3] = 0;
        ry[0] = p0.y; ry[1] = -p0.x; ry[2] = 0; ry[3] = 1;
        b(2*i,0) = p1.x;
        b(2*i+1,0) = p1.y;
    }

    Mat_<float> sol;
    solve(A, b, sol, DECOMP_NORMAL | DECOMP_LU);

    if (rmse)
        *rmse = static_cast<float>(norm(A*sol, b, NORM_L2) / std::sqrt(static_cast<double>(npoints)));

    Mat_<float> M = Mat::eye(3, 3, CV_32F);
    M(0,0) = M(1,1) = sol(0,0);
    M(0,1) = sol(1,0);
    M(1,0) = -sol(1,0);
    M(0,2) = sol(2,0);
    M(1,2) = sol(3,0);

    // Undo the normalization transforms.
    return T1.inv() * M * T0;
}

// Estimates a full 6-parameter affine transform by linear least squares
// on isotropically normalized points.
static Mat estimateGlobMotionLeastSquaresAffine(
        int npoints, Point2f *points0, Point2f *points1, float *rmse)
{
    Mat_<float> T0 = normalizePoints(npoints, points0);
    Mat_<float> T1 = normalizePoints(npoints, points1);

    // Two rows per correspondence over (a00, a01, tx, a10, a11, ty).
    Mat_<float> A(2*npoints, 6), b(2*npoints, 1);
    for (int i = 0; i < npoints; ++i)
    {
        const Point2f &p0 = points0[i];
        const Point2f &p1 = points1[i];
        float *rx = A[2*i];
        float *ry = A[2*i+1];
        rx[0] = p0.x; rx[1] = p0.y; rx[2] = 1; rx[3] = rx[4] = rx[5] = 0;
        ry[0] = ry[1] = ry[2] = 0;  ry[3] = p0.x; ry[4] = p0.y; ry[5] = 1;
        b(2*i,0) = p1.x;
        b(2*i+1,0) = p1.y;
    }

    Mat_<float> sol;
    solve(A, b, sol, DECOMP_NORMAL | DECOMP_LU);

    if (rmse)
        *rmse = static_cast<float>(norm(A*sol, b, NORM_L2) / std::sqrt(static_cast<double>(npoints)));

    // The six solution entries fill the top two rows of M, row-major.
    Mat_<float> M = Mat::eye(3, 3, CV_32F);
    for (int k = 0; k < 6; ++k)
        M(k / 3, k % 3) = sol(k, 0);

    return T1.inv() * M * T0;
}

// Dispatches to the least-squares estimator matching `model`
// (MM_TRANSLATION .. MM_AFFINE). The point arrays are modified in place
// by the normalization step, hence the InputOutputArray signature.
Mat estimateGlobalMotionLeastSquares(
        InputOutputArray points0, InputOutputArray points1, int model, float *rmse)
{
    CV_INSTRUMENT_REGION();

    CV_Assert(model <= MM_AFFINE);
    CV_Assert(points0.type() == points1.type());
    const int npoints = points0.getMat().checkVector(2);
    CV_Assert(points1.getMat().checkVector(2) == npoints);

    // Table indexed by the MotionModel value; order must match the enum.
    typedef Mat (*Impl)(int, Point2f*, Point2f*, float*);
    static Impl impls[] = { estimateGlobMotionLeastSquaresTranslation,
                            estimateGlobMotionLeastSquaresTranslationAndScale,
                            estimateGlobMotionLeastSquaresRotation,
                            estimateGlobMotionLeastSquaresRigid,
                            estimateGlobMotionLeastSquaresSimilarity,
                            estimateGlobMotionLeastSquaresAffine };

    return impls[model](npoints,
                        points0.getMat().ptr<Point2f>(),
                        points1.getMat().ptr<Point2f>(),
                        rmse);
}

// Robustly estimates global motion with RANSAC: repeatedly fits the model
// to a random minimal subset, counts inliers under params.thresh, and
// finally refines the best hypothesis. Reports the fit RMSE through *rmse
// and the inlier count through *ninliers when those pointers are non-null.
Mat estimateGlobalMotionRansac(
        InputArray points0, InputArray points1, int model, const RansacParams &params,
        float *rmse, int *ninliers)
{
    CV_INSTRUMENT_REGION();

    CV_Assert(model <= MM_AFFINE);
    CV_Assert(points0.type() == points1.type());
    const int npoints = points0.getMat().checkVector(2);
    CV_Assert(points1.getMat().checkVector(2) == npoints);

    // Not enough correspondences to draw even one minimal sample.
    if (npoints < params.size)
        return Mat::eye(3, 3, CV_32F);

    const Point2f *pts0 = points0.getMat().ptr<Point2f>();
    const Point2f *pts1 = points1.getMat().ptr<Point2f>();
    const int niters = params.niters();
    const float threshSq = params.thresh * params.thresh;

    // current hypothesis
    std::vector<int> indices(params.size);
    std::vector<Point2f> subset0(params.size);
    std::vector<Point2f> subset1(params.size);

    // best hypothesis so far
    std::vector<int> bestIndices(params.size);
    Mat_<float> bestM;
    int ninliersMax = -1;

    RNG rng(0);  // fixed seed keeps the estimate deterministic

    for (int iter = 0; iter < niters; ++iter)
    {
        // Draw params.size distinct point indices.
        for (int i = 0; i < params.size; ++i)
        {
            bool ok = false;
            while (!ok)
            {
                ok = true;
                indices[i] = static_cast<unsigned>(rng) % npoints;
                for (int j = 0; j < i; ++j)
                    if (indices[i] == indices[j])
                    { ok = false; break; }
            }
        }
        for (int i = 0; i < params.size; ++i)
        {
            subset0[i] = pts0[indices[i]];
            subset1[i] = pts1[indices[i]];
        }

        Mat_<float> M = estimateGlobalMotionLeastSquares(subset0, subset1, model, 0);

        // Count the correspondences explained by this hypothesis.
        int numinliers = 0;
        for (int i = 0; i < npoints; ++i)
        {
            const Point2f p0 = pts0[i];
            const Point2f p1 = pts1[i];
            const float x = M(0,0)*p0.x + M(0,1)*p0.y + M(0,2);
            const float y = M(1,0)*p0.x + M(1,1)*p0.y + M(1,2);
            if (sqr(x - p1.x) + sqr(y - p1.y) < threshSq)
                numinliers++;
        }
        if (numinliers >= ninliersMax)
        {
            bestM = M;
            ninliersMax = numinliers;
            bestIndices.swap(indices);
        }
    }

    if (ninliersMax < params.size)
    {
        // Too few inliers for a consensus refit: re-estimate on the best
        // minimal sample, mainly to obtain the RMSE.
        for (int i = 0; i < params.size; ++i)
        {
            subset0[i] = pts0[bestIndices[i]];
            subset1[i] = pts1[bestIndices[i]];
        }
        bestM = estimateGlobalMotionLeastSquares(subset0, subset1, model, rmse);
    }
    else
    {
        // Refit on the full consensus set of the best hypothesis.
        subset0.resize(ninliersMax);
        subset1.resize(ninliersMax);
        for (int i = 0, j = 0; i < npoints && j < ninliersMax; ++i)
        {
            const Point2f p0 = pts0[i];
            const Point2f p1 = pts1[i];
            const float x = bestM(0,0)*p0.x + bestM(0,1)*p0.y + bestM(0,2);
            const float y = bestM(1,0)*p0.x + bestM(1,1)*p0.y + bestM(1,2);
            if (sqr(x - p1.x) + sqr(y - p1.y) < threshSq)
            {
                subset0[j] = p0;
                subset1[j] = p1;
                j++;
            }
        }
        bestM = estimateGlobalMotionLeastSquares(subset0, subset1, model, rmse);
    }

    if (ninliers)
        *ninliers = ninliersMax;

    return bestM;
}

// Configures the RANSAC-based estimator with the default RANSAC settings
// for the given motion model and a 10% minimum inlier ratio.
MotionEstimatorRansacL2::MotionEstimatorRansacL2(MotionModel model)
    : MotionEstimatorBase(model)
{
    setRansacParams(RansacParams::default2dMotion(model));
    setMinInlierRatio(0.1f);
}

// Estimates motion between two point sets. MM_HOMOGRAPHY is delegated to
// LMedS findHomography; other models go through the RANSAC estimator.
// Falls back to identity (and sets *ok = false) when the inlier ratio is
// below minInlierRatio_.
Mat MotionEstimatorRansacL2::estimate(InputArray points0, InputArray points1, bool *ok)
{
    CV_Assert(points0.type() == points1.type());
    const int npoints = points0.getMat().checkVector(2);
    CV_Assert(points1.getMat().checkVector(2) == npoints);

    // find motion
    int ninliers = 0;
    Mat_<float> M;

    if (motionModel() != MM_HOMOGRAPHY)
    {
        M = estimateGlobalMotionRansac(
                points0, points1, motionModel(), ransacParams_, 0, &ninliers);
    }
    else
    {
        std::vector<uchar> mask;
        M = findHomography(points0, points1, mask, LMEDS);
        for (int i = 0; i < npoints; ++i)
            if (mask[i]) ninliers++;
    }

    // check whether the estimated motion is trustworthy
    if (ok) *ok = true;
    if (static_cast<float>(ninliers) / npoints < minInlierRatio_)
    {
        M = Mat::eye(3, 3, CV_32F);
        if (ok) *ok = false;
    }

    return M;
}

// L1 (Clp-based) motion estimator; nothing to configure beyond the model.
MotionEstimatorL1::MotionEstimatorL1(MotionModel model)
    : MotionEstimatorBase(model)
{
}

// TODO will estimation of all motions as one LP problem be faster?
541
Mat MotionEstimatorL1::estimate(InputArray points0, InputArray points1, bool *ok)
542
{
543
CV_Assert(points0.type() == points1.type());
544
const int npoints = points0.getMat().checkVector(2);
545
CV_Assert(points1.getMat().checkVector(2) == npoints);
546
547
#ifndef HAVE_CLP
548
549
CV_UNUSED(ok);
550
CV_Error(Error::StsError, "The library is built without Clp support");
551
552
#else
553
554
CV_Assert(motionModel() <= MM_AFFINE && motionModel() != MM_RIGID);
555
556
if(npoints <= 0)
557
return Mat::eye(3, 3, CV_32F);
558
559
// prepare LP problem
560
561
const Point2f *points0_ = points0.getMat().ptr<Point2f>();
562
const Point2f *points1_ = points1.getMat().ptr<Point2f>();
563
564
int ncols = 6 + 2*npoints;
565
int nrows = 4*npoints;
566
567
if (motionModel() == MM_SIMILARITY)
568
nrows += 2;
569
else if (motionModel() == MM_TRANSLATION_AND_SCALE)
570
nrows += 3;
571
else if (motionModel() == MM_TRANSLATION)
572
nrows += 4;
573
574
rows_.clear();
575
cols_.clear();
576
elems_.clear();
577
obj_.assign(ncols, 0);
578
collb_.assign(ncols, -INF);
579
colub_.assign(ncols, INF);
580
581
int c = 6;
582
583
for (int i = 0; i < npoints; ++i, c += 2)
584
{
585
obj_[c] = 1;
586
collb_[c] = 0;
587
588
obj_[c+1] = 1;
589
collb_[c+1] = 0;
590
}
591
592
elems_.clear();
593
rowlb_.assign(nrows, -INF);
594
rowub_.assign(nrows, INF);
595
596
int r = 0;
597
Point2f p0, p1;
598
599
for (int i = 0; i < npoints; ++i, r += 4)
600
{
601
p0 = points0_[i];
602
p1 = points1_[i];
603
604
set(r, 0, p0.x); set(r, 1, p0.y); set(r, 2, 1); set(r, 6+2*i, -1);
605
rowub_[r] = p1.x;
606
607
set(r+1, 3, p0.x); set(r+1, 4, p0.y); set(r+1, 5, 1); set(r+1, 6+2*i+1, -1);
608
rowub_[r+1] = p1.y;
609
610
set(r+2, 0, p0.x); set(r+2, 1, p0.y); set(r+2, 2, 1); set(r+2, 6+2*i, 1);
611
rowlb_[r+2] = p1.x;
612
613
set(r+3, 3, p0.x); set(r+3, 4, p0.y); set(r+3, 5, 1); set(r+3, 6+2*i+1, 1);
614
rowlb_[r+3] = p1.y;
615
}
616
617
if (motionModel() == MM_SIMILARITY)
618
{
619
set(r, 0, 1); set(r, 4, -1); rowlb_[r] = rowub_[r] = 0;
620
set(r+1, 1, 1); set(r+1, 3, 1); rowlb_[r+1] = rowub_[r+1] = 0;
621
}
622
else if (motionModel() == MM_TRANSLATION_AND_SCALE)
623
{
624
set(r, 0, 1); set(r, 4, -1); rowlb_[r] = rowub_[r] = 0;
625
set(r+1, 1, 1); rowlb_[r+1] = rowub_[r+1] = 0;
626
set(r+2, 3, 1); rowlb_[r+2] = rowub_[r+2] = 0;
627
}
628
else if (motionModel() == MM_TRANSLATION)
629
{
630
set(r, 0, 1); rowlb_[r] = rowub_[r] = 1;
631
set(r+1, 1, 1); rowlb_[r+1] = rowub_[r+1] = 0;
632
set(r+2, 3, 1); rowlb_[r+2] = rowub_[r+2] = 0;
633
set(r+3, 4, 1); rowlb_[r+3] = rowub_[r+3] = 1;
634
}
635
636
// solve
637
638
CoinPackedMatrix A(true, &rows_[0], &cols_[0], &elems_[0], elems_.size());
639
A.setDimensions(nrows, ncols);
640
641
ClpSimplex model(false);
642
model.loadProblem(A, &collb_[0], &colub_[0], &obj_[0], &rowlb_[0], &rowub_[0]);
643
644
ClpDualRowSteepest dualSteep(1);
645
model.setDualRowPivotAlgorithm(dualSteep);
646
model.scaling(1);
647
648
model.dual();
649
650
// extract motion
651
652
const double *sol = model.getColSolution();
653
654
Mat_<float> M = Mat::eye(3, 3, CV_32F);
655
M(0,0) = sol[0];
656
M(0,1) = sol[1];
657
M(0,2) = sol[2];
658
M(1,0) = sol[3];
659
M(1,1) = sol[4];
660
M(1,2) = sol[5];
661
662
if (ok) *ok = true;
663
return M;
664
#endif
665
}
666
667
668
// Replays pre-computed motions from a text file (see ToFileMotionWriter
// for the format). The model is MM_UNKNOWN since the file stores raw
// matrices without model information.
FromFileMotionReader::FromFileMotionReader(const String &path)
    : ImageMotionEstimatorBase(MM_UNKNOWN)
{
    file_.open(path.c_str());
    CV_Assert(file_.is_open());
}

// Returns the next 3x3 motion stored in the file; the frames are ignored.
// The trailing success flag in the file is reported through *ok.
Mat FromFileMotionReader::estimate(const Mat &/*frame0*/, const Mat &/*frame1*/, bool *ok)
{
    Mat_<float> M(3, 3);
    // Elements are stored row-major, followed by a boolean flag.
    for (int i = 0; i < 3; ++i)
        for (int j = 0; j < 3; ++j)
            file_ >> M(i,j);
    bool ok_;
    file_ >> ok_;
    if (ok) *ok = ok_;
    return M;
}

// Wraps another estimator and logs every estimated motion (row-major 3x3
// matrix plus a success flag) to a text file that FromFileMotionReader
// can replay later.
ToFileMotionWriter::ToFileMotionWriter(const String &path, Ptr<ImageMotionEstimatorBase> estimator)
    : ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator)
{
    file_.open(path.c_str());
    CV_Assert(file_.is_open());
}

// Delegates estimation to the wrapped estimator, then appends the result
// (nine space-separated elements plus the success flag) to the log file.
Mat ToFileMotionWriter::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
{
    bool ok_;
    Mat_<float> M = motionEstimator_->estimate(frame0, frame1, &ok_);

    // Row-major dump; each element is followed by a single space.
    for (int i = 0; i < 3; ++i)
        for (int j = 0; j < 3; ++j)
            file_ << M(i,j) << " ";
    file_ << ok_ << std::endl;

    if (ok) *ok = ok_;
    return M;
}

// Keypoint-based image motion estimator: GFTT keypoints tracked with
// sparse pyramidal LK optical flow feed the supplied 2D motion estimator.
// Outlier rejection is disabled by default (NullOutlierRejector).
KeypointBasedMotionEstimator::KeypointBasedMotionEstimator(Ptr<MotionEstimatorBase> estimator)
    : ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator)
{
    setDetector(GFTTDetector::create());
    setOpticalFlowEstimator(makePtr<SparsePyrLkOptFlowEstimator>());
    setOutlierRejector(makePtr<NullOutlierRejector>());
}

// Mat overload: wraps the frames as InputArray and forwards to the
// InputArray implementation below.
Mat KeypointBasedMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
{
    InputArray a0 = frame0;
    InputArray a1 = frame1;
    return estimate(a0, a1, ok);
}

// Full pipeline: detect keypoints in frame0, track them into frame1 with
// sparse optical flow, keep the successfully tracked pairs, optionally
// reject outliers, then estimate the global motion from the survivors.
Mat KeypointBasedMotionEstimator::estimate(InputArray frame0, InputArray frame1, bool *ok)
{
    // find keypoints
    detector_->detect(frame0, keypointsPrev_);
    if (keypointsPrev_.empty())
        return Mat::eye(3, 3, CV_32F);  // nothing to track

    // extract the point locations from the keypoints
    pointsPrev_.resize(keypointsPrev_.size());
    for (size_t i = 0; i < keypointsPrev_.size(); ++i)
        pointsPrev_[i] = keypointsPrev_[i].pt;

    // find correspondences
    optFlowEstimator_->run(frame0, frame1, pointsPrev_, points_, status_, noArray());

    // keep the successfully tracked correspondences only
    pointsPrevGood_.clear(); pointsPrevGood_.reserve(points_.size());
    pointsGood_.clear();     pointsGood_.reserve(points_.size());

    for (size_t i = 0; i < points_.size(); ++i)
    {
        if (status_[i])
        {
            pointsPrevGood_.push_back(pointsPrev_[i]);
            pointsGood_.push_back(points_[i]);
        }
    }

    // perform outlier rejection unless the rejector is the null one
    IOutlierRejector *outlRejector = outlierRejector_.get();
    if (!dynamic_cast<NullOutlierRejector*>(outlRejector))
    {
        // Reuse the "prev" buffers as input and refill the "good" buffers
        // from the rejector's status mask.
        pointsPrev_.swap(pointsPrevGood_);
        points_.swap(pointsGood_);

        outlierRejector_->process(frame0.size(), pointsPrev_, points_, status_);

        pointsPrevGood_.clear();
        pointsPrevGood_.reserve(points_.size());

        pointsGood_.clear();
        pointsGood_.reserve(points_.size());

        for (size_t i = 0; i < points_.size(); ++i)
        {
            if (status_[i])
            {
                pointsPrevGood_.push_back(pointsPrev_[i]);
                pointsGood_.push_back(points_[i]);
            }
        }
    }

    // estimate motion from the filtered correspondences
    return motionEstimator_->estimate(pointsPrevGood_, pointsGood_, ok);
}

#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW)

// GPU variant of the keypoint-based estimator: good-features detection on
// CUDA; requires at least one CUDA-capable device.
KeypointBasedMotionEstimatorGpu::KeypointBasedMotionEstimatorGpu(Ptr<MotionEstimatorBase> estimator)
    : ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator)
{
    detector_ = cuda::createGoodFeaturesToTrackDetector(CV_8UC1);

    CV_Assert(cuda::getCudaEnabledDeviceCount() > 0);
    setOutlierRejector(makePtr<NullOutlierRejector>());
}

// Host-Mat overload: uploads both frames to the device and runs the
// GpuMat code path.
Mat KeypointBasedMotionEstimatorGpu::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
{
    frame0_.upload(frame0);
    frame1_.upload(frame1);
    return estimate(frame0_, frame1_, ok);
}

// GPU pipeline: detect on a gray version of frame0, track with CUDA
// optical flow, compact failed tracks on the device, then (optionally)
// reject outliers on the host before estimating the motion.
Mat KeypointBasedMotionEstimatorGpu::estimate(const cuda::GpuMat &frame0, const cuda::GpuMat &frame1, bool *ok)
{
    // Detection needs a single-channel image; convert on demand.
    cuda::GpuMat grayFrame0;
    if (frame0.channels() == 1)
        grayFrame0 = frame0;
    else
    {
        cuda::cvtColor(frame0, grayFrame0_, COLOR_BGR2GRAY);
        grayFrame0 = grayFrame0_;
    }

    // find keypoints
    detector_->detect(grayFrame0, pointsPrev_);

    // find correspondences
    optFlowEstimator_.run(frame0, frame1, pointsPrev_, points_, status_);

    // drop failed tracks on the device, then bring the points to the host
    cuda::compactPoints(pointsPrev_, points_, status_);

    pointsPrev_.download(hostPointsPrev_);
    points_.download(hostPoints_);

    // perform outlier rejection unless the rejector is the null one
    IOutlierRejector *rejector = outlierRejector_.get();
    if (!dynamic_cast<NullOutlierRejector*>(rejector))
    {
        outlierRejector_->process(frame0.size(), hostPointsPrev_, hostPoints_, rejectionStatus_);

        hostPointsPrevTmp_.clear();
        hostPointsPrevTmp_.reserve(hostPoints_.cols);

        hostPointsTmp_.clear();
        hostPointsTmp_.reserve(hostPoints_.cols);

        for (int i = 0; i < hostPoints_.cols; ++i)
        {
            if (rejectionStatus_[i])
            {
                hostPointsPrevTmp_.push_back(hostPointsPrev_.at<Point2f>(0,i));
                hostPointsTmp_.push_back(hostPoints_.at<Point2f>(0,i));
            }
        }

        // NOTE(review): these Mats alias the Tmp vectors' storage without
        // copying, so the vectors must outlive hostPoints*_ — they are
        // class members here, which keeps this safe.
        hostPointsPrev_ = Mat(1, (int)hostPointsPrevTmp_.size(), CV_32FC2, &hostPointsPrevTmp_[0]);
        hostPoints_ = Mat(1, (int)hostPointsTmp_.size(), CV_32FC2, &hostPointsTmp_[0]);
    }

    // estimate motion
    return motionEstimator_->estimate(hostPointsPrev_, hostPoints_, ok);
}

#endif // defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW)

// Composes the accumulated motion from frame `from` to frame `to` out of
// the per-frame motions (motions[i] maps frame i to frame i+1). For
// from > to the forward chain is composed first and then inverted.
Mat getMotion(int from, int to, const std::vector<Mat> &motions)
{
    CV_INSTRUMENT_REGION();

    Mat M = Mat::eye(3, 3, CV_32F);

    if (to > from)
    {
        // forward composition: M = motions[to-1] * ... * motions[from]
        for (int i = from; i < to; ++i)
            M = at(i, motions) * M;
    }
    else if (from > to)
    {
        // backward: compose the forward chain, then invert it
        for (int i = to; i < from; ++i)
            M = at(i, motions) * M;
        M = M.inv();
    }

    return M;
}

} // namespace videostab
} // namespace cv