Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Tetragramm
GitHub Repository: Tetragramm/opencv
Path: blob/master/modules/features2d/src/kaze/KAZEFeatures.cpp
16337 views
1
2
//=============================================================================
3
//
4
// KAZE.cpp
5
// Author: Pablo F. Alcantarilla
6
// Institution: University d'Auvergne
7
// Address: Clermont Ferrand, France
8
// Date: 21/01/2012
9
// Email: [email protected]
10
//
11
// KAZE Features Copyright 2012, Pablo F. Alcantarilla
12
// All Rights Reserved
13
// See LICENSE for the license information
14
//=============================================================================
15
16
/**
17
* @file KAZEFeatures.cpp
18
* @brief Main class for detecting and describing features in a nonlinear
19
* scale space
20
* @date Jan 21, 2012
21
* @author Pablo F. Alcantarilla
22
*/
23
#include "../precomp.hpp"
24
#include "KAZEFeatures.h"
25
#include "utils.h"
26
27
namespace cv
28
{
29
30
// Namespaces
31
using namespace std;
32
33
/* ************************************************************************* */
34
/**
35
* @brief KAZE constructor with input options
36
* @param options KAZE configuration options
37
* @note The constructor allocates memory for the nonlinear scale space
38
*/
39
KAZEFeatures::KAZEFeatures(KAZEOptions& options)
    : options_(options)
{
    // Reordered (descending) FED time steps are used when building the cycles.
    reordering_ = true;

    // No FED cycles yet; Allocate_Memory_Evolution() counts them as it goes.
    ncycles_ = 0;

    // Build the evolution pyramid (one entry per octave/sublevel pair)
    // and precompute the FED step sizes for each level transition.
    Allocate_Memory_Evolution();
}
48
49
/* ************************************************************************* */
50
/**
51
* @brief This method allocates the memory for the nonlinear diffusion evolution
52
*/
53
void KAZEFeatures::Allocate_Memory_Evolution(void) {
54
55
// Allocate the dimension of the matrices for the evolution
56
for (int i = 0; i <= options_.omax - 1; i++)
57
{
58
for (int j = 0; j <= options_.nsublevels - 1; j++)
59
{
60
TEvolution aux;
61
aux.Lx = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
62
aux.Ly = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
63
aux.Lxx = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
64
aux.Lxy = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
65
aux.Lyy = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
66
aux.Lt = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
67
aux.Lsmooth = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
68
aux.Ldet = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
69
aux.esigma = options_.soffset*pow((float)2.0f, (float)(j) / (float)(options_.nsublevels)+i);
70
aux.etime = 0.5f*(aux.esigma*aux.esigma);
71
aux.sigma_size = cvRound(aux.esigma);
72
aux.octave = i;
73
aux.sublevel = j;
74
evolution_.push_back(aux);
75
}
76
}
77
78
// Allocate memory for the FED number of cycles and time steps
79
for (size_t i = 1; i < evolution_.size(); i++)
80
{
81
int naux = 0;
82
vector<float> tau;
83
float ttime = 0.0;
84
ttime = evolution_[i].etime - evolution_[i - 1].etime;
85
naux = fed_tau_by_process_time(ttime, 1, 0.25f, reordering_, tau);
86
nsteps_.push_back(naux);
87
tsteps_.push_back(tau);
88
ncycles_++;
89
}
90
}
91
92
/* ************************************************************************* */
93
/**
94
* @brief This method creates the nonlinear scale space for a given image
95
* @param img Input image for which the nonlinear scale space needs to be created
96
* @return 0 if the nonlinear scale space was created successfully. -1 otherwise
97
*/
98
int KAZEFeatures::Create_Nonlinear_Scale_Space(const Mat &img)
99
{
100
CV_Assert(evolution_.size() > 0);
101
102
// Copy the original image to the first level of the evolution
103
img.copyTo(evolution_[0].Lt);
104
gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lt, 0, 0, options_.soffset);
105
gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lsmooth, 0, 0, options_.sderivatives);
106
107
// Firstly compute the kcontrast factor
108
Compute_KContrast(evolution_[0].Lt, options_.kcontrast_percentille);
109
110
// Allocate memory for the flow and step images
111
Mat Lflow = Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F);
112
Mat Lstep = Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F);
113
114
// Now generate the rest of evolution levels
115
for (size_t i = 1; i < evolution_.size(); i++)
116
{
117
evolution_[i - 1].Lt.copyTo(evolution_[i].Lt);
118
gaussian_2D_convolution(evolution_[i - 1].Lt, evolution_[i].Lsmooth, 0, 0, options_.sderivatives);
119
120
// Compute the Gaussian derivatives Lx and Ly
121
Scharr(evolution_[i].Lsmooth, evolution_[i].Lx, CV_32F, 1, 0, 1, 0, BORDER_DEFAULT);
122
Scharr(evolution_[i].Lsmooth, evolution_[i].Ly, CV_32F, 0, 1, 1, 0, BORDER_DEFAULT);
123
124
// Compute the conductivity equation
125
if (options_.diffusivity == KAZE::DIFF_PM_G1)
126
pm_g1(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast);
127
else if (options_.diffusivity == KAZE::DIFF_PM_G2)
128
pm_g2(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast);
129
else if (options_.diffusivity == KAZE::DIFF_WEICKERT)
130
weickert_diffusivity(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast);
131
132
// Perform FED n inner steps
133
for (int j = 0; j < nsteps_[i - 1]; j++)
134
nld_step_scalar(evolution_[i].Lt, Lflow, Lstep, tsteps_[i - 1][j]);
135
}
136
137
return 0;
138
}
139
140
/* ************************************************************************* */
141
/**
142
* @brief This method computes the k contrast factor
143
* @param img Input image
144
* @param kpercentile Percentile of the gradient histogram
145
*/
146
void KAZEFeatures::Compute_KContrast(const Mat &img, const float &kpercentile)
147
{
148
options_.kcontrast = compute_k_percentile(img, kpercentile, options_.sderivatives, options_.kcontrast_bins, 0, 0);
149
}
150
151
/* ************************************************************************* */
152
/**
153
* @brief This method computes the feature detector response for the nonlinear scale space
154
* @note We use the Hessian determinant as feature detector
155
*/
156
void KAZEFeatures::Compute_Detector_Response(void)
157
{
158
float lxx = 0.0, lxy = 0.0, lyy = 0.0;
159
160
// Firstly compute the multiscale derivatives
161
Compute_Multiscale_Derivatives();
162
163
for (size_t i = 0; i < evolution_.size(); i++)
164
{
165
for (int ix = 0; ix < options_.img_height; ix++)
166
{
167
for (int jx = 0; jx < options_.img_width; jx++)
168
{
169
lxx = *(evolution_[i].Lxx.ptr<float>(ix)+jx);
170
lxy = *(evolution_[i].Lxy.ptr<float>(ix)+jx);
171
lyy = *(evolution_[i].Lyy.ptr<float>(ix)+jx);
172
*(evolution_[i].Ldet.ptr<float>(ix)+jx) = (lxx*lyy - lxy*lxy);
173
}
174
}
175
}
176
}
177
178
/* ************************************************************************* */
179
/**
180
* @brief This method selects interesting keypoints through the nonlinear scale space
181
* @param kpts Vector of keypoints
182
*/
183
void KAZEFeatures::Feature_Detection(std::vector<KeyPoint>& kpts)
{
    // Start from an empty list so the same object can process several images.
    kpts.clear();

    // 1) Hessian-determinant response at every evolution level.
    Compute_Detector_Response();

    // 2) Non-maxima suppression across space and scale.
    Determinant_Hessian(kpts);

    // 3) Subpixel / subscale refinement of the surviving keypoints.
    Do_Subpixel_Refinement(kpts);
}
190
191
/* ************************************************************************* */
192
class MultiscaleDerivativesKAZEInvoker : public ParallelLoopBody
193
{
194
public:
195
explicit MultiscaleDerivativesKAZEInvoker(std::vector<TEvolution>& ev) : evolution_(&ev)
196
{
197
}
198
199
void operator()(const Range& range) const CV_OVERRIDE
200
{
201
std::vector<TEvolution>& evolution = *evolution_;
202
for (int i = range.start; i < range.end; i++)
203
{
204
compute_scharr_derivatives(evolution[i].Lsmooth, evolution[i].Lx, 1, 0, evolution[i].sigma_size);
205
compute_scharr_derivatives(evolution[i].Lsmooth, evolution[i].Ly, 0, 1, evolution[i].sigma_size);
206
compute_scharr_derivatives(evolution[i].Lx, evolution[i].Lxx, 1, 0, evolution[i].sigma_size);
207
compute_scharr_derivatives(evolution[i].Ly, evolution[i].Lyy, 0, 1, evolution[i].sigma_size);
208
compute_scharr_derivatives(evolution[i].Lx, evolution[i].Lxy, 0, 1, evolution[i].sigma_size);
209
210
evolution[i].Lx = evolution[i].Lx*((evolution[i].sigma_size));
211
evolution[i].Ly = evolution[i].Ly*((evolution[i].sigma_size));
212
evolution[i].Lxx = evolution[i].Lxx*((evolution[i].sigma_size)*(evolution[i].sigma_size));
213
evolution[i].Lxy = evolution[i].Lxy*((evolution[i].sigma_size)*(evolution[i].sigma_size));
214
evolution[i].Lyy = evolution[i].Lyy*((evolution[i].sigma_size)*(evolution[i].sigma_size));
215
}
216
}
217
218
private:
219
std::vector<TEvolution>* evolution_;
220
};
221
222
/* ************************************************************************* */
223
/**
224
* @brief This method computes the multiscale derivatives for the nonlinear scale space
225
*/
226
void KAZEFeatures::Compute_Multiscale_Derivatives(void)
{
    // One parallel task per evolution level; each level is independent.
    parallel_for_(Range(0, (int)evolution_.size()),
                  MultiscaleDerivativesKAZEInvoker(evolution_));
}
231
232
233
/* ************************************************************************* */
234
// Parallel body that scans levels [range.start, range.end) of the detector
// response for 3x3x3 scale-space maxima above the detector threshold.
// Results for level i are collected into kpts_par[i - 1] (the caller always
// starts the range at level 1, so each task writes to a distinct slot and
// no synchronization is needed).
class FindExtremumKAZEInvoker : public ParallelLoopBody
{
public:
    explicit FindExtremumKAZEInvoker(std::vector<TEvolution>& ev, std::vector<std::vector<KeyPoint> >& kpts_par,
                                     const KAZEOptions& options) : evolution_(&ev), kpts_par_(&kpts_par), options_(options)
    {
    }

    void operator()(const Range& range) const CV_OVERRIDE
    {
        std::vector<TEvolution>& evolution = *evolution_;
        std::vector<std::vector<KeyPoint> >& kpts_par = *kpts_par_;

        for (int i = range.start; i < range.end; i++)
        {
            float value = 0.0;
            bool is_extremum = false;

            // Skip the 1-pixel image border: the 3x3 neighbourhood test below
            // reads one pixel in every direction.
            for (int ix = 1; ix < options_.img_height - 1; ix++)
            {
                for (int jx = 1; jx < options_.img_width - 1; jx++)
                {
                    is_extremum = false;
                    value = *(evolution[i].Ldet.ptr<float>(ix)+jx);

                    // Filter the points with the detector threshold
                    if (value > options_.dthreshold)
                    {
                        // Cheap pre-test against the left neighbour before the
                        // full neighbourhood checks.
                        if (value >= *(evolution[i].Ldet.ptr<float>(ix)+jx - 1))
                        {
                            // First check on the same scale
                            if (check_maximum_neighbourhood(evolution[i].Ldet, 1, value, ix, jx, 1))
                            {
                                // Now check on the lower scale
                                if (check_maximum_neighbourhood(evolution[i - 1].Ldet, 1, value, ix, jx, 0))
                                {
                                    // Now check on the upper scale
                                    if (check_maximum_neighbourhood(evolution[i + 1].Ldet, 1, value, ix, jx, 0))
                                        is_extremum = true;
                                }
                            }
                        }
                    }

                    // Add the point of interest!!
                    if (is_extremum)
                    {
                        KeyPoint point;
                        point.pt.x = (float)jx;
                        point.pt.y = (float)ix;
                        point.response = fabs(value);
                        point.size = evolution[i].esigma;
                        point.octave = (int)evolution[i].octave;
                        // class_id records the evolution level the point came from.
                        point.class_id = i;

                        // We use the angle field for the sublevel value
                        // Then, we will replace this angle field with the main orientation
                        point.angle = static_cast<float>(evolution[i].sublevel);
                        kpts_par[i - 1].push_back(point);
                    }
                }
            }
        }
    }

private:
    std::vector<TEvolution>* evolution_;
    std::vector<std::vector<KeyPoint> >* kpts_par_;
    KAZEOptions options_;
};
303
304
/* ************************************************************************* */
305
/**
306
* @brief This method performs the detection of keypoints by using the normalized
307
* score of the Hessian determinant through the nonlinear scale space
308
* @param kpts Vector of keypoints
309
* @note We compute features for each of the nonlinear scale space level in a different processing thread
310
*/
311
void KAZEFeatures::Determinant_Hessian(std::vector<KeyPoint>& kpts)
{
    int level = 0;
    // smax controls the descriptor support radius used for the border check below.
    float dist = 0.0, smax = 3.0;
    int npoints = 0, id_repeated = 0;
    int left_x = 0, right_x = 0, up_y = 0, down_y = 0;
    bool is_extremum = false, is_repeated = false, is_out = false;

    // Delete the memory of the vector of keypoints vectors
    // In case we use the same kaze object for multiple images
    for (size_t i = 0; i < kpts_par_.size(); i++) {
        vector<KeyPoint>().swap(kpts_par_[i]);
    }
    kpts_par_.clear();
    vector<KeyPoint> aux;

    // Allocate memory for the vector of vectors
    // One slot per interior level (levels 1 .. size-2); the invoker for
    // level i writes into kpts_par_[i - 1].
    for (size_t i = 1; i < evolution_.size() - 1; i++) {
        kpts_par_.push_back(aux);
    }

    // Find candidate extrema in parallel across the interior levels.
    parallel_for_(Range(1, (int)evolution_.size()-1),
                  FindExtremumKAZEInvoker(evolution_, kpts_par_, options_));

    // Now fill the vector of keypoints!!!
    for (int i = 0; i < (int)kpts_par_.size(); i++)
    {
        for (int j = 0; j < (int)kpts_par_[i].size(); j++)
        {
            // kpts_par_[i] holds candidates from evolution level i + 1.
            level = i + 1;
            is_extremum = true;
            is_repeated = false;
            is_out = false;

            // Check in case we have the same point as maxima in previous evolution levels
            for (int ik = 0; ik < (int)kpts.size(); ik++) {
                // Only compare against points on the same or adjacent levels.
                if (kpts[ik].class_id == level || kpts[ik].class_id == level + 1 || kpts[ik].class_id == level - 1) {
                    // Squared distance between the two candidates.
                    dist = pow(kpts_par_[i][j].pt.x - kpts[ik].pt.x, 2) + pow(kpts_par_[i][j].pt.y - kpts[ik].pt.y, 2);

                    if (dist < evolution_[level].sigma_size*evolution_[level].sigma_size) {
                        // Keep whichever of the two has the stronger response.
                        if (kpts_par_[i][j].response > kpts[ik].response) {
                            id_repeated = ik;
                            is_repeated = true;
                        }
                        else {
                            is_extremum = false;
                        }

                        break;
                    }
                }
            }

            if (is_extremum == true) {
                // Check that the point is under the image limits for the descriptor computation
                left_x = cvRound(kpts_par_[i][j].pt.x - smax*kpts_par_[i][j].size);
                right_x = cvRound(kpts_par_[i][j].pt.x + smax*kpts_par_[i][j].size);
                up_y = cvRound(kpts_par_[i][j].pt.y - smax*kpts_par_[i][j].size);
                down_y = cvRound(kpts_par_[i][j].pt.y + smax*kpts_par_[i][j].size);

                if (left_x < 0 || right_x >= evolution_[level].Ldet.cols ||
                    up_y < 0 || down_y >= evolution_[level].Ldet.rows) {
                    is_out = true;
                }

                if (is_out == false) {
                    if (is_repeated == false) {
                        // Brand-new point: append it.
                        kpts.push_back(kpts_par_[i][j]);
                        npoints++;
                    }
                    else {
                        // Stronger duplicate: overwrite the weaker earlier point.
                        kpts[id_repeated] = kpts_par_[i][j];
                    }
                }
            }
        }
    }
}
389
390
/* ************************************************************************* */
391
/**
392
* @brief This method performs subpixel refinement of the detected keypoints
393
* @param kpts Vector of detected keypoints
394
*/
395
void KAZEFeatures::Do_Subpixel_Refinement(std::vector<KeyPoint> &kpts) {

    // Finite-difference step for the spatial derivatives (in pixels).
    int step = 1;
    int x = 0, y = 0;
    float Dx = 0.0, Dy = 0.0, Ds = 0.0, dsc = 0.0;
    float Dxx = 0.0, Dyy = 0.0, Dss = 0.0, Dxy = 0.0, Dxs = 0.0, Dys = 0.0;
    // 3x3 linear system A * dst = b solved per keypoint (x, y, scale offsets).
    Mat A = Mat::zeros(3, 3, CV_32F);
    Mat b = Mat::zeros(3, 1, CV_32F);
    Mat dst = Mat::zeros(3, 1, CV_32F);

    // Work on a copy; rejected points are filtered out at the end.
    vector<KeyPoint> kpts_(kpts);

    for (size_t i = 0; i < kpts_.size(); i++) {

        x = static_cast<int>(kpts_[i].pt.x);
        y = static_cast<int>(kpts_[i].pt.y);

        // Compute the gradient
        // Central differences of the detector response in x, y, and scale
        // (scale = neighbouring evolution levels class_id +/- 1).
        Dx = (1.0f / (2.0f*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x + step)
            - *(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x - step));
        Dy = (1.0f / (2.0f*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y + step) + x)
            - *(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y - step) + x));
        Ds = 0.5f*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y)+x)
            - *(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y)+x));

        // Compute the Hessian
        Dxx = (1.0f / (step*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x + step)
            + *(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x - step)
            - 2.0f*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x)));

        Dyy = (1.0f / (step*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y + step) + x)
            + *(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y - step) + x)
            - 2.0f*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x)));

        Dss = *(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y)+x)
            + *(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y)+x)
            - 2.0f*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x));

        // Mixed second derivatives via the four diagonal neighbours.
        Dxy = (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y + step) + x + step)
            + (*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y - step) + x - step)))
            - (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y - step) + x + step)
            + (*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y + step) + x - step)));

        Dxs = (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y)+x + step)
            + (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y)+x - step)))
            - (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y)+x - step)
            + (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y)+x + step)));

        Dys = (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y + step) + x)
            + (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y - step) + x)))
            - (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y - step) + x)
            + (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y + step) + x)));

        // Solve the linear system
        *(A.ptr<float>(0)) = Dxx;
        *(A.ptr<float>(1) + 1) = Dyy;
        *(A.ptr<float>(2) + 2) = Dss;

        // A is symmetric: fill both halves.
        *(A.ptr<float>(0) + 1) = *(A.ptr<float>(1)) = Dxy;
        *(A.ptr<float>(0) + 2) = *(A.ptr<float>(2)) = Dxs;
        *(A.ptr<float>(1) + 2) = *(A.ptr<float>(2) + 1) = Dys;

        *(b.ptr<float>(0)) = -Dx;
        *(b.ptr<float>(1)) = -Dy;
        *(b.ptr<float>(2)) = -Ds;

        solve(A, b, dst, DECOMP_LU);

        // Accept the refinement only if the offset stays within one step
        // in every dimension; otherwise the extremum is unstable.
        if (fabs(*(dst.ptr<float>(0))) <= 1.0f && fabs(*(dst.ptr<float>(1))) <= 1.0f && fabs(*(dst.ptr<float>(2))) <= 1.0f) {
            kpts_[i].pt.x += *(dst.ptr<float>(0));
            kpts_[i].pt.y += *(dst.ptr<float>(1));
            // Refined fractional scale: octave + (sublevel + ds) / nsublevels.
            // (angle still holds the sublevel at this point — see detection.)
            dsc = kpts_[i].octave + (kpts_[i].angle + *(dst.ptr<float>(2))) / ((float)(options_.nsublevels));

            // In OpenCV the size of a keypoint is the diameter!!
            kpts_[i].size = 2.0f*options_.soffset*pow((float)2.0f, dsc);
            kpts_[i].angle = 0.0;
        }
        // Set the points to be deleted after the for loop
        else {
            kpts_[i].response = -1;
        }
    }

    // Clear the vector of keypoints
    kpts.clear();

    // Keep only the points that survived refinement (response != -1).
    for (size_t i = 0; i < kpts_.size(); i++) {
        if (kpts_[i].response != -1) {
            kpts.push_back(kpts_[i]);
        }
    }
}
487
488
/* ************************************************************************* */
489
class KAZE_Descriptor_Invoker : public ParallelLoopBody
490
{
491
public:
492
KAZE_Descriptor_Invoker(std::vector<KeyPoint> &kpts, Mat &desc, std::vector<TEvolution>& evolution, const KAZEOptions& options)
493
: kpts_(&kpts)
494
, desc_(&desc)
495
, evolution_(&evolution)
496
, options_(options)
497
{
498
}
499
500
virtual ~KAZE_Descriptor_Invoker()
501
{
502
}
503
504
void operator() (const Range& range) const CV_OVERRIDE
505
{
506
std::vector<KeyPoint> &kpts = *kpts_;
507
Mat &desc = *desc_;
508
std::vector<TEvolution> &evolution = *evolution_;
509
510
for (int i = range.start; i < range.end; i++)
511
{
512
kpts[i].angle = 0.0;
513
if (options_.upright)
514
{
515
kpts[i].angle = 0.0;
516
if (options_.extended)
517
Get_KAZE_Upright_Descriptor_128(kpts[i], desc.ptr<float>((int)i));
518
else
519
Get_KAZE_Upright_Descriptor_64(kpts[i], desc.ptr<float>((int)i));
520
}
521
else
522
{
523
KAZEFeatures::Compute_Main_Orientation(kpts[i], evolution, options_);
524
525
if (options_.extended)
526
Get_KAZE_Descriptor_128(kpts[i], desc.ptr<float>((int)i));
527
else
528
Get_KAZE_Descriptor_64(kpts[i], desc.ptr<float>((int)i));
529
}
530
}
531
}
532
private:
533
void Get_KAZE_Upright_Descriptor_64(const KeyPoint& kpt, float* desc) const;
534
void Get_KAZE_Descriptor_64(const KeyPoint& kpt, float* desc) const;
535
void Get_KAZE_Upright_Descriptor_128(const KeyPoint& kpt, float* desc) const;
536
void Get_KAZE_Descriptor_128(const KeyPoint& kpt, float *desc) const;
537
538
std::vector<KeyPoint> * kpts_;
539
Mat * desc_;
540
std::vector<TEvolution> * evolution_;
541
KAZEOptions options_;
542
};
543
544
/* ************************************************************************* */
545
/**
546
* @brief This method computes the set of descriptors through the nonlinear scale space
547
* @param kpts Vector of keypoints
548
* @param desc Matrix with the feature descriptors
549
*/
550
void KAZEFeatures::Feature_Description(std::vector<KeyPoint> &kpts, Mat &desc)
551
{
552
for(size_t i = 0; i < kpts.size(); i++)
553
{
554
CV_Assert(0 <= kpts[i].class_id && kpts[i].class_id < static_cast<int>(evolution_.size()));
555
}
556
557
// Allocate memory for the matrix of descriptors
558
if (options_.extended == true) {
559
desc = Mat::zeros((int)kpts.size(), 128, CV_32FC1);
560
}
561
else {
562
desc = Mat::zeros((int)kpts.size(), 64, CV_32FC1);
563
}
564
565
parallel_for_(Range(0, (int)kpts.size()), KAZE_Descriptor_Invoker(kpts, desc, evolution_, options_));
566
}
567
568
/* ************************************************************************* */
569
/**
570
* @brief This method computes the main orientation for a given keypoint
571
* @param kpt Input keypoint
572
* @note The orientation is computed using a similar approach as described in the
573
* original SURF method. See Bay et al., Speeded Up Robust Features, ECCV 2006
574
*/
575
void KAZEFeatures::Compute_Main_Orientation(KeyPoint &kpt, const std::vector<TEvolution>& evolution_, const KAZEOptions& options)
{
    int ix = 0, iy = 0, idx = 0, s = 0, level = 0;
    float xf = 0.0, yf = 0.0, gweight = 0.0;
    // 109 = number of integer offsets (i, j) in [-6, 6]^2 with i*i + j*j < 36.
    vector<float> resX(109), resY(109), Ang(109);

    // Variables for computing the dominant direction
    float sumX = 0.0, sumY = 0.0, max = 0.0, ang1 = 0.0, ang2 = 0.0;

    // Get the information from the keypoint
    xf = kpt.pt.x;
    yf = kpt.pt.y;
    level = kpt.class_id;
    // Scale in pixels (keypoint size is a diameter).
    s = cvRound(kpt.size / 2.0f);

    // Calculate derivatives responses for points within radius of 6*scale
    for (int i = -6; i <= 6; ++i) {
        for (int j = -6; j <= 6; ++j) {
            if (i*i + j*j < 36) {
                iy = cvRound(yf + j*s);
                ix = cvRound(xf + i*s);

                if (iy >= 0 && iy < options.img_height && ix >= 0 && ix < options.img_width) {
                    // Gaussian-weighted first derivatives at the sample.
                    gweight = gaussian(iy - yf, ix - xf, 2.5f*s);
                    resX[idx] = gweight*(*(evolution_[level].Lx.ptr<float>(iy)+ix));
                    resY[idx] = gweight*(*(evolution_[level].Ly.ptr<float>(iy)+ix));
                }
                else {
                    // Samples outside the image contribute nothing.
                    resX[idx] = 0.0;
                    resY[idx] = 0.0;
                }

                // Sample angle in radians (fastAtan2 returns degrees).
                Ang[idx] = fastAtan2(resY[idx], resX[idx]) * (float)(CV_PI / 180.0f);
                ++idx;
            }
        }
    }

    // Loop slides pi/3 window around feature point
    for (ang1 = 0; ang1 < 2.0f*CV_PI; ang1 += 0.15f) {
        // Window end, wrapped back into [0, 2*pi).
        ang2 = (ang1 + (float)(CV_PI / 3.0) > (float)(2.0*CV_PI) ? ang1 - (float)(5.0*CV_PI / 3.0) : ang1 + (float)(CV_PI / 3.0));
        sumX = sumY = 0.f;

        for (size_t k = 0; k < Ang.size(); ++k) {
            // Get angle from the x-axis of the sample point
            const float & ang = Ang[k];

            // Determine whether the point is within the window
            if (ang1 < ang2 && ang1 < ang && ang < ang2) {
                sumX += resX[k];
                sumY += resY[k];
            }
            // Wrapped window: accept samples in either of the two arcs.
            else if (ang2 < ang1 &&
                     ((ang > 0 && ang < ang2) || (ang > ang1 && ang < (float)(2.0*CV_PI)))) {
                sumX += resX[k];
                sumY += resY[k];
            }
        }

        // if the vector produced from this window is longer than all
        // previous vectors then this forms the new dominant direction
        if (sumX*sumX + sumY*sumY > max) {
            // store largest orientation
            max = sumX*sumX + sumY*sumY;
            kpt.angle = fastAtan2(sumY, sumX);
        }
    }
}
643
644
/* ************************************************************************* */
645
/**
646
* @brief This method computes the upright descriptor (not rotation invariant) of
647
* the provided keypoint
648
* @param kpt Input keypoint
649
* @param desc Descriptor vector
650
* @note Rectangular grid of 24 s x 24 s. Descriptor Length 64. The descriptor is inspired
651
* from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching,
652
* ECCV 2008
653
*/
654
void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_64(const KeyPoint &kpt, float *desc) const
{
    float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0;
    float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0;
    float sample_x = 0.0, sample_y = 0.0;
    int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0;
    int x2 = 0, y2 = 0, kx = 0, ky = 0, i = 0, j = 0, dcount = 0;
    float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0;
    int dsize = 0, scale = 0, level = 0;

    std::vector<TEvolution>& evolution = *evolution_;

    // Subregion centers for the 4x4 gaussian weighting
    float cx = -0.5f, cy = 0.5f;

    // Set the descriptor size and the sample and pattern sizes
    dsize = 64;
    sample_step = 5;
    pattern_size = 12;

    // Get the information from the keypoint
    yf = kpt.pt.y;
    xf = kpt.pt.x;
    // Scale in pixels (keypoint size is a diameter).
    scale = cvRound(kpt.size / 2.0f);
    level = kpt.class_id;

    i = -8;

    // Calculate descriptor for this interest point
    // Area of size 24 s x 24 s
    // Outer loops walk a 4x4 grid of overlapping 9x9 subregions; the
    // "i = i - 4; ... i += 9;" pattern advances by 5 with a 4-sample overlap.
    while (i < pattern_size) {
        j = -8;
        i = i - 4;

        cx += 1.0f;
        cy = -0.5f;

        while (j < pattern_size) {

            // Per-subregion accumulators.
            dx = dy = mdx = mdy = 0.0;
            cy += 1.0f;
            j = j - 4;

            // Subregion center, used to weight samples by distance.
            ky = i + sample_step;
            kx = j + sample_step;

            ys = yf + (ky*scale);
            xs = xf + (kx*scale);

            for (int k = i; k < i + 9; k++) {
                for (int l = j; l < j + 9; l++) {

                    sample_y = k*scale + yf;
                    sample_x = l*scale + xf;

                    //Get the gaussian weighted x and y responses
                    gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5f*scale);

                    // Neighbouring integer pixels for bilinear interpolation.
                    y1 = (int)(sample_y - 0.5f);
                    x1 = (int)(sample_x - 0.5f);

                    checkDescriptorLimits(x1, y1, options_.img_width, options_.img_height);

                    y2 = (int)(sample_y + 0.5f);
                    x2 = (int)(sample_x + 0.5f);

                    checkDescriptorLimits(x2, y2, options_.img_width, options_.img_height);

                    fx = sample_x - x1;
                    fy = sample_y - y1;

                    // Bilinear interpolation of Lx at the sample position.
                    res1 = *(evolution[level].Lx.ptr<float>(y1)+x1);
                    res2 = *(evolution[level].Lx.ptr<float>(y1)+x2);
                    res3 = *(evolution[level].Lx.ptr<float>(y2)+x1);
                    res4 = *(evolution[level].Lx.ptr<float>(y2)+x2);
                    rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;

                    // Bilinear interpolation of Ly at the sample position.
                    res1 = *(evolution[level].Ly.ptr<float>(y1)+x1);
                    res2 = *(evolution[level].Ly.ptr<float>(y1)+x2);
                    res3 = *(evolution[level].Ly.ptr<float>(y2)+x1);
                    res4 = *(evolution[level].Ly.ptr<float>(y2)+x2);
                    ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;

                    rx = gauss_s1*rx;
                    ry = gauss_s1*ry;

                    // Sum the derivatives to the cumulative descriptor
                    dx += rx;
                    dy += ry;
                    mdx += fabs(rx);
                    mdy += fabs(ry);
                }
            }

            // Add the values to the descriptor vector
            gauss_s2 = gaussian(cx - 2.0f, cy - 2.0f, 1.5f);

            desc[dcount++] = dx*gauss_s2;
            desc[dcount++] = dy*gauss_s2;
            desc[dcount++] = mdx*gauss_s2;
            desc[dcount++] = mdy*gauss_s2;

            len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2;

            j += 9;
        }

        i += 9;
    }

    // convert to unit vector
    // NOTE(review): if every response is zero, len is 0 and the division
    // below produces non-finite values — same as the original behavior.
    len = sqrt(len);

    for (i = 0; i < dsize; i++) {
        desc[i] /= len;
    }
}
771
772
/* ************************************************************************* */
773
/**
774
* @brief This method computes the descriptor of the provided keypoint given the
775
* main orientation of the keypoint
776
* @param kpt Input keypoint
777
* @param desc Descriptor vector
778
* @note Rectangular grid of 24 s x 24 s. Descriptor Length 64. The descriptor is inspired
779
* from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching,
780
* ECCV 2008
781
*/
782
void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_64(const KeyPoint &kpt, float *desc) const
{
    float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0;
    float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0;
    float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0;
    float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0;
    int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0;
    int kx = 0, ky = 0, i = 0, j = 0, dcount = 0;
    int dsize = 0, scale = 0, level = 0;

    std::vector<TEvolution>& evolution = *evolution_;

    // Subregion centers for the 4x4 gaussian weighting
    float cx = -0.5f, cy = 0.5f;

    // Set the descriptor size and the sample and pattern sizes
    dsize = 64;
    sample_step = 5;
    pattern_size = 12;

    // Get the information from the keypoint
    yf = kpt.pt.y;
    xf = kpt.pt.x;
    // Scale in pixels (keypoint size is a diameter).
    scale = cvRound(kpt.size / 2.0f);
    // Keypoint orientation, converted from degrees to radians.
    angle = kpt.angle * static_cast<float>(CV_PI / 180.f);
    level = kpt.class_id;
    co = cos(angle);
    si = sin(angle);

    i = -8;

    // Calculate descriptor for this interest point
    // Area of size 24 s x 24 s
    // Outer loops walk a 4x4 grid of overlapping 9x9 subregions; the
    // "i = i - 4; ... i += 9;" pattern advances by 5 with a 4-sample overlap.
    while (i < pattern_size) {

        j = -8;
        i = i - 4;

        cx += 1.0f;
        cy = -0.5f;

        while (j < pattern_size) {

            // Per-subregion accumulators.
            dx = dy = mdx = mdy = 0.0;
            cy += 1.0f;
            j = j - 4;

            ky = i + sample_step;
            kx = j + sample_step;

            // Subregion center rotated into the keypoint's frame.
            xs = xf + (-kx*scale*si + ky*scale*co);
            ys = yf + (kx*scale*co + ky*scale*si);

            for (int k = i; k < i + 9; ++k) {
                for (int l = j; l < j + 9; ++l) {

                    // Get coords of sample point on the rotated axis
                    sample_y = yf + (l*scale*co + k*scale*si);
                    sample_x = xf + (-l*scale*si + k*scale*co);

                    // Get the gaussian weighted x and y responses
                    gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5f*scale);

                    // Neighbouring integer pixels for bilinear interpolation.
                    y1 = cvFloor(sample_y);
                    x1 = cvFloor(sample_x);

                    checkDescriptorLimits(x1, y1, options_.img_width, options_.img_height);

                    y2 = y1 + 1;
                    x2 = x1 + 1;

                    checkDescriptorLimits(x2, y2, options_.img_width, options_.img_height);

                    fx = sample_x - x1;
                    fy = sample_y - y1;

                    // Bilinear interpolation of Lx at the sample position.
                    res1 = *(evolution[level].Lx.ptr<float>(y1)+x1);
                    res2 = *(evolution[level].Lx.ptr<float>(y1)+x2);
                    res3 = *(evolution[level].Lx.ptr<float>(y2)+x1);
                    res4 = *(evolution[level].Lx.ptr<float>(y2)+x2);
                    rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;

                    // Bilinear interpolation of Ly at the sample position.
                    res1 = *(evolution[level].Ly.ptr<float>(y1)+x1);
                    res2 = *(evolution[level].Ly.ptr<float>(y1)+x2);
                    res3 = *(evolution[level].Ly.ptr<float>(y2)+x1);
                    res4 = *(evolution[level].Ly.ptr<float>(y2)+x2);
                    ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;

                    // Get the x and y derivatives on the rotated axis
                    rry = gauss_s1*(rx*co + ry*si);
                    rrx = gauss_s1*(-rx*si + ry*co);

                    // Sum the derivatives to the cumulative descriptor
                    dx += rrx;
                    dy += rry;
                    mdx += fabs(rrx);
                    mdy += fabs(rry);
                }
            }

            // Add the values to the descriptor vector
            gauss_s2 = gaussian(cx - 2.0f, cy - 2.0f, 1.5f);
            desc[dcount++] = dx*gauss_s2;
            desc[dcount++] = dy*gauss_s2;
            desc[dcount++] = mdx*gauss_s2;
            desc[dcount++] = mdy*gauss_s2;
            len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2;
            j += 9;
        }
        i += 9;
    }

    // convert to unit vector
    // NOTE(review): if every response is zero, len is 0 and the division
    // below produces non-finite values — same as the original behavior.
    len = sqrt(len);

    for (i = 0; i < dsize; i++) {
        desc[i] /= len;
    }
}
900
901
/* ************************************************************************* */
902
/**
903
* @brief This method computes the extended upright descriptor (not rotation invariant) of
904
* the provided keypoint
905
* @param kpt Input keypoint
906
* @param desc Descriptor vector
907
* @note Rectangular grid of 24 s x 24 s. Descriptor Length 128. The descriptor is inspired
908
* from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching,
909
* ECCV 2008
910
*/
911
void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_128(const KeyPoint &kpt, float *desc) const
912
{
913
float gauss_s1 = 0.0, gauss_s2 = 0.0;
914
float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0;
915
float sample_x = 0.0, sample_y = 0.0;
916
int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0;
917
int x2 = 0, y2 = 0, kx = 0, ky = 0, i = 0, j = 0, dcount = 0;
918
float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0;
919
float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0;
920
float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0;
921
int dsize = 0, scale = 0, level = 0;
922
923
// Subregion centers for the 4x4 gaussian weighting
924
float cx = -0.5f, cy = 0.5f;
925
926
std::vector<TEvolution>& evolution = *evolution_;
927
928
// Set the descriptor size and the sample and pattern sizes
929
dsize = 128;
930
sample_step = 5;
931
pattern_size = 12;
932
933
// Get the information from the keypoint
934
yf = kpt.pt.y;
935
xf = kpt.pt.x;
936
scale = cvRound(kpt.size / 2.0f);
937
level = kpt.class_id;
938
939
i = -8;
940
941
// Calculate descriptor for this interest point
942
// Area of size 24 s x 24 s
943
while (i < pattern_size) {
944
945
j = -8;
946
i = i - 4;
947
948
cx += 1.0f;
949
cy = -0.5f;
950
951
while (j < pattern_size) {
952
953
dxp = dxn = mdxp = mdxn = 0.0;
954
dyp = dyn = mdyp = mdyn = 0.0;
955
956
cy += 1.0f;
957
j = j - 4;
958
959
ky = i + sample_step;
960
kx = j + sample_step;
961
962
ys = yf + (ky*scale);
963
xs = xf + (kx*scale);
964
965
for (int k = i; k < i + 9; k++) {
966
for (int l = j; l < j + 9; l++) {
967
968
sample_y = k*scale + yf;
969
sample_x = l*scale + xf;
970
971
//Get the gaussian weighted x and y responses
972
gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5f*scale);
973
974
y1 = (int)(sample_y - 0.5f);
975
x1 = (int)(sample_x - 0.5f);
976
977
checkDescriptorLimits(x1, y1, options_.img_width, options_.img_height);
978
979
y2 = (int)(sample_y + 0.5f);
980
x2 = (int)(sample_x + 0.5f);
981
982
checkDescriptorLimits(x2, y2, options_.img_width, options_.img_height);
983
984
fx = sample_x - x1;
985
fy = sample_y - y1;
986
987
res1 = *(evolution[level].Lx.ptr<float>(y1)+x1);
988
res2 = *(evolution[level].Lx.ptr<float>(y1)+x2);
989
res3 = *(evolution[level].Lx.ptr<float>(y2)+x1);
990
res4 = *(evolution[level].Lx.ptr<float>(y2)+x2);
991
rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
992
993
res1 = *(evolution[level].Ly.ptr<float>(y1)+x1);
994
res2 = *(evolution[level].Ly.ptr<float>(y1)+x2);
995
res3 = *(evolution[level].Ly.ptr<float>(y2)+x1);
996
res4 = *(evolution[level].Ly.ptr<float>(y2)+x2);
997
ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
998
999
rx = gauss_s1*rx;
1000
ry = gauss_s1*ry;
1001
1002
// Sum the derivatives to the cumulative descriptor
1003
if (ry >= 0.0) {
1004
dxp += rx;
1005
mdxp += fabs(rx);
1006
}
1007
else {
1008
dxn += rx;
1009
mdxn += fabs(rx);
1010
}
1011
1012
if (rx >= 0.0) {
1013
dyp += ry;
1014
mdyp += fabs(ry);
1015
}
1016
else {
1017
dyn += ry;
1018
mdyn += fabs(ry);
1019
}
1020
}
1021
}
1022
1023
// Add the values to the descriptor vector
1024
gauss_s2 = gaussian(cx - 2.0f, cy - 2.0f, 1.5f);
1025
1026
desc[dcount++] = dxp*gauss_s2;
1027
desc[dcount++] = dxn*gauss_s2;
1028
desc[dcount++] = mdxp*gauss_s2;
1029
desc[dcount++] = mdxn*gauss_s2;
1030
desc[dcount++] = dyp*gauss_s2;
1031
desc[dcount++] = dyn*gauss_s2;
1032
desc[dcount++] = mdyp*gauss_s2;
1033
desc[dcount++] = mdyn*gauss_s2;
1034
1035
// Store the current length^2 of the vector
1036
len += (dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn +
1037
dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn)*gauss_s2*gauss_s2;
1038
1039
j += 9;
1040
}
1041
1042
i += 9;
1043
}
1044
1045
// convert to unit vector
1046
len = sqrt(len);
1047
1048
for (i = 0; i < dsize; i++) {
1049
desc[i] /= len;
1050
}
1051
}
1052
1053
/* ************************************************************************* */
/**
* @brief This method computes the extended G-SURF descriptor of the provided keypoint
* given the main orientation of the keypoint
* @param kpt Input keypoint
* @param desc Descriptor vector
* @note Rectangular grid of 24 s x 24 s. Descriptor Length 128. The descriptor is inspired
* from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching,
* ECCV 2008
*/
void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_128(const KeyPoint &kpt, float *desc) const
1064
{
1065
float gauss_s1 = 0.0, gauss_s2 = 0.0;
1066
float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0;
1067
float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0;
1068
float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0;
1069
float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0;
1070
float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0;
1071
int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0;
1072
int kx = 0, ky = 0, i = 0, j = 0, dcount = 0;
1073
int dsize = 0, scale = 0, level = 0;
1074
1075
std::vector<TEvolution>& evolution = *evolution_;
1076
1077
// Subregion centers for the 4x4 gaussian weighting
1078
float cx = -0.5f, cy = 0.5f;
1079
1080
// Set the descriptor size and the sample and pattern sizes
1081
dsize = 128;
1082
sample_step = 5;
1083
pattern_size = 12;
1084
1085
// Get the information from the keypoint
1086
yf = kpt.pt.y;
1087
xf = kpt.pt.x;
1088
scale = cvRound(kpt.size / 2.0f);
1089
angle = kpt.angle * static_cast<float>(CV_PI / 180.f);
1090
level = kpt.class_id;
1091
co = cos(angle);
1092
si = sin(angle);
1093
1094
i = -8;
1095
1096
// Calculate descriptor for this interest point
1097
// Area of size 24 s x 24 s
1098
while (i < pattern_size) {
1099
1100
j = -8;
1101
i = i - 4;
1102
1103
cx += 1.0f;
1104
cy = -0.5f;
1105
1106
while (j < pattern_size) {
1107
1108
dxp = dxn = mdxp = mdxn = 0.0;
1109
dyp = dyn = mdyp = mdyn = 0.0;
1110
1111
cy += 1.0f;
1112
j = j - 4;
1113
1114
ky = i + sample_step;
1115
kx = j + sample_step;
1116
1117
xs = xf + (-kx*scale*si + ky*scale*co);
1118
ys = yf + (kx*scale*co + ky*scale*si);
1119
1120
for (int k = i; k < i + 9; ++k) {
1121
for (int l = j; l < j + 9; ++l) {
1122
1123
// Get coords of sample point on the rotated axis
1124
sample_y = yf + (l*scale*co + k*scale*si);
1125
sample_x = xf + (-l*scale*si + k*scale*co);
1126
1127
// Get the gaussian weighted x and y responses
1128
gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5f*scale);
1129
1130
y1 = cvFloor(sample_y);
1131
x1 = cvFloor(sample_x);
1132
1133
checkDescriptorLimits(x1, y1, options_.img_width, options_.img_height);
1134
1135
y2 = y1 + 1;
1136
x2 = x1 + 1;
1137
1138
checkDescriptorLimits(x2, y2, options_.img_width, options_.img_height);
1139
1140
fx = sample_x - x1;
1141
fy = sample_y - y1;
1142
1143
res1 = *(evolution[level].Lx.ptr<float>(y1)+x1);
1144
res2 = *(evolution[level].Lx.ptr<float>(y1)+x2);
1145
res3 = *(evolution[level].Lx.ptr<float>(y2)+x1);
1146
res4 = *(evolution[level].Lx.ptr<float>(y2)+x2);
1147
rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
1148
1149
res1 = *(evolution[level].Ly.ptr<float>(y1)+x1);
1150
res2 = *(evolution[level].Ly.ptr<float>(y1)+x2);
1151
res3 = *(evolution[level].Ly.ptr<float>(y2)+x1);
1152
res4 = *(evolution[level].Ly.ptr<float>(y2)+x2);
1153
ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;
1154
1155
// Get the x and y derivatives on the rotated axis
1156
rry = gauss_s1*(rx*co + ry*si);
1157
rrx = gauss_s1*(-rx*si + ry*co);
1158
1159
// Sum the derivatives to the cumulative descriptor
1160
if (rry >= 0.0) {
1161
dxp += rrx;
1162
mdxp += fabs(rrx);
1163
}
1164
else {
1165
dxn += rrx;
1166
mdxn += fabs(rrx);
1167
}
1168
1169
if (rrx >= 0.0) {
1170
dyp += rry;
1171
mdyp += fabs(rry);
1172
}
1173
else {
1174
dyn += rry;
1175
mdyn += fabs(rry);
1176
}
1177
}
1178
}
1179
1180
// Add the values to the descriptor vector
1181
gauss_s2 = gaussian(cx - 2.0f, cy - 2.0f, 1.5f);
1182
1183
desc[dcount++] = dxp*gauss_s2;
1184
desc[dcount++] = dxn*gauss_s2;
1185
desc[dcount++] = mdxp*gauss_s2;
1186
desc[dcount++] = mdxn*gauss_s2;
1187
desc[dcount++] = dyp*gauss_s2;
1188
desc[dcount++] = dyn*gauss_s2;
1189
desc[dcount++] = mdyp*gauss_s2;
1190
desc[dcount++] = mdyn*gauss_s2;
1191
1192
// Store the current length^2 of the vector
1193
len += (dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn +
1194
dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn)*gauss_s2*gauss_s2;
1195
1196
j += 9;
1197
}
1198
1199
i += 9;
1200
}
1201
1202
// convert to unit vector
1203
len = sqrt(len);
1204
1205
for (i = 0; i < dsize; i++) {
1206
desc[i] /= len;
1207
}
1208
}
1209
1210
}
1211
1212