// Path: blob/master/modules/features2d/src/kaze/AKAZEFeatures.cpp
/**1* @file AKAZEFeatures.cpp2* @brief Main class for detecting and describing binary features in an3* accelerated nonlinear scale space4* @date Sep 15, 20135* @author Pablo F. Alcantarilla, Jesus Nuevo6*/78#include "../precomp.hpp"9#include "AKAZEFeatures.h"10#include "fed.h"11#include "nldiffusion_functions.h"12#include "utils.h"13#include "opencl_kernels_features2d.hpp"1415#include <iostream>1617// Namespaces18namespace cv19{20using namespace std;2122/* ************************************************************************* */23/**24* @brief AKAZEFeatures constructor with input options25* @param options AKAZEFeatures configuration options26* @note This constructor allocates memory for the nonlinear scale space27*/28AKAZEFeatures::AKAZEFeatures(const AKAZEOptions& options) : options_(options) {2930ncycles_ = 0;31reordering_ = true;3233if (options_.descriptor_size > 0 && options_.descriptor >= AKAZE::DESCRIPTOR_MLDB_UPRIGHT) {34generateDescriptorSubsample(descriptorSamples_, descriptorBits_, options_.descriptor_size,35options_.descriptor_pattern_size, options_.descriptor_channels);36}3738Allocate_Memory_Evolution();39}4041/* ************************************************************************* */42/**43* @brief This method allocates the memory for the nonlinear diffusion evolution44*/45void AKAZEFeatures::Allocate_Memory_Evolution(void) {46CV_INSTRUMENT_REGION();4748float rfactor = 0.0f;49int level_height = 0, level_width = 0;5051// maximum size of the area for the descriptor computation52float smax = 0.0;53if (options_.descriptor == AKAZE::DESCRIPTOR_MLDB_UPRIGHT || options_.descriptor == AKAZE::DESCRIPTOR_MLDB) {54smax = 10.0f*sqrtf(2.0f);55}56else if (options_.descriptor == AKAZE::DESCRIPTOR_KAZE_UPRIGHT || options_.descriptor == AKAZE::DESCRIPTOR_KAZE) {57smax = 12.0f*sqrtf(2.0f);58}5960// Allocate the dimension of the matrices for the evolution61for (int i = 0, power = 1; i <= options_.omax - 1; i++, power *= 2) {62rfactor = 1.0f / power;63level_height 
= (int)(options_.img_height*rfactor);64level_width = (int)(options_.img_width*rfactor);6566// Smallest possible octave and allow one scale if the image is small67if ((level_width < 80 || level_height < 40) && i != 0) {68options_.omax = i;69break;70}7172for (int j = 0; j < options_.nsublevels; j++) {73MEvolution step;74step.size = Size(level_width, level_height);75step.esigma = options_.soffset*pow(2.f, (float)(j) / (float)(options_.nsublevels) + i);76step.sigma_size = cvRound(step.esigma * options_.derivative_factor / power); // In fact sigma_size only depends on j77step.etime = 0.5f * (step.esigma * step.esigma);78step.octave = i;79step.sublevel = j;80step.octave_ratio = (float)power;81step.border = cvRound(smax * step.sigma_size) + 1;8283evolution_.push_back(step);84}85}8687// Allocate memory for the number of cycles and time steps88for (size_t i = 1; i < evolution_.size(); i++) {89int naux = 0;90vector<float> tau;91float ttime = 0.0f;92ttime = evolution_[i].etime - evolution_[i - 1].etime;93naux = fed_tau_by_process_time(ttime, 1, 0.25f, reordering_, tau);94nsteps_.push_back(naux);95tsteps_.push_back(tau);96ncycles_++;97}98}99100/* ************************************************************************* */101/**102* @brief Computes kernel size for Gaussian smoothing if the image103* @param sigma Kernel standard deviation104* @returns kernel size105*/106static inline int getGaussianKernelSize(float sigma) {107// Compute an appropriate kernel size according to the specified sigma108int ksize = (int)cvCeil(2.0f*(1.0f + (sigma - 0.8f) / (0.3f)));109ksize |= 1; // kernel should be odd110return ksize;111}112113/* ************************************************************************* */114/**115* @brief This function computes a scalar non-linear diffusion step116* @param Lt Base image in the evolution117* @param Lf Conductivity image118* @param Lstep Output image that gives the difference between the current119* Ld and the next Ld being evolved120* @param row_begin 
row where to start121* @param row_end last row to fill exclusive. the range is [row_begin, row_end).122* @note Forward Euler Scheme 3x3 stencil123* The function c is a scalar value that depends on the gradient norm124* dL_by_ds = d(c dL_by_dx)_by_dx + d(c dL_by_dy)_by_dy125*/126static inline void127nld_step_scalar_one_lane(const Mat& Lt, const Mat& Lf, Mat& Lstep, float step_size, int row_begin, int row_end)128{129CV_INSTRUMENT_REGION();130/* The labeling scheme for this five star stencil:131[ a ]132[ -1 c +1 ]133[ b ]134*/135136Lstep.create(Lt.size(), Lt.type());137const int cols = Lt.cols - 2;138int row = row_begin;139140const float *lt_a, *lt_c, *lt_b;141const float *lf_a, *lf_c, *lf_b;142float *dst;143float step_r = 0.f;144145// Process the top row146if (row == 0) {147lt_c = Lt.ptr<float>(0) + 1; /* Skip the left-most column by +1 */148lf_c = Lf.ptr<float>(0) + 1;149lt_b = Lt.ptr<float>(1) + 1;150lf_b = Lf.ptr<float>(1) + 1;151152// fill the corner to prevent uninitialized values153dst = Lstep.ptr<float>(0);154dst[0] = 0.0f;155++dst;156157for (int j = 0; j < cols; j++) {158step_r = (lf_c[j] + lf_c[j + 1])*(lt_c[j + 1] - lt_c[j]) +159(lf_c[j] + lf_c[j - 1])*(lt_c[j - 1] - lt_c[j]) +160(lf_c[j] + lf_b[j ])*(lt_b[j ] - lt_c[j]);161dst[j] = step_r * step_size;162}163164// fill the corner to prevent uninitialized values165dst[cols] = 0.0f;166++row;167}168169// Process the middle rows170int middle_end = std::min(Lt.rows - 1, row_end);171for (; row < middle_end; ++row)172{173lt_a = Lt.ptr<float>(row - 1);174lf_a = Lf.ptr<float>(row - 1);175lt_c = Lt.ptr<float>(row );176lf_c = Lf.ptr<float>(row );177lt_b = Lt.ptr<float>(row + 1);178lf_b = Lf.ptr<float>(row + 1);179dst = Lstep.ptr<float>(row);180181// The left-most column182step_r = (lf_c[0] + lf_c[1])*(lt_c[1] - lt_c[0]) +183(lf_c[0] + lf_b[0])*(lt_b[0] - lt_c[0]) +184(lf_c[0] + lf_a[0])*(lt_a[0] - lt_c[0]);185dst[0] = step_r * step_size;186187lt_a++; lt_c++; lt_b++;188lf_a++; lf_c++; lf_b++;189dst++;190191// The 
middle columns192for (int j = 0; j < cols; j++)193{194step_r = (lf_c[j] + lf_c[j + 1])*(lt_c[j + 1] - lt_c[j]) +195(lf_c[j] + lf_c[j - 1])*(lt_c[j - 1] - lt_c[j]) +196(lf_c[j] + lf_b[j ])*(lt_b[j ] - lt_c[j]) +197(lf_c[j] + lf_a[j ])*(lt_a[j ] - lt_c[j]);198dst[j] = step_r * step_size;199}200201// The right-most column202step_r = (lf_c[cols] + lf_c[cols - 1])*(lt_c[cols - 1] - lt_c[cols]) +203(lf_c[cols] + lf_b[cols ])*(lt_b[cols ] - lt_c[cols]) +204(lf_c[cols] + lf_a[cols ])*(lt_a[cols ] - lt_c[cols]);205dst[cols] = step_r * step_size;206}207208// Process the bottom row (row == Lt.rows - 1)209if (row_end == Lt.rows) {210lt_a = Lt.ptr<float>(row - 1) + 1; /* Skip the left-most column by +1 */211lf_a = Lf.ptr<float>(row - 1) + 1;212lt_c = Lt.ptr<float>(row ) + 1;213lf_c = Lf.ptr<float>(row ) + 1;214215// fill the corner to prevent uninitialized values216dst = Lstep.ptr<float>(row);217dst[0] = 0.0f;218++dst;219220for (int j = 0; j < cols; j++) {221step_r = (lf_c[j] + lf_c[j + 1])*(lt_c[j + 1] - lt_c[j]) +222(lf_c[j] + lf_c[j - 1])*(lt_c[j - 1] - lt_c[j]) +223(lf_c[j] + lf_a[j ])*(lt_a[j ] - lt_c[j]);224dst[j] = step_r * step_size;225}226227// fill the corner to prevent uninitialized values228dst[cols] = 0.0f;229}230}231232class NonLinearScalarDiffusionStep : public ParallelLoopBody233{234public:235NonLinearScalarDiffusionStep(const Mat& Lt, const Mat& Lf, Mat& Lstep, float step_size)236: Lt_(&Lt), Lf_(&Lf), Lstep_(&Lstep), step_size_(step_size)237{}238239void operator()(const Range& range) const CV_OVERRIDE240{241nld_step_scalar_one_lane(*Lt_, *Lf_, *Lstep_, step_size_, range.start, range.end);242}243244private:245const Mat* Lt_;246const Mat* Lf_;247Mat* Lstep_;248float step_size_;249};250251#ifdef HAVE_OPENCL252static inline bool253ocl_non_linear_diffusion_step(InputArray Lt_, InputArray Lf_, OutputArray Lstep_, float step_size)254{255if(!Lt_.isContinuous())256return false;257258UMat Lt = Lt_.getUMat();259UMat Lf = Lf_.getUMat();260UMat Lstep = 
Lstep_.getUMat();261262size_t globalSize[] = {(size_t)Lt.cols, (size_t)Lt.rows};263264ocl::Kernel ker("AKAZE_nld_step_scalar", ocl::features2d::akaze_oclsrc);265if( ker.empty() )266return false;267268return ker.args(269ocl::KernelArg::ReadOnly(Lt),270ocl::KernelArg::PtrReadOnly(Lf),271ocl::KernelArg::PtrWriteOnly(Lstep),272step_size).run(2, globalSize, 0, true);273}274#endif // HAVE_OPENCL275276static inline void277non_linear_diffusion_step(InputArray Lt_, InputArray Lf_, OutputArray Lstep_, float step_size)278{279CV_INSTRUMENT_REGION();280281Lstep_.create(Lt_.size(), Lt_.type());282283CV_OCL_RUN(Lt_.isUMat() && Lf_.isUMat() && Lstep_.isUMat(),284ocl_non_linear_diffusion_step(Lt_, Lf_, Lstep_, step_size));285286Mat Lt = Lt_.getMat();287Mat Lf = Lf_.getMat();288Mat Lstep = Lstep_.getMat();289parallel_for_(Range(0, Lt.rows), NonLinearScalarDiffusionStep(Lt, Lf, Lstep, step_size));290}291292/**293* @brief This function computes a good empirical value for the k contrast factor294* given two gradient images, the percentile (0-1), the temporal storage to hold295* gradient norms and the histogram bins296* @param Lx Horizontal gradient of the input image297* @param Ly Vertical gradient of the input image298* @param nbins Number of histogram bins299* @return k contrast factor300*/301static inline float302compute_kcontrast(InputArray Lx_, InputArray Ly_, float perc, int nbins)303{304CV_INSTRUMENT_REGION();305306CV_Assert(nbins > 2);307CV_Assert(!Lx_.empty());308309Mat Lx = Lx_.getMat();310Mat Ly = Ly_.getMat();311312// temporary square roots of dot product313Mat modgs (Lx.rows - 2, Lx.cols - 2, CV_32F);314const int total = modgs.cols * modgs.rows;315float *modg = modgs.ptr<float>();316float hmax = 0.0f;317318for (int i = 1; i < Lx.rows - 1; i++) {319const float *lx = Lx.ptr<float>(i) + 1;320const float *ly = Ly.ptr<float>(i) + 1;321const int cols = Lx.cols - 2;322323for (int j = 0; j < cols; j++) {324float dist = sqrtf(lx[j] * lx[j] + ly[j] * ly[j]);325*modg++ = dist;326hmax 
= std::max(hmax, dist);327}328}329modg = modgs.ptr<float>();330331if (hmax == 0.0f)332return 0.03f; // e.g. a blank image333334// Compute the bin numbers: the value range [0, hmax] -> [0, nbins-1]335modgs *= (nbins - 1) / hmax;336337// Count up histogram338std::vector<int> hist(nbins, 0);339for (int i = 0; i < total; i++)340hist[(int)modg[i]]++;341342// Now find the perc of the histogram percentile343const int nthreshold = (int)((total - hist[0]) * perc); // Exclude hist[0] as background344int nelements = 0;345for (int k = 1; k < nbins; k++) {346if (nelements >= nthreshold)347return (float)hmax * k / nbins;348349nelements += hist[k];350}351352return 0.03f;353}354355#ifdef HAVE_OPENCL356static inline bool357ocl_pm_g2(InputArray Lx_, InputArray Ly_, OutputArray Lflow_, float kcontrast)358{359UMat Lx = Lx_.getUMat();360UMat Ly = Ly_.getUMat();361UMat Lflow = Lflow_.getUMat();362363int total = Lx.rows * Lx.cols;364size_t globalSize[] = {(size_t)total};365366ocl::Kernel ker("AKAZE_pm_g2", ocl::features2d::akaze_oclsrc);367if( ker.empty() )368return false;369370return ker.args(371ocl::KernelArg::PtrReadOnly(Lx),372ocl::KernelArg::PtrReadOnly(Ly),373ocl::KernelArg::PtrWriteOnly(Lflow),374kcontrast, total).run(1, globalSize, 0, true);375}376#endif // HAVE_OPENCL377378static inline void379compute_diffusivity(InputArray Lx, InputArray Ly, OutputArray Lflow, float kcontrast, KAZE::DiffusivityType diffusivity)380{381CV_INSTRUMENT_REGION();382383Lflow.create(Lx.size(), Lx.type());384385switch (diffusivity) {386case KAZE::DIFF_PM_G1:387pm_g1(Lx, Ly, Lflow, kcontrast);388break;389case KAZE::DIFF_PM_G2:390CV_OCL_RUN(Lx.isUMat() && Ly.isUMat() && Lflow.isUMat(), ocl_pm_g2(Lx, Ly, Lflow, kcontrast));391pm_g2(Lx, Ly, Lflow, kcontrast);392break;393case KAZE::DIFF_WEICKERT:394weickert_diffusivity(Lx, Ly, Lflow, kcontrast);395break;396case KAZE::DIFF_CHARBONNIER:397charbonnier_diffusivity(Lx, Ly, Lflow, kcontrast);398break;399default:400CV_Error_(Error::StsError, ("Diffusivity is not 
supported: %d", static_cast<int>(diffusivity)));401break;402}403}404405/**406* @brief Converts input image to grayscale float image407*408* @param image any image409* @param dst grayscale float image410*/411static inline void prepareInputImage(InputArray image, OutputArray dst)412{413Mat img = image.getMat();414if (img.channels() > 1)415cvtColor(image, img, COLOR_BGR2GRAY);416417if ( img.depth() == CV_32F )418dst.assign(img);419else if ( img.depth() == CV_8U )420img.convertTo(dst, CV_32F, 1.0 / 255.0, 0);421else if ( img.depth() == CV_16U )422img.convertTo(dst, CV_32F, 1.0 / 65535.0, 0);423}424425/**426* @brief This method creates the nonlinear scale space for a given image427* @param image Input image for which the nonlinear scale space needs to be created428*/429template<typename MatType>430static inline void431create_nonlinear_scale_space(InputArray image, const AKAZEOptions &options,432const std::vector<std::vector<float > > &tsteps_evolution, std::vector<Evolution<MatType> > &evolution)433{434CV_INSTRUMENT_REGION();435CV_Assert(evolution.size() > 0);436437// convert input to grayscale float image if needed438MatType img;439prepareInputImage(image, img);440441// create first level of the evolution442int ksize = getGaussianKernelSize(options.soffset);443GaussianBlur(img, evolution[0].Lsmooth, Size(ksize, ksize), options.soffset, options.soffset, BORDER_REPLICATE);444evolution[0].Lsmooth.copyTo(evolution[0].Lt);445446if (evolution.size() == 1) {447// we don't need to compute kcontrast factor448Compute_Determinant_Hessian_Response(evolution);449return;450}451452// derivatives, flow and diffusion step453MatType Lx, Ly, Lsmooth, Lflow, Lstep;454455// compute derivatives for computing k contrast456GaussianBlur(img, Lsmooth, Size(5, 5), 1.0f, 1.0f, BORDER_REPLICATE);457Scharr(Lsmooth, Lx, CV_32F, 1, 0, 1, 0, BORDER_DEFAULT);458Scharr(Lsmooth, Ly, CV_32F, 0, 1, 1, 0, BORDER_DEFAULT);459Lsmooth.release();460// compute the kcontrast factor461float kcontrast = 
compute_kcontrast(Lx, Ly, options.kcontrast_percentile, options.kcontrast_nbins);462463// Now generate the rest of evolution levels464for (size_t i = 1; i < evolution.size(); i++) {465Evolution<MatType> &e = evolution[i];466467if (e.octave > evolution[i - 1].octave) {468// new octave will be half the size469resize(evolution[i - 1].Lt, e.Lt, e.size, 0, 0, INTER_AREA);470kcontrast *= 0.75f;471}472else {473evolution[i - 1].Lt.copyTo(e.Lt);474}475476GaussianBlur(e.Lt, e.Lsmooth, Size(5, 5), 1.0f, 1.0f, BORDER_REPLICATE);477478// Compute the Gaussian derivatives Lx and Ly479Scharr(e.Lsmooth, Lx, CV_32F, 1, 0, 1.0, 0, BORDER_DEFAULT);480Scharr(e.Lsmooth, Ly, CV_32F, 0, 1, 1.0, 0, BORDER_DEFAULT);481482// Compute the conductivity equation483compute_diffusivity(Lx, Ly, Lflow, kcontrast, options.diffusivity);484485// Perform Fast Explicit Diffusion on Lt486const std::vector<float> &tsteps = tsteps_evolution[i - 1];487for (size_t j = 0; j < tsteps.size(); j++) {488const float step_size = tsteps[j] * 0.5f;489non_linear_diffusion_step(e.Lt, Lflow, Lstep, step_size);490add(e.Lt, Lstep, e.Lt);491}492}493494Compute_Determinant_Hessian_Response(evolution);495496return;497}498499/**500* @brief Converts between UMatPyramid and Pyramid and vice versa501* @details Matrices in evolution levels will be copied502*503* @param src source pyramid504* @param dst destination pyramid505*/506template<typename MatTypeSrc, typename MatTypeDst>507static inline void508convertScalePyramid(const std::vector<Evolution<MatTypeSrc> >& src, std::vector<Evolution<MatTypeDst> > &dst)509{510dst.resize(src.size());511for (size_t i = 0; i < src.size(); ++i) {512dst[i] = Evolution<MatTypeDst>(src[i]);513}514}515516/**517* @brief This method creates the nonlinear scale space for a given image518* @param image Input image for which the nonlinear scale space needs to be created519*/520void AKAZEFeatures::Create_Nonlinear_Scale_Space(InputArray image)521{522if (ocl::isOpenCLActivated() && image.isUMat()) {523// 
will run OCL version of scale space pyramid524UMatPyramid uPyr;525// init UMat pyramid with sizes526convertScalePyramid(evolution_, uPyr);527create_nonlinear_scale_space(image, options_, tsteps_, uPyr);528// download pyramid from GPU529convertScalePyramid(uPyr, evolution_);530} else {531// CPU version532create_nonlinear_scale_space(image, options_, tsteps_, evolution_);533}534}535536/* ************************************************************************* */537538#ifdef HAVE_OPENCL539static inline bool540ocl_compute_determinant(InputArray Lxx_, InputArray Lxy_, InputArray Lyy_,541OutputArray Ldet_, float sigma)542{543UMat Lxx = Lxx_.getUMat();544UMat Lxy = Lxy_.getUMat();545UMat Lyy = Lyy_.getUMat();546UMat Ldet = Ldet_.getUMat();547548const int total = Lxx.rows * Lxx.cols;549size_t globalSize[] = {(size_t)total};550551ocl::Kernel ker("AKAZE_compute_determinant", ocl::features2d::akaze_oclsrc);552if( ker.empty() )553return false;554555return ker.args(556ocl::KernelArg::PtrReadOnly(Lxx),557ocl::KernelArg::PtrReadOnly(Lxy),558ocl::KernelArg::PtrReadOnly(Lyy),559ocl::KernelArg::PtrWriteOnly(Ldet),560sigma, total).run(1, globalSize, 0, true);561}562#endif // HAVE_OPENCL563564/**565* @brief Compute determinant from hessians566* @details Compute Ldet by (Lxx.mul(Lyy) - Lxy.mul(Lxy)) * sigma567*568* @param Lxx spatial derivates569* @param Lxy spatial derivates570* @param Lyy spatial derivates571* @param Ldet output determinant572* @param sigma determinant will be scaled by this sigma573*/574static inline void compute_determinant(InputArray Lxx_, InputArray Lxy_, InputArray Lyy_,575OutputArray Ldet_, float sigma)576{577CV_INSTRUMENT_REGION();578579Ldet_.create(Lxx_.size(), Lxx_.type());580581CV_OCL_RUN(Lxx_.isUMat() && Ldet_.isUMat(), ocl_compute_determinant(Lxx_, Lxy_, Lyy_, Ldet_, sigma));582583// output determinant584Mat Lxx = Lxx_.getMat(), Lxy = Lxy_.getMat(), Lyy = Lyy_.getMat(), Ldet = Ldet_.getMat();585float *lxx = Lxx.ptr<float>();586float *lxy = 
Lxy.ptr<float>();587float *lyy = Lyy.ptr<float>();588float *ldet = Ldet.ptr<float>();589const int total = Lxx.cols * Lxx.rows;590for (int j = 0; j < total; j++) {591ldet[j] = (lxx[j] * lyy[j] - lxy[j] * lxy[j]) * sigma;592}593594}595596template <typename MatType>597class DeterminantHessianResponse : public ParallelLoopBody598{599public:600explicit DeterminantHessianResponse(std::vector<Evolution<MatType> >& ev)601: evolution_(&ev)602{603}604605void operator()(const Range& range) const CV_OVERRIDE606{607MatType Lxx, Lxy, Lyy;608609for (int i = range.start; i < range.end; i++)610{611Evolution<MatType> &e = (*evolution_)[i];612613// we cannot use cv:Scharr here, because we need to handle also614// kernel sizes other than 3, by default we are using 9x9, 5x5 and 7x7615616// compute kernels617Mat DxKx, DxKy, DyKx, DyKy;618compute_derivative_kernels(DxKx, DxKy, 1, 0, e.sigma_size);619compute_derivative_kernels(DyKx, DyKy, 0, 1, e.sigma_size);620621// compute the multiscale derivatives622sepFilter2D(e.Lsmooth, e.Lx, CV_32F, DxKx, DxKy);623sepFilter2D(e.Lx, Lxx, CV_32F, DxKx, DxKy);624sepFilter2D(e.Lx, Lxy, CV_32F, DyKx, DyKy);625sepFilter2D(e.Lsmooth, e.Ly, CV_32F, DyKx, DyKy);626sepFilter2D(e.Ly, Lyy, CV_32F, DyKx, DyKy);627628// free Lsmooth to same some space in the pyramid, it is not needed anymore629e.Lsmooth.release();630631// compute determinant scaled by sigma632float sigma_size_quat = (float)(e.sigma_size * e.sigma_size * e.sigma_size * e.sigma_size);633compute_determinant(Lxx, Lxy, Lyy, e.Ldet, sigma_size_quat);634}635}636637private:638std::vector<Evolution<MatType> >* evolution_;639};640641642/**643* @brief This method computes the feature detector response for the nonlinear scale space644* @details OCL version645* @note We use the Hessian determinant as the feature detector response646*/647static inline void648Compute_Determinant_Hessian_Response(UMatPyramid &evolution) {649CV_INSTRUMENT_REGION();650651DeterminantHessianResponse<UMat> body 
(evolution);652body(Range(0, (int)evolution.size()));653}654655/**656* @brief This method computes the feature detector response for the nonlinear scale space657* @details CPU version658* @note We use the Hessian determinant as the feature detector response659*/660static inline void661Compute_Determinant_Hessian_Response(Pyramid &evolution) {662CV_INSTRUMENT_REGION();663664parallel_for_(Range(0, (int)evolution.size()), DeterminantHessianResponse<Mat>(evolution));665}666667/* ************************************************************************* */668669/**670* @brief This method selects interesting keypoints through the nonlinear scale space671* @param kpts Vector of detected keypoints672*/673void AKAZEFeatures::Feature_Detection(std::vector<KeyPoint>& kpts)674{675CV_INSTRUMENT_REGION();676677kpts.clear();678std::vector<Mat> keypoints_by_layers;679Find_Scale_Space_Extrema(keypoints_by_layers);680Do_Subpixel_Refinement(keypoints_by_layers, kpts);681Compute_Keypoints_Orientation(kpts);682}683684/**685* @brief This method searches v for a neighbor point of the point candidate p686* @param x Coordinates of the keypoint candidate to search a neighbor687* @param y Coordinates of the keypoint candidate to search a neighbor688* @param mask Matrix holding keypoints positions689* @param search_radius neighbour radius for searching keypoints690* @param idx The index to mask, pointing to keypoint found.691* @return true if a neighbor point is found; false otherwise692*/693static inline bool694find_neighbor_point(const int x, const int y, const Mat &mask, const int search_radius, int &idx)695{696// search neighborhood for keypoints697for (int i = y - search_radius; i < y + search_radius; ++i) {698const uchar *curr = mask.ptr<uchar>(i);699for (int j = x - search_radius; j < x + search_radius; ++j) {700if (curr[j] == 0) {701continue; // skip non-keypoint702}703// fine-compare with L2 metric (L2 is smaller than our search window)704int dx = j - x;705int dy = i - y;706if (dx * 
dx + dy * dy <= search_radius * search_radius) {707idx = i * mask.cols + j;708return true;709}710}711}712713return false;714}715716/**717* @brief Find keypoints in parallel for each pyramid layer718*/719class FindKeypointsSameScale : public ParallelLoopBody720{721public:722explicit FindKeypointsSameScale(const Pyramid& ev,723std::vector<Mat>& kpts, float dthreshold)724: evolution_(&ev), keypoints_by_layers_(&kpts), dthreshold_(dthreshold)725{}726727void operator()(const Range& range) const CV_OVERRIDE728{729for (int i = range.start; i < range.end; i++)730{731const MEvolution &e = (*evolution_)[i];732Mat &kpts = (*keypoints_by_layers_)[i];733// this mask will hold positions of keypoints in this level734kpts = Mat::zeros(e.Ldet.size(), CV_8UC1);735736// if border is too big we shouldn't search any keypoints737if (e.border + 1 >= e.Ldet.rows)738continue;739740const float * prev = e.Ldet.ptr<float>(e.border - 1);741const float * curr = e.Ldet.ptr<float>(e.border );742const float * next = e.Ldet.ptr<float>(e.border + 1);743const float * ldet = e.Ldet.ptr<float>();744uchar *mask = kpts.ptr<uchar>();745const int search_radius = e.sigma_size; // size of keypoint in this level746747for (int y = e.border; y < e.Ldet.rows - e.border; y++) {748for (int x = e.border; x < e.Ldet.cols - e.border; x++) {749const float value = curr[x];750751// Filter the points with the detector threshold752if (value <= dthreshold_)753continue;754if (value <= curr[x-1] || value <= curr[x+1])755continue;756if (value <= prev[x-1] || value <= prev[x ] || value <= prev[x+1])757continue;758if (value <= next[x-1] || value <= next[x ] || value <= next[x+1])759continue;760761int idx = 0;762// Compare response with the same scale763if (find_neighbor_point(x, y, kpts, search_radius, idx)) {764if (value > ldet[idx]) {765mask[idx] = 0; // clear old point - we have better candidate now766} else {767continue; // there already is a better keypoint768}769}770771kpts.at<uchar>(y, x) = 1; // we have a new 
keypoint772}773774prev = curr;775curr = next;776next += e.Ldet.cols;777}778}779}780781private:782const Pyramid* evolution_;783std::vector<Mat>* keypoints_by_layers_;784float dthreshold_; ///< Detector response threshold to accept point785};786787/**788* @brief This method finds extrema in the nonlinear scale space789* @param keypoints_by_layers Output vectors of detected keypoints; one vector for each evolution level790*/791void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector<Mat>& keypoints_by_layers)792{793CV_INSTRUMENT_REGION();794795keypoints_by_layers.resize(evolution_.size());796797// find points in the same level798parallel_for_(Range(0, (int)evolution_.size()),799FindKeypointsSameScale(evolution_, keypoints_by_layers, options_.dthreshold));800801// Filter points with the lower scale level802for (size_t i = 1; i < keypoints_by_layers.size(); i++) {803// constants for this level804const Mat &keypoints = keypoints_by_layers[i];805const uchar *const kpts = keypoints_by_layers[i].ptr<uchar>();806uchar *const kpts_prev = keypoints_by_layers[i-1].ptr<uchar>();807const float *const ldet = evolution_[i].Ldet.ptr<float>();808const float *const ldet_prev = evolution_[i-1].Ldet.ptr<float>();809// ratios are just powers of 2810const int diff_ratio = (int)evolution_[i].octave_ratio / (int)evolution_[i-1].octave_ratio;811const int search_radius = evolution_[i].sigma_size * diff_ratio; // size of keypoint in this level812813size_t j = 0;814for (int y = 0; y < keypoints.rows; y++) {815for (int x = 0; x < keypoints.cols; x++, j++) {816if (kpts[j] == 0) {817continue; // skip non-keypoints818}819int idx = 0;820// project point to lower scale layer821const int p_x = x * diff_ratio;822const int p_y = y * diff_ratio;823if (find_neighbor_point(p_x, p_y, keypoints_by_layers[i-1], search_radius, idx)) {824if (ldet[j] > ldet_prev[idx]) {825kpts_prev[idx] = 0; // clear keypoint in lower layer826}827// else this pt may be pruned by the upper scale828}829}830}831}832833// Now filter 
points with the upper scale level (the other direction)834for (int i = (int)keypoints_by_layers.size() - 2; i >= 0; i--) {835// constants for this level836const Mat &keypoints = keypoints_by_layers[i];837const uchar *const kpts = keypoints_by_layers[i].ptr<uchar>();838uchar *const kpts_next = keypoints_by_layers[i+1].ptr<uchar>();839const float *const ldet = evolution_[i].Ldet.ptr<float>();840const float *const ldet_next = evolution_[i+1].Ldet.ptr<float>();841// ratios are just powers of 2, i+1 ratio is always greater or equal to i842const int diff_ratio = (int)evolution_[i+1].octave_ratio / (int)evolution_[i].octave_ratio;843const int search_radius = evolution_[i+1].sigma_size; // size of keypoints in upper level844845size_t j = 0;846for (int y = 0; y < keypoints.rows; y++) {847for (int x = 0; x < keypoints.cols; x++, j++) {848if (kpts[j] == 0) {849continue; // skip non-keypoints850}851int idx = 0;852// project point to upper scale layer853const int p_x = x / diff_ratio;854const int p_y = y / diff_ratio;855if (find_neighbor_point(p_x, p_y, keypoints_by_layers[i+1], search_radius, idx)) {856if (ldet[j] > ldet_next[idx]) {857kpts_next[idx] = 0; // clear keypoint in upper layer858}859}860}861}862}863}864865/* ************************************************************************* */866/**867* @brief This method performs subpixel refinement of the detected keypoints868* @param keypoints_by_layers Input vectors of detected keypoints, sorted by evolution levels869* @param kpts Output vector of the final refined keypoints870*/871void AKAZEFeatures::Do_Subpixel_Refinement(872std::vector<Mat>& keypoints_by_layers, std::vector<KeyPoint>& output_keypoints)873{874CV_INSTRUMENT_REGION();875876for (size_t i = 0; i < keypoints_by_layers.size(); i++) {877const MEvolution &e = evolution_[i];878const float * const ldet = e.Ldet.ptr<float>();879const float ratio = e.octave_ratio;880const int cols = e.Ldet.cols;881const Mat& keypoints = keypoints_by_layers[i];882const uchar *const 
kpts = keypoints.ptr<uchar>();883884size_t j = 0;885for (int y = 0; y < keypoints.rows; y++) {886for (int x = 0; x < keypoints.cols; x++, j++) {887if (kpts[j] == 0) {888continue; // skip non-keypoints889}890891// create a new keypoint892KeyPoint kp;893kp.pt.x = x * e.octave_ratio;894kp.pt.y = y * e.octave_ratio;895kp.size = e.esigma * options_.derivative_factor;896kp.angle = -1;897kp.response = ldet[j];898kp.octave = e.octave;899kp.class_id = static_cast<int>(i);900901// Compute the gradient902float Dx = 0.5f * (ldet[ y *cols + x + 1] - ldet[ y *cols + x - 1]);903float Dy = 0.5f * (ldet[(y + 1)*cols + x ] - ldet[(y - 1)*cols + x ]);904905// Compute the Hessian906float Dxx = ldet[ y *cols + x + 1] + ldet[ y *cols + x - 1] - 2.0f * ldet[y*cols + x];907float Dyy = ldet[(y + 1)*cols + x ] + ldet[(y - 1)*cols + x ] - 2.0f * ldet[y*cols + x];908float Dxy = 0.25f * (ldet[(y + 1)*cols + x + 1] + ldet[(y - 1)*cols + x - 1] -909ldet[(y - 1)*cols + x + 1] - ldet[(y + 1)*cols + x - 1]);910911// Solve the linear system912Matx22f A( Dxx, Dxy,913Dxy, Dyy );914Vec2f b( -Dx, -Dy );915Vec2f dst( 0.0f, 0.0f );916solve(A, b, dst, DECOMP_LU);917918float dx = dst(0);919float dy = dst(1);920921if (fabs(dx) > 1.0f || fabs(dy) > 1.0f)922continue; // Ignore the point that is not stable923924// Refine the coordinates925kp.pt.x += dx * ratio + .5f*(ratio-1.f);926kp.pt.y += dy * ratio + .5f*(ratio-1.f);927928kp.angle = 0.0;929kp.size *= 2.0f; // In OpenCV the size of a keypoint is the diameter930931// Push the refined keypoint to the final storage932output_keypoints.push_back(kp);933}934}935}936}937938/* ************************************************************************* */939940class SURF_Descriptor_Upright_64_Invoker : public ParallelLoopBody941{942public:943SURF_Descriptor_Upright_64_Invoker(std::vector<KeyPoint>& kpts, Mat& desc, const Pyramid& evolution)944: keypoints_(&kpts)945, descriptors_(&desc)946, evolution_(&evolution)947{948}949950void operator() (const Range& range) const 
CV_OVERRIDE951{952for (int i = range.start; i < range.end; i++)953{954Get_SURF_Descriptor_Upright_64((*keypoints_)[i], descriptors_->ptr<float>(i), descriptors_->cols);955}956}957958void Get_SURF_Descriptor_Upright_64(const KeyPoint& kpt, float* desc, int desc_size) const;959960private:961std::vector<KeyPoint>* keypoints_;962Mat* descriptors_;963const Pyramid* evolution_;964};965966class SURF_Descriptor_64_Invoker : public ParallelLoopBody967{968public:969SURF_Descriptor_64_Invoker(std::vector<KeyPoint>& kpts, Mat& desc, Pyramid& evolution)970: keypoints_(&kpts)971, descriptors_(&desc)972, evolution_(&evolution)973{974}975976void operator()(const Range& range) const CV_OVERRIDE977{978for (int i = range.start; i < range.end; i++)979{980Get_SURF_Descriptor_64((*keypoints_)[i], descriptors_->ptr<float>(i), descriptors_->cols);981}982}983984void Get_SURF_Descriptor_64(const KeyPoint& kpt, float* desc, int desc_size) const;985986private:987std::vector<KeyPoint>* keypoints_;988Mat* descriptors_;989Pyramid* evolution_;990};991992class MSURF_Upright_Descriptor_64_Invoker : public ParallelLoopBody993{994public:995MSURF_Upright_Descriptor_64_Invoker(std::vector<KeyPoint>& kpts, Mat& desc, Pyramid& evolution)996: keypoints_(&kpts)997, descriptors_(&desc)998, evolution_(&evolution)999{1000}10011002void operator()(const Range& range) const CV_OVERRIDE1003{1004for (int i = range.start; i < range.end; i++)1005{1006Get_MSURF_Upright_Descriptor_64((*keypoints_)[i], descriptors_->ptr<float>(i), descriptors_->cols);1007}1008}10091010void Get_MSURF_Upright_Descriptor_64(const KeyPoint& kpt, float* desc, int desc_size) const;10111012private:1013std::vector<KeyPoint>* keypoints_;1014Mat* descriptors_;1015Pyramid* evolution_;1016};10171018class MSURF_Descriptor_64_Invoker : public ParallelLoopBody1019{1020public:1021MSURF_Descriptor_64_Invoker(std::vector<KeyPoint>& kpts, Mat& desc, Pyramid& evolution)1022: keypoints_(&kpts)1023, descriptors_(&desc)1024, 
evolution_(&evolution)1025{1026}10271028void operator() (const Range& range) const CV_OVERRIDE1029{1030for (int i = range.start; i < range.end; i++)1031{1032Get_MSURF_Descriptor_64((*keypoints_)[i], descriptors_->ptr<float>(i), descriptors_->cols);1033}1034}10351036void Get_MSURF_Descriptor_64(const KeyPoint& kpt, float* desc, int desc_size) const;10371038private:1039std::vector<KeyPoint>* keypoints_;1040Mat* descriptors_;1041Pyramid* evolution_;1042};10431044class Upright_MLDB_Full_Descriptor_Invoker : public ParallelLoopBody1045{1046public:1047Upright_MLDB_Full_Descriptor_Invoker(std::vector<KeyPoint>& kpts, Mat& desc, Pyramid& evolution, AKAZEOptions& options)1048: keypoints_(&kpts)1049, descriptors_(&desc)1050, evolution_(&evolution)1051, options_(&options)1052{1053}10541055void operator() (const Range& range) const CV_OVERRIDE1056{1057for (int i = range.start; i < range.end; i++)1058{1059Get_Upright_MLDB_Full_Descriptor((*keypoints_)[i], descriptors_->ptr<unsigned char>(i), descriptors_->cols);1060}1061}10621063void Get_Upright_MLDB_Full_Descriptor(const KeyPoint& kpt, unsigned char* desc, int desc_size) const;10641065private:1066std::vector<KeyPoint>* keypoints_;1067Mat* descriptors_;1068Pyramid* evolution_;1069AKAZEOptions* options_;1070};10711072class Upright_MLDB_Descriptor_Subset_Invoker : public ParallelLoopBody1073{1074public:1075Upright_MLDB_Descriptor_Subset_Invoker(std::vector<KeyPoint>& kpts,1076Mat& desc,1077Pyramid& evolution,1078AKAZEOptions& options,1079Mat descriptorSamples,1080Mat descriptorBits)1081: keypoints_(&kpts)1082, descriptors_(&desc)1083, evolution_(&evolution)1084, options_(&options)1085, descriptorSamples_(descriptorSamples)1086, descriptorBits_(descriptorBits)1087{1088}10891090void operator() (const Range& range) const CV_OVERRIDE1091{1092for (int i = range.start; i < range.end; i++)1093{1094Get_Upright_MLDB_Descriptor_Subset((*keypoints_)[i], descriptors_->ptr<unsigned char>(i), descriptors_->cols);1095}1096}10971098void 
Get_Upright_MLDB_Descriptor_Subset(const KeyPoint& kpt, unsigned char* desc, int desc_size) const;

private:
    std::vector<KeyPoint>* keypoints_;
    Mat* descriptors_;
    Pyramid* evolution_;
    AKAZEOptions* options_;

    Mat descriptorSamples_;  // List of positions in the grids to sample LDB bits from.
    Mat descriptorBits_;
};

// Parallel body computing the full-length rotation-invariant M-LDB binary descriptor per keypoint.
class MLDB_Full_Descriptor_Invoker : public ParallelLoopBody
{
public:
    MLDB_Full_Descriptor_Invoker(std::vector<KeyPoint>& kpts, Mat& desc, Pyramid& evolution, AKAZEOptions& options)
        : keypoints_(&kpts)
        , descriptors_(&desc)
        , evolution_(&evolution)
        , options_(&options)
    {
    }

    void operator() (const Range& range) const CV_OVERRIDE
    {
        for (int i = range.start; i < range.end; i++)
        {
            Get_MLDB_Full_Descriptor((*keypoints_)[i], descriptors_->ptr<unsigned char>(i), descriptors_->cols);
        }
    }

    void Get_MLDB_Full_Descriptor(const KeyPoint& kpt, unsigned char* desc, int desc_size) const;
    // Averages the channel responses of one sampling grid into values[].
    void MLDB_Fill_Values(float* values, int sample_step, int level,
                          float xf, float yf, float co, float si, float scale) const;
    // Pairwise-compares values[] entries and appends the resulting bits at dpos.
    void MLDB_Binary_Comparisons(float* values, unsigned char* desc,
                                 int count, int& dpos) const;

private:
    std::vector<KeyPoint>* keypoints_;
    Mat* descriptors_;
    Pyramid* evolution_;
    AKAZEOptions* options_;
};

// Parallel body computing a randomly-selected subset of rotation-invariant M-LDB bits per keypoint.
class MLDB_Descriptor_Subset_Invoker : public ParallelLoopBody
{
public:
    MLDB_Descriptor_Subset_Invoker(std::vector<KeyPoint>& kpts,
                                   Mat& desc,
                                   Pyramid& evolution,
                                   AKAZEOptions& options,
                                   Mat descriptorSamples,
                                   Mat descriptorBits)
        : keypoints_(&kpts)
        , descriptors_(&desc)
        , evolution_(&evolution)
        , options_(&options)
        , descriptorSamples_(descriptorSamples)
        , descriptorBits_(descriptorBits)
    {
    }

    void operator() (const Range& range) const CV_OVERRIDE
    {
        for (int i = range.start; i < range.end; i++)
        {
            Get_MLDB_Descriptor_Subset((*keypoints_)[i],
                                       descriptors_->ptr<unsigned char>(i), descriptors_->cols);
        }
    }

    void Get_MLDB_Descriptor_Subset(const KeyPoint& kpt, unsigned char* desc, int desc_size) const;

private:
    std::vector<KeyPoint>* keypoints_;
    Mat* descriptors_;
    Pyramid* evolution_;
    AKAZEOptions* options_;

    Mat descriptorSamples_;  // List of positions in the grids to sample LDB bits from.
    Mat descriptorBits_;
};

/**
 * @brief This method computes the set of descriptors through the nonlinear scale space
 * @param kpts Vector of detected keypoints
 * @param descriptors Matrix to store the descriptors (one row per keypoint)
 */
void AKAZEFeatures::Compute_Descriptors(std::vector<KeyPoint>& kpts, OutputArray descriptors)
{
    CV_INSTRUMENT_REGION();

    // Every keypoint must reference a valid pyramid level (class_id indexes evolution_).
    for(size_t i = 0; i < kpts.size(); i++)
    {
        CV_Assert(0 <= kpts[i].class_id && kpts[i].class_id < static_cast<int>(evolution_.size()));
    }

    // Allocate memory for the matrix with the descriptors
    int descriptor_size = 64;       // KAZE float descriptors: 64 floats
    int descriptor_type = CV_32FC1;
    if (options_.descriptor >= AKAZE::DESCRIPTOR_MLDB_UPRIGHT)
    {
        int descriptor_bits = (options_.descriptor_size == 0)
            ?
(6 + 36 + 120)*options_.descriptor_channels // the full length binary descriptor -> 486 bits1201: options_.descriptor_size; // the random bit selection length binary descriptor1202descriptor_size = divUp(descriptor_bits, 8);1203descriptor_type = CV_8UC1;1204}1205descriptors.create((int)kpts.size(), descriptor_size, descriptor_type);12061207Mat desc = descriptors.getMat();12081209switch (options_.descriptor)1210{1211case AKAZE::DESCRIPTOR_KAZE_UPRIGHT: // Upright descriptors, not invariant to rotation1212{1213parallel_for_(Range(0, (int)kpts.size()), MSURF_Upright_Descriptor_64_Invoker(kpts, desc, evolution_));1214}1215break;1216case AKAZE::DESCRIPTOR_KAZE:1217{1218parallel_for_(Range(0, (int)kpts.size()), MSURF_Descriptor_64_Invoker(kpts, desc, evolution_));1219}1220break;1221case AKAZE::DESCRIPTOR_MLDB_UPRIGHT: // Upright descriptors, not invariant to rotation1222{1223if (options_.descriptor_size == 0)1224parallel_for_(Range(0, (int)kpts.size()), Upright_MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_));1225else1226parallel_for_(Range(0, (int)kpts.size()), Upright_MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_));1227}1228break;1229case AKAZE::DESCRIPTOR_MLDB:1230{1231if (options_.descriptor_size == 0)1232parallel_for_(Range(0, (int)kpts.size()), MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_));1233else1234parallel_for_(Range(0, (int)kpts.size()), MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_));1235}1236break;1237}1238}12391240/* ************************************************************************* */1241/**1242* @brief This function samples the derivative responses Lx and Ly for the points1243* within the radius of 6*scale from (x0, y0), then multiply 2D Gaussian weight1244* @param Lx Horizontal derivative1245* @param Ly Vertical derivative1246* @param x0 X-coordinate of the center point1247* @param y0 Y-coordinate of the 
center point
 * @param scale The sampling step
 * @param resX Output array of the weighted horizontal derivative responses
 * @param resY Output array of the weighted vertical derivative responses
 */
static inline
void Sample_Derivative_Response_Radius6(const Mat &Lx, const Mat &Ly,
                                        const int x0, const int y0, const int scale,
                                        float *resX, float *resY)
{
    /* ************************************************************************* */
    /// Lookup table for 2d gaussian (sigma = 2.5) where (0,0) is top left and (6,6) is bottom right
    static const float gauss25[7][7] =
    {
        { 0.02546481f, 0.02350698f, 0.01849125f, 0.01239505f, 0.00708017f, 0.00344629f, 0.00142946f },
        { 0.02350698f, 0.02169968f, 0.01706957f, 0.01144208f, 0.00653582f, 0.00318132f, 0.00131956f },
        { 0.01849125f, 0.01706957f, 0.01342740f, 0.00900066f, 0.00514126f, 0.00250252f, 0.00103800f },
        { 0.01239505f, 0.01144208f, 0.00900066f, 0.00603332f, 0.00344629f, 0.00167749f, 0.00069579f },
        { 0.00708017f, 0.00653582f, 0.00514126f, 0.00344629f, 0.00196855f, 0.00095820f, 0.00039744f },
        { 0.00344629f, 0.00318132f, 0.00250252f, 0.00167749f, 0.00095820f, 0.00046640f, 0.00019346f },
        { 0.00142946f, 0.00131956f, 0.00103800f, 0.00069579f, 0.00039744f, 0.00019346f, 0.00008024f }
    };
    // Precomputed list of the 109 integer offsets with i*i + j*j < 36 (disc of radius 6)
    // and their Gaussian weights; built once on first call (thread-safe function-local static).
    static const struct gtable
    {
        float weight[109];
        int xidx[109];
        int yidx[109];

        explicit gtable(void)
        {
            // Generate the weight and indices by one-time initialization
            int k = 0;
            for (int i = -6; i <= 6; ++i) {
                for (int j = -6; j <= 6; ++j) {
                    if (i*i + j*j < 36) {
                        CV_Assert(k < 109);
                        weight[k] = gauss25[abs(i)][abs(j)];  // weight is symmetric in |i|,|j|
                        yidx[k] = i;
                        xidx[k] = j;
                        ++k;
                    }
                }
            }
        }
    } g;

    // Caller must guarantee the whole sampling disc lies inside the image.
    CV_Assert(x0 - 6 * scale >= 0 && x0 + 6 * scale < Lx.cols);
    CV_Assert(y0 - 6 * scale >= 0 && y0 + 6 * scale < Lx.rows);

    for (int i = 0; i < 109; i++)
    {
        int y = y0 + g.yidx[i] * scale;
        int x = x0 + g.xidx[i] * scale;

        float w = g.weight[i];
        resX[i] = w * Lx.at<float>(y, x);
        resY[i] = w * Ly.at<float>(y, x);
    }
}

/**
 * @brief This function sorts a[] by quantized float values
 * @param a[] Input floating point array to sort
 * @param n The length of a[]
 * @param quantum The interval to convert a[i]'s float values to integers
 * @param nkeys a[i] < nkeys * quantum
 * @param idx[] Output array of the indices: a[idx[i]] forms a sorted array
 * @param cum[] Output array of the starting indices of quantized floats
 * @note The values of a[] in [k*quantum, (k + 1)*quantum) is labeled by
 * the integer k, which is calculated by floor(a[i]/quantum). After sorting,
 * the values from a[idx[cum[k]]] to a[idx[cum[k+1]-1]] are all labeled by k.
 * This sorting is unstable to reduce the memory access.
 */
static inline
void quantized_counting_sort(const float a[], const int n,
                             const float quantum, const int nkeys,
                             int idx[/*n*/], int cum[/*nkeys + 1*/])
{
    memset(cum, 0, sizeof(cum[0]) * (nkeys + 1));

    // Count up the quantized values
    for (int i = 0; i < n; i++)
    {
        int b = (int)(a[i] / quantum);
        if (b < 0 || b >= nkeys)
            b = 0;   // out-of-range values (e.g. NaN or >= nkeys*quantum) fall into bucket 0
        cum[b]++;
    }

    // Compute the inclusive prefix sum i.e. the end indices; cum[nkeys] is the total
    for (int i = 1; i <= nkeys; i++)
    {
        cum[i] += cum[i - 1];
    }
    CV_Assert(cum[nkeys] == n);

    // Generate the sorted indices; cum[] becomes the exclusive prefix sum i.e. the start indices of keys
    for (int i = 0; i < n; i++)
    {
        int b = (int)(a[i] / quantum);
        if (b < 0 || b >= nkeys)
            b = 0;
        idx[--cum[b]] = i;
    }
}

/**
 * @brief This function computes the main orientation for a given keypoint
 * @param kpt Input keypoint
 * @note The orientation is computed using a similar approach as described in the
See Bay et al., Speeded Up Robust Features, ECCV 2006
 */
static inline
void Compute_Main_Orientation(KeyPoint& kpt, const Pyramid& evolution)
{
    // get the right evolution level for this keypoint
    const MEvolution& e = evolution[kpt.class_id];
    // Get the information from the keypoint (converted to this level's resolution)
    int scale = cvRound(0.5f * kpt.size / e.octave_ratio);
    int x0 = cvRound(kpt.pt.x / e.octave_ratio);
    int y0 = cvRound(kpt.pt.y / e.octave_ratio);

    // Sample derivatives responses for the points within radius of 6*scale
    const int ang_size = 109;  // number of samples in the radius-6 disc
    float resX[ang_size], resY[ang_size];
    Sample_Derivative_Response_Radius6(e.Lx, e.Ly, x0, y0, scale, resX, resY);

    // Compute the angle of each gradient vector
    float Ang[ang_size];
    hal::fastAtan2(resY, resX, Ang, ang_size, false);

    // Sort by the angles; angles are labeled by slices of 0.15 radian
    const int slices = 42;
    const float ang_step = (float)(2.0 * CV_PI / slices);
    int slice[slices + 1];     // slice[k]..slice[k+1] bounds the samples whose angle is in slice k
    int sorted_idx[ang_size];
    quantized_counting_sort(Ang, ang_size, ang_step, slices, sorted_idx, slice);

    // Find the main angle by sliding a window of 7-slice size(=PI/3) around the keypoint
    const int win = 7;

    // Seed with the window starting at slice 0.
    float maxX = 0.0f, maxY = 0.0f;
    for (int i = slice[0]; i < slice[win]; i++) {
        const int idx = sorted_idx[i];
        maxX += resX[idx];
        maxY += resY[idx];
    }
    float maxNorm = maxX * maxX + maxY * maxY;

    // Windows fully inside [0, slices).
    for (int sn = 1; sn <= slices - win; sn++) {

        if (slice[sn] == slice[sn - 1] && slice[sn + win] == slice[sn + win - 1])
            continue; // The contents of the window didn't change; don't repeat the computation

        float sumX = 0.0f, sumY = 0.0f;
        for (int i = slice[sn]; i < slice[sn + win]; i++) {
            const int idx = sorted_idx[i];
            sumX += resX[idx];
            sumY += resY[idx];
        }

        float norm = sumX * sumX + sumY * sumY;
        if (norm > maxNorm)
            maxNorm = norm, maxX = sumX, maxY = sumY; // Found bigger one; update
    }

    // Windows that wrap around 2*PI: tail part [sn, slices) plus head part [0, remain).
    for (int sn = slices - win + 1; sn < slices; sn++) {
        int remain = sn + win - slices;

        if (slice[sn] == slice[sn - 1] && slice[remain] == slice[remain - 1])
            continue;

        float sumX = 0.0f, sumY = 0.0f;
        for (int i = slice[sn]; i < slice[slices]; i++) {
            const int idx = sorted_idx[i];
            sumX += resX[idx];
            sumY += resY[idx];
        }
        for (int i = slice[0]; i < slice[remain]; i++) {
            const int idx = sorted_idx[i];
            sumX += resX[idx];
            sumY += resY[idx];
        }

        float norm = sumX * sumX + sumY * sumY;
        if (norm > maxNorm)
            maxNorm = norm, maxX = sumX, maxY = sumY;
    }

    // Store the final result (degrees, as KeyPoint::angle expects)
    kpt.angle = fastAtan2(maxY, maxX);
}

// Parallel body assigning the dominant orientation to each keypoint in a range.
class ComputeKeypointOrientation : public ParallelLoopBody
{
public:
    ComputeKeypointOrientation(std::vector<KeyPoint>& kpts,
                               const Pyramid& evolution)
        : keypoints_(&kpts)
        , evolution_(&evolution)
    {
    }

    void operator() (const Range& range) const CV_OVERRIDE
    {
        for (int i = range.start; i < range.end; i++)
        {
            Compute_Main_Orientation((*keypoints_)[i], *evolution_);
        }
    }
private:
    std::vector<KeyPoint>* keypoints_;
    const Pyramid* evolution_;
};

/**
 * @brief This method computes the main orientation for a given keypoints
 * @param kpts Input keypoints
 */
void AKAZEFeatures::Compute_Keypoints_Orientation(std::vector<KeyPoint>& kpts) const
{
    CV_INSTRUMENT_REGION();

    parallel_for_(Range(0, (int)kpts.size()), ComputeKeypointOrientation(kpts, evolution_));
}

/* ************************************************************************* */
/**
 * @brief This method computes the upright descriptor (not rotation invariant) of
 * the provided keypoint
 * @param kpt Input keypoint
 * @param desc Descriptor vector
 * @note Rectangular grid of 24 s x 24 s. Descriptor Length 64.
The descriptor is inspired
 * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching,
 * ECCV 2008
 */
void MSURF_Upright_Descriptor_64_Invoker::Get_MSURF_Upright_Descriptor_64(const KeyPoint& kpt, float *desc, int desc_size) const {

    const int dsize = 64;
    CV_Assert(desc_size == dsize);

    float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0;
    float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0;
    float sample_x = 0.0, sample_y = 0.0;
    int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0;
    int x2 = 0, y2 = 0, kx = 0, ky = 0, i = 0, j = 0, dcount = 0;
    float fx = 0.0, fy = 0.0, ratio = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0;
    int scale = 0;

    // Subregion centers for the 4x4 gaussian weighting
    float cx = -0.5f, cy = 0.5f;

    const Pyramid& evolution = *evolution_;

    // Set the descriptor size and the sample and pattern sizes
    sample_step = 5;
    pattern_size = 12;

    // Get the information from the keypoint, converted to the level's resolution
    ratio = (float)(1 << kpt.octave);
    scale = cvRound(0.5f*kpt.size / ratio);
    const int level = kpt.class_id;
    const Mat Lx = evolution[level].Lx;
    const Mat Ly = evolution[level].Ly;
    yf = kpt.pt.y / ratio;
    xf = kpt.pt.x / ratio;

    i = -8;

    // Calculate descriptor for this interest point
    // Area of size 24 s x 24 s; 4x4 overlapping subregions of 9x9 samples each
    while (i < pattern_size) {
        j = -8;
        i = i - 4;   // step back 4 so consecutive subregions overlap

        cx += 1.0f;
        cy = -0.5f;

        while (j < pattern_size) {
            dx = dy = mdx = mdy = 0.0;
            cy += 1.0f;
            j = j - 4;

            ky = i + sample_step;
            kx = j + sample_step;

            // Subregion center used for the first (per-sample) Gaussian weight
            ys = yf + (ky*scale);
            xs = xf + (kx*scale);

            for (int k = i; k < i + 9; k++) {
                for (int l = j; l < j + 9; l++) {
                    sample_y = k*scale + yf;
                    sample_x = l*scale + xf;

                    //Get the gaussian weighted x and y responses
                    gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.50f*scale);

                    y1 = cvFloor(sample_y);
                    x1 = cvFloor(sample_x);

                    y2 = y1 + 1;
                    x2 = x1 + 1;

                    if (x1 < 0 || y1 < 0 || x2 >= Lx.cols || y2 >= Lx.rows)
                        continue; // FIXIT Boundaries

                    fx = sample_x - x1;
                    fy = sample_y - y1;

                    // Bilinear interpolation of the Lx / Ly responses at the sample point
                    res1 = Lx.at<float>(y1, x1);
                    res2 = Lx.at<float>(y1, x2);
                    res3 = Lx.at<float>(y2, x1);
                    res4 = Lx.at<float>(y2, x2);
                    rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;

                    res1 = Ly.at<float>(y1, x1);
                    res2 = Ly.at<float>(y1, x2);
                    res3 = Ly.at<float>(y2, x1);
                    res4 = Ly.at<float>(y2, x2);
                    ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;

                    rx = gauss_s1*rx;
                    ry = gauss_s1*ry;

                    // Sum the derivatives to the cumulative descriptor
                    dx += rx;
                    dy += ry;
                    mdx += fabs(rx);
                    mdy += fabs(ry);
                }
            }

            // Add the values to the descriptor vector, weighted by the subregion Gaussian
            gauss_s2 = gaussian(cx - 2.0f, cy - 2.0f, 1.5f);

            desc[dcount++] = dx*gauss_s2;
            desc[dcount++] = dy*gauss_s2;
            desc[dcount++] = mdx*gauss_s2;
            desc[dcount++] = mdy*gauss_s2;

            len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2;

            j += 9;
        }

        i += 9;
    }

    CV_Assert(dcount == desc_size);

    // convert to unit vector
    // NOTE(review): if every sample fell outside the image, len could be 0 and
    // len_inv becomes inf — confirm upstream guarantees at least one valid sample.
    len = sqrt(len);

    const float len_inv = 1.0f / len;
    for (i = 0; i < dsize; i++) {
        desc[i] *= len_inv;
    }
}

/* ************************************************************************* */
/**
 * @brief This method computes the descriptor of the provided keypoint given the
 * main orientation of the keypoint
 * @param kpt Input keypoint
 * @param desc Descriptor vector
 * @note Rectangular grid of 24 s x 24 s. Descriptor Length 64.
The descriptor is inspired
 * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching,
 * ECCV 2008
 */
void MSURF_Descriptor_64_Invoker::Get_MSURF_Descriptor_64(const KeyPoint& kpt, float *desc, int desc_size) const {

    const int dsize = 64;
    CV_Assert(desc_size == dsize);

    float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0;
    float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0;
    float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0;
    float fx = 0.0, fy = 0.0, ratio = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0;
    int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0;
    int kx = 0, ky = 0, i = 0, j = 0, dcount = 0;
    int scale = 0;

    // Subregion centers for the 4x4 gaussian weighting
    float cx = -0.5f, cy = 0.5f;

    const Pyramid& evolution = *evolution_;

    // Set the descriptor size and the sample and pattern sizes
    sample_step = 5;
    pattern_size = 12;

    // Get the information from the keypoint (angle is stored in degrees)
    ratio = (float)(1 << kpt.octave);
    scale = cvRound(0.5f*kpt.size / ratio);
    angle = kpt.angle * static_cast<float>(CV_PI / 180.f);
    const int level = kpt.class_id;
    const Mat Lx = evolution[level].Lx;
    const Mat Ly = evolution[level].Ly;
    yf = kpt.pt.y / ratio;
    xf = kpt.pt.x / ratio;
    co = cos(angle);
    si = sin(angle);

    i = -8;

    // Calculate descriptor for this interest point
    // Area of size 24 s x 24 s; the sampling grid is rotated by the keypoint angle
    while (i < pattern_size) {
        j = -8;
        i = i - 4;   // step back 4 so consecutive subregions overlap

        cx += 1.0f;
        cy = -0.5f;

        while (j < pattern_size) {
            dx = dy = mdx = mdy = 0.0;
            cy += 1.0f;
            j = j - 4;

            ky = i + sample_step;
            kx = j + sample_step;

            // Subregion center rotated into image coordinates
            xs = xf + (-kx*scale*si + ky*scale*co);
            ys = yf + (kx*scale*co + ky*scale*si);

            for (int k = i; k < i + 9; ++k) {
                for (int l = j; l < j + 9; ++l) {
                    // Get coords of sample point on the rotated axis
                    sample_y = yf + (l*scale*co + k*scale*si);
                    sample_x = xf + (-l*scale*si + k*scale*co);

                    // Get the gaussian weighted x and y responses
                    gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5f*scale);

                    y1 = cvFloor(sample_y);
                    x1 = cvFloor(sample_x);

                    y2 = y1 + 1;
                    x2 = x1 + 1;

                    if (x1 < 0 || y1 < 0 || x2 >= Lx.cols || y2 >= Lx.rows)
                        continue; // FIXIT Boundaries

                    fx = sample_x - x1;
                    fy = sample_y - y1;

                    // Bilinear interpolation of the Lx / Ly responses at the sample point
                    res1 = Lx.at<float>(y1, x1);
                    res2 = Lx.at<float>(y1, x2);
                    res3 = Lx.at<float>(y2, x1);
                    res4 = Lx.at<float>(y2, x2);
                    rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;

                    res1 = Ly.at<float>(y1, x1);
                    res2 = Ly.at<float>(y1, x2);
                    res3 = Ly.at<float>(y2, x1);
                    res4 = Ly.at<float>(y2, x2);
                    ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;

                    // Get the x and y derivatives on the rotated axis
                    rry = gauss_s1*(rx*co + ry*si);
                    rrx = gauss_s1*(-rx*si + ry*co);

                    // Sum the derivatives to the cumulative descriptor
                    dx += rrx;
                    dy += rry;
                    mdx += fabs(rrx);
                    mdy += fabs(rry);
                }
            }

            // Add the values to the descriptor vector, weighted by the subregion Gaussian
            gauss_s2 = gaussian(cx - 2.0f, cy - 2.0f, 1.5f);
            desc[dcount++] = dx*gauss_s2;
            desc[dcount++] = dy*gauss_s2;
            desc[dcount++] = mdx*gauss_s2;
            desc[dcount++] = mdy*gauss_s2;

            len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2;

            j += 9;
        }

        i += 9;
    }

    CV_Assert(dcount == desc_size);

    // convert to unit vector
    // NOTE(review): len could be 0 if no sample was inside the image — confirm
    // upstream guarantees at least one valid sample before dividing.
    len = sqrt(len);

    const float len_inv = 1.0f / len;
    for (i = 0; i < dsize; i++) {
        desc[i] *= len_inv;
    }
}

/* ************************************************************************* */
/**
 * @brief This method computes the upright descriptor (not rotation invariant) of
 * the provided keypoint
 * @param kpt Input keypoint
 * @param desc
Descriptor vector
 */
void Upright_MLDB_Full_Descriptor_Invoker::Get_Upright_MLDB_Full_Descriptor(const KeyPoint& kpt, unsigned char *desc, int desc_size) const {

    const AKAZEOptions & options = *options_;
    const Pyramid& evolution = *evolution_;

    // Buffer for the M-LDB descriptor: up to 16 grid cells x 3 channels (di, dx, dy)
    const int max_channels = 3;
    CV_Assert(options.descriptor_channels <= max_channels);
    float values[16*max_channels];

    // Get the information from the keypoint, converted to the level's resolution
    const float ratio = (float)(1 << kpt.octave);
    const int scale = cvRound(0.5f*kpt.size / ratio);
    const int level = kpt.class_id;
    const Mat Lx = evolution[level].Lx;
    const Mat Ly = evolution[level].Ly;
    const Mat Lt = evolution[level].Lt;
    const float yf = kpt.pt.y / ratio;
    const float xf = kpt.pt.x / ratio;

    // For 2x2 grid, 3x3 grid and 4x4 grid
    const int pattern_size = options_->descriptor_pattern_size;
    CV_Assert((pattern_size & 1) == 0);
    const int sample_step[3] = {
        pattern_size,
        divUp(pattern_size * 2, 3),
        divUp(pattern_size, 2)
    };

    memset(desc, 0, desc_size);

    // For the three grids
    int dcount1 = 0;   // global bit position across all grids
    for (int z = 0; z < 3; z++) {
        int dcount2 = 0;   // cell index within the current grid
        const int step = sample_step[z];
        for (int i = -pattern_size; i < pattern_size; i += step) {
            for (int j = -pattern_size; j < pattern_size; j += step) {
                float di = 0.0, dx = 0.0, dy = 0.0;

                // Average intensity and derivative responses over the cell's samples
                int nsamples = 0;
                for (int k = 0; k < step; k++) {
                    for (int l = 0; l < step; l++) {

                        // Get the coordinates of the sample point
                        const float sample_y = yf + (l+j)*scale;
                        const float sample_x = xf + (k+i)*scale;

                        const int y1 = cvRound(sample_y);
                        const int x1 = cvRound(sample_x);

                        if (y1 < 0 || y1 >= Lt.rows || x1 < 0 || x1 >= Lt.cols)
                            continue; // Boundaries

                        const float ri = Lt.at<float>(y1, x1);
                        const float rx = Lx.at<float>(y1, x1);
                        const float ry = Ly.at<float>(y1, x1);

                        di += ri;
                        dx += rx;
                        dy += ry;
                        nsamples++;
                    }
                }

                if (nsamples > 0)
                {
                    const float nsamples_inv = 1.0f / nsamples;
                    di *= nsamples_inv;
                    dx *= nsamples_inv;
                    dy *= nsamples_inv;
                }

                float *val = &values[dcount2*max_channels];
                *(val) = di;
                *(val+1) = dx;
                *(val+2) = dy;
                dcount2++;
            }
        }

        // Do binary comparison: one bit per channel per ordered cell pair
        const int num = (z + 2) * (z + 2);
        for (int i = 0; i < num; i++) {
            for (int j = i + 1; j < num; j++) {
                const float * valI = &values[i*max_channels];
                const float * valJ = &values[j*max_channels];
                for (int k = 0; k < 3; ++k) {
                    if (*(valI + k) > *(valJ + k)) {
                        desc[dcount1 / 8] |= (1 << (dcount1 % 8));
                    }
                    dcount1++;
                }
            }
        }

    } // for (int z = 0; z < 3; z++)

    // (6 + 36 + 120) pairs x 3 channels = 486 bits total
    CV_Assert(dcount1 <= desc_size*8);
    CV_Assert(divUp(dcount1, 8) == desc_size);
}

// Averages the channel responses (intensity, rotated derivatives) of one sampling
// grid into values[]; layout is one group of `chan` floats per grid cell.
void MLDB_Full_Descriptor_Invoker::MLDB_Fill_Values(float* values, int sample_step, const int level,
                                                    float xf, float yf, float co, float si, float scale) const
{
    const Pyramid& evolution = *evolution_;
    int pattern_size = options_->descriptor_pattern_size;
    int chan = options_->descriptor_channels;
    const Mat Lx = evolution[level].Lx;
    const Mat Ly = evolution[level].Ly;
    const Mat Lt = evolution[level].Lt;

    const Size size = Lt.size();
    CV_Assert(size == Lx.size());
    CV_Assert(size == Ly.size());

    int valpos = 0;
    for (int i = -pattern_size; i < pattern_size; i += sample_step) {
        for (int j = -pattern_size; j < pattern_size; j += sample_step) {
            float di = 0.0f, dx = 0.0f, dy = 0.0f;

            int nsamples = 0;
            for (int k = i; k < i + sample_step; k++) {
                for (int l = j; l < j + sample_step; l++) {
                    // Sample point rotated by the keypoint orientation
                    float sample_y = yf + (l*co * scale + k*si*scale);
                    float sample_x = xf + (-l*si * scale + k*co*scale);

                    int y1 = cvRound(sample_y);
                    int x1 = cvRound(sample_x);

                    if (y1 < 0 || y1 >= Lt.rows || x1 < 0 || x1 >= Lt.cols)
                        continue; // Boundaries

                    float ri = Lt.at<float>(y1,
x1);
                    di += ri;

                    if(chan > 1) {
                        float rx = Lx.at<float>(y1, x1);
                        float ry = Ly.at<float>(y1, x1);
                        if (chan == 2) {
                            // 2-channel mode stores the gradient magnitude
                            dx += sqrtf(rx*rx + ry*ry);
                        }
                        else {
                            // 3-channel mode stores the derivatives rotated into the keypoint frame
                            float rry = rx*co + ry*si;
                            float rrx = -rx*si + ry*co;
                            dx += rrx;
                            dy += rry;
                        }
                    }
                    nsamples++;
                }
            }

            if (nsamples > 0)
            {
                const float nsamples_inv = 1.0f / nsamples;
                di *= nsamples_inv;
                dx *= nsamples_inv;
                dy *= nsamples_inv;
            }

            values[valpos] = di;
            if (chan > 1) {
                values[valpos + 1] = dx;
            }
            if (chan > 2) {
                values[valpos + 2] = dy;
            }
            valpos += chan;
        }
    }
}

// Pairwise-compares values[] per channel and appends one bit per comparison at dpos.
// Floats are reinterpreted as ints via CV_TOGGLE_FLT so that integer comparison
// matches float ordering (cheaper than comparing floats directly).
void MLDB_Full_Descriptor_Invoker::MLDB_Binary_Comparisons(float* values, unsigned char* desc,
                                                           int count, int& dpos) const {
    int chan = options_->descriptor_channels;
    int* ivalues = (int*) values;
    for(int i = 0; i < count * chan; i++) {
        ivalues[i] = CV_TOGGLE_FLT(ivalues[i]);
    }

    for(int pos = 0; pos < chan; pos++) {
        for (int i = 0; i < count; i++) {
            int ival = ivalues[chan * i + pos];
            for (int j = i + 1; j < count; j++) {
                if (ival > ivalues[chan * j + pos]) {
                    desc[dpos >> 3] |= (1 << (dpos & 7));
                }
                dpos++;
            }
        }
    }
}

/* ************************************************************************* */
/**
 * @brief This method computes the descriptor of the provided keypoint given the
 * main orientation of the keypoint
 * @param kpt Input keypoint
 * @param desc Descriptor vector
 */
void MLDB_Full_Descriptor_Invoker::Get_MLDB_Full_Descriptor(const KeyPoint& kpt, unsigned char *desc, int desc_size) const {

    const int max_channels = 3;
    CV_Assert(options_->descriptor_channels <= max_channels);
    const int pattern_size = options_->descriptor_pattern_size;

    float values[16*max_channels];
    CV_Assert((pattern_size & 1) == 0);
    //const double size_mult[3] = {1, 2.0/3.0, 1.0/2.0};
    const int sample_step[3] = { // static_cast<int>(ceil(pattern_size * size_mult[lvl]))
        pattern_size,
        divUp(pattern_size * 2, 3),
        divUp(pattern_size, 2)
    };

    // Keypoint information converted to this level's resolution (angle in degrees -> radians)
    float ratio = (float)(1 << kpt.octave);
    float scale = (float)cvRound(0.5f*kpt.size / ratio);
    float xf = kpt.pt.x / ratio;
    float yf = kpt.pt.y / ratio;
    float angle = kpt.angle * static_cast<float>(CV_PI / 180.f);
    float co = cos(angle);
    float si = sin(angle);

    memset(desc, 0, desc_size);

    // Three grids (2x2, 3x3, 4x4): fill the averaged values, then append comparison bits.
    int dpos = 0;
    for(int lvl = 0; lvl < 3; lvl++)
    {
        int val_count = (lvl + 2) * (lvl + 2);
        MLDB_Fill_Values(values, sample_step[lvl], kpt.class_id, xf, yf, co, si, scale);
        MLDB_Binary_Comparisons(values, desc, val_count, dpos);
    }

    CV_Assert(dpos == 486);
    CV_Assert(divUp(dpos, 8) == desc_size);
}

/* ************************************************************************* */
/**
 * @brief This method computes the M-LDB descriptor of the provided keypoint given the
 * main orientation of the keypoint. The descriptor is computed based on a subset of
 * the bits of the whole descriptor
 * @param kpt Input keypoint
 * @param desc Descriptor vector
 */
void MLDB_Descriptor_Subset_Invoker::Get_MLDB_Descriptor_Subset(const KeyPoint& kpt, unsigned char *desc, int desc_size) const {

    float rx = 0.f, ry = 0.f;
    float sample_x = 0.f, sample_y = 0.f;

    const AKAZEOptions & options = *options_;
    const Pyramid& evolution = *evolution_;

    // Get the information from the keypoint
    float ratio = (float)(1 << kpt.octave);
    int scale = cvRound(0.5f*kpt.size / ratio);
    float angle = kpt.angle * static_cast<float>(CV_PI / 180.f);
    const int level = kpt.class_id;
    const Mat Lx = evolution[level].Lx;
    const Mat Ly = evolution[level].Ly;
    const Mat Lt = evolution[level].Lt;
    float yf = kpt.pt.y / ratio;
    float xf = kpt.pt.x / ratio;
    float co = cos(angle);
    float si = sin(angle);

    // Allocate memory for the matrix of values
    // Buffer for the M-LDB descriptor
    const int
max_channels = 3;
    const int channels = options.descriptor_channels;
    CV_Assert(channels <= max_channels);
    // One value group per cell of all three grids: 4 + 9 + 16 cells
    float values[(4 + 9 + 16)*max_channels] = { 0 };

    // Sample everything, but only do the comparisons
    const int pattern_size = options.descriptor_pattern_size;
    CV_Assert((pattern_size & 1) == 0);
    const int sample_steps[3] = {
        pattern_size,
        divUp(pattern_size * 2, 3),
        divUp(pattern_size, 2)
    };

    // descriptorSamples_ rows: (grid index, cell y origin, cell x origin)
    for (int i = 0; i < descriptorSamples_.rows; i++) {
        const int *coords = descriptorSamples_.ptr<int>(i);
        CV_Assert(coords[0] >= 0 && coords[0] < 3);
        const int sample_step = sample_steps[coords[0]];
        float di = 0.f, dx = 0.f, dy = 0.f;

        for (int k = coords[1]; k < coords[1] + sample_step; k++) {
            for (int l = coords[2]; l < coords[2] + sample_step; l++) {

                // Get the coordinates of the sample point, rotated by the keypoint angle
                sample_y = yf + (l*scale*co + k*scale*si);
                sample_x = xf + (-l*scale*si + k*scale*co);

                const int y1 = cvRound(sample_y);
                const int x1 = cvRound(sample_x);

                if (x1 < 0 || y1 < 0 || x1 >= Lt.cols || y1 >= Lt.rows)
                    continue; // Boundaries

                di += Lt.at<float>(y1, x1);

                if (options.descriptor_channels > 1) {
                    rx = Lx.at<float>(y1, x1);
                    ry = Ly.at<float>(y1, x1);

                    if (options.descriptor_channels == 2) {
                        dx += sqrtf(rx*rx + ry*ry);
                    }
                    else if (options.descriptor_channels == 3) {
                        // Get the x and y derivatives on the rotated axis
                        dx += rx*co + ry*si;
                        dy += -rx*si + ry*co;
                    }
                }
            }
        }

        float* pValues = &values[channels * i];
        pValues[0] = di;

        if (channels == 2) {
            pValues[1] = dx;
        }
        else if (channels == 3) {
            pValues[1] = dx;
            pValues[2] = dy;
        }
    }

    // Do the comparisons: descriptorBits_ holds (index A, index B) pairs into values[]
    const int *comps = descriptorBits_.ptr<int>(0);

    CV_Assert(divUp(descriptorBits_.rows, 8) == desc_size);
    memset(desc, 0, desc_size);

    for (int i = 0; i<descriptorBits_.rows; i++) {
        if (values[comps[2 * i]] > values[comps[2 * i + 1]]) {
            desc[i / 8] |= (1 << (i % 8));
        }
    }
}

/* ************************************************************************* */
/**
 * @brief This method computes the upright (not rotation invariant) M-LDB descriptor
 * of the provided keypoint given the main orientation of the keypoint.
 * The descriptor is computed based on a subset of the bits of the whole descriptor
 * @param kpt Input keypoint
 * @param desc Descriptor vector
 */
void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset(const KeyPoint& kpt, unsigned char *desc, int desc_size) const {

    float di = 0.0f, dx = 0.0f, dy = 0.0f;
    float rx = 0.0f, ry = 0.0f;
    float sample_x = 0.0f, sample_y = 0.0f;
    int x1 = 0, y1 = 0;

    const AKAZEOptions & options = *options_;
    const Pyramid& evolution = *evolution_;

    // Get the information from the keypoint
    float ratio = (float)(1 << kpt.octave);
    int scale = cvRound(0.5f*kpt.size / ratio);
    const int level = kpt.class_id;
    const Mat Lx = evolution[level].Lx;
    const Mat Ly = evolution[level].Ly;
    const Mat Lt = evolution[level].Lt;
    float yf = kpt.pt.y / ratio;
    float xf = kpt.pt.x / ratio;

    // Allocate memory for the matrix of values
    const int max_channels = 3;
    const int channels = options.descriptor_channels;
    CV_Assert(channels <= max_channels);
    float values[(4 + 9 + 16)*max_channels] = { 0 };

    const int pattern_size = options.descriptor_pattern_size;
    CV_Assert((pattern_size & 1) == 0);
    const int sample_steps[3] = {
        pattern_size,
        divUp(pattern_size * 2, 3),
        divUp(pattern_size, 2)
    };

    for (int i = 0; i < descriptorSamples_.rows; i++) {
        const int *coords = descriptorSamples_.ptr<int>(i);
        CV_Assert(coords[0] >= 0 && coords[0] < 3);
        int sample_step = sample_steps[coords[0]];
        di = 0.0f, dx = 0.0f, dy = 0.0f;

        for (int k = coords[1]; k < coords[1] + sample_step; k++) {
            for (int l = coords[2]; l <
coords[2] + sample_step; l++) {21452146// Get the coordinates of the sample point2147sample_y = yf + l*scale;2148sample_x = xf + k*scale;21492150y1 = cvRound(sample_y);2151x1 = cvRound(sample_x);21522153if (x1 < 0 || y1 < 0 || x1 >= Lt.cols || y1 >= Lt.rows)2154continue; // Boundaries21552156di += Lt.at<float>(y1, x1);21572158if (options.descriptor_channels > 1) {2159rx = Lx.at<float>(y1, x1);2160ry = Ly.at<float>(y1, x1);21612162if (options.descriptor_channels == 2) {2163dx += sqrtf(rx*rx + ry*ry);2164}2165else if (options.descriptor_channels == 3) {2166dx += rx;2167dy += ry;2168}2169}2170}2171}21722173float* pValues = &values[channels * i];2174pValues[0] = di;21752176if (options.descriptor_channels == 2) {2177pValues[1] = dx;2178}2179else if (options.descriptor_channels == 3) {2180pValues[1] = dx;2181pValues[2] = dy;2182}2183}21842185// Do the comparisons2186const int *comps = descriptorBits_.ptr<int>(0);21872188CV_Assert(divUp(descriptorBits_.rows, 8) == desc_size);2189memset(desc, 0, desc_size);21902191for (int i = 0; i<descriptorBits_.rows; i++) {2192if (values[comps[2 * i]] > values[comps[2 * i + 1]]) {2193desc[i / 8] |= (1 << (i % 8));2194}2195}2196}21972198/* ************************************************************************* */2199/**2200* @brief This function computes a (quasi-random) list of bits to be taken2201* from the full descriptor. 
To speed the extraction, the function creates2202* a list of the samples that are involved in generating at least a bit (sampleList)2203* and a list of the comparisons between those samples (comparisons)2204* @param sampleList2205* @param comparisons The matrix with the binary comparisons2206* @param nbits The number of bits of the descriptor2207* @param pattern_size The pattern size for the binary descriptor2208* @param nchannels Number of channels to consider in the descriptor (1-3)2209* @note The function keeps the 18 bits (3-channels by 6 comparisons) of the2210* coarser grid, since it provides the most robust estimations2211*/2212void generateDescriptorSubsample(Mat& sampleList, Mat& comparisons, int nbits,2213int pattern_size, int nchannels) {22142215int ssz = 0;2216for (int i = 0; i < 3; i++) {2217int gz = (i + 2)*(i + 2);2218ssz += gz*(gz - 1) / 2;2219}2220ssz *= nchannels;22212222CV_Assert(ssz == 162*nchannels);2223CV_Assert(nbits <= ssz && "Descriptor size can't be bigger than full descriptor (486 = 162*3 - 3 channels)");22242225// Since the full descriptor is usually under 10k elements, we pick2226// the selection from the full matrix. We take as many samples per2227// pick as the number of channels. For every pick, we2228// take the two samples involved and put them in the sampling list22292230Mat_<int> fullM(ssz / nchannels, 5);2231for (int i = 0, c = 0; i < 3; i++) {2232int gdiv = i + 2; //grid divisions, per row2233int gsz = gdiv*gdiv;2234int psz = divUp(2*pattern_size, gdiv);22352236for (int j = 0; j < gsz; j++) {2237for (int k = j + 1; k < gsz; k++, c++) {2238fullM(c, 0) = i;2239fullM(c, 1) = psz*(j % gdiv) - pattern_size;2240fullM(c, 2) = psz*(j / gdiv) - pattern_size;2241fullM(c, 3) = psz*(k % gdiv) - pattern_size;2242fullM(c, 4) = psz*(k / gdiv) - pattern_size;2243}2244}2245}22462247RNG rng(1024);2248const int npicks = divUp(nbits, nchannels);2249Mat_<int> comps = Mat_<int>(nchannels * npicks, 2);2250comps = 1000;22512252// Select some samples. 
A sample includes all channels2253int count = 0;2254Mat_<int> samples(29, 3);2255Mat_<int> fullcopy = fullM.clone();2256samples = -1;22572258for (int i = 0; i < npicks; i++) {2259int k = rng(fullM.rows - i);2260if (i < 6) {2261// Force use of the coarser grid values and comparisons2262k = i;2263}22642265bool n = true;22662267for (int j = 0; j < count; j++) {2268if (samples(j, 0) == fullcopy(k, 0) && samples(j, 1) == fullcopy(k, 1) && samples(j, 2) == fullcopy(k, 2)) {2269n = false;2270comps(i*nchannels, 0) = nchannels*j;2271comps(i*nchannels + 1, 0) = nchannels*j + 1;2272comps(i*nchannels + 2, 0) = nchannels*j + 2;2273break;2274}2275}22762277if (n) {2278samples(count, 0) = fullcopy(k, 0);2279samples(count, 1) = fullcopy(k, 1);2280samples(count, 2) = fullcopy(k, 2);2281comps(i*nchannels, 0) = nchannels*count;2282comps(i*nchannels + 1, 0) = nchannels*count + 1;2283comps(i*nchannels + 2, 0) = nchannels*count + 2;2284count++;2285}22862287n = true;2288for (int j = 0; j < count; j++) {2289if (samples(j, 0) == fullcopy(k, 0) && samples(j, 1) == fullcopy(k, 3) && samples(j, 2) == fullcopy(k, 4)) {2290n = false;2291comps(i*nchannels, 1) = nchannels*j;2292comps(i*nchannels + 1, 1) = nchannels*j + 1;2293comps(i*nchannels + 2, 1) = nchannels*j + 2;2294break;2295}2296}22972298if (n) {2299samples(count, 0) = fullcopy(k, 0);2300samples(count, 1) = fullcopy(k, 3);2301samples(count, 2) = fullcopy(k, 4);2302comps(i*nchannels, 1) = nchannels*count;2303comps(i*nchannels + 1, 1) = nchannels*count + 1;2304comps(i*nchannels + 2, 1) = nchannels*count + 2;2305count++;2306}23072308Mat tmp = fullcopy.row(k);2309fullcopy.row(fullcopy.rows - i - 1).copyTo(tmp);2310}23112312sampleList = samples.rowRange(0, count).clone();2313comparisons = comps.rowRange(0, nbits).clone();2314}23152316}231723182319