// Path: blob/master/modules/features2d/include/opencv2/features2d.hpp
// 16339 views
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or
otherwise) arising in any way out of38// the use of this software, even if advised of the possibility of such damage.39//40//M*/4142#ifndef OPENCV_FEATURES_2D_HPP43#define OPENCV_FEATURES_2D_HPP4445#include "opencv2/opencv_modules.hpp"46#include "opencv2/core.hpp"4748#ifdef HAVE_OPENCV_FLANN49#include "opencv2/flann/miniflann.hpp"50#endif5152/**53@defgroup features2d 2D Features Framework54@{55@defgroup features2d_main Feature Detection and Description56@defgroup features2d_match Descriptor Matchers5758Matchers of keypoint descriptors in OpenCV have wrappers with a common interface that enables you to59easily switch between different algorithms solving the same problem. This section is devoted to60matching descriptors that are represented as vectors in a multidimensional space. All objects that61implement vector descriptor matchers inherit the DescriptorMatcher interface.6263@note64- An example explaining keypoint matching can be found at65opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp66- An example on descriptor matching evaluation can be found at67opencv_source_code/samples/cpp/detector_descriptor_matcher_evaluation.cpp68- An example on one to many image matching can be found at69opencv_source_code/samples/cpp/matching_to_many_images.cpp7071@defgroup features2d_draw Drawing Function of Keypoints and Matches72@defgroup features2d_category Object Categorization7374This section describes approaches based on local 2D features and used to categorize objects.7576@note77- A complete Bag-Of-Words sample can be found at78opencv_source_code/samples/cpp/bagofwords_classification.cpp79- (Python) An example using the features2D framework to perform object categorization can be80found at opencv_source_code/samples/python/find_obj.py8182@}83*/8485namespace cv86{8788//! @addtogroup features2d89//! @{9091// //! 
writes vector of keypoints to the file storage92// CV_EXPORTS void write(FileStorage& fs, const String& name, const std::vector<KeyPoint>& keypoints);93// //! reads vector of keypoints from the specified file storage node94// CV_EXPORTS void read(const FileNode& node, CV_OUT std::vector<KeyPoint>& keypoints);9596/** @brief A class filters a vector of keypoints.9798Because now it is difficult to provide a convenient interface for all usage scenarios of the99keypoints filter class, it has only several needed by now static methods.100*/101class CV_EXPORTS KeyPointsFilter102{103public:104KeyPointsFilter(){}105106/*107* Remove keypoints within borderPixels of an image edge.108*/109static void runByImageBorder( std::vector<KeyPoint>& keypoints, Size imageSize, int borderSize );110/*111* Remove keypoints of sizes out of range.112*/113static void runByKeypointSize( std::vector<KeyPoint>& keypoints, float minSize,114float maxSize=FLT_MAX );115/*116* Remove keypoints from some image by mask for pixels of this image.117*/118static void runByPixelsMask( std::vector<KeyPoint>& keypoints, const Mat& mask );119/*120* Remove duplicated keypoints.121*/122static void removeDuplicated( std::vector<KeyPoint>& keypoints );123/*124* Remove duplicated keypoints and sort the remaining keypoints125*/126static void removeDuplicatedSorted( std::vector<KeyPoint>& keypoints );127128/*129* Retain the specified number of the best keypoints (according to the response)130*/131static void retainBest( std::vector<KeyPoint>& keypoints, int npoints );132};133134135/************************************ Base Classes ************************************/136137/** @brief Abstract base class for 2D image feature detectors and descriptor extractors138*/139#ifdef __EMSCRIPTEN__140class CV_EXPORTS_W Feature2D : public Algorithm141#else142class CV_EXPORTS_W Feature2D : public virtual Algorithm143#endif144{145public:146virtual ~Feature2D();147148/** @brief Detects keypoints in an image (first variant) or image 
set (second variant).149150@param image Image.151@param keypoints The detected keypoints. In the second variant of the method keypoints[i] is a set152of keypoints detected in images[i] .153@param mask Mask specifying where to look for keypoints (optional). It must be a 8-bit integer154matrix with non-zero values in the region of interest.155*/156CV_WRAP virtual void detect( InputArray image,157CV_OUT std::vector<KeyPoint>& keypoints,158InputArray mask=noArray() );159160/** @overload161@param images Image set.162@param keypoints The detected keypoints. In the second variant of the method keypoints[i] is a set163of keypoints detected in images[i] .164@param masks Masks for each input image specifying where to look for keypoints (optional).165masks[i] is a mask for images[i].166*/167CV_WRAP virtual void detect( InputArrayOfArrays images,168CV_OUT std::vector<std::vector<KeyPoint> >& keypoints,169InputArrayOfArrays masks=noArray() );170171/** @brief Computes the descriptors for a set of keypoints detected in an image (first variant) or image set172(second variant).173174@param image Image.175@param keypoints Input collection of keypoints. Keypoints for which a descriptor cannot be176computed are removed. Sometimes new keypoints can be added, for example: SIFT duplicates keypoint177with several dominant orientations (for each orientation).178@param descriptors Computed descriptors. In the second variant of the method descriptors[i] are179descriptors computed for a keypoints[i]. Row j is the keypoints (or keypoints[i]) is the180descriptor for keypoint j-th keypoint.181*/182CV_WRAP virtual void compute( InputArray image,183CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints,184OutputArray descriptors );185186/** @overload187188@param images Image set.189@param keypoints Input collection of keypoints. Keypoints for which a descriptor cannot be190computed are removed. 
Sometimes new keypoints can be added, for example: SIFT duplicates keypoint191with several dominant orientations (for each orientation).192@param descriptors Computed descriptors. In the second variant of the method descriptors[i] are193descriptors computed for a keypoints[i]. Row j is the keypoints (or keypoints[i]) is the194descriptor for keypoint j-th keypoint.195*/196CV_WRAP virtual void compute( InputArrayOfArrays images,197CV_OUT CV_IN_OUT std::vector<std::vector<KeyPoint> >& keypoints,198OutputArrayOfArrays descriptors );199200/** Detects keypoints and computes the descriptors */201CV_WRAP virtual void detectAndCompute( InputArray image, InputArray mask,202CV_OUT std::vector<KeyPoint>& keypoints,203OutputArray descriptors,204bool useProvidedKeypoints=false );205206CV_WRAP virtual int descriptorSize() const;207CV_WRAP virtual int descriptorType() const;208CV_WRAP virtual int defaultNorm() const;209210CV_WRAP void write( const String& fileName ) const;211212CV_WRAP void read( const String& fileName );213214virtual void write( FileStorage&) const CV_OVERRIDE;215216// see corresponding cv::Algorithm method217CV_WRAP virtual void read( const FileNode&) CV_OVERRIDE;218219//! Return true if detector object is empty220CV_WRAP virtual bool empty() const CV_OVERRIDE;221CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;222223// see corresponding cv::Algorithm method224CV_WRAP inline void write(const Ptr<FileStorage>& fs, const String& name = String()) const { Algorithm::write(fs, name); }225};226227/** Feature detectors in OpenCV have wrappers with a common interface that enables you to easily switch228between different algorithms solving the same problem. All objects that implement keypoint detectors229inherit the FeatureDetector interface. */230typedef Feature2D FeatureDetector;231232/** Extractors of keypoint descriptors in OpenCV have wrappers with a common interface that enables you233to easily switch between different algorithms solving the same problem. 
This section is devoted to234computing descriptors represented as vectors in a multidimensional space. All objects that implement235the vector descriptor extractors inherit the DescriptorExtractor interface.236*/237typedef Feature2D DescriptorExtractor;238239//! @addtogroup features2d_main240//! @{241242/** @brief Class implementing the BRISK keypoint detector and descriptor extractor, described in @cite LCS11 .243*/244class CV_EXPORTS_W BRISK : public Feature2D245{246public:247/** @brief The BRISK constructor248249@param thresh AGAST detection threshold score.250@param octaves detection octaves. Use 0 to do single scale.251@param patternScale apply this scale to the pattern used for sampling the neighbourhood of a252keypoint.253*/254CV_WRAP static Ptr<BRISK> create(int thresh=30, int octaves=3, float patternScale=1.0f);255256/** @brief The BRISK constructor for a custom pattern257258@param radiusList defines the radii (in pixels) where the samples around a keypoint are taken (for259keypoint scale 1).260@param numberList defines the number of sampling points on the sampling circle. Must be the same261size as radiusList..262@param dMax threshold for the short pairings used for descriptor formation (in pixels for keypoint263scale 1).264@param dMin threshold for the long pairings used for orientation determination (in pixels for265keypoint scale 1).266@param indexChange index remapping of the bits. */267CV_WRAP static Ptr<BRISK> create(const std::vector<float> &radiusList, const std::vector<int> &numberList,268float dMax=5.85f, float dMin=8.2f, const std::vector<int>& indexChange=std::vector<int>());269270/** @brief The BRISK constructor for a custom pattern, detection threshold and octaves271272@param thresh AGAST detection threshold score.273@param octaves detection octaves. 
Use 0 to do single scale.274@param radiusList defines the radii (in pixels) where the samples around a keypoint are taken (for275keypoint scale 1).276@param numberList defines the number of sampling points on the sampling circle. Must be the same277size as radiusList..278@param dMax threshold for the short pairings used for descriptor formation (in pixels for keypoint279scale 1).280@param dMin threshold for the long pairings used for orientation determination (in pixels for281keypoint scale 1).282@param indexChange index remapping of the bits. */283CV_WRAP static Ptr<BRISK> create(int thresh, int octaves, const std::vector<float> &radiusList,284const std::vector<int> &numberList, float dMax=5.85f, float dMin=8.2f,285const std::vector<int>& indexChange=std::vector<int>());286CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;287};288289/** @brief Class implementing the ORB (*oriented BRIEF*) keypoint detector and descriptor extractor290291described in @cite RRKB11 . The algorithm uses FAST in pyramids to detect stable keypoints, selects292the strongest features using FAST or Harris response, finds their orientation using first-order293moments and computes the descriptors using BRIEF (where the coordinates of random point pairs (or294k-tuples) are rotated according to the measured orientation).295*/296class CV_EXPORTS_W ORB : public Feature2D297{298public:299enum ScoreType { HARRIS_SCORE=0, FAST_SCORE=1 };300static const int kBytes = 32;301302/** @brief The ORB constructor303304@param nfeatures The maximum number of features to retain.305@param scaleFactor Pyramid decimation ratio, greater than 1. scaleFactor==2 means the classical306pyramid, where each next level has 4x less pixels than the previous, but such a big scale factor307will degrade feature matching scores dramatically. 
On the other hand, too close to 1 scale factor308will mean that to cover certain scale range you will need more pyramid levels and so the speed309will suffer.310@param nlevels The number of pyramid levels. The smallest level will have linear size equal to311input_image_linear_size/pow(scaleFactor, nlevels - firstLevel).312@param edgeThreshold This is size of the border where the features are not detected. It should313roughly match the patchSize parameter.314@param firstLevel The level of pyramid to put source image to. Previous layers are filled315with upscaled source image.316@param WTA_K The number of points that produce each element of the oriented BRIEF descriptor. The317default value 2 means the BRIEF where we take a random point pair and compare their brightnesses,318so we get 0/1 response. Other possible values are 3 and 4. For example, 3 means that we take 3319random points (of course, those point coordinates are random, but they are generated from the320pre-defined seed, so each element of BRIEF descriptor is computed deterministically from the pixel321rectangle), find point of maximum brightness and output index of the winner (0, 1 or 2). Such322output will occupy 2 bits, and therefore it will need a special variant of Hamming distance,323denoted as NORM_HAMMING2 (2 bits per bin). When WTA_K=4, we take 4 random points to compute each324bin (that will also occupy 2 bits with possible values 0, 1, 2 or 3).325@param scoreType The default HARRIS_SCORE means that Harris algorithm is used to rank features326(the score is written to KeyPoint::score and is used to retain best nfeatures features);327FAST_SCORE is alternative value of the parameter that produces slightly less stable keypoints,328but it is a little faster to compute.329@param patchSize size of the patch used by the oriented BRIEF descriptor. 
Of course, on smaller330pyramid layers the perceived image area covered by a feature will be larger.331@param fastThreshold332*/333CV_WRAP static Ptr<ORB> create(int nfeatures=500, float scaleFactor=1.2f, int nlevels=8, int edgeThreshold=31,334int firstLevel=0, int WTA_K=2, ORB::ScoreType scoreType=ORB::HARRIS_SCORE, int patchSize=31, int fastThreshold=20);335336CV_WRAP virtual void setMaxFeatures(int maxFeatures) = 0;337CV_WRAP virtual int getMaxFeatures() const = 0;338339CV_WRAP virtual void setScaleFactor(double scaleFactor) = 0;340CV_WRAP virtual double getScaleFactor() const = 0;341342CV_WRAP virtual void setNLevels(int nlevels) = 0;343CV_WRAP virtual int getNLevels() const = 0;344345CV_WRAP virtual void setEdgeThreshold(int edgeThreshold) = 0;346CV_WRAP virtual int getEdgeThreshold() const = 0;347348CV_WRAP virtual void setFirstLevel(int firstLevel) = 0;349CV_WRAP virtual int getFirstLevel() const = 0;350351CV_WRAP virtual void setWTA_K(int wta_k) = 0;352CV_WRAP virtual int getWTA_K() const = 0;353354CV_WRAP virtual void setScoreType(ORB::ScoreType scoreType) = 0;355CV_WRAP virtual ORB::ScoreType getScoreType() const = 0;356357CV_WRAP virtual void setPatchSize(int patchSize) = 0;358CV_WRAP virtual int getPatchSize() const = 0;359360CV_WRAP virtual void setFastThreshold(int fastThreshold) = 0;361CV_WRAP virtual int getFastThreshold() const = 0;362CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;363};364365/** @brief Maximally stable extremal region extractor366367The class encapsulates all the parameters of the %MSER extraction algorithm (see [wiki368article](http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions)).369370- there are two different implementation of %MSER: one for grey image, one for color image371372- the grey image algorithm is taken from: @cite nister2008linear ; the paper claims to be faster373than union-find method; it actually get 1.5~2m/s on my centrino L7200 1.2GHz laptop.374375- the color image algorithm is taken from: 
@cite forssen2007maximally ; it should be much slower376than grey image method ( 3~4 times ); the chi_table.h file is taken directly from paper's source377code which is distributed under GPL.378379- (Python) A complete example showing the use of the %MSER detector can be found at samples/python/mser.py380*/381class CV_EXPORTS_W MSER : public Feature2D382{383public:384/** @brief Full consturctor for %MSER detector385386@param _delta it compares \f$(size_{i}-size_{i-delta})/size_{i-delta}\f$387@param _min_area prune the area which smaller than minArea388@param _max_area prune the area which bigger than maxArea389@param _max_variation prune the area have similar size to its children390@param _min_diversity for color image, trace back to cut off mser with diversity less than min_diversity391@param _max_evolution for color image, the evolution steps392@param _area_threshold for color image, the area threshold to cause re-initialize393@param _min_margin for color image, ignore too small margin394@param _edge_blur_size for color image, the aperture size for edge blur395*/396CV_WRAP static Ptr<MSER> create( int _delta=5, int _min_area=60, int _max_area=14400,397double _max_variation=0.25, double _min_diversity=.2,398int _max_evolution=200, double _area_threshold=1.01,399double _min_margin=0.003, int _edge_blur_size=5 );400401/** @brief Detect %MSER regions402403@param image input image (8UC1, 8UC3 or 8UC4, must be greater or equal than 3x3)404@param msers resulting list of point sets405@param bboxes resulting bounding boxes406*/407CV_WRAP virtual void detectRegions( InputArray image,408CV_OUT std::vector<std::vector<Point> >& msers,409CV_OUT std::vector<Rect>& bboxes ) = 0;410411CV_WRAP virtual void setDelta(int delta) = 0;412CV_WRAP virtual int getDelta() const = 0;413414CV_WRAP virtual void setMinArea(int minArea) = 0;415CV_WRAP virtual int getMinArea() const = 0;416417CV_WRAP virtual void setMaxArea(int maxArea) = 0;418CV_WRAP virtual int getMaxArea() const = 
0;419420CV_WRAP virtual void setPass2Only(bool f) = 0;421CV_WRAP virtual bool getPass2Only() const = 0;422CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;423};424425//! @} features2d_main426427//! @addtogroup features2d_main428//! @{429430/** @brief Wrapping class for feature detection using the FAST method. :431*/432class CV_EXPORTS_W FastFeatureDetector : public Feature2D433{434public:435enum DetectorType436{437TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2438};439enum440{441THRESHOLD = 10000, NONMAX_SUPPRESSION=10001, FAST_N=10002442};443444445CV_WRAP static Ptr<FastFeatureDetector> create( int threshold=10,446bool nonmaxSuppression=true,447FastFeatureDetector::DetectorType type=FastFeatureDetector::TYPE_9_16 );448449CV_WRAP virtual void setThreshold(int threshold) = 0;450CV_WRAP virtual int getThreshold() const = 0;451452CV_WRAP virtual void setNonmaxSuppression(bool f) = 0;453CV_WRAP virtual bool getNonmaxSuppression() const = 0;454455CV_WRAP virtual void setType(FastFeatureDetector::DetectorType type) = 0;456CV_WRAP virtual FastFeatureDetector::DetectorType getType() const = 0;457CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;458};459460/** @overload */461CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,462int threshold, bool nonmaxSuppression=true );463464/** @brief Detects corners using the FAST algorithm465466@param image grayscale image where keypoints (corners) are detected.467@param keypoints keypoints detected on the image.468@param threshold threshold on difference between intensity of the central pixel and pixels of a469circle around this pixel.470@param nonmaxSuppression if true, non-maximum suppression is applied to detected corners471(keypoints).472@param type one of the three neighborhoods as defined in the paper:473FastFeatureDetector::TYPE_9_16, FastFeatureDetector::TYPE_7_12,474FastFeatureDetector::TYPE_5_8475476Detects corners using the FAST algorithm by @cite Rosten06 .477478@note In Python 
API, types are given as cv2.FAST_FEATURE_DETECTOR_TYPE_5_8,479cv2.FAST_FEATURE_DETECTOR_TYPE_7_12 and cv2.FAST_FEATURE_DETECTOR_TYPE_9_16. For corner480detection, use cv2.FAST.detect() method.481*/482CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,483int threshold, bool nonmaxSuppression, FastFeatureDetector::DetectorType type );484485//! @} features2d_main486487//! @addtogroup features2d_main488//! @{489490/** @brief Wrapping class for feature detection using the AGAST method. :491*/492class CV_EXPORTS_W AgastFeatureDetector : public Feature2D493{494public:495enum DetectorType496{497AGAST_5_8 = 0, AGAST_7_12d = 1, AGAST_7_12s = 2, OAST_9_16 = 3,498};499500enum501{502THRESHOLD = 10000, NONMAX_SUPPRESSION = 10001,503};504505CV_WRAP static Ptr<AgastFeatureDetector> create( int threshold=10,506bool nonmaxSuppression=true,507AgastFeatureDetector::DetectorType type = AgastFeatureDetector::OAST_9_16);508509CV_WRAP virtual void setThreshold(int threshold) = 0;510CV_WRAP virtual int getThreshold() const = 0;511512CV_WRAP virtual void setNonmaxSuppression(bool f) = 0;513CV_WRAP virtual bool getNonmaxSuppression() const = 0;514515CV_WRAP virtual void setType(AgastFeatureDetector::DetectorType type) = 0;516CV_WRAP virtual AgastFeatureDetector::DetectorType getType() const = 0;517CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;518};519520/** @overload */521CV_EXPORTS void AGAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,522int threshold, bool nonmaxSuppression=true );523524/** @brief Detects corners using the AGAST algorithm525526@param image grayscale image where keypoints (corners) are detected.527@param keypoints keypoints detected on the image.528@param threshold threshold on difference between intensity of the central pixel and pixels of a529circle around this pixel.530@param nonmaxSuppression if true, non-maximum suppression is applied to detected corners531(keypoints).532@param type one of the four neighborhoods 
as defined in the paper:533AgastFeatureDetector::AGAST_5_8, AgastFeatureDetector::AGAST_7_12d,534AgastFeatureDetector::AGAST_7_12s, AgastFeatureDetector::OAST_9_16535536For non-Intel platforms, there is a tree optimised variant of AGAST with same numerical results.537The 32-bit binary tree tables were generated automatically from original code using perl script.538The perl script and examples of tree generation are placed in features2d/doc folder.539Detects corners using the AGAST algorithm by @cite mair2010_agast .540541*/542CV_EXPORTS void AGAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,543int threshold, bool nonmaxSuppression, AgastFeatureDetector::DetectorType type );544545/** @brief Wrapping class for feature detection using the goodFeaturesToTrack function. :546*/547class CV_EXPORTS_W GFTTDetector : public Feature2D548{549public:550CV_WRAP static Ptr<GFTTDetector> create( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1,551int blockSize=3, bool useHarrisDetector=false, double k=0.04 );552CV_WRAP static Ptr<GFTTDetector> create( int maxCorners, double qualityLevel, double minDistance,553int blockSize, int gradiantSize, bool useHarrisDetector=false, double k=0.04 );554CV_WRAP virtual void setMaxFeatures(int maxFeatures) = 0;555CV_WRAP virtual int getMaxFeatures() const = 0;556557CV_WRAP virtual void setQualityLevel(double qlevel) = 0;558CV_WRAP virtual double getQualityLevel() const = 0;559560CV_WRAP virtual void setMinDistance(double minDistance) = 0;561CV_WRAP virtual double getMinDistance() const = 0;562563CV_WRAP virtual void setBlockSize(int blockSize) = 0;564CV_WRAP virtual int getBlockSize() const = 0;565566CV_WRAP virtual void setHarrisDetector(bool val) = 0;567CV_WRAP virtual bool getHarrisDetector() const = 0;568569CV_WRAP virtual void setK(double k) = 0;570CV_WRAP virtual double getK() const = 0;571CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;572};573574/** @brief Class for extracting blobs from an 
image. :575576The class implements a simple algorithm for extracting blobs from an image:5775781. Convert the source image to binary images by applying thresholding with several thresholds from579minThreshold (inclusive) to maxThreshold (exclusive) with distance thresholdStep between580neighboring thresholds.5812. Extract connected components from every binary image by findContours and calculate their582centers.5833. Group centers from several binary images by their coordinates. Close centers form one group that584corresponds to one blob, which is controlled by the minDistBetweenBlobs parameter.5854. From the groups, estimate final centers of blobs and their radiuses and return as locations and586sizes of keypoints.587588This class performs several filtrations of returned blobs. You should set filterBy\* to true/false589to turn on/off corresponding filtration. Available filtrations:590591- **By color**. This filter compares the intensity of a binary image at the center of a blob to592blobColor. If they differ, the blob is filtered out. Use blobColor = 0 to extract dark blobs593and blobColor = 255 to extract light blobs.594- **By area**. Extracted blobs have an area between minArea (inclusive) and maxArea (exclusive).595- **By circularity**. Extracted blobs have circularity596(\f$\frac{4*\pi*Area}{perimeter * perimeter}\f$) between minCircularity (inclusive) and597maxCircularity (exclusive).598- **By ratio of the minimum inertia to maximum inertia**. Extracted blobs have this ratio599between minInertiaRatio (inclusive) and maxInertiaRatio (exclusive).600- **By convexity**. 
Extracted blobs have convexity (area / area of blob convex hull) between601minConvexity (inclusive) and maxConvexity (exclusive).602603Default values of parameters are tuned to extract dark circular blobs.604*/605class CV_EXPORTS_W SimpleBlobDetector : public Feature2D606{607public:608struct CV_EXPORTS_W_SIMPLE Params609{610CV_WRAP Params();611CV_PROP_RW float thresholdStep;612CV_PROP_RW float minThreshold;613CV_PROP_RW float maxThreshold;614CV_PROP_RW size_t minRepeatability;615CV_PROP_RW float minDistBetweenBlobs;616617CV_PROP_RW bool filterByColor;618CV_PROP_RW uchar blobColor;619620CV_PROP_RW bool filterByArea;621CV_PROP_RW float minArea, maxArea;622623CV_PROP_RW bool filterByCircularity;624CV_PROP_RW float minCircularity, maxCircularity;625626CV_PROP_RW bool filterByInertia;627CV_PROP_RW float minInertiaRatio, maxInertiaRatio;628629CV_PROP_RW bool filterByConvexity;630CV_PROP_RW float minConvexity, maxConvexity;631632void read( const FileNode& fn );633void write( FileStorage& fs ) const;634};635636CV_WRAP static Ptr<SimpleBlobDetector>637create(const SimpleBlobDetector::Params ¶meters = SimpleBlobDetector::Params());638CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;639};640641//! @} features2d_main642643//! @addtogroup features2d_main644//! @{645646/** @brief Class implementing the KAZE keypoint detector and descriptor extractor, described in @cite ABD12 .647648@note AKAZE descriptor can only be used with KAZE or AKAZE keypoints .. [ABD12] KAZE Features. Pablo649F. Alcantarilla, Adrien Bartoli and Andrew J. Davison. 
In European Conference on Computer Vision650(ECCV), Fiorenze, Italy, October 2012.651*/652class CV_EXPORTS_W KAZE : public Feature2D653{654public:655enum DiffusivityType656{657DIFF_PM_G1 = 0,658DIFF_PM_G2 = 1,659DIFF_WEICKERT = 2,660DIFF_CHARBONNIER = 3661};662663/** @brief The KAZE constructor664665@param extended Set to enable extraction of extended (128-byte) descriptor.666@param upright Set to enable use of upright descriptors (non rotation-invariant).667@param threshold Detector response threshold to accept point668@param nOctaves Maximum octave evolution of the image669@param nOctaveLayers Default number of sublevels per scale level670@param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or671DIFF_CHARBONNIER672*/673CV_WRAP static Ptr<KAZE> create(bool extended=false, bool upright=false,674float threshold = 0.001f,675int nOctaves = 4, int nOctaveLayers = 4,676KAZE::DiffusivityType diffusivity = KAZE::DIFF_PM_G2);677678CV_WRAP virtual void setExtended(bool extended) = 0;679CV_WRAP virtual bool getExtended() const = 0;680681CV_WRAP virtual void setUpright(bool upright) = 0;682CV_WRAP virtual bool getUpright() const = 0;683684CV_WRAP virtual void setThreshold(double threshold) = 0;685CV_WRAP virtual double getThreshold() const = 0;686687CV_WRAP virtual void setNOctaves(int octaves) = 0;688CV_WRAP virtual int getNOctaves() const = 0;689690CV_WRAP virtual void setNOctaveLayers(int octaveLayers) = 0;691CV_WRAP virtual int getNOctaveLayers() const = 0;692693CV_WRAP virtual void setDiffusivity(KAZE::DiffusivityType diff) = 0;694CV_WRAP virtual KAZE::DiffusivityType getDiffusivity() const = 0;695CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;696};697698/** @brief Class implementing the AKAZE keypoint detector and descriptor extractor, described in @cite ANB13.699700@details AKAZE descriptors can only be used with KAZE or AKAZE keypoints. 
This class is thread-safe.

@note When you need descriptors use Feature2D::detectAndCompute, which
provides better performance. When using Feature2D::detect followed by
Feature2D::compute scale space pyramid is computed twice.

@note AKAZE implements T-API. When image is passed as UMat some parts of the algorithm
will use OpenCL.

@note [ANB13] Fast Explicit Diffusion for Accelerated Features in Nonlinear
Scale Spaces. Pablo F. Alcantarilla, Jesús Nuevo and Adrien Bartoli. In
British Machine Vision Conference (BMVC), Bristol, UK, September 2013.

*/
class CV_EXPORTS_W AKAZE : public Feature2D
{
public:
    // AKAZE descriptor type
    enum DescriptorType
    {
        DESCRIPTOR_KAZE_UPRIGHT = 2, ///< Upright descriptors, not invariant to rotation
        DESCRIPTOR_KAZE = 3,         ///< Floating-point KAZE descriptor (rotation-variant counterpart of DESCRIPTOR_KAZE_UPRIGHT)
        DESCRIPTOR_MLDB_UPRIGHT = 4, ///< Upright descriptors, not invariant to rotation
        DESCRIPTOR_MLDB = 5          ///< Binary MLDB descriptor (rotation-variant counterpart of DESCRIPTOR_MLDB_UPRIGHT)
    };

    /** @brief The AKAZE constructor

    @param descriptor_type Type of the extracted descriptor: DESCRIPTOR_KAZE,
    DESCRIPTOR_KAZE_UPRIGHT, DESCRIPTOR_MLDB or DESCRIPTOR_MLDB_UPRIGHT.
    @param descriptor_size Size of the descriptor in bits. 0 -\> Full size
    @param descriptor_channels Number of channels in the descriptor (1, 2, 3)
    @param threshold Detector response threshold to accept point
    @param nOctaves Maximum octave evolution of the image
    @param nOctaveLayers Default number of sublevels per scale level
    @param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or
    DIFF_CHARBONNIER
    */
    CV_WRAP static Ptr<AKAZE> create(AKAZE::DescriptorType descriptor_type = AKAZE::DESCRIPTOR_MLDB,
                                     int descriptor_size = 0, int descriptor_channels = 3,
                                     float threshold = 0.001f, int nOctaves = 4,
                                     int nOctaveLayers = 4, KAZE::DiffusivityType diffusivity = KAZE::DIFF_PM_G2);

    CV_WRAP virtual void setDescriptorType(AKAZE::DescriptorType dtype) = 0;
    CV_WRAP virtual AKAZE::DescriptorType getDescriptorType() const = 0;

    CV_WRAP virtual void setDescriptorSize(int dsize) = 0;
    CV_WRAP virtual int getDescriptorSize() const = 0;

    CV_WRAP virtual void setDescriptorChannels(int dch) = 0;
    CV_WRAP virtual int getDescriptorChannels() const = 0;

    CV_WRAP virtual void setThreshold(double threshold) = 0;
    CV_WRAP virtual double getThreshold() const = 0;

    CV_WRAP virtual void setNOctaves(int octaves) = 0;
    CV_WRAP virtual int getNOctaves() const = 0;

    CV_WRAP virtual void setNOctaveLayers(int octaveLayers) = 0;
    CV_WRAP virtual int getNOctaveLayers() const = 0;

    CV_WRAP virtual void setDiffusivity(KAZE::DiffusivityType diff) = 0;
    CV_WRAP virtual KAZE::DiffusivityType getDiffusivity() const = 0;
    CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
};

//! @} features2d_main

/****************************************************************************************\
*                                      Distance                                          *
\****************************************************************************************/

/*
 * Maps an element type to the type used to accumulate distances over it.
 * The primary template accumulates in the element type itself; the
 * specializations below promote small integer types to float so that
 * sums over many elements do not overflow.
 */
template<typename T>
struct CV_EXPORTS Accumulator
{
    typedef T Type;
};

template<> struct Accumulator<unsigned char>  { typedef float Type; };
template<> struct Accumulator<unsigned short> { typedef float Type; };
template<> struct Accumulator<char>           { typedef float Type; };
template<> struct Accumulator<short>          { typedef float Type; };

/*
 * Squared Euclidean distance functor.
 * operator() returns the squared L2 distance between two arrays of
 * `size` elements (delegates to normL2Sqr).
 */
template<class T>
struct CV_EXPORTS SL2
{
    static const NormTypes normType = NORM_L2SQR;
    typedef T ValueType;
    typedef typename Accumulator<T>::Type ResultType;

    ResultType operator()( const T* a, const T* b, int size ) const
    {
        return normL2Sqr<ValueType, ResultType>(a, b, size);
    }
};

/*
 * Euclidean distance functor.
 * operator() returns sqrt of the squared L2 distance between two arrays
 * of `size` elements.
 */
template<class T>
struct L2
{
    static const NormTypes normType = NORM_L2;
    typedef T ValueType;
    typedef typename Accumulator<T>::Type ResultType;

    ResultType operator()( const T* a, const T* b, int size ) const
    {
        return (ResultType)std::sqrt((double)normL2Sqr<ValueType, ResultType>(a, b, size));
    }
};

/*
 * Manhattan distance (city block distance) functor.
 * operator() returns the L1 distance between two arrays of `size`
 * elements (delegates to normL1).
 */
template<class T>
struct L1
{
    static const NormTypes normType = NORM_L1;
    typedef T ValueType;
    typedef typename Accumulator<T>::Type ResultType;

    ResultType operator()( const T* a, const T* b, int size ) const
    {
        return normL1<ValueType, ResultType>(a, b, size);
    }
};

/****************************************************************************************\
*                                  DescriptorMatcher                                     *
\****************************************************************************************/

//! @addtogroup features2d_match
//! @{

/** @brief Abstract base class for matching keypoint descriptors.

It has two groups of match methods: for matching descriptors of an image with another image or with
an image set.
*/
class CV_EXPORTS_W DescriptorMatcher : public Algorithm
{
public:
    enum MatcherType
    {
        FLANNBASED            = 1,
        BRUTEFORCE            = 2,
        BRUTEFORCE_L1         = 3,
        BRUTEFORCE_HAMMING    = 4,
        BRUTEFORCE_HAMMINGLUT = 5,
        BRUTEFORCE_SL2        = 6
    };

    virtual ~DescriptorMatcher();

    /** @brief Adds descriptors to train a CPU (trainDescCollection) or GPU (utrainDescCollection) descriptor
    collection.

    If the collection is not empty, the new descriptors are added to existing train descriptors.

    @param descriptors Descriptors to add. Each descriptors[i] is a set of descriptors from the same
    train image.
    */
    CV_WRAP virtual void add( InputArrayOfArrays descriptors );

    /** @brief Returns a constant link to the train descriptor collection trainDescCollection .
    */
    CV_WRAP const std::vector<Mat>& getTrainDescriptors() const;

    /** @brief Clears the train descriptor collections.
    */
    CV_WRAP virtual void clear() CV_OVERRIDE;

    /** @brief Returns true if there are no train descriptors in both collections.
    */
    CV_WRAP virtual bool empty() const CV_OVERRIDE;

    /** @brief Returns true if the descriptor matcher supports masking permissible matches.
    */
    CV_WRAP virtual bool isMaskSupported() const = 0;

    /** @brief Trains a descriptor matcher

    Trains a descriptor matcher (for example, the flann index). In all methods to match, the method
    train() is run every time before matching. Some descriptor matchers (for example, BruteForceMatcher)
    have an empty implementation of this method. Other matchers really train their inner structures (for
    example, FlannBasedMatcher trains flann::Index ).
    */
    CV_WRAP virtual void train();

    /** @brief Finds the best match for each descriptor from a query set.

    @param queryDescriptors Query set of descriptors.
    @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
    collection stored in the class object.
    @param matches Matches. If a query descriptor is masked out in mask , no match is added for this
    descriptor. So, matches size may be smaller than the query descriptors count.
    @param mask Mask specifying permissible matches between an input query and train matrices of
    descriptors.

    In the first variant of this method, the train descriptors are passed as an input argument. In the
    second variant of the method, train descriptors collection that was set by DescriptorMatcher::add is
    used. Optional mask (or masks) can be passed to specify which query and training descriptors can be
    matched. Namely, queryDescriptors[i] can be matched with trainDescriptors[j] only if
    mask.at\<uchar\>(i,j) is non-zero.
    */
    CV_WRAP void match( InputArray queryDescriptors, InputArray trainDescriptors,
                        CV_OUT std::vector<DMatch>& matches, InputArray mask=noArray() ) const;

    /** @brief Finds the k best matches for each descriptor from a query set.

    @param queryDescriptors Query set of descriptors.
    @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
    collection stored in the class object.
    @param mask Mask specifying permissible matches between an input query and train matrices of
    descriptors.
    @param matches Matches. Each matches[i] is k or less matches for the same query descriptor.
    @param k Count of best matches found per each query descriptor or less if a query descriptor has
    less than k possible matches in total.
    @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
    false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
    the matches vector does not contain matches for fully masked-out query descriptors.

    These extended variants of DescriptorMatcher::match methods find several best matches for each query
    descriptor. The matches are returned in the distance increasing order. See DescriptorMatcher::match
    for the details about query and train descriptors.
    */
    CV_WRAP void knnMatch( InputArray queryDescriptors, InputArray trainDescriptors,
                           CV_OUT std::vector<std::vector<DMatch> >& matches, int k,
                           InputArray mask=noArray(), bool compactResult=false ) const;

    /** @brief For each query descriptor, finds the training descriptors not farther than the specified distance.

    @param queryDescriptors Query set of descriptors.
    @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
    collection stored in the class object.
    @param matches Found matches.
    @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
    false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
    the matches vector does not contain matches for fully masked-out query descriptors.
    @param maxDistance Threshold for the distance between matched descriptors. Distance means here
    metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured
    in Pixels)!
    @param mask Mask specifying permissible matches between an input query and train matrices of
    descriptors.

    For each query descriptor, the methods find such training descriptors that the distance between the
    query descriptor and the training descriptor is equal or smaller than maxDistance. Found matches are
    returned in the distance increasing order.
    */
    CV_WRAP void radiusMatch( InputArray queryDescriptors, InputArray trainDescriptors,
                              CV_OUT std::vector<std::vector<DMatch> >& matches, float maxDistance,
                              InputArray mask=noArray(), bool compactResult=false ) const;

    /** @overload
    @param queryDescriptors Query set of descriptors.
    @param matches Matches. If a query descriptor is masked out in mask , no match is added for this
    descriptor. So, matches size may be smaller than the query descriptors count.
    @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
    descriptors and stored train descriptors from the i-th image trainDescCollection[i].
    */
    CV_WRAP void match( InputArray queryDescriptors, CV_OUT std::vector<DMatch>& matches,
                        InputArrayOfArrays masks=noArray() );
    /** @overload
    @param queryDescriptors Query set of descriptors.
    @param matches Matches. Each matches[i] is k or less matches for the same query descriptor.
    @param k Count of best matches found per each query descriptor or less if a query descriptor has
    less than k possible matches in total.
    @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
    descriptors and stored train descriptors from the i-th image trainDescCollection[i].
    @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
    false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
    the matches vector does not contain matches for fully masked-out query descriptors.
    */
    CV_WRAP void knnMatch( InputArray queryDescriptors, CV_OUT std::vector<std::vector<DMatch> >& matches, int k,
                           InputArrayOfArrays masks=noArray(), bool compactResult=false );
    /** @overload
    @param queryDescriptors Query set of descriptors.
    @param matches Found matches.
    @param maxDistance Threshold for the distance between matched descriptors. Distance means here
    metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured
    in Pixels)!
    @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
    descriptors and stored train descriptors from the i-th image trainDescCollection[i].
    @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
    false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
    the matches vector does not contain matches for fully masked-out query descriptors.
    */
    CV_WRAP void radiusMatch( InputArray queryDescriptors, CV_OUT std::vector<std::vector<DMatch> >& matches, float maxDistance,
                              InputArrayOfArrays masks=noArray(), bool compactResult=false );


    // Convenience overload: serializes the matcher state to the named file.
    CV_WRAP void write( const String& fileName ) const
    {
        FileStorage fs(fileName, FileStorage::WRITE);
        write(fs);
    }

    // Convenience overload: restores the matcher state from the named file.
    CV_WRAP void read( const String& fileName )
    {
        FileStorage fs(fileName, FileStorage::READ);
        read(fs.root());
    }
    // Reads matcher object from a file node
    // see corresponding cv::Algorithm method
    CV_WRAP virtual void read( const FileNode& ) CV_OVERRIDE;
    // Writes matcher object to a file storage
    virtual void write( FileStorage& ) const CV_OVERRIDE;

    /** @brief Clones the matcher.

    @param emptyTrainData If emptyTrainData is false, the method creates a deep copy of the object,
    that is, copies both parameters and train data. If emptyTrainData is true, the method creates an
    object copy with the current parameters but with empty train data.
    */
    CV_WRAP virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const = 0;

    /** @brief Creates a descriptor matcher of a given type with the default parameters (using default
    constructor).

    @param descriptorMatcherType Descriptor matcher type. Now the following matcher types are
    supported:
    -   `BruteForce` (it uses L2 )
    -   `BruteForce-L1`
    -   `BruteForce-Hamming`
    -   `BruteForce-Hamming(2)`
    -   `FlannBased`
    */
    CV_WRAP static Ptr<DescriptorMatcher> create( const String& descriptorMatcherType );

    CV_WRAP static Ptr<DescriptorMatcher> create( const DescriptorMatcher::MatcherType& matcherType );


    // see corresponding cv::Algorithm method
    CV_WRAP inline void write(const Ptr<FileStorage>& fs, const String& name = String()) const { Algorithm::write(fs, name); }

protected:
    /**
     * Class to work with descriptors from several images as with one merged matrix.
     * It is used e.g. in FlannBasedMatcher.
     */
    class CV_EXPORTS DescriptorCollection
    {
    public:
        DescriptorCollection();
        DescriptorCollection( const DescriptorCollection& collection );
        virtual ~DescriptorCollection();

        // Vector of matrices "descriptors" will be merged to one matrix "mergedDescriptors" here.
        void set( const std::vector<Mat>& descriptors );
        virtual void clear();

        const Mat& getDescriptors() const;
        const Mat getDescriptor( int imgIdx, int localDescIdx ) const;
        const Mat getDescriptor( int globalDescIdx ) const;
        void getLocalIdx( int globalDescIdx, int& imgIdx, int& localDescIdx ) const;

        int size() const;

    protected:
        Mat mergedDescriptors;
        std::vector<int> startIdxs;
    };

    //! In fact the matching is implemented only by the following two methods. These methods suppose
    //! that the class object has been trained already. Public match methods call these methods
    //! after calling train().
    virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k,
                               InputArrayOfArrays masks=noArray(), bool compactResult=false ) = 0;
    virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,
                                  InputArrayOfArrays masks=noArray(), bool compactResult=false ) = 0;

    static bool isPossibleMatch( InputArray mask, int queryIdx, int trainIdx );
    static bool isMaskedOut( InputArrayOfArrays masks, int queryIdx );

    // Deep-copies a Mat; used as the copy operation when cloning train collections.
    static Mat clone_op( Mat m ) { return m.clone(); }
    void checkMasks( InputArrayOfArrays masks, int queryDescriptorsCount ) const;

    //! Collection of descriptors from train images.
    std::vector<Mat> trainDescCollection;
    std::vector<UMat> utrainDescCollection;
};

/** @brief Brute-force descriptor matcher.

For each descriptor in the first set, this matcher finds the closest descriptor in the second set
by trying each one. This descriptor matcher supports masking permissible matches of descriptor
sets.
*/
class CV_EXPORTS_W BFMatcher : public DescriptorMatcher
{
public:
    /** @brief Brute-force matcher constructor (obsolete). Please use BFMatcher.create()
     *
     *
    */
    CV_WRAP BFMatcher( int normType=NORM_L2, bool crossCheck=false );

    virtual ~BFMatcher() {}

    virtual bool isMaskSupported() const CV_OVERRIDE { return true; }

    /** @brief Brute-force matcher create method.
    @param normType One of NORM_L1, NORM_L2, NORM_HAMMING, NORM_HAMMING2. L1 and L2 norms are
    preferable choices for SIFT and SURF descriptors, NORM_HAMMING should be used with ORB, BRISK and
    BRIEF, NORM_HAMMING2 should be used with ORB when WTA_K==3 or 4 (see ORB::ORB constructor
    description).
    @param crossCheck If it is false, this will be the default BFMatcher behaviour when it finds the k
    nearest neighbors for each query descriptor. If crossCheck==true, then the knnMatch() method with
    k=1 will only return pairs (i,j) such that for i-th query descriptor the j-th descriptor in the
    matcher's collection is the nearest and vice versa, i.e. the BFMatcher will only return consistent
    pairs. Such technique usually produces best results with minimal number of outliers when there are
    enough matches. This is alternative to the ratio test, used by D. Lowe in SIFT paper.
    */
    CV_WRAP static Ptr<BFMatcher> create( int normType=NORM_L2, bool crossCheck=false ) ;

    virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const CV_OVERRIDE;
protected:
    virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k,
                               InputArrayOfArrays masks=noArray(), bool compactResult=false ) CV_OVERRIDE;
    virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,
                                  InputArrayOfArrays masks=noArray(), bool compactResult=false ) CV_OVERRIDE;

    int normType;
    bool crossCheck;
};

#if defined(HAVE_OPENCV_FLANN) || defined(CV_DOXYGEN)

/** @brief Flann-based descriptor matcher.

This matcher trains cv::flann::Index on a train descriptor collection and calls its nearest search
methods to find the best matches. So, this matcher may be faster when matching a large train
collection than the brute force matcher. FlannBasedMatcher does not support masking permissible
matches of descriptor sets because flann::Index does not support this. :
*/
class CV_EXPORTS_W FlannBasedMatcher : public DescriptorMatcher
{
public:
    CV_WRAP FlannBasedMatcher( const Ptr<flann::IndexParams>& indexParams=makePtr<flann::KDTreeIndexParams>(),
                       const Ptr<flann::SearchParams>& searchParams=makePtr<flann::SearchParams>() );

    virtual void add( InputArrayOfArrays descriptors ) CV_OVERRIDE;
    virtual void clear() CV_OVERRIDE;

    // Reads matcher object from a file node
    virtual void read( const FileNode& ) CV_OVERRIDE;
    // Writes matcher object to a file storage
    virtual void write( FileStorage& ) const CV_OVERRIDE;

    virtual void train() CV_OVERRIDE;
    virtual bool isMaskSupported() const CV_OVERRIDE;

    CV_WRAP static Ptr<FlannBasedMatcher> create();

    virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const CV_OVERRIDE;
protected:
    static void convertToDMatches( const DescriptorCollection& descriptors,
                                   const Mat& indices, const Mat& distances,
                                   std::vector<std::vector<DMatch> >& matches );

    virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k,
                               InputArrayOfArrays masks=noArray(), bool compactResult=false ) CV_OVERRIDE;
    virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,
                                  InputArrayOfArrays masks=noArray(), bool compactResult=false ) CV_OVERRIDE;

    Ptr<flann::IndexParams> indexParams;
    Ptr<flann::SearchParams> searchParams;
    Ptr<flann::Index> flannIndex;

    DescriptorCollection mergedDescriptors;
    int addedDescCount;
};

#endif

//! @} features2d_match

/****************************************************************************************\
*                                   Drawing functions                                    *
\****************************************************************************************/

//! @addtogroup features2d_draw
//! @{

enum struct DrawMatchesFlags
{
  DEFAULT = 0, //!< Output image matrix will be created (Mat::create),
               //!< i.e. existing memory of output image may be reused.
               //!< Two source images, matches and single keypoints will be drawn.
               //!< For each keypoint only the center point will be drawn (without
               //!< the circle around keypoint with keypoint size and orientation).
  DRAW_OVER_OUTIMG = 1, //!< Output image matrix will not be created (Mat::create).
                        //!< Matches will be drawn on existing content of output image.
  NOT_DRAW_SINGLE_POINTS = 2, //!< Single keypoints will not be drawn.
  DRAW_RICH_KEYPOINTS = 4 //!< For each keypoint the circle around keypoint with keypoint size and
                          //!< orientation will be drawn.
};
CV_ENUM_FLAGS(DrawMatchesFlags);

/** @brief Draws keypoints.

@param image Source image.
@param keypoints Keypoints from the source image.
@param outImage Output image. Its content depends on the flags value defining what is drawn in the
output image. See possible flags bit values below.
@param color Color of keypoints.
@param flags Flags setting drawing features. Possible flags bit values are defined by
DrawMatchesFlags. See details above in drawMatches .

@note
For Python API, flags are modified as cv2.DRAW_MATCHES_FLAGS_DEFAULT,
cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, cv2.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG,
cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS
*/
CV_EXPORTS_W void drawKeypoints( InputArray image, const std::vector<KeyPoint>& keypoints, InputOutputArray outImage,
                               const Scalar& color=Scalar::all(-1), DrawMatchesFlags flags=DrawMatchesFlags::DEFAULT );

/** @brief Draws the found matches of keypoints from two images.

@param img1 First source image.
@param keypoints1 Keypoints from the first source image.
@param img2 Second source image.
@param keypoints2 Keypoints from the second source image.
@param matches1to2 Matches from the first image to the second one, which means that keypoints1[i]
has a corresponding point in keypoints2[matches[i]] .
@param outImg Output image. Its content depends on the flags value defining what is drawn in the
output image. See possible flags bit values below.
@param matchColor Color of matches (lines and connected keypoints). If matchColor==Scalar::all(-1)
, the color is generated randomly.
@param singlePointColor Color of single keypoints (circles), which means that keypoints do not
have the matches. If singlePointColor==Scalar::all(-1) , the color is generated randomly.
@param matchesMask Mask determining which matches are drawn. If the mask is empty, all matches are
drawn.
@param flags Flags setting drawing features. Possible flags bit values are defined by
DrawMatchesFlags.

This function draws matches of keypoints from two images in the output image. Match is a line
connecting two keypoints (circles). See cv::DrawMatchesFlags.
*/
CV_EXPORTS_W void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
                             InputArray img2, const std::vector<KeyPoint>& keypoints2,
                             const std::vector<DMatch>& matches1to2, InputOutputArray outImg,
                             const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1),
                             const std::vector<char>& matchesMask=std::vector<char>(), DrawMatchesFlags flags=DrawMatchesFlags::DEFAULT );

/** @overload */
CV_EXPORTS_AS(drawMatchesKnn) void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
                             InputArray img2, const std::vector<KeyPoint>& keypoints2,
                             const std::vector<std::vector<DMatch> >& matches1to2, InputOutputArray outImg,
                             const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1),
                             const std::vector<std::vector<char> >& matchesMask=std::vector<std::vector<char> >(), DrawMatchesFlags flags=DrawMatchesFlags::DEFAULT );

//! @} features2d_draw

/****************************************************************************************\
*   Functions to evaluate the feature detectors and [generic] descriptor extractors      *
\****************************************************************************************/

CV_EXPORTS void evaluateFeatureDetector( const Mat& img1, const Mat& img2, const Mat& H1to2,
                                         std::vector<KeyPoint>* keypoints1, std::vector<KeyPoint>* keypoints2,
                                         float& repeatability, int& correspCount,
                                         const Ptr<FeatureDetector>& fdetector=Ptr<FeatureDetector>() );

CV_EXPORTS void computeRecallPrecisionCurve( const std::vector<std::vector<DMatch> >& matches1to2,
                                             const std::vector<std::vector<uchar> >& correctMatches1to2Mask,
                                             std::vector<Point2f>& recallPrecisionCurve );

CV_EXPORTS float getRecall( const std::vector<Point2f>& recallPrecisionCurve, float l_precision );
CV_EXPORTS int getNearestPoint( const std::vector<Point2f>& recallPrecisionCurve, float l_precision );

/****************************************************************************************\
*                                     Bag of visual words                                *
\****************************************************************************************/

//! @addtogroup features2d_category
//! @{

/** @brief Abstract base class for training the *bag of visual words* vocabulary from a set of descriptors.

For details, see, for example, *Visual Categorization with Bags of Keypoints* by Gabriella Csurka,
Christopher R. Dance, Lixin Fan, Jutta Willamowski, Cedric Bray, 2004. :
*/
class CV_EXPORTS_W BOWTrainer
{
public:
    BOWTrainer();
    virtual ~BOWTrainer();

    /** @brief Adds descriptors to a training set.

    @param descriptors Descriptors to add to a training set. Each row of the descriptors matrix is a
    descriptor.

    The training set is clustered using the cluster method to construct the vocabulary.
    */
    CV_WRAP void add( const Mat& descriptors );

    /** @brief Returns a training set of descriptors.
    */
    CV_WRAP const std::vector<Mat>& getDescriptors() const;

    /** @brief Returns the count of all descriptors stored in the training set.
    */
    CV_WRAP int descriptorsCount() const;

    CV_WRAP virtual void clear();

    /** @overload */
    CV_WRAP virtual Mat cluster() const = 0;

    /** @brief Clusters train descriptors.

    @param descriptors Descriptors to cluster. Each row of the descriptors matrix is a descriptor.
    Descriptors are not added to the inner train descriptor set.

    The vocabulary consists of cluster centers. So, this method returns the vocabulary. In the first
    variant of the method, train descriptors stored in the object are clustered. In the second variant,
    input descriptors are clustered.
    */
    CV_WRAP virtual Mat cluster( const Mat& descriptors ) const = 0;

protected:
    std::vector<Mat> descriptors;
    int size;
};

/** @brief kmeans -based class to train visual vocabulary using the *bag of visual words* approach. :
*/
class CV_EXPORTS_W BOWKMeansTrainer : public BOWTrainer
{
public:
    /** @brief The constructor.

    @see cv::kmeans
    */
    CV_WRAP BOWKMeansTrainer( int clusterCount, const TermCriteria& termcrit=TermCriteria(),
                      int attempts=3, int flags=KMEANS_PP_CENTERS );
    virtual ~BOWKMeansTrainer();

    // Returns trained vocabulary (i.e. cluster centers).
    CV_WRAP virtual Mat cluster() const CV_OVERRIDE;
    CV_WRAP virtual Mat cluster( const Mat& descriptors ) const CV_OVERRIDE;

protected:

    int clusterCount;
    TermCriteria termcrit;
    int attempts;
    int flags;
};

/** @brief Class to compute an image descriptor using the *bag of visual words*.

Such a computation consists of the following steps:

1.  Compute descriptors for a given image and its keypoints set.
2.  Find the nearest visual words from the vocabulary for each keypoint descriptor.
3.  Compute the bag-of-words image descriptor as a normalized histogram of vocabulary words
encountered in the image.
The i-th bin of the histogram is a frequency of i-th word of the
vocabulary in the given image.
*/
class CV_EXPORTS_W BOWImgDescriptorExtractor
{
public:
    /** @brief The constructor.

    @param dextractor Descriptor extractor that is used to compute descriptors for an input image and
    its keypoints.
    @param dmatcher Descriptor matcher that is used to find the nearest word of the trained vocabulary
    for each keypoint descriptor of the image.
    */
    CV_WRAP BOWImgDescriptorExtractor( const Ptr<DescriptorExtractor>& dextractor,
                                       const Ptr<DescriptorMatcher>& dmatcher );
    /** @overload */
    BOWImgDescriptorExtractor( const Ptr<DescriptorMatcher>& dmatcher );
    virtual ~BOWImgDescriptorExtractor();

    /** @brief Sets a visual vocabulary.

    @param vocabulary Vocabulary (can be trained using the inheritor of BOWTrainer ). Each row of the
    vocabulary is a visual word (cluster center).
    */
    CV_WRAP void setVocabulary( const Mat& vocabulary );

    /** @brief Returns the set vocabulary.
    */
    CV_WRAP const Mat& getVocabulary() const;

    /** @brief Computes an image descriptor using the set visual vocabulary.

    @param image Image, for which the descriptor is computed.
    @param keypoints Keypoints detected in the input image.
    @param imgDescriptor Computed output image descriptor.
    @param pointIdxsOfClusters Indices of keypoints that belong to the cluster. This means that
    pointIdxsOfClusters[i] are keypoint indices that belong to the i -th cluster (word of vocabulary)
    returned if it is non-zero.
    @param descriptors Descriptors of the image keypoints that are returned if they are non-zero.
    */
    void compute( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray imgDescriptor,
                  std::vector<std::vector<int> >* pointIdxsOfClusters=0, Mat* descriptors=0 );
    /** @overload
    @param keypointDescriptors Computed descriptors to match with vocabulary.
    @param imgDescriptor Computed output image descriptor.
    @param pointIdxsOfClusters Indices of keypoints that belong to the cluster. This means that
    pointIdxsOfClusters[i] are keypoint indices that belong to the i -th cluster (word of vocabulary)
    returned if it is non-zero.
    */
    void compute( InputArray keypointDescriptors, OutputArray imgDescriptor,
                  std::vector<std::vector<int> >* pointIdxsOfClusters=0 );
    // compute() is not constant because DescriptorMatcher::match is not constant

    // Binding-friendly wrapper exposed to wrapped languages as "compute";
    // forwards to the full compute() overload without the optional outputs.
    CV_WRAP_AS(compute) void compute2( const Mat& image, std::vector<KeyPoint>& keypoints, CV_OUT Mat& imgDescriptor )
    { compute(image,keypoints,imgDescriptor); }

    /** @brief Returns an image descriptor size if the vocabulary is set. Otherwise, it returns 0.
    */
    CV_WRAP int descriptorSize() const;

    /** @brief Returns an image descriptor type.
    */
    CV_WRAP int descriptorType() const;

protected:
    Mat vocabulary;
    Ptr<DescriptorExtractor> dextractor;
    Ptr<DescriptorMatcher> dmatcher;
};

//! @} features2d_category

//! @} features2d

} /* namespace cv */

#endif