/*M///////////////////////////////////////////////////////////////////////////////////////1//2// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.3//4// By downloading, copying, installing or using the software you agree to this license.5// If you do not agree to this license, do not download, install,6// copy or use the software.7//8//9// License Agreement10// For Open Source Computer Vision Library11//12// Copyright (C) 2000, Intel Corporation, all rights reserved.13// Copyright (C) 2013, OpenCV Foundation, all rights reserved.14// Copyright (C) 2014, Itseez Inc, all rights reserved.15// Third party copyrights are property of their respective owners.16//17// Redistribution and use in source and binary forms, with or without modification,18// are permitted provided that the following conditions are met:19//20// * Redistribution's of source code must retain the above copyright notice,21// this list of conditions and the following disclaimer.22//23// * Redistribution's in binary form must reproduce the above copyright notice,24// this list of conditions and the following disclaimer in the documentation25// and/or other materials provided with the distribution.26//27// * The name of the copyright holders may not be used to endorse or promote products28// derived from this software without specific prior written permission.29//30// This software is provided by the copyright holders and contributors "as is" and31// any express or implied warranties, including, but not limited to, the implied32// warranties of merchantability and fitness for a particular purpose are disclaimed.33// In no event shall the Intel Corporation or contributors be liable for any direct,34// indirect, incidental, special, exemplary, or consequential damages35// (including, but not limited to, procurement of substitute goods or services;36// loss of use, data, or profits; or business interruption) however caused37// and on any theory of liability, whether in contract, strict 
liability,38// or tort (including negligence or otherwise) arising in any way out of39// the use of this software, even if advised of the possibility of such damage.40//41//M*/4243#ifndef OPENCV_ML_HPP44#define OPENCV_ML_HPP4546#ifdef __cplusplus47# include "opencv2/core.hpp"48#endif4950#ifdef __cplusplus5152#include <float.h>53#include <map>54#include <iostream>5556/**57@defgroup ml Machine Learning5859The Machine Learning Library (MLL) is a set of classes and functions for statistical60classification, regression, and clustering of data.6162Most of the classification and regression algorithms are implemented as C++ classes. As the63algorithms have different sets of features (like an ability to handle missing measurements or64categorical input variables), there is a little common ground between the classes. This common65ground is defined by the class cv::ml::StatModel that all the other ML classes are derived from.6667See detailed overview here: @ref ml_intro.68*/6970namespace cv71{7273namespace ml74{7576//! @addtogroup ml77//! 
@{7879/** @brief Variable types */80enum VariableTypes81{82VAR_NUMERICAL =0, //!< same as VAR_ORDERED83VAR_ORDERED =0, //!< ordered variables84VAR_CATEGORICAL =1 //!< categorical variables85};8687/** @brief %Error types */88enum ErrorTypes89{90TEST_ERROR = 0,91TRAIN_ERROR = 192};9394/** @brief Sample types */95enum SampleTypes96{97ROW_SAMPLE = 0, //!< each training sample is a row of samples98COL_SAMPLE = 1 //!< each training sample occupies a column of samples99};100101/** @brief The structure represents the logarithmic grid range of statmodel parameters.102103It is used for optimizing statmodel accuracy by varying model parameters, the accuracy estimate104being computed by cross-validation.105*/106class CV_EXPORTS_W ParamGrid107{108public:109/** @brief Default constructor */110ParamGrid();111/** @brief Constructor with parameters */112ParamGrid(double _minVal, double _maxVal, double _logStep);113114CV_PROP_RW double minVal; //!< Minimum value of the statmodel parameter. Default value is 0.115CV_PROP_RW double maxVal; //!< Maximum value of the statmodel parameter. Default value is 0.116/** @brief Logarithmic step for iterating the statmodel parameter.117118The grid determines the following iteration sequence of the statmodel parameter values:119\f[(minVal, minVal*step, minVal*{step}^2, \dots, minVal*{logStep}^n),\f]120where \f$n\f$ is the maximal index satisfying121\f[\texttt{minVal} * \texttt{logStep} ^n < \texttt{maxVal}\f]122The grid is logarithmic, so logStep must always be greater then 1. 
Default value is 1.123*/124CV_PROP_RW double logStep;125126/** @brief Creates a ParamGrid Ptr that can be given to the %SVM::trainAuto method127128@param minVal minimum value of the parameter grid129@param maxVal maximum value of the parameter grid130@param logstep Logarithmic step for iterating the statmodel parameter131*/132CV_WRAP static Ptr<ParamGrid> create(double minVal=0., double maxVal=0., double logstep=1.);133};134135/** @brief Class encapsulating training data.136137Please note that the class only specifies the interface of training data, but not implementation.138All the statistical model classes in _ml_ module accepts Ptr\<TrainData\> as parameter. In other139words, you can create your own class derived from TrainData and pass smart pointer to the instance140of this class into StatModel::train.141142@sa @ref ml_intro_data143*/144class CV_EXPORTS_W TrainData145{146public:147static inline float missingValue() { return FLT_MAX; }148virtual ~TrainData();149150CV_WRAP virtual int getLayout() const = 0;151CV_WRAP virtual int getNTrainSamples() const = 0;152CV_WRAP virtual int getNTestSamples() const = 0;153CV_WRAP virtual int getNSamples() const = 0;154CV_WRAP virtual int getNVars() const = 0;155CV_WRAP virtual int getNAllVars() const = 0;156157CV_WRAP virtual void getSample(InputArray varIdx, int sidx, float* buf) const = 0;158CV_WRAP virtual Mat getSamples() const = 0;159CV_WRAP virtual Mat getMissing() const = 0;160161/** @brief Returns matrix of train samples162163@param layout The requested layout. If it's different from the initial one, the matrix is164transposed. 
See ml::SampleTypes.165@param compressSamples if true, the function returns only the training samples (specified by166sampleIdx)167@param compressVars if true, the function returns the shorter training samples, containing only168the active variables.169170In current implementation the function tries to avoid physical data copying and returns the171matrix stored inside TrainData (unless the transposition or compression is needed).172*/173CV_WRAP virtual Mat getTrainSamples(int layout=ROW_SAMPLE,174bool compressSamples=true,175bool compressVars=true) const = 0;176177/** @brief Returns the vector of responses178179The function returns ordered or the original categorical responses. Usually it's used in180regression algorithms.181*/182CV_WRAP virtual Mat getTrainResponses() const = 0;183184/** @brief Returns the vector of normalized categorical responses185186The function returns vector of responses. Each response is integer from `0` to `<number of187classes>-1`. The actual label value can be retrieved then from the class label vector, see188TrainData::getClassLabels.189*/190CV_WRAP virtual Mat getTrainNormCatResponses() const = 0;191CV_WRAP virtual Mat getTestResponses() const = 0;192CV_WRAP virtual Mat getTestNormCatResponses() const = 0;193CV_WRAP virtual Mat getResponses() const = 0;194CV_WRAP virtual Mat getNormCatResponses() const = 0;195CV_WRAP virtual Mat getSampleWeights() const = 0;196CV_WRAP virtual Mat getTrainSampleWeights() const = 0;197CV_WRAP virtual Mat getTestSampleWeights() const = 0;198CV_WRAP virtual Mat getVarIdx() const = 0;199CV_WRAP virtual Mat getVarType() const = 0;200CV_WRAP virtual Mat getVarSymbolFlags() const = 0;201CV_WRAP virtual int getResponseType() const = 0;202CV_WRAP virtual Mat getTrainSampleIdx() const = 0;203CV_WRAP virtual Mat getTestSampleIdx() const = 0;204CV_WRAP virtual void getValues(int vi, InputArray sidx, float* values) const = 0;205virtual void getNormCatValues(int vi, InputArray sidx, int* values) const = 0;206CV_WRAP 
virtual Mat getDefaultSubstValues() const = 0;207208CV_WRAP virtual int getCatCount(int vi) const = 0;209210/** @brief Returns the vector of class labels211212The function returns vector of unique labels occurred in the responses.213*/214CV_WRAP virtual Mat getClassLabels() const = 0;215216CV_WRAP virtual Mat getCatOfs() const = 0;217CV_WRAP virtual Mat getCatMap() const = 0;218219/** @brief Splits the training data into the training and test parts220@sa TrainData::setTrainTestSplitRatio221*/222CV_WRAP virtual void setTrainTestSplit(int count, bool shuffle=true) = 0;223224/** @brief Splits the training data into the training and test parts225226The function selects a subset of specified relative size and then returns it as the training227set. If the function is not called, all the data is used for training. Please, note that for228each of TrainData::getTrain\* there is corresponding TrainData::getTest\*, so that the test229subset can be retrieved and processed as well.230@sa TrainData::setTrainTestSplit231*/232CV_WRAP virtual void setTrainTestSplitRatio(double ratio, bool shuffle=true) = 0;233CV_WRAP virtual void shuffleTrainTest() = 0;234235/** @brief Returns matrix of test samples */236CV_WRAP virtual Mat getTestSamples() const = 0;237238/** @brief Returns vector of symbolic names captured in loadFromCSV() */239CV_WRAP virtual void getNames(std::vector<String>& names) const = 0;240241/** @brief Extract from 1D vector elements specified by passed indexes.242@param vec input vector (supported types: CV_32S, CV_32F, CV_64F)243@param idx 1D index vector244*/245static CV_WRAP Mat getSubVector(const Mat& vec, const Mat& idx);246247/** @brief Extract from matrix rows/cols specified by passed indexes.248@param matrix input matrix (supported types: CV_32S, CV_32F, CV_64F)249@param idx 1D index vector250@param layout specifies to extract rows (cv::ml::ROW_SAMPLES) or to extract columns (cv::ml::COL_SAMPLES)251*/252static CV_WRAP Mat getSubMatrix(const Mat& matrix, const 
Mat& idx, int layout);253254/** @brief Reads the dataset from a .csv file and returns the ready-to-use training data.255256@param filename The input file name257@param headerLineCount The number of lines in the beginning to skip; besides the header, the258function also skips empty lines and lines staring with `#`259@param responseStartIdx Index of the first output variable. If -1, the function considers the260last variable as the response261@param responseEndIdx Index of the last output variable + 1. If -1, then there is single262response variable at responseStartIdx.263@param varTypeSpec The optional text string that specifies the variables' types. It has the264format `ord[n1-n2,n3,n4-n5,...]cat[n6,n7-n8,...]`. That is, variables from `n1 to n2`265(inclusive range), `n3`, `n4 to n5` ... are considered ordered and `n6`, `n7 to n8` ... are266considered as categorical. The range `[n1..n2] + [n3] + [n4..n5] + ... + [n6] + [n7..n8]`267should cover all the variables. If varTypeSpec is not specified, then algorithm uses the268following rules:269- all input variables are considered ordered by default. If some column contains has non-270numerical values, e.g. 'apple', 'pear', 'apple', 'apple', 'mango', the corresponding271variable is considered categorical.272- if there are several output variables, they are all considered as ordered. Error is273reported when non-numerical values are used.274- if there is a single output variable, then if its values are non-numerical or are all275integers, then it's considered categorical. Otherwise, it's considered ordered.276@param delimiter The character used to separate values in each line.277@param missch The character used to specify missing measurements. It should not be a digit.278Although it's a non-numerical value, it surely does not affect the decision of whether the279variable ordered or categorical.280@note If the dataset only contains input variables and no responses, use responseStartIdx = -2281and responseEndIdx = 0. 
The output variables vector will just contain zeros.282*/283static Ptr<TrainData> loadFromCSV(const String& filename,284int headerLineCount,285int responseStartIdx=-1,286int responseEndIdx=-1,287const String& varTypeSpec=String(),288char delimiter=',',289char missch='?');290291/** @brief Creates training data from in-memory arrays.292293@param samples matrix of samples. It should have CV_32F type.294@param layout see ml::SampleTypes.295@param responses matrix of responses. If the responses are scalar, they should be stored as a296single row or as a single column. The matrix should have type CV_32F or CV_32S (in the297former case the responses are considered as ordered by default; in the latter case - as298categorical)299@param varIdx vector specifying which variables to use for training. It can be an integer vector300(CV_32S) containing 0-based variable indices or byte vector (CV_8U) containing a mask of301active variables.302@param sampleIdx vector specifying which samples to use for training. It can be an integer303vector (CV_32S) containing 0-based sample indices or byte vector (CV_8U) containing a mask304of training samples.305@param sampleWeights optional vector with weights for each sample. It should have CV_32F type.306@param varType optional vector of type CV_8U and size `<number_of_variables_in_samples> +307<number_of_variables_in_responses>`, containing types of each input and output variable. 
See308ml::VariableTypes.309*/310CV_WRAP static Ptr<TrainData> create(InputArray samples, int layout, InputArray responses,311InputArray varIdx=noArray(), InputArray sampleIdx=noArray(),312InputArray sampleWeights=noArray(), InputArray varType=noArray());313};314315/** @brief Base class for statistical models in OpenCV ML.316*/317class CV_EXPORTS_W StatModel : public Algorithm318{319public:320/** Predict options */321enum Flags {322UPDATE_MODEL = 1,323RAW_OUTPUT=1, //!< makes the method return the raw results (the sum), not the class label324COMPRESSED_INPUT=2,325PREPROCESSED_INPUT=4326};327328/** @brief Returns the number of variables in training samples */329CV_WRAP virtual int getVarCount() const = 0;330331CV_WRAP virtual bool empty() const CV_OVERRIDE;332333/** @brief Returns true if the model is trained */334CV_WRAP virtual bool isTrained() const = 0;335/** @brief Returns true if the model is classifier */336CV_WRAP virtual bool isClassifier() const = 0;337338/** @brief Trains the statistical model339340@param trainData training data that can be loaded from file using TrainData::loadFromCSV or341created with TrainData::create.342@param flags optional flags, depending on the model. Some of the models can be updated with the343new training samples, not completely overwritten (such as NormalBayesClassifier or ANN_MLP).344*/345CV_WRAP virtual bool train( const Ptr<TrainData>& trainData, int flags=0 );346347/** @brief Trains the statistical model348349@param samples training samples350@param layout See ml::SampleTypes.351@param responses vector of responses associated with the training samples.352*/353CV_WRAP virtual bool train( InputArray samples, int layout, InputArray responses );354355/** @brief Computes error on the training or test dataset356357@param data the training data358@param test if true, the error is computed over the test subset of the data, otherwise it's359computed over the training subset of the data. 
Please note that if you loaded a completely360different dataset to evaluate already trained classifier, you will probably want not to set361the test subset at all with TrainData::setTrainTestSplitRatio and specify test=false, so362that the error is computed for the whole new set. Yes, this sounds a bit confusing.363@param resp the optional output responses.364365The method uses StatModel::predict to compute the error. For regression models the error is366computed as RMS, for classifiers - as a percent of missclassified samples (0%-100%).367*/368CV_WRAP virtual float calcError( const Ptr<TrainData>& data, bool test, OutputArray resp ) const;369370/** @brief Predicts response(s) for the provided sample(s)371372@param samples The input samples, floating-point matrix373@param results The optional output matrix of results.374@param flags The optional flags, model-dependent. See cv::ml::StatModel::Flags.375*/376CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const = 0;377378/** @brief Create and train model with default parameters379380The class must implement static `create()` method with no parameters or with all default parameter values381*/382template<typename _Tp> static Ptr<_Tp> train(const Ptr<TrainData>& data, int flags=0)383{384Ptr<_Tp> model = _Tp::create();385return !model.empty() && model->train(data, flags) ? model : Ptr<_Tp>();386}387};388389/****************************************************************************************\390* Normal Bayes Classifier *391\****************************************************************************************/392393/** @brief Bayes classifier for normally distributed data.394395@sa @ref ml_intro_bayes396*/397class CV_EXPORTS_W NormalBayesClassifier : public StatModel398{399public:400/** @brief Predicts the response for sample(s).401402The method estimates the most probable classes for input vectors. Input vectors (one or more)403are stored as rows of the matrix inputs. 
In case of multiple input vectors, there should be one404output vector outputs. The predicted class for a single input vector is returned by the method.405The vector outputProbs contains the output probabilities corresponding to each element of406result.407*/408CV_WRAP virtual float predictProb( InputArray inputs, OutputArray outputs,409OutputArray outputProbs, int flags=0 ) const = 0;410411/** Creates empty model412Use StatModel::train to train the model after creation. */413CV_WRAP static Ptr<NormalBayesClassifier> create();414415/** @brief Loads and creates a serialized NormalBayesClassifier from a file416*417* Use NormalBayesClassifier::save to serialize and store an NormalBayesClassifier to disk.418* Load the NormalBayesClassifier from this file again, by calling this function with the path to the file.419* Optionally specify the node for the file containing the classifier420*421* @param filepath path to serialized NormalBayesClassifier422* @param nodeName name of node containing the classifier423*/424CV_WRAP static Ptr<NormalBayesClassifier> load(const String& filepath , const String& nodeName = String());425};426427/****************************************************************************************\428* K-Nearest Neighbour Classifier *429\****************************************************************************************/430431/** @brief The class implements K-Nearest Neighbors model432433@sa @ref ml_intro_knn434*/435class CV_EXPORTS_W KNearest : public StatModel436{437public:438439/** Default number of neighbors to use in predict method. */440/** @see setDefaultK */441CV_WRAP virtual int getDefaultK() const = 0;442/** @copybrief getDefaultK @see getDefaultK */443CV_WRAP virtual void setDefaultK(int val) = 0;444445/** Whether classification or regression model should be trained. 
*/446/** @see setIsClassifier */447CV_WRAP virtual bool getIsClassifier() const = 0;448/** @copybrief getIsClassifier @see getIsClassifier */449CV_WRAP virtual void setIsClassifier(bool val) = 0;450451/** Parameter for KDTree implementation. */452/** @see setEmax */453CV_WRAP virtual int getEmax() const = 0;454/** @copybrief getEmax @see getEmax */455CV_WRAP virtual void setEmax(int val) = 0;456457/** %Algorithm type, one of KNearest::Types. */458/** @see setAlgorithmType */459CV_WRAP virtual int getAlgorithmType() const = 0;460/** @copybrief getAlgorithmType @see getAlgorithmType */461CV_WRAP virtual void setAlgorithmType(int val) = 0;462463/** @brief Finds the neighbors and predicts responses for input vectors.464465@param samples Input samples stored by rows. It is a single-precision floating-point matrix of466`<number_of_samples> * k` size.467@param k Number of used nearest neighbors. Should be greater than 1.468@param results Vector with results of prediction (regression or classification) for each input469sample. It is a single-precision floating-point vector with `<number_of_samples>` elements.470@param neighborResponses Optional output values for corresponding neighbors. It is a single-471precision floating-point matrix of `<number_of_samples> * k` size.472@param dist Optional output distances from the input vectors to the corresponding neighbors. It473is a single-precision floating-point matrix of `<number_of_samples> * k` size.474475For each input vector (a row of the matrix samples), the method finds the k nearest neighbors.476In case of regression, the predicted result is a mean value of the particular vector's neighbor477responses. 
In case of classification, the class is determined by voting.478479For each input vector, the neighbors are sorted by their distances to the vector.480481In case of C++ interface you can use output pointers to empty matrices and the function will482allocate memory itself.483484If only a single input vector is passed, all output matrices are optional and the predicted485value is returned by the method.486487The function is parallelized with the TBB library.488*/489CV_WRAP virtual float findNearest( InputArray samples, int k,490OutputArray results,491OutputArray neighborResponses=noArray(),492OutputArray dist=noArray() ) const = 0;493494/** @brief Implementations of KNearest algorithm495*/496enum Types497{498BRUTE_FORCE=1,499KDTREE=2500};501502/** @brief Creates the empty model503504The static method creates empty %KNearest classifier. It should be then trained using StatModel::train method.505*/506CV_WRAP static Ptr<KNearest> create();507};508509/****************************************************************************************\510* Support Vector Machines *511\****************************************************************************************/512513/** @brief Support Vector Machines.514515@sa @ref ml_intro_svm516*/517class CV_EXPORTS_W SVM : public StatModel518{519public:520521class CV_EXPORTS Kernel : public Algorithm522{523public:524virtual int getType() const = 0;525virtual void calc( int vcount, int n, const float* vecs, const float* another, float* results ) = 0;526};527528/** Type of a %SVM formulation.529See SVM::Types. Default value is SVM::C_SVC. */530/** @see setType */531CV_WRAP virtual int getType() const = 0;532/** @copybrief getType @see getType */533CV_WRAP virtual void setType(int val) = 0;534535/** Parameter \f$\gamma\f$ of a kernel function.536For SVM::POLY, SVM::RBF, SVM::SIGMOID or SVM::CHI2. Default value is 1. 
*/537/** @see setGamma */538CV_WRAP virtual double getGamma() const = 0;539/** @copybrief getGamma @see getGamma */540CV_WRAP virtual void setGamma(double val) = 0;541542/** Parameter _coef0_ of a kernel function.543For SVM::POLY or SVM::SIGMOID. Default value is 0.*/544/** @see setCoef0 */545CV_WRAP virtual double getCoef0() const = 0;546/** @copybrief getCoef0 @see getCoef0 */547CV_WRAP virtual void setCoef0(double val) = 0;548549/** Parameter _degree_ of a kernel function.550For SVM::POLY. Default value is 0. */551/** @see setDegree */552CV_WRAP virtual double getDegree() const = 0;553/** @copybrief getDegree @see getDegree */554CV_WRAP virtual void setDegree(double val) = 0;555556/** Parameter _C_ of a %SVM optimization problem.557For SVM::C_SVC, SVM::EPS_SVR or SVM::NU_SVR. Default value is 0. */558/** @see setC */559CV_WRAP virtual double getC() const = 0;560/** @copybrief getC @see getC */561CV_WRAP virtual void setC(double val) = 0;562563/** Parameter \f$\nu\f$ of a %SVM optimization problem.564For SVM::NU_SVC, SVM::ONE_CLASS or SVM::NU_SVR. Default value is 0. */565/** @see setNu */566CV_WRAP virtual double getNu() const = 0;567/** @copybrief getNu @see getNu */568CV_WRAP virtual void setNu(double val) = 0;569570/** Parameter \f$\epsilon\f$ of a %SVM optimization problem.571For SVM::EPS_SVR. Default value is 0. */572/** @see setP */573CV_WRAP virtual double getP() const = 0;574/** @copybrief getP @see getP */575CV_WRAP virtual void setP(double val) = 0;576577/** Optional weights in the SVM::C_SVC problem, assigned to particular classes.578They are multiplied by _C_ so the parameter _C_ of class _i_ becomes `classWeights(i) * C`. Thus579these weights affect the misclassification penalty for different classes. The larger weight,580the larger penalty on misclassification of data from the corresponding class. Default value is581empty Mat. 
*/582/** @see setClassWeights */583CV_WRAP virtual cv::Mat getClassWeights() const = 0;584/** @copybrief getClassWeights @see getClassWeights */585CV_WRAP virtual void setClassWeights(const cv::Mat &val) = 0;586587/** Termination criteria of the iterative %SVM training procedure which solves a partial588case of constrained quadratic optimization problem.589You can specify tolerance and/or the maximum number of iterations. Default value is590`TermCriteria( TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, FLT_EPSILON )`; */591/** @see setTermCriteria */592CV_WRAP virtual cv::TermCriteria getTermCriteria() const = 0;593/** @copybrief getTermCriteria @see getTermCriteria */594CV_WRAP virtual void setTermCriteria(const cv::TermCriteria &val) = 0;595596/** Type of a %SVM kernel.597See SVM::KernelTypes. Default value is SVM::RBF. */598CV_WRAP virtual int getKernelType() const = 0;599600/** Initialize with one of predefined kernels.601See SVM::KernelTypes. */602CV_WRAP virtual void setKernel(int kernelType) = 0;603604/** Initialize with custom kernel.605See SVM::Kernel class for implementation details */606virtual void setCustomKernel(const Ptr<Kernel> &_kernel) = 0;607608//! %SVM type609enum Types {610/** C-Support Vector Classification. n-class classification (n \f$\geq\f$ 2), allows611imperfect separation of classes with penalty multiplier C for outliers. */612C_SVC=100,613/** \f$\nu\f$-Support Vector Classification. n-class classification with possible614imperfect separation. Parameter \f$\nu\f$ (in the range 0..1, the larger the value, the smoother615the decision boundary) is used instead of C. */616NU_SVC=101,617/** Distribution Estimation (One-class %SVM). All the training data are from618the same class, %SVM builds a boundary that separates the class from the rest of the feature619space. */620ONE_CLASS=102,621/** \f$\epsilon\f$-Support Vector Regression. The distance between feature vectors622from the training set and the fitting hyper-plane must be less than p. 
For outliers the623penalty multiplier C is used. */624EPS_SVR=103,625/** \f$\nu\f$-Support Vector Regression. \f$\nu\f$ is used instead of p.626See @cite LibSVM for details. */627NU_SVR=104628};629630/** @brief %SVM kernel type631632A comparison of different kernels on the following 2D test case with four classes. Four633SVM::C_SVC SVMs have been trained (one against rest) with auto_train. Evaluation on three634different kernels (SVM::CHI2, SVM::INTER, SVM::RBF). The color depicts the class with max score.635Bright means max-score \> 0, dark means max-score \< 0.636637*/638enum KernelTypes {639/** Returned by SVM::getKernelType in case when custom kernel has been set */640CUSTOM=-1,641/** Linear kernel. No mapping is done, linear discrimination (or regression) is642done in the original feature space. It is the fastest option. \f$K(x_i, x_j) = x_i^T x_j\f$. */643LINEAR=0,644/** Polynomial kernel:645\f$K(x_i, x_j) = (\gamma x_i^T x_j + coef0)^{degree}, \gamma > 0\f$. */646POLY=1,647/** Radial basis function (RBF), a good choice in most cases.648\f$K(x_i, x_j) = e^{-\gamma ||x_i - x_j||^2}, \gamma > 0\f$. */649RBF=2,650/** Sigmoid kernel: \f$K(x_i, x_j) = \tanh(\gamma x_i^T x_j + coef0)\f$. */651SIGMOID=3,652/** Exponential Chi2 kernel, similar to the RBF kernel:653\f$K(x_i, x_j) = e^{-\gamma \chi^2(x_i,x_j)}, \chi^2(x_i,x_j) = (x_i-x_j)^2/(x_i+x_j), \gamma > 0\f$. */654CHI2=4,655/** Histogram intersection kernel. A fast kernel. \f$K(x_i, x_j) = min(x_i,x_j)\f$. */656INTER=5657};658659//! %SVM params type660enum ParamTypes {661C=0,662GAMMA=1,663P=2,664NU=3,665COEF=4,666DEGREE=5667};668669/** @brief Trains an %SVM with optimal parameters.670671@param data the training data that can be constructed using TrainData::create or672TrainData::loadFromCSV.673@param kFold Cross-validation parameter. The training set is divided into kFold subsets. One674subset is used to test the model, the others form the train set. 
So, the %SVM algorithm is675executed kFold times.676@param Cgrid grid for C677@param gammaGrid grid for gamma678@param pGrid grid for p679@param nuGrid grid for nu680@param coeffGrid grid for coeff681@param degreeGrid grid for degree682@param balanced If true and the problem is 2-class classification then the method creates more683balanced cross-validation subsets that is proportions between classes in subsets are close684to such proportion in the whole train dataset.685686The method trains the %SVM model automatically by choosing the optimal parameters C, gamma, p,687nu, coef0, degree. Parameters are considered optimal when the cross-validation688estimate of the test set error is minimal.689690If there is no need to optimize a parameter, the corresponding grid step should be set to any691value less than or equal to 1. For example, to avoid optimization in gamma, set `gammaGrid.step692= 0`, `gammaGrid.minVal`, `gamma_grid.maxVal` as arbitrary numbers. In this case, the value693`Gamma` is taken for gamma.694695And, finally, if the optimization in a parameter is required but the corresponding grid is696unknown, you may call the function SVM::getDefaultGrid. To generate a grid, for example, for697gamma, call `SVM::getDefaultGrid(SVM::GAMMA)`.698699This function works for the classification (SVM::C_SVC or SVM::NU_SVC) as well as for the700regression (SVM::EPS_SVR or SVM::NU_SVR). 
If it is SVM::ONE_CLASS, no optimization is made and
    the usual %SVM with parameters specified in params is executed.
    */
    virtual bool trainAuto( const Ptr<TrainData>& data, int kFold = 10,
                    ParamGrid Cgrid = getDefaultGrid(C),
                    ParamGrid gammaGrid = getDefaultGrid(GAMMA),
                    ParamGrid pGrid = getDefaultGrid(P),
                    ParamGrid nuGrid = getDefaultGrid(NU),
                    ParamGrid coeffGrid = getDefaultGrid(COEF),
                    ParamGrid degreeGrid = getDefaultGrid(DEGREE),
                    bool balanced=false) = 0;

    /** @brief Trains an %SVM with optimal parameters

    @param samples training samples
    @param layout See ml::SampleTypes.
    @param responses vector of responses associated with the training samples.
    @param kFold Cross-validation parameter. The training set is divided into kFold subsets. One
        subset is used to test the model, the others form the train set. So, the %SVM algorithm is
        executed kFold times.
    @param Cgrid grid for C
    @param gammaGrid grid for gamma
    @param pGrid grid for p
    @param nuGrid grid for nu
    @param coeffGrid grid for coeff
    @param degreeGrid grid for degree
    @param balanced If true and the problem is 2-class classification then the method creates more
        balanced cross-validation subsets that is proportions between classes in subsets are close
        to such proportion in the whole train dataset.

    The method trains the %SVM model automatically by choosing the optimal parameters C, gamma, p,
    nu, coef0, degree. Parameters are considered optimal when the cross-validation
    estimate of the test set error is minimal.

    This function only makes use of SVM::getDefaultGrid for parameter optimization and thus only
    offers rudimentary parameter options.

    This function works for the classification (SVM::C_SVC or SVM::NU_SVC) as well as for the
    regression (SVM::EPS_SVR or SVM::NU_SVR). If it is SVM::ONE_CLASS, no optimization is made and
    the usual %SVM with parameters specified in params is executed.
    */
    CV_WRAP virtual bool trainAuto(InputArray samples,
            int layout,
            InputArray responses,
            int kFold = 10,
            Ptr<ParamGrid> Cgrid = SVM::getDefaultGridPtr(SVM::C),
            Ptr<ParamGrid> gammaGrid = SVM::getDefaultGridPtr(SVM::GAMMA),
            Ptr<ParamGrid> pGrid = SVM::getDefaultGridPtr(SVM::P),
            Ptr<ParamGrid> nuGrid = SVM::getDefaultGridPtr(SVM::NU),
            Ptr<ParamGrid> coeffGrid = SVM::getDefaultGridPtr(SVM::COEF),
            Ptr<ParamGrid> degreeGrid = SVM::getDefaultGridPtr(SVM::DEGREE),
            bool balanced=false) = 0;

    /** @brief Retrieves all the support vectors

    The method returns all the support vectors as a floating-point matrix, where support vectors are
    stored as matrix rows.
    */
    CV_WRAP virtual Mat getSupportVectors() const = 0;

    /** @brief Retrieves all the uncompressed support vectors of a linear %SVM

    The method returns all the uncompressed support vectors of a linear %SVM that the compressed
    support vector, used for prediction, was derived from. They are returned in a floating-point
    matrix, where the support vectors are stored as matrix rows.
    */
    CV_WRAP virtual Mat getUncompressedSupportVectors() const = 0;

    /** @brief Retrieves the decision function

    @param i the index of the decision function. If the problem solved is regression, 1-class or
        2-class classification, then there will be just one decision function and the index should
        always be 0. Otherwise, in the case of N-class classification, there will be \f$N(N-1)/2\f$
        decision functions.
    @param alpha the optional output vector for weights, corresponding to different support vectors.
        In the case of linear %SVM all the alpha's will be 1's.
    @param svidx the optional output vector of indices of support vectors within the matrix of
        support vectors (which can be retrieved by SVM::getSupportVectors). In the case of linear
        %SVM each decision function consists of a single "compressed" support vector.

    The method returns rho parameter of the decision function, a scalar subtracted from the weighted
    sum of kernel responses.
    */
    CV_WRAP virtual double getDecisionFunction(int i, OutputArray alpha, OutputArray svidx) const = 0;

    /** @brief Generates a grid for %SVM parameters.

    @param param_id %SVM parameters IDs that must be one of the SVM::ParamTypes. The grid is
    generated for the parameter with this ID.

    The function generates a grid for the specified parameter of the %SVM algorithm. The grid may be
    passed to the function SVM::trainAuto.
    */
    static ParamGrid getDefaultGrid( int param_id );

    /** @brief Generates a grid pointer for %SVM parameters.

    @param param_id %SVM parameters IDs that must be one of the SVM::ParamTypes. The grid is
    generated for the parameter with this ID.

    The function generates a grid pointer for the specified parameter of the %SVM algorithm.
    The grid may be passed to the function SVM::trainAuto.
    */
    CV_WRAP static Ptr<ParamGrid> getDefaultGridPtr( int param_id );

    /** Creates empty model.
    Use StatModel::train to train the model. Since %SVM has several parameters, you may want to
    find the best parameters for your problem, it can be done with SVM::trainAuto. */
    CV_WRAP static Ptr<SVM> create();

    /** @brief Loads and creates a serialized svm from a file
     *
     * Use SVM::save to serialize and store an SVM to disk.
     * Load the SVM from this file again, by calling this function with the path to the file.
     *
     * @param filepath path to serialized svm
     */
    CV_WRAP static Ptr<SVM> load(const String& filepath);
};

/****************************************************************************************\
*                              Expectation - Maximization                                *
\****************************************************************************************/

/** @brief The class implements the Expectation Maximization algorithm.

@sa @ref ml_intro_em
*/
class CV_EXPORTS_W EM : public StatModel
{
public:
    //! Type of covariation matrices
    enum Types {
        /** A scaled identity matrix \f$\mu_k * I\f$. There is the only
        parameter \f$\mu_k\f$ to be estimated for each matrix. The option may be used in special cases,
        when the constraint is relevant, or as a first step in the optimization (for example in case
        when the data is preprocessed with PCA). The results of such preliminary estimation may be
        passed again to the optimization procedure, this time with
        covMatType=EM::COV_MAT_DIAGONAL. */
        COV_MAT_SPHERICAL=0,
        /** A diagonal matrix with positive diagonal elements. The number of
        free parameters is d for each matrix. This is most commonly used option yielding good
        estimation results. */
        COV_MAT_DIAGONAL=1,
        /** A symmetric positively defined matrix. The number of free
        parameters in each matrix is about \f$d^2/2\f$. It is not recommended to use this option, unless
        there is pretty accurate initial estimation of the parameters and/or a huge number of
        training samples. */
        COV_MAT_GENERIC=2,
        COV_MAT_DEFAULT=COV_MAT_DIAGONAL
    };

    //! Default parameters
    enum {DEFAULT_NCLUSTERS=5, DEFAULT_MAX_ITERS=100};
//! The initial step
    enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0};

    /** The number of mixture components in the Gaussian mixture model.
    Default value of the parameter is EM::DEFAULT_NCLUSTERS=5. Some of %EM implementation could
    determine the optimal number of mixtures within a specified value range, but that is not the
    case in ML yet. */
    /** @see setClustersNumber */
    CV_WRAP virtual int getClustersNumber() const = 0;
    /** @copybrief getClustersNumber @see getClustersNumber */
    CV_WRAP virtual void setClustersNumber(int val) = 0;

    /** Constraint on covariance matrices which defines type of matrices.
    See EM::Types. */
    /** @see setCovarianceMatrixType */
    CV_WRAP virtual int getCovarianceMatrixType() const = 0;
    /** @copybrief getCovarianceMatrixType @see getCovarianceMatrixType */
    CV_WRAP virtual void setCovarianceMatrixType(int val) = 0;

    /** The termination criteria of the %EM algorithm.
    The %EM algorithm can be terminated by the number of iterations termCrit.maxCount (number of
    M-steps) or when relative change of likelihood logarithm is less than termCrit.epsilon. Default
    maximum number of iterations is EM::DEFAULT_MAX_ITERS=100. */
    /** @see setTermCriteria */
    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;
    /** @copybrief getTermCriteria @see getTermCriteria */
    CV_WRAP virtual void setTermCriteria(const TermCriteria &val) = 0;

    /** @brief Returns weights of the mixtures

    Returns vector with the number of elements equal to the number of mixtures.
    */
    CV_WRAP virtual Mat getWeights() const = 0;
    /** @brief Returns the cluster centers (means of the Gaussian mixture)

    Returns matrix with the number of rows equal to the number of mixtures and number of columns
    equal to the space dimensionality.
    */
    CV_WRAP virtual Mat getMeans() const = 0;
    /** @brief Returns covariation matrices

    Returns vector of covariation matrices. Number of matrices is the number of gaussian mixtures,
    each matrix is a square floating-point matrix NxN, where N is the space dimensionality.
    */
    CV_WRAP virtual void getCovs(CV_OUT std::vector<Mat>& covs) const = 0;

    /** @brief Returns posterior probabilities for the provided samples

    @param samples The input samples, floating-point matrix
    @param results The optional output \f$ nSamples \times nClusters\f$ matrix of results. It contains
    posterior probabilities for each sample from the input
    @param flags This parameter will be ignored
    */
    CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const CV_OVERRIDE = 0;

    /** @brief Returns a likelihood logarithm value and an index of the most probable mixture component
    for the given sample.

    @param sample A sample for classification. It should be a one-channel matrix of
        \f$1 \times dims\f$ or \f$dims \times 1\f$ size.
    @param probs Optional output matrix that contains posterior probabilities of each component
        given the sample. It has \f$1 \times nclusters\f$ size and CV_64FC1 type.

    The method returns a two-element double vector. Zero element is a likelihood logarithm value for
    the sample. First element is an index of the most probable mixture component for the given
    sample.
    */
    CV_WRAP virtual Vec2d predict2(InputArray sample, OutputArray probs) const = 0;

    /** @brief Estimate the Gaussian mixture parameters from a samples set.

    This variation starts with Expectation step. Initial values of the model parameters will be
    estimated by the k-means algorithm.

    Unlike many of the ML models, %EM is an unsupervised learning algorithm and it does not take
    responses (class labels or function values) as input. Instead, it computes the *Maximum
    Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the
    parameters inside the structure: \f$p_{i,k}\f$ in probs, \f$a_k\f$ in means , \f$S_k\f$ in
    covs[k], \f$\pi_k\f$ in weights , and optionally computes the output "class label" for each
    sample: \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most
    probable mixture component for each sample).

    The trained model can be used further for prediction, just like any other classifier. The
    trained model is similar to the NormalBayesClassifier.

    @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
        one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
        it will be converted to the inner matrix of such type for the further computing.
    @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
        each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type.
    @param labels The optional output "class label" for each sample:
        \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable
        mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type.
    @param probs The optional output matrix that contains posterior probabilities of each Gaussian
        mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and
        CV_64FC1 type.
    */
    CV_WRAP virtual bool trainEM(InputArray samples,
                         OutputArray logLikelihoods=noArray(),
                         OutputArray labels=noArray(),
                         OutputArray probs=noArray()) = 0;

    /** @brief Estimate the Gaussian mixture parameters from a samples set.

    This variation starts with Expectation step. You need to provide initial means \f$a_k\f$ of
    mixture components. Optionally you can pass initial weights \f$\pi_k\f$ and covariance matrices
    \f$S_k\f$ of mixture components.

    @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
        one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
        it will be converted to the inner matrix of such type for the further computing.
    @param means0 Initial means \f$a_k\f$ of mixture components. It is a one-channel matrix of
        \f$nclusters \times dims\f$ size. If the matrix does not have CV_64F type it will be
        converted to the inner matrix of such type for the further computing.
    @param covs0 The vector of initial covariance matrices \f$S_k\f$ of mixture components. Each of
        covariance matrices is a one-channel matrix of \f$dims \times dims\f$ size. If the matrices
        do not have CV_64F type they will be converted to the inner matrices of such type for the
        further computing.
    @param weights0 Initial weights \f$\pi_k\f$ of mixture components. It should be a one-channel
        floating-point matrix with \f$1 \times nclusters\f$ or \f$nclusters \times 1\f$ size.
    @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
        each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type.
    @param labels The optional output "class label" for each sample:
        \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable
        mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type.
    @param probs The optional output matrix that contains posterior probabilities of each Gaussian
        mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and
        CV_64FC1 type.
    */
    CV_WRAP virtual bool trainE(InputArray samples, InputArray means0,
                        InputArray covs0=noArray(),
                        InputArray weights0=noArray(),
                        OutputArray logLikelihoods=noArray(),
                        OutputArray labels=noArray(),
                        OutputArray probs=noArray()) = 0;

    /** @brief Estimate the Gaussian mixture parameters from a samples set.

    This variation starts with Maximization step. You need to provide initial probabilities
    \f$p_{i,k}\f$ to use this option.

    @param samples Samples from which the Gaussian mixture model will be estimated. It should be a
        one-channel matrix, each row of which is a sample. If the matrix does not have CV_64F type
        it will be converted to the inner matrix of such type for the further computing.
    @param probs0 the initial probabilities \f$p_{i,k}\f$ of sample i to belong to mixture component
        k; presumably a \f$nsamples \times nclusters\f$ floating-point matrix — TODO confirm against
        the implementation.
    @param logLikelihoods The optional output matrix that contains a likelihood logarithm value for
        each sample. It has \f$nsamples \times 1\f$ size and CV_64FC1 type.
    @param labels The optional output "class label" for each sample:
        \f$\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N\f$ (indices of the most probable
        mixture component for each sample). It has \f$nsamples \times 1\f$ size and CV_32SC1 type.
    @param probs The optional output matrix that contains posterior probabilities of each Gaussian
        mixture component given the each sample. It has \f$nsamples \times nclusters\f$ size and
        CV_64FC1 type.
    */
    CV_WRAP virtual bool trainM(InputArray samples, InputArray probs0,
                        OutputArray logLikelihoods=noArray(),
                        OutputArray labels=noArray(),
                        OutputArray probs=noArray()) = 0;

    /** Creates empty %EM model.
    The model should be trained then using StatModel::train(traindata, flags) method. Alternatively, you
    can use one of the EM::train\* methods or load it from file using Algorithm::load\<EM\>(filename).
    */
    CV_WRAP static Ptr<EM> create();

    /** @brief Loads and creates a serialized EM from a file
     *
     * Use EM::save to serialize and store an EM to disk.
     * Load the EM from this file again, by calling this function with the path to the file.
     * Optionally specify the node for the file containing the classifier
     *
     * @param filepath path to serialized EM
     * @param nodeName name of node containing the classifier
     */
    CV_WRAP static Ptr<EM> load(const String& filepath , const String& nodeName = String());
};

/****************************************************************************************\
*                                      Decision Tree                                     *
\****************************************************************************************/

/** @brief The class represents a single decision tree or a collection of decision trees.

The current public interface of the class allows user to train only a single decision tree, however
the class is capable of storing multiple decision trees and using them for prediction (by summing
responses or using a voting schemes), and the derived from DTrees classes (such as RTrees and Boost)
use this capability to implement decision tree ensembles.

@sa @ref ml_intro_trees
*/
class CV_EXPORTS_W DTrees : public StatModel
{
public:
    /** Predict options */
    enum Flags { PREDICT_AUTO=0, PREDICT_SUM=(1<<8), PREDICT_MAX_VOTE=(2<<8), PREDICT_MASK=(3<<8) };

    /** Cluster possible values of a categorical variable into K\<=maxCategories clusters to
    find a suboptimal split.
    If a discrete variable, on which the training procedure tries to make a split, takes more than
    maxCategories values, the precise best subset estimation may take a very long time because the
    algorithm is exponential.
Instead, many decision trees engines (including our implementation)
    try to find sub-optimal split in this case by clustering all the samples into maxCategories
    clusters that is some categories are merged together. The clustering is applied only in n \>
    2-class classification problems for categorical variables with N \> max_categories possible
    values. In case of regression and 2-class classification the optimal split can be found
    efficiently without employing clustering, thus the parameter is not used in these cases.
    Default value is 10.*/
    /** @see setMaxCategories */
    CV_WRAP virtual int getMaxCategories() const = 0;
    /** @copybrief getMaxCategories @see getMaxCategories */
    CV_WRAP virtual void setMaxCategories(int val) = 0;

    /** The maximum possible depth of the tree.
    That is the training algorithms attempts to split a node while its depth is less than maxDepth.
    The root node has zero depth. The actual depth may be smaller if the other termination criteria
    are met (see the outline of the training procedure @ref ml_intro_trees "here"), and/or if the
    tree is pruned. Default value is INT_MAX.*/
    /** @see setMaxDepth */
    CV_WRAP virtual int getMaxDepth() const = 0;
    /** @copybrief getMaxDepth @see getMaxDepth */
    CV_WRAP virtual void setMaxDepth(int val) = 0;

    /** If the number of samples in a node is less than this parameter then the node will not be split.

    Default value is 10.*/
    /** @see setMinSampleCount */
    CV_WRAP virtual int getMinSampleCount() const = 0;
    /** @copybrief getMinSampleCount @see getMinSampleCount */
    CV_WRAP virtual void setMinSampleCount(int val) = 0;

    /** If CVFolds \> 1 then algorithms prunes the built decision tree using K-fold
    cross-validation procedure where K is equal to CVFolds.
    Default value is 10.*/
    /** @see setCVFolds */
    CV_WRAP virtual int getCVFolds() const = 0;
    /** @copybrief getCVFolds @see getCVFolds */
    CV_WRAP virtual void setCVFolds(int val) = 0;

    /** If true then surrogate splits will be built.
    These splits allow to work with missing data and compute variable importance correctly.
    Default value is false.
    @note currently it's not implemented.*/
    /** @see setUseSurrogates */
    CV_WRAP virtual bool getUseSurrogates() const = 0;
    /** @copybrief getUseSurrogates @see getUseSurrogates */
    CV_WRAP virtual void setUseSurrogates(bool val) = 0;

    /** If true then a pruning will be harsher.
    This will make a tree more compact and more resistant to the training data noise but a bit less
    accurate. Default value is true.*/
    /** @see setUse1SERule */
    CV_WRAP virtual bool getUse1SERule() const = 0;
    /** @copybrief getUse1SERule @see getUse1SERule */
    CV_WRAP virtual void setUse1SERule(bool val) = 0;

    /** If true then pruned branches are physically removed from the tree.
    Otherwise they are retained and it is possible to get results from the original unpruned (or
    pruned less aggressively) tree. Default value is true.*/
    /** @see setTruncatePrunedTree */
    CV_WRAP virtual bool getTruncatePrunedTree() const = 0;
    /** @copybrief getTruncatePrunedTree @see getTruncatePrunedTree */
    CV_WRAP virtual void setTruncatePrunedTree(bool val) = 0;

    /** Termination criteria for regression trees.
    If all absolute differences between an estimated value in a node and values of train samples
    in this node are less than this parameter then the node will not be split further. Default
    value is 0.01f*/
    /** @see setRegressionAccuracy */
    CV_WRAP virtual float getRegressionAccuracy() const = 0;
    /** @copybrief getRegressionAccuracy @see getRegressionAccuracy */
    CV_WRAP virtual void setRegressionAccuracy(float val) = 0;

    /** @brief The array of a priori class probabilities, sorted by the class label value.

    The parameter can be used to tune the decision tree preferences toward a certain class. For
    example, if you want to detect some rare anomaly occurrence, the training base will likely
    contain much more normal cases than anomalies, so a very good classification performance
    will be achieved just by considering every case as normal. To avoid this, the priors can be
    specified, where the anomaly probability is artificially increased (up to 0.5 or even
    greater), so the weight of the misclassified anomalies becomes much bigger, and the tree is
    adjusted properly.

    You can also think about this parameter as weights of prediction categories which determine
    relative weights that you give to misclassification. That is, if the weight of the first
    category is 1 and the weight of the second category is 10, then each mistake in predicting
    the second category is equivalent to making 10 mistakes in predicting the first category.
    Default value is empty Mat.*/
    /** @see setPriors */
    CV_WRAP virtual cv::Mat getPriors() const = 0;
    /** @copybrief getPriors @see getPriors */
    CV_WRAP virtual void setPriors(const cv::Mat &val) = 0;

    /** @brief The class represents a decision tree node.
    */
    class CV_EXPORTS Node
    {
    public:
        Node();
        double value; //!< Value at the node: a class label in case of classification or estimated
                      //!< function value in case of regression.
        int classIdx; //!< Class index normalized to 0..class_count-1 range and assigned to the
                      //!< node. It is used internally in classification trees and tree ensembles.
        int parent; //!< Index of the parent node
        int left; //!< Index of the left child node
        int right; //!< Index of right child node
        int defaultDir; //!< Default direction where to go (-1: left or +1: right). It helps in the
                        //!< case of missing values.
        int split; //!< Index of the first split
    };

    /** @brief The class represents split in a decision tree.
    */
    class CV_EXPORTS Split
    {
    public:
        Split();
        int varIdx; //!< Index of variable on which the split is created.
        bool inversed; //!< If true, then the inverse split rule is used (i.e. left and right
                       //!< branches are exchanged in the rule expressions below).
        float quality; //!< The split quality, a positive number. It is used to choose the best split.
        int next; //!< Index of the next split in the list of splits for the node
        float c; /**< The threshold value in case of split on an ordered variable.
                      The rule is:
                      @code{.none}
                      if var_value < c
                        then next_node <- left
                        else next_node <- right
                      @endcode */
        int subsetOfs; /**< Offset of the bitset used by the split on a categorical variable.
                            The rule is:
                            @code{.none}
                            if bitset[var_value] == 1
                                then next_node <- left
                                else next_node <- right
                            @endcode */
    };

    /** @brief Returns indices of root nodes
    */
    virtual const std::vector<int>& getRoots() const = 0;
    /** @brief Returns all the nodes

    all the node indices are indices in the returned vector
    */
    virtual const std::vector<Node>& getNodes() const = 0;
    /** @brief Returns all the splits

    all the split indices are indices in the returned vector
    */
    virtual const std::vector<Split>& getSplits() const = 0;
    /** @brief Returns all the bitsets for categorical splits

    Split::subsetOfs is an offset in the returned vector
    */
    virtual const std::vector<int>& getSubsets() const = 0;

    /** @brief Creates the empty model

    The static method creates empty decision tree with the specified parameters. It should be then
    trained using train method (see StatModel::train). Alternatively, you can load the model from
    file using Algorithm::load\<DTrees\>(filename).
    */
    CV_WRAP static Ptr<DTrees> create();

    /** @brief Loads and creates a serialized DTrees from a file
     *
     * Use DTree::save to serialize and store a DTree to disk.
     * Load the DTree from this file again, by calling this function with the path to the file.
     * Optionally specify the node for the file containing the classifier
     *
     * @param filepath path to serialized DTree
     * @param nodeName name of node containing the classifier
     */
    CV_WRAP static Ptr<DTrees> load(const String& filepath , const String& nodeName = String());
};

/****************************************************************************************\
*                                  Random Trees Classifier                               *
\****************************************************************************************/

/** @brief The class implements the random forest predictor.

@sa @ref ml_intro_rtrees
*/
class CV_EXPORTS_W RTrees : public DTrees
{
public:

    /** If true then variable importance will be calculated and then it can be retrieved by RTrees::getVarImportance.
    Default value is false.*/
    /** @see setCalculateVarImportance */
    CV_WRAP virtual bool getCalculateVarImportance() const = 0;
    /** @copybrief getCalculateVarImportance @see getCalculateVarImportance */
    CV_WRAP virtual void setCalculateVarImportance(bool val) = 0;

    /** The size of the randomly selected subset of features at each tree node and that are used
    to find the best split(s).
    If you set it to 0 then the size will be set to the square root of the total number of
    features.
Default value is 0.*/
    /** @see setActiveVarCount */
    CV_WRAP virtual int getActiveVarCount() const = 0;
    /** @copybrief getActiveVarCount @see getActiveVarCount */
    CV_WRAP virtual void setActiveVarCount(int val) = 0;

    /** The termination criteria that specifies when the training algorithm stops.
    Either when the specified number of trees is trained and added to the ensemble or when
    sufficient accuracy (measured as OOB error) is achieved. Typically the more trees you have the
    better the accuracy. However, the improvement in accuracy generally diminishes and asymptotes
    pass a certain number of trees. Also to keep in mind, the number of tree increases the
    prediction time linearly. Default value is TermCriteria(TermCriteria::MAX_ITERS +
    TermCriteria::EPS, 50, 0.1)*/
    /** @see setTermCriteria */
    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;
    /** @copybrief getTermCriteria @see getTermCriteria */
    CV_WRAP virtual void setTermCriteria(const TermCriteria &val) = 0;

    /** Returns the variable importance array.
    The method returns the variable importance vector, computed at the training stage when
    CalculateVarImportance is set to true. If this flag was set to false, the empty matrix is
    returned.
    */
    CV_WRAP virtual Mat getVarImportance() const = 0;

    /** Returns the result of each individual tree in the forest.
    In case the model is a regression problem, the method will return each of the trees'
    results for each of the sample cases. If the model is a classifier, it will return
    a Mat with samples + 1 rows, where the first row gives the class number and the
    following rows return the votes each class had for each sample.
    @param samples Array containing the samples for which votes will be calculated.
    @param results Array where the result of the calculation will be written.
    @param flags Flags for defining the type of RTrees.
    */
    CV_WRAP virtual void getVotes(InputArray samples, OutputArray results, int flags) const = 0;

    /** Creates the empty model.
    Use StatModel::train to create and train the model, or
    Algorithm::load to load the pre-trained model.
    */
    CV_WRAP static Ptr<RTrees> create();

    /** @brief Loads and creates a serialized RTree from a file
     *
     * Use RTree::save to serialize and store an RTree to disk.
     * Load the RTree from this file again, by calling this function with the path to the file.
     * Optionally specify the node for the file containing the classifier
     *
     * @param filepath path to serialized RTree
     * @param nodeName name of node containing the classifier
     */
    CV_WRAP static Ptr<RTrees> load(const String& filepath , const String& nodeName = String());
};

/****************************************************************************************\
*                                  Boosted tree classifier                               *
\****************************************************************************************/

/** @brief Boosted tree classifier derived from DTrees

@sa @ref ml_intro_boost
*/
class CV_EXPORTS_W Boost : public DTrees
{
public:
    /** Type of the boosting algorithm.
    See Boost::Types. Default value is Boost::REAL. */
    /** @see setBoostType */
    CV_WRAP virtual int getBoostType() const = 0;
    /** @copybrief getBoostType @see getBoostType */
    CV_WRAP virtual void setBoostType(int val) = 0;

    /** The number of weak classifiers.
    Default value is 100.
*/
    /** @see setWeakCount */
    CV_WRAP virtual int getWeakCount() const = 0;
    /** @copybrief getWeakCount @see getWeakCount */
    CV_WRAP virtual void setWeakCount(int val) = 0;

    /** A threshold between 0 and 1 used to save computational time.
    Samples with summary weight \f$\leq 1 - weight_trim_rate\f$ do not participate in the *next*
    iteration of training. Set this parameter to 0 to turn off this functionality. Default value is 0.95.*/
    /** @see setWeightTrimRate */
    CV_WRAP virtual double getWeightTrimRate() const = 0;
    /** @copybrief getWeightTrimRate @see getWeightTrimRate */
    CV_WRAP virtual void setWeightTrimRate(double val) = 0;

    /** Boosting type.
    Gentle AdaBoost and Real AdaBoost are often the preferable choices. */
    enum Types {
        DISCRETE=0, //!< Discrete AdaBoost.
        REAL=1, //!< Real AdaBoost. It is a technique that utilizes confidence-rated predictions
                //!< and works well with categorical data.
        LOGIT=2, //!< LogitBoost. It can produce good regression fits.
        GENTLE=3 //!< Gentle AdaBoost. It puts less weight on outlier data points and for that
                 //!< reason is often good with regression data.
    };

    /** Creates the empty model.
    Use StatModel::train to train the model, Algorithm::load\<Boost\>(filename) to load the pre-trained model. */
    CV_WRAP static Ptr<Boost> create();

    /** @brief Loads and creates a serialized Boost from a file
     *
     * Use Boost::save to serialize and store a Boost to disk.
     * Load the Boost from this file again, by calling this function with the path to the file.
     * Optionally specify the node for the file containing the classifier
     *
     * @param filepath path to serialized Boost
     * @param nodeName name of node containing the classifier
     */
    CV_WRAP static Ptr<Boost> load(const String& filepath , const String& nodeName = String());
};

/****************************************************************************************\
*                                   Gradient Boosted Trees                               *
\****************************************************************************************/

/*class CV_EXPORTS_W GBTrees : public DTrees
{
public:
    struct CV_EXPORTS_W_MAP Params : public DTrees::Params
    {
        CV_PROP_RW int weakCount;
        CV_PROP_RW int lossFunctionType;
        CV_PROP_RW float subsamplePortion;
        CV_PROP_RW float shrinkage;

        Params();
        Params( int lossFunctionType, int weakCount, float shrinkage,
            float subsamplePortion, int maxDepth, bool useSurrogates );
    };

    enum {SQUARED_LOSS=0, ABSOLUTE_LOSS, HUBER_LOSS=3, DEVIANCE_LOSS};

    virtual void setK(int k) = 0;

    virtual float predictSerial( InputArray samples,
        OutputArray weakResponses, int flags) const = 0;

    static Ptr<GBTrees> create(const Params& p);
};*/

/****************************************************************************************\
*                              Artificial Neural Networks (ANN)                          *
\****************************************************************************************/

/////////////////////////////////// Multi-Layer Perceptrons //////////////////////////////

/** @brief Artificial Neural Networks - Multi-Layer Perceptrons.

Unlike many other models in ML that are constructed and trained at once, in the MLP model these
steps are
separated. First, a network with the specified topology is created using the non-default
constructor or the method ANN_MLP::create. All the weights are set to zeros. Then, the network is
trained using a set of input and output vectors. The training procedure can be repeated more than
once, that is, the weights can be adjusted based on the new training data.

Additional flags for StatModel::train are available: ANN_MLP::TrainFlags.

@sa @ref ml_intro_ann
*/
class CV_EXPORTS_W ANN_MLP : public StatModel
{
public:
    /** Available training methods */
    enum TrainingMethods {
        BACKPROP=0, //!< The back-propagation algorithm.
        RPROP = 1, //!< The RPROP algorithm. See @cite RPROP93 for details.
        ANNEAL = 2 //!< The simulated annealing algorithm. See @cite Kirkpatrick83 for details.
    };

    /** Sets training method and common parameters.
    @param method Default value is ANN_MLP::RPROP. See ANN_MLP::TrainingMethods.
    @param param1 passed to setRpropDW0 for ANN_MLP::RPROP and to setBackpropWeightScale for ANN_MLP::BACKPROP and to initialT for ANN_MLP::ANNEAL.
    @param param2 passed to setRpropDWMin for ANN_MLP::RPROP and to setBackpropMomentumScale for ANN_MLP::BACKPROP and to finalT for ANN_MLP::ANNEAL.
    */
    CV_WRAP virtual void setTrainMethod(int method, double param1 = 0, double param2 = 0) = 0;

    /** Returns the current training method */
    CV_WRAP virtual int getTrainMethod() const = 0;

    /** Initialize the activation function for each neuron.
    Currently the default and the only fully supported activation function is ANN_MLP::SIGMOID_SYM.
    @param type The type of activation function. See ANN_MLP::ActivationFunctions.
    @param param1 The first parameter of the activation function, \f$\alpha\f$. Default value is 0.
    @param param2 The second parameter of the activation function, \f$\beta\f$. Default value is 0.
    */
    CV_WRAP virtual void setActivationFunction(int type, double param1 = 0, double param2 = 0) = 0;

    /** Integer vector specifying the number of neurons in each layer including the input and output layers.
    The very first element specifies the number of elements in the input layer.
    The last element - number of elements in the output layer. Default value is empty Mat.
    @sa getLayerSizes */
    CV_WRAP virtual void setLayerSizes(InputArray _layer_sizes) = 0;

    /** Integer vector specifying the number of neurons in each layer including the input and output layers.
    The very first element specifies the number of elements in the input layer.
    The last element - number of elements in the output layer.
    @sa setLayerSizes */
    CV_WRAP virtual cv::Mat getLayerSizes() const = 0;

    /** Termination criteria of the training algorithm.
    You can specify the maximum number of iterations (maxCount) and/or how much the error could
    change between the iterations to make the algorithm continue (epsilon). Default value is
    TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01).*/
    /** @see setTermCriteria */
    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;
    /** @copybrief getTermCriteria @see getTermCriteria */
    CV_WRAP virtual void setTermCriteria(TermCriteria val) = 0;

    /** BPROP: Strength of the weight gradient term.
    The recommended value is about 0.1. Default value is 0.1.*/
    /** @see setBackpropWeightScale */
    CV_WRAP virtual double getBackpropWeightScale() const = 0;
    /** @copybrief getBackpropWeightScale @see getBackpropWeightScale */
    CV_WRAP virtual void setBackpropWeightScale(double val) = 0;

    /** BPROP: Strength of the momentum term (the difference between weights on the 2 previous iterations).
    This parameter provides some inertia to smooth the random fluctuations of the weights. It can
    vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough.
    Default value is 0.1.*/
    /** @see setBackpropMomentumScale */
    CV_WRAP virtual double getBackpropMomentumScale() const = 0;
    /** @copybrief getBackpropMomentumScale @see getBackpropMomentumScale */
    CV_WRAP virtual void setBackpropMomentumScale(double val) = 0;

    /** RPROP: Initial value \f$\Delta_0\f$ of update-values \f$\Delta_{ij}\f$.
    Default value is 0.1.*/
    /** @see setRpropDW0 */
    CV_WRAP virtual double getRpropDW0() const = 0;
    /** @copybrief getRpropDW0 @see getRpropDW0 */
    CV_WRAP virtual void setRpropDW0(double val) = 0;

    /** RPROP: Increase factor \f$\eta^+\f$.
    It must be \>1. Default value is 1.2.*/
    /** @see setRpropDWPlus */
    CV_WRAP virtual double getRpropDWPlus() const = 0;
    /** @copybrief getRpropDWPlus @see getRpropDWPlus */
    CV_WRAP virtual void setRpropDWPlus(double val) = 0;

    /** RPROP: Decrease factor \f$\eta^-\f$.
    It must be \<1. Default value is 0.5.*/
    /** @see setRpropDWMinus */
    CV_WRAP virtual double getRpropDWMinus() const = 0;
    /** @copybrief getRpropDWMinus @see getRpropDWMinus */
    CV_WRAP virtual void setRpropDWMinus(double val) = 0;

    /** RPROP: Update-values lower limit \f$\Delta_{min}\f$.
    It must be positive. Default value is FLT_EPSILON.*/
    /** @see setRpropDWMin */
    CV_WRAP virtual double getRpropDWMin() const = 0;
    /** @copybrief getRpropDWMin @see getRpropDWMin */
    CV_WRAP virtual void setRpropDWMin(double val) = 0;

    /** RPROP: Update-values upper limit \f$\Delta_{max}\f$.
    It must be \>1. Default value is 50.*/
    /** @see setRpropDWMax */
    CV_WRAP virtual double getRpropDWMax() const = 0;
    /** @copybrief getRpropDWMax @see getRpropDWMax */
    CV_WRAP virtual void setRpropDWMax(double val) = 0;

    /** ANNEAL: Update initial temperature.
    It must be \>=0. Default value is 10.*/
    /** @see setAnnealInitialT */
    CV_WRAP virtual double getAnnealInitialT() const = 0;
    /** @copybrief getAnnealInitialT @see getAnnealInitialT */
    CV_WRAP virtual void setAnnealInitialT(double val) = 0;

    /** ANNEAL: Update final temperature.
    It must be \>=0 and less than initialT. Default value is 0.1.*/
    /** @see setAnnealFinalT */
    CV_WRAP virtual double getAnnealFinalT() const = 0;
    /** @copybrief getAnnealFinalT @see getAnnealFinalT */
    CV_WRAP virtual void setAnnealFinalT(double val) = 0;

    /** ANNEAL: Update cooling ratio.
    It must be \>0 and less than 1. Default value is 0.95.*/
    /** @see setAnnealCoolingRatio */
    CV_WRAP virtual double getAnnealCoolingRatio() const = 0;
    /** @copybrief getAnnealCoolingRatio @see getAnnealCoolingRatio */
    CV_WRAP virtual void setAnnealCoolingRatio(double val) = 0;

    /** ANNEAL: Update iteration per step.
    It must be \>0 . Default value is 10.*/
    /** @see setAnnealItePerStep */
    CV_WRAP virtual int getAnnealItePerStep() const = 0;
    /** @copybrief getAnnealItePerStep @see getAnnealItePerStep */
    CV_WRAP virtual void setAnnealItePerStep(int val) = 0;

    /** @brief Set/initialize anneal RNG */
    virtual void setAnnealEnergyRNG(const RNG& rng) = 0;

    /** possible activation functions */
    enum ActivationFunctions {
        /** Identity function: \f$f(x)=x\f$ */
        IDENTITY = 0,
        /** Symmetrical sigmoid: \f$f(x)=\beta*(1-e^{-\alpha x})/(1+e^{-\alpha x})\f$
        @note
        If you are using the default sigmoid activation function with the default parameter values
        fparam1=0 and fparam2=0 then the function used is y = 1.7159\*tanh(2/3 \* x), so the output
        will range from [-1.7159, 1.7159], instead of [0,1].*/
        SIGMOID_SYM = 1,
        /** Gaussian function: \f$f(x)=\beta e^{-\alpha x*x}\f$ */
        GAUSSIAN = 2,
        /** ReLU function: \f$f(x)=max(0,x)\f$ */
        RELU = 3,
        /** Leaky ReLU function: for x>0 \f$f(x)=x \f$ and x<=0 \f$f(x)=\alpha x \f$*/
        LEAKYRELU= 4
    };

    /** Train options */
    enum TrainFlags {
        /** Update the network weights, rather than compute them from scratch. In the latter case
        the weights are initialized using the Nguyen-Widrow algorithm. */
        UPDATE_WEIGHTS = 1,
        /** Do not normalize the input vectors. If this flag is not set, the training algorithm
        normalizes each input feature independently, shifting its mean value to 0 and making the
        standard deviation equal to 1. If the network is assumed to be updated frequently, the new
        training data could be much different from original one. In this case, you should take care
        of proper normalization. */
        NO_INPUT_SCALE = 2,
        /** Do not normalize the output vectors. If the flag is not set, the training algorithm
        normalizes each output feature independently, by transforming it to the certain range
        depending on the used activation function. */
        NO_OUTPUT_SCALE = 4
    };

    /** Returns the weight matrix of the given layer (layerIdx is in [0, getLayerSizes().total()]). */
    CV_WRAP virtual Mat getWeights(int layerIdx) const = 0;

    /** @brief Creates empty model

    Use StatModel::train to train the model, Algorithm::load\<ANN_MLP\>(filename) to load the pre-trained model.
    Note that the train method has optional flags: ANN_MLP::TrainFlags.
    */
    CV_WRAP static Ptr<ANN_MLP> create();

    /** @brief Loads and creates a serialized ANN from a file
    *
    * Use ANN_MLP::save to serialize and store an ANN to disk.
    * Load the ANN from this file again, by calling this function with the path to the file.
    *
    * @param filepath path to serialized ANN
    */
    CV_WRAP static Ptr<ANN_MLP> load(const String& filepath);

};

#ifndef DISABLE_OPENCV_3_COMPATIBILITY
typedef ANN_MLP ANN_MLP_ANNEAL;
#endif

/****************************************************************************************\
*                           Logistic Regression                                          *
\****************************************************************************************/

/** @brief Implements Logistic Regression
classifier.

@sa @ref ml_intro_lr
*/
class CV_EXPORTS_W LogisticRegression : public StatModel
{
public:

    /** Learning rate. */
    /** @see setLearningRate */
    CV_WRAP virtual double getLearningRate() const = 0;
    /** @copybrief getLearningRate @see getLearningRate */
    CV_WRAP virtual void setLearningRate(double val) = 0;

    /** Number of iterations. */
    /** @see setIterations */
    CV_WRAP virtual int getIterations() const = 0;
    /** @copybrief getIterations @see getIterations */
    CV_WRAP virtual void setIterations(int val) = 0;

    /** Kind of regularization to be applied. See LogisticRegression::RegKinds. */
    /** @see setRegularization */
    CV_WRAP virtual int getRegularization() const = 0;
    /** @copybrief getRegularization @see getRegularization */
    CV_WRAP virtual void setRegularization(int val) = 0;

    /** Kind of training method used. See LogisticRegression::Methods. */
    /** @see setTrainMethod */
    CV_WRAP virtual int getTrainMethod() const = 0;
    /** @copybrief getTrainMethod @see getTrainMethod */
    CV_WRAP virtual void setTrainMethod(int val) = 0;

    /** Specifies the number of training samples taken in each step of Mini-Batch Gradient
    Descent. Will only be used if using LogisticRegression::MINI_BATCH training algorithm. It
    has to take values less than the total number of training samples. */
    /** @see setMiniBatchSize */
    CV_WRAP virtual int getMiniBatchSize() const = 0;
    /** @copybrief getMiniBatchSize @see getMiniBatchSize */
    CV_WRAP virtual void setMiniBatchSize(int val) = 0;

    /** Termination criteria of the algorithm. */
    /** @see setTermCriteria */
    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;
    /** @copybrief getTermCriteria @see getTermCriteria */
    CV_WRAP virtual void setTermCriteria(TermCriteria val) = 0;

    //! Regularization kinds
    enum RegKinds {
        REG_DISABLE = -1, //!< Regularization disabled
        REG_L1 = 0, //!< %L1 norm
        REG_L2 = 1 //!< %L2 norm
    };

    //! Training methods
    enum Methods {
        BATCH = 0,
        MINI_BATCH = 1 //!< Set MiniBatchSize to a positive integer when using this method.
    };

    /** @brief Predicts responses for input samples and returns a float type.

    @param samples The input data for the prediction algorithm. Matrix [m x n], where each row
    contains variables (features) of one object being classified. Should have data type CV_32F.
    @param results Predicted labels as a column matrix of type CV_32S.
    @param flags Not used.
    */
    CV_WRAP virtual float predict( InputArray samples, OutputArray results=noArray(), int flags=0 ) const CV_OVERRIDE = 0;

    /** @brief This function returns the trained parameters arranged across rows.

    For a two class classification problem, it returns a row matrix. It returns learnt parameters of
    the Logistic Regression as a matrix of type CV_32F.
    */
    CV_WRAP virtual Mat get_learnt_thetas() const = 0;

    /** @brief Creates empty model.

    Creates Logistic Regression model with parameters given.
    */
    CV_WRAP static Ptr<LogisticRegression> create();

    /** @brief Loads and creates a serialized LogisticRegression from a file
    *
    * Use LogisticRegression::save to serialize and store a LogisticRegression to disk.
    * Load the LogisticRegression from this file again, by calling this function with the path to the file.
    * Optionally specify the node for the file containing the classifier
    *
    * @param filepath path to serialized LogisticRegression
    * @param nodeName name of node containing the classifier
    */
    CV_WRAP static Ptr<LogisticRegression> load(const String& filepath , const String& nodeName = String());
};


/****************************************************************************************\
*                        Stochastic Gradient Descent
SVM Classifier                      *
\****************************************************************************************/

/*!
@brief Stochastic Gradient Descent SVM classifier

SVMSGD provides a fast and easy-to-use implementation of the SVM classifier using the Stochastic Gradient Descent approach,
as presented in @cite bottou2010large.

The classifier has following parameters:
- model type,
- margin type,
- margin regularization (\f$\lambda\f$),
- initial step size (\f$\gamma_0\f$),
- step decreasing power (\f$c\f$),
- and termination criteria.

The model type may have one of the following values: \ref SGD and \ref ASGD.

- \ref SGD is the classic version of SVMSGD classifier: every next step is calculated by the formula
  \f[w_{t+1} = w_t - \gamma(t) \frac{dQ_i}{dw} |_{w = w_t}\f]
  where
  - \f$w_t\f$ is the weights vector for decision function at step \f$t\f$,
  - \f$\gamma(t)\f$ is the step size of model parameters at the iteration \f$t\f$, it is decreased on each step by the formula
    \f$\gamma(t) = \gamma_0  (1 + \lambda  \gamma_0 t) ^ {-c}\f$
  - \f$Q_i\f$ is the target functional from SVM task for sample with number \f$i\f$, this sample is chosen stochastically on each step of the algorithm.

- \ref ASGD is Average Stochastic Gradient Descent SVM Classifier. ASGD classifier averages weights vector on each step of algorithm by the formula
\f$\widehat{w}_{t+1} = \frac{t}{1+t}\widehat{w}_{t} + \frac{1}{1+t}w_{t+1}\f$

The recommended model type is ASGD (following @cite bottou2010large).

The margin type may have one of the following values: \ref SOFT_MARGIN or \ref HARD_MARGIN.

- You should use \ref HARD_MARGIN type, if you have linearly separable sets.
- You should use \ref SOFT_MARGIN type, if you have non-linearly separable sets or sets with outliers.
- In the general case (if you know nothing about linear separability of your sets), use SOFT_MARGIN.

The other parameters may be described as follows:
- Margin regularization parameter is responsible for weights decreasing at each step and for the strength of restrictions on outliers
  (the less the parameter, the less probability that an outlier will be ignored).
  Recommended value for SGD model is 0.0001, for ASGD model is 0.00001.

- Initial step size parameter is the initial value for the step size \f$\gamma(t)\f$.
  You will have to find the best initial step for your problem.

- Step decreasing power is the power parameter for \f$\gamma(t)\f$ decreasing by the formula, mentioned above.
  Recommended value for SGD model is 1, for ASGD model is 0.75.

- Termination criteria can be TermCriteria::COUNT, TermCriteria::EPS or TermCriteria::COUNT + TermCriteria::EPS.
  You will have to find the best termination criteria for your problem.

Note that the parameters margin regularization, initial step size, and step decreasing power should be positive.

To use SVMSGD algorithm do as follows:

- first, create the SVMSGD object. The algorithm will set optimal parameters by default, but you can set your own parameters via functions setSvmsgdType(),
  setMarginType(), setMarginRegularization(), setInitialStepSize(), and setStepDecreasingPower().

- then the SVM model can be trained using the train features and the correspondent labels by the method train().

- after that, the label of a new feature vector can be predicted using the method predict().

@code
// Create empty object
cv::Ptr<SVMSGD> svmsgd = SVMSGD::create();

// Train the Stochastic Gradient Descent SVM
svmsgd->train(trainData);

// Predict labels for the new samples
svmsgd->predict(samples, responses);
@endcode

*/

class CV_EXPORTS_W SVMSGD : public cv::ml::StatModel
{
public:

    /** SVMSGD type.
    ASGD is often the preferable choice. */
    enum SvmsgdType
    {
        SGD, //!< Stochastic Gradient Descent
        ASGD //!< Average Stochastic Gradient Descent
    };

    /** Margin type.*/
    enum MarginType
    {
        SOFT_MARGIN, //!< General case, suits to the case of non-linearly separable sets, allows outliers.
        HARD_MARGIN //!< More accurate for the case of linearly separable sets.
    };

    /**
     * @return the weights of the trained model (decision function f(x) = weights * x + shift).
    */
    CV_WRAP virtual Mat getWeights() = 0;

    /**
     * @return the shift of the trained model (decision function f(x) = weights * x + shift).
    */
    CV_WRAP virtual float getShift() = 0;

    /** @brief Creates empty model.
     * Use StatModel::train to train the model. Since %SVMSGD has several parameters, you may want to
     * find the best parameters for your problem or use setOptimalParameters() to set some default parameters.
    */
    CV_WRAP static Ptr<SVMSGD> create();

    /** @brief Loads and creates a serialized SVMSGD from a file
    *
    * Use SVMSGD::save to serialize and store an SVMSGD to disk.
    * Load the SVMSGD from this file again, by calling this function with the path to the file.
    * Optionally specify the node for the file containing the classifier
    *
    * @param filepath path to serialized SVMSGD
    * @param nodeName name of node containing the classifier
    */
    CV_WRAP static Ptr<SVMSGD> load(const String& filepath , const String& nodeName = String());

    /** @brief Function sets optimal parameters values for chosen SVM SGD model.
    * @param svmsgdType is the type of SVMSGD classifier.
    * @param marginType is the type of margin constraint.
    */
    CV_WRAP virtual void setOptimalParameters(int svmsgdType = SVMSGD::ASGD, int marginType = SVMSGD::SOFT_MARGIN) = 0;

    /** @brief %Algorithm type, one of SVMSGD::SvmsgdType. */
    /** @see setSvmsgdType */
    CV_WRAP virtual int getSvmsgdType() const = 0;
    /** @copybrief getSvmsgdType @see getSvmsgdType */
    CV_WRAP virtual void setSvmsgdType(int svmsgdType) = 0;

    /** @brief %Margin type, one of SVMSGD::MarginType. */
    /** @see setMarginType */
    CV_WRAP virtual int getMarginType() const = 0;
    /** @copybrief getMarginType @see getMarginType */
    CV_WRAP virtual void setMarginType(int marginType) = 0;

    /** @brief Parameter marginRegularization of a %SVMSGD optimization problem. */
    /** @see setMarginRegularization */
    CV_WRAP virtual float getMarginRegularization() const = 0;
    /** @copybrief getMarginRegularization @see getMarginRegularization */
    CV_WRAP virtual void setMarginRegularization(float marginRegularization) = 0;

    /** @brief Parameter initialStepSize of a %SVMSGD optimization problem. */
    /** @see setInitialStepSize */
    CV_WRAP virtual float getInitialStepSize() const = 0;
    /** @copybrief getInitialStepSize @see getInitialStepSize */
    CV_WRAP virtual void setInitialStepSize(float InitialStepSize) = 0;

    /** @brief Parameter stepDecreasingPower of a %SVMSGD optimization problem. */
    /** @see setStepDecreasingPower */
    CV_WRAP virtual float getStepDecreasingPower() const = 0;
    /** @copybrief getStepDecreasingPower @see getStepDecreasingPower */
    CV_WRAP virtual void setStepDecreasingPower(float stepDecreasingPower) = 0;

    /** @brief Termination criteria of the training algorithm.
    You can specify the maximum number of iterations (maxCount) and/or how much the error could
    change between the iterations to make the algorithm continue (epsilon).*/
    /** @see setTermCriteria */
    CV_WRAP virtual TermCriteria getTermCriteria() const = 0;
    /** @copybrief getTermCriteria @see getTermCriteria */
    CV_WRAP virtual void setTermCriteria(const cv::TermCriteria &val) = 0;
};


/****************************************************************************************\
*                           Auxiliary functions declarations                             *
\****************************************************************************************/

/** @brief Generates _sample_ from multivariate normal distribution

@param mean an average row vector
@param cov symmetric covariation matrix
@param nsamples returned samples count
@param samples returned samples array
*/
CV_EXPORTS void randMVNormal( InputArray mean, InputArray cov, int nsamples, OutputArray samples);

/** @brief Creates test set */
CV_EXPORTS void createConcentricSpheresTestSet( int nsamples, int nfeatures, int nclasses,
                                                OutputArray samples, OutputArray responses);


/****************************************************************************************\
*                                   Simulated annealing solver
             *
\****************************************************************************************/

#ifdef CV_DOXYGEN
/** @brief This class declares example interface for system state used in simulated annealing optimization algorithm.

@note This class is not defined in C++ code and can't be used directly - you need your own implementation with the same methods.
*/
struct SimulatedAnnealingSolverSystem
{
    /** Give energy value for a state of system.*/
    double energy() const;
    /** Function which changes the state of system (random perturbation).*/
    void changeState();
    /** Function to reverse to the previous state. Can be called once only after changeState(). */
    void reverseState();
};
#endif // CV_DOXYGEN

/** @brief The class implements simulated annealing for optimization.

@cite Kirkpatrick83 for details

@param solverSystem optimization system (see SimulatedAnnealingSolverSystem)
@param initialTemperature initial temperature
@param finalTemperature final temperature
@param coolingRatio temperature step multiplier applied at each cooling step
@param iterationsPerStep number of iterations per temperature changing step
@param lastTemperature optional output for last used temperature
@param rngEnergy specify custom random numbers generator (cv::theRNG() by default)
*/
template<class SimulatedAnnealingSolverSystem>
int simulatedAnnealingSolver(SimulatedAnnealingSolverSystem& solverSystem,
     double initialTemperature, double finalTemperature, double coolingRatio,
     size_t iterationsPerStep,
     CV_OUT double* lastTemperature = NULL,
     cv::RNG& rngEnergy = cv::theRNG()
);

//! @} ml

}
}

#include <opencv2/ml/ml.inl.hpp>

#endif // __cplusplus
#endif // OPENCV_ML_HPP

/* End of file. */