// Source: OpenCV ML module — modules/ml/src/inner_functions.cpp
// (mirrored via the Tetragramm/opencv GitHub repository)
1
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                        Intel License Agreement
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of Intel Corporation may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
42
43
namespace cv { namespace ml {
44
45
// Default grid: degenerate range [0, 0] with a unit (no-op) log step.
ParamGrid::ParamGrid() : minVal(0.), maxVal(0.), logStep(1.) {}
// Normalizing constructor: orders the endpoints so minVal <= maxVal and
// clamps the multiplicative step to at least 1 (a step below 1 would make
// the grid walk backwards or loop forever).
ParamGrid::ParamGrid(double _minVal, double _maxVal, double _logStep)
{
    CV_TRACE_FUNCTION();
    // These ternaries mirror std::min/std::max exactly (including their
    // behavior when a comparison is false), so the result is unchanged.
    minVal = (_maxVal < _minVal) ? _maxVal : _minVal;
    maxVal = (_minVal < _maxVal) ? _maxVal : _minVal;
    logStep = (_logStep < 1.) ? 1. : _logStep;
}
// Factory wrapper: builds a ref-counted ParamGrid from the given range/step.
Ptr<ParamGrid> ParamGrid::create(double minval, double maxval, double logstep)
{
    return makePtr<ParamGrid>(minval, maxval, logstep);
}
// A model counts as empty for the Algorithm interface as long as it has
// not been trained.
bool StatModel::empty() const
{
    if (isTrained())
        return false;
    return true;
}
// Base-class stub: reports zero variables; concrete trained models
// override this with the real count.
int StatModel::getVarCount() const
{
    return 0;
}
bool StatModel::train( const Ptr<TrainData>&, int )
63
{
64
CV_TRACE_FUNCTION();
65
CV_Error(CV_StsNotImplemented, "");
66
return false;
67
}
68
69
// Convenience overload: wraps the raw sample/response arrays into a
// TrainData object and forwards to the main train() entry point.
bool StatModel::train( InputArray samples, int layout, InputArray responses )
{
    CV_TRACE_FUNCTION();
    Ptr<TrainData> td = TrainData::create(samples, layout, responses);
    return train(td);
}
class ParallelCalcError : public ParallelLoopBody
76
{
77
private:
78
const Ptr<TrainData>& data;
79
bool &testerr;
80
Mat &resp;
81
const StatModel &s;
82
vector<double> &errStrip;
83
public:
84
ParallelCalcError(const Ptr<TrainData>& d, bool &t, Mat &_r,const StatModel &w, vector<double> &e) :
85
data(d),
86
testerr(t),
87
resp(_r),
88
s(w),
89
errStrip(e)
90
{
91
}
92
virtual void operator()(const Range& range) const CV_OVERRIDE
93
{
94
int idxErr = range.start;
95
CV_TRACE_FUNCTION_SKIP_NESTED();
96
Mat samples = data->getSamples();
97
Mat weights=testerr? data->getTestSampleWeights() : data->getTrainSampleWeights();
98
int layout = data->getLayout();
99
Mat sidx = testerr ? data->getTestSampleIdx() : data->getTrainSampleIdx();
100
const int* sidx_ptr = sidx.ptr<int>();
101
bool isclassifier = s.isClassifier();
102
Mat responses = data->getResponses();
103
int responses_type = responses.type();
104
double err = 0;
105
106
107
const float* sw = weights.empty() ? 0 : weights.ptr<float>();
108
for (int i = range.start; i < range.end; i++)
109
{
110
int si = sidx_ptr ? sidx_ptr[i] : i;
111
double sweight = sw ? static_cast<double>(sw[i]) : 1.;
112
Mat sample = layout == ROW_SAMPLE ? samples.row(si) : samples.col(si);
113
float val = s.predict(sample);
114
float val0 = (responses_type == CV_32S) ? (float)responses.at<int>(si) : responses.at<float>(si);
115
116
if (isclassifier)
117
err += sweight * fabs(val - val0) > FLT_EPSILON;
118
else
119
err += sweight * (val - val0)*(val - val0);
120
if (!resp.empty())
121
resp.at<float>(i) = val;
122
}
123
124
125
errStrip[idxErr]=err ;
126
127
};
128
ParallelCalcError& operator=(const ParallelCalcError &) {
129
return *this;
130
};
131
};
132
133
134
float StatModel::calcError(const Ptr<TrainData>& data, bool testerr, OutputArray _resp) const
135
{
136
CV_TRACE_FUNCTION_SKIP_NESTED();
137
Mat samples = data->getSamples();
138
Mat sidx = testerr ? data->getTestSampleIdx() : data->getTrainSampleIdx();
139
Mat weights = testerr ? data->getTestSampleWeights() : data->getTrainSampleWeights();
140
int n = (int)sidx.total();
141
bool isclassifier = isClassifier();
142
Mat responses = data->getResponses();
143
144
if (n == 0)
145
{
146
n = data->getNSamples();
147
weights = data->getTrainSampleWeights();
148
testerr =false;
149
}
150
151
if (n == 0)
152
return -FLT_MAX;
153
154
Mat resp;
155
if (_resp.needed())
156
resp.create(n, 1, CV_32F);
157
158
double err = 0;
159
vector<double> errStrip(n,0.0);
160
ParallelCalcError x(data, testerr, resp, *this,errStrip);
161
162
parallel_for_(Range(0,n),x);
163
164
for (size_t i = 0; i < errStrip.size(); i++)
165
err += errStrip[i];
166
float weightSum= weights.empty() ? n: static_cast<float>(sum(weights)(0));
167
if (_resp.needed())
168
resp.copyTo(_resp);
169
170
return (float)(err/ weightSum * (isclassifier ? 100 : 1));
171
}
172
173
/* Calculates upper triangular matrix S, where A is a symmetrical matrix A=S'*S */
174
static void Cholesky( const Mat& A, Mat& S )
175
{
176
CV_TRACE_FUNCTION();
177
CV_Assert(A.type() == CV_32F);
178
179
S = A.clone();
180
cv::Cholesky ((float*)S.ptr(),S.step, S.rows,NULL, 0, 0);
181
S = S.t();
182
for (int i=1;i<S.rows;i++)
183
for (int j=0;j<i;j++)
184
S.at<float>(i,j)=0;
185
}
186
187
/* Generates <sample> from multivariate normal distribution, where <mean> - is an
188
average row vector, <cov> - symmetric covariation matrix */
189
void randMVNormal( InputArray _mean, InputArray _cov, int nsamples, OutputArray _samples )
190
{
191
CV_TRACE_FUNCTION();
192
// check mean vector and covariance matrix
193
Mat mean = _mean.getMat(), cov = _cov.getMat();
194
int dim = (int)mean.total(); // dimensionality
195
CV_Assert(mean.rows == 1 || mean.cols == 1);
196
CV_Assert(cov.rows == dim && cov.cols == dim);
197
mean = mean.reshape(1,1); // ensure a row vector
198
199
// generate n-samples of the same dimension, from ~N(0,1)
200
_samples.create(nsamples, dim, CV_32F);
201
Mat samples = _samples.getMat();
202
randn(samples, Scalar::all(0), Scalar::all(1));
203
204
// decompose covariance using Cholesky: cov = U'*U
205
// (cov must be square, symmetric, and positive semi-definite matrix)
206
Mat utmat;
207
Cholesky(cov, utmat);
208
209
// transform random numbers using specified mean and covariance
210
for( int i = 0; i < nsamples; i++ )
211
{
212
Mat sample = samples.row(i);
213
sample = sample * utmat + mean;
214
}
215
}
216
217
}}
/* End of file */