GitHub Repository: Tetragramm/opencv
Path: blob/master/modules/core/src/conjugate_gradient.cpp
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the OpenCV Foundation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"

#define dprintf(x)
#define print_matrix(x)

namespace cv
{
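// Default gradient for MinProblemSolver::Function: a central (symmetric) finite
// difference in each coordinate, grad[i] = (f(x + eps*e_i) - f(x - eps*e_i)) / (2*eps),
// with step eps = getGradientEps().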
double MinProblemSolver::Function::getGradientEps() const { return 1e-3; }
void MinProblemSolver::Function::getGradient(const double* x, double* grad)
{
    double eps = getGradientEps();
    int i, n = getDims();
    AutoBuffer<double> x_buf(n);
    double* x_ = x_buf.data();
    for( i = 0; i < n; i++ )
        x_[i] = x[i];
    for( i = 0; i < n; i++ )
    {
        x_[i] = x[i] + eps;
        double y1 = calc(x_);
        x_[i] = x[i] - eps;
        double y0 = calc(x_);
        grad[i] = (y1 - y0)/(2*eps);
        x_[i] = x[i];
    }
}

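// Parameters of the secant-method line search in minimizeOnTheLine():
// the number of secant refinement steps and the initial trial step along d.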
#define SEC_METHOD_ITERATIONS 4
#define INITIAL_SEC_METHOD_SIGMA 0.1
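// Implementation of ConjGradSolver: nonlinear conjugate gradient minimization of a
// user-supplied MinProblemSolver::Function (instances are created via
// ConjGradSolver::create() below).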
class ConjGradSolverImpl CV_FINAL : public ConjGradSolver
{
public:
    Ptr<Function> getFunction() const CV_OVERRIDE;
    void setFunction(const Ptr<Function>& f) CV_OVERRIDE;
    TermCriteria getTermCriteria() const CV_OVERRIDE;
    ConjGradSolverImpl();
    void setTermCriteria(const TermCriteria& termcrit) CV_OVERRIDE;
    double minimize(InputOutputArray x) CV_OVERRIDE;
protected:
    Ptr<MinProblemSolver::Function> _Function;
    TermCriteria _termcrit;
    Mat_<double> d,r,buf_x,r_old;
    Mat_<double> minimizeOnTheLine_buf1,minimizeOnTheLine_buf2;
private:
    static void minimizeOnTheLine(Ptr<MinProblemSolver::Function> _f,Mat_<double>& x,const Mat_<double>& d,Mat_<double>& buf1,Mat_<double>& buf2);
};

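// Line search along direction d, starting from x (updated in place).
// Performs a few secant-method iterations on the directional derivative
// phi'(t) = dot(grad f(x + t*d), d): with derivative samples d1 at the current
// point and d2 at the point shifted by sigma*d, the secant step is
// alpha = -sigma*d1/(d2 - d1). buf1 and buf2 are scratch gradient buffers.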
void ConjGradSolverImpl::minimizeOnTheLine(Ptr<MinProblemSolver::Function> _f,Mat_<double>& x,const Mat_<double>& d,Mat_<double>& buf1,
        Mat_<double>& buf2){
    double sigma=INITIAL_SEC_METHOD_SIGMA;
    buf1=0.0;
    buf2=0.0;

    dprintf(("before minimizeOnTheLine\n"));
    dprintf(("x:\n"));
    print_matrix(x);
    dprintf(("d:\n"));
    print_matrix(d);

    for(int i=0;i<SEC_METHOD_ITERATIONS;i++){
        _f->getGradient((double*)x.data,(double*)buf1.data);
        dprintf(("buf1:\n"));
        print_matrix(buf1);
        x=x+sigma*d;
        _f->getGradient((double*)x.data,(double*)buf2.data);
        dprintf(("buf2:\n"));
        print_matrix(buf2);
        double d1=buf1.dot(d), d2=buf2.dot(d);
        if((d1-d2)==0){
            break;
        }
        double alpha=-sigma*d1/(d2-d1);
        dprintf(("(buf2.dot(d)-buf1.dot(d))=%f\nalpha=%f\n",(buf2.dot(d)-buf1.dot(d)),alpha));
        x=x+(alpha-sigma)*d;
        sigma=-alpha;
    }

    dprintf(("after minimizeOnTheLine\n"));
    print_matrix(x);
}

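// Minimizes _Function starting from x (a 1 x n or n x 1 CV_64FC1 vector), in place.
// A column vector is first copied into the internal row buffer buf_x and copied back
// on exit. Returns the function value at the point found.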
double ConjGradSolverImpl::minimize(InputOutputArray x){
    CV_Assert(_Function.empty()==false);
    dprintf(("termcrit:\n\ttype: %d\n\tmaxCount: %d\n\tEPS: %g\n",_termcrit.type,_termcrit.maxCount,_termcrit.epsilon));

    Mat x_mat=x.getMat();
    CV_Assert(MIN(x_mat.rows,x_mat.cols)==1);
    int ndim=MAX(x_mat.rows,x_mat.cols);
    CV_Assert(x_mat.type()==CV_64FC1);

    if(d.cols!=ndim){
        d.create(1,ndim);
        r.create(1,ndim);
        r_old.create(1,ndim);
        minimizeOnTheLine_buf1.create(1,ndim);
        minimizeOnTheLine_buf2.create(1,ndim);
    }

    Mat_<double> proxy_x;
    if(x_mat.rows>1){
        buf_x.create(1,ndim);
        Mat_<double> proxy(ndim,1,buf_x.ptr<double>());
        x_mat.copyTo(proxy);
        proxy_x=buf_x;
    }else{
        proxy_x=x_mat;
    }
    _Function->getGradient(proxy_x.ptr<double>(),d.ptr<double>());
    d*=-1.0;
    d.copyTo(r);

    // Initial state is set up; dump it for debugging.
    dprintf(("proxy_x\n"));print_matrix(proxy_x);
    dprintf(("d first time\n"));print_matrix(d);
    dprintf(("r\n"));print_matrix(r);

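    // Main conjugate-gradient loop: line-minimize along d, recompute the
    // steepest-descent residual r = -grad f(x), test the EPS criterion on ||r||,
    // then update the direction as d = r + beta*d with
    // beta = max(0, dot(r, r - r_old) / ||r||^2),
    // a Polak-Ribiere-style coefficient; the clamp at zero restarts the search
    // along steepest descent.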
    for(int count=0;count<_termcrit.maxCount;count++){
        minimizeOnTheLine(_Function,proxy_x,d,minimizeOnTheLine_buf1,minimizeOnTheLine_buf2);
        r.copyTo(r_old);
        _Function->getGradient(proxy_x.ptr<double>(),r.ptr<double>());
        r*=-1.0;
        double r_norm_sq=norm(r);
        if(_termcrit.type==(TermCriteria::MAX_ITER+TermCriteria::EPS) && r_norm_sq<_termcrit.epsilon){
            break;
        }
        r_norm_sq=r_norm_sq*r_norm_sq;
        double beta=MAX(0.0,(r_norm_sq-r.dot(r_old))/r_norm_sq);
        d=r+beta*d;
    }

    if(x_mat.rows>1){
        Mat(ndim, 1, CV_64F, proxy_x.ptr<double>()).copyTo(x);
    }
    return _Function->calc(proxy_x.ptr<double>());
}

ConjGradSolverImpl::ConjGradSolverImpl(){
    _Function=Ptr<Function>();
}
Ptr<MinProblemSolver::Function> ConjGradSolverImpl::getFunction()const{
    return _Function;
}
void ConjGradSolverImpl::setFunction(const Ptr<Function>& f){
    _Function=f;
}
TermCriteria ConjGradSolverImpl::getTermCriteria()const{
    return _termcrit;
}
void ConjGradSolverImpl::setTermCriteria(const TermCriteria& termcrit){
    CV_Assert((termcrit.type==(TermCriteria::MAX_ITER+TermCriteria::EPS) && termcrit.epsilon>0 && termcrit.maxCount>0) ||
            ((termcrit.type==TermCriteria::MAX_ITER) && termcrit.maxCount>0));
    _termcrit=termcrit;
}
// Both minRange and minError are specified by termcrit.epsilon; in addition, the user
// may specify the maximum number of iterations via termcrit.maxCount.
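//
// Typical usage (illustrative sketch only; MyFunc is a hypothetical client class and
// is not part of this file):
//
//     class MyFunc : public cv::MinProblemSolver::Function
//     {
//     public:
//         int getDims() const CV_OVERRIDE { return 2; }
//         double calc(const double* x) const CV_OVERRIDE
//         {
//             return (x[0]-1.0)*(x[0]-1.0) + 10.0*(x[1]-x[0]*x[0])*(x[1]-x[0]*x[0]);
//         }
//         // The gradient falls back to the numerical default getGradient() above.
//     };
//
//     cv::Ptr<cv::ConjGradSolver> solver = cv::ConjGradSolver::create(
//         cv::makePtr<MyFunc>(),
//         cv::TermCriteria(cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS, 5000, 1e-6));
//     cv::Mat x = (cv::Mat_<double>(1, 2) << 0.0, 0.0);
//     double fmin = solver->minimize(x);  // x is refined in place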
Ptr<ConjGradSolver> ConjGradSolver::create(const Ptr<MinProblemSolver::Function>& f, TermCriteria termcrit){
    Ptr<ConjGradSolver> CG = makePtr<ConjGradSolverImpl>();
    CG->setFunction(f);
    CG->setTermCriteria(termcrit);
    return CG;
}
}