Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
ElmerCSC
GitHub Repository: ElmerCSC/elmerfem
Path: blob/devel/ElmerGUI/netgen/libsrc/opti/linsearch.cpp
3206 views
1
/***************************************************************************/
2
/* */
3
/* Problem: Liniensuche */
4
/* */
5
/*   Programmautor: Joachim Schöberl                                        */
6
/* Matrikelnummer: 9155284 */
7
/* */
8
/* Algorithmus nach: */
9
/* */
10
/* Optimierung I, Gfrerer, WS94/95 */
11
/* Algorithmus 2.1: Liniensuche Problem (ii) */
12
/* */
13
/***************************************************************************/
14
15
16
17
#include <mystdlib.h>
18
19
#include <myadt.hpp> // min, max, sqr
20
21
#include <linalg.hpp>
22
#include "opti.hpp"
23
24
25
namespace netgen
26
{
27
const double eps0 = 1E-15;
28
29
// Liniensuche
30
31
32
// Base-class fallback for the objective function value at x.
// Concrete minimization problems are expected to override Func();
// reaching this stub indicates a missing override, so it reports
// the call on cerr and returns 0.
double MinFunction :: Func (const Vector & /* x */) const
{
  cerr << "Func of MinFunction called" << endl;
  return 0;
}
37
38
// Base-class fallback for the gradient of the objective at x.
// Concrete subclasses override Grad(); reaching this stub indicates
// a missing override. g is left untouched.
void MinFunction :: Grad (const Vector & /* x */, Vector & /* g */) const
{
  cerr << "Grad of MinFunction called" << endl;
}
42
43
double MinFunction :: FuncGrad (const Vector & x, Vector & g) const
44
{
45
cerr << "Grad of MinFunction called" << endl;
46
return 0;
47
/*
48
int n = x.Size();
49
50
static Vector xr;
51
static Vector xl;
52
xr.SetSize(n);
53
xl.SetSize(n);
54
55
double eps = 1e-6;
56
double fl, fr;
57
58
for (int i = 1; i <= n; i++)
59
{
60
xr.Set (1, x);
61
xl.Set (1, x);
62
63
xr.Elem(i) += eps;
64
fr = Func (xr);
65
66
xl.Elem(i) -= eps;
67
fl = Func (xl);
68
69
g.Elem(i) = (fr - fl) / (2 * eps);
70
}
71
72
double f = Func(x);
73
// (*testout) << "f = " << f << " grad = " << g << endl;
74
return f;
75
*/
76
}
77
78
79
double MinFunction :: FuncDeriv (const Vector & x, const Vector & dir, double & deriv) const
80
{
81
Vector g(x.Size());
82
double f = FuncGrad (x, g);
83
deriv = (g * dir);
84
85
// (*testout) << "g = " << g << ", dir = " << dir << ", deriv = " << deriv << endl;
86
return f;
87
}
88
89
void MinFunction :: ApproximateHesse (const Vector & x,
90
DenseMatrix & hesse) const
91
{
92
int n = x.Size();
93
int i, j;
94
95
static Vector hx;
96
hx.SetSize(n);
97
98
double eps = 1e-6;
99
double f, f11, f12, f21, f22;
100
101
for (i = 1; i <= n; i++)
102
{
103
for (j = 1; j < i; j++)
104
{
105
hx = x;
106
hx.Elem(i) = x.Get(i) + eps;
107
hx.Elem(j) = x.Get(j) + eps;
108
f11 = Func(hx);
109
hx.Elem(i) = x.Get(i) + eps;
110
hx.Elem(j) = x.Get(j) - eps;
111
f12 = Func(hx);
112
hx.Elem(i) = x.Get(i) - eps;
113
hx.Elem(j) = x.Get(j) + eps;
114
f21 = Func(hx);
115
hx.Elem(i) = x.Get(i) - eps;
116
hx.Elem(j) = x.Get(j) - eps;
117
f22 = Func(hx);
118
119
hesse.Elem(i, j) = hesse.Elem(j, i) =
120
(f11 + f22 - f12 - f21) / (2 * eps * eps);
121
}
122
123
hx = x;
124
f = Func(x);
125
hx.Elem(i) = x.Get(i) + eps;
126
f11 = Func(hx);
127
hx.Elem(i) = x.Get(i) - eps;
128
f22 = Func(hx);
129
130
hesse.Elem(i, i) = (f11 + f22 - 2 * f) / (eps * eps);
131
}
132
// (*testout) << "hesse = " << hesse << endl;
133
}
134
135
136
137
138
139
140
141
/// Line search with modified Mangasarian conditions
/// (Gfrerer, Optimierung I, WS94/95, Algorithm 2.1, problem (ii)).
/// Searches a step length alphahat along direction p from x such that
/// xneu = x + alphahat * p satisfies a sufficient-decrease test; the
/// bracket [alpha1, alpha2] is shrunk/extended until acceptance.
void lines (Vector & x,         // i: initial point of line-search
	    Vector & xneu,      // o: solution, if successful
	    Vector & p,         // i: search direction
	    double & f,         // i: function-value at x
	                        // o: function-value at xneu, iff ifail = 0
	    Vector & g,         // i: gradient at x
	                        // o: gradient at xneu, iff ifail = 0
	    const MinFunction & fun,  // function to minimize
	    const OptiParameters & par,
	    double & alphahat,  // i: initial value for alpha_hat
	                        // o: solution alpha iff ifail = 0
	    double fmin,        // i: lower bound for f
	    double mu1,         // i: Parameter mu_1 of Alg.2.1
	    double sigma,       // i: Parameter sigma of Alg.2.1
	    double xi1,         // i: Parameter xi_1 of Alg.2.1
	    double xi2,         // i: Parameter xi_1 of Alg.2.1
	    double tau,         // i: Parameter tau of Alg.2.1
	    double tau1,        // i: Parameter tau_1 of Alg.2.1
	    double tau2,        // i: Parameter tau_2 of Alg.2.1
	    int & ifail)        // o:  0 on success
	                        //    -1 on termination because lower limit fmin was reached
	                        //     1 on illegal termination due to different reasons
{
  // phi(alpha) = f(x + alpha * p); phi0/phi1 are phi at 0 resp. alpha1,
  // the *prime values are the corresponding directional derivatives.
  double phi0, phi0prime, phi1, phi1prime, phihatprime;
  double alpha1, alpha2, alphaincr, c;
  char flag = 1;   // 1 while alphahat is still only bounded from below
  long it;

  alpha1 = 0;      // current bracket [alpha1, alpha2]
  alpha2 = 1e50;
  phi0 = phi1 = f;

  phi0prime = g * p;

  // p must be a descent direction, otherwise the search is ill-posed.
  if (phi0prime > 0)
    {
      ifail = 1;
      return;
    }

  ifail = 1;  // Markus: default to failure; cleared when a break below succeeds

  phi1prime = phi0prime;

  // (*testout) << "phi0prime = " << phi0prime << endl;

  // it = 100000l;
  it = 0;

  while (it++ <= par.maxit_linsearch)
    {
      // trial point: xneu = x + alphahat * p
      xneu.Set2 (1, x, alphahat, p);

      // f = fun.FuncGrad (xneu, g);
      // f = fun.Func (xneu);
      f = fun.FuncDeriv (xneu, p, phihatprime);

      // (*testout) << "lines, f = " << f << " phip = " << phihatprime << endl;

      // fell below the user-supplied lower bound: give up with -1
      if (f < fmin)
	{
	  ifail = -1;
	  break;
	}

      // bracket has collapsed to relative machine precision: accept
      if (alpha2 - alpha1 < eps0 * alpha2)
	{
	  ifail = 0;
	  break;
	}

      // (*testout) << "i = " << it << " al = " << alphahat << " f = " << f << " fprime " << phihatprime << endl;;

      // Insufficient decrease: alphahat becomes the new upper bound and
      // a new trial point is chosen by quadratic interpolation of phi
      // on [alpha1, alphahat], safeguarded into the interior by tau.
      if (f - phi0 > mu1 * alphahat * phi1prime + eps0 * fabs (phi0))
	{
	  flag = 0;
	  alpha2 = alphahat;

	  // curvature of the interpolating quadratic
	  c =
	    (f - phi1 - phi1prime * (alphahat-alpha1)) /
	    sqr (alphahat-alpha1);

	  // minimizer of the interpolating quadratic
	  alphahat = alpha1 - 0.5 * phi1prime / c;

	  if (alphahat > alpha2)
	    alphahat = alpha1 + 1/(4*c) *
	      ( (sigma+mu1) * phi0prime - 2*phi1prime
		+ sqrt (sqr(phi1prime - mu1 * phi0prime) -
			4 * (phi1 - phi0 - mu1 * alpha1 * phi0prime) * c));

	  // keep the new trial strictly inside the bracket
	  alphahat = max2 (alphahat, alpha1 + tau * (alpha2 - alpha1));
	  alphahat = min2 (alphahat, alpha2 - tau * (alpha2 - alpha1));

	  // (*testout) << " if-branch" << endl;
	}
      else
	{
	  /*
	  f = fun.FuncGrad (xneu, g);
	  phihatprime = g * p;
	  */
	  f = fun.FuncDeriv (xneu, p, phihatprime);

	  // Curvature condition not yet met: the slope is still too
	  // negative, so extend the step.
	  if (phihatprime < sigma * phi0prime * (1 + eps0))
	    {
	      if (phi1prime < phihatprime)
		// the approximating (secant) model is convex:
		// step to its predicted zero-slope point
		alphaincr = (alphahat - alpha1) * phihatprime /
		  (phi1prime - phihatprime);
	      else
		alphaincr = 1e99; // MAXDOUBLE;

	      // safeguard the increment: xi-factors while unbounded above
	      // (flag), tau-factors relative to the bracket otherwise
	      if (flag)
		{
		  alphaincr = max2 (alphaincr, xi1 * (alphahat-alpha1));
		  alphaincr = min2 (alphaincr, xi2 * (alphahat-alpha1));
		}
	      else
		{
		  alphaincr = max2 (alphaincr, tau1 * (alpha2 - alphahat));
		  alphaincr = min2 (alphaincr, tau2 * (alpha2 - alphahat));
		}

	      // advance the lower end of the bracket to the current trial
	      alpha1 = alphahat;
	      alphahat += alphaincr;
	      phi1 = f;
	      phi1prime = phihatprime;
	    }
	  else
	    {
	      ifail = 0;  // success !!
	      break;
	    }

	  // (*testout) << " else, " << endl;
	}
    }

  // (*testout) << "linsearch: it = " << it << " ifail = " << ifail << endl;

  // refresh g at the final point (also on failure paths)
  fun.FuncGrad (xneu, g);

  // NOTE(review): with it starting at 0 this can only trigger on
  // wrap-around of the long counter — presumably a leftover safeguard
  // from the commented-out countdown initialization above.
  if (it < 0)
    ifail = 1;

  // (*testout) << "fail = " << ifail << endl;
}
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
// Simple steepest-descent driver: performs a fixed number (10) of
// iterations, each taking a line search along the negative gradient.
//
//   x   : i: starting point / o: final iterate
//   fun : function to minimize
//   par : optimization parameters forwarded to the line search
void SteepestDescent (Vector & x, const MinFunction & fun,
		      const OptiParameters & par)
{
  int n = x.Size();
  // Fix: removed unused local 'g2(n)', which allocated a vector that
  // was never read or written.
  Vector xnew(n), p(n), g(n);
  double val, alphahat;
  int fail;

  val = fun.FuncGrad(x, g);

  alphahat = 1;
  // testout << "f = ";
  for (int it = 0; it < 10; it++)
    {
      // testout << val << " ";

      // search direction: negative gradient, p = -g
      p.Set (-1, g);

      // line search updates val, g and alphahat in place on success
      lines (x, xnew, p, val, g, fun, par, alphahat, -1e5,
	     0.1, 0.1, 1, 10, 0.1, 0.1, 0.6, fail);

      x = xnew;
    }
  // testout << endl;
}
351
}
352
353