"""
Numerical Root Finding and Optimization

AUTHOR:

- William Stein (2007): initial version
- Nathann Cohen (2008): Bin Packing


Functions and Methods
----------------------
"""
from sage.modules.free_module_element import vector
from sage.rings.real_double import RDF


def find_root(f, a, b, xtol=10e-13, rtol=4.5e-16, maxiter=100, full_output=False):
    """
    Numerically find a root of ``f`` on the closed interval `[a,b]`
    (or `[b,a]`) if possible, where ``f`` is a function of one variable.

    INPUT:

    - ``f`` -- a function of one variable or symbolic equality

    - ``a``, ``b`` -- endpoints of the interval

    - ``xtol``, ``rtol`` -- the routine converges when a root is known
      to lie within ``xtol`` of the value returned. Should be `\geq 0`.
      The routine modifies this to take into account the relative precision
      of doubles.

    - ``maxiter`` -- integer; if convergence is not achieved in
      ``maxiter`` iterations, an error is raised. Must be `\geq 0`.

    - ``full_output`` -- bool (default: ``False``); if ``True``, also return
      an object that contains information about convergence.

    EXAMPLES:

    An example involving an algebraic polynomial function::

        sage: R.<x> = QQ[]
        sage: f = (x+17)*(x-3)*(x-1/8)^3
        sage: find_root(f, 0, 4)
        2.999999999999995
        sage: find_root(f, 0, 1)  # note -- precision of answer isn't very good on some machines.
        0.124999...
        sage: find_root(f, -20, -10)
        -17.0

    In Pomerance's book on primes, he asserts that the famous Riemann
    Hypothesis is equivalent to the statement that the function `f(x)`
    defined below is positive for all `x \geq 2.01`::

        sage: def f(x):
        ...       return sqrt(x) * log(x) - abs(Li(x) - prime_pi(x))

    We find where `f` equals zero, i.e., what value slightly smaller
    than `2.01` could have been used in the formulation of the Riemann
    Hypothesis::

        sage: find_root(f, 2, 4, rtol=0.0001)
        2.0082590205656166

    This agrees with the plot::

        sage: plot(f, 2, 2.01)
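
    With ``full_output=True`` the convergence details are returned as
    well. This is a sketch rather than a tested example: when the SciPy
    backend is reached, the second value is a
    ``scipy.optimize.RootResults`` object whose printed form varies::

        sage: root, info = find_root(f, 2, 4, rtol=0.0001, full_output=True)  # not tested
        sage: root  # not tested
        2.0082590205656166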
"""
71
try:
72
return f.find_root(a=a,b=b,xtol=xtol,rtol=rtol,maxiter=maxiter,full_output=full_output)
73
except AttributeError:
74
pass
75
a = float(a); b = float(b)
76
if a > b:
77
a, b = b, a
78
left = f(a)
79
right = f(b)
80
if left > 0 and right > 0:
81
# Refine further -- try to find a point where this
82
# function is negative in the interval
83
val, s = find_minimum_on_interval(f, a, b)
84
if val > 0:
85
if val < rtol:
86
if full_output:
87
return s, "No extra data"
88
else:
89
return s
90
raise RuntimeError, "f appears to have no zero on the interval"
91
# If we found such an s, then we just instead find
92
# a root between left and s or s and right.
93
a = s # arbitrary choice -- maybe should try both and take one that works?
94
95
elif left < 0 and right < 0:
96
# Refine further
97
val, s = find_maximum_on_interval(f, a, b)
98
if val < 0:
99
if abs(val) < rtol:
100
if full_output:
101
return s, "No extra data"
102
else:
103
return s
104
raise RuntimeError, "f appears to have no zero on the interval"
105
a = s
106
107
import scipy.optimize
108
return scipy.optimize.brentq(f, a, b,
109
full_output=full_output, xtol=xtol, rtol=rtol, maxiter=maxiter)
110
111
def find_maximum_on_interval(f, a, b, tol=1.48e-08, maxfun=500):
112
"""
113
Numerically find the maximum of the expression `f` on the interval
114
`[a,b]` (or `[b,a]`) along with the point at which the maximum is attained.
115
116
See the documentation for :meth:`.find_minimum_on_interval`
117
for more details.
118
119
EXAMPLES::
120
121
sage: f = lambda x: x*cos(x)
122
sage: find_maximum_on_interval(f, 0,5)
123
(0.561096338191..., 0.8603335890...)
124
sage: find_maximum_on_interval(f, 0,5, tol=0.1, maxfun=10)
125
(0.561090323458..., 0.857926501456...)
126
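
    The maximum is computed by minimizing `-f`. The following sketch
    (not tested; it simply restates the implementation) therefore
    returns the same point::

        sage: minval, x0 = find_minimum_on_interval(lambda z: -f(z), 0, 5)  # not tested
        sage: (-minval, x0)  # not tested
        (0.561096338191..., 0.8603335890...)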
"""
127
minval, x = find_minimum_on_interval(lambda z: -f(z), a=a, b=b, tol=tol, maxfun=maxfun)
128
return -minval, x
129
130
def find_minimum_on_interval(f, a, b, tol=1.48e-08, maxfun=500):
131
"""
132
Numerically find the minimum of the expression ``f`` on the
133
interval `[a,b]` (or `[b,a]`) and the point at which it attains that
134
minimum. Note that ``f`` must be a function of (at most) one
135
variable.
136
137
138
INPUT:
139
140
- ``f`` -- a function of at most one variable.
141
142
- ``a``, ``b`` -- endpoints of interval on which to minimize self.
143
144
- ``tol`` -- the convergence tolerance
145
146
- ``maxfun`` -- maximum function evaluations
147
148
149
OUTPUT:
150
151
- ``minval`` -- (float) the minimum value that self takes on in the
152
interval `[a,b]`
153
154
- ``x`` -- (float) the point at which self takes on the minimum value
155
156
157
EXAMPLES::
158
159
sage: f = lambda x: x*cos(x)
160
sage: find_minimum_on_interval(f, 1, 5)
161
(-3.28837139559..., 3.4256184695...)
162
sage: find_minimum_on_interval(f, 1, 5, tol=1e-3)
163
(-3.28837136189098..., 3.42575079030572...)
164
sage: find_minimum_on_interval(f, 1, 5, tol=1e-2, maxfun=10)
165
(-3.28837084598..., 3.4250840220...)
166
sage: show(plot(f, 0, 20))
167
sage: find_minimum_on_interval(f, 1, 15)
168
(-9.4772942594..., 9.5293344109...)
169
170
171
ALGORITHM:
172
173
Uses scipy.optimize.fminbound which uses Brent's method.
174
175
176
AUTHOR:
177
178
- William Stein (2007-12-07)
179
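
    The function is a thin wrapper around the SciPy backend; the
    following sketch (not tested) calls ``scipy.optimize.fminbound``
    directly and repackages its output in the same order as above::

        sage: import scipy.optimize  # not tested
        sage: xmin, fval, iter, funcalls = scipy.optimize.fminbound(f, 1, 5, full_output=1, xtol=1.48e-08, maxfun=500)  # not tested
        sage: fval, xmin  # not tested
        (-3.28837139559..., 3.4256184695...)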
"""
180
try:
181
return f.find_minimum_on_interval(a=a, b=b, tol=tol,maxfun=maxfun)
182
except AttributeError:
183
pass
184
a = float(a); b = float(b)
185
import scipy.optimize
186
xmin, fval, iter, funcalls = scipy.optimize.fminbound(f, a, b, full_output=1, xtol=tol, maxfun=maxfun)
187
return fval, xmin
188
189
190
def minimize(func,x0,gradient=None,hessian=None,algorithm="default",**args):
191
r"""
192
This function is an interface to a variety of algorithms for computing
193
the minimum of a function of several variables.
194
195
196
INPUT:
197
198
- ``func`` -- Either a symbolic function or a Python function whose
199
argument is a tuple with `n` components
200
201
- ``x0`` -- Initial point for finding minimum.
202
203
- ``gradient`` -- Optional gradient function. This will be computed
204
automatically for symbolic functions. For Python functions, it allows
205
the use of algorithms requiring derivatives. It should accept a
206
tuple of arguments and return a NumPy array containing the partial
207
derivatives at that point.
208
209
- ``hessian`` -- Optional hessian function. This will be computed
210
automatically for symbolic functions. For Python functions, it allows
211
the use of algorithms requiring derivatives. It should accept a tuple
212
of arguments and return a NumPy array containing the second partial
213
derivatives of the function.
214
215
- ``algorithm`` -- String specifying algorithm to use. Options are
216
``'default'`` (for Python functions, the simplex method is the default)
217
(for symbolic functions bfgs is the default):
218
219
- ``'simplex'``
220
221
- ``'powell'``
222
223
- ``'bfgs'`` -- (broyden-fletcher-goldfarb-shannon) requires
224
``gradient``
225
226
- ``'cg'`` -- (conjugate-gradient) requires gradient
227
228
- ``'ncg'`` -- (newton-conjugate gradient) requires gradient and hessian
229
230
231
EXAMPLES::
232
233
sage: vars=var('x y z')
234
sage: f=100*(y-x^2)^2+(1-x)^2+100*(z-y^2)^2+(1-y)^2
235
sage: minimize(f,[.1,.3,.4],disp=0)
236
(1.00..., 1.00..., 1.00...)
237
238
sage: minimize(f,[.1,.3,.4],algorithm="ncg",disp=0)
239
(0.9999999..., 0.999999..., 0.999999...)
240
241
Same example with just Python functions::
242
243
sage: def rosen(x): # The Rosenbrock function
244
... return sum(100.0r*(x[1r:]-x[:-1r]**2.0r)**2.0r + (1r-x[:-1r])**2.0r)
245
sage: minimize(rosen,[.1,.3,.4],disp=0)
246
(1.00..., 1.00..., 1.00...)
247
248
Same example with a pure Python function and a Python function to
249
compute the gradient::
250
251
sage: def rosen(x): # The Rosenbrock function
252
... return sum(100.0r*(x[1r:]-x[:-1r]**2.0r)**2.0r + (1r-x[:-1r])**2.0r)
253
sage: import numpy
254
sage: from numpy import zeros
255
sage: def rosen_der(x):
256
... xm = x[1r:-1r]
257
... xm_m1 = x[:-2r]
258
... xm_p1 = x[2r:]
259
... der = zeros(x.shape,dtype=float)
260
... der[1r:-1r] = 200r*(xm-xm_m1**2r) - 400r*(xm_p1 - xm**2r)*xm - 2r*(1r-xm)
261
... der[0] = -400r*x[0r]*(x[1r]-x[0r]**2r) - 2r*(1r-x[0])
262
... der[-1] = 200r*(x[-1r]-x[-2r]**2r)
263
... return der
264
sage: minimize(rosen,[.1,.3,.4],gradient=rosen_der,algorithm="bfgs",disp=0)
265
(1.00..., 1.00..., 1.00...)
266
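
    The conjugate gradient method can be selected the same way; for a
    symbolic function the gradient it requires is computed
    automatically. A sketch (not tested, since the printed precision
    varies between platforms)::

        sage: minimize(f, [.1,.3,.4], algorithm="cg", disp=0)  # not tested
        (1.00..., 1.00..., 1.00...)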
"""
267
from sage.symbolic.expression import Expression
268
from sage.ext.fast_eval import fast_callable
269
import scipy
270
from scipy import optimize
271
if isinstance(func, Expression):
272
var_list=func.variables()
273
var_names=map(str,var_list)
274
fast_f=fast_callable(func, vars=var_names, domain=float)
275
f=lambda p: fast_f(*p)
276
gradient_list=func.gradient()
277
fast_gradient_functions=[fast_callable(gradient_list[i], vars=var_names, domain=float) for i in xrange(len(gradient_list))]
278
gradient=lambda p: scipy.array([ a(*p) for a in fast_gradient_functions])
279
else:
280
f=func
281
282
if algorithm=="default":
283
if gradient==None:
284
min=optimize.fmin(f,map(float,x0),**args)
285
else:
286
min= optimize.fmin_bfgs(f,map(float,x0),fprime=gradient,**args)
287
else:
288
if algorithm=="simplex":
289
min= optimize.fmin(f,map(float,x0),**args)
290
elif algorithm=="bfgs":
291
min= optimize.fmin_bfgs(f,map(float,x0),fprime=gradient,**args)
292
elif algorithm=="cg":
293
min= optimize.fmin_cg(f,map(float,x0),fprime=gradient,**args)
294
elif algorithm=="powell":
295
min= optimize.fmin_powell(f,map(float,x0),**args)
296
elif algorithm=="ncg":
297
if isinstance(func, Expression):
298
hess=func.hessian()
299
hess_fast= [ [fast_callable(a, vars=var_names, domain=float) for a in row] for row in hess]
300
hessian=lambda p: [[a(*p) for a in row] for row in hess_fast]
301
hessian_p=lambda p,v: scipy.dot(scipy.array(hessian(p)),v)
302
min= optimize.fmin_ncg(f,map(float,x0),fprime=gradient,fhess=hessian,fhess_p=hessian_p,**args)
303
return vector(RDF,min)
304
305
def minimize_constrained(func,cons,x0,gradient=None,algorithm='default', **args):
306
r"""
307
Minimize a function with constraints.
308
309
310
INPUT:
311
312
- ``func`` -- Either a symbolic function, or a Python function whose
313
argument is a tuple with n components
314
315
- ``cons`` -- constraints. This should be either a function or list of
316
functions that must be positive. Alternatively, the constraints can
317
be specified as a list of intervals that define the region we are
318
minimizing in. If the constraints are specified as functions, the
319
functions should be functions of a tuple with `n` components
320
(assuming `n` variables). If the constraints are specified as a list
321
of intervals and there are no constraints for a given variable, that
322
component can be (``None``, ``None``).
323
324
- ``x0`` -- Initial point for finding minimum
325
326
- ``algorithm`` -- Optional, specify the algorithm to use:
327
328
- ``'default'`` -- default choices
329
330
- ``'l-bfgs-b'`` -- only effective if you specify bound constraints.
331
See [ZBN97]_.
332
333
- ``gradient`` -- Optional gradient function. This will be computed
334
automatically for symbolic functions. This is only used when the
335
constraints are specified as a list of intervals.
336
337
338
EXAMPLES:
339
340
Let us maximize `x + y - 50` subject to the following constraints:
341
`50x + 24y \leq 2400`, `30x + 33y \leq 2100`, `x \geq 45`,
342
and `y \geq 5`::
343
344
sage: y = var('y')
345
sage: f = lambda p: -p[0]-p[1]+50
346
sage: c_1 = lambda p: p[0]-45
347
sage: c_2 = lambda p: p[1]-5
348
sage: c_3 = lambda p: -50*p[0]-24*p[1]+2400
349
sage: c_4 = lambda p: -30*p[0]-33*p[1]+2100
350
sage: a = minimize_constrained(f,[c_1,c_2,c_3,c_4],[2,3])
351
sage: a
352
(45.0, 6.25)
353
354
Let's find a minimum of `\sin(xy)`::
355
356
sage: x,y = var('x y')
357
sage: f = sin(x*y)
358
sage: minimize_constrained(f, [(None,None),(4,10)],[5,5])
359
(4.8..., 4.8...)
360
361
Check, if L-BFGS-B finds the same minimum::
362
363
sage: minimize_constrained(f, [(None,None),(4,10)],[5,5], algorithm='l-bfgs-b')
364
(4.7..., 4.9...)
365
366
Rosenbrock function, [http://en.wikipedia.org/wiki/Rosenbrock_function]::
367
368
sage: from scipy.optimize import rosen, rosen_der
369
sage: minimize_constrained(rosen, [(-50,-10),(5,10)],[1,1],gradient=rosen_der,algorithm='l-bfgs-b')
370
(-10.0, 10.0)
371
sage: minimize_constrained(rosen, [(-50,-10),(5,10)],[1,1],algorithm='l-bfgs-b')
372
(-10.0, 10.0)
373
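
    A single constraint may also be passed as a function rather than a
    list; it is handled by the same COBYLA code path as a list of
    functions. A sketch (not tested) with a hypothetical objective::

        sage: g = lambda p: p[0]^2 + p[1]^2
        sage: c = lambda p: p[0] - 1
        sage: sol = minimize_constrained(g, c, [2,2])  # not tested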

    REFERENCES:

    .. [ZBN97] C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778:
       L-BFGS-B, FORTRAN routines for large scale bound constrained
       optimization. ACM Transactions on Mathematical Software, Vol. 23,
       Num. 4, pp. 550--560, 1997.
    """
    from sage.symbolic.expression import Expression
    import scipy
    from scipy import optimize
    function_type = type(lambda x, y: x + y)

    if isinstance(func, Expression):
        var_list = func.variables()
        var_names = map(str, var_list)
        fast_f = func._fast_float_(*var_names)
        f = lambda p: fast_f(*p)
        gradient_list = func.gradient()
        fast_gradient_functions = [gradient_list[i]._fast_float_(*var_names) for i in xrange(len(gradient_list))]
        gradient = lambda p: scipy.array([a(*p) for a in fast_gradient_functions])
    else:
        f = func

    if isinstance(cons, list):
        if isinstance(cons[0], tuple) or isinstance(cons[0], list) or cons[0] is None:
            if gradient is not None:
                if algorithm == 'l-bfgs-b':
                    min = optimize.fmin_l_bfgs_b(f, x0, gradient, bounds=cons, iprint=-1, **args)[0]
                else:
                    min = optimize.fmin_tnc(f, x0, gradient, bounds=cons, messages=0, **args)[0]
            else:
                if algorithm == 'l-bfgs-b':
                    min = optimize.fmin_l_bfgs_b(f, x0, approx_grad=True, bounds=cons, iprint=-1, **args)[0]
                else:
                    min = optimize.fmin_tnc(f, x0, approx_grad=True, bounds=cons, messages=0, **args)[0]

        elif isinstance(cons[0], function_type):
            min = optimize.fmin_cobyla(f, x0, cons, iprint=0, **args)
    elif isinstance(cons, function_type):
        min = optimize.fmin_cobyla(f, x0, cons, iprint=0, **args)
    return vector(RDF, min)


def linear_program(c, G, h, A=None, b=None, solver=None):
    """
    Solves the dual linear programs:

    - Minimize `c'x` subject to `Gx + s = h`, `Ax = b`, and `s \geq 0` where
      `'` denotes transpose.

    - Maximize `-h'z - b'y` subject to `G'z + A'y + c = 0` and `z \geq 0`.

    INPUT:

    - ``c`` -- a vector

    - ``G`` -- a matrix

    - ``h`` -- a vector

    - ``A`` -- a matrix

    - ``b`` -- a vector

    - ``solver`` -- (optional) solver to use. If ``None``, CVXOPT's LP
      solver is used; if ``'glpk'``, GLPK's solver is used.

    These can be over any field that can be turned into a floating point
    number.

    OUTPUT:

    A dictionary ``sol`` with keys ``x``, ``s``, ``y``, ``z`` corresponding
    to the variables above:

    - ``sol['x']`` -- the solution to the linear program

    - ``sol['s']`` -- the slack variables for the solution

    - ``sol['z']``, ``sol['y']`` -- solutions to the dual program

    EXAMPLES:

    First, we minimize `-4x_1 - 5x_2` subject to `2x_1 + x_2 \leq 3`,
    `x_1 + 2x_2 \leq 3`, `x_1 \geq 0`, and `x_2 \geq 0`::

        sage: c = vector(RDF, [-4,-5])
        sage: G = matrix(RDF, [[2,1],[1,2],[-1,0],[0,-1]])
        sage: h = vector(RDF, [3,3,0,0])
        sage: sol = linear_program(c,G,h)
        sage: sol['x']
        (0.999..., 1.000...)

    Next, we maximize `x + y - 50` subject to `50x + 24y \leq 2400`,
    `30x + 33y \leq 2100`, `x \geq 45`, and `y \geq 5`::

        sage: v = vector([-1.0,-1.0,-1.0])
        sage: m = matrix([[50.0,24.0,0.0],[30.0,33.0,0.0],[-1.0,0.0,0.0],[0.0,-1.0,0.0],[0.0,0.0,1.0],[0.0,0.0,-1.0]])
        sage: h = vector([2400.0,2100.0,-45.0,-5.0,1.0,-1.0])
        sage: sol = linear_program(v,m,h)
        sage: sol['x']
        (45.000000..., 6.2499999...3, 1.00000000...)
        sage: sol = linear_program(v,m,h,solver='glpk')
        sage: sol['x']
        (45.0..., 6.25, 1.0...)
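
    Equality constraints `Ax = b` are supported through the optional
    arguments ``A`` and ``b``. A sketch (not tested), reusing ``c``,
    ``G`` and ``h`` from the first example and adding the hypothetical
    requirement `x_1 + x_2 = 2`::

        sage: A = matrix(RDF, [[1.0, 1.0]])
        sage: b = vector(RDF, [2.0])
        sage: sol = linear_program(c, G, h, A, b)  # not tested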
"""
485
from cvxopt.base import matrix as m
486
from cvxopt import solvers
487
solvers.options['show_progress']=False
488
if solver=='glpk':
489
from cvxopt import glpk
490
glpk.options['LPX_K_MSGLEV'] = 0
491
c_=m(c.base_extend(RDF).numpy())
492
G_=m(G.base_extend(RDF).numpy())
493
h_=m(h.base_extend(RDF).numpy())
494
if A!=None and b!=None:
495
A_=m(A.base_extend(RDF).numpy())
496
b_=m(b.base_extend(RDF).numpy())
497
sol=solvers.lp(c_,G_,h_,A_,b_,solver=solver)
498
else:
499
sol=solvers.lp(c_,G_,h_,solver=solver)
500
status=sol['status']
501
if status != 'optimal':
502
return {'primal objective':None,'x':None,'s':None,'y':None,
503
'z':None,'status':status}
504
x=vector(RDF,list(sol['x']))
505
s=vector(RDF,list(sol['s']))
506
y=vector(RDF,list(sol['y']))
507
z=vector(RDF,list(sol['z']))
508
return {'primal objective':sol['primal objective'],'x':x,'s':s,'y':y,
509
'z':z,'status':status}
510
511
512
def find_fit(data, model, initial_guess = None, parameters = None, variables = None, solution_dict = False):
513
r"""
514
Finds numerical estimates for the parameters of the function model to
515
give a best fit to data.
516
517
518
INPUT:
519
520
- ``data`` -- A two dimensional table of floating point numbers of the
521
form `[[x_{1,1}, x_{1,2}, \ldots, x_{1,k}, f_1],
522
[x_{2,1}, x_{2,2}, \ldots, x_{2,k}, f_2],
523
\ldots,
524
[x_{n,1}, x_{n,2}, \ldots, x_{n,k}, f_n]]` given as either a list of
525
lists, matrix, or numpy array.
526
527
- ``model`` -- Either a symbolic expression, symbolic function, or a
528
Python function. ``model`` has to be a function of the variables
529
`(x_1, x_2, \ldots, x_k)` and free parameters
530
`(a_1, a_2, \ldots, a_l)`.
531
532
- ``initial_guess`` -- (default: ``None``) Initial estimate for the
533
parameters `(a_1, a_2, \ldots, a_l)`, given as either a list, tuple,
534
vector or numpy array. If ``None``, the default estimate for each
535
parameter is `1`.
536
537
- ``parameters`` -- (default: ``None``) A list of the parameters
538
`(a_1, a_2, \ldots, a_l)`. If model is a symbolic function it is
539
ignored, and the free parameters of the symbolic function are used.
540
541
- ``variables`` -- (default: ``None``) A list of the variables
542
`(x_1, x_2, \ldots, x_k)`. If model is a symbolic function it is
543
ignored, and the variables of the symbolic function are used.
544
545
- ``solution_dict`` -- (default: ``False``) if ``True``, return the
546
solution as a dictionary rather than an equation.
547
548
549
EXAMPLES:
550
551
First we create some data points of a sine function with some random
552
perturbations::
553
554
sage: data = [(i, 1.2 * sin(0.5*i-0.2) + 0.1 * normalvariate(0, 1)) for i in xsrange(0, 4*pi, 0.2)]
555
sage: var('a, b, c, x')
556
(a, b, c, x)
557
558
We define a function with free parameters `a`, `b` and `c`::
559
560
sage: model(x) = a * sin(b * x - c)
561
562
We search for the parameters that give the best fit to the data::
563
564
sage: find_fit(data, model)
565
[a == 1.21..., b == 0.49..., c == 0.19...]
566
567
We can also use a Python function for the model::
568
569
sage: def f(x, a, b, c): return a * sin(b * x - c)
570
sage: fit = find_fit(data, f, parameters = [a, b, c], variables = [x], solution_dict = True)
571
sage: fit[a], fit[b], fit[c]
572
(1.21..., 0.49..., 0.19...)
573
574
We search for a formula for the `n`-th prime number::
575
576
sage: dataprime = [(i, nth_prime(i)) for i in xrange(1, 5000, 100)]
577
sage: find_fit(dataprime, a * x * log(b * x), parameters = [a, b], variables = [x])
578
[a == 1.11..., b == 1.24...]
579
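
    An initial estimate for the parameters can be supplied via
    ``initial_guess``. This sketch (not tested, since the data above is
    random) starts the search near the generating values `a = 1.2`,
    `b = 0.5`, `c = 0.2`::

        sage: fit = find_fit(data, model, initial_guess=[1, 0.5, 0])  # not tested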

    ALGORITHM:

    Uses ``scipy.optimize.leastsq``, which in turn uses MINPACK's lmdif and
    lmder algorithms.
    """
    import numpy

    if not isinstance(data, numpy.ndarray):
        try:
            data = numpy.array(data, dtype=float)
        except (ValueError, TypeError):
            raise TypeError("data has to be a list of lists, a matrix, or a numpy array")
    elif data.dtype == object:
        raise ValueError("the entries of data have to be of type float")

    if data.ndim != 2:
        raise ValueError("data has to be a two dimensional table of floating point numbers")

    from sage.symbolic.expression import Expression

    if isinstance(model, Expression):
        if variables is None:
            variables = list(model.arguments())
        if parameters is None:
            parameters = list(model.variables())
            for v in variables:
                parameters.remove(v)

    if data.shape[1] != len(variables) + 1:
        raise ValueError("each row of data needs %d entries, only %d entries given" % (len(variables) + 1, data.shape[1]))

    if parameters is None or len(parameters) == 0 or \
       variables is None or len(variables) == 0:
        raise ValueError("no variables given")

    if initial_guess is None:
        initial_guess = len(parameters) * [1]

    if not isinstance(initial_guess, numpy.ndarray):
        try:
            initial_guess = numpy.array(initial_guess, dtype=float)
        except (ValueError, TypeError):
            raise TypeError("initial_guess has to be a list, tuple, or numpy array")
    elif initial_guess.dtype == object:
        raise ValueError("the entries of initial_guess have to be of type float")

    if len(initial_guess) != len(parameters):
        raise ValueError("length of initial_guess does not coincide with the number of parameters")

    if isinstance(model, Expression):
        var_list = variables + parameters
        var_names = map(str, var_list)
        func = model._fast_float_(*var_names)
    else:
        func = model

    def function(x_data, params):
        result = numpy.zeros(len(x_data))
        for row in xrange(len(x_data)):
            fparams = numpy.hstack((x_data[row], params)).tolist()
            result[row] = func(*fparams)
        return result

    def error_function(params, x_data, y_data):
        result = numpy.zeros(len(x_data))
        for row in xrange(len(x_data)):
            fparams = x_data[row].tolist() + params.tolist()
            result[row] = func(*fparams)
        return result - y_data

    x_data = data[:, 0:len(variables)]
    y_data = data[:, -1]

    from scipy.optimize import leastsq
    estimated_params, d = leastsq(error_function, initial_guess, args=(x_data, y_data))

    if isinstance(estimated_params, float):
        estimated_params = [estimated_params]
    else:
        estimated_params = estimated_params.tolist()

    if solution_dict:
        sol_dict = {}
        for item in zip(parameters, estimated_params):
            sol_dict[item[0]] = item[1]
        return sol_dict

    return [item[0] == item[1] for item in zip(parameters, estimated_params)]

def binpacking(items, maximum=1, k=None):
    r"""
    Solves the bin packing problem.

    The Bin Packing problem is the following:

    Given a list of items of weights `p_i` and a real value `K`, what is
    the least number of bins such that all the items can be put in the
    bins, while making sure that each bin contains a weight of at most `K`?

    For more information, see http://en.wikipedia.org/wiki/Bin_packing_problem

    Two versions of this problem are solved by this algorithm:

    * Is it possible to put the given items in `L` bins?

    * What is the assignment of items using the
      least number of bins with the given list of items?

    INPUT:

    - ``items`` -- A list of real values (the items' weights)

    - ``maximum`` -- The maximal size of a bin

    - ``k`` -- Number of bins

      - When set to an integer value, the function returns a partition
        of the items into `k` bins if possible, and raises an
        exception otherwise.

      - When set to ``None``, the function returns a partition of the items
        using the least possible number of bins.

    OUTPUT:

    A list of lists, each member corresponding to a bin and containing
    the list of the weights inside it. If there is no solution, an
    exception is raised (this can only happen when ``k`` is specified
    or if ``maximum`` is less than the size of one item).

    EXAMPLES:

    Trying to find the minimum number of bins for 5 items of weights
    `1/5, 1/3, 2/3, 3/4, 5/7`::

        sage: from sage.numerical.optimize import binpacking
        sage: values = [1/5, 1/3, 2/3, 3/4, 5/7]
        sage: bins = binpacking(values)
        sage: len(bins)
        3

    Checking that the bins are of correct size::

        sage: all([ sum(b) <= 1 for b in bins ])
        True

    Checking that every item is in a bin::

        sage: b1, b2, b3 = bins
        sage: all([ (v in b1 or v in b2 or v in b3) for v in values ])
        True

    One way to use only three bins (which is best possible) is to put
    `1/5 + 3/4` together in a bin, `1/3 + 2/3` in another, and `5/7`
    by itself in the third one.
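
    The same partition can be requested explicitly by fixing the number
    of bins. The output below is marked random, since the assignment
    returned depends on the MIP solver::

        sage: binpacking(values, k=3)  # random
        [[1/3, 2/3], [1/5, 3/4], [5/7]]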

    Of course, we can also check that there is no solution using only two bins::

        sage: from sage.numerical.optimize import binpacking
        sage: binpacking([0.2,0.3,0.8,0.9], k=2)
        Traceback (most recent call last):
        ...
        ValueError: This problem has no solution!
    """

    if max(items) > maximum:
        raise ValueError("This problem has no solution!")

    if k is None:
        from sage.functions.other import ceil
        k = ceil(sum(items)/maximum)
        from sage.numerical.mip import MIPSolverException
        while True:
            try:
                return binpacking(items, k=k, maximum=maximum)
            except MIPSolverException:
                k = k + 1

    from sage.numerical.mip import MixedIntegerLinearProgram, MIPSolverException, Sum
    p = MixedIntegerLinearProgram()

    # Boolean variable indicating whether
    # the i-th element belongs to bin b
    box = p.new_variable(dim=2)

    # Each bin contains a weight of at most ``maximum``
    for b in range(k):
        p.add_constraint(Sum([items[i]*box[i][b] for i in range(len(items))]), max=maximum)

    # Each item is assigned exactly one bin
    for i in range(len(items)):
        p.add_constraint(Sum([box[i][b] for b in range(k)]), min=1, max=1)

    p.set_objective(None)
    p.set_binary(box)

    try:
        p.solve()
    except MIPSolverException:
        raise ValueError("This problem has no solution!")

    box = p.get_values(box)

    boxes = [[] for i in range(k)]

    for b in range(k):
        boxes[b].extend([items[i] for i in range(len(items)) if round(box[i][b]) == 1])

    return boxes