GitHub Repository: freebsd/freebsd-src
Path: blob/main/contrib/arm-optimized-routines/math/tools/remez.jl

#!/usr/bin/env julia
# -*- julia -*-

# remez.jl - implementation of the Remez algorithm for polynomial approximation
#
# Copyright (c) 2015-2019, Arm Limited.
# SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception

import Base.\

# ----------------------------------------------------------------------
# Helper functions to cope with different Julia versions.
if VERSION >= v"0.7.0"
    array1d(T, d) = Array{T, 1}(undef, d)
    array2d(T, d1, d2) = Array{T, 2}(undef, d1, d2)
else
    array1d(T, d) = Array(T, d)
    array2d(T, d1, d2) = Array(T, d1, d2)
end
if VERSION < v"0.5.0"
    String = ASCIIString
end
if VERSION >= v"0.5.0"
    # set_bigfloat_precision() was renamed to setprecision() in Julia
    # 0.5, so provide the old name as an alias to keep the
    # --bigfloatbits option working on newer versions.
    set_bigfloat_precision(bits) = setprecision(BigFloat, bits)
end
if VERSION >= v"0.6.0"
    # Use Base.invokelatest to run functions made using eval(), to
    # avoid "world age" error
    run(f, x...) = Base.invokelatest(f, x...)
else
    # Prior to 0.6.0, invokelatest doesn't exist (but fortunately the
    # world age problem also doesn't seem to exist)
    run(f, x...) = f(x...)
end

# ----------------------------------------------------------------------
# Global variables configured by command-line options.
floatsuffix = "" # adjusted by --suffix
xvarname = "x" # adjusted by --variable
epsbits = 256 # adjusted by --bits
debug_facilities = Set() # adjusted by --debug
full_output = false # adjusted by --full
array_format = false # adjusted by --array
preliminary_commands = array1d(String, 0) # adjusted by --pre

# ----------------------------------------------------------------------
# Diagnostic and utility functions.

# Enable debugging printouts from a particular subpart of this
# program.
#
# Arguments:
#    facility   Name of the facility to debug. For a list of facility names,
#               look through the code for calls to debug().
#
# No return value.
function enable_debug(facility)
    push!(debug_facilities, facility)
end

# Print a diagnostic.
#
# Arguments:
#    facility   Name of the facility for which this is a debug message.
#    printargs  Arguments to println() if debugging of that facility is
#               enabled.
macro debug(facility, printargs...)
    printit = quote
        print("[", $facility, "] ")
    end
    for arg in printargs
        printit = quote
            $printit
            print($(esc(arg)))
        end
    end
    return quote
        if $facility in debug_facilities
            $printit
            println()
        end
    end
end
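
# For illustration, a hypothetical use of the two facilities above (not
# executed as part of this script): enable a facility once, then emit
# messages guarded by it.
#
#     enable_debug("gausselim")
#     @debug("gausselim", "starting, n=", 3)  # prints "[gausselim] starting, n=3"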

# Evaluate a polynomial.

# Arguments:
#    coeffs   Array of BigFloats giving the coefficients of the polynomial.
#             Starts with the constant term, i.e. coeffs[i] is the
#             coefficient of x^(i-1) (because Julia arrays are 1-based).
#    x        Point at which to evaluate the polynomial.
#
# Return value is a BigFloat.
function poly_eval(coeffs::Array{BigFloat}, x::BigFloat)
    n = length(coeffs)
    if n == 0
        return BigFloat(0)
    elseif n == 1
        return coeffs[1]
    else
        return coeffs[1] + x * poly_eval(coeffs[2:n], x)
    end
end
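
# The recursion above is Horner's rule. For example (hypothetical, not
# run by this script), the coefficients [1, 2, 3] represent
# 1 + 2x + 3x^2, evaluated as 1 + x*(2 + x*3):
#
#     poly_eval(BigFloat[1, 2, 3], BigFloat(2))  # 1 + 2*(2 + 2*3) = 17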

# Evaluate a rational function.

# Arguments:
#    ncoeffs  Array of BigFloats giving the coefficients of the numerator.
#             Starts with the constant term, and 1-based, as above.
#    dcoeffs  Array of BigFloats giving the coefficients of the denominator.
#             Starts with the constant term, and 1-based, as above.
#    x        Point at which to evaluate the function.
#
# Return value is a BigFloat.
function ratfn_eval(ncoeffs::Array{BigFloat}, dcoeffs::Array{BigFloat},
                    x::BigFloat)
    return poly_eval(ncoeffs, x) / poly_eval(dcoeffs, x)
end

# Format a BigFloat into an appropriate output format.
# Arguments:
#    x        BigFloat to format.
#
# Return value is a string.
function float_to_str(x)
    return string(x) * floatsuffix
end

# Format a polynomial into an arithmetic expression, for pasting into
# other tools such as gnuplot.

# Arguments:
#    coeffs   Array of BigFloats giving the coefficients of the polynomial.
#             Starts with the constant term, and 1-based, as above.
#
# Return value is a string.
function poly_to_string(coeffs::Array{BigFloat})
    n = length(coeffs)
    if n == 0
        return "0"
    elseif n == 1
        return float_to_str(coeffs[1])
    else
        return string(float_to_str(coeffs[1]), "+", xvarname, "*(",
                      poly_to_string(coeffs[2:n]), ")")
    end
end

# Format a rational function into a string.

# Arguments:
#    ncoeffs  Array of BigFloats giving the coefficients of the numerator.
#             Starts with the constant term, and 1-based, as above.
#    dcoeffs  Array of BigFloats giving the coefficients of the denominator.
#             Starts with the constant term, and 1-based, as above.
#
# Return value is a string.
function ratfn_to_string(ncoeffs::Array{BigFloat}, dcoeffs::Array{BigFloat})
    if length(dcoeffs) == 1 && dcoeffs[1] == 1
        # Special case: if the denominator is just 1, leave it out.
        return poly_to_string(ncoeffs)
    else
        return string("(", poly_to_string(ncoeffs), ")/(",
                      poly_to_string(dcoeffs), ")")
    end
end

# Format a list of x,y pairs into a string.

# Arguments:
#    xys      Array of (x,y) pairs of BigFloats.
#
# Return value is a string.
function format_xylist(xys::Array{Tuple{BigFloat,BigFloat}})
    return ("[\n" *
            join(["  "*string(x)*" -> "*string(y) for (x,y) in xys], "\n") *
            "\n]")
end

# ----------------------------------------------------------------------
# Matrix-equation solver for matrices of BigFloat.
#
# I had hoped that Julia's type-genericity would allow me to solve the
# matrix equation Mx=V by just writing 'M \ V'. Unfortunately, that
# works by translating the inputs into double precision and handing
# off to an optimised library, which misses the point when I have a
# matrix and vector of BigFloat and want my result in _better_ than
# double precision. So I have to implement my own specialisation of
# the \ operator for that case.
#
# Fortunately, the point of using BigFloats is that we have precision
# to burn, so I can do completely naïve Gaussian elimination without
# worrying about instability.

# Arguments:
#    matrix_in  2-dimensional array of BigFloats, representing a matrix M
#               in row-first order, i.e. matrix_in[r,c] represents the
#               entry in row r col c.
#    vector_in  1-dimensional array of BigFloats, representing a vector V.
#
# Return value: a 1-dimensional array X of BigFloats, satisfying M X = V.
#
# Expects the input to be an invertible square matrix and a vector of
# the corresponding size, on pain of failing an assertion.
function \(matrix_in :: Array{BigFloat,2},
           vector_in :: Array{BigFloat,1})
    # Copy the inputs, because we'll be mutating them as we go.
    M = copy(matrix_in)
    V = copy(vector_in)

    # Input consistency criteria: matrix is square, and vector has
    # length to match.
    n = length(V)
    @assert(n > 0)
    @assert(size(M) == (n,n))

    @debug("gausselim", "starting, n=", n)

    for i = 1:1:n
        # Straightforward Gaussian elimination: find the largest
        # non-zero entry in column i (and in a row we haven't sorted
        # out already), swap it into row i, scale that row to
        # normalise it to 1, then zero out the rest of the column by
        # subtracting a multiple of that row from each other row.

        @debug("gausselim", "matrix=", repr(M))
        @debug("gausselim", "vector=", repr(V))

        # Find the best pivot. (Track the pivot's absolute value, so
        # that this comparison always selects the entry of largest
        # magnitude.)
        bestrow = 0
        bestval = 0
        for j = i:1:n
            if abs(M[j,i]) > bestval
                bestrow = j
                bestval = abs(M[j,i])
            end
        end
        @assert(bestrow > 0) # make sure we did actually find one

        @debug("gausselim", "bestrow=", bestrow)

        # Swap it into row i.
        if bestrow != i
            for k = 1:1:n
                M[bestrow,k],M[i,k] = M[i,k],M[bestrow,k]
            end
            V[bestrow],V[i] = V[i],V[bestrow]
        end

        # Scale that row so that M[i,i] becomes 1.
        divisor = M[i,i]
        for k = 1:1:n
            M[i,k] = M[i,k] / divisor
        end
        V[i] = V[i] / divisor
        @assert(M[i,i] == 1)

        # Zero out all other entries in column i, by subtracting
        # multiples of this row.
        for j = 1:1:n
            if j != i
                factor = M[j,i]
                for k = 1:1:n
                    M[j,k] = M[j,k] - M[i,k] * factor
                end
                V[j] = V[j] - V[i] * factor
                @assert(M[j,i] == 0)
            end
        end
    end

    @debug("gausselim", "matrix=", repr(M))
    @debug("gausselim", "vector=", repr(V))
    @debug("gausselim", "done!")

    # Now we're done: M is the identity matrix, so the equation Mx=V
    # becomes just x=V, i.e. V is already exactly the vector we want
    # to return.
    return V
end
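
# A small worked example of the solver above (hypothetical, not run by
# this script): the system 2x + y = 5, x + 3y = 10 has the solution
# x = 1, y = 3.
#
#     M = BigFloat[2 1; 1 3]
#     V = BigFloat[5, 10]
#     M \ V    # returns approximately [1.0, 3.0]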

# ----------------------------------------------------------------------
# Least-squares fitting of a rational function to a set of (x,y)
# points.
#
# We use this to get an initial starting point for the Remez
# iteration. Therefore, it doesn't really need to be particularly
# accurate; it only needs to be good enough to wiggle back and forth
# across the target function the right number of times (so as to give
# enough error extrema to start optimising from) and not have any
# poles in the target interval.
#
# Least-squares fitting of a _polynomial_ is actually a sensible thing
# to do, and minimises the rms error. Doing the following trick with a
# rational function P/Q is less sensible, because it cannot be made to
# minimise the error function (P/Q-f)^2 that you actually wanted;
# instead it minimises (P-fQ)^2. But that should be good enough to
# have the properties described above.
#
# Some theory: suppose you're trying to choose a set of parameters a_i
# so as to minimise the sum of squares of some error function E_i.
# Basic calculus says, if you do this in one variable, just
# differentiate and solve for zero. In this case, that works fine even
# with multiple variables, because you _partially_ differentiate with
# respect to each a_i, giving a system of equations, and that system
# turns out to be linear so we just solve it as a matrix.
#
# In this case, our parameters are the coefficients of P and Q; to
# avoid underdetermining the system we'll fix Q's constant term at 1,
# so that our error function (as described above) is
#
#    E = \sum (p_0 + p_1 x + ... + p_n x^n - y - y q_1 x - ... - y q_d x^d)^2
#
# where the sum is over all (x,y) coordinate pairs. Setting dE/dp_j=0
# (for each j) gives an equation of the form
#
#    0 = \sum 2(p_0 + p_1 x + ... + p_n x^n - y - y q_1 x - ... - y q_d x^d) x^j
#
# and setting dE/dq_j=0 gives one of the form
#
#    0 = \sum 2(p_0 + p_1 x + ... + p_n x^n - y - y q_1 x - ... - y q_d x^d) y x^j
#
# And both of those row types, treated as multivariate linear
# equations in the p,q values, have each coefficient being a value of
# the form \sum x^i, \sum y x^i or \sum y^2 x^i, for various i. (Times
# a factor of 2, but we can throw that away.) So we can go through the
# list of input coordinates summing all of those things, and then we
# have enough information to construct our matrix and solve it
# straight off for the rational function coefficients.

# Arguments:
#    f        The function to be approximated. Maps BigFloat -> BigFloat.
#    xvals    Array of BigFloats, giving the list of x-coordinates at which
#             to evaluate f.
#    n        Degree of the numerator polynomial of the desired rational
#             function.
#    d        Degree of the denominator polynomial of the desired rational
#             function.
#    w        Error-weighting function. Takes two BigFloat arguments x,y
#             and returns a scaling factor for the error at that location.
#             A larger value indicates that the error should be given
#             greater weight in the square sum we try to minimise.
#             If unspecified, defaults to giving everything the same weight.
#
# Return values: a pair of arrays of BigFloats (N,D) giving the
# coefficients of the returned rational function. N has size n+1; D
# has size d+1. Both start with the constant term, i.e. N[i] is the
# coefficient of x^(i-1) (because Julia arrays are 1-based). D[1] will
# be 1.
function ratfn_leastsquares(f::Function, xvals::Array{BigFloat}, n, d,
                            w = (x,y)->BigFloat(1))
    # Accumulate sums of x^i y^j, for j={0,1,2} and a range of x.
    # Again because Julia arrays are 1-based, we'll have sums[i,j]
    # being the sum of x^(i-1) y^(j-1).
    maxpow = max(n,d) * 2 + 1
    sums = zeros(BigFloat, maxpow, 3)
    for x = xvals
        y = f(x)
        weight = w(x,y)
        for i = 1:1:maxpow
            for j = 1:1:3
                sums[i,j] += x^(i-1) * y^(j-1) * weight
            end
        end
    end

    @debug("leastsquares", "sums=", repr(sums))

    # Build the matrix. We're solving n+d+1 equations in n+d+1
    # unknowns. (We actually have to return n+d+2 coefficients, but
    # one of them is hardwired to 1.)
    matrix = array2d(BigFloat, n+d+1, n+d+1)
    vector = array1d(BigFloat, n+d+1)
    for i = 0:1:n
        # Equation obtained by differentiating with respect to p_i,
        # i.e. the numerator coefficient of x^i.
        row = 1+i
        for j = 0:1:n
            matrix[row, 1+j] = sums[1+i+j, 1]
        end
        for j = 1:1:d
            matrix[row, 1+n+j] = -sums[1+i+j, 2]
        end
        vector[row] = sums[1+i, 2]
    end
    for i = 1:1:d
        # Equation obtained by differentiating with respect to q_i,
        # i.e. the denominator coefficient of x^i.
        row = 1+n+i
        for j = 0:1:n
            matrix[row, 1+j] = sums[1+i+j, 2]
        end
        for j = 1:1:d
            matrix[row, 1+n+j] = -sums[1+i+j, 3]
        end
        vector[row] = sums[1+i, 3]
    end

    @debug("leastsquares", "matrix=", repr(matrix))
    @debug("leastsquares", "vector=", repr(vector))

    # Solve the matrix equation.
    all_coeffs = matrix \ vector

    @debug("leastsquares", "all_coeffs=", repr(all_coeffs))

    # And marshal the results into two separate polynomial vectors to
    # return.
    ncoeffs = all_coeffs[1:n+1]
    dcoeffs = vcat([1], all_coeffs[n+2:n+d+1])
    return (ncoeffs, dcoeffs)
end
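
# Hypothetical usage of ratfn_leastsquares (not run by this script):
# fit a degree-2/degree-1 rational function to exp over [0,1], sampled
# at a handful of points.
#
#     xs = [BigFloat(i)/10 for i in 0:10]
#     (nc, dc) = ratfn_leastsquares(x -> exp(x), xs, 2, 1)
#     ratfn_eval(nc, dc, BigFloat(1)/2)   # close to exp(0.5)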

# ----------------------------------------------------------------------
# Golden-section search to find a maximum of a function.

# Arguments:
#    f        Function to be maximised. Maps BigFloat -> BigFloat.
#    a,b,c    BigFloats bracketing a maximum of the function.
#
# Expects:
#    a,b,c are in order (either a<=b<=c or c<=b<=a)
#    a != c (but b can equal one or the other if it wants to)
#    f(a) <= f(b) >= f(c)
#
# Return value is an (x,y) pair of BigFloats giving the extremal input
# and output. (That is, y=f(x).)
function goldensection(f::Function, a::BigFloat, b::BigFloat, c::BigFloat)
    # Decide on a 'good enough' threshold.
    threshold = abs(c-a) * 2^(-epsbits/2)

    # We'll need the golden ratio phi, of course. Or rather, in this
    # case, we need 1/phi = 0.618...
    one_over_phi = 2 / (1 + sqrt(BigFloat(5)))

    # Flip round the interval endpoints so that the interval [a,b] is
    # at least as large as [b,c]. (Then we can always pick our new
    # point in [a,b] without having to handle lots of special cases.)
    if abs(b-a) < abs(c-b)
        a, c = c, a
    end

    # Evaluate the function at the initial points.
    fa = f(a)
    fb = f(b)
    fc = f(c)

    @debug("goldensection", "starting")

    while abs(c-a) > threshold
        @debug("goldensection", "a: ", a, " -> ", fa)
        @debug("goldensection", "b: ", b, " -> ", fb)
        @debug("goldensection", "c: ", c, " -> ", fc)

        # Check invariants.
        @assert(a <= b <= c || c <= b <= a)
        @assert(fa <= fb >= fc)

        # Subdivide the larger of the intervals [a,b] and [b,c]. We've
        # arranged that this is always [a,b], for simplicity.
        d = a + (b-a) * one_over_phi

        # Now we have an interval looking like this (possibly
        # reversed):
        #
        #    a         d     b       c
        #
        # and we know f(b) is bigger than either f(a) or f(c). We have
        # two cases: either f(d) > f(b), or vice versa. In either
        # case, we can narrow to an interval of 1/phi the size, and
        # still satisfy all our invariants (three ordered points,
        # [a,b] at least the width of [b,c], f(a)<=f(b)>=f(c)).
        fd = f(d)
        @debug("goldensection", "d: ", d, " -> ", fd)
        if fd > fb
            a, b, c = a, d, b
            fa, fb, fc = fa, fd, fb
            @debug("goldensection", "adb case")
        else
            a, b, c = c, b, d
            fa, fb, fc = fc, fb, fd
            @debug("goldensection", "cbd case")
        end
    end

    @debug("goldensection", "done: ", b, " -> ", fb)
    return (b, fb)
end
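
# Hypothetical example (not run by this script): find the maximum of
# -(x-1)^2, which is at x=1, starting from the bracket (0, 0.5, 2).
#
#     x, y = goldensection(x -> -(x-1)^2,
#                          BigFloat(0), BigFloat(1)/2, BigFloat(2))
#     # x is close to 1 and y close to 0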

# ----------------------------------------------------------------------
# Find the extrema of a function within a given interval.

# Arguments:
#    f        The function whose extrema we want (in the main program,
#             the weighted error function). Maps BigFloat -> BigFloat.
#    grid     A set of points at which to evaluate f. Must be high enough
#             resolution to make extrema obvious.
#
# Returns an array of (x,y) pairs of BigFloats, with each x,y giving
# the extremum location and its value (i.e. y=f(x)).
function find_extrema(f::Function, grid::Array{BigFloat})
    len = length(grid)
    extrema = array1d(Tuple{BigFloat, BigFloat}, 0)
    for i = 1:1:len
        # We have to provide goldensection() with three points
        # bracketing the extremum. If the extremum is at one end of
        # the interval, then the only way we can do that is to set two
        # of the points equal (which goldensection() will cope with).
        prev = max(1, i-1)
        next = min(i+1, len)

        # Find our three pairs of (x,y) coordinates.
        xp, xi, xn = grid[prev], grid[i], grid[next]
        yp, yi, yn = f(xp), f(xi), f(xn)

        # See if they look like an extremum, and if so, ask
        # goldensection() to give a more exact location for it.
        if yp <= yi >= yn
            push!(extrema, goldensection(f, xp, xi, xn))
        elseif yp >= yi <= yn
            # A minimum: negate f so that goldensection() can treat it
            # as a maximum, then negate the returned value back.
            x, y = goldensection(x->-f(x), xp, xi, xn)
            push!(extrema, (x, -y))
        end
    end
    return extrema
end
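
# Hypothetical example (not run by this script): on a fine enough grid,
# sin has extrema near pi/2 and 3pi/2 within [0, 2pi].
#
#     grid = [BigFloat(2) * pi * i / 1000 for i in 0:1000]
#     find_extrema(x -> sin(x), grid)
#     # the result includes points near (pi/2, 1) and (3pi/2, -1)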

# ----------------------------------------------------------------------
# Winnow a list of a function's extrema to give a subsequence of a
# specified length, with the extrema in the subsequence alternating
# signs, and with the smallest absolute value of an extremum in the
# subsequence as large as possible.
#
# We do this using a dynamic-programming approach. We work along the
# provided array of extrema, and at all times, we track the best set
# of extrema we have so far seen for each possible (length, sign of
# last extremum) pair. Each new extremum is evaluated to see whether
# it can be added to any previously seen best subsequence to make a
# new subsequence that beats the previous record holder in its slot.

# Arguments:
#    extrema  An array of (x,y) pairs of BigFloats giving the input extrema.
#    n        Number of extrema required as output.
#
# Returns a new array of (x,y) pairs which is a subsequence of the
# original sequence. (So, in particular, if the input was sorted by x
# then so will the output be.)
function winnow_extrema(extrema::Array{Tuple{BigFloat,BigFloat}}, n)
    # best[i,j] gives the best sequence so far of length i and with
    # sign j (where signs are coded as 1=positive, 2=negative), in the
    # form of a tuple (cost, actual array of x,y pairs).
    best = fill((BigFloat(0), array1d(Tuple{BigFloat,BigFloat}, 0)), n, 2)

    for (x,y) = extrema
        if y > 0
            sign = 1
        elseif y < 0
            sign = 2
        else
            # A zero-valued extremum cannot possibly contribute to any
            # optimal sequence, so we simply ignore it!
            continue
        end

        for i = 1:1:n
            # See if we can create a new entry for best[i,sign] by
            # appending our current (x,y) to some previous thing.
            if i == 1
                # Special case: we don't store a best zero-length
                # sequence :-)
                candidate = (abs(y), [(x,y)])
            else
                othersign = 3-sign # map 1->2 and 2->1
                oldscore, oldlist = best[i-1, othersign]
                newscore = min(abs(y), oldscore)
                newlist = vcat(oldlist, [(x,y)])
                candidate = (newscore, newlist)
            end
            # If our new candidate improves on the previous value of
            # best[i,sign], then replace it.
            if candidate[1] > best[i,sign][1]
                best[i,sign] = candidate
            end
        end
    end

    # Our ultimate return value has to be either best[n,1] or
    # best[n,2], but it could be either. See which one has the higher
    # score.
    if best[n,1][1] > best[n,2][1]
        ret = best[n,1][2]
    else
        ret = best[n,2][2]
    end
    # Make sure we did actually _find_ a good answer.
    @assert(length(ret) == n)
    return ret
end
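
# Hypothetical example (not run by this script): pick 3 alternating-sign
# extrema from a longer list, maximising the smallest |y|.
#
#     e = [(BigFloat(x), BigFloat(y))
#          for (x,y) in [(1,0.9), (2,-0.2), (3,-0.8), (4,0.7), (5,0.1)]]
#     winnow_extrema(e, 3)
#     # returns the subsequence at x = 1, 3, 4 (signs +,-,+ with
#     # smallest magnitude 0.7)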

# ----------------------------------------------------------------------
# Construct a rational-function approximation with equal and
# alternating weighted deviation at a specific set of x-coordinates.

# Arguments:
#    f         The function to be approximated. Maps BigFloat -> BigFloat.
#    coords    An array of BigFloats giving the x-coordinates. There should
#              be n+d+2 of them.
#    n, d      The degrees of the numerator and denominator of the desired
#              approximation.
#    prev_err  A plausible value for the alternating weighted deviation.
#              (Required to kickstart a binary search in the nonlinear case;
#              see comments below.)
#    w         Error-weighting function. Takes two BigFloat arguments x,y
#              and returns a scaling factor for the error at that location.
#              The returned approximation R should have the minimum possible
#              maximum value of abs((f(x)-R(x)) * w(x,f(x))). Optional
#              parameter, defaulting to the always-return-1 function.
#
# Return values: a pair of arrays of BigFloats (N,D) giving the
# coefficients of the returned rational function. N has size n+1; D
# has size d+1. Both start with the constant term, i.e. N[i] is the
# coefficient of x^(i-1) (because Julia arrays are 1-based). D[1] will
# be 1.
function ratfn_equal_deviation(f::Function, coords::Array{BigFloat},
                               n, d, prev_err::BigFloat,
                               w = (x,y)->BigFloat(1))
    @debug("equaldev", "n=", n, " d=", d, " coords=", repr(coords))
    @assert(length(coords) == n+d+2)

    if d == 0
        # Special case: we're after a polynomial. In this case, we
        # have the particularly easy job of just constructing and
        # solving a system of n+2 linear equations, to find the n+1
        # coefficients of the polynomial and also the amount of
        # deviation at the specified coordinates. Each equation is of
        # the form
        #
        #     p_0 x^0 + p_1 x^1 + ... + p_n x^n ± e/w(x) = f(x)
        #
        # in which the p_i and e are the variables, and the powers of
        # x and calls to w and f are the coefficients.

        matrix = array2d(BigFloat, n+2, n+2)
        vector = array1d(BigFloat, n+2)
        currsign = +1
        for i = 1:1:n+2
            x = coords[i]
            for j = 0:1:n
                matrix[i,1+j] = x^j
            end
            y = f(x)
            vector[i] = y
            matrix[i, n+2] = currsign / w(x,y)
            currsign = -currsign
        end

        @debug("equaldev", "matrix=", repr(matrix))
        @debug("equaldev", "vector=", repr(vector))

        outvector = matrix \ vector

        @debug("equaldev", "outvector=", repr(outvector))

        ncoeffs = outvector[1:n+1]
        dcoeffs = [BigFloat(1)]
        return ncoeffs, dcoeffs
    else
        # For a nontrivial rational function, the system of equations
        # we need to solve becomes nonlinear, because each equation
        # now takes the form
        #
        #     p_0 x^0 + p_1 x^1 + ... + p_n x^n
        #     --------------------------------- ± e/w(x) = f(x)
        #       x^0 + q_1 x^1 + ... + q_d x^d
        #
        # and multiplying up by the denominator gives you a lot of
        # terms containing e × q_i. So we can't do this the really
        # easy way using a matrix equation as above.
        #
        # Fortunately, this is a fairly easy kind of nonlinear system.
        # The equations all become linear if you switch to treating e
        # as a constant, so a reasonably sensible approach is to pick
        # a candidate value of e, solve all but one of the equations
        # for the remaining unknowns, and then see what the error
        # turns out to be in the final equation. The Chebyshev
        # alternation theorem guarantees that that error in the last
        # equation will be anti-monotonic in the input e, so we can
        # just binary-search until we get the two as close to equal as
        # we need them.

        function try_e(e)
            # Try a given value of e, derive the coefficients of the
            # resulting rational function by setting up equations
            # based on the first n+d+1 of the n+d+2 coordinates, and
            # see what the error turns out to be at the final
            # coordinate.
            matrix = array2d(BigFloat, n+d+1, n+d+1)
            vector = array1d(BigFloat, n+d+1)
            currsign = +1
            for i = 1:1:n+d+1
                x = coords[i]
                y = f(x)
                y_adj = y - currsign * e / w(x,y)
                for j = 0:1:n
                    matrix[i,1+j] = x^j
                end
                for j = 1:1:d
                    matrix[i,1+n+j] = -x^j * y_adj
                end
                vector[i] = y_adj
                currsign = -currsign
            end

            @debug("equaldev", "trying e=", e)
            @debug("equaldev", "matrix=", repr(matrix))
            @debug("equaldev", "vector=", repr(vector))

            outvector = matrix \ vector

            @debug("equaldev", "outvector=", repr(outvector))

            ncoeffs = outvector[1:n+1]
            dcoeffs = vcat([BigFloat(1)], outvector[n+2:n+d+1])

            x = coords[n+d+2]
            y = f(x)
            last_e = (ratfn_eval(ncoeffs, dcoeffs, x) - y) * w(x,y) * -currsign

            @debug("equaldev", "last e=", last_e)

            return ncoeffs, dcoeffs, last_e
        end

        threshold = 2^(-epsbits/2) # convergence threshold

        # Start by trying our previous iteration's error value. This
        # value (e0) will be one end of our binary-search interval,
        # and whatever it caused the last point's error to be, that
        # (e1) will be the other end.
        e0 = prev_err
        @debug("equaldev", "e0 = ", e0)
        nc, dc, e1 = try_e(e0)
        @debug("equaldev", "e1 = ", e1)
        if abs(e1-e0) <= threshold
            # If we're _really_ lucky, we hit the error right on the
            # nose just by doing that!
            return nc, dc
        end
        s = sign(e1-e0)
        @debug("equaldev", "s = ", s)

        # Verify by assertion that trying our other interval endpoint
        # e1 gives a value that's wrong in the other direction.
        # (Otherwise our binary search won't get a sensible answer at
        # all.)
        nc, dc, e2 = try_e(e1)
        @debug("equaldev", "e2 = ", e2)
        @assert(sign(e2-e1) == -s)

        # Now binary-search until our two endpoints narrow enough.
        local emid
        while abs(e1-e0) > threshold
            emid = (e1+e0)/2
            nc, dc, enew = try_e(emid)
            if sign(enew-emid) == s
                e0 = emid
            else
                e1 = emid
            end
        end

        @debug("equaldev", "final e=", emid)
        return nc, dc
    end
end
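
# As a concrete instance of the d == 0 linear system above
# (hypothetical, not run by this script): approximating f with a
# degree-1 polynomial at coords [x1,x2,x3] solves
#
#     [ 1  x1  +1/w(x1,y1) ] [ p_0 ]   [ f(x1) ]
#     [ 1  x2  -1/w(x2,y2) ] [ p_1 ] = [ f(x2) ]
#     [ 1  x3  +1/w(x3,y3) ] [  e  ]   [ f(x3) ]
#
# for the two coefficients and the signed deviation e in one shot.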

# ----------------------------------------------------------------------
# Top-level function to find a minimax rational-function approximation.

# Arguments:
#    f         The function to be approximated. Maps BigFloat -> BigFloat.
#    interval  A pair of BigFloats giving the endpoints of the interval
#              (in either order) on which to approximate f.
#    n, d      The degrees of the numerator and denominator of the desired
#              approximation.
#    w         Error-weighting function. Takes two BigFloat arguments x,y
#              and returns a scaling factor for the error at that location.
#              The returned approximation R should have the minimum possible
#              maximum value of abs((f(x)-R(x)) * w(x,f(x))). Optional
#              parameter, defaulting to the always-return-1 function.
#
# Return values: a tuple (N,D,E,X), where

#    N,D       A pair of arrays of BigFloats giving the coefficients
#              of the returned rational function. N has size n+1; D
#              has size d+1. Both start with the constant term, i.e.
#              N[i] is the coefficient of x^(i-1) (because Julia
#              arrays are 1-based). D[1] will be 1.
#    E         The maximum weighted error (BigFloat).
#    X         An array of pairs of BigFloats giving the locations of n+d+2
#              points and the weighted error at each of those points. The
#              weighted error values will have alternating signs, which
#              means that the Chebyshev alternation theorem guarantees
#              that any other function of the same degree must exceed
#              the error of this one at at least one of those points.
function ratfn_minimax(f::Function, interval::Tuple{BigFloat,BigFloat}, n, d,
                       w = (x,y)->BigFloat(1))
    # We start off by finding a least-squares approximation. This
    # doesn't need to be perfect, but if we can get it reasonably good
    # then it'll save iterations in the refining stage.
    #
    # Least-squares approximations tend to look nicer in a minimax
    # sense if you evaluate the function at a big pile of Chebyshev
    # nodes rather than uniformly spaced points. These values will
    # also make a good grid to use for the initial search for error
    # extrema, so we'll keep them around for that reason too.

    # Construct the grid.
    lo, hi = minimum(interval), maximum(interval)
    local grid
    let
        mid = (hi+lo)/2
        halfwid = (hi-lo)/2
        nnodes = 16 * (n+d+1)
        pi = 2*asin(BigFloat(1))
        grid = [ mid - halfwid * cos(pi*i/nnodes) for i=0:1:nnodes ]
    end

    # Find the initial least-squares approximation.
    (nc, dc) = ratfn_leastsquares(f, grid, n, d, w)
    @debug("minimax", "initial leastsquares approx = ",
           ratfn_to_string(nc, dc))

    # Threshold of convergence. We stop when the relative difference
    # between the min and max (winnowed) error extrema is less than
    # this.
    #
    # This is set to the cube root of machine epsilon on a more or
    # less empirical basis, because the rational-function case will
    # not converge reliably if you set it to only the square root.
    # (Repeatable by using the --test mode.) On the assumption that
    # input and output error in each iteration can be expected to be
    # related by a simple power law (because it'll just be down to how
    # many leading terms of a Taylor series are zero), the cube root
    # was the next thing to try.
    threshold = 2^(-epsbits/3)

    # Main loop.
    while true
        # Find all the error extrema we can.
        function compute_error(x)
            real_y = f(x)
            approx_y = ratfn_eval(nc, dc, x)
            return (approx_y - real_y) * w(x, real_y)
        end
        extrema = find_extrema(compute_error, grid)
        @debug("minimax", "all extrema = ", format_xylist(extrema))

        # Winnow the extrema down to the right number, and ensure they
        # have alternating sign.
        extrema = winnow_extrema(extrema, n+d+2)
        @debug("minimax", "winnowed extrema = ", format_xylist(extrema))

        # See if we've finished.
        min_err = minimum([abs(y) for (x,y) = extrema])
        max_err = maximum([abs(y) for (x,y) = extrema])
        variation = (max_err - min_err) / max_err
        @debug("minimax", "extremum variation = ", variation)
        if variation < threshold
            @debug("minimax", "done!")
            return nc, dc, max_err, extrema
        end

        # If not, refine our function by equalising the error at the
        # extrema points, and go round again.
        (nc, dc) = ratfn_equal_deviation(f, map(x->x[1], extrema),
                                         n, d, max_err, w)
        @debug("minimax", "refined approx = ", ratfn_to_string(nc, dc))
    end
end
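
# Hypothetical top-level usage (not run by this script): a degree-3
# minimax polynomial for sin on [0, pi/4].
#
#     hi = atan(BigFloat(1))   # pi/4
#     (nc, dc, e, X) = ratfn_minimax(x -> sin(x), (BigFloat(0), hi), 3, 0)
#     println(ratfn_to_string(nc, dc), "   # max error ", e)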

# ----------------------------------------------------------------------
# Check if a polynomial is well-conditioned for accurate evaluation in
# a given interval by Horner's rule.
#
# This is true if at every step where Horner's rule computes
# (coefficient + x*value_so_far), the constant coefficient you're
# adding on is of larger magnitude than the x*value_so_far operand.
# And this has to be true for every x in the interval.
#
# Arguments:
#    coeffs   The coefficients of the polynomial under test. Starts with
#             the constant term, i.e. coeffs[i] is the coefficient of
#             x^(i-1) (because Julia arrays are 1-based).
#    lo, hi   The bounds of the interval.
#
# Return value: the largest ratio (x*value_so_far / coefficient), at
# any step of evaluation, for any x in the interval. If this is less
# than 1, the polynomial is at least somewhat well-conditioned;
# ideally you want it to be more like 1/8 or 1/16 or so, so that the
# relative rounding error accumulated at each step is reduced by
# several factors of 2 when the next coefficient is added on.

function wellcond(coeffs, lo, hi)
    x = max(abs(lo), abs(hi))
    worst = 0
    so_far = 0
    for i = length(coeffs):-1:1
        coeff = abs(coeffs[i])
        so_far *= x
        if coeff != 0
            thisval = so_far / coeff
            worst = max(worst, thisval)
            so_far += coeff
        end
    end
    return worst
end
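
# Hypothetical example (not run by this script): 1 + x + x^2 on
# [0, 1/4] is comfortably well-conditioned; the largest ratio of
# x*value_so_far to the coefficient it's added to stays below 1.
#
#     wellcond(BigFloat[1, 1, 1], BigFloat(0), BigFloat(1)/4)
#     # returns 0.3125 = (1/4) * (1 + 1/4)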

# ----------------------------------------------------------------------
# Small set of unit tests.

function test()
    passes = 0
    fails = 0

    function approx_eq(x, y, limit=1e-6)
        return abs(x - y) < limit
    end

    function test(condition)
        if condition
            passes += 1
        else
            println("fail")
            fails += 1
        end
    end

    # Test Gaussian elimination.
    println("Gaussian test 1:")
    m = BigFloat[1 1 2; 3 5 8; 13 34 21]
    v = BigFloat[1, -1, 2]
    ret = m \ v
    println("  ",repr(ret))
    test(approx_eq(ret[1], 109/26))
    test(approx_eq(ret[2], -105/130))
    test(approx_eq(ret[3], -31/26))

    # Test leastsquares rational functions.
    println("Leastsquares test 1:")
    n = 10000
    a = array1d(BigFloat, n+1)
    for i = 0:1:n
        a[1+i] = i/BigFloat(n)
    end
    (nc, dc) = ratfn_leastsquares(x->exp(x), a, 2, 2)
    println("  ",ratfn_to_string(nc, dc))
    for x = a
        test(approx_eq(exp(x), ratfn_eval(nc, dc, x), 1e-4))
    end

    # Test golden section search.
    println("Golden section test 1:")
    x, y = goldensection(x->sin(x),
                         BigFloat(0), BigFloat(1)/10, BigFloat(4))
    println("  ", x, " -> ", y)
    test(approx_eq(x, asin(BigFloat(1))))
    test(approx_eq(y, 1))

    # Test extrema-winnowing algorithm.
    println("Winnow test 1:")
    extrema = [(x, sin(20*x)*sin(197*x))
               for x in BigFloat(0):BigFloat(1)/1000:BigFloat(1)]
    winnowed = winnow_extrema(extrema, 12)
    println("  ret = ", format_xylist(winnowed))
    prevx, prevy = -1, 0
    for (x,y) = winnowed
        test(x > prevx)
        test(y != 0)
        test(prevy * y <= 0) # tolerates initial prevy having no sign
        test(abs(y) > 0.9)
        prevx, prevy = x, y
    end

    # Test actual minimax approximation.
    println("Minimax test 1 (polynomial):")
    (nc, dc, e, x) = ratfn_minimax(x->exp(x), (BigFloat(0), BigFloat(1)), 4, 0)
    println("  ",e)
    println("  ",ratfn_to_string(nc, dc))
    test(0 < e < 1e-3)
    for x = 0:BigFloat(1)/1000:1
        test(abs(ratfn_eval(nc, dc, x) - exp(x)) <= e * 1.0000001)
    end

    println("Minimax test 2 (rational):")
    (nc, dc, e, x) = ratfn_minimax(x->exp(x), (BigFloat(0), BigFloat(1)), 2, 2)
    println("  ",e)
    println("  ",ratfn_to_string(nc, dc))
    test(0 < e < 1e-3)
    for x = 0:BigFloat(1)/1000:1
        test(abs(ratfn_eval(nc, dc, x) - exp(x)) <= e * 1.0000001)
    end

    println("Minimax test 3 (polynomial, weighted):")
    (nc, dc, e, x) = ratfn_minimax(x->exp(x), (BigFloat(0), BigFloat(1)), 4, 0,
                                   (x,y)->1/y)
    println("  ",e)
    println("  ",ratfn_to_string(nc, dc))
    test(0 < e < 1e-3)
    for x = 0:BigFloat(1)/1000:1
        test(abs(ratfn_eval(nc, dc, x) - exp(x))/exp(x) <= e * 1.0000001)
    end

    println("Minimax test 4 (rational, weighted):")
    (nc, dc, e, x) = ratfn_minimax(x->exp(x), (BigFloat(0), BigFloat(1)), 2, 2,
                                   (x,y)->1/y)
    println("  ",e)
    println("  ",ratfn_to_string(nc, dc))
    test(0 < e < 1e-3)
    for x = 0:BigFloat(1)/1000:1
        test(abs(ratfn_eval(nc, dc, x) - exp(x))/exp(x) <= e * 1.0000001)
    end

    println("Minimax test 5 (rational, weighted, odd degree):")
    (nc, dc, e, x) = ratfn_minimax(x->exp(x), (BigFloat(0), BigFloat(1)), 2, 1,
                                   (x,y)->1/y)
    println("  ",e)
    println("  ",ratfn_to_string(nc, dc))
    test(0 < e < 1e-3)
    for x = 0:BigFloat(1)/1000:1
        test(abs(ratfn_eval(nc, dc, x) - exp(x))/exp(x) <= e * 1.0000001)
    end

    total = passes + fails
    println(passes, " passes ", fails, " fails ", total, " total")
end

# ----------------------------------------------------------------------
# Online help.
function help()
    print("""
Usage:

    remez.jl [options] <lo> <hi> <n> <d> <expr> [<weight>]

Arguments:

    <lo>, <hi>

        Bounds of the interval on which to approximate the target
        function. These are parsed and evaluated as Julia expressions,
        so you can write things like '1/BigFloat(6)' to get an
        accurate representation of 1/6, or '4*atan(BigFloat(1))' to
        get pi. (Unfortunately, the obvious 'BigFloat(pi)' doesn't
        work in Julia.)

    <n>, <d>

        The desired degree of polynomial(s) you want for your
        approximation. These should be non-negative integers. If you
        want a rational function as output, set <n> to the degree of
        the numerator, and <d> the denominator. If you just want an
        ordinary polynomial, set <d> to 0, and <n> to the degree of
        the polynomial you want.

    <expr>

        A Julia expression giving the function to be approximated on
        the interval. The input value is predefined as 'x' when this
        expression is evaluated, so you should write something along
        the lines of 'sin(x)' or 'sqrt(1+tan(x)^2)' etc.

    <weight>

        If provided, a Julia expression giving the weighting factor
        for the approximation error. The output polynomial will
        minimise the largest absolute value of (P-f) * w at any point
        in the interval, where P is the value of the polynomial, f is
        the value of the target function given by <expr>, and w is the
        weight given by this function.

        When this expression is evaluated, the input value to P and f
        is predefined as 'x', and also the true output value f(x) is
        predefined as 'y'. So you can minimise the relative error by
        simply writing '1/y'.

        If the <weight> argument is not provided, the default
        weighting function always returns 1, so that the polynomial
        will minimise the maximum absolute error |P-f|.

Computation options:

    --pre=<predef_expr>

        Evaluate the Julia expression <predef_expr> before starting
        the computation. This permits you to pre-define variables or
        functions which the Julia expressions in your main arguments
        can refer to. All of <lo>, <hi>, <expr> and <weight> can make
        use of things defined by <predef_expr>.

        One internal remez.jl function that you might sometimes find
        useful in this expression is 'goldensection', which finds the
        location and value of a maximum of a function. For example,
        one implementation strategy for the gamma function involves
        translating it to put its unique local minimum at the origin,
        in which case you can write something like this

            --pre='(m,my) = goldensection(x -> -gamma(x),
                  BigFloat(1), BigFloat(1.5), BigFloat(2))'

        to predefine 'm' as the location of gamma's minimum, and 'my'
        as the (negated) value that gamma actually takes at that
        point, i.e. -gamma(m).

        (Since 'goldensection' always finds a maximum, we had to
        negate gamma in the input function to make it find a minimum
        instead. Consult the comments in the source for more details
        on the use of this function.)

        If you use this option more than once, all the expressions you
        provide will be run in sequence.

    --bits=<bits>

        Specify the accuracy to which you want the output polynomial,
        in bits. Default 256, which should be more than enough.

    --bigfloatbits=<bits>

        Turn up the precision used by Julia for its BigFloat
        evaluation. Default is Julia's default (also 256). You might
        want to try setting this higher than the --bits value if the
        algorithm is failing to converge for some reason.

Output options:

    --full

        Instead of just printing the approximation function itself,
        also print auxiliary information:
         - the locations of the error extrema, and the actual
           (weighted) error at each of those locations
         - the overall maximum error of the function
         - a 'well-conditioning quotient', giving the worst-case ratio
           between any polynomial coefficient and the largest possible
           value of the higher-order terms it will be added to.

        The well-conditioning quotient should be less than 1, ideally
        by several factors of two, for accurate evaluation in the
        target precision. If you request a rational function, a
        separate well-conditioning quotient will be printed for the
        numerator and denominator.

        Use this option when deciding how wide an interval to
        approximate your function on, and what degree of polynomial
        you need.

    --variable=<identifier>

        When writing the output polynomial or rational function in its
        usual form as an arithmetic expression, use <identifier> as
        the name of the input variable. Default is 'x'.

    --suffix=<suffix>

        When writing the output polynomial or rational function in its
        usual form as an arithmetic expression, write <suffix> after
        every floating-point literal. For example, '--suffix=F' will
        generate a C expression in which the coefficients are literals
        of type 'float' rather than 'double'.

    --array

        Instead of writing the output polynomial as an arithmetic
        expression in Horner's rule form, write out just its
        coefficients, one per line, each with a trailing comma.
        Suitable for pasting into a C array declaration.

        This option is not currently supported if the output is a
        rational function, because you'd need two separate arrays for
        the numerator and denominator coefficients and there's no
        obviously right way to provide both of those together.

Debug and test options:

    --debug=<facility>

        Enable debugging output from various parts of the Remez
        calculation. <facility> should be the name of one of the
        classes of diagnostic output implemented in the program.
        Useful values include 'gausselim', 'leastsquares',
        'goldensection', 'equaldev', 'minimax'. This is probably
        mostly useful to people debugging problems with the script, so
        consult the source code for more information about what the
        diagnostic output for each of those facilities will be.

        If you want diagnostics from more than one facility, specify
        this option multiple times with different arguments.

    --test

        Run remez.jl's internal test suite. No arguments needed.

Miscellaneous options:

    --help

        Display this text and exit. No arguments needed.
""")
end
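
# A typical invocation, for illustration (hypothetical, mirroring the
# unit tests above):
#
#     remez.jl 0 1 4 0 'exp(x)'
#
# which prints a degree-4 polynomial minimising the maximum absolute
# error of exp(x) on [0,1].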

# ----------------------------------------------------------------------
# Main program.

function main()
    nargs = length(argwords)
    if nargs != 5 && nargs != 6
        error("usage: remez.jl <lo> <hi> <n> <d> <expr> [<weight>]\n" *
              "       run 'remez.jl --help' for more help")
    end

    for preliminary_command in preliminary_commands
        eval(Meta.parse(preliminary_command))
    end

    lo = BigFloat(eval(Meta.parse(argwords[1])))
    hi = BigFloat(eval(Meta.parse(argwords[2])))
    n = parse(Int,argwords[3])
    d = parse(Int,argwords[4])
    f = eval(Meta.parse("x -> " * argwords[5]))

    # Wrap the user-provided function with a function of our own. This
    # arranges to detect silly FP values (inf,nan) early and diagnose
    # them sensibly, and also lets us log all evaluations of the
    # function in case you suspect it's doing the wrong thing at some
    # special-case point.
    function func(x)
        y = run(f,x)
        @debug("f", x, " -> ", y)
        if !isfinite(y)
            error("f(" * string(x) * ") returned non-finite value " * string(y))
        end
        return y
    end

    if nargs == 6
        # Wrap the user-provided weight function similarly.
        w = eval(Meta.parse("(x,y) -> " * argwords[6]))
        function wrapped_weight(x,y)
            ww = run(w,x,y)
            if !isfinite(ww)
                error("w(" * string(x) * "," * string(y) *
                      ") returned non-finite value " * string(ww))
            end
            return ww
        end
        weight = wrapped_weight
    else
        weight = (x,y)->BigFloat(1)
    end

    (nc, dc, e, extrema) = ratfn_minimax(func, (lo, hi), n, d, weight)
    if array_format
        if d == 0
            functext = join([string(x)*",\n" for x=nc],"")
        else
            # It's unclear how you should best format an array of
            # coefficients for a rational function, so I'll leave
            # implementing this option until I have a use case.
            error("--array unsupported for rational functions")
        end
    else
        functext = ratfn_to_string(nc, dc) * "\n"
    end
    if full_output
        # Print everything you might want to know about the function
        println("extrema = ", format_xylist(extrema))
        println("maxerror = ", string(e))
        if length(dc) > 1
            println("wellconditioning_numerator = ",
                    string(wellcond(nc, lo, hi)))
            println("wellconditioning_denominator = ",
                    string(wellcond(dc, lo, hi)))
        else
            println("wellconditioning = ", string(wellcond(nc, lo, hi)))
        end
        print("function = ", functext)
    else
        # Just print the text people will want to paste into their code
        print(functext)
    end
end

# ----------------------------------------------------------------------
# Top-level code: parse the argument list and decide what to do.

what_to_do = main

doing_opts = true
argwords = array1d(String, 0)
for arg = ARGS
    global doing_opts, what_to_do, argwords
    global full_output, array_format, xvarname, floatsuffix, epsbits
    if doing_opts && startswith(arg, "-")
        if arg == "--"
            doing_opts = false
        elseif arg == "--help"
            what_to_do = help
        elseif arg == "--test"
            what_to_do = test
        elseif arg == "--full"
            full_output = true
        elseif arg == "--array"
            array_format = true
        elseif startswith(arg, "--debug=")
            enable_debug(arg[length("--debug=")+1:end])
        elseif startswith(arg, "--variable=")
            xvarname = arg[length("--variable=")+1:end]
        elseif startswith(arg, "--suffix=")
            floatsuffix = arg[length("--suffix=")+1:end]
        elseif startswith(arg, "--bits=")
            epsbits = parse(Int,arg[length("--bits=")+1:end])
        elseif startswith(arg, "--bigfloatbits=")
            set_bigfloat_precision(
                parse(Int,arg[length("--bigfloatbits=")+1:end]))
        elseif startswith(arg, "--pre=")
            push!(preliminary_commands, arg[length("--pre=")+1:end])
        else
            error("unrecognised option: ", arg)
        end
    else
        push!(argwords, arg)
    end
end

what_to_do()