Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
sagemath
GitHub Repository: sagemath/sagesmc
Path: blob/master/src/sage/libs/mpmath/ext_impl.pyx
8815 views
1
"""
2
This module provides the core implementation of multiprecision
3
floating-point arithmetic. Operations are done in-place.
4
5
TESTS:
6
7
See if :trac:`15118` is fixed::
8
9
sage: import mpmath
10
sage: mpmath.mpf(0)^(-2)
11
Traceback (most recent call last):
12
...
13
ZeroDivisionError
14
sage: mpmath.zeta(2r, -3r)
15
Traceback (most recent call last):
16
...
17
ZeroDivisionError
18
"""
19
20
include 'sage/ext/interrupt.pxi'
21
include "sage/ext/stdsage.pxi"
22
from cpython.int cimport *
23
from cpython.long cimport *
24
from cpython.float cimport *
25
from cpython.complex cimport *
26
from cpython.number cimport *
27
28
cdef extern from "math.h":
    # C double-precision helpers, renamed to avoid clashing with Python names.
    cdef double fpow "pow" (double, double)
    cdef double fsqrt "sqrt" (double)
    cdef double frexp "frexp" (double, int*)
32
33
from sage.libs.gmp.all cimport *
34
from sage.libs.mpfr cimport *
35
from sage.rings.integer cimport Integer
36
37
cdef extern from "mpz_pylong.h":
    # Conversions between GMP mpz_t and Python int/long objects.
    cdef mpz_get_pylong(mpz_t src)
    cdef mpz_get_pyintlong(mpz_t src)
    cdef int mpz_set_pylong(mpz_t dst, src) except -1
    cdef long mpz_pythonhash(mpz_t src)
42
43
cdef mpz_set_integer(mpz_t v, x):
    """
    Set v to the value of the Python object x, which must be an
    int, long, or Sage Integer; raise TypeError otherwise.
    """
    if PyInt_Check(x):
        mpz_set_si(v, PyInt_AS_LONG(x))
    elif PyLong_Check(x):
        mpz_set_pylong(v, x)
    elif PY_TYPE_CHECK(x, Integer):
        mpz_set(v, (<Integer>x).value)
    else:
        raise TypeError("cannot convert %s to an integer" % x)
52
53
cdef inline void mpz_add_si(mpz_t a, mpz_t b, long x):
    """Set a = b + x for a signed C long x."""
    if x >= 0:
        mpz_add_ui(a, b, x)
    else:
        # careful: overflow when negating LONG_MIN.  Cast to unsigned
        # *before* negating so the negation wraps to the correct
        # magnitude instead of invoking signed overflow.
        mpz_sub_ui(a, b, -(<unsigned long>x))
59
60
cdef inline mpzi(mpz_t n):
    """Convert n to a Python int (or long if it does not fit)."""
    return mpz_get_pyintlong(n)

cdef inline mpzl(mpz_t n):
    """Convert n to a Python long."""
    return mpz_get_pylong(n)
65
66
# This should be done better
67
cdef int mpz_tstbit_abs(mpz_t z, unsigned long bit_index):
    """
    Return the given bit of |z|.  mpz_tstbit works on the two's
    complement representation, so for negative z we temporarily negate
    in place (and restore afterwards) to test the magnitude's bit.
    """
    cdef int res
    if mpz_sgn(z) < 0:
        mpz_neg(z, z)
        res = mpz_tstbit(z, bit_index)
        mpz_neg(z, z)
    else:
        res = mpz_tstbit(z, bit_index)
    return res
76
77
cdef void mpz_set_fixed(mpz_t t, MPF *x, int prec, bint abs=False):
    """
    Set t = x, or t = |x|, as a fixed-point number with prec bits.
    """
    cdef int offset
    # NOTE(review): offset is a C int; a huge exponent would overflow it.
    # Callers are expected to check mpz_reasonable_shift first.
    offset = mpz_get_si(x.exp) + prec
    if offset >= 0:
        mpz_mul_2exp(t, x.man, offset)
    else:
        mpz_tdiv_q_2exp(t, x.man, -offset)
    if abs:
        mpz_abs(t, t)
89
90
cdef unsigned long mpz_bitcount(mpz_t z):
    """Return the bit length of z, with the convention bitcount(0) = 0
    (mpz_sizeinbase would return 1 for zero)."""
    if mpz_sgn(z) == 0:
        return 0
    return mpz_sizeinbase(z, 2)
94
95
# The following limits allowed exponent shifts. We could use mpz_fits_slong_p,
# but then (-LONG_MIN) wraps around; we may also not be able to add large
# shifts safely. A higher limit could be used on 64-bit systems, but
# it is unlikely that anyone will run into this (adding numbers
# that differ by 2^(2^30), at precisions of 2^30 bits).

# Note: MPFR's emax is 1073741823
DEF MAX_SHIFT = 536870912      # 2^29

cdef int mpz_reasonable_shift(mpz_t z):
    """Return nonzero iff |z| < MAX_SHIFT, i.e. z is safe to use as a
    C long shift amount."""
    if mpz_sgn(z) > 0:
        return mpz_cmp_ui(z, MAX_SHIFT) < 0
    else:
        return mpz_cmp_si(z, -MAX_SHIFT) > 0
109
110
# Rounding modes (matching mpmath's 'n', 'f', 'c', 'd', 'u').
DEF ROUND_N = 0
DEF ROUND_F = 1
DEF ROUND_C = 2
DEF ROUND_D = 3
DEF ROUND_U = 4

# Special-value tags for MPF.special.  S_NORMAL means the (man, exp)
# pair is meaningful; all others are singular values.
DEF S_NORMAL = 0
DEF S_ZERO = 1
DEF S_NZERO = 2
DEF S_INF = 3
DEF S_NINF = 4
DEF S_NAN = 5
122
123
cdef inline str rndmode_to_python(int rnd):
    """Convert an internal rounding-mode code to mpmath's one-letter
    string form (returns None for an unknown code)."""
    if rnd == ROUND_N: return 'n'
    if rnd == ROUND_F: return 'f'
    if rnd == ROUND_C: return 'c'
    if rnd == ROUND_D: return 'd'
    if rnd == ROUND_U: return 'u'
129
130
cdef inline rndmode_from_python(str rnd):
    """Convert mpmath's one-letter rounding-mode string to the internal
    integer code (returns None for an unknown string)."""
    if rnd == 'n': return ROUND_N
    if rnd == 'f': return ROUND_F
    if rnd == 'c': return ROUND_C
    if rnd == 'd': return ROUND_D
    if rnd == 'u': return ROUND_U
136
137
cdef inline mpfr_rnd_t rndmode_to_mpfr(int rnd, int sign):
    """
    Convert an internal rounding mode to an MPFR rounding mode.
    ROUND_U (away from zero) has no MPFR equivalent here, so it is
    emulated with RNDU/RNDD chosen by the sign of the operand.
    """
    if rnd == ROUND_N: return GMP_RNDN
    if rnd == ROUND_F: return GMP_RNDD
    if rnd == ROUND_C: return GMP_RNDU
    if rnd == ROUND_D: return GMP_RNDZ
    if rnd == ROUND_U:
        # return GMP_RNDA (unsupported)
        if sign >= 0:
            return GMP_RNDU
        else:
            return GMP_RNDD
148
149
cdef inline int reciprocal_rnd(int rnd):
    """Return the rounding mode that gives a correctly-rounded 1/x when
    x itself was rounded with mode rnd (direction flips under inversion)."""
    if rnd == ROUND_N: return ROUND_N
    if rnd == ROUND_D: return ROUND_U
    if rnd == ROUND_U: return ROUND_D
    if rnd == ROUND_C: return ROUND_F
    if rnd == ROUND_F: return ROUND_C
155
156
# Shared option presets.  prec = 0 means "exact" (no rounding).
cdef MPopts opts_exact
cdef MPopts opts_double_precision
cdef MPopts opts_mini_prec

opts_exact.prec = 0
opts_exact.rounding = ROUND_N
opts_double_precision.prec = 53
opts_double_precision.rounding = ROUND_N
opts_mini_prec.prec = 5
opts_mini_prec.rounding = ROUND_D

# IEEE special doubles, produced arithmetically so no platform
# constants are needed: 1e300 * 1e300 overflows to +inf.
cdef double _double_inf = float("1e300") * float("1e300")
cdef double _double_ninf = -_double_inf
cdef double _double_nan = _double_inf - _double_inf
170
171
cdef inline void MPF_init(MPF *x):
    """Allocate space and set value to zero.
    Must be called exactly once when creating a new MPF."""
    x.special = S_ZERO
    mpz_init(x.man)
    mpz_init(x.exp)
177
178
cdef inline void MPF_clear(MPF *x):
    """Deallocate space. Must be called exactly once when finished with an MPF."""
    mpz_clear(x.man)
    mpz_clear(x.exp)
182
183
cdef inline void MPF_set(MPF *dest, MPF *src):
    """Clone MPF value. Assumes source value is already normalized."""
    if src is dest:
        return
    dest.special = src.special
    mpz_set(dest.man, src.man)
    mpz_set(dest.exp, src.exp)
190
191
cdef inline void MPF_set_zero(MPF *x):
    """Set value to 0."""
    x.special = S_ZERO

cdef inline void MPF_set_one(MPF *x):
    """Set value to 1."""
    x.special = S_NORMAL
    mpz_set_ui(x.man, 1)
    mpz_set_ui(x.exp, 0)

cdef inline void MPF_set_nan(MPF *x):
    """Set value to NaN (not a number)."""
    x.special = S_NAN

cdef inline void MPF_set_inf(MPF *x):
    """Set value to +infinity."""
    x.special = S_INF

cdef inline void MPF_set_ninf(MPF *x):
    """Set value to -infinity."""
    x.special = S_NINF
212
213
cdef MPF_set_si(MPF *x, long n):
    """Set value to that of a given C (long) integer."""
    if n:
        x.special = S_NORMAL
        mpz_set_si(x.man, n)
        mpz_set_ui(x.exp, 0)
        # Normalization strips trailing zero bits into the exponent.
        MPF_normalize(x, opts_exact)
    else:
        MPF_set_zero(x)
222
223
cdef MPF_set_int(MPF *x, n):
    """Set value to that of a given Python integer."""
    x.special = S_NORMAL
    mpz_set_integer(x.man, n)
    if mpz_sgn(x.man):
        mpz_set_ui(x.exp, 0)
        MPF_normalize(x, opts_exact)
    else:
        MPF_set_zero(x)
232
233
cdef MPF_set_man_exp(MPF *x, man, exp):
    """
    Set value to man*2^exp where man, exp may be of any appropriate
    Python integer types.
    """
    x.special = S_NORMAL
    mpz_set_integer(x.man, man)
    mpz_set_integer(x.exp, exp)
    MPF_normalize(x, opts_exact)
242
243
244
# Temporary variables. Note: not thread-safe.
# Used by MPF_add/MPF_sub/MPF_div
cdef mpz_t tmp_exponent
mpz_init(tmp_exponent)
cdef MPF tmp0
MPF_init(&tmp0)

# Used by MPF_hypot and MPF_cmp, which may call MPF_add/MPF_sub
cdef MPF tmp1
MPF_init(&tmp1)
cdef MPF tmp2
MPF_init(&tmp2)


# Constants needed in a few places
cdef MPF MPF_C_1
MPF_init(&MPF_C_1)
MPF_set_si(&MPF_C_1, 1)
cdef Integer MPZ_ZERO = Integer(0)
# Canonical mpmath.libmp tuples for special values; the odd exp/bc
# fields are mpmath's sentinels for nan/inf/-inf.
cdef tuple _mpf_fzero = (0, MPZ_ZERO, 0, 0)
cdef tuple _mpf_fnan = (0, MPZ_ZERO, -123, -1)
cdef tuple _mpf_finf = (0, MPZ_ZERO, -456, -2)
cdef tuple _mpf_fninf = (1, MPZ_ZERO, -789, -3)
267
268
cdef MPF_set_tuple(MPF *x, tuple value):
    """
    Set value of an MPF to that of a normalized (sign, man, exp, bc) tuple
    in the format used by mpmath.libmp.
    """
    #cdef int sign
    cdef Integer man
    sign, _man, exp, bc = value
    if PY_TYPE_CHECK(_man, Integer):
        man = <Integer>_man
    else:
        # This is actually very unlikely; it should never happen
        # in internal code that man isn't an Integer. Maybe the check
        # can be avoided by doing checks in e.g. MPF_set_any?
        man = Integer(_man)
    if mpz_sgn(man.value):
        MPF_set_man_exp(x, man, exp)
        if sign:
            mpz_neg(x.man, x.man)
        return
    # Zero mantissa: distinguish the special values by their sentinel tuples.
    if value == _mpf_fzero:
        MPF_set_zero(x)
    elif value == _mpf_finf:
        MPF_set_inf(x)
    elif value == _mpf_fninf:
        MPF_set_ninf(x)
    else:
        MPF_set_nan(x)
296
297
cdef MPF_to_tuple(MPF *x):
    """Convert MPF value to (sign, man, exp, bc) tuple."""
    cdef Integer man
    if x.special:
        if x.special == S_ZERO: return _mpf_fzero
        #if x.special == S_NZERO: return _mpf_fnzero
        if x.special == S_INF: return _mpf_finf
        if x.special == S_NINF: return _mpf_fninf
        return _mpf_fnan
    # mpmath keeps the mantissa unsigned with a separate sign flag.
    man = PY_NEW(Integer)
    if mpz_sgn(x.man) < 0:
        mpz_neg(man.value, x.man)
        sign = 1
    else:
        mpz_set(man.value, x.man)
        sign = 0
    exp = mpz_get_pyintlong(x.exp)
    bc = mpz_sizeinbase(x.man, 2)
    return (sign, man, exp, bc)
316
317
cdef MPF_set_double(MPF *r, double x):
    """
    Set r to the value of a C double x.
    """
    cdef int exp
    cdef double man
    if x != x:
        # NaN is the only value that compares unequal to itself.
        MPF_set_nan(r)
        return
    if x == _double_inf:
        MPF_set_inf(r)
        return
    if x == _double_ninf:
        MPF_set_ninf(r)
        return
    man = frexp(x, &exp)
    # Scale the fraction (in [0.5, 1)) by 2^53 so it becomes an exact integer.
    man *= 9007199254740992.0
    mpz_set_d(r.man, man)
    mpz_set_si(r.exp, exp-53)
    r.special = S_NORMAL
    MPF_normalize(r, opts_exact)
338
339
import math as pymath
340
341
# TODO: implement this function safely without using the Python math module
cdef double MPF_to_double(MPF *x, bint strict):
    """
    Convert MPF value to a Python float.

    If strict is True, an OverflowError from ldexp propagates; otherwise
    values too large/small for a double round to +/-inf or 0.0.
    """
    if x.special == S_NORMAL:
        man = mpzi(x.man)
        exp = mpzi(x.exp)
        bc = mpz_sizeinbase(x.man, 2)
        try:
            if bc < 100:
                return pymath.ldexp(man, exp)
            # Try resizing the mantissa. Overflow may still happen here.
            n = bc - 53
            m = man >> n
            return pymath.ldexp(m, exp + n)
        except OverflowError:
            if strict:
                raise
            # Overflow to infinity
            if exp + bc > 0:
                if man < 0:
                    return _double_ninf
                else:
                    return _double_inf
            # Underflow to zero
            return 0.0
    if x.special == S_ZERO:
        return 0.0
    if x.special == S_INF:
        return _double_inf
    if x.special == S_NINF:
        return _double_ninf
    return _double_nan
373
374
cdef MPF_to_fixed(mpz_t r, MPF *x, long prec, bint truncate):
    """
    Set r = x, r being in the format of a fixed-point number with prec bits.
    Floor division is used unless truncate=True in which case
    truncating division is used.
    """
    cdef long shift
    if x.special:
        if x.special == S_ZERO or x.special == S_NZERO:
            mpz_set_ui(r, 0)
            return
        raise ValueError("cannot create fixed-point number from special value")
    if mpz_reasonable_shift(x.exp):
        # XXX: signed integer overflow
        shift = mpz_get_si(x.exp) + prec
        if shift >= 0:
            mpz_mul_2exp(r, x.man, shift)
        else:
            if truncate:
                mpz_tdiv_q_2exp(r, x.man, -shift)
            else:
                mpz_fdiv_q_2exp(r, x.man, -shift)
        return
    # Exponent out of range for a C long shift.
    # Underflow
    if mpz_sgn(x.exp) < 0:
        mpz_set_ui(r, 0)
        return
    raise OverflowError("cannot convert huge number to fixed-point format")
402
403
cdef int MPF_sgn(MPF *x):
    """
    Gives the sign of an MPF (-1, 0, or 1).

    NaN and (signed) zeros all report 0.
    """
    if x.special:
        if x.special == S_INF:
            return 1
        if x.special == S_NINF:
            return -1
        return 0
    return mpz_sgn(x.man)
414
415
cdef void MPF_neg(MPF *r, MPF *s):
    """
    Sets r = -s. MPF_neg(x, x) negates in place.
    """
    if s.special:
        # Signed zero is collapsed: -(+0) gives +0 here (see commented code).
        if s.special == S_ZERO: r.special = S_ZERO #r.special = S_NZERO
        elif s.special == S_NZERO: r.special = S_ZERO
        elif s.special == S_INF: r.special = S_NINF
        elif s.special == S_NINF: r.special = S_INF
        else: r.special = s.special
        return
    r.special = s.special
    mpz_neg(r.man, s.man)
    if r is not s:
        mpz_set(r.exp, s.exp)
430
431
cdef void MPF_abs(MPF *r, MPF *s):
    """
    Sets r = abs(s). MPF_abs(r, r) sets the absolute value in place.
    """
    if s.special:
        if s.special == S_NINF: r.special = S_INF
        else: r.special = s.special
        return
    r.special = s.special
    mpz_abs(r.man, s.man)
    if r is not s:
        mpz_set(r.exp, s.exp)
443
444
cdef MPF_normalize(MPF *x, MPopts opts):
    """
    Normalize: round the mantissa to opts.prec bits (per opts.rounding)
    and strip trailing zero bits into the exponent, so every value has a
    unique representation with an odd mantissa.

    With prec = 0, trailing zero bits are stripped but no rounding
    is performed.
    """
    cdef int sign
    cdef long trail, bc, shift
    if x.special != S_NORMAL:
        return
    sign = mpz_sgn(x.man)
    if sign == 0:
        x.special = S_ZERO
        mpz_set_ui(x.exp, 0)
        return
    bc = mpz_sizeinbase(x.man, 2)
    shift = bc - opts.prec
    # Ok if mantissa small and no trailing zero bits
    if (shift <= 0 or not opts.prec) and mpz_odd_p(x.man):
        return
    # Mantissa is too large, so divide by appropriate power of 2
    # Need to be careful about rounding
    if shift > 0 and opts.prec:
        if opts.rounding == ROUND_N:
            # Round to nearest, ties to even: look at the bit below the
            # cut and whether anything nonzero lies below it.
            # NOTE(review): '&x.man' relies on mpz_t array/pointer decay;
            # elsewhere plain 'x.man' is passed to mpz_tstbit_abs — confirm.
            if mpz_tstbit_abs(&x.man, shift-1):
                if mpz_tstbit_abs(&x.man, shift) or mpz_scan1(x.man, 0) < (shift-1):
                    if sign < 0:
                        mpz_fdiv_q_2exp(x.man, x.man, shift)
                    else:
                        mpz_cdiv_q_2exp(x.man, x.man, shift)
                else:
                    mpz_tdiv_q_2exp(x.man, x.man, shift)
            else:
                mpz_tdiv_q_2exp(x.man, x.man, shift)
        elif opts.rounding == ROUND_D:
            mpz_tdiv_q_2exp(x.man, x.man, shift)
        elif opts.rounding == ROUND_F:
            mpz_fdiv_q_2exp(x.man, x.man, shift)
        elif opts.rounding == ROUND_C:
            mpz_cdiv_q_2exp(x.man, x.man, shift)
        elif opts.rounding == ROUND_U:
            if sign < 0:
                mpz_fdiv_q_2exp(x.man, x.man, shift)
            else:
                mpz_cdiv_q_2exp(x.man, x.man, shift)
        else:
            raise ValueError("bad rounding mode")
    else:
        shift = 0
    # Strip trailing bits
    trail = mpz_scan1(x.man, 0)
    if 0 < trail < bc:
        mpz_tdiv_q_2exp(x.man, x.man, trail)
        shift += trail
    mpz_add_si(x.exp, x.exp, shift)
500
501
cdef void MPF_pos(MPF *x, MPF *y, MPopts opts):
    """
    Set x = +y (i.e. copy the value, and round if the
    working precision is smaller than the width
    of the mantissa of y).
    """
    MPF_set(x, y)
    MPF_normalize(x, opts)
510
cdef void _add_special(MPF *r, MPF *s, MPF *t):
    """Set r = s + t where at least one operand is a special value."""
    if s.special == S_ZERO:
        # (+0) + (-0) = +0
        if t.special == S_NZERO:
            MPF_set(r, s)
        # (+0) + x = x
        else:
            MPF_set(r, t)
    elif t.special == S_ZERO:
        # (-0) + (+0) = +0
        if s.special == S_NZERO:
            MPF_set(r, t)
        # x + (+0) = x
        else:
            MPF_set(r, s)
    # (+/- 0) + x = x
    elif s.special == S_NZERO:
        MPF_set(r, t)
    elif t.special == S_NZERO:
        MPF_set(r, s)
    # (+/- inf) + (-/+ inf) = nan
    elif ((s.special == S_INF and t.special == S_NINF) or
       (s.special == S_NINF and t.special == S_INF)):
        MPF_set_nan(r)
    # nan or +/- inf trumps any finite number
    elif s.special == S_NAN or t.special == S_NAN:
        MPF_set_nan(r)
    elif s.special:
        MPF_set(r, s)
    else:
        MPF_set(r, t)
    return
542
543
cdef void _sub_special(MPF *r, MPF *s, MPF *t):
    """Set r = s - t where at least one operand is a special value."""
    if s.special == S_ZERO:
        # (+0) - (+/-0) = (+0)
        if t.special == S_NZERO:
            MPF_set(r, s)
        else:
            # (+0) - x = (-x)
            MPF_neg(r, t)
    elif t.special == S_ZERO:
        # x - (+0) = x; also covers (-0) - (+0) = (-0)
        MPF_set(r, s)
    # (-0) - x = x
    elif s.special == S_NZERO:
        # (-0) - (-0) = (+0)
        if t.special == S_NZERO:
            MPF_set_zero(r)
        # (-0) - x = -x
        else:
            MPF_neg(r, t)
    elif t.special == S_NZERO:
        # x - (-0) = x
        MPF_set(r, s)
    # (+/- inf) - (+/- inf) = nan
    elif ((s.special == S_INF and t.special == S_INF) or
       (s.special == S_NINF and t.special == S_NINF)):
        MPF_set_nan(r)
    elif s.special == S_NAN or t.special == S_NAN:
        MPF_set_nan(r)
    # nan - x or (+/-inf) - x = l.h.s
    elif s.special:
        MPF_set(r, s)
    # x - nan or x - (+/-inf) = (- r.h.s)
    else:
        MPF_neg(r, t)
577
578
cdef void _mul_special(MPF *r, MPF *s, MPF *t):
    """Set r = s * t where at least one operand is a special value."""
    if s.special == S_ZERO:
        if t.special == S_NORMAL or t.special == S_ZERO:
            MPF_set(r, s)
        elif t.special == S_NZERO:
            MPF_set(r, t)
        else:
            # 0 * inf = nan
            MPF_set_nan(r)
    elif t.special == S_ZERO:
        if s.special == S_NORMAL:
            MPF_set(r, t)
        elif s.special == S_NZERO:
            MPF_set(r, s)
        else:
            MPF_set_nan(r)
    elif s.special == S_NZERO:
        if t.special == S_NORMAL:
            # sign of the zero follows the sign of the finite factor
            if mpz_sgn(t.man) < 0:
                MPF_set_zero(r)
            else:
                MPF_set(r, s)
        else:
            MPF_set_nan(r)
    elif t.special == S_NZERO:
        if s.special == S_NORMAL:
            if mpz_sgn(s.man) < 0:
                MPF_set_zero(r)
            else:
                MPF_set(r, t)
        else:
            MPF_set_nan(r)
    elif s.special == S_NAN or t.special == S_NAN:
        MPF_set_nan(r)
    else:
        # At least one infinity; the product's sign is the product of signs.
        if MPF_sgn(s) == MPF_sgn(t):
            MPF_set_inf(r)
        else:
            MPF_set_ninf(r)
616
617
cdef _div_special(MPF *r, MPF *s, MPF *t):
    """Set r = s / t where at least one operand is a special value.

    Raises ZeroDivisionError when t is (+/-) zero.
    """
    # TODO: handle signed zeros correctly
    if s.special == S_NAN or t.special == S_NAN:
        MPF_set_nan(r)
    elif t.special == S_ZERO or t.special == S_NZERO:
        raise ZeroDivisionError
    elif s.special == S_ZERO or s.special == S_NZERO:
        MPF_set_zero(r)
    elif s.special == S_NORMAL:
        # s finite, so t must be +/- inf here: finite / inf = 0
        MPF_set_zero(r)
    elif s.special == S_INF or s.special == S_NINF:
        if t.special == S_INF or t.special == S_NINF:
            MPF_set_nan(r)
        elif MPF_sgn(s) == MPF_sgn(t):
            MPF_set_inf(r)
        else:
            MPF_set_ninf(r)
    # else:
    elif t.special == S_INF or t.special == S_NINF:
        MPF_set_zero(r)
637
638
cdef _add_perturbation(MPF *r, MPF *s, int sign, MPopts opts):
    """
    Set r = s rounded as if an infinitesimally small quantity with the
    given sign had been added; used when the second addend is too small
    to affect anything but directed rounding.
    """
    cdef long shift
    if opts.rounding == ROUND_N:
        # Round-to-nearest is unaffected by an infinitesimal perturbation.
        MPF_set(r, s)
    else:
        # Fixed: measure the mantissa s.man — mpz_sizeinbase cannot be
        # applied to the MPF struct pointer itself.
        shift = opts.prec - mpz_sizeinbase(s.man, 2) + 8
        if shift < 0:
            shift = 8
        mpz_mul_2exp(r.man, s.man, shift)
        mpz_add_si(r.man, r.man, sign)
        mpz_sub_ui(r.exp, s.exp, shift)
        MPF_normalize(r, opts)
650
651
cdef MPF_add(MPF *r, MPF *s, MPF *t, MPopts opts):
    """
    Set r = s + t, with exact rounding.

    With prec = 0, the addition is performed exactly. Note that this
    may cause overflow if the exponents are huge.

    Uses the module-level temporaries tmp_exponent/tmp0: not thread-safe.
    """
    cdef long shift, sbc, tbc
    #assert (r is not s) and (r is not t)
    if s.special or t.special:
        _add_special(r, s, t)
        return
    r.special = S_NORMAL
    # Difference between exponents
    mpz_sub(tmp_exponent, s.exp, t.exp)
    if mpz_reasonable_shift(tmp_exponent):
        shift = mpz_get_si(tmp_exponent)
        if shift >= 0:
            # |s| >> |t|
            if shift > 2*opts.prec and opts.prec:
                # Fixed: mpz_sizeinbase must be applied to the mantissas
                # (s.man/t.man), not to the MPF struct pointers.
                sbc = mpz_sizeinbase(s.man, 2)
                tbc = mpz_sizeinbase(t.man, 2)
                if shift + sbc - tbc > opts.prec+8:
                    _add_perturbation(r, s, mpz_sgn(t.man), opts)
                    return
            # |s| > |t|
            mpz_mul_2exp(tmp0.man, s.man, shift)
            mpz_add(r.man, tmp0.man, t.man)
            mpz_set(r.exp, t.exp)
            MPF_normalize(r, opts)
        elif shift < 0:
            shift = -shift
            # |s| << |t|
            if shift > 2*opts.prec and opts.prec:
                sbc = mpz_sizeinbase(s.man, 2)
                tbc = mpz_sizeinbase(t.man, 2)
                if shift + tbc - sbc > opts.prec+8:
                    _add_perturbation(r, t, mpz_sgn(s.man), opts)
                    return
            # |s| < |t|
            mpz_mul_2exp(tmp0.man, t.man, shift)
            mpz_add(r.man, tmp0.man, s.man)
            mpz_set(r.exp, s.exp)
            MPF_normalize(r, opts)
    else:
        if not opts.prec:
            raise OverflowError("the exact result does not fit in memory")
        # |s| >>> |t|
        if mpz_sgn(tmp_exponent) > 0:
            _add_perturbation(r, s, mpz_sgn(t.man), opts)
        # |s| <<< |t|
        else:
            _add_perturbation(r, t, mpz_sgn(s.man), opts)
704
705
cdef MPF_sub(MPF *r, MPF *s, MPF *t, MPopts opts):
    """
    Set r = s - t, with exact rounding.

    With prec = 0, the subtraction is performed exactly. Note that this
    may cause overflow if the exponents are huge.

    Uses the module-level temporaries tmp_exponent/tmp0: not thread-safe.
    """
    cdef long shift, sbc, tbc
    #assert (r is not s) and (r is not t)
    if s.special or t.special:
        _sub_special(r, s, t)
        return
    r.special = S_NORMAL
    # Difference between exponents
    mpz_sub(tmp_exponent, s.exp, t.exp)
    if mpz_reasonable_shift(tmp_exponent):
        shift = mpz_get_si(tmp_exponent)
        if shift >= 0:
            # |s| >> |t|
            if shift > 2*opts.prec and opts.prec:
                # Fixed: mpz_sizeinbase must be applied to the mantissas
                # (s.man/t.man), not to the MPF struct pointers.
                sbc = mpz_sizeinbase(s.man, 2)
                tbc = mpz_sizeinbase(t.man, 2)
                if shift + sbc - tbc > opts.prec+8:
                    _add_perturbation(r, s, -mpz_sgn(t.man), opts)
                    return
            # |s| > |t|
            mpz_mul_2exp(tmp0.man, s.man, shift)
            mpz_sub(r.man, tmp0.man, t.man)
            mpz_set(r.exp, t.exp)
            MPF_normalize(r, opts)
        elif shift < 0:
            shift = -shift
            # |s| << |t|
            if shift > 2*opts.prec and opts.prec:
                sbc = mpz_sizeinbase(s.man, 2)
                tbc = mpz_sizeinbase(t.man, 2)
                if shift + tbc - sbc > opts.prec+8:
                    # Perturb t, then negate since the result is -(t - s).
                    _add_perturbation(r, t, -mpz_sgn(s.man), opts)
                    MPF_neg(r, r)
                    return
            # |s| < |t|
            mpz_mul_2exp(tmp0.man, t.man, shift)
            mpz_sub(r.man, s.man, tmp0.man)
            mpz_set(r.exp, s.exp)
            MPF_normalize(r, opts)
    else:
        if not opts.prec:
            raise OverflowError("the exact result does not fit in memory")
        # |s| >>> |t|
        if mpz_sgn(tmp_exponent) > 0:
            _add_perturbation(r, s, -mpz_sgn(t.man), opts)
        # |s| <<< |t|
        else:
            _add_perturbation(r, t, -mpz_sgn(s.man), opts)
            MPF_neg(r, r)
760
761
cdef bint MPF_eq(MPF *s, MPF *t):
    """
    Evaluates s == t.  NaN compares unequal to everything, including itself.
    Assumes both operands are normalized, so equal values have identical
    (man, exp) pairs.
    """
    if s.special == S_NAN or t.special == S_NAN:
        return False
    if s.special == t.special:
        if s.special == S_NORMAL:
            return (mpz_cmp(s.man, t.man) == 0) and (mpz_cmp(s.exp, t.exp) == 0)
        else:
            return True
    return False
773
774
cdef bint MPF_ne(MPF *s, MPF *t):
    """
    Evaluates s != t.  NaN is unequal to everything, including itself.
    """
    if s.special == S_NAN or t.special == S_NAN:
        return True
    if s.special == S_NORMAL and t.special == S_NORMAL:
        return (mpz_cmp(s.man, t.man) != 0) or (mpz_cmp(s.exp, t.exp) != 0)
    return s.special != t.special
783
784
cdef int MPF_cmp(MPF *s, MPF *t):
    """
    Evaluates cmp(s,t). Conventions for nan follow those
    of the mpmath.libmp function.

    Uses the module-level temporaries tmp1/tmp2: not thread-safe.
    """
    cdef long sbc, tbc
    cdef int cm
    if MPF_eq(s, t):
        return 0
    if s.special != S_NORMAL or t.special != S_NORMAL:
        if s.special == S_ZERO: return -MPF_sgn(t)
        if t.special == S_ZERO: return MPF_sgn(s)
        if t.special == S_NAN: return 1
        if s.special == S_INF: return 1
        if t.special == S_NINF: return 1
        return -1
    if mpz_sgn(s.man) != mpz_sgn(t.man):
        if mpz_sgn(s.man) < 0:
            return -1
        else:
            return 1
    # Same sign: first try the exponents...
    if not mpz_cmp(s.exp, t.exp):
        return mpz_cmp(s.man, t.man)
    # ...then compare magnitudes via the position of the leading bit
    # (exp + bitcount), which decides unless they are equal.
    mpz_add_ui(tmp1.exp, s.exp, mpz_sizeinbase(s.man, 2))
    mpz_add_ui(tmp2.exp, t.exp, mpz_sizeinbase(t.man, 2))
    cm = mpz_cmp(tmp1.exp, tmp2.exp)
    if mpz_sgn(s.man) < 0:
        if cm < 0: return 1
        if cm > 0: return -1
    else:
        if cm < 0: return -1
        if cm > 0: return 1
    # Magnitudes overlap: fall back to a low-precision subtraction,
    # whose sign is still exact.
    MPF_sub(&tmp1, s, t, opts_mini_prec)
    return MPF_sgn(&tmp1)
818
819
cdef bint MPF_lt(MPF *s, MPF *t):
    """
    Evaluates s < t.  Any comparison with NaN is False.
    """
    if s.special == S_NAN or t.special == S_NAN:
        return False
    return MPF_cmp(s, t) < 0

cdef bint MPF_le(MPF *s, MPF *t):
    """
    Evaluates s <= t.  Any comparison with NaN is False.
    """
    if s.special == S_NAN or t.special == S_NAN:
        return False
    return MPF_cmp(s, t) <= 0

cdef bint MPF_gt(MPF *s, MPF *t):
    """
    Evaluates s > t.  Any comparison with NaN is False.
    """
    if s.special == S_NAN or t.special == S_NAN:
        return False
    return MPF_cmp(s, t) > 0

cdef bint MPF_ge(MPF *s, MPF *t):
    """
    Evaluates s >= t.  Any comparison with NaN is False.
    """
    if s.special == S_NAN or t.special == S_NAN:
        return False
    return MPF_cmp(s, t) >= 0
850
851
cdef MPF_mul(MPF *r, MPF *s, MPF *t, MPopts opts):
    """
    Set r = s * t, with correct rounding.

    With prec = 0, the multiplication is performed exactly,
    i.e. no rounding is performed.
    """
    if s.special or t.special:
        _mul_special(r, s, t)
    else:
        r.special = S_NORMAL
        # man*2^exp representation multiplies componentwise.
        mpz_mul(r.man, s.man, t.man)
        mpz_add(r.exp, s.exp, t.exp)
        if opts.prec:
            MPF_normalize(r, opts)
866
867
cdef MPF_div(MPF *r, MPF *s, MPF *t, MPopts opts):
    """
    Set r = s / t, with correct rounding.

    Uses the module-level temporary tmp0: not thread-safe.
    """
    cdef int sign
    cdef long sbc, tbc, extra
    cdef mpz_t rem
    #assert (r is not s) and (r is not t)
    if s.special or t.special:
        _div_special(r, s, t)
        return
    r.special = S_NORMAL
    # Division by a power of two <=> shift exponents
    if mpz_cmp_si(t.man, 1) == 0:
        MPF_set(&tmp0, s)
        mpz_sub(tmp0.exp, tmp0.exp, t.exp)
        MPF_normalize(&tmp0, opts)
        MPF_set(r, &tmp0)
        return
    elif mpz_cmp_si(t.man, -1) == 0:
        MPF_neg(&tmp0, s)
        mpz_sub(tmp0.exp, tmp0.exp, t.exp)
        MPF_normalize(&tmp0, opts)
        MPF_set(r, &tmp0)
        return
    sign = mpz_sgn(s.man) != mpz_sgn(t.man)
    # Same strategy as for addition: if there is a remainder, perturb
    # the result a few bits outside the precision range before rounding
    extra = opts.prec - mpz_sizeinbase(s.man,2) + mpz_sizeinbase(t.man,2) + 5
    if extra < 5:
        extra = 5
    mpz_init(rem)
    mpz_mul_2exp(tmp0.man, s.man, extra)
    mpz_tdiv_qr(r.man, rem, tmp0.man, t.man)
    if mpz_sgn(rem):
        # Nudge the quotient one ulp toward the true value so directed
        # rounding in MPF_normalize comes out correct.
        mpz_mul_2exp(r.man, r.man, 1)
        if sign:
            mpz_sub_ui(r.man, r.man, 1)
        else:
            mpz_add_ui(r.man, r.man, 1)
        extra += 1
    mpz_clear(rem)
    mpz_sub(r.exp, s.exp, t.exp)
    mpz_sub_ui(r.exp, r.exp, extra)
    MPF_normalize(r, opts)
912
913
cdef int MPF_sqrt(MPF *r, MPF *s, MPopts opts):
    """
    Set r = sqrt(s), with correct rounding.

    Returns 1 (with r = nan) if s is a negative normal number,
    0 otherwise.
    """
    cdef long shift
    cdef mpz_t rem
    #assert r is not s
    if s.special:
        if s.special == S_ZERO or s.special == S_INF:
            MPF_set(r, s)
        else:
            MPF_set_nan(r)
        return 0
    if mpz_sgn(s.man) < 0:
        MPF_set_nan(r)
        return 1
    r.special = S_NORMAL
    # Make the exponent even so it can be halved exactly.
    if mpz_odd_p(s.exp):
        mpz_sub_ui(r.exp, s.exp, 1)
        mpz_mul_2exp(r.man, s.man, 1)
    elif mpz_cmp_ui(s.man, 1) == 0:
        # Square root of a power of two
        mpz_set_ui(r.man, 1)
        mpz_tdiv_q_2exp(r.exp, s.exp, 1)
        MPF_normalize(r, opts)
        return 0
    else:
        mpz_set(r.man, s.man)
        mpz_set(r.exp, s.exp)
    # Scale the mantissa up so the integer square root carries
    # ~opts.prec significant bits (shift kept even).
    shift = 2*opts.prec - mpz_sizeinbase(r.man,2) + 4
    if shift < 4:
        shift = 4
    shift += shift & 1
    mpz_mul_2exp(r.man, r.man, shift)
    if opts.rounding == ROUND_F or opts.rounding == ROUND_D:
        mpz_sqrt(r.man, r.man)
    else:
        mpz_init(rem)
        mpz_sqrtrem(r.man, rem, r.man)
        if mpz_sgn(rem):
            # Inexact: nudge one ulp upward so directed rounding is correct.
            mpz_mul_2exp(r.man, r.man, 1)
            mpz_add_ui(r.man, r.man, 1)
            shift += 2
        mpz_clear(rem)
    mpz_add_si(r.exp, r.exp, -shift)
    mpz_tdiv_q_2exp(r.exp, r.exp, 1)
    MPF_normalize(r, opts)
    return 0
961
962
cdef MPF_hypot(MPF *r, MPF *a, MPF *b, MPopts opts):
    """
    Set r = sqrt(a^2 + b^2)

    Uses the module-level temporaries tmp1/tmp2: not thread-safe.
    """
    cdef MPopts tmp_opts
    if a.special == S_ZERO:
        MPF_abs(r, b)
        MPF_normalize(r, opts)
        return
    if b.special == S_ZERO:
        MPF_abs(r, a)
        MPF_normalize(r, opts)
        return
    # Square exactly, add with 30 guard bits, then take the rounded sqrt.
    tmp_opts = opts
    tmp_opts.prec += 30
    MPF_mul(&tmp1, a, a, opts_exact)
    MPF_mul(&tmp2, b, b, opts_exact)
    MPF_add(r, &tmp1, &tmp2, tmp_opts)
    MPF_sqrt(r, r, opts)
981
982
cdef MPF_pow_int(MPF *r, MPF *x, mpz_t n, MPopts opts):
    """
    Set r = x ** n. Currently falls back to mpmath.libmp
    unless n is tiny.

    Raises ZeroDivisionError for 0 raised to a negative power.
    """
    cdef long m, absm
    cdef unsigned long bc
    cdef int nsign
    if x.special != S_NORMAL:
        nsign = mpz_sgn(n)
        if x.special == S_ZERO:
            if nsign < 0:
                raise ZeroDivisionError
            elif nsign == 0:
                MPF_set(r, &MPF_C_1)
            else:
                MPF_set_zero(r)
        elif x.special == S_INF:
            if nsign > 0:
                MPF_set(r, x)
            elif nsign == 0:
                MPF_set_nan(r)
            else:
                MPF_set_zero(r)
        elif x.special == S_NINF:
            if nsign > 0:
                if mpz_odd_p(n):
                    MPF_set(r, x)
                else:
                    MPF_neg(r, x)
            elif nsign == 0:
                MPF_set_nan(r)
            else:
                MPF_set_zero(r)
        else:
            MPF_set_nan(r)
        return
    # Fixed: measure the base's mantissa x.man; the previous code read
    # r.man, i.e. the (stale/uninitialized) output operand, making the
    # size guard below meaningless.
    bc = mpz_sizeinbase(x.man, 2)
    r.special = S_NORMAL
    if mpz_reasonable_shift(n):
        m = mpz_get_si(n)
        if m == 0:
            MPF_set(r, &MPF_C_1)
            return
        if m == 1:
            MPF_set(r, x)
            MPF_normalize(r, opts)
            return
        if m == 2:
            MPF_mul(r, x, x, opts)
            return
        if m == -1:
            MPF_div(r, &MPF_C_1, x, opts)
            return
        if m == -2:
            MPF_mul(r, x, x, opts_exact)
            MPF_div(r, &MPF_C_1, r, opts)
            return
        absm = abs(m)
        # Only exponentiate directly while the result stays small.
        if bc * absm < 10000:
            mpz_pow_ui(r.man, x.man, absm)
            mpz_mul_ui(r.exp, x.exp, absm)
            if m < 0:
                MPF_div(r, &MPF_C_1, r, opts)
            else:
                MPF_normalize(r, opts)
            return
    r.special = S_NORMAL
    # (2^p)^n
    if mpz_cmp_si(x.man, 1) == 0:
        mpz_set(r.man, x.man)
        mpz_mul(r.exp, x.exp, n)
        return
    # (-2^p)^n
    if mpz_cmp_si(x.man, -1) == 0:
        if mpz_odd_p(n):
            mpz_set(r.man, x.man)
        else:
            mpz_neg(r.man, x.man)
        mpz_mul(r.exp, x.exp, n)
        return
    # TODO: implement efficiently here
    import mpmath.libmp
    MPF_set_tuple(r,
        mpmath.libmp.mpf_pow_int(MPF_to_tuple(x), mpzi(n),
        opts.prec, rndmode_to_python(opts.rounding)))
1068
1069
# Cached fixed-point values of pi and ln(2); _*_prec == -1 means
# "not yet initialized".  Not thread-safe.
cdef mpz_t _pi_value
cdef int _pi_prec = -1

cdef mpz_t _ln2_value
cdef int _ln2_prec = -1
1074
1075
cdef mpz_set_pi(mpz_t x, int prec):
    """
    Set x = pi as a fixed-point number with prec bits.

    The value is cached at the highest precision requested so far and
    truncated down for lower-precision requests.
    """
    global _pi_value
    global _pi_prec
    if prec <= _pi_prec:
        mpz_tdiv_q_2exp(x, _pi_value, _pi_prec-prec)
    else:
        from mpmath.libmp import pi_fixed
        if _pi_prec < 0:
            mpz_init(_pi_value)
        mpz_set_integer(_pi_value, pi_fixed(prec))
        mpz_set(x, _pi_value)
        _pi_prec = prec
1090
1091
cdef mpz_set_ln2(mpz_t x, int prec):
    """
    Set x = ln(2) as a fixed-point number with prec bits.

    The value is cached at the highest precision requested so far and
    truncated down for lower-precision requests.
    """
    global _ln2_value
    global _ln2_prec
    if prec <= _ln2_prec:
        mpz_tdiv_q_2exp(x, _ln2_value, _ln2_prec-prec)
    else:
        from mpmath.libmp import ln2_fixed
        if _ln2_prec < 0:
            mpz_init(_ln2_value)
        mpz_set_integer(_ln2_value, ln2_fixed(prec))
        mpz_set(x, _ln2_value)
        _ln2_prec = prec
1106
1107
cdef void _cy_exp_mpfr(mpz_t y, mpz_t x, int prec):
    """
    Computes y = exp(x) for fixed-point numbers y and x using MPFR,
    assuming that no overflow will occur.
    """
    cdef mpfr_t yf, xf
    mpfr_init2(xf, mpz_bitcount(x)+2)
    mpfr_init2(yf, prec+2)
    # Convert the fixed-point input to a real, exponentiate, then
    # convert back to fixed point at the same scale.
    mpfr_set_z(xf, x, GMP_RNDN)
    mpfr_div_2exp(xf, xf, prec, GMP_RNDN)
    mpfr_exp(yf, xf, GMP_RNDN)
    mpfr_mul_2exp(yf, yf, prec, GMP_RNDN)
    mpfr_get_z(y, yf, GMP_RNDN)
    mpfr_clear(yf)
    mpfr_clear(xf)
1122
1123
cdef cy_exp_basecase(mpz_t y, mpz_t x, int prec):
    """
    Computes y = exp(x) for fixed-point numbers y and x, assuming
    that x is small (|x| ~< 1). At small precisions, this function
    is equivalent to the exp_basecase function in
    mpmath.libmp.exp_fixed.

    Strategy: argument-reduce by r = sqrt(prec) squarings, sum the
    Taylor series split into even/odd halves, then square r times.
    """
    cdef int k, r, u
    cdef mpz_t s0, s1, x2, a
    # TODO: could use custom implementation here; for now switch to MPFR
    if prec > 2000:
        _cy_exp_mpfr(y, x, prec)
        return
    mpz_init(s0)
    mpz_init(s1)
    mpz_init(x2)
    mpz_init(a)
    r = <int>fsqrt(prec)
    prec += r
    # s0/s1 accumulate the even/odd-index Taylor terms respectively.
    mpz_set_ui(s0, 1)
    mpz_mul_2exp(s0, s0, prec)
    mpz_set(s1, s0)
    k = 2
    mpz_mul(x2, x, x)
    mpz_fdiv_q_2exp(x2, x2, prec)
    mpz_set(a, x2)
    while mpz_sgn(a):
        mpz_fdiv_q_ui(a, a, k)
        mpz_add(s0, s0, a)
        k += 1
        mpz_fdiv_q_ui(a, a, k)
        mpz_add(s1, s1, a)
        k += 1
        mpz_mul(a, a, x2)
        mpz_fdiv_q_2exp(a, a, prec)
    # Combine: exp(x) = even(x) + x*odd(x).
    mpz_mul(s1, s1, x)
    mpz_fdiv_q_2exp(s1, s1, prec)
    mpz_add(s0, s0, s1)
    # Undo the argument reduction by repeated squaring.
    u = r
    while r:
        mpz_mul(s0, s0, s0)
        mpz_fdiv_q_2exp(s0, s0, prec)
        r -= 1
    mpz_fdiv_q_2exp(y, s0, u)
    mpz_clear(s0)
    mpz_clear(s1)
    mpz_clear(x2)
    mpz_clear(a)
1171
1172
1173
cdef MPF_exp(MPF *y, MPF *x, MPopts opts):
    """
    Set y = exp(x).

    Raises OverflowError when x is so large that the exponent of the
    result would not fit in memory.
    """
    cdef bint sign, is_int
    cdef long wp, wpmod, offset, mag
    cdef mpz_t t, u
    cdef tuple w
    if x.special:
        if x.special == S_ZERO: MPF_set_si(y, 1)
        elif x.special == S_NINF: MPF_set_zero(y)
        elif x.special == S_INF: MPF_set_inf(y)
        else: MPF_set_nan(y)
        return
    wp = opts.prec + 14
    sign = mpz_sgn(x.man) < 0
    is_int = mpz_sgn(x.exp) >= 0
    # note: bogus if not reasonable shift
    mag = mpz_bitcount(x.man) + mpz_get_si(x.exp)
    if (not mpz_reasonable_shift(x.exp)) or mag < -wp:
        if mpz_sgn(x.exp) <= 0:
            # |x| is tiny: exp(x) = 1 +/- epsilon; perturb so directed
            # rounding still lands on the correct side of 1.
            MPF_set_one(y)
            if opts.rounding != ROUND_N:
                mpz_mul_2exp(y.man, y.man, wp)
                if sign:
                    mpz_sub_ui(y.man, y.man, 1)
                else:
                    mpz_add_ui(y.man, y.man, 1)
                mpz_set_si(y.exp, -wp)
                MPF_normalize(y, opts)
            return
        else:
            raise OverflowError("exp of a huge number")
    #offset = mpz_get_si(x.exp) + wp
    mpz_init(t)
    if mag > 1:
        # Reduce modulo ln(2): exp(x) = 2^q * exp(t) with t in [0, ln 2).
        wpmod = wp + mag
        mpz_set_fixed(t, x, wpmod, False)
        mpz_init(u)
        mpz_set_ln2(u, wpmod)
        # y.exp, t = divmod(t, ln2)
        mpz_fdiv_qr(y.exp,t,t,u)
        mpz_clear(u)
        mpz_fdiv_q_2exp(t, t, mag)
    else:
        mpz_set_fixed(t, x, wp, False)
        mpz_set_ui(y.exp, 0)
    cy_exp_basecase(y.man, t, wp)
    mpz_add_si(y.exp, y.exp, -wp)
    y.special = S_NORMAL
    mpz_clear(t)
    MPF_normalize(y, opts)
1226
1227
1228
cdef MPF_complex_sqrt(MPF *c, MPF *d, MPF *a, MPF *b, MPopts opts):
    """
    Set c+di = sqrt(a+bi).

    c, a and d, b may be the same objects.

    Uses the standard decomposition: with t = |a+bi| + a (resp. - a),
    the real (resp. imaginary) part is sqrt(t/2) and the other part is
    b / sqrt(2t).
    """
    cdef int apos, bneg
    cdef MPF t, u, v
    cdef MPopts wpopts
    # Real input: handle without complex arithmetic
    if b.special == S_ZERO:
        if a.special == S_ZERO:
            MPF_set_zero(c)
            MPF_set_zero(d)
        # a+bi, a < 0, b = 0: sqrt is purely imaginary
        elif MPF_sgn(a) < 0:
            MPF_abs(d, a)
            MPF_sqrt(d, d, opts)
            MPF_set_zero(c)
        # a > 0: sqrt is real
        else:
            MPF_sqrt(c, a, opts)
            MPF_set_zero(d)
        return
    # Intermediate quantities use 20 guard bits, rounded down
    wpopts.prec = opts.prec + 20
    wpopts.rounding = ROUND_D
    MPF_init(&t)
    MPF_init(&u)
    MPF_init(&v)
    apos = MPF_sgn(a) >= 0
    bneg = MPF_sgn(b) <= 0
    if apos:
        # real part
        MPF_hypot(&t, a, b, wpopts)   # t = abs(a+bi) + a
        MPF_add(&t, &t, a, wpopts)
        MPF_set(&u, &t)
        mpz_sub_ui(u.exp, u.exp, 1)   # u = t / 2
        MPF_sqrt(c, &u, opts)         # re = sqrt(u)
        # imag part (carries the sign of b via the division)
        mpz_add_ui(t.exp, t.exp, 1)   # t = 2*t
        MPF_sqrt(&u, &t, wpopts)      # u = sqrt(t)
        MPF_div(d, b, &u, opts)       # im = b / u
    else:
        # v saves b in case b and d are aliased
        MPF_set(&v, b)
        MPF_hypot(&t, a, b, wpopts)   # t = abs(a+bi) - a
        MPF_sub(&t, &t, a, wpopts)
        MPF_set(&u, &t)
        mpz_sub_ui(u.exp, u.exp, 1)   # u = t / 2
        MPF_sqrt(d, &u, opts)         # im = sqrt(u)
        mpz_add_ui(t.exp, t.exp, 1)   # t = 2*t
        MPF_sqrt(&u, &t, wpopts)      # u = sqrt(t)
        MPF_div(c, &v, &u, opts)      # re = b / u
        # For b < 0 flip both parts so that re >= 0 and im takes the
        # sign of b (principal branch)
        if bneg:
            MPF_neg(c, c)
            MPF_neg(d, d)
    MPF_clear(&t)
    MPF_clear(&u)
    MPF_clear(&v)
cdef int MPF_get_mpfr_overflow(mpfr_t y, MPF *x):
    """
    Store the mpmath number x exactly in the MPFR variable y. The precision
    of y will be adjusted if necessary. If the exponent overflows, only
    the mantissa is stored and 1 is returned; if no overflow occurs,
    the function returns 0.
    """
    cdef long prec, exp
    # Special values translate directly to MPFR special values
    if x.special != S_NORMAL:
        if x.special == S_ZERO:
            mpfr_set_ui(y, 0, GMP_RNDN)
        elif x.special == S_INF:
            mpfr_set_inf(y, 1)
        elif x.special == S_NINF:
            mpfr_set_inf(y, -1)
        else:
            mpfr_set_nan(y)
        return 0
    prec = mpz_bitcount(x.man)
    # Minimum precision for MPFR
    if prec < 2:
        prec = 2
    # Give y exactly enough bits so the mantissa is stored without rounding
    mpfr_set_prec(y, prec)
    mpfr_set_z(y, x.man, GMP_RNDN)
    if mpz_reasonable_shift(x.exp):
        exp = mpz_get_si(x.exp)
        if exp >= 0:
            mpfr_mul_2exp(y, y, exp, GMP_RNDN)
        else:
            mpfr_div_2exp(y, y, -exp, GMP_RNDN)
        return 0
    else:
        # Exponent does not fit: caller must handle the 2^exp factor itself
        return 1
cdef MPF_set_mpfr(MPF *y, mpfr_t x, MPopts opts):
    """
    Convert the MPFR number x to a normalized MPF y.
    inf/nan and zero are handled.
    """
    cdef long exp
    # TODO: use mpfr_regular_p with MPFR 3
    if mpfr_nan_p(x):
        MPF_set_nan(y)
        return
    if mpfr_inf_p(x):
        if mpfr_sgn(x) > 0:
            MPF_set_inf(y)
        else:
            MPF_set_ninf(y)
        return
    if mpfr_zero_p(x):
        MPF_set_zero(y)
        return
    # Decompose x = man * 2^exp (exact), then normalize to opts
    exp = mpfr_get_z_exp(y.man, x)
    mpz_set_si(y.exp, exp)
    y.special = S_NORMAL
    MPF_normalize(y, opts)
cdef int MPF_log(MPF *y, MPF *x, MPopts opts):
    """
    Set y = log(|x|). Returns 1 if x is negative.

    Computed via MPFR; if the exponent of x does not fit in MPFR, the
    identity log(m * 2^e) = log(m) + e*log(2) is used instead.
    """
    cdef MPF t
    cdef bint negative, overflow
    cdef mpfr_rnd_t rndmode
    cdef mpfr_t yy, xx
    # Special values: log(0) = -inf, log(+inf) = inf, log(nan) = nan,
    # log(-inf) = inf with the "negative" flag set
    if x.special != S_NORMAL:
        if x.special == S_ZERO:
            MPF_set_ninf(y)
            return 0
        if x.special == S_INF:
            MPF_set_inf(y)
            return 0
        if x.special == S_NAN:
            MPF_set_nan(y)
            return 0
        if x.special == S_NINF:
            MPF_set_inf(y)
            return 1

    negative = MPF_sgn(x) < 0
    mpfr_init2(xx, opts.prec)
    mpfr_init2(yy, opts.prec)

    # overflow = 1 means xx only holds the mantissa of x
    overflow = MPF_get_mpfr_overflow(xx, x)
    # The MPFR rounding direction depends on whether the result is
    # positive (|x| > 1) or negative (|x| < 1)
    rndmode = rndmode_to_mpfr(opts.rounding, mpfr_cmp_ui(xx, 1))

    if overflow:
        MPF_init(&t)
        # Copy x exponent in case x and y are aliased
        mpz_set(t.exp, x.exp)

        # log(m * 2^e) = log(m) + e*log(2)
        mpfr_abs(xx, xx, GMP_RNDN)
        mpfr_log(yy, xx, rndmode)
        MPF_set_mpfr(y, yy, opts)

        # t = e * ln(2), built as a fixed-point value with 20 guard bits
        mpz_set_ln2(t.man, opts.prec+20)
        mpz_mul(t.man, t.man, t.exp)
        mpz_set_si(t.exp, -(opts.prec+20))
        t.special = S_NORMAL

        MPF_add(y, y, &t, opts)
        MPF_clear(&t)
    else:
        mpfr_abs(xx, xx, GMP_RNDN)
        mpfr_log(yy, xx, rndmode)
        MPF_set_mpfr(y, yy, opts)

    mpfr_clear(xx)
    mpfr_clear(yy)
    return negative
cdef MPF_set_pi(MPF *x, MPopts opts):
    """
    Set x to the constant pi, rounded per opts.
    """
    # Compute with 20 guard bits as a fixed-point value, then normalize
    # down to the requested precision.
    cdef long wp = opts.prec + 20
    x.special = S_NORMAL
    mpz_set_pi(x.man, wp)
    mpz_set_si(x.exp, -wp)
    MPF_normalize(x, opts)
cdef MPF_set_ln2(MPF *x, MPopts opts):
    """
    Set x to the constant ln(2), rounded per opts.
    """
    # Compute with 20 guard bits as a fixed-point value, then normalize
    # down to the requested precision.
    cdef long wp = opts.prec + 20
    x.special = S_NORMAL
    mpz_set_ln2(x.man, wp)
    mpz_set_si(x.exp, -wp)
    MPF_normalize(x, opts)
def exp_fixed(Integer x, int prec, ln2=None):
    """
    Returns a fixed-point approximation of exp(x) where x is a fixed-point
    number.

    If ln2 is given, it must be a fixed-point value of ln(2) at the same
    precision; otherwise one is computed internally.

    EXAMPLES::

        sage: from sage.libs.mpmath.ext_impl import exp_fixed
        sage: y = exp_fixed(1<<53, 53)
        sage: float(y) / 2^53
        2.718281828459044

    """
    cdef Integer v
    cdef mpz_t n, t
    cdef long nn
    mpz_init(n)
    mpz_init(t)
    # Argument reduction: x = n*ln(2) + t with 0 <= t < ln(2)
    if ln2 is None:
        mpz_set_ln2(t, prec)
        mpz_fdiv_qr(n, t, x.value, t)
    else:
        mpz_fdiv_qr(n, t, x.value, (<Integer>ln2).value)
    nn = mpz_get_si(n)
    v = PY_NEW(Integer)
    # exp(x) = exp(t) * 2^n
    cy_exp_basecase(v.value, t, prec)
    if nn >= 0:
        mpz_mul_2exp(v.value, v.value, nn)
    else:
        mpz_fdiv_q_2exp(v.value, v.value, -nn)
    mpz_clear(t)
    mpz_clear(n)
    return v
def cos_sin_fixed(Integer x, int prec, pi2=None):
    """
    Returns fixed-point approximations of cos(x), sin(x) where
    x is a fixed-point number.

    The pi2 argument is accepted for interface compatibility but is
    not used by this implementation.

    EXAMPLES::

        sage: from sage.libs.mpmath.ext_impl import cos_sin_fixed
        sage: c, s = cos_sin_fixed(1<<53, 53)
        sage: print float(c) / 2^53
        0.540302305868
        sage: print float(s) / 2^53
        0.841470984808

    """
    cdef Integer cos_res, sin_res
    cdef mpfr_t arg, cosf, sinf
    # arg needs enough bits to hold x exactly
    mpfr_init2(arg, mpz_bitcount(x.value)+2)
    mpfr_init2(cosf, prec)
    mpfr_init2(sinf, prec)
    # Convert fixed-point input to a real value: arg = x / 2^prec
    mpfr_set_z(arg, x.value, GMP_RNDN)
    mpfr_div_2exp(arg, arg, prec, GMP_RNDN)
    mpfr_sin_cos(sinf, cosf, arg, GMP_RNDN)
    # Scale the results back into fixed-point form
    mpfr_mul_2exp(cosf, cosf, prec, GMP_RNDN)
    mpfr_mul_2exp(sinf, sinf, prec, GMP_RNDN)
    cos_res = PY_NEW(Integer)
    sin_res = PY_NEW(Integer)
    mpfr_get_z(cos_res.value, cosf, GMP_RNDN)
    mpfr_get_z(sin_res.value, sinf, GMP_RNDN)
    mpfr_clear(arg)
    mpfr_clear(cosf)
    mpfr_clear(sinf)
    return cos_res, sin_res
# Cache of fixed-point values of log(n) for small integers n,
# lazily initialized and consulted by log_int_fixed().
DEF MAX_LOG_INT_CACHE = 2000

cdef mpz_t log_int_cache[MAX_LOG_INT_CACHE+1]
# Number of fractional bits of each cached entry; 0 means "not computed"
cdef long log_int_cache_prec[MAX_LOG_INT_CACHE+1]
cdef bint log_int_cache_initialized = 0
cdef mpz_log_int(mpz_t v, mpz_t n, int prec):
    """
    Set v = log(n) where n is an integer and v is a fixed-point number
    with the specified precision.
    """
    cdef mpfr_t tmp
    # Use 15 guard bits for the MPFR computation
    mpfr_init2(tmp, prec+15)
    mpfr_set_z(tmp, n, GMP_RNDN)
    mpfr_log(tmp, tmp, GMP_RNDN)
    # Scale by 2^prec to obtain the fixed-point integer value
    mpfr_mul_2exp(tmp, tmp, prec, GMP_RNDN)
    mpfr_get_z(v, tmp, GMP_RNDN)
    mpfr_clear(tmp)
def log_int_fixed(n, long prec, ln2=None):
    """
    Returns fixed-point approximation of log(n).

    Values for 1 <= n <= 2000 are cached (at 64 extra bits of precision,
    so repeated calls with slightly different precisions reuse the same
    entry). For n <= 0 the value 0 is returned.

    EXAMPLES::

        sage: from sage.libs.mpmath.ext_impl import log_int_fixed
        sage: print float(log_int_fixed(5, 53)) / 2^53
        1.60943791243
        sage: print float(log_int_fixed(5, 53)) / 2^53   # exercise cache
        1.60943791243

    """
    global log_int_cache_initialized
    cdef Integer t
    cdef int i
    t = PY_NEW(Integer)
    mpz_set_integer(t.value, n)
    # log is not defined for n <= 0; return 0 in that case
    if mpz_sgn(t.value) <= 0:
        mpz_set_ui(t.value, 0)
    elif mpz_cmp_ui(t.value, MAX_LOG_INT_CACHE) <= 0:
        # Lazily initialize the whole cache on first use
        if not log_int_cache_initialized:
            for i in range(MAX_LOG_INT_CACHE+1):
                mpz_init(log_int_cache[i])
                log_int_cache_prec[i] = 0
            log_int_cache_initialized = 1
        i = mpz_get_si(t.value)
        # (Re)compute the cached entry with 64 guard bits if it is not
        # precise enough for this request
        if log_int_cache_prec[i] < prec:
            mpz_log_int(log_int_cache[i], t.value, prec+64)
            log_int_cache_prec[i] = prec+64
        # Shift the cached value down to the requested precision
        mpz_tdiv_q_2exp(t.value, log_int_cache[i], log_int_cache_prec[i]-prec)

    else:
        mpz_log_int(t.value, t.value, prec)
    return t
cdef _MPF_cos_python(MPF *c, MPF *x, MPopts opts):
    """
    Set c = cos(x) using the pure-Python mpmath.libmp implementation.

    Fallback path used when the MPFR-based route is unavailable.
    """
    from mpmath.libmp.libelefun import mpf_cos_sin
    xt = MPF_to_tuple(x)
    rnd = rndmode_to_python(opts.rounding)
    # The fourth argument (1) requests only the cosine
    MPF_set_tuple(c, mpf_cos_sin(xt, opts.prec, rnd, 1, False))
cdef _MPF_sin_python(MPF *s, MPF *x, MPopts opts):
    """
    Set s = sin(x) using the pure-Python mpmath.libmp implementation.

    Fallback path used when the MPFR-based route is unavailable.
    """
    from mpmath.libmp.libelefun import mpf_cos_sin
    xt = MPF_to_tuple(x)
    rnd = rndmode_to_python(opts.rounding)
    # The fourth argument (2) requests only the sine
    MPF_set_tuple(s, mpf_cos_sin(xt, opts.prec, rnd, 2, False))
cdef MPF_cos(MPF *c, MPF *x, MPopts opts):
    """
    Set c = cos(x)
    """
    cdef mpfr_t cf, xf
    cdef bint overflow
    # cos(0) = 1; cos of inf/nan is nan
    if x.special != S_NORMAL:
        if x.special == S_ZERO:
            MPF_set_one(c)
        else:
            MPF_set_nan(c)
        return
    # xf gets its precision adjusted inside MPF_get_mpfr_overflow
    mpfr_init(xf)
    mpfr_init2(cf, opts.prec)
    overflow = MPF_get_mpfr_overflow(xf, x)
    # Fall back to the Python implementation if the exponent does not fit
    # in MPFR, or when the rounding mode is ROUND_U
    if overflow or opts.rounding == ROUND_U:
        _MPF_cos_python(c, x, opts)
    else:
        mpfr_cos(cf, xf, rndmode_to_mpfr(opts.rounding, 1))
        MPF_set_mpfr(c, cf, opts)
    mpfr_clear(xf)
    mpfr_clear(cf)
cdef MPF_sin(MPF *s, MPF *x, MPopts opts):
    """
    Set s = sin(x)
    """
    cdef mpfr_t sf, xf
    cdef bint overflow
    # sin(0) = 0; sin of inf/nan is nan
    if x.special != S_NORMAL:
        if x.special == S_ZERO:
            MPF_set_zero(s)
        else:
            MPF_set_nan(s)
        return
    # xf gets its precision adjusted inside MPF_get_mpfr_overflow
    mpfr_init(xf)
    mpfr_init2(sf, opts.prec)
    overflow = MPF_get_mpfr_overflow(xf, x)
    # Fall back to the Python implementation if the exponent does not fit
    # in MPFR, or when the rounding mode is ROUND_U
    if overflow or opts.rounding == ROUND_U:
        _MPF_sin_python(s, x, opts)
    else:
        mpfr_sin(sf, xf, rndmode_to_mpfr(opts.rounding, 1))
        MPF_set_mpfr(s, sf, opts)
    mpfr_clear(xf)
    mpfr_clear(sf)
cdef MPF_cos_sin(MPF *c, MPF *s, MPF *x, MPopts opts):
    """
    Set c = cos(x), s = sin(x)

    Both values are computed simultaneously (one MPFR call) when possible.
    """
    cdef mpfr_t cf, sf, xf
    cdef bint overflow
    # cos(0) = 1, sin(0) = 0; inf/nan input gives nan for both
    if x.special != S_NORMAL:
        if x.special == S_ZERO:
            MPF_set_one(c)
            MPF_set_zero(s)
        else:
            MPF_set_nan(c)
            MPF_set_nan(s)
        return
    # xf gets its precision adjusted inside MPF_get_mpfr_overflow
    mpfr_init(xf)
    mpfr_init2(sf, opts.prec)
    mpfr_init2(cf, opts.prec)
    overflow = MPF_get_mpfr_overflow(xf, x)
    # Fall back to the Python implementation if the exponent does not fit
    # in MPFR, or when the rounding mode is ROUND_U
    if overflow or opts.rounding == ROUND_U:
        _MPF_cos_python(c, x, opts)
        _MPF_sin_python(s, x, opts)
    else:
        mpfr_sin_cos(sf, cf, xf, rndmode_to_mpfr(opts.rounding, 1))
        MPF_set_mpfr(s, sf, opts)
        MPF_set_mpfr(c, cf, opts)
    mpfr_clear(xf)
    mpfr_clear(cf)
    mpfr_clear(sf)
cdef MPF_complex_exp(MPF *re, MPF *im, MPF *a, MPF *b, MPopts opts):
    """
    Set re+im*i = exp(a+bi)
    """
    cdef MPF mag, c, s
    cdef MPopts wopts
    # Purely imaginary argument: exp(bi) = cos(b) + sin(b)*i
    if a.special == S_ZERO:
        MPF_cos_sin(re, im, b, opts)
        return
    # Real argument: exp(a) with zero imaginary part
    if b.special == S_ZERO:
        MPF_exp(re, a, opts)
        MPF_set_zero(im)
        return
    MPF_init(&mag)
    MPF_init(&c)
    MPF_init(&s)
    # General case: exp(a+bi) = exp(a) * (cos(b) + sin(b)*i),
    # computed with 4 guard bits
    wopts = opts
    wopts.prec += 4
    MPF_exp(&mag, a, wopts)
    MPF_cos_sin(&c, &s, b, wopts)
    MPF_mul(re, &mag, &c, opts)
    MPF_mul(im, &mag, &s, opts)
    MPF_clear(&mag)
    MPF_clear(&c)
    MPF_clear(&s)
cdef int MPF_pow(MPF *z, MPF *x, MPF *y, MPopts opts) except -1:
    """
    Set z = x^y for real x and y and returns 0 if the result is real-valued.
    If the result is complex, does nothing and returns 1.

    May raise ZeroDivisionError (propagated from MPF_pow_int for a zero
    base with a negative integer exponent) or OverflowError (from MPF_exp).
    """
    cdef MPopts wopts
    cdef mpz_t t
    cdef MPF w
    cdef int xsign, ysign
    cdef mpz_t tm

    # Integer exponentiation, if reasonable
    if y.special == S_NORMAL and mpz_sgn(y.exp) >= 0:
        mpz_init(tm)
        # check if size is reasonable: bitcount of the integer value
        mpz_add_ui(tm, y.exp, mpz_bitcount(y.man))
        mpz_abs(tm, tm)
        if mpz_cmp_ui(tm, 10000) < 0:
            # man * 2^exp
            mpz_mul_2exp(tm, y.man, mpz_get_ui(y.exp))
            MPF_pow_int(z, x, tm, opts)
            mpz_clear(tm)
            return 0
        mpz_clear(tm)

    # x ^ 0 is 1 for any finite x, nan for nan/inf bases
    if y.special == S_ZERO:
        if x.special == S_NORMAL or x.special == S_ZERO:
            MPF_set_one(z)
        else:
            MPF_set_nan(z)
        return 0

    xsign = MPF_sgn(x)
    ysign = MPF_sgn(y)

    # Negative base with non-integer exponent: result is complex
    if xsign < 0:
        return 1

    # Square root or integer power thereof (exponent is an odd
    # multiple of 1/2, i.e. y.exp == -1)
    if y.special == S_NORMAL and mpz_cmp_si(y.exp, -1) == 0:
        # x^(1/2)
        if mpz_cmp_ui(y.man, 1) == 0:
            MPF_sqrt(z, x, opts)
            return 0
        # x^(-1/2) = 1 / sqrt(x)
        if mpz_cmp_si(y.man, -1) == 0:
            wopts = opts
            wopts.prec += 10
            wopts.rounding = reciprocal_rnd(wopts.rounding)
            MPF_sqrt(z, x, wopts)
            MPF_div(z, &MPF_C_1, z, opts)
            return 0
        # x^(n/2) = sqrt(x)^n
        wopts = opts
        wopts.prec += 10
        if mpz_sgn(y.man) < 0:
            wopts.rounding = reciprocal_rnd(wopts.rounding)
        mpz_init_set(t, y.man)
        MPF_sqrt(z, x, wopts)
        MPF_pow_int(z, z, t, opts)
        mpz_clear(t)
        return 0

    # Remaining special-value combinations
    if x.special != S_NORMAL or y.special != S_NORMAL:
        if x.special == S_NAN or y.special == S_NAN:
            MPF_set_nan(z)
            return 0
        if y.special == S_ZERO:
            if x.special == S_NORMAL or x.special == S_ZERO:
                MPF_set_one(z)
            else:
                MPF_set_nan(z)
            return 0
        if x.special == S_ZERO and y.special == S_NORMAL:
            # 0^y = 0 for y > 0; for y < 0 fall through to the
            # log/exp path (log(0) = -inf gives an infinite result)
            if mpz_sgn(y.man) > 0:
                MPF_set_zero(z)
                return 0

    # General case: x^y = exp(y * log(x)), with 10 guard bits for the log
    wopts = opts
    wopts.prec += 10
    MPF_init(&w)
    MPF_log(&w, x, wopts)
    MPF_mul(&w, &w, y, opts_exact)
    MPF_exp(z, &w, opts)
    MPF_clear(&w)
    return 0
cdef MPF_complex_square(MPF *re, MPF *im, MPF *a, MPF *b, MPopts opts):
    """
    Set re+im*i = (a+bi)^2 = a^2-b^2, 2ab*i.
    """
    cdef MPF t, u
    MPF_init(&t)
    MPF_init(&u)
    # re = a^2 - b^2; squares are exact, only the subtraction rounds.
    # Temporaries are needed since re may alias a or b.
    MPF_mul(&t, a, a, opts_exact)
    MPF_mul(&u, b, b, opts_exact)
    MPF_sub(re, &t, &u, opts)
    # im = 2*a*b; doubling is exact via an exponent increment
    MPF_mul(im, a, b, opts)
    if im.special == S_NORMAL:
        mpz_add_ui(im.exp, im.exp, 1)
    MPF_clear(&t)
    MPF_clear(&u)
cdef MPF_complex_reciprocal(MPF *re, MPF *im, MPF *a, MPF *b, MPopts opts):
    """
    Set re+im*i = 1/(a+bi), i.e. compute the reciprocal of
    a complex number.

    Uses 1/(a+bi) = (a - bi) / (a^2 + b^2).
    """
    cdef MPopts wopts
    cdef MPF t, u, m
    # 10 guard bits for the squared modulus
    wopts = opts
    wopts.prec += 10
    MPF_init(&t)
    MPF_init(&u)
    MPF_init(&m)
    # m = a^2 + b^2 (squares exact, sum rounded at working precision)
    MPF_mul(&t, a, a, opts_exact)
    MPF_mul(&u, b, b, opts_exact)
    MPF_add(&m, &t, &u, wopts)
    # t = a/m, u = b/m; results are copied out via temporaries so that
    # re/im may alias a/b
    MPF_div(&t, a, &m, opts)
    MPF_div(&u, b, &m, opts)
    MPF_set(re, &t)
    MPF_neg(im, &u)
    MPF_clear(&t)
    MPF_clear(&u)
    MPF_clear(&m)
cdef MPF_complex_pow_int(MPF *zre, MPF *zim, MPF *xre, MPF *xim, mpz_t n, MPopts opts):
    """
    Set zre+zim*i = (xre+xim) ^ n, i.e. raise a complex number to an integer power.
    """
    cdef MPopts wopts
    cdef long m

    # Real base: delegate to the real integer power
    if xim.special == S_ZERO:
        MPF_pow_int(zre, xre, n, opts)
        MPF_set_zero(zim)
        return

    # Purely imaginary base: (bi)^n = b^n * i^n, where i^n cycles
    # with period 4
    if xre.special == S_ZERO:
        # n % 4
        # NOTE(review): mpz_get_si truncates if n does not fit in a C
        # long; verify callers never pass such a huge n here
        m = mpz_get_si(n) % 4
        if m == 0:
            # i^n = 1
            MPF_pow_int(zre, xim, n, opts)
            MPF_set_zero(zim)
            return
        if m == 1:
            # i^n = i
            MPF_set_zero(zre)
            MPF_pow_int(zim, xim, n, opts)
            return
        if m == 2:
            # i^n = -1
            MPF_pow_int(zre, xim, n, opts)
            MPF_neg(zre, zre)
            MPF_set_zero(zim)
            return
        if m == 3:
            # i^n = -i
            MPF_set_zero(zre)
            MPF_pow_int(zim, xim, n, opts)
            MPF_neg(zim, zim)
            return

    # Direct formulas for small exponents
    if mpz_reasonable_shift(n):
        m = mpz_get_si(n)
        if m == 0:
            MPF_set_one(zre)
            MPF_set_zero(zim)
            return
        if m == 1:
            MPF_pos(zre, xre, opts)
            MPF_pos(zim, xim, opts)
            return
        if m == 2:
            MPF_complex_square(zre, zim, xre, xim, opts)
            return
        if m == -1:
            MPF_complex_reciprocal(zre, zim, xre, xim, opts)
            return
        if m == -2:
            # 1/x^2, squaring with 10 guard bits
            wopts = opts
            wopts.prec += 10
            MPF_complex_square(zre, zim, xre, xim, wopts)
            MPF_complex_reciprocal(zre, zim, zre, zim, opts)
            return

    # General case: defer to the pure-Python mpmath implementation
    xret = MPF_to_tuple(xre)
    ximt = MPF_to_tuple(xim)
    from mpmath.libmp import mpc_pow_int
    vr, vi = mpc_pow_int((xret, ximt), mpzi(n), \
        opts.prec, rndmode_to_python(opts.rounding))
    MPF_set_tuple(zre, vr)
    MPF_set_tuple(zim, vi)
cdef MPF_complex_pow_re(MPF *zre, MPF *zim, MPF *xre, MPF *xim, MPF *y, MPopts opts):
    """
    Set (zre+zim*i) = (xre+xim*i) ^ y, i.e. raise a complex number
    to a real power.

    Integer and half-integer exponents are dispatched to the fast
    MPF_complex_pow_int / MPF_complex_sqrt paths; everything else goes
    through the pure-Python mpmath implementation.
    """

    cdef mpz_t tm
    cdef MPopts wopts

    # x ^ 0 = 1 for finite x
    if y.special == S_ZERO:
        if xre.special == S_NORMAL and xim.special == S_NORMAL:
            # x ^ 0
            MPF_set_one(zre)
            MPF_set_zero(zim)
            return

    wopts = opts
    wopts.prec += 10

    if y.special == S_NORMAL:
        # Integer exponent: y = man * 2^exp with exp >= 0 and small
        if mpz_cmp_ui(y.exp, 0) >= 0 and mpz_reasonable_shift(y.exp):
            mpz_init_set(tm, y.man)
            mpz_mul_2exp(tm, tm, mpz_get_ui(y.exp))
            MPF_complex_pow_int(zre, zim, xre, xim, tm, opts)
            mpz_clear(tm)
            return
        # Half-integer exponent: x ^ (n/2) = sqrt(x) ^ n
        if mpz_cmp_si(y.exp, -1) == 0:
            mpz_init_set(tm, y.man)
            MPF_complex_sqrt(zre, zim, xre, xim, wopts)
            MPF_complex_pow_int(zre, zim, zre, zim, tm, opts)
            mpz_clear(tm)
            return

    # General case: defer to the pure-Python mpmath implementation.
    # (The previously imported `fzero` was unused and has been removed.)
    xret = MPF_to_tuple(xre)
    ximt = MPF_to_tuple(xim)
    yret = MPF_to_tuple(y)
    from mpmath.libmp import mpc_pow_mpf
    vr, vi = mpc_pow_mpf((xret, ximt), yret, \
        opts.prec, rndmode_to_python(opts.rounding))
    MPF_set_tuple(zre, vr)
    MPF_set_tuple(zim, vi)
cdef MPF_complex_pow(MPF *zre, MPF *zim, MPF *xre, MPF *xim, MPF *yre, MPF *yim, MPopts opts):
    """
    Set (zre + zim*i) = (xre+xim*i) ^ (yre+yim*i).
    """
    # A real exponent has a dedicated, faster implementation
    if yim.special == S_ZERO:
        MPF_complex_pow_re(zre, zim, xre, xim, yre, opts)
        return
    # Fully complex exponent: defer to the pure-Python mpmath code
    from mpmath.libmp import mpc_pow
    base = (MPF_to_tuple(xre), MPF_to_tuple(xim))
    expo = (MPF_to_tuple(yre), MPF_to_tuple(yim))
    res_re, res_im = mpc_pow(base, expo,
        opts.prec, rndmode_to_python(opts.rounding))
    MPF_set_tuple(zre, res_re)
    MPF_set_tuple(zim, res_im)
cdef mpz_set_tuple_fixed(mpz_t x, tuple t, long prec):
    """
    Set the integer x to a fixed-point number with specified precision
    and the value of t = (sign,man,exp,bc). Truncating division is used
    if the value cannot be represented exactly.
    """
    cdef long offset
    sign, man, exp, bc = t
    mpz_set_integer(x, man)
    if sign:
        mpz_neg(x, x)
    # The fixed-point representation of man*2^exp is man shifted by
    # exp + prec bits
    offset = exp + prec
    if offset >= 0:
        mpz_mul_2exp(x, x, offset)
    else:
        # truncating shift when bits are lost
        mpz_tdiv_q_2exp(x, x, -offset)
cdef mpz_set_complex_tuple_fixed(mpz_t x, mpz_t y, tuple t, long prec):
    """
    Set the integers (x,y) to fixed-point numbers with the values of
    the mpf pair t = ((xsign,xman,xexp,xbc), (ysign,yman,yexp,ybc)).
    """
    # Unpack the (real, imaginary) mpf tuples and convert each part
    re_t, im_t = t
    mpz_set_tuple_fixed(x, re_t, prec)
    mpz_set_tuple_fixed(y, im_t, prec)
cdef MPF_set_fixed(MPF *x, mpz_t man, long wp, long prec, int rnd):
    """
    Set the value of an MPF given a fixed-point mantissa of precision wp,
    rounding to the given precision and rounding mode.
    """
    cdef MPopts ropts
    # A fixed-point value man with wp fractional bits equals man * 2^(-wp)
    x.special = S_NORMAL
    mpz_set(x.man, man)
    mpz_set_si(x.exp, -wp)
    ropts.prec = prec
    ropts.rounding = rnd
    MPF_normalize(x, ropts)
# TODO: we should allocate these dynamically
1960
DEF MAX_PARAMS = 128
1961
cdef mpz_t AINT[MAX_PARAMS]
1962
cdef mpz_t BINT[MAX_PARAMS]
1963
cdef mpz_t AP[MAX_PARAMS]
1964
cdef mpz_t AQ[MAX_PARAMS]
1965
cdef mpz_t BP[MAX_PARAMS]
1966
cdef mpz_t BQ[MAX_PARAMS]
1967
cdef mpz_t AREAL[MAX_PARAMS]
1968
cdef mpz_t BREAL[MAX_PARAMS]
1969
cdef mpz_t ACRE[MAX_PARAMS]
1970
cdef mpz_t ACIM[MAX_PARAMS]
1971
cdef mpz_t BCRE[MAX_PARAMS]
1972
cdef mpz_t BCIM[MAX_PARAMS]
1973
1974
1975
1976
cdef MPF_hypsum(MPF *a, MPF *b, int p, int q, param_types, str ztype, coeffs, z,
    long prec, long wp, long epsshift, dict magnitude_check, kwargs):
    """
    Evaluates a+bi = pFq(..., z) by summing the hypergeometric
    series in fixed-point arithmetic.

    This basically a Cython version of
    mpmath.libmp.libhyper.make_hyp_summator(). It should produce identical
    results (the calculations are exactly the same, and the same rounding
    is used for divisions).

    This function is not intended to be called directly; it is wrapped
    by the hypsum_internal function in ext_main.pyx.

    Returns (have_complex, magn) where magn is the binary magnitude of
    the result, used by the caller for error estimation.
    """
    cdef long i, j, k, n, p_mag, cancellable_real, MAX, magn
    cdef int have_complex_param, have_complex_arg, have_complex

    # S* = partial sum, P* = current term, Z* = argument,
    # T*/U* = scratch, MUL/DIV = rational part of the term ratio,
    # HIGH/LOW = termination threshold, all in fixed point with wp bits
    cdef mpz_t SRE, SIM, PRE, PIM, ZRE, ZIM, TRE, TIM, URE, UIM, MUL, DIV, HIGH, LOW, one
    # Count number of parameters
    cdef int aint, bint, arat, brat, areal, breal, acomplex, bcomplex
    cdef int have_multiplier   # note: currently unused

    if p >= MAX_PARAMS or q >= MAX_PARAMS:
        raise NotImplementedError("too many hypergeometric function parameters")

    have_complex_param = 'C' in param_types
    have_complex_arg = ztype == 'C'
    have_complex = have_complex_param or have_complex_arg

    mpz_init(one)
    mpz_init(SRE)
    mpz_init(SIM)
    mpz_init(PRE)
    mpz_init(PIM)
    mpz_init(ZRE)
    mpz_init(ZIM)
    mpz_init(MUL)
    mpz_init(DIV)
    mpz_init(HIGH)
    mpz_init(LOW)
    mpz_init(TRE)
    mpz_init(TIM)
    mpz_init(URE)
    mpz_init(UIM)

    aint = bint = arat = brat = areal = breal = acomplex = bcomplex = 0

    MAX = kwargs.get('maxterms', wp*100)

    # Stop when the term magnitude drops below 2^epsshift
    mpz_set_ui(HIGH, 1)
    mpz_mul_2exp(HIGH, HIGH, epsshift)
    mpz_neg(LOW, HIGH)

    # one = fixed-point 1; the first term and the initial sum are both 1
    mpz_set_ui(one, 1)
    mpz_mul_2exp(one, one, wp)
    mpz_set(SRE, one)
    mpz_set(PRE, one)

    # Copy input data to mpzs
    if have_complex_arg:
        mpz_set_complex_tuple_fixed(ZRE, ZIM, z, wp)
    else:
        mpz_set_tuple_fixed(ZRE, z, wp)
    # Sort the upper (a) parameters into the per-type scratch arrays
    for i in range(0,p):
        if param_types[i] == 'Z':
            mpz_init(AINT[aint])
            mpz_set_integer(AINT[aint], coeffs[i])
            aint += 1
        elif param_types[i] == 'Q':
            mpz_init(AP[arat])
            mpz_init(AQ[arat])
            __p, __q = coeffs[i]._mpq_
            mpz_set_integer(AP[arat], __p)
            mpz_set_integer(AQ[arat], __q)
            arat += 1
        elif param_types[i] == 'R':
            mpz_init(AREAL[areal])
            mpz_set_tuple_fixed(AREAL[areal], coeffs[i]._mpf_, wp)
            areal += 1
        elif param_types[i] == 'C':
            mpz_init(ACRE[acomplex])
            mpz_init(ACIM[acomplex])
            mpz_set_complex_tuple_fixed(ACRE[acomplex], ACIM[acomplex], coeffs[i]._mpc_, wp)
            acomplex += 1
        else:
            raise ValueError
    # Likewise for the lower (b) parameters
    for i in range(p,p+q):
        if param_types[i] == 'Z':
            mpz_init(BINT[bint])
            mpz_set_integer(BINT[bint], coeffs[i])
            bint += 1
        elif param_types[i] == 'Q':
            mpz_init(BP[brat])
            mpz_init(BQ[brat])
            __p, __q = coeffs[i]._mpq_
            mpz_set_integer(BP[brat], __p)
            mpz_set_integer(BQ[brat], __q)
            brat += 1
        elif param_types[i] == 'R':
            mpz_init(BREAL[breal])
            mpz_set_tuple_fixed(BREAL[breal], coeffs[i]._mpf_, wp)
            breal += 1
        elif param_types[i] == 'C':
            mpz_init(BCRE[bcomplex])
            mpz_init(BCIM[bcomplex])
            mpz_set_complex_tuple_fixed(BCRE[bcomplex], BCIM[bcomplex], coeffs[i]._mpc_, wp)
            bcomplex += 1
        else:
            raise ValueError

    # Real a and b factors can be paired up and divided directly,
    # saving one fixed-point shift per pair
    cancellable_real = min(areal, breal)

    # Main loop
    for n in range(1, 10**8):

        # Record the magnitude of the term at requested indices
        # (used by the caller to detect cancellation)
        if n in magnitude_check:
            p_mag = mpz_bitcount(PRE)
            if have_complex:
                p_mag = max(p_mag, mpz_bitcount(PIM))
            magnitude_check[n] = wp - p_mag

        # Update rational part of product
        mpz_set_ui(MUL, 1)
        mpz_set_ui(DIV, n)

        for i in range(aint): mpz_mul(MUL, MUL, AINT[i])
        for i in range(arat): mpz_mul(MUL, MUL, AP[i])
        for i in range(brat): mpz_mul(MUL, MUL, BQ[i])
        for i in range(bint): mpz_mul(DIV, DIV, BINT[i])
        for i in range(brat): mpz_mul(DIV, DIV, BP[i])
        for i in range(arat): mpz_mul(DIV, DIV, AQ[i])

        # Check for singular terms
        if mpz_sgn(DIV) == 0:
            # a zero numerator means the series terminated (polynomial case)
            if mpz_sgn(MUL) == 0:
                break
            raise ZeroDivisionError

        # Multiply real factors
        for k in range(0, cancellable_real):
            mpz_mul(PRE, PRE, AREAL[k])
            mpz_fdiv_q(PRE, PRE, BREAL[k])
        for k in range(cancellable_real, areal):
            mpz_mul(PRE, PRE, AREAL[k])
            mpz_fdiv_q_2exp(PRE, PRE, wp)
        for k in range(cancellable_real, breal):
            mpz_mul_2exp(PRE, PRE, wp)
            mpz_fdiv_q(PRE, PRE, BREAL[k])
        if have_complex:
            for k in range(0, cancellable_real):
                mpz_mul(PIM, PIM, AREAL[k])
                mpz_fdiv_q(PIM, PIM, BREAL[k])
            for k in range(cancellable_real, areal):
                mpz_mul(PIM, PIM, AREAL[k])
                mpz_fdiv_q_2exp(PIM, PIM, wp)
            for k in range(cancellable_real, breal):
                mpz_mul_2exp(PIM, PIM, wp)
                mpz_fdiv_q(PIM, PIM, BREAL[k])

        # Update product
        if have_complex:
            if have_complex_arg:
                # PRE = ((mul*(PRE*ZRE-PIM*ZIM))//div)>>wp
                # PIM = ((mul*(PIM*ZRE+PRE*ZIM))//div)>>wp
                mpz_mul(TRE, PRE, ZRE)
                mpz_submul(TRE, PIM, ZIM)
                mpz_mul(TRE, TRE, MUL)

                mpz_mul(TIM, PIM, ZRE)
                mpz_addmul(TIM, PRE, ZIM)
                mpz_mul(TIM, TIM, MUL)

                mpz_fdiv_q(PRE, TRE, DIV)
                mpz_fdiv_q_2exp(PRE, PRE, wp)

                mpz_fdiv_q(PIM, TIM, DIV)
                mpz_fdiv_q_2exp(PIM, PIM, wp)
            else:
                # real argument: scale both parts by mul*z/div
                mpz_mul(PRE, PRE, MUL)
                mpz_mul(PRE, PRE, ZRE)
                mpz_fdiv_q_2exp(PRE, PRE, wp)
                mpz_fdiv_q(PRE, PRE, DIV)

                mpz_mul(PIM, PIM, MUL)
                mpz_mul(PIM, PIM, ZRE)
                mpz_fdiv_q_2exp(PIM, PIM, wp)
                mpz_fdiv_q(PIM, PIM, DIV)

            # Multiply by the complex a parameters
            for i in range(acomplex):
                mpz_mul(TRE, PRE, ACRE[i])
                mpz_submul(TRE, PIM, ACIM[i])
                mpz_mul(TIM, PIM, ACRE[i])
                mpz_addmul(TIM, PRE, ACIM[i])
                mpz_fdiv_q_2exp(PRE, TRE, wp)
                mpz_fdiv_q_2exp(PIM, TIM, wp)

            # Divide by the complex b parameters:
            # P / BC = P * conj(BC) / |BC|^2
            for i in range(bcomplex):
                mpz_mul(URE, BCRE[i], BCRE[i])
                mpz_addmul(URE, BCIM[i], BCIM[i])
                mpz_mul(TRE, PRE, BCRE[i])
                mpz_addmul(TRE, PIM, BCIM[i])
                mpz_mul(TIM, PIM, BCRE[i])
                mpz_submul(TIM, PRE, BCIM[i])
                mpz_mul_2exp(PRE, TRE, wp)
                mpz_fdiv_q(PRE, PRE, URE)
                mpz_mul_2exp(PIM, TIM, wp)
                mpz_fdiv_q(PIM, PIM, URE)
        else:
            # fully real case
            mpz_mul(PRE, PRE, MUL)
            mpz_mul(PRE, PRE, ZRE)
            mpz_fdiv_q_2exp(PRE, PRE, wp)
            mpz_fdiv_q(PRE, PRE, DIV)

        # Add product to sum
        if have_complex:
            mpz_add(SRE, SRE, PRE)
            mpz_add(SIM, SIM, PIM)
            # stop once the term is below the epsilon threshold
            if mpz_cmpabs(PRE, HIGH) < 0 and mpz_cmpabs(PIM, HIGH) < 0:
                break
        else:
            mpz_add(SRE, SRE, PRE)
            if mpz_cmpabs(PRE, HIGH) < 0:
                break

        if n > MAX:
            from mpmath.libmp import NoConvergence
            raise NoConvergence('Hypergeometric series converges too slowly. Try increasing maxterms.')

        # +1 all parameters for next iteration
        for i in range(aint): mpz_add_ui(AINT[i], AINT[i], 1)
        for i in range(bint): mpz_add_ui(BINT[i], BINT[i], 1)
        for i in range(arat): mpz_add(AP[i], AP[i], AQ[i])
        for i in range(brat): mpz_add(BP[i], BP[i], BQ[i])
        for i in range(areal): mpz_add(AREAL[i], AREAL[i], one)
        for i in range(breal): mpz_add(BREAL[i], BREAL[i], one)
        for i in range(acomplex): mpz_add(ACRE[i], ACRE[i], one)
        for i in range(bcomplex): mpz_add(BCRE[i], BCRE[i], one)

    # Done: convert the fixed-point sums to MPFs and compute the binary
    # magnitude of the result (for the caller's error analysis)
    if have_complex:
        MPF_set_fixed(a, SRE, wp, prec, ROUND_N)
        MPF_set_fixed(b, SIM, wp, prec, ROUND_N)
        if mpz_sgn(SRE):
            if mpz_sgn(SIM):
                magn = max(mpz_get_si(a.exp) + <long>mpz_bitcount(a.man),
                    mpz_get_si(b.exp) + <long>mpz_bitcount(b.man))
            else:
                magn = mpz_get_si(a.exp) + <long>mpz_bitcount(a.man)
        elif mpz_sgn(SIM):
            magn = mpz_get_si(b.exp) + <long>mpz_bitcount(b.man)
        else:
            magn = -wp
    else:
        MPF_set_fixed(a, SRE, wp, prec, ROUND_N)
        if mpz_sgn(SRE):
            magn = mpz_get_si(a.exp) + <long>mpz_bitcount(a.man)
        else:
            magn = -wp

    mpz_clear(one)
    mpz_clear(SRE)
    mpz_clear(SIM)
    mpz_clear(PRE)
    mpz_clear(PIM)
    mpz_clear(ZRE)
    mpz_clear(ZIM)
    mpz_clear(MUL)
    mpz_clear(DIV)
    mpz_clear(HIGH)
    mpz_clear(LOW)
    mpz_clear(TRE)
    mpz_clear(TIM)
    mpz_clear(URE)
    mpz_clear(UIM)

    # Release the parameter scratch arrays
    for i in range(aint): mpz_clear(AINT[i])
    for i in range(bint): mpz_clear(BINT[i])
    for i in range(arat):
        mpz_clear(AP[i])
        mpz_clear(AQ[i])
    for i in range(brat):
        mpz_clear(BP[i])
        mpz_clear(BQ[i])
    for i in range(areal): mpz_clear(AREAL[i])
    for i in range(breal): mpz_clear(BREAL[i])
    for i in range(acomplex):
        mpz_clear(ACRE[i])
        mpz_clear(ACIM[i])
    for i in range(bcomplex):
        mpz_clear(BCRE[i])
        mpz_clear(BCIM[i])

    return have_complex, magn