#include "FEATURE/uwin"

#if !_UWIN || (_lib__copysign||_lib_copysign) && _lib_logb && (_lib__finite||_lib_finite) && (_lib_drem||_lib_remainder) && _lib_sqrt && _lib_ilogb && (_lib__scalb||_lib_scalb)

/* The native math library already provides everything below;
 * emit a dummy symbol so this object file is never empty. */
void _STUB_support(){}

#else

/*
 * Copyright (c) 1985, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef lint
static char sccsid[] = "@(#)support.c	8.1 (Berkeley) 6/4/93";
#endif /* not lint */

/*
 * Some IEEE standard 754 recommended functions and remainder and sqrt for
 * supporting the C elementary functions.
 ******************************************************************************
 * 				WARNING:
 *	These codes are developed (in double) to support the C elementary
 * functions temporarily. They are not universal, and some of them are very
 * slow (in particular, drem and sqrt is extremely inefficient). Each
 * computer system should have its implementation of these functions using
 * its own assembler.
 ******************************************************************************
 *
 * IEEE 754 required operations:
 *     drem(x,p)
 *              returns  x REM y  =  x - [x/y]*y , where [x/y] is the integer
 *              nearest x/y; in half way case, choose the even one.
 *     sqrt(x)
 *              returns the square root of x correctly rounded according to
 *		the rounding mod.
 *
 * IEEE 754 recommended functions:
 * (a) copysign(x,y)
 *              returns x with the sign of y.
 * (b) scalb(x,N)
 *              returns  x * (2**N), for integer values N.
 * (c) logb(x)
 *              returns the unbiased exponent of x, a signed integer in
 *              double precision, except that logb(0) is -INF, logb(INF)
 *              is +INF, and logb(NAN) is that NAN.
 * (d) finite(x)
 *              returns the value TRUE if -INF < x < +INF and returns
 *              FALSE otherwise.
 *
 *
 * CODED IN C BY K.C. NG, 11/25/84;
 * REVISED BY K.C.
NG on 1/22/85, 2/13/85, 3/24/85.77*/7879#include "mathimpl.h"8081#if defined(vax)||defined(tahoe) /* VAX D format */82#include <errno.h>83static const unsigned short msign=0x7fff , mexp =0x7f80 ;84static const short prep1=57, gap=7, bias=129 ;85static const double novf=1.7E38, nunf=3.0E-39, zero=0.0 ;86#else /* defined(vax)||defined(tahoe) */87static const unsigned short msign=0x7fff, mexp =0x7ff0 ;88static const short prep1=54, gap=4, bias=1023 ;89static const double novf=1.7E308, nunf=3.0E-308,zero=0.0;90#endif /* defined(vax)||defined(tahoe) */9192#if !_lib__scalb || !_lib_scalb9394extern double _scalb(x,N)95double x; double N;96{97int k;9899#ifdef national100unsigned short *px=(unsigned short *) &x + 3;101#else /* national */102unsigned short *px=(unsigned short *) &x;103#endif /* national */104105if( x == zero ) return(x);106107#if defined(vax)||defined(tahoe)108if( (k= *px & mexp ) != ~msign ) {109if (N < -260)110return(nunf*nunf);111else if (N > 260) {112return(copysign(infnan(ERANGE),x));113}114#else /* defined(vax)||defined(tahoe) */115if( (k= *px & mexp ) != mexp ) {116if( N<-2100) return(nunf*nunf); else if(N>2100) return(novf+novf);117if( k == 0 ) {118x *= scalb(1.0,prep1); N -= prep1; return(scalb(x,N));}119#endif /* defined(vax)||defined(tahoe) */120121if((k = (k>>gap)+ N) > 0 )122if( k < (mexp>>gap) ) *px = (*px&~mexp) | (k<<gap);123else x=novf+novf; /* overflow */124else125if( k > -prep1 )126/* gradual underflow */127{*px=(*px&~mexp)|(short)(1<<gap); x *= scalb(1.0,k-1);}128else129return(nunf*nunf);130}131return(x);132}133134#endif135136#if !_lib_scalb137138extern double scalb(x,N)139double x; double N;140{141return _scalb(x, N);142}143144#endif145146#if !_lib__copysign147148extern double _copysign(x,y)149double x,y;150{151#ifdef national152unsigned short *px=(unsigned short *) &x+3,153*py=(unsigned short *) &y+3;154#else /* national */155unsigned short *px=(unsigned short *) &x,156*py=(unsigned short *) &y;157#endif /* national */158159#if 
defined(vax)||defined(tahoe)160if ( (*px & mexp) == 0 ) return(x);161#endif /* defined(vax)||defined(tahoe) */162163*px = ( *px & msign ) | ( *py & ~msign );164return(x);165}166167#endif168169#if !_lib_copysign170171extern double copysign(x,y)172double x,y;173{174return _copysign(x,y);175}176177#endif178179#if !_lib_logb180181extern double logb(x)182double x;183{184185#ifdef national186short *px=(short *) &x+3, k;187#else /* national */188short *px=(short *) &x, k;189#endif /* national */190191#if defined(vax)||defined(tahoe)192return (int)(((*px&mexp)>>gap)-bias);193#else /* defined(vax)||defined(tahoe) */194if( (k= *px & mexp ) != mexp )195if ( k != 0 )196return ( (k>>gap) - bias );197else if( x != zero)198return ( -1022.0 );199else200return(-(1.0/zero));201else if(x != x)202return(x);203else204{*px &= msign; return(x);}205#endif /* defined(vax)||defined(tahoe) */206}207208#endif209210#if !_lib__finite211212extern int _finite(x)213double x;214{215#if defined(vax)||defined(tahoe)216return(1);217#else /* defined(vax)||defined(tahoe) */218#ifdef national219return( (*((short *) &x+3 ) & mexp ) != mexp );220#else /* national */221return( (*((short *) &x ) & mexp ) != mexp );222#endif /* national */223#endif /* defined(vax)||defined(tahoe) */224}225226#endif227228#if !_lib_finite229230extern int finite(x)231double x;232{233return _finite(x);234}235236#endif237238#if !_lib_drem239240extern double drem(x,p)241double x,p;242{243#if _lib_remainder244return remainder(x,p);245#else246short sign;247double hp,dp,tmp;248unsigned short k;249#ifdef national250unsigned short251*px=(unsigned short *) &x +3,252*pp=(unsigned short *) &p +3,253*pd=(unsigned short *) &dp +3,254*pt=(unsigned short *) &tmp+3;255#else /* national */256unsigned short257*px=(unsigned short *) &x ,258*pp=(unsigned short *) &p ,259*pd=(unsigned short *) &dp ,260*pt=(unsigned short *) &tmp;261#endif /* national */262263*pp &= msign ;264265#if defined(vax)||defined(tahoe)266if( ( *px & mexp ) == ~msign ) /* is 
x a reserved operand? */267#else /* defined(vax)||defined(tahoe) */268if( ( *px & mexp ) == mexp )269#endif /* defined(vax)||defined(tahoe) */270return (x-p)-(x-p); /* create nan if x is inf */271if (p == zero) {272#if defined(vax)||defined(tahoe)273return(infnan(EDOM));274#else /* defined(vax)||defined(tahoe) */275return zero/zero;276#endif /* defined(vax)||defined(tahoe) */277}278279#if defined(vax)||defined(tahoe)280if( ( *pp & mexp ) == ~msign ) /* is p a reserved operand? */281#else /* defined(vax)||defined(tahoe) */282if( ( *pp & mexp ) == mexp )283#endif /* defined(vax)||defined(tahoe) */284{ if (p != p) return p; else return x;}285286else if ( ((*pp & mexp)>>gap) <= 1 )287/* subnormal p, or almost subnormal p */288{ double b; b=scalb(1.0,(int)prep1);289p *= b; x = drem(x,p); x *= b; return(drem(x,p)/b);}290else if ( p >= novf/2)291{ p /= 2 ; x /= 2; return(drem(x,p)*2);}292else293{294dp=p+p; hp=p/2;295sign= *px & ~msign ;296*px &= msign ;297while ( x > dp )298{299k=(*px & mexp) - (*pd & mexp) ;300tmp = dp ;301*pt += k ;302303#if defined(vax)||defined(tahoe)304if( x < tmp ) *pt -= 128 ;305#else /* defined(vax)||defined(tahoe) */306if( x < tmp ) *pt -= 16 ;307#endif /* defined(vax)||defined(tahoe) */308309x -= tmp ;310}311if ( x > hp )312{ x -= p ; if ( x >= hp ) x -= p ; }313314#if defined(vax)||defined(tahoe)315if (x)316#endif /* defined(vax)||defined(tahoe) */317*px ^= sign;318return( x);319320}321#endif322}323324#endif325326#if !_lib_remainder327328extern double remainder(x,p)329double x,p;330{331return drem(x,p);332}333334#endif335336#if !_lib_sqrt337338extern double sqrt(x)339double x;340{341double q,s,b,r;342double t;343double const zero=0.0;344int m,n,i;345#if defined(vax)||defined(tahoe)346int k=54;347#else /* defined(vax)||defined(tahoe) */348int k=51;349#endif /* defined(vax)||defined(tahoe) */350351/* sqrt(NaN) is NaN, sqrt(+-0) = +-0 */352if(x!=x||x==zero) return(x);353354/* sqrt(negative) is invalid */355if(x<zero) {356#if 
defined(vax)||defined(tahoe)357return (infnan(EDOM)); /* NaN */358#else /* defined(vax)||defined(tahoe) */359return(zero/zero);360#endif /* defined(vax)||defined(tahoe) */361}362363/* sqrt(INF) is INF */364if(!finite(x)) return(x);365366/* scale x to [1,4) */367n=logb(x);368x=scalb(x,-n);369if((m=logb(x))!=0) x=scalb(x,-m); /* subnormal number */370m += n;371n = m/2;372if((n+n)!=m) {x *= 2; m -=1; n=m/2;}373374/* generate sqrt(x) bit by bit (accumulating in q) */375q=1.0; s=4.0; x -= 1.0; r=1;376for(i=1;i<=k;i++) {377t=s+1; x *= 4; r /= 2;378if(t<=x) {379s=t+t+2, x -= t; q += r;}380else381s *= 2;382}383384/* generate the last bit and determine the final rounding */385r/=2; x *= 4;386if(x==zero) goto end; 100+r; /* trigger inexact flag */387if(s<x) {388q+=r; x -=s; s += 2; s *= 2; x *= 4;389t = (x-s)-5;390b=1.0+3*r/4; if(b==1.0) goto end; /* b==1 : Round-to-zero */391b=1.0+r/4; if(b>1.0) t=1; /* b>1 : Round-to-(+INF) */392if(t>=0) q+=r; } /* else: Round-to-nearest */393else {394s *= 2; x *= 4;395t = (x-s)-1;396b=1.0+3*r/4; if(b==1.0) goto end;397b=1.0+r/4; if(b>1.0) t=1;398if(t>=0) q+=r; }399400end: return(scalb(q,n));401}402403#endif404405#if 0406/* DREM(X,Y)407* RETURN X REM Y =X-N*Y, N=[X/Y] ROUNDED (ROUNDED TO EVEN IN THE HALF WAY CASE)408* DOUBLE PRECISION (VAX D format 56 bits, IEEE DOUBLE 53 BITS)409* INTENDED FOR ASSEMBLY LANGUAGE410* CODED IN C BY K.C. 
NG, 3/23/85, 4/8/85.411*412* Warning: this code should not get compiled in unless ALL of413* the following machine-dependent routines are supplied.414*415* Required machine dependent functions (not on a VAX):416* swapINX(i): save inexact flag and reset it to "i"417* swapENI(e): save inexact enable and reset it to "e"418*/419420extern double drem(x,y)421double x,y;422{423424#ifdef national /* order of words in floating point number */425static const n0=3,n1=2,n2=1,n3=0;426#else /* VAX, SUN, ZILOG, TAHOE */427static const n0=0,n1=1,n2=2,n3=3;428#endif429430static const unsigned short mexp =0x7ff0, m25 =0x0190, m57 =0x0390;431static const double zero=0.0;432double hy,y1,t,t1;433short k;434long n;435int i,e;436unsigned short xexp,yexp, *px =(unsigned short *) &x ,437nx,nf, *py =(unsigned short *) &y ,438sign, *pt =(unsigned short *) &t ,439*pt1 =(unsigned short *) &t1 ;440441xexp = px[n0] & mexp ; /* exponent of x */442yexp = py[n0] & mexp ; /* exponent of y */443sign = px[n0] &0x8000; /* sign of x */444445/* return NaN if x is NaN, or y is NaN, or x is INF, or y is zero */446if(x!=x) return(x); if(y!=y) return(y); /* x or y is NaN */447if( xexp == mexp ) return(zero/zero); /* x is INF */448if(y==zero) return(y/y);449450/* save the inexact flag and inexact enable in i and e respectively451* and reset them to zero452*/453i=swapINX(0); e=swapENI(0);454455/* subnormal number */456nx=0;457if(yexp==0) {t=1.0,pt[n0]+=m57; y*=t; nx=m57;}458459/* if y is tiny (biased exponent <= 57), scale up y to y*2**57 */460if( yexp <= m57 ) {py[n0]+=m57; nx+=m57; yexp+=m57;}461462nf=nx;463py[n0] &= 0x7fff;464px[n0] &= 0x7fff;465466/* mask off the least significant 27 bits of y */467t=y; pt[n3]=0; pt[n2]&=0xf800; y1=t;468469/* LOOP: argument reduction on x whenever x > y */470loop:471while ( x > y )472{473t=y;474t1=y1;475xexp=px[n0]&mexp; /* exponent of x */476k=xexp-yexp-m25;477if(k>0) /* if x/y >= 2**26, scale up y so that x/y < 2**26 */478{pt[n0]+=k;pt1[n0]+=k;}479n=x/t; 
x=(x-n*t1)-n*(t-t1);480}481/* end while (x > y) */482483if(nx!=0) {t=1.0; pt[n0]+=nx; x*=t; nx=0; goto loop;}484485/* final adjustment */486487hy=y/2.0;488if(x>hy||((x==hy)&&n%2==1)) x-=y;489px[n0] ^= sign;490if(nf!=0) { t=1.0; pt[n0]-=nf; x*=t;}491492/* restore inexact flag and inexact enable */493swapINX(i); swapENI(e);494495return(x);496}497#endif498499#if 0500/* SQRT501* RETURN CORRECTLY ROUNDED (ACCORDING TO THE ROUNDING MODE) SQRT502* FOR IEEE DOUBLE PRECISION ONLY, INTENDED FOR ASSEMBLY LANGUAGE503* CODED IN C BY K.C. NG, 3/22/85.504*505* Warning: this code should not get compiled in unless ALL of506* the following machine-dependent routines are supplied.507*508* Required machine dependent functions:509* swapINX(i) ...return the status of INEXACT flag and reset it to "i"510* swapRM(r) ...return the current Rounding Mode and reset it to "r"511* swapENI(e) ...return the status of inexact enable and reset it to "e"512* addc(t) ...perform t=t+1 regarding t as a 64 bit unsigned integer513* subc(t) ...perform t=t-1 regarding t as a 64 bit unsigned integer514*/515516static const unsigned long table[] = {5170, 1204, 3062, 5746, 9193, 13348, 18162, 23592, 29598, 36145, 43202, 50740,51858733, 67158, 75992, 85215, 83599, 71378, 60428, 50647, 41945, 34246, 27478,51921581, 16499, 12183, 8588, 5674, 3403, 1742, 661, 130, };520521extern double newsqrt(x)522double x;523{524double y,z,t,addc(),subc()525double const b54=134217728.*134217728.; /* b54=2**54 */526long mx,scalx;527long const mexp=0x7ff00000;528int i,j,r,e,swapINX(),swapRM(),swapENI();529unsigned long *py=(unsigned long *) &y ,530*pt=(unsigned long *) &t ,531*px=(unsigned long *) &x ;532#ifdef national /* ordering of word in a floating point number */533const int n0=1, n1=0;534#else535const int n0=0, n1=1;536#endif537/* Rounding Mode: RN ...round-to-nearest538* RZ ...round-towards 0539* RP ...round-towards +INF540* RM ...round-towards -INF541*/542const int RN=0,RZ=1,RP=2,RM=3;543/* machine dependent: work on a 
Zilog Z8070544* and a National 32081 & 16081545*/546547/* exceptions */548if(x!=x||x==0.0) return(x); /* sqrt(NaN) is NaN, sqrt(+-0) = +-0 */549if(x<0) return((x-x)/(x-x)); /* sqrt(negative) is invalid */550if((mx=px[n0]&mexp)==mexp) return(x); /* sqrt(+INF) is +INF */551552/* save, reset, initialize */553e=swapENI(0); /* ...save and reset the inexact enable */554i=swapINX(0); /* ...save INEXACT flag */555r=swapRM(RN); /* ...save and reset the Rounding Mode to RN */556scalx=0;557558/* subnormal number, scale up x to x*2**54 */559if(mx==0) {x *= b54 ; scalx-=0x01b00000;}560561/* scale x to avoid intermediate over/underflow:562* if (x > 2**512) x=x/2**512; if (x < 2**-512) x=x*2**512 */563if(mx>0x5ff00000) {px[n0] -= 0x20000000; scalx+= 0x10000000;}564if(mx<0x1ff00000) {px[n0] += 0x20000000; scalx-= 0x10000000;}565566/* magic initial approximation to almost 8 sig. bits */567py[n0]=(px[n0]>>1)+0x1ff80000;568py[n0]=py[n0]-table[(py[n0]>>15)&31];569570/* Heron's rule once with correction to improve y to almost 18 sig. bits */571t=x/y; y=y+t; py[n0]=py[n0]-0x00100006; py[n1]=0;572573/* triple to almost 56 sig. bits; now y approx. 
sqrt(x) to within 1 ulp */574t=y*y; z=t; pt[n0]+=0x00100000; t+=z; z=(x-z)*y;575t=z/(t+x) ; pt[n0]+=0x00100000; y+=t;576577/* twiddle last bit to force y correctly rounded */578swapRM(RZ); /* ...set Rounding Mode to round-toward-zero */579swapINX(0); /* ...clear INEXACT flag */580swapENI(e); /* ...restore inexact enable status */581t=x/y; /* ...chopped quotient, possibly inexact */582j=swapINX(i); /* ...read and restore inexact flag */583if(j==0) { if(t==y) goto end; else t=subc(t); } /* ...t=t-ulp */584b54+0.1; /* ..trigger inexact flag, sqrt(x) is inexact */585if(r==RN) t=addc(t); /* ...t=t+ulp */586else if(r==RP) { t=addc(t);y=addc(y);}/* ...t=t+ulp;y=y+ulp; */587y=y+t; /* ...chopped sum */588py[n0]=py[n0]-0x00100000; /* ...correctly rounded sqrt(x) */589end: py[n0]=py[n0]+scalx; /* ...scale back y */590swapRM(r); /* ...restore Rounding Mode */591return(y);592}593#endif594595#if !_lib_ilogb596597extern int ilogb(double x)598{599return((int)logb(x));600}601602#endif603604#endif605606607