Path: system/lib/libcxx/src/ryu/d2s.cpp
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

// Copyright 2018 Ulf Adams
// Copyright (c) Microsoft Corporation. All rights reserved.

// Boost Software License - Version 1.0 - August 17th, 2003

// Permission is hereby granted, free of charge, to any person or organization
// obtaining a copy of the software and accompanying documentation covered by
// this license (the "Software") to use, reproduce, display, distribute,
// execute, and transmit the Software, and to prepare derivative works of the
// Software, and to permit third-parties to whom the Software is furnished to
// do so, all subject to the following:

// The copyright notices in the Software and this entire statement, including
// the above license grant, this restriction and the following disclaimer,
// must be included in all copies of the Software, in whole or in part, and
// all derivative works of the Software, unless such copies or derivative
// works are solely in the form of machine-executable object code generated by
// a source language processor.

// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.

// Avoid formatting to keep the changes with the original code minimal.
// clang-format off

#include <__assert>
#include <__config>
#include <charconv>
#include <cstddef>

#include "include/ryu/common.h"
#include "include/ryu/d2fixed.h"
#include "include/ryu/d2s.h"
#include "include/ryu/d2s_full_table.h"
#include "include/ryu/d2s_intrinsics.h"
#include "include/ryu/digit_table.h"
#include "include/ryu/ryu.h"

_LIBCPP_BEGIN_NAMESPACE_STD

// We need a 64x128-bit multiplication and a subsequent 128-bit shift.
// Multiplication:
//   The 64-bit factor is variable and passed in, the 128-bit factor comes
//   from a lookup table. We know that the 64-bit factor only has 55
//   significant bits (i.e., the 9 topmost bits are zeros). The 128-bit
//   factor only has 124 significant bits (i.e., the 4 topmost bits are
//   zeros).
// Shift:
//   In principle, the multiplication result requires 55 + 124 = 179 bits to
//   represent. However, we then shift this value to the right by __j, which is
//   at least __j >= 115, so the result is guaranteed to fit into 179 - 115 = 64
//   bits. This means that we only need the topmost 64 significant bits of
//   the 64x128-bit multiplication.
//
// There are several ways to do this:
// 1. Best case: the compiler exposes a 128-bit type.
//    We perform two 64x64-bit multiplications, add the higher 64 bits of the
//    lower result to the higher result, and shift by __j - 64 bits.
//
//    We explicitly cast from 64-bit to 128-bit, so the compiler can tell
//    that these are only 64-bit inputs, and can map these to the best
//    possible sequence of assembly instructions.
//    x64 machines happen to have matching assembly instructions for
//    64x64-bit multiplications and 128-bit shifts.
//
// 2. Second best case: the compiler exposes intrinsics for the x64 assembly
//    instructions mentioned in 1.
//
// 3. We only have 64x64 bit instructions that return the lower 64 bits of
//    the result, i.e., we have to use plain C.
//    Our inputs are less than the full width, so we have three options:
//    a. Ignore this fact and just implement the intrinsics manually.
//    b. Split both into 31-bit pieces, which guarantees no internal overflow,
//       but requires extra work upfront (unless we change the lookup table).
//    c. Split only the first factor into 31-bit pieces, which also guarantees
//       no internal overflow, but requires extra work since the intermediate
//       results are not perfectly aligned.
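//
// For illustration only, approach 1 boils down to a sketch along these lines, assuming the
// compiler provides an unsigned __int128 type (this mirrors what the intrinsic-based
// __mulShift below computes):
//   const unsigned __int128 __b0 = static_cast<unsigned __int128>(__m) * __mul[0];
//   const unsigned __int128 __b1 = static_cast<unsigned __int128>(__m) * __mul[1];
//   return static_cast<uint64_t>((__b1 + (__b0 >> 64)) >> (__j - 64));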
#ifdef _LIBCPP_INTRINSIC128

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint64_t __mulShift(const uint64_t __m, const uint64_t* const __mul, const int32_t __j) {
  // __m is maximum 55 bits
  uint64_t __high1;                                               // 128
  const uint64_t __low1 = __ryu_umul128(__m, __mul[1], &__high1); // 64
  uint64_t __high0;                                               // 64
  (void) __ryu_umul128(__m, __mul[0], &__high0);                  // 0
  const uint64_t __sum = __high0 + __low1;
  if (__sum < __high0) {
    ++__high1; // overflow into __high1
  }
  return __ryu_shiftright128(__sum, __high1, static_cast<uint32_t>(__j - 64));
}

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint64_t __mulShiftAll(const uint64_t __m, const uint64_t* const __mul, const int32_t __j,
    uint64_t* const __vp, uint64_t* const __vm, const uint32_t __mmShift) {
  *__vp = __mulShift(4 * __m + 2, __mul, __j);
  *__vm = __mulShift(4 * __m - 1 - __mmShift, __mul, __j);
  return __mulShift(4 * __m, __mul, __j);
}

#else // ^^^ intrinsics available ^^^ / vvv intrinsics unavailable vvv

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline _LIBCPP_ALWAYS_INLINE uint64_t __mulShiftAll(uint64_t __m, const uint64_t* const __mul, const int32_t __j,
    uint64_t* const __vp, uint64_t* const __vm, const uint32_t __mmShift) { // TRANSITION, VSO-634761
  __m <<= 1;
  // __m is maximum 55 bits
  uint64_t __tmp;
  const uint64_t __lo = __ryu_umul128(__m, __mul[0], &__tmp);
  uint64_t __hi;
  const uint64_t __mid = __tmp + __ryu_umul128(__m, __mul[1], &__hi);
  __hi += __mid < __tmp; // overflow into __hi

  const uint64_t __lo2 = __lo + __mul[0];
  const uint64_t __mid2 = __mid + __mul[1] + (__lo2 < __lo);
  const uint64_t __hi2 = __hi + (__mid2 < __mid);
  *__vp = __ryu_shiftright128(__mid2, __hi2, static_cast<uint32_t>(__j - 64 - 1));

  if (__mmShift == 1) {
    const uint64_t __lo3 = __lo - __mul[0];
    const uint64_t __mid3 = __mid - __mul[1] - (__lo3 > __lo);
    const uint64_t __hi3 = __hi - (__mid3 > __mid);
    *__vm = __ryu_shiftright128(__mid3, __hi3, static_cast<uint32_t>(__j - 64 - 1));
  } else {
    const uint64_t __lo3 = __lo + __lo;
    const uint64_t __mid3 = __mid + __mid + (__lo3 < __lo);
    const uint64_t __hi3 = __hi + __hi + (__mid3 < __mid);
    const uint64_t __lo4 = __lo3 - __mul[0];
    const uint64_t __mid4 = __mid3 - __mul[1] - (__lo4 > __lo3);
    const uint64_t __hi4 = __hi3 - (__mid4 > __mid3);
    *__vm = __ryu_shiftright128(__mid4, __hi4, static_cast<uint32_t>(__j - 64));
  }

  return __ryu_shiftright128(__mid, __hi, static_cast<uint32_t>(__j - 64 - 1));
}

#endif // ^^^ intrinsics unavailable ^^^
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline uint32_t __decimalLength17(const uint64_t __v) {
  // This is slightly faster than a loop.
  // The average output length is 16.38 digits, so we check high-to-low.
  // Function precondition: __v is not an 18, 19, or 20-digit number.
  // (17 digits are sufficient for round-tripping.)
  _LIBCPP_ASSERT_INTERNAL(__v < 100000000000000000u, "");
  if (__v >= 10000000000000000u) { return 17; }
  if (__v >= 1000000000000000u) { return 16; }
  if (__v >= 100000000000000u) { return 15; }
  if (__v >= 10000000000000u) { return 14; }
  if (__v >= 1000000000000u) { return 13; }
  if (__v >= 100000000000u) { return 12; }
  if (__v >= 10000000000u) { return 11; }
  if (__v >= 1000000000u) { return 10; }
  if (__v >= 100000000u) { return 9; }
  if (__v >= 10000000u) { return 8; }
  if (__v >= 1000000u) { return 7; }
  if (__v >= 100000u) { return 6; }
  if (__v >= 10000u) { return 5; }
  if (__v >= 1000u) { return 4; }
  if (__v >= 100u) { return 3; }
  if (__v >= 10u) { return 2; }
  return 1;
}

// A floating decimal representing m * 10^e.
struct __floating_decimal_64 {
  uint64_t __mantissa;
  int32_t __exponent;
};

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline __floating_decimal_64 __d2d(const uint64_t __ieeeMantissa, const uint32_t __ieeeExponent) {
  int32_t __e2;
  uint64_t __m2;
  if (__ieeeExponent == 0) {
    // We subtract 2 so that the bounds computation has 2 additional bits.
    __e2 = 1 - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS - 2;
    __m2 = __ieeeMantissa;
  } else {
    __e2 = static_cast<int32_t>(__ieeeExponent) - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS - 2;
    __m2 = (1ull << __DOUBLE_MANTISSA_BITS) | __ieeeMantissa;
  }
  const bool __even = (__m2 & 1) == 0;
  const bool __acceptBounds = __even;

  // Step 2: Determine the interval of valid decimal representations.
  const uint64_t __mv = 4 * __m2;
  // Implicit bool -> int conversion. True is 1, false is 0.
  const uint32_t __mmShift = __ieeeMantissa != 0 || __ieeeExponent <= 1;
  // We would compute __mp and __mm like this:
  // uint64_t __mp = 4 * __m2 + 2;
  // uint64_t __mm = __mv - 1 - __mmShift;
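  // (Note: __mv, __mp, and __mm are scaled by 4 = 2^2, matching the extra 2 subtracted from
  // __e2 above, so __mv * 2^__e2 still equals the original value __m2 * 2^(__e2 + 2).)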
  // Step 3: Convert to a decimal power base using 128-bit arithmetic.
  uint64_t __vr, __vp, __vm;
  int32_t __e10;
  bool __vmIsTrailingZeros = false;
  bool __vrIsTrailingZeros = false;
  if (__e2 >= 0) {
    // I tried special-casing __q == 0, but there was no effect on performance.
    // This expression is slightly faster than max(0, __log10Pow2(__e2) - 1).
    const uint32_t __q = __log10Pow2(__e2) - (__e2 > 3);
    __e10 = static_cast<int32_t>(__q);
    const int32_t __k = __DOUBLE_POW5_INV_BITCOUNT + __pow5bits(static_cast<int32_t>(__q)) - 1;
    const int32_t __i = -__e2 + static_cast<int32_t>(__q) + __k;
    __vr = __mulShiftAll(__m2, __DOUBLE_POW5_INV_SPLIT[__q], __i, &__vp, &__vm, __mmShift);
    if (__q <= 21) {
      // This should use __q <= 22, but I think 21 is also safe. Smaller values
      // may still be safe, but it's more difficult to reason about them.
      // Only one of __mp, __mv, and __mm can be a multiple of 5, if any.
      const uint32_t __mvMod5 = static_cast<uint32_t>(__mv) - 5 * static_cast<uint32_t>(__div5(__mv));
      if (__mvMod5 == 0) {
        __vrIsTrailingZeros = __multipleOfPowerOf5(__mv, __q);
      } else if (__acceptBounds) {
        // Same as min(__e2 + (~__mm & 1), __pow5Factor(__mm)) >= __q
        // <=> __e2 + (~__mm & 1) >= __q && __pow5Factor(__mm) >= __q
        // <=> true && __pow5Factor(__mm) >= __q, since __e2 >= __q.
        __vmIsTrailingZeros = __multipleOfPowerOf5(__mv - 1 - __mmShift, __q);
      } else {
        // Same as min(__e2 + 1, __pow5Factor(__mp)) >= __q.
        __vp -= __multipleOfPowerOf5(__mv + 2, __q);
      }
    }
  } else {
    // This expression is slightly faster than max(0, __log10Pow5(-__e2) - 1).
    const uint32_t __q = __log10Pow5(-__e2) - (-__e2 > 1);
    __e10 = static_cast<int32_t>(__q) + __e2;
    const int32_t __i = -__e2 - static_cast<int32_t>(__q);
    const int32_t __k = __pow5bits(__i) - __DOUBLE_POW5_BITCOUNT;
    const int32_t __j = static_cast<int32_t>(__q) - __k;
    __vr = __mulShiftAll(__m2, __DOUBLE_POW5_SPLIT[__i], __j, &__vp, &__vm, __mmShift);
    if (__q <= 1) {
      // {__vr,__vp,__vm} is trailing zeros if {__mv,__mp,__mm} has at least __q trailing 0 bits.
      // __mv = 4 * __m2, so it always has at least two trailing 0 bits.
      __vrIsTrailingZeros = true;
      if (__acceptBounds) {
        // __mm = __mv - 1 - __mmShift, so it has 1 trailing 0 bit iff __mmShift == 1.
        __vmIsTrailingZeros = __mmShift == 1;
      } else {
        // __mp = __mv + 2, so it always has at least one trailing 0 bit.
        --__vp;
      }
    } else if (__q < 63) { // TRANSITION(ulfjack): Use a tighter bound here.
      // We need to compute min(ntz(__mv), __pow5Factor(__mv) - __e2) >= __q - 1
      // <=> ntz(__mv) >= __q - 1 && __pow5Factor(__mv) - __e2 >= __q - 1
      // <=> ntz(__mv) >= __q - 1 (__e2 is negative and -__e2 >= __q)
      // <=> (__mv & ((1 << (__q - 1)) - 1)) == 0
      // We also need to make sure that the left shift does not overflow.
      __vrIsTrailingZeros = __multipleOfPowerOf2(__mv, __q - 1);
    }
  }

  // Step 4: Find the shortest decimal representation in the interval of valid representations.
  int32_t __removed = 0;
  uint8_t __lastRemovedDigit = 0;
  uint64_t _Output;
  // On average, we remove ~2 digits.
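  // For illustration of the removal loops below: with __vm = 12301, __vr = 12345, __vp = 12389,
  // exactly one digit can be removed (leaving __vm = 1230, __vr = 1234, __vp = 1238) before
  // __div10(__vp) <= __div10(__vm) stops the loop, and the removed digit 5 rounds _Output up to 1235.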
  if (__vmIsTrailingZeros || __vrIsTrailingZeros) {
    // General case, which happens rarely (~0.7%).
    for (;;) {
      const uint64_t __vpDiv10 = __div10(__vp);
      const uint64_t __vmDiv10 = __div10(__vm);
      if (__vpDiv10 <= __vmDiv10) {
        break;
      }
      const uint32_t __vmMod10 = static_cast<uint32_t>(__vm) - 10 * static_cast<uint32_t>(__vmDiv10);
      const uint64_t __vrDiv10 = __div10(__vr);
      const uint32_t __vrMod10 = static_cast<uint32_t>(__vr) - 10 * static_cast<uint32_t>(__vrDiv10);
      __vmIsTrailingZeros &= __vmMod10 == 0;
      __vrIsTrailingZeros &= __lastRemovedDigit == 0;
      __lastRemovedDigit = static_cast<uint8_t>(__vrMod10);
      __vr = __vrDiv10;
      __vp = __vpDiv10;
      __vm = __vmDiv10;
      ++__removed;
    }
    if (__vmIsTrailingZeros) {
      for (;;) {
        const uint64_t __vmDiv10 = __div10(__vm);
        const uint32_t __vmMod10 = static_cast<uint32_t>(__vm) - 10 * static_cast<uint32_t>(__vmDiv10);
        if (__vmMod10 != 0) {
          break;
        }
        const uint64_t __vpDiv10 = __div10(__vp);
        const uint64_t __vrDiv10 = __div10(__vr);
        const uint32_t __vrMod10 = static_cast<uint32_t>(__vr) - 10 * static_cast<uint32_t>(__vrDiv10);
        __vrIsTrailingZeros &= __lastRemovedDigit == 0;
        __lastRemovedDigit = static_cast<uint8_t>(__vrMod10);
        __vr = __vrDiv10;
        __vp = __vpDiv10;
        __vm = __vmDiv10;
        ++__removed;
      }
    }
    if (__vrIsTrailingZeros && __lastRemovedDigit == 5 && __vr % 2 == 0) {
      // Round even if the exact number is .....50..0.
      __lastRemovedDigit = 4;
    }
    // We need to take __vr + 1 if __vr is outside bounds or we need to round up.
    _Output = __vr + ((__vr == __vm && (!__acceptBounds || !__vmIsTrailingZeros)) || __lastRemovedDigit >= 5);
  } else {
    // Specialized for the common case (~99.3%). Percentages below are relative to this.
    bool __roundUp = false;
    const uint64_t __vpDiv100 = __div100(__vp);
    const uint64_t __vmDiv100 = __div100(__vm);
    if (__vpDiv100 > __vmDiv100) { // Optimization: remove two digits at a time (~86.2%).
      const uint64_t __vrDiv100 = __div100(__vr);
      const uint32_t __vrMod100 = static_cast<uint32_t>(__vr) - 100 * static_cast<uint32_t>(__vrDiv100);
      __roundUp = __vrMod100 >= 50;
      __vr = __vrDiv100;
      __vp = __vpDiv100;
      __vm = __vmDiv100;
      __removed += 2;
    }
    // Loop iterations below (approximately), without optimization above:
    // 0: 0.03%, 1: 13.8%, 2: 70.6%, 3: 14.0%, 4: 1.40%, 5: 0.14%, 6+: 0.02%
    // Loop iterations below (approximately), with optimization above:
    // 0: 70.6%, 1: 27.8%, 2: 1.40%, 3: 0.14%, 4+: 0.02%
    for (;;) {
      const uint64_t __vpDiv10 = __div10(__vp);
      const uint64_t __vmDiv10 = __div10(__vm);
      if (__vpDiv10 <= __vmDiv10) {
        break;
      }
      const uint64_t __vrDiv10 = __div10(__vr);
      const uint32_t __vrMod10 = static_cast<uint32_t>(__vr) - 10 * static_cast<uint32_t>(__vrDiv10);
      __roundUp = __vrMod10 >= 5;
      __vr = __vrDiv10;
      __vp = __vpDiv10;
      __vm = __vmDiv10;
      ++__removed;
    }
    // We need to take __vr + 1 if __vr is outside bounds or we need to round up.
    _Output = __vr + (__vr == __vm || __roundUp);
  }
  const int32_t __exp = __e10 + __removed;

  __floating_decimal_64 __fd;
  __fd.__exponent = __exp;
  __fd.__mantissa = _Output;
  return __fd;
}

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline to_chars_result __to_chars(char* const _First, char* const _Last, const __floating_decimal_64 __v,
    chars_format _Fmt, const double __f) {
  // Step 5: Print the decimal representation.
  uint64_t _Output = __v.__mantissa;
  int32_t _Ryu_exponent = __v.__exponent;
  const uint32_t __olength = __decimalLength17(_Output);
  int32_t _Scientific_exponent = _Ryu_exponent + static_cast<int32_t>(__olength) - 1;

  if (_Fmt == chars_format{}) {
    int32_t _Lower;
    int32_t _Upper;

    if (__olength == 1) {
      // Value | Fixed   | Scientific
      // 1e-3  | "0.001" | "1e-03"
      // 1e4   | "10000" | "1e+04"
      _Lower = -3;
      _Upper = 4;
    } else {
      // Value   | Fixed       | Scientific
      // 1234e-7 | "0.0001234" | "1.234e-04"
      // 1234e5  | "123400000" | "1.234e+08"
      _Lower = -static_cast<int32_t>(__olength + 3);
      _Upper = 5;
    }

    if (_Lower <= _Ryu_exponent && _Ryu_exponent <= _Upper) {
      _Fmt = chars_format::fixed;
    } else {
      _Fmt = chars_format::scientific;
    }
  } else if (_Fmt == chars_format::general) {
    // C11 7.21.6.1 "The fprintf function"/8:
    // "Let P equal [...] 6 if the precision is omitted [...].
    // Then, if a conversion with style E would have an exponent of X:
    // - if P > X >= -4, the conversion is with style f [...].
    // - otherwise, the conversion is with style e [...]."
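    // For illustration: 1.234e-05 has _Scientific_exponent == -5 < -4, so chars_format::general
    // selects scientific notation here, while 0.0001234 (_Scientific_exponent == -4) stays fixed.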
    if (-4 <= _Scientific_exponent && _Scientific_exponent < 6) {
      _Fmt = chars_format::fixed;
    } else {
      _Fmt = chars_format::scientific;
    }
  }

  if (_Fmt == chars_format::fixed) {
    // Example: _Output == 1729, __olength == 4

    // _Ryu_exponent | Printed  | _Whole_digits | _Total_fixed_length  | Notes
    // --------------|----------|---------------|----------------------|---------------------------------------
    // 2             | 172900   | 6             | _Whole_digits        | Ryu can't be used for printing
    // 1             | 17290    | 5             | (sometimes adjusted) | when the trimmed digits are nonzero.
    // --------------|----------|---------------|----------------------|---------------------------------------
    // 0             | 1729     | 4             | _Whole_digits        | Unified length cases.
    // --------------|----------|---------------|----------------------|---------------------------------------
    // -1            | 172.9    | 3             | __olength + 1        | This case can't happen for
    // -2            | 17.29    | 2             |                      | __olength == 1, but no additional
    // -3            | 1.729    | 1             |                      | code is needed to avoid it.
    // --------------|----------|---------------|----------------------|---------------------------------------
    // -4            | 0.1729   | 0             | 2 - _Ryu_exponent    | C11 7.21.6.1 "The fprintf function"/8:
    // -5            | 0.01729  | -1            |                      | "If a decimal-point character appears,
    // -6            | 0.001729 | -2            |                      | at least one digit appears before it."

    const int32_t _Whole_digits = static_cast<int32_t>(__olength) + _Ryu_exponent;

    uint32_t _Total_fixed_length;
    if (_Ryu_exponent >= 0) { // cases "172900" and "1729"
      _Total_fixed_length = static_cast<uint32_t>(_Whole_digits);
      if (_Output == 1) {
        // Rounding can affect the number of digits.
        // For example, 1e23 is exactly "99999999999999991611392" which is 23 digits instead of 24.
        // We can use a lookup table to detect this and adjust the total length.
        static constexpr uint8_t _Adjustment[309] = {
          0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,0,1,0,1,1,1,0,1,1,1,0,0,0,0,0,
          1,1,0,0,1,0,1,1,1,0,0,0,0,1,1,1,1,0,0,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,1,0,1,1,0,0,0,0,1,1,1,
          1,0,0,0,0,0,0,0,1,1,0,1,1,0,0,1,0,1,0,1,0,1,1,0,0,0,0,0,1,1,1,0,0,1,1,1,1,1,0,1,0,1,1,0,1,
          1,0,0,0,0,0,0,0,0,0,1,1,1,0,0,1,0,0,1,0,0,1,1,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0,0,1,0,0,0,1,
          0,1,0,1,0,1,1,1,0,0,0,0,0,0,1,1,1,1,0,0,1,0,1,1,1,0,0,0,1,0,1,1,1,1,1,1,0,1,0,1,1,0,0,0,1,
          1,1,0,1,1,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,1,1,0,0,0,1,0,1,0,0,0,0,0,1,1,0,
          0,1,0,1,1,1,0,0,1,0,0,0,0,1,0,1,0,0,0,0,0,1,0,1,0,1,1,0,1,0,0,0,0,0,1,1,0,1,0 };
        _Total_fixed_length -= _Adjustment[_Ryu_exponent];
        // _Whole_digits doesn't need to be adjusted because these cases won't refer to it later.
      }
    } else if (_Whole_digits > 0) { // case "17.29"
      _Total_fixed_length = __olength + 1;
    } else { // case "0.001729"
      _Total_fixed_length = static_cast<uint32_t>(2 - _Ryu_exponent);
    }

    if (_Last - _First < static_cast<ptrdiff_t>(_Total_fixed_length)) {
      return { _Last, errc::value_too_large };
    }

    char* _Mid;
    if (_Ryu_exponent > 0) { // case "172900"
      bool _Can_use_ryu;

      if (_Ryu_exponent > 22) { // 10^22 is the largest power of 10 that's exactly representable as a double.
        _Can_use_ryu = false;
      } else {
        // Ryu generated X: __v.__mantissa * 10^_Ryu_exponent
        // __v.__mantissa == 2^_Trailing_zero_bits * (__v.__mantissa >> _Trailing_zero_bits)
        // 10^_Ryu_exponent == 2^_Ryu_exponent * 5^_Ryu_exponent

        // _Trailing_zero_bits is [0, 56] (aside: because 2^56 is the largest power of 2
        // with 17 decimal digits, which is double's round-trip limit.)
        // _Ryu_exponent is [1, 22].
        // Normalization adds [2, 52] (aside: at least 2 because the pre-normalized mantissa is at least 5).
        // This adds up to [3, 130], which is well below double's maximum binary exponent 1023.

        // Therefore, we just need to consider (__v.__mantissa >> _Trailing_zero_bits) * 5^_Ryu_exponent.

        // If that product would exceed 53 bits, then X can't be exactly represented as a double.
        // (That's not a problem for round-tripping, because X is close enough to the original double,
        // but X isn't mathematically equal to the original double.) This requires a high-precision fallback.

        // If the product is 53 bits or smaller, then X can be exactly represented as a double (and we don't
        // need to re-synthesize it; the original double must have been X, because Ryu wouldn't produce the
        // same output for two different doubles X and Y). This allows Ryu's output to be used (zero-filled).
        // (2^53 - 1) / 5^0 (for indexing), (2^53 - 1) / 5^1, ..., (2^53 - 1) / 5^22
        static constexpr uint64_t _Max_shifted_mantissa[23] = {
          9007199254740991u, 1801439850948198u, 360287970189639u, 72057594037927u, 14411518807585u,
          2882303761517u, 576460752303u, 115292150460u, 23058430092u, 4611686018u, 922337203u, 184467440u,
          36893488u, 7378697u, 1475739u, 295147u, 59029u, 11805u, 2361u, 472u, 94u, 18u, 3u };

        unsigned long _Trailing_zero_bits;
#if _LIBCPP_HAS_BITSCAN64
        (void) _BitScanForward64(&_Trailing_zero_bits, __v.__mantissa); // __v.__mantissa is guaranteed nonzero
#else // ^^^ 64-bit ^^^ / vvv 32-bit vvv
        const uint32_t _Low_mantissa = static_cast<uint32_t>(__v.__mantissa);
        if (_Low_mantissa != 0) {
          (void) _BitScanForward(&_Trailing_zero_bits, _Low_mantissa);
        } else {
          const uint32_t _High_mantissa = static_cast<uint32_t>(__v.__mantissa >> 32); // nonzero here
          (void) _BitScanForward(&_Trailing_zero_bits, _High_mantissa);
          _Trailing_zero_bits += 32;
        }
#endif // ^^^ 32-bit ^^^
        const uint64_t _Shifted_mantissa = __v.__mantissa >> _Trailing_zero_bits;
        _Can_use_ryu = _Shifted_mantissa <= _Max_shifted_mantissa[_Ryu_exponent];
      }
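      // For illustration: for __v.__mantissa == 1729 and _Ryu_exponent == 3, there are no trailing
      // zero bits and 1729 <= _Max_shifted_mantissa[3] == 72057594037927, so 1729 * 5^3 still fits
      // in 53 bits and Ryu's digits can simply be zero-filled below to produce "1729000".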
      if (!_Can_use_ryu) {
        // Print the integer exactly.
        // Performance note: This will redundantly perform bounds checking.
        // Performance note: This will redundantly decompose the IEEE representation.
        return __d2fixed_buffered_n(_First, _Last, __f, 0);
      }

      // _Can_use_ryu
      // Print the decimal digits, left-aligned within [_First, _First + _Total_fixed_length).
      _Mid = _First + __olength;
    } else { // cases "1729", "17.29", and "0.001729"
      // Print the decimal digits, right-aligned within [_First, _First + _Total_fixed_length).
      _Mid = _First + _Total_fixed_length;
    }

    // We prefer 32-bit operations, even on 64-bit platforms.
    // We have at most 17 digits, and uint32_t can store 9 digits.
    // If _Output doesn't fit into uint32_t, we cut off 8 digits,
    // so the rest will fit into uint32_t.
    if ((_Output >> 32) != 0) {
      // Expensive 64-bit division.
      const uint64_t __q = __div1e8(_Output);
      uint32_t __output2 = static_cast<uint32_t>(_Output - 100000000 * __q);
      _Output = __q;

      const uint32_t __c = __output2 % 10000;
      __output2 /= 10000;
      const uint32_t __d = __output2 % 10000;
      const uint32_t __c0 = (__c % 100) << 1;
      const uint32_t __c1 = (__c / 100) << 1;
      const uint32_t __d0 = (__d % 100) << 1;
      const uint32_t __d1 = (__d / 100) << 1;

      std::memcpy(_Mid -= 2, __DIGIT_TABLE + __c0, 2);
      std::memcpy(_Mid -= 2, __DIGIT_TABLE + __c1, 2);
      std::memcpy(_Mid -= 2, __DIGIT_TABLE + __d0, 2);
      std::memcpy(_Mid -= 2, __DIGIT_TABLE + __d1, 2);
    }
    uint32_t __output2 = static_cast<uint32_t>(_Output);
    while (__output2 >= 10000) {
#ifdef __clang__ // TRANSITION, LLVM-38217
      const uint32_t __c = __output2 - 10000 * (__output2 / 10000);
#else
      const uint32_t __c = __output2 % 10000;
#endif
      __output2 /= 10000;
      const uint32_t __c0 = (__c % 100) << 1;
      const uint32_t __c1 = (__c / 100) << 1;
      std::memcpy(_Mid -= 2, __DIGIT_TABLE + __c0, 2);
      std::memcpy(_Mid -= 2, __DIGIT_TABLE + __c1, 2);
    }
    if (__output2 >= 100) {
      const uint32_t __c = (__output2 % 100) << 1;
      __output2 /= 100;
      std::memcpy(_Mid -= 2, __DIGIT_TABLE + __c, 2);
    }
    if (__output2 >= 10) {
      const uint32_t __c = __output2 << 1;
      std::memcpy(_Mid -= 2, __DIGIT_TABLE + __c, 2);
    } else {
      *--_Mid = static_cast<char>('0' + __output2);
    }

    if (_Ryu_exponent > 0) { // case "172900" with _Can_use_ryu
      // Performance note: it might be more efficient to do this immediately after setting _Mid.
      std::memset(_First + __olength, '0', static_cast<size_t>(_Ryu_exponent));
    } else if (_Ryu_exponent == 0) { // case "1729"
      // Done!
    } else if (_Whole_digits > 0) { // case "17.29"
      // Performance note: moving digits might not be optimal.
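      // For illustration: with _Output == 1729 and _Whole_digits == 2, the digits "1729" were written
      // right-aligned into [_First + 1, _First + 5); moving the first two forward and writing '.' at
      // _First[2] yields "17.29".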
      std::memmove(_First, _First + 1, static_cast<size_t>(_Whole_digits));
      _First[_Whole_digits] = '.';
    } else { // case "0.001729"
      // Performance note: a larger memset() followed by overwriting '.' might be more efficient.
      _First[0] = '0';
      _First[1] = '.';
      std::memset(_First + 2, '0', static_cast<size_t>(-_Whole_digits));
    }

    return { _First + _Total_fixed_length, errc{} };
  }

  const uint32_t _Total_scientific_length = __olength + (__olength > 1) // digits + possible decimal point
    + (-100 < _Scientific_exponent && _Scientific_exponent < 100 ? 4 : 5); // + scientific exponent
  if (_Last - _First < static_cast<ptrdiff_t>(_Total_scientific_length)) {
    return { _Last, errc::value_too_large };
  }
  char* const __result = _First;

  // Print the decimal digits.
  uint32_t __i = 0;
  // We prefer 32-bit operations, even on 64-bit platforms.
  // We have at most 17 digits, and uint32_t can store 9 digits.
  // If _Output doesn't fit into uint32_t, we cut off 8 digits,
  // so the rest will fit into uint32_t.
  if ((_Output >> 32) != 0) {
    // Expensive 64-bit division.
    const uint64_t __q = __div1e8(_Output);
    uint32_t __output2 = static_cast<uint32_t>(_Output) - 100000000 * static_cast<uint32_t>(__q);
    _Output = __q;

    const uint32_t __c = __output2 % 10000;
    __output2 /= 10000;
    const uint32_t __d = __output2 % 10000;
    const uint32_t __c0 = (__c % 100) << 1;
    const uint32_t __c1 = (__c / 100) << 1;
    const uint32_t __d0 = (__d % 100) << 1;
    const uint32_t __d1 = (__d / 100) << 1;
    std::memcpy(__result + __olength - __i - 1, __DIGIT_TABLE + __c0, 2);
    std::memcpy(__result + __olength - __i - 3, __DIGIT_TABLE + __c1, 2);
    std::memcpy(__result + __olength - __i - 5, __DIGIT_TABLE + __d0, 2);
    std::memcpy(__result + __olength - __i - 7, __DIGIT_TABLE + __d1, 2);
    __i += 8;
  }
  uint32_t __output2 = static_cast<uint32_t>(_Output);
  while (__output2 >= 10000) {
#ifdef __clang__ // TRANSITION, LLVM-38217
    const uint32_t __c = __output2 - 10000 * (__output2 / 10000);
#else
    const uint32_t __c = __output2 % 10000;
#endif
    __output2 /= 10000;
    const uint32_t __c0 = (__c % 100) << 1;
    const uint32_t __c1 = (__c / 100) << 1;
    std::memcpy(__result + __olength - __i - 1, __DIGIT_TABLE + __c0, 2);
    std::memcpy(__result + __olength - __i - 3, __DIGIT_TABLE + __c1, 2);
    __i += 4;
  }
  if (__output2 >= 100) {
    const uint32_t __c = (__output2 % 100) << 1;
    __output2 /= 100;
    std::memcpy(__result + __olength - __i - 1, __DIGIT_TABLE + __c, 2);
    __i += 2;
  }
  if (__output2 >= 10) {
    const uint32_t __c = __output2 << 1;
    // We can't use memcpy here: the decimal dot goes between these two digits.
    __result[2] = __DIGIT_TABLE[__c + 1];
    __result[0] = __DIGIT_TABLE[__c];
  } else {
    __result[0] = static_cast<char>('0' + __output2);
  }

  // Print decimal point if needed.
  uint32_t __index;
  if (__olength > 1) {
    __result[1] = '.';
    __index = __olength + 1;
  } else {
    __index = 1;
  }

  // Print the exponent.
  __result[__index++] = 'e';
  if (_Scientific_exponent < 0) {
    __result[__index++] = '-';
    _Scientific_exponent = -_Scientific_exponent;
  } else {
    __result[__index++] = '+';
  }

  if (_Scientific_exponent >= 100) {
    const int32_t __c = _Scientific_exponent % 10;
    std::memcpy(__result + __index, __DIGIT_TABLE + 2 * (_Scientific_exponent / 10), 2);
    __result[__index + 2] = static_cast<char>('0' + __c);
    __index += 3;
  } else {
    std::memcpy(__result + __index, __DIGIT_TABLE + 2 * _Scientific_exponent, 2);
    __index += 2;
  }

  return { _First + _Total_scientific_length, errc{} };
}

[[nodiscard]] _LIBCPP_HIDE_FROM_ABI inline bool __d2d_small_int(const uint64_t __ieeeMantissa, const uint32_t __ieeeExponent,
    __floating_decimal_64* const __v) {
  const uint64_t __m2 = (1ull << __DOUBLE_MANTISSA_BITS) | __ieeeMantissa;
  const int32_t __e2 = static_cast<int32_t>(__ieeeExponent) - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS;

  if (__e2 > 0) {
    // f = __m2 * 2^__e2 >= 2^53 is an integer.
    // Ignore this case for now.
    return false;
  }

  if (__e2 < -52) {
    // f < 1.
    return false;
  }

  // Since 2^52 <= __m2 < 2^53 and 0 <= -__e2 <= 52: 1 <= f = __m2 / 2^-__e2 < 2^53.
  // Test if the lower -__e2 bits of the significand are 0, i.e. whether the fraction is 0.
  const uint64_t __mask = (1ull << -__e2) - 1;
  const uint64_t __fraction = __m2 & __mask;
  if (__fraction != 0) {
    return false;
  }

  // f is an integer in the range [1, 2^53).
  // Note: __mantissa might contain trailing (decimal) 0's.
  // Note: since 2^53 < 10^16, there is no need to adjust __decimalLength17().
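  // For illustration: for the double 1024.0 (== 2^10), __e2 == -42 and __m2 == 1ull << 52, so the
  // low 42 bits are all zero and the result is __v->__mantissa == 1024 with __v->__exponent == 0.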
  __v->__mantissa = __m2 >> -__e2;
  __v->__exponent = 0;
  return true;
}

[[nodiscard]] to_chars_result __d2s_buffered_n(char* const _First, char* const _Last, const double __f,
    const chars_format _Fmt) {

  // Step 1: Decode the floating-point number, and unify normalized and subnormal cases.
  const uint64_t __bits = __double_to_bits(__f);

  // Case distinction; exit early for the easy cases.
  if (__bits == 0) {
    if (_Fmt == chars_format::scientific) {
      if (_Last - _First < 5) {
        return { _Last, errc::value_too_large };
      }

      std::memcpy(_First, "0e+00", 5);

      return { _First + 5, errc{} };
    }

    // Print "0" for chars_format::fixed, chars_format::general, and chars_format{}.
    if (_First == _Last) {
      return { _Last, errc::value_too_large };
    }

    *_First = '0';

    return { _First + 1, errc{} };
  }

  // Decode __bits into mantissa and exponent.
  const uint64_t __ieeeMantissa = __bits & ((1ull << __DOUBLE_MANTISSA_BITS) - 1);
  const uint32_t __ieeeExponent = static_cast<uint32_t>(__bits >> __DOUBLE_MANTISSA_BITS);

  if (_Fmt == chars_format::fixed) {
    // const uint64_t _Mantissa2 = __ieeeMantissa | (1ull << __DOUBLE_MANTISSA_BITS); // restore implicit bit
    const int32_t _Exponent2 = static_cast<int32_t>(__ieeeExponent)
      - __DOUBLE_BIAS - __DOUBLE_MANTISSA_BITS; // bias and normalization

    // Normal values are equal to _Mantissa2 * 2^_Exponent2.
    // (Subnormals are different, but they'll be rejected by the _Exponent2 test here, so they can be ignored.)

    // For nonzero integers, _Exponent2 >= -52. (The minimum value occurs when _Mantissa2 * 2^_Exponent2 is 1.
    // In that case, _Mantissa2 is the implicit 1 bit followed by 52 zeros, so _Exponent2 is -52 to shift away
    // the zeros.) The dense range of exactly representable integers has negative or zero exponents
    // (as positive exponents make the range non-dense). For that dense range, Ryu will always be used:
    // every digit is necessary to uniquely identify the value, so Ryu must print them all.

    // Positive exponents are the non-dense range of exactly representable integers. This contains all of the values
    // for which Ryu can't be used (and a few Ryu-friendly values). We can save time by detecting positive
    // exponents here and skipping Ryu. Calling __d2fixed_buffered_n() with precision 0 is valid for all integers
    // (so it's okay if we call it with a Ryu-friendly value).
    if (_Exponent2 > 0) {
      return __d2fixed_buffered_n(_First, _Last, __f, 0);
    }
  }

  __floating_decimal_64 __v;
  const bool __isSmallInt = __d2d_small_int(__ieeeMantissa, __ieeeExponent, &__v);
  if (__isSmallInt) {
    // For small integers in the range [1, 2^53), __v.__mantissa might contain trailing (decimal) zeros.
    // For scientific notation we need to move these zeros into the exponent.
    // (This is not needed for fixed-point notation, so it might be beneficial to trim
    // trailing zeros in __to_chars only if needed - once fixed-point notation output is implemented.)
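    // For illustration: 1000.0 arrives here as __v.__mantissa == 1000, __v.__exponent == 0 and leaves
    // the loop below as __v.__mantissa == 1, __v.__exponent == 3, which prints as "1e+03" in
    // scientific notation.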
    for (;;) {
      const uint64_t __q = __div10(__v.__mantissa);
      const uint32_t __r = static_cast<uint32_t>(__v.__mantissa) - 10 * static_cast<uint32_t>(__q);
      if (__r != 0) {
        break;
      }
      __v.__mantissa = __q;
      ++__v.__exponent;
    }
  } else {
    __v = __d2d(__ieeeMantissa, __ieeeExponent);
  }

  return __to_chars(_First, _Last, __v, _Fmt, __f);
}

_LIBCPP_END_NAMESPACE_STD

// clang-format on