Path: blob/master/Utilities/cmliblzma/liblzma/rangecoder/range_decoder.h
3156 views
// SPDX-License-Identifier: 0BSD12///////////////////////////////////////////////////////////////////////////////3//4/// \file range_decoder.h5/// \brief Range Decoder6///7// Authors: Igor Pavlov8// Lasse Collin9//10///////////////////////////////////////////////////////////////////////////////1112#ifndef LZMA_RANGE_DECODER_H13#define LZMA_RANGE_DECODER_H1415#include "range_common.h"161718// Choose the range decoder variants to use using a bitmask.19// If no bits are set, only the basic version is used.20// If more than one version is selected for the same feature,21// the last one on the list below is used.22//23// Bitwise-or of the following enable branchless C versions:24// 0x01 normal bittrees25// 0x02 fixed-sized reverse bittrees26// 0x04 variable-sized reverse bittrees (not faster)27// 0x08 matched literal (not faster)28//29// GCC & Clang compatible x86-64 inline assembly:30// 0x010 normal bittrees31// 0x020 fixed-sized reverse bittrees32// 0x040 variable-sized reverse bittrees33// 0x080 matched literal34// 0x100 direct bits35//36// The default can be overridden at build time by defining37// LZMA_RANGE_DECODER_CONFIG to the desired mask.38//39// 2024-02-22: Feedback from benchmarks:40// - Brancless C (0x003) can be better than basic on x86-64 but often it's41// slightly worse on other archs. Since asm is much better on x86-64,42// branchless C is not used at all.43// - With x86-64 asm, there are slight differences between GCC and Clang44// and different processors. Overall 0x1F0 seems to be the best choice.45#ifndef LZMA_RANGE_DECODER_CONFIG46# if defined(__x86_64__) && !defined(__ILP32__) \47&& !defined(__NVCOMPILER) \48&& (defined(__GNUC__) || defined(__clang__))49# define LZMA_RANGE_DECODER_CONFIG 0x1F050# else51# define LZMA_RANGE_DECODER_CONFIG 052# endif53#endif545556// Negative RC_BIT_MODEL_TOTAL but the lowest RC_MOVE_BITS are flipped.57// This is useful for updating probability variables in branchless decoding:58//59// uint32_t decoded_bit = ...;60// probability tmp = RC_BIT_MODEL_OFFSET;61// tmp &= decoded_bit - 1;62// prob -= (prob + tmp) >> RC_MOVE_BITS;63#define RC_BIT_MODEL_OFFSET \64((UINT32_C(1) << RC_MOVE_BITS) - 1 - RC_BIT_MODEL_TOTAL)656667typedef struct {68uint32_t range;69uint32_t code;70uint32_t init_bytes_left;71} lzma_range_decoder;727374/// Reads the first five bytes to initialize the range decoder.75static inline lzma_ret76rc_read_init(lzma_range_decoder *rc, const uint8_t *restrict in,77size_t *restrict in_pos, size_t in_size)78{79while (rc->init_bytes_left > 0) {80if (*in_pos == in_size)81return LZMA_OK;8283// The first byte is always 0x00. It could have been omitted84// in LZMA2 but it wasn't, so one byte is wasted in every85// LZMA2 chunk.86if (rc->init_bytes_left == 5 && in[*in_pos] != 0x00)87return LZMA_DATA_ERROR;8889rc->code = (rc->code << 8) | in[*in_pos];90++*in_pos;91--rc->init_bytes_left;92}9394return LZMA_STREAM_END;95}969798/// Makes local copies of range decoder and *in_pos variables. Doing this99/// improves speed significantly. The range decoder macros expect also100/// variables 'in' and 'in_size' to be defined.101#define rc_to_local(range_decoder, in_pos, fast_mode_in_required) \102lzma_range_decoder rc = range_decoder; \103const uint8_t *rc_in_ptr = in + (in_pos); \104const uint8_t *rc_in_end = in + in_size; \105const uint8_t *rc_in_fast_end \106= (rc_in_end - rc_in_ptr) <= (fast_mode_in_required) \107? rc_in_ptr \108: rc_in_end - (fast_mode_in_required); \109(void)rc_in_fast_end; /* Silence a warning with HAVE_SMALL. */ \110uint32_t rc_bound111112113/// Evaluates to true if there is enough input remaining to use fast mode.114#define rc_is_fast_allowed() (rc_in_ptr < rc_in_fast_end)115116117/// Stores the local copes back to the range decoder structure.118#define rc_from_local(range_decoder, in_pos) \119do { \120range_decoder = rc; \121in_pos = (size_t)(rc_in_ptr - in); \122} while (0)123124125/// Resets the range decoder structure.126#define rc_reset(range_decoder) \127do { \128(range_decoder).range = UINT32_MAX; \129(range_decoder).code = 0; \130(range_decoder).init_bytes_left = 5; \131} while (0)132133134/// When decoding has been properly finished, rc.code is always zero unless135/// the input stream is corrupt. So checking this can catch some corrupt136/// files especially if they don't have any other integrity check.137#define rc_is_finished(range_decoder) \138((range_decoder).code == 0)139140141// Read the next input byte if needed.142#define rc_normalize() \143do { \144if (rc.range < RC_TOP_VALUE) { \145rc.range <<= RC_SHIFT_BITS; \146rc.code = (rc.code << RC_SHIFT_BITS) | *rc_in_ptr++; \147} \148} while (0)149150151/// If more input is needed but there is152/// no more input available, "goto out" is used to jump out of the main153/// decoder loop. The "_safe" macros are used in the Resumable decoder154/// mode in order to save the sequence to continue decoding from that155/// point later.156#define rc_normalize_safe(seq) \157do { \158if (rc.range < RC_TOP_VALUE) { \159if (rc_in_ptr == rc_in_end) { \160coder->sequence = seq; \161goto out; \162} \163rc.range <<= RC_SHIFT_BITS; \164rc.code = (rc.code << RC_SHIFT_BITS) | *rc_in_ptr++; \165} \166} while (0)167168169/// Start decoding a bit. This must be used together with rc_update_0()170/// and rc_update_1():171///172/// rc_if_0(prob) {173/// rc_update_0(prob);174/// // Do something175/// } else {176/// rc_update_1(prob);177/// // Do something else178/// }179///180#define rc_if_0(prob) \181rc_normalize(); \182rc_bound = (rc.range >> RC_BIT_MODEL_TOTAL_BITS) * (prob); \183if (rc.code < rc_bound)184185186#define rc_if_0_safe(prob, seq) \187rc_normalize_safe(seq); \188rc_bound = (rc.range >> RC_BIT_MODEL_TOTAL_BITS) * (prob); \189if (rc.code < rc_bound)190191192/// Update the range decoder state and the used probability variable to193/// match a decoded bit of 0.194///195/// The x86-64 assembly uses the commented method but it seems that,196/// at least on x86-64, the first version is slightly faster as C code.197#define rc_update_0(prob) \198do { \199rc.range = rc_bound; \200prob += (RC_BIT_MODEL_TOTAL - (prob)) >> RC_MOVE_BITS; \201/* prob -= ((prob) + RC_BIT_MODEL_OFFSET) >> RC_MOVE_BITS; */ \202} while (0)203204205/// Update the range decoder state and the used probability variable to206/// match a decoded bit of 1.207#define rc_update_1(prob) \208do { \209rc.range -= rc_bound; \210rc.code -= rc_bound; \211prob -= (prob) >> RC_MOVE_BITS; \212} while (0)213214215/// Decodes one bit and runs action0 or action1 depending on the decoded bit.216/// This macro is used as the last step in bittree reverse decoders since217/// those don't use "symbol" for anything else than indexing the probability218/// arrays.219#define rc_bit_last(prob, action0, action1) \220do { \221rc_if_0(prob) { \222rc_update_0(prob); \223action0; \224} else { \225rc_update_1(prob); \226action1; \227} \228} while (0)229230231#define rc_bit_last_safe(prob, action0, action1, seq) \232do { \233rc_if_0_safe(prob, seq) { \234rc_update_0(prob); \235action0; \236} else { \237rc_update_1(prob); \238action1; \239} \240} while (0)241242243/// Decodes one bit, updates "symbol", and runs action0 or action1 depending244/// on the decoded bit.245#define rc_bit(prob, action0, action1) \246rc_bit_last(prob, \247symbol <<= 1; action0, \248symbol = (symbol << 1) + 1; action1);249250251#define rc_bit_safe(prob, action0, action1, seq) \252rc_bit_last_safe(prob, \253symbol <<= 1; action0, \254symbol = (symbol << 1) + 1; action1, \255seq);256257// Unroll fixed-sized bittree decoding.258//259// A compile-time constant in final_add can be used to get rid of the high bit260// from symbol that is used for the array indexing (1U << bittree_bits).261// final_add may also be used to add offset to the result (LZMA length262// decoder does that).263//264// The reason to have final_add here is that in the asm code the addition265// can be done for free: in x86-64 there is SBB instruction with -1 as266// the immediate value, and final_add is combined with that value.267#define rc_bittree_bit(prob) \268rc_bit(prob, , )269270#define rc_bittree3(probs, final_add) \271do { \272symbol = 1; \273rc_bittree_bit(probs[symbol]); \274rc_bittree_bit(probs[symbol]); \275rc_bittree_bit(probs[symbol]); \276symbol += (uint32_t)(final_add); \277} while (0)278279#define rc_bittree6(probs, final_add) \280do { \281symbol = 1; \282rc_bittree_bit(probs[symbol]); \283rc_bittree_bit(probs[symbol]); \284rc_bittree_bit(probs[symbol]); \285rc_bittree_bit(probs[symbol]); \286rc_bittree_bit(probs[symbol]); \287rc_bittree_bit(probs[symbol]); \288symbol += (uint32_t)(final_add); \289} while (0)290291#define rc_bittree8(probs, final_add) \292do { \293symbol = 1; \294rc_bittree_bit(probs[symbol]); \295rc_bittree_bit(probs[symbol]); \296rc_bittree_bit(probs[symbol]); \297rc_bittree_bit(probs[symbol]); \298rc_bittree_bit(probs[symbol]); \299rc_bittree_bit(probs[symbol]); \300rc_bittree_bit(probs[symbol]); \301rc_bittree_bit(probs[symbol]); \302symbol += (uint32_t)(final_add); \303} while (0)304305306// Fixed-sized reverse bittree307#define rc_bittree_rev4(probs) \308do { \309symbol = 0; \310rc_bit_last(probs[symbol + 1], , symbol += 1); \311rc_bit_last(probs[symbol + 2], , symbol += 2); \312rc_bit_last(probs[symbol + 4], , symbol += 4); \313rc_bit_last(probs[symbol + 8], , symbol += 8); \314} while (0)315316317// Decode one bit from variable-sized reverse bittree. The loop is done318// in the code that uses this macro. This could be changed if the assembly319// version benefited from having the loop done in assembly but it didn't320// seem so in early 2024.321//322// Also, if the loop was done here, the loop counter would likely be local323// to the macro so that it wouldn't modify yet another input variable.324// If a _safe version of a macro with a loop was done then a modifiable325// input variable couldn't be avoided though.326#define rc_bit_add_if_1(probs, dest, value_to_add_if_1) \327rc_bit(probs[symbol], \328, \329dest += value_to_add_if_1);330331332// Matched literal333#define decode_with_match_bit \334t_match_byte <<= 1; \335t_match_bit = t_match_byte & t_offset; \336t_subcoder_index = t_offset + t_match_bit + symbol; \337rc_bit(probs[t_subcoder_index], \338t_offset &= ~t_match_bit, \339t_offset &= t_match_bit)340341#define rc_matched_literal(probs_base_var, match_byte) \342do { \343uint32_t t_match_byte = (match_byte); \344uint32_t t_match_bit; \345uint32_t t_subcoder_index; \346uint32_t t_offset = 0x100; \347symbol = 1; \348decode_with_match_bit; \349decode_with_match_bit; \350decode_with_match_bit; \351decode_with_match_bit; \352decode_with_match_bit; \353decode_with_match_bit; \354decode_with_match_bit; \355decode_with_match_bit; \356} while (0)357358359/// Decode a bit without using a probability.360//361// NOTE: GCC 13 and Clang/LLVM 16 can, at least on x86-64, optimize the bound362// calculation to use an arithmetic right shift so there's no need to provide363// the alternative code which, according to C99/C11/C23 6.3.1.3-p3 isn't364// perfectly portable: rc_bound = (uint32_t)((int32_t)rc.code >> 31);365#define rc_direct(dest, count_var) \366do { \367dest = (dest << 1) + 1; \368rc_normalize(); \369rc.range >>= 1; \370rc.code -= rc.range; \371rc_bound = UINT32_C(0) - (rc.code >> 31); \372dest += rc_bound; \373rc.code += rc.range & rc_bound; \374} while (--count_var > 0)375376377378#define rc_direct_safe(dest, count_var, seq) \379do { \380rc_normalize_safe(seq); \381rc.range >>= 1; \382rc.code -= rc.range; \383rc_bound = UINT32_C(0) - (rc.code >> 31); \384rc.code += rc.range & rc_bound; \385dest = (dest << 1) + (rc_bound + 1); \386} while (--count_var > 0)387388389//////////////////390// Branchless C //391//////////////////392393/// Decode a bit using a branchless method. This reduces the number of394/// mispredicted branches and thus can improve speed.395#define rc_c_bit(prob, action_bit, action_neg) \396do { \397probability *p = &(prob); \398rc_normalize(); \399rc_bound = (rc.range >> RC_BIT_MODEL_TOTAL_BITS) * *p; \400uint32_t rc_mask = rc.code >= rc_bound; /* rc_mask = decoded bit */ \401action_bit; /* action when rc_mask is 0 or 1 */ \402/* rc_mask becomes 0 if bit is 0 and 0xFFFFFFFF if bit is 1: */ \403rc_mask = 0U - rc_mask; \404rc.range &= rc_mask; /* If bit 0: set rc.range = 0 */ \405rc_bound ^= rc_mask; \406rc_bound -= rc_mask; /* If bit 1: rc_bound = 0U - rc_bound */ \407rc.range += rc_bound; \408rc_bound &= rc_mask; \409rc.code += rc_bound; \410action_neg; /* action when rc_mask is 0 or 0xFFFFFFFF */ \411rc_mask = ~rc_mask; /* If bit 0: all bits are set in rc_mask */ \412rc_mask &= RC_BIT_MODEL_OFFSET; \413*p -= (*p + rc_mask) >> RC_MOVE_BITS; \414} while (0)415416417// Testing on x86-64 give an impression that only the normal bittrees and418// the fixed-sized reverse bittrees are worth the branchless C code.419// It should be tested on other archs for which there isn't assembly code420// in this file.421422// Using addition in "(symbol << 1) + rc_mask" allows use of x86 LEA423// or RISC-V SH1ADD instructions. Compilers might infer it from424// "(symbol << 1) | rc_mask" too if they see that mask is 0 or 1 but425// the use of addition doesn't require such analysis from compilers.426#if LZMA_RANGE_DECODER_CONFIG & 0x01427#undef rc_bittree_bit428#define rc_bittree_bit(prob) \429rc_c_bit(prob, \430symbol = (symbol << 1) + rc_mask, \431)432#endif // LZMA_RANGE_DECODER_CONFIG & 0x01433434#if LZMA_RANGE_DECODER_CONFIG & 0x02435#undef rc_bittree_rev4436#define rc_bittree_rev4(probs) \437do { \438symbol = 0; \439rc_c_bit(probs[symbol + 1], symbol += rc_mask, ); \440rc_c_bit(probs[symbol + 2], symbol += rc_mask << 1, ); \441rc_c_bit(probs[symbol + 4], symbol += rc_mask << 2, ); \442rc_c_bit(probs[symbol + 8], symbol += rc_mask << 3, ); \443} while (0)444#endif // LZMA_RANGE_DECODER_CONFIG & 0x02445446#if LZMA_RANGE_DECODER_CONFIG & 0x04447#undef rc_bit_add_if_1448#define rc_bit_add_if_1(probs, dest, value_to_add_if_1) \449rc_c_bit(probs[symbol], \450symbol = (symbol << 1) + rc_mask, \451dest += (value_to_add_if_1) & rc_mask)452#endif // LZMA_RANGE_DECODER_CONFIG & 0x04453454455#if LZMA_RANGE_DECODER_CONFIG & 0x08456#undef decode_with_match_bit457#define decode_with_match_bit \458t_match_byte <<= 1; \459t_match_bit = t_match_byte & t_offset; \460t_subcoder_index = t_offset + t_match_bit + symbol; \461rc_c_bit(probs[t_subcoder_index], \462symbol = (symbol << 1) + rc_mask, \463t_offset &= ~t_match_bit ^ rc_mask)464#endif // LZMA_RANGE_DECODER_CONFIG & 0x08465466467////////////468// x86-64 //469////////////470471#if LZMA_RANGE_DECODER_CONFIG & 0x1F0472473// rc_asm_y and rc_asm_n are used as arguments to macros to control which474// strings to include or omit.475#define rc_asm_y(str) str476#define rc_asm_n(str)477478// There are a few possible variations for normalization.479// This is the smallest variant which is also used by LZMA SDK.480//481// - This has partial register write (the MOV from (%[in_ptr])).482//483// - INC saves one byte in code size over ADD. False dependency on484// partial flags from INC shouldn't become a problem on any processor485// because the instructions after normalization don't read the flags486// until SUB which sets all flags.487//488#define rc_asm_normalize \489"cmp %[top_value], %[range]\n\t" \490"jae 1f\n\t" \491"shl %[shift_bits], %[code]\n\t" \492"mov (%[in_ptr]), %b[code]\n\t" \493"shl %[shift_bits], %[range]\n\t" \494"inc %[in_ptr]\n" \495"1:\n"496497// rc_asm_calc(prob) is roughly equivalent to the C version of rc_if_0(prob)...498//499// rc_bound = (rc.range >> RC_BIT_MODEL_TOTAL_BITS) * (prob);500// if (rc.code < rc_bound)501//502// ...but the bound is stored in "range":503//504// t0 = range;505// range = (range >> RC_BIT_MODEL_TOTAL_BITS) * (prob);506// t0 -= range;507// t1 = code;508// code -= range;509//510// The carry flag (CF) from the last subtraction holds the negation of511// the decoded bit (if CF==0 then the decoded bit is 1).512// The values in t0 and t1 are needed for rc_update_0(prob) and513// rc_update_1(prob). If the bit is 0, rc_update_0(prob)...514//515// rc.range = rc_bound;516//517// ...has already been done but the "code -= range" has to be reverted using518// the old value stored in t1. (Also, prob needs to be updated.)519//520// If the bit is 1, rc_update_1(prob)...521//522// rc.range -= rc_bound;523// rc.code -= rc_bound;524//525// ...is already done for "code" but the value for "range" needs to be taken526// from t0. (Also, prob needs to be updated here as well.)527//528// The assignments from t0 and t1 can be done in a branchless manner with CMOV529// after the instructions from this macro. The CF from SUB tells which moves530// are needed.531#define rc_asm_calc(prob) \532"mov %[range], %[t0]\n\t" \533"shr %[bit_model_total_bits], %[range]\n\t" \534"imul %[" prob "], %[range]\n\t" \535"sub %[range], %[t0]\n\t" \536"mov %[code], %[t1]\n\t" \537"sub %[range], %[code]\n\t"538539// Also, prob needs to be updated: The update math depends on the decoded bit.540// It can be expressed in a few slightly different ways but this is fairly541// convenient here:542//543// prob -= (prob + (bit ? 0 : RC_BIT_MODEL_OFFSET)) >> RC_MOVE_BITS;544//545// To do it in branchless way when the negation of the decoded bit is in CF,546// both "prob" and "prob + RC_BIT_MODEL_OFFSET" are needed. Then the desired547// value can be picked with CMOV. The addition can be done using LEA without548// affecting CF.549//550// (This prob update method is a tiny bit different from LZMA SDK 23.01.551// In the LZMA SDK a single register is reserved solely for a constant to552// be used with CMOV when updating prob. That is fine since there are enough553// free registers to do so. The method used here uses one fewer register,554// which is valuable with inline assembly.)555//556// * * *557//558// In bittree decoding, each (unrolled) loop iteration decodes one bit559// and needs one prob variable. To make it faster, the prob variable of560// the iteration N+1 is loaded during iteration N. There are two possible561// prob variables to choose from for N+1. Both are loaded from memory and562// the correct one is chosen with CMOV using the same CF as is used for563// other things described above.564//565// This preloading/prefetching requires an extra register. To avoid566// useless moves from "preloaded prob register" to "current prob register",567// the macros swap between the two registers for odd and even iterations.568//569// * * *570//571// Finally, the decoded bit has to be stored in "symbol". Since the negation572// of the bit is in CF, this can be done with SBB: symbol -= CF - 1. That is,573// if the decoded bit is 0 (CF==1) the operation is a no-op "symbol -= 0"574// and when bit is 1 (CF==0) the operation is "symbol -= 0 - 1" which is575// the same as "symbol += 1".576//577// The instructions for all things are intertwined for a few reasons:578// - freeing temporary registers for new use579// - not modifying CF too early580// - instruction scheduling581//582// The first and last iterations can cheat a little. For example,583// on the first iteration "symbol" is known to start from 1 so it584// doesn't need to be read; it can even be immediately initialized585// to 2 to prepare for the second iteration of the loop.586//587// * * *588//589// a = number of the current prob variable (0 or 1)590// b = number of the next prob variable (1 or 0)591// *_only = rc_asm_y or _n to include or exclude code marked with them592#define rc_asm_bittree(a, b, first_only, middle_only, last_only) \593first_only( \594"movzwl 2(%[probs_base]), %[prob" #a "]\n\t" \595"mov $2, %[symbol]\n\t" \596"movzwl 4(%[probs_base]), %[prob" #b "]\n\t" \597) \598middle_only( \599/* Note the scaling of 4 instead of 2: */ \600"movzwl (%[probs_base], %q[symbol], 4), %[prob" #b "]\n\t" \601) \602last_only( \603"add %[symbol], %[symbol]\n\t" \604) \605\606rc_asm_normalize \607rc_asm_calc("prob" #a) \608\609"cmovae %[t0], %[range]\n\t" \610\611first_only( \612"movzwl 6(%[probs_base]), %[t0]\n\t" \613"cmovae %[t0], %[prob" #b "]\n\t" \614) \615middle_only( \616"movzwl 2(%[probs_base], %q[symbol], 4), %[t0]\n\t" \617"lea (%q[symbol], %q[symbol]), %[symbol]\n\t" \618"cmovae %[t0], %[prob" #b "]\n\t" \619) \620\621"lea %c[bit_model_offset](%q[prob" #a "]), %[t0]\n\t" \622"cmovb %[t1], %[code]\n\t" \623"mov %[symbol], %[t1]\n\t" \624"cmovae %[prob" #a "], %[t0]\n\t" \625\626first_only( \627"sbb $-1, %[symbol]\n\t" \628) \629middle_only( \630"sbb $-1, %[symbol]\n\t" \631) \632last_only( \633"sbb %[last_sbb], %[symbol]\n\t" \634) \635\636"shr %[move_bits], %[t0]\n\t" \637"sub %[t0], %[prob" #a "]\n\t" \638/* Scaling of 1 instead of 2 because symbol <<= 1. */ \639"mov %w[prob" #a "], (%[probs_base], %q[t1], 1)\n\t"640641// NOTE: The order of variables in __asm__ can affect speed and code size.642#define rc_asm_bittree_n(probs_base_var, final_add, asm_str) \643do { \644uint32_t t0; \645uint32_t t1; \646uint32_t t_prob0; \647uint32_t t_prob1; \648\649__asm__( \650asm_str \651: \652[range] "+&r"(rc.range), \653[code] "+&r"(rc.code), \654[t0] "=&r"(t0), \655[t1] "=&r"(t1), \656[prob0] "=&r"(t_prob0), \657[prob1] "=&r"(t_prob1), \658[symbol] "=&r"(symbol), \659[in_ptr] "+&r"(rc_in_ptr) \660: \661[probs_base] "r"(probs_base_var), \662[last_sbb] "n"(-1 - (final_add)), \663[top_value] "n"(RC_TOP_VALUE), \664[shift_bits] "n"(RC_SHIFT_BITS), \665[bit_model_total_bits] "n"(RC_BIT_MODEL_TOTAL_BITS), \666[bit_model_offset] "n"(RC_BIT_MODEL_OFFSET), \667[move_bits] "n"(RC_MOVE_BITS) \668: \669"cc", "memory"); \670} while (0)671672673#if LZMA_RANGE_DECODER_CONFIG & 0x010674#undef rc_bittree3675#define rc_bittree3(probs_base_var, final_add) \676rc_asm_bittree_n(probs_base_var, final_add, \677rc_asm_bittree(0, 1, rc_asm_y, rc_asm_n, rc_asm_n) \678rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \679rc_asm_bittree(0, 1, rc_asm_n, rc_asm_n, rc_asm_y) \680)681682#undef rc_bittree6683#define rc_bittree6(probs_base_var, final_add) \684rc_asm_bittree_n(probs_base_var, final_add, \685rc_asm_bittree(0, 1, rc_asm_y, rc_asm_n, rc_asm_n) \686rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \687rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \688rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \689rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \690rc_asm_bittree(1, 0, rc_asm_n, rc_asm_n, rc_asm_y) \691)692693#undef rc_bittree8694#define rc_bittree8(probs_base_var, final_add) \695rc_asm_bittree_n(probs_base_var, final_add, \696rc_asm_bittree(0, 1, rc_asm_y, rc_asm_n, rc_asm_n) \697rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \698rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \699rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \700rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \701rc_asm_bittree(1, 0, rc_asm_n, rc_asm_y, rc_asm_n) \702rc_asm_bittree(0, 1, rc_asm_n, rc_asm_y, rc_asm_n) \703rc_asm_bittree(1, 0, rc_asm_n, rc_asm_n, rc_asm_y) \704)705#endif // LZMA_RANGE_DECODER_CONFIG & 0x010706707708// Fixed-sized reverse bittree709//710// This uses the indexing that constructs the final value in symbol directly.711// add = 1, 2, 4, 8712// dcur = -, 4, 8, 16713// dnext0 = 4, 8, 16, -714// dnext0 = 6, 12, 24, -715#define rc_asm_bittree_rev(a, b, add, dcur, dnext0, dnext1, \716first_only, middle_only, last_only) \717first_only( \718"movzwl 2(%[probs_base]), %[prob" #a "]\n\t" \719"xor %[symbol], %[symbol]\n\t" \720"movzwl 4(%[probs_base]), %[prob" #b "]\n\t" \721) \722middle_only( \723"movzwl " #dnext0 "(%[probs_base], %q[symbol], 2), " \724"%[prob" #b "]\n\t" \725) \726\727rc_asm_normalize \728rc_asm_calc("prob" #a) \729\730"cmovae %[t0], %[range]\n\t" \731\732first_only( \733"movzwl 6(%[probs_base]), %[t0]\n\t" \734"cmovae %[t0], %[prob" #b "]\n\t" \735) \736middle_only( \737"movzwl " #dnext1 "(%[probs_base], %q[symbol], 2), %[t0]\n\t" \738"cmovae %[t0], %[prob" #b "]\n\t" \739) \740\741"lea " #add "(%q[symbol]), %[t0]\n\t" \742"cmovb %[t1], %[code]\n\t" \743middle_only( \744"mov %[symbol], %[t1]\n\t" \745) \746last_only( \747"mov %[symbol], %[t1]\n\t" \748) \749"cmovae %[t0], %[symbol]\n\t" \750"lea %c[bit_model_offset](%q[prob" #a "]), %[t0]\n\t" \751"cmovae %[prob" #a "], %[t0]\n\t" \752\753"shr %[move_bits], %[t0]\n\t" \754"sub %[t0], %[prob" #a "]\n\t" \755first_only( \756"mov %w[prob" #a "], 2(%[probs_base])\n\t" \757) \758middle_only( \759"mov %w[prob" #a "], " \760#dcur "(%[probs_base], %q[t1], 2)\n\t" \761) \762last_only( \763"mov %w[prob" #a "], " \764#dcur "(%[probs_base], %q[t1], 2)\n\t" \765)766767#if LZMA_RANGE_DECODER_CONFIG & 0x020768#undef rc_bittree_rev4769#define rc_bittree_rev4(probs_base_var) \770rc_asm_bittree_n(probs_base_var, 4, \771rc_asm_bittree_rev(0, 1, 1, -, 4, 6, rc_asm_y, rc_asm_n, rc_asm_n) \772rc_asm_bittree_rev(1, 0, 2, 4, 8, 12, rc_asm_n, rc_asm_y, rc_asm_n) \773rc_asm_bittree_rev(0, 1, 4, 8, 16, 24, rc_asm_n, rc_asm_y, rc_asm_n) \774rc_asm_bittree_rev(1, 0, 8, 16, -, -, rc_asm_n, rc_asm_n, rc_asm_y) \775)776#endif // LZMA_RANGE_DECODER_CONFIG & 0x020777778779#if LZMA_RANGE_DECODER_CONFIG & 0x040780#undef rc_bit_add_if_1781#define rc_bit_add_if_1(probs_base_var, dest_var, value_to_add_if_1) \782do { \783uint32_t t0; \784uint32_t t1; \785uint32_t t2 = (value_to_add_if_1); \786uint32_t t_prob; \787uint32_t t_index; \788\789__asm__( \790"movzwl (%[probs_base], %q[symbol], 2), %[prob]\n\t" \791"mov %[symbol], %[index]\n\t" \792\793"add %[dest], %[t2]\n\t" \794"add %[symbol], %[symbol]\n\t" \795\796rc_asm_normalize \797rc_asm_calc("prob") \798\799"cmovae %[t0], %[range]\n\t" \800"lea %c[bit_model_offset](%q[prob]), %[t0]\n\t" \801"cmovb %[t1], %[code]\n\t" \802"cmovae %[prob], %[t0]\n\t" \803\804"cmovae %[t2], %[dest]\n\t" \805"sbb $-1, %[symbol]\n\t" \806\807"sar %[move_bits], %[t0]\n\t" \808"sub %[t0], %[prob]\n\t" \809"mov %w[prob], (%[probs_base], %q[index], 2)" \810: \811[range] "+&r"(rc.range), \812[code] "+&r"(rc.code), \813[t0] "=&r"(t0), \814[t1] "=&r"(t1), \815[prob] "=&r"(t_prob), \816[index] "=&r"(t_index), \817[symbol] "+&r"(symbol), \818[t2] "+&r"(t2), \819[dest] "+&r"(dest_var), \820[in_ptr] "+&r"(rc_in_ptr) \821: \822[probs_base] "r"(probs_base_var), \823[top_value] "n"(RC_TOP_VALUE), \824[shift_bits] "n"(RC_SHIFT_BITS), \825[bit_model_total_bits] "n"(RC_BIT_MODEL_TOTAL_BITS), \826[bit_model_offset] "n"(RC_BIT_MODEL_OFFSET), \827[move_bits] "n"(RC_MOVE_BITS) \828: \829"cc", "memory"); \830} while (0)831#endif // LZMA_RANGE_DECODER_CONFIG & 0x040832833834// Literal decoding uses a normal 8-bit bittree but literal with match byte835// is more complex in picking the probability variable from the correct836// subtree. This doesn't use preloading/prefetching of the next prob because837// there are four choices instead of two.838//839// FIXME? The first iteration starts with symbol = 1 so it could be optimized840// by a tiny amount.841#define rc_asm_matched_literal(nonlast_only) \842"add %[offset], %[symbol]\n\t" \843"and %[offset], %[match_bit]\n\t" \844"add %[match_bit], %[symbol]\n\t" \845\846"movzwl (%[probs_base], %q[symbol], 2), %[prob]\n\t" \847\848"add %[symbol], %[symbol]\n\t" \849\850nonlast_only( \851"xor %[match_bit], %[offset]\n\t" \852"add %[match_byte], %[match_byte]\n\t" \853) \854\855rc_asm_normalize \856rc_asm_calc("prob") \857\858"cmovae %[t0], %[range]\n\t" \859"lea %c[bit_model_offset](%q[prob]), %[t0]\n\t" \860"cmovb %[t1], %[code]\n\t" \861"mov %[symbol], %[t1]\n\t" \862"cmovae %[prob], %[t0]\n\t" \863\864nonlast_only( \865"cmovae %[match_bit], %[offset]\n\t" \866"mov %[match_byte], %[match_bit]\n\t" \867) \868\869"sbb $-1, %[symbol]\n\t" \870\871"shr %[move_bits], %[t0]\n\t" \872/* Undo symbol += match_bit + offset: */ \873"and $0x1FF, %[symbol]\n\t" \874"sub %[t0], %[prob]\n\t" \875\876/* Scaling of 1 instead of 2 because symbol <<= 1. */ \877"mov %w[prob], (%[probs_base], %q[t1], 1)\n\t"878879880#if LZMA_RANGE_DECODER_CONFIG & 0x080881#undef rc_matched_literal882#define rc_matched_literal(probs_base_var, match_byte_value) \883do { \884uint32_t t0; \885uint32_t t1; \886uint32_t t_prob; \887uint32_t t_match_byte = (uint32_t)(match_byte_value) << 1; \888uint32_t t_match_bit = t_match_byte; \889uint32_t t_offset = 0x100; \890symbol = 1; \891\892__asm__( \893rc_asm_matched_literal(rc_asm_y) \894rc_asm_matched_literal(rc_asm_y) \895rc_asm_matched_literal(rc_asm_y) \896rc_asm_matched_literal(rc_asm_y) \897rc_asm_matched_literal(rc_asm_y) \898rc_asm_matched_literal(rc_asm_y) \899rc_asm_matched_literal(rc_asm_y) \900rc_asm_matched_literal(rc_asm_n) \901: \902[range] "+&r"(rc.range), \903[code] "+&r"(rc.code), \904[t0] "=&r"(t0), \905[t1] "=&r"(t1), \906[prob] "=&r"(t_prob), \907[match_bit] "+&r"(t_match_bit), \908[symbol] "+&r"(symbol), \909[match_byte] "+&r"(t_match_byte), \910[offset] "+&r"(t_offset), \911[in_ptr] "+&r"(rc_in_ptr) \912: \913[probs_base] "r"(probs_base_var), \914[top_value] "n"(RC_TOP_VALUE), \915[shift_bits] "n"(RC_SHIFT_BITS), \916[bit_model_total_bits] "n"(RC_BIT_MODEL_TOTAL_BITS), \917[bit_model_offset] "n"(RC_BIT_MODEL_OFFSET), \918[move_bits] "n"(RC_MOVE_BITS) \919: \920"cc", "memory"); \921} while (0)922#endif // LZMA_RANGE_DECODER_CONFIG & 0x080923924925// Doing the loop in asm instead of C seems to help a little.926#if LZMA_RANGE_DECODER_CONFIG & 0x100927#undef rc_direct928#define rc_direct(dest_var, count_var) \929do { \930uint32_t t0; \931uint32_t t1; \932\933__asm__( \934"2:\n\t" \935"add %[dest], %[dest]\n\t" \936"lea 1(%q[dest]), %[t1]\n\t" \937\938rc_asm_normalize \939\940"shr $1, %[range]\n\t" \941"mov %[code], %[t0]\n\t" \942"sub %[range], %[code]\n\t" \943"cmovns %[t1], %[dest]\n\t" \944"cmovs %[t0], %[code]\n\t" \945"dec %[count]\n\t" \946"jnz 2b\n\t" \947: \948[range] "+&r"(rc.range), \949[code] "+&r"(rc.code), \950[t0] "=&r"(t0), \951[t1] "=&r"(t1), \952[dest] "+&r"(dest_var), \953[count] "+&r"(count_var), \954[in_ptr] "+&r"(rc_in_ptr) \955: \956[top_value] "n"(RC_TOP_VALUE), \957[shift_bits] "n"(RC_SHIFT_BITS) \958: \959"cc", "memory"); \960} while (0)961#endif // LZMA_RANGE_DECODER_CONFIG & 0x100962963#endif // x86_64964965#endif966967968