Path: blob/main/sys/contrib/openzfs/module/zstd/lib/common/mem.h
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-only
/*
 * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef MEM_H_MODULE
#define MEM_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif

/*-****************************************
*  Dependencies
******************************************/
#include <stddef.h>     /* size_t, ptrdiff_t */
#include <string.h>     /* memcpy */


/*-****************************************
*  Compiler specifics
******************************************/
#if defined(_MSC_VER)   /* Visual Studio */
#   include <stdlib.h>  /* _byteswap_ulong */
#   include <intrin.h>  /* _byteswap_* */
#endif
#if defined(__GNUC__)
#  define MEM_STATIC static __inline __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#  define MEM_STATIC static inline
#elif defined(_MSC_VER)
#  define MEM_STATIC static __inline
#else
#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif

#ifndef __has_builtin
#  define __has_builtin(x) 0  /* compat. with non-clang compilers */
#endif

/* code only tested on 32 and 64 bits systems */
#define MEM_STATIC_ASSERT(c)   { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }

/* detects whether we are being compiled under msan */
#if defined (__has_feature)
#  if __has_feature(memory_sanitizer)
#    define MEMORY_SANITIZER 1
#  endif
#endif

#if defined (MEMORY_SANITIZER)
/* Not all platforms that support msan provide sanitizers/msan_interface.h.
 * We therefore declare the functions we need ourselves, rather than trying to
 * include the header file... */

#include <stdint.h> /* intptr_t */

/* Make memory region fully initialized (without changing its contents). */
void __msan_unpoison(const volatile void *a, size_t size);

/* Make memory region fully uninitialized (without changing its contents).
   This is a legacy interface that does not update origin information. Use
   __msan_allocated_memory() instead. */
void __msan_poison(const volatile void *a, size_t size);

/* Returns the offset of the first (at least partially) poisoned byte in the
   memory range, or -1 if the whole range is good. */
intptr_t __msan_test_shadow(const volatile void *x, size_t size);
#endif
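
/* Illustrative usage sketch of the msan interface declared above (the
 * `workspace` buffer and `workspaceSize` are hypothetical, not defined here):
 *
 *     __msan_unpoison(workspace, workspaceSize);   // mark reused scratch memory as initialized
 *     __msan_poison(workspace, workspaceSize);     // mark it uninitialized again
 */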

/* detects whether we are being compiled under asan */
#if defined (ZFS_ASAN_ENABLED)
#  define ADDRESS_SANITIZER 1
#  define ZSTD_ASAN_DONT_POISON_WORKSPACE
#endif

#if defined (ADDRESS_SANITIZER)
/* Not all platforms that support asan provide sanitizers/asan_interface.h.
 * We therefore declare the functions we need ourselves, rather than trying to
 * include the header file... */

/**
 * Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
 *
 * This memory must be previously allocated by your program. Instrumented
 * code is forbidden from accessing addresses in this region until it is
 * unpoisoned. This function is not guaranteed to poison the entire region -
 * it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
 * alignment restrictions.
 *
 * \note This function is not thread-safe because no two threads can poison or
 * unpoison memory in the same memory region simultaneously.
 *
 * \param addr Start of memory region.
 * \param size Size of memory region. */
void __asan_poison_memory_region(void const volatile *addr, size_t size);

/**
 * Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
 *
 * This memory must be previously allocated by your program. Accessing
 * addresses in this region is allowed until this region is poisoned again.
 * This function could unpoison a super-region of <c>[addr, addr+size)</c> due
 * to ASan alignment restrictions.
 *
 * \note This function is not thread-safe because no two threads can
 * poison or unpoison memory in the same memory region simultaneously.
 *
 * \param addr Start of memory region.
 * \param size Size of memory region. */
void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
#endif


/*-**************************************************************
*  Basic Types
*****************************************************************/
#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
  typedef  uint8_t BYTE;
  typedef uint16_t U16;
  typedef  int16_t S16;
  typedef uint32_t U32;
  typedef  int32_t S32;
  typedef uint64_t U64;
  typedef  int64_t S64;
#else
# include <limits.h>
#if CHAR_BIT != 8
#  error "this implementation requires char to be exactly 8-bit type"
#endif
  typedef unsigned char      BYTE;
#if USHRT_MAX != 65535
#  error "this implementation requires short to be exactly 16-bit type"
#endif
  typedef unsigned short     U16;
  typedef   signed short     S16;
#if UINT_MAX != 4294967295
#  error "this implementation requires int to be exactly 32-bit type"
#endif
  typedef unsigned int       U32;
  typedef   signed int       S32;
/* note : there are no limits defined for long long type in C90.
 * limits exist in C99, however, in such case, <stdint.h> is preferred */
  typedef unsigned long long U64;
  typedef   signed long long S64;
#endif


/*-**************************************************************
*  Memory I/O
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below selects a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on compiler extensions (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method is portable but violates the C standard.
 *            It can generate buggy code on targets depending on alignment.
 *            In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6)
 * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#    define MEM_FORCE_MEMORY_ACCESS 2
#  elif defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__)
#    define MEM_FORCE_MEMORY_ACCESS 1
#  endif
#endif
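
/* For example, a build can force the portable memcpy() path while chasing a
 * suspected alignment problem (illustrative command line only):
 *
 *     cc -DMEM_FORCE_MEMORY_ACCESS=0 ...
 */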

MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }

MEM_STATIC unsigned MEM_isLittleEndian(void)
{
    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
    return one.c[0];
}

#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)

/* violates C standard, by lying on structure alignment.
   Only use if no other choice to achieve best performance on target platform */
MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }

#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)

/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32))
    __pragma( pack(push, 1) )
    typedef struct { U16 v; } unalign16;
    typedef struct { U32 v; } unalign32;
    typedef struct { U64 v; } unalign64;
    typedef struct { size_t v; } unalignArch;
    __pragma( pack(pop) )
#else
    typedef struct { U16 v; } __attribute__((packed)) unalign16;
    typedef struct { U32 v; } __attribute__((packed)) unalign32;
    typedef struct { U64 v; } __attribute__((packed)) unalign64;
    typedef struct { size_t v; } __attribute__((packed)) unalignArch;
#endif

MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; }
MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; }
MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; }
MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; }

#else

/* default method, safe and standard.
   can sometimes prove slower */

MEM_STATIC U16 MEM_read16(const void* memPtr)
{
    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U32 MEM_read32(const void* memPtr)
{
    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U64 MEM_read64(const void* memPtr)
{
    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC size_t MEM_readST(const void* memPtr)
{
    size_t val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC void MEM_write16(void* memPtr, U16 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write32(void* memPtr, U32 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write64(void* memPtr, U64 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

#endif /* MEM_FORCE_MEMORY_ACCESS */
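
/* Illustrative usage sketch: MEM_read32() hides whichever access method was
 * selected above, so a possibly-unaligned native-endian load reads the same
 * everywhere (the `src` pointer below is hypothetical):
 *
 *     const BYTE* src = ...;            // any byte pointer, possibly unaligned
 *     U32 raw = MEM_read32(src);        // native (host) byte order
 */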

MEM_STATIC U32 MEM_swap32(U32 in)
{
#if defined(_MSC_VER)     /* Visual Studio */
    return _byteswap_ulong(in);
#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
  || (defined(__clang__) && __has_builtin(__builtin_bswap32))
    return __builtin_bswap32(in);
#else
    return  ((in << 24) & 0xff000000 ) |
            ((in <<  8) & 0x00ff0000 ) |
            ((in >>  8) & 0x0000ff00 ) |
            ((in >> 24) & 0x000000ff );
#endif
}

MEM_STATIC U64 MEM_swap64(U64 in)
{
#if defined(_MSC_VER)     /* Visual Studio */
    return _byteswap_uint64(in);
#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
  || (defined(__clang__) && __has_builtin(__builtin_bswap64))
    return __builtin_bswap64(in);
#else
    return  ((in << 56) & 0xff00000000000000ULL) |
            ((in << 40) & 0x00ff000000000000ULL) |
            ((in << 24) & 0x0000ff0000000000ULL) |
            ((in <<  8) & 0x000000ff00000000ULL) |
            ((in >>  8) & 0x00000000ff000000ULL) |
            ((in >> 24) & 0x0000000000ff0000ULL) |
            ((in >> 40) & 0x000000000000ff00ULL) |
            ((in >> 56) & 0x00000000000000ffULL);
#endif
}

MEM_STATIC size_t MEM_swapST(size_t in)
{
    if (MEM_32bits())
        return (size_t)MEM_swap32((U32)in);
    else
        return (size_t)MEM_swap64((U64)in);
}

/*=== Little endian r/w ===*/

MEM_STATIC U16 MEM_readLE16(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read16(memPtr);
    else {
        const BYTE* p = (const BYTE*)memPtr;
        return (U16)(p[0] + (p[1]<<8));
    }
}

MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
{
    if (MEM_isLittleEndian()) {
        MEM_write16(memPtr, val);
    } else {
        BYTE* p = (BYTE*)memPtr;
        p[0] = (BYTE)val;
        p[1] = (BYTE)(val>>8);
    }
}

MEM_STATIC U32 MEM_readLE24(const void* memPtr)
{
    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
}

MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
{
    MEM_writeLE16(memPtr, (U16)val);
    ((BYTE*)memPtr)[2] = (BYTE)(val>>16);
}

MEM_STATIC U32 MEM_readLE32(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read32(memPtr);
    else
        return MEM_swap32(MEM_read32(memPtr));
}

MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32)
{
    if (MEM_isLittleEndian())
        MEM_write32(memPtr, val32);
    else
        MEM_write32(memPtr, MEM_swap32(val32));
}

MEM_STATIC U64 MEM_readLE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read64(memPtr);
    else
        return MEM_swap64(MEM_read64(memPtr));
}

MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64)
{
    if (MEM_isLittleEndian())
        MEM_write64(memPtr, val64);
    else
        MEM_write64(memPtr, MEM_swap64(val64));
}

MEM_STATIC size_t MEM_readLEST(const void* memPtr)
{
    if (MEM_32bits())
        return (size_t)MEM_readLE32(memPtr);
    else
        return (size_t)MEM_readLE64(memPtr);
}

MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val)
{
    if (MEM_32bits())
        MEM_writeLE32(memPtr, (U32)val);
    else
        MEM_writeLE64(memPtr, (U64)val);
}
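
/* Illustrative usage sketch: the little-endian helpers give a fixed byte
 * layout regardless of host endianness, e.g.
 *
 *     BYTE buf[4];
 *     MEM_writeLE32(buf, 0x11223344u);  // buf = { 0x44, 0x33, 0x22, 0x11 }
 *     U32 v = MEM_readLE32(buf);        // v == 0x11223344 on any host
 */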

/*=== Big endian r/w ===*/

MEM_STATIC U32 MEM_readBE32(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_swap32(MEM_read32(memPtr));
    else
        return MEM_read32(memPtr);
}

MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32)
{
    if (MEM_isLittleEndian())
        MEM_write32(memPtr, MEM_swap32(val32));
    else
        MEM_write32(memPtr, val32);
}

MEM_STATIC U64 MEM_readBE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_swap64(MEM_read64(memPtr));
    else
        return MEM_read64(memPtr);
}

MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64)
{
    if (MEM_isLittleEndian())
        MEM_write64(memPtr, MEM_swap64(val64));
    else
        MEM_write64(memPtr, val64);
}

MEM_STATIC size_t MEM_readBEST(const void* memPtr)
{
    if (MEM_32bits())
        return (size_t)MEM_readBE32(memPtr);
    else
        return (size_t)MEM_readBE64(memPtr);
}

MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
{
    if (MEM_32bits())
        MEM_writeBE32(memPtr, (U32)val);
    else
        MEM_writeBE64(memPtr, (U64)val);
}


#if defined (__cplusplus)
}
#endif

#endif /* MEM_H_MODULE */