Path: thirdparty/sdl/stdlib/SDL_malloc.c
/*
  Simple DirectMedia Layer
  Copyright (C) 1997-2025 Sam Lantinga <[email protected]>

  This software is provided 'as-is', without any express or implied
  warranty.  In no event will the authors be held liable for any damages
  arising from the use of this software.

  Permission is granted to anyone to use this software for any purpose,
  including commercial applications, and to alter it and redistribute it
  freely, subject to the following restrictions:

  1. The origin of this software must not be misrepresented; you must not
     claim that you wrote the original software. If you use this software
     in a product, an acknowledgment in the product documentation would be
     appreciated but is not required.
  2. Altered source versions must be plainly marked as such, and must not be
     misrepresented as being the original software.
  3. This notice may not be removed or altered from any source distribution.
*/
#include "SDL_internal.h"

/* This file contains portable memory management functions for SDL */

#ifndef HAVE_MALLOC
#define LACKS_SYS_TYPES_H
#define LACKS_STDIO_H
#define LACKS_STRINGS_H
#define LACKS_STRING_H
#define LACKS_STDLIB_H
#define ABORT
#define NO_MALLOC_STATS 1
#define USE_LOCKS 1
#define USE_DL_PREFIX

/*
  This is a version (aka dlmalloc) of malloc/free/realloc written by
  Doug Lea and released to the public domain, as explained at
  http://creativecommons.org/publicdomain/zero/1.0/ Send questions,
  comments, complaints, performance data, etc to [email protected]

* Version 2.8.6 Wed Aug 29 06:57:58 2012  Doug Lea
  Note: There may be an updated version of this malloc obtainable at
          ftp://gee.cs.oswego.edu/pub/misc/malloc.c
        Check before installing!

* Quickstart

  This library is all in one file to simplify the most common usage:
  ftp it, compile it (-O3), and link it into another program. All of
  the compile-time options default to reasonable values for use on
  most platforms.  You might later want to step through various
  compile-time and dynamic tuning options.

  For convenience, an include file for code using this malloc is at:
     ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.6.h
  You don't really need this .h file unless you call functions not
  defined in your system include files.  The .h file contains only the
  excerpts from this file needed for using this malloc on ANSI C/C++
  systems, so long as you haven't changed compile-time options about
  naming and tuning parameters.  If you do, then you can create your
  own malloc.h that does include all settings by cutting at the point
  indicated below. Note that you may already by default be using a C
  library containing a malloc that is based on some version of this
  malloc (for example in linux). You might still want to use the one
  in this file to customize settings or to avoid overheads associated
  with library versions.

* Vital statistics:

  Supported pointer/size_t representation:       4 or 8 bytes
       size_t MUST be an unsigned type of the same width as
       pointers. (If you are using an ancient system that declares
       size_t as a signed type, or need it to be a different width
       than pointers, you can use a previous release of this malloc
       (e.g. 2.7.2) supporting these.)
  Alignment:                                     8 bytes (minimum)
       This suffices for nearly all current machines and C compilers.
       However, you can define MALLOC_ALIGNMENT to be wider than this
       if necessary (up to 128bytes), at the expense of using more space.

  Minimum overhead per allocated chunk:   4 or  8 bytes (if 4byte sizes)
                                          8 or 16 bytes (if 8byte sizes)
       Each malloced chunk has a hidden word of overhead holding size
       and status information, and additional cross-check word
       if FOOTERS is defined.

  Minimum allocated size: 4-byte ptrs:  16 bytes    (including overhead)
                          8-byte ptrs:  32 bytes    (including overhead)

       Even a request for zero bytes (i.e., malloc(0)) returns a
       pointer to something of the minimum allocatable size.
       The maximum overhead wastage (i.e., number of extra bytes
       allocated than were requested in malloc) is less than or equal
       to the minimum size, except for requests >= mmap_threshold that
       are serviced via mmap(), where the worst case wastage is about
       32 bytes plus the remainder from a system page (the minimal
       mmap unit); typically 4096 or 8192 bytes.

  Security: static-safe; optionally more or less
       The "security" of malloc refers to the ability of malicious
       code to accentuate the effects of errors (for example, freeing
       space that is not currently malloc'ed or overwriting past the
       ends of chunks) in code that calls malloc.  This malloc
       guarantees not to modify any memory locations below the base of
       heap, i.e., static variables, even in the presence of usage
       errors.  The routines additionally detect most improper frees
       and reallocs.  All this holds as long as the static bookkeeping
       for malloc itself is not corrupted by some other means.  This
       is only one aspect of security -- these checks do not, and
       cannot, detect all possible programming errors.

       If FOOTERS is defined nonzero, then each allocated chunk
       carries an additional check word to verify that it was malloced
       from its space.  These check words are the same within each
       execution of a program using malloc, but differ across
       executions, so externally crafted fake chunks cannot be
       freed.  This improves security by rejecting frees/reallocs that
       could corrupt heap memory, in addition to the checks preventing
       writes to statics that are always on.  This may further improve
       security at the expense of time and space overhead.  (Note that
       FOOTERS may also be worth using with MSPACES.)

       By default detected errors cause the program to abort (calling
       "abort()").  You can override this to instead proceed past
       errors by defining PROCEED_ON_ERROR.  In this case, a bad free
       has no effect, and a malloc that encounters a bad address
       caused by user overwrites will ignore the bad address by
       dropping pointers and indices to all known memory.  This may
       be appropriate for programs that should continue if at all
       possible in the face of programming errors, although they may
       run out of memory because dropped memory is never reclaimed.

       If you don't like either of these options, you can define
       CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
       else.  And if you are sure that your program using malloc has
       no errors or vulnerabilities, you can define INSECURE to 1,
       which might (or might not) provide a small performance improvement.

       It is also possible to limit the maximum total allocatable
       space, using malloc_set_footprint_limit.  This is not
       designed as a security feature in itself (calls to set limits
       are not screened or privileged), but may be useful as one
       aspect of a secure implementation.
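       For instance (an illustrative sketch, not part of the original
       notes; the 64MB cap is an arbitrary value chosen for the example):
         size_t granted = malloc_set_footprint_limit((size_t)64*1024*1024);
         // 'granted' holds the page-rounded limit actually enforced;
         // requests that would push the footprint past it will then fail.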
  Thread-safety: NOT thread-safe unless USE_LOCKS defined non-zero
       When USE_LOCKS is defined, each public call to malloc, free,
       etc is surrounded with a lock. By default, this uses a plain
       pthread mutex, win32 critical section, or a spin-lock if
       available for the platform and not disabled by setting
       USE_SPIN_LOCKS=0.  However, if USE_RECURSIVE_LOCKS is defined,
       recursive versions are used instead (which are not required for
       base functionality but may be needed in layered extensions).
       Using a global lock is not especially fast, and can be a major
       bottleneck.  It is designed only to provide minimal protection
       in concurrent environments, and to provide a basis for
       extensions.  If you are using malloc in a concurrent program,
       consider instead using nedmalloc
       (http://www.nedprod.com/programs/portable/nedmalloc/) or
       ptmalloc (See http://www.malloc.de), which are derived from
       versions of this malloc.

  System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
       This malloc can use unix sbrk or any emulation (invoked using
       the CALL_MORECORE macro) and/or mmap/munmap or any emulation
       (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
       memory.  On most unix systems, it tends to work best if both
       MORECORE and MMAP are enabled.  On Win32, it uses emulations
       based on VirtualAlloc. It also uses common C library functions
       like memset.

  Compliance: I believe it is compliant with the Single Unix Specification
       (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
       others as well.

* Overview of algorithms

  This is not the fastest, most space-conserving, most portable, or
  most tunable malloc ever written. However it is among the fastest
  while also being among the most space-conserving, portable and
  tunable.  Consistent balance across these factors results in a good
  general-purpose allocator for malloc-intensive programs.

  In most ways, this malloc is a best-fit allocator. Generally, it
  chooses the best-fitting existing chunk for a request, with ties
  broken in approximately least-recently-used order. (This strategy
  normally maintains low fragmentation.) However, for requests less
  than 256bytes, it deviates from best-fit when there is not an
  exactly fitting available chunk by preferring to use space adjacent
  to that used for the previous small request, as well as by breaking
  ties in approximately most-recently-used order. (These enhance
  locality of series of small allocations.)  And for very large requests
  (>= 256Kb by default), it relies on system memory mapping
  facilities, if supported.  (This helps avoid carrying around and
  possibly fragmenting memory used only for large chunks.)

  All operations (except malloc_stats and mallinfo) have execution
  times that are bounded by a constant factor of the number of bits in
  a size_t, not counting any clearing in calloc or copying in realloc,
  or actions surrounding MORECORE and MMAP that have times
  proportional to the number of non-contiguous regions returned by
  system allocation routines, which is often just 1.
  In real-time
  applications, you can optionally suppress segment traversals using
  NO_SEGMENT_TRAVERSAL, which assures bounded execution even when
  system allocators return non-contiguous spaces, at the typical
  expense of carrying around more memory and increased fragmentation.

  The implementation is not very modular and seriously overuses
  macros. Perhaps someday all C compilers will do as good a job
  inlining modular code as can now be done by brute-force expansion,
  but now, enough of them seem not to.

  Some compilers issue a lot of warnings about code that is
  dead/unreachable only on some platforms, and also about intentional
  uses of negation on unsigned types. All known cases of each can be
  ignored.

  For a longer but out of date high-level description, see
     http://gee.cs.oswego.edu/dl/html/malloc.html

* MSPACES
  If MSPACES is defined, then in addition to malloc, free, etc.,
  this file also defines mspace_malloc, mspace_free, etc. These
  are versions of malloc routines that take an "mspace" argument
  obtained using create_mspace, to control all internal bookkeeping.
  If ONLY_MSPACES is defined, only these versions are compiled.
  So if you would like to use this allocator for only some allocations,
  and your system malloc for others, you can compile with
  ONLY_MSPACES and then do something like...
    static mspace mymspace = create_mspace(0,0); // for example
    #define mymalloc(bytes)  mspace_malloc(mymspace, bytes)

  (Note: If you only need one instance of an mspace, you can instead
  use "USE_DL_PREFIX" to relabel the global malloc.)

  You can similarly create thread-local allocators by storing
  mspaces as thread-locals. For example:
    static __thread mspace tlms = 0;
    void*  tlmalloc(size_t bytes) {
      if (tlms == 0) tlms = create_mspace(0, 0);
      return mspace_malloc(tlms, bytes);
    }
    void  tlfree(void* mem) { mspace_free(tlms, mem); }

  Unless FOOTERS is defined, each mspace is completely independent.
  You cannot allocate from one and free to another (although
  conformance is only weakly checked, so usage errors are not always
  caught). If FOOTERS is defined, then each chunk carries around a tag
  indicating its originating mspace, and frees are directed to their
  originating spaces. Normally, this requires use of locks.
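  (For symmetry with the mymalloc example above, a matching release macro
  can be added in the same style -- an illustrative sketch only:
    #define myfree(mem)  mspace_free(mymspace, mem)
  so that everything allocated through mymalloc is returned to mymspace.)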
 -------------------------  Compile-time options ---------------------------

Be careful in setting #define values for numerical constants of type
size_t. On some systems, literal values are not automatically extended
to size_t precision unless they are explicitly casted. You can also
use the symbolic values MAX_SIZE_T, SIZE_T_ONE, etc below.

WIN32                    default: defined if _WIN32 defined
  Defining WIN32 sets up defaults for MS environment and compilers.
  Otherwise defaults are for unix. Beware that there seem to be some
  cases where this malloc might not be a pure drop-in replacement for
  Win32 malloc: Random-looking failures from Win32 GDI API's (eg;
  SetDIBits()) may be due to bugs in some video driver implementations
  when pixel buffers are malloc()ed, and the region spans more than
  one VirtualAlloc()ed region. Because dlmalloc uses a small (64Kb)
  default granularity, pixel buffers may straddle virtual allocation
  regions more often than when using the Microsoft allocator.  You can
  avoid this by using VirtualAlloc() and VirtualFree() for all pixel
  buffers rather than using malloc().  If this is not possible,
  recompile this malloc with a larger DEFAULT_GRANULARITY. Note:
  in cases where MSC and gcc (cygwin) are known to differ on WIN32,
  conditions use _MSC_VER to distinguish them.

DLMALLOC_EXPORT       default: extern
  Defines how public APIs are declared. If you want to export via a
  Windows DLL, you might define this as
    #define DLMALLOC_EXPORT extern  __declspec(dllexport)
  If you want a POSIX ELF shared object, you might use
    #define DLMALLOC_EXPORT extern __attribute__((visibility("default")))

MALLOC_ALIGNMENT         default: (size_t)(2 * sizeof(void *))
  Controls the minimum alignment for malloc'ed chunks.  It must be a
  power of two and at least 8, even on machines for which smaller
  alignments would suffice. It may be defined as larger than this
  though. Note however that code and data structures are optimized for
  the case of 8-byte alignment.

MSPACES                  default: 0 (false)
  If true, compile in support for independent allocation spaces.
  This is only supported if HAVE_MMAP is true.

ONLY_MSPACES             default: 0 (false)
  If true, only compile in mspace versions, not regular versions.

USE_LOCKS                default: 0 (false)
  Causes each call to each public routine to be surrounded with
  pthread or WIN32 mutex lock/unlock. (If set true, this can be
  overridden on a per-mspace basis for mspace versions.) If set to a
  non-zero value other than 1, locks are used, but their
  implementation is left out, so lock functions must be supplied manually,
  as described below.

USE_SPIN_LOCKS           default: 1 iff USE_LOCKS and spin locks available
  If true, uses custom spin locks for locking. This is currently
  supported only for gcc >= 4.1, older gccs on x86 platforms, and recent
  MS compilers.  Otherwise, posix locks or win32 critical sections are
  used.

USE_RECURSIVE_LOCKS      default: not defined
  If defined nonzero, uses recursive (aka reentrant) locks, otherwise
  uses plain mutexes. This is not required for malloc proper, but may
  be needed for layered allocators such as nedmalloc.

LOCK_AT_FORK             default: not defined
  If defined nonzero, performs pthread_atfork upon initialization
  to initialize child lock while holding parent lock. The implementation
  assumes that pthread locks (not custom locks) are being used. In other
  cases, you may need to customize the implementation.

FOOTERS                  default: 0
  If true, provide extra checking and dispatching by placing
  information in the footers of allocated chunks. This adds
  space and time overhead.

INSECURE                 default: 0
  If true, omit checks for usage errors and heap space overwrites.

USE_DL_PREFIX            default: NOT defined
  Causes compiler to prefix all public routines with the string 'dl'.
  This can be useful when you only want to use this malloc in one part
  of a program, using your regular system malloc elsewhere.
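  For example, with USE_DL_PREFIX defined (an illustrative sketch; the
  system allocator shown alongside is assumed, not required):
    void* p = dlmalloc(128);   // served by this file's allocator
    void* q = malloc(128);     // still served by the regular system malloc
    dlfree(p);
    free(q);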
MALLOC_INSPECT_ALL       default: NOT defined
  If defined, compiles malloc_inspect_all and mspace_inspect_all, that
  perform traversal of all heap space.  Unless access to these
  functions is otherwise restricted, you probably do not want to
  include them in secure implementations.

ABORT                    default: defined as abort()
  Defines how to abort on failed checks.  On most systems, a failed
  check cannot die with an "assert" or even print an informative
  message, because the underlying print routines in turn call malloc,
  which will fail again.  Generally, the best policy is to simply call
  abort(). It's not very useful to do more than this because many
  errors due to overwriting will show up as address faults (null, odd
  addresses etc) rather than malloc-triggered checks, so will also
  abort.  Also, most compilers know that abort() does not return, so
  can better optimize code conditionally calling it.

PROCEED_ON_ERROR           default: defined as 0 (false)
  Controls whether detected bad addresses cause them to be bypassed
  rather than aborting. If set, detected bad arguments to free and
  realloc are ignored. And all bookkeeping information is zeroed out
  upon a detected overwrite of freed heap space, thus losing the
  ability to ever return it from malloc again, but enabling the
  application to proceed. If PROCEED_ON_ERROR is defined, the
  static variable malloc_corruption_error_count is compiled in
  and can be examined to see if errors have occurred. This option
  generates slower code than the default abort policy.

DEBUG                    default: NOT defined
  The DEBUG setting is mainly intended for people trying to modify
  this code or diagnose problems when porting to new platforms.
  However, it may also be able to better isolate user errors than just
  using runtime checks.  The assertions in the check routines spell
  out in more detail the assumptions and invariants underlying the
  algorithms.  The checking is fairly extensive, and will slow down
  execution noticeably. Calling malloc_stats or mallinfo with DEBUG
  set will attempt to check every non-mmapped allocated and free chunk
  in the course of computing the summaries.

ABORT_ON_ASSERT_FAILURE   default: defined as 1 (true)
  Debugging assertion failures can be nearly impossible if your
  version of the assert macro causes malloc to be called, which will
  lead to a cascade of further failures, blowing the runtime stack.
  ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
  which will usually make debugging easier.

MALLOC_FAILURE_ACTION     default: sets errno to ENOMEM, or no-op on win32
  The action to take before "return 0" when malloc fails to be able to
  return memory because there is none available.

HAVE_MORECORE             default: 1 (true) unless win32 or ONLY_MSPACES
  True if this system supports sbrk or an emulation of it.

MORECORE                  default: sbrk
  The name of the sbrk-style system routine to call to obtain more
  memory.  See below for guidance on writing custom MORECORE
  functions. The type of the argument to sbrk/MORECORE varies across
  systems.  It cannot be size_t, because it supports negative
  arguments, so it is normally the signed type of the same width as
  size_t (sometimes declared as "intptr_t").  It doesn't much matter
  though. Internally, we only call it with arguments less than half
  the max value of a size_t, which should work across all reasonable
  possibilities, although sometimes generating compiler warnings.
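  As a concrete illustration (a minimal sketch only, not the full guidance
  given later in this file; the arena size and names are arbitrary
  assumptions), a port without sbrk could route MORECORE at a static buffer:

    static char my_arena[4 * 1024 * 1024];
    static size_t my_arena_used = 0;
    void* my_morecore(intptr_t increment) {
      if (increment <= 0)
        return (void*)&my_arena[my_arena_used];   // query/trim: report current top
      if (my_arena_used + (size_t)increment > sizeof(my_arena))
        return (void*)-1;                         // conventional failure value
      void* p = &my_arena[my_arena_used];
      my_arena_used += (size_t)increment;
      return p;
    }
    // built with e.g. -DMORECORE=my_morecore -DMORECORE_CONTIGUOUS=1
    //                 -DMORECORE_CANNOT_TRIM -DHAVE_MMAP=0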
MORECORE_CONTIGUOUS       default: 1 (true) if HAVE_MORECORE
  If true, take advantage of fact that consecutive calls to MORECORE
  with positive arguments always return contiguous increasing
  addresses.  This is true of unix sbrk. It does not hurt too much to
  set it true anyway, since malloc copes with non-contiguities.
  Setting it false when definitely non-contiguous saves time
  and possibly wasted space it would take to discover this though.

MORECORE_CANNOT_TRIM      default: NOT defined
  True if MORECORE cannot release space back to the system when given
  negative arguments. This is generally necessary only if you are
  using a hand-crafted MORECORE function that cannot handle negative
  arguments.

NO_SEGMENT_TRAVERSAL       default: 0
  If non-zero, suppresses traversals of memory segments
  returned by either MORECORE or CALL_MMAP. This disables
  merging of segments that are contiguous, and selectively
  releasing them to the OS if unused, but bounds execution times.

HAVE_MMAP                 default: 1 (true)
  True if this system supports mmap or an emulation of it.  If so, and
  HAVE_MORECORE is not true, MMAP is used for all system
  allocation.  If set and HAVE_MORECORE is true as well, MMAP is
  primarily used to directly allocate very large blocks. It is also
  used as a backup strategy in cases where MORECORE fails to provide
  space from system. Note: A single call to MUNMAP is assumed to be
  able to unmap memory that may have been allocated using multiple calls
  to MMAP, so long as they are adjacent.

HAVE_MREMAP               default: 1 on linux, else 0
  If true realloc() uses mremap() to re-allocate large blocks and
  extend or shrink allocation spaces.

MMAP_CLEARS               default: 1 except on WINCE.
  True if mmap clears memory so calloc doesn't need to. This is true
  for standard unix mmap using /dev/zero and on WIN32 except for WINCE.

USE_BUILTIN_FFS            default: 0 (i.e., not used)
  Causes malloc to use the builtin ffs() function to compute indices.
  Some compilers may recognize and intrinsify ffs to be faster than the
  supplied C version. Also, the case of x86 using gcc is special-cased
  to an asm instruction, so is already as fast as it can be, and so
  this setting has no effect. Similarly for Win32 under recent MS compilers.
  (On most x86s, the asm version is only slightly faster than the C version.)

malloc_getpagesize         default: derive from system includes, or 4096.
  The system page size. To the extent possible, this malloc manages
  memory from the system in page-size units.  This may be (and
  usually is) a function rather than a constant. This is ignored
  if WIN32, where page size is determined using getSystemInfo during
  initialization.

USE_DEV_RANDOM             default: 0 (i.e., not used)
  Causes malloc to use /dev/random to initialize secure magic seed for
  stamping footers. Otherwise, the current time is used.

NO_MALLINFO                default: 0
  If defined, don't compile "mallinfo". This can be a simple way
  of dealing with mismatches between system declarations and
  those in this file.

MALLINFO_FIELD_TYPE        default: size_t
  The type of the fields in the mallinfo struct. This was originally
  defined as "int" in SVID etc, but is more usefully defined as
  size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set

NO_MALLOC_STATS            default: 0
  If defined, don't compile "malloc_stats". This avoids calls to
  fprintf and bringing in stdio dependencies you might not want.

REALLOC_ZERO_BYTES_FREES    default: not defined
  This should be set if a call to realloc with zero bytes should
  be the same as a call to free. Some people think it should.
  Otherwise,
  since this malloc returns a unique pointer for malloc(0), so does
  realloc(p, 0).

LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H
LACKS_STDLIB_H LACKS_SCHED_H LACKS_TIME_H  default: NOT defined unless on WIN32
  Define these if your system does not have these header files.
  You might need to manually insert some of the declarations they provide.

DEFAULT_GRANULARITY        default: page size if MORECORE_CONTIGUOUS,
                                system_info.dwAllocationGranularity in WIN32,
                                otherwise 64K.
      Also settable using mallopt(M_GRANULARITY, x)
  The unit for allocating and deallocating memory from the system.  On
  most systems with contiguous MORECORE, there is no reason to
  make this more than a page. However, systems with MMAP tend to
  either require or encourage larger granularities.  You can increase
  this value to prevent system allocation functions from being called so
  often, especially if they are slow.  The value must be at least one
  page and must be a power of two.  Setting to 0 causes initialization
  to either page size or win32 region size.  (Note: In previous
  versions of malloc, the equivalent of this option was called
  "TOP_PAD")

DEFAULT_TRIM_THRESHOLD    default: 2MB
      Also settable using mallopt(M_TRIM_THRESHOLD, x)
  The maximum amount of unused top-most memory to keep before
  releasing via malloc_trim in free().  Automatic trimming is mainly
  useful in long-lived programs using contiguous MORECORE.  Because
  trimming via sbrk can be slow on some systems, and can sometimes be
  wasteful (in cases where programs immediately afterward allocate
  more large chunks) the value should be high enough so that your
  overall system performance would improve by releasing this much
  memory.  As a rough guide, you might set to a value close to the
  average size of a process (program) running on your system.
  Releasing this much memory would allow such a process to run in
  memory.  Generally, it is worth tuning trim thresholds when a
  program undergoes phases where several large chunks are allocated
  and released in ways that can reuse each other's storage, perhaps
  mixed with phases where there are no such chunks at all.  The trim
  value must be greater than page size to have any useful effect.  To
  disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
  some people use of mallocing a huge space and then freeing it at
  program startup, in an attempt to reserve system memory, doesn't
  have the intended effect under automatic trimming, since that memory
  will immediately be returned to the system.

DEFAULT_MMAP_THRESHOLD       default: 256K
      Also settable using mallopt(M_MMAP_THRESHOLD, x)
  The request size threshold for using MMAP to directly service a
  request. Requests of at least this size that cannot be allocated
  using already-existing space will be serviced via mmap.  (If enough
  normal freed space already exists it is used instead.)  Using mmap
  segregates relatively large chunks of memory so that they can be
  individually obtained and released from the host system. A request
  serviced through mmap is never reused by any other request (at least
  not directly; the system may just so happen to remap successive
  requests to the same locations).  Segregating space in this way has
  the benefits that: Mmapped space can always be individually released
  back to the system, which helps keep the system level memory demands
  of a long-lived program low.
Also, mapped memory doesn't become533`locked' between other chunks, as can happen with normally allocated534chunks, which means that even trimming via malloc_trim would not535release them. However, it has the disadvantage that the space536cannot be reclaimed, consolidated, and then used to service later537requests, as happens with normal chunks. The advantages of mmap538nearly always outweigh disadvantages for "large" chunks, but the539value of "large" may vary across systems. The default is an540empirically derived value that works well in most systems. You can541disable mmap by setting to MAX_SIZE_T.542543MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP544The number of consolidated frees between checks to release545unused segments when freeing. When using non-contiguous segments,546especially with multiple mspaces, checking only for topmost space547doesn't always suffice to trigger trimming. To compensate for this,548free() will, with a period of MAX_RELEASE_CHECK_RATE (or the549current number of segments, if greater) try to release unused550segments to the OS when freeing chunks that result in551consolidation. The best value for this parameter is a compromise552between slowing down frees with relatively costly checks that553rarely trigger versus holding on to unused memory. To effectively554disable, set to MAX_SIZE_T. This may lead to a very slight speed555improvement at the expense of carrying around more memory.556*/557558/* Version identifier to allow people to support multiple versions */559#ifndef DLMALLOC_VERSION560#define DLMALLOC_VERSION 20806561#endif /* DLMALLOC_VERSION */562563#ifndef DLMALLOC_EXPORT564#define DLMALLOC_EXPORT extern565#endif566567#ifndef WIN32568#ifdef _WIN32569#define WIN32 1570#endif /* _WIN32 */571#ifdef _WIN32_WCE572#define LACKS_FCNTL_H573#define WIN32 1574#endif /* _WIN32_WCE */575#endif /* WIN32 */576#ifdef WIN32577#define WIN32_LEAN_AND_MEAN578#include <windows.h>579#include <tchar.h>580#define HAVE_MMAP 1581#define HAVE_MORECORE 0582#define LACKS_UNISTD_H583#define LACKS_SYS_PARAM_H584#define LACKS_SYS_MMAN_H585#define LACKS_STRING_H586#define LACKS_STRINGS_H587#define LACKS_SYS_TYPES_H588// #define LACKS_ERRNO_H // File uses `EINVAL` and `ENOMEM` defines, so include is required. 
#define LACKS_SCHED_H
#ifndef MALLOC_FAILURE_ACTION
#define MALLOC_FAILURE_ACTION
#endif /* MALLOC_FAILURE_ACTION */
#ifndef MMAP_CLEARS
#ifdef _WIN32_WCE /* WINCE reportedly does not clear */
#define MMAP_CLEARS 0
#else
#define MMAP_CLEARS 1
#endif /* _WIN32_WCE */
#endif /* MMAP_CLEARS */
#endif  /* WIN32 */

#if defined(DARWIN) || defined(_DARWIN)
/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
#ifndef HAVE_MORECORE
#define HAVE_MORECORE 0
#define HAVE_MMAP 1
/* OSX allocators provide 16 byte alignment */
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT ((size_t)16U)
#endif
#endif  /* HAVE_MORECORE */
#endif  /* DARWIN */

#ifndef LACKS_SYS_TYPES_H
#include <sys/types.h>  /* For size_t */
#endif  /* LACKS_SYS_TYPES_H */

/* The maximum possible size_t value has all bits set */
#define MAX_SIZE_T           (~(size_t)0)

#ifndef USE_LOCKS /* ensure true if spin or recursive locks set */
#define USE_LOCKS  ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \
                    (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0))
#endif /* USE_LOCKS */

#if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */
#if ((defined(__GNUC__) &&                                              \
      ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) ||      \
       defined(__i386__) || defined(__x86_64__))) ||                    \
     (defined(_MSC_VER) && _MSC_VER>=1310))
#ifndef USE_SPIN_LOCKS
#define USE_SPIN_LOCKS 1
#endif /* USE_SPIN_LOCKS */
#elif USE_SPIN_LOCKS
#error "USE_SPIN_LOCKS defined without implementation"
#endif /* ... locks available... */
#elif !defined(USE_SPIN_LOCKS)
#define USE_SPIN_LOCKS 0
#endif /* USE_LOCKS */

#ifndef ONLY_MSPACES
#define ONLY_MSPACES 0
#endif  /* ONLY_MSPACES */
#ifndef MSPACES
#if ONLY_MSPACES
#define MSPACES 1
#else   /* ONLY_MSPACES */
#define MSPACES 0
#endif  /* ONLY_MSPACES */
#endif  /* MSPACES */
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *)))
#endif  /* MALLOC_ALIGNMENT */
#ifndef FOOTERS
#define FOOTERS 0
#endif  /* FOOTERS */
#ifndef ABORT
#define ABORT  abort()
#endif  /* ABORT */
#ifndef ABORT_ON_ASSERT_FAILURE
#define ABORT_ON_ASSERT_FAILURE 1
#endif  /* ABORT_ON_ASSERT_FAILURE */
#ifndef PROCEED_ON_ERROR
#define PROCEED_ON_ERROR 0
#endif  /* PROCEED_ON_ERROR */

#ifndef INSECURE
#define INSECURE 0
#endif  /* INSECURE */
#ifndef MALLOC_INSPECT_ALL
#define MALLOC_INSPECT_ALL 0
#endif  /* MALLOC_INSPECT_ALL */
#ifndef HAVE_MMAP
#define HAVE_MMAP 1
#endif  /* HAVE_MMAP */
#ifndef MMAP_CLEARS
#define MMAP_CLEARS 1
#endif  /* MMAP_CLEARS */
#ifndef HAVE_MREMAP
#ifdef linux
#define HAVE_MREMAP 1
#define _GNU_SOURCE /* Turns on mremap() definition */
#else   /* linux */
#define HAVE_MREMAP 0
#endif  /* linux */
#endif  /* HAVE_MREMAP */
#ifndef MALLOC_FAILURE_ACTION
#define MALLOC_FAILURE_ACTION  errno = ENOMEM;
#endif  /* MALLOC_FAILURE_ACTION */
#ifndef HAVE_MORECORE
#if ONLY_MSPACES
#define HAVE_MORECORE 0
#else   /* ONLY_MSPACES */
#define HAVE_MORECORE 1
#endif  /* ONLY_MSPACES */
#endif  /* HAVE_MORECORE */
#if !HAVE_MORECORE
#define MORECORE_CONTIGUOUS 0
#else   /* !HAVE_MORECORE */
#define MORECORE_DEFAULT sbrk
#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 1
#endif  /* MORECORE_CONTIGUOUS */
#endif  /* HAVE_MORECORE */
#ifndef DEFAULT_GRANULARITY
#if (MORECORE_CONTIGUOUS || defined(WIN32))
#define DEFAULT_GRANULARITY (0)  /* 0 means to compute in init_mparams */
#else   /* MORECORE_CONTIGUOUS */
#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
#endif  /* MORECORE_CONTIGUOUS */
#endif  /* DEFAULT_GRANULARITY */
#ifndef DEFAULT_TRIM_THRESHOLD
#ifndef MORECORE_CANNOT_TRIM
#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
#else   /* MORECORE_CANNOT_TRIM */
#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
#endif  /* MORECORE_CANNOT_TRIM */
#endif  /* DEFAULT_TRIM_THRESHOLD */
#ifndef DEFAULT_MMAP_THRESHOLD
#if HAVE_MMAP
#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
#else   /* HAVE_MMAP */
#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
#endif  /* HAVE_MMAP */
#endif  /* DEFAULT_MMAP_THRESHOLD */
#ifndef MAX_RELEASE_CHECK_RATE
#if HAVE_MMAP
#define MAX_RELEASE_CHECK_RATE 4095
#else
#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T
#endif /* HAVE_MMAP */
#endif /* MAX_RELEASE_CHECK_RATE */
#ifndef USE_BUILTIN_FFS
#define USE_BUILTIN_FFS 0
#endif  /* USE_BUILTIN_FFS */
#ifndef USE_DEV_RANDOM
#define USE_DEV_RANDOM 0
#endif  /* USE_DEV_RANDOM */
#ifndef NO_MALLINFO
#define NO_MALLINFO 0
#endif  /* NO_MALLINFO */
#ifndef MALLINFO_FIELD_TYPE
#define MALLINFO_FIELD_TYPE size_t
#endif  /* MALLINFO_FIELD_TYPE */
#ifndef NO_MALLOC_STATS
#define NO_MALLOC_STATS 0
#endif  /* NO_MALLOC_STATS */
#ifndef NO_SEGMENT_TRAVERSAL
#define NO_SEGMENT_TRAVERSAL 0
#endif /* NO_SEGMENT_TRAVERSAL */

/*
  mallopt tuning options.  SVID/XPG defines four standard parameter
  numbers for mallopt, normally defined in malloc.h.  None of these
  are used in this malloc, so setting them has no effect. But this
  malloc does support the following options.
*/

#define M_TRIM_THRESHOLD     (-1)
#define M_GRANULARITY        (-2)
#define M_MMAP_THRESHOLD     (-3)

/* ------------------------ Mallinfo declarations ------------------------ */

#if !NO_MALLINFO
/*
  This version of malloc supports the standard SVID/XPG mallinfo
  routine that returns a struct containing usage properties and
  statistics. It should work on any system that has a
  /usr/include/malloc.h defining struct mallinfo.  The main
  declaration needed is the mallinfo struct that is returned (by-copy)
  by mallinfo().  The mallinfo struct contains a bunch of fields that
  are not even meaningful in this version of malloc.  These fields are
  instead filled by mallinfo() with other numbers that might be of
  interest.

  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
  /usr/include/malloc.h file that includes a declaration of struct
  mallinfo.  If so, it is included; else a compliant version is
  declared below.  These must be precisely the same for mallinfo() to
  work.  The original SVID version of this struct, defined on most
  systems with mallinfo, declares all fields as ints. But some others
  define as unsigned long. If your system defines the fields using a
  type of different width than listed here, you MUST #include your
  system version and #define HAVE_USR_INCLUDE_MALLOC_H.
*/

/* #define HAVE_USR_INCLUDE_MALLOC_H */

#ifdef HAVE_USR_INCLUDE_MALLOC_H
#include "/usr/include/malloc.h"
#else /* HAVE_USR_INCLUDE_MALLOC_H */
#ifndef STRUCT_MALLINFO_DECLARED
/* HP-UX (and others?) redefines mallinfo unless _STRUCT_MALLINFO is defined */
#define _STRUCT_MALLINFO
#define STRUCT_MALLINFO_DECLARED 1
struct mallinfo {
  MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
  MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
  MALLINFO_FIELD_TYPE smblks;   /* always 0 */
  MALLINFO_FIELD_TYPE hblks;    /* always 0 */
  MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
  MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
  MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
  MALLINFO_FIELD_TYPE fordblks; /* total free space */
  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
};
#endif /* STRUCT_MALLINFO_DECLARED */
#endif /* HAVE_USR_INCLUDE_MALLOC_H */
#endif /* NO_MALLINFO */

/*
  Try to persuade compilers to inline. The most critical functions for
  inlining are defined as macros, so these aren't used for them.
*/

#if 0 /* SDL */
#ifndef FORCEINLINE
  #if defined(__GNUC__)
    #define FORCEINLINE __inline __attribute__ ((always_inline))
  #elif defined(_MSC_VER)
    #define FORCEINLINE __forceinline
  #endif
#endif
#endif /* SDL */
#ifndef NOINLINE
  #if defined(__GNUC__)
    #define NOINLINE __attribute__ ((noinline))
  #elif defined(_MSC_VER)
    #define NOINLINE __declspec(noinline)
  #else
    #define NOINLINE
  #endif
#endif

#ifdef __cplusplus
extern "C" {
#if 0 /* SDL */
#ifndef FORCEINLINE
 #define FORCEINLINE inline
#endif
#endif /* SDL */
#endif /* __cplusplus */
#if 0 /* SDL */
#ifndef FORCEINLINE
 #define FORCEINLINE
#endif
#endif /* SDL_FORCE_INLINE */

#if !ONLY_MSPACES

/* ------------------- Declarations of public routines ------------------- */

#ifndef USE_DL_PREFIX
#define dlcalloc               calloc
#define dlfree                 free
#define dlmalloc               malloc
#define dlmemalign             memalign
#define dlposix_memalign       posix_memalign
#define dlrealloc              realloc
#define dlrealloc_in_place     realloc_in_place
#define dlvalloc               valloc
#define dlpvalloc              pvalloc
#define dlmallinfo             mallinfo
#define dlmallopt              mallopt
#define dlmalloc_trim          malloc_trim
#define dlmalloc_stats         malloc_stats
#define dlmalloc_usable_size   malloc_usable_size
#define dlmalloc_footprint     malloc_footprint
#define dlmalloc_max_footprint malloc_max_footprint
#define dlmalloc_footprint_limit malloc_footprint_limit
#define dlmalloc_set_footprint_limit malloc_set_footprint_limit
#define dlmalloc_inspect_all   malloc_inspect_all
#define dlindependent_calloc   independent_calloc
#define dlindependent_comalloc independent_comalloc
#define dlbulk_free            bulk_free
#endif /* USE_DL_PREFIX */
/*
  malloc(size_t n)
  Returns a pointer to a newly allocated chunk of at least n bytes, or
  null if no space is available, in which case errno is set to ENOMEM
  on ANSI C systems.

  If n is zero, malloc returns a minimum-sized chunk. (The minimum
  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
  systems.)  Note that size_t is an unsigned type, so calls with
  arguments that would be negative if signed are interpreted as
  requests for huge amounts of space, which will often fail. The
  maximum supported value of n differs across systems, but is in all
  cases less than the maximum representable value of a size_t.
*/
DLMALLOC_EXPORT void* dlmalloc(size_t);

/*
  free(void* p)
  Releases the chunk of memory pointed to by p, that had been previously
  allocated using malloc or a related routine such as realloc.
  It has no effect if p is null. If p was not malloced or already
  freed, free(p) will by default cause the current program to abort.
*/
DLMALLOC_EXPORT void  dlfree(void*);

/*
  calloc(size_t n_elements, size_t element_size);
  Returns a pointer to n_elements * element_size bytes, with all locations
  set to zero.
*/
DLMALLOC_EXPORT void* dlcalloc(size_t, size_t);

/*
  realloc(void* p, size_t n)
  Returns a pointer to a chunk of size n that contains the same data
  as does chunk p up to the minimum of (n, p's size) bytes, or null
  if no space is available.

  The returned pointer may or may not be the same as p. The algorithm
  prefers extending p in most cases when possible, otherwise it
  employs the equivalent of a malloc-copy-free sequence.

  If p is null, realloc is equivalent to malloc.

  If space is not available, realloc returns null, errno is set (if on
  ANSI) and p is NOT freed.

  If n is for fewer bytes than already held by p, the newly unused
  space is lopped off and freed if possible.  realloc with a size
  argument of zero (re)allocates a minimum-sized chunk.

  The old unix realloc convention of allowing the last-free'd chunk
  to be used as an argument to realloc is not supported.
*/
DLMALLOC_EXPORT void* dlrealloc(void*, size_t);

/*
  realloc_in_place(void* p, size_t n)
  Resizes the space allocated for p to size n, only if this can be
  done without moving p (i.e., only if there is adjacent space
  available if n is greater than p's current allocated size, or n is
  less than or equal to p's size). This may be used instead of plain
  realloc if an alternative allocation strategy is needed upon failure
  to expand space; for example, reallocation of a buffer that must be
  memory-aligned or cleared. You can use realloc_in_place to trigger
  these alternatives only when needed.

  Returns p if successful; otherwise null.
*/
DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t);

/*
  memalign(size_t alignment, size_t n);
  Returns a pointer to a newly allocated chunk of n bytes, aligned
  in accord with the alignment argument.

  The alignment argument should be a power of two. If the argument is
  not a power of two, the nearest greater power is used.
  8-byte alignment is guaranteed by normal malloc calls, so don't
  bother calling memalign with an argument of 8 or less.

  Overreliance on memalign is a sure way to fragment space.
*/
DLMALLOC_EXPORT void* dlmemalign(size_t, size_t);

/*
  int posix_memalign(void** pp, size_t alignment, size_t n);
  Allocates a chunk of n bytes, aligned in accord with the alignment
  argument. Differs from memalign only in that it (1) assigns the
  allocated memory to *pp rather than returning it, (2) fails and
  returns EINVAL if the alignment is not a power of two (3) fails and
  returns ENOMEM if memory cannot be allocated.
*/
DLMALLOC_EXPORT int dlposix_memalign(void**, size_t, size_t);

/*
  valloc(size_t n);
  Equivalent to memalign(pagesize, n), where pagesize is the page
  size of the system. If the pagesize is unknown, 4096 is used.
*/
DLMALLOC_EXPORT void* dlvalloc(size_t);
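/*
  For example (an illustrative sketch; error handling kept minimal):

    void* buf = 0;
    if (dlposix_memalign(&buf, 64, 4096) == 0) {
      // 4096 usable bytes, aligned to a 64-byte boundary
      dlfree(buf);
    }
*/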
/*
  mallopt(int parameter_number, int parameter_value)
  Sets tunable parameters. The format is to provide a
  (parameter-number, parameter-value) pair.  mallopt then sets the
  corresponding parameter to the argument value if it can (i.e., so
  long as the value is meaningful), and returns 1 if successful else
  0.  To workaround the fact that mallopt is specified to use int,
  not size_t parameters, the value -1 is specially treated as the
  maximum unsigned size_t value.

  SVID/XPG/ANSI defines four standard param numbers for mallopt,
  normally defined in malloc.h.  None of these are used in this malloc,
  so setting them has no effect. But this malloc also supports other
  options in mallopt. See below for details.  Briefly, supported
  parameters are as follows (listed defaults are for "typical"
  configurations).

  Symbol            param #  default    allowed param values
  M_TRIM_THRESHOLD     -1   2*1024*1024   any   (-1 disables)
  M_GRANULARITY        -2     page size   any power of 2 >= page size
  M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
*/
DLMALLOC_EXPORT int dlmallopt(int, int);
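/*
  For instance (an illustrative sketch of the options listed above):

    dlmallopt(M_MMAP_THRESHOLD, 1024 * 1024);  // mmap only requests >= 1MB
    dlmallopt(M_TRIM_THRESHOLD, -1);           // -1 maps to MAX_SIZE_T: never trim
*/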
/*
  malloc_footprint();
  Returns the number of bytes obtained from the system.  The total
  number of bytes allocated by malloc, realloc etc., is less than this
  value. Unlike mallinfo, this function returns only a precomputed
  result, so can be called frequently to monitor memory consumption.
  Even if locks are otherwise defined, this function does not use them,
  so results might not be up to date.
*/
DLMALLOC_EXPORT size_t dlmalloc_footprint(void);

/*
  malloc_max_footprint();
  Returns the maximum number of bytes obtained from the system. This
  value will be greater than current footprint if deallocated space
  has been reclaimed by the system. The peak number of bytes allocated
  by malloc, realloc etc., is less than this value. Unlike mallinfo,
  this function returns only a precomputed result, so can be called
  frequently to monitor memory consumption.  Even if locks are
  otherwise defined, this function does not use them, so results might
  not be up to date.
*/
DLMALLOC_EXPORT size_t dlmalloc_max_footprint(void);

/*
  malloc_footprint_limit();
  Returns the number of bytes that the heap is allowed to obtain from
  the system, returning the last value returned by
  malloc_set_footprint_limit, or the maximum size_t value if
  never set. The returned value reflects a permission. There is no
  guarantee that this number of bytes can actually be obtained from
  the system.
*/
DLMALLOC_EXPORT size_t dlmalloc_footprint_limit();

/*
  malloc_set_footprint_limit();
  Sets the maximum number of bytes to obtain from the system, causing
  failure returns from malloc and related functions upon attempts to
  exceed this value. The argument value may be subject to page
  rounding to an enforceable limit; this actual value is returned.
  Using an argument of the maximum possible size_t effectively
  disables checks. If the argument is less than or equal to the
  current malloc_footprint, then all future allocations that require
  additional system memory will fail. However, invocation cannot
  retroactively deallocate existing used memory.
*/
DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes);

#if MALLOC_INSPECT_ALL
/*
  malloc_inspect_all(void(*handler)(void *start,
                                    void *end,
                                    size_t used_bytes,
                                    void* callback_arg),
                     void* arg);
  Traverses the heap and calls the given handler for each managed
  region, skipping all bytes that are (or may be) used for bookkeeping
  purposes.  Traversal does not include chunks that have been
  directly memory mapped. Each reported region begins at the start
  address, and continues up to but not including the end address.  The
  first used_bytes of the region contain allocated data. If
  used_bytes is zero, the region is unallocated. The handler is
  invoked with the given callback argument. If locks are defined, they
  are held during the entire traversal. It is a bad idea to invoke
  other malloc functions from within the handler.

  For example, to count the number of in-use chunks with size greater
  than 1000, you could write:
    static int count = 0;
    void count_chunks(void* start, void* end, size_t used, void* arg) {
      if (used >= 1000) ++count;
    }
  then:
    malloc_inspect_all(count_chunks, NULL);

  malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined.
*/
DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*),
                                          void* arg);

#endif /* MALLOC_INSPECT_ALL */

#if !NO_MALLINFO
/*
  mallinfo()
  Returns (by copy) a struct containing various summary statistics:

  arena:     current total non-mmapped bytes allocated from system
  ordblks:   the number of free chunks
  smblks:    always zero.
  hblks:     current number of mmapped regions
  hblkhd:    total bytes held in mmapped regions
  usmblks:   the maximum total allocated space. This will be greater
                than current total if trimming has occurred.
  fsmblks:   always zero
  uordblks:  current total allocated space (normal or mmapped)
  fordblks:  total free space
  keepcost:  the maximum number of bytes that could ideally be released
               back to system via malloc_trim. ("ideally" means that
               it ignores page restrictions etc.)

  Because these fields are ints, but internal bookkeeping may
  be kept as longs, the reported values may wrap around zero and
  thus be inaccurate.
*/
DLMALLOC_EXPORT struct mallinfo dlmallinfo(void);
#endif /* NO_MALLINFO */
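/*
  For example (an illustrative sketch; fields have type MALLINFO_FIELD_TYPE,
  which defaults to size_t in this file):

    struct mallinfo mi = dlmallinfo();
    size_t in_use   = mi.uordblks;   // current total allocated space
    size_t free_now = mi.fordblks;   // total free space
*/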
/*
  independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);

  independent_calloc is similar to calloc, but instead of returning a
  single cleared space, it returns an array of pointers to n_elements
  independent elements that can hold contents of size elem_size, each
  of which starts out cleared, and can be independently freed,
  realloc'ed etc. The elements are guaranteed to be adjacently
  allocated (this is not guaranteed to occur with multiple callocs or
  mallocs), which may also improve cache locality in some
  applications.

  The "chunks" argument is optional (i.e., may be null, which is
  probably the most typical usage). If it is null, the returned array
  is itself dynamically allocated and should also be freed when it is
  no longer needed. Otherwise, the chunks array must be of at least
  n_elements in length. It is filled in with the pointers to the
  chunks.

  In either case, independent_calloc returns this pointer array, or
  null if the allocation failed.  If n_elements is zero and "chunks"
  is null, it returns a chunk representing an array with zero elements
  (which should be freed if not wanted).

  Each element must be freed when it is no longer needed. This can be
  done all at once using bulk_free.

  independent_calloc simplifies and speeds up implementations of many
  kinds of pools.  It may also be useful when constructing large data
  structures that initially have a fixed number of fixed-sized nodes,
  but the number is not known at compile time, and some of the nodes
  may later need to be freed. For example:

    struct Node { int item; struct Node* next; };

    struct Node* build_list() {
      struct Node** pool;
      int i;
      int n = read_number_of_nodes_needed();
      if (n <= 0) return 0;
      pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
      if (pool == 0) die();
      // organize into a linked list...
      struct Node* first = pool[0];
      for (i = 0; i < n-1; ++i)
        pool[i]->next = pool[i+1];
      free(pool);     // Can now free the array (or not, if it is needed later)
      return first;
    }
*/
DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**);

/*
  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);

  independent_comalloc allocates, all at once, a set of n_elements
  chunks with sizes indicated in the "sizes" array.    It returns
  an array of pointers to these elements, each of which can be
  independently freed, realloc'ed etc. The elements are guaranteed to
  be adjacently allocated (this is not guaranteed to occur with
  multiple callocs or mallocs), which may also improve cache locality
  in some applications.

  The "chunks" argument is optional (i.e., may be null). If it is null
  the returned array is itself dynamically allocated and should also
  be freed when it is no longer needed. Otherwise, the chunks array
  must be of at least n_elements in length. It is filled in with the
  pointers to the chunks.

  In either case, independent_comalloc returns this pointer array, or
  null if the allocation failed.  If n_elements is zero and chunks is
  null, it returns a chunk representing an array with zero elements
  (which should be freed if not wanted).

  Each element must be freed when it is no longer needed. This can be
  done all at once using bulk_free.

  independent_comalloc differs from independent_calloc in that each
  element may have a different size, and also that it does not
  automatically clear elements.

  independent_comalloc can be used to speed up allocation in cases
  where several structs or objects must always be allocated at the
  same time.  For example:

    struct Head { ... }
    struct Foot { ... }

    void send_message(char* msg) {
      int msglen = strlen(msg);
      size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
      void* chunks[3];
      if (independent_comalloc(3, sizes, chunks) == 0)
        die();
      struct Head* head = (struct Head*)(chunks[0]);
      char*        body = (char*)(chunks[1]);
      struct Foot* foot = (struct Foot*)(chunks[2]);
      // ...
    }

  In general though, independent_comalloc is worth using only for
  larger values of n_elements.
  For small values, you probably won't
  detect enough difference from series of malloc calls to bother.

  Overuse of independent_comalloc can increase overall memory usage,
  since it cannot reuse existing noncontiguous small chunks that
  might be available for some of the elements.
*/
DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**);

/*
  bulk_free(void* array[], size_t n_elements)
  Frees and clears (sets to null) each non-null pointer in the given
  array.  This is likely to be faster than freeing them one-by-one.
  If footers are used, pointers that have been allocated in different
  mspaces are not freed or cleared, and the count of all such pointers
  is returned.  For large arrays of pointers with poor locality, it
  may be worthwhile to sort this array before calling bulk_free.
*/
DLMALLOC_EXPORT size_t  dlbulk_free(void**, size_t n_elements);
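/*
  For example (an illustrative sketch):

    void* ptrs[16];
    size_t i;
    for (i = 0; i < 16; ++i)
      ptrs[i] = dlmalloc(64);
    // ... use the chunks ...
    dlbulk_free(ptrs, 16);   // frees every non-null entry and nulls it out
*/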
/*
  pvalloc(size_t n);
  Equivalent to valloc(minimum-page-that-holds(n)), that is,
  round up n to nearest pagesize.
*/
DLMALLOC_EXPORT void*  dlpvalloc(size_t);

/*
  malloc_trim(size_t pad);

  If possible, gives memory back to the system (via negative arguments
  to sbrk) if there is unused memory at the `high' end of the malloc
  pool or in unused MMAP segments. You can call this after freeing
  large blocks of memory to potentially reduce the system-level memory
  requirements of a program. However, it cannot guarantee to reduce
  memory. Under some allocation patterns, some large free blocks of
  memory will be locked between two used chunks, so they cannot be
  given back to the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed. If this argument is zero, only
  the minimum amount of memory to maintain internal data structures
  will be left. Non-zero arguments can be supplied to maintain enough
  trailing space to service future expected allocations without having
  to re-obtain memory from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.
*/
DLMALLOC_EXPORT int  dlmalloc_trim(size_t);

/*
  malloc_stats();
  Prints on stderr the amount of space obtained from the system (both
  via sbrk and mmap), the maximum amount (which may be more than
  current if malloc_trim and/or munmap got called), and the current
  number of bytes allocated via malloc (or realloc, etc) but not yet
  freed. Note that this is the number of bytes allocated, not the
  number requested. It will be larger than the number requested
  because of alignment and bookkeeping overhead. Because it includes
  alignment wastage as being in use, this figure may be greater than
  zero even when no user-level chunks are allocated.

  The reported current and maximum system memory can be inaccurate if
  a program makes other calls to system memory allocation functions
  (normally sbrk) outside of malloc.

  malloc_stats prints only the most commonly interesting statistics.
  More information can be obtained by calling mallinfo.
*/
DLMALLOC_EXPORT void  dlmalloc_stats(void);

/*
  malloc_usable_size(void* p);

  Returns the number of bytes you can actually use in
  an allocated chunk, which may be more than you requested (although
  often not) due to alignment and minimum size constraints.
  You can use this many bytes without worrying about
  overwriting other allocated objects. This is not a particularly great
  programming practice. malloc_usable_size can be more useful in
  debugging and assertions, for example:

    p = malloc(n);
    assert(malloc_usable_size(p) >= 256);
*/
size_t dlmalloc_usable_size(void*);

#endif /* ONLY_MSPACES */

#if MSPACES

/*
  mspace is an opaque type representing an independent
  region of space that supports mspace_malloc, etc.
*/
typedef void* mspace;

/*
  create_mspace creates and returns a new independent space with the
  given initial capacity, or, if 0, the default granularity size.  It
  returns null if there is no system memory available to create the
  space.  If argument locked is non-zero, the space uses a separate
  lock to control access. The capacity of the space will grow
  dynamically as needed to service mspace_malloc requests.  You can
  control the sizes of incremental increases of this space by
  compiling with a different DEFAULT_GRANULARITY or dynamically
  setting with mallopt(M_GRANULARITY, value).
*/
DLMALLOC_EXPORT mspace create_mspace(size_t capacity, int locked);

/*
  destroy_mspace destroys the given space, and attempts to return all
  of its memory back to the system, returning the total number of
  bytes freed. After destruction, the results of access to all memory
  used by the space become undefined.
*/
DLMALLOC_EXPORT size_t destroy_mspace(mspace msp);

/*
  create_mspace_with_base uses the memory supplied as the initial base
  of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
  space is used for bookkeeping, so the capacity must be at least this
  large. (Otherwise 0 is returned.) When this initial space is
  exhausted, additional memory will be obtained from the system.
  Destroying this space will deallocate all additionally allocated
  space (if possible) but not the initial base.
*/
DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int locked);

/*
  mspace_track_large_chunks controls whether requests for large chunks
  are allocated in their own untracked mmapped regions, separate from
  others in this mspace. By default large chunks are not tracked,
  which reduces fragmentation. However, such chunks are not
  necessarily released to the system upon destroy_mspace.  Enabling
  tracking by setting to true may increase fragmentation, but avoids
  leakage when relying on destroy_mspace to release all memory
  allocated using this space.  The function returns the previous
  setting.
*/
DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable);


/*
  mspace_malloc behaves as malloc, but operates within
  the given space.
*/
DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes);

/*
  mspace_free behaves as free, but operates within
  the given space.

  If compiled with FOOTERS==1, mspace_free is not actually needed.
  free may be called instead of mspace_free because freed chunks from
  any space are handled by their originating spaces.
*/
DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem);
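/*
  For example, a per-subsystem arena might look like this (an illustrative
  sketch; the request size is arbitrary, and locked=1 is assumed so the
  arena can be shared across threads):

    mspace arena = create_mspace(0, 1);          // default capacity, locked
    if (arena != 0) {
      void* node = mspace_malloc(arena, 256);
      // ... use node ...
      mspace_free(arena, node);
      destroy_mspace(arena);                     // release the whole arena at once
    }
*/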
realloc may be called instead of mspace_realloc because1380realloced chunks from any space are handled by their originating1381spaces.1382*/1383DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void* mem, size_t newsize);13841385/*1386mspace_calloc behaves as calloc, but operates within1387the given space.1388*/1389DLMALLOC_EXPORT void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);13901391/*1392mspace_memalign behaves as memalign, but operates within1393the given space.1394*/1395DLMALLOC_EXPORT void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);13961397/*1398mspace_independent_calloc behaves as independent_calloc, but1399operates within the given space.1400*/1401DLMALLOC_EXPORT void** mspace_independent_calloc(mspace msp, size_t n_elements,1402size_t elem_size, void* chunks[]);14031404/*1405mspace_independent_comalloc behaves as independent_comalloc, but1406operates within the given space.1407*/1408DLMALLOC_EXPORT void** mspace_independent_comalloc(mspace msp, size_t n_elements,1409size_t sizes[], void* chunks[]);14101411/*1412mspace_footprint() returns the number of bytes obtained from the1413system for this space.1414*/1415DLMALLOC_EXPORT size_t mspace_footprint(mspace msp);14161417/*1418mspace_max_footprint() returns the peak number of bytes obtained from the1419system for this space.1420*/1421DLMALLOC_EXPORT size_t mspace_max_footprint(mspace msp);142214231424#if !NO_MALLINFO1425/*1426mspace_mallinfo behaves as mallinfo, but reports properties of1427the given space.1428*/1429DLMALLOC_EXPORT struct mallinfo mspace_mallinfo(mspace msp);1430#endif /* NO_MALLINFO */14311432/*1433malloc_usable_size(void* p) behaves the same as malloc_usable_size;1434*/1435DLMALLOC_EXPORT size_t mspace_usable_size(const void* mem);14361437/*1438mspace_malloc_stats behaves as malloc_stats, but reports1439properties of the given space.1440*/1441DLMALLOC_EXPORT void mspace_malloc_stats(mspace msp);14421443/*1444mspace_trim behaves as malloc_trim, but1445operates within the given space.1446*/1447DLMALLOC_EXPORT int mspace_trim(mspace msp, size_t pad);14481449/*1450An alias for mallopt.1451*/1452DLMALLOC_EXPORT int mspace_mallopt(int, int);14531454#endif /* MSPACES */14551456#ifdef __cplusplus1457} /* end of extern "C" */1458#endif /* __cplusplus */14591460/*1461========================================================================1462To make a fully customizable malloc.h header file, cut everything1463above this line, put into file malloc.h, edit to suit, and #include it1464on the next line, as well as in programs that use this malloc.1465========================================================================1466*/14671468/* #include "malloc.h" */14691470/*------------------------------ internal #includes ---------------------- */14711472#ifdef _MSC_VER1473#pragma warning( disable : 4146 ) /* no "unsigned" warnings */1474#endif /* _MSC_VER */1475#if !NO_MALLOC_STATS1476#include <stdio.h> /* for printing in malloc_stats */1477#endif /* NO_MALLOC_STATS */1478#ifndef LACKS_ERRNO_H1479#include <errno.h> /* for MALLOC_FAILURE_ACTION */1480#else /* LACKS_ERRNO_H */1481#ifndef EINVAL1482#define EINVAL 221483#endif1484#ifndef ENOMEM1485#define ENOMEM 121486#endif1487#endif /* LACKS_ERRNO_H */1488#ifdef DEBUG1489#if ABORT_ON_ASSERT_FAILURE1490#undef assert1491#define assert(x) if(!(x)) ABORT1492#else /* ABORT_ON_ASSERT_FAILURE */1493#include <assert.h>1494#endif /* ABORT_ON_ASSERT_FAILURE */1495#else /* DEBUG */1496#ifndef assert1497#define assert(x)1498#endif1499#define DEBUG 
01500#endif /* DEBUG */1501#if !defined(WIN32) && !defined(LACKS_TIME_H)1502#include <time.h> /* for magic initialization */1503#endif /* WIN32 */1504#ifndef LACKS_STDLIB_H1505#include <stdlib.h> /* for abort() */1506#endif /* LACKS_STDLIB_H */1507#ifndef LACKS_STRING_H1508#include <string.h> /* for memset etc */1509#endif /* LACKS_STRING_H */1510#if USE_BUILTIN_FFS1511#ifndef LACKS_STRINGS_H1512#include <strings.h> /* for ffs */1513#endif /* LACKS_STRINGS_H */1514#endif /* USE_BUILTIN_FFS */1515#if HAVE_MMAP1516#ifndef LACKS_SYS_MMAN_H1517/* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */1518#if (defined(linux) && !defined(__USE_GNU))1519#define __USE_GNU 11520#include <sys/mman.h> /* for mmap */1521#undef __USE_GNU1522#else1523#include <sys/mman.h> /* for mmap */1524#endif /* linux */1525#endif /* LACKS_SYS_MMAN_H */1526#ifndef LACKS_FCNTL_H1527#include <fcntl.h>1528#endif /* LACKS_FCNTL_H */1529#endif /* HAVE_MMAP */1530#ifndef LACKS_UNISTD_H1531#include <unistd.h> /* for sbrk, sysconf */1532#else /* LACKS_UNISTD_H */1533#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)1534extern void* sbrk(ptrdiff_t);1535#endif /* FreeBSD etc */1536#endif /* LACKS_UNISTD_H */15371538/* Declarations for locking */1539#if USE_LOCKS1540#ifndef WIN321541#if defined (__SVR4) && defined (__sun) /* solaris */1542#include <thread.h>1543#elif !defined(LACKS_SCHED_H)1544#include <sched.h>1545#endif /* solaris or LACKS_SCHED_H */1546#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS1547#include <pthread.h>1548#endif /* USE_RECURSIVE_LOCKS ... */1549#elif defined(_MSC_VER)1550#ifndef _M_AMD641551/* These are already defined on AMD64 builds */1552#ifdef __cplusplus1553extern "C" {1554#endif /* __cplusplus */1555LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);1556LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);1557#ifdef __cplusplus1558}1559#endif /* __cplusplus */1560#endif /* _M_AMD64 */1561#pragma intrinsic (_InterlockedCompareExchange)1562#pragma intrinsic (_InterlockedExchange)1563#define interlockedcompareexchange _InterlockedCompareExchange1564#define interlockedexchange _InterlockedExchange1565#elif defined(WIN32) && defined(__GNUC__)1566#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)1567#define interlockedexchange __sync_lock_test_and_set1568#endif /* Win32 */1569#else /* USE_LOCKS */1570#endif /* USE_LOCKS */15711572#ifndef LOCK_AT_FORK1573#define LOCK_AT_FORK 01574#endif15751576/* Declarations for bit scanning on win32 */1577#if defined(_MSC_VER) && _MSC_VER>=13001578#ifndef BitScanForward /* Try to avoid pulling in WinNT.h */1579#ifdef __cplusplus1580extern "C" {1581#endif /* __cplusplus */1582unsigned char _BitScanForward(unsigned long *index, unsigned long mask);1583unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);1584#ifdef __cplusplus1585}1586#endif /* __cplusplus */15871588#define BitScanForward _BitScanForward1589#define BitScanReverse _BitScanReverse1590#pragma intrinsic(_BitScanForward)1591#pragma intrinsic(_BitScanReverse)1592#endif /* BitScanForward */1593#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */15941595#ifndef WIN321596#ifndef malloc_getpagesize1597# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */1598# ifndef _SC_PAGE_SIZE1599# define _SC_PAGE_SIZE _SC_PAGESIZE1600# endif1601# endif1602# ifdef _SC_PAGE_SIZE1603# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)1604# else1605# if 
defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)1606extern int getpagesize();1607# define malloc_getpagesize getpagesize()1608# else1609# ifdef WIN32 /* use supplied emulation of getpagesize */1610# define malloc_getpagesize getpagesize()1611# else1612# ifndef LACKS_SYS_PARAM_H1613# include <sys/param.h>1614# endif1615# ifdef EXEC_PAGESIZE1616# define malloc_getpagesize EXEC_PAGESIZE1617# else1618# ifdef NBPG1619# ifndef CLSIZE1620# define malloc_getpagesize NBPG1621# else1622# define malloc_getpagesize (NBPG * CLSIZE)1623# endif1624# else1625# ifdef NBPC1626# define malloc_getpagesize NBPC1627# else1628# ifdef PAGESIZE1629# define malloc_getpagesize PAGESIZE1630# else /* just guess */1631# define malloc_getpagesize ((size_t)4096U)1632# endif1633# endif1634# endif1635# endif1636# endif1637# endif1638# endif1639#endif1640#endif16411642/* ------------------- size_t and alignment properties -------------------- */16431644/* The byte and bit size of a size_t */1645#define SIZE_T_SIZE (sizeof(size_t))1646#define SIZE_T_BITSIZE (sizeof(size_t) << 3)16471648/* Some constants coerced to size_t */1649/* Annoying but necessary to avoid errors on some platforms */1650#define SIZE_T_ZERO ((size_t)0)1651#define SIZE_T_ONE ((size_t)1)1652#define SIZE_T_TWO ((size_t)2)1653#define SIZE_T_FOUR ((size_t)4)1654#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)1655#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)1656#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)1657#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)16581659/* The bit mask value corresponding to MALLOC_ALIGNMENT */1660#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)16611662/* True if address a has acceptable alignment */1663#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)16641665/* the number of bytes to offset an address to align it */1666#define align_offset(A)\1667((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\1668((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))16691670/* -------------------------- MMAP preliminaries ------------------------- */16711672/*1673If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and1674checks to fail so compiler optimizer can delete code rather than1675using so many "#if"s.1676*/167716781679/* MORECORE and MMAP must return MFAIL on failure */1680#define MFAIL ((void*)(MAX_SIZE_T))1681#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */16821683#if HAVE_MMAP16841685#ifndef WIN321686#define MUNMAP_DEFAULT(a, s) munmap((a), (s))1687#define MMAP_PROT (PROT_READ|PROT_WRITE)1688#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)1689#define MAP_ANONYMOUS MAP_ANON1690#endif /* MAP_ANON */1691#ifdef MAP_ANONYMOUS1692#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)1693#define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)1694#else /* MAP_ANONYMOUS */1695/*1696Nearly all versions of mmap support MAP_ANONYMOUS, so the following1697is unlikely to be needed, but is supplied just in case.1698*/1699#define MMAP_FLAGS (MAP_PRIVATE)1700static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */1701#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? 
\1702(dev_zero_fd = open("/dev/zero", O_RDWR), \1703mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \1704mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))1705#endif /* MAP_ANONYMOUS */17061707#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)17081709#else /* WIN32 */17101711/* Win32 MMAP via VirtualAlloc */1712SDL_FORCE_INLINE void* win32mmap(size_t size) {1713void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);1714return (ptr != 0)? ptr: MFAIL;1715}17161717/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */1718SDL_FORCE_INLINE void* win32direct_mmap(size_t size) {1719void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,1720PAGE_READWRITE);1721return (ptr != 0)? ptr: MFAIL;1722}17231724/* This function supports releasing coalesed segments */1725SDL_FORCE_INLINE int win32munmap(void* ptr, size_t size) {1726MEMORY_BASIC_INFORMATION minfo;1727char* cptr = (char*)ptr;1728while (size) {1729if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)1730return -1;1731if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||1732minfo.State != MEM_COMMIT || minfo.RegionSize > size)1733return -1;1734if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)1735return -1;1736cptr += minfo.RegionSize;1737size -= minfo.RegionSize;1738}1739return 0;1740}17411742#define MMAP_DEFAULT(s) win32mmap(s)1743#define MUNMAP_DEFAULT(a, s) win32munmap((a), (s))1744#define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s)1745#endif /* WIN32 */1746#endif /* HAVE_MMAP */17471748#if HAVE_MREMAP1749#ifndef WIN321750#define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))1751#endif /* WIN32 */1752#endif /* HAVE_MREMAP */17531754/**1755* Define CALL_MORECORE1756*/1757#if HAVE_MORECORE1758#ifdef MORECORE1759#define CALL_MORECORE(S) MORECORE(S)1760#else /* MORECORE */1761#define CALL_MORECORE(S) MORECORE_DEFAULT(S)1762#endif /* MORECORE */1763#else /* HAVE_MORECORE */1764#define CALL_MORECORE(S) MFAIL1765#endif /* HAVE_MORECORE */17661767/**1768* Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP1769*/1770#if HAVE_MMAP1771#define USE_MMAP_BIT (SIZE_T_ONE)17721773#ifdef MMAP1774#define CALL_MMAP(s) MMAP(s)1775#else /* MMAP */1776#define CALL_MMAP(s) MMAP_DEFAULT(s)1777#endif /* MMAP */1778#ifdef MUNMAP1779#define CALL_MUNMAP(a, s) MUNMAP((a), (s))1780#else /* MUNMAP */1781#define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s))1782#endif /* MUNMAP */1783#ifdef DIRECT_MMAP1784#define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)1785#else /* DIRECT_MMAP */1786#define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)1787#endif /* DIRECT_MMAP */1788#else /* HAVE_MMAP */1789#define USE_MMAP_BIT (SIZE_T_ZERO)17901791#define MMAP(s) MFAIL1792#define MUNMAP(a, s) (-1)1793#define DIRECT_MMAP(s) MFAIL1794#define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)1795#define CALL_MMAP(s) MMAP(s)1796#define CALL_MUNMAP(a, s) MUNMAP((a), (s))1797#endif /* HAVE_MMAP */17981799/**1800* Define CALL_MREMAP1801*/1802#if HAVE_MMAP && HAVE_MREMAP1803#ifdef MREMAP1804#define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))1805#else /* MREMAP */1806#define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))1807#endif /* MREMAP */1808#else /* HAVE_MMAP && HAVE_MREMAP */1809#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL1810#endif /* HAVE_MMAP && HAVE_MREMAP */18111812/* mstate bit set if continguous morecore disabled or failed */1813#define USE_NONCONTIGUOUS_BIT (4U)18141815/* segment bit set in create_mspace_with_base */1816#define EXTERN_BIT (8U)181718181819/* --------------------------- Lock 
preliminaries ------------------------ */18201821/*1822When locks are defined, there is one global lock, plus1823one per-mspace lock.18241825The global lock_ensures that mparams.magic and other unique1826mparams values are initialized only once. It also protects1827sequences of calls to MORECORE. In many cases sys_alloc requires1828two calls, that should not be interleaved with calls by other1829threads. This does not protect against direct calls to MORECORE1830by other threads not using this lock, so there is still code to1831cope the best we can on interference.18321833Per-mspace locks surround calls to malloc, free, etc.1834By default, locks are simple non-reentrant mutexes.18351836Because lock-protected regions generally have bounded times, it is1837OK to use the supplied simple spinlocks. Spinlocks are likely to1838improve performance for lightly contended applications, but worsen1839performance under heavy contention.18401841If USE_LOCKS is > 1, the definitions of lock routines here are1842bypassed, in which case you will need to define the type MLOCK_T,1843and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK1844and TRY_LOCK. You must also declare a1845static MLOCK_T malloc_global_mutex = { initialization values };.18461847*/18481849#if !USE_LOCKS1850#define USE_LOCK_BIT (0U)1851#define INITIAL_LOCK(l) (0)1852#define DESTROY_LOCK(l) (0)1853#define ACQUIRE_MALLOC_GLOBAL_LOCK()1854#define RELEASE_MALLOC_GLOBAL_LOCK()18551856#else1857#if USE_LOCKS > 11858/* ----------------------- User-defined locks ------------------------ */1859/* Define your own lock implementation here */1860/* #define INITIAL_LOCK(lk) ... */1861/* #define DESTROY_LOCK(lk) ... */1862/* #define ACQUIRE_LOCK(lk) ... */1863/* #define RELEASE_LOCK(lk) ... */1864/* #define TRY_LOCK(lk) ... */1865/* static MLOCK_T malloc_global_mutex = ... */18661867#elif USE_SPIN_LOCKS18681869/* First, define CAS_LOCK and CLEAR_LOCK on ints */1870/* Note CAS_LOCK defined to return 0 on success */18711872#if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))1873#define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1)1874#define CLEAR_LOCK(sl) __sync_lock_release(sl)18751876#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))1877/* Custom spin locks for older gcc on x86 */1878SDL_FORCE_INLINE int x86_cas_lock(int *sl) {1879int ret;1880int val = 1;1881int cmp = 0;1882__asm__ __volatile__ ("lock; cmpxchgl %1, %2"1883: "=a" (ret)1884: "r" (val), "m" (*(sl)), "0"(cmp)1885: "memory", "cc");1886return ret;1887}18881889SDL_FORCE_INLINE void x86_clear_lock(int* sl) {1890assert(*sl != 0);1891int prev = 0;1892int ret;1893__asm__ __volatile__ ("lock; xchgl %0, %1"1894: "=r" (ret)1895: "m" (*(sl)), "0"(prev)1896: "memory");1897}18981899#define CAS_LOCK(sl) x86_cas_lock(sl)1900#define CLEAR_LOCK(sl) x86_clear_lock(sl)19011902#else /* Win32 MSC */1903#define CAS_LOCK(sl) interlockedexchange(sl, (LONG)1)1904#define CLEAR_LOCK(sl) interlockedexchange (sl, (LONG)0)19051906#endif /* ... gcc spins locks ... */19071908/* How to yield for a spin lock */1909#define SPINS_PER_YIELD 631910#if defined(_MSC_VER)1911#define SLEEP_EX_DURATION 50 /* delay for yield/sleep */1912#define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE)1913#elif defined (__SVR4) && defined (__sun) /* solaris */1914#define SPIN_LOCK_YIELD thr_yield();1915#elif !defined(LACKS_SCHED_H)1916#define SPIN_LOCK_YIELD sched_yield();1917#else1918#define SPIN_LOCK_YIELD1919#endif /* ... yield ... 
*/19201921#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 01922/* Plain spin locks use single word (embedded in malloc_states) */1923static int spin_acquire_lock(volatile long *sl) {1924int spins = 0;1925while (*sl != 0 || CAS_LOCK(sl)) {1926if ((++spins & SPINS_PER_YIELD) == 0) {1927SPIN_LOCK_YIELD;1928}1929}1930return 0;1931}19321933#define MLOCK_T volatile long1934#define TRY_LOCK(sl) !CAS_LOCK(sl)1935#define RELEASE_LOCK(sl) CLEAR_LOCK(sl)1936#define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0)1937#define INITIAL_LOCK(sl) (*sl = 0)1938#define DESTROY_LOCK(sl) (0)1939static MLOCK_T malloc_global_mutex = 0;19401941#else /* USE_RECURSIVE_LOCKS */1942/* types for lock owners */1943#ifdef WIN321944#define THREAD_ID_T DWORD1945#define CURRENT_THREAD GetCurrentThreadId()1946#define EQ_OWNER(X,Y) ((X) == (Y))1947#else1948/*1949Note: the following assume that pthread_t is a type that can be1950initialized to (casted) zero. If this is not the case, you will need to1951somehow redefine these or not use spin locks.1952*/1953#define THREAD_ID_T pthread_t1954#define CURRENT_THREAD pthread_self()1955#define EQ_OWNER(X,Y) pthread_equal(X, Y)1956#endif19571958struct malloc_recursive_lock {1959int sl;1960unsigned int c;1961THREAD_ID_T threadid;1962};19631964#define MLOCK_T struct malloc_recursive_lock1965static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0};19661967SDL_FORCE_INLINE void recursive_release_lock(MLOCK_T *lk) {1968assert(lk->sl != 0);1969if (--lk->c == 0) {1970CLEAR_LOCK(&lk->sl);1971}1972}19731974SDL_FORCE_INLINE int recursive_acquire_lock(MLOCK_T *lk) {1975THREAD_ID_T mythreadid = CURRENT_THREAD;1976int spins = 0;1977for (;;) {1978if (*((volatile int *)(&lk->sl)) == 0) {1979if (!CAS_LOCK(&lk->sl)) {1980lk->threadid = mythreadid;1981lk->c = 1;1982return 0;1983}1984}1985else if (EQ_OWNER(lk->threadid, mythreadid)) {1986++lk->c;1987return 0;1988}1989if ((++spins & SPINS_PER_YIELD) == 0) {1990SPIN_LOCK_YIELD;1991}1992}1993}19941995SDL_FORCE_INLINE int recursive_try_lock(MLOCK_T *lk) {1996THREAD_ID_T mythreadid = CURRENT_THREAD;1997if (*((volatile int *)(&lk->sl)) == 0) {1998if (!CAS_LOCK(&lk->sl)) {1999lk->threadid = mythreadid;2000lk->c = 1;2001return 1;2002}2003}2004else if (EQ_OWNER(lk->threadid, mythreadid)) {2005++lk->c;2006return 1;2007}2008return 0;2009}20102011#define RELEASE_LOCK(lk) recursive_release_lock(lk)2012#define TRY_LOCK(lk) recursive_try_lock(lk)2013#define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk)2014#define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0)2015#define DESTROY_LOCK(lk) (0)2016#endif /* USE_RECURSIVE_LOCKS */20172018#elif defined(WIN32) /* Win32 critical sections */2019#define MLOCK_T CRITICAL_SECTION2020#define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0)2021#define RELEASE_LOCK(lk) LeaveCriticalSection(lk)2022#define TRY_LOCK(lk) TryEnterCriticalSection(lk)2023#define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000))2024#define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0)2025#define NEED_GLOBAL_LOCK_INIT20262027static MLOCK_T malloc_global_mutex;2028static volatile LONG malloc_global_mutex_status;20292030/* Use spin loop to initialize global lock */2031static void init_malloc_global_mutex() {2032for (;;) {2033long stat = malloc_global_mutex_status;2034if (stat > 0)2035return;2036/* transition to < 0 while initializing, then to > 0) */2037if (stat == 0 &&2038interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) 
{2039InitializeCriticalSection(&malloc_global_mutex);2040interlockedexchange(&malloc_global_mutex_status, (LONG)1);2041return;2042}2043SleepEx(0, FALSE);2044}2045}20462047#else /* pthreads-based locks */2048#define MLOCK_T pthread_mutex_t2049#define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk)2050#define RELEASE_LOCK(lk) pthread_mutex_unlock(lk)2051#define TRY_LOCK(lk) (!pthread_mutex_trylock(lk))2052#define INITIAL_LOCK(lk) pthread_init_lock(lk)2053#define DESTROY_LOCK(lk) pthread_mutex_destroy(lk)20542055#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)2056/* Cope with old-style linux recursive lock initialization by adding */2057/* skipped internal declaration from pthread.h */2058extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,2059int __kind));2060#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP2061#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)2062#endif /* USE_RECURSIVE_LOCKS ... */20632064static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;20652066static int pthread_init_lock (MLOCK_T *lk) {2067pthread_mutexattr_t attr;2068if (pthread_mutexattr_init(&attr)) return 1;2069#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 02070if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;2071#endif2072if (pthread_mutex_init(lk, &attr)) return 1;2073if (pthread_mutexattr_destroy(&attr)) return 1;2074return 0;2075}20762077#endif /* ... lock types ... */20782079/* Common code for all lock types */2080#define USE_LOCK_BIT (2U)20812082#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK2083#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);2084#endif20852086#ifndef RELEASE_MALLOC_GLOBAL_LOCK2087#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);2088#endif20892090#endif /* USE_LOCKS */20912092/* ----------------------- Chunk representations ------------------------ */20932094/*2095(The following includes lightly edited explanations by Colin Plumb.)20962097The malloc_chunk declaration below is misleading (but accurate and2098necessary). It declares a "view" into memory allowing access to2099necessary fields at known offsets from a given base.21002101Chunks of memory are maintained using a `boundary tag' method as2102originally described by Knuth. (See the paper by Paul Wilson2103ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such2104techniques.) Sizes of free chunks are stored both in the front of2105each chunk and at the end. This makes consolidating fragmented2106chunks into bigger chunks fast. The head fields also hold bits2107representing whether chunks are free or in use.21082109Here are some pictures to make it clearer. 
They are "exploded" to2110show that the state of a chunk can be thought of as extending from2111the high 31 bits of the head field of its header through the2112prev_foot and PINUSE_BIT bit of the following chunk header.21132114A chunk that's in use looks like:21152116chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2117| Size of previous chunk (if P = 0) |2118+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2119+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|2120| Size of this chunk 1| +-+2121mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2122| |2123+- -+2124| |2125+- -+2126| :2127+- size - sizeof(size_t) available payload bytes -+2128: |2129chunk-> +- -+2130| |2131+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2132+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|2133| Size of next chunk (may or may not be in use) | +-+2134mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+21352136And if it's free, it looks like this:21372138chunk-> +- -+2139| User payload (must be in use, or we would have merged!) |2140+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2141+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|2142| Size of this chunk 0| +-+2143mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2144| Next pointer |2145+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2146| Prev pointer |2147+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2148| :2149+- size - sizeof(struct chunk) unused bytes -+2150: |2151chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2152| Size of this chunk |2153+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2154+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|2155| Size of next chunk (must be in use, or we would have merged)| +-+2156mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2157| :2158+- User payload -+2159: |2160+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2161|0|2162+-+2163Note that since we always merge adjacent free chunks, the chunks2164adjacent to a free chunk must be in use.21652166Given a pointer to a chunk (which can be derived trivially from the2167payload pointer) we can, in O(1) time, find out whether the adjacent2168chunks are free, and if so, unlink them from the lists that they2169are on and merge them with the current chunk.21702171Chunks always begin on even word boundaries, so the mem portion2172(which is returned to the user) is also on an even word boundary, and2173thus at least double-word aligned.21742175The P (PINUSE_BIT) bit, stored in the unused low-order bit of the2176chunk size (which is always a multiple of two words), is an in-use2177bit for the *previous* chunk. If that bit is *clear*, then the2178word before the current chunk size contains the previous chunk2179size, and can be used to find the front of the previous chunk.2180The very first chunk allocated always has this bit set, preventing2181access to non-existent (or non-owned) memory. If pinuse is set for2182any given chunk, then you CANNOT determine the size of the2183previous chunk, and might even get a memory addressing fault when2184trying to do so.21852186The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of2187the chunk size redundantly records whether the current chunk is2188inuse (unless the chunk is mmapped). 
This redundancy enables usage2189checks within free and realloc, and reduces indirection when freeing2190and consolidating chunks.21912192Each freshly allocated chunk must have both cinuse and pinuse set.2193That is, each allocated chunk borders either a previously allocated2194and still in-use chunk, or the base of its memory arena. This is2195ensured by making all allocations from the `lowest' part of any2196found chunk. Further, no free chunk physically borders another one,2197so each free chunk is known to be preceded and followed by either2198inuse chunks or the ends of memory.21992200Note that the `foot' of the current chunk is actually represented2201as the prev_foot of the NEXT chunk. This makes it easier to2202deal with alignments etc but can be very confusing when trying2203to extend or adapt this code.22042205The exceptions to all this are220622071. The special chunk `top' is the top-most available chunk (i.e.,2208the one bordering the end of available memory). It is treated2209specially. Top is never included in any bin, is used only if2210no other chunk is available, and is released back to the2211system if it is very large (see M_TRIM_THRESHOLD). In effect,2212the top chunk is treated as larger (and thus less well2213fitting) than any other available chunk. The top chunk2214doesn't update its trailing size field since there is no next2215contiguous chunk that would have to index off it. However,2216space is still allocated for it (TOP_FOOT_SIZE) to enable2217separation or merging when space is extended.221822193. Chunks allocated via mmap, have both cinuse and pinuse bits2220cleared in their head fields. Because they are allocated2221one-by-one, each must carry its own prev_foot field, which is2222also used to hold the offset this chunk has within its mmapped2223region, which is needed to preserve alignment. Each mmapped2224chunk is trailed by the first two fields of a fake next-chunk2225for sake of usage checks.22262227*/22282229struct malloc_chunk {2230size_t prev_foot; /* Size of previous chunk (if free). */2231size_t head; /* Size and inuse bits. */2232struct malloc_chunk* fd; /* double links -- used only if free. */2233struct malloc_chunk* bk;2234};22352236typedef struct malloc_chunk mchunk;2237typedef struct malloc_chunk* mchunkptr;2238typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */2239typedef unsigned int bindex_t; /* Described below */2240typedef unsigned int binmap_t; /* Described below */2241typedef unsigned int flag_t; /* The type of various bit flag sets */22422243/* ------------------- Chunks sizes and alignments ----------------------- */22442245#define MCHUNK_SIZE (sizeof(mchunk))22462247#if FOOTERS2248#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)2249#else /* FOOTERS */2250#define CHUNK_OVERHEAD (SIZE_T_SIZE)2251#endif /* FOOTERS */22522253/* MMapped chunks need a second word of overhead ... */2254#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)2255/* ... 
and additional padding for fake next-chunk at foot */2256#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)22572258/* The smallest size we can malloc is an aligned minimal chunk */2259#define MIN_CHUNK_SIZE\2260((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)22612262/* conversion from malloc headers to user pointers, and back */2263#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))2264#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))2265/* chunk associated with aligned address A */2266#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))22672268/* Bounds on request (not chunk) sizes. */2269#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)2270#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)22712272/* pad request bytes into a usable size */2273#define pad_request(req) \2274(((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)22752276/* pad request, checking for minimum (but not maximum) */2277#define request2size(req) \2278(((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))227922802281/* ------------------ Operations on head and foot fields ----------------- */22822283/*2284The head field of a chunk is or'ed with PINUSE_BIT when previous2285adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in2286use, unless mmapped, in which case both bits are cleared.22872288FLAG4_BIT is not used by this malloc, but might be useful in extensions.2289*/22902291#define PINUSE_BIT (SIZE_T_ONE)2292#define CINUSE_BIT (SIZE_T_TWO)2293#define FLAG4_BIT (SIZE_T_FOUR)2294#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)2295#define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)22962297/* Head value for fenceposts */2298#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)22992300/* extraction of fields from head words */2301#define cinuse(p) ((p)->head & CINUSE_BIT)2302#define pinuse(p) ((p)->head & PINUSE_BIT)2303#define flag4inuse(p) ((p)->head & FLAG4_BIT)2304#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT)2305#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0)23062307#define chunksize(p) ((p)->head & ~(FLAG_BITS))23082309#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)2310#define set_flag4(p) ((p)->head |= FLAG4_BIT)2311#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT)23122313/* Treat space at ptr +/- offset as a chunk */2314#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))2315#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))23162317/* Ptr to next or previous physical malloc_chunk. */2318#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))2319#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))23202321/* extract next chunk's pinuse bit */2322#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)23232324/* Get/set size at footer */2325#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)2326#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))23272328/* Set size, pinuse bit, and foot */2329#define set_size_and_pinuse_of_free_chunk(p, s)\2330((p)->head = (s|PINUSE_BIT), set_foot(p, s))23312332/* Set size, pinuse bit, foot, and clear next pinuse */2333#define set_free_with_pinuse(p, s, n)\2334(clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))23352336/* Get the internal overhead associated with chunk p */2337#define overhead_for(p)\2338(is_mmapped(p)? 
MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)23392340/* Return true if malloced space is not necessarily cleared */2341#if MMAP_CLEARS2342#define calloc_must_clear(p) (!is_mmapped(p))2343#else /* MMAP_CLEARS */2344#define calloc_must_clear(p) (1)2345#endif /* MMAP_CLEARS */23462347/* ---------------------- Overlaid data structures ----------------------- */23482349/*2350When chunks are not in use, they are treated as nodes of either2351lists or trees.23522353"Small" chunks are stored in circular doubly-linked lists, and look2354like this:23552356chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2357| Size of previous chunk |2358+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2359`head:' | Size of chunk, in bytes |P|2360mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2361| Forward pointer to next chunk in list |2362+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2363| Back pointer to previous chunk in list |2364+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2365| Unused space (may be 0 bytes long) .2366. .2367. |2368nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2369`foot:' | Size of chunk, in bytes |2370+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+23712372Larger chunks are kept in a form of bitwise digital trees (aka2373tries) keyed on chunksizes. Because malloc_tree_chunks are only for2374free chunks greater than 256 bytes, their size doesn't impose any2375constraints on user chunk sizes. Each node looks like:23762377chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2378| Size of previous chunk |2379+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2380`head:' | Size of chunk, in bytes |P|2381mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2382| Forward pointer to next chunk of same size |2383+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2384| Back pointer to previous chunk of same size |2385+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2386| Pointer to left child (child[0]) |2387+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2388| Pointer to right child (child[1]) |2389+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2390| Pointer to parent |2391+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2392| bin index of this chunk |2393+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2394| Unused space .2395. |2396nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2397`foot:' | Size of chunk, in bytes |2398+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+23992400Each tree holding treenodes is a tree of unique chunk sizes. Chunks2401of the same size are arranged in a circularly-linked list, with only2402the oldest chunk (the next to be used, in our FIFO ordering)2403actually in the tree. (Tree members are distinguished by a non-null2404parent pointer.) If a chunk with the same size an an existing node2405is inserted, it is linked off the existing node using pointers that2406work in the same way as fd/bk pointers of small chunks.24072408Each tree contains a power of 2 sized range of chunk sizes (the2409smallest is 0x100 <= x < 0x180), which is is divided in half at each2410tree level, with the chunks in the smaller half of the range (0x1002411<= x < 0x140 for the top nose) in the left subtree and the larger2412half (0x140 <= x < 0x180) in the right subtree. 
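For example (an illustrative walk-through): in the bin covering
0x100 <= x < 0x180, a free chunk of size 0x128 falls in the smaller
half and is placed in the left subtree, while one of size 0x150 falls
in the larger half and is placed in the right subtree.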
This is, of course,2413done by inspecting individual bits.24142415Using these rules, each node's left subtree contains all smaller2416sizes than its right subtree. However, the node at the root of each2417subtree has no particular ordering relationship to either. (The2418dividing line between the subtree sizes is based on trie relation.)2419If we remove the last chunk of a given size from the interior of the2420tree, we need to replace it with a leaf node. The tree ordering2421rules permit a node to be replaced by any leaf below it.24222423The smallest chunk in a tree (a common operation in a best-fit2424allocator) can be found by walking a path to the leftmost leaf in2425the tree. Unlike a usual binary tree, where we follow left child2426pointers until we reach a null, here we follow the right child2427pointer any time the left one is null, until we reach a leaf with2428both child pointers null. The smallest chunk in the tree will be2429somewhere along that path.24302431The worst case number of steps to add, find, or remove a node is2432bounded by the number of bits differentiating chunks within2433bins. Under current bin calculations, this ranges from 6 up to 212434(for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case2435is of course much better.2436*/24372438struct malloc_tree_chunk {2439/* The first four fields must be compatible with malloc_chunk */2440size_t prev_foot;2441size_t head;2442struct malloc_tree_chunk* fd;2443struct malloc_tree_chunk* bk;24442445struct malloc_tree_chunk* child[2];2446struct malloc_tree_chunk* parent;2447bindex_t index;2448};24492450typedef struct malloc_tree_chunk tchunk;2451typedef struct malloc_tree_chunk* tchunkptr;2452typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */24532454/* A little helper macro for trees */2455#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])24562457/* ----------------------------- Segments -------------------------------- */24582459/*2460Each malloc space may include non-contiguous segments, held in a2461list headed by an embedded malloc_segment record representing the2462top-most space. Segments also include flags holding properties of2463the space. Large chunks that are directly allocated by mmap are not2464included in this list. They are instead independently created and2465destroyed without otherwise keeping track of them.24662467Segment management mainly comes into play for spaces allocated by2468MMAP. Any call to MMAP might or might not return memory that is2469adjacent to an existing segment. MORECORE normally contiguously2470extends the current space, so this space is almost always adjacent,2471which is simpler and faster to deal with. (This is why MORECORE is2472used preferentially to MMAP when both are available -- see2473sys_alloc.) When allocating using MMAP, we don't use any of the2474hinting mechanisms (inconsistently) supported in various2475implementations of unix mmap, or distinguish reserving from2476committing memory. Instead, we just ask for space, and exploit2477contiguity when we get it. It is probably possible to do2478better than this on some systems, but no general scheme seems2479to be significantly better.24802481Management entails a simpler variant of the consolidation scheme2482used for chunks to reduce fragmentation -- new adjacent memory is2483normally prepended or appended to an existing segment. 
However,2484there are limitations compared to chunk consolidation that mostly2485reflect the fact that segment processing is relatively infrequent2486(occurring only when getting memory from system) and that we2487don't expect to have huge numbers of segments:24882489* Segments are not indexed, so traversal requires linear scans. (It2490would be possible to index these, but is not worth the extra2491overhead and complexity for most programs on most platforms.)2492* New segments are only appended to old ones when holding top-most2493memory; if they cannot be prepended to others, they are held in2494different segments.24952496Except for the top-most segment of an mstate, each segment record2497is kept at the tail of its segment. Segments are added by pushing2498segment records onto the list headed by &mstate.seg for the2499containing mstate.25002501Segment flags control allocation/merge/deallocation policies:2502* If EXTERN_BIT set, then we did not allocate this segment,2503and so should not try to deallocate or merge with others.2504(This currently holds only for the initial segment passed2505into create_mspace_with_base.)2506* If USE_MMAP_BIT set, the segment may be merged with2507other surrounding mmapped segments and trimmed/de-allocated2508using munmap.2509* If neither bit is set, then the segment was obtained using2510MORECORE so can be merged with surrounding MORECORE'd segments2511and deallocated/trimmed using MORECORE with negative arguments.2512*/25132514struct malloc_segment {2515char* base; /* base address */2516size_t size; /* allocated size */2517struct malloc_segment* next; /* ptr to next segment */2518flag_t sflags; /* mmap and extern flag */2519};25202521#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)2522#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)25232524typedef struct malloc_segment msegment;2525typedef struct malloc_segment* msegmentptr;25262527/* ---------------------------- malloc_state ----------------------------- */25282529/*2530A malloc_state holds all of the bookkeeping for a space.2531The main fields are:25322533Top2534The topmost chunk of the currently active segment. Its size is2535cached in topsize. The actual size of topmost space is2536topsize+TOP_FOOT_SIZE, which includes space reserved for adding2537fenceposts and segment records if necessary when getting more2538space from the system. The size at which to autotrim top is2539cached from mparams in trim_check, except that it is disabled if2540an autotrim fails.25412542Designated victim (dv)2543This is the preferred chunk for servicing small requests that2544don't have exact fits. It is normally the chunk split off most2545recently to service another small request. Its size is cached in2546dvsize. The link fields of this chunk are not maintained since it2547is not kept in a bin.25482549SmallBins2550An array of bin headers for free chunks. These bins hold chunks2551with sizes less than MIN_LARGE_SIZE bytes. Each bin contains2552chunks of all the same size, spaced 8 bytes apart. To simplify2553use in double-linked lists, each bin header acts as a malloc_chunk2554pointing to the real first node, if it exists (else pointing to2555itself). This avoids special-casing for headers. But to avoid2556waste, we allocate only the fd/bk pointers of bins, and then use2557repositioning tricks to treat these as the fields of a chunk.25582559TreeBins2560Treebins are pointers to the roots of trees holding a range of2561sizes. 
There are 2 equally spaced treebins for each power of two2562from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything2563larger.25642565Bin maps2566There is one bit map for small bins ("smallmap") and one for2567treebins ("treemap). Each bin sets its bit when non-empty, and2568clears the bit when empty. Bit operations are then used to avoid2569bin-by-bin searching -- nearly all "search" is done without ever2570looking at bins that won't be selected. The bit maps2571conservatively use 32 bits per map word, even if on 64bit system.2572For a good description of some of the bit-based techniques used2573here, see Henry S. Warren Jr's book "Hacker's Delight" (and2574supplement at http://hackersdelight.org/). Many of these are2575intended to reduce the branchiness of paths through malloc etc, as2576well as to reduce the number of memory locations read or written.25772578Segments2579A list of segments headed by an embedded malloc_segment record2580representing the initial space.25812582Address check support2583The least_addr field is the least address ever obtained from2584MORECORE or MMAP. Attempted frees and reallocs of any address less2585than this are trapped (unless INSECURE is defined).25862587Magic tag2588A cross-check field that should always hold same value as mparams.magic.25892590Max allowed footprint2591The maximum allowed bytes to allocate from system (zero means no limit)25922593Flags2594Bits recording whether to use MMAP, locks, or contiguous MORECORE25952596Statistics2597Each space keeps track of current and maximum system memory2598obtained via MORECORE or MMAP.25992600Trim support2601Fields holding the amount of unused topmost memory that should trigger2602trimming, and a counter to force periodic scanning to release unused2603non-topmost segments.26042605Locking2606If USE_LOCKS is defined, the "mutex" lock is acquired and released2607around every public call using this mspace.26082609Extension support2610A void* pointer and a size_t field that can be used to help implement2611extensions to this malloc.2612*/26132614/* Bin types, widths and sizes */2615#define NSMALLBINS (32U)2616#define NTREEBINS (32U)2617#define SMALLBIN_SHIFT (3U)2618#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)2619#define TREEBIN_SHIFT (8U)2620#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)2621#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)2622#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)26232624struct malloc_state {2625binmap_t smallmap;2626binmap_t treemap;2627size_t dvsize;2628size_t topsize;2629char* least_addr;2630mchunkptr dv;2631mchunkptr top;2632size_t trim_check;2633size_t release_checks;2634size_t magic;2635mchunkptr smallbins[(NSMALLBINS+1)*2];2636tbinptr treebins[NTREEBINS];2637size_t footprint;2638size_t max_footprint;2639size_t footprint_limit; /* zero means no limit */2640flag_t mflags;2641#if USE_LOCKS2642MLOCK_T mutex; /* locate lock among fields that rarely change */2643#endif /* USE_LOCKS */2644msegment seg;2645void* extp; /* Unused but available for extensions */2646size_t exts;2647};26482649typedef struct malloc_state* mstate;26502651/* ------------- Global malloc_state and malloc_params ------------------- */26522653/*2654malloc_params holds global properties, including those that can be2655dynamically set using mallopt. There is a single instance, mparams,2656initialized in init_mparams. 
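For example (illustrative only; with USE_DL_PREFIX the public entry
point is dlmallopt), a program might tune these at startup:

  dlmallopt(M_TRIM_THRESHOLD, 256*1024);
  dlmallopt(M_GRANULARITY, 64*1024);
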
Note that the non-zeroness of "magic"2657also serves as an initialization flag.2658*/26592660struct malloc_params {2661size_t magic;2662size_t page_size;2663size_t granularity;2664size_t mmap_threshold;2665size_t trim_threshold;2666flag_t default_mflags;2667};26682669static struct malloc_params mparams;26702671/* Ensure mparams initialized */2672#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())26732674#if !ONLY_MSPACES26752676/* The global malloc_state used for all non-"mspace" calls */2677static struct malloc_state _gm_;2678#define gm (&_gm_)2679#define is_global(M) ((M) == &_gm_)26802681#endif /* !ONLY_MSPACES */26822683#define is_initialized(M) ((M)->top != 0)26842685/* -------------------------- system alloc setup ------------------------- */26862687/* Operations on mflags */26882689#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)2690#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)2691#if USE_LOCKS2692#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)2693#else2694#define disable_lock(M)2695#endif26962697#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)2698#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)2699#if HAVE_MMAP2700#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)2701#else2702#define disable_mmap(M)2703#endif27042705#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)2706#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)27072708#define set_lock(M,L)\2709((M)->mflags = (L)?\2710((M)->mflags | USE_LOCK_BIT) :\2711((M)->mflags & ~USE_LOCK_BIT))27122713/* page-align a size */2714#define page_align(S)\2715(((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))27162717/* granularity-align a size */2718#define granularity_align(S)\2719(((S) + (mparams.granularity - SIZE_T_ONE))\2720& ~(mparams.granularity - SIZE_T_ONE))272127222723/* For mmap, use granularity alignment on windows, else page-align */2724#ifdef WIN322725#define mmap_align(S) granularity_align(S)2726#else2727#define mmap_align(S) page_align(S)2728#endif27292730/* For sys_alloc, enough padding to ensure can malloc request on success */2731#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)27322733#define is_page_aligned(S)\2734(((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)2735#define is_granularity_aligned(S)\2736(((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)27372738/* True if segment S holds address A */2739#define segment_holds(S, A)\2740((char*)(A) >= S->base && (char*)(A) < S->base + S->size)27412742/* Return segment holding given address */2743static msegmentptr segment_holding(mstate m, char* addr) {2744msegmentptr sp = &m->seg;2745for (;;) {2746if (addr >= sp->base && addr < sp->base + sp->size)2747return sp;2748if ((sp = sp->next) == 0)2749return 0;2750}2751}27522753/* Return true if segment contains a segment link */2754static int has_segment_link(mstate m, msegmentptr ss) {2755msegmentptr sp = &m->seg;2756for (;;) {2757if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)2758return 1;2759if ((sp = sp->next) == 0)2760return 0;2761}2762}27632764#ifndef MORECORE_CANNOT_TRIM2765#define should_trim(M,s) ((s) > (M)->trim_check)2766#else /* MORECORE_CANNOT_TRIM */2767#define should_trim(M,s) (0)2768#endif /* MORECORE_CANNOT_TRIM */27692770/*2771TOP_FOOT_SIZE is padding at the end of a segment, including space2772that may be needed to place segment records and fenceposts when new2773noncontiguous segments are added.2774*/2775#define 
TOP_FOOT_SIZE\2776(align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)277727782779/* ------------------------------- Hooks -------------------------------- */27802781/*2782PREACTION should be defined to return 0 on success, and nonzero on2783failure. If you are not using locking, you can redefine these to do2784anything you like.2785*/27862787#if USE_LOCKS2788#define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)2789#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }2790#else /* USE_LOCKS */27912792#ifndef PREACTION2793#define PREACTION(M) (0)2794#endif /* PREACTION */27952796#ifndef POSTACTION2797#define POSTACTION(M)2798#endif /* POSTACTION */27992800#endif /* USE_LOCKS */28012802/*2803CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.2804USAGE_ERROR_ACTION is triggered on detected bad frees and2805reallocs. The argument p is an address that might have triggered the2806fault. It is ignored by the two predefined actions, but might be2807useful in custom actions that try to help diagnose errors.2808*/28092810#if PROCEED_ON_ERROR28112812/* A count of the number of corruption errors causing resets */2813int malloc_corruption_error_count;28142815/* default corruption action */2816static void reset_on_error(mstate m);28172818#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)2819#define USAGE_ERROR_ACTION(m, p)28202821#else /* PROCEED_ON_ERROR */28222823#ifndef CORRUPTION_ERROR_ACTION2824#define CORRUPTION_ERROR_ACTION(m) ABORT2825#endif /* CORRUPTION_ERROR_ACTION */28262827#ifndef USAGE_ERROR_ACTION2828#define USAGE_ERROR_ACTION(m,p) ABORT2829#endif /* USAGE_ERROR_ACTION */28302831#endif /* PROCEED_ON_ERROR */283228332834/* -------------------------- Debugging setup ---------------------------- */28352836#if ! DEBUG28372838#define check_free_chunk(M,P)2839#define check_inuse_chunk(M,P)2840#define check_malloced_chunk(M,P,N)2841#define check_mmapped_chunk(M,P)2842#define check_malloc_state(M)2843#define check_top_chunk(M,P)28442845#else /* DEBUG */2846#define check_free_chunk(M,P) do_check_free_chunk(M,P)2847#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)2848#define check_top_chunk(M,P) do_check_top_chunk(M,P)2849#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)2850#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)2851#define check_malloc_state(M) do_check_malloc_state(M)28522853static void do_check_any_chunk(mstate m, mchunkptr p);2854static void do_check_top_chunk(mstate m, mchunkptr p);2855static void do_check_mmapped_chunk(mstate m, mchunkptr p);2856static void do_check_inuse_chunk(mstate m, mchunkptr p);2857static void do_check_free_chunk(mstate m, mchunkptr p);2858static void do_check_malloced_chunk(mstate m, void* mem, size_t s);2859static void do_check_tree(mstate m, tchunkptr t);2860static void do_check_treebin(mstate m, bindex_t i);2861static void do_check_smallbin(mstate m, bindex_t i);2862static void do_check_malloc_state(mstate m);2863static int bin_find(mstate m, mchunkptr x);2864static size_t traverse_and_check(mstate m);2865#endif /* DEBUG */28662867/* ---------------------------- Indexing Bins ---------------------------- */28682869#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)2870#define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT)2871#define small_index2size(i) ((i) << SMALLBIN_SHIFT)2872#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))28732874/* addressing by index. 
See above about smallbin repositioning */2875#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))2876#define treebin_at(M,i) (&((M)->treebins[i]))28772878/* assign tree index for size S to variable I. Use x86 asm if possible */2879#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))2880#define compute_tree_index(S, I)\2881{\2882unsigned int X = S >> TREEBIN_SHIFT;\2883if (X == 0)\2884I = 0;\2885else if (X > 0xFFFF)\2886I = NTREEBINS-1;\2887else {\2888unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \2889I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\2890}\2891}28922893#elif defined (__INTEL_COMPILER)2894#define compute_tree_index(S, I)\2895{\2896size_t X = S >> TREEBIN_SHIFT;\2897if (X == 0)\2898I = 0;\2899else if (X > 0xFFFF)\2900I = NTREEBINS-1;\2901else {\2902unsigned int K = _bit_scan_reverse (X); \2903I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\2904}\2905}29062907#elif defined(_MSC_VER) && _MSC_VER>=13002908#define compute_tree_index(S, I)\2909{\2910size_t X = S >> TREEBIN_SHIFT;\2911if (X == 0)\2912I = 0;\2913else if (X > 0xFFFF)\2914I = NTREEBINS-1;\2915else {\2916unsigned int K;\2917_BitScanReverse((DWORD *) &K, (DWORD) X);\2918I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\2919}\2920}29212922#else /* GNUC */2923#define compute_tree_index(S, I)\2924{\2925size_t X = S >> TREEBIN_SHIFT;\2926if (X == 0)\2927I = 0;\2928else if (X > 0xFFFF)\2929I = NTREEBINS-1;\2930else {\2931unsigned int Y = (unsigned int)X;\2932unsigned int N = ((Y - 0x100) >> 16) & 8;\2933unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\2934N += K;\2935N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\2936K = 14 - N + ((Y <<= K) >> 15);\2937I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\2938}\2939}2940#endif /* GNUC */29412942/* Bit representing maximum resolved size in a treebin at i */2943#define bit_for_tree_index(i) \2944(i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)29452946/* Shift placing maximum resolved bit in a treebin at i as sign bit */2947#define leftshift_for_tree_index(i) \2948((i == NTREEBINS-1)? 0 : \2949((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))29502951/* The size of the smallest chunk held in bin with index i */2952#define minsize_for_tree_index(i) \2953((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \2954(((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))295529562957/* ------------------------ Operations on bin maps ----------------------- */29582959/* bit corresponding to given index */2960#define idx2bit(i) ((binmap_t)(1) << (i))29612962/* Mark/Clear bits with given index */2963#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))2964#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))2965#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))29662967#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))2968#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))2969#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))29702971/* isolate the least set bit of a bitmap */2972#define least_bit(x) ((x) & -(x))29732974/* mask with all bits to left of least bit of x on */2975#define left_bits(x) ((x<<1) | -(x<<1))29762977/* mask with all bits to left of or equal to least bit of x on */2978#define same_or_left_bits(x) ((x) | -(x))29792980/* index corresponding to given bit. 
/* index corresponding to given bit. Use x86 asm if possible */

#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#define compute_bit2idx(X, I)\
{\
  unsigned int J;\
  J = __builtin_ctz(X); \
  I = (bindex_t)J;\
}

#elif defined (__INTEL_COMPILER)
#define compute_bit2idx(X, I)\
{\
  unsigned int J;\
  J = _bit_scan_forward (X); \
  I = (bindex_t)J;\
}

#elif defined(_MSC_VER) && _MSC_VER>=1300
#define compute_bit2idx(X, I)\
{\
  unsigned int J;\
  _BitScanForward((DWORD *) &J, X);\
  I = (bindex_t)J;\
}

#elif USE_BUILTIN_FFS
#define compute_bit2idx(X, I) I = ffs(X)-1

#else
#define compute_bit2idx(X, I)\
{\
  unsigned int Y = X - 1;\
  unsigned int K = Y >> (16-4) & 16;\
  unsigned int N = K;        Y >>= K;\
  N += K = Y >> (8-3) &  8;  Y >>= K;\
  N += K = Y >> (4-2) &  4;  Y >>= K;\
  N += K = Y >> (2-1) &  2;  Y >>= K;\
  N += K = Y >> (1-0) &  1;  Y >>= K;\
  I = (bindex_t)(N + Y);\
}
#endif /* GNUC */


/* ----------------------- Runtime Check Support ------------------------- */

/*
  For security, the main invariant is that malloc/free/etc never
  writes to a static address other than malloc_state, unless static
  malloc_state itself has been corrupted, which cannot occur via
  malloc (because of these checks). In essence this means that we
  believe all pointers, sizes, maps etc held in malloc_state, but
  check all of those linked or offset from other embedded data
  structures. These checks are interspersed with main code in a way
  that tends to minimize their run-time cost.

  When FOOTERS is defined, in addition to range checking, we also
  verify footer fields of inuse chunks, which can be used to guarantee
  that the mstate controlling malloc/free is intact. This is a
  streamlined version of the approach described by William Robertson
  et al in "Run-time Detection of Heap-based Overflows" LISA'03
  http://www.usenix.org/events/lisa03/tech/robertson.html The footer
  of an inuse chunk holds the xor of its mstate and a random seed,
  that is checked upon calls to free() and realloc(). This is
  (probabilistically) unguessable from outside the program, but can be
  computed by any code successfully malloc'ing any chunk, so does not
  itself provide protection against code that has already broken
  security through some other means. Unlike Robertson et al, we
  always dynamically check addresses of all offset chunks (previous,
  next, etc). This turns out to be cheaper than relying on hashes.
*/
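/*
  Added illustrative note (not part of the upstream dlmalloc text): with
  FOOTERS enabled, mark_inuse_foot() below stores
      (size_t)m ^ mparams.magic
  in the prev_foot field of the chunk that follows each inuse chunk, and
  get_mstate_for() xors the same per-run secret back out to recover the
  owning mstate. free() and realloc() then reject a chunk whose recovered
  mstate fails ok_magic(), which is what defeats externally forged chunks.
*/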
#if !INSECURE
/* Check if address a is at least as high as any from MORECORE or MMAP */
#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
/* Check if address of next chunk n is higher than base chunk p */
#define ok_next(p, n)    ((char*)(p) < (char*)(n))
/* Check if p has inuse status */
#define ok_inuse(p)     is_inuse(p)
/* Check if p has its pinuse bit on */
#define ok_pinuse(p)     pinuse(p)

#else /* !INSECURE */
#define ok_address(M, a) (1)
#define ok_next(b, n)    (1)
#define ok_inuse(p)      (1)
#define ok_pinuse(p)     (1)
#endif /* !INSECURE */

#if (FOOTERS && !INSECURE)
/* Check if (alleged) mstate m has expected magic field */
#define ok_magic(M)      ((M)->magic == mparams.magic)
#else  /* (FOOTERS && !INSECURE) */
#define ok_magic(M)      (1)
#endif /* (FOOTERS && !INSECURE) */

/* In gcc, use __builtin_expect to minimize impact of checks */
#if !INSECURE
#if defined(__GNUC__) && __GNUC__ >= 3
#define RTCHECK(e)  __builtin_expect(e, 1)
#else /* GNUC */
#define RTCHECK(e)  (e)
#endif /* GNUC */
#else /* !INSECURE */
#define RTCHECK(e)  (1)
#endif /* !INSECURE */

/* macros to set up inuse chunks with or without footers */

#if !FOOTERS

#define mark_inuse_foot(M,p,s)

/* Macros for setting head/foot of non-mmapped chunks */

/* Set cinuse bit and pinuse bit of next chunk */
#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

/* Set size, cinuse and pinuse bit of this chunk */
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))

#else /* FOOTERS */

/* Set foot of inuse chunk to be xor of mstate and seed */
#define mark_inuse_foot(M,p,s)\
  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))

#define get_mstate_for(p)\
  ((mstate)(((mchunkptr)((char*)(p) +\
    (chunksize(p))))->prev_foot ^ mparams.magic))

#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
  mark_inuse_foot(M,p,s))

#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
  mark_inuse_foot(M,p,s))

#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  mark_inuse_foot(M, p, s))

#endif /* !FOOTERS */

/* ---------------------------- setting mparams -------------------------- */

#if LOCK_AT_FORK
static void pre_fork(void)         { ACQUIRE_LOCK(&(gm)->mutex); }
static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); }
static void post_fork_child(void)  { INITIAL_LOCK(&(gm)->mutex); }
#endif /* LOCK_AT_FORK */

/* Initialize mparams */
static int init_mparams(void) {
#ifdef NEED_GLOBAL_LOCK_INIT
  if (malloc_global_mutex_status <= 0)
    init_malloc_global_mutex();
#endif

  ACQUIRE_MALLOC_GLOBAL_LOCK();
  if (mparams.magic == 0) {
    size_t magic;
    size_t psize;
    size_t gsize;

#ifndef WIN32
    psize = malloc_getpagesize;
    gsize = 
((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize);3159#else /* WIN32 */3160{3161SYSTEM_INFO system_info;3162GetSystemInfo(&system_info);3163psize = system_info.dwPageSize;3164gsize = ((DEFAULT_GRANULARITY != 0)?3165DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);3166}3167#endif /* WIN32 */31683169/* Sanity-check configuration:3170size_t must be unsigned and as wide as pointer type.3171ints must be at least 4 bytes.3172alignment must be at least 8.3173Alignment, min chunk size, and page size must all be powers of 2.3174*/3175if ((sizeof(size_t) != sizeof(char*)) ||3176(MAX_SIZE_T < MIN_CHUNK_SIZE) ||3177(sizeof(int) < 4) ||3178(MALLOC_ALIGNMENT < (size_t)8U) ||3179((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||3180((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||3181((gsize & (gsize-SIZE_T_ONE)) != 0) ||3182((psize & (psize-SIZE_T_ONE)) != 0))3183ABORT;3184mparams.granularity = gsize;3185mparams.page_size = psize;3186mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;3187mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;3188#if MORECORE_CONTIGUOUS3189mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;3190#else /* MORECORE_CONTIGUOUS */3191mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;3192#endif /* MORECORE_CONTIGUOUS */31933194#if !ONLY_MSPACES3195/* Set up lock for main malloc area */3196gm->mflags = mparams.default_mflags;3197(void)INITIAL_LOCK(&gm->mutex);3198#endif3199#if LOCK_AT_FORK3200pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child);3201#endif32023203{3204#if USE_DEV_RANDOM3205int fd;3206unsigned char buf[sizeof(size_t)];3207/* Try to use /dev/urandom, else fall back on using time */3208if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&3209read(fd, buf, sizeof(buf)) == sizeof(buf)) {3210magic = *((size_t *) buf);3211close(fd);3212}3213else3214#endif /* USE_DEV_RANDOM */3215#ifdef WIN323216magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);3217#elif defined(LACKS_TIME_H)3218magic = (size_t)&magic ^ (size_t)0x55555555U;3219#else3220magic = (size_t)(time(0) ^ (size_t)0x55555555U);3221#endif3222magic |= (size_t)8U; /* ensure nonzero */3223magic &= ~(size_t)7U; /* improve chances of fault for bad values */3224/* Until memory modes commonly available, use volatile-write */3225(*(volatile size_t *)(&(mparams.magic))) = magic;3226}3227}32283229RELEASE_MALLOC_GLOBAL_LOCK();3230return 1;3231}32323233/* support for mallopt */3234static int change_mparam(int param_number, int value) {3235size_t val;3236ensure_initialization();3237val = (value == -1)? MAX_SIZE_T : (size_t)value;3238switch(param_number) {3239case M_TRIM_THRESHOLD:3240mparams.trim_threshold = val;3241return 1;3242case M_GRANULARITY:3243if (val >= mparams.page_size && ((val & (val-1)) == 0)) {3244mparams.granularity = val;3245return 1;3246}3247else3248return 0;3249case M_MMAP_THRESHOLD:3250mparams.mmap_threshold = val;3251return 1;3252default:3253return 0;3254}3255}32563257#if DEBUG3258/* ------------------------- Debugging Support --------------------------- */32593260/* Check properties of any chunk, whether free, inuse, mmapped etc */3261static void do_check_any_chunk(mstate m, mchunkptr p) {3262assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));3263assert(ok_address(m, p));3264}32653266/* Check properties of top chunk */3267static void do_check_top_chunk(mstate m, mchunkptr p) {3268msegmentptr sp = segment_holding(m, (char*)p);3269size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! 
*/3270assert(sp != 0);3271assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));3272assert(ok_address(m, p));3273assert(sz == m->topsize);3274assert(sz > 0);3275assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);3276assert(pinuse(p));3277assert(!pinuse(chunk_plus_offset(p, sz)));3278}32793280/* Check properties of (inuse) mmapped chunks */3281static void do_check_mmapped_chunk(mstate m, mchunkptr p) {3282size_t sz = chunksize(p);3283size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);3284assert(is_mmapped(p));3285assert(use_mmap(m));3286assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));3287assert(ok_address(m, p));3288assert(!is_small(sz));3289assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);3290assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);3291assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);3292}32933294/* Check properties of inuse chunks */3295static void do_check_inuse_chunk(mstate m, mchunkptr p) {3296do_check_any_chunk(m, p);3297assert(is_inuse(p));3298assert(next_pinuse(p));3299/* If not pinuse and not mmapped, previous chunk has OK offset */3300assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);3301if (is_mmapped(p))3302do_check_mmapped_chunk(m, p);3303}33043305/* Check properties of free chunks */3306static void do_check_free_chunk(mstate m, mchunkptr p) {3307size_t sz = chunksize(p);3308mchunkptr next = chunk_plus_offset(p, sz);3309do_check_any_chunk(m, p);3310assert(!is_inuse(p));3311assert(!next_pinuse(p));3312assert (!is_mmapped(p));3313if (p != m->dv && p != m->top) {3314if (sz >= MIN_CHUNK_SIZE) {3315assert((sz & CHUNK_ALIGN_MASK) == 0);3316assert(is_aligned(chunk2mem(p)));3317assert(next->prev_foot == sz);3318assert(pinuse(p));3319assert (next == m->top || is_inuse(next));3320assert(p->fd->bk == p);3321assert(p->bk->fd == p);3322}3323else /* markers are always of size SIZE_T_SIZE */3324assert(sz == SIZE_T_SIZE);3325}3326}33273328/* Check properties of malloced chunks at the point they are malloced */3329static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {3330if (mem != 0) {3331mchunkptr p = mem2chunk(mem);3332size_t sz = p->head & ~INUSE_BITS;3333do_check_inuse_chunk(m, p);3334assert((sz & CHUNK_ALIGN_MASK) == 0);3335assert(sz >= MIN_CHUNK_SIZE);3336assert(sz >= s);3337/* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */3338assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));3339}3340}33413342/* Check a tree and its subtrees. 
*/3343static void do_check_tree(mstate m, tchunkptr t) {3344tchunkptr head = 0;3345tchunkptr u = t;3346bindex_t tindex = t->index;3347size_t tsize = chunksize(t);3348bindex_t idx;3349compute_tree_index(tsize, idx);3350assert(tindex == idx);3351assert(tsize >= MIN_LARGE_SIZE);3352assert(tsize >= minsize_for_tree_index(idx));3353assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));33543355do { /* traverse through chain of same-sized nodes */3356do_check_any_chunk(m, ((mchunkptr)u));3357assert(u->index == tindex);3358assert(chunksize(u) == tsize);3359assert(!is_inuse(u));3360assert(!next_pinuse(u));3361assert(u->fd->bk == u);3362assert(u->bk->fd == u);3363if (u->parent == 0) {3364assert(u->child[0] == 0);3365assert(u->child[1] == 0);3366}3367else {3368assert(head == 0); /* only one node on chain has parent */3369head = u;3370assert(u->parent != u);3371assert (u->parent->child[0] == u ||3372u->parent->child[1] == u ||3373*((tbinptr*)(u->parent)) == u);3374if (u->child[0] != 0) {3375assert(u->child[0]->parent == u);3376assert(u->child[0] != u);3377do_check_tree(m, u->child[0]);3378}3379if (u->child[1] != 0) {3380assert(u->child[1]->parent == u);3381assert(u->child[1] != u);3382do_check_tree(m, u->child[1]);3383}3384if (u->child[0] != 0 && u->child[1] != 0) {3385assert(chunksize(u->child[0]) < chunksize(u->child[1]));3386}3387}3388u = u->fd;3389} while (u != t);3390assert(head != 0);3391}33923393/* Check all the chunks in a treebin. */3394static void do_check_treebin(mstate m, bindex_t i) {3395tbinptr* tb = treebin_at(m, i);3396tchunkptr t = *tb;3397int empty = (m->treemap & (1U << i)) == 0;3398if (t == 0)3399assert(empty);3400if (!empty)3401do_check_tree(m, t);3402}34033404/* Check all the chunks in a smallbin. */3405static void do_check_smallbin(mstate m, bindex_t i) {3406sbinptr b = smallbin_at(m, i);3407mchunkptr p = b->bk;3408unsigned int empty = (m->smallmap & (1U << i)) == 0;3409if (p == b)3410assert(empty);3411if (!empty) {3412for (; p != b; p = p->bk) {3413size_t size = chunksize(p);3414mchunkptr q;3415/* each chunk claims to be free */3416do_check_free_chunk(m, p);3417/* chunk belongs in bin */3418assert(small_index(size) == i);3419assert(p->bk == b || chunksize(p->bk) == chunksize(p));3420/* chunk is followed by an inuse chunk */3421q = next_chunk(p);3422if (q->head != FENCEPOST_HEAD)3423do_check_inuse_chunk(m, q);3424}3425}3426}34273428/* Find x in a bin. Used in other check functions. 
*/3429static int bin_find(mstate m, mchunkptr x) {3430size_t size = chunksize(x);3431if (is_small(size)) {3432bindex_t sidx = small_index(size);3433sbinptr b = smallbin_at(m, sidx);3434if (smallmap_is_marked(m, sidx)) {3435mchunkptr p = b;3436do {3437if (p == x)3438return 1;3439} while ((p = p->fd) != b);3440}3441}3442else {3443bindex_t tidx;3444compute_tree_index(size, tidx);3445if (treemap_is_marked(m, tidx)) {3446tchunkptr t = *treebin_at(m, tidx);3447size_t sizebits = size << leftshift_for_tree_index(tidx);3448while (t != 0 && chunksize(t) != size) {3449t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];3450sizebits <<= 1;3451}3452if (t != 0) {3453tchunkptr u = t;3454do {3455if (u == (tchunkptr)x)3456return 1;3457} while ((u = u->fd) != t);3458}3459}3460}3461return 0;3462}34633464/* Traverse each chunk and check it; return total */3465static size_t traverse_and_check(mstate m) {3466size_t sum = 0;3467if (is_initialized(m)) {3468msegmentptr s = &m->seg;3469sum += m->topsize + TOP_FOOT_SIZE;3470while (s != 0) {3471mchunkptr q = align_as_chunk(s->base);3472mchunkptr lastq = 0;3473assert(pinuse(q));3474while (segment_holds(s, q) &&3475q != m->top && q->head != FENCEPOST_HEAD) {3476sum += chunksize(q);3477if (is_inuse(q)) {3478assert(!bin_find(m, q));3479do_check_inuse_chunk(m, q);3480}3481else {3482assert(q == m->dv || bin_find(m, q));3483assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */3484do_check_free_chunk(m, q);3485}3486lastq = q;3487q = next_chunk(q);3488}3489s = s->next;3490}3491}3492return sum;3493}349434953496/* Check all properties of malloc_state. */3497static void do_check_malloc_state(mstate m) {3498bindex_t i;3499size_t total;3500/* check bins */3501for (i = 0; i < NSMALLBINS; ++i)3502do_check_smallbin(m, i);3503for (i = 0; i < NTREEBINS; ++i)3504do_check_treebin(m, i);35053506if (m->dvsize != 0) { /* check dv chunk */3507do_check_any_chunk(m, m->dv);3508assert(m->dvsize == chunksize(m->dv));3509assert(m->dvsize >= MIN_CHUNK_SIZE);3510assert(bin_find(m, m->dv) == 0);3511}35123513if (m->top != 0) { /* check top chunk */3514do_check_top_chunk(m, m->top);3515/*assert(m->topsize == chunksize(m->top)); redundant */3516assert(m->topsize > 0);3517assert(bin_find(m, m->top) == 0);3518}35193520total = traverse_and_check(m);3521assert(total <= m->footprint);3522assert(m->footprint <= m->max_footprint);3523}3524#endif /* DEBUG */35253526/* ----------------------------- statistics ------------------------------ */35273528#if !NO_MALLINFO3529static struct mallinfo internal_mallinfo(mstate m) {3530struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };3531ensure_initialization();3532if (!PREACTION(m)) {3533check_malloc_state(m);3534if (is_initialized(m)) {3535size_t nfree = SIZE_T_ONE; /* top always free */3536size_t mfree = m->topsize + TOP_FOOT_SIZE;3537size_t sum = mfree;3538msegmentptr s = &m->seg;3539while (s != 0) {3540mchunkptr q = align_as_chunk(s->base);3541while (segment_holds(s, q) &&3542q != m->top && q->head != FENCEPOST_HEAD) {3543size_t sz = chunksize(q);3544sum += sz;3545if (!is_inuse(q)) {3546mfree += sz;3547++nfree;3548}3549q = next_chunk(q);3550}3551s = s->next;3552}35533554nm.arena = sum;3555nm.ordblks = nfree;3556nm.hblkhd = m->footprint - sum;3557nm.usmblks = m->max_footprint;3558nm.uordblks = m->footprint - mfree;3559nm.fordblks = mfree;3560nm.keepcost = m->topsize;3561}35623563POSTACTION(m);3564}3565return nm;3566}3567#endif /* !NO_MALLINFO */35683569#if !NO_MALLOC_STATS3570static void internal_malloc_stats(mstate m) 
{3571ensure_initialization();3572if (!PREACTION(m)) {3573size_t maxfp = 0;3574size_t fp = 0;3575size_t used = 0;3576check_malloc_state(m);3577if (is_initialized(m)) {3578msegmentptr s = &m->seg;3579maxfp = m->max_footprint;3580fp = m->footprint;3581used = fp - (m->topsize + TOP_FOOT_SIZE);35823583while (s != 0) {3584mchunkptr q = align_as_chunk(s->base);3585while (segment_holds(s, q) &&3586q != m->top && q->head != FENCEPOST_HEAD) {3587if (!is_inuse(q))3588used -= chunksize(q);3589q = next_chunk(q);3590}3591s = s->next;3592}3593}3594POSTACTION(m); /* drop lock */3595fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));3596fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp));3597fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used));3598}3599}3600#endif /* NO_MALLOC_STATS */36013602/* ----------------------- Operations on smallbins ----------------------- */36033604/*3605Various forms of linking and unlinking are defined as macros. Even3606the ones for trees, which are very long but have very short typical3607paths. This is ugly but reduces reliance on inlining support of3608compilers.3609*/36103611/* Link a free chunk into a smallbin */3612#define insert_small_chunk(M, P, S) {\3613bindex_t I = small_index(S);\3614mchunkptr B = smallbin_at(M, I);\3615mchunkptr F = B;\3616assert(S >= MIN_CHUNK_SIZE);\3617if (!smallmap_is_marked(M, I))\3618mark_smallmap(M, I);\3619else if (RTCHECK(ok_address(M, B->fd)))\3620F = B->fd;\3621else {\3622CORRUPTION_ERROR_ACTION(M);\3623}\3624B->fd = P;\3625F->bk = P;\3626P->fd = F;\3627P->bk = B;\3628}36293630/* Unlink a chunk from a smallbin */3631#define unlink_small_chunk(M, P, S) {\3632mchunkptr F = P->fd;\3633mchunkptr B = P->bk;\3634bindex_t I = small_index(S);\3635assert(P != B);\3636assert(P != F);\3637assert(chunksize(P) == small_index2size(I));\3638if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \3639if (B == F) {\3640clear_smallmap(M, I);\3641}\3642else if (RTCHECK(B == smallbin_at(M,I) ||\3643(ok_address(M, B) && B->fd == P))) {\3644F->bk = B;\3645B->fd = F;\3646}\3647else {\3648CORRUPTION_ERROR_ACTION(M);\3649}\3650}\3651else {\3652CORRUPTION_ERROR_ACTION(M);\3653}\3654}36553656/* Unlink the first chunk from a smallbin */3657#define unlink_first_small_chunk(M, B, P, I) {\3658mchunkptr F = P->fd;\3659assert(P != B);\3660assert(P != F);\3661assert(chunksize(P) == small_index2size(I));\3662if (B == F) {\3663clear_smallmap(M, I);\3664}\3665else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\3666F->bk = B;\3667B->fd = F;\3668}\3669else {\3670CORRUPTION_ERROR_ACTION(M);\3671}\3672}36733674/* Replace dv node, binning the old one */3675/* Used only when dvsize known to be small */3676#define replace_dv(M, P, S) {\3677size_t DVS = M->dvsize;\3678assert(is_small(DVS));\3679if (DVS != 0) {\3680mchunkptr DV = M->dv;\3681insert_small_chunk(M, DV, DVS);\3682}\3683M->dvsize = S;\3684M->dv = P;\3685}36863687/* ------------------------- Operations on trees ------------------------- */36883689/* Insert chunk into tree */3690#define insert_large_chunk(M, X, S) {\3691tbinptr* H;\3692bindex_t I;\3693compute_tree_index(S, I);\3694H = treebin_at(M, I);\3695X->index = I;\3696X->child[0] = X->child[1] = 0;\3697if (!treemap_is_marked(M, I)) {\3698mark_treemap(M, I);\3699*H = X;\3700X->parent = (tchunkptr)H;\3701X->fd = X->bk = X;\3702}\3703else {\3704tchunkptr T = *H;\3705size_t K = S << leftshift_for_tree_index(I);\3706for (;;) {\3707if (chunksize(T) != S) {\3708tchunkptr* C = &(T->child[(K >> 
(SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\3709K <<= 1;\3710if (*C != 0)\3711T = *C;\3712else if (RTCHECK(ok_address(M, C))) {\3713*C = X;\3714X->parent = T;\3715X->fd = X->bk = X;\3716break;\3717}\3718else {\3719CORRUPTION_ERROR_ACTION(M);\3720break;\3721}\3722}\3723else {\3724tchunkptr F = T->fd;\3725if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\3726T->fd = F->bk = X;\3727X->fd = F;\3728X->bk = T;\3729X->parent = 0;\3730break;\3731}\3732else {\3733CORRUPTION_ERROR_ACTION(M);\3734break;\3735}\3736}\3737}\3738}\3739}37403741/*3742Unlink steps:374337441. If x is a chained node, unlink it from its same-sized fd/bk links3745and choose its bk node as its replacement.37462. If x was the last node of its size, but not a leaf node, it must3747be replaced with a leaf node (not merely one with an open left or3748right), to make sure that lefts and rights of descendents3749correspond properly to bit masks. We use the rightmost descendent3750of x. We could use any other leaf, but this is easy to locate and3751tends to counteract removal of leftmosts elsewhere, and so keeps3752paths shorter than minimally guaranteed. This doesn't loop much3753because on average a node in a tree is near the bottom.37543. If x is the base of a chain (i.e., has parent links) relink3755x's parent and children to x's replacement (or null if none).3756*/37573758#define unlink_large_chunk(M, X) {\3759tchunkptr XP = X->parent;\3760tchunkptr R;\3761if (X->bk != X) {\3762tchunkptr F = X->fd;\3763R = X->bk;\3764if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\3765F->bk = R;\3766R->fd = F;\3767}\3768else {\3769CORRUPTION_ERROR_ACTION(M);\3770}\3771}\3772else {\3773tchunkptr* RP;\3774if (((R = *(RP = &(X->child[1]))) != 0) ||\3775((R = *(RP = &(X->child[0]))) != 0)) {\3776tchunkptr* CP;\3777while ((*(CP = &(R->child[1])) != 0) ||\3778(*(CP = &(R->child[0])) != 0)) {\3779R = *(RP = CP);\3780}\3781if (RTCHECK(ok_address(M, RP)))\3782*RP = 0;\3783else {\3784CORRUPTION_ERROR_ACTION(M);\3785}\3786}\3787}\3788if (XP != 0) {\3789tbinptr* H = treebin_at(M, X->index);\3790if (X == *H) {\3791if ((*H = R) == 0) \3792clear_treemap(M, X->index);\3793}\3794else if (RTCHECK(ok_address(M, XP))) {\3795if (XP->child[0] == X) \3796XP->child[0] = R;\3797else \3798XP->child[1] = R;\3799}\3800else\3801CORRUPTION_ERROR_ACTION(M);\3802if (R != 0) {\3803if (RTCHECK(ok_address(M, R))) {\3804tchunkptr C0, C1;\3805R->parent = XP;\3806if ((C0 = X->child[0]) != 0) {\3807if (RTCHECK(ok_address(M, C0))) {\3808R->child[0] = C0;\3809C0->parent = R;\3810}\3811else\3812CORRUPTION_ERROR_ACTION(M);\3813}\3814if ((C1 = X->child[1]) != 0) {\3815if (RTCHECK(ok_address(M, C1))) {\3816R->child[1] = C1;\3817C1->parent = R;\3818}\3819else\3820CORRUPTION_ERROR_ACTION(M);\3821}\3822}\3823else\3824CORRUPTION_ERROR_ACTION(M);\3825}\3826}\3827}38283829/* Relays to large vs small bin operations */38303831#define insert_chunk(M, P, S)\3832if (is_small(S)) insert_small_chunk(M, P, S)\3833else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }38343835#define unlink_chunk(M, P, S)\3836if (is_small(S)) unlink_small_chunk(M, P, S)\3837else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }383838393840/* Relays to internal calls to malloc/free from realloc, memalign etc */38413842#if ONLY_MSPACES3843#define internal_malloc(m, b) mspace_malloc(m, b)3844#define internal_free(m, mem) mspace_free(m,mem);3845#else /* ONLY_MSPACES */3846#if MSPACES3847#define internal_malloc(m, b)\3848((m == gm)? 
dlmalloc(b) : mspace_malloc(m, b))3849#define internal_free(m, mem)\3850if (m == gm) dlfree(mem); else mspace_free(m,mem);3851#else /* MSPACES */3852#define internal_malloc(m, b) dlmalloc(b)3853#define internal_free(m, mem) dlfree(mem)3854#endif /* MSPACES */3855#endif /* ONLY_MSPACES */38563857/* ----------------------- Direct-mmapping chunks ----------------------- */38583859/*3860Directly mmapped chunks are set up with an offset to the start of3861the mmapped region stored in the prev_foot field of the chunk. This3862allows reconstruction of the required argument to MUNMAP when freed,3863and also allows adjustment of the returned chunk to meet alignment3864requirements (especially in memalign).3865*/38663867/* Malloc using mmap */3868static void* mmap_alloc(mstate m, size_t nb) {3869size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);3870if (m->footprint_limit != 0) {3871size_t fp = m->footprint + mmsize;3872if (fp <= m->footprint || fp > m->footprint_limit)3873return 0;3874}3875if (mmsize > nb) { /* Check for wrap around 0 */3876char* mm = (char*)(CALL_DIRECT_MMAP(mmsize));3877if (mm != CMFAIL) {3878size_t offset = align_offset(chunk2mem(mm));3879size_t psize = mmsize - offset - MMAP_FOOT_PAD;3880mchunkptr p = (mchunkptr)(mm + offset);3881p->prev_foot = offset;3882p->head = psize;3883mark_inuse_foot(m, p, psize);3884chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;3885chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;38863887if (m->least_addr == 0 || mm < m->least_addr)3888m->least_addr = mm;3889if ((m->footprint += mmsize) > m->max_footprint)3890m->max_footprint = m->footprint;3891assert(is_aligned(chunk2mem(p)));3892check_mmapped_chunk(m, p);3893return chunk2mem(p);3894}3895}3896return 0;3897}38983899/* Realloc using mmap */3900static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) {3901size_t oldsize = chunksize(oldp);3902(void)flags; /* placate people compiling -Wunused */3903if (is_small(nb)) /* Can't shrink mmap regions below small size */3904return 0;3905/* Keep old chunk if big enough but not too big */3906if (oldsize >= nb + SIZE_T_SIZE &&3907(oldsize - nb) <= (mparams.granularity << 1))3908return oldp;3909else {3910size_t offset = oldp->prev_foot;3911size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;3912size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);3913char* cp = (char*)CALL_MREMAP((char*)oldp - offset,3914oldmmsize, newmmsize, flags);3915if (cp != CMFAIL) {3916mchunkptr newp = (mchunkptr)(cp + offset);3917size_t psize = newmmsize - offset - MMAP_FOOT_PAD;3918newp->head = psize;3919mark_inuse_foot(m, newp, psize);3920chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;3921chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;39223923if (cp < m->least_addr)3924m->least_addr = cp;3925if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)3926m->max_footprint = m->footprint;3927check_mmapped_chunk(m, newp);3928return newp;3929}3930}3931return 0;3932}393339343935/* -------------------------- mspace management -------------------------- */39363937/* Initialize top chunk and its size */3938static void init_top(mstate m, mchunkptr p, size_t psize) {3939/* Ensure alignment */3940size_t offset = align_offset(chunk2mem(p));3941p = (mchunkptr)((char*)p + offset);3942psize -= offset;39433944m->top = p;3945m->topsize = psize;3946p->head = psize | PINUSE_BIT;3947/* set size of fake trailing chunk holding overhead space only once */3948chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;3949m->trim_check = 
mparams.trim_threshold; /* reset on each update */3950}39513952/* Initialize bins for a new mstate that is otherwise zeroed out */3953static void init_bins(mstate m) {3954/* Establish circular links for smallbins */3955bindex_t i;3956for (i = 0; i < NSMALLBINS; ++i) {3957sbinptr bin = smallbin_at(m,i);3958bin->fd = bin->bk = bin;3959}3960}39613962#if PROCEED_ON_ERROR39633964/* default corruption action */3965static void reset_on_error(mstate m) {3966int i;3967++malloc_corruption_error_count;3968/* Reinitialize fields to forget about all memory */3969m->smallmap = m->treemap = 0;3970m->dvsize = m->topsize = 0;3971m->seg.base = 0;3972m->seg.size = 0;3973m->seg.next = 0;3974m->top = m->dv = 0;3975for (i = 0; i < NTREEBINS; ++i)3976*treebin_at(m, i) = 0;3977init_bins(m);3978}3979#endif /* PROCEED_ON_ERROR */39803981/* Allocate chunk and prepend remainder with chunk in successor base. */3982static void* prepend_alloc(mstate m, char* newbase, char* oldbase,3983size_t nb) {3984mchunkptr p = align_as_chunk(newbase);3985mchunkptr oldfirst = align_as_chunk(oldbase);3986size_t psize = (char*)oldfirst - (char*)p;3987mchunkptr q = chunk_plus_offset(p, nb);3988size_t qsize = psize - nb;3989set_size_and_pinuse_of_inuse_chunk(m, p, nb);39903991assert((char*)oldfirst > (char*)q);3992assert(pinuse(oldfirst));3993assert(qsize >= MIN_CHUNK_SIZE);39943995/* consolidate remainder with first chunk of old base */3996if (oldfirst == m->top) {3997size_t tsize = m->topsize += qsize;3998m->top = q;3999q->head = tsize | PINUSE_BIT;4000check_top_chunk(m, q);4001}4002else if (oldfirst == m->dv) {4003size_t dsize = m->dvsize += qsize;4004m->dv = q;4005set_size_and_pinuse_of_free_chunk(q, dsize);4006}4007else {4008if (!is_inuse(oldfirst)) {4009size_t nsize = chunksize(oldfirst);4010unlink_chunk(m, oldfirst, nsize);4011oldfirst = chunk_plus_offset(oldfirst, nsize);4012qsize += nsize;4013}4014set_free_with_pinuse(q, qsize, oldfirst);4015insert_chunk(m, q, qsize);4016check_free_chunk(m, q);4017}40184019check_malloced_chunk(m, chunk2mem(p), nb);4020return chunk2mem(p);4021}40224023/* Add a segment to hold a new noncontiguous region */4024static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {4025/* Determine locations and sizes of segment, fenceposts, old top */4026char* old_top = (char*)m->top;4027msegmentptr oldsp = segment_holding(m, old_top);4028char* old_end = oldsp->base + oldsp->size;4029size_t ssize = pad_request(sizeof(struct malloc_segment));4030char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);4031size_t offset = align_offset(chunk2mem(rawsp));4032char* asp = rawsp + offset;4033char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? 
old_top : asp;4034mchunkptr sp = (mchunkptr)csp;4035msegmentptr ss = (msegmentptr)(chunk2mem(sp));4036mchunkptr tnext = chunk_plus_offset(sp, ssize);4037mchunkptr p = tnext;4038int nfences = 0;40394040/* reset top to new space */4041init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);40424043/* Set up segment record */4044assert(is_aligned(ss));4045set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);4046*ss = m->seg; /* Push current record */4047m->seg.base = tbase;4048m->seg.size = tsize;4049m->seg.sflags = mmapped;4050m->seg.next = ss;40514052/* Insert trailing fenceposts */4053for (;;) {4054mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);4055p->head = FENCEPOST_HEAD;4056++nfences;4057if ((char*)(&(nextp->head)) < old_end)4058p = nextp;4059else4060break;4061}4062(void)nfences;4063assert(nfences >= 2);40644065/* Insert the rest of old top into a bin as an ordinary free chunk */4066if (csp != old_top) {4067mchunkptr q = (mchunkptr)old_top;4068size_t psize = csp - old_top;4069mchunkptr tn = chunk_plus_offset(q, psize);4070set_free_with_pinuse(q, psize, tn);4071insert_chunk(m, q, psize);4072}40734074check_top_chunk(m, m->top);4075}40764077/* -------------------------- System allocation -------------------------- */40784079/* Get memory from system using MORECORE or MMAP */4080static void* sys_alloc(mstate m, size_t nb) {4081char* tbase = CMFAIL;4082size_t tsize = 0;4083flag_t mmap_flag = 0;4084size_t asize; /* allocation size */40854086ensure_initialization();40874088/* Directly map large chunks, but only if already initialized */4089if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {4090void* mem = mmap_alloc(m, nb);4091if (mem != 0)4092return mem;4093}40944095asize = granularity_align(nb + SYS_ALLOC_PADDING);4096if (asize <= nb)4097return 0; /* wraparound */4098if (m->footprint_limit != 0) {4099size_t fp = m->footprint + asize;4100if (fp <= m->footprint || fp > m->footprint_limit)4101return 0;4102}41034104/*4105Try getting memory in any of three ways (in most-preferred to4106least-preferred order):41071. A call to MORECORE that can normally contiguously extend memory.4108(disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or4109or main space is mmapped or a previous contiguous call failed)41102. A call to MMAP new space (disabled if not HAVE_MMAP).4111Note that under the default settings, if MORECORE is unable to4112fulfill a request, and HAVE_MMAP is true, then mmap is4113used as a noncontiguous system allocator. This is a useful backup4114strategy for systems with holes in address spaces -- in this case4115sbrk cannot contiguously expand the heap, but mmap may be able to4116find space.41173. A call to MORECORE that cannot usually contiguously extend memory.4118(disabled if not HAVE_MORECORE)41194120In all cases, we need to request enough bytes from system to ensure4121we can malloc nb bytes upon success, so pad with enough space for4122top_foot, plus alignment-pad to make sure we don't lose bytes if4123not on boundary, and round this up to a granularity unit.4124*/41254126if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {4127char* br = CMFAIL;4128size_t ssize = asize; /* sbrk call size */4129msegmentptr ss = (m->top == 0)? 
0 : segment_holding(m, (char*)m->top);4130ACQUIRE_MALLOC_GLOBAL_LOCK();41314132if (ss == 0) { /* First time through or recovery */4133char* base = (char*)CALL_MORECORE(0);4134if (base != CMFAIL) {4135size_t fp;4136/* Adjust to end on a page boundary */4137if (!is_page_aligned(base))4138ssize += (page_align((size_t)base) - (size_t)base);4139fp = m->footprint + ssize; /* recheck limits */4140if (ssize > nb && ssize < HALF_MAX_SIZE_T &&4141(m->footprint_limit == 0 ||4142(fp > m->footprint && fp <= m->footprint_limit)) &&4143(br = (char*)(CALL_MORECORE(ssize))) == base) {4144tbase = base;4145tsize = ssize;4146}4147}4148}4149else {4150/* Subtract out existing available top space from MORECORE request. */4151ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING);4152/* Use mem here only if it did continuously extend old space */4153if (ssize < HALF_MAX_SIZE_T &&4154(br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) {4155tbase = br;4156tsize = ssize;4157}4158}41594160if (tbase == CMFAIL) { /* Cope with partial failure */4161if (br != CMFAIL) { /* Try to use/extend the space we did get */4162if (ssize < HALF_MAX_SIZE_T &&4163ssize < nb + SYS_ALLOC_PADDING) {4164size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize);4165if (esize < HALF_MAX_SIZE_T) {4166char* end = (char*)CALL_MORECORE(esize);4167if (end != CMFAIL)4168ssize += esize;4169else { /* Can't use; try to release */4170(void) CALL_MORECORE(-ssize);4171br = CMFAIL;4172}4173}4174}4175}4176if (br != CMFAIL) { /* Use the space we did get */4177tbase = br;4178tsize = ssize;4179}4180else4181disable_contiguous(m); /* Don't try contiguous path in the future */4182}41834184RELEASE_MALLOC_GLOBAL_LOCK();4185}41864187if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */4188char* mp = (char*)(CALL_MMAP(asize));4189if (mp != CMFAIL) {4190tbase = mp;4191tsize = asize;4192mmap_flag = USE_MMAP_BIT;4193}4194}41954196if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */4197if (asize < HALF_MAX_SIZE_T) {4198char* br = CMFAIL;4199char* end = CMFAIL;4200ACQUIRE_MALLOC_GLOBAL_LOCK();4201br = (char*)(CALL_MORECORE(asize));4202end = (char*)(CALL_MORECORE(0));4203RELEASE_MALLOC_GLOBAL_LOCK();4204if (br != CMFAIL && end != CMFAIL && br < end) {4205size_t ssize = end - br;4206if (ssize > nb + TOP_FOOT_SIZE) {4207tbase = br;4208tsize = ssize;4209}4210}4211}4212}42134214if (tbase != CMFAIL) {42154216if ((m->footprint += tsize) > m->max_footprint)4217m->max_footprint = m->footprint;42184219if (!is_initialized(m)) { /* first-time initialization */4220if (m->least_addr == 0 || tbase < m->least_addr)4221m->least_addr = tbase;4222m->seg.base = tbase;4223m->seg.size = tsize;4224m->seg.sflags = mmap_flag;4225m->magic = mparams.magic;4226m->release_checks = MAX_RELEASE_CHECK_RATE;4227init_bins(m);4228#if !ONLY_MSPACES4229if (is_global(m))4230init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);4231else4232#endif4233{4234/* Offset top by embedded malloc_state */4235mchunkptr mn = next_chunk(mem2chunk(m));4236init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);4237}4238}42394240else {4241/* Try to merge with an existing segment */4242msegmentptr sp = &m->seg;4243/* Only consider most recent segment if traversal suppressed */4244while (sp != 0 && tbase != sp->base + sp->size)4245sp = (NO_SEGMENT_TRAVERSAL) ? 
0 : sp->next;4246if (sp != 0 &&4247!is_extern_segment(sp) &&4248(sp->sflags & USE_MMAP_BIT) == mmap_flag &&4249segment_holds(sp, m->top)) { /* append */4250sp->size += tsize;4251init_top(m, m->top, m->topsize + tsize);4252}4253else {4254if (tbase < m->least_addr)4255m->least_addr = tbase;4256sp = &m->seg;4257while (sp != 0 && sp->base != tbase + tsize)4258sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;4259if (sp != 0 &&4260!is_extern_segment(sp) &&4261(sp->sflags & USE_MMAP_BIT) == mmap_flag) {4262char* oldbase = sp->base;4263sp->base = tbase;4264sp->size += tsize;4265return prepend_alloc(m, tbase, oldbase, nb);4266}4267else4268add_segment(m, tbase, tsize, mmap_flag);4269}4270}42714272if (nb < m->topsize) { /* Allocate from new or extended top space */4273size_t rsize = m->topsize -= nb;4274mchunkptr p = m->top;4275mchunkptr r = m->top = chunk_plus_offset(p, nb);4276r->head = rsize | PINUSE_BIT;4277set_size_and_pinuse_of_inuse_chunk(m, p, nb);4278check_top_chunk(m, m->top);4279check_malloced_chunk(m, chunk2mem(p), nb);4280return chunk2mem(p);4281}4282}42834284MALLOC_FAILURE_ACTION;4285return 0;4286}42874288/* ----------------------- system deallocation -------------------------- */42894290/* Unmap and unlink any mmapped segments that don't contain used chunks */4291static size_t release_unused_segments(mstate m) {4292size_t released = 0;4293int nsegs = 0;4294msegmentptr pred = &m->seg;4295msegmentptr sp = pred->next;4296while (sp != 0) {4297char* base = sp->base;4298size_t size = sp->size;4299msegmentptr next = sp->next;4300++nsegs;4301if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {4302mchunkptr p = align_as_chunk(base);4303size_t psize = chunksize(p);4304/* Can unmap if first chunk holds entire segment and not pinned */4305if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {4306tchunkptr tp = (tchunkptr)p;4307assert(segment_holds(sp, (char*)sp));4308if (p == m->dv) {4309m->dv = 0;4310m->dvsize = 0;4311}4312else {4313unlink_large_chunk(m, tp);4314}4315if (CALL_MUNMAP(base, size) == 0) {4316released += size;4317m->footprint -= size;4318/* unlink obsoleted record */4319sp = pred;4320sp->next = next;4321}4322else { /* back out if cannot unmap */4323insert_large_chunk(m, tp, psize);4324}4325}4326}4327if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */4328break;4329pred = sp;4330sp = next;4331}4332/* Reset check counter */4333m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)?4334(size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE);4335return released;4336}43374338static int sys_trim(mstate m, size_t pad) {4339size_t released = 0;4340ensure_initialization();4341if (pad < MAX_REQUEST && is_initialized(m)) {4342pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */43434344if (m->topsize > pad) {4345/* Shrink top space in granularity-size units, keeping at least one */4346size_t unit = mparams.granularity;4347size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -4348SIZE_T_ONE) * unit;4349msegmentptr sp = segment_holding(m, (char*)m->top);43504351if (!is_extern_segment(sp)) {4352if (is_mmapped_segment(sp)) {4353if (HAVE_MMAP &&4354sp->size >= extra &&4355!has_segment_link(m, sp)) { /* can't shrink if pinned */4356size_t newsize = sp->size - extra;4357(void)newsize; /* placate people compiling -Wunused-variable */4358/* Prefer mremap, fall back to munmap */4359if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||4360(CALL_MUNMAP(sp->base + newsize, extra) == 0)) {4361released = extra;4362}4363}4364}4365else if 
(HAVE_MORECORE) {4366if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */4367extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;4368ACQUIRE_MALLOC_GLOBAL_LOCK();4369{4370/* Make sure end of memory is where we last set it. */4371char* old_br = (char*)(CALL_MORECORE(0));4372if (old_br == sp->base + sp->size) {4373char* rel_br = (char*)(CALL_MORECORE(-extra));4374char* new_br = (char*)(CALL_MORECORE(0));4375if (rel_br != CMFAIL && new_br < old_br)4376released = old_br - new_br;4377}4378}4379RELEASE_MALLOC_GLOBAL_LOCK();4380}4381}43824383if (released != 0) {4384sp->size -= released;4385m->footprint -= released;4386init_top(m, m->top, m->topsize - released);4387check_top_chunk(m, m->top);4388}4389}43904391/* Unmap any unused mmapped segments */4392if (HAVE_MMAP)4393released += release_unused_segments(m);43944395/* On failure, disable autotrim to avoid repeated failed future calls */4396if (released == 0 && m->topsize > m->trim_check)4397m->trim_check = MAX_SIZE_T;4398}43994400return (released != 0)? 1 : 0;4401}44024403/* Consolidate and bin a chunk. Differs from exported versions4404of free mainly in that the chunk need not be marked as inuse.4405*/4406static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {4407mchunkptr next = chunk_plus_offset(p, psize);4408if (!pinuse(p)) {4409mchunkptr prev;4410size_t prevsize = p->prev_foot;4411if (is_mmapped(p)) {4412psize += prevsize + MMAP_FOOT_PAD;4413if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)4414m->footprint -= psize;4415return;4416}4417prev = chunk_minus_offset(p, prevsize);4418psize += prevsize;4419p = prev;4420if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */4421if (p != m->dv) {4422unlink_chunk(m, p, prevsize);4423}4424else if ((next->head & INUSE_BITS) == INUSE_BITS) {4425m->dvsize = psize;4426set_free_with_pinuse(p, psize, next);4427return;4428}4429}4430else {4431CORRUPTION_ERROR_ACTION(m);4432return;4433}4434}4435if (RTCHECK(ok_address(m, next))) {4436if (!cinuse(next)) { /* consolidate forward */4437if (next == m->top) {4438size_t tsize = m->topsize += psize;4439m->top = p;4440p->head = tsize | PINUSE_BIT;4441if (p == m->dv) {4442m->dv = 0;4443m->dvsize = 0;4444}4445return;4446}4447else if (next == m->dv) {4448size_t dsize = m->dvsize += psize;4449m->dv = p;4450set_size_and_pinuse_of_free_chunk(p, dsize);4451return;4452}4453else {4454size_t nsize = chunksize(next);4455psize += nsize;4456unlink_chunk(m, next, nsize);4457set_size_and_pinuse_of_free_chunk(p, psize);4458if (p == m->dv) {4459m->dvsize = psize;4460return;4461}4462}4463}4464else {4465set_free_with_pinuse(p, psize, next);4466}4467insert_chunk(m, p, psize);4468}4469else {4470CORRUPTION_ERROR_ACTION(m);4471}4472}44734474/* ---------------------------- malloc --------------------------- */44754476/* allocate a large request from the best fitting chunk in a treebin */4477static void* tmalloc_large(mstate m, size_t nb) {4478tchunkptr v = 0;4479size_t rsize = -nb; /* Unsigned negation */4480tchunkptr t;4481bindex_t idx;4482compute_tree_index(nb, idx);4483if ((t = *treebin_at(m, idx)) != 0) {4484/* Traverse tree for this bin looking for node with size == nb */4485size_t sizebits = nb << leftshift_for_tree_index(idx);4486tchunkptr rst = 0; /* The deepest untaken right subtree */4487for (;;) {4488tchunkptr rt;4489size_t trem = chunksize(t) - nb;4490if (trem < rsize) {4491v = t;4492if ((rsize = trem) == 0)4493break;4494}4495rt = t->child[1];4496t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];4497if (rt != 0 && rt != t)4498rst = rt;4499if (t == 0) {4500t 
= rst; /* set t to least subtree holding sizes > nb */4501break;4502}4503sizebits <<= 1;4504}4505}4506if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */4507binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;4508if (leftbits != 0) {4509bindex_t i;4510binmap_t leastbit = least_bit(leftbits);4511compute_bit2idx(leastbit, i);4512t = *treebin_at(m, i);4513}4514}45154516while (t != 0) { /* find smallest of tree or subtree */4517size_t trem = chunksize(t) - nb;4518if (trem < rsize) {4519rsize = trem;4520v = t;4521}4522t = leftmost_child(t);4523}45244525/* If dv is a better fit, return 0 so malloc will use it */4526if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {4527if (RTCHECK(ok_address(m, v))) { /* split */4528mchunkptr r = chunk_plus_offset(v, nb);4529assert(chunksize(v) == rsize + nb);4530if (RTCHECK(ok_next(v, r))) {4531unlink_large_chunk(m, v);4532if (rsize < MIN_CHUNK_SIZE)4533set_inuse_and_pinuse(m, v, (rsize + nb));4534else {4535set_size_and_pinuse_of_inuse_chunk(m, v, nb);4536set_size_and_pinuse_of_free_chunk(r, rsize);4537insert_chunk(m, r, rsize);4538}4539return chunk2mem(v);4540}4541}4542CORRUPTION_ERROR_ACTION(m);4543}4544return 0;4545}45464547/* allocate a small request from the best fitting chunk in a treebin */4548static void* tmalloc_small(mstate m, size_t nb) {4549tchunkptr t, v;4550size_t rsize;4551bindex_t i;4552binmap_t leastbit = least_bit(m->treemap);4553compute_bit2idx(leastbit, i);4554v = t = *treebin_at(m, i);4555rsize = chunksize(t) - nb;45564557while ((t = leftmost_child(t)) != 0) {4558size_t trem = chunksize(t) - nb;4559if (trem < rsize) {4560rsize = trem;4561v = t;4562}4563}45644565if (RTCHECK(ok_address(m, v))) {4566mchunkptr r = chunk_plus_offset(v, nb);4567assert(chunksize(v) == rsize + nb);4568if (RTCHECK(ok_next(v, r))) {4569unlink_large_chunk(m, v);4570if (rsize < MIN_CHUNK_SIZE)4571set_inuse_and_pinuse(m, v, (rsize + nb));4572else {4573set_size_and_pinuse_of_inuse_chunk(m, v, nb);4574set_size_and_pinuse_of_free_chunk(r, rsize);4575replace_dv(m, r, rsize);4576}4577return chunk2mem(v);4578}4579}45804581CORRUPTION_ERROR_ACTION(m);4582return 0;4583}45844585#if !ONLY_MSPACES45864587void* dlmalloc(size_t bytes) {4588/*4589Basic algorithm:4590If a small request (< 256 bytes minus per-chunk overhead):45911. If one exists, use a remainderless chunk in associated smallbin.4592(Remainderless means that there are too few excess bytes to4593represent as a chunk.)45942. If it is big enough, use the dv chunk, which is normally the4595chunk adjacent to the one used for the most recent small request.45963. If one exists, split the smallest available chunk in a bin,4597saving remainder in dv.45984. If it is big enough, use the top chunk.45995. If available, get memory from system and use it4600Otherwise, for a large request:46011. Find the smallest available binned chunk that fits, and use it4602if it is better fitting than dv chunk, splitting if necessary.46032. If better fitting than any binned chunk, use the dv chunk.46043. If it is big enough, use the top chunk.46054. If request size >= mmap threshold, try to directly mmap this chunk.46065. If available, get memory from system and use it46074608The ugly goto's here ensure that postaction occurs along all paths.4609*/46104611#if USE_LOCKS4612ensure_initialization(); /* initialize in sys_alloc if not using locks */4613#endif46144615if (!PREACTION(gm)) {4616void* mem;4617size_t nb;4618if (bytes <= MAX_SMALL_REQUEST) {4619bindex_t idx;4620binmap_t smallbits;4621nb = (bytes < MIN_REQUEST)? 
MIN_CHUNK_SIZE : pad_request(bytes);4622idx = small_index(nb);4623smallbits = gm->smallmap >> idx;46244625if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */4626mchunkptr b, p;4627idx += ~smallbits & 1; /* Uses next bin if idx empty */4628b = smallbin_at(gm, idx);4629p = b->fd;4630assert(chunksize(p) == small_index2size(idx));4631unlink_first_small_chunk(gm, b, p, idx);4632set_inuse_and_pinuse(gm, p, small_index2size(idx));4633mem = chunk2mem(p);4634check_malloced_chunk(gm, mem, nb);4635goto postaction;4636}46374638else if (nb > gm->dvsize) {4639if (smallbits != 0) { /* Use chunk in next nonempty smallbin */4640mchunkptr b, p, r;4641size_t rsize;4642bindex_t i;4643binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));4644binmap_t leastbit = least_bit(leftbits);4645compute_bit2idx(leastbit, i);4646b = smallbin_at(gm, i);4647p = b->fd;4648assert(chunksize(p) == small_index2size(i));4649unlink_first_small_chunk(gm, b, p, i);4650rsize = small_index2size(i) - nb;4651/* Fit here cannot be remainderless if 4byte sizes */4652if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)4653set_inuse_and_pinuse(gm, p, small_index2size(i));4654else {4655set_size_and_pinuse_of_inuse_chunk(gm, p, nb);4656r = chunk_plus_offset(p, nb);4657set_size_and_pinuse_of_free_chunk(r, rsize);4658replace_dv(gm, r, rsize);4659}4660mem = chunk2mem(p);4661check_malloced_chunk(gm, mem, nb);4662goto postaction;4663}46644665else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {4666check_malloced_chunk(gm, mem, nb);4667goto postaction;4668}4669}4670}4671else if (bytes >= MAX_REQUEST)4672nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */4673else {4674nb = pad_request(bytes);4675if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {4676check_malloced_chunk(gm, mem, nb);4677goto postaction;4678}4679}46804681if (nb <= gm->dvsize) {4682size_t rsize = gm->dvsize - nb;4683mchunkptr p = gm->dv;4684if (rsize >= MIN_CHUNK_SIZE) { /* split dv */4685mchunkptr r = gm->dv = chunk_plus_offset(p, nb);4686gm->dvsize = rsize;4687set_size_and_pinuse_of_free_chunk(r, rsize);4688set_size_and_pinuse_of_inuse_chunk(gm, p, nb);4689}4690else { /* exhaust dv */4691size_t dvs = gm->dvsize;4692gm->dvsize = 0;4693gm->dv = 0;4694set_inuse_and_pinuse(gm, p, dvs);4695}4696mem = chunk2mem(p);4697check_malloced_chunk(gm, mem, nb);4698goto postaction;4699}47004701else if (nb < gm->topsize) { /* Split top */4702size_t rsize = gm->topsize -= nb;4703mchunkptr p = gm->top;4704mchunkptr r = gm->top = chunk_plus_offset(p, nb);4705r->head = rsize | PINUSE_BIT;4706set_size_and_pinuse_of_inuse_chunk(gm, p, nb);4707mem = chunk2mem(p);4708check_top_chunk(gm, gm->top);4709check_malloced_chunk(gm, mem, nb);4710goto postaction;4711}47124713mem = sys_alloc(gm, nb);47144715postaction:4716POSTACTION(gm);4717return mem;4718}47194720return 0;4721}47224723/* ---------------------------- free --------------------------- */47244725void dlfree(void* mem) {4726/*4727Consolidate freed chunks with preceeding or succeeding bordering4728free chunks, if they exist, and then place in a bin. 
Intermixed4729with special cases for top, dv, mmapped chunks, and usage errors.4730*/47314732if (mem != 0) {4733mchunkptr p = mem2chunk(mem);4734#if FOOTERS4735mstate fm = get_mstate_for(p);4736if (!ok_magic(fm)) {4737USAGE_ERROR_ACTION(fm, p);4738return;4739}4740#else /* FOOTERS */4741#define fm gm4742#endif /* FOOTERS */4743if (!PREACTION(fm)) {4744check_inuse_chunk(fm, p);4745if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {4746size_t psize = chunksize(p);4747mchunkptr next = chunk_plus_offset(p, psize);4748if (!pinuse(p)) {4749size_t prevsize = p->prev_foot;4750if (is_mmapped(p)) {4751psize += prevsize + MMAP_FOOT_PAD;4752if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)4753fm->footprint -= psize;4754goto postaction;4755}4756else {4757mchunkptr prev = chunk_minus_offset(p, prevsize);4758psize += prevsize;4759p = prev;4760if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */4761if (p != fm->dv) {4762unlink_chunk(fm, p, prevsize);4763}4764else if ((next->head & INUSE_BITS) == INUSE_BITS) {4765fm->dvsize = psize;4766set_free_with_pinuse(p, psize, next);4767goto postaction;4768}4769}4770else4771goto erroraction;4772}4773}47744775if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {4776if (!cinuse(next)) { /* consolidate forward */4777if (next == fm->top) {4778size_t tsize = fm->topsize += psize;4779fm->top = p;4780p->head = tsize | PINUSE_BIT;4781if (p == fm->dv) {4782fm->dv = 0;4783fm->dvsize = 0;4784}4785if (should_trim(fm, tsize))4786sys_trim(fm, 0);4787goto postaction;4788}4789else if (next == fm->dv) {4790size_t dsize = fm->dvsize += psize;4791fm->dv = p;4792set_size_and_pinuse_of_free_chunk(p, dsize);4793goto postaction;4794}4795else {4796size_t nsize = chunksize(next);4797psize += nsize;4798unlink_chunk(fm, next, nsize);4799set_size_and_pinuse_of_free_chunk(p, psize);4800if (p == fm->dv) {4801fm->dvsize = psize;4802goto postaction;4803}4804}4805}4806else4807set_free_with_pinuse(p, psize, next);48084809if (is_small(psize)) {4810insert_small_chunk(fm, p, psize);4811check_free_chunk(fm, p);4812}4813else {4814tchunkptr tp = (tchunkptr)p;4815insert_large_chunk(fm, tp, psize);4816check_free_chunk(fm, p);4817if (--fm->release_checks == 0)4818release_unused_segments(fm);4819}4820goto postaction;4821}4822}4823erroraction:4824USAGE_ERROR_ACTION(fm, p);4825postaction:4826POSTACTION(fm);4827}4828}4829#if !FOOTERS4830#undef fm4831#endif /* FOOTERS */4832}48334834void* dlcalloc(size_t n_elements, size_t elem_size) {4835void* mem;4836size_t req = 0;4837if (n_elements != 0) {4838req = n_elements * elem_size;4839if (((n_elements | elem_size) & ~(size_t)0xffff) &&4840(req / n_elements != elem_size))4841req = MAX_SIZE_T; /* force downstream failure on overflow */4842}4843mem = dlmalloc(req);4844if (mem != 0 && calloc_must_clear(mem2chunk(mem)))4845memset(mem, 0, req);4846return mem;4847}48484849#endif /* !ONLY_MSPACES */48504851/* ------------ Internal support for realloc, memalign, etc -------------- */48524853/* Try to realloc; only in-place unless can_move true */4854static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,4855int can_move) {4856mchunkptr newp = 0;4857size_t oldsize = chunksize(p);4858mchunkptr next = chunk_plus_offset(p, oldsize);4859if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&4860ok_next(p, next) && ok_pinuse(next))) {4861if (is_mmapped(p)) {4862newp = mmap_resize(m, p, nb, can_move);4863}4864else if (oldsize >= nb) { /* already big enough */4865size_t rsize = oldsize - nb;4866if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */4867mchunkptr r = 
chunk_plus_offset(p, nb);4868set_inuse(m, p, nb);4869set_inuse(m, r, rsize);4870dispose_chunk(m, r, rsize);4871}4872newp = p;4873}4874else if (next == m->top) { /* extend into top */4875if (oldsize + m->topsize > nb) {4876size_t newsize = oldsize + m->topsize;4877size_t newtopsize = newsize - nb;4878mchunkptr newtop = chunk_plus_offset(p, nb);4879set_inuse(m, p, nb);4880newtop->head = newtopsize |PINUSE_BIT;4881m->top = newtop;4882m->topsize = newtopsize;4883newp = p;4884}4885}4886else if (next == m->dv) { /* extend into dv */4887size_t dvs = m->dvsize;4888if (oldsize + dvs >= nb) {4889size_t dsize = oldsize + dvs - nb;4890if (dsize >= MIN_CHUNK_SIZE) {4891mchunkptr r = chunk_plus_offset(p, nb);4892mchunkptr n = chunk_plus_offset(r, dsize);4893set_inuse(m, p, nb);4894set_size_and_pinuse_of_free_chunk(r, dsize);4895clear_pinuse(n);4896m->dvsize = dsize;4897m->dv = r;4898}4899else { /* exhaust dv */4900size_t newsize = oldsize + dvs;4901set_inuse(m, p, newsize);4902m->dvsize = 0;4903m->dv = 0;4904}4905newp = p;4906}4907}4908else if (!cinuse(next)) { /* extend into next free chunk */4909size_t nextsize = chunksize(next);4910if (oldsize + nextsize >= nb) {4911size_t rsize = oldsize + nextsize - nb;4912unlink_chunk(m, next, nextsize);4913if (rsize < MIN_CHUNK_SIZE) {4914size_t newsize = oldsize + nextsize;4915set_inuse(m, p, newsize);4916}4917else {4918mchunkptr r = chunk_plus_offset(p, nb);4919set_inuse(m, p, nb);4920set_inuse(m, r, rsize);4921dispose_chunk(m, r, rsize);4922}4923newp = p;4924}4925}4926}4927else {4928USAGE_ERROR_ACTION(m, chunk2mem(p));4929}4930return newp;4931}49324933static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {4934void* mem = 0;4935if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */4936alignment = MIN_CHUNK_SIZE;4937if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */4938size_t a = MALLOC_ALIGNMENT << 1;4939while (a < alignment) a <<= 1;4940alignment = a;4941}4942if (bytes >= MAX_REQUEST - alignment) {4943if (m != 0) { /* Test isn't needed but avoids compiler warning */4944MALLOC_FAILURE_ACTION;4945}4946}4947else {4948size_t nb = request2size(bytes);4949size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;4950mem = internal_malloc(m, req);4951if (mem != 0) {4952mchunkptr p = mem2chunk(mem);4953if (PREACTION(m))4954return 0;4955if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */4956/*4957Find an aligned spot inside chunk. 
Since we need to give4958back leading space in a chunk of at least MIN_CHUNK_SIZE, if4959the first calculation places us at a spot with less than4960MIN_CHUNK_SIZE leader, we can move to the next aligned spot.4961We've allocated enough total room so that this is always4962possible.4963*/4964char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment -4965SIZE_T_ONE)) &4966-alignment));4967char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?4968br : br+alignment;4969mchunkptr newp = (mchunkptr)pos;4970size_t leadsize = pos - (char*)(p);4971size_t newsize = chunksize(p) - leadsize;49724973if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */4974newp->prev_foot = p->prev_foot + leadsize;4975newp->head = newsize;4976}4977else { /* Otherwise, give back leader, use the rest */4978set_inuse(m, newp, newsize);4979set_inuse(m, p, leadsize);4980dispose_chunk(m, p, leadsize);4981}4982p = newp;4983}49844985/* Give back spare room at the end */4986if (!is_mmapped(p)) {4987size_t size = chunksize(p);4988if (size > nb + MIN_CHUNK_SIZE) {4989size_t remainder_size = size - nb;4990mchunkptr remainder = chunk_plus_offset(p, nb);4991set_inuse(m, p, nb);4992set_inuse(m, remainder, remainder_size);4993dispose_chunk(m, remainder, remainder_size);4994}4995}49964997mem = chunk2mem(p);4998assert (chunksize(p) >= nb);4999assert(((size_t)mem & (alignment - 1)) == 0);5000check_inuse_chunk(m, p);5001POSTACTION(m);5002}5003}5004return mem;5005}50065007/*5008Common support for independent_X routines, handling5009all of the combinations that can result.5010The opts arg has:5011bit 0 set if all elements are same size (using sizes[0])5012bit 1 set if elements should be zeroed5013*/5014static void** ialloc(mstate m,5015size_t n_elements,5016size_t* sizes,5017int opts,5018void* chunks[]) {50195020size_t element_size; /* chunksize of each element, if all same */5021size_t contents_size; /* total size of elements */5022size_t array_size; /* request size of pointer array */5023void* mem; /* malloced aggregate space */5024mchunkptr p; /* corresponding chunk */5025size_t remainder_size; /* remaining bytes while splitting */5026void** marray; /* either "chunks" or malloced ptr array */5027mchunkptr array_chunk; /* chunk for malloced ptr array */5028flag_t was_enabled; /* to disable mmap */5029size_t size;5030size_t i;50315032ensure_initialization();5033/* compute array length, if needed */5034if (chunks != 0) {5035if (n_elements == 0)5036return chunks; /* nothing to do */5037marray = chunks;5038array_size = 0;5039}5040else {5041/* if empty req, must still return chunk representing empty array */5042if (n_elements == 0)5043return (void**)internal_malloc(m, 0);5044marray = 0;5045array_size = request2size(n_elements * (sizeof(void*)));5046}50475048/* compute total element size */5049if (opts & 0x1) { /* all-same-size */5050element_size = request2size(*sizes);5051contents_size = n_elements * element_size;5052}5053else { /* add up all the sizes */5054element_size = 0;5055contents_size = 0;5056for (i = 0; i != n_elements; ++i)5057contents_size += request2size(sizes[i]);5058}50595060size = contents_size + array_size;50615062/*5063Allocate the aggregate chunk. 
First disable direct-mmapping so5064malloc won't use it, since we would not be able to later5065free/realloc space internal to a segregated mmap region.5066*/5067was_enabled = use_mmap(m);5068disable_mmap(m);5069mem = internal_malloc(m, size - CHUNK_OVERHEAD);5070if (was_enabled)5071enable_mmap(m);5072if (mem == 0)5073return 0;50745075if (PREACTION(m)) return 0;5076p = mem2chunk(mem);5077remainder_size = chunksize(p);50785079assert(!is_mmapped(p));50805081if (opts & 0x2) { /* optionally clear the elements */5082memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);5083}50845085/* If not provided, allocate the pointer array as final part of chunk */5086if (marray == 0) {5087size_t array_chunk_size;5088array_chunk = chunk_plus_offset(p, contents_size);5089array_chunk_size = remainder_size - contents_size;5090marray = (void**) (chunk2mem(array_chunk));5091set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);5092remainder_size = contents_size;5093}50945095/* split out elements */5096for (i = 0; ; ++i) {5097marray[i] = chunk2mem(p);5098if (i != n_elements-1) {5099if (element_size != 0)5100size = element_size;5101else5102size = request2size(sizes[i]);5103remainder_size -= size;5104set_size_and_pinuse_of_inuse_chunk(m, p, size);5105p = chunk_plus_offset(p, size);5106}5107else { /* the final element absorbs any overallocation slop */5108set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);5109break;5110}5111}51125113#if DEBUG5114if (marray != chunks) {5115/* final element must have exactly exhausted chunk */5116if (element_size != 0) {5117assert(remainder_size == element_size);5118}5119else {5120assert(remainder_size == request2size(sizes[i]));5121}5122check_inuse_chunk(m, mem2chunk(marray));5123}5124for (i = 0; i != n_elements; ++i)5125check_inuse_chunk(m, mem2chunk(marray[i]));51265127#endif /* DEBUG */51285129POSTACTION(m);5130return marray;5131}51325133/* Try to free all pointers in the given array.5134Note: this could be made faster, by delaying consolidation,5135at the price of disabling some user integrity checks, We5136still optimize some consolidations by combining adjacent5137chunks before freeing, which will occur often if allocated5138with ialloc or the array is sorted.5139*/5140static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) {5141size_t unfreed = 0;5142if (!PREACTION(m)) {5143void** a;5144void** fence = &(array[nelem]);5145for (a = array; a != fence; ++a) {5146void* mem = *a;5147if (mem != 0) {5148mchunkptr p = mem2chunk(mem);5149size_t psize = chunksize(p);5150#if FOOTERS5151if (get_mstate_for(p) != m) {5152++unfreed;5153continue;5154}5155#endif5156check_inuse_chunk(m, p);5157*a = 0;5158if (RTCHECK(ok_address(m, p) && ok_inuse(p))) {5159void ** b = a + 1; /* try to merge with next chunk */5160mchunkptr next = next_chunk(p);5161if (b != fence && *b == chunk2mem(next)) {5162size_t newsize = chunksize(next) + psize;5163set_inuse(m, p, newsize);5164*b = chunk2mem(p);5165}5166else5167dispose_chunk(m, p, psize);5168}5169else {5170CORRUPTION_ERROR_ACTION(m);5171break;5172}5173}5174}5175if (should_trim(m, m->topsize))5176sys_trim(m, 0);5177POSTACTION(m);5178}5179return unfreed;5180}51815182/* Traversal */5183#if MALLOC_INSPECT_ALL5184static void internal_inspect_all(mstate m,5185void(*handler)(void *start,5186void *end,5187size_t used_bytes,5188void* callback_arg),5189void* arg) {5190if (is_initialized(m)) {5191mchunkptr top = m->top;5192msegmentptr s;5193for (s = &m->seg; s != 0; s = s->next) {5194mchunkptr q = 
align_as_chunk(s->base);5195while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) {5196mchunkptr next = next_chunk(q);5197size_t sz = chunksize(q);5198size_t used;5199void* start;5200if (is_inuse(q)) {5201used = sz - CHUNK_OVERHEAD; /* must not be mmapped */5202start = chunk2mem(q);5203}5204else {5205used = 0;5206if (is_small(sz)) { /* offset by possible bookkeeping */5207start = (void*)((char*)q + sizeof(struct malloc_chunk));5208}5209else {5210start = (void*)((char*)q + sizeof(struct malloc_tree_chunk));5211}5212}5213if (start < (void*)next) /* skip if all space is bookkeeping */5214handler(start, next, used, arg);5215if (q == top)5216break;5217q = next;5218}5219}5220}5221}5222#endif /* MALLOC_INSPECT_ALL */52235224/* ------------------ Exported realloc, memalign, etc -------------------- */52255226#if !ONLY_MSPACES52275228void* dlrealloc(void* oldmem, size_t bytes) {5229void* mem = 0;5230if (oldmem == 0) {5231mem = dlmalloc(bytes);5232}5233else if (bytes >= MAX_REQUEST) {5234MALLOC_FAILURE_ACTION;5235}5236#ifdef REALLOC_ZERO_BYTES_FREES5237else if (bytes == 0) {5238dlfree(oldmem);5239}5240#endif /* REALLOC_ZERO_BYTES_FREES */5241else {5242size_t nb = request2size(bytes);5243mchunkptr oldp = mem2chunk(oldmem);5244#if ! FOOTERS5245mstate m = gm;5246#else /* FOOTERS */5247mstate m = get_mstate_for(oldp);5248if (!ok_magic(m)) {5249USAGE_ERROR_ACTION(m, oldmem);5250return 0;5251}5252#endif /* FOOTERS */5253if (!PREACTION(m)) {5254mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);5255POSTACTION(m);5256if (newp != 0) {5257check_inuse_chunk(m, newp);5258mem = chunk2mem(newp);5259}5260else {5261mem = internal_malloc(m, bytes);5262if (mem != 0) {5263size_t oc = chunksize(oldp) - overhead_for(oldp);5264memcpy(mem, oldmem, (oc < bytes)? oc : bytes);5265internal_free(m, oldmem);5266}5267}5268}5269}5270return mem;5271}52725273void* dlrealloc_in_place(void* oldmem, size_t bytes) {5274void* mem = 0;5275if (oldmem != 0) {5276if (bytes >= MAX_REQUEST) {5277MALLOC_FAILURE_ACTION;5278}5279else {5280size_t nb = request2size(bytes);5281mchunkptr oldp = mem2chunk(oldmem);5282#if ! 
FOOTERS5283mstate m = gm;5284#else /* FOOTERS */5285mstate m = get_mstate_for(oldp);5286if (!ok_magic(m)) {5287USAGE_ERROR_ACTION(m, oldmem);5288return 0;5289}5290#endif /* FOOTERS */5291if (!PREACTION(m)) {5292mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);5293POSTACTION(m);5294if (newp == oldp) {5295check_inuse_chunk(m, newp);5296mem = oldmem;5297}5298}5299}5300}5301return mem;5302}53035304void* dlmemalign(size_t alignment, size_t bytes) {5305if (alignment <= MALLOC_ALIGNMENT) {5306return dlmalloc(bytes);5307}5308return internal_memalign(gm, alignment, bytes);5309}53105311int dlposix_memalign(void** pp, size_t alignment, size_t bytes) {5312void* mem = 0;5313if (alignment == MALLOC_ALIGNMENT)5314mem = dlmalloc(bytes);5315else {5316size_t d = alignment / sizeof(void*);5317size_t r = alignment % sizeof(void*);5318if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0)5319return EINVAL;5320else if (bytes <= MAX_REQUEST - alignment) {5321if (alignment < MIN_CHUNK_SIZE)5322alignment = MIN_CHUNK_SIZE;5323mem = internal_memalign(gm, alignment, bytes);5324}5325}5326if (mem == 0)5327return ENOMEM;5328else {5329*pp = mem;5330return 0;5331}5332}53335334void* dlvalloc(size_t bytes) {5335size_t pagesz;5336ensure_initialization();5337pagesz = mparams.page_size;5338return dlmemalign(pagesz, bytes);5339}53405341void* dlpvalloc(size_t bytes) {5342size_t pagesz;5343ensure_initialization();5344pagesz = mparams.page_size;5345return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));5346}53475348void** dlindependent_calloc(size_t n_elements, size_t elem_size,5349void* chunks[]) {5350size_t sz = elem_size; /* serves as 1-element array */5351return ialloc(gm, n_elements, &sz, 3, chunks);5352}53535354void** dlindependent_comalloc(size_t n_elements, size_t sizes[],5355void* chunks[]) {5356return ialloc(gm, n_elements, sizes, 0, chunks);5357}53585359size_t dlbulk_free(void* array[], size_t nelem) {5360return internal_bulk_free(gm, array, nelem);5361}53625363#if MALLOC_INSPECT_ALL5364void dlmalloc_inspect_all(void(*handler)(void *start,5365void *end,5366size_t used_bytes,5367void* callback_arg),5368void* arg) {5369ensure_initialization();5370if (!PREACTION(gm)) {5371internal_inspect_all(gm, handler, arg);5372POSTACTION(gm);5373}5374}5375#endif /* MALLOC_INSPECT_ALL */53765377int dlmalloc_trim(size_t pad) {5378int result = 0;5379ensure_initialization();5380if (!PREACTION(gm)) {5381result = sys_trim(gm, pad);5382POSTACTION(gm);5383}5384return result;5385}53865387size_t dlmalloc_footprint(void) {5388return gm->footprint;5389}53905391size_t dlmalloc_max_footprint(void) {5392return gm->max_footprint;5393}53945395size_t dlmalloc_footprint_limit(void) {5396size_t maf = gm->footprint_limit;5397return maf == 0 ? 
MAX_SIZE_T : maf;5398}53995400size_t dlmalloc_set_footprint_limit(size_t bytes) {5401size_t result; /* invert sense of 0 */5402if (bytes == 0)5403result = granularity_align(1); /* Use minimal size */5404if (bytes == MAX_SIZE_T)5405result = 0; /* disable */5406else5407result = granularity_align(bytes);5408return gm->footprint_limit = result;5409}54105411#if !NO_MALLINFO5412struct mallinfo dlmallinfo(void) {5413return internal_mallinfo(gm);5414}5415#endif /* NO_MALLINFO */54165417#if !NO_MALLOC_STATS5418void dlmalloc_stats() {5419internal_malloc_stats(gm);5420}5421#endif /* NO_MALLOC_STATS */54225423int dlmallopt(int param_number, int value) {5424return change_mparam(param_number, value);5425}54265427size_t dlmalloc_usable_size(void* mem) {5428if (mem != 0) {5429mchunkptr p = mem2chunk(mem);5430if (is_inuse(p))5431return chunksize(p) - overhead_for(p);5432}5433return 0;5434}54355436#endif /* !ONLY_MSPACES */54375438/* ----------------------------- user mspaces ---------------------------- */54395440#if MSPACES54415442static mstate init_user_mstate(char* tbase, size_t tsize) {5443size_t msize = pad_request(sizeof(struct malloc_state));5444mchunkptr mn;5445mchunkptr msp = align_as_chunk(tbase);5446mstate m = (mstate)(chunk2mem(msp));5447memset(m, 0, msize);5448(void)INITIAL_LOCK(&m->mutex);5449msp->head = (msize|INUSE_BITS);5450m->seg.base = m->least_addr = tbase;5451m->seg.size = m->footprint = m->max_footprint = tsize;5452m->magic = mparams.magic;5453m->release_checks = MAX_RELEASE_CHECK_RATE;5454m->mflags = mparams.default_mflags;5455m->extp = 0;5456m->exts = 0;5457disable_contiguous(m);5458init_bins(m);5459mn = next_chunk(mem2chunk(m));5460init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);5461check_top_chunk(m, m->top);5462return m;5463}54645465mspace create_mspace(size_t capacity, int locked) {5466mstate m = 0;5467size_t msize;5468ensure_initialization();5469msize = pad_request(sizeof(struct malloc_state));5470if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {5471size_t rs = ((capacity == 0)? 
mparams.granularity :5472(capacity + TOP_FOOT_SIZE + msize));5473size_t tsize = granularity_align(rs);5474char* tbase = (char*)(CALL_MMAP(tsize));5475if (tbase != CMFAIL) {5476m = init_user_mstate(tbase, tsize);5477m->seg.sflags = USE_MMAP_BIT;5478set_lock(m, locked);5479}5480}5481return (mspace)m;5482}54835484mspace create_mspace_with_base(void* base, size_t capacity, int locked) {5485mstate m = 0;5486size_t msize;5487ensure_initialization();5488msize = pad_request(sizeof(struct malloc_state));5489if (capacity > msize + TOP_FOOT_SIZE &&5490capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {5491m = init_user_mstate((char*)base, capacity);5492m->seg.sflags = EXTERN_BIT;5493set_lock(m, locked);5494}5495return (mspace)m;5496}54975498int mspace_track_large_chunks(mspace msp, int enable) {5499int ret = 0;5500mstate ms = (mstate)msp;5501if (!PREACTION(ms)) {5502if (!use_mmap(ms)) {5503ret = 1;5504}5505if (!enable) {5506enable_mmap(ms);5507} else {5508disable_mmap(ms);5509}5510POSTACTION(ms);5511}5512return ret;5513}55145515size_t destroy_mspace(mspace msp) {5516size_t freed = 0;5517mstate ms = (mstate)msp;5518if (ok_magic(ms)) {5519msegmentptr sp = &ms->seg;5520(void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */5521while (sp != 0) {5522char* base = sp->base;5523size_t size = sp->size;5524flag_t flag = sp->sflags;5525(void)base; /* placate people compiling -Wunused-variable */5526sp = sp->next;5527if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&5528CALL_MUNMAP(base, size) == 0)5529freed += size;5530}5531}5532else {5533USAGE_ERROR_ACTION(ms,ms);5534}5535return freed;5536}55375538/*5539mspace versions of routines are near-clones of the global5540versions. This is not so nice but better than the alternatives.5541*/55425543void* mspace_malloc(mspace msp, size_t bytes) {5544mstate ms = (mstate)msp;5545if (!ok_magic(ms)) {5546USAGE_ERROR_ACTION(ms,ms);5547return 0;5548}5549if (!PREACTION(ms)) {5550void* mem;5551size_t nb;5552if (bytes <= MAX_SMALL_REQUEST) {5553bindex_t idx;5554binmap_t smallbits;5555nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);5556idx = small_index(nb);5557smallbits = ms->smallmap >> idx;55585559if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
*/5560mchunkptr b, p;5561idx += ~smallbits & 1; /* Uses next bin if idx empty */5562b = smallbin_at(ms, idx);5563p = b->fd;5564assert(chunksize(p) == small_index2size(idx));5565unlink_first_small_chunk(ms, b, p, idx);5566set_inuse_and_pinuse(ms, p, small_index2size(idx));5567mem = chunk2mem(p);5568check_malloced_chunk(ms, mem, nb);5569goto postaction;5570}55715572else if (nb > ms->dvsize) {5573if (smallbits != 0) { /* Use chunk in next nonempty smallbin */5574mchunkptr b, p, r;5575size_t rsize;5576bindex_t i;5577binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));5578binmap_t leastbit = least_bit(leftbits);5579compute_bit2idx(leastbit, i);5580b = smallbin_at(ms, i);5581p = b->fd;5582assert(chunksize(p) == small_index2size(i));5583unlink_first_small_chunk(ms, b, p, i);5584rsize = small_index2size(i) - nb;5585/* Fit here cannot be remainderless if 4byte sizes */5586if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)5587set_inuse_and_pinuse(ms, p, small_index2size(i));5588else {5589set_size_and_pinuse_of_inuse_chunk(ms, p, nb);5590r = chunk_plus_offset(p, nb);5591set_size_and_pinuse_of_free_chunk(r, rsize);5592replace_dv(ms, r, rsize);5593}5594mem = chunk2mem(p);5595check_malloced_chunk(ms, mem, nb);5596goto postaction;5597}55985599else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {5600check_malloced_chunk(ms, mem, nb);5601goto postaction;5602}5603}5604}5605else if (bytes >= MAX_REQUEST)5606nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */5607else {5608nb = pad_request(bytes);5609if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {5610check_malloced_chunk(ms, mem, nb);5611goto postaction;5612}5613}56145615if (nb <= ms->dvsize) {5616size_t rsize = ms->dvsize - nb;5617mchunkptr p = ms->dv;5618if (rsize >= MIN_CHUNK_SIZE) { /* split dv */5619mchunkptr r = ms->dv = chunk_plus_offset(p, nb);5620ms->dvsize = rsize;5621set_size_and_pinuse_of_free_chunk(r, rsize);5622set_size_and_pinuse_of_inuse_chunk(ms, p, nb);5623}5624else { /* exhaust dv */5625size_t dvs = ms->dvsize;5626ms->dvsize = 0;5627ms->dv = 0;5628set_inuse_and_pinuse(ms, p, dvs);5629}5630mem = chunk2mem(p);5631check_malloced_chunk(ms, mem, nb);5632goto postaction;5633}56345635else if (nb < ms->topsize) { /* Split top */5636size_t rsize = ms->topsize -= nb;5637mchunkptr p = ms->top;5638mchunkptr r = ms->top = chunk_plus_offset(p, nb);5639r->head = rsize | PINUSE_BIT;5640set_size_and_pinuse_of_inuse_chunk(ms, p, nb);5641mem = chunk2mem(p);5642check_top_chunk(ms, ms->top);5643check_malloced_chunk(ms, mem, nb);5644goto postaction;5645}56465647mem = sys_alloc(ms, nb);56485649postaction:5650POSTACTION(ms);5651return mem;5652}56535654return 0;5655}56565657void mspace_free(mspace msp, void* mem) {5658if (mem != 0) {5659mchunkptr p = mem2chunk(mem);5660#if FOOTERS5661mstate fm = get_mstate_for(p);5662(void)msp; /* placate people compiling -Wunused */5663#else /* FOOTERS */5664mstate fm = (mstate)msp;5665#endif /* FOOTERS */5666if (!ok_magic(fm)) {5667USAGE_ERROR_ACTION(fm, p);5668return;5669}5670if (!PREACTION(fm)) {5671check_inuse_chunk(fm, p);5672if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {5673size_t psize = chunksize(p);5674mchunkptr next = chunk_plus_offset(p, psize);5675if (!pinuse(p)) {5676size_t prevsize = p->prev_foot;5677if (is_mmapped(p)) {5678psize += prevsize + MMAP_FOOT_PAD;5679if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)5680fm->footprint -= psize;5681goto postaction;5682}5683else {5684mchunkptr prev = chunk_minus_offset(p, prevsize);5685psize += prevsize;5686p = 
prev;5687if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */5688if (p != fm->dv) {5689unlink_chunk(fm, p, prevsize);5690}5691else if ((next->head & INUSE_BITS) == INUSE_BITS) {5692fm->dvsize = psize;5693set_free_with_pinuse(p, psize, next);5694goto postaction;5695}5696}5697else5698goto erroraction;5699}5700}57015702if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {5703if (!cinuse(next)) { /* consolidate forward */5704if (next == fm->top) {5705size_t tsize = fm->topsize += psize;5706fm->top = p;5707p->head = tsize | PINUSE_BIT;5708if (p == fm->dv) {5709fm->dv = 0;5710fm->dvsize = 0;5711}5712if (should_trim(fm, tsize))5713sys_trim(fm, 0);5714goto postaction;5715}5716else if (next == fm->dv) {5717size_t dsize = fm->dvsize += psize;5718fm->dv = p;5719set_size_and_pinuse_of_free_chunk(p, dsize);5720goto postaction;5721}5722else {5723size_t nsize = chunksize(next);5724psize += nsize;5725unlink_chunk(fm, next, nsize);5726set_size_and_pinuse_of_free_chunk(p, psize);5727if (p == fm->dv) {5728fm->dvsize = psize;5729goto postaction;5730}5731}5732}5733else5734set_free_with_pinuse(p, psize, next);57355736if (is_small(psize)) {5737insert_small_chunk(fm, p, psize);5738check_free_chunk(fm, p);5739}5740else {5741tchunkptr tp = (tchunkptr)p;5742insert_large_chunk(fm, tp, psize);5743check_free_chunk(fm, p);5744if (--fm->release_checks == 0)5745release_unused_segments(fm);5746}5747goto postaction;5748}5749}5750erroraction:5751USAGE_ERROR_ACTION(fm, p);5752postaction:5753POSTACTION(fm);5754}5755}5756}57575758void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {5759void* mem;5760size_t req = 0;5761mstate ms = (mstate)msp;5762if (!ok_magic(ms)) {5763USAGE_ERROR_ACTION(ms,ms);5764return 0;5765}5766if (n_elements != 0) {5767req = n_elements * elem_size;5768if (((n_elements | elem_size) & ~(size_t)0xffff) &&5769(req / n_elements != elem_size))5770req = MAX_SIZE_T; /* force downstream failure on overflow */5771}5772mem = internal_malloc(ms, req);5773if (mem != 0 && calloc_must_clear(mem2chunk(mem)))5774memset(mem, 0, req);5775return mem;5776}57775778void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {5779void* mem = 0;5780if (oldmem == 0) {5781mem = mspace_malloc(msp, bytes);5782}5783else if (bytes >= MAX_REQUEST) {5784MALLOC_FAILURE_ACTION;5785}5786#ifdef REALLOC_ZERO_BYTES_FREES5787else if (bytes == 0) {5788mspace_free(msp, oldmem);5789}5790#endif /* REALLOC_ZERO_BYTES_FREES */5791else {5792size_t nb = request2size(bytes);5793mchunkptr oldp = mem2chunk(oldmem);5794#if ! FOOTERS5795mstate m = (mstate)msp;5796#else /* FOOTERS */5797mstate m = get_mstate_for(oldp);5798if (!ok_magic(m)) {5799USAGE_ERROR_ACTION(m, oldmem);5800return 0;5801}5802#endif /* FOOTERS */5803if (!PREACTION(m)) {5804mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);5805POSTACTION(m);5806if (newp != 0) {5807check_inuse_chunk(m, newp);5808mem = chunk2mem(newp);5809}5810else {5811mem = mspace_malloc(m, bytes);5812if (mem != 0) {5813size_t oc = chunksize(oldp) - overhead_for(oldp);5814memcpy(mem, oldmem, (oc < bytes)? oc : bytes);5815mspace_free(m, oldmem);5816}5817}5818}5819}5820return mem;5821}58225823void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) {5824void* mem = 0;5825if (oldmem != 0) {5826if (bytes >= MAX_REQUEST) {5827MALLOC_FAILURE_ACTION;5828}5829else {5830size_t nb = request2size(bytes);5831mchunkptr oldp = mem2chunk(oldmem);5832#if ! 
FOOTERS5833mstate m = (mstate)msp;5834#else /* FOOTERS */5835mstate m = get_mstate_for(oldp);5836(void)msp; /* placate people compiling -Wunused */5837if (!ok_magic(m)) {5838USAGE_ERROR_ACTION(m, oldmem);5839return 0;5840}5841#endif /* FOOTERS */5842if (!PREACTION(m)) {5843mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);5844POSTACTION(m);5845if (newp == oldp) {5846check_inuse_chunk(m, newp);5847mem = oldmem;5848}5849}5850}5851}5852return mem;5853}58545855void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {5856mstate ms = (mstate)msp;5857if (!ok_magic(ms)) {5858USAGE_ERROR_ACTION(ms,ms);5859return 0;5860}5861if (alignment <= MALLOC_ALIGNMENT)5862return mspace_malloc(msp, bytes);5863return internal_memalign(ms, alignment, bytes);5864}58655866void** mspace_independent_calloc(mspace msp, size_t n_elements,5867size_t elem_size, void* chunks[]) {5868size_t sz = elem_size; /* serves as 1-element array */5869mstate ms = (mstate)msp;5870if (!ok_magic(ms)) {5871USAGE_ERROR_ACTION(ms,ms);5872return 0;5873}5874return ialloc(ms, n_elements, &sz, 3, chunks);5875}58765877void** mspace_independent_comalloc(mspace msp, size_t n_elements,5878size_t sizes[], void* chunks[]) {5879mstate ms = (mstate)msp;5880if (!ok_magic(ms)) {5881USAGE_ERROR_ACTION(ms,ms);5882return 0;5883}5884return ialloc(ms, n_elements, sizes, 0, chunks);5885}58865887size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) {5888return internal_bulk_free((mstate)msp, array, nelem);5889}58905891#if MALLOC_INSPECT_ALL5892void mspace_inspect_all(mspace msp,5893void(*handler)(void *start,5894void *end,5895size_t used_bytes,5896void* callback_arg),5897void* arg) {5898mstate ms = (mstate)msp;5899if (ok_magic(ms)) {5900if (!PREACTION(ms)) {5901internal_inspect_all(ms, handler, arg);5902POSTACTION(ms);5903}5904}5905else {5906USAGE_ERROR_ACTION(ms,ms);5907}5908}5909#endif /* MALLOC_INSPECT_ALL */59105911int mspace_trim(mspace msp, size_t pad) {5912int result = 0;5913mstate ms = (mstate)msp;5914if (ok_magic(ms)) {5915if (!PREACTION(ms)) {5916result = sys_trim(ms, pad);5917POSTACTION(ms);5918}5919}5920else {5921USAGE_ERROR_ACTION(ms,ms);5922}5923return result;5924}59255926#if !NO_MALLOC_STATS5927void mspace_malloc_stats(mspace msp) {5928mstate ms = (mstate)msp;5929if (ok_magic(ms)) {5930internal_malloc_stats(ms);5931}5932else {5933USAGE_ERROR_ACTION(ms,ms);5934}5935}5936#endif /* NO_MALLOC_STATS */59375938size_t mspace_footprint(mspace msp) {5939size_t result = 0;5940mstate ms = (mstate)msp;5941if (ok_magic(ms)) {5942result = ms->footprint;5943}5944else {5945USAGE_ERROR_ACTION(ms,ms);5946}5947return result;5948}59495950size_t mspace_max_footprint(mspace msp) {5951size_t result = 0;5952mstate ms = (mstate)msp;5953if (ok_magic(ms)) {5954result = ms->max_footprint;5955}5956else {5957USAGE_ERROR_ACTION(ms,ms);5958}5959return result;5960}59615962size_t mspace_footprint_limit(mspace msp) {5963size_t result = 0;5964mstate ms = (mstate)msp;5965if (ok_magic(ms)) {5966size_t maf = ms->footprint_limit;5967result = (maf == 0) ? 
MAX_SIZE_T : maf;5968}5969else {5970USAGE_ERROR_ACTION(ms,ms);5971}5972return result;5973}59745975size_t mspace_set_footprint_limit(mspace msp, size_t bytes) {5976size_t result = 0;5977mstate ms = (mstate)msp;5978if (ok_magic(ms)) {5979if (bytes == 0)5980result = granularity_align(1); /* Use minimal size */5981if (bytes == MAX_SIZE_T)5982result = 0; /* disable */5983else5984result = granularity_align(bytes);5985ms->footprint_limit = result;5986}5987else {5988USAGE_ERROR_ACTION(ms,ms);5989}5990return result;5991}59925993#if !NO_MALLINFO5994struct mallinfo mspace_mallinfo(mspace msp) {5995mstate ms = (mstate)msp;5996if (!ok_magic(ms)) {5997USAGE_ERROR_ACTION(ms,ms);5998}5999return internal_mallinfo(ms);6000}6001#endif /* NO_MALLINFO */60026003size_t mspace_usable_size(const void* mem) {6004if (mem != 0) {6005mchunkptr p = mem2chunk(mem);6006if (is_inuse(p))6007return chunksize(p) - overhead_for(p);6008}6009return 0;6010}60116012int mspace_mallopt(int param_number, int value) {6013return change_mparam(param_number, value);6014}60156016#endif /* MSPACES */601760186019/* -------------------- Alternative MORECORE functions ------------------- */60206021/*6022Guidelines for creating a custom version of MORECORE:60236024* For best performance, MORECORE should allocate in multiples of pagesize.6025* MORECORE may allocate more memory than requested. (Or even less,6026but this will usually result in a malloc failure.)6027* MORECORE must not allocate memory when given argument zero, but6028instead return one past the end address of memory from previous6029nonzero call.6030* For best performance, consecutive calls to MORECORE with positive6031arguments should return increasing addresses, indicating that6032space has been contiguously extended.6033* Even though consecutive calls to MORECORE need not return contiguous6034addresses, it must be OK for malloc'ed chunks to span multiple6035regions in those cases where they do happen to be contiguous.6036* MORECORE need not handle negative arguments -- it may instead6037just return MFAIL when given negative arguments.6038Negative arguments are always multiples of pagesize. MORECORE6039must not misinterpret negative args as large positive unsigned6040args. You can suppress all such calls from even occurring by defining6041MORECORE_CANNOT_TRIM,60426043As an example alternative MORECORE, here is a custom allocator6044kindly contributed for pre-OSX macOS. It uses virtually but not6045necessarily physically contiguous non-paged memory (locked in,6046present and won't get swapped out). 
You can use it by uncommenting6047this section, adding some #includes, and setting up the appropriate6048defines above:60496050#define MORECORE osMoreCore60516052There is also a shutdown routine that should somehow be called for6053cleanup upon program exit.60546055#define MAX_POOL_ENTRIES 1006056#define MINIMUM_MORECORE_SIZE (64 * 1024U)6057static int next_os_pool;6058void *our_os_pools[MAX_POOL_ENTRIES];60596060void *osMoreCore(int size)6061{6062void *ptr = 0;6063static void *sbrk_top = 0;60646065if (size > 0)6066{6067if (size < MINIMUM_MORECORE_SIZE)6068size = MINIMUM_MORECORE_SIZE;6069if (CurrentExecutionLevel() == kTaskLevel)6070ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);6071if (ptr == 0)6072{6073return (void *) MFAIL;6074}6075// save ptrs so they can be freed during cleanup6076our_os_pools[next_os_pool] = ptr;6077next_os_pool++;6078ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);6079sbrk_top = (char *) ptr + size;6080return ptr;6081}6082else if (size < 0)6083{6084// we don't currently support shrink behavior6085return (void *) MFAIL;6086}6087else6088{6089return sbrk_top;6090}6091}60926093// cleanup any allocated memory pools6094// called as last thing before shutting down driver60956096void osCleanupMem(void)6097{6098void **ptr;60996100for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)6101if (*ptr)6102{6103PoolDeallocate(*ptr);6104*ptr = 0;6105}6106}61076108*/610961106111/* -----------------------------------------------------------------------6112History:6113v2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea6114* fix bad comparison in dlposix_memalign6115* don't reuse adjusted asize in sys_alloc6116* add LOCK_AT_FORK -- thanks to Kirill Artamonov for the suggestion6117* reduce compiler warnings -- thanks to all who reported/suggested these61186119v2.8.5 Sun May 22 10:26:02 2011 Doug Lea (dl at gee)6120* Always perform unlink checks unless INSECURE6121* Add posix_memalign.6122* Improve realloc to expand in more cases; expose realloc_in_place.6123Thanks to Peter Buhr for the suggestion.6124* Add footprint_limit, inspect_all, bulk_free. Thanks6125to Barry Hayes and others for the suggestions.6126* Internal refactorings to avoid calls while holding locks6127* Use non-reentrant locks by default. Thanks to Roland McGrath6128for the suggestion.6129* Small fixes to mspace_destroy, reset_on_error.6130* Various configuration extensions/changes. Thanks6131to all who contributed these.61326133V2.8.4a Thu Apr 28 14:39:43 2011 (dl at gee.cs.oswego.edu)6134* Update Creative Commons URL61356136V2.8.4 Wed May 27 09:56:23 2009 Doug Lea (dl at gee)6137* Use zeros instead of prev foot for is_mmapped6138* Add mspace_track_large_chunks; thanks to Jean Brouwers6139* Fix set_inuse in internal_realloc; thanks to Jean Brouwers6140* Fix insufficient sys_alloc padding when using 16byte alignment6141* Fix bad error check in mspace_footprint6142* Adaptations for ptmalloc; thanks to Wolfram Gloger.6143* Reentrant spin locks; thanks to Earl Chew and others6144* Win32 improvements; thanks to Niall Douglas and Earl Chew6145* Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options6146* Extension hook in malloc_state6147* Various small adjustments to reduce warnings on some compilers6148* Various configuration extensions/changes for more platforms. 
Thanks
to all who contributed these.

V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee)
* Add max_footprint functions
* Ensure all appropriate literals are size_t
* Fix conditional compilation problem for some #define settings
* Avoid concatenating segments with the one provided
in create_mspace_with_base
* Rename some variables to avoid compiler shadowing warnings
* Use explicit lock initialization.
* Better handling of sbrk interference.
* Simplify and fix segment insertion, trimming and mspace_destroy
* Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
* Thanks especially to Dennis Flanagan for help on these.

V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee)
* Fix memalign brace error.

V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee)
* Fix improper #endif nesting in C++
* Add explicit casts needed for C++

V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee)
* Use trees for large bins
* Support mspaces
* Use segments to unify sbrk-based and mmap-based system allocation,
removing need for emulation on most platforms without sbrk.
* Default safety checks
* Optional footer checks. Thanks to William Robertson for the idea.
* Internal code refactoring
* Incorporate suggestions and platform-specific changes.
Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
Aaron Bachmann, Emery Berger, and others.
* Speed up non-fastbin processing enough to remove fastbins.
* Remove useless cfree() to avoid conflicts with other apps.
* Remove internal memcpy, memset. Compilers handle builtins better.
* Remove some options that no one ever used and rename others.

V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
* Fix malloc_state bitmap array misdeclaration

V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
* Allow tuning of FIRST_SORTED_BIN_SIZE
* Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
* Better detection and support for non-contiguousness of MORECORE.
Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
* Bypass most of malloc if no frees. Thanks to Emery Berger.
* Fix freeing of old top non-contiguous chunk in sysmalloc.
* Raised default trim and map thresholds to 256K.
* Fix mmap-related #defines. Thanks to Lubos Lunak.
* Fix copy macros; added LACKS_FCNTL_H.
Thanks to Neal Walfield.
* Branch-free bin calculation
* Default trim and mmap thresholds now 256K.

V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
* Introduce independent_comalloc and independent_calloc.
Thanks to Michael Pachos for motivation and help.
* Make optional .h file available
* Allow > 2GB requests on 32bit systems.
* new WIN32 sbrk, mmap, munmap, lock code from <[email protected]>.
Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
and Anonymous.
* Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
helping test this.)
* memalign: check alignment arg
* realloc: don't try to shift chunks backwards, since this
leads to more fragmentation in some programs and doesn't
seem to help in any others.
* Collect all cases in malloc requiring system memory into sysmalloc
* Use mmap as backup to sbrk
* Place all internal state in malloc_state
* Introduce fastbins (although similar to 2.5.1)
* Many minor tunings and cosmetic improvements
* Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
* Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
Thanks to Tony E. Bennett <[email protected]> and others.
* Include errno.h to support default failure action.

V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
* return null for negative arguments
* Added several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
* Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
(e.g. WIN32 platforms)
* Cleanup header file inclusion for WIN32 platforms
* Cleanup code to avoid Microsoft Visual C++ compiler complaints
* Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
memory allocation routines
* Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
* Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
usage of 'assert' in non-WIN32 code
* Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
avoid infinite loop
* Always call 'fREe()' rather than 'free()'

V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
* Fixed ordering problem with boundary-stamping

V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
* Added pvalloc, as recommended by H.J. Liu
* Added 64bit pointer support mainly from Wolfram Gloger
* Added anonymously donated WIN32 sbrk emulation
* Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
* malloc_extend_top: fix mask error that caused wastage after
foreign sbrks
* Add linux mremap support code from HJ Liu

V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
* Integrated most documentation with the code.
* Add support for mmap, with help from
Wolfram Gloger ([email protected]).
* Use last_remainder in more cases.
* Pack bins using idea from [email protected]
* Use ordered bins instead of best-fit threshold
* Eliminate block-local decls to simplify tracing and debugging.
* Support another case of realloc via move into top
* Fix error occurring when initial sbrk_base not word-aligned.
* Rely on page size for units instead of SBRK_UNIT to
avoid surprises about sbrk alignment conventions.
* Add mallinfo, mallopt.
Thanks to Raymond Nijssen6268([email protected]) for the suggestion.6269* Add `pad' argument to malloc_trim and top_pad mallopt parameter.6270* More precautions for cases where other routines call sbrk,6271courtesy of Wolfram Gloger ([email protected]).6272* Added macros etc., allowing use in linux libc from6273H.J. Lu ([email protected])6274* Inverted this history list62756276V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)6277* Re-tuned and fixed to behave more nicely with V2.6.0 changes.6278* Removed all preallocation code since under current scheme6279the work required to undo bad preallocations exceeds6280the work saved in good cases for most test programs.6281* No longer use return list or unconsolidated bins since6282no scheme using them consistently outperforms those that don't6283given above changes.6284* Use best fit for very large chunks to prevent some worst-cases.6285* Added some support for debugging62866287V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)6288* Removed footers when chunks are in use. Thanks to6289Paul Wilson ([email protected]) for the suggestion.62906291V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)6292* Added malloc_trim, with help from Wolfram Gloger6293([email protected]).62946295V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)62966297V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)6298* realloc: try to expand in both directions6299* malloc: swap order of clean-bin strategy;6300* realloc: only conditionally expand backwards6301* Try not to scavenge used bins6302* Use bin counts as a guide to preallocation6303* Occasionally bin return list chunks in first scan6304* Add a few optimizations from [email protected]63056306V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)6307* faster bin computation & slightly different binning6308* merged all consolidations to one part of malloc proper6309(eliminating old malloc_find_space & malloc_clean_bin)6310* Scan 2 returns chunks (not just 1)6311* Propagate failure in realloc if malloc returns 06312* Add stuff to allow compilation on non-ANSI compilers6313from [email protected]63146315V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)6316* removed potential for odd address access in prev_chunk6317* removed dependency on getpagesize.h6318* misc cosmetics and a bit more internal documentation6319* anticosmetics: mangled names in macros to evade debugger strangeness6320* tested on sparc, hp-700, dec-mips, rs60006321with gcc & native cc (hp, dec only) allowing6322Detlefs & Zorn comparison study (in SIGPLAN Notices.)63236324Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)6325* Based loosely on libg++-1.2X malloc. 
(It retains some of the overall
structure of old version, but most details differ.)

*/

#endif /* !HAVE_MALLOC */

#ifdef HAVE_MALLOC
static void * SDLCALL real_malloc(size_t s) { return malloc(s); }
static void * SDLCALL real_calloc(size_t n, size_t s) { return calloc(n, s); }
static void * SDLCALL real_realloc(void *p, size_t s) { return realloc(p,s); }
static void SDLCALL real_free(void *p) { free(p); }
#else
#define real_malloc dlmalloc
#define real_calloc dlcalloc
#define real_realloc dlrealloc
#define real_free dlfree
#endif

// mark the allocator entry points as KEEPALIVE so we can call these from JavaScript.
// otherwise they could get so aggressively inlined that their symbols
// don't exist at all in the final binary!
#ifdef SDL_PLATFORM_EMSCRIPTEN
#include <emscripten/emscripten.h>
extern SDL_DECLSPEC SDL_MALLOC EMSCRIPTEN_KEEPALIVE void * SDLCALL SDL_malloc(size_t size);
extern SDL_DECLSPEC SDL_MALLOC SDL_ALLOC_SIZE2(1, 2) EMSCRIPTEN_KEEPALIVE void * SDLCALL SDL_calloc(size_t nmemb, size_t size);
extern SDL_DECLSPEC SDL_ALLOC_SIZE(2) EMSCRIPTEN_KEEPALIVE void * SDLCALL SDL_realloc(void *mem, size_t size);
extern SDL_DECLSPEC EMSCRIPTEN_KEEPALIVE void SDLCALL SDL_free(void *mem);
#endif

/* Memory functions used by SDL that can be replaced by the application */
static struct
{
    SDL_malloc_func malloc_func;
    SDL_calloc_func calloc_func;
    SDL_realloc_func realloc_func;
    SDL_free_func free_func;
    SDL_AtomicInt num_allocations;
} s_mem = {
    real_malloc, real_calloc, real_realloc, real_free, { 0 }
};

// Define this if you want to track the number of allocations active
// #define SDL_TRACK_ALLOCATION_COUNT
#ifdef SDL_TRACK_ALLOCATION_COUNT
#define INCREMENT_ALLOCATION_COUNT() (void)SDL_AtomicIncRef(&s_mem.num_allocations)
#define DECREMENT_ALLOCATION_COUNT() (void)SDL_AtomicDecRef(&s_mem.num_allocations)
#else
#define INCREMENT_ALLOCATION_COUNT()
#define DECREMENT_ALLOCATION_COUNT()
#endif


void SDL_GetOriginalMemoryFunctions(SDL_malloc_func *malloc_func,
                                    SDL_calloc_func *calloc_func,
                                    SDL_realloc_func *realloc_func,
                                    SDL_free_func *free_func)
{
    if (malloc_func) {
        *malloc_func = real_malloc;
    }
    if (calloc_func) {
        *calloc_func = real_calloc;
    }
    if (realloc_func) {
        *realloc_func = real_realloc;
    }
    if (free_func) {
        *free_func = real_free;
    }
}

void SDL_GetMemoryFunctions(SDL_malloc_func *malloc_func,
                            SDL_calloc_func *calloc_func,
                            SDL_realloc_func *realloc_func,
                            SDL_free_func *free_func)
{
    if (malloc_func) {
        *malloc_func = s_mem.malloc_func;
    }
    if (calloc_func) {
        *calloc_func = s_mem.calloc_func;
    }
    if (realloc_func) {
        *realloc_func = s_mem.realloc_func;
    }
    if (free_func) {
        *free_func = s_mem.free_func;
    }
}

bool SDL_SetMemoryFunctions(SDL_malloc_func malloc_func,
                            SDL_calloc_func calloc_func,
                            SDL_realloc_func realloc_func,
                            SDL_free_func free_func)
{
    if (!malloc_func) {
        return SDL_InvalidParamError("malloc_func");
    }
    if (!calloc_func) {
        return SDL_InvalidParamError("calloc_func");
    }
    if (!realloc_func) {
        return SDL_InvalidParamError("realloc_func");
    }
    if (!free_func) {
        return SDL_InvalidParamError("free_func");
    }

    s_mem.malloc_func = malloc_func;
    s_mem.calloc_func = calloc_func;
    s_mem.realloc_func = realloc_func;
    s_mem.free_func = free_func;
    return true;
}

int SDL_GetNumAllocations(void)
{
#ifdef SDL_TRACK_ALLOCATION_COUNT
    return SDL_GetAtomicInt(&s_mem.num_allocations);
#else
    return -1;
#endif
}

void *SDL_malloc(size_t size)
{
    void *mem;

    if (!size) {
        size = 1;
    }

    mem = s_mem.malloc_func(size);
    if (mem) {
        INCREMENT_ALLOCATION_COUNT();
    } else {
        SDL_OutOfMemory();
    }

    return mem;
}

void *SDL_calloc(size_t nmemb, size_t size)
{
    void *mem;

    if (!nmemb || !size) {
        nmemb = 1;
        size = 1;
    }

    mem = s_mem.calloc_func(nmemb, size);
    if (mem) {
        INCREMENT_ALLOCATION_COUNT();
    } else {
        SDL_OutOfMemory();
    }

    return mem;
}

void *SDL_realloc(void *ptr, size_t size)
{
    void *mem;

    if (!size) {
        size = 1;
    }

    mem = s_mem.realloc_func(ptr, size);
    if (mem && !ptr) {
        INCREMENT_ALLOCATION_COUNT();
    } else if (!mem) {
        SDL_OutOfMemory();
    }

    return mem;
}

void SDL_free(void *ptr)
{
    if (!ptr) {
        return;
    }

    s_mem.free_func(ptr);
    DECREMENT_ALLOCATION_COUNT();
}
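/*
  Example: replacing SDL's memory functions from an application.

  The sketch below is illustrative and is not part of SDL itself. It relies
  only on the public SDL3 API used or declared above (SDL_GetMemoryFunctions,
  SDL_SetMemoryFunctions, the SDL_malloc_func, SDL_calloc_func,
  SDL_realloc_func and SDL_free_func callback types, and the SDL_AtomicInt
  helpers). The counting_ wrappers, live_allocs, and
  install_counting_allocator are hypothetical names chosen for the example.
  Install the wrappers before SDL_Init() so that nothing allocated through
  the old functions is later released through the new ones.

  #include <SDL3/SDL.h>

  static SDL_malloc_func orig_malloc;
  static SDL_calloc_func orig_calloc;
  static SDL_realloc_func orig_realloc;
  static SDL_free_func orig_free;
  static SDL_AtomicInt live_allocs;   // simple live-allocation counter

  static void * SDLCALL counting_malloc(size_t size)
  {
      void *mem = orig_malloc(size);
      if (mem) {
          SDL_AtomicIncRef(&live_allocs);
      }
      return mem;
  }

  static void * SDLCALL counting_calloc(size_t nmemb, size_t size)
  {
      void *mem = orig_calloc(nmemb, size);
      if (mem) {
          SDL_AtomicIncRef(&live_allocs);
      }
      return mem;
  }

  static void * SDLCALL counting_realloc(void *ptr, size_t size)
  {
      void *mem = orig_realloc(ptr, size);
      if (mem && !ptr) {   // only a NULL old pointer creates a new allocation
          SDL_AtomicIncRef(&live_allocs);
      }
      return mem;
  }

  static void SDLCALL counting_free(void *mem)
  {
      if (mem) {
          SDL_AtomicDecRef(&live_allocs);
      }
      orig_free(mem);
  }

  // Call once, before SDL_Init(); SDL_SetMemoryFunctions returns false if any argument is NULL.
  void install_counting_allocator(void)
  {
      SDL_GetMemoryFunctions(&orig_malloc, &orig_calloc, &orig_realloc, &orig_free);
      SDL_SetMemoryFunctions(counting_malloc, counting_calloc, counting_realloc, counting_free);
  }
*/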