/*
  Simple DirectMedia Layer
  Copyright (C) 1997-2025 Sam Lantinga <[email protected]>

  This software is provided 'as-is', without any express or implied
  warranty.  In no event will the authors be held liable for any damages
  arising from the use of this software.

  Permission is granted to anyone to use this software for any purpose,
  including commercial applications, and to alter it and redistribute it
  freely, subject to the following restrictions:

  1. The origin of this software must not be misrepresented; you must not
     claim that you wrote the original software. If you use this software
     in a product, an acknowledgment in the product documentation would be
     appreciated but is not required.
  2. Altered source versions must be plainly marked as such, and must not be
     misrepresented as being the original software.
  3. This notice may not be removed or altered from any source distribution.
*/
#include "SDL_internal.h"

/* This file contains portable memory management functions for SDL */

#ifndef HAVE_MALLOC
#define LACKS_SYS_TYPES_H
#define LACKS_STDIO_H
#define LACKS_STRINGS_H
#define LACKS_STRING_H
#define LACKS_STDLIB_H
#define ABORT
#define NO_MALLOC_STATS 1
#define USE_LOCKS 1
#define USE_DL_PREFIX

/*
  This is a version (aka dlmalloc) of malloc/free/realloc written by
  Doug Lea and released to the public domain, as explained at
  http://creativecommons.org/publicdomain/zero/1.0/ Send questions,
  comments, complaints, performance data, etc to [email protected]

* Version 2.8.6 Wed Aug 29 06:57:58 2012  Doug Lea
  Note: There may be an updated version of this malloc obtainable at
        ftp://gee.cs.oswego.edu/pub/misc/malloc.c
  Check before installing!

* Quickstart

  This library is all in one file to simplify the most common usage:
  ftp it, compile it (-O3), and link it into another program. All of
  the compile-time options default to reasonable values for use on
  most platforms.  You might later want to step through various
  compile-time and dynamic tuning options.

  For convenience, an include file for code using this malloc is at:
     ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.6.h
  You don't really need this .h file unless you call functions not
  defined in your system include files.  The .h file contains only the
  excerpts from this file needed for using this malloc on ANSI C/C++
  systems, so long as you haven't changed compile-time options about
  naming and tuning parameters.  If you do, then you can create your
  own malloc.h that does include all settings by cutting at the point
  indicated below. Note that you may already by default be using a C
  library containing a malloc that is based on some version of this
  malloc (for example in linux). You might still want to use the one
  in this file to customize settings or to avoid overheads associated
  with library versions.

* Vital statistics:

  Supported pointer/size_t representation:       4 or 8 bytes
       size_t MUST be an unsigned type of the same width as
       pointers. (If you are using an ancient system that declares
       size_t as a signed type, or need it to be a different width
       than pointers, you can use a previous release of this malloc (e.g.
       2.7.2) supporting these.)

  Alignment:                                     8 bytes (minimum)
       This suffices for nearly all current machines and C compilers.
       However, you can define MALLOC_ALIGNMENT to be wider than this
       if necessary (up to 128 bytes), at the expense of using more space.

  Minimum overhead per allocated chunk:   4 or  8 bytes (if 4byte sizes)
                                          8 or 16 bytes (if 8byte sizes)
       Each malloced chunk has a hidden word of overhead holding size
       and status information, and additional cross-check word
       if FOOTERS is defined.

  Minimum allocated size: 4-byte ptrs:  16 bytes    (including overhead)
                          8-byte ptrs:  32 bytes    (including overhead)

       Even a request for zero bytes (i.e., malloc(0)) returns a
       pointer to something of the minimum allocatable size.
       The maximum overhead wastage (i.e., number of extra bytes
       allocated than were requested in malloc) is less than or equal
       to the minimum size, except for requests >= mmap_threshold that
       are serviced via mmap(), where the worst case wastage is about
       32 bytes plus the remainder from a system page (the minimal
       mmap unit); typically 4096 or 8192 bytes.

  Security: static-safe; optionally more or less
       The "security" of malloc refers to the ability of malicious
       code to accentuate the effects of errors (for example, freeing
       space that is not currently malloc'ed or overwriting past the
       ends of chunks) in code that calls malloc.  This malloc
       guarantees not to modify any memory locations below the base of
       heap, i.e., static variables, even in the presence of usage
       errors.  The routines additionally detect most improper frees
       and reallocs.  All this holds as long as the static bookkeeping
       for malloc itself is not corrupted by some other means.  This
       is only one aspect of security -- these checks do not, and
       cannot, detect all possible programming errors.

       If FOOTERS is defined nonzero, then each allocated chunk
       carries an additional check word to verify that it was malloced
       from its space.  These check words are the same within each
       execution of a program using malloc, but differ across
       executions, so externally crafted fake chunks cannot be
       freed. This improves security by rejecting frees/reallocs that
       could corrupt heap memory, in addition to the checks preventing
       writes to statics that are always on.  This may further improve
       security at the expense of time and space overhead.  (Note that
       FOOTERS may also be worth using with MSPACES.)

       By default detected errors cause the program to abort (calling
       "abort()"). You can override this to instead proceed past
       errors by defining PROCEED_ON_ERROR.  In this case, a bad free
       has no effect, and a malloc that encounters a bad address
       caused by user overwrites will ignore the bad address by
       dropping pointers and indices to all known memory. This may
       be appropriate for programs that should continue if at all
       possible in the face of programming errors, although they may
       run out of memory because dropped memory is never reclaimed.

       If you don't like either of these options, you can define
       CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
       else. And if you are sure that your program using malloc has
       no errors or vulnerabilities, you can define INSECURE to 1,
       which might (or might not) provide a small performance improvement.

       It is also possible to limit the maximum total allocatable
       space, using malloc_set_footprint_limit. This is not
       designed as a security feature in itself (calls to set limits
       are not screened or privileged), but may be useful as one
       aspect of a secure implementation.

  Thread-safety: NOT thread-safe unless USE_LOCKS defined non-zero
       When USE_LOCKS is defined, each public call to malloc, free,
       etc is surrounded with a lock. By default, this uses a plain
       pthread mutex, win32 critical section, or a spin-lock if
       available for the platform and not disabled by setting
       USE_SPIN_LOCKS=0.  However, if USE_RECURSIVE_LOCKS is defined,
       recursive versions are used instead (which are not required for
       base functionality but may be needed in layered extensions).
       Using a global lock is not especially fast, and can be a major
       bottleneck.  It is designed only to provide minimal protection
       in concurrent environments, and to provide a basis for
       extensions.  If you are using malloc in a concurrent program,
       consider instead using nedmalloc
       (http://www.nedprod.com/programs/portable/nedmalloc/) or
       ptmalloc (See http://www.malloc.de), which are derived from
       versions of this malloc.

  System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
       This malloc can use unix sbrk or any emulation (invoked using
       the CALL_MORECORE macro) and/or mmap/munmap or any emulation
       (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
       memory.  On most unix systems, it tends to work best if both
       MORECORE and MMAP are enabled.  On Win32, it uses emulations
       based on VirtualAlloc. It also uses common C library functions
       like memset.

  Compliance: I believe it is compliant with the Single Unix Specification
       (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
       others as well.

* Overview of algorithms

  This is not the fastest, most space-conserving, most portable, or
  most tunable malloc ever written. However it is among the fastest
  while also being among the most space-conserving, portable and
  tunable.  Consistent balance across these factors results in a good
  general-purpose allocator for malloc-intensive programs.

  In most ways, this malloc is a best-fit allocator. Generally, it
  chooses the best-fitting existing chunk for a request, with ties
  broken in approximately least-recently-used order. (This strategy
  normally maintains low fragmentation.) However, for requests less
  than 256 bytes, it deviates from best-fit when there is not an
  exactly fitting available chunk by preferring to use space adjacent
  to that used for the previous small request, as well as by breaking
  ties in approximately most-recently-used order. (These enhance
  locality of series of small allocations.)  And for very large requests
  (>= 256Kb by default), it relies on system memory mapping
  facilities, if supported.  (This helps avoid carrying around and
  possibly fragmenting memory used only for large chunks.)

  All operations (except malloc_stats and mallinfo) have execution
  times that are bounded by a constant factor of the number of bits in
  a size_t, not counting any clearing in calloc or copying in realloc,
  or actions surrounding MORECORE and MMAP that have times
  proportional to the number of non-contiguous regions returned by
  system allocation routines, which is often just 1.
  In real-time applications, you can optionally suppress segment
  traversals using NO_SEGMENT_TRAVERSAL, which assures bounded execution
  even when system allocators return non-contiguous spaces, at the
  typical expense of carrying around more memory and increased
  fragmentation.

  The implementation is not very modular and seriously overuses
  macros. Perhaps someday all C compilers will do as good a job
  inlining modular code as can now be done by brute-force expansion,
  but now, enough of them seem not to.

  Some compilers issue a lot of warnings about code that is
  dead/unreachable only on some platforms, and also about intentional
  uses of negation on unsigned types. All known cases of each can be
  ignored.

  For a longer but out of date high-level description, see
     http://gee.cs.oswego.edu/dl/html/malloc.html

* MSPACES
  If MSPACES is defined, then in addition to malloc, free, etc.,
  this file also defines mspace_malloc, mspace_free, etc. These
  are versions of malloc routines that take an "mspace" argument
  obtained using create_mspace, to control all internal bookkeeping.
  If ONLY_MSPACES is defined, only these versions are compiled.
  So if you would like to use this allocator for only some allocations,
  and your system malloc for others, you can compile with
  ONLY_MSPACES and then do something like...
    static mspace mymspace = create_mspace(0,0); // for example
    #define mymalloc(bytes)  mspace_malloc(mymspace, bytes)

  (Note: If you only need one instance of an mspace, you can instead
  use "USE_DL_PREFIX" to relabel the global malloc.)

  You can similarly create thread-local allocators by storing
  mspaces as thread-locals. For example:
    static __thread mspace tlms = 0;
    void*  tlmalloc(size_t bytes) {
      if (tlms == 0) tlms = create_mspace(0, 0);
      return mspace_malloc(tlms, bytes);
    }
    void  tlfree(void* mem) { mspace_free(tlms, mem); }

  Unless FOOTERS is defined, each mspace is completely independent.
  You cannot allocate from one and free to another (although
  conformance is only weakly checked, so usage errors are not always
  caught). If FOOTERS is defined, then each chunk carries around a tag
  indicating its originating mspace, and frees are directed to their
  originating spaces. Normally, this requires use of locks.

 -------------------------  Compile-time options ---------------------------

Be careful in setting #define values for numerical constants of type
size_t. On some systems, literal values are not automatically extended
to size_t precision unless they are explicitly casted. You can also
use the symbolic values MAX_SIZE_T, SIZE_T_ONE, etc below.

WIN32                    default: defined if _WIN32 defined
  Defining WIN32 sets up defaults for MS environment and compilers.
  Otherwise defaults are for unix. Beware that there seem to be some
  cases where this malloc might not be a pure drop-in replacement for
  Win32 malloc: Random-looking failures from Win32 GDI API's (eg;
  SetDIBits()) may be due to bugs in some video driver implementations
  when pixel buffers are malloc()ed, and the region spans more than
  one VirtualAlloc()ed region. Because dlmalloc uses a small (64Kb)
  default granularity, pixel buffers may straddle virtual allocation
  regions more often than when using the Microsoft allocator.  You can
  avoid this by using VirtualAlloc() and VirtualFree() for all pixel
  buffers rather than using malloc().  If this is not possible,
  recompile this malloc with a larger DEFAULT_GRANULARITY.
  Note: in cases where MSC and gcc (cygwin) are known to differ on WIN32,
  conditions use _MSC_VER to distinguish them.

DLMALLOC_EXPORT       default: extern
  Defines how public APIs are declared. If you want to export via a
  Windows DLL, you might define this as
    #define DLMALLOC_EXPORT extern  __declspec(dllexport)
  If you want a POSIX ELF shared object, you might use
    #define DLMALLOC_EXPORT extern __attribute__((visibility("default")))

MALLOC_ALIGNMENT         default: (size_t)(2 * sizeof(void *))
  Controls the minimum alignment for malloc'ed chunks.  It must be a
  power of two and at least 8, even on machines for which smaller
  alignments would suffice. It may be defined as larger than this
  though. Note however that code and data structures are optimized for
  the case of 8-byte alignment.

MSPACES                  default: 0 (false)
  If true, compile in support for independent allocation spaces.
  This is only supported if HAVE_MMAP is true.

ONLY_MSPACES             default: 0 (false)
  If true, only compile in mspace versions, not regular versions.

USE_LOCKS                default: 0 (false)
  Causes each call to each public routine to be surrounded with
  pthread or WIN32 mutex lock/unlock. (If set true, this can be
  overridden on a per-mspace basis for mspace versions.) If set to a
  non-zero value other than 1, locks are used, but their
  implementation is left out, so lock functions must be supplied manually,
  as described below.

USE_SPIN_LOCKS           default: 1 iff USE_LOCKS and spin locks available
  If true, uses custom spin locks for locking. This is currently
  supported only for gcc >= 4.1, older gccs on x86 platforms, and recent
  MS compilers.  Otherwise, posix locks or win32 critical sections are
  used.

USE_RECURSIVE_LOCKS      default: not defined
  If defined nonzero, uses recursive (aka reentrant) locks, otherwise
  uses plain mutexes. This is not required for malloc proper, but may
  be needed for layered allocators such as nedmalloc.

LOCK_AT_FORK            default: not defined
  If defined nonzero, performs pthread_atfork upon initialization
  to initialize child lock while holding parent lock. The implementation
  assumes that pthread locks (not custom locks) are being used. In other
  cases, you may need to customize the implementation.

FOOTERS                  default: 0
  If true, provide extra checking and dispatching by placing
  information in the footers of allocated chunks. This adds
  space and time overhead.

INSECURE                 default: 0
  If true, omit checks for usage errors and heap space overwrites.

USE_DL_PREFIX            default: NOT defined
  Causes compiler to prefix all public routines with the string 'dl'.
  This can be useful when you only want to use this malloc in one part
  of a program, using your regular system malloc elsewhere.

MALLOC_INSPECT_ALL       default: NOT defined
  If defined, compiles malloc_inspect_all and mspace_inspect_all, that
  perform traversal of all heap space.  Unless access to these
  functions is otherwise restricted, you probably do not want to
  include them in secure implementations.

ABORT                    default: defined as abort()
  Defines how to abort on failed checks.  On most systems, a failed
  check cannot die with an "assert" or even print an informative
  message, because the underlying print routines in turn call malloc,
  which will fail again.  Generally, the best policy is to simply call abort().
  It's not very useful to do more than this because many
  errors due to overwriting will show up as address faults (null, odd
  addresses etc) rather than malloc-triggered checks, so will also
  abort.  Also, most compilers know that abort() does not return, so
  can better optimize code conditionally calling it.

PROCEED_ON_ERROR           default: defined as 0 (false)
  Controls whether detected bad addresses cause them to be bypassed
  rather than aborting. If set, detected bad arguments to free and
  realloc are ignored. And all bookkeeping information is zeroed out
  upon a detected overwrite of freed heap space, thus losing the
  ability to ever return it from malloc again, but enabling the
  application to proceed. If PROCEED_ON_ERROR is defined, the
  static variable malloc_corruption_error_count is compiled in
  and can be examined to see if errors have occurred. This option
  generates slower code than the default abort policy.

DEBUG                    default: NOT defined
  The DEBUG setting is mainly intended for people trying to modify
  this code or diagnose problems when porting to new platforms.
  However, it may also be able to better isolate user errors than just
  using runtime checks.  The assertions in the check routines spell
  out in more detail the assumptions and invariants underlying the
  algorithms.  The checking is fairly extensive, and will slow down
  execution noticeably. Calling malloc_stats or mallinfo with DEBUG
  set will attempt to check every non-mmapped allocated and free chunk
  in the course of computing the summaries.

ABORT_ON_ASSERT_FAILURE   default: defined as 1 (true)
  Debugging assertion failures can be nearly impossible if your
  version of the assert macro causes malloc to be called, which will
  lead to a cascade of further failures, blowing the runtime stack.
  ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
  which will usually make debugging easier.

MALLOC_FAILURE_ACTION     default: sets errno to ENOMEM, or no-op on win32
  The action to take before "return 0" when malloc fails to be able to
  return memory because there is none available.

HAVE_MORECORE             default: 1 (true) unless win32 or ONLY_MSPACES
  True if this system supports sbrk or an emulation of it.

MORECORE                  default: sbrk
  The name of the sbrk-style system routine to call to obtain more
  memory.  See below for guidance on writing custom MORECORE
  functions. (A small illustrative sketch also appears just after this
  comment block.)  The type of the argument to sbrk/MORECORE varies across
  systems.  It cannot be size_t, because it supports negative
  arguments, so it is normally the signed type of the same width as
  size_t (sometimes declared as "intptr_t").  It doesn't much matter
  though. Internally, we only call it with arguments less than half
  the max value of a size_t, which should work across all reasonable
  possibilities, although sometimes generating compiler warnings.

MORECORE_CONTIGUOUS       default: 1 (true) if HAVE_MORECORE
  If true, take advantage of fact that consecutive calls to MORECORE
  with positive arguments always return contiguous increasing
  addresses.  This is true of unix sbrk. It does not hurt too much to
  set it true anyway, since malloc copes with non-contiguities.
  Setting it false when definitely non-contiguous saves time
  and possibly wasted space it would take to discover this though.

MORECORE_CANNOT_TRIM      default: NOT defined
  True if MORECORE cannot release space back to the system when given
  negative arguments.
  This is generally necessary only if you are
  using a hand-crafted MORECORE function that cannot handle negative
  arguments.

NO_SEGMENT_TRAVERSAL       default: 0
  If non-zero, suppresses traversals of memory segments
  returned by either MORECORE or CALL_MMAP. This disables
  merging of segments that are contiguous, and selectively
  releasing them to the OS if unused, but bounds execution times.

HAVE_MMAP                 default: 1 (true)
  True if this system supports mmap or an emulation of it.  If so, and
  HAVE_MORECORE is not true, MMAP is used for all system
  allocation. If set and HAVE_MORECORE is true as well, MMAP is
  primarily used to directly allocate very large blocks. It is also
  used as a backup strategy in cases where MORECORE fails to provide
  space from system. Note: A single call to MUNMAP is assumed to be
  able to unmap memory that may have been allocated using multiple calls
  to MMAP, so long as they are adjacent.

HAVE_MREMAP               default: 1 on linux, else 0
  If true realloc() uses mremap() to re-allocate large blocks and
  extend or shrink allocation spaces.

MMAP_CLEARS               default: 1 except on WINCE.
  True if mmap clears memory so calloc doesn't need to. This is true
  for standard unix mmap using /dev/zero and on WIN32 except for WINCE.

USE_BUILTIN_FFS            default: 0 (i.e., not used)
  Causes malloc to use the builtin ffs() function to compute indices.
  Some compilers may recognize and intrinsify ffs to be faster than the
  supplied C version. Also, the case of x86 using gcc is special-cased
  to an asm instruction, so is already as fast as it can be, and so
  this setting has no effect. Similarly for Win32 under recent MS compilers.
  (On most x86s, the asm version is only slightly faster than the C version.)

malloc_getpagesize         default: derive from system includes, or 4096.
  The system page size. To the extent possible, this malloc manages
  memory from the system in page-size units.  This may be (and
  usually is) a function rather than a constant. This is ignored
  if WIN32, where page size is determined using getSystemInfo during
  initialization.

USE_DEV_RANDOM             default: 0 (i.e., not used)
  Causes malloc to use /dev/random to initialize secure magic seed for
  stamping footers. Otherwise, the current time is used.

NO_MALLINFO                default: 0
  If defined, don't compile "mallinfo". This can be a simple way
  of dealing with mismatches between system declarations and
  those in this file.

MALLINFO_FIELD_TYPE        default: size_t
  The type of the fields in the mallinfo struct. This was originally
  defined as "int" in SVID etc, but is more usefully defined as
  size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set.

NO_MALLOC_STATS            default: 0
  If defined, don't compile "malloc_stats". This avoids calls to
  fprintf and bringing in stdio dependencies you might not want.

REALLOC_ZERO_BYTES_FREES    default: not defined
  This should be set if a call to realloc with zero bytes should
  be the same as a call to free. Some people think it should.
  Otherwise, since this malloc returns a unique pointer for malloc(0),
  so does realloc(p, 0).

LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H,  LACKS_ERRNO_H
LACKS_STDLIB_H LACKS_SCHED_H LACKS_TIME_H  default: NOT defined unless on WIN32
  Define these if your system does not have these header files.
  You might need to manually insert some of the declarations they provide.

DEFAULT_GRANULARITY        default: page size if MORECORE_CONTIGUOUS,
                                system_info.dwAllocationGranularity in WIN32,
                                otherwise 64K.
      Also settable using mallopt(M_GRANULARITY, x)
  The unit for allocating and deallocating memory from the system.  On
  most systems with contiguous MORECORE, there is no reason to
  make this more than a page. However, systems with MMAP tend to
  either require or encourage larger granularities.  You can increase
  this value to prevent system allocation functions from being called so
  often, especially if they are slow.  The value must be at least one
  page and must be a power of two.  Setting to 0 causes initialization
  to either page size or win32 region size.  (Note: In previous
  versions of malloc, the equivalent of this option was called
  "TOP_PAD")

DEFAULT_TRIM_THRESHOLD    default: 2MB
      Also settable using mallopt(M_TRIM_THRESHOLD, x)
  The maximum amount of unused top-most memory to keep before
  releasing via malloc_trim in free().  Automatic trimming is mainly
  useful in long-lived programs using contiguous MORECORE.  Because
  trimming via sbrk can be slow on some systems, and can sometimes be
  wasteful (in cases where programs immediately afterward allocate
  more large chunks) the value should be high enough so that your
  overall system performance would improve by releasing this much
  memory.  As a rough guide, you might set to a value close to the
  average size of a process (program) running on your system.
  Releasing this much memory would allow such a process to run in
  memory.  Generally, it is worth tuning trim thresholds when a
  program undergoes phases where several large chunks are allocated
  and released in ways that can reuse each other's storage, perhaps
  mixed with phases where there are no such chunks at all. The trim
  value must be greater than page size to have any useful effect. To
  disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
  some people use of mallocing a huge space and then freeing it at
  program startup, in an attempt to reserve system memory, doesn't
  have the intended effect under automatic trimming, since that memory
  will immediately be returned to the system.

DEFAULT_MMAP_THRESHOLD       default: 256K
      Also settable using mallopt(M_MMAP_THRESHOLD, x)
  The request size threshold for using MMAP to directly service a
  request. Requests of at least this size that cannot be allocated
  using already-existing space will be serviced via mmap.  (If enough
  normal freed space already exists it is used instead.)  Using mmap
  segregates relatively large chunks of memory so that they can be
  individually obtained and released from the host system. A request
  serviced through mmap is never reused by any other request (at least
  not directly; the system may just so happen to remap successive
  requests to the same locations).  Segregating space in this way has
  the benefits that: Mmapped space can always be individually released
  back to the system, which helps keep the system level memory demands
  of a long-lived program low.
  Also, mapped memory doesn't become
  `locked' between other chunks, as can happen with normally allocated
  chunks, which means that even trimming via malloc_trim would not
  release them.  However, it has the disadvantage that the space
  cannot be reclaimed, consolidated, and then used to service later
  requests, as happens with normal chunks.  The advantages of mmap
  nearly always outweigh disadvantages for "large" chunks, but the
  value of "large" may vary across systems.  The default is an
  empirically derived value that works well in most systems. You can
  disable mmap by setting to MAX_SIZE_T.

MAX_RELEASE_CHECK_RATE   default: 4095 unless not HAVE_MMAP
  The number of consolidated frees between checks to release
  unused segments when freeing. When using non-contiguous segments,
  especially with multiple mspaces, checking only for topmost space
  doesn't always suffice to trigger trimming. To compensate for this,
  free() will, with a period of MAX_RELEASE_CHECK_RATE (or the
  current number of segments, if greater) try to release unused
  segments to the OS when freeing chunks that result in
  consolidation.  The best value for this parameter is a compromise
  between slowing down frees with relatively costly checks that
  rarely trigger versus holding on to unused memory. To effectively
  disable, set to MAX_SIZE_T. This may lead to a very slight speed
  improvement at the expense of carrying around more memory.
*/
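/*
  As referenced in the MORECORE notes above, here is a minimal sketch of a
  custom sbrk-style MORECORE built over a fixed static arena.  This is
  illustrative only: the names are hypothetical, and a real replacement must
  follow the sbrk contract (return the old break on success and (void*)-1 on
  failure).  Because this version refuses negative arguments, it would also
  need MORECORE_CANNOT_TRIM to be defined.

    static char my_arena[4 * 1024 * 1024];
    static size_t my_arena_used = 0;

    static void *my_morecore(intptr_t increment) {
      if (increment < 0)
        return (void *)-1;                      // this arena never shrinks
      if (my_arena_used + (size_t)increment > sizeof(my_arena))
        return (void *)-1;                      // out of reserved space
      void *prev = my_arena + my_arena_used;    // previous "break"
      my_arena_used += (size_t)increment;
      return prev;
    }

  and then compile with:  #define MORECORE my_morecore
*/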
/* Version identifier to allow people to support multiple versions */
#ifndef DLMALLOC_VERSION
#define DLMALLOC_VERSION 20806
#endif /* DLMALLOC_VERSION */

#ifndef DLMALLOC_EXPORT
#define DLMALLOC_EXPORT extern
#endif

#ifndef WIN32
#ifdef _WIN32
#define WIN32 1
#endif /* _WIN32 */
#ifdef _WIN32_WCE
#define LACKS_FCNTL_H
#define WIN32 1
#endif /* _WIN32_WCE */
#endif /* WIN32 */
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <tchar.h>
#define HAVE_MMAP 1
#define HAVE_MORECORE 0
#define LACKS_UNISTD_H
#define LACKS_SYS_PARAM_H
#define LACKS_SYS_MMAN_H
#define LACKS_STRING_H
#define LACKS_STRINGS_H
#define LACKS_SYS_TYPES_H
// #define LACKS_ERRNO_H // File uses `EINVAL` and `ENOMEM` defines, so include is required. Legacy exclusion?
#define LACKS_SCHED_H
#ifndef MALLOC_FAILURE_ACTION
#define MALLOC_FAILURE_ACTION
#endif /* MALLOC_FAILURE_ACTION */
#ifndef MMAP_CLEARS
#ifdef _WIN32_WCE /* WINCE reportedly does not clear */
#define MMAP_CLEARS 0
#else
#define MMAP_CLEARS 1
#endif /* _WIN32_WCE */
#endif /* MMAP_CLEARS */
#endif /* WIN32 */

#if defined(DARWIN) || defined(_DARWIN)
/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
#ifndef HAVE_MORECORE
#define HAVE_MORECORE 0
#define HAVE_MMAP 1
/* OSX allocators provide 16 byte alignment */
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT ((size_t)16U)
#endif
#endif /* HAVE_MORECORE */
#endif /* DARWIN */

#ifndef LACKS_SYS_TYPES_H
#include <sys/types.h> /* For size_t */
#endif /* LACKS_SYS_TYPES_H */

/* The maximum possible size_t value has all bits set */
#define MAX_SIZE_T (~(size_t)0)

#ifndef USE_LOCKS /* ensure true if spin or recursive locks set */
#define USE_LOCKS ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \
                   (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0))
#endif /* USE_LOCKS */

#if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */
#if ((defined(__GNUC__) &&                                         \
      ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \
       defined(__i386__) || defined(__x86_64__))) ||               \
     (defined(_MSC_VER) && _MSC_VER>=1310))
#ifndef USE_SPIN_LOCKS
#define USE_SPIN_LOCKS 1
#endif /* USE_SPIN_LOCKS */
#elif USE_SPIN_LOCKS
#error "USE_SPIN_LOCKS defined without implementation"
#endif /* ... locks available... */
#elif !defined(USE_SPIN_LOCKS)
#define USE_SPIN_LOCKS 0
#endif /* USE_LOCKS */

#ifndef ONLY_MSPACES
#define ONLY_MSPACES 0
#endif /* ONLY_MSPACES */
#ifndef MSPACES
#if ONLY_MSPACES
#define MSPACES 1
#else /* ONLY_MSPACES */
#define MSPACES 0
#endif /* ONLY_MSPACES */
#endif /* MSPACES */
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *)))
#endif /* MALLOC_ALIGNMENT */
#ifndef FOOTERS
#define FOOTERS 0
#endif /* FOOTERS */
#ifndef ABORT
#define ABORT abort()
#endif /* ABORT */
#ifndef ABORT_ON_ASSERT_FAILURE
#define ABORT_ON_ASSERT_FAILURE 1
#endif /* ABORT_ON_ASSERT_FAILURE */
#ifndef PROCEED_ON_ERROR
#define PROCEED_ON_ERROR 0
#endif /* PROCEED_ON_ERROR */

#ifndef INSECURE
#define INSECURE 0
#endif /* INSECURE */
#ifndef MALLOC_INSPECT_ALL
#define MALLOC_INSPECT_ALL 0
#endif /* MALLOC_INSPECT_ALL */
#ifndef HAVE_MMAP
#define HAVE_MMAP 1
#endif /* HAVE_MMAP */
#ifndef MMAP_CLEARS
#define MMAP_CLEARS 1
#endif /* MMAP_CLEARS */
#ifndef HAVE_MREMAP
#ifdef linux
#define HAVE_MREMAP 1
#define _GNU_SOURCE /* Turns on mremap() definition */
#else /* linux */
#define HAVE_MREMAP 0
#endif /* linux */
#endif /* HAVE_MREMAP */
#ifndef MALLOC_FAILURE_ACTION
#define MALLOC_FAILURE_ACTION errno = ENOMEM;
#endif /* MALLOC_FAILURE_ACTION */
#ifndef HAVE_MORECORE
#if ONLY_MSPACES
#define HAVE_MORECORE 0
#else /* ONLY_MSPACES */
#define HAVE_MORECORE 1
#endif /* ONLY_MSPACES */
#endif /* HAVE_MORECORE */
#if !HAVE_MORECORE
#define MORECORE_CONTIGUOUS 0
#else /* !HAVE_MORECORE */
#define MORECORE_DEFAULT sbrk
#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 1
#endif /* MORECORE_CONTIGUOUS */
#endif /* HAVE_MORECORE */
#ifndef DEFAULT_GRANULARITY
#if (MORECORE_CONTIGUOUS || defined(WIN32))
#define DEFAULT_GRANULARITY (0)  /* 0 means to compute in init_mparams */
#else /* MORECORE_CONTIGUOUS */
#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
#endif /* MORECORE_CONTIGUOUS */
#endif /* DEFAULT_GRANULARITY */
#ifndef DEFAULT_TRIM_THRESHOLD
#ifndef MORECORE_CANNOT_TRIM
#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
#else /* MORECORE_CANNOT_TRIM */
#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
#endif /* MORECORE_CANNOT_TRIM */
#endif /* DEFAULT_TRIM_THRESHOLD */
#ifndef DEFAULT_MMAP_THRESHOLD
#if HAVE_MMAP
#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
#else /* HAVE_MMAP */
#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
#endif /* HAVE_MMAP */
#endif /* DEFAULT_MMAP_THRESHOLD */
#ifndef MAX_RELEASE_CHECK_RATE
#if HAVE_MMAP
#define MAX_RELEASE_CHECK_RATE 4095
#else
#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T
#endif /* HAVE_MMAP */
#endif /* MAX_RELEASE_CHECK_RATE */
#ifndef USE_BUILTIN_FFS
#define USE_BUILTIN_FFS 0
#endif /* USE_BUILTIN_FFS */
#ifndef USE_DEV_RANDOM
#define USE_DEV_RANDOM 0
#endif /* USE_DEV_RANDOM */
#ifndef NO_MALLINFO
#define NO_MALLINFO 0
#endif /* NO_MALLINFO */
#ifndef MALLINFO_FIELD_TYPE
#define MALLINFO_FIELD_TYPE size_t
#endif /* MALLINFO_FIELD_TYPE */
#ifndef NO_MALLOC_STATS
#define NO_MALLOC_STATS 0
#endif /* NO_MALLOC_STATS */
#ifndef NO_SEGMENT_TRAVERSAL
#define NO_SEGMENT_TRAVERSAL 0
#endif /* NO_SEGMENT_TRAVERSAL */

/*
  mallopt tuning options.  SVID/XPG defines four standard parameter
  numbers for mallopt, normally defined in malloc.h.  None of these
  are used in this malloc, so setting them has no effect. But this
  malloc does support the following options.
*/

#define M_TRIM_THRESHOLD     (-1)
#define M_GRANULARITY        (-2)
#define M_MMAP_THRESHOLD     (-3)
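/*
  A usage sketch (illustrative only; it assumes the dl-prefixed entry points
  declared later in this file).  Per the mallopt notes further below, a value
  of -1 is treated as the maximum size_t value.

    dlmallopt(M_GRANULARITY,    128 * 1024);   // system allocation unit
    dlmallopt(M_TRIM_THRESHOLD, -1);           // disable automatic trimming
    dlmallopt(M_MMAP_THRESHOLD, 1024 * 1024);  // mmap requests of 1MB or more
*/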
/* ------------------------ Mallinfo declarations ------------------------ */

#if !NO_MALLINFO
/*
  This version of malloc supports the standard SVID/XPG mallinfo
  routine that returns a struct containing usage properties and
  statistics. It should work on any system that has a
  /usr/include/malloc.h defining struct mallinfo. The main
  declaration needed is the mallinfo struct that is returned (by-copy)
  by mallinfo().  The mallinfo struct contains a bunch of fields that
  are not even meaningful in this version of malloc.  These fields are
  instead filled by mallinfo() with other numbers that might be of
  interest.

  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
  /usr/include/malloc.h file that includes a declaration of struct
  mallinfo.  If so, it is included; else a compliant version is
  declared below.  These must be precisely the same for mallinfo() to
  work.  The original SVID version of this struct, defined on most
  systems with mallinfo, declares all fields as ints. But some others
  define as unsigned long. If your system defines the fields using a
  type of different width than listed here, you MUST #include your
  system version and #define HAVE_USR_INCLUDE_MALLOC_H.
*/

/* #define HAVE_USR_INCLUDE_MALLOC_H */

#ifdef HAVE_USR_INCLUDE_MALLOC_H
#include "/usr/include/malloc.h"
#else /* HAVE_USR_INCLUDE_MALLOC_H */
#ifndef STRUCT_MALLINFO_DECLARED
/* HP-UX (and others?) redefines mallinfo unless _STRUCT_MALLINFO is defined */
#define _STRUCT_MALLINFO
#define STRUCT_MALLINFO_DECLARED 1
struct mallinfo {
  MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
  MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
  MALLINFO_FIELD_TYPE smblks;   /* always 0 */
  MALLINFO_FIELD_TYPE hblks;    /* always 0 */
  MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
  MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
  MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
  MALLINFO_FIELD_TYPE fordblks; /* total free space */
  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
};
#endif /* STRUCT_MALLINFO_DECLARED */
#endif /* HAVE_USR_INCLUDE_MALLOC_H */
#endif /* NO_MALLINFO */

/*
  Try to persuade compilers to inline. The most critical functions for
  inlining are defined as macros, so these aren't used for them.
*/

#if 0 /* SDL */
#ifndef FORCEINLINE
#if defined(__GNUC__)
#define FORCEINLINE __inline __attribute__ ((always_inline))
#elif defined(_MSC_VER)
#define FORCEINLINE __forceinline
#endif
#endif
#endif /* SDL */
#ifndef NOINLINE
#if defined(__GNUC__)
#define NOINLINE __attribute__ ((noinline))
#elif defined(_MSC_VER)
#define NOINLINE __declspec(noinline)
#else
#define NOINLINE
#endif
#endif

#ifdef __cplusplus
extern "C" {
#if 0 /* SDL */
#ifndef FORCEINLINE
#define FORCEINLINE inline
#endif
#endif /* SDL */
#endif /* __cplusplus */
#if 0 /* SDL */
#ifndef FORCEINLINE
#define FORCEINLINE
#endif
#endif /* SDL_FORCE_INLINE */

#if !ONLY_MSPACES

/* ------------------- Declarations of public routines ------------------- */

#ifndef USE_DL_PREFIX
#define dlcalloc               calloc
#define dlfree                 free
#define dlmalloc               malloc
#define dlmemalign             memalign
#define dlposix_memalign       posix_memalign
#define dlrealloc              realloc
#define dlrealloc_in_place     realloc_in_place
#define dlvalloc               valloc
#define dlpvalloc              pvalloc
#define dlmallinfo             mallinfo
#define dlmallopt              mallopt
#define dlmalloc_trim          malloc_trim
#define dlmalloc_stats         malloc_stats
#define dlmalloc_usable_size   malloc_usable_size
#define dlmalloc_footprint     malloc_footprint
#define dlmalloc_max_footprint malloc_max_footprint
#define dlmalloc_footprint_limit malloc_footprint_limit
#define dlmalloc_set_footprint_limit malloc_set_footprint_limit
#define dlmalloc_inspect_all   malloc_inspect_all
#define dlindependent_calloc   independent_calloc
#define dlindependent_comalloc independent_comalloc
#define dlbulk_free            bulk_free
#endif /* USE_DL_PREFIX */

/*
  malloc(size_t n)
  Returns a pointer to a newly allocated chunk of at least n bytes, or
  null if no space is available, in which case errno is set to ENOMEM
  on ANSI C systems.

  If n is zero, malloc returns a minimum-sized chunk. (The minimum
  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
  systems.)  Note that size_t is an unsigned type, so calls with
  arguments that would be negative if signed are interpreted as
  requests for huge amounts of space, which will often fail. The
  maximum supported value of n differs across systems, but is in all
  cases less than the maximum representable value of a size_t.
*/
DLMALLOC_EXPORT void* dlmalloc(size_t);
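/*
  A minimal usage sketch (illustrative only).  Since this file is built with
  USE_DL_PREFIX, the public entry points are the dl-prefixed names; without
  that option they are the plain names mapped above.

    void *p = dlmalloc(64);
    if (p == NULL) {
      // no space available; errno is ENOMEM on ANSI C systems
    } else {
      dlfree(p);
    }
*/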
The891maximum supported value of n differs across systems, but is in all892cases less than the maximum representable value of a size_t.893*/894DLMALLOC_EXPORT void* dlmalloc(size_t);895896/*897free(void* p)898Releases the chunk of memory pointed to by p, that had been previously899allocated using malloc or a related routine such as realloc.900It has no effect if p is null. If p was not malloced or already901freed, free(p) will by default cause the current program to abort.902*/903DLMALLOC_EXPORT void dlfree(void*);904905/*906calloc(size_t n_elements, size_t element_size);907Returns a pointer to n_elements * element_size bytes, with all locations908set to zero.909*/910DLMALLOC_EXPORT void* dlcalloc(size_t, size_t);911912/*913realloc(void* p, size_t n)914Returns a pointer to a chunk of size n that contains the same data915as does chunk p up to the minimum of (n, p's size) bytes, or null916if no space is available.917918The returned pointer may or may not be the same as p. The algorithm919prefers extending p in most cases when possible, otherwise it920employs the equivalent of a malloc-copy-free sequence.921922If p is null, realloc is equivalent to malloc.923924If space is not available, realloc returns null, errno is set (if on925ANSI) and p is NOT freed.926927if n is for fewer bytes than already held by p, the newly unused928space is lopped off and freed if possible. realloc with a size929argument of zero (re)allocates a minimum-sized chunk.930931The old unix realloc convention of allowing the last-free'd chunk932to be used as an argument to realloc is not supported.933*/934DLMALLOC_EXPORT void* dlrealloc(void*, size_t);935936/*937realloc_in_place(void* p, size_t n)938Resizes the space allocated for p to size n, only if this can be939done without moving p (i.e., only if there is adjacent space940available if n is greater than p's current allocated size, or n is941less than or equal to p's size). This may be used instead of plain942realloc if an alternative allocation strategy is needed upon failure943to expand space; for example, reallocation of a buffer that must be944memory-aligned or cleared. You can use realloc_in_place to trigger945these alternatives only when needed.946947Returns p if successful; otherwise null.948*/949DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t);950951/*952memalign(size_t alignment, size_t n);953Returns a pointer to a newly allocated chunk of n bytes, aligned954in accord with the alignment argument.955956The alignment argument should be a power of two. If the argument is957not a power of two, the nearest greater power is used.9588-byte alignment is guaranteed by normal malloc calls, so don't959bother calling memalign with an argument of 8 or less.960961Overreliance on memalign is a sure way to fragment space.962*/963DLMALLOC_EXPORT void* dlmemalign(size_t, size_t);964965/*966int posix_memalign(void** pp, size_t alignment, size_t n);967Allocates a chunk of n bytes, aligned in accord with the alignment968argument. Differs from memalign only in that it (1) assigns the969allocated memory to *pp rather than returning it, (2) fails and970returns EINVAL if the alignment is not a power of two (3) fails and971returns ENOMEM if memory cannot be allocated.972*/973DLMALLOC_EXPORT int dlposix_memalign(void**, size_t, size_t);974975/*976valloc(size_t n);977Equivalent to memalign(pagesize, n), where pagesize is the page978size of the system. 
/*
  memalign(size_t alignment, size_t n);
  Returns a pointer to a newly allocated chunk of n bytes, aligned
  in accord with the alignment argument.

  The alignment argument should be a power of two. If the argument is
  not a power of two, the nearest greater power is used.
  8-byte alignment is guaranteed by normal malloc calls, so don't
  bother calling memalign with an argument of 8 or less.

  Overreliance on memalign is a sure way to fragment space.
*/
DLMALLOC_EXPORT void* dlmemalign(size_t, size_t);

/*
  int posix_memalign(void** pp, size_t alignment, size_t n);
  Allocates a chunk of n bytes, aligned in accord with the alignment
  argument. Differs from memalign only in that it (1) assigns the
  allocated memory to *pp rather than returning it, (2) fails and
  returns EINVAL if the alignment is not a power of two (3) fails and
  returns ENOMEM if memory cannot be allocated.
*/
DLMALLOC_EXPORT int dlposix_memalign(void**, size_t, size_t);

/*
  valloc(size_t n);
  Equivalent to memalign(pagesize, n), where pagesize is the page
  size of the system. If the pagesize is unknown, 4096 is used.
*/
DLMALLOC_EXPORT void* dlvalloc(size_t);

/*
  mallopt(int parameter_number, int parameter_value)
  Sets tunable parameters.  The format is to provide a
  (parameter-number, parameter-value) pair.  mallopt then sets the
  corresponding parameter to the argument value if it can (i.e., so
  long as the value is meaningful), and returns 1 if successful else
  0.  To work around the fact that mallopt is specified to use int,
  not size_t parameters, the value -1 is specially treated as the
  maximum unsigned size_t value.

  SVID/XPG/ANSI defines four standard param numbers for mallopt,
  normally defined in malloc.h.  None of these are used in this malloc,
  so setting them has no effect. But this malloc also supports other
  options in mallopt. See below for details.  Briefly, supported
  parameters are as follows (listed defaults are for "typical"
  configurations).

  Symbol            param #  default    allowed param values
  M_TRIM_THRESHOLD     -1   2*1024*1024   any   (-1 disables)
  M_GRANULARITY        -2     page size   any power of 2 >= page size
  M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
*/
DLMALLOC_EXPORT int dlmallopt(int, int);

/*
  malloc_footprint();
  Returns the number of bytes obtained from the system.  The total
  number of bytes allocated by malloc, realloc etc., is less than this
  value. Unlike mallinfo, this function returns only a precomputed
  result, so can be called frequently to monitor memory consumption.
  Even if locks are otherwise defined, this function does not use them,
  so results might not be up to date.
*/
DLMALLOC_EXPORT size_t dlmalloc_footprint(void);

/*
  malloc_max_footprint();
  Returns the maximum number of bytes obtained from the system. This
  value will be greater than current footprint if deallocated space
  has been reclaimed by the system. The peak number of bytes allocated
  by malloc, realloc etc., is less than this value. Unlike mallinfo,
  this function returns only a precomputed result, so can be called
  frequently to monitor memory consumption.  Even if locks are
  otherwise defined, this function does not use them, so results might
  not be up to date.
*/
DLMALLOC_EXPORT size_t dlmalloc_max_footprint(void);

/*
  malloc_footprint_limit();
  Returns the number of bytes that the heap is allowed to obtain from
  the system, returning the last value returned by
  malloc_set_footprint_limit, or the maximum size_t value if
  never set. The returned value reflects a permission. There is no
  guarantee that this number of bytes can actually be obtained from
  the system.
*/
DLMALLOC_EXPORT size_t dlmalloc_footprint_limit();

/*
  malloc_set_footprint_limit();
  Sets the maximum number of bytes to obtain from the system, causing
  failure returns from malloc and related functions upon attempts to
  exceed this value. The argument value may be subject to page
  rounding to an enforceable limit; this actual value is returned.
  Using an argument of the maximum possible size_t effectively
  disables checks. If the argument is less than or equal to the
  current malloc_footprint, then all future allocations that require
  additional system memory will fail. However, invocation cannot
  retroactively deallocate existing used memory.
*/
DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes);
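/*
  Example (a sketch): capping the heap at roughly 64 MB.  The value actually
  enforced (possibly rounded to a page multiple) is returned.

    size_t granted = dlmalloc_set_footprint_limit((size_t)64 * 1024 * 1024);
    size_t current = dlmalloc_footprint();   // bytes obtained so far
*/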
#if MALLOC_INSPECT_ALL
/*
  malloc_inspect_all(void(*handler)(void *start,
                                    void *end,
                                    size_t used_bytes,
                                    void* callback_arg),
                     void* arg);
  Traverses the heap and calls the given handler for each managed
  region, skipping all bytes that are (or may be) used for bookkeeping
  purposes.  Traversal does not include chunks that have been
  directly memory mapped. Each reported region begins at the start
  address, and continues up to but not including the end address.  The
  first used_bytes of the region contain allocated data. If
  used_bytes is zero, the region is unallocated. The handler is
  invoked with the given callback argument. If locks are defined, they
  are held during the entire traversal.  It is a bad idea to invoke
  other malloc functions from within the handler.

  For example, to count the number of in-use chunks with size greater
  than 1000, you could write:
    static int count = 0;
    void count_chunks(void* start, void* end, size_t used, void* arg) {
      if (used >= 1000) ++count;
    }
  then:
    malloc_inspect_all(count_chunks, NULL);

  malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined.
*/
DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*),
                                          void* arg);

#endif /* MALLOC_INSPECT_ALL */

#if !NO_MALLINFO
/*
  mallinfo()
  Returns (by copy) a struct containing various summary statistics:

  arena:     current total non-mmapped bytes allocated from system
  ordblks:   the number of free chunks
  smblks:    always zero.
  hblks:     current number of mmapped regions
  hblkhd:    total bytes held in mmapped regions
  usmblks:   the maximum total allocated space. This will be greater
                than current total if trimming has occurred.
  fsmblks:   always zero
  uordblks:  current total allocated space (normal or mmapped)
  fordblks:  total free space
  keepcost:  the maximum number of bytes that could ideally be released
               back to system via malloc_trim. ("ideally" means that
               it ignores page restrictions etc.)

  Because these fields are ints, but internal bookkeeping may
  be kept as longs, the reported values may wrap around zero and
  thus be inaccurate.
*/
DLMALLOC_EXPORT struct mallinfo dlmallinfo(void);
#endif /* NO_MALLINFO */
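/*
  Example (a sketch): a quick snapshot of heap usage via the fields described
  above.

    struct mallinfo mi = dlmallinfo();
    fprintf(stderr, "in use: %lu bytes, free: %lu bytes\n",
            (unsigned long)mi.uordblks, (unsigned long)mi.fordblks);
*/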
/*
  independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);

  independent_calloc is similar to calloc, but instead of returning a
  single cleared space, it returns an array of pointers to n_elements
  independent elements that can hold contents of size elem_size, each
  of which starts out cleared, and can be independently freed,
  realloc'ed etc. The elements are guaranteed to be adjacently
  allocated (this is not guaranteed to occur with multiple callocs or
  mallocs), which may also improve cache locality in some
  applications.

  The "chunks" argument is optional (i.e., may be null, which is
  probably the most typical usage). If it is null, the returned array
  is itself dynamically allocated and should also be freed when it is
  no longer needed. Otherwise, the chunks array must be of at least
  n_elements in length. It is filled in with the pointers to the
  chunks.

  In either case, independent_calloc returns this pointer array, or
  null if the allocation failed.  If n_elements is zero and "chunks"
  is null, it returns a chunk representing an array with zero elements
  (which should be freed if not wanted).

  Each element must be freed when it is no longer needed. This can be
  done all at once using bulk_free.

  independent_calloc simplifies and speeds up implementations of many
  kinds of pools.  It may also be useful when constructing large data
  structures that initially have a fixed number of fixed-sized nodes,
  but the number is not known at compile time, and some of the nodes
  may later need to be freed. For example:

    struct Node { int item; struct Node* next; };

    struct Node* build_list() {
      struct Node** pool;
      int i;
      int n = read_number_of_nodes_needed();
      if (n <= 0) return 0;
      pool = (struct Node**)independent_calloc(n, sizeof(struct Node), 0);
      if (pool == 0) die();
      // organize into a linked list...
      struct Node* first = pool[0];
      for (i = 0; i < n-1; ++i)
        pool[i]->next = pool[i+1];
      free(pool);     // Can now free the array (or not, if it is needed later)
      return first;
    }
*/
DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**);

/*
  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);

  independent_comalloc allocates, all at once, a set of n_elements
  chunks with sizes indicated in the "sizes" array.  It returns
  an array of pointers to these elements, each of which can be
  independently freed, realloc'ed etc. The elements are guaranteed to
  be adjacently allocated (this is not guaranteed to occur with
  multiple callocs or mallocs), which may also improve cache locality
  in some applications.

  The "chunks" argument is optional (i.e., may be null). If it is null
  the returned array is itself dynamically allocated and should also
  be freed when it is no longer needed. Otherwise, the chunks array
  must be of at least n_elements in length. It is filled in with the
  pointers to the chunks.

  In either case, independent_comalloc returns this pointer array, or
  null if the allocation failed.  If n_elements is zero and chunks is
  null, it returns a chunk representing an array with zero elements
  (which should be freed if not wanted).

  Each element must be freed when it is no longer needed. This can be
  done all at once using bulk_free.

  independent_comalloc differs from independent_calloc in that each
  element may have a different size, and also that it does not
  automatically clear elements.

  independent_comalloc can be used to speed up allocation in cases
  where several structs or objects must always be allocated at the
  same time.  For example:

    struct Head { ... }
    struct Foot { ... }

    void send_message(char* msg) {
      int msglen = strlen(msg);
      size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
      void* chunks[3];
      if (independent_comalloc(3, sizes, chunks) == 0)
        die();
      struct Head* head = (struct Head*)(chunks[0]);
      char* body = (char*)(chunks[1]);
      struct Foot* foot = (struct Foot*)(chunks[2]);
      // ...
    }

  In general though, independent_comalloc is worth using only for
  larger values of n_elements.
  For small values, you probably won't
  detect enough difference from series of malloc calls to bother.

  Overuse of independent_comalloc can increase overall memory usage,
  since it cannot reuse existing noncontiguous small chunks that
  might be available for some of the elements.
*/
DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**);

/*
  bulk_free(void* array[], size_t n_elements)
  Frees and clears (sets to null) each non-null pointer in the given
  array.  This is likely to be faster than freeing them one-by-one.
  If footers are used, pointers that have been allocated in different
  mspaces are not freed or cleared, and the count of all such pointers
  is returned.  For large arrays of pointers with poor locality, it
  may be worthwhile to sort this array before calling bulk_free.
*/
DLMALLOC_EXPORT size_t  dlbulk_free(void**, size_t n_elements);

/*
  pvalloc(size_t n);
  Equivalent to valloc(minimum-page-that-holds(n)), that is,
  round up n to nearest pagesize.
*/
DLMALLOC_EXPORT void*  dlpvalloc(size_t);

/*
  malloc_trim(size_t pad);

  If possible, gives memory back to the system (via negative arguments
  to sbrk) if there is unused memory at the `high' end of the malloc
  pool or in unused MMAP segments. You can call this after freeing
  large blocks of memory to potentially reduce the system-level memory
  requirements of a program. However, it cannot guarantee to reduce
  memory. Under some allocation patterns, some large free blocks of
  memory will be locked between two used chunks, so they cannot be
  given back to the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed. If this argument is zero, only
  the minimum amount of memory to maintain internal data structures
  will be left. Non-zero arguments can be supplied to maintain enough
  trailing space to service future expected allocations without having
  to re-obtain memory from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.
*/
DLMALLOC_EXPORT int  dlmalloc_trim(size_t);

/*
  malloc_stats();
  Prints on stderr the amount of space obtained from the system (both
  via sbrk and mmap), the maximum amount (which may be more than
  current if malloc_trim and/or munmap got called), and the current
  number of bytes allocated via malloc (or realloc, etc) but not yet
  freed. Note that this is the number of bytes allocated, not the
  number requested. It will be larger than the number requested
  because of alignment and bookkeeping overhead. Because it includes
  alignment wastage as being in use, this figure may be greater than
  zero even when no user-level chunks are allocated.

  The reported current and maximum system memory can be inaccurate if
  a program makes other calls to system memory allocation functions
  (normally sbrk) outside of malloc.

  malloc_stats prints only the most commonly interesting statistics.
  More information can be obtained by calling mallinfo.
*/
DLMALLOC_EXPORT void  dlmalloc_stats(void);

/*
  malloc_usable_size(void* p);

  Returns the number of bytes you can actually use in
  an allocated chunk, which may be more than you requested (although
  often not) due to alignment and minimum size constraints.
  You can use this many bytes without worrying about
  overwriting other allocated objects. This is not a particularly great
  programming practice.  malloc_usable_size can be more useful in
  debugging and assertions, for example:

    p = malloc(n);
    assert(malloc_usable_size(p) >= 256);
*/
size_t dlmalloc_usable_size(void*);
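/*
  Example (a sketch): releasing a group of elements obtained from
  independent_calloc (described above) in one call.

    void **elems = dlindependent_calloc(n, sizeof(struct Node), NULL);
    // ... use elems[0] .. elems[n-1] ...
    size_t unfreed = dlbulk_free(elems, n);  // frees and nulls each entry
    dlfree(elems);                           // per the notes above, the array
                                             // itself should also be freed
*/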
#endif /* ONLY_MSPACES */

#if MSPACES

/*
  mspace is an opaque type representing an independent
  region of space that supports mspace_malloc, etc.
*/
typedef void* mspace;

/*
  create_mspace creates and returns a new independent space with the
  given initial capacity, or, if 0, the default granularity size.  It
  returns null if there is no system memory available to create the
  space.  If argument locked is non-zero, the space uses a separate
  lock to control access. The capacity of the space will grow
  dynamically as needed to service mspace_malloc requests.  You can
  control the sizes of incremental increases of this space by
  compiling with a different DEFAULT_GRANULARITY or dynamically
  setting with mallopt(M_GRANULARITY, value).
*/
DLMALLOC_EXPORT mspace create_mspace(size_t capacity, int locked);

/*
  destroy_mspace destroys the given space, and attempts to return all
  of its memory back to the system, returning the total number of
  bytes freed. After destruction, the results of access to all memory
  used by the space become undefined.
*/
DLMALLOC_EXPORT size_t destroy_mspace(mspace msp);

/*
  create_mspace_with_base uses the memory supplied as the initial base
  of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
  space is used for bookkeeping, so the capacity must be at least this
  large. (Otherwise 0 is returned.) When this initial space is
  exhausted, additional memory will be obtained from the system.
  Destroying this space will deallocate all additionally allocated
  space (if possible) but not the initial base.
*/
DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int locked);

/*
  mspace_track_large_chunks controls whether requests for large chunks
  are allocated in their own untracked mmapped regions, separate from
  others in this mspace. By default large chunks are not tracked,
  which reduces fragmentation. However, such chunks are not
  necessarily released to the system upon destroy_mspace.  Enabling
  tracking by setting to true may increase fragmentation, but avoids
  leakage when relying on destroy_mspace to release all memory
  allocated using this space.  The function returns the previous
  setting.
*/
DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable);


/*
  mspace_malloc behaves as malloc, but operates within
  the given space.
*/
DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes);

/*
  mspace_free behaves as free, but operates within
  the given space.

  If compiled with FOOTERS==1, mspace_free is not actually needed.
  free may be called instead of mspace_free because freed chunks from
  any space are handled by their originating spaces.
*/
DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem);

/*
  mspace_realloc behaves as realloc, but operates within
  the given space.

  If compiled with FOOTERS==1, mspace_realloc is not actually
  needed.  realloc may be called instead of mspace_realloc because
  realloced chunks from any space are handled by their originating
  spaces.
*/
DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void* mem, size_t newsize);
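/*
  Example (a sketch): an arena that is created, used, and then released
  wholesale with destroy_mspace instead of freeing each allocation.

    mspace arena = create_mspace(0, 0);        // default capacity, no lock
    void *a = mspace_malloc(arena, 128);
    void *b = mspace_calloc(arena, 16, sizeof(int));
    // ... use a and b ...
    size_t released = destroy_mspace(arena);   // returns bytes given back
*/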
realloc may be called instead of mspace_realloc because1380realloced chunks from any space are handled by their originating1381spaces.1382*/1383DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void* mem, size_t newsize);13841385/*1386mspace_calloc behaves as calloc, but operates within1387the given space.1388*/1389DLMALLOC_EXPORT void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);13901391/*1392mspace_memalign behaves as memalign, but operates within1393the given space.1394*/1395DLMALLOC_EXPORT void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);13961397/*1398mspace_independent_calloc behaves as independent_calloc, but1399operates within the given space.1400*/1401DLMALLOC_EXPORT void** mspace_independent_calloc(mspace msp, size_t n_elements,1402size_t elem_size, void* chunks[]);14031404/*1405mspace_independent_comalloc behaves as independent_comalloc, but1406operates within the given space.1407*/1408DLMALLOC_EXPORT void** mspace_independent_comalloc(mspace msp, size_t n_elements,1409size_t sizes[], void* chunks[]);14101411/*1412mspace_footprint() returns the number of bytes obtained from the1413system for this space.1414*/1415DLMALLOC_EXPORT size_t mspace_footprint(mspace msp);14161417/*1418mspace_max_footprint() returns the peak number of bytes obtained from the1419system for this space.1420*/1421DLMALLOC_EXPORT size_t mspace_max_footprint(mspace msp);142214231424#if !NO_MALLINFO1425/*1426mspace_mallinfo behaves as mallinfo, but reports properties of1427the given space.1428*/1429DLMALLOC_EXPORT struct mallinfo mspace_mallinfo(mspace msp);1430#endif /* NO_MALLINFO */14311432/*1433malloc_usable_size(void* p) behaves the same as malloc_usable_size;1434*/1435DLMALLOC_EXPORT size_t mspace_usable_size(const void* mem);14361437/*1438mspace_malloc_stats behaves as malloc_stats, but reports1439properties of the given space.1440*/1441DLMALLOC_EXPORT void mspace_malloc_stats(mspace msp);14421443/*1444mspace_trim behaves as malloc_trim, but1445operates within the given space.1446*/1447DLMALLOC_EXPORT int mspace_trim(mspace msp, size_t pad);14481449/*1450An alias for mallopt.1451*/1452DLMALLOC_EXPORT int mspace_mallopt(int, int);14531454#endif /* MSPACES */14551456#ifdef __cplusplus1457} /* end of extern "C" */1458#endif /* __cplusplus */14591460/*1461========================================================================1462To make a fully customizable malloc.h header file, cut everything1463above this line, put into file malloc.h, edit to suit, and #include it1464on the next line, as well as in programs that use this malloc.1465========================================================================1466*/14671468/* #include "malloc.h" */14691470/*------------------------------ internal #includes ---------------------- */14711472#ifdef _MSC_VER1473#pragma warning( disable : 4146 ) /* no "unsigned" warnings */1474#endif /* _MSC_VER */1475#if !NO_MALLOC_STATS1476#include <stdio.h> /* for printing in malloc_stats */1477#endif /* NO_MALLOC_STATS */1478#ifndef LACKS_ERRNO_H1479#include <errno.h> /* for MALLOC_FAILURE_ACTION */1480#endif /* LACKS_ERRNO_H */1481#ifdef DEBUG1482#if ABORT_ON_ASSERT_FAILURE1483#undef assert1484#define assert(x) if(!(x)) ABORT1485#else /* ABORT_ON_ASSERT_FAILURE */1486#include <assert.h>1487#endif /* ABORT_ON_ASSERT_FAILURE */1488#else /* DEBUG */1489#ifndef assert1490#define assert(x)1491#endif1492#define DEBUG 01493#endif /* DEBUG */1494#if !defined(WIN32) && !defined(LACKS_TIME_H)1495#include <time.h> /* for magic initialization */1496#endif 
/* WIN32 */1497#ifndef LACKS_STDLIB_H1498#include <stdlib.h> /* for abort() */1499#endif /* LACKS_STDLIB_H */1500#ifndef LACKS_STRING_H1501#include <string.h> /* for memset etc */1502#endif /* LACKS_STRING_H */1503#if USE_BUILTIN_FFS1504#ifndef LACKS_STRINGS_H1505#include <strings.h> /* for ffs */1506#endif /* LACKS_STRINGS_H */1507#endif /* USE_BUILTIN_FFS */1508#if HAVE_MMAP1509#ifndef LACKS_SYS_MMAN_H1510/* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */1511#if (defined(linux) && !defined(__USE_GNU))1512#define __USE_GNU 11513#include <sys/mman.h> /* for mmap */1514#undef __USE_GNU1515#else1516#include <sys/mman.h> /* for mmap */1517#endif /* linux */1518#endif /* LACKS_SYS_MMAN_H */1519#ifndef LACKS_FCNTL_H1520#include <fcntl.h>1521#endif /* LACKS_FCNTL_H */1522#endif /* HAVE_MMAP */1523#ifndef LACKS_UNISTD_H1524#include <unistd.h> /* for sbrk, sysconf */1525#else /* LACKS_UNISTD_H */1526#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)1527extern void* sbrk(ptrdiff_t);1528#endif /* FreeBSD etc */1529#endif /* LACKS_UNISTD_H */15301531/* Declarations for locking */1532#if USE_LOCKS1533#ifndef WIN321534#if defined (__SVR4) && defined (__sun) /* solaris */1535#include <thread.h>1536#elif !defined(LACKS_SCHED_H)1537#include <sched.h>1538#endif /* solaris or LACKS_SCHED_H */1539#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS1540#include <pthread.h>1541#endif /* USE_RECURSIVE_LOCKS ... */1542#elif defined(_MSC_VER)1543#ifndef _M_AMD641544/* These are already defined on AMD64 builds */1545#ifdef __cplusplus1546extern "C" {1547#endif /* __cplusplus */1548LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);1549LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);1550#ifdef __cplusplus1551}1552#endif /* __cplusplus */1553#endif /* _M_AMD64 */1554#pragma intrinsic (_InterlockedCompareExchange)1555#pragma intrinsic (_InterlockedExchange)1556#define interlockedcompareexchange _InterlockedCompareExchange1557#define interlockedexchange _InterlockedExchange1558#elif defined(WIN32) && defined(__GNUC__)1559#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)1560#define interlockedexchange __sync_lock_test_and_set1561#endif /* Win32 */1562#else /* USE_LOCKS */1563#endif /* USE_LOCKS */15641565#ifndef LOCK_AT_FORK1566#define LOCK_AT_FORK 01567#endif15681569/* Declarations for bit scanning on win32 */1570#if defined(_MSC_VER) && _MSC_VER>=13001571#ifndef BitScanForward /* Try to avoid pulling in WinNT.h */1572#ifdef __cplusplus1573extern "C" {1574#endif /* __cplusplus */1575unsigned char _BitScanForward(unsigned long *index, unsigned long mask);1576unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);1577#ifdef __cplusplus1578}1579#endif /* __cplusplus */15801581#define BitScanForward _BitScanForward1582#define BitScanReverse _BitScanReverse1583#pragma intrinsic(_BitScanForward)1584#pragma intrinsic(_BitScanReverse)1585#endif /* BitScanForward */1586#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */15871588#ifndef WIN321589#ifndef malloc_getpagesize1590# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */1591# ifndef _SC_PAGE_SIZE1592# define _SC_PAGE_SIZE _SC_PAGESIZE1593# endif1594# endif1595# ifdef _SC_PAGE_SIZE1596# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)1597# else1598# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)1599extern int getpagesize();1600# define malloc_getpagesize 
getpagesize()1601# else1602# ifdef WIN32 /* use supplied emulation of getpagesize */1603# define malloc_getpagesize getpagesize()1604# else1605# ifndef LACKS_SYS_PARAM_H1606# include <sys/param.h>1607# endif1608# ifdef EXEC_PAGESIZE1609# define malloc_getpagesize EXEC_PAGESIZE1610# else1611# ifdef NBPG1612# ifndef CLSIZE1613# define malloc_getpagesize NBPG1614# else1615# define malloc_getpagesize (NBPG * CLSIZE)1616# endif1617# else1618# ifdef NBPC1619# define malloc_getpagesize NBPC1620# else1621# ifdef PAGESIZE1622# define malloc_getpagesize PAGESIZE1623# else /* just guess */1624# define malloc_getpagesize ((size_t)4096U)1625# endif1626# endif1627# endif1628# endif1629# endif1630# endif1631# endif1632#endif1633#endif16341635/* ------------------- size_t and alignment properties -------------------- */16361637/* The byte and bit size of a size_t */1638#define SIZE_T_SIZE (sizeof(size_t))1639#define SIZE_T_BITSIZE (sizeof(size_t) << 3)16401641/* Some constants coerced to size_t */1642/* Annoying but necessary to avoid errors on some platforms */1643#define SIZE_T_ZERO ((size_t)0)1644#define SIZE_T_ONE ((size_t)1)1645#define SIZE_T_TWO ((size_t)2)1646#define SIZE_T_FOUR ((size_t)4)1647#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)1648#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)1649#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)1650#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)16511652/* The bit mask value corresponding to MALLOC_ALIGNMENT */1653#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)16541655/* True if address a has acceptable alignment */1656#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)16571658/* the number of bytes to offset an address to align it */1659#define align_offset(A)\1660((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\1661((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))16621663/* -------------------------- MMAP preliminaries ------------------------- */16641665/*1666If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and1667checks to fail so compiler optimizer can delete code rather than1668using so many "#if"s.1669*/167016711672/* MORECORE and MMAP must return MFAIL on failure */1673#define MFAIL ((void*)(MAX_SIZE_T))1674#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */16751676#if HAVE_MMAP16771678#ifndef WIN321679#define MUNMAP_DEFAULT(a, s) munmap((a), (s))1680#define MMAP_PROT (PROT_READ|PROT_WRITE)1681#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)1682#define MAP_ANONYMOUS MAP_ANON1683#endif /* MAP_ANON */1684#ifdef MAP_ANONYMOUS1685#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)1686#define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)1687#else /* MAP_ANONYMOUS */1688/*1689Nearly all versions of mmap support MAP_ANONYMOUS, so the following1690is unlikely to be needed, but is supplied just in case.1691*/1692#define MMAP_FLAGS (MAP_PRIVATE)1693static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */1694#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \1695(dev_zero_fd = open("/dev/zero", O_RDWR), \1696mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \1697mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))1698#endif /* MAP_ANONYMOUS */16991700#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)17011702#else /* WIN32 */17031704/* Win32 MMAP via VirtualAlloc */1705SDL_FORCE_INLINE void* win32mmap(size_t size) {1706void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);1707return (ptr != 0)? 
ptr: MFAIL;1708}17091710/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */1711SDL_FORCE_INLINE void* win32direct_mmap(size_t size) {1712void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,1713PAGE_READWRITE);1714return (ptr != 0)? ptr: MFAIL;1715}17161717/* This function supports releasing coalesed segments */1718SDL_FORCE_INLINE int win32munmap(void* ptr, size_t size) {1719MEMORY_BASIC_INFORMATION minfo;1720char* cptr = (char*)ptr;1721while (size) {1722if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)1723return -1;1724if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||1725minfo.State != MEM_COMMIT || minfo.RegionSize > size)1726return -1;1727if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)1728return -1;1729cptr += minfo.RegionSize;1730size -= minfo.RegionSize;1731}1732return 0;1733}17341735#define MMAP_DEFAULT(s) win32mmap(s)1736#define MUNMAP_DEFAULT(a, s) win32munmap((a), (s))1737#define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s)1738#endif /* WIN32 */1739#endif /* HAVE_MMAP */17401741#if HAVE_MREMAP1742#ifndef WIN321743#define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))1744#endif /* WIN32 */1745#endif /* HAVE_MREMAP */17461747/**1748* Define CALL_MORECORE1749*/1750#if HAVE_MORECORE1751#ifdef MORECORE1752#define CALL_MORECORE(S) MORECORE(S)1753#else /* MORECORE */1754#define CALL_MORECORE(S) MORECORE_DEFAULT(S)1755#endif /* MORECORE */1756#else /* HAVE_MORECORE */1757#define CALL_MORECORE(S) MFAIL1758#endif /* HAVE_MORECORE */17591760/**1761* Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP1762*/1763#if HAVE_MMAP1764#define USE_MMAP_BIT (SIZE_T_ONE)17651766#ifdef MMAP1767#define CALL_MMAP(s) MMAP(s)1768#else /* MMAP */1769#define CALL_MMAP(s) MMAP_DEFAULT(s)1770#endif /* MMAP */1771#ifdef MUNMAP1772#define CALL_MUNMAP(a, s) MUNMAP((a), (s))1773#else /* MUNMAP */1774#define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s))1775#endif /* MUNMAP */1776#ifdef DIRECT_MMAP1777#define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)1778#else /* DIRECT_MMAP */1779#define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)1780#endif /* DIRECT_MMAP */1781#else /* HAVE_MMAP */1782#define USE_MMAP_BIT (SIZE_T_ZERO)17831784#define MMAP(s) MFAIL1785#define MUNMAP(a, s) (-1)1786#define DIRECT_MMAP(s) MFAIL1787#define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)1788#define CALL_MMAP(s) MMAP(s)1789#define CALL_MUNMAP(a, s) MUNMAP((a), (s))1790#endif /* HAVE_MMAP */17911792/**1793* Define CALL_MREMAP1794*/1795#if HAVE_MMAP && HAVE_MREMAP1796#ifdef MREMAP1797#define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))1798#else /* MREMAP */1799#define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))1800#endif /* MREMAP */1801#else /* HAVE_MMAP && HAVE_MREMAP */1802#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL1803#endif /* HAVE_MMAP && HAVE_MREMAP */18041805/* mstate bit set if continguous morecore disabled or failed */1806#define USE_NONCONTIGUOUS_BIT (4U)18071808/* segment bit set in create_mspace_with_base */1809#define EXTERN_BIT (8U)181018111812/* --------------------------- Lock preliminaries ------------------------ */18131814/*1815When locks are defined, there is one global lock, plus1816one per-mspace lock.18171818The global lock_ensures that mparams.magic and other unique1819mparams values are initialized only once. It also protects1820sequences of calls to MORECORE. In many cases sys_alloc requires1821two calls, that should not be interleaved with calls by other1822threads. 
This does not protect against direct calls to MORECORE1823by other threads not using this lock, so there is still code to1824cope the best we can on interference.18251826Per-mspace locks surround calls to malloc, free, etc.1827By default, locks are simple non-reentrant mutexes.18281829Because lock-protected regions generally have bounded times, it is1830OK to use the supplied simple spinlocks. Spinlocks are likely to1831improve performance for lightly contended applications, but worsen1832performance under heavy contention.18331834If USE_LOCKS is > 1, the definitions of lock routines here are1835bypassed, in which case you will need to define the type MLOCK_T,1836and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK1837and TRY_LOCK. You must also declare a1838static MLOCK_T malloc_global_mutex = { initialization values };.18391840*/18411842#if !USE_LOCKS1843#define USE_LOCK_BIT (0U)1844#define INITIAL_LOCK(l) (0)1845#define DESTROY_LOCK(l) (0)1846#define ACQUIRE_MALLOC_GLOBAL_LOCK()1847#define RELEASE_MALLOC_GLOBAL_LOCK()18481849#else1850#if USE_LOCKS > 11851/* ----------------------- User-defined locks ------------------------ */1852/* Define your own lock implementation here */1853/* #define INITIAL_LOCK(lk) ... */1854/* #define DESTROY_LOCK(lk) ... */1855/* #define ACQUIRE_LOCK(lk) ... */1856/* #define RELEASE_LOCK(lk) ... */1857/* #define TRY_LOCK(lk) ... */1858/* static MLOCK_T malloc_global_mutex = ... */18591860#elif USE_SPIN_LOCKS18611862/* First, define CAS_LOCK and CLEAR_LOCK on ints */1863/* Note CAS_LOCK defined to return 0 on success */18641865#if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))1866#define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1)1867#define CLEAR_LOCK(sl) __sync_lock_release(sl)18681869#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))1870/* Custom spin locks for older gcc on x86 */1871SDL_FORCE_INLINE int x86_cas_lock(int *sl) {1872int ret;1873int val = 1;1874int cmp = 0;1875__asm__ __volatile__ ("lock; cmpxchgl %1, %2"1876: "=a" (ret)1877: "r" (val), "m" (*(sl)), "0"(cmp)1878: "memory", "cc");1879return ret;1880}18811882SDL_FORCE_INLINE void x86_clear_lock(int* sl) {1883assert(*sl != 0);1884int prev = 0;1885int ret;1886__asm__ __volatile__ ("lock; xchgl %0, %1"1887: "=r" (ret)1888: "m" (*(sl)), "0"(prev)1889: "memory");1890}18911892#define CAS_LOCK(sl) x86_cas_lock(sl)1893#define CLEAR_LOCK(sl) x86_clear_lock(sl)18941895#else /* Win32 MSC */1896#define CAS_LOCK(sl) interlockedexchange(sl, (LONG)1)1897#define CLEAR_LOCK(sl) interlockedexchange (sl, (LONG)0)18981899#endif /* ... gcc spins locks ... */19001901/* How to yield for a spin lock */1902#define SPINS_PER_YIELD 631903#if defined(_MSC_VER)1904#define SLEEP_EX_DURATION 50 /* delay for yield/sleep */1905#define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE)1906#elif defined (__SVR4) && defined (__sun) /* solaris */1907#define SPIN_LOCK_YIELD thr_yield();1908#elif !defined(LACKS_SCHED_H)1909#define SPIN_LOCK_YIELD sched_yield();1910#else1911#define SPIN_LOCK_YIELD1912#endif /* ... yield ... 
*/19131914#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 01915/* Plain spin locks use single word (embedded in malloc_states) */1916static int spin_acquire_lock(volatile long *sl) {1917int spins = 0;1918while (*sl != 0 || CAS_LOCK(sl)) {1919if ((++spins & SPINS_PER_YIELD) == 0) {1920SPIN_LOCK_YIELD;1921}1922}1923return 0;1924}19251926#define MLOCK_T volatile long1927#define TRY_LOCK(sl) !CAS_LOCK(sl)1928#define RELEASE_LOCK(sl) CLEAR_LOCK(sl)1929#define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0)1930#define INITIAL_LOCK(sl) (*sl = 0)1931#define DESTROY_LOCK(sl) (0)1932static MLOCK_T malloc_global_mutex = 0;19331934#else /* USE_RECURSIVE_LOCKS */1935/* types for lock owners */1936#ifdef WIN321937#define THREAD_ID_T DWORD1938#define CURRENT_THREAD GetCurrentThreadId()1939#define EQ_OWNER(X,Y) ((X) == (Y))1940#else1941/*1942Note: the following assume that pthread_t is a type that can be1943initialized to (casted) zero. If this is not the case, you will need to1944somehow redefine these or not use spin locks.1945*/1946#define THREAD_ID_T pthread_t1947#define CURRENT_THREAD pthread_self()1948#define EQ_OWNER(X,Y) pthread_equal(X, Y)1949#endif19501951struct malloc_recursive_lock {1952int sl;1953unsigned int c;1954THREAD_ID_T threadid;1955};19561957#define MLOCK_T struct malloc_recursive_lock1958static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0};19591960SDL_FORCE_INLINE void recursive_release_lock(MLOCK_T *lk) {1961assert(lk->sl != 0);1962if (--lk->c == 0) {1963CLEAR_LOCK(&lk->sl);1964}1965}19661967SDL_FORCE_INLINE int recursive_acquire_lock(MLOCK_T *lk) {1968THREAD_ID_T mythreadid = CURRENT_THREAD;1969int spins = 0;1970for (;;) {1971if (*((volatile int *)(&lk->sl)) == 0) {1972if (!CAS_LOCK(&lk->sl)) {1973lk->threadid = mythreadid;1974lk->c = 1;1975return 0;1976}1977}1978else if (EQ_OWNER(lk->threadid, mythreadid)) {1979++lk->c;1980return 0;1981}1982if ((++spins & SPINS_PER_YIELD) == 0) {1983SPIN_LOCK_YIELD;1984}1985}1986}19871988SDL_FORCE_INLINE int recursive_try_lock(MLOCK_T *lk) {1989THREAD_ID_T mythreadid = CURRENT_THREAD;1990if (*((volatile int *)(&lk->sl)) == 0) {1991if (!CAS_LOCK(&lk->sl)) {1992lk->threadid = mythreadid;1993lk->c = 1;1994return 1;1995}1996}1997else if (EQ_OWNER(lk->threadid, mythreadid)) {1998++lk->c;1999return 1;2000}2001return 0;2002}20032004#define RELEASE_LOCK(lk) recursive_release_lock(lk)2005#define TRY_LOCK(lk) recursive_try_lock(lk)2006#define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk)2007#define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0)2008#define DESTROY_LOCK(lk) (0)2009#endif /* USE_RECURSIVE_LOCKS */20102011#elif defined(WIN32) /* Win32 critical sections */2012#define MLOCK_T CRITICAL_SECTION2013#define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0)2014#define RELEASE_LOCK(lk) LeaveCriticalSection(lk)2015#define TRY_LOCK(lk) TryEnterCriticalSection(lk)2016#define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000))2017#define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0)2018#define NEED_GLOBAL_LOCK_INIT20192020static MLOCK_T malloc_global_mutex;2021static volatile LONG malloc_global_mutex_status;20222023/* Use spin loop to initialize global lock */2024static void init_malloc_global_mutex() {2025for (;;) {2026long stat = malloc_global_mutex_status;2027if (stat > 0)2028return;2029/* transition to < 0 while initializing, then to > 0) */2030if (stat == 0 &&2031interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) 
{2032InitializeCriticalSection(&malloc_global_mutex);2033interlockedexchange(&malloc_global_mutex_status, (LONG)1);2034return;2035}2036SleepEx(0, FALSE);2037}2038}20392040#else /* pthreads-based locks */2041#define MLOCK_T pthread_mutex_t2042#define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk)2043#define RELEASE_LOCK(lk) pthread_mutex_unlock(lk)2044#define TRY_LOCK(lk) (!pthread_mutex_trylock(lk))2045#define INITIAL_LOCK(lk) pthread_init_lock(lk)2046#define DESTROY_LOCK(lk) pthread_mutex_destroy(lk)20472048#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)2049/* Cope with old-style linux recursive lock initialization by adding */2050/* skipped internal declaration from pthread.h */2051extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,2052int __kind));2053#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP2054#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)2055#endif /* USE_RECURSIVE_LOCKS ... */20562057static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;20582059static int pthread_init_lock (MLOCK_T *lk) {2060pthread_mutexattr_t attr;2061if (pthread_mutexattr_init(&attr)) return 1;2062#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 02063if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;2064#endif2065if (pthread_mutex_init(lk, &attr)) return 1;2066if (pthread_mutexattr_destroy(&attr)) return 1;2067return 0;2068}20692070#endif /* ... lock types ... */20712072/* Common code for all lock types */2073#define USE_LOCK_BIT (2U)20742075#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK2076#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);2077#endif20782079#ifndef RELEASE_MALLOC_GLOBAL_LOCK2080#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);2081#endif20822083#endif /* USE_LOCKS */20842085/* ----------------------- Chunk representations ------------------------ */20862087/*2088(The following includes lightly edited explanations by Colin Plumb.)20892090The malloc_chunk declaration below is misleading (but accurate and2091necessary). It declares a "view" into memory allowing access to2092necessary fields at known offsets from a given base.20932094Chunks of memory are maintained using a `boundary tag' method as2095originally described by Knuth. (See the paper by Paul Wilson2096ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such2097techniques.) Sizes of free chunks are stored both in the front of2098each chunk and at the end. This makes consolidating fragmented2099chunks into bigger chunks fast. The head fields also hold bits2100representing whether chunks are free or in use.21012102Here are some pictures to make it clearer. 
They are "exploded" to2103show that the state of a chunk can be thought of as extending from2104the high 31 bits of the head field of its header through the2105prev_foot and PINUSE_BIT bit of the following chunk header.21062107A chunk that's in use looks like:21082109chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2110| Size of previous chunk (if P = 0) |2111+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2112+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|2113| Size of this chunk 1| +-+2114mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2115| |2116+- -+2117| |2118+- -+2119| :2120+- size - sizeof(size_t) available payload bytes -+2121: |2122chunk-> +- -+2123| |2124+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2125+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|2126| Size of next chunk (may or may not be in use) | +-+2127mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+21282129And if it's free, it looks like this:21302131chunk-> +- -+2132| User payload (must be in use, or we would have merged!) |2133+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2134+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|2135| Size of this chunk 0| +-+2136mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2137| Next pointer |2138+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2139| Prev pointer |2140+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2141| :2142+- size - sizeof(struct chunk) unused bytes -+2143: |2144chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2145| Size of this chunk |2146+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2147+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|2148| Size of next chunk (must be in use, or we would have merged)| +-+2149mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2150| :2151+- User payload -+2152: |2153+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2154|0|2155+-+2156Note that since we always merge adjacent free chunks, the chunks2157adjacent to a free chunk must be in use.21582159Given a pointer to a chunk (which can be derived trivially from the2160payload pointer) we can, in O(1) time, find out whether the adjacent2161chunks are free, and if so, unlink them from the lists that they2162are on and merge them with the current chunk.21632164Chunks always begin on even word boundaries, so the mem portion2165(which is returned to the user) is also on an even word boundary, and2166thus at least double-word aligned.21672168The P (PINUSE_BIT) bit, stored in the unused low-order bit of the2169chunk size (which is always a multiple of two words), is an in-use2170bit for the *previous* chunk. If that bit is *clear*, then the2171word before the current chunk size contains the previous chunk2172size, and can be used to find the front of the previous chunk.2173The very first chunk allocated always has this bit set, preventing2174access to non-existent (or non-owned) memory. If pinuse is set for2175any given chunk, then you CANNOT determine the size of the2176previous chunk, and might even get a memory addressing fault when2177trying to do so.21782179The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of2180the chunk size redundantly records whether the current chunk is2181inuse (unless the chunk is mmapped). 
  This redundancy enables usage checks within free and realloc,
  and reduces indirection when freeing and consolidating chunks.

  Each freshly allocated chunk must have both cinuse and pinuse set.
  That is, each allocated chunk borders either a previously allocated
  and still in-use chunk, or the base of its memory arena. This is
  ensured by making all allocations from the `lowest' part of any
  found chunk.  Further, no free chunk physically borders another one,
  so each free chunk is known to be preceded and followed by either
  inuse chunks or the ends of memory.

  Note that the `foot' of the current chunk is actually represented
  as the prev_foot of the NEXT chunk. This makes it easier to
  deal with alignments etc but can be very confusing when trying
  to extend or adapt this code.

  The exceptions to all this are

     1. The special chunk `top' is the top-most available chunk (i.e.,
        the one bordering the end of available memory). It is treated
        specially.  Top is never included in any bin, is used only if
        no other chunk is available, and is released back to the
        system if it is very large (see M_TRIM_THRESHOLD).  In effect,
        the top chunk is treated as larger (and thus less well
        fitting) than any other available chunk.  The top chunk
        doesn't update its trailing size field since there is no next
        contiguous chunk that would have to index off it. However,
        space is still allocated for it (TOP_FOOT_SIZE) to enable
        separation or merging when space is extended.

     2. Chunks allocated via mmap have both cinuse and pinuse bits
        cleared in their head fields.  Because they are allocated
        one-by-one, each must carry its own prev_foot field, which is
        also used to hold the offset this chunk has within its mmapped
        region, which is needed to preserve alignment. Each mmapped
        chunk is trailed by the first two fields of a fake next-chunk
        for sake of usage checks.

*/

struct malloc_chunk {
  size_t               prev_foot;  /* Size of previous chunk (if free).  */
  size_t               head;       /* Size and inuse bits. */
  struct malloc_chunk* fd;         /* double links -- used only if free. */
  struct malloc_chunk* bk;
};

typedef struct malloc_chunk  mchunk;
typedef struct malloc_chunk* mchunkptr;
typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
typedef unsigned int bindex_t;         /* Described below */
typedef unsigned int binmap_t;         /* Described below */
typedef unsigned int flag_t;           /* The type of various bit flag sets */

/* -------------------- Chunk sizes and alignments ----------------------- */

#define MCHUNK_SIZE         (sizeof(mchunk))

#if FOOTERS
#define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
#else /* FOOTERS */
#define CHUNK_OVERHEAD      (SIZE_T_SIZE)
#endif /* FOOTERS */

/* MMapped chunks need a second word of overhead ... */
#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
/* ...
and additional padding for fake next-chunk at foot */2249#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)22502251/* The smallest size we can malloc is an aligned minimal chunk */2252#define MIN_CHUNK_SIZE\2253((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)22542255/* conversion from malloc headers to user pointers, and back */2256#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))2257#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))2258/* chunk associated with aligned address A */2259#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))22602261/* Bounds on request (not chunk) sizes. */2262#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)2263#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)22642265/* pad request bytes into a usable size */2266#define pad_request(req) \2267(((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)22682269/* pad request, checking for minimum (but not maximum) */2270#define request2size(req) \2271(((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))227222732274/* ------------------ Operations on head and foot fields ----------------- */22752276/*2277The head field of a chunk is or'ed with PINUSE_BIT when previous2278adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in2279use, unless mmapped, in which case both bits are cleared.22802281FLAG4_BIT is not used by this malloc, but might be useful in extensions.2282*/22832284#define PINUSE_BIT (SIZE_T_ONE)2285#define CINUSE_BIT (SIZE_T_TWO)2286#define FLAG4_BIT (SIZE_T_FOUR)2287#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)2288#define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)22892290/* Head value for fenceposts */2291#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)22922293/* extraction of fields from head words */2294#define cinuse(p) ((p)->head & CINUSE_BIT)2295#define pinuse(p) ((p)->head & PINUSE_BIT)2296#define flag4inuse(p) ((p)->head & FLAG4_BIT)2297#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT)2298#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0)22992300#define chunksize(p) ((p)->head & ~(FLAG_BITS))23012302#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)2303#define set_flag4(p) ((p)->head |= FLAG4_BIT)2304#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT)23052306/* Treat space at ptr +/- offset as a chunk */2307#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))2308#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))23092310/* Ptr to next or previous physical malloc_chunk. */2311#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))2312#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))23132314/* extract next chunk's pinuse bit */2315#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)23162317/* Get/set size at footer */2318#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)2319#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))23202321/* Set size, pinuse bit, and foot */2322#define set_size_and_pinuse_of_free_chunk(p, s)\2323((p)->head = (s|PINUSE_BIT), set_foot(p, s))23242325/* Set size, pinuse bit, foot, and clear next pinuse */2326#define set_free_with_pinuse(p, s, n)\2327(clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))23282329/* Get the internal overhead associated with chunk p */2330#define overhead_for(p)\2331(is_mmapped(p)? 
MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)23322333/* Return true if malloced space is not necessarily cleared */2334#if MMAP_CLEARS2335#define calloc_must_clear(p) (!is_mmapped(p))2336#else /* MMAP_CLEARS */2337#define calloc_must_clear(p) (1)2338#endif /* MMAP_CLEARS */23392340/* ---------------------- Overlaid data structures ----------------------- */23412342/*2343When chunks are not in use, they are treated as nodes of either2344lists or trees.23452346"Small" chunks are stored in circular doubly-linked lists, and look2347like this:23482349chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2350| Size of previous chunk |2351+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2352`head:' | Size of chunk, in bytes |P|2353mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2354| Forward pointer to next chunk in list |2355+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2356| Back pointer to previous chunk in list |2357+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2358| Unused space (may be 0 bytes long) .2359. .2360. |2361nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2362`foot:' | Size of chunk, in bytes |2363+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+23642365Larger chunks are kept in a form of bitwise digital trees (aka2366tries) keyed on chunksizes. Because malloc_tree_chunks are only for2367free chunks greater than 256 bytes, their size doesn't impose any2368constraints on user chunk sizes. Each node looks like:23692370chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2371| Size of previous chunk |2372+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2373`head:' | Size of chunk, in bytes |P|2374mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2375| Forward pointer to next chunk of same size |2376+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2377| Back pointer to previous chunk of same size |2378+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2379| Pointer to left child (child[0]) |2380+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2381| Pointer to right child (child[1]) |2382+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2383| Pointer to parent |2384+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2385| bin index of this chunk |2386+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2387| Unused space .2388. |2389nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+2390`foot:' | Size of chunk, in bytes |2391+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+23922393Each tree holding treenodes is a tree of unique chunk sizes. Chunks2394of the same size are arranged in a circularly-linked list, with only2395the oldest chunk (the next to be used, in our FIFO ordering)2396actually in the tree. (Tree members are distinguished by a non-null2397parent pointer.) If a chunk with the same size an an existing node2398is inserted, it is linked off the existing node using pointers that2399work in the same way as fd/bk pointers of small chunks.24002401Each tree contains a power of 2 sized range of chunk sizes (the2402smallest is 0x100 <= x < 0x180), which is is divided in half at each2403tree level, with the chunks in the smaller half of the range (0x1002404<= x < 0x140 for the top nose) in the left subtree and the larger2405half (0x140 <= x < 0x180) in the right subtree. 
This is, of course,2406done by inspecting individual bits.24072408Using these rules, each node's left subtree contains all smaller2409sizes than its right subtree. However, the node at the root of each2410subtree has no particular ordering relationship to either. (The2411dividing line between the subtree sizes is based on trie relation.)2412If we remove the last chunk of a given size from the interior of the2413tree, we need to replace it with a leaf node. The tree ordering2414rules permit a node to be replaced by any leaf below it.24152416The smallest chunk in a tree (a common operation in a best-fit2417allocator) can be found by walking a path to the leftmost leaf in2418the tree. Unlike a usual binary tree, where we follow left child2419pointers until we reach a null, here we follow the right child2420pointer any time the left one is null, until we reach a leaf with2421both child pointers null. The smallest chunk in the tree will be2422somewhere along that path.24232424The worst case number of steps to add, find, or remove a node is2425bounded by the number of bits differentiating chunks within2426bins. Under current bin calculations, this ranges from 6 up to 212427(for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case2428is of course much better.2429*/24302431struct malloc_tree_chunk {2432/* The first four fields must be compatible with malloc_chunk */2433size_t prev_foot;2434size_t head;2435struct malloc_tree_chunk* fd;2436struct malloc_tree_chunk* bk;24372438struct malloc_tree_chunk* child[2];2439struct malloc_tree_chunk* parent;2440bindex_t index;2441};24422443typedef struct malloc_tree_chunk tchunk;2444typedef struct malloc_tree_chunk* tchunkptr;2445typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */24462447/* A little helper macro for trees */2448#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])24492450/* ----------------------------- Segments -------------------------------- */24512452/*2453Each malloc space may include non-contiguous segments, held in a2454list headed by an embedded malloc_segment record representing the2455top-most space. Segments also include flags holding properties of2456the space. Large chunks that are directly allocated by mmap are not2457included in this list. They are instead independently created and2458destroyed without otherwise keeping track of them.24592460Segment management mainly comes into play for spaces allocated by2461MMAP. Any call to MMAP might or might not return memory that is2462adjacent to an existing segment. MORECORE normally contiguously2463extends the current space, so this space is almost always adjacent,2464which is simpler and faster to deal with. (This is why MORECORE is2465used preferentially to MMAP when both are available -- see2466sys_alloc.) When allocating using MMAP, we don't use any of the2467hinting mechanisms (inconsistently) supported in various2468implementations of unix mmap, or distinguish reserving from2469committing memory. Instead, we just ask for space, and exploit2470contiguity when we get it. It is probably possible to do2471better than this on some systems, but no general scheme seems2472to be significantly better.24732474Management entails a simpler variant of the consolidation scheme2475used for chunks to reduce fragmentation -- new adjacent memory is2476normally prepended or appended to an existing segment. 
However,2477there are limitations compared to chunk consolidation that mostly2478reflect the fact that segment processing is relatively infrequent2479(occurring only when getting memory from system) and that we2480don't expect to have huge numbers of segments:24812482* Segments are not indexed, so traversal requires linear scans. (It2483would be possible to index these, but is not worth the extra2484overhead and complexity for most programs on most platforms.)2485* New segments are only appended to old ones when holding top-most2486memory; if they cannot be prepended to others, they are held in2487different segments.24882489Except for the top-most segment of an mstate, each segment record2490is kept at the tail of its segment. Segments are added by pushing2491segment records onto the list headed by &mstate.seg for the2492containing mstate.24932494Segment flags control allocation/merge/deallocation policies:2495* If EXTERN_BIT set, then we did not allocate this segment,2496and so should not try to deallocate or merge with others.2497(This currently holds only for the initial segment passed2498into create_mspace_with_base.)2499* If USE_MMAP_BIT set, the segment may be merged with2500other surrounding mmapped segments and trimmed/de-allocated2501using munmap.2502* If neither bit is set, then the segment was obtained using2503MORECORE so can be merged with surrounding MORECORE'd segments2504and deallocated/trimmed using MORECORE with negative arguments.2505*/25062507struct malloc_segment {2508char* base; /* base address */2509size_t size; /* allocated size */2510struct malloc_segment* next; /* ptr to next segment */2511flag_t sflags; /* mmap and extern flag */2512};25132514#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)2515#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)25162517typedef struct malloc_segment msegment;2518typedef struct malloc_segment* msegmentptr;25192520/* ---------------------------- malloc_state ----------------------------- */25212522/*2523A malloc_state holds all of the bookkeeping for a space.2524The main fields are:25252526Top2527The topmost chunk of the currently active segment. Its size is2528cached in topsize. The actual size of topmost space is2529topsize+TOP_FOOT_SIZE, which includes space reserved for adding2530fenceposts and segment records if necessary when getting more2531space from the system. The size at which to autotrim top is2532cached from mparams in trim_check, except that it is disabled if2533an autotrim fails.25342535Designated victim (dv)2536This is the preferred chunk for servicing small requests that2537don't have exact fits. It is normally the chunk split off most2538recently to service another small request. Its size is cached in2539dvsize. The link fields of this chunk are not maintained since it2540is not kept in a bin.25412542SmallBins2543An array of bin headers for free chunks. These bins hold chunks2544with sizes less than MIN_LARGE_SIZE bytes. Each bin contains2545chunks of all the same size, spaced 8 bytes apart. To simplify2546use in double-linked lists, each bin header acts as a malloc_chunk2547pointing to the real first node, if it exists (else pointing to2548itself). This avoids special-casing for headers. But to avoid2549waste, we allocate only the fd/bk pointers of bins, and then use2550repositioning tricks to treat these as the fields of a chunk.25512552TreeBins2553Treebins are pointers to the roots of trees holding a range of2554sizes. 
There are 2 equally spaced treebins for each power of two2555from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything2556larger.25572558Bin maps2559There is one bit map for small bins ("smallmap") and one for2560treebins ("treemap). Each bin sets its bit when non-empty, and2561clears the bit when empty. Bit operations are then used to avoid2562bin-by-bin searching -- nearly all "search" is done without ever2563looking at bins that won't be selected. The bit maps2564conservatively use 32 bits per map word, even if on 64bit system.2565For a good description of some of the bit-based techniques used2566here, see Henry S. Warren Jr's book "Hacker's Delight" (and2567supplement at http://hackersdelight.org/). Many of these are2568intended to reduce the branchiness of paths through malloc etc, as2569well as to reduce the number of memory locations read or written.25702571Segments2572A list of segments headed by an embedded malloc_segment record2573representing the initial space.25742575Address check support2576The least_addr field is the least address ever obtained from2577MORECORE or MMAP. Attempted frees and reallocs of any address less2578than this are trapped (unless INSECURE is defined).25792580Magic tag2581A cross-check field that should always hold same value as mparams.magic.25822583Max allowed footprint2584The maximum allowed bytes to allocate from system (zero means no limit)25852586Flags2587Bits recording whether to use MMAP, locks, or contiguous MORECORE25882589Statistics2590Each space keeps track of current and maximum system memory2591obtained via MORECORE or MMAP.25922593Trim support2594Fields holding the amount of unused topmost memory that should trigger2595trimming, and a counter to force periodic scanning to release unused2596non-topmost segments.25972598Locking2599If USE_LOCKS is defined, the "mutex" lock is acquired and released2600around every public call using this mspace.26012602Extension support2603A void* pointer and a size_t field that can be used to help implement2604extensions to this malloc.2605*/26062607/* Bin types, widths and sizes */2608#define NSMALLBINS (32U)2609#define NTREEBINS (32U)2610#define SMALLBIN_SHIFT (3U)2611#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)2612#define TREEBIN_SHIFT (8U)2613#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)2614#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)2615#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)26162617struct malloc_state {2618binmap_t smallmap;2619binmap_t treemap;2620size_t dvsize;2621size_t topsize;2622char* least_addr;2623mchunkptr dv;2624mchunkptr top;2625size_t trim_check;2626size_t release_checks;2627size_t magic;2628mchunkptr smallbins[(NSMALLBINS+1)*2];2629tbinptr treebins[NTREEBINS];2630size_t footprint;2631size_t max_footprint;2632size_t footprint_limit; /* zero means no limit */2633flag_t mflags;2634#if USE_LOCKS2635MLOCK_T mutex; /* locate lock among fields that rarely change */2636#endif /* USE_LOCKS */2637msegment seg;2638void* extp; /* Unused but available for extensions */2639size_t exts;2640};26412642typedef struct malloc_state* mstate;26432644/* ------------- Global malloc_state and malloc_params ------------------- */26452646/*2647malloc_params holds global properties, including those that can be2648dynamically set using mallopt. There is a single instance, mparams,2649initialized in init_mparams. 
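  For example (an illustrative sketch; the M_* option numbers are the
  mallopt settings described earlier in this file, and with USE_DL_PREFIX
  the exported name is dlmallopt):

      mallopt(M_TRIM_THRESHOLD, 256*1024);
      mallopt(M_GRANULARITY,     64*1024);
      mallopt(M_MMAP_THRESHOLD, 512*1024);

  updates mparams.trim_threshold, mparams.granularity and
  mparams.mmap_threshold respectively; each call returns 1 if the new
  value is accepted and 0 otherwise.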
Note that the non-zeroness of "magic"2650also serves as an initialization flag.2651*/26522653struct malloc_params {2654size_t magic;2655size_t page_size;2656size_t granularity;2657size_t mmap_threshold;2658size_t trim_threshold;2659flag_t default_mflags;2660};26612662static struct malloc_params mparams;26632664/* Ensure mparams initialized */2665#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())26662667#if !ONLY_MSPACES26682669/* The global malloc_state used for all non-"mspace" calls */2670static struct malloc_state _gm_;2671#define gm (&_gm_)2672#define is_global(M) ((M) == &_gm_)26732674#endif /* !ONLY_MSPACES */26752676#define is_initialized(M) ((M)->top != 0)26772678/* -------------------------- system alloc setup ------------------------- */26792680/* Operations on mflags */26812682#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)2683#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)2684#if USE_LOCKS2685#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)2686#else2687#define disable_lock(M)2688#endif26892690#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)2691#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)2692#if HAVE_MMAP2693#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)2694#else2695#define disable_mmap(M)2696#endif26972698#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)2699#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)27002701#define set_lock(M,L)\2702((M)->mflags = (L)?\2703((M)->mflags | USE_LOCK_BIT) :\2704((M)->mflags & ~USE_LOCK_BIT))27052706/* page-align a size */2707#define page_align(S)\2708(((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))27092710/* granularity-align a size */2711#define granularity_align(S)\2712(((S) + (mparams.granularity - SIZE_T_ONE))\2713& ~(mparams.granularity - SIZE_T_ONE))271427152716/* For mmap, use granularity alignment on windows, else page-align */2717#ifdef WIN322718#define mmap_align(S) granularity_align(S)2719#else2720#define mmap_align(S) page_align(S)2721#endif27222723/* For sys_alloc, enough padding to ensure can malloc request on success */2724#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)27252726#define is_page_aligned(S)\2727(((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)2728#define is_granularity_aligned(S)\2729(((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)27302731/* True if segment S holds address A */2732#define segment_holds(S, A)\2733((char*)(A) >= S->base && (char*)(A) < S->base + S->size)27342735/* Return segment holding given address */2736static msegmentptr segment_holding(mstate m, char* addr) {2737msegmentptr sp = &m->seg;2738for (;;) {2739if (addr >= sp->base && addr < sp->base + sp->size)2740return sp;2741if ((sp = sp->next) == 0)2742return 0;2743}2744}27452746/* Return true if segment contains a segment link */2747static int has_segment_link(mstate m, msegmentptr ss) {2748msegmentptr sp = &m->seg;2749for (;;) {2750if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)2751return 1;2752if ((sp = sp->next) == 0)2753return 0;2754}2755}27562757#ifndef MORECORE_CANNOT_TRIM2758#define should_trim(M,s) ((s) > (M)->trim_check)2759#else /* MORECORE_CANNOT_TRIM */2760#define should_trim(M,s) (0)2761#endif /* MORECORE_CANNOT_TRIM */27622763/*2764TOP_FOOT_SIZE is padding at the end of a segment, including space2765that may be needed to place segment records and fenceposts when new2766noncontiguous segments are added.2767*/2768#define 
TOP_FOOT_SIZE\2769(align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)277027712772/* ------------------------------- Hooks -------------------------------- */27732774/*2775PREACTION should be defined to return 0 on success, and nonzero on2776failure. If you are not using locking, you can redefine these to do2777anything you like.2778*/27792780#if USE_LOCKS2781#define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)2782#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }2783#else /* USE_LOCKS */27842785#ifndef PREACTION2786#define PREACTION(M) (0)2787#endif /* PREACTION */27882789#ifndef POSTACTION2790#define POSTACTION(M)2791#endif /* POSTACTION */27922793#endif /* USE_LOCKS */27942795/*2796CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.2797USAGE_ERROR_ACTION is triggered on detected bad frees and2798reallocs. The argument p is an address that might have triggered the2799fault. It is ignored by the two predefined actions, but might be2800useful in custom actions that try to help diagnose errors.2801*/28022803#if PROCEED_ON_ERROR28042805/* A count of the number of corruption errors causing resets */2806int malloc_corruption_error_count;28072808/* default corruption action */2809static void reset_on_error(mstate m);28102811#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)2812#define USAGE_ERROR_ACTION(m, p)28132814#else /* PROCEED_ON_ERROR */28152816#ifndef CORRUPTION_ERROR_ACTION2817#define CORRUPTION_ERROR_ACTION(m) ABORT2818#endif /* CORRUPTION_ERROR_ACTION */28192820#ifndef USAGE_ERROR_ACTION2821#define USAGE_ERROR_ACTION(m,p) ABORT2822#endif /* USAGE_ERROR_ACTION */28232824#endif /* PROCEED_ON_ERROR */282528262827/* -------------------------- Debugging setup ---------------------------- */28282829#if ! DEBUG28302831#define check_free_chunk(M,P)2832#define check_inuse_chunk(M,P)2833#define check_malloced_chunk(M,P,N)2834#define check_mmapped_chunk(M,P)2835#define check_malloc_state(M)2836#define check_top_chunk(M,P)28372838#else /* DEBUG */2839#define check_free_chunk(M,P) do_check_free_chunk(M,P)2840#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)2841#define check_top_chunk(M,P) do_check_top_chunk(M,P)2842#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)2843#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)2844#define check_malloc_state(M) do_check_malloc_state(M)28452846static void do_check_any_chunk(mstate m, mchunkptr p);2847static void do_check_top_chunk(mstate m, mchunkptr p);2848static void do_check_mmapped_chunk(mstate m, mchunkptr p);2849static void do_check_inuse_chunk(mstate m, mchunkptr p);2850static void do_check_free_chunk(mstate m, mchunkptr p);2851static void do_check_malloced_chunk(mstate m, void* mem, size_t s);2852static void do_check_tree(mstate m, tchunkptr t);2853static void do_check_treebin(mstate m, bindex_t i);2854static void do_check_smallbin(mstate m, bindex_t i);2855static void do_check_malloc_state(mstate m);2856static int bin_find(mstate m, mchunkptr x);2857static size_t traverse_and_check(mstate m);2858#endif /* DEBUG */28592860/* ---------------------------- Indexing Bins ---------------------------- */28612862#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)2863#define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT)2864#define small_index2size(i) ((i) << SMALLBIN_SHIFT)2865#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))28662867/* addressing by index. 
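   For instance (illustrative numbers only): a padded request of 40 bytes
   satisfies is_small(40), has small_index(40) == 5 and
   small_index2size(5) == 40, and is served from the list headed by
   smallbin_at(M, 5).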
See above about smallbin repositioning */2868#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))2869#define treebin_at(M,i) (&((M)->treebins[i]))28702871/* assign tree index for size S to variable I. Use x86 asm if possible */2872#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))2873#define compute_tree_index(S, I)\2874{\2875unsigned int X = S >> TREEBIN_SHIFT;\2876if (X == 0)\2877I = 0;\2878else if (X > 0xFFFF)\2879I = NTREEBINS-1;\2880else {\2881unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \2882I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\2883}\2884}28852886#elif defined (__INTEL_COMPILER)2887#define compute_tree_index(S, I)\2888{\2889size_t X = S >> TREEBIN_SHIFT;\2890if (X == 0)\2891I = 0;\2892else if (X > 0xFFFF)\2893I = NTREEBINS-1;\2894else {\2895unsigned int K = _bit_scan_reverse (X); \2896I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\2897}\2898}28992900#elif defined(_MSC_VER) && _MSC_VER>=13002901#define compute_tree_index(S, I)\2902{\2903size_t X = S >> TREEBIN_SHIFT;\2904if (X == 0)\2905I = 0;\2906else if (X > 0xFFFF)\2907I = NTREEBINS-1;\2908else {\2909unsigned int K;\2910_BitScanReverse((DWORD *) &K, (DWORD) X);\2911I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\2912}\2913}29142915#else /* GNUC */2916#define compute_tree_index(S, I)\2917{\2918size_t X = S >> TREEBIN_SHIFT;\2919if (X == 0)\2920I = 0;\2921else if (X > 0xFFFF)\2922I = NTREEBINS-1;\2923else {\2924unsigned int Y = (unsigned int)X;\2925unsigned int N = ((Y - 0x100) >> 16) & 8;\2926unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\2927N += K;\2928N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\2929K = 14 - N + ((Y <<= K) >> 15);\2930I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\2931}\2932}2933#endif /* GNUC */29342935/* Bit representing maximum resolved size in a treebin at i */2936#define bit_for_tree_index(i) \2937(i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)29382939/* Shift placing maximum resolved bit in a treebin at i as sign bit */2940#define leftshift_for_tree_index(i) \2941((i == NTREEBINS-1)? 0 : \2942((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))29432944/* The size of the smallest chunk held in bin with index i */2945#define minsize_for_tree_index(i) \2946((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \2947(((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))294829492950/* ------------------------ Operations on bin maps ----------------------- */29512952/* bit corresponding to given index */2953#define idx2bit(i) ((binmap_t)(1) << (i))29542955/* Mark/Clear bits with given index */2956#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))2957#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))2958#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))29592960#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))2961#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))2962#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))29632964/* isolate the least set bit of a bitmap */2965#define least_bit(x) ((x) & -(x))29662967/* mask with all bits to left of least bit of x on */2968#define left_bits(x) ((x<<1) | -(x<<1))29692970/* mask with all bits to left of or equal to least bit of x on */2971#define same_or_left_bits(x) ((x) | -(x))29722973/* index corresponding to given bit. 
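   For example (a hypothetical bitmap value, purely illustrative): if a
   bin map word holds 0x50, i.e. bins 4 and 6 are non-empty, then
   least_bit(0x50) is 0x10 and compute_bit2idx(0x10, I) below sets I to 4,
   selecting the smallest non-empty bin.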
/* index corresponding to given bit. Use x86 asm if possible */

#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#define compute_bit2idx(X, I)\
{\
  unsigned int J;\
  J = __builtin_ctz(X); \
  I = (bindex_t)J;\
}

#elif defined (__INTEL_COMPILER)
#define compute_bit2idx(X, I)\
{\
  unsigned int J;\
  J = _bit_scan_forward (X); \
  I = (bindex_t)J;\
}

#elif defined(_MSC_VER) && _MSC_VER>=1300
#define compute_bit2idx(X, I)\
{\
  unsigned int J;\
  _BitScanForward((DWORD *) &J, X);\
  I = (bindex_t)J;\
}

#elif USE_BUILTIN_FFS
#define compute_bit2idx(X, I) I = ffs(X)-1

#else
#define compute_bit2idx(X, I)\
{\
  unsigned int Y = X - 1;\
  unsigned int K = Y >> (16-4) & 16;\
  unsigned int N = K;        Y >>= K;\
  N += K = Y >> (8-3) &  8;  Y >>= K;\
  N += K = Y >> (4-2) &  4;  Y >>= K;\
  N += K = Y >> (2-1) &  2;  Y >>= K;\
  N += K = Y >> (1-0) &  1;  Y >>= K;\
  I = (bindex_t)(N + Y);\
}
#endif /* GNUC */


/* ----------------------- Runtime Check Support ------------------------- */

/*
  For security, the main invariant is that malloc/free/etc never
  writes to a static address other than malloc_state, unless static
  malloc_state itself has been corrupted, which cannot occur via
  malloc (because of these checks). In essence this means that we
  believe all pointers, sizes, maps etc held in malloc_state, but
  check all of those linked or offset from other embedded data
  structures.  These checks are interspersed with main code in a way
  that tends to minimize their run-time cost.

  When FOOTERS is defined, in addition to range checking, we also
  verify footer fields of inuse chunks, which can be used to guarantee
  that the mstate controlling malloc/free is intact.  This is a
  streamlined version of the approach described by William Robertson
  et al in "Run-time Detection of Heap-based Overflows" LISA'03
  http://www.usenix.org/events/lisa03/tech/robertson.html The footer
  of an inuse chunk holds the xor of its mstate and a random seed,
  that is checked upon calls to free() and realloc().  This is
  (probabilistically) unguessable from outside the program, but can be
  computed by any code successfully malloc'ing any chunk, so does not
  itself provide protection against code that has already broken
  security through some other means.  Unlike Robertson et al, we
  always dynamically check addresses of all offset chunks (previous,
  next, etc). 
This turns out to be cheaper than relying on hashes.3043*/30443045#if !INSECURE3046/* Check if address a is at least as high as any from MORECORE or MMAP */3047#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)3048/* Check if address of next chunk n is higher than base chunk p */3049#define ok_next(p, n) ((char*)(p) < (char*)(n))3050/* Check if p has inuse status */3051#define ok_inuse(p) is_inuse(p)3052/* Check if p has its pinuse bit on */3053#define ok_pinuse(p) pinuse(p)30543055#else /* !INSECURE */3056#define ok_address(M, a) (1)3057#define ok_next(b, n) (1)3058#define ok_inuse(p) (1)3059#define ok_pinuse(p) (1)3060#endif /* !INSECURE */30613062#if (FOOTERS && !INSECURE)3063/* Check if (alleged) mstate m has expected magic field */3064#define ok_magic(M) ((M)->magic == mparams.magic)3065#else /* (FOOTERS && !INSECURE) */3066#define ok_magic(M) (1)3067#endif /* (FOOTERS && !INSECURE) */30683069/* In gcc, use __builtin_expect to minimize impact of checks */3070#if !INSECURE3071#if defined(__GNUC__) && __GNUC__ >= 33072#define RTCHECK(e) __builtin_expect(e, 1)3073#else /* GNUC */3074#define RTCHECK(e) (e)3075#endif /* GNUC */3076#else /* !INSECURE */3077#define RTCHECK(e) (1)3078#endif /* !INSECURE */30793080/* macros to set up inuse chunks with or without footers */30813082#if !FOOTERS30833084#define mark_inuse_foot(M,p,s)30853086/* Macros for setting head/foot of non-mmapped chunks */30873088/* Set cinuse bit and pinuse bit of next chunk */3089#define set_inuse(M,p,s)\3090((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\3091((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)30923093/* Set cinuse and pinuse of this chunk and pinuse of next chunk */3094#define set_inuse_and_pinuse(M,p,s)\3095((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\3096((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)30973098/* Set size, cinuse and pinuse bit of this chunk */3099#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\3100((p)->head = (s|PINUSE_BIT|CINUSE_BIT))31013102#else /* FOOTERS */31033104/* Set foot of inuse chunk to be xor of mstate and seed */3105#define mark_inuse_foot(M,p,s)\3106(((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))31073108#define get_mstate_for(p)\3109((mstate)(((mchunkptr)((char*)(p) +\3110(chunksize(p))))->prev_foot ^ mparams.magic))31113112#define set_inuse(M,p,s)\3113((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\3114(((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \3115mark_inuse_foot(M,p,s))31163117#define set_inuse_and_pinuse(M,p,s)\3118((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\3119(((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\3120mark_inuse_foot(M,p,s))31213122#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\3123((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\3124mark_inuse_foot(M, p, s))31253126#endif /* !FOOTERS */31273128/* ---------------------------- setting mparams -------------------------- */31293130#if LOCK_AT_FORK3131static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); }3132static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); }3133static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); }3134#endif /* LOCK_AT_FORK */31353136/* Initialize mparams */3137static int init_mparams(void) {3138#ifdef NEED_GLOBAL_LOCK_INIT3139if (malloc_global_mutex_status <= 0)3140init_malloc_global_mutex();3141#endif31423143ACQUIRE_MALLOC_GLOBAL_LOCK();3144if (mparams.magic == 0) {3145size_t magic;3146size_t psize;3147size_t gsize;31483149#ifndef WIN323150psize = malloc_getpagesize;3151gsize = 
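    /*
      psize is the system page size and gsize the allocation granularity
      used when requesting memory from the system.  When DEFAULT_GRANULARITY
      is left at zero, the granularity falls back to the page size (or, on
      Windows, to dwAllocationGranularity).  Both values are checked below
      to be powers of two before being recorded in mparams.
    */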
((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize);3152#else /* WIN32 */3153{3154SYSTEM_INFO system_info;3155GetSystemInfo(&system_info);3156psize = system_info.dwPageSize;3157gsize = ((DEFAULT_GRANULARITY != 0)?3158DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);3159}3160#endif /* WIN32 */31613162/* Sanity-check configuration:3163size_t must be unsigned and as wide as pointer type.3164ints must be at least 4 bytes.3165alignment must be at least 8.3166Alignment, min chunk size, and page size must all be powers of 2.3167*/3168if ((sizeof(size_t) != sizeof(char*)) ||3169(MAX_SIZE_T < MIN_CHUNK_SIZE) ||3170(sizeof(int) < 4) ||3171(MALLOC_ALIGNMENT < (size_t)8U) ||3172((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||3173((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||3174((gsize & (gsize-SIZE_T_ONE)) != 0) ||3175((psize & (psize-SIZE_T_ONE)) != 0))3176ABORT;3177mparams.granularity = gsize;3178mparams.page_size = psize;3179mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;3180mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;3181#if MORECORE_CONTIGUOUS3182mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;3183#else /* MORECORE_CONTIGUOUS */3184mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;3185#endif /* MORECORE_CONTIGUOUS */31863187#if !ONLY_MSPACES3188/* Set up lock for main malloc area */3189gm->mflags = mparams.default_mflags;3190(void)INITIAL_LOCK(&gm->mutex);3191#endif3192#if LOCK_AT_FORK3193pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child);3194#endif31953196{3197#if USE_DEV_RANDOM3198int fd;3199unsigned char buf[sizeof(size_t)];3200/* Try to use /dev/urandom, else fall back on using time */3201if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&3202read(fd, buf, sizeof(buf)) == sizeof(buf)) {3203magic = *((size_t *) buf);3204close(fd);3205}3206else3207#endif /* USE_DEV_RANDOM */3208#ifdef WIN323209magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);3210#elif defined(LACKS_TIME_H)3211magic = (size_t)&magic ^ (size_t)0x55555555U;3212#else3213magic = (size_t)(time(0) ^ (size_t)0x55555555U);3214#endif3215magic |= (size_t)8U; /* ensure nonzero */3216magic &= ~(size_t)7U; /* improve chances of fault for bad values */3217/* Until memory modes commonly available, use volatile-write */3218(*(volatile size_t *)(&(mparams.magic))) = magic;3219}3220}32213222RELEASE_MALLOC_GLOBAL_LOCK();3223return 1;3224}32253226/* support for mallopt */3227static int change_mparam(int param_number, int value) {3228size_t val;3229ensure_initialization();3230val = (value == -1)? MAX_SIZE_T : (size_t)value;3231switch(param_number) {3232case M_TRIM_THRESHOLD:3233mparams.trim_threshold = val;3234return 1;3235case M_GRANULARITY:3236if (val >= mparams.page_size && ((val & (val-1)) == 0)) {3237mparams.granularity = val;3238return 1;3239}3240else3241return 0;3242case M_MMAP_THRESHOLD:3243mparams.mmap_threshold = val;3244return 1;3245default:3246return 0;3247}3248}32493250#if DEBUG3251/* ------------------------- Debugging Support --------------------------- */32523253/* Check properties of any chunk, whether free, inuse, mmapped etc */3254static void do_check_any_chunk(mstate m, mchunkptr p) {3255assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));3256assert(ok_address(m, p));3257}32583259/* Check properties of top chunk */3260static void do_check_top_chunk(mstate m, mchunkptr p) {3261msegmentptr sp = segment_holding(m, (char*)p);3262size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! 
*/3263assert(sp != 0);3264assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));3265assert(ok_address(m, p));3266assert(sz == m->topsize);3267assert(sz > 0);3268assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);3269assert(pinuse(p));3270assert(!pinuse(chunk_plus_offset(p, sz)));3271}32723273/* Check properties of (inuse) mmapped chunks */3274static void do_check_mmapped_chunk(mstate m, mchunkptr p) {3275size_t sz = chunksize(p);3276size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);3277assert(is_mmapped(p));3278assert(use_mmap(m));3279assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));3280assert(ok_address(m, p));3281assert(!is_small(sz));3282assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);3283assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);3284assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);3285}32863287/* Check properties of inuse chunks */3288static void do_check_inuse_chunk(mstate m, mchunkptr p) {3289do_check_any_chunk(m, p);3290assert(is_inuse(p));3291assert(next_pinuse(p));3292/* If not pinuse and not mmapped, previous chunk has OK offset */3293assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);3294if (is_mmapped(p))3295do_check_mmapped_chunk(m, p);3296}32973298/* Check properties of free chunks */3299static void do_check_free_chunk(mstate m, mchunkptr p) {3300size_t sz = chunksize(p);3301mchunkptr next = chunk_plus_offset(p, sz);3302do_check_any_chunk(m, p);3303assert(!is_inuse(p));3304assert(!next_pinuse(p));3305assert (!is_mmapped(p));3306if (p != m->dv && p != m->top) {3307if (sz >= MIN_CHUNK_SIZE) {3308assert((sz & CHUNK_ALIGN_MASK) == 0);3309assert(is_aligned(chunk2mem(p)));3310assert(next->prev_foot == sz);3311assert(pinuse(p));3312assert (next == m->top || is_inuse(next));3313assert(p->fd->bk == p);3314assert(p->bk->fd == p);3315}3316else /* markers are always of size SIZE_T_SIZE */3317assert(sz == SIZE_T_SIZE);3318}3319}33203321/* Check properties of malloced chunks at the point they are malloced */3322static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {3323if (mem != 0) {3324mchunkptr p = mem2chunk(mem);3325size_t sz = p->head & ~INUSE_BITS;3326do_check_inuse_chunk(m, p);3327assert((sz & CHUNK_ALIGN_MASK) == 0);3328assert(sz >= MIN_CHUNK_SIZE);3329assert(sz >= s);3330/* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */3331assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));3332}3333}33343335/* Check a tree and its subtrees. 
*/3336static void do_check_tree(mstate m, tchunkptr t) {3337tchunkptr head = 0;3338tchunkptr u = t;3339bindex_t tindex = t->index;3340size_t tsize = chunksize(t);3341bindex_t idx;3342compute_tree_index(tsize, idx);3343assert(tindex == idx);3344assert(tsize >= MIN_LARGE_SIZE);3345assert(tsize >= minsize_for_tree_index(idx));3346assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));33473348do { /* traverse through chain of same-sized nodes */3349do_check_any_chunk(m, ((mchunkptr)u));3350assert(u->index == tindex);3351assert(chunksize(u) == tsize);3352assert(!is_inuse(u));3353assert(!next_pinuse(u));3354assert(u->fd->bk == u);3355assert(u->bk->fd == u);3356if (u->parent == 0) {3357assert(u->child[0] == 0);3358assert(u->child[1] == 0);3359}3360else {3361assert(head == 0); /* only one node on chain has parent */3362head = u;3363assert(u->parent != u);3364assert (u->parent->child[0] == u ||3365u->parent->child[1] == u ||3366*((tbinptr*)(u->parent)) == u);3367if (u->child[0] != 0) {3368assert(u->child[0]->parent == u);3369assert(u->child[0] != u);3370do_check_tree(m, u->child[0]);3371}3372if (u->child[1] != 0) {3373assert(u->child[1]->parent == u);3374assert(u->child[1] != u);3375do_check_tree(m, u->child[1]);3376}3377if (u->child[0] != 0 && u->child[1] != 0) {3378assert(chunksize(u->child[0]) < chunksize(u->child[1]));3379}3380}3381u = u->fd;3382} while (u != t);3383assert(head != 0);3384}33853386/* Check all the chunks in a treebin. */3387static void do_check_treebin(mstate m, bindex_t i) {3388tbinptr* tb = treebin_at(m, i);3389tchunkptr t = *tb;3390int empty = (m->treemap & (1U << i)) == 0;3391if (t == 0)3392assert(empty);3393if (!empty)3394do_check_tree(m, t);3395}33963397/* Check all the chunks in a smallbin. */3398static void do_check_smallbin(mstate m, bindex_t i) {3399sbinptr b = smallbin_at(m, i);3400mchunkptr p = b->bk;3401unsigned int empty = (m->smallmap & (1U << i)) == 0;3402if (p == b)3403assert(empty);3404if (!empty) {3405for (; p != b; p = p->bk) {3406size_t size = chunksize(p);3407mchunkptr q;3408/* each chunk claims to be free */3409do_check_free_chunk(m, p);3410/* chunk belongs in bin */3411assert(small_index(size) == i);3412assert(p->bk == b || chunksize(p->bk) == chunksize(p));3413/* chunk is followed by an inuse chunk */3414q = next_chunk(p);3415if (q->head != FENCEPOST_HEAD)3416do_check_inuse_chunk(m, q);3417}3418}3419}34203421/* Find x in a bin. Used in other check functions. 
*/3422static int bin_find(mstate m, mchunkptr x) {3423size_t size = chunksize(x);3424if (is_small(size)) {3425bindex_t sidx = small_index(size);3426sbinptr b = smallbin_at(m, sidx);3427if (smallmap_is_marked(m, sidx)) {3428mchunkptr p = b;3429do {3430if (p == x)3431return 1;3432} while ((p = p->fd) != b);3433}3434}3435else {3436bindex_t tidx;3437compute_tree_index(size, tidx);3438if (treemap_is_marked(m, tidx)) {3439tchunkptr t = *treebin_at(m, tidx);3440size_t sizebits = size << leftshift_for_tree_index(tidx);3441while (t != 0 && chunksize(t) != size) {3442t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];3443sizebits <<= 1;3444}3445if (t != 0) {3446tchunkptr u = t;3447do {3448if (u == (tchunkptr)x)3449return 1;3450} while ((u = u->fd) != t);3451}3452}3453}3454return 0;3455}34563457/* Traverse each chunk and check it; return total */3458static size_t traverse_and_check(mstate m) {3459size_t sum = 0;3460if (is_initialized(m)) {3461msegmentptr s = &m->seg;3462sum += m->topsize + TOP_FOOT_SIZE;3463while (s != 0) {3464mchunkptr q = align_as_chunk(s->base);3465mchunkptr lastq = 0;3466assert(pinuse(q));3467while (segment_holds(s, q) &&3468q != m->top && q->head != FENCEPOST_HEAD) {3469sum += chunksize(q);3470if (is_inuse(q)) {3471assert(!bin_find(m, q));3472do_check_inuse_chunk(m, q);3473}3474else {3475assert(q == m->dv || bin_find(m, q));3476assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */3477do_check_free_chunk(m, q);3478}3479lastq = q;3480q = next_chunk(q);3481}3482s = s->next;3483}3484}3485return sum;3486}348734883489/* Check all properties of malloc_state. */3490static void do_check_malloc_state(mstate m) {3491bindex_t i;3492size_t total;3493/* check bins */3494for (i = 0; i < NSMALLBINS; ++i)3495do_check_smallbin(m, i);3496for (i = 0; i < NTREEBINS; ++i)3497do_check_treebin(m, i);34983499if (m->dvsize != 0) { /* check dv chunk */3500do_check_any_chunk(m, m->dv);3501assert(m->dvsize == chunksize(m->dv));3502assert(m->dvsize >= MIN_CHUNK_SIZE);3503assert(bin_find(m, m->dv) == 0);3504}35053506if (m->top != 0) { /* check top chunk */3507do_check_top_chunk(m, m->top);3508/*assert(m->topsize == chunksize(m->top)); redundant */3509assert(m->topsize > 0);3510assert(bin_find(m, m->top) == 0);3511}35123513total = traverse_and_check(m);3514assert(total <= m->footprint);3515assert(m->footprint <= m->max_footprint);3516}3517#endif /* DEBUG */35183519/* ----------------------------- statistics ------------------------------ */35203521#if !NO_MALLINFO3522static struct mallinfo internal_mallinfo(mstate m) {3523struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };3524ensure_initialization();3525if (!PREACTION(m)) {3526check_malloc_state(m);3527if (is_initialized(m)) {3528size_t nfree = SIZE_T_ONE; /* top always free */3529size_t mfree = m->topsize + TOP_FOOT_SIZE;3530size_t sum = mfree;3531msegmentptr s = &m->seg;3532while (s != 0) {3533mchunkptr q = align_as_chunk(s->base);3534while (segment_holds(s, q) &&3535q != m->top && q->head != FENCEPOST_HEAD) {3536size_t sz = chunksize(q);3537sum += sz;3538if (!is_inuse(q)) {3539mfree += sz;3540++nfree;3541}3542q = next_chunk(q);3543}3544s = s->next;3545}35463547nm.arena = sum;3548nm.ordblks = nfree;3549nm.hblkhd = m->footprint - sum;3550nm.usmblks = m->max_footprint;3551nm.uordblks = m->footprint - mfree;3552nm.fordblks = mfree;3553nm.keepcost = m->topsize;3554}35553556POSTACTION(m);3557}3558return nm;3559}3560#endif /* !NO_MALLINFO */35613562#if !NO_MALLOC_STATS3563static void internal_malloc_stats(mstate m) 
{3564ensure_initialization();3565if (!PREACTION(m)) {3566size_t maxfp = 0;3567size_t fp = 0;3568size_t used = 0;3569check_malloc_state(m);3570if (is_initialized(m)) {3571msegmentptr s = &m->seg;3572maxfp = m->max_footprint;3573fp = m->footprint;3574used = fp - (m->topsize + TOP_FOOT_SIZE);35753576while (s != 0) {3577mchunkptr q = align_as_chunk(s->base);3578while (segment_holds(s, q) &&3579q != m->top && q->head != FENCEPOST_HEAD) {3580if (!is_inuse(q))3581used -= chunksize(q);3582q = next_chunk(q);3583}3584s = s->next;3585}3586}3587POSTACTION(m); /* drop lock */3588fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));3589fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp));3590fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used));3591}3592}3593#endif /* NO_MALLOC_STATS */35943595/* ----------------------- Operations on smallbins ----------------------- */35963597/*3598Various forms of linking and unlinking are defined as macros. Even3599the ones for trees, which are very long but have very short typical3600paths. This is ugly but reduces reliance on inlining support of3601compilers.3602*/36033604/* Link a free chunk into a smallbin */3605#define insert_small_chunk(M, P, S) {\3606bindex_t I = small_index(S);\3607mchunkptr B = smallbin_at(M, I);\3608mchunkptr F = B;\3609assert(S >= MIN_CHUNK_SIZE);\3610if (!smallmap_is_marked(M, I))\3611mark_smallmap(M, I);\3612else if (RTCHECK(ok_address(M, B->fd)))\3613F = B->fd;\3614else {\3615CORRUPTION_ERROR_ACTION(M);\3616}\3617B->fd = P;\3618F->bk = P;\3619P->fd = F;\3620P->bk = B;\3621}36223623/* Unlink a chunk from a smallbin */3624#define unlink_small_chunk(M, P, S) {\3625mchunkptr F = P->fd;\3626mchunkptr B = P->bk;\3627bindex_t I = small_index(S);\3628assert(P != B);\3629assert(P != F);\3630assert(chunksize(P) == small_index2size(I));\3631if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \3632if (B == F) {\3633clear_smallmap(M, I);\3634}\3635else if (RTCHECK(B == smallbin_at(M,I) ||\3636(ok_address(M, B) && B->fd == P))) {\3637F->bk = B;\3638B->fd = F;\3639}\3640else {\3641CORRUPTION_ERROR_ACTION(M);\3642}\3643}\3644else {\3645CORRUPTION_ERROR_ACTION(M);\3646}\3647}36483649/* Unlink the first chunk from a smallbin */3650#define unlink_first_small_chunk(M, B, P, I) {\3651mchunkptr F = P->fd;\3652assert(P != B);\3653assert(P != F);\3654assert(chunksize(P) == small_index2size(I));\3655if (B == F) {\3656clear_smallmap(M, I);\3657}\3658else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\3659F->bk = B;\3660B->fd = F;\3661}\3662else {\3663CORRUPTION_ERROR_ACTION(M);\3664}\3665}36663667/* Replace dv node, binning the old one */3668/* Used only when dvsize known to be small */3669#define replace_dv(M, P, S) {\3670size_t DVS = M->dvsize;\3671assert(is_small(DVS));\3672if (DVS != 0) {\3673mchunkptr DV = M->dv;\3674insert_small_chunk(M, DV, DVS);\3675}\3676M->dvsize = S;\3677M->dv = P;\3678}36793680/* ------------------------- Operations on trees ------------------------- */36813682/* Insert chunk into tree */3683#define insert_large_chunk(M, X, S) {\3684tbinptr* H;\3685bindex_t I;\3686compute_tree_index(S, I);\3687H = treebin_at(M, I);\3688X->index = I;\3689X->child[0] = X->child[1] = 0;\3690if (!treemap_is_marked(M, I)) {\3691mark_treemap(M, I);\3692*H = X;\3693X->parent = (tchunkptr)H;\3694X->fd = X->bk = X;\3695}\3696else {\3697tchunkptr T = *H;\3698size_t K = S << leftshift_for_tree_index(I);\3699for (;;) {\3700if (chunksize(T) != S) {\3701tchunkptr* C = &(T->child[(K >> 
(SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\3702K <<= 1;\3703if (*C != 0)\3704T = *C;\3705else if (RTCHECK(ok_address(M, C))) {\3706*C = X;\3707X->parent = T;\3708X->fd = X->bk = X;\3709break;\3710}\3711else {\3712CORRUPTION_ERROR_ACTION(M);\3713break;\3714}\3715}\3716else {\3717tchunkptr F = T->fd;\3718if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\3719T->fd = F->bk = X;\3720X->fd = F;\3721X->bk = T;\3722X->parent = 0;\3723break;\3724}\3725else {\3726CORRUPTION_ERROR_ACTION(M);\3727break;\3728}\3729}\3730}\3731}\3732}37333734/*3735Unlink steps:373637371. If x is a chained node, unlink it from its same-sized fd/bk links3738and choose its bk node as its replacement.37392. If x was the last node of its size, but not a leaf node, it must3740be replaced with a leaf node (not merely one with an open left or3741right), to make sure that lefts and rights of descendents3742correspond properly to bit masks. We use the rightmost descendent3743of x. We could use any other leaf, but this is easy to locate and3744tends to counteract removal of leftmosts elsewhere, and so keeps3745paths shorter than minimally guaranteed. This doesn't loop much3746because on average a node in a tree is near the bottom.37473. If x is the base of a chain (i.e., has parent links) relink3748x's parent and children to x's replacement (or null if none).3749*/37503751#define unlink_large_chunk(M, X) {\3752tchunkptr XP = X->parent;\3753tchunkptr R;\3754if (X->bk != X) {\3755tchunkptr F = X->fd;\3756R = X->bk;\3757if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\3758F->bk = R;\3759R->fd = F;\3760}\3761else {\3762CORRUPTION_ERROR_ACTION(M);\3763}\3764}\3765else {\3766tchunkptr* RP;\3767if (((R = *(RP = &(X->child[1]))) != 0) ||\3768((R = *(RP = &(X->child[0]))) != 0)) {\3769tchunkptr* CP;\3770while ((*(CP = &(R->child[1])) != 0) ||\3771(*(CP = &(R->child[0])) != 0)) {\3772R = *(RP = CP);\3773}\3774if (RTCHECK(ok_address(M, RP)))\3775*RP = 0;\3776else {\3777CORRUPTION_ERROR_ACTION(M);\3778}\3779}\3780}\3781if (XP != 0) {\3782tbinptr* H = treebin_at(M, X->index);\3783if (X == *H) {\3784if ((*H = R) == 0) \3785clear_treemap(M, X->index);\3786}\3787else if (RTCHECK(ok_address(M, XP))) {\3788if (XP->child[0] == X) \3789XP->child[0] = R;\3790else \3791XP->child[1] = R;\3792}\3793else\3794CORRUPTION_ERROR_ACTION(M);\3795if (R != 0) {\3796if (RTCHECK(ok_address(M, R))) {\3797tchunkptr C0, C1;\3798R->parent = XP;\3799if ((C0 = X->child[0]) != 0) {\3800if (RTCHECK(ok_address(M, C0))) {\3801R->child[0] = C0;\3802C0->parent = R;\3803}\3804else\3805CORRUPTION_ERROR_ACTION(M);\3806}\3807if ((C1 = X->child[1]) != 0) {\3808if (RTCHECK(ok_address(M, C1))) {\3809R->child[1] = C1;\3810C1->parent = R;\3811}\3812else\3813CORRUPTION_ERROR_ACTION(M);\3814}\3815}\3816else\3817CORRUPTION_ERROR_ACTION(M);\3818}\3819}\3820}38213822/* Relays to large vs small bin operations */38233824#define insert_chunk(M, P, S)\3825if (is_small(S)) insert_small_chunk(M, P, S)\3826else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }38273828#define unlink_chunk(M, P, S)\3829if (is_small(S)) unlink_small_chunk(M, P, S)\3830else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }383138323833/* Relays to internal calls to malloc/free from realloc, memalign etc */38343835#if ONLY_MSPACES3836#define internal_malloc(m, b) mspace_malloc(m, b)3837#define internal_free(m, mem) mspace_free(m,mem);3838#else /* ONLY_MSPACES */3839#if MSPACES3840#define internal_malloc(m, b)\3841((m == gm)? 
dlmalloc(b) : mspace_malloc(m, b))3842#define internal_free(m, mem)\3843if (m == gm) dlfree(mem); else mspace_free(m,mem);3844#else /* MSPACES */3845#define internal_malloc(m, b) dlmalloc(b)3846#define internal_free(m, mem) dlfree(mem)3847#endif /* MSPACES */3848#endif /* ONLY_MSPACES */38493850/* ----------------------- Direct-mmapping chunks ----------------------- */38513852/*3853Directly mmapped chunks are set up with an offset to the start of3854the mmapped region stored in the prev_foot field of the chunk. This3855allows reconstruction of the required argument to MUNMAP when freed,3856and also allows adjustment of the returned chunk to meet alignment3857requirements (especially in memalign).3858*/38593860/* Malloc using mmap */3861static void* mmap_alloc(mstate m, size_t nb) {3862size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);3863if (m->footprint_limit != 0) {3864size_t fp = m->footprint + mmsize;3865if (fp <= m->footprint || fp > m->footprint_limit)3866return 0;3867}3868if (mmsize > nb) { /* Check for wrap around 0 */3869char* mm = (char*)(CALL_DIRECT_MMAP(mmsize));3870if (mm != CMFAIL) {3871size_t offset = align_offset(chunk2mem(mm));3872size_t psize = mmsize - offset - MMAP_FOOT_PAD;3873mchunkptr p = (mchunkptr)(mm + offset);3874p->prev_foot = offset;3875p->head = psize;3876mark_inuse_foot(m, p, psize);3877chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;3878chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;38793880if (m->least_addr == 0 || mm < m->least_addr)3881m->least_addr = mm;3882if ((m->footprint += mmsize) > m->max_footprint)3883m->max_footprint = m->footprint;3884assert(is_aligned(chunk2mem(p)));3885check_mmapped_chunk(m, p);3886return chunk2mem(p);3887}3888}3889return 0;3890}38913892/* Realloc using mmap */3893static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) {3894size_t oldsize = chunksize(oldp);3895(void)flags; /* placate people compiling -Wunused */3896if (is_small(nb)) /* Can't shrink mmap regions below small size */3897return 0;3898/* Keep old chunk if big enough but not too big */3899if (oldsize >= nb + SIZE_T_SIZE &&3900(oldsize - nb) <= (mparams.granularity << 1))3901return oldp;3902else {3903size_t offset = oldp->prev_foot;3904size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;3905size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);3906char* cp = (char*)CALL_MREMAP((char*)oldp - offset,3907oldmmsize, newmmsize, flags);3908if (cp != CMFAIL) {3909mchunkptr newp = (mchunkptr)(cp + offset);3910size_t psize = newmmsize - offset - MMAP_FOOT_PAD;3911newp->head = psize;3912mark_inuse_foot(m, newp, psize);3913chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;3914chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;39153916if (cp < m->least_addr)3917m->least_addr = cp;3918if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)3919m->max_footprint = m->footprint;3920check_mmapped_chunk(m, newp);3921return newp;3922}3923}3924return 0;3925}392639273928/* -------------------------- mspace management -------------------------- */39293930/* Initialize top chunk and its size */3931static void init_top(mstate m, mchunkptr p, size_t psize) {3932/* Ensure alignment */3933size_t offset = align_offset(chunk2mem(p));3934p = (mchunkptr)((char*)p + offset);3935psize -= offset;39363937m->top = p;3938m->topsize = psize;3939p->head = psize | PINUSE_BIT;3940/* set size of fake trailing chunk holding overhead space only once */3941chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;3942m->trim_check = 
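  /*
    trim_check is the amount the top chunk must exceed before sys_trim
    attempts to return memory to the system.  It is reset to the current
    trim_threshold whenever top is (re)initialized here, and sys_trim sets
    it to MAX_SIZE_T after a failed trim to suppress repeated attempts.
  */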
mparams.trim_threshold; /* reset on each update */3943}39443945/* Initialize bins for a new mstate that is otherwise zeroed out */3946static void init_bins(mstate m) {3947/* Establish circular links for smallbins */3948bindex_t i;3949for (i = 0; i < NSMALLBINS; ++i) {3950sbinptr bin = smallbin_at(m,i);3951bin->fd = bin->bk = bin;3952}3953}39543955#if PROCEED_ON_ERROR39563957/* default corruption action */3958static void reset_on_error(mstate m) {3959int i;3960++malloc_corruption_error_count;3961/* Reinitialize fields to forget about all memory */3962m->smallmap = m->treemap = 0;3963m->dvsize = m->topsize = 0;3964m->seg.base = 0;3965m->seg.size = 0;3966m->seg.next = 0;3967m->top = m->dv = 0;3968for (i = 0; i < NTREEBINS; ++i)3969*treebin_at(m, i) = 0;3970init_bins(m);3971}3972#endif /* PROCEED_ON_ERROR */39733974/* Allocate chunk and prepend remainder with chunk in successor base. */3975static void* prepend_alloc(mstate m, char* newbase, char* oldbase,3976size_t nb) {3977mchunkptr p = align_as_chunk(newbase);3978mchunkptr oldfirst = align_as_chunk(oldbase);3979size_t psize = (char*)oldfirst - (char*)p;3980mchunkptr q = chunk_plus_offset(p, nb);3981size_t qsize = psize - nb;3982set_size_and_pinuse_of_inuse_chunk(m, p, nb);39833984assert((char*)oldfirst > (char*)q);3985assert(pinuse(oldfirst));3986assert(qsize >= MIN_CHUNK_SIZE);39873988/* consolidate remainder with first chunk of old base */3989if (oldfirst == m->top) {3990size_t tsize = m->topsize += qsize;3991m->top = q;3992q->head = tsize | PINUSE_BIT;3993check_top_chunk(m, q);3994}3995else if (oldfirst == m->dv) {3996size_t dsize = m->dvsize += qsize;3997m->dv = q;3998set_size_and_pinuse_of_free_chunk(q, dsize);3999}4000else {4001if (!is_inuse(oldfirst)) {4002size_t nsize = chunksize(oldfirst);4003unlink_chunk(m, oldfirst, nsize);4004oldfirst = chunk_plus_offset(oldfirst, nsize);4005qsize += nsize;4006}4007set_free_with_pinuse(q, qsize, oldfirst);4008insert_chunk(m, q, qsize);4009check_free_chunk(m, q);4010}40114012check_malloced_chunk(m, chunk2mem(p), nb);4013return chunk2mem(p);4014}40154016/* Add a segment to hold a new noncontiguous region */4017static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {4018/* Determine locations and sizes of segment, fenceposts, old top */4019char* old_top = (char*)m->top;4020msegmentptr oldsp = segment_holding(m, old_top);4021char* old_end = oldsp->base + oldsp->size;4022size_t ssize = pad_request(sizeof(struct malloc_segment));4023char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);4024size_t offset = align_offset(chunk2mem(rawsp));4025char* asp = rawsp + offset;4026char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? 
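  /*
    Place the segment record at the aligned spot asp, unless that would
    leave less than MIN_CHUNK_SIZE between it and the old top chunk, in
    which case it goes at old_top itself.  Any leftover space below the
    record is binned as an ordinary free chunk at the end of this function.
  */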
old_top : asp;4027mchunkptr sp = (mchunkptr)csp;4028msegmentptr ss = (msegmentptr)(chunk2mem(sp));4029mchunkptr tnext = chunk_plus_offset(sp, ssize);4030mchunkptr p = tnext;4031int nfences = 0;40324033/* reset top to new space */4034init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);40354036/* Set up segment record */4037assert(is_aligned(ss));4038set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);4039*ss = m->seg; /* Push current record */4040m->seg.base = tbase;4041m->seg.size = tsize;4042m->seg.sflags = mmapped;4043m->seg.next = ss;40444045/* Insert trailing fenceposts */4046for (;;) {4047mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);4048p->head = FENCEPOST_HEAD;4049++nfences;4050if ((char*)(&(nextp->head)) < old_end)4051p = nextp;4052else4053break;4054}4055(void)nfences;4056assert(nfences >= 2);40574058/* Insert the rest of old top into a bin as an ordinary free chunk */4059if (csp != old_top) {4060mchunkptr q = (mchunkptr)old_top;4061size_t psize = csp - old_top;4062mchunkptr tn = chunk_plus_offset(q, psize);4063set_free_with_pinuse(q, psize, tn);4064insert_chunk(m, q, psize);4065}40664067check_top_chunk(m, m->top);4068}40694070/* -------------------------- System allocation -------------------------- */40714072/* Get memory from system using MORECORE or MMAP */4073static void* sys_alloc(mstate m, size_t nb) {4074char* tbase = CMFAIL;4075size_t tsize = 0;4076flag_t mmap_flag = 0;4077size_t asize; /* allocation size */40784079ensure_initialization();40804081/* Directly map large chunks, but only if already initialized */4082if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {4083void* mem = mmap_alloc(m, nb);4084if (mem != 0)4085return mem;4086}40874088asize = granularity_align(nb + SYS_ALLOC_PADDING);4089if (asize <= nb)4090return 0; /* wraparound */4091if (m->footprint_limit != 0) {4092size_t fp = m->footprint + asize;4093if (fp <= m->footprint || fp > m->footprint_limit)4094return 0;4095}40964097/*4098Try getting memory in any of three ways (in most-preferred to4099least-preferred order):41001. A call to MORECORE that can normally contiguously extend memory.4101(disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or4102or main space is mmapped or a previous contiguous call failed)41032. A call to MMAP new space (disabled if not HAVE_MMAP).4104Note that under the default settings, if MORECORE is unable to4105fulfill a request, and HAVE_MMAP is true, then mmap is4106used as a noncontiguous system allocator. This is a useful backup4107strategy for systems with holes in address spaces -- in this case4108sbrk cannot contiguously expand the heap, but mmap may be able to4109find space.41103. A call to MORECORE that cannot usually contiguously extend memory.4111(disabled if not HAVE_MORECORE)41124113In all cases, we need to request enough bytes from system to ensure4114we can malloc nb bytes upon success, so pad with enough space for4115top_foot, plus alignment-pad to make sure we don't lose bytes if4116not on boundary, and round this up to a granularity unit.4117*/41184119if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {4120char* br = CMFAIL;4121size_t ssize = asize; /* sbrk call size */4122msegmentptr ss = (m->top == 0)? 
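    /*
      ss is the segment currently holding the top chunk, or 0 on first use
      (or after a corruption reset).  When nonzero, the MORECORE request is
      reduced to just what is needed beyond the existing top space, in the
      hope of extending that segment contiguously.
    */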
0 : segment_holding(m, (char*)m->top);4123ACQUIRE_MALLOC_GLOBAL_LOCK();41244125if (ss == 0) { /* First time through or recovery */4126char* base = (char*)CALL_MORECORE(0);4127if (base != CMFAIL) {4128size_t fp;4129/* Adjust to end on a page boundary */4130if (!is_page_aligned(base))4131ssize += (page_align((size_t)base) - (size_t)base);4132fp = m->footprint + ssize; /* recheck limits */4133if (ssize > nb && ssize < HALF_MAX_SIZE_T &&4134(m->footprint_limit == 0 ||4135(fp > m->footprint && fp <= m->footprint_limit)) &&4136(br = (char*)(CALL_MORECORE(ssize))) == base) {4137tbase = base;4138tsize = ssize;4139}4140}4141}4142else {4143/* Subtract out existing available top space from MORECORE request. */4144ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING);4145/* Use mem here only if it did continuously extend old space */4146if (ssize < HALF_MAX_SIZE_T &&4147(br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) {4148tbase = br;4149tsize = ssize;4150}4151}41524153if (tbase == CMFAIL) { /* Cope with partial failure */4154if (br != CMFAIL) { /* Try to use/extend the space we did get */4155if (ssize < HALF_MAX_SIZE_T &&4156ssize < nb + SYS_ALLOC_PADDING) {4157size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize);4158if (esize < HALF_MAX_SIZE_T) {4159char* end = (char*)CALL_MORECORE(esize);4160if (end != CMFAIL)4161ssize += esize;4162else { /* Can't use; try to release */4163(void) CALL_MORECORE(-ssize);4164br = CMFAIL;4165}4166}4167}4168}4169if (br != CMFAIL) { /* Use the space we did get */4170tbase = br;4171tsize = ssize;4172}4173else4174disable_contiguous(m); /* Don't try contiguous path in the future */4175}41764177RELEASE_MALLOC_GLOBAL_LOCK();4178}41794180if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */4181char* mp = (char*)(CALL_MMAP(asize));4182if (mp != CMFAIL) {4183tbase = mp;4184tsize = asize;4185mmap_flag = USE_MMAP_BIT;4186}4187}41884189if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */4190if (asize < HALF_MAX_SIZE_T) {4191char* br = CMFAIL;4192char* end = CMFAIL;4193ACQUIRE_MALLOC_GLOBAL_LOCK();4194br = (char*)(CALL_MORECORE(asize));4195end = (char*)(CALL_MORECORE(0));4196RELEASE_MALLOC_GLOBAL_LOCK();4197if (br != CMFAIL && end != CMFAIL && br < end) {4198size_t ssize = end - br;4199if (ssize > nb + TOP_FOOT_SIZE) {4200tbase = br;4201tsize = ssize;4202}4203}4204}4205}42064207if (tbase != CMFAIL) {42084209if ((m->footprint += tsize) > m->max_footprint)4210m->max_footprint = m->footprint;42114212if (!is_initialized(m)) { /* first-time initialization */4213if (m->least_addr == 0 || tbase < m->least_addr)4214m->least_addr = tbase;4215m->seg.base = tbase;4216m->seg.size = tsize;4217m->seg.sflags = mmap_flag;4218m->magic = mparams.magic;4219m->release_checks = MAX_RELEASE_CHECK_RATE;4220init_bins(m);4221#if !ONLY_MSPACES4222if (is_global(m))4223init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);4224else4225#endif4226{4227/* Offset top by embedded malloc_state */4228mchunkptr mn = next_chunk(mem2chunk(m));4229init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);4230}4231}42324233else {4234/* Try to merge with an existing segment */4235msegmentptr sp = &m->seg;4236/* Only consider most recent segment if traversal suppressed */4237while (sp != 0 && tbase != sp->base + sp->size)4238sp = (NO_SEGMENT_TRAVERSAL) ? 
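        /*
          Look for an existing segment whose end abuts the newly obtained
          space; if it is not an external segment, has the same mmap flag,
          and holds the current top, the new space is appended to it and
          top is reinitialized.  With NO_SEGMENT_TRAVERSAL only the most
          recent segment is considered.
        */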
0 : sp->next;4239if (sp != 0 &&4240!is_extern_segment(sp) &&4241(sp->sflags & USE_MMAP_BIT) == mmap_flag &&4242segment_holds(sp, m->top)) { /* append */4243sp->size += tsize;4244init_top(m, m->top, m->topsize + tsize);4245}4246else {4247if (tbase < m->least_addr)4248m->least_addr = tbase;4249sp = &m->seg;4250while (sp != 0 && sp->base != tbase + tsize)4251sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;4252if (sp != 0 &&4253!is_extern_segment(sp) &&4254(sp->sflags & USE_MMAP_BIT) == mmap_flag) {4255char* oldbase = sp->base;4256sp->base = tbase;4257sp->size += tsize;4258return prepend_alloc(m, tbase, oldbase, nb);4259}4260else4261add_segment(m, tbase, tsize, mmap_flag);4262}4263}42644265if (nb < m->topsize) { /* Allocate from new or extended top space */4266size_t rsize = m->topsize -= nb;4267mchunkptr p = m->top;4268mchunkptr r = m->top = chunk_plus_offset(p, nb);4269r->head = rsize | PINUSE_BIT;4270set_size_and_pinuse_of_inuse_chunk(m, p, nb);4271check_top_chunk(m, m->top);4272check_malloced_chunk(m, chunk2mem(p), nb);4273return chunk2mem(p);4274}4275}42764277MALLOC_FAILURE_ACTION;4278return 0;4279}42804281/* ----------------------- system deallocation -------------------------- */42824283/* Unmap and unlink any mmapped segments that don't contain used chunks */4284static size_t release_unused_segments(mstate m) {4285size_t released = 0;4286int nsegs = 0;4287msegmentptr pred = &m->seg;4288msegmentptr sp = pred->next;4289while (sp != 0) {4290char* base = sp->base;4291size_t size = sp->size;4292msegmentptr next = sp->next;4293++nsegs;4294if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {4295mchunkptr p = align_as_chunk(base);4296size_t psize = chunksize(p);4297/* Can unmap if first chunk holds entire segment and not pinned */4298if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {4299tchunkptr tp = (tchunkptr)p;4300assert(segment_holds(sp, (char*)sp));4301if (p == m->dv) {4302m->dv = 0;4303m->dvsize = 0;4304}4305else {4306unlink_large_chunk(m, tp);4307}4308if (CALL_MUNMAP(base, size) == 0) {4309released += size;4310m->footprint -= size;4311/* unlink obsoleted record */4312sp = pred;4313sp->next = next;4314}4315else { /* back out if cannot unmap */4316insert_large_chunk(m, tp, psize);4317}4318}4319}4320if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */4321break;4322pred = sp;4323sp = next;4324}4325/* Reset check counter */4326m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)?4327(size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE);4328return released;4329}43304331static int sys_trim(mstate m, size_t pad) {4332size_t released = 0;4333ensure_initialization();4334if (pad < MAX_REQUEST && is_initialized(m)) {4335pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */43364337if (m->topsize > pad) {4338/* Shrink top space in granularity-size units, keeping at least one */4339size_t unit = mparams.granularity;4340size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -4341SIZE_T_ONE) * unit;4342msegmentptr sp = segment_holding(m, (char*)m->top);43434344if (!is_extern_segment(sp)) {4345if (is_mmapped_segment(sp)) {4346if (HAVE_MMAP &&4347sp->size >= extra &&4348!has_segment_link(m, sp)) { /* can't shrink if pinned */4349size_t newsize = sp->size - extra;4350(void)newsize; /* placate people compiling -Wunused-variable */4351/* Prefer mremap, fall back to munmap */4352if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||4353(CALL_MUNMAP(sp->base + newsize, extra) == 0)) {4354released = extra;4355}4356}4357}4358else if 
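      /*
        For sbrk-style (MORECORE) segments, trimming is attempted only when
        the current program break is exactly where this malloc last left it;
        if some other code has moved the break, nothing is released.
      */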
(HAVE_MORECORE) {4359if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */4360extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;4361ACQUIRE_MALLOC_GLOBAL_LOCK();4362{4363/* Make sure end of memory is where we last set it. */4364char* old_br = (char*)(CALL_MORECORE(0));4365if (old_br == sp->base + sp->size) {4366char* rel_br = (char*)(CALL_MORECORE(-extra));4367char* new_br = (char*)(CALL_MORECORE(0));4368if (rel_br != CMFAIL && new_br < old_br)4369released = old_br - new_br;4370}4371}4372RELEASE_MALLOC_GLOBAL_LOCK();4373}4374}43754376if (released != 0) {4377sp->size -= released;4378m->footprint -= released;4379init_top(m, m->top, m->topsize - released);4380check_top_chunk(m, m->top);4381}4382}43834384/* Unmap any unused mmapped segments */4385if (HAVE_MMAP)4386released += release_unused_segments(m);43874388/* On failure, disable autotrim to avoid repeated failed future calls */4389if (released == 0 && m->topsize > m->trim_check)4390m->trim_check = MAX_SIZE_T;4391}43924393return (released != 0)? 1 : 0;4394}43954396/* Consolidate and bin a chunk. Differs from exported versions4397of free mainly in that the chunk need not be marked as inuse.4398*/4399static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {4400mchunkptr next = chunk_plus_offset(p, psize);4401if (!pinuse(p)) {4402mchunkptr prev;4403size_t prevsize = p->prev_foot;4404if (is_mmapped(p)) {4405psize += prevsize + MMAP_FOOT_PAD;4406if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)4407m->footprint -= psize;4408return;4409}4410prev = chunk_minus_offset(p, prevsize);4411psize += prevsize;4412p = prev;4413if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */4414if (p != m->dv) {4415unlink_chunk(m, p, prevsize);4416}4417else if ((next->head & INUSE_BITS) == INUSE_BITS) {4418m->dvsize = psize;4419set_free_with_pinuse(p, psize, next);4420return;4421}4422}4423else {4424CORRUPTION_ERROR_ACTION(m);4425return;4426}4427}4428if (RTCHECK(ok_address(m, next))) {4429if (!cinuse(next)) { /* consolidate forward */4430if (next == m->top) {4431size_t tsize = m->topsize += psize;4432m->top = p;4433p->head = tsize | PINUSE_BIT;4434if (p == m->dv) {4435m->dv = 0;4436m->dvsize = 0;4437}4438return;4439}4440else if (next == m->dv) {4441size_t dsize = m->dvsize += psize;4442m->dv = p;4443set_size_and_pinuse_of_free_chunk(p, dsize);4444return;4445}4446else {4447size_t nsize = chunksize(next);4448psize += nsize;4449unlink_chunk(m, next, nsize);4450set_size_and_pinuse_of_free_chunk(p, psize);4451if (p == m->dv) {4452m->dvsize = psize;4453return;4454}4455}4456}4457else {4458set_free_with_pinuse(p, psize, next);4459}4460insert_chunk(m, p, psize);4461}4462else {4463CORRUPTION_ERROR_ACTION(m);4464}4465}44664467/* ---------------------------- malloc --------------------------- */44684469/* allocate a large request from the best fitting chunk in a treebin */4470static void* tmalloc_large(mstate m, size_t nb) {4471tchunkptr v = 0;4472size_t rsize = -nb; /* Unsigned negation */4473tchunkptr t;4474bindex_t idx;4475compute_tree_index(nb, idx);4476if ((t = *treebin_at(m, idx)) != 0) {4477/* Traverse tree for this bin looking for node with size == nb */4478size_t sizebits = nb << leftshift_for_tree_index(idx);4479tchunkptr rst = 0; /* The deepest untaken right subtree */4480for (;;) {4481tchunkptr rt;4482size_t trem = chunksize(t) - nb;4483if (trem < rsize) {4484v = t;4485if ((rsize = trem) == 0)4486break;4487}4488rt = t->child[1];4489t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];4490if (rt != 0 && rt != t)4491rst = rt;4492if (t == 0) {4493t 
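        /*
          The descent above followed the bits of nb from the root; rst
          remembers the deepest right subtree that was not taken.  If the
          walk ends without an exact fit, that subtree is the nearest part
          of the tree holding only chunks larger than nb.
        */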
= rst; /* set t to least subtree holding sizes > nb */4494break;4495}4496sizebits <<= 1;4497}4498}4499if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */4500binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;4501if (leftbits != 0) {4502bindex_t i;4503binmap_t leastbit = least_bit(leftbits);4504compute_bit2idx(leastbit, i);4505t = *treebin_at(m, i);4506}4507}45084509while (t != 0) { /* find smallest of tree or subtree */4510size_t trem = chunksize(t) - nb;4511if (trem < rsize) {4512rsize = trem;4513v = t;4514}4515t = leftmost_child(t);4516}45174518/* If dv is a better fit, return 0 so malloc will use it */4519if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {4520if (RTCHECK(ok_address(m, v))) { /* split */4521mchunkptr r = chunk_plus_offset(v, nb);4522assert(chunksize(v) == rsize + nb);4523if (RTCHECK(ok_next(v, r))) {4524unlink_large_chunk(m, v);4525if (rsize < MIN_CHUNK_SIZE)4526set_inuse_and_pinuse(m, v, (rsize + nb));4527else {4528set_size_and_pinuse_of_inuse_chunk(m, v, nb);4529set_size_and_pinuse_of_free_chunk(r, rsize);4530insert_chunk(m, r, rsize);4531}4532return chunk2mem(v);4533}4534}4535CORRUPTION_ERROR_ACTION(m);4536}4537return 0;4538}45394540/* allocate a small request from the best fitting chunk in a treebin */4541static void* tmalloc_small(mstate m, size_t nb) {4542tchunkptr t, v;4543size_t rsize;4544bindex_t i;4545binmap_t leastbit = least_bit(m->treemap);4546compute_bit2idx(leastbit, i);4547v = t = *treebin_at(m, i);4548rsize = chunksize(t) - nb;45494550while ((t = leftmost_child(t)) != 0) {4551size_t trem = chunksize(t) - nb;4552if (trem < rsize) {4553rsize = trem;4554v = t;4555}4556}45574558if (RTCHECK(ok_address(m, v))) {4559mchunkptr r = chunk_plus_offset(v, nb);4560assert(chunksize(v) == rsize + nb);4561if (RTCHECK(ok_next(v, r))) {4562unlink_large_chunk(m, v);4563if (rsize < MIN_CHUNK_SIZE)4564set_inuse_and_pinuse(m, v, (rsize + nb));4565else {4566set_size_and_pinuse_of_inuse_chunk(m, v, nb);4567set_size_and_pinuse_of_free_chunk(r, rsize);4568replace_dv(m, r, rsize);4569}4570return chunk2mem(v);4571}4572}45734574CORRUPTION_ERROR_ACTION(m);4575return 0;4576}45774578#if !ONLY_MSPACES45794580void* dlmalloc(size_t bytes) {4581/*4582Basic algorithm:4583If a small request (< 256 bytes minus per-chunk overhead):45841. If one exists, use a remainderless chunk in associated smallbin.4585(Remainderless means that there are too few excess bytes to4586represent as a chunk.)45872. If it is big enough, use the dv chunk, which is normally the4588chunk adjacent to the one used for the most recent small request.45893. If one exists, split the smallest available chunk in a bin,4590saving remainder in dv.45914. If it is big enough, use the top chunk.45925. If available, get memory from system and use it4593Otherwise, for a large request:45941. Find the smallest available binned chunk that fits, and use it4595if it is better fitting than dv chunk, splitting if necessary.45962. If better fitting than any binned chunk, use the dv chunk.45973. If it is big enough, use the top chunk.45984. If request size >= mmap threshold, try to directly mmap this chunk.45995. If available, get memory from system and use it46004601The ugly goto's here ensure that postaction occurs along all paths.4602*/46034604#if USE_LOCKS4605ensure_initialization(); /* initialize in sys_alloc if not using locks */4606#endif46074608if (!PREACTION(gm)) {4609void* mem;4610size_t nb;4611if (bytes <= MAX_SMALL_REQUEST) {4612bindex_t idx;4613binmap_t smallbits;4614nb = (bytes < MIN_REQUEST)? 
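    /*
      Convert the request to an internal chunk size: tiny requests are
      rounded up to MIN_CHUNK_SIZE, everything else has the per-chunk
      overhead added and is rounded up to the chunk alignment.  For
      example, on a common 64-bit configuration without FOOTERS (8 bytes
      of overhead, 16-byte alignment), a 100-byte request becomes a
      112-byte chunk.
    */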
MIN_CHUNK_SIZE : pad_request(bytes);4615idx = small_index(nb);4616smallbits = gm->smallmap >> idx;46174618if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */4619mchunkptr b, p;4620idx += ~smallbits & 1; /* Uses next bin if idx empty */4621b = smallbin_at(gm, idx);4622p = b->fd;4623assert(chunksize(p) == small_index2size(idx));4624unlink_first_small_chunk(gm, b, p, idx);4625set_inuse_and_pinuse(gm, p, small_index2size(idx));4626mem = chunk2mem(p);4627check_malloced_chunk(gm, mem, nb);4628goto postaction;4629}46304631else if (nb > gm->dvsize) {4632if (smallbits != 0) { /* Use chunk in next nonempty smallbin */4633mchunkptr b, p, r;4634size_t rsize;4635bindex_t i;4636binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));4637binmap_t leastbit = least_bit(leftbits);4638compute_bit2idx(leastbit, i);4639b = smallbin_at(gm, i);4640p = b->fd;4641assert(chunksize(p) == small_index2size(i));4642unlink_first_small_chunk(gm, b, p, i);4643rsize = small_index2size(i) - nb;4644/* Fit here cannot be remainderless if 4byte sizes */4645if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)4646set_inuse_and_pinuse(gm, p, small_index2size(i));4647else {4648set_size_and_pinuse_of_inuse_chunk(gm, p, nb);4649r = chunk_plus_offset(p, nb);4650set_size_and_pinuse_of_free_chunk(r, rsize);4651replace_dv(gm, r, rsize);4652}4653mem = chunk2mem(p);4654check_malloced_chunk(gm, mem, nb);4655goto postaction;4656}46574658else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {4659check_malloced_chunk(gm, mem, nb);4660goto postaction;4661}4662}4663}4664else if (bytes >= MAX_REQUEST)4665nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */4666else {4667nb = pad_request(bytes);4668if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {4669check_malloced_chunk(gm, mem, nb);4670goto postaction;4671}4672}46734674if (nb <= gm->dvsize) {4675size_t rsize = gm->dvsize - nb;4676mchunkptr p = gm->dv;4677if (rsize >= MIN_CHUNK_SIZE) { /* split dv */4678mchunkptr r = gm->dv = chunk_plus_offset(p, nb);4679gm->dvsize = rsize;4680set_size_and_pinuse_of_free_chunk(r, rsize);4681set_size_and_pinuse_of_inuse_chunk(gm, p, nb);4682}4683else { /* exhaust dv */4684size_t dvs = gm->dvsize;4685gm->dvsize = 0;4686gm->dv = 0;4687set_inuse_and_pinuse(gm, p, dvs);4688}4689mem = chunk2mem(p);4690check_malloced_chunk(gm, mem, nb);4691goto postaction;4692}46934694else if (nb < gm->topsize) { /* Split top */4695size_t rsize = gm->topsize -= nb;4696mchunkptr p = gm->top;4697mchunkptr r = gm->top = chunk_plus_offset(p, nb);4698r->head = rsize | PINUSE_BIT;4699set_size_and_pinuse_of_inuse_chunk(gm, p, nb);4700mem = chunk2mem(p);4701check_top_chunk(gm, gm->top);4702check_malloced_chunk(gm, mem, nb);4703goto postaction;4704}47054706mem = sys_alloc(gm, nb);47074708postaction:4709POSTACTION(gm);4710return mem;4711}47124713return 0;4714}47154716/* ---------------------------- free --------------------------- */47174718void dlfree(void* mem) {4719/*4720Consolidate freed chunks with preceeding or succeeding bordering4721free chunks, if they exist, and then place in a bin. 
Intermixed4722with special cases for top, dv, mmapped chunks, and usage errors.4723*/47244725if (mem != 0) {4726mchunkptr p = mem2chunk(mem);4727#if FOOTERS4728mstate fm = get_mstate_for(p);4729if (!ok_magic(fm)) {4730USAGE_ERROR_ACTION(fm, p);4731return;4732}4733#else /* FOOTERS */4734#define fm gm4735#endif /* FOOTERS */4736if (!PREACTION(fm)) {4737check_inuse_chunk(fm, p);4738if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {4739size_t psize = chunksize(p);4740mchunkptr next = chunk_plus_offset(p, psize);4741if (!pinuse(p)) {4742size_t prevsize = p->prev_foot;4743if (is_mmapped(p)) {4744psize += prevsize + MMAP_FOOT_PAD;4745if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)4746fm->footprint -= psize;4747goto postaction;4748}4749else {4750mchunkptr prev = chunk_minus_offset(p, prevsize);4751psize += prevsize;4752p = prev;4753if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */4754if (p != fm->dv) {4755unlink_chunk(fm, p, prevsize);4756}4757else if ((next->head & INUSE_BITS) == INUSE_BITS) {4758fm->dvsize = psize;4759set_free_with_pinuse(p, psize, next);4760goto postaction;4761}4762}4763else4764goto erroraction;4765}4766}47674768if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {4769if (!cinuse(next)) { /* consolidate forward */4770if (next == fm->top) {4771size_t tsize = fm->topsize += psize;4772fm->top = p;4773p->head = tsize | PINUSE_BIT;4774if (p == fm->dv) {4775fm->dv = 0;4776fm->dvsize = 0;4777}4778if (should_trim(fm, tsize))4779sys_trim(fm, 0);4780goto postaction;4781}4782else if (next == fm->dv) {4783size_t dsize = fm->dvsize += psize;4784fm->dv = p;4785set_size_and_pinuse_of_free_chunk(p, dsize);4786goto postaction;4787}4788else {4789size_t nsize = chunksize(next);4790psize += nsize;4791unlink_chunk(fm, next, nsize);4792set_size_and_pinuse_of_free_chunk(p, psize);4793if (p == fm->dv) {4794fm->dvsize = psize;4795goto postaction;4796}4797}4798}4799else4800set_free_with_pinuse(p, psize, next);48014802if (is_small(psize)) {4803insert_small_chunk(fm, p, psize);4804check_free_chunk(fm, p);4805}4806else {4807tchunkptr tp = (tchunkptr)p;4808insert_large_chunk(fm, tp, psize);4809check_free_chunk(fm, p);4810if (--fm->release_checks == 0)4811release_unused_segments(fm);4812}4813goto postaction;4814}4815}4816erroraction:4817USAGE_ERROR_ACTION(fm, p);4818postaction:4819POSTACTION(fm);4820}4821}4822#if !FOOTERS4823#undef fm4824#endif /* FOOTERS */4825}48264827void* dlcalloc(size_t n_elements, size_t elem_size) {4828void* mem;4829size_t req = 0;4830if (n_elements != 0) {4831req = n_elements * elem_size;4832if (((n_elements | elem_size) & ~(size_t)0xffff) &&4833(req / n_elements != elem_size))4834req = MAX_SIZE_T; /* force downstream failure on overflow */4835}4836mem = dlmalloc(req);4837if (mem != 0 && calloc_must_clear(mem2chunk(mem)))4838memset(mem, 0, req);4839return mem;4840}48414842#endif /* !ONLY_MSPACES */48434844/* ------------ Internal support for realloc, memalign, etc -------------- */48454846/* Try to realloc; only in-place unless can_move true */4847static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,4848int can_move) {4849mchunkptr newp = 0;4850size_t oldsize = chunksize(p);4851mchunkptr next = chunk_plus_offset(p, oldsize);4852if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&4853ok_next(p, next) && ok_pinuse(next))) {4854if (is_mmapped(p)) {4855newp = mmap_resize(m, p, nb, can_move);4856}4857else if (oldsize >= nb) { /* already big enough */4858size_t rsize = oldsize - nb;4859if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */4860mchunkptr r = 
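      /*
        Shrink in place: the surplus beyond nb becomes a separate chunk r.
        It is given an inuse header (so its size and neighbor bits are
        consistent) and then handed to dispose_chunk, which consolidates
        it with adjacent free space and bins it.
      */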
chunk_plus_offset(p, nb);4861set_inuse(m, p, nb);4862set_inuse(m, r, rsize);4863dispose_chunk(m, r, rsize);4864}4865newp = p;4866}4867else if (next == m->top) { /* extend into top */4868if (oldsize + m->topsize > nb) {4869size_t newsize = oldsize + m->topsize;4870size_t newtopsize = newsize - nb;4871mchunkptr newtop = chunk_plus_offset(p, nb);4872set_inuse(m, p, nb);4873newtop->head = newtopsize |PINUSE_BIT;4874m->top = newtop;4875m->topsize = newtopsize;4876newp = p;4877}4878}4879else if (next == m->dv) { /* extend into dv */4880size_t dvs = m->dvsize;4881if (oldsize + dvs >= nb) {4882size_t dsize = oldsize + dvs - nb;4883if (dsize >= MIN_CHUNK_SIZE) {4884mchunkptr r = chunk_plus_offset(p, nb);4885mchunkptr n = chunk_plus_offset(r, dsize);4886set_inuse(m, p, nb);4887set_size_and_pinuse_of_free_chunk(r, dsize);4888clear_pinuse(n);4889m->dvsize = dsize;4890m->dv = r;4891}4892else { /* exhaust dv */4893size_t newsize = oldsize + dvs;4894set_inuse(m, p, newsize);4895m->dvsize = 0;4896m->dv = 0;4897}4898newp = p;4899}4900}4901else if (!cinuse(next)) { /* extend into next free chunk */4902size_t nextsize = chunksize(next);4903if (oldsize + nextsize >= nb) {4904size_t rsize = oldsize + nextsize - nb;4905unlink_chunk(m, next, nextsize);4906if (rsize < MIN_CHUNK_SIZE) {4907size_t newsize = oldsize + nextsize;4908set_inuse(m, p, newsize);4909}4910else {4911mchunkptr r = chunk_plus_offset(p, nb);4912set_inuse(m, p, nb);4913set_inuse(m, r, rsize);4914dispose_chunk(m, r, rsize);4915}4916newp = p;4917}4918}4919}4920else {4921USAGE_ERROR_ACTION(m, chunk2mem(p));4922}4923return newp;4924}49254926static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {4927void* mem = 0;4928if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */4929alignment = MIN_CHUNK_SIZE;4930if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */4931size_t a = MALLOC_ALIGNMENT << 1;4932while (a < alignment) a <<= 1;4933alignment = a;4934}4935if (bytes >= MAX_REQUEST - alignment) {4936if (m != 0) { /* Test isn't needed but avoids compiler warning */4937MALLOC_FAILURE_ACTION;4938}4939}4940else {4941size_t nb = request2size(bytes);4942size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;4943mem = internal_malloc(m, req);4944if (mem != 0) {4945mchunkptr p = mem2chunk(mem);4946if (PREACTION(m))4947return 0;4948if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */4949/*4950Find an aligned spot inside chunk. 
Since we need to give4951back leading space in a chunk of at least MIN_CHUNK_SIZE, if4952the first calculation places us at a spot with less than4953MIN_CHUNK_SIZE leader, we can move to the next aligned spot.4954We've allocated enough total room so that this is always4955possible.4956*/4957char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment -4958SIZE_T_ONE)) &4959-alignment));4960char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?4961br : br+alignment;4962mchunkptr newp = (mchunkptr)pos;4963size_t leadsize = pos - (char*)(p);4964size_t newsize = chunksize(p) - leadsize;49654966if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */4967newp->prev_foot = p->prev_foot + leadsize;4968newp->head = newsize;4969}4970else { /* Otherwise, give back leader, use the rest */4971set_inuse(m, newp, newsize);4972set_inuse(m, p, leadsize);4973dispose_chunk(m, p, leadsize);4974}4975p = newp;4976}49774978/* Give back spare room at the end */4979if (!is_mmapped(p)) {4980size_t size = chunksize(p);4981if (size > nb + MIN_CHUNK_SIZE) {4982size_t remainder_size = size - nb;4983mchunkptr remainder = chunk_plus_offset(p, nb);4984set_inuse(m, p, nb);4985set_inuse(m, remainder, remainder_size);4986dispose_chunk(m, remainder, remainder_size);4987}4988}49894990mem = chunk2mem(p);4991assert (chunksize(p) >= nb);4992assert(((size_t)mem & (alignment - 1)) == 0);4993check_inuse_chunk(m, p);4994POSTACTION(m);4995}4996}4997return mem;4998}49995000/*5001Common support for independent_X routines, handling5002all of the combinations that can result.5003The opts arg has:5004bit 0 set if all elements are same size (using sizes[0])5005bit 1 set if elements should be zeroed5006*/5007static void** ialloc(mstate m,5008size_t n_elements,5009size_t* sizes,5010int opts,5011void* chunks[]) {50125013size_t element_size; /* chunksize of each element, if all same */5014size_t contents_size; /* total size of elements */5015size_t array_size; /* request size of pointer array */5016void* mem; /* malloced aggregate space */5017mchunkptr p; /* corresponding chunk */5018size_t remainder_size; /* remaining bytes while splitting */5019void** marray; /* either "chunks" or malloced ptr array */5020mchunkptr array_chunk; /* chunk for malloced ptr array */5021flag_t was_enabled; /* to disable mmap */5022size_t size;5023size_t i;50245025ensure_initialization();5026/* compute array length, if needed */5027if (chunks != 0) {5028if (n_elements == 0)5029return chunks; /* nothing to do */5030marray = chunks;5031array_size = 0;5032}5033else {5034/* if empty req, must still return chunk representing empty array */5035if (n_elements == 0)5036return (void**)internal_malloc(m, 0);5037marray = 0;5038array_size = request2size(n_elements * (sizeof(void*)));5039}50405041/* compute total element size */5042if (opts & 0x1) { /* all-same-size */5043element_size = request2size(*sizes);5044contents_size = n_elements * element_size;5045}5046else { /* add up all the sizes */5047element_size = 0;5048contents_size = 0;5049for (i = 0; i != n_elements; ++i)5050contents_size += request2size(sizes[i]);5051}50525053size = contents_size + array_size;50545055/*5056Allocate the aggregate chunk. 
First disable direct-mmapping so5057malloc won't use it, since we would not be able to later5058free/realloc space internal to a segregated mmap region.5059*/5060was_enabled = use_mmap(m);5061disable_mmap(m);5062mem = internal_malloc(m, size - CHUNK_OVERHEAD);5063if (was_enabled)5064enable_mmap(m);5065if (mem == 0)5066return 0;50675068if (PREACTION(m)) return 0;5069p = mem2chunk(mem);5070remainder_size = chunksize(p);50715072assert(!is_mmapped(p));50735074if (opts & 0x2) { /* optionally clear the elements */5075memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);5076}50775078/* If not provided, allocate the pointer array as final part of chunk */5079if (marray == 0) {5080size_t array_chunk_size;5081array_chunk = chunk_plus_offset(p, contents_size);5082array_chunk_size = remainder_size - contents_size;5083marray = (void**) (chunk2mem(array_chunk));5084set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);5085remainder_size = contents_size;5086}50875088/* split out elements */5089for (i = 0; ; ++i) {5090marray[i] = chunk2mem(p);5091if (i != n_elements-1) {5092if (element_size != 0)5093size = element_size;5094else5095size = request2size(sizes[i]);5096remainder_size -= size;5097set_size_and_pinuse_of_inuse_chunk(m, p, size);5098p = chunk_plus_offset(p, size);5099}5100else { /* the final element absorbs any overallocation slop */5101set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);5102break;5103}5104}51055106#if DEBUG5107if (marray != chunks) {5108/* final element must have exactly exhausted chunk */5109if (element_size != 0) {5110assert(remainder_size == element_size);5111}5112else {5113assert(remainder_size == request2size(sizes[i]));5114}5115check_inuse_chunk(m, mem2chunk(marray));5116}5117for (i = 0; i != n_elements; ++i)5118check_inuse_chunk(m, mem2chunk(marray[i]));51195120#endif /* DEBUG */51215122POSTACTION(m);5123return marray;5124}51255126/* Try to free all pointers in the given array.5127Note: this could be made faster, by delaying consolidation,5128at the price of disabling some user integrity checks, We5129still optimize some consolidations by combining adjacent5130chunks before freeing, which will occur often if allocated5131with ialloc or the array is sorted.5132*/5133static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) {5134size_t unfreed = 0;5135if (!PREACTION(m)) {5136void** a;5137void** fence = &(array[nelem]);5138for (a = array; a != fence; ++a) {5139void* mem = *a;5140if (mem != 0) {5141mchunkptr p = mem2chunk(mem);5142size_t psize = chunksize(p);5143#if FOOTERS5144if (get_mstate_for(p) != m) {5145++unfreed;5146continue;5147}5148#endif5149check_inuse_chunk(m, p);5150*a = 0;5151if (RTCHECK(ok_address(m, p) && ok_inuse(p))) {5152void ** b = a + 1; /* try to merge with next chunk */5153mchunkptr next = next_chunk(p);5154if (b != fence && *b == chunk2mem(next)) {5155size_t newsize = chunksize(next) + psize;5156set_inuse(m, p, newsize);5157*b = chunk2mem(p);5158}5159else5160dispose_chunk(m, p, psize);5161}5162else {5163CORRUPTION_ERROR_ACTION(m);5164break;5165}5166}5167}5168if (should_trim(m, m->topsize))5169sys_trim(m, 0);5170POSTACTION(m);5171}5172return unfreed;5173}51745175/* Traversal */5176#if MALLOC_INSPECT_ALL5177static void internal_inspect_all(mstate m,5178void(*handler)(void *start,5179void *end,5180size_t used_bytes,5181void* callback_arg),5182void* arg) {5183if (is_initialized(m)) {5184mchunkptr top = m->top;5185msegmentptr s;5186for (s = &m->seg; s != 0; s = s->next) {5187mchunkptr q = 
align_as_chunk(s->base);
      while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) {
        mchunkptr next = next_chunk(q);
        size_t sz = chunksize(q);
        size_t used;
        void* start;
        if (is_inuse(q)) {
          used = sz - CHUNK_OVERHEAD; /* must not be mmapped */
          start = chunk2mem(q);
        }
        else {
          used = 0;
          if (is_small(sz)) {     /* offset by possible bookkeeping */
            start = (void*)((char*)q + sizeof(struct malloc_chunk));
          }
          else {
            start = (void*)((char*)q + sizeof(struct malloc_tree_chunk));
          }
        }
        if (start < (void*)next)  /* skip if all space is bookkeeping */
          handler(start, next, used, arg);
        if (q == top)
          break;
        q = next;
      }
    }
  }
}
#endif /* MALLOC_INSPECT_ALL */

/* ------------------ Exported realloc, memalign, etc -------------------- */

#if !ONLY_MSPACES

void* dlrealloc(void* oldmem, size_t bytes) {
  void* mem = 0;
  if (oldmem == 0) {
    mem = dlmalloc(bytes);
  }
  else if (bytes >= MAX_REQUEST) {
    MALLOC_FAILURE_ACTION;
  }
#ifdef REALLOC_ZERO_BYTES_FREES
  else if (bytes == 0) {
    dlfree(oldmem);
  }
#endif /* REALLOC_ZERO_BYTES_FREES */
  else {
    size_t nb = request2size(bytes);
    mchunkptr oldp = mem2chunk(oldmem);
#if ! FOOTERS
    mstate m = gm;
#else /* FOOTERS */
    mstate m = get_mstate_for(oldp);
    if (!ok_magic(m)) {
      USAGE_ERROR_ACTION(m, oldmem);
      return 0;
    }
#endif /* FOOTERS */
    if (!PREACTION(m)) {
      mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
      POSTACTION(m);
      if (newp != 0) {
        check_inuse_chunk(m, newp);
        mem = chunk2mem(newp);
      }
      else {
        mem = internal_malloc(m, bytes);
        if (mem != 0) {
          size_t oc = chunksize(oldp) - overhead_for(oldp);
          memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
          internal_free(m, oldmem);
        }
      }
    }
  }
  return mem;
}
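/*
  As an illustration (a minimal usage sketch, not part of the allocator;
  the helper name is invented for the example): dlrealloc may move the
  block, falling back to allocate/copy/free when the chunk cannot be
  extended in place, so callers should keep the old pointer until the
  call is known to have succeeded.  dlrealloc_in_place (below) never
  moves the block and returns 0 unless oldmem could be resized where it
  sits.

    void* grow_buffer(void* buf, size_t newsize) {
      void* grown = dlrealloc(buf, newsize);
      if (grown == 0) {
        // buf is untouched and still owned by the caller
        return 0;
      }
      return grown;  // may or may not equal buf
    }
*/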
void* dlrealloc_in_place(void* oldmem, size_t bytes) {
  void* mem = 0;
  if (oldmem != 0) {
    if (bytes >= MAX_REQUEST) {
      MALLOC_FAILURE_ACTION;
    }
    else {
      size_t nb = request2size(bytes);
      mchunkptr oldp = mem2chunk(oldmem);
#if ! FOOTERS
      mstate m = gm;
#else /* FOOTERS */
      mstate m = get_mstate_for(oldp);
      if (!ok_magic(m)) {
        USAGE_ERROR_ACTION(m, oldmem);
        return 0;
      }
#endif /* FOOTERS */
      if (!PREACTION(m)) {
        mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
        POSTACTION(m);
        if (newp == oldp) {
          check_inuse_chunk(m, newp);
          mem = oldmem;
        }
      }
    }
  }
  return mem;
}

void* dlmemalign(size_t alignment, size_t bytes) {
  if (alignment <= MALLOC_ALIGNMENT) {
    return dlmalloc(bytes);
  }
  return internal_memalign(gm, alignment, bytes);
}

int dlposix_memalign(void** pp, size_t alignment, size_t bytes) {
  void* mem = 0;
  if (alignment == MALLOC_ALIGNMENT)
    mem = dlmalloc(bytes);
  else {
    size_t d = alignment / sizeof(void*);
    size_t r = alignment % sizeof(void*);
    if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0)
      return EINVAL;
    else if (bytes <= MAX_REQUEST - alignment) {
      if (alignment < MIN_CHUNK_SIZE)
        alignment = MIN_CHUNK_SIZE;
      mem = internal_memalign(gm, alignment, bytes);
    }
  }
  if (mem == 0)
    return ENOMEM;
  else {
    *pp = mem;
    return 0;
  }
}

void* dlvalloc(size_t bytes) {
  size_t pagesz;
  ensure_initialization();
  pagesz = mparams.page_size;
  return dlmemalign(pagesz, bytes);
}

void* dlpvalloc(size_t bytes) {
  size_t pagesz;
  ensure_initialization();
  pagesz = mparams.page_size;
  return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
}

void** dlindependent_calloc(size_t n_elements, size_t elem_size,
                            void* chunks[]) {
  size_t sz = elem_size; /* serves as 1-element array */
  return ialloc(gm, n_elements, &sz, 3, chunks);
}

void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
                              void* chunks[]) {
  return ialloc(gm, n_elements, sizes, 0, chunks);
}
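/*
  As an illustration (a hedged sketch; the struct and the sizes are
  invented for the example): dlindependent_comalloc allocates a set of
  chunks in one shot and fills in a caller-supplied pointer array,
  which is useful when a group of objects is always allocated together.
  Each returned chunk may still be freed independently with dlfree.

    struct Header { int refcount; size_t len; };

    int make_record(size_t payload_len, void* out[2]) {
      size_t sizes[2];
      sizes[0] = sizeof(struct Header);
      sizes[1] = payload_len;
      if (dlindependent_comalloc(2, sizes, out) == 0)
        return -1;  // allocation failed
      // out[0] holds the header, out[1] the payload
      return 0;
    }
*/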
size_t dlbulk_free(void* array[], size_t nelem) {
  return internal_bulk_free(gm, array, nelem);
}

#if MALLOC_INSPECT_ALL
void dlmalloc_inspect_all(void(*handler)(void *start,
                                         void *end,
                                         size_t used_bytes,
                                         void* callback_arg),
                          void* arg) {
  ensure_initialization();
  if (!PREACTION(gm)) {
    internal_inspect_all(gm, handler, arg);
    POSTACTION(gm);
  }
}
#endif /* MALLOC_INSPECT_ALL */

int dlmalloc_trim(size_t pad) {
  int result = 0;
  ensure_initialization();
  if (!PREACTION(gm)) {
    result = sys_trim(gm, pad);
    POSTACTION(gm);
  }
  return result;
}

size_t dlmalloc_footprint(void) {
  return gm->footprint;
}

size_t dlmalloc_max_footprint(void) {
  return gm->max_footprint;
}

size_t dlmalloc_footprint_limit(void) {
  size_t maf = gm->footprint_limit;
  return maf == 0 ? MAX_SIZE_T : maf;
}

size_t dlmalloc_set_footprint_limit(size_t bytes) {
  size_t result;  /* invert sense of 0 */
  if (bytes == 0)
    result = granularity_align(1); /* Use minimal size */
  else if (bytes == MAX_SIZE_T)
    result = 0;                    /* disable */
  else
    result = granularity_align(bytes);
  return gm->footprint_limit = result;
}

#if !NO_MALLINFO
struct mallinfo dlmallinfo(void) {
  return internal_mallinfo(gm);
}
#endif /* NO_MALLINFO */

#if !NO_MALLOC_STATS
void dlmalloc_stats() {
  internal_malloc_stats(gm);
}
#endif /* NO_MALLOC_STATS */

int dlmallopt(int param_number, int value) {
  return change_mparam(param_number, value);
}

size_t dlmalloc_usable_size(void* mem) {
  if (mem != 0) {
    mchunkptr p = mem2chunk(mem);
    if (is_inuse(p))
      return chunksize(p) - overhead_for(p);
  }
  return 0;
}

#endif /* !ONLY_MSPACES */

/* ----------------------------- user mspaces ---------------------------- */

#if MSPACES

static mstate init_user_mstate(char* tbase, size_t tsize) {
  size_t msize = pad_request(sizeof(struct malloc_state));
  mchunkptr mn;
  mchunkptr msp = align_as_chunk(tbase);
  mstate m = (mstate)(chunk2mem(msp));
  memset(m, 0, msize);
  (void)INITIAL_LOCK(&m->mutex);
  msp->head = (msize|INUSE_BITS);
  m->seg.base = m->least_addr = tbase;
  m->seg.size = m->footprint = m->max_footprint = tsize;
  m->magic = mparams.magic;
  m->release_checks = MAX_RELEASE_CHECK_RATE;
  m->mflags = mparams.default_mflags;
  m->extp = 0;
  m->exts = 0;
  disable_contiguous(m);
  init_bins(m);
  mn = next_chunk(mem2chunk(m));
  init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
  check_top_chunk(m, m->top);
  return m;
}

mspace create_mspace(size_t capacity, int locked) {
  mstate m = 0;
  size_t msize;
  ensure_initialization();
  msize = pad_request(sizeof(struct malloc_state));
  if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
    size_t rs = ((capacity == 0)?
mparams.granularity :5465(capacity + TOP_FOOT_SIZE + msize));5466size_t tsize = granularity_align(rs);5467char* tbase = (char*)(CALL_MMAP(tsize));5468if (tbase != CMFAIL) {5469m = init_user_mstate(tbase, tsize);5470m->seg.sflags = USE_MMAP_BIT;5471set_lock(m, locked);5472}5473}5474return (mspace)m;5475}54765477mspace create_mspace_with_base(void* base, size_t capacity, int locked) {5478mstate m = 0;5479size_t msize;5480ensure_initialization();5481msize = pad_request(sizeof(struct malloc_state));5482if (capacity > msize + TOP_FOOT_SIZE &&5483capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {5484m = init_user_mstate((char*)base, capacity);5485m->seg.sflags = EXTERN_BIT;5486set_lock(m, locked);5487}5488return (mspace)m;5489}54905491int mspace_track_large_chunks(mspace msp, int enable) {5492int ret = 0;5493mstate ms = (mstate)msp;5494if (!PREACTION(ms)) {5495if (!use_mmap(ms)) {5496ret = 1;5497}5498if (!enable) {5499enable_mmap(ms);5500} else {5501disable_mmap(ms);5502}5503POSTACTION(ms);5504}5505return ret;5506}55075508size_t destroy_mspace(mspace msp) {5509size_t freed = 0;5510mstate ms = (mstate)msp;5511if (ok_magic(ms)) {5512msegmentptr sp = &ms->seg;5513(void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */5514while (sp != 0) {5515char* base = sp->base;5516size_t size = sp->size;5517flag_t flag = sp->sflags;5518(void)base; /* placate people compiling -Wunused-variable */5519sp = sp->next;5520if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&5521CALL_MUNMAP(base, size) == 0)5522freed += size;5523}5524}5525else {5526USAGE_ERROR_ACTION(ms,ms);5527}5528return freed;5529}55305531/*5532mspace versions of routines are near-clones of the global5533versions. This is not so nice but better than the alternatives.5534*/55355536void* mspace_malloc(mspace msp, size_t bytes) {5537mstate ms = (mstate)msp;5538if (!ok_magic(ms)) {5539USAGE_ERROR_ACTION(ms,ms);5540return 0;5541}5542if (!PREACTION(ms)) {5543void* mem;5544size_t nb;5545if (bytes <= MAX_SMALL_REQUEST) {5546bindex_t idx;5547binmap_t smallbits;5548nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);5549idx = small_index(nb);5550smallbits = ms->smallmap >> idx;55515552if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
*/5553mchunkptr b, p;5554idx += ~smallbits & 1; /* Uses next bin if idx empty */5555b = smallbin_at(ms, idx);5556p = b->fd;5557assert(chunksize(p) == small_index2size(idx));5558unlink_first_small_chunk(ms, b, p, idx);5559set_inuse_and_pinuse(ms, p, small_index2size(idx));5560mem = chunk2mem(p);5561check_malloced_chunk(ms, mem, nb);5562goto postaction;5563}55645565else if (nb > ms->dvsize) {5566if (smallbits != 0) { /* Use chunk in next nonempty smallbin */5567mchunkptr b, p, r;5568size_t rsize;5569bindex_t i;5570binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));5571binmap_t leastbit = least_bit(leftbits);5572compute_bit2idx(leastbit, i);5573b = smallbin_at(ms, i);5574p = b->fd;5575assert(chunksize(p) == small_index2size(i));5576unlink_first_small_chunk(ms, b, p, i);5577rsize = small_index2size(i) - nb;5578/* Fit here cannot be remainderless if 4byte sizes */5579if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)5580set_inuse_and_pinuse(ms, p, small_index2size(i));5581else {5582set_size_and_pinuse_of_inuse_chunk(ms, p, nb);5583r = chunk_plus_offset(p, nb);5584set_size_and_pinuse_of_free_chunk(r, rsize);5585replace_dv(ms, r, rsize);5586}5587mem = chunk2mem(p);5588check_malloced_chunk(ms, mem, nb);5589goto postaction;5590}55915592else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {5593check_malloced_chunk(ms, mem, nb);5594goto postaction;5595}5596}5597}5598else if (bytes >= MAX_REQUEST)5599nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */5600else {5601nb = pad_request(bytes);5602if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {5603check_malloced_chunk(ms, mem, nb);5604goto postaction;5605}5606}56075608if (nb <= ms->dvsize) {5609size_t rsize = ms->dvsize - nb;5610mchunkptr p = ms->dv;5611if (rsize >= MIN_CHUNK_SIZE) { /* split dv */5612mchunkptr r = ms->dv = chunk_plus_offset(p, nb);5613ms->dvsize = rsize;5614set_size_and_pinuse_of_free_chunk(r, rsize);5615set_size_and_pinuse_of_inuse_chunk(ms, p, nb);5616}5617else { /* exhaust dv */5618size_t dvs = ms->dvsize;5619ms->dvsize = 0;5620ms->dv = 0;5621set_inuse_and_pinuse(ms, p, dvs);5622}5623mem = chunk2mem(p);5624check_malloced_chunk(ms, mem, nb);5625goto postaction;5626}56275628else if (nb < ms->topsize) { /* Split top */5629size_t rsize = ms->topsize -= nb;5630mchunkptr p = ms->top;5631mchunkptr r = ms->top = chunk_plus_offset(p, nb);5632r->head = rsize | PINUSE_BIT;5633set_size_and_pinuse_of_inuse_chunk(ms, p, nb);5634mem = chunk2mem(p);5635check_top_chunk(ms, ms->top);5636check_malloced_chunk(ms, mem, nb);5637goto postaction;5638}56395640mem = sys_alloc(ms, nb);56415642postaction:5643POSTACTION(ms);5644return mem;5645}56465647return 0;5648}56495650void mspace_free(mspace msp, void* mem) {5651if (mem != 0) {5652mchunkptr p = mem2chunk(mem);5653#if FOOTERS5654mstate fm = get_mstate_for(p);5655(void)msp; /* placate people compiling -Wunused */5656#else /* FOOTERS */5657mstate fm = (mstate)msp;5658#endif /* FOOTERS */5659if (!ok_magic(fm)) {5660USAGE_ERROR_ACTION(fm, p);5661return;5662}5663if (!PREACTION(fm)) {5664check_inuse_chunk(fm, p);5665if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {5666size_t psize = chunksize(p);5667mchunkptr next = chunk_plus_offset(p, psize);5668if (!pinuse(p)) {5669size_t prevsize = p->prev_foot;5670if (is_mmapped(p)) {5671psize += prevsize + MMAP_FOOT_PAD;5672if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)5673fm->footprint -= psize;5674goto postaction;5675}5676else {5677mchunkptr prev = chunk_minus_offset(p, prevsize);5678psize += prevsize;5679p = 
prev;5680if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */5681if (p != fm->dv) {5682unlink_chunk(fm, p, prevsize);5683}5684else if ((next->head & INUSE_BITS) == INUSE_BITS) {5685fm->dvsize = psize;5686set_free_with_pinuse(p, psize, next);5687goto postaction;5688}5689}5690else5691goto erroraction;5692}5693}56945695if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {5696if (!cinuse(next)) { /* consolidate forward */5697if (next == fm->top) {5698size_t tsize = fm->topsize += psize;5699fm->top = p;5700p->head = tsize | PINUSE_BIT;5701if (p == fm->dv) {5702fm->dv = 0;5703fm->dvsize = 0;5704}5705if (should_trim(fm, tsize))5706sys_trim(fm, 0);5707goto postaction;5708}5709else if (next == fm->dv) {5710size_t dsize = fm->dvsize += psize;5711fm->dv = p;5712set_size_and_pinuse_of_free_chunk(p, dsize);5713goto postaction;5714}5715else {5716size_t nsize = chunksize(next);5717psize += nsize;5718unlink_chunk(fm, next, nsize);5719set_size_and_pinuse_of_free_chunk(p, psize);5720if (p == fm->dv) {5721fm->dvsize = psize;5722goto postaction;5723}5724}5725}5726else5727set_free_with_pinuse(p, psize, next);57285729if (is_small(psize)) {5730insert_small_chunk(fm, p, psize);5731check_free_chunk(fm, p);5732}5733else {5734tchunkptr tp = (tchunkptr)p;5735insert_large_chunk(fm, tp, psize);5736check_free_chunk(fm, p);5737if (--fm->release_checks == 0)5738release_unused_segments(fm);5739}5740goto postaction;5741}5742}5743erroraction:5744USAGE_ERROR_ACTION(fm, p);5745postaction:5746POSTACTION(fm);5747}5748}5749}57505751void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {5752void* mem;5753size_t req = 0;5754mstate ms = (mstate)msp;5755if (!ok_magic(ms)) {5756USAGE_ERROR_ACTION(ms,ms);5757return 0;5758}5759if (n_elements != 0) {5760req = n_elements * elem_size;5761if (((n_elements | elem_size) & ~(size_t)0xffff) &&5762(req / n_elements != elem_size))5763req = MAX_SIZE_T; /* force downstream failure on overflow */5764}5765mem = internal_malloc(ms, req);5766if (mem != 0 && calloc_must_clear(mem2chunk(mem)))5767memset(mem, 0, req);5768return mem;5769}57705771void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {5772void* mem = 0;5773if (oldmem == 0) {5774mem = mspace_malloc(msp, bytes);5775}5776else if (bytes >= MAX_REQUEST) {5777MALLOC_FAILURE_ACTION;5778}5779#ifdef REALLOC_ZERO_BYTES_FREES5780else if (bytes == 0) {5781mspace_free(msp, oldmem);5782}5783#endif /* REALLOC_ZERO_BYTES_FREES */5784else {5785size_t nb = request2size(bytes);5786mchunkptr oldp = mem2chunk(oldmem);5787#if ! FOOTERS5788mstate m = (mstate)msp;5789#else /* FOOTERS */5790mstate m = get_mstate_for(oldp);5791if (!ok_magic(m)) {5792USAGE_ERROR_ACTION(m, oldmem);5793return 0;5794}5795#endif /* FOOTERS */5796if (!PREACTION(m)) {5797mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);5798POSTACTION(m);5799if (newp != 0) {5800check_inuse_chunk(m, newp);5801mem = chunk2mem(newp);5802}5803else {5804mem = mspace_malloc(m, bytes);5805if (mem != 0) {5806size_t oc = chunksize(oldp) - overhead_for(oldp);5807memcpy(mem, oldmem, (oc < bytes)? oc : bytes);5808mspace_free(m, oldmem);5809}5810}5811}5812}5813return mem;5814}58155816void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) {5817void* mem = 0;5818if (oldmem != 0) {5819if (bytes >= MAX_REQUEST) {5820MALLOC_FAILURE_ACTION;5821}5822else {5823size_t nb = request2size(bytes);5824mchunkptr oldp = mem2chunk(oldmem);5825#if ! 
FOOTERS5826mstate m = (mstate)msp;5827#else /* FOOTERS */5828mstate m = get_mstate_for(oldp);5829(void)msp; /* placate people compiling -Wunused */5830if (!ok_magic(m)) {5831USAGE_ERROR_ACTION(m, oldmem);5832return 0;5833}5834#endif /* FOOTERS */5835if (!PREACTION(m)) {5836mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);5837POSTACTION(m);5838if (newp == oldp) {5839check_inuse_chunk(m, newp);5840mem = oldmem;5841}5842}5843}5844}5845return mem;5846}58475848void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {5849mstate ms = (mstate)msp;5850if (!ok_magic(ms)) {5851USAGE_ERROR_ACTION(ms,ms);5852return 0;5853}5854if (alignment <= MALLOC_ALIGNMENT)5855return mspace_malloc(msp, bytes);5856return internal_memalign(ms, alignment, bytes);5857}58585859void** mspace_independent_calloc(mspace msp, size_t n_elements,5860size_t elem_size, void* chunks[]) {5861size_t sz = elem_size; /* serves as 1-element array */5862mstate ms = (mstate)msp;5863if (!ok_magic(ms)) {5864USAGE_ERROR_ACTION(ms,ms);5865return 0;5866}5867return ialloc(ms, n_elements, &sz, 3, chunks);5868}58695870void** mspace_independent_comalloc(mspace msp, size_t n_elements,5871size_t sizes[], void* chunks[]) {5872mstate ms = (mstate)msp;5873if (!ok_magic(ms)) {5874USAGE_ERROR_ACTION(ms,ms);5875return 0;5876}5877return ialloc(ms, n_elements, sizes, 0, chunks);5878}58795880size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) {5881return internal_bulk_free((mstate)msp, array, nelem);5882}58835884#if MALLOC_INSPECT_ALL5885void mspace_inspect_all(mspace msp,5886void(*handler)(void *start,5887void *end,5888size_t used_bytes,5889void* callback_arg),5890void* arg) {5891mstate ms = (mstate)msp;5892if (ok_magic(ms)) {5893if (!PREACTION(ms)) {5894internal_inspect_all(ms, handler, arg);5895POSTACTION(ms);5896}5897}5898else {5899USAGE_ERROR_ACTION(ms,ms);5900}5901}5902#endif /* MALLOC_INSPECT_ALL */59035904int mspace_trim(mspace msp, size_t pad) {5905int result = 0;5906mstate ms = (mstate)msp;5907if (ok_magic(ms)) {5908if (!PREACTION(ms)) {5909result = sys_trim(ms, pad);5910POSTACTION(ms);5911}5912}5913else {5914USAGE_ERROR_ACTION(ms,ms);5915}5916return result;5917}59185919#if !NO_MALLOC_STATS5920void mspace_malloc_stats(mspace msp) {5921mstate ms = (mstate)msp;5922if (ok_magic(ms)) {5923internal_malloc_stats(ms);5924}5925else {5926USAGE_ERROR_ACTION(ms,ms);5927}5928}5929#endif /* NO_MALLOC_STATS */59305931size_t mspace_footprint(mspace msp) {5932size_t result = 0;5933mstate ms = (mstate)msp;5934if (ok_magic(ms)) {5935result = ms->footprint;5936}5937else {5938USAGE_ERROR_ACTION(ms,ms);5939}5940return result;5941}59425943size_t mspace_max_footprint(mspace msp) {5944size_t result = 0;5945mstate ms = (mstate)msp;5946if (ok_magic(ms)) {5947result = ms->max_footprint;5948}5949else {5950USAGE_ERROR_ACTION(ms,ms);5951}5952return result;5953}59545955size_t mspace_footprint_limit(mspace msp) {5956size_t result = 0;5957mstate ms = (mstate)msp;5958if (ok_magic(ms)) {5959size_t maf = ms->footprint_limit;5960result = (maf == 0) ? 
MAX_SIZE_T : maf;
  }
  else {
    USAGE_ERROR_ACTION(ms,ms);
  }
  return result;
}

size_t mspace_set_footprint_limit(mspace msp, size_t bytes) {
  size_t result = 0;
  mstate ms = (mstate)msp;
  if (ok_magic(ms)) {
    if (bytes == 0)
      result = granularity_align(1); /* Use minimal size */
    else if (bytes == MAX_SIZE_T)
      result = 0;                    /* disable */
    else
      result = granularity_align(bytes);
    ms->footprint_limit = result;
  }
  else {
    USAGE_ERROR_ACTION(ms,ms);
  }
  return result;
}

#if !NO_MALLINFO
struct mallinfo mspace_mallinfo(mspace msp) {
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
  }
  return internal_mallinfo(ms);
}
#endif /* NO_MALLINFO */

size_t mspace_usable_size(const void* mem) {
  if (mem != 0) {
    mchunkptr p = mem2chunk(mem);
    if (is_inuse(p))
      return chunksize(p) - overhead_for(p);
  }
  return 0;
}

int mspace_mallopt(int param_number, int value) {
  return change_mparam(param_number, value);
}

#endif /* MSPACES */
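/*
  As an illustration of the mspace API above (a minimal sketch, only
  meaningful when compiled with MSPACES; the function name is invented
  for the example): a caller can create a private heap, allocate from
  it, and release everything at once by destroying the space.

    void use_private_heap(void) {
      mspace arena = create_mspace(0, 0);  // default capacity, no locking
      if (arena != 0) {
        void* a = mspace_malloc(arena, 128);
        void* b = mspace_calloc(arena, 16, sizeof(int));
        mspace_free(arena, a);
        (void)b;                           // reclaimed with the space below
        destroy_mspace(arena);             // unmaps the whole space
      }
    }
*/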

/* -------------------- Alternative MORECORE functions ------------------- */

/*
  Guidelines for creating a custom version of MORECORE:

  * For best performance, MORECORE should allocate in multiples of pagesize.
  * MORECORE may allocate more memory than requested. (Or even less,
    but this will usually result in a malloc failure.)
  * MORECORE must not allocate memory when given argument zero, but
    instead return one past the end address of memory from previous
    nonzero call.
  * For best performance, consecutive calls to MORECORE with positive
    arguments should return increasing addresses, indicating that
    space has been contiguously extended.
  * Even though consecutive calls to MORECORE need not return contiguous
    addresses, it must be OK for malloc'ed chunks to span multiple
    regions in those cases where they do happen to be contiguous.
  * MORECORE need not handle negative arguments -- it may instead
    just return MFAIL when given negative arguments.
    Negative arguments are always multiples of pagesize. MORECORE
    must not misinterpret negative args as large positive unsigned
    args. You can suppress all such calls from even occurring by defining
    MORECORE_CANNOT_TRIM.

  As an example alternative MORECORE, here is a custom allocator
  kindly contributed for pre-OSX macOS.  It uses virtually but not
  necessarily physically contiguous non-paged memory (locked in,
  present and won't get swapped out).  You can use it by uncommenting
  this section, adding some #includes, and setting up the appropriate
  defines above:

      #define MORECORE osMoreCore

  There is also a shutdown routine that should somehow be called for
  cleanup upon program exit.

  #define MAX_POOL_ENTRIES 100
  #define MINIMUM_MORECORE_SIZE  (64 * 1024U)
  static int next_os_pool;
  void *our_os_pools[MAX_POOL_ENTRIES];

  void *osMoreCore(int size)
  {
    void *ptr = 0;
    static void *sbrk_top = 0;

    if (size > 0)
    {
      if (size < MINIMUM_MORECORE_SIZE)
         size = MINIMUM_MORECORE_SIZE;
      if (CurrentExecutionLevel() == kTaskLevel)
         ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
      if (ptr == 0)
      {
        return (void *) MFAIL;
      }
      // save ptrs so they can be freed during cleanup
      our_os_pools[next_os_pool] = ptr;
      next_os_pool++;
      ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
      sbrk_top = (char *) ptr + size;
      return ptr;
    }
    else if (size < 0)
    {
      // we don't currently support shrink behavior
      return (void *) MFAIL;
    }
    else
    {
      return sbrk_top;
    }
  }

  // cleanup any allocated memory pools
  // called as last thing before shutting down driver

  void osCleanupMem(void)
  {
    void **ptr;

    for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
      if (*ptr)
      {
         PoolDeallocate(*ptr);
         *ptr = 0;
      }
  }

*/


/* -----------------------------------------------------------------------
History:
    v2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea
      * fix bad comparison in dlposix_memalign
      * don't reuse adjusted asize in sys_alloc
      * add LOCK_AT_FORK -- thanks to Kirill Artamonov for the suggestion
      * reduce compiler warnings -- thanks to all who reported/suggested these

    v2.8.5 Sun May 22 10:26:02 2011 Doug Lea (dl at gee)
      * Always perform unlink checks unless INSECURE
      * Add posix_memalign.
      * Improve realloc to expand in more cases; expose realloc_in_place.
        Thanks to Peter Buhr for the suggestion.
      * Add footprint_limit, inspect_all, bulk_free. Thanks
        to Barry Hayes and others for the suggestions.
      * Internal refactorings to avoid calls while holding locks
      * Use non-reentrant locks by default. Thanks to Roland McGrath
        for the suggestion.
      * Small fixes to mspace_destroy, reset_on_error.
      * Various configuration extensions/changes. Thanks
        to all who contributed these.

    V2.8.4a Thu Apr 28 14:39:43 2011 (dl at gee.cs.oswego.edu)
      * Update Creative Commons URL

    V2.8.4 Wed May 27 09:56:23 2009 Doug Lea (dl at gee)
      * Use zeros instead of prev foot for is_mmapped
      * Add mspace_track_large_chunks; thanks to Jean Brouwers
      * Fix set_inuse in internal_realloc; thanks to Jean Brouwers
      * Fix insufficient sys_alloc padding when using 16byte alignment
      * Fix bad error check in mspace_footprint
      * Adaptations for ptmalloc; thanks to Wolfram Gloger.
      * Reentrant spin locks; thanks to Earl Chew and others
      * Win32 improvements; thanks to Niall Douglas and Earl Chew
      * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options
      * Extension hook in malloc_state
      * Various small adjustments to reduce warnings on some compilers
      * Various configuration extensions/changes for more platforms.
        Thanks to all who contributed these.

    V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee)
      * Add max_footprint functions
      * Ensure all appropriate literals are size_t
      * Fix conditional compilation problem for some #define settings
      * Avoid concatenating segments with the one provided
        in create_mspace_with_base
      * Rename some variables to avoid compiler shadowing warnings
      * Use explicit lock initialization.
      * Better handling of sbrk interference.
      * Simplify and fix segment insertion, trimming and mspace_destroy
      * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
      * Thanks especially to Dennis Flanagan for help on these.

    V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee)
      * Fix memalign brace error.

    V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee)
      * Fix improper #endif nesting in C++
      * Add explicit casts needed for C++

    V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee)
      * Use trees for large bins
      * Support mspaces
      * Use segments to unify sbrk-based and mmap-based system allocation,
        removing need for emulation on most platforms without sbrk.
      * Default safety checks
      * Optional footer checks. Thanks to William Robertson for the idea.
      * Internal code refactoring
      * Incorporate suggestions and platform-specific changes.
        Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
        Aaron Bachmann, Emery Berger, and others.
      * Speed up non-fastbin processing enough to remove fastbins.
      * Remove useless cfree() to avoid conflicts with other apps.
      * Remove internal memcpy, memset. Compilers handle builtins better.
      * Remove some options that no one ever used and rename others.

    V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
      * Fix malloc_state bitmap array misdeclaration

    V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
      * Allow tuning of FIRST_SORTED_BIN_SIZE
      * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
      * Better detection and support for non-contiguousness of MORECORE.
        Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
      * Bypass most of malloc if no frees. Thanks to Emery Berger.
      * Fix freeing of old top non-contiguous chunk in sysmalloc.
      * Raised default trim and map thresholds to 256K.
      * Fix mmap-related #defines. Thanks to Lubos Lunak.
      * Fix copy macros; added LACKS_FCNTL_H.
        Thanks to Neal Walfield.
      * Branch-free bin calculation
      * Default trim and mmap thresholds now 256K.

    V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
      * Introduce independent_comalloc and independent_calloc.
        Thanks to Michael Pachos for motivation and help.
      * Make optional .h file available
      * Allow > 2GB requests on 32bit systems.
      * new WIN32 sbrk, mmap, munmap, lock code from <[email protected]>.
        Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
        and Anonymous.
      * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
        helping test this.)
      * memalign: check alignment arg
      * realloc: don't try to shift chunks backwards, since this
        leads to more fragmentation in some programs and doesn't
        seem to help in any others.
      * Collect all cases in malloc requiring system memory into sysmalloc
      * Use mmap as backup to sbrk
      * Place all internal state in malloc_state
      * Introduce fastbins (although similar to 2.5.1)
      * Many minor tunings and cosmetic improvements
      * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
      * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
        Thanks to Tony E. Bennett <[email protected]> and others.
      * Include errno.h to support default failure action.

    V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
      * return null for negative arguments
      * Added several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
      * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
        (e.g. WIN32 platforms)
      * Cleanup header file inclusion for WIN32 platforms
      * Cleanup code to avoid Microsoft Visual C++ compiler complaints
      * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
        memory allocation routines
      * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
      * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
        usage of 'assert' in non-WIN32 code
      * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
        avoid infinite loop
      * Always call 'fREe()' rather than 'free()'

    V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
      * Fixed ordering problem with boundary-stamping

    V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
      * Added pvalloc, as recommended by H.J. Liu
      * Added 64bit pointer support mainly from Wolfram Gloger
      * Added anonymously donated WIN32 sbrk emulation
      * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
      * malloc_extend_top: fix mask error that caused wastage after
        foreign sbrks
      * Add linux mremap support code from HJ Liu

    V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
      * Integrated most documentation with the code.
      * Add support for mmap, with help from
        Wolfram Gloger ([email protected]).
      * Use last_remainder in more cases.
      * Pack bins using idea from [email protected]
      * Use ordered bins instead of best-fit threshold
      * Eliminate block-local decls to simplify tracing and debugging.
      * Support another case of realloc via move into top
      * Fix error occurring when initial sbrk_base not word-aligned.
      * Rely on page size for units instead of SBRK_UNIT to
        avoid surprises about sbrk alignment conventions.
      * Add mallinfo, mallopt.
        Thanks to Raymond Nijssen ([email protected]) for the suggestion.
      * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
      * More precautions for cases where other routines call sbrk,
        courtesy of Wolfram Gloger ([email protected]).
      * Added macros etc., allowing use in linux libc from
        H.J. Lu ([email protected])
      * Inverted this history list

    V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
      * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
      * Removed all preallocation code since under current scheme
        the work required to undo bad preallocations exceeds
        the work saved in good cases for most test programs.
      * No longer use return list or unconsolidated bins since
        no scheme using them consistently outperforms those that don't
        given above changes.
      * Use best fit for very large chunks to prevent some worst-cases.
      * Added some support for debugging

    V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
      * Removed footers when chunks are in use. Thanks to
        Paul Wilson ([email protected]) for the suggestion.

    V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
      * Added malloc_trim, with help from Wolfram Gloger
        ([email protected]).

    V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)

    V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)
      * realloc: try to expand in both directions
      * malloc: swap order of clean-bin strategy;
      * realloc: only conditionally expand backwards
      * Try not to scavenge used bins
      * Use bin counts as a guide to preallocation
      * Occasionally bin return list chunks in first scan
      * Add a few optimizations from [email protected]

    V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
      * faster bin computation & slightly different binning
      * merged all consolidations to one part of malloc proper
        (eliminating old malloc_find_space & malloc_clean_bin)
      * Scan 2 returns chunks (not just 1)
      * Propagate failure in realloc if malloc returns 0
      * Add stuff to allow compilation on non-ANSI compilers
        from [email protected]

    V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
      * removed potential for odd address access in prev_chunk
      * removed dependency on getpagesize.h
      * misc cosmetics and a bit more internal documentation
      * anticosmetics: mangled names in macros to evade debugger strangeness
      * tested on sparc, hp-700, dec-mips, rs6000
        with gcc & native cc (hp, dec only) allowing
        Detlefs & Zorn comparison study (in SIGPLAN Notices.)

    Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
      * Based loosely on libg++-1.2X malloc.
        (It retains some of the overall structure of old version,
        but most details differ.)

*/

#endif /* !HAVE_MALLOC */

#ifdef HAVE_MALLOC
static void * SDLCALL real_malloc(size_t s) { return malloc(s); }
static void * SDLCALL real_calloc(size_t n, size_t s) { return calloc(n, s); }
static void * SDLCALL real_realloc(void *p, size_t s) { return realloc(p,s); }
static void SDLCALL real_free(void *p) { free(p); }
#else
#define real_malloc dlmalloc
#define real_calloc dlcalloc
#define real_realloc dlrealloc
#define real_free dlfree
#endif

// mark the allocator entry points as KEEPALIVE so we can call these from JavaScript.
// otherwise they could get so aggressively inlined that their symbols
// don't exist at all in the final binary!
#ifdef SDL_PLATFORM_EMSCRIPTEN
#include <emscripten/emscripten.h>
extern SDL_DECLSPEC SDL_MALLOC EMSCRIPTEN_KEEPALIVE void * SDLCALL SDL_malloc(size_t size);
extern SDL_DECLSPEC SDL_MALLOC SDL_ALLOC_SIZE2(1, 2) EMSCRIPTEN_KEEPALIVE void * SDLCALL SDL_calloc(size_t nmemb, size_t size);
extern SDL_DECLSPEC SDL_ALLOC_SIZE(2) EMSCRIPTEN_KEEPALIVE void * SDLCALL SDL_realloc(void *mem, size_t size);
extern SDL_DECLSPEC EMSCRIPTEN_KEEPALIVE void SDLCALL SDL_free(void *mem);
#endif

/* Memory functions used by SDL that can be replaced by the application */
static struct
{
    SDL_malloc_func malloc_func;
    SDL_calloc_func calloc_func;
    SDL_realloc_func realloc_func;
    SDL_free_func free_func;
    SDL_AtomicInt num_allocations;
} s_mem = {
    real_malloc, real_calloc, real_realloc, real_free, { 0 }
};

// Define this if you want to track the number of allocations active
// #define SDL_TRACK_ALLOCATION_COUNT
#ifdef SDL_TRACK_ALLOCATION_COUNT
#define INCREMENT_ALLOCATION_COUNT() (void)SDL_AtomicIncRef(&s_mem.num_allocations)
#define DECREMENT_ALLOCATION_COUNT() (void)SDL_AtomicDecRef(&s_mem.num_allocations)
#else
#define INCREMENT_ALLOCATION_COUNT()
#define DECREMENT_ALLOCATION_COUNT()
#endif


void SDL_GetOriginalMemoryFunctions(SDL_malloc_func *malloc_func,
                                    SDL_calloc_func *calloc_func,
                                    SDL_realloc_func *realloc_func,
                                    SDL_free_func *free_func)
{
    if (malloc_func) {
        *malloc_func = real_malloc;
    }
    if (calloc_func) {
        *calloc_func = real_calloc;
    }
    if (realloc_func) {
        *realloc_func = real_realloc;
    }
    if (free_func) {
        *free_func = real_free;
    }
}

void SDL_GetMemoryFunctions(SDL_malloc_func *malloc_func,
                            SDL_calloc_func *calloc_func,
                            SDL_realloc_func *realloc_func,
                            SDL_free_func *free_func)
{
    if (malloc_func) {
        *malloc_func = s_mem.malloc_func;
    }
    if (calloc_func) {
        *calloc_func = s_mem.calloc_func;
    }
    if (realloc_func) {
        *realloc_func = s_mem.realloc_func;
    }
    if (free_func) {
        *free_func = s_mem.free_func;
    }
}

bool SDL_SetMemoryFunctions(SDL_malloc_func malloc_func,
                            SDL_calloc_func calloc_func,
                            SDL_realloc_func realloc_func,
                            SDL_free_func free_func)
{
    if (!malloc_func) {
        return SDL_InvalidParamError("malloc_func");
    }
    if (!calloc_func) {
        return SDL_InvalidParamError("calloc_func");
    }
    if (!realloc_func) {
        return SDL_InvalidParamError("realloc_func");
    }
    if (!free_func) {
        return SDL_InvalidParamError("free_func");
    }

    s_mem.malloc_func = malloc_func;
    s_mem.calloc_func = calloc_func;
    s_mem.realloc_func = realloc_func;
    s_mem.free_func = free_func;
    return true;
}
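/*
  As an illustration (a hedged sketch; the wrapper names are invented
  for the example): an application can interpose its own allocator by
  installing replacement functions before SDL allocates anything,
  ideally before any other SDL call. Wrappers that chain to the current
  functions should capture them with SDL_GetMemoryFunctions first.

    static SDL_malloc_func orig_malloc;
    static SDL_free_func orig_free;

    static void * SDLCALL counting_malloc(size_t size)
    {
        return orig_malloc(size);   // add bookkeeping here
    }

    static void SDLCALL counting_free(void *mem)
    {
        orig_free(mem);
    }

    // at startup, before SDL_Init():
    //   SDL_calloc_func cfunc; SDL_realloc_func rfunc;
    //   SDL_GetMemoryFunctions(&orig_malloc, &cfunc, &rfunc, &orig_free);
    //   SDL_SetMemoryFunctions(counting_malloc, cfunc, rfunc, counting_free);
*/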
int SDL_GetNumAllocations(void)
{
#ifdef SDL_TRACK_ALLOCATION_COUNT
    return SDL_GetAtomicInt(&s_mem.num_allocations);
#else
    return -1;
#endif
}

void *SDL_malloc(size_t size)
{
    void *mem;

    if (!size) {
        size = 1;
    }

    mem = s_mem.malloc_func(size);
    if (mem) {
        INCREMENT_ALLOCATION_COUNT();
    } else {
        SDL_OutOfMemory();
    }

    return mem;
}

void *SDL_calloc(size_t nmemb, size_t size)
{
    void *mem;

    if (!nmemb || !size) {
        nmemb = 1;
        size = 1;
    }

    mem = s_mem.calloc_func(nmemb, size);
    if (mem) {
        INCREMENT_ALLOCATION_COUNT();
    } else {
        SDL_OutOfMemory();
    }

    return mem;
}

void *SDL_realloc(void *ptr, size_t size)
{
    void *mem;

    if (!size) {
        size = 1;
    }

    mem = s_mem.realloc_func(ptr, size);
    if (mem && !ptr) {
        INCREMENT_ALLOCATION_COUNT();
    } else if (!mem) {
        SDL_OutOfMemory();
    }

    return mem;
}

void SDL_free(void *ptr)
{
    if (!ptr) {
        return;
    }

    s_mem.free_func(ptr);
    DECREMENT_ALLOCATION_COUNT();
}
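/*
  As an illustration (a minimal sketch; the function name is invented
  for the example): when SDL_TRACK_ALLOCATION_COUNT is defined above,
  SDL_GetNumAllocations() reports the number of live allocations made
  through these wrappers, which allows a crude leak check at shutdown;
  otherwise it returns -1.

    void report_leaks_at_exit(void)
    {
        int n = SDL_GetNumAllocations();
        if (n > 0) {
            SDL_Log("%d SDL allocation(s) still outstanding", n);
        }
    }
*/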