#ifdef TMEMORY_USE_DLMALLOC
#pragma warning( disable : 4146 )
#if ABORT_ON_ASSERT_FAILURE
#define assert(x) if(!(x)) ABORT
#if !defined(WIN32) && !defined(LACKS_TIME_H)
#ifndef LACKS_STRINGS_H
#ifndef LACKS_SYS_MMAN_H
#if (defined(linux) && !defined(__USE_GNU))
#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
extern void* sbrk(ptrdiff_t);
#if defined (__SVR4) && defined (__sun)
#elif !defined(LACKS_SCHED_H)
#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS
#elif defined(_MSC_VER)
LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
#pragma intrinsic (_InterlockedCompareExchange)
#pragma intrinsic (_InterlockedExchange)
#define interlockedcompareexchange _InterlockedCompareExchange
#define interlockedexchange _InterlockedExchange
#elif defined(WIN32) && defined(__GNUC__)
#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)
#define interlockedexchange __sync_lock_test_and_set
#define LOCK_AT_FORK 0
#if defined(_MSC_VER) && _MSC_VER>=1300
#ifndef BitScanForward
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#define BitScanForward _BitScanForward
#define BitScanReverse _BitScanReverse
#pragma intrinsic(_BitScanForward)
#pragma intrinsic(_BitScanReverse)
#ifndef malloc_getpagesize
#  ifndef _SC_PAGE_SIZE
#    define _SC_PAGE_SIZE _SC_PAGESIZE
#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
       extern size_t getpagesize();
#      define malloc_getpagesize getpagesize()
#      define malloc_getpagesize getpagesize()
#      ifndef LACKS_SYS_PARAM_H
#        include <sys/param.h>
#        define malloc_getpagesize EXEC_PAGESIZE
#        define malloc_getpagesize NBPG
#        define malloc_getpagesize (NBPG * CLSIZE)
#        define malloc_getpagesize NBPC
#        define malloc_getpagesize PAGESIZE
#        define malloc_getpagesize ((size_t)4096U)
#define SIZE_T_SIZE         (sizeof(size_t))
#define SIZE_T_BITSIZE      (sizeof(size_t) << 3)
#define SIZE_T_ZERO         ((size_t)0)
#define SIZE_T_ONE          ((size_t)1)
#define SIZE_T_TWO          ((size_t)2)
#define SIZE_T_FOUR         ((size_t)4)
#define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
#define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)
#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
#define align_offset(A)\
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
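/* Worked example (assuming MALLOC_ALIGNMENT == 16, i.e. a typical 64-bit
   build): align_offset(0x1000) == 0, align_offset(0x1008) == 8 and
   align_offset(0x100C) == 4 -- the number of bytes to add to reach the
   next 16-byte boundary. */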
#define MFAIL                ((void*)(MAX_SIZE_T))
#define CMFAIL               ((char*)(MFAIL))
#define MUNMAP_DEFAULT(a, s)  munmap((a), (s))
#define MMAP_PROT            (PROT_READ|PROT_WRITE)
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS        MAP_ANON
#define MMAP_FLAGS           (MAP_PRIVATE|MAP_ANONYMOUS)
#define MMAP_DEFAULT(s)       mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
#define MMAP_FLAGS           (MAP_PRIVATE)
static int dev_zero_fd = -1;
#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
           (dev_zero_fd = open("/dev/zero", O_RDWR), \
            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
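/* Win32 replacements for mmap/munmap follow: memory is reserved and
   committed with VirtualAlloc and released with VirtualFree.  Direct
   (large) requests use MEM_TOP_DOWN to keep them away from the default
   heap growth direction. */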
static FORCEINLINE void* win32mmap(size_t size) {
  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
  return (ptr != 0)? ptr: MFAIL;
static FORCEINLINE void* win32direct_mmap(size_t size) {
  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
  return (ptr != 0)? ptr: MFAIL;
static FORCEINLINE int win32munmap(void* ptr, size_t size) {
  MEMORY_BASIC_INFORMATION minfo;
  char* cptr = (char*)ptr;
    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
    cptr += minfo.RegionSize;
    size -= minfo.RegionSize;
#define MMAP_DEFAULT(s)             win32mmap(s)
#define MUNMAP_DEFAULT(a, s)        win32munmap((a), (s))
#define DIRECT_MMAP_DEFAULT(s)      win32direct_mmap(s)
#define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
    #define CALL_MORECORE(S)    MORECORE(S)
    #define CALL_MORECORE(S)    MORECORE_DEFAULT(S)
    #define CALL_MORECORE(S)        MFAIL
    #define USE_MMAP_BIT            (SIZE_T_ONE)
        #define CALL_MMAP(s)        MMAP(s)
        #define CALL_MMAP(s)        MMAP_DEFAULT(s)
        #define CALL_MUNMAP(a, s)   MUNMAP((a), (s))
        #define CALL_MUNMAP(a, s)   MUNMAP_DEFAULT((a), (s))
        #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
        #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
    #define USE_MMAP_BIT            (SIZE_T_ZERO)
    #define MMAP(s)                 MFAIL
    #define MUNMAP(a, s)            (-1)
    #define DIRECT_MMAP(s)          MFAIL
    #define CALL_DIRECT_MMAP(s)     DIRECT_MMAP(s)
    #define CALL_MMAP(s)            MMAP(s)
    #define CALL_MUNMAP(a, s)       MUNMAP((a), (s))
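/* All system allocation goes through the CALL_MORECORE / CALL_MMAP /
   CALL_MUNMAP / CALL_DIRECT_MMAP macros above, so a port only needs to
   supply MORECORE and/or mmap-style primitives for its platform. */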
#if HAVE_MMAP && HAVE_MREMAP
    #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
    #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
    #define CALL_MREMAP(addr, osz, nsz, mv)     MFAIL
#define USE_NONCONTIGUOUS_BIT (4U)
#define EXTERN_BIT            (8U)
#define USE_LOCK_BIT               (0U)
#define INITIAL_LOCK(l)            (0)
#define DESTROY_LOCK(l)            (0)
#define ACQUIRE_MALLOC_GLOBAL_LOCK()
#define RELEASE_MALLOC_GLOBAL_LOCK()
#if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
#define CAS_LOCK(sl)     __sync_lock_test_and_set(sl, 1)
#define CLEAR_LOCK(sl)   __sync_lock_release(sl)
#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
static FORCEINLINE int x86_cas_lock(int *sl) {
  __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
                        : "r" (val), "m" (*(sl)), "0"(cmp)
static FORCEINLINE void x86_clear_lock(int* sl) {
  __asm__ __volatile__ ("lock; xchgl %0, %1"
                        : "m" (*(sl)), "0"(prev)
#define CAS_LOCK(sl)     x86_cas_lock(sl)
#define CLEAR_LOCK(sl)   x86_clear_lock(sl)
#define CAS_LOCK(sl)     interlockedexchange(sl, (LONG)1)
#define CLEAR_LOCK(sl)   interlockedexchange (sl, (LONG)0)
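/* CAS_LOCK atomically sets the spin-lock word and yields its previous
   value (0 means the lock was acquired); CLEAR_LOCK releases it.  The
   implementation is selected per toolchain: GCC builtins, inline x86
   assembly, or the Win32 Interlocked* intrinsics declared above. */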
#define SPINS_PER_YIELD       63
#define SLEEP_EX_DURATION     50
#define SPIN_LOCK_YIELD  SleepEx(SLEEP_EX_DURATION, FALSE)
#elif defined (__SVR4) && defined (__sun)
#define SPIN_LOCK_YIELD   thr_yield();
#elif !defined(LACKS_SCHED_H)
#define SPIN_LOCK_YIELD   sched_yield();
#define SPIN_LOCK_YIELD
#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
static int spin_acquire_lock(int *sl) {
  while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) {
    if ((++spins & SPINS_PER_YIELD) == 0) {
#define TRY_LOCK(sl) !CAS_LOCK(sl)
#define RELEASE_LOCK(sl) CLEAR_LOCK(sl)
#define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0)
#define INITIAL_LOCK(sl) (*sl = 0)
#define DESTROY_LOCK(sl) (0)
static MLOCK_T malloc_global_mutex = 0;
#define THREAD_ID_T           DWORD
#define CURRENT_THREAD        GetCurrentThreadId()
#define EQ_OWNER(X,Y)         ((X) == (Y))
#define THREAD_ID_T           pthread_t
#define CURRENT_THREAD        pthread_self()
#define EQ_OWNER(X,Y)         pthread_equal(X, Y)
struct malloc_recursive_lock {
  THREAD_ID_T threadid;
#define MLOCK_T  struct malloc_recursive_lock
static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0};
static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) {
static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) {
  THREAD_ID_T mythreadid = CURRENT_THREAD;
    if (*((volatile int *)(&lk->sl)) == 0) {
      if (!CAS_LOCK(&lk->sl)) {
        lk->threadid = mythreadid;
    else if (EQ_OWNER(lk->threadid, mythreadid)) {
    if ((++spins & SPINS_PER_YIELD) == 0) {
static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) {
  THREAD_ID_T mythreadid = CURRENT_THREAD;
  if (*((volatile int *)(&lk->sl)) == 0) {
    if (!CAS_LOCK(&lk->sl)) {
      lk->threadid = mythreadid;
  else if (EQ_OWNER(lk->threadid, mythreadid)) {
#define RELEASE_LOCK(lk)      recursive_release_lock(lk)
#define TRY_LOCK(lk)          recursive_try_lock(lk)
#define ACQUIRE_LOCK(lk)      recursive_acquire_lock(lk)
#define INITIAL_LOCK(lk)      ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0)
#define DESTROY_LOCK(lk)      (0)
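/* The recursive lock variant records the owning thread id and a nesting
   count, so the same thread may re-acquire the lock; other threads spin,
   yielding once every SPINS_PER_YIELD+1 iterations. */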
#define MLOCK_T               CRITICAL_SECTION
#define ACQUIRE_LOCK(lk)      (EnterCriticalSection(lk), 0)
#define RELEASE_LOCK(lk)      LeaveCriticalSection(lk)
#define TRY_LOCK(lk)          TryEnterCriticalSection(lk)
#define INITIAL_LOCK(lk)      (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000))
#define DESTROY_LOCK(lk)      (DeleteCriticalSection(lk), 0)
#define NEED_GLOBAL_LOCK_INIT
static MLOCK_T malloc_global_mutex;
static volatile LONG malloc_global_mutex_status;
static void init_malloc_global_mutex() {
    long stat = malloc_global_mutex_status;
        interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) {
      InitializeCriticalSection(&malloc_global_mutex);
      interlockedexchange(&malloc_global_mutex_status, (LONG)1);
#define MLOCK_T               pthread_mutex_t
#define ACQUIRE_LOCK(lk)      pthread_mutex_lock(lk)
#define RELEASE_LOCK(lk)      pthread_mutex_unlock(lk)
#define TRY_LOCK(lk)          (!pthread_mutex_trylock(lk))
#define INITIAL_LOCK(lk)      pthread_init_lock(lk)
#define DESTROY_LOCK(lk)      pthread_mutex_destroy(lk)
#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)
extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
static int pthread_init_lock (MLOCK_T *lk) {
  pthread_mutexattr_t attr;
  if (pthread_mutexattr_init(&attr)) return 1;
#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0
  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
  if (pthread_mutex_init(lk, &attr)) return 1;
  if (pthread_mutexattr_destroy(&attr)) return 1;
#define USE_LOCK_BIT               (2U)
#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
#define ACQUIRE_MALLOC_GLOBAL_LOCK()  ACQUIRE_LOCK(&malloc_global_mutex);
#ifndef RELEASE_MALLOC_GLOBAL_LOCK
#define RELEASE_MALLOC_GLOBAL_LOCK()  RELEASE_LOCK(&malloc_global_mutex);
  struct malloc_chunk* fd;
  struct malloc_chunk* bk;
typedef struct malloc_chunk  mchunk;
typedef struct malloc_chunk* mchunkptr;
typedef struct malloc_chunk* sbinptr;
typedef unsigned int bindex_t;
typedef unsigned int binmap_t;
typedef unsigned int flag_t;
#define MCHUNK_SIZE         (sizeof(mchunk))
#define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
#define CHUNK_OVERHEAD      (SIZE_T_SIZE)
#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
#define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)
#define MIN_CHUNK_SIZE\
  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
#define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
#define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
#define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
#define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
#define pad_request(req) \
   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
#define request2size(req) \
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
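/* Worked example (assuming a 64-bit build with 16-byte MALLOC_ALIGNMENT and
   CHUNK_OVERHEAD == SIZE_T_SIZE == 8, i.e. footers disabled):
   request2size(1) == MIN_CHUNK_SIZE == 32,
   request2size(24) == (24 + 8 + 15) & ~15 == 32, and
   request2size(25) == 48. */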
#define PINUSE_BIT          (SIZE_T_ONE)
#define CINUSE_BIT          (SIZE_T_TWO)
#define FLAG4_BIT           (SIZE_T_FOUR)
#define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
#define FLAG_BITS           (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
#define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)
#define cinuse(p)           ((p)->head & CINUSE_BIT)
#define pinuse(p)           ((p)->head & PINUSE_BIT)
#define flag4inuse(p)       ((p)->head & FLAG4_BIT)
#define is_inuse(p)         (((p)->head & INUSE_BITS) != PINUSE_BIT)
#define is_mmapped(p)       (((p)->head & INUSE_BITS) == 0)
#define chunksize(p)        ((p)->head & ~(FLAG_BITS))
#define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
#define set_flag4(p)        ((p)->head |= FLAG4_BIT)
#define clear_flag4(p)      ((p)->head &= ~FLAG4_BIT)
#define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
#define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)
#define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
#define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
#define set_size_and_pinuse_of_free_chunk(p, s)\
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
#define set_free_with_pinuse(p, s, n)\
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
#define overhead_for(p)\
 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
#define calloc_must_clear(p) (!is_mmapped(p))
#define calloc_must_clear(p) (1)
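/* The low three bits of 'head' encode chunk state: PINUSE (previous chunk in
   use), CINUSE (this chunk in use) and FLAG4 (spare).  A chunk with neither
   in-use bit set was obtained directly via mmap, which is why calloc can
   skip zeroing it on platforms where the OS already returns cleared pages. */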
struct malloc_tree_chunk {
  struct malloc_tree_chunk* fd;
  struct malloc_tree_chunk* bk;
  struct malloc_tree_chunk* child[2];
  struct malloc_tree_chunk* parent;
typedef struct malloc_tree_chunk  tchunk;
typedef struct malloc_tree_chunk* tchunkptr;
typedef struct malloc_tree_chunk* tbinptr;
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
struct malloc_segment {
  struct malloc_segment* next;
#define is_mmapped_segment(S)  ((S)->sflags & USE_MMAP_BIT)
#define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)
typedef struct malloc_segment  msegment;
typedef struct malloc_segment* msegmentptr;
#define NSMALLBINS        (32U)
#define NTREEBINS         (32U)
#define SMALLBIN_SHIFT    (3U)
#define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
#define TREEBIN_SHIFT     (8U)
#define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
#define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
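/* Bin layout: 32 small bins hold free chunks of exactly 8*i bytes (i.e.
   everything below MIN_LARGE_SIZE == 256), while 32 tree bins hold ranges
   of larger sizes organised as bitwise digital tries keyed on chunk size. */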
struct malloc_state {
  size_t     release_checks;
  mchunkptr  smallbins[(NSMALLBINS+1)*2];
  tbinptr    treebins[NTREEBINS];
  size_t     max_footprint;
  size_t     footprint_limit;
typedef struct malloc_state* mstate;
struct malloc_params {
  size_t mmap_threshold;
  size_t trim_threshold;
  flag_t default_mflags;
static struct malloc_params mparams;
#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
static struct malloc_state _gm_;
#define is_global(M)    ((M) == &_gm_)
#define is_initialized(M)  ((M)->top != 0)
#define use_lock(M)       ((M)->mflags &   USE_LOCK_BIT)
#define enable_lock(M)    ((M)->mflags |=  USE_LOCK_BIT)
#define disable_lock(M)   ((M)->mflags &= ~USE_LOCK_BIT)
#define disable_lock(M)
#define use_mmap(M)       ((M)->mflags &   USE_MMAP_BIT)
#define enable_mmap(M)    ((M)->mflags |=  USE_MMAP_BIT)
#define disable_mmap(M)   ((M)->mflags &= ~USE_MMAP_BIT)
#define disable_mmap(M)
#define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
#define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)
#define set_lock(M,L)\
 ((M)->mflags = (L)?\
  ((M)->mflags | USE_LOCK_BIT) :\
  ((M)->mflags & ~USE_LOCK_BIT))
#define page_align(S)\
 (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
#define granularity_align(S)\
  (((S) + (mparams.granularity - SIZE_T_ONE))\
   & ~(mparams.granularity - SIZE_T_ONE))
#define mmap_align(S) granularity_align(S)
#define mmap_align(S) page_align(S)
#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
#define is_page_aligned(S)\
   (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
#define is_granularity_aligned(S)\
   (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
#define segment_holds(S, A)\
  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
static msegmentptr segment_holding(mstate m, char* addr) {
  msegmentptr sp = &m->seg;
    if (addr >= sp->base && addr < sp->base + sp->size)
    if ((sp = sp->next) == 0)
static int has_segment_link(mstate m, msegmentptr ss) {
  msegmentptr sp = &m->seg;
    if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
    if ((sp = sp->next) == 0)
#ifndef MORECORE_CANNOT_TRIM
#define should_trim(M,s)  ((s) > (M)->trim_check)
#define should_trim(M,s)  (0)
#define TOP_FOOT_SIZE\
  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
#define PREACTION(M)  ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
#define PREACTION(M) (0)
#define POSTACTION(M)
int malloc_corruption_error_count;
static void reset_on_error(mstate m);
#define CORRUPTION_ERROR_ACTION(m)  reset_on_error(m)
#define USAGE_ERROR_ACTION(m, p)
#ifndef CORRUPTION_ERROR_ACTION
#define CORRUPTION_ERROR_ACTION(m) ABORT
#ifndef USAGE_ERROR_ACTION
#define USAGE_ERROR_ACTION(m,p) ABORT
#ifndef CORRUPTION_ERROR_ACTION
#define CORRUPTION_ERROR_ACTION(m) TCRITICAL("!\"TMemory: Memory system corruption detected!\"")
#ifndef USAGE_ERROR_ACTION
/* Wrapped in do/while(0) so the two statements behave as one when the macro
   is used in an if/else branch without braces. */
#define USAGE_ERROR_ACTION(m,p) do { TBREAK(); TCRITICAL("!\"TMemory: Memory block footer corruption detected - the block being free'd was probably overrun!\""); } while (0)
#define check_free_chunk(M,P)
#define check_inuse_chunk(M,P)
#define check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P)
#define check_malloc_state(M)
#define check_top_chunk(M,P)
#define check_free_chunk(M,P)       do_check_free_chunk(M,P)
#define check_inuse_chunk(M,P)      do_check_inuse_chunk(M,P)
#define check_top_chunk(M,P)        do_check_top_chunk(M,P)
#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P)    do_check_mmapped_chunk(M,P)
#define check_malloc_state(M)       do_check_malloc_state(M)
static void   do_check_any_chunk(mstate m, mchunkptr p);
static void   do_check_top_chunk(mstate m, mchunkptr p);
static void   do_check_mmapped_chunk(mstate m, mchunkptr p);
static void   do_check_inuse_chunk(mstate m, mchunkptr p);
static void   do_check_free_chunk(mstate m, mchunkptr p);
static void   do_check_malloced_chunk(mstate m, void* mem, size_t s);
static void   do_check_tree(mstate m, tchunkptr t);
static void   do_check_treebin(mstate m, bindex_t i);
static void   do_check_smallbin(mstate m, bindex_t i);
static void   do_check_malloc_state(mstate m);
static int    bin_find(mstate m, mchunkptr x);
static size_t traverse_and_check(mstate m);
#define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
#define small_index(s)      (bindex_t)((s) >> SMALLBIN_SHIFT)
#define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
#define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))
#define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
#define treebin_at(M,i)     (&((M)->treebins[i]))
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#define compute_tree_index(S, I)\
  unsigned int X = S >> TREEBIN_SHIFT;\
  else if (X > 0xFFFF)\
    unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \
    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
#elif defined (__INTEL_COMPILER)
#define compute_tree_index(S, I)\
  size_t X = S >> TREEBIN_SHIFT;\
  else if (X > 0xFFFF)\
    unsigned int K = _bit_scan_reverse (X); \
    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
#elif defined(_MSC_VER) && _MSC_VER>=1300
#define compute_tree_index(S, I)\
  size_t X = S >> TREEBIN_SHIFT;\
  else if (X > 0xFFFF)\
    _BitScanReverse((DWORD *) &K, (DWORD) X);\
    I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
#define compute_tree_index(S, I)\
  size_t X = S >> TREEBIN_SHIFT;\
  else if (X > 0xFFFF)\
    unsigned int Y = (unsigned int)X;\
    unsigned int N = ((Y - 0x100) >> 16) & 8;\
    unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
    N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
    K = 14 - N + ((Y <<= K) >> 15);\
    I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
#define bit_for_tree_index(i) \
   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
#define leftshift_for_tree_index(i) \
   ((i == NTREEBINS-1)? 0 : \
    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
#define minsize_for_tree_index(i) \
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
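/* Worked example: a chunk of size 384 has X = 384 >> 8 = 1, so K = 0 and
   the index is (0 << 1) + ((384 >> 7) & 1) = 1.  Sizes in [256,384) map to
   tree bin 0, [384,512) to bin 1, [512,768) to bin 2, and so on, doubling
   the covered range every two bins. */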
#define idx2bit(i)              ((binmap_t)(1) << (i))
#define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
#define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
#define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
#define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
#define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
#define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
#define least_bit(x)         ((x) & -(x))
#define left_bits(x)         ((x<<1) | -(x<<1))
#define same_or_left_bits(x) ((x) | -(x))
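/* Worked example (showing only the low byte of the bin map): with
   x == 00101000b, least_bit(x) == 00001000b (the lowest marked bin),
   left_bits(x) == 11110000b (all bins strictly larger than that lowest
   bin), and same_or_left_bits(x) == 11111000b. */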
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#define compute_bit2idx(X, I)\
  J = __builtin_ctz(X); \
#elif defined (__INTEL_COMPILER)
#define compute_bit2idx(X, I)\
  J = _bit_scan_forward (X); \
#elif defined(_MSC_VER) && _MSC_VER>=1300
#define compute_bit2idx(X, I)\
  _BitScanForward((DWORD *) &J, X);\
#elif USE_BUILTIN_FFS
#define compute_bit2idx(X, I) I = ffs(X)-1
#define compute_bit2idx(X, I)\
  unsigned int Y = X - 1;\
  unsigned int K = Y >> (16-4) & 16;\
  unsigned int N = K;        Y >>= K;\
  N += K = Y >> (8-3) &  8;  Y >>= K;\
  N += K = Y >> (4-2) &  4;  Y >>= K;\
  N += K = Y >> (2-1) &  2;  Y >>= K;\
  N += K = Y >> (1-0) &  1;  Y >>= K;\
  I = (bindex_t)(N + Y);\
#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
#define ok_next(p, n)    ((char*)(p) < (char*)(n))
#define ok_inuse(p)     is_inuse(p)
#define ok_pinuse(p)     pinuse(p)
#define ok_address(M, a) (1)
#define ok_next(b, n)    (1)
#define ok_inuse(p)      (1)
#define ok_pinuse(p)     (1)
#if (FOOTERS && !INSECURE)
#define ok_magic(M)      ((M)->magic == mparams.magic)
#define ok_magic(M)      (1)
#if defined(__GNUC__) && __GNUC__ >= 3
#define RTCHECK(e)  __builtin_expect(e, 1)
#define RTCHECK(e)  (e)
#define RTCHECK(e)  (1)
#define mark_inuse_foot(M,p,s)
#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
#define mark_inuse_foot(M,p,s)\
  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
#define get_mstate_for(p)\
  ((mstate)(((mchunkptr)((char*)(p) +\
    (chunksize(p))))->prev_foot ^ mparams.magic))
#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
  mark_inuse_foot(M,p,s))
#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
  mark_inuse_foot(M,p,s))
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
   mark_inuse_foot(M, p, s))
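/* With FOOTERS enabled, every in-use chunk stores (mstate ^ mparams.magic)
   in the prev_foot field of the chunk that follows it; get_mstate_for()
   recovers the owning malloc space from that footer and doubles as a cheap
   integrity check when a block is freed. */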
static void pre_fork(void)         { ACQUIRE_LOCK(&(gm)->mutex); }
static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); }
static void post_fork_child(void)  { INITIAL_LOCK(&(gm)->mutex); }
static int init_mparams(void) {
#ifdef NEED_GLOBAL_LOCK_INIT
  if (malloc_global_mutex_status <= 0)
    init_malloc_global_mutex();
  ACQUIRE_MALLOC_GLOBAL_LOCK();
  if (mparams.magic == 0) {
    psize = malloc_getpagesize;
    gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize);
    SYSTEM_INFO system_info;
    GetSystemInfo(&system_info);
    psize = system_info.dwPageSize;
    gsize = ((DEFAULT_GRANULARITY != 0)?
             DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);
    if ((sizeof(size_t) != sizeof(char*)) ||
        (MAX_SIZE_T < MIN_CHUNK_SIZE)  ||
        (sizeof(int) < 4)  ||
        (MALLOC_ALIGNMENT < (size_t)8U) ||
        ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
        ((MCHUNK_SIZE      & (MCHUNK_SIZE-SIZE_T_ONE))      != 0) ||
        ((gsize            & (gsize-SIZE_T_ONE))            != 0) ||
        ((psize            & (psize-SIZE_T_ONE))            != 0))
    mparams.granularity = gsize;
    mparams.page_size = psize;
    mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
    mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
#if MORECORE_CONTIGUOUS
    mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
    mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
    gm->mflags = mparams.default_mflags;
    (void)INITIAL_LOCK(&gm->mutex);
    pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child);
    magic = (size_t)((size_t)time(TNULL) ^ (size_t)0x55555555U);
    magic |= (size_t)8U;
    magic &= ~(size_t)7U;
    (*(volatile size_t *)(&(mparams.magic))) = magic;
  RELEASE_MALLOC_GLOBAL_LOCK();
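/* init_mparams runs once under the global lock: it validates the build-time
   size and alignment assumptions, records the page size and allocation
   granularity, and derives a per-process magic value (bit 3 set, low three
   bits otherwise cleared) used for footer checks and mstate validation. */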
static int change_mparam(int param_number, int value) {
  ensure_initialization();
  val = (value == -1)? MAX_SIZE_T : (size_t)value;
  switch(param_number) {
  case M_TRIM_THRESHOLD:
    mparams.trim_threshold = val;
    if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
      mparams.granularity = val;
  case M_MMAP_THRESHOLD:
    mparams.mmap_threshold = val;
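/* -------------------------- Debugging support -------------------------- */
/* The do_check_* routines below back the check_* macros defined earlier; in
   non-debug builds those macros expand to nothing.  They walk chunks, bins
   and segments and assert the structural invariants the allocator relies on. */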
static void do_check_any_chunk(mstate m, mchunkptr p) {
  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  assert(ok_address(m, p));
static void do_check_top_chunk(mstate m, mchunkptr p) {
  msegmentptr sp = segment_holding(m, (char*)p);
  size_t  sz = p->head & ~INUSE_BITS;
  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  assert(ok_address(m, p));
  assert(sz == m->topsize);
  assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
  assert(!pinuse(chunk_plus_offset(p, sz)));
static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
  size_t  sz = chunksize(p);
  size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);
  assert(is_mmapped(p));
  assert(use_mmap(m));
  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  assert(ok_address(m, p));
  assert(!is_small(sz));
  assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
  assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
  assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
static void do_check_inuse_chunk(mstate m, mchunkptr p) {
  do_check_any_chunk(m, p);
  assert(is_inuse(p));
  assert(next_pinuse(p));
  assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
    do_check_mmapped_chunk(m, p);
static void do_check_free_chunk(mstate m, mchunkptr p) {
  size_t sz = chunksize(p);
  mchunkptr next = chunk_plus_offset(p, sz);
  do_check_any_chunk(m, p);
  assert(!is_inuse(p));
  assert(!next_pinuse(p));
  assert (!is_mmapped(p));
  if (p != m->dv && p != m->top) {
    if (sz >= MIN_CHUNK_SIZE) {
      assert((sz & CHUNK_ALIGN_MASK) == 0);
      assert(is_aligned(chunk2mem(p)));
      assert(next->prev_foot == sz);
      assert (next == m->top || is_inuse(next));
      assert(p->fd->bk == p);
      assert(p->bk->fd == p);
      assert(sz == SIZE_T_SIZE);
static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
    mchunkptr p = mem2chunk(mem);
    size_t sz = p->head & ~INUSE_BITS;
    do_check_inuse_chunk(m, p);
    assert((sz & CHUNK_ALIGN_MASK) == 0);
    assert(sz >= MIN_CHUNK_SIZE);
    assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
static void do_check_tree(mstate m, tchunkptr t) {
  bindex_t tindex = t->index;
  size_t tsize = chunksize(t);
  compute_tree_index(tsize, idx);
  assert(tindex == idx);
  assert(tsize >= MIN_LARGE_SIZE);
  assert(tsize >= minsize_for_tree_index(idx));
  assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
    do_check_any_chunk(m, ((mchunkptr)u));
    assert(u->index == tindex);
    assert(chunksize(u) == tsize);
    assert(!is_inuse(u));
    assert(!next_pinuse(u));
    assert(u->fd->bk == u);
    assert(u->bk->fd == u);
    if (u->parent == 0) {
      assert(u->child[0] == 0);
      assert(u->child[1] == 0);
      assert(u->parent != u);
      assert (u->parent->child[0] == u ||
              u->parent->child[1] == u ||
              *((tbinptr*)(u->parent)) == u);
      if (u->child[0] != 0) {
        assert(u->child[0]->parent == u);
        assert(u->child[0] != u);
        do_check_tree(m, u->child[0]);
      if (u->child[1] != 0) {
        assert(u->child[1]->parent == u);
        assert(u->child[1] != u);
        do_check_tree(m, u->child[1]);
      if (u->child[0] != 0 && u->child[1] != 0) {
        assert(chunksize(u->child[0]) < chunksize(u->child[1]));
static void do_check_treebin(mstate m, bindex_t i) {
  tbinptr* tb = treebin_at(m, i);
  int empty = (m->treemap & (1U << i)) == 0;
    do_check_tree(m, t);
static void do_check_smallbin(mstate m, bindex_t i) {
  sbinptr b = smallbin_at(m, i);
  mchunkptr p = b->bk;
  unsigned int empty = (m->smallmap & (1U << i)) == 0;
    for (; p != b; p = p->bk) {
      size_t size = chunksize(p);
      do_check_free_chunk(m, p);
      assert(small_index(size) == i);
      assert(p->bk == b || chunksize(p->bk) == chunksize(p));
      if (q->head != FENCEPOST_HEAD)
        do_check_inuse_chunk(m, q);
static int bin_find(mstate m, mchunkptr x) {
  size_t size = chunksize(x);
  if (is_small(size)) {
    bindex_t sidx = small_index(size);
    sbinptr b = smallbin_at(m, sidx);
    if (smallmap_is_marked(m, sidx)) {
      } while ((p = p->fd) != b);
    compute_tree_index(size, tidx);
    if (treemap_is_marked(m, tidx)) {
      tchunkptr t = *treebin_at(m, tidx);
      size_t sizebits = size << leftshift_for_tree_index(tidx);
      while (t != 0 && chunksize(t) != size) {
        t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
          if (u == (tchunkptr)x)
        } while ((u = u->fd) != t);
static size_t traverse_and_check(mstate m) {
  if (is_initialized(m)) {
    msegmentptr s = &m->seg;
    sum += m->topsize + TOP_FOOT_SIZE;
      mchunkptr q = align_as_chunk(s->base);
      mchunkptr lastq = 0;
      while (segment_holds(s, q) &&
             q != m->top && q->head != FENCEPOST_HEAD) {
        sum += chunksize(q);
          assert(!bin_find(m, q));
          do_check_inuse_chunk(m, q);
          assert(q == m->dv || bin_find(m, q));
          assert(lastq == 0 || is_inuse(lastq));
          do_check_free_chunk(m, q);
static void do_check_malloc_state(mstate m) {
  for (i = 0; i < NSMALLBINS; ++i)
    do_check_smallbin(m, i);
  for (i = 0; i < NTREEBINS; ++i)
    do_check_treebin(m, i);
  if (m->dvsize != 0) {
    do_check_any_chunk(m, m->dv);
    assert(m->dvsize == chunksize(m->dv));
    assert(m->dvsize >= MIN_CHUNK_SIZE);
    assert(bin_find(m, m->dv) == 0);
    do_check_top_chunk(m, m->top);
    assert(m->topsize > 0);
    assert(bin_find(m, m->top) == 0);
  total = traverse_and_check(m);
  assert(total <= m->footprint);
  assert(m->footprint <= m->max_footprint);
static struct mallinfo internal_mallinfo(mstate m) {
  struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  ensure_initialization();
  if (!PREACTION(m)) {
    check_malloc_state(m);
    if (is_initialized(m)) {
      size_t nfree = SIZE_T_ONE;
      size_t mfree = m->topsize + TOP_FOOT_SIZE;
      msegmentptr s = &m->seg;
        mchunkptr q = align_as_chunk(s->base);
        while (segment_holds(s, q) &&
               q != m->top && q->head != FENCEPOST_HEAD) {
          size_t sz = chunksize(q);
      nm.hblkhd = m->footprint - sum;
      nm.usmblks = m->max_footprint;
      nm.uordblks = m->footprint - mfree;
      nm.fordblks = mfree;
      nm.keepcost = m->topsize;
static void internal_malloc_stats(mstate m) {
  ensure_initialization();
  if (!PREACTION(m)) {
    check_malloc_state(m);
    if (is_initialized(m)) {
      msegmentptr s = &m->seg;
      maxfp = m->max_footprint;
      used = fp - (m->topsize + TOP_FOOT_SIZE);
        mchunkptr q = align_as_chunk(s->base);
        while (segment_holds(s, q) &&
               q != m->top && q->head != FENCEPOST_HEAD) {
            used -= chunksize(q);
    fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
    fprintf(stderr, "system bytes     = %10lu\n", (unsigned long)(fp));
    fprintf(stderr, "in use bytes     = %10lu\n", (unsigned long)(used));
#define insert_small_chunk(M, P, S) {\
  bindex_t I  = small_index(S);\
  mchunkptr B = smallbin_at(M, I);\
  assert(S >= MIN_CHUNK_SIZE);\
  if (!smallmap_is_marked(M, I))\
    mark_smallmap(M, I);\
  else if (RTCHECK(ok_address(M, B->fd)))\
    CORRUPTION_ERROR_ACTION(M);\
#define unlink_small_chunk(M, P, S) {\
  mchunkptr F = P->fd;\
  mchunkptr B = P->bk;\
  bindex_t I = small_index(S);\
  assert(chunksize(P) == small_index2size(I));\
  if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \
      clear_smallmap(M, I);\
    else if (RTCHECK(B == smallbin_at(M,I) ||\
                     (ok_address(M, B) && B->fd == P))) {\
      CORRUPTION_ERROR_ACTION(M);\
    CORRUPTION_ERROR_ACTION(M);\
#define unlink_first_small_chunk(M, B, P, I) {\
  mchunkptr F = P->fd;\
  assert(chunksize(P) == small_index2size(I));\
    clear_smallmap(M, I);\
  else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\
    CORRUPTION_ERROR_ACTION(M);\
#define replace_dv(M, P, S) {\
  size_t DVS = M->dvsize;\
  assert(is_small(DVS));\
    mchunkptr DV = M->dv;\
    insert_small_chunk(M, DV, DVS);\
#define insert_large_chunk(M, X, S) {\
  compute_tree_index(S, I);\
  H = treebin_at(M, I);\
  X->child[0] = X->child[1] = 0;\
  if (!treemap_is_marked(M, I)) {\
    mark_treemap(M, I);\
    X->parent = (tchunkptr)H;\
    size_t K = S << leftshift_for_tree_index(I);\
      if (chunksize(T) != S) {\
        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
        else if (RTCHECK(ok_address(M, C))) {\
          CORRUPTION_ERROR_ACTION(M);\
        tchunkptr F = T->fd;\
        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
          CORRUPTION_ERROR_ACTION(M);\
#define unlink_large_chunk(M, X) {\
  tchunkptr XP = X->parent;\
    tchunkptr F = X->fd;\
    if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\
      CORRUPTION_ERROR_ACTION(M);\
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
        ((R = *(RP = &(X->child[0]))) != 0)) {\
      while ((*(CP = &(R->child[1])) != 0) ||\
             (*(CP = &(R->child[0])) != 0)) {\
      if (RTCHECK(ok_address(M, RP)))\
        CORRUPTION_ERROR_ACTION(M);\
    tbinptr* H = treebin_at(M, X->index);\
      if ((*H = R) == 0) \
        clear_treemap(M, X->index);\
    else if (RTCHECK(ok_address(M, XP))) {\
      if (XP->child[0] == X) \
        CORRUPTION_ERROR_ACTION(M);\
      if (RTCHECK(ok_address(M, R))) {\
        if ((C0 = X->child[0]) != 0) {\
          if (RTCHECK(ok_address(M, C0))) {\
            CORRUPTION_ERROR_ACTION(M);\
        if ((C1 = X->child[1]) != 0) {\
          if (RTCHECK(ok_address(M, C1))) {\
            CORRUPTION_ERROR_ACTION(M);\
        CORRUPTION_ERROR_ACTION(M);\
#define insert_chunk(M, P, S)\
  if (is_small(S)) insert_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
#define unlink_chunk(M, P, S)\
  if (is_small(S)) unlink_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
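/* insert_chunk/unlink_chunk dispatch on chunk size: small free chunks go
   into the doubly-linked small bins, larger ones into the size-keyed tree
   bins.  Every link/unlink is guarded by RTCHECK address checks so a
   corrupted free list triggers CORRUPTION_ERROR_ACTION instead of a wild
   pointer write. */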
#define internal_malloc(m, b) mspace_malloc(m, b)
#define internal_free(m, mem) mspace_free(m,mem);
#define internal_malloc(m, b)\
  ((m == gm)? dlmalloc(b) : mspace_malloc(m, b))
#define internal_free(m, mem)\
   if (m == gm) dlfree(mem); else mspace_free(m,mem);
#define internal_malloc(m, b) dlmalloc(b)
#define internal_free(m, mem) dlfree(mem)
static void* mmap_alloc(mstate m, size_t nb) {
  size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  if (m->footprint_limit != 0) {
    size_t fp = m->footprint + mmsize;
    if (fp <= m->footprint || fp > m->footprint_limit)
    char* mm = (char*)(CALL_DIRECT_MMAP(mmsize));
      size_t offset = align_offset(chunk2mem(mm));
      size_t psize = mmsize - offset - MMAP_FOOT_PAD;
      mchunkptr p = (mchunkptr)(mm + offset);
      p->prev_foot = offset;
      mark_inuse_foot(m, p, psize);
      chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
      chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
      if (m->least_addr == 0 || mm < m->least_addr)
      if ((m->footprint += mmsize) > m->max_footprint)
        m->max_footprint = m->footprint;
      assert(is_aligned(chunk2mem(p)));
      check_mmapped_chunk(m, p);
      return chunk2mem(p);
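/* Directly mmapped chunks carry the distance from the mapping start in
   prev_foot and are terminated by two fencepost headers, so dlfree can
   recover the original mapping base and length and return the whole region
   with CALL_MUNMAP. */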
static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) {
  size_t oldsize = chunksize(oldp);
  if (oldsize >= nb + SIZE_T_SIZE &&
      (oldsize - nb) <= (mparams.granularity << 1))
    size_t offset = oldp->prev_foot;
    size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
    size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
    char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
                                  oldmmsize, newmmsize, flags);
      mchunkptr newp = (mchunkptr)(cp + offset);
      size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
      mark_inuse_foot(m, newp, psize);
      chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
      chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
      if (cp < m->least_addr)
      if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
        m->max_footprint = m->footprint;
      check_mmapped_chunk(m, newp);
static void init_top(mstate m, mchunkptr p, size_t psize) {
  size_t offset = align_offset(chunk2mem(p));
  p = (mchunkptr)((char*)p + offset);
  p->head = psize | PINUSE_BIT;
  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
  m->trim_check = mparams.trim_threshold;
static void init_bins(mstate m) {
  for (i = 0; i < NSMALLBINS; ++i) {
    sbinptr bin = smallbin_at(m,i);
    bin->fd = bin->bk = bin;
static void reset_on_error(mstate m) {
  ++malloc_corruption_error_count;
  m->smallmap = m->treemap = 0;
  m->dvsize = m->topsize = 0;
  for (i = 0; i < NTREEBINS; ++i)
    *treebin_at(m, i) = 0;
static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
  mchunkptr p = align_as_chunk(newbase);
  mchunkptr oldfirst = align_as_chunk(oldbase);
  size_t psize = (char*)oldfirst - (char*)p;
  mchunkptr q = chunk_plus_offset(p, nb);
  size_t qsize = psize - nb;
  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
  assert((char*)oldfirst > (char*)q);
  assert(pinuse(oldfirst));
  assert(qsize >= MIN_CHUNK_SIZE);
  if (oldfirst == m->top) {
    size_t tsize = m->topsize += qsize;
    q->head = tsize | PINUSE_BIT;
    check_top_chunk(m, q);
  else if (oldfirst == m->dv) {
    size_t dsize = m->dvsize += qsize;
    set_size_and_pinuse_of_free_chunk(q, dsize);
    if (!is_inuse(oldfirst)) {
      size_t nsize = chunksize(oldfirst);
      unlink_chunk(m, oldfirst, nsize);
      oldfirst = chunk_plus_offset(oldfirst, nsize);
    set_free_with_pinuse(q, qsize, oldfirst);
    insert_chunk(m, q, qsize);
    check_free_chunk(m, q);
  check_malloced_chunk(m, chunk2mem(p), nb);
  return chunk2mem(p);
static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
  char* old_top = (char*)m->top;
  msegmentptr oldsp = segment_holding(m, old_top);
  char* old_end = oldsp->base + oldsp->size;
  size_t ssize = pad_request(sizeof(struct malloc_segment));
  char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  size_t offset = align_offset(chunk2mem(rawsp));
  char* asp = rawsp + offset;
  char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
  mchunkptr sp = (mchunkptr)csp;
  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
  mchunkptr tnext = chunk_plus_offset(sp, ssize);
  mchunkptr p = tnext;
  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
  assert(is_aligned(ss));
  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
  m->seg.base = tbase;
  m->seg.size = tsize;
  m->seg.sflags = mmapped;
    mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
    p->head = FENCEPOST_HEAD;
    if ((char*)(&(nextp->head)) < old_end)
  assert(nfences >= 2);
  if (csp != old_top) {
    mchunkptr q = (mchunkptr)old_top;
    size_t psize = csp - old_top;
    mchunkptr tn = chunk_plus_offset(q, psize);
    set_free_with_pinuse(q, psize, tn);
    insert_chunk(m, q, psize);
  check_top_chunk(m, m->top);
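/* add_segment links a freshly obtained, non-adjacent region into the
   segment list: the new region becomes the top chunk, the previous segment
   record is kept alive inside a small in-use chunk carved from the end of
   the old top space and protected by fencepost headers, and whatever
   remains of the old top is freed into a bin. */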
static void* sys_alloc(mstate m, size_t nb) {
  char* tbase = CMFAIL;
  flag_t mmap_flag = 0;
  ensure_initialization();
  if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {
    void* mem = mmap_alloc(m, nb);
  asize = granularity_align(nb + SYS_ALLOC_PADDING);
  if (m->footprint_limit != 0) {
    size_t fp = m->footprint + asize;
    if (fp <= m->footprint || fp > m->footprint_limit)
  if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
    size_t ssize = asize;
    msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
    ACQUIRE_MALLOC_GLOBAL_LOCK();
      char* base = (char*)CALL_MORECORE(0);
      if (base != CMFAIL) {
        if (!is_page_aligned(base))
          ssize += (page_align((size_t)base) - (size_t)base);
        fp = m->footprint + ssize;
        if (ssize > nb && ssize < HALF_MAX_SIZE_T &&
            (m->footprint_limit == 0 ||
             (fp > m->footprint && fp <= m->footprint_limit)) &&
            (br = (char*)(CALL_MORECORE(ssize))) == base) {
      ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING);
      if (ssize < HALF_MAX_SIZE_T &&
          (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) {
    if (tbase == CMFAIL) {
        if (ssize < HALF_MAX_SIZE_T &&
            ssize < nb + SYS_ALLOC_PADDING) {
          size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize);
          if (esize < HALF_MAX_SIZE_T) {
            char* end = (char*)CALL_MORECORE(esize);
              (void) CALL_MORECORE(-ssize);
      disable_contiguous(m);
    RELEASE_MALLOC_GLOBAL_LOCK();
  if (HAVE_MMAP && tbase == CMFAIL) {
    char* mp = (char*)(CALL_MMAP(asize));
      mmap_flag = USE_MMAP_BIT;
  if (HAVE_MORECORE && tbase == CMFAIL) {
    if (asize < HALF_MAX_SIZE_T) {
      ACQUIRE_MALLOC_GLOBAL_LOCK();
      br = (char*)(CALL_MORECORE(asize));
      end = (char*)(CALL_MORECORE(0));
      RELEASE_MALLOC_GLOBAL_LOCK();
      if (br != CMFAIL && end != CMFAIL && br < end) {
        size_t ssize = end - br;
        if (ssize > nb + TOP_FOOT_SIZE) {
  if (tbase != CMFAIL) {
    if ((m->footprint += tsize) > m->max_footprint)
      m->max_footprint = m->footprint;
    if (!is_initialized(m)) {
      if (m->least_addr == 0 || tbase < m->least_addr)
        m->least_addr = tbase;
      m->seg.base = tbase;
      m->seg.size = tsize;
      m->seg.sflags = mmap_flag;
      m->magic = mparams.magic;
      m->release_checks = MAX_RELEASE_CHECK_RATE;
      init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
        mchunkptr mn = next_chunk(mem2chunk(m));
        init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
      msegmentptr sp = &m->seg;
      while (sp != 0 && tbase != sp->base + sp->size)
        sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
          !is_extern_segment(sp) &&
          (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
          segment_holds(sp, m->top)) {
        init_top(m, m->top, m->topsize + tsize);
        if (tbase < m->least_addr)
          m->least_addr = tbase;
        while (sp != 0 && sp->base != tbase + tsize)
          sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
            !is_extern_segment(sp) &&
            (sp->sflags & USE_MMAP_BIT) == mmap_flag) {
          char* oldbase = sp->base;
          return prepend_alloc(m, tbase, oldbase, nb);
        add_segment(m, tbase, tsize, mmap_flag);
  if (nb < m->topsize) {
    size_t rsize = m->topsize -= nb;
    mchunkptr p = m->top;
    mchunkptr r = m->top = chunk_plus_offset(p, nb);
    r->head = rsize | PINUSE_BIT;
    set_size_and_pinuse_of_inuse_chunk(m, p, nb);
    check_top_chunk(m, m->top);
    check_malloced_chunk(m, chunk2mem(p), nb);
    return chunk2mem(p);
  MALLOC_FAILURE_ACTION;
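/* sys_alloc tries, in order: a direct mmap for requests at or above the
   mmap threshold, extending the existing top segment contiguously via
   MORECORE, a fresh mmap region, and finally a non-contiguous MORECORE
   call.  Whatever space is obtained is either merged into an adjacent
   segment or added as a new one before the request is carved from top. */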
static size_t release_unused_segments(mstate m) {
  size_t released = 0;
  msegmentptr pred = &m->seg;
  msegmentptr sp = pred->next;
    char* base = sp->base;
    size_t size = sp->size;
    msegmentptr next = sp->next;
    if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
      mchunkptr p = align_as_chunk(base);
      size_t psize = chunksize(p);
      if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
        tchunkptr tp = (tchunkptr)p;
        assert(segment_holds(sp, (char*)sp));
          unlink_large_chunk(m, tp);
        if (CALL_MUNMAP(base, size) == 0) {
          m->footprint -= size;
          insert_large_chunk(m, tp, psize);
    if (NO_SEGMENT_TRAVERSAL)
  m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)?
                       (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE);
static int sys_trim(mstate m, size_t pad) {
  size_t released = 0;
  ensure_initialization();
  if (pad < MAX_REQUEST && is_initialized(m)) {
    pad += TOP_FOOT_SIZE;
    if (m->topsize > pad) {
      size_t unit = mparams.granularity;
      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
      msegmentptr sp = segment_holding(m, (char*)m->top);
      if (!is_extern_segment(sp)) {
        if (is_mmapped_segment(sp)) {
              sp->size >= extra &&
              !has_segment_link(m, sp)) {
            size_t newsize = sp->size - extra;
            if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
                (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
        else if (HAVE_MORECORE) {
          if (extra >= HALF_MAX_SIZE_T)
            extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
          ACQUIRE_MALLOC_GLOBAL_LOCK();
            char* old_br = (char*)(CALL_MORECORE(0));
            if (old_br == sp->base + sp->size) {
              char* rel_br = (char*)(CALL_MORECORE(-extra));
              char* new_br = (char*)(CALL_MORECORE(0));
              if (rel_br != CMFAIL && new_br < old_br)
                released = old_br - new_br;
          RELEASE_MALLOC_GLOBAL_LOCK();
      if (released != 0) {
        sp->size -= released;
        m->footprint -= released;
        init_top(m, m->top, m->topsize - released);
        check_top_chunk(m, m->top);
      released += release_unused_segments(m);
    if (released == 0 && m->topsize > m->trim_check)
      m->trim_check = MAX_SIZE_T;
  return (released != 0)? 1 : 0;
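/* Trimming returns memory to the system in whole granularity units: the top
   of the active segment is shrunk via MREMAP/MUNMAP or a negative MORECORE
   call, and completely free mmapped segments are unmapped by
   release_unused_segments(), which dlfree also invokes periodically. */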
static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {
  mchunkptr next = chunk_plus_offset(p, psize);
    size_t prevsize = p->prev_foot;
    if (is_mmapped(p)) {
      psize += prevsize + MMAP_FOOT_PAD;
      if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
        m->footprint -= psize;
    prev = chunk_minus_offset(p, prevsize);
    if (RTCHECK(ok_address(m, prev))) {
        unlink_chunk(m, p, prevsize);
      else if ((next->head & INUSE_BITS) == INUSE_BITS) {
        set_free_with_pinuse(p, psize, next);
      CORRUPTION_ERROR_ACTION(m);
  if (RTCHECK(ok_address(m, next))) {
    if (!cinuse(next)) {
      if (next == m->top) {
        size_t tsize = m->topsize += psize;
        p->head = tsize | PINUSE_BIT;
      else if (next == m->dv) {
        size_t dsize = m->dvsize += psize;
        set_size_and_pinuse_of_free_chunk(p, dsize);
        size_t nsize = chunksize(next);
        unlink_chunk(m, next, nsize);
        set_size_and_pinuse_of_free_chunk(p, psize);
      set_free_with_pinuse(p, psize, next);
    insert_chunk(m, p, psize);
    CORRUPTION_ERROR_ACTION(m);
static void* tmalloc_large(mstate m, size_t nb) {
  compute_tree_index(nb, idx);
  if ((t = *treebin_at(m, idx)) != 0) {
    size_t sizebits = nb << leftshift_for_tree_index(idx);
      size_t trem = chunksize(t) - nb;
        if ((rsize = trem) == 0)
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
      if (rt != 0 && rt != t)
  if (t == 0 && v == 0) {
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
    if (leftbits != 0) {
      binmap_t leastbit = least_bit(leftbits);
      compute_bit2idx(leastbit, i);
      t = *treebin_at(m, i);
    size_t trem = chunksize(t) - nb;
    t = leftmost_child(t);
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
    if (RTCHECK(ok_address(m, v))) {
      mchunkptr r = chunk_plus_offset(v, nb);
      assert(chunksize(v) == rsize + nb);
      if (RTCHECK(ok_next(v, r))) {
        unlink_large_chunk(m, v);
        if (rsize < MIN_CHUNK_SIZE)
          set_inuse_and_pinuse(m, v, (rsize + nb));
          set_size_and_pinuse_of_inuse_chunk(m, v, nb);
          set_size_and_pinuse_of_free_chunk(r, rsize);
          insert_chunk(m, r, rsize);
        return chunk2mem(v);
    CORRUPTION_ERROR_ACTION(m);
static void* tmalloc_small(mstate m, size_t nb) {
  binmap_t leastbit = least_bit(m->treemap);
  compute_bit2idx(leastbit, i);
  v = t = *treebin_at(m, i);
  rsize = chunksize(t) - nb;
  while ((t = leftmost_child(t)) != 0) {
    size_t trem = chunksize(t) - nb;
  if (RTCHECK(ok_address(m, v))) {
    mchunkptr r = chunk_plus_offset(v, nb);
    assert(chunksize(v) == rsize + nb);
    if (RTCHECK(ok_next(v, r))) {
      unlink_large_chunk(m, v);
      if (rsize < MIN_CHUNK_SIZE)
        set_inuse_and_pinuse(m, v, (rsize + nb));
        set_size_and_pinuse_of_inuse_chunk(m, v, nb);
        set_size_and_pinuse_of_free_chunk(r, rsize);
        replace_dv(m, r, rsize);
      return chunk2mem(v);
  CORRUPTION_ERROR_ACTION(m);
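/* tmalloc_large performs a best-fit search of the tree bin for nb (falling
   back to the smallest chunk in any larger bin), while tmalloc_small takes
   the smallest available tree chunk when the small bins cannot satisfy a
   small request.  In both cases the remainder, if large enough, is split
   off and either re-binned or made the new designated victim. */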
void* dlmalloc(size_t bytes) {
  ensure_initialization();
  if (!PREACTION(gm)) {
    if (bytes <= MAX_SMALL_REQUEST) {
      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
      idx = small_index(nb);
      smallbits = gm->smallmap >> idx;
      if ((smallbits & 0x3U) != 0) {
        idx += ~smallbits & 1;
        b = smallbin_at(gm, idx);
        assert(chunksize(p) == small_index2size(idx));
        unlink_first_small_chunk(gm, b, p, idx);
        set_inuse_and_pinuse(gm, p, small_index2size(idx));
        check_malloced_chunk(gm, mem, nb);
      else if (nb > gm->dvsize) {
        if (smallbits != 0) {
          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
          binmap_t leastbit = least_bit(leftbits);
          compute_bit2idx(leastbit, i);
          b = smallbin_at(gm, i);
          assert(chunksize(p) == small_index2size(i));
          unlink_first_small_chunk(gm, b, p, i);
          rsize = small_index2size(i) - nb;
          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
            set_inuse_and_pinuse(gm, p, small_index2size(i));
            set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
            r = chunk_plus_offset(p, nb);
            set_size_and_pinuse_of_free_chunk(r, rsize);
            replace_dv(gm, r, rsize);
          check_malloced_chunk(gm, mem, nb);
        else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
          check_malloced_chunk(gm, mem, nb);
    else if (bytes >= MAX_REQUEST)
      nb = pad_request(bytes);
      if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
        check_malloced_chunk(gm, mem, nb);
    if (nb <= gm->dvsize) {
      size_t rsize = gm->dvsize - nb;
      mchunkptr p = gm->dv;
      if (rsize >= MIN_CHUNK_SIZE) {
        mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
        set_size_and_pinuse_of_free_chunk(r, rsize);
        set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
        size_t dvs = gm->dvsize;
        set_inuse_and_pinuse(gm, p, dvs);
      check_malloced_chunk(gm, mem, nb);
    else if (nb < gm->topsize) {
      size_t rsize = gm->topsize -= nb;
      mchunkptr p = gm->top;
      mchunkptr r = gm->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
      check_top_chunk(gm, gm->top);
      check_malloced_chunk(gm, mem, nb);
    mem = sys_alloc(gm, nb);
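/* Allocation order for small requests: an exact or adjacent small bin, the
   designated victim (dv), the next non-empty small bin (splitting off the
   remainder as the new dv), then a tree chunk.  Large requests go to the
   tree bins first.  Anything still unsatisfied is cut from dv or the top
   chunk, and sys_alloc() is the final fallback for more system memory. */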
void dlfree(void* mem) {
    mchunkptr p  = mem2chunk(mem);
    mstate fm = get_mstate_for(p);
    if (!ok_magic(fm)) {
      USAGE_ERROR_ACTION(fm, p);
    if (!PREACTION(fm)) {
      check_inuse_chunk(fm, p);
      if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
        size_t psize = chunksize(p);
        mchunkptr next = chunk_plus_offset(p, psize);
          size_t prevsize = p->prev_foot;
          if (is_mmapped(p)) {
            psize += prevsize + MMAP_FOOT_PAD;
            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
              fm->footprint -= psize;
            mchunkptr prev = chunk_minus_offset(p, prevsize);
            if (RTCHECK(ok_address(fm, prev))) {
                unlink_chunk(fm, p, prevsize);
              else if ((next->head & INUSE_BITS) == INUSE_BITS) {
                set_free_with_pinuse(p, psize, next);
        if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
          if (!cinuse(next)) {
            if (next == fm->top) {
              size_t tsize = fm->topsize += psize;
              p->head = tsize | PINUSE_BIT;
              if (should_trim(fm, tsize))
            else if (next == fm->dv) {
              size_t dsize = fm->dvsize += psize;
              set_size_and_pinuse_of_free_chunk(p, dsize);
              size_t nsize = chunksize(next);
              unlink_chunk(fm, next, nsize);
              set_size_and_pinuse_of_free_chunk(p, psize);
          set_free_with_pinuse(p, psize, next);
          if (is_small(psize)) {
            insert_small_chunk(fm, p, psize);
            check_free_chunk(fm, p);
            tchunkptr tp = (tchunkptr)p;
            insert_large_chunk(fm, tp, psize);
            check_free_chunk(fm, p);
            if (--fm->release_checks == 0)
              release_unused_segments(fm);
      USAGE_ERROR_ACTION(fm, p);
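/* dlfree coalesces the released chunk with free neighbours (backwards via
   prev_foot, forwards by folding into top, the dv, or an unlinked next
   chunk) before binning it; directly mmapped chunks are unmapped instead,
   and freeing into top may trigger sys_trim. */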
void* dlcalloc(size_t n_elements, size_t elem_size) {
  if (n_elements != 0) {
    req = n_elements * elem_size;
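    /* Overflow check: the division is skipped when both operands fit in 16
       bits (their product cannot overflow a 32-bit-or-wider size_t);
       otherwise the multiplication is verified by dividing back. */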
    if (((n_elements | elem_size) & ~(size_t)0xffff) &&
        (req / n_elements != elem_size))
  mem = dlmalloc(req);
  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
    memset(mem, 0, req);
static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
  size_t oldsize = chunksize(p);
  mchunkptr next = chunk_plus_offset(p, oldsize);
  if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&
              ok_next(p, next) && ok_pinuse(next))) {
    if (is_mmapped(p)) {
      newp = mmap_resize(m, p, nb, can_move);
    else if (oldsize >= nb) {
      size_t rsize = oldsize - nb;
      if (rsize >= MIN_CHUNK_SIZE) {
        mchunkptr r = chunk_plus_offset(p, nb);
        set_inuse(m, p, nb);
        set_inuse(m, r, rsize);
        dispose_chunk(m, r, rsize);
    else if (next == m->top) {
      if (oldsize + m->topsize > nb) {
        size_t newsize = oldsize + m->topsize;
        size_t newtopsize = newsize - nb;
        mchunkptr newtop = chunk_plus_offset(p, nb);
        set_inuse(m, p, nb);
        newtop->head = newtopsize |PINUSE_BIT;
        m->topsize = newtopsize;
    else if (next == m->dv) {
      size_t dvs = m->dvsize;
      if (oldsize + dvs >= nb) {
        size_t dsize = oldsize + dvs - nb;
        if (dsize >= MIN_CHUNK_SIZE) {
          mchunkptr r = chunk_plus_offset(p, nb);
          mchunkptr n = chunk_plus_offset(r, dsize);
          set_inuse(m, p, nb);
          set_size_and_pinuse_of_free_chunk(r, dsize);
          size_t newsize = oldsize + dvs;
          set_inuse(m, p, newsize);
    else if (!cinuse(next)) {
      size_t nextsize = chunksize(next);
      if (oldsize + nextsize >= nb) {
        size_t rsize = oldsize + nextsize - nb;
        unlink_chunk(m, next, nextsize);
        if (rsize < MIN_CHUNK_SIZE) {
          size_t newsize = oldsize + nextsize;
          set_inuse(m, p, newsize);
          mchunkptr r = chunk_plus_offset(p, nb);
          set_inuse(m, p, nb);
          set_inuse(m, r, rsize);
          dispose_chunk(m, r, rsize);
    USAGE_ERROR_ACTION(m, chunk2mem(p));
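/* try_realloc_chunk grows or shrinks a block in place when it can: by
   splitting off a trailing remainder, absorbing the adjacent top, dv, or a
   free next chunk, or resizing an mmapped chunk via mmap_resize.  Only when
   none of these apply does the caller fall back to allocate-copy-free. */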
3459static void* internal_memalign(mstate m,
size_t alignment,
size_t bytes) {
3461 if (alignment < MIN_CHUNK_SIZE)
3462 alignment = MIN_CHUNK_SIZE;
3463 if ((alignment & (alignment-SIZE_T_ONE)) != 0) {
3464 size_t a = MALLOC_ALIGNMENT << 1;
3465 while (a < alignment) a <<= 1;
3468 if (bytes >= MAX_REQUEST - alignment) {
3470 MALLOC_FAILURE_ACTION;
3474 size_t nb = request2size(bytes);
3475 size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
3476 mem = internal_malloc(m, req);
3478 mchunkptr p = mem2chunk(mem);
3481 if ((((size_t)(mem)) & (alignment - 1)) != 0) {
3490 char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment - SIZE_T_ONE)) & -alignment));
3493 char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? br : br+alignment;
3495 mchunkptr newp = (mchunkptr)pos;
3496 size_t leadsize = pos - (char*)(p);
3497 size_t newsize = chunksize(p) - leadsize;
3499 if (is_mmapped(p)) {
3500 newp->prev_foot = p->prev_foot + leadsize;
3501 newp->head = newsize;
3504 set_inuse(m, newp, newsize);
3505 set_inuse(m, p, leadsize);
3506 dispose_chunk(m, p, leadsize);
3512 if (!is_mmapped(p)) {
3513 size_t size = chunksize(p);
3514 if (size > nb + MIN_CHUNK_SIZE) {
3515 size_t remainder_size = size - nb;
3516 mchunkptr remainder = chunk_plus_offset(p, nb);
3517 set_inuse(m, p, nb);
3518 set_inuse(m, remainder, remainder_size);
3519 dispose_chunk(m, remainder, remainder_size);
3524 assert (chunksize(p) >= nb);
3525 assert(((size_t)mem & (alignment - 1)) == 0);
3526 check_inuse_chunk(m, p);
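/* ialloc: common worker for independent_calloc and independent_comalloc.
   Allocates one contiguous block and carves it into n_elements chunks plus,
   when the caller did not supply a chunks[] array, a trailing chunk holding
   the returned pointer array. opts bit 0 means all elements share the size
   sizes[0]; opts bit 1 requests zeroing of the elements. */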
3540static void** ialloc(mstate m, size_t n_elements, size_t* sizes, int opts, void* chunks[]) {
3546 size_t element_size;
3547 size_t contents_size;
3551 size_t remainder_size;
3553 mchunkptr array_chunk;
3558 ensure_initialization();
3561 if (n_elements == 0)
3568 if (n_elements == 0)
3569 return (void**)internal_malloc(m, 0);
3571 array_size = request2size(n_elements * (sizeof(void*)));
3576 element_size = request2size(*sizes);
3577 contents_size = n_elements * element_size;
3582 for (i = 0; i != n_elements; ++i)
3583 contents_size += request2size(sizes[i]);
3586 size = contents_size + array_size;
3593 was_enabled = use_mmap(m);
3595 mem = internal_malloc(m, size - CHUNK_OVERHEAD);
3601 if (PREACTION(m)) return 0;
3603 remainder_size = chunksize(p);
3605 assert(!is_mmapped(p));
3608 memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
3613 size_t array_chunk_size;
3614 array_chunk = chunk_plus_offset(p, contents_size);
3615 array_chunk_size = remainder_size - contents_size;
3616 marray = (void**) (chunk2mem(array_chunk));
3617 set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
3618 remainder_size = contents_size;
3622 for (i = 0; ; ++i) {
3623 marray[i] = chunk2mem(p);
3624 if (i != n_elements-1) {
3625 if (element_size != 0)
3626 size = element_size;
3628 size = request2size(sizes[i]);
3629 remainder_size -= size;
3630 set_size_and_pinuse_of_inuse_chunk(m, p, size);
3631 p = chunk_plus_offset(p, size);
3634 set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
3640 if (marray != chunks) {
3642 if (element_size != 0) {
3643 assert(remainder_size == element_size);
3646 assert(remainder_size == request2size(sizes[i]));
3648 check_inuse_chunk(m, mem2chunk(marray));
3650 for (i = 0; i != n_elements; ++i)
3651 check_inuse_chunk(m, mem2chunk(marray[i]));
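/* internal_bulk_free: free each non-null pointer in array[], clearing freed
   entries to 0. When the next entry to free is the physically adjacent
   chunk, the two are merged before disposal. Returns the count of pointers
   that could not be freed. */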
3666static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) {
3668 if (!PREACTION(m)) {
3670 void** fence = &(array[nelem]);
3671 for (a = array; a != fence; ++a) {
3674 mchunkptr p = mem2chunk(mem);
3675 size_t psize = chunksize(p);
3677 if (get_mstate_for(p) != m) {
3682 check_inuse_chunk(m, p);
3684 if (RTCHECK(ok_address(m, p) && ok_inuse(p))) {
3686 mchunkptr next = next_chunk(p);
3687 if (b != fence && *b == chunk2mem(next)) {
3688 size_t newsize = chunksize(next) + psize;
3689 set_inuse(m, p, newsize);
3693 dispose_chunk(m, p, psize);
3696 CORRUPTION_ERROR_ACTION(m);
3701 if (should_trim(m, m->topsize))
3709#if MALLOC_INSPECT_ALL
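/* internal_inspect_all: walk every segment of the space, calling handler()
   once per chunk with its start/end addresses and, for in-use chunks, the
   usable payload bytes (used is 0 for free chunks, whose start is advanced
   past the bookkeeping fields). Called with the space's lock held. */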
3710static void internal_inspect_all(mstate m, void(*handler)(void *start, void *end, size_t used_bytes, void* callback_arg), void* arg) {
3716 if (is_initialized(m)) {
3717 mchunkptr top = m->top;
3719 for (s = &m->seg; s != 0; s = s->next) {
3720 mchunkptr q = align_as_chunk(s->base);
3721 while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) {
3722 mchunkptr next = next_chunk(q);
3723 size_t sz = chunksize(q);
3727 used = sz - CHUNK_OVERHEAD;
3728 start = chunk2mem(q);
3733 start = (void*)((char*)q + sizeof(struct malloc_chunk));
3736 start = (void*)((char*)q + sizeof(struct malloc_tree_chunk));
3739 if (start < (void*)next)
3740 handler(start, next, used, arg);
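/* dlrealloc: realloc(0, n) behaves as malloc(n); with
   REALLOC_ZERO_BYTES_FREES defined, realloc(p, 0) frees p. Otherwise the
   chunk is resized in place when try_realloc_chunk succeeds, else a new
   block is allocated, the old contents copied, and the old block freed. */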
3754void* dlrealloc(void* oldmem, size_t bytes) {
3757 mem = dlmalloc(bytes);
3759 else if (bytes >= MAX_REQUEST) {
3760 MALLOC_FAILURE_ACTION;
3762#ifdef REALLOC_ZERO_BYTES_FREES
3763 else if (bytes == 0) {
3768 size_t nb = request2size(bytes);
3769 mchunkptr oldp = mem2chunk(oldmem);
3773 mstate m = get_mstate_for(oldp);
3775 USAGE_ERROR_ACTION(m, oldmem);
3779 if (!PREACTION(m)) {
3780 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
3783 check_inuse_chunk(m, newp);
3784 mem = chunk2mem(newp);
3787 mem = internal_malloc(m, bytes);
3789 size_t oc = chunksize(oldp) - overhead_for(oldp);
3790 memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
3791 internal_free(m, oldmem);
3799void* dlrealloc_in_place(void* oldmem, size_t bytes) {
3802 if (bytes >= MAX_REQUEST) {
3803 MALLOC_FAILURE_ACTION;
3806 size_t nb = request2size(bytes);
3807 mchunkptr oldp = mem2chunk(oldmem);
3811 mstate m = get_mstate_for(oldp);
3813 USAGE_ERROR_ACTION(m, oldmem);
3817 if (!PREACTION(m)) {
3818 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
3821 check_inuse_chunk(m, newp);
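/* dlmemalign / dlposix_memalign: alignments no larger than MALLOC_ALIGNMENT
   are already guaranteed, so plain dlmalloc is used; posix_memalign
   additionally rejects alignments that are not a power-of-two multiple of
   sizeof(void*) with EINVAL. */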
3830void* dlmemalign(size_t alignment, size_t bytes) {
3831 if (alignment <= MALLOC_ALIGNMENT) {
3832 return dlmalloc(bytes);
3834 return internal_memalign(gm, alignment, bytes);
3837int dlposix_memalign(void** pp, size_t alignment, size_t bytes) {
3839 if (alignment == MALLOC_ALIGNMENT)
3840 mem = dlmalloc(bytes);
3842 size_t d = alignment / sizeof(void*);
3843 size_t r = alignment % sizeof(void*);
3844 if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0)
3846 else if (bytes <= MAX_REQUEST - alignment) {
3847 if (alignment < MIN_CHUNK_SIZE)
3848 alignment = MIN_CHUNK_SIZE;
3849 mem = internal_memalign(gm, alignment, bytes);
3860void* dlvalloc(size_t bytes) {
3862 ensure_initialization();
3863 pagesz = mparams.page_size;
3864 return dlmemalign(pagesz, bytes);
3867void* dlpvalloc(size_t bytes) {
3869 ensure_initialization();
3870 pagesz = mparams.page_size;
3871 return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
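/* dlindependent_calloc / dlindependent_comalloc: both delegate to ialloc.
   opts == 3 means "all elements the same size, zero them" (calloc-like);
   opts == 0 passes the per-element sizes[] through unchanged and does not
   clear the memory. */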
3874void** dlindependent_calloc(size_t n_elements, size_t elem_size, void* chunks[]) {
3876 size_t sz = elem_size;
3877 return ialloc(gm, n_elements, &sz, 3, chunks);
3880void** dlindependent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]) {
3882 return ialloc(gm, n_elements, sizes, 0, chunks);
3885size_t dlbulk_free(void* array[], size_t nelem) {
3886 return internal_bulk_free(gm, array, nelem);
3889#if MALLOC_INSPECT_ALL
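/* dlmalloc_inspect_all: traverse every chunk of the default space under the
   lock and report each to handler(start, end, used_bytes, arg). Illustrative
   handler sketch (count_used and total are hypothetical names, not part of
   this file):

     static void count_used(void* start, void* end, size_t used, void* arg) {
       (void)start; (void)end;
       *(size_t*)arg += used;      // used is 0 for free chunks
     }
     // size_t total = 0;
     // dlmalloc_inspect_all(count_used, &total);
*/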
3890void dlmalloc_inspect_all(void(*handler)(void *start, void *end, size_t used_bytes, void* callback_arg), void* arg) {
3895 ensure_initialization();
3896 if (!PREACTION(gm)) {
3897 internal_inspect_all(gm, handler, arg);
3903int dlmalloc_trim(size_t pad) {
3905 ensure_initialization();
3906 if (!PREACTION(gm)) {
3907 result = sys_trim(gm, pad);
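/* Footprint accounting: footprint is the memory currently obtained from the
   system, max_footprint its high-water mark, and footprint_limit an optional
   cap (0 stored internally means "no limit" and is reported as MAX_SIZE_T).
   set_footprint_limit rounds the requested limit to a granularity multiple. */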
3913size_t dlmalloc_footprint(void) {
3914 return gm->footprint;
3917size_t dlmalloc_max_footprint(void) {
3918 return gm->max_footprint;
3921size_t dlmalloc_footprint_limit(void) {
3922 size_t maf = gm->footprint_limit;
3923 return maf == 0 ? MAX_SIZE_T : maf;
3926size_t dlmalloc_set_footprint_limit(size_t bytes) {
3929 result = granularity_align(1);
3930 if (bytes == MAX_SIZE_T)
3933 result = granularity_align(bytes);
3934 return gm->footprint_limit = result;
3938struct mallinfo dlmallinfo(void) {
3939 return internal_mallinfo(gm);
3944void dlmalloc_stats() {
3945 internal_malloc_stats(gm);
3949int dlmallopt(int param_number, int value) {
3950 return change_mparam(param_number, value);
3953size_t dlmalloc_usable_size(void* mem) {
3955 mchunkptr p = mem2chunk(mem);
3957 return chunksize(p) - overhead_for(p);
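/* init_user_mstate: place a malloc_state at the start of the given region,
   mark its chunk as in use so it is never coalesced away, and initialize the
   remainder of the region as the top chunk. */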
3968static mstate init_user_mstate(char* tbase, size_t tsize) {
3969 size_t msize = pad_request(sizeof(struct malloc_state));
3971 mchunkptr msp = align_as_chunk(tbase);
3972 mstate m = (mstate)(chunk2mem(msp));
3973 memset(m, 0, msize);
3974 (void)INITIAL_LOCK(&m->mutex);
3975 msp->head = (msize|INUSE_BITS);
3976 m->seg.base = m->least_addr = tbase;
3977 m->seg.size = m->footprint = m->max_footprint = tsize;
3978 m->magic = mparams.magic;
3979 m->release_checks = MAX_RELEASE_CHECK_RATE;
3980 m->mflags = mparams.default_mflags;
3983 disable_contiguous(m);
3985 mn = next_chunk(mem2chunk(m));
3986 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
3987 check_top_chunk(m, m->top);
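/* create_mspace: build an independent allocation space backed by a freshly
   mmapped region of at least `capacity` bytes (one granularity unit when
   capacity is 0); `locked` selects per-space locking. Minimal usage sketch:

     mspace msp = create_mspace(0, 0);     // default capacity, no locking
     void*  p   = mspace_malloc(msp, 128);
     mspace_free(msp, p);
     destroy_mspace(msp);                  // returns all memory of the space
*/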
3991mspace create_mspace(size_t capacity, int locked) {
3994 ensure_initialization();
3995 msize = pad_request(sizeof(struct malloc_state));
3996 if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
3997 size_t rs = ((capacity == 0)? mparams.granularity :
3998 (capacity + TOP_FOOT_SIZE + msize));
3999 size_t tsize = granularity_align(rs);
4000 char* tbase = (char*)(CALL_MMAP(tsize));
4001 if (tbase != CMFAIL) {
4002 m = init_user_mstate(tbase, tsize);
4003 m->seg.sflags = USE_MMAP_BIT;
4004 set_lock(m, locked);
4010mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
4013 ensure_initialization();
4014 msize = pad_request(sizeof(struct malloc_state));
4015 if (capacity > msize + TOP_FOOT_SIZE &&
4016 capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
4017 m = init_user_mstate((char*)base, capacity);
4018 m->seg.sflags = EXTERN_BIT;
4019 set_lock(m, locked);
4024int mspace_track_large_chunks(mspace msp, int enable) {
4026 mstate ms = (mstate)msp;
4027 if (!PREACTION(ms)) {
4028 if (!use_mmap(ms)) {
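/* destroy_mspace: walk the space's segment list and unmap every segment the
   space mmapped itself; segments supplied via create_mspace_with_base carry
   EXTERN_BIT and are left to the caller. Returns the number of bytes freed. */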
4041size_t destroy_mspace(mspace msp) {
4043 mstate ms = (mstate)msp;
4045 msegmentptr sp = &ms->seg;
4046 (void)DESTROY_LOCK(&ms->mutex);
4048 char* base = sp->base;
4049 size_t size = sp->size;
4050 flag_t flag = sp->sflags;
4053 if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&
4054 CALL_MUNMAP(base, size) == 0)
4059 USAGE_ERROR_ACTION(ms,ms);
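/* mspace_malloc: same algorithm as dlmalloc, but operating on the caller's
   mspace: exact-fit small bins first, then the tree bins, then the
   designated victim and top chunk, and finally sys_alloc to grow the space. */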
4069void* mspace_malloc(mspace msp, size_t bytes) {
4070 mstate ms = (mstate)msp;
4071 if (!ok_magic(ms)) {
4072 USAGE_ERROR_ACTION(ms,ms);
4075 if (!PREACTION(ms)) {
4078 if (bytes <= MAX_SMALL_REQUEST) {
4081 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
4082 idx = small_index(nb);
4083 smallbits = ms->smallmap >> idx;
4085 if ((smallbits & 0x3U) != 0) {
4087 idx += ~smallbits & 1;
4088 b = smallbin_at(ms, idx);
4090 assert(chunksize(p) == small_index2size(idx));
4091 unlink_first_small_chunk(ms, b, p, idx);
4092 set_inuse_and_pinuse(ms, p, small_index2size(idx));
4094 check_malloced_chunk(ms, mem, nb);
4098 else if (nb > ms->dvsize) {
4099 if (smallbits != 0) {
4103 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
4104 binmap_t leastbit = least_bit(leftbits);
4105 compute_bit2idx(leastbit, i);
4106 b = smallbin_at(ms, i);
4108 assert(chunksize(p) == small_index2size(i));
4109 unlink_first_small_chunk(ms, b, p, i);
4110 rsize = small_index2size(i) - nb;
4112 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4113 set_inuse_and_pinuse(ms, p, small_index2size(i));
4115 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4116 r = chunk_plus_offset(p, nb);
4117 set_size_and_pinuse_of_free_chunk(r, rsize);
4118 replace_dv(ms, r, rsize);
4121 check_malloced_chunk(ms, mem, nb);
4125 else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
4126 check_malloced_chunk(ms, mem, nb);
4131 else if (bytes >= MAX_REQUEST)
4134 nb = pad_request(bytes);
4135 if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
4136 check_malloced_chunk(ms, mem, nb);
4141 if (nb <= ms->dvsize) {
4142 size_t rsize = ms->dvsize - nb;
4143 mchunkptr p = ms->dv;
4144 if (rsize >= MIN_CHUNK_SIZE) {
4145 mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
4147 set_size_and_pinuse_of_free_chunk(r, rsize);
4148 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4151 size_t dvs = ms->dvsize;
4154 set_inuse_and_pinuse(ms, p, dvs);
4157 check_malloced_chunk(ms, mem, nb);
4161 else if (nb < ms->topsize) {
4162 size_t rsize = ms->topsize -= nb;
4163 mchunkptr p = ms->top;
4164 mchunkptr r = ms->top = chunk_plus_offset(p, nb);
4165 r->head = rsize | PINUSE_BIT;
4166 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4168 check_top_chunk(ms, ms->top);
4169 check_malloced_chunk(ms, mem, nb);
4173 mem = sys_alloc(ms, nb);
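/* mspace_free: with FOOTERS the owning mspace is recovered from the chunk
   footer and msp itself is unused; otherwise msp must be the space the
   pointer was allocated from. Freed chunks are coalesced with free
   neighbours and returned to the bins, or unmapped directly if mmapped. */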
4183void mspace_free(mspace msp, void* mem) {
4185 mchunkptr p = mem2chunk(mem);
4187 mstate fm = get_mstate_for(p);
4190 mstate fm = (mstate)msp;
4192 if (!ok_magic(fm)) {
4193 USAGE_ERROR_ACTION(fm, p);
4196 if (!PREACTION(fm)) {
4197 check_inuse_chunk(fm, p);
4198 if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
4199 size_t psize = chunksize(p);
4200 mchunkptr next = chunk_plus_offset(p, psize);
4202 size_t prevsize = p->prev_foot;
4203 if (is_mmapped(p)) {
4204 psize += prevsize + MMAP_FOOT_PAD;
4205 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
4206 fm->footprint -= psize;
4210 mchunkptr prev = chunk_minus_offset(p, prevsize);
4213 if (RTCHECK(ok_address(fm, prev))) {
4215 unlink_chunk(fm, p, prevsize);
4217 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
4219 set_free_with_pinuse(p, psize, next);
4228 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
4229 if (!cinuse(next)) {
4230 if (next == fm->top) {
4231 size_t tsize = fm->topsize += psize;
4233 p->head = tsize | PINUSE_BIT;
4238 if (should_trim(fm, tsize))
4242 else if (next == fm->dv) {
4243 size_t dsize = fm->dvsize += psize;
4245 set_size_and_pinuse_of_free_chunk(p, dsize);
4249 size_t nsize = chunksize(next);
4251 unlink_chunk(fm, next, nsize);
4252 set_size_and_pinuse_of_free_chunk(p, psize);
4260 set_free_with_pinuse(p, psize, next);
4262 if (is_small(psize)) {
4263 insert_small_chunk(fm, p, psize);
4264 check_free_chunk(fm, p);
4267 tchunkptr tp = (tchunkptr)p;
4268 insert_large_chunk(fm, tp, psize);
4269 check_free_chunk(fm, p);
4270 if (--fm->release_checks == 0)
4271 release_unused_segments(fm);
4277 USAGE_ERROR_ACTION(fm, p);
4284void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
4287 mstate ms = (mstate)msp;
4288 if (!ok_magic(ms)) {
4289 USAGE_ERROR_ACTION(ms,ms);
4292 if (n_elements != 0) {
4293 req = n_elements * elem_size;
4294 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
4295 (req / n_elements != elem_size))
4298 mem = internal_malloc(ms, req);
4299 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
4300 memset(mem, 0, req);
4304void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
4307 mem = mspace_malloc(msp, bytes);
4309 else if (bytes >= MAX_REQUEST) {
4310 MALLOC_FAILURE_ACTION;
4312#ifdef REALLOC_ZERO_BYTES_FREES
4313 else if (bytes == 0) {
4314 mspace_free(msp, oldmem);
4318 size_t nb = request2size(bytes);
4319 mchunkptr oldp = mem2chunk(oldmem);
4321 mstate m = (mstate)msp;
4323 mstate m = get_mstate_for(oldp);
4325 USAGE_ERROR_ACTION(m, oldmem);
4329 if (!PREACTION(m)) {
4330 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
4333 check_inuse_chunk(m, newp);
4334 mem = chunk2mem(newp);
4337 mem = mspace_malloc(m, bytes);
4339 size_t oc = chunksize(oldp) - overhead_for(oldp);
4340 memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
4341 mspace_free(m, oldmem);
4349void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) {
4352 if (bytes >= MAX_REQUEST) {
4353 MALLOC_FAILURE_ACTION;
4356 size_t nb = request2size(bytes);
4357 mchunkptr oldp = mem2chunk(oldmem);
4359 mstate m = (mstate)msp;
4361 mstate m = get_mstate_for(oldp);
4364 USAGE_ERROR_ACTION(m, oldmem);
4368 if (!PREACTION(m)) {
4369 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
4372 check_inuse_chunk(m, newp);
4381void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
4382 mstate ms = (mstate)msp;
4383 if (!ok_magic(ms)) {
4384 USAGE_ERROR_ACTION(ms,ms);
4387 if (alignment <= MALLOC_ALIGNMENT)
4388 return mspace_malloc(msp, bytes);
4389 return internal_memalign(ms, alignment, bytes);
4392void** mspace_independent_calloc(mspace msp, size_t n_elements, size_t elem_size, void* chunks[]) {
4394 size_t sz = elem_size;
4395 mstate ms = (mstate)msp;
4396 if (!ok_magic(ms)) {
4397 USAGE_ERROR_ACTION(ms,ms);
4400 return ialloc(ms, n_elements, &sz, 3, chunks);
4403void** mspace_independent_comalloc(mspace msp, size_t n_elements, size_t sizes[], void* chunks[]) {
4405 mstate ms = (mstate)msp;
4406 if (!ok_magic(ms)) {
4407 USAGE_ERROR_ACTION(ms,ms);
4410 return ialloc(ms, n_elements, sizes, 0, chunks);
4413size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) {
4414 return internal_bulk_free((mstate)msp, array, nelem);
4417#if MALLOC_INSPECT_ALL
4418void mspace_inspect_all(mspace msp, void(*handler)(void *start, void *end, size_t used_bytes, void* callback_arg), void* arg) {
4424 mstate ms = (mstate)msp;
4426 if (!PREACTION(ms)) {
4427 internal_inspect_all(ms, handler, arg);
4432 USAGE_ERROR_ACTION(ms,ms);
4437int mspace_trim(mspace msp, size_t pad) {
4439 mstate ms = (mstate)msp;
4441 if (!PREACTION(ms)) {
4442 result = sys_trim(ms, pad);
4447 USAGE_ERROR_ACTION(ms,ms);
4453void mspace_malloc_stats(mspace msp) {
4454 mstate ms = (mstate)msp;
4456 internal_malloc_stats(ms);
4459 USAGE_ERROR_ACTION(ms,ms);
4464size_t mspace_footprint(mspace msp) {
4466 mstate ms = (mstate)msp;
4468 result = ms->footprint;
4471 USAGE_ERROR_ACTION(ms,ms);
4476size_t mspace_max_footprint(mspace msp) {
4478 mstate ms = (mstate)msp;
4480 result = ms->max_footprint;
4483 USAGE_ERROR_ACTION(ms,ms);
4488size_t mspace_footprint_limit(mspace msp) {
4490 mstate ms = (mstate)msp;
4492 size_t maf = ms->footprint_limit;
4493 result = (maf == 0) ? MAX_SIZE_T : maf;
4496 USAGE_ERROR_ACTION(ms,ms);
4501size_t mspace_set_footprint_limit(mspace msp, size_t bytes) {
4503 mstate ms = (mstate)msp;
4506 result = granularity_align(1);
4507 if (bytes == MAX_SIZE_T)
4510 result = granularity_align(bytes);
4511 ms->footprint_limit = result;
4514 USAGE_ERROR_ACTION(ms,ms);
4520struct mallinfo mspace_mallinfo(mspace msp) {
4521 mstate ms = (mstate)msp;
4522 if (!ok_magic(ms)) {
4523 USAGE_ERROR_ACTION(ms,ms);
4525 return internal_mallinfo(ms);
4529size_t mspace_usable_size(const void* mem) {
4531 mchunkptr p = mem2chunk(mem);
4533 return chunksize(p) - overhead_for(p);
4538int mspace_mallopt(int param_number, int value) {
4539 return change_mparam(param_number, value);
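/* get_mspace_from_ptr: not part of stock dlmalloc; it appears to be a
   TMEMORY extension that maps a user pointer back to the mspace that owns it
   by reading the chunk's state via get_mstate_for. */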
4542void* get_mspace_from_ptr(void* ptr)
4544 mchunkptr chunk = mem2chunk(ptr);
4545 mstate ms = get_mstate_for(chunk);