OpenBarnyard

dlmalloc.cpp
1#include "ToshiPCH.h"
2
3#ifdef TMEMORY_USE_DLMALLOC
4
5#include "dlmalloc.h"
6
7/*------------------------------ internal #includes ---------------------- */
8
9#ifdef _MSC_VER
10#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
11#endif /* _MSC_VER */
12#if !NO_MALLOC_STATS
13#include <stdio.h> /* for printing in malloc_stats */
14#endif /* NO_MALLOC_STATS */
15#ifndef LACKS_ERRNO_H
16#include <errno.h> /* for MALLOC_FAILURE_ACTION */
17#endif /* LACKS_ERRNO_H */
18#ifdef DEBUG
19#if ABORT_ON_ASSERT_FAILURE
20#undef assert
21#define assert(x) if(!(x)) ABORT
22#else /* ABORT_ON_ASSERT_FAILURE */
23#include <assert.h>
24#endif /* ABORT_ON_ASSERT_FAILURE */
25#else /* DEBUG */
26#ifndef assert
27#define assert(x)
28#endif
29#define DEBUG 0
30#endif /* DEBUG */
31#if !defined(WIN32) && !defined(LACKS_TIME_H)
32#include <time.h> /* for magic initialization */
33#endif /* WIN32 */
34#ifndef LACKS_STDLIB_H
35#include <stdlib.h> /* for abort() */
36#endif /* LACKS_STDLIB_H */
37#ifndef LACKS_STRING_H
38#include <string.h> /* for memset etc */
39#endif /* LACKS_STRING_H */
40#if USE_BUILTIN_FFS
41#ifndef LACKS_STRINGS_H
42#include <strings.h> /* for ffs */
43#endif /* LACKS_STRINGS_H */
44#endif /* USE_BUILTIN_FFS */
45#if HAVE_MMAP
46#ifndef LACKS_SYS_MMAN_H
47/* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
48#if (defined(linux) && !defined(__USE_GNU))
49#define __USE_GNU 1
50#include <sys/mman.h> /* for mmap */
51#undef __USE_GNU
52#else
53#include <sys/mman.h> /* for mmap */
54#endif /* linux */
55#endif /* LACKS_SYS_MMAN_H */
56#ifndef LACKS_FCNTL_H
57#include <fcntl.h>
58#endif /* LACKS_FCNTL_H */
59#endif /* HAVE_MMAP */
60#ifndef LACKS_UNISTD_H
61#include <unistd.h> /* for sbrk, sysconf */
62#else /* LACKS_UNISTD_H */
63#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
64extern void* sbrk(ptrdiff_t);
65#endif /* FreeBSD etc */
66#endif /* LACKS_UNISTD_H */
67
68/* Declarations for locking */
69#if USE_LOCKS
70#ifndef WIN32
71#if defined (__SVR4) && defined (__sun) /* solaris */
72#include <thread.h>
73#elif !defined(LACKS_SCHED_H)
74#include <sched.h>
75#endif /* solaris or LACKS_SCHED_H */
76#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS
77#include <pthread.h>
78#endif /* USE_RECURSIVE_LOCKS ... */
79#elif defined(_MSC_VER)
80#ifndef _M_AMD64
81/* These are already defined on AMD64 builds */
82#ifdef __cplusplus
83extern "C" {
84#endif /* __cplusplus */
85LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
86LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
87#ifdef __cplusplus
88}
89#endif /* __cplusplus */
90#endif /* _M_AMD64 */
91#pragma intrinsic (_InterlockedCompareExchange)
92#pragma intrinsic (_InterlockedExchange)
93#define interlockedcompareexchange _InterlockedCompareExchange
94#define interlockedexchange _InterlockedExchange
95#elif defined(WIN32) && defined(__GNUC__)
96#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)
97#define interlockedexchange __sync_lock_test_and_set
98#endif /* Win32 */
99#else /* USE_LOCKS */
100#endif /* USE_LOCKS */
101
102#ifndef LOCK_AT_FORK
103#define LOCK_AT_FORK 0
104#endif
105
106/* Declarations for bit scanning on win32 */
107#if defined(_MSC_VER) && _MSC_VER>=1300
108#ifndef BitScanForward /* Try to avoid pulling in WinNT.h */
109#ifdef __cplusplus
110extern "C" {
111#endif /* __cplusplus */
112unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
113unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
114#ifdef __cplusplus
115}
116#endif /* __cplusplus */
117
118#define BitScanForward _BitScanForward
119#define BitScanReverse _BitScanReverse
120#pragma intrinsic(_BitScanForward)
121#pragma intrinsic(_BitScanReverse)
122#endif /* BitScanForward */
123#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */
124
125#ifndef WIN32
126#ifndef malloc_getpagesize
127# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
128# ifndef _SC_PAGE_SIZE
129# define _SC_PAGE_SIZE _SC_PAGESIZE
130# endif
131# endif
132# ifdef _SC_PAGE_SIZE
133# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
134# else
135# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
136 extern size_t getpagesize();
137# define malloc_getpagesize getpagesize()
138# else
139# ifdef WIN32 /* use supplied emulation of getpagesize */
140# define malloc_getpagesize getpagesize()
141# else
142# ifndef LACKS_SYS_PARAM_H
143# include <sys/param.h>
144# endif
145# ifdef EXEC_PAGESIZE
146# define malloc_getpagesize EXEC_PAGESIZE
147# else
148# ifdef NBPG
149# ifndef CLSIZE
150# define malloc_getpagesize NBPG
151# else
152# define malloc_getpagesize (NBPG * CLSIZE)
153# endif
154# else
155# ifdef NBPC
156# define malloc_getpagesize NBPC
157# else
158# ifdef PAGESIZE
159# define malloc_getpagesize PAGESIZE
160# else /* just guess */
161# define malloc_getpagesize ((size_t)4096U)
162# endif
163# endif
164# endif
165# endif
166# endif
167# endif
168# endif
169#endif
170#endif
171
172/* ------------------- size_t and alignment properties -------------------- */
173
174/* The byte and bit size of a size_t */
175#define SIZE_T_SIZE (sizeof(size_t))
176#define SIZE_T_BITSIZE (sizeof(size_t) << 3)
177
178/* Some constants coerced to size_t */
179/* Annoying but necessary to avoid errors on some platforms */
180#define SIZE_T_ZERO ((size_t)0)
181#define SIZE_T_ONE ((size_t)1)
182#define SIZE_T_TWO ((size_t)2)
183#define SIZE_T_FOUR ((size_t)4)
184#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
185#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
186#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
187#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
188
189/* The bit mask value corresponding to MALLOC_ALIGNMENT */
190#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
191
192/* True if address a has acceptable alignment */
193#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
194
195/* the number of bytes to offset an address to align it */
196#define align_offset(A)\
197 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
198 ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
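
/*
   Illustrative sketch (not part of the original dlmalloc sources): how
   align_offset and is_aligned cooperate to bump an arbitrary address up to
   MALLOC_ALIGNMENT (configured in dlmalloc.h). Kept inside #if 0 so it is
   never compiled.
*/
#if 0
static void example_align_offset(void) {
  char buf[4 * MALLOC_ALIGNMENT];
  char* raw = buf + 3;                    /* typically misaligned */
  size_t pad = align_offset(raw);         /* 0 <= pad < MALLOC_ALIGNMENT */
  char* aligned = raw + pad;
  assert(is_aligned(aligned));            /* low CHUNK_ALIGN_MASK bits are zero */
}
#endif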
199
200/* -------------------------- MMAP preliminaries ------------------------- */
201
202/*
203 If HAVE_MORECORE or HAVE_MMAP are TFALSE, we just define calls and
204 checks to fail so compiler optimizer can delete code rather than
205 using so many "#if"s.
206*/
207
208
209/* MORECORE and MMAP must return MFAIL on failure */
210#define MFAIL ((void*)(MAX_SIZE_T))
211#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */
212
213#if HAVE_MMAP
214
215#ifndef WIN32
216#define MUNMAP_DEFAULT(a, s) munmap((a), (s))
217#define MMAP_PROT (PROT_READ|PROT_WRITE)
218#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
219#define MAP_ANONYMOUS MAP_ANON
220#endif /* MAP_ANON */
221#ifdef MAP_ANONYMOUS
222#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
223#define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
224#else /* MAP_ANONYMOUS */
225/*
226 Nearly all versions of mmap support MAP_ANONYMOUS, so the following
227 is unlikely to be needed, but is supplied just in case.
228*/
229#define MMAP_FLAGS (MAP_PRIVATE)
230static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
231#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
232 (dev_zero_fd = open("/dev/zero", O_RDWR), \
233 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
234 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
235#endif /* MAP_ANONYMOUS */
236
237#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
238
239#else /* WIN32 */
240
241/* Win32 MMAP via VirtualAlloc */
242static FORCEINLINE void* win32mmap(size_t size) {
243 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
244 return (ptr != 0)? ptr: MFAIL;
245}
246
247/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
248static FORCEINLINE void* win32direct_mmap(size_t size) {
249 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
250 PAGE_READWRITE);
251 return (ptr != 0)? ptr: MFAIL;
252}
253
254/* This function supports releasing coalesced segments */
255static FORCEINLINE int win32munmap(void* ptr, size_t size) {
256 MEMORY_BASIC_INFORMATION minfo;
257 char* cptr = (char*)ptr;
258 while (size) {
259 if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
260 return -1;
261 if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
262 minfo.State != MEM_COMMIT || minfo.RegionSize > size)
263 return -1;
264 if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
265 return -1;
266 cptr += minfo.RegionSize;
267 size -= minfo.RegionSize;
268 }
269 return 0;
270}
271
272#define MMAP_DEFAULT(s) win32mmap(s)
273#define MUNMAP_DEFAULT(a, s) win32munmap((a), (s))
274#define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s)
275#endif /* WIN32 */
276#endif /* HAVE_MMAP */
277
278#if HAVE_MREMAP
279#ifndef WIN32
280#define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
281#endif /* WIN32 */
282#endif /* HAVE_MREMAP */
283
287#if HAVE_MORECORE
288 #ifdef MORECORE
289 #define CALL_MORECORE(S) MORECORE(S)
290 #else /* MORECORE */
291 #define CALL_MORECORE(S) MORECORE_DEFAULT(S)
292 #endif /* MORECORE */
293#else /* HAVE_MORECORE */
294 #define CALL_MORECORE(S) MFAIL
295#endif /* HAVE_MORECORE */
296
300#if HAVE_MMAP
301 #define USE_MMAP_BIT (SIZE_T_ONE)
302
303 #ifdef MMAP
304 #define CALL_MMAP(s) MMAP(s)
305 #else /* MMAP */
306 #define CALL_MMAP(s) MMAP_DEFAULT(s)
307 #endif /* MMAP */
308 #ifdef MUNMAP
309 #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
310 #else /* MUNMAP */
311 #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s))
312 #endif /* MUNMAP */
313 #ifdef DIRECT_MMAP
314 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
315 #else /* DIRECT_MMAP */
316 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
317 #endif /* DIRECT_MMAP */
318#else /* HAVE_MMAP */
319 #define USE_MMAP_BIT (SIZE_T_ZERO)
320
321 #define MMAP(s) MFAIL
322 #define MUNMAP(a, s) (-1)
323 #define DIRECT_MMAP(s) MFAIL
324 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
325 #define CALL_MMAP(s) MMAP(s)
326 #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
327#endif /* HAVE_MMAP */
328
332#if HAVE_MMAP && HAVE_MREMAP
333 #ifdef MREMAP
334 #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
335 #else /* MREMAP */
336 #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
337 #endif /* MREMAP */
338#else /* HAVE_MMAP && HAVE_MREMAP */
339 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
340#endif /* HAVE_MMAP && HAVE_MREMAP */
341
342/* mstate bit set if contiguous morecore disabled or failed */
343#define USE_NONCONTIGUOUS_BIT (4U)
344
345/* segment bit set in create_mspace_with_base */
346#define EXTERN_BIT (8U)
347
348
349/* --------------------------- Lock preliminaries ------------------------ */
350
351/*
352 When locks are defined, there is one global lock, plus
353 one per-mspace lock.
354
355 The global lock ensures that mparams.magic and other unique
356 mparams values are initialized only once. It also protects
357 sequences of calls to MORECORE. In many cases sys_alloc requires
358 two calls, that should not be interleaved with calls by other
359 threads. This does not protect against direct calls to MORECORE
360 by other threads not using this lock, so there is still code to
361 cope as best we can with interference.
362
363 Per-mspace locks surround calls to malloc, free, etc.
364 By default, locks are simple non-reentrant mutexes.
365
366 Because lock-protected regions generally have bounded times, it is
367 OK to use the supplied simple spinlocks. Spinlocks are likely to
368 improve performance for lightly contended applications, but worsen
369 performance under heavy contention.
370
371 If USE_LOCKS is > 1, the definitions of lock routines here are
372 bypassed, in which case you will need to define the type MLOCK_T,
373 and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK
374 and TRY_LOCK. You must also declare a
375 static MLOCK_T malloc_global_mutex = { initialization values };.
376
377*/
378
379#if !USE_LOCKS
380#define USE_LOCK_BIT (0U)
381#define INITIAL_LOCK(l) (0)
382#define DESTROY_LOCK(l) (0)
383#define ACQUIRE_MALLOC_GLOBAL_LOCK()
384#define RELEASE_MALLOC_GLOBAL_LOCK()
385
386#else
387#if USE_LOCKS > 1
388/* ----------------------- User-defined locks ------------------------ */
389/* Define your own lock implementation here */
390/* #define INITIAL_LOCK(lk) ... */
391/* #define DESTROY_LOCK(lk) ... */
392/* #define ACQUIRE_LOCK(lk) ... */
393/* #define RELEASE_LOCK(lk) ... */
394/* #define TRY_LOCK(lk) ... */
395/* static MLOCK_T malloc_global_mutex = ... */
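/*
   Illustrative sketch (not part of the original dlmalloc sources): one way
   the user-defined hooks above could be filled in on a pthreads platform,
   mirroring the pthread-based branch further below. Kept inside #if 0.
*/
#if 0
#define MLOCK_T              pthread_mutex_t
#define INITIAL_LOCK(lk)     pthread_mutex_init(lk, NULL)   /* 0 on success */
#define DESTROY_LOCK(lk)     pthread_mutex_destroy(lk)
#define ACQUIRE_LOCK(lk)     pthread_mutex_lock(lk)
#define RELEASE_LOCK(lk)     pthread_mutex_unlock(lk)
#define TRY_LOCK(lk)         (!pthread_mutex_trylock(lk))   /* nonzero if acquired */
static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif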
396
397#elif USE_SPIN_LOCKS
398
399/* First, define CAS_LOCK and CLEAR_LOCK on ints */
400/* Note CAS_LOCK defined to return 0 on success */
401
402#if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
403#define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1)
404#define CLEAR_LOCK(sl) __sync_lock_release(sl)
405
406#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
407/* Custom spin locks for older gcc on x86 */
408static FORCEINLINE int x86_cas_lock(int *sl) {
409 int ret;
410 int val = 1;
411 int cmp = 0;
412 __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
413 : "=a" (ret)
414 : "r" (val), "m" (*(sl)), "0"(cmp)
415 : "memory", "cc");
416 return ret;
417}
418
419static FORCEINLINE void x86_clear_lock(int* sl) {
420 assert(*sl != 0);
421 int prev = 0;
422 int ret;
423 __asm__ __volatile__ ("lock; xchgl %0, %1"
424 : "=r" (ret)
425 : "m" (*(sl)), "0"(prev)
426 : "memory");
427}
428
429#define CAS_LOCK(sl) x86_cas_lock(sl)
430#define CLEAR_LOCK(sl) x86_clear_lock(sl)
431
432#else /* Win32 MSC */
433#define CAS_LOCK(sl) interlockedexchange(sl, (LONG)1)
434#define CLEAR_LOCK(sl) interlockedexchange (sl, (LONG)0)
435
436#endif /* ... gcc spin locks ... */
437
438/* How to yield for a spin lock */
439#define SPINS_PER_YIELD 63
440#if defined(_MSC_VER)
441#define SLEEP_EX_DURATION 50 /* delay for yield/sleep */
442#define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE)
443#elif defined (__SVR4) && defined (__sun) /* solaris */
444#define SPIN_LOCK_YIELD thr_yield();
445#elif !defined(LACKS_SCHED_H)
446#define SPIN_LOCK_YIELD sched_yield();
447#else
448#define SPIN_LOCK_YIELD
449#endif /* ... yield ... */
450
451#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
452/* Plain spin locks use single word (embedded in malloc_states) */
453static int spin_acquire_lock(int *sl) {
454 int spins = 0;
455 while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) {
456 if ((++spins & SPINS_PER_YIELD) == 0) {
457 SPIN_LOCK_YIELD;
458 }
459 }
460 return 0;
461}
462
463#define MLOCK_T int
464#define TRY_LOCK(sl) !CAS_LOCK(sl)
465#define RELEASE_LOCK(sl) CLEAR_LOCK(sl)
466#define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0)
467#define INITIAL_LOCK(sl) (*sl = 0)
468#define DESTROY_LOCK(sl) (0)
469static MLOCK_T malloc_global_mutex = 0;
470
471#else /* USE_RECURSIVE_LOCKS */
472/* types for lock owners */
473#ifdef WIN32
474#define THREAD_ID_T DWORD
475#define CURRENT_THREAD GetCurrentThreadId()
476#define EQ_OWNER(X,Y) ((X) == (Y))
477#else
478/*
479 Note: the following assume that pthread_t is a type that can be
480 initialized to (casted) zero. If this is not the case, you will need to
481 somehow redefine these or not use spin locks.
482*/
483#define THREAD_ID_T pthread_t
484#define CURRENT_THREAD pthread_self()
485#define EQ_OWNER(X,Y) pthread_equal(X, Y)
486#endif
487
488struct malloc_recursive_lock {
489 int sl;
490 unsigned int c;
491 THREAD_ID_T threadid;
492};
493
494#define MLOCK_T struct malloc_recursive_lock
495static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0};
496
497static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) {
498 assert(lk->sl != 0);
499 if (--lk->c == 0) {
500 CLEAR_LOCK(&lk->sl);
501 }
502}
503
504static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) {
505 THREAD_ID_T mythreadid = CURRENT_THREAD;
506 int spins = 0;
507 for (;;) {
508 if (*((volatile int *)(&lk->sl)) == 0) {
509 if (!CAS_LOCK(&lk->sl)) {
510 lk->threadid = mythreadid;
511 lk->c = 1;
512 return 0;
513 }
514 }
515 else if (EQ_OWNER(lk->threadid, mythreadid)) {
516 ++lk->c;
517 return 0;
518 }
519 if ((++spins & SPINS_PER_YIELD) == 0) {
520 SPIN_LOCK_YIELD;
521 }
522 }
523}
524
525static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) {
526 THREAD_ID_T mythreadid = CURRENT_THREAD;
527 if (*((volatile int *)(&lk->sl)) == 0) {
528 if (!CAS_LOCK(&lk->sl)) {
529 lk->threadid = mythreadid;
530 lk->c = 1;
531 return 1;
532 }
533 }
534 else if (EQ_OWNER(lk->threadid, mythreadid)) {
535 ++lk->c;
536 return 1;
537 }
538 return 0;
539}
540
541#define RELEASE_LOCK(lk) recursive_release_lock(lk)
542#define TRY_LOCK(lk) recursive_try_lock(lk)
543#define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk)
544#define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0)
545#define DESTROY_LOCK(lk) (0)
546#endif /* USE_RECURSIVE_LOCKS */
547
548#elif defined(WIN32) /* Win32 critical sections */
549#define MLOCK_T CRITICAL_SECTION
550#define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0)
551#define RELEASE_LOCK(lk) LeaveCriticalSection(lk)
552#define TRY_LOCK(lk) TryEnterCriticalSection(lk)
553#define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000))
554#define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0)
555#define NEED_GLOBAL_LOCK_INIT
556
557static MLOCK_T malloc_global_mutex;
558static volatile LONG malloc_global_mutex_status;
559
560/* Use spin loop to initialize global lock */
561static void init_malloc_global_mutex() {
562 for (;;) {
563 long stat = malloc_global_mutex_status;
564 if (stat > 0)
565 return;
566 /* transition to < 0 while initializing, then to > 0 */
567 if (stat == 0 &&
568 interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) {
569 InitializeCriticalSection(&malloc_global_mutex);
570 interlockedexchange(&malloc_global_mutex_status, (LONG)1);
571 return;
572 }
573 SleepEx(0, FALSE);
574 }
575}
576
577#else /* pthreads-based locks */
578#define MLOCK_T pthread_mutex_t
579#define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk)
580#define RELEASE_LOCK(lk) pthread_mutex_unlock(lk)
581#define TRY_LOCK(lk) (!pthread_mutex_trylock(lk))
582#define INITIAL_LOCK(lk) pthread_init_lock(lk)
583#define DESTROY_LOCK(lk) pthread_mutex_destroy(lk)
584
585#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)
586/* Cope with old-style linux recursive lock initialization by adding */
587/* skipped internal declaration from pthread.h */
588extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
589 int __kind));
590#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
591#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
592#endif /* USE_RECURSIVE_LOCKS ... */
593
594static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
595
596static int pthread_init_lock (MLOCK_T *lk) {
597 pthread_mutexattr_t attr;
598 if (pthread_mutexattr_init(&attr)) return 1;
599#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0
600 if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
601#endif
602 if (pthread_mutex_init(lk, &attr)) return 1;
603 if (pthread_mutexattr_destroy(&attr)) return 1;
604 return 0;
605}
606
607#endif /* ... lock types ... */
608
609/* Common code for all lock types */
610#define USE_LOCK_BIT (2U)
611
612#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
613#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);
614#endif
615
616#ifndef RELEASE_MALLOC_GLOBAL_LOCK
617#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);
618#endif
619
620#endif /* USE_LOCKS */
621
622/* ----------------------- Chunk representations ------------------------ */
623
624/*
625 (The following includes lightly edited explanations by Colin Plumb.)
626
627 The malloc_chunk declaration below is misleading (but accurate and
628 necessary). It declares a "view" into memory allowing access to
629 necessary fields at known offsets from a given base.
630
631 Chunks of memory are maintained using a `boundary tag' method as
632 originally described by Knuth. (See the paper by Paul Wilson
633 ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
634 techniques.) Sizes of free chunks are stored both in the front of
635 each chunk and at the end. This makes consolidating fragmented
636 chunks into bigger chunks fast. The head fields also hold bits
637 representing whether chunks are free or in use.
638
639 Here are some pictures to make it clearer. They are "exploded" to
640 show that the state of a chunk can be thought of as extending from
641 the high 31 bits of the head field of its header through the
642 prev_foot and PINUSE_BIT bit of the following chunk header.
643
644 A chunk that's in use looks like:
645
646 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
647 | Size of previous chunk (if P = 0) |
648 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
649 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
650 | Size of this chunk 1| +-+
651 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
652 | |
653 +- -+
654 | |
655 +- -+
656 | :
657 +- size - sizeof(size_t) available payload bytes -+
658 : |
659 chunk-> +- -+
660 | |
661 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
662 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
663 | Size of next chunk (may or may not be in use) | +-+
664 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
665
666 And if it's free, it looks like this:
667
668 chunk-> +- -+
669 | User payload (must be in use, or we would have merged!) |
670 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
671 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
672 | Size of this chunk 0| +-+
673 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
674 | Next pointer |
675 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
676 | Prev pointer |
677 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
678 | :
679 +- size - sizeof(struct chunk) unused bytes -+
680 : |
681 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
682 | Size of this chunk |
683 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
684 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
685 | Size of next chunk (must be in use, or we would have merged)| +-+
686 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
687 | :
688 +- User payload -+
689 : |
690 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
691 |0|
692 +-+
693 Note that since we always merge adjacent free chunks, the chunks
694 adjacent to a free chunk must be in use.
695
696 Given a pointer to a chunk (which can be derived trivially from the
697 payload pointer) we can, in O(1) time, find out whether the adjacent
698 chunks are free, and if so, unlink them from the lists that they
699 are on and merge them with the current chunk.
700
701 Chunks always begin on even word boundaries, so the mem portion
702 (which is returned to the user) is also on an even word boundary, and
703 thus at least double-word aligned.
704
705 The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
706 chunk size (which is always a multiple of two words), is an in-use
707 bit for the *previous* chunk. If that bit is *clear*, then the
708 word before the current chunk size contains the previous chunk
709 size, and can be used to find the front of the previous chunk.
710 The very first chunk allocated always has this bit set, preventing
711 access to non-existent (or non-owned) memory. If pinuse is set for
712 any given chunk, then you CANNOT determine the size of the
713 previous chunk, and might even get a memory addressing fault when
714 trying to do so.
715
716 The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
717 the chunk size redundantly records whether the current chunk is
718 inuse (unless the chunk is mmapped). This redundancy enables usage
719 checks within free and realloc, and reduces indirection when freeing
720 and consolidating chunks.
721
722 Each freshly allocated chunk must have both cinuse and pinuse set.
723 That is, each allocated chunk borders either a previously allocated
724 and still in-use chunk, or the base of its memory arena. This is
725 ensured by making all allocations from the `lowest' part of any
726 found chunk. Further, no free chunk physically borders another one,
727 so each free chunk is known to be preceded and followed by either
728 inuse chunks or the ends of memory.
729
730 Note that the `foot' of the current chunk is actually represented
731 as the prev_foot of the NEXT chunk. This makes it easier to
732 deal with alignments etc but can be very confusing when trying
733 to extend or adapt this code.
734
735 The exceptions to all this are
736
737 1. The special chunk `top' is the top-most available chunk (i.e.,
738 the one bordering the end of available memory). It is treated
739 specially. Top is never included in any bin, is used only if
740 no other chunk is available, and is released back to the
741 system if it is very large (see M_TRIM_THRESHOLD). In effect,
742 the top chunk is treated as larger (and thus less well
743 fitting) than any other available chunk. The top chunk
744 doesn't update its trailing size field since there is no next
745 contiguous chunk that would have to index off it. However,
746 space is still allocated for it (TOP_FOOT_SIZE) to enable
747 separation or merging when space is extended.
748
749 2. Chunks allocated via mmap have both cinuse and pinuse bits
750 cleared in their head fields. Because they are allocated
751 one-by-one, each must carry its own prev_foot field, which is
752 also used to hold the offset this chunk has within its mmapped
753 region, which is needed to preserve alignment. Each mmapped
754 chunk is trailed by the first two fields of a fake next-chunk
755 for sake of usage checks.
756
757*/
758
759struct malloc_chunk {
760 size_t prev_foot; /* Size of previous chunk (if free). */
761 size_t head; /* Size and inuse bits. */
762 struct malloc_chunk* fd; /* double links -- used only if free. */
763 struct malloc_chunk* bk;
764};
765
766typedef struct malloc_chunk mchunk;
767typedef struct malloc_chunk* mchunkptr;
768typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */
769typedef unsigned int bindex_t; /* Described below */
770typedef unsigned int binmap_t; /* Described below */
771typedef unsigned int flag_t; /* The type of various bit flag sets */
772
773/* ------------------- Chunks sizes and alignments ----------------------- */
774
775#define MCHUNK_SIZE (sizeof(mchunk))
776
777#if FOOTERS
778#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
779#else /* FOOTERS */
780#define CHUNK_OVERHEAD (SIZE_T_SIZE)
781#endif /* FOOTERS */
782
783/* MMapped chunks need a second word of overhead ... */
784#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
785/* ... and additional padding for fake next-chunk at foot */
786#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
787
788/* The smallest size we can malloc is an aligned minimal chunk */
789#define MIN_CHUNK_SIZE\
790 ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
791
792/* conversion from malloc headers to user pointers, and back */
793#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
794#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
795/* chunk associated with aligned address A */
796#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
797
798/* Bounds on request (not chunk) sizes. */
799#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
800#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
801
802/* pad request bytes into a usable size */
803#define pad_request(req) \
804 (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
805
806/* pad request, checking for minimum (but not maximum) */
807#define request2size(req) \
808 (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
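
/*
   Illustrative sketch (not part of the original dlmalloc sources): the
   relationship between user requests, padded chunk sizes and the chunk/mem
   views defined above. Kept inside #if 0.
*/
#if 0
static void example_sizes_and_views(void) {
  size_t n;
  static struct malloc_chunk c;
  assert(request2size(1) == MIN_CHUNK_SIZE);          /* tiny requests round up */
  for (n = 1; n <= 256; ++n)                          /* padded sizes are always aligned */
    assert((request2size(n) & CHUNK_ALIGN_MASK) == 0);
  assert(mem2chunk(chunk2mem(&c)) == (mchunkptr)&c);  /* payload sits two size_t's past the header */
}
#endif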
809
810
811/* ------------------ Operations on head and foot fields ----------------- */
812
813/*
814 The head field of a chunk is or'ed with PINUSE_BIT when previous
815 adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
816 use, unless mmapped, in which case both bits are cleared.
817
818 FLAG4_BIT is not used by this malloc, but might be useful in extensions.
819*/
820
821#define PINUSE_BIT (SIZE_T_ONE)
822#define CINUSE_BIT (SIZE_T_TWO)
823#define FLAG4_BIT (SIZE_T_FOUR)
824#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
825#define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
826
827/* Head value for fenceposts */
828#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
829
830/* extraction of fields from head words */
831#define cinuse(p) ((p)->head & CINUSE_BIT)
832#define pinuse(p) ((p)->head & PINUSE_BIT)
833#define flag4inuse(p) ((p)->head & FLAG4_BIT)
834#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT)
835#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0)
836
837#define chunksize(p) ((p)->head & ~(FLAG_BITS))
838
839#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
840#define set_flag4(p) ((p)->head |= FLAG4_BIT)
841#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT)
842
843/* Treat space at ptr +/- offset as a chunk */
844#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
845#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
846
847/* Ptr to next or previous physical malloc_chunk. */
848#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
849#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
850
851/* extract next chunk's pinuse bit */
852#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
853
854/* Get/set size at footer */
855#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
856#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
857
858/* Set size, pinuse bit, and foot */
859#define set_size_and_pinuse_of_free_chunk(p, s)\
860 ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
861
862/* Set size, pinuse bit, foot, and clear next pinuse */
863#define set_free_with_pinuse(p, s, n)\
864 (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
865
866/* Get the internal overhead associated with chunk p */
867#define overhead_for(p)\
868 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
869
870/* Return TTRUE if malloced space is not necessarily cleared */
871#if MMAP_CLEARS
872#define calloc_must_clear(p) (!is_mmapped(p))
873#else /* MMAP_CLEARS */
874#define calloc_must_clear(p) (1)
875#endif /* MMAP_CLEARS */
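
/*
   Illustrative sketch (not part of the original dlmalloc sources): how the
   head word of a hand-built free chunk encodes its size plus the pinuse /
   cinuse bits, and how the foot mirrors the size. Kept inside #if 0.
*/
#if 0
static void example_head_and_foot(void) {
  static struct malloc_chunk pair[4];        /* fake chunk plus room for its foot */
  mchunkptr p = (mchunkptr)&pair[0];
  size_t s = 2 * sizeof(struct malloc_chunk);
  set_size_and_pinuse_of_free_chunk(p, s);   /* head = s|PINUSE_BIT, foot = s */
  assert(chunksize(p) == s);                 /* chunksize strips the flag bits */
  assert(pinuse(p) && !cinuse(p));           /* previous chunk in use, this one free */
  assert(next_chunk(p) == chunk_plus_offset(p, s));
  assert(get_foot(p, s) == s);
}
#endif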
876
877/* ---------------------- Overlaid data structures ----------------------- */
878
879/*
880 When chunks are not in use, they are treated as nodes of either
881 lists or trees.
882
883 "Small" chunks are stored in circular doubly-linked lists, and look
884 like this:
885
886 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
887 | Size of previous chunk |
888 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
889 `head:' | Size of chunk, in bytes |P|
890 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
891 | Forward pointer to next chunk in list |
892 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
893 | Back pointer to previous chunk in list |
894 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
895 | Unused space (may be 0 bytes long) .
896 . .
897 . |
898nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
899 `foot:' | Size of chunk, in bytes |
900 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
901
902 Larger chunks are kept in a form of bitwise digital trees (aka
903 tries) keyed on chunksizes. Because malloc_tree_chunks are only for
904 free chunks greater than 256 bytes, their size doesn't impose any
905 constraints on user chunk sizes. Each node looks like:
906
907 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
908 | Size of previous chunk |
909 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
910 `head:' | Size of chunk, in bytes |P|
911 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
912 | Forward pointer to next chunk of same size |
913 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
914 | Back pointer to previous chunk of same size |
915 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
916 | Pointer to left child (child[0]) |
917 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
918 | Pointer to right child (child[1]) |
919 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
920 | Pointer to parent |
921 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
922 | bin index of this chunk |
923 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
924 | Unused space .
925 . |
926nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
927 `foot:' | Size of chunk, in bytes |
928 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
929
930 Each tree holding treenodes is a tree of unique chunk sizes. Chunks
931 of the same size are arranged in a circularly-linked list, with only
932 the oldest chunk (the next to be used, in our FIFO ordering)
933 actually in the tree. (Tree members are distinguished by a non-null
934 parent pointer.) If a chunk with the same size as an existing node
935 is inserted, it is linked off the existing node using pointers that
936 work in the same way as fd/bk pointers of small chunks.
937
938 Each tree contains a power of 2 sized range of chunk sizes (the
939 smallest is 0x100 <= x < 0x180), which is divided in half at each
940 tree level, with the chunks in the smaller half of the range (0x100
941 <= x < 0x140 for the top node) in the left subtree and the larger
942 half (0x140 <= x < 0x180) in the right subtree. This is, of course,
943 done by inspecting individual bits.
944
945 Using these rules, each node's left subtree contains all smaller
946 sizes than its right subtree. However, the node at the root of each
947 subtree has no particular ordering relationship to either. (The
948 dividing line between the subtree sizes is based on trie relation.)
949 If we remove the last chunk of a given size from the interior of the
950 tree, we need to replace it with a leaf node. The tree ordering
951 rules permit a node to be replaced by any leaf below it.
952
953 The smallest chunk in a tree (a common operation in a best-fit
954 allocator) can be found by walking a path to the leftmost leaf in
955 the tree. Unlike a usual binary tree, where we follow left child
956 pointers until we reach a null, here we follow the right child
957 pointer any time the left one is null, until we reach a leaf with
958 both child pointers null. The smallest chunk in the tree will be
959 somewhere along that path.
960
961 The worst case number of steps to add, find, or remove a node is
962 bounded by the number of bits differentiating chunks within
963 bins. Under current bin calculations, this ranges from 6 up to 21
964 (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
965 is of course much better.
966*/
967
968struct malloc_tree_chunk {
969 /* The first four fields must be compatible with malloc_chunk */
970 size_t prev_foot;
971 size_t head;
972 struct malloc_tree_chunk* fd;
973 struct malloc_tree_chunk* bk;
974
975 struct malloc_tree_chunk* child[2];
976 struct malloc_tree_chunk* parent;
977 bindex_t index;
978};
979
980typedef struct malloc_tree_chunk tchunk;
981typedef struct malloc_tree_chunk* tchunkptr;
982typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
983
984/* A little helper macro for trees */
985#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
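
/*
   Illustrative sketch (not part of the original dlmalloc sources): walking
   the "leftmost" path described above -- follow the left child when it
   exists, otherwise the right one, until both are null. The smallest chunk
   in the tree lies somewhere along this path. Kept inside #if 0.
*/
#if 0
static tchunkptr example_leftmost_path_end(tchunkptr t) {
  if (t != 0) {
    tchunkptr next;
    while ((next = leftmost_child(t)) != 0)
      t = next;                              /* descend until a childless node */
  }
  return t;
}
#endif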
986
987/* ----------------------------- Segments -------------------------------- */
988
989/*
990 Each malloc space may include non-contiguous segments, held in a
991 list headed by an embedded malloc_segment record representing the
992 top-most space. Segments also include flags holding properties of
993 the space. Large chunks that are directly allocated by mmap are not
994 included in this list. They are instead independently created and
995 destroyed without otherwise keeping track of them.
996
997 Segment management mainly comes into play for spaces allocated by
998 MMAP. Any call to MMAP might or might not return memory that is
999 adjacent to an existing segment. MORECORE normally contiguously
1000 extends the current space, so this space is almost always adjacent,
1001 which is simpler and faster to deal with. (This is why MORECORE is
1002 used preferentially to MMAP when both are available -- see
1003 sys_alloc.) When allocating using MMAP, we don't use any of the
1004 hinting mechanisms (inconsistently) supported in various
1005 implementations of unix mmap, or distinguish reserving from
1006 committing memory. Instead, we just ask for space, and exploit
1007 contiguity when we get it. It is probably possible to do
1008 better than this on some systems, but no general scheme seems
1009 to be significantly better.
1010
1011 Management entails a simpler variant of the consolidation scheme
1012 used for chunks to reduce fragmentation -- new adjacent memory is
1013 normally prepended or appended to an existing segment. However,
1014 there are limitations compared to chunk consolidation that mostly
1015 reflect the fact that segment processing is relatively infrequent
1016 (occurring only when getting memory from system) and that we
1017 don't expect to have huge numbers of segments:
1018
1019 * Segments are not indexed, so traversal requires linear scans. (It
1020 would be possible to index these, but is not worth the extra
1021 overhead and complexity for most programs on most platforms.)
1022 * New segments are only appended to old ones when holding top-most
1023 memory; if they cannot be prepended to others, they are held in
1024 different segments.
1025
1026 Except for the top-most segment of an mstate, each segment record
1027 is kept at the tail of its segment. Segments are added by pushing
1028 segment records onto the list headed by &mstate.seg for the
1029 containing mstate.
1030
1031 Segment flags control allocation/merge/deallocation policies:
1032 * If EXTERN_BIT set, then we did not allocate this segment,
1033 and so should not try to deallocate or merge with others.
1034 (This currently holds only for the initial segment passed
1035 into create_mspace_with_base.)
1036 * If USE_MMAP_BIT set, the segment may be merged with
1037 other surrounding mmapped segments and trimmed/de-allocated
1038 using munmap.
1039 * If neither bit is set, then the segment was obtained using
1040 MORECORE so can be merged with surrounding MORECORE'd segments
1041 and deallocated/trimmed using MORECORE with negative arguments.
1042*/
1043
1044struct malloc_segment {
1045 char* base; /* base address */
1046 size_t size; /* allocated size */
1047 struct malloc_segment* next; /* ptr to next segment */
1048 flag_t sflags; /* mmap and extern flag */
1049};
1050
1051#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)
1052#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
1053
1054typedef struct malloc_segment msegment;
1055typedef struct malloc_segment* msegmentptr;
1056
1057/* ---------------------------- malloc_state ----------------------------- */
1058
1059/*
1060 A malloc_state holds all of the bookkeeping for a space.
1061 The main fields are:
1062
1063 Top
1064 The topmost chunk of the currently active segment. Its size is
1065 cached in topsize. The actual size of topmost space is
1066 topsize+TOP_FOOT_SIZE, which includes space reserved for adding
1067 fenceposts and segment records if necessary when getting more
1068 space from the system. The size at which to autotrim top is
1069 cached from mparams in trim_check, except that it is disabled if
1070 an autotrim fails.
1071
1072 Designated victim (dv)
1073 This is the preferred chunk for servicing small requests that
1074 don't have exact fits. It is normally the chunk split off most
1075 recently to service another small request. Its size is cached in
1076 dvsize. The link fields of this chunk are not maintained since it
1077 is not kept in a bin.
1078
1079 SmallBins
1080 An array of bin headers for free chunks. These bins hold chunks
1081 with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
1082 chunks of all the same size, spaced 8 bytes apart. To simplify
1083 use in double-linked lists, each bin header acts as a malloc_chunk
1084 pointing to the real first node, if it exists (else pointing to
1085 itself). This avoids special-casing for headers. But to avoid
1086 waste, we allocate only the fd/bk pointers of bins, and then use
1087 repositioning tricks to treat these as the fields of a chunk.
1088
1089 TreeBins
1090 Treebins are pointers to the roots of trees holding a range of
1091 sizes. There are 2 equally spaced treebins for each power of two
1092 from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
1093 larger.
1094
1095 Bin maps
1096 There is one bit map for small bins ("smallmap") and one for
1097 treebins ("treemap"). Each bin sets its bit when non-empty, and
1098 clears the bit when empty. Bit operations are then used to avoid
1099 bin-by-bin searching -- nearly all "search" is done without ever
1100 looking at bins that won't be selected. The bit maps
1101 conservatively use 32 bits per map word, even on a 64-bit system.
1102 For a good description of some of the bit-based techniques used
1103 here, see Henry S. Warren Jr's book "Hacker's Delight" (and
1104 supplement at http://hackersdelight.org/). Many of these are
1105 intended to reduce the branchiness of paths through malloc etc, as
1106 well as to reduce the number of memory locations read or written.
1107
1108 Segments
1109 A list of segments headed by an embedded malloc_segment record
1110 representing the initial space.
1111
1112 Address check support
1113 The least_addr field is the least address ever obtained from
1114 MORECORE or MMAP. Attempted frees and reallocs of any address less
1115 than this are trapped (unless INSECURE is defined).
1116
1117 Magic tag
1118 A cross-check field that should always hold the same value as mparams.magic.
1119
1120 Max allowed footprint
1121 The maximum allowed bytes to allocate from system (zero means no limit)
1122
1123 Flags
1124 Bits recording whether to use MMAP, locks, or contiguous MORECORE
1125
1126 Statistics
1127 Each space keeps track of current and maximum system memory
1128 obtained via MORECORE or MMAP.
1129
1130 Trim support
1131 Fields holding the amount of unused topmost memory that should trigger
1132 trimming, and a counter to force periodic scanning to release unused
1133 non-topmost segments.
1134
1135 Locking
1136 If USE_LOCKS is defined, the "mutex" lock is acquired and released
1137 around every public call using this mspace.
1138
1139 Extension support
1140 A void* pointer and a size_t field that can be used to help implement
1141 extensions to this malloc.
1142*/
1143
1144/* Bin types, widths and sizes */
1145#define NSMALLBINS (32U)
1146#define NTREEBINS (32U)
1147#define SMALLBIN_SHIFT (3U)
1148#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
1149#define TREEBIN_SHIFT (8U)
1150#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
1151#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
1152#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
1153
1154struct malloc_state {
1155 binmap_t smallmap;
1156 binmap_t treemap;
1157 size_t dvsize;
1158 size_t topsize;
1159 char* least_addr;
1160 mchunkptr dv;
1161 mchunkptr top;
1162 size_t trim_check;
1163 size_t release_checks;
1164 size_t magic;
1165 mchunkptr smallbins[(NSMALLBINS+1)*2];
1166 tbinptr treebins[NTREEBINS];
1167 size_t footprint;
1168 size_t max_footprint;
1169 size_t footprint_limit; /* zero means no limit */
1170 flag_t mflags;
1171#if USE_LOCKS
1172 MLOCK_T mutex; /* locate lock among fields that rarely change */
1173#endif /* USE_LOCKS */
1174 msegment seg;
1175 void* extp; /* Unused but available for extensions */
1176 size_t exts;
1177};
1178
1179typedef struct malloc_state* mstate;
1180
1181/* ------------- Global malloc_state and malloc_params ------------------- */
1182
1183/*
1184 malloc_params holds global properties, including those that can be
1185 dynamically set using mallopt. There is a single instance, mparams,
1186 initialized in init_mparams. Note that the non-zeroness of "magic"
1187 also serves as an initialization flag.
1188*/
1189
1190struct malloc_params {
1191 size_t magic;
1192 size_t page_size;
1193 size_t granularity;
1194 size_t mmap_threshold;
1195 size_t trim_threshold;
1196 flag_t default_mflags;
1197};
1198
1199static struct malloc_params mparams;
1200
1201/* Ensure mparams initialized */
1202#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
1203
1204#if !ONLY_MSPACES
1205
1206/* The global malloc_state used for all non-"mspace" calls */
1207static struct malloc_state _gm_;
1208#define gm (&_gm_)
1209#define is_global(M) ((M) == &_gm_)
1210
1211#endif /* !ONLY_MSPACES */
1212
1213#define is_initialized(M) ((M)->top != 0)
1214
1215/* -------------------------- system alloc setup ------------------------- */
1216
1217/* Operations on mflags */
1218
1219#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
1220#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
1221#if USE_LOCKS
1222#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
1223#else
1224#define disable_lock(M)
1225#endif
1226
1227#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
1228#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
1229#if HAVE_MMAP
1230#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
1231#else
1232#define disable_mmap(M)
1233#endif
1234
1235#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
1236#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
1237
1238#define set_lock(M,L)\
1239 ((M)->mflags = (L)?\
1240 ((M)->mflags | USE_LOCK_BIT) :\
1241 ((M)->mflags & ~USE_LOCK_BIT))
1242
1243/* page-align a size */
1244#define page_align(S)\
1245 (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
1246
1247/* granularity-align a size */
1248#define granularity_align(S)\
1249 (((S) + (mparams.granularity - SIZE_T_ONE))\
1250 & ~(mparams.granularity - SIZE_T_ONE))
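
/*
   Illustrative sketch (not part of the original dlmalloc sources):
   page_align simply rounds a byte count up to the next multiple of the
   page size cached in mparams (e.g. 4097 -> 8192 on a 4K-page system).
   Kept inside #if 0; mparams must have been initialized first.
*/
#if 0
static size_t example_page_align(size_t nbytes) {
  assert(mparams.page_size != 0);            /* init_mparams() has run */
  return page_align(nbytes);                 /* multiple of mparams.page_size */
}
#endif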
1251
1252
1253/* For mmap, use granularity alignment on windows, else page-align */
1254#ifdef WIN32
1255#define mmap_align(S) granularity_align(S)
1256#else
1257#define mmap_align(S) page_align(S)
1258#endif
1259
1260/* For sys_alloc, enough padding to ensure can malloc request on success */
1261#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
1262
1263#define is_page_aligned(S)\
1264 (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
1265#define is_granularity_aligned(S)\
1266 (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
1267
1268/* True if segment S holds address A */
1269#define segment_holds(S, A)\
1270 ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
1271
1272/* Return segment holding given address */
1273static msegmentptr segment_holding(mstate m, char* addr) {
1274 msegmentptr sp = &m->seg;
1275 for (;;) {
1276 if (addr >= sp->base && addr < sp->base + sp->size)
1277 return sp;
1278 if ((sp = sp->next) == 0)
1279 return 0;
1280 }
1281}
1282
1283/* Return TTRUE if segment contains a segment link */
1284static int has_segment_link(mstate m, msegmentptr ss) {
1285 msegmentptr sp = &m->seg;
1286 for (;;) {
1287 if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
1288 return 1;
1289 if ((sp = sp->next) == 0)
1290 return 0;
1291 }
1292}
1293
1294#ifndef MORECORE_CANNOT_TRIM
1295#define should_trim(M,s) ((s) > (M)->trim_check)
1296#else /* MORECORE_CANNOT_TRIM */
1297#define should_trim(M,s) (0)
1298#endif /* MORECORE_CANNOT_TRIM */
1299
1300/*
1301 TOP_FOOT_SIZE is padding at the end of a segment, including space
1302 that may be needed to place segment records and fenceposts when new
1303 noncontiguous segments are added.
1304*/
1305#define TOP_FOOT_SIZE\
1306 (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
1307
1308
1309/* ------------------------------- Hooks -------------------------------- */
1310
1311/*
1312 PREACTION should be defined to return 0 on success, and nonzero on
1313 failure. If you are not using locking, you can redefine these to do
1314 anything you like.
1315*/
1316
1317#if USE_LOCKS
1318#define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
1319#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
1320#else /* USE_LOCKS */
1321
1322#ifndef PREACTION
1323#define PREACTION(M) (0)
1324#endif /* PREACTION */
1325
1326#ifndef POSTACTION
1327#define POSTACTION(M)
1328#endif /* POSTACTION */
1329
1330#endif /* USE_LOCKS */
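
/*
   Illustrative sketch (not part of the original dlmalloc sources): the
   locking pattern used around operations on an mspace. PREACTION yields 0
   once the (optional) lock is held; POSTACTION releases it. Kept inside
   #if 0.
*/
#if 0
static void example_locked_operation(mstate m) {
  if (!PREACTION(m)) {
    /* ... operate on m while holding its lock (if locking is enabled) ... */
    POSTACTION(m);
  }
}
#endif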
1331
1332/*
1333 CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
1334 USAGE_ERROR_ACTION is triggered on detected bad frees and
1335 reallocs. The argument p is an address that might have triggered the
1336 fault. It is ignored by the two predefined actions, but might be
1337 useful in custom actions that try to help diagnose errors.
1338*/
1339
1340#if PROCEED_ON_ERROR
1341
1342/* A count of the number of corruption errors causing resets */
1343int malloc_corruption_error_count;
1344
1345/* default corruption action */
1346static void reset_on_error(mstate m);
1347
1348#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
1349#define USAGE_ERROR_ACTION(m, p)
1350
1351#else /* PROCEED_ON_ERROR */
1352
1353#ifndef TOSHI_DEBUG
1354
1355#ifndef CORRUPTION_ERROR_ACTION
1356#define CORRUPTION_ERROR_ACTION(m) ABORT
1357#endif /* CORRUPTION_ERROR_ACTION */
1358
1359#ifndef USAGE_ERROR_ACTION
1360#define USAGE_ERROR_ACTION(m,p) ABORT
1361#endif /* USAGE_ERROR_ACTION */
1362
1363#else /* TOSHI_DEBUG */
1364
1365#ifndef CORRUPTION_ERROR_ACTION
1366#define CORRUPTION_ERROR_ACTION(m) TCRITICAL("!\"TMemory: Memory system corruption detected!\"")
1367#endif /* CORRUPTION_ERROR_ACTION */
1368
1369#ifndef USAGE_ERROR_ACTION
1370#define USAGE_ERROR_ACTION(m,p) TBREAK(); TCRITICAL("!\"TMemory: Memory block footer corruption detected - the block being free'd was probably overrun!\"")
1371#endif /* USAGE_ERROR_ACTION */
1372
1373#endif /* TOSHI_DEBUG */
1374
1375#endif /* PROCEED_ON_ERROR */
1376
1377
1378/* -------------------------- Debugging setup ---------------------------- */
1379
1380#if ! DEBUG
1381
1382#define check_free_chunk(M,P)
1383#define check_inuse_chunk(M,P)
1384#define check_malloced_chunk(M,P,N)
1385#define check_mmapped_chunk(M,P)
1386#define check_malloc_state(M)
1387#define check_top_chunk(M,P)
1388
1389#else /* DEBUG */
1390#define check_free_chunk(M,P) do_check_free_chunk(M,P)
1391#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
1392#define check_top_chunk(M,P) do_check_top_chunk(M,P)
1393#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
1394#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
1395#define check_malloc_state(M) do_check_malloc_state(M)
1396
1397static void do_check_any_chunk(mstate m, mchunkptr p);
1398static void do_check_top_chunk(mstate m, mchunkptr p);
1399static void do_check_mmapped_chunk(mstate m, mchunkptr p);
1400static void do_check_inuse_chunk(mstate m, mchunkptr p);
1401static void do_check_free_chunk(mstate m, mchunkptr p);
1402static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
1403static void do_check_tree(mstate m, tchunkptr t);
1404static void do_check_treebin(mstate m, bindex_t i);
1405static void do_check_smallbin(mstate m, bindex_t i);
1406static void do_check_malloc_state(mstate m);
1407static int bin_find(mstate m, mchunkptr x);
1408static size_t traverse_and_check(mstate m);
1409#endif /* DEBUG */
1410
1411/* ---------------------------- Indexing Bins ---------------------------- */
1412
1413#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
1414#define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT)
1415#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
1416#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
1417
1418/* addressing by index. See above about smallbin repositioning */
1419#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
1420#define treebin_at(M,i) (&((M)->treebins[i]))
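
/*
   Illustrative sketch (not part of the original dlmalloc sources): small
   bins hold one chunk size each, spaced SMALLBIN_WIDTH (8) bytes apart, so
   index and size convert back and forth exactly. Kept inside #if 0.
*/
#if 0
static void example_small_bin_indexing(void) {
  size_t s = MIN_CHUNK_SIZE;                       /* smallest chunk size */
  assert(is_small(s));
  assert(small_index2size(small_index(s)) == s);   /* exact round trip */
  assert(small_index(s) == MIN_SMALL_INDEX);
  assert(!is_small(MIN_LARGE_SIZE));               /* 256+ byte chunks use treebins */
}
#endif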
1421
1422/* assign tree index for size S to variable I. Use x86 asm if possible */
1423#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
1424#define compute_tree_index(S, I)\
1425{\
1426 unsigned int X = S >> TREEBIN_SHIFT;\
1427 if (X == 0)\
1428 I = 0;\
1429 else if (X > 0xFFFF)\
1430 I = NTREEBINS-1;\
1431 else {\
1432 unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \
1433 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
1434 }\
1435}
1436
1437#elif defined (__INTEL_COMPILER)
1438#define compute_tree_index(S, I)\
1439{\
1440 size_t X = S >> TREEBIN_SHIFT;\
1441 if (X == 0)\
1442 I = 0;\
1443 else if (X > 0xFFFF)\
1444 I = NTREEBINS-1;\
1445 else {\
1446 unsigned int K = _bit_scan_reverse (X); \
1447 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
1448 }\
1449}
1450
1451#elif defined(_MSC_VER) && _MSC_VER>=1300
1452#define compute_tree_index(S, I)\
1453{\
1454 size_t X = S >> TREEBIN_SHIFT;\
1455 if (X == 0)\
1456 I = 0;\
1457 else if (X > 0xFFFF)\
1458 I = NTREEBINS-1;\
1459 else {\
1460 unsigned int K;\
1461 _BitScanReverse((DWORD *) &K, (DWORD) X);\
1462 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
1463 }\
1464}
1465
1466#else /* GNUC */
1467#define compute_tree_index(S, I)\
1468{\
1469 size_t X = S >> TREEBIN_SHIFT;\
1470 if (X == 0)\
1471 I = 0;\
1472 else if (X > 0xFFFF)\
1473 I = NTREEBINS-1;\
1474 else {\
1475 unsigned int Y = (unsigned int)X;\
1476 unsigned int N = ((Y - 0x100) >> 16) & 8;\
1477 unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
1478 N += K;\
1479 N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
1480 K = 14 - N + ((Y <<= K) >> 15);\
1481 I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
1482 }\
1483}
1484#endif /* GNUC */
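
/*
   Illustrative sketch (not part of the original dlmalloc sources): the
   treebin index computed above. Sizes 0x100..0x17F land in bin 0,
   0x180..0x1FF in bin 1, 0x200..0x2FF in bin 2, and so on, two bins per
   power of two. Kept inside #if 0.
*/
#if 0
static bindex_t example_tree_index(size_t s) {
  bindex_t i;
  compute_tree_index(s, i);     /* e.g. 0x100 -> 0, 0x180 -> 1, 0x200 -> 2 */
  return i;
}
#endif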
1485
1486/* Bit representing maximum resolved size in a treebin at i */
1487#define bit_for_tree_index(i) \
1488 (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
1489
1490/* Shift placing maximum resolved bit in a treebin at i as sign bit */
1491#define leftshift_for_tree_index(i) \
1492 ((i == NTREEBINS-1)? 0 : \
1493 ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
1494
1495/* The size of the smallest chunk held in bin with index i */
1496#define minsize_for_tree_index(i) \
1497 ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
1498 (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
1499
1500
1501/* ------------------------ Operations on bin maps ----------------------- */
1502
1503/* bit corresponding to given index */
1504#define idx2bit(i) ((binmap_t)(1) << (i))
1505
1506/* Mark/Clear bits with given index */
1507#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
1508#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
1509#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
1510
1511#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
1512#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
1513#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
1514
1515/* isolate the least set bit of a bitmap */
1516#define least_bit(x) ((x) & -(x))
1517
1518/* mask with all bits to left of least bit of x on */
1519#define left_bits(x) ((x<<1) | -(x<<1))
1520
1521/* mask with all bits to left of or equal to least bit of x on */
1522#define same_or_left_bits(x) ((x) | -(x))
1523
1524/* index corresponding to given bit. Use x86 asm if possible */
1525
1526#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
1527#define compute_bit2idx(X, I)\
1528{\
1529 unsigned int J;\
1530 J = __builtin_ctz(X); \
1531 I = (bindex_t)J;\
1532}
1533
1534#elif defined (__INTEL_COMPILER)
1535#define compute_bit2idx(X, I)\
1536{\
1537 unsigned int J;\
1538 J = _bit_scan_forward (X); \
1539 I = (bindex_t)J;\
1540}
1541
1542#elif defined(_MSC_VER) && _MSC_VER>=1300
1543#define compute_bit2idx(X, I)\
1544{\
1545 unsigned int J;\
1546 _BitScanForward((DWORD *) &J, X);\
1547 I = (bindex_t)J;\
1548}
1549
1550#elif USE_BUILTIN_FFS
1551#define compute_bit2idx(X, I) I = ffs(X)-1
1552
1553#else
1554#define compute_bit2idx(X, I)\
1555{\
1556 unsigned int Y = X - 1;\
1557 unsigned int K = Y >> (16-4) & 16;\
1558 unsigned int N = K; Y >>= K;\
1559 N += K = Y >> (8-3) & 8; Y >>= K;\
1560 N += K = Y >> (4-2) & 4; Y >>= K;\
1561 N += K = Y >> (2-1) & 2; Y >>= K;\
1562 N += K = Y >> (1-0) & 1; Y >>= K;\
1563 I = (bindex_t)(N + Y);\
1564}
1565#endif /* GNUC */
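/*
  A compiled-out sketch of how the bin-map helpers above combine; it assumes
  binmap_t is a 32-bit unsigned type, as in stock dlmalloc.
*/
#if 0 /* illustrative sketch, not part of the allocator */
static void example_binmap_ops(void) {
  binmap_t map = idx2bit(3) | idx2bit(7);     /* pretend bins 3 and 7 are non-empty */
  binmap_t least = least_bit(map);            /* 0x8: lowest set bit                */
  bindex_t i;
  compute_bit2idx(least, i);                  /* back to the bin index              */
  assert(i == 3);
  assert(left_bits(idx2bit(3)) == (binmap_t)~0xFU);         /* bits strictly above bit 3  */
  assert(same_or_left_bits(idx2bit(3)) == (binmap_t)~0x7U); /* bit 3 and everything above */
}
#endif /* illustrative sketch */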
1566
1567
1568/* ----------------------- Runtime Check Support ------------------------- */
1569
1570/*
1571 For security, the main invariant is that malloc/free/etc never
1572 writes to a static address other than malloc_state, unless static
1573 malloc_state itself has been corrupted, which cannot occur via
1574 malloc (because of these checks). In essence this means that we
1575 believe all pointers, sizes, maps etc held in malloc_state, but
1576 check all of those linked or offsetted from other embedded data
1577 structures. These checks are interspersed with main code in a way
1578 that tends to minimize their run-time cost.
1579
1580 When FOOTERS is defined, in addition to range checking, we also
1581 verify footer fields of inuse chunks, which can be used to guarantee
1582 that the mstate controlling malloc/free is intact. This is a
1583 streamlined version of the approach described by William Robertson
1584 et al in "Run-time Detection of Heap-based Overflows" LISA'03
1585 http://www.usenix.org/events/lisa03/tech/robertson.html The footer
1586 of an inuse chunk holds the xor of its mstate and a random seed,
1587 that is checked upon calls to free() and realloc(). This is
1588 (probabilistically) unguessable from outside the program, but can be
1589 computed by any code successfully malloc'ing any chunk, so does not
1590 itself provide protection against code that has already broken
1591 security through some other means. Unlike Robertson et al, we
1592 always dynamically check addresses of all offset chunks (previous,
1593 next, etc). This turns out to be cheaper than relying on hashes.
1594*/
1595
1596#if !INSECURE
1597/* Check if address a is at least as high as any from MORECORE or MMAP */
1598#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
1599/* Check if address of next chunk n is higher than base chunk p */
1600#define ok_next(p, n) ((char*)(p) < (char*)(n))
1601/* Check if p has inuse status */
1602#define ok_inuse(p) is_inuse(p)
1603/* Check if p has its pinuse bit on */
1604#define ok_pinuse(p) pinuse(p)
1605
1606#else /* !INSECURE */
1607#define ok_address(M, a) (1)
1608#define ok_next(b, n) (1)
1609#define ok_inuse(p) (1)
1610#define ok_pinuse(p) (1)
1611#endif /* !INSECURE */
1612
1613#if (FOOTERS && !INSECURE)
1614/* Check if (alleged) mstate m has expected magic field */
1615#define ok_magic(M) ((M)->magic == mparams.magic)
1616#else /* (FOOTERS && !INSECURE) */
1617#define ok_magic(M) (1)
1618#endif /* (FOOTERS && !INSECURE) */
1619
1620/* In gcc, use __builtin_expect to minimize impact of checks */
1621#if !INSECURE
1622#if defined(__GNUC__) && __GNUC__ >= 3
1623#define RTCHECK(e) __builtin_expect(e, 1)
1624#else /* GNUC */
1625#define RTCHECK(e) (e)
1626#endif /* GNUC */
1627#else /* !INSECURE */
1628#define RTCHECK(e) (1)
1629#endif /* !INSECURE */
1630
1631/* macros to set up inuse chunks with or without footers */
1632
1633#if !FOOTERS
1634
1635#define mark_inuse_foot(M,p,s)
1636
1637/* Macros for setting head/foot of non-mmapped chunks */
1638
1639/* Set cinuse bit and pinuse bit of next chunk */
1640#define set_inuse(M,p,s)\
1641 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
1642 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
1643
1644/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
1645#define set_inuse_and_pinuse(M,p,s)\
1646 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1647 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
1648
1649/* Set size, cinuse and pinuse bit of this chunk */
1650#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
1651 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
1652
1653#else /* FOOTERS */
1654
1655/* Set foot of inuse chunk to be xor of mstate and seed */
1656#define mark_inuse_foot(M,p,s)\
1657 (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
1658
1659#define get_mstate_for(p)\
1660 ((mstate)(((mchunkptr)((char*)(p) +\
1661 (chunksize(p))))->prev_foot ^ mparams.magic))
1662
1663#define set_inuse(M,p,s)\
1664 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
1665 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
1666 mark_inuse_foot(M,p,s))
1667
1668#define set_inuse_and_pinuse(M,p,s)\
1669 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1670 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
1671 mark_inuse_foot(M,p,s))
1672
1673#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
1674 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1675 mark_inuse_foot(M, p, s))
1676
1677#endif /* !FOOTERS */
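/*
  The FOOTERS protection above is a plain xor: the stored footer is the owning
  mstate xor-ed with the random magic, so only code that knows mparams.magic
  can recover (or forge) it. A compiled-out sketch of the round trip:
*/
#if 0 /* illustrative sketch, not part of the allocator */
static void example_footer_roundtrip(mstate m) {
  size_t footer = (size_t)m ^ mparams.magic;        /* what mark_inuse_foot stores      */
  mstate back   = (mstate)(footer ^ mparams.magic); /* what get_mstate_for reconstructs */
  assert(back == m);
}
#endif /* illustrative sketch */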
1678
1679/* ---------------------------- setting mparams -------------------------- */
1680
1681#if LOCK_AT_FORK
1682static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); }
1683static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); }
1684static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); }
1685#endif /* LOCK_AT_FORK */
1686
1687/* Init mparams */
1688static int init_mparams(void) {
1689#ifdef NEED_GLOBAL_LOCK_INIT
1690 if (malloc_global_mutex_status <= 0)
1691 init_malloc_global_mutex();
1692#endif
1693
1694 ACQUIRE_MALLOC_GLOBAL_LOCK();
1695 if (mparams.magic == 0) {
1696 size_t magic;
1697 size_t psize;
1698 size_t gsize;
1699
1700#ifndef WIN32
1701 psize = malloc_getpagesize;
1702 gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize);
1703#else /* WIN32 */
1704 {
1705 SYSTEM_INFO system_info;
1706 GetSystemInfo(&system_info);
1707 psize = system_info.dwPageSize;
1708 gsize = ((DEFAULT_GRANULARITY != 0)?
1709 DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);
1710 }
1711#endif /* WIN32 */
1712
1713 /* Sanity-check configuration:
1714 size_t must be unsigned and as wide as pointer type.
1715 ints must be at least 4 bytes.
1716 alignment must be at least 8.
1717 Alignment, min chunk size, and page size must all be powers of 2.
1718 */
1719 if ((sizeof(size_t) != sizeof(char*)) ||
1720 (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
1721 (sizeof(int) < 4) ||
1722 (MALLOC_ALIGNMENT < (size_t)8U) ||
1723 ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
1724 ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||
1725 ((gsize & (gsize-SIZE_T_ONE)) != 0) ||
1726 ((psize & (psize-SIZE_T_ONE)) != 0))
1727 ABORT;
1728 mparams.granularity = gsize;
1729 mparams.page_size = psize;
1730 mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
1731 mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
1732#if MORECORE_CONTIGUOUS
1733 mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
1734#else /* MORECORE_CONTIGUOUS */
1735 mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
1736#endif /* MORECORE_CONTIGUOUS */
1737
1738#if !ONLY_MSPACES
1739 /* Set up lock for main malloc area */
1740 gm->mflags = mparams.default_mflags;
1741 (void)INITIAL_LOCK(&gm->mutex);
1742#endif
1743#if LOCK_AT_FORK
1744 pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child);
1745#endif
1746
1747 {
1748 magic = (size_t)((size_t)time(TNULL) ^ (size_t)0x55555555U);
1749 magic |= (size_t)8U; /* ensure nonzero */
1750 magic &= ~(size_t)7U; /* improve chances of fault for bad values */
1751 /* Until memory modes commonly available, use volatile-write */
1752 (*(volatile size_t *)(&(mparams.magic))) = magic;
1753 }
1754 }
1755
1756 RELEASE_MALLOC_GLOBAL_LOCK();
1757 return 1;
1758}
1759
1760/* support for mallopt */
1761static int change_mparam(int param_number, int value) {
1762 size_t val;
1763 ensure_initialization();
1764 val = (value == -1)? MAX_SIZE_T : (size_t)value;
1765 switch(param_number) {
1766 case M_TRIM_THRESHOLD:
1767 mparams.trim_threshold = val;
1768 return 1;
1769 case M_GRANULARITY:
1770 if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
1771 mparams.granularity = val;
1772 return 1;
1773 }
1774 else
1775 return 0;
1776 case M_MMAP_THRESHOLD:
1777 mparams.mmap_threshold = val;
1778 return 1;
1779 default:
1780 return 0;
1781 }
1782}
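/*
  A compiled-out sketch of the mallopt-style rules enforced by change_mparam.
  The page-size figure is an assumption (a typical 4K page); the rest follows
  directly from the switch above.
*/
#if 0 /* illustrative sketch, not part of the allocator */
static void example_change_mparam(void) {
  /* Granularity must be a power of two no smaller than the page size. */
  assert(change_mparam(M_GRANULARITY, 64 * 1024) == 1);
  assert(change_mparam(M_GRANULARITY, 3 * 1024) == 0);  /* not a power of two (and below 4K) */
  /* A value of -1 maps to MAX_SIZE_T, i.e. "never trim automatically". */
  assert(change_mparam(M_TRIM_THRESHOLD, -1) == 1);
}
#endif /* illustrative sketch */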
1783
1784#if DEBUG
1785/* ------------------------- Debugging Support --------------------------- */
1786
1787/* Check properties of any chunk, whether free, inuse, mmapped etc */
1788static void do_check_any_chunk(mstate m, mchunkptr p) {
1789 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
1790 assert(ok_address(m, p));
1791}
1792
1793/* Check properties of top chunk */
1794static void do_check_top_chunk(mstate m, mchunkptr p) {
1795 msegmentptr sp = segment_holding(m, (char*)p);
1796 size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */
1797 assert(sp != 0);
1798 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
1799 assert(ok_address(m, p));
1800 assert(sz == m->topsize);
1801 assert(sz > 0);
1802 assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
1803 assert(pinuse(p));
1804 assert(!pinuse(chunk_plus_offset(p, sz)));
1805}
1806
1807/* Check properties of (inuse) mmapped chunks */
1808static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
1809 size_t sz = chunksize(p);
1810 size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);
1811 assert(is_mmapped(p));
1812 assert(use_mmap(m));
1813 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
1814 assert(ok_address(m, p));
1815 assert(!is_small(sz));
1816 assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
1817 assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
1818 assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
1819}
1820
1821/* Check properties of inuse chunks */
1822static void do_check_inuse_chunk(mstate m, mchunkptr p) {
1823 do_check_any_chunk(m, p);
1824 assert(is_inuse(p));
1825 assert(next_pinuse(p));
1826 /* If not pinuse and not mmapped, previous chunk has OK offset */
1827 assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
1828 if (is_mmapped(p))
1829 do_check_mmapped_chunk(m, p);
1830}
1831
1832/* Check properties of free chunks */
1833static void do_check_free_chunk(mstate m, mchunkptr p) {
1834 size_t sz = chunksize(p);
1835 mchunkptr next = chunk_plus_offset(p, sz);
1836 do_check_any_chunk(m, p);
1837 assert(!is_inuse(p));
1838 assert(!next_pinuse(p));
1839 assert (!is_mmapped(p));
1840 if (p != m->dv && p != m->top) {
1841 if (sz >= MIN_CHUNK_SIZE) {
1842 assert((sz & CHUNK_ALIGN_MASK) == 0);
1843 assert(is_aligned(chunk2mem(p)));
1844 assert(next->prev_foot == sz);
1845 assert(pinuse(p));
1846 assert (next == m->top || is_inuse(next));
1847 assert(p->fd->bk == p);
1848 assert(p->bk->fd == p);
1849 }
1850 else /* markers are always of size SIZE_T_SIZE */
1851 assert(sz == SIZE_T_SIZE);
1852 }
1853}
1854
1855/* Check properties of malloced chunks at the point they are malloced */
1856static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
1857 if (mem != 0) {
1858 mchunkptr p = mem2chunk(mem);
1859 size_t sz = p->head & ~INUSE_BITS;
1860 do_check_inuse_chunk(m, p);
1861 assert((sz & CHUNK_ALIGN_MASK) == 0);
1862 assert(sz >= MIN_CHUNK_SIZE);
1863 assert(sz >= s);
1864 /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
1865 assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
1866 }
1867}
1868
1869/* Check a tree and its subtrees. */
1870static void do_check_tree(mstate m, tchunkptr t) {
1871 tchunkptr head = 0;
1872 tchunkptr u = t;
1873 bindex_t tindex = t->index;
1874 size_t tsize = chunksize(t);
1875 bindex_t idx;
1876 compute_tree_index(tsize, idx);
1877 assert(tindex == idx);
1878 assert(tsize >= MIN_LARGE_SIZE);
1879 assert(tsize >= minsize_for_tree_index(idx));
1880 assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
1881
1882 do { /* traverse through chain of same-sized nodes */
1883 do_check_any_chunk(m, ((mchunkptr)u));
1884 assert(u->index == tindex);
1885 assert(chunksize(u) == tsize);
1886 assert(!is_inuse(u));
1887 assert(!next_pinuse(u));
1888 assert(u->fd->bk == u);
1889 assert(u->bk->fd == u);
1890 if (u->parent == 0) {
1891 assert(u->child[0] == 0);
1892 assert(u->child[1] == 0);
1893 }
1894 else {
1895 assert(head == 0); /* only one node on chain has parent */
1896 head = u;
1897 assert(u->parent != u);
1898 assert (u->parent->child[0] == u ||
1899 u->parent->child[1] == u ||
1900 *((tbinptr*)(u->parent)) == u);
1901 if (u->child[0] != 0) {
1902 assert(u->child[0]->parent == u);
1903 assert(u->child[0] != u);
1904 do_check_tree(m, u->child[0]);
1905 }
1906 if (u->child[1] != 0) {
1907 assert(u->child[1]->parent == u);
1908 assert(u->child[1] != u);
1909 do_check_tree(m, u->child[1]);
1910 }
1911 if (u->child[0] != 0 && u->child[1] != 0) {
1912 assert(chunksize(u->child[0]) < chunksize(u->child[1]));
1913 }
1914 }
1915 u = u->fd;
1916 } while (u != t);
1917 assert(head != 0);
1918}
1919
1920/* Check all the chunks in a treebin. */
1921static void do_check_treebin(mstate m, bindex_t i) {
1922 tbinptr* tb = treebin_at(m, i);
1923 tchunkptr t = *tb;
1924 int empty = (m->treemap & (1U << i)) == 0;
1925 if (t == 0)
1926 assert(empty);
1927 if (!empty)
1928 do_check_tree(m, t);
1929}
1930
1931/* Check all the chunks in a smallbin. */
1932static void do_check_smallbin(mstate m, bindex_t i) {
1933 sbinptr b = smallbin_at(m, i);
1934 mchunkptr p = b->bk;
1935 unsigned int empty = (m->smallmap & (1U << i)) == 0;
1936 if (p == b)
1937 assert(empty);
1938 if (!empty) {
1939 for (; p != b; p = p->bk) {
1940 size_t size = chunksize(p);
1941 mchunkptr q;
1942 /* each chunk claims to be free */
1943 do_check_free_chunk(m, p);
1944 /* chunk belongs in bin */
1945 assert(small_index(size) == i);
1946 assert(p->bk == b || chunksize(p->bk) == chunksize(p));
1947 /* chunk is followed by an inuse chunk */
1948 q = next_chunk(p);
1949 if (q->head != FENCEPOST_HEAD)
1950 do_check_inuse_chunk(m, q);
1951 }
1952 }
1953}
1954
1955/* Find x in a bin. Used in other check functions. */
1956static int bin_find(mstate m, mchunkptr x) {
1957 size_t size = chunksize(x);
1958 if (is_small(size)) {
1959 bindex_t sidx = small_index(size);
1960 sbinptr b = smallbin_at(m, sidx);
1961 if (smallmap_is_marked(m, sidx)) {
1962 mchunkptr p = b;
1963 do {
1964 if (p == x)
1965 return 1;
1966 } while ((p = p->fd) != b);
1967 }
1968 }
1969 else {
1970 bindex_t tidx;
1971 compute_tree_index(size, tidx);
1972 if (treemap_is_marked(m, tidx)) {
1973 tchunkptr t = *treebin_at(m, tidx);
1974 size_t sizebits = size << leftshift_for_tree_index(tidx);
1975 while (t != 0 && chunksize(t) != size) {
1976 t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
1977 sizebits <<= 1;
1978 }
1979 if (t != 0) {
1980 tchunkptr u = t;
1981 do {
1982 if (u == (tchunkptr)x)
1983 return 1;
1984 } while ((u = u->fd) != t);
1985 }
1986 }
1987 }
1988 return 0;
1989}
1990
1991/* Traverse each chunk and check it; return total */
1992static size_t traverse_and_check(mstate m) {
1993 size_t sum = 0;
1994 if (is_initialized(m)) {
1995 msegmentptr s = &m->seg;
1996 sum += m->topsize + TOP_FOOT_SIZE;
1997 while (s != 0) {
1998 mchunkptr q = align_as_chunk(s->base);
1999 mchunkptr lastq = 0;
2000 assert(pinuse(q));
2001 while (segment_holds(s, q) &&
2002 q != m->top && q->head != FENCEPOST_HEAD) {
2003 sum += chunksize(q);
2004 if (is_inuse(q)) {
2005 assert(!bin_find(m, q));
2006 do_check_inuse_chunk(m, q);
2007 }
2008 else {
2009 assert(q == m->dv || bin_find(m, q));
2010 assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */
2011 do_check_free_chunk(m, q);
2012 }
2013 lastq = q;
2014 q = next_chunk(q);
2015 }
2016 s = s->next;
2017 }
2018 }
2019 return sum;
2020}
2021
2022
2023/* Check all properties of malloc_state. */
2024static void do_check_malloc_state(mstate m) {
2025 bindex_t i;
2026 size_t total;
2027 /* check bins */
2028 for (i = 0; i < NSMALLBINS; ++i)
2029 do_check_smallbin(m, i);
2030 for (i = 0; i < NTREEBINS; ++i)
2031 do_check_treebin(m, i);
2032
2033 if (m->dvsize != 0) { /* check dv chunk */
2034 do_check_any_chunk(m, m->dv);
2035 assert(m->dvsize == chunksize(m->dv));
2036 assert(m->dvsize >= MIN_CHUNK_SIZE);
2037 assert(bin_find(m, m->dv) == 0);
2038 }
2039
2040 if (m->top != 0) { /* check top chunk */
2041 do_check_top_chunk(m, m->top);
2042 /*assert(m->topsize == chunksize(m->top)); redundant */
2043 assert(m->topsize > 0);
2044 assert(bin_find(m, m->top) == 0);
2045 }
2046
2047 total = traverse_and_check(m);
2048 assert(total <= m->footprint);
2049 assert(m->footprint <= m->max_footprint);
2050}
2051#endif /* DEBUG */
2052
2053/* ----------------------------- statistics ------------------------------ */
2054
2055#if !NO_MALLINFO
2056static struct mallinfo internal_mallinfo(mstate m) {
2057 struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2058 ensure_initialization();
2059 if (!PREACTION(m)) {
2060 check_malloc_state(m);
2061 if (is_initialized(m)) {
2062 size_t nfree = SIZE_T_ONE; /* top always free */
2063 size_t mfree = m->topsize + TOP_FOOT_SIZE;
2064 size_t sum = mfree;
2065 msegmentptr s = &m->seg;
2066 while (s != 0) {
2067 mchunkptr q = align_as_chunk(s->base);
2068 while (segment_holds(s, q) &&
2069 q != m->top && q->head != FENCEPOST_HEAD) {
2070 size_t sz = chunksize(q);
2071 sum += sz;
2072 if (!is_inuse(q)) {
2073 mfree += sz;
2074 ++nfree;
2075 }
2076 q = next_chunk(q);
2077 }
2078 s = s->next;
2079 }
2080
2081 nm.arena = sum;
2082 nm.ordblks = nfree;
2083 nm.hblkhd = m->footprint - sum;
2084 nm.usmblks = m->max_footprint;
2085 nm.uordblks = m->footprint - mfree;
2086 nm.fordblks = mfree;
2087 nm.keepcost = m->topsize;
2088 }
2089
2090 POSTACTION(m);
2091 }
2092 return nm;
2093}
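/*
  The assignments above satisfy two simple identities, sketched here (compiled
  out): traversed bytes plus whatever else sits in the footprint add up to the
  footprint, and so do the in-use and free totals.
*/
#if 0 /* illustrative sketch, not part of the allocator */
static void example_mallinfo_identities(struct mallinfo nm, size_t footprint) {
  assert(nm.arena + nm.hblkhd == footprint);      /* arena = sum, hblkhd = footprint - sum */
  assert(nm.uordblks + nm.fordblks == footprint); /* uordblks = footprint - mfree          */
}
#endif /* illustrative sketch */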
2094#endif /* !NO_MALLINFO */
2095
2096#if !NO_MALLOC_STATS
2097static void internal_malloc_stats(mstate m) {
2098 ensure_initialization();
2099 if (!PREACTION(m)) {
2100 size_t maxfp = 0;
2101 size_t fp = 0;
2102 size_t used = 0;
2103 check_malloc_state(m);
2104 if (is_initialized(m)) {
2105 msegmentptr s = &m->seg;
2106 maxfp = m->max_footprint;
2107 fp = m->footprint;
2108 used = fp - (m->topsize + TOP_FOOT_SIZE);
2109
2110 while (s != 0) {
2111 mchunkptr q = align_as_chunk(s->base);
2112 while (segment_holds(s, q) &&
2113 q != m->top && q->head != FENCEPOST_HEAD) {
2114 if (!is_inuse(q))
2115 used -= chunksize(q);
2116 q = next_chunk(q);
2117 }
2118 s = s->next;
2119 }
2120 }
2121 POSTACTION(m); /* drop lock */
2122 fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
2123 fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp));
2124 fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used));
2125 }
2126}
2127#endif /* NO_MALLOC_STATS */
2128
2129/* ----------------------- Operations on smallbins ----------------------- */
2130
2131/*
2132 Various forms of linking and unlinking are defined as macros. Even
2133 the ones for trees, which are very long but have very short typical
2134 paths. This is ugly but reduces reliance on inlining support of
2135 compilers.
2136*/
2137
2138/* Link a free chunk into a smallbin */
2139#define insert_small_chunk(M, P, S) {\
2140 bindex_t I = small_index(S);\
2141 mchunkptr B = smallbin_at(M, I);\
2142 mchunkptr F = B;\
2143 assert(S >= MIN_CHUNK_SIZE);\
2144 if (!smallmap_is_marked(M, I))\
2145 mark_smallmap(M, I);\
2146 else if (RTCHECK(ok_address(M, B->fd)))\
2147 F = B->fd;\
2148 else {\
2149 CORRUPTION_ERROR_ACTION(M);\
2150 }\
2151 B->fd = P;\
2152 F->bk = P;\
2153 P->fd = F;\
2154 P->bk = B;\
2155}
2156
2157/* Unlink a chunk from a smallbin */
2158#define unlink_small_chunk(M, P, S) {\
2159 mchunkptr F = P->fd;\
2160 mchunkptr B = P->bk;\
2161 bindex_t I = small_index(S);\
2162 assert(P != B);\
2163 assert(P != F);\
2164 assert(chunksize(P) == small_index2size(I));\
2165 if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \
2166 if (B == F) {\
2167 clear_smallmap(M, I);\
2168 }\
2169 else if (RTCHECK(B == smallbin_at(M,I) ||\
2170 (ok_address(M, B) && B->fd == P))) {\
2171 F->bk = B;\
2172 B->fd = F;\
2173 }\
2174 else {\
2175 CORRUPTION_ERROR_ACTION(M);\
2176 }\
2177 }\
2178 else {\
2179 CORRUPTION_ERROR_ACTION(M);\
2180 }\
2181}
2182
2183/* Unlink the first chunk from a smallbin */
2184#define unlink_first_small_chunk(M, B, P, I) {\
2185 mchunkptr F = P->fd;\
2186 assert(P != B);\
2187 assert(P != F);\
2188 assert(chunksize(P) == small_index2size(I));\
2189 if (B == F) {\
2190 clear_smallmap(M, I);\
2191 }\
2192 else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\
2193 F->bk = B;\
2194 B->fd = F;\
2195 }\
2196 else {\
2197 CORRUPTION_ERROR_ACTION(M);\
2198 }\
2199}
2200
2201/* Replace dv node, binning the old one */
2202/* Used only when dvsize known to be small */
2203#define replace_dv(M, P, S) {\
2204 size_t DVS = M->dvsize;\
2205 assert(is_small(DVS));\
2206 if (DVS != 0) {\
2207 mchunkptr DV = M->dv;\
2208 insert_small_chunk(M, DV, DVS);\
2209 }\
2210 M->dvsize = S;\
2211 M->dv = P;\
2212}
2213
2214/* ------------------------- Operations on trees ------------------------- */
2215
2216/* Insert chunk into tree */
2217#define insert_large_chunk(M, X, S) {\
2218 tbinptr* H;\
2219 bindex_t I;\
2220 compute_tree_index(S, I);\
2221 H = treebin_at(M, I);\
2222 X->index = I;\
2223 X->child[0] = X->child[1] = 0;\
2224 if (!treemap_is_marked(M, I)) {\
2225 mark_treemap(M, I);\
2226 *H = X;\
2227 X->parent = (tchunkptr)H;\
2228 X->fd = X->bk = X;\
2229 }\
2230 else {\
2231 tchunkptr T = *H;\
2232 size_t K = S << leftshift_for_tree_index(I);\
2233 for (;;) {\
2234 if (chunksize(T) != S) {\
2235 tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
2236 K <<= 1;\
2237 if (*C != 0)\
2238 T = *C;\
2239 else if (RTCHECK(ok_address(M, C))) {\
2240 *C = X;\
2241 X->parent = T;\
2242 X->fd = X->bk = X;\
2243 break;\
2244 }\
2245 else {\
2246 CORRUPTION_ERROR_ACTION(M);\
2247 break;\
2248 }\
2249 }\
2250 else {\
2251 tchunkptr F = T->fd;\
2252 if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
2253 T->fd = F->bk = X;\
2254 X->fd = F;\
2255 X->bk = T;\
2256 X->parent = 0;\
2257 break;\
2258 }\
2259 else {\
2260 CORRUPTION_ERROR_ACTION(M);\
2261 break;\
2262 }\
2263 }\
2264 }\
2265 }\
2266}
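/*
  A compiled-out sketch of how K steers the descent above: the shift from
  leftshift_for_tree_index moves the first still-undetermined size bit into the
  sign position, and that bit picks child[0] or child[1] at each level. Assumes
  the stock TREEBIN_SHIFT of 8, under which bin 3 spans [768, 1024).
*/
#if 0 /* illustrative sketch, not part of the allocator */
static void example_trie_branching(void) {
  size_t S = 832;                               /* 0x340: bit 7, the highest bit not fixed by bin 3, is 0 */
  size_t K = S << leftshift_for_tree_index(3);  /* bit 7 of S is now the sign bit of K                    */
  assert(((K >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1) == 0);  /* so the first step takes child[0]            */
}
#endif /* illustrative sketch */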
2267
2268/*
2269 Unlink steps:
2270
2271 1. If x is a chained node, unlink it from its same-sized fd/bk links
2272 and choose its bk node as its replacement.
2273 2. If x was the last node of its size, but not a leaf node, it must
2274 be replaced with a leaf node (not merely one with an open left or
2275      right), to make sure that lefts and rights of descendants
2276 correspond properly to bit masks. We use the rightmost descendent
2277 of x. We could use any other leaf, but this is easy to locate and
2278 tends to counteract removal of leftmosts elsewhere, and so keeps
2279 paths shorter than minimally guaranteed. This doesn't loop much
2280 because on average a node in a tree is near the bottom.
2281 3. If x is the base of a chain (i.e., has parent links) relink
2282 x's parent and children to x's replacement (or null if none).
2283*/
2284
2285#define unlink_large_chunk(M, X) {\
2286 tchunkptr XP = X->parent;\
2287 tchunkptr R;\
2288 if (X->bk != X) {\
2289 tchunkptr F = X->fd;\
2290 R = X->bk;\
2291 if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\
2292 F->bk = R;\
2293 R->fd = F;\
2294 }\
2295 else {\
2296 CORRUPTION_ERROR_ACTION(M);\
2297 }\
2298 }\
2299 else {\
2300 tchunkptr* RP;\
2301 if (((R = *(RP = &(X->child[1]))) != 0) ||\
2302 ((R = *(RP = &(X->child[0]))) != 0)) {\
2303 tchunkptr* CP;\
2304 while ((*(CP = &(R->child[1])) != 0) ||\
2305 (*(CP = &(R->child[0])) != 0)) {\
2306 R = *(RP = CP);\
2307 }\
2308 if (RTCHECK(ok_address(M, RP)))\
2309 *RP = 0;\
2310 else {\
2311 CORRUPTION_ERROR_ACTION(M);\
2312 }\
2313 }\
2314 }\
2315 if (XP != 0) {\
2316 tbinptr* H = treebin_at(M, X->index);\
2317 if (X == *H) {\
2318 if ((*H = R) == 0) \
2319 clear_treemap(M, X->index);\
2320 }\
2321 else if (RTCHECK(ok_address(M, XP))) {\
2322 if (XP->child[0] == X) \
2323 XP->child[0] = R;\
2324 else \
2325 XP->child[1] = R;\
2326 }\
2327 else\
2328 CORRUPTION_ERROR_ACTION(M);\
2329 if (R != 0) {\
2330 if (RTCHECK(ok_address(M, R))) {\
2331 tchunkptr C0, C1;\
2332 R->parent = XP;\
2333 if ((C0 = X->child[0]) != 0) {\
2334 if (RTCHECK(ok_address(M, C0))) {\
2335 R->child[0] = C0;\
2336 C0->parent = R;\
2337 }\
2338 else\
2339 CORRUPTION_ERROR_ACTION(M);\
2340 }\
2341 if ((C1 = X->child[1]) != 0) {\
2342 if (RTCHECK(ok_address(M, C1))) {\
2343 R->child[1] = C1;\
2344 C1->parent = R;\
2345 }\
2346 else\
2347 CORRUPTION_ERROR_ACTION(M);\
2348 }\
2349 }\
2350 else\
2351 CORRUPTION_ERROR_ACTION(M);\
2352 }\
2353 }\
2354}
2355
2356/* Relays to large vs small bin operations */
2357
2358#define insert_chunk(M, P, S)\
2359 if (is_small(S)) insert_small_chunk(M, P, S)\
2360 else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
2361
2362#define unlink_chunk(M, P, S)\
2363 if (is_small(S)) unlink_small_chunk(M, P, S)\
2364 else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
2365
2366
2367/* Relays to internal calls to malloc/free from realloc, memalign etc */
2368
2369#if ONLY_MSPACES
2370#define internal_malloc(m, b) mspace_malloc(m, b)
2371#define internal_free(m, mem) mspace_free(m,mem);
2372#else /* ONLY_MSPACES */
2373#if MSPACES
2374#define internal_malloc(m, b)\
2375 ((m == gm)? dlmalloc(b) : mspace_malloc(m, b))
2376#define internal_free(m, mem)\
2377 if (m == gm) dlfree(mem); else mspace_free(m,mem);
2378#else /* MSPACES */
2379#define internal_malloc(m, b) dlmalloc(b)
2380#define internal_free(m, mem) dlfree(mem)
2381#endif /* MSPACES */
2382#endif /* ONLY_MSPACES */
2383
2384/* ----------------------- Direct-mmapping chunks ----------------------- */
2385
2386/*
2387 Directly mmapped chunks are set up with an offset to the start of
2388 the mmapped region stored in the prev_foot field of the chunk. This
2389 allows reconstruction of the required argument to MUNMAP when freed,
2390 and also allows adjustment of the returned chunk to meet alignment
2391 requirements (especially in memalign).
2392*/
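/*
  A compiled-out sketch of the reconstruction described above, matching what
  free() does for such a chunk: prev_foot gives the offset back to the mmap
  base, and the mapped length is the chunk size plus that offset plus the
  mmap footer padding.
*/
#if 0 /* illustrative sketch, not part of the allocator */
static void example_unmap_direct_chunk(mstate m, mchunkptr p) {
  size_t offset = p->prev_foot;                        /* distance from mmap base to chunk */
  size_t length = chunksize(p) + offset + MMAP_FOOT_PAD;
  if (CALL_MUNMAP((char*)p - offset, length) == 0)
    m->footprint -= length;
}
#endif /* illustrative sketch */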
2393
2394/* Malloc using mmap */
2395static void* mmap_alloc(mstate m, size_t nb) {
2396 size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
2397 if (m->footprint_limit != 0) {
2398 size_t fp = m->footprint + mmsize;
2399 if (fp <= m->footprint || fp > m->footprint_limit)
2400 return 0;
2401 }
2402 if (mmsize > nb) { /* Check for wrap around 0 */
2403 char* mm = (char*)(CALL_DIRECT_MMAP(mmsize));
2404 if (mm != CMFAIL) {
2405 size_t offset = align_offset(chunk2mem(mm));
2406 size_t psize = mmsize - offset - MMAP_FOOT_PAD;
2407 mchunkptr p = (mchunkptr)(mm + offset);
2408 p->prev_foot = offset;
2409 p->head = psize;
2410 mark_inuse_foot(m, p, psize);
2411 chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
2412 chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
2413
2414 if (m->least_addr == 0 || mm < m->least_addr)
2415 m->least_addr = mm;
2416 if ((m->footprint += mmsize) > m->max_footprint)
2417 m->max_footprint = m->footprint;
2418 assert(is_aligned(chunk2mem(p)));
2419 check_mmapped_chunk(m, p);
2420 return chunk2mem(p);
2421 }
2422 }
2423 return 0;
2424}
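/*
  The footprint_limit test used above (and again in sys_alloc) is written to be
  overflow-safe; a compiled-out restatement:
*/
#if 0 /* illustrative sketch, not part of the allocator */
static int example_within_footprint_limit(size_t footprint, size_t limit, size_t asize) {
  size_t fp = footprint + asize;
  if (fp <= footprint || fp > limit)  /* the sum wrapped around, or would exceed the cap */
    return 0;                         /* refuse the allocation                           */
  return 1;
}
#endif /* illustrative sketch */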
2425
2426/* Realloc using mmap */
2427static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) {
2428 size_t oldsize = chunksize(oldp);
2429 (void)flags; /* placate people compiling -Wunused */
2430 if (is_small(nb)) /* Can't shrink mmap regions below small size */
2431 return 0;
2432 /* Keep old chunk if big enough but not too big */
2433 if (oldsize >= nb + SIZE_T_SIZE &&
2434 (oldsize - nb) <= (mparams.granularity << 1))
2435 return oldp;
2436 else {
2437 size_t offset = oldp->prev_foot;
2438 size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
2439 size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
2440 char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
2441 oldmmsize, newmmsize, flags);
2442 if (cp != CMFAIL) {
2443 mchunkptr newp = (mchunkptr)(cp + offset);
2444 size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
2445 newp->head = psize;
2446 mark_inuse_foot(m, newp, psize);
2447 chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
2448 chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
2449
2450 if (cp < m->least_addr)
2451 m->least_addr = cp;
2452 if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
2453 m->max_footprint = m->footprint;
2454 check_mmapped_chunk(m, newp);
2455 return newp;
2456 }
2457 }
2458 return 0;
2459}
2460
2461
2462/* -------------------------- mspace management -------------------------- */
2463
2464/* Init top chunk and its size */
2465static void init_top(mstate m, mchunkptr p, size_t psize) {
2466 /* Ensure alignment */
2467 size_t offset = align_offset(chunk2mem(p));
2468 p = (mchunkptr)((char*)p + offset);
2469 psize -= offset;
2470
2471 m->top = p;
2472 m->topsize = psize;
2473 p->head = psize | PINUSE_BIT;
2474 /* set size of fake trailing chunk holding overhead space only once */
2475 chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
2476 m->trim_check = mparams.trim_threshold; /* reset on each update */
2477}
2478
2479/* Init bins for a new mstate that is otherwise zeroed out */
2480static void init_bins(mstate m) {
2481 /* Establish circular links for smallbins */
2482 bindex_t i;
2483 for (i = 0; i < NSMALLBINS; ++i) {
2484 sbinptr bin = smallbin_at(m,i);
2485 bin->fd = bin->bk = bin;
2486 }
2487}
2488
2489#if PROCEED_ON_ERROR
2490
2491/* default corruption action */
2492static void reset_on_error(mstate m) {
2493 int i;
2494 ++malloc_corruption_error_count;
2495 /* Reinitialize fields to forget about all memory */
2496 m->smallmap = m->treemap = 0;
2497 m->dvsize = m->topsize = 0;
2498 m->seg.base = 0;
2499 m->seg.size = 0;
2500 m->seg.next = 0;
2501 m->top = m->dv = 0;
2502 for (i = 0; i < NTREEBINS; ++i)
2503 *treebin_at(m, i) = 0;
2504 init_bins(m);
2505}
2506#endif /* PROCEED_ON_ERROR */
2507
2508/* Allocate chunk and prepend remainder with chunk in successor base. */
2509static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
2510 size_t nb) {
2511 mchunkptr p = align_as_chunk(newbase);
2512 mchunkptr oldfirst = align_as_chunk(oldbase);
2513 size_t psize = (char*)oldfirst - (char*)p;
2514 mchunkptr q = chunk_plus_offset(p, nb);
2515 size_t qsize = psize - nb;
2516 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
2517
2518 assert((char*)oldfirst > (char*)q);
2519 assert(pinuse(oldfirst));
2520 assert(qsize >= MIN_CHUNK_SIZE);
2521
2522 /* consolidate remainder with first chunk of old base */
2523 if (oldfirst == m->top) {
2524 size_t tsize = m->topsize += qsize;
2525 m->top = q;
2526 q->head = tsize | PINUSE_BIT;
2527 check_top_chunk(m, q);
2528 }
2529 else if (oldfirst == m->dv) {
2530 size_t dsize = m->dvsize += qsize;
2531 m->dv = q;
2532 set_size_and_pinuse_of_free_chunk(q, dsize);
2533 }
2534 else {
2535 if (!is_inuse(oldfirst)) {
2536 size_t nsize = chunksize(oldfirst);
2537 unlink_chunk(m, oldfirst, nsize);
2538 oldfirst = chunk_plus_offset(oldfirst, nsize);
2539 qsize += nsize;
2540 }
2541 set_free_with_pinuse(q, qsize, oldfirst);
2542 insert_chunk(m, q, qsize);
2543 check_free_chunk(m, q);
2544 }
2545
2546 check_malloced_chunk(m, chunk2mem(p), nb);
2547 return chunk2mem(p);
2548}
2549
2550/* Add a segment to hold a new noncontiguous region */
2551static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
2552 /* Determine locations and sizes of segment, fenceposts, old top */
2553 char* old_top = (char*)m->top;
2554 msegmentptr oldsp = segment_holding(m, old_top);
2555 char* old_end = oldsp->base + oldsp->size;
2556 size_t ssize = pad_request(sizeof(struct malloc_segment));
2557 char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
2558 size_t offset = align_offset(chunk2mem(rawsp));
2559 char* asp = rawsp + offset;
2560 char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
2561 mchunkptr sp = (mchunkptr)csp;
2562 msegmentptr ss = (msegmentptr)(chunk2mem(sp));
2563 mchunkptr tnext = chunk_plus_offset(sp, ssize);
2564 mchunkptr p = tnext;
2565 int nfences = 0;
2566
2567 /* reset top to new space */
2568 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
2569
2570 /* Set up segment record */
2571 assert(is_aligned(ss));
2572 set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
2573 *ss = m->seg; /* Push current record */
2574 m->seg.base = tbase;
2575 m->seg.size = tsize;
2576 m->seg.sflags = mmapped;
2577 m->seg.next = ss;
2578
2579 /* Insert trailing fenceposts */
2580 for (;;) {
2581 mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
2582 p->head = FENCEPOST_HEAD;
2583 ++nfences;
2584 if ((char*)(&(nextp->head)) < old_end)
2585 p = nextp;
2586 else
2587 break;
2588 }
2589 assert(nfences >= 2);
2590
2591 /* Insert the rest of old top into a bin as an ordinary free chunk */
2592 if (csp != old_top) {
2593 mchunkptr q = (mchunkptr)old_top;
2594 size_t psize = csp - old_top;
2595 mchunkptr tn = chunk_plus_offset(q, psize);
2596 set_free_with_pinuse(q, psize, tn);
2597 insert_chunk(m, q, psize);
2598 }
2599
2600 check_top_chunk(m, m->top);
2601}
2602
2603/* -------------------------- System allocation -------------------------- */
2604
2605/* Get memory from system using MORECORE or MMAP */
2606static void* sys_alloc(mstate m, size_t nb) {
2607 char* tbase = CMFAIL;
2608 size_t tsize = 0;
2609 flag_t mmap_flag = 0;
2610 size_t asize; /* allocation size */
2611
2612 ensure_initialization();
2613
2614 /* Directly map large chunks, but only if already initialized */
2615 if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {
2616 void* mem = mmap_alloc(m, nb);
2617 if (mem != 0)
2618 return mem;
2619 }
2620
2621 asize = granularity_align(nb + SYS_ALLOC_PADDING);
2622 if (asize <= nb)
2623 return 0; /* wraparound */
2624 if (m->footprint_limit != 0) {
2625 size_t fp = m->footprint + asize;
2626 if (fp <= m->footprint || fp > m->footprint_limit)
2627 return 0;
2628 }
2629
2630 /*
2631 Try getting memory in any of three ways (in most-preferred to
2632 least-preferred order):
2633 1. A call to MORECORE that can normally contiguously extend memory.
2634      (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
2635      main space is mmapped or a previous contiguous call failed)
2636 2. A call to MMAP new space (disabled if not HAVE_MMAP).
2637 Note that under the default settings, if MORECORE is unable to
2638      fulfill a request, and HAVE_MMAP is true, then mmap is
2639 used as a noncontiguous system allocator. This is a useful backup
2640 strategy for systems with holes in address spaces -- in this case
2641 sbrk cannot contiguously expand the heap, but mmap may be able to
2642 find space.
2643 3. A call to MORECORE that cannot usually contiguously extend memory.
2644 (disabled if not HAVE_MORECORE)
2645
2646 In all cases, we need to request enough bytes from system to ensure
2647 we can malloc nb bytes upon success, so pad with enough space for
2648 top_foot, plus alignment-pad to make sure we don't lose bytes if
2649 not on boundary, and round this up to a granularity unit.
2650 */
2651
2652 if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
2653 char* br = CMFAIL;
2654 size_t ssize = asize; /* sbrk call size */
2655 msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
2656 ACQUIRE_MALLOC_GLOBAL_LOCK();
2657
2658 if (ss == 0) { /* First time through or recovery */
2659 char* base = (char*)CALL_MORECORE(0);
2660 if (base != CMFAIL) {
2661 size_t fp;
2662 /* Adjust to end on a page boundary */
2663 if (!is_page_aligned(base))
2664 ssize += (page_align((size_t)base) - (size_t)base);
2665 fp = m->footprint + ssize; /* recheck limits */
2666 if (ssize > nb && ssize < HALF_MAX_SIZE_T &&
2667 (m->footprint_limit == 0 ||
2668 (fp > m->footprint && fp <= m->footprint_limit)) &&
2669 (br = (char*)(CALL_MORECORE(ssize))) == base) {
2670 tbase = base;
2671 tsize = ssize;
2672 }
2673 }
2674 }
2675 else {
2676 /* Subtract out existing available top space from MORECORE request. */
2677 ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING);
2678 /* Use mem here only if it did continuously extend old space */
2679 if (ssize < HALF_MAX_SIZE_T &&
2680 (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) {
2681 tbase = br;
2682 tsize = ssize;
2683 }
2684 }
2685
2686 if (tbase == CMFAIL) { /* Cope with partial failure */
2687 if (br != CMFAIL) { /* Try to use/extend the space we did get */
2688 if (ssize < HALF_MAX_SIZE_T &&
2689 ssize < nb + SYS_ALLOC_PADDING) {
2690 size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize);
2691 if (esize < HALF_MAX_SIZE_T) {
2692 char* end = (char*)CALL_MORECORE(esize);
2693 if (end != CMFAIL)
2694 ssize += esize;
2695 else { /* Can't use; try to release */
2696 (void) CALL_MORECORE(-ssize);
2697 br = CMFAIL;
2698 }
2699 }
2700 }
2701 }
2702 if (br != CMFAIL) { /* Use the space we did get */
2703 tbase = br;
2704 tsize = ssize;
2705 }
2706 else
2707 disable_contiguous(m); /* Don't try contiguous path in the future */
2708 }
2709
2710 RELEASE_MALLOC_GLOBAL_LOCK();
2711 }
2712
2713 if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
2714 char* mp = (char*)(CALL_MMAP(asize));
2715 if (mp != CMFAIL) {
2716 tbase = mp;
2717 tsize = asize;
2718 mmap_flag = USE_MMAP_BIT;
2719 }
2720 }
2721
2722 if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
2723 if (asize < HALF_MAX_SIZE_T) {
2724 char* br = CMFAIL;
2725 char* end = CMFAIL;
2726 ACQUIRE_MALLOC_GLOBAL_LOCK();
2727 br = (char*)(CALL_MORECORE(asize));
2728 end = (char*)(CALL_MORECORE(0));
2729 RELEASE_MALLOC_GLOBAL_LOCK();
2730 if (br != CMFAIL && end != CMFAIL && br < end) {
2731 size_t ssize = end - br;
2732 if (ssize > nb + TOP_FOOT_SIZE) {
2733 tbase = br;
2734 tsize = ssize;
2735 }
2736 }
2737 }
2738 }
2739
2740 if (tbase != CMFAIL) {
2741
2742 if ((m->footprint += tsize) > m->max_footprint)
2743 m->max_footprint = m->footprint;
2744
2745 if (!is_initialized(m)) { /* first-time initialization */
2746 if (m->least_addr == 0 || tbase < m->least_addr)
2747 m->least_addr = tbase;
2748 m->seg.base = tbase;
2749 m->seg.size = tsize;
2750 m->seg.sflags = mmap_flag;
2751 m->magic = mparams.magic;
2752 m->release_checks = MAX_RELEASE_CHECK_RATE;
2753 init_bins(m);
2754#if !ONLY_MSPACES
2755 if (is_global(m))
2756 init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
2757 else
2758#endif
2759 {
2760 /* Offset top by embedded malloc_state */
2761 mchunkptr mn = next_chunk(mem2chunk(m));
2762 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
2763 }
2764 }
2765
2766 else {
2767 /* Try to merge with an existing segment */
2768 msegmentptr sp = &m->seg;
2769 /* Only consider most recent segment if traversal suppressed */
2770 while (sp != 0 && tbase != sp->base + sp->size)
2771 sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
2772 if (sp != 0 &&
2773 !is_extern_segment(sp) &&
2774 (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
2775 segment_holds(sp, m->top)) { /* append */
2776 sp->size += tsize;
2777 init_top(m, m->top, m->topsize + tsize);
2778 }
2779 else {
2780 if (tbase < m->least_addr)
2781 m->least_addr = tbase;
2782 sp = &m->seg;
2783 while (sp != 0 && sp->base != tbase + tsize)
2784 sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
2785 if (sp != 0 &&
2786 !is_extern_segment(sp) &&
2787 (sp->sflags & USE_MMAP_BIT) == mmap_flag) {
2788 char* oldbase = sp->base;
2789 sp->base = tbase;
2790 sp->size += tsize;
2791 return prepend_alloc(m, tbase, oldbase, nb);
2792 }
2793 else
2794 add_segment(m, tbase, tsize, mmap_flag);
2795 }
2796 }
2797
2798 if (nb < m->topsize) { /* Allocate from new or extended top space */
2799 size_t rsize = m->topsize -= nb;
2800 mchunkptr p = m->top;
2801 mchunkptr r = m->top = chunk_plus_offset(p, nb);
2802 r->head = rsize | PINUSE_BIT;
2803 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
2804 check_top_chunk(m, m->top);
2805 check_malloced_chunk(m, chunk2mem(p), nb);
2806 return chunk2mem(p);
2807 }
2808 }
2809
2810 MALLOC_FAILURE_ACTION;
2811 return 0;
2812}
2813
2814/* ----------------------- system deallocation -------------------------- */
2815
2816/* Unmap and unlink any mmapped segments that don't contain used chunks */
2817static size_t release_unused_segments(mstate m) {
2818 size_t released = 0;
2819 int nsegs = 0;
2820 msegmentptr pred = &m->seg;
2821 msegmentptr sp = pred->next;
2822 while (sp != 0) {
2823 char* base = sp->base;
2824 size_t size = sp->size;
2825 msegmentptr next = sp->next;
2826 ++nsegs;
2827 if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
2828 mchunkptr p = align_as_chunk(base);
2829 size_t psize = chunksize(p);
2830 /* Can unmap if first chunk holds entire segment and not pinned */
2831 if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
2832 tchunkptr tp = (tchunkptr)p;
2833 assert(segment_holds(sp, (char*)sp));
2834 if (p == m->dv) {
2835 m->dv = 0;
2836 m->dvsize = 0;
2837 }
2838 else {
2839 unlink_large_chunk(m, tp);
2840 }
2841 if (CALL_MUNMAP(base, size) == 0) {
2842 released += size;
2843 m->footprint -= size;
2844 /* unlink obsoleted record */
2845 sp = pred;
2846 sp->next = next;
2847 }
2848 else { /* back out if cannot unmap */
2849 insert_large_chunk(m, tp, psize);
2850 }
2851 }
2852 }
2853 if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */
2854 break;
2855 pred = sp;
2856 sp = next;
2857 }
2858 /* Reset check counter */
2859 m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)?
2860 (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE);
2861 return released;
2862}
2863
2864static int sys_trim(mstate m, size_t pad) {
2865 size_t released = 0;
2866 ensure_initialization();
2867 if (pad < MAX_REQUEST && is_initialized(m)) {
2868 pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
2869
2870 if (m->topsize > pad) {
2871 /* Shrink top space in granularity-size units, keeping at least one */
2872 size_t unit = mparams.granularity;
2873 size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
2874 SIZE_T_ONE) * unit;
2875 msegmentptr sp = segment_holding(m, (char*)m->top);
2876
2877 if (!is_extern_segment(sp)) {
2878 if (is_mmapped_segment(sp)) {
2879 if (HAVE_MMAP &&
2880 sp->size >= extra &&
2881 !has_segment_link(m, sp)) { /* can't shrink if pinned */
2882 size_t newsize = sp->size - extra;
2883 (void)newsize; /* placate people compiling -Wunused-variable */
2884 /* Prefer mremap, fall back to munmap */
2885 if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
2886 (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
2887 released = extra;
2888 }
2889 }
2890 }
2891 else if (HAVE_MORECORE) {
2892 if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
2893 extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
2894 ACQUIRE_MALLOC_GLOBAL_LOCK();
2895 {
2896 /* Make sure end of memory is where we last set it. */
2897 char* old_br = (char*)(CALL_MORECORE(0));
2898 if (old_br == sp->base + sp->size) {
2899 char* rel_br = (char*)(CALL_MORECORE(-extra));
2900 char* new_br = (char*)(CALL_MORECORE(0));
2901 if (rel_br != CMFAIL && new_br < old_br)
2902 released = old_br - new_br;
2903 }
2904 }
2905 RELEASE_MALLOC_GLOBAL_LOCK();
2906 }
2907 }
2908
2909 if (released != 0) {
2910 sp->size -= released;
2911 m->footprint -= released;
2912 init_top(m, m->top, m->topsize - released);
2913 check_top_chunk(m, m->top);
2914 }
2915 }
2916
2917 /* Unmap any unused mmapped segments */
2918 if (HAVE_MMAP)
2919 released += release_unused_segments(m);
2920
2921 /* On failure, disable autotrim to avoid repeated failed future calls */
2922 if (released == 0 && m->topsize > m->trim_check)
2923 m->trim_check = MAX_SIZE_T;
2924 }
2925
2926 return (released != 0)? 1 : 0;
2927}
2928
2929/* Consolidate and bin a chunk. Differs from exported versions
2930 of free mainly in that the chunk need not be marked as inuse.
2931*/
2932static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {
2933 mchunkptr next = chunk_plus_offset(p, psize);
2934 if (!pinuse(p)) {
2935 mchunkptr prev;
2936 size_t prevsize = p->prev_foot;
2937 if (is_mmapped(p)) {
2938 psize += prevsize + MMAP_FOOT_PAD;
2939 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
2940 m->footprint -= psize;
2941 return;
2942 }
2943 prev = chunk_minus_offset(p, prevsize);
2944 psize += prevsize;
2945 p = prev;
2946 if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */
2947 if (p != m->dv) {
2948 unlink_chunk(m, p, prevsize);
2949 }
2950 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
2951 m->dvsize = psize;
2952 set_free_with_pinuse(p, psize, next);
2953 return;
2954 }
2955 }
2956 else {
2957 CORRUPTION_ERROR_ACTION(m);
2958 return;
2959 }
2960 }
2961 if (RTCHECK(ok_address(m, next))) {
2962 if (!cinuse(next)) { /* consolidate forward */
2963 if (next == m->top) {
2964 size_t tsize = m->topsize += psize;
2965 m->top = p;
2966 p->head = tsize | PINUSE_BIT;
2967 if (p == m->dv) {
2968 m->dv = 0;
2969 m->dvsize = 0;
2970 }
2971 return;
2972 }
2973 else if (next == m->dv) {
2974 size_t dsize = m->dvsize += psize;
2975 m->dv = p;
2976 set_size_and_pinuse_of_free_chunk(p, dsize);
2977 return;
2978 }
2979 else {
2980 size_t nsize = chunksize(next);
2981 psize += nsize;
2982 unlink_chunk(m, next, nsize);
2983 set_size_and_pinuse_of_free_chunk(p, psize);
2984 if (p == m->dv) {
2985 m->dvsize = psize;
2986 return;
2987 }
2988 }
2989 }
2990 else {
2991 set_free_with_pinuse(p, psize, next);
2992 }
2993 insert_chunk(m, p, psize);
2994 }
2995 else {
2996 CORRUPTION_ERROR_ACTION(m);
2997 }
2998}
2999
3000/* ---------------------------- malloc --------------------------- */
3001
3002/* allocate a large request from the best fitting chunk in a treebin */
3003static void* tmalloc_large(mstate m, size_t nb) {
3004 tchunkptr v = 0;
3005 size_t rsize = -nb; /* Unsigned negation */
3006 tchunkptr t;
3007 bindex_t idx;
3008 compute_tree_index(nb, idx);
3009 if ((t = *treebin_at(m, idx)) != 0) {
3010 /* Traverse tree for this bin looking for node with size == nb */
3011 size_t sizebits = nb << leftshift_for_tree_index(idx);
3012 tchunkptr rst = 0; /* The deepest untaken right subtree */
3013 for (;;) {
3014 tchunkptr rt;
3015 size_t trem = chunksize(t) - nb;
3016 if (trem < rsize) {
3017 v = t;
3018 if ((rsize = trem) == 0)
3019 break;
3020 }
3021 rt = t->child[1];
3022 t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
3023 if (rt != 0 && rt != t)
3024 rst = rt;
3025 if (t == 0) {
3026 t = rst; /* set t to least subtree holding sizes > nb */
3027 break;
3028 }
3029 sizebits <<= 1;
3030 }
3031 }
3032 if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
3033 binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
3034 if (leftbits != 0) {
3035 bindex_t i;
3036 binmap_t leastbit = least_bit(leftbits);
3037 compute_bit2idx(leastbit, i);
3038 t = *treebin_at(m, i);
3039 }
3040 }
3041
3042 while (t != 0) { /* find smallest of tree or subtree */
3043 size_t trem = chunksize(t) - nb;
3044 if (trem < rsize) {
3045 rsize = trem;
3046 v = t;
3047 }
3048 t = leftmost_child(t);
3049 }
3050
3051 /* If dv is a better fit, return 0 so malloc will use it */
3052 if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
3053 if (RTCHECK(ok_address(m, v))) { /* split */
3054 mchunkptr r = chunk_plus_offset(v, nb);
3055 assert(chunksize(v) == rsize + nb);
3056 if (RTCHECK(ok_next(v, r))) {
3057 unlink_large_chunk(m, v);
3058 if (rsize < MIN_CHUNK_SIZE)
3059 set_inuse_and_pinuse(m, v, (rsize + nb));
3060 else {
3061 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3062 set_size_and_pinuse_of_free_chunk(r, rsize);
3063 insert_chunk(m, r, rsize);
3064 }
3065 return chunk2mem(v);
3066 }
3067 }
3068 CORRUPTION_ERROR_ACTION(m);
3069 }
3070 return 0;
3071}
3072
3073/* allocate a small request from the best fitting chunk in a treebin */
3074static void* tmalloc_small(mstate m, size_t nb) {
3075 tchunkptr t, v;
3076 size_t rsize;
3077 bindex_t i;
3078 binmap_t leastbit = least_bit(m->treemap);
3079 compute_bit2idx(leastbit, i);
3080 v = t = *treebin_at(m, i);
3081 rsize = chunksize(t) - nb;
3082
3083 while ((t = leftmost_child(t)) != 0) {
3084 size_t trem = chunksize(t) - nb;
3085 if (trem < rsize) {
3086 rsize = trem;
3087 v = t;
3088 }
3089 }
3090
3091 if (RTCHECK(ok_address(m, v))) {
3092 mchunkptr r = chunk_plus_offset(v, nb);
3093 assert(chunksize(v) == rsize + nb);
3094 if (RTCHECK(ok_next(v, r))) {
3095 unlink_large_chunk(m, v);
3096 if (rsize < MIN_CHUNK_SIZE)
3097 set_inuse_and_pinuse(m, v, (rsize + nb));
3098 else {
3099 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3100 set_size_and_pinuse_of_free_chunk(r, rsize);
3101 replace_dv(m, r, rsize);
3102 }
3103 return chunk2mem(v);
3104 }
3105 }
3106
3107 CORRUPTION_ERROR_ACTION(m);
3108 return 0;
3109}
3110
3111#if !ONLY_MSPACES
3112
3113void* dlmalloc(size_t bytes) {
3114 /*
3115 Basic algorithm:
3116 If a small request (< 256 bytes minus per-chunk overhead):
3117 1. If one exists, use a remainderless chunk in associated smallbin.
3118 (Remainderless means that there are too few excess bytes to
3119 represent as a chunk.)
3120 2. If it is big enough, use the dv chunk, which is normally the
3121 chunk adjacent to the one used for the most recent small request.
3122 3. If one exists, split the smallest available chunk in a bin,
3123 saving remainder in dv.
3124 4. If it is big enough, use the top chunk.
3125 5. If available, get memory from system and use it
3126 Otherwise, for a large request:
3127 1. Find the smallest available binned chunk that fits, and use it
3128 if it is better fitting than dv chunk, splitting if necessary.
3129 2. If better fitting than any binned chunk, use the dv chunk.
3130 3. If it is big enough, use the top chunk.
3131 4. If request size >= mmap threshold, try to directly mmap this chunk.
3132 5. If available, get memory from system and use it
3133
3134 The ugly goto's here ensure that postaction occurs along all paths.
3135 */
3136
3137#if USE_LOCKS
3138 ensure_initialization(); /* initialize in sys_alloc if not using locks */
3139#endif
3140
3141 if (!PREACTION(gm)) {
3142 void* mem;
3143 size_t nb;
3144 if (bytes <= MAX_SMALL_REQUEST) {
3145 bindex_t idx;
3146 binmap_t smallbits;
3147 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
3148 idx = small_index(nb);
3149 smallbits = gm->smallmap >> idx;
3150
3151 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
3152 mchunkptr b, p;
3153 idx += ~smallbits & 1; /* Uses next bin if idx empty */
3154 b = smallbin_at(gm, idx);
3155 p = b->fd;
3156 assert(chunksize(p) == small_index2size(idx));
3157 unlink_first_small_chunk(gm, b, p, idx);
3158 set_inuse_and_pinuse(gm, p, small_index2size(idx));
3159 mem = chunk2mem(p);
3160 check_malloced_chunk(gm, mem, nb);
3161 goto postaction;
3162 }
3163
3164 else if (nb > gm->dvsize) {
3165 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
3166 mchunkptr b, p, r;
3167 size_t rsize;
3168 bindex_t i;
3169 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
3170 binmap_t leastbit = least_bit(leftbits);
3171 compute_bit2idx(leastbit, i);
3172 b = smallbin_at(gm, i);
3173 p = b->fd;
3174 assert(chunksize(p) == small_index2size(i));
3175 unlink_first_small_chunk(gm, b, p, i);
3176 rsize = small_index2size(i) - nb;
3177          /* Fit here cannot be remainderless if 4-byte sizes */
3178 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
3179 set_inuse_and_pinuse(gm, p, small_index2size(i));
3180 else {
3181 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
3182 r = chunk_plus_offset(p, nb);
3183 set_size_and_pinuse_of_free_chunk(r, rsize);
3184 replace_dv(gm, r, rsize);
3185 }
3186 mem = chunk2mem(p);
3187 check_malloced_chunk(gm, mem, nb);
3188 goto postaction;
3189 }
3190
3191 else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
3192 check_malloced_chunk(gm, mem, nb);
3193 goto postaction;
3194 }
3195 }
3196 }
3197 else if (bytes >= MAX_REQUEST)
3198 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
3199 else {
3200 nb = pad_request(bytes);
3201 if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
3202 check_malloced_chunk(gm, mem, nb);
3203 goto postaction;
3204 }
3205 }
3206
3207 if (nb <= gm->dvsize) {
3208 size_t rsize = gm->dvsize - nb;
3209 mchunkptr p = gm->dv;
3210 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
3211 mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
3212 gm->dvsize = rsize;
3213 set_size_and_pinuse_of_free_chunk(r, rsize);
3214 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
3215 }
3216 else { /* exhaust dv */
3217 size_t dvs = gm->dvsize;
3218 gm->dvsize = 0;
3219 gm->dv = 0;
3220 set_inuse_and_pinuse(gm, p, dvs);
3221 }
3222 mem = chunk2mem(p);
3223 check_malloced_chunk(gm, mem, nb);
3224 goto postaction;
3225 }
3226
3227 else if (nb < gm->topsize) { /* Split top */
3228 size_t rsize = gm->topsize -= nb;
3229 mchunkptr p = gm->top;
3230 mchunkptr r = gm->top = chunk_plus_offset(p, nb);
3231 r->head = rsize | PINUSE_BIT;
3232 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
3233 mem = chunk2mem(p);
3234 check_top_chunk(gm, gm->top);
3235 check_malloced_chunk(gm, mem, nb);
3236 goto postaction;
3237 }
3238
3239 mem = sys_alloc(gm, nb);
3240
3241 postaction:
3242 POSTACTION(gm);
3243 return mem;
3244 }
3245
3246 return 0;
3247}
3248
3249/* ---------------------------- free --------------------------- */
3250
3251void dlfree(void* mem) {
3252 /*
3253    Consolidate freed chunks with preceding or succeeding bordering
3254 free chunks, if they exist, and then place in a bin. Intermixed
3255 with special cases for top, dv, mmapped chunks, and usage errors.
3256 */
3257
3258 if (mem != 0) {
3259 mchunkptr p = mem2chunk(mem);
3260#if FOOTERS
3261 mstate fm = get_mstate_for(p);
3262 if (!ok_magic(fm)) {
3263 USAGE_ERROR_ACTION(fm, p);
3264 return;
3265 }
3266#else /* FOOTERS */
3267#define fm gm
3268#endif /* FOOTERS */
3269 if (!PREACTION(fm)) {
3270 check_inuse_chunk(fm, p);
3271 if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
3272 size_t psize = chunksize(p);
3273 mchunkptr next = chunk_plus_offset(p, psize);
3274 if (!pinuse(p)) {
3275 size_t prevsize = p->prev_foot;
3276 if (is_mmapped(p)) {
3277 psize += prevsize + MMAP_FOOT_PAD;
3278 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
3279 fm->footprint -= psize;
3280 goto postaction;
3281 }
3282 else {
3283 mchunkptr prev = chunk_minus_offset(p, prevsize);
3284 psize += prevsize;
3285 p = prev;
3286 if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
3287 if (p != fm->dv) {
3288 unlink_chunk(fm, p, prevsize);
3289 }
3290 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
3291 fm->dvsize = psize;
3292 set_free_with_pinuse(p, psize, next);
3293 goto postaction;
3294 }
3295 }
3296 else
3297 goto erroraction;
3298 }
3299 }
3300
3301 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
3302 if (!cinuse(next)) { /* consolidate forward */
3303 if (next == fm->top) {
3304 size_t tsize = fm->topsize += psize;
3305 fm->top = p;
3306 p->head = tsize | PINUSE_BIT;
3307 if (p == fm->dv) {
3308 fm->dv = 0;
3309 fm->dvsize = 0;
3310 }
3311 if (should_trim(fm, tsize))
3312 sys_trim(fm, 0);
3313 goto postaction;
3314 }
3315 else if (next == fm->dv) {
3316 size_t dsize = fm->dvsize += psize;
3317 fm->dv = p;
3318 set_size_and_pinuse_of_free_chunk(p, dsize);
3319 goto postaction;
3320 }
3321 else {
3322 size_t nsize = chunksize(next);
3323 psize += nsize;
3324 unlink_chunk(fm, next, nsize);
3325 set_size_and_pinuse_of_free_chunk(p, psize);
3326 if (p == fm->dv) {
3327 fm->dvsize = psize;
3328 goto postaction;
3329 }
3330 }
3331 }
3332 else
3333 set_free_with_pinuse(p, psize, next);
3334
3335 if (is_small(psize)) {
3336 insert_small_chunk(fm, p, psize);
3337 check_free_chunk(fm, p);
3338 }
3339 else {
3340 tchunkptr tp = (tchunkptr)p;
3341 insert_large_chunk(fm, tp, psize);
3342 check_free_chunk(fm, p);
3343 if (--fm->release_checks == 0)
3344 release_unused_segments(fm);
3345 }
3346 goto postaction;
3347 }
3348 }
3349 erroraction:
3350 USAGE_ERROR_ACTION(fm, p);
3351 postaction:
3352 POSTACTION(fm);
3353 }
3354 }
3355#if !FOOTERS
3356#undef fm
3357#endif /* FOOTERS */
3358}
3359
3360void* dlcalloc(size_t n_elements, size_t elem_size) {
3361 void* mem;
3362 size_t req = 0;
3363 if (n_elements != 0) {
3364 req = n_elements * elem_size;
3365 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
3366 (req / n_elements != elem_size))
3367 req = MAX_SIZE_T; /* force downstream failure on overflow */
3368 }
3369 mem = dlmalloc(req);
3370 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
3371 memset(mem, 0, req);
3372 return mem;
3373}
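/*
  A compiled-out usage sketch of the entry points defined in this region:
  dlmalloc, dlcalloc and dlfree behave like their libc counterparts, with
  dlcalloc adding the overflow check and zero-fill shown above.
*/
#if 0 /* illustrative sketch, not part of the allocator */
static void example_basic_usage(void) {
  void* p = dlmalloc(100);     /* small request: smallbin, dv or top chunk       */
  void* q = dlcalloc(10, 64);  /* zeroed, with the multiplication overflow check */
  if (p != 0) dlfree(p);
  if (q != 0) dlfree(q);
}
#endif /* illustrative sketch */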
3374
3375#endif /* !ONLY_MSPACES */
3376
3377/* ------------ Internal support for realloc, memalign, etc -------------- */
3378
 3379/* Try to realloc; only in-place unless can_move is true */
3380static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
3381 int can_move) {
3382 mchunkptr newp = 0;
3383 size_t oldsize = chunksize(p);
3384 mchunkptr next = chunk_plus_offset(p, oldsize);
3385 if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&
3386 ok_next(p, next) && ok_pinuse(next))) {
3387 if (is_mmapped(p)) {
3388 newp = mmap_resize(m, p, nb, can_move);
3389 }
3390 else if (oldsize >= nb) { /* already big enough */
3391 size_t rsize = oldsize - nb;
3392 if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */
3393 mchunkptr r = chunk_plus_offset(p, nb);
3394 set_inuse(m, p, nb);
3395 set_inuse(m, r, rsize);
3396 dispose_chunk(m, r, rsize);
3397 }
3398 newp = p;
3399 }
3400 else if (next == m->top) { /* extend into top */
3401 if (oldsize + m->topsize > nb) {
3402 size_t newsize = oldsize + m->topsize;
3403 size_t newtopsize = newsize - nb;
3404 mchunkptr newtop = chunk_plus_offset(p, nb);
3405 set_inuse(m, p, nb);
3406 newtop->head = newtopsize |PINUSE_BIT;
3407 m->top = newtop;
3408 m->topsize = newtopsize;
3409 newp = p;
3410 }
3411 }
3412 else if (next == m->dv) { /* extend into dv */
3413 size_t dvs = m->dvsize;
3414 if (oldsize + dvs >= nb) {
3415 size_t dsize = oldsize + dvs - nb;
3416 if (dsize >= MIN_CHUNK_SIZE) {
3417 mchunkptr r = chunk_plus_offset(p, nb);
3418 mchunkptr n = chunk_plus_offset(r, dsize);
3419 set_inuse(m, p, nb);
3420 set_size_and_pinuse_of_free_chunk(r, dsize);
3421 clear_pinuse(n);
3422 m->dvsize = dsize;
3423 m->dv = r;
3424 }
3425 else { /* exhaust dv */
3426 size_t newsize = oldsize + dvs;
3427 set_inuse(m, p, newsize);
3428 m->dvsize = 0;
3429 m->dv = 0;
3430 }
3431 newp = p;
3432 }
3433 }
3434 else if (!cinuse(next)) { /* extend into next free chunk */
3435 size_t nextsize = chunksize(next);
3436 if (oldsize + nextsize >= nb) {
3437 size_t rsize = oldsize + nextsize - nb;
3438 unlink_chunk(m, next, nextsize);
3439 if (rsize < MIN_CHUNK_SIZE) {
3440 size_t newsize = oldsize + nextsize;
3441 set_inuse(m, p, newsize);
3442 }
3443 else {
3444 mchunkptr r = chunk_plus_offset(p, nb);
3445 set_inuse(m, p, nb);
3446 set_inuse(m, r, rsize);
3447 dispose_chunk(m, r, rsize);
3448 }
3449 newp = p;
3450 }
3451 }
3452 }
3453 else {
3454 USAGE_ERROR_ACTION(m, chunk2mem(p));
3455 }
3456 return newp;
3457}
3458
3459static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
3460 void* mem = 0;
3461 if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
3462 alignment = MIN_CHUNK_SIZE;
3463 if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
3464 size_t a = MALLOC_ALIGNMENT << 1;
3465 while (a < alignment) a <<= 1;
3466 alignment = a;
3467 }
3468 if (bytes >= MAX_REQUEST - alignment) {
3469 if (m != 0) { /* Test isn't needed but avoids compiler warning */
3470 MALLOC_FAILURE_ACTION;
3471 }
3472 }
3473 else {
3474 size_t nb = request2size(bytes);
3475 size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
3476 mem = internal_malloc(m, req);
3477 if (mem != 0) {
3478 mchunkptr p = mem2chunk(mem);
3479 if (PREACTION(m))
3480 return 0;
3481 if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */
3482 /*
3483 Find an aligned spot inside chunk. Since we need to give
3484 back leading space in a chunk of at least MIN_CHUNK_SIZE, if
3485 the first calculation places us at a spot with less than
3486 MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
3487 We've allocated enough total room so that this is always
3488 possible.
3489 */
3490 char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment -
3491 SIZE_T_ONE)) &
3492 -alignment));
3493 char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
3494 br : br+alignment;
3495 mchunkptr newp = (mchunkptr)pos;
3496 size_t leadsize = pos - (char*)(p);
3497 size_t newsize = chunksize(p) - leadsize;
3498
3499 if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
3500 newp->prev_foot = p->prev_foot + leadsize;
3501 newp->head = newsize;
3502 }
3503 else { /* Otherwise, give back leader, use the rest */
3504 set_inuse(m, newp, newsize);
3505 set_inuse(m, p, leadsize);
3506 dispose_chunk(m, p, leadsize);
3507 }
3508 p = newp;
3509 }
3510
3511 /* Give back spare room at the end */
3512 if (!is_mmapped(p)) {
3513 size_t size = chunksize(p);
3514 if (size > nb + MIN_CHUNK_SIZE) {
3515 size_t remainder_size = size - nb;
3516 mchunkptr remainder = chunk_plus_offset(p, nb);
3517 set_inuse(m, p, nb);
3518 set_inuse(m, remainder, remainder_size);
3519 dispose_chunk(m, remainder, remainder_size);
3520 }
3521 }
3522
3523 mem = chunk2mem(p);
3524 assert (chunksize(p) >= nb);
3525 assert(((size_t)mem & (alignment - 1)) == 0);
3526 check_inuse_chunk(m, p);
3527 POSTACTION(m);
3528 }
3529 }
3530 return mem;
3531}
3532
3533/*
3534 Common support for independent_X routines, handling
3535 all of the combinations that can result.
3536 The opts arg has:
3537 bit 0 set if all elements are same size (using sizes[0])
3538 bit 1 set if elements should be zeroed
3539*/
3540static void** ialloc(mstate m,
3541 size_t n_elements,
3542 size_t* sizes,
3543 int opts,
3544 void* chunks[]) {
3545
3546 size_t element_size; /* chunksize of each element, if all same */
3547 size_t contents_size; /* total size of elements */
3548 size_t array_size; /* request size of pointer array */
3549 void* mem; /* malloced aggregate space */
3550 mchunkptr p; /* corresponding chunk */
3551 size_t remainder_size; /* remaining bytes while splitting */
3552 void** marray; /* either "chunks" or malloced ptr array */
3553 mchunkptr array_chunk; /* chunk for malloced ptr array */
3554 flag_t was_enabled; /* to disable mmap */
3555 size_t size;
3556 size_t i;
3557
3558 ensure_initialization();
3559 /* compute array length, if needed */
3560 if (chunks != 0) {
3561 if (n_elements == 0)
3562 return chunks; /* nothing to do */
3563 marray = chunks;
3564 array_size = 0;
3565 }
3566 else {
3567 /* if empty req, must still return chunk representing empty array */
3568 if (n_elements == 0)
3569 return (void**)internal_malloc(m, 0);
3570 marray = 0;
3571 array_size = request2size(n_elements * (sizeof(void*)));
3572 }
3573
3574 /* compute total element size */
3575 if (opts & 0x1) { /* all-same-size */
3576 element_size = request2size(*sizes);
3577 contents_size = n_elements * element_size;
3578 }
3579 else { /* add up all the sizes */
3580 element_size = 0;
3581 contents_size = 0;
3582 for (i = 0; i != n_elements; ++i)
3583 contents_size += request2size(sizes[i]);
3584 }
3585
3586 size = contents_size + array_size;
3587
3588 /*
3589 Allocate the aggregate chunk. First disable direct-mmapping so
3590 malloc won't use it, since we would not be able to later
3591 free/realloc space internal to a segregated mmap region.
3592 */
3593 was_enabled = use_mmap(m);
3594 disable_mmap(m);
3595 mem = internal_malloc(m, size - CHUNK_OVERHEAD);
3596 if (was_enabled)
3597 enable_mmap(m);
3598 if (mem == 0)
3599 return 0;
3600
3601 if (PREACTION(m)) return 0;
3602 p = mem2chunk(mem);
3603 remainder_size = chunksize(p);
3604
3605 assert(!is_mmapped(p));
3606
3607 if (opts & 0x2) { /* optionally clear the elements */
3608 memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
3609 }
3610
3611 /* If not provided, allocate the pointer array as final part of chunk */
3612 if (marray == 0) {
3613 size_t array_chunk_size;
3614 array_chunk = chunk_plus_offset(p, contents_size);
3615 array_chunk_size = remainder_size - contents_size;
3616 marray = (void**) (chunk2mem(array_chunk));
3617 set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
3618 remainder_size = contents_size;
3619 }
3620
3621 /* split out elements */
3622 for (i = 0; ; ++i) {
3623 marray[i] = chunk2mem(p);
3624 if (i != n_elements-1) {
3625 if (element_size != 0)
3626 size = element_size;
3627 else
3628 size = request2size(sizes[i]);
3629 remainder_size -= size;
3630 set_size_and_pinuse_of_inuse_chunk(m, p, size);
3631 p = chunk_plus_offset(p, size);
3632 }
3633 else { /* the final element absorbs any overallocation slop */
3634 set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
3635 break;
3636 }
3637 }
3638
3639#if DEBUG
3640 if (marray != chunks) {
3641 /* final element must have exactly exhausted chunk */
3642 if (element_size != 0) {
3643 assert(remainder_size == element_size);
3644 }
3645 else {
3646 assert(remainder_size == request2size(sizes[i]));
3647 }
3648 check_inuse_chunk(m, mem2chunk(marray));
3649 }
3650 for (i = 0; i != n_elements; ++i)
3651 check_inuse_chunk(m, mem2chunk(marray[i]));
3652
3653#endif /* DEBUG */
3654
3655 POSTACTION(m);
3656 return marray;
3657}
3658
3659/* Try to free all pointers in the given array.
3660 Note: this could be made faster, by delaying consolidation,
 3661 at the price of disabling some user integrity checks. We
3662 still optimize some consolidations by combining adjacent
3663 chunks before freeing, which will occur often if allocated
3664 with ialloc or the array is sorted.
3665*/
3666static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) {
3667 size_t unfreed = 0;
3668 if (!PREACTION(m)) {
3669 void** a;
3670 void** fence = &(array[nelem]);
3671 for (a = array; a != fence; ++a) {
3672 void* mem = *a;
3673 if (mem != 0) {
3674 mchunkptr p = mem2chunk(mem);
3675 size_t psize = chunksize(p);
3676#if FOOTERS
3677 if (get_mstate_for(p) != m) {
3678 ++unfreed;
3679 continue;
3680 }
3681#endif
3682 check_inuse_chunk(m, p);
3683 *a = 0;
3684 if (RTCHECK(ok_address(m, p) && ok_inuse(p))) {
3685 void ** b = a + 1; /* try to merge with next chunk */
3686 mchunkptr next = next_chunk(p);
3687 if (b != fence && *b == chunk2mem(next)) {
3688 size_t newsize = chunksize(next) + psize;
3689 set_inuse(m, p, newsize);
3690 *b = chunk2mem(p);
3691 }
3692 else
3693 dispose_chunk(m, p, psize);
3694 }
3695 else {
3696 CORRUPTION_ERROR_ACTION(m);
3697 break;
3698 }
3699 }
3700 }
3701 if (should_trim(m, m->topsize))
3702 sys_trim(m, 0);
3703 POSTACTION(m);
3704 }
3705 return unfreed;
3706}
3707
3708/* Traversal */
3709#if MALLOC_INSPECT_ALL
3710static void internal_inspect_all(mstate m,
3711 void(*handler)(void *start,
3712 void *end,
3713 size_t used_bytes,
3714 void* callback_arg),
3715 void* arg) {
3716 if (is_initialized(m)) {
3717 mchunkptr top = m->top;
3718 msegmentptr s;
3719 for (s = &m->seg; s != 0; s = s->next) {
3720 mchunkptr q = align_as_chunk(s->base);
3721 while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) {
3722 mchunkptr next = next_chunk(q);
3723 size_t sz = chunksize(q);
3724 size_t used;
3725 void* start;
3726 if (is_inuse(q)) {
3727 used = sz - CHUNK_OVERHEAD; /* must not be mmapped */
3728 start = chunk2mem(q);
3729 }
3730 else {
3731 used = 0;
3732 if (is_small(sz)) { /* offset by possible bookkeeping */
3733 start = (void*)((char*)q + sizeof(struct malloc_chunk));
3734 }
3735 else {
3736 start = (void*)((char*)q + sizeof(struct malloc_tree_chunk));
3737 }
3738 }
3739 if (start < (void*)next) /* skip if all space is bookkeeping */
3740 handler(start, next, used, arg);
3741 if (q == top)
3742 break;
3743 q = next;
3744 }
3745 }
3746 }
3747}
3748#endif /* MALLOC_INSPECT_ALL */
3749
3750/* ------------------ Exported realloc, memalign, etc -------------------- */
3751
3752#if !ONLY_MSPACES
3753
3754void* dlrealloc(void* oldmem, size_t bytes) {
3755 void* mem = 0;
3756 if (oldmem == 0) {
3757 mem = dlmalloc(bytes);
3758 }
3759 else if (bytes >= MAX_REQUEST) {
3760 MALLOC_FAILURE_ACTION;
3761 }
3762#ifdef REALLOC_ZERO_BYTES_FREES
3763 else if (bytes == 0) {
3764 dlfree(oldmem);
3765 }
3766#endif /* REALLOC_ZERO_BYTES_FREES */
3767 else {
3768 size_t nb = request2size(bytes);
3769 mchunkptr oldp = mem2chunk(oldmem);
3770#if ! FOOTERS
3771 mstate m = gm;
3772#else /* FOOTERS */
3773 mstate m = get_mstate_for(oldp);
3774 if (!ok_magic(m)) {
3775 USAGE_ERROR_ACTION(m, oldmem);
3776 return 0;
3777 }
3778#endif /* FOOTERS */
3779 if (!PREACTION(m)) {
3780 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
3781 POSTACTION(m);
3782 if (newp != 0) {
3783 check_inuse_chunk(m, newp);
3784 mem = chunk2mem(newp);
3785 }
3786 else {
3787 mem = internal_malloc(m, bytes);
3788 if (mem != 0) {
3789 size_t oc = chunksize(oldp) - overhead_for(oldp);
3790 memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
3791 internal_free(m, oldmem);
3792 }
3793 }
3794 }
3795 }
3796 return mem;
3797}
3798
3799void* dlrealloc_in_place(void* oldmem, size_t bytes) {
3800 void* mem = 0;
3801 if (oldmem != 0) {
3802 if (bytes >= MAX_REQUEST) {
3803 MALLOC_FAILURE_ACTION;
3804 }
3805 else {
3806 size_t nb = request2size(bytes);
3807 mchunkptr oldp = mem2chunk(oldmem);
3808#if ! FOOTERS
3809 mstate m = gm;
3810#else /* FOOTERS */
3811 mstate m = get_mstate_for(oldp);
3812 if (!ok_magic(m)) {
3813 USAGE_ERROR_ACTION(m, oldmem);
3814 return 0;
3815 }
3816#endif /* FOOTERS */
3817 if (!PREACTION(m)) {
3818 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
3819 POSTACTION(m);
3820 if (newp == oldp) {
3821 check_inuse_chunk(m, newp);
3822 mem = oldmem;
3823 }
3824 }
3825 }
3826 }
3827 return mem;
3828}
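/*
  A usage sketch for the two realloc flavors above (hypothetical helper,
  assuming the dl* entry points are exported in this build). dlrealloc may
  move the block, falling back to malloc + memcpy + free when
  try_realloc_chunk cannot resize it; dlrealloc_in_place succeeds only when
  the chunk can be resized where it sits:

    static char* example_grow(char* buf, size_t newsize) {
      char* same = (char*) dlrealloc_in_place(buf, newsize);
      if (same != 0)
        return same;                           // resized without moving
      return (char*) dlrealloc(buf, newsize);  // may move; returns 0 and
                                               // leaves buf valid on failure
    }
*/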
3829
3830void* dlmemalign(size_t alignment, size_t bytes) {
3831 if (alignment <= MALLOC_ALIGNMENT) {
3832 return dlmalloc(bytes);
3833 }
3834 return internal_memalign(gm, alignment, bytes);
3835}
3836
3837int dlposix_memalign(void** pp, size_t alignment, size_t bytes) {
3838 void* mem = 0;
3839 if (alignment == MALLOC_ALIGNMENT)
3840 mem = dlmalloc(bytes);
3841 else {
3842 size_t d = alignment / sizeof(void*);
3843 size_t r = alignment % sizeof(void*);
3844 if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0)
3845 return EINVAL;
3846 else if (bytes <= MAX_REQUEST - alignment) {
3847 if (alignment < MIN_CHUNK_SIZE)
3848 alignment = MIN_CHUNK_SIZE;
3849 mem = internal_memalign(gm, alignment, bytes);
3850 }
3851 }
3852 if (mem == 0)
3853 return ENOMEM;
3854 else {
3855 *pp = mem;
3856 return 0;
3857 }
3858}
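/*
  A usage sketch for the alignment entry points above (hypothetical helper,
  assuming they are exported in this build). dlposix_memalign follows the
  POSIX contract: the alignment must be a power of two and a multiple of
  sizeof(void*), and the block is returned through an out-parameter:

    static void* example_aligned_block(void) {
      void* p = 0;
      if (dlposix_memalign(&p, 64, 1000) != 0)  // EINVAL or ENOMEM
        return 0;
      return p;                                 // release with dlfree(p)
    }

  dlmemalign(64, 1000) makes the same request without the POSIX calling
  convention; for alignments no larger than MALLOC_ALIGNMENT it simply
  forwards to dlmalloc.
*/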
3859
3860void* dlvalloc(size_t bytes) {
3861 size_t pagesz;
3862 ensure_initialization();
3863 pagesz = mparams.page_size;
3864 return dlmemalign(pagesz, bytes);
3865}
3866
3867void* dlpvalloc(size_t bytes) {
3868 size_t pagesz;
3869 ensure_initialization();
3870 pagesz = mparams.page_size;
3871 return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
3872}
3873
3874void** dlindependent_calloc(size_t n_elements, size_t elem_size,
3875 void* chunks[]) {
3876 size_t sz = elem_size; /* serves as 1-element array */
3877 return ialloc(gm, n_elements, &sz, 3, chunks);
3878}
3879
3880void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
3881 void* chunks[]) {
3882 return ialloc(gm, n_elements, sizes, 0, chunks);
3883}
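/*
  A usage sketch for dlindependent_comalloc above (hypothetical helper,
  assuming it is exported in this build). One aggregate chunk is carved
  into independently freeable pieces, keeping related objects adjacent:

    static void** example_comalloc(void) {
      size_t sizes[3] = { 64, 256, 1024 };
      void** pieces = dlindependent_comalloc(3, sizes, 0);
      if (pieces == 0)
        return 0;
      // pieces[0], pieces[1] and pieces[2] may each be passed to dlfree
      // individually; the pointer array itself was allocated by the call
      // and is also released with dlfree(pieces).
      return pieces;
    }
*/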
3884
3885size_t dlbulk_free(void* array[], size_t nelem) {
3886 return internal_bulk_free(gm, array, nelem);
3887}
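/*
  A usage sketch for dlbulk_free above (hypothetical helper, assuming it is
  exported in this build). Every non-null pointer in the array is freed and
  its slot zeroed; the return value counts pointers that could not be
  freed. Per the note above internal_bulk_free, address-adjacent chunks are
  merged on the fly, so arrays produced by ialloc or sorted by address free
  especially cheaply:

    static size_t example_bulk_release(void* ptrs[], size_t n) {
      return dlbulk_free(ptrs, n);   // 0 means every pointer was freed
    }
*/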
3888
3889#if MALLOC_INSPECT_ALL
3890void dlmalloc_inspect_all(void(*handler)(void *start,
3891 void *end,
3892 size_t used_bytes,
3893 void* callback_arg),
3894 void* arg) {
3895 ensure_initialization();
3896 if (!PREACTION(gm)) {
3897 internal_inspect_all(gm, handler, arg);
3898 POSTACTION(gm);
3899 }
3900}
3901#endif /* MALLOC_INSPECT_ALL */
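/*
  A sketch of a traversal callback for dlmalloc_inspect_all above (only
  compiled when MALLOC_INSPECT_ALL is nonzero; the handler and helper names
  are hypothetical). The handler runs while the malloc lock is held, so it
  must not call back into malloc or free:

    static void count_used(void* start, void* end, size_t used_bytes,
                           void* arg) {
      size_t* total = (size_t*) arg;
      (void) start; (void) end;
      *total += used_bytes;              // used_bytes is 0 for free space
    }

    static size_t example_heap_in_use(void) {
      size_t total = 0;
      dlmalloc_inspect_all(count_used, &total);
      return total;
    }
*/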
3902
3903int dlmalloc_trim(size_t pad) {
3904 int result = 0;
3905 ensure_initialization();
3906 if (!PREACTION(gm)) {
3907 result = sys_trim(gm, pad);
3908 POSTACTION(gm);
3909 }
3910 return result;
3911}
3912
3913size_t dlmalloc_footprint(void) {
3914 return gm->footprint;
3915}
3916
3917size_t dlmalloc_max_footprint(void) {
3918 return gm->max_footprint;
3919}
3920
3921size_t dlmalloc_footprint_limit(void) {
3922 size_t maf = gm->footprint_limit;
3923 return maf == 0 ? MAX_SIZE_T : maf;
3924}
3925
3926size_t dlmalloc_set_footprint_limit(size_t bytes) {
3927 size_t result; /* invert sense of 0 */
3928 if (bytes == 0)
3929 result = granularity_align(1); /* Use minimal size */
 3930 else if (bytes == MAX_SIZE_T)
3931 result = 0; /* disable */
3932 else
3933 result = granularity_align(bytes);
3934 return gm->footprint_limit = result;
3935}
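/*
  A sketch of capping the heap footprint with the entry points above
  (hypothetical helper, assuming they are exported in this build). The
  installed limit is rounded to the allocation granularity; passing
  MAX_SIZE_T removes the cap, which the getter reports as MAX_SIZE_T:

    static void example_cap_heap(void) {
      size_t applied = dlmalloc_set_footprint_limit((size_t)16 << 20);
      // 'applied' is the granularity-aligned limit actually installed;
      // system allocations that would push dlmalloc_footprint() past it
      // now fail instead of growing the heap.
      (void) applied;
    }
*/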
3936
3937#if !NO_MALLINFO
3938struct mallinfo dlmallinfo(void) {
3939 return internal_mallinfo(gm);
3940}
3941#endif /* NO_MALLINFO */
3942
3943#if !NO_MALLOC_STATS
3944void dlmalloc_stats() {
3945 internal_malloc_stats(gm);
3946}
3947#endif /* NO_MALLOC_STATS */
3948
3949int dlmallopt(int param_number, int value) {
3950 return change_mparam(param_number, value);
3951}
3952
3953size_t dlmalloc_usable_size(void* mem) {
3954 if (mem != 0) {
3955 mchunkptr p = mem2chunk(mem);
3956 if (is_inuse(p))
3957 return chunksize(p) - overhead_for(p);
3958 }
3959 return 0;
3960}
3961
3962#endif /* !ONLY_MSPACES */
3963
3964/* ----------------------------- user mspaces ---------------------------- */
3965
3966#if MSPACES
3967
3968static mstate init_user_mstate(char* tbase, size_t tsize) {
3969 size_t msize = pad_request(sizeof(struct malloc_state));
3970 mchunkptr mn;
3971 mchunkptr msp = align_as_chunk(tbase);
3972 mstate m = (mstate)(chunk2mem(msp));
3973 memset(m, 0, msize);
3974 (void)INITIAL_LOCK(&m->mutex);
3975 msp->head = (msize|INUSE_BITS);
3976 m->seg.base = m->least_addr = tbase;
3977 m->seg.size = m->footprint = m->max_footprint = tsize;
3978 m->magic = mparams.magic;
3979 m->release_checks = MAX_RELEASE_CHECK_RATE;
3980 m->mflags = mparams.default_mflags;
3981 m->extp = 0;
3982 m->exts = 0;
3983 disable_contiguous(m);
3984 init_bins(m);
3985 mn = next_chunk(mem2chunk(m));
3986 init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
3987 check_top_chunk(m, m->top);
3988 return m;
3989}
3990
3991mspace create_mspace(size_t capacity, int locked) {
3992 mstate m = 0;
3993 size_t msize;
3994 ensure_initialization();
3995 msize = pad_request(sizeof(struct malloc_state));
3996 if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
3997 size_t rs = ((capacity == 0)? mparams.granularity :
3998 (capacity + TOP_FOOT_SIZE + msize));
3999 size_t tsize = granularity_align(rs);
4000 char* tbase = (char*)(CALL_MMAP(tsize));
4001 if (tbase != CMFAIL) {
4002 m = init_user_mstate(tbase, tsize);
4003 m->seg.sflags = USE_MMAP_BIT;
4004 set_lock(m, locked);
4005 }
4006 }
4007 return (mspace)m;
4008}
4009
4010mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
4011 mstate m = 0;
4012 size_t msize;
4013 ensure_initialization();
4014 msize = pad_request(sizeof(struct malloc_state));
4015 if (capacity > msize + TOP_FOOT_SIZE &&
4016 capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
4017 m = init_user_mstate((char*)base, capacity);
4018 m->seg.sflags = EXTERN_BIT;
4019 set_lock(m, locked);
4020 }
4021 return (mspace)m;
4022}
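/*
  A sketch of building an mspace over caller-provided storage with
  create_mspace_with_base above (only compiled when MSPACES is set; the
  buffer and helper are hypothetical). Because the segment carries
  EXTERN_BIT rather than USE_MMAP_BIT, destroy_mspace never unmaps the
  caller's buffer:

    static char s_arena[1 << 20];        // 1 MiB backing buffer

    static mspace example_arena(void) {
      // capacity must exceed the internal bookkeeping overhead
      return create_mspace_with_base(s_arena, sizeof(s_arena), 0);
    }
*/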
4023
4024int mspace_track_large_chunks(mspace msp, int enable) {
4025 int ret = 0;
4026 mstate ms = (mstate)msp;
4027 if (!PREACTION(ms)) {
4028 if (!use_mmap(ms)) {
4029 ret = 1;
4030 }
4031 if (!enable) {
4032 enable_mmap(ms);
4033 } else {
4034 disable_mmap(ms);
4035 }
4036 POSTACTION(ms);
4037 }
4038 return ret;
4039}
4040
4041size_t destroy_mspace(mspace msp) {
4042 size_t freed = 0;
4043 mstate ms = (mstate)msp;
4044 if (ok_magic(ms)) {
4045 msegmentptr sp = &ms->seg;
4046 (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */
4047 while (sp != 0) {
4048 char* base = sp->base;
4049 size_t size = sp->size;
4050 flag_t flag = sp->sflags;
4051 (void)base; /* placate people compiling -Wunused-variable */
4052 sp = sp->next;
4053 if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&
4054 CALL_MUNMAP(base, size) == 0)
4055 freed += size;
4056 }
4057 }
4058 else {
4059 USAGE_ERROR_ACTION(ms,ms);
4060 }
4061 return freed;
4062}
4063
4064/*
4065 mspace versions of routines are near-clones of the global
4066 versions. This is not so nice but better than the alternatives.
4067*/
4068
4069void* mspace_malloc(mspace msp, size_t bytes) {
4070 mstate ms = (mstate)msp;
4071 if (!ok_magic(ms)) {
4072 USAGE_ERROR_ACTION(ms,ms);
4073 return 0;
4074 }
4075 if (!PREACTION(ms)) {
4076 void* mem;
4077 size_t nb;
4078 if (bytes <= MAX_SMALL_REQUEST) {
4079 bindex_t idx;
4080 binmap_t smallbits;
4081 nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
4082 idx = small_index(nb);
4083 smallbits = ms->smallmap >> idx;
4084
4085 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
4086 mchunkptr b, p;
4087 idx += ~smallbits & 1; /* Uses next bin if idx empty */
4088 b = smallbin_at(ms, idx);
4089 p = b->fd;
4090 assert(chunksize(p) == small_index2size(idx));
4091 unlink_first_small_chunk(ms, b, p, idx);
4092 set_inuse_and_pinuse(ms, p, small_index2size(idx));
4093 mem = chunk2mem(p);
4094 check_malloced_chunk(ms, mem, nb);
4095 goto postaction;
4096 }
4097
4098 else if (nb > ms->dvsize) {
4099 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
4100 mchunkptr b, p, r;
4101 size_t rsize;
4102 bindex_t i;
4103 binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
4104 binmap_t leastbit = least_bit(leftbits);
4105 compute_bit2idx(leastbit, i);
4106 b = smallbin_at(ms, i);
4107 p = b->fd;
4108 assert(chunksize(p) == small_index2size(i));
4109 unlink_first_small_chunk(ms, b, p, i);
4110 rsize = small_index2size(i) - nb;
4111 /* Fit here cannot be remainderless if 4byte sizes */
4112 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4113 set_inuse_and_pinuse(ms, p, small_index2size(i));
4114 else {
4115 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4116 r = chunk_plus_offset(p, nb);
4117 set_size_and_pinuse_of_free_chunk(r, rsize);
4118 replace_dv(ms, r, rsize);
4119 }
4120 mem = chunk2mem(p);
4121 check_malloced_chunk(ms, mem, nb);
4122 goto postaction;
4123 }
4124
4125 else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
4126 check_malloced_chunk(ms, mem, nb);
4127 goto postaction;
4128 }
4129 }
4130 }
4131 else if (bytes >= MAX_REQUEST)
4132 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4133 else {
4134 nb = pad_request(bytes);
4135 if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
4136 check_malloced_chunk(ms, mem, nb);
4137 goto postaction;
4138 }
4139 }
4140
4141 if (nb <= ms->dvsize) {
4142 size_t rsize = ms->dvsize - nb;
4143 mchunkptr p = ms->dv;
4144 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
4145 mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
4146 ms->dvsize = rsize;
4147 set_size_and_pinuse_of_free_chunk(r, rsize);
4148 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4149 }
4150 else { /* exhaust dv */
4151 size_t dvs = ms->dvsize;
4152 ms->dvsize = 0;
4153 ms->dv = 0;
4154 set_inuse_and_pinuse(ms, p, dvs);
4155 }
4156 mem = chunk2mem(p);
4157 check_malloced_chunk(ms, mem, nb);
4158 goto postaction;
4159 }
4160
4161 else if (nb < ms->topsize) { /* Split top */
4162 size_t rsize = ms->topsize -= nb;
4163 mchunkptr p = ms->top;
4164 mchunkptr r = ms->top = chunk_plus_offset(p, nb);
4165 r->head = rsize | PINUSE_BIT;
4166 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4167 mem = chunk2mem(p);
4168 check_top_chunk(ms, ms->top);
4169 check_malloced_chunk(ms, mem, nb);
4170 goto postaction;
4171 }
4172
4173 mem = sys_alloc(ms, nb);
4174
4175 postaction:
4176 POSTACTION(ms);
4177 return mem;
4178 }
4179
4180 return 0;
4181}
4182
4183void mspace_free(mspace msp, void* mem) {
4184 if (mem != 0) {
4185 mchunkptr p = mem2chunk(mem);
4186#if FOOTERS
4187 mstate fm = get_mstate_for(p);
4188 (void)msp; /* placate people compiling -Wunused */
4189#else /* FOOTERS */
4190 mstate fm = (mstate)msp;
4191#endif /* FOOTERS */
4192 if (!ok_magic(fm)) {
4193 USAGE_ERROR_ACTION(fm, p);
4194 return;
4195 }
4196 if (!PREACTION(fm)) {
4197 check_inuse_chunk(fm, p);
4198 if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
4199 size_t psize = chunksize(p);
4200 mchunkptr next = chunk_plus_offset(p, psize);
4201 if (!pinuse(p)) {
4202 size_t prevsize = p->prev_foot;
4203 if (is_mmapped(p)) {
4204 psize += prevsize + MMAP_FOOT_PAD;
4205 if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
4206 fm->footprint -= psize;
4207 goto postaction;
4208 }
4209 else {
4210 mchunkptr prev = chunk_minus_offset(p, prevsize);
4211 psize += prevsize;
4212 p = prev;
4213 if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
4214 if (p != fm->dv) {
4215 unlink_chunk(fm, p, prevsize);
4216 }
4217 else if ((next->head & INUSE_BITS) == INUSE_BITS) {
4218 fm->dvsize = psize;
4219 set_free_with_pinuse(p, psize, next);
4220 goto postaction;
4221 }
4222 }
4223 else
4224 goto erroraction;
4225 }
4226 }
4227
4228 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
4229 if (!cinuse(next)) { /* consolidate forward */
4230 if (next == fm->top) {
4231 size_t tsize = fm->topsize += psize;
4232 fm->top = p;
4233 p->head = tsize | PINUSE_BIT;
4234 if (p == fm->dv) {
4235 fm->dv = 0;
4236 fm->dvsize = 0;
4237 }
4238 if (should_trim(fm, tsize))
4239 sys_trim(fm, 0);
4240 goto postaction;
4241 }
4242 else if (next == fm->dv) {
4243 size_t dsize = fm->dvsize += psize;
4244 fm->dv = p;
4245 set_size_and_pinuse_of_free_chunk(p, dsize);
4246 goto postaction;
4247 }
4248 else {
4249 size_t nsize = chunksize(next);
4250 psize += nsize;
4251 unlink_chunk(fm, next, nsize);
4252 set_size_and_pinuse_of_free_chunk(p, psize);
4253 if (p == fm->dv) {
4254 fm->dvsize = psize;
4255 goto postaction;
4256 }
4257 }
4258 }
4259 else
4260 set_free_with_pinuse(p, psize, next);
4261
4262 if (is_small(psize)) {
4263 insert_small_chunk(fm, p, psize);
4264 check_free_chunk(fm, p);
4265 }
4266 else {
4267 tchunkptr tp = (tchunkptr)p;
4268 insert_large_chunk(fm, tp, psize);
4269 check_free_chunk(fm, p);
4270 if (--fm->release_checks == 0)
4271 release_unused_segments(fm);
4272 }
4273 goto postaction;
4274 }
4275 }
4276 erroraction:
4277 USAGE_ERROR_ACTION(fm, p);
4278 postaction:
4279 POSTACTION(fm);
4280 }
4281 }
4282}
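/*
  A sketch of the mmap-backed mspace lifecycle using the routines above
  (hypothetical helper, assuming MSPACES is set in this build):

    static void example_mspace_lifecycle(void) {
      mspace ms = create_mspace(0, 1);   // default capacity, with locking
      if (ms != 0) {
        void* p = mspace_malloc(ms, 128);
        if (p != 0)
          mspace_free(ms, p);
        destroy_mspace(ms);              // releases the space's segments
      }
    }
*/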
4283
4284void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
4285 void* mem;
4286 size_t req = 0;
4287 mstate ms = (mstate)msp;
4288 if (!ok_magic(ms)) {
4289 USAGE_ERROR_ACTION(ms,ms);
4290 return 0;
4291 }
4292 if (n_elements != 0) {
4293 req = n_elements * elem_size;
4294 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
4295 (req / n_elements != elem_size))
4296 req = MAX_SIZE_T; /* force downstream failure on overflow */
4297 }
4298 mem = internal_malloc(ms, req);
4299 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
4300 memset(mem, 0, req);
4301 return mem;
4302}
4303
4304void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
4305 void* mem = 0;
4306 if (oldmem == 0) {
4307 mem = mspace_malloc(msp, bytes);
4308 }
4309 else if (bytes >= MAX_REQUEST) {
4310 MALLOC_FAILURE_ACTION;
4311 }
4312#ifdef REALLOC_ZERO_BYTES_FREES
4313 else if (bytes == 0) {
4314 mspace_free(msp, oldmem);
4315 }
4316#endif /* REALLOC_ZERO_BYTES_FREES */
4317 else {
4318 size_t nb = request2size(bytes);
4319 mchunkptr oldp = mem2chunk(oldmem);
4320#if ! FOOTERS
4321 mstate m = (mstate)msp;
4322#else /* FOOTERS */
4323 mstate m = get_mstate_for(oldp);
4324 if (!ok_magic(m)) {
4325 USAGE_ERROR_ACTION(m, oldmem);
4326 return 0;
4327 }
4328#endif /* FOOTERS */
4329 if (!PREACTION(m)) {
4330 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
4331 POSTACTION(m);
4332 if (newp != 0) {
4333 check_inuse_chunk(m, newp);
4334 mem = chunk2mem(newp);
4335 }
4336 else {
4337 mem = mspace_malloc(m, bytes);
4338 if (mem != 0) {
4339 size_t oc = chunksize(oldp) - overhead_for(oldp);
4340 memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
4341 mspace_free(m, oldmem);
4342 }
4343 }
4344 }
4345 }
4346 return mem;
4347}
4348
4349void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) {
4350 void* mem = 0;
4351 if (oldmem != 0) {
4352 if (bytes >= MAX_REQUEST) {
4353 MALLOC_FAILURE_ACTION;
4354 }
4355 else {
4356 size_t nb = request2size(bytes);
4357 mchunkptr oldp = mem2chunk(oldmem);
4358#if ! FOOTERS
4359 mstate m = (mstate)msp;
4360#else /* FOOTERS */
4361 mstate m = get_mstate_for(oldp);
4362 (void)msp; /* placate people compiling -Wunused */
4363 if (!ok_magic(m)) {
4364 USAGE_ERROR_ACTION(m, oldmem);
4365 return 0;
4366 }
4367#endif /* FOOTERS */
4368 if (!PREACTION(m)) {
4369 mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
4370 POSTACTION(m);
4371 if (newp == oldp) {
4372 check_inuse_chunk(m, newp);
4373 mem = oldmem;
4374 }
4375 }
4376 }
4377 }
4378 return mem;
4379}
4380
4381void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
4382 mstate ms = (mstate)msp;
4383 if (!ok_magic(ms)) {
4384 USAGE_ERROR_ACTION(ms,ms);
4385 return 0;
4386 }
4387 if (alignment <= MALLOC_ALIGNMENT)
4388 return mspace_malloc(msp, bytes);
4389 return internal_memalign(ms, alignment, bytes);
4390}
4391
4392void** mspace_independent_calloc(mspace msp, size_t n_elements,
4393 size_t elem_size, void* chunks[]) {
4394 size_t sz = elem_size; /* serves as 1-element array */
4395 mstate ms = (mstate)msp;
4396 if (!ok_magic(ms)) {
4397 USAGE_ERROR_ACTION(ms,ms);
4398 return 0;
4399 }
4400 return ialloc(ms, n_elements, &sz, 3, chunks);
4401}
4402
4403void** mspace_independent_comalloc(mspace msp, size_t n_elements,
4404 size_t sizes[], void* chunks[]) {
4405 mstate ms = (mstate)msp;
4406 if (!ok_magic(ms)) {
4407 USAGE_ERROR_ACTION(ms,ms);
4408 return 0;
4409 }
4410 return ialloc(ms, n_elements, sizes, 0, chunks);
4411}
4412
4413size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) {
4414 return internal_bulk_free((mstate)msp, array, nelem);
4415}
4416
4417#if MALLOC_INSPECT_ALL
4418void mspace_inspect_all(mspace msp,
4419 void(*handler)(void *start,
4420 void *end,
4421 size_t used_bytes,
4422 void* callback_arg),
4423 void* arg) {
4424 mstate ms = (mstate)msp;
4425 if (ok_magic(ms)) {
4426 if (!PREACTION(ms)) {
4427 internal_inspect_all(ms, handler, arg);
4428 POSTACTION(ms);
4429 }
4430 }
4431 else {
4432 USAGE_ERROR_ACTION(ms,ms);
4433 }
4434}
4435#endif /* MALLOC_INSPECT_ALL */
4436
4437int mspace_trim(mspace msp, size_t pad) {
4438 int result = 0;
4439 mstate ms = (mstate)msp;
4440 if (ok_magic(ms)) {
4441 if (!PREACTION(ms)) {
4442 result = sys_trim(ms, pad);
4443 POSTACTION(ms);
4444 }
4445 }
4446 else {
4447 USAGE_ERROR_ACTION(ms,ms);
4448 }
4449 return result;
4450}
4451
4452#if !NO_MALLOC_STATS
4453void mspace_malloc_stats(mspace msp) {
4454 mstate ms = (mstate)msp;
4455 if (ok_magic(ms)) {
4456 internal_malloc_stats(ms);
4457 }
4458 else {
4459 USAGE_ERROR_ACTION(ms,ms);
4460 }
4461}
4462#endif /* NO_MALLOC_STATS */
4463
4464size_t mspace_footprint(mspace msp) {
4465 size_t result = 0;
4466 mstate ms = (mstate)msp;
4467 if (ok_magic(ms)) {
4468 result = ms->footprint;
4469 }
4470 else {
4471 USAGE_ERROR_ACTION(ms,ms);
4472 }
4473 return result;
4474}
4475
4476size_t mspace_max_footprint(mspace msp) {
4477 size_t result = 0;
4478 mstate ms = (mstate)msp;
4479 if (ok_magic(ms)) {
4480 result = ms->max_footprint;
4481 }
4482 else {
4483 USAGE_ERROR_ACTION(ms,ms);
4484 }
4485 return result;
4486}
4487
4488size_t mspace_footprint_limit(mspace msp) {
4489 size_t result = 0;
4490 mstate ms = (mstate)msp;
4491 if (ok_magic(ms)) {
4492 size_t maf = ms->footprint_limit;
4493 result = (maf == 0) ? MAX_SIZE_T : maf;
4494 }
4495 else {
4496 USAGE_ERROR_ACTION(ms,ms);
4497 }
4498 return result;
4499}
4500
4501size_t mspace_set_footprint_limit(mspace msp, size_t bytes) {
4502 size_t result = 0;
4503 mstate ms = (mstate)msp;
4504 if (ok_magic(ms)) {
4505 if (bytes == 0)
4506 result = granularity_align(1); /* Use minimal size */
 4507 else if (bytes == MAX_SIZE_T)
4508 result = 0; /* disable */
4509 else
4510 result = granularity_align(bytes);
4511 ms->footprint_limit = result;
4512 }
4513 else {
4514 USAGE_ERROR_ACTION(ms,ms);
4515 }
4516 return result;
4517}
4518
4519#if !NO_MALLINFO
4520struct mallinfo mspace_mallinfo(mspace msp) {
4521 mstate ms = (mstate)msp;
4522 if (!ok_magic(ms)) {
4523 USAGE_ERROR_ACTION(ms,ms);
4524 }
4525 return internal_mallinfo(ms);
4526}
4527#endif /* NO_MALLINFO */
4528
4529size_t mspace_usable_size(const void* mem) {
4530 if (mem != 0) {
4531 mchunkptr p = mem2chunk(mem);
4532 if (is_inuse(p))
4533 return chunksize(p) - overhead_for(p);
4534 }
4535 return 0;
4536}
4537
4538int mspace_mallopt(int param_number, int value) {
4539 return change_mparam(param_number, value);
4540}
4541
4542void* get_mspace_from_ptr(void* ptr)
4543{
4544 mchunkptr chunk = mem2chunk(ptr);
4545 mstate ms = get_mstate_for(chunk);
4546
4547 return ms;
4548}
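/*
  A sketch of routing a raw pointer back to its owning space with
  get_mspace_from_ptr above (hypothetical helper). The lookup goes through
  get_mstate_for, so it is only meaningful in builds where FOOTERS is
  enabled and each chunk records its owning mstate:

    static void example_free_anywhere(void* p) {
      mspace owner = get_mspace_from_ptr(p);
      if (owner != 0)
        mspace_free(owner, p);
    }
*/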
4549
4550#endif /* MSPACES */
4551
4552
4553/* -------------------- Alternative MORECORE functions ------------------- */
4554
4555/*
4556 Guidelines for creating a custom version of MORECORE:
4557
4558 * For best performance, MORECORE should allocate in multiples of pagesize.
4559 * MORECORE may allocate more memory than requested. (Or even less,
4560 but this will usually result in a malloc failure.)
4561 * MORECORE must not allocate memory when given argument zero, but
4562 instead return one past the end address of memory from previous
4563 nonzero call.
4564 * For best performance, consecutive calls to MORECORE with positive
4565 arguments should return increasing addresses, indicating that
4566 space has been contiguously extended.
4567 * Even though consecutive calls to MORECORE need not return contiguous
4568 addresses, it must be OK for malloc'ed chunks to span multiple
4569 regions in those cases where they do happen to be contiguous.
4570 * MORECORE need not handle negative arguments -- it may instead
4571 just return MFAIL when given negative arguments.
4572 Negative arguments are always multiples of pagesize. MORECORE
4573 must not misinterpret negative args as large positive unsigned
4574 args. You can suppress all such calls from even occurring by defining
 4575 MORECORE_CANNOT_TRIM.
4576
4577 As an example alternative MORECORE, here is a custom allocator
4578 kindly contributed for pre-OSX macOS. It uses virtually but not
4579 necessarily physically contiguous non-paged memory (locked in,
4580 present and won't get swapped out). You can use it by uncommenting
4581 this section, adding some #includes, and setting up the appropriate
4582 defines above:
4583
4584 #define MORECORE osMoreCore
4585
4586 There is also a shutdown routine that should somehow be called for
4587 cleanup upon program exit.
4588
4589 #define MAX_POOL_ENTRIES 100
4590 #define MINIMUM_MORECORE_SIZE (64 * 1024U)
4591 static int next_os_pool;
4592 void *our_os_pools[MAX_POOL_ENTRIES];
4593
4594 void *osMoreCore(int size)
4595 {
4596 void *ptr = 0;
4597 static void *sbrk_top = 0;
4598
4599 if (size > 0)
4600 {
4601 if (size < MINIMUM_MORECORE_SIZE)
4602 size = MINIMUM_MORECORE_SIZE;
4603 if (CurrentExecutionLevel() == kTaskLevel)
4604 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
4605 if (ptr == 0)
4606 {
4607 return (void *) MFAIL;
4608 }
4609 // save ptrs so they can be freed during cleanup
4610 our_os_pools[next_os_pool] = ptr;
4611 next_os_pool++;
4612 ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
4613 sbrk_top = (char *) ptr + size;
4614 return ptr;
4615 }
4616 else if (size < 0)
4617 {
4618 // we don't currently support shrink behavior
4619 return (void *) MFAIL;
4620 }
4621 else
4622 {
4623 return sbrk_top;
4624 }
4625 }
4626
4627 // cleanup any allocated memory pools
4628 // called as last thing before shutting down driver
4629
4630 void osCleanupMem(void)
4631 {
4632 void **ptr;
4633
4634 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
4635 if (*ptr)
4636 {
4637 PoolDeallocate(*ptr);
4638 *ptr = 0;
4639 }
4640 }
4641
4642*/
4643
4644
4645/* -----------------------------------------------------------------------
4646History:
4647 v2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea
4648 * fix bad comparison in dlposix_memalign
4649 * don't reuse adjusted asize in sys_alloc
4650 * add LOCK_AT_FORK -- thanks to Kirill Artamonov for the suggestion
4651 * reduce compiler warnings -- thanks to all who reported/suggested these
4652
4653 v2.8.5 Sun May 22 10:26:02 2011 Doug Lea (dl at gee)
4654 * Always perform unlink checks unless INSECURE
4655 * Add posix_memalign.
4656 * Improve realloc to expand in more cases; expose realloc_in_place.
4657 Thanks to Peter Buhr for the suggestion.
4658 * Add footprint_limit, inspect_all, bulk_free. Thanks
4659 to Barry Hayes and others for the suggestions.
4660 * Internal refactorings to avoid calls while holding locks
4661 * Use non-reentrant locks by default. Thanks to Roland McGrath
4662 for the suggestion.
4663 * Small fixes to mspace_destroy, reset_on_error.
4664 * Various configuration extensions/changes. Thanks
4665 to all who contributed these.
4666
4667 V2.8.4a Thu Apr 28 14:39:43 2011 (dl at gee.cs.oswego.edu)
4668 * Update Creative Commons URL
4669
4670 V2.8.4 Wed May 27 09:56:23 2009 Doug Lea (dl at gee)
4671 * Use zeros instead of prev foot for is_mmapped
4672 * Add mspace_track_large_chunks; thanks to Jean Brouwers
4673 * Fix set_inuse in internal_realloc; thanks to Jean Brouwers
4674 * Fix insufficient sys_alloc padding when using 16byte alignment
4675 * Fix bad error check in mspace_footprint
4676 * Adaptations for ptmalloc; thanks to Wolfram Gloger.
4677 * Reentrant spin locks; thanks to Earl Chew and others
4678 * Win32 improvements; thanks to Niall Douglas and Earl Chew
4679 * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options
4680 * Extension hook in malloc_state
4681 * Various small adjustments to reduce warnings on some compilers
4682 * Various configuration extensions/changes for more platforms. Thanks
4683 to all who contributed these.
4684
4685 V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee)
4686 * Add max_footprint functions
4687 * Ensure all appropriate literals are size_t
4688 * Fix conditional compilation problem for some #define settings
4689 * Avoid concatenating segments with the one provided
4690 in create_mspace_with_base
4691 * Rename some variables to avoid compiler shadowing warnings
4692 * Use explicit lock initialization.
4693 * Better handling of sbrk interference.
4694 * Simplify and fix segment insertion, trimming and mspace_destroy
4695 * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
4696 * Thanks especially to Dennis Flanagan for help on these.
4697
4698 V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee)
4699 * Fix memalign brace error.
4700
4701 V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee)
4702 * Fix improper #endif nesting in C++
4703 * Add explicit casts needed for C++
4704
4705 V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee)
4706 * Use trees for large bins
4707 * Support mspaces
4708 * Use segments to unify sbrk-based and mmap-based system allocation,
4709 removing need for emulation on most platforms without sbrk.
4710 * Default safety checks
4711 * Optional footer checks. Thanks to William Robertson for the idea.
4712 * Internal code refactoring
4713 * Incorporate suggestions and platform-specific changes.
4714 Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
4715 Aaron Bachmann, Emery Berger, and others.
4716 * Speed up non-fastbin processing enough to remove fastbins.
4717 * Remove useless cfree() to avoid conflicts with other apps.
4718 * Remove internal memcpy, memset. Compilers handle builtins better.
4719 * Remove some options that no one ever used and rename others.
4720
4721 V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
4722 * Fix malloc_state bitmap array misdeclaration
4723
4724 V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
4725 * Allow tuning of FIRST_SORTED_BIN_SIZE
4726 * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
4727 * Better detection and support for non-contiguousness of MORECORE.
4728 Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
4729 * Bypass most of malloc if no frees. Thanks To Emery Berger.
 4730 * Fix freeing of old top non-contiguous chunk in sysmalloc.
4731 * Raised default trim and map thresholds to 256K.
4732 * Fix mmap-related #defines. Thanks to Lubos Lunak.
4733 * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
4734 * Branch-free bin calculation
4735 * Default trim and mmap thresholds now 256K.
4736
4737 V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
4738 * Introduce independent_comalloc and independent_calloc.
4739 Thanks to Michael Pachos for motivation and help.
4740 * Make optional .h file available
4741 * Allow > 2GB requests on 32bit systems.
4742 * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
4743 Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
4744 and Anonymous.
4745 * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
4746 helping test this.)
4747 * memalign: check alignment arg
4748 * realloc: don't try to shift chunks backwards, since this
4749 leads to more fragmentation in some programs and doesn't
4750 seem to help in any others.
4751 * Collect all cases in malloc requiring system memory into sysmalloc
4752 * Use mmap as backup to sbrk
4753 * Place all internal state in malloc_state
4754 * Introduce fastbins (although similar to 2.5.1)
4755 * Many minor tunings and cosmetic improvements
4756 * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
4757 * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
4758 Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
4759 * Include errno.h to support default failure action.
4760
4761 V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
4762 * return null for negative arguments
4763 * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
4764 * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
4765 (e.g. WIN32 platforms)
4766 * Cleanup header file inclusion for WIN32 platforms
4767 * Cleanup code to avoid Microsoft Visual C++ compiler complaints
4768 * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
4769 memory allocation routines
4770 * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
4771 * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
4772 usage of 'assert' in non-WIN32 code
4773 * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
4774 avoid infinite loop
4775 * Always call 'fREe()' rather than 'free()'
4776
4777 V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
4778 * Fixed ordering problem with boundary-stamping
4779
4780 V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
4781 * Added pvalloc, as recommended by H.J. Liu
4782 * Added 64bit pointer support mainly from Wolfram Gloger
4783 * Added anonymously donated WIN32 sbrk emulation
4784 * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
4785 * malloc_extend_top: fix mask error that caused wastage after
4786 foreign sbrks
4787 * Add linux mremap support code from HJ Liu
4788
4789 V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
4790 * Integrated most documentation with the code.
4791 * Add support for mmap, with help from
4792 Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
4793 * Use last_remainder in more cases.
4794 * Pack bins using idea from colin@nyx10.cs.du.edu
 4795 * Use ordered bins instead of best-fit threshold
4796 * Eliminate block-local decls to simplify tracing and debugging.
4797 * Support another case of realloc via move into top
 4798 * Fix error occurring when initial sbrk_base not word-aligned.
4799 * Rely on page size for units instead of SBRK_UNIT to
4800 avoid surprises about sbrk alignment conventions.
4801 * Add mallinfo, mallopt. Thanks to Raymond Nijssen
4802 (raymond@es.ele.tue.nl) for the suggestion.
4803 * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
4804 * More precautions for cases where other routines call sbrk,
4805 courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
4806 * Added macros etc., allowing use in linux libc from
4807 H.J. Lu (hjl@gnu.ai.mit.edu)
4808 * Inverted this history list
4809
4810 V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
4811 * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
4812 * Removed all preallocation code since under current scheme
4813 the work required to undo bad preallocations exceeds
4814 the work saved in good cases for most test programs.
4815 * No longer use return list or unconsolidated bins since
4816 no scheme using them consistently outperforms those that don't
4817 given above changes.
4818 * Use best fit for very large chunks to prevent some worst-cases.
4819 * Added some support for debugging
4820
4821 V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
4822 * Removed footers when chunks are in use. Thanks to
4823 Paul Wilson (wilson@cs.texas.edu) for the suggestion.
4824
4825 V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
4826 * Added malloc_trim, with help from Wolfram Gloger
4827 (wmglo@Dent.MED.Uni-Muenchen.DE).
4828
4829 V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)
4830
4831 V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)
4832 * realloc: try to expand in both directions
4833 * malloc: swap order of clean-bin strategy;
4834 * realloc: only conditionally expand backwards
4835 * Try not to scavenge used bins
4836 * Use bin counts as a guide to preallocation
4837 * Occasionally bin return list chunks in first scan
4838 * Add a few optimizations from colin@nyx10.cs.du.edu
4839
4840 V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
4841 * faster bin computation & slightly different binning
4842 * merged all consolidations to one part of malloc proper
4843 (eliminating old malloc_find_space & malloc_clean_bin)
4844 * Scan 2 returns chunks (not just 1)
4845 * Propagate failure in realloc if malloc returns 0
4846 * Add stuff to allow compilation on non-ANSI compilers
4847 from kpv@research.att.com
4848
4849 V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
4850 * removed potential for odd address access in prev_chunk
4851 * removed dependency on getpagesize.h
4852 * misc cosmetics and a bit more internal documentation
4853 * anticosmetics: mangled names in macros to evade debugger strangeness
4854 * tested on sparc, hp-700, dec-mips, rs6000
4855 with gcc & native cc (hp, dec only) allowing
4856 Detlefs & Zorn comparison study (in SIGPLAN Notices.)
4857
4858 Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
4859 * Based loosely on libg++-1.2X malloc. (It retains some of the overall
4860 structure of old version, but most details differ.)
4861
4862*/
4863
4864#endif // TMEMORY_USE_DLMALLOC