The arena.c source code from glibc 2.14, for use in studying the malloc and free implementations:
- /* Malloc implementation for multiple threads without lock contention.
- Copyright (C) 2001,2002,2003,2004,2005,2006,2007,2009,2010
- Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public License as
- published by the Free Software Foundation; either version 2.1 of the
- License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; see the file COPYING.LIB. If not,
- write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- Boston, MA 02111-1307, USA. */
-
- #include <stdbool.h>
-
- /* Compile-time constants. */
-
- #define HEAP_MIN_SIZE (32*1024)
- #ifndef HEAP_MAX_SIZE
- # ifdef DEFAULT_MMAP_THRESHOLD_MAX
- # define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
- # else
- # define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
- # endif
- #endif
-
- /* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
- that are dynamically created for multi-threaded programs. The
- maximum size must be a power of two, for fast determination of
- which heap belongs to a chunk. It should be much larger than the
- mmap threshold, so that requests with a size just below that
- threshold can be fulfilled without creating too many heaps. */
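A note on the numbers (not part of arena.c): HEAP_MIN_SIZE is 32 KiB, and when malloc.c supplies its usual DEFAULT_MMAP_THRESHOLD_MAX (512 KiB on 32-bit targets, 4 MiB * sizeof(long), i.e. 32 MiB, on 64-bit ones), HEAP_MAX_SIZE works out to 1 MiB or 64 MiB respectively; the explicit 1 MiB fallback above covers builds where that macro is missing. Every candidate value is a power of two, which is exactly what the heap_for_ptr() masking further down relies on.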
-
-
- #ifndef THREAD_STATS
- #define THREAD_STATS 0
- #endif
-
- /* If THREAD_STATS is non-zero, some statistics on mutex locking are
- computed. */
-
- /***************************************************************************/
-
- #define top(ar_ptr) ((ar_ptr)->top)
-
- /* A heap is a single contiguous memory region holding (coalesceable)
- malloc_chunks. It is allocated with mmap() and always starts at an
- address aligned to HEAP_MAX_SIZE. Not used unless compiling with
- USE_ARENAS. */
-
- typedef struct _heap_info {
- mstate ar_ptr; /* Arena for this heap. */
- struct _heap_info *prev; /* Previous heap. */
- size_t size; /* Current size in bytes. */
- size_t mprotect_size; /* Size in bytes that has been mprotected
- PROT_READ|PROT_WRITE. */
- /* Make sure the following data is properly aligned, particularly
- that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
- MALLOC_ALIGNMENT. */
- char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
- } heap_info;
-
- /* Get a compile-time error if the heap_info padding is not correct
- to make alignment work as expected in sYSMALLOc. */
- extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
- + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
- ? -1 : 1];
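To see why the pad expression works (a worked example, not from the source): on the usual targets where pointers and size_t have the same width, the four fields above occupy 4 * SIZE_SZ bytes, and the comment asks that sizeof(heap_info) + 2 * SIZE_SZ be a multiple of MALLOC_ALIGNMENT, so the pad must equal (-6 * SIZE_SZ) modulo MALLOC_ALIGNMENT, which is what the masking computes. On a typical 64-bit build SIZE_SZ is 8 and MALLOC_ALIGNMENT is 16, so pad = (-48) & 15 = 0 and 32 + 16 = 48 is indeed a multiple of 16. The extern array that follows is the classic pre-C11 static assertion: a non-zero remainder would give the array size -1 and break the build. With a C11 compiler the same check could be spelled as:

/* Not in arena.c; a C11 restatement of the same compile-time check.  */
_Static_assert ((sizeof (heap_info) + 2 * SIZE_SZ) % MALLOC_ALIGNMENT == 0,
                "heap_info padding keeps the first chunk aligned");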
-
- /* Thread specific data */
-
- static tsd_key_t arena_key;
- static mutex_t list_lock;
- #ifdef PER_THREAD
- static size_t narenas;
- static mstate free_list;
- #endif
-
- #if THREAD_STATS
- static int stat_n_heaps;
- #define THREAD_STAT(x) x
- #else
- #define THREAD_STAT(x) do ; while(0)
- #endif
-
- /* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
- static unsigned long arena_mem;
-
- /* Already initialized? */
- int __malloc_initialized = -1;
-
- /**************************************************************************/
-
- #if USE_ARENAS
-
- /* arena_get() acquires an arena and locks the corresponding mutex.
- First, try the one last locked successfully by this thread. (This
- is the common case and handled with a macro for speed.) Then, loop
- once over the circularly linked list of arenas. If no arena is
- readily available, create a new one. In this latter case, `size'
- is just a hint as to how much memory will be required immediately
- in the new arena. */
-
- #define arena_get(ptr, size) do { \
- arena_lookup(ptr); \
- arena_lock(ptr, size); \
- } while(0)
-
- #define arena_lookup(ptr) do { \
- Void_t *vptr = NULL; \
- ptr = (mstate)tsd_getspecific(arena_key, vptr); \
- } while(0)
-
- #ifdef PER_THREAD
- #define arena_lock(ptr, size) do { \
- if(ptr) \
- (void)mutex_lock(&ptr->mutex); \
- else \
- ptr = arena_get2(ptr, (size)); \
- } while(0)
- #else
- #define arena_lock(ptr, size) do { \
- if(ptr && !mutex_trylock(&ptr->mutex)) { \
- THREAD_STAT(++(ptr->stat_lock_direct)); \
- } else \
- ptr = arena_get2(ptr, (size)); \
- } while(0)
- #endif
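Roughly, a caller uses these macros like the following sketch (illustrative only, with a hypothetical `bytes` request size; it is not quoted from malloc.c):

/* Illustrative caller, not glibc code.  */
mstate ar_ptr;
Void_t *victim = NULL;

arena_get (ar_ptr, bytes);        /* arena_lookup + arena_lock: reuse this
                                     thread's last arena, or get a new one  */
if (ar_ptr != NULL)
  {
    victim = _int_malloc (ar_ptr, bytes);  /* allocate inside that arena    */
    (void) mutex_unlock (&ar_ptr->mutex);  /* arena_get left the mutex held */
  }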
-
- /* find the heap and corresponding arena for a given ptr */
-
- #define heap_for_ptr(ptr) \
- ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
- #define arena_for_chunk(ptr) \
- (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
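Because every non-main heap is mmap()ed at a HEAP_MAX_SIZE-aligned address, finding the owning heap (and from it the arena) is a single mask. A made-up 32-bit example with the 1 MiB fallback value:

/* Illustrative numbers only: HEAP_MAX_SIZE == 0x100000, chunk at 0xb7c23450.  */
/*   heap_for_ptr (p)  ==  (heap_info *) (0xb7c23450 & ~0xfffffUL)             */
/*                     ==  (heap_info *) 0xb7c00000                            */
/* i.e. the heap_info header at the start of the enclosing heap, whose         */
/* ar_ptr field names the arena that arena_for_chunk() returns for the chunk.  */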
-
- #else /* !USE_ARENAS */
-
- /* There is only one arena, main_arena. */
-
- #if THREAD_STATS
- #define arena_get(ar_ptr, sz) do { \
- ar_ptr = &main_arena; \
- if(!mutex_trylock(&ar_ptr->mutex)) \
- ++(ar_ptr->stat_lock_direct); \
- else { \
- (void)mutex_lock(&ar_ptr->mutex); \
- ++(ar_ptr->stat_lock_wait); \
- } \
- } while(0)
- #else
- #define arena_get(ar_ptr, sz) do { \
- ar_ptr = &main_arena; \
- (void)mutex_lock(&ar_ptr->mutex); \
- } while(0)
- #endif
- #define arena_for_chunk(ptr) (&main_arena)
-
- #endif /* USE_ARENAS */
-
- /**************************************************************************/
-
- #ifndef NO_THREADS
-
- /* atfork support. */
-
- static __malloc_ptr_t (*save_malloc_hook) (size_t __size,
- __const __malloc_ptr_t);
- # if !defined _LIBC || (defined SHARED && !USE___THREAD)
- static __malloc_ptr_t (*save_memalign_hook) (size_t __align, size_t __size,
- __const __malloc_ptr_t);
- # endif
- static void (*save_free_hook) (__malloc_ptr_t __ptr,
- __const __malloc_ptr_t);
- static Void_t* save_arena;
-
- #ifdef ATFORK_MEM
- ATFORK_MEM;
- #endif
-
- /* Magic value for the thread-specific arena pointer when
- malloc_atfork() is in use. */
-
- #define ATFORK_ARENA_PTR ((Void_t*)-1)
-
- /* The following hooks are used while the `atfork' handling mechanism
- is active. */
-
- static Void_t*
- malloc_atfork(size_t sz, const Void_t *caller)
- {
- Void_t *vptr = NULL;
- Void_t *victim;
-
- tsd_getspecific(arena_key, vptr);
- if(vptr == ATFORK_ARENA_PTR) {
- /* We are the only thread that may allocate at all. */
- if(save_malloc_hook != malloc_check) {
- return _int_malloc(&main_arena, sz);
- } else {
- if(top_check()<0)
- return 0;
- victim = _int_malloc(&main_arena, sz+1);
- return mem2mem_check(victim, sz);
- }
- } else {
- /* Suspend the thread until the `atfork' handlers have completed.
- By that time, the hooks will have been reset as well, so that
- mALLOc() can be used again. */
- (void)mutex_lock(&list_lock);
- (void)mutex_unlock(&list_lock);
- return public_mALLOc(sz);
- }
- }
-
- static void
- free_atfork(Void_t* mem, const Void_t *caller)
- {
- Void_t *vptr = NULL;
- mstate ar_ptr;
- mchunkptr p; /* chunk corresponding to mem */
-
- if (mem == 0) /* free(0) has no effect */
- return;
-
- p = mem2chunk(mem); /* do not bother to replicate free_check here */
-
- #if HAVE_MMAP
- if (chunk_is_mmapped(p)) /* release mmapped memory. */
- {
- munmap_chunk(p);
- return;
- }
- #endif
-
- #ifdef ATOMIC_FASTBINS
- ar_ptr = arena_for_chunk(p);
- tsd_getspecific(arena_key, vptr);
- _int_free(ar_ptr, p, vptr == ATFORK_ARENA_PTR);
- #else
- ar_ptr = arena_for_chunk(p);
- tsd_getspecific(arena_key, vptr);
- if(vptr != ATFORK_ARENA_PTR)
- (void)mutex_lock(&ar_ptr->mutex);
- _int_free(ar_ptr, p);
- if(vptr != ATFORK_ARENA_PTR)
- (void)mutex_unlock(&ar_ptr->mutex);
- #endif
- }
-
-
- /* Counter for number of times the list is locked by the same thread. */
- static unsigned int atfork_recursive_cntr;
-
- /* The following two functions are registered via thread_atfork() to
- make sure that the mutexes remain in a consistent state in the
- fork()ed version of a thread. Also adapt the malloc and free hooks
- temporarily, because the `atfork' handler mechanism may use
- malloc/free internally (e.g. in LinuxThreads). */
-
- static void
- ptmalloc_lock_all (void)
- {
- mstate ar_ptr;
-
- if(__malloc_initialized < 1)
- return;
- if (mutex_trylock(&list_lock))
- {
- Void_t *my_arena;
- tsd_getspecific(arena_key, my_arena);
- if (my_arena == ATFORK_ARENA_PTR)
- /* This is the same thread which already locks the global list.
- Just bump the counter. */
- goto out;
-
- /* This thread has to wait its turn. */
- (void)mutex_lock(&list_lock);
- }
- for(ar_ptr = &main_arena;;) {
- (void)mutex_lock(&ar_ptr->mutex);
- ar_ptr = ar_ptr->next;
- if(ar_ptr == &main_arena) break;
- }
- save_malloc_hook = __malloc_hook;
- save_free_hook = __free_hook;
- __malloc_hook = malloc_atfork;
- __free_hook = free_atfork;
- /* Only the current thread may perform malloc/free calls now. */
- tsd_getspecific(arena_key, save_arena);
- tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
- out:
- ++atfork_recursive_cntr;
- }
-
- static void
- ptmalloc_unlock_all (void)
- {
- mstate ar_ptr;
-
- if(__malloc_initialized < 1)
- return;
- if (--atfork_recursive_cntr != 0)
- return;
- tsd_setspecific(arena_key, save_arena);
- __malloc_hook = save_malloc_hook;
- __free_hook = save_free_hook;
- for(ar_ptr = &main_arena;;) {
- (void)mutex_unlock(&ar_ptr->mutex);
- ar_ptr = ar_ptr->next;
- if(ar_ptr == &main_arena) break;
- }
- (void)mutex_unlock(&list_lock);
- }
-
- #ifdef __linux__
-
- /* In NPTL, unlocking a mutex in the child process after a
- fork() is currently unsafe, whereas re-initializing it is safe and
- does not leak resources. Therefore, a special atfork handler is
- installed for the child. */
-
- static void
- ptmalloc_unlock_all2 (void)
- {
- mstate ar_ptr;
-
- if(__malloc_initialized < 1)
- return;
- #if defined _LIBC || defined MALLOC_HOOKS
- tsd_setspecific(arena_key, save_arena);
- __malloc_hook = save_malloc_hook;
- __free_hook = save_free_hook;
- #endif
- #ifdef PER_THREAD
- free_list = NULL;
- #endif
- for(ar_ptr = &main_arena;;) {
- mutex_init(&ar_ptr->mutex);
- #ifdef PER_THREAD
- if (ar_ptr != save_arena) {
- ar_ptr->next_free = free_list;
- free_list = ar_ptr;
- }
- #endif
- ar_ptr = ar_ptr->next;
- if(ar_ptr == &main_arena) break;
- }
- mutex_init(&list_lock);
- atfork_recursive_cntr = 0;
- }
-
- #else
-
- #define ptmalloc_unlock_all2 ptmalloc_unlock_all
-
- #endif
-
- #endif /* !defined NO_THREADS */
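Taken together, the three handlers give fork() the usual prepare/parent/child treatment: the pre-fork handler takes list_lock and every arena mutex (and switches the hooks to the *_atfork versions), the post-fork parent handler releases everything, and the post-fork child handler re-initializes the mutexes instead, because the child starts with a single thread and must not unlock mutexes it may not own. The registration happens later through thread_atfork(); in terms of the public API it amounts to something like this sketch (the function name is made up, and this is not the actual glibc wiring):

/* Illustrative restatement with the public pthread API, not glibc code.  */
#include <pthread.h>

static void
register_malloc_fork_handlers (void)
{
  pthread_atfork (ptmalloc_lock_all,      /* prepare: runs before fork()        */
                  ptmalloc_unlock_all,    /* parent:  unlock arenas + list_lock */
                  ptmalloc_unlock_all2);  /* child:   re-init the locks instead */
}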
-
- /* Initialization routine. */
- #ifdef _LIBC
- #include <string.h>
- extern char **_environ;
-
- static char *
- internal_function
- next_env_entry (char ***position)
- {
- char **current = *position;
- char *result = NULL;
-
- while (*current != NULL)
- {
- if (__builtin_expect ((*current)[0] == 'M', 0)
- && (*current)[1] == 'A'
- && (*current)[2] == 'L'
- && (*current)[3] == 'L'
- && (*current)[4] == 'O'
- && (*current)[5] == 'C'
- && (*current)[6] == '_')
- {
- result = &(*current)[7];
-
- /* Save current position for next visit. */
- *position = ++current;
-
- break;
- }
-
- ++current;
- }
-
- return result;
- }
- #endif /* _LIBC */
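In other words, next_env_entry() walks environ and hands back only the text after a "MALLOC_" prefix, so the parser in ptmalloc_init() below never sees the prefix itself. A small illustration (made-up environment, not from the source):

/* For environ = { "HOME=/root", "MALLOC_CHECK_=3", NULL }:                 */
/*   first call  -> returns a pointer to "CHECK_=3",                        */
/*                  and *position now points at the NULL terminator slot;   */
/*   second call -> returns NULL, ending the scan.                          */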
-
- /* Set up basic state so that _int_malloc et al can work. */
- static void
- ptmalloc_init_minimal (void)
- {
- #if DEFAULT_TOP_PAD != 0
- mp_.top_pad = DEFAULT_TOP_PAD;
- #endif
- mp_.n_mmaps_max = DEFAULT_MMAP_MAX;
- mp_.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
- mp_.trim_threshold = DEFAULT_TRIM_THRESHOLD;
- mp_.pagesize = malloc_getpagesize;
- #ifdef PER_THREAD
- # define NARENAS_FROM_NCORES(n) ((n) * (sizeof(long) == 4 ? 2 : 8))
- mp_.arena_test = NARENAS_FROM_NCORES (1);
- narenas = 1;
- #endif
- }
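Two quick observations on the defaults (not part of the source): NARENAS_FROM_NCORES(1) evaluates to 2 when sizeof(long) is 4 and to 8 otherwise, so arena_test starts at 2 on 32-bit builds and 8 on 64-bit ones, and can later be adjusted through M_ARENA_TEST / M_ARENA_MAX (see the MALLOC_ARENA_* environment handling in ptmalloc_init() below). The DEFAULT_* constants themselves live in malloc.c; in the stock configuration of this release the mmap and trim thresholds both default to 128 KiB and n_mmaps_max to 65536.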
-
-
- #ifdef _LIBC
- # ifdef SHARED
- static void *
- __failing_morecore (ptrdiff_t d)
- {
- return (void *) MORECORE_FAILURE;
- }
-
- extern struct dl_open_hook *_dl_open_hook;
- libc_hidden_proto (_dl_open_hook);
- # endif
-
- # if defined SHARED && !USE___THREAD
- /* This is called by __pthread_initialize_minimal when it needs to use
- malloc to set up the TLS state. We cannot do the full work of
- ptmalloc_init (below) until __pthread_initialize_minimal has finished,
- so it has to switch to using the special startup-time hooks while doing
- those allocations. */
- void
- __libc_malloc_pthread_startup (bool first_time)
- {
- if (first_time)
- {
- ptmalloc_init_minimal ();
- save_malloc_hook = __malloc_hook;
- save_memalign_hook = __memalign_hook;
- save_free_hook = __free_hook;
- __malloc_hook = malloc_starter;
- __memalign_hook = memalign_starter;
- __free_hook = free_starter;
- }
- else
- {
- __malloc_hook = save_malloc_hook;
- __memalign_hook = save_memalign_hook;
- __free_hook = save_free_hook;
- }
- }
- # endif
- #endif
-
- static void
- ptmalloc_init (void)
- {
- #if __STD_C
- const char* s;
- #else
- char* s;
- #endif
- int secure = 0;
-
- if(__malloc_initialized >= 0) return;
- __malloc_initialized = 0;
-
- #ifdef _LIBC
- # if defined SHARED && !USE___THREAD
- /* ptmalloc_init_minimal may already have been called via
- __libc_malloc_pthread_startup, above. */
- if (mp_.pagesize == 0)
- # endif
- #endif
- ptmalloc_init_minimal();
-
- #ifndef NO_THREADS
- # if defined _LIBC
- /* We know __pthread_initialize_minimal has already been called,
- and that is enough. */
- # define NO_STARTER
- # endif
- # ifndef NO_STARTER
- /* With some threads implementations, creating thread-specific data
- or initializing a mutex may call malloc() itself. Provide a
- simple starter version (realloc() won't work). */
- save_malloc_hook = __malloc_hook;
- save_memalign_hook = __memalign_hook;
- save_free_hook = __free_hook;
- __malloc_hook = malloc_starter;
- __memalign_hook = memalign_starter;
- __free_hook = free_starter;
- # ifdef _LIBC
- /* Initialize the pthreads interface. */
- if (__pthread_initialize != NULL)
- __pthread_initialize();
- # endif /* !defined _LIBC */
- # endif /* !defined NO_STARTER */
- #endif /* !defined NO_THREADS */
- mutex_init(&main_arena.mutex);
- main_arena.next = &main_arena;
-
- #if defined _LIBC && defined SHARED
- /* In case this libc copy is in a non-default namespace, never use brk.
- Likewise if dlopened from statically linked program. */
- Dl_info di;
- struct link_map *l;
-
- if (_dl_open_hook != NULL
- || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
- && l->l_ns != LM_ID_BASE))
- __morecore = __failing_morecore;
- #endif
-
- mutex_init(&list_lock);
- tsd_key_create(&arena_key, NULL);
- tsd_setspecific(arena_key, (Void_t *)&main_arena);
- thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
- #ifndef NO_THREADS
- # ifndef NO_STARTER
- __malloc_hook = save_malloc_hook;
- __memalign_hook = save_memalign_hook;
- __free_hook = save_free_hook;
- # else
- # undef NO_STARTER
- # endif
- #endif
- #ifdef _LIBC
- secure = __libc_enable_secure;
- s = NULL;
- if (__builtin_expect (_environ != NULL, 1))
- {
- char **runp = _environ;
- char *envline;
-
- while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
- 0))
- {
- size_t len = strcspn (envline, "=");
-
- if (envline[len] != '=')
- /* This is a "MALLOC_" variable at the end of the string
- without a '=' character. Ignore it since otherwise we
- will access invalid memory below. */
- continue;
-
- switch (len)
- {
- case 6:
- if (memcmp (envline, "CHECK_", 6) == 0)
- s = &envline[7];
- break;
- case 8:
- if (! secure)
- {
- if (memcmp (envline, "TOP_PAD_", 8) == 0)
- mALLOPt(M_TOP_PAD, atoi(&envline[9]));
- else if (memcmp (envline, "PERTURB_", 8) == 0)
- mALLOPt(M_PERTURB, atoi(&envline[9]));
- }
- break;
- case 9:
- if (! secure)
- {
- if (memcmp (envline, "MMAP_MAX_", 9) == 0)
- mALLOPt(M_MMAP_MAX, atoi(&envline[10]));
- #ifdef PER_THREAD
- else if (memcmp (envline, "ARENA_MAX", 9) == 0)
- mALLOPt(M_ARENA_MAX, atoi(&envline[10]));
- #endif
- }
- break;
- #ifdef PER_THREAD
- case 10:
- if (! secure)
- {
- if (memcmp (envline, "ARENA_TEST", 10) == 0)
- mALLOPt(M_ARENA_TEST, atoi(&envline[11]));
- }
- break;
- #endif
- case 15:
- if (! secure)
- {
- if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
- mALLOPt(M_TRIM_THRESHOLD, atoi(&envline[16]));
- else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
- mALLOPt(M_MMAP_THRESHOLD, atoi(&envline[16]));
- }
- break;
- default:
- break;
- }
- }
- }
- #else
- if (! secure)
- {
- if((s = getenv("MALLOC_TRIM_THRESHOLD_")))
- mALLOPt(M_TRIM_THRESHOLD, atoi(s));
- if((s = getenv("MALLOC_TOP_PAD_")))
- mALLOPt(M_TOP_PAD, atoi(s));
- if((s = getenv("MALLOC_PERTURB_")))
- mALLOPt(M_PERTURB, atoi(s));
- if((s = getenv("MALLOC_MMAP_THRESHOLD_")))
- mALLOPt(M_MMAP_THRESHOLD, atoi(s));
- if((s = getenv("MALLOC_MMAP_MAX_")))
- mALLOPt(M_MMAP_MAX, atoi(s));
- }
- s = getenv("MALLOC_CHECK_");
- #endif
- if(s && s[0]) {
- mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
- if (check_action != 0)
- __malloc_check_init();
- }
- void (*hook) (void) = force_reg (__malloc_initialize_hook);
- if (hook != NULL)
- (*hook)();
- __malloc_initialized = 1;
- }
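The practical upshot of the environment scan above: for a program that is not setuid/setgid, exporting one of the MALLOC_* variables before startup is equivalent to the corresponding mallopt() call. A minimal sketch, assuming the usual 128 KiB default mmap threshold:

/* Not glibc code: rough equivalent of running the program with
   MALLOC_MMAP_THRESHOLD_=262144 in its environment.  */
#include <malloc.h>
#include <stdlib.h>

int
main (void)
{
  /* The same knob the environment scan turns via M_MMAP_THRESHOLD.  */
  mallopt (M_MMAP_THRESHOLD, 256 * 1024);

  /* 200 KiB is above the 128 KiB default but below the raised threshold,
     so this request is now served from an arena rather than a private
     mmap()ed chunk.  */
  void *p = malloc (200 * 1024);
  free (p);
  return 0;
}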
-
- /* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
- #ifdef thread_atfork_static
- thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
- ptmalloc_unlock_all2)
- #endif
-