/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 2001-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wg@malloc.de>, 2001.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */

#include <stdbool.h>

#if HAVE_TUNABLES
# define TUNABLE_NAMESPACE malloc
#endif
#include <elf/dl-tunables.h>

/* Compile-time constants.  */

#define HEAP_MIN_SIZE (32 * 1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
#  define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
#  define HEAP_MAX_SIZE (1024 * 1024) /* must be a power of two */
# endif
#endif

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than the
   mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.  */

/***************************************************************************/

#define top(ar_ptr) ((ar_ptr)->top)

/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  */

typedef struct _heap_info
{
  mstate ar_ptr; /* Arena for this heap. */
  struct _heap_info *prev; /* Previous heap. */
  size_t size;   /* Current size in bytes. */
  size_t mprotect_size; /* Size in bytes that has been mprotected
                           PROT_READ|PROT_WRITE.  */
  /* Make sure the following data is properly aligned, particularly
     that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
     MALLOC_ALIGNMENT. */
  char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;

/* Get a compile-time error if the heap_info padding is not correct
   to make alignment work as expected in sYSMALLOc.  */
extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
                                             + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
                                            ? -1 : 1];

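/* Worked example (editor's note): on a 32-bit build with 16-byte
   MALLOC_ALIGNMENT (e.g. recent i386 glibc), SIZE_SZ == 4 and the four
   fields above occupy 16 bytes, so

     pad = -6 * SIZE_SZ & MALLOC_ALIGN_MASK = -24 & 15 = 8,

   giving sizeof (heap_info) + 2 * SIZE_SZ = (16 + 8) + 8 = 32, a
   multiple of MALLOC_ALIGNMENT.  On x86-64 (SIZE_SZ == 8,
   MALLOC_ALIGNMENT == 16) the pad works out to zero: 32 + 0 + 16 = 48
   is already a multiple of 16.  */
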
/* Thread specific data.  */

static __thread mstate thread_arena attribute_tls_model_ie;

/* Arena free list.  free_list_lock synchronizes access to the
   free_list variable below, and the next_free and attached_threads
   members of struct malloc_state objects.  No other locks must be
   acquired after free_list_lock has been acquired.  */

__libc_lock_define_initialized (static, free_list_lock);
static size_t narenas = 1;
static mstate free_list;

/* list_lock prevents concurrent writes to the next member of struct
   malloc_state objects.

   Read access to the next member is supposed to synchronize with the
   atomic_write_barrier and the write to the next member in
   _int_new_arena.  This suffers from data races; see the FIXME
   comments in _int_new_arena and reused_arena.

   list_lock also prevents concurrent forks.  At the time list_lock is
   acquired, no arena lock must have been acquired, but it is
   permitted to acquire arena locks subsequently, while list_lock is
   acquired.  */
__libc_lock_define_initialized (static, list_lock);

/* Already initialized? */
int __malloc_initialized = -1;

/**************************************************************************/


/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
      ptr = thread_arena;						      \
      arena_lock (ptr, size);						      \
  } while (0)

#define arena_lock(ptr, size) do {					      \
      if (ptr)								      \
        __libc_lock_lock (ptr->mutex);					      \
      else								      \
        ptr = arena_get2 ((size), NULL);				      \
  } while (0)

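/* Illustrative use (editor's sketch of the caller pattern, not a
   verbatim quote from malloc.c):

     mstate ar_ptr;
     void *victim;
     arena_get (ar_ptr, bytes);    /@ lock this thread's arena, or
                                      acquire/create one @/
     victim = _int_malloc (ar_ptr, bytes);
     if (ar_ptr != NULL)
       __libc_lock_unlock (ar_ptr->mutex);

   (with /@ @/ standing in for nested comment delimiters).  */
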
/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
  ((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
#define arena_for_chunk(ptr) \
  (chunk_main_arena (ptr) ? &main_arena : heap_for_ptr (ptr)->ar_ptr)

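/* Worked example (editor's note): with the 64-bit default of
   HEAP_MAX_SIZE == 64 MiB (0x4000000), a chunk at 0x7f65e4a3c010 lives
   in the heap mapped at

     0x7f65e4a3c010 & ~(0x4000000 - 1) == 0x7f65e4000000

   so heap_for_ptr reaches the heap_info at the start of that mapping,
   and arena_for_chunk follows its ar_ptr without any global lookup.
   This is why HEAP_MAX_SIZE must be a power of two.  */
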
/**************************************************************************/

/* atfork support.  */

/* The following three functions are called around fork from a
   multi-threaded process.  We do not use the general fork handler
   mechanism to make sure that our handlers are the last ones being
   called, so that other fork handlers can use the malloc
   subsystem.  */

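/* Conceptually the three hooks line up with pthread_atfork's slots
   (editor's sketch; glibc wires them into fork directly rather than
   registering them this way):

     pthread_atfork (__malloc_fork_lock_parent,     /@ prepare        @/
                     __malloc_fork_unlock_parent,   /@ parent, after  @/
                     __malloc_fork_unlock_child);   /@ child, after   @/
*/
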
void
__malloc_fork_lock_parent (void)
{
  if (__malloc_initialized < 1)
    return;

  /* We do not acquire free_list_lock here because we completely
     reconstruct free_list in __malloc_fork_unlock_child.  */

  __libc_lock_lock (list_lock);

  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_lock (ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
}

void
__malloc_fork_unlock_parent (void)
{
  if (__malloc_initialized < 1)
    return;

  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }
  __libc_lock_unlock (list_lock);
}

void
__malloc_fork_unlock_child (void)
{
  if (__malloc_initialized < 1)
    return;

  /* Push all arenas to the free list, except thread_arena, which is
     attached to the current thread.  */
  __libc_lock_init (free_list_lock);
  if (thread_arena != NULL)
    thread_arena->attached_threads = 1;
  free_list = NULL;
  for (mstate ar_ptr = &main_arena;; )
    {
      __libc_lock_init (ar_ptr->mutex);
      if (ar_ptr != thread_arena)
        {
          /* This arena is no longer attached to any thread.  */
          ar_ptr->attached_threads = 0;
          ar_ptr->next_free = free_list;
          free_list = ar_ptr;
        }
      ar_ptr = ar_ptr->next;
      if (ar_ptr == &main_arena)
        break;
    }

  __libc_lock_init (list_lock);
}

#if HAVE_TUNABLES
static inline int do_set_mallopt_check (int32_t value);
void
TUNABLE_CALLBACK (set_mallopt_check) (tunable_val_t *valp)
{
  int32_t value = (int32_t) valp->numval;
  if (value != 0)
    __malloc_check_init ();
}

# define TUNABLE_CALLBACK_FNDECL(__name, __type) \
static inline int do_ ## __name (__type value);				      \
void									      \
TUNABLE_CALLBACK (__name) (tunable_val_t *valp)				      \
{									      \
  __type value = (__type) (valp)->numval;				      \
  do_ ## __name (value);						      \
}

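/* For reference (editor's note), a use such as
   TUNABLE_CALLBACK_FNDECL (set_mmap_threshold, size_t) expands to:

     static inline int do_set_mmap_threshold (size_t value);
     void
     TUNABLE_CALLBACK (set_mmap_threshold) (tunable_val_t *valp)
     {
       size_t value = (size_t) (valp)->numval;
       do_set_mmap_threshold (value);
     }

   i.e. a thin bridge from the tunables framework to the do_* helpers
   defined in malloc.c.  */
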
TUNABLE_CALLBACK_FNDECL (set_mmap_threshold, size_t)
TUNABLE_CALLBACK_FNDECL (set_mmaps_max, int32_t)
TUNABLE_CALLBACK_FNDECL (set_top_pad, size_t)
TUNABLE_CALLBACK_FNDECL (set_perturb_byte, int32_t)
TUNABLE_CALLBACK_FNDECL (set_trim_threshold, size_t)
TUNABLE_CALLBACK_FNDECL (set_arena_max, size_t)
TUNABLE_CALLBACK_FNDECL (set_arena_test, size_t)
#if USE_TCACHE
TUNABLE_CALLBACK_FNDECL (set_tcache_max, size_t)
TUNABLE_CALLBACK_FNDECL (set_tcache_count, size_t)
TUNABLE_CALLBACK_FNDECL (set_tcache_unsorted_limit, size_t)
#endif
TUNABLE_CALLBACK_FNDECL (set_mxfast, size_t)
#else
/* Initialization routine. */
#include <string.h>
extern char **_environ;

static char *
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit.  */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
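
/* Worked example (editor's note): with

     char *envp[] = { "PATH=/bin", "MALLOC_CHECK_=1", NULL };
     char **runp = envp;

   next_env_entry (&runp) skips "PATH=/bin", returns a pointer to
   "CHECK_=1" (the text after the "MALLOC_" prefix), and leaves runp
   positioned after the match, ready for the next call.  */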
#endif


#ifdef SHARED
static void *
__failing_morecore (ptrdiff_t d)
{
  return (void *) MORECORE_FAILURE;
}

extern struct dl_open_hook *_dl_open_hook;
libc_hidden_proto (_dl_open_hook);
#endif

static void
ptmalloc_init (void)
{
  if (__malloc_initialized >= 0)
    return;

  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  thread_arena = &main_arena;

  malloc_init_state (&main_arena);

#if HAVE_TUNABLES
  TUNABLE_GET (check, int32_t, TUNABLE_CALLBACK (set_mallopt_check));
  TUNABLE_GET (top_pad, size_t, TUNABLE_CALLBACK (set_top_pad));
  TUNABLE_GET (perturb, int32_t, TUNABLE_CALLBACK (set_perturb_byte));
  TUNABLE_GET (mmap_threshold, size_t, TUNABLE_CALLBACK (set_mmap_threshold));
  TUNABLE_GET (trim_threshold, size_t, TUNABLE_CALLBACK (set_trim_threshold));
  TUNABLE_GET (mmap_max, int32_t, TUNABLE_CALLBACK (set_mmaps_max));
  TUNABLE_GET (arena_max, size_t, TUNABLE_CALLBACK (set_arena_max));
  TUNABLE_GET (arena_test, size_t, TUNABLE_CALLBACK (set_arena_test));
# if USE_TCACHE
  TUNABLE_GET (tcache_max, size_t, TUNABLE_CALLBACK (set_tcache_max));
  TUNABLE_GET (tcache_count, size_t, TUNABLE_CALLBACK (set_tcache_count));
  TUNABLE_GET (tcache_unsorted_limit, size_t,
	       TUNABLE_CALLBACK (set_tcache_unsorted_limit));
# endif
  TUNABLE_GET (mxfast, size_t, TUNABLE_CALLBACK (set_mxfast));
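  /* Editor's note: each TUNABLE_GET above reads one glibc.malloc.*
     tunable, so in a tunables-enabled build these knobs are reachable
     via the environment, e.g.

       GLIBC_TUNABLES=glibc.malloc.mmap_threshold=131072 ./app

     without going through the legacy MALLOC_* variables handled
     below.  */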
#else
  const char *s = NULL;
  if (__glibc_likely (_environ != NULL))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt (M_PERTURB, atoi (&envline[9]));
                }
              break;
            case 9:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
                }
              break;
            case 10:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
                }
              break;
            case 15:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if (s && s[0] != '\0' && s[0] != '0')
    __malloc_check_init ();
#endif

#if HAVE_MALLOC_INIT_HOOK
  void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
#endif
  __malloc_initialized = 1;
}

/* Managing heaps and arenas (for concurrent threads) */

#if MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
dump_heap (heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
  ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
        (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
  p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
                   ~MALLOC_ALIGN_MASK);
  for (;; )
    {
      fprintf (stderr, "chunk %p size %10lx", p, (long) p->size);
      if (p == top (heap->ar_ptr))
        {
          fprintf (stderr, " (top)\n");
          break;
        }
      else if (p->size == (0 | PREV_INUSE))
        {
          fprintf (stderr, " (fence)\n");
          break;
        }
      fprintf (stderr, "\n");
      p = next_chunk (p);
    }
}
#endif /* MALLOC_DEBUG > 1 */

/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
   addresses as opposed to increasing, new_heap would badly fragment the
   address space.  In that case remember the second HEAP_MAX_SIZE part
   aligned to HEAP_MAX_SIZE from the last mmap (0, HEAP_MAX_SIZE << 1, ...)
   call (if it is already aligned) and try to reuse it next time.  No
   locking is needed for it, as the kernel ensures atomicity for us -
   worst case we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value
   of addr in multiple threads, but only one will succeed.  */
static char *aligned_heap_area;

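/* Sketch of the alignment trick used below in new_heap (editor's note,
   the addresses are made up): to obtain a HEAP_MAX_SIZE-aligned region,
   map twice that much and trim both ends:

     p1 = mmap (0, HEAP_MAX_SIZE << 1, ...);   /@ say 0x7f65e1234000 @/
     p2 = (p1 + HEAP_MAX_SIZE - 1) & ~(HEAP_MAX_SIZE - 1);
                                               /@ up to 0x7f65e4000000 @/
     munmap (p1, p2 - p1);                     /@ head slop @/
     munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - (p2 - p1));  /@ tail @/

   leaving exactly [p2, p2 + HEAP_MAX_SIZE) mapped and aligned.  */
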
/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
new_heap (size_t size, size_t top_pad)
{
  size_t pagesize = GLRO (dl_pagesize);
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if (size + top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if (size + top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if (size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = ALIGN_UP (size, pagesize);

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if (aligned_heap_area)
    {
      p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                          MAP_NORESERVE);
      aligned_heap_area = NULL;
      if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
        {
          __munmap (p2, HEAP_MAX_SIZE);
          p2 = MAP_FAILED;
        }
    }
  if (p2 == MAP_FAILED)
    {
      p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
      if (p1 != MAP_FAILED)
        {
          p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
                         & ~(HEAP_MAX_SIZE - 1));
          ul = p2 - p1;
          if (ul)
            __munmap (p1, ul);
          else
            aligned_heap_area = p2 + HEAP_MAX_SIZE;
          __munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
        }
      else
        {
          /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
             is already aligned. */
          p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
          if (p2 == MAP_FAILED)
            return 0;

          if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
            {
              __munmap (p2, HEAP_MAX_SIZE);
              return 0;
            }
        }
    }
  if (__mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
    {
      __munmap (p2, HEAP_MAX_SIZE);
      return 0;
    }
  h = (heap_info *) p2;
  h->size = size;
  h->mprotect_size = size;
  LIBC_PROBE (memory_heap_new, 2, h, h->size);
  return h;
}

/* Grow a heap.  size is automatically rounded up to a
   multiple of the page size. */

static int
grow_heap (heap_info *h, long diff)
{
  size_t pagesize = GLRO (dl_pagesize);
  long new_size;

  diff = ALIGN_UP (diff, pagesize);
  new_size = (long) h->size + diff;
  if ((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
    return -1;

  if ((unsigned long) new_size > h->mprotect_size)
    {
      if (__mprotect ((char *) h + h->mprotect_size,
                      (unsigned long) new_size - h->mprotect_size,
                      PROT_READ | PROT_WRITE) != 0)
        return -2;

      h->mprotect_size = new_size;
    }

  h->size = new_size;
  LIBC_PROBE (memory_heap_more, 2, h, h->size);
  return 0;
}

/* Shrink a heap.  */

static int
shrink_heap (heap_info *h, long diff)
{
  long new_size;

  new_size = (long) h->size - diff;
  if (new_size < (long) sizeof (*h))
    return -1;

  /* Try to re-map the extra heap space freshly to save memory, and make it
     inaccessible.  See malloc-sysdep.h to know when this is true.  */
  if (__glibc_unlikely (check_may_shrink_heap ()))
    {
      if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
                         MAP_FIXED) == (char *) MAP_FAILED)
        return -2;

      h->mprotect_size = new_size;
    }
  else
    __madvise ((char *) h + new_size, diff, MADV_DONTNEED);
  /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/

  h->size = new_size;
  LIBC_PROBE (memory_heap_less, 2, h, h->size);
  return 0;
}

/* Delete a heap. */

#define delete_heap(heap) \
  do {									      \
      if ((char *) (heap) + HEAP_MAX_SIZE == aligned_heap_area)		      \
        aligned_heap_area = NULL;					      \
      __munmap ((char *) (heap), HEAP_MAX_SIZE);			      \
    } while (0)

static int
heap_trim (heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO (dl_pagesize);
  mchunkptr top_chunk = top (ar_ptr), p;
  heap_info *prev_heap;
  long new_size, top_size, top_area, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
    {
      prev_heap = heap->prev;
      prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
      p = chunk_at_offset (prev_heap, prev_size);
      /* fencepost must be properly aligned.  */
      misalign = ((long) p) & MALLOC_ALIGN_MASK;
      p = chunk_at_offset (prev_heap, prev_size - misalign);
      assert (chunksize_nomask (p) == (0 | PREV_INUSE)); /* must be fencepost */
      p = prev_chunk (p);
      new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
      assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
      if (!prev_inuse (p))
        new_size += prev_size (p);
      assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
      if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
        break;
      ar_ptr->system_mem -= heap->size;
      LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
      delete_heap (heap);
      heap = prev_heap;
      if (!prev_inuse (p)) /* consolidate backward */
        {
          p = prev_chunk (p);
          unlink_chunk (ar_ptr, p);
        }
      assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
      assert (((char *) p + new_size) == ((char *) heap + heap->size));
      top (ar_ptr) = top_chunk = p;
      set_head (top_chunk, new_size | PREV_INUSE);
      /*check_chunk(ar_ptr, top_chunk);*/
    }

  /* Uses similar logic for per-thread arenas as the main arena with systrim
     and _int_free by preserving the top pad and rounding down to the nearest
     page.  */
  top_size = chunksize (top_chunk);
  if ((unsigned long)(top_size) <
      (unsigned long)(mp_.trim_threshold))
    return 0;

  top_area = top_size - MINSIZE - 1;
  if (top_area < 0 || (size_t) top_area <= pad)
    return 0;

  /* Release in pagesize units and round down to the nearest page.  */
  extra = ALIGN_DOWN(top_area - pad, pagesz);
  if (extra == 0)
    return 0;

  /* Try to shrink. */
  if (shrink_heap (heap, extra) != 0)
    return 0;

  ar_ptr->system_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head (top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}

/* Create a new arena with initial size "size".  */

/* If REPLACED_ARENA is not NULL, detach it from this thread.  Must be
   called while free_list_lock is held.  */
static void
detach_arena (mstate replaced_arena)
{
  if (replaced_arena != NULL)
    {
      assert (replaced_arena->attached_threads > 0);
      /* The current implementation only detaches from main_arena in
	 case of allocation failure.  This means that it is likely not
	 beneficial to put the arena on free_list even if the
	 reference count reaches zero.  */
      --replaced_arena->attached_threads;
    }
}

static mstate
_int_new_arena (size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
                mp_.top_pad);
  if (!h)
    {
      /* Maybe size is too large to fit in a single heap.  So, just try
         to create a minimally-sized arena and let _int_malloc() attempt
         to deal with the large request via mmap_chunk().  */
      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
      if (!h)
        return 0;
    }
  a = h->ar_ptr = (mstate) (h + 1);
  malloc_init_state (a);
  a->attached_threads = 1;
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *) (a + 1);
  misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top (a) = (mchunkptr) ptr;
  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);

  LIBC_PROBE (memory_arena_new, 2, a, size);
  mstate replaced_arena = thread_arena;
  thread_arena = a;
  __libc_lock_init (a->mutex);

  __libc_lock_lock (list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  /* FIXME: The barrier is an attempt to synchronize with read access
     in reused_arena, which does not acquire list_lock while
     traversing the list.  */
  atomic_write_barrier ();
  main_arena.next = a;

  __libc_lock_unlock (list_lock);

  __libc_lock_lock (free_list_lock);
  detach_arena (replaced_arena);
  __libc_lock_unlock (free_list_lock);

  /* Lock this arena.  NB: Another thread may have been attached to
     this arena because the arena is now accessible from the
     main_arena.next list and could have been picked by reused_arena.
     This can only happen for the last arena created (before the arena
     limit is reached).  At this point, some arena has to be attached
     to two threads.  We could acquire the arena lock before list_lock
     to make it less likely that reused_arena picks this new arena,
     but this could result in a deadlock with
     __malloc_fork_lock_parent.  */

  __libc_lock_lock (a->mutex);

  return a;
}


/* Remove an arena from free_list.  */
static mstate
get_free_list (void)
{
  mstate replaced_arena = thread_arena;
  mstate result = free_list;
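  /* Editor's note: the read of free_list above is intentionally
     unsynchronized; if the list looks empty we skip taking
     free_list_lock entirely, and free_list is re-read below under the
     lock before the result is trusted.  */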
  if (result != NULL)
    {
      __libc_lock_lock (free_list_lock);
      result = free_list;
      if (result != NULL)
	{
	  free_list = result->next_free;

	  /* The arena will be attached to this thread.  */
	  assert (result->attached_threads == 0);
	  result->attached_threads = 1;

	  detach_arena (replaced_arena);
	}
      __libc_lock_unlock (free_list_lock);

      if (result != NULL)
        {
          LIBC_PROBE (memory_arena_reuse_free_list, 1, result);
          __libc_lock_lock (result->mutex);
	  thread_arena = result;
        }
    }

  return result;
}

/* Remove the arena from the free list (if it is present).
   free_list_lock must have been acquired by the caller.  */
static void
remove_from_free_list (mstate arena)
{
  mstate *previous = &free_list;
  for (mstate p = free_list; p != NULL; p = p->next_free)
    {
      assert (p->attached_threads == 0);
      if (p == arena)
	{
	  /* Remove the requested arena from the list.  */
	  *previous = p->next_free;
	  break;
	}
      else
	previous = &p->next_free;
    }
}

/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in
   it and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  /* FIXME: Access to next_to_use suffers from data races.  */
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  /* Iterate over all arenas (including those linked from
     free_list).  */
  result = next_to_use;
  do
    {
      if (!__libc_lock_trylock (result->mutex))
        goto out;

      /* FIXME: This is a data race, see _int_new_arena.  */
      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory
     in that arena and it is currently locked.   */
  if (result == avoid_arena)
    result = result->next;

  /* No arena available without contention.  Wait for the next in line.  */
  LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
  __libc_lock_lock (result->mutex);

out:
  /* Attach the arena to the current thread.  */
  {
    /* Update the arena thread attachment counters.   */
    mstate replaced_arena = thread_arena;
    __libc_lock_lock (free_list_lock);
    detach_arena (replaced_arena);

    /* We may have picked up an arena on the free list.  We need to
       preserve the invariant that no arena on the free list has a
       positive attached_threads counter (otherwise,
       arena_thread_freeres cannot use the counter to determine if the
       arena needs to be put on the free list).  We unconditionally
       remove the selected arena from the free list.  The caller of
       reused_arena checked the free list and observed it to be empty,
       so the list is very short.  */
    remove_from_free_list (result);

    ++result->attached_threads;

    __libc_lock_unlock (free_list_lock);
  }

  LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
  thread_arena = result;
  next_to_use = result->next;

  return result;
}

static mstate
arena_get2 (size_t size, mstate avoid_arena)
{
  mstate a;

  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
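      /* Worked example (editor's note): while narenas_limit == 0 (the
         arena_test phase), narenas_limit - 1 wraps to SIZE_MAX and the
         test below always succeeds, so a new arena is created.  Once
         the limit is set (e.g. NARENAS_FROM_NCORES (4) == 32 on a
         64-bit box, where the macro in malloc.c multiplies cores by
         8), creation stops at that many arenas and reused_arena takes
         over.  */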
      if (__glibc_unlikely (n <= narenas_limit - 1))
        {
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
	  if (__glibc_unlikely (a == NULL))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
  return a;
}

/* If we don't have the main arena, then maybe the failure is due to running
   out of mmapped areas, so we can try allocating on the main arena.
   Otherwise, it is likely that sbrk() has failed and there is still a chance
   to mmap(), so try one of the other arenas.  */
static mstate
arena_get_retry (mstate ar_ptr, size_t bytes)
{
  LIBC_PROBE (memory_arena_retry, 2, bytes, ar_ptr);
  if (ar_ptr != &main_arena)
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = &main_arena;
      __libc_lock_lock (ar_ptr->mutex);
    }
  else
    {
      __libc_lock_unlock (ar_ptr->mutex);
      ar_ptr = arena_get2 (bytes, ar_ptr);
    }

  return ar_ptr;
}

void
__malloc_arena_thread_freeres (void)
{
  /* Shut down the thread cache first.  This could deallocate data for
     the thread arena, so do this before we put the arena on the free
     list.  */
  tcache_thread_shutdown ();

  mstate a = thread_arena;
  thread_arena = NULL;

  if (a != NULL)
    {
      __libc_lock_lock (free_list_lock);
      /* If this was the last attached thread for this arena, put the
	 arena on the free list.  */
      assert (a->attached_threads > 0);
      if (--a->attached_threads == 0)
	{
	  a->next_free = free_list;
	  free_list = a;
	}
      __libc_lock_unlock (free_list_lock);
    }
}

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */