nptl/allocatestack.c

/* Copyright (C) 2002-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <dl-sysdep.h>
#include <dl-tls.h>
#include <tls.h>
#include <list.h>
#include <lowlevellock.h>
#include <futex-internal.h>
#include <kernel-features.h>
#include <stack-aliasing.h>

#ifndef NEED_SEPARATE_REGISTER_STACK
Packit 6c4009
Packit 6c4009
/* Most architectures have exactly one stack pointer.  Some have more.  */
Packit 6c4009
# define STACK_VARIABLES void *stackaddr = NULL
Packit 6c4009
Packit 6c4009
/* How to pass the values to the 'create_thread' function.  */
Packit 6c4009
# define STACK_VARIABLES_ARGS stackaddr
Packit 6c4009
Packit 6c4009
/* How to declare the function which gets these parameters.  */
Packit 6c4009
# define STACK_VARIABLES_PARMS void *stackaddr
Packit 6c4009
Packit 6c4009
/* How to declare allocate_stack.  */
Packit 6c4009
# define ALLOCATE_STACK_PARMS void **stack
Packit 6c4009
Packit 6c4009
/* This is how the function is called.  We do it this way to allow
Packit 6c4009
   other variants of the function to have more parameters.  */
Packit 6c4009
# define ALLOCATE_STACK(attr, pd) allocate_stack (attr, pd, &stackaddr)
Packit 6c4009
Packit 6c4009
#else
Packit 6c4009
Packit 6c4009
/* We need two stacks.  The kernel will place them but we have to tell
Packit 6c4009
   the kernel about the size of the reserved address space.  */
Packit 6c4009
# define STACK_VARIABLES void *stackaddr = NULL; size_t stacksize = 0
Packit 6c4009
Packit 6c4009
/* How to pass the values to the 'create_thread' function.  */
Packit 6c4009
# define STACK_VARIABLES_ARGS stackaddr, stacksize
Packit 6c4009
Packit 6c4009
/* How to declare the function which gets these parameters.  */
Packit 6c4009
# define STACK_VARIABLES_PARMS void *stackaddr, size_t stacksize
Packit 6c4009
Packit 6c4009
/* How to declare allocate_stack.  */
Packit 6c4009
# define ALLOCATE_STACK_PARMS void **stack, size_t *stacksize
Packit 6c4009
Packit 6c4009
/* This is how the function is called.  We do it this way to allow
Packit 6c4009
   other variants of the function to have more parameters.  */
Packit 6c4009
# define ALLOCATE_STACK(attr, pd) \
Packit 6c4009
  allocate_stack (attr, pd, &stackaddr, &stacksize)
Packit 6c4009
Packit 6c4009
#endif
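/* Editorial note (added, not part of the original source): with
   NEED_SEPARATE_REGISTER_STACK undefined, the macros above expand roughly to

     void *stackaddr = NULL;
     int err = allocate_stack (attr, &pd, &stackaddr);

   while the separate-register-stack variant (e.g. ia64) threads an extra
   'size_t stacksize' through the same call sites.  The indirection keeps
   the callers in pthread_create identical for both layouts.  */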
Packit 6c4009
Packit 6c4009
Packit 6c4009
/* Default alignment of stack.  */
Packit 6c4009
#ifndef STACK_ALIGN
Packit 6c4009
# define STACK_ALIGN __alignof__ (long double)
Packit 6c4009
#endif
Packit 6c4009
Packit 6c4009
/* Default value for minimal stack size after allocating thread
Packit 6c4009
   descriptor and guard.  */
Packit 6c4009
#ifndef MINIMAL_REST_STACK
Packit 6c4009
# define MINIMAL_REST_STACK	4096
Packit 6c4009
#endif
Packit 6c4009
Packit 6c4009
Packit 6c4009
/* Newer kernels have the MAP_STACK flag to indicate a mapping is used for
Packit 6c4009
   a stack.  Use it when possible.  */
Packit 6c4009
#ifndef MAP_STACK
Packit 6c4009
# define MAP_STACK 0
Packit 6c4009
#endif
Packit 6c4009
Packit 6c4009
/* This yields the pointer that TLS support code calls the thread pointer.  */
Packit 6c4009
#if TLS_TCB_AT_TP
Packit 6c4009
# define TLS_TPADJ(pd) (pd)
Packit 6c4009
#elif TLS_DTV_AT_TP
Packit 6c4009
# define TLS_TPADJ(pd) ((struct pthread *)((char *) (pd) + TLS_PRE_TCB_SIZE))
Packit 6c4009
#endif
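/* Editorial sketch (added comment, hedged): TLS_TPADJ maps a struct pthread
   pointer to the value the generic TLS helpers expect.  On TLS_TCB_AT_TP
   targets the TCB sits at the thread pointer, so no adjustment is needed;
   on TLS_DTV_AT_TP targets the descriptor lives TLS_PRE_TCB_SIZE bytes
   below the thread pointer and the macro skips over that prefix.  Typical
   uses later in this file:

     dtv_t *dtv = GET_DTV (TLS_TPADJ (result));
     _dl_allocate_tls_init (TLS_TPADJ (result));  */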
Packit 6c4009
Packit 6c4009
/* Cache handling for not-yet free stacks.  */
Packit 6c4009
Packit 6c4009
/* Maximum size in bytes of the cache.  */
static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40 MiB by default.  */
Packit 6c4009
static size_t stack_cache_actsize;
Packit 6c4009
Packit 6c4009
/* Mutex protecting this variable.  */
Packit 6c4009
static int stack_cache_lock = LLL_LOCK_INITIALIZER;
Packit 6c4009
Packit 6c4009
/* List of queued stack frames.  */
Packit 6c4009
static LIST_HEAD (stack_cache);
Packit 6c4009
Packit 6c4009
/* List of the stacks in use.  */
Packit 6c4009
static LIST_HEAD (stack_used);
Packit 6c4009
Packit 6c4009
/* We need to record what list operations we are going to do so that,
Packit 6c4009
   in case of an asynchronous interruption due to a fork() call, we
Packit 6c4009
   can correct for the work.  */
Packit 6c4009
static uintptr_t in_flight_stack;
Packit 6c4009
Packit 6c4009
/* List of the threads with user provided stacks in use.  No need to
Packit 6c4009
   initialize this, since it's done in __pthread_initialize_minimal.  */
Packit 6c4009
list_t __stack_user __attribute__ ((nocommon));
Packit 6c4009
hidden_data_def (__stack_user)
Packit 6c4009
Packit 6c4009
Packit 6c4009
/* Check whether the stack is still used or not.  */
Packit 6c4009
#define FREE_P(descr) ((descr)->tid <= 0)
Packit 6c4009
Packit 6c4009
Packit 6c4009
static void
Packit 6c4009
stack_list_del (list_t *elem)
Packit 6c4009
{
Packit 6c4009
  in_flight_stack = (uintptr_t) elem;
Packit 6c4009
Packit 6c4009
  atomic_write_barrier ();
Packit 6c4009
Packit 6c4009
  list_del (elem);
Packit 6c4009
Packit 6c4009
  atomic_write_barrier ();
Packit 6c4009
Packit 6c4009
  in_flight_stack = 0;
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
Packit 6c4009
static void
Packit 6c4009
stack_list_add (list_t *elem, list_t *list)
Packit 6c4009
{
Packit 6c4009
  in_flight_stack = (uintptr_t) elem | 1;
Packit 6c4009
Packit 6c4009
  atomic_write_barrier ();
Packit 6c4009
Packit 6c4009
  list_add (elem, list);
Packit 6c4009
Packit 6c4009
  atomic_write_barrier ();
Packit 6c4009
Packit 6c4009
  in_flight_stack = 0;
Packit 6c4009
}
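/* Editorial sketch (added, not original): in_flight_stack records the
   element a list operation is working on and tags the low bit to say which
   operation it was; list_t objects are pointer-aligned, so bit 0 is free:

     in_flight_stack = (uintptr_t) elem | 1;   // a list_add was in flight
     in_flight_stack = (uintptr_t) elem;       // a list_del was in flight

   __reclaim_stacks (run in the fork child) tests 'in_flight_stack & 1' and
   either finishes the interrupted add or replays the delete.  */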
Packit 6c4009
Packit 6c4009
Packit 6c4009
/* We create a doubly-linked list of all cache entries.  Doubly linked
   because this allows removing entries from the end.  */
Packit 6c4009
Packit 6c4009
Packit 6c4009
/* Get a stack frame from the cache.  We have to match by size since
Packit 6c4009
   some blocks might be too small or far too large.  */
Packit 6c4009
static struct pthread *
Packit 6c4009
get_cached_stack (size_t *sizep, void **memp)
Packit 6c4009
{
Packit 6c4009
  size_t size = *sizep;
Packit 6c4009
  struct pthread *result = NULL;
Packit 6c4009
  list_t *entry;
Packit 6c4009
Packit 6c4009
  lll_lock (stack_cache_lock, LLL_PRIVATE);
Packit 6c4009
Packit 6c4009
  /* Search the cache for a matching entry.  We search for the
Packit 6c4009
     smallest stack which has at least the required size.  Note that
Packit 6c4009
     in normal situations the size of all allocated stacks is the
Packit 6c4009
     same.  At the very least there are only a few different sizes.
Packit 6c4009
     Therefore this loop will exit early most of the time with an
Packit 6c4009
     exact match.  */
Packit 6c4009
  list_for_each (entry, &stack_cache)
Packit 6c4009
    {
Packit 6c4009
      struct pthread *curr;
Packit 6c4009
Packit 6c4009
      curr = list_entry (entry, struct pthread, list);
Packit 6c4009
      if (FREE_P (curr) && curr->stackblock_size >= size)
Packit 6c4009
	{
Packit 6c4009
	  if (curr->stackblock_size == size)
Packit 6c4009
	    {
Packit 6c4009
	      result = curr;
Packit 6c4009
	      break;
Packit 6c4009
	    }
Packit 6c4009
Packit 6c4009
	  if (result == NULL
Packit 6c4009
	      || result->stackblock_size > curr->stackblock_size)
Packit 6c4009
	    result = curr;
Packit 6c4009
	}
Packit 6c4009
    }
Packit 6c4009
Packit 6c4009
  if (__builtin_expect (result == NULL, 0)
Packit 6c4009
      /* Make sure the size difference is not too excessive.  In that
Packit 6c4009
	 case we do not use the block.  */
Packit 6c4009
      || __builtin_expect (result->stackblock_size > 4 * size, 0))
Packit 6c4009
    {
Packit 6c4009
      /* Release the lock.  */
Packit 6c4009
      lll_unlock (stack_cache_lock, LLL_PRIVATE);
Packit 6c4009
Packit 6c4009
      return NULL;
Packit 6c4009
    }
Packit 6c4009
Packit 6c4009
  /* Don't allow setxid until cloned.  */
Packit 6c4009
  result->setxid_futex = -1;
Packit 6c4009
Packit 6c4009
  /* Dequeue the entry.  */
Packit 6c4009
  stack_list_del (&result->list);
Packit 6c4009
Packit 6c4009
  /* And add to the list of stacks in use.  */
Packit 6c4009
  stack_list_add (&result->list, &stack_used);
Packit 6c4009
Packit 6c4009
  /* And decrease the cache size.  */
Packit 6c4009
  stack_cache_actsize -= result->stackblock_size;
Packit 6c4009
Packit 6c4009
  /* Release the lock early.  */
Packit 6c4009
  lll_unlock (stack_cache_lock, LLL_PRIVATE);
Packit 6c4009
Packit 6c4009
  /* Report size and location of the stack to the caller.  */
Packit 6c4009
  *sizep = result->stackblock_size;
Packit 6c4009
  *memp = result->stackblock;
Packit 6c4009
Packit 6c4009
  /* Cancellation handling is back to the default.  */
Packit 6c4009
  result->cancelhandling = 0;
Packit 6c4009
  result->cleanup = NULL;
Packit 6c4009
Packit 6c4009
  /* No pending event.  */
Packit 6c4009
  result->nextevent = NULL;
Packit 6c4009
Packit 6c4009
  /* Clear the DTV.  */
Packit 6c4009
  dtv_t *dtv = GET_DTV (TLS_TPADJ (result));
Packit 6c4009
  for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
Packit 6c4009
    free (dtv[1 + cnt].pointer.to_free);
Packit 6c4009
  memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t));
Packit 6c4009
Packit 6c4009
  /* Re-initialize the TLS.  */
Packit 6c4009
  _dl_allocate_tls_init (TLS_TPADJ (result));
Packit 6c4009
Packit 6c4009
  return result;
Packit 6c4009
}
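/* Editorial usage sketch (hedged; allocate_stack below is the real caller):

     size_t size = rounded_request;          // hypothetical local
     void *mem;
     struct pthread *pd = get_cached_stack (&size, &mem);
     if (pd == NULL)
       ;  // cache miss: fall back to a fresh mmap
     else
       ;  // SIZE/MEM now describe the reused block; TLS already reset

   A cached block is only reused if it is at most four times the requested
   size, and SIZE is updated to the block's real size, so callers must not
   keep using the original request.  */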
Packit 6c4009
Packit 6c4009
Packit 6c4009
/* Free stacks until cache size is lower than LIMIT.  */
Packit 6c4009
static void
Packit 6c4009
free_stacks (size_t limit)
Packit 6c4009
{
Packit 6c4009
  /* We reduce the size of the cache.  Remove the last entries until
Packit 6c4009
     the size is below the limit.  */
Packit 6c4009
  list_t *entry;
Packit 6c4009
  list_t *prev;
Packit 6c4009
Packit 6c4009
  /* Search from the end of the list.  */
Packit 6c4009
  list_for_each_prev_safe (entry, prev, &stack_cache)
Packit 6c4009
    {
Packit 6c4009
      struct pthread *curr;
Packit 6c4009
Packit 6c4009
      curr = list_entry (entry, struct pthread, list);
Packit 6c4009
      if (FREE_P (curr))
Packit 6c4009
	{
Packit 6c4009
	  /* Unlink the block.  */
Packit 6c4009
	  stack_list_del (entry);
Packit 6c4009
Packit 6c4009
	  /* Account for the freed memory.  */
Packit 6c4009
	  stack_cache_actsize -= curr->stackblock_size;
Packit 6c4009
Packit 6c4009
	  /* Free the memory associated with the ELF TLS.  */
Packit 6c4009
	  _dl_deallocate_tls (TLS_TPADJ (curr), false);
Packit 6c4009
Packit 6c4009
	  /* Remove this block.  This should never fail.  If it does
Packit 6c4009
	     something is really wrong.  */
Packit 6c4009
	  if (__munmap (curr->stackblock, curr->stackblock_size) != 0)
Packit 6c4009
	    abort ();
Packit 6c4009
Packit 6c4009
	  /* Maybe we have freed enough.  */
Packit 6c4009
	  if (stack_cache_actsize <= limit)
Packit 6c4009
	    break;
Packit 6c4009
	}
Packit 6c4009
    }
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
/* Free all the stacks on cleanup.  */
Packit 6c4009
void
Packit 6c4009
__nptl_stacks_freeres (void)
Packit 6c4009
{
Packit 6c4009
  free_stacks (0);
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
/* Add a stack which is not used anymore to the cache.  Must be
   called with the cache lock held.  */
Packit 6c4009
static inline void
Packit 6c4009
__attribute ((always_inline))
Packit 6c4009
queue_stack (struct pthread *stack)
Packit 6c4009
{
Packit 6c4009
  /* We unconditionally add the stack to the list.  The memory may
Packit 6c4009
     still be in use but it will not be reused until the kernel marks
Packit 6c4009
     the stack as not used anymore.  */
Packit 6c4009
  stack_list_add (&stack->list, &stack_cache);
Packit 6c4009
Packit 6c4009
  stack_cache_actsize += stack->stackblock_size;
Packit 6c4009
  if (__glibc_unlikely (stack_cache_actsize > stack_cache_maxsize))
Packit 6c4009
    free_stacks (stack_cache_maxsize);
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
Packit 6c4009
static int
Packit 6c4009
change_stack_perm (struct pthread *pd
Packit 6c4009
#ifdef NEED_SEPARATE_REGISTER_STACK
Packit 6c4009
		   , size_t pagemask
Packit 6c4009
#endif
Packit 6c4009
		   )
Packit 6c4009
{
Packit 6c4009
#ifdef NEED_SEPARATE_REGISTER_STACK
Packit 6c4009
  void *stack = (pd->stackblock
Packit 6c4009
		 + (((((pd->stackblock_size - pd->guardsize) / 2)
Packit 6c4009
		      & pagemask) + pd->guardsize) & pagemask));
Packit 6c4009
  size_t len = pd->stackblock + pd->stackblock_size - stack;
Packit 6c4009
#elif _STACK_GROWS_DOWN
Packit 6c4009
  void *stack = pd->stackblock + pd->guardsize;
Packit 6c4009
  size_t len = pd->stackblock_size - pd->guardsize;
Packit 6c4009
#elif _STACK_GROWS_UP
Packit 6c4009
  void *stack = pd->stackblock;
Packit 6c4009
  size_t len = (uintptr_t) pd - pd->guardsize - (uintptr_t) pd->stackblock;
Packit 6c4009
#else
Packit 6c4009
# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
Packit 6c4009
#endif
Packit 6c4009
  if (__mprotect (stack, len, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
Packit 6c4009
    return errno;
Packit 6c4009
Packit 6c4009
  return 0;
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
/* Return the guard page position on allocated stack.  */
Packit 6c4009
static inline char *
Packit 6c4009
__attribute ((always_inline))
Packit 6c4009
guard_position (void *mem, size_t size, size_t guardsize, struct pthread *pd,
Packit 6c4009
		size_t pagesize_m1)
Packit 6c4009
{
Packit 6c4009
#ifdef NEED_SEPARATE_REGISTER_STACK
Packit 6c4009
  return mem + (((size - guardsize) / 2) & ~pagesize_m1);
Packit 6c4009
#elif _STACK_GROWS_DOWN
Packit 6c4009
  return mem;
Packit 6c4009
#elif _STACK_GROWS_UP
Packit 6c4009
  return (char *) (((uintptr_t) pd - guardsize) & ~pagesize_m1);
Packit 6c4009
#endif
Packit 6c4009
}
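/* Editorial example (added): assuming _STACK_GROWS_DOWN without a separate
   register stack, the guard occupies the lowest pages of the mapping:

     mem                                              mem + size
      | guard (PROT_NONE) |  usable stack ...  struct pthread / TCB |
      ^ guard_position () == mem

   For _STACK_GROWS_UP the guard is instead carved out just below the
   thread descriptor, and for the separate-register-stack layout it sits
   between the two stacks near the middle of the block.  */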
Packit 6c4009
Packit 6c4009
/* Based on a stack allocated with PROT_NONE, set up the required portions
   with the 'prot' flags based on the guard page position.  */
Packit 6c4009
static inline int
Packit 6c4009
setup_stack_prot (char *mem, size_t size, char *guard, size_t guardsize,
Packit 6c4009
		  const int prot)
Packit 6c4009
{
Packit 6c4009
  char *guardend = guard + guardsize;
Packit 6c4009
#if _STACK_GROWS_DOWN && !defined(NEED_SEPARATE_REGISTER_STACK)
Packit 6c4009
  /* As defined at guard_position, for architectures with downward stack
Packit 6c4009
     the guard page is always at start of the allocated area.  */
Packit 6c4009
  if (__mprotect (guardend, size - guardsize, prot) != 0)
Packit 6c4009
    return errno;
Packit 6c4009
#else
Packit 6c4009
  size_t mprots1 = (uintptr_t) guard - (uintptr_t) mem;
Packit 6c4009
  if (__mprotect (mem, mprots1, prot) != 0)
Packit 6c4009
    return errno;
Packit 6c4009
  size_t mprots2 = ((uintptr_t) mem + size) - (uintptr_t) guardend;
Packit 6c4009
  if (__mprotect (guardend, mprots2, prot) != 0)
Packit 6c4009
    return errno;
Packit 6c4009
#endif
Packit 6c4009
  return 0;
Packit 6c4009
}
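/* Editorial note (added): setup_stack_prot re-enables access on a mapping
   that was created PROT_NONE, leaving only the guard untouched.  In the
   general (split) case the calls are

     mprotect (mem, guard - mem, prot);                  // below the guard
     mprotect (guardend, mem + size - guardend, prot);   // above the guard

   while on plain downward stacks the guard is at the very start and a
   single mprotect of [guardend, mem + size) is enough.  */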
Packit 6c4009
Packit 6c4009
/* Tell the kernel that the unused stack memory can be reclaimed.  It frees
   everything except for the space used for the TCB itself.  */
Packit 6c4009
static inline void
Packit 6c4009
__always_inline
Packit 6c4009
advise_stack_range (void *mem, size_t size, uintptr_t pd, size_t guardsize)
Packit 6c4009
{
Packit 6c4009
  uintptr_t sp = (uintptr_t) CURRENT_STACK_FRAME;
Packit 6c4009
  size_t pagesize_m1 = __getpagesize () - 1;
Packit 6c4009
#if _STACK_GROWS_DOWN && !defined(NEED_SEPARATE_REGISTER_STACK)
Packit 6c4009
  size_t freesize = (sp - (uintptr_t) mem) & ~pagesize_m1;
Packit 6c4009
  assert (freesize < size);
Packit 6c4009
  if (freesize > PTHREAD_STACK_MIN)
Packit 6c4009
    __madvise (mem, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);
Packit 6c4009
#else
Packit 6c4009
  /* Page aligned start of memory to free (higher than or equal
Packit 6c4009
     to current sp plus the minimum stack size).  */
Packit 6c4009
  uintptr_t freeblock = (sp + PTHREAD_STACK_MIN + pagesize_m1) & ~pagesize_m1;
Packit 6c4009
  uintptr_t free_end = (pd - guardsize) & ~pagesize_m1;
Packit 6c4009
  if (free_end > freeblock)
Packit 6c4009
    {
Packit 6c4009
      size_t freesize = free_end - freeblock;
Packit 6c4009
      assert (freesize < size);
Packit 6c4009
      __madvise ((void*) freeblock, freesize, MADV_DONTNEED);
Packit 6c4009
    }
Packit 6c4009
#endif
Packit 6c4009
}
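/* Editorial sketch (added): for the common downward-stack case the range
   below the current stack frame, minus a PTHREAD_STACK_MIN cushion, is no
   longer needed once a thread is winding down, so

     freesize = (sp - (uintptr_t) mem) & ~pagesize_m1;
     __madvise (mem, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);

   keeps the mapping (so the block can stay in the stack cache) while
   letting the kernel reclaim the physical pages behind it.  */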
Packit 6c4009
Packit 6c4009
/* Returns a usable stack for a new thread either by allocating a
Packit 6c4009
   new stack or reusing a cached stack of sufficient size.
Packit 6c4009
   ATTR must be non-NULL and point to a valid pthread_attr.
Packit 6c4009
   PDP must be non-NULL.  */
Packit 6c4009
static int
Packit 6c4009
allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
Packit 6c4009
		ALLOCATE_STACK_PARMS)
Packit 6c4009
{
Packit 6c4009
  struct pthread *pd;
Packit 6c4009
  size_t size;
Packit 6c4009
  size_t pagesize_m1 = __getpagesize () - 1;
Packit 6c4009
Packit 6c4009
  assert (powerof2 (pagesize_m1 + 1));
Packit 6c4009
  assert (TCB_ALIGNMENT >= STACK_ALIGN);
Packit 6c4009
Packit 6c4009
  /* Get the stack size from the attribute if it is set.  Otherwise we
Packit 6c4009
     use the default we determined at start time.  */
Packit 6c4009
  if (attr->stacksize != 0)
Packit 6c4009
    size = attr->stacksize;
Packit 6c4009
  else
Packit 6c4009
    {
Packit 6c4009
      lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
Packit 6c4009
      size = __default_pthread_attr.stacksize;
Packit 6c4009
      lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
Packit 6c4009
    }
Packit 6c4009
Packit 6c4009
  /* Get memory for the stack.  */
Packit 6c4009
  if (__glibc_unlikely (attr->flags & ATTR_FLAG_STACKADDR))
Packit 6c4009
    {
Packit 6c4009
      uintptr_t adj;
Packit 6c4009
      char *stackaddr = (char *) attr->stackaddr;
Packit 6c4009
Packit 6c4009
      /* Assume the same layout as the _STACK_GROWS_DOWN case, with struct
Packit 6c4009
	 pthread at the top of the stack block.  Later we adjust the guard
Packit 6c4009
	 location and stack address to match the _STACK_GROWS_UP case.  */
Packit 6c4009
      if (_STACK_GROWS_UP)
Packit 6c4009
	stackaddr += attr->stacksize;
Packit 6c4009
Packit 6c4009
      /* If the user also specified the size of the stack make sure it
Packit 6c4009
	 is large enough.  */
Packit 6c4009
      if (attr->stacksize != 0
Packit 6c4009
	  && attr->stacksize < (__static_tls_size + MINIMAL_REST_STACK))
Packit 6c4009
	return EINVAL;
Packit 6c4009
Packit 6c4009
      /* Adjust stack size for alignment of the TLS block.  */
Packit 6c4009
#if TLS_TCB_AT_TP
Packit 6c4009
      adj = ((uintptr_t) stackaddr - TLS_TCB_SIZE)
Packit 6c4009
	    & __static_tls_align_m1;
Packit 6c4009
      assert (size > adj + TLS_TCB_SIZE);
Packit 6c4009
#elif TLS_DTV_AT_TP
Packit 6c4009
      adj = ((uintptr_t) stackaddr - __static_tls_size)
Packit 6c4009
	    & __static_tls_align_m1;
Packit 6c4009
      assert (size > adj);
Packit 6c4009
#endif
Packit 6c4009
Packit 6c4009
      /* The user provided some memory.  Let's hope it matches the
Packit 6c4009
	 size...  We do not allocate guard pages if the user provided
Packit 6c4009
	 the stack.  It is the user's responsibility to do this if it
Packit 6c4009
	 is wanted.  */
Packit 6c4009
#if TLS_TCB_AT_TP
Packit 6c4009
      pd = (struct pthread *) ((uintptr_t) stackaddr
Packit 6c4009
			       - TLS_TCB_SIZE - adj);
Packit 6c4009
#elif TLS_DTV_AT_TP
Packit 6c4009
      pd = (struct pthread *) (((uintptr_t) stackaddr
Packit 6c4009
				- __static_tls_size - adj)
Packit 6c4009
			       - TLS_PRE_TCB_SIZE);
Packit 6c4009
#endif
Packit 6c4009
Packit 6c4009
      /* The user provided stack memory needs to be cleared.  */
Packit 6c4009
      memset (pd, '\0', sizeof (struct pthread));
Packit 6c4009
Packit 6c4009
      /* The first TSD block is included in the TCB.  */
Packit 6c4009
      pd->specific[0] = pd->specific_1stblock;
Packit 6c4009
Packit 6c4009
      /* Remember the stack-related values.  */
Packit 6c4009
      pd->stackblock = (char *) stackaddr - size;
Packit 6c4009
      pd->stackblock_size = size;
Packit 6c4009
Packit 6c4009
      /* This is a user-provided stack.  It will not be queued in the
Packit 6c4009
	 stack cache nor will the memory (except the TLS memory) be freed.  */
Packit 6c4009
      pd->user_stack = true;
Packit 6c4009
Packit 6c4009
      /* This is at least the second thread.  */
Packit 6c4009
      pd->header.multiple_threads = 1;
Packit 6c4009
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
Packit 6c4009
      __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
Packit 6c4009
#endif
Packit 6c4009
Packit 6c4009
#ifdef NEED_DL_SYSINFO
Packit 6c4009
      SETUP_THREAD_SYSINFO (pd);
Packit 6c4009
#endif
Packit 6c4009
Packit 6c4009
      /* Don't allow setxid until cloned.  */
Packit 6c4009
      pd->setxid_futex = -1;
Packit 6c4009
Packit 6c4009
      /* Allocate the DTV for this thread.  */
Packit 6c4009
      if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
Packit 6c4009
	{
Packit 6c4009
	  /* Something went wrong.  */
Packit 6c4009
	  assert (errno == ENOMEM);
Packit 6c4009
	  return errno;
Packit 6c4009
	}
Packit 6c4009
Packit 6c4009
Packit 6c4009
      /* Prepare to modify global data.  */
Packit 6c4009
      lll_lock (stack_cache_lock, LLL_PRIVATE);
Packit 6c4009
Packit 6c4009
      /* And add to the list of stacks in use.  */
Packit 6c4009
      list_add (&pd->list, &__stack_user);
Packit 6c4009
Packit 6c4009
      lll_unlock (stack_cache_lock, LLL_PRIVATE);
Packit 6c4009
    }
Packit 6c4009
  else
Packit 6c4009
    {
Packit 6c4009
      /* Allocate some anonymous memory.  If possible use the cache.  */
Packit 6c4009
      size_t guardsize;
Packit 6c4009
      size_t reqsize;
Packit 6c4009
      void *mem;
Packit 6c4009
      const int prot = (PROT_READ | PROT_WRITE
Packit 6c4009
			| ((GL(dl_stack_flags) & PF_X) ? PROT_EXEC : 0));
Packit 6c4009
Packit 6c4009
      /* Adjust the stack size for alignment.  */
Packit 6c4009
      size &= ~__static_tls_align_m1;
Packit 6c4009
      assert (size != 0);
Packit 6c4009
Packit 6c4009
      /* Make sure the size of the stack is enough for the guard and,
	 if needed, the thread descriptor.  */
Packit 6c4009
      guardsize = (attr->guardsize + pagesize_m1) & ~pagesize_m1;
Packit 6c4009
      if (guardsize < attr->guardsize || size + guardsize < guardsize)
Packit 6c4009
	/* Arithmetic overflow.  */
Packit 6c4009
	return EINVAL;
Packit 6c4009
      size += guardsize;
Packit 6c4009
      if (__builtin_expect (size < ((guardsize + __static_tls_size
Packit 6c4009
				     + MINIMAL_REST_STACK + pagesize_m1)
Packit 6c4009
				    & ~pagesize_m1),
Packit 6c4009
			    0))
Packit 6c4009
	/* The stack is too small (or the guard too large).  */
Packit 6c4009
	return EINVAL;
Packit 6c4009
Packit 6c4009
      /* Try to get a stack from the cache.  */
Packit 6c4009
      reqsize = size;
Packit 6c4009
      pd = get_cached_stack (&size, &mem);
Packit 6c4009
      if (pd == NULL)
Packit 6c4009
	{
Packit 6c4009
	  /* To avoid aliasing effects on a larger scale than pages we
Packit 6c4009
	     adjust the allocated stack size if necessary.  This way
Packit 6c4009
	     allocations directly following each other will not have
Packit 6c4009
	     aliasing problems.  */
Packit 6c4009
#if MULTI_PAGE_ALIASING != 0
Packit 6c4009
	  if ((size % MULTI_PAGE_ALIASING) == 0)
Packit 6c4009
	    size += pagesize_m1 + 1;
Packit 6c4009
#endif
Packit 6c4009
Packit 6c4009
	  /* If a guard page is required, avoid committing memory by first
	     allocating with PROT_NONE and then applying the required
	     permissions to everything except the guard page.  */
Packit 6c4009
	  mem = __mmap (NULL, size, (guardsize == 0) ? prot : PROT_NONE,
Packit 6c4009
			MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
Packit 6c4009
Packit 6c4009
	  if (__glibc_unlikely (mem == MAP_FAILED))
Packit 6c4009
	    return errno;
Packit 6c4009
Packit 6c4009
	  /* SIZE is guaranteed to be greater than zero.
Packit 6c4009
	     So we can never get a null pointer back from mmap.  */
Packit 6c4009
	  assert (mem != NULL);
Packit 6c4009
Packit 6c4009
	  /* Place the thread descriptor at the end of the stack.  */
Packit 6c4009
#if TLS_TCB_AT_TP
Packit Bot 0c2104
	  pd = (struct pthread *) ((char *) mem + size) - 1;
Packit 6c4009
#elif TLS_DTV_AT_TP
Packit 6c4009
	  pd = (struct pthread *) ((((uintptr_t) mem + size
Packit 6c4009
				    - __static_tls_size)
Packit 6c4009
				    & ~__static_tls_align_m1)
Packit 6c4009
				   - TLS_PRE_TCB_SIZE);
Packit 6c4009
#endif
Packit 6c4009
Packit 6c4009
	  /* Now mprotect the required region excluding the guard area.  */
Packit 6c4009
	  if (__glibc_likely (guardsize > 0))
Packit 6c4009
	    {
Packit 6c4009
	      char *guard = guard_position (mem, size, guardsize, pd,
Packit 6c4009
					    pagesize_m1);
Packit 6c4009
	      if (setup_stack_prot (mem, size, guard, guardsize, prot) != 0)
Packit 6c4009
		{
Packit 6c4009
		  __munmap (mem, size);
Packit 6c4009
		  return errno;
Packit 6c4009
		}
Packit 6c4009
	    }
Packit 6c4009
Packit 6c4009
	  /* Remember the stack-related values.  */
Packit 6c4009
	  pd->stackblock = mem;
Packit 6c4009
	  pd->stackblock_size = size;
Packit 6c4009
	  /* Update guardsize for the newly allocated guard to avoid
	     an extra mprotect in the guard resize below.  */
Packit 6c4009
	  pd->guardsize = guardsize;
Packit 6c4009
Packit 6c4009
	  /* We allocated the first block thread-specific data array.
Packit 6c4009
	     This address will not change for the lifetime of this
Packit 6c4009
	     descriptor.  */
Packit 6c4009
	  pd->specific[0] = pd->specific_1stblock;
Packit 6c4009
Packit 6c4009
	  /* This is at least the second thread.  */
Packit 6c4009
	  pd->header.multiple_threads = 1;
Packit 6c4009
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
Packit 6c4009
	  __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
Packit 6c4009
#endif
Packit 6c4009
Packit 6c4009
#ifdef NEED_DL_SYSINFO
Packit 6c4009
	  SETUP_THREAD_SYSINFO (pd);
Packit 6c4009
#endif
Packit 6c4009
Packit 6c4009
	  /* Don't allow setxid until cloned.  */
Packit 6c4009
	  pd->setxid_futex = -1;
Packit 6c4009
Packit 6c4009
	  /* Allocate the DTV for this thread.  */
Packit 6c4009
	  if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
Packit 6c4009
	    {
Packit 6c4009
	      /* Something went wrong.  */
Packit 6c4009
	      assert (errno == ENOMEM);
Packit 6c4009
Packit 6c4009
	      /* Free the stack memory we just allocated.  */
Packit 6c4009
	      (void) __munmap (mem, size);
Packit 6c4009
Packit 6c4009
	      return errno;
Packit 6c4009
	    }
Packit 6c4009
Packit 6c4009
Packit 6c4009
	  /* Prepare to modify global data.  */
Packit 6c4009
	  lll_lock (stack_cache_lock, LLL_PRIVATE);
Packit 6c4009
Packit 6c4009
	  /* And add to the list of stacks in use.  */
Packit 6c4009
	  stack_list_add (&pd->list, &stack_used);
Packit 6c4009
Packit 6c4009
	  lll_unlock (stack_cache_lock, LLL_PRIVATE);
Packit 6c4009
Packit 6c4009
Packit 6c4009
	  /* There might have been a race.  Another thread might have
Packit 6c4009
	     caused the stacks to get exec permission while this new
Packit 6c4009
	     stack was prepared.  Detect if this was possible and
Packit 6c4009
	     change the permission if necessary.  */
Packit 6c4009
	  if (__builtin_expect ((GL(dl_stack_flags) & PF_X) != 0
Packit 6c4009
				&& (prot & PROT_EXEC) == 0, 0))
Packit 6c4009
	    {
Packit 6c4009
	      int err = change_stack_perm (pd
Packit 6c4009
#ifdef NEED_SEPARATE_REGISTER_STACK
Packit 6c4009
					   , ~pagesize_m1
Packit 6c4009
#endif
Packit 6c4009
					   );
Packit 6c4009
	      if (err != 0)
Packit 6c4009
		{
Packit 6c4009
		  /* Free the stack memory we just allocated.  */
Packit 6c4009
		  (void) __munmap (mem, size);
Packit 6c4009
Packit 6c4009
		  return err;
Packit 6c4009
		}
Packit 6c4009
	    }
Packit 6c4009
Packit 6c4009
Packit 6c4009
	  /* Note that all of the stack and the thread descriptor is
Packit 6c4009
	     zeroed.  This means we do not have to initialize fields
Packit 6c4009
	     with initial value zero.  This is specifically true for
Packit 6c4009
	     the 'tid' field which is always set back to zero once the
Packit 6c4009
	     stack is not used anymore and for the 'guardsize' field
Packit 6c4009
	     which will be read next.  */
Packit 6c4009
	}
Packit 6c4009
Packit 6c4009
      /* Create or resize the guard area if necessary.  */
Packit 6c4009
      if (__glibc_unlikely (guardsize > pd->guardsize))
Packit 6c4009
	{
Packit 6c4009
	  char *guard = guard_position (mem, size, guardsize, pd,
Packit 6c4009
					pagesize_m1);
Packit 6c4009
	  if (__mprotect (guard, guardsize, PROT_NONE) != 0)
Packit 6c4009
	    {
Packit 6c4009
	    mprot_error:
Packit 6c4009
	      lll_lock (stack_cache_lock, LLL_PRIVATE);
Packit 6c4009
Packit 6c4009
	      /* Remove the thread from the list.  */
Packit 6c4009
	      stack_list_del (&pd->list);
Packit 6c4009
Packit 6c4009
	      lll_unlock (stack_cache_lock, LLL_PRIVATE);
Packit 6c4009
Packit 6c4009
	      /* Get rid of the TLS block we allocated.  */
Packit 6c4009
	      _dl_deallocate_tls (TLS_TPADJ (pd), false);
Packit 6c4009
Packit 6c4009
	      /* Free the stack memory regardless of whether the size
Packit 6c4009
		 of the cache is over the limit or not.  If this piece
Packit 6c4009
		 of memory caused problems we better do not use it
Packit 6c4009
		 anymore.  Uh, and we ignore possible errors.  There
Packit 6c4009
		 is nothing we could do.  */
Packit 6c4009
	      (void) __munmap (mem, size);
Packit 6c4009
Packit 6c4009
	      return errno;
Packit 6c4009
	    }
Packit 6c4009
Packit 6c4009
	  pd->guardsize = guardsize;
Packit 6c4009
	}
Packit 6c4009
      else if (__builtin_expect (pd->guardsize - guardsize > size - reqsize,
Packit 6c4009
				 0))
Packit 6c4009
	{
Packit 6c4009
	  /* The old guard area is too large.  */
Packit 6c4009
Packit 6c4009
#ifdef NEED_SEPARATE_REGISTER_STACK
Packit 6c4009
	  char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
Packit 6c4009
	  char *oldguard = mem + (((size - pd->guardsize) / 2) & ~pagesize_m1);
Packit 6c4009
Packit 6c4009
	  if (oldguard < guard
Packit 6c4009
	      && __mprotect (oldguard, guard - oldguard, prot) != 0)
Packit 6c4009
	    goto mprot_error;
Packit 6c4009
Packit 6c4009
	  if (__mprotect (guard + guardsize,
Packit 6c4009
			oldguard + pd->guardsize - guard - guardsize,
Packit 6c4009
			prot) != 0)
Packit 6c4009
	    goto mprot_error;
Packit 6c4009
#elif _STACK_GROWS_DOWN
Packit 6c4009
	  if (__mprotect ((char *) mem + guardsize, pd->guardsize - guardsize,
Packit 6c4009
			prot) != 0)
Packit 6c4009
	    goto mprot_error;
Packit 6c4009
#elif _STACK_GROWS_UP
Packit 6c4009
         char *new_guard = (char *)(((uintptr_t) pd - guardsize)
Packit 6c4009
                                    & ~pagesize_m1);
Packit 6c4009
         char *old_guard = (char *)(((uintptr_t) pd - pd->guardsize)
Packit 6c4009
                                    & ~pagesize_m1);
Packit 6c4009
         /* The guard size difference might be > 0, but once rounded
Packit 6c4009
            to the nearest page the size difference might be zero.  */
Packit 6c4009
         if (new_guard > old_guard
Packit 6c4009
             && __mprotect (old_guard, new_guard - old_guard, prot) != 0)
Packit 6c4009
	    goto mprot_error;
Packit 6c4009
#endif
Packit 6c4009
Packit 6c4009
	  pd->guardsize = guardsize;
Packit 6c4009
	}
Packit 6c4009
      /* The pthread_getattr_np() calls need to get passed the size
Packit 6c4009
	 requested in the attribute, regardless of how large the
Packit 6c4009
	 actually used guardsize is.  */
Packit 6c4009
      pd->reported_guardsize = guardsize;
Packit 6c4009
    }
Packit 6c4009
Packit 6c4009
  /* Initialize the lock.  We have to do this unconditionally since the
Packit 6c4009
     stillborn thread could be canceled while the lock is taken.  */
Packit 6c4009
  pd->lock = LLL_LOCK_INITIALIZER;
Packit 6c4009
Packit 6c4009
  /* The robust mutex lists also need to be initialized
Packit 6c4009
     unconditionally because the cleanup for the previous stack owner
Packit 6c4009
     might have happened in the kernel.  */
Packit 6c4009
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
Packit 6c4009
				  - offsetof (pthread_mutex_t,
Packit 6c4009
					      __data.__list.__next));
Packit 6c4009
  pd->robust_head.list_op_pending = NULL;
Packit 6c4009
#if __PTHREAD_MUTEX_HAVE_PREV
Packit 6c4009
  pd->robust_prev = &pd->robust_head;
Packit 6c4009
#endif
Packit 6c4009
  pd->robust_head.list = &pd->robust_head;
Packit 6c4009
Packit 6c4009
  /* We place the thread descriptor at the end of the stack.  */
Packit 6c4009
  *pdp = pd;
Packit 6c4009
Packit 6c4009
#if _STACK_GROWS_DOWN
Packit 6c4009
  void *stacktop;
Packit 6c4009
Packit 6c4009
# if TLS_TCB_AT_TP
Packit 6c4009
  /* The stack begins before the TCB and the static TLS block.  */
Packit 6c4009
  stacktop = ((char *) (pd + 1) - __static_tls_size);
Packit 6c4009
# elif TLS_DTV_AT_TP
Packit 6c4009
  stacktop = (char *) (pd - 1);
Packit 6c4009
# endif
Packit 6c4009
Packit 6c4009
# ifdef NEED_SEPARATE_REGISTER_STACK
Packit 6c4009
  *stack = pd->stackblock;
Packit 6c4009
  *stacksize = stacktop - *stack;
Packit 6c4009
# else
Packit 6c4009
  *stack = stacktop;
Packit 6c4009
# endif
Packit 6c4009
#else
Packit 6c4009
  *stack = pd->stackblock;
Packit 6c4009
#endif
Packit 6c4009
Packit 6c4009
  return 0;
Packit 6c4009
}
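/* Editorial summary of allocate_stack (added, descriptive only): user-
   supplied stacks just get a struct pthread carved out of their top and go
   on __stack_user; everything else first tries get_cached_stack and, on a
   miss, roughly does

     mem = __mmap (NULL, size, guardsize ? PROT_NONE : prot,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
     pd = ...;                                  // descriptor at top of MEM
     setup_stack_prot (mem, size, guard, guardsize, prot);
     stack_list_add (&pd->list, &stack_used);

   before both paths fall through to the common lock, robust-list and
   output-parameter initialization at the end of the function.  */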
Packit 6c4009
Packit 6c4009
Packit 6c4009
void
Packit 6c4009
__deallocate_stack (struct pthread *pd)
Packit 6c4009
{
Packit 6c4009
  lll_lock (stack_cache_lock, LLL_PRIVATE);
Packit 6c4009
Packit 6c4009
  /* Remove the thread from the list of threads with user defined
Packit 6c4009
     stacks.  */
Packit 6c4009
  stack_list_del (&pd->list);
Packit 6c4009
Packit 6c4009
  /* Not much to do.  Just free the mmap()ed memory.  Note that we do
Packit 6c4009
     not reset the 'used' flag in the 'tid' field.  This is done by
Packit 6c4009
     the kernel.  If no thread has been created yet this field is
Packit 6c4009
     still zero.  */
Packit 6c4009
  if (__glibc_likely (! pd->user_stack))
Packit 6c4009
    (void) queue_stack (pd);
Packit 6c4009
  else
Packit 6c4009
    /* Free the memory associated with the ELF TLS.  */
Packit 6c4009
    _dl_deallocate_tls (TLS_TPADJ (pd), false);
Packit 6c4009
Packit 6c4009
  lll_unlock (stack_cache_lock, LLL_PRIVATE);
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
Packit 6c4009
int
Packit 6c4009
__make_stacks_executable (void **stack_endp)
Packit 6c4009
{
Packit 6c4009
  /* First the main thread's stack.  */
Packit 6c4009
  int err = _dl_make_stack_executable (stack_endp);
Packit 6c4009
  if (err != 0)
Packit 6c4009
    return err;
Packit 6c4009
Packit 6c4009
#ifdef NEED_SEPARATE_REGISTER_STACK
Packit 6c4009
  const size_t pagemask = ~(__getpagesize () - 1);
Packit 6c4009
#endif
Packit 6c4009
Packit 6c4009
  lll_lock (stack_cache_lock, LLL_PRIVATE);
Packit 6c4009
Packit 6c4009
  list_t *runp;
Packit 6c4009
  list_for_each (runp, &stack_used)
Packit 6c4009
    {
Packit 6c4009
      err = change_stack_perm (list_entry (runp, struct pthread, list)
Packit 6c4009
#ifdef NEED_SEPARATE_REGISTER_STACK
Packit 6c4009
			       , pagemask
Packit 6c4009
#endif
Packit 6c4009
			       );
Packit 6c4009
      if (err != 0)
Packit 6c4009
	break;
Packit 6c4009
    }
Packit 6c4009
Packit 6c4009
  /* Also change the permission for the currently unused stacks.  This
Packit 6c4009
     might be wasted time but better spend it here than adding a check
Packit 6c4009
     in the fast path.  */
Packit 6c4009
  if (err == 0)
Packit 6c4009
    list_for_each (runp, &stack_cache)
Packit 6c4009
      {
Packit 6c4009
	err = change_stack_perm (list_entry (runp, struct pthread, list)
Packit 6c4009
#ifdef NEED_SEPARATE_REGISTER_STACK
Packit 6c4009
				 , pagemask
Packit 6c4009
#endif
Packit 6c4009
				 );
Packit 6c4009
	if (err != 0)
Packit 6c4009
	  break;
Packit 6c4009
      }
Packit 6c4009
Packit 6c4009
  lll_unlock (stack_cache_lock, LLL_PRIVATE);
Packit 6c4009
Packit 6c4009
  return err;
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
Packit 6c4009
/* In case of a fork() call the memory allocation in the child will be
Packit 6c4009
   the same but only one thread is running.  All stacks except that of
Packit 6c4009
   the one running thread are not used anymore.  We have to recycle
Packit 6c4009
   them.  */
Packit 6c4009
void
Packit 6c4009
__reclaim_stacks (void)
Packit 6c4009
{
Packit 6c4009
  struct pthread *self = (struct pthread *) THREAD_SELF;
Packit 6c4009
Packit 6c4009
  /* No locking necessary.  The calling thread is the only one left
     running.  But we have to be aware that we might have interrupted a
     list operation.  */
Packit 6c4009
Packit 6c4009
  if (in_flight_stack != 0)
Packit 6c4009
    {
Packit 6c4009
      bool add_p = in_flight_stack & 1;
Packit 6c4009
      list_t *elem = (list_t *) (in_flight_stack & ~(uintptr_t) 1);
Packit 6c4009
Packit 6c4009
      if (add_p)
Packit 6c4009
	{
Packit 6c4009
	  /* We always add at the beginning of the list.  So in this case we
Packit 6c4009
	     only need to check the beginning of these lists to see if the
Packit 6c4009
	     pointers at the head of the list are inconsistent.  */
Packit 6c4009
	  list_t *l = NULL;
Packit 6c4009
Packit 6c4009
	  if (stack_used.next->prev != &stack_used)
Packit 6c4009
	    l = &stack_used;
Packit 6c4009
	  else if (stack_cache.next->prev != &stack_cache)
Packit 6c4009
	    l = &stack_cache;
Packit 6c4009
Packit 6c4009
	  if (l != NULL)
Packit 6c4009
	    {
Packit 6c4009
	      assert (l->next->prev == elem);
Packit 6c4009
	      elem->next = l->next;
Packit 6c4009
	      elem->prev = l;
Packit 6c4009
	      l->next = elem;
Packit 6c4009
	    }
Packit 6c4009
	}
Packit 6c4009
      else
Packit 6c4009
	{
Packit 6c4009
	  /* We can simply always replay the delete operation.  */
Packit 6c4009
	  elem->next->prev = elem->prev;
Packit 6c4009
	  elem->prev->next = elem->next;
Packit 6c4009
	}
Packit 6c4009
    }
Packit 6c4009
Packit 6c4009
  /* Mark all stacks except the still running one as free.  */
Packit 6c4009
  list_t *runp;
Packit 6c4009
  list_for_each (runp, &stack_used)
Packit 6c4009
    {
Packit 6c4009
      struct pthread *curp = list_entry (runp, struct pthread, list);
Packit 6c4009
      if (curp != self)
Packit 6c4009
	{
Packit 6c4009
	  /* This marks the stack as free.  */
Packit 6c4009
	  curp->tid = 0;
Packit 6c4009
Packit 6c4009
	  /* Account for the size of the stack.  */
Packit 6c4009
	  stack_cache_actsize += curp->stackblock_size;
Packit 6c4009
Packit 6c4009
	  if (curp->specific_used)
Packit 6c4009
	    {
Packit 6c4009
	      /* Clear the thread-specific data.  */
Packit 6c4009
	      memset (curp->specific_1stblock, '\0',
Packit 6c4009
		      sizeof (curp->specific_1stblock));
Packit 6c4009
Packit 6c4009
	      curp->specific_used = false;
Packit 6c4009
Packit 6c4009
	      for (size_t cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
Packit 6c4009
		if (curp->specific[cnt] != NULL)
Packit 6c4009
		  {
Packit 6c4009
		    memset (curp->specific[cnt], '\0',
Packit 6c4009
			    sizeof (curp->specific_1stblock));
Packit 6c4009
Packit 6c4009
		    /* We have allocated the block which we do not
Packit 6c4009
		       free here so re-set the bit.  */
Packit 6c4009
		    curp->specific_used = true;
Packit 6c4009
		  }
Packit 6c4009
	    }
Packit 6c4009
	}
Packit 6c4009
    }
Packit 6c4009
Packit 6c4009
  /* Add the stack of all running threads to the cache.  */
Packit 6c4009
  list_splice (&stack_used, &stack_cache);
Packit 6c4009
Packit 6c4009
  /* Remove the entry for the current thread from the cache list
     and add it to the list of running threads.  Which of the two
     lists is used is decided by the user_stack flag.  */
Packit 6c4009
  stack_list_del (&self->list);
Packit 6c4009
Packit 6c4009
  /* Re-initialize the lists for all the threads.  */
Packit 6c4009
  INIT_LIST_HEAD (&stack_used);
Packit 6c4009
  INIT_LIST_HEAD (&__stack_user);
Packit 6c4009
Packit 6c4009
  if (__glibc_unlikely (THREAD_GETMEM (self, user_stack)))
Packit 6c4009
    list_add (&self->list, &__stack_user);
Packit 6c4009
  else
Packit 6c4009
    list_add (&self->list, &stack_used);
Packit 6c4009
Packit 6c4009
  /* There is one thread running.  */
Packit 6c4009
  __nptl_nthreads = 1;
Packit 6c4009
Packit 6c4009
  in_flight_stack = 0;
Packit 6c4009
Packit 6c4009
  /* Initialize locks.  */
Packit 6c4009
  stack_cache_lock = LLL_LOCK_INITIALIZER;
Packit 6c4009
  __default_pthread_attr_lock = LLL_LOCK_INITIALIZER;
Packit 6c4009
}
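/* Editorial note (added): after fork only the calling thread exists, so the
   walk above needs no lock.  Every other descriptor is marked free
   (tid = 0), its size is credited to stack_cache_actsize, and the whole
   stack_used list is spliced onto stack_cache; the surviving thread is
   then re-queued on __stack_user or stack_used according to its user_stack
   flag, and the locks are reset to their initial state.  A later
   pthread_create in the child can then reuse those stacks through
   get_cached_stack just as the parent would have.  */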
Packit 6c4009
Packit 6c4009
Packit Bot 0c2104
#if HP_TIMING_AVAIL
Packit Bot 0c2104
# undef __find_thread_by_id
Packit Bot 0c2104
/* Find a thread given the thread ID.  */
Packit Bot 0c2104
attribute_hidden
Packit Bot 0c2104
struct pthread *
Packit Bot 0c2104
__find_thread_by_id (pid_t tid)
Packit Bot 0c2104
{
Packit Bot 0c2104
  struct pthread *result = NULL;
Packit Bot 0c2104
Packit Bot 0c2104
  lll_lock (stack_cache_lock, LLL_PRIVATE);
Packit Bot 0c2104
Packit Bot 0c2104
  /* Iterate over the list with system-allocated threads first.  */
Packit Bot 0c2104
  list_t *runp;
Packit Bot 0c2104
  list_for_each (runp, &stack_used)
Packit Bot 0c2104
    {
Packit Bot 0c2104
      struct pthread *curp;
Packit Bot 0c2104
Packit Bot 0c2104
      curp = list_entry (runp, struct pthread, list);
Packit Bot 0c2104
Packit Bot 0c2104
      if (curp->tid == tid)
Packit Bot 0c2104
	{
Packit Bot 0c2104
	  result = curp;
Packit Bot 0c2104
	  goto out;
Packit Bot 0c2104
	}
Packit Bot 0c2104
    }
Packit Bot 0c2104
Packit Bot 0c2104
  /* Now the list with threads using user-allocated stacks.  */
Packit Bot 0c2104
  list_for_each (runp, &__stack_user)
Packit Bot 0c2104
    {
Packit Bot 0c2104
      struct pthread *curp;
Packit Bot 0c2104
Packit Bot 0c2104
      curp = list_entry (runp, struct pthread, list);
Packit Bot 0c2104
Packit Bot 0c2104
      if (curp->tid == tid)
Packit Bot 0c2104
	{
Packit Bot 0c2104
	  result = curp;
Packit Bot 0c2104
	  goto out;
Packit Bot 0c2104
	}
Packit Bot 0c2104
    }
Packit Bot 0c2104
Packit Bot 0c2104
 out:
Packit Bot 0c2104
  lll_unlock (stack_cache_lock, LLL_PRIVATE);
Packit Bot 0c2104
Packit Bot 0c2104
  return result;
Packit Bot 0c2104
}
Packit Bot 0c2104
#endif
Packit Bot 0c2104
Packit Bot 0c2104
Packit 6c4009
#ifdef SIGSETXID
Packit 6c4009
static void
Packit 6c4009
setxid_mark_thread (struct xid_command *cmdp, struct pthread *t)
Packit 6c4009
{
Packit 6c4009
  int ch;
Packit 6c4009
Packit 6c4009
  /* Wait until this thread is cloned.  */
Packit 6c4009
  if (t->setxid_futex == -1
Packit 6c4009
      && ! atomic_compare_and_exchange_bool_acq (&t->setxid_futex, -2, -1))
Packit 6c4009
    do
Packit 6c4009
      futex_wait_simple (&t->setxid_futex, -2, FUTEX_PRIVATE);
Packit 6c4009
    while (t->setxid_futex == -2);
Packit 6c4009
Packit 6c4009
  /* Don't let the thread exit before the setxid handler runs.  */
Packit 6c4009
  t->setxid_futex = 0;
Packit 6c4009
Packit 6c4009
  do
Packit 6c4009
    {
Packit 6c4009
      ch = t->cancelhandling;
Packit 6c4009
Packit 6c4009
      /* If the thread is exiting right now, ignore it.  */
Packit 6c4009
      if ((ch & EXITING_BITMASK) != 0)
Packit 6c4009
	{
Packit 6c4009
	  /* Release the futex if there is no other setxid in
Packit 6c4009
	     progress.  */
Packit 6c4009
	  if ((ch & SETXID_BITMASK) == 0)
Packit 6c4009
	    {
Packit 6c4009
	      t->setxid_futex = 1;
Packit 6c4009
	      futex_wake (&t->setxid_futex, 1, FUTEX_PRIVATE);
Packit 6c4009
	    }
Packit 6c4009
	  return;
Packit 6c4009
	}
Packit 6c4009
    }
Packit 6c4009
  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
Packit 6c4009
					       ch | SETXID_BITMASK, ch));
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
Packit 6c4009
static void
Packit 6c4009
setxid_unmark_thread (struct xid_command *cmdp, struct pthread *t)
Packit 6c4009
{
Packit 6c4009
  int ch;
Packit 6c4009
Packit 6c4009
  do
Packit 6c4009
    {
Packit 6c4009
      ch = t->cancelhandling;
Packit 6c4009
      if ((ch & SETXID_BITMASK) == 0)
Packit 6c4009
	return;
Packit 6c4009
    }
Packit 6c4009
  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
Packit 6c4009
					       ch & ~SETXID_BITMASK, ch));
Packit 6c4009
Packit 6c4009
  /* Release the futex just in case.  */
Packit 6c4009
  t->setxid_futex = 1;
Packit 6c4009
  futex_wake (&t->setxid_futex, 1, FUTEX_PRIVATE);
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
Packit 6c4009
static int
Packit 6c4009
setxid_signal_thread (struct xid_command *cmdp, struct pthread *t)
Packit 6c4009
{
Packit 6c4009
  if ((t->cancelhandling & SETXID_BITMASK) == 0)
Packit 6c4009
    return 0;
Packit 6c4009
Packit 6c4009
  int val;
Packit 6c4009
  pid_t pid = __getpid ();
Packit 6c4009
  INTERNAL_SYSCALL_DECL (err);
Packit 6c4009
  val = INTERNAL_SYSCALL_CALL (tgkill, err, pid, t->tid, SIGSETXID);
Packit 6c4009
Packit 6c4009
  /* If this failed, the thread must not have started yet or else it has
     already exited.  */
Packit 6c4009
  if (!INTERNAL_SYSCALL_ERROR_P (val, err))
Packit 6c4009
    {
Packit 6c4009
      atomic_increment (&cmdp->cntr);
Packit 6c4009
      return 1;
Packit 6c4009
    }
Packit 6c4009
  else
Packit 6c4009
    return 0;
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
/* Check for consistency across set*id system call results.  The abort
   should not happen as long as all privilege changes happen through
   the glibc wrappers.  ERROR must be 0 (no error) or an errno
   code.  */
Packit 6c4009
void
Packit 6c4009
attribute_hidden
Packit 6c4009
__nptl_setxid_error (struct xid_command *cmdp, int error)
Packit 6c4009
{
Packit 6c4009
  do
Packit 6c4009
    {
Packit 6c4009
      int olderror = cmdp->error;
Packit 6c4009
      if (olderror == error)
Packit 6c4009
	break;
Packit 6c4009
      if (olderror != -1)
Packit 6c4009
	{
Packit 6c4009
	  /* Mismatch between current and previous results.  Save the
	     error value to memory so that it is not clobbered by the
	     abort function and is preserved in coredumps.  */
Packit 6c4009
	  volatile int xid_err __attribute__((unused)) = error;
Packit 6c4009
	  abort ();
Packit 6c4009
	}
Packit 6c4009
    }
Packit 6c4009
  while (atomic_compare_and_exchange_bool_acq (&cmdp->error, error, -1));
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
int
Packit 6c4009
attribute_hidden
Packit 6c4009
__nptl_setxid (struct xid_command *cmdp)
Packit 6c4009
{
Packit 6c4009
  int signalled;
Packit 6c4009
  int result;
Packit 6c4009
  lll_lock (stack_cache_lock, LLL_PRIVATE);
Packit 6c4009
Packit 6c4009
  __xidcmd = cmdp;
Packit 6c4009
  cmdp->cntr = 0;
Packit 6c4009
  cmdp->error = -1;
Packit 6c4009
Packit 6c4009
  struct pthread *self = THREAD_SELF;
Packit 6c4009
Packit 6c4009
  /* Iterate over the list with system-allocated threads first.  */
Packit 6c4009
  list_t *runp;
Packit 6c4009
  list_for_each (runp, &stack_used)
Packit 6c4009
    {
Packit 6c4009
      struct pthread *t = list_entry (runp, struct pthread, list);
Packit 6c4009
      if (t == self)
Packit 6c4009
	continue;
Packit 6c4009
Packit 6c4009
      setxid_mark_thread (cmdp, t);
Packit 6c4009
    }
Packit 6c4009
Packit 6c4009
  /* Now the list with threads using user-allocated stacks.  */
Packit 6c4009
  list_for_each (runp, &__stack_user)
Packit 6c4009
    {
Packit 6c4009
      struct pthread *t = list_entry (runp, struct pthread, list);
Packit 6c4009
      if (t == self)
Packit 6c4009
	continue;
Packit 6c4009
Packit 6c4009
      setxid_mark_thread (cmdp, t);
Packit 6c4009
    }
Packit 6c4009
Packit 6c4009
  /* Iterate until we don't succeed in signalling anyone.  That means
Packit 6c4009
     we have gotten all running threads, and their children will be
Packit 6c4009
     automatically correct once started.  */
Packit 6c4009
  do
Packit 6c4009
    {
Packit 6c4009
      signalled = 0;
Packit 6c4009
Packit 6c4009
      list_for_each (runp, &stack_used)
Packit 6c4009
	{
Packit 6c4009
	  struct pthread *t = list_entry (runp, struct pthread, list);
Packit 6c4009
	  if (t == self)
Packit 6c4009
	    continue;
Packit 6c4009
Packit 6c4009
	  signalled += setxid_signal_thread (cmdp, t);
Packit 6c4009
	}
Packit 6c4009
Packit 6c4009
      list_for_each (runp, &__stack_user)
Packit 6c4009
	{
Packit 6c4009
	  struct pthread *t = list_entry (runp, struct pthread, list);
Packit 6c4009
	  if (t == self)
Packit 6c4009
	    continue;
Packit 6c4009
Packit 6c4009
	  signalled += setxid_signal_thread (cmdp, t);
Packit 6c4009
	}
Packit 6c4009
Packit 6c4009
      int cur = cmdp->cntr;
Packit 6c4009
      while (cur != 0)
Packit 6c4009
	{
Packit 6c4009
	  futex_wait_simple ((unsigned int *) &cmdp->cntr, cur,
Packit 6c4009
			     FUTEX_PRIVATE);
Packit 6c4009
	  cur = cmdp->cntr;
Packit 6c4009
	}
Packit 6c4009
    }
Packit 6c4009
  while (signalled != 0);
Packit 6c4009
Packit 6c4009
  /* Clean up flags, so that no thread blocks during exit waiting
Packit 6c4009
     for a signal which will never come.  */
Packit 6c4009
  list_for_each (runp, &stack_used)
Packit 6c4009
    {
Packit 6c4009
      struct pthread *t = list_entry (runp, struct pthread, list);
Packit 6c4009
      if (t == self)
Packit 6c4009
	continue;
Packit 6c4009
Packit 6c4009
      setxid_unmark_thread (cmdp, t);
Packit 6c4009
    }
Packit 6c4009
Packit 6c4009
  list_for_each (runp, &__stack_user)
Packit 6c4009
    {
Packit 6c4009
      struct pthread *t = list_entry (runp, struct pthread, list);
Packit 6c4009
      if (t == self)
Packit 6c4009
	continue;
Packit 6c4009
Packit 6c4009
      setxid_unmark_thread (cmdp, t);
Packit 6c4009
    }
Packit 6c4009
Packit 6c4009
  /* This must be last, otherwise the current thread might not have
     permission to send the SIGSETXID signal to the other threads.  */
Packit 6c4009
  INTERNAL_SYSCALL_DECL (err);
Packit 6c4009
  result = INTERNAL_SYSCALL_NCS (cmdp->syscall_no, err, 3,
Packit 6c4009
				 cmdp->id[0], cmdp->id[1], cmdp->id[2]);
Packit 6c4009
  int error = 0;
Packit 6c4009
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
Packit 6c4009
    {
Packit 6c4009
      error = INTERNAL_SYSCALL_ERRNO (result, err);
Packit 6c4009
      __set_errno (error);
Packit 6c4009
      result = -1;
Packit 6c4009
    }
Packit 6c4009
  __nptl_setxid_error (cmdp, error);
Packit 6c4009
Packit 6c4009
  lll_unlock (stack_cache_lock, LLL_PRIVATE);
Packit 6c4009
  return result;
Packit 6c4009
}
Packit 6c4009
#endif  /* SIGSETXID.  */
Packit 6c4009
Packit 6c4009
Packit 6c4009
static inline void __attribute__((always_inline))
Packit 6c4009
init_one_static_tls (struct pthread *curp, struct link_map *map)
Packit 6c4009
{
Packit 6c4009
# if TLS_TCB_AT_TP
Packit 6c4009
  void *dest = (char *) curp - map->l_tls_offset;
Packit 6c4009
# elif TLS_DTV_AT_TP
Packit 6c4009
  void *dest = (char *) curp + map->l_tls_offset + TLS_PRE_TCB_SIZE;
Packit 6c4009
# else
Packit 6c4009
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
Packit 6c4009
# endif
Packit 6c4009
Packit 6c4009
  /* Initialize the memory.  */
Packit 6c4009
  memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
Packit 6c4009
	  '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
void
Packit 6c4009
attribute_hidden
Packit 6c4009
__pthread_init_static_tls (struct link_map *map)
Packit 6c4009
{
Packit 6c4009
  lll_lock (stack_cache_lock, LLL_PRIVATE);
Packit 6c4009
Packit 6c4009
  /* Iterate over the list with system-allocated threads first.  */
Packit 6c4009
  list_t *runp;
Packit 6c4009
  list_for_each (runp, &stack_used)
Packit 6c4009
    init_one_static_tls (list_entry (runp, struct pthread, list), map);
Packit 6c4009
Packit 6c4009
  /* Now the list with threads using user-allocated stacks.  */
Packit 6c4009
  list_for_each (runp, &__stack_user)
Packit 6c4009
    init_one_static_tls (list_entry (runp, struct pthread, list), map);
Packit 6c4009
Packit 6c4009
  lll_unlock (stack_cache_lock, LLL_PRIVATE);
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
Packit 6c4009
void
Packit 6c4009
attribute_hidden
Packit 6c4009
__wait_lookup_done (void)
Packit 6c4009
{
Packit 6c4009
  lll_lock (stack_cache_lock, LLL_PRIVATE);
Packit 6c4009
Packit 6c4009
  struct pthread *self = THREAD_SELF;
Packit 6c4009
Packit 6c4009
  /* Iterate over the list with system-allocated threads first.  */
Packit 6c4009
  list_t *runp;
Packit 6c4009
  list_for_each (runp, &stack_used)
Packit 6c4009
    {
Packit 6c4009
      struct pthread *t = list_entry (runp, struct pthread, list);
Packit 6c4009
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
Packit 6c4009
	continue;
Packit 6c4009
Packit 6c4009
      int *const gscope_flagp = &t->header.gscope_flag;
Packit 6c4009
Packit 6c4009
      /* We have to wait until this thread is done with the global
Packit 6c4009
	 scope.  First tell the thread that we are waiting and
Packit 6c4009
	 possibly have to be woken.  */
Packit 6c4009
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
Packit 6c4009
						THREAD_GSCOPE_FLAG_WAIT,
Packit 6c4009
						THREAD_GSCOPE_FLAG_USED))
Packit 6c4009
	continue;
Packit 6c4009
Packit 6c4009
      do
Packit 6c4009
	futex_wait_simple ((unsigned int *) gscope_flagp,
Packit 6c4009
			   THREAD_GSCOPE_FLAG_WAIT, FUTEX_PRIVATE);
Packit 6c4009
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
Packit 6c4009
    }
Packit 6c4009
Packit 6c4009
  /* Now the list with threads using user-allocated stacks.  */
Packit 6c4009
  list_for_each (runp, &__stack_user)
Packit 6c4009
    {
Packit 6c4009
      struct pthread *t = list_entry (runp, struct pthread, list);
Packit 6c4009
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
Packit 6c4009
	continue;
Packit 6c4009
Packit 6c4009
      int *const gscope_flagp = &t->header.gscope_flag;
Packit 6c4009
Packit 6c4009
      /* We have to wait until this thread is done with the global
Packit 6c4009
	 scope.  First tell the thread that we are waiting and
Packit 6c4009
	 possibly have to be woken.  */
Packit 6c4009
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
Packit 6c4009
						THREAD_GSCOPE_FLAG_WAIT,
Packit 6c4009
						THREAD_GSCOPE_FLAG_USED))
Packit 6c4009
	continue;
Packit 6c4009
Packit 6c4009
      do
Packit 6c4009
	futex_wait_simple ((unsigned int *) gscope_flagp,
Packit 6c4009
			   THREAD_GSCOPE_FLAG_WAIT, FUTEX_PRIVATE);
Packit 6c4009
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
Packit 6c4009
    }
Packit 6c4009
Packit 6c4009
  lll_unlock (stack_cache_lock, LLL_PRIVATE);
Packit 6c4009
}