nptl/pthread_mutex_lock.c

/* Copyright (C) 2002-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <atomic.h>
#include <lowlevellock.h>
#include <stap-probe.h>

#ifndef lll_lock_elision
#define lll_lock_elision(lock, try_lock, private)	({ \
      lll_lock (lock, private); 0; })
#endif

#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif

/* Some of the following definitions differ when pthread_mutex_cond_lock.c
   includes this file.  */
#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK_MODIFIER 0
# define LLL_MUTEX_LOCK_ELISION(mutex) \
  lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
		   PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \
  lll_trylock_elision((mutex)->__data.__lock, (mutex)->__data.__elision, \
		   PTHREAD_MUTEX_PSHARED (mutex))
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif
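/* Note: on configurations without lock elision the fallback definitions
   above reduce the *_ELISION operations to plain lll_lock / lll_trylock
   and ignore the per-mutex __elision adaptation field, so the elision
   code paths below fall back to ordinary locking.  */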
static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;

int
__pthread_mutex_lock (pthread_mutex_t *mutex)
{
  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);

  LIBC_PROBE (mutex_entry, 1, mutex);

  if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP
				 | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_lock_full (mutex);

  if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP))
    {
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex);
      assert (mutex->__data.__owner == 0);
    }
#ifdef HAVE_ELISION
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
  elision: __attribute__((unused))
      /* This case can never happen on a system without elision,
         as the mutex type initialization functions will not
	 allow the elision flags to be set.  */
      /* Don't record owner or users for elision case.  This is a
         tail call.  */
      return LLL_MUTEX_LOCK_ELISION (mutex);
    }
#endif
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
			     == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
	{
	  /* Just bump the counter.  */
	  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
	    /* Overflow of the counter.  */
	    return EAGAIN;

	  ++mutex->__data.__count;

	  return 0;
	}

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
			  == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (! __is_smp)
	goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
	{
	  int cnt = 0;
	  int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
			     mutex->__data.__spins * 2 + 10);
	  do
	    {
	      if (cnt++ >= max_cnt)
		{
		  LLL_MUTEX_LOCK (mutex);
		  break;
		}
	      atomic_spin_nop ();
	    }
	  while (LLL_MUTEX_TRYLOCK (mutex) != 0);

	  mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
	}
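      /* Worked example of the adaptation above: starting from __spins == 0
	 the thread spins at most MIN (MAX_ADAPTIVE_COUNT, 10) times.  If
	 the budget is exhausted and it falls back to LLL_MUTEX_LOCK, cnt
	 ends up one past the budget (11 here) and __spins becomes
	 (11 - 0) / 8 == 1, so the next contended acquisition may spin up
	 to 12 times (assuming MAX_ADAPTIVE_COUNT does not cap it).
	 Acquisitions that succeed after far fewer spins than the current
	 __spins pull the estimate back down; the division by 8 damps the
	 adjustment in both directions.  */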
      assert (mutex->__data.__owner == 0);
    }
  else
    {
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
      assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
	return EDEADLK;
      goto simple;
    }

  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
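/* Everything handled above is one of the four plain mutex kinds (or their
   elided variants); any type carrying additional state bits (robust,
   priority inheritance, priority protection) was dispatched early on to
   the out-of-line slow path below.  */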
static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
		     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
	 see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
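      /* Background note (robust futex protocol): at thread exit the kernel
	 walks the robust list and also inspects list_op_pending, so
	 recording the mutex here before touching its lock word ensures
	 that an acquisition interrupted by thread death can still be
	 recovered; the next owner then sees FUTEX_OWNER_DIED.  */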
      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
	 FUTEX_WAITERS flag with other threads, and therefore need to keep it
	 set to avoid lost wake-ups.  We have the same requirement in the
	 simple mutex algorithm.
	 We start with value zero for a normal mutex, and FUTEX_WAITERS if we
	 are building the special case mutexes for use from within condition
	 variables.  */
      unsigned int assume_other_futex_waiters = LLL_ROBUST_MUTEX_LOCK_MODIFIER;
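      /* Reminder about the lock word layout used below (standard robust
	 futex convention): a value of zero means unlocked; otherwise the
	 FUTEX_TID_MASK bits hold the owner's TID, FUTEX_WAITERS marks
	 possibly blocked waiters, and FUTEX_OWNER_DIED is set by the
	 kernel when the owner exited without unlocking.  */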
      while (1)
	{
	  /* Try to acquire the lock through a CAS from 0 (not acquired) to
	     our TID | assume_other_futex_waiters.  */
	  if (__glibc_likely (oldval == 0))
	    {
	      oldval
	        = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
	            id | assume_other_futex_waiters, 0);
	      if (__glibc_likely (oldval == 0))
		break;
	    }

	  if ((oldval & FUTEX_OWNER_DIED) != 0)
	    {
	      /* The previous owner died.  Try locking the mutex.  */
	      int newval = id;
#ifdef NO_INCR
	      /* We are not taking assume_other_futex_waiters into account
		 here simply because we'll set FUTEX_WAITERS anyway.  */
	      newval |= FUTEX_WAITERS;
#else
	      newval |= (oldval & FUTEX_WAITERS) | assume_other_futex_waiters;
#endif
	      newval
		= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						       newval, oldval);

	      if (newval != oldval)
		{
		  oldval = newval;
		  continue;
		}

	      /* We got the mutex.  */
	      mutex->__data.__count = 1;
	      /* But it is inconsistent unless marked otherwise.  */
	      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	      /* We must not enqueue the mutex before we have acquired it.
		 Also see comments at ENQUEUE_MUTEX.  */
	      __asm ("" ::: "memory");
	      ENQUEUE_MUTEX (mutex);
	      /* We need to clear op_pending after we enqueue the mutex.  */
	      __asm ("" ::: "memory");
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	      /* Note that we deliberately exit here.  If we fall
		 through to the end of the function __nusers would be
		 incremented which is not correct because the old
		 owner has to be discounted.  If we are not supposed
		 to increment __nusers we actually have to decrement
		 it here.  */
#ifdef NO_INCR
	      --mutex->__data.__nusers;
#endif

	      return EOWNERDEAD;
	    }

	  /* Check whether we already hold the mutex.  */
	  if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
	    {
	      int kind = PTHREAD_MUTEX_TYPE (mutex);
	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
		{
		  /* We do not need to ensure ordering wrt another memory
		     access.  Also see comments at ENQUEUE_MUTEX. */
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);
		  return EDEADLK;
		}

	      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
		{
		  /* We do not need to ensure ordering wrt another memory
		     access.  */
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);

		  /* Just bump the counter.  */
		  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
		    /* Overflow of the counter.  */
		    return EAGAIN;

		  ++mutex->__data.__count;

		  return 0;
		}
	    }

	  /* We cannot acquire the mutex nor has its owner died.  Thus, try
	     to block using futexes.  Set FUTEX_WAITERS if necessary so that
	     other threads are aware that there are potentially threads
	     blocked on the futex.  Restart if oldval changed in the
	     meantime.  */
	  if ((oldval & FUTEX_WAITERS) == 0)
	    {
	      if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
							oldval | FUTEX_WAITERS,
							oldval)
		  != 0)
		{
		  oldval = mutex->__data.__lock;
		  continue;
		}
	      oldval |= FUTEX_WAITERS;
	    }

	  /* It is now possible that we share the FUTEX_WAITERS flag with
	     another thread; therefore, update assume_other_futex_waiters so
	     that we do not forget about this when handling other cases
	     above and thus do not cause lost wake-ups.  */
	  assume_other_futex_waiters |= FUTEX_WAITERS;

	  /* Block using the futex and reload current lock value.  */
	  lll_futex_wait (&mutex->__data.__lock, oldval,
			  PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
	  oldval = mutex->__data.__lock;
	}

      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
			    == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	{
	  /* This mutex is now not recoverable.  */
	  mutex->__data.__count = 0;
	  int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
	  lll_unlock (mutex->__data.__lock, private);
	  /* FIXME This violates the mutex destruction requirements.  See
	     __pthread_mutex_unlock_full.  */
	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	  return ENOTRECOVERABLE;
	}

      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
	 Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    /* The PI support requires the Linux futex system call.  If that's not
       available, pthread_mutex_init should never have allowed the type to
       be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
	int kind, robust;
	{
	  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
	     in sysdeps/nptl/bits/thread-shared-types.h.  */
	  int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
	  kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
	  robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
	}
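	/* Roughly: for the priority-inheritance kinds the kernel does the
	   heavy lifting.  User space below only attempts the uncontended
	   0 -> TID transition and otherwise calls FUTEX_LOCK_PI, which
	   blocks, applies priority inheritance to the current owner and
	   hands the lock word over on wake-up.  */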
	if (robust)
	  {
	    /* Note: robust PI futexes are signaled by setting bit 0.  */
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
			   (void *) (((uintptr_t) &mutex->__data.__list.__next)
				     | 1));
	    /* We need to set op_pending before starting the operation.  Also
	       see comments at ENQUEUE_MUTEX.  */
	    __asm ("" ::: "memory");
	  }

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      {
		/* We do not need to ensure ordering wrt another memory
		   access.  */
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
		return EDEADLK;
	      }

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		/* We do not need to ensure ordering wrt another memory
		   access.  */
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

		/* Just bump the counter.  */
		if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	int newval = id;
# ifdef NO_INCR
	newval |= FUTEX_WAITERS;
# endif
	oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						      newval, 0);

	if (oldval != 0)
	  {
	    /* The mutex is locked.  The kernel will now take care of
	       everything.  */
	    int private = (robust
			   ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
			   : PTHREAD_MUTEX_PSHARED (mutex));
	    INTERNAL_SYSCALL_DECL (__err);
	    int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
				      __lll_private_flag (FUTEX_LOCK_PI,
							  private), 1, 0);

	    if (INTERNAL_SYSCALL_ERROR_P (e, __err)
		&& (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
		    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
	      {
		assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
			|| (kind != PTHREAD_MUTEX_ERRORCHECK_NP
			    && kind != PTHREAD_MUTEX_RECURSIVE_NP));
		/* ESRCH can happen only for non-robust PI mutexes where
		   the owner of the lock died.  */
		assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

		/* Delay the thread indefinitely.  */
		while (1)
		  __pause_nocancel ();
	      }

	    oldval = mutex->__data.__lock;

	    assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
	  }

	if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
	  {
	    atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

	    /* We got the mutex.  */
	    mutex->__data.__count = 1;
	    /* But it is inconsistent unless marked otherwise.  */
	    mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	    /* We must not enqueue the mutex before we have acquired it.
	       Also see comments at ENQUEUE_MUTEX.  */
	    __asm ("" ::: "memory");
	    ENQUEUE_MUTEX_PI (mutex);
	    /* We need to clear op_pending after we enqueue the mutex.  */
	    __asm ("" ::: "memory");
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	    /* Note that we deliberately exit here.  If we fall
	       through to the end of the function __nusers would be
	       incremented which is not correct because the old owner
	       has to be discounted.  If we are not supposed to
	       increment __nusers we actually have to decrement it here.  */
# ifdef NO_INCR
	    --mutex->__data.__nusers;
# endif

	    return EOWNERDEAD;
	  }

	if (robust
	    && __builtin_expect (mutex->__data.__owner
				 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	  {
	    /* This mutex is now not recoverable.  */
	    mutex->__data.__count = 0;

	    INTERNAL_SYSCALL_DECL (__err);
	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
			      __lll_private_flag (FUTEX_UNLOCK_PI,
						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
			      0, 0);

	    /* To the kernel, this will be visible after the kernel has
	       acquired the mutex in the syscall.  */
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	    return ENOTRECOVERABLE;
	  }

	mutex->__data.__count = 1;
	if (robust)
	  {
	    /* We must not enqueue the mutex before we have acquired it.
	       Also see comments at ENQUEUE_MUTEX.  */
	    __asm ("" ::: "memory");
	    ENQUEUE_MUTEX_PI (mutex);
	    /* We need to clear op_pending after we enqueue the mutex.  */
	    __asm ("" ::: "memory");
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	  }
      }
      break;
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
	/* See concurrency notes regarding __kind in struct __pthread_mutex_s
	   in sysdeps/nptl/bits/thread-shared-types.h.  */
	int kind = atomic_load_relaxed (&(mutex->__data.__kind))
	  & PTHREAD_MUTEX_KIND_MASK_NP;

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (mutex->__data.__owner == id)
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      return EDEADLK;

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		/* Just bump the counter.  */
		if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	int oldprio = -1, ceilval;
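	/* Lock word layout used by the loop below: the priority ceiling
	   lives in the PTHREAD_MUTEX_PRIO_CEILING_MASK bits; the low bits
	   encode the state, i.e. ceilval means unlocked, ceilval | 1
	   locked, and ceilval | 2 locked with possible waiters (which is
	   also the value waited on with lll_futex_wait).  */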
	do
	  {
	    int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
			  >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

	    if (__pthread_current_priority () > ceiling)
	      {
		if (oldprio != -1)
		  __pthread_tpp_change_priority (oldprio, -1);
		return EINVAL;
	      }

	    int retval = __pthread_tpp_change_priority (oldprio, ceiling);
	    if (retval)
	      return retval;

	    ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
	    oldprio = ceiling;

	    oldval
	      = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
						     ceilval | 2,
#else
						     ceilval | 1,
#endif
						     ceilval);

	    if (oldval == ceilval)
	      break;

	    do
	      {
		oldval
		  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							 ceilval | 2,
							 ceilval | 1);

		if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
		  break;

		if (oldval != ceilval)
		  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
				  PTHREAD_MUTEX_PSHARED (mutex));
	      }
	    while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							ceilval | 2, ceilval)
		   != ceilval);
	  }
	while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

	assert (mutex->__data.__owner == 0);
	mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
#ifndef __pthread_mutex_lock
weak_alias (__pthread_mutex_lock, pthread_mutex_lock)
hidden_def (__pthread_mutex_lock)
#endif


#ifdef NO_INCR
void
__pthread_mutex_cond_lock_adjust (pthread_mutex_t *mutex)
{
  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
     in sysdeps/nptl/bits/thread-shared-types.h.  */
  int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
  assert ((mutex_kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex_kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex_kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif
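/* Illustrative usage sketch (not part of this file): the EDEADLK path in
   __pthread_mutex_lock above is what an application sees when it relocks
   an error-checking mutex it already owns, e.g.

     pthread_mutexattr_t attr;
     pthread_mutex_t m;
     pthread_mutexattr_init (&attr);
     pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ERRORCHECK);
     pthread_mutex_init (&m, &attr);
     pthread_mutex_lock (&m);            // returns 0
     int r = pthread_mutex_lock (&m);    // returns EDEADLK, not a deadlock
     pthread_mutex_unlock (&m);

   This uses only the standard POSIX API and is a sketch only; it is not
   compiled as part of this file.  */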