/* pthread_cond_common -- shared code for condition variable.
   Copyright (C) 2016-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
Packit 6c4009
#include <atomic.h>
#include <stdint.h>
#include <pthread.h>
Packit 6c4009
/* We need 3 least-significant bits on __wrefs for something else.  */
Packit 6c4009
#define __PTHREAD_COND_MAX_GROUP_SIZE ((unsigned) 1 << 29)
Packit 6c4009
Packit 6c4009
#if __HAVE_64B_ATOMICS == 1
Packit 6c4009
Packit 6c4009
static uint64_t __attribute__ ((unused))
Packit 6c4009
__condvar_load_wseq_relaxed (pthread_cond_t *cond)
Packit 6c4009
{
Packit 6c4009
  return atomic_load_relaxed (&cond->__data.__wseq);
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
static uint64_t __attribute__ ((unused))
Packit 6c4009
__condvar_fetch_add_wseq_acquire (pthread_cond_t *cond, unsigned int val)
Packit 6c4009
{
Packit 6c4009
  return atomic_fetch_add_acquire (&cond->__data.__wseq, val);
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
static uint64_t __attribute__ ((unused))
Packit 6c4009
__condvar_fetch_xor_wseq_release (pthread_cond_t *cond, unsigned int val)
Packit 6c4009
{
Packit 6c4009
  return atomic_fetch_xor_release (&cond->__data.__wseq, val);
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
static uint64_t __attribute__ ((unused))
Packit 6c4009
__condvar_load_g1_start_relaxed (pthread_cond_t *cond)
Packit 6c4009
{
Packit 6c4009
  return atomic_load_relaxed (&cond->__data.__g1_start);
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
static void __attribute__ ((unused))
Packit 6c4009
__condvar_add_g1_start_relaxed (pthread_cond_t *cond, unsigned int val)
Packit 6c4009
{
Packit 6c4009
  atomic_store_relaxed (&cond->__data.__g1_start,
Packit 6c4009
      atomic_load_relaxed (&cond->__data.__g1_start) + val);
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
#else
Packit 6c4009
Packit 6c4009
/* We use two 64b counters: __wseq and __g1_start.  They are monotonically
   increasing and single-writer-multiple-readers counters, so we can implement
   load, fetch-and-add, and fetch-and-xor operations even when we just have
   32b atomics.  Values we add or xor are less than or equal to 1<<31 (*),
   so we only have to make overflow-and-addition atomic wrt. concurrent
   load operations and xor operations.  To do that, we split each counter into
   two 32b values of which we reserve the MSB of each to represent an
   overflow from the lower-order half to the higher-order half.

   In the common case, the state is (higher-order / lower-order half, and . is
   basically concatenation of the bits):
   0.h     / 0.l  = h.l

   When we add a value of x that overflows (i.e., 0.l + x == 1.L), we run the
   following steps S1-S4 (the values these represent are on the right-hand
   side):
   S1:  0.h     / 1.L == (h+1).L
   S2:  1.(h+1) / 1.L == (h+1).L
   S3:  1.(h+1) / 0.L == (h+1).L
   S4:  0.(h+1) / 0.L == (h+1).L
   If the MSB of the higher-order half is set, readers will ignore the
   overflow bit in the lower-order half.

   To get an atomic snapshot in load operations, we exploit that the
   higher-order half is monotonically increasing; if we load a value V from
   it, then read the lower-order half, and then read the higher-order half
   again and see the same value V, we know that both halves have existed in
   the sequence of values the full counter had.  This is similar to the
   validated reads in the time-based STMs in GCC's libitm (e.g.,
   method_ml_wt).

   The xor operation needs to be an atomic read-modify-write.  The write
   itself is not an issue as it affects just the lower-order half but not bits
   used in the add operation.  To make the full fetch-and-xor atomic, we
   exploit that concurrently, the value can increase by at most 1<<31 (*): The
   xor operation is only called while having acquired the lock, so not more
   than __PTHREAD_COND_MAX_GROUP_SIZE waiters can enter concurrently and thus
   increment __wseq.  Therefore, if the xor operation observes a value of
   __wseq, then the value it applies the modification to later on can be
   derived (see below).

   One benefit of this scheme is that this makes load operations
   obstruction-free because unlike if we would just lock the counter, readers
   can almost always interpret a snapshot of each half.  Readers can be
   forced to read a new snapshot when the read is concurrent with an overflow.
   However, overflows will happen infrequently, so load operations are
   practically lock-free.

   (*) The highest value we add is __PTHREAD_COND_MAX_GROUP_SIZE << 2 to
   __g1_start (the two extra bits are for the lock in the two LSBs of
   __g1_start).  */
Packit 6c4009
typedef struct
Packit 6c4009
{
Packit 6c4009
  unsigned int low;
Packit 6c4009
  unsigned int high;
Packit 6c4009
} _condvar_lohi;
Packit 6c4009
Packit 6c4009
static uint64_t
Packit 6c4009
__condvar_fetch_add_64_relaxed (_condvar_lohi *lh, unsigned int op)
Packit 6c4009
{
Packit 6c4009
  /* S1. Note that this is an atomic read-modify-write so it extends the
Packit 6c4009
     release sequence of release MO store at S3.  */
Packit 6c4009
  unsigned int l = atomic_fetch_add_relaxed (&lh->low, op);
Packit 6c4009
  unsigned int h = atomic_load_relaxed (&lh->high);
Packit 6c4009
  uint64_t result = ((uint64_t) h << 31) | l;
Packit 6c4009
  l += op;
Packit 6c4009
  if ((l >> 31) > 0)
Packit 6c4009
    {
Packit 6c4009
      /* Overflow.  Need to increment higher-order half.  Note that all
Packit 6c4009
	 add operations are ordered in happens-before.  */
Packit 6c4009
      h++;
Packit 6c4009
      /* S2. Release MO to synchronize with the loads of the higher-order half
Packit 6c4009
	 in the load operation.  See __condvar_load_64_relaxed.  */
Packit 6c4009
      atomic_store_release (&lh->high, h | ((unsigned int) 1 << 31));
Packit 6c4009
      l ^= (unsigned int) 1 << 31;
Packit 6c4009
      /* S3.  See __condvar_load_64_relaxed.  */
Packit 6c4009
      atomic_store_release (&lh->low, l);
Packit 6c4009
      /* S4.  Likewise.  */
Packit 6c4009
      atomic_store_release (&lh->high, h);
Packit 6c4009
    }
Packit 6c4009
  return result;
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
static uint64_t
Packit 6c4009
__condvar_load_64_relaxed (_condvar_lohi *lh)
Packit 6c4009
{
Packit 6c4009
  unsigned int h, l, h2;
Packit 6c4009
  do
Packit 6c4009
    {
Packit 6c4009
      /* This load and the second one below to the same location read from the
Packit 6c4009
	 stores in the overflow handling of the add operation or the
Packit 6c4009
	 initializing stores (which is a simple special case because
Packit 6c4009
	 initialization always completely happens before further use).
Packit 6c4009
	 Because no two stores to the higher-order half write the same value,
Packit 6c4009
	 the loop ensures that if we continue to use the snapshot, this load
Packit 6c4009
	 and the second one read from the same store operation.  All candidate
Packit 6c4009
	 store operations have release MO.
Packit 6c4009
	 If we read from S2 in the first load, then we will see the value of
Packit 6c4009
	 S1 on the next load (because we synchronize with S2), or a value
Packit 6c4009
	 later in modification order.  We correctly ignore the lower-half's
Packit 6c4009
	 overflow bit in this case.  If we read from S4, then we will see the
Packit 6c4009
	 value of S3 in the next load (or a later value), which does not have
Packit 6c4009
	 the overflow bit set anymore.
Packit 6c4009
	  */
Packit 6c4009
      h = atomic_load_acquire (&lh->high);
Packit 6c4009
      /* This will read from the release sequence of S3 (i.e, either the S3
Packit 6c4009
	 store or the read-modify-writes at S1 following S3 in modification
Packit 6c4009
	 order).  Thus, the read synchronizes with S3, and the following load
Packit 6c4009
	 of the higher-order half will read from the matching S2 (or a later
Packit 6c4009
	 value).
Packit 6c4009
	 Thus, if we read a lower-half value here that already overflowed and
Packit 6c4009
	 belongs to an increased higher-order half value, we will see the
Packit 6c4009
	 latter and h and h2 will not be equal.  */
Packit 6c4009
      l = atomic_load_acquire (&lh->low);
Packit 6c4009
      /* See above.  */
Packit 6c4009
      h2 = atomic_load_relaxed (&lh->high);
Packit 6c4009
    }
Packit 6c4009
  while (h != h2);
Packit 6c4009
  if (((l >> 31) > 0) && ((h >> 31) > 0))
Packit 6c4009
    l ^= (unsigned int) 1 << 31;
Packit 6c4009
  return ((uint64_t) (h & ~((unsigned int) 1 << 31)) << 31) + l;
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
static uint64_t __attribute__ ((unused))
Packit 6c4009
__condvar_load_wseq_relaxed (pthread_cond_t *cond)
Packit 6c4009
{
Packit 6c4009
  return __condvar_load_64_relaxed ((_condvar_lohi *) &cond->__data.__wseq32);
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
static uint64_t __attribute__ ((unused))
Packit 6c4009
__condvar_fetch_add_wseq_acquire (pthread_cond_t *cond, unsigned int val)
Packit 6c4009
{
Packit 6c4009
  uint64_t r = __condvar_fetch_add_64_relaxed
Packit 6c4009
      ((_condvar_lohi *) &cond->__data.__wseq32, val);
Packit 6c4009
  atomic_thread_fence_acquire ();
Packit 6c4009
  return r;
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
static uint64_t __attribute__ ((unused))
Packit 6c4009
__condvar_fetch_xor_wseq_release (pthread_cond_t *cond, unsigned int val)
Packit 6c4009
{
Packit 6c4009
  _condvar_lohi *lh = (_condvar_lohi *) &cond->__data.__wseq32;
Packit 6c4009
  /* First, get the current value.  See __condvar_load_64_relaxed.  */
Packit 6c4009
  unsigned int h, l, h2;
Packit 6c4009
  do
Packit 6c4009
    {
Packit 6c4009
      h = atomic_load_acquire (&lh->high);
Packit 6c4009
      l = atomic_load_acquire (&lh->low);
Packit 6c4009
      h2 = atomic_load_relaxed (&lh->high);
Packit 6c4009
    }
Packit 6c4009
  while (h != h2);
Packit 6c4009
  if (((l >> 31) > 0) && ((h >> 31) == 0))
Packit 6c4009
    h++;
Packit 6c4009
  h &= ~((unsigned int) 1 << 31);
Packit 6c4009
  l &= ~((unsigned int) 1 << 31);
Packit 6c4009
Packit 6c4009
  /* Now modify.  Due to the coherence rules, the prior load will read a value
Packit 6c4009
     earlier in modification order than the following fetch-xor.
Packit 6c4009
     This uses release MO to make the full operation have release semantics
Packit 6c4009
     (all other operations access the lower-order half).  */
Packit 6c4009
  unsigned int l2 = atomic_fetch_xor_release (&lh->low, val)
Packit 6c4009
      & ~((unsigned int) 1 << 31);
Packit 6c4009
  if (l2 < l)
Packit 6c4009
    /* The lower-order half overflowed in the meantime.  This happened exactly
Packit 6c4009
       once due to the limit on concurrent waiters (see above).  */
Packit 6c4009
    h++;
Packit 6c4009
  return ((uint64_t) h << 31) + l2;
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
static uint64_t __attribute__ ((unused))
Packit 6c4009
__condvar_load_g1_start_relaxed (pthread_cond_t *cond)
Packit 6c4009
{
Packit 6c4009
  return __condvar_load_64_relaxed
Packit 6c4009
      ((_condvar_lohi *) &cond->__data.__g1_start32);
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
static void __attribute__ ((unused))
Packit 6c4009
__condvar_add_g1_start_relaxed (pthread_cond_t *cond, unsigned int val)
Packit 6c4009
{
Packit 6c4009
  ignore_value (__condvar_fetch_add_64_relaxed
Packit 6c4009
      ((_condvar_lohi *) &cond->__data.__g1_start32, val));
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
#endif  /* !__HAVE_64B_ATOMICS  */
Packit 6c4009
Packit 6c4009
Packit 6c4009
/* The lock that signalers use.  See pthread_cond_wait_common for uses.
Packit 6c4009
   The lock is our normal three-state lock: not acquired (0) / acquired (1) /
Packit 6c4009
   acquired-with-futex_wake-request (2).  However, we need to preserve the
Packit 6c4009
   other bits in the unsigned int used for the lock, and therefore it is a
Packit 6c4009
   little more complex.  */
Packit 6c4009
static void __attribute__ ((unused))
Packit 6c4009
__condvar_acquire_lock (pthread_cond_t *cond, int private)
Packit 6c4009
{
Packit 6c4009
  unsigned int s = atomic_load_relaxed (&cond->__data.__g1_orig_size);
Packit 6c4009
  while ((s & 3) == 0)
Packit 6c4009
    {
Packit 6c4009
      if (atomic_compare_exchange_weak_acquire (&cond->__data.__g1_orig_size,
Packit 6c4009
	  &s, s | 1))
Packit 6c4009
	return;
Packit 6c4009
      /* TODO Spinning and back-off.  */
Packit 6c4009
    }
Packit 6c4009
  /* We can't change from not acquired to acquired, so try to change to
Packit 6c4009
     acquired-with-futex-wake-request and do a futex wait if we cannot change
Packit 6c4009
     from not acquired.  */
Packit 6c4009
  while (1)
Packit 6c4009
    {
Packit 6c4009
      while ((s & 3) != 2)
Packit 6c4009
	{
Packit 6c4009
	  if (atomic_compare_exchange_weak_acquire
Packit 6c4009
	      (&cond->__data.__g1_orig_size, &s, (s & ~(unsigned int) 3) | 2))
Packit 6c4009
	    {
Packit 6c4009
	      if ((s & 3) == 0)
Packit 6c4009
		return;
Packit 6c4009
	      break;
Packit 6c4009
	    }
Packit 6c4009
	  /* TODO Back off.  */
Packit 6c4009
	}
Packit 6c4009
      futex_wait_simple (&cond->__data.__g1_orig_size,
Packit 6c4009
	  (s & ~(unsigned int) 3) | 2, private);
Packit 6c4009
      /* Reload so we see a recent value.  */
Packit 6c4009
      s = atomic_load_relaxed (&cond->__data.__g1_orig_size);
Packit 6c4009
    }
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
/* See __condvar_acquire_lock.  */
Packit 6c4009
static void __attribute__ ((unused))
Packit 6c4009
__condvar_release_lock (pthread_cond_t *cond, int private)
Packit 6c4009
{
Packit 6c4009
  if ((atomic_fetch_and_release (&cond->__data.__g1_orig_size,
Packit 6c4009
				 ~(unsigned int) 3) & 3)
Packit 6c4009
      == 2)
Packit 6c4009
    futex_wake (&cond->__data.__g1_orig_size, 1, private);
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
/* Only use this when having acquired the lock.  */
Packit 6c4009
static unsigned int __attribute__ ((unused))
Packit 6c4009
__condvar_get_orig_size (pthread_cond_t *cond)
Packit 6c4009
{
Packit 6c4009
  return atomic_load_relaxed (&cond->__data.__g1_orig_size) >> 2;
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
/* Only use this when having acquired the lock.  */
Packit 6c4009
static void __attribute__ ((unused))
Packit 6c4009
__condvar_set_orig_size (pthread_cond_t *cond, unsigned int size)
Packit 6c4009
{
Packit 6c4009
  /* We have acquired the lock, but might get one concurrent update due to a
Packit 6c4009
     lock state change from acquired to acquired-with-futex_wake-request.
Packit 6c4009
     The store with relaxed MO is fine because there will be no further
Packit 6c4009
     changes to the lock bits nor the size, and we will subsequently release
Packit 6c4009
     the lock with release MO.  */
Packit 6c4009
  unsigned int s;
Packit 6c4009
  s = (atomic_load_relaxed (&cond->__data.__g1_orig_size) & 3)
Packit 6c4009
      | (size << 2);
Packit 6c4009
  if ((atomic_exchange_relaxed (&cond->__data.__g1_orig_size, s) & 3)
Packit 6c4009
      != (s & 3))
Packit 6c4009
    atomic_store_relaxed (&cond->__data.__g1_orig_size, (size << 2) | 2);
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
/* Returns FUTEX_SHARED or FUTEX_PRIVATE based on the provided __wrefs
Packit 6c4009
   value.  */
Packit 6c4009
static int __attribute__ ((unused))
Packit 6c4009
__condvar_get_private (int flags)
Packit 6c4009
{
Packit 6c4009
  if ((flags & __PTHREAD_COND_SHARED_MASK) == 0)
Packit 6c4009
    return FUTEX_PRIVATE;
Packit 6c4009
  else
Packit 6c4009
    return FUTEX_SHARED;
Packit 6c4009
}
Packit 6c4009
Packit 6c4009
/* This closes G1 (whose index is in G1INDEX), waits for all futex waiters to
Packit 6c4009
   leave G1, converts G1 into a fresh G2, and then switches group roles so that
Packit 6c4009
   the former G2 becomes the new G1 ending at the current __wseq value when we
Packit 6c4009
   eventually make the switch (WSEQ is just an observation of __wseq by the
Packit 6c4009
   signaler).
Packit 6c4009
   If G2 is empty, it will not switch groups because then it would create an
Packit 6c4009
   empty G1 which would require switching groups again on the next signal.
Packit 6c4009
   Returns false iff groups were not switched because G2 was empty.  */
Packit 6c4009
static bool __attribute__ ((unused))
Packit 6c4009
__condvar_quiesce_and_switch_g1 (pthread_cond_t *cond, uint64_t wseq,
Packit 6c4009
    unsigned int *g1index, int private)
Packit 6c4009
{
Packit 6c4009
  const unsigned int maxspin = 0;
Packit 6c4009
  unsigned int g1 = *g1index;
Packit 6c4009
Packit 6c4009
  /* If there is no waiter in G2, we don't do anything.  The expression may
Packit 6c4009
     look odd but remember that __g_size might hold a negative value, so
Packit 6c4009
     putting the expression this way avoids relying on implementation-defined
Packit 6c4009
     behavior.
Packit 6c4009
     Note that this works correctly for a zero-initialized condvar too.  */
Packit 6c4009
  unsigned int old_orig_size = __condvar_get_orig_size (cond);
Packit 6c4009
  uint64_t old_g1_start = __condvar_load_g1_start_relaxed (cond) >> 1;
Packit 6c4009
  if (((unsigned) (wseq - old_g1_start - old_orig_size)
Packit 6c4009
	  + cond->__data.__g_size[g1 ^ 1]) == 0)
Packit 6c4009
	return false;
Packit 6c4009
Packit 6c4009
  /* Now try to close and quiesce G1.  We have to consider the following kinds
Packit 6c4009
     of waiters:
Packit 6c4009
     * Waiters from less recent groups than G1 are not affected because
Packit 6c4009
       nothing will change for them apart from __g1_start getting larger.
Packit 6c4009
     * New waiters arriving concurrently with the group switching will all go
Packit 6c4009
       into G2 until we atomically make the switch.  Waiters existing in G2
Packit 6c4009
       are not affected.
Packit 6c4009
     * Waiters in G1 will be closed out immediately by setting a flag in
Packit 6c4009
       __g_signals, which will prevent waiters from blocking using a futex on
Packit 6c4009
       __g_signals and also notifies them that the group is closed.  As a
Packit 6c4009
       result, they will eventually remove their group reference, allowing us
Packit 6c4009
       to close switch group roles.  */
Packit 6c4009
Packit 6c4009
  /* First, set the closed flag on __g_signals.  This tells waiters that are
Packit 6c4009
     about to wait that they shouldn't do that anymore.  This basically
Packit 6c4009
     serves as an advance notificaton of the upcoming change to __g1_start;
Packit 6c4009
     waiters interpret it as if __g1_start was larger than their waiter
Packit 6c4009
     sequence position.  This allows us to change __g1_start after waiting
Packit 6c4009
     for all existing waiters with group references to leave, which in turn
Packit 6c4009
     makes recovery after stealing a signal simpler because it then can be
Packit 6c4009
     skipped if __g1_start indicates that the group is closed (otherwise,
Packit 6c4009
     we would have to recover always because waiters don't know how big their
Packit 6c4009
     groups are).  Relaxed MO is fine.  */
Packit 6c4009
  atomic_fetch_or_relaxed (cond->__data.__g_signals + g1, 1);
Packit 6c4009
Packit 6c4009
  /* Wait until there are no group references anymore.  The fetch-or operation
Packit 6c4009
     injects us into the modification order of __g_refs; release MO ensures
Packit 6c4009
     that waiters incrementing __g_refs after our fetch-or see the previous
Packit 6c4009
     changes to __g_signals and to __g1_start that had to happen before we can
Packit 6c4009
     switch this G1 and alias with an older group (we have two groups, so
Packit 6c4009
     aliasing requires switching group roles twice).  Note that nobody else
Packit 6c4009
     can have set the wake-request flag, so we do not have to act upon it.
Packit 6c4009
Packit 6c4009
     Also note that it is harmless if older waiters or waiters from this G1
Packit 6c4009
     get a group reference after we have quiesced the group because it will
Packit 6c4009
     remain closed for them either because of the closed flag in __g_signals
Packit 6c4009
     or the later update to __g1_start.  New waiters will never arrive here
Packit 6c4009
     but instead continue to go into the still current G2.  */
Packit 6c4009
  unsigned r = atomic_fetch_or_release (cond->__data.__g_refs + g1, 0);
Packit 6c4009
  while ((r >> 1) > 0)
Packit 6c4009
    {
Packit 6c4009
      for (unsigned int spin = maxspin; ((r >> 1) > 0) && (spin > 0); spin--)
Packit 6c4009
	{
Packit 6c4009
	  /* TODO Back off.  */
Packit 6c4009
	  r = atomic_load_relaxed (cond->__data.__g_refs + g1);
Packit 6c4009
	}
Packit 6c4009
      if ((r >> 1) > 0)
Packit 6c4009
	{
Packit 6c4009
	  /* There is still a waiter after spinning.  Set the wake-request
Packit 6c4009
	     flag and block.  Relaxed MO is fine because this is just about
Packit Service 93b49b
	     this futex word.
Packit Service 93b49b
Packit Service 93b49b
	     Update r to include the set wake-request flag so that the upcoming
Packit Service 93b49b
	     futex_wait only blocks if the flag is still set (otherwise, we'd
Packit Service 93b49b
	     violate the basic client-side futex protocol).  */
Packit Service 93b49b
	  r = atomic_fetch_or_relaxed (cond->__data.__g_refs + g1, 1) | 1;
Packit 6c4009
Packit 6c4009
	  if ((r >> 1) > 0)
Packit 6c4009
	    futex_wait_simple (cond->__data.__g_refs + g1, r, private);
Packit 6c4009
	  /* Reload here so we eventually see the most recent value even if we
Packit 6c4009
	     do not spin.   */
Packit 6c4009
	  r = atomic_load_relaxed (cond->__data.__g_refs + g1);
Packit 6c4009
	}
Packit 6c4009
    }
Packit 6c4009
  /* Acquire MO so that we synchronize with the release operation that waiters
Packit 6c4009
     use to decrement __g_refs and thus happen after the waiters we waited
Packit 6c4009
     for.  */
Packit 6c4009
  atomic_thread_fence_acquire ();
Packit 6c4009
Packit 6c4009
  /* Update __g1_start, which finishes closing this group.  The value we add
Packit 6c4009
     will never be negative because old_orig_size can only be zero when we
Packit 6c4009
     switch groups the first time after a condvar was initialized, in which
Packit 6c4009
     case G1 will be at index 1 and we will add a value of 1.  See above for
Packit 6c4009
     why this takes place after waiting for quiescence of the group.
Packit 6c4009
     Relaxed MO is fine because the change comes with no additional
Packit 6c4009
     constraints that others would have to observe.  */
Packit 6c4009
  __condvar_add_g1_start_relaxed (cond,
Packit 6c4009
      (old_orig_size << 1) + (g1 == 1 ? 1 : - 1));
Packit 6c4009
Packit 6c4009
  /* Now reopen the group, thus enabling waiters to again block using the
Packit 6c4009
     futex controlled by __g_signals.  Release MO so that observers that see
Packit 6c4009
     no signals (and thus can block) also see the write __g1_start and thus
Packit 6c4009
     that this is now a new group (see __pthread_cond_wait_common for the
Packit 6c4009
     matching acquire MO loads).  */
Packit 6c4009
  atomic_store_release (cond->__data.__g_signals + g1, 0);
Packit 6c4009
Packit 6c4009
  /* At this point, the old G1 is now a valid new G2 (but not in use yet).
Packit 6c4009
     No old waiter can neither grab a signal nor acquire a reference without
Packit 6c4009
     noticing that __g1_start is larger.
Packit 6c4009
     We can now publish the group switch by flipping the G2 index in __wseq.
Packit 6c4009
     Release MO so that this synchronizes with the acquire MO operation
Packit 6c4009
     waiters use to obtain a position in the waiter sequence.  */
Packit 6c4009
  wseq = __condvar_fetch_xor_wseq_release (cond, 1) >> 1;
Packit 6c4009
  g1 ^= 1;
Packit 6c4009
  *g1index ^= 1;
Packit 6c4009
Packit 6c4009
  /* These values are just observed by signalers, and thus protected by the
Packit 6c4009
     lock.  */
Packit 6c4009
  unsigned int orig_size = wseq - (old_g1_start + old_orig_size);
Packit 6c4009
  __condvar_set_orig_size (cond, orig_size);
Packit 6c4009
  /* Use and addition to not loose track of cancellations in what was
Packit 6c4009
     previously G2.  */
Packit 6c4009
  cond->__data.__g_size[g1] += orig_size;
Packit 6c4009
Packit 6c4009
  /* The new G1's size may be zero because of cancellations during its time
Packit 6c4009
     as G2.  If this happens, there are no waiters that have to receive a
Packit 6c4009
     signal, so we do not need to add any and return false.  */
Packit 6c4009
  if (cond->__data.__g_size[g1] == 0)
Packit 6c4009
    return false;
Packit 6c4009
Packit 6c4009
  return true;
Packit 6c4009
}