Blame jemalloc/include/jemalloc/internal/mutex.h

Packit 345191
#ifndef JEMALLOC_INTERNAL_MUTEX_H
Packit 345191
#define JEMALLOC_INTERNAL_MUTEX_H
Packit 345191
Packit 345191
#include "jemalloc/internal/atomic.h"
Packit 345191
#include "jemalloc/internal/mutex_prof.h"
Packit 345191
#include "jemalloc/internal/tsd.h"
Packit 345191
#include "jemalloc/internal/witness.h"
Packit 345191
Packit 345191
typedef enum {
Packit 345191
	/* Can only acquire one mutex of a given witness rank at a time. */
Packit 345191
	malloc_mutex_rank_exclusive,
Packit 345191
	/*
Packit 345191
	 * Can acquire multiple mutexes of the same witness rank, but in
Packit 345191
	 * address-ascending order only.
Packit 345191
	 */
Packit 345191
	malloc_mutex_address_ordered
Packit 345191
} malloc_mutex_lock_order_t;
Packit 345191
Packit 345191
typedef struct malloc_mutex_s malloc_mutex_t;
Packit 345191
struct malloc_mutex_s {
Packit 345191
	union {
Packit 345191
		struct {
Packit 345191
			/*
Packit 345191
			 * prof_data is defined first to reduce cacheline
Packit 345191
			 * bouncing: the data is not touched by the mutex holder
Packit 345191
			 * during unlocking, while might be modified by
Packit 345191
			 * contenders.  Having it before the mutex itself could
Packit 345191
			 * avoid prefetching a modified cacheline (for the
Packit 345191
			 * unlocking thread).
Packit 345191
			 */
Packit 345191
			mutex_prof_data_t	prof_data;
Packit 345191
#ifdef _WIN32
Packit 345191
#  if _WIN32_WINNT >= 0x0600
Packit 345191
			SRWLOCK         	lock;
Packit 345191
#  else
Packit 345191
			CRITICAL_SECTION	lock;
Packit 345191
#  endif
Packit 345191
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
Packit 345191
			os_unfair_lock		lock;
Packit 345191
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
Packit 345191
			pthread_mutex_t		lock;
Packit 345191
			malloc_mutex_t		*postponed_next;
Packit 345191
#else
Packit 345191
			pthread_mutex_t		lock;
Packit 345191
#endif
Packit 345191
			/*
Packit 345191
			 * Hint flag to avoid exclusive cache line contention
Packit 345191
			 * during spin waiting
Packit 345191
			 */
Packit 345191
			atomic_b_t		locked;
Packit 345191
		};
Packit 345191
		/*
Packit 345191
		 * We only touch witness when configured w/ debug.  However we
Packit 345191
		 * keep the field in a union when !debug so that we don't have
Packit 345191
		 * to pollute the code base with #ifdefs, while avoid paying the
Packit 345191
		 * memory cost.
Packit 345191
		 */
Packit 345191
#if !defined(JEMALLOC_DEBUG)
Packit 345191
		witness_t			witness;
Packit 345191
		malloc_mutex_lock_order_t	lock_order;
Packit 345191
#endif
Packit 345191
	};
Packit 345191
Packit 345191
#if defined(JEMALLOC_DEBUG)
Packit 345191
	witness_t			witness;
Packit 345191
	malloc_mutex_lock_order_t	lock_order;
Packit 345191
#endif
Packit 345191
};
Packit 345191
Packit 345191
/*
Packit 345191
 * Based on benchmark results, a fixed spin with this amount of retries works
Packit 345191
 * well for our critical sections.
Packit 345191
 */
Packit 345191
#define MALLOC_MUTEX_MAX_SPIN 250
Packit 345191
Packit 345191
#ifdef _WIN32
Packit 345191
#  if _WIN32_WINNT >= 0x0600
Packit 345191
#    define MALLOC_MUTEX_LOCK(m)    AcquireSRWLockExclusive(&(m)->lock)
Packit 345191
#    define MALLOC_MUTEX_UNLOCK(m)  ReleaseSRWLockExclusive(&(m)->lock)
Packit 345191
#    define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock))
Packit 345191
#  else
Packit 345191
#    define MALLOC_MUTEX_LOCK(m)    EnterCriticalSection(&(m)->lock)
Packit 345191
#    define MALLOC_MUTEX_UNLOCK(m)  LeaveCriticalSection(&(m)->lock)
Packit 345191
#    define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock))
Packit 345191
#  endif
Packit 345191
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
Packit 345191
#    define MALLOC_MUTEX_LOCK(m)    os_unfair_lock_lock(&(m)->lock)
Packit 345191
#    define MALLOC_MUTEX_UNLOCK(m)  os_unfair_lock_unlock(&(m)->lock)
Packit 345191
#    define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock))
Packit 345191
#else
Packit 345191
#    define MALLOC_MUTEX_LOCK(m)    pthread_mutex_lock(&(m)->lock)
Packit 345191
#    define MALLOC_MUTEX_UNLOCK(m)  pthread_mutex_unlock(&(m)->lock)
Packit 345191
#    define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0)
Packit 345191
#endif
Packit 345191
Packit 345191
#define LOCK_PROF_DATA_INITIALIZER					\
Packit 345191
    {NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0,		\
Packit 345191
	    ATOMIC_INIT(0), 0, NULL, 0}
Packit 345191
Packit 345191
#ifdef _WIN32
Packit 345191
#  define MALLOC_MUTEX_INITIALIZER
Packit 345191
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
Packit 345191
#  if defined(JEMALLOC_DEBUG)
Packit 345191
#    define MALLOC_MUTEX_INITIALIZER					\
Packit 345191
  {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \
Packit 345191
         WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
Packit 345191
#  else
Packit 345191
#    define MALLOC_MUTEX_INITIALIZER                      \
Packit 345191
  {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}},  \
Packit 345191
      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
Packit 345191
#  endif
Packit 345191
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
Packit 345191
#  if (defined(JEMALLOC_DEBUG))
Packit 345191
#     define MALLOC_MUTEX_INITIALIZER					\
Packit 345191
      {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}},	\
Packit 345191
           WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
Packit 345191
#  else
Packit 345191
#     define MALLOC_MUTEX_INITIALIZER					\
Packit 345191
      {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}},	\
Packit 345191
           WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
Packit 345191
#  endif
Packit 345191
Packit 345191
#else
Packit 345191
#    define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
Packit 345191
#  if defined(JEMALLOC_DEBUG)
Packit 345191
#    define MALLOC_MUTEX_INITIALIZER					\
Packit 345191
     {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \
Packit 345191
           WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
Packit 345191
#  else
Packit 345191
#    define MALLOC_MUTEX_INITIALIZER                          \
Packit 345191
     {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}},	\
Packit 345191
      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
Packit 345191
#  endif
Packit 345191
#endif
Packit 345191
Packit 345191
#ifdef JEMALLOC_LAZY_LOCK
Packit 345191
extern bool isthreaded;
Packit 345191
#else
Packit 345191
#  undef isthreaded /* Undo private_namespace.h definition. */
Packit 345191
#  define isthreaded true
Packit 345191
#endif
Packit 345191
Packit 345191
bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
Packit 345191
    witness_rank_t rank, malloc_mutex_lock_order_t lock_order);
Packit 345191
void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
Packit 345191
void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
Packit 345191
void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
Packit 345191
bool malloc_mutex_boot(void);
Packit 345191
void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex);
Packit 345191
Packit 345191
void malloc_mutex_lock_slow(malloc_mutex_t *mutex);
Packit 345191
Packit 345191
static inline void
Packit 345191
malloc_mutex_lock_final(malloc_mutex_t *mutex) {
Packit 345191
	MALLOC_MUTEX_LOCK(mutex);
Packit 345191
	atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
Packit 345191
}
Packit 345191
Packit 345191
static inline bool
Packit 345191
malloc_mutex_trylock_final(malloc_mutex_t *mutex) {
Packit 345191
	return MALLOC_MUTEX_TRYLOCK(mutex);
Packit 345191
}
Packit 345191
Packit 345191
static inline void
Packit 345191
mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) {
Packit 345191
	if (config_stats) {
Packit 345191
		mutex_prof_data_t *data = &mutex->prof_data;
Packit 345191
		data->n_lock_ops++;
Packit 345191
		if (data->prev_owner != tsdn) {
Packit 345191
			data->prev_owner = tsdn;
Packit 345191
			data->n_owner_switches++;
Packit 345191
		}
Packit 345191
	}
Packit 345191
}
Packit 345191
Packit 345191
/* Trylock: return false if the lock is successfully acquired. */
Packit 345191
static inline bool
Packit 345191
malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
Packit 345191
	witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
Packit 345191
	if (isthreaded) {
Packit 345191
		if (malloc_mutex_trylock_final(mutex)) {
Packit 345191
			atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
Packit 345191
			return true;
Packit 345191
		}
Packit 345191
		mutex_owner_stats_update(tsdn, mutex);
Packit 345191
	}
Packit 345191
	witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
Packit 345191
Packit 345191
	return false;
Packit 345191
}
Packit 345191
Packit 345191
/* Aggregate lock prof data. */
Packit 345191
static inline void
Packit 345191
malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) {
Packit 345191
	nstime_add(&sum->tot_wait_time, &data->tot_wait_time);
Packit 345191
	if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) {
Packit 345191
		nstime_copy(&sum->max_wait_time, &data->max_wait_time);
Packit 345191
	}
Packit 345191
Packit 345191
	sum->n_wait_times += data->n_wait_times;
Packit 345191
	sum->n_spin_acquired += data->n_spin_acquired;
Packit 345191
Packit 345191
	if (sum->max_n_thds < data->max_n_thds) {
Packit 345191
		sum->max_n_thds = data->max_n_thds;
Packit 345191
	}
Packit 345191
	uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds,
Packit 345191
	    ATOMIC_RELAXED);
Packit 345191
	uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32(
Packit 345191
	    &data->n_waiting_thds, ATOMIC_RELAXED);
Packit 345191
	atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds,
Packit 345191
	    ATOMIC_RELAXED);
Packit 345191
	sum->n_owner_switches += data->n_owner_switches;
Packit 345191
	sum->n_lock_ops += data->n_lock_ops;
Packit 345191
}
Packit 345191
Packit 345191
static inline void
Packit 345191
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
Packit 345191
	witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
Packit 345191
	if (isthreaded) {
Packit 345191
		if (malloc_mutex_trylock_final(mutex)) {
Packit 345191
			malloc_mutex_lock_slow(mutex);
Packit 345191
			atomic_store_b(&mutex->locked, true, ATOMIC_RELAXED);
Packit 345191
		}
Packit 345191
		mutex_owner_stats_update(tsdn, mutex);
Packit 345191
	}
Packit 345191
	witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
Packit 345191
}
Packit 345191
Packit 345191
static inline void
Packit 345191
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
Packit 345191
	atomic_store_b(&mutex->locked, false, ATOMIC_RELAXED);
Packit 345191
	witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
Packit 345191
	if (isthreaded) {
Packit 345191
		MALLOC_MUTEX_UNLOCK(mutex);
Packit 345191
	}
Packit 345191
}
Packit 345191
Packit 345191
static inline void
Packit 345191
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
Packit 345191
	witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
Packit 345191
}
Packit 345191
Packit 345191
static inline void
Packit 345191
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
Packit 345191
	witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
Packit 345191
}
Packit 345191
Packit 345191
/* Copy the prof data from mutex for processing. */
Packit 345191
static inline void
Packit 345191
malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
Packit 345191
    malloc_mutex_t *mutex) {
Packit 345191
	mutex_prof_data_t *source = &mutex->prof_data;
Packit 345191
	/* Can only read holding the mutex. */
Packit 345191
	malloc_mutex_assert_owner(tsdn, mutex);
Packit 345191
Packit 345191
	/*
Packit 345191
	 * Not *really* allowed (we shouldn't be doing non-atomic loads of
Packit 345191
	 * atomic data), but the mutex protection makes this safe, and writing
Packit 345191
	 * a member-for-member copy is tedious for this situation.
Packit 345191
	 */
Packit 345191
	*data = *source;
Packit 345191
	/* n_wait_thds is not reported (modified w/o locking). */
Packit 345191
	atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
Packit 345191
}
Packit 345191
Packit 345191
static inline void
Packit 345191
malloc_mutex_prof_accum(tsdn_t *tsdn, mutex_prof_data_t *data,
Packit 345191
    malloc_mutex_t *mutex) {
Packit 345191
	mutex_prof_data_t *source = &mutex->prof_data;
Packit 345191
	/* Can only read holding the mutex. */
Packit 345191
	malloc_mutex_assert_owner(tsdn, mutex);
Packit 345191
Packit 345191
	nstime_add(&data->tot_wait_time, &source->tot_wait_time);
Packit 345191
	if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) {
Packit 345191
		nstime_copy(&data->max_wait_time, &source->max_wait_time);
Packit 345191
	}
Packit 345191
	data->n_wait_times += source->n_wait_times;
Packit 345191
	data->n_spin_acquired += source->n_spin_acquired;
Packit 345191
	if (data->max_n_thds < source->max_n_thds) {
Packit 345191
		data->max_n_thds = source->max_n_thds;
Packit 345191
	}
Packit 345191
	/* n_wait_thds is not reported. */
Packit 345191
	atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
Packit 345191
	data->n_owner_switches += source->n_owner_switches;
Packit 345191
	data->n_lock_ops += source->n_lock_ops;
Packit 345191
}
Packit 345191
Packit 345191
#endif /* JEMALLOC_INTERNAL_MUTEX_H */