#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H
#define JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H

#define ATOMIC_INIT(...) {__VA_ARGS__}

typedef enum {
	atomic_memory_order_relaxed,
	atomic_memory_order_acquire,
	atomic_memory_order_release,
	atomic_memory_order_acq_rel,
	atomic_memory_order_seq_cst
} atomic_memory_order_t;

ATOMIC_INLINE void
atomic_fence(atomic_memory_order_t mo) {
	/* Easy cases first: no barrier, and full barrier. */
	if (mo == atomic_memory_order_relaxed) {
		asm volatile("" ::: "memory");
		return;
	}
	if (mo == atomic_memory_order_seq_cst) {
		asm volatile("" ::: "memory");
		__sync_synchronize();
		asm volatile("" ::: "memory");
		return;
	}
	asm volatile("" ::: "memory");
#  if defined(__i386__) || defined(__x86_64__)
	/* This is implicit on x86. */
#  elif defined(__ppc64__)
	asm volatile("lwsync");
#  elif defined(__ppc__)
	asm volatile("sync");
#  elif defined(__sparc__) && defined(__arch64__)
	if (mo == atomic_memory_order_acquire) {
		asm volatile("membar #LoadLoad | #LoadStore");
	} else if (mo == atomic_memory_order_release) {
		asm volatile("membar #LoadStore | #StoreStore");
	} else {
		asm volatile("membar #LoadLoad | #LoadStore | #StoreStore");
	}
#  else
	__sync_synchronize();
#  endif
	asm volatile("" ::: "memory");
}
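
/*
 * Sketch of the intended pairing of atomic_fence() (illustrative only; the
 * `ready` flag, `payload`, compute(), and use() are hypothetical, and
 * `ready` is assumed to be an atomic_b_t as generated by jemalloc's
 * atomic.h from the macros below):
 *
 *	// Producer: publish payload, then set the flag.
 *	payload = compute();
 *	atomic_fence(atomic_memory_order_release);
 *	atomic_store_b(&ready, true, atomic_memory_order_relaxed);
 *
 *	// Consumer: observe the flag, then read payload.
 *	while (!atomic_load_b(&ready, atomic_memory_order_relaxed)) {
 *		// spin
 *	}
 *	atomic_fence(atomic_memory_order_acquire);
 *	use(payload);
 */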

/*
 * A correct implementation of seq_cst loads and stores on weakly ordered
 * architectures could do either of the following:
 *   1. store() is weak-fence -> store -> strong fence, load() is load ->
 *      strong-fence.
 *   2. store() is strong-fence -> store, load() is strong-fence -> load ->
 *      weak-fence.
 * The tricky thing is, load() and store() above can be the load or store
 * portions of a gcc __sync builtin, so we have to follow GCC's lead, which
 * means going with strategy 2.
 * On strongly ordered architectures, the natural strategy is to stick a strong
 * fence after seq_cst stores, and have naked loads.  So we want the strong
 * fences in different places on different architectures.
 * atomic_pre_sc_load_fence and atomic_post_sc_store_fence allow us to
 * accomplish this.
 */

ATOMIC_INLINE void
atomic_pre_sc_load_fence() {
#  if defined(__i386__) || defined(__x86_64__) ||			\
    (defined(__sparc__) && defined(__arch64__))
	atomic_fence(atomic_memory_order_relaxed);
#  else
	atomic_fence(atomic_memory_order_seq_cst);
#  endif
}

ATOMIC_INLINE void
atomic_post_sc_store_fence() {
#  if defined(__i386__) || defined(__x86_64__) ||			\
    (defined(__sparc__) && defined(__arch64__))
	atomic_fence(atomic_memory_order_seq_cst);
#  else
	atomic_fence(atomic_memory_order_relaxed);
#  endif

}
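
/*
 * Putting the pieces together, the seq_cst store and load generated below
 * expand to roughly the following (sketch only; which fences are strong
 * depends on the architecture checks above):
 *
 *	// atomic_store_*(a, val, seq_cst):
 *	atomic_fence(atomic_memory_order_release);
 *	a->repr = val;
 *	atomic_post_sc_store_fence();	// strong fence on x86/sparc64
 *
 *	// atomic_load_*(a, seq_cst):
 *	atomic_pre_sc_load_fence();	// strong fence on weakly ordered archs
 *	type result = a->repr;
 *	atomic_fence(atomic_memory_order_acquire);
 */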

#define JEMALLOC_GENERATE_ATOMICS(type, short_type,			\
    /* unused */ lg_size)						\
typedef struct {							\
	type volatile repr;						\
} atomic_##short_type##_t;						\
									\
ATOMIC_INLINE type							\
atomic_load_##short_type(const atomic_##short_type##_t *a,		\
    atomic_memory_order_t mo) {						\
	if (mo == atomic_memory_order_seq_cst) {			\
		atomic_pre_sc_load_fence();				\
	}								\
	type result = a->repr;						\
	if (mo != atomic_memory_order_relaxed) {			\
		atomic_fence(atomic_memory_order_acquire);		\
	}								\
	return result;							\
}									\
									\
ATOMIC_INLINE void							\
atomic_store_##short_type(atomic_##short_type##_t *a,			\
    type val, atomic_memory_order_t mo) {				\
	if (mo != atomic_memory_order_relaxed) {			\
		atomic_fence(atomic_memory_order_release);		\
	}								\
	a->repr = val;							\
	if (mo == atomic_memory_order_seq_cst) {			\
		atomic_post_sc_store_fence();				\
	}								\
}									\
									\
ATOMIC_INLINE type							\
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {						\
	/*								\
	 * Because of FreeBSD, we care about gcc 4.2, which doesn't have\
	 * an atomic exchange builtin.  We fake it with a CAS loop.	\
	 */								\
	while (true) {							\
		type old = a->repr;					\
		if (__sync_bool_compare_and_swap(&a->repr, old, val)) {	\
			return old;					\
		}							\
	}								\
}									\
									\
ATOMIC_INLINE bool							\
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a,	\
    type *expected, type desired, atomic_memory_order_t success_mo,	\
    atomic_memory_order_t failure_mo) {					\
	type prev = __sync_val_compare_and_swap(&a->repr, *expected,	\
	    desired);							\
	if (prev == *expected) {					\
		return true;						\
	} else {							\
		*expected = prev;					\
		return false;						\
	}								\
}									\
									\
ATOMIC_INLINE bool							\
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a,	\
    type *expected, type desired, atomic_memory_order_t success_mo,	\
    atomic_memory_order_t failure_mo) {					\
	type prev = __sync_val_compare_and_swap(&a->repr, *expected,	\
	    desired);							\
	if (prev == *expected) {					\
		return true;						\
	} else {							\
		*expected = prev;					\
		return false;						\
	}								\
}
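
/*
 * Example of how these generated atomics get used (illustrative only;
 * jemalloc's atomic.h performs the actual instantiations, and node_t,
 * `head`, and push() below are hypothetical):
 *
 *	JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR)
 *
 *	// Lock-free stack push built on the weak compare-exchange.
 *	static void
 *	push(atomic_p_t *head, node_t *node) {
 *		void *cur = atomic_load_p(head, atomic_memory_order_relaxed);
 *		do {
 *			node->next = cur;
 *		} while (!atomic_compare_exchange_weak_p(head, &cur, node,
 *		    atomic_memory_order_release, atomic_memory_order_relaxed));
 *	}
 */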

#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type,			\
    /* unused */ lg_size)						\
JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size)	\
									\
ATOMIC_INLINE type							\
atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {						\
	return __sync_fetch_and_add(&a->repr, val);			\
}									\
									\
ATOMIC_INLINE type							\
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {						\
	return __sync_fetch_and_sub(&a->repr, val);			\
}									\
									\
ATOMIC_INLINE type							\
atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {						\
	return __sync_fetch_and_and(&a->repr, val);			\
}									\
									\
ATOMIC_INLINE type							\
atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {						\
	return __sync_fetch_and_or(&a->repr, val);			\
}									\
									\
ATOMIC_INLINE type							\
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {						\
	return __sync_fetch_and_xor(&a->repr, val);			\
}
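
/*
 * Example use of the integer atomics (illustrative only; jemalloc's atomic.h
 * instantiates e.g. JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2), and the
 * `nrequests` counter here is hypothetical):
 *
 *	static atomic_u32_t nrequests = ATOMIC_INIT(0);
 *
 *	// Fetch the previous value and increment.  The mo argument is
 *	// ignored in this backend: the __sync builtins are full barriers,
 *	// so any requested ordering is satisfied.
 *	uint32_t prev = atomic_fetch_add_u32(&nrequests, 1,
 *	    atomic_memory_order_relaxed);
 */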
#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H */