#define JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"

/******************************************************************************/
/* Data. */

rtree_t		extents_rtree;
/* Keyed by the address of the extent_t being protected. */
mutex_pool_t	extent_mutex_pool;

size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;

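/*
 * One bit per page size class, plus one slot for extents too large for any
 * class; this mirrors the heaps[SC_NPSIZES + 1] array in each extents_t.  A
 * set bit marks an empty heap, so bitmap_ffu() can jump straight to the first
 * non-empty heap at or above a requested size index.
 */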
static const bitmap_info_t extents_bitmap_info =
    BITMAP_INFO_INITIALIZER(SC_NPSIZES+1);

static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit,
    unsigned arena_ind);
static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind);
static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
static bool extent_decommit_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#ifdef PAGES_CAN_PURGE_LAZY
static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
#ifdef PAGES_CAN_PURGE_FORCED
static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t size_a, size_t size_b, bool committed,
    unsigned arena_ind);
static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
    bool growing_retained);
static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
    size_t size_a, void *addr_b, size_t size_b, bool committed,
    unsigned arena_ind);
static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
    bool growing_retained);

const extent_hooks_t	extent_hooks_default = {
	extent_alloc_default,
	extent_dalloc_default,
	extent_destroy_default,
	extent_commit_default,
	extent_decommit_default
#ifdef PAGES_CAN_PURGE_LAZY
	,
	extent_purge_lazy_default
#else
	,
	NULL
#endif
#ifdef PAGES_CAN_PURGE_FORCED
	,
	extent_purge_forced_default
#else
	,
	NULL
#endif
	,
	extent_split_default,
	extent_merge_default
};
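
/*
 * The table above provides the built-in hook implementations.  An application
 * can override them per arena through the "arena.<i>.extent_hooks" mallctl;
 * roughly (a sketch only -- my_hooks/my_alloc are hypothetical and error
 * handling is omitted):
 *
 *	static extent_hooks_t my_hooks;
 *	extent_hooks_t *old_hooks, *new_hooks = &my_hooks;
 *	size_t sz = sizeof(old_hooks);
 *
 *	mallctl("arena.0.extent_hooks", (void *)&old_hooks, &sz, NULL, 0);
 *	my_hooks = *old_hooks;
 *	my_hooks.alloc = my_alloc;	// Hypothetical replacement callback.
 *	mallctl("arena.0.extent_hooks", NULL, NULL, (void *)&new_hooks,
 *	    sizeof(new_hooks));
 *
 * Entries left NULL here (the purge hooks on platforms that cannot purge)
 * mean the corresponding operation is unsupported.
 */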

/* Used exclusively for gdump triggering. */
static atomic_zu_t curpages;
static atomic_zu_t highpages;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
    size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
    bool *zero, bool *commit, bool growing_retained);
static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent, bool *coalesced, bool growing_retained);
static void extent_record(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
    bool growing_retained);

/******************************************************************************/

#define ATTR_NONE /* does nothing */

ph_gen(ATTR_NONE, extent_avail_, extent_tree_t, extent_t, ph_link,
    extent_esnead_comp)

#undef ATTR_NONE

typedef enum {
	lock_result_success,
	lock_result_failure,
	lock_result_no_extent
} lock_result_t;

static lock_result_t
extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
    extent_t **result, bool inactive_only) {
	extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
	    elm, true);

	/* Slab implies active extents and should be skipped. */
	if (extent1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn,
	    &extents_rtree, elm, true))) {
		return lock_result_no_extent;
	}

	/*
	 * It's possible that the extent changed out from under us, and with it
	 * the leaf->extent mapping.  We have to recheck while holding the lock.
	 */
	extent_lock(tsdn, extent1);
	extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
	    &extents_rtree, elm, true);

	if (extent1 == extent2) {
		*result = extent1;
		return lock_result_success;
	} else {
		extent_unlock(tsdn, extent1);
		return lock_result_failure;
	}
}

/*
 * Returns a pool-locked extent_t * if there's one associated with the given
 * address, and NULL otherwise.
 */
static extent_t *
extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr,
    bool inactive_only) {
	extent_t *ret = NULL;
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
	    rtree_ctx, (uintptr_t)addr, false, false);
	if (elm == NULL) {
		return NULL;
	}
	lock_result_t lock_result;
	do {
		lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret,
		    inactive_only);
	} while (lock_result == lock_result_failure);
	return ret;
}

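/*
 * extent_t structures themselves are recycled through a per-arena avail heap;
 * only when that heap is empty is a new one carved out of the arena's base
 * allocator.
 */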
extent_t *
extent_alloc(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
	extent_t *extent = extent_avail_first(&arena->extent_avail);
	if (extent == NULL) {
		malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
		return base_alloc_extent(tsdn, arena->base);
	}
	extent_avail_remove(&arena->extent_avail, extent);
	atomic_fetch_sub_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
	return extent;
}

void
extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
	extent_avail_insert(&arena->extent_avail, extent);
	atomic_fetch_add_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
}

extent_hooks_t *
extent_hooks_get(arena_t *arena) {
	return base_extent_hooks_get(arena->base);
}

extent_hooks_t *
extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
	background_thread_info_t *info;
	if (have_background_thread) {
		info = arena_background_thread_info_get(arena);
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	}
	extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
	if (have_background_thread) {
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
	}

	return ret;
}

static void
extent_hooks_assure_initialized(arena_t *arena,
    extent_hooks_t **r_extent_hooks) {
	if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
		*r_extent_hooks = extent_hooks_get(arena);
	}
}

#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_floor(size_t size) {
	size_t ret;
	pszind_t pind;

	assert(size > 0);
	assert((size & PAGE_MASK) == 0);

	pind = sz_psz2ind(size - sz_large_pad + 1);
	if (pind == 0) {
		/*
		 * Avoid underflow.  This short-circuit would also do the right
		 * thing for all sizes in the range for which there are
		 * PAGE-spaced size classes, but it's simplest to just handle
		 * the one case that would cause erroneous results.
		 */
		return size;
	}
	ret = sz_pind2sz(pind - 1) + sz_large_pad;
	assert(ret <= size);
	return ret;
}

#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_ceil(size_t size) {
	size_t ret;

	assert(size > 0);
	assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
	assert((size & PAGE_MASK) == 0);

	ret = extent_size_quantize_floor(size);
	if (ret < size) {
		/*
		 * Skip a quantization that may have an adequately large extent,
		 * because under-sized extents may be mixed in.  This only
		 * happens when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
		    sz_large_pad;
	}
	return ret;
}
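
/*
 * Example (assuming 4 KiB pages, sz_large_pad == 0, and the default four size
 * classes per doubling): the page size classes between 64 KiB and 128 KiB are
 * 80, 96, 112 and 128 KiB, so extent_size_quantize_floor(100 KiB) == 96 KiB
 * and extent_size_quantize_ceil(100 KiB) == 112 KiB.  Exact boundaries depend
 * on the configured page size and size class layout.
 */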

/* Generate pairing heap functions. */
ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)

bool
extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
    bool delay_coalesce) {
	if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}
	for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
		extent_heap_new(&extents->heaps[i]);
	}
	bitmap_init(extents->bitmap, &extents_bitmap_info, true);
	extent_list_init(&extents->lru);
	atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
	extents->state = state;
	extents->delay_coalesce = delay_coalesce;
	return false;
}

extent_state_t
extents_state_get(const extents_t *extents) {
	return extents->state;
}

size_t
extents_npages_get(extents_t *extents) {
	return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
}

size_t
extents_nextents_get(extents_t *extents, pszind_t pind) {
	return atomic_load_zu(&extents->nextents[pind], ATOMIC_RELAXED);
}

size_t
extents_nbytes_get(extents_t *extents, pszind_t pind) {
	return atomic_load_zu(&extents->nbytes[pind], ATOMIC_RELAXED);
}

static void
extents_stats_add(extents_t *extent, pszind_t pind, size_t sz) {
	size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
	atomic_store_zu(&extent->nextents[pind], cur + 1, ATOMIC_RELAXED);
	cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
	atomic_store_zu(&extent->nbytes[pind], cur + sz, ATOMIC_RELAXED);
}

static void
extents_stats_sub(extents_t *extent, pszind_t pind, size_t sz) {
	size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
	atomic_store_zu(&extent->nextents[pind], cur - 1, ATOMIC_RELAXED);
	cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
	atomic_store_zu(&extent->nbytes[pind], cur - sz, ATOMIC_RELAXED);
}

static void
extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);
	assert(extent_state_get(extent) == extents->state);

	size_t size = extent_size_get(extent);
	size_t psz = extent_size_quantize_floor(size);
	pszind_t pind = sz_psz2ind(psz);
	if (extent_heap_empty(&extents->heaps[pind])) {
		bitmap_unset(extents->bitmap, &extents_bitmap_info,
		    (size_t)pind);
	}
	extent_heap_insert(&extents->heaps[pind], extent);

	if (config_stats) {
		extents_stats_add(extents, pind, size);
	}

	extent_list_append(&extents->lru, extent);
	size_t npages = size >> LG_PAGE;
	/*
	 * All modifications to npages hold the mutex (as asserted above), so we
	 * don't need an atomic fetch-add; we can get by with a load followed by
	 * a store.
	 */
	size_t cur_extents_npages =
	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
	atomic_store_zu(&extents->npages, cur_extents_npages + npages,
	    ATOMIC_RELAXED);
}

static void
extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);
	assert(extent_state_get(extent) == extents->state);

	size_t size = extent_size_get(extent);
	size_t psz = extent_size_quantize_floor(size);
	pszind_t pind = sz_psz2ind(psz);
	extent_heap_remove(&extents->heaps[pind], extent);

	if (config_stats) {
		extents_stats_sub(extents, pind, size);
	}

	if (extent_heap_empty(&extents->heaps[pind])) {
		bitmap_set(extents->bitmap, &extents_bitmap_info,
		    (size_t)pind);
	}
	extent_list_remove(&extents->lru, extent);
	size_t npages = size >> LG_PAGE;
	/*
	 * As in extents_insert_locked, we hold extents->mtx and so don't need
	 * atomic operations for updating extents->npages.
	 */
	size_t cur_extents_npages =
	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
	assert(cur_extents_npages >= npages);
	atomic_store_zu(&extents->npages,
	    cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
}

/*
 * Find an extent with size [min_size, max_size) to satisfy the alignment
 * requirement.  For each size, try only the first extent in the heap.
 */
static extent_t *
extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
    size_t alignment) {
	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
	pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));

	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
	    &extents_bitmap_info, (size_t)pind); i < pind_max; i =
	    (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
	    (size_t)i+1)) {
		assert(i < SC_NPSIZES);
		assert(!extent_heap_empty(&extents->heaps[i]));
		extent_t *extent = extent_heap_first(&extents->heaps[i]);
		uintptr_t base = (uintptr_t)extent_base_get(extent);
		size_t candidate_size = extent_size_get(extent);
		assert(candidate_size >= min_size);

		uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
		    PAGE_CEILING(alignment));
		if (base > next_align || base + candidate_size <= next_align) {
			/* Overflow or not crossing the next alignment. */
			continue;
		}

		size_t leadsize = next_align - base;
		if (candidate_size - leadsize >= min_size) {
			return extent;
		}
	}

	return NULL;
}

/*
 * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
 * large enough.
 */
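/*
 * Note: for dirty extents (delay_coalesce), the search below also gives up
 * once the candidate size class exceeds the request by more than a factor of
 * 2^opt_lg_extent_max_active_fit, so that a large cached extent is not split
 * up to serve a much smaller request.
 */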
static extent_t *
extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t size) {
	extent_t *ret = NULL;

	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));

	if (!maps_coalesce && !opt_retain) {
		/*
		 * No split / merge allowed (Windows w/o retain). Try exact fit
		 * only.
		 */
		return extent_heap_empty(&extents->heaps[pind]) ? NULL :
		    extent_heap_first(&extents->heaps[pind]);
	}

	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
	    &extents_bitmap_info, (size_t)pind);
	    i < SC_NPSIZES + 1;
	    i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
	    (size_t)i+1)) {
		assert(!extent_heap_empty(&extents->heaps[i]));
		extent_t *extent = extent_heap_first(&extents->heaps[i]);
		assert(extent_size_get(extent) >= size);
		/*
		 * In order to reduce fragmentation, avoid reusing and splitting
		 * large extents for much smaller sizes.
		 *
		 * Only do this check for dirty extents (delay_coalesce).
		 */
		if (extents->delay_coalesce &&
		    (sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
			break;
		}
		if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
			ret = extent;
		}
		if (i == SC_NPSIZES) {
			break;
		}
		assert(i < SC_NPSIZES);
	}

	return ret;
}

/*
 * Do first-fit extent selection, where the selection policy choice is
 * based on extents->delay_coalesce.
 */
static extent_t *
extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t esize, size_t alignment) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);

	size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
	/* Beware size_t wrap-around. */
	if (max_size < esize) {
		return NULL;
	}

	extent_t *extent =
	    extents_first_fit_locked(tsdn, arena, extents, max_size);

	if (alignment > PAGE && extent == NULL) {
		/*
		 * max_size guarantees the alignment requirement but is rather
		 * pessimistic.  Next we try to satisfy the aligned allocation
		 * with sizes in [esize, max_size).
		 */
		extent = extents_fit_alignment(extents, esize, max_size,
		    alignment);
	}

	return extent;
}

static bool
extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent) {
	extent_state_set(extent, extent_state_active);
	bool coalesced;
	extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
	    extents, extent, &coalesced, false);
	extent_state_set(extent, extents_state_get(extents));

	if (!coalesced) {
		return true;
	}
	extents_insert_locked(tsdn, extents, extent);
	return false;
}

extent_t *
extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	assert(size + pad != 0);
	assert(alignment != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
	    new_addr, size, pad, alignment, slab, szind, zero, commit, false);
	assert(extent == NULL || extent_dumpable_get(extent));
	return extent;
}

void
extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent) {
	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	assert(extent_dumpable_get(extent));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_addr_set(extent, extent_base_get(extent));
	extent_zeroed_set(extent, false);

	extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
}

extent_t *
extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, size_t npages_min) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	malloc_mutex_lock(tsdn, &extents->mtx);

	/*
	 * Get the LRU coalesced extent, if any.  If coalescing was delayed,
	 * the loop will iterate until the LRU extent is fully coalesced.
	 */
	extent_t *extent;
	while (true) {
		/* Get the LRU extent, if any. */
		extent = extent_list_first(&extents->lru);
		if (extent == NULL) {
			goto label_return;
		}
		/* Check the eviction limit. */
		size_t extents_npages = atomic_load_zu(&extents->npages,
		    ATOMIC_RELAXED);
		if (extents_npages <= npages_min) {
			extent = NULL;
			goto label_return;
		}
		extents_remove_locked(tsdn, extents, extent);
		if (!extents->delay_coalesce) {
			break;
		}
		/* Try to coalesce. */
		if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
		    rtree_ctx, extents, extent)) {
			break;
		}
		/*
		 * The LRU extent was just coalesced and the result placed in
		 * the LRU at its neighbor's position.  Start over.
		 */
	}

	/*
	 * Either mark the extent active or deregister it to protect against
	 * concurrent operations.
	 */
	switch (extents_state_get(extents)) {
	case extent_state_active:
		not_reached();
	case extent_state_dirty:
	case extent_state_muzzy:
		extent_state_set(extent, extent_state_active);
		break;
	case extent_state_retained:
		extent_deregister(tsdn, extent);
		break;
	default:
		not_reached();
	}

label_return:
	malloc_mutex_unlock(tsdn, &extents->mtx);
	return extent;
}

/*
 * This can only happen when we fail to allocate a new extent struct (which
 * indicates OOM), e.g. when trying to split an existing extent.
 */
static void
extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent, bool growing_retained) {
	size_t sz = extent_size_get(extent);
	if (config_stats) {
		arena_stats_accum_zu(&arena->stats.abandoned_vm, sz);
	}
	/*
	 * Leak extent after making sure its pages have already been purged, so
	 * that this is only a virtual memory leak.
	 */
	if (extents_state_get(extents) == extent_state_dirty) {
		if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
		    extent, 0, sz, growing_retained)) {
			extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
			    extent, 0, extent_size_get(extent),
			    growing_retained);
		}
	}
	extent_dalloc(tsdn, arena, extent);
}

void
extents_prefork(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_prefork(tsdn, &extents->mtx);
}

void
extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_postfork_parent(tsdn, &extents->mtx);
}

void
extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_postfork_child(tsdn, &extents->mtx);
}

static void
extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent) {
	assert(extent_arena_get(extent) == arena);
	assert(extent_state_get(extent) == extent_state_active);

	extent_state_set(extent, extents_state_get(extents));
	extents_insert_locked(tsdn, extents, extent);
}

static void
extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent) {
	malloc_mutex_lock(tsdn, &extents->mtx);
	extent_deactivate_locked(tsdn, arena, extents, extent);
	malloc_mutex_unlock(tsdn, &extents->mtx);
}

static void
extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent) {
	assert(extent_arena_get(extent) == arena);
	assert(extent_state_get(extent) == extents_state_get(extents));

	extents_remove_locked(tsdn, extents, extent);
	extent_state_set(extent, extent_state_active);
}

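/*
 * Both the first and the last page of every extent are mapped in the rtree
 * (and, for slabs, every interior page as well); this is what lets the
 * coalescing and deallocation paths find an extent's neighbors directly from
 * an address.
 */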
static bool
Packit 345191
extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
Packit 345191
    const extent_t *extent, bool dependent, bool init_missing,
Packit 345191
    rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
Packit 345191
	*r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
Packit 345191
	    (uintptr_t)extent_base_get(extent), dependent, init_missing);
Packit 345191
	if (!dependent && *r_elm_a == NULL) {
Packit 345191
		return true;
Packit 345191
	}
Packit 345191
	assert(*r_elm_a != NULL);
Packit 345191
Packit 345191
	*r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
Packit 345191
	    (uintptr_t)extent_last_get(extent), dependent, init_missing);
Packit 345191
	if (!dependent && *r_elm_b == NULL) {
Packit 345191
		return true;
Packit 345191
	}
Packit 345191
	assert(*r_elm_b != NULL);
Packit 345191
Packit 345191
	return false;
Packit 345191
}
Packit 345191
Packit 345191
static void
Packit 345191
extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
Packit 345191
    rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
Packit 345191
	rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
Packit 345191
	if (elm_b != NULL) {
Packit 345191
		rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
Packit 345191
		    slab);
Packit 345191
	}
Packit 345191
}
Packit 345191
Packit 345191
static void
Packit 345191
extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
Packit 345191
    szind_t szind) {
Packit 345191
	assert(extent_slab_get(extent));
Packit 345191
Packit 345191
	/* Register interior. */
Packit 345191
	for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
Packit 345191
		rtree_write(tsdn, &extents_rtree, rtree_ctx,
Packit 345191
		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
Packit 345191
		    LG_PAGE), extent, szind, true);
Packit 345191
	}
Packit 345191
}
Packit 345191
Packit 345191
static void
Packit 345191
extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
Packit 345191
	cassert(config_prof);
Packit 345191
	/* prof_gdump() requirement. */
Packit 345191
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
Packit 345191
	    WITNESS_RANK_CORE, 0);
Packit 345191
Packit 345191
	if (opt_prof && extent_state_get(extent) == extent_state_active) {
Packit 345191
		size_t nadd = extent_size_get(extent) >> LG_PAGE;
Packit 345191
		size_t cur = atomic_fetch_add_zu(&curpages, nadd,
Packit 345191
		    ATOMIC_RELAXED) + nadd;
Packit 345191
		size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
Packit 345191
		while (cur > high && !atomic_compare_exchange_weak_zu(
Packit 345191
		    &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
Packit 345191
			/*
Packit 345191
			 * Don't refresh cur, because it may have decreased
Packit 345191
			 * since this thread lost the highpages update race.
Packit 345191
			 * Note that high is updated in case of CAS failure.
Packit 345191
			 */
Packit 345191
		}
Packit 345191
		if (cur > high && prof_gdump_get_unlocked()) {
Packit 345191
			prof_gdump(tsdn);
Packit 345191
		}
Packit 345191
	}
Packit 345191
}
Packit 345191
Packit 345191
static void
Packit 345191
extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
Packit 345191
	cassert(config_prof);
Packit 345191
Packit 345191
	if (opt_prof && extent_state_get(extent) == extent_state_active) {
Packit 345191
		size_t nsub = extent_size_get(extent) >> LG_PAGE;
Packit 345191
		assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
Packit 345191
		atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
Packit 345191
	}
Packit 345191
}
Packit 345191
Packit 345191
static bool
Packit 345191
extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
Packit 345191
	rtree_ctx_t rtree_ctx_fallback;
Packit 345191
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
Packit 345191
	rtree_leaf_elm_t *elm_a, *elm_b;
Packit 345191
Packit 345191
	/*
Packit 345191
	 * We need to hold the lock to protect against a concurrent coalesce
Packit 345191
	 * operation that sees us in a partial state.
Packit 345191
	 */
Packit 345191
	extent_lock(tsdn, extent);
Packit 345191
Packit 345191
	if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
Packit 345191
	    &elm_a, &elm_b)) {
Packit 345191
		extent_unlock(tsdn, extent);
Packit 345191
		return true;
Packit 345191
	}
Packit 345191
Packit 345191
	szind_t szind = extent_szind_get_maybe_invalid(extent);
Packit 345191
	bool slab = extent_slab_get(extent);
Packit 345191
	extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
Packit 345191
	if (slab) {
Packit 345191
		extent_interior_register(tsdn, rtree_ctx, extent, szind);
Packit 345191
	}
Packit 345191
Packit 345191
	extent_unlock(tsdn, extent);
Packit 345191
Packit 345191
	if (config_prof && gdump_add) {
Packit 345191
		extent_gdump_add(tsdn, extent);
Packit 345191
	}
Packit 345191
Packit 345191
	return false;
Packit 345191
}
Packit 345191
Packit 345191
static bool
Packit 345191
extent_register(tsdn_t *tsdn, extent_t *extent) {
Packit 345191
	return extent_register_impl(tsdn, extent, true);
Packit 345191
}
Packit 345191
Packit 345191
static bool
Packit 345191
extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
Packit 345191
	return extent_register_impl(tsdn, extent, false);
Packit 345191
}
Packit 345191
Packit 345191
static void
Packit 345191
extent_reregister(tsdn_t *tsdn, extent_t *extent) {
Packit 345191
	bool err = extent_register(tsdn, extent);
Packit 345191
	assert(!err);
Packit 345191
}
Packit 345191
Packit 345191
/*
Packit 345191
 * Removes all pointers to the given extent from the global rtree indices for
Packit 345191
 * its interior.  This is relevant for slab extents, for which we need to do
Packit 345191
 * metadata lookups at places other than the head of the extent.  We deregister
Packit 345191
 * on the interior, then, when an extent moves from being an active slab to an
Packit 345191
 * inactive state.
Packit 345191
 */
Packit 345191
static void
Packit 345191
extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
Packit 345191
    extent_t *extent) {
Packit 345191
	size_t i;
Packit 345191
Packit 345191
	assert(extent_slab_get(extent));
Packit 345191
Packit 345191
	for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
Packit 345191
		rtree_clear(tsdn, &extents_rtree, rtree_ctx,
Packit 345191
		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
Packit 345191
		    LG_PAGE));
Packit 345191
	}
Packit 345191
}
Packit 345191
Packit 345191
/*
Packit 345191
 * Removes all pointers to the given extent from the global rtree.
Packit 345191
 */
Packit 345191
static void
Packit 345191
extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
Packit 345191
	rtree_ctx_t rtree_ctx_fallback;
Packit 345191
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
Packit 345191
	rtree_leaf_elm_t *elm_a, *elm_b;
Packit 345191
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
Packit 345191
	    &elm_a, &elm_b);
Packit 345191
Packit 345191
	extent_lock(tsdn, extent);
Packit 345191
Packit 345191
	extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, SC_NSIZES, false);
Packit 345191
	if (extent_slab_get(extent)) {
Packit 345191
		extent_interior_deregister(tsdn, rtree_ctx, extent);
Packit 345191
		extent_slab_set(extent, false);
Packit 345191
	}
Packit 345191
Packit 345191
	extent_unlock(tsdn, extent);
Packit 345191
Packit 345191
	if (config_prof && gdump) {
Packit 345191
		extent_gdump_sub(tsdn, extent);
Packit 345191
	}
Packit 345191
}
Packit 345191
Packit 345191
static void
Packit 345191
extent_deregister(tsdn_t *tsdn, extent_t *extent) {
Packit 345191
	extent_deregister_impl(tsdn, extent, true);
Packit 345191
}
Packit 345191
Packit 345191
static void
Packit 345191
extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
Packit 345191
	extent_deregister_impl(tsdn, extent, false);
Packit 345191
}
Packit 345191
Packit 345191
/*
Packit 345191
 * Tries to find and remove an extent from extents that can be used for the
Packit 345191
 * given allocation request.
Packit 345191
 */
Packit 345191
static extent_t *
Packit 345191
extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
Packit 345191
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
Packit 345191
    bool growing_retained) {
Packit 345191
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
Packit 345191
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
Packit 345191
	assert(alignment > 0);
Packit 345191
	if (config_debug && new_addr != NULL) {
Packit 345191
		/*
Packit 345191
		 * Non-NULL new_addr has two use cases:
Packit 345191
		 *
Packit 345191
		 *   1) Recycle a known-extant extent, e.g. during purging.
Packit 345191
		 *   2) Perform in-place expanding reallocation.
Packit 345191
		 *
Packit 345191
		 * Regardless of use case, new_addr must either refer to a
Packit 345191
		 * non-existing extent, or to the base of an extant extent,
Packit 345191
		 * since only active slabs support interior lookups (which of
Packit 345191
		 * course cannot be recycled).
Packit 345191
		 */
Packit 345191
		assert(PAGE_ADDR2BASE(new_addr) == new_addr);
Packit 345191
		assert(pad == 0);
Packit 345191
		assert(alignment <= PAGE);
Packit 345191
	}
Packit 345191
Packit 345191
	size_t esize = size + pad;
Packit 345191
	malloc_mutex_lock(tsdn, &extents->mtx);
Packit 345191
	extent_hooks_assure_initialized(arena, r_extent_hooks);
Packit 345191
	extent_t *extent;
Packit 345191
	if (new_addr != NULL) {
Packit 345191
		extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr,
Packit 345191
		    false);
Packit 345191
		if (extent != NULL) {
Packit 345191
			/*
Packit 345191
			 * We might null-out extent to report an error, but we
Packit 345191
			 * still need to unlock the associated mutex after.
Packit 345191
			 */
Packit 345191
			extent_t *unlock_extent = extent;
Packit 345191
			assert(extent_base_get(extent) == new_addr);
Packit 345191
			if (extent_arena_get(extent) != arena ||
Packit 345191
			    extent_size_get(extent) < esize ||
Packit 345191
			    extent_state_get(extent) !=
Packit 345191
			    extents_state_get(extents)) {
Packit 345191
				extent = NULL;
Packit 345191
			}
Packit 345191
			extent_unlock(tsdn, unlock_extent);
Packit 345191
		}
Packit 345191
	} else {
Packit 345191
		extent = extents_fit_locked(tsdn, arena, extents, esize,
Packit 345191
		    alignment);
Packit 345191
	}
Packit 345191
	if (extent == NULL) {
Packit 345191
		malloc_mutex_unlock(tsdn, &extents->mtx);
Packit 345191
		return NULL;
Packit 345191
	}
Packit 345191
Packit 345191
	extent_activate_locked(tsdn, arena, extents, extent);
Packit 345191
	malloc_mutex_unlock(tsdn, &extents->mtx);
Packit 345191
Packit 345191
	return extent;
Packit 345191
}
Packit 345191
Packit 345191
/*
Packit 345191
 * Given an allocation request and an extent guaranteed to be able to satisfy
Packit 345191
 * it, this splits off lead and trail extents, leaving extent pointing to an
Packit 345191
 * extent satisfying the allocation.
Packit 345191
 * This function doesn't put lead or trail into any extents_t; it's the caller's
Packit 345191
 * job to ensure that they can be reused.
Packit 345191
 */
Packit 345191
typedef enum {
Packit 345191
	/*
Packit 345191
	 * Split successfully.  lead, extent, and trail, are modified to extents
Packit 345191
	 * describing the ranges before, in, and after the given allocation.
Packit 345191
	 */
Packit 345191
	extent_split_interior_ok,
Packit 345191
	/*
Packit 345191
	 * The extent can't satisfy the given allocation request.  None of the
Packit 345191
	 * input extent_t *s are touched.
Packit 345191
	 */
Packit 345191
	extent_split_interior_cant_alloc,
Packit 345191
	/*
Packit 345191
	 * In a potentially invalid state.  Must leak (if *to_leak is non-NULL),
Packit 345191
	 * and salvage what's still salvageable (if *to_salvage is non-NULL).
Packit 345191
	 * None of lead, extent, or trail are valid.
Packit 345191
	 */
Packit 345191
	extent_split_interior_error
Packit 345191
} extent_split_interior_result_t;
Packit 345191
Packit 345191
static extent_split_interior_result_t
Packit 345191
extent_split_interior(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx,
Packit 345191
    /* The result of splitting, in case of success. */
Packit 345191
    extent_t **extent, extent_t **lead, extent_t **trail,
Packit 345191
    /* The mess to clean up, in case of error. */
Packit 345191
    extent_t **to_leak, extent_t **to_salvage,
Packit 345191
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
Packit 345191
    szind_t szind, bool growing_retained) {
Packit 345191
	size_t esize = size + pad;
Packit 345191
	size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
Packit 345191
	    PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
Packit 345191
	assert(new_addr == NULL || leadsize == 0);
Packit 345191
	if (extent_size_get(*extent) < leadsize + esize) {
Packit 345191
		return extent_split_interior_cant_alloc;
Packit 345191
	}
Packit 345191
	size_t trailsize = extent_size_get(*extent) - leadsize - esize;
Packit 345191
Packit 345191
	*lead = NULL;
Packit 345191
	*trail = NULL;
Packit 345191
	*to_leak = NULL;
Packit 345191
	*to_salvage = NULL;
Packit 345191
Packit 345191
	/* Split the lead. */
Packit 345191
	if (leadsize != 0) {
Packit 345191
		*lead = *extent;
Packit 345191
		*extent = extent_split_impl(tsdn, arena, r_extent_hooks,
Packit 345191
		    *lead, leadsize, SC_NSIZES, false, esize + trailsize, szind,
Packit 345191
		    slab, growing_retained);
Packit 345191
		if (*extent == NULL) {
Packit 345191
			*to_leak = *lead;
Packit 345191
			*lead = NULL;
Packit 345191
			return extent_split_interior_error;
Packit 345191
		}
Packit 345191
	}
Packit 345191
Packit 345191
	/* Split the trail. */
Packit 345191
	if (trailsize != 0) {
Packit 345191
		*trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
Packit 345191
		    esize, szind, slab, trailsize, SC_NSIZES, false,
Packit 345191
		    growing_retained);
Packit 345191
		if (*trail == NULL) {
Packit 345191
			*to_leak = *extent;
Packit 345191
			*to_salvage = *lead;
Packit 345191
			*lead = NULL;
Packit 345191
			*extent = NULL;
Packit 345191
			return extent_split_interior_error;
Packit 345191
		}
Packit 345191
	}
Packit 345191
Packit 345191
	if (leadsize == 0 && trailsize == 0) {
Packit 345191
		/*
Packit 345191
		 * Splitting causes szind to be set as a side effect, but no
Packit 345191
		 * splitting occurred.
Packit 345191
		 */
Packit 345191
		extent_szind_set(*extent, szind);
Packit 345191
		if (szind != SC_NSIZES) {
Packit 345191
			rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
Packit 345191
			    (uintptr_t)extent_addr_get(*extent), szind, slab);
Packit 345191
			if (slab && extent_size_get(*extent) > PAGE) {
Packit 345191
				rtree_szind_slab_update(tsdn, &extents_rtree,
Packit 345191
				    rtree_ctx,
Packit 345191
				    (uintptr_t)extent_past_get(*extent) -
Packit 345191
				    (uintptr_t)PAGE, szind, slab);
Packit 345191
			}
Packit 345191
		}
Packit 345191
	}
Packit 345191
Packit 345191
	return extent_split_interior_ok;
Packit 345191
}
Packit 345191
Packit 345191
/*
Packit 345191
 * This fulfills the indicated allocation request out of the given extent (which
Packit 345191
 * the caller should have ensured was big enough).  If there's any unused space
Packit 345191
 * before or after the resulting allocation, that space is given its own extent
Packit 345191
 * and put back into extents.
Packit 345191
 */
Packit 345191
static extent_t *
Packit 345191
extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
Packit 345191
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
Packit 345191
    szind_t szind, extent_t *extent, bool growing_retained) {
Packit 345191
	extent_t *lead;
Packit 345191
	extent_t *trail;
Packit 345191
	extent_t *to_leak;
Packit 345191
	extent_t *to_salvage;
Packit 345191
Packit 345191
	extent_split_interior_result_t result = extent_split_interior(
Packit 345191
	    tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
Packit 345191
	    &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
Packit 345191
	    growing_retained);
Packit 345191
Packit 345191
	if (!maps_coalesce && result != extent_split_interior_ok
Packit 345191
	    && !opt_retain) {
Packit 345191
		/*
Packit 345191
		 * Split isn't supported (implies Windows w/o retain).  Avoid
Packit 345191
		 * leaking the extents.
Packit 345191
		 */
Packit 345191
		assert(to_leak != NULL && lead == NULL && trail == NULL);
Packit 345191
		extent_deactivate(tsdn, arena, extents, to_leak);
Packit 345191
		return NULL;
Packit 345191
	}
Packit 345191
Packit 345191
	if (result == extent_split_interior_ok) {
Packit 345191
		if (lead != NULL) {
Packit 345191
			extent_deactivate(tsdn, arena, extents, lead);
Packit 345191
		}
Packit 345191
		if (trail != NULL) {
Packit 345191
			extent_deactivate(tsdn, arena, extents, trail);
Packit 345191
		}
Packit 345191
		return extent;
Packit 345191
	} else {
Packit 345191
		/*
Packit 345191
		 * We should have picked an extent that was large enough to
Packit 345191
		 * fulfill our allocation request.
Packit 345191
		 */
Packit 345191
		assert(result == extent_split_interior_error);
Packit 345191
		if (to_salvage != NULL) {
Packit 345191
			extent_deregister(tsdn, to_salvage);
Packit 345191
		}
Packit 345191
		if (to_leak != NULL) {
Packit 345191
			void *leak = extent_base_get(to_leak);
Packit 345191
			extent_deregister_no_gdump_sub(tsdn, to_leak);
Packit 345191
			extents_abandon_vm(tsdn, arena, r_extent_hooks, extents,
Packit 345191
			    to_leak, growing_retained);
Packit 345191
			assert(extent_lock_from_addr(tsdn, rtree_ctx, leak,
Packit 345191
			    false) == NULL);
Packit 345191
		}
Packit 345191
		return NULL;
Packit 345191
	}
Packit 345191
	unreachable();
Packit 345191
}
Packit 345191
Packit 345191
static bool
Packit 345191
extent_need_manual_zero(arena_t *arena) {
Packit 345191
	/*
Packit 345191
	 * Need to manually zero the extent on repopulating if either; 1) non
Packit 345191
	 * default extent hooks installed (in which case the purge semantics may
Packit 345191
	 * change); or 2) transparent huge pages enabled.
Packit 345191
	 */
Packit 345191
	return (!arena_has_default_hooks(arena) ||
Packit 345191
		(opt_thp == thp_mode_always));
Packit 345191
}
Packit 345191
Packit 345191
/*
Packit 345191
 * Tries to satisfy the given allocation request by reusing one of the extents
Packit 345191
 * in the given extents_t.
Packit 345191
 */
Packit 345191
static extent_t *
Packit 345191
extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
Packit 345191
    extents_t *extents, void *new_addr, size_t size, size_t pad,
Packit 345191
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
Packit 345191
    bool growing_retained) {
Packit 345191
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
Packit 345191
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
Packit 345191
	assert(new_addr == NULL || !slab);
Packit 345191
	assert(pad == 0 || !slab);
Packit 345191
	assert(!*zero || !slab);
Packit 345191
Packit 345191
	rtree_ctx_t rtree_ctx_fallback;
Packit 345191
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
Packit 345191
Packit 345191
	extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
Packit 345191
	    rtree_ctx, extents, new_addr, size, pad, alignment, slab,
Packit 345191
	    growing_retained);
Packit 345191
	if (extent == NULL) {
Packit 345191
		return NULL;
Packit 345191
	}
Packit 345191
Packit 345191
	extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
Packit 345191
	    extents, new_addr, size, pad, alignment, slab, szind, extent,
Packit 345191
	    growing_retained);
Packit 345191
	if (extent == NULL) {
Packit 345191
		return NULL;
Packit 345191
	}
Packit 345191
Packit 345191
	if (*commit && !extent_committed_get(extent)) {
Packit 345191
		if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
Packit 345191
		    0, extent_size_get(extent), growing_retained)) {
Packit 345191
			extent_record(tsdn, arena, r_extent_hooks, extents,
Packit 345191
			    extent, growing_retained);
Packit 345191
			return NULL;
Packit 345191
		}
Packit 345191
		if (!extent_need_manual_zero(arena)) {
Packit 345191
			extent_zeroed_set(extent, true);
Packit 345191
		}
Packit 345191
	}
Packit 345191
Packit 345191
	if (extent_committed_get(extent)) {
Packit 345191
		*commit = true;
Packit 345191
	}
Packit 345191
	if (extent_zeroed_get(extent)) {
Packit 345191
		*zero = true;
Packit 345191
	}
Packit 345191
Packit 345191
	if (pad != 0) {
Packit 345191
		extent_addr_randomize(tsdn, extent, alignment);
Packit 345191
	}
Packit 345191
	assert(extent_state_get(extent) == extent_state_active);
Packit 345191
	if (slab) {
Packit 345191
		extent_slab_set(extent, slab);
Packit 345191
		extent_interior_register(tsdn, rtree_ctx, extent, szind);
Packit 345191
	}
Packit 345191
Packit 345191
	if (*zero) {
Packit 345191
		void *addr = extent_base_get(extent);
Packit 345191
		if (!extent_zeroed_get(extent)) {
Packit 345191
			size_t size = extent_size_get(extent);
Packit 345191
			if (extent_need_manual_zero(arena) ||
Packit 345191
			    pages_purge_forced(addr, size)) {
Packit 345191
				memset(addr, 0, size);
Packit 345191
			}
Packit 345191
		} else if (config_debug) {
Packit 345191
			size_t *p = (size_t *)(uintptr_t)addr;
Packit 345191
			/* Check the first page only. */
Packit 345191
			for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
Packit 345191
				assert(p[i] == 0);
Packit 345191
			}
Packit 345191
		}
Packit 345191
	}
Packit 345191
	return extent;
Packit 345191
}
Packit 345191
Packit 345191
/*
Packit 345191
 * If the caller specifies (!*zero), it is still possible to receive zeroed
Packit 345191
 * memory, in which case *zero is toggled to true.  arena_extent_alloc() takes
Packit 345191
 * advantage of this to avoid demanding zeroed extents, but taking advantage of
Packit 345191
 * them if they are returned.
Packit 345191
 */
Packit 345191
static void *
Packit 345191
extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
Packit 345191
    size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
Packit 345191
	void *ret;
Packit 345191
Packit 345191
	assert(size != 0);
Packit 345191
	assert(alignment != 0);
Packit 345191
Packit 345191
	/* "primary" dss. */
Packit 345191
	if (have_dss && dss_prec == dss_prec_primary && (ret =
Packit 345191
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
Packit 345191
	    commit)) != NULL) {
Packit 345191
		return ret;
Packit 345191
	}
Packit 345191
	/* mmap. */
Packit 345191
	if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
Packit 345191
	    != NULL) {
Packit 345191
		return ret;
Packit 345191
	}
Packit 345191
	/* "secondary" dss. */
Packit 345191
	if (have_dss && dss_prec == dss_prec_secondary && (ret =
Packit 345191
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
Packit 345191
	    commit)) != NULL) {
Packit 345191
		return ret;
Packit 345191
	}
Packit 345191
Packit 345191
	/* All strategies for allocation failed. */
Packit 345191
	return NULL;
Packit 345191
}
Packit 345191
Packit 345191
static void *
Packit 345191
extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
Packit 345191
    size_t size, size_t alignment, bool *zero, bool *commit) {
Packit 345191
	void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
Packit 345191
	    commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
Packit 345191
	    ATOMIC_RELAXED));
Packit 345191
	if (have_madvise_huge && ret) {
Packit 345191
		pages_set_thp_state(ret, size);
Packit 345191
	}
Packit 345191
	return ret;
Packit 345191
}
Packit 345191
Packit 345191
static void *
Packit 345191
extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
Packit 345191
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
Packit 345191
	tsdn_t *tsdn;
Packit 345191
	arena_t *arena;
Packit 345191
Packit 345191
	tsdn = tsdn_fetch();
Packit 345191
	arena = arena_get(tsdn, arena_ind, false);
Packit 345191
	/*
Packit 345191
	 * The arena we're allocating on behalf of must have been initialized
Packit 345191
	 * already.
Packit 345191
	 */
Packit 345191
	assert(arena != NULL);
Packit 345191
Packit 345191
	return extent_alloc_default_impl(tsdn, arena, new_addr, size,
Packit 345191
	    ALIGNMENT_CEILING(alignment, PAGE), zero, commit);
Packit 345191
}
Packit 345191
Packit 345191
static void
Packit 345191
extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
Packit 345191
	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
Packit 345191
	if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
Packit 345191
		/*
Packit 345191
		 * The only legitimate case of customized extent hooks for a0 is
Packit 345191
		 * hooks with no allocation activities.  One such example is to
Packit 345191
		 * place metadata on pre-allocated resources such as huge pages.
Packit 345191
		 * In that case, rely on reentrancy_level checks to catch
Packit 345191
		 * infinite recursions.
Packit 345191
		 */
Packit 345191
		pre_reentrancy(tsd, NULL);
Packit 345191
	} else {
Packit 345191
		pre_reentrancy(tsd, arena);
Packit 345191
	}
Packit 345191
}
Packit 345191
Packit 345191
static void
Packit 345191
extent_hook_post_reentrancy(tsdn_t *tsdn) {
Packit 345191
	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
Packit 345191
	post_reentrancy(tsd);
Packit 345191
}
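
/*
 * Illustrative sketch, not part of upstream jemalloc, of the kind of
 * "no allocation activity" hooks the comment above allows for a0: carving
 * extents out of a region reserved up front (e.g. pre-allocated huge pages),
 * so the hook never calls back into the allocator.  All example_* names are
 * hypothetical, and a real pool would need to be page-aligned.
 */
static char example_hook_pool[16 << 20];
static size_t example_hook_pool_used = 0;

static void *
example_static_pool_alloc(extent_hooks_t *extent_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit,
    unsigned arena_ind) {
	if (new_addr != NULL) {
		/* Placement requests cannot be honored from the pool. */
		return NULL;
	}
	uintptr_t base = (uintptr_t)example_hook_pool + example_hook_pool_used;
	uintptr_t ret = ALIGNMENT_CEILING(base, alignment);
	if (ret + size > (uintptr_t)example_hook_pool +
	    sizeof(example_hook_pool)) {
		return NULL;	/* Pool exhausted. */
	}
	example_hook_pool_used = (ret + size) - (uintptr_t)example_hook_pool;
	if (*zero) {
		/* The contract requires zeroed memory when *zero is true. */
		memset((void *)ret, 0, size);
	}
	*commit = true;		/* The pool is always committed. */
	return (void *)ret;
}

static extent_hooks_t example_static_pool_hooks = {
	example_static_pool_alloc,
	NULL,	/* dalloc: opt out; the pool is never unmapped. */
	NULL,	/* destroy */
	NULL,	/* commit */
	NULL,	/* decommit */
	NULL,	/* purge_lazy */
	NULL,	/* purge_forced */
	NULL,	/* split */
	NULL	/* merge */
};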
Packit 345191
Packit 345191
/*
Packit 345191
 * If virtual memory is retained, create increasingly larger extents from which
Packit 345191
 * to split requested extents in order to limit the total number of disjoint
Packit 345191
 * virtual memory ranges retained by each arena.
Packit 345191
 */
Packit 345191
static extent_t *
Packit 345191
extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
Packit 345191
    bool slab, szind_t szind, bool *zero, bool *commit) {
Packit 345191
	malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
Packit 345191
	assert(pad == 0 || !slab);
Packit 345191
	assert(!*zero || !slab);
Packit 345191
Packit 345191
	size_t esize = size + pad;
Packit 345191
	size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
Packit 345191
	/* Beware size_t wrap-around. */
Packit 345191
	if (alloc_size_min < esize) {
Packit 345191
		goto label_err;
Packit 345191
	}
Packit 345191
	/*
Packit 345191
	 * Find the next extent size in the series that would be large enough to
Packit 345191
	 * satisfy this request.
Packit 345191
	 */
Packit 345191
	pszind_t egn_skip = 0;
Packit 345191
	size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
Packit 345191
	while (alloc_size < alloc_size_min) {
Packit 345191
		egn_skip++;
Packit 345191
		if (arena->extent_grow_next + egn_skip >=
Packit 345191
		    sz_psz2ind(SC_LARGE_MAXCLASS)) {
Packit 345191
			/* Outside legal range. */
Packit 345191
			goto label_err;
Packit 345191
		}
Packit 345191
		alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
Packit 345191
	}
Packit 345191
Packit 345191
	extent_t *extent = extent_alloc(tsdn, arena);
Packit 345191
	if (extent == NULL) {
Packit 345191
		goto label_err;
Packit 345191
	}
Packit 345191
	bool zeroed = false;
Packit 345191
	bool committed = false;
Packit 345191
Packit 345191
	void *ptr;
Packit 345191
	if (*r_extent_hooks == &extent_hooks_default) {
Packit 345191
		ptr = extent_alloc_default_impl(tsdn, arena, NULL,
Packit 345191
		    alloc_size, PAGE, &zeroed, &committed);
Packit 345191
	} else {
Packit 345191
		extent_hook_pre_reentrancy(tsdn, arena);
Packit 345191
		ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
Packit 345191
		    alloc_size, PAGE, &zeroed, &committed,
Packit 345191
		    arena_ind_get(arena));
Packit 345191
		extent_hook_post_reentrancy(tsdn);
Packit 345191
	}
Packit 345191
Packit 345191
	extent_init(extent, arena, ptr, alloc_size, false, SC_NSIZES,
Packit 345191
	    arena_extent_sn_next(arena), extent_state_active, zeroed,
Packit 345191
	    committed, true, EXTENT_IS_HEAD);
Packit 345191
	if (ptr == NULL) {
Packit 345191
		extent_dalloc(tsdn, arena, extent);
Packit 345191
		goto label_err;
Packit 345191
	}
Packit 345191
Packit 345191
	if (extent_register_no_gdump_add(tsdn, extent)) {
Packit 345191
		extent_dalloc(tsdn, arena, extent);
Packit 345191
		goto label_err;
Packit 345191
	}
Packit 345191
Packit 345191
	if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
Packit 345191
		*zero = true;
Packit 345191
	}
Packit 345191
	if (extent_committed_get(extent)) {
Packit 345191
		*commit = true;
Packit 345191
	}
Packit 345191
Packit 345191
	rtree_ctx_t rtree_ctx_fallback;
Packit 345191
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
Packit 345191
Packit 345191
	extent_t *lead;
Packit 345191
	extent_t *trail;
Packit 345191
	extent_t *to_leak;
Packit 345191
	extent_t *to_salvage;
Packit 345191
	extent_split_interior_result_t result = extent_split_interior(
Packit 345191
	    tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
Packit 345191
	    &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
Packit 345191
	    true);
Packit 345191
Packit 345191
	if (result == extent_split_interior_ok) {
Packit 345191
		if (lead != NULL) {
Packit 345191
			extent_record(tsdn, arena, r_extent_hooks,
Packit 345191
			    &arena->extents_retained, lead, true);
Packit 345191
		}
Packit 345191
		if (trail != NULL) {
Packit 345191
			extent_record(tsdn, arena, r_extent_hooks,
Packit 345191
			    &arena->extents_retained, trail, true);
Packit 345191
		}
Packit 345191
	} else {
Packit 345191
		/*
Packit 345191
		 * We should have allocated a sufficiently large extent; the
Packit 345191
		 * cant_alloc case should not occur.
Packit 345191
		 */
Packit 345191
		assert(result == extent_split_interior_error);
Packit 345191
		if (to_salvage != NULL) {
Packit 345191
			if (config_prof) {
Packit 345191
				extent_gdump_add(tsdn, to_salvage);
Packit 345191
			}
Packit 345191
			extent_record(tsdn, arena, r_extent_hooks,
Packit 345191
			    &arena->extents_retained, to_salvage, true);
Packit 345191
		}
Packit 345191
		if (to_leak != NULL) {
Packit 345191
			extent_deregister_no_gdump_sub(tsdn, to_leak);
Packit 345191
			extents_abandon_vm(tsdn, arena, r_extent_hooks,
Packit 345191
			    &arena->extents_retained, to_leak, true);
Packit 345191
		}
Packit 345191
		goto label_err;
Packit 345191
	}
Packit 345191
Packit 345191
	if (*commit && !extent_committed_get(extent)) {
Packit 345191
		if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
Packit 345191
		    extent_size_get(extent), true)) {
Packit 345191
			extent_record(tsdn, arena, r_extent_hooks,
Packit 345191
			    &arena->extents_retained, extent, true);
Packit 345191
			goto label_err;
Packit 345191
		}
Packit 345191
		if (!extent_need_manual_zero(arena)) {
Packit 345191
			extent_zeroed_set(extent, true);
Packit 345191
		}
Packit 345191
	}
Packit 345191
Packit 345191
	/*
Packit 345191
	 * Increment extent_grow_next if doing so wouldn't exceed the allowed
Packit 345191
	 * range.
Packit 345191
	 */
Packit 345191
	if (arena->extent_grow_next + egn_skip + 1 <=
Packit 345191
	    arena->retain_grow_limit) {
Packit 345191
		arena->extent_grow_next += egn_skip + 1;
Packit 345191
	} else {
Packit 345191
		arena->extent_grow_next = arena->retain_grow_limit;
Packit 345191
	}
Packit 345191
	/* All opportunities for failure are past. */
Packit 345191
	malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
Packit 345191
Packit 345191
	if (config_prof) {
Packit 345191
		/* Adjust gdump stats now that extent is final size. */
Packit 345191
		extent_gdump_add(tsdn, extent);
Packit 345191
	}
Packit 345191
	if (pad != 0) {
Packit 345191
		extent_addr_randomize(tsdn, extent, alignment);
Packit 345191
	}
Packit 345191
	if (slab) {
Packit 345191
		rtree_ctx_t rtree_ctx_fallback;
Packit 345191
		rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
Packit 345191
		    &rtree_ctx_fallback);
Packit 345191
Packit 345191
		extent_slab_set(extent, true);
Packit 345191
		extent_interior_register(tsdn, rtree_ctx, extent, szind);
Packit 345191
	}
Packit 345191
	if (*zero && !extent_zeroed_get(extent)) {
Packit 345191
		void *addr = extent_base_get(extent);
Packit 345191
		size_t size = extent_size_get(extent);
Packit 345191
		if (extent_need_manual_zero(arena) ||
Packit 345191
		    pages_purge_forced(addr, size)) {
Packit 345191
			memset(addr, 0, size);
Packit 345191
		}
Packit 345191
	}
Packit 345191
Packit 345191
	return extent;
Packit 345191
label_err:
Packit 345191
	malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
Packit 345191
	return NULL;
Packit 345191
}
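
/*
 * Illustrative sketch, not part of upstream jemalloc: because each successful
 * growth advances extent_grow_next through the (roughly geometric) page size
 * class series, retaining a large total of virtual memory takes only on the
 * order of lg(total) disjoint mappings.  The hypothetical helper below counts
 * the mappings needed to retain at least `total' bytes starting from size
 * class `start', ignoring retain_grow_limit and request-driven skipping.
 */
static unsigned
example_count_grow_mappings(pszind_t start, size_t total) {
	unsigned nmappings = 0;
	size_t retained = 0;
	pszind_t ind = start;

	while (retained < total && ind < sz_psz2ind(SC_LARGE_MAXCLASS)) {
		retained += sz_pind2sz(ind);
		ind++;
		nmappings++;
	}
	return nmappings;
}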
Packit 345191
Packit 345191
static extent_t *
Packit 345191
extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
Packit 345191
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
Packit 345191
	assert(size != 0);
Packit 345191
	assert(alignment != 0);
Packit 345191
Packit 345191
	malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);
Packit 345191
Packit 345191
	extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
Packit 345191
	    &arena->extents_retained, new_addr, size, pad, alignment, slab,
Packit 345191
	    szind, zero, commit, true);
Packit 345191
	if (extent != NULL) {
Packit 345191
		malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
Packit 345191
		if (config_prof) {
Packit 345191
			extent_gdump_add(tsdn, extent);
Packit 345191
		}
Packit 345191
	} else if (opt_retain && new_addr == NULL) {
Packit 345191
		extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
Packit 345191
		    pad, alignment, slab, szind, zero, commit);
Packit 345191
		/* extent_grow_retained() always releases extent_grow_mtx. */
Packit 345191
	} else {
Packit 345191
		malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
Packit 345191
	}
Packit 345191
	malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);
Packit 345191
Packit 345191
	return extent;
Packit 345191
}
Packit 345191
Packit 345191
static extent_t *
Packit 345191
extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
Packit 345191
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
Packit 345191
	size_t esize = size + pad;
Packit 345191
	extent_t *extent = extent_alloc(tsdn, arena);
Packit 345191
	if (extent == NULL) {
Packit 345191
		return NULL;
Packit 345191
	}
Packit 345191
	void *addr;
Packit 345191
	size_t palignment = ALIGNMENT_CEILING(alignment, PAGE);
Packit 345191
	if (*r_extent_hooks == &extent_hooks_default) {
Packit 345191
		/* Call directly to propagate tsdn. */
Packit 345191
		addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
Packit 345191
		    palignment, zero, commit);
Packit 345191
	} else {
Packit 345191
		extent_hook_pre_reentrancy(tsdn, arena);
Packit 345191
		addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
Packit 345191
		    esize, palignment, zero, commit, arena_ind_get(arena));
Packit 345191
		extent_hook_post_reentrancy(tsdn);
Packit 345191
	}
Packit 345191
	if (addr == NULL) {
Packit 345191
		extent_dalloc(tsdn, arena, extent);
Packit 345191
		return NULL;
Packit 345191
	}
Packit 345191
	extent_init(extent, arena, addr, esize, slab, szind,
Packit 345191
	    arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
Packit 345191
	    true, EXTENT_NOT_HEAD);
Packit 345191
	if (pad != 0) {
Packit 345191
		extent_addr_randomize(tsdn, extent, alignment);
Packit 345191
	}
Packit 345191
	if (extent_register(tsdn, extent)) {
Packit 345191
		extent_dalloc(tsdn, arena, extent);
Packit 345191
		return NULL;
Packit 345191
	}
Packit 345191
Packit 345191
	return extent;
Packit 345191
}
Packit 345191
Packit 345191
extent_t *
Packit 345191
extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
Packit 345191
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
Packit 345191
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
Packit 345191
	    WITNESS_RANK_CORE, 0);
Packit 345191
Packit 345191
	extent_hooks_assure_initialized(arena, r_extent_hooks);
Packit 345191
Packit 345191
	extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
Packit 345191
	    new_addr, size, pad, alignment, slab, szind, zero, commit);
Packit 345191
	if (extent == NULL) {
Packit 345191
		if (opt_retain && new_addr != NULL) {
Packit 345191
			/*
Packit 345191
			 * When retain is enabled and new_addr is set, we do not
Packit 345191
			 * attempt extent_alloc_wrapper_hard, which mmaps at
Packit 345191
			 * new_addr and is very unlikely to succeed (unless
Packit 345191
			 * new_addr is at the end of the existing mappings).
Packit 345191
			 */
Packit 345191
			return NULL;
Packit 345191
		}
Packit 345191
		extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
Packit 345191
		    new_addr, size, pad, alignment, slab, szind, zero, commit);
Packit 345191
	}
Packit 345191
Packit 345191
	assert(extent == NULL || extent_dumpable_get(extent));
Packit 345191
	return extent;
Packit 345191
}
Packit 345191
Packit 345191
static bool
Packit 345191
extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
Packit 345191
    const extent_t *outer) {
Packit 345191
	assert(extent_arena_get(inner) == arena);
Packit 345191
	if (extent_arena_get(outer) != arena) {
Packit 345191
		return false;
Packit 345191
	}
Packit 345191
Packit 345191
	assert(extent_state_get(inner) == extent_state_active);
Packit 345191
	if (extent_state_get(outer) != extents->state) {
Packit 345191
		return false;
Packit 345191
	}
Packit 345191
Packit 345191
	if (extent_committed_get(inner) != extent_committed_get(outer)) {
Packit 345191
		return false;
Packit 345191
	}
Packit 345191
Packit 345191
	return true;
Packit 345191
}
Packit 345191
Packit 345191
static bool
Packit 345191
extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
Packit 345191
    extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
Packit 345191
    bool growing_retained) {
Packit 345191
	assert(extent_can_coalesce(arena, extents, inner, outer));
Packit 345191
Packit 345191
	extent_activate_locked(tsdn, arena, extents, outer);
Packit 345191
Packit 345191
	malloc_mutex_unlock(tsdn, &extents->mtx);
Packit 345191
	bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
Packit 345191
	    forward ? inner : outer, forward ? outer : inner, growing_retained);
Packit 345191
	malloc_mutex_lock(tsdn, &extents->mtx);
Packit 345191
Packit 345191
	if (err) {
Packit 345191
		extent_deactivate_locked(tsdn, arena, extents, outer);
Packit 345191
	}
Packit 345191
Packit 345191
	return err;
Packit 345191
}
Packit 345191
Packit 345191
static extent_t *
Packit 345191
extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
Packit 345191
    extent_t *extent, bool *coalesced, bool growing_retained,
Packit 345191
    bool inactive_only) {
Packit 345191
	/*
Packit 345191
	 * We avoid checking / locking inactive neighbors for large size
Packit 345191
	 * classes, since they are eagerly coalesced on deallocation, which can
Packit 345191
	 * cause lock contention.
Packit 345191
	 */
Packit 345191
	/*
Packit 345191
	 * Continue attempting to coalesce until failure, to protect against
Packit 345191
	 * races with other threads that are thwarted by this one.
Packit 345191
	 */
Packit 345191
	bool again;
Packit 345191
	do {
Packit 345191
		again = false;
Packit 345191
Packit 345191
		/* Try to coalesce forward. */
Packit 345191
		extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
Packit 345191
		    extent_past_get(extent), inactive_only);
Packit 345191
		if (next != NULL) {
Packit 345191
			/*
Packit 345191
			 * extents->mtx only protects against races for
Packit 345191
			 * like-state extents, so call extent_can_coalesce()
Packit 345191
			 * before releasing next's pool lock.
Packit 345191
			 */
Packit 345191
			bool can_coalesce = extent_can_coalesce(arena, extents,
Packit 345191
			    extent, next);
Packit 345191
Packit 345191
			extent_unlock(tsdn, next);
Packit 345191
Packit 345191
			if (can_coalesce && !extent_coalesce(tsdn, arena,
Packit 345191
			    r_extent_hooks, extents, extent, next, true,
Packit 345191
			    growing_retained)) {
Packit 345191
				if (extents->delay_coalesce) {
Packit 345191
					/* Do minimal coalescing. */
Packit 345191
					*coalesced = true;
Packit 345191
					return extent;
Packit 345191
				}
Packit 345191
				again = true;
Packit 345191
			}
Packit 345191
		}
Packit 345191
Packit 345191
		/* Try to coalesce backward. */
Packit 345191
		extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
Packit 345191
		    extent_before_get(extent), inactive_only);
Packit 345191
		if (prev != NULL) {
Packit 345191
			bool can_coalesce = extent_can_coalesce(arena, extents,
Packit 345191
			    extent, prev);
Packit 345191
			extent_unlock(tsdn, prev);
Packit 345191
Packit 345191
			if (can_coalesce && !extent_coalesce(tsdn, arena,
Packit 345191
			    r_extent_hooks, extents, extent, prev, false,
Packit 345191
			    growing_retained)) {
Packit 345191
				extent = prev;
Packit 345191
				if (extents->delay_coalesce) {
Packit 345191
					/* Do minimal coalescing. */
Packit 345191
					*coalesced = true;
Packit 345191
					return extent;
Packit 345191
				}
Packit 345191
				again = true;
Packit 345191
			}
Packit 345191
		}
Packit 345191
	} while (again);
Packit 345191
Packit 345191
	if (extents->delay_coalesce) {
Packit 345191
		*coalesced = false;
Packit 345191
	}
Packit 345191
	return extent;
Packit 345191
}
Packit 345191
Packit 345191
static extent_t *
Packit 345191
extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
Packit 345191
    extent_t *extent, bool *coalesced, bool growing_retained) {
Packit 345191
	return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
Packit 345191
	    extents, extent, coalesced, growing_retained, false);
Packit 345191
}
Packit 345191
Packit 345191
static extent_t *
Packit 345191
extent_try_coalesce_large(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
Packit 345191
    extent_t *extent, bool *coalesced, bool growing_retained) {
Packit 345191
	return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
Packit 345191
	    extents, extent, coalesced, growing_retained, true);
Packit 345191
}
Packit 345191
Packit 345191
/*
Packit 345191
 * Handles the metadata management portion of putting an unused extent into the
Packit 345191
 * given extents_t (coalescing, deregistering slab interiors, the heap
Packit 345191
 * operations).
Packit 345191
 */
Packit 345191
static void
Packit 345191
extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
Packit 345191
    extents_t *extents, extent_t *extent, bool growing_retained) {
Packit 345191
	rtree_ctx_t rtree_ctx_fallback;
Packit 345191
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
Packit 345191
Packit 345191
	assert((extents_state_get(extents) != extent_state_dirty &&
Packit 345191
	    extents_state_get(extents) != extent_state_muzzy) ||
Packit 345191
	    !extent_zeroed_get(extent));
Packit 345191
Packit 345191
	malloc_mutex_lock(tsdn, &extents->mtx);
Packit 345191
	extent_hooks_assure_initialized(arena, r_extent_hooks);
Packit 345191
Packit 345191
	extent_szind_set(extent, SC_NSIZES);
Packit 345191
	if (extent_slab_get(extent)) {
Packit 345191
		extent_interior_deregister(tsdn, rtree_ctx, extent);
Packit 345191
		extent_slab_set(extent, false);
Packit 345191
	}
Packit 345191
Packit 345191
	assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
Packit 345191
	    (uintptr_t)extent_base_get(extent), true) == extent);
Packit 345191
Packit 345191
	if (!extents->delay_coalesce) {
Packit 345191
		extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
Packit 345191
		    rtree_ctx, extents, extent, NULL, growing_retained);
Packit 345191
	} else if (extent_size_get(extent) >= SC_LARGE_MINCLASS) {
Packit 345191
		assert(extents == &arena->extents_dirty);
Packit 345191
		/* Always coalesce large extents eagerly. */
Packit 345191
		bool coalesced;
Packit 345191
		do {
Packit 345191
			assert(extent_state_get(extent) == extent_state_active);
Packit 345191
			extent = extent_try_coalesce_large(tsdn, arena,
Packit 345191
			    r_extent_hooks, rtree_ctx, extents, extent,
Packit 345191
			    &coalesced, growing_retained);
Packit 345191
		} while (coalesced);
Packit 345191
		if (extent_size_get(extent) >= oversize_threshold) {
Packit 345191
			/* Shortcut to purge the oversize extent eagerly. */
Packit 345191
			malloc_mutex_unlock(tsdn, &extents->mtx);
Packit 345191
			arena_decay_extent(tsdn, arena, r_extent_hooks, extent);
Packit 345191
			return;
Packit 345191
		}
Packit 345191
	}
Packit 345191
	extent_deactivate_locked(tsdn, arena, extents, extent);
Packit 345191
Packit 345191
	malloc_mutex_unlock(tsdn, &extents->mtx);
Packit 345191
}
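
/*
 * Illustrative, application-side sketch, not part of this file: the oversize
 * shortcut above is driven by the opt.oversize_threshold tunable (assuming a
 * build that provides it), so an application that wants multi-megabyte frees
 * returned to the OS immediately can lower the threshold via malloc_conf:
 */
#if 0
const char *malloc_conf = "oversize_threshold:1048576";
#endif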
Packit 345191
Packit 345191
void
Packit 345191
extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
Packit 345191
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
Packit 345191
Packit 345191
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
Packit 345191
	    WITNESS_RANK_CORE, 0);
Packit 345191
Packit 345191
	if (extent_register(tsdn, extent)) {
Packit 345191
		extent_dalloc(tsdn, arena, extent);
Packit 345191
		return;
Packit 345191
	}
Packit 345191
	extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
Packit 345191
}
Packit 345191
Packit 345191
static bool
Packit 345191
extent_may_dalloc(void) {
Packit 345191
	/* With retain enabled, the default dalloc always fails. */
Packit 345191
	return !opt_retain;
Packit 345191
}
Packit 345191
Packit 345191
static bool
Packit 345191
extent_dalloc_default_impl(void *addr, size_t size) {
Packit 345191
	if (!have_dss || !extent_in_dss(addr)) {
Packit 345191
		return extent_dalloc_mmap(addr, size);
Packit 345191
	}
Packit 345191
	return true;
Packit 345191
}
Packit 345191
Packit 345191
static bool
Packit 345191
extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
Packit 345191
    bool committed, unsigned arena_ind) {
Packit 345191
	return extent_dalloc_default_impl(addr, size);
Packit 345191
}
Packit 345191
Packit 345191
static bool
Packit 345191
extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
Packit 345191
	bool err;
Packit 345191
Packit 345191
	assert(extent_base_get(extent) != NULL);
Packit 345191
	assert(extent_size_get(extent) != 0);
Packit 345191
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
Packit 345191
	    WITNESS_RANK_CORE, 0);
Packit 345191
Packit 345191
	extent_addr_set(extent, extent_base_get(extent));
Packit 345191
Packit 345191
	extent_hooks_assure_initialized(arena, r_extent_hooks);
Packit 345191
	/* Try to deallocate. */
Packit 345191
	if (*r_extent_hooks == &extent_hooks_default) {
Packit 345191
		/* Call directly to propagate tsdn. */
Packit 345191
		err = extent_dalloc_default_impl(extent_base_get(extent),
Packit 345191
		    extent_size_get(extent));
Packit 345191
	} else {
Packit 345191
		extent_hook_pre_reentrancy(tsdn, arena);
Packit 345191
		err = ((*r_extent_hooks)->dalloc == NULL ||
Packit 345191
		    (*r_extent_hooks)->dalloc(*r_extent_hooks,
Packit 345191
		    extent_base_get(extent), extent_size_get(extent),
Packit 345191
		    extent_committed_get(extent), arena_ind_get(arena)));
Packit 345191
		extent_hook_post_reentrancy(tsdn);
Packit 345191
	}
Packit 345191
Packit 345191
	if (!err) {
Packit 345191
		extent_dalloc(tsdn, arena, extent);
Packit 345191
	}
Packit 345191
Packit 345191
	return err;
Packit 345191
}
Packit 345191
Packit 345191
void
Packit 345191
extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
Packit 345191
	assert(extent_dumpable_get(extent));
Packit 345191
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
Packit 345191
	    WITNESS_RANK_CORE, 0);
Packit 345191
Packit 345191
	/* Avoid calling the default extent_dalloc unless we have to. */
Packit 345191
	if (*r_extent_hooks != &extent_hooks_default || extent_may_dalloc()) {
Packit 345191
		/*
Packit 345191
		 * Deregister first to avoid a race with other allocating
Packit 345191
		 * threads, and reregister if deallocation fails.
Packit 345191
		 */
Packit 345191
		extent_deregister(tsdn, extent);
Packit 345191
		if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks,
Packit 345191
		    extent)) {
Packit 345191
			return;
Packit 345191
		}
Packit 345191
		extent_reregister(tsdn, extent);
Packit 345191
	}
Packit 345191
Packit 345191
	if (*r_extent_hooks != &extent_hooks_default) {
Packit 345191
		extent_hook_pre_reentrancy(tsdn, arena);
Packit 345191
	}
Packit 345191
	/* Try to decommit; purge if that fails. */
Packit 345191
	bool zeroed;
Packit 345191
	if (!extent_committed_get(extent)) {
Packit 345191
		zeroed = true;
Packit 345191
	} else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
Packit 345191
	    0, extent_size_get(extent))) {
Packit 345191
		zeroed = true;
Packit 345191
	} else if ((*r_extent_hooks)->purge_forced != NULL &&
Packit 345191
	    !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
Packit 345191
	    extent_base_get(extent), extent_size_get(extent), 0,
Packit 345191
	    extent_size_get(extent), arena_ind_get(arena))) {
Packit 345191
		zeroed = true;
Packit 345191
	} else if (extent_state_get(extent) == extent_state_muzzy ||
Packit 345191
	    ((*r_extent_hooks)->purge_lazy != NULL &&
Packit 345191
	    !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
Packit 345191
	    extent_base_get(extent), extent_size_get(extent), 0,
Packit 345191
	    extent_size_get(extent), arena_ind_get(arena)))) {
Packit 345191
		zeroed = false;
Packit 345191
	} else {
Packit 345191
		zeroed = false;
Packit 345191
	}
Packit 345191
	if (*r_extent_hooks != &extent_hooks_default) {
Packit 345191
		extent_hook_post_reentrancy(tsdn);
Packit 345191
	}
Packit 345191
	extent_zeroed_set(extent, zeroed);
Packit 345191
Packit 345191
	if (config_prof) {
Packit 345191
		extent_gdump_sub(tsdn, extent);
Packit 345191
	}
Packit 345191
Packit 345191
	extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
Packit 345191
	    extent, false);
Packit 345191
}
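
/*
 * Illustrative sketch, not part of upstream jemalloc: a user dalloc hook that
 * declines to unmap (returns true), which forces the decommit/purge fallback
 * above and keeps the mapping retained -- the same effect opt.retain has on
 * the default hooks.  The name is hypothetical.
 */
static bool
example_retaining_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	/* Returning true means the hook opted out of deallocation. */
	return true;
}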
Packit 345191
Packit 345191
static void
Packit 345191
extent_destroy_default_impl(void *addr, size_t size) {
Packit 345191
	if (!have_dss || !extent_in_dss(addr)) {
Packit 345191
		pages_unmap(addr, size);
Packit 345191
	}
Packit 345191
}
Packit 345191
Packit 345191
static void
Packit 345191
extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
Packit 345191
    bool committed, unsigned arena_ind) {
Packit 345191
	extent_destroy_default_impl(addr, size);
Packit 345191
}
Packit 345191
Packit 345191
void
Packit 345191
extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
Packit 345191
	assert(extent_base_get(extent) != NULL);
Packit 345191
	assert(extent_size_get(extent) != 0);
Packit 345191
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
Packit 345191
	    WITNESS_RANK_CORE, 0);
Packit 345191
Packit 345191
	/* Deregister first to avoid a race with other allocating threads. */
Packit 345191
	extent_deregister(tsdn, extent);
Packit 345191
Packit 345191
	extent_addr_set(extent, extent_base_get(extent));
Packit 345191
Packit 345191
	extent_hooks_assure_initialized(arena, r_extent_hooks);
Packit 345191
	/* Try to destroy; silently fail otherwise. */
Packit 345191
	if (*r_extent_hooks == &extent_hooks_default) {
Packit 345191
		/* Call directly to propagate tsdn. */
Packit 345191
		extent_destroy_default_impl(extent_base_get(extent),
Packit 345191
		    extent_size_get(extent));
Packit 345191
	} else if ((*r_extent_hooks)->destroy != NULL) {
Packit 345191
		extent_hook_pre_reentrancy(tsdn, arena);
Packit 345191
		(*r_extent_hooks)->destroy(*r_extent_hooks,
Packit 345191
		    extent_base_get(extent), extent_size_get(extent),
Packit 345191
		    extent_committed_get(extent), arena_ind_get(arena));
Packit 345191
		extent_hook_post_reentrancy(tsdn);
Packit 345191
	}
Packit 345191
Packit 345191
	extent_dalloc(tsdn, arena, extent);
Packit 345191
}
Packit 345191
Packit 345191
static bool
Packit 345191
extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
Packit 345191
    size_t offset, size_t length, unsigned arena_ind) {
Packit 345191
	return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
Packit 345191
	    length);
Packit 345191
}
Packit 345191
Packit 345191
static bool
Packit 345191
extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
Packit 345191
    size_t length, bool growing_retained) {
Packit 345191
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
Packit 345191
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
Packit 345191
Packit 345191
	extent_hooks_assure_initialized(arena, r_extent_hooks);
Packit 345191
	if (*r_extent_hooks != &extent_hooks_default) {
Packit 345191
		extent_hook_pre_reentrancy(tsdn, arena);
Packit 345191
	}
Packit 345191
	bool err = ((*r_extent_hooks)->commit == NULL ||
Packit 345191
	    (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
Packit 345191
	    extent_size_get(extent), offset, length, arena_ind_get(arena)));
Packit 345191
	if (*r_extent_hooks != &extent_hooks_default) {
Packit 345191
		extent_hook_post_reentrancy(tsdn);
Packit 345191
	}
Packit 345191
	extent_committed_set(extent, extent_committed_get(extent) || !err);
Packit 345191
	return err;
Packit 345191
}
Packit 345191
Packit 345191
bool
Packit 345191
extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
Packit 345191
    size_t length) {
Packit 345191
	return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
Packit 345191
	    length, false);
Packit 345191
}
Packit 345191
Packit 345191
static bool
Packit 345191
extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
Packit 345191
    size_t offset, size_t length, unsigned arena_ind) {
Packit 345191
	return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
Packit 345191
	    length);
Packit 345191
}
Packit 345191
Packit 345191
bool
Packit 345191
extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
Packit 345191
    size_t length) {
Packit 345191
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
Packit 345191
	    WITNESS_RANK_CORE, 0);
Packit 345191
Packit 345191
	extent_hooks_assure_initialized(arena, r_extent_hooks);
Packit 345191
Packit 345191
	if (*r_extent_hooks != &extent_hooks_default) {
Packit 345191
		extent_hook_pre_reentrancy(tsdn, arena);
Packit 345191
	}
Packit 345191
	bool err = ((*r_extent_hooks)->decommit == NULL ||
Packit 345191
	    (*r_extent_hooks)->decommit(*r_extent_hooks,
Packit 345191
	    extent_base_get(extent), extent_size_get(extent), offset, length,
Packit 345191
	    arena_ind_get(arena)));
Packit 345191
	if (*r_extent_hooks != &extent_hooks_default) {
Packit 345191
		extent_hook_post_reentrancy(tsdn);
Packit 345191
	}
Packit 345191
	extent_committed_set(extent, extent_committed_get(extent) && err);
Packit 345191
	return err;
Packit 345191
}
Packit 345191
Packit 345191
#ifdef PAGES_CAN_PURGE_LAZY
Packit 345191
static bool
Packit 345191
extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
Packit 345191
    size_t offset, size_t length, unsigned arena_ind) {
Packit 345191
	assert(addr != NULL);
Packit 345191
	assert((offset & PAGE_MASK) == 0);
Packit 345191
	assert(length != 0);
Packit 345191
	assert((length & PAGE_MASK) == 0);
Packit 345191
Packit 345191
	return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
Packit 345191
	    length);
Packit 345191
}
Packit 345191
#endif
Packit 345191
Packit 345191
static bool
Packit 345191
extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
Packit 345191
    size_t length, bool growing_retained) {
Packit 345191
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
Packit 345191
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
Packit 345191
Packit 345191
	extent_hooks_assure_initialized(arena, r_extent_hooks);
Packit 345191
Packit 345191
	if ((*r_extent_hooks)->purge_lazy == NULL) {
Packit 345191
		return true;
Packit 345191
	}
Packit 345191
	if (*r_extent_hooks != &extent_hooks_default) {
Packit 345191
		extent_hook_pre_reentrancy(tsdn, arena);
Packit 345191
	}
Packit 345191
	bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
Packit 345191
	    extent_base_get(extent), extent_size_get(extent), offset, length,
Packit 345191
	    arena_ind_get(arena));
Packit 345191
	if (*r_extent_hooks != &extent_hooks_default) {
Packit 345191
		extent_hook_post_reentrancy(tsdn);
Packit 345191
	}
Packit 345191
Packit 345191
	return err;
Packit 345191
}
Packit 345191
Packit 345191
bool
Packit 345191
extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
Packit 345191
    size_t length) {
Packit 345191
	return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
Packit 345191
	    offset, length, false);
Packit 345191
}
Packit 345191
Packit 345191
#ifdef PAGES_CAN_PURGE_FORCED
Packit 345191
static bool
Packit 345191
extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
Packit 345191
    size_t size, size_t offset, size_t length, unsigned arena_ind) {
Packit 345191
	assert(addr != NULL);
Packit 345191
	assert((offset & PAGE_MASK) == 0);
Packit 345191
	assert(length != 0);
Packit 345191
	assert((length & PAGE_MASK) == 0);
Packit 345191
Packit 345191
	return pages_purge_forced((void *)((uintptr_t)addr +
Packit 345191
	    (uintptr_t)offset), length);
Packit 345191
}
Packit 345191
#endif
Packit 345191
Packit 345191
static bool
Packit 345191
extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
Packit 345191
    size_t length, bool growing_retained) {
Packit 345191
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
Packit 345191
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
Packit 345191
Packit 345191
	extent_hooks_assure_initialized(arena, r_extent_hooks);
Packit 345191
Packit 345191
	if ((*r_extent_hooks)->purge_forced == NULL) {
Packit 345191
		return true;
Packit 345191
	}
Packit 345191
	if (*r_extent_hooks != &extent_hooks_default) {
Packit 345191
		extent_hook_pre_reentrancy(tsdn, arena);
Packit 345191
	}
Packit 345191
	bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
Packit 345191
	    extent_base_get(extent), extent_size_get(extent), offset, length,
Packit 345191
	    arena_ind_get(arena));
Packit 345191
	if (*r_extent_hooks != &extent_hooks_default) {
Packit 345191
		extent_hook_post_reentrancy(tsdn);
Packit 345191
	}
Packit 345191
	return err;
Packit 345191
}
Packit 345191
Packit 345191
bool
Packit 345191
extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
Packit 345191
    size_t length) {
Packit 345191
	return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
Packit 345191
	    offset, length, false);
Packit 345191
}
Packit 345191
Packit 345191
static bool
Packit 345191
extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
Packit 345191
    size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
Packit 345191
	if (!maps_coalesce) {
Packit 345191
		/*
Packit 345191
		 * Without retain, only whole regions can be purged (required by
Packit 345191
		 * MEM_RELEASE on Windows) -- therefore disallow splitting.  See
Packit 345191
		 * comments in extent_head_no_merge().
Packit 345191
		 */
Packit 345191
		return !opt_retain;
Packit 345191
	}
Packit 345191
Packit 345191
	return false;
Packit 345191
}
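
/*
 * Illustrative sketch, not part of upstream jemalloc: a user split hook that
 * refuses every split (returns true), so each hook-provided region stays
 * whole and can later be deallocated or purged as a single unit.  The name is
 * hypothetical.
 */
static bool
example_no_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
	return true;
}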
Packit 345191
Packit 345191
/*
Packit 345191
 * Accepts the extent to split, and the characteristics of each side of the
Packit 345191
 * split.  The 'a' parameters go with the 'lead' of the resulting pair of
Packit 345191
 * extents (the lower addressed portion of the split), and the 'b' parameters go
Packit 345191
 * with the trail (the higher addressed portion).  This makes 'extent' the lead,
Packit 345191
 * and returns the trail (except in case of error).
Packit 345191
 */
Packit 345191
static extent_t *
Packit 345191
extent_split_impl(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
Packit 345191
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
Packit 345191
    bool growing_retained) {
Packit 345191
	assert(extent_size_get(extent) == size_a + size_b);
Packit 345191
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
Packit 345191
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
Packit 345191
Packit 345191
	extent_hooks_assure_initialized(arena, r_extent_hooks);
Packit 345191
Packit 345191
	if ((*r_extent_hooks)->split == NULL) {
Packit 345191
		return NULL;
Packit 345191
	}
Packit 345191
Packit 345191
	extent_t *trail = extent_alloc(tsdn, arena);
Packit 345191
	if (trail == NULL) {
Packit 345191
		goto label_error_a;
Packit 345191
	}
Packit 345191
Packit 345191
	extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
Packit 345191
	    size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
Packit 345191
	    extent_state_get(extent), extent_zeroed_get(extent),
Packit 345191
	    extent_committed_get(extent), extent_dumpable_get(extent),
Packit 345191
	    EXTENT_NOT_HEAD);
Packit 345191
Packit 345191
	rtree_ctx_t rtree_ctx_fallback;
Packit 345191
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
Packit 345191
	rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
Packit 345191
	{
Packit 345191
		extent_t lead;
Packit 345191
Packit 345191
		extent_init(&lead, arena, extent_addr_get(extent), size_a,
Packit 345191
		    slab_a, szind_a, extent_sn_get(extent),
Packit 345191
		    extent_state_get(extent), extent_zeroed_get(extent),
Packit 345191
		    extent_committed_get(extent), extent_dumpable_get(extent),
Packit 345191
		    EXTENT_NOT_HEAD);
Packit 345191
Packit 345191
		extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
Packit 345191
		    true, &lead_elm_a, &lead_elm_b);
Packit 345191
	}
Packit 345191
	rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
Packit 345191
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
Packit 345191
	    &trail_elm_a, &trail_elm_b);
Packit 345191
Packit 345191
	if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
Packit 345191
	    || trail_elm_b == NULL) {
Packit 345191
		goto label_error_b;
Packit 345191
	}
Packit 345191
Packit 345191
	extent_lock2(tsdn, extent, trail);
Packit 345191
Packit 345191
	if (*r_extent_hooks != &extent_hooks_default) {
Packit 345191
		extent_hook_pre_reentrancy(tsdn, arena);
Packit 345191
	}
Packit 345191
	bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
Packit 345191
	    size_a + size_b, size_a, size_b, extent_committed_get(extent),
Packit 345191
	    arena_ind_get(arena));
Packit 345191
	if (*r_extent_hooks != &extent_hooks_default) {
Packit 345191
		extent_hook_post_reentrancy(tsdn);
Packit 345191
	}
Packit 345191
	if (err) {
Packit 345191
		goto label_error_c;
Packit 345191
	}
Packit 345191
Packit 345191
	extent_size_set(extent, size_a);
Packit 345191
	extent_szind_set(extent, szind_a);
Packit 345191
Packit 345191
	extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
Packit 345191
	    szind_a, slab_a);
Packit 345191
	extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
Packit 345191
	    szind_b, slab_b);
Packit 345191
Packit 345191
	extent_unlock2(tsdn, extent, trail);
Packit 345191
Packit 345191
	return trail;
Packit 345191
label_error_c:
Packit 345191
	extent_unlock2(tsdn, extent, trail);
Packit 345191
label_error_b:
Packit 345191
	extent_dalloc(tsdn, arena, trail);
Packit 345191
label_error_a:
Packit 345191
	return NULL;
Packit 345191
}
Packit 345191
Packit 345191
extent_t *
Packit 345191
extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
Packit 345191
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
Packit 345191
	return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
Packit 345191
	    szind_a, slab_a, size_b, szind_b, slab_b, false);
Packit 345191
}
Packit 345191
Packit 345191
static bool
Packit 345191
extent_merge_default_impl(void *addr_a, void *addr_b) {
Packit 345191
	if (!maps_coalesce && !opt_retain) {
Packit 345191
		return true;
Packit 345191
	}
Packit 345191
	if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
Packit 345191
		return true;
Packit 345191
	}
Packit 345191
Packit 345191
	return false;
Packit 345191
}
Packit 345191
Packit 345191
/*
Packit 345191
 * Returns true if the given extents can't be merged because of their head bit
Packit 345191
 * settings.  Assumes the second extent has the higher address.
Packit 345191
 */
Packit 345191
static bool
Packit 345191
extent_head_no_merge(extent_t *a, extent_t *b) {
Packit 345191
	assert(extent_base_get(a) < extent_base_get(b));
Packit 345191
	/*
Packit 345191
	 * When coalescing is not always allowed (Windows), only merge extents
Packit 345191
	 * from the same VirtualAlloc region under opt.retain (in which case
Packit 345191
	 * MEM_DECOMMIT is utilized for purging).
Packit 345191
	 */
Packit 345191
	if (maps_coalesce) {
Packit 345191
		return false;
Packit 345191
	}
Packit 345191
	if (!opt_retain) {
Packit 345191
		return true;
Packit 345191
	}
Packit 345191
	/* If b is a head extent, disallow the cross-region merge. */
Packit 345191
	if (extent_is_head_get(b)) {
Packit 345191
		/*
Packit 345191
		 * Additionally, sn should not overflow with retain; sanity
Packit 345191
		 * check that different regions have unique sn.
Packit 345191
		 */
Packit 345191
		assert(extent_sn_comp(a, b) != 0);
Packit 345191
		return true;
Packit 345191
	}
Packit 345191
	assert(extent_sn_comp(a, b) == 0);
Packit 345191
Packit 345191
	return false;
Packit 345191
}
Packit 345191
Packit 345191
static bool
Packit 345191
extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
Packit 345191
    void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
Packit 345191
	if (!maps_coalesce) {
Packit 345191
		tsdn_t *tsdn = tsdn_fetch();
Packit 345191
		extent_t *a = iealloc(tsdn, addr_a);
Packit 345191
		extent_t *b = iealloc(tsdn, addr_b);
Packit 345191
		if (extent_head_no_merge(a, b)) {
Packit 345191
			return true;
Packit 345191
		}
Packit 345191
	}
Packit 345191
	return extent_merge_default_impl(addr_a, addr_b);
Packit 345191
}
Packit 345191
Packit 345191
static bool
Packit 345191
extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
Packit 345191
    bool growing_retained) {
Packit 345191
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
Packit 345191
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
Packit 345191
	assert(extent_base_get(a) < extent_base_get(b));
Packit 345191
Packit 345191
	extent_hooks_assure_initialized(arena, r_extent_hooks);
Packit 345191
Packit 345191
	if ((*r_extent_hooks)->merge == NULL || extent_head_no_merge(a, b)) {
Packit 345191
		return true;
Packit 345191
	}
Packit 345191
Packit 345191
	bool err;
Packit 345191
	if (*r_extent_hooks == &extent_hooks_default) {
Packit 345191
		/* Call directly to propagate tsdn. */
Packit 345191
		err = extent_merge_default_impl(extent_base_get(a),
Packit 345191
		    extent_base_get(b));
Packit 345191
	} else {
Packit 345191
		extent_hook_pre_reentrancy(tsdn, arena);
Packit 345191
		err = (*r_extent_hooks)->merge(*r_extent_hooks,
Packit 345191
		    extent_base_get(a), extent_size_get(a), extent_base_get(b),
Packit 345191
		    extent_size_get(b), extent_committed_get(a),
Packit 345191
		    arena_ind_get(arena));
Packit 345191
		extent_hook_post_reentrancy(tsdn);
Packit 345191
	}
Packit 345191
Packit 345191
	if (err) {
Packit 345191
		return true;
Packit 345191
	}
Packit 345191
Packit 345191
	/*
Packit 345191
	 * The rtree writes must happen while all the relevant elements are
Packit 345191
	 * owned, so the following code uses decomposed helper functions rather
Packit 345191
	 * than extent_{,de}register() to do things in the right order.
Packit 345191
	 */
Packit 345191
	rtree_ctx_t rtree_ctx_fallback;
Packit 345191
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
Packit 345191
	rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
Packit 345191
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
Packit 345191
	    &a_elm_b);
Packit 345191
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
Packit 345191
	    &b_elm_b);
Packit 345191
Packit 345191
	extent_lock2(tsdn, a, b);
Packit 345191
Packit 345191
	if (a_elm_b != NULL) {
Packit 345191
		rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
Packit 345191
		    SC_NSIZES, false);
Packit 345191
	}
Packit 345191
	if (b_elm_b != NULL) {
Packit 345191
		rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
Packit 345191
		    SC_NSIZES, false);
Packit 345191
	} else {
Packit 345191
		b_elm_b = b_elm_a;
Packit 345191
	}
Packit 345191
Packit 345191
	extent_size_set(a, extent_size_get(a) + extent_size_get(b));
Packit 345191
	extent_szind_set(a, SC_NSIZES);
Packit 345191
	extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
Packit 345191
	    extent_sn_get(a) : extent_sn_get(b));
Packit 345191
	extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
Packit 345191
Packit 345191
	extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, SC_NSIZES,
Packit 345191
	    false);
Packit 345191
Packit 345191
	extent_unlock2(tsdn, a, b);
Packit 345191
Packit 345191
	extent_dalloc(tsdn, extent_arena_get(b), b);
Packit 345191
Packit 345191
	return false;
Packit 345191
}
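
/*
 * Illustrative sketch, not part of upstream jemalloc: the metadata combination
 * rules applied above when a absorbs b -- the merged extent keeps the older
 * (smaller) serial number and is zeroed only if both halves were.  The helper
 * name is hypothetical.
 */
static void
example_merged_metadata(size_t sn_a, size_t sn_b, bool zeroed_a, bool zeroed_b,
    size_t *r_sn, bool *r_zeroed) {
	*r_sn = (sn_a < sn_b) ? sn_a : sn_b;
	*r_zeroed = zeroed_a && zeroed_b;
}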
Packit 345191
Packit 345191
bool
Packit 345191
extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
Packit 345191
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
Packit 345191
	return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
Packit 345191
}
Packit 345191
Packit 345191
bool
Packit 345191
extent_boot(void) {
Packit 345191
	if (rtree_new(&extents_rtree, true)) {
Packit 345191
		return true;
Packit 345191
	}
Packit 345191
Packit 345191
	if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
Packit 345191
	    WITNESS_RANK_EXTENT_POOL)) {
Packit 345191
		return true;
Packit 345191
	}
Packit 345191
Packit 345191
	if (have_dss) {
Packit 345191
		extent_dss_boot();
Packit 345191
	}
Packit 345191
Packit 345191
	return false;
Packit 345191
}
Packit 345191
Packit 345191
void
Packit 345191
extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
Packit 345191
    size_t *nfree, size_t *nregs, size_t *size) {
Packit 345191
	assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);
Packit 345191
Packit 345191
	const extent_t *extent = iealloc(tsdn, ptr);
Packit 345191
	if (unlikely(extent == NULL)) {
Packit 345191
		*nfree = *nregs = *size = 0;
Packit 345191
		return;
Packit 345191
	}
Packit 345191
Packit 345191
	*size = extent_size_get(extent);
Packit 345191
	if (!extent_slab_get(extent)) {
Packit 345191
		*nfree = 0;
Packit 345191
		*nregs = 1;
Packit 345191
	} else {
Packit 345191
		*nfree = extent_nfree_get(extent);
Packit 345191
		*nregs = bin_infos[extent_szind_get(extent)].nregs;
Packit 345191
		assert(*nfree <= *nregs);
Packit 345191
		assert(*nfree * extent_usize_get(extent) <= *size);
Packit 345191
	}
Packit 345191
}
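
/*
 * Illustrative sketch, not part of upstream jemalloc: a hypothetical helper
 * that uses the stats gathered above to report what fraction of the regions
 * in the slab backing `ptr' are currently free.
 */
static double
example_slab_free_fraction(tsdn_t *tsdn, const void *ptr) {
	size_t nfree, nregs, size;

	extent_util_stats_get(tsdn, ptr, &nfree, &nregs, &size);
	return (nregs == 0) ? 0.0 : (double)nfree / (double)nregs;
}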
Packit 345191
Packit 345191
void
Packit 345191
extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
Packit 345191
    size_t *nfree, size_t *nregs, size_t *size,
Packit 345191
    size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr) {
Packit 345191
	assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL
Packit 345191
	    && bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL);
Packit 345191
Packit 345191
	const extent_t *extent = iealloc(tsdn, ptr);
Packit 345191
	if (unlikely(extent == NULL)) {
Packit 345191
		*nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0;
Packit 345191
		*slabcur_addr = NULL;
Packit 345191
		return;
Packit 345191
	}
Packit 345191
Packit 345191
	*size = extent_size_get(extent);
Packit 345191
	if (!extent_slab_get(extent)) {
Packit 345191
		*nfree = *bin_nfree = *bin_nregs = 0;
Packit 345191
		*nregs = 1;
Packit 345191
		*slabcur_addr = NULL;
Packit 345191
		return;
Packit 345191
	}
Packit 345191
Packit 345191
	*nfree = extent_nfree_get(extent);
Packit 345191
	const szind_t szind = extent_szind_get(extent);
Packit 345191
	*nregs = bin_infos[szind].nregs;
Packit 345191
	assert(*nfree <= *nregs);
Packit 345191
	assert(*nfree * extent_usize_get(extent) <= *size);
Packit 345191
Packit 345191
	const arena_t *arena = extent_arena_get(extent);
Packit 345191
	assert(arena != NULL);
Packit 345191
	const unsigned binshard = extent_binshard_get(extent);
Packit 345191
	bin_t *bin = &arena->bins[szind].bin_shards[binshard];
Packit 345191
Packit 345191
	malloc_mutex_lock(tsdn, &bin->lock);
Packit 345191
	if (config_stats) {
Packit 345191
		*bin_nregs = *nregs * bin->stats.curslabs;
Packit 345191
		assert(*bin_nregs >= bin->stats.curregs);
Packit 345191
		*bin_nfree = *bin_nregs - bin->stats.curregs;
Packit 345191
	} else {
Packit 345191
		*bin_nfree = *bin_nregs = 0;
Packit 345191
	}
Packit 345191
	extent_t *slab;
Packit 345191
	if (bin->slabcur != NULL) {
Packit 345191
		slab = bin->slabcur;
Packit 345191
	} else {
Packit 345191
		slab = extent_heap_first(&bin->slabs_nonfull);
Packit 345191
	}
Packit 345191
	*slabcur_addr = slab != NULL ? extent_addr_get(slab) : NULL;
Packit 345191
	malloc_mutex_unlock(tsdn, &bin->lock);
Packit 345191
}