#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H

#include "jemalloc/internal/arena_stats.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/ticker.h"

struct arena_decay_s {
	/* Synchronizes all non-atomic fields. */
	malloc_mutex_t		mtx;
	/*
	 * True if a thread is currently purging the extents associated with
	 * this decay structure.
	 */
	bool			purging;
	/*
	 * Approximate time in milliseconds from the creation of a set of unused
	 * dirty pages until an equivalent set of unused dirty pages is purged
	 * and/or reused.
	 */
	atomic_zd_t		time_ms;
	/* time / SMOOTHSTEP_NSTEPS. */
	nstime_t		interval;
	/*
	 * Time at which the current decay interval logically started.  We do
	 * not actually advance to a new epoch until sometime after it starts
	 * because of scheduling and computation delays, and it is even possible
	 * to completely skip epochs.  In all cases, during epoch advancement we
	 * merge all relevant activity into the most recently recorded epoch.
	 */
	nstime_t		epoch;
	/* Deadline randomness generator. */
	uint64_t		jitter_state;
	/*
	 * Deadline for current epoch.  This is the sum of interval and per
	 * epoch jitter which is a uniform random variable in [0..interval).
	 * Epochs always advance by precise multiples of interval, but we
	 * randomize the deadline to reduce the likelihood of arenas purging in
	 * lockstep.
	 */
	nstime_t		deadline;
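	/*
	 * Illustrative sketch (not part of the original header): a fresh
	 * deadline can be derived from the fields above roughly as follows,
	 * assuming jemalloc's nstime/prng helpers:
	 *
	 *	nstime_copy(&decay->deadline, &decay->epoch);
	 *	nstime_add(&decay->deadline, &decay->interval);
	 *	nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
	 *	    nstime_ns(&decay->interval)));
	 *	nstime_add(&decay->deadline, &jitter);
	 */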
	/*
	 * Number of unpurged pages at beginning of current epoch.  During epoch
	 * advancement we use the delta between arena->decay_*.nunpurged and
	 * extents_npages_get(&arena->extents_*) to determine how many dirty
	 * pages, if any, were generated.
	 */
	size_t			nunpurged;
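	/*
	 * Illustrative sketch (not part of the original header): the dirty
	 * pages generated since the last epoch amount to approximately
	 *
	 *	current = extents_npages_get(&arena->extents_dirty);
	 *	ndirty_new = (current > decay->nunpurged) ?
	 *	    current - decay->nunpurged : 0;
	 */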
	/*
	 * Trailing log of how many unused dirty pages were generated during
	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
	 * element is the most recent epoch.  Corresponding epoch times are
	 * relative to epoch.
	 */
	size_t			backlog[SMOOTHSTEP_NSTEPS];
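	/*
	 * Illustrative sketch (not part of the original header): the decayed
	 * page limit weights each backlog slot by the matching fixed-point
	 * smoothstep coefficient (h_steps[] is arena.c's table built from
	 * smoothstep.h):
	 *
	 *	sum = 0;
	 *	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
	 *		sum += decay->backlog[i] * h_steps[i];
	 *	}
	 *	npages_limit = (size_t)(sum >> SMOOTHSTEP_BFP);
	 */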

	/*
	 * Pointer to associated stats.  These stats are embedded directly in
	 * the arena's stats due to how stats structures are shared between the
	 * arena and ctl code.
	 *
	 * Synchronization: Same as associated arena's stats field.
	 */
	arena_stats_decay_t	*stats;
	/* Peak number of pages in associated extents.  Used for debug only. */
	uint64_t		ceil_npages;
};

struct arena_s {
	/*
	 * Number of threads currently assigned to this arena.  Each thread has
	 * two distinct assignments, one for application-serving allocation, and
	 * the other for internal metadata allocation.  Internal metadata must
	 * not be allocated from arenas explicitly created via the arenas.create
	 * mallctl, because the arena.<i>.reset mallctl indiscriminately
	 * discards all allocations for the affected arena.
	 *
	 *   0: Application allocation.
	 *   1: Internal metadata allocation.
	 *
	 * Synchronization: atomic.
	 */
	atomic_u_t		nthreads[2];

	/* Next bin shard for binding new threads. Synchronization: atomic. */
	atomic_u_t		binshard_next;

	/*
	 * When percpu_arena is enabled, to amortize the cost of reading /
	 * updating the current CPU id, track the most recent thread accessing
	 * this arena, and only read CPU if there is a mismatch.
	 */
	tsdn_t		*last_thd;
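	/*
	 * Illustrative sketch (not part of the original header) of the
	 * intended fast path, with hypothetical control flow:
	 *
	 *	if (arena->last_thd != tsdn) {
	 *		arena->last_thd = tsdn;
	 *		cpu = malloc_getcpu();	// read CPU id only on mismatch
	 *		// ...rebind to the matching percpu arena if needed...
	 *	}
	 */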

	/* Synchronization: internal. */
	arena_stats_t		stats;

	/*
	 * Lists of tcaches and cache_bin_array_descriptors for extant threads
	 * associated with this arena.  Stats from these are merged
	 * incrementally, and at exit if opt_stats_print is enabled.
	 *
	 * Synchronization: tcache_ql_mtx.
	 */
	ql_head(tcache_t)			tcache_ql;
	ql_head(cache_bin_array_descriptor_t)	cache_bin_array_descriptor_ql;
	malloc_mutex_t				tcache_ql_mtx;

	/* Synchronization: internal. */
	prof_accum_t		prof_accum;

	/*
	 * PRNG state for cache index randomization of large allocation base
	 * pointers.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t		offset_state;
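	/*
	 * Illustrative sketch (not part of the original header): with cache
	 * index randomization, a large allocation's base pointer is offset by
	 * a random whole number of cache lines within a page, e.g.:
	 *
	 *	r = prng_lg_range_zu(&arena->offset_state,
	 *	    LG_PAGE - LG_CACHELINE, true);
	 *	random_offset = r << LG_CACHELINE;
	 */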

	/*
	 * Extent serial number generator state.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t		extent_sn_next;
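	/*
	 * Illustrative sketch (not part of the original header): serial
	 * numbers are handed out with a relaxed fetch-add:
	 *
	 *	sn = atomic_fetch_add_zu(&arena->extent_sn_next, 1,
	 *	    ATOMIC_RELAXED);
	 */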

	/*
	 * Represents a dss_prec_t, but atomically.
	 *
	 * Synchronization: atomic.
	 */
	atomic_u_t		dss_prec;

	/*
	 * Number of pages in active extents.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t		nactive;

	/*
	 * Extant large allocations.
	 *
	 * Synchronization: large_mtx.
	 */
	extent_list_t		large;
	/* Synchronizes all large allocation/update/deallocation. */
	malloc_mutex_t		large_mtx;

	/*
	 * Collections of extents that were previously allocated.  These are
	 * used when allocating extents, in an attempt to re-use address space.
	 *
	 * Synchronization: internal.
	 */
	extents_t		extents_dirty;
	extents_t		extents_muzzy;
	extents_t		extents_retained;
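	/*
	 * Illustrative sketch (not part of the original header): allocation
	 * tries the cheapest collection first and falls back, in hypothetical
	 * shape:
	 *
	 *	extent = extents_alloc(..., &arena->extents_dirty, ...);
	 *	if (extent == NULL) {
	 *		extent = extents_alloc(..., &arena->extents_muzzy, ...);
	 *	}
	 *	if (extent == NULL) {
	 *		// ...retained extents, then a new mapping...
	 *	}
	 */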

	/*
	 * Decay-based purging state, responsible for scheduling extent state
	 * transitions.
	 *
	 * Synchronization: internal.
	 */
	arena_decay_t		decay_dirty; /* dirty --> muzzy */
	arena_decay_t		decay_muzzy; /* muzzy --> retained */

	/*
	 * Next extent size class in a growing series to use when satisfying a
	 * request via the extent hooks (only if opt_retain).  This limits the
	 * number of disjoint virtual memory ranges so that extent merging can
	 * be effective even if multiple arenas' extent allocation requests are
	 * highly interleaved.
	 *
	 * retain_grow_limit is the max allowed size ind to expand (unless the
	 * required size is greater).  Default is no limit, and controlled
	 * through mallctl only.
	 *
	 * Synchronization: extent_grow_mtx.
	 */
	pszind_t		extent_grow_next;
	pszind_t		retain_grow_limit;
	malloc_mutex_t		extent_grow_mtx;
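	/*
	 * Illustrative sketch (not part of the original header): under
	 * opt_retain, each hook-backed allocation requests at least the next
	 * size in the series and then advances the cursor, so successive
	 * requests grow roughly geometrically:
	 *
	 *	alloc_size = sz_pind2sz(arena->extent_grow_next); // >= request
	 *	arena->extent_grow_next++;	// capped at retain_grow_limit
	 */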

	/*
	 * Available extent structures that were allocated via
	 * base_alloc_extent().
	 *
	 * Synchronization: extent_avail_mtx.
	 */
	extent_tree_t		extent_avail;
	atomic_zu_t		extent_avail_cnt;
	malloc_mutex_t		extent_avail_mtx;

	/*
	 * bins is used to store heaps of free regions.
	 *
	 * Synchronization: internal.
	 */
	bins_t			bins[SC_NBINS];

	/*
	 * Base allocator, from which arena metadata are allocated.
	 *
	 * Synchronization: internal.
	 */
	base_t			*base;
	/* Used to determine uptime.  Read-only after initialization. */
	nstime_t		create_time;
};

/* Used in conjunction with tsd for fast arena-related context lookup. */
struct arena_tdata_s {
	ticker_t		decay_ticker;
};
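
/*
 * Illustrative sketch (not part of the original header): the ticker gates how
 * often a thread pays for decay work on its allocation fast path, e.g.:
 *
 *	if (ticker_ticks(&tdata->decay_ticker, nticks)) {
 *		arena_decay(tsdn, arena, false, false);
 *	}
 */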

/* Used to pass rtree lookup context down the path. */
struct alloc_ctx_s {
	szind_t szind;
	bool slab;
};
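
/*
 * Illustrative sketch (not part of the original header): on deallocation the
 * context is typically filled from the extents radix tree, e.g.:
 *
 *	rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
 *	    true, &alloc_ctx.szind, &alloc_ctx.slab);
 */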

#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */