From 1c86a620d74f4f9383c4956dafd3e2486300dc0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn-Egil=20Dahlberg?= Date: Wed, 17 Jun 2015 17:33:29 +0200 Subject: erts: Remove HALFWORD_HEAP definition --- erts/emulator/sys/common/erl_mmap.c | 3 +-- erts/emulator/sys/common/erl_mseg.c | 46 ------------------------------------- erts/emulator/sys/common/erl_mseg.h | 3 --- 3 files changed, 1 insertion(+), 51 deletions(-) (limited to 'erts/emulator/sys/common') diff --git a/erts/emulator/sys/common/erl_mmap.c b/erts/emulator/sys/common/erl_mmap.c index 754047829f..e6d0e1e281 100644 --- a/erts/emulator/sys/common/erl_mmap.c +++ b/erts/emulator/sys/common/erl_mmap.c @@ -2453,7 +2453,6 @@ Eterm erts_mmap_debug_info(Process* p) UWord values[4]; Eterm *hp, *hp_end; Uint may_need; - const Uint PTR_BIG_SZ = HALFWORD_HEAP ? 3 : 2; erts_smp_mtx_lock(&mmap_state.mtx); values[0] = (UWord)mmap_state.sa.bot; @@ -2464,7 +2463,7 @@ Eterm erts_mmap_debug_info(Process* p) sua_list = build_free_seg_list(p, &mmap_state.sua.map); erts_smp_mtx_unlock(&mmap_state.mtx); - may_need = 4*(2+3+PTR_BIG_SZ) + 2*(2+3); + may_need = 4*(2+3+2) + 2*(2+3); hp = HAlloc(p, may_need); hp_end = hp + may_need; diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c index 0d51aad863..8073d5b908 100644 --- a/erts/emulator/sys/common/erl_mseg.c +++ b/erts/emulator/sys/common/erl_mseg.c @@ -107,9 +107,6 @@ const ErtsMsegOpt_t erts_mseg_default_opt = { 0, /* Absolute shrink threshold */ 0, /* Relative shrink threshold */ 0 /* Scheduler specific */ -#if HALFWORD_HEAP - ,0 /* need low memory */ -#endif }; @@ -184,12 +181,7 @@ struct ErtsMsegAllctr_t_ { MemKind* mk_list; -#if HALFWORD_HEAP - MemKind low_mem; - MemKind hi_mem; -#else MemKind the_mem; -#endif Uint max_cache_size; Uint abs_max_cache_bad_fit; @@ -309,11 +301,6 @@ mseg_create(ErtsMsegAllctr_t *ma, Uint flags, MemKind* mk, UWord *sizep) #endif void *seg; Uint32 mmap_flags = 0; -#if HALFWORD_HEAP - mmap_flags |= ((mk == &ma->low_mem) - ? ERTS_MMAPFLG_SUPERCARRIER_ONLY - : ERTS_MMAPFLG_OS_ONLY); -#endif if (MSEG_FLG_IS_2POW(flags)) mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED; @@ -334,11 +321,6 @@ static ERTS_INLINE void mseg_destroy(ErtsMsegAllctr_t *ma, Uint flags, MemKind* mk, void *seg_p, UWord size) { Uint32 mmap_flags = 0; -#if HALFWORD_HEAP - mmap_flags |= ((mk == &ma->low_mem) - ? ERTS_MMAPFLG_SUPERCARRIER_ONLY - : ERTS_MMAPFLG_OS_ONLY); -#endif if (MSEG_FLG_IS_2POW(flags)) mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED; @@ -360,11 +342,6 @@ mseg_recreate(ErtsMsegAllctr_t *ma, Uint flags, MemKind* mk, void *old_seg, UWor #endif void *new_seg; Uint32 mmap_flags = 0; -#if HALFWORD_HEAP - mmap_flags |= ((mk == &ma->low_mem) - ? ERTS_MMAPFLG_SUPERCARRIER_ONLY - : ERTS_MMAPFLG_OS_ONLY); -#endif if (MSEG_FLG_IS_2POW(flags)) mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED; @@ -768,11 +745,7 @@ void erts_mseg_clear_cache(void) { static ERTS_INLINE MemKind* memkind(ErtsMsegAllctr_t *ma, const ErtsMsegOpt_t *opt) { -#if HALFWORD_HEAP - return opt->low_mem ? 
&ma->low_mem : &ma->hi_mem; -#else return &ma->the_mem; -#endif } static void * @@ -1326,12 +1299,7 @@ erts_mseg_info(int ix, ERTS_MSEG_LOCK(ma); ERTS_DBG_MA_CHK_THR_ACCESS(ma); -#if HALFWORD_HEAP - values[n++] = info_memkind(ma, &ma->low_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp); - values[n++] = info_memkind(ma, &ma->hi_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp); -#else values[n++] = info_memkind(ma, &ma->the_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp); -#endif if (hpp || szp) res = bld_2tup_list(hpp, szp, n, atoms, values); @@ -1488,15 +1456,6 @@ erts_mseg_init(ErtsMsegInit_t *init) erts_mtx_init(&init_atoms_mutex, "mseg_init_atoms"); -#if HALFWORD_HEAP - if (sizeof(void *) != 8) - erl_exit(-1,"Halfword emulator cannot be run in 32bit mode"); - - init->mmap.virtual_range.start = (char *) sbrk(0); - init->mmap.virtual_range.end = (char *) 0x100000000UL; - init->mmap.sco = 0; -#endif - erts_mmap_init(&init->mmap); if (!IS_2POW(GET_PAGE_SIZE)) @@ -1531,12 +1490,7 @@ erts_mseg_init(ErtsMsegInit_t *init) ma->mk_list = NULL; -#if HALFWORD_HEAP - mem_kind_init(ma, &ma->low_mem, "low memory"); - mem_kind_init(ma, &ma->hi_mem, "high memory"); -#else mem_kind_init(ma, &ma->the_mem, "all memory"); -#endif sys_memzero((void *) &ma->calls, sizeof(ErtsMsegCalls)); } diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h index ba04e919fc..656484702d 100644 --- a/erts/emulator/sys/common/erl_mseg.h +++ b/erts/emulator/sys/common/erl_mseg.h @@ -87,9 +87,6 @@ typedef struct { UWord abs_shrink_th; UWord rel_shrink_th; int sched_spec; -#if HALFWORD_HEAP - int low_mem; -#endif } ErtsMsegOpt_t; extern const ErtsMsegOpt_t erts_mseg_default_opt; -- cgit v1.2.3 From 298210017aedaf01b2eb477f4375af4d30822c71 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn-Egil=20Dahlberg?= Date: Mon, 22 Jun 2015 16:53:23 +0200 Subject: erts: Remove halfword MemKind mseg --- erts/emulator/sys/common/erl_mseg.c | 332 +++++++++++++++--------------------- 1 file changed, 135 insertions(+), 197 deletions(-) (limited to 'erts/emulator/sys/common') diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c index 8073d5b908..7eb8a4a460 100644 --- a/erts/emulator/sys/common/erl_mseg.c +++ b/erts/emulator/sys/common/erl_mseg.c @@ -99,8 +99,6 @@ static const int debruijn[32] = { static int atoms_initialized; -typedef struct mem_kind_t MemKind; - const ErtsMsegOpt_t erts_mseg_default_opt = { 1, /* Use cache */ 1, /* Preserv data */ @@ -139,7 +137,14 @@ struct cache_t_ { typedef struct ErtsMsegAllctr_t_ ErtsMsegAllctr_t; -struct mem_kind_t { +struct ErtsMsegAllctr_t_ { + int ix; + + int is_init_done; + int is_thread_safe; + erts_mtx_t mtx; + + int is_cache_check_scheduled; cache_t cache[MAX_CACHE_SIZE]; cache_t cache_unpowered_node; @@ -165,24 +170,6 @@ struct mem_kind_t { } max_ever; } segments; - ErtsMsegAllctr_t *ma; - const char* name; - MemKind* next; -};/*MemKind*/ - -struct ErtsMsegAllctr_t_ { - int ix; - - int is_init_done; - int is_thread_safe; - erts_mtx_t mtx; - - int is_cache_check_scheduled; - - MemKind* mk_list; - - MemKind the_mem; - Uint max_cache_size; Uint abs_max_cache_bad_fit; Uint rel_max_cache_bad_fit; @@ -294,7 +281,7 @@ schedule_cache_check(ErtsMsegAllctr_t *ma) { /* #define ERTS_PRINT_ERTS_MMAP */ static ERTS_INLINE void * -mseg_create(ErtsMsegAllctr_t *ma, Uint flags, MemKind* mk, UWord *sizep) +mseg_create(ErtsMsegAllctr_t *ma, Uint flags, UWord *sizep) { #ifdef ERTS_PRINT_ERTS_MMAP UWord req_size = *sizep; @@ 
-318,7 +305,7 @@ mseg_create(ErtsMsegAllctr_t *ma, Uint flags, MemKind* mk, UWord *sizep) } static ERTS_INLINE void -mseg_destroy(ErtsMsegAllctr_t *ma, Uint flags, MemKind* mk, void *seg_p, UWord size) { +mseg_destroy(ErtsMsegAllctr_t *ma, Uint flags, void *seg_p, UWord size) { Uint32 mmap_flags = 0; if (MSEG_FLG_IS_2POW(flags)) @@ -335,7 +322,7 @@ mseg_destroy(ErtsMsegAllctr_t *ma, Uint flags, MemKind* mk, void *seg_p, UWord s } static ERTS_INLINE void * -mseg_recreate(ErtsMsegAllctr_t *ma, Uint flags, MemKind* mk, void *old_seg, UWord old_size, UWord *sizep) +mseg_recreate(ErtsMsegAllctr_t *ma, Uint flags, void *old_seg, UWord old_size, UWord *sizep) { #ifdef ERTS_PRINT_ERTS_MMAP UWord req_size = *sizep; @@ -369,11 +356,8 @@ do { \ || erts_smp_thr_progress_is_blocking() \ || ERTS_IS_CRASH_DUMPING); \ } while (0) -#define ERTS_DBG_MK_CHK_THR_ACCESS(MK) \ - ERTS_DBG_MA_CHK_THR_ACCESS((MK)->ma) #else #define ERTS_DBG_MA_CHK_THR_ACCESS(MA) -#define ERTS_DBG_MK_CHK_THR_ACCESS(MK) #endif /* Cache interface */ @@ -386,10 +370,10 @@ static ERTS_INLINE void mseg_cache_clear_node(cache_t *c) { c->prev = c; } -static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, UWord size, Uint flags) { +static ERTS_INLINE int cache_bless_segment(ErtsMsegAllctr_t *ma, void *seg, UWord size, Uint flags) { cache_t *c; - ERTS_DBG_MK_CHK_THR_ACCESS(mk); + ERTS_DBG_MA_CHK_THR_ACCESS(ma); ASSERT(!MSEG_FLG_IS_2POW(flags) || (MSEG_FLG_IS_2POW(flags) && MAP_IS_ALIGNED(seg) && IS_2POW(size))); @@ -398,11 +382,11 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, UWord size, U * Large blocks has no such cache and it is up to mseg to cache them to speed things up. */ - if (!erts_circleq_is_empty(&(mk->cache_free))) { + if (!erts_circleq_is_empty(&(ma->cache_free))) { /* We have free slots, use one to cache the segment */ - c = erts_circleq_head(&(mk->cache_free)); + c = erts_circleq_head(&(ma->cache_free)); erts_circleq_remove(c); c->seg = seg; @@ -414,29 +398,28 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, UWord size, U ASSERT(ix < CACHE_AREAS); ASSERT((1 << (ix + MSEG_ALIGN_BITS)) == size); - erts_circleq_push_head(&(mk->cache_powered_node[ix]), c); + erts_circleq_push_head(&(ma->cache_powered_node[ix]), c); } else - erts_circleq_push_head(&(mk->cache_unpowered_node), c); + erts_circleq_push_head(&(ma->cache_unpowered_node), c); - mk->cache_size++; - ASSERT(mk->cache_size <= mk->ma->max_cache_size); + ma->cache_size++; return 1; - } else if (!MSEG_FLG_IS_2POW(flags) && !erts_circleq_is_empty(&(mk->cache_unpowered_node))) { + } else if (!MSEG_FLG_IS_2POW(flags) && !erts_circleq_is_empty(&(ma->cache_unpowered_node))) { /* No free slots. 
* Evict oldest slot from unpowered cache so we can cache an unpowered (sbc) segment */ - c = erts_circleq_tail(&(mk->cache_unpowered_node)); + c = erts_circleq_tail(&(ma->cache_unpowered_node)); erts_circleq_remove(c); - mseg_destroy(mk->ma, ERTS_MSEG_FLG_NONE, mk, c->seg, c->size); + mseg_destroy(ma, ERTS_MSEG_FLG_NONE, c->seg, c->size); mseg_cache_clear_node(c); c->seg = seg; c->size = size; - erts_circleq_push_head(&(mk->cache_unpowered_node), c); + erts_circleq_push_head(&(ma->cache_unpowered_node), c); return 1; } else if (!MSEG_FLG_IS_2POW(flags)) { @@ -450,20 +433,20 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, UWord size, U int i; for( i = 0; i < CACHE_AREAS; i++) { - if (erts_circleq_is_empty(&(mk->cache_powered_node[i]))) + if (erts_circleq_is_empty(&(ma->cache_powered_node[i]))) continue; - c = erts_circleq_tail(&(mk->cache_powered_node[i])); + c = erts_circleq_tail(&(ma->cache_powered_node[i])); erts_circleq_remove(c); - mseg_destroy(mk->ma, ERTS_MSEG_FLG_2POW, mk, c->seg, c->size); + mseg_destroy(ma, ERTS_MSEG_FLG_2POW, c->seg, c->size); mseg_cache_clear_node(c); c->seg = seg; c->size = size; - erts_circleq_push_head(&(mk->cache_unpowered_node), c); + erts_circleq_push_head(&(ma->cache_unpowered_node), c); return 1; } @@ -472,11 +455,11 @@ static ERTS_INLINE int cache_bless_segment(MemKind *mk, void *seg, UWord size, U return 0; } -static ERTS_INLINE void *cache_get_segment(MemKind *mk, UWord *size_p, Uint flags) { +static ERTS_INLINE void *cache_get_segment(ErtsMsegAllctr_t *ma, UWord *size_p, Uint flags) { UWord size = *size_p; - ERTS_DBG_MK_CHK_THR_ACCESS(mk); + ERTS_DBG_MA_CHK_THR_ACCESS(ma); if (MSEG_FLG_IS_2POW(flags)) { @@ -489,10 +472,10 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, UWord *size_p, Uint flag for( i = ix; i < CACHE_AREAS; i++) { - if (erts_circleq_is_empty(&(mk->cache_powered_node[i]))) + if (erts_circleq_is_empty(&(ma->cache_powered_node[i]))) continue; - c = erts_circleq_head(&(mk->cache_powered_node[i])); + c = erts_circleq_head(&(ma->cache_powered_node[i])); erts_circleq_remove(c); ASSERT(IS_2POW(c->size)); @@ -501,31 +484,31 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, UWord *size_p, Uint flag csize = c->size; seg = (char*) c->seg; - mk->cache_size--; - mk->cache_hits++; + ma->cache_size--; + ma->cache_hits++; /* link to free cache list */ mseg_cache_clear_node(c); - erts_circleq_push_head(&(mk->cache_free), c); + erts_circleq_push_head(&(ma->cache_free), c); - ASSERT(!(mk->cache_size < 0)); + ASSERT(!(ma->cache_size < 0)); if (csize != size) - mseg_destroy(mk->ma, ERTS_MSEG_FLG_2POW, mk, seg + size, csize - size); + mseg_destroy(ma, ERTS_MSEG_FLG_2POW, seg + size, csize - size); return seg; } } - else if (!erts_circleq_is_empty(&(mk->cache_unpowered_node))) { + else if (!erts_circleq_is_empty(&(ma->cache_unpowered_node))) { void *seg; cache_t *c; cache_t *best = NULL; UWord bdiff = 0; UWord csize; - UWord bad_max_abs = mk->ma->abs_max_cache_bad_fit; - UWord bad_max_rel = mk->ma->rel_max_cache_bad_fit; + UWord bad_max_abs = ma->abs_max_cache_bad_fit; + UWord bad_max_rel = ma->rel_max_cache_bad_fit; - erts_circleq_foreach(c, &(mk->cache_unpowered_node)) { + erts_circleq_foreach(c, &(ma->cache_unpowered_node)) { csize = c->size; if (csize >= size) { if (((csize - size)*100 < bad_max_rel*size) && (csize - size) < bad_max_abs ) { @@ -534,11 +517,11 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, UWord *size_p, Uint flag erts_circleq_remove(c); - mk->cache_size--; - mk->cache_hits++; + 
ma->cache_size--; + ma->cache_hits++; mseg_cache_clear_node(c); - erts_circleq_push_head(&(mk->cache_free), c); + erts_circleq_push_head(&(ma->cache_free), c); *size_p = csize; @@ -561,7 +544,7 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, UWord *size_p, Uint flag ASSERT(best->seg); ASSERT(best->size > 0); - mk->cache_hits++; + ma->cache_hits++; /* Use current cache placement for remaining segment space */ @@ -585,7 +568,7 @@ static ERTS_INLINE void *cache_get_segment(MemKind *mk, UWord *size_p, Uint flag * using callbacks from aux-work in the scheduler. */ -static ERTS_INLINE Uint mseg_drop_one_memkind_cache_size(MemKind *mk, Uint flags, cache_t *head) { +static ERTS_INLINE Uint mseg_drop_one_cache_size(ErtsMsegAllctr_t *ma, Uint flags, cache_t *head) { cache_t *c = NULL; c = erts_circleq_tail(head); @@ -594,19 +577,19 @@ static ERTS_INLINE Uint mseg_drop_one_memkind_cache_size(MemKind *mk, Uint flags if (erts_mtrace_enabled) erts_mtrace_crr_free(SEGTYPE, SEGTYPE, c->seg); - mseg_destroy(mk->ma, flags, mk, c->seg, c->size); + mseg_destroy(ma, flags, c->seg, c->size); mseg_cache_clear_node(c); - erts_circleq_push_head(&(mk->cache_free), c); + erts_circleq_push_head(&(ma->cache_free), c); - mk->segments.current.watermark--; - mk->cache_size--; + ma->segments.current.watermark--; + ma->cache_size--; - ASSERT( mk->cache_size >= 0 ); + ASSERT(ma->cache_size >= 0); - return mk->cache_size; + return ma->cache_size; } -static ERTS_INLINE Uint mseg_drop_memkind_cache_size(MemKind *mk, Uint flags, cache_t *head) { +static ERTS_INLINE Uint mseg_drop_cache_size(ErtsMsegAllctr_t *ma, Uint flags, cache_t *head) { cache_t *c = NULL; while (!erts_circleq_is_empty(head)) { @@ -617,58 +600,52 @@ static ERTS_INLINE Uint mseg_drop_memkind_cache_size(MemKind *mk, Uint flags, ca if (erts_mtrace_enabled) erts_mtrace_crr_free(SEGTYPE, SEGTYPE, c->seg); - mseg_destroy(mk->ma, flags, mk, c->seg, c->size); + mseg_destroy(ma, flags, c->seg, c->size); mseg_cache_clear_node(c); - erts_circleq_push_head(&(mk->cache_free), c); - - mk->segments.current.watermark--; - mk->cache_size--; + erts_circleq_push_head(&(ma->cache_free), c); + ma->segments.current.watermark--; + ma->cache_size--; } - ASSERT( mk->cache_size >= 0 ); + ASSERT(ma->cache_size >= 0); - return mk->cache_size; + return ma->cache_size; } -/* mseg_check_memkind_cache - * - Check if we can empty some cached segments in this - * MemKind. +/* mseg_check_cache + * - Check if we can empty some cached segments in this allocator */ -static Uint mseg_check_memkind_cache(MemKind *mk) { +static Uint mseg_check_cache(ErtsMsegAllctr_t *ma) { int i; - ERTS_DBG_MK_CHK_THR_ACCESS(mk); + ERTS_DBG_MA_CHK_THR_ACCESS(ma); for (i = 0; i < CACHE_AREAS; i++) { - if (!erts_circleq_is_empty(&(mk->cache_powered_node[i]))) - return mseg_drop_one_memkind_cache_size(mk, ERTS_MSEG_FLG_2POW, &(mk->cache_powered_node[i])); + if (!erts_circleq_is_empty(&(ma->cache_powered_node[i]))) + return mseg_drop_one_cache_size(ma, ERTS_MSEG_FLG_2POW, &(ma->cache_powered_node[i])); } - if (!erts_circleq_is_empty(&(mk->cache_unpowered_node))) - return mseg_drop_one_memkind_cache_size(mk, ERTS_MSEG_FLG_NONE, &(mk->cache_unpowered_node)); + if (!erts_circleq_is_empty(&(ma->cache_unpowered_node))) + return mseg_drop_one_cache_size(ma, ERTS_MSEG_FLG_NONE, &(ma->cache_unpowered_node)); return 0; } /* mseg_cache_check * - Check if we have some cache we can purge - * in any of the memkinds. 
*/ static void mseg_cache_check(ErtsMsegAllctr_t *ma) { - MemKind* mk; Uint empty_cache = 1; ERTS_MSEG_LOCK(ma); - for (mk = ma->mk_list; mk; mk = mk->next) { - if (mseg_check_memkind_cache(mk)) - empty_cache = 0; - } + if (mseg_check_cache(ma)) + empty_cache = 0; /* If all MemKinds caches are empty, * remove aux-work callback @@ -686,7 +663,7 @@ static void mseg_cache_check(ErtsMsegAllctr_t *ma) { /* erts_mseg_cache_check * - This is a callback that is scheduled as aux-work from * schedulers and is called at some interval if we have a cache - * on this mseg-allocator and memkind. + * on this mseg-allocator. * - Purpose: Empty cache slowly so we don't collect mapped areas * and bloat memory. */ @@ -696,42 +673,32 @@ void erts_mseg_cache_check(void) { } -/* *_mseg_clear_*_cache +/* mseg_clear_cache * Remove cached segments from the allocator completely */ -static void mseg_clear_memkind_cache(MemKind *mk) { + +static void mseg_clear_cache(ErtsMsegAllctr_t *ma) { int i; + ERTS_MSEG_LOCK(ma); + ERTS_DBG_MA_CHK_THR_ACCESS(ma); /* drop pow2 caches */ for (i = 0; i < CACHE_AREAS; i++) { - if (erts_circleq_is_empty(&(mk->cache_powered_node[i]))) + if (erts_circleq_is_empty(&(ma->cache_powered_node[i]))) continue; - mseg_drop_memkind_cache_size(mk, ERTS_MSEG_FLG_2POW, &(mk->cache_powered_node[i])); - ASSERT(erts_circleq_is_empty(&(mk->cache_powered_node[i]))); + mseg_drop_cache_size(ma, ERTS_MSEG_FLG_2POW, &(ma->cache_powered_node[i])); + ASSERT(erts_circleq_is_empty(&(ma->cache_powered_node[i]))); } /* drop varied caches */ - if (!erts_circleq_is_empty(&(mk->cache_unpowered_node))) - mseg_drop_memkind_cache_size(mk, ERTS_MSEG_FLG_NONE, &(mk->cache_unpowered_node)); + if (!erts_circleq_is_empty(&(ma->cache_unpowered_node))) + mseg_drop_cache_size(ma, ERTS_MSEG_FLG_NONE, &(ma->cache_unpowered_node)); - ASSERT(erts_circleq_is_empty(&(mk->cache_unpowered_node))); - ASSERT(mk->cache_size == 0); -} - -static void mseg_clear_cache(ErtsMsegAllctr_t *ma) { - MemKind* mk; - - ERTS_MSEG_LOCK(ma); - ERTS_DBG_MA_CHK_THR_ACCESS(ma); - - - for (mk = ma->mk_list; mk; mk = mk->next) { - mseg_clear_memkind_cache(mk); - } + ASSERT(erts_circleq_is_empty(&(ma->cache_unpowered_node))); + ASSERT(ma->cache_size == 0); INC_CC(ma, clear_cache); - ERTS_MSEG_UNLOCK(ma); } @@ -740,21 +707,12 @@ void erts_mseg_clear_cache(void) { mseg_clear_cache(ERTS_MSEG_ALLCTR_IX(0)); } - - -static ERTS_INLINE MemKind* memkind(ErtsMsegAllctr_t *ma, - const ErtsMsegOpt_t *opt) -{ - return &ma->the_mem; -} - static void * mseg_alloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, UWord *size_p, Uint flags, const ErtsMsegOpt_t *opt) { UWord size; void *seg; - MemKind* mk = memkind(ma, opt); INC_CC(ma, alloc); @@ -768,10 +726,10 @@ mseg_alloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, UWord *size_p, } } - if (opt->cache && mk->cache_size > 0 && (seg = cache_get_segment(mk, &size, flags)) != NULL) + if (opt->cache && ma->cache_size > 0 && (seg = cache_get_segment(ma, &size, flags)) != NULL) goto done; - seg = mseg_create(ma, flags, mk, &size); + seg = mseg_create(ma, flags, &size); if (!seg) *size_p = 0; @@ -781,7 +739,7 @@ done: if (erts_mtrace_enabled) erts_mtrace_crr_alloc(seg, atype, ERTS_MTRACE_SEGMENT_ID, size); - ERTS_MSEG_ALLOC_STAT(mk,size); + ERTS_MSEG_ALLOC_STAT(ma,size); } return seg; @@ -792,11 +750,9 @@ static void mseg_dealloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, UWord size, Uint flags, const ErtsMsegOpt_t *opt) { - MemKind* mk = memkind(ma, opt); - - ERTS_MSEG_DEALLOC_STAT(mk,size); + ERTS_MSEG_DEALLOC_STAT(ma,size); 
- if (opt->cache && cache_bless_segment(mk, seg, size, flags)) { + if (opt->cache && cache_bless_segment(ma, seg, size, flags)) { schedule_cache_check(ma); goto done; } @@ -804,7 +760,7 @@ mseg_dealloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, UWord size, if (erts_mtrace_enabled) erts_mtrace_crr_free(atype, SEGTYPE, seg); - mseg_destroy(ma, flags, mk, seg, size); + mseg_destroy(ma, flags, seg, size); done: @@ -815,7 +771,6 @@ static void * mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, UWord old_size, UWord *new_size_p, Uint flags, const ErtsMsegOpt_t *opt) { - MemKind* mk; void *new_seg; UWord new_size; @@ -834,7 +789,6 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, return NULL; } - mk = memkind(ma, opt); new_seg = seg; if (!MSEG_FLG_IS_2POW(flags)) @@ -849,7 +803,7 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, if (new_size > old_size) { if (opt->preserv) { - new_seg = mseg_recreate(ma, flags, mk, (void *) seg, old_size, &new_size); + new_seg = mseg_recreate(ma, flags, (void *) seg, old_size, &new_size); if (!new_seg) new_size = old_size; } @@ -869,7 +823,7 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, new_size = old_size; } else { - new_seg = mseg_recreate(ma, flags, mk, (void *) seg, old_size, &new_size); + new_seg = mseg_recreate(ma, flags, (void *) seg, old_size, &new_size); if (!new_seg) new_size = old_size; } @@ -883,7 +837,7 @@ mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, ASSERT(!MSEG_FLG_IS_2POW(flags) || IS_2POW(new_size)); *new_size_p = new_size; - ERTS_MSEG_REALLOC_STAT(mk, old_size, new_size); + ERTS_MSEG_REALLOC_STAT(ma, old_size, new_size); return new_seg; } @@ -1153,63 +1107,63 @@ info_calls(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp } static Eterm -info_status(ErtsMsegAllctr_t *ma, MemKind* mk, int *print_to_p, void *print_to_arg, +info_status(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, int begin_new_max_period, Uint **hpp, Uint *szp) { Eterm res = THE_NON_VALUE; - if (mk->segments.max_ever.no < mk->segments.max.no) - mk->segments.max_ever.no = mk->segments.max.no; - if (mk->segments.max_ever.sz < mk->segments.max.sz) - mk->segments.max_ever.sz = mk->segments.max.sz; + if (ma->segments.max_ever.no < ma->segments.max.no) + ma->segments.max_ever.no = ma->segments.max.no; + if (ma->segments.max_ever.sz < ma->segments.max.sz) + ma->segments.max_ever.sz = ma->segments.max.sz; if (print_to_p) { int to = *print_to_p; void *arg = print_to_arg; - erts_print(to, arg, "cached_segments: %beu\n", mk->cache_size); - erts_print(to, arg, "cache_hits: %beu\n", mk->cache_hits); + erts_print(to, arg, "cached_segments: %beu\n", ma->cache_size); + erts_print(to, arg, "cache_hits: %beu\n", ma->cache_hits); erts_print(to, arg, "segments: %beu %beu %beu\n", - mk->segments.current.no, mk->segments.max.no, mk->segments.max_ever.no); + ma->segments.current.no, ma->segments.max.no, ma->segments.max_ever.no); erts_print(to, arg, "segments_size: %beu %beu %beu\n", - mk->segments.current.sz, mk->segments.max.sz, mk->segments.max_ever.sz); + ma->segments.current.sz, ma->segments.max.sz, ma->segments.max_ever.sz); erts_print(to, arg, "segments_watermark: %beu\n", - mk->segments.current.watermark); + ma->segments.current.watermark); } if (hpp || szp) { res = NIL; add_2tup(hpp, szp, &res, am.segments_watermark, - bld_unstable_uint(hpp, szp, mk->segments.current.watermark)); + bld_unstable_uint(hpp, szp, ma->segments.current.watermark)); 
add_4tup(hpp, szp, &res, am.segments_size, - bld_unstable_uint(hpp, szp, mk->segments.current.sz), - bld_unstable_uint(hpp, szp, mk->segments.max.sz), - bld_unstable_uint(hpp, szp, mk->segments.max_ever.sz)); + bld_unstable_uint(hpp, szp, ma->segments.current.sz), + bld_unstable_uint(hpp, szp, ma->segments.max.sz), + bld_unstable_uint(hpp, szp, ma->segments.max_ever.sz)); add_4tup(hpp, szp, &res, am.segments, - bld_unstable_uint(hpp, szp, mk->segments.current.no), - bld_unstable_uint(hpp, szp, mk->segments.max.no), - bld_unstable_uint(hpp, szp, mk->segments.max_ever.no)); + bld_unstable_uint(hpp, szp, ma->segments.current.no), + bld_unstable_uint(hpp, szp, ma->segments.max.no), + bld_unstable_uint(hpp, szp, ma->segments.max_ever.no)); add_2tup(hpp, szp, &res, am.cache_hits, - bld_unstable_uint(hpp, szp, mk->cache_hits)); + bld_unstable_uint(hpp, szp, ma->cache_hits)); add_2tup(hpp, szp, &res, am.cached_segments, - bld_unstable_uint(hpp, szp, mk->cache_size)); + bld_unstable_uint(hpp, szp, ma->cache_size)); } if (begin_new_max_period) { - mk->segments.max.no = mk->segments.current.no; - mk->segments.max.sz = mk->segments.current.sz; + ma->segments.max.no = ma->segments.current.no; + ma->segments.max.sz = ma->segments.current.sz; } return res; } -static Eterm info_memkind(ErtsMsegAllctr_t *ma, MemKind* mk, int *print_to_p, void *print_to_arg, +static Eterm info_memkind(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, int begin_max_per, Uint **hpp, Uint *szp) { Eterm res = THE_NON_VALUE; @@ -1217,15 +1171,15 @@ static Eterm info_memkind(ErtsMsegAllctr_t *ma, MemKind* mk, int *print_to_p, vo Eterm values[3]; if (print_to_p) { - erts_print(*print_to_p, print_to_arg, "memory kind: %s\n", mk->name); + erts_print(*print_to_p, print_to_arg, "memory kind: %s\n", "all memory"); } if (hpp || szp) { atoms[0] = am.name; atoms[1] = am.status; atoms[2] = am.calls; - values[0] = erts_bld_string(hpp, szp, mk->name); + values[0] = erts_bld_string(hpp, szp, "all memory"); } - values[1] = info_status(ma, mk, print_to_p, print_to_arg, begin_max_per, hpp, szp); + values[1] = info_status(ma, print_to_p, print_to_arg, begin_max_per, hpp, szp); values[2] = info_calls(ma, print_to_p, print_to_arg, hpp, szp); if (hpp || szp) @@ -1234,7 +1188,6 @@ static Eterm info_memkind(ErtsMsegAllctr_t *ma, MemKind* mk, int *print_to_p, vo return res; } - static Eterm info_version(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp) { @@ -1299,7 +1252,7 @@ erts_mseg_info(int ix, ERTS_MSEG_LOCK(ma); ERTS_DBG_MA_CHK_THR_ACCESS(ma); - values[n++] = info_memkind(ma, &ma->the_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp); + values[n++] = info_memkind(ma, print_to_p, print_to_arg, begin_max_per, hpp, szp); if (hpp || szp) res = bld_2tup_list(hpp, szp, n, atoms, values); @@ -1376,13 +1329,10 @@ Uint erts_mseg_no(const ErtsMsegOpt_t *opt) { ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt); - MemKind* mk; - Uint n = 0; + Uint n; ERTS_MSEG_LOCK(ma); ERTS_DBG_MA_CHK_THR_ACCESS(ma); - for (mk=ma->mk_list; mk; mk=mk->next) { - n += mk->segments.current.no; - } + n = ma->segments.current.no; ERTS_MSEG_UNLOCK(ma); return n; } @@ -1394,16 +1344,16 @@ erts_mseg_unit_size(void) } -static void mem_kind_init(ErtsMsegAllctr_t *ma, MemKind* mk, const char* name) +static void mem_cache_init(ErtsMsegAllctr_t *ma) { int i; /* Clear all cache headers */ - mseg_cache_clear_node(&(mk->cache_free)); - mseg_cache_clear_node(&(mk->cache_unpowered_node)); + mseg_cache_clear_node(&(ma->cache_free)); + 
mseg_cache_clear_node(&(ma->cache_unpowered_node)); for (i = 0; i < CACHE_AREAS; i++) { - mseg_cache_clear_node(&(mk->cache_powered_node[i])); + mseg_cache_clear_node(&(ma->cache_powered_node[i])); } /* Populate cache free list */ @@ -1411,25 +1361,20 @@ static void mem_kind_init(ErtsMsegAllctr_t *ma, MemKind* mk, const char* name) ASSERT(ma->max_cache_size <= MAX_CACHE_SIZE); for (i = 0; i < ma->max_cache_size; i++) { - mseg_cache_clear_node(&(mk->cache[i])); - erts_circleq_push_head(&(mk->cache_free), &(mk->cache[i])); + mseg_cache_clear_node(&(ma->cache[i])); + erts_circleq_push_head(&(ma->cache_free), &(ma->cache[i])); } - mk->cache_size = 0; - mk->cache_hits = 0; - - mk->segments.current.watermark = 0; - mk->segments.current.no = 0; - mk->segments.current.sz = 0; - mk->segments.max.no = 0; - mk->segments.max.sz = 0; - mk->segments.max_ever.no = 0; - mk->segments.max_ever.sz = 0; - - mk->ma = ma; - mk->name = name; - mk->next = ma->mk_list; - ma->mk_list = mk; + ma->cache_size = 0; + ma->cache_hits = 0; + + ma->segments.current.watermark = 0; + ma->segments.current.no = 0; + ma->segments.current.sz = 0; + ma->segments.max.no = 0; + ma->segments.max.sz = 0; + ma->segments.max_ever.no = 0; + ma->segments.max_ever.sz = 0; } void @@ -1488,9 +1433,7 @@ erts_mseg_init(ErtsMsegInit_t *init) if (ma->max_cache_size > MAX_CACHE_SIZE) ma->max_cache_size = MAX_CACHE_SIZE; - ma->mk_list = NULL; - - mem_kind_init(ma, &ma->the_mem, "all memory"); + mem_cache_init(ma); sys_memzero((void *) &ma->calls, sizeof(ErtsMsegCalls)); } @@ -1499,13 +1442,8 @@ erts_mseg_init(ErtsMsegInit_t *init) static ERTS_INLINE Uint tot_cache_size(ErtsMsegAllctr_t *ma) { - MemKind* mk; - Uint sz = 0; ERTS_DBG_MA_CHK_THR_ACCESS(ma); - for (mk=ma->mk_list; mk; mk=mk->next) { - sz += mk->cache_size; - } - return sz; + return ma->cache_size; } /* -- cgit v1.2.3 From c431a065ba515d27830f01c852f70940efb3003b Mon Sep 17 00:00:00 2001 From: Lukas Larsson Date: Thu, 2 Jul 2015 11:13:32 +0200 Subject: ose: Remove all code related to the OSE port The OSE port is no longer supported and this commit removed it and any changes related to it. The things that were general improvements have been left in the code. 
--- erts/emulator/sys/common/erl_check_io.c | 24 ------------------------ erts/emulator/sys/common/erl_poll.h | 22 ++-------------------- 2 files changed, 2 insertions(+), 44 deletions(-) (limited to 'erts/emulator/sys/common') diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c index 105b129065..f87196d724 100644 --- a/erts/emulator/sys/common/erl_check_io.c +++ b/erts/emulator/sys/common/erl_check_io.c @@ -1337,11 +1337,7 @@ print_select_op(erts_dsprintf_buf_t *dsbufp, { Port *pp = erts_drvport2port(ix); erts_dsprintf(dsbufp, -#ifdef __OSE__ - "driver_select(%p, %d,%s%s%s%s | %d, %d) " -#else "driver_select(%p, %d,%s%s%s%s, %d) " -#endif "by ", ix, (int) GET_FD(fd), @@ -1861,25 +1857,6 @@ stale_drv_select(Eterm id, ErtsDrvEventState *state, int mode) #ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS -#ifdef __OSE__ -static SafeHashValue drv_ev_state_hash(void *des) -{ - ErtsSysFdType fd = ((ErtsDrvEventState *) des)->fd; - /* We use hash on signo ^ id in order for steal to happen when the - same signo + fd is selected on by two different ports */ - SafeHashValue val = (SafeHashValue)(fd->signo ^ fd->id); - return val ^ (val >> 8); -} - -static int drv_ev_state_cmp(void *des1, void *des2) -{ - ErtsSysFdType fd1 = ((ErtsDrvEventState *) des1)->fd; - ErtsSysFdType fd2 = ((ErtsDrvEventState *) des2)->fd; - if (fd1->signo == fd2->signo && fd1->id == fd2->id) - return 0; - return 1; -} -#else /* !__OSE__ && !ERTS_SYS_CONTINOUS_FD_NUMBERS i.e. probably windows */ static SafeHashValue drv_ev_state_hash(void *des) { SafeHashValue val = (SafeHashValue) ((ErtsDrvEventState *) des)->fd; @@ -1891,7 +1868,6 @@ static int drv_ev_state_cmp(void *des1, void *des2) return ( ((ErtsDrvEventState *) des1)->fd == ((ErtsDrvEventState *) des2)->fd ? 
0 : 1); } -#endif static void *drv_ev_state_alloc(void *des_tmpl) { diff --git a/erts/emulator/sys/common/erl_poll.h b/erts/emulator/sys/common/erl_poll.h index 19ce582154..6d8aef822e 100644 --- a/erts/emulator/sys/common/erl_poll.h +++ b/erts/emulator/sys/common/erl_poll.h @@ -93,7 +93,7 @@ # if defined(ERTS_USE_POLL) # undef ERTS_POLL_USE_POLL # define ERTS_POLL_USE_POLL 1 -# elif !defined(__WIN32__) && !defined(__OSE__) +# elif !defined(__WIN32__) # undef ERTS_POLL_USE_SELECT # define ERTS_POLL_USE_SELECT 1 # endif @@ -104,31 +104,13 @@ typedef Uint32 ErtsPollEvents; #undef ERTS_POLL_EV_E2N -#if defined(__WIN32__) || defined(__OSE__) /* --- win32 or ose -------- */ +#if defined(__WIN32__) /* --- win32 --------------------------------------- */ #define ERTS_POLL_EV_IN 1 #define ERTS_POLL_EV_OUT 2 #define ERTS_POLL_EV_ERR 4 #define ERTS_POLL_EV_NVAL 8 -#ifdef __OSE__ - -typedef struct ErtsPollOseMsgList_ { - struct ErtsPollOseMsgList_ *next; - union SIGNAL *data; -} ErtsPollOseMsgList; - -struct erts_sys_fd_type { - SIGSELECT signo; - ErlDrvOseEventId id; - ErtsPollOseMsgList *msgs; - ErlDrvOseEventId (*resolve_signal)(union SIGNAL *sig); - ethr_mutex mtx; - void *extra; -}; - -#endif - #elif ERTS_POLL_USE_EPOLL /* --- epoll ------------------------------- */ #include -- cgit v1.2.3 From 997881b9046d3468685c545dc917e8204e0a9ef6 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Thu, 3 Sep 2015 17:57:30 +0200 Subject: erts: Remove unused erts_have_erts_mmap --- erts/emulator/sys/common/erl_mmap.c | 6 ------ erts/emulator/sys/common/erl_mmap.h | 3 --- 2 files changed, 9 deletions(-) (limited to 'erts/emulator/sys/common') diff --git a/erts/emulator/sys/common/erl_mmap.c b/erts/emulator/sys/common/erl_mmap.c index e6d0e1e281..eeaf96dd93 100644 --- a/erts/emulator/sys/common/erl_mmap.c +++ b/erts/emulator/sys/common/erl_mmap.c @@ -67,7 +67,6 @@ (((UWord) (PTR)) - ((UWord) mmap_state.sua.bot) \ < ((UWord) mmap_state.sua.top) - ((UWord) mmap_state.sua.bot))) -int erts_have_erts_mmap; UWord erts_page_inv_mask; #if defined(DEBUG) || defined(ERTS_MMAP_DEBUG) @@ -2131,8 +2130,6 @@ erts_mmap_init(ErtsMMapInit *init) ERTS_MMAP_OP_RINGBUF_INIT(); - erts_have_erts_mmap = 0; - mmap_state.supercarrier = 0; mmap_state.reserve_physical = reserve_noop; mmap_state.unreserve_physical = unreserve_noop; @@ -2206,8 +2203,6 @@ erts_mmap_init(ErtsMMapInit *init) } #endif } - if (!mmap_state.no_os_mmap) - erts_have_erts_mmap |= ERTS_HAVE_ERTS_OS_MMAP; #endif mmap_state.no.free_seg_descs = 0; @@ -2291,7 +2286,6 @@ erts_mmap_init(ErtsMMapInit *init) init_free_seg_map(&mmap_state.sua.map, SZ_REVERSE_ADDR_ORDER); mmap_state.supercarrier = 1; - erts_have_erts_mmap |= ERTS_HAVE_ERTS_SUPERCARRIER_MMAP; mmap_state.desc.new_area_hint = end; diff --git a/erts/emulator/sys/common/erl_mmap.h b/erts/emulator/sys/common/erl_mmap.h index 66619c5161..51f830d045 100644 --- a/erts/emulator/sys/common/erl_mmap.h +++ b/erts/emulator/sys/common/erl_mmap.h @@ -30,9 +30,6 @@ #define ERTS_MMAPFLG_SUPERCARRIER_ONLY (((Uint32) 1) << 1) #define ERTS_MMAPFLG_SUPERALIGNED (((Uint32) 1) << 2) -#define ERTS_HAVE_ERTS_OS_MMAP (1 << 0) -#define ERTS_HAVE_ERTS_SUPERCARRIER_MMAP (1 << 1) -extern int erts_have_erts_mmap; extern UWord erts_page_inv_mask; typedef struct { -- cgit v1.2.3 From 1fd4809777931a4dc166fd9f1b552317a385cd62 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Mon, 7 Sep 2015 19:58:47 +0200 Subject: erts: Fix strangeness in treatment of MSEG_ALIGN_BITS And remove old case of using only page alignment (12 bits). 
--- erts/emulator/sys/common/erl_mseg.h | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'erts/emulator/sys/common') diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h index 656484702d..677f8ea4ed 100644 --- a/erts/emulator/sys/common/erl_mseg.h +++ b/erts/emulator/sys/common/erl_mseg.h @@ -42,16 +42,6 @@ #if ERTS_HAVE_MSEG_SUPER_ALIGNED # define MSEG_ALIGN_BITS ERTS_MMAP_SUPERALIGNED_BITS -#else -/* If we don't use super aligned multiblock carriers - * we will mmap with page size alignment (and thus use corresponding - * align bits). - * - * Current implementation needs this to be a constant and - * only uses this for user dev testing so setting page size - * to 4096 (12 bits) is fine. - */ -# define MSEG_ALIGN_BITS (12) #endif #if HAVE_ERTS_MSEG -- cgit v1.2.3 From 63466c5522ed58b6e73e35dc29c7c7584f073768 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Wed, 2 Sep 2015 18:35:29 +0200 Subject: erts: Refactor erl_mmap to allow several mapper instances --- erts/emulator/sys/common/erl_mmap.c | 550 ++++++++++++++++++------------------ erts/emulator/sys/common/erl_mmap.h | 19 +- erts/emulator/sys/common/erl_mseg.c | 11 +- 3 files changed, 295 insertions(+), 285 deletions(-) (limited to 'erts/emulator/sys/common') diff --git a/erts/emulator/sys/common/erl_mmap.c b/erts/emulator/sys/common/erl_mmap.c index eeaf96dd93..e8ff2bf752 100644 --- a/erts/emulator/sys/common/erl_mmap.c +++ b/erts/emulator/sys/common/erl_mmap.c @@ -51,21 +51,21 @@ #endif /* - * `mmap_state.sa.bot` and `mmap_state.sua.top` are read only after + * `mm->sa.bot` and `mm->sua.top` are read only after * initialization, but the other pointers are not; i.e., only * ERTS_MMAP_IN_SUPERCARRIER() is allowed without the mutex held. */ #define ERTS_MMAP_IN_SUPERCARRIER(PTR) \ - (((UWord) (PTR)) - ((UWord) mmap_state.sa.bot) \ - < ((UWord) mmap_state.sua.top) - ((UWord) mmap_state.sa.bot)) + (((UWord) (PTR)) - ((UWord) mm->sa.bot) \ + < ((UWord) mm->sua.top) - ((UWord) mm->sa.bot)) #define ERTS_MMAP_IN_SUPERALIGNED_AREA(PTR) \ - (ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&mmap_state.mtx)), \ - (((UWord) (PTR)) - ((UWord) mmap_state.sa.bot) \ - < ((UWord) mmap_state.sa.top) - ((UWord) mmap_state.sa.bot))) + (ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&mm->mtx)), \ + (((UWord) (PTR)) - ((UWord) mm->sa.bot) \ + < ((UWord) mm->sa.top) - ((UWord) mm->sa.bot))) #define ERTS_MMAP_IN_SUPERUNALIGNED_AREA(PTR) \ - (ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&mmap_state.mtx)), \ - (((UWord) (PTR)) - ((UWord) mmap_state.sua.bot) \ - < ((UWord) mmap_state.sua.top) - ((UWord) mmap_state.sua.bot))) + (ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&mm->mtx)), \ + (((UWord) (PTR)) - ((UWord) mm->sua.bot) \ + < ((UWord) mm->sua.top) - ((UWord) mm->sua.bot))) UWord erts_page_inv_mask; @@ -196,10 +196,10 @@ static ErtsMMapOp mmap_ops[ERTS_MMAP_OP_RINGBUF_SZ]; #define ERTS_MMAP_OP_LCK(RES, IN_SZ, OUT_SZ) \ do { \ - erts_smp_mtx_lock(&mmap_state.mtx); \ + erts_smp_mtx_lock(&mm->mtx); \ ERTS_MMAP_OP_START((IN_SZ)); \ ERTS_MMAP_OP_END((RES), (OUT_SZ)); \ - erts_smp_mtx_unlock(&mmap_state.mtx); \ + erts_smp_mtx_unlock(&mm->mtx); \ } while (0) #define ERTS_MUNMAP_OP(PTR, SZ) \ @@ -218,9 +218,9 @@ static ErtsMMapOp mmap_ops[ERTS_MMAP_OP_RINGBUF_SZ]; #define ERTS_MUNMAP_OP_LCK(PTR, SZ) \ do { \ - erts_smp_mtx_lock(&mmap_state.mtx); \ + erts_smp_mtx_lock(&mm->mtx); \ ERTS_MUNMAP_OP((PTR), (SZ)); \ - erts_smp_mtx_unlock(&mmap_state.mtx); \ + erts_smp_mtx_unlock(&mm->mtx); \ } while (0) #define ERTS_MREMAP_OP_START(OLD_PTR, 
OLD_SZ, IN_SZ) \ @@ -246,10 +246,10 @@ static ErtsMMapOp mmap_ops[ERTS_MMAP_OP_RINGBUF_SZ]; #define ERTS_MREMAP_OP_LCK(RES, OLD_PTR, OLD_SZ, IN_SZ, OUT_SZ) \ do { \ - erts_smp_mtx_lock(&mmap_state.mtx); \ + erts_smp_mtx_lock(&mm->mtx); \ ERTS_MREMAP_OP_START((OLD_PTR), (OLD_SZ), (IN_SZ)); \ ERTS_MREMAP_OP_END((RES), (OUT_SZ)); \ - erts_smp_mtx_unlock(&mmap_state.mtx); \ + erts_smp_mtx_unlock(&mm->mtx); \ } while (0) #define ERTS_MMAP_OP_ABORT() \ @@ -293,7 +293,7 @@ typedef struct { Uint nseg; }ErtsFreeSegMap; -static struct { +struct ErtsMemMapper_ { int (*reserve_physical)(char *, UWord); void (*unreserve_physical)(char *, UWord); int supercarrier; @@ -345,54 +345,56 @@ static struct { UWord used; } os; } size; -} mmap_state; +}; + +ErtsMemMapper erts_dflt_mmapper; #define ERTS_MMAP_SIZE_SC_SA_INC(SZ) \ do { \ - mmap_state.size.supercarrier.used.total += (SZ); \ - mmap_state.size.supercarrier.used.sa += (SZ); \ - ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.total \ - <= mmap_state.size.supercarrier.total); \ - ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.sa \ - <= mmap_state.size.supercarrier.used.total); \ + mm->size.supercarrier.used.total += (SZ); \ + mm->size.supercarrier.used.sa += (SZ); \ + ERTS_MMAP_ASSERT(mm->size.supercarrier.used.total \ + <= mm->size.supercarrier.total); \ + ERTS_MMAP_ASSERT(mm->size.supercarrier.used.sa \ + <= mm->size.supercarrier.used.total); \ } while (0) #define ERTS_MMAP_SIZE_SC_SA_DEC(SZ) \ do { \ - ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.total >= (SZ)); \ - mmap_state.size.supercarrier.used.total -= (SZ); \ - ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.sa >= (SZ)); \ - mmap_state.size.supercarrier.used.sa -= (SZ); \ + ERTS_MMAP_ASSERT(mm->size.supercarrier.used.total >= (SZ)); \ + mm->size.supercarrier.used.total -= (SZ); \ + ERTS_MMAP_ASSERT(mm->size.supercarrier.used.sa >= (SZ)); \ + mm->size.supercarrier.used.sa -= (SZ); \ } while (0) #define ERTS_MMAP_SIZE_SC_SUA_INC(SZ) \ do { \ - mmap_state.size.supercarrier.used.total += (SZ); \ - mmap_state.size.supercarrier.used.sua += (SZ); \ - ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.total \ - <= mmap_state.size.supercarrier.total); \ - ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.sua \ - <= mmap_state.size.supercarrier.used.total); \ + mm->size.supercarrier.used.total += (SZ); \ + mm->size.supercarrier.used.sua += (SZ); \ + ERTS_MMAP_ASSERT(mm->size.supercarrier.used.total \ + <= mm->size.supercarrier.total); \ + ERTS_MMAP_ASSERT(mm->size.supercarrier.used.sua \ + <= mm->size.supercarrier.used.total); \ } while (0) #define ERTS_MMAP_SIZE_SC_SUA_DEC(SZ) \ do { \ - ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.total >= (SZ)); \ - mmap_state.size.supercarrier.used.total -= (SZ); \ - ERTS_MMAP_ASSERT(mmap_state.size.supercarrier.used.sua >= (SZ)); \ - mmap_state.size.supercarrier.used.sua -= (SZ); \ + ERTS_MMAP_ASSERT(mm->size.supercarrier.used.total >= (SZ)); \ + mm->size.supercarrier.used.total -= (SZ); \ + ERTS_MMAP_ASSERT(mm->size.supercarrier.used.sua >= (SZ)); \ + mm->size.supercarrier.used.sua -= (SZ); \ } while (0) #define ERTS_MMAP_SIZE_OS_INC(SZ) \ do { \ - ERTS_MMAP_ASSERT(mmap_state.size.os.used + (SZ) >= (SZ)); \ - mmap_state.size.os.used += (SZ); \ + ERTS_MMAP_ASSERT(mm->size.os.used + (SZ) >= (SZ)); \ + mm->size.os.used += (SZ); \ } while (0) #define ERTS_MMAP_SIZE_OS_DEC(SZ) \ do { \ - ERTS_MMAP_ASSERT(mmap_state.size.os.used >= (SZ)); \ - mmap_state.size.os.used -= (SZ); \ + ERTS_MMAP_ASSERT(mm->size.os.used >= (SZ)); \ + mm->size.os.used -= 
(SZ); \ } while (0) static void -add_free_desc_area(char *start, char *end) +add_free_desc_area(ErtsMemMapper* mm, char *start, char *end) { ERTS_MMAP_ASSERT(end == (void *) 0 || end > start); if (sizeof(ErtsFreeSegDesc) <= ((UWord) end) - ((UWord) start)) { @@ -402,7 +404,7 @@ add_free_desc_area(char *start, char *end) no = 1; prev_desc = (ErtsFreeSegDesc *) start; - prev_desc->start = mmap_state.desc.free_list; + prev_desc->start = mm->desc.free_list; desc = (ErtsFreeSegDesc *) (start + sizeof(ErtsFreeSegDesc)); desc_end = start + 2*sizeof(ErtsFreeSegDesc); @@ -413,59 +415,59 @@ add_free_desc_area(char *start, char *end) desc_end += sizeof(ErtsFreeSegDesc); no++; } - mmap_state.desc.free_list = (char *) prev_desc; - mmap_state.no.free_seg_descs += no; + mm->desc.free_list = (char *) prev_desc; + mm->no.free_seg_descs += no; } } static ErtsFreeSegDesc * -add_unused_free_desc_area(void) +add_unused_free_desc_area(ErtsMemMapper* mm) { char *ptr; - if (!mmap_state.desc.unused_start) + if (!mm->desc.unused_start) return NULL; - ERTS_MMAP_ASSERT(mmap_state.desc.unused_end); + ERTS_MMAP_ASSERT(mm->desc.unused_end); ERTS_MMAP_ASSERT(ERTS_PAGEALIGNED_SIZE - <= mmap_state.desc.unused_end - mmap_state.desc.unused_start); + <= mm->desc.unused_end - mm->desc.unused_start); - ptr = mmap_state.desc.unused_start + ERTS_PAGEALIGNED_SIZE; - add_free_desc_area(mmap_state.desc.unused_start, ptr); + ptr = mm->desc.unused_start + ERTS_PAGEALIGNED_SIZE; + add_free_desc_area(mm, mm->desc.unused_start, ptr); - if ((mmap_state.desc.unused_end - ptr) >= ERTS_PAGEALIGNED_SIZE) - mmap_state.desc.unused_start = ptr; + if ((mm->desc.unused_end - ptr) >= ERTS_PAGEALIGNED_SIZE) + mm->desc.unused_start = ptr; else - mmap_state.desc.unused_end = mmap_state.desc.unused_start = NULL; + mm->desc.unused_end = mm->desc.unused_start = NULL; - ERTS_MMAP_ASSERT(mmap_state.desc.free_list); - return (ErtsFreeSegDesc *) mmap_state.desc.free_list; + ERTS_MMAP_ASSERT(mm->desc.free_list); + return (ErtsFreeSegDesc *) mm->desc.free_list; } static ERTS_INLINE ErtsFreeSegDesc * -alloc_desc(void) +alloc_desc(ErtsMemMapper* mm) { ErtsFreeSegDesc *res; - res = (ErtsFreeSegDesc *) mmap_state.desc.free_list; + res = (ErtsFreeSegDesc *) mm->desc.free_list; if (!res) { - res = add_unused_free_desc_area(); + res = add_unused_free_desc_area(mm); if (!res) return NULL; } - mmap_state.desc.free_list = res->start; - ASSERT(mmap_state.no.free_segs.curr < mmap_state.no.free_seg_descs); - mmap_state.no.free_segs.curr++; - if (mmap_state.no.free_segs.max < mmap_state.no.free_segs.curr) - mmap_state.no.free_segs.max = mmap_state.no.free_segs.curr; + mm->desc.free_list = res->start; + ASSERT(mm->no.free_segs.curr < mm->no.free_seg_descs); + mm->no.free_segs.curr++; + if (mm->no.free_segs.max < mm->no.free_segs.curr) + mm->no.free_segs.max = mm->no.free_segs.curr; return res; } static ERTS_INLINE void -free_desc(ErtsFreeSegDesc *desc) +free_desc(ErtsMemMapper* mm, ErtsFreeSegDesc *desc) { - desc->start = mmap_state.desc.free_list; - mmap_state.desc.free_list = (char *) desc; - ERTS_MMAP_ASSERT(mmap_state.no.free_segs.curr > 0); - mmap_state.no.free_segs.curr--; + desc->start = mm->desc.free_list; + mm->desc.free_list = (char *) desc; + ERTS_MMAP_ASSERT(mm->no.free_segs.curr > 0); + mm->no.free_segs.curr--; } static ERTS_INLINE ErtsFreeSegDesc* anode_to_desc(RBTNode* anode) @@ -1232,7 +1234,7 @@ Eterm build_free_seg_list(Process* p, ErtsFreeSegMap* map) # define ERTS_MMAP_FD (-1) # else # define ERTS_MMAP_FLAGS (MAP_PRIVATE) -# define ERTS_MMAP_FD 
mmap_state.mmap_fd +# define ERTS_MMAP_FD mm->mmap_fd # endif #endif @@ -1377,11 +1379,12 @@ static void unreserve_noop(char *ptr, UWord size) } static UWord -alloc_desc_insert_free_seg(ErtsFreeSegMap *map, char* start, char* end) +alloc_desc_insert_free_seg(ErtsMemMapper* mm, + ErtsFreeSegMap *map, char* start, char* end) { char *ptr; ErtsFreeSegMap *da_map; - ErtsFreeSegDesc *desc = alloc_desc(); + ErtsFreeSegDesc *desc = alloc_desc(mm); if (desc) { insert_free_seg(map, desc, start, end); return 0; @@ -1394,13 +1397,13 @@ alloc_desc_insert_free_seg(ErtsFreeSegMap *map, char* start, char* end) */ #if ERTS_HAVE_OS_MMAP - if (!mmap_state.no_os_mmap) { - ptr = os_mmap(mmap_state.desc.new_area_hint, ERTS_PAGEALIGNED_SIZE, 0); + if (!mm->no_os_mmap) { + ptr = os_mmap(mm->desc.new_area_hint, ERTS_PAGEALIGNED_SIZE, 0); if (ptr) { - mmap_state.desc.new_area_hint = ptr+ERTS_PAGEALIGNED_SIZE; + mm->desc.new_area_hint = ptr+ERTS_PAGEALIGNED_SIZE; ERTS_MMAP_SIZE_OS_INC(ERTS_PAGEALIGNED_SIZE); - add_free_desc_area(ptr, ptr+ERTS_PAGEALIGNED_SIZE); - desc = alloc_desc(); + add_free_desc_area(mm, ptr, ptr+ERTS_PAGEALIGNED_SIZE); + desc = alloc_desc(mm); ERTS_MMAP_ASSERT(desc); insert_free_seg(map, desc, start, end); return 0; @@ -1411,20 +1414,20 @@ alloc_desc_insert_free_seg(ErtsFreeSegMap *map, char* start, char* end) /* * ...then try to find a good place in the supercarrier... */ - da_map = &mmap_state.sua.map; + da_map = &mm->sua.map; desc = lookup_free_seg(da_map, ERTS_PAGEALIGNED_SIZE); if (desc) { - if (mmap_state.reserve_physical(desc->start, ERTS_PAGEALIGNED_SIZE)) + if (mm->reserve_physical(desc->start, ERTS_PAGEALIGNED_SIZE)) ERTS_MMAP_SIZE_SC_SUA_INC(ERTS_PAGEALIGNED_SIZE); else desc = NULL; } else { - da_map = &mmap_state.sa.map; + da_map = &mm->sa.map; desc = lookup_free_seg(da_map, ERTS_PAGEALIGNED_SIZE); if (desc) { - if (mmap_state.reserve_physical(desc->start, ERTS_PAGEALIGNED_SIZE)) + if (mm->reserve_physical(desc->start, ERTS_PAGEALIGNED_SIZE)) ERTS_MMAP_SIZE_SC_SA_INC(ERTS_PAGEALIGNED_SIZE); else desc = NULL; @@ -1432,15 +1435,15 @@ alloc_desc_insert_free_seg(ErtsFreeSegMap *map, char* start, char* end) } if (desc) { char *da_end = desc->start + ERTS_PAGEALIGNED_SIZE; - add_free_desc_area(desc->start, da_end); + add_free_desc_area(mm, desc->start, da_end); if (da_end != desc->end) resize_free_seg(da_map, desc, da_end, desc->end); else { delete_free_seg(da_map, desc); - free_desc(desc); + free_desc(mm, desc); } - desc = alloc_desc(); + desc = alloc_desc(mm); ERTS_MMAP_ASSERT(desc); insert_free_seg(map, desc, start, end); return 0; @@ -1453,10 +1456,10 @@ alloc_desc_insert_free_seg(ErtsFreeSegMap *map, char* start, char* end) ptr = start + ERTS_PAGEALIGNED_SIZE; ERTS_MMAP_ASSERT(ptr <= end); - add_free_desc_area(start, ptr); + add_free_desc_area(mm, start, ptr); if (ptr != end) { - desc = alloc_desc(); + desc = alloc_desc(mm); ERTS_MMAP_ASSERT(desc); insert_free_seg(map, desc, ptr, end); } @@ -1465,46 +1468,46 @@ alloc_desc_insert_free_seg(ErtsFreeSegMap *map, char* start, char* end) } void * -erts_mmap(Uint32 flags, UWord *sizep) +erts_mmap(ErtsMemMapper* mm, Uint32 flags, UWord *sizep) { char *seg; UWord asize = ERTS_PAGEALIGNED_CEILING(*sizep); /* Map in premapped supercarrier */ - if (mmap_state.supercarrier && !(ERTS_MMAPFLG_OS_ONLY & flags)) { + if (mm->supercarrier && !(ERTS_MMAPFLG_OS_ONLY & flags)) { char *end; ErtsFreeSegDesc *desc; Uint32 superaligned = (ERTS_MMAPFLG_SUPERALIGNED & flags); - erts_smp_mtx_lock(&mmap_state.mtx); + erts_smp_mtx_lock(&mm->mtx); 
ERTS_MMAP_OP_START(*sizep); if (!superaligned) { - desc = lookup_free_seg(&mmap_state.sua.map, asize); + desc = lookup_free_seg(&mm->sua.map, asize); if (desc) { seg = desc->start; end = seg+asize; - if (!mmap_state.reserve_physical(seg, asize)) + if (!mm->reserve_physical(seg, asize)) goto supercarrier_reserve_failure; if (desc->end == end) { - delete_free_seg(&mmap_state.sua.map, desc); - free_desc(desc); + delete_free_seg(&mm->sua.map, desc); + free_desc(mm, desc); } else { ERTS_MMAP_ASSERT(end < desc->end); - resize_free_seg(&mmap_state.sua.map, desc, end, desc->end); + resize_free_seg(&mm->sua.map, desc, end, desc->end); } ERTS_MMAP_SIZE_SC_SUA_INC(asize); goto supercarrier_success; } - if (asize <= mmap_state.sua.bot - mmap_state.sa.top) { - if (!mmap_state.reserve_physical(mmap_state.sua.bot - asize, + if (asize <= mm->sua.bot - mm->sa.top) { + if (!mm->reserve_physical(mm->sua.bot - asize, asize)) goto supercarrier_reserve_failure; - mmap_state.sua.bot -= asize; - seg = mmap_state.sua.bot; + mm->sua.bot -= asize; + seg = mm->sua.bot; ERTS_MMAP_SIZE_SC_SUA_INC(asize); goto supercarrier_success; } @@ -1512,84 +1515,84 @@ erts_mmap(Uint32 flags, UWord *sizep) asize = ERTS_SUPERALIGNED_CEILING(asize); - desc = lookup_free_seg(&mmap_state.sa.map, asize); + desc = lookup_free_seg(&mm->sa.map, asize); if (desc) { char *start = seg = desc->start; seg = (char *) ERTS_SUPERALIGNED_CEILING(seg); end = seg+asize; - if (!mmap_state.reserve_physical(start, (UWord) (end - start))) + if (!mm->reserve_physical(start, (UWord) (end - start))) goto supercarrier_reserve_failure; ERTS_MMAP_SIZE_SC_SA_INC(asize); if (desc->end == end) { if (start != seg) - resize_free_seg(&mmap_state.sa.map, desc, start, seg); + resize_free_seg(&mm->sa.map, desc, start, seg); else { - delete_free_seg(&mmap_state.sa.map, desc); - free_desc(desc); + delete_free_seg(&mm->sa.map, desc); + free_desc(mm, desc); } } else { ERTS_MMAP_ASSERT(end < desc->end); - resize_free_seg(&mmap_state.sa.map, desc, end, desc->end); + resize_free_seg(&mm->sa.map, desc, end, desc->end); if (start != seg) { UWord ad_sz; - ad_sz = alloc_desc_insert_free_seg(&mmap_state.sua.map, + ad_sz = alloc_desc_insert_free_seg(mm, &mm->sua.map, start, seg); start += ad_sz; if (start != seg) - mmap_state.unreserve_physical(start, (UWord) (seg - start)); + mm->unreserve_physical(start, (UWord) (seg - start)); } } goto supercarrier_success; } if (superaligned) { - char *start = mmap_state.sa.top; + char *start = mm->sa.top; seg = (char *) ERTS_SUPERALIGNED_CEILING(start); - if (asize + (seg - start) <= mmap_state.sua.bot - start) { + if (asize + (seg - start) <= mm->sua.bot - start) { end = seg + asize; - if (!mmap_state.reserve_physical(start, (UWord) (end - start))) + if (!mm->reserve_physical(start, (UWord) (end - start))) goto supercarrier_reserve_failure; - mmap_state.sa.top = end; + mm->sa.top = end; ERTS_MMAP_SIZE_SC_SA_INC(asize); if (start != seg) { UWord ad_sz; - ad_sz = alloc_desc_insert_free_seg(&mmap_state.sua.map, + ad_sz = alloc_desc_insert_free_seg(mm, &mm->sua.map, start, seg); start += ad_sz; if (start != seg) - mmap_state.unreserve_physical(start, (UWord) (seg - start)); + mm->unreserve_physical(start, (UWord) (seg - start)); } goto supercarrier_success; } - desc = lookup_free_seg(&mmap_state.sua.map, asize + ERTS_SUPERALIGNED_SIZE); + desc = lookup_free_seg(&mm->sua.map, asize + ERTS_SUPERALIGNED_SIZE); if (desc) { char *org_start = desc->start; char *org_end = desc->end; seg = (char *) ERTS_SUPERALIGNED_CEILING(org_start); end = seg + asize; 
- if (!mmap_state.reserve_physical(seg, (UWord) (org_end - seg))) + if (!mm->reserve_physical(seg, (UWord) (org_end - seg))) goto supercarrier_reserve_failure; ERTS_MMAP_SIZE_SC_SUA_INC(asize); if (org_start != seg) { ERTS_MMAP_ASSERT(org_start < seg); - resize_free_seg(&mmap_state.sua.map, desc, org_start, seg); + resize_free_seg(&mm->sua.map, desc, org_start, seg); desc = NULL; } if (end != org_end) { UWord ad_sz = 0; ERTS_MMAP_ASSERT(end < org_end); if (desc) - resize_free_seg(&mmap_state.sua.map, desc, end, org_end); + resize_free_seg(&mm->sua.map, desc, end, org_end); else - ad_sz = alloc_desc_insert_free_seg(&mmap_state.sua.map, + ad_sz = alloc_desc_insert_free_seg(mm, &mm->sua.map, end, org_end); end += ad_sz; if (end != org_end) - mmap_state.unreserve_physical(end, + mm->unreserve_physical(end, (UWord) (org_end - end)); } goto supercarrier_success; @@ -1597,12 +1600,12 @@ erts_mmap(Uint32 flags, UWord *sizep) } ERTS_MMAP_OP_ABORT(); - erts_smp_mtx_unlock(&mmap_state.mtx); + erts_smp_mtx_unlock(&mm->mtx); } #if ERTS_HAVE_OS_MMAP /* Map using OS primitives */ - if (!(ERTS_MMAPFLG_SUPERCARRIER_ONLY & flags) && !mmap_state.no_os_mmap) { + if (!(ERTS_MMAPFLG_SUPERCARRIER_ONLY & flags) && !mm->no_os_mmap) { if (!(ERTS_MMAPFLG_SUPERALIGNED & flags)) { seg = os_mmap(NULL, asize, 0); if (!seg) @@ -1660,25 +1663,25 @@ supercarrier_success: #endif ERTS_MMAP_OP_END(seg, asize); - erts_smp_mtx_unlock(&mmap_state.mtx); + erts_smp_mtx_unlock(&mm->mtx); *sizep = asize; return (void *) seg; supercarrier_reserve_failure: - erts_smp_mtx_unlock(&mmap_state.mtx); + erts_smp_mtx_unlock(&mm->mtx); *sizep = 0; return NULL; } void -erts_munmap(Uint32 flags, void *ptr, UWord size) +erts_munmap(ErtsMemMapper* mm, Uint32 flags, void *ptr, UWord size) { ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(ptr)); ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(size)); if (!ERTS_MMAP_IN_SUPERCARRIER(ptr)) { - ERTS_MMAP_ASSERT(!mmap_state.no_os_mmap); + ERTS_MMAP_ASSERT(!mm->no_os_mmap); #if ERTS_HAVE_OS_MMAP ERTS_MUNMAP_OP_LCK(ptr, size); ERTS_MMAP_SIZE_OS_DEC(size); @@ -1691,45 +1694,45 @@ erts_munmap(Uint32 flags, void *ptr, UWord size) ErtsFreeSegDesc *prev, *next, *desc; UWord ad_sz = 0; - ERTS_MMAP_ASSERT(mmap_state.supercarrier); + ERTS_MMAP_ASSERT(mm->supercarrier); start = (char *) ptr; end = start + size; - erts_smp_mtx_lock(&mmap_state.mtx); + erts_smp_mtx_lock(&mm->mtx); ERTS_MUNMAP_OP(ptr, size); if (ERTS_MMAP_IN_SUPERALIGNED_AREA(ptr)) { - map = &mmap_state.sa.map; + map = &mm->sa.map; adjacent_free_seg(map, start, end, &prev, &next); ERTS_MMAP_SIZE_SC_SA_DEC(size); - if (end == mmap_state.sa.top) { + if (end == mm->sa.top) { ERTS_MMAP_ASSERT(!next); if (prev) { start = prev->start; delete_free_seg(map, prev); - free_desc(prev); + free_desc(mm, prev); } - mmap_state.sa.top = start; + mm->sa.top = start; goto supercarrier_success; } } else { - map = &mmap_state.sua.map; + map = &mm->sua.map; adjacent_free_seg(map, start, end, &prev, &next); ERTS_MMAP_SIZE_SC_SUA_DEC(size); - if (start == mmap_state.sua.bot) { + if (start == mm->sua.bot) { ERTS_MMAP_ASSERT(!prev); if (next) { end = next->end; delete_free_seg(map, next); - free_desc(next); + free_desc(mm, next); } - mmap_state.sua.bot = end; + mm->sua.bot = end; goto supercarrier_success; } } @@ -1741,7 +1744,7 @@ erts_munmap(Uint32 flags, void *ptr, UWord size) end = next->end; if (prev) { delete_free_seg(map, next); - free_desc(next); + free_desc(mm, next); goto save_prev; } desc = next; @@ -1755,7 +1758,7 @@ erts_munmap(Uint32 flags, void *ptr, UWord size) if (desc) 
resize_free_seg(map, desc, start, end); else - ad_sz = alloc_desc_insert_free_seg(map, start, end); + ad_sz = alloc_desc_insert_free_seg(mm, map, start, end); supercarrier_success: { UWord unres_sz; @@ -1763,30 +1766,32 @@ erts_munmap(Uint32 flags, void *ptr, UWord size) ERTS_MMAP_ASSERT(size >= ad_sz); unres_sz = size - ad_sz; if (unres_sz) - mmap_state.unreserve_physical(((char *) ptr) + ad_sz, unres_sz); + mm->unreserve_physical(((char *) ptr) + ad_sz, unres_sz); - erts_smp_mtx_unlock(&mmap_state.mtx); + erts_smp_mtx_unlock(&mm->mtx); } } } static void * -remap_move(Uint32 flags, void *ptr, UWord old_size, UWord *sizep) +remap_move(ErtsMemMapper* mm, + Uint32 flags, void *ptr, UWord old_size, UWord *sizep) { UWord size = *sizep; - void *new_ptr = erts_mmap(flags, &size); + void *new_ptr = erts_mmap(mm, flags, &size); if (!new_ptr) return NULL; *sizep = size; if (old_size < size) size = old_size; sys_memcpy(new_ptr, ptr, (size_t) size); - erts_munmap(flags, ptr, old_size); + erts_munmap(mm, flags, ptr, old_size); return new_ptr; } void * -erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep) +erts_mremap(ErtsMemMapper* mm, + Uint32 flags, void *ptr, UWord old_size, UWord *sizep) { void *new_ptr; Uint32 superaligned; @@ -1798,11 +1803,11 @@ erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep) if (!ERTS_MMAP_IN_SUPERCARRIER(ptr)) { - ERTS_MMAP_ASSERT(!mmap_state.no_os_mmap); + ERTS_MMAP_ASSERT(!mm->no_os_mmap); - if (!(ERTS_MMAPFLG_OS_ONLY & flags) && mmap_state.supercarrier) { - new_ptr = remap_move(ERTS_MMAPFLG_SUPERCARRIER_ONLY|flags, ptr, - old_size, sizep); + if (!(ERTS_MMAPFLG_OS_ONLY & flags) && mm->supercarrier) { + new_ptr = remap_move(mm, ERTS_MMAPFLG_SUPERCARRIER_ONLY|flags, + ptr, old_size, sizep); if (new_ptr) return new_ptr; } @@ -1849,7 +1854,7 @@ erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep) #endif #if ERTS_HAVE_OS_MREMAP if (superaligned) - return remap_move(flags, new_ptr, old_size, sizep); + return remap_move(mm, flags, new_ptr, old_size, sizep); else { new_ptr = os_mremap(ptr, old_size, asize, 0); if (!new_ptr) @@ -1871,10 +1876,10 @@ erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep) ErtsFreeSegDesc *prev, *next; UWord ad_sz = 0; - ERTS_MMAP_ASSERT(mmap_state.supercarrier); + ERTS_MMAP_ASSERT(mm->supercarrier); if (ERTS_MMAPFLG_OS_ONLY & flags) - return remap_move(flags, ptr, old_size, sizep); + return remap_move(mm, flags, ptr, old_size, sizep); superaligned = (ERTS_MMAPFLG_SUPERALIGNED & flags); @@ -1882,19 +1887,19 @@ erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep) ? ERTS_SUPERALIGNED_CEILING(*sizep) : ERTS_PAGEALIGNED_CEILING(*sizep)); - erts_smp_mtx_lock(&mmap_state.mtx); + erts_smp_mtx_lock(&mm->mtx); if (ERTS_MMAP_IN_SUPERALIGNED_AREA(ptr) - ? (!superaligned && lookup_free_seg(&mmap_state.sua.map, asize)) - : (superaligned && lookup_free_seg(&mmap_state.sa.map, asize))) { - erts_smp_mtx_unlock(&mmap_state.mtx); + ? (!superaligned && lookup_free_seg(&mm->sua.map, asize)) + : (superaligned && lookup_free_seg(&mm->sa.map, asize))) { + erts_smp_mtx_unlock(&mm->mtx); /* * Segment currently in wrong area (due to a previous memory * shortage), move it to the right area. 
* (remap_move() will succeed) */ - return remap_move(ERTS_MMAPFLG_SUPERCARRIER_ONLY|flags, ptr, - old_size, sizep); + return remap_move(mm, ERTS_MMAPFLG_SUPERCARRIER_ONLY|flags, + ptr, old_size, sizep); } ERTS_MREMAP_OP_START(ptr, old_size, *sizep); @@ -1916,18 +1921,18 @@ erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep) UWord unres_sz; new_ptr = ptr; if (!ERTS_MMAP_IN_SUPERALIGNED_AREA(ptr)) { - map = &mmap_state.sua.map; + map = &mm->sua.map; ERTS_MMAP_SIZE_SC_SUA_DEC(old_size - asize); } else { - if (end == mmap_state.sa.top) { - mmap_state.sa.top = new_end; - mmap_state.unreserve_physical(((char *) ptr) + asize, + if (end == mm->sa.top) { + mm->sa.top = new_end; + mm->unreserve_physical(((char *) ptr) + asize, old_size - asize); goto supercarrier_resize_success; } ERTS_MMAP_SIZE_SC_SA_DEC(old_size - asize); - map = &mmap_state.sa.map; + map = &mm->sa.map; } adjacent_free_seg(map, start, end, &prev, &next); @@ -1935,11 +1940,11 @@ erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep) if (next) resize_free_seg(map, next, new_end, next->end); else - ad_sz = alloc_desc_insert_free_seg(map, new_end, end); + ad_sz = alloc_desc_insert_free_seg(mm, map, new_end, end); ERTS_MMAP_ASSERT(old_size - asize >= ad_sz); unres_sz = old_size - asize - ad_sz; if (unres_sz) - mmap_state.unreserve_physical(((char *) ptr) + asize + ad_sz, + mm->unreserve_physical(((char *) ptr) + asize + ad_sz, unres_sz); goto supercarrier_resize_success; } @@ -1949,17 +1954,17 @@ erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep) ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(old_size)); ERTS_MMAP_ASSERT(ERTS_IS_PAGEALIGNED(asize)); - adjacent_free_seg(&mmap_state.sua.map, start, end, &prev, &next); + adjacent_free_seg(&mm->sua.map, start, end, &prev, &next); if (next && new_end <= next->end) { - if (!mmap_state.reserve_physical(((char *) ptr) + old_size, + if (!mm->reserve_physical(((char *) ptr) + old_size, asize - old_size)) goto supercarrier_reserve_failure; if (new_end < next->end) - resize_free_seg(&mmap_state.sua.map, next, new_end, next->end); + resize_free_seg(&mm->sua.map, next, new_end, next->end); else { - delete_free_seg(&mmap_state.sua.map, next); - free_desc(next); + delete_free_seg(&mm->sua.map, next); + free_desc(mm, next); } new_ptr = ptr; ERTS_MMAP_SIZE_SC_SUA_INC(asize - old_size); @@ -1968,28 +1973,28 @@ erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep) } else { /* Superaligned area */ - if (end == mmap_state.sa.top) { - if (new_end <= mmap_state.sua.bot) { - if (!mmap_state.reserve_physical(((char *) ptr) + old_size, + if (end == mm->sa.top) { + if (new_end <= mm->sua.bot) { + if (!mm->reserve_physical(((char *) ptr) + old_size, asize - old_size)) goto supercarrier_reserve_failure; - mmap_state.sa.top = new_end; + mm->sa.top = new_end; new_ptr = ptr; ERTS_MMAP_SIZE_SC_SA_INC(asize - old_size); goto supercarrier_resize_success; } } else { - adjacent_free_seg(&mmap_state.sa.map, start, end, &prev, &next); + adjacent_free_seg(&mm->sa.map, start, end, &prev, &next); if (next && new_end <= next->end) { - if (!mmap_state.reserve_physical(((char *) ptr) + old_size, + if (!mm->reserve_physical(((char *) ptr) + old_size, asize - old_size)) goto supercarrier_reserve_failure; if (new_end < next->end) - resize_free_seg(&mmap_state.sa.map, next, new_end, next->end); + resize_free_seg(&mm->sa.map, next, new_end, next->end); else { - delete_free_seg(&mmap_state.sa.map, next); - free_desc(next); + delete_free_seg(&mm->sa.map, next); + free_desc(mm, next); } 
new_ptr = ptr; ERTS_MMAP_SIZE_SC_SA_INC(asize - old_size); @@ -1999,12 +2004,12 @@ erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep) } ERTS_MMAP_OP_ABORT(); - erts_smp_mtx_unlock(&mmap_state.mtx); + erts_smp_mtx_unlock(&mm->mtx); /* Failed to resize... */ } - return remap_move(flags, ptr, old_size, sizep); + return remap_move(mm, flags, ptr, old_size, sizep); supercarrier_resize_success: @@ -2021,25 +2026,24 @@ supercarrier_resize_success: #endif ERTS_MREMAP_OP_END(new_ptr, asize); - erts_smp_mtx_unlock(&mmap_state.mtx); + erts_smp_mtx_unlock(&mm->mtx); *sizep = asize; return new_ptr; supercarrier_reserve_failure: ERTS_MREMAP_OP_END(NULL, old_size); - erts_smp_mtx_unlock(&mmap_state.mtx); + erts_smp_mtx_unlock(&mm->mtx); *sizep = old_size; return NULL; } -int erts_mmap_in_supercarrier(void *ptr) +int erts_mmap_in_supercarrier(ErtsMemMapper* mm, void *ptr) { return ERTS_MMAP_IN_SUPERCARRIER(ptr); } - static struct { Eterm total; Eterm total_sa; @@ -2102,7 +2106,7 @@ static void hard_dbg_mseg_init(void); #endif void -erts_mmap_init(ErtsMMapInit *init) +erts_mmap_init(ErtsMemMapper* mm, ErtsMMapInit *init) { int virtual_map = 0; char *start = NULL, *end = NULL; @@ -2130,17 +2134,17 @@ erts_mmap_init(ErtsMMapInit *init) ERTS_MMAP_OP_RINGBUF_INIT(); - mmap_state.supercarrier = 0; - mmap_state.reserve_physical = reserve_noop; - mmap_state.unreserve_physical = unreserve_noop; + mm->supercarrier = 0; + mm->reserve_physical = reserve_noop; + mm->unreserve_physical = unreserve_noop; #if HAVE_MMAP && !defined(MAP_ANON) - mmap_state.mmap_fd = open("/dev/zero", O_RDWR); - if (mmap_state.mmap_fd < 0) + mm->mmap_fd = open("/dev/zero", O_RDWR); + if (mm->mmap_fd < 0) erl_exit(-1, "erts_mmap: Failed to open /dev/zero\n"); #endif - erts_smp_mtx_init(&mmap_state.mtx, "erts_mmap"); + erts_smp_mtx_init(&mm->mtx, "erts_mmap"); erts_mtx_init(&am.init_mutex, "mmap_init_atoms"); #ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION @@ -2157,8 +2161,8 @@ erts_mmap_init(ErtsMMapInit *init) sz = start - ptr; if (sz) os_munmap(end, sz); - mmap_state.reserve_physical = os_reserve_physical; - mmap_state.unreserve_physical = os_unreserve_physical; + mm->reserve_physical = os_reserve_physical; + mm->unreserve_physical = os_unreserve_physical; virtual_map = 1; } else @@ -2176,8 +2180,8 @@ erts_mmap_init(ErtsMMapInit *init) #ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION if (!init->scrpm) { start = os_mmap_virtual(NULL, sz); - mmap_state.reserve_physical = os_reserve_physical; - mmap_state.unreserve_physical = os_unreserve_physical; + mm->reserve_physical = os_reserve_physical; + mm->unreserve_physical = os_unreserve_physical; virtual_map = 1; } else @@ -2205,30 +2209,30 @@ erts_mmap_init(ErtsMMapInit *init) } #endif - mmap_state.no.free_seg_descs = 0; - mmap_state.no.free_segs.curr = 0; - mmap_state.no.free_segs.max = 0; + mm->no.free_seg_descs = 0; + mm->no.free_segs.curr = 0; + mm->no.free_segs.max = 0; - mmap_state.size.supercarrier.total = 0; - mmap_state.size.supercarrier.used.total = 0; - mmap_state.size.supercarrier.used.sa = 0; - mmap_state.size.supercarrier.used.sua = 0; - mmap_state.size.os.used = 0; + mm->size.supercarrier.total = 0; + mm->size.supercarrier.used.total = 0; + mm->size.supercarrier.used.sa = 0; + mm->size.supercarrier.used.sua = 0; + mm->size.os.used = 0; - mmap_state.desc.new_area_hint = NULL; + mm->desc.new_area_hint = NULL; if (!start) { - mmap_state.sa.bot = NULL; - mmap_state.sua.top = NULL; - mmap_state.sa.bot = NULL; - mmap_state.sua.top = NULL; - mmap_state.no_os_mmap = 0; - 
mmap_state.supercarrier = 0; + mm->sa.bot = NULL; + mm->sua.top = NULL; + mm->sa.bot = NULL; + mm->sua.top = NULL; + mm->no_os_mmap = 0; + mm->supercarrier = 0; } else { size_t desc_size; - mmap_state.no_os_mmap = init->sco; + mm->no_os_mmap = init->sco; desc_size = init->scrfsd; if (desc_size < 100) @@ -2239,60 +2243,60 @@ erts_mmap_init(ErtsMMapInit *init) + ERTS_PAGEALIGNED_SIZE) > end - start) erl_exit(-1, "erts_mmap: No space for segments in super carrier\n"); - mmap_state.sa.bot = start; - mmap_state.sa.bot += desc_size; - mmap_state.sa.bot = (char *) ERTS_SUPERALIGNED_CEILING(mmap_state.sa.bot); - mmap_state.sa.top = mmap_state.sa.bot; - mmap_state.sua.top = end; - mmap_state.sua.bot = mmap_state.sua.top; + mm->sa.bot = start; + mm->sa.bot += desc_size; + mm->sa.bot = (char *) ERTS_SUPERALIGNED_CEILING(mm->sa.bot); + mm->sa.top = mm->sa.bot; + mm->sua.top = end; + mm->sua.bot = mm->sua.top; - mmap_state.size.supercarrier.used.total += (UWord) (mmap_state.sa.bot - start); + mm->size.supercarrier.used.total += (UWord) (mm->sa.bot - start); - mmap_state.desc.free_list = NULL; - mmap_state.desc.reserved = 0; + mm->desc.free_list = NULL; + mm->desc.reserved = 0; if (end == (void *) 0) { /* * Very unlikely, but we need a guarantee - * that `mmap_state.sua.top` always will + * that `mm->sua.top` always will * compare as larger than all segment pointers * into the super carrier... */ - mmap_state.sua.top -= ERTS_PAGEALIGNED_SIZE; - mmap_state.size.supercarrier.used.total += ERTS_PAGEALIGNED_SIZE; + mm->sua.top -= ERTS_PAGEALIGNED_SIZE; + mm->size.supercarrier.used.total += ERTS_PAGEALIGNED_SIZE; #ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION - if (!virtual_map || os_reserve_physical(mmap_state.sua.top, ERTS_PAGEALIGNED_SIZE)) + if (!virtual_map || os_reserve_physical(mm->sua.top, ERTS_PAGEALIGNED_SIZE)) #endif - add_free_desc_area(mmap_state.sua.top, end); - mmap_state.desc.reserved += (end - mmap_state.sua.top) / sizeof(ErtsFreeSegDesc); + add_free_desc_area(mm, mm->sua.top, end); + mm->desc.reserved += (end - mm->sua.top) / sizeof(ErtsFreeSegDesc); } - mmap_state.size.supercarrier.total = (UWord) (mmap_state.sua.top - start); + mm->size.supercarrier.total = (UWord) (mm->sua.top - start); /* * Area before (and after) super carrier * will be used for free segment descritors. 
*/ #ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION - if (virtual_map && !os_reserve_physical(start, mmap_state.sa.bot - start)) + if (virtual_map && !os_reserve_physical(start, mm->sa.bot - start)) erl_exit(-1, "erts_mmap: Failed to reserve physical memory for descriptors\n"); #endif - mmap_state.desc.unused_start = start; - mmap_state.desc.unused_end = mmap_state.sa.bot; - mmap_state.desc.reserved += ((mmap_state.desc.unused_end - start) + mm->desc.unused_start = start; + mm->desc.unused_end = mm->sa.bot; + mm->desc.reserved += ((mm->desc.unused_end - start) / sizeof(ErtsFreeSegDesc)); - init_free_seg_map(&mmap_state.sa.map, SA_SZ_ADDR_ORDER); - init_free_seg_map(&mmap_state.sua.map, SZ_REVERSE_ADDR_ORDER); + init_free_seg_map(&mm->sa.map, SA_SZ_ADDR_ORDER); + init_free_seg_map(&mm->sua.map, SZ_REVERSE_ADDR_ORDER); - mmap_state.supercarrier = 1; + mm->supercarrier = 1; - mmap_state.desc.new_area_hint = end; + mm->desc.new_area_hint = end; } #if !ERTS_HAVE_OS_MMAP - mmap_state.no_os_mmap = 1; + mm->no_os_mmap = 1; #endif #ifdef HARD_DEBUG_MSEG @@ -2307,7 +2311,8 @@ add_2tup(Uint **hpp, Uint *szp, Eterm *lp, Eterm el1, Eterm el2) *lp = erts_bld_cons(hpp, szp, erts_bld_tuple(hpp, szp, 2, el1, el2), *lp); } -Eterm erts_mmap_info(int *print_to_p, +Eterm erts_mmap_info(ErtsMemMapper* mm, + int *print_to_p, void *print_to_arg, Eterm** hpp, Uint* szp, struct erts_mmap_info_struct* emis) @@ -2322,29 +2327,29 @@ Eterm erts_mmap_info(int *print_to_p, Eterm res = THE_NON_VALUE; if (!hpp) { - erts_smp_mtx_lock(&mmap_state.mtx); - emis->sizes[0] = mmap_state.size.supercarrier.total; - emis->sizes[1] = mmap_state.sa.top - mmap_state.sa.bot; - emis->sizes[2] = mmap_state.sua.top - mmap_state.sua.bot; - emis->sizes[3] = mmap_state.size.supercarrier.used.total; - emis->sizes[4] = mmap_state.size.supercarrier.used.sa; - emis->sizes[5] = mmap_state.size.supercarrier.used.sua; + erts_smp_mtx_lock(&mm->mtx); + emis->sizes[0] = mm->size.supercarrier.total; + emis->sizes[1] = mm->sa.top - mm->sa.bot; + emis->sizes[2] = mm->sua.top - mm->sua.bot; + emis->sizes[3] = mm->size.supercarrier.used.total; + emis->sizes[4] = mm->size.supercarrier.used.sa; + emis->sizes[5] = mm->size.supercarrier.used.sua; - emis->segs[0] = mmap_state.no.free_segs.curr; - emis->segs[1] = mmap_state.no.free_segs.max; - emis->segs[2] = mmap_state.no.free_seg_descs; - emis->segs[3] = mmap_state.desc.reserved; - emis->segs[4] = mmap_state.sa.map.nseg; - emis->segs[5] = mmap_state.sua.map.nseg; + emis->segs[0] = mm->no.free_segs.curr; + emis->segs[1] = mm->no.free_segs.max; + emis->segs[2] = mm->no.free_seg_descs; + emis->segs[3] = mm->desc.reserved; + emis->segs[4] = mm->sa.map.nseg; + emis->segs[5] = mm->sua.map.nseg; - emis->os_used = mmap_state.size.os.used; - erts_smp_mtx_unlock(&mmap_state.mtx); + emis->os_used = mm->size.os.used; + erts_smp_mtx_unlock(&mm->mtx); } if (print_to_p) { int to = *print_to_p; void *arg = print_to_arg; - if (mmap_state.supercarrier) { + if (mm->supercarrier) { const char* prefix = "supercarrier "; erts_print(to, arg, "%stotal size: %bpu\n", prefix, emis->sizes[0]); erts_print(to, arg, "%stotal sa size: %bpu\n", prefix, emis->sizes[1]); @@ -2359,7 +2364,7 @@ Eterm erts_mmap_info(int *print_to_p, erts_print(to, arg, "%ssa free segs: %bpu\n", prefix, emis->segs[4]); erts_print(to, arg, "%ssua free segs: %bpu\n", prefix, emis->segs[5]); } - if (!mmap_state.no_os_mmap) { + if (!mm->no_os_mmap) { erts_print(to, arg, "os mmap size used: %bpu\n", emis->os_used); } } @@ -2371,7 +2376,7 @@ Eterm erts_mmap_info(int 
*print_to_p, } lix = 0; - if (mmap_state.supercarrier) { + if (mm->supercarrier) { group[0] = erts_bld_atom_uword_2tup_list(hpp, szp, sizeof(size_tags)/sizeof(Eterm), size_tags, emis->sizes); @@ -2383,7 +2388,7 @@ Eterm erts_mmap_info(int *print_to_p, lix++; } - if (!mmap_state.no_os_mmap) { + if (!mm->no_os_mmap) { group[0] = erts_bld_atom_uword_2tup_list(hpp, szp, 1, &am.used, &emis->os_used); list[lix] = erts_bld_2tup_list(hpp, szp, 1, group_tags, group); @@ -2395,25 +2400,26 @@ Eterm erts_mmap_info(int *print_to_p, return res; } -Eterm erts_mmap_info_options(char *prefix, +Eterm erts_mmap_info_options(ErtsMemMapper* mm, + char *prefix, int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp) { - const UWord scs = mmap_state.sua.top - mmap_state.sa.bot; - const Eterm sco = mmap_state.no_os_mmap ? am_true : am_false; - const Eterm scrpm = (mmap_state.reserve_physical == reserve_noop) ? am_true : am_false; + const UWord scs = mm->sua.top - mm->sa.bot; + const Eterm sco = mm->no_os_mmap ? am_true : am_false; + const Eterm scrpm = (mm->reserve_physical == reserve_noop) ? am_true : am_false; Eterm res = THE_NON_VALUE; if (print_to_p) { int to = *print_to_p; void *arg = print_to_arg; erts_print(to, arg, "%sscs: %bpu\n", prefix, scs); - if (mmap_state.supercarrier) { + if (mm->supercarrier) { erts_print(to, arg, "%ssco: %T\n", prefix, sco); erts_print(to, arg, "%sscrpm: %T\n", prefix, scrpm); - erts_print(to, arg, "%sscrfsd: %beu\n", prefix, mmap_state.desc.reserved); + erts_print(to, arg, "%sscrfsd: %beu\n", prefix, mm->desc.reserved); } } @@ -2423,9 +2429,9 @@ Eterm erts_mmap_info_options(char *prefix, } res = NIL; - if (mmap_state.supercarrier) { + if (mm->supercarrier) { add_2tup(hpp, szp, &res, am.scrfsd, - erts_bld_uint(hpp,szp, mmap_state.desc.reserved)); + erts_bld_uint(hpp,szp, mm->desc.reserved)); add_2tup(hpp, szp, &res, am.scrpm, scrpm); add_2tup(hpp, szp, &res, am.sco, sco); } @@ -2435,9 +2441,9 @@ Eterm erts_mmap_info_options(char *prefix, } -Eterm erts_mmap_debug_info(Process* p) +Eterm erts_mmap_debug_info(ErtsMemMapper* mm, Process* p) { - if (mmap_state.supercarrier) { + if (mm->supercarrier) { ERTS_DECL_AM(sabot); ERTS_DECL_AM(satop); ERTS_DECL_AM(suabot); @@ -2448,14 +2454,14 @@ Eterm erts_mmap_debug_info(Process* p) Eterm *hp, *hp_end; Uint may_need; - erts_smp_mtx_lock(&mmap_state.mtx); - values[0] = (UWord)mmap_state.sa.bot; - values[1] = (UWord)mmap_state.sa.top; - values[2] = (UWord)mmap_state.sua.bot; - values[3] = (UWord)mmap_state.sua.top; - sa_list = build_free_seg_list(p, &mmap_state.sa.map); - sua_list = build_free_seg_list(p, &mmap_state.sua.map); - erts_smp_mtx_unlock(&mmap_state.mtx); + erts_smp_mtx_lock(&mm->mtx); + values[0] = (UWord)mm->sa.bot; + values[1] = (UWord)mm->sa.top; + values[2] = (UWord)mm->sua.bot; + values[3] = (UWord)mm->sua.top; + sa_list = build_free_seg_list(p, &mm->sa.map); + sua_list = build_free_seg_list(p, &mm->sua.map); + erts_smp_mtx_unlock(&mm->mtx); may_need = 4*(2+3+2) + 2*(2+3); hp = HAlloc(p, may_need); diff --git a/erts/emulator/sys/common/erl_mmap.h b/erts/emulator/sys/common/erl_mmap.h index 51f830d045..8707c23527 100644 --- a/erts/emulator/sys/common/erl_mmap.h +++ b/erts/emulator/sys/common/erl_mmap.h @@ -50,23 +50,26 @@ typedef struct { #define ERTS_MMAP_INIT_DEFAULT_INITER \ {{NULL, NULL}, {NULL, NULL}, 0, 1, (1 << 16), 1} -void *erts_mmap(Uint32 flags, UWord *sizep); -void erts_munmap(Uint32 flags, void *ptr, UWord size); -void *erts_mremap(Uint32 flags, void *ptr, UWord old_size, UWord *sizep); -int 
erts_mmap_in_supercarrier(void *ptr); -void erts_mmap_init(ErtsMMapInit*); +typedef struct ErtsMemMapper_ ErtsMemMapper; +extern ErtsMemMapper erts_dflt_mmapper; +void *erts_mmap(ErtsMemMapper*, Uint32 flags, UWord *sizep); +void erts_munmap(ErtsMemMapper*, Uint32 flags, void *ptr, UWord size); +void *erts_mremap(ErtsMemMapper*, Uint32 flags, void *ptr, UWord old_size, UWord *sizep); +int erts_mmap_in_supercarrier(ErtsMemMapper*, void *ptr); +void erts_mmap_init(ErtsMemMapper*, ErtsMMapInit*); struct erts_mmap_info_struct { UWord sizes[6]; UWord segs[6]; UWord os_used; }; -Eterm erts_mmap_info(int *print_to_p, void *print_to_arg, +Eterm erts_mmap_info(ErtsMemMapper*, int *print_to_p, void *print_to_arg, Eterm** hpp, Uint* szp, struct erts_mmap_info_struct*); -Eterm erts_mmap_info_options(char *prefix, int *print_to_p, void *print_to_arg, +Eterm erts_mmap_info_options(ErtsMemMapper*, + char *prefix, int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp); struct process; -Eterm erts_mmap_debug_info(struct process*); +Eterm erts_mmap_debug_info(ErtsMemMapper*, struct process*); #define ERTS_SUPERALIGNED_SIZE \ (1 << ERTS_MMAP_SUPERALIGNED_BITS) diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c index 7eb8a4a460..aadcc755c1 100644 --- a/erts/emulator/sys/common/erl_mseg.c +++ b/erts/emulator/sys/common/erl_mseg.c @@ -291,7 +291,7 @@ mseg_create(ErtsMsegAllctr_t *ma, Uint flags, UWord *sizep) if (MSEG_FLG_IS_2POW(flags)) mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED; - seg = erts_mmap(mmap_flags, sizep); + seg = erts_mmap(&erts_dflt_mmapper, mmap_flags, sizep); #ifdef ERTS_PRINT_ERTS_MMAP erts_fprintf(stderr, "%p = erts_mmap(%s, {%bpu, %bpu});\n", seg, @@ -311,7 +311,7 @@ mseg_destroy(ErtsMsegAllctr_t *ma, Uint flags, void *seg_p, UWord size) { if (MSEG_FLG_IS_2POW(flags)) mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED; - erts_munmap(mmap_flags, seg_p, size); + erts_munmap(&erts_dflt_mmapper, mmap_flags, seg_p, size); #ifdef ERTS_PRINT_ERTS_MMAP erts_fprintf(stderr, "erts_munmap(%s, %p, %bpu);\n", (mmap_flags & ERTS_MMAPFLG_SUPERALIGNED) ? 
"sa" : "sua", @@ -332,7 +332,7 @@ mseg_recreate(ErtsMsegAllctr_t *ma, Uint flags, void *old_seg, UWord old_size, U if (MSEG_FLG_IS_2POW(flags)) mmap_flags |= ERTS_MMAPFLG_SUPERALIGNED; - new_seg = erts_mremap(mmap_flags, old_seg, old_size, sizep); + new_seg = erts_mremap(&erts_dflt_mmapper, mmap_flags, old_seg, old_size, sizep); #ifdef ERTS_PRINT_ERTS_MMAP erts_fprintf(stderr, "%p = erts_mremap(%s, %p, %bpu, {%bpu, %bpu});\n", @@ -997,7 +997,8 @@ info_options(ErtsMsegAllctr_t *ma, { Eterm res; - res = erts_mmap_info_options(prefix, print_to_p, print_to_arg, hpp, szp); + res = erts_mmap_info_options(&erts_dflt_mmapper, + prefix, print_to_p, print_to_arg, hpp, szp); if (print_to_p) { int to = *print_to_p; @@ -1401,7 +1402,7 @@ erts_mseg_init(ErtsMsegInit_t *init) erts_mtx_init(&init_atoms_mutex, "mseg_init_atoms"); - erts_mmap_init(&init->mmap); + erts_mmap_init(&erts_dflt_mmapper, &init->mmap); if (!IS_2POW(GET_PAGE_SIZE)) erl_exit(ERTS_ABORT_EXIT, "erts_mseg: Unexpected page_size %beu\n", GET_PAGE_SIZE); -- cgit v1.2.3 From b7f760cee119e1824de0cfc2b34ae6fe971bf505 Mon Sep 17 00:00:00 2001 From: Sverker Eriksson Date: Fri, 11 Sep 2015 16:03:00 +0200 Subject: erts: Add support for fast erts_is_literal() --- erts/emulator/sys/common/erl_mmap.c | 19 ++++++++++++++++++- erts/emulator/sys/common/erl_mmap.h | 10 +++++++++- erts/emulator/sys/common/erl_mseg.c | 2 +- erts/emulator/sys/common/erl_mseg.h | 6 ++++-- 4 files changed, 32 insertions(+), 5 deletions(-) (limited to 'erts/emulator/sys/common') diff --git a/erts/emulator/sys/common/erl_mmap.c b/erts/emulator/sys/common/erl_mmap.c index e8ff2bf752..03ca080c14 100644 --- a/erts/emulator/sys/common/erl_mmap.c +++ b/erts/emulator/sys/common/erl_mmap.c @@ -349,6 +349,12 @@ struct ErtsMemMapper_ { ErtsMemMapper erts_dflt_mmapper; +#if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION) +ErtsMemMapper erts_literal_mmapper; +char* erts_literals_start; +UWord erts_literals_size; +#endif + #define ERTS_MMAP_SIZE_SC_SA_INC(SZ) \ do { \ mm->size.supercarrier.used.total += (SZ); \ @@ -2108,6 +2114,7 @@ static void hard_dbg_mseg_init(void); void erts_mmap_init(ErtsMemMapper* mm, ErtsMMapInit *init) { + static int is_first_call = 1; int virtual_map = 0; char *start = NULL, *end = NULL; UWord pagesize; @@ -2145,7 +2152,9 @@ erts_mmap_init(ErtsMemMapper* mm, ErtsMMapInit *init) #endif erts_smp_mtx_init(&mm->mtx, "erts_mmap"); - erts_mtx_init(&am.init_mutex, "mmap_init_atoms"); + if (is_first_call) { + erts_mtx_init(&am.init_mutex, "mmap_init_atoms"); + } #ifdef ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION if (init->virtual_range.start) { @@ -2302,6 +2311,14 @@ erts_mmap_init(ErtsMemMapper* mm, ErtsMMapInit *init) #ifdef HARD_DEBUG_MSEG hard_dbg_mseg_init(); #endif + +#if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION) + if (mm == &erts_literal_mmapper) { + erts_literals_start = erts_literal_mmapper.sa.bot; + erts_literals_size = erts_literal_mmapper.sua.top - erts_literals_start; + } +#endif + is_first_call = 0; } diff --git a/erts/emulator/sys/common/erl_mmap.h b/erts/emulator/sys/common/erl_mmap.h index 8707c23527..61d912fd28 100644 --- a/erts/emulator/sys/common/erl_mmap.h +++ b/erts/emulator/sys/common/erl_mmap.h @@ -50,8 +50,11 @@ typedef struct { #define ERTS_MMAP_INIT_DEFAULT_INITER \ {{NULL, NULL}, {NULL, NULL}, 0, 1, (1 << 16), 1} +#define ERTS_MMAP_INIT_LITERAL_INITER \ + {{NULL, NULL}, {NULL, NULL}, 1024*1024*1024, 1, (1 << 16), 0} + typedef struct ErtsMemMapper_ ErtsMemMapper; -extern ErtsMemMapper 
erts_dflt_mmapper; + void *erts_mmap(ErtsMemMapper*, Uint32 flags, UWord *sizep); void erts_munmap(ErtsMemMapper*, Uint32 flags, void *ptr, UWord size); void *erts_mremap(ErtsMemMapper*, Uint32 flags, void *ptr, UWord old_size, UWord *sizep); @@ -121,6 +124,11 @@ Eterm erts_mmap_debug_info(ErtsMemMapper*, struct process*); # define ERTS_HAVE_OS_MMAP 1 #endif +extern ErtsMemMapper erts_dflt_mmapper; +#if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION) +extern ErtsMemMapper erts_literal_mmapper; +#endif + /*#define HARD_DEBUG_MSEG*/ #ifdef HARD_DEBUG_MSEG # define HARD_DBG_INSERT_MSEG hard_dbg_insert_mseg diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c index aadcc755c1..20695899eb 100644 --- a/erts/emulator/sys/common/erl_mseg.c +++ b/erts/emulator/sys/common/erl_mseg.c @@ -1402,7 +1402,7 @@ erts_mseg_init(ErtsMsegInit_t *init) erts_mtx_init(&init_atoms_mutex, "mseg_init_atoms"); - erts_mmap_init(&erts_dflt_mmapper, &init->mmap); + erts_mmap_init(&erts_dflt_mmapper, &init->dflt_mmap); if (!IS_2POW(GET_PAGE_SIZE)) erl_exit(ERTS_ABORT_EXIT, "erts_mseg: Unexpected page_size %beu\n", GET_PAGE_SIZE); diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h index 677f8ea4ed..2acd8f8505 100644 --- a/erts/emulator/sys/common/erl_mseg.h +++ b/erts/emulator/sys/common/erl_mseg.h @@ -59,7 +59,8 @@ typedef struct { Uint rmcbf; Uint mcs; Uint nos; - ErtsMMapInit mmap; + ErtsMMapInit dflt_mmap; + ErtsMMapInit literal_mmap; } ErtsMsegInit_t; #define ERTS_MSEG_INIT_DEFAULT_INITIALIZER \ @@ -68,7 +69,8 @@ typedef struct { 20, /* rmcbf: Relative max cache bad fit */ \ 10, /* mcs: Max cache size */ \ 1000, /* cci: Cache check interval */ \ - ERTS_MMAP_INIT_DEFAULT_INITER \ + ERTS_MMAP_INIT_DEFAULT_INITER, \ + ERTS_MMAP_INIT_LITERAL_INITER \ } typedef struct { -- cgit v1.2.3 From 664ed2a6fd2b324bb6b56db3d3eca853cfda8f61 Mon Sep 17 00:00:00 2001 From: Lukas Larsson Date: Fri, 12 Sep 2014 16:38:00 +0200 Subject: erts: Add microstate accounting Microstate accounting is a way to track which state the different threads within ERTS are in. The main usage area is to pinpoint performance bottlenecks by checking which states the threads are in and then figuring out why and where to optimize. Since checking whether microstate accounting is on or off is relatively expensive if done in a short loop, only a few of the states are enabled by default, and more states can be enabled through configure. I've done some benchmarking: the overhead with it turned off is not noticeable, and with it on it is a fraction of a percent. If you enable the extra states, depending on the benchmark, the overhead when turned off is about 1% and when turned on somewhere between 5 and 15%. 
OTP-12345 --- erts/emulator/sys/common/erl_poll.c | 36 ++++++++++++++++++++++++++++-------- 1 file changed, 28 insertions(+), 8 deletions(-) (limited to 'erts/emulator/sys/common') diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c index 21fa214364..36ee94111c 100644 --- a/erts/emulator/sys/common/erl_poll.c +++ b/erts/emulator/sys/common/erl_poll.c @@ -74,6 +74,7 @@ #include "erl_thr_progress.h" #include "erl_driver.h" #include "erl_alloc.h" +#include "erl_msacc.h" #if !defined(ERTS_POLL_USE_EPOLL) \ && !defined(ERTS_POLL_USE_DEVPOLL) \ @@ -2238,6 +2239,7 @@ static ERTS_INLINE int check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res) { int res; + ERTS_MSACC_PUSH_STATE_M(); if (erts_smp_atomic_read_nob(&ps->no_of_user_fds) == 0 && timeout_time == ERTS_POLL_NO_TIMEOUT) { /* Nothing to poll and zero timeout; done... */ @@ -2259,6 +2261,7 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res) #ifdef ERTS_SMP erts_thr_progress_prepare_wait(NULL); #endif + ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); timerfd_set(ps, &its); res = epoll_wait(ps->kp_fd, ps->res_events, max_res, -1); res = timerfd_clear(ps, res, max_res); @@ -2268,10 +2271,12 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res) } #else /* !ERTS_POLL_USE_TIMERFD */ timeout = (int) get_timeout(ps, 1000, timeout_time); + if (timeout) { #ifdef ERTS_SMP - if (timeout) erts_thr_progress_prepare_wait(NULL); #endif + ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); + } res = epoll_wait(ps->kp_fd, ps->res_events, max_res, timeout); #endif /* !ERTS_POLL_USE_TIMERFD */ #elif ERTS_POLL_USE_KQUEUE /* --- kqueue ------------------------------ */ @@ -2279,10 +2284,12 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res) if (max_res > ps->res_events_len) grow_res_events(ps, max_res); timeout = get_timeout_timespec(ps, &ts, timeout_time); + if (timeout) { #ifdef ERTS_SMP - if (timeout) erts_thr_progress_prepare_wait(NULL); #endif + ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); + } res = kevent(ps->kp_fd, NULL, 0, ps->res_events, max_res, &ts); #endif /* ----------------------------------------- */ } @@ -2306,26 +2313,33 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res) if (poll_res.dp_nfds > ps->res_events_len) grow_res_events(ps, poll_res.dp_nfds); poll_res.dp_fds = ps->res_events; + if (timeout) { #ifdef ERTS_SMP - if (timeout) erts_thr_progress_prepare_wait(NULL); #endif + ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); + } poll_res.dp_timeout = timeout; res = ioctl(ps->kp_fd, DP_POLL, &poll_res); #elif ERTS_POLL_USE_POLL && defined(HAVE_PPOLL) /* --- ppoll ---------------- */ struct timespec ts; timeout = get_timeout_timespec(ps, &ts, timeout_time); + if (timeout) { #ifdef ERTS_SMP - if (timeout) erts_thr_progress_prepare_wait(NULL); #endif + ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); + } res = ppoll(ps->poll_fds, ps->no_poll_fds, &ts, NULL); #elif ERTS_POLL_USE_POLL /* --- poll --------------------------------- */ timeout = (int) get_timeout(ps, 1000, timeout_time); + + if (timeout) { #ifdef ERTS_SMP - if (timeout) erts_thr_progress_prepare_wait(NULL); #endif + ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); + } res = poll(ps->poll_fds, ps->no_poll_fds, timeout); #elif ERTS_POLL_USE_SELECT /* --- select ------------------------------ */ SysTimeval to; @@ -2334,18 +2348,22 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime 
timeout_time, int max_res) ERTS_FD_COPY(&ps->input_fds, &ps->res_input_fds); ERTS_FD_COPY(&ps->output_fds, &ps->res_output_fds); + if (timeout) { #ifdef ERTS_SMP - if (timeout) erts_thr_progress_prepare_wait(NULL); #endif + ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); + } res = ERTS_SELECT(ps->max_fd + 1, &ps->res_input_fds, &ps->res_output_fds, NULL, &to); #ifdef ERTS_SMP - if (timeout) + if (timeout) { erts_thr_progress_finalize_wait(NULL); + ERTS_MSACC_POP_STATE_M(); + } if (res < 0 && errno == EBADF && ERTS_POLLSET_HAVE_UPDATE_REQUESTS(ps)) { @@ -2380,10 +2398,12 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res) return res; #endif /* ----------------------------------------- */ } + if (timeout) { #ifdef ERTS_SMP - if (timeout) erts_thr_progress_finalize_wait(NULL); #endif + ERTS_MSACC_POP_STATE_M(); + } return res; } } -- cgit v1.2.3
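
The erl_poll.c hunks above all follow one shape: save the current microstate with ERTS_MSACC_PUSH_STATE_M(), switch the thread to ERTS_MSACC_STATE_SLEEP only when the wait can actually block (timeout != 0), perform the blocking syscall, and restore the previous state with ERTS_MSACC_POP_STATE_M() once the thread is runnable again; in the patch the state switch sits next to erts_thr_progress_prepare_wait()/finalize_wait() so only genuinely blocking waits are accounted as sleep. The stand-alone C sketch below only mirrors that shape under those assumptions; the MSACC_* macros, wait_for_events() and the single global state variable are simplified stand-ins invented for illustration and are not the ERTS implementation.

    /*
     * Minimal sketch of the accounting pattern used in check_fd_events()
     * above.  MSACC_PUSH_STATE, MSACC_SET_STATE, MSACC_POP_STATE and
     * current_state are stand-ins that only mimic the shape of
     * ERTS_MSACC_PUSH_STATE_M() and friends; they are not ERTS code.
     * Requires a POSIX system for poll().
     */
    #include <poll.h>
    #include <stdio.h>

    enum msacc_state { MSACC_STATE_OTHER, MSACC_STATE_CHECK_IO, MSACC_STATE_SLEEP };

    /* Per-thread in the real emulator; one variable is enough for the sketch. */
    static enum msacc_state current_state = MSACC_STATE_OTHER;

    /* Remember the caller's state so it can be restored later. */
    #define MSACC_PUSH_STATE() enum msacc_state msacc_saved_state__ = current_state
    /* Switch this thread to a new state, e.g. right before blocking. */
    #define MSACC_SET_STATE(s) (current_state = (s))
    /* Restore the state saved by MSACC_PUSH_STATE() in the same scope. */
    #define MSACC_POP_STATE()  (current_state = msacc_saved_state__)

    static int wait_for_events(struct pollfd *fds, nfds_t nfds, int timeout)
    {
        int res;
        MSACC_PUSH_STATE();                 /* like ERTS_MSACC_PUSH_STATE_M() */

        if (timeout)                        /* only a blocking wait counts as sleep */
            MSACC_SET_STATE(MSACC_STATE_SLEEP);

        res = poll(fds, nfds, timeout);     /* the blocking call being bracketed */

        if (timeout)
            MSACC_POP_STATE();              /* back to the caller's state */
        return res;
    }

    int main(void)
    {
        int res = wait_for_events(NULL, 0, 10);   /* sleeps ~10 ms polling nothing */
        printf("poll() returned %d, state is %d again\n", res, (int)current_state);
        return 0;
    }

Keeping the state switch inside the if (timeout) branches, as the patch does, keeps the bookkeeping off the non-blocking fast path, which is consistent with the commit message's point that checking the accounting flag in a tight loop is comparatively expensive.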