Diffstat (limited to 'erts/emulator/sys/common')
-rw-r--r-- | erts/emulator/sys/common/erl_check_io.c | 11
-rw-r--r-- | erts/emulator/sys/common/erl_check_io.h | 9
-rw-r--r-- | erts/emulator/sys/common/erl_mseg.c | 669
-rw-r--r-- | erts/emulator/sys/common/erl_mseg.h | 11
-rw-r--r-- | erts/emulator/sys/common/erl_poll.c | 193
-rw-r--r-- | erts/emulator/sys/common/erl_poll.h | 5
6 files changed, 513 insertions, 385 deletions
diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c index 57321259f9..6d4ad459cc 100644 --- a/erts/emulator/sys/common/erl_check_io.c +++ b/erts/emulator/sys/common/erl_check_io.c @@ -66,6 +66,9 @@ typedef char EventStateFlags; #define ERTS_CIO_POLL_CTL ERTS_POLL_EXPORT(erts_poll_control) #define ERTS_CIO_POLL_WAIT ERTS_POLL_EXPORT(erts_poll_wait) +#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT +#define ERTS_CIO_POLL_AS_INTR ERTS_POLL_EXPORT(erts_poll_async_sig_interrupt) +#endif #define ERTS_CIO_POLL_INTR ERTS_POLL_EXPORT(erts_poll_interrupt) #define ERTS_CIO_POLL_INTR_TMD ERTS_POLL_EXPORT(erts_poll_interrupt_timed) #define ERTS_CIO_NEW_POLLSET ERTS_POLL_EXPORT(erts_poll_create_pollset) @@ -1115,6 +1118,14 @@ eready(Eterm id, ErtsDrvEventState *state, ErlDrvEventData event_data) static void bad_fd_in_pollset( ErtsDrvEventState *, Eterm, Eterm, ErtsPollEvents); +#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT +void +ERTS_CIO_EXPORT(erts_check_io_async_sig_interrupt)(void) +{ + ERTS_CIO_POLL_AS_INTR(pollset.ps); +} +#endif + void ERTS_CIO_EXPORT(erts_check_io_interrupt)(int set) { diff --git a/erts/emulator/sys/common/erl_check_io.h b/erts/emulator/sys/common/erl_check_io.h index 9b45a63913..7cc1658062 100644 --- a/erts/emulator/sys/common/erl_check_io.h +++ b/erts/emulator/sys/common/erl_check_io.h @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2006-2009. All Rights Reserved. + * Copyright Ericsson AB 2006-2011. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -40,6 +40,10 @@ Eterm erts_check_io_info_kp(void *); Eterm erts_check_io_info_nkp(void *); int erts_check_io_max_files_kp(void); int erts_check_io_max_files_nkp(void); +#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT +void erts_check_io_async_sig_interrupt_kp(void); +void erts_check_io_async_sig_interrupt_nkp(void); +#endif void erts_check_io_interrupt_kp(int); void erts_check_io_interrupt_nkp(int); void erts_check_io_interrupt_timed_kp(int, long); @@ -56,6 +60,9 @@ int erts_check_io_debug_nkp(void); Uint erts_check_io_size(void); Eterm erts_check_io_info(void *); int erts_check_io_max_files(void); +#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT +void erts_check_io_async_sig_interrupt(void); +#endif void erts_check_io_interrupt(int); void erts_check_io_interrupt_timed(int, long); void erts_check_io(int); diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c index eaef6680dd..3a90db607b 100644 --- a/erts/emulator/sys/common/erl_mseg.c +++ b/erts/emulator/sys/common/erl_mseg.c @@ -36,14 +36,11 @@ #include "erl_threads.h" #include "erl_mtrace.h" #include "erl_time.h" +#include "erl_alloc.h" #include "big.h" #if HAVE_ERTS_MSEG -#if defined(USE_THREADS) && !defined(ERTS_SMP) -# define ERTS_THREADS_NO_SMP -#endif - #define SEGTYPE ERTS_MTRACE_SEGMENT_ID #ifndef HAVE_GETPAGESIZE @@ -75,16 +72,9 @@ static int atoms_initialized; -static Uint cache_check_interval; - typedef struct mem_kind_t MemKind; -static void check_cache(void *unused); static void mseg_clear_cache(MemKind*); -static int is_cache_check_scheduled; -#ifdef ERTS_THREADS_NO_SMP -static int is_cache_check_requested; -#endif #if HALFWORD_HEAP static int initialize_pmmap(void); @@ -138,7 +128,8 @@ const ErtsMsegOpt_t erts_mseg_default_opt = { 1, /* Use cache */ 1, /* Preserv data */ 0, /* Absolute shrink threshold */ - 0 /* Relative shrink threshold */ + 0, /* Relative shrink 
threshold */ + 0 /* Scheduler specific */ #if HALFWORD_HEAP ,0 /* need low memory */ #endif @@ -157,11 +148,10 @@ typedef struct { Uint32 no; } CallCounter; -static int is_init_done; static Uint page_size; static Uint page_shift; -static struct { +typedef struct { CallCounter alloc; CallCounter dealloc; CallCounter realloc; @@ -172,7 +162,9 @@ static struct { #endif CallCounter clear_cache; CallCounter check_cache; -} calls; +} ErtsMsegCalls; + +typedef struct ErtsMsegAllctr_t_ ErtsMsegAllctr_t; struct mem_kind_t { cache_desc_t cache_descs[MAX_CACHE_SIZE]; @@ -201,25 +193,84 @@ struct mem_kind_t { } max_ever; } segments; + ErtsMsegAllctr_t *ma; const char* name; MemKind* next; };/*MemKind*/ +struct ErtsMsegAllctr_t_ { + int ix; + + int is_init_done; + int is_thread_safe; + erts_mtx_t mtx; + + int is_cache_check_scheduled; + + MemKind* mk_list; + #if HALFWORD_HEAP -static MemKind low_mem, hi_mem; + MemKind low_mem; + MemKind hi_mem; #else -static MemKind the_mem; + MemKind the_mem; #endif -static MemKind* mk_list = NULL; -static Uint max_cache_size; -static Uint abs_max_cache_bad_fit; -static Uint rel_max_cache_bad_fit; + Uint max_cache_size; + Uint abs_max_cache_bad_fit; + Uint rel_max_cache_bad_fit; + + ErtsMsegCalls calls; #if CAN_PARTLY_DESTROY -static Uint min_seg_size; + Uint min_seg_size; +#endif + +}; + +typedef union { + ErtsMsegAllctr_t mseg_alloc; + char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsMsegAllctr_t))]; +} ErtsAlgndMsegAllctr_t; + +static int no_mseg_allocators; +static ErtsAlgndMsegAllctr_t *aligned_mseg_allctr; + +#ifdef ERTS_SMP + +#define ERTS_MSEG_ALLCTR_IX(IX) \ + (&aligned_mseg_allctr[(IX)].mseg_alloc) + +#define ERTS_MSEG_ALLCTR_SS() \ + ERTS_MSEG_ALLCTR_IX((int) erts_get_scheduler_id()) + +#define ERTS_MSEG_ALLCTR_OPT(OPT) \ + ((OPT)->sched_spec ? ERTS_MSEG_ALLCTR_SS() : ERTS_MSEG_ALLCTR_IX(0)) + +#else + +#define ERTS_MSEG_ALLCTR_IX(IX) \ + (&aligned_mseg_allctr[0].mseg_alloc) + +#define ERTS_MSEG_ALLCTR_SS() \ + (&aligned_mseg_allctr[0].mseg_alloc) + +#define ERTS_MSEG_ALLCTR_OPT(OPT) \ + (&aligned_mseg_allctr[0].mseg_alloc) + #endif +#define ERTS_MSEG_LOCK(MA) \ +do { \ + if ((MA)->is_thread_safe) \ + erts_mtx_lock(&(MA)->mtx); \ +} while (0) + +#define ERTS_MSEG_UNLOCK(MA) \ +do { \ + if ((MA)->is_thread_safe) \ + erts_mtx_unlock(&(MA)->mtx); \ +} while (0) #define ERTS_MSEG_ALLOC_STAT(C,SZ) \ do { \ @@ -250,104 +301,44 @@ do { \ #define ONE_GIGA (1000000000) -#define ZERO_CC(CC) (calls.CC.no = 0, calls.CC.giga_no = 0) +#define ZERO_CC(MA, CC) ((MA)->calls.CC.no = 0, \ + (MA)->calls.CC.giga_no = 0) -#define INC_CC(CC) (calls.CC.no == ONE_GIGA - 1 \ - ? (calls.CC.giga_no++, calls.CC.no = 0) \ - : calls.CC.no++) +#define INC_CC(MA, CC) ((MA)->calls.CC.no == ONE_GIGA - 1 \ + ? ((MA)->calls.CC.giga_no++, \ + (MA)->calls.CC.no = 0) \ + : (MA)->calls.CC.no++) -#define DEC_CC(CC) (calls.CC.no == 0 \ - ? (calls.CC.giga_no--, \ - calls.CC.no = ONE_GIGA - 1) \ - : calls.CC.no--) +#define DEC_CC(MA, CC) ((MA)->calls.CC.no == 0 \ + ? 
((MA)->calls.CC.giga_no--, \ + (MA)->calls.CC.no = ONE_GIGA - 1) \ + : (MA)->calls.CC.no--) -static erts_mtx_t mseg_mutex; /* Also needed when !USE_THREADS */ static erts_mtx_t init_atoms_mutex; /* Also needed when !USE_THREADS */ -#ifdef USE_THREADS -#ifdef ERTS_THREADS_NO_SMP -static erts_tid_t main_tid; -static int async_handle = -1; -#endif - -static void thread_safe_init(void) -{ - erts_mtx_init(&init_atoms_mutex, "mseg_init_atoms"); - erts_mtx_init(&mseg_mutex, "mseg"); - -#ifdef ERTS_THREADS_NO_SMP - main_tid = erts_thr_self(); -#endif -} - -#endif - -static ErlTimer cache_check_timer; static ERTS_INLINE void -schedule_cache_check(void) -{ - if (!is_cache_check_scheduled && is_init_done) { -#ifdef ERTS_THREADS_NO_SMP - if (!erts_equal_tids(erts_thr_self(), main_tid)) { - if (!is_cache_check_requested) { - is_cache_check_requested = 1; - sys_async_ready(async_handle); - } - } - else -#endif - { - cache_check_timer.active = 0; - erts_set_timer(&cache_check_timer, - check_cache, - NULL, - NULL, - cache_check_interval); - is_cache_check_scheduled = 1; -#ifdef ERTS_THREADS_NO_SMP - is_cache_check_requested = 0; -#endif - } - } -} - -#ifdef ERTS_THREADS_NO_SMP - -static void -check_schedule_cache_check(void) +schedule_cache_check(ErtsMsegAllctr_t *ma) { - erts_mtx_lock(&mseg_mutex); - if (is_cache_check_requested - && !is_cache_check_scheduled) { - schedule_cache_check(); - } - erts_mtx_unlock(&mseg_mutex); -} - -#endif -static void -mseg_shutdown(void) -{ - MemKind* mk; - erts_mtx_lock(&mseg_mutex); - for (mk=mk_list; mk; mk=mk->next) { - mseg_clear_cache(mk); + if (!ma->is_cache_check_scheduled && ma->is_init_done) { + erts_set_aux_work_timeout(ma->ix, + ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK, + 1); + ma->is_cache_check_scheduled = 1; } - erts_mtx_unlock(&mseg_mutex); } static ERTS_INLINE void * -mseg_create(MemKind* mk, Uint size) +mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size) { void *seg; ASSERT(size % page_size == 0); #if HALFWORD_HEAP - if (mk == &low_mem) { + if (mk == &ma->low_mem) { seg = pmmap(size); if ((unsigned long) seg & CHECK_POINTER_MASK) { erts_fprintf(stderr,"Pointer mask failure (0x%08lx)\n",(unsigned long) seg); @@ -371,18 +362,18 @@ mseg_create(MemKind* mk, Uint size) #endif } - INC_CC(create); + INC_CC(ma, create); return seg; } static ERTS_INLINE void -mseg_destroy(MemKind* mk, void *seg, Uint size) +mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size) { int res; #if HALFWORD_HEAP - if (mk == &low_mem) { + if (mk == &ma->low_mem) { res = pmunmap((void *) seg, size); } else @@ -401,14 +392,14 @@ mseg_destroy(MemKind* mk, void *seg, Uint size) ASSERT(size % page_size == 0); ASSERT(res == 0); - INC_CC(destroy); + INC_CC(ma, destroy); } #if HAVE_MSEG_RECREATE static ERTS_INLINE void * -mseg_recreate(MemKind* mk, void *old_seg, Uint old_size, Uint new_size) +mseg_recreate(ErtsMsegAllctr_t *ma, MemKind* mk, void *old_seg, Uint old_size, Uint new_size) { void *new_seg; @@ -416,7 +407,7 @@ mseg_recreate(MemKind* mk, void *old_seg, Uint old_size, Uint new_size) ASSERT(new_size % page_size == 0); #if HALFWORD_HEAP - if (mk == &low_mem) { + if (mk == &ma->low_mem) { new_seg = (void *) pmremap((void *) old_seg, (size_t) old_size, (size_t) new_size); @@ -447,19 +438,39 @@ mseg_recreate(MemKind* mk, void *old_seg, Uint old_size, Uint new_size) #endif } - INC_CC(recreate); + INC_CC(ma, recreate); return new_seg; } #endif /* #if HAVE_MSEG_RECREATE */ +#ifdef DEBUG +#define ERTS_DBG_MA_CHK_THR_ACCESS(MA) \ +do { \ + if ((MA)->is_thread_safe) \ + 
ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&(MA)->mtx) \ + || erts_smp_is_system_blocked(0) \ + || (ERTS_IS_CRASH_DUMPING \ + && erts_smp_is_system_blocked(ERTS_BS_FLG_ALLOW_GC)));\ + else \ + ERTS_LC_ASSERT((MA)->ix == (int) erts_get_scheduler_id() \ + || erts_smp_is_system_blocked(0) \ + || (ERTS_IS_CRASH_DUMPING \ + && erts_smp_is_system_blocked(ERTS_BS_FLG_ALLOW_GC)));\ +} while (0) +#define ERTS_DBG_MK_CHK_THR_ACCESS(MK) \ + ERTS_DBG_MA_CHK_THR_ACCESS((MK)->ma) +#else +#define ERTS_DBG_MA_CHK_THR_ACCESS(MA) +#define ERTS_DBG_MK_CHK_THR_ACCESS(MK) +#endif static ERTS_INLINE cache_desc_t * alloc_cd(MemKind* mk) { cache_desc_t *cd = mk->free_cache_descs; - ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex)); + ERTS_DBG_MK_CHK_THR_ACCESS(mk); if (cd) mk->free_cache_descs = cd->next; return cd; @@ -468,7 +479,7 @@ alloc_cd(MemKind* mk) static ERTS_INLINE void free_cd(MemKind* mk, cache_desc_t *cd) { - ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex)); + ERTS_DBG_MK_CHK_THR_ACCESS(mk); cd->next = mk->free_cache_descs; mk->free_cache_descs = cd; } @@ -477,7 +488,7 @@ free_cd(MemKind* mk, cache_desc_t *cd) static ERTS_INLINE void link_cd(MemKind* mk, cache_desc_t *cd) { - ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex)); + ERTS_DBG_MK_CHK_THR_ACCESS(mk); if (mk->cache) mk->cache->prev = cd; cd->next = mk->cache; @@ -496,7 +507,7 @@ link_cd(MemKind* mk, cache_desc_t *cd) static ERTS_INLINE void end_link_cd(MemKind* mk, cache_desc_t *cd) { - ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex)); + ERTS_DBG_MK_CHK_THR_ACCESS(mk); if (mk->cache_end) mk->cache_end->next = cd; cd->next = NULL; @@ -515,7 +526,7 @@ end_link_cd(MemKind* mk, cache_desc_t *cd) static ERTS_INLINE void unlink_cd(MemKind* mk, cache_desc_t *cd) { - ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex)); + ERTS_DBG_MK_CHK_THR_ACCESS(mk); if (cd->next) cd->next->prev = cd->prev; else @@ -533,7 +544,7 @@ static ERTS_INLINE void check_cache_limits(MemKind* mk) { cache_desc_t *cd; - ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex)); + ERTS_DBG_MK_CHK_THR_ACCESS(mk); mk->max_cached_seg_size = 0; mk->min_cached_seg_size = ~((Uint) 0); for (cd = mk->cache; cd; cd = cd->next) { @@ -551,7 +562,7 @@ adjust_cache_size(MemKind* mk, int force_check_limits) int check_limits = force_check_limits; Sint max_cached = ((Sint) mk->segments.current.watermark - (Sint) mk->segments.current.no); - ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex)); + ERTS_DBG_MK_CHK_THR_ACCESS(mk); while (((Sint) mk->cache_size) > max_cached && ((Sint) mk->cache_size) > 0) { ASSERT(mk->cache_end); cd = mk->cache_end; @@ -562,7 +573,7 @@ adjust_cache_size(MemKind* mk, int force_check_limits) } if (erts_mtrace_enabled) erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg); - mseg_destroy(mk, cd->seg, cd->size); + mseg_destroy(mk->ma, mk, cd->seg, cd->size); unlink_cd(mk,cd); free_cd(mk,cd); } @@ -571,7 +582,7 @@ adjust_cache_size(MemKind* mk, int force_check_limits) check_cache_limits(mk); } -static void +static Uint check_one_cache(MemKind* mk) { if (mk->segments.current.watermark > mk->segments.current.no) @@ -579,23 +590,37 @@ check_one_cache(MemKind* mk) adjust_cache_size(mk, 0); if (mk->cache_size) - schedule_cache_check(); + schedule_cache_check(mk->ma); + return mk->cache_size; } -static void check_cache(void* unused) +static void do_cache_check(ErtsMsegAllctr_t *ma) { + int empty_cache = 1; MemKind* mk; - erts_mtx_lock(&mseg_mutex); - is_cache_check_scheduled = 0; + ERTS_MSEG_LOCK(ma); - for (mk=mk_list; mk; mk=mk->next) { - check_one_cache(mk); + for (mk=ma->mk_list; mk; 
mk=mk->next) { + if (check_one_cache(mk)) + empty_cache = 0; + } + + if (empty_cache) { + ma->is_cache_check_scheduled = 0; + erts_set_aux_work_timeout(ma->ix, + ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK, + 0); } - INC_CC(check_cache); + INC_CC(ma, check_cache); - erts_mtx_unlock(&mseg_mutex); + ERTS_MSEG_UNLOCK(ma); +} + +void erts_mseg_cache_check(void) +{ + do_cache_check(ERTS_MSEG_ALLCTR_SS()); } static void @@ -611,42 +636,44 @@ mseg_clear_cache(MemKind* mk) mk->segments.current.watermark = mk->segments.current.no; - INC_CC(clear_cache); + INC_CC(mk->ma, clear_cache); } -static ERTS_INLINE MemKind* memkind(const ErtsMsegOpt_t *opt) +static ERTS_INLINE MemKind* memkind(ErtsMsegAllctr_t *ma, + const ErtsMsegOpt_t *opt) { #if HALFWORD_HEAP - return opt->low_mem ? &low_mem : &hi_mem; + return opt->low_mem ? &ma->low_mem : &ma->hi_mem; #else - return &the_mem; + return &ma->the_mem; #endif } static void * -mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt) +mseg_alloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, Uint *size_p, + const ErtsMsegOpt_t *opt) { Uint max, min, diff_size, size; cache_desc_t *cd, *cand_cd; void *seg; - MemKind* mk = memkind(opt); + MemKind* mk = memkind(ma, opt); - INC_CC(alloc); + INC_CC(ma, alloc); size = PAGE_CEILING(*size_p); #if CAN_PARTLY_DESTROY - if (size < min_seg_size) - min_seg_size = size; + if (size < ma->min_seg_size) + ma->min_seg_size = size; #endif if (!opt->cache) { create_seg: adjust_cache_size(mk,0); - seg = mseg_create(mk, size); + seg = mseg_create(ma, mk, size); if (!seg) { mseg_clear_cache(mk); - seg = mseg_create(mk, size); + seg = mseg_create(ma, mk, size); if (!seg) size = 0; } @@ -667,10 +694,10 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt) diff_size = mk->min_cached_seg_size - size; - if (diff_size > abs_max_cache_bad_fit) + if (diff_size > ma->abs_max_cache_bad_fit) goto create_seg; - if (100*PAGES(diff_size) > rel_max_cache_bad_fit*PAGES(size)) + if (100*PAGES(diff_size) > ma->rel_max_cache_bad_fit*PAGES(size)) goto create_seg; } @@ -708,8 +735,8 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt) diff_size = cand_cd->size - size; - if (diff_size > abs_max_cache_bad_fit - || 100*PAGES(diff_size) > rel_max_cache_bad_fit*PAGES(size)) { + if (diff_size > ma->abs_max_cache_bad_fit + || 100*PAGES(diff_size) > ma->rel_max_cache_bad_fit*PAGES(size)) { if (mk->max_cached_seg_size < cand_cd->size) mk->max_cached_seg_size = cand_cd->size; if (mk->min_cached_seg_size > cand_cd->size) @@ -740,18 +767,18 @@ mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt) static void -mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size, +mseg_dealloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, Uint size, const ErtsMsegOpt_t *opt) { - MemKind* mk = memkind(opt); + MemKind* mk = memkind(ma, opt); cache_desc_t *cd; ERTS_MSEG_DEALLOC_STAT(mk,size); - if (!opt->cache || max_cache_size == 0) { + if (!opt->cache || ma->max_cache_size == 0) { if (erts_mtrace_enabled) erts_mtrace_crr_free(atype, SEGTYPE, seg); - mseg_destroy(mk, seg, size); + mseg_destroy(ma, mk, seg, size); } else { int check_limits = 0; @@ -769,7 +796,7 @@ mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size, } if (erts_mtrace_enabled) erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg); - mseg_destroy(mk, cd->seg, cd->size); + mseg_destroy(ma, mk, cd->seg, cd->size); unlink_cd(mk,cd); free_cd(mk,cd); } @@ -790,33 +817,34 @@ mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size, if (check_limits) 
check_cache_limits(mk); - schedule_cache_check(); + schedule_cache_check(ma); } - INC_CC(dealloc); + INC_CC(ma, dealloc); } static void * -mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p, - const ErtsMsegOpt_t *opt) +mseg_realloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, void *seg, + Uint old_size, Uint *new_size_p, const ErtsMsegOpt_t *opt) { - MemKind* mk = memkind(opt); + MemKind* mk; void *new_seg; Uint new_size; if (!seg || !old_size) { - new_seg = mseg_alloc(atype, new_size_p, opt); - DEC_CC(alloc); + new_seg = mseg_alloc(ma, atype, new_size_p, opt); + DEC_CC(ma, alloc); return new_seg; } if (!(*new_size_p)) { - mseg_dealloc(atype, seg, old_size, opt); - DEC_CC(dealloc); + mseg_dealloc(ma, atype, seg, old_size, opt); + DEC_CC(ma, dealloc); return NULL; } + mk = memkind(ma, opt); new_seg = seg; new_size = PAGE_CEILING(*new_size_p); @@ -826,8 +854,8 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p, Uint shrink_sz = old_size - new_size; #if CAN_PARTLY_DESTROY - if (new_size < min_seg_size) - min_seg_size = new_size; + if (new_size < ma->min_seg_size) + ma->min_seg_size = new_size; #endif if (shrink_sz < opt->abs_shrink_th @@ -838,7 +866,7 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p, #if CAN_PARTLY_DESTROY - if (shrink_sz > min_seg_size + if (shrink_sz > ma->min_seg_size && mk->free_cache_descs && opt->cache) { cache_desc_t *cd; @@ -857,7 +885,7 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p, new_size); erts_mtrace_crr_alloc(cd->seg, SEGTYPE, SEGTYPE, cd->size); } - schedule_cache_check(); + schedule_cache_check(ma); } else { if (erts_mtrace_enabled) @@ -866,7 +894,7 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p, SEGTYPE, seg, new_size); - mseg_destroy(mk, ((char *) seg) + new_size, shrink_sz); + mseg_destroy(ma, mk, ((char *) seg) + new_size, shrink_sz); } #elif HAVE_MSEG_RECREATE @@ -875,14 +903,14 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p, #else - new_seg = mseg_alloc(atype, &new_size, opt); + new_seg = mseg_alloc(ma, atype, &new_size, opt); if (!new_seg) new_size = old_size; else { sys_memcpy(((char *) new_seg), ((char *) seg), MIN(new_size, old_size)); - mseg_dealloc(atype, seg, old_size, opt); + mseg_dealloc(ma, atype, seg, old_size, opt); } #endif @@ -892,34 +920,34 @@ mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, Uint *new_size_p, else { if (!opt->preserv) { - mseg_dealloc(atype, seg, old_size, opt); - new_seg = mseg_alloc(atype, &new_size, opt); + mseg_dealloc(ma, atype, seg, old_size, opt); + new_seg = mseg_alloc(ma, atype, &new_size, opt); } else { #if HAVE_MSEG_RECREATE #if !CAN_PARTLY_DESTROY do_recreate: #endif - new_seg = mseg_recreate(mk, (void *) seg, old_size, new_size); + new_seg = mseg_recreate(ma, mk, (void *) seg, old_size, new_size); if (erts_mtrace_enabled) erts_mtrace_crr_realloc(new_seg, atype, SEGTYPE, seg, new_size); if (!new_seg) new_size = old_size; #else - new_seg = mseg_alloc(atype, &new_size, opt); + new_seg = mseg_alloc(ma, atype, &new_size, opt); if (!new_seg) new_size = old_size; else { sys_memcpy(((char *) new_seg), ((char *) seg), MIN(new_size, old_size)); - mseg_dealloc(atype, seg, old_size, opt); + mseg_dealloc(ma, atype, seg, old_size, opt); } #endif } } - INC_CC(realloc); + INC_CC(ma, realloc); *new_size_p = new_size; @@ -937,7 +965,6 @@ static struct { Eterm amcbf; Eterm rmcbf; Eterm mcs; - Eterm cci; Eterm memkind; Eterm name; @@ 
-973,13 +1000,13 @@ static void ERTS_INLINE atom_init(Eterm *atom, char *name) #define AM_INIT(AM) atom_init(&am.AM, #AM) static void -init_atoms(void) +init_atoms(ErtsMsegAllctr_t *ma) { #ifdef DEBUG Eterm *atom; #endif - erts_mtx_unlock(&mseg_mutex); + ERTS_MSEG_UNLOCK(ma); erts_mtx_lock(&init_atoms_mutex); if (!atoms_initialized) { @@ -997,7 +1024,6 @@ init_atoms(void) AM_INIT(amcbf); AM_INIT(rmcbf); AM_INIT(mcs); - AM_INIT(cci); AM_INIT(status); AM_INIT(cached_segments); @@ -1025,7 +1051,7 @@ init_atoms(void) #endif } - erts_mtx_lock(&mseg_mutex); + ERTS_MSEG_LOCK(ma); atoms_initialized = 1; erts_mtx_unlock(&init_atoms_mutex); } @@ -1082,7 +1108,8 @@ add_4tup(Uint **hpp, Uint *szp, Eterm *lp, } static Eterm -info_options(char *prefix, +info_options(ErtsMsegAllctr_t *ma, + char *prefix, int *print_to_p, void *print_to_arg, Uint **hpp, @@ -1093,30 +1120,26 @@ info_options(char *prefix, if (print_to_p) { int to = *print_to_p; void *arg = print_to_arg; - erts_print(to, arg, "%samcbf: %beu\n", prefix, abs_max_cache_bad_fit); - erts_print(to, arg, "%srmcbf: %beu\n", prefix, rel_max_cache_bad_fit); - erts_print(to, arg, "%smcs: %beu\n", prefix, max_cache_size); - erts_print(to, arg, "%scci: %beu\n", prefix, cache_check_interval); + erts_print(to, arg, "%samcbf: %beu\n", prefix, ma->abs_max_cache_bad_fit); + erts_print(to, arg, "%srmcbf: %beu\n", prefix, ma->rel_max_cache_bad_fit); + erts_print(to, arg, "%smcs: %beu\n", prefix, ma->max_cache_size); } if (hpp || szp) { if (!atoms_initialized) - init_atoms(); + init_atoms(ma); res = NIL; add_2tup(hpp, szp, &res, - am.cci, - bld_uint(hpp, szp, cache_check_interval)); - add_2tup(hpp, szp, &res, am.mcs, - bld_uint(hpp, szp, max_cache_size)); + bld_uint(hpp, szp, ma->max_cache_size)); add_2tup(hpp, szp, &res, am.rmcbf, - bld_uint(hpp, szp, rel_max_cache_bad_fit)); + bld_uint(hpp, szp, ma->rel_max_cache_bad_fit)); add_2tup(hpp, szp, &res, am.amcbf, - bld_uint(hpp, szp, abs_max_cache_bad_fit)); + bld_uint(hpp, szp, ma->abs_max_cache_bad_fit)); } @@ -1124,18 +1147,18 @@ info_options(char *prefix, } static Eterm -info_calls(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp) +info_calls(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp) { Eterm res = THE_NON_VALUE; if (print_to_p) { -#define PRINT_CC(TO, TOA, CC) \ - if (calls.CC.giga_no == 0) \ - erts_print(TO, TOA, "mseg_%s calls: %b32u\n", #CC, calls.CC.no); \ - else \ +#define PRINT_CC(TO, TOA, CC) \ + if (ma->calls.CC.giga_no == 0) \ + erts_print(TO, TOA, "mseg_%s calls: %b32u\n", #CC, ma->calls.CC.no); \ + else \ erts_print(TO, TOA, "mseg_%s calls: %b32u%09b32u\n", #CC, \ - calls.CC.giga_no, calls.CC.no) + ma->calls.CC.giga_no, ma->calls.CC.no) int to = *print_to_p; void *arg = print_to_arg; @@ -1161,48 +1184,48 @@ info_calls(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp) add_3tup(hpp, szp, &res, am.mseg_check_cache, - bld_unstable_uint(hpp, szp, calls.check_cache.giga_no), - bld_unstable_uint(hpp, szp, calls.check_cache.no)); + bld_unstable_uint(hpp, szp, ma->calls.check_cache.giga_no), + bld_unstable_uint(hpp, szp, ma->calls.check_cache.no)); add_3tup(hpp, szp, &res, am.mseg_clear_cache, - bld_unstable_uint(hpp, szp, calls.clear_cache.giga_no), - bld_unstable_uint(hpp, szp, calls.clear_cache.no)); + bld_unstable_uint(hpp, szp, ma->calls.clear_cache.giga_no), + bld_unstable_uint(hpp, szp, ma->calls.clear_cache.no)); #if HAVE_MSEG_RECREATE add_3tup(hpp, szp, &res, am.mseg_recreate, - bld_unstable_uint(hpp, szp, calls.recreate.giga_no), - 
bld_unstable_uint(hpp, szp, calls.recreate.no)); + bld_unstable_uint(hpp, szp, ma->calls.recreate.giga_no), + bld_unstable_uint(hpp, szp, ma->calls.recreate.no)); #endif add_3tup(hpp, szp, &res, am.mseg_destroy, - bld_unstable_uint(hpp, szp, calls.destroy.giga_no), - bld_unstable_uint(hpp, szp, calls.destroy.no)); + bld_unstable_uint(hpp, szp, ma->calls.destroy.giga_no), + bld_unstable_uint(hpp, szp, ma->calls.destroy.no)); add_3tup(hpp, szp, &res, am.mseg_create, - bld_unstable_uint(hpp, szp, calls.create.giga_no), - bld_unstable_uint(hpp, szp, calls.create.no)); + bld_unstable_uint(hpp, szp, ma->calls.create.giga_no), + bld_unstable_uint(hpp, szp, ma->calls.create.no)); add_3tup(hpp, szp, &res, am.mseg_realloc, - bld_unstable_uint(hpp, szp, calls.realloc.giga_no), - bld_unstable_uint(hpp, szp, calls.realloc.no)); + bld_unstable_uint(hpp, szp, ma->calls.realloc.giga_no), + bld_unstable_uint(hpp, szp, ma->calls.realloc.no)); add_3tup(hpp, szp, &res, am.mseg_dealloc, - bld_unstable_uint(hpp, szp, calls.dealloc.giga_no), - bld_unstable_uint(hpp, szp, calls.dealloc.no)); + bld_unstable_uint(hpp, szp, ma->calls.dealloc.giga_no), + bld_unstable_uint(hpp, szp, ma->calls.dealloc.no)); add_3tup(hpp, szp, &res, am.mseg_alloc, - bld_unstable_uint(hpp, szp, calls.alloc.giga_no), - bld_unstable_uint(hpp, szp, calls.alloc.no)); + bld_unstable_uint(hpp, szp, ma->calls.alloc.giga_no), + bld_unstable_uint(hpp, szp, ma->calls.alloc.no)); } return res; } static Eterm -info_status(MemKind* mk, int *print_to_p, void *print_to_arg, +info_status(ErtsMsegAllctr_t *ma, MemKind* mk, int *print_to_p, void *print_to_arg, int begin_new_max_period, Uint **hpp, Uint *szp) { Eterm res = THE_NON_VALUE; @@ -1258,7 +1281,7 @@ info_status(MemKind* mk, int *print_to_p, void *print_to_arg, return res; } -static Eterm info_memkind(MemKind* mk, int *print_to_p, void *print_to_arg, +static Eterm info_memkind(ErtsMsegAllctr_t *ma, MemKind* mk, int *print_to_p, void *print_to_arg, int begin_max_per, Uint **hpp, Uint *szp) { Eterm res = THE_NON_VALUE; @@ -1274,8 +1297,8 @@ static Eterm info_memkind(MemKind* mk, int *print_to_p, void *print_to_arg, atoms[2] = am.calls; values[0] = erts_bld_string(hpp, szp, mk->name); } - values[1] = info_status(mk, print_to_p, print_to_arg, begin_max_per, hpp, szp); - values[2] = info_calls(print_to_p, print_to_arg, hpp, szp); + values[1] = info_status(ma, mk, print_to_p, print_to_arg, begin_max_per, hpp, szp); + values[2] = info_calls(ma, print_to_p, print_to_arg, hpp, szp); if (hpp || szp) res = bld_2tup_list(hpp, szp, 3, atoms, values); @@ -1285,7 +1308,7 @@ static Eterm info_memkind(MemKind* mk, int *print_to_p, void *print_to_arg, static Eterm -info_version(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp) +info_version(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp) { Eterm res = THE_NON_VALUE; @@ -1306,56 +1329,64 @@ info_version(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp) \* */ Eterm -erts_mseg_info_options(int *print_to_p, void *print_to_arg, +erts_mseg_info_options(int ix, + int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp) { + ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(ix); Eterm res; - erts_mtx_lock(&mseg_mutex); + ERTS_MSEG_LOCK(ma); - res = info_options("option ", print_to_p, print_to_arg, hpp, szp); + ERTS_DBG_MA_CHK_THR_ACCESS(ma); - erts_mtx_unlock(&mseg_mutex); + res = info_options(ma, "option ", print_to_p, print_to_arg, hpp, szp); + + ERTS_MSEG_UNLOCK(ma); return res; } Eterm -erts_mseg_info(int *print_to_p, 
+erts_mseg_info(int ix, + int *print_to_p, void *print_to_arg, int begin_max_per, Uint **hpp, Uint *szp) { + ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(ix); Eterm res = THE_NON_VALUE; Eterm atoms[4]; Eterm values[4]; Uint n = 0; - erts_mtx_lock(&mseg_mutex); + ERTS_MSEG_LOCK(ma); + + ERTS_DBG_MA_CHK_THR_ACCESS(ma); if (hpp || szp) { if (!atoms_initialized) - init_atoms(); + init_atoms(ma); atoms[0] = am.version; atoms[1] = am.options; atoms[2] = am.memkind; atoms[3] = am.memkind; } - values[n++] = info_version(print_to_p, print_to_arg, hpp, szp); - values[n++] = info_options("option ", print_to_p, print_to_arg, hpp, szp); + values[n++] = info_version(ma, print_to_p, print_to_arg, hpp, szp); + values[n++] = info_options(ma, "option ", print_to_p, print_to_arg, hpp, szp); #if HALFWORD_HEAP - values[n++] = info_memkind(&low_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp); - values[n++] = info_memkind(&hi_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp); + values[n++] = info_memkind(ma, &ma->low_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp); + values[n++] = info_memkind(ma, &ma->hi_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp); #else - values[n++] = info_memkind(&the_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp); + values[n++] = info_memkind(ma, &ma->the_mem, print_to_p, print_to_arg, begin_max_per, hpp, szp); #endif if (hpp || szp) res = bld_2tup_list(hpp, szp, n, atoms, values); - erts_mtx_unlock(&mseg_mutex); + ERTS_MSEG_UNLOCK(ma); return res; } @@ -1363,10 +1394,12 @@ erts_mseg_info(int *print_to_p, void * erts_mseg_alloc_opt(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt) { + ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt); void *seg; - erts_mtx_lock(&mseg_mutex); - seg = mseg_alloc(atype, size_p, opt); - erts_mtx_unlock(&mseg_mutex); + ERTS_MSEG_LOCK(ma); + ERTS_DBG_MA_CHK_THR_ACCESS(ma); + seg = mseg_alloc(ma, atype, size_p, opt); + ERTS_MSEG_UNLOCK(ma); return seg; } @@ -1377,12 +1410,14 @@ erts_mseg_alloc(ErtsAlcType_t atype, Uint *size_p) } void -erts_mseg_dealloc_opt(ErtsAlcType_t atype, void *seg, Uint size, - const ErtsMsegOpt_t *opt) +erts_mseg_dealloc_opt(ErtsAlcType_t atype, void *seg, + Uint size, const ErtsMsegOpt_t *opt) { - erts_mtx_lock(&mseg_mutex); - mseg_dealloc(atype, seg, size, opt); - erts_mtx_unlock(&mseg_mutex); + ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt); + ERTS_MSEG_LOCK(ma); + ERTS_DBG_MA_CHK_THR_ACCESS(ma); + mseg_dealloc(ma, atype, seg, size, opt); + ERTS_MSEG_UNLOCK(ma); } void @@ -1392,44 +1427,60 @@ erts_mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size) } void * -erts_mseg_realloc_opt(ErtsAlcType_t atype, void *seg, Uint old_size, - Uint *new_size_p, const ErtsMsegOpt_t *opt) +erts_mseg_realloc_opt(ErtsAlcType_t atype, void *seg, + Uint old_size, Uint *new_size_p, + const ErtsMsegOpt_t *opt) { + ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt); void *new_seg; - erts_mtx_lock(&mseg_mutex); - new_seg = mseg_realloc(atype, seg, old_size, new_size_p, opt); - erts_mtx_unlock(&mseg_mutex); + ERTS_MSEG_LOCK(ma); + ERTS_DBG_MA_CHK_THR_ACCESS(ma); + new_seg = mseg_realloc(ma, atype, seg, old_size, new_size_p, opt); + ERTS_MSEG_UNLOCK(ma); return new_seg; } void * -erts_mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size, - Uint *new_size_p) +erts_mseg_realloc(ErtsAlcType_t atype, void *seg, + Uint old_size, Uint *new_size_p) { - return erts_mseg_realloc_opt(atype, seg, old_size, new_size_p, &erts_mseg_default_opt); + return erts_mseg_realloc_opt(atype, seg, old_size, new_size_p, + 
&erts_mseg_default_opt); } void erts_mseg_clear_cache(void) { + ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_SS(); MemKind* mk; - erts_mtx_lock(&mseg_mutex); - for (mk=mk_list; mk; mk=mk->next) { + +start: + + ERTS_MSEG_LOCK(ma); + ERTS_DBG_MA_CHK_THR_ACCESS(ma); + for (mk=ma->mk_list; mk; mk=mk->next) { mseg_clear_cache(mk); } - erts_mtx_unlock(&mseg_mutex); + ERTS_MSEG_UNLOCK(ma); + + if (ma->ix != 0) { + ma = ERTS_MSEG_ALLCTR_IX(0); + goto start; + } } Uint -erts_mseg_no(void) +erts_mseg_no(const ErtsMsegOpt_t *opt) { + ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt); MemKind* mk; Uint n = 0; - erts_mtx_lock(&mseg_mutex); - for (mk=mk_list; mk; mk=mk->next) { + ERTS_MSEG_LOCK(ma); + ERTS_DBG_MA_CHK_THR_ACCESS(ma); + for (mk=ma->mk_list; mk; mk=mk->next) { n += mk->segments.current.no; } - erts_mtx_unlock(&mseg_mutex); + ERTS_MSEG_UNLOCK(ma); return n; } @@ -1439,7 +1490,7 @@ erts_mseg_unit_size(void) return page_size; } -static void mem_kind_init(MemKind* mk, const char* name) +static void mem_kind_init(ErtsMsegAllctr_t *ma, MemKind* mk, const char* name) { unsigned i; @@ -1450,10 +1501,10 @@ static void mem_kind_init(MemKind* mk, const char* name) mk->cache_size = 0; mk->cache_hits = 0; - if (max_cache_size > 0) { - for (i = 0; i < max_cache_size - 1; i++) + if (ma->max_cache_size > 0) { + for (i = 0; i < ma->max_cache_size - 1; i++) mk->cache_descs[i].next = &mk->cache_descs[i + 1]; - mk->cache_descs[max_cache_size - 1].next = NULL; + mk->cache_descs[ma->max_cache_size - 1].next = NULL; mk->free_cache_descs = &mk->cache_descs[0]; } else @@ -1467,30 +1518,38 @@ static void mem_kind_init(MemKind* mk, const char* name) mk->segments.max_ever.no = 0; mk->segments.max_ever.sz = 0; + mk->ma = ma; mk->name = name; - mk->next = mk_list; - mk_list = mk; + mk->next = ma->mk_list; + ma->mk_list = mk; } + + void erts_mseg_init(ErtsMsegInit_t *init) { - atoms_initialized = 0; - is_init_done = 0; + int i; + UWord x; - /* Options ... */ +#ifdef ERTS_SMP + no_mseg_allocators = init->nos + 1; +#else + no_mseg_allocators = 1; +#endif - abs_max_cache_bad_fit = init->amcbf; - rel_max_cache_bad_fit = init->rmcbf; - max_cache_size = init->mcs; - cache_check_interval = init->cci; + x = (UWord) malloc(sizeof(ErtsAlgndMsegAllctr_t) + *no_mseg_allocators + + (ERTS_CACHE_LINE_SIZE-1)); + if (x & ERTS_CACHE_LINE_MASK) + x = (x & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE; + ASSERT((x & ERTS_CACHE_LINE_MASK) == 0); + aligned_mseg_allctr = (ErtsAlgndMsegAllctr_t *) x; - /* */ + atoms_initialized = 0; -#ifdef USE_THREADS - thread_safe_init(); -#endif + erts_mtx_init(&init_atoms_mutex, "mseg_init_atoms"); #if HAVE_MMAP && !defined(MAP_ANON) mmap_fd = open("/dev/zero", O_RDWR); @@ -1512,34 +1571,55 @@ erts_mseg_init(ErtsMsegInit_t *init) page_shift++; } - sys_memzero((void *) &calls, sizeof(calls)); + for (i = 0; i < no_mseg_allocators; i++) { + ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(i); -#if CAN_PARTLY_DESTROY - min_seg_size = ~((Uint) 0); -#endif + ma->ix = i; + + ma->is_init_done = 0; + + if (i != 0) + ma->is_thread_safe = 0; + else { + ma->is_thread_safe = 1; + erts_mtx_init(&ma->mtx, "mseg"); + } + + ma->is_cache_check_scheduled = 0; + + /* Options ... 
*/ + + ma->abs_max_cache_bad_fit = init->amcbf; + ma->rel_max_cache_bad_fit = init->rmcbf; + ma->max_cache_size = init->mcs; - if (max_cache_size > MAX_CACHE_SIZE) - max_cache_size = MAX_CACHE_SIZE; + if (ma->max_cache_size > MAX_CACHE_SIZE) + ma->max_cache_size = MAX_CACHE_SIZE; + + ma->mk_list = NULL; #if HALFWORD_HEAP - mem_kind_init(&low_mem, "low memory"); - mem_kind_init(&hi_mem, "high memory"); + mem_kind_init(ma, &ma->low_mem, "low memory"); + mem_kind_init(ma, &ma->hi_mem, "high memory"); #else - mem_kind_init(&the_mem, "all memory"); + mem_kind_init(ma, &ma->the_mem, "all memory"); #endif - is_cache_check_scheduled = 0; -#ifdef ERTS_THREADS_NO_SMP - is_cache_check_requested = 0; + sys_memzero((void *) &ma->calls, sizeof(ErtsMsegCalls)); + +#if CAN_PARTLY_DESTROY + ma->min_seg_size = ~((Uint) 0); #endif + } } -static ERTS_INLINE Uint tot_cache_size(void) +static ERTS_INLINE Uint tot_cache_size(ErtsMsegAllctr_t *ma) { MemKind* mk; Uint sz = 0; - for (mk=mk_list; mk; mk=mk->next) { + ERTS_DBG_MA_CHK_THR_ACCESS(ma); + for (mk=ma->mk_list; mk; mk=mk->next) { sz += mk->cache_size; } return sz; @@ -1552,25 +1632,13 @@ static ERTS_INLINE Uint tot_cache_size(void) void erts_mseg_late_init(void) { -#ifdef ERTS_THREADS_NO_SMP - int handle = - erts_register_async_ready_callback( - check_schedule_cache_check); -#endif - erts_mtx_lock(&mseg_mutex); - is_init_done = 1; -#ifdef ERTS_THREADS_NO_SMP - async_handle = handle; -#endif - if (tot_cache_size()) - schedule_cache_check(); - erts_mtx_unlock(&mseg_mutex); -} - -void -erts_mseg_exit(void) -{ - mseg_shutdown(); + ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_SS(); + ERTS_MSEG_LOCK(ma); + ERTS_DBG_MA_CHK_THR_ACCESS(ma); + ma->is_init_done = 1; + if (tot_cache_size(ma)) + schedule_cache_check(ma); + ERTS_MSEG_UNLOCK(ma); } #endif /* #if HAVE_ERTS_MSEG */ @@ -1599,12 +1667,13 @@ erts_mseg_test(unsigned long op, erts_mseg_clear_cache(); return (unsigned long) 0; case 0x405: - return (unsigned long) erts_mseg_no(); + return (unsigned long) erts_mseg_no(&erts_mseg_default_opt); case 0x406: { + ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_IX(0); unsigned long res; - erts_mtx_lock(&mseg_mutex); - res = (unsigned long) tot_cache_size(); - erts_mtx_unlock(&mseg_mutex); + ERTS_MSEG_LOCK(ma); + res = (unsigned long) tot_cache_size(ma); + ERTS_MSEG_UNLOCK(ma); return res; } #else /* #if HAVE_ERTS_MSEG */ diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h index 8f116030a8..741080fb78 100644 --- a/erts/emulator/sys/common/erl_mseg.h +++ b/erts/emulator/sys/common/erl_mseg.h @@ -44,7 +44,7 @@ typedef struct { Uint amcbf; Uint rmcbf; Uint mcs; - Uint cci; + Uint nos; } ErtsMsegInit_t; #define ERTS_MSEG_INIT_DEFAULT_INITIALIZER \ @@ -60,6 +60,7 @@ typedef struct { int preserv; UWord abs_shrink_th; UWord rel_shrink_th; + int sched_spec; #if HALFWORD_HEAP int low_mem; #endif @@ -75,14 +76,14 @@ void *erts_mseg_realloc(ErtsAlcType_t, void *, Uint, Uint *); void *erts_mseg_realloc_opt(ErtsAlcType_t, void *, Uint, Uint *, const ErtsMsegOpt_t *); void erts_mseg_clear_cache(void); -Uint erts_mseg_no(void); +void erts_mseg_cache_check(void); +Uint erts_mseg_no( const ErtsMsegOpt_t *); Uint erts_mseg_unit_size(void); void erts_mseg_init(ErtsMsegInit_t *init); void erts_mseg_late_init(void); /* Have to be called after all allocators, threads and timers have been initialized. 
*/ -void erts_mseg_exit(void); -Eterm erts_mseg_info_options(int *, void*, Uint **, Uint *); -Eterm erts_mseg_info(int *, void*, int, Uint **, Uint *); +Eterm erts_mseg_info_options(int, int *, void*, Uint **, Uint *); +Eterm erts_mseg_info(int, int *, void*, int, Uint **, Uint *); #endif /* #if HAVE_ERTS_MSEG */ diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c index 9bd64f5908..80db2055a2 100644 --- a/erts/emulator/sys/common/erl_poll.c +++ b/erts/emulator/sys/common/erl_poll.c @@ -68,6 +68,7 @@ # endif # endif #endif +#include "erl_thr_progress.h" #include "erl_driver.h" #include "erl_alloc.h" @@ -114,7 +115,7 @@ #endif #define ERTS_POLL_USE_WAKEUP_PIPE \ - (ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP)) + (ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(USE_THREADS)) #ifdef ERTS_SMP @@ -261,7 +262,6 @@ struct ErtsPollSet_ { #ifdef ERTS_SMP erts_atomic32_t polled; erts_smp_mtx_t mtx; -#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT #endif #if ERTS_POLL_USE_WAKEUP_PIPE int wake_fds[2]; @@ -269,10 +269,8 @@ struct ErtsPollSet_ { #if ERTS_POLL_USE_FALLBACK int fallback_used; #endif -#ifdef ERTS_SMP +#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT erts_atomic32_t wakeup_state; -#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT - volatile int wakeup_state; #endif erts_smp_atomic32_t timeout; #ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS @@ -345,21 +343,16 @@ static void print_misc_debug_info(void); static ERTS_INLINE void reset_wakeup_state(ErtsPollSet ps) { -#ifdef ERTS_SMP - erts_atomic32_set_nob(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN); - ERTS_THR_MEMORY_BARRIER; -#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT - ps->wakeup_state = 0; +#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT + erts_atomic32_set_mb(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN); #endif } static ERTS_INLINE int is_woken(ErtsPollSet ps) { -#ifdef ERTS_SMP +#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT return erts_atomic32_read_acqb(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN; -#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT - return ps->wakeup_state != ERTS_POLL_NOT_WOKEN; #else return 0; #endif @@ -368,13 +361,9 @@ is_woken(ErtsPollSet ps) static ERTS_INLINE int is_interrupted_reset(ErtsPollSet ps) { -#ifdef ERTS_SMP +#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT return (erts_atomic32_xchg_nob(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN) == ERTS_POLL_WOKEN_INTR); -#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT - int res = ps->wakeup_state == ERTS_POLL_WOKEN_INTR; - ps->wakeup_state = ERTS_POLL_NOT_WOKEN; - return res; #else return 0; #endif @@ -383,16 +372,13 @@ is_interrupted_reset(ErtsPollSet ps) static ERTS_INLINE void woke_up(ErtsPollSet ps) { -#ifdef ERTS_SMP +#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT erts_aint32_t wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state); if (wakeup_state == ERTS_POLL_NOT_WOKEN) (void) erts_atomic32_cmpxchg_nob(&ps->wakeup_state, ERTS_POLL_WOKEN, ERTS_POLL_NOT_WOKEN); ASSERT(erts_atomic32_read_nob(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN); -#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT - if (ps->wakeup_state == ERTS_POLL_NOT_WOKEN) - ps->wakeup_state = ERTS_POLL_WOKEN; #endif } @@ -403,28 +389,27 @@ woke_up(ErtsPollSet ps) #if ERTS_POLL_USE_WAKEUP_PIPE static ERTS_INLINE void -wake_poller(ErtsPollSet ps, int interrupted) +wake_poller(ErtsPollSet ps, int interrupted, int async_signal_safe) { - int wake = 0; -#ifdef ERTS_SMP - erts_aint32_t wakeup_state; - if (!interrupted) - wakeup_state = 
erts_atomic32_cmpxchg_relb(&ps->wakeup_state, - ERTS_POLL_WOKEN, - ERTS_POLL_NOT_WOKEN); + int wake; + if (async_signal_safe) + wake = 1; else { - /* - * We might unnecessarily write to the pipe, however, - * that isn't problematic. - */ - wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state); - erts_atomic32_set_relb(&ps->wakeup_state, ERTS_POLL_WOKEN_INTR); + erts_aint32_t wakeup_state; + if (!interrupted) + wakeup_state = erts_atomic32_cmpxchg_relb(&ps->wakeup_state, + ERTS_POLL_WOKEN, + ERTS_POLL_NOT_WOKEN); + else { + /* + * We might unnecessarily write to the pipe, however, + * that isn't problematic. + */ + wakeup_state = erts_atomic32_read_nob(&ps->wakeup_state); + erts_atomic32_set_relb(&ps->wakeup_state, ERTS_POLL_WOKEN_INTR); + } + wake = wakeup_state == ERTS_POLL_NOT_WOKEN; } - wake = wakeup_state == ERTS_POLL_NOT_WOKEN; -#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT - wake = ps->wakeup_state == ERTS_POLL_NOT_WOKEN; - ps->wakeup_state = interrupted ? ERTS_POLL_WOKEN_INTR : ERTS_POLL_NOT_WOKEN; -#endif /* * NOTE: This function might be called from signal handlers in the * non-smp case; therefore, it has to be async-signal safe in @@ -439,9 +424,17 @@ wake_poller(ErtsPollSet ps, int interrupted) res = write(ps->wake_fds[1], "!", 1); } while (res < 0 && errno == EINTR); if (res <= 0 && errno != ERRNO_BLOCK) { - fatal_error_async_signal_safe(__FILE__ - ":XXX:wake_poller(): " - "Failed to write on wakeup pipe\n"); + if (async_signal_safe) + fatal_error_async_signal_safe(__FILE__ + ":XXX:wake_poller(): " + "Failed to write on wakeup pipe\n"); + else + fatal_error("%s:%d:wake_poller(): " + "Failed to write to wakeup pipe fd=%d: " + "%s (%d)\n", + __FILE__, __LINE__, + ps->wake_fds[1], + erl_errno_id(errno), errno); } } } @@ -449,11 +442,18 @@ wake_poller(ErtsPollSet ps, int interrupted) static ERTS_INLINE void cleanup_wakeup_pipe(ErtsPollSet ps) { +#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT + int intr = 0; +#endif int fd = ps->wake_fds[0]; int res; do { char buf[32]; res = read(fd, buf, sizeof(buf)); +#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT + if (res > 0) + intr = 1; +#endif } while (res > 0 || (res < 0 && errno == EINTR)); if (res < 0 && errno != ERRNO_BLOCK) { fatal_error("%s:%d:cleanup_wakeup_pipe(): " @@ -463,6 +463,10 @@ cleanup_wakeup_pipe(ErtsPollSet ps) fd, erl_errno_id(errno), errno); } +#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT + if (intr) + erts_atomic32_set_nob(&ps->wakeup_state, ERTS_POLL_WOKEN_INTR); +#endif } static void @@ -1497,7 +1501,7 @@ ERTS_POLL_EXPORT(erts_poll_controlv)(ErtsPollSet ps, #ifdef ERTS_SMP if (final_do_wake) - wake_poller(ps, 0); + wake_poller(ps, 0, 0); #endif /* ERTS_SMP */ } @@ -1520,7 +1524,7 @@ ERTS_POLL_EXPORT(erts_poll_control)(ErtsPollSet ps, #ifdef ERTS_SMP if (*do_wake) { - wake_poller(ps, 0); + wake_poller(ps, 0, 0); } #endif /* ERTS_SMP */ @@ -1893,9 +1897,9 @@ save_poll_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res, } static ERTS_INLINE int -check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked) +check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res) { - ASSERT(!*ps_locked); + int res; if (erts_smp_atomic_read_nob(&ps->no_of_user_fds) == 0 && tv->tv_usec == 0 && tv->tv_sec == 0) { /* Nothing to poll and zero timeout; done... 
*/ @@ -1915,16 +1919,23 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked) timeout = INT_MAX; if (max_res > ps->res_events_len) grow_res_events(ps, max_res); - return epoll_wait(ps->kp_fd, ps->res_events, max_res, (int)timeout); +#ifdef ERTS_SMP + if (timeout) + erts_thr_progress_prepare_wait(NULL); +#endif + res = epoll_wait(ps->kp_fd, ps->res_events, max_res, (int)timeout); #elif ERTS_POLL_USE_KQUEUE /* --- kqueue ------------------------------ */ struct timespec ts; - ts.tv_sec = tv->tv_sec; - ts.tv_nsec = tv->tv_usec*1000; if (max_res > ps->res_events_len) grow_res_events(ps, max_res); - return kevent(ps->kp_fd, NULL, 0, ps->res_events, max_res, &ts); +#ifdef ERTS_SMP + if (timeout) + erts_thr_progress_prepare_wait(NULL); +#endif + ts.tv_sec = tv->tv_sec; + ts.tv_nsec = tv->tv_usec*1000; + res = kevent(ps->kp_fd, NULL, 0, ps->res_events, max_res, &ts); #endif /* ----------------------------------------- */ - } else /* use fallback (i.e. poll() or select()) */ #endif /* ERTS_POLL_USE_FALLBACK */ @@ -1947,22 +1958,38 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked) if (poll_res.dp_nfds > ps->res_events_len) grow_res_events(ps, poll_res.dp_nfds); poll_res.dp_fds = ps->res_events; +#ifdef ERTS_SMP + if (timeout) + erts_thr_progress_prepare_wait(NULL); +#endif poll_res.dp_timeout = (int) timeout; - return ioctl(ps->kp_fd, DP_POLL, &poll_res); + res = ioctl(ps->kp_fd, DP_POLL, &poll_res); #elif ERTS_POLL_USE_POLL /* --- poll -------------------------------- */ if (timeout > INT_MAX) timeout = INT_MAX; - return poll(ps->poll_fds, ps->no_poll_fds, (int) timeout); +#ifdef ERTS_SMP + if (timeout) + erts_thr_progress_prepare_wait(NULL); +#endif + res = poll(ps->poll_fds, ps->no_poll_fds, (int) timeout); #elif ERTS_POLL_USE_SELECT /* --- select ------------------------------ */ - int res; + SysTimeval to = *tv; + ps->res_input_fds = ps->input_fds; ps->res_output_fds = ps->output_fds; + +#ifdef ERTS_SMP + if (to.tv_sec || to.tv_usec) + erts_thr_progress_prepare_wait(NULL); +#endif res = select(ps->max_fd + 1, &ps->res_input_fds, &ps->res_output_fds, NULL, - tv); + &to); #ifdef ERTS_SMP + if (to.tv_sec || to.tv_usec) + erts_thr_progress_finalize_wait(NULL); if (res < 0 && errno == EBADF && ERTS_POLLSET_HAVE_UPDATE_REQUESTS(ps)) { @@ -1978,15 +2005,16 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked) * have triggered, we fake an EAGAIN error and let the caller * restart us. 
*/ - SysTimeval zero_tv = {0, 0}; - *ps_locked = 1; + to.tv_sec = 0; + to.tv_usec = 0; ERTS_POLLSET_LOCK(ps); handle_update_requests(ps); + ERTS_POLLSET_UNLOCK(ps); res = select(ps->max_fd + 1, &ps->res_input_fds, &ps->res_output_fds, NULL, - &zero_tv); + &to); if (res == 0) { errno = EAGAIN; res = -1; @@ -1996,6 +2024,11 @@ check_fd_events(ErtsPollSet ps, SysTimeval *tv, int max_res, int *ps_locked) return res; #endif /* ----------------------------------------- */ } +#ifdef ERTS_SMP + if (timeout) + erts_thr_progress_finalize_wait(NULL); +#endif + return res; } } @@ -2007,7 +2040,9 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps, { int res, no_fds; int ebadf = 0; - int ps_locked; +#ifdef ERTS_SMP + int ps_locked = 0; +#endif SysTimeval *tvp; SysTimeval itv; @@ -2049,8 +2084,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps, } #endif - ps_locked = 0; - res = check_fd_events(ps, tvp, no_fds, &ps_locked); + res = check_fd_events(ps, tvp, no_fds); woke_up(ps); @@ -2072,10 +2106,8 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps, #endif #ifdef ERTS_SMP - if (!ps_locked) { - ps_locked = 1; - ERTS_POLLSET_LOCK(ps); - } + ps_locked = 1; + ERTS_POLLSET_LOCK(ps); #endif no_fds = save_poll_result(ps, pr, no_fds, res, ebadf); @@ -2111,19 +2143,26 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps, void ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet ps, int set) { -#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP) - /* - * NOTE: This function might be called from signal handlers in the - * non-smp case; therefore, it has to be async-signal safe in - * the non-smp case. - */ +#if defined(USE_THREADS) if (!set) reset_wakeup_state(ps); else - wake_poller(ps, 1); + wake_poller(ps, 1, 0); #endif } +#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT +void +ERTS_POLL_EXPORT(erts_poll_async_sig_interrupt)(ErtsPollSet ps) +{ + /* + * NOTE: This function is called from signal handlers, it, + * therefore, it has to be async-signal safe. + */ + wake_poller(ps, 1, 1); +} +#endif + /* * erts_poll_interrupt_timed(): * If 'set' != 0, interrupt thread blocked in erts_poll_wait() if it @@ -2139,7 +2178,7 @@ ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet ps, reset_wakeup_state(ps); else { if (erts_smp_atomic32_read_acqb(&ps->timeout) > (erts_aint32_t) msec) - wake_poller(ps, 1); + wake_poller(ps, 1, 0); #ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS else { if (ERTS_POLLSET_IS_POLLED(ps)) @@ -2266,10 +2305,8 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void) erts_atomic32_init_nob(&ps->polled, 0); erts_smp_mtx_init(&ps->mtx, "pollset"); #endif -#ifdef ERTS_SMP +#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT erts_atomic32_init_nob(&ps->wakeup_state, (erts_aint32_t) 0); -#elif ERTS_POLL_ASYNC_INTERRUPT_SUPPORT - ps->wakeup_state = 0; #endif #if ERTS_POLL_USE_WAKEUP_PIPE create_wakeup_pipe(ps); diff --git a/erts/emulator/sys/common/erl_poll.h b/erts/emulator/sys/common/erl_poll.h index 725a77a152..e0296c6a33 100644 --- a/erts/emulator/sys/common/erl_poll.h +++ b/erts/emulator/sys/common/erl_poll.h @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2006-2009. All Rights Reserved. + * Copyright Ericsson AB 2006-2011. All Rights Reserved. 
* * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -216,6 +216,9 @@ typedef struct { #endif } ErtsPollInfo; +#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT +void ERTS_POLL_EXPORT(erts_poll_async_sig_interrupt)(ErtsPollSet); +#endif void ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet, int); void ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet, |
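A few reviewer notes and sketches on the mechanisms the hunks above introduce. First, erl_mseg.c: the single global allocator state (mutex, call counters, segment cache) becomes an array of cache-line-aligned instances, one per scheduler plus a thread-safe instance 0, selected through ERTS_MSEG_ALLCTR_IX/_SS/_OPT. The sketch below is a minimal, self-contained rendering of that indexing scheme under stated assumptions: the Allctr fields, ALLCTR_FOR, and the main() driver are stand-ins, not the patch's real types.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define CACHE_LINE_SIZE 64

    typedef struct {
        int ix;                   /* instance index                    */
        int is_thread_safe;       /* only instance 0 is lock-protected */
        unsigned long alloc_calls;
    } Allctr;

    /* Pad each instance to a whole number of cache lines (cf. the
     * patch's ErtsAlgndMsegAllctr_t union) so per-scheduler counters
     * never share a line and never false-share. */
    typedef union {
        Allctr allctr;
        char align__[((sizeof(Allctr) + CACHE_LINE_SIZE - 1)
                      / CACHE_LINE_SIZE) * CACHE_LINE_SIZE];
    } AlgndAllctr;

    static int no_allctrs;        /* schedulers + 1, cf. no_mseg_allocators */
    static AlgndAllctr *allctrs;

    #define ALLCTR_IX(IX) (&allctrs[(IX)].allctr)
    /* Selection policy (cf. ERTS_MSEG_ALLCTR_SS/_OPT): a sched_spec
     * request goes to the caller's own instance, everything else to
     * the shared, thread-safe instance 0. */
    #define ALLCTR_FOR(sched_id, sched_spec) \
        ((sched_spec) ? ALLCTR_IX(sched_id) : ALLCTR_IX(0))

    static void init_allctrs(int no_schedulers)
    {
        uintptr_t x;
        int i;
        no_allctrs = no_schedulers + 1;
        /* Over-allocate and round up by hand, as the patch does for
         * aligned_mseg_allctr (allocated once at init, never freed). */
        x = (uintptr_t) malloc(sizeof(AlgndAllctr) * no_allctrs
                               + (CACHE_LINE_SIZE - 1));
        if (!x)
            abort();
        if (x & (CACHE_LINE_SIZE - 1))
            x = (x & ~(uintptr_t) (CACHE_LINE_SIZE - 1)) + CACHE_LINE_SIZE;
        allctrs = (AlgndAllctr *) x;
        for (i = 0; i < no_allctrs; i++) {
            memset(ALLCTR_IX(i), 0, sizeof(Allctr));
            ALLCTR_IX(i)->ix = i;
            ALLCTR_IX(i)->is_thread_safe = (i == 0);
        }
    }

    int main(void)
    {
        init_allctrs(4);
        ALLCTR_FOR(3, 1)->alloc_calls++;   /* scheduler 3, sched_spec opt */
        ALLCTR_FOR(3, 0)->alloc_calls++;   /* non-sched_spec: instance 0  */
        printf("ix 3: %lu, ix 0: %lu\n",
               ALLCTR_IX(3)->alloc_calls, ALLCTR_IX(0)->alloc_calls);
        return 0;
    }

The payoff of this layout is that scheduler-specific instances never need ERTS_MSEG_LOCK at all (is_thread_safe is 0 for them), which is exactly what the new ERTS_DBG_MA_CHK_THR_ACCESS assertion enforces in debug builds.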
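The per-allocator call counters keep two 32-bit fields and carry at 10^9, so info_calls() can print the full count as decimal digits ("%b32u%09b32u") without 64-bit arithmetic; the reworked INC_CC/DEC_CC macros above implement the carry and borrow against the new (MA)-> indirection. A stand-alone rendering of the same arithmetic, assuming plain unsigned ints in place of Uint32:

    #include <stdio.h>

    #define ONE_GIGA 1000000000u

    typedef struct { unsigned giga_no; unsigned no; } CallCounter;

    /* Carry into giga_no instead of letting `no` pass 10^9 (cf. INC_CC). */
    static void inc_cc(CallCounter *cc) {
        if (cc->no == ONE_GIGA - 1) { cc->giga_no++; cc->no = 0; }
        else cc->no++;
    }

    /* Borrow from giga_no (cf. DEC_CC, used when realloc degenerates
     * to alloc or dealloc and the first count must be taken back). */
    static void dec_cc(CallCounter *cc) {
        if (cc->no == 0) { cc->giga_no--; cc->no = ONE_GIGA - 1; }
        else cc->no--;
    }

    /* Printed the way info_calls() does it: "%u" or "%u%09u". */
    static void print_cc(const char *name, const CallCounter *cc) {
        if (cc->giga_no == 0) printf("mseg_%s calls: %u\n", name, cc->no);
        else printf("mseg_%s calls: %u%09u\n", name, cc->giga_no, cc->no);
    }

    int main(void) {
        CallCounter c = { 0, ONE_GIGA - 2 };
        inc_cc(&c); inc_cc(&c);    /* crosses the 10^9 boundary */
        print_cc("alloc", &c);     /* -> mseg_alloc calls: 1000000000 */
        dec_cc(&c);
        print_cc("alloc", &c);     /* -> mseg_alloc calls: 999999999 */
        return 0;
    }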
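The cache-check scheduling also changes shape: schedule_cache_check() no longer arms an ErlTimer but sets a per-scheduler aux-work timeout (ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK), and do_cache_check() disarms it again once every MemKind cache is empty. The toy model below shows only that arm/auto-disarm protocol; set_aux_work_timeout() and the single cache counter are stand-ins for the real aux-work machinery and MemKind list.

    #include <stdio.h>

    static int aux_timeout_armed;   /* models erts_set_aux_work_timeout() */
    static int cache_size = 3;      /* models the MemKind cache contents  */
    static int is_scheduled;        /* ma->is_cache_check_scheduled       */

    static void set_aux_work_timeout(int on) { aux_timeout_armed = on; }

    static void schedule_cache_check(void)
    {
        if (!is_scheduled) {        /* arm once; re-arming is a no-op */
            set_aux_work_timeout(1);
            is_scheduled = 1;
        }
    }

    static void do_cache_check(void)
    {
        if (cache_size > 0)
            cache_size--;           /* shrink toward the watermark */
        if (cache_size == 0) {      /* empty: disarm until next dealloc */
            is_scheduled = 0;
            set_aux_work_timeout(0);
        }
    }

    int main(void)
    {
        schedule_cache_check();
        while (aux_timeout_armed)   /* the scheduler's periodic aux work */
            do_cache_check();
        printf("cache drained, timeout disarmed (armed=%d)\n",
               aux_timeout_armed);
        return 0;
    }

This is why check_one_cache() now returns the remaining cache size: do_cache_check() needs to know when all MemKinds have drained so the periodic work can stop instead of firing forever.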
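In erl_poll.c, wake_poller() gains an async_signal_safe argument because, with ERTS_POLL_ASYNC_INTERRUPT_SUPPORT, it can now be reached from a signal handler via erts_poll_async_sig_interrupt(): on that path it must skip the wakeup_state atomics and use nothing but write(2) on the wakeup pipe, and cleanup_wakeup_pipe() compensates by reconstructing the interrupted state from any bytes it drains. Below is a minimal sketch of that self-pipe pattern on a POSIX system; the pipe setup, handler, and main() are illustrative, not the patch's create_wakeup_pipe().

    #include <errno.h>
    #include <fcntl.h>
    #include <poll.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static int wake_fds[2];      /* [0] polled read end, [1] write end */

    /* Async-signal safe: only write() is called (cf. wake_poller()
     * with async_signal_safe != 0, which skips the atomics and always
     * writes). The pipe byte also wakes the poller when the signal
     * lands in a different thread from the one blocked in poll(). */
    static void on_signal(int signo)
    {
        ssize_t res;
        (void) signo;
        do {
            res = write(wake_fds[1], "!", 1);
        } while (res < 0 && errno == EINTR);
    }

    /* Drain the pipe after waking (cf. cleanup_wakeup_pipe(), which
     * now also flags ERTS_POLL_WOKEN_INTR if it drained anything). */
    static void drain(void)
    {
        char buf[32];
        while (read(wake_fds[0], buf, sizeof(buf)) > 0)
            ;
    }

    int main(void)
    {
        struct pollfd pfd;
        int n;
        if (pipe(wake_fds) != 0)
            return 1;
        fcntl(wake_fds[0], F_SETFL, O_NONBLOCK);
        fcntl(wake_fds[1], F_SETFL, O_NONBLOCK);
        signal(SIGUSR1, on_signal);

        pfd.fd = wake_fds[0];
        pfd.events = POLLIN;
        n = poll(&pfd, 1, 5000);
        if (n > 0) { drain(); puts("woken via pipe"); }
        else if (n == 0) puts("timed out");
        else puts("interrupted");   /* EINTR from the signal itself */
        return 0;
    }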
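Finally, the check_fd_events() hunks bracket every potentially blocking wait (epoll_wait, kevent, DP_POLL ioctl, poll, select) with erts_thr_progress_prepare_wait()/erts_thr_progress_finalize_wait(), so a sleeping poller does not stall the new thread-progress machinery introduced by erl_thr_progress.h. The sketch below keeps those two ERTS names but defines them as empty stand-ins so it compiles on its own; do_wait() is likewise a hypothetical backend, not a real ERTS call.

    #include <poll.h>
    #include <stddef.h>

    /* Stand-ins: in ERTS these tell the thread-progress subsystem
     * that this thread is about to block (and must not be waited on
     * for progress), and that it is awake and participating again. */
    static void erts_thr_progress_prepare_wait(void *esdp) { (void) esdp; }
    static void erts_thr_progress_finalize_wait(void *esdp) { (void) esdp; }

    static int do_wait(int timeout_ms)     /* hypothetical backend wait */
    {
        return poll(NULL, 0, timeout_ms);  /* sleep with no fds */
    }

    /* The shape check_fd_events() takes after this patch. */
    int check_fd_events_shape(int timeout_ms)
    {
        int res;
        if (timeout_ms)                    /* only if we may really sleep */
            erts_thr_progress_prepare_wait(NULL);
        res = do_wait(timeout_ms);
        if (timeout_ms)
            erts_thr_progress_finalize_wait(NULL);
        return res;
    }

The `if (timeout_ms)` guard matches the diff and matters: a zero-timeout poll never blocks, so announcing a wait there would only add overhead on the hot path.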