Diffstat (limited to 'erts/emulator')
-rw-r--r--  erts/emulator/beam/erl_alloc.c       |  51
-rw-r--r--  erts/emulator/beam/erl_alloc.h       |  20
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c  |  66
-rw-r--r--  erts/emulator/beam/erl_instrument.c  |  97
-rw-r--r--  erts/emulator/beam/erl_mtrace.c      |  44
5 files changed, 221 insertions, 57 deletions
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index f0cc52e30f..c19a2647e4 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -570,6 +570,8 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
     if (ncpu < 1)
         ncpu = 1;
 
+    erts_tsd_key_create(&erts_allctr_prelock_tsd_key);
+
     erts_sys_alloc_init();
     erts_init_utils_mem();
 
@@ -3139,6 +3141,55 @@ erts_request_alloc_info(struct process *c_p,
     return 1;
 }
 
+/*
+ * The allocator wrapper prelocking stuff below is about the locking order.
+ * It only affects wrappers (erl_mtrace.c and erl_instrument.c) that keep locks
+ * during alloc/realloc/free.
+ *
+ * Some query functions in erl_alloc_util.c lock the allocator mutex and then
+ * use erts_printf that in turn may call the sys allocator through the wrappers.
+ * To avoid breaking locking order these query functions first "pre-locks" all
+ * allocator wrappers.
+ */
+ErtsAllocatorWrapper_t *erts_allctr_wrappers;
+int erts_allctr_wrapper_prelocked = 0;
+erts_tsd_key_t erts_allctr_prelock_tsd_key;
+
+void erts_allctr_wrapper_prelock_init(ErtsAllocatorWrapper_t* wrapper)
+{
+    ASSERT(wrapper->lock && wrapper->unlock);
+    wrapper->next = erts_allctr_wrappers;
+    erts_allctr_wrappers = wrapper;
+}
+
+void erts_allctr_wrapper_pre_lock(void)
+{
+    if (erts_allctr_wrappers) {
+        ErtsAllocatorWrapper_t* wrapper = erts_allctr_wrappers;
+        for ( ; wrapper; wrapper = wrapper->next) {
+            wrapper->lock();
+        }
+        ASSERT(!erts_allctr_wrapper_prelocked);
+        erts_allctr_wrapper_prelocked = 1;
+        erts_tsd_set(erts_allctr_prelock_tsd_key, (void*)1);
+    }
+}
+
+void erts_allctr_wrapper_pre_unlock(void)
+{
+    if (erts_allctr_wrappers) {
+        ErtsAllocatorWrapper_t* wrapper = erts_allctr_wrappers;
+
+        erts_allctr_wrapper_prelocked = 0;
+        erts_tsd_set(erts_allctr_prelock_tsd_key, (void*)0);
+        for ( ; wrapper; wrapper = wrapper->next) {
+            wrapper->unlock();
+        }
+    }
+}
+
+
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
  * Deprecated functions                                                     *
  *                                                                          *
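[Note] The registration list and pre-lock walk added in erl_alloc.c are small enough to model in isolation. Below is a minimal sketch of the same idea; Wrapper, wrappers, wrapper_register, pre_lock_all and pre_unlock_all are illustrative names, not part of ERTS:

typedef struct Wrapper {
    void (*lock)(void);
    void (*unlock)(void);
    struct Wrapper *next;
} Wrapper;

static Wrapper *wrappers;    /* head of the registration list */

/* Called once per wrapper at init time, as erts_allctr_wrapper_prelock_init
 * does above: push onto the singly linked list. */
static void wrapper_register(Wrapper *w)
{
    w->next = wrappers;
    wrappers = w;
}

/* Take every wrapper lock up front, so that a later callback into a wrapped
 * allocator (e.g. via erts_printf) can no longer invert the lock order. */
static void pre_lock_all(void)
{
    Wrapper *w;
    for (w = wrappers; w; w = w->next)
        w->lock();
}

static void pre_unlock_all(void)
{
    Wrapper *w;
    for (w = wrappers; w; w = w->next)   /* same walk order as the real code */
        w->unlock();
}

Unlock order does not matter for deadlock avoidance, which is why both the sketch and the real code simply walk the list the same way twice.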
diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h
index ba5ec9c367..9cafd8ddc8 100644
--- a/erts/emulator/beam/erl_alloc.h
+++ b/erts/emulator/beam/erl_alloc.h
@@ -135,6 +135,18 @@ typedef struct {
 
 extern ErtsAllocatorThrSpec_t erts_allctr_thr_spec[ERTS_ALC_A_MAX+1];
 
+typedef struct ErtsAllocatorWrapper_t_ {
+    void (*lock)(void);
+    void (*unlock)(void);
+    struct ErtsAllocatorWrapper_t_* next;
+}ErtsAllocatorWrapper_t;
+ErtsAllocatorWrapper_t *erts_allctr_wrappers;
+extern int erts_allctr_wrapper_prelocked;
+extern erts_tsd_key_t erts_allctr_prelock_tsd_key;
+void erts_allctr_wrapper_prelock_init(ErtsAllocatorWrapper_t* wrapper);
+void erts_allctr_wrapper_pre_lock(void);
+void erts_allctr_wrapper_pre_unlock(void);
+
 void erts_alloc_register_scheduler(void *vesdp);
 #ifdef ERTS_SMP
 void erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp,
@@ -188,6 +200,7 @@ void *erts_realloc(ErtsAlcType_t type, void *ptr, Uint size);
 void erts_free(ErtsAlcType_t type, void *ptr);
 void *erts_alloc_fnf(ErtsAlcType_t type, Uint size);
 void *erts_realloc_fnf(ErtsAlcType_t type, void *ptr, Uint size);
+int erts_is_allctr_wrapper_prelocked(void);
 
 #endif /* #if !ERTS_ALC_DO_INLINE */
 
@@ -258,6 +271,13 @@ void *erts_realloc_fnf(ErtsAlcType_t type, void *ptr, Uint size)
                             size);
 }
 
+ERTS_ALC_INLINE
+int erts_is_allctr_wrapper_prelocked(void)
+{
+    return erts_allctr_wrapper_prelocked                /* locked */
+        && !!erts_tsd_get(erts_allctr_prelock_tsd_key); /* by me  */
+}
+
 #endif /* #if ERTS_ALC_DO_INLINE || defined(ERTS_ALC_INTERNAL__) */
 
 #define ERTS_ALC_GET_THR_IX() ((int) erts_get_scheduler_id())
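[Note] The two-part test added to erl_alloc.h is the usual "held, and held by me" idiom: a plain global answers "has some thread pre-locked the wrappers?", and thread-specific data answers "is that thread the caller?". A hypothetical pthreads rendering (every name below is invented for illustration):

#include <pthread.h>

static int           prelocked;    /* some thread holds the pre-locks    */
static pthread_key_t prelock_key;  /* non-NULL value: that thread is us  */

static void prelock_tsd_init(void)          /* cf. erts_tsd_key_create at boot */
{
    pthread_key_create(&prelock_key, NULL);
}

static void set_prelocked_by_me(int on)     /* cf. the erts_tsd_set calls */
{
    prelocked = on;
    pthread_setspecific(prelock_key, on ? (void *)1 : NULL);
}

static int is_prelocked_by_me(void)   /* cf. erts_is_allctr_wrapper_prelocked */
{
    return prelocked                                  /* locked */
        && pthread_getspecific(prelock_key) != NULL;  /* by me  */
}

Without the thread-specific half of the test, any other thread that happened to allocate while a query was in progress would wrongly skip taking the wrapper mutex.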
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index 583aeed12d..cbeb2c1e3a 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -588,17 +588,21 @@ do { \
 #ifdef DEBUG
 #ifdef USE_THREADS
+# ifdef ERTS_SMP
+#  define IS_ACTUALLY_BLOCKING (erts_thr_progress_is_blocking())
+# else
+#  define IS_ACTUALLY_BLOCKING 0
+# endif
 #define ERTS_ALCU_DBG_CHK_THR_ACCESS(A) \
 do { \
-    if (!(A)->thread_safe) { \
-        if (!(A)->debug.saved_tid) { \
+    if (!(A)->thread_safe && !IS_ACTUALLY_BLOCKING) { \
+        if (!(A)->debug.saved_tid) { \
             (A)->debug.tid = erts_thr_self(); \
             (A)->debug.saved_tid = 1; \
         } \
         else { \
             ERTS_SMP_LC_ASSERT( \
-                ethr_equal_tids((A)->debug.tid, erts_thr_self()) \
-                || erts_thr_progress_is_blocking()); \
+                ethr_equal_tids((A)->debug.tid, erts_thr_self())); \
         } \
     } \
 } while (0)
@@ -2510,12 +2514,6 @@ static erts_mtx_t init_atoms_mtx;
 static void
 init_atoms(Allctr_t *allctr)
 {
-
-#ifdef USE_THREADS
-    if (allctr && allctr->thread_safe)
-        erts_mtx_unlock(&allctr->mutex);
-#endif
-
     erts_mtx_lock(&init_atoms_mtx);
 
     if (!atoms_initialized) {
@@ -2606,18 +2604,13 @@ init_atoms(Allctr_t *allctr)
             fix_type_atoms[ix] = am_atom_put(name, len);
         }
     }
-
-    if (allctr) {
+    if (allctr && !allctr->atoms_initialized) {
         make_name_atoms(allctr);
 
         (*allctr->init_atoms)();
 
-#ifdef USE_THREADS
-        if (allctr->thread_safe)
-            erts_mtx_lock(&allctr->mutex);
-#endif
         allctr->atoms_initialized = 1;
     }
@@ -3245,17 +3238,21 @@ erts_alcu_info_options(Allctr_t *allctr,
 {
     Eterm res;
 
+    if (hpp || szp)
+        ensure_atoms_initialized(allctr);
 #ifdef USE_THREADS
-    if (allctr->thread_safe)
+    if (allctr->thread_safe) {
+        erts_allctr_wrapper_pre_lock();
         erts_mtx_lock(&allctr->mutex);
+    }
 #endif
 
-    if (hpp || szp)
-        ensure_atoms_initialized(allctr);
-
     res = info_options(allctr, print_to_p, print_to_arg, hpp, szp);
 
 #ifdef USE_THREADS
-    if (allctr->thread_safe)
+    if (allctr->thread_safe) {
         erts_mtx_unlock(&allctr->mutex);
+        erts_allctr_wrapper_pre_unlock();
+    }
 #endif
     return res;
 }
@@ -3282,16 +3279,18 @@ erts_alcu_sz_info(Allctr_t *allctr,
         return am_false;
     }
 
+    if (hpp || szp)
+        ensure_atoms_initialized(allctr);
+
 #ifdef USE_THREADS
-    if (allctr->thread_safe)
+    if (allctr->thread_safe) {
+        erts_allctr_wrapper_pre_lock();
         erts_mtx_lock(&allctr->mutex);
+    }
 #endif
 
     ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
 
-    if (hpp || szp)
-        ensure_atoms_initialized(allctr);
-
     /* Update sbc values not continously updated */
     allctr->sbcs.blocks.curr.no
         = allctr->sbcs.curr.norm.mseg.no + allctr->sbcs.curr.norm.sys_alloc.no;
@@ -3327,13 +3326,16 @@
 
 #ifdef USE_THREADS
-    if (allctr->thread_safe)
+    if (allctr->thread_safe) {
         erts_mtx_unlock(&allctr->mutex);
+        erts_allctr_wrapper_pre_unlock();
+    }
 #endif
 
     return res;
 }
 
+
 Eterm
 erts_alcu_info(Allctr_t *allctr,
                int begin_max_period,
@@ -3354,16 +3356,18 @@ erts_alcu_info(Allctr_t *allctr,
         return am_false;
     }
 
+    if (hpp || szp)
+        ensure_atoms_initialized(allctr);
+
 #ifdef USE_THREADS
-    if (allctr->thread_safe)
+    if (allctr->thread_safe) {
+        erts_allctr_wrapper_pre_lock();
         erts_mtx_lock(&allctr->mutex);
+    }
 #endif
 
     ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
 
-    if (hpp || szp)
-        ensure_atoms_initialized(allctr);
-
     /* Update sbc values not continously updated */
     allctr->sbcs.blocks.curr.no
         = allctr->sbcs.curr.norm.mseg.no + allctr->sbcs.curr.norm.sys_alloc.no;
@@ -3416,8 +3420,10 @@ erts_alcu_info(Allctr_t *allctr,
 
 #ifdef USE_THREADS
-    if (allctr->thread_safe)
+    if (allctr->thread_safe) {
         erts_mtx_unlock(&allctr->mutex);
+        erts_allctr_wrapper_pre_unlock();
+    }
 #endif
 
     return res;
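[Note] The reordering in the three query functions above establishes one global order: wrapper locks strictly before the allocator mutex, with atom initialization (which may allocate) hoisted before any lock is taken. A toy model of the inversion this prevents, with hypothetical names; after the patch, both paths agree on the order:

#include <pthread.h>

static pthread_mutex_t wrapper_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t allctr_mutex  = PTHREAD_MUTEX_INITIALIZER;

static void query_info(void)        /* cf. erts_alcu_info() after the patch */
{
    pthread_mutex_lock(&wrapper_mutex);  /* pre-lock: wrapper first */
    pthread_mutex_lock(&allctr_mutex);
    /* ... format statistics; printing may call back into the wrapped
     * allocator, which sees the wrapper lock already held and skips it ... */
    pthread_mutex_unlock(&allctr_mutex);
    pthread_mutex_unlock(&wrapper_mutex);
}

static void wrapped_alloc(void)     /* cf. stat_alloc() / mtrace_alloc() */
{
    pthread_mutex_lock(&wrapper_mutex);  /* same order: wrapper first */
    /* ... record the operation, then call the real allocator, which may
     * take allctr_mutex for a thread-safe allocator instance ... */
    pthread_mutex_unlock(&wrapper_mutex);
}

Before the patch, the query path took allctr_mutex first and only reached the wrapper mutex later, inside erts_printf: the classic AB/BA deadlock pattern.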
diff --git a/erts/emulator/beam/erl_instrument.c b/erts/emulator/beam/erl_instrument.c
index b5b245288b..1aea3f65bc 100644
--- a/erts/emulator/beam/erl_instrument.c
+++ b/erts/emulator/beam/erl_instrument.c
@@ -271,6 +271,18 @@ stat_upd_realloc(ErtsAlcType_t n, Uint size, Uint old_size)
  * stat instrumentation callback functions
  */
 
+static void stat_pre_lock(void)
+{
+    erts_mtx_lock(&instr_mutex);
+}
+
+static void stat_pre_unlock(void)
+{
+    erts_mtx_unlock(&instr_mutex);
+}
+
+static ErtsAllocatorWrapper_t instr_wrapper;
+
 static void *
 stat_alloc(ErtsAlcType_t n, void *extra, Uint size)
 {
@@ -278,7 +290,9 @@ stat_alloc(ErtsAlcType_t n, void *extra, Uint size)
     Uint ssize;
     void *res;
 
-    erts_mtx_lock(&instr_mutex);
+    if (!erts_is_allctr_wrapper_prelocked()) {
+        erts_mtx_lock(&instr_mutex);
+    }
 
     ssize = size + STAT_BLOCK_HEADER_SIZE;
     res = (*real_af->alloc)(n, real_af->extra, ssize);
@@ -293,7 +307,9 @@ stat_alloc(ErtsAlcType_t n, void *extra, Uint size)
         res = (void *) ((StatBlock_t *) res)->mem;
     }
 
-    erts_mtx_unlock(&instr_mutex);
+    if (!erts_is_allctr_wrapper_prelocked()) {
+        erts_mtx_unlock(&instr_mutex);
+    }
 
     return res;
 }
@@ -307,7 +323,9 @@ stat_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
     void *sptr;
     void *res;
 
-    erts_mtx_lock(&instr_mutex);
+    if (!erts_is_allctr_wrapper_prelocked()) {
+        erts_mtx_lock(&instr_mutex);
+    }
 
     if (ptr) {
         sptr = (void *) (((char *) ptr) - STAT_BLOCK_HEADER_SIZE);
@@ -329,7 +347,9 @@ stat_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
         res = (void *) ((StatBlock_t *) res)->mem;
     }
 
-    erts_mtx_unlock(&instr_mutex);
+    if (!erts_is_allctr_wrapper_prelocked()) {
+        erts_mtx_unlock(&instr_mutex);
+    }
 
     return res;
 }
@@ -340,7 +360,9 @@ stat_free(ErtsAlcType_t n, void *extra, void *ptr)
     ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
     void *sptr;
 
-    erts_mtx_lock(&instr_mutex);
+    if (!erts_is_allctr_wrapper_prelocked()) {
+        erts_mtx_lock(&instr_mutex);
+    }
 
     if (ptr) {
         sptr = (void *) (((char *) ptr) - STAT_BLOCK_HEADER_SIZE);
@@ -352,7 +374,9 @@ stat_free(ErtsAlcType_t n, void *extra, void *ptr)
 
     (*real_af->free)(n, real_af->extra, sptr);
 
-    erts_mtx_unlock(&instr_mutex);
+    if (!erts_is_allctr_wrapper_prelocked()) {
+        erts_mtx_unlock(&instr_mutex);
+    }
 }
@@ -360,6 +384,18 @@ stat_free(ErtsAlcType_t n, void *extra, void *ptr)
  * map stat instrumentation callback functions
  */
 
+static void map_stat_pre_lock(void)
+{
+    erts_mtx_lock(&instr_x_mutex);
+    erts_mtx_lock(&instr_mutex);
+}
+
+static void map_stat_pre_unlock(void)
+{
+    erts_mtx_unlock(&instr_mutex);
+    erts_mtx_unlock(&instr_x_mutex);
+}
+
 static void *
 map_stat_alloc(ErtsAlcType_t n, void *extra, Uint size)
 {
@@ -367,7 +403,9 @@ map_stat_alloc(ErtsAlcType_t n, void *extra, Uint size)
     Uint msize;
     void *res;
 
-    erts_mtx_lock(&instr_mutex);
+    if (!erts_is_allctr_wrapper_prelocked()) {
+        erts_mtx_lock(&instr_mutex);
+    }
 
     msize = size + MAP_STAT_BLOCK_HEADER_SIZE;
     res = (*real_af->alloc)(n, real_af->extra, msize);
@@ -388,7 +426,9 @@ map_stat_alloc(ErtsAlcType_t n, void *extra, Uint size)
         res = (void *) mb->mem;
     }
 
-    erts_mtx_unlock(&instr_mutex);
+    if (!erts_is_allctr_wrapper_prelocked()) {
+        erts_mtx_unlock(&instr_mutex);
+    }
 
     return res;
 }
@@ -402,8 +442,10 @@ map_stat_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
     void *mptr;
     void *res;
 
-    erts_mtx_lock(&instr_x_mutex);
-    erts_mtx_lock(&instr_mutex);
+    if (!erts_is_allctr_wrapper_prelocked()) {
+        erts_mtx_lock(&instr_x_mutex);
+        erts_mtx_lock(&instr_mutex);
+    }
 
     if (ptr) {
         mptr = (void *) (((char *) ptr) - MAP_STAT_BLOCK_HEADER_SIZE);
@@ -449,9 +491,10 @@ map_stat_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
             res = (void *) mb->mem;
         }
     }
-
-    erts_mtx_unlock(&instr_mutex);
-    erts_mtx_unlock(&instr_x_mutex);
+    if (!erts_is_allctr_wrapper_prelocked()) {
+        erts_mtx_unlock(&instr_mutex);
+        erts_mtx_unlock(&instr_x_mutex);
+    }
 
     return res;
 }
@@ -462,8 +505,10 @@ map_stat_free(ErtsAlcType_t n, void *extra, void *ptr)
     ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
     void *mptr;
 
-    erts_mtx_lock(&instr_x_mutex);
-    erts_mtx_lock(&instr_mutex);
+    if (!erts_is_allctr_wrapper_prelocked()) {
+        erts_mtx_lock(&instr_x_mutex);
+        erts_mtx_lock(&instr_mutex);
+    }
 
     if (ptr) {
         MapStatBlock_t *mb;
@@ -486,8 +531,10 @@ map_stat_free(ErtsAlcType_t n, void *extra, void *ptr)
 
     (*real_af->free)(n, real_af->extra, mptr);
 
-    erts_mtx_unlock(&instr_mutex);
-    erts_mtx_unlock(&instr_x_mutex);
+    if (!erts_is_allctr_wrapper_prelocked()) {
+        erts_mtx_unlock(&instr_mutex);
+        erts_mtx_unlock(&instr_x_mutex);
+    }
 }
@@ -496,8 +543,10 @@ static void dump_memory_map_to_stream(FILE *fp)
     ErtsAlcType_t n;
     MapStatBlock_t *bp;
     int lock = !ERTS_IS_CRASH_DUMPING;
-    if (lock)
+    if (lock) {
+        ASSERT(!erts_is_allctr_wrapper_prelocked());
         erts_mtx_lock(&instr_mutex);
+    }
 
     /* Write header */
@@ -1155,6 +1204,7 @@ erts_instr_get_type_info(Process *proc)
 Uint
 erts_instr_init(int stat, int map_stat)
 {
+    Uint extra_sz;
     int i;
 
     am_tot = NULL;
@@ -1208,7 +1258,9 @@ erts_instr_init(int stat, int map_stat)
             erts_allctrs[i].free = map_stat_free;
             erts_allctrs[i].extra = (void *) &real_allctrs[i];
         }
-        return MAP_STAT_BLOCK_HEADER_SIZE;
+        instr_wrapper.lock = map_stat_pre_lock;
+        instr_wrapper.unlock = map_stat_pre_unlock;
+        extra_sz = MAP_STAT_BLOCK_HEADER_SIZE;
     }
     else {
         erts_instr_stat = 1;
@@ -1220,8 +1272,11 @@ erts_instr_init(int stat, int map_stat)
             erts_allctrs[i].free = stat_free;
             erts_allctrs[i].extra = (void *) &real_allctrs[i];
         }
-        return STAT_BLOCK_HEADER_SIZE;
+        instr_wrapper.lock = stat_pre_lock;
+        instr_wrapper.unlock = stat_pre_unlock;
+        extra_sz = STAT_BLOCK_HEADER_SIZE;
     }
-
+    erts_allctr_wrapper_prelock_init(&instr_wrapper);
+    return extra_sz;
 }
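[Note] One detail worth calling out in erl_instrument.c: map_stat_pre_lock() takes instr_x_mutex before instr_mutex, exactly the order map_stat_realloc() and map_stat_free() use, and map_stat_pre_unlock() releases in reverse. A wrapper that holds several locks must pre-lock them in the same fixed order as its hot paths, or pre-locking itself could deadlock against a running wrapper call. A minimal pthreads illustration (placeholder names):

#include <pthread.h>

static pthread_mutex_t x_mutex = PTHREAD_MUTEX_INITIALIZER; /* cf. instr_x_mutex */
static pthread_mutex_t i_mutex = PTHREAD_MUTEX_INITIALIZER; /* cf. instr_mutex   */

static void pair_pre_lock(void)     /* cf. map_stat_pre_lock()   */
{
    pthread_mutex_lock(&x_mutex);   /* always first              */
    pthread_mutex_lock(&i_mutex);   /* always second             */
}

static void pair_pre_unlock(void)   /* cf. map_stat_pre_unlock() */
{
    pthread_mutex_unlock(&i_mutex); /* reverse acquisition order */
    pthread_mutex_unlock(&x_mutex);
}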
diff --git a/erts/emulator/beam/erl_mtrace.c b/erts/emulator/beam/erl_mtrace.c
index e538ba30c2..c8bb126687 100644
--- a/erts/emulator/beam/erl_mtrace.c
+++ b/erts/emulator/beam/erl_mtrace.c
@@ -222,6 +222,8 @@ static byte *tracep;
 static byte *endp;
 static SysTimeval last_tv;
 
+static ErtsAllocatorWrapper_t mtrace_wrapper;
+
 #if ERTS_MTRACE_SEGMENT_ID >= ERTS_ALC_A_MIN || ERTS_MTRACE_SEGMENT_ID < 0
 #error ERTS_MTRACE_SEGMENT_ID >= ERTS_ALC_A_MIN || ERTS_MTRACE_SEGMENT_ID < 0
 #endif
@@ -555,6 +557,8 @@ write_trace_header(char *nodename, char *pid, char *hostname)
     return 1;
 }
 
+static void mtrace_pre_lock(void);
+static void mtrace_pre_unlock(void);
 static void *mtrace_alloc(ErtsAlcType_t, void *, Uint);
 static void *mtrace_realloc(ErtsAlcType_t, void *, void *, Uint);
 static void mtrace_free(ErtsAlcType_t, void *, void *);
@@ -635,12 +639,16 @@ erts_mtrace_install_wrapper_functions(void)
             erts_allctrs[i].free = mtrace_free;
             erts_allctrs[i].extra = (void *) &real_allctrs[i];
         }
+        mtrace_wrapper.lock = mtrace_pre_lock;
+        mtrace_wrapper.unlock = mtrace_pre_unlock;
+        erts_allctr_wrapper_prelock_init(&mtrace_wrapper);
     }
 }
 
 void erts_mtrace_stop(void)
 {
+    ASSERT(!erts_is_allctr_wrapper_prelocked());
     erts_mtx_lock(&mtrace_op_mutex);
     erts_mtx_lock(&mtrace_buf_mutex);
     if (erts_mtrace_enabled) {
@@ -677,6 +685,7 @@ erts_mtrace_stop(void)
 void erts_mtrace_exit(Uint32 exit_value)
 {
+    ASSERT(!erts_is_allctr_wrapper_prelocked());
     erts_mtx_lock(&mtrace_op_mutex);
     erts_mtx_lock(&mtrace_buf_mutex);
     if (erts_mtrace_enabled) {
@@ -935,18 +944,33 @@ write_free_entry(byte tag,
     erts_mtx_unlock(&mtrace_buf_mutex);
 }
 
+static void mtrace_pre_lock(void)
+{
+    erts_mtx_lock(&mtrace_op_mutex);
+}
+
+static void mtrace_pre_unlock(void)
+{
+    erts_mtx_unlock(&mtrace_op_mutex);
+}
+
+
 static void *
 mtrace_alloc(ErtsAlcType_t n, void *extra, Uint size)
 {
     ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
     void *res;
 
-    erts_mtx_lock(&mtrace_op_mutex);
+    if (!erts_is_allctr_wrapper_prelocked()) {
+        erts_mtx_lock(&mtrace_op_mutex);
+    }
 
     res = (*real_af->alloc)(n, real_af->extra, size);
     write_alloc_entry(ERTS_MT_ALLOC_BDY_TAG, res, n, 0, size);
 
-    erts_mtx_unlock(&mtrace_op_mutex);
+    if (!erts_is_allctr_wrapper_prelocked()) {
+        erts_mtx_unlock(&mtrace_op_mutex);
+    }
 
     return res;
 }
@@ -957,12 +981,16 @@ mtrace_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
     ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
     void *res;
 
-    erts_mtx_lock(&mtrace_op_mutex);
+    if (!erts_is_allctr_wrapper_prelocked()) {
+        erts_mtx_lock(&mtrace_op_mutex);
+    }
 
     res = (*real_af->realloc)(n, real_af->extra, ptr, size);
     write_realloc_entry(ERTS_MT_REALLOC_BDY_TAG, res, n, 0, ptr, size);
 
-    erts_mtx_unlock(&mtrace_op_mutex);
+    if (!erts_is_allctr_wrapper_prelocked()) {
+        erts_mtx_unlock(&mtrace_op_mutex);
+    }
 
     return res;
@@ -973,10 +1001,14 @@ mtrace_free(ErtsAlcType_t n, void *extra, void *ptr)
 {
     ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
 
-    erts_mtx_lock(&mtrace_op_mutex);
+    if (!erts_is_allctr_wrapper_prelocked()) {
+        erts_mtx_lock(&mtrace_op_mutex);
+    }
 
     (*real_af->free)(n, real_af->extra, ptr);
-    write_free_entry(ERTS_MT_FREE_BDY_TAG, n, 0, ptr);
+    if (!erts_is_allctr_wrapper_prelocked()) {
+        write_free_entry(ERTS_MT_FREE_BDY_TAG, n, 0, ptr);
+    }
 
     erts_mtx_unlock(&mtrace_op_mutex);
 }
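[Note] Taken together, a hypothetical new wrapper would hook into this machinery the same way erl_mtrace.c does above. A sketch using only the API this commit introduces; the my_* names are invented, and mutex initialization is omitted:

static erts_mtx_t my_mutex;                 /* protects the wrapper's own state */
static ErtsAllocatorWrapper_t my_wrapper;

static void my_pre_lock(void)   { erts_mtx_lock(&my_mutex); }
static void my_pre_unlock(void) { erts_mtx_unlock(&my_mutex); }

static void my_install(void)
{
    my_wrapper.lock   = my_pre_lock;
    my_wrapper.unlock = my_pre_unlock;
    erts_allctr_wrapper_prelock_init(&my_wrapper);  /* join the pre-lock list */
}

Its alloc/realloc/free callbacks would then guard every lock and unlock of my_mutex with !erts_is_allctr_wrapper_prelocked(), as the stat_* and mtrace_* callbacks above do.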