From cf9bb9e1e5f1cf58e88b8949b1124b0f160d25fe Mon Sep 17 00:00:00 2001
From: Sverker Eriksson
Date: Wed, 2 Mar 2011 18:29:36 +0100
Subject: Add erts_alloc_permanent_cache_aligned to suppress valgrind

Ease the valgrind suppression of memory that is permanently allocated
and then aligned up to a cache line.
---
 erts/emulator/beam/erl_alloc.h   |   25 ++++++++++++++++----
 erts/emulator/beam/erl_db.c      |   15 ++++--------
 erts/emulator/beam/erl_process.c |   50 +++++++++++-----------------------------
 3 files changed, 38 insertions(+), 52 deletions(-)

diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h
index dd4cc22171..2cd62c01c1 100644
--- a/erts/emulator/beam/erl_alloc.h
+++ b/erts/emulator/beam/erl_alloc.h
@@ -172,9 +172,17 @@ void *erts_realloc(ErtsAlcType_t type, void *ptr, Uint size);
 void erts_free(ErtsAlcType_t type, void *ptr);
 void *erts_alloc_fnf(ErtsAlcType_t type, Uint size);
 void *erts_realloc_fnf(ErtsAlcType_t type, void *ptr, Uint size);
+void *erts_alloc_permanent_cache_aligned(ErtsAlcType_t type, Uint size);
+
 
 #endif /* #if !ERTS_ALC_DO_INLINE */
 
+#ifndef ERTS_CACHE_LINE_SIZE
+/* Assume a cache line size of 64 bytes */
+# define ERTS_CACHE_LINE_SIZE ((UWord) 64)
+# define ERTS_CACHE_LINE_MASK (ERTS_CACHE_LINE_SIZE - 1)
+#endif
+
 #if ERTS_ALC_DO_INLINE || defined(ERTS_ALC_INTERNAL__)
 
 ERTS_ALC_INLINE
@@ -234,6 +242,18 @@ void *erts_realloc_fnf(ErtsAlcType_t type, void *ptr, Uint size)
 			    size);
 }
 
+ERTS_ALC_INLINE
+void *erts_alloc_permanent_cache_aligned(ErtsAlcType_t type, Uint size)
+{
+    UWord v = (UWord) erts_alloc(type, size + (ERTS_CACHE_LINE_SIZE-1));
+
+    if (v & ERTS_CACHE_LINE_MASK) {
+	v = (v & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
+    }
+    ASSERT((v & ERTS_CACHE_LINE_MASK) == 0);
+    return (void*)v;
+}
+
 #endif /* #if ERTS_ALC_DO_INLINE || defined(ERTS_ALC_INTERNAL__) */
 
 typedef void (*erts_alloc_verify_func_t)(Allctr_t *);
@@ -241,11 +261,6 @@ typedef void (*erts_alloc_verify_func_t)(Allctr_t *);
 erts_alloc_verify_func_t
 erts_alloc_get_verify_unused_temp_alloc(Allctr_t **allctr);
 
-#ifndef ERTS_CACHE_LINE_SIZE
-/* Assume a cache line size of 64 bytes */
-# define ERTS_CACHE_LINE_SIZE ((UWord) 64)
-# define ERTS_CACHE_LINE_MASK (ERTS_CACHE_LINE_SIZE - 1)
-#endif
 #define ERTS_ALC_CACHE_LINE_ALIGN_SIZE(SZ) \
   (((((SZ) - 1) / ERTS_CACHE_LINE_SIZE) + 1) * ERTS_CACHE_LINE_SIZE)

diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 5b74240cc3..61e8a595be 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -2773,17 +2773,10 @@ void init_db(void)
     rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
     rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
 
-    meta_main_tab_locks = erts_alloc(ERTS_ALC_T_DB_TABLES,
-				     (sizeof(erts_meta_main_tab_lock_t)
-				      * (ERTS_META_MAIN_TAB_LOCK_TAB_SIZE+1)));
-
-    if ((((UWord) meta_main_tab_locks) & ERTS_CACHE_LINE_MASK) != 0)
-	meta_main_tab_locks = ((erts_meta_main_tab_lock_t *)
-			       ((((UWord) meta_main_tab_locks)
-				 & ~ERTS_CACHE_LINE_MASK)
-				+ ERTS_CACHE_LINE_SIZE));
-
-    ASSERT((((UWord) meta_main_tab_locks) & ERTS_CACHE_LINE_MASK) == 0);
+    meta_main_tab_locks =
+	erts_alloc_permanent_cache_aligned(ERTS_ALC_T_DB_TABLES,
+					   sizeof(erts_meta_main_tab_lock_t)
+					   * ERTS_META_MAIN_TAB_LOCK_TAB_SIZE);
 
     for (i = 0; i < ERTS_META_MAIN_TAB_LOCK_TAB_SIZE; i++) {
 	erts_smp_rwmtx_init_opt_x(&meta_main_tab_locks[i].rwmtx, &rwmtx_opt,
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 4d6e982325..e8b2360ee9 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -622,14 +622,10 @@ init_misc_aux_work(void)
 
     init_misc_aux_work_alloc();
 
-    misc_aux_work_queues = erts_alloc(ERTS_ALC_T_MISC_AUX_WORK_Q,
-				      (sizeof(erts_algnd_misc_aux_work_q_t)
-				       *(erts_no_schedulers+1)));
-    if ((((UWord) misc_aux_work_queues) & ERTS_CACHE_LINE_MASK) != 0)
-	misc_aux_work_queues = ((erts_algnd_misc_aux_work_q_t *)
-				((((UWord) misc_aux_work_queues)
-				  & ~ERTS_CACHE_LINE_MASK)
-				 + ERTS_CACHE_LINE_SIZE));
+    misc_aux_work_queues =
+	erts_alloc_permanent_cache_aligned(ERTS_ALC_T_MISC_AUX_WORK_Q,
+					   erts_no_schedulers *
+					   sizeof(erts_algnd_misc_aux_work_q_t));
 
     for (ix = 0; ix < erts_no_schedulers; ix++) {
 	erts_smp_mtx_init_x(&misc_aux_work_queues[ix].data.mtx,
@@ -2515,16 +2511,9 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
 
     n = (int) (mrq ? no_schedulers : 1);
 
-    erts_aligned_run_queues = erts_alloc(ERTS_ALC_T_RUNQS,
-					 (sizeof(ErtsAlignedRunQueue)*(n+1)));
-    if ((((UWord) erts_aligned_run_queues) & ERTS_CACHE_LINE_MASK) != 0)
-	erts_aligned_run_queues = ((ErtsAlignedRunQueue *)
-				   ((((UWord) erts_aligned_run_queues)
-				     & ~ERTS_CACHE_LINE_MASK)
-				    + ERTS_CACHE_LINE_SIZE));
-
-    ASSERT((((UWord) erts_aligned_run_queues) & ERTS_CACHE_LINE_MASK) == 0);
-
+    erts_aligned_run_queues =
+	erts_alloc_permanent_cache_aligned(ERTS_ALC_T_RUNQS,
+					   sizeof(ErtsAlignedRunQueue) * n);
 #ifdef ERTS_SMP
     erts_smp_atomic32_init(&no_empty_run_queues, 0);
 #endif
@@ -2619,14 +2608,10 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
 
 #ifdef ERTS_SMP
     /* Create and initialize scheduler sleep info */
-    aligned_sched_sleep_info = erts_alloc(ERTS_ALC_T_SCHDLR_SLP_INFO,
-					  (sizeof(ErtsAlignedSchedulerSleepInfo)
-					   *(n+1)));
-    if ((((UWord) aligned_sched_sleep_info) & ERTS_CACHE_LINE_MASK) == 0)
-	aligned_sched_sleep_info = ((ErtsAlignedSchedulerSleepInfo *)
-				    ((((UWord) aligned_sched_sleep_info)
-				      & ~ERTS_CACHE_LINE_MASK)
-				     + ERTS_CACHE_LINE_SIZE));
+    aligned_sched_sleep_info =
+	erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_SLP_INFO,
+					   n * sizeof(ErtsAlignedSchedulerSleepInfo));
+
     for (ix = 0; ix < n; ix++) {
 	ErtsSchedulerSleepInfo *ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
 #if 0 /* no need to initialize these... */
@@ -2641,16 +2626,9 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
 
     /* Create and initialize scheduler specific data */
 
-    erts_aligned_scheduler_data = erts_alloc(ERTS_ALC_T_SCHDLR_DATA,
-					     (sizeof(ErtsAlignedSchedulerData)
-					      *(n+1)));
-    if ((((UWord) erts_aligned_scheduler_data) & ERTS_CACHE_LINE_MASK) != 0)
-	erts_aligned_scheduler_data = ((ErtsAlignedSchedulerData *)
-				       ((((UWord) erts_aligned_scheduler_data)
-					 & ~ERTS_CACHE_LINE_MASK)
-					+ ERTS_CACHE_LINE_SIZE));
-
-    ASSERT((((UWord) erts_aligned_scheduler_data) & ERTS_CACHE_LINE_MASK) == 0);
+    erts_aligned_scheduler_data =
+	erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
+					   n*sizeof(ErtsAlignedSchedulerData));
 
     for (ix = 0; ix < n; ix++) {
 	ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix);
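
The new helper over-allocates by one cache line minus one byte and rounds the returned address up to the next ERTS_CACHE_LINE_SIZE boundary; the original unaligned pointer is discarded and the memory is never freed, so every such allocation appears under the single stack frame erts_alloc_permanent_cache_aligned, which is presumably what lets one valgrind leak suppression cover all the call sites above. The sketch below reproduces the same arithmetic outside of ERTS, assuming a plain malloc() backend; CACHE_LINE_SIZE, CACHE_LINE_MASK and alloc_permanent_cache_aligned() are hypothetical stand-ins, not ERTS symbols.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for ERTS_CACHE_LINE_SIZE / ERTS_CACHE_LINE_MASK. */
#define CACHE_LINE_SIZE ((uintptr_t) 64)
#define CACHE_LINE_MASK (CACHE_LINE_SIZE - 1)

/*
 * Same idea as the patch: over-allocate by one cache line minus one byte,
 * then round the address up to the next cache-line boundary.  The unaligned
 * pointer is dropped, so the block can never be freed -- acceptable only
 * for allocations that live for the whole lifetime of the process.
 */
static void *alloc_permanent_cache_aligned(size_t size)
{
    uintptr_t v = (uintptr_t) malloc(size + (CACHE_LINE_SIZE - 1));

    if (v == 0)
        return NULL;   /* out of memory; the real erts_alloc() aborts instead */
    if (v & CACHE_LINE_MASK)
        v = (v & ~CACHE_LINE_MASK) + CACHE_LINE_SIZE;
    assert((v & CACHE_LINE_MASK) == 0);
    return (void *) v;
}

int main(void)
{
    void *p = alloc_permanent_cache_aligned(100);
    printf("%p is %scache-line aligned\n", p,
           ((uintptr_t) p & CACHE_LINE_MASK) == 0 ? "" : "NOT ");
    return 0;          /* never freed, by design */
}

With every permanent allocation funnelled through one named function, a single Memcheck leak suppression matching that frame should be enough, instead of one suppression per call site as before.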