From 766c7cabb1545418bf59e8dcfcc1a5fae8b01d40 Mon Sep 17 00:00:00 2001
From: Rickard Green
Date: Thu, 16 Dec 2010 11:27:23 +0100
Subject: Fix rwlock resource leak when hitting system limit

---
 erts/emulator/beam/erl_db.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'erts')

diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 8577354d27..0dfc0e721f 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -1436,8 +1436,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
 			  "** Too many db tables **\n");
 	free_heir_data(tb);
 	tb->common.meth->db_free_table(tb);
-	erts_db_free(ERTS_ALC_T_DB_TABLE, tb, (void *) tb, sizeof(DbTable));
-	ERTS_ETS_MISC_MEM_ADD(-sizeof(DbTable));
+	db_unref(tb, LCK_NONE);
 	BIF_ERROR(BIF_P, SYSTEM_LIMIT);
     }
 
--
cgit v1.2.3
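
The one-line change above routes the "too many db tables" error path in ets_new_2() through the common db_unref() teardown instead of freeing the DbTable memory directly, so the rwlock (and the rest of the bookkeeping) embedded in the table is released the same way as on normal table deletion. The sketch below is a minimal illustration of that pattern with hypothetical names and pthreads rather than the ERTS primitives; it is not the OTP code.

/* Hypothetical, simplified illustration -- not the OTP code.  The table
 * object embeds a rwlock; releasing it through one shared teardown
 * routine guarantees the lock is destroyed before the memory is freed,
 * even on error paths such as hitting a table limit. */
#include <pthread.h>
#include <stdlib.h>

struct table {
    pthread_rwlock_t lock;          /* embedded lock, must be destroyed */
    void *data;
};

static void table_release(struct table *tb)
{
    pthread_rwlock_destroy(&tb->lock);  /* no rwlock resource leak */
    free(tb->data);
    free(tb);
}

struct table *table_create(size_t limit, size_t *count)
{
    struct table *tb = malloc(sizeof(*tb));
    if (!tb)
        return NULL;
    pthread_rwlock_init(&tb->lock, NULL);
    tb->data = NULL;

    if (*count >= limit) {
        /* Error path: use the same teardown as normal deletion instead
         * of a bare free(tb), which would leak whatever the rwlock
         * implementation allocated. */
        table_release(tb);
        return NULL;
    }
    (*count)++;
    return tb;
}
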
From 8b18824e2b13e60fb1a067f80dbb228172f6a3d2 Mon Sep 17 00:00:00 2001
From: Rickard Green
Date: Wed, 15 Dec 2010 00:56:56 +0100
Subject: Safe deallocation of ETS-table structures

Ensure that all threads potentially accessing an ETS-table have dropped
all references to the table before deallocating it.
---
 erts/emulator/beam/erl_alloc.types  |   2 +
 erts/emulator/beam/erl_db.c         |  36 +++++++++-
 erts/emulator/beam/erl_lock_check.c |   2 +
 erts/emulator/beam/erl_process.c    | 137 ++++++++++++++++++++++++++++++++++++
 erts/emulator/beam/erl_process.h    |  14 ++--
 5 files changed, 183 insertions(+), 8 deletions(-)

(limited to 'erts')

diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 408ffd12f7..b7b9c6a133 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -263,6 +263,8 @@ type XPORTS_LIST SHORT_LIVED SYSTEM extra_port_list
 type PROC_LCK_WTR LONG_LIVED SYSTEM proc_lock_waiter
 type PROC_LCK_QS LONG_LIVED SYSTEM proc_lock_queues
 type RUNQ_BLNS LONG_LIVED SYSTEM run_queue_balancing
+type MISC_AUX_WORK_Q LONG_LIVED SYSTEM misc_aux_work_q
+type MISC_AUX_WORK SHORT_LIVED SYSTEM misc_aux_work
 
 +endif
 
 #
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 0dfc0e721f..e662b17592 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -227,10 +227,10 @@ static ERTS_INLINE DbTable* db_ref(DbTable* tb, db_lock_kind_t kind)
     }
     return tb;
 }
-
-static ERTS_INLINE DbTable* db_unref(DbTable* tb, db_lock_kind_t kind)
+
+static void
+free_dbtable(DbTable* tb)
 {
-    if (kind != LCK_READ && !erts_refc_dectest(&tb->common.ref, 0)) {
 #ifdef HARDDEBUG
     if (erts_smp_atomic_read(&tb->common.memory_size) != sizeof(DbTable)) {
 	erts_fprintf(stderr, "ets: db_unref memory remain=%ld fix=%x\n",
@@ -257,6 +257,36 @@
 	ASSERT(is_immed(tb->common.heir_data));
 	erts_db_free(ERTS_ALC_T_DB_TABLE, tb, (void *) tb, sizeof(DbTable));
 	ERTS_ETS_MISC_MEM_ADD(-sizeof(DbTable));
+}
+
+#ifdef ERTS_SMP
+static void
+chk_free_dbtable(void *vtb)
+{
+    DbTable * tb = (DbTable *) vtb;
+    ERTS_THR_MEMORY_BARRIER;
+    if (erts_refc_dectest(&tb->common.ref, 0) == 0)
+	free_dbtable(tb);
+}
+#endif
+
+static ERTS_INLINE DbTable* db_unref(DbTable* tb, db_lock_kind_t kind)
+{
+    if (kind != LCK_READ && erts_refc_dectest(&tb->common.ref, 0) == 0) {
+#ifdef ERTS_SMP
+	int scheds = erts_get_max_no_executing_schedulers();
+	if (scheds == 1)
+	    free_dbtable(tb);
+	else {
+	    int refs = scheds - 1;
+	    ASSERT(scheds > 1);
+	    ERTS_THR_MEMORY_BARRIER;
+	    erts_refc_add(&tb->common.ref, refs, refs);
+	    erts_smp_schedule_misc_aux_work(1, scheds, chk_free_dbtable, tb);
+	}
+#else
+	free_dbtable(tb);
+#endif
 	return NULL;
     }
     return tb;
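
The db_unref()/chk_free_dbtable() hunk above is the heart of the commit: when the last reference is dropped under SMP, the table is not freed immediately, because other schedulers may still hold pointers picked up before the count reached zero. Instead the count is re-inflated by the number of other schedulers that may be executing, a callback is scheduled on each of them via erts_smp_schedule_misc_aux_work(), each scheduler drops one reference from its own context, and whichever drops the last one performs the actual free_dbtable(). A compact sketch of that pattern follows, under assumptions, using C11 atomics and hypothetical names; schedule_on_other_workers() is assumed to exist and is not an ERTS API.

/* Minimal sketch of the "confirm on every other worker" pattern used
 * above; hypothetical names, C11 atomics instead of the ERTS
 * primitives, not the actual OTP code. */
#include <stdatomic.h>
#include <stdlib.h>

struct table {
    atomic_int ref;        /* logical references + pending confirmations */
    /* ... payload ... */
};

static void table_free(struct table *tb)
{
    free(tb);
}

/* Runs once on each other worker thread (delivered through some
 * per-worker work queue, see the sketch at the end of this patch). */
static void table_confirm_free(void *arg)
{
    struct table *tb = arg;
    if (atomic_fetch_sub(&tb->ref, 1) == 1)   /* last confirmation */
        table_free(tb);
}

/* Assumed helper: schedules fn(arg) once on each of the n_other
 * other worker threads. */
void schedule_on_other_workers(int n_other, void (*fn)(void *), void *arg);

static void table_unref(struct table *tb, int n_workers)
{
    if (atomic_fetch_sub(&tb->ref, 1) != 1)
        return;                       /* references remain */
    if (n_workers == 1) {
        table_free(tb);               /* nobody else can hold a pointer */
    } else {
        /* One pending confirmation per other worker; the last worker
         * to run its callback performs the deallocation. */
        atomic_fetch_add(&tb->ref, n_workers - 1);
        schedule_on_other_workers(n_workers - 1, table_confirm_free, tb);
    }
}

The re-inflation of the count before scheduling mirrors erts_refc_add(&tb->common.ref, refs, refs) in the patch: the memory only goes away once every other worker has passed through its own context and dropped its confirmation reference.
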
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 04c7dbd2ec..d298fe8f71 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -177,6 +177,8 @@ static erts_lc_lock_order_t erts_lock_order[] = {
     { "async_id", NULL },
     { "pix_lock", "address" },
     { "run_queues_lists", NULL },
+    { "misc_aux_work_queue", "index" },
+    { "misc_aux_work_pre_alloc_lock", "address" },
     { "sched_stat", NULL },
     { "run_queue_sleep_list", "address" },
 #endif
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index fc950af8ce..4ec06d8c82 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -586,6 +586,122 @@ erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, long flags)
     }
 }
 
+typedef struct erts_misc_aux_work_t_ erts_misc_aux_work_t;
+struct erts_misc_aux_work_t_ {
+    erts_misc_aux_work_t *next;
+    void (*func)(void *);
+    void *arg;
+};
+
+typedef struct {
+    erts_smp_mtx_t mtx;
+    erts_misc_aux_work_t *first;
+    erts_misc_aux_work_t *last;
+} erts_misc_aux_work_q_t;
+
+typedef union {
+    erts_misc_aux_work_q_t data;
+    char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_misc_aux_work_q_t))];
+} erts_algnd_misc_aux_work_q_t;
+
+static erts_algnd_misc_aux_work_q_t *misc_aux_work_queues;
+
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(misc_aux_work,
+				 erts_misc_aux_work_t,
+				 200,
+				 ERTS_ALC_T_MISC_AUX_WORK)
+
+static void
+init_misc_aux_work(void)
+{
+    int ix;
+
+    init_misc_aux_work_alloc();
+
+    misc_aux_work_queues = erts_alloc(ERTS_ALC_T_MISC_AUX_WORK_Q,
+				      (sizeof(erts_algnd_misc_aux_work_q_t)
+				       *(erts_no_schedulers+1)));
+    if ((((UWord) misc_aux_work_queues) & ERTS_CACHE_LINE_MASK) != 0)
+	misc_aux_work_queues = ((erts_algnd_misc_aux_work_q_t *)
+				((((UWord) misc_aux_work_queues)
+				  & ~ERTS_CACHE_LINE_MASK)
+				 + ERTS_CACHE_LINE_SIZE));
+
+    for (ix = 0; ix < erts_no_schedulers; ix++) {
+	erts_smp_mtx_init_x(&misc_aux_work_queues[ix].data.mtx,
+			    "misc_aux_work_queue",
+			    make_small(ix + 1));
+	misc_aux_work_queues[ix].data.first = NULL;
+	misc_aux_work_queues[ix].data.last = NULL;
+    }
+}
+
+static void
+handle_misc_aux_work(ErtsSchedulerData *esdp)
+{
+    int ix = (int) esdp->no - 1;
+    erts_misc_aux_work_t *mawp;
+
+    erts_smp_mtx_lock(&misc_aux_work_queues[ix].data.mtx);
+    mawp = misc_aux_work_queues[ix].data.first;
+    misc_aux_work_queues[ix].data.first = NULL;
+    misc_aux_work_queues[ix].data.last = NULL;
+    erts_smp_mtx_unlock(&misc_aux_work_queues[ix].data.mtx);
+
+    while (mawp) {
+	erts_misc_aux_work_t *free_mawp;
+	mawp->func(mawp->arg);
+	free_mawp = mawp;
+	mawp = mawp->next;
+	misc_aux_work_free(free_mawp);
+    }
+}
+
+void
+erts_smp_schedule_misc_aux_work(int ignore_self,
+				int max_sched,
+				void (*func)(void *),
+				void *arg)
+{
+    int ix, ignore_ix = -1;
+
+    if (ignore_self) {
+	ErtsSchedulerData *esdp = erts_get_scheduler_data();
+	if (esdp)
+	    ignore_ix = (int) esdp->no - 1;
+    }
+
+    ASSERT(0 <= max_sched && max_sched <= erts_no_schedulers);
+
+    for (ix = 0; ix < max_sched; ix++) {
+	long aux_work;
+	erts_misc_aux_work_t *mawp;
+	ErtsSchedulerSleepInfo *ssi;
+	if (ix == ignore_ix)
+	    continue;
+
+	mawp = misc_aux_work_alloc();
+
+	mawp->func = func;
+	mawp->arg = arg;
+	mawp->next = NULL;
+
+	erts_smp_mtx_lock(&misc_aux_work_queues[ix].data.mtx);
+	if (!misc_aux_work_queues[ix].data.last)
+	    misc_aux_work_queues[ix].data.first = mawp;
+	else
+	    misc_aux_work_queues[ix].data.last->next = mawp;
+	misc_aux_work_queues[ix].data.last = mawp;
+	erts_smp_mtx_unlock(&misc_aux_work_queues[ix].data.mtx);
+
+	ssi = ERTS_SCHED_SLEEP_INFO_IX(ix);
+	aux_work = erts_smp_atomic_bor(&ssi->aux_work,
+				       ERTS_SSI_AUX_WORK_MISC);
+	if ((aux_work & ERTS_SSI_AUX_WORK_MISC) == 0)
+	    erts_sched_poke(ssi);
+    }
+}
+
 #ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
 void
 erts_smp_notify_check_children_needed(void)
@@ -611,6 +727,12 @@ blockable_aux_work(ErtsSchedulerData *esdp,
 		   long aux_work)
 {
     if (aux_work & ERTS_SSI_BLOCKABLE_AUX_WORK_MASK) {
+	if (aux_work & ERTS_SSI_AUX_WORK_MISC) {
+	    aux_work = erts_smp_atomic_band(&ssi->aux_work,
+					    ~ERTS_SSI_AUX_WORK_MISC);
+	    aux_work &= ~ERTS_SSI_AUX_WORK_MISC;
+	    handle_misc_aux_work(esdp);
+	}
 #ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
 	if (aux_work & ERTS_SSI_AUX_WORK_CHECK_CHILDREN) {
 	    aux_work = erts_smp_atomic_band(&ssi->aux_work,
@@ -2603,6 +2725,8 @@ erts_init_scheduling(int mrq, int no_schedulers, int no_schedulers_online)
 
     erts_smp_atomic_init(&doing_sys_schedule, 0);
 
+    init_misc_aux_work();
+
 #else /* !ERTS_SMP */
     {
 	ErtsSchedulerData *esdp;
@@ -2730,6 +2854,19 @@ resume_process(Process *p)
     p->rstatus = P_FREE;
 }
 
+int
+erts_get_max_no_executing_schedulers(void)
+{
+#ifdef ERTS_SMP
+    if (erts_smp_atomic_read(&schdlr_sspnd.changing))
+	return (int) erts_no_schedulers;
+    ERTS_THR_MEMORY_BARRIER;
+    return (int) erts_smp_atomic_read(&schdlr_sspnd.active);
+#else
+    return 1;
+#endif
+}
+
 #ifdef ERTS_SMP
 
 static void
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index c038e57b65..0215993035 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -236,16 +236,14 @@ typedef enum {
 				 | ERTS_SSI_FLG_WAITING	\
 				 | ERTS_SSI_FLG_SUSPENDED)
 
-
-#if !defined(ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK) \
-    && defined(ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN)
 #define ERTS_SCHED_NEED_BLOCKABLE_AUX_WORK
-#endif
 
 #define ERTS_SSI_AUX_WORK_CHECK_CHILDREN	(((long) 1) << 0)
+#define ERTS_SSI_AUX_WORK_MISC			(((long) 1) << 1)
 
 #define ERTS_SSI_BLOCKABLE_AUX_WORK_MASK \
-  (ERTS_SSI_AUX_WORK_CHECK_CHILDREN)
+  (ERTS_SSI_AUX_WORK_CHECK_CHILDREN \
+   | ERTS_SSI_AUX_WORK_MISC)
 
 #define ERTS_SSI_NONBLOCKABLE_AUX_WORK_MASK \
   (0)
@@ -1034,6 +1032,7 @@ int erts_sched_set_wakeup_limit(char *str);
 #ifdef DEBUG
 void erts_dbg_multi_scheduling_return_trap(Process *, Eterm);
 #endif
+int erts_get_max_no_executing_schedulers(void);
 #ifdef ERTS_SMP
 ErtsSchedSuspendResult
 erts_schedulers_state(Uint *, Uint *, Uint *, int);
@@ -1048,6 +1047,11 @@ int erts_is_multi_scheduling_blocked(void);
 Eterm erts_multi_scheduling_blockers(Process *);
 void erts_start_schedulers(void);
 void erts_smp_notify_check_children_needed(void);
+void
+erts_smp_schedule_misc_aux_work(int ignore_self,
+				int max_sched,
+				void (*func)(void *),
+				void *arg);
 #endif
 void erts_sched_notify_check_cpu_bind(void);
 Uint erts_active_schedulers(void);
--
cgit v1.2.3
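
The erl_process.c additions above provide the delivery mechanism for the deferred free: one cache-line-aligned, mutex-protected work queue per scheduler, an ERTS_SSI_AUX_WORK_MISC bit OR'ed into the scheduler's sleep-info aux_work word, and a poke only when the bit was not already set; each scheduler later drains its queue from blockable_aux_work() via handle_misc_aux_work(). The sketch below mirrors that producer/consumer shape with hypothetical names and pthreads instead of the ERTS lock and atomic primitives (here the pending flag lives under the queue mutex rather than in a separate atomic word), and is not the OTP code.

/* Sketch of a per-worker "misc aux work" queue in the same shape as the
 * patch above; hypothetical names, pthreads, not the ERTS types. */
#include <pthread.h>
#include <stdlib.h>

struct work {
    struct work *next;
    void (*func)(void *);
    void *arg;
};

struct work_q {
    pthread_mutex_t mtx;
    struct work *first;
    struct work *last;
    int pending;                /* stands in for the aux_work flag bit */
};

/* Producer side: append a job to worker ix's queue and wake that worker
 * only if the pending flag was not already set. */
static void schedule_work(struct work_q *queues, int ix,
                          void (*func)(void *), void *arg,
                          void (*wake)(int))
{
    struct work *w = malloc(sizeof(*w));
    w->func = func;
    w->arg = arg;
    w->next = NULL;

    pthread_mutex_lock(&queues[ix].mtx);
    if (!queues[ix].last)
        queues[ix].first = w;
    else
        queues[ix].last->next = w;
    queues[ix].last = w;
    int was_pending = queues[ix].pending;
    queues[ix].pending = 1;
    pthread_mutex_unlock(&queues[ix].mtx);

    if (!was_pending)
        wake(ix);               /* poke the worker out of its sleep loop */
}

/* Consumer side, called by worker ix from its main loop: detach the
 * whole list under the lock, then run the callbacks without holding it. */
static void drain_work(struct work_q *queues, int ix)
{
    pthread_mutex_lock(&queues[ix].mtx);
    struct work *w = queues[ix].first;
    queues[ix].first = NULL;
    queues[ix].last = NULL;
    queues[ix].pending = 0;
    pthread_mutex_unlock(&queues[ix].mtx);

    while (w) {
        struct work *next = w->next;
        w->func(w->arg);
        free(w);
        w = next;
    }
}

Detaching the whole list before running callbacks keeps the mutex hold time short and allows the callbacks themselves (such as the table-free confirmation sketched earlier) to schedule further work without deadlocking on the queue lock.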