Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--             | erts/emulator/beam/atom.names         |   1
-rw-r--r--             | erts/emulator/beam/erl_async.c        |   4
-rwxr-xr-x[-rw-r--r--] | erts/emulator/beam/erl_bif_info.c     |  96
-rw-r--r--             | erts/emulator/beam/erl_db_hash.c      |  63
-rw-r--r--             | erts/emulator/beam/erl_init.c         |  32
-rw-r--r--             | erts/emulator/beam/erl_lock_count.c   |  29
-rw-r--r--             | erts/emulator/beam/erl_lock_count.h   |   3
-rw-r--r--             | erts/emulator/beam/erl_process.c      | 329
-rw-r--r--             | erts/emulator/beam/erl_process.h      |  43
-rw-r--r--             | erts/emulator/beam/erl_process_lock.c |  28
-rw-r--r--             | erts/emulator/beam/erl_process_lock.h |   2
-rwxr-xr-x[-rw-r--r--] | erts/emulator/beam/global.h           |   5
-rw-r--r--             | erts/emulator/beam/io.c               |  48
13 files changed, 578 insertions, 105 deletions
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names index 02735d4b68..78c566ed38 100644 --- a/erts/emulator/beam/atom.names +++ b/erts/emulator/beam/atom.names @@ -391,6 +391,7 @@ atom opt atom or atom ordered_set atom orelse +atom os_pid atom os_type atom os_version atom ose_bg_proc diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c index f321ed21aa..cb975d64b0 100644 --- a/erts/emulator/beam/erl_async.c +++ b/erts/emulator/beam/erl_async.c @@ -253,7 +253,9 @@ erts_get_async_ready_queue(Uint sched_id) static ERTS_INLINE void async_add(ErtsAsync *a, ErtsAsyncQ* q) { +#ifdef USE_VM_PROBES int len; +#endif if (is_internal_port(a->port)) { #if ERTS_USE_ASYNC_READY_Q @@ -291,7 +293,9 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q, int saved_fin_deq = 0; ErtsThrQFinDeQ_t fin_deq; #endif +#ifdef USE_VM_PROBES int len; +#endif while (1) { ErtsAsync *a = (ErtsAsync *) erts_thr_q_dequeue(q); diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index f889ccdb93..2373dc7af4 100644..100755 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -2765,7 +2765,8 @@ port_info_1(BIF_ALIST_1) am_id, am_connected, am_input, - am_output + am_output, + am_os_pid }; Eterm items[ASIZE(keys)]; Eterm result = NIL; @@ -2822,6 +2823,7 @@ port_info_1(BIF_ALIST_1) ** name String ** input Number of bytes input from port program ** output Number of bytes output to the port program +** os_pid The child's process ID */ BIF_RETTYPE port_info_2(BIF_ALIST_2) @@ -2922,6 +2924,18 @@ static BIF_RETTYPE port_info(Process* p, Eterm portid, Eterm item) hp = HAlloc(p, hsz); res = erts_bld_uint(&hp, NULL, n); } + else if (item == am_os_pid) { + if (prt->os_pid >= 0) { + Uint hsz = 3; + UWord n = prt->os_pid; + (void) erts_bld_uword(NULL, &hsz, n); + hp = HAlloc(p, hsz); + res = erts_bld_uword(&hp, NULL, n); + } else { + hp = HAlloc(p, 3); + res = am_undefined; + } + } else if (item == am_registered_name) { RegProc *reg; reg = prt->reg; @@ -4110,48 +4124,52 @@ BIF_RETTYPE erts_debug_lock_counters_1(BIF_ALIST_1) Eterm* tp = tuple_val(BIF_ARG_1); switch (arityval(tp[0])) { - case 2: + case 2: { + int opt = 0; + int val = 0; if (ERTS_IS_ATOM_STR("copy_save", tp[1])) { - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); - if (tp[2] == am_true) { - - res = erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_COPYSAVE) ? am_true : am_false; - - } else if (tp[2] == am_false) { - - res = erts_lcnt_clear_rt_opt(ERTS_LCNT_OPT_COPYSAVE) ? am_true : am_false; - - } else { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); - BIF_ERROR(BIF_P, BADARG); - } - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); - BIF_RET(res); - + opt = ERTS_LCNT_OPT_COPYSAVE; } else if (ERTS_IS_ATOM_STR("process_locks", tp[1])) { - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); - if (tp[2] == am_true) { - - res = erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_PROCLOCK) ? am_true : am_false; - - } else if (tp[2] == am_false) { - - res = erts_lcnt_set_rt_opt(ERTS_LCNT_OPT_PROCLOCK) ? 
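The erts_debug_lock_counters/1 rewrite above replaces the per-option copies of the unlock/block/set/unblock sequence with a single path: map the option atom to an ERTS_LCNT_OPT_* bit, validate the boolean, then apply it once. A rough standalone sketch of that parse-then-apply shape, in plain C with illustrative stand-ins (opt_from_name, apply_rt_opt and the rt_opts word are not ERTS functions):

    #include <stdio.h>
    #include <string.h>

    /* Illustrative option bits mirroring the ERTS_LCNT_OPT_* flags. */
    enum { OPT_COPYSAVE = 1 << 0, OPT_PROCLOCK = 1 << 1,
           OPT_PORTLOCK = 1 << 2, OPT_SUSPEND = 1 << 3, OPT_LOCATION = 1 << 4 };

    static unsigned rt_opts; /* stand-in for the runtime option word */

    /* Map an option name to its bit, or 0 if unknown (=> badarg). */
    static unsigned opt_from_name(const char *name)
    {
        if (strcmp(name, "copy_save") == 0)     return OPT_COPYSAVE;
        if (strcmp(name, "process_locks") == 0) return OPT_PROCLOCK;
        if (strcmp(name, "port_locks") == 0)    return OPT_PORTLOCK;
        if (strcmp(name, "suspend") == 0)       return OPT_SUSPEND;
        if (strcmp(name, "location") == 0)      return OPT_LOCATION;
        return 0;
    }

    /* Set or clear one option; returns the previous state of that bit. */
    static int apply_rt_opt(unsigned opt, int enable)
    {
        int was_set = (rt_opts & opt) != 0;
        if (enable) rt_opts |= opt;
        else        rt_opts &= ~opt;
        return was_set;
    }

    int main(void)
    {
        unsigned opt = opt_from_name("port_locks");
        if (!opt) return 1;              /* the BIF raises badarg here */
        int prev = apply_rt_opt(opt, 1); /* previous value drives follow-up work */
        printf("port_locks previously %s\n", prev ? "on" : "off");
        return 0;
    }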
am_true : am_false; + opt = ERTS_LCNT_OPT_PROCLOCK; + } else if (ERTS_IS_ATOM_STR("port_locks", tp[1])) { + opt = ERTS_LCNT_OPT_PORTLOCK; + } else if (ERTS_IS_ATOM_STR("suspend", tp[1])) { + opt = ERTS_LCNT_OPT_SUSPEND; + } else if (ERTS_IS_ATOM_STR("location", tp[1])) { + opt = ERTS_LCNT_OPT_LOCATION; + } else { + BIF_ERROR(BIF_P, BADARG); + } + if (tp[2] == am_true) { + val = 1; + } else if (tp[2] == am_false) { + val = 0; + } else { + BIF_ERROR(BIF_P, BADARG); + } - } else { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); - BIF_ERROR(BIF_P, BADARG); + erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_smp_thr_progress_block(); + + if (val) { + res = erts_lcnt_set_rt_opt(opt) ? am_true : am_false; + } else { + res = erts_lcnt_clear_rt_opt(opt) ? am_true : am_false; + } +#ifdef ERTS_SMP + if (res != tp[2]) { + if (opt == ERTS_LCNT_OPT_PORTLOCK) { + erts_lcnt_enable_io_lock_count(val); + } else if (opt == ERTS_LCNT_OPT_PROCLOCK) { + erts_lcnt_enable_proc_lock_count(val); } - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); - BIF_RET(res); - } - break; + } +#endif + erts_smp_thr_progress_unblock(); + erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + BIF_RET(res); + break; + } default: break; diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index c726be5fb4..2fea4671e1 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -105,7 +105,20 @@ #define NSEG_2 256 /* Size of second segment table */ #define NSEG_INC 128 /* Number of segments to grow after that */ -#define SEGTAB(tb) ((struct segment**)erts_smp_atomic_read_ddrb(&(tb)->segtab)) +#ifdef ERTS_SMP +# define DB_USING_FINE_LOCKING(TB) (((TB))->common.type & DB_FINE_LOCKED) +#else +# define DB_USING_FINE_LOCKING(TB) 0 +#endif + +#ifdef ETHR_ORDERED_READ_DEPEND +#define SEGTAB(tb) ((struct segment**) erts_smp_atomic_read_nob(&(tb)->segtab)) +#else +#define SEGTAB(tb) \ + (DB_USING_FINE_LOCKING(tb) \ + ? ((struct segment**) erts_smp_atomic_read_ddrb(&(tb)->segtab)) \ + : ((struct segment**) erts_smp_atomic_read_nob(&(tb)->segtab))) +#endif #define NACTIVE(tb) ((int)erts_smp_atomic_read_nob(&(tb)->nactive)) #define NITEMS(tb) ((int)erts_smp_atomic_read_nob(&(tb)->common.nitems)) @@ -122,7 +135,9 @@ */ static ERTS_INLINE Uint hash_to_ix(DbTableHash* tb, HashValue hval) { - Uint mask = erts_smp_atomic_read_acqb(&tb->szm); + Uint mask = (DB_USING_FINE_LOCKING(tb) + ? 
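The new SEGTAB/SET_SEGTAB macros above make the barrier flavour of each atomic access depend on DB_USING_FINE_LOCKING, presumably because a table that is not fine-locked is serialized by a single lock and needs no extra ordering. A rough standalone analogue using C11 atomics (struct table, fine_locked and the helper names are illustrative):

    #include <stdatomic.h>
    #include <stdint.h>

    struct table {
        int fine_locked;              /* readers and writers may race on this table */
        _Atomic(uintptr_t) segtab;    /* segment-table pointer stored as an integer */
    };

    /* Publish a new segment table: release ordering only when readers can race us. */
    static void set_segtab(struct table *tb, void *segtab)
    {
        atomic_store_explicit(&tb->segtab, (uintptr_t) segtab,
                              tb->fine_locked ? memory_order_release
                                              : memory_order_relaxed);
    }

    /* Read the segment table: acquire ordering only when fine locking is in use. */
    static void *get_segtab(struct table *tb)
    {
        return (void *) atomic_load_explicit(&tb->segtab,
                                             tb->fine_locked ? memory_order_acquire
                                                             : memory_order_relaxed);
    }

    int main(void)
    {
        static int seg0;                  /* dummy "segment table" */
        struct table tb = { 1, 0 };
        set_segtab(&tb, &seg0);
        return get_segtab(&tb) == &seg0 ? 0 : 1;
    }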
erts_smp_atomic_read_acqb(&tb->szm) + : erts_smp_atomic_read_nob(&tb->szm)); Uint ix = hval & mask; if (ix >= erts_smp_atomic_read_nob(&tb->nactive)) { ix &= mask>>1; @@ -319,7 +334,10 @@ struct ext_segment { static ERTS_INLINE void SET_SEGTAB(DbTableHash* tb, struct segment** segtab) { - erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab); + if (DB_USING_FINE_LOCKING(tb)) + erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab); + else + erts_smp_atomic_set_nob(&tb->segtab, (erts_aint_t) segtab); #ifdef VALGRIND tb->top_ptr_to_segment_with_active_segtab = EXTSEG(segtab); #endif @@ -2501,6 +2519,28 @@ static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2, return list; } +static ERTS_INLINE int +begin_resizing(DbTableHash* tb) +{ + if (DB_USING_FINE_LOCKING(tb)) + return !erts_smp_atomic_xchg_acqb(&tb->is_resizing, 1); + else { + if (erts_smp_atomic_read_nob(&tb->is_resizing)) + return 0; + erts_smp_atomic_set_nob(&tb->is_resizing, 1); + return 1; + } +} + +static ERTS_INLINE void +done_resizing(DbTableHash* tb) +{ + if (DB_USING_FINE_LOCKING(tb)) + erts_smp_atomic_set_relb(&tb->is_resizing, 0); + else + erts_smp_atomic_set_nob(&tb->is_resizing, 0); +} + /* Grow table with one new bucket. ** Allocate new segment if needed. */ @@ -2513,9 +2553,8 @@ static void grow(DbTableHash* tb, int nactive) int from_ix; int szm; - if (erts_smp_atomic_xchg_acqb(&tb->is_resizing, 1)) { + if (!begin_resizing(tb)) return; /* already in progress */ - } if (NACTIVE(tb) != nactive) { goto abort; /* already done (race) */ } @@ -2547,9 +2586,12 @@ static void grow(DbTableHash* tb, int nactive) } erts_smp_atomic_inc_nob(&tb->nactive); if (from_ix == 0) { - erts_smp_atomic_set_relb(&tb->szm, szm); + if (DB_USING_FINE_LOCKING(tb)) + erts_smp_atomic_set_relb(&tb->szm, szm); + else + erts_smp_atomic_set_nob(&tb->szm, szm); } - erts_smp_atomic_set_relb(&tb->is_resizing, 0); + done_resizing(tb); /* Finally, let's split the bucket. 
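grow() and shrink() now go through begin_resizing()/done_resizing(), which use an atomic exchange only for fine-locked tables and fall back to a plain flag otherwise. A minimal sketch of that guard with C11 atomics (names are illustrative):

    #include <stdatomic.h>

    struct table {
        int fine_locked;
        _Atomic int is_resizing;
    };

    /* Returns nonzero if this caller won the right to resize. */
    static int begin_resizing(struct table *tb)
    {
        if (tb->fine_locked)
            /* atomic exchange: exactly one thread sees the 0 -> 1 transition */
            return !atomic_exchange_explicit(&tb->is_resizing, 1, memory_order_acquire);
        /* single-writer case: a plain check-and-set is enough */
        if (atomic_load_explicit(&tb->is_resizing, memory_order_relaxed))
            return 0;
        atomic_store_explicit(&tb->is_resizing, 1, memory_order_relaxed);
        return 1;
    }

    static void done_resizing(struct table *tb)
    {
        atomic_store_explicit(&tb->is_resizing, 0,
                              tb->fine_locked ? memory_order_release
                                              : memory_order_relaxed);
    }

    int main(void)
    {
        struct table tb = { 1, 0 };
        if (begin_resizing(&tb)) {
            /* ... grow or shrink the table ... */
            done_resizing(&tb);
        }
        return 0;
    }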
We try to do it in a smart way to keep link order and avoid unnecessary updates of next-pointers */ @@ -2581,7 +2623,7 @@ static void grow(DbTableHash* tb, int nactive) return; abort: - erts_smp_atomic_set_relb(&tb->is_resizing, 0); + done_resizing(tb); } @@ -2590,9 +2632,8 @@ abort: */ static void shrink(DbTableHash* tb, int nactive) { - if (erts_smp_atomic_xchg_acqb(&tb->is_resizing, 1)) { + if (!begin_resizing(tb)) return; /* already in progress */ - } if (NACTIVE(tb) == nactive) { erts_smp_rwmtx_t* lck; int src_ix = nactive - 1; @@ -2639,7 +2680,7 @@ static void shrink(DbTableHash* tb, int nactive) } /*else already done */ - erts_smp_atomic_set_relb(&tb->is_resizing, 0); + done_resizing(tb); } diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c index ca4385dd3a..7bf8d14708 100644 --- a/erts/emulator/beam/erl_init.c +++ b/erts/emulator/beam/erl_init.c @@ -511,10 +511,14 @@ void erts_usage(void) erts_fprintf(stderr, "-rg amount set reader groups limit\n"); erts_fprintf(stderr, "-sbt type set scheduler bind type, valid types are:\n"); erts_fprintf(stderr, " u|ns|ts|ps|s|nnts|nnps|tnnps|db\n"); + erts_fprintf(stderr, "-sbwt val set scheduler busy wait threshold, valid values are:\n"); + erts_fprintf(stderr, " none|very_short|short|medium|long|very_long.\n"); erts_fprintf(stderr, "-scl bool enable/disable compaction of scheduler load,\n"); erts_fprintf(stderr, " see the erl(1) documentation for more info.\n"); erts_fprintf(stderr, "-sct cput set cpu topology,\n"); erts_fprintf(stderr, " see the erl(1) documentation for more info.\n"); + erts_fprintf(stderr, "-sws val set scheduler wakeup strategy, valid values are:\n"); + erts_fprintf(stderr, " default|legacy|proposal.\n"); erts_fprintf(stderr, "-swt val set scheduler wakeup threshold, valid values are:\n"); erts_fprintf(stderr, " very_low|low|medium|high|very_high.\n"); erts_fprintf(stderr, "-sss size suggested stack size in kilo words for scheduler threads,\n"); @@ -790,6 +794,10 @@ early_init(int *argc, char **argv) /* } } +#ifndef USE_THREADS + erts_async_max_threads = 0; +#endif + #ifdef ERTS_SMP no_schedulers = schdlrs; no_schedulers_online = schdlrs_onln; @@ -1198,6 +1206,16 @@ erl_start(int argc, char **argv) erts_usage(); } } + else if (has_prefix("bwt", sub_param)) { + arg = get_arg(sub_param+3, argv[i+1], &i); + if (erts_sched_set_busy_wait_threshold(arg) != 0) { + erts_fprintf(stderr, "bad scheduler busy wait threshold: %s\n", + arg); + erts_usage(); + } + VERBOSE(DEBUG_SYSTEM, + ("scheduler wakup threshold: %s\n", arg)); + } else if (has_prefix("cl", sub_param)) { arg = get_arg(sub_param+2, argv[i+1], &i); if (sys_strcmp("true", arg) == 0) @@ -1258,13 +1276,23 @@ erl_start(int argc, char **argv) erts_use_sender_punish = 0; else if (sys_strcmp("wt", sub_param) == 0) { arg = get_arg(sub_param+2, argv[i+1], &i); - if (erts_sched_set_wakeup_limit(arg) != 0) { + if (erts_sched_set_wakeup_other_thresold(arg) != 0) { erts_fprintf(stderr, "scheduler wakeup threshold: %s\n", arg); erts_usage(); } VERBOSE(DEBUG_SYSTEM, - ("scheduler wakup threshold: %s\n", arg)); + ("scheduler wakeup threshold: %s\n", arg)); + } + else if (sys_strcmp("ws", sub_param) == 0) { + arg = get_arg(sub_param+2, argv[i+1], &i); + if (erts_sched_set_wakeup_other_type(arg) != 0) { + erts_fprintf(stderr, "scheduler wakeup strategy: %s\n", + arg); + erts_usage(); + } + VERBOSE(DEBUG_SYSTEM, + ("scheduler wakeup threshold: %s\n", arg)); } else if (has_prefix("ss", sub_param)) { /* suggested stack size (Kilo Words) for scheduler threads */ diff 
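The new +sbwt and +sws flags shown in the erl_init.c hunk plug into the same sub-parameter scheme as the existing +swt handling: match the parameter name by prefix, fetch its value, and let the setter reject unknown values. A simplified sketch of that dispatch under assumed names (parse_s_subparam and set_busy_wait_threshold are illustrative stand-ins):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    static int set_busy_wait_threshold(const char *v)  /* "none" .. "very_long" */
    {
        static const char *ok[] = { "none", "very_short", "short",
                                    "medium", "long", "very_long" };
        for (size_t i = 0; i < sizeof ok / sizeof ok[0]; i++)
            if (strcmp(v, ok[i]) == 0) return 0;
        return EINVAL;
    }

    static int has_prefix(const char *prefix, const char *s)
    {
        return strncmp(prefix, s, strlen(prefix)) == 0;
    }

    /* Dispatch one "+s..." sub-parameter such as "bwt long" or "ws legacy". */
    static int parse_s_subparam(const char *sub, const char *value)
    {
        if (has_prefix("bwt", sub))
            return set_busy_wait_threshold(value);
        /* ... "ws", "wt", "ss", etc. handled the same way ... */
        return EINVAL;
    }

    int main(void)
    {
        if (parse_s_subparam("bwt", "long") != 0)
            fprintf(stderr, "bad scheduler busy wait threshold\n");
        return 0;
    }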
--git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c index a36c53560e..741c0cb08e 100644 --- a/erts/emulator/beam/erl_lock_count.c +++ b/erts/emulator/beam/erl_lock_count.c @@ -49,7 +49,7 @@ const char *str_undefined = "undefined"; static ethr_tsd_key lcnt_thr_data_key; static int lcnt_n_thr; -static erts_lcnt_thread_data_t *lcnt_thread_data[1024]; +static erts_lcnt_thread_data_t *lcnt_thread_data[4096]; /* local functions */ @@ -240,7 +240,7 @@ void erts_lcnt_init() { lcnt_lock(); - erts_lcnt_rt_options = ERTS_LCNT_OPT_PROCLOCK; + erts_lcnt_rt_options = ERTS_LCNT_OPT_PROCLOCK | ERTS_LCNT_OPT_LOCATION; eltd = lcnt_thread_data_alloc(); @@ -312,7 +312,7 @@ void erts_lcnt_list_insert(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock) } void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock) { - + if (lock->next) lock->next->prev = lock->prev; if (lock->prev) lock->prev->next = lock->next; if (list->head == lock) list->head = lock->next; @@ -334,6 +334,10 @@ void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag ) { } void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id) { int i; + if (!name) { + lock->flag = 0; + return; + } lcnt_lock(); lock->next = NULL; @@ -363,6 +367,8 @@ void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eter void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock) { erts_lcnt_lock_t *deleted_lock; + if (!ERTS_LCNT_LOCK_TYPE(lock)) return; + lcnt_lock(); if (erts_lcnt_rt_options & ERTS_LCNT_OPT_COPYSAVE) { @@ -378,6 +384,7 @@ void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock) { } /* delete original */ erts_lcnt_list_delete(erts_lcnt_data->current_locks, lock); + lock->flag = 0; lcnt_unlock(); } @@ -389,6 +396,7 @@ void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) { erts_lcnt_thread_data_t *eltd; if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return; + if (!ERTS_LCNT_LOCK_TYPE(lock)) return; eltd = lcnt_get_thread_data(); @@ -422,6 +430,7 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) { erts_lcnt_thread_data_t *eltd; if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return; + if (!ERTS_LCNT_LOCK_TYPE(lock)) return; w_state = ethr_atomic_read(&lock->w_state); ethr_atomic_inc( &lock->w_state); @@ -452,6 +461,7 @@ void erts_lcnt_lock_unaquire(erts_lcnt_lock_t *lock) { /* should check if this thread was "waiting" */ if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return; + if (!ERTS_LCNT_LOCK_TYPE(lock)) return; ethr_atomic_dec( &lock->w_state); } @@ -475,6 +485,7 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line #endif if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return; + if (!ERTS_LCNT_LOCK_TYPE(lock)) return; #ifdef DEBUG if (!(lock->flag & (ERTS_LCNT_LT_RWMUTEX | ERTS_LCNT_LT_RWSPINLOCK))) { @@ -489,9 +500,13 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line ASSERT(eltd); /* if lock was in conflict, time it */ - - stats = lcnt_get_lock_stats(lock, file, line); + if (erts_lcnt_rt_options & ERTS_LCNT_OPT_LOCATION) { + stats = lcnt_get_lock_stats(lock, file, line); + } else { + stats = &lock->stats[0]; + } + if (eltd->timer_set) { lcnt_time(&timer); @@ -510,6 +525,7 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line void erts_lcnt_unlock_opt(erts_lcnt_lock_t *lock, Uint16 option) { if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return; + if (!ERTS_LCNT_LOCK_TYPE(lock)) return; if (option & ERTS_LCNT_LO_WRITE) 
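Most of the erl_lock_count.c changes add one guard: a lock initialised with a NULL name gets its type flag cleared, and every counting hook returns early when that flag is zero, so counting can be switched off per lock without touching the underlying lock. A stripped-down sketch of that dormant-lock pattern (lcnt_lock_t and the hook names are illustrative):

    #include <stdio.h>

    #define LT_ALL 0xff            /* any lock-type bit set => counting enabled */

    typedef struct {
        unsigned short flag;       /* 0 means "not counted" */
        unsigned long tries, collisions;
    } lcnt_lock_t;

    static void lcnt_init_lock(lcnt_lock_t *l, const char *name, unsigned short type)
    {
        if (!name) {               /* counting disabled for this lock */
            l->flag = 0;
            return;
        }
        l->flag = type;
        l->tries = l->collisions = 0;
    }

    static void lcnt_lock_hook(lcnt_lock_t *l, int contended)
    {
        if (!(l->flag & LT_ALL))   /* dormant lock: no bookkeeping */
            return;
        l->tries++;
        if (contended) l->collisions++;
    }

    int main(void)
    {
        lcnt_lock_t counted, dormant;
        lcnt_init_lock(&counted, "port_lock", 0x01);
        lcnt_init_lock(&dormant, NULL, 0x01);
        lcnt_lock_hook(&counted, 1);
        lcnt_lock_hook(&dormant, 1);                       /* no effect */
        printf("%lu %lu\n", counted.tries, dormant.tries); /* 1 0 */
        return 0;
    }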
ethr_atomic_dec(&lock->w_state); if (option & ERTS_LCNT_LO_READ ) ethr_atomic_dec(&lock->r_state); } @@ -520,6 +536,7 @@ void erts_lcnt_unlock(erts_lcnt_lock_t *lock) { erts_aint_t flowstate; #endif if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return; + if (!ERTS_LCNT_LOCK_TYPE(lock)) return; #ifdef DEBUG /* flowstate */ flowstate = ethr_atomic_read(&lock->flowstate); @@ -537,6 +554,7 @@ void erts_lcnt_unlock(erts_lcnt_lock_t *lock) { void erts_lcnt_trylock_opt(erts_lcnt_lock_t *lock, int res, Uint16 option) { if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return; + if (!ERTS_LCNT_LOCK_TYPE(lock)) return; /* Determine lock_state via res instead of state */ if (res != EBUSY) { if (option & ERTS_LCNT_LO_WRITE) ethr_atomic_inc(&lock->w_state); @@ -555,6 +573,7 @@ void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) { erts_aint_t flowstate; #endif if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return; + if (!ERTS_LCNT_LOCK_TYPE(lock)) return; if (res != EBUSY) { #ifdef DEBUG diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h index 6306580ae4..690551c71f 100644 --- a/erts/emulator/beam/erl_lock_count.h +++ b/erts/emulator/beam/erl_lock_count.h @@ -89,6 +89,7 @@ #define ERTS_LCNT_OPT_LOCATION (((Uint16) 1) << 1) #define ERTS_LCNT_OPT_PROCLOCK (((Uint16) 1) << 2) #define ERTS_LCNT_OPT_COPYSAVE (((Uint16) 1) << 3) +#define ERTS_LCNT_OPT_PORTLOCK (((Uint16) 1) << 4) typedef struct { unsigned long s; @@ -201,5 +202,7 @@ void erts_lcnt_clear_counters(void); char *erts_lcnt_lock_type(Uint16 type); erts_lcnt_data_t *erts_lcnt_get_data(void); +#define ERTS_LCNT_LOCK_TYPE(lockp) ((lockp)->flag & ERTS_LCNT_LT_ALL) + #endif /* ifdef ERTS_ENABLE_LOCK_COUNT */ #endif /* ifndef ERTS_LOCK_COUNT_H__ */ diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index 95d408f79d..09ca536188 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -52,21 +52,22 @@ #define ERTS_SCHED_SPIN_UNTIL_YIELD 100 -#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT 10 +#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_VERY_LONG 40 +#define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_VERY_LONG 1000 +#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_LONG 20 +#define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_LONG 1000 +#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM 10 +#define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_MEDIUM 1000 +#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_SHORT 10 +#define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_SHORT 0 +#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_VERY_SHORT 5 +#define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_VERY_SHORT 0 +#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE 0 +#define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_NONE 0 + #define ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT 1000 -#define ERTS_SCHED_TSE_SLEEP_SPINCOUNT \ - (ERTS_SCHED_SYS_SLEEP_SPINCOUNT*ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT) #define ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT 0 -#define ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH (200*CONTEXT_REDS) -#define ERTS_WAKEUP_OTHER_LIMIT_HIGH (50*CONTEXT_REDS) -#define ERTS_WAKEUP_OTHER_LIMIT_MEDIUM (10*CONTEXT_REDS) -#define ERTS_WAKEUP_OTHER_LIMIT_LOW (CONTEXT_REDS) -#define ERTS_WAKEUP_OTHER_LIMIT_VERY_LOW (CONTEXT_REDS/10) - -#define ERTS_WAKEUP_OTHER_DEC 10 -#define ERTS_WAKEUP_OTHER_FIXED_INC (CONTEXT_REDS/10) - #if 0 || defined(DEBUG) #define ERTS_FAKE_SCHED_BIND_PRINT_SORTED_CPU_DATA #endif @@ -123,14 +124,18 @@ Uint erts_no_schedulers; Uint erts_max_processes = ERTS_DEFAULT_MAX_PROCESSES; Uint erts_process_tab_index_mask; -static int 
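The scheduler busy-wait rework replaces the single ERTS_SCHED_SYS_SLEEP_SPINCOUNT constant with a small table derived from the selected threshold: a base spin count for the sys-schedule loop plus two products of it for the thread-event and aux-work loops. A sketch of that derivation using the factors visible in the diff (struct and function names are stand-ins):

    #include <stdio.h>

    #define TSE_SPINCOUNT_FACT 1000       /* fixed factor for the tse sleep loop */

    struct busy_wait {
        int sys_schedule;   /* spins before falling back to the sys-schedule poll */
        int tse;            /* spins before sleeping on the thread event */
        int aux_work;       /* spins spent checking for aux work */
    };

    /* Derive the three spin counts from a base count and an aux-work factor,
     * e.g. medium => base 10, factor 1000; short => base 10, factor 0. */
    static struct busy_wait make_busy_wait(int sys_sched, int aux_work_fact)
    {
        struct busy_wait bw;
        bw.sys_schedule = sys_sched;
        bw.tse = sys_sched * TSE_SPINCOUNT_FACT;
        bw.aux_work = sys_sched * aux_work_fact;
        return bw;
    }

    int main(void)
    {
        struct busy_wait medium = make_busy_wait(10, 1000);
        struct busy_wait none = make_busy_wait(0, 0);      /* "none": skip spinning */
        printf("medium: %d %d %d\n", medium.sys_schedule, medium.tse, medium.aux_work);
        printf("none:   %d %d %d\n", none.sys_schedule, none.tse, none.aux_work);
        return 0;
    }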
wakeup_other_limit; - int erts_sched_thread_suggested_stack_size = -1; #ifdef ERTS_ENABLE_LOCK_CHECK ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE]; #endif +static struct { + int aux_work; + int tse; + int sys_schedule; +} sched_busy_wait; + #ifdef ERTS_SMP int erts_disable_proc_not_running_opt; @@ -2046,7 +2051,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) erts_smp_runq_unlock(rq); - spincount = ERTS_SCHED_TSE_SLEEP_SPINCOUNT; + spincount = sched_busy_wait.tse; tse_wait: @@ -2097,7 +2102,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) } flgs = sched_prep_cont_spin_wait(ssi); - spincount = ERTS_SCHED_TSE_SLEEP_SPINCOUNT; + spincount = sched_busy_wait.aux_work; if (!(flgs & ERTS_SSI_FLG_WAITING)) { ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING)); @@ -2134,7 +2139,9 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) ASSERT(working); sched_wall_time_change(esdp, working = 0); - spincount = ERTS_SCHED_SYS_SLEEP_SPINCOUNT; + spincount = sched_busy_wait.sys_schedule; + if (spincount == 0) + goto sys_aux_work; while (spincount-- > 0) { @@ -3560,31 +3567,280 @@ erts_debug_nbalance(void) #endif } +/* Wakeup other schedulers */ + +typedef enum { + ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_HIGH, + ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_HIGH, + ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM, + ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_LOW, + ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_LOW +} ErtsSchedWakeupOtherThreshold; + +typedef enum { + ERTS_SCHED_WAKEUP_OTHER_TYPE_PROPOSAL, + ERTS_SCHED_WAKEUP_OTHER_TYPE_LEGACY +} ErtsSchedWakeupOtherType; + +/* First proposal */ + +#define ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH (200*CONTEXT_REDS) +#define ERTS_WAKEUP_OTHER_LIMIT_HIGH (50*CONTEXT_REDS) +#define ERTS_WAKEUP_OTHER_LIMIT_MEDIUM (10*CONTEXT_REDS) +#define ERTS_WAKEUP_OTHER_LIMIT_LOW (CONTEXT_REDS) +#define ERTS_WAKEUP_OTHER_LIMIT_VERY_LOW (CONTEXT_REDS/10) + +#define ERTS_WAKEUP_OTHER_DEC_SHIFT_VERY_HIGH 3 +#define ERTS_WAKEUP_OTHER_DEC_SHIFT_HIGH 1 +#define ERTS_WAKEUP_OTHER_DEC_SHIFT_MEDIUM 0 +#define ERTS_WAKEUP_OTHER_DEC_SHIFT_LOW -2 +#define ERTS_WAKEUP_OTHER_DEC_SHIFT_VERY_LOW -5 + +#define ERTS_WAKEUP_OTHER_DEC_SHIFT 2 +#define ERTS_WAKEUP_OTHER_FIXED_INC (CONTEXT_REDS/10) + +/* To be legacy */ + +#define ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH_LEGACY (200*CONTEXT_REDS) +#define ERTS_WAKEUP_OTHER_LIMIT_HIGH_LEGACY (50*CONTEXT_REDS) +#define ERTS_WAKEUP_OTHER_LIMIT_MEDIUM_LEGACY (10*CONTEXT_REDS) +#define ERTS_WAKEUP_OTHER_LIMIT_LOW_LEGACY (CONTEXT_REDS) +#define ERTS_WAKEUP_OTHER_LIMIT_VERY_LOW_LEGACY (CONTEXT_REDS/10) + +#define ERTS_WAKEUP_OTHER_DEC_LEGACY 10 +#define ERTS_WAKEUP_OTHER_FIXED_INC_LEGACY (CONTEXT_REDS/10) + +#ifdef ERTS_SMP + +static struct { + ErtsSchedWakeupOtherThreshold threshold; + ErtsSchedWakeupOtherType type; + int limit; + int dec_shift; + int dec_mask; + void (*check)(ErtsRunQueue *rq); +} wakeup_other; + +static void +wakeup_other_check(ErtsRunQueue *rq) +{ + int wo_reds = rq->wakeup_other_reds; + if (wo_reds) { + int left_len = rq->len - 1; + if (left_len < 1) { + int wo_reduce = wo_reds << wakeup_other.dec_shift; + wo_reduce &= wakeup_other.dec_mask; + rq->wakeup_other -= wo_reduce; + if (rq->wakeup_other < 0) + rq->wakeup_other = 0; + } + else { + rq->wakeup_other += (left_len*wo_reds + + ERTS_WAKEUP_OTHER_FIXED_INC); + if (rq->wakeup_other > wakeup_other.limit) { + int empty_rqs = + erts_smp_atomic32_read_acqb(&no_empty_run_queues); + if (empty_rqs != 0) + wake_scheduler_on_empty_runq(rq); + rq->wakeup_other = 0; + } + 
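In the proposed wakeup-other scheme, the per-threshold dec_shift controls how quickly accumulated wakeup_other credit decays while the run queue stays short: larger shifts decay faster, negative shifts slower. A standalone sketch of that decay step, with the sign of the shift handled explicitly so the shift is always well defined (the function name is illustrative):

    #include <stdio.h>

    /* Decay the wakeup_other counter by wo_reds scaled by a per-threshold shift.
     * A positive shift makes the decay more aggressive, a negative one gentler. */
    static int decay_wakeup_other(int wakeup_other, int wo_reds, int dec_shift)
    {
        int reduce;
        if (dec_shift >= 0)
            reduce = wo_reds << dec_shift;
        else
            reduce = wo_reds >> -dec_shift;
        wakeup_other -= reduce;
        return wakeup_other < 0 ? 0 : wakeup_other;
    }

    int main(void)
    {
        /* "very_high" uses shift 3 (fast decay), "very_low" uses -5 (slow decay). */
        printf("%d\n", decay_wakeup_other(4000, 100, 3));   /* 4000 - 800 = 3200 */
        printf("%d\n", decay_wakeup_other(4000, 100, -5));  /* 4000 - 3   = 3997 */
        return 0;
    }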
} + rq->wakeup_other_reds = 0; + } +} + +static void +wakeup_other_set_limit(void) +{ + switch (wakeup_other.threshold) { + case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_HIGH: + wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH; + wakeup_other.dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_VERY_HIGH; + break; + case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_HIGH: + wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_HIGH; + wakeup_other.dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_HIGH; + break; + case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM: + wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_MEDIUM; + wakeup_other.dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_MEDIUM; + break; + case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_LOW: + wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_LOW; + wakeup_other.dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_LOW; + break; + case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_LOW: + wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_LOW; + wakeup_other.dec_shift = ERTS_WAKEUP_OTHER_DEC_SHIFT_VERY_LOW; + break; + } + if (wakeup_other.dec_shift < 0) + wakeup_other.dec_mask = (1 << (sizeof(wakeup_other.dec_mask)*8 + + wakeup_other.dec_shift)) - 1; + else { + wakeup_other.dec_mask = 0; + wakeup_other.dec_mask = ~wakeup_other.dec_mask; + } +} + +static void +wakeup_other_check_legacy(ErtsRunQueue *rq) +{ + int wo_reds = rq->wakeup_other_reds; + if (wo_reds) { + if (rq->len < 2) { + rq->wakeup_other -= ERTS_WAKEUP_OTHER_DEC_LEGACY*wo_reds; + if (rq->wakeup_other < 0) + rq->wakeup_other = 0; + } + else if (rq->wakeup_other < wakeup_other.limit) + rq->wakeup_other += rq->len*wo_reds + ERTS_WAKEUP_OTHER_FIXED_INC_LEGACY; + else { + if (erts_smp_atomic32_read_acqb(&no_empty_run_queues) != 0) { + wake_scheduler_on_empty_runq(rq); + rq->wakeup_other = 0; + } + rq->wakeup_other = 0; + } + } + rq->wakeup_other_reds = 0; +} + +static void +wakeup_other_set_limit_legacy(void) +{ + switch (wakeup_other.threshold) { + case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_HIGH: + wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH_LEGACY; + break; + case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_HIGH: + wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_HIGH_LEGACY; + break; + case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM: + wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_MEDIUM_LEGACY; + break; + case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_LOW: + wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_LOW_LEGACY; + break; + case ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_LOW: + wakeup_other.limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_LOW_LEGACY; + break; + } +} + +static void +set_wakeup_other_data(void) +{ + switch (wakeup_other.type) { + case ERTS_SCHED_WAKEUP_OTHER_TYPE_PROPOSAL: + wakeup_other.check = wakeup_other_check; + wakeup_other_set_limit(); + break; + case ERTS_SCHED_WAKEUP_OTHER_TYPE_LEGACY: + wakeup_other.check = wakeup_other_check_legacy; + wakeup_other_set_limit_legacy(); + break; + } +} + +#endif + void erts_early_init_scheduling(int no_schedulers) { aux_work_timeout_early_init(no_schedulers); - wakeup_other_limit = ERTS_WAKEUP_OTHER_LIMIT_MEDIUM; +#ifdef ERTS_SMP + wakeup_other.threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM; + wakeup_other.type = ERTS_SCHED_WAKEUP_OTHER_TYPE_LEGACY; +#endif + sched_busy_wait.sys_schedule = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM; + sched_busy_wait.tse = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM + * ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT); + sched_busy_wait.aux_work = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM + * ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_MEDIUM); } int -erts_sched_set_wakeup_limit(char *str) 
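set_wakeup_other_data() selects the run-queue check function once at init time and stores it as a function pointer, so the scheduler loop simply calls wakeup_other.check(rq). A minimal sketch of that strategy-selection pattern (types and names are illustrative):

    #include <stdio.h>

    struct run_queue { int len; int wakeup_other; int wakeup_other_reds; };

    static void check_proposal(struct run_queue *rq) { (void) rq; /* new accounting */ }
    static void check_legacy(struct run_queue *rq)   { (void) rq; /* old accounting */ }

    static struct {
        int limit;                              /* threshold before waking another scheduler */
        void (*check)(struct run_queue *rq);    /* selected once at init */
    } wakeup_other;

    enum wakeup_type { TYPE_PROPOSAL, TYPE_LEGACY };

    static void set_wakeup_other_data(enum wakeup_type type)
    {
        switch (type) {
        case TYPE_PROPOSAL: wakeup_other.check = check_proposal; break;
        case TYPE_LEGACY:   wakeup_other.check = check_legacy;   break;
        }
    }

    int main(void)
    {
        struct run_queue rq = { 0, 0, 0 };
        set_wakeup_other_data(TYPE_LEGACY);   /* "default" maps to legacy in the diff */
        wakeup_other.check(&rq);              /* hot path: one indirect call, no branching */
        return 0;
    }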
+erts_sched_set_wakeup_other_thresold(char *str) { + ErtsSchedWakeupOtherThreshold threshold; if (sys_strcmp(str, "very_high") == 0) - wakeup_other_limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_HIGH; + threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_HIGH; else if (sys_strcmp(str, "high") == 0) - wakeup_other_limit = ERTS_WAKEUP_OTHER_LIMIT_HIGH; + threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_HIGH; else if (sys_strcmp(str, "medium") == 0) - wakeup_other_limit = ERTS_WAKEUP_OTHER_LIMIT_MEDIUM; + threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM; else if (sys_strcmp(str, "low") == 0) - wakeup_other_limit = ERTS_WAKEUP_OTHER_LIMIT_LOW; + threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_LOW; else if (sys_strcmp(str, "very_low") == 0) - wakeup_other_limit = ERTS_WAKEUP_OTHER_LIMIT_VERY_LOW; + threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_LOW; + else + return EINVAL; +#ifdef ERTS_SMP + wakeup_other.threshold = threshold; + set_wakeup_other_data(); +#endif + return 0; +} + +int +erts_sched_set_wakeup_other_type(char *str) +{ + ErtsSchedWakeupOtherType type; + if (sys_strcmp(str, "proposal") == 0) + type = ERTS_SCHED_WAKEUP_OTHER_TYPE_PROPOSAL; + else if (sys_strcmp(str, "default") == 0) + type = ERTS_SCHED_WAKEUP_OTHER_TYPE_LEGACY; + else if (sys_strcmp(str, "legacy") == 0) + type = ERTS_SCHED_WAKEUP_OTHER_TYPE_LEGACY; else return EINVAL; +#ifdef ERTS_SMP + wakeup_other.type = type; +#endif return 0; } +int +erts_sched_set_busy_wait_threshold(char *str) +{ + int sys_sched; + int aux_work_fact; + + if (sys_strcmp(str, "very_long") == 0) { + sys_sched = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_VERY_LONG; + aux_work_fact = ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_VERY_LONG; + } + else if (sys_strcmp(str, "long") == 0) { + sys_sched = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_LONG; + aux_work_fact = ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_LONG; + } + else if (sys_strcmp(str, "medium") == 0) { + sys_sched = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM; + aux_work_fact = ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_MEDIUM; + } + else if (sys_strcmp(str, "short") == 0) { + sys_sched = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_SHORT; + aux_work_fact = ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_SHORT; + } + else if (sys_strcmp(str, "very_short") == 0) { + sys_sched = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_VERY_SHORT; + aux_work_fact = ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_VERY_SHORT; + } + else if (sys_strcmp(str, "none") == 0) { + sys_sched = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE; + aux_work_fact = ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_NONE; + } + else { + return EINVAL; + } + + sched_busy_wait.sys_schedule = sys_sched; + sched_busy_wait.tse = sys_sched*ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT; + sched_busy_wait.aux_work = sys_sched*aux_work_fact; + + return 0; +} static void init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp) { @@ -3613,6 +3869,10 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online) init_misc_op_list_alloc(); +#ifdef ERTS_SMP + set_wakeup_other_data(); +#endif + ASSERT(no_schedulers_online <= no_schedulers); ASSERT(no_schedulers_online >= 1); ASSERT(no_schedulers >= 1); @@ -6502,26 +6762,7 @@ Process *schedule(Process *p, int calls) exec_misc_ops(rq); #ifdef ERTS_SMP - { - int wo_reds = rq->wakeup_other_reds; - if (wo_reds) { - if (rq->len < 2) { - rq->wakeup_other -= ERTS_WAKEUP_OTHER_DEC*wo_reds; - if (rq->wakeup_other < 0) - rq->wakeup_other = 0; - } - else if (rq->wakeup_other < wakeup_other_limit) - rq->wakeup_other += rq->len*wo_reds + ERTS_WAKEUP_OTHER_FIXED_INC; - else { - if 
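The erl_process.h hunk swaps the inline run-queue lock wrappers for macros that forward __FILE__ and __LINE__ when lock counting is compiled in, so counted locks can record the call site. A condensed sketch of that header pattern under an assumed ENABLE_LOCK_COUNT switch (all names are placeholders):

    #include <stdio.h>

    typedef struct { int dummy; } my_mtx_t;

    /* Location-aware lock used when lock counting is compiled in. */
    static inline void my_mtx_lock_x(my_mtx_t *m, const char *file, int line)
    {
        (void) m;
        printf("lock taken at %s:%d\n", file, line);
    }

    static inline void my_mtx_lock_plain(my_mtx_t *m) { (void) m; }

    #ifdef ENABLE_LOCK_COUNT
    /* Macro form so the caller's file/line reach the counting code. */
    #  define runq_lock(rq) my_mtx_lock_x((rq), __FILE__, __LINE__)
    #else
    static inline void runq_lock(my_mtx_t *rq) { my_mtx_lock_plain(rq); }
    #endif

    int main(void)
    {
        my_mtx_t rq_mtx = { 0 };
        runq_lock(&rq_mtx);   /* with -DENABLE_LOCK_COUNT this logs the call site */
        return 0;
    }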
(erts_smp_atomic32_read_acqb(&no_empty_run_queues) != 0) { - wake_scheduler_on_empty_runq(rq); - rq->wakeup_other = 0; - } - rq->wakeup_other = 0; - } - } - rq->wakeup_other_reds = 0; - } + wakeup_other.check(rq); #endif /* diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h index cff0783bc4..28aaedf2e2 100644 --- a/erts/emulator/beam/erl_process.h +++ b/erts/emulator/beam/erl_process.h @@ -1096,7 +1096,9 @@ ErtsProcList *erts_proclist_create(Process *); void erts_proclist_destroy(ErtsProcList *); int erts_proclist_same(ErtsProcList *, Process *); -int erts_sched_set_wakeup_limit(char *str); +int erts_sched_set_wakeup_other_thresold(char *str); +int erts_sched_set_wakeup_other_type(char *str); +int erts_sched_set_busy_wait_threshold(char *str); #if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) int erts_dbg_check_halloc_lock(Process *p); @@ -1400,10 +1402,14 @@ ERTS_GLB_INLINE Eterm erts_get_current_pid(void); ERTS_GLB_INLINE Uint erts_get_scheduler_id(void); ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_proc(Process *p); ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_current(ErtsSchedulerData *esdp); +#ifndef ERTS_ENABLE_LOCK_COUNT ERTS_GLB_INLINE void erts_smp_runq_lock(ErtsRunQueue *rq); +#endif ERTS_GLB_INLINE int erts_smp_runq_trylock(ErtsRunQueue *rq); ERTS_GLB_INLINE void erts_smp_runq_unlock(ErtsRunQueue *rq); +#ifndef ERTS_ENABLE_LOCK_COUNT ERTS_GLB_INLINE void erts_smp_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq); +#endif ERTS_GLB_INLINE void erts_smp_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq); ERTS_GLB_INLINE void erts_smp_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2); ERTS_GLB_INLINE void erts_smp_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2); @@ -1492,6 +1498,12 @@ erts_get_runq_current(ErtsSchedulerData *esdp) #endif } +#ifdef ERTS_ENABLE_LOCK_COUNT + +#define erts_smp_runq_lock(rq) erts_smp_mtx_lock_x(&(rq)->mtx, __FILE__, __LINE__) + +#else + ERTS_GLB_INLINE void erts_smp_runq_lock(ErtsRunQueue *rq) { @@ -1500,6 +1512,8 @@ erts_smp_runq_lock(ErtsRunQueue *rq) #endif } +#endif + ERTS_GLB_INLINE int erts_smp_runq_trylock(ErtsRunQueue *rq) { @@ -1518,6 +1532,31 @@ erts_smp_runq_unlock(ErtsRunQueue *rq) #endif } +#ifdef ERTS_ENABLE_LOCK_COUNT + +#define erts_smp_xrunq_lock(rq, xrq) erts_smp_xrunq_lock_x((rq), (xrq), __FILE__, __LINE__) + +ERTS_GLB_INLINE void +erts_smp_xrunq_lock_x(ErtsRunQueue *rq, ErtsRunQueue *xrq, char* file, int line) +{ +#ifdef ERTS_SMP + ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&rq->mtx)); + if (xrq != rq) { + if (erts_smp_mtx_trylock(&xrq->mtx) == EBUSY) { + if (rq < xrq) + erts_smp_mtx_lock_x(&xrq->mtx, file, line); + else { + erts_smp_mtx_unlock(&rq->mtx); + erts_smp_mtx_lock_x(&xrq->mtx, file, line); + erts_smp_mtx_lock_x(&rq->mtx, file, line); + } + } + } +#endif +} + +#else + ERTS_GLB_INLINE void erts_smp_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq) { @@ -1537,6 +1576,8 @@ erts_smp_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq) #endif } +#endif + ERTS_GLB_INLINE void erts_smp_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq) { diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c index a5a753b798..b3b4601a31 100644 --- a/erts/emulator/beam/erl_process_lock.c +++ b/erts/emulator/beam/erl_process_lock.c @@ -1002,7 +1002,7 @@ erts_proc_lock_init(Process *p) #ifdef ERTS_ENABLE_LOCK_COUNT void erts_lcnt_proc_lock_init(Process *p) { - + if (erts_lcnt_rt_options & ERTS_LCNT_OPT_PROCLOCK) { if (p->id != ERTS_INVALID_PID) { 
erts_lcnt_init_lock_x(&(p->lock.lcnt_main), "proc_main", ERTS_LCNT_LT_PROCLOCK, p->id); erts_lcnt_init_lock_x(&(p->lock.lcnt_msgq), "proc_msgq", ERTS_LCNT_LT_PROCLOCK, p->id); @@ -1014,6 +1014,12 @@ void erts_lcnt_proc_lock_init(Process *p) { erts_lcnt_init_lock(&(p->lock.lcnt_link), "proc_link", ERTS_LCNT_LT_PROCLOCK); erts_lcnt_init_lock(&(p->lock.lcnt_status), "proc_status", ERTS_LCNT_LT_PROCLOCK); } + } else { + sys_memzero(&(p->lock.lcnt_main), sizeof(p->lock.lcnt_main)); + sys_memzero(&(p->lock.lcnt_msgq), sizeof(p->lock.lcnt_msgq)); + sys_memzero(&(p->lock.lcnt_link), sizeof(p->lock.lcnt_link)); + sys_memzero(&(p->lock.lcnt_status), sizeof(p->lock.lcnt_status)); + } } @@ -1108,6 +1114,26 @@ void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res } } + +void erts_lcnt_enable_proc_lock_count(int enable) { + int i; + + for (i = 0; i < erts_max_processes; ++i) { + Process* p = process_tab[i]; + if (p) { + if (enable) { + if (!ERTS_LCNT_LOCK_TYPE(&(p->lock.lcnt_main))) { + erts_lcnt_proc_lock_init(p); + } + } else { + if (ERTS_LCNT_LOCK_TYPE(&(p->lock.lcnt_main))) { + erts_lcnt_proc_lock_destroy(p); + } + } + } + } +} + #endif /* ifdef ERTS_ENABLE_LOCK_COUNT */ diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h index 8dbdaccc68..413c45480c 100644 --- a/erts/emulator/beam/erl_process_lock.h +++ b/erts/emulator/beam/erl_process_lock.h @@ -215,6 +215,8 @@ void erts_lcnt_proc_lock_unaquire(erts_proc_lock_t *lock, ErtsProcLocks locks); void erts_lcnt_proc_unlock(erts_proc_lock_t *lock, ErtsProcLocks locks); void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res); +void erts_lcnt_enable_proc_lock_count(int enable); + #endif /* ERTS_ENABLE_LOCK_COUNT*/ diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h index b000e2c5d4..894872dbc0 100644..100755 --- a/erts/emulator/beam/global.h +++ b/erts/emulator/beam/global.h @@ -173,6 +173,7 @@ struct port { char *name; /* String used in the open */ erts_driver_t* drv_ptr; UWord drv_data; + SWord os_pid; /* Child process ID */ ErtsProcList *suspended; /* List of suspended processes. */ LineBuf *linebuf; /* Buffer to hold data not ready for process to get (line oriented I/O)*/ @@ -1187,6 +1188,10 @@ void erts_fire_port_monitor(Port *prt, Eterm ref); void erts_smp_xports_unlock(Port *); #endif +#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT) +void erts_lcnt_enable_io_lock_count(int enable); +#endif + #if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) int erts_lc_is_port_locked(Port *); #endif diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c index 8a2a43bebd..204bff299e 100644 --- a/erts/emulator/beam/io.c +++ b/erts/emulator/beam/io.c @@ -440,6 +440,7 @@ setup_port(Port* prt, Eterm pid, erts_driver_t *driver, sys_strcpy(new_name, name); erts_smp_runq_lock(runq); erts_smp_port_state_lock(prt); + prt->os_pid = -1; prt->status = ERTS_PORT_SFLG_CONNECTED | xstatus; prt->snapshot = erts_smp_atomic32_read_nob(&erts_ports_snapshot); old_name = prt->name; @@ -625,7 +626,11 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */ port->lock = erts_alloc(ERTS_ALC_T_PORT_LOCK, sizeof(erts_smp_mtx_t)); erts_smp_mtx_init_x(port->lock, +#ifdef ERTS_ENABLE_LOCK_COUNT + (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK) ? 
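erts_lcnt_enable_proc_lock_count() walks the process table and initialises or destroys the counting state of each live process, keyed on whether its main lock already carries a lock-count type. A simplified sketch of that toggle loop (the table and helper names are stand-ins):

    #include <stddef.h>

    #define MAX_PROCS 8

    typedef struct { unsigned short lcnt_flag; } proc_t;

    static proc_t *process_tab[MAX_PROCS];

    static int  lcnt_is_counted(proc_t *p)   { return p->lcnt_flag != 0; }
    static void lcnt_init_proc(proc_t *p)    { p->lcnt_flag = 1; }
    static void lcnt_destroy_proc(proc_t *p) { p->lcnt_flag = 0; }

    /* Enable or disable lock counting for every live process. */
    static void enable_proc_lock_count(int enable)
    {
        for (size_t i = 0; i < MAX_PROCS; i++) {
            proc_t *p = process_tab[i];
            if (!p)
                continue;                       /* empty table slot */
            if (enable && !lcnt_is_counted(p))
                lcnt_init_proc(p);
            else if (!enable && lcnt_is_counted(p))
                lcnt_destroy_proc(p);
        }
    }

    int main(void)
    {
        static proc_t p0;
        process_tab[0] = &p0;       /* one live process, rest of the slots empty */
        enable_proc_lock_count(1);  /* switch counting on for all live processes */
        return p0.lcnt_flag ? 0 : 1;
    }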
"port_lock" : NULL, +#else "port_lock", +#endif port->id); xstatus |= ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK; } @@ -783,7 +788,13 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */ creator_port->xports = xplp; port->lock = erts_alloc(ERTS_ALC_T_PORT_LOCK, sizeof(erts_smp_mtx_t)); - erts_smp_mtx_init_locked_x(port->lock, "port_lock", port_id); + erts_smp_mtx_init_locked_x(port->lock, +#ifdef ERTS_ENABLE_LOCK_COUNT + (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK) ? "port_lock" : NULL, +#else + "port_lock", +#endif + port_id); xstatus |= ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK; } @@ -1347,7 +1358,13 @@ void init_io(void) erts_smp_atomic_init_nob(&erts_port[i].refc, 0); erts_port[i].lock = NULL; erts_port[i].xports = NULL; - erts_smp_spinlock_init_x(&erts_port[i].state_lck, "port_state", make_small(i)); + erts_smp_spinlock_init_x(&erts_port[i].state_lck, +#ifdef ERTS_ENABLE_LOCK_COUNT + (erts_lcnt_rt_options & ERTS_LCNT_OPT_PORTLOCK) ? "port_state" : NULL, +#else + "port_state", +#endif + make_small(0)); #endif erts_port[i].tracer_proc = NIL; erts_port[i].trace_flags = 0; @@ -1380,6 +1397,27 @@ void init_io(void) erts_smp_mtx_unlock(&erts_driver_list_lock); } +#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP) +void erts_lcnt_enable_io_lock_count(int enable) { + int i; + + for (i = 0; i < erts_max_ports; i++) { + Port* p = &erts_port[i]; + if (enable) { + erts_lcnt_init_lock_x(&p->state_lck.lcnt, "port_state", ERTS_LCNT_LT_SPINLOCK, make_small(i)); + if (p->lock) { + erts_lcnt_init_lock_x(&p->lock->lcnt, "port_lock", ERTS_LCNT_LT_MUTEX, make_small(i)); + } + } else { + erts_lcnt_destroy_lock(&p->state_lck.lcnt); + if (p->lock) { + erts_lcnt_destroy_lock(&p->lock->lcnt); + } + } + } +} +#endif + /* * Buffering of data when using line oriented I/O on ports */ @@ -3222,6 +3260,8 @@ driver_deliver_term(ErlDrvPort port, Uint size = ptr[1]; Uint offset = ptr[2]; + erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) size); + if (size <= ERL_ONHEAP_BIN_LIMIT) { ErlHeapBin* hbp = (ErlHeapBin *) hp; hp += heap_bin_size(size); @@ -3253,6 +3293,9 @@ driver_deliver_term(ErlDrvPort port, case ERL_DRV_BUF2BINARY: { /* char*, size */ byte *bufp = (byte *) ptr[0]; Uint size = (Uint) ptr[1]; + + erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) size); + if (size <= ERL_ONHEAP_BIN_LIMIT) { ErlHeapBin* hbp = (ErlHeapBin *) hp; hp += heap_bin_size(size); @@ -3289,6 +3332,7 @@ driver_deliver_term(ErlDrvPort port, } case ERL_DRV_STRING: /* char*, length */ + erts_smp_atomic_add_nob(&erts_bytes_in, (erts_aint_t) ptr[1]); mess = buf_to_intlist(&hp, (char*)ptr[0], ptr[1], NIL); ptr += 2; break; |