Diffstat (limited to 'erts/emulator')
41 files changed, 1364 insertions, 429 deletions
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c index 1026e5f649..8bfb7d2ad2 100644 --- a/erts/emulator/beam/beam_emu.c +++ b/erts/emulator/beam/beam_emu.c @@ -3503,6 +3503,7 @@ get_map_elements_fail: * I[0]: &&call_nif * I[1]: Function pointer to NIF function * I[2]: Pointer to erl_module_nif + * I[3]: Function pointer to dirty NIF */ BifFunction vbf; @@ -3523,13 +3524,6 @@ get_map_elements_fail: reg[0] = r(0); nif_bif_result = (*fp)(&env, bif_nif_arity, reg); erts_post_nif(&env); -#ifdef ERTS_DIRTY_SCHEDULERS - if (is_non_value(nif_bif_result) && c_p->freason == TRAP) { - Export* ep = ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(c_p); - ep->code[0] = I[-3]; - ep->code[1] = I[-2]; - } -#endif } ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(nif_bif_result)); PROCESS_MAIN_CHK_LOCKS(c_p); diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c index e96177cfd9..cfc6146b0a 100644 --- a/erts/emulator/beam/beam_load.c +++ b/erts/emulator/beam/beam_load.c @@ -2363,7 +2363,11 @@ load_code(LoaderState* stp) if (stp->may_load_nif) { const int finfo_ix = ci - FUNC_INFO_SZ; - enum { MIN_FUNC_SZ = 3 }; +#ifdef ERTS_DIRTY_SCHEDULERS + enum { MIN_FUNC_SZ = 4 }; +#else + enum { MIN_FUNC_SZ = 3 }; +#endif if (finfo_ix - last_func_start < MIN_FUNC_SZ && last_func_start) { /* Must make room for call_nif op */ int pad = MIN_FUNC_SZ - (finfo_ix - last_func_start); diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c index fcbeb6cf5c..a5be8e1529 100644 --- a/erts/emulator/beam/bif.c +++ b/erts/emulator/beam/bif.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2013. All Rights Reserved. + * Copyright Ericsson AB 1996-2014. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -1869,6 +1869,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) { } else if (is_external_pid(to)) { dep = external_pid_dist_entry(to); if(dep == erts_this_dist_entry) { +#if DEBUG erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); erts_dsprintf(dsbufp, "Discarding message %T from %T to %T in an old " @@ -1879,6 +1880,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) { external_pid_creation(to), erts_this_node->creation); erts_send_error_to_logger(p->group_leader, dsbufp); +#endif return 0; } return remote_send(p, dep, to, to, msg, suspend); @@ -1912,6 +1914,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) { } else if (is_external_port(to) && (external_port_dist_entry(to) == erts_this_dist_entry)) { +#if DEBUG erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); erts_dsprintf(dsbufp, "Discarding message %T from %T to %T in an old " @@ -1922,6 +1925,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) { external_port_creation(to), erts_this_node->creation); erts_send_error_to_logger(p->group_leader, dsbufp); +#endif return 0; } else if (is_internal_port(to)) { int ret_val; @@ -2887,9 +2891,6 @@ static int do_list_to_integer(Process *p, Eterm orig_list, res = big_plus_small(res, m, hp); } - if (is_big(res)) /* check if small */ - res = big_plus_small(res, 0, hp); /* includes conversion to small */ - if (neg) { if (is_small(res)) res = make_small(-signed_val(res)); @@ -2899,8 +2900,12 @@ static int do_list_to_integer(Process *p, Eterm orig_list, } } - if (is_big(res)) { - hp += (big_arity(res)+1); + if (is_not_small(res)) { + res = big_plus_small(res, 0, hp); 
/* includes conversion to small */ + + if (is_not_small(res)) { + hp += (big_arity(res)+1); + } } HRelease(p,hp_end,hp); } diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab index 011e49f1fe..e68b8e6274 100644 --- a/erts/emulator/beam/bif.tab +++ b/erts/emulator/beam/bif.tab @@ -601,6 +601,10 @@ bif maps:values/1 bif erts_internal:cmp_term/2 # +# New in 17.1. +# +bif erlang:fun_info_mfa/1 +# # Obsolete # diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c index 41a041eba6..a8710dd910 100644 --- a/erts/emulator/beam/big.c +++ b/erts/emulator/beam/big.c @@ -274,6 +274,9 @@ _b = _b << _s; \ _vn1 = _b >> H_EXP; \ _vn0 = _b & LO_MASK; \ + /* Sometimes _s is 0 which triggers undefined behaviour for the \ + (_a0>>(D_EXP-_s)) shift, but this is ok because the \ + & -s will make it all to 0 later anyways. */ \ _un32 = (_a1 << _s) | ((_a0>>(D_EXP-_s)) & (-_s >> (D_EXP-1))); \ _un10 = _a0 << _s; \ _un1 = _un10 >> H_EXP; \ @@ -1506,13 +1509,15 @@ Eterm uword_to_big(UWord x, Eterm *y) */ Eterm small_to_big(Sint x, Eterm *y) { + Uint xu; if (x >= 0) { + xu = x; *y = make_pos_bignum_header(1); } else { - x = -x; + xu = -(Uint)x; *y = make_neg_bignum_header(1); } - BIG_DIGIT(y, 0) = x; + BIG_DIGIT(y, 0) = xu; return make_big(y); } @@ -1540,21 +1545,24 @@ Eterm erts_uint64_to_big(Uint64 x, Eterm **hpp) Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp) { Eterm *hp = *hpp; + Uint64 ux; int neg; - if (x >= 0) + if (x >= 0) { neg = 0; + ux = x; + } else { neg = 1; - x = -x; + ux = -(Uint64)x; } #if defined(ARCH_32) || HALFWORD_HEAP - if (x >= (((Uint64) 1) << 32)) { + if (ux >= (((Uint64) 1) << 32)) { if (neg) *hp = make_neg_bignum_header(2); else *hp = make_pos_bignum_header(2); - BIG_DIGIT(hp, 0) = (Uint) (x & ((Uint) 0xffffffff)); - BIG_DIGIT(hp, 1) = (Uint) ((x >> 32) & ((Uint) 0xffffffff)); + BIG_DIGIT(hp, 0) = (Uint) (ux & ((Uint) 0xffffffff)); + BIG_DIGIT(hp, 1) = (Uint) ((ux >> 32) & ((Uint) 0xffffffff)); *hpp += 3; } else @@ -1564,7 +1572,7 @@ Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp) *hp = make_neg_bignum_header(1); else *hp = make_pos_bignum_header(1); - BIG_DIGIT(hp, 0) = (Uint) x; + BIG_DIGIT(hp, 0) = (Uint) ux; *hpp += 2; } return make_big(hp); @@ -2667,9 +2675,6 @@ Eterm erts_chars_to_integer(Process *BIF_P, char *bytes, res = big_plus_small(res, m, hp); } - if (is_big(res)) /* check if small */ - res = big_plus_small(res, 0, hp); /* includes conversion to small */ - if (neg) { if (is_small(res)) res = make_small(-signed_val(res)); @@ -2679,8 +2684,12 @@ Eterm erts_chars_to_integer(Process *BIF_P, char *bytes, } } - if (is_big(res)) { - hp += (big_arity(res) + 1); + if (is_not_small(res)) { + res = big_plus_small(res, 0, hp); /* includes conversion to small */ + + if (is_not_small(res)) { + hp += (big_arity(res) + 1); + } } HRelease(BIF_P, hp_end, hp); goto bytebuf_to_integer_1_done; diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h index d80111822e..da31876d75 100644 --- a/erts/emulator/beam/big.h +++ b/erts/emulator/beam/big.h @@ -101,7 +101,7 @@ typedef Uint dsize_t; /* Vector size type */ #define ERTS_SINT64_HEAP_SIZE(X) \ (IS_SSMALL((X)) \ ? 0 \ - : ERTS_UINT64_BIG_HEAP_SIZE__((X) >= 0 ? (X) : -(X))) + : ERTS_UINT64_BIG_HEAP_SIZE__((X) >= 0 ? (X) : -(Uint64)(X))) #define ERTS_UINT64_HEAP_SIZE(X) \ (IS_USMALL(0, (X)) ? 
0 : ERTS_UINT64_BIG_HEAP_SIZE__((X))) diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c index 7d4f52ee23..08265b590d 100644 --- a/erts/emulator/beam/break.c +++ b/erts/emulator/beam/break.c @@ -256,6 +256,7 @@ print_process_info(int to, void *to_arg, Process *p) p->current[1], p->current[2]); } + erts_print(to, to_arg, "Run queue: %d\n", erts_get_runq_proc(p)->ix); erts_print(to, to_arg, "Spawned by: %T\n", p->parent); approx_started = (time_t) p->approx_started; diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c index 05ac24e04d..90cd227fae 100644 --- a/erts/emulator/beam/erl_alloc.c +++ b/erts/emulator/beam/erl_alloc.c @@ -1873,8 +1873,8 @@ erts_alc_fatal_error(int error, int func, ErtsAlcType_t n, ...) size = va_arg(argp, Uint); va_end(argp); erl_exit(1, - "%s: Cannot %s %lu bytes of memory (of type \"%s\").\n", - allctr_str, op, size, t_str); + "%s: Cannot %s %lu bytes of memory (of type \"%s\", thread %d).\n", + allctr_str, op, size, t_str, ERTS_ALC_GET_THR_IX()); break; } case ERTS_ALC_E_NOALLCTR: diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c index 45f0cc4312..a4e164bf51 100644 --- a/erts/emulator/beam/erl_alloc_util.c +++ b/erts/emulator/beam/erl_alloc_util.c @@ -3274,6 +3274,15 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags) ASSERT(!(flags & CFLG_FORCE_MSEG && flags & CFLG_FORCE_SYS_ALLOC)); + if (umem_sz > (ERTS_UINT_MAX - ERTS_UINT_MAX/100)) { + /* Do an overly conservative _overflow_ check here so we don't + * have to deal with it from here on. I guess we could be more accurate + * but I don't think the need to allocate over 99% of the address space + * will ever arise on any machine, neither 32 nor 64 bit. + */ + return NULL; + } + blk_sz = UMEMSZ2BLKSZ(allctr, umem_sz); #ifdef ERTS_SMP diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c index 7e0e825a0d..3bf78adce7 100644 --- a/erts/emulator/beam/erl_bif_binary.c +++ b/erts/emulator/beam/erl_bif_binary.c @@ -1324,9 +1324,9 @@ static int parse_match_opts_list(Eterm l, Eterm bin, Uint *posp, Uint *endp) goto badarg; } if (len < 0) { - Sint lentmp = -len; + Uint lentmp = -(Uint)len; /* overflow */ - if (lentmp == len || lentmp < 0 || -lentmp != len) { + if ((Sint)lentmp < 0) { goto badarg; } len = lentmp; @@ -1555,9 +1555,9 @@ BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen) goto badarg; } if (len < 0) { - Sint lentmp = -len; + Uint lentmp = -(Uint)len; /* overflow */ - if (lentmp == len || lentmp < 0 || -lentmp != len) { + if ((Sint)lentmp < 0) { goto badarg; } len = lentmp; @@ -1644,9 +1644,9 @@ BIF_RETTYPE erts_gc_binary_part(Process *p, Eterm *reg, Eterm live, int range_is goto badarg; } if (len < 0) { - Sint lentmp = -len; + Uint lentmp = -(Uint)len; /* overflow */ - if (lentmp == len || lentmp < 0 || -lentmp != len) { + if ((Sint)lentmp < 0) { goto badarg; } len = lentmp; @@ -2213,9 +2213,9 @@ static BIF_RETTYPE binary_bin_to_list_common(Process *p, goto badarg; } if (len < 0) { - Sint lentmp = -len; + Uint lentmp = -(Uint)len; /* overflow */ - if (lentmp == len || lentmp < 0 || -lentmp != len) { + if ((Sint)lentmp < 0) { goto badarg; } len = lentmp; diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index 4d5e55aaf5..6efe9d9550 100644 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -3055,6 +3055,25 @@ fun_info_2(BIF_ALIST_2) return TUPLE2(hp, what, val); } +BIF_RETTYPE 
+fun_info_mfa_1(BIF_ALIST_1) +{ + Process* p = BIF_P; + Eterm fun = BIF_ARG_1; + Eterm* hp; + + if (is_fun(fun)) { + ErlFunThing* funp = (ErlFunThing *) fun_val(fun); + hp = HAlloc(p, 4); + BIF_RET(TUPLE3(hp,funp->fe->module,funp->fe->address[-2],make_small(funp->arity))); + } else if (is_export(fun)) { + Export* exp = (Export *) ((UWord) (export_val(fun))[1]); + hp = HAlloc(p, 4); + BIF_RET(TUPLE3(hp,exp->code[0],exp->code[1],make_small(exp->code[2]))); + } + BIF_ERROR(p, BADARG); +} + BIF_RETTYPE is_process_alive_1(BIF_ALIST_1) { if(is_internal_pid(BIF_ARG_1)) { @@ -3856,16 +3875,19 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s Uint tries = 0, colls = 0; unsigned long timer_s = 0, timer_ns = 0, timer_n = 0; unsigned int line = 0; + unsigned int i; Eterm af, uil; Eterm uit, uic; Eterm uits, uitns, uitn; Eterm tt, tstat, tloc, t; + Eterm thist, vhist[ERTS_LCNT_HISTOGRAM_SLOT_SIZE]; /* term: - * [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}}}] + * [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}}, + * { .. histogram .. }] */ - + tries = (Uint) ethr_atomic_read(&stats->tries); colls = (Uint) ethr_atomic_read(&stats->colls); @@ -3874,23 +3896,27 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s timer_ns = stats->timer.ns; timer_n = stats->timer_n; - af = erts_atom_put(stats->file, strlen(stats->file), ERTS_ATOM_ENC_LATIN1, 1); + af = erts_atom_put((byte *)stats->file, strlen(stats->file), ERTS_ATOM_ENC_LATIN1, 1); uil = erts_bld_uint( hpp, szp, line); tloc = erts_bld_tuple(hpp, szp, 2, af, uil); - uit = erts_bld_uint( hpp, szp, tries); - uic = erts_bld_uint( hpp, szp, colls); - + uit = erts_bld_uint( hpp, szp, tries); + uic = erts_bld_uint( hpp, szp, colls); + uits = erts_bld_uint( hpp, szp, timer_s); uitns = erts_bld_uint( hpp, szp, timer_ns); uitn = erts_bld_uint( hpp, szp, timer_n); tt = erts_bld_tuple(hpp, szp, 3, uits, uitns, uitn); tstat = erts_bld_tuple(hpp, szp, 3, uit, uic, tt); - - t = erts_bld_tuple(hpp, szp, 2, tloc, tstat); - - res = erts_bld_cons( hpp, szp, t, res); + + for(i = 0; i < ERTS_LCNT_HISTOGRAM_SLOT_SIZE; i++) { + vhist[i] = erts_bld_uint(hpp, szp, stats->hist.ns[i]); + } + thist = erts_bld_tuplev(hpp, szp, ERTS_LCNT_HISTOGRAM_SLOT_SIZE, vhist); + + t = erts_bld_tuple(hpp, szp, 3, tloc, tstat, thist); + res = erts_bld_cons( hpp, szp, t, res); return res; } @@ -3911,13 +3937,13 @@ static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock ASSERT(ltype); - type = erts_atom_put(ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1); - name = erts_atom_put(lock->name, strlen(lock->name), ERTS_ATOM_ENC_LATIN1, 1); + type = erts_atom_put((byte *)ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1); + name = erts_atom_put((byte *)lock->name, strlen(lock->name), ERTS_ATOM_ENC_LATIN1, 1); if (lock->flag & ERTS_LCNT_LT_ALLOC) { /* use allocator types names as id's for allocator locks */ ltype = (char *) ERTS_ALC_A2AD(signed_val(lock->id)); - id = erts_atom_put(ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1); + id = erts_atom_put((byte *)ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1); } else if (lock->flag & ERTS_LCNT_LT_PROCLOCK) { /* use registered names as id's for process locks if available */ proc = erts_proc_lookup(lock->id); @@ -3928,16 +3954,15 @@ static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock id = lock->id; } } else { - id = lock->id; + id = lock->id; } - + for (i = 0; i < lock->n_stats; i++) { stats = 
lcnt_build_lock_stats_term(hpp, szp, &(lock->stats[i]), stats); } - - t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats); - - res = erts_bld_cons( hpp, szp, t, res); + + t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats); + res = erts_bld_cons( hpp, szp, t, res); return res; } @@ -3957,12 +3982,12 @@ static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_data_t *da dtns = erts_bld_uint( hpp, szp, data->duration.ns); tdt = erts_bld_tuple(hpp, szp, 2, dts, dtns); - adur = erts_atom_put(str_duration, strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1); + adur = erts_atom_put((byte *)str_duration, strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1); tdur = erts_bld_tuple(hpp, szp, 2, adur, tdt); /* lock tuple */ - aloc = erts_atom_put(str_locks, strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1); + aloc = erts_atom_put((byte *)str_locks, strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1); for (lock = data->current_locks->head; lock != NULL ; lock = lock->next ) { lloc = lcnt_build_lock_term(hpp, szp, lock, lloc); diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h index 6c9f53ce87..06dfeb1260 100644 --- a/erts/emulator/beam/erl_binary.h +++ b/erts/emulator/beam/erl_binary.h @@ -236,6 +236,8 @@ erts_bin_drv_alloc_fnf(Uint size) { Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD; void *res; + if (bsize < size) /* overflow */ + return NULL; res = erts_alloc_fnf(ERTS_ALC_T_DRV_BINARY, bsize); ERTS_CHK_BIN_ALIGNMENT(res); return (Binary *) res; @@ -246,6 +248,8 @@ erts_bin_drv_alloc(Uint size) { Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD; void *res; + if (bsize < size) /* overflow */ + erts_alloc_enomem(ERTS_ALC_T_DRV_BINARY, size); res = erts_alloc(ERTS_ALC_T_DRV_BINARY, bsize); ERTS_CHK_BIN_ALIGNMENT(res); return (Binary *) res; @@ -257,6 +261,8 @@ erts_bin_nrml_alloc(Uint size) { Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD; void *res; + if (bsize < size) /* overflow */ + erts_alloc_enomem(ERTS_ALC_T_BINARY, size); res = erts_alloc(ERTS_ALC_T_BINARY, bsize); ERTS_CHK_BIN_ALIGNMENT(res); return (Binary *) res; @@ -267,11 +273,12 @@ erts_bin_realloc_fnf(Binary *bp, Uint size) { Binary *nbp; Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD; + ErtsAlcType_t type = (bp->flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY + : ERTS_ALC_T_BINARY; ASSERT((bp->flags & BIN_FLAG_MAGIC) == 0); - if (bp->flags & BIN_FLAG_DRV) - nbp = erts_realloc_fnf(ERTS_ALC_T_DRV_BINARY, (void *) bp, bsize); - else - nbp = erts_realloc_fnf(ERTS_ALC_T_BINARY, (void *) bp, bsize); + if (bsize < size) /* overflow */ + return NULL; + nbp = erts_realloc_fnf(type, (void *) bp, bsize); ERTS_CHK_BIN_ALIGNMENT(nbp); return nbp; } @@ -281,17 +288,14 @@ erts_bin_realloc(Binary *bp, Uint size) { Binary *nbp; Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD; + ErtsAlcType_t type = (bp->flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY + : ERTS_ALC_T_BINARY; ASSERT((bp->flags & BIN_FLAG_MAGIC) == 0); - if (bp->flags & BIN_FLAG_DRV) - nbp = erts_realloc_fnf(ERTS_ALC_T_DRV_BINARY, (void *) bp, bsize); - else - nbp = erts_realloc_fnf(ERTS_ALC_T_BINARY, (void *) bp, bsize); + if (bsize < size) /* overflow */ + erts_realloc_enomem(type, bp, size); + nbp = erts_realloc_fnf(type, (void *) bp, bsize); if (!nbp) - erts_realloc_n_enomem(ERTS_ALC_T2N(bp->flags & BIN_FLAG_DRV - ? 
ERTS_ALC_T_DRV_BINARY - : ERTS_ALC_T_BINARY), - bp, - bsize); + erts_realloc_enomem(type, bp, bsize); ERTS_CHK_BIN_ALIGNMENT(nbp); return nbp; } @@ -312,6 +316,7 @@ erts_create_magic_binary(Uint size, void (*destructor)(Binary *)) { Uint bsize = ERTS_MAGIC_BIN_SIZE(size); Binary* bptr = erts_alloc_fnf(ERTS_ALC_T_BINARY, bsize); + ASSERT(bsize > size); if (!bptr) erts_alloc_n_enomem(ERTS_ALC_T2N(ERTS_ALC_T_BINARY), bsize); ERTS_CHK_BIN_ALIGNMENT(bptr); diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c index aa15d2cc57..0db42d4325 100644 --- a/erts/emulator/beam/erl_gc.c +++ b/erts/emulator/beam/erl_gc.c @@ -2018,6 +2018,20 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset) roots[n].sz = 1; n++; } + + /* + * If a NIF has saved arguments, they need to be added + */ + if (ERTS_PROC_GET_NIF_TRAP_EXPORT(p)) { + Eterm* argv; + int argc; + if (erts_setup_nif_gc(p, &argv, &argc)) { + roots[n].v = argv; + roots[n].sz = argc; + n++; + } + } + ASSERT(n <= rootset->size); mp = p->msg.first; diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c index 5e6d812242..88c4006934 100644 --- a/erts/emulator/beam/erl_init.c +++ b/erts/emulator/beam/erl_init.c @@ -2066,8 +2066,10 @@ erl_exit_vv(int n, int flush_async, char *fmt, va_list args1, va_list args2) system_cleanup(flush_async); save_statistics(); - - an = abs(n); + if (n < 0) + an = -(unsigned int)n; + else + an = n; if (erts_mtrace_enabled) erts_mtrace_exit((Uint32) an); diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c index c13eb87012..b105ece6f1 100644 --- a/erts/emulator/beam/erl_lock_check.c +++ b/erts/emulator/beam/erl_lock_check.c @@ -139,7 +139,6 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "drv_tsd", NULL }, { "async_enq_mtx", NULL }, #ifdef ERTS_SMP - { "sys_msg_q", NULL }, { "atom_tab", NULL }, { "make_ref", NULL }, { "misc_op_list_pre_alloc_lock", "address" }, @@ -148,6 +147,7 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "btm_pre_alloc_lock", NULL, }, { "dist_entry_out_queue", "address" }, { "port_sched_lock", "port_id" }, + { "sys_msg_q", NULL }, { "port_table", NULL }, #endif { "mtrace_op", NULL }, @@ -227,8 +227,7 @@ rw_op_str(Uint16 flags) case ERTS_LC_FLG_LO_READ: return " (r)"; case ERTS_LC_FLG_LO_WRITE: - erts_fprintf(stderr, "\nInternal error\n"); - lc_abort(); + ERTS_INTERNAL_ERROR("Only write flag present"); default: break; } @@ -311,8 +310,7 @@ static ERTS_INLINE void lc_free(void *p) static void *lc_core_alloc(void) { lc_unlock(); - erts_fprintf(stderr, "Lock checker out of memory!\n"); - lc_abort(); + ERTS_INTERNAL_ERROR("Lock checker out of memory!\n"); } #else @@ -325,8 +323,7 @@ static void *lc_core_alloc(void) fbs = (erts_lc_free_block_t *) malloc(sizeof(erts_lc_free_block_t) * ERTS_LC_FB_CHUNK_SIZE); if (!fbs) { - erts_fprintf(stderr, "Lock checker failed to allocate memory!\n"); - lc_abort(); + ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!"); } for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) { #ifdef DEBUG @@ -366,11 +363,11 @@ create_locked_locks(char *thread_name) { erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t)); if (!l_lcks) - lc_abort(); + ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!"); l_lcks->thread_name = strdup(thread_name ? 
thread_name : "unknown"); if (!l_lcks->thread_name) - lc_abort(); + ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!"); l_lcks->emu_thread = 0; l_lcks->tid = erts_thr_self(); @@ -691,7 +688,7 @@ erts_lc_set_thread_name(char *thread_name) free((void *) l_lcks->thread_name); l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown"); if (!l_lcks->thread_name) - lc_abort(); + ERTS_INTERNAL_ERROR("strdup failed"); } l_lcks->emu_thread = 1; } @@ -1330,7 +1327,7 @@ erts_lc_init(void) #endif /* #ifdef ERTS_LC_STATIC_ALLOC */ if (ethr_spinlock_init(&free_blocks_lock) != 0) - lc_abort(); + ERTS_INTERNAL_ERROR("spinlock_init failed"); erts_tsd_key_create(&locks_key,"erts_lock_check_key"); } diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c index 6f44bf097b..cf6996ea06 100644 --- a/erts/emulator/beam/erl_lock_count.c +++ b/erts/emulator/beam/erl_lock_count.c @@ -61,6 +61,25 @@ static ERTS_INLINE void lcnt_unlock(void) { ethr_mutex_unlock(&lcnt_data_lock); } +const int log2_tab64[64] = { + 63, 0, 58, 1, 59, 47, 53, 2, + 60, 39, 48, 27, 54, 33, 42, 3, + 61, 51, 37, 40, 49, 18, 28, 20, + 55, 30, 34, 11, 43, 14, 22, 4, + 62, 57, 46, 52, 38, 26, 32, 41, + 50, 36, 17, 19, 29, 10, 13, 21, + 56, 45, 25, 31, 35, 16, 9, 12, + 44, 24, 15, 8, 23, 7, 6, 5}; + +static ERTS_INLINE int lcnt_log2(Uint64 v) { + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v |= v >> 32; + return log2_tab64[((Uint64)((v - (v >> 1))*0x07EDD5E59A4E28C2)) >> 58]; +} static char* lcnt_lock_type(Uint16 flag) { switch(flag & ERTS_LCNT_LT_ALL) { @@ -81,19 +100,20 @@ static void lcnt_clear_stats(erts_lcnt_lock_stats_t *stats) { stats->timer_n = 0; stats->file = (char *)str_undefined; stats->line = 0; + sys_memzero(stats->hist.ns, sizeof(stats->hist.ns)); } static void lcnt_time(erts_lcnt_time_t *time) { -#ifdef HAVE_GETHRTIME +#if 0 || defined(HAVE_GETHRTIME) SysHrTime hr_time; hr_time = sys_gethrtime(); time->s = (unsigned long)(hr_time / 1000000000LL); time->ns = (unsigned long)(hr_time - 1000000000LL*time->s); -#else - SysTimeval tv; - sys_gettimeofday(&tv); - time->s = tv.tv_sec; - time->ns = tv.tv_usec*1000LL; +#else + SysTimeval tv; + sys_gettimeofday(&tv); + time->s = tv.tv_sec; + time->ns = tv.tv_usec*1000LL; #endif } @@ -111,28 +131,29 @@ static void lcnt_time_diff(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_ dns += 1000000000LL; } + ASSERT(ds >= 0); + d->s = ds; d->ns = dns; } -/* difference d must be positive */ +/* difference d must be non-negative */ static void lcnt_time_add(erts_lcnt_time_t *t, erts_lcnt_time_t *d) { - unsigned long ngns = 0; - t->s += d->s; t->ns += d->ns; - ngns = t->ns / 1000000000LL; + t->s += t->ns / 1000000000LL; t->ns = t->ns % 1000000000LL; - - t->s += ngns; } static erts_lcnt_thread_data_t *lcnt_thread_data_alloc(void) { erts_lcnt_thread_data_t *eltd; eltd = (erts_lcnt_thread_data_t*)malloc(sizeof(erts_lcnt_thread_data_t)); + if (!eltd) { + ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!"); + } eltd->timer_set = 0; eltd->lock_in_conflict = 0; @@ -158,59 +179,64 @@ static char* lock_opt(Uint16 flag) { return "--"; } -static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action, char *extra) { - erts_aint_t colls, tries, w_state, r_state; - erts_lcnt_lock_stats_t *stats = NULL; - +static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action) { + erts_aint_t w_state, r_state; char *type; - int i; - + + if (strcmp(lock->name, "run_queue") != 0) return; type = 
lcnt_lock_type(lock->flag); r_state = ethr_atomic_read(&lock->r_state); w_state = ethr_atomic_read(&lock->w_state); - if (lock->flag & flag) { - erts_printf("%20s [%30s] [r/w state %4ld/%4ld] id %T %s\r\n", - action, - lock->name, - r_state, - w_state, - lock->id, - extra); + erts_fprintf(stderr,"%10s [%24s] [r/w state %4ld/%4ld] %2s id %T\r\n", + action, + lock->name, + r_state, + w_state, + type, + lock->id); } } - -static void print_lock(erts_lcnt_lock_t *lock, char *action) { - if (strcmp(lock->name, "proc_main") == 0) { - print_lock_x(lock, ERTS_LCNT_LT_ALL, action, ""); - } -} - #endif static erts_lcnt_lock_stats_t *lcnt_get_lock_stats(erts_lcnt_lock_t *lock, char *file, unsigned int line) { unsigned int i; erts_lcnt_lock_stats_t *stats = NULL; - - for (i = 0; i < lock->n_stats; i++) { - if ((lock->stats[i].file == file) && (lock->stats[i].line == line)) { - return &(lock->stats[i]); - } - } - if (lock->n_stats < ERTS_LCNT_MAX_LOCK_LOCATIONS) { - stats = &lock->stats[lock->n_stats]; - lock->n_stats++; - stats->file = file; - stats->line = line; - return stats; + if (erts_lcnt_rt_options & ERTS_LCNT_OPT_LOCATION) { + for (i = 0; i < lock->n_stats; i++) { + if ((lock->stats[i].file == file) && (lock->stats[i].line == line)) { + return &(lock->stats[i]); + } + } + if (lock->n_stats < ERTS_LCNT_MAX_LOCK_LOCATIONS) { + stats = &lock->stats[lock->n_stats]; + lock->n_stats++; + stats->file = file; + stats->line = line; + return stats; + } } return &lock->stats[0]; +} +static void lcnt_update_stats_hist(erts_lcnt_hist_t *hist, erts_lcnt_time_t *time_wait) { + int idx; + unsigned long r; + + if (time_wait->s > 0 || time_wait->ns > ERTS_LCNT_HISTOGRAM_MAX_NS) { + idx = ERTS_LCNT_HISTOGRAM_SLOT_SIZE - 1; + } else { + r = time_wait->ns >> ERTS_LCNT_HISTOGRAM_RSHIFT; + if (r) idx = lcnt_log2(r); + else idx = 0; + } + hist->ns[idx]++; } -static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflict, erts_lcnt_time_t *time_wait) { +static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflict, + erts_lcnt_time_t *time_wait) { ethr_atomic_inc(&stats->tries); @@ -220,6 +246,7 @@ static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflic if (time_wait) { lcnt_time_add(&(stats->timer), time_wait); stats->timer_n++; + lcnt_update_stats_hist(&stats->hist,time_wait); } } @@ -248,6 +275,9 @@ void erts_lcnt_init() { /* init lcnt structure */ erts_lcnt_data = (erts_lcnt_data_t*)malloc(sizeof(erts_lcnt_data_t)); + if (!erts_lcnt_data) { + ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!"); + } erts_lcnt_data->current_locks = erts_lcnt_list_init(); erts_lcnt_data->deleted_locks = erts_lcnt_list_init(); @@ -269,6 +299,9 @@ erts_lcnt_lock_list_t *erts_lcnt_list_init(void) { erts_lcnt_lock_list_t *list; list = (erts_lcnt_lock_list_t*)malloc(sizeof(erts_lcnt_lock_list_t)); + if (!list) { + ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!"); + } list->head = NULL; list->tail = NULL; list->n = 0; @@ -330,8 +363,9 @@ void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock) /* interface to erl_threads.h */ /* only lock on init and destroy, all others should use atomics */ void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag ) { - erts_lcnt_init_lock_x(lock, name, flag, am_undefined); + erts_lcnt_init_lock_x(lock, name, flag, NIL); } + void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id) { int i; if (!name) { @@ -360,7 +394,6 @@ void 
erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eter } erts_lcnt_list_insert(erts_lcnt_data->current_locks, lock); - lcnt_unlock(); } @@ -375,6 +408,9 @@ void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock) { /* copy structure and insert the copy */ deleted_lock = (erts_lcnt_lock_t*)malloc(sizeof(erts_lcnt_lock_t)); + if (!deleted_lock) { + ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!"); + } memcpy(deleted_lock, lock, sizeof(erts_lcnt_lock_t)); deleted_lock->next = NULL; @@ -417,8 +453,9 @@ void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) { if ((w_state > 0) || (r_state > 0)) { eltd->lock_in_conflict = 1; - if (eltd->timer_set == 0) + if (eltd->timer_set == 0) { lcnt_time(&eltd->timer); + } eltd->timer_set++; } else { eltd->lock_in_conflict = 0; @@ -433,7 +470,7 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) { if (!ERTS_LCNT_LOCK_TYPE(lock)) return; w_state = ethr_atomic_read(&lock->w_state); - ethr_atomic_inc( &lock->w_state); + ethr_atomic_inc(&lock->w_state); eltd = lcnt_get_thread_data(); @@ -446,10 +483,10 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) { * 'atomicly'. All other locks will block the thread if w_state > 0 * i.e. locked. */ - if (eltd->timer_set == 0) + if (eltd->timer_set == 0) { lcnt_time(&eltd->timer); + } eltd->timer_set++; - } else { eltd->lock_in_conflict = 0; } @@ -459,11 +496,10 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) { void erts_lcnt_lock_unaquire(erts_lcnt_lock_t *lock) { /* should check if this thread was "waiting" */ - if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return; if (!ERTS_LCNT_LOCK_TYPE(lock)) return; - ethr_atomic_dec( &lock->w_state); + ethr_atomic_dec(&lock->w_state); } /* erts_lcnt_lock_post @@ -491,7 +527,7 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line if (!(lock->flag & (ERTS_LCNT_LT_RWMUTEX | ERTS_LCNT_LT_RWSPINLOCK))) { flowstate = ethr_atomic_read(&lock->flowstate); ASSERT(flowstate == 0); - ethr_atomic_inc( &lock->flowstate); + ethr_atomic_inc(&lock->flowstate); } #endif @@ -500,19 +536,12 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line ASSERT(eltd); /* if lock was in conflict, time it */ - - if (erts_lcnt_rt_options & ERTS_LCNT_OPT_LOCATION) { - stats = lcnt_get_lock_stats(lock, file, line); - } else { - stats = &lock->stats[0]; - } - + stats = lcnt_get_lock_stats(lock, file, line); if (eltd->timer_set) { lcnt_time(&timer); lcnt_time_diff(&time_wait, &timer, &(eltd->timer)); lcnt_update_stats(stats, eltd->lock_in_conflict, &time_wait); - eltd->timer_set--; ASSERT(eltd->timer_set >= 0); } else { @@ -541,11 +570,11 @@ void erts_lcnt_unlock(erts_lcnt_lock_t *lock) { /* flowstate */ flowstate = ethr_atomic_read(&lock->flowstate); ASSERT(flowstate == 1); - ethr_atomic_dec( &lock->flowstate); + ethr_atomic_dec(&lock->flowstate); /* write state */ w_state = ethr_atomic_read(&lock->w_state); - ASSERT(w_state > 0) + ASSERT(w_state > 0); #endif ethr_atomic_dec(&lock->w_state); } @@ -582,9 +611,7 @@ void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) { ethr_atomic_inc( &lock->flowstate); #endif ethr_atomic_inc(&lock->w_state); - lcnt_update_stats(&(lock->stats[0]), 0, NULL); - } else { ethr_atomic_inc(&lock->stats[0].tries); ethr_atomic_inc(&lock->stats[0].colls); diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h index 75f7cd028b..ffbb93da1b 100644 --- a/erts/emulator/beam/erl_lock_count.h +++ b/erts/emulator/beam/erl_lock_count.h @@ -35,6 +35,10 @@ * | | | - collisions 
(including trylock busy) * | | | - timer (time spent in waiting for lock) * | | | - n_timer (collisions excluding trylock busy) + * | | | - histogram + * | | | | - # 0 = log2(lock wait_time ns) + * | | | | - ... + * | | | | - # n = log2(lock wait_time ns) * * Each instance of a lock is the unique lock, i.e. set and id in that set. * For each lock there is a set of statistics with where and what impact @@ -68,8 +72,17 @@ #include "ethread.h" +#define ERTS_LCNT_MAX_LOCK_LOCATIONS (10) -#define ERTS_LCNT_MAX_LOCK_LOCATIONS (10) +/* histogram */ +#define ERTS_LCNT_HISTOGRAM_MAX_NS (((unsigned long)1LL << 28) - 1) +#if 0 || defined(HAVE_GETHRTIME) +#define ERTS_LCNT_HISTOGRAM_SLOT_SIZE (30) +#define ERTS_LCNT_HISTOGRAM_RSHIFT (0) +#else +#define ERTS_LCNT_HISTOGRAM_SLOT_SIZE (20) +#define ERTS_LCNT_HISTOGRAM_RSHIFT (10) +#endif #define ERTS_LCNT_LT_SPINLOCK (((Uint16) 1) << 0) #define ERTS_LCNT_LT_RWSPINLOCK (((Uint16) 1) << 1) @@ -104,6 +117,10 @@ typedef struct { extern erts_lcnt_time_t timer_start; +typedef struct { + Uint32 ns[ERTS_LCNT_HISTOGRAM_SLOT_SIZE]; /* log2 array of nano seconds occurences */ +} erts_lcnt_hist_t; + typedef struct erts_lcnt_lock_stats_s { /* "tries" and "colls" needs to be atomic since * trylock busy does not aquire a lock and there @@ -118,6 +135,7 @@ typedef struct erts_lcnt_lock_stats_s { unsigned long timer_n; /* #times waited for lock */ erts_lcnt_time_t timer; /* total wait time for lock */ + erts_lcnt_hist_t hist; } erts_lcnt_lock_stats_t; /* rw locks uses both states, other locks only uses w_state */ diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c index 59a677a12c..8870fac7d9 100644 --- a/erts/emulator/beam/erl_message.c +++ b/erts/emulator/beam/erl_message.c @@ -415,7 +415,13 @@ erts_queue_dist_message(Process *rcvr, if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ)) erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ); - erts_proc_notify_new_message(rcvr); + erts_proc_notify_new_message(rcvr, +#ifdef ERTS_SMP + *rcvr_locks +#else + 0 +#endif + ); } } @@ -542,7 +548,13 @@ queue_message(Process *c_p, if (locked_msgq) erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ); - erts_proc_notify_new_message(receiver); + erts_proc_notify_new_message(receiver, +#ifdef ERTS_SMP + *receiver_locks +#else + 0 +#endif + ); #ifndef ERTS_SMP ERTS_HOLE_CHECK(receiver); diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c index ff551ea3af..1caea6dcf8 100644 --- a/erts/emulator/beam/erl_nif.c +++ b/erts/emulator/beam/erl_nif.c @@ -472,6 +472,18 @@ int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin) struct enif_tmp_obj_t* tmp; byte* raw_ptr; }u; + + if (is_boxed(bin_term) && *binary_val(bin_term) == HEADER_SUB_BIN) { + ErlSubBin* sb = (ErlSubBin*) binary_val(bin_term); + if (sb->is_writable) { + ProcBin* pb = (ProcBin*) binary_val(sb->orig); + ASSERT(pb->thing_word == HEADER_PROC_BIN); + if (pb->flags) { + erts_emasculate_writable_binary(pb); + sb->is_writable = 0; + } + } + } u.tmp = NULL; bin->data = erts_get_aligned_binary_bytes_extra(bin_term, &u.raw_ptr, allocator, sizeof(struct enif_tmp_obj_t)); @@ -1513,72 +1525,251 @@ int enif_consume_timeslice(ErlNifEnv* env, int percent) return ERTS_BIF_REDS_LEFT(env->proc) == 0; } -#ifdef ERTS_DIRTY_SCHEDULERS - -/* NIFs exports need one more item than the Export struct provides, the - * erl_module_nif*, so the DirtyNifExport below adds that. The Export - * member must be first in the struct. 
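The histogram added above buckets each lock wait by log2 of the waited nanoseconds, shifting by ERTS_LCNT_HISTOGRAM_RSHIFT first and saturating at ERTS_LCNT_HISTOGRAM_MAX_NS. Below is a minimal stand-alone sketch of that mapping, not the emulator code: it assumes the non-gethrtime constants from the patch (20 slots, right shift of 10) and replaces lcnt_log2()'s table lookup with a plain loop.

#include <stdio.h>

#define HIST_SLOTS   20                      /* ERTS_LCNT_HISTOGRAM_SLOT_SIZE (no hrtime) */
#define HIST_RSHIFT  10                      /* ERTS_LCNT_HISTOGRAM_RSHIFT */
#define HIST_MAX_NS  ((1UL << 28) - 1)       /* ERTS_LCNT_HISTOGRAM_MAX_NS */

/* Map a lock wait time to a log2 histogram slot, mirroring
 * lcnt_update_stats_hist() and lcnt_log2() from the patch. */
static int wait_ns_to_slot(unsigned long sec, unsigned long ns)
{
    unsigned long r;
    int idx = 0;

    if (sec > 0 || ns > HIST_MAX_NS)
        return HIST_SLOTS - 1;               /* very long waits land in the last slot */
    r = ns >> HIST_RSHIFT;                   /* drop sub-microsecond resolution */
    while (r >>= 1)                          /* integer log2 of the remainder */
        idx++;
    return idx;
}

int main(void)
{
    printf("%d %d %d\n",
           wait_ns_to_slot(0, 800),          /* short wait, slot 0 */
           wait_ns_to_slot(0, 1000000),      /* ~1 ms wait */
           wait_ns_to_slot(1, 0));           /* saturates in the last slot */
    return 0;
}

With these constants, slot 0 covers waits below roughly 2 microseconds and the final slot collects everything at or above about a quarter of a second.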
+/* + * NIF exports need a few more items than the Export struct provides, + * including the erl_module_nif* and a NIF function pointer, so the + * NifExport below adds those. The Export member must be first in the + * struct. The saved_mfa, saved_argc, nif_level, alloced_argv_sz and argv + * members are used to track the MFA and arguments of the top NIF in case a + * chain of one or more enif_schedule_nif() calls results in an exception, + * since in that case the original MFA and registers have to be restored + * before returning to Erlang to ensure stacktrace information associated + * with the exception is correct. */ +typedef ERL_NIF_TERM (*NativeFunPtr)(ErlNifEnv*, int, const ERL_NIF_TERM[]); + typedef struct { Export exp; struct erl_module_nif* m; -} DirtyNifExport; + NativeFunPtr fp; + Eterm saved_mfa[3]; + int saved_argc; + int alloced_argv_sz; + Eterm argv[1]; +} NifExport; -static void -alloc_proc_psd(Process* proc, DirtyNifExport **ep) +/* + * If a process has saved arguments, they need to be part of the GC + * rootset. The function below is called from setup_rootset() in + * erl_gc.c. This function is declared in erl_process.h. + */ +int +erts_setup_nif_gc(Process* proc, Eterm** objv, int* nobj) { + NifExport* ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); + int gc = (ep && ep->saved_argc > 0); + + if (gc) { + *objv = ep->argv; + *nobj = ep->saved_argc; + } + return gc; +} + +/* + * Allocate a NifExport and set it in proc specific data + */ +static NifExport* +allocate_nif_sched_data(Process* proc, int argc) +{ + NifExport* ep; + size_t argv_extra, total; int i; - if (!*ep) { - *ep = erts_alloc(ERTS_ALC_T_PSD, sizeof(DirtyNifExport)); - sys_memset((void*) *ep, 0, sizeof(DirtyNifExport)); - for (i=0; i<ERTS_NUM_CODE_IX; i++) { - (*ep)->exp.addressv[i] = &(*ep)->exp.code[3]; - } - (*ep)->exp.code[3] = (BeamInstr) em_call_nif; + + argv_extra = argc > 1 ? sizeof(Eterm)*(argc-1) : 0; + total = sizeof(NifExport) + argv_extra; + ep = erts_alloc(ERTS_ALC_T_PSD, total); + sys_memset((void*) ep, 0, total); + ep->alloced_argv_sz = argc; + for (i=0; i<ERTS_NUM_CODE_IX; i++) { + ep->exp.addressv[i] = &ep->exp.code[3]; } - (void) ERTS_PROC_SET_DIRTY_SCHED_TRAP_EXPORT(proc, ERTS_PROC_LOCK_MAIN, &(*ep)->exp); + ep->exp.code[3] = (BeamInstr) em_call_nif; + (void) ERTS_PROC_SET_NIF_TRAP_EXPORT(proc, ERTS_PROC_LOCK_MAIN, &ep->exp); + return ep; } +/* + * Initialize a NifExport struct. Create it if needed and store it in the + * proc. The direct_fp function is what will be invoked by op_call_nif, and + * the indirect_fp function, if not NULL, is what the direct_fp function + * will call. If the allocated NifExport isn't enough to hold all of argv, + * allocate a larger one. Save MFA and registers only if the need_save + * parameter is true. 
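allocate_nif_sched_data() above sizes the NifExport with room for argc argument registers by using the trailing argv[1] member: only argc-1 extra Eterm slots are added on top of the struct. Here is a small user-space sketch of the same sizing trick; SchedData and Term are hypothetical stand-ins for illustration, not the emulator types.

#include <stdlib.h>
#include <string.h>

typedef unsigned long Term;                  /* stand-in for Eterm */

/* Fixed header plus a trailing argv array whose first element is
 * already part of the struct, as in NifExport. */
typedef struct {
    int saved_argc;
    int alloced_argv_sz;
    Term argv[1];
} SchedData;

/* Mirrors the sizing in allocate_nif_sched_data(): argc-1 extra Term
 * slots, because argv[1] provides the first one. */
static SchedData *sched_data_alloc(int argc)
{
    size_t extra = argc > 1 ? sizeof(Term) * (argc - 1) : 0;
    SchedData *sd = malloc(sizeof(SchedData) + extra);
    if (!sd)
        return NULL;
    memset(sd, 0, sizeof(SchedData) + extra);
    sd->alloced_argv_sz = argc;
    return sd;
}

int main(void)
{
    SchedData *sd = sched_data_alloc(3);     /* room for argv[0..2] */
    if (!sd)
        return 1;
    sd->argv[2] = 42;                        /* last slot is valid memory */
    free(sd);
    return 0;
}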
+ */ static ERL_NIF_TERM -execute_dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +init_nif_sched_data(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp, + int need_save, int argc, const ERL_NIF_TERM argv[]) { - Eterm* reg = ERTS_PROC_GET_SCHDATA(env->proc)->x_reg_array; - ERL_NIF_TERM result, dirty_result = (ERL_NIF_TERM) reg[0]; - typedef ERL_NIF_TERM (*FinalizerFP)(ErlNifEnv*, ERL_NIF_TERM); - FinalizerFP fp; -#if HAVE_INT64 && SIZEOF_LONG != 8 - ASSERT(sizeof(fp) <= sizeof(ErlNifUInt64)); - enif_get_uint64(env, reg[1], (ErlNifUInt64 *) &fp); -#else - ASSERT(sizeof(fp) <= sizeof(unsigned long)); - enif_get_ulong(env, reg[1], (unsigned long *) &fp); -#endif - result = (*fp)(env, dirty_result); - if (erts_refc_dectest(&env->mod_nif->rt_dtor_cnt, 0) == 0 - && env->mod_nif->mod == NULL) - close_lib(env->mod_nif); - return result; + Process* proc = env->proc; + Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array; + NifExport* ep; + int i; + + ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); + if (!ep) + ep = allocate_nif_sched_data(proc, argc); + else if (need_save && ep->alloced_argv_sz < argc) { + NifExport* new_ep = allocate_nif_sched_data(proc, argc); + erts_free(ERTS_ALC_T_PSD, (void*) ep); + ep = new_ep; + } + ERTS_VBUMP_ALL_REDS(proc); + for (i = 0; i < argc; i++) { + if (need_save) + ep->argv[i] = reg[i]; + reg[i] = (Eterm) argv[i]; + } + if (need_save) { + ep->saved_mfa[0] = proc->current[0]; + ep->saved_mfa[1] = proc->current[1]; + ep->saved_mfa[2] = proc->current[2]; + ep->saved_argc = argc; + } + proc->i = (BeamInstr*) ep->exp.addressv[0]; + ep->exp.code[0] = (BeamInstr) proc->current[0]; + ep->exp.code[1] = (BeamInstr) proc->current[1]; + ep->exp.code[2] = argc; + ep->exp.code[4] = (BeamInstr) direct_fp; + ep->m = env->mod_nif; + ep->fp = indirect_fp; + proc->freason = TRAP; + return THE_NON_VALUE; } -#endif /* ERTS_DIRTY_SCHEDULERS */ +/* + * Restore saved MFA and registers. Registers are restored only when the + * exception flag is true. + */ +static void +restore_nif_mfa(Process* proc, NifExport* ep, int exception) +{ + int i; + Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array; -#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT + proc->current[0] = ep->saved_mfa[0]; + proc->current[1] = ep->saved_mfa[1]; + proc->current[2] = ep->saved_mfa[2]; + if (exception) + for (i = 0; i < ep->saved_argc; i++) + reg[i] = ep->argv[i]; + ep->saved_argc = 0; + ep->saved_mfa[0] = THE_NON_VALUE; +} -ERL_NIF_TERM -enif_schedule_dirty_nif(ErlNifEnv* env, int flags, - ERL_NIF_TERM (*fp)(ErlNifEnv*, int, const ERL_NIF_TERM[]), - int argc, const ERL_NIF_TERM argv[]) +#ifdef ERTS_DIRTY_SCHEDULERS + +/* + * Finalize a dirty NIF call. This function is scheduled to cause the VM to + * switch the process off a dirty scheduler thread and back onto a regular + * scheduler thread, and then return the result from the dirty NIF. It also + * restores the original NIF MFA when necessary based on the value of + * ep->fp set by execute_dirty_nif via init_nif_sched_data -- non-NULL + * means restore, NULL means do not restore. + */ +static ERL_NIF_TERM +dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + Process* proc = env->proc; + NifExport* ep; + + ASSERT(argc == 1); + ASSERT(!ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data)); + ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); + ASSERT(ep); + if (ep->fp) + restore_nif_mfa(proc, ep, 0); + return argv[0]; +} + +/* Finalize a dirty NIF call that raised an exception. 
Otherwise same as + * the dirty_nif_finalizer() function. + */ +static ERL_NIF_TERM +dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + Process* proc = env->proc; + NifExport* ep; + + ASSERT(!ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data)); + ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); + ASSERT(ep); + if (ep->fp) + restore_nif_mfa(proc, ep, 1); + return enif_make_badarg(env); +} + +/* + * Dirty NIF execution wrapper function. Invoke an application's dirty NIF, + * then check the result and schedule the appropriate finalizer function + * where needed. Also restore the original NIF MFA when appropriate. + */ +static ERL_NIF_TERM +execute_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + Process* proc = env->proc; + NativeFunPtr fp = (NativeFunPtr) proc->current[6]; + NifExport* ep; + ERL_NIF_TERM result; + + ASSERT(ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data)); + + /* + * Set ep->fp to NULL before the native call so we know later whether it scheduled another NIF for execution + */ + ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); + ASSERT(ep); + ep->fp = NULL; + result = (*fp)(env, argc, argv); + erts_smp_atomic32_read_band_mb(&proc->state, + ~(ERTS_PSFLG_DIRTY_CPU_PROC + |ERTS_PSFLG_DIRTY_IO_PROC + |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q + |ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)); + if (erts_refc_dectest(&env->mod_nif->rt_dtor_cnt, 0) == 0 && env->mod_nif->mod == NULL) + close_lib(env->mod_nif); + /* + * If no more NIFs were scheduled by the native call via + * enif_schedule_nif(), then ep->fp will still be NULL as set above, in + * which case we need to restore the original NIF calling + * context. Reuse fp essentially as a boolean for this, passing it to + * init_nif_sched_data below. Both dirty_nif_exception and + * dirty_nif_finalizer then check ep->fp to decide whether or not to + * restore the original calling context. + */ + ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); + ASSERT(ep); + if (ep->fp) + fp = NULL; + if (is_non_value(result)) { + if (proc->freason != TRAP) { + ASSERT(proc->freason == BADARG); + return init_nif_sched_data(env, dirty_nif_exception, fp, 0, argc, argv); + } else { + if (ep->fp == NULL) + restore_nif_mfa(proc, ep, 1); + return result; + } + } + else + return init_nif_sched_data(env, dirty_nif_finalizer, fp, 0, 1, &result); +} + +/* + * Dirty NIF scheduling wrapper function. Schedule a dirty NIF to execute + * via the execute_dirty_nif() wrapper function. The dirty scheduler thread + * type (CPU or I/O) is indicated in flags parameter. 
+ */ +static ERTS_INLINE ERL_NIF_TERM +schedule_dirty_nif(ErlNifEnv* env, int flags, int argc, const ERL_NIF_TERM argv[]) { -#ifdef USE_THREADS erts_aint32_t state, n, a; Process* proc = env->proc; - Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array; - DirtyNifExport* ep = NULL; - int i; + NativeFunPtr fp = (NativeFunPtr) proc->current[6]; + NifExport* ep; + int need_save; - int chkflgs = (flags & (ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND)); - if (chkflgs != ERL_NIF_DIRTY_JOB_IO_BOUND && chkflgs != ERL_NIF_DIRTY_JOB_CPU_BOUND) - return enif_make_badarg(env); + ASSERT(flags==ERL_NIF_DIRTY_JOB_IO_BOUND || flags==ERL_NIF_DIRTY_JOB_CPU_BOUND); a = erts_smp_atomic32_read_acqb(&proc->state); while (1) { @@ -1590,7 +1781,7 @@ enif_schedule_dirty_nif(ErlNifEnv* env, int flags, */ n &= ~(ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q); - if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND) + if (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND) n |= ERTS_PSFLG_DIRTY_CPU_PROC; else n |= ERTS_PSFLG_DIRTY_IO_PROC; @@ -1598,69 +1789,100 @@ enif_schedule_dirty_nif(ErlNifEnv* env, int flags, if (a == state) break; } - if (!(ep = (DirtyNifExport*) ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(proc))) - alloc_proc_psd(proc, &ep); - ERTS_VBUMP_ALL_REDS(proc); - ep->exp.code[2] = argc; - for (i = 0; i < argc; i++) { - reg[i] = (Eterm) argv[i]; - } - proc->i = (BeamInstr*) ep->exp.addressv[0]; - ep->exp.code[4] = (BeamInstr) fp; - ep->m = env->mod_nif; - proc->freason = TRAP; - erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1); + ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); + need_save = (ep == NULL || is_non_value(ep->saved_mfa[0])); + return init_nif_sched_data(env, execute_dirty_nif, fp, need_save, argc, argv); +} - return THE_NON_VALUE; -#else - return (*fp)(env, argc, argv); -#endif +static ERL_NIF_TERM +schedule_dirty_io_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + return schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_IO_BOUND, argc, argv); +} + +static ERL_NIF_TERM +schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + return schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_CPU_BOUND, argc, argv); +} + +#endif /* ERTS_DIRTY_SCHEDULERS */ + +/* + * NIF execution wrapper used by enif_schedule_nif() for regular NIFs. It + * calls the actual NIF, restores original NIF MFA if necessary, and + * then returns the NIF result. + */ +static ERL_NIF_TERM +execute_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + Process* proc = env->proc; + NativeFunPtr fp = (NativeFunPtr) proc->current[6]; + NifExport* ep; + ERL_NIF_TERM result; + + ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); + ASSERT(ep); + ep->fp = NULL; + result = (*fp)(env, argc, argv); + ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); + ASSERT(ep); + /* + * If no NIFs were scheduled by the native call via + * enif_schedule_nif(), then ep->fp will still be NULL as set above, in + * which case we need to restore the original NIF MFA. 
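execute_nif() above is what runs when enif_schedule_nif() is called with flags == 0, which gives NIF authors a portable way to split long jobs across invocations. A hedged usage sketch follows; the count_down NIF and the CHUNK size are hypothetical, and it assumes an ERL_NIF_INIT table elsewhere exports "count_down"/1.

#include "erl_nif.h"

#define CHUNK 1000

/* A yielding NIF: do one slice of work, report the time used, and if
 * there is more left reschedule ourselves on a normal scheduler. */
static ERL_NIF_TERM count_down(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    unsigned long n;
    ERL_NIF_TERM newargv[1];

    if (argc != 1 || !enif_get_ulong(env, argv[0], &n))
        return enif_make_badarg(env);
    if (n == 0)
        return enif_make_atom(env, "done");

    n = n > CHUNK ? n - CHUNK : 0;           /* one slice of "work" */
    enif_consume_timeslice(env, 10);         /* tell the VM what we used */

    newargv[0] = enif_make_ulong(env, n);
    return enif_schedule_nif(env, "count_down", 0, count_down, 1, newargv);
}

Each call consumes a slice, reports it via enif_consume_timeslice(), and traps back into itself until the counter reaches zero.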
+ */ + if (ep->fp == NULL) + restore_nif_mfa(proc, ep, is_non_value(result) && proc->freason != TRAP); + return result; } ERL_NIF_TERM -enif_schedule_dirty_nif_finalizer(ErlNifEnv* env, ERL_NIF_TERM result, - ERL_NIF_TERM (*fp)(ErlNifEnv*, ERL_NIF_TERM)) +enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags, + ERL_NIF_TERM (*fp)(ErlNifEnv*, int, const ERL_NIF_TERM[]), + int argc, const ERL_NIF_TERM argv[]) { -#ifdef USE_THREADS Process* proc = env->proc; - Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array; - DirtyNifExport* ep; + NifExport* ep; + ERL_NIF_TERM fun_name_atom, result; + int need_save; - erts_smp_atomic32_read_band_mb(&proc->state, - ~(ERTS_PSFLG_DIRTY_CPU_PROC - |ERTS_PSFLG_DIRTY_IO_PROC - |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q - |ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)); - if (!(ep = (DirtyNifExport*) ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(proc))) - alloc_proc_psd(proc, &ep); - ERTS_VBUMP_ALL_REDS(proc); - ep->exp.code[2] = 2; - reg[0] = (Eterm) result; -#if HAVE_INT64 && SIZEOF_LONG != 8 - ASSERT(sizeof(fp) <= sizeof(ErlNifUInt64)); - reg[1] = (Eterm) enif_make_uint64(env, (ErlNifUInt64) fp); -#else - ASSERT(sizeof(fp) <= sizeof(unsigned long)); - reg[1] = (Eterm) enif_make_ulong(env, (unsigned long) fp); -#endif - proc->i = (BeamInstr*) ep->exp.addressv[0]; - ep->exp.code[4] = (BeamInstr) execute_dirty_nif_finalizer; - proc->freason = TRAP; + if (argc > MAX_ARG) + return enif_make_badarg(env); + fun_name_atom = enif_make_atom(env, fun_name); + if (enif_is_exception(env, fun_name_atom)) + return fun_name_atom; - return THE_NON_VALUE; + ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); + need_save = (ep == NULL || is_non_value(ep->saved_mfa[0])); + + if (flags) { +#ifdef ERTS_DIRTY_SCHEDULERS + NativeFunPtr sched_fun; + int chkflgs = (flags & (ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND)); + if (chkflgs == ERL_NIF_DIRTY_JOB_IO_BOUND) + sched_fun = schedule_dirty_io_nif; + else if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND) + sched_fun = schedule_dirty_cpu_nif; + else + return enif_make_badarg(env); + result = init_nif_sched_data(env, sched_fun, fp, need_save, argc, argv); #else - return (*fp)(env, result); + return enif_make_badarg(env); #endif -} + } + else + result = init_nif_sched_data(env, execute_nif, fp, need_save, argc, argv); -/* A simple finalizer that just returns its result argument */ -ERL_NIF_TERM -enif_dirty_nif_finalizer(ErlNifEnv* env, ERL_NIF_TERM result) -{ + ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); + ASSERT(ep); + ep->exp.code[1] = (BeamInstr) fun_name_atom; return result; } +#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT + int enif_is_on_dirty_scheduler(ErlNifEnv* env) { @@ -1977,6 +2199,35 @@ static Eterm load_nif_error(Process* p, const char* atom, const char* format, .. return ret; } +/* + * The function below is for looping through ErlNifFunc arrays, helping + * provide backwards compatibility across the version 2.7 change that added + * the "flags" field to ErlNifFunc. + */ +static ErlNifFunc* next_func(ErlNifEntry* entry, int* incrp, ErlNifFunc* func) +{ + ASSERT(incrp); + if (!*incrp) { + if (entry->major > 2 || (entry->major == 2 && entry->minor >= 7)) + *incrp = sizeof(ErlNifFunc); + else { + /* + * ErlNifFuncV1 below is what ErlNifFunc was before the + * addition of the flags field for 2.7, and is needed to handle + * backward compatibility. 
+ */ + typedef struct { + const char* name; + unsigned arity; + ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]); + }ErlNifFuncV1; + *incrp = sizeof(ErlNifFuncV1); + } + } + return (ErlNifFunc*) ((char*)func + *incrp); +} + + BIF_RETTYPE load_nif_2(BIF_ALIST_2) { static const char bad_lib[] = "bad_lib"; @@ -2086,22 +2337,48 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) } else { /*erts_fprintf(stderr, "Found module %T\r\n", mod_atom);*/ - + + int maybe_dirty_nifs = ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7)) + && (entry->options & ERL_NIF_DIRTY_NIF_OPTION)); + int incr = 0; + ErlNifFunc* f = entry->funcs; for (i=0; i < entry->num_of_funcs && ret==am_ok; i++) { BeamInstr** code_pp; - ErlNifFunc* f = &entry->funcs[i]; if (!erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1) || (code_pp = get_func_pp(mod->curr.code, f_atom, f->arity))==NULL) { ret = load_nif_error(BIF_P,bad_lib,"Function not found %T:%s/%u", mod_atom, f->name, f->arity); - } - else if (code_pp[1] - code_pp[0] < (5+3)) { + } + else if (maybe_dirty_nifs && f->flags) { + /* + * If the flags field is non-zero and this emulator was + * built with dirty scheduler support, check that the flags + * value is legal. But if this emulator was built without + * dirty scheduler support, treat a non-zero flags field as + * a load error. + */ +#ifdef ERTS_DIRTY_SCHEDULERS + if (f->flags != ERL_NIF_DIRTY_JOB_IO_BOUND && f->flags != ERL_NIF_DIRTY_JOB_CPU_BOUND) + ret = load_nif_error(BIF_P, bad_lib, "Illegal flags field value %d for NIF %T:%s/%u", + f->flags, mod_atom, f->name, f->arity); +#else + ret = load_nif_error(BIF_P, bad_lib, "NIF %T:%s/%u requires a runtime with dirty scheduler support.", + mod_atom, f->name, f->arity); +#endif + } +#ifdef ERTS_DIRTY_SCHEDULERS + else if (code_pp[1] - code_pp[0] < (5+4)) +#else + else if (code_pp[1] - code_pp[0] < (5+3)) +#endif + { ret = load_nif_error(BIF_P,bad_lib,"No explicit call to load_nif" - " in module (%T:%s/%u to small)", - mod_atom, entry->funcs[i].name, entry->funcs[i].arity); + " in module (%T:%s/%u too small)", + mod_atom, f->name, f->arity); } /*erts_fprintf(stderr, "Found NIF %T:%s/%u\r\n", - mod_atom, entry->funcs[i].name, entry->funcs[i].arity);*/ + mod_atom, f->name, f->arity);*/ + f = next_func(entry, &incr, f); } } @@ -2127,7 +2404,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) * is deprecated and was only ment as a development feature not to * be used in production systems. 
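next_func() above walks the funcs array with a stride chosen from the API version, so a library compiled against a pre-2.7 erl_nif.h (three-field ErlNifFunc) still loads. A small illustration of the stride idea outside the loader; FuncV1, FuncV2 and next_entry are hypothetical names, not the loader's types.

#include <stddef.h>

typedef struct { const char *name; unsigned arity; void *fptr; } FuncV1;                  /* pre-2.7 */
typedef struct { const char *name; unsigned arity; void *fptr; unsigned flags; } FuncV2;  /* 2.7+   */

/* Step to the next entry of an array whose element size depends on the
 * API version the library was compiled against. */
const void *next_entry(int major, int minor, const void *f)
{
    size_t incr = (major > 2 || (major == 2 && minor >= 7)) ? sizeof(FuncV2)
                                                            : sizeof(FuncV1);
    return (const char *)f + incr;
}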
(See warning below) */ - int k; + int k, old_incr = 0; + ErlNifFunc* old_func; lib->priv_data = mod->curr.nif->priv_data; ASSERT(mod->curr.nif->entry != NULL); @@ -2136,13 +2414,16 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) goto error; } /* Check that no NIF is removed */ + old_func = mod->curr.nif->entry->funcs; for (k=0; k < mod->curr.nif->entry->num_of_funcs; k++) { - ErlNifFunc* old_func = &mod->curr.nif->entry->funcs[k]; + int incr = 0; + ErlNifFunc* f = entry->funcs; for (i=0; i < entry->num_of_funcs; i++) { - if (old_func->arity == entry->funcs[i].arity - && sys_strcmp(old_func->name, entry->funcs[i].name) == 0) { + if (old_func->arity == f->arity + && sys_strcmp(old_func->name, f->name) == 0) { break; } + f = next_func(entry, &incr, f); } if (i == entry->num_of_funcs) { ret = load_nif_error(BIF_P,reload,"Reloaded library missing " @@ -2150,7 +2431,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) old_func->name, old_func->arity); goto error; } - } + old_func = next_func(mod->curr.nif->entry, &old_incr, old_func); + } erts_pre_nif(&env, BIF_P, lib); veto = entry->reload(&env, &lib->priv_data, BIF_ARG_2); erts_post_nif(&env); @@ -2197,13 +2479,17 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) /* ** Everything ok, patch the beam code with op_call_nif */ - mod->curr.nif = lib; + + int incr = 0; + ErlNifFunc* f = entry->funcs; + + mod->curr.nif = lib; for (i=0; i < entry->num_of_funcs; i++) { BeamInstr* code_ptr; - erts_atom_get(entry->funcs[i].name, sys_strlen(entry->funcs[i].name), &f_atom, ERTS_ATOM_ENC_LATIN1); - code_ptr = *get_func_pp(mod->curr.code, f_atom, entry->funcs[i].arity); - + erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1); + code_ptr = *get_func_pp(mod->curr.code, f_atom, f->arity); + if (code_ptr[1] == 0) { code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif); } @@ -2211,10 +2497,21 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) GenericBp* g = (GenericBp *) code_ptr[1]; ASSERT(code_ptr[5+0] == (BeamInstr) BeamOp(op_i_generic_breakpoint)); - g->orig_instr = (BeamInstr) BeamOp(op_call_nif); - } - code_ptr[5+1] = (BeamInstr) entry->funcs[i].fptr; + g->orig_instr = (BeamInstr) BeamOp(op_call_nif); + } + if ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7)) + && (entry->options & ERL_NIF_DIRTY_NIF_OPTION) && f->flags) { +#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT + code_ptr[5+3] = (BeamInstr) f->fptr; + code_ptr[5+1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ? 
+ (BeamInstr) schedule_dirty_io_nif : + (BeamInstr) schedule_dirty_cpu_nif; +#endif + } + else + code_ptr[5+1] = (BeamInstr) f->fptr; code_ptr[5+2] = (BeamInstr) lib; + f = next_func(entry, &incr, f); } } else { diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h index 5b93c2398e..226fc199a1 100644 --- a/erts/emulator/beam/erl_nif.h +++ b/erts/emulator/beam/erl_nif.h @@ -42,9 +42,13 @@ ** 2.5: R17 Maps API additions ** 2.6: R17 with maps ** R17 dirty schedulers +** 2.7: 17.3 add enif_schedule_nif +** remove enif_schedule_dirty_nif, enif_schedule_dirty_nif_finalizer, enif_dirty_nif_finalizer +** add ErlNifEntry options +** add ErlNifFunc flags */ #define ERL_NIF_MAJOR_VERSION 2 -#define ERL_NIF_MINOR_VERSION 6 +#define ERL_NIF_MINOR_VERSION 7 /* * The emulator will refuse to load a nif-lib with a major version @@ -125,8 +129,10 @@ typedef struct const char* name; unsigned arity; ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]); + unsigned flags; }ErlNifFunc; + typedef struct enif_entry_t { int major; @@ -139,8 +145,11 @@ typedef struct enif_entry_t int (*upgrade)(ErlNifEnv*, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info); void (*unload) (ErlNifEnv*, void* priv_data); const char* vm_variant; + unsigned options; }ErlNifEntry; +/* Field bits for ErlNifEntry options */ +#define ERL_NIF_DIRTY_NIF_OPTION 1 typedef struct @@ -232,10 +241,21 @@ extern TWinDynNifCallbacks WinDynNifCallbacks; # else # define ERL_NIF_INIT_DECL(MODNAME) __declspec(dllexport) ErlNifEntry* nif_init(TWinDynNifCallbacks* callbacks) # endif -# define ERL_NIF_INIT_BODY memcpy(&WinDynNifCallbacks,callbacks,sizeof(TWinDynNifCallbacks)) +# ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT +# define ERL_NIF_INIT_BODY do { \ + memcpy(&WinDynNifCallbacks,callbacks,sizeof(TWinDynNifCallbacks)); \ + entry.options = ERL_NIF_DIRTY_NIF_OPTION; \ + } while(0) +# else +# define ERL_NIF_INIT_BODY memcpy(&WinDynNifCallbacks,callbacks,sizeof(TWinDynNifCallbacks)) +# endif #else # define ERL_NIF_INIT_GLOB -# define ERL_NIF_INIT_BODY +# ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT +# define ERL_NIF_INIT_BODY entry.options = ERL_NIF_DIRTY_NIF_OPTION +# else +# define ERL_NIF_INIT_BODY +# endif # ifdef STATIC_ERLANG_NIF # define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* MODNAME ## _nif_init(void) # else diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h index d7c554e60b..be39816a64 100644 --- a/erts/emulator/beam/erl_nif_api_funcs.h +++ b/erts/emulator/beam/erl_nif_api_funcs.h @@ -141,10 +141,8 @@ ERL_NIF_API_FUNC_DECL(int,enif_is_number,(ErlNifEnv*, ERL_NIF_TERM term)); ERL_NIF_API_FUNC_DECL(void*,enif_dlopen,(const char* lib, void (*err_handler)(void*,const char*), void* err_arg)); ERL_NIF_API_FUNC_DECL(void*,enif_dlsym,(void* handle, const char* symbol, void (*err_handler)(void*,const char*), void* err_arg)); ERL_NIF_API_FUNC_DECL(int,enif_consume_timeslice,(ErlNifEnv*, int percent)); +ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_nif,(ErlNifEnv*,const char*,int,ERL_NIF_TERM (*)(ErlNifEnv*,int,const ERL_NIF_TERM[]),int,const ERL_NIF_TERM[])); #ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT -ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_dirty_nif,(ErlNifEnv*,int,ERL_NIF_TERM (*)(ErlNifEnv*,int,const ERL_NIF_TERM[]),int,const ERL_NIF_TERM[])); -ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_dirty_nif_finalizer,(ErlNifEnv*,ERL_NIF_TERM,ERL_NIF_TERM (*)(ErlNifEnv*,ERL_NIF_TERM))); 
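With the 2.7 additions above (the ErlNifFunc flags field, ERL_NIF_DIRTY_NIF_OPTION and enif_schedule_nif()), a NIF can either be declared dirty in its function table or hand work to a dirty scheduler explicitly. The sketch below shows the explicit form; the module name example and the function names are hypothetical, and it assumes an emulator built with dirty scheduler support (otherwise the scheduling call returns badarg, as the loader code above shows).

#include "erl_nif.h"

/* The CPU-heavy work, intended to run on a dirty CPU scheduler. */
static ERL_NIF_TERM heavy_work(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* ... long-running computation on argv[0] ... */
    return argv[0];
}

/* Regular NIF entry point: hand the job over via enif_schedule_nif(). */
static ERL_NIF_TERM heavy(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    return enif_schedule_nif(env, "heavy_work", ERL_NIF_DIRTY_JOB_CPU_BOUND,
                             heavy_work, argc, argv);
}

static ErlNifFunc nif_funcs[] = {
    {"heavy", 1, heavy, 0}                   /* 4th member is the new flags field */
};

ERL_NIF_INIT(example, nif_funcs, NULL, NULL, NULL, NULL)

Setting the flags field to ERL_NIF_DIRTY_JOB_CPU_BOUND or ERL_NIF_DIRTY_JOB_IO_BOUND in the table instead makes the loader install the dirty scheduling wrapper for that function, per the load_nif_2 changes above.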
-ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_dirty_nif_finalizer,(ErlNifEnv*,ERL_NIF_TERM)); ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*)); ERL_NIF_API_FUNC_DECL(int,enif_have_dirty_schedulers,(void)); #endif @@ -289,10 +287,8 @@ ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_get_pair, (ErlNifEnv *env, ErlNifMa # define enif_dlopen ERL_NIF_API_FUNC_MACRO(enif_dlopen) # define enif_dlsym ERL_NIF_API_FUNC_MACRO(enif_dlsym) # define enif_consume_timeslice ERL_NIF_API_FUNC_MACRO(enif_consume_timeslice) +# define enif_schedule_nif ERL_NIF_API_FUNC_MACRO(enif_schedule_nif) #ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT -# define enif_schedule_dirty_nif ERL_NIF_API_FUNC_MACRO(enif_schedule_dirty_nif) -# define enif_schedule_dirty_nif_finalizer ERL_NIF_API_FUNC_MACRO(enif_schedule_dirty_nif_finalizer) -# define enif_dirty_nif_finalizer ERL_NIF_API_FUNC_MACRO(enif_dirty_nif_finalizer) # define enif_is_on_dirty_scheduler ERL_NIF_API_FUNC_MACRO(enif_is_on_dirty_scheduler) # define enif_have_dirty_schedulers ERL_NIF_API_FUNC_MACRO(enif_have_dirty_schedulers) #endif diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c index 31d9a1e26e..682f6f8f4b 100644 --- a/erts/emulator/beam/erl_port_task.c +++ b/erts/emulator/beam/erl_port_task.c @@ -68,6 +68,13 @@ static void chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_q #define DTRACE_DRIVER(PROBE_NAME, PP) do {} while(0) #endif +#define ERTS_SMP_LC_VERIFY_RQ(RQ, PP) \ + do { \ + ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); \ + ERTS_SMP_LC_ASSERT((RQ) == ((ErtsRunQueue *) \ + erts_smp_atomic_read_nob(&(PP)->run_queue))); \ + } while (0) + erts_smp_atomic_t erts_port_task_outstanding_io_tasks; #define ERTS_PT_STATE_SCHEDULED 0 @@ -798,12 +805,13 @@ schedule_port_task_handle_list_free(ErtsPortTaskHandleList *pthlp) static ERTS_INLINE void abort_nosuspend_task(Port *pp, ErtsPortTaskType type, - ErtsPortTaskTypeData *tdp) + ErtsPortTaskTypeData *tdp, + int bpq_data) { ASSERT(type == ERTS_PORT_TASK_PROC_SIG); - if (!pp->sched.taskq.bpq) + if (!bpq_data) tdp->psig.callback(NULL, ERTS_PORT_SFLG_INVALID, ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND, @@ -991,6 +999,7 @@ static ERTS_INLINE int finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q) { erts_aint32_t act; + unsigned int prof_runnable_ports; if (!processing_busy_q) pp->sched.taskq.local.first = *execq; @@ -1007,6 +1016,10 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q) if (act & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q) act = check_unset_busy_port_q(pp, act, pp->sched.taskq.bpq); + prof_runnable_ports = erts_system_profile_flags.runnable_ports; + if (prof_runnable_ports) + erts_port_task_sched_lock(&pp->sched); + while (1) { erts_aint32_t new, exp; @@ -1018,12 +1031,24 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q) act = erts_smp_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp); - ASSERT(!(act & ERTS_PTS_FLG_IN_RUNQ)); + ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_IN_RUNQ)); + ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_EXEC_IMM)); if (exp == act) break; } + if (prof_runnable_ports | IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) { + /* trace port scheduling, out */ + if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) + trace_sched_ports(pp, am_out); + if (prof_runnable_ports) { + if (!(act & (ERTS_PTS_FLG_EXEC_IMM|ERTS_PTS_FLG_HAVE_TASKS))) + profile_runnable_port(pp, am_inactive); + erts_port_task_sched_unlock(&pp->sched); + } + } + return (act & ERTS_PTS_FLG_HAVE_TASKS) != 0; } @@ -1345,7 +1370,7 @@ 
erts_port_task_abort_nosuspend_tasks(Port *pp) #endif schedule_port_task_handle_list_free(pthlp); - abort_nosuspend_task(pp, type, &td); + abort_nosuspend_task(pp, type, &td, pp->sched.taskq.bpq != NULL); } } @@ -1369,6 +1394,7 @@ erts_port_task_schedule(Eterm id, Port *pp; ErtsPortTask *ptp = NULL; erts_aint32_t act, add_flags; + unsigned int prof_runnable_ports; if (pthp && erts_port_task_is_scheduled(pthp)) { ASSERT(0); @@ -1457,6 +1483,10 @@ erts_port_task_schedule(Eterm id, if (ns_pthlp) add_flags |= ERTS_PTS_FLG_HAVE_NS_TASKS; + prof_runnable_ports = erts_system_profile_flags.runnable_ports; + if (prof_runnable_ports) + erts_port_task_sched_lock(&pp->sched); + while (1) { erts_aint32_t new, exp; @@ -1481,6 +1511,13 @@ erts_port_task_schedule(Eterm id, goto done; /* Died after our task insert... */ } + if (prof_runnable_ports) { + if (!(act & ERTS_PTS_FLG_EXEC_IMM)) + profile_runnable_port(pp, am_active); + erts_port_task_sched_unlock(&pp->sched); + prof_runnable_ports = 0; + } + /* Enqueue port on run-queue */ runq = erts_port_runq(pp); @@ -1489,8 +1526,10 @@ erts_port_task_schedule(Eterm id, #ifdef ERTS_SMP xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL); + ERTS_SMP_LC_ASSERT(runq != xrunq); + ERTS_SMP_LC_VERIFY_RQ(runq, pp); if (xrunq) { - /* Port emigrated ... */ + /* Emigrate port ... */ erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); erts_smp_runq_unlock(runq); runq = erts_port_runq(pp); @@ -1500,10 +1539,6 @@ erts_port_task_schedule(Eterm id, #endif enqueue_port(runq, pp); - - if (erts_system_profile_flags.runnable_ports) { - profile_runnable_port(pp, am_active); - } erts_smp_runq_unlock(runq); @@ -1511,6 +1546,9 @@ erts_port_task_schedule(Eterm id, done: + if (prof_runnable_ports) + erts_port_task_sched_unlock(&pp->sched); + #ifdef ERTS_SMP if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) erts_port_dec_refc(pp); @@ -1525,7 +1563,7 @@ abort_nosuspend: erts_port_dec_refc(pp); #endif - abort_nosuspend_task(pp, ptp->type, &ptp->u.alive.td); + abort_nosuspend_task(pp, ptp->type, &ptp->u.alive.td, 0); ASSERT(ns_pthlp); erts_free(ERTS_ALC_T_PT_HNDL_LIST, ns_pthlp); @@ -1609,6 +1647,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) goto done; } + ERTS_SMP_LC_VERIFY_RQ(runq, pp); + erts_smp_runq_unlock(runq); *curr_port_pp = pp; @@ -1765,10 +1805,6 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) erts_unblock_fpe(fpe_was_unmasked); - /* trace port scheduling, out */ - if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) { - trace_sched_ports(pp, am_out); - } if (io_tasks_executed) { ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) @@ -1791,11 +1827,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) erts_smp_runq_lock(runq); - if (!active) { - if (erts_system_profile_flags.runnable_ports) - profile_runnable_port(pp, am_inactive); - } - else { + if (active) { #ifdef ERTS_SMP ErtsRunQueue *xrunq; #endif @@ -1804,6 +1836,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) #ifdef ERTS_SMP xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL); + ERTS_SMP_LC_ASSERT(runq != xrunq); + ERTS_SMP_LC_VERIFY_RQ(runq, pp); if (!xrunq) { #endif enqueue_port(runq, pp); @@ -1811,7 +1845,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) #ifdef ERTS_SMP } else { - /* Port emigrated ... */ + /* Emigrate port... 
*/ erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); erts_smp_runq_unlock(runq); diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h index 1d30465ec9..9ef0cfcedc 100644 --- a/erts/emulator/beam/erl_port_task.h +++ b/erts/emulator/beam/erl_port_task.h @@ -78,6 +78,7 @@ extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks; #define ERTS_PTS_FLG_PARALLELISM (((erts_aint32_t) 1) << 9) #define ERTS_PTS_FLG_FORCE_SCHED (((erts_aint32_t) 1) << 10) #define ERTS_PTS_FLG_EXITING (((erts_aint32_t) 1) << 11) +#define ERTS_PTS_FLG_EXEC_IMM (((erts_aint32_t) 1) << 12) #define ERTS_PTS_FLGS_BUSY \ (ERTS_PTS_FLG_BUSY_PORT | ERTS_PTS_FLG_BUSY_PORT_Q) @@ -87,6 +88,7 @@ extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks; | ERTS_PTS_FLG_HAVE_BUSY_TASKS \ | ERTS_PTS_FLG_HAVE_TASKS \ | ERTS_PTS_FLG_EXEC \ + | ERTS_PTS_FLG_EXEC_IMM \ | ERTS_PTS_FLG_FORCE_SCHED \ | ERTS_PTS_FLG_EXITING) diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index b73f9b7f92..685004f267 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -590,12 +590,10 @@ erts_pre_init_process(void) erts_psd_required_locks[ERTS_PSD_DELAYED_GC_TASK_QS].set_locks = ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS; -#ifdef ERTS_DIRTY_SCHEDULERS - erts_psd_required_locks[ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT].get_locks - = ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_GET_LOCKS; - erts_psd_required_locks[ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT].set_locks - = ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_SET_LOCKS; -#endif + erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].get_locks + = ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS; + erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].set_locks + = ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS; /* Check that we have locks for all entries */ for (ix = 0; ix < ERTS_PSD_SIZE; ix++) { @@ -2211,6 +2209,9 @@ aux_work_timeout_early_init(int no_schedulers) p = (UWord) malloc((sizeof(ErtsAuxWorkTmo) + sizeof(erts_atomic32_t)*(no_schedulers+1)) + ERTS_CACHE_LINE_SIZE-1); + if (!p) { + ERTS_INTERNAL_ERROR("malloc failed to allocate memory!"); + } if (p & ERTS_CACHE_LINE_MASK) p = (p & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE; ASSERT((p & ERTS_CACHE_LINE_MASK) == 0); @@ -3755,17 +3756,25 @@ evacuate_run_queue(ErtsRunQueue *rq, } #ifdef ERTS_DIRTY_SCHEDULERS else if (state & ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q) { - erts_aint32_t old; - old = erts_smp_atomic32_read_band_nob(&proc->state, - ~(ERTS_PSFLG_DIRTY_CPU_PROC - | ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q)); +#ifdef DEBUG + erts_aint32_t old = +#else + (void) +#endif + erts_smp_atomic32_read_band_nob(&proc->state, + ~(ERTS_PSFLG_DIRTY_CPU_PROC + | ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q)); /* assert that no other dirty flags are set */ ASSERT(!(old & (ERTS_PSFLG_DIRTY_IO_PROC|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q))); } else if (state & ERTS_PSFLG_DIRTY_IO_PROC_IN_Q) { - erts_aint32_t old; - old = erts_smp_atomic32_read_band_nob(&proc->state, - ~(ERTS_PSFLG_DIRTY_IO_PROC - | ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)); +#ifdef DEBUG + erts_aint32_t old = +#else + (void) +#endif + erts_smp_atomic32_read_band_nob(&proc->state, + ~(ERTS_PSFLG_DIRTY_IO_PROC + | ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)); /* assert that no other dirty flags are set */ ASSERT(!(old & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q))); } @@ -5874,6 +5883,9 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Proces case ERTS_ENQUEUE_NOT: if (erts_system_profile_flags.runnable_procs) { + /* Status lock prevents out of order "runnable proc" trace 
msgs */ + ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); + if (!(a & ERTS_PSFLG_ACTIVE_SYS) && (!(a & ERTS_PSFLG_ACTIVE) || (a & ERTS_PSFLG_SUSPENDED))) { @@ -5987,7 +5999,8 @@ change_proc_schedule_state(Process *p, erts_aint32_t clear_state_flags, erts_aint32_t set_state_flags, erts_aint32_t *statep, - erts_aint32_t *enq_prio_p) + erts_aint32_t *enq_prio_p, + ErtsProcLocks locks) { /* * NOTE: ERTS_PSFLG_RUNNING, ERTS_PSFLG_RUNNING_SYS and @@ -5996,6 +6009,11 @@ change_proc_schedule_state(Process *p, */ erts_aint32_t a = *statep, n; int enqueue; /* < 0 -> use proxy */ + unsigned int prof_runnable_procs = erts_system_profile_flags.runnable_procs; + unsigned int lock_status = (prof_runnable_procs + && !(locks & ERTS_PROC_LOCK_STATUS)); + + ERTS_SMP_LC_ASSERT(locks == erts_proc_lc_my_proc_locks(p)); ASSERT(!(a & ERTS_PSFLG_PROXY)); ASSERT((clear_state_flags & (ERTS_PSFLG_RUNNING @@ -6005,6 +6023,9 @@ change_proc_schedule_state(Process *p, | ERTS_PSFLG_RUNNING_SYS | ERTS_PSFLG_ACTIVE_SYS)) == 0); + if (lock_status) + erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + while (1) { erts_aint32_t e; n = e = a; @@ -6040,7 +6061,9 @@ change_proc_schedule_state(Process *p, break; } - if (erts_system_profile_flags.runnable_procs) { + if (prof_runnable_procs) { + + /* Status lock prevents out of order "runnable proc" trace msgs */ if (((n & (ERTS_PSFLG_SUSPENDED | ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE) @@ -6053,15 +6076,18 @@ change_proc_schedule_state(Process *p, profile_runnable_proc(p, am_active); } + if (lock_status) + erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } + *statep = a; return enqueue; } static ERTS_INLINE void -schedule_process(Process *p, erts_aint32_t in_state) +schedule_process(Process *p, erts_aint32_t in_state, ErtsProcLocks locks) { erts_aint32_t enq_prio = -1; erts_aint32_t state = in_state; @@ -6069,7 +6095,8 @@ schedule_process(Process *p, erts_aint32_t in_state) 0, ERTS_PSFLG_ACTIVE, &state, - &enq_prio); + &enq_prio, + locks); if (enqueue != ERTS_ENQUEUE_NOT) add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio), state, @@ -6077,16 +6104,27 @@ schedule_process(Process *p, erts_aint32_t in_state) } void -erts_schedule_process(Process *p, erts_aint32_t state) +erts_schedule_process(Process *p, erts_aint32_t state, ErtsProcLocks locks) { - schedule_process(p, state); + schedule_process(p, state, locks); } static void schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy) { + /* + * Expects status lock to be locked when called, and + * returns with status lock unlocked... + */ erts_aint32_t a = state, n, enq_prio = -1; int enqueue; /* < 0 -> use proxy */ + unsigned int prof_runnable_procs = erts_system_profile_flags.runnable_procs; + + /* Status lock prevents out of order "runnable proc" trace msgs */ + ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); + + if (!prof_runnable_procs) + erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); ASSERT(!(state & ERTS_PSFLG_PROXY)); @@ -6095,7 +6133,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy) n = e = a; if (a & ERTS_PSFLG_FREE) - return; /* We don't want to schedule free processes... */ + goto cleanup; /* We don't want to schedule free processes... 
*/ enqueue = ERTS_ENQUEUE_NOT; n |= ERTS_PSFLG_ACTIVE_SYS; @@ -6108,7 +6146,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy) goto cleanup; } - if (erts_system_profile_flags.runnable_procs) { + if (prof_runnable_procs) { if (!(a & (ERTS_PSFLG_ACTIVE_SYS | ERTS_PSFLG_RUNNING @@ -6118,6 +6156,8 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy) profile_runnable_proc(p, am_active); } + erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + prof_runnable_procs = 0; } if (enqueue != ERTS_ENQUEUE_NOT) { @@ -6132,8 +6172,14 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy) } cleanup: + + if (prof_runnable_procs) + erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + if (proxy) free_proxy_proc(proxy); + + ERTS_SMP_LC_ASSERT(!(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p))); } static ERTS_INLINE int @@ -6200,7 +6246,7 @@ suspend_process(Process *c_p, Process *p) } static ERTS_INLINE void -resume_process(Process *p) +resume_process(Process *p, ErtsProcLocks locks) { erts_aint32_t state, enq_prio = -1; int enqueue; @@ -6217,7 +6263,8 @@ resume_process(Process *p) ERTS_PSFLG_SUSPENDED, 0, &state, - &enq_prio); + &enq_prio, + locks); if (enqueue) add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio), state, @@ -7818,6 +7865,9 @@ erts_start_schedulers(void) #ifdef ETHR_HAVE_THREAD_NAMES opts.name = malloc(80); + if (!opts.name) { + ERTS_INTERNAL_ERROR("malloc failed to allocate memory!"); + } #endif #ifdef ERTS_SMP @@ -8030,7 +8080,8 @@ handle_pend_sync_suspend(Process *suspendee, } /* suspender is suspended waiting for suspendee to suspend; resume suspender */ - resume_process(suspender); + ASSERT(suspendee != suspender); + resume_process(suspender, ERTS_PROC_LOCK_STATUS); erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS); } } @@ -8065,7 +8116,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, ASSERT(c_p->flags & F_P2PNR_RESCHED); c_p->flags &= ~F_P2PNR_RESCHED; if (!suspend && rp) - resume_process(rp); + resume_process(rp, rp_locks); } else { @@ -8223,7 +8274,8 @@ handle_pend_bif_sync_suspend(Process *suspendee, } /* suspender is suspended waiting for suspendee to suspend; resume suspender */ - resume_process(suspender); + ASSERT(suspender != suspendee); + resume_process(suspender, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); } @@ -8583,7 +8635,8 @@ resume_process_1(BIF_ALIST_1) ASSERT(ERTS_PSFLG_SUSPENDED & erts_smp_atomic32_read_nob(&suspendee->state)); - resume_process(suspendee); + ASSERT(BIF_P != suspendee); + resume_process(suspendee, ERTS_PROC_LOCK_STATUS); erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); } @@ -8713,7 +8766,7 @@ erts_resume(Process* process, ErtsProcLocks process_locks) ERTS_SMP_LC_ASSERT(process_locks == erts_proc_lc_my_proc_locks(process)); if (!(process_locks & ERTS_PROC_LOCK_STATUS)) erts_smp_proc_lock(process, ERTS_PROC_LOCK_STATUS); - resume_process(process); + resume_process(process, process_locks|ERTS_PROC_LOCK_STATUS); if (!(process_locks & ERTS_PROC_LOCK_STATUS)) erts_smp_proc_unlock(process, ERTS_PROC_LOCK_STATUS); } @@ -8732,7 +8785,7 @@ erts_resume_processes(ErtsProcList *list) proc = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCK_STATUS); if (proc) { if (erts_proclist_same(plp, proc)) { - resume_process(proc); + resume_process(proc, ERTS_PROC_LOCK_STATUS); nresumed++; } erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_STATUS); @@ -9968,8 +10021,10 @@ 
erts_internal_request_system_task_3(BIF_ALIST_3) rp_state = n; } - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); - + /* + * schedule_process_sys_task() unlocks status + * lock on process. + */ schedule_process_sys_task(rp, rp_state, NULL); if (free_stqs) @@ -10714,7 +10769,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). * Schedule process for execution. */ - schedule_process(p, state); + schedule_process(p, state, 0); VERBOSE(DEBUG_PROCESSES, ("Created a new process: %T\n",p->common.id)); @@ -11035,7 +11090,8 @@ set_proc_exiting(Process *p, ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT, ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE, &state, - &enq_prio); + &enq_prio, + ERTS_PROC_LOCKS_ALL); p->fvalue = reason; if (bp) @@ -11076,7 +11132,8 @@ set_proc_self_exiting(Process *c_p) ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT, ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE, &state, - &enq_prio); + &enq_prio, + ERTS_PROC_LOCKS_ALL); ASSERT(!enqueue); return state; @@ -11721,8 +11778,9 @@ resume_suspend_monitor(ErtsSuspendMonitor *smon, void *vc_p) Process *suspendee = erts_pid2proc((Process *) vc_p, ERTS_PROC_LOCK_MAIN, smon->pid, ERTS_PROC_LOCK_STATUS); if (suspendee) { + ASSERT(suspendee != vc_p); if (smon->active) - resume_process(suspendee); + resume_process(suspendee, ERTS_PROC_LOCK_STATUS); erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); } erts_destroy_suspend_monitor(smon); @@ -12055,7 +12113,7 @@ timeout_proc(Process* p) state = erts_smp_atomic32_read_acqb(&p->state); if (!(state & ERTS_PSFLG_ACTIVE)) - schedule_process(p, state); + schedule_process(p, state, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); } diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h index ed6dadbffa..9b740f049e 100644 --- a/erts/emulator/beam/erl_process.h +++ b/erts/emulator/beam/erl_process.h @@ -734,13 +734,9 @@ erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi) #define ERTS_PSD_DIST_ENTRY 3 #define ERTS_PSD_CALL_TIME_BP 4 #define ERTS_PSD_DELAYED_GC_TASK_QS 5 -#ifdef ERTS_DIRTY_SCHEDULERS -#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT 6 +#define ERTS_PSD_NIF_TRAP_EXPORT 6 #define ERTS_PSD_SIZE 7 -#else -#define ERTS_PSD_SIZE 6 -#endif typedef struct { void *data[ERTS_PSD_SIZE]; @@ -767,10 +763,8 @@ typedef struct { #define ERTS_PSD_DELAYED_GC_TASK_QS_GET_LOCKS ERTS_PROC_LOCK_MAIN #define ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS ERTS_PROC_LOCK_MAIN -#ifdef ERTS_DIRTY_SCHEDULERS -#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_GET_LOCKS ERTS_PROC_LOCK_MAIN -#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_SET_LOCKS ERTS_PROC_LOCK_MAIN -#endif +#define ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS ERTS_PROC_LOCK_MAIN +#define ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS ERTS_PROC_LOCK_MAIN typedef struct { ErtsProcLocks get_locks; @@ -1367,6 +1361,8 @@ Uint64 erts_get_proc_interval(void); Uint64 erts_ensure_later_proc_interval(Uint64); Uint64 erts_step_proc_interval(void); +int erts_setup_nif_gc(Process* proc, Eterm** objv, int* nobj); /* see erl_nif.c */ + ErtsProcList *erts_proclist_create(Process *); void erts_proclist_destroy(ErtsProcList *); @@ -1704,17 +1700,17 @@ ErtsSchedulerData *erts_get_scheduler_data(void) #endif #endif -void erts_schedule_process(Process *, erts_aint32_t); +void erts_schedule_process(Process *, erts_aint32_t, ErtsProcLocks); -ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p); +ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p, ErtsProcLocks locks); #if ERTS_GLB_INLINE_INCL_FUNC_DEF ERTS_GLB_INLINE void 
-erts_proc_notify_new_message(Process *p) +erts_proc_notify_new_message(Process *p, ErtsProcLocks locks) { /* No barrier needed, due to msg lock */ erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state); if (!(state & ERTS_PSFLG_ACTIVE)) - erts_schedule_process(p, state); + erts_schedule_process(p, state, locks); } #endif @@ -1817,12 +1813,10 @@ erts_psd_set(Process *p, ErtsProcLocks plocks, int ix, void *data) #define ERTS_PROC_SET_DELAYED_GC_TASK_QS(P, L, PBT) \ ((ErtsProcSysTaskQs *) erts_psd_set((P), (L), ERTS_PSD_DELAYED_GC_TASK_QS, (void *) (PBT))) -#ifdef ERTS_DIRTY_SCHEDULERS -#define ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(P) \ - ((Export *) erts_psd_get((P), ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT)) -#define ERTS_PROC_SET_DIRTY_SCHED_TRAP_EXPORT(P, L, DSTE) \ - ((Export *) erts_psd_set((P), (L), ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT, (void *) (DSTE))) -#endif +#define ERTS_PROC_GET_NIF_TRAP_EXPORT(P) \ + ((Export *) erts_psd_get((P), ERTS_PSD_NIF_TRAP_EXPORT)) +#define ERTS_PROC_SET_NIF_TRAP_EXPORT(P, L, DSTE) \ + ((Export *) erts_psd_set((P), (L), ERTS_PSD_NIF_TRAP_EXPORT, (void *) (DSTE))) ERTS_GLB_INLINE Eterm erts_proc_get_error_handler(Process *p); diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c index edf4a28784..ae053fc191 100644 --- a/erts/emulator/beam/io.c +++ b/erts/emulator/beam/io.c @@ -1218,9 +1218,10 @@ typedef struct { static ERTS_INLINE ErtsTryImmDrvCallResult try_imm_drv_call(ErtsTryImmDrvCallState *sp) { + unsigned int prof_runnable_ports; ErtsTryImmDrvCallResult res; int reds_left_in; - erts_aint32_t invalid_state, invalid_sched_flags; + erts_aint32_t act, exp, invalid_state, invalid_sched_flags; Port *prt = sp->port; Process *c_p = sp->c_p; @@ -1247,18 +1248,39 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp) goto locked_fail; } - sp->sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); - if (sp->sched_flags & invalid_sched_flags) { - res = ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS; - goto locked_fail; - } + prof_runnable_ports = erts_system_profile_flags.runnable_ports; + if (prof_runnable_ports) + erts_port_task_sched_lock(&prt->sched); + act = erts_smp_atomic32_read_nob(&prt->sched.flags); + + do { + erts_aint32_t new; + + if (act & invalid_sched_flags) { + res = ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS; + sp->sched_flags = act; + goto locked_fail; + } + exp = act; + new = act | ERTS_PTS_FLG_EXEC_IMM; + act = erts_smp_atomic32_cmpxchg_mb(&prt->sched.flags, new, exp); + } while (act != exp); + + sp->sched_flags = act; if (!c_p) reds_left_in = CONTEXT_REDS/10; else { if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) trace_virtual_sched(c_p, am_out); + /* + * No status lock held while sending runnable + * proc trace messages. It is however not needed + * in this case, since only this thread can send + * such messages for this process until the process + * has been scheduled out. 
+ */ if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) profile_runnable_proc(c_p, am_inactive); @@ -1273,11 +1295,14 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp) ERTS_SMP_CHK_NO_PROC_LOCKS; - if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) - trace_sched_ports_where(prt, am_in, sp->port_op); - if (erts_system_profile_flags.runnable_ports - && !erts_port_is_scheduled(prt)) - profile_runnable_port(prt, am_active); + if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) { + if (prof_runnable_ports && !(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC))) + profile_runnable_port(prt, am_active); + if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) + trace_sched_ports_where(prt, am_in, sp->port_op); + if (prof_runnable_ports) + erts_port_task_sched_unlock(&prt->sched); + } sp->fpe_was_unmasked = erts_block_fpe(); @@ -1294,17 +1319,31 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp) int reds; Port *prt = sp->port; Process *c_p = sp->c_p; + erts_aint32_t act; + unsigned int prof_runnable_ports; reds = prt->reds; reds += erts_port_driver_callback_epilogue(prt, NULL); erts_unblock_fpe(sp->fpe_was_unmasked); - if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) - trace_sched_ports_where(prt, am_out, sp->port_op); - if (erts_system_profile_flags.runnable_ports - && !erts_port_is_scheduled(prt)) - profile_runnable_port(prt, am_inactive); + prof_runnable_ports = erts_system_profile_flags.runnable_ports; + if (prof_runnable_ports) + erts_port_task_sched_lock(&prt->sched); + + act = erts_smp_atomic32_read_band_mb(&prt->sched.flags, + ~ERTS_PTS_FLG_EXEC_IMM); + ERTS_SMP_LC_ASSERT(act & ERTS_PTS_FLG_EXEC_IMM); + + if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) { + if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) + trace_sched_ports_where(prt, am_out, sp->port_op); + if (prof_runnable_ports) { + if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC))) + profile_runnable_port(prt, am_inactive); + erts_port_task_sched_unlock(&prt->sched); + } + } erts_port_release(prt); @@ -1319,6 +1358,13 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp) if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) trace_virtual_sched(c_p, am_in); + /* + * No status lock held while sending runnable + * proc trace messages. It is however not needed + * in this case, since only this thread can send + * such messages for this process until the process + * has been scheduled out. 
+ */ if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) profile_runnable_proc(c_p, am_active); @@ -6129,7 +6175,7 @@ driver_pdl_create(ErlDrvPort dp) return NULL; pdl = erts_alloc(ERTS_ALC_T_PORT_DATA_LOCK, sizeof(struct erl_drv_port_data_lock)); - erts_mtx_init(&pdl->mtx, "port_data_lock"); + erts_mtx_init_x(&pdl->mtx, "port_data_lock", pp->common.id, 1); pdl_init_refc(pdl); erts_port_inc_refc(pp); pdl->prt = pp; @@ -7166,7 +7212,7 @@ char *driver_dl_error(void) #define ERL_DRV_SYS_INFO_SIZE(LAST_FIELD) \ - (((size_t) &((ErlDrvSysInfo *) 0)->LAST_FIELD) \ + (offsetof(ErlDrvSysInfo, LAST_FIELD) \ + sizeof(((ErlDrvSysInfo *) 0)->LAST_FIELD)) void diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h index 05f07e57b2..3d8dd9c6d0 100644 --- a/erts/emulator/beam/sys.h +++ b/erts/emulator/beam/sys.h @@ -274,6 +274,7 @@ __decl_noreturn void __noreturn erl_assert_error(const char* expr, const char *f typedef unsigned int Eterm; typedef unsigned int Uint; typedef int Sint; +#define ERTS_UINT_MAX UINT_MAX #define ERTS_SIZEOF_ETERM SIZEOF_INT #define ErtsStrToSint strtol #else @@ -347,6 +348,7 @@ typedef long long Sint; typedef Uint UWord; typedef Sint SWord; +#define ERTS_UINT_MAX ERTS_UWORD_MAX #endif /* HALFWORD_HEAP */ diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c index 72092ec7b0..55f9e68e78 100644 --- a/erts/emulator/beam/utils.c +++ b/erts/emulator/beam/utils.c @@ -3948,6 +3948,9 @@ erts_save_emu_args(int argc, char **argv) size += sz+1; } ptr = (char *) malloc(size); + if (!ptr) { + ERTS_INTERNAL_ERROR("malloc failed to allocate memory!"); + } #ifdef DEBUG end_ptr = ptr + size; #endif diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c index 09bada457d..891589d1c5 100644 --- a/erts/emulator/drivers/common/inet_drv.c +++ b/erts/emulator/drivers/common/inet_drv.c @@ -4372,7 +4372,7 @@ static int erl_inet_close(inet_descriptor* desc) desc_close(desc); desc->state = INET_STATE_CLOSED; } else if (desc->prebound && (desc->s != INVALID_SOCKET)) { - sock_select(desc, FD_READ | FD_WRITE | FD_CLOSE, 0); + sock_select(desc, FD_READ | FD_WRITE | FD_CLOSE | ERL_DRV_USE_NO_CALLBACK, 0); desc->event_mask = 0; #ifdef __WIN32__ desc->forced_events = 0; @@ -4536,7 +4536,8 @@ static ErlDrvSSizeT inet_ctl_open(inet_descriptor* desc, int domain, int type, /* as inet_open but pass in an open socket (MUST BE OF RIGHT TYPE) */ static ErlDrvSSizeT inet_ctl_fdopen(inet_descriptor* desc, int domain, int type, - SOCKET s, char** rbuf, ErlDrvSizeT rsize) + SOCKET s, Uint32 bound, + char** rbuf, ErlDrvSizeT rsize) { inet_address name; unsigned int sz = sizeof(name); @@ -4560,7 +4561,12 @@ static ErlDrvSSizeT inet_ctl_fdopen(inet_descriptor* desc, int domain, int type, #ifdef __WIN32__ driver_select(desc->port, desc->event, ERL_DRV_READ, 1); #endif - desc->state = INET_STATE_BOUND; /* assume bound */ + + if (bound) + desc->state = INET_STATE_BOUND; + else + desc->state = INET_STATE_OPEN; + if (type == SOCK_STREAM) { /* check if connected */ sz = sizeof(name); if (!IS_SOCKET_ERROR(sock_peer(s, (struct sockaddr*) &name, &sz))) { @@ -5772,7 +5778,7 @@ done: ia_p->Ipv6IfIndex && ia_p->Ipv6IfIndex != index) { - /* Oops, there was an other interface for IPv6. Possible? XXX */ + /* Oops, there was another interface for IPv6. Possible? 
XXX */ index = ia_p->Ipv6IfIndex; goto index; } @@ -9121,10 +9127,11 @@ static ErlDrvSSizeT tcp_inet_ctl(ErlDrvData e, unsigned int cmd, break; } - case INET_REQ_FDOPEN: { /* pass in an open socket */ + case INET_REQ_FDOPEN: { /* pass in an open (and optionally bound) socket */ int domain; + int bound; DEBUGF(("tcp_inet_ctl(%ld): FDOPEN\r\n", (long)desc->inet.port)); - if (len != 6) return ctl_error(EINVAL, rbuf, rsize); + if (len != 6 && len != 10) return ctl_error(EINVAL, rbuf, rsize); switch(buf[0]) { case INET_AF_INET: domain = AF_INET; @@ -9142,8 +9149,13 @@ static ErlDrvSSizeT tcp_inet_ctl(ErlDrvData e, unsigned int cmd, return ctl_error(EINVAL, rbuf, rsize); } if (buf[1] != INET_TYPE_STREAM) return ctl_error(EINVAL, rbuf, rsize); + + if (len == 6) bound = 1; + else bound = get_int32(buf+2+4); + return inet_ctl_fdopen(INETP(desc), domain, SOCK_STREAM, - (SOCKET) get_int32(buf+2), rbuf, rsize); + (SOCKET) get_int32(buf+2), + bound, rbuf, rsize); break; } @@ -11116,10 +11128,11 @@ static ErlDrvSSizeT packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, return replen; - case INET_REQ_FDOPEN: { /* pass in an open (and bound) socket */ + case INET_REQ_FDOPEN: { /* pass in an open (and optionally bound) socket */ SOCKET s; + int bound; DEBUGF(("packet inet_ctl(%ld): FDOPEN\r\n", (long)desc->port)); - if (len != 6) { + if (len != 6 && len != 10) { return ctl_error(EINVAL, rbuf, rsize); } switch (buf[0]) { @@ -11144,7 +11157,11 @@ static ErlDrvSSizeT packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, return ctl_error(EINVAL, rbuf, rsize); } s = (SOCKET)get_int32(buf+2); - replen = inet_ctl_fdopen(desc, af, type, s, rbuf, rsize); + + if (len == 6) bound = 1; + else bound = get_int32(buf+2+4); + + replen = inet_ctl_fdopen(desc, af, type, s, bound, rbuf, rsize); if ((*rbuf)[0] != INET_REP_ERROR) { if (desc->active) diff --git a/erts/emulator/drivers/unix/multi_drv.c b/erts/emulator/drivers/unix/multi_drv.c index 822c96730c..724d325ed5 100644 --- a/erts/emulator/drivers/unix/multi_drv.c +++ b/erts/emulator/drivers/unix/multi_drv.c @@ -20,7 +20,7 @@ /* Purpose: Multidriver interface This is an example of a driver which allows multiple instances of itself. I.e have one erlang process execute open_port(multi......) and - at the same time have an other erlang process open an other port + at the same time have another erlang process open another port running multi there as well. */ diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c index 0a58a625b2..aa412a20c8 100644 --- a/erts/emulator/sys/common/erl_poll.c +++ b/erts/emulator/sys/common/erl_poll.c @@ -2157,7 +2157,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps, #ifdef ERTS_POLL_DEBUG_PRINT erts_printf("Entering erts_poll_wait(), timeout=%d\n", - (int) tv->tv_sec*1000 + tv->tv_usec/1000); + (int) tvp->tv_sec*1000 + tvp->tv_usec/1000); #endif if (ERTS_POLLSET_SET_POLLED_CHK(ps)) { diff --git a/erts/emulator/test/Makefile b/erts/emulator/test/Makefile index 0b0568c31a..dfbe47786a 100644 --- a/erts/emulator/test/Makefile +++ b/erts/emulator/test/Makefile @@ -31,6 +31,7 @@ MODULES= \ a_SUITE \ after_SUITE \ alloc_SUITE \ + async_ports_SUITE \ beam_SUITE \ beam_literals_SUITE \ bif_SUITE \ diff --git a/erts/emulator/test/async_ports_SUITE.erl b/erts/emulator/test/async_ports_SUITE.erl new file mode 100644 index 0000000000..c89b3655ff --- /dev/null +++ b/erts/emulator/test/async_ports_SUITE.erl @@ -0,0 +1,118 @@ +-module(async_ports_SUITE). + +-include_lib("common_test/include/ct.hrl"). 
+ +-compile(export_all). + +-define(PACKET_SIZE, (10 * 1024 * 8)). +-define(CPORT_DELAY, 100). +-define(TEST_LOOPS_COUNT, 100000). +-define(SLEEP_BEFORE_CHECK, 1000). +-define(TEST_PROCS_COUNT, 2). +-define(TC_TIMETRAP_SECONDS, 10). + +suite() -> [{ct_hooks,[ts_install_cth]}]. + +all() -> + [ + permanent_busy_test + ]. + +permanent_busy_test(Config) -> + ct:timetrap({seconds, ?TC_TIMETRAP_SECONDS}), + ExePath = filename:join(?config(data_dir, Config), "cport"), + + Self = self(), + spawn_link( + fun() -> + Block = <<0:?PACKET_SIZE>>, + + Port = open_port(ExePath), + + Testers = + lists:map( + fun(_) -> + erlang:spawn_link(?MODULE, run_loop, + [Self, + Port, + Block, + ?TEST_LOOPS_COUNT, + 0]) + end, + lists:seq(1, ?TEST_PROCS_COUNT)), + Self ! {test_info, Port, Testers}, + endless_flush(Port) + end), + + receive + {test_info, Port, Testers} -> + MaxWaitTime = round(0.7 * ?TC_TIMETRAP_SECONDS * 1000), + ct:log("wait testers, maximum ~w mcsec~n", [MaxWaitTime]), + ok = wait_testers(MaxWaitTime, Testers), + timer:sleep(?SLEEP_BEFORE_CHECK), + case erlang:port_command(Port, <<"test">>, [nosuspend]) of + false -> + exit(port_dead); + true -> + ok + end + end. + +wait_testers(Timeout, Testers) -> + lists:foldl( + fun(Pid, AccIn) -> + StartWait = os:timestamp(), + receive + {Pid, port_dead} -> + recalc_timeout(AccIn, StartWait) + after AccIn -> + Pid ! stop, + recalc_timeout(AccIn, StartWait) + end + end, Timeout, Testers), + ok. + +recalc_timeout(TimeoutIn, WaitStart) -> + erlang:max(0, TimeoutIn - round(timer:now_diff(os:timestamp(), WaitStart)) div 1000). + +open_port(ExePath) -> + erlang:open_port({spawn, ExePath ++ " 100"}, [{packet, 4}, eof, exit_status, use_stdio, binary]). + +run_loop(RootProc, Port, Block, CheckLimit, BusyCnt) -> + receive + stop -> + ok + after 0 -> + case erlang:port_command(Port, Block, [nosuspend]) of + true -> + run_loop(RootProc, Port, Block, CheckLimit, 0); + false -> + if + BusyCnt + 1 > CheckLimit -> + check_dead(RootProc, Port, Block, CheckLimit); + true -> + run_loop(RootProc, Port, Block, CheckLimit, BusyCnt + 1) + end + end + end. + +check_dead(RootProc, Port, Block, CheckLimit) -> + ct:log("~p: check port dead~n", [self()]), + timer:sleep(?SLEEP_BEFORE_CHECK), + case erlang:port_command(Port, Block, [nosuspend]) of + true -> + ct:log("not dead~n"), + run_loop(RootProc, Port, Block, CheckLimit, 0); + false -> + ct:log("port dead: ~p~n", [Port]), + RootProc ! {self(), port_dead}, + ok + end. + +endless_flush(Port) -> + receive + {Port, {data, _}} -> + endless_flush(Port); + {Port, SomethingWrong} -> + erlang:error({someting_wrong, SomethingWrong}) + end. 
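The permanent_busy_test above hinges on erlang:port_command(Port, Data, [nosuspend]) returning false while the port is busy. For a linked-in driver the same busy condition can be raised explicitly with set_busy_port() once its queue passes a high watermark; the sketch below is a minimal illustration of that mechanism only (the spawn driver used with cport manages this internally, and the callback and watermark names here are invented for the example):

#include "erl_driver.h"

/* Minimal sketch: buffer outgoing data and mark the port busy once the
 * driver queue grows past an arbitrary watermark. While the port is busy,
 * port_command/3 with the nosuspend option returns false instead of
 * suspending the calling process. */
#define HYPO_HIGH_WATERMARK (128 * 1024)

static void hypo_output(ErlDrvData handle, char *buf, ErlDrvSizeT len)
{
    ErlDrvPort port = (ErlDrvPort) handle;   /* assumes the driver stored its port here */

    driver_enq(port, buf, len);              /* queue the data for later writing */
    if (driver_sizeq(port) > HYPO_HIGH_WATERMARK)
        set_busy_port(port, 1);              /* raise busy; cleared again with
                                                set_busy_port(port, 0) once the
                                                queue has drained */
}

In the suite above the busy state comes from the spawn driver's own queue limits rather than an explicit set_busy_port() call; the sketch only shows where that state originates on the driver side.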
diff --git a/erts/emulator/test/async_ports_SUITE_data/Makefile.src b/erts/emulator/test/async_ports_SUITE_data/Makefile.src new file mode 100644 index 0000000000..56da3fbe12 --- /dev/null +++ b/erts/emulator/test/async_ports_SUITE_data/Makefile.src @@ -0,0 +1,15 @@ +CC = @CC@ +LD = @LD@ +CFLAGS = @CFLAGS@ @DEFS@ +CROSSLDFLAGS = @CROSSLDFLAGS@ + +PROGS = cport@exe@ + + +all: $(PROGS) + +cport@exe@: cport@obj@ + $(LD) $(CROSSLDFLAGS) -o cport cport@obj@ @LIBS@ + +cport@obj@: cport.c + $(CC) -c -o cport@obj@ $(CFLAGS) cport.c diff --git a/erts/emulator/test/async_ports_SUITE_data/cport.c b/erts/emulator/test/async_ports_SUITE_data/cport.c new file mode 100644 index 0000000000..033aff382a --- /dev/null +++ b/erts/emulator/test/async_ports_SUITE_data/cport.c @@ -0,0 +1,81 @@ +#include <stdlib.h> +#include <stdio.h> +#include <errno.h> +#include <string.h> +#ifdef __WIN32__ +# include "windows.h" +# include "winbase.h" +#else +# include <unistd.h> +#endif + +typedef unsigned char byte; + +int read_cmd(byte *buf) +{ + int len; + if (read_exact(buf, 4) != 4) + return(-1); + + len = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]; + return read_exact(buf, len); +} + +int write_cmd(byte *buf, int len) +{ + byte li[4]; + li[0] = (len >> 24) & 0xff; + li[1] = (len >> 16) & 0xff; + li[2] = (len >> 8) & 0xff; + li[3] = len & 0xff; + write_exact(&li, 4); + + return write_exact(buf, len); +} + +int read_exact(byte *buf, int len) +{ + int i, got=0; + do { + if ((i = read(0, buf+got, len-got)) <= 0) + { + return(i); + } + got += i; + } while (got<len); + return len; +} + +int write_exact(byte *buf, int len) +{ + int i, wrote = 0; + do { + if ((i = write(1, buf+wrote, len-wrote)) < 0) + return (i); + wrote += i; + } while (wrote<len); + return len; +} + +byte static_buf[31457280]; // 30 mb + +int main(int argc, char **argv) { + int sleep_time = atoi(argv[1]); + int fn, arg, res; + byte *buf = &static_buf[0]; + int len = 0; + if (sleep_time <= 0) + sleep_time = 0; +#ifdef __WIN32__ + else + sleep_time = ((sleep_time - 1) / 1000) + 1; /* Milli seconds */ +#endif + while ((len = read_cmd(buf)) > 0) { +#ifdef __WIN32__ + Sleep((DWORD) sleep_time); +#else + usleep(sleep_time); +#endif + write_cmd(buf, len); + } +} diff --git a/erts/emulator/test/busy_port_SUITE.erl b/erts/emulator/test/busy_port_SUITE.erl index 4b4af0babe..2ed5aaa0d0 100644 --- a/erts/emulator/test/busy_port_SUITE.erl +++ b/erts/emulator/test/busy_port_SUITE.erl @@ -98,8 +98,10 @@ generator(0, Writer, _Data) -> %% Calling process_info(Pid, current_function) on a suspended process %% used to crash Beam. - {current_function, {erlang, send, 2}} = - process_info(Writer, current_function), + case process_info(Writer, [status,current_function]) of + [{status,suspended},{current_function,{erlang,send,2}}] -> ok; + [{status,suspended},{current_function,{erlang,bif_return_trap,_}}] -> ok + end, unlock_slave(); generator(N, Writer, Data) -> Writer ! {exec, Data}, diff --git a/erts/emulator/test/driver_SUITE.erl b/erts/emulator/test/driver_SUITE.erl index c62bc0c454..344bde7c91 100644 --- a/erts/emulator/test/driver_SUITE.erl +++ b/erts/emulator/test/driver_SUITE.erl @@ -1062,10 +1062,9 @@ otp_6602(Config) when is_list(Config) -> %% Inet driver use port locking... {ok, S} = gen_udp:open(0), {ok, Fd} = inet:getfd(S), - {ok, Port} = inet:port(S), %% Steal fd (lock checker used to %% trigger here). - {ok, _S2} = gen_udp:open(Port,[{fd,Fd}]), + {ok, _S2} = gen_udp:open(0,[{fd,Fd}]), Parent ! 
Done end), ?line receive Done -> ok end, diff --git a/erts/emulator/test/fun_SUITE.erl b/erts/emulator/test/fun_SUITE.erl index 8ad5f290ed..2968f5bebb 100644 --- a/erts/emulator/test/fun_SUITE.erl +++ b/erts/emulator/test/fun_SUITE.erl @@ -30,7 +30,7 @@ fun_to_port/1,t_hash/1,t_phash/1,t_phash2/1,md5/1, refc/1,refc_ets/1,refc_dist/1, const_propagation/1,t_arity/1,t_is_function2/1, - t_fun_info/1]). + t_fun_info/1,t_fun_info_mfa/1]). -export([nothing/0]). @@ -42,7 +42,8 @@ all() -> [bad_apply, bad_fun_call, badarity, ext_badarity, equality, ordering, fun_to_port, t_hash, t_phash, t_phash2, md5, refc, refc_ets, refc_dist, - const_propagation, t_arity, t_is_function2, t_fun_info]. + const_propagation, t_arity, t_is_function2, t_fun_info, + t_fun_info_mfa]. groups() -> []. @@ -824,6 +825,24 @@ t_fun_info(Config) when is_list(Config) -> ?line bad_info(<<1,2>>), ok. +t_fun_info_mfa(Config) when is_list(Config) -> + Fun1 = fun spawn_call/2, + {module,M1} = erlang:fun_info(Fun1, module), + {name,F1} = erlang:fun_info(Fun1, name), + {arity,A1} = erlang:fun_info(Fun1, arity), + {M1,F1,A1=2} = erlang:fun_info_mfa(Fun1), + %% Module fun. + Fun2 = fun ?MODULE:t_fun_info/1, + {module,M2} = erlang:fun_info(Fun2, module), + {name,F2} = erlang:fun_info(Fun2, name), + {arity,A2} = erlang:fun_info(Fun2, arity), + {M2,F2,A2=1} = erlang:fun_info_mfa(Fun2), + + %% Not fun. + {'EXIT',_} = (catch erlang:fun_info_mfa(id(d))), + ok. + + bad_info(Term) -> try erlang:fun_info(Term, module) of Any -> diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl index b2da6f58af..14e6585220 100644 --- a/erts/emulator/test/nif_SUITE.erl +++ b/erts/emulator/test/nif_SUITE.erl @@ -37,7 +37,9 @@ threading/1, send/1, send2/1, send3/1, send_threaded/1, neg/1, is_checks/1, get_length/1, make_atom/1, make_string/1, reverse_list_test/1, - otp_9668/1, consume_timeslice/1, dirty_nif/1, dirty_nif_send/1 + otp_9828/1, + otp_9668/1, consume_timeslice/1, dirty_nif/1, dirty_nif_send/1, + dirty_nif_exception/1, nif_schedule/1 ]). -export([many_args_100/100]). @@ -64,7 +66,9 @@ all() -> resource_takeover, threading, send, send2, send3, send_threaded, neg, is_checks, get_length, make_atom, make_string,reverse_list_test, - otp_9668, consume_timeslice, dirty_nif, dirty_nif_send + otp_9828, + otp_9668, consume_timeslice, + nif_schedule, dirty_nif, dirty_nif_send, dirty_nif_exception ]. groups() -> @@ -1440,6 +1444,20 @@ otp_9668(Config) -> ?line verify_tmpmem(TmpMem), ok. +otp_9828(doc) -> ["Copy of writable binary"]; +otp_9828(Config) -> + ensure_lib_loaded(Config, 1), + + otp_9828_loop(<<"I'm alive!">>, 1000). + +otp_9828_loop(Bin, 0) -> + ok; +otp_9828_loop(Bin, Val) -> + WrtBin = <<Bin/binary, Val:32>>, + ok = otp_9828_nif(WrtBin), + otp_9828_loop(WrtBin, Val-1). + + consume_timeslice(Config) when is_list(Config) -> CONTEXT_REDS = 2000, Me = self(), @@ -1524,6 +1542,20 @@ consume_timeslice(Config) when is_list(Config) -> ok. +nif_schedule(Config) when is_list(Config) -> + ensure_lib_loaded(Config), + A = "this is a string", + B = {this,is,a,tuple}, + {B,A} = call_nif_schedule(A, B), + ok = try call_nif_schedule(1, 2) + catch + error:badarg -> + [{?MODULE,call_nif_schedule,[1,2],_}|_] = + erlang:get_stacktrace(), + ok + end, + ok. + dirty_nif(Config) when is_list(Config) -> try erlang:system_info(dirty_cpu_schedulers) of N when is_integer(N) -> @@ -1556,6 +1588,24 @@ dirty_nif_send(Config) when is_list(Config) -> {skipped,"No dirty scheduler support"} end. 
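The dirty_nif* cases below exercise the 2.7 NIF API introduced earlier in this diff: the fourth flags member of ErlNifFunc and the generic enif_schedule_nif() that replaces enif_schedule_dirty_nif(). As a rough sketch of how a NIF library would use it (module and function names here are invented, not part of the patch):

#include "erl_nif.h"

/* Sketch only: hand CPU-heavy work off to a dirty CPU scheduler via the
 * 2.7 API. heavy_work() and start_work() are hypothetical. */
static ERL_NIF_TERM heavy_work(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* runs on a dirty CPU scheduler when rescheduled below */
    return enif_make_atom(env, "done");
}

static ERL_NIF_TERM start_work(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
    /* the calling process traps out and later resumes with heavy_work()'s result */
    return enif_schedule_nif(env, "heavy_work", ERL_NIF_DIRTY_JOB_CPU_BOUND,
                             heavy_work, argc, argv);
#else
    return heavy_work(env, argc, argv);  /* no dirty schedulers: run inline */
#endif
}

static ErlNifFunc nif_funcs[] = {
    /* the 4th (flags) member is new in 2.7; 0 keeps this a normal NIF, while
       ERL_NIF_DIRTY_JOB_CPU_BOUND here would instead make the loader patch in
       schedule_dirty_cpu_nif, as in the load_nif_2 hunk above */
    {"start_work", 1, start_work, 0}
};

ERL_NIF_INIT(hypothetical_module, nif_funcs, NULL, NULL, NULL, NULL)

The call_dirty_nif_exception NIF further down in nif_SUITE.c shows the same call used both to re-schedule onto a dirty scheduler repeatedly and to raise an exception from the final invocation.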
+dirty_nif_exception(Config) when is_list(Config) -> + try erlang:system_info(dirty_cpu_schedulers) of + N when is_integer(N) -> + ensure_lib_loaded(Config), + try + call_dirty_nif_exception(), + ?t:fail(expected_badarg) + catch + error:badarg -> + [{?MODULE,call_dirty_nif_exception,[],_}|_] = + erlang:get_stacktrace(), + ok + end + catch + error:badarg -> + {skipped,"No dirty scheduler support"} + end. + next_msg(_Pid) -> receive M -> M @@ -1684,9 +1734,12 @@ reverse_list(_) -> ?nif_stub. echo_int(_) -> ?nif_stub. type_sizes() -> ?nif_stub. otp_9668_nif(_) -> ?nif_stub. +otp_9828_nif(_) -> ?nif_stub. consume_timeslice_nif(_,_) -> ?nif_stub. +call_nif_schedule(_,_) -> ?nif_stub. call_dirty_nif(_,_,_) -> ?nif_stub. send_from_dirty_nif(_) -> ?nif_stub. +call_dirty_nif_exception() -> ?nif_stub. %% maps is_map_nif(_) -> ?nif_stub. diff --git a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c index 955dc64189..ff5fb8c5af 100644 --- a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c +++ b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c @@ -1473,6 +1473,26 @@ static ERL_NIF_TERM otp_9668_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM ar return atom_ok; } +static ERL_NIF_TERM otp_9828_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + /* copy a writable binary could reallocate it due to "emasculation" + and thereby render a previous inspection invalid. + */ + ErlNifBinary bin1; + ErlNifEnv* myenv; + + if (!enif_inspect_binary(env, argv[0], &bin1)) { + return enif_make_badarg(env); + } + + myenv = enif_alloc_env(); + enif_make_copy(myenv, argv[0]); + enif_free_env(myenv); + + return memcmp(bin1.data, "I'm alive!", 10)==0 ? atom_ok : atom_false; +} + + static ERL_NIF_TERM consume_timeslice_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { int percent; @@ -1493,6 +1513,31 @@ static ERL_NIF_TERM consume_timeslice_nif(ErlNifEnv* env, int argc, const ERL_NI } } +static ERL_NIF_TERM nif_sched2(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + char s[64]; + if (!enif_get_string(env, argv[2], s, sizeof s, ERL_NIF_LATIN1)) + return enif_make_badarg(env); + return enif_make_tuple2(env, argv[3], argv[2]); +} + +static ERL_NIF_TERM nif_sched1(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + ERL_NIF_TERM new_argv[4]; + new_argv[0] = enif_make_atom(env, "garbage0"); + new_argv[1] = enif_make_atom(env, "garbage1"); + new_argv[2] = argv[0]; + new_argv[3] = argv[1]; + return enif_schedule_nif(env, "nif_sched2", 0, nif_sched2, 4, new_argv); +} + +static ERL_NIF_TERM call_nif_schedule(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + if (argc != 2) + return enif_make_atom(env, "false"); + return enif_schedule_nif(env, "nif_sched1", 0, nif_sched1, argc, argv); +} + #ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT static ERL_NIF_TERM dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { @@ -1507,11 +1552,10 @@ static ERL_NIF_TERM dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[ enif_get_int(env, argv[0], &n); enif_get_string(env, argv[1], s, sizeof s, ERL_NIF_LATIN1); enif_inspect_binary(env, argv[2], &b); - result = enif_make_tuple3(env, - enif_make_int(env, n), - enif_make_string(env, s, ERL_NIF_LATIN1), - enif_make_binary(env, &b)); - return enif_schedule_dirty_nif_finalizer(env, result, enif_dirty_nif_finalizer); + return enif_make_tuple3(env, + enif_make_int(env, n), + enif_make_string(env, s, ERL_NIF_LATIN1), + enif_make_binary(env, &b)); } static ERL_NIF_TERM call_dirty_nif(ErlNifEnv* env, int argc, const 
ERL_NIF_TERM argv[]) @@ -1526,7 +1570,7 @@ static ERL_NIF_TERM call_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM if (enif_get_int(env, argv[0], &n) && enif_get_string(env, argv[1], s, sizeof s, ERL_NIF_LATIN1) && enif_inspect_binary(env, argv[2], &b)) - return enif_schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_CPU_BOUND, dirty_nif, argc, argv); + return enif_schedule_nif(env, "call_dirty_nif", ERL_NIF_DIRTY_JOB_CPU_BOUND, dirty_nif, argc, argv); else return enif_make_badarg(env); } else { @@ -1534,35 +1578,42 @@ static ERL_NIF_TERM call_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM } } -static ERL_NIF_TERM dirty_sender(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +static ERL_NIF_TERM send_from_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { ERL_NIF_TERM result; ErlNifPid pid; ErlNifEnv* menv; int res; - enif_get_local_pid(env, argv[0], &pid); + if (!enif_get_local_pid(env, argv[0], &pid)) + return enif_make_badarg(env); result = enif_make_tuple2(env, enif_make_atom(env, "ok"), enif_make_pid(env, &pid)); menv = enif_alloc_env(); res = enif_send(env, &pid, menv, result); enif_free_env(menv); if (!res) - /* Note the next line will crash, since dirty nifs can't return exceptions. - * This is intentional, since enif_send should not fail if the test succeeds. - */ - return enif_schedule_dirty_nif_finalizer(env, enif_make_badarg(env), enif_dirty_nif_finalizer); + return enif_make_badarg(env); else - return enif_schedule_dirty_nif_finalizer(env, result, enif_dirty_nif_finalizer); + return result; } -static ERL_NIF_TERM send_from_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +static ERL_NIF_TERM call_dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { - ERL_NIF_TERM result; - ErlNifPid pid; - - if (!enif_get_local_pid(env, argv[0], &pid)) + switch (argc) { + case 0: { + ERL_NIF_TERM args[255]; + int i; + for (i = 0; i < 255; i++) + args[i] = enif_make_int(env, i); + return enif_schedule_nif(env, "call_dirty_nif_exception", ERL_NIF_DIRTY_JOB_CPU_BOUND, + call_dirty_nif_exception, 255, argv); + } + case 1: return enif_make_badarg(env); - return enif_schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_CPU_BOUND, dirty_sender, argc, argv); + default: + return enif_schedule_nif(env, "call_dirty_nif_exception", ERL_NIF_DIRTY_JOB_CPU_BOUND, + call_dirty_nif_exception, argc-1, argv); + } } #endif @@ -1741,10 +1792,13 @@ static ErlNifFunc nif_funcs[] = {"echo_int", 1, echo_int}, {"type_sizes", 0, type_sizes}, {"otp_9668_nif", 1, otp_9668_nif}, + {"otp_9828_nif", 1, otp_9828_nif}, {"consume_timeslice_nif", 2, consume_timeslice_nif}, + {"call_nif_schedule", 2, call_nif_schedule}, #ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT {"call_dirty_nif", 3, call_dirty_nif}, - {"send_from_dirty_nif", 1, send_from_dirty_nif}, + {"send_from_dirty_nif", 1, send_from_dirty_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND}, + {"call_dirty_nif_exception", 0, call_dirty_nif_exception, ERL_NIF_DIRTY_JOB_IO_BOUND}, #endif {"is_map_nif", 1, is_map_nif}, {"get_map_size_nif", 1, get_map_size_nif}, diff --git a/erts/emulator/test/num_bif_SUITE.erl b/erts/emulator/test/num_bif_SUITE.erl index ff8d18eef8..8cf8377c30 100644 --- a/erts/emulator/test/num_bif_SUITE.erl +++ b/erts/emulator/test/num_bif_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2013. All Rights Reserved. +%% Copyright Ericsson AB 1997-2014. All Rights Reserved. 
%% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -394,18 +394,15 @@ t_string_to_integer(Config) when is_list(Config) -> test_sti(268435455), test_sti(-268435455), - %% 1 bsl 28 - 1, just before 32 bit bignum - test_sti(1 bsl 28 - 1), - %% 1 bsl 28, just beyond 32 bit small - test_sti(1 bsl 28), - %% 1 bsl 33, just beyond 32 bit - test_sti(1 bsl 33), - %% 1 bsl 60 - 1, just before 64 bit bignum - test_sti(1 bsl 60 - 1), - %% 1 bsl 60, just beyond 64 bit small - test_sti(1 bsl 60), - %% 1 bsl 65, just beyond 64 bit - test_sti(1 bsl 65), + % Interesting values around 2-pows, such as MIN_SMALL and MAX_SMALL. + lists:foreach(fun(Bits) -> + N = 1 bsl Bits, + test_sti(N - 1), + test_sti(N), + test_sti(N + 1) + end, + lists:seq(16, 130)), + %% Bignums. test_sti(123456932798748738738,16), test_sti(list_to_integer(lists:duplicate(2000, $1))), @@ -454,10 +451,11 @@ test_sti(Num) -> end|| Base <- lists:seq(2,36)]. test_sti(Num,Base) -> - Num = list_to_integer(int2list(Num,Base),Base), - Num = -1*list_to_integer(int2list(Num*-1,Base),Base), - Num = binary_to_integer(int2bin(Num,Base),Base), - Num = -1*binary_to_integer(int2bin(Num*-1,Base),Base). + Neg = -Num, + Num = list_to_integer(int2list(Num,Base),Base), + Neg = list_to_integer(int2list(Num*-1,Base),Base), + Num = binary_to_integer(int2bin(Num,Base),Base), + Neg = binary_to_integer(int2bin(Num*-1,Base),Base). % Calling this function (which is not supposed to be inlined) prevents % the compiler from calculating the answer, so we don't test the compiler |