Diffstat (limited to 'erts/emulator/beam')
120 files changed, 10468 insertions, 15876 deletions
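Beyond the new arith_instrs.tab file at the top, most hunks below repeat one mechanical change: with the non-SMP emulator and its conditional compilation removed, every erts_smp_* lock/atomic wrapper becomes its plain erts_* counterpart, and the #ifdef ERTS_SMP / #ifdef ERTS_DIRTY_SCHEDULERS branches are dropped because the threaded runtime with dirty schedulers is now the only build. A minimal sketch of the before/after pattern, taken from the atom.c hunk further down (the erl_threads.h include is an assumption on my part; this diff does not show where the plain primitives are declared):

    #include "erl_threads.h"       /* assumed home of the erts_rwmtx_* API */

    /*
     * Before (removed by this commit):
     *     static erts_smp_rwmtx_t atom_table_lock;
     *     #define atom_read_lock() erts_smp_rwmtx_rlock(&atom_table_lock)
     *
     * After: the same primitive without the smp_ infix, unconditionally
     * thread-safe, with no #ifdef ERTS_SMP fallback to a non-locking build.
     */
    static erts_rwmtx_t atom_table_lock;

    #define atom_read_lock()    erts_rwmtx_rlock(&atom_table_lock)
    #define atom_read_unlock()  erts_rwmtx_runlock(&atom_table_lock)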
diff --git a/erts/emulator/beam/arith_instrs.tab b/erts/emulator/beam/arith_instrs.tab new file mode 100644 index 0000000000..b828e86788 --- /dev/null +++ b/erts/emulator/beam/arith_instrs.tab @@ -0,0 +1,396 @@ +// -*- c -*- +// +// %CopyrightBegin% +// +// Copyright Ericsson AB 2017. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// %CopyrightEnd% +// + +OUTLINED_ARITH_2(Fail, Live, Name, BIF, Op1, Op2, Dst) { + Eterm result; + Uint live = $Live; + HEAVY_SWAPOUT; + reg[live] = $Op1; + reg[live+1] = $Op2; + result = erts_gc_$Name (c_p, reg, live); + HEAVY_SWAPIN; + ERTS_HOLE_CHECK(c_p); + if (ERTS_LIKELY(is_value(result))) { + $REFRESH_GEN_DEST(); + $Dst = result; + $NEXT0(); + } + $BIF_ERROR_ARITY_2($Fail, $BIF, reg[live], reg[live+1]); +} + + +i_plus := plus.fetch.execute; + +plus.head() { + Eterm PlusOp1, PlusOp2; +} + +plus.fetch(Op1, Op2) { + PlusOp1 = $Op1; + PlusOp2 = $Op2; +} + +plus.execute(Fail, Live, Dst) { + if (ERTS_LIKELY(is_both_small(PlusOp1, PlusOp2))) { + Sint i = signed_val(PlusOp1) + signed_val(PlusOp2); + if (ERTS_LIKELY(IS_SSMALL(i))) { + $Dst = make_small(i); + $NEXT0(); + } + } + $OUTLINED_ARITH_2($Fail, $Live, mixed_plus, BIF_splus_2, PlusOp1, PlusOp2, $Dst); +} + +i_minus := minus.fetch.execute; + +minus.head() { + Eterm MinusOp1, MinusOp2; +} + +minus.fetch(Op1, Op2) { + MinusOp1 = $Op1; + MinusOp2 = $Op2; +} + +minus.execute(Fail, Live, Dst) { + if (ERTS_LIKELY(is_both_small(MinusOp1, MinusOp2))) { + Sint i = signed_val(MinusOp1) - signed_val(MinusOp2); + if (ERTS_LIKELY(IS_SSMALL(i))) { + $Dst = make_small(i); + $NEXT0(); + } + } + $OUTLINED_ARITH_2($Fail, $Live, mixed_minus, BIF_sminus_2, MinusOp1, MinusOp2, $Dst); +} + +i_increment := increment.fetch.execute; + +increment.head() { + Eterm increment_reg_val; +} + +increment.fetch(Src) { + increment_reg_val = $Src; +} + +increment.execute(IncrementVal, Live, Dst) { + Eterm increment_val = $IncrementVal; + Uint live; + Eterm result; + + if (ERTS_LIKELY(is_small(increment_reg_val))) { + Sint i = signed_val(increment_reg_val) + increment_val; + if (ERTS_LIKELY(IS_SSMALL(i))) { + $Dst = make_small(i); + $NEXT0(); + } + } + live = $Live; + HEAVY_SWAPOUT; + reg[live] = increment_reg_val; + reg[live+1] = make_small(increment_val); + result = erts_gc_mixed_plus(c_p, reg, live); + HEAVY_SWAPIN; + ERTS_HOLE_CHECK(c_p); + if (ERTS_LIKELY(is_value(result))) { + $REFRESH_GEN_DEST(); + $Dst = result; + $NEXT0(); + } + ASSERT(c_p->freason != BADMATCH || is_value(c_p->fvalue)); + goto find_func_info; +} + +i_times(Fail, Live, Op1, Op2, Dst) { + Eterm op1 = $Op1; + Eterm op2 = $Op2; + $OUTLINED_ARITH_2($Fail, $Live, mixed_times, BIF_stimes_2, op1, op2, $Dst); +} + +i_m_div(Fail, Live, Op1, Op2, Dst) { + Eterm op1 = $Op1; + Eterm op2 = $Op2; + $OUTLINED_ARITH_2($Fail, $Live, mixed_div, BIF_div_2, op1, op2, $Dst); +} + +i_int_div(Fail, Live, Op1, Op2, Dst) { + Eterm op1 = $Op1; + Eterm op2 = $Op2; + if (ERTS_UNLIKELY(op2 == SMALL_ZERO)) { + c_p->freason = BADARITH; + $BIF_ERROR_ARITY_2($Fail, 
BIF_intdiv_2, op1, op2); + } else if (ERTS_LIKELY(is_both_small(op1, op2))) { + Sint ires = signed_val(op1) / signed_val(op2); + if (ERTS_LIKELY(IS_SSMALL(ires))) { + $Dst = make_small(ires); + $NEXT0(); + } + } + $OUTLINED_ARITH_2($Fail, $Live, int_div, BIF_intdiv_2, op1, op2, $Dst); +} + +i_rem := rem.fetch.execute; + +rem.head() { + Eterm RemOp1, RemOp2; +} + +rem.fetch(Src1, Src2) { + RemOp1 = $Src1; + RemOp2 = $Src2; +} + +rem.execute(Fail, Live, Dst) { + if (ERTS_UNLIKELY(RemOp2 == SMALL_ZERO)) { + c_p->freason = BADARITH; + $BIF_ERROR_ARITY_2($Fail, BIF_rem_2, RemOp1, RemOp2); + } else if (ERTS_LIKELY(is_both_small(RemOp1, RemOp2))) { + $Dst = make_small(signed_val(RemOp1) % signed_val(RemOp2)); + $NEXT0(); + } else { + $OUTLINED_ARITH_2($Fail, $Live, int_rem, BIF_rem_2, RemOp1, RemOp2, $Dst); + } +} + +i_band := band.fetch.execute; + +band.head() { + Eterm BandOp1, BandOp2; +} + +band.fetch(Src1, Src2) { + BandOp1 = $Src1; + BandOp2 = $Src2; +} + +band.execute(Fail, Live, Dst) { + if (ERTS_LIKELY(is_both_small(BandOp1, BandOp2))) { + /* + * No need to untag -- TAG & TAG == TAG. + */ + $Dst = BandOp1 & BandOp2; + $NEXT0(); + } + $OUTLINED_ARITH_2($Fail, $Live, band, BIF_band_2, BandOp1, BandOp2, $Dst); +} + +i_bor(Fail, Live, Src1, Src2, Dst) { + if (ERTS_LIKELY(is_both_small($Src1, $Src2))) { + /* + * No need to untag -- TAG | TAG == TAG. + */ + $Dst = $Src1 | $Src2; + $NEXT0(); + } + $OUTLINED_ARITH_2($Fail, $Live, bor, BIF_bor_2, $Src1, $Src2, $Dst); +} + +i_bxor(Fail, Live, Src1, Src2, Dst) { + if (ERTS_LIKELY(is_both_small($Src1, $Src2))) { + /* + * TAG ^ TAG == 0. + * + * Therefore, we perform the XOR operation on the tagged values, + * and OR in the tag bits. + */ + $Dst = ($Src1 ^ $Src2) | make_small(0); + $NEXT0(); + } + $OUTLINED_ARITH_2($Fail, $Live, bxor, BIF_bxor_2, $Src1, $Src2, $Dst); +} + +i_bsl := shift.setup_bsl.execute; +i_bsr := shift.setup_bsr.execute; + +shift.head() { + Eterm Op1, Op2; + Sint shift_left_count; +} + +shift.setup_bsr(Src1, Src2) { + Op1 = $Src1; + Op2 = $Src2; + shift_left_count = 0; + if (ERTS_LIKELY(is_small(Op2))) { + shift_left_count = -signed_val(Op2); + } else if (is_big(Op2)) { + /* + * N bsr NegativeBigNum == N bsl MAX_SMALL + * N bsr PositiveBigNum == N bsl MIN_SMALL + */ + shift_left_count = make_small(bignum_header_is_neg(*big_val(Op2)) ? + MAX_SMALL : MIN_SMALL); + } +} + +shift.setup_bsl(Src1, Src2) { + Op1 = $Src1; + Op2 = $Src2; + shift_left_count = 0; + if (ERTS_LIKELY(is_small(Op2))) { + shift_left_count = signed_val(Op2); + } else if (is_big(Op2)) { + if (bignum_header_is_neg(*big_val(Op2))) { + /* + * N bsl NegativeBigNum is either 0 or -1, depending on + * the sign of N. Since we don't believe this case + * is common, do the calculation with the minimum + * amount of code. + */ + shift_left_count = MIN_SMALL; + } else if (is_integer(Op1)) { + /* + * N bsl PositiveBigNum is too large to represent. + */ + shift_left_count = MAX_SMALL; + } + } +} + +shift.execute(Fail, Live, Dst) { + Uint big_words_needed; + + if (ERTS_LIKELY(is_small(Op1))) { + Sint int_res = signed_val(Op1); + if (ERTS_UNLIKELY(shift_left_count == 0 || int_res == 0)) { + if (ERTS_UNLIKELY(is_not_integer(Op2))) { + goto shift_error; + } + if (int_res == 0) { + $Dst = Op1; + $NEXT0(); + } + } else if (shift_left_count < 0) { /* Right shift */ + Eterm bsr_res; + shift_left_count = -shift_left_count; + if (shift_left_count >= SMALL_BITS-1) { + bsr_res = (int_res < 0) ? 
SMALL_MINUS_ONE : SMALL_ZERO; + } else { + bsr_res = make_small(int_res >> shift_left_count); + } + $Dst = bsr_res; + $NEXT0(); + } else if (shift_left_count < SMALL_BITS-1) { /* Left shift */ + if ((int_res > 0 && + ((~(Uint)0 << ((SMALL_BITS-1)-shift_left_count)) & int_res) == 0) || + ((~(Uint)0 << ((SMALL_BITS-1)-shift_left_count)) & ~int_res) == 0) { + $Dst = make_small(int_res << shift_left_count); + $NEXT0(); + } + } + big_words_needed = 1; /* big_size(small_to_big(Op1)) */ + goto big_shift; + } else if (is_big(Op1)) { + if (shift_left_count == 0) { + if (is_not_integer(Op2)) { + goto shift_error; + } + $Dst = Op1; + $NEXT0(); + } + big_words_needed = big_size(Op1); + + big_shift: + if (shift_left_count > 0) { /* Left shift. */ + big_words_needed += (shift_left_count / D_EXP); + } else { /* Right shift. */ + if (big_words_needed <= (-shift_left_count / D_EXP)) { + big_words_needed = 3; /* ??? */ + } else { + big_words_needed -= (-shift_left_count / D_EXP); + } + } + { + Eterm tmp_big[2]; + Sint big_need_size = BIG_NEED_SIZE(big_words_needed+1); + + /* + * Slightly conservative check the size to avoid + * allocating huge amounts of memory for bignums that + * clearly would overflow the arity in the header + * word. + */ + if (big_need_size-8 > BIG_ARITY_MAX) { + $SYSTEM_LIMIT($Fail); + } + $GC_TEST_PRESERVE(big_need_size+1, $Live, Op1); + if (is_small(Op1)) { + Op1 = small_to_big(signed_val(Op1), tmp_big); + } + Op1 = big_lshift(Op1, shift_left_count, HTOP); + if (is_big(Op1)) { + HTOP += bignum_header_arity(*HTOP) + 1; + } + HEAP_SPACE_VERIFIED(0); + if (ERTS_UNLIKELY(is_nil(Op1))) { + /* + * This result must have been only slighty larger + * than allowed since it wasn't caught by the + * previous test. + */ + $SYSTEM_LIMIT($Fail); + } + ERTS_HOLE_CHECK(c_p); + $REFRESH_GEN_DEST(); + $Dst = Op1; + $NEXT0(); + } + } + + /* + * One or more non-integer arguments. 
+ */ + shift_error: + c_p->freason = BADARITH; + if ($Fail) { + $FAIL($Fail); + } else { + reg[0] = Op1; + reg[1] = Op2; + SWAPOUT; + if (IsOpCode(I[0], i_bsl_ssjtd)) { + I = handle_error(c_p, I, reg, &bif_export[BIF_bsl_2]->info.mfa); + } else { + ASSERT(IsOpCode(I[0], i_bsr_ssjtd)); + I = handle_error(c_p, I, reg, &bif_export[BIF_bsr_2]->info.mfa); + } + goto post_error_handling; + } +} + +i_int_bnot(Fail, Src, Live, Dst) { + Eterm bnot_val = $Src; + if (ERTS_LIKELY(is_small(bnot_val))) { + bnot_val = make_small(~signed_val(bnot_val)); + } else { + Uint live = $Live; + HEAVY_SWAPOUT; + reg[live] = bnot_val; + bnot_val = erts_gc_bnot(c_p, reg, live); + HEAVY_SWAPIN; + ERTS_HOLE_CHECK(c_p); + if (ERTS_UNLIKELY(is_nil(bnot_val))) { + $BIF_ERROR_ARITY_1($Fail, BIF_bnot_1, reg[live]); + } + $REFRESH_GEN_DEST(); + } + $Dst = bnot_val; +} diff --git a/erts/emulator/beam/atom.c b/erts/emulator/beam/atom.c index 38e02c386f..bbe1cb3e11 100644 --- a/erts/emulator/beam/atom.c +++ b/erts/emulator/beam/atom.c @@ -34,20 +34,18 @@ IndexTable erts_atom_table; /* The index table */ -#include "erl_smp.h" +static erts_rwmtx_t atom_table_lock; -static erts_smp_rwmtx_t atom_table_lock; - -#define atom_read_lock() erts_smp_rwmtx_rlock(&atom_table_lock) -#define atom_read_unlock() erts_smp_rwmtx_runlock(&atom_table_lock) -#define atom_write_lock() erts_smp_rwmtx_rwlock(&atom_table_lock) -#define atom_write_unlock() erts_smp_rwmtx_rwunlock(&atom_table_lock) +#define atom_read_lock() erts_rwmtx_rlock(&atom_table_lock) +#define atom_read_unlock() erts_rwmtx_runlock(&atom_table_lock) +#define atom_write_lock() erts_rwmtx_rwlock(&atom_table_lock) +#define atom_write_unlock() erts_rwmtx_rwunlock(&atom_table_lock) #if 0 #define ERTS_ATOM_PUT_OPS_STAT #endif #ifdef ERTS_ATOM_PUT_OPS_STAT -static erts_smp_atomic_t atom_put_ops; +static erts_atomic_t atom_put_ops; #endif /* Functions for allocating space for the ext of atoms. 
We do not @@ -76,7 +74,7 @@ void atom_info(fmtfn_t to, void *to_arg) index_info(to, to_arg, &erts_atom_table); #ifdef ERTS_ATOM_PUT_OPS_STAT erts_print(to, to_arg, "atom_put_ops: %ld\n", - erts_smp_atomic_read_nob(&atom_put_ops)); + erts_atomic_read_nob(&atom_put_ops)); #endif if (lock) @@ -246,7 +244,7 @@ erts_atom_put_index(const byte *name, int len, ErtsAtomEncoding enc, int trunc) int aix; #ifdef ERTS_ATOM_PUT_OPS_STAT - erts_smp_atomic_inc_nob(&atom_put_ops); + erts_atomic_inc_nob(&atom_put_ops); #endif if (tlen < 0) { @@ -359,32 +357,24 @@ am_atom_put(const char* name, int len) int atom_table_size(void) { int ret; -#ifdef ERTS_SMP int lock = !ERTS_IS_CRASH_DUMPING; if (lock) atom_read_lock(); -#endif ret = erts_atom_table.entries; -#ifdef ERTS_SMP if (lock) atom_read_unlock(); -#endif return ret; } int atom_table_sz(void) { int ret; -#ifdef ERTS_SMP int lock = !ERTS_IS_CRASH_DUMPING; if (lock) atom_read_lock(); -#endif ret = index_table_sz(&erts_atom_table); -#ifdef ERTS_SMP if (lock) atom_read_unlock(); -#endif return ret; } @@ -412,19 +402,15 @@ erts_atom_get(const char *name, int len, Eterm* ap, ErtsAtomEncoding enc) void erts_atom_get_text_space_sizes(Uint *reserved, Uint *used) { -#ifdef ERTS_SMP int lock = !ERTS_IS_CRASH_DUMPING; if (lock) atom_read_lock(); -#endif if (reserved) *reserved = reserved_atom_space; if (used) *used = atom_space; -#ifdef ERTS_SMP if (lock) atom_read_unlock(); -#endif } void @@ -433,16 +419,16 @@ init_atom_table(void) HashFunctions f; int i; Atom a; - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; - rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED; + rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; + rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED; #ifdef ERTS_ATOM_PUT_OPS_STAT - erts_smp_atomic_init_nob(&atom_put_ops, 0); + erts_atomic_init_nob(&atom_put_ops, 0); #endif - erts_smp_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab", NIL, + erts_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); f.hash = (H_FUN) atom_hash; @@ -505,4 +491,4 @@ Uint erts_get_atom_limit(void) { return erts_atom_table.limit; -}
\ No newline at end of file +} diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names index a44d23b181..fc55b687d4 100644 --- a/erts/emulator/beam/atom.names +++ b/erts/emulator/beam/atom.names @@ -217,6 +217,8 @@ atom discard atom display_items atom dist atom dist_cmd +atom dist_ctrl_put_data +atom dist_data atom Div='/' atom div atom dlink diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c index 023ee3ef4b..00822d1415 100644 --- a/erts/emulator/beam/beam_bif_load.c +++ b/erts/emulator/beam/beam_bif_load.c @@ -50,7 +50,7 @@ static struct { Eterm module; - erts_smp_mtx_t mtx; + erts_mtx_t mtx; Export *pending_purge_lambda; Eterm *sprocs; Eterm def_sprocs[10]; @@ -65,12 +65,10 @@ static struct { Process *erts_code_purger = NULL; -#ifdef ERTS_DIRTY_SCHEDULERS Process *erts_dirty_process_code_checker; -#endif -erts_smp_atomic_t erts_copy_literal_area__; +erts_atomic_t erts_copy_literal_area__; #define ERTS_SET_COPY_LITERAL_AREA(LA) \ - erts_smp_atomic_set_nob(&erts_copy_literal_area__, \ + erts_atomic_set_nob(&erts_copy_literal_area__, \ (erts_aint_t) (LA)) Process *erts_literal_area_collector = NULL; @@ -81,7 +79,7 @@ struct ErtsLiteralAreaRef_ { }; struct { - erts_smp_mtx_t mtx; + erts_mtx_t mtx; ErtsLiteralAreaRef *first; ErtsLiteralAreaRef *last; } release_literal_areas; @@ -97,7 +95,7 @@ init_purge_state(void) { purge_state.module = THE_NON_VALUE; - erts_smp_mtx_init(&purge_state.mtx, "purge_state", NIL, + erts_mtx_init(&purge_state.mtx, "purge_state", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); purge_state.pending_purge_lambda = @@ -119,12 +117,12 @@ init_purge_state(void) void erts_beam_bif_load_init(void) { - erts_smp_mtx_init(&release_literal_areas.mtx, "release_literal_areas", NIL, + erts_mtx_init(&release_literal_areas.mtx, "release_literal_areas", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); release_literal_areas.first = NULL; release_literal_areas.last = NULL; - erts_smp_atomic_init_nob(&erts_copy_literal_area__, + erts_atomic_init_nob(&erts_copy_literal_area__, (erts_aint_t) NULL); init_purge_state(); @@ -172,8 +170,8 @@ BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3) BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3); } - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); modp = erts_get_module(mod, erts_active_code_ix()); @@ -197,8 +195,8 @@ BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3) else { erts_abort_staging_code_ix(); } - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_release_code_write_permission(); return res; #endif @@ -265,7 +263,6 @@ struct m { }; static Eterm staging_epilogue(Process* c_p, int, Eterm res, int, struct m*, int, int); -#ifdef ERTS_SMP static void smp_code_ix_commiter(void*); static struct /* Protected by code_write_permission */ @@ -273,7 +270,6 @@ static struct /* Protected by code_write_permission */ Process* stager; ErtsThrPrgrLaterOp lop; } committer_state; -#endif static Eterm exception_list(Process* p, Eterm tag, struct m* mp, Sint exceptions) @@ -401,8 +397,8 @@ finish_loading_1(BIF_ALIST_1) erts_is_default_trace_enabled() || IF_HIPE(hipe_need_blocking(p[i].modp))) { /* tracing or hipe need thread blocking */ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, 
ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); is_blocking = 1; break; } @@ -465,9 +461,7 @@ static Eterm staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking, struct m* mods, int nmods, int free_mods) { -#ifdef ERTS_SMP if (is_blocking || !commit) -#endif { if (commit) { int i; @@ -491,13 +485,12 @@ staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking, erts_free(ERTS_ALC_T_LOADER_TMP, mods); } if (is_blocking) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } erts_release_code_write_permission(); return res; } -#ifdef ERTS_SMP else { ASSERT(is_value(res)); @@ -522,11 +515,9 @@ staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking, */ ERTS_BIF_YIELD_RETURN(c_p, res); } -#endif } -#ifdef ERTS_SMP static void smp_code_ix_commiter(void* null) { Process* p = committer_state.stager; @@ -536,14 +527,13 @@ static void smp_code_ix_commiter(void* null) committer_state.stager = NULL; #endif erts_release_code_write_permission(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); if (!ERTS_PROC_IS_EXITING(p)) { erts_resume(p, ERTS_PROC_LOCK_STATUS); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); erts_proc_dec_refc(p); } -#endif /* ERTS_SMP */ @@ -613,9 +603,6 @@ badarg: BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2) { -#if !defined(ERTS_DIRTY_SCHEDULERS) - BIF_ERROR(BIF_P, EXC_NOTSUP); -#else Process *rp; int reds = 0; Eterm res; @@ -640,12 +627,11 @@ BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2) res = erts_check_process_code(rp, BIF_ARG_2, &reds, BIF_P->fcalls); if (BIF_P != rp) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); ASSERT(is_value(res)); BIF_RET2(res, reds); -#endif } BIF_RETTYPE delete_module_1(BIF_ALIST_1) @@ -683,8 +669,8 @@ BIF_RETTYPE delete_module_1(BIF_ALIST_1) modp->curr.num_traced_exports > 0 || IF_HIPE(hipe_need_blocking(modp))) { /* tracing or hipe need to go single threaded */ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); is_blocking = 1; if (modp->curr.num_breakpoints) { erts_clear_module_break(modp); @@ -789,16 +775,16 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2) /* ToDo: Use code_ix staging instead of thread blocking */ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); code_ix = erts_active_code_ix(); modp = erts_get_module(BIF_ARG_1, code_ix); if (!modp || !modp->on_load || !modp->on_load->code_hdr) { error: - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_release_code_write_permission(); BIF_ERROR(BIF_P, BADARG); } @@ -837,11 +823,11 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2) ep->beam[1] = 0; } else { if (ep->addressv[code_ix] == ep->beam && - ep->beam[0] == (BeamInstr) em_apply_bif) { + BeamIsOpCode(ep->beam[0], op_apply_bif)) { continue; } - ep->addressv[code_ix] = ep->beam; - ep->beam[0] = (BeamInstr) em_call_error_handler; + ep->addressv[code_ix] = ep->beam; + ep->beam[0] = BeamOpCodeAddr(op_call_error_handler); } } modp->curr.code_hdr->on_load_function_ptr = NULL; @@ -863,14 
+849,14 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2) if (ep == NULL || ep->info.mfa.module != BIF_ARG_1) { continue; } - if (ep->beam[0] == (BeamInstr) em_apply_bif) { + if (BeamIsOpCode(ep->beam[0], op_apply_bif)) { continue; } ep->beam[1] = 0; } } - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_release_code_write_permission(); BIF_RET(am_true); } @@ -931,9 +917,9 @@ erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed * any other heap than the message it self. */ - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ); for (msgp = c_p->msg.first; msgp; msgp = msgp->next) { ErlHeapFragment *hf; @@ -1063,10 +1049,8 @@ erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed return_ok: -#ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p))) c_p->flags &= ~F_DIRTY_CLA; -#endif return am_ok; @@ -1081,10 +1065,8 @@ literal_gc: *redsp += erts_garbage_collect_literals(c_p, (Eterm *) literals, lit_bsize, oh, fcalls); -#ifdef ERTS_DIRTY_SCHEDULERS if (c_p->flags & F_DIRTY_CLA) return THE_NON_VALUE; -#endif return am_ok; } @@ -1314,7 +1296,6 @@ hfrag_literal_copy(Eterm **hpp, ErlOffHeap *ohp, } } -#ifdef ERTS_SMP ErtsThrPrgrLaterOp later_literal_area_switch; @@ -1336,13 +1317,12 @@ static void complete_literal_area_switch(void *literal_area) { Process *p = erts_literal_area_collector; - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); erts_resume(p, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); if (literal_area) erts_release_literal_area((ErtsLiteralArea *) literal_area); } -#endif BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0) { @@ -1352,7 +1332,7 @@ BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0) if (BIF_P != erts_literal_area_collector) BIF_ERROR(BIF_P, EXC_NOTSUP); - erts_smp_mtx_lock(&release_literal_areas.mtx); + erts_mtx_lock(&release_literal_areas.mtx); la_ref = release_literal_areas.first; if (la_ref) { @@ -1361,14 +1341,13 @@ BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0) release_literal_areas.last = NULL; } - erts_smp_mtx_unlock(&release_literal_areas.mtx); + erts_mtx_unlock(&release_literal_areas.mtx); unused_la = ERTS_COPY_LITERAL_AREA(); if (!la_ref) { ERTS_SET_COPY_LITERAL_AREA(NULL); if (unused_la) { -#ifdef ERTS_SMP ErtsLaterReleasLiteralArea *lrlap; lrlap = erts_alloc(ERTS_ALC_T_RELEASE_LAREA, sizeof(ErtsLaterReleasLiteralArea)); @@ -1382,9 +1361,6 @@ BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0) + ((unused_la->end - &unused_la->start[0]) - 1)*(sizeof(Eterm)))); -#else - erts_release_literal_area(unused_la); -#endif } BIF_RET(am_false); } @@ -1393,16 +1369,11 @@ BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0) erts_free(ERTS_ALC_T_LITERAL_REF, la_ref); -#ifdef ERTS_SMP erts_schedule_thr_prgr_later_op(complete_literal_area_switch, unused_la, &later_literal_area_switch); erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL); ERTS_BIF_YIELD_RETURN(BIF_P, am_true); -#else - erts_release_literal_area(unused_la); - BIF_RET(am_true); -#endif } @@ -1428,7 +1399,7 @@ 
erts_purge_state_add_fun(ErlFunEntry *fe) Export * erts_suspend_process_on_pending_purge_lambda(Process *c_p, ErlFunEntry* fe) { - erts_smp_mtx_lock(&purge_state.mtx); + erts_mtx_lock(&purge_state.mtx); if (purge_state.module == fe->module) { /* * The process c_p is about to call a fun in the code @@ -1454,7 +1425,7 @@ erts_suspend_process_on_pending_purge_lambda(Process *c_p, ErlFunEntry* fe) erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL); ERTS_VBUMP_ALL_REDS(c_p); } - erts_smp_mtx_unlock(&purge_state.mtx); + erts_mtx_unlock(&purge_state.mtx); return purge_state.pending_purge_lambda; } @@ -1464,9 +1435,9 @@ finalize_purge_operation(Process *c_p, int succeded) Uint ix; if (c_p) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); - erts_smp_mtx_lock(&purge_state.mtx); + erts_mtx_lock(&purge_state.mtx); ASSERT(purge_state.module != THE_NON_VALUE); @@ -1482,14 +1453,14 @@ finalize_purge_operation(Process *c_p, int succeded) ERTS_PROC_LOCK_STATUS); if (rp) { erts_resume(rp, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); } } - erts_smp_mtx_unlock(&purge_state.mtx); + erts_mtx_unlock(&purge_state.mtx); if (c_p) - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); if (purge_state.sprocs != &purge_state.def_sprocs[0]) { erts_free(ERTS_ALC_T_PURGE_DATA, purge_state.sprocs); @@ -1508,7 +1479,6 @@ finalize_purge_operation(Process *c_p, int succeded) purge_state.fe_ix = 0; } -#ifdef ERTS_SMP static ErtsThrPrgrLaterOp purger_lop_data; @@ -1516,9 +1486,9 @@ static void resume_purger(void *unused) { Process *p = erts_code_purger; - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); erts_resume(p, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } static void @@ -1531,7 +1501,6 @@ finalize_purge_abort(void *unused) resume_purger(NULL); } -#endif /* ERTS_SMP */ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) { @@ -1590,9 +1559,9 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) else { BeamInstr* code; BeamInstr* end; - erts_smp_mtx_lock(&purge_state.mtx); + erts_mtx_lock(&purge_state.mtx); purge_state.module = BIF_ARG_1; - erts_smp_mtx_unlock(&purge_state.mtx); + erts_mtx_unlock(&purge_state.mtx); res = am_true; code = (BeamInstr*) modp->old.code_hdr; end = (BeamInstr *)((char *)code + modp->old.code_length); @@ -1606,9 +1575,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) } } -#ifndef ERTS_SMP - BIF_RET(res); -#else if (res != am_true) BIF_RET(res); else { @@ -1627,7 +1593,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL); ERTS_BIF_YIELD_RETURN(BIF_P, am_true); } -#endif } case am_abort: { @@ -1641,11 +1606,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) erts_fun_purge_abort_prepare(purge_state.funs, purge_state.fe_ix); -#ifndef ERTS_SMP - erts_fun_purge_abort_finalize(purge_state.funs, purge_state.fe_ix); - finalize_purge_operation(BIF_P, 0); - BIF_RET(am_false); -#else /* * We need to restore the code addresses of the funs in * two stages in order to ensure that we do not get any @@ -1661,7 +1621,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) &purger_lop_data); erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL); ERTS_BIF_YIELD_RETURN(BIF_P, am_false); -#endif } case am_complete: { @@ -1713,8 +1672,8 @@ BIF_RETTYPE 
erts_internal_purge_module_2(BIF_ALIST_2) || IF_HIPE(hipe_purge_need_blocking(modp))) { /* ToDo: Do unload nif without blocking */ erts_rwunlock_old_code(code_ix); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); is_blocking = 1; erts_rwlock_old_code(code_ix); if (modp->old.nif) { @@ -1752,8 +1711,8 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) erts_rwunlock_old_code(code_ix); } if (is_blocking) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); } erts_release_code_write_permission(); @@ -1766,7 +1725,7 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) sizeof(ErtsLiteralAreaRef)); ref->literal_area = literals; ref->next = NULL; - erts_smp_mtx_lock(&release_literal_areas.mtx); + erts_mtx_lock(&release_literal_areas.mtx); if (release_literal_areas.last) { release_literal_areas.last->next = ref; release_literal_areas.last = ref; @@ -1775,7 +1734,7 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) release_literal_areas.first = ref; release_literal_areas.last = ref; } - erts_smp_mtx_unlock(&release_literal_areas.mtx); + erts_mtx_unlock(&release_literal_areas.mtx); erts_queue_message(erts_literal_area_collector, 0, erts_alloc_message(0, NULL), @@ -1807,22 +1766,23 @@ delete_code(Module* modp) Export *ep = export_list(i, code_ix); if (ep != NULL && (ep->info.mfa.module == module)) { if (ep->addressv[code_ix] == ep->beam) { - if (ep->beam[0] == (BeamInstr) em_apply_bif) { + if (BeamIsOpCode(ep->beam[0], op_apply_bif)) { continue; } - else if (ep->beam[0] == - (BeamInstr) BeamOp(op_i_generic_breakpoint)) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + else if (BeamIsOpCode(ep->beam[0], op_i_generic_breakpoint)) { + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); ASSERT(modp->curr.num_traced_exports > 0); DBG_TRACE_MFA_P(&ep->info.mfa, "export trace cleared, code_ix=%d", code_ix); erts_clear_export_break(modp, &ep->info); } - else ASSERT(ep->beam[0] == (BeamInstr) em_call_error_handler - || !erts_initialized); - } + else { + ASSERT(BeamIsOpCode(ep->beam[0], op_call_error_handler) || + !erts_initialized); + } + } ep->addressv[code_ix] = ep->beam; - ep->beam[0] = (BeamInstr) em_call_error_handler; + ep->beam[0] = BeamOpCodeAddr(op_call_error_handler); ep->beam[1] = 0; DBG_TRACE_MFA_P(&ep->info.mfa, "export invalidation, code_ix=%d", code_ix); diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c index 950639f7ae..c81380c14d 100644 --- a/erts/emulator/beam/beam_bp.c +++ b/erts/emulator/beam/beam_bp.c @@ -46,15 +46,15 @@ #define ReAlloc(P, SIZ) erts_realloc(ERTS_ALC_T_BPD, (P), (SZ)) #define Free(P) erts_free(ERTS_ALC_T_BPD, (P)) -#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) -# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \ +#if defined(ERTS_ENABLE_LOCK_CHECK) +# define ERTS_REQ_PROC_MAIN_LOCK(P) \ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN,\ __FILE__, __LINE__) -# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \ +# define ERTS_UNREQ_PROC_MAIN_LOCK(P) \ if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN) #else -# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) -# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) +# define ERTS_REQ_PROC_MAIN_LOCK(P) +# define ERTS_UNREQ_PROC_MAIN_LOCK(P) #endif #define ERTS_BPF_LOCAL_TRACE 0x01 @@ -73,11 +73,9 @@ extern BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */ 
extern BeamInstr beam_exception_trace[1]; /* OpCode(i_exception_trace) */ extern BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */ -erts_smp_atomic32_t erts_active_bp_index; -erts_smp_atomic32_t erts_staging_bp_index; -#ifdef ERTS_DIRTY_SCHEDULERS -erts_smp_mtx_t erts_dirty_bp_ix_mtx; -#endif +erts_atomic32_t erts_active_bp_index; +erts_atomic32_t erts_staging_bp_index; +erts_mtx_t erts_dirty_bp_ix_mtx; /* * Inlined helpers @@ -94,22 +92,18 @@ acquire_bp_sched_ix(Process *c_p) { ErtsSchedulerData *esdp = erts_proc_sched_data(c_p); ASSERT(esdp); -#ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_SCHEDULER_IS_DIRTY(esdp)) { - erts_smp_mtx_lock(&erts_dirty_bp_ix_mtx); + erts_mtx_lock(&erts_dirty_bp_ix_mtx); return (Uint32) erts_no_schedulers; } -#endif return (Uint32) esdp->no - 1; } static ERTS_INLINE void release_bp_sched_ix(Uint32 ix) { -#ifdef ERTS_DIRTY_SCHEDULERS if (ix == (Uint32) erts_no_schedulers) - erts_smp_mtx_unlock(&erts_dirty_bp_ix_mtx); -#endif + erts_mtx_unlock(&erts_dirty_bp_ix_mtx); } @@ -162,12 +156,10 @@ static void bp_hash_delete(bp_time_hash_t *hash); void erts_bp_init(void) { - erts_smp_atomic32_init_nob(&erts_active_bp_index, 0); - erts_smp_atomic32_init_nob(&erts_staging_bp_index, 1); -#ifdef ERTS_DIRTY_SCHEDULERS - erts_smp_mtx_init(&erts_dirty_bp_ix_mtx, "dirty_break_point_index", NIL, + erts_atomic32_init_nob(&erts_active_bp_index, 0); + erts_atomic32_init_nob(&erts_staging_bp_index, 1); + erts_mtx_init(&erts_dirty_bp_ix_mtx, "dirty_break_point_index", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG); -#endif } @@ -211,7 +203,7 @@ erts_bp_match_functions(BpFunctions* f, ErtsCodeMFA *mfa, int specified) for (fi = 0; fi < num_functions; fi++) { ci = code_hdr->functions[fi]; - ASSERT(ci->op == (BeamInstr) BeamOp(op_i_func_info_IaaI)); + ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI)); if (erts_is_function_native(ci)) { continue; } @@ -273,11 +265,11 @@ erts_bp_match_export(BpFunctions* f, ErtsCodeMFA *mfa, int specified) pc = ep->beam; if (ep->addressv[code_ix] == pc) { - if ((*pc == (BeamInstr) em_apply_bif || - *pc == (BeamInstr) em_call_error_handler)) { - continue; + if (BeamIsOpCode(*pc, op_apply_bif) || + BeamIsOpCode(*pc, op_call_error_handler)) { + continue; } - ASSERT(*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint)); + ASSERT(BeamIsOpCode(*pc, op_i_generic_breakpoint)); } else if (erts_is_function_native(erts_code_to_codeinfo(ep->addressv[code_ix]))) { continue; } @@ -306,7 +298,7 @@ erts_consolidate_bp_data(BpFunctions* f, int local) Uint i; Uint n = f->matched; - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); + ERTS_LC_ASSERT(erts_has_code_write_permission()); for (i = 0; i < n; i++) { consolidate_bp_data(fs[i].mod, fs[i].ci, local); @@ -318,7 +310,7 @@ erts_consolidate_bif_bp_data(void) { int i; - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); + ERTS_LC_ASSERT(erts_has_code_write_permission()); for (i = 0; i < BIF_SIZE; i++) { Export *ep = bif_export[i]; consolidate_bp_data(0, &ep->info, 0); @@ -374,8 +366,8 @@ consolidate_bp_data(Module* modp, ErtsCodeInfo *ci, int local) } ASSERT(modp->curr.num_breakpoints >= 0); ASSERT(modp->curr.num_traced_exports >= 0); - ASSERT(*erts_codeinfo_to_code(ci) != - (BeamInstr) BeamOp(op_i_generic_breakpoint)); + ASSERT(! 
BeamIsOpCode(*erts_codeinfo_to_code(ci), + op_i_generic_breakpoint)); } ci->u.gen_bp = NULL; Free(g); @@ -393,17 +385,17 @@ consolidate_bp_data(Module* modp, ErtsCodeInfo *ci, int local) } if (flags & ERTS_BPF_META_TRACE) { dst->meta_tracer = src->meta_tracer; - erts_smp_refc_inc(&dst->meta_tracer->refc, 1); + erts_refc_inc(&dst->meta_tracer->refc, 1); dst->meta_ms = src->meta_ms; MatchSetRef(dst->meta_ms); } if (flags & ERTS_BPF_COUNT) { dst->count = src->count; - erts_smp_refc_inc(&dst->count->refc, 1); + erts_refc_inc(&dst->count->refc, 1); } if (flags & ERTS_BPF_TIME_TRACE) { dst->time = src->time; - erts_smp_refc_inc(&dst->time->refc, 1); + erts_refc_inc(&dst->time->refc, 1); ASSERT(dst->time->hash); } } @@ -414,8 +406,8 @@ erts_commit_staged_bp(void) ErtsBpIndex staging = erts_staging_bp_ix(); ErtsBpIndex active = erts_active_bp_ix(); - erts_smp_atomic32_set_nob(&erts_active_bp_index, staging); - erts_smp_atomic32_set_nob(&erts_staging_bp_index, active); + erts_atomic32_set_nob(&erts_active_bp_index, staging); + erts_atomic32_set_nob(&erts_staging_bp_index, active); } void @@ -423,7 +415,7 @@ erts_install_breakpoints(BpFunctions* f) { Uint i; Uint n = f->matched; - BeamInstr br = (BeamInstr) BeamOp(op_i_generic_breakpoint); + BeamInstr br = BeamOpCodeAddr(op_i_generic_breakpoint); for (i = 0; i < n; i++) { ErtsCodeInfo* ci = f->matching[i].ci; @@ -468,7 +460,7 @@ static void uninstall_breakpoint(ErtsCodeInfo *ci) { BeamInstr *pc = erts_codeinfo_to_code(ci); - if (*pc == (BeamInstr) BeamOp(op_i_generic_breakpoint)) { + if (BeamIsOpCode(*pc, op_i_generic_breakpoint)) { GenericBp* g = ci->u.gen_bp; if (g->data[erts_active_bp_ix()].flags == 0) { /* @@ -575,7 +567,7 @@ erts_clear_mtrace_bif(ErtsCodeInfo *ci) void erts_clear_debug_break(BpFunctions* f) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); clear_break(f, ERTS_BPF_DEBUG); } @@ -603,7 +595,7 @@ erts_clear_module_break(Module *modp) { Uint n; Uint i; - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); ASSERT(modp); code_hdr = modp->curr.code_hdr; if (!code_hdr) { @@ -633,7 +625,7 @@ erts_clear_module_break(Module *modp) { void erts_clear_export_break(Module* modp, ErtsCodeInfo *ci) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); clear_function_break(ci, ERTS_BPF_ALL); erts_commit_staged_bp(); @@ -650,7 +642,7 @@ erts_generic_breakpoint(Process* c_p, ErtsCodeInfo *info, Eterm* reg) Uint bp_flags; ErtsBpIndex ix = erts_active_bp_ix(); - ASSERT(info->op == (BeamInstr) BeamOp(op_i_func_info_IaaI)); + ASSERT(BeamIsOpCode(info->op, op_i_func_info_IaaI)); g = info->u.gen_bp; bp = &g->data[ix]; @@ -679,12 +671,12 @@ erts_generic_breakpoint(Process* c_p, ErtsCodeInfo *info, Eterm* reg) if (bp_flags & ERTS_BPF_META_TRACE) { ErtsTracer old_tracer, new_tracer; - old_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer); + old_tracer = erts_atomic_read_nob(&bp->meta_tracer->tracer); new_tracer = do_call_trace(c_p, info, reg, 1, bp->meta_ms, old_tracer); if (!ERTS_TRACER_COMPARE(new_tracer, old_tracer)) { - if (old_tracer == erts_smp_atomic_cmpxchg_acqb( + if (old_tracer == erts_atomic_cmpxchg_acqb( &bp->meta_tracer->tracer, (erts_aint_t)new_tracer, (erts_aint_t)old_tracer)) { @@ -696,16 +688,16 @@ erts_generic_breakpoint(Process* c_p, ErtsCodeInfo *info, Eterm* reg) } if (bp_flags & ERTS_BPF_COUNT_ACTIVE) { - erts_smp_atomic_inc_nob(&bp->count->acount); + 
erts_atomic_inc_nob(&bp->count->acount); } if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE) { Eterm w; erts_trace_time_call(c_p, info, bp->time); w = (BeamInstr) *c_p->cp; - if (! (w == (BeamInstr) BeamOp(op_i_return_time_trace) || - w == (BeamInstr) BeamOp(op_return_trace) || - w == (BeamInstr) BeamOp(op_i_return_to_trace)) ) { + if (! (BeamIsOpCode(w, op_i_return_time_trace) || + BeamIsOpCode(w, op_return_trace) || + BeamIsOpCode(w, op_i_return_to_trace)) ) { Eterm* E = c_p->stop; ASSERT(c_p->htop <= E && E <= c_p->hend); if (E - 2 < c_p->htop) { @@ -725,7 +717,7 @@ erts_generic_breakpoint(Process* c_p, ErtsCodeInfo *info, Eterm* reg) } if (bp_flags & ERTS_BPF_DEBUG) { - return (BeamInstr) BeamOp(op_i_debug_breakpoint); + return BeamOpCodeAddr(op_i_debug_breakpoint); } else { return g->orig_instr; } @@ -753,7 +745,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I) GenericBpData* bp = NULL; Uint bp_flags = 0; - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); g = ep->info.u.gen_bp; if (g) { @@ -777,7 +769,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I) if (bp_flags & ERTS_BPF_META_TRACE) { ErtsTracer old_tracer; - meta_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer); + meta_tracer = erts_atomic_read_nob(&bp->meta_tracer->tracer); old_tracer = meta_tracer; flags_meta = erts_call_trace(p, &ep->info, bp->meta_ms, args, 0, &meta_tracer); @@ -785,7 +777,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I) if (!ERTS_TRACER_COMPARE(old_tracer, meta_tracer)) { ErtsTracer new_tracer = erts_tracer_nil; erts_tracer_update(&new_tracer, meta_tracer); - if (old_tracer == erts_smp_atomic_cmpxchg_acqb( + if (old_tracer == erts_atomic_cmpxchg_acqb( &bp->meta_tracer->tracer, (erts_aint_t)new_tracer, (erts_aint_t)old_tracer)) { @@ -912,9 +904,9 @@ erts_bif_trace_epilogue(Process *p, Eterm result, int applying, } } if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) { - erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); ERTS_TRACE_FLAGS(p) |= F_EXCEPTION_TRACE; - erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR); } } } else { @@ -937,7 +929,7 @@ erts_bif_trace_epilogue(Process *p, Eterm result, int applying, } } } - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); return result; } @@ -954,12 +946,12 @@ do_call_trace(Process* c_p, ErtsCodeInfo* info, Eterm* reg, Eterm* E = c_p->stop; w = *c_p->cp; - if (w == (BeamInstr) BeamOp(op_return_trace)) { + if (BeamIsOpCode(w, op_return_trace)) { cpp = &E[2]; - } else if (w == (BeamInstr) BeamOp(op_i_return_to_trace)) { + } else if (BeamIsOpCode(w, op_i_return_to_trace)) { return_to_trace = 1; cpp = &E[0]; - } else if (w == (BeamInstr) BeamOp(op_i_return_time_trace)) { + } else if (BeamIsOpCode(w, op_i_return_time_trace)) { cpp = &E[0]; } else { cpp = NULL; @@ -967,12 +959,12 @@ do_call_trace(Process* c_p, ErtsCodeInfo* info, Eterm* reg, if (cpp) { for (;;) { BeamInstr w = *cp_val(*cpp); - if (w == (BeamInstr) BeamOp(op_return_trace)) { + if (BeamIsOpCode(w, op_return_trace)) { cpp += 3; - } else if (w == (BeamInstr) BeamOp(op_i_return_to_trace)) { + } else if (BeamIsOpCode(w, op_i_return_to_trace)) { return_to_trace = 1; cpp += 1; - } else if (w == (BeamInstr) BeamOp(op_i_return_time_trace)) { + } else if (BeamIsOpCode(w, op_i_return_time_trace)) { cpp += 2; } else { break; @@ -982,9 +974,9 @@ do_call_trace(Process* c_p, 
ErtsCodeInfo* info, Eterm* reg, c_p->cp = (BeamInstr *) cp_val(*cpp); ASSERT(is_CP(*cpp)); } - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); flags = erts_call_trace(c_p, info, ms, reg, local, &tracer); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); if (cpp) { c_p->cp = cp_save; } @@ -1024,9 +1016,9 @@ do_call_trace(Process* c_p, ErtsCodeInfo* info, Eterm* reg, the funcinfo is above i. */ c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE) ? beam_exception_trace : beam_return_trace; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); ERTS_TRACE_FLAGS(c_p) |= F_EXCEPTION_TRACE; - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); } else c_p->stop = E; return tracer; @@ -1043,7 +1035,7 @@ erts_trace_time_call(Process* c_p, ErtsCodeInfo *info, BpDataTime* bdt) Uint32 six = acquire_bp_sched_ix(c_p); ASSERT(c_p); - ASSERT(erts_smp_atomic32_read_acqb(&c_p->state) & (ERTS_PSFLG_RUNNING + ASSERT(erts_atomic32_read_acqb(&c_p->state) & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_DIRTY_RUNNING)); /* get previous timestamp and breakpoint @@ -1124,7 +1116,7 @@ erts_trace_time_return(Process *p, ErtsCodeInfo *ci) Uint32 six = acquire_bp_sched_ix(p); ASSERT(p); - ASSERT(erts_smp_atomic32_read_acqb(&p->state) & (ERTS_PSFLG_RUNNING + ASSERT(erts_atomic32_read_acqb(&p->state) & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_DIRTY_RUNNING)); /* get previous timestamp and breakpoint @@ -1206,7 +1198,7 @@ erts_is_mtrace_break(ErtsCodeInfo *ci, Binary **match_spec_ret, *match_spec_ret = bp->meta_ms; } if (tracer_ret) { - *tracer_ret = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer); + *tracer_ret = erts_atomic_read_nob(&bp->meta_tracer->tracer); } return 1; } @@ -1220,7 +1212,7 @@ erts_is_count_break(ErtsCodeInfo *ci, Uint *count_ret) if (bp) { if (count_ret) { - *count_ret = (Uint) erts_smp_atomic_read_nob(&bp->count->acount); + *count_ret = (Uint) erts_atomic_read_nob(&bp->count->acount); } return 1; } @@ -1301,7 +1293,7 @@ erts_find_local_func(ErtsCodeMFA *mfa) { n = (BeamInstr) code_hdr->num_functions; for (i = 0; i < n; ++i) { ci = code_hdr->functions[i]; - ASSERT(((BeamInstr) BeamOp(op_i_func_info_IaaI)) == ci->op); + ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI)); ASSERT(mfa->module == ci->mfa.module || is_nil(ci->mfa.module)); if (mfa->function == ci->mfa.function && mfa->arity == ci->mfa.arity) { @@ -1500,7 +1492,7 @@ set_function_break(ErtsCodeInfo *ci, Binary *match_spec, Uint break_flags, Uint common; ErtsBpIndex ix = erts_staging_bp_ix(); - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); + ERTS_LC_ASSERT(erts_has_code_write_permission()); g = ci->u.gen_bp; if (g == 0) { int i; @@ -1532,7 +1524,7 @@ set_function_break(ErtsCodeInfo *ci, Binary *match_spec, Uint break_flags, bp->flags &= ~ERTS_BPF_COUNT_ACTIVE; } else { bp->flags |= ERTS_BPF_COUNT_ACTIVE; - erts_smp_atomic_set_nob(&bp->count->acount, 0); + erts_atomic_set_nob(&bp->count->acount, 0); } ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0); return; @@ -1566,17 +1558,17 @@ set_function_break(ErtsCodeInfo *ci, Binary *match_spec, Uint break_flags, MatchSetRef(match_spec); bp->meta_ms = match_spec; bmt = Alloc(sizeof(BpMetaTracer)); - erts_smp_refc_init(&bmt->refc, 1); + erts_refc_init(&bmt->refc, 1); erts_tracer_update(&meta_tracer, tracer); /* copy tracer */ - erts_smp_atomic_init_nob(&bmt->tracer, (erts_aint_t)meta_tracer); + erts_atomic_init_nob(&bmt->tracer, (erts_aint_t)meta_tracer); bp->meta_tracer = bmt; } else if 
(break_flags & ERTS_BPF_COUNT) { BpCount* bcp; ASSERT((bp->flags & ERTS_BPF_COUNT) == 0); bcp = Alloc(sizeof(BpCount)); - erts_smp_refc_init(&bcp->refc, 1); - erts_smp_atomic_init_nob(&bcp->acount, 0); + erts_refc_init(&bcp->refc, 1); + erts_atomic_init_nob(&bcp->acount, 0); bp->count = bcp; } else if (break_flags & ERTS_BPF_TIME_TRACE) { BpDataTime* bdt; @@ -1584,12 +1576,8 @@ set_function_break(ErtsCodeInfo *ci, Binary *match_spec, Uint break_flags, ASSERT((bp->flags & ERTS_BPF_TIME_TRACE) == 0); bdt = Alloc(sizeof(BpDataTime)); - erts_smp_refc_init(&bdt->refc, 1); -#ifdef ERTS_DIRTY_SCHEDULERS + erts_refc_init(&bdt->refc, 1); bdt->n = erts_no_schedulers + 1; -#else - bdt->n = erts_no_schedulers; -#endif bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n)); for (i = 0; i < bdt->n; i++) { bp_hash_init(&(bdt->hash[i]), 32); @@ -1621,7 +1609,7 @@ clear_function_break(ErtsCodeInfo *ci, Uint break_flags) Uint common; ErtsBpIndex ix = erts_staging_bp_ix(); - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); + ERTS_LC_ASSERT(erts_has_code_write_permission()); if ((g = ci->u.gen_bp) == NULL) { return 1; @@ -1654,8 +1642,8 @@ clear_function_break(ErtsCodeInfo *ci, Uint break_flags) static void bp_meta_unref(BpMetaTracer* bmt) { - if (erts_smp_refc_dectest(&bmt->refc, 0) <= 0) { - ErtsTracer trc = erts_smp_atomic_read_nob(&bmt->tracer); + if (erts_refc_dectest(&bmt->refc, 0) <= 0) { + ErtsTracer trc = erts_atomic_read_nob(&bmt->tracer); ERTS_TRACER_CLEAR(&trc); Free(bmt); } @@ -1664,7 +1652,7 @@ bp_meta_unref(BpMetaTracer* bmt) static void bp_count_unref(BpCount* bcp) { - if (erts_smp_refc_dectest(&bcp->refc, 0) <= 0) { + if (erts_refc_dectest(&bcp->refc, 0) <= 0) { Free(bcp); } } @@ -1672,7 +1660,7 @@ bp_count_unref(BpCount* bcp) static void bp_time_unref(BpDataTime* bdt) { - if (erts_smp_refc_dectest(&bdt->refc, 0) <= 0) { + if (erts_refc_dectest(&bdt->refc, 0) <= 0) { Uint i = 0; Uint j = 0; Process *h_p = NULL; @@ -1696,7 +1684,7 @@ bp_time_unref(BpDataTime* bdt) if (pbt) { Free(pbt); } - erts_smp_proc_unlock(h_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(h_p, ERTS_PROC_LOCK_MAIN); } } } @@ -1720,7 +1708,7 @@ check_break(ErtsCodeInfo *ci, Uint break_flags) { GenericBp* g = ci->u.gen_bp; - ASSERT(ci->op == (BeamInstr) BeamOp(op_i_func_info_IaaI)); + ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI)); if (erts_is_function_native(ci)) { return 0; } diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h index 56fa82b912..a64765822b 100644 --- a/erts/emulator/beam/beam_bp.h +++ b/erts/emulator/beam/beam_bp.h @@ -41,7 +41,7 @@ typedef struct { typedef struct bp_data_time { /* Call time */ Uint n; bp_time_hash_t *hash; - erts_smp_refc_t refc; + erts_refc_t refc; } BpDataTime; typedef struct { @@ -50,13 +50,13 @@ typedef struct { } process_breakpoint_time_t; /* used within psd */ typedef struct { - erts_smp_atomic_t acount; - erts_smp_refc_t refc; + erts_atomic_t acount; + erts_refc_t refc; } BpCount; typedef struct { - erts_smp_atomic_t tracer; - erts_smp_refc_t refc; + erts_atomic_t tracer; + erts_refc_t refc; } BpMetaTracer; typedef struct generic_bp_data { @@ -79,9 +79,7 @@ typedef struct generic_bp { #define ERTS_BP_CALL_TIME_SCHEDULE_OUT (1) #define ERTS_BP_CALL_TIME_SCHEDULE_EXITING (2) -#ifdef ERTS_DIRTY_SCHEDULERS -extern erts_smp_mtx_t erts_dirty_bp_ix_mtx; -#endif +extern erts_mtx_t erts_dirty_bp_ix_mtx; enum erts_break_op{ ERTS_BREAK_NOP = 0, /* Must be false */ @@ -173,17 +171,17 @@ ErtsCodeInfo *erts_find_local_func(ErtsCodeMFA *mfa); #if ERTS_GLB_INLINE_INCL_FUNC_DEF 
-extern erts_smp_atomic32_t erts_active_bp_index; -extern erts_smp_atomic32_t erts_staging_bp_index; +extern erts_atomic32_t erts_active_bp_index; +extern erts_atomic32_t erts_staging_bp_index; ERTS_GLB_INLINE ErtsBpIndex erts_active_bp_ix(void) { - return erts_smp_atomic32_read_nob(&erts_active_bp_index); + return erts_atomic32_read_nob(&erts_active_bp_index); } ERTS_GLB_INLINE ErtsBpIndex erts_staging_bp_ix(void) { - return erts_smp_atomic32_read_nob(&erts_staging_bp_index); + return erts_atomic32_read_nob(&erts_staging_bp_index); } #endif diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c index a2060c80de..0f332da63f 100644 --- a/erts/emulator/beam/beam_debug.c +++ b/erts/emulator/beam/beam_debug.c @@ -53,6 +53,8 @@ void dbg_where(BeamInstr* addr, Eterm x0, Eterm* reg); static int print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr); static void print_bif_name(fmtfn_t to, void* to_arg, BifFunction bif); +static BeamInstr* f_to_addr(BeamInstr* base, int op, BeamInstr* ap); +static BeamInstr* f_to_addr_packed(BeamInstr* base, int op, Sint32* ap); BIF_RETTYPE erts_debug_same_2(BIF_ALIST_2) @@ -157,8 +159,8 @@ erts_debug_breakpoint_2(BIF_ALIST_2) ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_breakpoint_2], BIF_P, BIF_ARG_1, BIF_ARG_2); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); erts_bp_match_functions(&f, &mfa, specified); if (boolean == am_true) { @@ -174,8 +176,8 @@ erts_debug_breakpoint_2(BIF_ALIST_2) res = make_small(f.matched); erts_bp_free_matched_functions(&f); - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); erts_release_code_write_permission(); return res; @@ -197,7 +199,7 @@ void debug_dump_code(BeamInstr *I, int num) erts_print(ERTS_PRINT_DSBUF, (void *) dsbufp, HEXF ": ", code_ptr); instr = (BeamInstr) code_ptr[0]; for (i = 0; i < NUM_SPECIFIC_OPS; i++) { - if (instr == (BeamInstr) BeamOp(i) && opc[i].name[0] != '\0') { + if (BeamIsOpCode(instr, i) && opc[i].name[0] != '\0') { code_ptr += print_op(ERTS_PRINT_DSBUF, (void *) dsbufp, i, opc[i].sz-1, code_ptr+1) + 1; break; @@ -317,7 +319,7 @@ erts_debug_disassemble_1(BIF_ALIST_1) erts_print(ERTS_PRINT_DSBUF, (void *) dsbufp, HEXF ": ", code_ptr); instr = (BeamInstr) code_ptr[0]; for (i = 0; i < NUM_SPECIFIC_OPS; i++) { - if (instr == (BeamInstr) BeamOp(i) && opc[i].name[0] != '\0') { + if (BeamIsOpCode(instr, i) && opc[i].name[0] != '\0') { code_ptr += print_op(ERTS_PRINT_DSBUF, (void *) dsbufp, i, opc[i].sz-1, code_ptr+1) + 1; break; @@ -424,7 +426,9 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) while (start_prog < prog) { prog--; switch (*prog) { + case 'f': case 'g': + case 'q': *ap++ = *--sp; break; case 'i': /* Initialize packing accumulator. */ @@ -489,6 +493,14 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) case 'n': /* Nil */ erts_print(to, to_arg, "[]"); break; + case 'S': /* Register */ + { + Uint reg_type = (*ap & 1) ? 'y' : 'x'; + Uint n = ap[0] / sizeof(Eterm); + erts_print(to, to_arg, "%c(%d)", reg_type, n); + ap++; + break; + } case 's': /* Any source (tagged constant or register) */ tag = loader_tag(*ap); if (tag == LOADER_X_REG) { @@ -522,12 +534,13 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) } ap++; break; - case 'I': /* Untagged integer. 
*/ - case 't': + case 't': /* Untagged integers */ + case 'I': + case 'W': switch (op) { - case op_i_gc_bif1_jIsId: - case op_i_gc_bif2_jIIssd: - case op_i_gc_bif3_jIIssd: + case op_i_gc_bif1_jWstd: + case op_i_gc_bif2_jWtssd: + case op_i_gc_bif3_jWtssd: { const ErtsGcBif* p; BifFunction gcf = (BifFunction) *ap; @@ -549,9 +562,10 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) break; case 'f': /* Destination label */ { - ErtsCodeMFA* cmfa = find_function_from_pc((BeamInstr *)*ap); - if (!cmfa || erts_codemfa_to_code(cmfa) != (BeamInstr *) *ap) { - erts_print(to, to_arg, "f(" HEXF ")", *ap); + BeamInstr* target = f_to_addr(addr, op, ap); + ErtsCodeMFA* cmfa = find_function_from_pc(target); + if (!cmfa || erts_codemfa_to_code(cmfa) != target) { + erts_print(to, to_arg, "f(" HEXF ")", target); } else { erts_print(to, to_arg, "%T:%T/%bpu", cmfa->module, cmfa->function, cmfa->arity); @@ -561,18 +575,18 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) break; case 'p': /* Pointer (to label) */ { - ErtsCodeMFA* cmfa = find_function_from_pc((BeamInstr *)*ap); - if (!cmfa || erts_codemfa_to_code(cmfa) != (BeamInstr *) *ap) { - erts_print(to, to_arg, "p(" HEXF ")", *ap); - } else { - erts_print(to, to_arg, "%T:%T/%bpu", cmfa->module, - cmfa->function, cmfa->arity); - } + BeamInstr* target = f_to_addr(addr, op, ap); + erts_print(to, to_arg, "p(" HEXF ")", target); ap++; } break; case 'j': /* Pointer (to label) */ - erts_print(to, to_arg, "j(" HEXF ")", *ap); + if (*ap == 0) { + erts_print(to, to_arg, "j(0)"); + } else { + BeamInstr* target = f_to_addr(addr, op, ap); + erts_print(to, to_arg, "j(" HEXF ")", target); + } ap++; break; case 'e': /* Export entry */ @@ -615,12 +629,22 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) unpacked = ap; ap = addr + size; + + /* + * In the code below, never use ap[-1], ap[-2], ... + * (will not work if the arguments have been packed). + * + * Instead use unpacked[-1], unpacked[-2], ... 
+ */ switch (op) { case op_i_select_val_lins_xfI: case op_i_select_val_lins_yfI: + case op_i_select_val_bins_xfI: + case op_i_select_val_bins_yfI: { - int n = ap[-1]; + int n = unpacked[-1]; int ix = n; + Sint32* jump_tab = (Sint32 *)(ap + n); while (ix--) { erts_print(to, to_arg, "%T ", (Eterm) ap[0]); @@ -629,30 +653,19 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) } ix = n; while (ix--) { - erts_print(to, to_arg, "f(" HEXF ") ", (Eterm) ap[0]); - ap++; - size++; - } - } - break; - case op_i_select_val_bins_xfI: - case op_i_select_val_bins_yfI: - { - int n = ap[-1]; - - while (n > 0) { - erts_print(to, to_arg, "%T f(" HEXF ") ", (Eterm) ap[0], ap[1]); - ap += 2; - size += 2; - n--; + BeamInstr* target = f_to_addr_packed(addr, op, jump_tab); + erts_print(to, to_arg, "f(" HEXF ") ", target); + jump_tab++; } + size += (n+1) / 2; } break; case op_i_select_tuple_arity_xfI: case op_i_select_tuple_arity_yfI: { - int n = ap[-1]; + int n = unpacked[-1]; int ix = n - 1; /* without sentinel */ + Sint32* jump_tab = (Sint32 *)(ap + n); while (ix--) { Uint arity = arityval(ap[0]); @@ -666,39 +679,62 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) size++; ix = n; while (ix--) { - erts_print(to, to_arg, "f(" HEXF ") ", ap[0]); - ap++; - size++; + BeamInstr* target = f_to_addr_packed(addr, op, jump_tab); + erts_print(to, to_arg, "f(" HEXF ") ", target); + jump_tab++; + } + size += (n+1) / 2; + } + break; + case op_i_select_val2_xfcc: + case op_i_select_val2_yfcc: + case op_i_select_tuple_arity2_xfAA: + case op_i_select_tuple_arity2_yfAA: + { + Sint32* jump_tab = (Sint32 *) ap; + BeamInstr* target; + int i; + + for (i = 0; i < 2; i++) { + target = f_to_addr_packed(addr, op, jump_tab++); + erts_print(to, to_arg, "f(" HEXF ") ", target); } + size += 1; } break; - case op_i_jump_on_val_xfII: - case op_i_jump_on_val_yfII: + case op_i_jump_on_val_xfIW: + case op_i_jump_on_val_yfIW: { - int n; - for (n = ap[-2]; n > 0; n--) { - erts_print(to, to_arg, "f(" HEXF ") ", ap[0]); - ap++; - size++; + int n = unpacked[-2]; + Sint32* jump_tab = (Sint32 *) ap; + + size += (n+1) / 2; + while (n-- > 0) { + BeamInstr* target = f_to_addr_packed(addr, op, jump_tab); + erts_print(to, to_arg, "f(" HEXF ") ", target); + jump_tab++; } } break; case op_i_jump_on_val_zero_xfI: case op_i_jump_on_val_zero_yfI: { - int n; - for (n = ap[-1]; n > 0; n--) { - erts_print(to, to_arg, "f(" HEXF ") ", ap[0]); - ap++; - size++; + int n = unpacked[-1]; + Sint32* jump_tab = (Sint32 *) ap; + + size += (n+1) / 2; + while (n-- > 0) { + BeamInstr* target = f_to_addr_packed(addr, op, jump_tab); + erts_print(to, to_arg, "f(" HEXF ") ", target); + jump_tab++; } } break; case op_i_put_tuple_xI: case op_i_put_tuple_yI: - case op_new_map_dII: - case op_update_map_assoc_jsdII: - case op_update_map_exact_jsdII: + case op_new_map_dtI: + case op_update_map_assoc_sdtI: + case op_update_map_exact_jsdtI: { int n = unpacked[-1]; @@ -718,6 +754,27 @@ print_op(fmtfn_t to, void *to_arg, int op, int size, BeamInstr* addr) } } break; + case op_i_new_small_map_lit_dtq: + { + Eterm *tp = tuple_val(unpacked[-1]); + int n = arityval(*tp); + + while (n > 0) { + switch (loader_tag(ap[0])) { + case LOADER_X_REG: + erts_print(to, to_arg, " x(%d)", loader_x_reg_index(ap[0])); + break; + case LOADER_Y_REG: + erts_print(to, to_arg, " x(%d)", loader_y_reg_index(ap[0])); + break; + default: + erts_print(to, to_arg, " %T", (Eterm) ap[0]); + break; + } + ap++, size++, n--; + } + } + break; case op_i_get_map_elements_fsI: { int n = 
unpacked[-1]; @@ -766,6 +823,17 @@ static void print_bif_name(fmtfn_t to, void* to_arg, BifFunction bif) } } +static BeamInstr* f_to_addr(BeamInstr* base, int op, BeamInstr* ap) +{ + return base - 1 + opc[op].adjust + (Sint32) *ap; +} + +static BeamInstr* f_to_addr_packed(BeamInstr* base, int op, Sint32* ap) +{ + return base - 1 + opc[op].adjust + *ap; +} + + /* * Dirty BIF testing. * @@ -774,10 +842,8 @@ static void print_bif_name(fmtfn_t to, void* to_arg, BifFunction bif) * test suite. */ -#ifdef ERTS_DIRTY_SCHEDULERS static int ms_wait(Process *c_p, Eterm etimeout, int busy); static int dirty_send_message(Process *c_p, Eterm to, Eterm tag); -#endif static BIF_RETTYPE dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I); /* @@ -806,7 +872,6 @@ erts_debug_dirty_io_2(BIF_ALIST_2) BIF_RETTYPE erts_debug_dirty_3(BIF_ALIST_3) { -#ifdef ERTS_DIRTY_SCHEDULERS Eterm argv[2]; switch (BIF_ARG_1) { case am_normal: @@ -836,9 +901,6 @@ erts_debug_dirty_3(BIF_ALIST_3) default: BIF_ERROR(BIF_P, EXC_BADARG); } -#else - BIF_ERROR(BIF_P, EXC_UNDEF); -#endif } @@ -846,7 +908,6 @@ static BIF_RETTYPE dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I) { BIF_RETTYPE ret; -#ifdef ERTS_DIRTY_SCHEDULERS if (am_scheduler == arg1) { ErtsSchedulerData *esdp; if (arg2 != am_type) @@ -1032,13 +1093,9 @@ dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I) badarg: ERTS_BIF_PREP_ERROR(ret, c_p, BADARG); } -#else - ERTS_BIF_PREP_ERROR(ret, c_p, EXC_UNDEF); -#endif return ret; } -#ifdef ERTS_DIRTY_SCHEDULERS static int dirty_send_message(Process *c_p, Eterm to, Eterm tag) @@ -1075,7 +1132,7 @@ dirty_send_message(Process *c_p, Eterm to, Eterm tag) if (rp == real_c_p) rp_locks &= ~c_p_locks; if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); erts_proc_dec_refc(rp); @@ -1125,13 +1182,8 @@ ms_wait(Process *c_p, Eterm etimeout, int busy) return 1; } -#endif /* ERTS_DIRTY_SCHEDULERS */ -#ifdef ERTS_SMP # define ERTS_STACK_LIMIT ((char *) ethr_get_stacklimit()) -#else -# define ERTS_STACK_LIMIT ((char *) erts_scheduler_stack_limit) -#endif /* * The below functions is for testing of the stack diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c index b4e6c35579..9a77c63390 100644 --- a/erts/emulator/beam/beam_emu.c +++ b/erts/emulator/beam/beam_emu.c @@ -50,45 +50,38 @@ #if defined(NO_JUMP_TABLE) # define OpCase(OpCode) case op_##OpCode # define CountCase(OpCode) case op_count_##OpCode -# define OpCode(OpCode) ((Uint*)op_##OpCode) -# define Goto(Rel) {Go = (int)(UWord)(Rel); goto emulator_loop;} -# define LabelAddr(Addr) &&##Addr +# define IsOpCode(InstrWord, OpCode) ((InstrWord) == (BeamInstr)op_##OpCode) +# define Goto(Rel) {Go = (Rel); goto emulator_loop;} #else # define OpCase(OpCode) lb_##OpCode # define CountCase(OpCode) lb_count_##OpCode +# define IsOpCode(InstrWord, OpCode) ((InstrWord) == (BeamInstr)&&lb_##OpCode) # define Goto(Rel) goto *((void *)Rel) # define LabelAddr(Label) &&Label -# define OpCode(OpCode) (&&lb_##OpCode) #endif #ifdef ERTS_ENABLE_LOCK_CHECK -# ifdef ERTS_SMP -# define PROCESS_MAIN_CHK_LOCKS(P) \ -do { \ - if ((P)) \ - erts_proc_lc_chk_only_proc_main((P)); \ - ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); \ +# define PROCESS_MAIN_CHK_LOCKS(P) \ +do { \ + if ((P)) \ + erts_proc_lc_chk_only_proc_main((P)); \ + ERTS_LC_ASSERT(!erts_thr_progress_is_blocking()); \ } while (0) -# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \ -do { \ - if ((P)) \ - erts_proc_lc_require_lock((P), 
ERTS_PROC_LOCK_MAIN, \ - __FILE__, __LINE__); \ +# define ERTS_REQ_PROC_MAIN_LOCK(P) \ +do { \ + if ((P)) \ + erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN, \ + __FILE__, __LINE__); \ } while (0) -# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \ +# define ERTS_UNREQ_PROC_MAIN_LOCK(P) \ do { \ if ((P)) \ erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN); \ } while (0) -# else -# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) -# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) -# define PROCESS_MAIN_CHK_LOCKS(P) erts_lc_check_exact(NULL, 0) -# endif #else # define PROCESS_MAIN_CHK_LOCKS(P) -# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) -# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) +# define ERTS_REQ_PROC_MAIN_LOCK(P) +# define ERTS_UNREQ_PROC_MAIN_LOCK(P) #endif /* @@ -113,6 +106,8 @@ do { \ # define CHECK_ARGS(T) #endif +#define CHECK_ALIGNED(Dst) ASSERT((((Uint)&Dst) & (sizeof(Uint)-1)) == 0) + #define GET_BIF_MODULE(p) (p->info.mfa.module) #define GET_BIF_FUNCTION(p) (p->info.mfa.function) #define GET_BIF_ARITY(p) (p->info.mfa.arity) @@ -155,50 +150,7 @@ do { \ * Register target (X or Y register). */ -#define REG_TARGET_PTR(Target) (((Target) & 1) ? &yb(Target-1) : &xb(Target)) -#define REG_TARGET(Target) (*REG_TARGET_PTR(Target)) - -/* - * Store a result into a register given a destination descriptor. - */ - -#define StoreResult(Result, DestDesc) \ - do { \ - Eterm stb_reg; \ - stb_reg = (DestDesc); \ - CHECK_TERM(Result); \ - REG_TARGET(stb_reg) = (Result); \ - } while (0) - -/* - * Store a result into a register and execute the next instruction. - * Dst points to the word with a destination descriptor, which MUST - * be just before the next instruction. - */ - -#define StoreBifResult(Dst, Result) \ - do { \ - BeamInstr* stb_next; \ - Eterm stb_reg; \ - stb_reg = Arg(Dst); \ - I += (Dst) + 2; \ - stb_next = (BeamInstr *) *I; \ - CHECK_TERM(Result); \ - REG_TARGET(stb_reg) = (Result); \ - Goto(stb_next); \ - } while (0) - -#define ClauseFail() goto jump_f - -#define SAVE_CP(X) \ - do { \ - *(X) = make_cp(c_p->cp); \ - c_p->cp = 0; \ - } while(0) - -#define RESTORE_CP(X) SET_CP(c_p, (BeamInstr *) cp_val(*(X))) - -#define ISCATCHEND(instr) ((Eterm *) *(instr) == OpCode(catch_end_y)) +#define REG_TARGET_PTR(Target) (((Target) & 1) ? &yb((Target)-1) : &xb(Target)) /* * Special Beam instructions. @@ -208,11 +160,6 @@ BeamInstr beam_apply[2]; BeamInstr beam_exit[1]; BeamInstr beam_continue_exit[1]; -BeamInstr* em_call_error_handler; -BeamInstr* em_apply_bif; -BeamInstr* em_call_nif; -BeamInstr* em_call_bif_e; - /* NOTE These should be the only variables containing trace instructions. ** Sometimes tests are form the instruction value, and sometimes @@ -281,13 +228,16 @@ void** beam_ops; HEAP_TOP((P)) = HTOP; \ (P)->stop = E; \ PROCESS_MAIN_CHK_LOCKS((P)); \ - ERTS_SMP_UNREQ_PROC_MAIN_LOCK((P)) + ERTS_UNREQ_PROC_MAIN_LOCK((P)) #define db(N) (N) +#define fb(N) ((Sint)(Sint32)(N)) +#define jb(N) ((Sint)(Sint32)(N)) #define tb(N) (N) #define xb(N) (*(Eterm *) (((unsigned char *)reg) + (N))) #define yb(N) (*(Eterm *) (((unsigned char *)E) + (N))) -#define fb(N) (*(double *) (((unsigned char *)&(freg[0].fd)) + (N))) +#define Sb(N) (*REG_TARGET_PTR(N)) +#define lb(N) (*(double *) (((unsigned char *)&(freg[0].fd)) + (N))) #define Qb(N) (N) #define Ib(N) (N) #define x(N) reg[N] @@ -295,159 +245,14 @@ void** beam_ops; #define r(N) x(N) /* - * Makes sure that there are StackNeed + HeapNeed + 1 words available - * on the combined heap/stack segment, then allocates StackNeed + 1 - * words on the stack and saves CP. 
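
The comment above describes the AH() macro defined next; reduced to a standalone sketch (invented names, with gc_collect() standing in for erts_garbage_collect_nobump()), the guard-then-collect discipline that AH() and the TestHeap macros below implement is:

    #include <stddef.h>

    typedef unsigned long Word;

    /* Toy model: the heap top (HTOP) grows upwards and the stack (E)
     * grows downwards in the same block, so free space is the gap. */
    struct heap {
        Word *htop;              /* next free heap word (HTOP) */
        Word *stop;              /* stack top (E)              */
    };

    /* Stand-in for erts_garbage_collect_nobump(): a real collector
     * moves live terms and widens the gap before the retry. */
    static void gc_collect(struct heap *h, size_t need)
    {
        (void)h; (void)need;
    }

    static Word *heap_claim(struct heap *h, size_t need)
    {
        Word *p;
        if ((size_t)(h->stop - h->htop) < need)   /* E - HTOP < needed */
            gc_collect(h, need);
        p = h->htop;
        h->htop += need;                          /* bump allocation   */
        return p;
    }
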
- * - * M is number of live registers to preserve during garbage collection - */ - -#define AH(StackNeed, HeapNeed, M) \ - do { \ - int needed; \ - needed = (StackNeed) + 1; \ - if (E - HTOP < (needed + (HeapNeed))) { \ - SWAPOUT; \ - PROCESS_MAIN_CHK_LOCKS(c_p); \ - FCALLS -= erts_garbage_collect_nobump(c_p, needed + (HeapNeed), \ - reg, (M), FCALLS); \ - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \ - PROCESS_MAIN_CHK_LOCKS(c_p); \ - SWAPIN; \ - } \ - E -= needed; \ - SAVE_CP(E); \ - } while (0) - -#define Allocate(Ns, Live) AH(Ns, 0, Live) - -#define AllocateZero(Ns, Live) \ - do { Eterm* ptr; \ - int i = (Ns); \ - AH(i, 0, Live); \ - for (ptr = E + i; ptr > E; ptr--) { \ - make_blank(*ptr); \ - } \ - } while (0) - -#define AllocateHeap(Ns, Nh, Live) AH(Ns, Nh, Live) - -#define AllocateHeapZero(Ns, Nh, Live) \ - do { Eterm* ptr; \ - int i = (Ns); \ - AH(i, Nh, Live); \ - for (ptr = E + i; ptr > E; ptr--) { \ - make_blank(*ptr); \ - } \ - } while (0) - -#define AllocateInit(Ns, Live, Y) \ - do { AH(Ns, 0, Live); make_blank(Y); } while (0) - -/* - * Like the AH macro, but allocates no additional heap space. - */ - -#define A(StackNeed, M) AH(StackNeed, 0, M) - -#define D(N) \ - RESTORE_CP(E); \ - E += (N) + 1; - - - -#define TestBinVHeap(VNh, Nh, Live) \ - do { \ - unsigned need = (Nh); \ - if ((E - HTOP < need) || (MSO(c_p).overhead + (VNh) >= BIN_VHEAP_SZ(c_p))) {\ - SWAPOUT; \ - PROCESS_MAIN_CHK_LOCKS(c_p); \ - FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, (Live), FCALLS); \ - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \ - PROCESS_MAIN_CHK_LOCKS(c_p); \ - SWAPIN; \ - } \ - HEAP_SPACE_VERIFIED(need); \ - } while (0) - - - -/* - * Check if Nh words of heap are available; if not, do a garbage collection. - * Live is number of active argument registers to be preserved. - */ - -#define TestHeap(Nh, Live) \ - do { \ - unsigned need = (Nh); \ - if (E - HTOP < need) { \ - SWAPOUT; \ - PROCESS_MAIN_CHK_LOCKS(c_p); \ - FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, (Live), FCALLS); \ - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \ - PROCESS_MAIN_CHK_LOCKS(c_p); \ - SWAPIN; \ - } \ - HEAP_SPACE_VERIFIED(need); \ - } while (0) - -/* - * Check if Nh words of heap are available; if not, do a garbage collection. - * Live is number of active argument registers to be preserved. - * Takes special care to preserve Extra if a garbage collection occurs. - */ - -#define TestHeapPreserve(Nh, Live, Extra) \ - do { \ - unsigned need = (Nh); \ - if (E - HTOP < need) { \ - SWAPOUT; \ - reg[Live] = Extra; \ - PROCESS_MAIN_CHK_LOCKS(c_p); \ - FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, (Live)+1, FCALLS); \ - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \ - PROCESS_MAIN_CHK_LOCKS(c_p); \ - Extra = reg[Live]; \ - SWAPIN; \ - } \ - HEAP_SPACE_VERIFIED(need); \ - } while (0) - -#define TestHeapPutList(Need, Reg) \ - do { \ - TestHeap((Need), 1); \ - PutList(Reg, r(0), r(0)); \ - CHECK_TERM(r(0)); \ - } while (0) - -#define Init(N) make_blank(yb(N)) - -#define Init2(Y1, Y2) do { make_blank(Y1); make_blank(Y2); } while (0) -#define Init3(Y1, Y2, Y3) \ - do { make_blank(Y1); make_blank(Y2); make_blank(Y3); } while (0) - -#define MakeFun(FunP, NumFree) \ - do { \ - HEAVY_SWAPOUT; \ - r(0) = new_fun(c_p, reg, (ErlFunEntry *) FunP, NumFree); \ - HEAVY_SWAPIN; \ - } while (0) - -#define PutTuple(Dst, Arity) \ - do { \ - Dst = make_tuple(HTOP); \ - pt_arity = (Arity); \ - } while (0) - -/* * Check that we haven't used the reductions and jump to function pointed to by * the I register. 
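
The dispatch macros below rely on the loader having rewritten every opcode into the address of its handler. A minimal computed-goto interpreter in the same style (GCC/Clang labels-as-values; a sketch, not the VM's actual loop):

    #include <stdio.h>

    /* Each cell of code[] holds the label address of its handler,
     * exactly like BeamInstr words after loading, so dispatch is a
     * load plus an indirect jump. */
    static long run(void)
    {
        long acc = 0;
        static void *code[4];
        void **ip = code;

        code[0] = &&op_inc; code[1] = &&op_inc;
        code[2] = &&op_inc; code[3] = &&op_halt;

    #define DISPATCH() goto **ip++
        DISPATCH();
    op_inc:
        acc++;
        DISPATCH();
    op_halt:
        return acc;   /* 3 */
    }

    int main(void) { printf("%ld\n", run()); return 0; }
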
If we are out of reductions, do a context switch. */ #define DispatchMacro() \ do { \ - BeamInstr* dis_next; \ - dis_next = (BeamInstr *) *I; \ + BeamInstr dis_next; \ + dis_next = *I; \ CHECK_ARGS(I); \ if (FCALLS > 0 || FCALLS > neg_o_reds) { \ FCALLS--; \ @@ -455,12 +260,12 @@ void** beam_ops; } else { \ goto context_switch; \ } \ - } while (0) + } while (0) \ #define DispatchMacroFun() \ do { \ - BeamInstr* dis_next; \ - dis_next = (BeamInstr *) *I; \ + BeamInstr dis_next; \ + dis_next = *I; \ CHECK_ARGS(I); \ if (FCALLS > 0 || FCALLS > neg_o_reds) { \ FCALLS--; \ @@ -470,23 +275,23 @@ void** beam_ops; } \ } while (0) -#define DispatchMacrox() \ - do { \ - if (FCALLS > 0) { \ - Eterm* dis_next; \ - SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \ - dis_next = (Eterm *) *I; \ - FCALLS--; \ - CHECK_ARGS(I); \ - Goto(dis_next); \ - } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p) \ - && FCALLS > neg_o_reds) { \ - goto save_calls1; \ - } else { \ - SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \ - CHECK_ARGS(I); \ - goto context_switch; \ - } \ +#define DispatchMacrox() \ + do { \ + if (FCALLS > 0) { \ + BeamInstr dis_next; \ + SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \ + dis_next = *I; \ + FCALLS--; \ + CHECK_ARGS(I); \ + Goto(dis_next); \ + } else if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p) \ + && FCALLS > neg_o_reds) { \ + goto save_calls1; \ + } else { \ + SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); \ + CHECK_ARGS(I); \ + goto context_switch; \ + } \ } while (0) #ifdef DEBUG @@ -505,20 +310,7 @@ void** beam_ops; # define Dispatchfun() DispatchMacroFun() #endif -#define Self(R) R = c_p->common.id -#define Node(R) R = erts_this_node->sysname - #define Arg(N) I[(N)+1] -#define Next(N) \ - I += (N) + 1; \ - ASSERT(VALID_INSTR(*I)); \ - Goto(*I) - -#define PreFetch(N, Dst) do { Dst = (BeamInstr *) *(I + N + 1); } while (0) -#define NextPF(N, Dst) \ - I += N + 1; \ - ASSERT(VALID_INSTR(Dst)); \ - Goto(Dst) #define GetR(pos, tr) \ do { \ @@ -535,97 +327,20 @@ void** beam_ops; CHECK_TERM(tr); \ } while (0) -#define GetArg1(N, Dst) GetR((N), Dst) - -#define GetArg2(N, Dst1, Dst2) \ - do { \ - GetR(N, Dst1); \ - GetR((N)+1, Dst2); \ - } while (0) - -#define PutList(H, T, Dst) \ - do { \ - HTOP[0] = (H); HTOP[1] = (T); \ - Dst = make_list(HTOP); \ - HTOP += 2; \ - } while (0) - -#define Swap(R1, R2) \ - do { \ - Eterm V = R1; \ - R1 = R2; \ - R2 = V; \ - } while (0) - -#define SwapTemp(R1, R2, Tmp) \ - do { \ - Eterm V = R1; \ - R1 = R2; \ - R2 = Tmp = V; \ - } while (0) - -#define Move(Src, Dst) Dst = (Src) - -#define Move2Par(S1, D1, S2, D2) \ - do { \ - Eterm V1, V2; \ - V1 = (S1); V2 = (S2); D1 = V1; D2 = V2; \ - } while (0) - -#define MoveShift(Src, SD, D) \ - do { \ - Eterm V; \ - V = Src; D = SD; SD = V; \ - } while (0) - -#define MoveDup(Src, D1, D2) \ - do { \ - D1 = D2 = (Src); \ - } while (0) - -#define Move3(S1, D1, S2, D2, S3, D3) D1 = (S1); D2 = (S2); D3 = (S3) - -#define MoveWindow3(S1, S2, S3, D) \ - do { \ - Eterm xt0, xt1, xt2; \ - Eterm *y = &D; \ - xt0 = S1; \ - xt1 = S2; \ - xt2 = S3; \ - y[0] = xt0; \ - y[1] = xt1; \ - y[2] = xt2; \ - } while (0) - -#define MoveWindow4(S1, S2, S3, S4, D) \ - do { \ - Eterm xt0, xt1, xt2, xt3; \ - Eterm *y = &D; \ - xt0 = S1; \ - xt1 = S2; \ - xt2 = S3; \ - xt3 = S4; \ - y[0] = xt0; \ - y[1] = xt1; \ - y[2] = xt2; \ - y[3] = xt3; \ - } while (0) - -#define MoveWindow5(S1, S2, S3, S4, S5, D) \ - do { \ - Eterm xt0, xt1, xt2, xt3, xt4; \ - Eterm *y = &D; \ - xt0 = S1; \ - xt1 = S2; \ - 
xt2 = S3; \ - xt3 = S4; \ - xt4 = S5; \ - y[0] = xt0; \ - y[1] = xt1; \ - y[2] = xt2; \ - y[3] = xt3; \ - y[4] = xt4; \ - } while (0) +#define PUT_TERM_REG(term, desc) \ +do { \ + switch (loader_tag(desc)) { \ + case LOADER_X_REG: \ + x(loader_x_reg_index(desc)) = (term); \ + break; \ + case LOADER_Y_REG: \ + y(loader_y_reg_index(desc)) = (term); \ + break; \ + default: \ + ASSERT(0); \ + break; \ + } \ +} while(0) #define DispatchReturn \ do { \ @@ -640,409 +355,14 @@ do { \ } \ } while (0) -#define MoveReturn(Src) \ - x(0) = (Src); \ - I = c_p->cp; \ - ASSERT(VALID_INSTR(*c_p->cp)); \ - c_p->cp = 0; \ - CHECK_TERM(r(0)); \ - DispatchReturn - -#define DeallocateReturn(Deallocate) \ - do { \ - int words_to_pop = (Deallocate); \ - SET_I((BeamInstr *) cp_val(*E)); \ - E = ADD_BYTE_OFFSET(E, words_to_pop); \ - CHECK_TERM(r(0)); \ - DispatchReturn; \ - } while (0) - -#define MoveDeallocateReturn(Src, Deallocate) \ - x(0) = (Src); \ - DeallocateReturn(Deallocate) - -#define MoveCall(Src, CallDest, Size) \ - x(0) = (Src); \ - SET_CP(c_p, I+Size+1); \ - SET_I((BeamInstr *) CallDest); \ - Dispatch(); - -#define MoveCallLast(Src, CallDest, Deallocate) \ - x(0) = (Src); \ - RESTORE_CP(E); \ - E = ADD_BYTE_OFFSET(E, (Deallocate)); \ - SET_I((BeamInstr *) CallDest); \ - Dispatch(); - -#define MoveCallOnly(Src, CallDest) \ - x(0) = (Src); \ - SET_I((BeamInstr *) CallDest); \ - Dispatch(); - -#define MoveJump(Src) \ - r(0) = (Src); \ - SET_I((BeamInstr *) Arg(0)); \ - Goto(*I); - -#define GetList(Src, H, T) \ - do { \ - Eterm* tmp_ptr = list_val(Src); \ - Eterm hd, tl; \ - hd = CAR(tmp_ptr); \ - tl = CDR(tmp_ptr); \ - H = hd; T = tl; \ - } while (0) - -#define GetTupleElement(Src, Element, Dest) \ - do { \ - Eterm* src; \ - src = ADD_BYTE_OFFSET(tuple_val(Src), (Element)); \ - (Dest) = *src; \ - } while (0) - -#define GetTupleElement2(Src, Element, Dest) \ - do { \ - Eterm* src; \ - Eterm* dst; \ - Eterm E1, E2; \ - src = ADD_BYTE_OFFSET(tuple_val(Src), (Element)); \ - dst = &(Dest); \ - E1 = src[0]; \ - E2 = src[1]; \ - dst[0] = E1; \ - dst[1] = E2; \ - } while (0) - -#define GetTupleElement2Y(Src, Element, D1, D2) \ - do { \ - Eterm* src; \ - Eterm E1, E2; \ - src = ADD_BYTE_OFFSET(tuple_val(Src), (Element)); \ - E1 = src[0]; \ - E2 = src[1]; \ - D1 = E1; \ - D2 = E2; \ - } while (0) - -#define GetTupleElement3(Src, Element, Dest) \ - do { \ - Eterm* src; \ - Eterm* dst; \ - Eterm E1, E2, E3; \ - src = ADD_BYTE_OFFSET(tuple_val(Src), (Element)); \ - dst = &(Dest); \ - E1 = src[0]; \ - E2 = src[1]; \ - E3 = src[2]; \ - dst[0] = E1; \ - dst[1] = E2; \ - dst[2] = E3; \ - } while (0) - -#define EqualImmed(X, Y, Action) if (X != Y) { Action; } -#define NotEqualImmed(X, Y, Action) if (X == Y) { Action; } -#define EqualExact(X, Y, Action) if (!EQ(X,Y)) { Action; } -#define NotEqualExact(X, Y, Action) if (EQ(X,Y)) { Action; } -#define Equal(X, Y, Action) CMP_EQ_ACTION(X,Y,Action) -#define NotEqual(X, Y, Action) CMP_NE_ACTION(X,Y,Action) -#define IsLessThan(X, Y, Action) CMP_LT_ACTION(X,Y,Action) -#define IsGreaterEqual(X, Y, Action) CMP_GE_ACTION(X,Y,Action) - -#define IsFloat(Src, Fail) if (is_not_float(Src)) { Fail; } - -#define IsInteger(Src, Fail) if (is_not_integer(Src)) { Fail; } - -#define IsNumber(X, Fail) if (is_not_integer(X) && is_not_float(X)) { Fail; } - -#define IsAtom(Src, Fail) if (is_not_atom(Src)) { Fail; } - -#define IsIntegerAllocate(Src, Need, Alive, Fail) \ - if (is_not_integer(Src)) { Fail; } \ - A(Need, Alive) - -#define IsNil(Src, Fail) if (is_not_nil(Src)) { Fail; } - -#define 
IsList(Src, Fail) if (is_not_list(Src) && is_not_nil(Src)) { Fail; } - -#define IsNonemptyList(Src, Fail) if (is_not_list(Src)) { Fail; } - -#define IsNonemptyListAllocate(Src, Need, Alive, Fail) \ - if (is_not_list(Src)) { Fail; } \ - A(Need, Alive) - -#define IsNonemptyListTestHeap(Need, Alive, Fail) \ - if (is_not_list(x(0))) { Fail; } \ - TestHeap(Need, Alive) - -#define IsNonemptyListGetList(Src, H, T, Fail) \ - if (is_not_list(Src)) { \ - Fail; \ - } else { \ - Eterm* tmp_ptr = list_val(Src); \ - Eterm hd, tl; \ - hd = CAR(tmp_ptr); \ - tl = CDR(tmp_ptr); \ - H = hd; T = tl; \ - } - -#define IsTuple(X, Action) if (is_not_tuple(X)) Action - -#define IsArity(Pointer, Arity, Fail) \ - if (*tuple_val(Pointer) != (Arity)) { \ - Fail; \ - } - -#define IsMap(Src, Fail) if (!is_map(Src)) { Fail; } - -#define GetMapElement(Src, Key, Dst, Fail) \ - do { \ - Eterm _res = get_map_element(Src, Key); \ - if (is_non_value(_res)) { \ - Fail; \ - } \ - Dst = _res; \ - } while (0) - -#define GetMapElementHash(Src, Key, Hx, Dst, Fail) \ - do { \ - Eterm _res = get_map_element_hash(Src, Key, Hx); \ - if (is_non_value(_res)) { \ - Fail; \ - } \ - Dst = _res; \ - } while (0) - -#define IsFunction(X, Action) \ - do { \ - if ( !(is_any_fun(X)) ) { \ - Action; \ - } \ - } while (0) - -#define IsFunction2(F, A, Action) \ - do { \ - if (erl_is_function(c_p, F, A) != am_true ) { \ - Action; \ - } \ - } while (0) - #ifdef DEBUG -#define IsTupleOfArity(Src, Arityval, Fail) \ - do { \ - if (!(is_tuple(Src) && *tuple_val(Src) == Arityval)) { \ - Fail; \ - } \ - } while (0) -#else -#define IsTupleOfArity(Src, Arityval, Fail) \ - do { \ - if (!(is_boxed(Src) && *tuple_val(Src) == Arityval)) { \ - Fail; \ - } \ - } while (0) -#endif - -#define IsTaggedTuple(Src,Arityval,Tag,Fail) \ - do { \ - if (!(is_tuple(Src) && \ - (tuple_val(Src))[0] == Arityval && \ - (tuple_val(Src))[1] == Tag)) { \ - Fail; \ - } \ - } while (0) - -#define IsBoolean(X, Fail) if ((X) != am_true && (X) != am_false) { Fail; } - -#define IsBinary(Src, Fail) \ - if (is_not_binary(Src) || binary_bitsize(Src) != 0) { Fail; } - -#define IsBitstring(Src, Fail) \ - if (is_not_binary(Src)) { Fail; } - -#if defined(ARCH_64) -#define BsSafeMul(A, B, Fail, Target) \ - do { Uint64 _res = (A) * (B); \ - if (_res / B != A) { Fail; } \ - Target = _res; \ - } while (0) +/* Better static type testing by the C compiler */ +# define BEAM_IS_TUPLE(Src) is_tuple(Src) #else -#define BsSafeMul(A, B, Fail, Target) \ - do { Uint64 _res = (Uint64)(A) * (Uint64)(B); \ - if ((_res >> (8*sizeof(Uint))) != 0) { Fail; } \ - Target = _res; \ - } while (0) +/* Better performance */ +# define BEAM_IS_TUPLE(Src) is_boxed(Src) #endif -#define BsGetFieldSize(Bits, Unit, Fail, Target) \ - do { \ - Sint _signed_size; Uint _uint_size; \ - Uint temp_bits; \ - if (is_small(Bits)) { \ - _signed_size = signed_val(Bits); \ - if (_signed_size < 0) { Fail; } \ - _uint_size = (Uint) _signed_size; \ - } else { \ - if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \ - _uint_size = temp_bits; \ - } \ - BsSafeMul(_uint_size, Unit, Fail, Target); \ - } while (0) - -#define BsGetUncheckedFieldSize(Bits, Unit, Fail, Target) \ - do { \ - Sint _signed_size; Uint _uint_size; \ - Uint temp_bits; \ - if (is_small(Bits)) { \ - _signed_size = signed_val(Bits); \ - if (_signed_size < 0) { Fail; } \ - _uint_size = (Uint) _signed_size; \ - } else { \ - if (!term_to_Uint(Bits, &temp_bits)) { Fail; } \ - _uint_size = (Uint) temp_bits; \ - } \ - Target = _uint_size * Unit; \ - } while (0) - -#define BsGetFloat2(Ms, 
Live, Sz, Flags, Dst, Fail) \ - do { \ - ErlBinMatchBuffer *_mb; \ - Eterm _result; Sint _size; \ - if (!is_small(Sz) || (_size = unsigned_val(Sz)) > 64) { Fail; } \ - _size *= ((Flags) >> 3); \ - TestHeap(FLOAT_SIZE_OBJECT, Live); \ - _mb = ms_matchbuffer(Ms); \ - LIGHT_SWAPOUT; \ - _result = erts_bs_get_float_2(c_p, _size, (Flags), _mb); \ - LIGHT_SWAPIN; \ - HEAP_SPACE_VERIFIED(0); \ - if (is_non_value(_result)) { Fail; } \ - else { Dst = _result; } \ - } while (0) - -#define BsGetBinaryImm_2(Ms, Live, Sz, Flags, Dst, Fail) \ - do { \ - ErlBinMatchBuffer *_mb; \ - Eterm _result; \ - TestHeap(heap_bin_size(ERL_ONHEAP_BIN_LIMIT), Live); \ - _mb = ms_matchbuffer(Ms); \ - LIGHT_SWAPOUT; \ - _result = erts_bs_get_binary_2(c_p, (Sz), (Flags), _mb); \ - LIGHT_SWAPIN; \ - HEAP_SPACE_VERIFIED(0); \ - if (is_non_value(_result)) { Fail; } \ - else { Dst = _result; } \ - } while (0) - -#define BsGetBinary_2(Ms, Live, Sz, Flags, Dst, Fail) \ - do { \ - ErlBinMatchBuffer *_mb; \ - Eterm _result; Uint _size; \ - BsGetFieldSize(Sz, ((Flags) >> 3), Fail, _size); \ - TestHeap(ERL_SUB_BIN_SIZE, Live); \ - _mb = ms_matchbuffer(Ms); \ - LIGHT_SWAPOUT; \ - _result = erts_bs_get_binary_2(c_p, _size, (Flags), _mb); \ - LIGHT_SWAPIN; \ - HEAP_SPACE_VERIFIED(0); \ - if (is_non_value(_result)) { Fail; } \ - else { Dst = _result; } \ - } while (0) - -#define BsGetBinaryAll_2(Ms, Live, Unit, Dst, Fail) \ - do { \ - ErlBinMatchBuffer *_mb; \ - Eterm _result; \ - TestHeap(ERL_SUB_BIN_SIZE, Live); \ - _mb = ms_matchbuffer(Ms); \ - if (((_mb->size - _mb->offset) % Unit) == 0) { \ - LIGHT_SWAPOUT; \ - _result = erts_bs_get_binary_all_2(c_p, _mb); \ - LIGHT_SWAPIN; \ - HEAP_SPACE_VERIFIED(0); \ - ASSERT(is_value(_result)); \ - Dst = _result; \ - } else { \ - HEAP_SPACE_VERIFIED(0); \ - Fail; } \ - } while (0) - -#define BsSkipBits2(Ms, Bits, Unit, Fail) \ - do { \ - ErlBinMatchBuffer *_mb; \ - size_t new_offset; \ - Uint _size; \ - _mb = ms_matchbuffer(Ms); \ - BsGetFieldSize(Bits, Unit, Fail, _size); \ - new_offset = _mb->offset + _size; \ - if (new_offset <= _mb->size) { _mb->offset = new_offset; } \ - else { Fail; } \ - } while (0) - -#define BsSkipBitsAll2(Ms, Unit, Fail) \ - do { \ - ErlBinMatchBuffer *_mb; \ - _mb = ms_matchbuffer(Ms); \ - if (((_mb->size - _mb->offset) % Unit) == 0) {_mb->offset = _mb->size; } \ - else { Fail; } \ - } while (0) - -#define BsSkipBitsImm2(Ms, Bits, Fail) \ - do { \ - ErlBinMatchBuffer *_mb; \ - size_t new_offset; \ - _mb = ms_matchbuffer(Ms); \ - new_offset = _mb->offset + (Bits); \ - if (new_offset <= _mb->size) { _mb->offset = new_offset; } \ - else { Fail; } \ - } while (0) - -#define NewBsPutIntegerImm(Sz, Flags, Src) \ - do { \ - if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), (Sz), (Flags)))) { goto badarg; } \ - } while (0) - -#define NewBsPutInteger(Sz, Flags, Src) \ - do { \ - Sint _size; \ - BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \ - if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3((Src), _size, (Flags)))) \ - { goto badarg; } \ - } while (0) - -#define NewBsPutFloatImm(Sz, Flags, Src) \ - do { \ - if (!erts_new_bs_put_float(c_p, (Src), (Sz), (Flags))) { goto badarg; } \ - } while (0) - -#define NewBsPutFloat(Sz, Flags, Src) \ - do { \ - Sint _size; \ - BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \ - if (!erts_new_bs_put_float(c_p, (Src), _size, (Flags))) { goto badarg; } \ - } while (0) - -#define NewBsPutBinary(Sz, Flags, Src) \ - do { \ - Sint _size; \ - BsGetUncheckedFieldSize(Sz, ((Flags) >> 3), goto badarg, _size); \ - 
if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), _size))) { goto badarg; } \ - } while (0) - -#define NewBsPutBinaryImm(Sz, Src) \ - do { \ - if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2((Src), (Sz)))) { goto badarg; } \ - } while (0) - -#define NewBsPutBinaryAll(Src, Unit) \ - do { \ - if (!erts_new_bs_put_binary_all(ERL_BITS_ARGS_2((Src), (Unit)))) { goto badarg; } \ - } while (0) - - -#define IsPort(Src, Fail) if (is_not_port(Src)) { Fail; } -#define IsPid(Src, Fail) if (is_not_pid(Src)) { Fail; } -#define IsRef(Src, Fail) if (is_not_ref(Src)) { Fail; } - /* * process_main() is already huge, so we want to avoid inlining * into it. Especially functions that are seldom used. @@ -1058,6 +378,7 @@ do { \ * The following functions are called directly by process_main(). * Don't inline them. */ +static void init_emulator_finish(void) NOINLINE; static ErtsCodeMFA *ubif2mfa(void* uf) NOINLINE; static ErtsCodeMFA *gcbif2mfa(void* gcf) NOINLINE; static BeamInstr* handle_error(Process* c_p, BeamInstr* pc, @@ -1066,20 +387,21 @@ static BeamInstr* call_error_handler(Process* p, ErtsCodeMFA* mfa, Eterm* reg, Eterm func) NOINLINE; static BeamInstr* fixed_apply(Process* p, Eterm* reg, Uint arity, BeamInstr *I, Uint offs) NOINLINE; -static BeamInstr* apply(Process* p, Eterm module, Eterm function, - Eterm args, Eterm* reg, - BeamInstr *I, Uint offs) NOINLINE; +static BeamInstr* apply(Process* p, Eterm* reg, + BeamInstr *I, Uint offs) NOINLINE; static BeamInstr* call_fun(Process* p, int arity, Eterm* reg, Eterm args) NOINLINE; static BeamInstr* apply_fun(Process* p, Eterm fun, Eterm args, Eterm* reg) NOINLINE; static Eterm new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free) NOINLINE; -static Eterm new_map(Process* p, Eterm* reg, BeamInstr* I) NOINLINE; -static Eterm update_map_assoc(Process* p, Eterm* reg, - Eterm map, BeamInstr* I) NOINLINE; -static Eterm update_map_exact(Process* p, Eterm* reg, - Eterm map, BeamInstr* I) NOINLINE; +static Eterm new_map(Process* p, Eterm* reg, Uint live, Uint n, BeamInstr* ptr) NOINLINE; +static Eterm new_small_map_lit(Process* p, Eterm* reg, Eterm keys_literal, + Uint live, BeamInstr* ptr) NOINLINE; +static Eterm update_map_assoc(Process* p, Eterm* reg, Uint live, + Uint n, BeamInstr* new_p) NOINLINE; +static Eterm update_map_exact(Process* p, Eterm* reg, Uint live, + Uint n, Eterm* new_p) NOINLINE; static Eterm get_map_element(Eterm map, Eterm key); static Eterm get_map_element_hash(Eterm map, Eterm key, Uint32 hx); @@ -1111,6 +433,12 @@ init_emulator(void) # define REG_stop asm("%l3") # define REG_I asm("%l4") # define REG_fcalls asm("%l5") +#elif defined(__GNUC__) && defined(__amd64__) && !defined(DEBUG) +# define REG_xregs asm("%r12") +# define REG_htop +# define REG_stop asm("%r13") +# define REG_I asm("%rbx") +# define REG_fcalls asm("%r14") #else # define REG_xregs # define REG_htop @@ -1230,6 +558,13 @@ init_emulator(void) #define ERTS_DBG_CHK_REDS(P, FC) #endif +#ifdef NO_FPE_SIGNALS +# define ERTS_NO_FPE_CHECK_INIT ERTS_FP_CHECK_INIT +# define ERTS_NO_FPE_ERROR ERTS_FP_ERROR +#else +# define ERTS_NO_FPE_CHECK_INIT(p) +# define ERTS_NO_FPE_ERROR(p, a, b) +#endif /* * process_main() is called twice: @@ -1289,12 +624,10 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array) #ifndef NO_JUMP_TABLE static void* opcodes[] = { DEFINE_OPCODES }; #else - int Go; + register BeamInstr Go; #endif #endif - Eterm pt_arity; /* Used by do_put_tuple */ - Uint64 start_time = 0; /* Monitor long schedule */ BeamInstr* start_time_i = NULL; @@ -1311,7 +644,7 @@ 
void process_main(Eterm * x_reg_array, FloatDef* f_reg_array) * Note: c_p->arity must be set to reflect the number of useful terms in * c_p->arg_reg before calling the scheduler. */ - if (!init_done) { + if (ERTS_UNLIKELY(!init_done)) { /* This should only be reached during the init phase when only the main * process is running. I.e. there is no race for init_done. */ @@ -1344,16 +677,16 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array) } PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); c_p = erts_schedule(NULL, c_p, reds_used); ASSERT(!(c_p->flags & F_HIPE_MODE)); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); start_time = 0; #ifdef DEBUG - pid = c_p->common.id; /* Save for debugging purpouses */ + pid = c_p->common.id; /* Save for debugging purposes */ #endif - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); PROCESS_MAIN_CHK_LOCKS(c_p); ERTS_MSACC_UPDATE_CACHE_X(); @@ -1367,7 +700,7 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array) { int reds; Eterm* argp; - BeamInstr *next; + BeamInstr next; int i; argp = c_p->arg_reg; @@ -1399,7 +732,7 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array) ERTS_DBG_CHK_REDS(c_p, FCALLS); - next = (BeamInstr *) *I; + next = *I; SWAPIN; ASSERT(VALID_INSTR(next)); @@ -1435,1923 +768,8 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array) #ifdef NO_JUMP_TABLE switch (Go) { #endif -#include "beam_hot.h" - - { - Eterm increment_reg_val; - Eterm increment_val; - Uint live; - Eterm result; - - OpCase(i_increment_rIId): - increment_reg_val = x(0); - I--; - goto do_increment; - - OpCase(i_increment_xIId): - increment_reg_val = xb(Arg(0)); - goto do_increment; - - OpCase(i_increment_yIId): - increment_reg_val = yb(Arg(0)); - goto do_increment; - - do_increment: - increment_val = Arg(1); - if (is_small(increment_reg_val)) { - Sint i = signed_val(increment_reg_val) + increment_val; - ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i)); - if (MY_IS_SSMALL(i)) { - result = make_small(i); - StoreBifResult(3, result); - } - } - - live = Arg(2); - HEAVY_SWAPOUT; - reg[live] = increment_reg_val; - reg[live+1] = make_small(increment_val); - result = erts_gc_mixed_plus(c_p, reg, live); - HEAVY_SWAPIN; - ERTS_HOLE_CHECK(c_p); - if (is_value(result)) { - StoreBifResult(3, result); - } - ASSERT(c_p->freason != BADMATCH || is_value(c_p->fvalue)); - goto find_func_info; - } - -#define DO_OUTLINED_ARITH_2(name, Op1, Op2) \ - do { \ - Eterm result; \ - Uint live = Arg(1); \ - \ - HEAVY_SWAPOUT; \ - reg[live] = Op1; \ - reg[live+1] = Op2; \ - result = erts_gc_##name(c_p, reg, live); \ - HEAVY_SWAPIN; \ - ERTS_HOLE_CHECK(c_p); \ - if (is_value(result)) { \ - StoreBifResult(4, result); \ - } \ - goto lb_Cl_error; \ - } while (0) - - { - Eterm PlusOp1, PlusOp2; - Eterm result; - - OpCase(i_plus_jIxxd): - PlusOp1 = xb(Arg(2)); - PlusOp2 = xb(Arg(3)); - goto do_plus; - - OpCase(i_plus_jIxyd): - PlusOp1 = xb(Arg(2)); - PlusOp2 = yb(Arg(3)); - goto do_plus; - - OpCase(i_plus_jIssd): - GetArg2(2, PlusOp1, PlusOp2); - goto do_plus; - - do_plus: - if (is_both_small(PlusOp1, PlusOp2)) { - Sint i = signed_val(PlusOp1) + signed_val(PlusOp2); - ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i)); - if (MY_IS_SSMALL(i)) { - result = make_small(i); - StoreBifResult(4, result); - } - } - DO_OUTLINED_ARITH_2(mixed_plus, PlusOp1, PlusOp2); - } - - { - Eterm MinusOp1, MinusOp2; - Eterm result; - - OpCase(i_minus_jIxxd): - MinusOp1 = xb(Arg(2)); - MinusOp2 = xb(Arg(3)); - goto 
do_minus; - - OpCase(i_minus_jIssd): - GetArg2(2, MinusOp1, MinusOp2); - goto do_minus; - - do_minus: - if (is_both_small(MinusOp1, MinusOp2)) { - Sint i = signed_val(MinusOp1) - signed_val(MinusOp2); - ASSERT(MY_IS_SSMALL(i) == IS_SSMALL(i)); - if (MY_IS_SSMALL(i)) { - result = make_small(i); - StoreBifResult(4, result); - } - } - DO_OUTLINED_ARITH_2(mixed_minus, MinusOp1, MinusOp2); - } - - { - Eterm is_eq_exact_lit_val; - - OpCase(i_is_eq_exact_literal_fxc): - is_eq_exact_lit_val = xb(Arg(1)); - goto do_is_eq_exact_literal; - - OpCase(i_is_eq_exact_literal_fyc): - is_eq_exact_lit_val = yb(Arg(1)); - goto do_is_eq_exact_literal; - - do_is_eq_exact_literal: - if (!eq(Arg(2), is_eq_exact_lit_val)) { - ClauseFail(); - } - Next(3); - } - - { - Eterm is_ne_exact_lit_val; - - OpCase(i_is_ne_exact_literal_fxc): - is_ne_exact_lit_val = xb(Arg(1)); - goto do_is_ne_exact_literal; - - OpCase(i_is_ne_exact_literal_fyc): - is_ne_exact_lit_val = yb(Arg(1)); - goto do_is_ne_exact_literal; - - do_is_ne_exact_literal: - if (eq(Arg(2), is_ne_exact_lit_val)) { - ClauseFail(); - } - Next(3); - } - - OpCase(i_move_call_only_fc): { - r(0) = Arg(1); - } - /* FALL THROUGH */ - OpCase(i_call_only_f): { - SET_I((BeamInstr *) Arg(0)); - DTRACE_LOCAL_CALL(c_p, erts_code_to_codemfa(I)); - Dispatch(); - } - - OpCase(i_move_call_last_fPc): { - r(0) = Arg(2); - } - /* FALL THROUGH */ - OpCase(i_call_last_fP): { - RESTORE_CP(E); - E = ADD_BYTE_OFFSET(E, Arg(1)); - SET_I((BeamInstr *) Arg(0)); - DTRACE_LOCAL_CALL(c_p, erts_code_to_codemfa(I)); - Dispatch(); - } - - OpCase(i_move_call_cf): { - r(0) = Arg(0); - I++; - } - /* FALL THROUGH */ - OpCase(i_call_f): { - SET_CP(c_p, I+2); - SET_I((BeamInstr *) Arg(0)); - DTRACE_LOCAL_CALL(c_p, erts_code_to_codemfa(I)); - Dispatch(); - } - - OpCase(i_move_call_ext_last_ePc): { - r(0) = Arg(2); - } - /* FALL THROUGH */ - OpCase(i_call_ext_last_eP): - RESTORE_CP(E); - E = ADD_BYTE_OFFSET(E, Arg(1)); - - /* - * Note: The pointer to the export entry is never NULL; if the module - * is not loaded, it points to code which will invoke the error handler - * (see lb_call_error_handler below). - */ - DTRACE_GLOBAL_CALL_FROM_EXPORT(c_p, Arg(0)); - Dispatchx(); - - OpCase(i_move_call_ext_ce): { - r(0) = Arg(0); - I++; - } - /* FALL THROUGH */ - OpCase(i_call_ext_e): - SET_CP(c_p, I+2); - DTRACE_GLOBAL_CALL_FROM_EXPORT(c_p, Arg(0)); - Dispatchx(); - - OpCase(i_move_call_ext_only_ec): { - r(0) = Arg(1); - } - /* FALL THROUGH */ - OpCase(i_call_ext_only_e): - DTRACE_GLOBAL_CALL_FROM_EXPORT(c_p, Arg(0)); - Dispatchx(); - - OpCase(init_y): { - BeamInstr *next; - - PreFetch(1, next); - make_blank(yb(Arg(0))); - NextPF(1, next); - } - - OpCase(i_trim_I): { - BeamInstr *next; - Uint words; - Uint cp; - - words = Arg(0); - cp = E[0]; - PreFetch(1, next); - E += words; - E[0] = cp; - NextPF(1, next); - } - - OpCase(move_x1_c): { - x(1) = Arg(0); - Next(1); - } - - OpCase(move_x2_c): { - x(2) = Arg(0); - Next(1); - } - - OpCase(return): { - SET_I(c_p->cp); - DTRACE_RETURN_FROM_PC(c_p); - /* - * We must clear the CP to make sure that a stale value do not - * create a false module dependcy preventing code upgrading. - * It also means that we can use the CP in stack backtraces. - */ - c_p->cp = 0; - CHECK_TERM(r(0)); - HEAP_SPACE_VERIFIED(0); - DispatchReturn; - } - - /* - * Send is almost a standard call-BIF with two arguments, except for: - * 1) It cannot be traced. - * 2) There is no pointer to the send_2 function stored in - * the instruction. 
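
send, which follows, opens with the same reduction-budget test every call-BIF instruction uses; isolated (a sketch; neg_o_reds is the extended budget available when call saving is active):

    /* FCALLS counts remaining reductions. With save_calls active the
     * counter legitimately runs below zero, down to neg_o_reds, so
     * "out of reductions" means failing both comparisons. */
    static int out_of_reductions(long fcalls, long neg_o_reds)
    {
        return !(fcalls > 0 || fcalls > neg_o_reds);
    }
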
- */ - - OpCase(send): { - BeamInstr *next; - Eterm result; - - if (!(FCALLS > 0 || FCALLS > neg_o_reds)) { - /* If we have run out of reductions, we do a context - switch before calling the bif */ - c_p->arity = 2; - c_p->current = NULL; - goto context_switch3; - } - - PRE_BIF_SWAPOUT(c_p); - c_p->fcalls = FCALLS - 1; - result = erl_send(c_p, r(0), x(1)); - PreFetch(0, next); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - HTOP = HEAP_TOP(c_p); - FCALLS = c_p->fcalls; - if (is_value(result)) { - r(0) = result; - CHECK_TERM(r(0)); - NextPF(0, next); - } else if (c_p->freason == TRAP) { - SET_CP(c_p, I+1); - SET_I(c_p->i); - SWAPIN; - Dispatch(); - } - goto find_func_info; - } - - { - Eterm element_index; - Eterm element_tuple; - - OpCase(i_element_jxsd): - element_tuple = xb(Arg(1)); - goto do_element; - - OpCase(i_element_jysd): - element_tuple = yb(Arg(1)); - goto do_element; - - do_element: - GetArg1(2, element_index); - if (is_small(element_index) && is_tuple(element_tuple)) { - Eterm* tp = tuple_val(element_tuple); - - if ((signed_val(element_index) >= 1) && - (signed_val(element_index) <= arityval(*tp))) { - Eterm result = tp[signed_val(element_index)]; - StoreBifResult(3, result); - } - } - } - /* Fall through */ - - OpCase(badarg_j): - badarg: - c_p->freason = BADARG; - goto lb_Cl_error; - - { - Eterm fast_element_tuple; - - OpCase(i_fast_element_jxId): - fast_element_tuple = xb(Arg(1)); - goto do_fast_element; - - OpCase(i_fast_element_jyId): - fast_element_tuple = yb(Arg(1)); - goto do_fast_element; - - do_fast_element: - if (is_tuple(fast_element_tuple)) { - Eterm* tp = tuple_val(fast_element_tuple); - Eterm pos = Arg(2); /* Untagged integer >= 1 */ - if (pos <= arityval(*tp)) { - Eterm result = tp[pos]; - StoreBifResult(3, result); - } - } - goto badarg; - } - - OpCase(catch_yf): - c_p->catches++; - yb(Arg(0)) = Arg(1); - Next(2); - - OpCase(catch_end_y): { - c_p->catches--; - make_blank(yb(Arg(0))); - if (is_non_value(r(0))) { - c_p->fvalue = NIL; - if (x(1) == am_throw) { - r(0) = x(2); - } else { - if (x(1) == am_error) { - SWAPOUT; - x(2) = add_stacktrace(c_p, x(2), x(3)); - SWAPIN; - } - /* only x(2) is included in the rootset here */ - if (E - HTOP < 3) { - SWAPOUT; - PROCESS_MAIN_CHK_LOCKS(c_p); - FCALLS -= erts_garbage_collect_nobump(c_p, 3, reg+2, 1, FCALLS); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - SWAPIN; - } - r(0) = TUPLE2(HTOP, am_EXIT, x(2)); - HTOP += 3; - } - } - CHECK_TERM(r(0)); - Next(1); - } - - OpCase(try_end_y): { - c_p->catches--; - make_blank(yb(Arg(0))); - if (is_non_value(r(0))) { - c_p->fvalue = NIL; - r(0) = x(1); - x(1) = x(2); - x(2) = x(3); - } - Next(1); - } - - /* - * Skeleton for receive statement: - * - * recv_mark L1 Optional - * call make_ref/monitor Optional - * ... - * recv_set L1 Optional - * L1: <-------------------+ - * <-----------+ | - * | | - * loop_rec L2 ------+---+ | - * ... | | | - * remove_message | | | - * jump L3 | | | - * ... | | | - * loop_rec_end L1 --+ | | - * L2: <---------------+ | - * wait L1 -----------------+ or wait_timeout - * timeout - * - * L3: Code after receive... - * - * - */ - - OpCase(recv_mark_f): { - /* - * Save the current position in message buffer and the - * the label for the loop_rec/2 instruction for the - * the receive statement. 
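
i_recv_set below only trusts the mark if it still names the loop_rec instruction that follows; the pairing, modelled standalone (shortened field names):

    struct msg_queue {
        const void *mark;    /* label stored by recv_mark        */
        void **saved_last;   /* queue position at mark time      */
        void **save;         /* where loop_rec starts scanning   */
        void **first;        /* head of the queue                */
    };

    /* recv_set: rewind the scan position only for a valid mark, so a
     * stale mark degrades to scanning the whole queue, never to
     * skipping a matchable message. */
    static void recv_set(struct msg_queue *q, const void *loop_rec_label)
    {
        q->save = (q->mark == loop_rec_label) ? q->saved_last : q->first;
    }
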
- */ - c_p->msg.mark = (BeamInstr *) Arg(0); - c_p->msg.saved_last = c_p->msg.last; - Next(1); - } - - OpCase(i_recv_set): { - /* - * If the mark is valid (points to the loop_rec/2 - * instruction that follows), we know that the saved - * position points to the first message that could - * possibly be matched out. - * - * If the mark is invalid, we do nothing, meaning that - * we will look through all messages in the message queue. - */ - if (c_p->msg.mark == (BeamInstr *) (I+1)) { - c_p->msg.save = c_p->msg.saved_last; - } - I++; - /* Fall through to the loop_rec/2 instruction */ - } - - /* - * Pick up the next message and place it in x(0). - * If no message, jump to a wait or wait_timeout instruction. - */ - OpCase(i_loop_rec_f): - { - BeamInstr *next; - ErtsMessage* msgp; - - /* - * We need to disable GC while matching messages - * in the queue. This since messages with data outside - * the heap will be corrupted by a GC. - */ - ASSERT(!(c_p->flags & F_DELAY_GC)); - c_p->flags |= F_DELAY_GC; - - loop_rec__: - - PROCESS_MAIN_CHK_LOCKS(c_p); - - msgp = PEEK_MESSAGE(c_p); - - if (!msgp) { -#ifdef ERTS_SMP - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); - /* Make sure messages wont pass exit signals... */ - if (ERTS_PROC_PENDING_EXIT(c_p)) { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); - SWAPOUT; - c_p->flags &= ~F_DELAY_GC; - c_p->arity = 0; - goto do_schedule; /* Will be rescheduled for exit */ - } - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); - msgp = PEEK_MESSAGE(c_p); - if (msgp) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); - else -#endif - { - c_p->flags &= ~F_DELAY_GC; - SET_I((BeamInstr *) Arg(0)); - Goto(*I); /* Jump to a wait or wait_timeout instruction */ - } - } - if (is_non_value(ERL_MESSAGE_TERM(msgp))) { - SWAPOUT; /* erts_decode_dist_message() may write to heap... */ - if (!erts_decode_dist_message(c_p, ERTS_PROC_LOCK_MAIN, msgp, 0)) { - /* - * A corrupt distribution message that we weren't able to decode; - * remove it... - */ - /* No swapin should be needed */ - ASSERT(HTOP == c_p->htop && E == c_p->stop); - /* TODO: Add DTrace probe for this bad message situation? */ - UNLINK_MESSAGE(c_p, msgp); - msgp->next = NULL; - erts_cleanup_messages(msgp); - goto loop_rec__; - } - SWAPIN; - } - PreFetch(1, next); - r(0) = ERL_MESSAGE_TERM(msgp); - NextPF(1, next); - } - - /* - * Remove a (matched) message from the message queue. 
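
Stripped of sequence-trace and probe bookkeeping, the queue surgery that remove_message performs reduces to an unlink (sketch; the real UNLINK_MESSAGE also maintains the queue length and last pointer):

    struct msg { struct msg *next; };

    /* Unlink msgp given the link that points at it; JOIN_MESSAGE then
     * resets the receive scan position to the head of the queue. */
    static void unlink_message(struct msg **linkp, struct msg *msgp)
    {
        *linkp = msgp->next;
        msgp->next = NULL;
    }
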
- */ - OpCase(remove_message): { - BeamInstr *next; - ErtsMessage* msgp; - PROCESS_MAIN_CHK_LOCKS(c_p); - - ERTS_CHK_MBUF_SZ(c_p); - - PreFetch(0, next); - msgp = PEEK_MESSAGE(c_p); - - if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) { - save_calls(c_p, &exp_receive); - } - if (ERL_MESSAGE_TOKEN(msgp) == NIL) { -#ifdef USE_VM_PROBES - if (DT_UTAG(c_p) != NIL) { - if (DT_UTAG_FLAGS(c_p) & DT_UTAG_PERMANENT) { - SEQ_TRACE_TOKEN(c_p) = am_have_dt_utag; - } else { - DT_UTAG(c_p) = NIL; - SEQ_TRACE_TOKEN(c_p) = NIL; - } - } else { -#endif - SEQ_TRACE_TOKEN(c_p) = NIL; -#ifdef USE_VM_PROBES - } - DT_UTAG_FLAGS(c_p) &= ~DT_UTAG_SPREADING; -#endif - } else if (ERL_MESSAGE_TOKEN(msgp) != am_undefined) { - Eterm msg; - SEQ_TRACE_TOKEN(c_p) = ERL_MESSAGE_TOKEN(msgp); -#ifdef USE_VM_PROBES - if (ERL_MESSAGE_TOKEN(msgp) == am_have_dt_utag) { - if (DT_UTAG(c_p) == NIL) { - DT_UTAG(c_p) = ERL_MESSAGE_DT_UTAG(msgp); - } - DT_UTAG_FLAGS(c_p) |= DT_UTAG_SPREADING; - } else { -#endif - ASSERT(is_tuple(SEQ_TRACE_TOKEN(c_p))); - ASSERT(SEQ_TRACE_TOKEN_ARITY(c_p) == 5); - ASSERT(is_small(SEQ_TRACE_TOKEN_SERIAL(c_p))); - ASSERT(is_small(SEQ_TRACE_TOKEN_LASTCNT(c_p))); - ASSERT(is_small(SEQ_TRACE_TOKEN_FLAGS(c_p))); - ASSERT(is_pid(SEQ_TRACE_TOKEN_SENDER(c_p))); - c_p->seq_trace_lastcnt = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p)); - if (c_p->seq_trace_clock < unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p))) { - c_p->seq_trace_clock = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p)); - } - msg = ERL_MESSAGE_TERM(msgp); - seq_trace_output(SEQ_TRACE_TOKEN(c_p), msg, SEQ_TRACE_RECEIVE, - c_p->common.id, c_p); -#ifdef USE_VM_PROBES - } -#endif - } -#ifdef USE_VM_PROBES - if (DTRACE_ENABLED(message_receive)) { - Eterm token2 = NIL; - DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE); - Sint tok_label = 0; - Sint tok_lastcnt = 0; - Sint tok_serial = 0; - - dtrace_proc_str(c_p, receiver_name); - token2 = SEQ_TRACE_TOKEN(c_p); - if (have_seqtrace(token2)) { - tok_label = signed_val(SEQ_TRACE_T_LABEL(token2)); - tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token2)); - tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token2)); - } - DTRACE6(message_receive, - receiver_name, size_object(ERL_MESSAGE_TERM(msgp)), - c_p->msg.len - 1, tok_label, tok_lastcnt, tok_serial); - } -#endif - UNLINK_MESSAGE(c_p, msgp); - JOIN_MESSAGE(c_p); - CANCEL_TIMER(c_p); - - erts_save_message_in_proc(c_p, msgp); - c_p->flags &= ~F_DELAY_GC; - - if (ERTS_IS_GC_DESIRED_INTERNAL(c_p, HTOP, E)) { - /* - * We want to GC soon but we leave a few - * reductions giving the message some time - * to turn into garbage. - */ - ERTS_VBUMP_LEAVE_REDS_INTERNAL(c_p, 5, FCALLS); - } - - ERTS_DBG_CHK_REDS(c_p, FCALLS); - ERTS_CHK_MBUF_SZ(c_p); - - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - NextPF(0, next); - } - - /* - * Advance the save pointer to the next message (the current - * message didn't match), then jump to the loop_rec instruction. - */ - OpCase(loop_rec_end_f): { - - ASSERT(c_p->flags & F_DELAY_GC); - - SET_I((BeamInstr *) Arg(0)); - SAVE_MESSAGE(c_p); - if (FCALLS > 0 || FCALLS > neg_o_reds) { - FCALLS--; - goto loop_rec__; - } - - c_p->flags &= ~F_DELAY_GC; - c_p->i = I; - SWAPOUT; - c_p->arity = 0; - c_p->current = NULL; - goto do_schedule; - } - /* - * Prepare to wait for a message or a timeout, whichever occurs first. - * - * Note: In order to keep the compatibility between 32 and 64 bits - * emulators, only timeout values that can be represented in 32 bits - * (unsigned) or less are allowed. 
- */ - - - OpCase(i_wait_timeout_fs): { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); - - /* Fall through */ - } - OpCase(i_wait_timeout_locked_fs): { - Eterm timeout_value; - - /* - * If we have already set the timer, we must NOT set it again. Therefore, - * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag. - */ - if (c_p->flags & (F_INSLPQUEUE | F_TIMO)) { - goto wait2; - } - GetArg1(1, timeout_value); - if (timeout_value != make_small(0)) { - - if (timeout_value == am_infinity) - c_p->flags |= F_TIMO; - else { - int tres = erts_set_proc_timer_term(c_p, timeout_value); - if (tres == 0) { - /* - * The timer routiner will set c_p->i to the value in - * c_p->def_arg_reg[0]. Note that it is safe to use this - * location because there are no living x registers in - * a receive statement. - */ - BeamInstr** pi = (BeamInstr**) c_p->def_arg_reg; - *pi = I+3; - } - else { /* Wrong time */ - OpCase(i_wait_error_locked): { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); - /* Fall through */ - } - OpCase(i_wait_error): { - c_p->freason = EXC_TIMEOUT_VALUE; - goto find_func_info; - } - } - } - - /* - * Prepare to wait indefinitely for a new message to arrive - * (or the time set above if falling through from above). - * - * When a new message arrives, control will be transferred - * the loop_rec instruction (at label L1). In case of - * of timeout, control will be transferred to the timeout - * instruction following the wait_timeout instruction. - */ - - OpCase(wait_locked_f): - OpCase(wait_f): - - wait2: { -#ifndef ERTS_SMP - if (ERTS_PROC_IS_EXITING(c_p)) { - /* - * I non smp case: - * - * Currently executing process might be sent an exit - * signal if it is traced by a port that it also is - * linked to, and the port terminates during the - * trace. In this case we do *not* want to clear - * the active flag, which will make the process hang - * in limbo forever. - */ - SWAPOUT; - c_p->arity = 0; - goto do_schedule; - } -#endif - c_p->i = (BeamInstr *) Arg(0); /* L1 */ - SWAPOUT; - c_p->arity = 0; - - if (!ERTS_PTMR_IS_TIMED_OUT(c_p)) - erts_smp_atomic32_read_band_relb(&c_p->state, - ~ERTS_PSFLG_ACTIVE); - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); - c_p->current = NULL; - goto do_schedule; - } - OpCase(wait_unlocked_f): { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); - goto wait2; - } - } - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); - Next(2); - } - - OpCase(i_wait_timeout_fI): { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); - } - - OpCase(i_wait_timeout_locked_fI): - { - /* - * If we have already set the timer, we must NOT set it again. Therefore, - * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag. - */ - if ((c_p->flags & (F_INSLPQUEUE | F_TIMO)) == 0) { - BeamInstr** p = (BeamInstr **) c_p->def_arg_reg; - *p = I+3; - erts_set_proc_timer_uword(c_p, Arg(1)); - } - goto wait2; - } - - /* - * A timeout has occurred. Reset the save pointer so that the next - * receive statement will examine the first message first. 
- */ - OpCase(timeout_locked): { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); - } - - OpCase(timeout): { - BeamInstr *next; - - PreFetch(0, next); - if (IS_TRACED_FL(c_p, F_TRACE_RECEIVE)) { - trace_receive(c_p, am_clock_service, am_timeout, NULL); - } - if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) { - save_calls(c_p, &exp_timeout); - } - c_p->flags &= ~F_TIMO; - JOIN_MESSAGE(c_p); - NextPF(0, next); - } - - { - Eterm select_val2; - - OpCase(i_select_tuple_arity2_yfAAff): - select_val2 = yb(Arg(0)); - goto do_select_tuple_arity2; - - OpCase(i_select_tuple_arity2_xfAAff): - select_val2 = xb(Arg(0)); - goto do_select_tuple_arity2; - - do_select_tuple_arity2: - if (is_not_tuple(select_val2)) { - goto select_val2_fail; - } - select_val2 = *tuple_val(select_val2); - goto do_select_val2; - - OpCase(i_select_val2_yfccff): - select_val2 = yb(Arg(0)); - goto do_select_val2; - - OpCase(i_select_val2_xfccff): - select_val2 = xb(Arg(0)); - goto do_select_val2; - - do_select_val2: - if (select_val2 == Arg(2)) { - I += 3; - } else if (select_val2 == Arg(3)) { - I += 4; - } - - select_val2_fail: - SET_I((BeamInstr *) Arg(1)); - Goto(*I); - } - - { - Eterm select_val; - - OpCase(i_select_tuple_arity_xfI): - select_val = xb(Arg(0)); - goto do_select_tuple_arity; - - OpCase(i_select_tuple_arity_yfI): - select_val = yb(Arg(0)); - goto do_select_tuple_arity; - - do_select_tuple_arity: - if (is_tuple(select_val)) { - select_val = *tuple_val(select_val); - goto do_linear_search; - } - SET_I((BeamInstr *) Arg(1)); - Goto(*I); - - OpCase(i_select_val_lins_xfI): - select_val = xb(Arg(0)); - goto do_linear_search; - - OpCase(i_select_val_lins_yfI): - select_val = yb(Arg(0)); - goto do_linear_search; - - do_linear_search: { - BeamInstr *vs = &Arg(3); - int ix = 0; - - for(;;) { - if (vs[ix+0] >= select_val) { ix += 0; break; } - if (vs[ix+1] >= select_val) { ix += 1; break; } - ix += 2; - } - - if (vs[ix] == select_val) { - I += ix + Arg(2) + 2; - } - - SET_I((BeamInstr *) Arg(1)); - Goto(*I); - } - - OpCase(i_select_val_bins_xfI): - select_val = xb(Arg(0)); - goto do_binary_search; - - OpCase(i_select_val_bins_yfI): - select_val = yb(Arg(0)); - goto do_binary_search; - - do_binary_search: - { - struct Pairs { - BeamInstr val; - BeamInstr* addr; - }; - struct Pairs* low; - struct Pairs* high; - struct Pairs* mid; - int bdiff; /* int not long because the arrays aren't that large */ - - low = (struct Pairs *) &Arg(3); - high = low + Arg(2); - - /* The pointer subtraction (high-low) below must produce - * a signed result, because high could be < low. That - * requires the compiler to insert quite a bit of code. - * - * However, high will be > low so the result will be - * positive. We can use that knowledge to optimise the - * entire sequence, from the initial comparison to the - * computation of mid. 
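
As a standalone function, the optimised loop that follows reads like this (same shape as the instruction's code; the mask trick assumes sizeof(struct pair) is a power of two, which holds for two machine words):

    #include <stddef.h>

    struct pair { unsigned long val; void *addr; };

    /* Midpoint via the (known non-negative) byte difference: halve it,
     * then mask down to a multiple of the element size instead of
     * doing a signed division. */
    static void *search(struct pair *low, struct pair *high,
                        unsigned long key)
    {
        int bdiff;

        while ((bdiff = (int)((char *)high - (char *)low)) > 0) {
            unsigned int boff =
                ((unsigned int)bdiff >> 1) &
                ~(unsigned int)(sizeof(struct pair) - 1);
            struct pair *mid = (struct pair *)((char *)low + boff);

            if (key < mid->val)
                high = mid;
            else if (key > mid->val)
                low = mid + 1;
            else
                return mid->addr;
        }
        return NULL;   /* corresponds to the fail label */
    }
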
- * - * -- Mikael Pettersson, Acumem AB - * - * Original loop control code: - * - * while (low < high) { - * mid = low + (high-low) / 2; - * - */ - while ((bdiff = (int)((char*)high - (char*)low)) > 0) { - unsigned int boffset = ((unsigned int)bdiff >> 1) & ~(sizeof(struct Pairs)-1); - - mid = (struct Pairs*)((char*)low + boffset); - if (select_val < mid->val) { - high = mid; - } else if (select_val > mid->val) { - low = mid + 1; - } else { - SET_I(mid->addr); - Goto(*I); - } - } - SET_I((BeamInstr *) Arg(1)); - Goto(*I); - } - } - - { - Eterm jump_on_val_zero_index; - - OpCase(i_jump_on_val_zero_yfI): - jump_on_val_zero_index = yb(Arg(0)); - goto do_jump_on_val_zero_index; - - OpCase(i_jump_on_val_zero_xfI): - jump_on_val_zero_index = xb(Arg(0)); - goto do_jump_on_val_zero_index; - - do_jump_on_val_zero_index: - if (is_small(jump_on_val_zero_index)) { - jump_on_val_zero_index = signed_val(jump_on_val_zero_index); - if (jump_on_val_zero_index < Arg(2)) { - SET_I((BeamInstr *) (&Arg(3))[jump_on_val_zero_index]); - Goto(*I); - } - } - SET_I((BeamInstr *) Arg(1)); - Goto(*I); - } - - { - Eterm jump_on_val_index; - - - OpCase(i_jump_on_val_yfII): - jump_on_val_index = yb(Arg(0)); - goto do_jump_on_val_index; - - OpCase(i_jump_on_val_xfII): - jump_on_val_index = xb(Arg(0)); - goto do_jump_on_val_index; - - do_jump_on_val_index: - if (is_small(jump_on_val_index)) { - jump_on_val_index = (Uint) (signed_val(jump_on_val_index) - Arg(3)); - if (jump_on_val_index < Arg(2)) { - SET_I((BeamInstr *) (&Arg(4))[jump_on_val_index]); - Goto(*I); - } - } - SET_I((BeamInstr *) Arg(1)); - Goto(*I); - } - - do_put_tuple: { - Eterm* hp = HTOP; - - *hp++ = make_arityval(pt_arity); - - do { - Eterm term = *I++; - switch (loader_tag(term)) { - case LOADER_X_REG: - *hp++ = x(loader_x_reg_index(term)); - break; - case LOADER_Y_REG: - *hp++ = y(loader_y_reg_index(term)); - break; - default: - *hp++ = term; - break; - } - } while (--pt_arity != 0); - HTOP = hp; - Goto(*I); - } - - OpCase(new_map_dII): { - Eterm res; - - HEAVY_SWAPOUT; - res = new_map(c_p, reg, I-1); - HEAVY_SWAPIN; - StoreResult(res, Arg(0)); - Next(3+Arg(2)); - } - -#define PUT_TERM_REG(term, desc) \ -do { \ - switch (loader_tag(desc)) { \ - case LOADER_X_REG: \ - x(loader_x_reg_index(desc)) = (term); \ - break; \ - case LOADER_Y_REG: \ - y(loader_y_reg_index(desc)) = (term); \ - break; \ - default: \ - ASSERT(0); \ - break; \ - } \ -} while(0) - - OpCase(i_get_map_elements_fsI): { - Eterm map; - BeamInstr *fs; - Uint sz, n; - - GetArg1(1, map); - - /* this instruction assumes Arg1 is a map, - * i.e. that it follows a test is_map if needed. 
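
The operand area that i_get_map_elements walks (fs in the code below) is a flat list of triples; a sketch of that layout, with lookup() standing in for erts_hashmap_get() and store() for PUT_TERM_REG():

    typedef unsigned long Instr;

    /* fs[3*i+0] = key, fs[3*i+1] = destination register descriptor,
     * fs[3*i+2] = precomputed hash of the key (hashmap path only). */
    static int get_map_elements(const Instr *fs, unsigned n,
                                int (*lookup)(Instr key, Instr hash,
                                              Instr *val),
                                void (*store)(Instr desc, Instr val))
    {
        while (n--) {
            Instr val;
            if (!lookup(fs[0], fs[2], &val))
                return 0;            /* ClauseFail() in the instruction */
            store(fs[1], val);
            fs += 3;
        }
        return 1;
    }
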
- */ - - n = (Uint)Arg(2) / 3; - fs = &Arg(3); /* pattern fields and target registers */ - - if (is_flatmap(map)) { - flatmap_t *mp; - Eterm *ks; - Eterm *vs; - - mp = (flatmap_t *)flatmap_val(map); - sz = flatmap_get_size(mp); - - if (sz == 0) { - ClauseFail(); - } - - ks = flatmap_get_keys(mp); - vs = flatmap_get_values(mp); - - while(sz) { - if (EQ((Eterm) fs[0], *ks)) { - PUT_TERM_REG(*vs, fs[1]); - n--; - fs += 3; - /* no more values to fetch, we are done */ - if (n == 0) { - I = fs; - Next(-1); - } - } - ks++, sz--, vs++; - } - - ClauseFail(); - } else { - const Eterm *v; - Uint32 hx; - ASSERT(is_hashmap(map)); - while(n--) { - hx = fs[2]; - ASSERT(hx == hashmap_make_hash((Eterm)fs[0])); - if ((v = erts_hashmap_get(hx, (Eterm)fs[0], map)) == NULL) { - ClauseFail(); - } - PUT_TERM_REG(*v, fs[1]); - fs += 3; - } - I = fs; - Next(-1); - } - } -#undef PUT_TERM_REG - - OpCase(update_map_assoc_jsdII): { - Eterm res; - Eterm map; - - GetArg1(1, map); - HEAVY_SWAPOUT; - res = update_map_assoc(c_p, reg, map, I); - HEAVY_SWAPIN; - if (is_value(res)) { - StoreResult(res, Arg(2)); - Next(5+Arg(4)); - } else { - /* - * This can only happen if the code was compiled - * with the compiler in OTP 17. - */ - c_p->freason = BADMAP; - c_p->fvalue = map; - goto lb_Cl_error; - } - } - - OpCase(update_map_exact_jsdII): { - Eterm res; - Eterm map; - - GetArg1(1, map); - HEAVY_SWAPOUT; - res = update_map_exact(c_p, reg, map, I); - HEAVY_SWAPIN; - if (is_value(res)) { - StoreResult(res, Arg(2)); - Next(5+Arg(4)); - } else { - goto lb_Cl_error; - } - } - - - /* - * All guards with zero arguments have special instructions: - * self/0 - * node/0 - * - * All other guard BIFs take one or two arguments. - */ - - /* - * Guard BIF in head. On failure, ignore the error and jump - * to the code for the next clause. We don't support tracing - * of guard BIFs. - */ - - OpCase(bif1_fbsd): - { - ErtsBifFunc bf; - Eterm tmp_reg[1]; - Eterm result; - - GetArg1(2, tmp_reg[0]); - bf = (BifFunction) Arg(1); - ERTS_DBG_CHK_REDS(c_p, FCALLS); - c_p->fcalls = FCALLS; - PROCESS_MAIN_CHK_LOCKS(c_p); - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - ERTS_CHK_MBUF_SZ(c_p); - result = (*bf)(c_p, tmp_reg, I); - ERTS_CHK_MBUF_SZ(c_p); - ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result)); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_HOLE_CHECK(c_p); - FCALLS = c_p->fcalls; - ERTS_DBG_CHK_REDS(c_p, FCALLS); - if (is_value(result)) { - StoreBifResult(3, result); - } - SET_I((BeamInstr *) Arg(0)); - Goto(*I); - } - - /* - * Guard BIF in body. It can fail like any BIF. No trace support. 
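
The head and body flavours of bif1 differ only on the failure path; side by side (a sketch, with THE_NON_VALUE modelling Erlang's non-value marker):

    typedef unsigned long Eterm;
    #define THE_NON_VALUE ((Eterm)0)   /* model of the non-value marker */

    typedef Eterm (*Bif1)(Eterm);

    enum { RES_OK, RES_NEXT_CLAUSE, RES_RAISE };

    /* Guard position: a failing BIF silently selects the fail label. */
    static int bif1_in_guard(Bif1 bf, Eterm arg, Eterm *out)
    {
        Eterm res = bf(arg);
        if (res == THE_NON_VALUE)
            return RES_NEXT_CLAUSE;    /* SET_I(fail label)   */
        *out = res;
        return RES_OK;
    }

    /* Body position: the same failure becomes a real exception. */
    static int bif1_in_body(Bif1 bf, Eterm arg, Eterm *out)
    {
        Eterm res = bf(arg);
        if (res == THE_NON_VALUE)
            return RES_RAISE;          /* handle_error() path */
        *out = res;
        return RES_OK;
    }
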
- */ - - OpCase(bif1_body_bsd): - { - ErtsBifFunc bf; - - Eterm tmp_reg[1]; - Eterm result; - - GetArg1(1, tmp_reg[0]); - bf = (ErtsBifFunc) Arg(0); - ERTS_DBG_CHK_REDS(c_p, FCALLS); - c_p->fcalls = FCALLS; - PROCESS_MAIN_CHK_LOCKS(c_p); - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - ERTS_CHK_MBUF_SZ(c_p); - result = (*bf)(c_p, tmp_reg, I); - ERTS_CHK_MBUF_SZ(c_p); - ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result)); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_HOLE_CHECK(c_p); - FCALLS = c_p->fcalls; - ERTS_DBG_CHK_REDS(c_p, FCALLS); - if (is_value(result)) { - StoreBifResult(2, result); - } - reg[0] = tmp_reg[0]; - SWAPOUT; - I = handle_error(c_p, I, reg, ubif2mfa((void *) bf)); - goto post_error_handling; - } - - OpCase(i_gc_bif1_jIsId): - { - typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint); - GcBifFunction bf; - Eterm result; - Uint live = (Uint) Arg(3); - - GetArg1(2, x(live)); - bf = (GcBifFunction) Arg(1); - ERTS_DBG_CHK_REDS(c_p, FCALLS); - c_p->fcalls = FCALLS; - SWAPOUT; - PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); - ERTS_CHK_MBUF_SZ(c_p); - result = (*bf)(c_p, reg, live); - ERTS_CHK_MBUF_SZ(c_p); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - SWAPIN; - ERTS_HOLE_CHECK(c_p); - FCALLS = c_p->fcalls; - ERTS_DBG_CHK_REDS(c_p, FCALLS); - if (is_value(result)) { - StoreBifResult(4, result); - } - if (Arg(0) != 0) { - SET_I((BeamInstr *) Arg(0)); - Goto(*I); - } - x(0) = x(live); - I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf)); - goto post_error_handling; - } - - OpCase(i_gc_bif2_jIIssd): /* Note, one less parameter than the i_gc_bif1 - and i_gc_bif3 */ - { - typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint); - GcBifFunction bf; - Eterm result; - Uint live = (Uint) Arg(2); - - GetArg2(3, x(live), x(live+1)); - /* - * XXX This calling convention does not make sense. 'live' - * should point out the first argument, not the second - * (i.e. 'live' should not be incremented below). - */ - live++; - bf = (GcBifFunction) Arg(1); - ERTS_DBG_CHK_REDS(c_p, FCALLS); - c_p->fcalls = FCALLS; - SWAPOUT; - PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); - ERTS_CHK_MBUF_SZ(c_p); - result = (*bf)(c_p, reg, live); - ERTS_CHK_MBUF_SZ(c_p); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - SWAPIN; - ERTS_HOLE_CHECK(c_p); - FCALLS = c_p->fcalls; - ERTS_DBG_CHK_REDS(c_p, FCALLS); - if (is_value(result)) { - StoreBifResult(5, result); - } - if (Arg(0) != 0) { - SET_I((BeamInstr *) Arg(0)); - Goto(*I); - } - live--; - x(0) = x(live); - x(1) = x(live+1); - I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf)); - goto post_error_handling; - } - - OpCase(i_gc_bif3_jIIssd): - { - typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint); - GcBifFunction bf; - Eterm result; - Uint live = (Uint) Arg(2); - - x(live) = x(SCRATCH_X_REG); - GetArg2(3, x(live+1), x(live+2)); - /* - * XXX This calling convention does not make sense. 'live' - * should point out the first argument, not the third - * (i.e. 'live' should not be incremented below). 
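
The convention that the XXX comments above flag, made explicit: arguments to a GC BIF are parked in the registers just above the live ones, so a collection inside the BIF sees them as roots, and the VM then passes an inflated live count (sketch):

    typedef unsigned long Eterm;
    typedef Eterm (*GcBif)(void *proc, Eterm *reg, unsigned live);

    static Eterm call_gc_bif2(void *p, Eterm *reg, unsigned live,
                              GcBif bf, Eterm a1, Eterm a2)
    {
        reg[live]     = a1;    /* visible to GC while bf runs */
        reg[live + 1] = a2;
        /* the quirk: live is bumped so it names the second argument
         * rather than the first one */
        return bf(p, reg, live + 1);
    }
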
- */ - live += 2; - bf = (GcBifFunction) Arg(1); - ERTS_DBG_CHK_REDS(c_p, FCALLS); - c_p->fcalls = FCALLS; - SWAPOUT; - PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); - ERTS_CHK_MBUF_SZ(c_p); - result = (*bf)(c_p, reg, live); - ERTS_CHK_MBUF_SZ(c_p); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - SWAPIN; - ERTS_HOLE_CHECK(c_p); - FCALLS = c_p->fcalls; - ERTS_DBG_CHK_REDS(c_p, FCALLS); - if (is_value(result)) { - StoreBifResult(5, result); - } - if (Arg(0) != 0) { - SET_I((BeamInstr *) Arg(0)); - Goto(*I); - } - live -= 2; - x(0) = x(live); - x(1) = x(live+1); - x(2) = x(live+2); - I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf)); - goto post_error_handling; - } - - /* - * Guards bifs and, or, xor in guards. - */ - OpCase(i_bif2_fbssd): - { - Eterm tmp_reg[2]; - ErtsBifFunc bf; - Eterm result; - - GetArg2(2, tmp_reg[0], tmp_reg[1]); - bf = (ErtsBifFunc) Arg(1); - ERTS_DBG_CHK_REDS(c_p, FCALLS); - c_p->fcalls = FCALLS; - PROCESS_MAIN_CHK_LOCKS(c_p); - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - ERTS_CHK_MBUF_SZ(c_p); - result = (*bf)(c_p, tmp_reg, I); - ERTS_CHK_MBUF_SZ(c_p); - ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result)); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_HOLE_CHECK(c_p); - FCALLS = c_p->fcalls; - ERTS_DBG_CHK_REDS(c_p, FCALLS); - if (is_value(result)) { - StoreBifResult(4, result); - } - SET_I((BeamInstr *) Arg(0)); - Goto(*I); - } - - /* - * Guards bifs and, or, xor, relational operators in body. - */ - OpCase(i_bif2_body_bssd): - { - Eterm tmp_reg[2]; - ErtsBifFunc bf; - Eterm result; - - GetArg2(1, tmp_reg[0], tmp_reg[1]); - bf = (ErtsBifFunc) Arg(0); - PROCESS_MAIN_CHK_LOCKS(c_p); - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - ERTS_CHK_MBUF_SZ(c_p); - result = (*bf)(c_p, tmp_reg, I); - ERTS_CHK_MBUF_SZ(c_p); - ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result)); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_HOLE_CHECK(c_p); - if (is_value(result)) { - ASSERT(!is_CP(result)); - StoreBifResult(3, result); - } - reg[0] = tmp_reg[0]; - reg[1] = tmp_reg[1]; - SWAPOUT; - I = handle_error(c_p, I, reg, ubif2mfa((void *) bf)); - goto post_error_handling; - } - - /* - * The most general BIF call. The BIF may build any amount of data - * on the heap. The result is always returned in r(0). 
- */ - OpCase(call_bif_e): - { - ErtsBifFunc bf; - Eterm result; - BeamInstr *next; - ErlHeapFragment *live_hf_end; - Export *export = (Export*)Arg(0); - - - if (!((FCALLS - 1) > 0 || (FCALLS-1) > neg_o_reds)) { - /* If we have run out of reductions, we do a context - switch before calling the bif */ - c_p->arity = GET_BIF_ARITY(export); - c_p->current = &export->info.mfa; - goto context_switch3; - } - - ERTS_MSACC_SET_BIF_STATE_CACHED_X( - GET_BIF_MODULE(export), GET_BIF_ADDRESS(export)); - - bf = GET_BIF_ADDRESS(export); - PRE_BIF_SWAPOUT(c_p); - ERTS_DBG_CHK_REDS(c_p, FCALLS); - c_p->fcalls = FCALLS - 1; - if (FCALLS <= 0) { - save_calls(c_p, export); - } - PreFetch(1, next); - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - live_hf_end = c_p->mbuf; - ERTS_CHK_MBUF_SZ(c_p); - result = (*bf)(c_p, reg, I); - ERTS_CHK_MBUF_SZ(c_p); - ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result)); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - ERTS_HOLE_CHECK(c_p); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); - if (ERTS_IS_GC_DESIRED(c_p)) { - Uint arity = GET_BIF_ARITY(export); - result = erts_gc_after_bif_call_lhf(c_p, live_hf_end, result, reg, arity); - E = c_p->stop; - } - PROCESS_MAIN_CHK_LOCKS(c_p); - HTOP = HEAP_TOP(c_p); - FCALLS = c_p->fcalls; - ERTS_DBG_CHK_REDS(c_p, FCALLS); - /* We have to update the cache if we are enabled in order - to make sure no book keeping is done after we disabled - msacc. We don't always do this as it is quite expensive. */ - if (ERTS_MSACC_IS_ENABLED_CACHED_X()) - ERTS_MSACC_UPDATE_CACHE_X(); - ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR); - if (is_value(result)) { - r(0) = result; - CHECK_TERM(r(0)); - NextPF(1, next); - } else if (c_p->freason == TRAP) { - SET_CP(c_p, I+2); - SET_I(c_p->i); - SWAPIN; - Dispatch(); - } - - /* - * Error handling. SWAPOUT is not needed because it was done above. - */ - ASSERT(c_p->stop == E); - I = handle_error(c_p, I, reg, &export->info.mfa); - goto post_error_handling; - } - - /* - * Arithmetic operations. - */ - - OpCase(i_times_jIssd): - { - Eterm Op1, Op2; - GetArg2(2, Op1, Op2); - DO_OUTLINED_ARITH_2(mixed_times, Op1, Op2); - } - - OpCase(i_m_div_jIssd): - { - Eterm Op1, Op2; - GetArg2(2, Op1, Op2); - DO_OUTLINED_ARITH_2(mixed_div, Op1, Op2); - } - - OpCase(i_int_div_jIssd): - { - Eterm Op1, Op2; - - GetArg2(2, Op1, Op2); - if (Op2 == SMALL_ZERO) { - goto badarith; - } else if (is_both_small(Op1, Op2)) { - Sint ires = signed_val(Op1) / signed_val(Op2); - if (MY_IS_SSMALL(ires)) { - Eterm result = make_small(ires); - StoreBifResult(4, result); - } - } - DO_OUTLINED_ARITH_2(int_div, Op1, Op2); - } - - { - Eterm RemOp1, RemOp2; - - OpCase(i_rem_jIxxd): - RemOp1 = xb(Arg(2)); - RemOp2 = xb(Arg(3)); - goto do_rem; - - OpCase(i_rem_jIssd): - GetArg2(2, RemOp1, RemOp2); - goto do_rem; - - do_rem: - if (RemOp2 == SMALL_ZERO) { - goto badarith; - } else if (is_both_small(RemOp1, RemOp2)) { - Eterm result = make_small(signed_val(RemOp1) % signed_val(RemOp2)); - StoreBifResult(4, result); - } else { - DO_OUTLINED_ARITH_2(int_rem, RemOp1, RemOp2); - } - } - - { - Eterm BandOp1, BandOp2; - - OpCase(i_band_jIxcd): - BandOp1 = xb(Arg(2)); - BandOp2 = Arg(3); - goto do_band; - - OpCase(i_band_jIssd): - GetArg2(2, BandOp1, BandOp2); - goto do_band; - - do_band: - if (is_both_small(BandOp1, BandOp2)) { - /* - * No need to untag -- TAG & TAG == TAG. 
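/*
 * Editorial sketch: why band/bor can run on the tagged words directly
 * while bxor cannot.  Assuming BEAM-style immediate tagging of small
 * integers in the low four bits (the 0xF tag below is an assumption
 * of this sketch):
 */
#include <assert.h>

#define mk_small(v)  (((unsigned long)(v) << 4) | 0xFUL)

int main(void)
{
    unsigned long a = mk_small(12), b = mk_small(10);

    assert((a & b) == mk_small(12 & 10));                 /* TAG & TAG == TAG */
    assert((a | b) == mk_small(12 | 10));                 /* TAG | TAG == TAG */
    assert(((a ^ b) | mk_small(0)) == mk_small(12 ^ 10)); /* TAG ^ TAG == 0, so re-tag */
    return 0;
}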
- */ - Eterm result = BandOp1 & BandOp2; - StoreBifResult(4, result); - } - DO_OUTLINED_ARITH_2(band, BandOp1, BandOp2); - } - - /* - * An error occurred in an arithmetic operation or test that could - * appear either in a head or in a body. - * In a head, execution should continue at failure address in Arg(0). - * In a body, Arg(0) == 0 and an exception should be raised. - */ - lb_Cl_error: { - if (Arg(0) != 0) { - OpCase(jump_f): { - jump_f: - SET_I((BeamInstr *) Arg(0)); - Goto(*I); - } - } - ASSERT(c_p->freason != BADMATCH || is_value(c_p->fvalue)); - goto find_func_info; - } - - OpCase(i_bor_jIssd): - { - Eterm Op1, Op2; - - GetArg2(2, Op1, Op2); - if (is_both_small(Op1, Op2)) { - /* - * No need to untag -- TAG | TAG == TAG. - */ - Eterm result = Op1 | Op2; - StoreBifResult(4, result); - } - DO_OUTLINED_ARITH_2(bor, Op1, Op2); - } - - OpCase(i_bxor_jIssd): - { - Eterm Op1, Op2; - - GetArg2(2, Op1, Op2); - if (is_both_small(Op1, Op2)) { - /* - * TAG ^ TAG == 0. - * - * Therefore, we perform the XOR operation on the tagged values, - * and OR in the tag bits. - */ - Eterm result = (Op1 ^ Op2) | make_small(0); - StoreBifResult(4, result); - } - DO_OUTLINED_ARITH_2(bxor, Op1, Op2); - } - - { - Eterm Op1, Op2; - Sint i; - Sint ires; - Eterm* bigp; - Eterm tmp_big[2]; - - OpCase(i_bsr_jIssd): - GetArg2(2, Op1, Op2); - if (is_small(Op2)) { - i = -signed_val(Op2); - if (is_small(Op1)) { - goto small_shift; - } else if (is_big(Op1)) { - if (i == 0) { - StoreBifResult(4, Op1); - } - ires = big_size(Op1); - goto big_shift; - } - } else if (is_big(Op2)) { - /* - * N bsr NegativeBigNum == N bsl MAX_SMALL - * N bsr PositiveBigNum == N bsl MIN_SMALL - */ - Op2 = make_small(bignum_header_is_neg(*big_val(Op2)) ? - MAX_SMALL : MIN_SMALL); - goto do_bsl; - } - goto badarith; - - OpCase(i_bsl_jIssd): - GetArg2(2, Op1, Op2); - do_bsl: - if (is_small(Op2)) { - i = signed_val(Op2); - - if (is_small(Op1)) { - small_shift: - ires = signed_val(Op1); - - if (i == 0 || ires == 0) { - StoreBifResult(4, Op1); - } else if (i < 0) { /* Right shift */ - i = -i; - if (i >= SMALL_BITS-1) { - Op1 = (ires < 0) ? SMALL_MINUS_ONE : SMALL_ZERO; - } else { - Op1 = make_small(ires >> i); - } - StoreBifResult(4, Op1); - } else if (i < SMALL_BITS-1) { /* Left shift */ - if ((ires > 0 && ((~(Uint)0 << ((SMALL_BITS-1)-i)) & ires) == 0) || - ((~(Uint)0 << ((SMALL_BITS-1)-i)) & ~ires) == 0) { - Op1 = make_small(ires << i); - StoreBifResult(4, Op1); - } - } - ires = 1; /* big_size(small_to_big(Op1)) */ - - big_shift: - if (i > 0) { /* Left shift. */ - ires += (i / D_EXP); - } else { /* Right shift. */ - if (ires <= (-i / D_EXP)) - ires = 3; /* ??? */ - else - ires -= (-i / D_EXP); - } - { - ires = BIG_NEED_SIZE(ires+1); - /* - * Slightly conservative check the size to avoid - * allocating huge amounts of memory for bignums that - * clearly would overflow the arity in the header - * word. - */ - if (ires-8 > BIG_ARITY_MAX) { - c_p->freason = SYSTEM_LIMIT; - goto lb_Cl_error; - } - TestHeapPreserve(ires+1, Arg(1), Op1); - if (is_small(Op1)) { - Op1 = small_to_big(signed_val(Op1), tmp_big); - } - bigp = HTOP; - Op1 = big_lshift(Op1, i, bigp); - if (is_big(Op1)) { - HTOP += bignum_header_arity(*HTOP) + 1; - } - HEAP_SPACE_VERIFIED(0); - if (is_nil(Op1)) { - /* - * This result must have been only slight larger - * than allowed since it wasn't caught by the - * previous test. 
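/*
 * Editorial sketch: the small_shift fast path above only keeps a left
 * shift on the small-integer track if no significant bits would fall
 * off the top; otherwise it falls through to the bignum path.  A
 * stand-alone restatement of that guard, with VBITS standing in for
 * SMALL_BITS-1 (the exact width is platform dependent; 59 here is an
 * assumption):
 */
#include <stdint.h>

#define VBITS 59   /* value bits of a small; an assumption here */

/* Returns 0 and the shifted value when it still fits in a small,
 * -1 when the emulator would have to allocate a bignum instead. */
static int small_shl(int64_t v, unsigned s, int64_t *out)
{
    uint64_t mask;
    if (s >= VBITS)
        return -1;
    mask = ~(uint64_t)0 << (VBITS - s);
    if ((v >= 0 && (((uint64_t)v) & mask) == 0) ||
        (v < 0 && (((uint64_t)~v) & mask) == 0)) {
        *out = (int64_t)((uint64_t)v << s);  /* wraps as two's complement */
        return 0;
    }
    return -1;
}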
- */ - c_p->freason = SYSTEM_LIMIT; - goto lb_Cl_error; - } - ERTS_HOLE_CHECK(c_p); - StoreBifResult(4, Op1); - } - } else if (is_big(Op1)) { - if (i == 0) { - StoreBifResult(4, Op1); - } - ires = big_size(Op1); - goto big_shift; - } - } else if (is_big(Op2)) { - if (bignum_header_is_neg(*big_val(Op2))) { - /* - * N bsl NegativeBigNum is either 0 or -1, depending on - * the sign of N. Since we don't believe this case - * is common, do the calculation with the minimum - * amount of code. - */ - Op2 = make_small(MIN_SMALL); - goto do_bsl; - } else if (is_small(Op1) || is_big(Op1)) { - /* - * N bsl PositiveBigNum is too large to represent. - */ - c_p->freason = SYSTEM_LIMIT; - goto lb_Cl_error; - } - /* Fall through if the left argument is not an integer. */ - } - /* - * One or more non-integer arguments. - */ - goto badarith; - } - - OpCase(i_int_bnot_jsId): - { - Eterm bnot_val; - - GetArg1(1, bnot_val); - if (is_small(bnot_val)) { - bnot_val = make_small(~signed_val(bnot_val)); - } else { - Uint live = Arg(2); - HEAVY_SWAPOUT; - reg[live] = bnot_val; - bnot_val = erts_gc_bnot(c_p, reg, live); - HEAVY_SWAPIN; - ERTS_HOLE_CHECK(c_p); - if (is_nil(bnot_val)) { - goto lb_Cl_error; - } - } - StoreBifResult(3, bnot_val); - } - - badarith: - c_p->freason = BADARITH; - goto lb_Cl_error; - - OpCase(i_apply): { - BeamInstr *next; - HEAVY_SWAPOUT; - next = apply(c_p, r(0), x(1), x(2), reg, NULL, 0); - HEAVY_SWAPIN; - if (next != NULL) { - SET_CP(c_p, I+1); - SET_I(next); - Dispatch(); - } - I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa); - goto post_error_handling; - } - - OpCase(i_apply_last_P): { - BeamInstr *next; - HEAVY_SWAPOUT; - next = apply(c_p, r(0), x(1), x(2), reg, I, Arg(0)); - HEAVY_SWAPIN; - if (next != NULL) { - SET_CP(c_p, (BeamInstr *) E[0]); - E = ADD_BYTE_OFFSET(E, Arg(0)); - SET_I(next); - Dispatch(); - } - I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa); - goto post_error_handling; - } - - OpCase(i_apply_only): { - BeamInstr *next; - HEAVY_SWAPOUT; - next = apply(c_p, r(0), x(1), x(2), reg, I, 0); - HEAVY_SWAPIN; - if (next != NULL) { - SET_I(next); - Dispatch(); - } - I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa); - goto post_error_handling; - } - - OpCase(apply_I): { - BeamInstr *next; - - HEAVY_SWAPOUT; - next = fixed_apply(c_p, reg, Arg(0), NULL, 0); - HEAVY_SWAPIN; - if (next != NULL) { - SET_CP(c_p, I+2); - SET_I(next); - Dispatch(); - } - I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa); - goto post_error_handling; - } - - OpCase(apply_last_IP): { - BeamInstr *next; - - HEAVY_SWAPOUT; - next = fixed_apply(c_p, reg, Arg(0), I, Arg(1)); - HEAVY_SWAPIN; - if (next != NULL) { - SET_CP(c_p, (BeamInstr *) E[0]); - E = ADD_BYTE_OFFSET(E, Arg(1)); - SET_I(next); - Dispatch(); - } - I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa); - goto post_error_handling; - } - - OpCase(i_apply_fun): { - BeamInstr *next; - - HEAVY_SWAPOUT; - next = apply_fun(c_p, r(0), x(1), reg); - HEAVY_SWAPIN; - if (next != NULL) { - SET_CP(c_p, I+1); - SET_I(next); - Dispatchfun(); - } - goto find_func_info; - } - - OpCase(i_apply_fun_last_P): { - BeamInstr *next; - - HEAVY_SWAPOUT; - next = apply_fun(c_p, r(0), x(1), reg); - HEAVY_SWAPIN; - if (next != NULL) { - SET_CP(c_p, (BeamInstr *) E[0]); - E = ADD_BYTE_OFFSET(E, Arg(0)); - SET_I(next); - Dispatchfun(); - } - goto find_func_info; - } - - OpCase(i_apply_fun_only): { - BeamInstr *next; - - HEAVY_SWAPOUT; - next = apply_fun(c_p, r(0), x(1), reg); - HEAVY_SWAPIN; - 
if (next != NULL) { - SET_I(next); - Dispatchfun(); - } - goto find_func_info; - } - - OpCase(i_call_fun_I): { - BeamInstr *next; - - HEAVY_SWAPOUT; - next = call_fun(c_p, Arg(0), reg, THE_NON_VALUE); - HEAVY_SWAPIN; - if (next != NULL) { - SET_CP(c_p, I+2); - SET_I(next); - Dispatchfun(); - } - goto find_func_info; - } - - OpCase(i_call_fun_last_IP): { - BeamInstr *next; - - HEAVY_SWAPOUT; - next = call_fun(c_p, Arg(0), reg, THE_NON_VALUE); - HEAVY_SWAPIN; - if (next != NULL) { - SET_CP(c_p, (BeamInstr *) E[0]); - E = ADD_BYTE_OFFSET(E, Arg(1)); - SET_I(next); - Dispatchfun(); - } - goto find_func_info; - } +#include "beam_hot.h" #ifdef DEBUG /* @@ -3395,7 +813,7 @@ do { \ Eterm* argp; int i; - if (erts_smp_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_EXITING) { + if (erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_EXITING) { c_p->i = beam_exit; c_p->arity = 0; c_p->current = NULL; @@ -3452,64 +870,25 @@ do { \ goto do_schedule1; } - OpCase(set_tuple_element_sdP): { - Eterm element; - Eterm tuple; - BeamInstr *next; - Eterm* p; - - PreFetch(3, next); - GetArg1(0, element); - tuple = REG_TARGET(Arg(1)); - ASSERT(is_tuple(tuple)); - p = (Eterm *) ((unsigned char *) tuple_val(tuple) + Arg(2)); - *p = element; - NextPF(3, next); - } +#include "beam_warm.h" OpCase(normal_exit): { SWAPOUT; c_p->freason = EXC_NORMAL; - c_p->arity = 0; /* In case this process will never be garbed again. */ - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + c_p->arity = 0; /* In case this process will ever be garbed again. */ + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); erts_do_exit_process(c_p, am_normal); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); goto do_schedule; } OpCase(continue_exit): { - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); erts_continue_exit_process(c_p); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); goto do_schedule; } - OpCase(i_raise): { - Eterm raise_trace = x(2); - Eterm raise_value = x(1); - struct StackTrace *s; - - c_p->fvalue = raise_value; - c_p->ftrace = raise_trace; - s = get_trace_from_exc(raise_trace); - if (s == NULL) { - c_p->freason = EXC_ERROR; - } else { - c_p->freason = PRIMARY_EXCEPTION(s->freason); - } - goto find_func_info; - } - - { - Eterm badmatch_val; - - OpCase(badmatch_x): - badmatch_val = xb(Arg(0)); - c_p->fvalue = badmatch_val; - c_p->freason = BADMATCH; - } - /* Fall through here */ - find_func_info: { SWAPOUT; I = handle_error(c_p, I, reg, NULL); @@ -3550,194 +929,6 @@ do { \ } } - { - Eterm nif_bif_result; - Eterm bif_nif_arity; - - OpCase(call_nif): - { - /* - * call_nif is always first instruction in function: - * - * I[-3]: Module - * I[-2]: Function - * I[-1]: Arity - * I[0]: &&call_nif - * I[1]: Function pointer to NIF function - * I[2]: Pointer to erl_module_nif - * I[3]: Function pointer to dirty NIF - * - * This layout is determined by the NifExport struct - */ - BifFunction vbf; - ErlHeapFragment *live_hf_end; - ErtsCodeMFA *codemfa; - - ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF); - - codemfa = erts_code_to_codemfa(I); - - c_p->current = codemfa; /* current and vbf set to please handle_error */ - - DTRACE_NIF_ENTRY(c_p, codemfa); - - HEAVY_SWAPOUT; - - PROCESS_MAIN_CHK_LOCKS(c_p); - bif_nif_arity = codemfa->arity; - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); - - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - { - typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]); - NifF* fp = vbf = (NifF*) I[1]; - struct enif_environment_t env; -#ifdef ERTS_SMP - ASSERT(c_p->scheduler_data); -#endif - 
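/*
 * Editorial sketch: the NifF pointer fetched from I[1] above has the
 * standard NIF signature.  For reference, a complete minimal NIF
 * written against the public erl_nif.h API (module and function names
 * are examples):
 */
#include <erl_nif.h>

static ERL_NIF_TERM add2(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{
    int a, b;
    if (argc != 2 ||
        !enif_get_int(env, argv[0], &a) ||
        !enif_get_int(env, argv[1], &b))
        return enif_make_badarg(env);
    return enif_make_int(env, a + b);
}

static ErlNifFunc nif_funcs[] = {
    {"add2", 2, add2}
};

ERL_NIF_INIT(example, nif_funcs, NULL, NULL, NULL, NULL)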
live_hf_end = c_p->mbuf; - ERTS_CHK_MBUF_SZ(c_p); - erts_pre_nif(&env, c_p, (struct erl_module_nif*)I[2], NULL); - nif_bif_result = (*fp)(&env, bif_nif_arity, reg); - if (env.exception_thrown) - nif_bif_result = THE_NON_VALUE; - erts_post_nif(&env); - ERTS_CHK_MBUF_SZ(c_p); - - PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR); - ASSERT(!env.exiting); - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - } - - DTRACE_NIF_RETURN(c_p, codemfa); - goto apply_bif_or_nif_epilogue; - - OpCase(apply_bif): - /* - * At this point, I points to the code[0] in the export entry for - * the BIF: - * - * code[-3]: Module - * code[-2]: Function - * code[-1]: Arity - * code[0]: &&apply_bif - * code[1]: Function pointer to BIF function - */ - - if (!((FCALLS - 1) > 0 || (FCALLS - 1) > neg_o_reds)) { - /* If we have run out of reductions, we do a context - switch before calling the bif */ - goto context_switch; - } - - codemfa = erts_code_to_codemfa(I); - - ERTS_MSACC_SET_BIF_STATE_CACHED_X(codemfa->module, (BifFunction)Arg(0)); - - - /* In case we apply process_info/1,2 or load_nif/1 */ - c_p->current = codemfa; - c_p->i = I; /* In case we apply check_process_code/2. */ - c_p->arity = 0; /* To allow garbage collection on ourselves - * (check_process_code/2). - */ - DTRACE_BIF_ENTRY(c_p, codemfa); - - SWAPOUT; - ERTS_DBG_CHK_REDS(c_p, FCALLS - 1); - c_p->fcalls = FCALLS - 1; - vbf = (BifFunction) Arg(0); - PROCESS_MAIN_CHK_LOCKS(c_p); - bif_nif_arity = codemfa->arity; - ASSERT(bif_nif_arity <= 4); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - { - ErtsBifFunc bf = vbf; - ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - live_hf_end = c_p->mbuf; - ERTS_CHK_MBUF_SZ(c_p); - nif_bif_result = (*bf)(c_p, reg, I); - ERTS_CHK_MBUF_SZ(c_p); - ASSERT(!ERTS_PROC_IS_EXITING(c_p) || - is_non_value(nif_bif_result)); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - PROCESS_MAIN_CHK_LOCKS(c_p); - } - /* We have to update the cache if we are enabled in order - to make sure no book keeping is done after we disabled - msacc. We don't always do this as it is quite expensive. */ - if (ERTS_MSACC_IS_ENABLED_CACHED_X()) - ERTS_MSACC_UPDATE_CACHE_X(); - ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR); - DTRACE_BIF_RETURN(c_p, codemfa); - - apply_bif_or_nif_epilogue: - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); - ERTS_HOLE_CHECK(c_p); - if (ERTS_IS_GC_DESIRED(c_p)) { - nif_bif_result = erts_gc_after_bif_call_lhf(c_p, live_hf_end, - nif_bif_result, - reg, bif_nif_arity); - } - SWAPIN; /* There might have been a garbage collection. 
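/*
 * Editorial sketch: both call_bif and apply_bif above gate the call on
 * the reduction counter.  FCALLS counts down; when call saving is
 * active it may legitimately run below zero, bounded by neg_o_reds
 * (that reading of neg_o_reds is an assumption here).  The helper is
 * just De Morgan applied to the original condition:
 */
static int out_of_reductions(long fcalls, long neg_o_reds)
{
    /* equivalent to !((fcalls - 1) > 0 || (fcalls - 1) > neg_o_reds) */
    return (fcalls - 1) <= 0 && (fcalls - 1) <= neg_o_reds;
}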
*/ - FCALLS = c_p->fcalls; - ERTS_DBG_CHK_REDS(c_p, FCALLS); - if (is_value(nif_bif_result)) { - r(0) = nif_bif_result; - CHECK_TERM(r(0)); - SET_I(c_p->cp); - c_p->cp = 0; - Goto(*I); - } else if (c_p->freason == TRAP) { - SET_I(c_p->i); - if (c_p->flags & F_HIBERNATE_SCHED) { - c_p->flags &= ~F_HIBERNATE_SCHED; - goto do_schedule; - } - Dispatch(); - } - I = handle_error(c_p, c_p->cp, reg, c_p->current); - goto post_error_handling; - } - } - - OpCase(i_get_sd): - { - Eterm arg; - Eterm result; - - GetArg1(0, arg); - result = erts_pd_hash_get(c_p, arg); - StoreBifResult(1, result); - } - - OpCase(i_get_hash_cId): - { - Eterm arg; - Eterm result; - - GetArg1(0, arg); - result = erts_pd_hash_get_with_hx(c_p, Arg(1), arg); - StoreBifResult(2, result); - } - - { - Eterm case_end_val; - - OpCase(case_end_x): - case_end_val = xb(Arg(0)); - c_p->fvalue = case_end_val; - c_p->freason = EXC_CASE_CLAUSE; - goto find_func_info; - } - - OpCase(if_end): - c_p->freason = EXC_IF_CLAUSE; - goto find_func_info; - OpCase(i_func_info_IaaI): { ErtsCodeInfo *ci = (ErtsCodeInfo*)I; c_p->freason = EXC_FUNCTION_CLAUSE; @@ -3745,1367 +936,8 @@ do { \ goto handle_error; } - OpCase(try_case_end_s): - { - Eterm try_case_end_val; - GetArg1(0, try_case_end_val); - c_p->fvalue = try_case_end_val; - c_p->freason = EXC_TRY_CLAUSE; - goto find_func_info; - } - - /* - * Construction of binaries using new instructions. - */ - { - Eterm new_binary; - Eterm num_bits_term; - Uint num_bits; - Uint alloc; - Uint num_bytes; - - OpCase(i_bs_init_bits_heap_IIId): { - num_bits = Arg(0); - alloc = Arg(1); - I++; - goto do_bs_init_bits_known; - } - - OpCase(i_bs_init_bits_IId): { - num_bits = Arg(0); - alloc = 0; - goto do_bs_init_bits_known; - } - - OpCase(i_bs_init_bits_fail_heap_sIjId): { - GetArg1(0, num_bits_term); - alloc = Arg(1); - I += 2; - goto do_bs_init_bits; - } - - OpCase(i_bs_init_bits_fail_yjId): { - num_bits_term = yb(Arg(0)); - I++; - alloc = 0; - goto do_bs_init_bits; - } - OpCase(i_bs_init_bits_fail_xjId): { - num_bits_term = xb(Arg(0)); - I++; - alloc = 0; - /* FALL THROUGH */ - } - - /* num_bits_term = Term for number of bits to build (small/big) - * alloc = Number of words to allocate on heap - * Operands: Fail Live Dst - */ - - do_bs_init_bits: - if (is_small(num_bits_term)) { - Sint size = signed_val(num_bits_term); - if (size < 0) { - goto badarg; - } - num_bits = (Uint) size; - } else { - Uint bits; - - if (!term_to_Uint(num_bits_term, &bits)) { - c_p->freason = bits; - goto lb_Cl_error; - - } - num_bits = (Eterm) bits; - } - - /* num_bits = Number of bits to build - * alloc = Number of extra words to allocate on heap - * Operands: NotUsed Live Dst - */ - do_bs_init_bits_known: - num_bytes = ((Uint64)num_bits+(Uint64)7) >> 3; - if (num_bits & 7) { - alloc += ERL_SUB_BIN_SIZE; - } - if (num_bytes <= ERL_ONHEAP_BIN_LIMIT) { - alloc += heap_bin_size(num_bytes); - } else { - alloc += PROC_BIN_SIZE; - } - TestHeap(alloc, Arg(1)); - - /* num_bits = Number of bits to build - * num_bytes = Number of bytes to allocate in the binary - * alloc = Total number of words to allocate on heap - * Operands: NotUsed NotUsed Dst - */ - if (num_bytes <= ERL_ONHEAP_BIN_LIMIT) { - ErlHeapBin* hb; - - erts_bin_offset = 0; - erts_writable_bin = 0; - hb = (ErlHeapBin *) HTOP; - HTOP += heap_bin_size(num_bytes); - hb->thing_word = header_heap_bin(num_bytes); - hb->size = num_bytes; - erts_current_bin = (byte *) hb->data; - new_binary = make_binary(hb); - - do_bits_sub_bin: - if (num_bits & 7) { - ErlSubBin* sb; - - sb = (ErlSubBin *) 
HTOP; - HTOP += ERL_SUB_BIN_SIZE; - sb->thing_word = HEADER_SUB_BIN; - sb->size = num_bytes - 1; - sb->bitsize = num_bits & 7; - sb->offs = 0; - sb->bitoffs = 0; - sb->is_writable = 0; - sb->orig = new_binary; - new_binary = make_binary(sb); - } - HEAP_SPACE_VERIFIED(0); - StoreBifResult(2, new_binary); - } else { - Binary* bptr; - ProcBin* pb; - - erts_bin_offset = 0; - erts_writable_bin = 0; - - /* - * Allocate the binary struct itself. - */ - bptr = erts_bin_nrml_alloc(num_bytes); - erts_current_bin = (byte *) bptr->orig_bytes; - - /* - * Now allocate the ProcBin on the heap. - */ - pb = (ProcBin *) HTOP; - HTOP += PROC_BIN_SIZE; - pb->thing_word = HEADER_PROC_BIN; - pb->size = num_bytes; - pb->next = MSO(c_p).first; - MSO(c_p).first = (struct erl_off_heap_header*) pb; - pb->val = bptr; - pb->bytes = (byte*) bptr->orig_bytes; - pb->flags = 0; - OH_OVERHEAD(&(MSO(c_p)), pb->size / sizeof(Eterm)); - new_binary = make_binary(pb); - goto do_bits_sub_bin; - } - } - - { - Eterm BsOp1, BsOp2; - - OpCase(i_bs_init_fail_heap_sIjId): { - GetArg1(0, BsOp1); - BsOp2 = Arg(1); - I += 2; - goto do_bs_init; - } - - OpCase(i_bs_init_fail_yjId): { - BsOp1 = yb(Arg(0)); - BsOp2 = 0; - I++; - goto do_bs_init; - } - - OpCase(i_bs_init_fail_xjId): { - BsOp1 = xb(Arg(0)); - BsOp2 = 0; - I++; - } - /* FALL THROUGH */ - do_bs_init: - if (is_small(BsOp1)) { - Sint size = signed_val(BsOp1); - if (size < 0) { - goto badarg; - } - BsOp1 = (Eterm) size; - } else { - Uint bytes; - - if (!term_to_Uint(BsOp1, &bytes)) { - c_p->freason = bytes; - goto lb_Cl_error; - } - if ((bytes >> (8*sizeof(Uint)-3)) != 0) { - goto system_limit; - } - BsOp1 = (Eterm) bytes; - } - if (BsOp1 <= ERL_ONHEAP_BIN_LIMIT) { - goto do_heap_bin_alloc; - } else { - goto do_proc_bin_alloc; - } - - - OpCase(i_bs_init_heap_IIId): { - BsOp1 = Arg(0); - BsOp2 = Arg(1); - I++; - goto do_proc_bin_alloc; - } - - OpCase(i_bs_init_IId): { - BsOp1 = Arg(0); - BsOp2 = 0; - } - /* FALL THROUGH */ - do_proc_bin_alloc: { - Binary* bptr; - ProcBin* pb; - - erts_bin_offset = 0; - erts_writable_bin = 0; - TestBinVHeap(BsOp1 / sizeof(Eterm), - BsOp2 + PROC_BIN_SIZE + ERL_SUB_BIN_SIZE, Arg(1)); - - /* - * Allocate the binary struct itself. - */ - bptr = erts_bin_nrml_alloc(BsOp1); - erts_current_bin = (byte *) bptr->orig_bytes; - - /* - * Now allocate the ProcBin on the heap. 
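/*
 * Editorial sketch: the allocation policy implemented above, in one
 * function.  Binaries of at most ERL_ONHEAP_BIN_LIMIT bytes are built
 * directly on the process heap; larger ones become reference-counted
 * off-heap binaries reached through a ProcBin, and a size with
 * trailing bits additionally gets an ErlSubBin wrapper.  The limit
 * value below is an assumption of this sketch:
 */
#include <stdint.h>

#define ONHEAP_BIN_LIMIT 64          /* assumed ERL_ONHEAP_BIN_LIMIT */

enum bin_kind { HEAP_BIN, REFC_BIN };

static enum bin_kind plan_binary(uint64_t num_bits, int *needs_sub_bin)
{
    uint64_t num_bytes = (num_bits + 7) >> 3;    /* round up to bytes */
    *needs_sub_bin = (num_bits & 7) != 0;        /* trailing bit count */
    return num_bytes <= ONHEAP_BIN_LIMIT ? HEAP_BIN : REFC_BIN;
}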
- */ - pb = (ProcBin *) HTOP; - HTOP += PROC_BIN_SIZE; - pb->thing_word = HEADER_PROC_BIN; - pb->size = BsOp1; - pb->next = MSO(c_p).first; - MSO(c_p).first = (struct erl_off_heap_header*) pb; - pb->val = bptr; - pb->bytes = (byte*) bptr->orig_bytes; - pb->flags = 0; - - OH_OVERHEAD(&(MSO(c_p)), BsOp1 / sizeof(Eterm)); - - StoreBifResult(2, make_binary(pb)); - } - - OpCase(i_bs_init_heap_bin_heap_IIId): { - BsOp1 = Arg(0); - BsOp2 = Arg(1); - I++; - goto do_heap_bin_alloc; - } - - OpCase(i_bs_init_heap_bin_IId): { - BsOp1 = Arg(0); - BsOp2 = 0; - } - /* Fall through */ - do_heap_bin_alloc: - { - ErlHeapBin* hb; - Uint bin_need; - - bin_need = heap_bin_size(BsOp1); - erts_bin_offset = 0; - erts_writable_bin = 0; - TestHeap(bin_need+BsOp2+ERL_SUB_BIN_SIZE, Arg(1)); - hb = (ErlHeapBin *) HTOP; - HTOP += bin_need; - hb->thing_word = header_heap_bin(BsOp1); - hb->size = BsOp1; - erts_current_bin = (byte *) hb->data; - BsOp1 = make_binary(hb); - StoreBifResult(2, BsOp1); - } - } - - OpCase(bs_add_jssId): { - Eterm Op1, Op2; - Uint Unit = Arg(3); - - GetArg2(1, Op1, Op2); - if (is_both_small(Op1, Op2)) { - Sint Arg1 = signed_val(Op1); - Sint Arg2 = signed_val(Op2); - - if (Arg1 >= 0 && Arg2 >= 0) { - BsSafeMul(Arg2, Unit, goto system_limit, Op1); - Op1 += Arg1; - - store_bs_add_result: - if (Op1 <= MAX_SMALL) { - Op1 = make_small(Op1); - } else { - /* - * May generate a heap fragment, but in this - * particular case it is OK, since the value will be - * stored into an x register (the GC will scan x - * registers for references to heap fragments) and - * there is no risk that value can be stored into a - * location that is not scanned for heap-fragment - * references (such as the heap). - */ - SWAPOUT; - Op1 = erts_make_integer(Op1, c_p); - HTOP = HEAP_TOP(c_p); - } - StoreBifResult(4, Op1); - } - goto badarg; - } else { - Uint a; - Uint b; - Uint c; - - /* - * Now we know that one of the arguments is - * not a small. We must convert both arguments - * to Uints and check for errors at the same time. - * - * Error checking is tricky. - * - * If one of the arguments is not numeric or - * not positive, the error reason is BADARG. - * - * Otherwise if both arguments are numeric, - * but at least one argument does not fit in - * an Uint, the reason is SYSTEM_LIMIT. - */ - - if (!term_to_Uint(Op1, &a)) { - if (a == BADARG) { - goto badarg; - } - if (!term_to_Uint(Op2, &b)) { - c_p->freason = b; - goto lb_Cl_error; - } - goto system_limit; - } else if (!term_to_Uint(Op2, &b)) { - c_p->freason = b; - goto lb_Cl_error; - } - - /* - * The arguments are now correct and stored in a and b. - */ - - BsSafeMul(b, Unit, goto system_limit, c); - Op1 = a + c; - if (Op1 < a) { - /* - * If the result is less than one of the - * arguments, there must have been an overflow. - */ - goto system_limit; - } - goto store_bs_add_result; - } - /* No fallthrough */ - ASSERT(0); - } - - OpCase(bs_put_string_II): - { - BeamInstr *next; - PreFetch(2, next); - erts_new_bs_put_string(ERL_BITS_ARGS_2((byte *) Arg(1), Arg(0))); - NextPF(2, next); - } - - /* - * x(SCRATCH_X_REG); - * Operands: Fail ExtraHeap Live Unit Size Dst - */ - - OpCase(i_bs_append_jIIIsd): { - Uint live = Arg(2); - Uint res; - Eterm Size; - - GetArg1(4, Size); - HEAVY_SWAPOUT; - reg[live] = x(SCRATCH_X_REG); - res = erts_bs_append(c_p, reg, live, Size, Arg(1), Arg(3)); - HEAVY_SWAPIN; - if (is_non_value(res)) { - /* c_p->freason is already set (may be either BADARG or SYSTEM_LIMIT). 
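/*
 * Editorial sketch: the overflow discipline of bs_add above, as a
 * stand-alone helper.  The multiply is validated by dividing back
 * (the spirit of BsSafeMul) and the add by checking whether the sum
 * wrapped below an operand:
 */
#include <stdint.h>

/* Returns 0 on success; -1 means the emulator raises system_limit. */
static int checked_size(uintptr_t bits, uintptr_t count, uintptr_t unit,
                        uintptr_t *out)
{
    uintptr_t scaled = count * unit;
    if (unit != 0 && scaled / unit != count)   /* multiply overflowed */
        return -1;
    if (bits + scaled < bits)                  /* add overflowed */
        return -1;
    *out = bits + scaled;
    return 0;
}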
*/ - goto lb_Cl_error; - } - StoreBifResult(5, res); - } - - /* - * Operands: Fail Size Src Unit Dst - */ - OpCase(i_bs_private_append_jIssd): { - Eterm res; - Eterm Size, Src; - - GetArg2(2, Size, Src); - res = erts_bs_private_append(c_p, Src, Size, Arg(1)); - if (is_non_value(res)) { - /* c_p->freason is already set (may be either BADARG or SYSTEM_LIMIT). */ - goto lb_Cl_error; - } - StoreBifResult(4, res); - } - - OpCase(bs_init_writable): { - HEAVY_SWAPOUT; - r(0) = erts_bs_init_writable(c_p, r(0)); - HEAVY_SWAPIN; - Next(0); - } - - /* - * Calculate the number of bytes needed to encode the source - * operarand to UTF-8. If the source operand is invalid (e.g. wrong - * type or range) we return a nonsense integer result (0 or 4). We - * can get away with that because we KNOW that bs_put_utf8 will do - * full error checking. - */ - OpCase(i_bs_utf8_size_sd): { - Eterm arg; - Eterm result; - - GetArg1(0, arg); - if (arg < make_small(0x80UL)) { - result = make_small(1); - } else if (arg < make_small(0x800UL)) { - result = make_small(2); - } else if (arg < make_small(0x10000UL)) { - result = make_small(3); - } else { - result = make_small(4); - } - StoreBifResult(1, result); - } - - OpCase(i_bs_put_utf8_js): { - Eterm arg; - - GetArg1(1, arg); - if (!erts_bs_put_utf8(ERL_BITS_ARGS_1(arg))) { - goto badarg; - } - Next(2); - } - - /* - * Calculate the number of bytes needed to encode the source - * operarand to UTF-8. If the source operand is invalid (e.g. wrong - * type or range) we return a nonsense integer result (2 or 4). We - * can get away with that because we KNOW that bs_put_utf16 will do - * full error checking. - */ - - OpCase(i_bs_utf16_size_sd): { - Eterm arg; - Eterm result = make_small(2); - - GetArg1(0, arg); - if (arg >= make_small(0x10000UL)) { - result = make_small(4); - } - StoreBifResult(1, result); - } - - OpCase(bs_put_utf16_jIs): { - Eterm arg; - - GetArg1(2, arg); - if (!erts_bs_put_utf16(ERL_BITS_ARGS_2(arg, Arg(1)))) { - goto badarg; - } - Next(3); - } - - /* - * Only used for validating a value about to be stored in a binary. - */ - OpCase(i_bs_validate_unicode_js): { - Eterm val; - - GetArg1(1, val); - - /* - * There is no need to untag the integer, but it IS necessary - * to make sure it is small (if the term is a bignum, it could - * slip through the test, and there is no further test that - * would catch it, since bit syntax construction silently masks - * too big numbers). - */ - if (is_not_small(val) || val > make_small(0x10FFFFUL) || - (make_small(0xD800UL) <= val && val <= make_small(0xDFFFUL))) { - goto badarg; - } - Next(2); - } - - /* - * Only used for validating a value matched out. - */ - OpCase(i_bs_validate_unicode_retract_jss): { - Eterm i; /* Integer to validate */ - - /* - * There is no need to untag the integer, but it IS necessary - * to make sure it is small (a bignum pointer could fall in - * the valid range). - */ - - GetArg1(1, i); - if (is_not_small(i) || i > make_small(0x10FFFFUL) || - (make_small(0xD800UL) <= i && i <= make_small(0xDFFFUL))) { - Eterm ms; /* Match context */ - ErlBinMatchBuffer* mb; - - GetArg1(2, ms); - mb = ms_matchbuffer(ms); - mb->offset -= 32; - goto badarg; - } - Next(3); - } - - /* - * Matching of binaries. 
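/*
 * Editorial sketch: the two computations above as plain functions.
 * i_bs_utf8_size may return nonsense for invalid input because
 * bs_put_utf8 re-validates; the validation rule itself rejects
 * anything above 0x10FFFF and the surrogate range:
 */
#include <stdint.h>

static int utf8_size(uint32_t cp)
{
    if (cp < 0x80)    return 1;
    if (cp < 0x800)   return 2;
    if (cp < 0x10000) return 3;
    return 4;         /* nonsense for cp > 0x10FFFF; callers validate */
}

static int valid_unicode(uint32_t cp)
{
    return cp <= 0x10FFFF && !(cp >= 0xD800 && cp <= 0xDFFF);
}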
- */ - - { - Eterm header; - BeamInstr *next; - Uint slots; - Eterm context; - - do_start_match: - slots = Arg(2); - if (!is_boxed(context)) { - ClauseFail(); - } - PreFetch(4, next); - header = *boxed_val(context); - if (header_is_bin_matchstate(header)) { - ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(context); - Uint actual_slots = HEADER_NUM_SLOTS(header); - ms->save_offset[0] = ms->mb.offset; - if (actual_slots < slots) { - ErlBinMatchState* dst; - Uint live = Arg(1); - Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots); - - TestHeapPreserve(wordsneeded, live, context); - ms = (ErlBinMatchState *) boxed_val(context); - dst = (ErlBinMatchState *) HTOP; - *dst = *ms; - *HTOP = HEADER_BIN_MATCHSTATE(slots); - HTOP += wordsneeded; - HEAP_SPACE_VERIFIED(0); - StoreResult(make_matchstate(dst), Arg(3)); - } - } else if (is_binary_header(header)) { - Eterm result; - Uint live = Arg(1); - Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots); - TestHeapPreserve(wordsneeded, live, context); - HEAP_TOP(c_p) = HTOP; -#ifdef DEBUG - c_p->stop = E; /* Needed for checking in HeapOnlyAlloc(). */ -#endif - result = erts_bs_start_match_2(c_p, context, slots); - HTOP = HEAP_TOP(c_p); - HEAP_SPACE_VERIFIED(0); - if (is_non_value(result)) { - ClauseFail(); - } else { - StoreResult(result, Arg(3)); - } - } else { - ClauseFail(); - } - NextPF(4, next); - - OpCase(i_bs_start_match2_xfIId): { - context = xb(Arg(0)); - I++; - goto do_start_match; - } - OpCase(i_bs_start_match2_yfIId): { - context = yb(Arg(0)); - I++; - goto do_start_match; - } - } - - OpCase(bs_test_zero_tail2_fx): { - BeamInstr *next; - ErlBinMatchBuffer *_mb; - - PreFetch(2, next); - _mb = (ErlBinMatchBuffer*) ms_matchbuffer(xb(Arg(1))); - if (_mb->size != _mb->offset) { - ClauseFail(); - } - NextPF(2, next); - } - - OpCase(bs_test_tail_imm2_fxI): { - BeamInstr *next; - ErlBinMatchBuffer *_mb; - PreFetch(3, next); - _mb = ms_matchbuffer(xb(Arg(1))); - if (_mb->size - _mb->offset != Arg(2)) { - ClauseFail(); - } - NextPF(3, next); - } - - OpCase(bs_test_unit_fxI): { - BeamInstr *next; - ErlBinMatchBuffer *_mb; - PreFetch(3, next); - _mb = ms_matchbuffer(xb(Arg(1))); - if ((_mb->size - _mb->offset) % Arg(2)) { - ClauseFail(); - } - NextPF(3, next); - } - - OpCase(bs_test_unit8_fx): { - BeamInstr *next; - ErlBinMatchBuffer *_mb; - PreFetch(2, next); - _mb = ms_matchbuffer(xb(Arg(1))); - if ((_mb->size - _mb->offset) & 7) { - ClauseFail(); - } - NextPF(2, next); - } - - { - Eterm bs_get_integer8_context; - - OpCase(i_bs_get_integer_8_xfd): { - ErlBinMatchBuffer *_mb; - Eterm _result; - bs_get_integer8_context = xb(Arg(0)); - I++; - _mb = ms_matchbuffer(bs_get_integer8_context); - if (_mb->size - _mb->offset < 8) { - ClauseFail(); - } - if (BIT_OFFSET(_mb->offset) != 0) { - _result = erts_bs_get_integer_2(c_p, 8, 0, _mb); - } else { - _result = make_small(_mb->base[BYTE_OFFSET(_mb->offset)]); - _mb->offset += 8; - } - StoreBifResult(1, _result); - } - } - - { - Eterm bs_get_integer_16_context; - - OpCase(i_bs_get_integer_16_xfd): - bs_get_integer_16_context = xb(Arg(0)); - I++; - - { - ErlBinMatchBuffer *_mb; - Eterm _result; - _mb = ms_matchbuffer(bs_get_integer_16_context); - if (_mb->size - _mb->offset < 16) { - ClauseFail(); - } - if (BIT_OFFSET(_mb->offset) != 0) { - _result = erts_bs_get_integer_2(c_p, 16, 0, _mb); - } else { - _result = make_small(get_int16(_mb->base+BYTE_OFFSET(_mb->offset))); - _mb->offset += 16; - } - StoreBifResult(1, _result); - } - } - - { - Eterm bs_get_integer_32_context; - - OpCase(i_bs_get_integer_32_xfId): - 
bs_get_integer_32_context = xb(Arg(0)); - I++; - - { - ErlBinMatchBuffer *_mb; - Uint32 _integer; - Eterm _result; - _mb = ms_matchbuffer(bs_get_integer_32_context); - if (_mb->size - _mb->offset < 32) { ClauseFail(); } - if (BIT_OFFSET(_mb->offset) != 0) { - _integer = erts_bs_get_unaligned_uint32(_mb); - } else { - _integer = get_int32(_mb->base + _mb->offset/8); - } - _mb->offset += 32; -#if !defined(ARCH_64) - if (IS_USMALL(0, _integer)) { -#endif - _result = make_small(_integer); -#if !defined(ARCH_64) - } else { - TestHeap(BIG_UINT_HEAP_SIZE, Arg(1)); - _result = uint_to_big((Uint) _integer, HTOP); - HTOP += BIG_UINT_HEAP_SIZE; - HEAP_SPACE_VERIFIED(0); - } -#endif - StoreBifResult(2, _result); - } - } - - { - Eterm Ms, Sz; - - /* Operands: x(Reg) Size Live Fail Flags Dst */ - OpCase(i_bs_get_integer_imm_xIIfId): { - Uint wordsneeded; - Ms = xb(Arg(0)); - Sz = Arg(1); - wordsneeded = 1+WSIZE(NBYTES(Sz)); - TestHeapPreserve(wordsneeded, Arg(2), Ms); - I += 3; - /* Operands: Fail Flags Dst */ - goto do_bs_get_integer_imm; - } - - /* Operands: x(Reg) Size Fail Flags Dst */ - OpCase(i_bs_get_integer_small_imm_xIfId): { - Ms = xb(Arg(0)); - Sz = Arg(1); - I += 2; - /* Operands: Fail Flags Dst */ - goto do_bs_get_integer_imm; - } - - /* - * Ms = match context - * Sz = size of field - * Operands: Fail Flags Dst - */ - do_bs_get_integer_imm: { - ErlBinMatchBuffer* mb; - Eterm result; - - mb = ms_matchbuffer(Ms); - LIGHT_SWAPOUT; - result = erts_bs_get_integer_2(c_p, Sz, Arg(1), mb); - LIGHT_SWAPIN; - HEAP_SPACE_VERIFIED(0); - if (is_non_value(result)) { - ClauseFail(); - } - StoreBifResult(2, result); - } - } - - /* - * Operands: Fail Live FlagsAndUnit Ms Sz Dst - */ - OpCase(i_bs_get_integer_fIIssd): { - Uint flags; - Uint size; - Eterm Ms; - Eterm Sz; - ErlBinMatchBuffer* mb; - Eterm result; - - flags = Arg(2); - GetArg2(3, Ms, Sz); - BsGetFieldSize(Sz, (flags >> 3), ClauseFail(), size); - if (size >= SMALL_BITS) { - Uint wordsneeded; - /* Check bits size before potential gc. - * We do not want a gc and then realize we don't need - * the allocated space (i.e. if the op fails). - * - * Remember to re-acquire the matchbuffer after gc. 
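/*
 * Editorial sketch: the byte-aligned fast path shared by the 8/16/32
 * bit getters above.  On a byte boundary the value is read directly
 * out of the buffer (big endian, as binaries are); otherwise the code
 * falls back to the generic erts_bs_get_integer_2.  Simplified
 * stand-in types:
 */
#include <stdint.h>
#include <stddef.h>

typedef struct {
    const uint8_t *base;
    size_t offset;            /* match position, in bits */
    size_t size;              /* total size, in bits */
} matchbuffer;

/* Returns 0 and the extracted value, or -1 when the caller must take
 * the unaligned/short-data path instead. */
static int get_uint16_aligned(matchbuffer *mb, uint16_t *out)
{
    if (mb->size - mb->offset < 16 || (mb->offset & 7) != 0)
        return -1;
    *out = (uint16_t)(((uint16_t)mb->base[mb->offset >> 3] << 8)
                     | mb->base[(mb->offset >> 3) + 1]);
    mb->offset += 16;
    return 0;
}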
- */ - - mb = ms_matchbuffer(Ms); - if (mb->size - mb->offset < size) { - ClauseFail(); - } - wordsneeded = 1+WSIZE(NBYTES((Uint) size)); - TestHeapPreserve(wordsneeded, Arg(1), Ms); - } - mb = ms_matchbuffer(Ms); - LIGHT_SWAPOUT; - result = erts_bs_get_integer_2(c_p, size, flags, mb); - LIGHT_SWAPIN; - HEAP_SPACE_VERIFIED(0); - if (is_non_value(result)) { - ClauseFail(); - } - StoreBifResult(5, result); - } - - { - Eterm get_utf8_context; - - /* Operands: MatchContext Fail Dst */ - OpCase(i_bs_get_utf8_xfd): { - get_utf8_context = xb(Arg(0)); - I++; - } - - /* - * get_utf8_context = match_context - * Operands: Fail Dst - */ - - { - Eterm result = erts_bs_get_utf8(ms_matchbuffer(get_utf8_context)); - if (is_non_value(result)) { - ClauseFail(); - } - StoreBifResult(1, result); - } - } - - { - Eterm get_utf16_context; - - /* Operands: MatchContext Fail Flags Dst */ - OpCase(i_bs_get_utf16_xfId): { - get_utf16_context = xb(Arg(0)); - I++; - } - - /* - * get_utf16_context = match_context - * Operands: Fail Flags Dst - */ - { - Eterm result = erts_bs_get_utf16(ms_matchbuffer(get_utf16_context), - Arg(1)); - if (is_non_value(result)) { - ClauseFail(); - } - StoreBifResult(2, result); - } - } - - { - Eterm context_to_binary_context; - ErlBinMatchBuffer* mb; - ErlSubBin* sb; - Uint size; - Uint offs; - Uint orig; - Uint hole_size; - - OpCase(bs_context_to_binary_x): - context_to_binary_context = xb(Arg(0)); - I--; - - if (is_boxed(context_to_binary_context) && - header_is_bin_matchstate(*boxed_val(context_to_binary_context))) { - ErlBinMatchState* ms; - ms = (ErlBinMatchState *) boxed_val(context_to_binary_context); - mb = &ms->mb; - offs = ms->save_offset[0]; - size = mb->size - offs; - goto do_bs_get_binary_all_reuse_common; - } - Next(2); - - OpCase(i_bs_get_binary_all_reuse_xfI): { - context_to_binary_context = xb(Arg(0)); - I++; - } - - mb = ms_matchbuffer(context_to_binary_context); - size = mb->size - mb->offset; - if (size % Arg(1) != 0) { - ClauseFail(); - } - offs = mb->offset; - - do_bs_get_binary_all_reuse_common: - orig = mb->orig; - sb = (ErlSubBin *) boxed_val(context_to_binary_context); - hole_size = 1 + header_arity(sb->thing_word) - ERL_SUB_BIN_SIZE; - sb->thing_word = HEADER_SUB_BIN; - sb->size = BYTE_OFFSET(size); - sb->bitsize = BIT_OFFSET(size); - sb->offs = BYTE_OFFSET(offs); - sb->bitoffs = BIT_OFFSET(offs); - sb->is_writable = 0; - sb->orig = orig; - if (hole_size) { - sb[1].thing_word = make_pos_bignum_header(hole_size-1); - } - Next(2); - } - - { - Eterm match_string_context; - - OpCase(i_bs_match_string_xfII): { - match_string_context = xb(Arg(0)); - I++; - } - - { - BeamInstr *next; - byte* bytes; - Uint bits; - ErlBinMatchBuffer* mb; - Uint offs; - - PreFetch(3, next); - bits = Arg(1); - bytes = (byte *) Arg(2); - mb = ms_matchbuffer(match_string_context); - if (mb->size - mb->offset < bits) { - ClauseFail(); - } - offs = mb->offset & 7; - if (offs == 0 && (bits & 7) == 0) { - if (sys_memcmp(bytes, mb->base+(mb->offset>>3), bits>>3)) { - ClauseFail(); - } - } else if (erts_cmp_bits(bytes, 0, mb->base+(mb->offset>>3), mb->offset & 7, bits)) { - ClauseFail(); - } - mb->offset += bits; - NextPF(3, next); - } - } - - OpCase(i_bs_save2_xI): { - BeamInstr *next; - ErlBinMatchState *_ms; - PreFetch(2, next); - _ms = (ErlBinMatchState*) boxed_val((Eterm) xb(Arg(0))); - _ms->save_offset[Arg(1)] = _ms->mb.offset; - NextPF(2, next); - } - - OpCase(i_bs_restore2_xI): { - BeamInstr *next; - ErlBinMatchState *_ms; - PreFetch(2, next); - _ms = (ErlBinMatchState*) boxed_val((Eterm) 
xb(Arg(0))); - _ms->mb.offset = _ms->save_offset[Arg(1)]; - NextPF(2, next); - } - #include "beam_cold.h" - - /* - * This instruction is probably never used (because it is combined with a - * return). However, a future compiler might for some reason emit a - * deallocate not followed by a return, and that should work. - */ - OpCase(deallocate_I): { - BeamInstr *next; - - PreFetch(1, next); - D(Arg(0)); - NextPF(1, next); - } - - /* - * Trace and debugging support. - */ - - OpCase(return_trace): { - ErtsCodeMFA* mfa = (ErtsCodeMFA *)(E[0]); - - SWAPOUT; /* Needed for shared heap */ - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); - erts_trace_return(c_p, mfa, r(0), ERTS_TRACER_FROM_ETERM(E+1)/* tracer */); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); - SWAPIN; - c_p->cp = NULL; - SET_I((BeamInstr *) cp_val(E[2])); - E += 3; - Goto(*I); - } - - OpCase(i_generic_breakpoint): { - BeamInstr real_I; - HEAVY_SWAPOUT; - real_I = erts_generic_breakpoint(c_p, erts_code_to_codeinfo(I), reg); - HEAVY_SWAPIN; - ASSERT(VALID_INSTR(real_I)); - Goto(real_I); - } - - OpCase(i_return_time_trace): { - BeamInstr *pc = (BeamInstr *) (UWord) E[0]; - SWAPOUT; - erts_trace_time_return(c_p, erts_code_to_codeinfo(pc)); - SWAPIN; - c_p->cp = NULL; - SET_I((BeamInstr *) cp_val(E[1])); - E += 2; - Goto(*I); - } - - OpCase(i_return_to_trace): { - if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO)) { - Uint *cpp = (Uint*) E; - for(;;) { - ASSERT(is_CP(*cpp)); - if (*cp_val(*cpp) == (BeamInstr) OpCode(return_trace)) { - do ++cpp; while(is_not_CP(*cpp)); - cpp += 2; - } else if (*cp_val(*cpp) == (BeamInstr) OpCode(i_return_to_trace)) { - do ++cpp; while(is_not_CP(*cpp)); - } else break; - } - SWAPOUT; /* Needed for shared heap */ - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); - erts_trace_return_to(c_p, cp_val(*cpp)); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); - SWAPIN; - } - c_p->cp = NULL; - SET_I((BeamInstr *) cp_val(E[0])); - E += 1; - Goto(*I); - } - - /* - * New floating point instructions. 
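/*
 * Editorial sketch, closing out the binary-matching section before the
 * float instructions begin: i_bs_match_string further above picks
 * sys_memcmp when the match position and length are whole bytes and a
 * bitwise walk otherwise.  A simplified comparator (erts_cmp_bits
 * itself is considerably more optimized):
 */
#include <string.h>
#include <stdint.h>
#include <stddef.h>

static int bits_equal(const uint8_t *a, size_t a_off,
                      const uint8_t *b, size_t b_off, size_t nbits)
{
    if (((a_off | b_off | nbits) & 7) == 0)       /* all byte aligned */
        return memcmp(a + a_off / 8, b + b_off / 8, nbits / 8) == 0;
    while (nbits--) {
        int abit = (a[a_off >> 3] >> (7 - (a_off & 7))) & 1;
        int bbit = (b[b_off >> 3] >> (7 - (b_off & 7))) & 1;
        if (abit != bbit)
            return 0;
        a_off++;
        b_off++;
    }
    return 1;
}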
- */ - - OpCase(fmove_ql): { - Eterm fr = Arg(1); - BeamInstr *next; - - PreFetch(2, next); - GET_DOUBLE(Arg(0), *(FloatDef*)ADD_BYTE_OFFSET(freg, fr)); - NextPF(2, next); - } - - OpCase(fmove_dl): { - Eterm targ1; - Eterm fr = Arg(1); - BeamInstr *next; - - PreFetch(2, next); - targ1 = REG_TARGET(Arg(0)); - /* Arg(0) == HEADER_FLONUM */ - GET_DOUBLE(targ1, *(FloatDef*)ADD_BYTE_OFFSET(freg, fr)); - NextPF(2, next); - } - - OpCase(fmove_ld): { - Eterm fr = Arg(0); - Eterm dest = make_float(HTOP); - - PUT_DOUBLE(*(FloatDef*)ADD_BYTE_OFFSET(freg, fr), HTOP); - HTOP += FLOAT_SIZE_OBJECT; - StoreBifResult(1, dest); - } - - OpCase(fconv_dl): { - Eterm targ1; - Eterm fr = Arg(1); - BeamInstr *next; - - targ1 = REG_TARGET(Arg(0)); - PreFetch(2, next); - if (is_small(targ1)) { - fb(fr) = (double) signed_val(targ1); - } else if (is_big(targ1)) { - if (big_to_double(targ1, &fb(fr)) < 0) { - goto fbadarith; - } - } else if (is_float(targ1)) { - GET_DOUBLE(targ1, *(FloatDef*)ADD_BYTE_OFFSET(freg, fr)); - } else { - goto fbadarith; - } - NextPF(2, next); - } - -#ifdef NO_FPE_SIGNALS - OpCase(fclearerror): - OpCase(i_fcheckerror): - erts_exit(ERTS_ERROR_EXIT, "fclearerror/i_fcheckerror without fpe signals (beam_emu)"); -# define ERTS_NO_FPE_CHECK_INIT ERTS_FP_CHECK_INIT -# define ERTS_NO_FPE_ERROR ERTS_FP_ERROR -#else -# define ERTS_NO_FPE_CHECK_INIT(p) -# define ERTS_NO_FPE_ERROR(p, a, b) - - OpCase(fclearerror): { - BeamInstr *next; - - PreFetch(0, next); - ERTS_FP_CHECK_INIT(c_p); - NextPF(0, next); - } - - OpCase(i_fcheckerror): { - BeamInstr *next; - - PreFetch(0, next); - ERTS_FP_ERROR(c_p, freg[0].fd, goto fbadarith); - NextPF(0, next); - } -#endif - - - OpCase(i_fadd_lll): { - BeamInstr *next; - - PreFetch(3, next); - ERTS_NO_FPE_CHECK_INIT(c_p); - fb(Arg(2)) = fb(Arg(0)) + fb(Arg(1)); - ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith); - NextPF(3, next); - } - OpCase(i_fsub_lll): { - BeamInstr *next; - - PreFetch(3, next); - ERTS_NO_FPE_CHECK_INIT(c_p); - fb(Arg(2)) = fb(Arg(0)) - fb(Arg(1)); - ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith); - NextPF(3, next); - } - OpCase(i_fmul_lll): { - BeamInstr *next; - - PreFetch(3, next); - ERTS_NO_FPE_CHECK_INIT(c_p); - fb(Arg(2)) = fb(Arg(0)) * fb(Arg(1)); - ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith); - NextPF(3, next); - } - OpCase(i_fdiv_lll): { - BeamInstr *next; - - PreFetch(3, next); - ERTS_NO_FPE_CHECK_INIT(c_p); - fb(Arg(2)) = fb(Arg(0)) / fb(Arg(1)); - ERTS_NO_FPE_ERROR(c_p, fb(Arg(2)), goto fbadarith); - NextPF(3, next); - } - OpCase(i_fnegate_ll): { - BeamInstr *next; - - PreFetch(2, next); - ERTS_NO_FPE_CHECK_INIT(c_p); - fb(Arg(1)) = -fb(Arg(0)); - ERTS_NO_FPE_ERROR(c_p, fb(Arg(1)), goto fbadarith); - NextPF(2, next); - - fbadarith: - c_p->freason = BADARITH; - goto find_func_info; - } - -#ifdef HIPE - { -#define HIPE_MODE_SWITCH(Cmd) \ - SWAPOUT; \ - ERTS_DBG_CHK_REDS(c_p, FCALLS); \ - c_p->fcalls = FCALLS; \ - c_p->def_arg_reg[4] = -neg_o_reds; \ - c_p = hipe_mode_switch(c_p, Cmd, reg); \ - goto L_post_hipe_mode_switch - - OpCase(hipe_trap_call): { - /* - * I[-5]: &&lb_i_func_info_IaaI - * I[-4]: Native code callee (inserted by HiPE) - * I[-3]: Module (tagged atom) - * I[-2]: Function (tagged atom) - * I[-1]: Arity (untagged integer) - * I[ 0]: &&lb_hipe_trap_call - * ... 
remainder of original BEAM code - */ - ErtsCodeInfo *ci = erts_code_to_codeinfo(I); - ASSERT(ci->op == (Uint) OpCode(i_func_info_IaaI)); - c_p->hipe.u.ncallee = ci->u.ncallee; - ++hipe_trap_count; - HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_CALL | (ci->mfa.arity << 8)); - } - OpCase(hipe_trap_call_closure): { - ErtsCodeInfo *ci = erts_code_to_codeinfo(I); - ASSERT(ci->op == (Uint) OpCode(i_func_info_IaaI)); - c_p->hipe.u.ncallee = ci->u.ncallee; - ++hipe_trap_count; - HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_CALL_CLOSURE | (ci->mfa.arity << 8)); - } - OpCase(hipe_trap_return): { - HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_RETURN); - } - OpCase(hipe_trap_throw): { - HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_THROW); - } - OpCase(hipe_trap_resume): { - HIPE_MODE_SWITCH(HIPE_MODE_SWITCH_CMD_RESUME); - } -#undef HIPE_MODE_SWITCH - - L_post_hipe_mode_switch: -#ifdef DEBUG - pid = c_p->common.id; /* may have switched process... */ -#endif - reg = erts_proc_sched_data(c_p)->x_reg_array; - freg = erts_proc_sched_data(c_p)->f_reg_array; - ERL_BITS_RELOAD_STATEP(c_p); - /* XXX: this abuse of def_arg_reg[] is horrid! */ - neg_o_reds = -c_p->def_arg_reg[4]; - FCALLS = c_p->fcalls; - SWAPIN; - ERTS_DBG_CHK_REDS(c_p, FCALLS); - switch( c_p->def_arg_reg[3] ) { - case HIPE_MODE_SWITCH_RES_RETURN: - ASSERT(is_value(reg[0])); - SET_I(c_p->cp); - c_p->cp = 0; - Goto(*I); - case HIPE_MODE_SWITCH_RES_CALL_EXPORTED: - c_p->i = c_p->hipe.u.callee_exp->addressv[erts_active_code_ix()]; - /*fall through*/ - case HIPE_MODE_SWITCH_RES_CALL_BEAM: - SET_I(c_p->i); - Dispatch(); - case HIPE_MODE_SWITCH_RES_CALL_CLOSURE: - /* This can be used to call any function value, but currently it's - only used to call closures referring to unloaded modules. */ - { - BeamInstr *next; - - next = call_fun(c_p, c_p->arity - 1, reg, THE_NON_VALUE); - HEAVY_SWAPIN; - if (next != NULL) { - SET_I(next); - Dispatchfun(); - } - goto find_func_info; - } - case HIPE_MODE_SWITCH_RES_THROW: - c_p->cp = NULL; - I = handle_error(c_p, I, reg, NULL); - goto post_error_handling; - default: - erts_exit(ERTS_ERROR_EXIT, "hipe_mode_switch: result %u\n", c_p->def_arg_reg[3]); - } - } - OpCase(hipe_call_count): { - /* - * I[-5]: &&lb_i_func_info_IaaI - * I[-4]: pointer to struct hipe_call_count (inserted by HiPE) - * I[-3]: Module (tagged atom) - * I[-2]: Function (tagged atom) - * I[-1]: Arity (untagged integer) - * I[ 0]: &&lb_hipe_call_count - * ... remainder of original BEAM code - */ - ErtsCodeInfo *ci = erts_code_to_codeinfo(I); - struct hipe_call_count *hcc = ci->u.hcc; - ASSERT(ci->op == (Uint) OpCode(i_func_info_IaaI)); - ASSERT(hcc != NULL); - ASSERT(VALID_INSTR(hcc->opcode)); - ++(hcc->count); - Goto(hcc->opcode); - } -#endif /* HIPE */ - - OpCase(i_yield): - { - /* This is safe as long as REDS_IN(c_p) is never stored - * in c_p->arg_reg[0]. It is currently stored in c_p->def_arg_reg[5], - * which may be c_p->arg_reg[5], which is close, but no banana. 
- */ - c_p->arg_reg[0] = am_true; - c_p->arity = 1; /* One living register (the 'true' return value) */ - SWAPOUT; - c_p->i = I + 1; /* Next instruction */ - c_p->current = NULL; - goto do_schedule; - } - - OpCase(i_hibernate): { - HEAVY_SWAPOUT; - if (erts_hibernate(c_p, r(0), x(1), x(2), reg)) { - FCALLS = c_p->fcalls; - c_p->flags &= ~F_HIBERNATE_SCHED; - goto do_schedule; - } else { - HEAVY_SWAPIN; - I = handle_error(c_p, I, reg, &bif_export[BIF_hibernate_3]->info.mfa); - goto post_error_handling; - } - } - - /* This is optimised as an instruction because - it has to be very very fast */ - OpCase(i_perf_counter): { - BeamInstr* next; - ErtsSysPerfCounter ts; - PreFetch(0, next); - - ts = erts_sys_perf_counter(); - - if (IS_SSMALL(ts)) { - r(0) = make_small((Sint)ts); - } else { - TestHeap(ERTS_SINT64_HEAP_SIZE(ts),0); - r(0) = make_big(HTOP); -#if defined(ARCH_32) - if (ts >= (((Uint64) 1) << 32)) { - *HTOP = make_pos_bignum_header(2); - BIG_DIGIT(HTOP, 0) = (Uint) (ts & ((Uint) 0xffffffff)); - BIG_DIGIT(HTOP, 1) = (Uint) ((ts >> 32) & ((Uint) 0xffffffff)); - HTOP += 3; - } - else -#endif - { - *HTOP = make_pos_bignum_header(1); - BIG_DIGIT(HTOP, 0) = (Uint) ts; - HTOP += 2; - } - } - NextPF(0, next); - } - - OpCase(i_debug_breakpoint): { - HEAVY_SWAPOUT; - I = call_error_handler(c_p, erts_code_to_codemfa(I), reg, am_breakpoint); - HEAVY_SWAPIN; - if (I) { - Goto(*I); - } - goto handle_error; - } - - - OpCase(system_limit_j): - system_limit: - c_p->freason = SYSTEM_LIMIT; - goto lb_Cl_error; - - #ifdef ERTS_OPCODE_COUNTER_SUPPORT DEFINE_COUNTING_LABELS; #endif @@ -5128,9 +960,6 @@ do { \ init_emulator: { - int i; - Export* ep; - #ifndef NO_JUMP_TABLE #ifdef ERTS_OPCODE_COUNTER_SUPPORT #ifdef DEBUG @@ -5142,35 +971,8 @@ do { \ beam_ops = opcodes; #endif /* ERTS_OPCODE_COUNTER_SUPPORT */ #endif /* NO_JUMP_TABLE */ - - em_call_error_handler = OpCode(call_error_handler); - em_apply_bif = OpCode(apply_bif); - em_call_nif = OpCode(call_nif); - em_call_bif_e = OpCode(call_bif_e); - - beam_apply[0] = (BeamInstr) OpCode(i_apply); - beam_apply[1] = (BeamInstr) OpCode(normal_exit); - beam_exit[0] = (BeamInstr) OpCode(error_action_code); - beam_continue_exit[0] = (BeamInstr) OpCode(continue_exit); - beam_return_to_trace[0] = (BeamInstr) OpCode(i_return_to_trace); - beam_return_trace[0] = (BeamInstr) OpCode(return_trace); - beam_exception_trace[0] = (BeamInstr) OpCode(return_trace); /* UGLY */ - beam_return_time_trace[0] = (BeamInstr) OpCode(i_return_time_trace); - - /* - * Enter all BIFs into the export table. - */ - for (i = 0; i < BIF_SIZE; i++) { - ep = erts_export_put(bif_table[i].module, - bif_table[i].name, - bif_table[i].arity); - bif_export[i] = ep; - ep->beam[0] = (BeamInstr) OpCode(apply_bif); - ep->beam[1] = (BeamInstr) bif_table[i].f; - /* XXX: set func info for bifs */ - ep->info.op = (BeamInstr) BeamOp(op_i_func_info_IaaI); - } + init_emulator_finish(); return; } #ifdef NO_JUMP_TABLE @@ -5182,26 +984,59 @@ do { \ save_calls1: { - Eterm* dis_next; + BeamInstr dis_next; save_calls(c_p, (Export *) Arg(0)); SET_I(((Export *) Arg(0))->addressv[erts_active_code_ix()]); - dis_next = (Eterm *) *I; + dis_next = *I; FCALLS--; Goto(dis_next); } } /* + * One-time initialization of emulator. Does not need to be + * in process_main(). 
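/*
 * Editorial sketch: the Goto(*I)/OpCase machinery used throughout this
 * file is direct-threaded dispatch -- each instruction word holds the
 * address of the label implementing it (beam_ops above maps opcode
 * numbers to those addresses).  A minimal stand-alone illustration
 * using the GCC/Clang labels-as-values extension, not the real loader:
 */
#include <stdio.h>

int main(void)
{
    void *prog[3];
    void **I = prog;

    /* "Loading": translate opcode numbers into label addresses. */
    prog[0] = &&op_print;
    prog[1] = &&op_print;
    prog[2] = &&op_halt;

    goto **I;                 /* the Goto(*I) of this sketch */

op_print:
    puts("print");
    I++;
    goto **I;                 /* dispatch the next instruction */

op_halt:
    return 0;
}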
+ */ +static void +init_emulator_finish(void) +{ + int i; + Export* ep; + + beam_apply[0] = BeamOpCodeAddr(op_i_apply); + beam_apply[1] = BeamOpCodeAddr(op_normal_exit); + beam_exit[0] = BeamOpCodeAddr(op_error_action_code); + beam_continue_exit[0] = BeamOpCodeAddr(op_continue_exit); + beam_return_to_trace[0] = BeamOpCodeAddr(op_i_return_to_trace); + beam_return_trace[0] = BeamOpCodeAddr(op_return_trace); + beam_exception_trace[0] = BeamOpCodeAddr(op_return_trace); /* UGLY */ + beam_return_time_trace[0] = BeamOpCodeAddr(op_i_return_time_trace); + + /* + * Enter all BIFs into the export table. + */ + for (i = 0; i < BIF_SIZE; i++) { + ep = erts_export_put(bif_table[i].module, + bif_table[i].name, + bif_table[i].arity); + bif_export[i] = ep; + ep->beam[0] = BeamOpCodeAddr(op_apply_bif); + ep->beam[1] = (BeamInstr) bif_table[i].f; + /* XXX: set func info for bifs */ + ep->info.op = BeamOpCodeAddr(op_i_func_info_IaaI); + } +} + +/* * erts_dirty_process_main() is what dirty schedulers execute. Since they handle * only NIF calls they do not need to be able to execute all BEAM * instructions. */ void erts_dirty_process_main(ErtsSchedulerData *esdp) { -#ifdef ERTS_DIRTY_SCHEDULERS Process* c_p = NULL; ErtsMonotonicTime start_time; #ifdef DEBUG @@ -5313,7 +1148,7 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp) } PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); c_p = erts_schedule(esdp, c_p, reds_used); @@ -5327,7 +1162,7 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp) #ifdef DEBUG pid = c_p->common.id; /* Save for debugging purposes */ #endif - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); PROCESS_MAIN_CHK_LOCKS(c_p); ASSERT(!(c_p->flags & F_HIPE_MODE)); @@ -5342,7 +1177,7 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp) else c_p->fcalls = CONTEXT_REDS; - if (erts_smp_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_DIRTY_RUNNING_SYS) { + if (erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_DIRTY_RUNNING_SYS) { erts_execute_dirty_system_task(c_p); goto do_dirty_schedule; } @@ -5413,21 +1248,21 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp) c_p->current = codemfa; SWAPOUT; PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - if (em_apply_bif == (BeamInstr *) *I) { + if (BeamIsOpCode(*I, op_apply_bif)) { exiting = erts_call_dirty_bif(esdp, c_p, I, reg); } else { - ASSERT(em_call_nif == (BeamInstr *) *I); - exiting = erts_call_dirty_nif(esdp, c_p, I, reg); + ASSERT(BeamIsOpCode(*I, op_call_nif)); + exiting = erts_call_dirty_nif(esdp, c_p, I, reg); } ASSERT(!(c_p->flags & F_HIBERNATE_SCHED)); PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR); if (exiting) @@ -5440,7 +1275,6 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp) I = c_p->i; goto context_switch; } -#endif /* ERTS_DIRTY_SCHEDULERS */ } static ErtsCodeMFA * @@ -5606,9 +1440,9 @@ handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, ErtsCodeMFA *bif_mfa) } if (c_p->catches > 0) erts_exit(ERTS_ERROR_EXIT, "Catch not found"); } - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); terminate_proc(c_p, Value); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); return NULL; } @@ -6257,8 +2091,8 @@ apply_bif_error_adjustment(Process *p, Export *ep, * 
and apply_last_IP. */ if (I - && ep->beam[0] == (BeamInstr) em_apply_bif - && (ep == bif_export[BIF_error_1] + && BeamIsOpCode(ep->beam[0], op_apply_bif) + && (ep == bif_export[BIF_error_1] || ep == bif_export[BIF_error_2] || ep == bif_export[BIF_exit_1] || ep == bif_export[BIF_throw_1])) { @@ -6344,13 +2178,14 @@ apply_bif_error_adjustment(Process *p, Export *ep, } static BeamInstr* -apply( -Process* p, Eterm module, Eterm function, Eterm args, Eterm* reg, -BeamInstr *I, Uint stack_offset) +apply(Process* p, Eterm* reg, BeamInstr *I, Uint stack_offset) { int arity; Export* ep; - Eterm tmp, this; + Eterm tmp; + Eterm module = reg[0]; + Eterm function = reg[1]; + Eterm args = reg[2]; /* * Check the arguments which should be of the form apply(Module, @@ -6373,20 +2208,8 @@ BeamInstr *I, Uint stack_offset) while (1) { Eterm m, f, a; - /* The module argument may be either an atom or an abstract module - * (currently implemented using tuples, but this might change). - */ - this = THE_NON_VALUE; - if (is_not_atom(module)) { - Eterm* tp; - - if (is_not_tuple(module)) goto error; - tp = tuple_val(module); - if (arityval(tp[0]) < 1) goto error; - this = module; - module = tp[1]; - if (is_not_atom(module)) goto error; - } + + if (is_not_atom(module)) goto error; if (module != am_erlang || function != am_apply) break; @@ -6421,9 +2244,7 @@ BeamInstr *I, Uint stack_offset) } /* * Walk down the 3rd parameter of apply (the argument list) and copy - * the parameters to the x registers (reg[]). If the module argument - * was an abstract module, add 1 to the function arity and put the - * module argument in the n+1st x register as a THIS reference. + * the parameters to the x registers (reg[]). */ tmp = args; @@ -6440,9 +2261,6 @@ BeamInstr *I, Uint stack_offset) if (is_not_nil(tmp)) { /* Must be well-formed list */ goto error; } - if (this != THE_NON_VALUE) { - reg[arity++] = this; - } /* * Get the index into the export table, or failing that the export @@ -6481,22 +2299,12 @@ fixed_apply(Process* p, Eterm* reg, Uint arity, return 0; } - /* The module argument may be either an atom or an abstract module - * (currently implemented using tuples, but this might change). - */ - if (is_not_atom(module)) { - Eterm* tp; - if (is_not_tuple(module)) goto error; - tp = tuple_val(module); - if (arityval(tp[0]) < 1) goto error; - module = tp[1]; - if (is_not_atom(module)) goto error; - ++arity; - } + if (is_not_atom(module)) goto error; /* Handle apply of apply/3... */ - if (module == am_erlang && function == am_apply && arity == 3) - return apply(p, reg[0], reg[1], reg[2], reg, I, stack_offset); + if (module == am_erlang && function == am_apply && arity == 3) { + return apply(p, reg, I, stack_offset); + } /* * Get the index into the export table, or failing that the export @@ -6517,27 +2325,13 @@ fixed_apply(Process* p, Eterm* reg, Uint arity, } int -erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* reg) +erts_hibernate(Process* c_p, Eterm* reg) { int arity; Eterm tmp; - -#ifndef ERTS_SMP - if (ERTS_PROC_IS_EXITING(c_p)) { - /* - * I non smp case: - * - * Currently executing process might be sent an exit - * signal if it is traced by a port that it also is - * linked to, and the port terminates during the - * trace. In this case we do *not* want to clear - * the active flag, which will make the process hang - * in limbo forever. Get out of here and terminate - * the process... 
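/*
 * Editorial sketch: the loop in apply() above walks the third argument
 * of apply/3 and fans it out into the x registers, failing on improper
 * or over-long lists.  Restated over a toy cons cell; MAX_ARITY
 * mirrors the register-file bound (255 for BEAM functions) and the
 * types are stand-ins:
 */
#define MAX_ARITY 255

typedef struct cons { void *head; struct cons *tail; } cons;

/* Returns the arity, or -1 for badarg.  NULL plays the role of nil;
 * a real improper-list check needs the tagged term representation. */
static int unpack_args(const cons *list, void *reg[])
{
    int arity = 0;
    while (list != NULL) {
        if (arity == MAX_ARITY)
            return -1;
        reg[arity++] = list->head;
        list = list->tail;
    }
    return arity;
}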
- */ - return -1; - } -#endif + Eterm module = reg[0]; + Eterm function = reg[1]; + Eterm args = reg[2]; if (is_not_atom(module) || is_not_atom(function)) { /* @@ -6605,33 +2399,22 @@ erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* re * If there are no waiting messages, garbage collect and * shrink the heap. */ - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); if (!c_p->msg.len) { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); c_p->fvalue = NIL; PROCESS_MAIN_CHK_LOCKS(c_p); erts_garbage_collect_hibernate(c_p); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); PROCESS_MAIN_CHK_LOCKS(c_p); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); -#ifndef ERTS_SMP - if (ERTS_PROC_IS_EXITING(c_p)) { - /* - * See comment in the beginning of the function... - * - * This second test is needed since gc might be traced. - */ - return -1; - } -#else /* ERTS_SMP */ - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); if (!c_p->msg.len) -#endif - erts_smp_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE); + erts_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE); ASSERT(!ERTS_PROC_IS_EXITING(c_p)); } - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); c_p->current = &bif_export[BIF_hibernate_3]->info.mfa; c_p->flags |= F_HIBERNATE_SCHED; /* Needed also when woken! */ return 1; @@ -6721,7 +2504,7 @@ call_fun(Process* p, /* Current process. 
*/ module = fe->module; - ERTS_SMP_READ_MEMORY_BARRIER; + ERTS_THR_READ_MEMORY_BARRIER; if (fe->pend_purge_address) { /* * The system is currently trying to purge the @@ -6852,7 +2635,7 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free) p->htop = hp + needed; funp = (ErlFunThing *) hp; hp = funp->env; - erts_smp_refc_inc(&fe->refc, 2); + erts_refc_inc(&fe->refc, 2); funp->thing_word = HEADER_FUN; funp->next = MSO(p).first; MSO(p).first = (struct erl_off_heap_header*) funp; @@ -6955,24 +2738,20 @@ do { \ static Eterm -new_map(Process* p, Eterm* reg, BeamInstr* I) +new_map(Process* p, Eterm* reg, Uint live, Uint n, BeamInstr* ptr) { - Uint n = Arg(3); Uint i; Uint need = n + 1 /* hdr */ + 1 /*size*/ + 1 /* ptr */ + 1 /* arity */; Eterm keys; Eterm *mhp,*thp; Eterm *E; - BeamInstr *ptr; flatmap_t *mp; ErtsHeapFactory factory; - ptr = &Arg(4); - if (n > 2*MAP_SMALL_MAP_LIMIT) { Eterm res; if (HeapWordsLeft(p) < n) { - erts_garbage_collect(p, n, reg, Arg(2)); + erts_garbage_collect(p, n, reg, live); } mhp = p->htop; @@ -6993,7 +2772,7 @@ new_map(Process* p, Eterm* reg, BeamInstr* I) } if (HeapWordsLeft(p) < need) { - erts_garbage_collect(p, need, reg, Arg(2)); + erts_garbage_collect(p, need, reg, live); } thp = p->htop; @@ -7016,9 +2795,42 @@ new_map(Process* p, Eterm* reg, BeamInstr* I) } static Eterm -update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I) +new_small_map_lit(Process* p, Eterm* reg, Eterm keys_literal, Uint live, BeamInstr* ptr) +{ + Eterm* keys = tuple_val(keys_literal); + Uint n = arityval(*keys); + Uint need = n + 1 /* hdr */ + 1 /*size*/ + 1 /* ptr */ + 1 /* arity */; + Uint i; + flatmap_t *mp; + Eterm *mhp; + Eterm *E; + + ASSERT(n <= MAP_SMALL_MAP_LIMIT); + + if (HeapWordsLeft(p) < need) { + erts_garbage_collect(p, need, reg, live); + } + + mhp = p->htop; + E = p->stop; + + mp = (flatmap_t *)mhp; mhp += MAP_HEADER_FLATMAP_SZ; + mp->thing_word = MAP_HEADER_FLATMAP; + mp->size = n; + mp->keys = keys_literal; + + for (i = 0; i < n; i++) { + GET_TERM(*ptr++, *mhp++); + } + + p->htop = mhp; + + return make_flatmap(mp); +} + +static Eterm +update_map_assoc(Process* p, Eterm* reg, Uint live, Uint n, BeamInstr* new_p) { - Uint n; Uint num_old; Uint num_updates; Uint need; @@ -7028,23 +2840,18 @@ update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I) Eterm* E; Eterm* old_keys; Eterm* old_vals; - BeamInstr* new_p; Eterm new_key; Eterm* kp; + Eterm map; - new_p = &Arg(5); - num_updates = Arg(4) / 2; + num_updates = n / 2; + map = reg[live]; if (is_not_flatmap(map)) { Uint32 hx; Eterm val; - /* apparently the compiler does not emit is_map instructions, - * bad compiler */ - - if (is_not_hashmap(map)) - return THE_NON_VALUE; - + ASSERT(is_hashmap(map)); res = map; E = p->stop; while(num_updates--) { @@ -7068,7 +2875,7 @@ update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I) */ if (num_old == 0) { - return new_map(p, reg, I+1); + return new_map(p, reg, live, n, new_p); } /* @@ -7078,8 +2885,6 @@ update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I) need = 2*(num_old+num_updates) + 1 + MAP_HEADER_FLATMAP_SZ; if (HeapWordsLeft(p) < need) { - Uint live = Arg(3); - reg[live] = map; erts_garbage_collect(p, need, reg, live+1); map = reg[live]; old_mp = (flatmap_t *)flatmap_val(map); @@ -7226,9 +3031,8 @@ update_map_assoc(Process* p, Eterm* reg, Eterm map, BeamInstr* I) */ static Eterm -update_map_exact(Process* p, Eterm* reg, Eterm map, BeamInstr* I) +update_map_exact(Process* p, Eterm* reg, Uint live, Uint n, Eterm* new_p) { - 
Uint n; Uint i; Uint num_old; Uint need; @@ -7238,12 +3042,12 @@ update_map_exact(Process* p, Eterm* reg, Eterm map, BeamInstr* I) Eterm* E; Eterm* old_keys; Eterm* old_vals; - BeamInstr* new_p; Eterm new_key; + Eterm map; - new_p = &Arg(5); - n = Arg(4) / 2; /* Number of values to be updated */ + n /= 2; /* Number of values to be updated */ ASSERT(n > 0); + map = reg[live]; if (is_not_flatmap(map)) { Uint32 hx; @@ -7297,8 +3101,6 @@ update_map_exact(Process* p, Eterm* reg, Eterm map, BeamInstr* I) need = num_old + MAP_HEADER_FLATMAP_SZ; if (HeapWordsLeft(p) < need) { - Uint live = Arg(3); - reg[live] = map; erts_garbage_collect(p, need, reg, live+1); map = reg[live]; old_mp = (flatmap_t *)flatmap_val(map); @@ -7396,8 +3198,8 @@ erts_is_builtin(Eterm Mod, Eterm Name, int arity) if ((ep = export_get(&e)) == NULL) { return 0; } - return ep->addressv[erts_active_code_ix()] == ep->beam - && (ep->beam[0] == (BeamInstr) em_apply_bif); + return ep->addressv[erts_active_code_ix()] == ep->beam && + BeamIsOpCode(ep->beam[0], op_apply_bif); } diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c index 23258dbe9c..9835b1c096 100644 --- a/erts/emulator/beam/beam_load.c +++ b/erts/emulator/beam/beam_load.c @@ -55,12 +55,6 @@ ErlDrvBinary* erts_gzinflate_buffer(char*, int); #define DEFINED 1 #define EXPORTED 2 -#ifdef NO_JUMP_TABLE -# define BeamOpCode(Op) ((BeamInstr)(Op)) -#else -# define BeamOpCode(Op) ((BeamInstr)beam_ops[Op]) -#endif - #if defined(WORDS_BIGENDIAN) # define NATIVE_ENDIAN(F) \ if ((F).val & BSF_NATIVE) { \ @@ -81,16 +75,28 @@ ErlDrvBinary* erts_gzinflate_buffer(char*, int); #define TE_FAIL (-1) #define TE_SHORT_WINDOW (-2) +/* + * Type for a reference to a label that must be patched. + */ + typedef struct { - Uint value; /* Value of label (NULL if not known yet). */ - Sint patches; /* Index (into code buffer) to first location - * which must be patched with the value of this label. - */ -#ifdef ERTS_SMP + Uint pos; /* Position of label reference to patch. */ + Uint offset; /* Offset from patch location. */ + int packed; /* 0 (not packed), 1 (lsw), 2 (msw) */ +} LabelPatch; + +/* + * Type for a label. + */ + +typedef struct { + Uint value; /* Value of label (0 if not known yet). */ Uint looprec_targeted; /* Non-zero if this label is the target of a loop_rec * instruction. */ -#endif + LabelPatch* patches; /* Array of label patches. */ + Uint num_patches; /* Number of patches in array. */ + Uint num_allocated; /* Number of allocated patches. */ } Label; /* @@ -227,7 +233,7 @@ typedef struct { typedef struct literal_patch LiteralPatch; struct literal_patch { - int pos; /* Position in code */ + Uint pos; /* Position in code */ LiteralPatch* next; }; @@ -307,6 +313,7 @@ typedef struct LoaderState { int on_load; /* Index in the code for the on_load function * (or 0 if there is no on_load function) */ + int otp_20_or_higher; /* Compiled with OTP 20 or higher */ /* * Atom table. 
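The beam_load.c hunks that follow replace the loader's old label bookkeeping, where unresolved references were threaded through the code buffer as a chain of indices hanging off a single `patches` field, with an explicit growable array of LabelPatch records per label. Each record remembers where the reference sits (`pos`), its offset from the start of the referring instruction (`offset`, needed for the new relative-jump encoding), and whether it was packed into a word (`packed`). Because the pieces are scattered across several hunks below (init_label, register_label_patch, the packing engine, and freeze_code), here is a simplified sketch of the life cycle gathered in one place; error paths and the packed cases are omitted:

    /* Recording a reference while loading (cf. register_label_patch): */
    Label* lp = &stp->labels[label];
    if (lp->num_allocated <= lp->num_patches) {
        lp->num_allocated *= 2;
        lp->patches = erts_realloc(ERTS_ALC_T_PREPARED_CODE,
                                   (void *) lp->patches,
                                   lp->num_allocated * sizeof(LabelPatch));
    }
    lp->patches[lp->num_patches].pos = ci;        /* code position */
    lp->patches[lp->num_patches].offset = offset; /* -last_instr_start */
    lp->patches[lp->num_patches].packed = 0;
    lp->num_patches++;
    stp->codev[ci] = label;   /* placeholder until the label is resolved */

    /* Resolving in freeze_code(): most references become offsets
     * relative to the referring instruction rather than absolute
     * pointers (only the function-table entries stay absolute): */
    Sint32 rel = lp->patches[patch].offset + lp->value;
    codev[lp->patches[patch].pos] = rel;
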
@@ -507,6 +514,7 @@ static int read_lambda_table(LoaderState* stp); static int read_literal_table(LoaderState* stp); static int read_line_table(LoaderState* stp); static int read_code_header(LoaderState* stp); +static void init_label(Label* lp); static int load_code(LoaderState* stp); static GenOp* gen_element(LoaderState* stp, GenOpArg Fail, GenOpArg Index, GenOpArg Tuple, GenOpArg Dst); @@ -537,6 +545,7 @@ static int get_tag_and_value(LoaderState* stp, Uint len_code, static int new_label(LoaderState* stp); static void new_literal_patch(LoaderState* stp, int pos); static void new_string_patch(LoaderState* stp, int pos); +static int find_literal(LoaderState* stp, Eterm needle, Uint *idx); static Uint new_literal(LoaderState* stp, Eterm** hpp, Uint heap_size); static int genopargcompare(GenOpArg* a, GenOpArg* b); static Eterm get_module_info(Process* p, ErtsCodeIndex code_ix, @@ -740,6 +749,13 @@ erts_prepare_loading(Binary* magic, Process *c_p, Eterm group_leader, } /* + * Find out whether the code was compiled with OTP 20 + * or higher. + */ + + stp->otp_20_or_higher = stp->chunks[UTF8_ATOM_CHUNK].size > 0; + + /* * Load the code chunk. */ @@ -795,8 +811,8 @@ erts_finish_loading(Binary* magic, Process* c_p, * table which is not protected by any locks. */ - ERTS_SMP_LC_ASSERT(erts_initialized == 0 || erts_has_code_write_permission() || - erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_initialized == 0 || erts_has_code_write_permission() || + erts_thr_progress_is_blocking()); /* * Make current code for the module old and insert the new code * as current. This will fail if there already exists old code @@ -827,11 +843,10 @@ erts_finish_loading(Binary* magic, Process* c_p, continue; } if (ep->addressv[code_ix] == ep->beam) { - if (ep->beam[0] == (BeamInstr) em_apply_bif) { + if (BeamIsOpCode(ep->beam[0], op_apply_bif)) { continue; - } else if (ep->beam[0] == - (BeamInstr) BeamOp(op_i_generic_breakpoint)) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + } else if (BeamIsOpCode(ep->beam[0], op_i_generic_breakpoint)) { + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); ASSERT(mod_tab_p->curr.num_traced_exports > 0); erts_clear_export_break(mod_tab_p, &ep->info); ep->addressv[code_ix] = (BeamInstr *) ep->beam[1]; @@ -1043,6 +1058,10 @@ loader_state_dtor(Binary* magic) stp->codev = 0; } if (stp->labels != 0) { + Uint num; + for (num = 0; num < stp->num_labels; num++) { + erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->labels[num].patches); + } erts_free(ERTS_ALC_T_PREPARED_CODE, (void *) stp->labels); stp->labels = 0; } @@ -1456,7 +1475,7 @@ load_import_table(LoaderState* stp) * the BIF function. */ if ((e = erts_active_export_entry(mod, func, arity)) != NULL) { - if (e->beam[0] == (BeamInstr) em_apply_bif) { + if (BeamIsOpCode(e->beam[0], op_apply_bif)) { stp->import[i].bf = (BifFunction) e->beam[1]; if (func == am_load_nif && mod == am_erlang && arity == 2) { stp->may_load_nif = 1; @@ -1526,7 +1545,7 @@ read_export_table(LoaderState* stp) * any other functions that walk through all local functions. */ - if (stp->labels[n].patches >= 0) { + if (stp->labels[n].num_patches > 0) { LoadError3(stp, "there are local calls to the stub for " "the BIF %T:%T/%d", stp->module, func, arity); @@ -1550,7 +1569,7 @@ is_bif(Eterm mod, Eterm func, unsigned arity) if (e == NULL) { return 0; } - if (e->beam[0] != (BeamInstr) em_apply_bif) { + if (! 
BeamIsOpCode(e->beam[0], op_apply_bif)) { return 0; } if (mod == am_erlang && func == am_apply && arity == 3) { @@ -1872,11 +1891,7 @@ read_code_header(LoaderState* stp) stp->labels = (Label *) erts_alloc(ERTS_ALC_T_PREPARED_CODE, stp->num_labels * sizeof(Label)); for (i = 0; i < stp->num_labels; i++) { - stp->labels[i].value = 0; - stp->labels[i].patches = -1; -#ifdef ERTS_SMP - stp->labels[i].looprec_targeted = 0; -#endif + init_label(&stp->labels[i]); } stp->catches = 0; @@ -1905,12 +1920,43 @@ read_code_header(LoaderState* stp) #define TermWords(t) (((t) / (sizeof(BeamInstr)/sizeof(Eterm))) + !!((t) % (sizeof(BeamInstr)/sizeof(Eterm)))) +static void init_label(Label* lp) +{ + lp->value = 0; + lp->looprec_targeted = 0; + lp->num_patches = 0; + lp->num_allocated = 4; + lp->patches = erts_alloc(ERTS_ALC_T_PREPARED_CODE, + lp->num_allocated * sizeof(LabelPatch)); +} + +static void +register_label_patch(LoaderState* stp, Uint label, Uint ci, Uint offset) +{ + Label* lp; + + ASSERT(label < stp->num_labels); + lp = &stp->labels[label]; + if (lp->num_allocated <= lp->num_patches) { + lp->num_allocated *= 2; + lp->patches = erts_realloc(ERTS_ALC_T_PREPARED_CODE, + (void *) lp->patches, + lp->num_allocated * sizeof(LabelPatch)); + } + lp->patches[lp->num_patches].pos = ci; + lp->patches[lp->num_patches].offset = offset; + lp->patches[lp->num_patches].packed = 0; + lp->num_patches++; + stp->codev[ci] = label; +} + static int load_code(LoaderState* stp) { int i; - int ci; - int last_func_start = 0; /* Needed by nif loading and line instructions */ + Uint ci; + Uint last_instr_start; /* Needed for relative jumps */ + Uint last_func_start = 0; /* Needed by nif loading and line instructions */ char* sign; int arg; /* Number of current argument. */ int num_specific; /* Number of specific ops for current. */ @@ -1923,6 +1969,9 @@ load_code(LoaderState* stp) GenOp** last_op_next = NULL; int arity; int retval = 1; +#if defined(BEAM_WIDE_SHIFT) + int num_trailing_f; /* Number of extra 'f' arguments in a list */ +#endif /* * The size of the loaded func_info instruction is needed @@ -2028,30 +2077,10 @@ load_code(LoaderState* stp) case 0: /* Floating point number. * Not generated by the compiler in R16B and later. + * (The literal pool is used instead.) */ - { - Eterm* hp; -#if !defined(ARCH_64) - Uint high, low; -# endif - last_op->a[arg].val = new_literal(stp, &hp, - FLOAT_SIZE_OBJECT); - hp[0] = HEADER_FLONUM; - last_op->a[arg].type = TAG_q; -#if defined(ARCH_64) - GetInt(stp, 8, hp[1]); -# else - GetInt(stp, 4, high); - GetInt(stp, 4, low); - if (must_swap_floats) { - Uint t = high; - high = low; - low = t; - } - hp[1] = high; - hp[2] = low; -# endif - } + LoadError0(stp, "please re-compile this module with an " + ERLANG_OTP_RELEASE " compiler"); break; case 1: /* List. */ if (arg+1 != arity) { @@ -2286,7 +2315,8 @@ load_code(LoaderState* stp) stp->specific_op = specific; CodeNeed(opc[stp->specific_op].sz+16); /* Extra margin for packing */ - code[ci++] = BeamOpCode(stp->specific_op); + last_instr_start = ci + opc[stp->specific_op].adjust; + code[ci++] = BeamOpCodeAddr(stp->specific_op); } /* @@ -2368,7 +2398,8 @@ load_code(LoaderState* stp) break; } break; - case 'd': /* Destination (x(0), x(N), y(N) */ + case 'd': /* Destination (x(N), y(N) */ + case 'S': /* Source (x(N), y(N)) */ switch (tag) { case TAG_x: code[ci++] = tmp_op->a[arg].val * sizeof(Eterm); @@ -2382,11 +2413,29 @@ load_code(LoaderState* stp) break; } break; - case 'I': /* Untagged integer (or pointer). 
*/ - VerifyTag(stp, tag, TAG_u); - code[ci++] = tmp_op->a[arg].val; - break; - case 't': /* Small untagged integer -- can be packed. */ + case 't': /* Small untagged integer (16 bits) -- can be packed. */ + case 'I': /* Untagged integer (32 bits) -- can be packed. */ + case 'W': /* Untagged integer or pointer (machine word). */ +#ifdef DEBUG + switch (*sign) { + case 't': + if (tmp_op->a[arg].val >> 16 != 0) { + load_printf(__LINE__, stp, "value %lu of type 't' does not fit in 16 bits", + tmp_op->a[arg].val); + ASSERT(0); + } + break; +#ifdef ARCH_64 + case 'I': + if (tmp_op->a[arg].val >> 32 != 0) { + load_printf(__LINE__, stp, "value %lu of type 'I' does not fit in 32 bits", + tmp_op->a[arg].val); + ASSERT(0); + } + break; +#endif + } +#endif VerifyTag(stp, tag, TAG_u); code[ci++] = tmp_op->a[arg].val; break; @@ -2396,16 +2445,14 @@ load_code(LoaderState* stp) break; case 'f': /* Destination label */ VerifyTag(stp, tag_to_letter[tag], *sign); - code[ci] = stp->labels[tmp_op->a[arg].val].patches; - stp->labels[tmp_op->a[arg].val].patches = ci; + register_label_patch(stp, tmp_op->a[arg].val, ci, -last_instr_start); ci++; break; case 'j': /* 'f' or 'p' */ if (tag == TAG_p) { code[ci] = 0; } else if (tag == TAG_f) { - code[ci] = stp->labels[tmp_op->a[arg].val].patches; - stp->labels[tmp_op->a[arg].val].patches = ci; + register_label_patch(stp, tmp_op->a[arg].val, ci, -last_instr_start); } else { LoadError3(stp, "bad tag %d; expected %d or %d", tag, TAG_f, TAG_p); @@ -2425,7 +2472,6 @@ load_code(LoaderState* stp) LoadError1(stp, "label %d defined more than once", last_label); } stp->labels[last_label].value = ci; - ASSERT(stp->labels[last_label].patches < ci); break; case 'e': /* Export entry */ VerifyTag(stp, tag, TAG_u); @@ -2471,35 +2517,133 @@ load_code(LoaderState* stp) * The packing engine. */ if (opc[stp->specific_op].pack[0]) { - char* prog; /* Program for packing engine. */ - BeamInstr stack[8]; /* Stack. */ - BeamInstr* sp = stack; /* Points to next free position. */ - BeamInstr packed = 0; /* Accumulator for packed operations. */ + char* prog; /* Program for packing engine. */ + struct pack_stack { + BeamInstr instr; + Uint* patch_pos; + } stack[8]; /* Stack. */ + struct pack_stack* sp = stack; /* Points to next free position. */ + BeamInstr packed = 0; /* Accumulator for packed operations. */ + LabelPatch* packed_label = 0; for (prog = opc[stp->specific_op].pack; *prog; prog++) { switch (*prog) { - case 'g': /* Get instruction; push on stack. */ - *sp++ = code[--ci]; - break; + case 'g': /* Get operand and push on stack. */ + ci--; + sp->instr = code[ci]; + sp->patch_pos = 0; + sp++; + break; + case 'f': /* Get possible 'f' operand and push on stack. */ + { + Uint w = code[--ci]; + sp->instr = w; + sp->patch_pos = 0; + + if (w != 0) { + LabelPatch* lbl_p; + int num_patches; + int patch; + + ASSERT(w < stp->num_labels); + lbl_p = stp->labels[w].patches; + num_patches = stp->labels[w].num_patches; + for (patch = num_patches - 1; patch >= 0; patch--) { + if (lbl_p[patch].pos == ci) { + sp->patch_pos = &lbl_p[patch].pos; + break; + } + } + ASSERT(sp->patch_pos); + } + sp++; + } + break; + case 'q': /* Get possible 'q' operand and push on stack. */ + { + LiteralPatch* lp; + + ci--; + sp->instr = code[ci]; + sp->patch_pos = 0; + + for (lp = stp->literal_patches; + lp && lp->pos > ci-MAX_OPARGS; + lp = lp->next) { + if (lp->pos == ci) { + sp->patch_pos = &lp->pos; + break; + } + } + sp++; + } + break; case 'i': /* Initialize packing accumulator. 
*/ packed = code[--ci]; break; case '0': /* Tight shift */ packed = (packed << BEAM_TIGHT_SHIFT) | code[--ci]; + if (packed_label) { + packed_label->packed++; + } break; case '6': /* Shift 16 steps */ packed = (packed << BEAM_LOOSE_SHIFT) | code[--ci]; + if (packed_label) { + packed_label->packed++; + } break; #ifdef ARCH_64 case 'w': /* Shift 32 steps */ - packed = (packed << BEAM_WIDE_SHIFT) | code[--ci]; - break; + { + Uint w = code[--ci]; + + if (packed_label) { + packed_label->packed++; + } + + /* + * 'w' can handle both labels ('f' and 'j'), as well + * as 'I'. Test whether this is a label. + */ + + if (w < stp->num_labels) { + /* + * Probably a label. Look for patch pointing to this + * position. + */ + LabelPatch* lp = stp->labels[w].patches; + int num_patches = stp->labels[w].num_patches; + int patch; + for (patch = num_patches - 1; patch >= 0; patch--) { + if (lp[patch].pos == ci) { + lp[patch].packed = 1; + packed_label = &lp[patch]; + break; + } + } + } + packed = (packed << BEAM_WIDE_SHIFT) | + (code[ci] & BEAM_WIDE_MASK); + } + break; #endif case 'p': /* Put instruction (from stack). */ - code[ci++] = *--sp; + --sp; + code[ci] = sp->instr; + if (sp->patch_pos) { + *sp->patch_pos = ci; + } + ci++; break; - case 'P': /* Put packed operands. */ - *sp++ = packed; + case 'P': /* Put packed operands (on the stack). */ + sp->instr = packed; + sp->patch_pos = 0; + if (packed_label) { + sp->patch_pos = &packed_label->pos; + packed_label = 0; + } + sp++; packed = 0; break; default: @@ -2513,7 +2657,17 @@ load_code(LoaderState* stp) * Load any list arguments using the primitive tags. */ +#if defined(BEAM_WIDE_SHIFT) + num_trailing_f = 0; +#endif for ( ; arg < tmp_op->arity; arg++) { +#if defined(BEAM_WIDE_SHIFT) + if (tmp_op->a[arg].type == TAG_f) { + num_trailing_f++; + } else { + num_trailing_f = 0; + } +#endif switch (tmp_op->a[arg].type) { case TAG_i: CodeNeed(1); @@ -2527,8 +2681,7 @@ load_code(LoaderState* stp) break; case TAG_f: CodeNeed(1); - code[ci] = stp->labels[tmp_op->a[arg].val].patches; - stp->labels[tmp_op->a[arg].val].patches = ci; + register_label_patch(stp, tmp_op->a[arg].val, ci, -last_instr_start); ci++; break; case TAG_x: @@ -2554,6 +2707,61 @@ load_code(LoaderState* stp) } } + /* + * If all the extra arguments were 'f' operands, + * and the wordsize is 64 bits, pack two 'f' operands + * into each word. + */ + +#if defined(BEAM_WIDE_SHIFT) + if (num_trailing_f >= 1) { + Uint src_index = ci - num_trailing_f; + Uint src_limit = ci; + Uint dst_limit = src_index + (num_trailing_f+1)/2; + + ci = src_index; + while (ci < dst_limit) { + Uint w[2]; + BeamInstr packed = 0; + int wi; + + w[0] = code[src_index]; + if (src_index+1 < src_limit) { + w[1] = code[src_index+1]; + } else { + w[1] = 0; + } + for (wi = 0; wi < 2; wi++) { + Uint lbl = w[wi]; + LabelPatch* lp = stp->labels[lbl].patches; + int num_patches = stp->labels[lbl].num_patches; + +#if defined(WORDS_BIGENDIAN) + packed <<= BEAM_WIDE_SHIFT; + packed |= lbl & BEAM_WIDE_MASK; +#else + packed >>= BEAM_WIDE_SHIFT; + packed |= lbl << BEAM_WIDE_SHIFT; +#endif + while (num_patches-- > 0) { + if (lp->pos == src_index + wi) { + lp->pos = ci; +#if defined(WORDS_BIGENDIAN) + lp->packed = 2 - wi; +#else + lp->packed = wi + 1; +#endif + break; + } + lp++; + } + } + code[ci++] = packed; + src_index += 2; + } + } +#endif + /* * Handle a few special cases. 
*/ @@ -2600,17 +2808,16 @@ load_code(LoaderState* stp) the size of the ops.tab i_func_info instruction is not the same as FUNC_INFO_SZ */ ASSERT(stp->labels[last_label].value == ci - FUNC_INFO_SZ); - stp->hdr->functions[function_number] = (ErtsCodeInfo*) stp->labels[last_label].patches; offset = function_number; - stp->labels[last_label].patches = offset; + register_label_patch(stp, last_label, offset, 0); function_number++; if (stp->arity > MAX_ARG) { LoadError1(stp, "too many arguments: %d", stp->arity); } #ifdef DEBUG - ASSERT(stp->labels[0].patches < 0); /* Should not be referenced. */ + ASSERT(stp->labels[0].num_patches == 0); /* Should not be referenced. */ for (i = 1; i < stp->num_labels; i++) { - ASSERT(stp->labels[i].patches < ci); + ASSERT(stp->labels[i].num_patches <= stp->labels[i].num_allocated); } #endif } @@ -2621,8 +2828,8 @@ load_code(LoaderState* stp) /* Remember offset for the on_load function. */ stp->on_load = ci; break; - case op_bs_put_string_II: - case op_i_bs_match_string_xfII: + case op_bs_put_string_WW: + case op_i_bs_match_string_xfWW: new_string_patch(stp, ci-1); break; @@ -2733,6 +2940,12 @@ load_code(LoaderState* stp) #define never(St) 0 +static int +compiled_with_otp_20_or_higher(LoaderState* stp) +{ + return stp->otp_20_or_higher; +} + /* * Predicate that tests whether a jump table can be used. */ @@ -2872,17 +3085,18 @@ gen_element(LoaderState* stp, GenOpArg Fail, GenOpArg Index, op->next = NULL; if (Index.type == TAG_i && Index.val > 0 && + Index.val <= ERTS_MAX_TUPLE_SIZE && (Tuple.type == TAG_x || Tuple.type == TAG_y)) { op->op = genop_i_fast_element_4; - op->a[0] = Fail; - op->a[1] = Tuple; + op->a[0] = Tuple; + op->a[1] = Fail; op->a[2].type = TAG_u; op->a[2].val = Index.val; op->a[3] = Dst; } else { op->op = genop_i_element_4; - op->a[0] = Fail; - op->a[1] = Tuple; + op->a[0] = Tuple; + op->a[1] = Fail; op->a[2] = Index; op->a[3] = Dst; } @@ -2962,13 +3176,14 @@ gen_get_integer2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live, op->a[0] = Ms; op->a[1] = Fail; op->a[2] = Dst; +#ifdef ARCH_64 } else if (bits == 32 && (Flags.val & BSF_LITTLE) == 0) { - op->op = genop_i_bs_get_integer_32_4; - op->arity = 4; + op->op = genop_i_bs_get_integer_32_3; + op->arity = 3; op->a[0] = Ms; op->a[1] = Fail; - op->a[2] = Live; - op->a[3] = Dst; + op->a[2] = Dst; +#endif } else { generic: if (bits < SMALL_BITS) { @@ -3103,16 +3318,6 @@ gen_get_binary2(LoaderState* stp, GenOpArg Fail, GenOpArg Ms, GenOpArg Live, } /* - * Predicate to test whether a heap binary should be generated. - */ - -static int -should_gen_heap_bin(LoaderState* stp, GenOpArg Src) -{ - return Src.val <= ERL_ONHEAP_BIN_LIMIT; -} - -/* * Predicate to test whether a binary construction is too big. */ @@ -3384,27 +3589,14 @@ negation_is_small(LoaderState* stp, GenOpArg Int) IS_SSMALL(-((Sint)Int.val)); } - -static int -smp(LoaderState* stp) -{ -#ifdef ERTS_SMP - return 1; -#else - return 0; -#endif -} - /* * Mark this label. 
*/ static int smp_mark_target_label(LoaderState* stp, GenOpArg L) { -#ifdef ERTS_SMP ASSERT(L.type == TAG_f); stp->labels[L.val].looprec_targeted = 1; -#endif return 1; } @@ -3415,12 +3607,8 @@ smp_mark_target_label(LoaderState* stp, GenOpArg L) static int smp_already_locked(LoaderState* stp, GenOpArg L) { -#ifdef ERTS_SMP ASSERT(L.type == TAG_u); return stp->labels[L.val].looprec_targeted; -#else - return 0; -#endif } /* @@ -3434,11 +3622,11 @@ gen_literal_timeout(LoaderState* stp, GenOpArg Fail, GenOpArg Time) Sint timeout; NEW_GENOP(stp, op); - op->op = genop_i_wait_timeout_2; + op->op = genop_wait_timeout_unlocked_int_2; op->next = NULL; op->arity = 2; - op->a[0] = Fail; - op->a[1].type = TAG_u; + op->a[0].type = TAG_u; + op->a[1] = Fail; if (Time.type == TAG_i && (timeout = Time.val) >= 0 && #if defined(ARCH_64) @@ -3447,7 +3635,7 @@ gen_literal_timeout(LoaderState* stp, GenOpArg Fail, GenOpArg Time) 1 #endif ) { - op->a[1].val = timeout; + op->a[0].val = timeout; #if !defined(ARCH_64) } else if (Time.type == TAG_q) { Eterm big; @@ -3461,7 +3649,7 @@ gen_literal_timeout(LoaderState* stp, GenOpArg Fail, GenOpArg Time) } else { Uint u; (void) term_to_Uint(big, &u); - op->a[1].val = (BeamInstr) u; + op->a[0].val = (BeamInstr) u; } #endif } else { @@ -3481,12 +3669,12 @@ gen_literal_timeout_locked(LoaderState* stp, GenOpArg Fail, GenOpArg Time) Sint timeout; NEW_GENOP(stp, op); - op->op = genop_i_wait_timeout_locked_2; + op->op = genop_wait_timeout_locked_int_2; op->next = NULL; op->arity = 2; - op->a[0] = Fail; - op->a[1].type = TAG_u; - + op->a[0].type = TAG_u; + op->a[1] = Fail; + if (Time.type == TAG_i && (timeout = Time.val) >= 0 && #if defined(ARCH_64) (timeout >> 32) == 0 @@ -3494,7 +3682,7 @@ gen_literal_timeout_locked(LoaderState* stp, GenOpArg Fail, GenOpArg Time) 1 #endif ) { - op->a[1].val = timeout; + op->a[0].val = timeout; #if !defined(ARCH_64) } else if (Time.type == TAG_q) { Eterm big; @@ -3508,7 +3696,7 @@ gen_literal_timeout_locked(LoaderState* stp, GenOpArg Fail, GenOpArg Time) } else { Uint u; (void) term_to_Uint(big, &u); - op->a[1].val = (BeamInstr) u; + op->a[0].val = (BeamInstr) u; } #endif } else { @@ -3554,7 +3742,7 @@ gen_select_tuple_arity(LoaderState* stp, GenOpArg S, GenOpArg Fail, if (size == 2) { NEW_GENOP(stp, op); op->next = NULL; - op->op = genop_i_select_tuple_arity2_6; + op->op = genop_i_select_tuple_arity2_4; GENOP_ARITY(op, arity - 1); op->a[0] = S; op->a[1] = Fail; @@ -3844,14 +4032,13 @@ gen_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail, int i, j, align = 0; if (size == 2) { - /* * Use a special-cased instruction if there are only two values. */ NEW_GENOP(stp, op); op->next = NULL; - op->op = genop_i_select_val2_6; + op->op = genop_i_select_val2_4; GENOP_ARITY(op, arity - 1); op->a[0] = S; op->a[1] = Fail; @@ -3861,47 +4048,19 @@ gen_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail, op->a[5] = Rest[3]; return op; - - } else if (size > 10) { - - /* binary search instruction */ - - NEW_GENOP(stp, op); - op->next = NULL; - op->op = genop_i_select_val_bins_3; - GENOP_ARITY(op, arity); - op->a[0] = S; - op->a[1] = Fail; - op->a[2].type = TAG_u; - op->a[2].val = size; - for (i = 3; i < arity; i++) { - op->a[i] = Rest[i-3]; - } - - /* - * Sort the values to make them useful for a binary search. 
- */ - - qsort(op->a+3, size, 2*sizeof(GenOpArg), - (int (*)(const void *, const void *)) genopargcompare); -#ifdef DEBUG - for (i = 3; i < arity-2; i += 2) { - ASSERT(op->a[i].val < op->a[i+2].val); - } -#endif - return op; } - /* linear search instruction */ - - align = 1; + if (size <= 10) { + /* Use linear search. Reserve place for a sentinel. */ + align = 1; + } arity += 2*align; size += align; NEW_GENOP(stp, op); op->next = NULL; - op->op = genop_i_select_val_lins_3; + op->op = (align == 0) ? genop_i_select_val_bins_3 : genop_i_select_val_lins_3; GENOP_ARITY(op, arity); op->a[0] = S; op->a[1] = Fail; @@ -3915,7 +4074,7 @@ gen_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail, } /* - * Sort the values to make them useful for a sentinel search + * Sort the values to make them useful for a binary or sentinel search. */ qsort(tmp, size - align, 2*sizeof(GenOpArg), @@ -3930,11 +4089,12 @@ gen_select_val(LoaderState* stp, GenOpArg S, GenOpArg Fail, erts_free(ERTS_ALC_T_LOADER_TMP, (void *) tmp); - /* add sentinel */ - - op->a[j].type = TAG_u; - op->a[j].val = ~((BeamInstr)0); - op->a[j+size] = Fail; + if (align) { + /* Add sentinel for linear search. */ + op->a[j].type = TAG_u; + op->a[j].val = ~((BeamInstr)0); + op->a[j+size] = Fail; + } #ifdef DEBUG for (i = 0; i < size - 1; i++) { @@ -4222,6 +4382,92 @@ literal_is_map(LoaderState* stp, GenOpArg Lit) } /* + * Predicate to test whether all of the given new small map keys are literals + */ +static int +is_small_map_literal_keys(LoaderState* stp, GenOpArg Size, GenOpArg* Rest) +{ + if (Size.val > MAP_SMALL_MAP_LIMIT) { + return 0; + } + + /* + * Operations with non-literals have always only one key. + */ + if (Size.val != 2) { + return 1; + } + + switch (Rest[0].type) { + case TAG_a: + case TAG_i: + case TAG_n: + case TAG_q: + return 1; + default: + return 0; + } +} + +static GenOp* +gen_new_small_map_lit(LoaderState* stp, GenOpArg Dst, GenOpArg Live, + GenOpArg Size, GenOpArg* Rest) +{ + unsigned size = Size.val; + Uint lit; + unsigned i; + GenOp* op; + GenOpArg* dst; + Eterm* hp; + Eterm* tmp; + Eterm* thp; + Eterm keys; + + NEW_GENOP(stp, op); + GENOP_ARITY(op, 3 + size/2); + op->next = NULL; + op->op = genop_i_new_small_map_lit_3; + + tmp = thp = erts_alloc(ERTS_ALC_T_LOADER_TMP, (1 + size/2) * sizeof(*tmp)); + keys = make_tuple(thp); + *thp++ = make_arityval(size/2); + + dst = op->a+3; + + for (i = 0; i < size; i += 2) { + switch (Rest[i].type) { + case TAG_a: + *thp++ = Rest[i].val; + ASSERT(is_atom(Rest[i].val)); + break; + case TAG_i: + *thp++ = make_small(Rest[i].val); + break; + case TAG_n: + *thp++ = NIL; + break; + case TAG_q: + *thp++ = stp->literals[Rest[i].val].term; + break; + } + *dst++ = Rest[i + 1]; + } + + if (!find_literal(stp, keys, &lit)) { + lit = new_literal(stp, &hp, 1 + size/2); + sys_memcpy(hp, tmp, (1 + size/2) * sizeof(*tmp)); + } + erts_free(ERTS_ALC_T_LOADER_TMP, tmp); + + op->a[0] = Dst; + op->a[1] = Live; + op->a[2].type = TAG_q; + op->a[2].val = lit; + + return op; +} + +/* * Predicate to test whether the given literal is an empty map. 
*/ @@ -4732,21 +4978,57 @@ freeze_code(LoaderState* stp) */ for (i = 0; i < stp->num_labels; i++) { - Sint this_patch; - Sint next_patch; + Uint patch; Uint value = stp->labels[i].value; - - if (value == 0 && stp->labels[i].patches >= 0) { + + if (value == 0 && stp->labels[i].num_patches != 0) { LoadError1(stp, "label %d not resolved", i); } ASSERT(value < stp->ci); - this_patch = stp->labels[i].patches; - while (this_patch >= 0) { - ASSERT(this_patch < stp->ci); - next_patch = codev[this_patch]; - ASSERT(next_patch < stp->ci); - codev[this_patch] = (BeamInstr) (codev + value); - this_patch = next_patch; + for (patch = 0; patch < stp->labels[i].num_patches; patch++) { + LabelPatch* lp = &stp->labels[i].patches[patch]; + Uint pos = lp->pos; + ASSERT(pos < stp->ci); + if (pos < stp->num_functions) { + /* + * This is the array of pointers to the beginning of + * each function. The pointers must remain absolute. + */ + codev[pos] = (BeamInstr) (codev + value); + } else { +#ifdef DEBUG + Uint w; +#endif + Sint32 rel = lp->offset + value; + switch (lp->packed) { + case 0: /* Not packed */ + ASSERT(codev[pos] == i); + codev[pos] = rel; + break; +#ifdef BEAM_WIDE_MASK + case 1: /* Least significant word. */ +#ifdef DEBUG + w = codev[pos] & BEAM_WIDE_MASK; + /* Correct label in least significant word? */ + ASSERT(w == i); +#endif + codev[pos] = (codev[pos] & ~BEAM_WIDE_MASK) | + (rel & BEAM_WIDE_MASK); + break; + case 2: /* Most significant word */ +#ifdef DEBUG + w = (codev[pos] >> BEAM_WIDE_SHIFT) & BEAM_WIDE_MASK; + /* Correct label in most significant word? */ + ASSERT(w == i); +#endif + codev[pos] = ((Uint)rel << BEAM_WIDE_SHIFT) | + (codev[pos] & BEAM_WIDE_MASK); + break; +#endif + default: + ASSERT(0); + } + } } } CHKBLK(ERTS_ALC_T_CODE,code_hdr); @@ -4789,8 +5071,11 @@ final_touch(LoaderState* stp, struct erl_module_instance* inst_p) catches = BEAM_CATCHES_NIL; while (index != 0) { BeamInstr next = codev[index]; - codev[index] = BeamOpCode(op_catch_yf); - catches = beam_catches_cons((BeamInstr *)codev[index+2], catches); + BeamInstr* abs_addr; + codev[index] = BeamOpCodeAddr(op_catch_yf); + /* We must make the address of the label absolute again. */ + abs_addr = (BeamInstr *)codev + index + codev[index+2]; + catches = beam_catches_cons(abs_addr, catches); codev[index+2] = make_catch(catches); index = next; } @@ -4861,7 +5146,7 @@ final_touch(LoaderState* stp, struct erl_module_instance* inst_p) /* * We are hiding a pointer into older code. */ - erts_smp_refc_dec(&fe->refc, 1); + erts_refc_dec(&fe->refc, 1); } fe->address = code_ptr; #ifdef HIPE @@ -5257,12 +5542,15 @@ get_tag_and_value(LoaderState* stp, Uint len_code, { Uint count; Sint val; - byte default_buf[128]; - byte* bigbuf = default_buf; + byte default_byte_buf[128]; + byte* byte_buf = default_byte_buf; + Eterm default_big_buf[128/sizeof(Eterm)]; + Eterm* big_buf = default_big_buf; + Eterm tmp_big; byte* s; int i; int neg = 0; - Uint arity; + Uint words_needed; Eterm* hp; /* @@ -5339,8 +5627,11 @@ get_tag_and_value(LoaderState* stp, Uint len_code, *result = val; return TAG_i; } else { - *result = new_literal(stp, &hp, BIG_UINT_HEAP_SIZE); - (void) small_to_big(val, hp); + tmp_big = small_to_big(val, big_buf); + if (!find_literal(stp, tmp_big, result)) { + *result = new_literal(stp, &hp, BIG_UINT_HEAP_SIZE); + sys_memcpy(hp, big_buf, BIG_UINT_HEAP_SIZE*sizeof(Eterm)); + } return TAG_q; } } @@ -5350,8 +5641,8 @@ get_tag_and_value(LoaderState* stp, Uint len_code, * (including margin). 
*/ - if (count+8 > sizeof(default_buf)) { - bigbuf = erts_alloc(ERTS_ALC_T_LOADER_TMP, count+8); + if (count+8 > sizeof(default_byte_buf)) { + byte_buf = erts_alloc(ERTS_ALC_T_LOADER_TMP, count+8); } /* @@ -5360,20 +5651,20 @@ get_tag_and_value(LoaderState* stp, Uint len_code, GetString(stp, s, count); for (i = 0; i < count; i++) { - bigbuf[count-i-1] = *s++; + byte_buf[count-i-1] = *s++; } /* * Check if the number is negative, and negate it if so. */ - if ((bigbuf[count-1] & 0x80) != 0) { + if ((byte_buf[count-1] & 0x80) != 0) { unsigned carry = 1; neg = 1; for (i = 0; i < count; i++) { - bigbuf[i] = ~bigbuf[i] + carry; - carry = (bigbuf[i] == 0 && carry == 1); + byte_buf[i] = ~byte_buf[i] + carry; + carry = (byte_buf[i] == 0 && carry == 1); } ASSERT(carry == 0); } @@ -5382,33 +5673,52 @@ get_tag_and_value(LoaderState* stp, Uint len_code, * Align to word boundary. */ - if (bigbuf[count-1] == 0) { + if (byte_buf[count-1] == 0) { count--; } - if (bigbuf[count-1] == 0) { + if (byte_buf[count-1] == 0) { LoadError0(stp, "bignum not normalized"); } while (count % sizeof(Eterm) != 0) { - bigbuf[count++] = 0; + byte_buf[count++] = 0; } /* - * Allocate heap space for the bignum and copy it. + * Convert to a bignum. */ - arity = count/sizeof(Eterm); - *result = new_literal(stp, &hp, arity+1); - if (is_nil(bytes_to_big(bigbuf, count, neg, hp))) - goto load_error; + words_needed = count/sizeof(Eterm) + 1; + if (words_needed*sizeof(Eterm) > sizeof(default_big_buf)) { + big_buf = erts_alloc(ERTS_ALC_T_LOADER_TMP, words_needed*sizeof(Eterm)); + } + tmp_big = bytes_to_big(byte_buf, count, neg, big_buf); + if (is_nil(tmp_big)) { + goto load_error; + } - if (bigbuf != default_buf) { - erts_free(ERTS_ALC_T_LOADER_TMP, (void *) bigbuf); + /* + * Create a literal if there is no previous literal with the same value. 
+ */ + + if (!find_literal(stp, tmp_big, result)) { + *result = new_literal(stp, &hp, words_needed); + sys_memcpy(hp, big_buf, words_needed*sizeof(Eterm)); + } + + if (byte_buf != default_byte_buf) { + erts_free(ERTS_ALC_T_LOADER_TMP, (void *) byte_buf); + } + if (big_buf != default_big_buf) { + erts_free(ERTS_ALC_T_LOADER_TMP, (void *) big_buf); } return TAG_q; load_error: - if (bigbuf != default_buf) { - erts_free(ERTS_ALC_T_LOADER_TMP, (void *) bigbuf); + if (byte_buf != default_byte_buf) { + erts_free(ERTS_ALC_T_LOADER_TMP, (void *) byte_buf); + } + if (big_buf != default_big_buf) { + erts_free(ERTS_ALC_T_LOADER_TMP, (void *) big_buf); } return -1; } @@ -5453,8 +5763,7 @@ new_label(LoaderState* stp) stp->labels = (Label *) erts_realloc(ERTS_ALC_T_PREPARED_CODE, (void *) stp->labels, stp->num_labels * sizeof(Label)); - stp->labels[num].value = 0; - stp->labels[num].patches = -1; + init_label(&stp->labels[num]); return num; } @@ -5509,6 +5818,24 @@ new_literal(LoaderState* stp, Eterm** hpp, Uint heap_size) return stp->num_literals++; } +static int +find_literal(LoaderState* stp, Eterm needle, Uint *idx) +{ + int i; + + /* + * The search is done backwards since the most recent literals + * allocated by the loader itself will be placed at the end + */ + for (i = stp->num_literals - 1; i >= 0; i--) { + if (EQ(needle, stp->literals[i].term)) { + *idx = (Uint) i; + return 1; + } + } + return 0; +} + Eterm erts_module_info_0(Process* p, Eterm module) { @@ -5699,9 +6026,9 @@ int erts_is_function_native(ErtsCodeInfo *ci) { #ifdef HIPE - ASSERT(ci->op == (BeamInstr) BeamOp(op_i_func_info_IaaI)); - return erts_codeinfo_to_code(ci)[0] == (BeamInstr) BeamOp(op_hipe_trap_call) - || erts_codeinfo_to_code(ci)[0] == (BeamInstr) BeamOp(op_hipe_trap_call_closure); + ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI)); + return BeamIsOpCode(erts_codeinfo_to_code(ci)[0], op_hipe_trap_call) || + BeamIsOpCode(erts_codeinfo_to_code(ci)[0], op_hipe_trap_call_closure); #else return 0; #endif @@ -5770,7 +6097,7 @@ exported_from_module(Process* p, /* Process whose heap to use. */ Eterm tuple; if (ep->addressv[code_ix] == ep->beam && - ep->beam[0] == (BeamInstr) em_call_error_handler) { + BeamIsOpCode(ep->beam[0], op_call_error_handler)) { /* There is a call to the function, but it does not exist. 
*/ continue; } @@ -6041,7 +6368,7 @@ make_stub(ErtsCodeInfo* info, Eterm mod, Eterm func, Uint arity, Uint native, Be { DBG_TRACE_MFA(mod,func,arity,"make beam stub at %p", erts_codeinfo_to_code(info)); ASSERT(WORDS_PER_FUNCTION == 6); - info->op = (BeamInstr) BeamOp(op_i_func_info_IaaI); + info->op = BeamOpCodeAddr(op_i_func_info_IaaI); info->u.ncallee = (void (*)(void)) native; info->mfa.module = mod; info->mfa.function = func; @@ -6146,7 +6473,7 @@ stub_final_touch(LoaderState* stp, ErtsCodeInfo* ci) for (i = 0, lp = stp->lambdas; i < n; i++, lp++) { ErlFunEntry* fe = stp->lambdas[i].fe; if (lp->function == ci->mfa.function && lp->arity == ci->mfa.arity) { - *erts_codeinfo_to_code(ci) = (Eterm) BeamOpCode(op_hipe_trap_call_closure); + *erts_codeinfo_to_code(ci) = BeamOpCodeAddr(op_hipe_trap_call_closure); fe->address = erts_codeinfo_to_code(ci); } } @@ -6276,7 +6603,7 @@ patch_funentries(Eterm Patchlist) fe = erts_get_fun_entry(Mod, uniq, index); fe->native_address = (Uint *)native_address; - erts_smp_refc_dec(&fe->refc, 1); + erts_refc_dec(&fe->refc, 1); if (!patch(Addresses, (Uint) fe)) return 0; @@ -6470,7 +6797,7 @@ erts_make_stub_module(Process* p, Eterm hipe_magic_bin, Eterm Beam, Eterm Info) * as the body until we know what kind of trap we should put there. */ code_hdr->functions[i] = (ErtsCodeInfo*)fp; - op = (Eterm) BeamOpCode(op_hipe_trap_call); /* Might be changed later. */ + op = BeamOpCodeAddr(op_hipe_trap_call); /* Might be changed later. */ fp = make_stub((ErtsCodeInfo*)fp, hipe_stp->module, func, arity, (Uint)native_address, op); } @@ -6480,7 +6807,7 @@ erts_make_stub_module(Process* p, Eterm hipe_magic_bin, Eterm Beam, Eterm Info) */ code_hdr->functions[i] = (ErtsCodeInfo*)fp; - *fp++ = (BeamInstr) BeamOp(op_int_code_end); + *fp++ = BeamOpCodeAddr(op_int_code_end); /* * Copy attributes and compilation information. diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h index c088bdb751..156c3c45e2 100644 --- a/erts/emulator/beam/beam_load.h +++ b/erts/emulator/beam/beam_load.h @@ -37,11 +37,6 @@ typedef struct gen_op_entry { extern const GenOpEntry gen_opc[]; -extern BeamInstr beam_debug_apply[]; -extern BeamInstr* em_call_error_handler; -extern BeamInstr* em_apply_bif; -extern BeamInstr* em_call_nif; - struct ErtsLiteralArea_; /* @@ -111,11 +106,7 @@ typedef struct beam_code_header { }BeamCodeHeader; -#ifdef ERTS_DIRTY_SCHEDULERS # define BEAM_NIF_MIN_FUNC_SZ 4 -#else -# define BEAM_NIF_MIN_FUNC_SZ 3 -#endif void erts_release_literal_area(struct ErtsLiteralArea_* literal_area); int erts_is_module_native(BeamCodeHeader* code); diff --git a/erts/emulator/beam/beam_ranges.c b/erts/emulator/beam/beam_ranges.c index 9b0335e83d..6e373a3480 100644 --- a/erts/emulator/beam/beam_ranges.c +++ b/erts/emulator/beam/beam_ranges.c @@ -29,12 +29,12 @@ typedef struct { BeamInstr* start; /* Pointer to start of module. */ - erts_smp_atomic_t end; /* (BeamInstr*) Points one word beyond last function in module. */ + erts_atomic_t end; /* (BeamInstr*) Points one word beyond last function in module. */ } Range; /* Range 'end' needs to be atomic as we purge module by setting end=start in active code_ix */ -#define RANGE_END(R) ((BeamInstr*)erts_smp_atomic_read_nob(&(R)->end)) +#define RANGE_END(R) ((BeamInstr*)erts_atomic_read_nob(&(R)->end)) static Range* find_range(BeamInstr* pc); static void lookup_loc(FunctionInfo* fi, const BeamInstr* pc, @@ -49,10 +49,10 @@ struct ranges { Range* modules; /* Sorted lists of module addresses. */ Sint n; /* Number of range entries. 
*/ Sint allocated; /* Number of allocated entries. */ - erts_smp_atomic_t mid; /* Cached search start point */ + erts_atomic_t mid; /* Cached search start point */ }; static struct ranges r[ERTS_NUM_CODE_IX]; -static erts_smp_atomic_t mem_used; +static erts_atomic_t mem_used; static Range* write_ptr; #ifdef HARD_DEBUG @@ -90,12 +90,12 @@ erts_init_ranges(void) { Sint i; - erts_smp_atomic_init_nob(&mem_used, 0); + erts_atomic_init_nob(&mem_used, 0); for (i = 0; i < ERTS_NUM_CODE_IX; i++) { r[i].modules = 0; r[i].n = 0; r[i].allocated = 0; - erts_smp_atomic_init_nob(&r[i].mid, 0); + erts_atomic_init_nob(&r[i].mid, 0); } } @@ -107,12 +107,12 @@ erts_start_staging_ranges(int num_new) Sint need; if (r[dst].modules) { - erts_smp_atomic_add_nob(&mem_used, -r[dst].allocated); + erts_atomic_add_nob(&mem_used, -r[dst].allocated); erts_free(ERTS_ALC_T_MODULE_REFS, r[dst].modules); } need = r[dst].allocated = r[src].n + num_new; - erts_smp_atomic_add_nob(&mem_used, need); + erts_atomic_add_nob(&mem_used, need); write_ptr = erts_alloc(ERTS_ALC_T_MODULE_REFS, need * sizeof(Range)); r[dst].modules = write_ptr; @@ -135,7 +135,7 @@ erts_end_staging_ranges(int commit) if (rp->start < RANGE_END(rp)) { /* Only insert a module that has not been purged. */ write_ptr->start = rp->start; - erts_smp_atomic_init_nob(&write_ptr->end, + erts_atomic_init_nob(&write_ptr->end, (erts_aint_t)(RANGE_END(rp))); write_ptr++; } @@ -161,7 +161,7 @@ erts_end_staging_ranges(int commit) } r[dst].modules = mp; CHECK(&r[dst]); - erts_smp_atomic_set_nob(&r[dst].mid, + erts_atomic_set_nob(&r[dst].mid, (erts_aint_t) (r[dst].modules + r[dst].n / 2)); } @@ -182,7 +182,7 @@ erts_update_ranges(BeamInstr* code, Uint size) */ if (r[dst].modules == NULL) { Sint need = 128; - erts_smp_atomic_add_nob(&mem_used, need); + erts_atomic_add_nob(&mem_used, need); r[dst].modules = erts_alloc(ERTS_ALC_T_MODULE_REFS, need * sizeof(Range)); r[dst].allocated = need; @@ -192,7 +192,7 @@ erts_update_ranges(BeamInstr* code, Uint size) ASSERT(r[dst].modules); write_ptr->start = code; - erts_smp_atomic_init_nob(&(write_ptr->end), + erts_atomic_init_nob(&(write_ptr->end), (erts_aint_t)(((byte *)code) + size)); write_ptr++; } @@ -201,13 +201,13 @@ void erts_remove_from_ranges(BeamInstr* code) { Range* rp = find_range(code); - erts_smp_atomic_set_nob(&rp->end, (erts_aint_t)rp->start); + erts_atomic_set_nob(&rp->end, (erts_aint_t)rp->start); } UWord erts_ranges_sz(void) { - return erts_smp_atomic_read_nob(&mem_used) * sizeof(Range); + return erts_atomic_read_nob(&mem_used) * sizeof(Range); } /* @@ -262,7 +262,7 @@ find_range(BeamInstr* pc) ErtsCodeIndex active = erts_active_code_ix(); Range* low = r[active].modules; Range* high = low + r[active].n; - Range* mid = (Range *) erts_smp_atomic_read_nob(&r[active].mid); + Range* mid = (Range *) erts_atomic_read_nob(&r[active].mid); CHECK(&r[active]); while (low < high) { @@ -271,7 +271,7 @@ find_range(BeamInstr* pc) } else if (pc >= RANGE_END(mid)) { low = mid + 1; } else { - erts_smp_atomic_set_nob(&r[active].mid, (erts_aint_t) mid); + erts_atomic_set_nob(&r[active].mid, (erts_aint_t) mid); return mid; } mid = low + (high-low) / 2; diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c index b6595d2a5d..ad555bb195 100644 --- a/erts/emulator/beam/bif.c +++ b/erts/emulator/beam/bif.c @@ -57,10 +57,10 @@ static Export dsend_continue_trap_export; Export *erts_convert_time_unit_trap = NULL; static Export *await_msacc_mod_trap = NULL; -static erts_smp_atomic32_t msacc; +static erts_atomic32_t msacc; static Export 
*await_sched_wall_time_mod_trap; -static erts_smp_atomic32_t sched_wall_time; +static erts_atomic32_t sched_wall_time; #define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1) @@ -98,14 +98,12 @@ static int insert_internal_link(Process* p, Eterm rpid) ASSERT(is_internal_pid(rpid)); -#ifdef ERTS_SMP if (IS_TRACED(p) && (ERTS_TRACE_FLAGS(p) & (F_TRACE_SOL|F_TRACE_SOL1))) { rp_locks = ERTS_PROC_LOCKS_ALL; } - erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK); -#endif + erts_proc_lock(p, ERTS_PROC_LOCK_LINK); /* get a pointer to the process struct of the linked process */ rp = erts_pid2proc_opt(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK, @@ -113,7 +111,7 @@ static int insert_internal_link(Process* p, Eterm rpid) ERTS_P2P_FLG_ALLOW_OTHER_X); if (!rp) { - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); return 0; } @@ -139,10 +137,10 @@ static int insert_internal_link(Process* p, Eterm rpid) rp, am_getting_linked, p->common.id); if (p == rp) - erts_smp_proc_unlock(p, rp_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(p, rp_locks & ~ERTS_PROC_LOCK_MAIN); else { - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, rp_locks); } return 1; @@ -178,13 +176,13 @@ BIF_RETTYPE link_1(BIF_ALIST_1) goto res_no_proc; } - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); if (erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, BIF_ARG_1) >= 0) send_link_signal = 1; /* else: already linked */ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); if (send_link_signal) { Eterm ref; @@ -212,11 +210,11 @@ BIF_RETTYPE link_1(BIF_ALIST_1) if (is_external_pid(BIF_ARG_1)) { - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); /* We may earn time by checking first that we're not linked already */ if (erts_lookup_link(ERTS_P_LINKS(BIF_P), BIF_ARG_1) != NULL) { - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); BIF_RET(am_true); } else { @@ -225,7 +223,7 @@ BIF_RETTYPE link_1(BIF_ALIST_1) ErtsDSigData dsd; dep = external_pid_dist_entry(BIF_ARG_1); if (dep == erts_this_dist_entry) { - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); goto res_no_proc; } @@ -234,13 +232,13 @@ BIF_RETTYPE link_1(BIF_ALIST_1) case ERTS_DSIG_PREP_NOT_ALIVE: /* Let the dlink trap handle it */ case ERTS_DSIG_PREP_NOT_CONNECTED: - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); BIF_TRAP1(dlink_trap, BIF_P, BIF_ARG_1); case ERTS_DSIG_PREP_CONNECTED: /* We are connected. 
Setup link and send link signal */ - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, BIF_ARG_1); lnk = erts_add_or_lookup_link(&(dep->nlinks), @@ -249,9 +247,9 @@ BIF_RETTYPE link_1(BIF_ALIST_1) ASSERT(lnk != NULL); erts_add_link(&ERTS_LINK_ROOT(lnk), LINK_PID, BIF_ARG_1); - erts_smp_de_links_unlock(dep); - erts_smp_de_runlock(dep); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_de_links_unlock(dep); + erts_de_runlock(dep); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); code = erts_dsig_send_link(&dsd, BIF_P->common.id, BIF_ARG_1); if (code == ERTS_DSIG_SEND_YIELD) @@ -267,11 +265,11 @@ BIF_RETTYPE link_1(BIF_ALIST_1) BIF_ERROR(BIF_P, BADARG); res_no_proc: { - erts_aint32_t state = erts_smp_atomic32_read_nob(&BIF_P->state); + erts_aint32_t state = erts_atomic32_read_nob(&BIF_P->state); if (state & ERTS_PSFLG_TRAP_EXIT) { ErtsProcLocks locks = ERTS_PROC_LOCK_MAIN; erts_deliver_exit_message(BIF_ARG_1, BIF_P, &locks, am_noproc, NIL); - erts_smp_proc_unlock(BIF_P, ~ERTS_PROC_LOCK_MAIN & locks); + erts_proc_unlock(BIF_P, ~ERTS_PROC_LOCK_MAIN & locks); BIF_RET(am_true); } else @@ -290,58 +288,41 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to) ErtsMonitor *mon; int code; Eterm res = am_false; -#ifndef ERTS_SMP - int stale_mon = 0; -#endif - ERTS_SMP_LC_ASSERT((ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK) + ERTS_LC_ASSERT((ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK) == erts_proc_lc_my_proc_locks(c_p)); code = erts_dsig_prepare(&dsd, dep, c_p, ERTS_DSP_RLOCK, 0); switch (code) { case ERTS_DSIG_PREP_NOT_ALIVE: case ERTS_DSIG_PREP_NOT_CONNECTED: -#ifndef ERTS_SMP - /* XXX Is this possible? Shouldn't this link - previously have been removed if the node - had previously been disconnected. */ - ASSERT(0); - stale_mon = 1; -#endif /* * In the smp case this is possible if the node goes * down just before the call to demonitor. */ if (dep) { - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); dmon = erts_remove_monitor(&dep->monitors, ref); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); if (dmon) erts_destroy_monitor(dmon); } mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_LINK); res = am_true; break; case ERTS_DSIG_PREP_CONNECTED: - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref); dmon = erts_remove_monitor(&dep->monitors, ref); - erts_smp_de_links_unlock(dep); - erts_smp_de_runlock(dep); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK); + erts_de_links_unlock(dep); + erts_de_runlock(dep); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_LINK); if (!dmon) { -#ifndef ERTS_SMP - /* XXX How is this possible? Shouldn't this link - previously have been removed when the distributed - end was removed. */ - ASSERT(0); - stale_mon = 1; -#endif /* * This is possible when smp support is enabled. * 'DOWN' message just arrived. 
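Nearly all of the bif.c churn in this file is mechanical fallout from dropping the non-SMP build: the erts_smp_* lock, atomic, and distribution-entry operations (which in non-threaded builds mostly compiled away to no-ops or trivial wrappers) are renamed to plain erts_*, and the #ifdef ERTS_SMP / #ifndef ERTS_SMP branches disappear because the threaded code path is now the only one. A hypothetical before/after, purely to illustrate the pattern (not part of the patch itself):

    /* Before: the lock only did real work in SMP builds. */
    #ifdef ERTS_SMP
        erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_LINK);
    #endif

    /* After: threading is unconditional, so the plain name is used. */
    erts_proc_lock(c_p, ERTS_PROC_LOCK_LINK);
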
@@ -370,18 +351,6 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to) return am_internal_error; } -#ifndef ERTS_SMP - if (stale_mon) { - erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); - erts_dsprintf(dsbufp, "Stale process monitor %T to ", ref); - if (is_atom(to)) - erts_dsprintf(dsbufp, "{%T, %T}", to, dep->sysname); - else - erts_dsprintf(dsbufp, "%T", to); - erts_dsprintf(dsbufp, " found\n"); - erts_send_error_to_logger(c_p->group_leader, dsbufp); - } -#endif /* * We aren't allowed to destroy 'mon' until now, since 'to' @@ -391,7 +360,7 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to) lookup and remove */ erts_destroy_monitor(mon); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); return res; } @@ -405,13 +374,9 @@ demonitor_local_process(Process *c_p, Eterm ref, Eterm to, Eterm *res) ERTS_P2P_FLG_ALLOW_OTHER_X); ErtsMonitor *mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref); -#ifndef ERTS_SMP - ASSERT(mon); -#else if (!mon) *res = am_false; else -#endif { *res = am_true; erts_destroy_monitor(mon); @@ -420,12 +385,12 @@ demonitor_local_process(Process *c_p, Eterm ref, Eterm to, Eterm *res) ErtsMonitor *rmon; rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref); if (rp != c_p) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (rmon != NULL) erts_destroy_monitor(rmon); } else { - ERTS_SMP_ASSERT_IS_NOT_EXITING(c_p); + ERTS_ASSERT_IS_NOT_EXITING(c_p); } } @@ -438,7 +403,7 @@ demonitor_local_port(Process *origin, Eterm ref, Eterm target) if (!port) { BIF_ERROR(origin, BADARG); } - erts_smp_proc_unlock(origin, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(origin, ERTS_PROC_LOCK_LINK); if (port) { Eterm trap_ref; @@ -458,7 +423,7 @@ demonitor_local_port(Process *origin, Eterm ref, Eterm target) } } else { - ERTS_SMP_ASSERT_IS_NOT_EXITING(origin); + ERTS_ASSERT_IS_NOT_EXITING(origin); } BIF_RET(res); } @@ -472,11 +437,10 @@ BIF_RETTYPE demonitor(Process *c_p, Eterm ref, Eterm *multip) ErtsMonitor *mon = NULL; /* The monitor entry to delete */ Eterm to = NIL; /* Monitor link traget */ DistEntry *dep = NULL; /* Target's distribution entry */ - int deref_de = 0; BIF_RETTYPE res = am_false; int unlock_link = 1; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_LINK); + erts_proc_lock(c_p, ERTS_PROC_LOCK_LINK); if (is_not_internal_ref(ref)) { res = am_badarg; @@ -502,8 +466,6 @@ BIF_RETTYPE demonitor(Process *c_p, Eterm ref, Eterm *multip) ASSERT(is_node_name_atom(to)); dep = erts_sysname_to_connected_dist_entry(to); ASSERT(dep != erts_this_dist_entry); - if (dep) - deref_de = 1; } else if (is_port(to)) { if (port_dist_entry(to) != erts_this_dist_entry) { goto badarg; @@ -521,11 +483,6 @@ BIF_RETTYPE demonitor(Process *c_p, Eterm ref, Eterm *multip) unlock_link = 0; } else { /* Local monitor */ - if (deref_de) { - deref_de = 0; - erts_deref_dist_entry(dep); - } - dep = NULL; demonitor_local_process(c_p, ref, to, &res); } break; @@ -538,14 +495,9 @@ badarg: done: if (unlock_link) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK); - - if (deref_de) { - ASSERT(dep); - erts_deref_dist_entry(dep); - } + erts_proc_unlock(c_p, ERTS_PROC_LOCK_LINK); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); BIF_RET(res); } @@ -668,12 +620,12 @@ local_pid_monitor(Process *p, Eterm target, Eterm mon_ref, int boolean) return ret; 
} - erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK); + erts_proc_lock(p, ERTS_PROC_LOCK_LINK); rp = erts_pid2proc_opt(p, p_locks, target, ERTS_PROC_LOCK_LINK, ERTS_P2P_FLG_ALLOW_OTHER_X); if (!rp) { - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); p_locks &= ~ERTS_PROC_LOCK_LINK; if (boolean) ret = am_false; @@ -690,10 +642,10 @@ local_pid_monitor(Process *p, Eterm target, Eterm mon_ref, int boolean) erts_add_monitor(&ERTS_P_MONITORS(p), MON_ORIGIN, mon_ref, target, NIL); erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, mon_ref, p->common.id, NIL); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); } - erts_smp_proc_unlock(p, p_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(p, p_locks & ~ERTS_PROC_LOCK_MAIN); return ret; } @@ -727,7 +679,7 @@ res_no_proc: break; } } - erts_smp_proc_unlock(origin, p_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(origin, p_locks & ~ERTS_PROC_LOCK_MAIN); BIF_RET(ref); } @@ -742,7 +694,7 @@ local_name_monitor(Process *self, Eterm type, Eterm target_name) Process *proc = NULL; Port *port = NULL; - erts_smp_proc_lock(self, ERTS_PROC_LOCK_LINK); + erts_proc_lock(self, ERTS_PROC_LOCK_LINK); erts_whereis_name(self, p_locks, target_name, &proc, ERTS_PROC_LOCK_LINK, @@ -761,7 +713,7 @@ local_name_monitor(Process *self, Eterm type, Eterm target_name) Eterm item; UseTmpHeap(3,self); - erts_smp_proc_unlock(self, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(self, ERTS_PROC_LOCK_LINK); p_locks &= ~ERTS_PROC_LOCK_LINK; item = TUPLE2(lhp, target_name, erts_this_dist_entry->sysname); @@ -772,7 +724,7 @@ local_name_monitor(Process *self, Eterm type, Eterm target_name) UnUseTmpHeap(3,self); } else if (port) { - erts_smp_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN); p_locks &= ~ERTS_PROC_LOCK_MAIN; switch (erts_port_monitor(self, port, target_name, &ret)) { @@ -793,16 +745,16 @@ local_name_monitor(Process *self, Eterm type, Eterm target_name) proc->common.id, target_name); erts_add_monitor(&ERTS_P_MONITORS(proc), MON_TARGET, ret, self->common.id, target_name); - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(proc, ERTS_PROC_LOCK_LINK); } if (p_locks) { - erts_smp_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN); } BIF_RET(ret); badarg: if (p_locks) { - erts_smp_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN); } BIF_ERROR(self, BADARG); } @@ -815,20 +767,20 @@ remote_monitor(Process *p, Eterm bifarg1, Eterm bifarg2, BIF_RETTYPE ret; int code; - erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK); + erts_proc_lock(p, ERTS_PROC_LOCK_LINK); code = erts_dsig_prepare(&dsd, dep, p, ERTS_DSP_RLOCK, 0); switch (code) { case ERTS_DSIG_PREP_NOT_ALIVE: /* Let the dmonitor_p trap handle it */ case ERTS_DSIG_PREP_NOT_CONNECTED: - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); ERTS_BIF_PREP_TRAP2(ret, dmonitor_p_trap, p, bifarg1, bifarg2); break; case ERTS_DSIG_PREP_CONNECTED: if (!(dep->flags & DFLAG_DIST_MONITOR) || (byname && !(dep->flags & DFLAG_DIST_MONITOR_NAME))) { - erts_smp_de_runlock(dep); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_de_runlock(dep); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); ERTS_BIF_PREP_ERROR(ret, p, BADARG); } else { @@ -847,16 +799,16 @@ remote_monitor(Process *p, Eterm bifarg1, Eterm bifarg2, d_name = NIL; } - 
erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); erts_add_monitor(&ERTS_P_MONITORS(p), MON_ORIGIN, mon_ref, p_trgt, p_name); erts_add_monitor(&(dep->monitors), MON_TARGET, mon_ref, p->common.id, d_name); - erts_smp_de_links_unlock(dep); - erts_smp_de_runlock(dep); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_de_links_unlock(dep); + erts_de_runlock(dep); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); code = erts_dsig_send_monitor(&dsd, p->common.id, target, mon_ref); if (code == ERTS_DSIG_SEND_YIELD) @@ -879,7 +831,6 @@ BIF_RETTYPE monitor_2(BIF_ALIST_2) Eterm target = BIF_ARG_2; BIF_RETTYPE ret; DistEntry *dep = NULL; - int deref_de = 0; /* Only process monitors are implemented */ switch (BIF_ARG_1) { @@ -889,10 +840,10 @@ BIF_RETTYPE monitor_2(BIF_ALIST_2) goto badarg; } ref = erts_make_ref(BIF_P); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); erts_add_monitor(&ERTS_P_MONITORS(BIF_P), MON_TIME_OFFSET, ref, am_clock_service, NIL); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); erts_monitor_time_offset(BIF_P->common.id, ref); BIF_RET(ref); } @@ -939,21 +890,14 @@ local_port: } dep = erts_sysname_to_connected_dist_entry(remote_node); if (dep == erts_this_dist_entry) { - deref_de = 1; ret = local_name_monitor(BIF_P, BIF_ARG_1, name); } else { - if (dep) - deref_de = 1; ret = remote_monitor(BIF_P, BIF_ARG_1, BIF_ARG_2, dep, name, 1); } } else { badarg: ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG); } - if (deref_de) { - deref_de = 0; - erts_deref_dist_entry(dep); - } return ret; } @@ -1009,7 +953,7 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1) so.max_heap_size = H_MAX_SIZE; so.max_heap_flags = H_MAX_FLAGS; so.priority = PRIORITY_NORMAL; - so.max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); + so.max_gen_gcs = (Uint16) erts_atomic32_read_nob(&erts_max_gen_gcs); so.scheduler = 0; /* @@ -1150,15 +1094,13 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) } if (is_internal_port(BIF_ARG_1)) { - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); -#ifdef ERTS_SMP + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); if (ERTS_PROC_PENDING_EXIT(BIF_P)) goto handle_pending_exit; -#endif l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); if (l) { Port *prt; @@ -1200,14 +1142,12 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) /* Blind removal, we might have trapped or anything, this leaves us in a state where monitors might be inconsistent, but the dist code should take care of it. */ - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); -#ifdef ERTS_SMP + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); if (ERTS_PROC_PENDING_EXIT(BIF_P)) goto handle_pending_exit; -#endif l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1); - erts_smp_proc_unlock(BIF_P, + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); if (l) @@ -1249,7 +1189,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) /* Internal pid... 
*/ - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); cp_locks |= ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS; @@ -1258,13 +1198,11 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) BIF_ARG_1, ERTS_PROC_LOCK_LINK, ERTS_P2P_FLG_ALLOW_OTHER_X); -#ifdef ERTS_SMP if (ERTS_PROC_PENDING_EXIT(BIF_P)) { if (rp && rp != BIF_P) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); goto handle_pending_exit; } -#endif /* unlink and ignore errors */ l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1); @@ -1272,7 +1210,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) erts_destroy_link(l); if (!rp) { - ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P); + ERTS_ASSERT_IS_NOT_EXITING(BIF_P); } else { rl = erts_remove_link(&ERTS_P_LINKS(rp), BIF_P->common.id); @@ -1280,29 +1218,27 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) erts_destroy_link(rl); if (IS_TRACED_FL(rp, F_TRACE_PROCS) && rl != NULL) { - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS); cp_locks &= ~ERTS_PROC_LOCK_STATUS; trace_proc(BIF_P, (ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_LINK), rp, am_getting_unlinked, BIF_P->common.id); } if (rp != BIF_P) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); } - erts_smp_proc_unlock(BIF_P, cp_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, cp_locks & ~ERTS_PROC_LOCK_MAIN); BIF_RET(am_true); -#ifdef ERTS_SMP handle_pending_exit: erts_handle_pending_exit(BIF_P, (ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_LINK | ERTS_PROC_LOCK_STATUS)); ASSERT(ERTS_PROC_IS_EXITING(BIF_P)); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); ERTS_BIF_EXITED(BIF_P); -#endif } BIF_RETTYPE hibernate_3(BIF_ALIST_3) @@ -1314,7 +1250,11 @@ BIF_RETTYPE hibernate_3(BIF_ALIST_3) */ Eterm reg[3]; - if (erts_hibernate(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, reg)) { + reg[0] = BIF_ARG_1; + reg[1] = BIF_ARG_2; + reg[2] = BIF_ARG_3; + + if (erts_hibernate(BIF_P, reg)) { /* * If hibernate succeeded, TRAP. The process will be wait in a * hibernated state if its state is inactive (!ERTS_PSFLG_ACTIVE); @@ -1635,7 +1575,7 @@ BIF_RETTYPE exit_2(BIF_ALIST_2) if (BIF_ARG_1 == BIF_P->common.id) { rp_locks = ERTS_PROC_LOCKS_ALL; rp = BIF_P; - erts_smp_proc_lock(rp, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(rp, ERTS_PROC_LOCKS_ALL_MINOR); } else { rp_locks = ERTS_PROC_LOCKS_XSIG_SEND; @@ -1657,12 +1597,10 @@ BIF_RETTYPE exit_2(BIF_ALIST_2) NIL, NULL, BIF_P == rp ? ERTS_XSIG_FLG_NO_IGN_NORMAL : 0); -#ifdef ERTS_SMP if (rp == BIF_P) rp_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); -#endif + erts_proc_unlock(rp, rp_locks); /* * We may have exited ourselves and may have to take action. */ @@ -1774,21 +1712,19 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2) * true. For more info, see implementation of * erts_send_exit_signal(). 
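The hibernate_3 change above switches to a new calling convention: the three BIF arguments are loaded into the x-register array before the call, matching an erts_hibernate signature that now takes only the process and the register array. Condensed:

    /* New convention, as shown in the hunk above: module, function and
     * argument list travel in x(0)..x(2) rather than as C parameters. */
    reg[0] = BIF_ARG_1;            /* module        */
    reg[1] = BIF_ARG_2;            /* function      */
    reg[2] = BIF_ARG_3;            /* argument list */
    if (erts_hibernate(BIF_P, reg)) {
        /* TRAP out; the process resumes from the hibernated state. */
    }
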
*/ - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_XSIG_SEND); + erts_proc_lock(BIF_P, ERTS_PROC_LOCKS_XSIG_SEND); if (trap_exit) - state = erts_smp_atomic32_read_bor_mb(&BIF_P->state, + state = erts_atomic32_read_bor_mb(&BIF_P->state, ERTS_PSFLG_TRAP_EXIT); else - state = erts_smp_atomic32_read_band_mb(&BIF_P->state, + state = erts_atomic32_read_band_mb(&BIF_P->state, ~ERTS_PSFLG_TRAP_EXIT); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCKS_XSIG_SEND); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCKS_XSIG_SEND); -#ifdef ERTS_SMP if (state & ERTS_PSFLG_PENDING_EXIT) { erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN); ERTS_BIF_EXITED(BIF_P); } -#endif old_value = (state & ERTS_PSFLG_TRAP_EXIT) ? am_true : am_false; BIF_RET(old_value); @@ -1806,15 +1742,13 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2) if (sched == 0) { new = NULL; - state = erts_smp_atomic32_read_band_mb(&BIF_P->state, + state = erts_atomic32_read_band_mb(&BIF_P->state, ~ERTS_PSFLG_BOUND); } else { new = erts_schedid2runq(sched); -#ifdef ERTS_SMP erts_atomic_set_nob(&BIF_P->run_queue, (erts_aint_t) new); -#endif - state = erts_smp_atomic32_read_bor_mb(&BIF_P->state, + state = erts_atomic32_read_bor_mb(&BIF_P->state, ERTS_PSFLG_BOUND); } @@ -1895,7 +1829,7 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2) } else { goto error; } - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR); old_value = (ERTS_TRACE_FLAGS(BIF_P) & F_SENSITIVE ? am_true : am_false); @@ -1904,7 +1838,7 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2) } else { ERTS_TRACE_FLAGS(BIF_P) &= ~F_SENSITIVE; } - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR); /* make sure to bump all reds so that we get rescheduled immediately so setting takes effect */ BIF_RET2(old_value, CONTEXT_REDS); @@ -1946,15 +1880,11 @@ BIF_RETTYPE process_flag_3(BIF_ALIST_3) Process *rp; Eterm res; -#ifdef ERTS_SMP rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN, BIF_ARG_1, ERTS_PROC_LOCK_MAIN); if (rp == ERTS_PROC_LOCK_BUSY) ERTS_BIF_YIELD3(bif_export[BIF_process_flag_3], BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3); -#else - rp = erts_proc_lookup(BIF_ARG_1); -#endif if (!rp) BIF_ERROR(BIF_P, BADARG); @@ -1962,7 +1892,7 @@ BIF_RETTYPE process_flag_3(BIF_ALIST_3) res = process_flag_aux(BIF_P, rp, BIF_ARG_2, BIF_ARG_3); if (rp != BIF_P) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); return res; } @@ -2053,6 +1983,7 @@ static Sint remote_send(Process *p, DistEntry *dep, ASSERT(is_atom(to) || is_external_pid(to)); + ctx->dep = dep; code = erts_dsig_prepare(&ctx->dsd, dep, p, ERTS_DSP_NO_LOCK, !ctx->suspend); switch (code) { case ERTS_DSIG_PREP_NOT_ALIVE: @@ -2254,7 +2185,6 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext *ctx) if (dep == erts_this_dist_entry) { Eterm id; - erts_deref_dist_entry(dep); if (IS_TRACED_FL(p, F_TRACE_SEND)) trace_send(p, to, msg); if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) @@ -2277,11 +2207,9 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext *ctx) } ret = remote_send(p, dep, tp[1], to, msg, ctx); - if (ret != SEND_YIELD_CONTINUE) { - if (dep) { - erts_deref_dist_entry(dep); - } - } else { + if (ret == SEND_YIELD_CONTINUE) { + if (dep) + erts_ref_dist_entry(dep); ctx->dep_to_deref = dep; } return ret; @@ -2296,17 +2224,15 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext *ctx) send_message: { ErtsProcLocks rp_locks = 0; Sint res; -#ifdef ERTS_SMP if (p == rp) 
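The trap_exit branch above relies on the read-modify-write atomics returning the previous state word, so the old flag value can be reported without a separate, racy read. A standalone sketch of the same idiom in C11 atomics (not the ERTS atomics API; the bit position is made up for illustration):

    #include <stdatomic.h>
    #include <stdint.h>

    #define PSFLG_TRAP_EXIT (UINT32_C(1) << 5)   /* illustrative bit */

    /* Set or clear the flag and return its previous value, mirroring how
     * process_flag_2 derives old_value from read_bor/read_band. */
    static int set_trap_exit(_Atomic uint32_t *state, int on)
    {
        uint32_t old = on ? atomic_fetch_or(state, PSFLG_TRAP_EXIT)
                          : atomic_fetch_and(state, ~PSFLG_TRAP_EXIT);
        return (old & PSFLG_TRAP_EXIT) != 0;
    }
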
rp_locks |= ERTS_PROC_LOCK_MAIN; -#endif /* send to local process */ res = erts_send_message(p, rp, &rp_locks, msg, 0); if (erts_use_sender_punish) res *= 4; else res = 0; - erts_smp_proc_unlock(rp, + erts_proc_unlock(rp, p == rp ? (rp_locks & ~ERTS_PROC_LOCK_MAIN) : rp_locks); @@ -4010,14 +3936,14 @@ BIF_RETTYPE halt_2(BIF_ALIST_2) ERTS_BIF_YIELD2(bif_export[BIF_halt_2], BIF_P, am_undefined, am_undefined); } else { - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_exit(pos_int_code, ""); } } else if (ERTS_IS_ATOM_STR("abort", BIF_ARG_1)) { VERBOSE(DEBUG_SYSTEM, ("System halted by BIF halt(%T, %T)\n", BIF_ARG_1, BIF_ARG_2)); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_exit(ERTS_ABORT_EXIT, ""); } else if (is_list(BIF_ARG_1) || BIF_ARG_1 == NIL) { @@ -4033,7 +3959,7 @@ BIF_RETTYPE halt_2(BIF_ALIST_2) halt_msg[written] = '\0'; VERBOSE(DEBUG_SYSTEM, ("System halted by BIF halt(%T, %T)\n", BIF_ARG_1, BIF_ARG_2)); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_exit(ERTS_DUMP_EXIT, "%s\n", halt_msg); } else @@ -4219,7 +4145,6 @@ BIF_RETTYPE list_to_pid_1(BIF_ALIST_1) goto bad; if(dep == erts_this_dist_entry) { - erts_deref_dist_entry(dep); BIF_RET(make_internal_pid(make_pid_data(c, b))); } else { @@ -4239,13 +4164,10 @@ BIF_RETTYPE list_to_pid_1(BIF_ALIST_1) etp->data.ui[0] = make_pid_data(c, b); MSO(BIF_P).first = (struct erl_off_heap_header*) etp; - erts_deref_dist_entry(dep); BIF_RET(make_external_pid(etp)); } bad: - if (dep) - erts_deref_dist_entry(dep); if (buf) erts_free(ERTS_ALC_T_TMP, (void *) buf); BIF_ERROR(BIF_P, BADARG); @@ -4290,7 +4212,6 @@ BIF_RETTYPE list_to_port_1(BIF_ALIST_1) goto bad; if(dep == erts_this_dist_entry) { - erts_deref_dist_entry(dep); BIF_RET(make_internal_port(p)); } else { @@ -4310,13 +4231,10 @@ BIF_RETTYPE list_to_port_1(BIF_ALIST_1) etp->data.ui[0] = p; MSO(BIF_P).first = (struct erl_off_heap_header*) etp; - erts_deref_dist_entry(dep); BIF_RET(make_external_port(etp)); } bad: - if (dep) - erts_deref_dist_entry(dep); BIF_ERROR(BIF_P, BADARG); } @@ -4436,12 +4354,9 @@ BIF_RETTYPE list_to_ref_1(BIF_ALIST_1) res = make_external_ref(etp); } - erts_deref_dist_entry(dep); BIF_RET(res); bad: - if (dep) - erts_deref_dist_entry(dep); BIF_ERROR(BIF_P, BADARG); } @@ -4507,9 +4422,9 @@ BIF_RETTYPE group_leader_2(BIF_ALIST_2) new_member->group_leader = BIF_ARG_1; else { locks &= ~ERTS_PROC_LOCK_STATUS; - erts_smp_proc_unlock(new_member, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(new_member, ERTS_PROC_LOCK_STATUS); if (new_member == BIF_P - || !(erts_smp_atomic32_read_nob(&new_member->state) + || !(erts_atomic32_read_nob(&new_member->state) & ERTS_PSFLG_DIRTY_RUNNING)) { new_member->group_leader = STORE_NC_IN_PROC(new_member, BIF_ARG_1); @@ -4538,7 +4453,7 @@ BIF_RETTYPE group_leader_2(BIF_ALIST_2) if (new_member == BIF_P) locks &= ~ERTS_PROC_LOCK_MAIN; if (locks) - erts_smp_proc_unlock(new_member, locks); + erts_proc_unlock(new_member, locks); if (await_x) { /* Wait for new_member to terminate; then badarg */ @@ -4565,9 +4480,6 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) if (BIF_ARG_1 == am_multi_scheduling) { if (BIF_ARG_2 == am_block || BIF_ARG_2 == am_unblock || BIF_ARG_2 == am_block_normal || BIF_ARG_2 == am_unblock_normal) { -#ifndef ERTS_SMP - BIF_RET(am_disabled); -#else int block = (BIF_ARG_2 == am_block || BIF_ARG_2 == am_block_normal); int normal = (BIF_ARG_2 == am_block_normal @@ -4603,15 
+4515,8 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR); break; } -#endif } } else if (BIF_ARG_1 == am_schedulers_online) { -#ifndef ERTS_SMP - if (BIF_ARG_2 != make_small(1)) - goto error; - else - BIF_RET(make_small(1)); -#else Sint old_no; if (!is_small(BIF_ARG_2)) goto error; @@ -4635,7 +4540,6 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR); break; } -#endif } else if (BIF_ARG_1 == am_fullsweep_after) { Uint16 nval; Uint oval; @@ -4643,7 +4547,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) goto error; } nval = (n > (Sint) ((Uint16) -1)) ? ((Uint16) -1) : ((Uint16) n); - oval = (Uint) erts_smp_atomic32_xchg_nob(&erts_max_gen_gcs, + oval = (Uint) erts_atomic32_xchg_nob(&erts_max_gen_gcs, (erts_aint32_t) nval); BIF_RET(make_small(oval)); } else if (BIF_ARG_1 == am_min_heap_size) { @@ -4653,13 +4557,13 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) goto error; } - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); H_MIN_SIZE = erts_next_heap_size(n, 0); - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(make_small(oval)); } else if (BIF_ARG_1 == am_min_bin_vheap_size) { @@ -4669,13 +4573,13 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) goto error; } - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); BIN_VH_MIN_SIZE = erts_next_heap_size(n, 0); - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(make_small(oval)); } else if (BIF_ARG_1 == am_max_heap_size) { @@ -4693,14 +4597,14 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) hp = HAlloc(BIF_P, sz); old_value = erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, &hp, NULL); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); H_MAX_SIZE = max_heap_size; H_MAX_FLAGS = max_heap_flags; - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(old_value); } else if (BIF_ARG_1 == am_display_items) { @@ -4754,8 +4658,8 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) } else if (BIF_ARG_1 == make_small(1)) { int i, max; ErtsMessage* mp; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); max = erts_ptab_max(&erts_proc); for (i = 0; i < max; i++) { @@ -4768,7 +4672,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) #endif p->seq_trace_clock = 0; p->seq_trace_lastcnt = 0; - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p); + ERTS_MSGQ_MV_INQ2PRIVQ(p); mp = p->msg.first; while(mp != NULL) { #ifdef USE_VM_PROBES @@ -4781,14 +4685,14 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) } } - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(am_true); } else if (BIF_ARG_1 == am_scheduler_wall_time) { if (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) { erts_aint32_t new = BIF_ARG_2 == am_true ? 
1 : 0; - erts_aint32_t old = erts_smp_atomic32_xchg_nob(&sched_wall_time, + erts_aint32_t old = erts_atomic32_xchg_nob(&sched_wall_time, new); Eterm ref = erts_sched_wall_time_request(BIF_P, 1, new, 0, 0); ASSERT(is_value(ref)); @@ -4797,7 +4701,6 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) ref, old ? am_true : am_false); } -#if defined(ERTS_SMP) && defined(ERTS_DIRTY_SCHEDULERS) } else if (BIF_ARG_1 == am_dirty_cpu_schedulers_online) { Sint old_no; if (!is_small(BIF_ARG_2)) @@ -4823,13 +4726,12 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR); break; } -#endif } else if (BIF_ARG_1 == am_time_offset && ERTS_IS_ATOM_STR("finalize", BIF_ARG_2)) { ErtsTimeOffsetState res; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); res = erts_finalize_time_offset(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); switch (res) { case ERTS_TIME_OFFSET_PRELIMINARY: { DECL_AM(preliminary); @@ -4851,7 +4753,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) Eterm threads; if (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) { erts_aint32_t new = BIF_ARG_2 == am_true ? ERTS_MSACC_ENABLE : ERTS_MSACC_DISABLE; - erts_aint32_t old = erts_smp_atomic32_xchg_nob(&msacc, new); + erts_aint32_t old = erts_atomic32_xchg_nob(&msacc, new); Eterm ref = erts_msacc_request(BIF_P, new, &threads); if (is_non_value(ref)) BIF_RET(old ? am_true : am_false); @@ -4862,7 +4764,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) threads); } else if (BIF_ARG_2 == am_reset) { Eterm ref = erts_msacc_request(BIF_P, ERTS_MSACC_RESET, &threads); - erts_aint32_t old = erts_smp_atomic32_read_nob(&msacc); + erts_aint32_t old = erts_atomic32_read_nob(&msacc); ASSERT(is_value(ref)); BIF_TRAP3(await_msacc_mod_trap, BIF_P, @@ -4881,9 +4783,9 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) what = ERTS_SCHED_STAT_MODIFY_CLEAR; else goto error; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_sched_stat_modify(what); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(am_true); } else if (ERTS_IS_ATOM_STR("internal_cpu_topology", BIF_ARG_1)) { Eterm res = erts_set_cpu_topology(BIF_P, BIF_ARG_2); @@ -5021,7 +4923,6 @@ static BIF_RETTYPE bif_return_trap(BIF_ALIST_2) Eterm res = BIF_ARG_1; switch (BIF_ARG_2) { -#ifdef ERTS_SMP case am_multi_scheduling: { int msb = erts_is_multi_scheduling_blocked(); if (msb > 0) @@ -5032,7 +4933,6 @@ static BIF_RETTYPE bif_return_trap(BIF_ALIST_2) ERTS_INTERNAL_ERROR("Unexpected multi scheduling block state"); break; } -#endif default: break; } @@ -5057,22 +4957,22 @@ static ERTS_INLINE int skip_current_msgq(Process *c_p) { int res; -#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) +#if defined(ERTS_ENABLE_LOCK_CHECK) erts_proc_lc_chk_only_proc_main(c_p); #endif - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); if (ERTS_PROC_PENDING_EXIT(c_p)) { KILL_CATCHES(c_p); c_p->freason = EXC_EXIT; res = 0; } else { - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); c_p->msg.save = c_p->msg.last; res = 1; } - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); return res; } @@ -5131,7 +5031,7 @@ void erts_init_trap_export(Export* ep, Eterm m, Eterm f, Uint a, ep->info.mfa.module = m; ep->info.mfa.function = f; ep->info.mfa.arity = a; - ep->beam[0] = (BeamInstr) em_apply_bif; + 
ep->beam[0] = BeamOpCodeAddr(op_apply_bif); ep->beam[1] = (BeamInstr) bif; } @@ -5175,8 +5075,8 @@ void erts_init_bif(void) await_msacc_mod_trap = erts_export_put(am_erts_internal, am_await_microstate_accounting_modifications, 3); - erts_smp_atomic32_init_nob(&sched_wall_time, 0); - erts_smp_atomic32_init_nob(&msacc, ERTS_MSACC_IS_ENABLED()); + erts_atomic32_init_nob(&sched_wall_time, 0); + erts_atomic32_init_nob(&msacc, ERTS_MSACC_IS_ENABLED()); } /* @@ -5194,15 +5094,14 @@ schedule(Process *c_p, Process *dirty_shadow_proc, Eterm module, Eterm function, int argc, Eterm *argv) { - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p)); (void) erts_nif_export_schedule(c_p, dirty_shadow_proc, - mfa, pc, (BeamInstr) em_apply_bif, + mfa, pc, BeamOpCodeAddr(op_apply_bif), dfunc, ifunc, module, function, argc, argv); } -#ifdef ERTS_DIRTY_SCHEDULERS static BIF_RETTYPE dirty_bif_result(BIF_ALIST_1) { @@ -5245,9 +5144,7 @@ static BIF_RETTYPE dirty_bif_exception(BIF_ALIST_2) BIF_ERROR(BIF_P, freason); } -#endif /* ERTS_DIRTY_SCHEDULERS */ -extern BeamInstr* em_call_bif_e; static BIF_RETTYPE call_bif(Process *c_p, Eterm *reg, BeamInstr *I); BIF_RETTYPE @@ -5263,15 +5160,13 @@ erts_schedule_bif(Process *proc, Process *c_p, *dirty_shadow_proc; ErtsCodeMFA *mfa; -#ifdef ERTS_DIRTY_SCHEDULERS if (proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) { dirty_shadow_proc = proc; c_p = proc->next; ASSERT(c_p->common.id == dirty_shadow_proc->common.id); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } else -#endif { dirty_shadow_proc = NULL; c_p = proc; @@ -5287,7 +5182,6 @@ erts_schedule_bif(Process *proc, * ibif - indirect bif */ -#ifdef ERTS_DIRTY_SCHEDULERS erts_aint32_t set, mask; mask = (ERTS_PSFLG_DIRTY_CPU_PROC | ERTS_PSFLG_DIRTY_IO_PROC); @@ -5310,11 +5204,7 @@ erts_schedule_bif(Process *proc, break; } - (void) erts_smp_atomic32_read_bset_nob(&c_p->state, mask, set); -#else - dbif = call_bif; - ibif = bif; -#endif + (void) erts_atomic32_read_bset_nob(&c_p->state, mask, set); if (i == NULL) { ERTS_INTERNAL_ERROR("Missing instruction pointer"); @@ -5327,13 +5217,13 @@ erts_schedule_bif(Process *proc, mfa = &exp->info.mfa; } #endif - else if (em_call_bif_e == (BeamInstr *) *i) { + else if (BeamIsOpCode(*i, op_call_bif_e)) { /* Pointer to bif export in i+1 */ exp = (Export *) i[1]; pc = i; mfa = &exp->info.mfa; } - else if (em_apply_bif == (BeamInstr *) *i) { + else if (BeamIsOpCode(*i, op_apply_bif)) { /* Pointer to bif in i+1, and mfa in i-3 */ pc = c_p->cp; mfa = erts_code_to_codemfa(i); @@ -5355,7 +5245,7 @@ erts_schedule_bif(Process *proc, } if (dirty_shadow_proc) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); return THE_NON_VALUE; } @@ -5390,7 +5280,6 @@ call_bif(Process *c_p, Eterm *reg, BeamInstr *I) return ret; } -#ifdef ERTS_DIRTY_SCHEDULERS int erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *reg) @@ -5405,7 +5294,7 @@ erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm * erts_aint32_t state; ASSERT(!c_p->scheduler_data); - state = erts_smp_atomic32_read_nob(&c_p->state); + state = erts_atomic32_read_nob(&c_p->state); ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING) && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))); ASSERT(esdp); @@ -5419,7 +5308,7 @@ erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm * bf = (ErtsBifFunc) I[1]; - 
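In the hunks above, direct comparisons against instruction label addresses (em_apply_bif, em_call_bif_e) give way to BeamIsOpCode/BeamOpCodeAddr, which work whether the emulator is built with threaded code (labels as values) or a plain opcode switch. A plausible shape for these macros, assumed rather than shown in this diff:

    /* Assumed definitions; the real ones live in the loader headers. */
    #ifdef NO_JUMP_TABLE
    #  define BeamOpCodeAddr(Op)   ((BeamInstr) (Op))          /* opcode itself */
    #else
    #  define BeamOpCodeAddr(Op)   ((BeamInstr) beam_ops[Op])  /* label address */
    #endif
    #define BeamIsOpCode(InstrWord, Op)  ((InstrWord) == BeamOpCodeAddr(Op))
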
erts_smp_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC + erts_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC | ERTS_PSFLG_DIRTY_IO_PROC)); dirty_shadow_proc = erts_make_dirty_shadow_proc(esdp, c_p); @@ -5434,11 +5323,11 @@ erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm * c_p_htop = c_p->htop; #endif - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); result = (*bf)(dirty_shadow_proc, reg, I); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); ASSERT(c_p_htop == c_p->htop); ASSERT(dirty_shadow_proc->static_flags & ERTS_STC_FLG_SHADOW_PROC); @@ -5461,7 +5350,7 @@ erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm * } else if (nep->func == ERTS_SCHED_BIF_TRAP_MARKER) { /* Dirty BIF did an ordinary trap... */ - ASSERT(!(erts_smp_atomic32_read_nob(&c_p->state) + ASSERT(!(erts_atomic32_read_nob(&c_p->state) & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC))); schedule(c_p, dirty_shadow_proc, NULL, NULL, dirty_bif_trap, (void *) dirty_shadow_proc->i, @@ -5484,7 +5373,6 @@ erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm * return exiting; } -#endif /* ERTS_DIRTY_SCHEDULERS */ #ifdef HARDDEBUG diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h index 01cca90a7a..a2bc883dbe 100644 --- a/erts/emulator/beam/bif.h +++ b/erts/emulator/beam/bif.h @@ -93,7 +93,7 @@ do { \ #define BUMP_REDS(p, gc) do { \ ASSERT(p); \ - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));\ + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));\ (p)->fcalls -= (gc); \ if ((p)->fcalls < 0) { \ if (!ERTS_PROC_GET_SAVED_CALLS_BUF((p))) \ @@ -498,10 +498,8 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p, Eterm args[], int nargs); -#ifdef ERTS_DIRTY_SCHEDULERS int erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *reg); -#endif BIF_RETTYPE erts_schedule_bif(Process *proc, diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab index 10ca0b5066..f7b4451890 100644 --- a/erts/emulator/beam/bif.tab +++ b/erts/emulator/beam/bif.tab @@ -154,6 +154,12 @@ bif erlang:spawn_opt/1 bif erlang:setnode/2 bif erlang:setnode/3 bif erlang:dist_exit/3 +bif erlang:dist_get_stat/1 +bif erlang:dist_ctrl_input_handler/2 +bif erlang:dist_ctrl_put_data/2 +bif erlang:dist_ctrl_get_data/1 +bif erlang:dist_ctrl_get_data_notification/1 + # Static native functions in erts_internal bif erts_internal:port_info/1 diff --git a/erts/emulator/beam/bif_instrs.tab b/erts/emulator/beam/bif_instrs.tab new file mode 100644 index 0000000000..0932b8b985 --- /dev/null +++ b/erts/emulator/beam/bif_instrs.tab @@ -0,0 +1,539 @@ +// -*- c -*- +// +// %CopyrightBegin% +// +// Copyright Ericsson AB 2017. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// %CopyrightEnd% +// + +// ================================================================ +// All guards with zero arguments have special instructions, +// for example: +// +// self/0 +// node/0 +// +// All other guard BIFs take one or two arguments. +// ================================================================ + +CALL_GUARD_BIF(BF, TmpReg, Dst) { + Eterm result; + + ERTS_DBG_CHK_REDS(c_p, FCALLS); + c_p->fcalls = FCALLS; + PROCESS_MAIN_CHK_LOCKS(c_p); + ASSERT(!ERTS_PROC_IS_EXITING(c_p)); + ERTS_CHK_MBUF_SZ(c_p); + result = (*$BF)(c_p, $TmpReg, I); + ERTS_CHK_MBUF_SZ(c_p); + ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result)); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + PROCESS_MAIN_CHK_LOCKS(c_p); + ERTS_HOLE_CHECK(c_p); + FCALLS = c_p->fcalls; + ERTS_DBG_CHK_REDS(c_p, FCALLS); + if (ERTS_LIKELY(is_value(result))) { + $Dst = result; + $NEXT0(); + } +} + +// Guard BIF in head. On failure, ignore the error and jump +// to the code for the next clause. We don't support tracing +// of guard BIFs. + +bif1(Fail, Bif, Src, Dst) { + ErtsBifFunc bf; + Eterm tmp_reg[1]; + + tmp_reg[0] = $Src; + bf = (BifFunction) $Bif; + $CALL_GUARD_BIF(bf, tmp_reg, $Dst); + + $FAIL($Fail); +} + +// +// Guard BIF in body. It can fail like any BIF. No trace support. +// + +bif1_body(Bif, Src, Dst) { + ErtsBifFunc bf; + Eterm tmp_reg[1]; + + tmp_reg[0] = $Src; + bf = (BifFunction) $Bif; + $CALL_GUARD_BIF(bf, tmp_reg, $Dst); + + reg[0] = tmp_reg[0]; + SWAPOUT; + I = handle_error(c_p, I, reg, ubif2mfa((void *) bf)); + goto post_error_handling; +} + +// +// Guard bif in guard with two arguments ('and'/2, 'or'/2, 'xor'/2). +// + +i_bif2(Fail, Bif, Src1, Src2, Dst) { + Eterm tmp_reg[2]; + ErtsBifFunc bf; + + tmp_reg[0] = $Src1; + tmp_reg[1] = $Src2; + bf = (ErtsBifFunc) $Bif; + $CALL_GUARD_BIF(bf, tmp_reg, $Dst); + $FAIL($Fail); +} + +// +// Guard bif in body with two arguments ('and'/2, 'or'/2, 'xor'/2). +// + +i_bif2_body(Bif, Src1, Src2, Dst) { + Eterm tmp_reg[2]; + ErtsBifFunc bf; + + tmp_reg[0] = $Src1; + tmp_reg[1] = $Src2; + bf = (ErtsBifFunc) $Bif; + $CALL_GUARD_BIF(bf, tmp_reg, $Dst); + reg[0] = tmp_reg[0]; + reg[1] = tmp_reg[1]; + SWAPOUT; + I = handle_error(c_p, I, reg, ubif2mfa((void *) bf)); + goto post_error_handling; +} + +// +// Garbage-collecting BIF with one argument in either guard or body. +// + +i_gc_bif1(Fail, Bif, Src, Live, Dst) { + typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint); + GcBifFunction bf; + Eterm result; + Uint live = (Uint) $Live; + + x(live) = $Src; + bf = (GcBifFunction) $Bif; + ERTS_DBG_CHK_REDS(c_p, FCALLS); + c_p->fcalls = FCALLS; + SWAPOUT; + PROCESS_MAIN_CHK_LOCKS(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_CHK_MBUF_SZ(c_p); + result = (*bf)(c_p, reg, live); + ERTS_CHK_MBUF_SZ(c_p); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); + PROCESS_MAIN_CHK_LOCKS(c_p); + SWAPIN; + ERTS_HOLE_CHECK(c_p); + FCALLS = c_p->fcalls; + ERTS_DBG_CHK_REDS(c_p, FCALLS); + if (ERTS_LIKELY(is_value(result))) { + $REFRESH_GEN_DEST(); + $Dst = result; + $NEXT0(); + } + if (ERTS_LIKELY($Fail != 0)) { /* Handle error in guard. */ + $JUMP($Fail); + } + + /* Handle error in body. */ + x(0) = x(live); + I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf)); + goto post_error_handling; +} + +// +// Garbage-collecting BIF with two arguments in either guard or body. 
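One convention runs through all of the BIF instructions above: a fail label of 0 means the BIF was called in a body. On failure, a guard jumps to the label of the next clause, while a body raises the exception through handle_error(). Condensed from the i_gc_bif1 instruction:

    /* Failure convention (condensed from the instructions above). */
    if (is_non_value(result)) {
        if (Fail != 0) {
            JUMP(Fail);                 /* guard: fall through to next clause */
        } else {
            x(0) = x(live);             /* body: restore args and raise */
            I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf));
            goto post_error_handling;
        }
    }
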
+// + +i_gc_bif2(Fail, Bif, Live, Src1, Src2, Dst) { + typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint); + GcBifFunction bf; + Eterm result; + Uint live = (Uint) $Live; + + /* + * XXX This calling convention does not make sense. 'live' + * should point out the first argument, not the second + * (i.e. 'live' should not be incremented below). + */ + x(live) = $Src1; + x(live+1) = $Src2; + live++; + + bf = (GcBifFunction) $Bif; + ERTS_DBG_CHK_REDS(c_p, FCALLS); + c_p->fcalls = FCALLS; + SWAPOUT; + PROCESS_MAIN_CHK_LOCKS(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_CHK_MBUF_SZ(c_p); + result = (*bf)(c_p, reg, live); + ERTS_CHK_MBUF_SZ(c_p); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); + PROCESS_MAIN_CHK_LOCKS(c_p); + SWAPIN; + ERTS_HOLE_CHECK(c_p); + FCALLS = c_p->fcalls; + ERTS_DBG_CHK_REDS(c_p, FCALLS); + if (ERTS_LIKELY(is_value(result))) { + $REFRESH_GEN_DEST(); + $Dst = result; + $NEXT0(); + } + + if (ERTS_LIKELY($Fail != 0)) { /* Handle error in guard. */ + $JUMP($Fail); + } + + /* Handle error in body. */ + live--; + x(0) = x(live); + x(1) = x(live+1); + I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf)); + goto post_error_handling; +} + +// +// Garbage-collecting BIF with three arguments in either guard or body. +// + +i_gc_bif3(Fail, Bif, Live, Src2, Src3, Dst) { + typedef Eterm (*GcBifFunction)(Process*, Eterm*, Uint); + GcBifFunction bf; + Eterm result; + Uint live = (Uint) $Live; + + /* + * XXX This calling convention does not make sense. 'live' + * should point out the first argument, not the third + * (i.e. 'live' should not be incremented below). + */ + x(live) = x(SCRATCH_X_REG); + x(live+1) = $Src2; + x(live+2) = $Src3; + live += 2; + + bf = (GcBifFunction) $Bif; + ERTS_DBG_CHK_REDS(c_p, FCALLS); + c_p->fcalls = FCALLS; + SWAPOUT; + PROCESS_MAIN_CHK_LOCKS(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_CHK_MBUF_SZ(c_p); + result = (*bf)(c_p, reg, live); + ERTS_CHK_MBUF_SZ(c_p); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); + PROCESS_MAIN_CHK_LOCKS(c_p); + SWAPIN; + ERTS_HOLE_CHECK(c_p); + FCALLS = c_p->fcalls; + ERTS_DBG_CHK_REDS(c_p, FCALLS); + if (ERTS_LIKELY(is_value(result))) { + $REFRESH_GEN_DEST(); + $Dst = result; + $NEXT0(); + } + + /* Handle error in guard. */ + if (ERTS_LIKELY($Fail != 0)) { + $JUMP($Fail); + } + + /* Handle error in body. */ + live -= 2; + x(0) = x(live); + x(1) = x(live+1); + x(2) = x(live+2); + I = handle_error(c_p, I, reg, gcbif2mfa((void *) bf)); + goto post_error_handling; +} + +// +// The most general BIF call. The BIF may build any amount of data +// on the heap. The result is always returned in r(0). 
+// +call_bif(Exp) { + ErtsBifFunc bf; + Eterm result; + ErlHeapFragment *live_hf_end; + Export *export = (Export*) $Exp; + + if (!((FCALLS - 1) > 0 || (FCALLS-1) > neg_o_reds)) { + /* If we have run out of reductions, we do a context + switch before calling the bif */ + c_p->arity = GET_BIF_ARITY(export); + c_p->current = &export->info.mfa; + goto context_switch3; + } + + ERTS_MSACC_SET_BIF_STATE_CACHED_X(GET_BIF_MODULE(export), + GET_BIF_ADDRESS(export)); + + bf = GET_BIF_ADDRESS(export); + + PRE_BIF_SWAPOUT(c_p); + ERTS_DBG_CHK_REDS(c_p, FCALLS); + c_p->fcalls = FCALLS - 1; + if (FCALLS <= 0) { + save_calls(c_p, export); + } + ASSERT(!ERTS_PROC_IS_EXITING(c_p)); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + live_hf_end = c_p->mbuf; + ERTS_CHK_MBUF_SZ(c_p); + result = (*bf)(c_p, reg, I); + ERTS_CHK_MBUF_SZ(c_p); + ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result)); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + ERTS_HOLE_CHECK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); + if (ERTS_IS_GC_DESIRED(c_p)) { + Uint arity = GET_BIF_ARITY(export); + result = erts_gc_after_bif_call_lhf(c_p, live_hf_end, result, + reg, arity); + E = c_p->stop; + } + PROCESS_MAIN_CHK_LOCKS(c_p); + HTOP = HEAP_TOP(c_p); + FCALLS = c_p->fcalls; + ERTS_DBG_CHK_REDS(c_p, FCALLS); + /* We have to update the cache if we are enabled in order + to make sure no book keeping is done after we disabled + msacc. We don't always do this as it is quite expensive. */ + if (ERTS_MSACC_IS_ENABLED_CACHED_X()) { + ERTS_MSACC_UPDATE_CACHE_X(); + } + ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR); + if (ERTS_LIKELY(is_value(result))) { + r(0) = result; + CHECK_TERM(r(0)); + $NEXT0(); + } else if (c_p->freason == TRAP) { + SET_CP(c_p, I+2); + SET_I(c_p->i); + SWAPIN; + Dispatch(); + } + + /* + * Error handling. SWAPOUT is not needed because it was done above. + */ + ASSERT(c_p->stop == E); + I = handle_error(c_p, I, reg, &export->info.mfa); + goto post_error_handling; +} + +// +// Send is almost a standard call-BIF with two arguments, except for: +// 1. It cannot be traced. +// 2. There is no pointer to the send_2 function stored in +// the instruction. 
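When call_bif above sees freason == TRAP it resumes at c_p->i, so a trapping BIF must have left a continuation there. The producer side of that protocol, sketched under the assumption that it mirrors the consumer code above (the code_of helper is hypothetical):

    /* Hypothetical sketch of a trapping BIF's epilogue. */
    reg[0] = arg1;                       /* arguments for the trap target */
    reg[1] = arg2;
    c_p->arity = 2;
    c_p->i = code_of(trap_export);       /* hypothetical helper: entry address */
    c_p->freason = TRAP;
    return THE_NON_VALUE;                /* call_bif dispatches to c_p->i */
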
+// + +send() { + Eterm result; + + if (!(FCALLS > 0 || FCALLS > neg_o_reds)) { + /* If we have run out of reductions, we do a context + switch before calling the bif */ + c_p->arity = 2; + c_p->current = NULL; + goto context_switch3; + } + + PRE_BIF_SWAPOUT(c_p); + c_p->fcalls = FCALLS - 1; + result = erl_send(c_p, r(0), x(1)); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); + PROCESS_MAIN_CHK_LOCKS(c_p); + HTOP = HEAP_TOP(c_p); + FCALLS = c_p->fcalls; + if (ERTS_LIKELY(is_value(result))) { + r(0) = result; + CHECK_TERM(r(0)); + } else if (c_p->freason == TRAP) { + SET_CP(c_p, I+1); + SET_I(c_p->i); + SWAPIN; + Dispatch(); + } else { + goto find_func_info; + } +} + +call_nif := nif_bif.call_nif.epilogue; +apply_bif := nif_bif.apply_bif.epilogue; + +nif_bif.head() { + Eterm nif_bif_result; + Eterm bif_nif_arity; + BifFunction vbf; + ErlHeapFragment *live_hf_end; + ErtsCodeMFA *codemfa; +} + +nif_bif.call_nif() { + /* + * call_nif is always first instruction in function: + * + * I[-3]: Module + * I[-2]: Function + * I[-1]: Arity + * I[0]: &&call_nif + * I[1]: Function pointer to NIF function + * I[2]: Pointer to erl_module_nif + * I[3]: Function pointer to dirty NIF + * + * This layout is determined by the NifExport struct + */ + + ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF); + + codemfa = erts_code_to_codemfa(I); + + c_p->current = codemfa; /* current and vbf set to please handle_error */ + + DTRACE_NIF_ENTRY(c_p, codemfa); + + HEAVY_SWAPOUT; + + PROCESS_MAIN_CHK_LOCKS(c_p); + bif_nif_arity = codemfa->arity; + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); + + ASSERT(!ERTS_PROC_IS_EXITING(c_p)); + { + typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]); + NifF* fp = vbf = (NifF*) I[1]; + struct enif_environment_t env; + ASSERT(c_p->scheduler_data); + live_hf_end = c_p->mbuf; + ERTS_CHK_MBUF_SZ(c_p); + erts_pre_nif(&env, c_p, (struct erl_module_nif*)I[2], NULL); + nif_bif_result = (*fp)(&env, bif_nif_arity, reg); + if (env.exception_thrown) + nif_bif_result = THE_NON_VALUE; + erts_post_nif(&env); + ERTS_CHK_MBUF_SZ(c_p); + + PROCESS_MAIN_CHK_LOCKS(c_p); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR); + ASSERT(!env.exiting); + ASSERT(!ERTS_PROC_IS_EXITING(c_p)); + } + + DTRACE_NIF_RETURN(c_p, codemfa); +} + +nif_bif.apply_bif() { + /* + * At this point, I points to the code[0] in the export entry for + * the BIF: + * + * code[-3]: Module + * code[-2]: Function + * code[-1]: Arity + * code[0]: &&apply_bif + * code[1]: Function pointer to BIF function + */ + + if (!((FCALLS - 1) > 0 || (FCALLS - 1) > neg_o_reds)) { + /* If we have run out of reductions, we do a context + switch before calling the bif */ + goto context_switch; + } + + codemfa = erts_code_to_codemfa(I); + + ERTS_MSACC_SET_BIF_STATE_CACHED_X(codemfa->module, (BifFunction)Arg(0)); + + + /* In case we apply process_info/1,2 or load_nif/1 */ + c_p->current = codemfa; + $SET_CP_I_ABS(I); /* In case we apply check_process_code/2. */ + c_p->arity = 0; /* To allow garbage collection on ourselves + * (check_process_code/2). 
+ */ + DTRACE_BIF_ENTRY(c_p, codemfa); + + SWAPOUT; + ERTS_DBG_CHK_REDS(c_p, FCALLS - 1); + c_p->fcalls = FCALLS - 1; + vbf = (BifFunction) Arg(0); + PROCESS_MAIN_CHK_LOCKS(c_p); + bif_nif_arity = codemfa->arity; + ASSERT(bif_nif_arity <= 4); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + { + ErtsBifFunc bf = vbf; + ASSERT(!ERTS_PROC_IS_EXITING(c_p)); + live_hf_end = c_p->mbuf; + ERTS_CHK_MBUF_SZ(c_p); + nif_bif_result = (*bf)(c_p, reg, I); + ERTS_CHK_MBUF_SZ(c_p); + ASSERT(!ERTS_PROC_IS_EXITING(c_p) || + is_non_value(nif_bif_result)); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + PROCESS_MAIN_CHK_LOCKS(c_p); + } + /* We have to update the cache if we are enabled in order + to make sure no book keeping is done after we disabled + msacc. We don't always do this as it is quite expensive. */ + if (ERTS_MSACC_IS_ENABLED_CACHED_X()) + ERTS_MSACC_UPDATE_CACHE_X(); + ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR); + DTRACE_BIF_RETURN(c_p, codemfa); +} + +nif_bif.epilogue() { + ERTS_REQ_PROC_MAIN_LOCK(c_p); + ERTS_HOLE_CHECK(c_p); + if (ERTS_IS_GC_DESIRED(c_p)) { + nif_bif_result = erts_gc_after_bif_call_lhf(c_p, live_hf_end, + nif_bif_result, + reg, bif_nif_arity); + } + SWAPIN; /* There might have been a garbage collection. */ + FCALLS = c_p->fcalls; + ERTS_DBG_CHK_REDS(c_p, FCALLS); + if (ERTS_LIKELY(is_value(nif_bif_result))) { + r(0) = nif_bif_result; + CHECK_TERM(r(0)); + SET_I(c_p->cp); + c_p->cp = 0; + Goto(*I); + } else if (c_p->freason == TRAP) { + SET_I(c_p->i); + if (c_p->flags & F_HIBERNATE_SCHED) { + c_p->flags &= ~F_HIBERNATE_SCHED; + goto do_schedule; + } + Dispatch(); + } + I = handle_error(c_p, c_p->cp, reg, c_p->current); + goto post_error_handling; +} diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h index 48efce20e7..7556205063 100644 --- a/erts/emulator/beam/big.h +++ b/erts/emulator/beam/big.h @@ -70,7 +70,20 @@ typedef Uint dsize_t; /* Vector size type */ /* Check for small */ #define IS_USMALL(sgn,x) ((sgn) ? ((x) <= MAX_SMALL+1) : ((x) <= MAX_SMALL)) -#define IS_SSMALL(x) (((x) >= MIN_SMALL) && ((x) <= MAX_SMALL)) + +/* + * It seems that both clang and gcc will generate sub-optimal code + * for the more obvious way to write the range check: + * + * #define IS_SSMALL(x) (((x) >= MIN_SMALL) && ((x) <= MAX_SMALL)) + * + * Note that IS_SSMALL() may be used in the 32-bit emulator with + * a Uint64 argument. Therefore, we must test the size of the argument + * to ensure that the cast does not discard the high-order 32 bits. + */ +#define _IS_SSMALL32(x) (((Uint32) ((((x)) >> (SMALL_BITS-1)) + 1)) < 2) +#define _IS_SSMALL64(x) (((Uint64) ((((x)) >> (SMALL_BITS-1)) + 1)) < 2) +#define IS_SSMALL(x) (sizeof(x) == sizeof(Uint32) ? 
_IS_SSMALL32(x) : _IS_SSMALL64(x)) /* The heap size needed for a bignum */ #define BIG_NEED_SIZE(x) ((x) + 1) diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c index 76a0c5c716..35b2365655 100644 --- a/erts/emulator/beam/break.c +++ b/erts/emulator/beam/break.c @@ -109,8 +109,8 @@ process_killer(void) ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND; erts_aint32_t state; erts_proc_inc_refc(rp); - erts_smp_proc_lock(rp, rp_locks); - state = erts_smp_atomic32_read_acqb(&rp->state); + erts_proc_lock(rp, rp_locks); + state = erts_atomic32_read_acqb(&rp->state); if (state & (ERTS_PSFLG_FREE | ERTS_PSFLG_EXITING | ERTS_PSFLG_ACTIVE @@ -132,7 +132,7 @@ process_killer(void) NULL, 0); } - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); erts_proc_dec_refc(rp); } case 'n': br = 1; break; @@ -219,7 +219,7 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p) /* Display the state */ erts_print(to, to_arg, "State: "); - state = erts_smp_atomic32_read_acqb(&p->state); + state = erts_atomic32_read_acqb(&p->state); erts_dump_process_state(to, to_arg, state); if (state & ERTS_PSFLG_GC) { garbing = 1; @@ -258,7 +258,7 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p) erts_print(to, to_arg, "Spawned by: %T\n", p->parent); approx_started = (time_t) p->approx_started; erts_print(to, to_arg, "Started: %s", ctime(&approx_started)); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p); + ERTS_MSGQ_MV_INQ2PRIVQ(p); erts_print(to, to_arg, "Message queue length: %d\n", p->msg.len); /* display the message queue only if there is anything in it */ @@ -344,9 +344,7 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p) erts_program_counter_info(to, to_arg, p); } else { erts_print(to, to_arg, "Stack dump:\n"); -#ifdef ERTS_SMP if (!garbing) -#endif erts_stack_dump(to, to_arg, p); } @@ -358,11 +356,9 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p) static void print_garb_info(fmtfn_t to, void *to_arg, Process* p) { -#ifdef ERTS_SMP /* ERTS_SMP: A scheduler is probably concurrently doing gc... */ if (!ERTS_IS_CRASH_DUMPING) return; -#endif erts_print(to, to_arg, "New heap start: %bpX\n", p->heap); erts_print(to, to_arg, "New heap top: %bpX\n", p->htop); erts_print(to, to_arg, "Stack top: %bpX\n", p->stop); @@ -512,7 +508,7 @@ do_break(void) erts_free_read_env(mode); #endif /* __WIN32__ */ - ASSERT(erts_smp_thr_progress_is_blocking()); + ASSERT(erts_thr_progress_is_blocking()); erts_printf("\n" "BREAK: (a)bort (c)ontinue (p)roc info (i)nfo (l)oaded\n" @@ -698,9 +694,7 @@ static int crash_dump_limited_writer(void* vfdp, char* buf, size_t len) void erl_crash_dump_v(char *file, int line, char* fmt, va_list args) { -#ifdef ERTS_SMP ErtsThrPrgrData tpd_buf; /* in case we aren't a managed thread... */ -#endif int fd; size_t envsz; time_t now; @@ -717,7 +711,6 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) if (ERTS_SOMEONE_IS_CRASH_DUMPING) return; -#ifdef ERTS_SMP /* Order all managed threads to block, this has to be done first to guarantee that this is the only thread to generate crash dump. */ @@ -741,12 +734,9 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) #endif /* Allow us to pass certain places without locking... 
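The IS_SSMALL rewrite in big.h above replaces the two-comparison range test with a single shift-and-compare: shifting x right by SMALL_BITS-1 yields 0 for in-range non-negative values and -1 for in-range negative ones, so adding 1 maps exactly the in-range values to {0, 1}. A self-contained check of the equivalence (the SMALL_BITS value is chosen for illustration; arithmetic right shift of negative values is assumed, as the emulator already assumes):

    #include <assert.h>
    #include <stdint.h>

    #define SMALL_BITS 60   /* illustrative; the real value is platform derived */
    #define MIN_SMALL  (-(INT64_C(1) << (SMALL_BITS - 1)))
    #define MAX_SMALL  ((INT64_C(1) << (SMALL_BITS - 1)) - 1)
    #define IS_SSMALL64(x) (((uint64_t) (((x) >> (SMALL_BITS - 1)) + 1)) < 2)

    int main(void)
    {
        int64_t probes[] = { 0, 1, -1, MAX_SMALL, MAX_SMALL + 1,
                             MIN_SMALL, MIN_SMALL - 1 };
        int i;
        for (i = 0; i < 7; i++) {
            int naive = probes[i] >= MIN_SMALL && probes[i] <= MAX_SMALL;
            assert(naive == IS_SSMALL64(probes[i]));
        }
        return 0;
    }
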
*/ - erts_smp_atomic32_set_mb(&erts_writing_erl_crash_dump, 1); - erts_smp_tsd_set(erts_is_crash_dumping_key, (void *) 1); + erts_atomic32_set_mb(&erts_writing_erl_crash_dump, 1); + erts_tsd_set(erts_is_crash_dumping_key, (void *) 1); -#else /* !ERTS_SMP */ - erts_writing_erl_crash_dump = 1; -#endif /* ERTS_SMP */ envsz = sizeof(env); /* ERL_CRASH_DUMP_SECONDS not set @@ -841,7 +831,6 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) erts_print_nif_taints(to, to_arg); erts_cbprintf(to, to_arg, "Atoms: %d\n", atom_table_size()); -#ifdef USE_THREADS /* We want to note which thread it was that called erts_exit */ if (erts_get_scheduler_data()) { erts_cbprintf(to, to_arg, "Calling Thread: scheduler:%d\n", @@ -852,9 +841,6 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) else erts_cbprintf(to, to_arg, "Calling Thread: %p\n", erts_thr_self()); } -#else - erts_cbprintf(to, to_arg, "Calling Thread: scheduler:1\n"); -#endif #if defined(ERTS_HAVE_TRY_CATCH) @@ -873,7 +859,6 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) } #endif -#ifdef ERTS_SMP #ifdef ERTS_SYS_SUSPEND_SIGNAL @@ -895,7 +880,6 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) */ erts_thr_progress_fatal_error_wait(60000); /* Either worked or not... */ -#endif #ifndef ERTS_HAVE_TRY_CATCH /* This is safe to call here, as all schedulers are blocked */ diff --git a/erts/emulator/beam/bs_instrs.tab b/erts/emulator/beam/bs_instrs.tab new file mode 100644 index 0000000000..9f03b19731 --- /dev/null +++ b/erts/emulator/beam/bs_instrs.tab @@ -0,0 +1,1021 @@ +// -*- c -*- +// +// %CopyrightBegin% +// +// Copyright Ericsson AB 2017. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// %CopyrightEnd% +// + +%if ARCH_64 +BS_SAFE_MUL(A, B, Fail, Dst) { + Uint64 res = ($A) * ($B); + if (res / $B != $A) { + $Fail; + } + $Dst = res; +} +%else +BS_SAFE_MUL(A, B, Fail, Dst) { + Uint64 res = (Uint64)($A) * (Uint64)($B); + if ((res >> (8*sizeof(Uint))) != 0) { + $Fail; + } + $Dst = res; +} +%endif + +BS_GET_FIELD_SIZE(Bits, Unit, Fail, Dst) { + Sint signed_size; + Uint uint_size; + Uint temp_bits; + + if (is_small($Bits)) { + signed_size = signed_val($Bits); + if (signed_size < 0) { + $Fail; + } + uint_size = (Uint) signed_size; + } else { + if (!term_to_Uint($Bits, &temp_bits)) { + $Fail; + } + uint_size = temp_bits; + } + $BS_SAFE_MUL(uint_size, $Unit, $Fail, $Dst); +} + +BS_GET_UNCHECKED_FIELD_SIZE(Bits, Unit, Fail, Dst) { + Sint signed_size; + Uint uint_size; + Uint temp_bits; + + if (is_small($Bits)) { + signed_size = signed_val($Bits); + if (signed_size < 0) { + $Fail; + } + uint_size = (Uint) signed_size; + } else { + if (!term_to_Uint($Bits, &temp_bits)) { + $Fail; + } + uint_size = temp_bits; + } + $Dst = uint_size * $Unit; +} + +TEST_BIN_VHEAP(VNh, Nh, Live) { + Uint need = $Nh; + if (E - HTOP < need || MSO(c_p).overhead + $VNh >= BIN_VHEAP_SZ(c_p)) { + SWAPOUT; + PROCESS_MAIN_CHK_LOCKS(c_p); + FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, $Live, FCALLS); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + PROCESS_MAIN_CHK_LOCKS(c_p); + SWAPIN; + } + HEAP_SPACE_VERIFIED(need); +} + +i_bs_get_binary_all2(Fail, Ms, Live, Unit, Dst) { + ErlBinMatchBuffer *_mb; + Eterm _result; + + $GC_TEST(0, ERL_SUB_BIN_SIZE, $Live); + _mb = ms_matchbuffer($Ms); + if (((_mb->size - _mb->offset) % $Unit) == 0) { + LIGHT_SWAPOUT; + _result = erts_bs_get_binary_all_2(c_p, _mb); + LIGHT_SWAPIN; + HEAP_SPACE_VERIFIED(0); + ASSERT(is_value(_result)); + $Dst = _result; + } else { + HEAP_SPACE_VERIFIED(0); + $FAIL($Fail); + } +} + +i_bs_get_binary2(Fail, Ms, Live, Sz, Flags, Dst) { + ErlBinMatchBuffer *_mb; + Eterm _result; + Uint _size; + $BS_GET_FIELD_SIZE($Sz, (($Flags) >> 3), $FAIL($Fail), _size); + $GC_TEST(0, ERL_SUB_BIN_SIZE, $Live); + _mb = ms_matchbuffer($Ms); + LIGHT_SWAPOUT; + _result = erts_bs_get_binary_2(c_p, _size, $Flags, _mb); + LIGHT_SWAPIN; + HEAP_SPACE_VERIFIED(0); + if (is_non_value(_result)) { + $FAIL($Fail); + } else { + $Dst = _result; + } +} + +i_bs_get_binary_imm2(Fail, Ms, Live, Sz, Flags, Dst) { + ErlBinMatchBuffer *_mb; + Eterm _result; + $GC_TEST(0, heap_bin_size(ERL_ONHEAP_BIN_LIMIT), $Live); + _mb = ms_matchbuffer($Ms); + LIGHT_SWAPOUT; + _result = erts_bs_get_binary_2(c_p, $Sz, $Flags, _mb); + LIGHT_SWAPIN; + HEAP_SPACE_VERIFIED(0); + if (is_non_value(_result)) { + $FAIL($Fail); + } else { + $Dst = _result; + } +} + +i_bs_get_float2(Fail, Ms, Live, Sz, Flags, Dst) { + ErlBinMatchBuffer *_mb; + Eterm _result; + Sint _size; + + if (!is_small($Sz) || (_size = unsigned_val($Sz)) > 64) { + $FAIL($Fail); + } + _size *= (($Flags) >> 3); + $GC_TEST(0, FLOAT_SIZE_OBJECT, $Live); + _mb = ms_matchbuffer($Ms); + LIGHT_SWAPOUT; + _result = erts_bs_get_float_2(c_p, _size, ($Flags), _mb); + LIGHT_SWAPIN; + HEAP_SPACE_VERIFIED(0); + if (is_non_value(_result)) { + $FAIL($Fail); + } else { + $Dst = _result; + } +} + +i_bs_skip_bits2(Fail, Ms, Bits, Unit) { + ErlBinMatchBuffer *_mb; + size_t new_offset; + Uint _size; + + _mb = ms_matchbuffer($Ms); + $BS_GET_FIELD_SIZE($Bits, $Unit, $FAIL($Fail), _size); + new_offset = _mb->offset + _size; + if (new_offset <= _mb->size) { + _mb->offset = new_offset; + } else { + $FAIL($Fail); + } +} + +i_bs_skip_bits_all2(Fail, Ms, Unit) { + 
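BS_SAFE_MUL below detects overflow by dividing the product back: on a 64-bit word, res / B != A exactly when A * B wrapped. A standalone version of the check (with a zero guard added here, which the macro can omit because the unit operand is never zero in generated bit-syntax code):

    #include <stdint.h>

    /* Standalone form of the 64-bit BS_SAFE_MUL overflow check. */
    static int safe_mul64(uint64_t a, uint64_t b, uint64_t *dst)
    {
        uint64_t res = a * b;
        if (b != 0 && res / b != a) {
            return 0;                    /* overflow: caller takes $Fail */
        }
        *dst = res;
        return 1;
    }
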
ErlBinMatchBuffer *_mb; + _mb = ms_matchbuffer($Ms); + if (((_mb->size - _mb->offset) % $Unit) == 0) { + _mb->offset = _mb->size; + } else { + $FAIL($Fail); + } +} + +i_bs_skip_bits_imm2(Fail, Ms, Bits) { + ErlBinMatchBuffer *_mb; + size_t new_offset; + _mb = ms_matchbuffer($Ms); + new_offset = _mb->offset + ($Bits); + if (new_offset <= _mb->size) { + _mb->offset = new_offset; + } else { + $FAIL($Fail); + } +} + +i_new_bs_put_binary(Fail, Sz, Flags, Src) { + Sint _size; + $BS_GET_UNCHECKED_FIELD_SIZE($Sz, (($Flags) >> 3), $BADARG($Fail), _size); + if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2(($Src), _size))) { + $BADARG($Fail); + } +} + +i_new_bs_put_binary_all(Fail, Src, Unit) { + if (!erts_new_bs_put_binary_all(ERL_BITS_ARGS_2(($Src), ($Unit)))) { + $BADARG($Fail); + } +} + +i_new_bs_put_binary_imm(Fail, Sz, Src) { + if (!erts_new_bs_put_binary(ERL_BITS_ARGS_2(($Src), ($Sz)))) { + $BADARG($Fail); + } +} + +i_new_bs_put_float(Fail, Sz, Flags, Src) { + Sint _size; + $BS_GET_UNCHECKED_FIELD_SIZE($Sz, (($Flags) >> 3), $BADARG($Fail), _size); + if (!erts_new_bs_put_float(c_p, ($Src), _size, ($Flags))) { + $BADARG($Fail); + } +} + +i_new_bs_put_float_imm(Fail, Sz, Flags, Src) { + if (!erts_new_bs_put_float(c_p, ($Src), ($Sz), ($Flags))) { + $BADARG($Fail); + } +} + +i_new_bs_put_integer(Fail, Sz, Flags, Src) { + Sint _size; + $BS_GET_UNCHECKED_FIELD_SIZE($Sz, (($Flags) >> 3), $BADARG($Fail), _size); + if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3(($Src), _size, ($Flags)))) { + $BADARG($Fail); + } +} + +i_new_bs_put_integer_imm(Fail, Sz, Flags, Src) { + if (!erts_new_bs_put_integer(ERL_BITS_ARGS_3(($Src), ($Sz), ($Flags)))) { + $BADARG($Fail); + } +} + +# +# i_bs_init* +# + +i_bs_init_fail_heap := bs_init.fail_heap.verify.execute; +i_bs_init_fail := bs_init.fail.verify.execute; +i_bs_init := bs_init.plain.execute; +i_bs_init_heap := bs_init.heap.execute; + +bs_init.head() { + Eterm BsOp1; + Eterm BsOp2; +} + +bs_init.fail_heap(Size, HeapAlloc) { + BsOp1 = $Size; + BsOp2 = $HeapAlloc; +} + +bs_init.fail(Size) { + BsOp1 = $Size; + BsOp2 = 0; +} + +bs_init.plain(Size) { + BsOp1 = $Size; + BsOp2 = 0; +} + +bs_init.heap(Size, HeapAlloc) { + BsOp1 = $Size; + BsOp2 = $HeapAlloc; +} + +bs_init.verify(Fail) { + if (is_small(BsOp1)) { + Sint size = signed_val(BsOp1); + if (size < 0) { + $BADARG($Fail); + } + BsOp1 = (Eterm) size; + } else { + Uint bytes; + + if (!term_to_Uint(BsOp1, &bytes)) { + c_p->freason = bytes; + $FAIL_HEAD_OR_BODY($Fail); + } + if ((bytes >> (8*sizeof(Uint)-3)) != 0) { + $SYSTEM_LIMIT($Fail); + } + BsOp1 = (Eterm) bytes; + } +} + +bs_init.execute(Live, Dst) { + if (BsOp1 <= ERL_ONHEAP_BIN_LIMIT) { + ErlHeapBin* hb; + Uint bin_need; + + bin_need = heap_bin_size(BsOp1); + erts_bin_offset = 0; + erts_writable_bin = 0; + $GC_TEST(0, bin_need+BsOp2+ERL_SUB_BIN_SIZE, $Live); + hb = (ErlHeapBin *) HTOP; + HTOP += bin_need; + hb->thing_word = header_heap_bin(BsOp1); + hb->size = BsOp1; + erts_current_bin = (byte *) hb->data; + $Dst = make_binary(hb); + } else { + Binary* bptr; + ProcBin* pb; + + erts_bin_offset = 0; + erts_writable_bin = 0; + $TEST_BIN_VHEAP(BsOp1 / sizeof(Eterm), + BsOp2 + PROC_BIN_SIZE + ERL_SUB_BIN_SIZE, $Live); + + /* + * Allocate the binary struct itself. + */ + bptr = erts_bin_nrml_alloc(BsOp1); + erts_current_bin = (byte *) bptr->orig_bytes; + + /* + * Now allocate the ProcBin on the heap. 
+ */ + pb = (ProcBin *) HTOP; + HTOP += PROC_BIN_SIZE; + pb->thing_word = HEADER_PROC_BIN; + pb->size = BsOp1; + pb->next = MSO(c_p).first; + MSO(c_p).first = (struct erl_off_heap_header*) pb; + pb->val = bptr; + pb->bytes = (byte*) bptr->orig_bytes; + pb->flags = 0; + + OH_OVERHEAD(&(MSO(c_p)), BsOp1 / sizeof(Eterm)); + + $Dst = make_binary(pb); + } +} + +# +# i_bs_init_bits* +# + +i_bs_init_bits := bs_init_bits.plain.execute; +i_bs_init_bits_heap := bs_init_bits.heap.execute; +i_bs_init_bits_fail := bs_init_bits.fail.verify.execute; +i_bs_init_bits_fail_heap := bs_init_bits.fail_heap.verify.execute; + +bs_init_bits.head() { + Eterm num_bits_term; + Uint num_bits; + Uint alloc; +} + +bs_init_bits.plain(NumBits) { + num_bits = $NumBits; + alloc = 0; +} + +bs_init_bits.heap(NumBits, Alloc) { + num_bits = $NumBits; + alloc = $Alloc; +} + +bs_init_bits.fail(NumBitsTerm) { + num_bits_term = $NumBitsTerm; + alloc = 0; +} + +bs_init_bits.fail_heap(NumBitsTerm, Alloc) { + num_bits_term = $NumBitsTerm; + alloc = $Alloc; +} + +bs_init_bits.verify(Fail) { + if (is_small(num_bits_term)) { + Sint size = signed_val(num_bits_term); + if (size < 0) { + $BADARG($Fail); + } + num_bits = (Uint) size; + } else { + Uint bits; + + if (!term_to_Uint(num_bits_term, &bits)) { + c_p->freason = bits; + $FAIL_HEAD_OR_BODY($Fail); + } + num_bits = (Uint) bits; + } +} + +bs_init_bits.execute(Live, Dst) { + Eterm new_binary; + Uint num_bytes = ((Uint64)num_bits+(Uint64)7) >> 3; + + if (num_bits & 7) { + alloc += ERL_SUB_BIN_SIZE; + } + if (num_bytes <= ERL_ONHEAP_BIN_LIMIT) { + alloc += heap_bin_size(num_bytes); + } else { + alloc += PROC_BIN_SIZE; + } + $test_heap(alloc, $Live); + + /* num_bits = Number of bits to build + * num_bytes = Number of bytes to allocate in the binary + * alloc = Total number of words to allocate on heap + * Operands: NotUsed NotUsed Dst + */ + if (num_bytes <= ERL_ONHEAP_BIN_LIMIT) { + ErlHeapBin* hb; + + erts_bin_offset = 0; + erts_writable_bin = 0; + hb = (ErlHeapBin *) HTOP; + HTOP += heap_bin_size(num_bytes); + hb->thing_word = header_heap_bin(num_bytes); + hb->size = num_bytes; + erts_current_bin = (byte *) hb->data; + new_binary = make_binary(hb); + + do_bits_sub_bin: + if (num_bits & 7) { + ErlSubBin* sb; + + sb = (ErlSubBin *) HTOP; + HTOP += ERL_SUB_BIN_SIZE; + sb->thing_word = HEADER_SUB_BIN; + sb->size = num_bytes - 1; + sb->bitsize = num_bits & 7; + sb->offs = 0; + sb->bitoffs = 0; + sb->is_writable = 0; + sb->orig = new_binary; + new_binary = make_binary(sb); + } + HEAP_SPACE_VERIFIED(0); + $Dst = new_binary; + } else { + Binary* bptr; + ProcBin* pb; + + erts_bin_offset = 0; + erts_writable_bin = 0; + + /* + * Allocate the binary struct itself. + */ + bptr = erts_bin_nrml_alloc(num_bytes); + erts_current_bin = (byte *) bptr->orig_bytes; + + /* + * Now allocate the ProcBin on the heap. 
+ */ + pb = (ProcBin *) HTOP; + HTOP += PROC_BIN_SIZE; + pb->thing_word = HEADER_PROC_BIN; + pb->size = num_bytes; + pb->next = MSO(c_p).first; + MSO(c_p).first = (struct erl_off_heap_header*) pb; + pb->val = bptr; + pb->bytes = (byte*) bptr->orig_bytes; + pb->flags = 0; + OH_OVERHEAD(&(MSO(c_p)), pb->size / sizeof(Eterm)); + new_binary = make_binary(pb); + goto do_bits_sub_bin; + } +} + +bs_add(Fail, Src1, Src2, Unit, Dst) { + Eterm Op1 = $Src1; + Eterm Op2 = $Src2; + Uint unit = $Unit; + + if (is_both_small(Op1, Op2)) { + Sint Arg1 = signed_val(Op1); + Sint Arg2 = signed_val(Op2); + + if (Arg1 >= 0 && Arg2 >= 0) { + $BS_SAFE_MUL(Arg2, unit, $SYSTEM_LIMIT($Fail), Op1); + Op1 += Arg1; + + store_bs_add_result: + if (Op1 <= MAX_SMALL) { + Op1 = make_small(Op1); + } else { + /* + * May generate a heap fragment, but in this + * particular case it is OK, since the value will be + * stored into an x register (the GC will scan x + * registers for references to heap fragments) and + * there is no risk that value can be stored into a + * location that is not scanned for heap-fragment + * references (such as the heap). + */ + SWAPOUT; + Op1 = erts_make_integer(Op1, c_p); + HTOP = HEAP_TOP(c_p); + } + $Dst = Op1; + $NEXT0(); + } + $BADARG($Fail); + } else { + Uint a; + Uint b; + Uint c; + + /* + * Now we know that one of the arguments is + * not a small. We must convert both arguments + * to Uints and check for errors at the same time. + * + * Error checking is tricky. + * + * If one of the arguments is not numeric or + * not positive, the error reason is BADARG. + * + * Otherwise if both arguments are numeric, + * but at least one argument does not fit in + * an Uint, the reason is SYSTEM_LIMIT. + */ + + if (!term_to_Uint(Op1, &a)) { + if (a == BADARG) { + $BADARG($Fail); + } + if (!term_to_Uint(Op2, &b)) { + c_p->freason = b; + $FAIL_HEAD_OR_BODY($Fail); + } + $SYSTEM_LIMIT($Fail); + } else if (!term_to_Uint(Op2, &b)) { + c_p->freason = b; + $FAIL_HEAD_OR_BODY($Fail); + } + + /* + * The arguments are now correct and stored in a and b. + */ + + $BS_SAFE_MUL(b, unit, $SYSTEM_LIMIT($Fail), c); + Op1 = a + c; + if (Op1 < a) { + /* + * If the result is less than one of the + * arguments, there must have been an overflow. + */ + $SYSTEM_LIMIT($Fail); + } + goto store_bs_add_result; + } + /* No fallthrough */ + ASSERT(0); +} + +bs_put_string(Len, Ptr) { + erts_new_bs_put_string(ERL_BITS_ARGS_2((byte *) $Ptr, $Len)); +} + +i_bs_append(Fail, ExtraHeap, Live, Unit, Size, Dst) { + Uint live = $Live; + Uint res; + + HEAVY_SWAPOUT; + reg[live] = x(SCRATCH_X_REG); + res = erts_bs_append(c_p, reg, live, $Size, $ExtraHeap, $Unit); + HEAVY_SWAPIN; + if (is_non_value(res)) { + /* c_p->freason is already set (to BADARG or SYSTEM_LIMIT). */ + $FAIL_HEAD_OR_BODY($Fail); + } + $Dst = res; +} + +i_bs_private_append(Fail, Unit, Size, Src, Dst) { + Eterm res; + + res = erts_bs_private_append(c_p, $Src, $Size, $Unit); + if (is_non_value(res)) { + /* c_p->freason is already set (to BADARG or SYSTEM_LIMIT). */ + $FAIL_HEAD_OR_BODY($Fail); + } + $Dst = res; +} + +bs_init_writable() { + HEAVY_SWAPOUT; + r(0) = erts_bs_init_writable(c_p, r(0)); + HEAVY_SWAPIN; +} + +i_bs_utf8_size(Src, Dst) { + Eterm arg = $Src; + Eterm result; + + /* + * Calculate the number of bytes needed to encode the source + * operand to UTF-8. If the source operand is invalid (e.g. wrong + * type or range) we return a nonsense integer result (0 or 4). We + * can get away with that because we KNOW that bs_put_utf8 will do + * full error checking. 
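The size computation that follows compares tagged values directly, as in arg < make_small(0x80UL). That is sound because make_small is strictly increasing for non-negative values, so the tagged comparison orders exactly like the untagged one; out-of-range or non-small inputs merely produce one of the nonsense-but-safe results the comment describes. A quick self-contained check, assuming the usual encoding of a small as its value shifted left past a 4-bit immediate tag:

    #include <assert.h>
    #include <stdint.h>

    /* Assumed encoding for illustration: small = (value << 4) | 0xF. */
    #define make_small(x) ((((uint64_t) (x)) << 4) | UINT64_C(0xF))

    int main(void)
    {
        /* Tagging preserves order for non-negative values, which is
         * all i_bs_utf8_size and i_bs_utf16_size rely on. */
        assert(make_small(0x7F)   < make_small(0x80));
        assert(make_small(0x7FF)  < make_small(0x800));
        assert(make_small(0xFFFF) < make_small(0x10000));
        return 0;
    }
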
+ */ + + if (arg < make_small(0x80UL)) { + result = make_small(1); + } else if (arg < make_small(0x800UL)) { + result = make_small(2); + } else if (arg < make_small(0x10000UL)) { + result = make_small(3); + } else { + result = make_small(4); + } + $Dst = result; +} + +i_bs_put_utf8(Fail, Src) { + if (!erts_bs_put_utf8(ERL_BITS_ARGS_1($Src))) { + $BADARG($Fail); + } +} + +i_bs_utf16_size(Src, Dst) { + Eterm arg = $Src; + Eterm result = make_small(2); + + /* + * Calculate the number of bytes needed to encode the source + * operand to UTF-16. If the source operand is invalid (e.g. wrong + * type or range) we return a nonsense integer result (2 or 4). We + * can get away with that because we KNOW that bs_put_utf16 will do + * full error checking. + */ + + if (arg >= make_small(0x10000UL)) { + result = make_small(4); + } + $Dst = result; +} + +bs_put_utf16(Fail, Flags, Src) { + if (!erts_bs_put_utf16(ERL_BITS_ARGS_2($Src, $Flags))) { + $BADARG($Fail); + } +} + +// Validate a value about to be stored in a binary. +i_bs_validate_unicode(Fail, Src) { + Eterm val = $Src; + + /* + * There is no need to untag the integer, but it IS necessary + * to make sure it is small (if the term is a bignum, it could + * slip through the test, and there is no further test that + * would catch it, since bit syntax construction silently masks + * too big numbers). + */ + if (is_not_small(val) || val > make_small(0x10FFFFUL) || + (make_small(0xD800UL) <= val && val <= make_small(0xDFFFUL))) { + $BADARG($Fail); + } +} + +// Validate a value that has been matched out. +i_bs_validate_unicode_retract(Fail, Src, Ms) { + /* + * There is no need to untag the integer, but it IS necessary + * to make sure it is small (a bignum pointer could fall in + * the valid range). + */ + + Eterm i = $Src; + if (is_not_small(i) || i > make_small(0x10FFFFUL) || + (make_small(0xD800UL) <= i && i <= make_small(0xDFFFUL))) { + Eterm ms = $Ms; /* Match context */ + ErlBinMatchBuffer* mb; + + /* Invalid value. Retract the position in the binary. */ + mb = ms_matchbuffer(ms); + mb->offset -= 32; + $BADARG($Fail); + } +} + + +// +// Matching of binaries. +// + +i_bs_start_match2 := bs_start_match.fetch.execute; + +bs_start_match.head() { + Eterm context; +} + +bs_start_match.fetch(Src) { + context = $Src; +} + +bs_start_match.execute(Fail, Live, Slots, Dst) { + Uint slots; + Uint live; + Eterm header; + if (!is_boxed(context)) { + $FAIL($Fail); + } + header = *boxed_val(context); + slots = $Slots; + live = $Live; + if (header_is_bin_matchstate(header)) { + ErlBinMatchState* ms = (ErlBinMatchState *) boxed_val(context); + Uint actual_slots = HEADER_NUM_SLOTS(header); + ms->save_offset[0] = ms->mb.offset; + if (actual_slots < slots) { + ErlBinMatchState* dst; + Uint live = $Live; + Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots); + + $GC_TEST_PRESERVE(wordsneeded, live, context); + ms = (ErlBinMatchState *) boxed_val(context); + dst = (ErlBinMatchState *) HTOP; + *dst = *ms; + *HTOP = HEADER_BIN_MATCHSTATE(slots); + HTOP += wordsneeded; + HEAP_SPACE_VERIFIED(0); + $Dst = make_matchstate(dst); + } + } else if (is_binary_header(header)) { + Eterm result; + Uint wordsneeded = ERL_BIN_MATCHSTATE_SIZE(slots); + $GC_TEST_PRESERVE(wordsneeded, live, context); + HEAP_TOP(c_p) = HTOP; +#ifdef DEBUG + c_p->stop = E; /* Needed for checking in HeapOnlyAlloc().
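The size and validation instructions above exploit the fact that small-integer tags preserve ordering, so tagged code points can be compared directly without untagging. What they ultimately enforce is the standard definition of a Unicode scalar value: at most 0x10FFFF and outside the surrogate block 0xD800-0xDFFF. The same predicates on untagged code points, as a plain C sketch:

    #include <stdint.h>

    /* A valid Unicode scalar value: at most 0x10FFFF, not a surrogate. */
    static int is_valid_scalar_value(uint32_t cp)
    {
        return cp <= 0x10FFFFu && !(cp >= 0xD800u && cp <= 0xDFFFu);
    }

    /* Bytes needed to encode cp in UTF-8; like i_bs_utf8_size it may
     * return nonsense for invalid input, because the following put
     * instruction re-validates everything anyway. */
    static unsigned utf8_size(uint32_t cp)
    {
        if (cp < 0x80u)    return 1;
        if (cp < 0x800u)   return 2;
        if (cp < 0x10000u) return 3;
        return 4;
    }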
*/ +#endif + result = erts_bs_start_match_2(c_p, context, slots); + HTOP = HEAP_TOP(c_p); + HEAP_SPACE_VERIFIED(0); + if (is_non_value(result)) { + $FAIL($Fail); + } + $Dst = result; + } else { + $FAIL($Fail); + } +} + +bs_test_zero_tail2(Fail, Ctx) { + ErlBinMatchBuffer *_mb; + _mb = (ErlBinMatchBuffer*) ms_matchbuffer($Ctx); + if (_mb->size != _mb->offset) { + $FAIL($Fail); + } +} + +bs_test_tail_imm2(Fail, Ctx, Offset) { + ErlBinMatchBuffer *_mb; + _mb = ms_matchbuffer($Ctx); + if (_mb->size - _mb->offset != $Offset) { + $FAIL($Fail); + } +} + +bs_test_unit(Fail, Ctx, Unit) { + ErlBinMatchBuffer *_mb; + _mb = ms_matchbuffer($Ctx); + if ((_mb->size - _mb->offset) % $Unit) { + $FAIL($Fail); + } +} + +bs_test_unit8(Fail, Ctx) { + ErlBinMatchBuffer *_mb; + _mb = ms_matchbuffer($Ctx); + if ((_mb->size - _mb->offset) & 7) { + $FAIL($Fail); + } +} + +i_bs_get_integer_8(Ctx, Fail, Dst) { + Eterm _result; + ErlBinMatchBuffer* _mb = ms_matchbuffer($Ctx); + + if (_mb->size - _mb->offset < 8) { + $FAIL($Fail); + } + if (BIT_OFFSET(_mb->offset) != 0) { + _result = erts_bs_get_integer_2(c_p, 8, 0, _mb); + } else { + _result = make_small(_mb->base[BYTE_OFFSET(_mb->offset)]); + _mb->offset += 8; + } + $Dst = _result; +} + +i_bs_get_integer_16(Ctx, Fail, Dst) { + Eterm _result; + ErlBinMatchBuffer* _mb = ms_matchbuffer($Ctx); + + if (_mb->size - _mb->offset < 16) { + $FAIL($Fail); + } + if (BIT_OFFSET(_mb->offset) != 0) { + _result = erts_bs_get_integer_2(c_p, 16, 0, _mb); + } else { + _result = make_small(get_int16(_mb->base+BYTE_OFFSET(_mb->offset))); + _mb->offset += 16; + } + $Dst = _result; +} + +%if ARCH_64 +i_bs_get_integer_32(Ctx, Fail, Dst) { + Uint32 _integer; + ErlBinMatchBuffer* _mb = ms_matchbuffer($Ctx); + + if (_mb->size - _mb->offset < 32) { + $FAIL($Fail); + } + if (BIT_OFFSET(_mb->offset) != 0) { + _integer = erts_bs_get_unaligned_uint32(_mb); + } else { + _integer = get_int32(_mb->base + _mb->offset/8); + } + _mb->offset += 32; + $Dst = make_small(_integer); +} +%endif + +i_bs_get_integer_imm := bs_get_integer.fetch.execute; +i_bs_get_integer_small_imm := bs_get_integer.fetch_small.execute; + +bs_get_integer.head() { + Eterm Ms, Sz; +} + +bs_get_integer.fetch(Ctx, Size, Live) { + Uint wordsneeded; + Ms = $Ctx; + Sz = $Size; + wordsneeded = 1+WSIZE(NBYTES(Sz)); + $GC_TEST_PRESERVE(wordsneeded, $Live, Ms); +} + +bs_get_integer.fetch_small(Ctx, Size) { + Ms = $Ctx; + Sz = $Size; +} + +bs_get_integer.execute(Fail, Flags, Dst) { + ErlBinMatchBuffer* mb; + Eterm result; + + mb = ms_matchbuffer(Ms); + LIGHT_SWAPOUT; + result = erts_bs_get_integer_2(c_p, Sz, $Flags, mb); + LIGHT_SWAPIN; + HEAP_SPACE_VERIFIED(0); + if (is_non_value(result)) { + $FAIL($Fail); + } + $Dst = result; +} + +i_bs_get_integer(Fail, Live, FlagsAndUnit, Ms, Sz, Dst) { + Uint flags; + Uint size; + Eterm ms; + ErlBinMatchBuffer* mb; + Eterm result; + + flags = $FlagsAndUnit; + ms = $Ms; + $BS_GET_FIELD_SIZE($Sz, (flags >> 3), $FAIL($Fail), size); + if (size >= SMALL_BITS) { + Uint wordsneeded; + /* Check bits size before potential gc. + * We do not want a gc and then realize we don't need + * the allocated space (i.e. if the op fails). + * + * Remember to re-acquire the matchbuffer after gc. 
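The fixed-width i_bs_get_integer_8/16/32 instructions above share one pattern: when the match position happens to be byte aligned the value is read straight out of the buffer, and only bit-misaligned positions fall back to the generic erts_bs_get_integer_2() path. A self-contained sketch of the 16-bit case, with a simplified match buffer standing in for ErlBinMatchBuffer:

    #include <stddef.h>
    #include <stdint.h>

    struct mbuf {            /* simplified stand-in for ErlBinMatchBuffer */
        const uint8_t *base; /* bytes of the binary being matched */
        size_t offset;       /* current match position, in bits */
        size_t size;         /* total size, in bits */
    };

    /* Match 16 bits as a big-endian integer; -1 means not enough data.
     * Only the byte-aligned fast path is shown; a misaligned offset
     * takes the generic erts_bs_get_integer_2() route in the emulator. */
    static int32_t match_uint16(struct mbuf *mb)
    {
        if (mb->size - mb->offset < 16)
            return -1;
        if ((mb->offset & 7) == 0) {                 /* byte aligned */
            const uint8_t *p = mb->base + (mb->offset >> 3);
            mb->offset += 16;
            return (int32_t) ((p[0] << 8) | p[1]);
        }
        return -1;   /* slow path omitted in this sketch */
    }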
+ */ + + mb = ms_matchbuffer(ms); + if (mb->size - mb->offset < size) { + $FAIL($Fail); + } + wordsneeded = 1+WSIZE(NBYTES((Uint) size)); + $GC_TEST_PRESERVE(wordsneeded, $Live, ms); + } + mb = ms_matchbuffer(ms); + LIGHT_SWAPOUT; + result = erts_bs_get_integer_2(c_p, size, flags, mb); + LIGHT_SWAPIN; + HEAP_SPACE_VERIFIED(0); + if (is_non_value(result)) { + $FAIL($Fail); + } + $Dst = result; +} + +i_bs_get_utf8(Ctx, Fail, Dst) { + ErlBinMatchBuffer* mb = ms_matchbuffer($Ctx); + Eterm result = erts_bs_get_utf8(mb); + + if (is_non_value(result)) { + $FAIL($Fail); + } + $Dst = result; +} + +i_bs_get_utf16(Ctx, Fail, Flags, Dst) { + ErlBinMatchBuffer* mb = ms_matchbuffer($Ctx); + Eterm result = erts_bs_get_utf16(mb, $Flags); + + if (is_non_value(result)) { + $FAIL($Fail); + } + $Dst = result; +} + +bs_context_to_binary := ctx_to_bin.fetch.execute; +i_bs_get_binary_all_reuse := ctx_to_bin.fetch_bin.execute; + +ctx_to_bin.head() { + Eterm context; + ErlBinMatchBuffer* mb; + Uint size; + Uint offs; +} + +ctx_to_bin.fetch(Src) { + context = $Src; + if (is_boxed(context) && + header_is_bin_matchstate(*boxed_val(context))) { + ErlBinMatchState* ms; + ms = (ErlBinMatchState *) boxed_val(context); + mb = &ms->mb; + offs = ms->save_offset[0]; + size = mb->size - offs; + } else { + $NEXT0(); + } +} + +ctx_to_bin.fetch_bin(Src, Fail, Unit) { + context = $Src; + mb = ms_matchbuffer(context); + size = mb->size - mb->offset; + if (size % $Unit != 0) { + $FAIL($Fail); + } + offs = mb->offset; +} + +ctx_to_bin.execute() { + Uint hole_size; + Uint orig = mb->orig; + ErlSubBin* sb = (ErlSubBin *) boxed_val(context); + hole_size = 1 + header_arity(sb->thing_word) - ERL_SUB_BIN_SIZE; + sb->thing_word = HEADER_SUB_BIN; + sb->size = BYTE_OFFSET(size); + sb->bitsize = BIT_OFFSET(size); + sb->offs = BYTE_OFFSET(offs); + sb->bitoffs = BIT_OFFSET(offs); + sb->is_writable = 0; + sb->orig = orig; + if (hole_size) { + sb[1].thing_word = make_pos_bignum_header(hole_size-1); + } +} + +i_bs_match_string(Ctx, Fail, Bits, Ptr) { + byte* bytes = (byte *) $Ptr; + Uint bits = $Bits; + ErlBinMatchBuffer* mb; + Uint offs; + + mb = ms_matchbuffer($Ctx); + if (mb->size - mb->offset < bits) { + $FAIL($Fail); + } + offs = mb->offset & 7; + if (offs == 0 && (bits & 7) == 0) { + if (sys_memcmp(bytes, mb->base+(mb->offset>>3), bits>>3)) { + $FAIL($Fail); + } + } else if (erts_cmp_bits(bytes, 0, mb->base+(mb->offset>>3), mb->offset & 7, bits)) { + $FAIL($Fail); + } + mb->offset += bits; +} + +i_bs_save2(Src, Slot) { + ErlBinMatchState* _ms = (ErlBinMatchState*) boxed_val((Eterm) $Src); + _ms->save_offset[$Slot] = _ms->mb.offset; +} + +i_bs_restore2(Src, Slot) { + ErlBinMatchState* _ms = (ErlBinMatchState*) boxed_val((Eterm) $Src); + _ms->mb.offset = _ms->save_offset[$Slot]; +} diff --git a/erts/emulator/beam/code_ix.c b/erts/emulator/beam/code_ix.c index 8a3d1b20b4..34e46f5f33 100644 --- a/erts/emulator/beam/code_ix.c +++ b/erts/emulator/beam/code_ix.c @@ -34,8 +34,8 @@ # define CIX_TRACE(text) #endif -erts_smp_atomic32_t the_active_code_index; -erts_smp_atomic32_t the_staging_code_index; +erts_atomic32_t the_active_code_index; +erts_atomic32_t the_staging_code_index; static Process* code_writing_process = NULL; struct code_write_queue_item { @@ -43,7 +43,7 @@ struct code_write_queue_item { struct code_write_queue_item* next; }; static struct code_write_queue_item* code_write_queue = NULL; -static erts_smp_mtx_t code_write_permission_mtx; +static erts_mtx_t code_write_permission_mtx; #ifdef ERTS_ENABLE_LOCK_CHECK static erts_tsd_key_t 
has_code_write_permission; @@ -55,9 +55,9 @@ void erts_code_ix_init(void) * single threaded with active and staging set both to zero. * Preloading is finished by a commit that will set things straight. */ - erts_smp_atomic32_init_nob(&the_active_code_index, 0); - erts_smp_atomic32_init_nob(&the_staging_code_index, 0); - erts_smp_mtx_init(&code_write_permission_mtx, "code_write_permission", NIL, + erts_atomic32_init_nob(&the_active_code_index, 0); + erts_atomic32_init_nob(&the_staging_code_index, 0); + erts_mtx_init(&code_write_permission_mtx, "code_write_permission", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); #ifdef ERTS_ENABLE_LOCK_CHECK erts_tsd_key_create(&has_code_write_permission, @@ -91,9 +91,9 @@ void erts_commit_staging_code_ix(void) /* We need to take this lock as we are now making the staging export table active */ export_staging_lock(); ix = erts_staging_code_ix(); - erts_smp_atomic32_set_nob(&the_active_code_index, ix); + erts_atomic32_set_nob(&the_active_code_index, ix); ix = (ix + 1) % ERTS_NUM_CODE_IX; - erts_smp_atomic32_set_nob(&the_staging_code_index, ix); + erts_atomic32_set_nob(&the_staging_code_index, ix); export_staging_unlock(); erts_tracer_nif_clear(); CIX_TRACE("activate"); @@ -115,12 +115,10 @@ void erts_abort_staging_code_ix(void) int erts_try_seize_code_write_permission(Process* c_p) { int success; -#ifdef ERTS_SMP - ASSERT(!erts_smp_thr_progress_is_blocking()); /* to avoid deadlock */ -#endif + ASSERT(!erts_thr_progress_is_blocking()); /* to avoid deadlock */ ASSERT(c_p != NULL); - erts_smp_mtx_lock(&code_write_permission_mtx); + erts_mtx_lock(&code_write_permission_mtx); success = (code_writing_process == NULL); if (success) { code_writing_process = c_p; @@ -138,21 +136,21 @@ int erts_try_seize_code_write_permission(Process* c_p) code_write_queue = qitem; erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL); } - erts_smp_mtx_unlock(&code_write_permission_mtx); + erts_mtx_unlock(&code_write_permission_mtx); return success; } void erts_release_code_write_permission(void) { - erts_smp_mtx_lock(&code_write_permission_mtx); - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); + erts_mtx_lock(&code_write_permission_mtx); + ERTS_LC_ASSERT(erts_has_code_write_permission()); while (code_write_queue != NULL) { /* unleash the entire herd */ struct code_write_queue_item* qitem = code_write_queue; - erts_smp_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS); if (!ERTS_PROC_IS_EXITING(qitem->p)) { erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS); } - erts_smp_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS); code_write_queue = qitem->next; erts_proc_dec_refc(qitem->p); erts_free(ERTS_ALC_T_CODE_IX_LOCK_Q, qitem); @@ -161,7 +159,7 @@ void erts_release_code_write_permission(void) #ifdef ERTS_ENABLE_LOCK_CHECK erts_tsd_set(has_code_write_permission, (void *) 0); #endif - erts_smp_mtx_unlock(&code_write_permission_mtx); + erts_mtx_unlock(&code_write_permission_mtx); } #ifdef ERTS_ENABLE_LOCK_CHECK diff --git a/erts/emulator/beam/code_ix.h b/erts/emulator/beam/code_ix.h index a28b0cd36e..42976d2301 100644 --- a/erts/emulator/beam/code_ix.h +++ b/erts/emulator/beam/code_ix.h @@ -176,7 +176,7 @@ int erts_has_code_write_permission(void); ERTS_GLB_INLINE BeamInstr *erts_codeinfo_to_code(ErtsCodeInfo *ci) { - ASSERT(ci->op == (BeamInstr) BeamOp(op_i_func_info_IaaI) || !ci->op); + ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI) || !ci->op); ASSERT_MFA(&ci->mfa); return
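erts_commit_staging_code_ix() above publishes the staged generation by storing it into the active index and then advancing the staging index one slot, modulo ERTS_NUM_CODE_IX, so code loading always prepares one generation ahead of the one executing code reads. A toy model of that rotation using C11 atomics in place of the erts_atomic32 wrappers (the staging lock is assumed, not shown):

    #include <stdatomic.h>

    #define NUM_CODE_IX 3     /* ERTS_NUM_CODE_IX is 3 in the emulator */

    static _Atomic int active_ix  = 0;
    static _Atomic int staging_ix = 0;

    /* Publish the staged generation, then point staging at the next
     * slot. The real function holds the export staging lock here. */
    static void commit_staging(void)
    {
        int ix = atomic_load(&staging_ix);
        atomic_store(&active_ix, ix);
        atomic_store(&staging_ix, (ix + 1) % NUM_CODE_IX);
    }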
(BeamInstr*)(ci + 1); } @@ -185,7 +185,7 @@ ERTS_GLB_INLINE ErtsCodeInfo *erts_code_to_codeinfo(BeamInstr *I) { ErtsCodeInfo *ci = ((ErtsCodeInfo *)(((char *)(I)) - sizeof(ErtsCodeInfo))); - ASSERT(ci->op == (BeamInstr) BeamOp(op_i_func_info_IaaI) || !ci->op); + ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI) || !ci->op); ASSERT_MFA(&ci->mfa); return ci; } @@ -205,16 +205,16 @@ ErtsCodeMFA *erts_code_to_codemfa(BeamInstr *I) return mfa; } -extern erts_smp_atomic32_t the_active_code_index; -extern erts_smp_atomic32_t the_staging_code_index; +extern erts_atomic32_t the_active_code_index; +extern erts_atomic32_t the_staging_code_index; ERTS_GLB_INLINE ErtsCodeIndex erts_active_code_ix(void) { - return erts_smp_atomic32_read_nob(&the_active_code_index); + return erts_atomic32_read_nob(&the_active_code_index); } ERTS_GLB_INLINE ErtsCodeIndex erts_staging_code_ix(void) { - return erts_smp_atomic32_read_nob(&the_staging_code_index); + return erts_atomic32_read_nob(&the_staging_code_index); } #endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c index fefde256d7..10bf197405 100644 --- a/erts/emulator/beam/copy.c +++ b/erts/emulator/beam/copy.c @@ -845,7 +845,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint funp = (ErlFunThing *) tp; funp->next = off_heap->first; off_heap->first = (struct erl_off_heap_header*) funp; - erts_smp_refc_inc(&funp->fe->refc, 2); + erts_refc_inc(&funp->fe->refc, 2); *argp = make_fun(tp); } break; @@ -854,7 +854,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint case EXTERNAL_REF_SUBTAG: { ExternalThing *etp = (ExternalThing *) objp; - erts_smp_refc_inc(&etp->node->refc, 2); + erts_refc_inc(&etp->node->refc, 2); } L_off_heap_node_container_common: { @@ -1531,7 +1531,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info, } funp->next = off_heap->first; off_heap->first = (struct erl_off_heap_header*) funp; - erts_smp_refc_inc(&funp->fe->refc, 2); + erts_refc_inc(&funp->fe->refc, 2); goto cleanup_next; } case MAP_SUBTAG: @@ -1658,7 +1658,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info, case EXTERNAL_REF_SUBTAG: { ExternalThing *etp = (ExternalThing *) ptr; - erts_smp_refc_inc(&etp->node->refc, 2); + erts_refc_inc(&etp->node->refc, 2); } off_heap_node_container_common: { @@ -1855,7 +1855,7 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap) case FUN_SUBTAG: { ErlFunThing* funp = (ErlFunThing *) (tp-1); - erts_smp_refc_inc(&funp->fe->refc, 2); + erts_refc_inc(&funp->fe->refc, 2); } goto off_heap_common; case EXTERNAL_PID_SUBTAG: @@ -1863,7 +1863,7 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap) case EXTERNAL_REF_SUBTAG: { ExternalThing* etp = (ExternalThing *) (tp-1); - erts_smp_refc_inc(&etp->node->refc, 2); + erts_refc_inc(&etp->node->refc, 2); } off_heap_common: { diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c index 09fdb897f5..bc168fc58d 100644 --- a/erts/emulator/beam/dist.c +++ b/erts/emulator/beam/dist.c @@ -121,7 +121,7 @@ Export* dexit_trap = NULL; Export* dmonitor_p_trap = NULL; /* local variables */ - +static Export *dist_ctrl_put_data_trap; /* forward declarations */ @@ -130,8 +130,8 @@ static int dsig_send_ctl(ErtsDSigData* dsdp, Eterm ctl, int force_busy); static void send_nodes_mon_msgs(Process *, Eterm, Eterm, Eterm, Eterm); static void init_nodes_monitors(void); -static erts_smp_atomic_t no_caches; -static 
erts_smp_atomic_t no_nodes; +static erts_atomic_t no_caches; +static erts_atomic_t no_nodes; struct { Eterm reason; @@ -144,8 +144,8 @@ delete_cache(ErtsAtomCache *cache) { if (cache) { erts_free(ERTS_ALC_T_DCACHE, (void *) cache); - ASSERT(erts_smp_atomic_read_nob(&no_caches) > 0); - erts_smp_atomic_dec_nob(&no_caches); + ASSERT(erts_atomic_read_nob(&no_caches) > 0); + erts_atomic_dec_nob(&no_caches); } } @@ -156,14 +156,12 @@ create_cache(DistEntry *dep) int i; ErtsAtomCache *cp; - ERTS_SMP_LC_ASSERT( - is_internal_port(dep->cid) - && erts_lc_is_port_locked(erts_port_lookup_raw(dep->cid))); + ERTS_LC_ASSERT(is_nil(dep->cid)); ASSERT(!dep->cache); dep->cache = cp = (ErtsAtomCache*) erts_alloc(ERTS_ALC_T_DCACHE, sizeof(ErtsAtomCache)); - erts_smp_atomic_inc_nob(&no_caches); + erts_atomic_inc_nob(&no_caches); for (i = 0; i < sizeof(cp->in_arr)/sizeof(cp->in_arr[0]); i++) { cp->in_arr[i] = THE_NON_VALUE; cp->out_arr[i] = THE_NON_VALUE; @@ -172,15 +170,17 @@ create_cache(DistEntry *dep) Uint erts_dist_cache_size(void) { - return (Uint) erts_smp_atomic_read_mb(&no_caches)*sizeof(ErtsAtomCache); + return (Uint) erts_atomic_read_mb(&no_caches)*sizeof(ErtsAtomCache); } static ErtsProcList * -get_suspended_on_de(DistEntry *dep, Uint32 unset_qflgs) +get_suspended_on_de(DistEntry *dep, erts_aint32_t unset_qflgs) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&dep->qlock)); - dep->qflgs &= ~unset_qflgs; - if (dep->qflgs & ERTS_DE_QFLG_EXIT) { + erts_aint32_t qflgs; + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&dep->qlock)); + qflgs = erts_atomic32_read_band_acqb(&dep->qflgs, ~unset_qflgs); + qflgs &= ~unset_qflgs; + if (qflgs & ERTS_DE_QFLG_EXIT) { /* No resume when exit has been scheduled */ return NULL; } @@ -283,17 +283,15 @@ static void doit_monitor_net_exits(ErtsMonitor *mon, void *vnecp) watched = (is_atom(rmon->name) ? 
TUPLE2(lhp, rmon->name, dep->sysname) : rmon->u.pid); -#ifdef ERTS_SMP rp_locks |= ERTS_PROC_LOCKS_MSG_SEND; - erts_smp_proc_lock(rp, ERTS_PROC_LOCKS_MSG_SEND); -#endif + erts_proc_lock(rp, ERTS_PROC_LOCKS_MSG_SEND); erts_queue_monitor_message(rp, &rp_locks, mon->ref, am_process, watched, am_noconnection); erts_destroy_monitor(rmon); } UnUseTmpHeapNoproc(3); } - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); done: erts_destroy_monitor(mon); } @@ -342,7 +340,7 @@ static void doit_link_net_exits_sub(ErtsLink *sublnk, void *vlnecp) trace_proc(NULL, 0, rp, am_getting_unlinked, sublnk->pid); } } - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } done: erts_destroy_link(sublnk); @@ -384,7 +382,7 @@ static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp) rp = erts_proc_lookup(lnk->pid); if (!rp) goto done; - erts_smp_proc_lock(rp, rp_locks); + erts_proc_lock(rp, rp_locks); rlnk = erts_remove_link(&ERTS_P_LINKS(rp), name); if (rlnk != NULL) { ASSERT(is_atom(rlnk->pid) && (rlnk->type == LINK_NODE)); @@ -401,7 +399,7 @@ static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp) tup = TUPLE2(hp, am_nodedown, name); erts_queue_message(rp, rp_locks, msgp, tup, am_system); } - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } done: erts_destroy_link(lnk); @@ -413,16 +411,16 @@ set_node_not_alive(void *unused) ErlHeapFragment *bp; Eterm nodename = erts_this_dist_entry->sysname; - ASSERT(erts_smp_atomic_read_nob(&no_nodes) == 0); + ASSERT(erts_atomic_read_nob(&no_nodes) == 0); - erts_smp_thr_progress_block(); + erts_thr_progress_block(); erts_set_this_node(am_Noname, 0); erts_is_alive = 0; send_nodes_mon_msgs(NULL, am_nodedown, nodename, am_visible, nodedown.reason); nodedown.reason = NIL; bp = nodedown.bp; nodedown.bp = NULL; - erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); if (bp) free_message_buffer(bp); } @@ -430,7 +428,7 @@ set_node_not_alive(void *unused) static ERTS_INLINE void dec_no_nodes(void) { - erts_aint_t no = erts_smp_atomic_dec_read_mb(&no_nodes); + erts_aint_t no = erts_atomic_dec_read_mb(&no_nodes); ASSERT(no >= 0); ASSERT(erts_get_scheduler_id()); /* Need to be a scheduler */ if (no == 0) @@ -443,12 +441,40 @@ static ERTS_INLINE void inc_no_nodes(void) { #ifdef DEBUG - erts_aint_t no = erts_smp_atomic_read_nob(&no_nodes); + erts_aint_t no = erts_atomic_read_nob(&no_nodes); ASSERT(erts_is_alive ? no > 0 : no == 0); #endif - erts_smp_atomic_inc_mb(&no_nodes); + erts_atomic_inc_mb(&no_nodes); } - + +static void +kill_dist_ctrl_proc(void *vpid) +{ + Eterm pid = (Eterm) vpid; + ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND; + Process *rp = erts_pid2proc(NULL, 0, pid, rp_locks); + if (rp) { + erts_send_exit_signal(NULL, rp->common.id, rp, &rp_locks, + am_kill, NIL, NULL, 0); + if (rp_locks) + erts_proc_unlock(rp, rp_locks); + } +} + +static void +schedule_kill_dist_ctrl_proc(Eterm pid) +{ + ErtsSchedulerData *esdp = erts_get_scheduler_data(); + int sched_id = 1; + if (!esdp || ERTS_SCHEDULER_IS_DIRTY(esdp)) + sched_id = 1; + else + sched_id = (int) esdp->no; + erts_schedule_misc_aux_work(sched_id, + kill_dist_ctrl_proc, + (void *) (UWord) pid); +} + /* * proc is currently running or exiting process. */ @@ -458,58 +484,62 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason) if (dep == erts_this_dist_entry) { /* Net kernel has died (clean up!!) */ DistEntry *tdep; - int no_dist_port = 0; + int no_dist_ctrl = 0; Eterm nd_reason = (reason == am_no_network ? 
am_no_network : am_net_kernel_terminated); - erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); + erts_rwmtx_rlock(&erts_dist_table_rwmtx); for (tdep = erts_hidden_dist_entries; tdep; tdep = tdep->next) - no_dist_port++; + no_dist_ctrl++; for (tdep = erts_visible_dist_entries; tdep; tdep = tdep->next) - no_dist_port++; + no_dist_ctrl++; /* KILL all port controllers */ - if (no_dist_port == 0) - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + if (no_dist_ctrl == 0) + erts_rwmtx_runlock(&erts_dist_table_rwmtx); else { Eterm def_buf[128]; int i = 0; - Eterm *dist_port; + Eterm *dist_ctrl; - if (no_dist_port <= sizeof(def_buf)/sizeof(def_buf[0])) - dist_port = &def_buf[0]; + if (no_dist_ctrl <= sizeof(def_buf)/sizeof(def_buf[0])) + dist_ctrl = &def_buf[0]; else - dist_port = erts_alloc(ERTS_ALC_T_TMP, - sizeof(Eterm)*no_dist_port); + dist_ctrl = erts_alloc(ERTS_ALC_T_TMP, + sizeof(Eterm)*no_dist_ctrl); for (tdep = erts_hidden_dist_entries; tdep; tdep = tdep->next) { - ASSERT(is_internal_port(tdep->cid)); - dist_port[i++] = tdep->cid; + ASSERT(is_internal_port(tdep->cid) || is_internal_pid(tdep->cid)); + dist_ctrl[i++] = tdep->cid; } for (tdep = erts_visible_dist_entries; tdep; tdep = tdep->next) { - ASSERT(is_internal_port(tdep->cid)); - dist_port[i++] = tdep->cid; + ASSERT(is_internal_port(tdep->cid) || is_internal_pid(tdep->cid)); + dist_ctrl[i++] = tdep->cid; } - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); - - for (i = 0; i < no_dist_port; i++) { - Port *prt = erts_port_lookup(dist_port[i], - ERTS_PORT_SFLGS_INVALID_LOOKUP); - if (!prt) - continue; - ASSERT(erts_atomic32_read_nob(&prt->state) - & ERTS_PORT_SFLG_DISTRIBUTION); - - erts_port_exit(NULL, ERTS_PORT_SIG_FLG_FORCE_SCHED, - prt, dist_port[i], nd_reason, NULL); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); + + for (i = 0; i < no_dist_ctrl; i++) { + if (is_internal_pid(dist_ctrl[i])) + schedule_kill_dist_ctrl_proc(dist_ctrl[i]); + else { + Port *prt = erts_port_lookup(dist_ctrl[i], + ERTS_PORT_SFLGS_INVALID_LOOKUP); + if (prt) { + ASSERT(erts_atomic32_read_nob(&prt->state) + & ERTS_PORT_SFLG_DISTRIBUTION); + + erts_port_exit(NULL, ERTS_PORT_SIG_FLG_FORCE_SCHED, + prt, dist_ctrl[i], nd_reason, NULL); + } + } } - if (dist_port != &def_buf[0]) - erts_free(ERTS_ALC_T_TMP, dist_port); + if (dist_ctrl != &def_buf[0]) + erts_free(ERTS_ALC_T_TMP, dist_ctrl); } /* - * When last dist port exits, node will be taken + * When last dist ctrl exits, node will be taken * from alive to not alive. 
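Collecting the controller ids in erts_do_net_exits() uses a common ERTS idiom: a small on-stack array covers the typical node count, a temporary heap allocation is taken only when the count exceeds it, and the cleanup test is simply whether the pointer still points at the stack buffer. The bare pattern, sketched with malloc/free instead of erts_alloc:

    #include <stdlib.h>

    typedef unsigned long term_t;        /* stand-in for Eterm */

    static void with_id_array(size_t n)
    {
        term_t def_buf[128];
        term_t *ids = (n <= sizeof(def_buf) / sizeof(def_buf[0]))
                          ? def_buf
                          : malloc(n * sizeof(term_t));
        size_t i;

        /* erts_alloc aborts on failure; plain malloc would need a
         * NULL check here. */
        for (i = 0; i < n; i++)
            ids[i] = 0;                  /* ... fill and use ids[0..n-1] ... */

        if (ids != def_buf)              /* free only a real heap block */
            free(ids);
    }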
*/ ASSERT(is_nil(nodedown.reason) && !nodedown.bp); @@ -526,52 +556,51 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason) &nodedown.bp->off_heap); } } - else { /* Call from distribution port */ + else { /* Call from distribution controller (port/process) */ NetExitsContext nec = {dep}; ErtsLink *nlinks; ErtsLink *node_links; ErtsMonitor *monitors; Uint32 flags; - erts_smp_atomic_set_mb(&dep->dist_cmd_scheduled, 1); - erts_smp_de_rwlock(dep); + erts_atomic_set_mb(&dep->dist_cmd_scheduled, 1); + erts_de_rwlock(dep); - ERTS_SMP_LC_ASSERT(is_internal_port(dep->cid) - && erts_lc_is_port_locked(erts_port_lookup_raw(dep->cid))); + if (is_internal_port(dep->cid)) { + ERTS_LC_ASSERT(erts_lc_is_port_locked(erts_port_lookup_raw(dep->cid))); - if (erts_port_task_is_scheduled(&dep->dist_cmd)) - erts_port_task_abort(&dep->dist_cmd); + if (erts_port_task_is_scheduled(&dep->dist_cmd)) + erts_port_task_abort(&dep->dist_cmd); + } if (dep->status & ERTS_DE_SFLG_EXITING) { #ifdef DEBUG - erts_smp_mtx_lock(&dep->qlock); - ASSERT(dep->qflgs & ERTS_DE_QFLG_EXIT); - erts_smp_mtx_unlock(&dep->qlock); + ASSERT(erts_atomic32_read_nob(&dep->qflgs) & ERTS_DE_QFLG_EXIT); #endif } else { dep->status |= ERTS_DE_SFLG_EXITING; - erts_smp_mtx_lock(&dep->qlock); - ASSERT(!(dep->qflgs & ERTS_DE_QFLG_EXIT)); - dep->qflgs |= ERTS_DE_QFLG_EXIT; - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_lock(&dep->qlock); + ASSERT(!(erts_atomic32_read_nob(&dep->qflgs) & ERTS_DE_QFLG_EXIT)); + erts_atomic32_read_bor_relb(&dep->qflgs, ERTS_DE_QFLG_EXIT); + erts_mtx_unlock(&dep->qlock); } - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); monitors = dep->monitors; nlinks = dep->nlinks; node_links = dep->node_links; dep->monitors = NULL; dep->nlinks = NULL; dep->node_links = NULL; - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); nodename = dep->sysname; flags = dep->flags; erts_set_dist_entry_not_connected(dep); - erts_smp_de_rwunlock(dep); + erts_de_rwunlock(dep); erts_sweep_monitors(monitors, &doit_monitor_net_exits, (void *) &nec); erts_sweep_links(nlinks, &doit_link_net_exits, (void *) &nec); @@ -605,8 +634,8 @@ void init_dist(void) nodedown.reason = NIL; nodedown.bp = NULL; - erts_smp_atomic_init_nob(&no_nodes, 0); - erts_smp_atomic_init_nob(&no_caches, 0); + erts_atomic_init_nob(&no_nodes, 0); + erts_atomic_init_nob(&no_caches, 0); /* Lookup/Install all references to trap functions */ dsend2_trap = trap_function(am_dsend,2); @@ -618,6 +647,9 @@ void init_dist(void) dgroup_leader_trap = trap_function(am_dgroup_leader,2); dexit_trap = trap_function(am_dexit, 2); dmonitor_p_trap = trap_function(am_dmonitor_p, 2); + dist_ctrl_put_data_trap = erts_export_put(am_erts_internal, + am_dist_ctrl_put_data, + 2); } #define ErtsDistOutputBuf2Binary(OB) \ @@ -659,19 +691,24 @@ static void clear_dist_entry(DistEntry *dep) ErtsProcList *suspendees; ErtsDistOutputBuf *obuf; - erts_smp_de_rwlock(dep); + erts_de_rwlock(dep); + erts_atomic_set_nob(&dep->input_handler, + (erts_aint_t) NIL); cache = dep->cache; dep->cache = NULL; #ifdef DEBUG - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); ASSERT(!dep->nlinks); ASSERT(!dep->node_links); ASSERT(!dep->monitors); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); #endif - erts_smp_mtx_lock(&dep->qlock); + erts_mtx_lock(&dep->qlock); + + erts_atomic64_set_nob(&dep->in, 0); + erts_atomic64_set_nob(&dep->out, 0); if (!dep->out_queue.last) obuf = dep->finalized_out_queue.first; @@ -680,17 +717,24 @@ static void clear_dist_entry(DistEntry *dep) obuf = dep->out_queue.first; } 
+ if (dep->tmp_out_queue.first) { + dep->tmp_out_queue.last->next = obuf; + obuf = dep->tmp_out_queue.first; + } + dep->out_queue.first = NULL; dep->out_queue.last = NULL; + dep->tmp_out_queue.first = NULL; + dep->tmp_out_queue.last = NULL; dep->finalized_out_queue.first = NULL; dep->finalized_out_queue.last = NULL; dep->status = 0; suspendees = get_suspended_on_de(dep, ERTS_DE_QFLGS_ALL); - erts_smp_mtx_unlock(&dep->qlock); - erts_smp_atomic_set_nob(&dep->dist_cmd_scheduled, 0); + erts_mtx_unlock(&dep->qlock); + erts_atomic_set_nob(&dep->dist_cmd_scheduled, 0); dep->send = NULL; - erts_smp_de_rwunlock(dep); + erts_de_rwunlock(dep); erts_resume_processes(suspendees); @@ -705,10 +749,11 @@ static void clear_dist_entry(DistEntry *dep) } if (obufsize) { - erts_smp_mtx_lock(&dep->qlock); - ASSERT(dep->qsize >= obufsize); - dep->qsize -= obufsize; - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_lock(&dep->qlock); + ASSERT(erts_atomic_read_nob(&dep->qsize) >= obufsize); + erts_atomic_add_nob(&dep->qsize, + (erts_aint_t) -obufsize); + erts_mtx_unlock(&dep->qlock); } } @@ -813,9 +858,9 @@ erts_dsig_send_m_exit(ErtsDSigData *dsdp, Eterm watcher, Eterm watched, watched, watcher, ref, reason); #ifdef DEBUG - erts_smp_de_links_lock(dsdp->dep); + erts_de_links_lock(dsdp->dep); ASSERT(!erts_lookup_monitor(dsdp->dep->monitors, ref)); - erts_smp_de_links_unlock(dsdp->dep); + erts_de_links_unlock(dsdp->dep); #endif res = dsig_send_ctl(dsdp, ctl, 1); @@ -906,11 +951,30 @@ erts_dsig_send_msg(Eterm remote, Eterm message, ErtsSendContext* ctx) } #endif - if (token != NIL) - ctl = TUPLE4(&ctx->ctl_heap[0], - make_small(DOP_SEND_TT), am_Empty, remote, token); - else - ctl = TUPLE3(&ctx->ctl_heap[0], make_small(DOP_SEND), am_Empty, remote); + if (token != NIL) { + Eterm el1, el2; + if (ctx->dep->flags & DFLAG_SEND_SENDER) { + el1 = make_small(DOP_SEND_SENDER_TT); + el2 = sender->common.id; + } + else { + el1 = make_small(DOP_SEND_TT); + el2 = am_Empty; + } + ctl = TUPLE4(&ctx->ctl_heap[0], el1, el2, remote, token); + } + else { + Eterm el1, el2; + if (ctx->dep->flags & DFLAG_SEND_SENDER) { + el1 = make_small(DOP_SEND_SENDER); + el2 = sender->common.id; + } + else { + el1 = make_small(DOP_SEND); + el2 = am_Empty; + } + ctl = TUPLE3(&ctx->ctl_heap[0], el1, el2, remote); + } DTRACE6(message_send, sender_name, receiver_name, msize, tok_label, tok_lastcnt, tok_serial); DTRACE7(message_send_remote, sender_name, node_name, receiver_name, @@ -1147,22 +1211,25 @@ int erts_net_message(Port *prt, ErtsLink *lnk; Uint tuple_arity; int res; + Uint32 connection_id; #ifdef ERTS_DIST_MSG_DBG ErlDrvSizeT orig_len = len; #endif UseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(!prt || erts_lc_is_port_locked(prt)); if (!erts_is_alive) { UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE); return 0; } - if (hlen != 0) - goto data_error; + + + ASSERT(hlen == 0); + if (len == 0) { /* HANDLE TICK !!! */ UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE); return 0; @@ -1181,30 +1248,31 @@ int erts_net_message(Port *prt, len--; } - if (len == 0) { - PURIFY_MSG("data error"); - goto data_error; - } + res = erts_prepare_dist_ext(&ede, t, len, dep, dep->cache, &connection_id); - res = erts_prepare_dist_ext(&ede, t, len, dep, dep->cache); - - if (res >= 0) - res = ctl_len = erts_decode_dist_ext_size(&ede); - else { + switch (res) { + case ERTS_PREP_DIST_EXT_CLOSED: + return 0; /* Connection not alive; ignore signal... 
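The control-tuple construction in erts_dsig_send_msg() above picks both the opcode and the second element from one negotiation bit: when the peer advertised DFLAG_SEND_SENDER, element 2 carries the sender pid and the opcode becomes DOP_SEND_SENDER/DOP_SEND_SENDER_TT; otherwise the legacy opcodes are used with the empty-cookie atom. A reduced sketch of that choice (numeric opcode values as defined in dist.h):

    /* Opcode/element-2 selection from erts_dsig_send_msg(), reduced to
     * a plain function for illustration. */
    enum {
        DOP_SEND = 2,         DOP_SEND_TT = 12,
        DOP_SEND_SENDER = 22, DOP_SEND_SENDER_TT = 23
    };

    struct ctl_choice {
        int op;              /* first element of the control tuple */
        int carries_sender;  /* 1: element 2 is sender pid; 0: atom '' */
    };

    static struct ctl_choice
    choose_send_op(int peer_has_send_sender, int has_trace_token)
    {
        struct ctl_choice c;
        if (peer_has_send_sender) {
            c.op = has_trace_token ? DOP_SEND_SENDER_TT : DOP_SEND_SENDER;
            c.carries_sender = 1;
        } else {
            c.op = has_trace_token ? DOP_SEND_TT : DOP_SEND;
            c.carries_sender = 0;
        }
        return c;
    }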
*/ + case ERTS_PREP_DIST_EXT_FAILED: #ifdef ERTS_DIST_MSG_DBG erts_fprintf(stderr, "DIST MSG DEBUG: erts_prepare_dist_ext() failed:\n"); bw(buf, orig_len); #endif - ctl_len = 0; - } - - if (res < 0) { + goto data_error; + case ERTS_PREP_DIST_EXT_SUCCESS: + ctl_len = erts_decode_dist_ext_size(&ede); + if (ctl_len < 0) { #ifdef ERTS_DIST_MSG_DBG - erts_fprintf(stderr, "DIST MSG DEBUG: erts_decode_dist_ext_size(CTL) failed:\n"); - bw(buf, orig_len); + erts_fprintf(stderr, "DIST MSG DEBUG: erts_decode_dist_ext_size(CTL) failed:\n"); + bw(buf, orig_len); #endif - PURIFY_MSG("data error"); - goto data_error; + PURIFY_MSG("data error"); + goto data_error; + } + break; + default: + ERTS_INTERNAL_ERROR("Unexpected result from erts_prepare_dist_ext()"); + break; } if (ctl_len > DIST_CTL_DEFAULT_SIZE) { @@ -1235,6 +1303,7 @@ int erts_net_message(Port *prt, } token_size = 0; + token = NIL; switch (type = unsigned_val(tuple[1])) { case DOP_LINK: @@ -1263,23 +1332,23 @@ int erts_net_message(Port *prt, break; } - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); res = erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, from); if (res < 0) { /* It was already there! Lets skip the rest... */ - erts_smp_de_links_unlock(dep); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_de_links_unlock(dep); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); break; } lnk = erts_add_or_lookup_link(&(dep->nlinks), LINK_PID, rp->common.id); erts_add_link(&(ERTS_LINK_ROOT(lnk)), LINK_PID, from); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); if (IS_TRACED_FL(rp, F_TRACE_PROCS)) trace_proc(NULL, 0, rp, am_getting_linked, from); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); break; case DOP_UNLINK: { @@ -1305,7 +1374,7 @@ int erts_net_message(Port *prt, trace_proc(NULL, 0, rp, am_getting_unlinked, from); } - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); erts_remove_dist_link(&dld, to, from, dep); erts_destroy_dist_link(&dld); @@ -1357,11 +1426,11 @@ int erts_net_message(Port *prt, else { if (is_atom(watched)) watched = rp->common.id; - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); erts_add_monitor(&(dep->monitors), MON_ORIGIN, ref, watched, name); erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, ref, watcher, name); - erts_smp_de_links_unlock(dep); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_de_links_unlock(dep); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); } break; @@ -1383,9 +1452,9 @@ int erts_net_message(Port *prt, goto invalid_message; } - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); mon = erts_remove_monitor(&(dep->monitors),ref); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); /* ASSERT(mon != NULL); can happen in case of broken dist message */ if (mon == NULL) { break; @@ -1399,7 +1468,7 @@ int erts_net_message(Port *prt, break; } mon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); ASSERT(mon != NULL); if (mon == NULL) { break; @@ -1460,42 +1529,56 @@ int erts_net_message(Port *prt, erts_queue_dist_message(rp, locks, ede_copy, token, from); if (locks) - erts_smp_proc_unlock(rp, locks); + erts_proc_unlock(rp, locks); } break; + case DOP_SEND_SENDER_TT: { + Uint xsize; case DOP_SEND_TT: + if (tuple_arity != 4) { goto invalid_message; } - - token_size = size_object(tuple[4]); - /* Fall through ... 
*/ + + token = tuple[4]; + token_size = size_object(token); + xsize = ERTS_HEAP_FRAG_SIZE(token_size); + goto send_common; + + case DOP_SEND_SENDER: case DOP_SEND: + + token = NIL; + xsize = 0; + if (tuple_arity != 3) + goto invalid_message; + + send_common: + /* - * There is intentionally no testing of the cookie (it is always '') - * from R9B and onwards. + * If DOP_SEND_SENDER or DOP_SEND_SENDER_TT element 2 contains + * the sender pid (i.e. DFLAG_SEND_SENDER is set); otherwise, + * the atom '' (empty cookie). */ + ASSERT((type == DOP_SEND_SENDER || type == DOP_SEND_SENDER_TT) + ? (is_pid(tuple[2]) && (dep->flags & DFLAG_SEND_SENDER)) + : tuple[2] == am_Empty); + #ifdef ERTS_DIST_MSG_DBG dist_msg_dbg(&ede, "MSG", buf, orig_len); #endif - if (type != DOP_SEND_TT && tuple_arity != 3) { - goto invalid_message; - } to = tuple[3]; if (is_not_pid(to)) { goto invalid_message; } rp = erts_proc_lookup(to); if (rp) { - Uint xsize = type == DOP_SEND ? 0 : ERTS_HEAP_FRAG_SIZE(token_size); ErtsProcLocks locks = 0; ErtsDistExternal *ede_copy; ede_copy = erts_make_dist_ext_copy(&ede, xsize); - if (type == DOP_SEND) { - token = NIL; - } else { + if (is_not_nil(token)) { ErlHeapFragment *heap_frag; ErlOffHeap *ohp; ASSERT(xsize); @@ -1503,15 +1586,15 @@ int erts_net_message(Port *prt, ERTS_INIT_HEAP_FRAG(heap_frag, token_size, token_size); hp = heap_frag->mem; ohp = &heap_frag->off_heap; - token = tuple[4]; token = copy_struct(token, token_size, &hp, ohp); } - erts_queue_dist_message(rp, locks, ede_copy, token, tuple[2]); + erts_queue_dist_message(rp, locks, ede_copy, token, am_Empty); if (locks) - erts_smp_proc_unlock(rp, locks); + erts_proc_unlock(rp, locks); } break; + } case DOP_MONITOR_P_EXIT: { /* We are monitoring a process on the remote node which dies, we get @@ -1535,7 +1618,7 @@ int erts_net_message(Port *prt, goto invalid_message; } - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); sysname = dep->sysname; mon = erts_remove_monitor(&(dep->monitors), ref); /* @@ -1544,7 +1627,7 @@ int erts_net_message(Port *prt, * removed info about monitor. In this case, do nothing * and everything will be as it should. 
*/ - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); if (mon == NULL) { break; } @@ -1558,7 +1641,7 @@ int erts_net_message(Port *prt, mon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref); if (mon == NULL) { - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); break; } UseTmpHeapNoproc(3); @@ -1569,7 +1652,7 @@ int erts_net_message(Port *prt, erts_queue_monitor_message(rp, &rp_locks, ref, am_process, watched, reason); - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); erts_destroy_monitor(mon); UnUseTmpHeapNoproc(3); break; @@ -1631,13 +1714,13 @@ int erts_net_message(Port *prt, if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) { /* We didn't exit the process and it is traced */ if (rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) { - erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND); + erts_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND); rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND; } trace_proc(NULL, 0, rp, am_getting_unlinked, from); } } - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } erts_remove_dist_link(&dld, to, from, dep); if (lnk) @@ -1679,7 +1762,7 @@ int erts_net_message(Port *prt, token, NULL, 0); - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } break; } @@ -1697,7 +1780,7 @@ int erts_net_message(Port *prt, if (!rp) break; rp->group_leader = STORE_NC_IN_PROC(rp, from); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); break; default: @@ -1709,7 +1792,7 @@ int erts_net_message(Port *prt, erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl); } UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; return 0; invalid_message: { @@ -1725,8 +1808,8 @@ decode_error: } data_error: UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE); - erts_deliver_port_exit(prt, dep->cid, am_killed, 0, 1); - ERTS_SMP_CHK_NO_PROC_LOCKS; + erts_kill_dist_connection(dep, connection_id); + ERTS_CHK_NO_PROC_LOCKS; return -1; } @@ -1746,6 +1829,31 @@ static int dsig_send_ctl(ErtsDSigData* dsdp, Eterm ctl, int force_busy) return ret; } +static ERTS_INLINE void +notify_dist_data(Process *c_p, Eterm pid) +{ + Process *rp; + ErtsProcLocks rp_locks; + + ASSERT(erts_get_scheduler_data() + && !ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data())); + ASSERT(is_internal_pid(pid)); + + if (c_p && c_p->common.id == pid) { + rp = c_p; + rp_locks = ERTS_PROC_LOCK_MAIN; + } + else { + rp = erts_proc_lookup(pid); + rp_locks = 0; + } + + if (rp) { + ErtsMessage *mp = erts_alloc_message(0, NULL); + erts_queue_message(rp, rp_locks, mp, am_dist_data, am_system); + } +} + int erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx) { @@ -1762,7 +1870,7 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx) if (!ctx->c_p || dsdp->no_suspend) ctx->force_busy = 1; - ERTS_SMP_LC_ASSERT(!ctx->c_p + ERTS_LC_ASSERT(!ctx->c_p || (ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(ctx->c_p))); @@ -1851,28 +1959,48 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx) * and if so enqueue the signal and schedule it for send. */ ctx->obuf->next = NULL; - erts_smp_de_rlock(dep); + erts_de_rlock(dep); cid = dep->cid; if (cid != dsdp->cid || dep->connection_id != dsdp->connection_id || dep->status & ERTS_DE_SFLG_EXITING) { /* Not the same connection as when we started; drop message... 
*/ - erts_smp_de_runlock(dep); + erts_de_runlock(dep); free_dist_obuf(ctx->obuf); } else { + Sint qsize; + erts_aint32_t qflgs; ErtsProcList *plp = NULL; - erts_smp_mtx_lock(&dep->qlock); - dep->qsize += size_obuf(ctx->obuf); - if (dep->qsize >= erts_dist_buf_busy_limit) - dep->qflgs |= ERTS_DE_QFLG_BUSY; - if (!ctx->force_busy && (dep->qflgs & ERTS_DE_QFLG_BUSY)) { - erts_smp_mtx_unlock(&dep->qlock); + Eterm notify_proc = NIL; + Sint obsz = size_obuf(ctx->obuf); + + erts_mtx_lock(&dep->qlock); + qsize = erts_atomic_add_read_nob(&dep->qsize, (erts_aint_t) obsz); + ASSERT(qsize >= obsz); + qflgs = erts_atomic32_read_nob(&dep->qflgs); + if (!(qflgs & ERTS_DE_QFLG_BUSY) && qsize >= erts_dist_buf_busy_limit) { + erts_atomic32_read_bor_relb(&dep->qflgs, ERTS_DE_QFLG_BUSY); + qflgs |= ERTS_DE_QFLG_BUSY; + } + if (qsize == obsz && (qflgs & ERTS_DE_QFLG_REQ_INFO)) { + /* Previously empty queue and info requested... */ + qflgs = erts_atomic32_read_band_mb(&dep->qflgs, + ~ERTS_DE_QFLG_REQ_INFO); + if (qflgs & ERTS_DE_QFLG_REQ_INFO) { + notify_proc = dep->cid; + ASSERT(is_internal_pid(notify_proc)); + } + /* else: requester will send itself the message... */ + qflgs &= ~ERTS_DE_QFLG_REQ_INFO; + } + if (!ctx->force_busy && (qflgs & ERTS_DE_QFLG_BUSY)) { + erts_mtx_unlock(&dep->qlock); plp = erts_proclist_create(ctx->c_p); erts_suspend(ctx->c_p, ERTS_PROC_LOCK_MAIN, NULL); suspended = 1; - erts_smp_mtx_lock(&dep->qlock); + erts_mtx_lock(&dep->qlock); } /* Enqueue obuf on dist entry */ @@ -1883,7 +2011,8 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx) dep->out_queue.last = ctx->obuf; if (!ctx->force_busy) { - if (!(dep->qflgs & ERTS_DE_QFLG_BUSY)) { + qflgs = erts_atomic32_read_nob(&dep->qflgs); + if (!(qflgs & ERTS_DE_QFLG_BUSY)) { if (suspended) resume = 1; /* was busy when we started, but isn't now */ #ifdef USE_VM_PROBES @@ -1907,9 +2036,12 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx) } } - erts_smp_mtx_unlock(&dep->qlock); - erts_schedule_dist_command(NULL, dep); - erts_smp_de_runlock(dep); + erts_mtx_unlock(&dep->qlock); + if (is_internal_port(dep->cid)) + erts_schedule_dist_command(NULL, dep); + erts_de_runlock(dep); + if (is_internal_pid(notify_proc)) + notify_dist_data(ctx->c_p, notify_proc); if (resume) { erts_resume(ctx->c_p, ERTS_PROC_LOCK_MAIN); @@ -1963,16 +2095,20 @@ static Uint dist_port_command(Port *prt, ErtsDistOutputBuf *obuf) { int fpe_was_unmasked; - Uint size = obuf->ext_endp - obuf->extp; + ErlDrvSizeT size; + char *bufp; - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); - if (size > (Uint) INT_MAX) - erts_exit(ERTS_DUMP_EXIT, - "Absurdly large distribution output data buffer " - "(%beu bytes) passed.\n", - size); + if (!obuf) { + size = 0; + bufp = NULL; + } + else { + size = obuf->ext_endp - obuf->extp; + bufp = (char*) obuf->extp; + } #ifdef USE_VM_PROBES if (DTRACE_ENABLED(dist_output)) { @@ -1987,11 +2123,10 @@ dist_port_command(Port *prt, ErtsDistOutputBuf *obuf) remote_str, size); } #endif + prt->caller = NIL; fpe_was_unmasked = erts_block_fpe(); - (*prt->drv_ptr->output)((ErlDrvData) prt->drv_data, - (char*) obuf->extp, - (int) size); + (*prt->drv_ptr->output)((ErlDrvData) prt->drv_data, bufp, size); erts_unblock_fpe(fpe_was_unmasked); return size; } @@ -2000,33 +2135,41 @@ static Uint dist_port_commandv(Port *prt, ErtsDistOutputBuf *obuf) { int fpe_was_unmasked; - Uint size = obuf->ext_endp - obuf->extp; + 
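The enqueue bookkeeping above amounts to a small flag protocol on dep->qflgs: the producer raises BUSY when the queue size crosses the busy limit, and on an empty-to-non-empty transition it atomically takes down REQ_INFO; whoever actually observed the flag set owes the controller a notification. A compressed sketch of the producer side with C11 atomics (flag values and the limit are stand-ins; the real limit is the +zdbbl setting):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define QFLG_BUSY     (1u << 0)  /* stand-ins for ERTS_DE_QFLG_* bits */
    #define QFLG_REQ_INFO (1u << 1)

    static _Atomic unsigned qflgs;
    static _Atomic long     qsize;
    static const long busy_limit = 128 * 1024;  /* arbitrary for the sketch */

    /* Producer bookkeeping when a buffer of obuf_size bytes is queued.
     * Returns true when the caller owes the controller a notification
     * (queue went empty -> non-empty while REQ_INFO was set). The
     * queue lock is assumed held, as in the diff. */
    static bool enqueue_bookkeeping(long obuf_size)
    {
        long sz     = atomic_fetch_add(&qsize, obuf_size) + obuf_size;
        unsigned fl = atomic_load(&qflgs);

        if (!(fl & QFLG_BUSY) && sz >= busy_limit)
            atomic_fetch_or(&qflgs, QFLG_BUSY);

        if (sz == obuf_size && (fl & QFLG_REQ_INFO)) {
            /* First buffer in: consume the request exactly once; only
             * the thread that cleared the bit sends the message. */
            unsigned prev = atomic_fetch_and(&qflgs, ~QFLG_REQ_INFO);
            return (prev & QFLG_REQ_INFO) != 0;
        }
        return false;
    }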
ErlDrvSizeT size; SysIOVec iov[2]; ErlDrvBinary* bv[2]; ErlIOVec eiov; - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - - if (size > (Uint) INT_MAX) - erts_exit(ERTS_DUMP_EXIT, - "Absurdly large distribution output data buffer " - "(%beu bytes) passed.\n", - size); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); iov[0].iov_base = NULL; iov[0].iov_len = 0; bv[0] = NULL; - iov[1].iov_base = obuf->extp; - iov[1].iov_len = size; - bv[1] = Binary2ErlDrvBinary(ErtsDistOutputBuf2Binary(obuf)); + if (!obuf) { + size = 0; + eiov.vsize = 1; + } + else { + size = obuf->ext_endp - obuf->extp; + eiov.vsize = 2; + + iov[1].iov_base = obuf->extp; + iov[1].iov_len = size; + bv[1] = Binary2ErlDrvBinary(ErtsDistOutputBuf2Binary(obuf)); + } - eiov.vsize = 2; eiov.size = size; eiov.iov = iov; eiov.binv = bv; + if (size > (Uint) INT_MAX) + erts_exit(ERTS_DUMP_EXIT, + "Absurdly large distribution output data buffer " + "(%beu bytes) passed.\n", + size); + ASSERT(prt->drv_ptr->outputv); #ifdef USE_VM_PROBES @@ -2074,29 +2217,25 @@ erts_dist_command(Port *prt, int reds_limit) Sint reds = ERTS_PORT_REDS_DIST_CMD_START; Uint32 status; Uint32 flags; - Sint obufsize = 0; + Sint qsize, obufsize = 0; ErtsDistOutputQueue oq, foq; DistEntry *dep = prt->dist_entry; Uint (*send)(Port *prt, ErtsDistOutputBuf *obuf); erts_aint32_t sched_flags; ErtsSchedulerData *esdp = erts_get_scheduler_data(); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - - erts_smp_refc_inc(&dep->refc, 1); /* Otherwise dist_entry might be - removed if port command fails */ + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); - erts_smp_atomic_set_mb(&dep->dist_cmd_scheduled, 0); + erts_atomic_set_mb(&dep->dist_cmd_scheduled, 0); - erts_smp_de_rlock(dep); + erts_de_rlock(dep); flags = dep->flags; status = dep->status; send = dep->send; - erts_smp_de_runlock(dep); + erts_de_runlock(dep); if (status & ERTS_DE_SFLG_EXITING) { erts_deliver_port_exit(prt, prt->common.id, am_killed, 0, 1); - erts_deref_dist_entry(dep); return reds + ERTS_PORT_REDS_DIST_CMD_EXIT; } @@ -2110,19 +2249,19 @@ erts_dist_command(Port *prt, int reds_limit) * a mess. 
*/ - erts_smp_mtx_lock(&dep->qlock); + erts_mtx_lock(&dep->qlock); oq.first = dep->out_queue.first; oq.last = dep->out_queue.last; dep->out_queue.first = NULL; dep->out_queue.last = NULL; - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_unlock(&dep->qlock); foq.first = dep->finalized_out_queue.first; foq.last = dep->finalized_out_queue.last; dep->finalized_out_queue.first = NULL; dep->finalized_out_queue.last = NULL; - sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + sched_flags = erts_atomic32_read_nob(&prt->sched.flags); if (reds > reds_limit) goto preempted; @@ -2130,21 +2269,21 @@ erts_dist_command(Port *prt, int reds_limit) if (!(sched_flags & ERTS_PTS_FLG_BUSY_PORT) && foq.first) { int preempt = 0; do { - Uint size; - ErtsDistOutputBuf *fob; - - size = (*send)(prt, foq.first); - esdp->io.out += (Uint64) size; + Uint size; + ErtsDistOutputBuf *fob; + size = (*send)(prt, foq.first); + erts_atomic64_inc_nob(&dep->out); + esdp->io.out += (Uint64) size; #ifdef ERTS_RAW_DIST_MSG_DBG - erts_fprintf(stderr, ">> "); - bw(foq.first->extp, size); + erts_fprintf(stderr, ">> "); + bw(foq.first->extp, size); #endif - reds += ERTS_PORT_REDS_DIST_CMD_DATA(size); - fob = foq.first; - obufsize += size_obuf(fob); - foq.first = foq.first->next; - free_dist_obuf(fob); - sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + reds += ERTS_PORT_REDS_DIST_CMD_DATA(size); + fob = foq.first; + obufsize += size_obuf(fob); + foq.first = foq.first->next; + free_dist_obuf(fob); + sched_flags = erts_atomic32_read_nob(&prt->sched.flags); preempt = reds > reds_limit || (sched_flags & ERTS_PTS_FLG_EXIT); if (sched_flags & ERTS_PTS_FLG_BUSY_PORT) break; @@ -2204,32 +2343,34 @@ erts_dist_command(Port *prt, int reds_limit) } } else { + int de_busy; int preempt = 0; while (oq.first && !preempt) { - ErtsDistOutputBuf *fob; - Uint size; - oq.first->extp - = erts_encode_ext_dist_header_finalize(oq.first->extp, - dep->cache, - flags); - reds += ERTS_PORT_REDS_DIST_CMD_FINALIZE; - if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE)) - *--oq.first->extp = PASS_THROUGH; /* Old node; 'pass through' - needed */ - ASSERT(&oq.first->data[0] <= oq.first->extp - && oq.first->extp < oq.first->ext_endp); - size = (*send)(prt, oq.first); - esdp->io.out += (Uint64) size; + ErtsDistOutputBuf *fob; + Uint size; + oq.first->extp + = erts_encode_ext_dist_header_finalize(oq.first->extp, + dep->cache, + flags); + reds += ERTS_PORT_REDS_DIST_CMD_FINALIZE; + if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE)) + *--oq.first->extp = PASS_THROUGH; /* Old node; 'pass through' + needed */ + ASSERT(&oq.first->data[0] <= oq.first->extp + && oq.first->extp < oq.first->ext_endp); + size = (*send)(prt, oq.first); + erts_atomic64_inc_nob(&dep->out); + esdp->io.out += (Uint64) size; #ifdef ERTS_RAW_DIST_MSG_DBG - erts_fprintf(stderr, ">> "); - bw(oq.first->extp, size); + erts_fprintf(stderr, ">> "); + bw(oq.first->extp, size); #endif - reds += ERTS_PORT_REDS_DIST_CMD_DATA(size); - fob = oq.first; - obufsize += size_obuf(fob); - oq.first = oq.first->next; - free_dist_obuf(fob); - sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + reds += ERTS_PORT_REDS_DIST_CMD_DATA(size); + fob = oq.first; + obufsize += size_obuf(fob); + oq.first = oq.first->next; + free_dist_obuf(fob); + sched_flags = erts_atomic32_read_nob(&prt->sched.flags); preempt = reds > reds_limit || (sched_flags & ERTS_PTS_FLG_EXIT); if ((sched_flags & ERTS_PTS_FLG_BUSY_PORT) && oq.first && !preempt) goto finalize_only; @@ -2256,23 +2397,24 @@ erts_dist_command(Port *prt, int reds_limit) 
* dist entry in a non-busy state and resume suspended * processes. */ - erts_smp_mtx_lock(&dep->qlock); - ASSERT(dep->qsize >= obufsize); - dep->qsize -= obufsize; + erts_mtx_lock(&dep->qlock); + de_busy = !!(erts_atomic32_read_nob(&dep->qflgs) & ERTS_DE_QFLG_BUSY); + qsize = (Sint) erts_atomic_add_read_nob(&dep->qsize, + (erts_aint_t) -obufsize); + ASSERT(qsize >= 0); obufsize = 0; if (!(sched_flags & ERTS_PTS_FLG_BUSY_PORT) - && (dep->qflgs & ERTS_DE_QFLG_BUSY) - && dep->qsize < erts_dist_buf_busy_limit) { + && de_busy && qsize < erts_dist_buf_busy_limit) { ErtsProcList *suspendees; int resumed; suspendees = get_suspended_on_de(dep, ERTS_DE_QFLG_BUSY); - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_unlock(&dep->qlock); resumed = erts_resume_processes(suspendees); reds += resumed*ERTS_PORT_REDS_DIST_CMD_RESUMED; } else - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_unlock(&dep->qlock); } ASSERT(!oq.first && !oq.last); @@ -2281,10 +2423,15 @@ if (obufsize != 0) { ASSERT(obufsize > 0); - erts_smp_mtx_lock(&dep->qlock); - ASSERT(dep->qsize >= obufsize); - dep->qsize -= obufsize; - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_lock(&dep->qlock); +#ifdef DEBUG + qsize = (Sint) erts_atomic_add_read_nob(&dep->qsize, + (erts_aint_t) -obufsize); + ASSERT(qsize >= 0); +#else + erts_atomic_add_nob(&dep->qsize, (erts_aint_t) -obufsize); +#endif + erts_mtx_unlock(&dep->qlock); } ASSERT(foq.first || !foq.last); @@ -2301,8 +2448,6 @@ if (reds > INT_MAX/2) reds = INT_MAX/2; - erts_deref_dist_entry(dep); - return reds; preempted: @@ -2338,9 +2483,9 @@ foq.last = NULL; #ifdef DEBUG - erts_smp_mtx_lock(&dep->qlock); - ASSERT(dep->qsize == obufsize); - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_lock(&dep->qlock); + ASSERT(erts_atomic_read_nob(&dep->qsize) == obufsize); + erts_mtx_unlock(&dep->qlock); #endif } else { @@ -2349,14 +2494,14 @@ /* * Unhandled buffers need to be put back first * in out_queue. */ - erts_smp_mtx_lock(&dep->qlock); - dep->qsize -= obufsize; + erts_mtx_lock(&dep->qlock); + erts_atomic_add_nob(&dep->qsize, -obufsize); obufsize = 0; oq.last->next = dep->out_queue.first; dep->out_queue.first = oq.first; if (!dep->out_queue.last) dep->out_queue.last = oq.last; - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_unlock(&dep->qlock); } erts_schedule_dist_command(prt, NULL); @@ -2364,6 +2509,370 @@ goto done; } +#if 0 + +int +dist_data_finalize(Process *c_p, int reds_limit) +{ + int reds = 5; + DistEntry *dep = ; + ErtsDistOutputQueue oq, foq; + ErtsDistOutputBuf *ob; + int preempt; + + + erts_mtx_lock(&dep->qlock); + flags = dep->flags; + oq.first = dep->out_queue.first; + oq.last = dep->out_queue.last; + dep->out_queue.first = NULL; + dep->out_queue.last = NULL; + erts_mtx_unlock(&dep->qlock); + + if (!oq.first) { + ASSERT(!oq.last); + oq.first = dep->tmp_out_queue.first; + oq.last = dep->tmp_out_queue.last; + } + else { + ErtsDistOutputBuf *f, *l; + ASSERT(oq.last); + if (dep->tmp_out_queue.last) { + dep->tmp_out_queue.last->next = oq.first; + oq.first = dep->tmp_out_queue.first; + } + } + + if (!oq.first) { + /* Nothing to do...
*/ + ASSERT(!oq.last); + return reds; + } + + foq.first = dep->finalized_out_queue.first; + foq.last = dep->finalized_out_queue.last; + + preempt = 0; + ob = oq.first; + ASSERT(ob); + + do { + ob->extp = erts_encode_ext_dist_header_finalize(ob->extp, + dep->cache, + flags); + if (!(flags & DFLAG_DIST_HDR_ATOM_CACHE)) + *--ob->extp = PASS_THROUGH; /* Old node; 'pass through' + needed */ + ASSERT(&ob->data[0] <= ob->extp && ob->extp < ob->ext_endp); + reds += ERTS_PORT_REDS_DIST_CMD_FINALIZE; + preempt = reds > reds_limit; + if (preempt) + break; + ob = ob->next; + } while (ob); + /* + * At least one buffer was finalized; if we got preempted, + * ob points to the last buffer that we finalized. + */ + if (foq.last) + foq.last->next = oq.first; + else + foq.first = oq.first; + if (!preempt) { + /* All buffers finalized */ + foq.last = oq.last; + oq.first = oq.last = NULL; + } + else { + /* Not all buffers finalized; split oq. */ + foq.last = ob; + oq.first = ob->next; + if (oq.first) + ob->next = NULL; + else + oq.last = NULL; + } + + dep->finalized_out_queue.first = foq.first; + dep->finalized_out_queue.last = foq.last; + dep->tmp_out_queue.first = oq.first; + dep->tmp_out_queue.last = oq.last; + + return reds; +} + +#endif + +BIF_RETTYPE +dist_ctrl_get_data_notification_1(BIF_ALIST_1) +{ + DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(BIF_P); + erts_aint32_t qflgs; + erts_aint_t qsize; + Eterm receiver = NIL; + + if (!dep) + BIF_ERROR(BIF_P, EXC_NOTSUP); + + if (erts_dhandle_to_dist_entry(BIF_ARG_1) != dep) + BIF_ERROR(BIF_P, BADARG); + + /* + * Caller is the only one that can consume from this queue + * and the only one that can set the req-info flag... + */ + + erts_de_rlock(dep); + + ASSERT(dep->cid == BIF_P->common.id); + + qflgs = erts_atomic32_read_acqb(&dep->qflgs); + + if (!(qflgs & ERTS_DE_QFLG_REQ_INFO)) { + qsize = erts_atomic_read_acqb(&dep->qsize); + ASSERT(qsize >= 0); + if (qsize > 0) + receiver = BIF_P->common.id; /* Notify ourselves... */ + else { /* Empty queue; set req-info flag... */ + qflgs = erts_atomic32_read_bor_mb(&dep->qflgs, + ERTS_DE_QFLG_REQ_INFO); + qsize = erts_atomic_read_acqb(&dep->qsize); + ASSERT(qsize >= 0); + if (qsize > 0) { + qflgs = erts_atomic32_read_band_mb(&dep->qflgs, + ~ERTS_DE_QFLG_REQ_INFO); + if (qflgs & ERTS_DE_QFLG_REQ_INFO) + receiver = BIF_P->common.id; /* Notify ourselves... */ + /* else: someone else will notify us... */ + } + /* else: still empty queue... */ + } + } + /* else: Already requested... 
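dist_ctrl_get_data_notification_1() above has to avoid both a lost and a duplicated wakeup: it only sets REQ_INFO while the queue is empty, re-reads the queue size afterwards, and if data raced in it tries to take the flag back; exactly one party, the one that successfully clears the flag, sends the notification. The decision tree in isolation, with the same stand-in flags as the earlier producer sketch:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define QFLG_REQ_INFO (1u << 1)   /* stand-in, as before */

    static _Atomic unsigned qflgs;
    static _Atomic long     qsize;

    /* Returns true when the caller should notify itself right away;
     * false means the request is parked and a producer will send it. */
    static bool request_data_notification(void)
    {
        if (atomic_load(&qflgs) & QFLG_REQ_INFO)
            return false;                /* already requested earlier */

        if (atomic_load(&qsize) > 0)
            return true;                 /* data is already queued */

        atomic_fetch_or(&qflgs, QFLG_REQ_INFO);
        if (atomic_load(&qsize) > 0) {
            /* A producer slipped in between the checks: race to
             * reclaim the flag; the winner delivers the message. */
            unsigned prev = atomic_fetch_and(&qflgs, ~QFLG_REQ_INFO);
            return (prev & QFLG_REQ_INFO) != 0;
        }
        return false;                    /* still empty; producer's job */
    }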
*/ + + erts_de_runlock(dep); + + if (is_internal_pid(receiver)) + notify_dist_data(BIF_P, receiver); + + BIF_RET(am_ok); +} + +BIF_RETTYPE +dist_ctrl_put_data_2(BIF_ALIST_2) +{ + DistEntry *dep; + ErlDrvSizeT size; + Eterm input_handler; + + if (is_binary(BIF_ARG_2)) + size = binary_size(BIF_ARG_2); + else if (is_nil(BIF_ARG_2)) + size = 0; + else if (is_list(BIF_ARG_2)) + BIF_TRAP2(dist_ctrl_put_data_trap, + BIF_P, BIF_ARG_1, BIF_ARG_2); + else + BIF_ERROR(BIF_P, BADARG); + + dep = erts_dhandle_to_dist_entry(BIF_ARG_1); + if (!dep) + BIF_ERROR(BIF_P, BADARG); + + input_handler = (Eterm) erts_atomic_read_nob(&dep->input_handler); + + if (input_handler != BIF_P->common.id) + BIF_ERROR(BIF_P, EXC_NOTSUP); + + erts_atomic64_inc_nob(&dep->in); + + if (size != 0) { + byte *data, *temp_alloc = NULL; + + data = (byte *) erts_get_aligned_binary_bytes(BIF_ARG_2, &temp_alloc); + if (!data) + BIF_ERROR(BIF_P, BADARG); + + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + + (void) erts_net_message(NULL, dep, NULL, 0, data, size); + /* + * We ignore any decode failures. On fatal failures the + * connection will be taken down by killing the + * distribution channel controller... + */ + + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + + BUMP_REDS(BIF_P, 5); + + erts_free_aligned_binary_bytes(temp_alloc); + + } + + BIF_RET(am_ok); +} + +BIF_RETTYPE +dist_get_stat_1(BIF_ALIST_1) +{ + Sint64 read, write, pend; + Eterm res, *hp, **hpp; + Uint sz, *szp; + DistEntry *dep = erts_dhandle_to_dist_entry(BIF_ARG_1); + + if (!dep) + BIF_ERROR(BIF_P, BADARG); + + erts_de_rlock(dep); + + read = (Sint64) erts_atomic64_read_nob(&dep->in); + write = (Sint64) erts_atomic64_read_nob(&dep->out); + pend = (Sint64) erts_atomic_read_nob(&dep->qsize); + + erts_de_runlock(dep); + + sz = 0; + szp = &sz; + hpp = NULL; + + while (1) { + res = erts_bld_tuple(hpp, szp, 4, + am_ok, + erts_bld_sint64(hpp, szp, read), + erts_bld_sint64(hpp, szp, write), + pend ? 
am_true : am_false); + if (hpp) + break; + hp = HAlloc(BIF_P, sz); + hpp = &hp; + szp = NULL; + } + + BIF_RET(res); +} + +BIF_RETTYPE +dist_ctrl_input_handler_2(BIF_ALIST_2) +{ + DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(BIF_P); + + if (!dep) + BIF_ERROR(BIF_P, EXC_NOTSUP); + + if (erts_dhandle_to_dist_entry(BIF_ARG_1) != dep) + BIF_ERROR(BIF_P, BADARG); + + if (is_not_internal_pid(BIF_ARG_2)) + BIF_ERROR(BIF_P, BADARG); + + erts_atomic_set_nob(&dep->input_handler, + (erts_aint_t) BIF_ARG_2); + + BIF_RET(am_ok); +} + +BIF_RETTYPE +dist_ctrl_get_data_1(BIF_ALIST_1) +{ + DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(BIF_P); + int reds = 1; + ErtsDistOutputBuf *obuf; + Eterm *hp; + ProcBin *pb; + erts_aint_t qsize; + + if (!dep) + BIF_ERROR(BIF_P, EXC_NOTSUP); + + if (erts_dhandle_to_dist_entry(BIF_ARG_1) != dep) + BIF_ERROR(BIF_P, BADARG); + + erts_de_rlock(dep); + + if (dep->status & ERTS_DE_SFLG_EXITING) + goto return_none; + + ASSERT(dep->cid == BIF_P->common.id); + +#if 0 + if (dep->finalized_out_queue.first) { + obuf = dep->finalized_out_queue.first; + dep->finalized_out_queue.first = obuf->next; + if (!obuf->next) + dep->finalized_out_queue.last = NULL; + } + else +#endif + { + if (!dep->tmp_out_queue.first) { + ASSERT(!dep->tmp_out_queue.last); + qsize = erts_atomic_read_acqb(&dep->qsize); + if (qsize > 0) { + erts_mtx_lock(&dep->qlock); + dep->tmp_out_queue.first = dep->out_queue.first; + dep->tmp_out_queue.last = dep->out_queue.last; + dep->out_queue.first = NULL; + dep->out_queue.last = NULL; + erts_mtx_unlock(&dep->qlock); + } + } + + if (!dep->tmp_out_queue.first) { + ASSERT(!dep->tmp_out_queue.last); + return_none: + erts_de_runlock(dep); + BIF_RET(am_none); + } + else { + obuf = dep->tmp_out_queue.first; + dep->tmp_out_queue.first = obuf->next; + if (!obuf->next) + dep->tmp_out_queue.last = NULL; + } + + obuf->extp = erts_encode_ext_dist_header_finalize(obuf->extp, + dep->cache, + dep->flags); + reds += ERTS_PORT_REDS_DIST_CMD_FINALIZE; + if (!(dep->flags & DFLAG_DIST_HDR_ATOM_CACHE)) + *--obuf->extp = PASS_THROUGH; /* 'pass through' needed */ + ASSERT(&obuf->data[0] <= obuf->extp + && obuf->extp < obuf->ext_endp); + } + + erts_atomic64_inc_nob(&dep->out); + + erts_de_runlock(dep); + + hp = HAlloc(BIF_P, PROC_BIN_SIZE); + pb = (ProcBin *) (char *) hp; + pb->thing_word = HEADER_PROC_BIN; + pb->size = obuf->ext_endp - obuf->extp; + pb->next = MSO(BIF_P).first; + MSO(BIF_P).first = (struct erl_off_heap_header*) pb; + pb->val = ErtsDistOutputBuf2Binary(obuf); + pb->bytes = (byte*) obuf->extp; + pb->flags = 0; + + qsize = erts_atomic_add_read_nob(&dep->qsize, -size_obuf(obuf)); + ASSERT(qsize >= 0); + + if (qsize < erts_dist_buf_busy_limit/2 + && (erts_atomic32_read_acqb(&dep->qflgs) & ERTS_DE_QFLG_BUSY)) { + ErtsProcList *resume_procs = NULL; + erts_mtx_lock(&dep->qlock); + resume_procs = get_suspended_on_de(dep, ERTS_DE_QFLG_BUSY); + erts_mtx_unlock(&dep->qlock); + if (resume_procs) { + int resumed = erts_resume_processes(resume_procs); + reds += resumed*ERTS_PORT_REDS_DIST_CMD_RESUMED; + } + } + + BIF_RET2(make_binary(pb), reds); +} + void erts_dist_port_not_busy(Port *prt) { @@ -2386,21 +2895,23 @@ erts_dist_port_not_busy(Port *prt) void erts_kill_dist_connection(DistEntry *dep, Uint32 connection_id) { - erts_smp_de_rwlock(dep); - if (is_internal_port(dep->cid) - && connection_id == dep->connection_id + erts_de_rwlock(dep); + if (connection_id == dep->connection_id && !(dep->status & ERTS_DE_SFLG_EXITING)) { dep->status |= ERTS_DE_SFLG_EXITING; - erts_smp_mtx_lock(&dep->qlock); - 
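
The tail of dist_ctrl_get_data_1() above implements busy/resume hysteresis: senders are suspended once the queue passes the busy limit and resumed only after it has drained below half of it, which avoids flapping. A simplified C11 sketch (invented names; the real code collects the suspended processes and clears the busy flag under qlock):

#include <stdatomic.h>

#define QFLG_BUSY (1u << 1)

typedef struct {
    atomic_uint qflgs;
    atomic_long qsize;
    long        busy_limit;       /* cf. erts_dist_buf_busy_limit */
} out_queue;

/* Consumer side, after taking `freed` bytes off the queue; returns 1
 * when the suspended producers should be resumed. */
static int note_dequeued(out_queue *q, long freed)
{
    long qsize = atomic_fetch_add_explicit(&q->qsize, -freed,
                                           memory_order_relaxed) - freed;
    if (qsize < q->busy_limit / 2
        && (atomic_load_explicit(&q->qflgs, memory_order_acquire)
            & QFLG_BUSY)) {
        atomic_fetch_and_explicit(&q->qflgs, ~QFLG_BUSY,
                                  memory_order_release);
        return 1;
    }
    return 0;
}
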
ASSERT(!(dep->qflgs & ERTS_DE_QFLG_EXIT)); - dep->qflgs |= ERTS_DE_QFLG_EXIT; - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_lock(&dep->qlock); + ASSERT(!(erts_atomic32_read_nob(&dep->qflgs) & ERTS_DE_QFLG_EXIT)); + erts_atomic32_read_bor_nob(&dep->qflgs, ERTS_DE_QFLG_EXIT); + erts_mtx_unlock(&dep->qlock); - erts_schedule_dist_command(NULL, dep); + if (is_internal_port(dep->cid)) + erts_schedule_dist_command(NULL, dep); + else if (is_internal_pid(dep->cid)) + schedule_kill_dist_ctrl_proc(dep->cid); } - erts_smp_de_rwunlock(dep); + erts_de_rwunlock(dep); } struct print_to_data { @@ -2515,9 +3026,6 @@ info_dist_entry(fmtfn_t to, void *arg, DistEntry *dep, int visible, int connecte } erts_print(to, arg, "Name: %T", dep->sysname); -#ifdef DEBUG - erts_print(to, arg, " (refc=%d)", erts_smp_refc_read(&dep->refc, 0)); -#endif erts_print(to, arg, "\n"); if (!connected && is_nil(dep->cid)) { if (dep->nlinks) { @@ -2637,32 +3145,46 @@ BIF_RETTYPE setnode_2(BIF_ALIST_2) goto error; } - net_kernel = erts_whereis_process(BIF_P, ERTS_PROC_LOCK_MAIN, - am_net_kernel, ERTS_PROC_LOCK_MAIN, 0); - if (!net_kernel) + net_kernel = erts_whereis_process(BIF_P, + ERTS_PROC_LOCK_MAIN, + am_net_kernel, + ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS, + 0); + if (!net_kernel || ERTS_PROC_GET_DIST_ENTRY(net_kernel)) goto error; /* By setting F_DISTRIBUTION on net_kernel, - * do_net_exist will be called when net_kernel is terminated !! */ + * erts_do_net_exits will be called when net_kernel is terminated !! */ net_kernel->flags |= F_DISTRIBUTION; - if (net_kernel != BIF_P) - erts_smp_proc_unlock(net_kernel, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(net_kernel, + (ERTS_PROC_LOCK_STATUS + | ((net_kernel != BIF_P) + ? ERTS_PROC_LOCK_MAIN + : 0))); #ifdef DEBUG - erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); + erts_rwmtx_rlock(&erts_dist_table_rwmtx); ASSERT(!erts_visible_dist_entries && !erts_hidden_dist_entries); - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); #endif - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); inc_no_nodes(); erts_set_this_node(BIF_ARG_1, (Uint32) creation); erts_is_alive = 1; send_nodes_mon_msgs(NULL, am_nodeup, BIF_ARG_1, am_visible, NIL); - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + + /* + * Note erts_this_dist_entry is changed by erts_set_this_node(), + * so we *need* to use the new one after erts_set_this_node() + * is called. + */ + erts_ref_dist_entry(erts_this_dist_entry); + ERTS_PROC_SET_DIST_ENTRY(net_kernel, erts_this_dist_entry); BIF_RET(am_true); @@ -2693,18 +3215,18 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) Eterm ic, oc; Eterm *tp; DistEntry *dep = NULL; + ErtsProcLocks proc_unlock = 0; + Process *proc; Port *pp = NULL; - /* Prepare for success */ - ERTS_BIF_PREP_RET(ret, am_true); - /* * Check and pick out arguments */ if (!is_node_name_atom(BIF_ARG_1) || - is_not_internal_port(BIF_ARG_2) || - (erts_this_node->sysname == am_Noname)) { + !(is_internal_port(BIF_ARG_2) + || is_internal_pid(BIF_ARG_2)) + || (erts_this_node->sysname == am_Noname)) { goto badarg; } @@ -2748,77 +3270,124 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) else if (!dep) goto system_limit; /* Should never happen!!! 
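
The qflgs handling in erts_kill_dist_connection above follows one discipline used throughout this commit: the flag word is transitioned only while qlock is held, so writers never interleave, while readers elsewhere (e.g. the busy test in erts_dsig_prepare()) need only a single acquire load and no lock at all. A sketch with invented names:

#include <pthread.h>
#include <stdatomic.h>

#define QFLG_EXIT (1u << 2)

typedef struct {
    pthread_mutex_t qlock;        /* serializes flag transitions */
    atomic_uint     qflgs;        /* still readable without qlock */
} dist_entry;

static void mark_exiting(dist_entry *dep)
{
    pthread_mutex_lock(&dep->qlock);
    atomic_fetch_or_explicit(&dep->qflgs, QFLG_EXIT,
                             memory_order_relaxed);
    pthread_mutex_unlock(&dep->qlock);
}

static int is_exiting(dist_entry *dep)       /* lock-free reader */
{
    return (atomic_load_explicit(&dep->qflgs, memory_order_acquire)
            & QFLG_EXIT) != 0;
}
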
*/ - pp = erts_id2port_sflgs(BIF_ARG_2, - BIF_P, - ERTS_PROC_LOCK_MAIN, - ERTS_PORT_SFLGS_INVALID_LOOKUP); - erts_smp_de_rwlock(dep); + if (is_internal_pid(BIF_ARG_2)) { + if (BIF_P->common.id == BIF_ARG_2) { + proc_unlock = 0; + proc = BIF_P; + } + else { + proc_unlock = ERTS_PROC_LOCK_MAIN; + proc = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN, + BIF_ARG_2, proc_unlock); + } + erts_de_rwlock(dep); - if (!pp || (erts_atomic32_read_nob(&pp->state) - & ERTS_PORT_SFLG_EXITING)) - goto badarg; + if (!proc) + goto badarg; + else if (proc == ERTS_PROC_LOCK_BUSY) { + proc_unlock = 0; + goto yield; + } - if ((pp->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY) == 0) - goto badarg; + erts_proc_lock(proc, ERTS_PROC_LOCK_STATUS); + proc_unlock |= ERTS_PROC_LOCK_STATUS; + + if (ERTS_PROC_GET_DIST_ENTRY(proc)) { + if (dep == ERTS_PROC_GET_DIST_ENTRY(proc) + && (proc->flags & F_DISTRIBUTION) + && dep->cid == BIF_ARG_2) { + ERTS_BIF_PREP_RET(ret, erts_make_dhandle(BIF_P, dep)); + goto done; + } + goto badarg; + } + + if (is_not_nil(dep->cid)) + goto badarg; + + proc->flags |= F_DISTRIBUTION; + ERTS_PROC_SET_DIST_ENTRY(proc, dep); - if (dep->cid == BIF_ARG_2 && pp->dist_entry == dep) - goto done; /* Already set */ + proc_unlock &= ~ERTS_PROC_LOCK_STATUS; + erts_proc_unlock(proc, ERTS_PROC_LOCK_STATUS); + + dep->send = NULL; /* Only for distr ports... */ - if (dep->status & ERTS_DE_SFLG_EXITING) { - /* Suspend on dist entry waiting for the exit to finish */ - ErtsProcList *plp = erts_proclist_create(BIF_P); - plp->next = NULL; - erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL); - erts_smp_mtx_lock(&dep->qlock); - erts_proclist_store_last(&dep->suspended, plp); - erts_smp_mtx_unlock(&dep->qlock); - goto yield; } + else { - ASSERT(!(dep->status & ERTS_DE_SFLG_EXITING)); + pp = erts_id2port_sflgs(BIF_ARG_2, + BIF_P, + ERTS_PROC_LOCK_MAIN, + ERTS_PORT_SFLGS_INVALID_LOOKUP); + erts_de_rwlock(dep); - if (pp->dist_entry || is_not_nil(dep->cid)) - goto badarg; + if (!pp || (erts_atomic32_read_nob(&pp->state) + & ERTS_PORT_SFLG_EXITING)) + goto badarg; - erts_atomic32_read_bor_nob(&pp->state, ERTS_PORT_SFLG_DISTRIBUTION); + if ((pp->drv_ptr->flags & ERL_DRV_FLAG_SOFT_BUSY) == 0) + goto badarg; - /* - * Dist-ports do not use the "busy port message queue" functionality, but - * instead use "busy dist entry" functionality. - */ - { - ErlDrvSizeT disable = ERL_DRV_BUSY_MSGQ_DISABLED; - erl_drv_busy_msgq_limits(ERTS_Port2ErlDrvPort(pp), &disable, NULL); - } + if (dep->cid == BIF_ARG_2 && pp->dist_entry == dep) { + ERTS_BIF_PREP_RET(ret, erts_make_dhandle(BIF_P, dep)); + goto done; /* Already set */ + } - pp->dist_entry = dep; + if (dep->status & ERTS_DE_SFLG_EXITING) { + /* Suspend on dist entry waiting for the exit to finish */ + ErtsProcList *plp = erts_proclist_create(BIF_P); + plp->next = NULL; + erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL); + erts_mtx_lock(&dep->qlock); + erts_proclist_store_last(&dep->suspended, plp); + erts_mtx_unlock(&dep->qlock); + goto yield; + } - dep->version = version; - dep->creation = 0; + ASSERT(!(dep->status & ERTS_DE_SFLG_EXITING)); - ASSERT(pp->drv_ptr->outputv || pp->drv_ptr->output); + if (pp->dist_entry || is_not_nil(dep->cid)) + goto badarg; -#if 1 - dep->send = (pp->drv_ptr->outputv - ? 
dist_port_commandv - : dist_port_command); -#else - dep->send = dist_port_command; -#endif - ASSERT(dep->send); + erts_atomic32_read_bor_nob(&pp->state, ERTS_PORT_SFLG_DISTRIBUTION); + + pp->dist_entry = dep; + + ASSERT(pp->drv_ptr->outputv || pp->drv_ptr->output); + + dep->send = (pp->drv_ptr->outputv + ? dist_port_commandv + : dist_port_command); + ASSERT(dep->send); + + /* + * Dist-ports do not use the "busy port message queue" functionality, but + * instead use "busy dist entry" functionality. + */ + { + ErlDrvSizeT disable = ERL_DRV_BUSY_MSGQ_DISABLED; + erl_drv_busy_msgq_limits(ERTS_Port2ErlDrvPort(pp), &disable, NULL); + } + + } + + dep->version = version; + dep->creation = 0; #ifdef DEBUG - erts_smp_mtx_lock(&dep->qlock); - ASSERT(dep->qsize == 0); - erts_smp_mtx_unlock(&dep->qlock); + ASSERT(erts_atomic_read_nob(&dep->qsize) == 0); #endif - erts_set_dist_entry_connected(dep, BIF_ARG_2, flags); - if (flags & DFLAG_DIST_HDR_ATOM_CACHE) create_cache(dep); - erts_smp_de_rwunlock(dep); + erts_set_dist_entry_connected(dep, BIF_ARG_2, flags); + + erts_de_rwunlock(dep); + + ERTS_BIF_PREP_RET(ret, erts_make_dhandle(BIF_P, dep)); + dep = NULL; /* inc of refc transferred to port (dist_entry field) */ inc_no_nodes(); @@ -2831,13 +3400,16 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) done: if (dep && dep != erts_this_dist_entry) { - erts_smp_de_rwunlock(dep); + erts_de_rwunlock(dep); erts_deref_dist_entry(dep); } if (pp) erts_port_release(pp); + if (proc_unlock) + erts_proc_unlock(proc, proc_unlock); + return ret; yield: @@ -2883,7 +3455,7 @@ BIF_RETTYPE dist_exit_3(BIF_ALIST_3) if (BIF_P->common.id == local) { lp_locks = ERTS_PROC_LOCKS_ALL; lp = BIF_P; - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR); } else { lp_locks = ERTS_PROC_LOCKS_XSIG_SEND; @@ -2902,21 +3474,17 @@ BIF_RETTYPE dist_exit_3(BIF_ALIST_3) NIL, NULL, 0); -#ifdef ERTS_SMP if (lp == BIF_P) lp_locks &= ~ERTS_PROC_LOCK_MAIN; -#endif - erts_smp_proc_unlock(lp, lp_locks); + erts_proc_unlock(lp, lp_locks); if (lp == BIF_P) { - erts_aint32_t state = erts_smp_atomic32_read_acqb(&BIF_P->state); + erts_aint32_t state = erts_atomic32_read_acqb(&BIF_P->state); /* * We may have exited current process and may have to take action. 
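
The state check in dist_exit_3 here exists because the process has just sent an exit signal to itself; it must not keep executing the BIF as if nothing happened. A rough sketch of the shape of that check (flag values and names invented):

#include <stdatomic.h>

#define PSFLG_EXITING      (1u << 0)
#define PSFLG_PENDING_EXIT (1u << 1)

typedef struct { atomic_uint state; } process;

/* Returns nonzero when the caller has just terminated itself and must
 * unwind (cf. ERTS_BIF_EXITED) instead of executing further. */
static int self_exit_check(process *p)
{
    unsigned state = atomic_load_explicit(&p->state,
                                          memory_order_acquire);
    if (state & PSFLG_PENDING_EXIT) {
        /* cf. erts_handle_pending_exit(): complete the exit now */
    }
    return (state & (PSFLG_EXITING | PSFLG_PENDING_EXIT)) != 0;
}
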
*/ if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) { -#ifdef ERTS_SMP if (state & ERTS_PSFLG_PENDING_EXIT) erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN); -#endif ERTS_BIF_EXITED(BIF_P); } } @@ -3002,7 +3570,7 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1) length = 0; - erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); + erts_rwmtx_rlock(&erts_dist_table_rwmtx); ASSERT(erts_no_of_not_connected_dist_entries > 0); ASSERT(erts_no_of_hidden_dist_entries >= 0); @@ -3019,7 +3587,7 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1) result = NIL; if (length == 0) { - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); goto done; } @@ -3050,7 +3618,7 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1) hp += 2; } ASSERT(endp == hp); - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); done: UnUseTmpHeap(2,BIF_P); @@ -3105,15 +3673,15 @@ monitor_node(Process* p, Eterm Node, Eterm Bool, Eterm Options) if (dep == erts_this_dist_entry) goto done; - erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK); - erts_smp_de_rlock(dep); + erts_proc_lock(p, ERTS_PROC_LOCK_LINK); + erts_de_rlock(dep); if (ERTS_DE_IS_NOT_CONNECTED(dep)) { - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); - erts_smp_de_runlock(dep); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_de_runlock(dep); goto do_trap; } - erts_smp_de_links_lock(dep); - erts_smp_de_runlock(dep); + erts_de_links_lock(dep); + erts_de_runlock(dep); if (Bool == am_true) { ASSERT(dep->cid != NIL); @@ -3140,11 +3708,10 @@ monitor_node(Process* p, Eterm Node, Eterm Bool, Eterm Options) } } - erts_smp_de_links_unlock(dep); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_de_links_unlock(dep); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); done: - erts_deref_dist_entry(dep); BIF_RET(am_true); } @@ -3173,9 +3740,9 @@ BIF_RETTYPE net_kernel_dflag_unicode_io_1(BIF_ALIST_1) if (de == erts_this_dist_entry) { BIF_RET(am_true); } - erts_smp_de_rlock(de); + erts_de_rlock(de); f = de->flags; - erts_smp_de_runlock(de); + erts_de_runlock(de); BIF_RET(((f & DFLAG_UNICODE_IO) ? 
am_true : am_false)); } @@ -3205,7 +3772,7 @@ struct ErtsNodesMonitor_ { Uint16 no; }; -static erts_smp_mtx_t nodes_monitors_mtx; +static erts_mtx_t nodes_monitors_mtx; static ErtsNodesMonitor *nodes_monitors; static ErtsNodesMonitor *nodes_monitors_end; @@ -3223,7 +3790,7 @@ static ErtsNodesMonitor *nodes_monitors_end; static void init_nodes_monitors(void) { - erts_smp_mtx_init(&nodes_monitors_mtx, "nodes_monitors", NIL, + erts_mtx_init(&nodes_monitors_mtx, "nodes_monitors", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION); nodes_monitors = NULL; nodes_monitors_end = NULL; @@ -3349,10 +3916,10 @@ send_nodes_mon_msgs(Process *c_p, Eterm what, Eterm node, Eterm type, Eterm reas } #endif - ERTS_SMP_LC_ASSERT(!c_p + ERTS_LC_ASSERT(!c_p || (erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN)); - erts_smp_mtx_lock(&nodes_monitors_mtx); + erts_mtx_lock(&nodes_monitors_mtx); for (nmp = nodes_monitors; nmp; nmp = nmp->next) { int i; @@ -3385,7 +3952,7 @@ send_nodes_mon_msgs(Process *c_p, Eterm what, Eterm node, Eterm type, Eterm reas if (rp) { if (rp == c_p) rp_locks &= ~ERTS_PROC_LOCK_MAIN; - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } rp = nmp->proc; @@ -3412,10 +3979,10 @@ send_nodes_mon_msgs(Process *c_p, Eterm what, Eterm node, Eterm type, Eterm reas if (rp) { if (rp == c_p) rp_locks &= ~ERTS_PROC_LOCK_MAIN; - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } - erts_smp_mtx_unlock(&nodes_monitors_mtx); + erts_mtx_unlock(&nodes_monitors_mtx); } static Eterm @@ -3425,8 +3992,8 @@ insert_nodes_monitor(Process *c_p, Uint32 opts) Eterm res = am_false; ErtsNodesMonitor *xnmp, *nmp; - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&nodes_monitors_mtx)); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&nodes_monitors_mtx)); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); xnmp = c_p->nodes_monitors; if (xnmp) { @@ -3510,8 +4077,8 @@ remove_nodes_monitors(Process *c_p, Uint32 opts, int all) Eterm res = am_false; ErtsNodesMonitor *nmp; - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&nodes_monitors_mtx)); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&nodes_monitors_mtx)); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); nmp = c_p->nodes_monitors; ASSERT(!nmp || !nmp->prev || nmp->prev->proc != c_p); @@ -3553,23 +4120,23 @@ remove_nodes_monitors(Process *c_p, Uint32 opts, int all) void erts_delete_nodes_monitors(Process *c_p, ErtsProcLocks locks) { -#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) +#if defined(ERTS_ENABLE_LOCK_CHECK) if (c_p) { ErtsProcLocks might_unlock = locks & ~ERTS_PROC_LOCK_MAIN; if (might_unlock) erts_proc_lc_might_unlock(c_p, might_unlock); } #endif - if (erts_smp_mtx_trylock(&nodes_monitors_mtx) == EBUSY) { + if (erts_mtx_trylock(&nodes_monitors_mtx) == EBUSY) { ErtsProcLocks unlock_locks = locks & ~ERTS_PROC_LOCK_MAIN; if (c_p && unlock_locks) - erts_smp_proc_unlock(c_p, unlock_locks); - erts_smp_mtx_lock(&nodes_monitors_mtx); + erts_proc_unlock(c_p, unlock_locks); + erts_mtx_lock(&nodes_monitors_mtx); if (c_p && unlock_locks) - erts_smp_proc_lock(c_p, unlock_locks); + erts_proc_lock(c_p, unlock_locks); } remove_nodes_monitors(c_p, 0, 1); - erts_smp_mtx_unlock(&nodes_monitors_mtx); + erts_mtx_unlock(&nodes_monitors_mtx); } Eterm @@ -3580,7 +4147,7 @@ erts_monitor_nodes(Process *c_p, Eterm on, Eterm olist) 
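
erts_delete_nodes_monitors() above shows the standard trick for taking a mutex that sits earlier in the lock order than locks the caller already holds: try it first, and only on contention release the later locks, block, and retake them. A pthreads sketch with invented names:

#include <pthread.h>

static pthread_mutex_t monitors_mtx = PTHREAD_MUTEX_INITIALIZER;

/* `held_minor` is a lock the caller already holds that is ordered
 * after monitors_mtx in the lock order; may be NULL. */
static void with_monitors_locked(pthread_mutex_t *held_minor)
{
    if (pthread_mutex_trylock(&monitors_mtx) != 0) {
        if (held_minor)
            pthread_mutex_unlock(held_minor); /* avoid order violation */
        pthread_mutex_lock(&monitors_mtx);
        if (held_minor)
            pthread_mutex_lock(held_minor);
    }
    /* ... walk/modify the monitor list ... */
    pthread_mutex_unlock(&monitors_mtx);
}
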
Uint16 opts = (Uint16) 0; ASSERT(c_p); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); if (on != am_true && on != am_false) return THE_NON_VALUE; @@ -3636,14 +4203,14 @@ erts_monitor_nodes(Process *c_p, Eterm on, Eterm olist) return THE_NON_VALUE; } - erts_smp_mtx_lock(&nodes_monitors_mtx); + erts_mtx_lock(&nodes_monitors_mtx); if (on == am_true) res = insert_nodes_monitor(c_p, opts); else res = remove_nodes_monitors(c_p, opts, 0); - erts_smp_mtx_unlock(&nodes_monitors_mtx); + erts_mtx_unlock(&nodes_monitors_mtx); return res; } @@ -3666,8 +4233,8 @@ erts_processes_monitoring_nodes(Process *c_p) #endif ASSERT(c_p); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); - erts_smp_mtx_lock(&nodes_monitors_mtx); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); + erts_mtx_lock(&nodes_monitors_mtx); sz = 0; szp = &sz; @@ -3716,7 +4283,7 @@ erts_processes_monitoring_nodes(Process *c_p) ASSERT(hp == hend); - erts_smp_mtx_unlock(&nodes_monitors_mtx); + erts_mtx_unlock(&nodes_monitors_mtx); return res; } diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h index 3e17645997..d4765c50b8 100644 --- a/erts/emulator/beam/dist.h +++ b/erts/emulator/beam/dist.h @@ -44,6 +44,7 @@ #define DFLAG_UTF8_ATOMS 0x10000 #define DFLAG_MAP_TAG 0x20000 #define DFLAG_BIG_CREATION 0x40000 +#define DFLAG_SEND_SENDER 0x80000 /* All flags that should be enabled when term_to_binary/1 is used. */ #define TERM_TO_BINARY_DFLAGS (DFLAG_EXTENDED_REFERENCES \ @@ -74,6 +75,9 @@ #define DOP_DEMONITOR_P 20 #define DOP_MONITOR_P_EXIT 21 +#define DOP_SEND_SENDER 22 +#define DOP_SEND_SENDER_TT 23 + /* distribution trap functions */ extern Export* dsend2_trap; extern Export* dsend3_trap; @@ -100,7 +104,7 @@ typedef struct { } ErtsDSigData; #define ERTS_DE_IS_NOT_CONNECTED(DEP) \ - (ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&(DEP)->rwmtx) \ + (ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&(DEP)->rwmtx) \ || erts_lc_rwmtx_is_rwlocked(&(DEP)->rwmtx)), \ (is_nil((DEP)->cid) || ((DEP)->status & ERTS_DE_SFLG_EXITING))) @@ -153,21 +157,18 @@ erts_dsig_prepare(ErtsDSigData *dsdp, if (!dep) return ERTS_DSIG_PREP_NOT_CONNECTED; if (dspl == ERTS_DSP_RWLOCK) - erts_smp_de_rwlock(dep); + erts_de_rwlock(dep); else - erts_smp_de_rlock(dep); + erts_de_rlock(dep); if (ERTS_DE_IS_NOT_CONNECTED(dep)) { failure = ERTS_DSIG_PREP_NOT_CONNECTED; goto fail; } if (no_suspend) { - failure = ERTS_DSIG_PREP_CONNECTED; - erts_smp_mtx_lock(&dep->qlock); - if (dep->qflgs & ERTS_DE_QFLG_BUSY) + if (erts_atomic32_read_acqb(&dep->qflgs) & ERTS_DE_QFLG_BUSY) { failure = ERTS_DSIG_PREP_WOULD_SUSPEND; - erts_smp_mtx_unlock(&dep->qlock); - if (failure == ERTS_DSIG_PREP_WOULD_SUSPEND) goto fail; + } } dsdp->proc = proc; dsdp->dep = dep; @@ -175,14 +176,14 @@ erts_dsig_prepare(ErtsDSigData *dsdp, dsdp->connection_id = dep->connection_id; dsdp->no_suspend = no_suspend; if (dspl == ERTS_DSP_NO_LOCK) - erts_smp_de_runlock(dep); + erts_de_runlock(dep); return ERTS_DSIG_PREP_CONNECTED; fail: if (dspl == ERTS_DSP_RWLOCK) - erts_smp_de_rwunlock(dep); + erts_de_rwunlock(dep); else - erts_smp_de_runlock(dep); + erts_de_runlock(dep); return failure; } @@ -194,7 +195,7 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry) Eterm id; if (prt) { - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); ASSERT((erts_atomic32_read_nob(&prt->state) & ERTS_PORT_SFLGS_DEAD) == 
0); ASSERT(prt->dist_entry); @@ -204,7 +205,7 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry) } else { ASSERT(dist_entry); - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&dist_entry->rwmtx) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&dist_entry->rwmtx) || erts_lc_rwmtx_is_rwlocked(&dist_entry->rwmtx)); ASSERT(is_internal_port(dist_entry->cid)); @@ -212,7 +213,7 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry) id = dep->cid; } - if (!erts_smp_atomic_xchg_mb(&dep->dist_cmd_scheduled, 1)) + if (!erts_atomic_xchg_mb(&dep->dist_cmd_scheduled, 1)) erts_port_task_schedule(id, &dep->dist_cmd, ERTS_PORT_TASK_DIST_CMD); } @@ -238,7 +239,7 @@ erts_remove_dist_link(ErtsDistLinkData *dldp, Eterm rid, DistEntry *dep) { - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); dldp->d_lnk = erts_lookup_link(dep->nlinks, lid); if (!dldp->d_lnk) dldp->d_sub_lnk = NULL; @@ -248,7 +249,7 @@ erts_remove_dist_link(ErtsDistLinkData *dldp, ? NULL : erts_remove_link(&dep->nlinks, lid)); } - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); } ERTS_GLB_INLINE int @@ -349,6 +350,7 @@ typedef struct { Eterm ctl_heap[6]; ErtsDSigData dsd; DistEntry* dep_to_deref; + DistEntry *dep; struct erts_dsig_send_context dss; Eterm return_term; diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c index c7ab444c96..88285d8be6 100644 --- a/erts/emulator/beam/erl_alloc.c +++ b/erts/emulator/beam/erl_alloc.c @@ -83,14 +83,6 @@ #define ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC #define ERTS_ALC_DEFAULT_ACUL_LL_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC -#ifndef ERTS_SMP -# undef ERTS_ALC_DEFAULT_ACUL -# define ERTS_ALC_DEFAULT_ACUL 0 -# undef ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC -# define ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC 0 -# undef ERTS_ALC_DEFAULT_ACUL_LL_ALLOC -# define ERTS_ALC_DEFAULT_ACUL_LL_ALLOC 0 -#endif #ifdef DEBUG static Uint install_debug_functions(void); @@ -148,7 +140,7 @@ enum { }; typedef struct { - erts_smp_atomic32_t refc; + erts_atomic32_t refc; int only_sz; int internal; Uint req_sched; @@ -393,6 +385,7 @@ set_default_temp_alloc_opts(struct au_init *ip) SET_DEFAULT_ALLOC_OPTS(ip); ip->enable = AU_ALLOC_DEFAULT_ENABLE(1); ip->thr_spec = 1; + ip->disable_allowed = 0; ip->carrier_migration_allowed = 0; ip->atype = AFIT; ip->init.util.name_prefix = "temp_"; @@ -528,7 +521,6 @@ set_default_test_alloc_opts(struct au_init *ip) } -#ifdef ERTS_SMP static void adjust_tpref(struct au_init *ip, int no_sched) @@ -551,7 +543,6 @@ adjust_tpref(struct au_init *ip, int no_sched) } } -#endif static void handle_args(int *, char **, erts_alc_hndl_args_init_t *); @@ -580,7 +571,6 @@ static void adjust_fix_alloc_sizes(UWord extra_block_size) if (extra_block_size && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].enabled) { int j; -#ifdef ERTS_SMP if (erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].thr_spec) { int i; ErtsAllocatorThrSpec_t* tspec; @@ -596,7 +586,6 @@ static void adjust_fix_alloc_sizes(UWord extra_block_size) } } else -#endif { Allctr_t* allctr = erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra; for (j=0; j < ERTS_ALC_NO_FIXED_SIZES; ++j) { @@ -619,7 +608,6 @@ strategy_support_carrier_migration(struct au_init *auip) static ERTS_INLINE void adjust_carrier_migration_support(struct au_init *auip) { -#ifdef ERTS_SMP if (auip->init.util.acul) { auip->thr_spec = -1; /* Need thread preferred */ @@ -633,9 +621,6 @@ adjust_carrier_migration_support(struct au_init *auip) auip->init.aoff.flavor = AOFF_BF; } } -#else - auip->init.util.acul = 0; 
-#endif } void @@ -660,18 +645,14 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) = ERTS_MONITOR_SH_SIZE * sizeof(Uint); fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NLINK_SH)] = ERTS_LINK_SH_SIZE * sizeof(Uint); - fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_DRV_EV_D_STATE)] - = sizeof(ErtsDrvEventDataState); fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_DRV_SEL_D_STATE)] = sizeof(ErtsDrvSelectDataState); fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_NIF_SEL_D_STATE)] = sizeof(ErtsNifSelectDataState); fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MSG_REF)] = sizeof(ErtsMessageRef); -#ifdef ERTS_SMP fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_THR_Q_EL_SL)] = sizeof(ErtsThrQElement_t); -#endif fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_LL_PTIMER)] = erts_timer_type_size(ERTS_ALC_T_LL_PTIMER); fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_HL_PTIMER)] @@ -734,20 +715,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) #endif } -#ifndef ERTS_SMP - init.sl_alloc.thr_spec = 0; - init.std_alloc.thr_spec = 0; - init.ll_alloc.thr_spec = 0; - init.eheap_alloc.thr_spec = 0; - init.binary_alloc.thr_spec = 0; - init.ets_alloc.thr_spec = 0; - init.driver_alloc.thr_spec = 0; - init.fix_alloc.thr_spec = 0; - init.literal_alloc.thr_spec = 0; -#ifdef ERTS_ALC_A_EXEC - init.exec_alloc.thr_spec = 0; -#endif -#endif /* Make adjustments for carrier migration support */ init.temp_alloc.init.util.acul = 0; @@ -798,7 +765,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) #endif } -#ifdef ERTS_SMP /* Only temp_alloc can use thread specific interface */ if (init.temp_alloc.thr_spec) init.temp_alloc.thr_spec = erts_no_schedulers; @@ -817,10 +783,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) adjust_tpref(&init.exec_alloc, erts_no_schedulers); #endif -#else - /* No thread specific if not smp */ - init.temp_alloc.thr_spec = 0; -#endif /* * The following allocators cannot be run with afit strategy. @@ -839,10 +801,8 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop) refuse_af_strategy(&init.exec_alloc); #endif -#ifdef ERTS_SMP if (!init.temp_alloc.thr_spec) refuse_af_strategy(&init.temp_alloc); -#endif erts_mtrace_pre_init(); #if HAVE_ERTS_MSEG @@ -1006,8 +966,6 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu) return; } -#ifdef USE_THREADS -#ifdef ERTS_SMP if (init->thr_spec) { if (init->thr_spec > 0) { af->alloc = erts_alcu_alloc_thr_spec; @@ -1037,7 +995,6 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu) ai->thr_spec = tspec->size; } else -#endif if (init->init.util.ts) { af->alloc = erts_alcu_alloc_ts; if (init->init.util.fix_type_size) @@ -1049,21 +1006,9 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu) af->free = erts_alcu_free_ts; } else -#endif { -#ifdef ERTS_SMP erts_exit(ERTS_ABORT_EXIT, "%salloc is not thread safe\n", init->init.util.name_prefix); -#else - af->alloc = erts_alcu_alloc; - if (init->init.util.fix_type_size) - af->realloc = erts_realloc_fixed_size; - else if (init->init.util.ramv) - af->realloc = erts_alcu_realloc_mv; - else - af->realloc = erts_alcu_realloc; - af->free = erts_alcu_free; -#endif } af->extra = NULL; ai->alloc_util = 1; @@ -1548,8 +1493,7 @@ handle_args(int *argc, char **argv, erts_alc_hndl_args_init_t *init) &init->ll_alloc, &init->driver_alloc, &init->fix_alloc, - &init->sl_alloc, - &init->temp_alloc + &init->sl_alloc /* test_alloc not affected by +Mea??? or +Mu??? 
*/ }; int aui_sz = (int) sizeof(aui)/sizeof(aui[0]); @@ -1895,9 +1839,7 @@ erts_alloc_register_scheduler(void *vesdp) int ix = (int) esdp->no; int aix; -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp)); -#endif for (aix = ERTS_ALC_A_MIN; aix <= ERTS_ALC_A_MAX; aix++) { ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[aix]; esdp->alloc_data.deallctr[aix] = NULL; @@ -1915,7 +1857,6 @@ erts_alloc_register_scheduler(void *vesdp) } } -#ifdef ERTS_SMP void erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp, int *need_thr_progress, @@ -1944,12 +1885,10 @@ erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp, } } } -#endif erts_aint32_t erts_alloc_fix_alloc_shrink(int ix, erts_aint32_t flgs) { -#ifdef ERTS_SMP ErtsAllocatorThrSpec_t *tspec; tspec = &erts_allctr_thr_spec[ERTS_ALC_A_FIXED_SIZE]; if (erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].thr_spec && tspec->enabled) @@ -1957,11 +1896,6 @@ erts_alloc_fix_alloc_shrink(int ix, erts_aint32_t flgs) if (ix == 0 && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra) return erts_alcu_fix_alloc_shrink( erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra, flgs); -#else - if (ix == 1 && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra) - return erts_alcu_fix_alloc_shrink( - erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra, flgs); -#endif return 0; } @@ -2165,7 +2099,7 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg) int only_one_value = 0; ErtsAlcUFixInfo_t fi[ERTS_ALC_NO_FIXED_SIZES] = {{0,0}}; - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); /* Figure out whats wanted... */ @@ -2338,10 +2272,10 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg) if (proc) { - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(proc)); /* We'll need locks early in the lock order */ - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); } /* Calculate values needed... 
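
erts_memory() here, like dist_get_stat_1() earlier, uses the erts_bld_* two-pass convention: run the builders once with hpp == NULL so they only accumulate a word count in *szp, allocate exactly that many heap words, then run the identical calls again with szp == NULL to write the term. A self-contained toy version (names invented):

#include <stdio.h>
#include <stdlib.h>

/* Either count a word (sizing pass) or write it (build pass). */
static void bld_uint(unsigned long **hpp, size_t *szp, unsigned long v)
{
    if (szp)
        *szp += 1;
    if (hpp)
        *(*hpp)++ = v;
}

int main(void)
{
    unsigned long *heap = NULL, *hp = NULL, **hpp = NULL;
    size_t sz = 0, *szp = &sz;

    for (;;) {
        bld_uint(hpp, szp, 17);        /* identical calls, both passes */
        bld_uint(hpp, szp, 4711);
        if (hpp)
            break;                     /* build pass done */
        heap = hp = malloc(sz * sizeof *heap);   /* cf. HAlloc */
        if (!heap)
            return 1;
        hpp = &hp;
        szp = NULL;
    }
    printf("built %zu words\n", sz);
    free(heap);
    return 0;
}
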
*/ @@ -2499,7 +2433,7 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg) Uint *hp; Uint hsz; - erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN); if (only_one_value) { ASSERT(length == 1); @@ -2548,11 +2482,11 @@ erts_allocated_areas(fmtfn_t *print_to_p, void *print_to_arg, void *proc) Uint reserved_atom_space, atom_space; if (proc) { - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(proc)); /* We'll need locks early in the lock order */ - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); } i = 0; @@ -2704,7 +2638,7 @@ erts_allocated_areas(fmtfn_t *print_to_p, void *print_to_arg, void *proc) Uint hsz; Uint *hszp; - erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN); hpp = NULL; hsz = 0; @@ -2792,7 +2726,7 @@ erts_allocator_info(fmtfn_t to, void *arg) { ErtsAlcType_t a; - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) { int ai; @@ -2845,11 +2779,7 @@ erts_allocator_info(fmtfn_t to, void *arg) #if HAVE_ERTS_MSEG { struct erts_mmap_info_struct emis; -#ifdef ERTS_SMP int max = (int) erts_no_schedulers; -#else - int max = 0; -#endif int i; for (i = 0; i <= max; i++) { erts_print(to, arg, "=allocator:mseg_alloc[%d]\n", i); @@ -3310,7 +3240,7 @@ reply_alloc_info(void *vair) case ERTS_ALC_INFO_A_DISABLED_EXEC: break; case ERTS_ALC_INFO_A_MSEG_ALLOC: -#if HAVE_ERTS_MSEG && defined(ERTS_SMP) +#if HAVE_ERTS_MSEG alloc_atom = erts_bld_atom(hpp, szp, "mseg_alloc"); ainfo = erts_mseg_info(sched_id, NULL, NULL, hpp != NULL, air->only_sz, hpp, szp); @@ -3364,10 +3294,10 @@ reply_alloc_info(void *vair) if (air->req_sched == sched_id) rp_locks &= ~ERTS_PROC_LOCK_MAIN; - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); erts_proc_dec_refc(rp); - if (erts_smp_atomic32_dec_read_nob(&air->refc) == 0) { + if (erts_atomic32_dec_read_nob(&air->refc) == 0) { erts_iref_storage_clean(&air->iref); aireq_free(air); } @@ -3446,18 +3376,16 @@ erts_request_alloc_info(struct process *c_p, air->allocs[airix] = ERTS_ALC_A_INVALID; - erts_smp_atomic32_init_nob(&air->refc, + erts_atomic32_init_nob(&air->refc, (erts_aint32_t) erts_no_schedulers); erts_proc_add_refc(c_p, (Sint) erts_no_schedulers); -#ifdef ERTS_SMP if (erts_no_schedulers > 1) erts_schedule_multi_misc_aux_work(1, erts_no_schedulers, reply_alloc_info, (void *) air); -#endif reply_alloc_info((void *) air); @@ -3532,35 +3460,29 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3) case 0xf: switch (op) { case 0xf00: -#ifdef USE_THREADS if (((Allctr_t *) a1)->thread_safe) return (UWord) erts_alcu_alloc_ts(ERTS_ALC_T_UNDEF, (void *) a1, (Uint) a2); else -#endif return (UWord) erts_alcu_alloc(ERTS_ALC_T_UNDEF, (void *) a1, (Uint) a2); case 0xf01: -#ifdef USE_THREADS if (((Allctr_t *) a1)->thread_safe) return (UWord) erts_alcu_realloc_ts(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2, (Uint) a3); else -#endif return (UWord) erts_alcu_realloc(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2, (Uint) a3); case 0xf02: -#ifdef USE_THREADS if (((Allctr_t *) a1)->thread_safe) erts_alcu_free_ts(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2); else -#endif erts_alcu_free(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2); return 0; case 0xf03: { @@ -3571,11 +3493,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3) init.enable = 1; 
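
reply_alloc_info() above ends with the classic fan-out/refcount teardown: the request block is shared by one async job per scheduler, and whichever job decrements the counter to zero frees it. A C11 sketch (invented names):

#include <stdatomic.h>
#include <stdlib.h>

typedef struct {
    atomic_int refc;     /* initialized to the number of schedulers */
    /* ... request payload ... */
} alloc_info_req;

static alloc_info_req *req_create(int nschedulers)
{
    alloc_info_req *req = malloc(sizeof *req);
    if (req)
        atomic_init(&req->refc, nschedulers);
    return req;
}

/* Called once per scheduler after it has sent its part of the reply. */
static void req_done(alloc_info_req *req)
{
    if (atomic_fetch_sub_explicit(&req->refc, 1,
                                  memory_order_acq_rel) == 1)
        free(req);       /* we were the last one out */
}
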
init.atype = GOODFIT; init.init.util.name_prefix = (char *) a1; -#ifdef ERTS_SMP init.init.util.ts = 1; -#else - init.init.util.ts = a2 ? 1 : 0; -#endif if ((char **) a3) { char **argv = (char **) a3; int i = 0; @@ -3630,7 +3548,6 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3) erts_alcu_stop((Allctr_t *) a1); erts_free(ERTS_ALC_T_UNDEF, (void *) a1); break; -#ifdef USE_THREADS case 0xf05: return (UWord) 1; case 0xf06: return (UWord) ((Allctr_t *) a1)->thread_safe; #ifdef ETHR_NO_FORKSAFETY @@ -3700,12 +3617,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3) ethr_thr_exit((void *) a1); ERTS_ALC_TEST_ABORT; break; -#endif /* #ifdef USE_THREADS */ -#ifdef ERTS_SMP case 0xf13: return (UWord) 1; -#else - case 0xf13: return (UWord) 0; -#endif case 0xf14: return (UWord) erts_alloc(ERTS_ALC_T_TEST, (Uint)a1); case 0xf15: erts_free(ERTS_ALC_T_TEST, (void*)a1); return 0; @@ -3909,10 +3821,8 @@ void check_allocators(void) ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) erts_allctrs[i].extra; Allctr_t *allctr = real_af->extra; Carrier_t *ct; -#ifdef USE_THREADS if (allctr->thread_safe) erts_mtx_lock(&allctr->mutex); -#endif if (allctr->check_mbc) { for (ct = allctr->mbc_list.first; ct; ct = ct->next) { @@ -3920,10 +3830,8 @@ void check_allocators(void) allctr->check_mbc(allctr,ct); } } -#ifdef USE_THREADS if (allctr->thread_safe) erts_mtx_unlock(&allctr->mutex); -#endif } } } diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h index 97a1cf1308..117f96a4ad 100644 --- a/erts/emulator/beam/erl_alloc.h +++ b/erts/emulator/beam/erl_alloc.h @@ -27,9 +27,7 @@ #include "erl_thr_progress.h" #undef ERL_THR_PROGRESS_TSD_TYPE_ONLY #include "erl_alloc_util.h" -#ifdef USE_THREADS #include "erl_threads.h" -#endif #include "erl_mmap.h" #ifdef DEBUG @@ -154,12 +152,10 @@ void erts_allctr_wrapper_pre_lock(void); void erts_allctr_wrapper_pre_unlock(void); void erts_alloc_register_scheduler(void *vesdp); -#ifdef ERTS_SMP void erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp, int *need_thr_progress, ErtsThrPrgrVal *thr_prgr_p, int *more_work); -#endif erts_aint32_t erts_alloc_fix_alloc_shrink(int ix, erts_aint32_t flgs); __decl_noreturn void erts_alloc_enomem(ErtsAlcType_t,Uint) @@ -338,37 +334,10 @@ erts_alloc_get_verify_unused_temp_alloc(Allctr_t **allctr); (((((SZ) - 1) / ERTS_CACHE_LINE_SIZE) + 1) * ERTS_CACHE_LINE_SIZE) #define ERTS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \ -ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \ - (void) 0, (void) 0, (void) 0) - -#define ERTS_SMP_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \ -static erts_smp_spinlock_t NAME##_lck; \ -ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \ - erts_smp_spinlock_init(&NAME##_lck, #NAME "_alloc_lock", NIL, \ - ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR),\ - erts_smp_spin_lock(&NAME##_lck), \ - erts_smp_spin_unlock(&NAME##_lck)) - -#ifdef ERTS_SMP + ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, (void) 0, (void) 0, (void) 0) #define ERTS_TS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \ -ERTS_SMP_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) - -#else /* !ERTS_SMP */ - -#define ERTS_TS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \ -static erts_mtx_t NAME##_lck; \ -ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \ - erts_mtx_init(NAME##_lck, #NAME "_alloc_lock", NIL, \ - ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR),\ - erts_mtx_lock(&NAME##_lck), \ - erts_mtx_unlock(&NAME##_lck)) - - -#endif - -#define ERTS_PALLOC_IMPL(NAME, TYPE, PASZ) \ -ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, (void) 0, (void) 0, (void) 0) +ERTS_QUALLOC_IMPL(NAME, 
TYPE, PASZ, ALCT) #define ERTS_TS_PALLOC_IMPL(NAME, TYPE, PASZ) \ static erts_spinlock_t NAME##_lck; \ @@ -378,17 +347,10 @@ ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, \ erts_spin_lock(&NAME##_lck), \ erts_spin_unlock(&NAME##_lck)) -#ifdef ERTS_SMP -#define ERTS_SMP_PALLOC_IMPL(NAME, TYPE, PASZ) \ +#define ERTS_PALLOC_IMPL(NAME, TYPE, PASZ) \ ERTS_TS_PALLOC_IMPL(NAME, TYPE, PASZ) -#else /* !ERTS_SMP */ - -#define ERTS_SMP_PALLOC_IMPL(NAME, TYPE, PASZ) \ - ERTS_PALLOC_IMPL(NAME, TYPE, PASZ) - -#endif #define ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, ILCK, LCK, ULCK) \ ERTS_PRE_ALLOC_IMPL(NAME##_pre, TYPE, PASZ, ILCK, LCK, ULCK) \ @@ -412,21 +374,11 @@ NAME##_free(TYPE *p) \ erts_free(ALCT, (void *) p); \ } -#ifdef ERTS_SMP #define ERTS_SCHED_PREF_PALLOC_IMPL(NAME, TYPE, PASZ) \ ERTS_SCHED_PREF_PRE_ALLOC_IMPL(NAME, TYPE, PASZ) -#else -#define ERTS_SCHED_PREF_PALLOC_IMPL(NAME, TYPE, PASZ) \ - ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, (void) 0, (void) 0, (void) 0) -#endif -#ifdef ERTS_SMP #define ERTS_SCHED_PREF_AUX(NAME, TYPE, PASZ) \ ERTS_SCHED_PREF_PRE_ALLOC_IMPL(NAME##_pre, TYPE, PASZ) -#else -#define ERTS_SCHED_PREF_AUX(NAME, TYPE, PASZ) \ -ERTS_PRE_ALLOC_IMPL(NAME##_pre, TYPE, PASZ, (void) 0, (void) 0, (void) 0) -#endif #define ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \ ERTS_SCHED_PREF_AUX(NAME, TYPE, PASZ) \ @@ -450,6 +402,32 @@ NAME##_free(TYPE *p) \ erts_free(ALCT, (void *) p); \ } +#define ERTS_THR_PREF_AUX(NAME, TYPE, PASZ) \ +ERTS_THR_PREF_PRE_ALLOC_IMPL(NAME##_pre, TYPE, PASZ) + +#define ERTS_THR_PREF_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \ +ERTS_THR_PREF_AUX(NAME, TYPE, PASZ) \ +static void \ +init_##NAME##_alloc(int nthreads) \ +{ \ + init_##NAME##_pre_alloc(nthreads); \ +} \ +static ERTS_INLINE TYPE * \ +NAME##_alloc(void) \ +{ \ + TYPE *res = NAME##_pre_alloc(); \ + if (!res) \ + res = erts_alloc(ALCT, sizeof(TYPE)); \ + return res; \ +} \ +static ERTS_INLINE void \ +NAME##_free(TYPE *p) \ +{ \ + if (!NAME##_pre_free(p)) \ + erts_free(ALCT, (void *) p); \ +} + + #ifdef DEBUG #define ERTS_PRE_ALLOC_SIZE(SZ) ((SZ) < 1000 ? 
(SZ)/10 + 10 : 100) #define ERTS_PRE_ALLOC_CLOBBER(P, T) memset((void *) (P), 0xfd, sizeof(T)) @@ -530,7 +508,8 @@ init_##NAME##_alloc(void) \ { \ sspa_data_##NAME##__ = \ erts_sspa_create(sizeof(union erts_sspa_##NAME##__), \ - ERTS_PRE_ALLOC_SIZE((PASZ))); \ + ERTS_PRE_ALLOC_SIZE((PASZ)), \ + 0, NULL); \ } \ \ static TYPE * \ @@ -552,6 +531,57 @@ NAME##_free(TYPE *p) \ (char *) p); \ } + +#define ERTS_THR_PREF_PRE_ALLOC_IMPL(NAME, TYPE, PASZ) \ +union erts_sspa_##NAME##__ { \ + erts_sspa_blk_t next; \ + TYPE type; \ +}; \ + \ +static erts_sspa_data_t *sspa_data_##NAME##__; \ + \ +static void \ +init_##NAME##_alloc(int nthreads) \ +{ \ + sspa_data_##NAME##__ = \ + erts_sspa_create(sizeof(union erts_sspa_##NAME##__), \ + ERTS_PRE_ALLOC_SIZE((PASZ)), \ + nthreads, \ + #NAME); \ +} \ + \ +void \ +erts_##NAME##_alloc_init_thread(void) \ +{ \ + int id = erts_atomic_inc_read_nob(&sspa_data_##NAME##__->id_generator);\ + if (id > sspa_data_##NAME##__->nthreads) { \ + erts_exit(ERTS_ABORT_EXIT, \ + "%s:%d:%s(): Too many threads for '" #NAME "'\n", \ + __FILE__, __LINE__, __func__); \ + } \ + erts_tsd_set(sspa_data_##NAME##__->tsd_key, (void*)(SWord)id); \ +} \ + \ +static TYPE * \ +NAME##_alloc(void) \ +{ \ + int id = (int)(SWord)erts_tsd_get(sspa_data_##NAME##__->tsd_key); \ + if (id == 0) \ + return NULL; \ + return (TYPE *) erts_sspa_alloc(sspa_data_##NAME##__, \ + id-1); \ +} \ + \ +static int \ +NAME##_free(TYPE *p) \ +{ \ + int id = (int)(SWord)erts_tsd_get(sspa_data_##NAME##__->tsd_key); \ + return erts_sspa_free(sspa_data_##NAME##__, \ + id - 1, \ + (char *) p); \ +} + + #ifdef DEBUG #define ERTS_ALC_DBG_BLK_SZ(PTR) (*(((UWord *) (PTR)) - 2)) #endif /* #ifdef DEBUG */ diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types index 50a1d97dd5..bf5487d31e 100644 --- a/erts/emulator/beam/erl_alloc.types +++ b/erts/emulator/beam/erl_alloc.types @@ -53,15 +53,6 @@ # # IMPORTANT! Only use 7-bit ascii text in this file! -+if smp -+disable threads_no_smp -+else -+if threads -+enable threads_no_smp -+else -+disable threads_no_smp -+endif -+endif # --- Allocator declarations ------------------------------------------------- # @@ -77,8 +68,6 @@ allocator SYSTEM true sys_alloc -+if smp - allocator TEMPORARY true temp_alloc allocator SHORT_LIVED true sl_alloc allocator STANDARD true std_alloc @@ -91,22 +80,6 @@ allocator LITERAL true literal_alloc allocator EXEC true exec_alloc +endif -+else # Non smp build - -allocator TEMPORARY false temp_alloc -allocator SHORT_LIVED false sl_alloc -allocator STANDARD false std_alloc -allocator LONG_LIVED false ll_alloc -allocator EHEAP false eheap_alloc -allocator ETS false ets_alloc -allocator FIXED_SIZE false fix_alloc -allocator LITERAL false literal_alloc -+if exec_alloc -allocator EXEC false exec_alloc -+endif - -+endif - allocator BINARY true binary_alloc allocator DRIVER true driver_alloc @@ -285,32 +258,18 @@ type MREF_ENT STANDARD SYSTEM magic_ref_entry type MREF_TAB_BKTS STANDARD SYSTEM magic_ref_table_buckets type MREF_TAB LONG_LIVED SYSTEM magic_ref_table type MINDIRECTION FIXED_SIZE SYSTEM magic_indirection +type BINARY_FIND SHORT_LIVED PROCESSES binary_find -+if threads_no_smp -# Need thread safe allocs, but std_alloc and fix_alloc are not; -# use driver_alloc which is... 
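
Hypothetical usage of the new ERTS_THR_PREF_QUICK_ALLOC_IMPL above (the type, pool size and allocation type here are made up for illustration):

    typedef struct { int f; } MyObj;
    ERTS_THR_PREF_QUICK_ALLOC_IMPL(myobj, MyObj, 512, ERTS_ALC_T_TEST)

init_myobj_alloc(nthreads) sizes the per-thread pools, and each participating thread then calls erts_myobj_alloc_init_thread() once, which draws an id from the sspa id_generator and parks it in thread-specific data. After that, myobj_alloc()/myobj_free() hit the calling thread's pre-allocated pool and fall back to plain erts_alloc()/erts_free() when the pool is exhausted or the thread never registered (id == 0).
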
-type THR_Q_EL DRIVER SYSTEM thr_q_element -type THR_Q_EL_SL DRIVER SYSTEM sl_thr_q_element -type MISC_AUX_WORK DRIVER SYSTEM misc_aux_work -+else type THR_Q_EL STANDARD SYSTEM thr_q_element type THR_Q_EL_SL FIXED_SIZE SYSTEM sl_thr_q_element type MISC_AUX_WORK SHORT_LIVED SYSTEM misc_aux_work -+endif type THR_Q STANDARD SYSTEM thr_queue type THR_Q_SL SHORT_LIVED SYSTEM short_lived_thr_queue type THR_Q_LL LONG_LIVED SYSTEM long_lived_thr_queue -+if smp type ASYNC SHORT_LIVED SYSTEM async type ZLIB STANDARD SYSTEM zlib -+else -# sl/std_alloc is not thread safe in non smp build; therefore, we use driver_alloc -type ZLIB DRIVER SYSTEM zlib -type ASYNC DRIVER SYSTEM async -+endif -+if smp type PORT_LOCK STANDARD SYSTEM port_lock type DRIVER_LOCK STANDARD SYSTEM driver_lock type XPORTS_LIST SHORT_LIVED SYSTEM extra_port_list @@ -320,33 +279,19 @@ type THR_PRGR_IDATA LONG_LIVED SYSTEM thr_prgr_internal_data type THR_PRGR_DATA LONG_LIVED SYSTEM thr_prgr_data type T_THR_PRGR_DATA SHORT_LIVED SYSTEM temp_thr_prgr_data type RELEASE_LAREA SHORT_LIVED SYSTEM release_literal_area -+endif # # Types used for special emulators # -+if threads - type ETHR_STD STANDARD SYSTEM ethread_standard type ETHR_SL SHORT_LIVED SYSTEM ethread_short_lived type ETHR_LL LONG_LIVED SYSTEM ethread_long_lived -+endif - -+if shared_heap - -type STACK STANDARD PROCESSES stack -type ACTIVE_PROCS STANDARD PROCESSES active_procs - -+endif - -+if smp type SYS_MSG_Q SHORT_LIVED PROCESSES system_messages_queue type FP_EXCEPTION LONG_LIVED SYSTEM fp_exception type LL_MPATHS LONG_LIVED SYSTEM ll_migration_paths type SL_MPATHS SHORT_LIVED SYSTEM sl_migration_paths -+endif +if hipe @@ -360,8 +305,6 @@ type HIPE_EXEC EXEC CODE hipe_code +endif - - +if heap_frag_elim_test type SSB SHORT_LIVED PROCESSES ssb @@ -408,11 +351,9 @@ type SYS_CHECK_REQ SHORT_LIVED SYSTEM system_check_request type TEMP_TERM TEMPORARY SYSTEM temp_term type DRV_TAB LONG_LIVED SYSTEM drv_tab type DRV_EV_STATE LONG_LIVED SYSTEM driver_event_state -type DRV_EV_D_STATE FIXED_SIZE SYSTEM driver_event_data_state type DRV_SEL_D_STATE FIXED_SIZE SYSTEM driver_select_data_state type NIF_SEL_D_STATE FIXED_SIZE SYSTEM enif_select_data_state type FD_LIST SHORT_LIVED SYSTEM fd_list -type ACTIVE_FD_ARR SHORT_LIVED SYSTEM active_fd_array type POLLSET LONG_LIVED SYSTEM pollset type POLLSET_UPDREQ SHORT_LIVED SYSTEM pollset_update_req type POLL_FDS LONG_LIVED SYSTEM poll_fds @@ -431,11 +372,7 @@ type PUTENV_STR SYSTEM SYSTEM putenv_string type PRT_REP_EXIT STANDARD SYSTEM port_report_exit type SYS_BLOCKING STANDARD SYSTEM sys_blocking -+if smp type SYS_WRITE_BUF TEMPORARY SYSTEM sys_write_buf -+else -type SYS_WRITE_BUF BINARY SYSTEM sys_write_buf -+endif +endif diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c index af86ad0548..4d4bddb93f 100644 --- a/erts/emulator/beam/erl_alloc_util.c +++ b/erts/emulator/beam/erl_alloc_util.c @@ -305,9 +305,6 @@ MBC after deallocating first block: # define ERTS_ALC_CPOOL_DEBUG #endif -#ifndef ERTS_SMP -# undef ERTS_ALC_CPOOL_DEBUG -#endif #ifdef ERTS_ALC_CPOOL_DEBUG # define ERTS_ALC_CPOOL_ASSERT(A) \ @@ -322,13 +319,8 @@ MBC after deallocating first block: # define ERTS_ALC_CPOOL_ASSERT(A) ((void) 1) #endif -#ifdef ERTS_SMP #define ERTS_ALC_IS_CPOOL_ENABLED(A) ((A)->cpool.util_limit) -#else -#define ERTS_ALC_IS_CPOOL_ENABLED(A) (0) -#endif -#ifdef ERTS_SMP #define ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON 1000 #define ERTS_ALC_CPOOL_ALLOC_OP_INC 8 @@ -367,28 +359,16 @@ do { \ } \ } while (0) -#else -#define 
ERTS_ALC_CPOOL_ALLOC_OP(A) -#define ERTS_ALC_CPOOL_REALLOC_OP(A) -#define ERTS_ALC_CPOOL_FREE_OP(A) -#endif #define ERTS_CRR_ALCTR_FLG_IN_POOL (((erts_aint_t) 1) << 0) #define ERTS_CRR_ALCTR_FLG_BUSY (((erts_aint_t) 1) << 1) #define ERTS_CRR_ALCTR_FLG_MASK (ERTS_CRR_ALCTR_FLG_IN_POOL | \ ERTS_CRR_ALCTR_FLG_BUSY) -#ifdef ERTS_SMP #define SBC_HEADER_SIZE \ (UNIT_CEILING(offsetof(Carrier_t, cpool) \ + ABLK_HDR_SZ) \ - ABLK_HDR_SZ) -#else -#define SBC_HEADER_SIZE \ - (UNIT_CEILING(sizeof(Carrier_t) \ - + ABLK_HDR_SZ) \ - - ABLK_HDR_SZ) -#endif #define MBC_HEADER_SIZE(AP) ((AP)->mbc_header_size) @@ -402,7 +382,7 @@ do { \ #define SET_CARRIER_HDR(C, Sz, F, AP) \ (ASSERT(((Sz) & FLG_MASK) == 0), (C)->chdr = ((Sz) | (F)), \ - erts_smp_atomic_init_nob(&(C)->allctr, (erts_aint_t) (AP))) + erts_atomic_init_nob(&(C)->allctr, (erts_aint_t) (AP))) #define BLK_TO_SBC(B) \ ((Carrier_t *) (((char *) (B)) - SBC_HEADER_SIZE)) @@ -598,15 +578,11 @@ do { \ (AP)->mbcs.blocks.curr.size -= (CRR)->cpool.blocks_size; \ } while (0) -#ifdef ERTS_SMP #define STAT_MBC_BLK_ALLOC_CRR(CRR, BSZ) \ do { \ (CRR)->cpool.blocks++; \ (CRR)->cpool.blocks_size += (BSZ); \ } while (0) -#else -#define STAT_MBC_BLK_ALLOC_CRR(CRR, BSZ) ((void) (CRR)) /* Get rid of warning */ -#endif #define STAT_MBC_BLK_ALLOC(AP, CRR, BSZ, FLGS) \ do { \ @@ -626,7 +602,6 @@ stat_cpool_mbc_blk_free(Allctr_t *allctr, Carrier_t **busy_pcrr_pp, UWord blksz) { -#ifdef ERTS_SMP ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks > 0); crr->cpool.blocks--; @@ -651,9 +626,6 @@ stat_cpool_mbc_blk_free(Allctr_t *allctr, #endif return 1; -#else - return 0; -#endif } #define STAT_MBC_BLK_FREE(AP, CRR, BPCRRPP, BSZ, FLGS) \ @@ -689,12 +661,7 @@ do { \ #endif #ifdef DEBUG -#ifdef USE_THREADS -# ifdef ERTS_SMP # define IS_ACTUALLY_BLOCKING (erts_thr_progress_is_blocking()) -# else -# define IS_ACTUALLY_BLOCKING 0 -# endif #define ERTS_ALCU_DBG_CHK_THR_ACCESS(A) \ do { \ if (!(A)->thread_safe && !IS_ACTUALLY_BLOCKING) { \ @@ -703,7 +670,7 @@ do { \ (A)->debug.saved_tid = 1; \ } \ else { \ - ERTS_SMP_LC_ASSERT( \ + ERTS_LC_ASSERT( \ ethr_equal_tids((A)->debug.tid, erts_thr_self())); \ } \ } \ @@ -711,9 +678,6 @@ do { \ #else #define ERTS_ALCU_DBG_CHK_THR_ACCESS(A) #endif -#else -#define ERTS_ALCU_DBG_CHK_THR_ACCESS(A) -#endif static void make_name_atoms(Allctr_t *allctr); @@ -862,7 +826,7 @@ erts_alcu_literal_32_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags) Uint sz = ERTS_SUPERALIGNED_CEILING(*size_p); ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL && allctr->t == 0); - ERTS_SMP_LC_ASSERT(allctr->thread_safe); + ERTS_LC_ASSERT(allctr->thread_safe); res = erts_alcu_mseg_alloc(allctr, &sz, flags); if (res) { @@ -880,7 +844,7 @@ erts_alcu_literal_32_mseg_realloc(Allctr_t *allctr, void *seg, Uint new_sz = ERTS_SUPERALIGNED_CEILING(*new_size_p); ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL && allctr->t == 0); - ERTS_SMP_LC_ASSERT(allctr->thread_safe); + ERTS_LC_ASSERT(allctr->thread_safe); if (seg && old_size) clear_literal_range(seg, old_size); @@ -898,7 +862,7 @@ erts_alcu_literal_32_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size, { ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL && allctr->t == 0); - ERTS_SMP_LC_ASSERT(allctr->thread_safe); + ERTS_LC_ASSERT(allctr->thread_safe); erts_alcu_mseg_dealloc(allctr, seg, size, flags); @@ -1058,7 +1022,7 @@ erts_alcu_literal_32_sys_alloc(Allctr_t *allctr, Uint* size_p, int superalign) Uint size = ERTS_SUPERALIGNED_CEILING(*size_p); ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL && allctr->t == 
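
SBC_HEADER_SIZE above encodes a space trick: a single-block carrier never uses the cpool part of Carrier_t, so its payload may start at offsetof(Carrier_t, cpool) instead of after the whole struct. A toy illustration with an invented layout:

#include <stdio.h>
#include <stddef.h>

typedef struct {
    unsigned long chdr;                    /* size + flags */
    void         *allctr;                  /* owner word */
    struct { void *next, *prev; } cpool;   /* unused by SBCs */
} carrier_hdr;

int main(void)
{
    printf("MBC header: %zu bytes, SBC header: %zu bytes\n",
           sizeof(carrier_hdr), offsetof(carrier_hdr, cpool));
    return 0;
}
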
0); - ERTS_SMP_LC_ASSERT(allctr->thread_safe); + ERTS_LC_ASSERT(allctr->thread_safe); res = erts_alcu_sys_alloc(allctr, &size, 1); if (res) { @@ -1076,7 +1040,7 @@ erts_alcu_literal_32_sys_realloc(Allctr_t *allctr, void *ptr, Uint* size_p, Uint ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL && allctr->t == 0); - ERTS_SMP_LC_ASSERT(allctr->thread_safe); + ERTS_LC_ASSERT(allctr->thread_safe); if (ptr && old_size) clear_literal_range(ptr, old_size); @@ -1093,7 +1057,7 @@ erts_alcu_literal_32_sys_dealloc(Allctr_t *allctr, void *ptr, Uint size, int sup { ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL && allctr->t == 0); - ERTS_SMP_LC_ASSERT(allctr->thread_safe); + ERTS_LC_ASSERT(allctr->thread_safe); erts_alcu_sys_dealloc(allctr, ptr, size, 1); @@ -1191,7 +1155,6 @@ unlink_carrier(CarrierList_t *cl, Carrier_t *crr) } } -#ifdef ERTS_SMP #ifdef DEBUG static int is_in_list(ErtsDoubleLink_t* sentinel, ErtsDoubleLink_t* node) @@ -1292,16 +1255,15 @@ clear_busy_pool_carrier(Allctr_t *allctr, Carrier_t *crr) erts_aint_t old_val = new_val|ERTS_CRR_ALCTR_FLG_BUSY; ERTS_ALC_CPOOL_ASSERT(old_val - == erts_smp_atomic_xchg_relb(&crr->allctr, + == erts_atomic_xchg_relb(&crr->allctr, new_val)); } #else - erts_smp_atomic_set_relb(&crr->allctr, new_val); + erts_atomic_set_relb(&crr->allctr, new_val); #endif } } -#endif /* ERTS_SMP */ #if 0 #define ERTS_DBG_CHK_FIX_LIST(A, FIX, IX, B) \ @@ -1325,12 +1287,10 @@ chk_fix_list(Allctr_t *allctr, ErtsAlcFixList_t *fix, int ix, int before) static void *mbc_alloc(Allctr_t *allctr, Uint size); -#ifdef ERTS_SMP typedef struct { ErtsAllctrDDBlock_t ddblock__; /* must be first */ ErtsAlcType_t fix_type; } ErtsAllctrFixDDBlock_t; -#endif #define ERTS_ALC_FIX_NO_UNUSE (((ErtsAlcType_t) 1) << ERTS_ALC_N_BITS) @@ -1341,11 +1301,9 @@ dealloc_fix_block(Allctr_t *allctr, ErtsAlcFixList_t *fix, int dec_cc_on_redirect) { -#ifdef ERTS_SMP /* May be redirected... 
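
The "may be redirected" case here hinges on the carrier owner word whose flag bits (ERTS_CRR_ALCTR_FLG_IN_POOL/BUSY) were defined earlier: a free that lands in the wrong allocator instance discovers the true owner there, and pool membership changes with single compare-and-swaps. A C11 sketch of the claim operation (invented names; not the ERTS implementation):

#include <stdatomic.h>
#include <stdint.h>

#define FLG_IN_POOL ((uintptr_t) 1 << 0)
#define FLG_BUSY    ((uintptr_t) 1 << 1)
#define FLG_MASK    (FLG_IN_POOL | FLG_BUSY)

typedef struct { atomic_uintptr_t owner; } carrier;

/* Try to take a pooled, non-busy carrier for allocator `me` (an
 * aligned pointer value with the low flag bits clear); returns 1 on
 * success. */
static int try_claim(carrier *crr, uintptr_t me)
{
    uintptr_t exp = atomic_load_explicit(&crr->owner,
                                         memory_order_acquire);
    if ((exp & FLG_MASK) != FLG_IN_POOL)
        return 0;                     /* not pooled, or busy */
    return atomic_compare_exchange_strong_explicit(
        &crr->owner, &exp, me,
        memory_order_acq_rel, memory_order_relaxed);
}
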
*/ ASSERT((type & ERTS_ALC_FIX_NO_UNUSE) == 0); ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type = type | ERTS_ALC_FIX_NO_UNUSE; -#endif dealloc_block(allctr, ptr, fix, dec_cc_on_redirect); } @@ -1379,12 +1337,10 @@ fix_cpool_check_shrink(Allctr_t *allctr, fix->u.cpool.shrink_list = 0; else { void *p; -#ifdef ERTS_SMP if (busy_pcrr_pp) { clear_busy_pool_carrier(allctr, *busy_pcrr_pp); *busy_pcrr_pp = NULL; } -#endif fix->u.cpool.shrink_list--; p = fix->list; fix->list = *((void **) p); @@ -1477,10 +1433,8 @@ fix_cpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs) int ix, o; int flush = flgs == 0; -#ifdef USE_THREADS if (allctr->thread_safe) erts_mtx_lock(&allctr->mutex); -#endif for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) { ErtsAlcFixList_t *fix = &allctr->fix[ix]; @@ -1520,10 +1474,8 @@ fix_cpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs) if (all_empty) sched_fix_shrink(allctr, 0); -#ifdef USE_THREADS if (allctr->thread_safe) erts_mtx_unlock(&allctr->mutex); -#endif return res; } @@ -1635,10 +1587,8 @@ fix_nocpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs) int ix, o; int flush = flgs == 0; -#ifdef USE_THREADS if (allctr->thread_safe) erts_mtx_lock(&allctr->mutex); -#endif for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) { ErtsAlcFixList_t *fix = &allctr->fix[ix]; @@ -1680,10 +1630,8 @@ fix_nocpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs) if (all_empty) sched_fix_shrink(allctr, 0); -#ifdef USE_THREADS if (allctr->thread_safe) erts_mtx_unlock(&allctr->mutex); -#endif return res; } @@ -1709,7 +1657,6 @@ dealloc_mbc(Allctr_t *allctr, Carrier_t *crr) dealloc_carrier(allctr, crr, 1); } -#ifdef ERTS_SMP static ERTS_INLINE Allctr_t* get_pref_allctr(void *extra) @@ -1750,7 +1697,7 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep, crr = BLK_TO_SBC(blk); if (sizep) *sizep = SBC_BLK_SZ(blk) - ABLK_HDR_SZ; - iallctr = erts_smp_atomic_read_dirty(&crr->allctr); + iallctr = erts_atomic_read_dirty(&crr->allctr); } else { crr = ABLK_TO_MBC(blk); @@ -1758,10 +1705,10 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep, if (sizep) *sizep = MBC_ABLK_SZ(blk) - ABLK_HDR_SZ; if (!ERTS_ALC_IS_CPOOL_ENABLED(pref_allctr)) - iallctr = erts_smp_atomic_read_dirty(&crr->allctr); + iallctr = erts_atomic_read_dirty(&crr->allctr); else { int locked_pref_allctr = 0; - iallctr = erts_smp_atomic_read_ddrb(&crr->allctr); + iallctr = erts_atomic_read_ddrb(&crr->allctr); if (ERTS_ALC_TS_PREF_LOCK_IF_USED == pref_lock && pref_allctr->thread_safe) { @@ -1777,7 +1724,7 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep, erts_aint_t act; ERTS_ALC_CPOOL_ASSERT(!(iallctr & ERTS_CRR_ALCTR_FLG_BUSY)); - act = erts_smp_atomic_cmpxchg_ddrb(&crr->allctr, + act = erts_atomic_cmpxchg_ddrb(&crr->allctr, iallctr|ERTS_CRR_ALCTR_FLG_BUSY, iallctr); if (act == iallctr) { @@ -2152,10 +2099,10 @@ handle_delayed_dealloc(Allctr_t *allctr, ERTS_ALC_CPOOL_ASSERT(ERTS_ALC_IS_CPOOL_ENABLED(allctr)); ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr); ERTS_ALC_CPOOL_ASSERT(((erts_aint_t) allctr) - != (erts_smp_atomic_read_nob(&crr->allctr) + != (erts_atomic_read_nob(&crr->allctr) & ~ERTS_CRR_ALCTR_FLG_MASK)); - erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); + erts_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); schedule_dealloc_carrier(allctr, crr); } @@ -2201,9 +2148,7 @@ enqueue_dealloc_other_instance(ErtsAlcType_t type, erts_alloc_notify_delayed_dealloc(allctr->ix); } -#endif -#ifdef ERTS_SMP static void 
set_new_allctr_abandon_limit(Allctr_t *allctr); static void @@ -2265,7 +2210,6 @@ erts_alcu_check_delayed_dealloc(Allctr_t *allctr, thr_prgr_p, more_work); } -#endif #define ERTS_ALCU_HANDLE_DD_IN_OP(Allctr, Locked) \ handle_delayed_dealloc((Allctr), (Locked), 1, \ @@ -2276,24 +2220,18 @@ dealloc_block(Allctr_t *allctr, void *ptr, ErtsAlcFixList_t *fix, int dec_cc_on_ { Block_t *blk = UMEM2BLK(ptr); - ERTS_SMP_LC_ASSERT(!allctr->thread_safe + ERTS_LC_ASSERT(!allctr->thread_safe || erts_lc_mtx_is_locked(&allctr->mutex)); if (IS_SBC_BLK(blk)) { destroy_carrier(allctr, blk, NULL); -#ifdef ERTS_SMP if (fix && ERTS_ALC_IS_CPOOL_ENABLED(allctr)) { ErtsAlcType_t type = ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type; if (!(type & ERTS_ALC_FIX_NO_UNUSE)) fix->u.cpool.used--; fix->u.cpool.allocated--; } -#endif } -#ifndef ERTS_SMP - else - mbc_free(allctr, ptr, NULL); -#else else if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr)) mbc_free(allctr, ptr, NULL); else { @@ -2323,7 +2261,6 @@ dealloc_block(Allctr_t *allctr, void *ptr, ErtsAlcFixList_t *fix, int dec_cc_on_ erts_alloc_notify_delayed_dealloc(used_allctr->ix); } } -#endif } /* Multi block carrier alloc/realloc/free ... */ @@ -2571,9 +2508,7 @@ mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp) else { (*allctr->link_free_block)(allctr, blk); HARD_CHECK_BLK_CARRIER(allctr, blk); -#ifdef ERTS_SMP check_abandon_carrier(allctr, blk, busy_pcrr_pp); -#endif } } @@ -2607,10 +2542,8 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs, return NULL; #else /* !MBC_REALLOC_ALWAYS_MOVES */ -#ifdef ERTS_SMP if (busy_pcrr_pp && *busy_pcrr_pp) goto realloc_move; /* Don't want to use carrier in pool */ -#endif get_blk_sz = blk_sz = UMEMSZ2BLKSZ(allctr, size); @@ -2731,9 +2664,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs, HARD_CHECK_BLK_CARRIER(allctr, blk); -#ifdef ERTS_SMP check_abandon_carrier(allctr, nxt_blk, NULL); -#endif return p; } @@ -2845,9 +2776,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs, if (cand_blk_sz < get_blk_sz) { /* We wont fit in cand_blk get a new one */ -#ifdef ERTS_SMP realloc_move: -#endif #endif /* !MBC_REALLOC_ALWAYS_MOVES */ new_p = mbc_alloc(allctr, size); @@ -2949,7 +2878,6 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs, #endif /* !MBC_REALLOC_ALWAYS_MOVES */ } -#ifdef ERTS_SMP #define ERTS_ALC_MAX_DEALLOC_CARRIER 10 #define ERTS_ALC_CPOOL_MAX_FETCH_INSPECT 20 @@ -3120,7 +3048,7 @@ cpool_insert(Allctr_t *allctr, Carrier_t *crr) ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */ || erts_thr_progress_is_managed_thread()); - ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_read_nob(&crr->allctr) + ERTS_ALC_CPOOL_ASSERT(erts_atomic_read_nob(&crr->allctr) == (erts_aint_t) allctr); erts_atomic_add_nob(&allctr->cpool.stat.blocks_size, @@ -3190,7 +3118,7 @@ cpool_insert(Allctr_t *allctr, Carrier_t *crr) (erts_aint_t) &crr->cpool, (erts_aint_t) cpd1p); - erts_smp_atomic_set_wb(&crr->allctr, + erts_atomic_set_wb(&crr->allctr, ((erts_aint_t) allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL); LTTNG3(carrier_pool_put, ERTS_ALC_A2AD(allctr->alloc_no), allctr->ix, CARRIER_SZ(crr)); } @@ -3322,11 +3250,11 @@ cpool_fetch(Allctr_t *allctr, UWord size) ASSERT(!is_in_list(&allctr->cpool.traitor_list, dl)); ASSERT(crr->cpool.orig_allctr == allctr); dl = dl->next; - exp = erts_smp_atomic_read_rb(&crr->allctr); + exp = erts_atomic_read_rb(&crr->allctr); if ((exp & ERTS_CRR_ALCTR_FLG_MASK) == ERTS_CRR_ALCTR_FLG_IN_POOL && erts_atomic_read_nob(&crr->cpool.max_size) 
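
The ERTS_ALC_CPOOL_DEBUG branch in check_dc_list above is a small idiom worth noting: in debug builds the owner word is written with an atomic exchange purely so the replaced value can be asserted on, while release builds use a plain store. A generic sketch:

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

static void set_owner(atomic_uintptr_t *owner, uintptr_t new_val,
                      uintptr_t expect_old)
{
#ifdef DEBUG
    uintptr_t old = atomic_exchange(owner, new_val);
    assert(old == expect_old);
#else
    (void) expect_old;
    atomic_store(owner, new_val);
#endif
}
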
>= size) { /* Try to fetch it... */ - act = erts_smp_atomic_cmpxchg_mb(&crr->allctr, + act = erts_atomic_cmpxchg_mb(&crr->allctr, (erts_aint_t) allctr, exp); if (act == exp) { @@ -3368,12 +3296,12 @@ cpool_fetch(Allctr_t *allctr, UWord size) ASSERT(dl != &allctr->cpool.pooled_list); ASSERT(crr->cpool.orig_allctr == allctr); dl = dl->next; - exp = erts_smp_atomic_read_rb(&crr->allctr); + exp = erts_atomic_read_rb(&crr->allctr); if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) { if (!(exp & ERTS_CRR_ALCTR_FLG_BUSY) && erts_atomic_read_nob(&crr->cpool.max_size) >= size) { /* Try to fetch it... */ - act = erts_smp_atomic_cmpxchg_mb(&crr->allctr, + act = erts_atomic_cmpxchg_mb(&crr->allctr, (erts_aint_t) allctr, exp); if (act == exp) { @@ -3449,12 +3377,12 @@ cpool_fetch(Allctr_t *allctr, UWord size) has_passed_sentinel = 1; } crr = (Carrier_t *)(((char *)cpdp) - offsetof(Carrier_t, cpool)); - exp = erts_smp_atomic_read_rb(&crr->allctr); + exp = erts_atomic_read_rb(&crr->allctr); if (((exp & (ERTS_CRR_ALCTR_FLG_MASK)) == ERTS_CRR_ALCTR_FLG_IN_POOL) && (erts_atomic_read_nob(&cpdp->max_size) >= size)) { erts_aint_t act; /* Try to fetch it... */ - act = erts_smp_atomic_cmpxchg_mb(&crr->allctr, + act = erts_atomic_cmpxchg_mb(&crr->allctr, (erts_aint_t) allctr, exp); if (act == exp) { @@ -3477,11 +3405,11 @@ check_dc_list: Block_t* blk; unlink_carrier(&allctr->cpool.dc_list, crr); #ifdef ERTS_ALC_CPOOL_DEBUG - ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_xchg_nob(&crr->allctr, + ERTS_ALC_CPOOL_ASSERT(erts_atomic_xchg_nob(&crr->allctr, ((erts_aint_t) allctr)) == (((erts_aint_t) allctr) & ~ERTS_CRR_ALCTR_FLG_MASK)); #else - erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); + erts_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); #endif blk = MBC_TO_FIRST_BLK(allctr, crr); ASSERT(FBLK_TO_MBC(blk) == crr); @@ -3584,7 +3512,7 @@ schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr) ERTS_ALC_CPOOL_ASSERT(crr == FBLK_TO_MBC(blk)); ERTS_ALC_CPOOL_ASSERT(crr == FIRST_BLK_TO_MBC(allctr, blk)); ERTS_ALC_CPOOL_ASSERT(((erts_aint_t) allctr) - == (erts_smp_atomic_read_nob(&crr->allctr) + == (erts_atomic_read_nob(&crr->allctr) & ~ERTS_CRR_ALCTR_FLG_MASK)); if (ddq_enqueue(&orig_allctr->dd.q, BLK2UMEM(blk), cinit)) @@ -3735,7 +3663,6 @@ cpool_read_stat(Allctr_t *allctr, UWord *nocp, UWord *cszp, UWord *nobp, UWord * } -#endif /* ERTS_SMP */ #ifdef DEBUG @@ -3836,7 +3763,6 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags) blk_sz = UMEMSZ2BLKSZ(allctr, umem_sz); } -#ifdef ERTS_SMP allctr->cpool.disable_abandon = ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON; if ((flags & (CFLG_MBC|CFLG_NO_CPOOL)) == CFLG_MBC @@ -3852,7 +3778,6 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags) return blk; } } -#endif #if HAVE_ERTS_MSEG @@ -3982,9 +3907,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags) allctr->main_carrier = crr; } -#ifdef ERTS_SMP cpool_init_carrier_data(allctr, crr); -#endif link_carrier(&allctr->mbc_list, crr); @@ -4204,19 +4127,17 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp) } #endif -#ifdef ERTS_SMP if (busy_pcrr_pp && *busy_pcrr_pp) { ERTS_ALC_CPOOL_ASSERT(*busy_pcrr_pp == crr); *busy_pcrr_pp = NULL; - ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_read_nob(&crr->allctr) + ERTS_ALC_CPOOL_ASSERT(erts_atomic_read_nob(&crr->allctr) == (((erts_aint_t) allctr) | ERTS_CRR_ALCTR_FLG_IN_POOL | ERTS_CRR_ALCTR_FLG_BUSY)); - erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); + erts_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); cpool_delete(allctr, 
allctr, crr); } else -#endif { unlink_carrier(&allctr->mbc_list, crr); #if HAVE_ERTS_MSEG @@ -4247,11 +4168,7 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp) } #endif -#ifdef ERTS_SMP schedule_dealloc_carrier(allctr, crr); -#else - dealloc_mbc(allctr, crr); -#endif } } @@ -4294,9 +4211,7 @@ static struct { Eterm fix_types; Eterm mbcs; -#ifdef ERTS_SMP Eterm mbcs_pool; -#endif Eterm sbcs; Eterm sys_alloc_carriers_size; @@ -4384,9 +4299,7 @@ init_atoms(Allctr_t *allctr) AM_INIT(fix_types); AM_INIT(mbcs); -#ifdef ERTS_SMP AM_INIT(mbcs_pool); -#endif AM_INIT(sbcs); AM_INIT(sys_alloc_carriers_size); @@ -4636,7 +4549,6 @@ sz_info_carriers(Allctr_t *allctr, return res; } -#ifdef ERTS_SMP static Eterm info_cpool(Allctr_t *allctr, @@ -4690,7 +4602,6 @@ info_cpool(Allctr_t *allctr, return res; } -#endif /* ERTS_SMP */ static Eterm info_carriers(Allctr_t *allctr, @@ -4945,11 +4856,7 @@ info_options(Allctr_t *allctr, return res; } -#ifdef ERTS_SMP acul = allctr->cpool.util_limit; -#else - acul = 0; -#endif if (print_to_p) { char topt[21]; /* Enough for any 64-bit integer */ @@ -5132,19 +5039,15 @@ erts_alcu_info_options(Allctr_t *allctr, if (hpp || szp) ensure_atoms_initialized(allctr); -#ifdef USE_THREADS if (allctr->thread_safe) { erts_allctr_wrapper_pre_lock(); erts_mtx_lock(&allctr->mutex); } -#endif res = info_options(allctr, print_to_p, print_to_arg, hpp, szp); -#ifdef USE_THREADS if (allctr->thread_safe) { erts_mtx_unlock(&allctr->mutex); erts_allctr_wrapper_pre_unlock(); } -#endif return res; } @@ -5160,9 +5063,7 @@ erts_alcu_sz_info(Allctr_t *allctr, Uint *szp) { Eterm res, mbcs, sbcs, fix = THE_NON_VALUE; -#ifdef ERTS_SMP Eterm mbcs_pool; -#endif res = THE_NON_VALUE; @@ -5177,12 +5078,10 @@ erts_alcu_sz_info(Allctr_t *allctr, if (hpp || szp) ensure_atoms_initialized(allctr); -#ifdef USE_THREADS if (allctr->thread_safe) { erts_allctr_wrapper_pre_lock(); erts_mtx_lock(&allctr->mutex); } -#endif ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr); @@ -5198,23 +5097,19 @@ erts_alcu_sz_info(Allctr_t *allctr, fix = sz_info_fix(allctr, internal, print_to_p, print_to_arg, hpp, szp); mbcs = sz_info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p, print_to_arg, hpp, szp); -#ifdef ERTS_SMP if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) mbcs_pool = info_cpool(allctr, 1, "mbcs_pool ", print_to_p, print_to_arg, hpp, szp); else mbcs_pool = THE_NON_VALUE; /* shut up annoying warning... 
*/ -#endif sbcs = sz_info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p, print_to_arg, hpp, szp); if (hpp || szp) { res = NIL; add_2tup(hpp, szp, &res, am.sbcs, sbcs); -#ifdef ERTS_SMP if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) add_2tup(hpp, szp, &res, am.mbcs_pool, mbcs_pool); -#endif add_2tup(hpp, szp, &res, am.mbcs, mbcs); add_fix_types(allctr, internal, hpp, szp, &res, fix); } @@ -5225,12 +5120,10 @@ erts_alcu_sz_info(Allctr_t *allctr, } -#ifdef USE_THREADS if (allctr->thread_safe) { erts_mtx_unlock(&allctr->mutex); erts_allctr_wrapper_pre_unlock(); } -#endif return res; } @@ -5246,9 +5139,7 @@ erts_alcu_info(Allctr_t *allctr, Uint *szp) { Eterm res, sett, mbcs, sbcs, calls, fix = THE_NON_VALUE; -#ifdef ERTS_SMP Eterm mbcs_pool; -#endif res = THE_NON_VALUE; @@ -5263,12 +5154,10 @@ erts_alcu_info(Allctr_t *allctr, if (hpp || szp) ensure_atoms_initialized(allctr); -#ifdef USE_THREADS if (allctr->thread_safe) { erts_allctr_wrapper_pre_lock(); erts_mtx_lock(&allctr->mutex); } -#endif ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr); @@ -5293,13 +5182,11 @@ erts_alcu_info(Allctr_t *allctr, fix = sz_info_fix(allctr, internal, print_to_p, print_to_arg, hpp, szp); mbcs = info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p, print_to_arg, hpp, szp); -#ifdef ERTS_SMP if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) mbcs_pool = info_cpool(allctr, 0, "mbcs_pool ", print_to_p, print_to_arg, hpp, szp); else mbcs_pool = THE_NON_VALUE; /* shut up annoying warning... */ -#endif sbcs = info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p, print_to_arg, hpp, szp); calls = info_calls(allctr, print_to_p, print_to_arg, hpp, szp); @@ -5309,10 +5196,8 @@ erts_alcu_info(Allctr_t *allctr, add_2tup(hpp, szp, &res, am.calls, calls); add_2tup(hpp, szp, &res, am.sbcs, sbcs); -#ifdef ERTS_SMP if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) add_2tup(hpp, szp, &res, am.mbcs_pool, mbcs_pool); -#endif add_2tup(hpp, szp, &res, am.mbcs, mbcs); add_fix_types(allctr, internal, hpp, szp, &res, fix); add_2tup(hpp, szp, &res, am.options, sett); @@ -5328,12 +5213,10 @@ erts_alcu_info(Allctr_t *allctr, } -#ifdef USE_THREADS if (allctr->thread_safe) { erts_mtx_unlock(&allctr->mutex); erts_allctr_wrapper_pre_unlock(); } -#endif return res; } @@ -5343,10 +5226,8 @@ void erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t *fi, int fisz) { -#ifdef USE_THREADS if (allctr->thread_safe) erts_mtx_lock(&allctr->mutex); -#endif size->carriers = allctr->mbcs.curr.norm.mseg.size; size->carriers += allctr->mbcs.curr.norm.sys_alloc.size; @@ -5356,14 +5237,12 @@ erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t * size->blocks = allctr->mbcs.blocks.curr.size; size->blocks += allctr->sbcs.blocks.curr.size; -#ifdef ERTS_SMP if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) { UWord csz, bsz; cpool_read_stat(allctr, NULL, &csz, NULL, &bsz); size->blocks += bsz; size->carriers += csz; } -#endif if (fi) { int ix; @@ -5385,10 +5264,8 @@ erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t * } } -#ifdef USE_THREADS if (allctr->thread_safe) erts_mtx_unlock(&allctr->mutex); -#endif } /* ----------------------------------------------------------------------- */ @@ -5403,7 +5280,7 @@ do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size) ASSERT(allctr); - ERTS_SMP_LC_ASSERT(!allctr->thread_safe + ERTS_LC_ASSERT(!allctr->thread_safe || erts_lc_mtx_is_locked(&allctr->mutex)); ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr); @@ -5436,18 +5313,13 @@ do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size) 
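Aside: every reporting and allocation entry point in this file now uses the same locking bracket: take the instance mutex only when the allocator instance is shared (allctr->thread_safe), and skip it entirely for scheduler-private instances. A tiny self-contained sketch of that bracket, with pthread standing in for erts_mtx_t and all names illustrative:

#include <pthread.h>

struct alctr_like {
    int thread_safe;            /* set once at start; never changes afterwards */
    pthread_mutex_t mutex;
    long cur_blocks;
};

static long report_blocks(struct alctr_like *a)
{
    long n;
    if (a->thread_safe) pthread_mutex_lock(&a->mutex);
    n = a->cur_blocks;          /* consistent snapshot when the instance is shared */
    if (a->thread_safe) pthread_mutex_unlock(&a->mutex);
    return n;
}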
void *erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size) { void *res; -#ifdef ERTS_SMP ASSERT(!"This is not thread safe"); -#elif defined(USE_THREADS) - ASSERT(erts_equal_tids(erts_main_thread, erts_thr_self())); -#endif res = do_erts_alcu_alloc(type, extra, size); DEBUG_CHECK_ALIGNMENT(res); return res; } -#ifdef USE_THREADS void * erts_alcu_alloc_ts(ErtsAlcType_t type, void *extra, Uint size) @@ -5463,7 +5335,6 @@ erts_alcu_alloc_ts(ErtsAlcType_t type, void *extra, Uint size) return res; } -#ifdef ERTS_SMP void * erts_alcu_alloc_thr_spec(ErtsAlcType_t type, void *extra, Uint size) @@ -5503,21 +5374,17 @@ erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size) if (pref_allctr->thread_safe) erts_mtx_lock(&pref_allctr->mutex); -#ifdef ERTS_SMP ASSERT(pref_allctr->dd.use); ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1); -#endif ERTS_ALCU_DBG_CHK_THR_ACCESS(pref_allctr); res = do_erts_alcu_alloc(type, pref_allctr, size); -#ifdef ERTS_SMP if (!res && ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1)) { /* Cleaned up a bit more; try one more time... */ res = do_erts_alcu_alloc(type, pref_allctr, size); } -#endif if (pref_allctr->thread_safe) erts_mtx_unlock(&pref_allctr->mutex); @@ -5528,9 +5395,7 @@ erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size) return res; } -#endif -#endif /* ------------------------------------------------------------------------- */ @@ -5543,7 +5408,7 @@ do_erts_alcu_free(ErtsAlcType_t type, void *extra, void *p, ASSERT(allctr); - ERTS_SMP_LC_ASSERT(!allctr->thread_safe + ERTS_LC_ASSERT(!allctr->thread_safe || erts_lc_mtx_is_locked(&allctr->mutex)); ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr); @@ -5573,7 +5438,6 @@ void erts_alcu_free(ErtsAlcType_t type, void *extra, void *p) do_erts_alcu_free(type, extra, p, NULL); } -#ifdef USE_THREADS void erts_alcu_free_ts(ErtsAlcType_t type, void *extra, void *p) @@ -5584,7 +5448,6 @@ erts_alcu_free_ts(ErtsAlcType_t type, void *extra, void *p) erts_mtx_unlock(&allctr->mutex); } -#ifdef ERTS_SMP void erts_alcu_free_thr_spec(ErtsAlcType_t type, void *extra, void *p) @@ -5634,9 +5497,7 @@ erts_alcu_free_thr_pref(ErtsAlcType_t type, void *extra, void *p) } } -#endif -#endif /* ------------------------------------------------------------------------- */ @@ -5656,7 +5517,7 @@ do_erts_alcu_realloc(ErtsAlcType_t type, ASSERT(allctr); - ERTS_SMP_LC_ASSERT(!allctr->thread_safe + ERTS_LC_ASSERT(!allctr->thread_safe || erts_lc_mtx_is_locked(&allctr->mutex)); ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr); @@ -5785,7 +5646,6 @@ erts_alcu_realloc_mv(ErtsAlcType_t type, void *extra, void *p, Uint size) } -#ifdef USE_THREADS void * erts_alcu_realloc_ts(ErtsAlcType_t type, void *extra, void *ptr, Uint size) @@ -5824,7 +5684,6 @@ erts_alcu_realloc_mv_ts(ErtsAlcType_t type, void *extra, void *p, Uint size) return res; } -#ifdef ERTS_SMP void * erts_alcu_realloc_thr_spec(ErtsAlcType_t type, void *extra, @@ -5905,9 +5764,7 @@ realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size, Allctr_t *pref_allctr, *used_allctr; UWord old_user_size; Carrier_t *busy_pcrr_p; -#ifdef ERTS_SMP int retried; -#endif if (!p) return erts_alcu_alloc_thr_pref(type, extra, size); @@ -5917,12 +5774,10 @@ realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size, if (pref_allctr->thread_safe) erts_mtx_lock(&pref_allctr->mutex); -#ifdef ERTS_SMP ASSERT(pref_allctr->dd.use); ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1); retried = 0; restart: -#endif used_allctr = get_used_allctr(pref_allctr, ERTS_ALC_TS_PREF_LOCK_NO, p, &old_user_size, 
&busy_pcrr_p); @@ -5938,13 +5793,11 @@ restart: 0, &busy_pcrr_p); clear_busy_pool_carrier(used_allctr, busy_pcrr_p); -#ifdef ERTS_SMP if (!res && !retried && ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1)) { /* Cleaned up a bit more; try one more time... */ retried = 1; goto restart; } -#endif if (pref_allctr->thread_safe) erts_mtx_unlock(&pref_allctr->mutex); } @@ -5999,9 +5852,7 @@ erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t type, void *extra, return realloc_thr_pref(type, extra, p, size, 1); } -#endif -#endif /* ------------------------------------------------------------------------- */ @@ -6022,10 +5873,8 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) sys_memcpy((void *) &allctr->mseg_opt, (void *) &erts_mseg_default_opt, sizeof(ErtsMsegOpt_t)); -#ifdef ERTS_SMP if (init->tspec || init->tpref) allctr->mseg_opt.sched_spec = 1; -#endif /* ERTS_SMP */ #endif /* HAVE_ERTS_MSEG */ allctr->name_prefix = init->name_prefix; @@ -6083,7 +5932,6 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) goto error; allctr->min_block_size = UNIT_CEILING(allctr->min_block_size + sizeof(FreeBlkFtr_t)); -#ifdef ERTS_SMP if (init->tpref) { Uint sz = ABLK_HDR_SZ; sz += (init->fix ? @@ -6107,7 +5955,6 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) erts_atomic_init_nob(&allctr->cpool.stat.no_carriers, 0); allctr->cpool.check_limit_count = ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT; allctr->cpool.util_limit = init->ts ? 0 : init->acul; -#endif allctr->sbc_threshold = init->sbct; #ifndef ARCH_64 @@ -6131,7 +5978,6 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) allctr->mseg_opt.abs_shrink_th = ~((UWord) 0) / 100; #endif -#ifdef USE_THREADS if (init->ts) { allctr->thread_safe = 1; @@ -6142,7 +5988,6 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) allctr->debug.saved_tid = 0; #endif } -#endif if(!allctr->get_free_block || !allctr->link_free_block @@ -6155,14 +6000,12 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) if (allctr->mbc_header_size < sizeof(Carrier_t)) goto error; -#ifdef ERTS_SMP allctr->dd.use = 0; if (init->tpref) { allctr->dd.use = 1; init_dd_queue(&allctr->dd.q); allctr->dd.ix = init->ix; } -#endif allctr->mbc_header_size = (UNIT_CEILING(allctr->mbc_header_size + ABLK_HDR_SZ) - ABLK_HDR_SZ); @@ -6216,10 +6059,8 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) | CFLG_NO_CPOOL | CFLG_MAIN_CARRIER); if (!blk) { -#ifdef USE_THREADS if (allctr->thread_safe) erts_mtx_destroy(&allctr->mutex); -#endif erts_exit(ERTS_ABORT_EXIT, "Failed to create main carrier for %salloc\n", init->name_prefix); @@ -6239,9 +6080,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) allctr->fix[i].type_size = init->fix_type_size[i]; allctr->fix[i].list_size = 0; allctr->fix[i].list = NULL; -#ifdef ERTS_SMP ASSERT(allctr->fix[i].type_size >= sizeof(ErtsAllctrFixDDBlock_t)); -#endif if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) { allctr->fix[i].u.cpool.min_list_size = 0; allctr->fix[i].u.cpool.shrink_list = 0; @@ -6261,10 +6100,8 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init) error: -#ifdef USE_THREADS if (allctr->thread_safe) erts_mtx_destroy(&allctr->mutex); -#endif return 0; @@ -6282,10 +6119,8 @@ erts_alcu_stop(Allctr_t *allctr) while (allctr->mbc_list.first) destroy_carrier(allctr, MBC_TO_FIRST_BLK(allctr, allctr->mbc_list.first), NULL); -#ifdef USE_THREADS if (allctr->thread_safe) erts_mtx_destroy(&allctr->mutex); -#endif } @@ -6294,14 +6129,12 @@ erts_alcu_stop(Allctr_t *allctr) void erts_alcu_init(AlcUInit_t *init) { -#ifdef ERTS_SMP int i; for (i = 0; i <= 
ERTS_ALC_A_MAX; i++) { ErtsAlcCPoolData_t *sentinel = &carrier_pool[i].sentinel; erts_atomic_init_nob(&sentinel->next, (erts_aint_t) sentinel); erts_atomic_init_nob(&sentinel->prev, (erts_aint_t) sentinel); } -#endif ERTS_CT_ASSERT(SBC_BLK_SZ_MASK == MBC_FBLK_SZ_MASK); /* see BLK_SZ */ #if HAVE_ERTS_MSEG ASSERT(erts_mseg_unit_size() == ERTS_SACRR_UNIT_SZ); @@ -6369,7 +6202,6 @@ erts_alcu_test(UWord op, UWord a1, UWord a2) case 0x01c: return (UWord) BLK_TO_MBC((Block_t*) a1); case 0x01d: ((Allctr_t*) a1)->add_mbc((Allctr_t*)a1, (Carrier_t*)a2); break; case 0x01e: ((Allctr_t*) a1)->remove_mbc((Allctr_t*)a1, (Carrier_t*)a2); break; -#ifdef ERTS_SMP case 0x01f: return (UWord) sizeof(ErtsAlcCrrPool_t); case 0x020: SET_CARRIER_HDR((Carrier_t *) a2, 0, SCH_SYS_ALLOC|SCH_MBC, (Allctr_t *) a1); @@ -6383,14 +6215,6 @@ erts_alcu_test(UWord op, UWord a1, UWord a2) return (UWord) a2; case 0x023: return (UWord) cpool_is_empty((Allctr_t *) a1); case 0x024: return (UWord) cpool_dbg_is_in_pool((Allctr_t *) a1, (Carrier_t *) a2); -#else - case 0x01f: return (UWord) 0; - case 0x020: return (UWord) 0; - case 0x021: return (UWord) 0; - case 0x022: return (UWord) 0; - case 0x023: return (UWord) 0; - case 0x024: return (UWord) 0; -#endif case 0x025: /* UMEM2BLK_TEST*/ #ifdef DEBUG # ifdef HARD_DEBUG @@ -6448,13 +6272,9 @@ erts_alcu_verify_unused(Allctr_t *allctr) void erts_alcu_verify_unused_ts(Allctr_t *allctr) { -#ifdef USE_THREADS erts_mtx_lock(&allctr->mutex); -#endif erts_alcu_verify_unused(allctr); -#ifdef USE_THREADS erts_mtx_unlock(&allctr->mutex); -#endif } #ifdef DEBUG diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h index aa13cda9fb..faeb5ef368 100644 --- a/erts/emulator/beam/erl_alloc_util.h +++ b/erts/emulator/beam/erl_alloc_util.h @@ -24,10 +24,8 @@ #define ERTS_ALCU_VSN_STR "3.0" #include "erl_alloc_types.h" -#ifdef USE_THREADS #define ERL_THREADS_EMU_INTERNAL__ #include "erl_threads.h" -#endif #include "erl_mseg.h" #include "lttng-wrapper.h" @@ -162,12 +160,10 @@ void * erts_alcu_alloc(ErtsAlcType_t, void *, Uint); void * erts_alcu_realloc(ErtsAlcType_t, void *, void *, Uint); void * erts_alcu_realloc_mv(ErtsAlcType_t, void *, void *, Uint); void erts_alcu_free(ErtsAlcType_t, void *, void *); -#ifdef USE_THREADS void * erts_alcu_alloc_ts(ErtsAlcType_t, void *, Uint); void * erts_alcu_realloc_ts(ErtsAlcType_t, void *, void *, Uint); void * erts_alcu_realloc_mv_ts(ErtsAlcType_t, void *, void *, Uint); void erts_alcu_free_ts(ErtsAlcType_t, void *, void *); -#ifdef ERTS_SMP void * erts_alcu_alloc_thr_spec(ErtsAlcType_t, void *, Uint); void * erts_alcu_realloc_thr_spec(ErtsAlcType_t, void *, void *, Uint); void * erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t, void *, void *, Uint); @@ -176,8 +172,6 @@ void * erts_alcu_alloc_thr_pref(ErtsAlcType_t, void *, Uint); void * erts_alcu_realloc_thr_pref(ErtsAlcType_t, void *, void *, Uint); void * erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t, void *, void *, Uint); void erts_alcu_free_thr_pref(ErtsAlcType_t, void *, void *); -#endif -#endif Eterm erts_alcu_au_info_options(fmtfn_t *, void *, Uint **, Uint *); Eterm erts_alcu_info_options(Allctr_t *, fmtfn_t *, void *, Uint **, Uint *); Eterm erts_alcu_sz_info(Allctr_t *, int, int, fmtfn_t *, void *, Uint **, Uint *); @@ -185,9 +179,7 @@ Eterm erts_alcu_info(Allctr_t *, int, int, fmtfn_t *, void *, Uint **, Uint *); void erts_alcu_init(AlcUInit_t *); void erts_alcu_current_size(Allctr_t *, AllctrSize_t *, ErtsAlcUFixInfo_t *, int); -#ifdef ERTS_SMP void 
erts_alcu_check_delayed_dealloc(Allctr_t *, int, int *, ErtsThrPrgrVal *, int *); -#endif erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t); #ifdef ARCH_32 @@ -304,7 +296,6 @@ void erts_lcnt_update_allocator_locks(int enable); typedef union {char c[ERTS_ALLOC_ALIGN_BYTES]; long l; double d;} Unit_t; -#ifdef ERTS_SMP typedef struct ErtsDoubleLink_t_ { struct ErtsDoubleLink_t_ *next; @@ -323,21 +314,18 @@ typedef struct { ErtsDoubleLink_t abandoned; /* node in pooled_list or traitor_list */ } ErtsAlcCPoolData_t; -#endif typedef struct Carrier_t_ Carrier_t; struct Carrier_t_ { UWord chdr; Carrier_t *next; Carrier_t *prev; - erts_smp_atomic_t allctr; -#ifdef ERTS_SMP + erts_atomic_t allctr; ErtsAlcCPoolData_t cpool; /* Overwritten by block if sbc */ -#endif }; #define ERTS_ALC_CARRIER_TO_ALLCTR(C) \ - ((Allctr_t *) (erts_smp_atomic_read_nob(&(C)->allctr) & ~FLG_MASK)) + ((Allctr_t *) (erts_atomic_read_nob(&(C)->allctr) & ~FLG_MASK)) typedef struct { Carrier_t *first; @@ -430,7 +418,6 @@ typedef struct { } while (0) #endif -#ifdef ERTS_SMP typedef union ErtsAllctrDDBlock_t_ ErtsAllctrDDBlock_t; @@ -473,7 +460,6 @@ typedef struct { } head; } ErtsAllctrDDQueue_t; -#endif typedef struct { size_t type_size; @@ -496,7 +482,6 @@ typedef struct { } ErtsAlcFixList_t; struct Allctr_t_ { -#ifdef ERTS_SMP struct { /* * We want the queue at the beginning of @@ -507,7 +492,6 @@ struct Allctr_t_ { int use; int ix; } dd; -#endif /* Allocator name prefix */ char * name_prefix; @@ -556,7 +540,6 @@ struct Allctr_t_ { /* Carriers */ CarrierList_t mbc_list; CarrierList_t sbc_list; -#ifdef ERTS_SMP struct { /* pooled_list, traitor list and dc_list contain only carriers _created_ by this allocator */ @@ -575,7 +558,6 @@ struct Allctr_t_ { erts_atomic_t no_carriers; } stat; } cpool; -#endif /* Main carrier (if there is one) */ Carrier_t * main_carrier; @@ -618,7 +600,6 @@ struct Allctr_t_ { int fix_shrink_scheduled; ErtsAlcFixList_t *fix; -#ifdef USE_THREADS /* Mutex for this allocator */ erts_mtx_t mutex; int thread_safe; @@ -627,7 +608,6 @@ struct Allctr_t_ { Allctr_t *next; } ts_list; -#endif int atoms_initialized; @@ -650,13 +630,11 @@ struct Allctr_t_ { CarriersStats_t mbcs; #ifdef DEBUG -#ifdef USE_THREADS struct { int saved_tid; erts_tid_t tid; } debug; #endif -#endif }; int erts_alcu_start(Allctr_t *, AllctrInit_t *); diff --git a/erts/emulator/beam/erl_arith.c b/erts/emulator/beam/erl_arith.c index 861532f241..b6625db0d3 100644 --- a/erts/emulator/beam/erl_arith.c +++ b/erts/emulator/beam/erl_arith.c @@ -114,7 +114,7 @@ BIF_RETTYPE intdiv_2(BIF_ALIST_2) } if (is_both_small(BIF_ARG_1,BIF_ARG_2)){ Sint ires = signed_val(BIF_ARG_1) / signed_val(BIF_ARG_2); - if (MY_IS_SSMALL(ires)) + if (IS_SSMALL(ires)) BIF_RET(make_small(ires)); } BIF_RET(erts_int_div(BIF_P, BIF_ARG_1, BIF_ARG_2)); @@ -276,8 +276,12 @@ shift(Process* p, Eterm arg1, Eterm arg2, int right) goto do_bsl; } else if (is_small(arg1) || is_big(arg1)) { /* - * N bsl PositiveBigNum is too large to represent. + * N bsl PositiveBigNum is too large to represent, + * unless N is 0. */ + if (arg1 == make_small(0)) { + BIF_RET(arg1); + } BIF_ERROR(p, SYSTEM_LIMIT); } /* Fall through if the left argument is not an integer. 
*/ @@ -336,8 +340,7 @@ erts_mixed_plus(Process* p, Eterm arg1, Eterm arg2) switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) { case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE): ires = signed_val(arg1) + signed_val(arg2); - ASSERT(MY_IS_SSMALL(ires) == IS_SSMALL(ires)); - if (MY_IS_SSMALL(ires)) { + if (IS_SSMALL(ires)) { return make_small(ires); } else { hp = HAlloc(p, 2); @@ -482,8 +485,7 @@ erts_mixed_minus(Process* p, Eterm arg1, Eterm arg2) switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) { case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE): ires = signed_val(arg1) - signed_val(arg2); - ASSERT(MY_IS_SSMALL(ires) == IS_SSMALL(ires)); - if (MY_IS_SSMALL(ires)) { + if (IS_SSMALL(ires)) { return make_small(ires); } else { hp = HAlloc(p, 2); @@ -1177,8 +1179,7 @@ erts_gc_mixed_plus(Process* p, Eterm* reg, Uint live) switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) { case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE): ires = signed_val(arg1) + signed_val(arg2); - ASSERT(MY_IS_SSMALL(ires) == IS_SSMALL(ires)); - if (MY_IS_SSMALL(ires)) { + if (IS_SSMALL(ires)) { return make_small(ires); } else { if (ERTS_NEED_GC(p, 2)) { @@ -1345,8 +1346,7 @@ erts_gc_mixed_minus(Process* p, Eterm* reg, Uint live) switch ((arg2 & _TAG_IMMED1_MASK) >> _TAG_PRIMARY_SIZE) { case (_TAG_IMMED1_SMALL >> _TAG_PRIMARY_SIZE): ires = signed_val(arg1) - signed_val(arg2); - ASSERT(MY_IS_SSMALL(ires) == IS_SSMALL(ires)); - if (MY_IS_SSMALL(ires)) { + if (IS_SSMALL(ires)) { return make_small(ires); } else { if (ERTS_NEED_GC(p, 2)) { diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c index 9a93034fcb..3ceb2fd368 100644 --- a/erts/emulator/beam/erl_async.c +++ b/erts/emulator/beam/erl_async.c @@ -34,9 +34,6 @@ #define ERTS_ASYNC_PRINT_JOB 0 -#if !defined(ERTS_SMP) && defined(USE_THREADS) && !ERTS_USE_ASYNC_READY_Q -# error "Need async ready queue in non-smp case" -#endif typedef struct _erl_async { DE_Handle* hndl; /* The DE_Handle is needed when port is gone */ @@ -46,16 +43,13 @@ typedef struct _erl_async { ErlDrvPDL pdl; void (*async_invoke)(void*); void (*async_free)(void*); -#if ERTS_USE_ASYNC_READY_Q Uint sched_id; union { ErtsThrQPrepEnQ_t *prep_enq; ErtsThrQFinDeQ_t fin_deq; } q; -#endif } ErtsAsync; -#if ERTS_USE_ASYNC_READY_Q /* * We can do without the enqueue mutex since it isn't needed for @@ -94,7 +88,6 @@ typedef union { char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncReadyQ))]; } ErtsAlgndAsyncReadyQ; -#endif /* ERTS_USE_ASYNC_READY_Q */ typedef struct { ErtsThrQ_t thr_q; @@ -119,12 +112,10 @@ typedef struct { char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncInit))]; } init; ErtsAlgndAsyncQ *queue; -#if ERTS_USE_ASYNC_READY_Q ErtsAlgndAsyncReadyQ *ready_queue; -#endif } ErtsAsyncData; -#if defined(USE_THREADS) && defined(USE_VM_PROBES) +#if defined(USE_VM_PROBES) /* * Some compilers, e.g. 
GCC 4.2.1 and -O3, will optimize away DTrace @@ -140,15 +131,6 @@ int erts_async_thread_suggested_stack_size; /* Initialized by erl_init.c */ static ErtsAsyncData *async; -#ifndef USE_THREADS - -void -erts_init_async(void) -{ - -} - -#else static void *async_main(void *); @@ -158,7 +140,6 @@ async_q(int i) return &async->queue[i].aq; } -#if ERTS_USE_ASYNC_READY_Q static ERTS_INLINE ErtsAsyncReadyQ * async_ready_q(Uint sched_id) @@ -166,16 +147,13 @@ async_ready_q(Uint sched_id) return &async->ready_queue[((int)sched_id)-1].arq; } -#endif void erts_init_async(void) { async = NULL; if (erts_async_max_threads > 0) { -#if ERTS_USE_ASYNC_READY_Q ErtsThrQInit_t qinit = ERTS_THR_Q_INIT_DEFAULT; -#endif erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER; char *ptr, thr_name[16]; size_t tot_size = 0; @@ -183,9 +161,7 @@ erts_init_async(void) tot_size += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncData)); tot_size += sizeof(ErtsAlgndAsyncQ)*erts_async_max_threads; -#if ERTS_USE_ASYNC_READY_Q tot_size += sizeof(ErtsAlgndAsyncReadyQ)*erts_no_schedulers; -#endif ptr = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_ASYNC_DATA, tot_size); @@ -202,7 +178,6 @@ erts_init_async(void) async->queue = (ErtsAlgndAsyncQ *) ptr; ptr += sizeof(ErtsAlgndAsyncQ)*erts_async_max_threads; -#if ERTS_USE_ASYNC_READY_Q qinit.live.queue = ERTS_THR_Q_LIVE_LONG; qinit.live.objects = ERTS_THR_Q_LIVE_SHORT; @@ -222,7 +197,6 @@ erts_init_async(void) erts_thr_q_initialize(&arq->thr_q, &qinit); } -#endif /* Create async threads... */ @@ -253,7 +227,6 @@ erts_init_async(void) } } -#if ERTS_USE_ASYNC_READY_Q void * erts_get_async_ready_queue(Uint sched_id) @@ -261,7 +234,6 @@ erts_get_async_ready_queue(Uint sched_id) return (void *) async ? async_ready_q(sched_id) : NULL; } -#endif static ERTS_INLINE void async_add(ErtsAsync *a, ErtsAsyncQ* q) { @@ -270,10 +242,8 @@ static ERTS_INLINE void async_add(ErtsAsync *a, ErtsAsyncQ* q) #endif if (is_internal_port(a->port)) { -#if ERTS_USE_ASYNC_READY_Q ErtsAsyncReadyQ *arq = async_ready_q(a->sched_id); a->q.prep_enq = erts_thr_q_prepare_enqueue(&arq->thr_q); -#endif /* make sure the driver will stay around */ if (a->hndl) erts_ddll_reference_referenced_driver(a->hndl); @@ -309,10 +279,8 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q, erts_tse_t *tse, ErtsThrQPrepEnQ_t **prep_enq) { -#if ERTS_USE_ASYNC_READY_Q int saved_fin_deq = 0; ErtsThrQFinDeQ_t fin_deq; -#endif #ifdef USE_VM_PROBES int len; #endif @@ -321,12 +289,10 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q, ErtsAsync *a = (ErtsAsync *) erts_thr_q_dequeue(q); if (a) { -#if ERTS_USE_ASYNC_READY_Q *prep_enq = a->q.prep_enq; erts_thr_q_get_finalize_dequeue_data(q, &a->q.fin_deq); if (saved_fin_deq) erts_thr_q_append_finalize_dequeue_data(&a->q.fin_deq, &fin_deq); -#endif #ifdef USE_LTTNG_VM_TRACEPOINTS if (LTTNG_ENABLED(aio_pool_get)) { lttng_decl_portbuf(port_str); @@ -354,7 +320,6 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q, erts_tse_reset(tse); -#if ERTS_USE_ASYNC_READY_Q chk_fin_deq: if (erts_thr_q_get_finalize_dequeue_data(q, &tmp_fin_deq)) { if (!saved_fin_deq) { @@ -364,13 +329,11 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q, erts_thr_q_append_finalize_dequeue_data(&fin_deq, &tmp_fin_deq); } -#endif switch (erts_thr_q_inspect(q, 1)) { case ERTS_THR_Q_DIRTY: break; case ERTS_THR_Q_NEED_THR_PRGR: -#ifdef ERTS_SMP { ErtsThrPrgrVal prgr = erts_thr_q_need_thr_progress(q); erts_thr_progress_wakeup(NULL, prgr); @@ -382,17 +345,14 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t 
*q, erts_tse_wait(tse); break; } -#endif case ERTS_THR_Q_CLEAN: -#if ERTS_USE_ASYNC_READY_Q if (saved_fin_deq) { if (erts_thr_q_finalize_dequeue(&fin_deq)) goto chk_fin_deq; else saved_fin_deq = 0; } -#endif erts_tse_wait(tse); break; @@ -408,15 +368,10 @@ static ERTS_INLINE void call_async_ready(ErtsAsync *a) { -#if ERTS_USE_ASYNC_READY_Q Port *p = erts_id2port_sflgs(a->port, NULL, 0, ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP); -#else - Port *p = erts_thr_id2port_sflgs(a->port, - ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP); -#endif if (!p) { if (a->async_free) { ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_PORT); @@ -432,11 +387,7 @@ static ERTS_INLINE void call_async_ready(ErtsAsync *a) ERTS_MSACC_POP_STATE(); } } -#if ERTS_USE_ASYNC_READY_Q erts_port_release(p); -#else - erts_thr_port_release(p); -#endif } if (a->pdl) driver_pdl_dec_refc(a->pdl); @@ -446,7 +397,6 @@ static ERTS_INLINE void call_async_ready(ErtsAsync *a) static ERTS_INLINE void async_reply(ErtsAsync *a, ErtsThrQPrepEnQ_t *prep_enq) { -#if ERTS_USE_ASYNC_READY_Q ErtsAsyncReadyQ *arq; #if ERTS_ASYNC_PRINT_JOB @@ -465,12 +415,6 @@ static ERTS_INLINE void async_reply(ErtsAsync *a, ErtsThrQPrepEnQ_t *prep_enq) erts_mtx_unlock(&arq->x.data.enq_mtx); #endif -#else /* ERTS_USE_ASYNC_READY_Q */ - - call_async_ready(a); - erts_free(ERTS_ALC_T_ASYNC, (void *) a); - -#endif /* ERTS_USE_ASYNC_READY_Q */ } @@ -486,7 +430,6 @@ static erts_tse_t *async_thread_init(ErtsAsyncQ *aq) erts_tse_t *tse = erts_tse_fetch(); ERTS_DECLARE_DUMMY(Uint no); -#ifdef ERTS_SMP ErtsThrPrgrCallbacks callbacks; callbacks.arg = (void *) tse; @@ -495,15 +438,12 @@ static erts_tse_t *async_thread_init(ErtsAsyncQ *aq) callbacks.wait = NULL; erts_thr_progress_register_unmanaged_thread(&callbacks); -#endif qinit.live.queue = ERTS_THR_Q_LIVE_LONG; qinit.live.objects = ERTS_THR_Q_LIVE_SHORT; qinit.arg = (void *) tse; qinit.notify = async_wakeup; -#if ERTS_USE_ASYNC_READY_Q qinit.auto_finalize_dequeue = 0; -#endif erts_thr_q_initialize(&aq->thr_q, &qinit); @@ -545,12 +485,10 @@ static void *async_main(void* arg) return NULL; } -#endif /* USE_THREADS */ void erts_exit_flush_async(void) { -#ifdef USE_THREADS int i; ErtsAsync a; a.port = NIL; @@ -564,11 +502,8 @@ erts_exit_flush_async(void) async_add(&a, async_q(i)); for (i = 0; i < erts_async_max_threads; i++) erts_thr_join(async->queue[i].aq.thr_id, NULL); -#endif } -#if defined(USE_THREADS) && ERTS_USE_ASYNC_READY_Q - int erts_check_async_ready(void *varq) { ErtsAsyncReadyQ *arq = (ErtsAsyncReadyQ *) varq; @@ -609,18 +544,15 @@ int erts_async_ready_clean(void *varq, void *val) case ERTS_THR_Q_DIRTY: return ERTS_ASYNC_READY_DIRTY; case ERTS_THR_Q_NEED_THR_PRGR: -#ifdef ERTS_SMP *((ErtsThrPrgrVal *) val) = erts_thr_q_need_thr_progress(&arq->thr_q); return ERTS_ASYNC_READY_NEED_THR_PRGR; -#endif case ERTS_THR_Q_CLEAN: break; } return ERTS_ASYNC_READY_CLEAN; } -#endif /* ** Generate a fair async key from an ErlDrvPort @@ -658,28 +590,22 @@ long driver_async(ErlDrvPort ix, unsigned int* key, Port* prt; long id; unsigned int qix; -#if ERTS_USE_ASYNC_READY_Q Uint sched_id; ERTS_MSACC_PUSH_STATE(); sched_id = erts_get_scheduler_id(); if (!sched_id) sched_id = 1; -#else - ERTS_MSACC_PUSH_STATE(); -#endif prt = erts_drvport2port(ix); if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); a = (ErtsAsync*) erts_alloc(ERTS_ALC_T_ASYNC, sizeof(ErtsAsync)); -#if ERTS_USE_ASYNC_READY_Q
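Aside: the "fair async key" mentioned above feeds the dispatch rule a few lines further down: jobs that share a key are always queued on the same async thread (qix = *key % erts_async_max_threads), so requests against one port or file descriptor keep their issue order. A one-line sketch of that invariant; the function name is illustrative, not the emulator's API:

/* Same key => same queue => per-key FIFO ordering across async threads. */
static unsigned pick_async_queue(unsigned key, unsigned nthreads)
{
    return nthreads > 0 ? key % nthreads : 0;
}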
a->sched_id = sched_id; -#endif a->hndl = (DE_Handle*)prt->drv_ptr->handle; a->port = prt->common.id; a->pdl = NULL; @@ -709,7 +635,6 @@ long driver_async(ErlDrvPort ix, unsigned int* key, (*key % erts_async_max_threads) : 0; *key = qix; } -#ifdef USE_THREADS if (erts_async_max_threads > 0) { if (prt->port_data_lock) { driver_pdl_inc_refc(prt->port_data_lock); @@ -718,7 +643,6 @@ long driver_async(ErlDrvPort ix, unsigned int* key, async_add(a, async_q(qix)); return id; } -#endif ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_PORT); (*a->async_invoke)(a->async_data); diff --git a/erts/emulator/beam/erl_async.h b/erts/emulator/beam/erl_async.h index 4b470e7679..70ef247e0a 100644 --- a/erts/emulator/beam/erl_async.h +++ b/erts/emulator/beam/erl_async.h @@ -27,39 +27,12 @@ extern int erts_async_max_threads; #define ERTS_ASYNC_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */ extern int erts_async_thread_suggested_stack_size; - -#ifdef ERTS_SMP -/* - * With smp support we can choose to have, or not to - * have an async ready queue. - */ -#define ERTS_USE_ASYNC_READY_Q 1 -#endif - -#ifndef ERTS_SMP -/* In non-smp case we *need* the async ready queue */ -# undef ERTS_USE_ASYNC_READY_Q -# define ERTS_USE_ASYNC_READY_Q 1 -#endif - -#ifndef ERTS_USE_ASYNC_READY_Q -# define ERTS_USE_ASYNC_READY_Q 0 -#endif - -#ifndef USE_THREADS -# undef ERTS_USE_ASYNC_READY_Q -# define ERTS_USE_ASYNC_READY_Q 0 -#endif /* !USE_THREADS */ -#if ERTS_USE_ASYNC_READY_Q int erts_check_async_ready(void *); int erts_async_ready_clean(void *, void *); void *erts_get_async_ready_queue(Uint sched_id); #define ERTS_ASYNC_READY_CLEAN 0 #define ERTS_ASYNC_READY_DIRTY 1 -#ifdef ERTS_SMP #define ERTS_ASYNC_READY_NEED_THR_PRGR 2 -#endif -#endif /* ERTS_USE_ASYNC_READY_Q */ void erts_init_async(void); void erts_exit_flush_async(void); diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c index dcffde5777..4cafa499a9 100644 --- a/erts/emulator/beam/erl_bif_binary.c +++ b/erts/emulator/beam/erl_bif_binary.c @@ -171,6 +171,16 @@ static void *my_alloc(MyAllocator *my, Uint size) #define ALPHABET_SIZE 256 +typedef struct _findall_data { + Uint pos; + Uint len; +#ifdef HARDDEBUG + Uint id; +#endif + Eterm epos; + Eterm elen; +} FindallData; + typedef struct _ac_node { #ifdef HARDDEBUG Uint32 id; /* To identify h pointer targets when @@ -208,6 +218,103 @@ typedef struct _bm_data { Sint badshift[ALPHABET_SIZE]; } BMData; +typedef struct _ac_find_all_state { + ACNode *q; + Uint pos; + Uint len; + Uint m; + Uint allocated; + FindallData *out; +} ACFindAllState; + +typedef struct _ac_find_first_state { + ACNode *q; + Uint pos; + Uint len; + ACNode *candidate; + Uint candidate_start; +} ACFindFirstState; + +typedef struct _bm_find_all_state { + Sint pos; + Sint len; + Uint m; + Uint allocated; + FindallData *out; +} BMFindAllState; + +typedef struct _bm_find_first_state { + Sint pos; + Sint len; +} BMFindFirstState; + +typedef enum _bf_return { + BF_RESTART = -3, + BF_NOT_FOUND, + BF_BADARG, + BF_OK +} BFReturn; + +typedef struct _binary_find_all_context { + ErtsHeapFactory factory; + Eterm term; + Sint head; + Sint tail; + Uint end_pos; + Uint size; + FindallData *data; + union { + ACFindAllState ac; + BMFindAllState bm; + } d; +} BinaryFindAllContext; + +typedef struct _binary_find_first_context { + Uint pos; + Uint len; + union { + ACFindFirstState ac; + BMFindFirstState bm; + } d; +} BinaryFindFirstContext; + +typedef struct _binary_find_context BinaryFindContext; + +typedef struct _binary_find_search { + void 
(*init) (BinaryFindContext *); + BFReturn (*find) (BinaryFindContext *, byte *); + void (*done) (BinaryFindContext *); +} BinaryFindSearch; + +typedef Eterm (*BinaryFindResult)(Process *, Eterm, BinaryFindContext **); + +typedef enum _binary_find_state { + BFSearch, + BFResult, + BFDone +} BinaryFindState; + +struct _binary_find_context { + Eterm pat_type; + Eterm pat_term; + Binary *pat_bin; + Uint flags; + Uint hsstart; + Uint hsend; + int loop_factor; + int exported; + Uint reds; + BinaryFindState state; + Eterm trap_term; + BinaryFindSearch *search; + BinaryFindResult not_found; + BinaryFindResult found; + union { + BinaryFindAllContext fa; + BinaryFindFirstContext ff; + } u; +}; + #ifdef HARDDEBUG static void dump_bm_data(BMData *bm); static void dump_ac_trie(ACTrie *act); @@ -414,32 +521,25 @@ static void ac_compute_failure_functions(ACTrie *act, ACNode **qbuff) * Basic AC finds the first end before the first start... * */ -typedef struct { - ACNode *q; - Uint pos; - Uint len; - ACNode *candidate; - Uint candidate_start; -} ACFindFirstState; - - -static void ac_init_find_first_match(ACFindFirstState *state, ACTrie *act, Sint startpos, Uint len) +static void ac_init_find_first_match(BinaryFindContext *ctx) { + ACFindFirstState *state = &(ctx->u.ff.d.ac); + ACTrie *act = ERTS_MAGIC_BIN_DATA(ctx->pat_bin); state->q = act->root; - state->pos = startpos; - state->len = len; + state->pos = ctx->hsstart; + state->len = ctx->hsend; state->candidate = NULL; state->candidate_start = 0; } -#define AC_OK 0 -#define AC_NOT_FOUND -1 -#define AC_RESTART -2 #define AC_LOOP_FACTOR 10 -static int ac_find_first_match(ACFindFirstState *state, byte *haystack, - Uint *mpos, Uint *mlen, Uint *reductions) +static BFReturn ac_find_first_match(BinaryFindContext *ctx, byte *haystack) { + ACFindFirstState *state = &(ctx->u.ff.d.ac); + Uint *mpos = &(ctx->u.ff.pos); + Uint *mlen = &(ctx->u.ff.len); + Uint *reductions = &(ctx->reds); ACNode *q = state->q; Uint i = state->pos; ACNode *candidate = state->candidate, *r; @@ -455,7 +555,7 @@ static int ac_find_first_match(ACFindFirstState *state, byte *haystack, state->len = len; state->candidate = candidate; state->candidate_start = candidate_start; - return AC_RESTART; + return BF_RESTART; } while (q->g[haystack[i]] == NULL && q->h != q) { @@ -485,68 +585,33 @@ static int ac_find_first_match(ACFindFirstState *state, byte *haystack, } *reductions = reds; if (!candidate) { - return AC_NOT_FOUND; + return BF_NOT_FOUND; } #ifdef HARDDEBUG dump_ac_node(candidate,0,'?'); #endif *mpos = candidate_start; *mlen = candidate->d; - return AC_OK; + return BF_OK; } -typedef struct _findall_data { - Uint pos; - Uint len; -#ifdef HARDDEBUG - Uint id; -#endif - Eterm epos; - Eterm elen; -} FindallData; - -typedef struct { - ACNode *q; - Uint pos; - Uint len; - Uint m; - Uint allocated; - FindallData *out; -} ACFindAllState; - -static void ac_init_find_all(ACFindAllState *state, ACTrie *act, Sint startpos, Uint len) +static void ac_init_find_all(BinaryFindContext *ctx) { + ACFindAllState *state = &(ctx->u.fa.d.ac); + ACTrie *act = ERTS_MAGIC_BIN_DATA(ctx->pat_bin); state->q = act->root; - state->pos = startpos; - state->len = len; + state->pos = ctx->hsstart; + state->len = ctx->hsend; state->m = 0; state->allocated = 0; state->out = NULL; } -static void ac_restore_find_all(ACFindAllState *state, - const ACFindAllState *src) -{ - memcpy(state, src, sizeof(ACFindAllState)); - if (state->allocated > 0) { - state->out = erts_alloc(ERTS_ALC_T_TMP, sizeof(FindallData) * 
(state->allocated)); - memcpy(state->out, src+1, sizeof(FindallData)*state->m); - } else { - state->out = NULL; - } -} - -static void ac_serialize_find_all(const ACFindAllState *state, - ACFindAllState *dst) -{ - memcpy(dst, state, sizeof(ACFindAllState)); - memcpy(dst+1, state->out, sizeof(FindallData)*state->m); -} - -static void ac_clean_find_all(ACFindAllState *state) +static void ac_clean_find_all(BinaryFindContext *ctx) { + ACFindAllState *state = &(ctx->u.fa.d.ac); if (state->out != NULL) { - erts_free(ERTS_ALC_T_TMP, state->out); + erts_free(ERTS_ALC_T_BINARY_FIND, state->out); } #ifdef HARDDEBUG state->out = NULL; @@ -558,9 +623,10 @@ * Differs from the find_first function in that it stores all matches and the values * are returned only in the state. */ -static int ac_find_all_non_overlapping(ACFindAllState *state, byte *haystack, - Uint *reductions) +static BFReturn ac_find_all_non_overlapping(BinaryFindContext *ctx, byte *haystack) { + ACFindAllState *state = &(ctx->u.fa.d.ac); + Uint *reductions = &(ctx->reds); ACNode *q = state->q; Uint i = state->pos; Uint rstart; @@ -571,7 +637,6 @@ static int ac_find_all_non_overlapping(ACFindAllState *state, byte *haystack, FindallData *out = state->out; register Uint reds = *reductions; - while (i < len) { if (--reds == 0) { state->q = q; @@ -580,7 +645,7 @@ static int ac_find_all_non_overlapping(ACFindAllState *state, byte *haystack, state->m = m; state->allocated = allocated; state->out = out; - return AC_RESTART; + return BF_RESTART; } while (q->g[haystack[i]] == NULL && q->h != q) { q = q->h; @@ -618,11 +683,11 @@ static int ac_find_all_non_overlapping(ACFindAllState *state, byte *haystack, if (m >= allocated) { if (!allocated) { allocated = 10; - out = erts_alloc(ERTS_ALC_T_TMP, + out = erts_alloc(ERTS_ALC_T_BINARY_FIND, sizeof(FindallData) * allocated); } else { allocated *= 2; - out = erts_realloc(ERTS_ALC_T_TMP, out, + out = erts_realloc(ERTS_ALC_T_BINARY_FIND, out, sizeof(FindallData) * allocated); } @@ -649,7 +714,7 @@ static int ac_find_all_non_overlapping(ACFindAllState *state, byte *haystack, *reductions = reds; state->m = m; state->out = out; - return (m == 0) ? AC_NOT_FOUND : AC_OK; + return (m == 0) ? BF_NOT_FOUND : BF_OK; } /* @@ -736,27 +801,22 @@ static void compute_goodshifts(BMData *bmd) erts_free(ERTS_ALC_T_TMP, suffixes); } -typedef struct { - Sint pos; - Sint len; -} BMFindFirstState; - -#define BM_OK 0 /* used only for find_all */ -#define BM_NOT_FOUND -1 -#define BM_RESTART -2 #define BM_LOOP_FACTOR 10
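Aside: bm_find_first_match below advances the search window by the larger of the good-suffix (gs) and bad-character (bs) shifts. For readers unfamiliar with Boyer-Moore, here is a minimal self-contained sketch of the bad-character half on its own; this is Boyer-Moore-Horspool, not the emulator's exact routine, and all names are illustrative:

#include <stddef.h>
#include <string.h>

static long bmh_find(const unsigned char *hay, size_t hlen,
                     const unsigned char *needle, size_t nlen)
{
    size_t shift[256], i, j;
    if (nlen == 0 || hlen < nlen)
        return -1;
    for (i = 0; i < 256; i++)           /* default: skip a whole needle length */
        shift[i] = nlen;
    for (i = 0; i + 1 < nlen; i++)      /* last occurrence decides the skip */
        shift[needle[i]] = nlen - 1 - i;
    for (j = 0; j + nlen <= hlen; j += shift[hay[j + nlen - 1]])
        if (memcmp(hay + j, needle, nlen) == 0)
            return (long) j;            /* match at offset j */
    return -1;
}

/* Should we have a higher value? 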
*/ -static void bm_init_find_first_match(BMFindFirstState *state, Sint startpos, - Uint len) +static void bm_init_find_first_match(BinaryFindContext *ctx) { - state->pos = startpos; - state->len = (Sint) len; + BMFindFirstState *state = &(ctx->u.ff.d.bm); + state->pos = ctx->hsstart; + state->len = ctx->hsend; } - -static Sint bm_find_first_match(BMFindFirstState *state, BMData *bmd, - byte *haystack, Uint *reductions) +static BFReturn bm_find_first_match(BinaryFindContext *ctx, byte *haystack) { + BMFindFirstState *state = &(ctx->u.ff.d.bm); + BMData *bmd = ERTS_MAGIC_BIN_DATA(ctx->pat_bin); + Uint *mpos = &(ctx->u.ff.pos); + Uint *mlen = &(ctx->u.ff.len); + Uint *reductions = &(ctx->reds); Sint blen = bmd->len; Sint len = state->len; Sint *gs = bmd->goodshift; @@ -769,61 +829,37 @@ static Sint bm_find_first_match(BMFindFirstState *state, BMData *bmd, while (j <= len - blen) { if (--reds == 0) { state->pos = j; - return BM_RESTART; + return BF_RESTART; } for (i = blen - 1; i >= 0 && needle[i] == haystack[i + j]; --i) ; if (i < 0) { /* found */ *reductions = reds; - return j; + *mpos = (Uint) j; + *mlen = (Uint) blen; + return BF_OK; } j += MAX(gs[i],bs[haystack[i+j]] - blen + 1 + i); } *reductions = reds; - return BM_NOT_FOUND; + return BF_NOT_FOUND; } -typedef struct { - Sint pos; - Sint len; - Uint m; - Uint allocated; - FindallData *out; -} BMFindAllState; - -static void bm_init_find_all(BMFindAllState *state, Sint startpos, Uint len) +static void bm_init_find_all(BinaryFindContext *ctx) { - state->pos = startpos; - state->len = (Sint) len; + BMFindAllState *state = &(ctx->u.fa.d.bm); + state->pos = ctx->hsstart; + state->len = ctx->hsend; state->m = 0; state->allocated = 0; state->out = NULL; } -static void bm_restore_find_all(BMFindAllState *state, - const BMFindAllState *src) -{ - memcpy(state, src, sizeof(BMFindAllState)); - if (state->allocated > 0) { - state->out = erts_alloc(ERTS_ALC_T_TMP, sizeof(FindallData) * - (state->allocated)); - memcpy(state->out, src+1, sizeof(FindallData)*state->m); - } else { - state->out = NULL; - } -} - -static void bm_serialize_find_all(const BMFindAllState *state, - BMFindAllState *dst) -{ - memcpy(dst, state, sizeof(BMFindAllState)); - memcpy(dst+1, state->out, sizeof(FindallData)*state->m); -} - -static void bm_clean_find_all(BMFindAllState *state) +static void bm_clean_find_all(BinaryFindContext *ctx) { + BMFindAllState *state = &(ctx->u.fa.d.bm); if (state->out != NULL) { - erts_free(ERTS_ALC_T_TMP, state->out); + erts_free(ERTS_ALC_T_BINARY_FIND, state->out); } #ifdef HARDDEBUG state->out = NULL; @@ -835,10 +871,11 @@ * Differs from the find_first function in that it stores all matches and the * values are returned only in the state. 
*/ -static Sint bm_find_all_non_overlapping(BMFindAllState *state, - BMData *bmd, byte *haystack, - Uint *reductions) +static BFReturn bm_find_all_non_overlapping(BinaryFindContext *ctx, byte *haystack) { + BMFindAllState *state = &(ctx->u.fa.d.bm); + BMData *bmd = ERTS_MAGIC_BIN_DATA(ctx->pat_bin); + Uint *reductions = &(ctx->reds); Sint blen = bmd->len; Sint len = state->len; Sint *gs = bmd->goodshift; @@ -857,7 +894,7 @@ static Sint bm_find_all_non_overlapping(BMFindAllState *state, state->m = m; state->allocated = allocated; state->out = out; - return BM_RESTART; + return BF_RESTART; } for (i = blen - 1; i >= 0 && needle[i] == haystack[i + j]; --i) ; @@ -865,10 +902,11 @@ static Sint bm_find_all_non_overlapping(BMFindAllState *state, if (m >= allocated) { if (!allocated) { allocated = 10; - out = erts_alloc(ERTS_ALC_T_TMP, sizeof(FindallData) * allocated); + out = erts_alloc(ERTS_ALC_T_BINARY_FIND, + sizeof(FindallData) * allocated); } else { allocated *= 2; - out = erts_realloc(ERTS_ALC_T_TMP, out, + out = erts_realloc(ERTS_ALC_T_BINARY_FIND, out, sizeof(FindallData) * allocated); } } @@ -883,7 +921,7 @@ static Sint bm_find_all_non_overlapping(BMFindAllState *state, state->m = m; state->out = out; *reductions = reds; - return (m == 0) ? BM_NOT_FOUND : BM_OK; + return (m == 0) ? BF_NOT_FOUND : BF_OK; } /* @@ -1009,51 +1047,160 @@ BIF_RETTYPE binary_compile_pattern_1(BIF_ALIST_1) BIF_RET(ret); } -#define DO_BIN_MATCH_OK 0 -#define DO_BIN_MATCH_BADARG -1 -#define DO_BIN_MATCH_RESTART -2 +#define BF_FLAG_GLOBAL 0x01 +#define BF_FLAG_SPLIT_TRIM 0x02 +#define BF_FLAG_SPLIT_TRIM_ALL 0x04 -#define BINARY_FIND_ALL 0x01 -#define BINARY_SPLIT_TRIM 0x02 -#define BINARY_SPLIT_TRIM_ALL 0x04 +static void bf_context_init(BinaryFindContext *ctx, BinaryFindResult not_found, + BinaryFindResult single, BinaryFindResult global, + Binary *pat_bin); +static BinaryFindContext *bf_context_export(Process *p, BinaryFindContext *src); +static int bf_context_destructor(Binary *ctx_bin); +#ifdef HARDDEBUG +static void bf_context_dump(BinaryFindContext *ctx); +#endif -typedef struct BinaryFindState { - Eterm type; - Uint flags; - Uint hsstart; - Uint hsend; - Eterm (*not_found_result) (Process *, Eterm, struct BinaryFindState *); - Eterm (*single_result) (Process *, Eterm, struct BinaryFindState *, Sint, Sint); - Eterm (*global_result) (Process *, Eterm, struct BinaryFindState *, FindallData *, Uint); -} BinaryFindState; +static BinaryFindSearch bf_search_ac_global = { + ac_init_find_all, + ac_find_all_non_overlapping, + ac_clean_find_all +}; + +static BinaryFindSearch bf_search_ac_single = { + ac_init_find_first_match, + ac_find_first_match, + NULL +}; + +static BinaryFindSearch bf_search_bm_global = { + bm_init_find_all, + bm_find_all_non_overlapping, + bm_clean_find_all +}; + +static BinaryFindSearch bf_search_bm_single = { + bm_init_find_first_match, + bm_find_first_match, + NULL +}; + +static void bf_context_init(BinaryFindContext *ctx, BinaryFindResult not_found, + BinaryFindResult single, BinaryFindResult global, + Binary *pat_bin) +{ + ctx->exported = 0; + ctx->state = BFSearch; + ctx->not_found = not_found; + if (ctx->flags & BF_FLAG_GLOBAL) { + ctx->found = global; + if (ctx->pat_type == am_bm) { + ctx->search = &bf_search_bm_global; + ctx->loop_factor = BM_LOOP_FACTOR; + } else if (ctx->pat_type == am_ac) { + ctx->search = &bf_search_ac_global; + ctx->loop_factor = AC_LOOP_FACTOR; + } + } else { + ctx->found = single; + if (ctx->pat_type == am_bm) { + ctx->search = &bf_search_bm_single; + 
ctx->loop_factor = BM_LOOP_FACTOR; + } else if (ctx->pat_type == am_ac) { + ctx->search = &bf_search_ac_single; + ctx->loop_factor = AC_LOOP_FACTOR; + } + } + ctx->trap_term = THE_NON_VALUE; + ctx->pat_bin = pat_bin; + ctx->search->init(ctx); +} -typedef struct BinaryFindState_bignum { - Eterm bignum_hdr; - BinaryFindState bfs; - union { - BMFindFirstState bmffs; - BMFindAllState bmfas; - ACFindFirstState acffs; - ACFindAllState acfas; - } data; -} BinaryFindState_bignum; - -#define SIZEOF_BINARY_FIND_STATE(S) \ - (sizeof(BinaryFindState)+sizeof(S)) - -#define SIZEOF_BINARY_FIND_ALL_STATE(S) \ - (sizeof(BinaryFindState)+sizeof(S)+(sizeof(FindallData)*(S).m)) - -static Eterm do_match_not_found_result(Process *p, Eterm subject, BinaryFindState *bfs); -static Eterm do_match_single_result(Process *p, Eterm subject, BinaryFindState *bfs, - Sint pos, Sint len); -static Eterm do_match_global_result(Process *p, Eterm subject, BinaryFindState *bfs, - FindallData *fad, Uint fad_sz); -static Eterm do_split_not_found_result(Process *p, Eterm subject, BinaryFindState *bfs); -static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindState *bfs, - Sint pos, Sint len); -static Eterm do_split_global_result(Process *p, Eterm subject, BinaryFindState *bfs, - FindallData *fad, Uint fad_sz); +static BinaryFindContext *bf_context_export(Process *p, BinaryFindContext *src) +{ + Binary *ctx_bin; + BinaryFindContext *ctx; + Eterm *hp; + + ASSERT(src->exported == 0); + ctx_bin = erts_create_magic_binary(sizeof(BinaryFindContext), + bf_context_destructor); + ctx = ERTS_MAGIC_BIN_DATA(ctx_bin); + sys_memcpy(ctx, src, sizeof(BinaryFindContext)); + if (ctx->pat_bin != NULL && ctx->pat_term == THE_NON_VALUE) { + hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE * 2); + ctx->pat_term = erts_mk_magic_ref(&hp, &MSO(p), ctx->pat_bin); + } else { + hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE); + } + ctx->trap_term = erts_mk_magic_ref(&hp, &MSO(p), ctx_bin); + ctx->exported = 1; + return ctx; +} + +static int bf_context_destructor(Binary *ctx_bin) +{ + BinaryFindContext *ctx; + + ctx = ERTS_MAGIC_BIN_DATA(ctx_bin); + if (ctx->state != BFDone) { + if (ctx->search->done != NULL) { + ctx->search->done(ctx); + } + ctx->state = BFDone; + } + return 1; +} + +#ifdef HARDDEBUG +static void bf_context_dump(BinaryFindContext *ctx) +{ + if (ctx->pat_type == am_bm) { + BMData *bm; + bm = ERTS_MAGIC_BIN_DATA(ctx->pat_bin); + dump_bm_data(bm); + } else { + ACTrie *act; + act = ERTS_MAGIC_BIN_DATA(ctx->pat_bin); + dump_ac_trie(act); + } +} +#endif + +static Eterm do_match_not_found_result(Process *p, Eterm subject, BinaryFindContext **ctxp); +static Eterm do_match_single_result(Process *p, Eterm subject, BinaryFindContext **ctxp); +static Eterm do_match_global_result(Process *p, Eterm subject, BinaryFindContext **ctxp); +static Eterm do_split_not_found_result(Process *p, Eterm subject, BinaryFindContext **ctxp); +static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindContext **ctxp); +static Eterm do_split_global_result(Process *p, Eterm subject, BinaryFindContext **ctxp); + +static BFReturn maybe_binary_match_compile(BinaryFindContext *ctx, Eterm arg, Binary **pat_bin) +{ + Eterm *tp; + ctx->pat_term = THE_NON_VALUE; + if (is_tuple(arg)) { + tp = tuple_val(arg); + if (arityval(*tp) != 2 || is_not_atom(tp[1])) { + return BF_BADARG; + } + if (((tp[1] != am_bm) && (tp[1] != am_ac)) || + !is_internal_magic_ref(tp[2])) { + return BF_BADARG; + } + *pat_bin = erts_magic_ref2bin(tp[2]); + if ((tp[1] == am_bm && + 
ERTS_MAGIC_BIN_DESTRUCTOR(*pat_bin) != cleanup_my_data_bm) || + (tp[1] == am_ac && + ERTS_MAGIC_BIN_DESTRUCTOR(*pat_bin) != cleanup_my_data_ac)) { + *pat_bin = NULL; + return BF_BADARG; + } + ctx->pat_type = tp[1]; + ctx->pat_term = tp[2]; + } else if (do_binary_match_compile(arg, &(ctx->pat_type), pat_bin) != 0) { + return BF_BADARG; + } + return BF_OK; +} static int parse_match_opts_list(Eterm l, Eterm bin, Uint *posp, Uint *endp) { @@ -1134,17 +1281,17 @@ static int parse_split_opts_list(Eterm l, Eterm bin, Uint *posp, Uint *endp, Uin Uint orig_size; if (is_atom(t)) { if (t == am_global) { - *optp |= BINARY_FIND_ALL; + *optp |= BF_FLAG_GLOBAL; l = CDR(list_val(l)); continue; } if (t == am_trim) { - *optp |= BINARY_SPLIT_TRIM; + *optp |= BF_FLAG_SPLIT_TRIM; l = CDR(list_val(l)); continue; } if (t == am_trim_all) { - *optp |= BINARY_SPLIT_TRIM_ALL; + *optp |= BF_FLAG_SPLIT_TRIM_ALL; l = CDR(list_val(l)); continue; } @@ -1197,266 +1344,160 @@ static int parse_split_opts_list(Eterm l, Eterm bin, Uint *posp, Uint *endp, Uin } } -static int do_binary_find(Process *p, Eterm subject, BinaryFindState *bfs, Binary *bin, - Eterm state_term, Eterm *res_term) +static BFReturn do_binary_find(Process *p, Eterm subject, BinaryFindContext **ctxp, + Binary *pat_bin, Binary *ctx_bin, Eterm *res_term) { - byte *bytes; - Uint bitoffs, bitsize; - byte *temp_alloc = NULL; - BinaryFindState_bignum *state_ptr = NULL; + BinaryFindContext *ctx; + int is_first_call; + Uint initial_reds; + BFReturn runres; - ERTS_GET_BINARY_BYTES(subject, bytes, bitoffs, bitsize); - if (bitsize != 0) { - goto badarg; - } - if (bitoffs != 0) { - bytes = erts_get_aligned_binary_bytes(subject, &temp_alloc); - } - if (state_term != NIL) { - state_ptr = (BinaryFindState_bignum *)(big_val(state_term)); - bfs = &(state_ptr->bfs); + if (ctx_bin == NULL) { + is_first_call = 1; + ctx = *ctxp; + } else { + is_first_call = 0; + ctx = ERTS_MAGIC_BIN_DATA(ctx_bin); + ctx->pat_bin = pat_bin; + *ctxp = ctx; } - if (bfs->flags & BINARY_FIND_ALL) { - if (bfs->type == am_bm) { - BMData *bm; - Sint pos; - BMFindAllState state; - Uint reds = get_reds(p, BM_LOOP_FACTOR); - Uint save_reds = reds; + initial_reds = ctx->reds = get_reds(p, ctx->loop_factor); - bm = (BMData *) ERTS_MAGIC_BIN_DATA(bin); -#ifdef HARDDEBUG - dump_bm_data(bm); -#endif - if (state_term == NIL) { - bm_init_find_all(&state, bfs->hsstart, bfs->hsend); - } else { - bm_restore_find_all(&state, &(state_ptr->data.bmfas)); - } + switch (ctx->state) { + case BFSearch: { + byte *bytes; + Uint bitoffs, bitsize; + byte *temp_alloc = NULL; - pos = bm_find_all_non_overlapping(&state, bm, bytes, &reds); - if (pos == BM_NOT_FOUND) { - *res_term = bfs->not_found_result(p, subject, bfs); - } else if (pos == BM_RESTART) { - int x = - (SIZEOF_BINARY_FIND_ALL_STATE(state) / sizeof(Eterm)) + - !!(SIZEOF_BINARY_FIND_ALL_STATE(state) % sizeof(Eterm)); -#ifdef HARDDEBUG - erts_printf("Trap bm!\n"); -#endif - state_ptr = (BinaryFindState_bignum*) HAlloc(p, x+1); - state_ptr->bignum_hdr = make_pos_bignum_header(x); - memcpy(&state_ptr->bfs, bfs, sizeof(BinaryFindState)); - bm_serialize_find_all(&state, &state_ptr->data.bmfas); - *res_term = make_big(&state_ptr->bignum_hdr); - erts_free_aligned_binary_bytes(temp_alloc); - bm_clean_find_all(&state); - return DO_BIN_MATCH_RESTART; - } else { - *res_term = bfs->global_result(p, subject, bfs, state.out, state.m); - } - erts_free_aligned_binary_bytes(temp_alloc); - bm_clean_find_all(&state); - BUMP_REDS(p, (save_reds - reds) / BM_LOOP_FACTOR); - return 
DO_BIN_MATCH_OK; - } else if (bfs->type == am_ac) { - ACTrie *act; - int acr; - ACFindAllState state; - Uint reds = get_reds(p, AC_LOOP_FACTOR); - Uint save_reds = reds; - - act = (ACTrie *) ERTS_MAGIC_BIN_DATA(bin); + ERTS_GET_BINARY_BYTES(subject, bytes, bitoffs, bitsize); + if (bitsize != 0) { + goto badarg; + } + if (bitoffs != 0) { + bytes = erts_get_aligned_binary_bytes(subject, &temp_alloc); + } #ifdef HARDDEBUG - dump_ac_trie(act); + bf_context_dump(ctx); #endif - if (state_term == NIL) { - ac_init_find_all(&state, act, bfs->hsstart, bfs->hsend); - } else { - ac_restore_find_all(&state, &(state_ptr->data.acfas)); - } - acr = ac_find_all_non_overlapping(&state, bytes, &reds); - if (acr == AC_NOT_FOUND) { - *res_term = bfs->not_found_result(p, subject, bfs); - } else if (acr == AC_RESTART) { - int x = - (SIZEOF_BINARY_FIND_ALL_STATE(state) / sizeof(Eterm)) + - !!(SIZEOF_BINARY_FIND_ALL_STATE(state) % sizeof(Eterm)); + runres = ctx->search->find(ctx, bytes); + if (runres == BF_NOT_FOUND) { + *res_term = ctx->not_found(p, subject, &ctx); + *ctxp = ctx; + } else if (runres == BF_RESTART) { #ifdef HARDDEBUG + if (ctx->pat_type == am_ac) { erts_printf("Trap ac!\n"); -#endif - state_ptr = (BinaryFindState_bignum*) HAlloc(p, x+1); - state_ptr->bignum_hdr = make_pos_bignum_header(x); - memcpy(&state_ptr->bfs, bfs, sizeof(BinaryFindState)); - ac_serialize_find_all(&state, &state_ptr->data.acfas); - *res_term = make_big(&state_ptr->bignum_hdr); - erts_free_aligned_binary_bytes(temp_alloc); - ac_clean_find_all(&state); - return DO_BIN_MATCH_RESTART; - } else { - *res_term = bfs->global_result(p, subject, bfs, state.out, state.m); - } - erts_free_aligned_binary_bytes(temp_alloc); - ac_clean_find_all(&state); - BUMP_REDS(p, (save_reds - reds) / AC_LOOP_FACTOR); - return DO_BIN_MATCH_OK; - } - } else { - if (bfs->type == am_bm) { - BMData *bm; - Sint pos; - BMFindFirstState state; - Uint reds = get_reds(p, BM_LOOP_FACTOR); - Uint save_reds = reds; - - bm = (BMData *) ERTS_MAGIC_BIN_DATA(bin); -#ifdef HARDDEBUG - dump_bm_data(bm); -#endif - if (state_term == NIL) { - bm_init_find_first_match(&state, bfs->hsstart, bfs->hsend); } else { - memcpy(&state, &state_ptr->data.bmffs, sizeof(BMFindFirstState)); - } - -#ifdef HARDDEBUG - erts_printf("(bm) state->pos = %ld, state->len = %lu\n",state.pos, - state.len); -#endif - pos = bm_find_first_match(&state, bm, bytes, &reds); - if (pos == BM_NOT_FOUND) { - *res_term = bfs->not_found_result(p, subject, bfs); - } else if (pos == BM_RESTART) { - int x = - (SIZEOF_BINARY_FIND_STATE(state) / sizeof(Eterm)) + - !!(SIZEOF_BINARY_FIND_STATE(state) % sizeof(Eterm)); -#ifdef HARDDEBUG erts_printf("Trap bm!\n"); + } #endif - state_ptr = (BinaryFindState_bignum*) HAlloc(p, x+1); - state_ptr->bignum_hdr = make_pos_bignum_header(x); - memcpy(&state_ptr->bfs, bfs, sizeof(BinaryFindState)); - memcpy(&state_ptr->data.acffs, &state, sizeof(BMFindFirstState)); - *res_term = make_big(&state_ptr->bignum_hdr); - erts_free_aligned_binary_bytes(temp_alloc); - return DO_BIN_MATCH_RESTART; - } else { - *res_term = bfs->single_result(p, subject, bfs, pos, bm->len); + if (is_first_call) { + ctx = bf_context_export(p, ctx); + *ctxp = ctx; + erts_set_gc_state(p, 0); } erts_free_aligned_binary_bytes(temp_alloc); - BUMP_REDS(p, (save_reds - reds) / BM_LOOP_FACTOR); - return DO_BIN_MATCH_OK; - } else if (bfs->type == am_ac) { - ACTrie *act; - Uint pos, rlen; - int acr; - ACFindFirstState state; - Uint reds = get_reds(p, AC_LOOP_FACTOR); - Uint save_reds = reds; -
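Aside: the restart machinery being deleted here serialized search state into a fake bignum on the process heap; the replacement, visible above, keeps the state in a magic binary via bf_context_export(), disables garbage collection with erts_set_gc_state(p, 0) while trapped, and re-enters do_binary_find() with the same context until it stops returning BF_RESTART. A compressed sketch of that yield-and-resume shape in plain C; the names are illustrative and the budget plays the role of reductions:

struct yielding_search {
    long pos;           /* persistent cursor, survives yields */
    long end;           /* where the scan stops */
    int done;           /* set once pos reaches end */
};

/* One slice of work: consume at most 'budget' steps, then yield. */
static int run_slice(struct yielding_search *st, long budget)
{
    while (st->pos < st->end && budget-- > 0)
        st->pos++;      /* stand-in for inspecting one byte of the subject */
    st->done = (st->pos >= st->end);
    return st->done;    /* 0 => trap: caller must re-invoke with the same st */
}

/* In the emulator this loop is the scheduler re-calling the trapped BIF. */
static void drive(struct yielding_search *st)
{
    while (!run_slice(st, 4000))
        ;
}

- act = (ACTrie *)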
ERTS_MAGIC_BIN_DATA(bin); -#ifdef HARDDEBUG - dump_ac_trie(act); -#endif - if (state_term == NIL) { - ac_init_find_first_match(&state, act, bfs->hsstart, bfs->hsend); - } else { - memcpy(&state, &state_ptr->data.acffs, sizeof(ACFindFirstState)); + *res_term = THE_NON_VALUE; + BUMP_ALL_REDS(p); + return BF_RESTART; + } else { + *res_term = ctx->found(p, subject, &ctx); + *ctxp = ctx; + } + erts_free_aligned_binary_bytes(temp_alloc); + if (*res_term == THE_NON_VALUE) { + if (is_first_call) { + erts_set_gc_state(p, 0); } - acr = ac_find_first_match(&state, bytes, &pos, &rlen, &reds); - if (acr == AC_NOT_FOUND) { - *res_term = bfs->not_found_result(p, subject, bfs); - } else if (acr == AC_RESTART) { - int x = - (SIZEOF_BINARY_FIND_STATE(state) / sizeof(Eterm)) + - !!(SIZEOF_BINARY_FIND_STATE(state) % sizeof(Eterm)); -#ifdef HARDDEBUG - erts_printf("Trap ac!\n"); -#endif - state_ptr = (BinaryFindState_bignum*) HAlloc(p, x+1); - state_ptr->bignum_hdr = make_pos_bignum_header(x); - memcpy(&state_ptr->bfs, bfs, sizeof(BinaryFindState)); - memcpy(&state_ptr->data.acffs, &state, sizeof(ACFindFirstState)); - *res_term = make_big(&state_ptr->bignum_hdr); - erts_free_aligned_binary_bytes(temp_alloc); - return DO_BIN_MATCH_RESTART; - } else { - *res_term = bfs->single_result(p, subject, bfs, pos, rlen); + BUMP_ALL_REDS(p); + return BF_RESTART; + } + if (ctx->search->done != NULL) { + ctx->search->done(ctx); + } + ctx->state = BFDone; + if (!is_first_call) { + erts_set_gc_state(p, 1); + } + BUMP_REDS(p, (initial_reds - ctx->reds) / ctx->loop_factor); + return BF_OK; + } + case BFResult: { + *res_term = ctx->found(p, subject, &ctx); + *ctxp = ctx; + if (*res_term == THE_NON_VALUE) { + if (is_first_call) { + erts_set_gc_state(p, 0); } - erts_free_aligned_binary_bytes(temp_alloc); - BUMP_REDS(p, (save_reds - reds) / AC_LOOP_FACTOR); - return DO_BIN_MATCH_OK; + BUMP_ALL_REDS(p); + return BF_RESTART; + } + if (ctx->search->done != NULL) { + ctx->search->done(ctx); } + ctx->state = BFDone; + if (!is_first_call) { + erts_set_gc_state(p, 1); + } + BUMP_REDS(p, (initial_reds - ctx->reds) / ctx->loop_factor); + return BF_OK; } - badarg: - return DO_BIN_MATCH_BADARG; + default: + ASSERT(!"Unknown state in do_binary_find"); + } + +badarg: + if (!is_first_call) { + if (ctx->search->done != NULL) { + ctx->search->done(ctx); + } + ctx->state = BFDone; + erts_set_gc_state(p, 1); + } + return BF_BADARG; } static BIF_RETTYPE binary_match(Process *p, Eterm arg1, Eterm arg2, Eterm arg3, Uint flags) { - BinaryFindState bfs; - Eterm *tp; - Binary *bin; - Eterm bin_term = NIL; + BinaryFindContext c_buff; + BinaryFindContext *ctx = &c_buff; + Binary *pat_bin; int runres; Eterm result; - if (is_not_binary(arg1)) { + if (is_not_binary(arg1) || binary_bitsize(arg1) != 0) { goto badarg; } - bfs.flags = flags; - if (parse_match_opts_list(arg3, arg1, &(bfs.hsstart), &(bfs.hsend))) { + ctx->flags = flags; + if (parse_match_opts_list(arg3, arg1, &(ctx->hsstart), &(ctx->hsend))) { goto badarg; } - if (bfs.hsend == 0) { - BIF_RET(do_match_not_found_result(p, arg1, &bfs)); + if (ctx->hsend == 0) { + result = do_match_not_found_result(p, arg1, &ctx); + BIF_RET(result); } - if (is_tuple(arg2)) { - tp = tuple_val(arg2); - if (arityval(*tp) != 2 || is_not_atom(tp[1])) { - goto badarg; - } - if (((tp[1] != am_bm) && (tp[1] != am_ac)) || - !is_internal_magic_ref(tp[2])) { - goto badarg; - } - bfs.type = tp[1]; - bin = erts_magic_ref2bin(tp[2]); - if (bfs.type == am_bm && - ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_bm) { - goto badarg; - 
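/*
 * Editorial sketch, not part of the patch: a pre-compiled pattern from
 * binary:compile_pattern/1 arrives as {bm | ac, MagicRef}. Before its
 * payload is trusted, the magic binary is authenticated by checking that
 * its destructor is the matching cleanup function, exactly as the checks
 * consolidated into maybe_binary_match_compile() above do.
 */
static int sketch_pattern_ok(Eterm tag, Binary *mbp)
{
    if (tag == am_bm)
        return ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == cleanup_my_data_bm;
    if (tag == am_ac)
        return ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == cleanup_my_data_ac;
    return 0;   /* neither a bm nor an ac pattern: badarg */
}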
} - if (bfs.type == am_ac && - ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_ac) { - goto badarg; - } - bin_term = tp[2]; - } else if (do_binary_match_compile(arg2, &(bfs.type), &bin)) { + if (maybe_binary_match_compile(ctx, arg2, &pat_bin) != BF_OK) { goto badarg; } - bfs.not_found_result = &do_match_not_found_result; - bfs.single_result = &do_match_single_result; - bfs.global_result = &do_match_global_result; - runres = do_binary_find(p, arg1, &bfs, bin, NIL, &result); - if (runres == DO_BIN_MATCH_RESTART && bin_term == NIL) { - Eterm *hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE); - bin_term = erts_mk_magic_ref(&hp, &MSO(p), bin); - } else if (bin_term == NIL) { - erts_bin_free(bin); + bf_context_init(ctx, do_match_not_found_result, do_match_single_result, + do_match_global_result, pat_bin); + runres = do_binary_find(p, arg1, &ctx, pat_bin, NULL, &result); + if (runres == BF_OK && ctx->pat_term == THE_NON_VALUE) { + erts_bin_free(pat_bin); } switch (runres) { - case DO_BIN_MATCH_OK: + case BF_OK: BIF_RET(result); - case DO_BIN_MATCH_RESTART: - BUMP_ALL_REDS(p); - BIF_TRAP3(&binary_find_trap_export, p, arg1, result, bin_term); + case BF_RESTART: + ASSERT(result == THE_NON_VALUE && ctx->trap_term != result && ctx->pat_term != result); + BIF_TRAP3(&binary_find_trap_export, p, arg1, ctx->trap_term, ctx->pat_term); default: goto badarg; } - badarg: - BIF_ERROR(p,BADARG); +badarg: + BIF_ERROR(p, BADARG); } BIF_RETTYPE binary_match_2(BIF_ALIST_2) @@ -1471,76 +1512,52 @@ BIF_RETTYPE binary_match_3(BIF_ALIST_3) BIF_RETTYPE binary_matches_2(BIF_ALIST_2) { - return binary_match(BIF_P, BIF_ARG_1, BIF_ARG_2, THE_NON_VALUE, BINARY_FIND_ALL); + return binary_match(BIF_P, BIF_ARG_1, BIF_ARG_2, THE_NON_VALUE, BF_FLAG_GLOBAL); } BIF_RETTYPE binary_matches_3(BIF_ALIST_3) { - return binary_match(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, BINARY_FIND_ALL); + return binary_match(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3, BF_FLAG_GLOBAL); } static BIF_RETTYPE binary_split(Process *p, Eterm arg1, Eterm arg2, Eterm arg3) { - BinaryFindState bfs; - Eterm *tp; - Binary *bin; - Eterm bin_term = NIL; + BinaryFindContext c_buff; + BinaryFindContext *ctx = &c_buff; + Binary *pat_bin; int runres; Eterm result; - if (is_not_binary(arg1)) { + if (is_not_binary(arg1) || binary_bitsize(arg1) != 0) { goto badarg; } - if (parse_split_opts_list(arg3, arg1, &(bfs.hsstart), &(bfs.hsend), &(bfs.flags))) { + if (parse_split_opts_list(arg3, arg1, &(ctx->hsstart), &(ctx->hsend), &(ctx->flags))) { goto badarg; } - if (bfs.hsend == 0) { - result = do_split_not_found_result(p, arg1, &bfs); + if (ctx->hsend == 0) { + result = do_split_not_found_result(p, arg1, &ctx); BIF_RET(result); } - if (is_tuple(arg2)) { - tp = tuple_val(arg2); - if (arityval(*tp) != 2 || is_not_atom(tp[1])) { - goto badarg; - } - if (((tp[1] != am_bm) && (tp[1] != am_ac)) || - !is_internal_magic_ref(tp[2])) { - goto badarg; - } - bfs.type = tp[1]; - bin = erts_magic_ref2bin(tp[2]); - if (bfs.type == am_bm && - ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_bm) { - goto badarg; - } - if (bfs.type == am_ac && - ERTS_MAGIC_BIN_DESTRUCTOR(bin) != cleanup_my_data_ac) { - goto badarg; - } - bin_term = tp[2]; - } else if (do_binary_match_compile(arg2, &(bfs.type), &bin)) { + if (maybe_binary_match_compile(ctx, arg2, &pat_bin) != BF_OK) { goto badarg; } - bfs.not_found_result = &do_split_not_found_result; - bfs.single_result = &do_split_single_result; - bfs.global_result = &do_split_global_result; - runres = do_binary_find(p, arg1, &bfs, bin, NIL, &result); - if (runres == 
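/*
 * Editorial sketch, not part of the patch: the trap now carries two magic
 * refs, the exported context and the compiled pattern, instead of the old
 * bignum-encoded state. On re-entry both are recovered and the search is
 * resumed; compare binary_find_trap() at the end of this file's hunk.
 * SketchTrapCtx and sketch_resume() are hypothetical.
 */
typedef struct { Binary *pat_bin; /* plus saved search positions */ } SketchTrapCtx;
Eterm sketch_resume(Process *p, Eterm subj, SketchTrapCtx *ctx);

static Eterm sketch_reenter(Process *p, Eterm subj, Eterm ctx_ref, Eterm pat_ref)
{
    Binary *ctx_bin = erts_magic_ref2bin(ctx_ref);
    Binary *pat_bin = erts_magic_ref2bin(pat_ref);
    SketchTrapCtx *ctx = ERTS_MAGIC_BIN_DATA(ctx_bin); /* saved state */

    ctx->pat_bin = pat_bin;                            /* re-attach pattern */
    return sketch_resume(p, subj, ctx);
}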
DO_BIN_MATCH_RESTART && bin_term == NIL) { - Eterm *hp = HAlloc(p, ERTS_MAGIC_REF_THING_SIZE); - bin_term = erts_mk_magic_ref(&hp, &MSO(p), bin); - } else if (bin_term == NIL) { - erts_bin_free(bin); - } - switch(runres) { - case DO_BIN_MATCH_OK: + bf_context_init(ctx, do_split_not_found_result, do_split_single_result, + do_split_global_result, pat_bin); + runres = do_binary_find(p, arg1, &ctx, pat_bin, NULL, &result); + if (runres == BF_OK && ctx->pat_term == THE_NON_VALUE) { + erts_bin_free(pat_bin); + } + switch (runres) { + case BF_OK: BIF_RET(result); - case DO_BIN_MATCH_RESTART: - BIF_TRAP3(&binary_find_trap_export, p, arg1, result, bin_term); + case BF_RESTART: + ASSERT(result == THE_NON_VALUE && ctx->trap_term != result && ctx->pat_term != result); + BIF_TRAP3(&binary_find_trap_export, p, arg1, ctx->trap_term, ctx->pat_term); default: goto badarg; } - badarg: +badarg: BIF_ERROR(p, BADARG); } @@ -1554,72 +1571,117 @@ BIF_RETTYPE binary_split_3(BIF_ALIST_3) return binary_split(BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3); } -static Eterm do_match_not_found_result(Process *p, Eterm subject, BinaryFindState *bfs) +static Eterm do_match_not_found_result(Process *p, Eterm subject, BinaryFindContext **ctxp) { - if (bfs->flags & BINARY_FIND_ALL) { + if ((*ctxp)->flags & BF_FLAG_GLOBAL) { return NIL; } else { return am_nomatch; } } -static Eterm do_match_single_result(Process *p, Eterm subject, BinaryFindState *bfs, - Sint pos, Sint len) +static Eterm do_match_single_result(Process *p, Eterm subject, BinaryFindContext **ctxp) { + BinaryFindContext *ctx = (*ctxp); + BinaryFindFirstContext *ff = &(ctx->u.ff); Eterm erlen; Eterm *hp; Eterm ret; - erlen = erts_make_integer((Uint)(len), p); - ret = erts_make_integer(pos, p); + erlen = erts_make_integer((Uint)(ff->len), p); + ret = erts_make_integer(ff->pos, p); hp = HAlloc(p, 3); ret = TUPLE2(hp, ret, erlen); return ret; } -static Eterm do_match_global_result(Process *p, Eterm subject, BinaryFindState *bfs, - FindallData *fad, Uint fad_sz) +static Eterm do_match_global_result(Process *p, Eterm subject, BinaryFindContext **ctxp) { - Sint i; + BinaryFindContext *ctx = (*ctxp); + BinaryFindAllContext *fa = &(ctx->u.fa); + FindallData *fad; Eterm tpl; - Eterm *hp; - Eterm ret; + Sint i; + register Uint reds = ctx->reds; - for (i = 0; i < fad_sz; ++i) { - fad[i].epos = erts_make_integer(fad[i].pos, p); - fad[i].elen = erts_make_integer(fad[i].len, p); + if (ctx->state == BFSearch) { + if (ctx->pat_type == am_ac) { + fa->data = fa->d.ac.out; + fa->size = fa->d.ac.m; + } else { + fa->data = fa->d.bm.out; + fa->size = fa->d.bm.m; + } + fa->tail = fa->size - 1; + fa->head = 0; + fa->end_pos = 0; + fa->term = NIL; + if (ctx->exported == 0 && ((fa->size * 2) >= reds)) { + ctx = bf_context_export(p, ctx); + *ctxp = ctx; + fa = &(ctx->u.fa); + } + erts_factory_proc_prealloc_init(&(fa->factory), p, fa->size * (3 + 2)); + ctx->state = BFResult; + } + + fad = fa->data; + + if (fa->end_pos == 0) { + for (i = fa->head; i < fa->size; ++i) { + if (--reds == 0) { + ASSERT(ctx->exported == 1); + fa->head = i; + ctx->reds = reds; + return THE_NON_VALUE; + } + fad[i].epos = erts_make_integer(fad[i].pos, p); + fad[i].elen = erts_make_integer(fad[i].len, p); + } + fa->end_pos = 1; + fa->head = fa->tail; } - hp = HAlloc(p, fad_sz * (3 + 2)); - ret = NIL; - for (i = fad_sz - 1; i >= 0; --i) { - tpl = TUPLE2(hp, fad[i].epos, fad[i].elen); - hp += 3; - ret = CONS(hp, tpl, ret); - hp += 2; + + for (i = fa->head; i >= 0; --i) { + if (--reds == 0) { + ASSERT(ctx->exported == 1); + 
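/*
 * Editorial sketch, not part of the patch: the result list is now built
 * through a heap factory so construction can pause between reductions.
 * Each {Pos,Len} element costs 3 words for the tuple plus 2 for the cons
 * cell, which is what the prealloc size above accounts for. A condensed,
 * uninterruptible version of the same construction (the real code uses
 * erts_make_integer() so offsets larger than a small still work):
 */
static Eterm sketch_build_pairs(Process *p, Uint n,
                                const Sint *pos, const Sint *len)
{
    ErtsHeapFactory factory;
    Eterm term = NIL;
    Sint i;

    erts_factory_proc_prealloc_init(&factory, p, n * (3 + 2));
    for (i = (Sint) n - 1; i >= 0; i--) {
        Eterm tpl = TUPLE2(factory.hp,
                           make_small(pos[i]), make_small(len[i]));
        factory.hp += 3;                  /* tuple header + 2 elements */
        term = CONS(factory.hp, tpl, term);
        factory.hp += 2;                  /* cons cell */
    }
    erts_factory_close(&factory);
    return term;
}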
fa->head = i; + ctx->reds = reds; + return THE_NON_VALUE; + } + tpl = TUPLE2(fa->factory.hp, fad[i].epos, fad[i].elen); + fa->factory.hp += 3; + fa->term = CONS(fa->factory.hp, tpl, fa->term); + fa->factory.hp += 2; } + ctx->reds = reds; + erts_factory_close(&(fa->factory)); - return ret; + return fa->term; } -static Eterm do_split_not_found_result(Process *p, Eterm subject, BinaryFindState *bfs) +static Eterm do_split_not_found_result(Process *p, Eterm subject, BinaryFindContext **ctxp) { + BinaryFindContext *ctx = (*ctxp); Eterm *hp; Eterm ret; - if (bfs->flags & (BINARY_SPLIT_TRIM | BINARY_SPLIT_TRIM_ALL) + if (ctx->flags & (BF_FLAG_SPLIT_TRIM | BF_FLAG_SPLIT_TRIM_ALL) && binary_size(subject) == 0) { - return NIL; + return NIL; } hp = HAlloc(p, 2); ret = CONS(hp, subject, NIL); - return ret; } -static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindState *bfs, - Sint pos, Sint len) +static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindContext **ctxp) { + BinaryFindContext *ctx = (*ctxp); + BinaryFindFirstContext *ff = &(ctx->u.ff); + Sint pos; + Sint len; size_t orig_size; Eterm orig; Uint offset; @@ -1630,9 +1692,12 @@ static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindState * Eterm *hp; Eterm ret; + pos = ff->pos; + len = ff->len; + orig_size = binary_size(subject); - if ((bfs->flags & (BINARY_SPLIT_TRIM | BINARY_SPLIT_TRIM_ALL)) && + if ((ctx->flags & (BF_FLAG_SPLIT_TRIM | BF_FLAG_SPLIT_TRIM_ALL)) && (orig_size - pos - len) == 0) { if (pos == 0) { ret = NIL; @@ -1653,7 +1718,7 @@ static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindState * hp += 2; } } else { - if ((bfs->flags & BINARY_SPLIT_TRIM_ALL) && (pos == 0)) { + if ((ctx->flags & BF_FLAG_SPLIT_TRIM_ALL) && (pos == 0)) { hp = HAlloc(p, 1 * (ERL_SUB_BIN_SIZE + 2)); ERTS_GET_REAL_BIN(subject, orig, offset, bit_offset, bit_size); sb1 = NULL; @@ -1691,39 +1756,60 @@ static Eterm do_split_single_result(Process *p, Eterm subject, BinaryFindState * return ret; } -static Eterm do_split_global_result(Process *p, Eterm subject, BinaryFindState *bfs, - FindallData *fad, Uint fad_sz) +static Eterm do_split_global_result(Process *p, Eterm subject, BinaryFindContext **ctxp) { - size_t orig_size; + BinaryFindContext *ctx = (*ctxp); + BinaryFindAllContext *fa = &(ctx->u.fa); + FindallData *fad; Eterm orig; + size_t orig_size; Uint offset; Uint bit_offset; Uint bit_size; ErlSubBin *sb; + Uint do_trim; Sint i; - Sint tail; - Uint list_size; - Uint end_pos; - Uint do_trim = bfs->flags & (BINARY_SPLIT_TRIM | BINARY_SPLIT_TRIM_ALL); - Eterm *hp; - Eterm *hendp; - Eterm ret; + register Uint reds = ctx->reds; - tail = fad_sz - 1; - list_size = fad_sz + 1; - orig_size = binary_size(subject); - end_pos = (Uint)(orig_size); + if (ctx->state == BFSearch) { + if (ctx->pat_type == am_ac) { + fa->data = fa->d.ac.out; + fa->size = fa->d.ac.m; + } else { + fa->data = fa->d.bm.out; + fa->size = fa->d.bm.m; + } + fa->tail = fa->size - 1; + fa->head = fa->tail; + orig_size = binary_size(subject); + fa->end_pos = (Uint)(orig_size); + fa->term = NIL; + if (ctx->exported == 0 && ((fa->head + 1) >= reds)) { + ctx = bf_context_export(p, ctx); + *ctxp = ctx; + fa = &(ctx->u.fa); + } + erts_factory_proc_prealloc_init(&(fa->factory), p, (fa->size + 1) * (ERL_SUB_BIN_SIZE + 2)); + ctx->state = BFResult; + } - hp = HAlloc(p, list_size * (ERL_SUB_BIN_SIZE + 2)); - hendp = hp + list_size * (ERL_SUB_BIN_SIZE + 2); ERTS_GET_REAL_BIN(subject, orig, offset, bit_offset, bit_size); ASSERT(bit_size == 
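/*
 * Editorial sketch, not part of the patch: every part returned by
 * binary:split/2,3 is a zero-copy ErlSubBin heap object referencing the
 * original binary, built just as the loop above does with factory heap.
 */
static Eterm sketch_sub_binary(Eterm *hp, Eterm real_bin,
                               Uint byte_offs, Uint byte_size, Uint bitoffs)
{
    ErlSubBin *sb = (ErlSubBin *) hp;
    sb->thing_word = HEADER_SUB_BIN;
    sb->size = byte_size;     /* slice length in bytes */
    sb->offs = byte_offs;     /* offset into the real binary */
    sb->orig = real_bin;      /* underlying binary term */
    sb->bitoffs = bitoffs;    /* inherited bit offset */
    sb->bitsize = 0;          /* split subjects are byte-sized */
    sb->is_writable = 0;
    return make_binary(sb);
}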
0); + fad = fa->data; + do_trim = ctx->flags & (BF_FLAG_SPLIT_TRIM | BF_FLAG_SPLIT_TRIM_ALL); - ret = NIL; - - for (i = tail; i >= 0; --i) { - sb = (ErlSubBin *)(hp); - sb->size = end_pos - (fad[i].pos + fad[i].len); + for (i = fa->head; i >= 0; --i) { + if (--reds == 0) { + ASSERT(ctx->exported == 1); + fa->head = i; + ctx->reds = reds; + if (!do_trim && (ctx->flags & BF_FLAG_SPLIT_TRIM)) { + ctx->flags &= ~BF_FLAG_SPLIT_TRIM; + } + return THE_NON_VALUE; + } + sb = (ErlSubBin *)(fa->factory.hp); + sb->size = fa->end_pos - (fad[i].pos + fad[i].len); if (!(sb->size == 0 && do_trim)) { sb->thing_word = HEADER_SUB_BIN; sb->offs = offset + fad[i].pos + fad[i].len; @@ -1731,15 +1817,18 @@ static Eterm do_split_global_result(Process *p, Eterm subject, BinaryFindState * sb->bitoffs = bit_offset; sb->bitsize = 0; sb->is_writable = 0; - hp += ERL_SUB_BIN_SIZE; - ret = CONS(hp, make_binary(sb), ret); - hp += 2; - do_trim &= ~BINARY_SPLIT_TRIM; + fa->factory.hp += ERL_SUB_BIN_SIZE; + fa->term = CONS(fa->factory.hp, make_binary(sb), fa->term); + fa->factory.hp += 2; + do_trim &= ~BF_FLAG_SPLIT_TRIM; } - end_pos = fad[i].pos; + fa->end_pos = fad[i].pos; } - sb = (ErlSubBin *)(hp); + fa->head = i; + ctx->reds = reds; + + sb = (ErlSubBin *)(fa->factory.hp); sb->size = fad[0].pos; if (!(sb->size == 0 && do_trim)) { sb->thing_word = HEADER_SUB_BIN; @@ -1748,26 +1837,31 @@ static Eterm do_split_global_result(Process *p, Eterm subject, BinaryFindState * sb->bitoffs = bit_offset; sb->bitsize = 0; sb->is_writable = 0; - hp += ERL_SUB_BIN_SIZE; - ret = CONS(hp, make_binary(sb), ret); - hp += 2; + fa->factory.hp += ERL_SUB_BIN_SIZE; + fa->term = CONS(fa->factory.hp, make_binary(sb), fa->term); + fa->factory.hp += 2; } - HRelease(p, hendp, hp); - return ret; + erts_factory_close(&(fa->factory)); + + return fa->term; } static BIF_RETTYPE binary_find_trap(BIF_ALIST_3) { int runres; Eterm result; - Binary *bin = erts_magic_ref2bin(BIF_ARG_3); - - runres = do_binary_find(BIF_P, BIF_ARG_1, NULL, bin, BIF_ARG_2, &result); - if (runres == DO_BIN_MATCH_OK) { + Binary *ctx_bin = erts_magic_ref2bin(BIF_ARG_2); + Binary *pat_bin = erts_magic_ref2bin(BIF_ARG_3); + BinaryFindContext *ctx = NULL; + + ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(ctx_bin) == bf_context_destructor); + runres = do_binary_find(BIF_P, BIF_ARG_1, &ctx, pat_bin, ctx_bin, &result); + if (runres == BF_OK) { + ASSERT(result != THE_NON_VALUE); BIF_RET(result); } else { - BUMP_ALL_REDS(BIF_P); - BIF_TRAP3(&binary_find_trap_export, BIF_P, BIF_ARG_1, result, BIF_ARG_3); + ASSERT(result == THE_NON_VALUE && ctx->trap_term != result && ctx->pat_term != result); + BIF_TRAP3(&binary_find_trap_export, BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3); } } diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c index e9bfb39035..f673ef3194 100644 --- a/erts/emulator/beam/erl_bif_ddll.c +++ b/erts/emulator/beam/erl_bif_ddll.c @@ -50,13 +50,6 @@ #include "dtrace-wrapper.h" #include "lttng-wrapper.h" -#ifdef ERTS_SMP -#define DDLL_SMP 1 -#else -#define DDLL_SMP 0 -#endif - - /* * Local types */ @@ -107,18 +100,18 @@ static void dereference_all_processes(DE_Handle *dh); static void restore_process_references(DE_Handle *dh); static void ddll_no_more_references(void *vdh); -#define lock_drv_list() erts_smp_rwmtx_rwlock(&erts_driver_list_lock) -#define unlock_drv_list() erts_smp_rwmtx_rwunlock(&erts_driver_list_lock) +#define lock_drv_list() erts_rwmtx_rwlock(&erts_driver_list_lock) +#define unlock_drv_list() erts_rwmtx_rwunlock(&erts_driver_list_lock) #define 
assert_drv_list_locked() \ - ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \ - || erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock)) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \ + || erts_lc_rwmtx_is_rlocked(&erts_driver_list_lock)) #define assert_drv_list_rwlocked() \ - ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock)) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_driver_list_lock)) #define assert_drv_list_rlocked() \ - ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock)) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&erts_driver_list_lock)) #define assert_drv_list_not_locked() \ - ERTS_SMP_LC_ASSERT(!erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \ - && !erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock)) + ERTS_LC_ASSERT(!erts_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \ + && !erts_lc_rwmtx_is_rlocked(&erts_driver_list_lock)) #define FREE_PORT_FLAGS (ERTS_PORT_SFLGS_DEAD & (~ERTS_PORT_SFLG_INITIALIZING)) @@ -134,13 +127,13 @@ kill_ports_driver_unloaded(DE_Handle *dh) if (!prt) continue; - ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER; + ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER; state = erts_atomic32_read_nob(&prt->state); if (state & FREE_PORT_FLAGS) continue; - erts_smp_port_lock(prt); + erts_port_lock(prt); state = erts_atomic32_read_nob(&prt->state); if (!(state & ERTS_PORT_SFLGS_DEAD) && prt->drv_ptr->handle == dh) @@ -280,10 +273,8 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3) path[path_len++] = '/'; sys_strcpy(path+path_len,name); -#if DDLL_SMP - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); lock_drv_list(); -#endif if ((drv = lookup_driver(name)) != NULL) { if (drv->handle == NULL) { /* static_driver */ @@ -404,24 +395,18 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3) erts_ddll_reference_driver(dh); ASSERT(dh->status == ERL_DE_RELOAD); dh->status = ERL_DE_FORCE_RELOAD; -#if DDLL_SMP unlock_drv_list(); -#endif kill_ports_driver_unloaded(dh); /* Dereference, eventually causing driver destruction */ -#if DDLL_SMP lock_drv_list(); -#endif erts_ddll_dereference_driver(dh); } -#if DDLL_SMP erts_ddll_reference_driver(dh); unlock_drv_list(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); lock_drv_list(); erts_ddll_dereference_driver(dh); -#endif BIF_P->flags |= F_USING_DDLL; if (monitor) { @@ -432,18 +417,14 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3) hp = HAlloc(BIF_P, 3); t = TUPLE2(hp, am_ok, ok_term); } -#if DDLL_SMP unlock_drv_list(); -#endif erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path); erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); BIF_RET(t); soft_error: -#if DDLL_SMP unlock_drv_list(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); -#endif + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); if (do_build_load_error) { soft_error_term = build_load_error(BIF_P, build_this_load_error); } @@ -452,11 +433,11 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3) t = TUPLE2(hp, am_error, soft_error_term); erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path); erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); BIF_RET(t); error: assert_drv_list_not_locked(); - 
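/*
 * Editorial sketch, not part of the patch: with the non-SMP build gone,
 * the DDLL_SMP conditionals collapse into the one locking pattern this
 * file already used in SMP builds: pin the handle with a reference, drop
 * the driver-list lock around blocking work, then retake it and drop the
 * reference. Assumed to be called with the driver list locked.
 */
static void sketch_kill_ports_unlocked(DE_Handle *dh)
{
    erts_ddll_reference_driver(dh);   /* keep dh alive across the unlock */
    unlock_drv_list();
    kill_ports_driver_unloaded(dh);   /* may take port locks / block */
    lock_drv_list();
    erts_ddll_dereference_driver(dh); /* may schedule handle destruction */
}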
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); if (path != NULL) { erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path); } @@ -518,7 +499,7 @@ Eterm erl_ddll_try_unload_2(BIF_ALIST_2) Eterm l; int kill_ports = 0; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); for(l = options; is_list(l); l = CDR(list_val(l))) { Eterm opt = CAR(list_val(l)); @@ -551,9 +532,7 @@ Eterm erl_ddll_try_unload_2(BIF_ALIST_2) goto error; } -#if DDLL_SMP lock_drv_list(); -#endif if ((drv = lookup_driver(name)) == NULL) { soft_error_term = am_not_loaded; @@ -597,7 +576,7 @@ Eterm erl_ddll_try_unload_2(BIF_ALIST_2) dh->reload_full_path = dh->reload_driver_name = NULL; dh->reload_flags = 0; } - if (erts_smp_atomic32_read_nob(&dh->port_count) > 0) { + if (erts_atomic32_read_nob(&dh->port_count) > 0) { ++kill_ports; } dh->status = ERL_DE_UNLOAD; @@ -608,23 +587,17 @@ done: /* Avoid closing the driver by referencing it */ erts_ddll_reference_driver(dh); dh->status = ERL_DE_FORCE_UNLOAD; -#if DDLL_SMP unlock_drv_list(); -#endif kill_ports_driver_unloaded(dh); -#if DDLL_SMP lock_drv_list(); -#endif erts_ddll_dereference_driver(dh); } -#if DDLL_SMP erts_ddll_reference_driver(dh); unlock_drv_list(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); lock_drv_list(); erts_ddll_dereference_driver(dh); -#endif erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name); BIF_P->flags |= F_USING_DDLL; if (monitor > 0) { @@ -638,17 +611,13 @@ done: if (kill_ports > 1) { ERTS_BIF_CHK_EXITED(BIF_P); /* May be exited by port killing */ } -#if DDLL_SMP unlock_drv_list(); -#endif BIF_RET(t); soft_error: -#if DDLL_SMP unlock_drv_list(); -#endif erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); hp = HAlloc(BIF_P, 3); t = TUPLE2(hp, am_error, soft_error_term); BIF_RET(t); @@ -658,7 +627,7 @@ soft_error: if (name != NULL) { erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name); } - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_ERROR(BIF_P, BADARG); } @@ -697,9 +666,7 @@ BIF_RETTYPE erl_ddll_loaded_drivers_0(BIF_ALIST_0) int need = 3; Eterm res = NIL; erts_driver_t *drv; -#if DDLL_SMP lock_drv_list(); -#endif for (drv = driver_list; drv; drv = drv->next) { need += sys_strlen(drv->name)*2+2; } @@ -712,9 +679,7 @@ BIF_RETTYPE erl_ddll_loaded_drivers_0(BIF_ALIST_0) } res = TUPLE2(hp,am_ok,res); /* hp += 3 */ -#if DDLL_SMP unlock_drv_list(); -#endif BIF_RET(res); } @@ -736,9 +701,7 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2) Eterm *hp; int i; Uint filter; -#if DDLL_SMP int have_lock = 0; -#endif if ((name = pick_list_or_atom(name_term)) == NULL) { goto error; @@ -748,10 +711,8 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2) goto error; } -#if DDLL_SMP lock_drv_list(); have_lock = 1; -#endif if ((drv = lookup_driver(name)) == NULL) { goto error; } @@ -781,7 +742,7 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2) } else if (drv->handle->status == ERL_DE_PERMANENT) { res = am_permanent; } else { - res = make_small(erts_smp_atomic32_read_nob(&drv->handle->port_count)); + res = make_small(erts_atomic32_read_nob(&drv->handle->port_count)); } goto done; case am_linked_in_driver: @@ -827,9 +788,7 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2) hp += 2; } done: -#if DDLL_SMP unlock_drv_list(); -#endif if (pei) 
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, pei); erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name); @@ -838,11 +797,9 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2) if (name != NULL) { erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name); } -#if DDLL_SMP if (have_lock) { unlock_drv_list(); } -#endif BIF_ERROR(p,BADARG); } @@ -899,13 +856,9 @@ BIF_RETTYPE erl_ddll_format_error_int_1(BIF_ALIST_1) if (errdesc_to_code(code_term,&errint) != 0) { goto error; } -#if DDLL_SMP lock_drv_list(); -#endif errstring = erts_ddll_error(errint); -#if DDLL_SMP unlock_drv_list(); -#endif break; } if (errstring == NULL) { @@ -968,7 +921,7 @@ Eterm erts_ddll_monitor_driver(Process *p, void erts_ddll_remove_monitor(Process *p, Eterm ref, ErtsProcLocks plocks) { erts_driver_t *drv; - erts_smp_proc_unlock(p, plocks); + erts_proc_unlock(p, plocks); lock_drv_list(); drv = driver_list; while (drv != NULL) { @@ -993,7 +946,7 @@ void erts_ddll_remove_monitor(Process *p, Eterm ref, ErtsProcLocks plocks) } done: unlock_drv_list(); - erts_smp_proc_lock(p, plocks); + erts_proc_lock(p, plocks); } /* @@ -1002,7 +955,7 @@ void erts_ddll_remove_monitor(Process *p, Eterm ref, ErtsProcLocks plocks) void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks) { erts_driver_t *drv; - erts_smp_proc_unlock(p, plocks); + erts_proc_unlock(p, plocks); lock_drv_list(); drv = driver_list; while (drv != NULL) { @@ -1040,18 +993,14 @@ void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks) dh->status = ERL_DE_UNLOAD; } if (!left - && erts_smp_atomic32_read_nob(&drv->handle->port_count) > 0) { + && erts_atomic32_read_nob(&drv->handle->port_count) > 0) { if (kill_ports) { DE_Handle *dh = drv->handle; erts_ddll_reference_driver(dh); dh->status = ERL_DE_FORCE_UNLOAD; -#if DDLL_SMP unlock_drv_list(); -#endif kill_ports_driver_unloaded(dh); -#if DDLL_SMP lock_drv_list(); /* Needed for future list operations */ -#endif drv = drv->next; /* before allowing destruction */ erts_ddll_dereference_driver(dh); } else { @@ -1065,7 +1014,7 @@ void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks) } } unlock_drv_list(); - erts_smp_proc_lock(p, plocks); + erts_proc_lock(p, plocks); } void erts_ddll_lock_driver(DE_Handle *dh, char *name) { @@ -1093,41 +1042,41 @@ void erts_ddll_lock_driver(DE_Handle *dh, char *name) void erts_ddll_increment_port_count(DE_Handle *dh) { assert_drv_list_locked(); - erts_smp_atomic32_inc_nob(&dh->port_count); + erts_atomic32_inc_nob(&dh->port_count); } void erts_ddll_decrement_port_count(DE_Handle *dh) { assert_drv_list_locked(); #ifdef DEBUG - ASSERT(erts_smp_atomic32_dec_read_nob(&dh->port_count) >= 0); + ASSERT(erts_atomic32_dec_read_nob(&dh->port_count) >= 0); #else - erts_smp_atomic32_dec_nob(&dh->port_count); + erts_atomic32_dec_nob(&dh->port_count); #endif } static void first_ddll_reference(DE_Handle *dh) { assert_drv_list_rwlocked(); - erts_smp_refc_init(&(dh->refc),1); + erts_refc_init(&(dh->refc),1); } void erts_ddll_reference_driver(DE_Handle *dh) { assert_drv_list_locked(); - if (erts_smp_refc_inctest(&(dh->refc),1) == 1) { - erts_smp_refc_inc(&(dh->refc),2); /* add a reference for the scheduled operation */ + if (erts_refc_inctest(&(dh->refc),1) == 1) { + erts_refc_inc(&(dh->refc),2); /* add a reference for the scheduled operation */ } } void erts_ddll_reference_referenced_driver(DE_Handle *dh) { - erts_smp_refc_inc(&(dh->refc),2); + erts_refc_inc(&(dh->refc),2); } void erts_ddll_dereference_driver(DE_Handle *dh) { - if (erts_smp_refc_dectest(&(dh->refc),0) == 0) { + if (erts_refc_dectest(&(dh->refc),0) == 0) { /* No lock 
here, but if the driver is referenced again, the scheduled deletion is added as a reference too, see above */ erts_schedule_misc_op(ddll_no_more_references, (void *) dh); @@ -1150,11 +1099,11 @@ static void restore_process_references(DE_Handle *dh) { DE_ProcEntry *p; assert_drv_list_rwlocked(); - ASSERT(erts_smp_refc_read(&(dh->refc),0) == 0); + ASSERT(erts_refc_read(&(dh->refc),0) == 0); for(p = dh->procs;p != NULL; p = p->next) { if (p->awaiting_status == ERL_DE_PROC_LOADED) { ASSERT(p->flags & ERL_DE_FL_DEREFERENCED); - erts_smp_refc_inc(&(dh->refc),1); + erts_refc_inc(&(dh->refc),1); p->flags &= ~ERL_DE_FL_DEREFERENCED; } } @@ -1176,9 +1125,9 @@ static void ddll_no_more_references(void *vdh) lock_drv_list(); - x = erts_smp_refc_read(&(dh->refc),0); + x = erts_refc_read(&(dh->refc),0); if (x > 0) { - x = erts_smp_refc_dectest(&(dh->refc),0); /* delete the reference added for me */ + x = erts_refc_dectest(&(dh->refc),0); /* delete the reference added for me */ } @@ -1281,10 +1230,8 @@ static Eterm notify_when_loaded(Process *p, Eterm name_term, char *name, ErtsPro Eterm immediate_type = NIL; erts_driver_t *drv; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks); -#if DDLL_SMP + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks); lock_drv_list(); -#endif if ((drv = lookup_driver(name)) == NULL) { immediate_tag = am_unloaded; immediate_type = am_DOWN; @@ -1314,20 +1261,14 @@ static Eterm notify_when_loaded(Process *p, Eterm name_term, char *name, ErtsPro } p->flags |= F_USING_DDLL; r = add_monitor(p, drv->handle, ERL_DE_PROC_AWAIT_LOAD); -#if DDLL_SMP unlock_drv_list(); -#endif BIF_RET(r); immediate: r = erts_make_ref(p); -#if DDLL_SMP - erts_smp_proc_unlock(p, plocks); -#endif + erts_proc_unlock(p, plocks); notify_proc(p, r, name_term, immediate_type, immediate_tag, 0); -#if DDLL_SMP unlock_drv_list(); - erts_smp_proc_lock(p, plocks); -#endif + erts_proc_lock(p, plocks); BIF_RET(r); } @@ -1338,10 +1279,8 @@ static Eterm notify_when_unloaded(Process *p, Eterm name_term, char *name, ErtsP Eterm immediate_type = NIL; erts_driver_t *drv; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks); -#if DDLL_SMP + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks); lock_drv_list(); -#endif if ((drv = lookup_driver(name)) == NULL) { immediate_tag = am_unloaded; immediate_type = am_DOWN; @@ -1355,20 +1294,14 @@ static Eterm notify_when_unloaded(Process *p, Eterm name_term, char *name, ErtsP p->flags |= F_USING_DDLL; r = add_monitor(p, drv->handle, flag); -#if DDLL_SMP unlock_drv_list(); -#endif BIF_RET(r); immediate: r = erts_make_ref(p); -#if DDLL_SMP - erts_smp_proc_unlock(p, plocks); -#endif + erts_proc_unlock(p, plocks); notify_proc(p, r, name_term, immediate_type, immediate_tag, 0); -#if DDLL_SMP unlock_drv_list(); - erts_smp_proc_lock(p, plocks); -#endif + erts_proc_lock(p, plocks); BIF_RET(r); } @@ -1572,8 +1505,8 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name) res = ERL_DE_LOAD_ERROR_BAD_NAME; goto error; } - erts_smp_atomic_init_nob(&(dh->refc), (erts_aint_t) 0); - erts_smp_atomic32_init_nob(&dh->port_count, 0); + erts_atomic_init_nob(&(dh->refc), (erts_aint_t) 0); + erts_atomic32_init_nob(&dh->port_count, 0); dh->full_path = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sys_strlen(path) + 1); sys_strcpy(dh->full_path, path); dh->flags = 0; @@ -1644,8 +1577,8 @@ static int load_driver_entry(DE_Handle **dhp, char *path, char *name) dh->handle = NULL; dh->procs = NULL; - erts_smp_atomic32_init_nob(&dh->port_count, 0); - erts_smp_refc_init(&(dh->refc), (erts_aint_t) 0); + 
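/*
 * Editorial sketch, not part of the patch: a driver handle is never freed
 * inline. When its refcount reaches zero the destruction is scheduled as
 * a misc op, and that pending op itself counts as a reference, which is
 * why erts_ddll_reference_driver() above adds an extra count when it
 * revives a zero-referenced handle.
 */
static void sketch_deref_handle(DE_Handle *dh)
{
    if (erts_refc_dectest(&dh->refc, 0) == 0)
        erts_schedule_misc_op(ddll_no_more_references, (void *) dh);
}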
erts_atomic32_init_nob(&dh->port_count, 0); + erts_refc_init(&(dh->refc), (erts_aint_t) 0); dh->status = -1; dh->reload_full_path = NULL; dh->reload_driver_name = NULL; @@ -1683,7 +1616,7 @@ static int reload_driver_entry(DE_Handle *dh) dh->reload_full_path = NULL; dh->reload_driver_name = NULL; - ASSERT(erts_smp_refc_read(&(dh->refc),0) == 0); + ASSERT(erts_refc_read(&(dh->refc),0) == 0); ASSERT(dh->full_path != NULL); erts_free(ERTS_ALC_T_DDLL_HANDLE, (void *) dh->full_path); dh->full_path = NULL; @@ -1714,7 +1647,7 @@ static void notify_proc(Process *proc, Eterm ref, Eterm driver_name, Eterm type, ErtsMessage *mp; ErtsProcLocks rp_locks = 0; ErlOffHeap *ohp; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; assert_drv_list_rwlocked(); if (errcode != 0) { @@ -1740,8 +1673,8 @@ static void notify_proc(Process *proc, Eterm ref, Eterm driver_name, Eterm type, mess = TUPLE5(hp,type,r,am_driver,driver_name,tag); } erts_queue_message(proc, rp_locks, mp, mess, am_system); - erts_smp_proc_unlock(proc, rp_locks); - ERTS_SMP_CHK_NO_PROC_LOCKS; + erts_proc_unlock(proc, rp_locks); + ERTS_CHK_NO_PROC_LOCKS; } static void notify_all(DE_Handle *dh, char *name, Uint awaiting, Eterm type, Eterm tag) @@ -1813,7 +1746,7 @@ static Eterm build_load_error(Process *p, int code) { int need = load_error_need(code); Eterm *hp = NULL; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p)); if (need) { hp = HAlloc(p,need); } diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index 0547b4d75c..17c936f041 100644 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -46,6 +46,7 @@ #include "erl_thr_progress.h" #include "erl_bif_unique.h" #include "erl_map.h" +#include "erl_check_io.h" #define ERTS_PTAB_WANT_DEBUG_FUNCS__ #include "erl_ptab.h" #include "erl_time.h" @@ -88,24 +89,15 @@ static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE #ifdef ARCH_64 " [64-bit]" #endif -#ifdef ERTS_SMP " [smp:%beu:%beu]" -#endif -#ifdef USE_THREADS -#if defined(ERTS_DIRTY_SCHEDULERS) && defined(ERTS_SMP) " [ds:%beu:%beu:%beu]" -#endif #if defined(ERTS_DIRTY_SCHEDULERS_TEST) " [dirty-schedulers-TEST]" #endif " [async-threads:%d]" -#endif #ifdef HIPE " [hipe]" #endif -#ifdef ERTS_ENABLE_KERNEL_POLL - " [kernel-poll:%s]" -#endif #ifdef ET_DEBUG #if ET_DEBUG " [type-assertions]" @@ -354,14 +346,12 @@ erts_print_system_version(fmtfn_t to, void *arg, Process *c_p) char *rc_str = ""; char rc_buf[100]; char *ov = otp_version; -#ifdef ERTS_SMP Uint total, online, active; Uint dirty_cpu, dirty_cpu_onln, dirty_io; erts_schedulers_state(&total, &online, &active, &dirty_cpu, &dirty_cpu_onln, NULL, &dirty_io, NULL); -#endif for (i = 0; i < sizeof(otp_version)-4; i++) { if (ov[i] == '-' && ov[i+1] == 'r' && ov[i+2] == 'c') rc = atoi(&ov[i+3]); @@ -376,18 +366,9 @@ erts_print_system_version(fmtfn_t to, void *arg, Process *c_p) } return erts_print(to, arg, erts_system_version, rc_str -#ifdef ERTS_SMP , total, online -#ifdef ERTS_DIRTY_SCHEDULERS , dirty_cpu, dirty_cpu_onln, dirty_io -#endif -#endif -#ifdef USE_THREADS , erts_async_max_threads -#endif -#ifdef ERTS_ENABLE_KERNEL_POLL - , erts_use_kernel_poll ? 
"true" : "false" -#endif ); } @@ -764,7 +745,6 @@ process_info_init(void) static ERTS_INLINE Process * pi_pid2proc(Process *c_p, Eterm pid, ErtsProcLocks info_locks) { -#ifdef ERTS_SMP /* * If the main lock is needed, we use erts_pid2proc_not_running() * instead of erts_pid2proc() for two reasons: @@ -782,7 +762,6 @@ pi_pid2proc(Process *c_p, Eterm pid, ErtsProcLocks info_locks) return erts_pid2proc_not_running(c_p, ERTS_PROC_LOCK_MAIN, pid, info_locks); else -#endif return erts_pid2proc(c_p, ERTS_PROC_LOCK_MAIN, pid, info_locks); } @@ -900,13 +879,13 @@ process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap, * is being inspected... */ ASSERT(locks & ERTS_PROC_LOCK_MAIN); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp); + ERTS_MSGQ_MV_INQ2PRIVQ(rp); locks &= ~ERTS_PROC_LOCK_MSGQ; unlock_locks |= ERTS_PROC_LOCK_MSGQ; } if (unlock_locks) - erts_smp_proc_unlock(rp, unlock_locks); + erts_proc_unlock(rp, unlock_locks); } @@ -964,7 +943,7 @@ process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap, if (c_p == rp) locks &= ~ERTS_PROC_LOCK_MAIN; if (locks && rp) - erts_smp_proc_unlock(rp, locks); + erts_proc_unlock(rp, locks); if (res_elem_ix != &def_res_elem_ix_buf[0]) erts_free(ERTS_ALC_T_TMP, res_elem_ix); @@ -1055,7 +1034,7 @@ BIF_RETTYPE process_info_2(BIF_ALIST_2) ERTS_BIF_YIELD2(bif_export[BIF_process_info_2], BIF_P, BIF_ARG_1, BIF_ARG_2); else if (rp != BIF_P && ERTS_PROC_PENDING_EXIT(rp)) { - erts_smp_proc_unlock(rp, info_locks|ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, info_locks|ERTS_PROC_LOCK_STATUS); ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined); } else { @@ -1075,24 +1054,22 @@ BIF_RETTYPE process_info_2(BIF_ALIST_2) * is being inspected... */ ASSERT(info_locks & ERTS_PROC_LOCK_MAIN); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp); + ERTS_MSGQ_MV_INQ2PRIVQ(rp); info_locks &= ~ERTS_PROC_LOCK_MSGQ; unlock_locks |= ERTS_PROC_LOCK_MSGQ; } if (unlock_locks) - erts_smp_proc_unlock(rp, unlock_locks); + erts_proc_unlock(rp, unlock_locks); res = process_info_aux(BIF_P, rp, info_locks, pid, BIF_ARG_2, 0); } ASSERT(is_value(res)); -#ifdef ERTS_SMP if (BIF_P == rp) info_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp && info_locks) - erts_smp_proc_unlock(rp, info_locks); -#endif + erts_proc_unlock(rp, info_locks); ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED)); BIF_RET(res); @@ -1379,7 +1356,7 @@ process_info_aux(Process *BIF_P, break; case am_trap_exit: { - erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state); + erts_aint32_t state = erts_atomic32_read_nob(&rp->state); hp = HAlloc(BIF_P, 3); if (state & ERTS_PSFLG_TRAP_EXIT) res = am_true; @@ -2145,14 +2122,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) ASSERT(erts_compat_rel > 0); BIF_RET(make_small(erts_compat_rel)); } else if (BIF_ARG_1 == am_multi_scheduling) { -#ifndef ERTS_SMP - BIF_RET(am_disabled); -#else -#ifndef ERTS_DIRTY_SCHEDULERS - if (erts_no_schedulers == 1) - BIF_RET(am_disabled); - else -#endif { int msb = erts_is_multi_scheduling_blocked(); BIF_RET(!msb @@ -2161,7 +2130,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) ? 
am_blocked : am_blocked_normal)); } -#endif } else if (BIF_ARG_1 == am_build_type) { #if defined(DEBUG) ERTS_DECL_AM(debug); @@ -2271,7 +2239,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) res = TUPLE2(hp, am_sequential_tracer, val); BIF_RET(res); } else if (BIF_ARG_1 == am_garbage_collection){ - Uint val = (Uint) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); + Uint val = (Uint) erts_atomic32_read_nob(&erts_max_gen_gcs); Eterm tup; hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2); @@ -2289,7 +2257,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) BIF_RET(res); } else if (BIF_ARG_1 == am_fullsweep_after){ - Uint val = (Uint) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); + Uint val = (Uint) erts_atomic32_read_nob(&erts_max_gen_gcs); hp = HAlloc(BIF_P, 3); res = TUPLE2(hp, am_fullsweep_after, make_small(val)); BIF_RET(res); @@ -2322,8 +2290,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) erts_dsprintf_buf_t *dsbufp = erts_create_info_dsbuf(0); /* Need to be the only thread running... */ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); if (BIF_ARG_1 == am_info) info(ERTS_PRINT_DSBUF, (void *) dsbufp); @@ -2334,8 +2302,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) else distribution_info(ERTS_PRINT_DSBUF, (void *) dsbufp); - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); ASSERT(dsbufp && dsbufp->str); res = new_binary(BIF_P, (byte *) dsbufp->str, dsbufp->str_len); @@ -2344,7 +2312,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) } else if (ERTS_IS_ATOM_STR("dist_ctrl", BIF_ARG_1)) { DistEntry *dep; i = 0; - erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); + erts_rwmtx_rlock(&erts_dist_table_rwmtx); for (dep = erts_visible_dist_entries; dep; dep = dep->next) ++i; for (dep = erts_hidden_dist_entries; dep; dep = dep->next) @@ -2367,7 +2335,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) res = CONS(hp, tpl, res); hp += 2; } - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); BIF_RET(res); } else if (BIF_ARG_1 == am_system_version) { erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0); @@ -2393,16 +2361,10 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) BIF_RET(erts_allocator_options((void *) BIF_P)); } else if (BIF_ARG_1 == am_thread_pool_size) { -#ifdef USE_THREADS extern int erts_async_max_threads; -#endif int n; -#ifdef USE_THREADS n = erts_async_max_threads; -#else - n = 0; -#endif BIF_RET(make_small(n)); } else if (BIF_ARG_1 == am_alloc_util_allocators) { @@ -2470,7 +2432,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) #endif BIF_RET(res); -#endif /* #ifndef ERTS_SMP */ +#endif /* #ifndef ERTS_OPCODE_COUNTER_SUPPORT */ } else if (BIF_ARG_1 == am_wordsize) { return make_small(sizeof(Eterm)); } else if (BIF_ARG_1 == am_endian) { @@ -2550,11 +2512,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) BIF_RET(res); #endif } else if (BIF_ARG_1 == am_threads) { -#ifdef USE_THREADS return am_true; -#else - return am_false; -#endif } else if (BIF_ARG_1 == am_creation) { return make_small(erts_this_node->creation); } else if (BIF_ARG_1 == am_break_ignored) { @@ -2613,11 +2571,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) hp = HAlloc(BIF_P, 2*n); BIF_RET(buf_to_intlist(&hp, buf, n, NIL)); } else if (ERTS_IS_ATOM_STR("smp_support", BIF_ARG_1)) { -#ifdef ERTS_SMP BIF_RET(am_true); -#else - BIF_RET(am_false); -#endif } else if (ERTS_IS_ATOM_STR("scheduler_bind_type", BIF_ARG_1)) { 
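/*
 * Editorial sketch, not part of the patch: with the SMP-only runtime the
 * hard-coded non-SMP answers disappear and every schedulers_* query goes
 * through erts_schedulers_state(), passing NULL for any count the caller
 * is not interested in, as the branches above now do unconditionally.
 */
static Eterm sketch_schedulers_online(Process *p)
{
    Uint online;
    erts_schedulers_state(NULL, &online, NULL,
                          NULL, NULL, NULL, NULL, NULL);
    return make_small(online);
}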
BIF_RET(erts_bound_schedulers_term(BIF_P)); } else if (ERTS_IS_ATOM_STR("scheduler_bindings", BIF_ARG_1)) { @@ -2629,11 +2583,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) res = make_small(erts_no_schedulers); BIF_RET(res); } else if (ERTS_IS_ATOM_STR("schedulers_state", BIF_ARG_1)) { -#ifndef ERTS_SMP - Eterm *hp = HAlloc(BIF_P, 4); - res = TUPLE3(hp, make_small(1), make_small(1), make_small(1)); - BIF_RET(res); -#else Eterm *hp; Uint total, online, active; erts_schedulers_state(&total, &online, &active, @@ -2644,13 +2593,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) make_small(online), make_small(active)); BIF_RET(res); -#endif } else if (ERTS_IS_ATOM_STR("schedulers_state", BIF_ARG_1)) { -#ifndef ERTS_SMP - Eterm *hp = HAlloc(BIF_P, 4); - res = TUPLE3(hp, make_small(1), make_small(1), make_small(1)); - BIF_RET(res); -#else Eterm *hp; Uint total, online, active; erts_schedulers_state(&total, &online, &active, @@ -2661,19 +2604,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) make_small(online), make_small(active)); BIF_RET(res); -#endif } else if (ERTS_IS_ATOM_STR("all_schedulers_state", BIF_ARG_1)) { -#ifndef ERTS_SMP - Eterm *hp = HAlloc(BIF_P, 2+5); - res = CONS(hp+5, - TUPLE4(hp, - am_normal, - make_small(1), - make_small(1), - make_small(1)), - NIL); - BIF_RET(res); -#else Eterm *hp, tpl; Uint sz, total, online, active, dirty_cpu_total, dirty_cpu_online, dirty_cpu_active, @@ -2719,46 +2650,25 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) hp += 5; res = CONS(hp, tpl, res); BIF_RET(res); -#endif } else if (ERTS_IS_ATOM_STR("schedulers_online", BIF_ARG_1)) { -#ifndef ERTS_SMP - BIF_RET(make_small(1)); -#else Uint online; erts_schedulers_state(NULL, &online, NULL, NULL, NULL, NULL, NULL, NULL); BIF_RET(make_small(online)); -#endif } else if (ERTS_IS_ATOM_STR("schedulers_active", BIF_ARG_1)) { -#ifndef ERTS_SMP - BIF_RET(make_small(1)); -#else Uint active; erts_schedulers_state(NULL, NULL, &active, NULL, NULL, NULL, NULL, NULL); BIF_RET(make_small(active)); -#endif } else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers", BIF_ARG_1)) { Uint dirty_cpu; -#ifdef ERTS_DIRTY_SCHEDULERS erts_schedulers_state(NULL, NULL, NULL, &dirty_cpu, NULL, NULL, NULL, NULL); -#else - dirty_cpu = 0; -#endif BIF_RET(make_small(dirty_cpu)); } else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers_online", BIF_ARG_1)) { Uint dirty_cpu_onln; -#ifdef ERTS_DIRTY_SCHEDULERS erts_schedulers_state(NULL, NULL, NULL, NULL, &dirty_cpu_onln, NULL, NULL, NULL); -#else - dirty_cpu_onln = 0; -#endif BIF_RET(make_small(dirty_cpu_onln)); } else if (ERTS_IS_ATOM_STR("dirty_io_schedulers", BIF_ARG_1)) { Uint dirty_io; -#ifdef ERTS_DIRTY_SCHEDULERS erts_schedulers_state(NULL, NULL, NULL, NULL, NULL, NULL, &dirty_io, NULL); -#else - dirty_io = 0; -#endif BIF_RET(make_small(dirty_io)); } else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) { res = make_small(erts_no_run_queues); @@ -2780,7 +2690,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) BIF_RET(make_small(CONTEXT_REDS)); } else if (ERTS_IS_ATOM_STR("kernel_poll", BIF_ARG_1)) { #ifdef ERTS_ENABLE_KERNEL_POLL - BIF_RET(erts_use_kernel_poll ? 
am_true : am_false); + BIF_RET(am_true); #else BIF_RET(am_false); #endif @@ -2805,23 +2715,15 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) } else if (ERTS_IS_ATOM_STR("check_io", BIF_ARG_1)) { BIF_RET(erts_check_io_info(BIF_P)); } else if (ERTS_IS_ATOM_STR("multi_scheduling_blockers", BIF_ARG_1)) { -#ifndef ERTS_SMP - BIF_RET(NIL); -#else if (erts_no_schedulers == 1) BIF_RET(NIL); else BIF_RET(erts_multi_scheduling_blockers(BIF_P, 0)); -#endif } else if (ERTS_IS_ATOM_STR("normal_multi_scheduling_blockers", BIF_ARG_1)) { -#ifndef ERTS_SMP - BIF_RET(NIL); -#else if (erts_no_schedulers == 1) BIF_RET(NIL); else BIF_RET(erts_multi_scheduling_blockers(BIF_P, 1)); -#endif } else if (ERTS_IS_ATOM_STR("modified_timing_level", BIF_ARG_1)) { BIF_RET(ERTS_USE_MODIFIED_TIMING() ? make_small(erts_modified_timing_level) @@ -2884,12 +2786,10 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) BIF_RET(am_false); #endif } -#ifdef ERTS_SMP else if (ERTS_IS_ATOM_STR("thread_progress", BIF_ARG_1)) { erts_thr_progress_dbg_print_state(); BIF_RET(am_true); } -#endif else if (BIF_ARG_1 == am_message_queue_data) { switch (erts_default_spo_flags & (SPO_ON_HEAP_MSGQ|SPO_OFF_HEAP_MSGQ)) { case SPO_OFF_HEAP_MSGQ: @@ -2943,7 +2843,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) BIF_RET(am_disabled); } else if (ERTS_IS_ATOM_STR("eager_check_io",BIF_ARG_1)) { - BIF_RET(erts_eager_check_io ? am_true : am_false); + BIF_RET(am_true); } else if (ERTS_IS_ATOM_STR("literal_test",BIF_ARG_1)) { #ifdef ERTS_HAVE_IS_IN_LITERAL_RANGE @@ -2981,7 +2881,7 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, { Eterm res = THE_NON_VALUE; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (item == am_id) { if (hpp) @@ -3172,9 +3072,6 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, } else if (ERTS_IS_ATOM_STR("locking", item)) { if (hpp) { -#ifndef ERTS_SMP - res = am_false; -#else if (erts_atomic32_read_nob(&prt->state) & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) { DECL_AM(port_level); @@ -3188,7 +3085,6 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, & ERL_DRV_FLAG_USE_PORT_LOCKING)); res = AM_driver_level; } -#endif } if (szp) { res = am_true; @@ -3201,7 +3097,7 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, goto done; } res = ((ERTS_PTS_FLG_PARALLELISM & - erts_smp_atomic32_read_nob(&prt->sched.flags)) + erts_atomic32_read_nob(&prt->sched.flags)) ? 
am_true : am_false); } @@ -3277,7 +3173,7 @@ fun_info_2(BIF_ALIST_2) } break; case am_refc: - val = erts_make_integer(erts_smp_atomic_read_nob(&funp->fe->refc), p); + val = erts_make_integer(erts_atomic_read_nob(&funp->fe->refc), p); hp = HAlloc(p, 3); break; case am_arity: @@ -3382,7 +3278,7 @@ BIF_RETTYPE is_process_alive_1(BIF_ALIST_1) BIF_RET(am_false); } else { - if (erts_smp_atomic32_read_acqb(&rp->state) + if (erts_atomic32_read_acqb(&rp->state) & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_false); else @@ -3417,7 +3313,7 @@ BIF_RETTYPE process_display_2(BIF_ALIST_2) BIF_ARG_1, BIF_ARG_2); if (rp != BIF_P && ERTS_PROC_PENDING_EXIT(rp)) { Eterm args[2] = {BIF_ARG_1, BIF_ARG_2}; - erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_ALL); + erts_proc_unlock(rp, ERTS_PROC_LOCKS_ALL); ERTS_BIF_AWAIT_X_APPLY_TRAP(BIF_P, BIF_ARG_1, am_erlang, @@ -3426,11 +3322,9 @@ BIF_RETTYPE process_display_2(BIF_ALIST_2) 2); } erts_stack_dump(ERTS_PRINT_STDERR, NULL, rp); -#ifdef ERTS_SMP - erts_smp_proc_unlock(rp, (BIF_P == rp + erts_proc_unlock(rp, (BIF_P == rp ? ERTS_PROC_LOCKS_ALL_MINOR : ERTS_PROC_LOCKS_ALL)); -#endif BIF_RET(am_true); } @@ -3608,7 +3502,7 @@ BIF_RETTYPE error_logger_warning_map_0(BIF_ALIST_0) BIF_RET(erts_error_logger_warnings); } -static erts_smp_atomic_t available_internal_state; +static erts_atomic_t available_internal_state; static int empty_magic_ref_destructor(Binary *bin) { @@ -3621,7 +3515,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) * NOTE: Only supposed to be used for testing, and debugging. */ - if (!erts_smp_atomic_read_nob(&available_internal_state)) { + if (!erts_atomic_read_nob(&available_internal_state)) { BIF_ERROR(BIF_P, EXC_UNDEF); } @@ -3664,9 +3558,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) int no_errors; ErtsCheckIoDebugInfo ciodi = {0}; #ifdef HAVE_ERTS_CHECK_IO_DEBUG - erts_smp_proc_unlock(BIF_P,ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P,ERTS_PROC_LOCK_MAIN); no_errors = erts_check_io_debug(&ciodi); - erts_smp_proc_lock(BIF_P,ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P,ERTS_PROC_LOCK_MAIN); #else no_errors = 0; #endif @@ -3681,8 +3575,8 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) (Uint) ciodi.no_used_fds), erts_bld_uint(hpp, szp, (Uint) ciodi.no_driver_select_structs), - erts_bld_uint(hpp, szp, - (Uint) ciodi.no_driver_event_structs)); + erts_bld_uint(hpp, szp, + (Uint) ciodi.no_enif_select_structs)); if (hpp) break; hp = HAlloc(BIF_P, sz); @@ -3716,9 +3610,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) } else if (ERTS_IS_ATOM_STR("nbalance", BIF_ARG_1)) { Uint n; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); n = erts_debug_nbalance(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(erts_make_integer(n, BIF_P)); } else if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)) { @@ -3733,11 +3627,11 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) } else if (ERTS_IS_ATOM_STR("memory", BIF_ARG_1)) { Eterm res; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); res = erts_memory(NULL, NULL, BIF_P, THE_NON_VALUE); - erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); BIF_RET(res); } else if (ERTS_IS_ATOM_STR("mmap", 
BIF_ARG_1)) { @@ -3804,11 +3698,11 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) tp[2], ERTS_PROC_LOCK_LINK); if (!p) { - ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P); + ERTS_ASSERT_IS_NOT_EXITING(BIF_P); BIF_RET(am_undefined); } res = make_link_list(BIF_P, ERTS_P_LINKS(p), NIL); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); BIF_RET(res); } else if(is_internal_port(tp[2])) { @@ -3827,11 +3721,10 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) DistEntry *dep = erts_find_dist_entry(tp[2]); if(dep) { Eterm subres; - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); subres = make_link_list(BIF_P, dep->nlinks, NIL); subres = make_link_list(BIF_P, dep->node_links, subres); - erts_smp_de_links_unlock(dep); - erts_deref_dist_entry(dep); + erts_de_links_unlock(dep); BIF_RET(subres); } else { BIF_RET(am_undefined); @@ -3849,20 +3742,19 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) tp[2], ERTS_PROC_LOCK_LINK); if (!p) { - ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P); + ERTS_ASSERT_IS_NOT_EXITING(BIF_P); BIF_RET(am_undefined); } res = make_monitor_list(BIF_P, ERTS_P_MONITORS(p)); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); BIF_RET(res); } else if(is_node_name_atom(tp[2])) { DistEntry *dep = erts_find_dist_entry(tp[2]); if(dep) { Eterm ml; - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); ml = make_monitor_list(BIF_P, dep->monitors); - erts_smp_de_links_unlock(dep); - erts_deref_dist_entry(dep); + erts_de_links_unlock(dep); BIF_RET(ml); } else { BIF_RET(am_undefined); @@ -3877,7 +3769,6 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) else { Uint cno = dist_entry_channel_no(dep); res = make_small(cno); - erts_deref_dist_entry(dep); } BIF_RET(res); } @@ -3889,7 +3780,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) } else { Eterm res = ERTS_PROC_PENDING_EXIT(rp) ? 
am_true : am_false; - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); BIF_RET(res); } } @@ -3939,15 +3830,14 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) DFLAG_BIT_BINARIES); BIF_RET(erts_term_to_binary(BIF_P, tp[2], 0, dflags)); } - else if (ERTS_IS_ATOM_STR("dist_port", tp[1])) { + else if (ERTS_IS_ATOM_STR("dist_ctrl", tp[1])) { Eterm res = am_undefined; DistEntry *dep = erts_sysname_to_connected_dist_entry(tp[2]); if (dep) { - erts_smp_de_rlock(dep); - if (is_internal_port(dep->cid)) + erts_de_rlock(dep); + if (is_internal_port(dep->cid) || is_internal_pid(dep->cid)) res = dep->cid; - erts_smp_de_runlock(dep); - erts_deref_dist_entry(dep); + erts_de_runlock(dep); } BIF_RET(res); } @@ -4091,7 +3981,7 @@ BIF_RETTYPE erts_internal_system_check_1(BIF_ALIST_1) BIF_ERROR(BIF_P, BADARG); } -static erts_smp_atomic_t hipe_test_reschedule_flag; +static erts_atomic_t hipe_test_reschedule_flag; #if defined(VALGRIND) && defined(__GNUC__) /* Force noinline for valgrind suppression */ @@ -4115,7 +4005,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1) && (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false)) { erts_aint_t on = (erts_aint_t) (BIF_ARG_2 == am_true); - erts_aint_t prev_on = erts_smp_atomic_xchg_nob(&available_internal_state, on); + erts_aint_t prev_on = erts_atomic_xchg_nob(&available_internal_state, on); if (on) { erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); erts_dsprintf(dsbufp, "Process %T ", BIF_P->common.id); @@ -4131,7 +4021,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) BIF_RET(prev_on ? am_true : am_false); } - if (!erts_smp_atomic_read_nob(&available_internal_state)) { + if (!erts_atomic_read_nob(&available_internal_state)) { BIF_ERROR(BIF_P, EXC_UNDEF); } @@ -4155,13 +4045,13 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) Sint ms; if (term_to_Sint(BIF_ARG_2, &ms) != 0) { if (ms > 0) { - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); if (block) - erts_smp_thr_progress_block(); + erts_thr_progress_block(); while (erts_milli_sleep((long) ms) != 0); if (block) - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); } BIF_RET(am_true); } @@ -4170,9 +4060,9 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) Sint ms; if (term_to_Sint(BIF_ARG_2, &ms) != 0) { if (ms > 0) { - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); while (erts_milli_sleep((long) ms) != 0); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); } BIF_RET(am_true); } @@ -4240,10 +4130,8 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) BIF_RET(AM_dead); } -#ifdef ERTS_SMP if (BIF_P == rp) rp_locks |= ERTS_PROC_LOCK_MAIN; -#endif xres = erts_send_exit_signal(NULL, /* NULL in order to force a pending exit when we send to our @@ -4255,11 +4143,9 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) NIL, NULL, 0); -#ifdef ERTS_SMP if (BIF_P == rp) rp_locks &= ~ERTS_PROC_LOCK_MAIN; -#endif - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); if (xres > 1) { DECL_AM(message); BIF_RET(AM_message); @@ -4321,14 +4207,14 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) } else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_suspend", BIF_ARG_1)) { /* Used by hipe 
test suites */ - erts_aint_t flag = erts_smp_atomic_read_nob(&hipe_test_reschedule_flag); + erts_aint_t flag = erts_atomic_read_nob(&hipe_test_reschedule_flag); if (!flag && BIF_ARG_2 != am_false) { - erts_smp_atomic_set_nob(&hipe_test_reschedule_flag, 1); + erts_atomic_set_nob(&hipe_test_reschedule_flag, 1); erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL); ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_set_internal_state_2], BIF_P, BIF_ARG_1, BIF_ARG_2); } - erts_smp_atomic_set_nob(&hipe_test_reschedule_flag, !flag); + erts_atomic_set_nob(&hipe_test_reschedule_flag, !flag); BIF_RET(NIL); } else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_resume", BIF_ARG_1)) { @@ -4339,7 +4225,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) if (rp) { erts_resume(rp, ERTS_PROC_LOCK_STATUS); res = am_true; - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); } BIF_RET(res); } @@ -4356,16 +4242,14 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) BIF_RET(am_false); else { Uint32 con_id; - erts_smp_de_rlock(dep); + erts_de_rlock(dep); con_id = dep->connection_id; - erts_smp_de_runlock(dep); + erts_de_runlock(dep); erts_kill_dist_connection(dep, con_id); - erts_deref_dist_entry(dep); BIF_RET(am_true); } } else if (ERTS_IS_ATOM_STR("not_running_optimization", BIF_ARG_1)) { -#ifdef ERTS_SMP int old_use_opt, use_opt; switch (BIF_ARG_2) { case am_true: @@ -4378,16 +4262,13 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) BIF_ERROR(BIF_P, BADARG); } - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); old_use_opt = !erts_disable_proc_not_running_opt; erts_disable_proc_not_running_opt = !use_opt; - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(old_use_opt ? am_true : am_false); -#else - BIF_ERROR(BIF_P, EXC_NOTSUP); -#endif } else if (ERTS_IS_ATOM_STR("wait", BIF_ARG_1)) { if (ERTS_IS_ATOM_STR("deallocations", BIF_ARG_2)) { @@ -4415,9 +4296,9 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) Sint64 msecs; if (term_to_Sint64(BIF_ARG_2, &msecs)) { /* Negative value restore original value... 
*/ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_debug_test_node_tab_delayed_delete(msecs); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(am_ok); } } @@ -4865,8 +4746,8 @@ static void os_info_init(void) void erts_bif_info_init(void) { - erts_smp_atomic_init_nob(&available_internal_state, 0); - erts_smp_atomic_init_nob(&hipe_test_reschedule_flag, 0); + erts_atomic_init_nob(&available_internal_state, 0); + erts_atomic_init_nob(&hipe_test_reschedule_flag, 0); alloc_info_trap = erts_export_put(am_erlang, am_alloc_info, 1); alloc_sizes_trap = erts_export_put(am_erlang, am_alloc_sizes, 1); diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c index 1121a450cd..3b8e70d44a 100644 --- a/erts/emulator/beam/erl_bif_port.c +++ b/erts/emulator/beam/erl_bif_port.c @@ -86,25 +86,25 @@ BIF_RETTYPE erts_internal_open_port_2(BIF_ALIST_2) erts_make_ref_in_array(port->async_open_port->ref); port->async_open_port->to = BIF_P->common.id; - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE | ERTS_PROC_LOCK_LINK); + erts_proc_lock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE | ERTS_PROC_LOCK_LINK); if (ERTS_PROC_PENDING_EXIT(BIF_P)) { /* need to exit caller instead */ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE | ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE | ERTS_PROC_LOCK_LINK); KILL_CATCHES(BIF_P); BIF_P->freason = EXC_EXIT; erts_port_release(port); BIF_RET(am_badarg); } - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(BIF_P); + ERTS_MSGQ_MV_INQ2PRIVQ(BIF_P); BIF_P->msg.save = BIF_P->msg.last; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE); res = erts_proc_store_ref(BIF_P, port->async_open_port->ref); } else { res = port->common.id; - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); } erts_add_link(&ERTS_P_LINKS(port), LINK_PID, BIF_P->common.id); @@ -114,7 +114,7 @@ BIF_RETTYPE erts_internal_open_port_2(BIF_ALIST_2) trace_proc(BIF_P, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK, BIF_P, am_link, port->common.id); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); erts_port_release(port); @@ -271,12 +271,10 @@ BIF_RETTYPE erts_internal_port_call_3(BIF_ALIST_3) break; } - state = erts_smp_atomic32_read_acqb(&BIF_P->state); + state = erts_atomic32_read_acqb(&BIF_P->state); if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) { -#ifdef ERTS_SMP if (state & ERTS_PSFLG_PENDING_EXIT) erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN); -#endif ERTS_BIF_EXITED(BIF_P); } @@ -321,12 +319,10 @@ BIF_RETTYPE erts_internal_port_control_3(BIF_ALIST_3) break; } - state = erts_smp_atomic32_read_acqb(&BIF_P->state); + state = erts_atomic32_read_acqb(&BIF_P->state); if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) { -#ifdef ERTS_SMP if (state & ERTS_PSFLG_PENDING_EXIT) erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN); -#endif ERTS_BIF_EXITED(BIF_P); } @@ -511,39 +507,35 @@ cleanup_old_port_data(erts_aint_t data) ASSERT(is_immed((Eterm) data)); } else { -#ifdef ERTS_SMP ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data; size_t size; - ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER; + ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER; size = sizeof(ErtsPortDataHeap) + (pdhp->hsize-1)*sizeof(Eterm); erts_schedule_thr_prgr_later_cleanup_op(free_port_data_heap, (void *) pdhp, &pdhp->later_op, size); 
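/* Note (sketch, not part of the patch): why the heap above is handed
 * to a thread-progress later op instead of being freed right away.
 * A concurrent reader may have fetched the old prt->data word just
 * before the xchg and still be following the pointer:
 *
 *     erts_aint_t data = erts_atomic_read_ddrb(&prt->data);
 *     if (data && (data & 0x3) == 0) {
 *         ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data;
 *         ... dependent loads of pdhp->hsize, pdhp->heap ...
 *     }
 *
 * The ddrb read orders the pointer fetch before the dependent loads,
 * and deferring the free until all schedulers have made thread
 * progress guarantees that no such reader can still hold pdhp. */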
-#else - free_port_data_heap((void *) data); -#endif } } void erts_init_port_data(Port *prt) { - erts_smp_atomic_init_nob(&prt->data, (erts_aint_t) am_undefined); + erts_atomic_init_nob(&prt->data, (erts_aint_t) am_undefined); } void erts_cleanup_port_data(Port *prt) { ASSERT(erts_atomic32_read_nob(&prt->state) & ERTS_PORT_SFLGS_INVALID_LOOKUP); - cleanup_old_port_data(erts_smp_atomic_xchg_nob(&prt->data, + cleanup_old_port_data(erts_atomic_xchg_nob(&prt->data, (erts_aint_t) NULL)); } Uint erts_port_data_size(Port *prt) { - erts_aint_t data = erts_smp_atomic_read_ddrb(&prt->data); + erts_aint_t data = erts_atomic_read_ddrb(&prt->data); if ((data & 0x3) != 0) { ASSERT(is_immed((Eterm) (UWord) data)); @@ -558,7 +550,7 @@ erts_port_data_size(Port *prt) ErlOffHeap * erts_port_data_offheap(Port *prt) { - erts_aint_t data = erts_smp_atomic_read_ddrb(&prt->data); + erts_aint_t data = erts_atomic_read_ddrb(&prt->data); if ((data & 0x3) != 0) { ASSERT(is_immed((Eterm) (UWord) data)); @@ -603,11 +595,11 @@ BIF_RETTYPE port_set_data_2(BIF_ALIST_2) ASSERT((data & 0x3) == 0); } - data = erts_smp_atomic_xchg_wb(&prt->data, data); + data = erts_atomic_xchg_wb(&prt->data, data); if (data == (erts_aint_t)NULL) { /* Port terminated by racing thread */ - data = erts_smp_atomic_xchg_wb(&prt->data, data); + data = erts_atomic_xchg_wb(&prt->data, data); ASSERT(data != (erts_aint_t)NULL); cleanup_old_port_data(data); BIF_ERROR(BIF_P, BADARG); @@ -630,7 +622,7 @@ BIF_RETTYPE port_get_data_1(BIF_ALIST_1) if (!prt) BIF_ERROR(BIF_P, BADARG); - data = erts_smp_atomic_read_ddrb(&prt->data); + data = erts_atomic_read_ddrb(&prt->data); if (data == (erts_aint_t)NULL) BIF_ERROR(BIF_P, BADARG); /* Port terminated by racing thread */ @@ -925,7 +917,7 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump) } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); port = erts_open_driver(driver, p->common.id, name_buf, &opts, err_typep, err_nump); #ifdef USE_VM_PROBES @@ -942,7 +934,7 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump) if (port && IS_TRACED_FL(port, F_TRACE_PORTS)) trace_port(port, am_getting_linked, p->common.id); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) { trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in); diff --git a/erts/emulator/beam/erl_bif_re.c b/erts/emulator/beam/erl_bif_re.c index ad124fd979..bc819505e7 100644 --- a/erts/emulator/beam/erl_bif_re.c +++ b/erts/emulator/beam/erl_bif_re.c @@ -66,11 +66,7 @@ static void erts_erts_pcre_stack_free(void *ptr) { #define ERTS_PCRE_STACK_MARGIN (10*1024) -#ifdef ERTS_SMP # define ERTS_STACK_LIMIT ((char *) ethr_get_stacklimit()) -#else -# define ERTS_STACK_LIMIT ((char *) erts_scheduler_stack_limit) -#endif static int stack_guard_downwards(void) diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c index 45159c4392..22942b40c4 100644 --- a/erts/emulator/beam/erl_bif_trace.c +++ b/erts/emulator/beam/erl_bif_trace.c @@ -60,10 +60,8 @@ static struct { /* Protected by code write permission */ int local; BpFunctions f; /* Local functions */ BpFunctions e; /* Export entries */ -#ifdef ERTS_SMP Process* stager; ErtsThrPrgrLaterOp lop; -#endif } finish_bp; static Eterm @@ -71,9 +69,7 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist); static int erts_set_tracing_event_pattern(Eterm event, Binary*, int on); -#ifdef ERTS_SMP static void 
smp_bp_finisher(void* arg); -#endif static BIF_RETTYPE system_monitor(Process *p, Eterm monitor_pid, Eterm list); @@ -345,7 +341,6 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist) ERTS_TRACER_CLEAR(&meta_tracer); -#ifdef ERTS_SMP if (finish_bp.current >= 0) { ASSERT(matches >= 0); ASSERT(finish_bp.stager == NULL); @@ -355,7 +350,6 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist) erts_suspend(p, ERTS_PROC_LOCK_MAIN, NULL); ERTS_BIF_YIELD_RETURN(p, make_small(matches)); } -#endif erts_release_code_write_permission(); @@ -367,7 +361,6 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist) } } -#ifdef ERTS_SMP static void smp_bp_finisher(void* null) { if (erts_finish_breakpointing()) { /* Not done */ @@ -380,15 +373,14 @@ static void smp_bp_finisher(void* null) finish_bp.stager = NULL; #endif erts_release_code_write_permission(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); if (!ERTS_PROC_IS_EXITING(p)) { erts_resume(p, ERTS_PROC_LOCK_STATUS); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); erts_proc_dec_refc(p); } } -#endif /* ERTS_SMP */ void erts_get_default_trace_pattern(int *trace_pattern_is_on, @@ -397,8 +389,8 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on, struct trace_pattern_flags *trace_pattern_flags, ErtsTracer *meta_tracer) { - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission() || - erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_has_code_write_permission() || + erts_thr_progress_is_blocking()); if (trace_pattern_is_on) *trace_pattern_is_on = erts_default_trace_pattern_is_on; if (match_spec) @@ -413,8 +405,8 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on, int erts_is_default_trace_enabled(void) { - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission() || - erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_has_code_write_permission() || + erts_thr_progress_is_blocking()); return erts_default_trace_pattern_is_on; } @@ -543,9 +535,7 @@ Eterm erts_internal_trace_3(BIF_ALIST_3) int matches = 0; Uint mask = 0; int cpu_ts = 0; -#ifdef ERTS_SMP int system_blocked = 0; -#endif if (! erts_trace_flags(list, &mask, &tracer, &cpu_ts)) { BIF_ERROR(p, BADARG); @@ -620,13 +610,13 @@ Eterm erts_internal_trace_3(BIF_ALIST_3) goto error; if (start_trace(tracee_p, tracer, &tracee_p->common, on, mask)) { - erts_smp_proc_unlock(tracee_p, + erts_proc_unlock(tracee_p, (tracee_p == p ? ERTS_PROC_LOCKS_ALL_MINOR : ERTS_PROC_LOCKS_ALL)); goto already_traced; } - erts_smp_proc_unlock(tracee_p, + erts_proc_unlock(tracee_p, (tracee_p == p ? 
ERTS_PROC_LOCKS_ALL_MINOR : ERTS_PROC_LOCKS_ALL)); @@ -699,11 +689,9 @@ Eterm erts_internal_trace_3(BIF_ALIST_3) mods = 1; } -#ifdef ERTS_SMP - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); system_blocked = 1; -#endif ok = 1; if (procs || mods) { @@ -766,12 +754,10 @@ Eterm erts_internal_trace_3(BIF_ALIST_3) goto error; } -#ifdef ERTS_SMP if (system_blocked) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); } -#endif erts_release_code_write_permission(); ERTS_TRACER_CLEAR(&tracer); @@ -785,12 +771,10 @@ Eterm erts_internal_trace_3(BIF_ALIST_3) ERTS_TRACER_CLEAR(&tracer); -#ifdef ERTS_SMP if (system_blocked) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); } -#endif erts_release_code_write_permission(); BIF_ERROR(p, BADARG); @@ -878,7 +862,7 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key) trace_flags = ERTS_TRACE_FLAGS(tracee); if (tracee != p) - erts_smp_proc_unlock(tracee, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(tracee, ERTS_PROC_LOCK_MAIN); } else if (is_external_pid(pid_spec) && external_pid_dist_entry(pid_spec) == erts_this_dist_entry) { return am_undefined; @@ -982,12 +966,12 @@ static int function_is_traced(Process *p, if ((ep = export_get(&e)) != NULL) { pc = ep->beam; if (ep->addressv[erts_active_code_ix()] == pc && - *pc != (BeamInstr) em_call_error_handler) { + ! BeamIsOpCode(*pc, op_call_error_handler)) { int r = 0; - ASSERT(*pc == (BeamInstr) em_apply_bif || - *pc == (BeamInstr) BeamOp(op_i_generic_breakpoint)); + ASSERT(BeamIsOpCode(*pc, op_apply_bif) || + BeamIsOpCode(*pc, op_i_generic_breakpoint)); if (erts_is_trace_break(&ep->info, ms, 0)) { return FUNC_TRACE_GLOBAL_TRACE; @@ -1055,28 +1039,20 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key) mfa[1] = tp[2]; mfa[2] = signed_val(tp[3]); -#ifdef ERTS_SMP if ( (key == am_call_time) || (key == am_all)) { - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); } -#endif -#ifdef ERTS_DIRTY_SCHEDULERS - erts_smp_mtx_lock(&erts_dirty_bp_ix_mtx); -#endif + erts_mtx_lock(&erts_dirty_bp_ix_mtx); r = function_is_traced(p, mfa, &ms, &ms_meta, &meta, &count, &call_time); -#ifdef ERTS_DIRTY_SCHEDULERS - erts_smp_mtx_unlock(&erts_dirty_bp_ix_mtx); -#endif -#ifdef ERTS_SMP + erts_mtx_unlock(&erts_dirty_bp_ix_mtx); if ( (key == am_call_time) || (key == am_all)) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); } -#endif switch (r) { case FUNC_TRACE_NOEXIST: @@ -1385,14 +1361,14 @@ erts_set_trace_pattern(Process*p, ErtsCodeMFA *mfa, int specified, if (ep->addressv[code_ix] != pc) { fp[i].mod->curr.num_traced_exports++; #ifdef DEBUG - ep->info.op = (BeamInstr) BeamOp(op_i_func_info_IaaI); + ep->info.op = BeamOpCodeAddr(op_i_func_info_IaaI); #endif - ep->beam[0] = (BeamInstr) BeamOp(op_jump_f); + ep->beam[0] = BeamOpCodeAddr(op_trace_jump_W); ep->beam[1] = (BeamInstr) ep->addressv[code_ix]; } erts_set_call_trace_bif(ci, match_prog_set, 0); if (ep->addressv[code_ix] != pc) { - ep->beam[0] = (BeamInstr) BeamOp(op_i_generic_breakpoint); + ep->beam[0] = BeamOpCodeAddr(op_i_generic_breakpoint); } } else if (!on && 
flags.breakpoint) { /* Turn off breakpoint tracing -- nothing to do here. */ @@ -1402,8 +1378,8 @@ erts_set_trace_pattern(Process*p, ErtsCodeMFA *mfa, int specified, * before turning on breakpoint tracing. */ erts_clear_call_trace_bif(ci, 0); - if (ep->beam[0] == (BeamInstr) BeamOp(op_i_generic_breakpoint)) { - ep->beam[0] = (BeamInstr) BeamOp(op_jump_f); + if (BeamIsOpCode(ep->beam[0], op_i_generic_breakpoint)) { + ep->beam[0] = BeamOpCodeAddr(op_trace_jump_W); } } } @@ -1526,17 +1502,13 @@ erts_set_trace_pattern(Process*p, ErtsCodeMFA *mfa, int specified, finish_bp.install = on; finish_bp.local = flags.breakpoint; -#ifdef ERTS_SMP if (is_blocking) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); -#endif + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); while (erts_finish_breakpointing()) { /* Empty loop body */ } -#ifdef ERTS_SMP finish_bp.current = -1; } -#endif if (flags.breakpoint) { matches += finish_bp.f.matched; @@ -1571,11 +1543,6 @@ erts_set_tracing_event_pattern(Eterm event, Binary* match_spec, int on) finish_bp.f.matched = 0; finish_bp.f.matching = NULL; -#ifndef ERTS_SMP - while (erts_finish_breakpointing()) { - /* Empty loop body */ - } -#endif return 1; } @@ -1594,7 +1561,7 @@ consolidate_event_tracing(ErtsTracingEvent te[]) int erts_finish_breakpointing(void) { - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); + ERTS_LC_ASSERT(erts_has_code_write_permission()); /* * Memory barriers will be issued for all schedulers *before* @@ -1704,7 +1671,7 @@ uninstall_exp_breakpoints(BpFunctions* f) if (ep->addressv[code_ix] != ep->beam) { continue; } - ASSERT(ep->beam[0] == (BeamInstr) BeamOp(op_jump_f)); + ASSERT(BeamIsOpCode(ep->beam[0], op_trace_jump_W)); ep->addressv[code_ix] = (BeamInstr *) ep->beam[1]; } } @@ -1723,7 +1690,7 @@ clean_export_entries(BpFunctions* f) if (ep->addressv[code_ix] == ep->beam) { continue; } - if (ep->beam[0] == (BeamInstr) BeamOp(op_jump_f)) { + if (BeamIsOpCode(ep->beam[0], op_trace_jump_W)) { ep->beam[0] = (BeamInstr) 0; ep->beam[1] = (BeamInstr) 0; } @@ -2015,24 +1982,20 @@ BIF_RETTYPE seq_trace_print_2(BIF_ALIST_2) } void erts_system_monitor_clear(Process *c_p) { -#ifdef ERTS_SMP if (c_p) { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); } -#endif erts_set_system_monitor(NIL); erts_system_monitor_long_gc = 0; erts_system_monitor_long_schedule = 0; erts_system_monitor_large_heap = 0; erts_system_monitor_flags.busy_port = 0; erts_system_monitor_flags.busy_dist_port = 0; -#ifdef ERTS_SMP if (c_p) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } -#endif } @@ -2142,8 +2105,8 @@ system_monitor(Process *p, Eterm monitor_pid, Eterm list) int busy_port, busy_dist_port; system_blocked = 1; - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); if (!erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, monitor_pid, 0)) goto error; @@ -2182,16 +2145,16 @@ system_monitor(Process *p, Eterm monitor_pid, Eterm list) erts_system_monitor_flags.busy_port = !!busy_port; erts_system_monitor_flags.busy_dist_port = !!busy_dist_port; - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); BIF_RET(prev); } error: if (system_blocked) { - 
erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); } BIF_ERROR(p, BADARG); @@ -2200,23 +2163,19 @@ system_monitor(Process *p, Eterm monitor_pid, Eterm list) /* Begin: Trace for System Profiling */ void erts_system_profile_clear(Process *c_p) { -#ifdef ERTS_SMP if (c_p) { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); } -#endif erts_set_system_profile(NIL); erts_system_profile_flags.scheduler = 0; erts_system_profile_flags.runnable_procs = 0; erts_system_profile_flags.runnable_ports = 0; erts_system_profile_flags.exclusive = 0; -#ifdef ERTS_SMP if (c_p) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } -#endif } static Eterm system_profile_get(Process *p) { @@ -2278,8 +2237,8 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2) int scheduler, runnable_procs, runnable_ports, exclusive; system_blocked = 1; - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); /* Check if valid process, no locks are taken */ @@ -2330,8 +2289,8 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2) erts_system_profile_flags.runnable_procs = !!runnable_procs; erts_system_profile_flags.exclusive = !!exclusive; erts_system_profile_ts_type = ts; - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); BIF_RET(prev); @@ -2339,8 +2298,8 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2) error: if (system_blocked) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); } BIF_ERROR(p, BADARG); @@ -2365,7 +2324,7 @@ typedef struct { Eterm ref; Eterm ref_heap[ERTS_REF_THING_SIZE]; Eterm target; - erts_smp_atomic32_t refc; + erts_atomic32_t refc; } ErtsTraceDeliveredAll; static void @@ -2373,31 +2332,20 @@ reply_trace_delivered_all(void *vtdarp) { ErtsTraceDeliveredAll *tdarp = (ErtsTraceDeliveredAll *) vtdarp; - if (erts_smp_atomic32_dec_read_nob(&tdarp->refc) == 0) { + if (erts_atomic32_dec_read_nob(&tdarp->refc) == 0) { Eterm ref_copy, msg; Process *rp = tdarp->proc; Eterm *hp = NULL; ErlOffHeap *ohp; -#ifdef ERTS_SMP ErlHeapFragment *bp; bp = new_message_buffer(4 + NC_HEAP_SIZE(tdarp->ref)); hp = &bp->mem[0]; ohp = &bp->off_heap; -#else - ErtsProcLocks rp_locks = 0; - ErtsMessage *mp; - mp = erts_alloc_message_heap( - rp, &rp_locks, 4 + NC_HEAP_SIZE(tdarp->ref), &hp, &ohp); -#endif ref_copy = STORE_NC(&hp, ohp, tdarp->ref); msg = TUPLE3(hp, am_trace_delivered, tdarp->target, ref_copy); -#ifdef ERTS_SMP erts_send_sys_msg_proc(rp->common.id, rp->common.id, msg, bp); -#else - erts_queue_message(rp, rp_locks, mp, msg, am_system); -#endif erts_free(ERTS_ALC_T_MISC_AUX_WORK, vtdarp); erts_proc_dec_refc(rp); @@ -2418,7 +2366,7 @@ trace_delivered_1(BIF_ALIST_1) hp = &tdarp->ref_heap[0]; tdarp->ref = STORE_NC(&hp, NULL, ref); tdarp->target = BIF_ARG_1; - erts_smp_atomic32_init_nob(&tdarp->refc, + erts_atomic32_init_nob(&tdarp->refc, (erts_aint32_t) erts_no_schedulers); erts_proc_add_refc(BIF_P, 1); erts_schedule_multi_misc_aux_work(0, diff --git a/erts/emulator/beam/erl_bif_unique.c b/erts/emulator/beam/erl_bif_unique.c index 
2f8adc87d5..19d46537f9 100644 --- a/erts/emulator/beam/erl_bif_unique.c +++ b/erts/emulator/beam/erl_bif_unique.c @@ -77,11 +77,9 @@ init_reference(void) ref_init_value += (Uint64) tv.tv_usec; #ifdef DEBUG max_thr_id = (Uint32) erts_no_schedulers; -#ifdef ERTS_DIRTY_SCHEDULERS max_thr_id += (Uint32) erts_no_dirty_cpu_schedulers; max_thr_id += (Uint32) erts_no_dirty_io_schedulers; #endif -#endif erts_atomic64_init_nob(&global_reference.count, (erts_aint64_t) ref_init_value); init_magic_ref_tables(); @@ -136,7 +134,7 @@ Eterm erts_make_ref(Process *c_p) Eterm* hp; Uint32 ref[ERTS_REF_NUMBERS]; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p)); hp = HAlloc(c_p, ERTS_REF_THING_SIZE); @@ -439,10 +437,8 @@ init_unique_integer(void) { int bits; unique_data.r.o.val0_max = (Uint64) erts_no_schedulers; -#ifdef ERTS_DIRTY_SCHEDULERS unique_data.r.o.val0_max += (Uint64) erts_no_dirty_cpu_schedulers; unique_data.r.o.val0_max += (Uint64) erts_no_dirty_io_schedulers; -#endif bits = erts_fit_in_bits_int64(unique_data.r.o.val0_max); unique_data.r.o.left_shift = bits; unique_data.r.o.right_shift = 64 - bits; @@ -803,7 +799,7 @@ BIF_RETTYPE make_ref_0(BIF_ALIST_0) BIF_RETTYPE res; Eterm* hp; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); hp = HAlloc(BIF_P, ERTS_REF_THING_SIZE); diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h index b036b28dbf..05007e864e 100644 --- a/erts/emulator/beam/erl_binary.h +++ b/erts/emulator/beam/erl_binary.h @@ -291,7 +291,7 @@ typedef union { * atomics are used they might * differ in size. */ - erts_smp_atomic_t smp_atomic_word; + erts_atomic_t smp_atomic_word; erts_atomic_t atomic_word; } ErtsMagicIndirectionWord; @@ -326,7 +326,7 @@ ERTS_GLB_INLINE Binary *erts_create_magic_binary_x(Uint size, ERTS_GLB_INLINE Binary *erts_create_magic_binary(Uint size, int (*destructor)(Binary *)); ERTS_GLB_INLINE Binary *erts_create_magic_indirection(int (*destructor)(Binary *)); -ERTS_GLB_INLINE erts_smp_atomic_t *erts_smp_binary_to_magic_indirection(Binary *bp); +ERTS_GLB_INLINE erts_atomic_t *erts_binary_to_magic_indirection(Binary *bp); ERTS_GLB_INLINE erts_atomic_t *erts_binary_to_magic_indirection(Binary *bp); #if ERTS_GLB_INLINE_INCL_FUNC_DEF @@ -519,16 +519,6 @@ erts_create_magic_indirection(int (*destructor)(Binary *)) but word aligned */ } -ERTS_GLB_INLINE erts_smp_atomic_t * -erts_smp_binary_to_magic_indirection(Binary *bp) -{ - ErtsMagicIndirectionWord *mip; - ASSERT(bp->intern.flags & BIN_FLAG_MAGIC); - ASSERT(ERTS_MAGIC_BIN_ATYPE(bp) == ERTS_ALC_T_MINDIRECTION); - mip = ERTS_MAGIC_BIN_UNALIGNED_DATA(bp); - return &mip->smp_atomic_word; -} - ERTS_GLB_INLINE erts_atomic_t * erts_binary_to_magic_indirection(Binary *bp) { @@ -536,7 +526,7 @@ erts_binary_to_magic_indirection(Binary *bp) ASSERT(bp->intern.flags & BIN_FLAG_MAGIC); ASSERT(ERTS_MAGIC_BIN_ATYPE(bp) == ERTS_ALC_T_MINDIRECTION); mip = ERTS_MAGIC_BIN_UNALIGNED_DATA(bp); - return &mip->atomic_word; + return &mip->smp_atomic_word; } #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c index 51d23a8965..3a16913473 100644 --- a/erts/emulator/beam/erl_bits.c +++ b/erts/emulator/beam/erl_bits.c @@ -55,30 +55,19 @@ static byte get_bit(byte b, size_t a_offs); -#if defined(ERTS_SMP) /* the state resides in the current process' 
scheduler data */ -#elif defined(ERL_BITS_REENTRANT) -/* reentrant API but with a hidden single global state, for testing only */ -struct erl_bits_state ErlBitsState_; -#else -/* non-reentrant API with a single global state */ -struct erl_bits_state ErlBitsState; -#endif #define byte_buf (ErlBitsState.byte_buf_) #define byte_buf_len (ErlBitsState.byte_buf_len_) -static erts_smp_atomic_t bits_bufs_size; +static erts_atomic_t bits_bufs_size; Uint erts_bits_bufs_size(void) { - return (Uint) erts_smp_atomic_read_nob(&bits_bufs_size); + return (Uint) erts_atomic_read_nob(&bits_bufs_size); } -#if !defined(ERTS_SMP) -static -#endif void erts_bits_init_state(ERL_BITS_PROTO_0) { @@ -88,13 +77,11 @@ erts_bits_init_state(ERL_BITS_PROTO_0) erts_bin_offset = 0; } -#if defined(ERTS_SMP) void erts_bits_destroy_state(ERL_BITS_PROTO_0) { erts_free(ERTS_ALC_T_BITS_BUF, byte_buf); } -#endif void erts_init_bits(void) @@ -104,13 +91,8 @@ erts_init_bits(void) ERTS_CT_ASSERT(offsetof(ErtsBinary,driver.binary.orig_bytes) == offsetof(Binary,orig_bytes)); - erts_smp_atomic_init_nob(&bits_bufs_size, 0); -#if defined(ERTS_SMP) + erts_atomic_init_nob(&bits_bufs_size, 0); /* erl_process.c calls erts_bits_init_state() on all state instances */ -#else - ERL_BITS_DECLARE_STATEP; - erts_bits_init_state(ERL_BITS_ARGS_0); -#endif } /***************************************************************** @@ -744,7 +726,7 @@ static void ERTS_INLINE need_byte_buf(ERL_BITS_PROTO_1(int need)) { if (byte_buf_len < need) { - erts_smp_atomic_add_nob(&bits_bufs_size, need - byte_buf_len); + erts_atomic_add_nob(&bits_bufs_size, need - byte_buf_len); byte_buf_len = need; byte_buf = erts_realloc(ERTS_ALC_T_BITS_BUF, byte_buf, byte_buf_len); } diff --git a/erts/emulator/beam/erl_bits.h b/erts/emulator/beam/erl_bits.h index 5da2b28a89..b9d141d585 100644 --- a/erts/emulator/beam/erl_bits.h +++ b/erts/emulator/beam/erl_bits.h @@ -84,31 +84,14 @@ typedef struct erl_bin_match_struct{ #define ms_matchbuffer(_Ms) &(((ErlBinMatchState*) boxed_val(_Ms))->mb) -#if defined(ERTS_SMP) -#define ERL_BITS_REENTRANT -#else -/* uncomment to test the reentrant API in the non-SMP runtime system */ -/* #define ERL_BITS_REENTRANT */ -#endif - -#ifdef ERL_BITS_REENTRANT - /* * Reentrant API with the state passed as a parameter. * (Except when the current Process* already is a parameter.) */ -#ifdef ERTS_SMP /* the state resides in the current process' scheduler data */ #define ERL_BITS_DECLARE_STATEP struct erl_bits_state *EBS #define ERL_BITS_RELOAD_STATEP(P) do{EBS = &erts_proc_sched_data((P))->erl_bits_state;}while(0) #define ERL_BITS_DEFINE_STATEP(P) struct erl_bits_state *EBS = &erts_proc_sched_data((P))->erl_bits_state -#else -/* reentrant API but with a hidden single global state, for testing only */ -extern struct erl_bits_state ErlBitsState_; -#define ERL_BITS_DECLARE_STATEP struct erl_bits_state *EBS = &ErlBitsState_ -#define ERL_BITS_RELOAD_STATEP(P) do{}while(0) -#define ERL_BITS_DEFINE_STATEP(P) ERL_BITS_DECLARE_STATEP -#endif #define ErlBitsState (*EBS) #define ERL_BITS_PROTO_0 struct erl_bits_state *EBS @@ -120,26 +103,6 @@ extern struct erl_bits_state ErlBitsState_; #define ERL_BITS_ARGS_2(ARG1,ARG2) EBS, ARG1, ARG2 #define ERL_BITS_ARGS_3(ARG1,ARG2,ARG3) EBS, ARG1, ARG2, ARG3 -#else /* ERL_BITS_REENTRANT */ - -/* - * Non-reentrant API with a single global state. 
- */ -extern struct erl_bits_state ErlBitsState; -#define ERL_BITS_DECLARE_STATEP /*empty*/ -#define ERL_BITS_RELOAD_STATEP(P) do{}while(0) -#define ERL_BITS_DEFINE_STATEP(P) /*empty*/ - -#define ERL_BITS_PROTO_0 void -#define ERL_BITS_PROTO_1(PARM1) PARM1 -#define ERL_BITS_PROTO_2(PARM1,PARM2) PARM1, PARM2 -#define ERL_BITS_PROTO_3(PARM1,PARM2,PARM3) PARM1, PARM2, PARM3 -#define ERL_BITS_ARGS_0 /*empty*/ -#define ERL_BITS_ARGS_1(ARG1) ARG1 -#define ERL_BITS_ARGS_2(ARG1,ARG2) ARG1, ARG2 -#define ERL_BITS_ARGS_3(ARG1,ARG2,ARG3) ARG1, ARG2, ARG3 - -#endif /* ERL_BITS_REENTRANT */ #define erts_bin_offset (ErlBitsState.erts_bin_offset_) #define erts_current_bin (ErlBitsState.erts_current_bin_) @@ -158,10 +121,8 @@ extern struct erl_bits_state ErlBitsState; } while (0) void erts_init_bits(void); /* Initialization once. */ -#ifdef ERTS_SMP void erts_bits_init_state(ERL_BITS_PROTO_0); void erts_bits_destroy_state(ERL_BITS_PROTO_0); -#endif /* diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c index f8b2fa744f..49f9beb19f 100644 --- a/erts/emulator/beam/erl_cpu_topology.c +++ b/erts/emulator/beam/erl_cpu_topology.c @@ -60,7 +60,7 @@ static int max_main_threads; static int reader_groups; static ErtsCpuBindData *scheduler2cpu_map; -static erts_smp_rwmtx_t cpuinfo_rwmtx; +static erts_rwmtx_t cpuinfo_rwmtx; typedef enum { ERTS_CPU_BIND_UNDEFINED, @@ -131,13 +131,11 @@ static erts_cpu_groups_map_t *reader_groups_map; #define ERTS_MAX_CPU_TOPOLOGY_ID ((int) 0xffff) -#ifdef ERTS_SMP static void cpu_bind_order_sort(erts_cpu_topology_t *cpudata, int size, ErtsCpuBindOrder bind_order, int mk_seq); static void write_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size); -#endif static void reader_groups_callback(int, ErtsSchedulerData *, int, void *); static erts_cpu_groups_map_t *add_cpu_groups(int groups, @@ -434,7 +432,6 @@ processor_order_cmp(const void *vx, const void *vy) return 0; } -#ifdef ERTS_SMP void erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp) { @@ -444,7 +441,7 @@ erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp) int cgcc_ix; /* Unbind from cpu */ - erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx); + erts_rwmtx_rwlock(&cpuinfo_rwmtx); if (scheduler2cpu_map[esdp->no].bound_id >= 0 && erts_unbind_from_cpu(cpuinfo) == 0) { esdp->cpu_id = scheduler2cpu_map[esdp->no].bound_id = -1; @@ -463,7 +460,7 @@ erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp) } } ASSERT(no_cpu_groups_callbacks == cgcc_ix); - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); for (cgcc_ix = 0; cgcc_ix < no_cpu_groups_callbacks; cgcc_ix++) cgcc[cgcc_ix].callback(1, @@ -481,7 +478,7 @@ erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp) void erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(esdp->run_queue)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(esdp->run_queue)); if (esdp->no <= max_main_threads) erts_thr_set_main_status(1, (int) esdp->no); @@ -490,7 +487,6 @@ erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp) (void) ERTS_RUNQ_FLGS_SET(esdp->run_queue, ERTS_RUNQ_FLG_CHK_CPU_BIND); } -#endif void erts_sched_check_cpu_bind(ErtsSchedulerData *esdp) @@ -499,8 +495,8 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp) erts_cpu_groups_map_t *cgm; erts_cpu_groups_callback_list_t *cgcl; erts_cpu_groups_callback_call_t *cgcc; - erts_smp_runq_unlock(esdp->run_queue); - erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx); + 
erts_runq_unlock(esdp->run_queue); + erts_rwmtx_rwlock(&cpuinfo_rwmtx); cpu_id = scheduler2cpu_map[esdp->no].bind_id; if (cpu_id >= 0 && cpu_id != scheduler2cpu_map[esdp->no].bound_id) { res = erts_bind_to_cpu(cpuinfo, cpu_id); @@ -543,7 +539,7 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp) } ASSERT(no_cpu_groups_callbacks == cgcc_ix); - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); for (cgcc_ix = 0; cgcc_ix < no_cpu_groups_callbacks; cgcc_ix++) cgcc[cgcc_ix].callback(0, @@ -553,10 +549,9 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp) erts_free(ERTS_ALC_T_TMP, cgcc); - erts_smp_runq_lock(esdp->run_queue); + erts_runq_lock(esdp->run_queue); } -#ifdef ERTS_SMP void erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp) { @@ -565,7 +560,7 @@ erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp) erts_cpu_groups_callback_list_t *cgcl; erts_cpu_groups_callback_call_t *cgcc; - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); cgcc = erts_alloc(ERTS_ALC_T_TMP, (no_cpu_groups_callbacks @@ -581,7 +576,7 @@ erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp) } ASSERT(no_cpu_groups_callbacks == cgcc_ix); - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); for (cgcc_ix = 0; cgcc_ix < no_cpu_groups_callbacks; cgcc_ix++) cgcc[cgcc_ix].callback(0, @@ -594,7 +589,6 @@ erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp) if (esdp->no <= max_main_threads) erts_thr_set_main_status(1, (int) esdp->no); } -#endif static void write_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size) @@ -602,7 +596,7 @@ write_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size) int s_ix = 1; int cpu_ix; - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); if (cpu_bind_order != ERTS_CPU_BIND_NONE && size) { @@ -702,9 +696,9 @@ Eterm erts_bound_schedulers_term(Process *c_p) { ErtsCpuBindOrder order; - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); order = cpu_bind_order; - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); return bound_schedulers_term(order); } @@ -717,7 +711,7 @@ erts_bind_schedulers(Process *c_p, Eterm how) int cpudata_size; ErtsCpuBindOrder old_cpu_bind_order; - erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx); + erts_rwmtx_rwlock(&cpuinfo_rwmtx); if (erts_bind_to_cpu(cpuinfo, -1) == -ENOTSUP) { if (cpu_bind_order == ERTS_CPU_BIND_NONE @@ -773,7 +767,7 @@ erts_bind_schedulers(Process *c_p, Eterm how) done: - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); if (notify) erts_sched_notify_check_cpu_bind(); @@ -793,9 +787,9 @@ erts_sched_bind_atthrcreate_child(int unbind) { int res = 0; if (unbind) { - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); res = erts_unbind_from_cpu(cpuinfo); - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); } return res; } @@ -812,7 +806,7 @@ erts_sched_bind_atfork_prepare(void) ErtsSchedulerData *esdp = erts_get_scheduler_data(); int unbind = esdp != NULL && erts_is_scheduler_bound(esdp); if (unbind) - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); return unbind; } @@ -820,7 +814,7 @@ int erts_sched_bind_atfork_child(int unbind) { if (unbind) { - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&cpuinfo_rwmtx) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&cpuinfo_rwmtx) || erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); return 
erts_unbind_from_cpu(cpuinfo); } @@ -831,7 +825,7 @@ void erts_sched_bind_atfork_parent(int unbind) { if (unbind) - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); } Eterm @@ -865,9 +859,9 @@ erts_fake_scheduler_bindings(Process *p, Eterm how) return res; } - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); create_tmp_cpu_topology_copy(&cpudata, &cpudata_size); - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); if (!cpudata || fake_cpu_bind_order == ERTS_CPU_BIND_NONE) ERTS_BIF_PREP_RET(res, am_false); @@ -930,12 +924,12 @@ erts_get_schedulers_binds(Process *c_p) Eterm res = make_tuple(hp); *(hp++) = make_arityval(erts_no_schedulers); - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); for (ix = 1; ix <= erts_no_schedulers; ix++) *(hp++) = (scheduler2cpu_map[ix].bound_id >= 0 ? make_small(scheduler2cpu_map[ix].bound_id) : AM_unbound); - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); return res; } @@ -1346,7 +1340,7 @@ erts_set_cpu_topology(Process *c_p, Eterm term) int cpudata_size = 0; Eterm res; - erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx); + erts_rwmtx_rwlock(&cpuinfo_rwmtx); res = get_cpu_topology_term(c_p, ERTS_GET_USED_CPU_TOPOLOGY); if (term == am_undefined) { if (user_cpudata) @@ -1367,7 +1361,7 @@ erts_set_cpu_topology(Process *c_p, Eterm term) } else if (is_not_list(term)) { error: - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); res = THE_NON_VALUE; goto done; } @@ -1461,7 +1455,7 @@ erts_set_cpu_topology(Process *c_p, Eterm term) write_schedulers_bind_change(cpudata, cpudata_size); - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); erts_sched_notify_check_cpu_bind(); done: @@ -1615,7 +1609,7 @@ erts_get_cpu_topology_term(Process *c_p, Eterm which) { Eterm res; int type; - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); if (ERTS_IS_ATOM_STR("used", which)) type = ERTS_GET_USED_CPU_TOPOLOGY; else if (ERTS_IS_ATOM_STR("detected", which)) @@ -1628,7 +1622,7 @@ erts_get_cpu_topology_term(Process *c_p, Eterm which) res = THE_NON_VALUE; else res = get_cpu_topology_term(c_p, type); - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); return res; } @@ -1646,9 +1640,9 @@ get_logical_processors(int *conf, int *onln, int *avail) void erts_get_logical_processors(int *conf, int *onln, int *avail) { - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); get_logical_processors(conf, onln, avail); - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); } void @@ -1706,9 +1700,9 @@ erts_init_cpu_topology(void) { int ix; - erts_smp_rwmtx_init(&cpuinfo_rwmtx, "cpu_info", NIL, + erts_rwmtx_init(&cpuinfo_rwmtx, "cpu_info", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); - erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx); + erts_rwmtx_rwlock(&cpuinfo_rwmtx); scheduler2cpu_map = erts_alloc(ERTS_ALC_T_CPUDATA, (sizeof(ErtsCpuBindData) @@ -1726,13 +1720,13 @@ erts_init_cpu_topology(void) NULL); if (cpu_bind_order == ERTS_CPU_BIND_NONE) - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); else { erts_cpu_topology_t *cpudata; int cpudata_size; create_tmp_cpu_topology_copy(&cpudata, &cpudata_size); write_schedulers_bind_change(cpudata, cpudata_size); - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); erts_sched_notify_check_cpu_bind(); 
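/* Note: the temporary-copy idiom used throughout this file, condensed.
 * The topology is snapshotted while cpuinfo_rwmtx is read-locked and
 * everything slow (term building, fake bindings) then runs on the
 * private copy with no lock held:
 *
 *     erts_rwmtx_rlock(&cpuinfo_rwmtx);
 *     create_tmp_cpu_topology_copy(&cpudata, &cpudata_size);
 *     erts_rwmtx_runlock(&cpuinfo_rwmtx);
 *     ... work on cpudata ...
 *     destroy_tmp_cpu_topology_copy(cpudata);
 */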
destroy_tmp_cpu_topology_copy(cpudata); } @@ -1742,7 +1736,7 @@ int erts_update_cpu_info(void) { int changed; - erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx); + erts_rwmtx_rwlock(&cpuinfo_rwmtx); changed = erts_cpu_info_update(cpuinfo); if (changed) { erts_cpu_topology_t *cpudata; @@ -1775,7 +1769,7 @@ erts_update_cpu_info(void) write_schedulers_bind_change(cpudata, cpudata_size); destroy_tmp_cpu_topology_copy(cpudata); } - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); if (changed) erts_sched_notify_check_cpu_bind(); return changed; @@ -1792,7 +1786,7 @@ reader_groups_callback(int suspending, void *unused) { if (reader_groups && esdp->no <= max_main_threads) - erts_smp_rwmtx_set_reader_group(suspending ? 0 : group+1); + erts_rwmtx_set_reader_group(suspending ? 0 : group+1); } static Eterm get_cpu_groups_map(Process *c_p, @@ -1821,9 +1815,9 @@ Eterm erts_get_reader_groups_map(Process *c_p) { Eterm res; - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); res = get_cpu_groups_map(c_p, reader_groups_map, 1); - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); return res; } @@ -2203,7 +2197,7 @@ add_cpu_groups(int groups, erts_cpu_groups_callback_list_t *cgcl; erts_cpu_groups_map_t *cgm; - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); if (use_groups > max_main_threads) use_groups = max_main_threads; @@ -2250,7 +2244,7 @@ cpu_groups_lookup(erts_cpu_groups_map_t *map, { int start, logical, ix; - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&cpuinfo_rwmtx) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&cpuinfo_rwmtx) || erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); if (esdp->cpu_id < 0) @@ -2278,7 +2272,7 @@ static void update_cpu_groups_maps(void) { erts_cpu_groups_map_t *cgm; - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); for (cgm = cpu_groups_maps; cgm; cgm = cgm->next) make_cpu_groups_map(cgm, 0); diff --git a/erts/emulator/beam/erl_cpu_topology.h b/erts/emulator/beam/erl_cpu_topology.h index c922214702..88bcad79ab 100644 --- a/erts/emulator/beam/erl_cpu_topology.h +++ b/erts/emulator/beam/erl_cpu_topology.h @@ -60,11 +60,9 @@ int erts_init_scheduler_bind_type_string(char *how); int erts_init_cpu_topology_string(char *topology_str); void erts_sched_check_cpu_bind(ErtsSchedulerData *esdp); -#ifdef ERTS_SMP void erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp); void erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp); void erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp); -#endif int erts_update_cpu_info(void); diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c index 6d4a895ef6..3ba0886464 100644 --- a/erts/emulator/beam/erl_db.c +++ b/erts/emulator/beam/erl_db.c @@ -44,7 +44,7 @@ #include "erl_binary.h" -erts_smp_atomic_t erts_ets_misc_mem_size; +erts_atomic_t erts_ets_misc_mem_size; /* ** Utility macros @@ -61,15 +61,9 @@ enum DbIterSafety { ITER_SAFE_LOCKED, /* Safe while table is locked, not between trap calls */ ITER_SAFE /* No need to fixate at all */ }; -#ifdef ERTS_SMP # define ITERATION_SAFETY(Proc,Tab) \ ((IS_TREE_TABLE((Tab)->common.status) || ONLY_WRITER(Proc,Tab)) ? ITER_SAFE \ : (((Tab)->common.status & DB_FINE_LOCKED) ? ITER_UNSAFE : ITER_SAFE_LOCKED)) -#else -# define ITERATION_SAFETY(Proc,Tab) \ - ((IS_TREE_TABLE((Tab)->common.status) || ONLY_WRITER(Proc,Tab)) \ - ? 
ITER_SAFE : ITER_SAFE_LOCKED) -#endif #define DID_TRAP(P,Ret) (!is_value(Ret) && ((P)->freason == TRAP)) @@ -195,7 +189,7 @@ static void delete_sched_table(Process *c_p, DbTable *tb); static void table_dec_refc(DbTable *tb, erts_aint_t min_val) { - if (erts_smp_refc_dectest(&tb->common.refc, min_val) == 0) + if (erts_refc_dectest(&tb->common.refc, min_val) == 0) schedule_free_dbtable(tb); } @@ -209,21 +203,21 @@ static ERTS_INLINE void make_btid(DbTable *tb) { Binary *btid = erts_create_magic_indirection(db_table_tid_destructor); - erts_smp_atomic_t *tbref = erts_smp_binary_to_magic_indirection(btid); - erts_smp_atomic_init_nob(tbref, (erts_aint_t) tb); + erts_atomic_t *tbref = erts_binary_to_magic_indirection(btid); + erts_atomic_init_nob(tbref, (erts_aint_t) tb); tb->common.btid = btid; /* * Table and magic indirection refer eachother, * and table is refered once by being alive... */ - erts_smp_refc_init(&tb->common.refc, 2); + erts_refc_init(&tb->common.refc, 2); erts_refc_inc(&btid->intern.refc, 1); } static ERTS_INLINE DbTable* btid2tab(Binary* btid) { - erts_smp_atomic_t *tbref = erts_smp_binary_to_magic_indirection(btid); - return (DbTable *) erts_smp_atomic_read_nob(tbref); + erts_atomic_t *tbref = erts_binary_to_magic_indirection(btid); + return (DbTable *) erts_atomic_read_nob(tbref); } static DbTable * @@ -231,7 +225,7 @@ tid2tab(Eterm tid) { DbTable *tb; Binary *btid; - erts_smp_atomic_t *tbref; + erts_atomic_t *tbref; if (!is_internal_magic_ref(tid)) return NULL; @@ -239,8 +233,8 @@ tid2tab(Eterm tid) if (ERTS_MAGIC_BIN_DESTRUCTOR(btid) != db_table_tid_destructor) return NULL; - tbref = erts_smp_binary_to_magic_indirection(btid); - tb = (DbTable *) erts_smp_atomic_read_nob(tbref); + tbref = erts_binary_to_magic_indirection(btid); + tb = (DbTable *) erts_atomic_read_nob(tbref); ASSERT(!tb || tb->common.btid == btid); @@ -250,11 +244,11 @@ tid2tab(Eterm tid) static ERTS_INLINE int is_table_alive(DbTable *tb) { - erts_smp_atomic_t *tbref; + erts_atomic_t *tbref; DbTable *rtb; - tbref = erts_smp_binary_to_magic_indirection(tb->common.btid); - rtb = (DbTable *) erts_smp_atomic_read_nob(tbref); + tbref = erts_binary_to_magic_indirection(tb->common.btid); + rtb = (DbTable *) erts_atomic_read_nob(tbref); ASSERT(!rtb || rtb == tb); @@ -264,11 +258,7 @@ is_table_alive(DbTable *tb) static ERTS_INLINE int is_table_named(DbTable *tb) { -#ifdef ERTS_SMP return tb->common.type & DB_NAMED_TABLE; -#else - return tb->common.status & DB_NAMED_TABLE; -#endif } @@ -277,8 +267,8 @@ tid_clear(Process *c_p, DbTable *tb) { DbTable *rtb; Binary *btid = tb->common.btid; - erts_smp_atomic_t *tbref = erts_smp_binary_to_magic_indirection(btid); - rtb = (DbTable *) erts_smp_atomic_xchg_nob(tbref, (erts_aint_t) NULL); + erts_atomic_t *tbref = erts_binary_to_magic_indirection(btid); + rtb = (DbTable *) erts_atomic_xchg_nob(tbref, (erts_aint_t) NULL); ASSERT(!rtb || tb == rtb); if (rtb) { table_dec_refc(tb, 1); @@ -297,13 +287,11 @@ make_tid(Process *c_p, DbTable *tb) /* ** The meta hash table of all NAMED ets tables */ -#ifdef ERTS_SMP # define META_NAME_TAB_LOCK_CNT 16 union { - erts_smp_rwmtx_t lck; + erts_rwmtx_t lck; byte _cache_line_alignment[64]; }meta_name_tab_rwlocks[META_NAME_TAB_LOCK_CNT]; -#endif static struct meta_name_tab_entry { union { Eterm name_atom; @@ -319,13 +307,11 @@ static unsigned meta_name_tab_mask; static ERTS_INLINE struct meta_name_tab_entry* meta_name_tab_bucket(Eterm name, - erts_smp_rwmtx_t** lockp) + erts_rwmtx_t** lockp) { unsigned bix = atom_val(name) & meta_name_tab_mask; 
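/* Note: lock striping.  There are meta_name_tab_mask+1 name buckets
 * but only META_NAME_TAB_LOCK_CNT (16) rwlocks; the "bix modulo lock
 * count" selection below maps many buckets onto a few cache-line
 * aligned locks, so unrelated named tables rarely contend while the
 * lock array stays small. */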
struct meta_name_tab_entry* bucket = &meta_name_tab[bix]; -#ifdef ERTS_SMP *lockp = &meta_name_tab_rwlocks[bix % META_NAME_TAB_LOCK_CNT].lck; -#endif return bucket; } @@ -390,16 +376,14 @@ free_dbtable(void *vtb) { DbTable *tb = (DbTable *) vtb; #ifdef HARDDEBUG - if (erts_smp_atomic_read_nob(&tb->common.memory_size) != sizeof(DbTable)) { + if (erts_atomic_read_nob(&tb->common.memory_size) != sizeof(DbTable)) { erts_fprintf(stderr, "ets: free_dbtable memory remain=%ld fix=%x\n", - erts_smp_atomic_read_nob(&tb->common.memory_size)-sizeof(DbTable), + erts_atomic_read_nob(&tb->common.memory_size)-sizeof(DbTable), tb->common.fixations); } #endif -#ifdef ERTS_SMP - erts_smp_rwmtx_destroy(&tb->common.rwlock); - erts_smp_mtx_destroy(&tb->common.fixlock); -#endif + erts_rwmtx_destroy(&tb->common.rwlock); + erts_mtx_destroy(&tb->common.fixlock); ASSERT(is_immed(tb->common.heir_data)); if (tb->common.btid) @@ -419,8 +403,8 @@ static void schedule_free_dbtable(DbTable* tb) * Caller is *not* allowed to access the specialized part * (hash or tree) of *tb after this function has returned. */ - ASSERT(erts_smp_refc_read(&tb->common.refc, 0) == 0); - ASSERT(erts_smp_refc_read(&tb->common.fix_count, 0) == 0); + ASSERT(erts_refc_read(&tb->common.refc, 0) == 0); + ASSERT(erts_refc_read(&tb->common.fix_count, 0) == 0); erts_schedule_thr_prgr_later_cleanup_op(free_dbtable, (void *) tb, &tb->release.data, @@ -435,7 +419,7 @@ save_sched_table(Process *c_p, DbTable *tb) ASSERT(esdp); esdp->ets_tables.count++; - erts_smp_refc_inc(&tb->common.refc, 1); + erts_refc_inc(&tb->common.refc, 1); first = esdp->ets_tables.clist; if (!first) { @@ -525,11 +509,11 @@ save_owned_table(Process *c_p, DbTable *tb) { DbTable *first; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); first = (DbTable*) erts_psd_get(c_p, ERTS_PSD_ETS_OWNED_TABLES); - erts_smp_refc_inc(&tb->common.refc, 1); + erts_refc_inc(&tb->common.refc, 1); if (!first) { tb->common.owned.next = tb->common.owned.prev = tb; @@ -541,13 +525,13 @@ save_owned_table(Process *c_p, DbTable *tb) tb->common.owned.prev->common.owned.next = tb; first->common.owned.prev = tb; } - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); } static ERTS_INLINE void delete_owned_table(Process *p, DbTable *tb) { - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); if (tb->common.owned.next == tb) { DbTable* old; ASSERT(tb->common.owned.prev == tb); @@ -570,38 +554,33 @@ delete_owned_table(Process *p, DbTable *tb) if (tb == first) erts_psd_set(p, ERTS_PSD_ETS_OWNED_TABLES, tb->common.owned.next); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); table_dec_refc(tb, 1); } static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock) { -#ifdef ERTS_SMP - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; if (use_frequent_read_lock) - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; + rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; if (erts_ets_rwmtx_spin_count >= 0) rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count; -#endif -#ifdef ERTS_SMP - erts_smp_rwmtx_init_opt(&tb->common.rwlock, &rwmtx_opt, "db_tab", + erts_rwmtx_init_opt(&tb->common.rwlock, &rwmtx_opt, "db_tab", tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB); - erts_smp_mtx_init(&tb->common.fixlock, "db_tab_fix", + erts_mtx_init(&tb->common.fixlock, 
"db_tab_fix", tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB); tb->common.is_thread_safe = !(tb->common.status & DB_FINE_LOCKED); -#endif } static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind) { -#ifdef ERTS_SMP if (tb->common.type & DB_FINE_LOCKED) { if (kind == LCK_WRITE) { - erts_smp_rwmtx_rwlock(&tb->common.rwlock); + erts_rwmtx_rwlock(&tb->common.rwlock); tb->common.is_thread_safe = 1; } else { - erts_smp_rwmtx_rlock(&tb->common.rwlock); + erts_rwmtx_rlock(&tb->common.rwlock); ASSERT(!tb->common.is_thread_safe); } } @@ -610,14 +589,13 @@ static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind) switch (kind) { case LCK_WRITE: case LCK_WRITE_REC: - erts_smp_rwmtx_rwlock(&tb->common.rwlock); + erts_rwmtx_rwlock(&tb->common.rwlock); break; default: - erts_smp_rwmtx_rlock(&tb->common.rwlock); + erts_rwmtx_rlock(&tb->common.rwlock); } ASSERT(tb->common.is_thread_safe); } -#endif } static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind) @@ -627,16 +605,15 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind) * DbTable structure. That is, ONLY the SMP case is allowed * to follow the tb pointer! */ -#ifdef ERTS_SMP if (tb->common.type & DB_FINE_LOCKED) { if (kind == LCK_WRITE) { ASSERT(tb->common.is_thread_safe); tb->common.is_thread_safe = 0; - erts_smp_rwmtx_rwunlock(&tb->common.rwlock); + erts_rwmtx_rwunlock(&tb->common.rwlock); } else { ASSERT(!tb->common.is_thread_safe); - erts_smp_rwmtx_runlock(&tb->common.rwlock); + erts_rwmtx_runlock(&tb->common.rwlock); } } else { @@ -644,13 +621,12 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind) switch (kind) { case LCK_WRITE: case LCK_WRITE_REC: - erts_smp_rwmtx_rwunlock(&tb->common.rwlock); + erts_rwmtx_rwunlock(&tb->common.rwlock); break; default: - erts_smp_rwmtx_runlock(&tb->common.rwlock); + erts_rwmtx_runlock(&tb->common.rwlock); } } -#endif } static ERTS_INLINE @@ -661,7 +637,7 @@ DbTable* db_get_table_aux(Process *p, int meta_already_locked) { DbTable *tb; - erts_smp_rwmtx_t *mtl = NULL; + erts_rwmtx_t *mtl = NULL; /* * IMPORTANT: Only scheduler threads are allowed @@ -673,9 +649,9 @@ DbTable* db_get_table_aux(Process *p, if (is_atom(id)) { struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&mtl); if (!meta_already_locked) - erts_smp_rwmtx_rlock(mtl); + erts_rwmtx_rlock(mtl); else{ - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(mtl) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(mtl) || erts_lc_rwmtx_is_rwlocked(mtl)); mtl = NULL; } @@ -709,7 +685,7 @@ DbTable* db_get_table_aux(Process *p, } } if (mtl) - erts_smp_rwmtx_runlock(mtl); + erts_rwmtx_runlock(mtl); return tb; } @@ -725,12 +701,12 @@ DbTable* db_get_table(Process *p, static int insert_named_tab(Eterm name_atom, DbTable* tb, int have_lock) { int ret = 0; - erts_smp_rwmtx_t* rwlock; + erts_rwmtx_t* rwlock; struct meta_name_tab_entry* new_entry; struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom, &rwlock); if (!have_lock) - erts_smp_rwmtx_rwlock(rwlock); + erts_rwmtx_rwlock(rwlock); if (bucket->pu.tb == NULL) { /* empty */ new_entry = bucket; @@ -778,27 +754,25 @@ static int insert_named_tab(Eterm name_atom, DbTable* tb, int have_lock) done: if (!have_lock) - erts_smp_rwmtx_rwunlock(rwlock); + erts_rwmtx_rwunlock(rwlock); return ret; } static int remove_named_tab(DbTable *tb, int have_lock) { int ret = 0; - erts_smp_rwmtx_t* rwlock; + erts_rwmtx_t* rwlock; Eterm name_atom = tb->common.the_name; struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom, &rwlock); 
ASSERT(is_table_named(tb)); -#ifdef ERTS_SMP - if (!have_lock && erts_smp_rwmtx_tryrwlock(rwlock) == EBUSY) { + if (!have_lock && erts_rwmtx_tryrwlock(rwlock) == EBUSY) { db_unlock(tb, LCK_WRITE); - erts_smp_rwmtx_rwlock(rwlock); + erts_rwmtx_rwlock(rwlock); db_lock(tb, LCK_WRITE); } -#endif - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(rwlock)); + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(rwlock)); if (bucket->pu.tb == NULL) { goto done; @@ -851,7 +825,7 @@ static int remove_named_tab(DbTable *tb, int have_lock) done: if (!have_lock) - erts_smp_rwmtx_rwunlock(rwlock); + erts_rwmtx_rwunlock(rwlock); return ret; } @@ -860,11 +834,11 @@ done: */ static ERTS_INLINE void local_fix_table(DbTable* tb) { - erts_smp_refc_inc(&tb->common.fix_count, 1); + erts_refc_inc(&tb->common.fix_count, 1); } static ERTS_INLINE void local_unfix_table(DbTable* tb) { - if (erts_smp_refc_dectest(&tb->common.fix_count, 0) == 0) { + if (erts_refc_dectest(&tb->common.fix_count, 0) == 0) { ASSERT(IS_HASH_TABLE(tb->common.status)); db_unfix_table_hash(&(tb->hash)); } @@ -1505,7 +1479,7 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2) DbTable* tb; Eterm ret; Eterm old_name; - erts_smp_rwmtx_t *lck1, *lck2; + erts_rwmtx_t *lck1, *lck2; #ifdef HARDDEBUG erts_fprintf(stderr, @@ -1528,7 +1502,7 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2) if (lck1 == lck2) lck2 = NULL; else if (lck1 > lck2) { - erts_smp_rwmtx_t *tmp = lck1; + erts_rwmtx_t *tmp = lck1; lck1 = lck2; lck2 = tmp; } @@ -1546,9 +1520,9 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2) } } - erts_smp_rwmtx_rwlock(lck1); + erts_rwmtx_rwlock(lck1); if (lck2) - erts_smp_rwmtx_rwlock(lck2); + erts_rwmtx_rwlock(lck2); tb = db_get_table_aux(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE, 1); if (!tb) @@ -1568,16 +1542,16 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2) tb->common.the_name = BIF_ARG_2; db_unlock(tb, LCK_WRITE); - erts_smp_rwmtx_rwunlock(lck1); + erts_rwmtx_rwunlock(lck1); if (lck2) - erts_smp_rwmtx_rwunlock(lck2); + erts_rwmtx_rwunlock(lck2); BIF_RET(ret); badarg: if (tb) db_unlock(tb, LCK_WRITE); - erts_smp_rwmtx_rwunlock(lck1); + erts_rwmtx_rwunlock(lck1); if (lck2) - erts_smp_rwmtx_rwunlock(lck2); + erts_rwmtx_rwunlock(lck2); BIF_ERROR(BIF_P, BADARG); } @@ -1598,9 +1572,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) Uint32 status; Sint keypos; int is_named, is_compressed; -#ifdef ERTS_SMP int is_fine_locked, frequent_read; -#endif #ifdef DEBUG int cret; #endif @@ -1616,10 +1588,8 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) status = DB_SET | DB_PROTECTED; keypos = 1; is_named = 0; -#ifdef ERTS_SMP is_fine_locked = 0; frequent_read = 0; -#endif heir = am_none; heir_data = (UWord) am_undefined; is_compressed = erts_ets_always_compress; @@ -1647,30 +1617,18 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) keypos = signed_val(tp[2]); } else if (tp[1] == am_write_concurrency) { -#ifdef ERTS_SMP if (tp[2] == am_true) { is_fine_locked = 1; } else if (tp[2] == am_false) { is_fine_locked = 0; } else break; -#else - if ((tp[2] != am_true) && (tp[2] != am_false)) { - break; - } -#endif } else if (tp[1] == am_read_concurrency) { -#ifdef ERTS_SMP if (tp[2] == am_true) { frequent_read = 1; } else if (tp[2] == am_false) { frequent_read = 0; } else break; -#else - if ((tp[2] != am_true) && (tp[2] != am_false)) { - break; - } -#endif } else if (tp[1] == am_heir && tp[2] == am_none) { @@ -1712,11 +1670,9 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) } if (IS_HASH_TABLE(status)) { meth = &db_hash; -#ifdef ERTS_SMP if (is_fine_locked && !(status & DB_PRIVATE)) { status |= DB_FINE_LOCKED; } -#endif } else if (IS_TREE_TABLE(status)) { meth = 
&db_tree; @@ -1725,10 +1681,8 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) BIF_ERROR(BIF_P, BADARG); } -#ifdef ERTS_SMP if (frequent_read && !(status & DB_PRIVATE)) status |= DB_FREQ_READ; -#endif /* we create table outside any table lock * and take the unusal cost of destroy table if it @@ -1737,27 +1691,25 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) { DbTable init_tb; - erts_smp_atomic_init_nob(&init_tb.common.memory_size, 0); + erts_atomic_init_nob(&init_tb.common.memory_size, 0); tb = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE, &init_tb, sizeof(DbTable)); - erts_smp_atomic_init_nob(&tb->common.memory_size, - erts_smp_atomic_read_nob(&init_tb.common.memory_size)); + erts_atomic_init_nob(&tb->common.memory_size, + erts_atomic_read_nob(&init_tb.common.memory_size)); } tb->common.meth = meth; tb->common.the_name = BIF_ARG_1; tb->common.status = status; -#ifdef ERTS_SMP tb->common.type = status & ERTS_ETS_TABLE_TYPES; /* Note, 'type' is *read only* from now on... */ -#endif - erts_smp_refc_init(&tb->common.fix_count, 0); + erts_refc_init(&tb->common.fix_count, 0); db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ)); tb->common.keypos = keypos; tb->common.owner = BIF_P->common.id; set_heir(BIF_P, tb, heir, heir_data); - erts_smp_atomic_init_nob(&tb->common.nitems, 0); + erts_atomic_init_nob(&tb->common.nitems, 0); tb->common.fixing_procs = NULL; tb->common.compress = is_compressed; @@ -1940,7 +1892,7 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1) * Process 'rp' might be exiting, but our table lock prevents it * from terminating as it cannot complete erts_db_process_exiting(). */ - ASSERT(!(ERTS_PSFLG_FREE & erts_smp_atomic32_read_nob(&rp->state))); + ASSERT(!(ERTS_PSFLG_FREE & erts_atomic32_read_nob(&rp->state))); delete_owned_table(rp, tb); BIF_P->flags |= F_USING_DB; @@ -2015,12 +1967,12 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3) db_unlock(tb,LCK_WRITE); send_ets_transfer_message(BIF_P, to_proc, &to_locks, tb, BIF_ARG_3); - erts_smp_proc_unlock(to_proc, to_locks); + erts_proc_unlock(to_proc, to_locks); UnUseTmpHeap(5,BIF_P); BIF_RET(am_true); badarg: - if (to_proc != NULL && to_proc != BIF_P) erts_smp_proc_unlock(to_proc, to_locks); + if (to_proc != NULL && to_proc != BIF_P) erts_proc_unlock(to_proc, to_locks); if (tb != NULL) db_unlock(tb, LCK_WRITE); BIF_ERROR(BIF_P, BADARG); } @@ -2244,7 +2196,7 @@ BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2) if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) { BIF_ERROR(BIF_P, BADARG); } - nitems = erts_smp_atomic_read_nob(&tb->common.nitems); + nitems = erts_atomic_read_nob(&tb->common.nitems); tb->common.meth->db_delete_all_objects(BIF_P, tb); db_unlock(tb, LCK_WRITE); BIF_RET(erts_make_integer(nitems,BIF_P)); @@ -2295,7 +2247,7 @@ BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2) */ struct ErtsEtsAllReq_ { - erts_smp_atomic32_t refc; + erts_atomic32_t refc; Process *proc; ErtsOIRefStorage ref; ErtsEtsAllReqList list[1]; /* one per scheduler */ @@ -2428,7 +2380,7 @@ ets_all_reply(ErtsSchedulerData *esdp, ErtsEtsAllReq **reqpp, erts_proc_dec_refc(reqp->proc); - if (erts_smp_atomic32_dec_read_nob(&reqp->refc) == 0) + if (erts_atomic32_dec_read_nob(&reqp->refc) == 0) erts_free(ERTS_ALC_T_ETS_ALL_REQ, reqp); *reqpp = NULL; @@ -2516,19 +2468,17 @@ BIF_RETTYPE ets_internal_request_all_0(BIF_ALIST_0) Eterm ref = erts_make_ref(BIF_P); ErtsEtsAllReq *req = erts_alloc(ERTS_ALC_T_ETS_ALL_REQ, ERTS_ETS_ALL_REQ_SIZE); - erts_smp_atomic32_init_nob(&req->refc, + erts_atomic32_init_nob(&req->refc, (erts_aint32_t) erts_no_schedulers); 
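/* Note: scatter/gather.  refc starts at erts_no_schedulers; the
 * request is broadcast below as misc aux work, each scheduler adds
 * its own table list in ets_all_reply() and decrements refc, and the
 * scheduler that takes it to zero frees the request. */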
erts_oiref_storage_save(&req->ref, ref); req->proc = BIF_P; erts_proc_add_refc(BIF_P, (Sint) erts_no_schedulers); -#ifdef ERTS_SMP if (erts_no_schedulers > 1) erts_schedule_multi_misc_aux_work(1, erts_no_schedulers, handle_ets_all_request, (void *) req); -#endif handle_ets_all_request((void *) req); BIF_RET(ref); @@ -3212,7 +3162,7 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1) if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL || tb->common.owner != owner) { if (BIF_P != rp) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); if (is_atom(BIF_ARG_1) || is_small(BIF_ARG_1)) { BIF_RET(am_undefined); } @@ -3226,7 +3176,7 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1) db_unlock(tb, LCK_READ); /*if (rp != NULL && rp != BIF_P) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);*/ + erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);*/ hp = HAlloc(BIF_P, 5*sizeof(fields)/sizeof(Eterm)); res = NIL; @@ -3345,11 +3295,10 @@ void init_db(ErtsDbSpinCount db_spin_count) unsigned bits; size_t size; -#ifdef ERTS_SMP int max_spin_count = (1 << 15) - 1; /* internal limit */ - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; - rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; + rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; + rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED; switch (db_spin_count) { case ERTS_DB_SPNCNT_NONE: @@ -3389,13 +3338,12 @@ void init_db(ErtsDbSpinCount db_spin_count) rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count; for (i=0; i<META_NAME_TAB_LOCK_CNT; i++) { - erts_smp_rwmtx_init_opt(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt, + erts_rwmtx_init_opt(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt, "meta_name_tab", make_small(i), ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DB); } -#endif - erts_smp_atomic_init_nob(&erts_ets_misc_mem_size, 0); + erts_atomic_init_nob(&erts_ets_misc_mem_size, 0); db_initialize_util(); if (user_requested_db_max_tabs < DB_DEF_MAX_TABS) @@ -3496,14 +3444,14 @@ retry: if (tb->common.owner != p->common.id) { if (to_proc != NULL ) { - erts_smp_proc_unlock(to_proc, to_locks); + erts_proc_unlock(to_proc, to_locks); } db_unlock(tb,LCK_WRITE); return !0; /* ok, someone already gave my table away */ } if (tb->common.heir != to_pid) { /* someone changed the heir */ if (to_proc != NULL ) { - erts_smp_proc_unlock(to_proc, to_locks); + erts_proc_unlock(to_proc, to_locks); } if (to_pid == p->common.id || to_pid == am_none) { return 0; /* no real heir, table still mine */ @@ -3516,7 +3464,7 @@ retry: } if (to_proc->common.u.alive.started_interval != tb->common.heir_started_interval) { - erts_smp_proc_unlock(to_proc, to_locks); + erts_proc_unlock(to_proc, to_locks); return 0; /* heir dead and pid reused, table still mine */ } @@ -3533,7 +3481,7 @@ retry: heir_data = tpv[1]; } send_ets_transfer_message(p, to_proc, &to_locks, tb, heir_data); - erts_smp_proc_unlock(to_proc, to_locks); + erts_proc_unlock(to_proc, to_locks); return !0; } @@ -3584,21 +3532,17 @@ static SWord proc_cleanup_fixed_table(Process* p, DbFixation* fix) db_lock(tb, LCK_WRITE_REC); if (!(tb->common.status & DB_DELETE)) { erts_aint_t diff; - #ifdef ERTS_SMP - erts_smp_mtx_lock(&tb->common.fixlock); - #endif + erts_mtx_lock(&tb->common.fixlock); ASSERT(fixing_procs_rbt_lookup(tb->common.fixing_procs, p)); diff = -((erts_aint_t) fix->counter); - erts_smp_refc_add(&tb->common.fix_count,diff,0); + erts_refc_add(&tb->common.fix_count,diff,0); 
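/*
 * Editor's aside -- a hedged sketch, not part of the patch. fix_count is the
 * table-wide sum of every process's fixation counter, so the cleanup above
 * retires a dying process's whole contribution with one atomic add of the
 * negated counter rather than counter-many decrements. A minimal model under
 * that assumption; tab_t, fixation_t and retire_fixations are hypothetical
 * names, and C11 atomics stand in for erts_refc_add():
 */
#include <stdatomic.h>
#include <assert.h>

typedef struct { atomic_long fix_count; } tab_t;   /* cf. tb->common.fix_count */
typedef struct { long counter; } fixation_t;       /* per-process fix counter  */

static void retire_fixations(tab_t *tb, fixation_t *fix)
{
    long prev = atomic_fetch_sub_explicit(&tb->fix_count, fix->counter,
                                          memory_order_relaxed);
    assert(prev >= fix->counter);  /* mirrors erts_refc_add()'s min check of 0 */
    fix->counter = 0;
}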
fix->counter = 0; fixing_procs_rbt_delete(&tb->common.fixing_procs, fix); - #ifdef ERTS_SMP - erts_smp_mtx_unlock(&tb->common.fixlock); - #endif + erts_mtx_unlock(&tb->common.fixlock); if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)) { work += db_unfix_table_hash(&(tb->hash)); } @@ -3655,9 +3599,9 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks) switch (state->op) { case GET_OWNED_TABLE: { DbTable* tb; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); tb = (DbTable*) erts_psd_get(c_p, ERTS_PSD_ETS_OWNED_TABLES); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); if (!tb) { /* Done with owned tables; now fixations */ @@ -3748,10 +3692,8 @@ static void fix_table_locked(Process* p, DbTable* tb) { DbFixation *fix; -#ifdef ERTS_SMP - erts_smp_mtx_lock(&tb->common.fixlock); -#endif - erts_smp_refc_inc(&tb->common.fix_count,1); + erts_mtx_lock(&tb->common.fixlock); + erts_refc_inc(&tb->common.fix_count,1); fix = tb->common.fixing_procs; if (fix == NULL) { tb->common.time.monotonic @@ -3764,9 +3706,7 @@ static void fix_table_locked(Process* p, DbTable* tb) ASSERT(fixed_tabs_find(NULL, fix)); ++(fix->counter); -#ifdef ERTS_SMP - erts_smp_mtx_unlock(&tb->common.fixlock); -#endif + erts_mtx_unlock(&tb->common.fixlock); return; } } @@ -3779,9 +3719,7 @@ static void fix_table_locked(Process* p, DbTable* tb) fix->counter = 1; fixing_procs_rbt_insert(&tb->common.fixing_procs, fix); -#ifdef ERTS_SMP - erts_smp_mtx_unlock(&tb->common.fixlock); -#endif + erts_mtx_unlock(&tb->common.fixlock); p->flags |= F_USING_DB; fixed_tabs_insert(p, fix); @@ -3794,20 +3732,16 @@ static void unfix_table_locked(Process* p, DbTable* tb, { DbFixation* fix; -#ifdef ERTS_SMP - erts_smp_mtx_lock(&tb->common.fixlock); -#endif + erts_mtx_lock(&tb->common.fixlock); fix = fixing_procs_rbt_lookup(tb->common.fixing_procs, p); if (fix) { - erts_smp_refc_dec(&tb->common.fix_count,0); + erts_refc_dec(&tb->common.fix_count,0); --(fix->counter); ASSERT(fix->counter >= 0); if (fix->counter == 0) { fixing_procs_rbt_delete(&tb->common.fixing_procs, fix); -#ifdef ERTS_SMP - erts_smp_mtx_unlock(&tb->common.fixlock); -#endif + erts_mtx_unlock(&tb->common.fixlock); fixed_tabs_delete(p, fix); erts_refc_dec(&fix->tabs.btid->intern.refc, 1); @@ -3818,22 +3752,18 @@ static void unfix_table_locked(Process* p, DbTable* tb, goto unlocked; } } -#ifdef ERTS_SMP - erts_smp_mtx_unlock(&tb->common.fixlock); -#endif + erts_mtx_unlock(&tb->common.fixlock); unlocked: if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status) - && erts_smp_atomic_read_nob(&tb->hash.fixdel) != (erts_aint_t)NULL) { -#ifdef ERTS_SMP + && erts_atomic_read_nob(&tb->hash.fixdel) != (erts_aint_t)NULL) { if (*kind_p == LCK_READ && tb->common.is_thread_safe) { /* Must have write lock while purging pseudo-deleted (OTP-8166) */ - erts_smp_rwmtx_runlock(&tb->common.rwlock); - erts_smp_rwmtx_rwlock(&tb->common.rwlock); + erts_rwmtx_runlock(&tb->common.rwlock); + erts_rwmtx_rwlock(&tb->common.rwlock); *kind_p = LCK_WRITE; if (tb->common.status & DB_DELETE) return; } -#endif db_unfix_table_hash(&(tb->hash)); } } @@ -3855,9 +3785,8 @@ static void free_fixations_op(DbFixation* fix, void* vctx) ASSERT(ctx->tb->common.status & DB_DELETE); diff = -((erts_aint_t) fix->counter); - erts_smp_refc_add(&ctx->tb->common.fix_count, diff, 0); + erts_refc_add(&ctx->tb->common.fix_count, diff, 0); -#ifdef ERTS_SMP if (fix->procs.p != ctx->p) { /* Fixated by other process */ fix->counter = 0; @@ 
-3873,7 +3802,6 @@ static void free_fixations_op(DbFixation* fix, void* vctx) */ } else -#endif { fixed_tabs_delete(fix->procs.p, fix); @@ -3886,7 +3814,6 @@ static void free_fixations_op(DbFixation* fix, void* vctx) ctx->cnt++; } -#ifdef ERTS_SMP int erts_db_execute_free_fixation(Process* p, DbFixation* fix) { ASSERT(fix->counter == 0); @@ -3898,13 +3825,12 @@ int erts_db_execute_free_fixation(Process* p, DbFixation* fix) ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation)); return 1; } -#endif static SWord free_fixations_locked(Process* p, DbTable *tb) { struct free_fixations_ctx ctx; - ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&tb->common.rwlock)); + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock)); ctx.p = p; ctx.tb = tb; @@ -4047,7 +3973,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) int use_monotonic; if (What == am_size) { - ret = make_small(erts_smp_atomic_read_nob(&tb->common.nitems)); + ret = make_small(erts_atomic_read_nob(&tb->common.nitems)); } else if (What == am_type) { if (tb->common.status & DB_SET) { ret = am_set; @@ -4060,7 +3986,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) ret = am_bag; } } else if (What == am_memory) { - Uint words = (Uint) ((erts_smp_atomic_read_nob(&tb->common.memory_size) + Uint words = (Uint) ((erts_atomic_read_nob(&tb->common.memory_size) + sizeof(Uint) - 1) / sizeof(Uint)); @@ -4106,9 +4032,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) = ERTS_IS_ATOM_STR("safe_fixed_monotonic_time", What)) || ERTS_IS_ATOM_STR("safe_fixed", What)) { -#ifdef ERTS_SMP - erts_smp_mtx_lock(&tb->common.fixlock); -#endif + erts_mtx_lock(&tb->common.fixlock); if (IS_FIXED(tb)) { Uint need; Eterm *hp; @@ -4150,9 +4074,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) } else { ret = am_false; } -#ifdef ERTS_SMP - erts_smp_mtx_unlock(&tb->common.fixlock); -#endif + erts_mtx_unlock(&tb->common.fixlock); } else if (What == am_atom_put("stats",5)) { if (IS_HASH_TABLE(tb->common.status)) { FloatDef f; @@ -4176,7 +4098,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) std_dev_exp = make_float(hp); PUT_DOUBLE(f, hp); hp += FLOAT_SIZE_OBJECT; - ret = TUPLE7(hp, make_small(erts_smp_atomic_read_nob(&tb->hash.nactive)), + ret = TUPLE7(hp, make_small(erts_atomic_read_nob(&tb->hash.nactive)), avg, std_dev_real, std_dev_exp, make_small(stats.min_chain_len), make_small(stats.max_chain_len), @@ -4208,9 +4130,9 @@ static void print_table(fmtfn_t to, void *to_arg, int show, DbTable* tb) tb->common.meth->db_print(to, to_arg, show, tb); - erts_print(to, to_arg, "Objects: %d\n", (int)erts_smp_atomic_read_nob(&tb->common.nitems)); + erts_print(to, to_arg, "Objects: %d\n", (int)erts_atomic_read_nob(&tb->common.nitems)); erts_print(to, to_arg, "Words: %bpu\n", - (Uint) ((erts_smp_atomic_read_nob(&tb->common.memory_size) + (Uint) ((erts_atomic_read_nob(&tb->common.memory_size) + sizeof(Uint) - 1) / sizeof(Uint))); @@ -4250,9 +4172,9 @@ void db_info(fmtfn_t to, void *to_arg, int show) /* Called by break handler * Uint erts_get_ets_misc_mem_size(void) { - ERTS_SMP_MEMORY_BARRIER; + ERTS_THR_MEMORY_BARRIER; /* Memory not allocated in ets_alloc */ - return (Uint) erts_smp_atomic_read_nob(&erts_ets_misc_mem_size); + return (Uint) erts_atomic_read_nob(&erts_ets_misc_mem_size); } /* SMP Note: May only be used when system is locked */ @@ -4261,7 +4183,7 @@ erts_db_foreach_table(void (*func)(DbTable *, void *), void *arg) { int ix; - ASSERT(erts_smp_thr_progress_is_blocking()); + 
ASSERT(erts_thr_progress_is_blocking()); for (ix = 0; ix < erts_no_schedulers; ix++) { ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix); @@ -4367,4 +4289,4 @@ void erts_lcnt_update_db_locks(int enable) { &lcnt_update_db_locks_per_sched, (void*)(UWord)enable); } -#endif /* ERTS_ENABLE_LOCK_COUNT */
\ No newline at end of file +#endif /* ERTS_ENABLE_LOCK_COUNT */ diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h index d83126b3a2..318e90cb28 100644 --- a/erts/emulator/beam/erl_db.h +++ b/erts/emulator/beam/erl_db.h @@ -124,7 +124,7 @@ extern Export ets_select_delete_continue_exp; extern Export ets_select_count_continue_exp; extern Export ets_select_replace_continue_exp; extern Export ets_select_continue_exp; -extern erts_smp_atomic_t erts_ets_misc_mem_size; +extern erts_atomic_t erts_ets_misc_mem_size; Eterm erts_ets_colliding_names(Process*, Eterm name, Uint cnt); Uint erts_db_get_max_tabs(void); @@ -151,11 +151,11 @@ do { \ erts_aint_t sz__ = (((erts_aint_t) (ALLOC_SZ)) \ - ((erts_aint_t) (FREE_SZ))); \ ASSERT((TAB)); \ - erts_smp_atomic_add_nob(&(TAB)->common.memory_size, sz__); \ + erts_atomic_add_nob(&(TAB)->common.memory_size, sz__); \ } while (0) #define ERTS_ETS_MISC_MEM_ADD(SZ) \ - erts_smp_atomic_add_nob(&erts_ets_misc_mem_size, (SZ)); + erts_atomic_add_nob(&erts_ets_misc_mem_size, (SZ)); ERTS_GLB_INLINE void *erts_db_alloc(ErtsAlcType_t type, DbTable *tab, @@ -292,7 +292,7 @@ erts_db_free(ErtsAlcType_t type, DbTable *tab, void *ptr, Uint size) ERTS_DB_ALC_MEM_UPDATE_(tab, size, 0); ASSERT(((void *) tab) != ptr - || erts_smp_atomic_read_nob(&tab->common.memory_size) == 0); + || erts_atomic_read_nob(&tab->common.memory_size) == 0); erts_free(type, ptr); } diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index cf928a9035..5d49b2ea14 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -109,22 +109,18 @@ #define NSEG_2 256 /* Size of second segment table */ #define NSEG_INC 128 /* Number of segments to grow after that */ -#ifdef ERTS_SMP # define DB_USING_FINE_LOCKING(TB) (((TB))->common.type & DB_FINE_LOCKED) -#else -# define DB_USING_FINE_LOCKING(TB) 0 -#endif #ifdef ETHR_ORDERED_READ_DEPEND -#define SEGTAB(tb) ((struct segment**) erts_smp_atomic_read_nob(&(tb)->segtab)) +#define SEGTAB(tb) ((struct segment**) erts_atomic_read_nob(&(tb)->segtab)) #else #define SEGTAB(tb) \ (DB_USING_FINE_LOCKING(tb) \ - ? ((struct segment**) erts_smp_atomic_read_ddrb(&(tb)->segtab)) \ - : ((struct segment**) erts_smp_atomic_read_nob(&(tb)->segtab))) + ? ((struct segment**) erts_atomic_read_ddrb(&(tb)->segtab)) \ + : ((struct segment**) erts_atomic_read_nob(&(tb)->segtab))) #endif -#define NACTIVE(tb) ((int)erts_smp_atomic_read_nob(&(tb)->nactive)) -#define NITEMS(tb) ((int)erts_smp_atomic_read_nob(&(tb)->common.nitems)) +#define NACTIVE(tb) ((int)erts_atomic_read_nob(&(tb)->nactive)) +#define NITEMS(tb) ((int)erts_atomic_read_nob(&(tb)->common.nitems)) #define SLOT_IX_TO_SEG_IX(i) (((i)+(EXT_SEGSZ-FIRST_SEGSZ)) >> EXT_SEGSZ_EXP) @@ -142,12 +138,12 @@ static ERTS_INLINE Uint hash_to_ix(DbTableHash* tb, HashValue hval) { Uint mask = (DB_USING_FINE_LOCKING(tb) - ? erts_smp_atomic_read_acqb(&tb->szm) - : erts_smp_atomic_read_nob(&tb->szm)); + ? 
erts_atomic_read_acqb(&tb->szm) + : erts_atomic_read_nob(&tb->szm)); Uint ix = hval & mask; - if (ix >= erts_smp_atomic_read_nob(&tb->nactive)) { + if (ix >= erts_atomic_read_nob(&tb->nactive)) { ix &= mask>>1; - ASSERT(ix < erts_smp_atomic_read_nob(&tb->nactive)); + ASSERT(ix < erts_atomic_read_nob(&tb->nactive)); } return ix; } @@ -166,7 +162,7 @@ static ERTS_INLINE int add_fixed_deletion(DbTableHash* tb, int ix, sizeof(FixedDeletion)); ERTS_ETS_MISC_MEM_ADD(sizeof(FixedDeletion)); fixd->slot = ix; - was_next = erts_smp_atomic_read_acqb(&tb->fixdel); + was_next = erts_atomic_read_acqb(&tb->fixdel); do { /* Lockless atomic insertion in linked list: */ if (NFIXED(tb) <= fixated_by_me) { erts_db_free(ERTS_ALC_T_DB_FIX_DEL, (DbTable*)tb, @@ -175,7 +171,7 @@ static ERTS_INLINE int add_fixed_deletion(DbTableHash* tb, int ix, } exp_next = was_next; fixd->next = (FixedDeletion*) exp_next; - was_next = erts_smp_atomic_cmpxchg_mb(&tb->fixdel, + was_next = erts_atomic_cmpxchg_mb(&tb->fixdel, (erts_aint_t) fixd, exp_next); }while (was_next != exp_next); @@ -191,62 +187,55 @@ static ERTS_INLINE int add_fixed_deletion(DbTableHash* tb, int ix, ((is_atom(term) ? (atom_tab(atom_val(term))->slot.bucket.hvalue) : \ make_internal_hash(term, 0)) % MAX_HASH) -#ifdef ERTS_SMP # define DB_HASH_LOCK_MASK (DB_HASH_LOCK_CNT-1) # define GET_LOCK(tb,hval) (&(tb)->locks->lck_vec[(hval) & DB_HASH_LOCK_MASK].lck) # define GET_LOCK_MAYBE(tb,hval) ((tb)->common.is_thread_safe ? NULL : GET_LOCK(tb,hval)) /* Fine grained read lock */ -static ERTS_INLINE erts_smp_rwmtx_t* RLOCK_HASH(DbTableHash* tb, HashValue hval) +static ERTS_INLINE erts_rwmtx_t* RLOCK_HASH(DbTableHash* tb, HashValue hval) { if (tb->common.is_thread_safe) { return NULL; } else { - erts_smp_rwmtx_t* lck = GET_LOCK(tb,hval); + erts_rwmtx_t* lck = GET_LOCK(tb,hval); ASSERT(tb->common.type & DB_FINE_LOCKED); - erts_smp_rwmtx_rlock(lck); + erts_rwmtx_rlock(lck); return lck; } } /* Fine grained write lock */ -static ERTS_INLINE erts_smp_rwmtx_t* WLOCK_HASH(DbTableHash* tb, HashValue hval) +static ERTS_INLINE erts_rwmtx_t* WLOCK_HASH(DbTableHash* tb, HashValue hval) { if (tb->common.is_thread_safe) { return NULL; } else { - erts_smp_rwmtx_t* lck = GET_LOCK(tb,hval); + erts_rwmtx_t* lck = GET_LOCK(tb,hval); ASSERT(tb->common.type & DB_FINE_LOCKED); - erts_smp_rwmtx_rwlock(lck); + erts_rwmtx_rwlock(lck); return lck; } } -static ERTS_INLINE void RUNLOCK_HASH(erts_smp_rwmtx_t* lck) +static ERTS_INLINE void RUNLOCK_HASH(erts_rwmtx_t* lck) { if (lck != NULL) { - erts_smp_rwmtx_runlock(lck); + erts_rwmtx_runlock(lck); } } -static ERTS_INLINE void WUNLOCK_HASH(erts_smp_rwmtx_t* lck) +static ERTS_INLINE void WUNLOCK_HASH(erts_rwmtx_t* lck) { if (lck != NULL) { - erts_smp_rwmtx_rwunlock(lck); + erts_rwmtx_rwunlock(lck); } } -#else /* ERTS_SMP */ -# define RLOCK_HASH(tb,hval) NULL -# define WLOCK_HASH(tb,hval) NULL -# define RUNLOCK_HASH(lck) ((void)lck) -# define WUNLOCK_HASH(lck) ((void)lck) -#endif /* ERTS_SMP */ #ifdef ERTS_ENABLE_LOCK_CHECK # define IFN_EXCL(tb,cmd) (((tb)->common.is_thread_safe) || (cmd)) -# define IS_HASH_RLOCKED(tb,hval) IFN_EXCL(tb,erts_smp_lc_rwmtx_is_rlocked(GET_LOCK(tb,hval))) -# define IS_HASH_WLOCKED(tb,lck) IFN_EXCL(tb,erts_smp_lc_rwmtx_is_rwlocked(lck)) -# define IS_TAB_WLOCKED(tb) erts_smp_lc_rwmtx_is_rwlocked(&(tb)->common.rwlock) +# define IS_HASH_RLOCKED(tb,hval) IFN_EXCL(tb,erts_lc_rwmtx_is_rlocked(GET_LOCK(tb,hval))) +# define IS_HASH_WLOCKED(tb,lck) IFN_EXCL(tb,erts_lc_rwmtx_is_rwlocked(lck)) +# define IS_TAB_WLOCKED(tb) 
erts_lc_rwmtx_is_rwlocked(&(tb)->common.rwlock) #else # define IS_HASH_RLOCKED(tb,hval) (1) # define IS_HASH_WLOCKED(tb,hval) (1) @@ -259,33 +248,25 @@ static ERTS_INLINE void WUNLOCK_HASH(erts_smp_rwmtx_t* lck) ** Slot READ locks updated accordingly, unlocked if EOT. */ static ERTS_INLINE Sint next_slot(DbTableHash* tb, Uint ix, - erts_smp_rwmtx_t** lck_ptr) + erts_rwmtx_t** lck_ptr) { -#ifdef ERTS_SMP ix += DB_HASH_LOCK_CNT; if (ix < NACTIVE(tb)) return ix; RUNLOCK_HASH(*lck_ptr); ix = (ix + 1) & DB_HASH_LOCK_MASK; if (ix != 0) *lck_ptr = RLOCK_HASH(tb,ix); return ix; -#else - return (++ix < NACTIVE(tb)) ? ix : 0; -#endif } /* Same as next_slot but with WRITE locking */ static ERTS_INLINE Sint next_slot_w(DbTableHash* tb, Uint ix, - erts_smp_rwmtx_t** lck_ptr) + erts_rwmtx_t** lck_ptr) { -#ifdef ERTS_SMP ix += DB_HASH_LOCK_CNT; if (ix < NACTIVE(tb)) return ix; WUNLOCK_HASH(*lck_ptr); ix = (ix + 1) & DB_HASH_LOCK_MASK; if (ix != 0) *lck_ptr = WLOCK_HASH(tb,ix); return ix; -#else - return next_slot(tb,ix,lck_ptr); -#endif } @@ -331,9 +312,7 @@ struct segment { /* An extended segment table */ struct ext_segtab { -#ifdef ERTS_SMP ErtsThrPrgrLaterOp lop; -#endif struct segment** prev_segtab; /* Used when table is shrinking */ int prev_nsegs; /* Size of prev_segtab */ int nsegs; /* Size of this segtab */ @@ -347,9 +326,9 @@ static ERTS_INLINE void SET_SEGTAB(DbTableHash* tb, struct segment** segtab) { if (DB_USING_FINE_LOCKING(tb)) - erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab); + erts_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab); else - erts_smp_atomic_set_nob(&tb->segtab, (erts_aint_t) segtab); + erts_atomic_set_nob(&tb->segtab, (erts_aint_t) segtab); } /* Used by select_replace on analyze_pattern */ @@ -361,7 +340,7 @@ typedef int (*extra_match_validator_t)(int keypos, Eterm match, Eterm guard, Ete static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix); static void alloc_seg(DbTableHash *tb); static int free_seg(DbTableHash *tb, int free_records); -static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr, +static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_rwmtx_t** lck_ptr, HashDbTerm *list); static HashDbTerm* search_list(DbTableHash* tb, Eterm key, HashValue hval, HashDbTerm *list); @@ -559,7 +538,7 @@ static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel) { /*int tries = 0;*/ DEBUG_WAIT(); - if (erts_smp_atomic_cmpxchg_relb(&tb->fixdel, + if (erts_atomic_cmpxchg_relb(&tb->fixdel, (erts_aint_t) fixdel, (erts_aint_t) NULL) != (erts_aint_t) NULL) { /* Oboy, must join lists */ @@ -568,13 +547,13 @@ static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel) erts_aint_t exp_tail; while (last->next != NULL) last = last->next; - was_tail = erts_smp_atomic_read_acqb(&tb->fixdel); + was_tail = erts_atomic_read_acqb(&tb->fixdel); do { /* Lockless atomic list insertion */ exp_tail = was_tail; last->next = (FixedDeletion*) exp_tail; /*++tries;*/ DEBUG_WAIT(); - was_tail = erts_smp_atomic_cmpxchg_relb(&tb->fixdel, + was_tail = erts_atomic_cmpxchg_relb(&tb->fixdel, (erts_aint_t) fixdel, exp_tail); }while (was_tail != exp_tail); @@ -590,18 +569,18 @@ SWord db_unfix_table_hash(DbTableHash *tb) FixedDeletion* fixdel; SWord work = 0; - ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&tb->common.rwlock) - || (erts_smp_lc_rwmtx_is_rlocked(&tb->common.rwlock) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock) + || (erts_lc_rwmtx_is_rlocked(&tb->common.rwlock) && !tb->common.is_thread_safe)); restart: - 
fixdel = (FixedDeletion*) erts_smp_atomic_xchg_mb(&tb->fixdel, + fixdel = (FixedDeletion*) erts_atomic_xchg_mb(&tb->fixdel, (erts_aint_t) NULL); while (fixdel != NULL) { FixedDeletion *fx = fixdel; int ix = fx->slot; HashDbTerm **bp; HashDbTerm *b; - erts_smp_rwmtx_t* lck = WLOCK_HASH(tb,ix); + erts_rwmtx_t* lck = WLOCK_HASH(tb,ix); if (IS_FIXED(tb)) { /* interrupted by fixer */ WUNLOCK_HASH(lck); @@ -647,10 +626,10 @@ int db_create_hash(Process *p, DbTable *tbl) { DbTableHash *tb = &tbl->hash; - erts_smp_atomic_init_nob(&tb->szm, FIRST_SEGSZ_MASK); - erts_smp_atomic_init_nob(&tb->nactive, FIRST_SEGSZ); - erts_smp_atomic_init_nob(&tb->fixdel, (erts_aint_t)NULL); - erts_smp_atomic_init_nob(&tb->segtab, (erts_aint_t)NULL); + erts_atomic_init_nob(&tb->szm, FIRST_SEGSZ_MASK); + erts_atomic_init_nob(&tb->nactive, FIRST_SEGSZ); + erts_atomic_init_nob(&tb->fixdel, (erts_aint_t)NULL); + erts_atomic_init_nob(&tb->segtab, (erts_aint_t)NULL); SET_SEGTAB(tb, tb->first_segtab); tb->nsegs = NSEG_1; tb->nslots = FIRST_SEGSZ; @@ -659,32 +638,30 @@ int db_create_hash(Process *p, DbTable *tbl) SIZEOF_SEGMENT(FIRST_SEGSZ)); sys_memset(tb->first_segtab[0], 0, SIZEOF_SEGMENT(FIRST_SEGSZ)); -#ifdef ERTS_SMP - erts_smp_atomic_init_nob(&tb->is_resizing, 0); + erts_atomic_init_nob(&tb->is_resizing, 0); if (tb->common.type & DB_FINE_LOCKED) { - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; int i; if (tb->common.type & DB_FREQ_READ) - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; + rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; if (erts_ets_rwmtx_spin_count >= 0) rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count; tb->locks = (DbTableHashFineLocks*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG, /* Other type maybe? 
*/ (DbTable *) tb, sizeof(DbTableHashFineLocks)); for (i=0; i<DB_HASH_LOCK_CNT; ++i) { - erts_smp_rwmtx_init_opt(&tb->locks->lck_vec[i].lck, &rwmtx_opt, + erts_rwmtx_init_opt(&tb->locks->lck_vec[i].lck, &rwmtx_opt, "db_hash_slot", tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB); } /* This important property is needed to guarantee the two buckets * involved in a grow/shrink operation it protected by the same lock: */ - ASSERT(erts_smp_atomic_read_nob(&tb->nactive) % DB_HASH_LOCK_CNT == 0); + ASSERT(erts_atomic_read_nob(&tb->nactive) % DB_HASH_LOCK_CNT == 0); } else { /* coarse locking */ tb->locks = NULL; } ERTS_THR_MEMORY_BARRIER; -#endif /* ERST_SMP */ return DB_ERROR_NONE; } @@ -692,7 +669,7 @@ static int db_first_hash(Process *p, DbTable *tbl, Eterm *ret) { DbTableHash *tb = &tbl->hash; Uint ix = 0; - erts_smp_rwmtx_t* lck = RLOCK_HASH(tb,ix); + erts_rwmtx_t* lck = RLOCK_HASH(tb,ix); HashDbTerm* list; for (;;) { @@ -725,7 +702,7 @@ static int db_next_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret) HashValue hval; Uint ix; HashDbTerm* b; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; hval = MAKE_HASH(key); lck = RLOCK_HASH(tb,hval); @@ -772,7 +749,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail) HashDbTerm** bp; HashDbTerm* b; HashDbTerm* q; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int nitems; int ret = DB_ERROR_NONE; @@ -798,7 +775,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail) if (tb->common.status & DB_SET) { HashDbTerm* bnext = b->next; if (b->hvalue == INVALID_HASH) { - erts_smp_atomic_inc_nob(&tb->common.nitems); + erts_atomic_inc_nob(&tb->common.nitems); } else if (key_clash_fail) { ret = DB_ERROR_BADKEY; @@ -826,7 +803,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail) do { if (db_eq(&tb->common,obj,&q->dbterm)) { if (q->hvalue == INVALID_HASH) { - erts_smp_atomic_inc_nob(&tb->common.nitems); + erts_atomic_inc_nob(&tb->common.nitems); q->hvalue = hval; if (q != b) { /* must move to preserve key insertion order */ *qp = q->next; @@ -847,7 +824,7 @@ Lnew: q->hvalue = hval; q->next = b; *bp = q; - nitems = erts_smp_atomic_inc_read_nob(&tb->common.nitems); + nitems = erts_atomic_inc_read_nob(&tb->common.nitems); WUNLOCK_HASH(lck); { int nactive = NACTIVE(tb); @@ -891,7 +868,7 @@ int db_get_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret) HashValue hval; int ix; HashDbTerm* b; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; hval = MAKE_HASH(key); lck = RLOCK_HASH(tb,hval); @@ -917,7 +894,7 @@ static int db_member_hash(DbTable *tbl, Eterm key, Eterm *ret) HashValue hval; int ix; HashDbTerm* b1; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; hval = MAKE_HASH(key); ix = hash_to_ix(tb, hval); @@ -946,7 +923,7 @@ static int db_get_element_hash(Process *p, DbTable *tbl, HashValue hval; int ix; HashDbTerm* b1; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int retval; hval = MAKE_HASH(key); @@ -1011,7 +988,7 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret) int ix; HashDbTerm** bp; HashDbTerm* b; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int nitems_diff = 0; hval = MAKE_HASH(key); @@ -1043,7 +1020,7 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret) } WUNLOCK_HASH(lck); if (nitems_diff) { - erts_smp_atomic_add_nob(&tb->common.nitems, nitems_diff); + erts_atomic_add_nob(&tb->common.nitems, nitems_diff); try_shrink(tb); } *ret = am_true; @@ -1060,7 +1037,7 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret) int ix; HashDbTerm** bp; HashDbTerm* b; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* 
lck; int nitems_diff = 0; int nkeys = 0; Eterm key; @@ -1101,7 +1078,7 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret) } WUNLOCK_HASH(lck); if (nitems_diff) { - erts_smp_atomic_add_nob(&tb->common.nitems, nitems_diff); + erts_atomic_add_nob(&tb->common.nitems, nitems_diff); try_shrink(tb); } *ret = am_true; @@ -1112,7 +1089,7 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret) static int db_slot_hash(Process *p, DbTable *tbl, Eterm slot_term, Eterm *ret) { DbTableHash *tb = &tbl->hash; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; Sint slot; int retval; int nactive; @@ -1227,18 +1204,13 @@ static int match_traverse(Process* p, DbTableHash* tb, unsigned current_list_pos = 0; /* Prefound buckets list index */ Eterm match_res; Sint got = 0; /* Matched terms counter */ - erts_smp_rwmtx_t* lck; /* Slot lock */ + erts_rwmtx_t* lck; /* Slot lock */ int ret_value; -#ifdef ERTS_SMP - erts_smp_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue) + erts_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue) = (lock_for_write ? WLOCK_HASH : RLOCK_HASH); - void (*unlock_hash_function)(erts_smp_rwmtx_t*) + void (*unlock_hash_function)(erts_rwmtx_t*) = (lock_for_write ? WUNLOCK_HASH : RUNLOCK_HASH); -#else - #define lock_hash_function(tb, hval) NULL - #define unlock_hash_function(lck) ((void)lck) -#endif - Sint (*next_slot_function)(DbTableHash*, Uint, erts_smp_rwmtx_t**) + Sint (*next_slot_function)(DbTableHash*, Uint, erts_rwmtx_t**) = (lock_for_write ? next_slot_w : next_slot); if ((ret_value = analyze_pattern(tb, pattern, extra_match_validator, &mpi)) @@ -1356,10 +1328,6 @@ done: } return ret_value; -#ifndef SMP -#undef lock_hash_function -#undef unlock_hash_function -#endif } /* @@ -1386,18 +1354,13 @@ static int match_traverse_continue(Process* p, DbTableHash* tb, */ HashDbTerm* saved_current; /* Helper to avoid double skip on match */ Eterm match_res; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int ret_value; -#ifdef ERTS_SMP - erts_smp_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue) + erts_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue) = (lock_for_write ? WLOCK_HASH : RLOCK_HASH); - void (*unlock_hash_function)(erts_smp_rwmtx_t*) + void (*unlock_hash_function)(erts_rwmtx_t*) = (lock_for_write ? WUNLOCK_HASH : RUNLOCK_HASH); -#else - #define lock_hash_function(tb, hval) NULL - #define unlock_hash_function(lck) ((void)lck) -#endif - Sint (*next_slot_function)(DbTableHash* tb, Uint ix, erts_smp_rwmtx_t** lck_ptr) + Sint (*next_slot_function)(DbTableHash* tb, Uint ix, erts_rwmtx_t** lck_ptr) = (lock_for_write ? next_slot_w : next_slot); if (got < 0) { @@ -1472,10 +1435,6 @@ done: */ return ret_value; -#ifndef SMP -#undef lock_hash_function -#undef unlock_hash_function -#endif } @@ -2008,7 +1967,7 @@ static int mtraversal_select_delete_on_match_res(void* context_ptr, Sint slot_ix *current_ptr = (*current_ptr)->next; // replace pointer to term using next free_term(sd_context_ptr->tb, del); } - erts_smp_atomic_dec_nob(&sd_context_ptr->tb->common.nitems); + erts_atomic_dec_nob(&sd_context_ptr->tb->common.nitems); return 1; } @@ -2048,11 +2007,7 @@ static int db_select_delete_hash(Process *p, DbTable *tbl, Eterm tid, Eterm patt sd_context.tid = tid; sd_context.hp = NULL; sd_context.prev_continuation_tptr = NULL; -#ifdef ERTS_SMP sd_context.fixated_by_me = sd_context.tb->common.is_thread_safe ? 
0 : 1; /* TODO: something nicer */ -#else - sd_context.fixated_by_me = 0; -#endif sd_context.last_pseudo_delete = (Uint) -1; return match_traverse( @@ -2251,7 +2206,7 @@ static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret) DbTableHash *tb = &tbl->hash; HashDbTerm **bp, *b; HashValue hval = MAKE_HASH(key); - erts_smp_rwmtx_t *lck = WLOCK_HASH(tb, hval); + erts_rwmtx_t *lck = WLOCK_HASH(tb, hval); int ix = hash_to_ix(tb, hval); int nitems_diff = 0; @@ -2280,7 +2235,7 @@ static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret) } WUNLOCK_HASH(lck); if (nitems_diff) { - erts_smp_atomic_add_nob(&tb->common.nitems, nitems_diff); + erts_atomic_add_nob(&tb->common.nitems, nitems_diff); try_shrink(tb); } return DB_ERROR_NONE; @@ -2302,7 +2257,7 @@ int db_mark_all_deleted_hash(DbTable *tbl) HashDbTerm* list; int i; - ERTS_SMP_LC_ASSERT(IS_TAB_WLOCKED(tb)); + ERTS_LC_ASSERT(IS_TAB_WLOCKED(tb)); for (i = 0; i < NACTIVE(tb); i++) { if ((list = BUCKET(tb,i)) != NULL) { @@ -2313,7 +2268,7 @@ int db_mark_all_deleted_hash(DbTable *tbl) }while(list != NULL); } } - erts_smp_atomic_set_nob(&tb->common.nitems, 0); + erts_atomic_set_nob(&tb->common.nitems, 0); return DB_ERROR_NONE; } @@ -2327,7 +2282,6 @@ static void db_print_hash(fmtfn_t to, void *to_arg, int show, DbTable *tbl) erts_print(to, to_arg, "Buckets: %d\n", NACTIVE(tb)); -#ifdef ERTS_SMP i = tbl->common.is_thread_safe; /* If crash dumping we set table to thread safe in order to avoid taking any locks */ @@ -2337,9 +2291,6 @@ static void db_print_hash(fmtfn_t to, void *to_arg, int show, DbTable *tbl) db_calc_stats_hash(&tbl->hash, &stats); tbl->common.is_thread_safe = i; -#else - db_calc_stats_hash(&tbl->hash, &stats); -#endif erts_print(to, to_arg, "Chain Length Avg: %f\n", stats.avg_chain_len); erts_print(to, to_arg, "Chain Length Max: %d\n", stats.max_chain_len); @@ -2391,8 +2342,8 @@ static int db_free_table_hash(DbTable *tbl) static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds) { DbTableHash *tb = &tbl->hash; - FixedDeletion* fixdel = (FixedDeletion*) erts_smp_atomic_read_acqb(&tb->fixdel); - ERTS_SMP_LC_ASSERT(IS_TAB_WLOCKED(tb) || (tb->common.status & DB_DELETE)); + FixedDeletion* fixdel = (FixedDeletion*) erts_atomic_read_acqb(&tb->fixdel); + ERTS_LC_ASSERT(IS_TAB_WLOCKED(tb) || (tb->common.status & DB_DELETE)); while (fixdel != NULL) { FixedDeletion *fx = fixdel; @@ -2404,11 +2355,11 @@ static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds) sizeof(FixedDeletion)); ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion)); if (--reds < 0) { - erts_smp_atomic_set_relb(&tb->fixdel, (erts_aint_t)fixdel); + erts_atomic_set_relb(&tb->fixdel, (erts_aint_t)fixdel); return reds; /* Not done */ } } - erts_smp_atomic_set_relb(&tb->fixdel, (erts_aint_t)NULL); + erts_atomic_set_relb(&tb->fixdel, (erts_aint_t)NULL); while(tb->nslots != 0) { reds -= EXT_SEGSZ/64 + free_seg(tb, 1); @@ -2420,7 +2371,6 @@ static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds) return reds; /* Not done */ } } -#ifdef ERTS_SMP if (tb->locks != NULL) { int i; for (i=0; i<DB_HASH_LOCK_CNT; ++i) { @@ -2430,8 +2380,7 @@ static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds) (void*)tb->locks, sizeof(DbTableHashFineLocks)); tb->locks = NULL; } -#endif - ASSERT(erts_smp_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable)); + ASSERT(erts_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable)); return reds; /* Done */ } @@ -2530,7 +2479,7 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern, if 
(!db_has_variable(key)) { /* Bound key */ int ix, search_slot; HashDbTerm** bp; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; hval = MAKE_HASH(key); lck = RLOCK_HASH(tb,hval); ix = hash_to_ix(tb, hval); @@ -2634,14 +2583,12 @@ static void alloc_seg(DbTableHash *tb) tb->nslots += EXT_SEGSZ; } -#ifdef ERTS_SMP static void dealloc_ext_segtab(void* lop_data) { struct ext_segtab* est = (struct ext_segtab*) lop_data; erts_free(ERTS_ALC_T_DB_SEG, est); } -#endif /* Shrink table by freeing the top segment ** free_records: 1=free any records in segment, 0=assume segment is empty @@ -2680,7 +2627,6 @@ static int free_seg(DbTableHash *tb, int free_records) SET_SEGTAB(tb, est->prev_segtab); tb->nsegs = est->prev_nsegs; -#ifdef ERTS_SMP if (!tb->common.is_thread_safe) { /* * Table is doing a graceful shrink operation and we must avoid @@ -2698,7 +2644,6 @@ static int free_seg(DbTableHash *tb, int free_records) sz); } else -#endif erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable*)tb, est, SIZEOF_EXT_SEGTAB(est->nsegs)); } @@ -2759,22 +2704,18 @@ static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2, static ERTS_INLINE int begin_resizing(DbTableHash* tb) { -#ifdef ERTS_SMP if (DB_USING_FINE_LOCKING(tb)) return !erts_atomic_xchg_acqb(&tb->is_resizing, 1); else ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock)); -#endif return 1; } static ERTS_INLINE void done_resizing(DbTableHash* tb) { -#ifdef ERTS_SMP if (DB_USING_FINE_LOCKING(tb)) erts_atomic_set_relb(&tb->is_resizing, 0); -#endif } /* Grow table with one or more new buckets. @@ -2785,7 +2726,7 @@ static void grow(DbTableHash* tb, int nitems) HashDbTerm** pnext; HashDbTerm** to_pnext; HashDbTerm* p; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int nactive; int from_ix, to_ix; int szm; @@ -2807,7 +2748,7 @@ static void grow(DbTableHash* tb, int nitems) } ASSERT(nactive < tb->nslots); - szm = erts_smp_atomic_read_nob(&tb->szm); + szm = erts_atomic_read_nob(&tb->szm); if (nactive <= szm) { from_ix = nactive & (szm >> 1); } else { @@ -2818,7 +2759,7 @@ static void grow(DbTableHash* tb, int nitems) to_ix = nactive; lck = WLOCK_HASH(tb, from_ix); - ERTS_SMP_ASSERT(lck == GET_LOCK_MAYBE(tb,to_ix)); + ERTS_ASSERT(lck == GET_LOCK_MAYBE(tb,to_ix)); /* Now a final double check (with the from_ix lock held) * that we did not get raced by a table fixer. 
*/ @@ -2826,12 +2767,12 @@ static void grow(DbTableHash* tb, int nitems) WUNLOCK_HASH(lck); goto abort; } - erts_smp_atomic_set_nob(&tb->nactive, ++nactive); + erts_atomic_set_nob(&tb->nactive, ++nactive); if (from_ix == 0) { if (DB_USING_FINE_LOCKING(tb)) - erts_smp_atomic_set_relb(&tb->szm, szm); + erts_atomic_set_relb(&tb->szm, szm); else - erts_smp_atomic_set_nob(&tb->szm, szm); + erts_atomic_set_nob(&tb->szm, szm); } done_resizing(tb); @@ -2879,7 +2820,7 @@ static void shrink(DbTableHash* tb, int nitems) HashDbTerm** src_bp; HashDbTerm** dst_bp; HashDbTerm** bp; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int src_ix, dst_ix, low_szm; int nactive; int loop_limit = 5; @@ -2892,13 +2833,13 @@ static void shrink(DbTableHash* tb, int nitems) goto abort; /* already done (race) */ } src_ix = nactive - 1; - low_szm = erts_smp_atomic_read_nob(&tb->szm) >> 1; + low_szm = erts_atomic_read_nob(&tb->szm) >> 1; dst_ix = src_ix & low_szm; ASSERT(dst_ix < src_ix); ASSERT(nactive > FIRST_SEGSZ); lck = WLOCK_HASH(tb, dst_ix); - ERTS_SMP_ASSERT(lck == GET_LOCK_MAYBE(tb,src_ix)); + ERTS_ASSERT(lck == GET_LOCK_MAYBE(tb,src_ix)); /* Double check for racing table fixers */ if (IS_FIXED(tb)) { WUNLOCK_HASH(lck); @@ -2927,9 +2868,9 @@ static void shrink(DbTableHash* tb, int nitems) *src_bp = NULL; nactive = src_ix; - erts_smp_atomic_set_nob(&tb->nactive, nactive); + erts_atomic_set_nob(&tb->nactive, nactive); if (dst_ix == 0) { - erts_smp_atomic_set_relb(&tb->szm, low_szm); + erts_atomic_set_relb(&tb->szm, low_szm); } WUNLOCK_HASH(lck); @@ -2964,12 +2905,12 @@ static HashDbTerm* search_list(DbTableHash* tb, Eterm key, /* It return the next live object in a table, NULL if no more */ /* In-bucket: RLOCKED */ /* Out-bucket: RLOCKED unless NULL */ -static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr, +static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_rwmtx_t** lck_ptr, HashDbTerm *list) { int i; - ERTS_SMP_LC_ASSERT(IS_HASH_RLOCKED(tb,*iptr)); + ERTS_LC_ASSERT(IS_HASH_RLOCKED(tb,*iptr)); for (list = list->next; list != NULL; list = list->next) { if (list->hvalue != INVALID_HASH) @@ -2999,7 +2940,7 @@ db_lookup_dbterm_hash(Process *p, DbTable *tbl, Eterm key, Eterm obj, DbTableHash *tb = &tbl->hash; HashValue hval; HashDbTerm **bp, *b; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int flags = 0; ASSERT(tb->common.status & DB_SET); @@ -3055,7 +2996,7 @@ db_lookup_dbterm_hash(Process *p, DbTable *tbl, Eterm key, Eterm obj, q->next = next; q->hvalue = hval; *bp = b = q; - erts_smp_atomic_inc_nob(&tb->common.nitems); + erts_atomic_inc_nob(&tb->common.nitems); } HRelease(p, hend, htop); @@ -3081,10 +3022,10 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle) DbTableHash *tb = &tbl->hash; HashDbTerm **bp = (HashDbTerm **) handle->bp; HashDbTerm *b = *bp; - erts_smp_rwmtx_t* lck = (erts_smp_rwmtx_t*) handle->lck; + erts_rwmtx_t* lck = (erts_rwmtx_t*) handle->lck; HashDbTerm* free_me = NULL; - ERTS_SMP_LC_ASSERT(IS_HASH_WLOCKED(tb, lck)); /* locked by db_lookup_dbterm_hash */ + ERTS_LC_ASSERT(IS_HASH_WLOCKED(tb, lck)); /* locked by db_lookup_dbterm_hash */ ASSERT((&b->dbterm == handle->dbterm) == !(tb->common.compress && handle->flags & DB_MUST_RESIZE)); @@ -3098,7 +3039,7 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle) } WUNLOCK_HASH(lck); - erts_smp_atomic_dec_nob(&tb->common.nitems); + erts_atomic_dec_nob(&tb->common.nitems); try_shrink(tb); } else { if (handle->flags & DB_MUST_RESIZE) { @@ -3107,7 +3048,7 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* 
handle) } if (handle->flags & DB_INC_TRY_GROW) { int nactive; - int nitems = erts_smp_atomic_inc_read_nob(&tb->common.nitems); + int nitems = erts_atomic_inc_read_nob(&tb->common.nitems); WUNLOCK_HASH(lck); nactive = NACTIVE(tb); @@ -3135,7 +3076,7 @@ static int db_delete_all_objects_hash(Process* p, DbTable* tbl) } else { db_free_table_hash(tbl); db_create_hash(p, tbl); - erts_smp_atomic_set_nob(&tbl->hash.common.nitems, 0); + erts_atomic_set_nob(&tbl->hash.common.nitems, 0); } return 0; } @@ -3165,7 +3106,7 @@ void db_foreach_offheap_hash(DbTable *tbl, void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats) { HashDbTerm* b; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int sum = 0; int sq_sum = 0; int kept_items = 0; diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h index 523ed7860e..7d27609825 100644 --- a/erts/emulator/beam/erl_db_hash.h +++ b/erts/emulator/beam/erl_db_hash.h @@ -42,8 +42,8 @@ typedef struct hash_db_term { typedef struct db_table_hash_fine_locks { union { - erts_smp_rwmtx_t lck; - byte _cache_line_alignment[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_smp_rwmtx_t))]; + erts_rwmtx_t lck; + byte _cache_line_alignment[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_rwmtx_t))]; }lck_vec[DB_HASH_LOCK_CNT]; } DbTableHashFineLocks; @@ -51,10 +51,10 @@ typedef struct db_table_hash { DbTableCommon common; /* SMP: szm and nactive are write-protected by is_resizing or table write lock */ - erts_smp_atomic_t szm; /* current size mask. */ - erts_smp_atomic_t nactive; /* Number of "active" slots */ + erts_atomic_t szm; /* current size mask. */ + erts_atomic_t nactive; /* Number of "active" slots */ - erts_smp_atomic_t segtab; /* The segment table (struct segment**) */ + erts_atomic_t segtab; /* The segment table (struct segment**) */ struct segment* first_segtab[1]; /* SMP: nslots and nsegs are protected by is_resizing or table write lock */ @@ -62,11 +62,9 @@ typedef struct db_table_hash { int nsegs; /* Size of segment table */ /* List of slots where elements have been deleted while table was fixed */ - erts_smp_atomic_t fixdel; /* (FixedDeletion*) */ -#ifdef ERTS_SMP - erts_smp_atomic_t is_resizing; /* grow/shrink in progress */ + erts_atomic_t fixdel; /* (FixedDeletion*) */ + erts_atomic_t is_resizing; /* grow/shrink in progress */ DbTableHashFineLocks* locks; -#endif } DbTableHash; diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c index 7c80e92e50..5a276b9d88 100644 --- a/erts/emulator/beam/erl_db_tree.c +++ b/erts/emulator/beam/erl_db_tree.c @@ -50,7 +50,7 @@ #include "erl_db_tree.h" #define GETKEY_WITH_POS(Keypos, Tplp) (*((Tplp) + Keypos)) -#define NITEMS(tb) ((int)erts_smp_atomic_read_nob(&(tb)->common.nitems)) +#define NITEMS(tb) ((int)erts_atomic_read_nob(&(tb)->common.nitems)) /* ** A stack of this size is enough for an AVL tree with more than @@ -91,7 +91,7 @@ */ static DbTreeStack* get_static_stack(DbTableTree* tb) { - if (!erts_smp_atomic_xchg_acqb(&tb->is_stack_busy, 1)) { + if (!erts_atomic_xchg_acqb(&tb->is_stack_busy, 1)) { return &tb->static_stack; } return NULL; @@ -103,7 +103,7 @@ static DbTreeStack* get_static_stack(DbTableTree* tb) static DbTreeStack* get_any_stack(DbTableTree* tb) { DbTreeStack* stack; - if (!erts_smp_atomic_xchg_acqb(&tb->is_stack_busy, 1)) { + if (!erts_atomic_xchg_acqb(&tb->is_stack_busy, 1)) { return &tb->static_stack; } stack = erts_db_alloc(ERTS_ALC_T_DB_STK, (DbTable *) tb, @@ -117,8 +117,8 @@ static DbTreeStack* get_any_stack(DbTableTree* tb) static void 
release_stack(DbTableTree* tb, DbTreeStack* stack) { if (stack == &tb->static_stack) { - ASSERT(erts_smp_atomic_read_nob(&tb->is_stack_busy) == 1); - erts_smp_atomic_set_relb(&tb->is_stack_busy, 0); + ASSERT(erts_atomic_read_nob(&tb->is_stack_busy) == 1); + erts_atomic_set_relb(&tb->is_stack_busy, 0); } else { erts_db_free(ERTS_ALC_T_DB_STK, (DbTable *) tb, @@ -514,7 +514,7 @@ int db_create_tree(Process *p, DbTable *tbl) sizeof(TreeDbTerm *) * STACK_NEED); tb->static_stack.pos = 0; tb->static_stack.slot = 0; - erts_smp_atomic_init_nob(&tb->is_stack_busy, 0); + erts_atomic_init_nob(&tb->is_stack_busy, 0); tb->deletion = 0; return DB_ERROR_NONE; } @@ -643,8 +643,8 @@ static int db_put_tree(DbTable *tbl, Eterm obj, int key_clash_fail) for (;;) if (!*this) { /* Found our place */ state = 1; - if (erts_smp_atomic_inc_read_nob(&tb->common.nitems) >= TREE_MAX_ELEMENTS) { - erts_smp_atomic_dec_nob(&tb->common.nitems); + if (erts_atomic_inc_read_nob(&tb->common.nitems) >= TREE_MAX_ELEMENTS) { + erts_atomic_dec_nob(&tb->common.nitems); return DB_ERROR_SYSRES; } *this = new_dbterm(tb, obj); @@ -1605,7 +1605,7 @@ static int db_select_delete_continue_tree(Process *p, sc.max = 1000; sc.keypos = tb->common.keypos; - ASSERT(!erts_smp_atomic_read_nob(&tb->is_stack_busy)); + ASSERT(!erts_atomic_read_nob(&tb->is_stack_busy)); traverse_backwards(tb, &tb->static_stack, lastkey, &doit_select_delete, &sc); BUMP_REDS(p, 1000 - sc.max); @@ -2017,7 +2017,7 @@ static SWord db_free_table_continue_tree(DbTable *tbl, SWord reds) (DbTable *) tb, (void *) tb->static_stack.array, sizeof(TreeDbTerm *) * STACK_NEED); - ASSERT(erts_smp_atomic_read_nob(&tb->common.memory_size) + ASSERT(erts_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable)); } return reds; @@ -2027,7 +2027,7 @@ static int db_delete_all_objects_tree(Process* p, DbTable* tbl) { db_free_table_tree(tbl); db_create_tree(p, tbl); - erts_smp_atomic_set_nob(&tbl->tree.common.nitems, 0); + erts_atomic_set_nob(&tbl->tree.common.nitems, 0); return 0; } @@ -2107,7 +2107,7 @@ static TreeDbTerm *linkout_tree(DbTableTree *tb, Eterm key) { tstack[tpos++] = this; state = delsub(this); } - erts_smp_atomic_dec_nob(&tb->common.nitems); + erts_atomic_dec_nob(&tb->common.nitems); break; } } @@ -2174,7 +2174,7 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb, tstack[tpos++] = this; state = delsub(this); } - erts_smp_atomic_dec_nob(&tb->common.nitems); + erts_atomic_dec_nob(&tb->common.nitems); break; } } diff --git a/erts/emulator/beam/erl_db_tree.h b/erts/emulator/beam/erl_db_tree.h index 72749ead1e..dc1b93d410 100644 --- a/erts/emulator/beam/erl_db_tree.h +++ b/erts/emulator/beam/erl_db_tree.h @@ -41,7 +41,7 @@ typedef struct db_table_tree { /* Tree-specific fields */ TreeDbTerm *root; /* The tree root */ Uint deletion; /* Being deleted */ - erts_smp_atomic_t is_stack_busy; + erts_atomic_t is_stack_busy; DbTreeStack static_stack; } DbTableTree; diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c index 13eacaa8a9..e017b9552b 100644 --- a/erts/emulator/beam/erl_db_util.c +++ b/erts/emulator/beam/erl_db_util.c @@ -170,7 +170,7 @@ static Eterm set_match_trace(Process *tracee_p, Eterm fail_term, ErtsTracer tracer, Uint d_flags, Uint e_flags) { - ERTS_SMP_LC_ASSERT( + ERTS_LC_ASSERT( ERTS_PROC_LOCKS_ALL == erts_proc_lc_my_proc_locks(tracee_p) || erts_thr_progress_is_blocking()); @@ -361,11 +361,7 @@ typedef struct { } ErtsMatchPseudoProcess; -#ifdef ERTS_SMP -static erts_smp_tsd_key_t match_pseudo_process_key; -#else -static 
ErtsMatchPseudoProcess *match_pseudo_process; -#endif +static erts_tsd_key_t match_pseudo_process_key; static ERTS_INLINE void cleanup_match_pseudo_process(ErtsMatchPseudoProcess *mpsp, int keep_heap) @@ -414,32 +410,27 @@ static ERTS_INLINE ErtsMatchPseudoProcess * get_match_pseudo_process(Process *c_p, Uint heap_size) { ErtsMatchPseudoProcess *mpsp; -#ifdef ERTS_SMP ErtsSchedulerData *esdp; esdp = c_p ? c_p->scheduler_data : erts_get_scheduler_data(); mpsp = esdp ? esdp->match_pseudo_process : - (ErtsMatchPseudoProcess*) erts_smp_tsd_get(match_pseudo_process_key); + (ErtsMatchPseudoProcess*) erts_tsd_get(match_pseudo_process_key); if (mpsp) { - ASSERT(mpsp == erts_smp_tsd_get(match_pseudo_process_key)); + ASSERT(mpsp == erts_tsd_get(match_pseudo_process_key)); ASSERT(mpsp->process.scheduler_data == esdp); cleanup_match_pseudo_process(mpsp, 0); } else { - ASSERT(erts_smp_tsd_get(match_pseudo_process_key) == NULL); + ASSERT(erts_tsd_get(match_pseudo_process_key) == NULL); mpsp = create_match_pseudo_process(); if (esdp) { esdp->match_pseudo_process = (void *) mpsp; } mpsp->process.scheduler_data = esdp; - erts_smp_tsd_set(match_pseudo_process_key, (void *) mpsp); + erts_tsd_set(match_pseudo_process_key, (void *) mpsp); } -#else - mpsp = match_pseudo_process; - cleanup_match_pseudo_process(mpsp, 0); -#endif if (heap_size > ERTS_DEFAULT_MS_HEAP_SIZE*sizeof(Eterm)) { mpsp->u.heap = (Eterm*) erts_alloc(ERTS_ALC_T_DB_MS_RUN_HEAP, heap_size); } @@ -449,31 +440,25 @@ get_match_pseudo_process(Process *c_p, Uint heap_size) return mpsp; } -#ifdef ERTS_SMP static void destroy_match_pseudo_process(void) { ErtsMatchPseudoProcess *mpsp; - mpsp = (ErtsMatchPseudoProcess *)erts_smp_tsd_get(match_pseudo_process_key); + mpsp = (ErtsMatchPseudoProcess *)erts_tsd_get(match_pseudo_process_key); if (mpsp) { cleanup_match_pseudo_process(mpsp, 0); erts_free(ERTS_ALC_T_DB_MS_PSDO_PROC, (void *) mpsp); - erts_smp_tsd_set(match_pseudo_process_key, (void *) NULL); + erts_tsd_set(match_pseudo_process_key, (void *) NULL); } } -#endif static void match_pseudo_process_init(void) { -#ifdef ERTS_SMP - erts_smp_tsd_key_create(&match_pseudo_process_key, + erts_tsd_key_create(&match_pseudo_process_key, "erts_match_pseudo_process_key"); - erts_smp_install_exit_handler(destroy_match_pseudo_process); -#else - match_pseudo_process = create_match_pseudo_process(); -#endif + erts_thr_install_exit_handler(destroy_match_pseudo_process); } void @@ -484,7 +469,7 @@ erts_match_set_release_result(Process* c_p) /* The trace control word. */ -static erts_smp_atomic32_t trace_control_word; +static erts_atomic32_t trace_control_word; /* This needs to be here, before the bif table... 
*/ @@ -923,7 +908,7 @@ static void db_free_tmp_uncompressed(DbTerm* obj); */ BIF_RETTYPE db_get_trace_control_word(Process *p) { - Uint32 tcw = (Uint32) erts_smp_atomic32_read_acqb(&trace_control_word); + Uint32 tcw = (Uint32) erts_atomic32_read_acqb(&trace_control_word); BIF_RET(erts_make_integer((Uint) tcw, p)); } @@ -941,7 +926,7 @@ BIF_RETTYPE db_set_trace_control_word(Process *p, Eterm new) if (val != ((Uint32)val)) BIF_ERROR(p, BADARG); - old_tcw = (Uint32) erts_smp_atomic32_xchg_relb(&trace_control_word, + old_tcw = (Uint32) erts_atomic32_xchg_relb(&trace_control_word, (erts_aint32_t) val); BIF_RET(erts_make_integer((Uint) old_tcw, p)); } @@ -1466,7 +1451,7 @@ void db_initialize_util(void){ sizeof(DMCGuardBif), (int (*)(const void *, const void *)) &cmp_guard_bif); match_pseudo_process_init(); - erts_smp_atomic32_init_nob(&trace_control_word, 0); + erts_atomic32_init_nob(&trace_control_word, 0); } @@ -2528,9 +2513,9 @@ restart: case matchEnableTrace: ASSERT(c_p == self); if ( (n = erts_trace_flag2bit(esp[-1]))) { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); set_tracee_flags(c_p, ERTS_TRACER(c_p), 0, n); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); esp[-1] = am_true; } else { esp[-1] = FAIL_TERM; @@ -2545,9 +2530,9 @@ restart: /* Always take over the tracer of the current process */ set_tracee_flags(tmpp, ERTS_TRACER(c_p), 0, n); if (tmpp == c_p) - erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL_MINOR); else - erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL); + erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL); esp[-1] = am_true; } } @@ -2555,9 +2540,9 @@ restart: case matchDisableTrace: ASSERT(c_p == self); if ( (n = erts_trace_flag2bit(esp[-1]))) { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); set_tracee_flags(c_p, ERTS_TRACER(c_p), n, 0); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); esp[-1] = am_true; } else { esp[-1] = FAIL_TERM; @@ -2572,9 +2557,9 @@ restart: /* Always take over the tracer of the current process */ set_tracee_flags(tmpp, ERTS_TRACER(c_p), n, 0); if (tmpp == c_p) - erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL_MINOR); else - erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL); + erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL); esp[-1] = am_true; } } @@ -2598,14 +2583,14 @@ restart: if (in_flags & ERTS_PAM_IGNORE_TRACE_SILENT) break; if (*esp == am_true) { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); ERTS_TRACE_FLAGS(c_p) |= F_TRACE_SILENT; - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); } else if (*esp == am_false) { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); ERTS_TRACE_FLAGS(c_p) &= ~F_TRACE_SILENT; - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); } break; case matchTrace2: @@ -2634,10 +2619,10 @@ restart: ERTS_TRACER_CLEAR(&tracer); break; } - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); (--esp)[-1] = set_match_trace(c_p, FAIL_TERM, tracer, d_flags, e_flags); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(c_p, 
ERTS_PROC_LOCKS_ALL_MINOR); ERTS_TRACER_CLEAR(&tracer); } break; @@ -2667,13 +2652,13 @@ restart: if (tmpp == c_p) { (--esp)[-1] = set_match_trace(c_p, FAIL_TERM, tracer, d_flags, e_flags); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); } else { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); (--esp)[-1] = set_match_trace(tmpp, FAIL_TERM, tracer, d_flags, e_flags); - erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } ERTS_TRACER_CLEAR(&tracer); } @@ -3277,7 +3262,7 @@ void db_cleanup_offheap_comp(DbTerm* obj) break; case FUN_SUBTAG: ASSERT(u.pb != &tmp); - if (erts_smp_refc_dectest(&u.fun->fe->refc, 0) == 0) { + if (erts_refc_dectest(&u.fun->fe->refc, 0) == 0) { erts_erase_fun_entry(u.fun->fe); } break; diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h index 7ce104a84c..6b126f35d6 100644 --- a/erts/emulator/beam/erl_db_util.h +++ b/erts/emulator/beam/erl_db_util.h @@ -240,16 +240,14 @@ typedef struct { */ typedef struct db_table_common { - erts_smp_refc_t refc; /* reference count of table struct */ - erts_smp_refc_t fix_count;/* fixation counter */ + erts_refc_t refc; /* reference count of table struct */ + erts_refc_t fix_count;/* fixation counter */ DbTableList all; DbTableList owned; -#ifdef ERTS_SMP - erts_smp_rwmtx_t rwlock; /* rw lock on table */ - erts_smp_mtx_t fixlock; /* Protects fixing_procs and time */ + erts_rwmtx_t rwlock; /* rw lock on table */ + erts_mtx_t fixlock; /* Protects fixing_procs and time */ int is_thread_safe; /* No fine locking inside table needed */ Uint32 type; /* table type, *read only* after creation */ -#endif Eterm owner; /* Pid of the creator */ Eterm heir; /* Pid of the heir */ UWord heir_data; /* To send in ETS-TRANSFER (is_immed or (DbTerm*) */ @@ -257,8 +255,8 @@ typedef struct db_table_common { Eterm the_name; /* an atom */ Binary *btid; DbTableMethod* meth; /* table methods */ - erts_smp_atomic_t nitems; /* Total number of items in table */ - erts_smp_atomic_t memory_size;/* Total memory size. NOTE: in bytes! */ + erts_atomic_t nitems; /* Total number of items in table */ + erts_atomic_t memory_size;/* Total memory size. NOTE: in bytes! */ struct { /* Last fixation time */ ErtsMonotonicTime monotonic; ErtsMonotonicTime offset; @@ -291,7 +289,7 @@ typedef struct db_table_common { (DB_BAG | DB_SET | DB_DUPLICATE_BAG))) #define IS_TREE_TABLE(Status) (!!((Status) & \ DB_ORDERED_SET)) -#define NFIXED(T) (erts_smp_refc_read(&(T)->common.fix_count,0)) +#define NFIXED(T) (erts_refc_read(&(T)->common.fix_count,0)) #define IS_FIXED(T) (NFIXED(T) != 0) /* diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h index 5ad616fec3..d5379a40d5 100644 --- a/erts/emulator/beam/erl_driver.h +++ b/erts/emulator/beam/erl_driver.h @@ -148,14 +148,6 @@ typedef struct _erl_drv_event* ErlDrvEvent; /* An event to be selected on. */ typedef struct _erl_drv_port* ErlDrvPort; /* A port descriptor. */ typedef struct _erl_drv_port* ErlDrvThreadData; /* Thread data. 
*/ -#if !defined(__WIN32__) && !defined(_WIN32) && !defined(_WIN32_) && !defined(USE_SELECT) -struct erl_drv_event_data { - short events; - short revents; -}; -#endif -typedef struct erl_drv_event_data *ErlDrvEventData; /* Event data */ - typedef struct { unsigned long megasecs; unsigned long secs; @@ -270,10 +262,7 @@ typedef struct erl_drv_entry { unsigned int *flags); /* Works mostly like 'control', a synchronous call into the driver. */ - void (*event)(ErlDrvData drv_data, ErlDrvEvent event, - ErlDrvEventData event_data); - /* Called when an event selected by - driver_event() has occurred */ + void (*unused_event_callback)(void); int extended_marker; /* ERL_DRV_EXTENDED_MARKER */ int major_version; /* ERL_DRV_EXTENDED_MAJOR_VERSION */ int minor_version; /* ERL_DRV_EXTENDED_MINOR_VERSION */ @@ -340,8 +329,6 @@ EXTERN void erl_drv_busy_msgq_limits(ErlDrvPort port, ErlDrvSizeT *high); EXTERN int driver_select(ErlDrvPort port, ErlDrvEvent event, int mode, int on); -EXTERN int driver_event(ErlDrvPort port, ErlDrvEvent event, - ErlDrvEventData event_data); EXTERN int driver_output(ErlDrvPort port, char *buf, ErlDrvSizeT len); EXTERN int driver_output2(ErlDrvPort port, char *hbuf, ErlDrvSizeT hlen, diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c index 742c428f2a..71d4534ef9 100644 --- a/erts/emulator/beam/erl_drv_thread.c +++ b/erts/emulator/beam/erl_drv_thread.c @@ -50,7 +50,6 @@ fatal_error(int err, char *func) #define ERL_DRV_TSD_EXTRA 10 #define ERL_DRV_INVALID_TSD_KEY INT_MAX -#ifdef USE_THREADS struct ErlDrvMutex_ { ethr_mutex mtx; @@ -85,10 +84,6 @@ struct ErlDrvTid_ { static ethr_tsd_key tid_key; -#else /* USE_THREADS */ -static Uint tsd_len; -static void **tsd; -#endif static ErlDrvTSDKey next_tsd_key; static ErlDrvTSDKey max_used_tsd_key; @@ -97,7 +92,6 @@ static char **used_tsd_keys; static erts_mtx_t tsd_mtx; static char *no_name; -#ifdef USE_THREADS static void thread_exit_handler(void) @@ -122,21 +116,15 @@ erl_drv_thread_wrapper(void *vdtid) return (*dtid->func)(dtid->arg); } -#endif void erl_drv_thr_init(void) { int i; -#ifdef USE_THREADS int res = ethr_tsd_key_create(&tid_key,"erts_tid_key"); if (res == 0) res = ethr_install_exit_handler(thread_exit_handler); if (res != 0) fatal_error(res, "erl_drv_thr_init()"); -#else - tsd_len = 0; - tsd = NULL; -#endif no_name = "unknown"; next_tsd_key = 0; @@ -153,13 +141,12 @@ void erl_drv_thr_init(void) /* * These functions implement the driver thread interface in erl_driver.h. * NOTE: Only use this interface from drivers. From within the emulator use - * either the erl_threads.h, the erl_smp.h or the ethread.h interface. + * either the erl_threads.h or the ethread.h interface. */ ErlDrvMutex * erl_drv_mutex_create(char *name) { -#ifdef USE_THREADS ErlDrvMutex *dmtx = erts_alloc_fnf(ERTS_ALC_T_DRV_MTX, (sizeof(ErlDrvMutex) + (name ? sys_strlen(name) + 1 : 0))); @@ -182,15 +169,11 @@ erl_drv_mutex_create(char *name) #endif } return dmtx; -#else - return (ErlDrvMutex *) NULL; -#endif } void erl_drv_mutex_destroy(ErlDrvMutex *dmtx) { -#ifdef USE_THREADS int res; #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_uninstall(&dmtx->lcnt); @@ -199,24 +182,18 @@ erl_drv_mutex_destroy(ErlDrvMutex *dmtx) if (res != 0) fatal_error(res, "erl_drv_mutex_destroy()"); erts_free(ERTS_ALC_T_DRV_MTX, (void *) dmtx); -#endif } char * erl_drv_mutex_name(ErlDrvMutex *dmtx) { -#ifdef USE_THREADS return dmtx ? 
dmtx->name : NULL; -#else - return NULL; -#endif } int erl_drv_mutex_trylock(ErlDrvMutex *dmtx) { -#ifdef USE_THREADS int res; if (!dmtx) fatal_error(EINVAL, "erl_drv_mutex_trylock()"); @@ -225,22 +202,17 @@ erl_drv_mutex_trylock(ErlDrvMutex *dmtx) erts_lcnt_trylock(&dmtx->lcnt, res); #endif return res; -#else - return 0; -#endif } void erl_drv_mutex_lock(ErlDrvMutex *dmtx) { -#ifdef USE_THREADS if (!dmtx) fatal_error(EINVAL, "erl_drv_mutex_lock()"); #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock(&dmtx->lcnt); #endif ethr_mutex_lock(&dmtx->mtx); -#endif #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock_post(&dmtx->lcnt); #endif @@ -249,20 +221,17 @@ erl_drv_mutex_lock(ErlDrvMutex *dmtx) void erl_drv_mutex_unlock(ErlDrvMutex *dmtx) { -#ifdef USE_THREADS if (!dmtx) fatal_error(EINVAL, "erl_drv_mutex_unlock()"); #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_unlock(&dmtx->lcnt); #endif ethr_mutex_unlock(&dmtx->mtx); -#endif } ErlDrvCond * erl_drv_cond_create(char *name) { -#ifdef USE_THREADS ErlDrvCond *dcnd = erts_alloc_fnf(ERTS_ALC_T_DRV_CND, (sizeof(ErlDrvCond) + (name ? sys_strlen(name) + 1 : 0))); @@ -281,57 +250,43 @@ erl_drv_cond_create(char *name) } } return dcnd; -#else - return (ErlDrvCond *) NULL; -#endif } void erl_drv_cond_destroy(ErlDrvCond *dcnd) { -#ifdef USE_THREADS int res = dcnd ? ethr_cond_destroy(&dcnd->cnd) : EINVAL; if (res != 0) fatal_error(res, "erl_drv_cond_destroy()"); erts_free(ERTS_ALC_T_DRV_CND, (void *) dcnd); -#endif } char * erl_drv_cond_name(ErlDrvCond *dcnd) { -#ifdef USE_THREADS return dcnd ? dcnd->name : NULL; -#else - return NULL; -#endif } void erl_drv_cond_signal(ErlDrvCond *dcnd) { -#ifdef USE_THREADS if (!dcnd) fatal_error(EINVAL, "erl_drv_cond_signal()"); ethr_cond_signal(&dcnd->cnd); -#endif } void erl_drv_cond_broadcast(ErlDrvCond *dcnd) { -#ifdef USE_THREADS if (!dcnd) fatal_error(EINVAL, "erl_drv_cond_broadcast()"); ethr_cond_broadcast(&dcnd->cnd); -#endif } void erl_drv_cond_wait(ErlDrvCond *dcnd, ErlDrvMutex *dmtx) { -#ifdef USE_THREADS if (!dcnd || !dmtx) { fatal_error(EINVAL, "erl_drv_cond_wait()"); } @@ -348,13 +303,11 @@ erl_drv_cond_wait(ErlDrvCond *dcnd, ErlDrvMutex *dmtx) break; } } -#endif } ErlDrvRWLock * erl_drv_rwlock_create(char *name) { -#ifdef USE_THREADS ErlDrvRWLock *drwlck = erts_alloc_fnf(ERTS_ALC_T_DRV_RWLCK, (sizeof(ErlDrvRWLock) + (name ? sys_strlen(name) + 1 : 0))); @@ -375,15 +328,11 @@ erl_drv_rwlock_create(char *name) #endif } return drwlck; -#else - return (ErlDrvRWLock *) NULL; -#endif } void erl_drv_rwlock_destroy(ErlDrvRWLock *drwlck) { -#ifdef USE_THREADS int res; #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_uninstall(&drwlck->lcnt); @@ -392,23 +341,17 @@ erl_drv_rwlock_destroy(ErlDrvRWLock *drwlck) if (res != 0) fatal_error(res, "erl_drv_rwlock_destroy()"); erts_free(ERTS_ALC_T_DRV_RWLCK, (void *) drwlck); -#endif } char * erl_drv_rwlock_name(ErlDrvRWLock *drwlck) { -#ifdef USE_THREADS return drwlck ? 
drwlck->name : NULL; -#else - return NULL; -#endif } int erl_drv_rwlock_tryrlock(ErlDrvRWLock *drwlck) { -#ifdef USE_THREADS int res; if (!drwlck) fatal_error(EINVAL, "erl_drv_rwlock_tryrlock()"); @@ -417,15 +360,11 @@ erl_drv_rwlock_tryrlock(ErlDrvRWLock *drwlck) erts_lcnt_trylock_opt(&drwlck->lcnt, res, ERTS_LOCK_OPTIONS_READ); #endif return res; -#else - return 0; -#endif } void erl_drv_rwlock_rlock(ErlDrvRWLock *drwlck) { -#ifdef USE_THREADS if (!drwlck) fatal_error(EINVAL, "erl_drv_rwlock_rlock()"); #ifdef ERTS_ENABLE_LOCK_COUNT @@ -435,26 +374,22 @@ erl_drv_rwlock_rlock(ErlDrvRWLock *drwlck) #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock_post(&drwlck->lcnt); #endif -#endif } void erl_drv_rwlock_runlock(ErlDrvRWLock *drwlck) { -#ifdef USE_THREADS if (!drwlck) fatal_error(EINVAL, "erl_drv_rwlock_runlock()"); #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_unlock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_READ); #endif ethr_rwmutex_runlock(&drwlck->rwmtx); -#endif } int erl_drv_rwlock_tryrwlock(ErlDrvRWLock *drwlck) { -#ifdef USE_THREADS int res; if (!drwlck) fatal_error(EINVAL, "erl_drv_rwlock_tryrwlock()"); @@ -463,15 +398,11 @@ erl_drv_rwlock_tryrwlock(ErlDrvRWLock *drwlck) erts_lcnt_trylock_opt(&drwlck->lcnt, res, ERTS_LOCK_OPTIONS_RDWR); #endif return res; -#else - return 0; -#endif } void erl_drv_rwlock_rwlock(ErlDrvRWLock *drwlck) { -#ifdef USE_THREADS if (!drwlck) fatal_error(EINVAL, "erl_drv_rwlock_rwlock()"); #ifdef ERTS_ENABLE_LOCK_COUNT @@ -481,20 +412,17 @@ erl_drv_rwlock_rwlock(ErlDrvRWLock *drwlck) #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock_post(&drwlck->lcnt); #endif -#endif } void erl_drv_rwlock_rwunlock(ErlDrvRWLock *drwlck) { -#ifdef USE_THREADS if (!drwlck) fatal_error(EINVAL, "erl_drv_rwlock_rwunlock()"); #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_unlock_opt(&drwlck->lcnt, ERTS_LOCK_OPTIONS_RDWR); #endif ethr_rwmutex_rwunlock(&drwlck->rwmtx); -#endif } int @@ -588,20 +516,13 @@ erl_drv_tsd_key_destroy(ErlDrvTSDKey key) } -#ifdef USE_THREADS #define ERL_DRV_TSD__ (dtid->tsd) #define ERL_DRV_TSD_LEN__ (dtid->tsd_len) -#else -#define ERL_DRV_TSD__ (tsd) -#define ERL_DRV_TSD_LEN__ (tsd_len) -#endif void erl_drv_tsd_set(ErlDrvTSDKey key, void *data) { -#ifdef USE_THREADS struct ErlDrvTid_ *dtid = (struct ErlDrvTid_ *) erl_drv_thread_self(); -#endif if (key < 0 || max_used_tsd_key < key || !used_tsd_keys[key]) fatal_error(EINVAL, "erl_drv_tsd_set()"); @@ -629,15 +550,11 @@ erl_drv_tsd_set(ErlDrvTSDKey key, void *data) void * erl_drv_tsd_get(ErlDrvTSDKey key) { -#ifdef USE_THREADS struct ErlDrvTid_ *dtid = ethr_tsd_get(tid_key); -#endif if (key < 0 || max_used_tsd_key < key || !used_tsd_keys[key]) fatal_error(EINVAL, "erl_drv_tsd_get()"); -#ifdef USE_THREADS if (!dtid) return NULL; -#endif if (ERL_DRV_TSD_LEN__ <= key) return NULL; return ERL_DRV_TSD__[key]; @@ -672,7 +589,6 @@ erl_drv_thread_create(char *name, void* arg, ErlDrvThreadOpts *opts) { -#ifdef USE_THREADS int res; struct ErlDrvTid_ *dtid; ethr_thr_opts ethr_opts = ETHR_THR_OPTS_DEFAULT_INITER; @@ -714,27 +630,19 @@ erl_drv_thread_create(char *name, *tid = (ErlDrvTid) dtid; return 0; -#else - return ENOTSUP; -#endif } char * erl_drv_thread_name(ErlDrvTid tid) { -#ifdef USE_THREADS struct ErlDrvTid_ *dtid = (struct ErlDrvTid_ *) tid; return dtid ? 
dtid->name : NULL; -#else - return NULL; -#endif } ErlDrvTid erl_drv_thread_self(void) { -#ifdef USE_THREADS struct ErlDrvTid_ *dtid = ethr_tsd_get(tid_key); if (!dtid) { int res; @@ -753,15 +661,11 @@ erl_drv_thread_self(void) fatal_error(res, "erl_drv_thread_self()"); } return (ErlDrvTid) dtid; -#else - return (ErlDrvTid) NULL; -#endif } int erl_drv_equal_tids(ErlDrvTid tid1, ErlDrvTid tid2) { -#ifdef USE_THREADS int res; struct ErlDrvTid_ *dtid1 = (struct ErlDrvTid_ *) tid1; struct ErlDrvTid_ *dtid2 = (struct ErlDrvTid_ *) tid2; @@ -775,28 +679,22 @@ erl_drv_equal_tids(ErlDrvTid tid1, ErlDrvTid tid2) : !ethr_equal_tids(dtid1->tid, dtid2->tid)); return res; -#else - return 1; -#endif } void erl_drv_thread_exit(void *res) { -#ifdef USE_THREADS struct ErlDrvTid_ *dtid = ethr_tsd_get(tid_key); if (dtid && dtid->drv_thr) { ethr_thr_exit(res); fatal_error(0, "erl_drv_thread_exit()"); } -#endif fatal_error(EACCES, "erl_drv_thread_exit()"); } int erl_drv_thread_join(ErlDrvTid tid, void **respp) { -#ifdef USE_THREADS int res; struct ErlDrvTid_ *dtid = (struct ErlDrvTid_ *) tid; @@ -809,12 +707,9 @@ erl_drv_thread_join(ErlDrvTid tid, void **respp) if (res == 0) erts_free(ERTS_ALC_T_DRV_TID, dtid); return res; -#else - return ENOTSUP; -#endif } -#if defined(__DARWIN__) && defined(USE_THREADS) && defined(ERTS_SMP) +#if defined(__DARWIN__) extern int erts_darwin_main_thread_pipe[2]; extern int erts_darwin_main_thread_result_pipe[2]; diff --git a/erts/emulator/beam/erl_fun.c b/erts/emulator/beam/erl_fun.c index 535f677bb3..9c866250bb 100644 --- a/erts/emulator/beam/erl_fun.c +++ b/erts/emulator/beam/erl_fun.c @@ -30,17 +30,16 @@ static Hash erts_fun_table; -#include "erl_smp.h" #ifdef HIPE # include "hipe_mode_switch.h" #endif -static erts_smp_rwmtx_t erts_fun_table_lock; +static erts_rwmtx_t erts_fun_table_lock; -#define erts_fun_read_lock() erts_smp_rwmtx_rlock(&erts_fun_table_lock) -#define erts_fun_read_unlock() erts_smp_rwmtx_runlock(&erts_fun_table_lock) -#define erts_fun_write_lock() erts_smp_rwmtx_rwlock(&erts_fun_table_lock) -#define erts_fun_write_unlock() erts_smp_rwmtx_rwunlock(&erts_fun_table_lock) +#define erts_fun_read_lock() erts_rwmtx_rlock(&erts_fun_table_lock) +#define erts_fun_read_unlock() erts_rwmtx_runlock(&erts_fun_table_lock) +#define erts_fun_write_lock() erts_rwmtx_rwlock(&erts_fun_table_lock) +#define erts_fun_write_unlock() erts_rwmtx_rwunlock(&erts_fun_table_lock) static HashValue fun_hash(ErlFunEntry* obj); static int fun_cmp(ErlFunEntry* obj1, ErlFunEntry* obj2); @@ -59,11 +58,11 @@ void erts_init_fun_table(void) { HashFunctions f; - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; - rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; + rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; + rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED; - erts_smp_rwmtx_init_opt(&erts_fun_table_lock, &rwmtx_opt, "fun_tab", NIL, + erts_rwmtx_init_opt(&erts_fun_table_lock, &rwmtx_opt, "fun_tab", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); f.hash = (H_FUN) fun_hash; @@ -114,9 +113,9 @@ erts_put_fun_entry(Eterm mod, int uniq, int index) fe = (ErlFunEntry *) hash_put(&erts_fun_table, (void*) &template); sys_memset(fe->uniq, 0, sizeof(fe->uniq)); fe->index = 0; - refc = erts_smp_refc_inctest(&fe->refc, 0); + refc = erts_refc_inctest(&fe->refc, 0); if (refc < 2) /* New or pending delete */ - erts_smp_refc_inc(&fe->refc, 1); + 
erts_refc_inc(&fe->refc, 1); erts_fun_write_unlock(); return fe; } @@ -138,9 +137,9 @@ erts_put_fun_entry2(Eterm mod, int old_uniq, int old_index, sys_memcpy(fe->uniq, uniq, sizeof(fe->uniq)); fe->index = index; fe->arity = arity; - refc = erts_smp_refc_inctest(&fe->refc, 0); + refc = erts_refc_inctest(&fe->refc, 0); if (refc < 2) /* New or pending delete */ - erts_smp_refc_inc(&fe->refc, 1); + erts_refc_inc(&fe->refc, 1); erts_fun_write_unlock(); return fe; } @@ -165,9 +164,9 @@ erts_get_fun_entry(Eterm mod, int uniq, int index) erts_fun_read_lock(); ret = (ErlFunEntry *) hash_get(&erts_fun_table, (void*) &template); if (ret) { - erts_aint_t refc = erts_smp_refc_inctest(&ret->refc, 1); + erts_aint_t refc = erts_refc_inctest(&ret->refc, 1); if (refc < 2) /* Pending delete */ - erts_smp_refc_inc(&ret->refc, 1); + erts_refc_inc(&ret->refc, 1); } erts_fun_read_unlock(); return ret; @@ -183,13 +182,11 @@ void erts_erase_fun_entry(ErlFunEntry* fe) { erts_fun_write_lock(); -#ifdef ERTS_SMP /* * We have to check refc again since someone might have looked up * the fun entry and incremented refc after last check. */ - if (erts_smp_refc_dectest(&fe->refc, -1) <= 0) -#endif + if (erts_refc_dectest(&fe->refc, -1) <= 0) { if (fe->address != unloaded_fun) erts_exit(ERTS_ERROR_EXIT, @@ -221,7 +218,7 @@ erts_fun_purge_prepare(BeamInstr* start, BeamInstr* end) if (start <= addr && addr < end) { fe->pend_purge_address = addr; - ERTS_SMP_WRITE_MEMORY_BARRIER; + ERTS_THR_WRITE_MEMORY_BARRIER; fe->address = unloaded_fun; #ifdef HIPE fe->pend_purge_native_address = fe->native_address; @@ -275,10 +272,10 @@ erts_fun_purge_complete(ErlFunEntry **funs, Uint no) #ifdef HIPE fe->pend_purge_native_address = NULL; #endif - if (erts_smp_refc_dectest(&fe->refc, 0) == 0) + if (erts_refc_dectest(&fe->refc, 0) == 0) erts_erase_fun_entry(fe); } - ERTS_SMP_WRITE_MEMORY_BARRIER; + ERTS_THR_WRITE_MEMORY_BARRIER; } void @@ -307,7 +304,7 @@ erts_dump_fun_entries(fmtfn_t to, void *to_arg) #ifdef HIPE erts_print(to, to_arg, "Native_address: %p\n", fe->native_address); #endif - erts_print(to, to_arg, "Refc: %ld\n", erts_smp_refc_read(&fe->refc, 1)); + erts_print(to, to_arg, "Refc: %ld\n", erts_refc_read(&fe->refc, 1)); b = b->next; } } @@ -338,7 +335,7 @@ fun_alloc(ErlFunEntry* template) obj->old_uniq = template->old_uniq; obj->old_index = template->old_index; obj->module = template->module; - erts_smp_refc_init(&obj->refc, -1); + erts_refc_init(&obj->refc, -1); obj->address = unloaded_fun; obj->pend_purge_address = NULL; #ifdef HIPE diff --git a/erts/emulator/beam/erl_fun.h b/erts/emulator/beam/erl_fun.h index 289d0d0b28..fb2901d866 100644 --- a/erts/emulator/beam/erl_fun.h +++ b/erts/emulator/beam/erl_fun.h @@ -21,7 +21,7 @@ #ifndef __ERLFUNTABLE_H__ #define __ERLFUNTABLE_H__ -#include "erl_smp.h" +#include "erl_threads.h" /* * Fun entry. @@ -42,7 +42,7 @@ typedef struct erl_fun_entry { Uint arity; /* The arity of the fun. */ Eterm module; /* Tagged atom for module. */ - erts_smp_refc_t refc; /* Reference count: One for code + one for each + erts_refc_t refc; /* Reference count: One for code + one for each fun object in each process. 
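
The erl_fun.h comment above states the ownership rule for a fun entry's refc, and the put/get paths in erl_fun.c pair it with the "inctest below 2 means new or pending delete" check. A minimal sketch of that resurrect-on-lookup idiom, assuming a plain C11 atomic counter rather than the real erts_refc_t:

```c
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_long val; } refc_t;

/* Increment and return the new count ("inctest"). */
static long refc_inctest(refc_t *r) {
    return atomic_fetch_add(&r->val, 1) + 1;
}

int main(void) {
    refc_t refc;
    atomic_init(&refc.val, 0);   /* entry found while "pending delete" */

    /* As in erts_put_fun_entry() above: a post-increment count below 2
     * means the entry was new or pending delete, so it is bumped once
     * more to restore the reference held by the loaded code. */
    if (refc_inctest(&refc) < 2)
        refc_inctest(&refc);

    printf("refc is now %ld\n", atomic_load(&refc.val));
    return 0;
}
```

The extra increment keeps the entry alive until erts_erase_fun_entry() re-checks the count under the write lock.
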
*/ BeamInstr *pend_purge_address; /* address stored during a pending purge */ #ifdef HIPE diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c index 8cb977a7f3..97a1ca915f 100644 --- a/erts/emulator/beam/erl_gc.c +++ b/erts/emulator/beam/erl_gc.c @@ -180,15 +180,13 @@ typedef struct { Eterm ref; Eterm ref_heap[ERTS_REF_THING_SIZE]; Uint req_sched; - erts_smp_atomic32_t refc; + erts_atomic32_t refc; } ErtsGCInfoReq; -#ifdef ERTS_DIRTY_SCHEDULERS static struct { erts_mtx_t mtx; ErtsGCInfo info; } dirty_gc; -#endif static ERTS_INLINE int gc_cost(Uint gc_moved_live_words, Uint resize_moved_words) @@ -273,11 +271,9 @@ erts_init_gc(void) init_gc_info(&esdp->gc_info); } -#ifdef ERTS_DIRTY_SCHEDULERS - erts_smp_mtx_init(&dirty_gc.mtx, "dirty_gc_info", NIL, + erts_mtx_init(&dirty_gc.mtx, "dirty_gc_info", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); init_gc_info(&dirty_gc.info); -#endif init_gcireq_alloc(); } @@ -341,7 +337,7 @@ erts_heap_sizes(Process* p) for (i = num_heap_sizes-1; i >= 0; i--) { n += 2; - if (!MY_IS_SSMALL(heap_sizes[i])) { + if (!IS_SSMALL(heap_sizes[i])) { big += BIG_UINT_HEAP_SIZE; } } @@ -356,7 +352,7 @@ erts_heap_sizes(Process* p) Eterm num; Sint sz = heap_sizes[i]; - if (MY_IS_SSMALL(sz)) { + if (IS_SSMALL(sz)) { num = make_small(sz); } else { num = uint_to_big(sz, bigp); @@ -481,12 +477,10 @@ delay_garbage_collection(Process *p, ErlHeapFragment *live_hf_end, int need, int } if (need == 0) { -#ifdef ERTS_DIRTY_SCHEDULERS if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) { ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p))); goto force_reschedule; } -#endif return 1; } /* @@ -541,9 +535,7 @@ delay_garbage_collection(Process *p, ErlHeapFragment *live_hf_end, int need, int p->heap_hfrag = hfrag; #endif -#ifdef ERTS_DIRTY_SCHEDULERS force_reschedule: -#endif /* Make sure that we do a proper GC as soon as possible... */ p->flags |= F_FORCE_GC; @@ -616,7 +608,6 @@ young_gen_usage(Process *p) } \ } while (0) -#ifdef ERTS_DIRTY_SCHEDULERS static ERTS_INLINE void check_for_possibly_long_gc(Process *p, Uint ygen_usage) @@ -640,7 +631,6 @@ check_for_possibly_long_gc(Process *p, Uint ygen_usage) } } -#endif /* * Garbage collect a process. @@ -672,24 +662,20 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end, ASSERT(CONTEXT_REDS - ERTS_REDS_LEFT(p, fcalls) >= esdp->virtual_reds); - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); if ((p->flags & (F_DISABLE_GC|F_DELAY_GC)) || state & ERTS_PSFLG_EXITING) { -#ifdef ERTS_DIRTY_SCHEDULERS delay_gc_before_start: -#endif return delay_garbage_collection(p, live_hf_end, need, fcalls); } ygen_usage = max_young_gen_usage ? 
max_young_gen_usage : young_gen_usage(p); -#ifdef ERTS_DIRTY_SCHEDULERS if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { check_for_possibly_long_gc(p, ygen_usage); if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) goto delay_gc_before_start; } -#endif if (p->abandoned_heap) live_hf_end = ERTS_INVALID_HFRAG_PTR; @@ -698,7 +684,7 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end, ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_GC); - erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC); + erts_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC); if (erts_system_monitor_long_gc != 0) start_time = erts_get_monotonic_time(esdp); @@ -731,14 +717,12 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end, if (IS_TRACED_FL(p, F_TRACE_GC)) { trace_gc(p, am_gc_minor_end, reclaimed_now, THE_NON_VALUE); } -#ifdef ERTS_DIRTY_SCHEDULERS if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { p->flags |= F_NEED_FULLSWEEP; check_for_possibly_long_gc(p, ygen_usage); if (p->flags & F_DIRTY_MAJOR_GC) goto delay_gc_after_start; } -#endif goto do_major_collection; } if (ERTS_SCHEDULER_IS_DIRTY(esdp)) @@ -779,17 +763,15 @@ do_major_collection: ErtsProcLocks locks = ERTS_PROC_LOCKS_ALL; int res; - erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); erts_send_exit_signal(p, p->common.id, p, &locks, am_kill, NIL, NULL, 0); - erts_smp_proc_unlock(p, locks & ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(p, locks & ERTS_PROC_LOCKS_ALL_MINOR); -#ifdef ERTS_DIRTY_SCHEDULERS delay_gc_after_start: -#endif /* erts_send_exit_signal looks for ERTS_PSFLG_GC, so we have to remove it after the signal is sent */ - erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC); + erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC); /* We have to make sure that we have space for need on the heap */ res = delay_garbage_collection(p, live_hf_end, need, fcalls); @@ -797,7 +779,7 @@ do_major_collection: return res; } - erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC); + erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC); if (IS_TRACED_FL(p, F_TRACE_GC)) { trace_gc(p, gc_trace_end_tag, reclaimed_now, THE_NON_VALUE); @@ -821,7 +803,6 @@ do_major_collection: monitor_large_heap(p); } -#ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_SCHEDULER_IS_DIRTY(esdp)) { erts_mtx_lock(&dirty_gc.mtx); dirty_gc.info.garbage_cols++; @@ -829,7 +810,6 @@ do_major_collection: erts_mtx_unlock(&dirty_gc.mtx); } else -#endif { esdp->gc_info.garbage_cols++; esdp->gc_info.reclaimed += reclaimed_now; @@ -907,7 +887,6 @@ garbage_collect_hibernate(Process* p, int check_long_gc) if (p->flags & F_DISABLE_GC) ERTS_INTERNAL_ERROR("GC disabled"); -#ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p))) p->flags &= ~(F_DIRTY_GC_HIBERNATE|F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC); else if (check_long_gc) { @@ -920,11 +899,10 @@ garbage_collect_hibernate(Process* p, int check_long_gc) } p->flags = flags; } -#endif /* * Preliminaries. */ - erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC); + erts_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC); ErtsGcQuickSanityCheck(p); ASSERT(p->stop == p->hend); /* Stack must be empty. 
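
The dirty-GC hand-off that becomes unconditional above works by tagging the process and delaying the collection rather than running a potentially long GC on a normal scheduler. A simplified control-flow sketch with stand-in flags and types (not the real Process struct or ERTS macros):

```c
#include <stdbool.h>
#include <stdio.h>

enum { F_DIRTY_MAJOR_GC = 1 << 0, F_DIRTY_MINOR_GC = 1 << 1 };

struct proc { unsigned flags; unsigned long young_gen_usage; };

/* Stand-in for check_for_possibly_long_gc(): if the amount of live data
 * suggests the collection could blow the reduction budget, tag the
 * process so the GC is handed over to a dirty scheduler. */
static void maybe_flag_long_gc(struct proc *p, unsigned long limit) {
    if (p->young_gen_usage > limit)
        p->flags |= F_DIRTY_MAJOR_GC;
}

static bool gc_on_normal_scheduler(struct proc *p, unsigned long limit) {
    maybe_flag_long_gc(p, limit);
    if (p->flags & (F_DIRTY_MAJOR_GC | F_DIRTY_MINOR_GC))
        return false;          /* delay; reschedule on a dirty scheduler */
    /* ... perform the collection here ... */
    return true;
}

int main(void) {
    struct proc p = { 0, 1u << 20 };
    printf(gc_on_normal_scheduler(&p, 1u << 16)
           ? "collected in place\n" : "deferred to dirty scheduler\n");
    return 0;
}
```
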
*/ @@ -1015,7 +993,7 @@ garbage_collect_hibernate(Process* p, int check_long_gc) p->flags |= F_HIBERNATED; - erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC); + erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC); reds = gc_cost(actual_size, actual_size); return reds; @@ -1110,7 +1088,6 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, p->flags |= F_NEED_FULLSWEEP; -#ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p))) p->flags &= ~F_DIRTY_CLA; else { @@ -1126,7 +1103,6 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, return 10; } } -#endif reds = (Sint64) garbage_collect(p, ERTS_INVALID_HFRAG_PTR, 0, p->arg_reg, p->arity, fcalls, @@ -1137,7 +1113,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, /* * Set GC state. */ - erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC); + erts_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC); /* * Just did a major collection (which has discarded the old heap), @@ -1284,7 +1260,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals, /* * Restore status. */ - erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC); + erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC); reds += (Sint64) gc_cost((p->htop - p->heap) + byte_lit_size/sizeof(Uint), 0); @@ -2914,7 +2890,7 @@ sweep_off_heap(Process *p, int fullsweep) case FUN_SUBTAG: { ErlFunEntry* fe = ((ErlFunThing*)ptr)->fe; - if (erts_smp_refc_dectest(&fe->refc, 0) == 0) { + if (erts_refc_dectest(&fe->refc, 0) == 0) { erts_erase_fun_entry(fe); } break; @@ -3230,7 +3206,6 @@ reply_gc_info(void *vgcirp) reclaimed = esdp->gc_info.reclaimed; garbage_cols = esdp->gc_info.garbage_cols; -#ifdef ERTS_DIRTY_SCHEDULERS /* * Add dirty schedulers info on requesting * schedulers info @@ -3241,7 +3216,6 @@ reply_gc_info(void *vgcirp) garbage_cols += dirty_gc.info.garbage_cols; erts_mtx_unlock(&dirty_gc.mtx); } -#endif sz = 0; hpp = NULL; @@ -3274,11 +3248,11 @@ reply_gc_info(void *vgcirp) rp_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); erts_proc_dec_refc(rp); - if (erts_smp_atomic32_dec_read_nob(&gcirp->refc) == 0) + if (erts_atomic32_dec_read_nob(&gcirp->refc) == 0) gcireq_free(vgcirp); } @@ -3330,18 +3304,16 @@ erts_gc_info_request(Process *c_p) gcirp->proc = c_p; gcirp->ref = STORE_NC(&hp, NULL, ref); gcirp->req_sched = esdp->no; - erts_smp_atomic32_init_nob(&gcirp->refc, + erts_atomic32_init_nob(&gcirp->refc, (erts_aint32_t) erts_no_schedulers); erts_proc_add_refc(c_p, (Sint) erts_no_schedulers); -#ifdef ERTS_SMP if (erts_no_schedulers > 1) erts_schedule_multi_misc_aux_work(1, erts_no_schedulers, reply_gc_info, (void *) gcirp); -#endif reply_gc_info((void *) gcirp); @@ -3628,12 +3600,12 @@ erts_check_off_heap2(Process *p, Eterm *htop) refc = erts_refc_read(&u.pb->val->intern.refc, 1); break; case FUN_SUBTAG: - refc = erts_smp_refc_read(&u.fun->fe->refc, 1); + refc = erts_refc_read(&u.fun->fe->refc, 1); break; case EXTERNAL_PID_SUBTAG: case EXTERNAL_PORT_SUBTAG: case EXTERNAL_REF_SUBTAG: - refc = erts_smp_refc_read(&u.ext->node->refc, 1); + refc = erts_refc_read(&u.ext->node->refc, 1); break; case REF_SUBTAG: ASSERT(is_magic_ref_thing(u.hdr)); diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c index 6e5cc7b801..bda2c9b94d 100644 --- a/erts/emulator/beam/erl_hl_timer.c +++ b/erts/emulator/beam/erl_hl_timer.c @@ -96,13 +96,6 @@ typedef enum { #define ERTS_BIF_TIMER_SHORT_TIME 5000 -#ifdef ERTS_SMP -# define 
ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore \ - ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore) -#else -# define ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore -#endif - /* Bit 0 to 10 contains scheduler id (see mask below) */ #define ERTS_TMR_ROFLG_HLT (((Uint32) 1) << 11) #define ERTS_TMR_ROFLG_BIF_TMR (((Uint32) 1) << 12) @@ -159,7 +152,7 @@ typedef struct { typedef struct { Uint32 roflgs; - erts_smp_atomic32_t refc; + erts_atomic32_t refc; union { void *arg; erts_atomic_t next; @@ -200,7 +193,7 @@ struct ErtsBifTimer_ { ErtsTWTimer twt; } type; struct { - erts_smp_atomic32_t state; + erts_atomic32_t state; #ifdef ERTS_MAGIC_REF_BIF_TIMERS ErtsMagicBinary *mbin; ErtsHLTimerList proc_list; @@ -269,7 +262,6 @@ typedef struct { erts_atomic_t last; } ErtsHLTCncldTmrQTail; -#ifdef ERTS_SMP typedef struct { /* @@ -301,7 +293,6 @@ typedef struct { } head; } ErtsHLTCncldTmrQ; -#endif /* ERTS_SMP */ typedef struct { ErtsHLTimer *root; @@ -309,9 +300,7 @@ typedef struct { } ErtsYieldingTimeoutState; struct ErtsHLTimerService_ { -#ifdef ERTS_SMP ErtsHLTCncldTmrQ canceled_queue; -#endif ErtsHLTimer *time_tree; #ifndef ERTS_MAGIC_REF_BIF_TIMERS ErtsBifTimer *btm_tree; @@ -720,9 +709,7 @@ proc_btm_list_foreach_destroy_yielding(ErtsBifTimer **list, #endif /* !ERTS_MAGIC_REF_BIF_TIMERS */ -#ifdef ERTS_SMP static void init_canceled_queue(ErtsHLTCncldTmrQ *cq); -#endif void erts_hl_timer_init(void) @@ -747,9 +734,7 @@ erts_create_timer_service(void) srv->yield = init_yield; erts_twheel_init_timer(&srv->service_timer); -#ifdef ERTS_SMP init_canceled_queue(&srv->canceled_queue); -#endif return srv; } @@ -791,13 +776,13 @@ get_time_left(ErtsSchedulerData *esdp, ErtsMonotonicTime timeout_pos) static ERTS_INLINE int proc_timeout_common(Process *proc, void *tmr) { - if (tmr == (void *) erts_smp_atomic_cmpxchg_mb(&proc->common.timer, + if (tmr == (void *) erts_atomic_cmpxchg_mb(&proc->common.timer, ERTS_PTMR_TIMEDOUT, (erts_aint_t) tmr)) { erts_aint32_t state; - erts_smp_proc_lock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE); - state = erts_smp_atomic32_read_acqb(&proc->state); - erts_smp_proc_unlock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_lock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE); + state = erts_atomic32_read_acqb(&proc->state); + erts_proc_unlock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE); if (!(state & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_EXITING))) erts_schedule_process(proc, state, 0); return 1; @@ -808,7 +793,7 @@ proc_timeout_common(Process *proc, void *tmr) static ERTS_INLINE int port_timeout_common(Port *port, void *tmr) { - if (tmr == (void *) erts_smp_atomic_cmpxchg_mb(&port->common.timer, + if (tmr == (void *) erts_atomic_cmpxchg_mb(&port->common.timer, ERTS_PTMR_TIMEDOUT, (erts_aint_t) tmr)) { erts_port_task_schedule(port->common.id, @@ -821,24 +806,24 @@ port_timeout_common(Port *port, void *tmr) #ifdef ERTS_MAGIC_REF_BIF_TIMERS -static erts_smp_atomic_t * +static erts_atomic_t * mbin_to_btmref__(ErtsMagicBinary *mbin) { - return erts_smp_binary_to_magic_indirection((Binary *) mbin); + return erts_binary_to_magic_indirection((Binary *) mbin); } static ERTS_INLINE void magic_binary_init(ErtsMagicBinary *mbin, ErtsBifTimer *tmr) { - erts_smp_atomic_t *aptr = mbin_to_btmref__(mbin); - erts_smp_atomic_init_nob(aptr, (erts_aint_t) tmr); + erts_atomic_t *aptr = mbin_to_btmref__(mbin); + erts_atomic_init_nob(aptr, (erts_aint_t) tmr); } static ERTS_INLINE ErtsBifTimer * magic_binary_to_btm(ErtsMagicBinary *mbin) { - erts_smp_atomic_t *aptr = mbin_to_btmref__(mbin); - ErtsBifTimer *tmr = (ErtsBifTimer *) erts_smp_atomic_read_nob(aptr); + 
erts_atomic_t *aptr = mbin_to_btmref__(mbin); + ErtsBifTimer *tmr = (ErtsBifTimer *) erts_atomic_read_nob(aptr); ERTS_HLT_ASSERT(!tmr || tmr->btm.mbin == mbin); return tmr; } @@ -884,7 +869,7 @@ init_btm_specifics(ErtsSchedulerData *esdp, btm_rbt_insert(&esdp->timer_service->btm_tree, tmr); #endif - erts_smp_atomic32_init_nob(&tmr->btm.state, ERTS_TMR_STATE_ACTIVE); + erts_atomic32_init_nob(&tmr->btm.state, ERTS_TMR_STATE_ACTIVE); return refc; /* refc from magic binary... */ } @@ -917,10 +902,10 @@ timer_pre_dec_refc(ErtsTimer *tmr) { #ifdef ERTS_HLT_DEBUG erts_aint_t refc; - refc = erts_smp_atomic32_dec_read_nob(&tmr->head.refc); + refc = erts_atomic32_dec_read_nob(&tmr->head.refc); ERTS_HLT_ASSERT(refc > 0); #else - erts_smp_atomic32_dec_nob(&tmr->head.refc); + erts_atomic32_dec_nob(&tmr->head.refc); #endif } @@ -969,8 +954,8 @@ schedule_tw_timer_destroy(ErtsTWTimer *tmr) static ERTS_INLINE void tw_timer_dec_refc(ErtsTWTimer *tmr) { - if (erts_smp_atomic32_dec_read_relb(&tmr->head.refc) == 0) { - ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore; + if (erts_atomic32_dec_read_relb(&tmr->head.refc) == 0) { + ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore); schedule_tw_timer_destroy(tmr); } } @@ -1114,7 +1099,7 @@ create_tw_timer(ErtsSchedulerData *esdp, return NULL; } - erts_smp_atomic32_init_nob(&tmr->head.refc, refc); + erts_atomic32_init_nob(&tmr->head.refc, refc); erts_twheel_set_timer(esdp->timer_wheel, &tmr->u.tw_tmr, @@ -1147,7 +1132,7 @@ schedule_hl_timer_destroy(ErtsHLTimer *tmr, Uint32 roflgs) * at once... */ - ERTS_HLT_ASSERT(erts_smp_atomic32_read_nob(&tmr->head.refc) == 0); + ERTS_HLT_ASSERT(erts_atomic32_read_nob(&tmr->head.refc) == 0); if (roflgs & ERTS_TMR_ROFLG_REG_NAME) { ERTS_HLT_ASSERT(is_atom(tmr->head.receiver.name)); @@ -1179,14 +1164,13 @@ schedule_hl_timer_destroy(ErtsHLTimer *tmr, Uint32 roflgs) static ERTS_INLINE void hl_timer_dec_refc(ErtsHLTimer *tmr, Uint32 roflgs) { - if (erts_smp_atomic32_dec_read_relb(&tmr->head.refc) == 0) { - ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore; + if (erts_atomic32_dec_read_relb(&tmr->head.refc) == 0) { + ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore); schedule_hl_timer_destroy(tmr, roflgs); } } static void hlt_service_timeout(void *vesdp); -#ifdef ERTS_SMP static void handle_canceled_queue(ErtsSchedulerData *esdp, ErtsHLTCncldTmrQ *cq, int use_limit, @@ -1194,12 +1178,11 @@ static void handle_canceled_queue(ErtsSchedulerData *esdp, int *need_thr_progress, ErtsThrPrgrVal *thr_prgr_p, int *need_more_work); -#endif static ERTS_INLINE void check_canceled_queue(ErtsSchedulerData *esdp, ErtsHLTimerService *srv) { -#if defined(ERTS_SMP) && ERTS_TMR_CHECK_CANCEL_ON_CREATE +#if ERTS_TMR_CHECK_CANCEL_ON_CREATE ErtsHLTCncldTmrQ *cq = &srv->canceled_queue; if (cq->head.first != cq->head.unref_end) handle_canceled_queue(esdp, cq, 1, @@ -1219,14 +1202,14 @@ bif_timer_ref_destructor(Binary *unused) static ERTS_INLINE void btm_clear_magic_binary(ErtsBifTimer *tmr) { - erts_smp_atomic_t *aptr = mbin_to_btmref__(tmr->btm.mbin); + erts_atomic_t *aptr = mbin_to_btmref__(tmr->btm.mbin); Uint32 roflgs = tmr->type.head.roflgs; #ifdef ERTS_HLT_DEBUG - erts_aint_t tval = erts_smp_atomic_xchg_nob(aptr, + erts_aint_t tval = erts_atomic_xchg_nob(aptr, (erts_aint_t) NULL); ERTS_HLT_ASSERT(tval == (erts_aint_t) tmr); #else - erts_smp_atomic_set_nob(aptr, (erts_aint_t) NULL); + erts_atomic_set_nob(aptr, (erts_aint_t) NULL); #endif if (roflgs & ERTS_TMR_ROFLG_HLT) hl_timer_dec_refc(&tmr->type.hlt, roflgs); @@ -1246,7 +1229,7 @@ bif_timer_timeout(ErtsHLTimerService *srv, 
ERTS_HLT_ASSERT(tmr->type.head.roflgs == roflgs); ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_BIF_TMR); - state = erts_smp_atomic32_cmpxchg_acqb(&tmr->btm.state, + state = erts_atomic32_cmpxchg_acqb(&tmr->btm.state, ERTS_TMR_STATE_TIMED_OUT, ERTS_TMR_STATE_ACTIVE); @@ -1279,7 +1262,7 @@ bif_timer_timeout(ErtsHLTimerService *srv, tmr->btm.bp = NULL; erts_queue_message(proc, 0, mp, tmr->btm.message, am_clock_service); - erts_smp_proc_lock(proc, ERTS_PROC_LOCK_BTM); + erts_proc_lock(proc, ERTS_PROC_LOCK_BTM); /* If the process is exiting do not disturb the cleanup... */ if (!ERTS_PROC_IS_EXITING(proc)) { #ifdef ERTS_MAGIC_REF_BIF_TIMERS @@ -1295,7 +1278,7 @@ bif_timer_timeout(ErtsHLTimerService *srv, } #endif } - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM); + erts_proc_unlock(proc, ERTS_PROC_LOCK_BTM); if (dec_refc) timer_pre_dec_refc((ErtsTimer *) tmr); } @@ -1432,7 +1415,7 @@ create_hl_timer(ErtsSchedulerData *esdp, } tmr->head.roflgs = roflgs; - erts_smp_atomic32_init_nob(&tmr->head.refc, refc); + erts_atomic32_init_nob(&tmr->head.refc, refc); if (!srv->next_timeout || tmr->timeout < srv->next_timeout->timeout) { @@ -1664,7 +1647,6 @@ cleanup_sched_local_canceled_timer(ErtsSchedulerData *esdp, } } -#ifdef ERTS_SMP static void init_canceled_queue(ErtsHLTCncldTmrQ *cq) @@ -1794,7 +1776,7 @@ cq_check_incoming(ErtsSchedulerData *esdp, ErtsHLTCncldTmrQ *cq) cq->head.next.thr_progress_reached = 1; /* Move unreferenced end pointer forward... */ - ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore; + ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore); cq->head.unref_end = cq->head.next.unref_end; @@ -1887,31 +1869,24 @@ erts_handle_canceled_timers(void *vesdp, need_more_work); } -#endif /* ERTS_SMP */ static void queue_canceled_timer(ErtsSchedulerData *esdp, int rsched_id, ErtsTimer *tmr) { -#ifdef ERTS_SMP ErtsHLTCncldTmrQ *cq; cq = &ERTS_SCHEDULER_IX(rsched_id-1)->timer_service->canceled_queue; if (cq_enqueue(cq, tmr, rsched_id - (int) esdp->no)) erts_notify_canceled_timer(esdp, rsched_id); -#else - ERTS_INTERNAL_ERROR("Unexpected enqueue of canceled timer"); -#endif } static void continue_cancel_ptimer(ErtsSchedulerData *esdp, ErtsTimer *tmr) { -#ifdef ERTS_SMP Uint32 sid = (tmr->head.roflgs & ERTS_TMR_ROFLG_SID_MASK); if (esdp->no != sid) queue_canceled_timer(esdp, sid, tmr); else -#endif cleanup_sched_local_canceled_timer(esdp, tmr); } @@ -1997,7 +1972,7 @@ setup_bif_timer(Process *c_p, int twheel, ErtsMonotonicTime timeout_pos, #else proc_btm_rbt_insert(&proc->bif_timers, tmr); #endif - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM); + erts_proc_unlock(proc, ERTS_PROC_LOCK_BTM); tmr->type.head.receiver.proc = proc; } } @@ -2018,7 +1993,7 @@ cancel_bif_timer(ErtsBifTimer *tmr) Uint32 roflgs; int res; - state = erts_smp_atomic32_cmpxchg_acqb(&tmr->btm.state, + state = erts_atomic32_cmpxchg_acqb(&tmr->btm.state, ERTS_TMR_STATE_CANCELED, ERTS_TMR_STATE_ACTIVE); if (state != ERTS_TMR_STATE_ACTIVE) @@ -2040,7 +2015,7 @@ cancel_bif_timer(ErtsBifTimer *tmr) proc = tmr->type.head.receiver.proc; ERTS_HLT_ASSERT(!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_REG_NAME)); - erts_smp_proc_lock(proc, ERTS_PROC_LOCK_BTM); + erts_proc_lock(proc, ERTS_PROC_LOCK_BTM); /* * If process is exiting, let it clean up * the btm tree by itself (it may be in @@ -2059,7 +2034,7 @@ cancel_bif_timer(ErtsBifTimer *tmr) res = 1; } #endif - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM); + erts_proc_unlock(proc, ERTS_PROC_LOCK_BTM); } return res; @@ -2082,7 +2057,7 @@ access_btm(ErtsBifTimer *tmr, Uint32 sid, ErtsSchedulerData *esdp, int cancel) : 
erts_tweel_read_timeout(&tmr->type.twt.u.tw_tmr)); if (!cancel) { - erts_aint32_t state = erts_smp_atomic32_read_acqb(&tmr->btm.state); + erts_aint32_t state = erts_atomic32_read_acqb(&tmr->btm.state); if (state == ERTS_TMR_STATE_ACTIVE) return get_time_left(esdp, timeout); return -1; @@ -2176,7 +2151,7 @@ send_async_info(Process *proc, ErtsProcLocks initial_locks, locks &= ~initial_locks; if (locks) - erts_smp_proc_unlock(proc, locks); + erts_proc_unlock(proc, locks); return am_ok; } @@ -2262,7 +2237,7 @@ send_sync_info(Process *proc, ErtsProcLocks initial_locks, locks &= ~initial_locks; if (locks) - erts_smp_proc_unlock(proc, locks); + erts_proc_unlock(proc, locks); return am_ok; } @@ -2376,9 +2351,9 @@ try_access_sched_remote_btm(ErtsSchedulerData *esdp, * Check if the timer is aimed at current * process... */ - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_BTM); + erts_proc_lock(c_p, ERTS_PROC_LOCK_BTM); tmr = proc_btm_rbt_lookup(c_p->bif_timers, trefn); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_BTM); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_BTM); if (!tmr) return 0; @@ -2419,7 +2394,7 @@ no_timer_result(Process *c_p, Eterm tref, int cancel, int async, int info) erts_queue_message(c_p, locks, mp, msg, am_clock_service); locks &= ~ERTS_PROC_LOCK_MAIN; if (locks) - erts_smp_proc_unlock(c_p, locks); + erts_proc_unlock(c_p, locks); return am_ok; } @@ -2495,7 +2470,7 @@ access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info) req->rrefn[1] = rrefn[1]; req->rrefn[2] = rrefn[2]; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); if (ERTS_PROC_PENDING_EXIT(c_p)) ERTS_VBUMP_ALL_REDS(c_p); @@ -2513,10 +2488,10 @@ access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info) * otherwise, next receive will *not* work * as expected! 
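
The cancel and timeout paths above race on btm.state, and a single compare-and-swap away from ACTIVE decides the winner; the loser simply backs off. That one-shot transition can be sketched with C11 atomics (illustrative names, not the ERTS timer types):

```c
#include <stdatomic.h>
#include <stdio.h>

enum { TMR_ACTIVE, TMR_CANCELED, TMR_TIMED_OUT };

typedef struct { _Atomic int state; } bif_timer;

/* Whichever of cancel/timeout swings the state away from ACTIVE first
 * wins; the loser observes the new state and backs off, mirroring the
 * erts_atomic32_cmpxchg_acqb() calls above. */
static int try_transition(bif_timer *t, int to) {
    int expected = TMR_ACTIVE;
    return atomic_compare_exchange_strong_explicit(
        &t->state, &expected, to,
        memory_order_acquire, memory_order_acquire);
}

int main(void) {
    bif_timer t = { TMR_ACTIVE };
    if (try_transition(&t, TMR_CANCELED))
        puts("cancel won: the timeout message will never be sent");
    if (!try_transition(&t, TMR_TIMED_OUT))
        puts("timeout lost the race: nothing to deliver");
    return 0;
}
```
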
*/ - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); c_p->msg.save = c_p->msg.last; } - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); ERTS_BIF_PREP_TRAP1(ret, erts_await_result, c_p, rref); } @@ -2606,7 +2581,7 @@ exit_cancel_bif_timer(ErtsBifTimer *tmr, void *vesdp) erts_aint_t state; int is_hlt; - state = erts_smp_atomic32_cmpxchg_acqb(&tmr->btm.state, + state = erts_atomic32_cmpxchg_acqb(&tmr->btm.state, ERTS_TMR_STATE_CANCELED, ERTS_TMR_STATE_ACTIVE); @@ -2992,7 +2967,7 @@ set_proc_timer_common(Process *c_p, ErtsSchedulerData *esdp, Sint64 tmo, ERTS_TMR_PROC, (void *) c_p, c_p->common.id, THE_NON_VALUE, NULL, NULL, NULL); - erts_smp_atomic_set_relb(&c_p->common.timer, (erts_aint_t) tmr); + erts_atomic_set_relb(&c_p->common.timer, (erts_aint_t) tmr); } } @@ -3003,7 +2978,7 @@ erts_set_proc_timer_term(Process *c_p, Eterm etmo) ErtsMonotonicTime tmo, timeout_pos; int short_time, tres; - ERTS_HLT_ASSERT(erts_smp_atomic_read_nob(&c_p->common.timer) + ERTS_HLT_ASSERT(erts_atomic_read_nob(&c_p->common.timer) == ERTS_PTMR_NONE); tres = parse_timeout_pos(esdp, etmo, &tmo, 0, @@ -3023,7 +2998,7 @@ erts_set_proc_timer_uword(Process *c_p, UWord tmo) { ErtsSchedulerData *esdp = erts_proc_sched_data(c_p); - ERTS_HLT_ASSERT(erts_smp_atomic_read_nob(&c_p->common.timer) + ERTS_HLT_ASSERT(erts_atomic_read_nob(&c_p->common.timer) == ERTS_PTMR_NONE); #ifndef ARCH_32 @@ -3046,13 +3021,13 @@ void erts_cancel_proc_timer(Process *c_p) { erts_aint_t tval; - tval = erts_smp_atomic_xchg_acqb(&c_p->common.timer, + tval = erts_atomic_xchg_acqb(&c_p->common.timer, ERTS_PTMR_NONE); c_p->flags &= ~(F_INSLPQUEUE|F_TIMO); if (tval == ERTS_PTMR_NONE) return; if (tval == ERTS_PTMR_TIMEDOUT) { - erts_smp_atomic_set_nob(&c_p->common.timer, ERTS_PTMR_NONE); + erts_atomic_set_nob(&c_p->common.timer, ERTS_PTMR_NONE); return; } continue_cancel_ptimer(erts_proc_sched_data(c_p), @@ -3067,7 +3042,7 @@ erts_set_port_timer(Port *c_prt, Sint64 tmo) ErtsMonotonicTime timeout_pos; ErtsCreateTimerFunc create_timer; - if (erts_smp_atomic_read_nob(&c_prt->common.timer) != ERTS_PTMR_NONE) + if (erts_atomic_read_nob(&c_prt->common.timer) != ERTS_PTMR_NONE) erts_cancel_port_timer(c_prt); check_canceled_queue(esdp, esdp->timer_service); @@ -3080,14 +3055,14 @@ erts_set_port_timer(Port *c_prt, Sint64 tmo) tmr = (void *) create_timer(esdp, timeout_pos, 0, ERTS_TMR_PORT, (void *) c_prt, c_prt->common.id, THE_NON_VALUE, NULL, NULL, NULL); - erts_smp_atomic_set_relb(&c_prt->common.timer, (erts_aint_t) tmr); + erts_atomic_set_relb(&c_prt->common.timer, (erts_aint_t) tmr); } void erts_cancel_port_timer(Port *c_prt) { erts_aint_t tval; - tval = erts_smp_atomic_xchg_acqb(&c_prt->common.timer, + tval = erts_atomic_xchg_acqb(&c_prt->common.timer, ERTS_PTMR_NONE); if (tval == ERTS_PTMR_NONE) return; @@ -3095,7 +3070,7 @@ erts_cancel_port_timer(Port *c_prt) while (!erts_port_task_is_scheduled(&c_prt->timeout_task)) erts_thr_yield(); erts_port_task_abort(&c_prt->timeout_task); - erts_smp_atomic_set_nob(&c_prt->common.timer, ERTS_PTMR_NONE); + erts_atomic_set_nob(&c_prt->common.timer, ERTS_PTMR_NONE); return; } continue_cancel_ptimer(erts_get_scheduler_data(), @@ -3109,7 +3084,7 @@ erts_read_port_timer(Port *c_prt) erts_aint_t itmr; ErtsMonotonicTime timeout_pos; - itmr = erts_smp_atomic_read_acqb(&c_prt->common.timer); + itmr = erts_atomic_read_acqb(&c_prt->common.timer); if (itmr == ERTS_PTMR_NONE) return (Sint64) -1; if (itmr == ERTS_PTMR_TIMEDOUT) @@ -3246,7 +3221,7 @@ 
debug_btm_foreach(ErtsBifTimer *tmr, void *vbtmfd) if (!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_BIF_TMR)) return; #endif - if (erts_smp_atomic32_read_nob(&tmr->btm.state) == ERTS_TMR_STATE_ACTIVE) { + if (erts_atomic32_read_nob(&tmr->btm.state) == ERTS_TMR_STATE_ACTIVE) { ErtsBTMForeachDebug *btmfd = (ErtsBTMForeachDebug *) vbtmfd; Eterm id = ((tmr->type.head.roflgs & ERTS_TMR_ROFLG_REG_NAME) ? tmr->type.head.receiver.name @@ -3284,7 +3259,7 @@ erts_debug_bif_timer_foreach(void (*func)(Eterm, btmfd.func = func; btmfd.arg = arg; - if (!erts_smp_thr_progress_is_blocking()) + if (!erts_thr_progress_is_blocking()) ERTS_INTERNAL_ERROR("Not blocking thread progress"); for (six = 0; six < erts_no_schedulers; six++) { @@ -3375,7 +3350,7 @@ erts_debug_callback_timer_foreach(void (*tclbk)(void *), dfct.func = func; dfct.arg = arg; - if (!erts_smp_thr_progress_is_blocking()) + if (!erts_thr_progress_is_blocking()) ERTS_INTERNAL_ERROR("Not blocking thread progress"); for (six = 0; six < erts_no_schedulers; six++) { diff --git a/erts/emulator/beam/erl_hl_timer.h b/erts/emulator/beam/erl_hl_timer.h index ff31f04cb9..e6f5e8b67d 100644 --- a/erts/emulator/beam/erl_hl_timer.h +++ b/erts/emulator/beam/erl_hl_timer.h @@ -36,16 +36,16 @@ typedef struct ErtsHLTimerService_ ErtsHLTimerService; #define ERTS_PTMR_TIMEDOUT (ERTS_PTMR_NONE + ((erts_aint_t) 1)) #define ERTS_PTMR_INIT(P) \ - erts_smp_atomic_init_nob(&(P)->common.timer, ERTS_PTMR_NONE) + erts_atomic_init_nob(&(P)->common.timer, ERTS_PTMR_NONE) #define ERTS_PTMR_IS_SET(P) \ - (ERTS_PTMR_NONE != erts_smp_atomic_read_nob(&(P)->common.timer)) + (ERTS_PTMR_NONE != erts_atomic_read_nob(&(P)->common.timer)) #define ERTS_PTMR_IS_TIMED_OUT(P) \ - (ERTS_PTMR_TIMEDOUT == erts_smp_atomic_read_nob(&(P)->common.timer)) + (ERTS_PTMR_TIMEDOUT == erts_atomic_read_nob(&(P)->common.timer)) #define ERTS_PTMR_CLEAR(P) \ do { \ ASSERT(ERTS_PTMR_IS_TIMED_OUT((P))); \ - erts_smp_atomic_set_nob(&(P)->common.timer, \ + erts_atomic_set_nob(&(P)->common.timer, \ ERTS_PTMR_NONE); \ } while (0) @@ -63,13 +63,11 @@ void erts_hl_timer_init(void); void erts_start_timer_callback(ErtsMonotonicTime, void (*)(void *), void *); -#ifdef ERTS_SMP void erts_handle_canceled_timers(void *vesdp, int *need_thr_progress, ErtsThrPrgrVal *thr_prgr_p, int *need_more_work); -#endif Uint erts_bif_timer_memory_size(void); void erts_print_bif_timer_info(fmtfn_t to, void *to_arg); diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c index 5206d7564f..6cef9bd0e3 100644 --- a/erts/emulator/beam/erl_init.c +++ b/erts/emulator/beam/erl_init.c @@ -49,6 +49,7 @@ #include "erl_bif_unique.h" #define ERTS_WANT_TIMER_WHEEL_API #include "erl_time.h" +#include "erl_check_io.h" #ifdef HIPE #include "hipe_mode_switch.h" /* for hipe_mode_switch_init() */ @@ -69,23 +70,17 @@ * The variables below (prefixed with etp_) are for erts/etc/unix/etp-commands * only. Do not remove even though they aren't used elsewhere in the emulator! 
*/ -#ifdef ERTS_SMP const int etp_smp_compiled = 1; -#else -const int etp_smp_compiled = 0; -#endif -#ifdef USE_THREADS const int etp_thread_compiled = 1; -#else -const int etp_thread_compiled = 0; -#endif const char etp_erts_version[] = ERLANG_VERSION; const char etp_otp_release[] = ERLANG_OTP_RELEASE; const char etp_compile_date[] = ERLANG_COMPILE_DATE; const char etp_arch[] = ERLANG_ARCHITECTURE; #ifdef ERTS_ENABLE_KERNEL_POLL +const int erts_use_kernel_poll = 1; const int etp_kernel_poll_support = 1; #else +const int erts_use_kernel_poll = 0; const int etp_kernel_poll_support = 0; #endif #if defined(ARCH_64) @@ -156,17 +151,10 @@ static void erl_init(int ncpu, static erts_atomic_t exiting; -#ifdef ERTS_SMP -erts_smp_atomic32_t erts_writing_erl_crash_dump; +erts_atomic32_t erts_writing_erl_crash_dump; erts_tsd_key_t erts_is_crash_dumping_key; -#else -volatile int erts_writing_erl_crash_dump = 0; -#endif int erts_initialized = 0; -#if defined(USE_THREADS) && !defined(ERTS_SMP) -erts_tid_t erts_main_thread; -#endif int erts_use_sender_punish; @@ -185,7 +173,7 @@ int erts_backtrace_depth; /* How many functions to show in a backtrace * in error codes. */ -erts_smp_atomic32_t erts_max_gen_gcs; +erts_atomic32_t erts_max_gen_gcs; Eterm erts_error_logger_warnings; /* What to map warning logs to, am_error, am_info or am_warning, am_error is @@ -195,11 +183,9 @@ int erts_compat_rel; static int no_schedulers; static int no_schedulers_online; -#ifdef ERTS_DIRTY_SCHEDULERS static int no_dirty_cpu_schedulers; static int no_dirty_cpu_schedulers_online; static int no_dirty_io_schedulers; -#endif #ifdef DEBUG Uint32 verbose; /* See erl_debug.h for information about verbose */ @@ -220,16 +206,16 @@ int erts_no_line_info = 0; /* -L: Don't load line information */ */ ErtsModifiedTimings erts_modified_timings[] = { - /* 0 */ {make_small(0), CONTEXT_REDS, INPUT_REDUCTIONS}, - /* 1 */ {make_small(0), (3*CONTEXT_REDS)/4, 2*INPUT_REDUCTIONS}, - /* 2 */ {make_small(0), CONTEXT_REDS/2, INPUT_REDUCTIONS/2}, - /* 3 */ {make_small(0), (7*CONTEXT_REDS)/8, 3*INPUT_REDUCTIONS}, - /* 4 */ {make_small(0), CONTEXT_REDS/3, 3*INPUT_REDUCTIONS}, - /* 5 */ {make_small(0), (10*CONTEXT_REDS)/11, INPUT_REDUCTIONS/2}, - /* 6 */ {make_small(1), CONTEXT_REDS/4, 2*INPUT_REDUCTIONS}, - /* 7 */ {make_small(1), (5*CONTEXT_REDS)/7, INPUT_REDUCTIONS/3}, - /* 8 */ {make_small(10), CONTEXT_REDS/5, 3*INPUT_REDUCTIONS}, - /* 9 */ {make_small(10), (6*CONTEXT_REDS)/7, INPUT_REDUCTIONS/4} + /* 0 */ {make_small(0), CONTEXT_REDS}, + /* 1 */ {make_small(0), (3*CONTEXT_REDS)/4}, + /* 2 */ {make_small(0), CONTEXT_REDS/2}, + /* 3 */ {make_small(0), (7*CONTEXT_REDS)/8}, + /* 4 */ {make_small(0), CONTEXT_REDS/3}, + /* 5 */ {make_small(0), (10*CONTEXT_REDS)/11}, + /* 6 */ {make_small(1), CONTEXT_REDS/4}, + /* 7 */ {make_small(1), (5*CONTEXT_REDS)/7}, + /* 8 */ {make_small(10), CONTEXT_REDS/5}, + /* 9 */ {make_small(10), (6*CONTEXT_REDS)/7} }; #define ERTS_MODIFIED_TIMING_LEVELS \ @@ -327,12 +313,11 @@ erl_init(int ncpu, erts_init_sys_common_misc(); erts_init_process(ncpu, proc_tab_sz, legacy_proc_tab); erts_init_scheduling(no_schedulers, - no_schedulers_online -#ifdef ERTS_DIRTY_SCHEDULERS - , no_dirty_cpu_schedulers, + no_schedulers_online, + erts_no_poll_threads, + no_dirty_cpu_schedulers, no_dirty_cpu_schedulers_online, no_dirty_io_schedulers -#endif ); erts_late_init_time_sup(); erts_init_cpu_topology(); /* Must be after init_scheduling */ @@ -410,7 +395,7 @@ erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char** */ 
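
The new -IOt/-IOp/-IOPt/-IOPp help text above states two constraints: the poll-thread count may be given as a percentage of the scheduler count, and the pollset count may not exceed the poll-thread count. A toy version of that arithmetic, assuming simple clamping (this is not the emulator's actual option parser):

```c
#include <stdio.h>

/* -IOPt: poll threads as a percentage of the scheduler count. */
static int poll_threads(int schedulers, int iopt_pct) {
    int n = schedulers * iopt_pct / 100;
    return n < 1 ? 1 : n;
}

/* -IOp may not exceed the number of poll threads, so clamp it. */
static int pollsets(int requested, int npoll_threads) {
    return requested > npoll_threads ? npoll_threads : requested;
}

int main(void) {
    int nthr = poll_threads(8, 50);  /* e.g. -IOPt 50 on 8 schedulers */
    printf("poll threads: %d, pollsets: %d\n", nthr, pollsets(4, nthr));
    return 0;
}
```
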
erts_init_empty_process(&parent); - erts_smp_proc_lock(&parent, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(&parent, ERTS_PROC_LOCK_MAIN); hp = HAlloc(&parent, argc*2 + 4); args = NIL; for (i = argc-1; i >= 0; i--) { @@ -425,7 +410,7 @@ erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char** so.flags = erts_default_spo_flags|SPO_SYSTEM_PROC; res = erl_create_process(&parent, start_mod, am_start, args, &so); - erts_smp_proc_unlock(&parent, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(&parent, ERTS_PROC_LOCK_MAIN); erts_cleanup_empty_process(&parent); return res; } @@ -450,7 +435,7 @@ erl_system_process_otp(Eterm parent_pid, char* modname, int off_heap_msgq) if (off_heap_msgq) so.flags |= SPO_OFF_HEAP_MSGQ; res = erl_create_process(parent, start_mod, am_start, NIL, &so); - erts_smp_proc_unlock(parent, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(parent, ERTS_PROC_LOCK_MAIN); return res; } @@ -577,9 +562,19 @@ void erts_usage(void) erts_fprintf(stderr, "-hmqd val set default message queue data flag for processes,\n"); erts_fprintf(stderr, " valid values are: off_heap | on_heap\n"); + erts_fprintf(stderr, "-IOp number set number of pollsets to be used to poll for I/O,\n"); + erts_fprintf(stderr, " This value has to be equal or smaller than the\n"); + erts_fprintf(stderr, " number of poll threads. If the current platform\n"); + erts_fprintf(stderr, " does not support concurrent update of pollsets\n"); + erts_fprintf(stderr, " this value is ignored.\n"); + erts_fprintf(stderr, "-IOt number set number of threads to be used to poll for I/O\n"); + erts_fprintf(stderr, "-IOPp number set number of pollsets as a percentage of the\n"); + erts_fprintf(stderr, " number of poll threads."); + erts_fprintf(stderr, "-IOPt number set number of threads to be used to poll for I/O\n"); + erts_fprintf(stderr, " as a percentage of the number of schedulers."); + /* erts_fprintf(stderr, "-i module set the boot module (default init)\n"); */ - erts_fprintf(stderr, "-K boolean enable or disable kernel poll\n"); erts_fprintf(stderr, "-n[s|a|d] Control behavior of signals to ports\n"); erts_fprintf(stderr, " Note that this flag is deprecated!\n"); erts_fprintf(stderr, "-M<X> <Y> memory allocator switches,\n"); @@ -624,7 +619,6 @@ void erts_usage(void) ERTS_SCHED_THREAD_MIN_STACK_SIZE, ERTS_SCHED_THREAD_MAX_STACK_SIZE, ERTS_DEFAULT_SCHED_STACK_SIZE); -#ifdef ERTS_DIRTY_SCHEDULERS erts_fprintf(stderr, "-sssdcpu size suggested stack size in kilo words for dirty CPU scheduler\n"); erts_fprintf(stderr, " threads, valid range is [%d-%d] (default %d)\n", ERTS_SCHED_THREAD_MIN_STACK_SIZE, @@ -635,7 +629,6 @@ void erts_usage(void) ERTS_SCHED_THREAD_MIN_STACK_SIZE, ERTS_SCHED_THREAD_MAX_STACK_SIZE, ERTS_DEFAULT_DIO_SCHED_STACK_SIZE); -#endif erts_fprintf(stderr, "-spp Bool set port parallelism scheduling hint\n"); erts_fprintf(stderr, "-S n1:n2 set number of schedulers (n1), and number of\n"); erts_fprintf(stderr, " schedulers online (n2), maximum for both\n"); @@ -644,7 +637,6 @@ void erts_usage(void) erts_fprintf(stderr, "-SP p1:p2 specify schedulers (p1) and schedulers online (p2)\n"); erts_fprintf(stderr, " as percentages of logical processors configured and logical\n"); erts_fprintf(stderr, " processors available, respectively\n"); -#ifdef ERTS_DIRTY_SCHEDULERS erts_fprintf(stderr, "-SDcpu n1:n2 set number of dirty CPU schedulers (n1), and number of\n"); erts_fprintf(stderr, " dirty CPU schedulers online (n2), valid range for both\n"); erts_fprintf(stderr, " numbers is [1-%d], and n2 must be less than or equal to 
n1\n", @@ -654,7 +646,6 @@ void erts_usage(void) erts_fprintf(stderr, " and logical processors available, respectively\n"); erts_fprintf(stderr, "-SDio n set number of dirty I/O schedulers, valid range is [0-%d]\n", ERTS_MAX_NO_OF_DIRTY_IO_SCHEDULERS); -#endif erts_fprintf(stderr, "-t size set the maximum number of atoms the emulator can handle\n"); erts_fprintf(stderr, " valid range is [%d-%d]\n", MIN_ATOM_TABLE_SIZE, MAX_ATOM_TABLE_SIZE); @@ -682,7 +673,6 @@ void erts_usage(void) erts_exit(1, ""); } -#ifdef USE_THREADS /* * allocators for thread lib */ @@ -724,7 +714,6 @@ static void ethr_ll_free(void *ptr) erts_free(ERTS_ALC_T_ETHR_LL, ptr); } -#endif static int early_init(int *argc, char **argv) /* @@ -742,22 +731,16 @@ early_init(int *argc, char **argv) /* int schdlrs_percentage = 100; int schdlrs_onln_percentage = 100; int max_main_threads; -#ifdef ERTS_DIRTY_SCHEDULERS int dirty_cpu_scheds; int dirty_cpu_scheds_online; int dirty_cpu_scheds_pctg = 100; int dirty_cpu_scheds_onln_pctg = 100; int dirty_io_scheds; -#endif int max_reader_groups; int reader_groups; char envbuf[21]; /* enough for any 64-bit integer */ size_t envbufsz; -#if defined(USE_THREADS) && !defined(ERTS_SMP) - erts_main_thread = erts_thr_self(); -#endif - erts_save_emu_args(*argc, argv); erts_sched_compact_load = 1; @@ -781,11 +764,6 @@ early_init(int *argc, char **argv) /* &ncpu, &ncpuonln, &ncpuavail); -#ifndef ERTS_SMP - ncpu = 1; - ncpuonln = 1; - ncpuavail = 1; -#endif ignore_break = 0; replace_intr = 0; @@ -797,18 +775,12 @@ early_init(int *argc, char **argv) /* erts_sys_pre_init(); erts_atomic_init_nob(&exiting, 0); -#ifdef ERTS_SMP erts_thr_progress_pre_init(); -#endif -#ifdef ERTS_SMP - erts_smp_atomic32_init_nob(&erts_writing_erl_crash_dump, 0L); + erts_atomic32_init_nob(&erts_writing_erl_crash_dump, 0L); erts_tsd_key_create(&erts_is_crash_dumping_key,"erts_is_crash_dumping_key"); -#else - erts_writing_erl_crash_dump = 0; -#endif - erts_smp_atomic32_init_nob(&erts_max_gen_gcs, + erts_atomic32_init_nob(&erts_max_gen_gcs, (erts_aint32_t) ((Uint16) -1)); erts_pre_init_process(); @@ -825,11 +797,9 @@ early_init(int *argc, char **argv) /* schdlrs = no_schedulers; schdlrs_onln = no_schedulers_online; -#ifdef ERTS_DIRTY_SCHEDULERS dirty_cpu_scheds = no_schedulers; dirty_cpu_scheds_online = no_schedulers_online; dirty_io_scheds = 10; -#endif envbufsz = sizeof(envbuf); @@ -882,6 +852,7 @@ early_init(int *argc, char **argv) /* } break; } + case 'S' : if (argv[i][2] == 'P') { int ptot, ponln; @@ -922,7 +893,6 @@ early_init(int *argc, char **argv) /* ("using %d:%d scheduler percentages\n", schdlrs_percentage, schdlrs_onln_percentage)); } -#ifdef ERTS_DIRTY_SCHEDULERS else if (argv[i][2] == 'D') { char *arg; char *type = argv[i]+3; @@ -1034,7 +1004,6 @@ early_init(int *argc, char **argv) /* break; } } -#endif else { int tot, onln; char *arg = get_arg(argv[i]+2, argv[i+1], &i); @@ -1093,7 +1062,6 @@ early_init(int *argc, char **argv) /* i++; } -#ifdef ERTS_SMP /* apply any scheduler percentages */ if (schdlrs_percentage != 100 || schdlrs_onln_percentage != 100) { schdlrs = schdlrs * schdlrs_percentage / 100; @@ -1117,12 +1085,6 @@ early_init(int *argc, char **argv) /* erts_usage(); } } -#else - /* Silence gcc warnings */ - (void)schdlrs_percentage; - (void)schdlrs_onln_percentage; -#endif -#ifdef ERTS_DIRTY_SCHEDULERS /* apply any dirty scheduler precentages */ if (dirty_cpu_scheds_pctg != 100 || dirty_cpu_scheds_onln_pctg != 100) { dirty_cpu_scheds = dirty_cpu_scheds * dirty_cpu_scheds_pctg / 100; @@ -1136,33 +1098,25 
@@ early_init(int *argc, char **argv) /* dirty_cpu_scheds_online = schdlrs_onln; if (dirty_cpu_scheds_online < 1) dirty_cpu_scheds_online = 1; -#endif } -#ifndef USE_THREADS - erts_async_max_threads = 0; -#endif -#ifdef ERTS_SMP no_schedulers = schdlrs; no_schedulers_online = schdlrs_onln; erts_no_schedulers = (Uint) no_schedulers; -#else - erts_no_schedulers = 1; -#endif -#ifdef ERTS_DIRTY_SCHEDULERS erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers = dirty_cpu_scheds; no_dirty_cpu_schedulers_online = dirty_cpu_scheds_online; erts_no_dirty_io_schedulers = no_dirty_io_schedulers = dirty_io_scheds; -#endif erts_early_init_scheduling(no_schedulers); alloc_opts.ncpu = ncpu; erts_alloc_init(argc, argv, &alloc_opts); /* Handles (and removes) -M flags. */ /* Require allocators */ -#ifdef ERTS_SMP + + erts_init_check_io(argc, argv); + /* * Thread progress management: * @@ -1170,22 +1124,18 @@ early_init(int *argc, char **argv) /* * ** Scheduler threads (see erl_process.c) * ** Aux thread (see erl_process.c) * ** Sys message dispatcher thread (see erl_trace.c) + * ** IO Poll threads (see erl_check_io.c) * * * Unmanaged threads that need to register: * ** Async threads (see erl_async.c) * ** Dirty scheduler threads */ erts_thr_progress_init(no_schedulers, - no_schedulers+2, -#ifndef ERTS_DIRTY_SCHEDULERS - erts_async_max_threads -#else + no_schedulers+2+erts_no_poll_threads, erts_async_max_threads + erts_no_dirty_cpu_schedulers + erts_no_dirty_io_schedulers -#endif ); -#endif erts_thr_q_init(); erts_init_utils(); erts_early_init_cpu_topology(no_schedulers, @@ -1193,7 +1143,6 @@ early_init(int *argc, char **argv) /* max_reader_groups, &reader_groups); -#ifdef USE_THREADS { erts_thr_late_init_data_t elid = ERTS_THR_LATE_INIT_DATA_DEF_INITER; elid.mem.std.alloc = ethr_std_alloc; @@ -1210,7 +1159,6 @@ early_init(int *argc, char **argv) /* erts_thr_late_init(&elid); } -#endif erts_msacc_early_init(); #ifdef ERTS_ENABLE_LOCK_CHECK @@ -1237,40 +1185,6 @@ early_init(int *argc, char **argv) /* return ncpu; } -#ifndef ERTS_SMP - -void *erts_scheduler_stack_limit; - - -static void set_main_stack_size(void) -{ - char c; - UWord stacksize; -# if HAVE_DECL_GETRLIMIT && HAVE_DECL_SETRLIMIT && HAVE_DECL_RLIMIT_STACK - struct rlimit rl; - int bytes; - stacksize = erts_sched_thread_suggested_stack_size * sizeof(Uint) * 1024; - /* Add some extra pages... neede by some systems... */ - bytes = (int) stacksize + 3*erts_sys_get_page_size(); - if (getrlimit(RLIMIT_STACK, &rl) != 0 || - (rl.rlim_cur = bytes, setrlimit(RLIMIT_STACK, &rl) != 0)) { - erts_fprintf(stderr, "failed to set stack size for scheduler " - "thread to %d bytes\n", bytes); - erts_usage(); - } -# else - if (modified_sched_thread_suggested_stack_size) { - erts_fprintf(stderr, "no OS support for dynamic stack size limit\n"); - erts_usage(); - } - /* Be conservative and hope it is not more than 64 kWords... */ - stacksize = 64*1024*sizeof(void *); -# endif - - erts_scheduler_stack_limit = erts_calc_stacklimit(&c, stacksize); -} - -#endif void erl_start(int argc, char **argv) @@ -1304,7 +1218,7 @@ erl_start(int argc, char **argv) envbufsz = sizeof(envbuf); if (erts_sys_getenv_raw("ERL_FULLSWEEP_AFTER", envbuf, &envbufsz) == 0) { Uint16 max_gen_gcs = atoi(envbuf); - erts_smp_atomic32_set_nob(&erts_max_gen_gcs, + erts_atomic32_set_nob(&erts_max_gen_gcs, (erts_aint32_t) max_gen_gcs); } @@ -1319,10 +1233,8 @@ erl_start(int argc, char **argv) * a lot of stack. 
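
The early_init() code above applies the -SP/-SDPcpu style percentages by scaling the counts and then clamping them into a valid range. Roughly, with illustrative bounds and numbers (in the real code the online percentage is applied to the configured-online count):

```c
#include <stdio.h>

/* Scale a count by a percentage, then clamp it into [min, max]. */
static int apply_pct(int n, int pct, int min, int max) {
    n = n * pct / 100;
    if (n < min) n = min;
    if (n > max) n = max;
    return n;
}

int main(void) {
    /* e.g. -SP 50:25 on a 16-core machine: 8 schedulers, 2 online. */
    int total  = apply_pct(16, 50, 1, 1024);
    int online = apply_pct(total, 25, 1, total);
    printf("%d schedulers, %d online\n", total, online);
    return 0;
}
```
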
@@ -1319,10 +1233,8 @@ erl_start(int argc, char **argv)
     * a lot of stack.
     */
    erts_sched_thread_suggested_stack_size = ERTS_DEFAULT_SCHED_STACK_SIZE;
-#ifdef ERTS_DIRTY_SCHEDULERS
    erts_dcpu_sched_thread_suggested_stack_size = ERTS_DEFAULT_DCPU_SCHED_STACK_SIZE;
    erts_dio_sched_thread_suggested_stack_size = ERTS_DEFAULT_DIO_SCHED_STACK_SIZE;
-#endif

#ifdef DEBUG
    verbose = DEBUG_DEFAULT;
#endif

@@ -1490,12 +1402,8 @@ erl_start(int argc, char **argv)
#ifdef DEBUG
	    strcat(tmp, ",DEBUG");
#endif
-#ifdef ERTS_SMP
	    strcat(tmp, ",SMP");
-#endif
-#ifdef USE_THREADS
	    strcat(tmp, ",ASYNC_THREADS");
-#endif
#ifdef HIPE
	    strcat(tmp, ",HIPE");
#endif
@@ -1671,16 +1579,6 @@ erl_start(int argc, char **argv)
	    have_break_handler = 0;
	    break;

-	case 'K':
-	    /* If kernel poll support is present,
-	       erl_sys_args() will remove the K parameter
-	       and value */
-	    get_arg(argv[i]+2, argv[i+1], &i);
-	    erts_fprintf(stderr,
-			 "kernel-poll not supported; \"K\" parameter ignored\n",
-			 arg);
-	    break;
-
	case 'n':
	    arg = get_arg(argv[i]+2, argv[i+1], &i);
	    switch (arg[0]) {
@@ -1850,22 +1748,9 @@ erl_start(int argc, char **argv)
		    erts_usage();
		}
	    }
-	    else if (has_prefix("ecio", sub_param)) {
-		arg = get_arg(sub_param+4, argv[i+1], &i);
-#ifndef __OSE__
-		if (sys_strcmp("true", arg) == 0)
-		    erts_eager_check_io = 1;
-		else
-#endif
-		if (sys_strcmp("false", arg) == 0)
-		    erts_eager_check_io = 0;
-		else {
-		    erts_fprintf(stderr,
-				 "bad schedule eager check I/O value '%s'\n",
-				 arg);
-		    erts_usage();
-		}
-	    }
+	    else if (has_prefix("ecio", sub_param)) {
+		/* ignore argument, eager check io no longer used */
+	    }
	    else if (has_prefix("pp", sub_param)) {
		arg = get_arg(sub_param+2, argv[i+1], &i);
		if (sys_strcmp(arg, "true") == 0)
@@ -1941,7 +1826,6 @@ erl_start(int argc, char **argv)
		VERBOSE(DEBUG_SYSTEM,
			("scheduler wakeup threshold: %s\n", arg));
	    }
-#ifdef ERTS_DIRTY_SCHEDULERS
	    else if (has_prefix("ssdcpu", sub_param)) {
		/* suggested stack size (Kilo Words) for dirty CPU scheduler threads */
		arg = get_arg(sub_param+6, argv[i+1], &i);
@@ -1976,7 +1860,6 @@ erl_start(int argc, char **argv)
			("suggested dirty IO scheduler thread stack size %d kilo words\n",
			 erts_dio_sched_thread_suggested_stack_size));
	    }
-#endif
	    else if (has_prefix("ss", sub_param)) {
		/* suggested stack size (Kilo Words) for scheduler threads */
		arg = get_arg(sub_param+2, argv[i+1], &i);
@@ -2007,9 +1890,7 @@ erl_start(int argc, char **argv)
			     arg);
		    erts_usage();
		}
-#ifdef ERTS_SMP
		erts_runq_supervision_interval = val;
-#endif
	    }
	    else {
		erts_fprintf(stderr, "bad scheduling option %s\n", argv[i]);
@@ -2293,12 +2174,10 @@ erl_start(int argc, char **argv)
    if (erts_sched_thread_suggested_stack_size < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
	erts_sched_thread_suggested_stack_size = ERTS_SCHED_THREAD_MIN_STACK_SIZE;
-#ifdef ERTS_DIRTY_SCHEDULERS
    if (erts_dcpu_sched_thread_suggested_stack_size < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
	erts_dcpu_sched_thread_suggested_stack_size = ERTS_SCHED_THREAD_MIN_STACK_SIZE;
    if (erts_dio_sched_thread_suggested_stack_size < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
	erts_dio_sched_thread_suggested_stack_size = ERTS_SCHED_THREAD_MIN_STACK_SIZE;
-#endif

    erl_init(ncpu,
	     proc_tab_sz,
@@ -2343,7 +2222,6 @@ erl_start(int argc, char **argv)
	       && erts_literal_area_collector->common.id == pid);
	erts_proc_inc_refc(erts_literal_area_collector);

-#ifdef ERTS_DIRTY_SCHEDULERS
	pid = erl_system_process_otp(otp_ring0_pid, "erts_dirty_process_code_checker", !0);
	erts_dirty_process_code_checker
	    = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc,
@@ -2351,11 +2229,9 @@ erl_start(int argc, char **argv)
	ASSERT(erts_dirty_process_code_checker &&
	       erts_dirty_process_code_checker->common.id == pid);
	erts_proc_inc_refc(erts_dirty_process_code_checker);
-#endif
    }

-#ifdef ERTS_SMP
    erts_start_schedulers();

#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -2364,31 +2240,9 @@ erl_start(int argc, char **argv)
    /* Let system specific code decide what to do with the main thread... */
    erts_sys_main_thread(); /* May or may not return! */
-#else
-    {
-	ErtsSchedulerData *esdp = erts_get_scheduler_data();
-	erts_msacc_init_thread("scheduler", 1, 1);
-	erts_thr_set_main_status(1, 1);
-#if ERTS_USE_ASYNC_READY_Q
-	esdp->aux_work_data.async_ready.queue
-	    = erts_get_async_ready_queue(1);
-#endif
-	set_main_stack_size();
-	erts_sched_init_time_sup(esdp);
-	erts_ets_sched_spec_data_init(esdp);
-	erts_aux_work_timeout_late_init(esdp);
-
-#ifdef ERTS_ENABLE_LOCK_COUNT
-	erts_lcnt_post_startup();
-#endif
-
-	process_main(esdp->x_reg_array, esdp->f_reg_array);
-    }
-#endif
}

-#ifdef USE_THREADS
__decl_noreturn void erts_thr_fatal_error(int err, char *what)
{
@@ -2402,7 +2256,6 @@ __decl_noreturn void erts_thr_fatal_error(int err, char *what)
    abort();
}
-#endif

static void
system_cleanup(int flush_async)
@@ -2415,7 +2268,6 @@ system_cleanup(int flush_async)
	 * Another thread is currently exiting the system;
	 * wait for it to do its job.
	 */
-#ifdef ERTS_SMP
	if (erts_thr_progress_is_managed_thread()) {
	    /*
	     * The exiting thread might be waiting for
@@ -2424,7 +2276,6 @@ system_cleanup(int flush_async)
	    erts_thr_progress_active(NULL, 0);
	    erts_thr_progress_prepare_wait(NULL);
	}
-#endif
	/* Wait forever... */
	while (1)
	    erts_milli_sleep(10000000);
@@ -2439,17 +2290,12 @@ system_cleanup(int flush_async)
    if (!flush_async
	|| !erts_initialized
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
-	|| !erts_equal_tids(erts_main_thread, erts_thr_self())
-#endif
	)
	return;

-#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0);
#endif
-#endif

    erts_exit_flush_async();
}

diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 189c88ac4a..4cdef0200f 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -75,12 +75,9 @@ static erts_lc_lock_order_t erts_lock_order[] = {
     *   if only one lock use
     *   the lock name)"
     */
-#ifdef ERTS_SMP
    { "driver_lock", "driver_name" },
    { "port_lock", "port_id" },
-#endif
    { "port_data_lock", "address" },
-#ifdef ERTS_SMP
    { "bif_timers", NULL },
    { "reg_tab", NULL },
    { "proc_main", "pid" },
@@ -89,9 +86,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
    { "hipe_mfait_lock", NULL },
#endif
    { "nodes_monitors", NULL },
-#ifdef ERTS_SMP
    { "resource_monitors", "address" },
-#endif
    { "driver_list", NULL },
    { "proc_link", "pid" },
    { "proc_msgq", "pid" },
@@ -114,22 +109,18 @@ static erts_lc_lock_order_t erts_lock_order[] = {
    { "fun_tab", NULL },
    { "environ", NULL },
    { "release_literal_areas", NULL },
-#endif
    { "efile_drv", "address" },
    { "drv_ev_state_grow", NULL, },
    { "drv_ev_state", "address" },
    { "safe_hash", "address" },
-    { "pollset_rm_list", NULL },
    { "removed_fd_pre_alloc_lock", "address" },
    { "state_prealloc", NULL },
    { "schdlr_sspnd", NULL },
    { "migration_info_update", NULL },
    { "run_queue", "address" },
-#ifdef ERTS_DIRTY_SCHEDULERS
    { "dirty_run_queue_sleep_list", "address" },
    { "dirty_gc_info", NULL },
    { "dirty_break_point_index", NULL },
-#endif
    { "process_table", NULL },
    { "cpu_info", NULL },
    { "pollset", "address" },
@@ -144,7 +135,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
    { "async_enq_mtx", NULL },
    { "msacc_list_mutex", NULL },
    { "msacc_unmanaged_mutex", NULL },
-#ifdef ERTS_SMP
    { "atom_tab", NULL },
    { "misc_op_list_pre_alloc_lock", "address" },
    { "message_pre_alloc_lock", "address" },
@@ -155,17 +145,13 @@ static erts_lc_lock_order_t erts_lock_order[] = {
    { "sys_msg_q", NULL },
    { "tracer_mtx", NULL },
    { "port_table", NULL },
-#endif
    { "magic_ref_table", "address" },
    { "mtrace_op", NULL },
    { "instr_x", NULL },
    { "instr", NULL },
-#ifdef ERTS_SMP
    { "pollsets_lock", NULL },
-#endif
    { "alcu_allocator", "index" },
    { "mseg", NULL },
-#ifdef ERTS_SMP
    { "port_task_pre_alloc_lock", "address" },
    { "proclist_pre_alloc_lock", "address" },
    { "xports_list_pre_alloc_lock", "address" },
@@ -178,7 +164,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
    { "pix_lock", "address" },
    { "run_queues_lists", NULL },
    { "sched_stat", NULL },
-#endif
    { "async_init_mtx", NULL },
#ifdef __WIN32__
#ifdef DEBUG
@@ -189,9 +174,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
    { "efile_drv dtrace mutex", NULL },
#endif
    { "mtrace_buf", NULL },
-#ifdef ERTS_SMP
    { "os_monotonic_time", NULL },
-#endif
    { "erts_alloc_hard_debug", NULL },
    { "hard_dbg_mseg", NULL },
    { "erts_mmap", NULL }

diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
index 8c754a8dfa..5c2c38e8f2 100644
--- a/erts/emulator/beam/erl_lock_check.h
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -96,13 +96,7 @@ int erts_lc_is_emu_thr(void);
#define ERTS_LC_ASSERT(A) \
    ((void) (((A) || ERTS_SOMEONE_IS_CRASH_DUMPING) ? 1 : erts_lc_assert_failed(__FILE__, __LINE__, #A)))
-#ifdef ERTS_SMP
-#define ERTS_SMP_LC_ASSERT(A) ERTS_LC_ASSERT(A)
-#else
-#define ERTS_SMP_LC_ASSERT(A) ((void) 1)
-#endif
#else /* #ifdef ERTS_ENABLE_LOCK_CHECK */
-#define ERTS_SMP_LC_ASSERT(A) ((void) 1)
#define ERTS_LC_ASSERT(A) ((void) 1)
#endif /* #ifdef ERTS_ENABLE_LOCK_CHECK */

diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index d2e8f47d59..1ae6076b12 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -554,16 +554,6 @@ erts_lock_flags_t erts_lcnt_get_category_mask() {
    return lcnt_category_mask__;
}

-#ifdef ERTS_ENABLE_KERNEL_POLL
-/* erl_poll/erl_check_io only exports one of these variants at a time, and we
- * may need to use either one depending on emulator startup flags.
- */
-void erts_lcnt_update_pollset_locks_nkp(int);
-void erts_lcnt_update_pollset_locks_kp(int);
-
-void erts_lcnt_update_cio_locks_nkp(int);
-void erts_lcnt_update_cio_locks_kp(int);
-#endif
-
void erts_lcnt_set_category_mask(erts_lock_flags_t mask) {
    erts_lock_flags_t changed_categories;
@@ -590,19 +580,7 @@ void erts_lcnt_set_category_mask(erts_lock_flags_t mask) {
    }

    if(changed_categories & ERTS_LOCK_FLAGS_CATEGORY_IO) {
-#ifdef ERTS_ENABLE_KERNEL_POLL
-        if(erts_use_kernel_poll) {
-            erts_lcnt_update_pollset_locks_kp(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
-            erts_lcnt_update_cio_locks_kp(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
-        } else {
-            erts_lcnt_update_pollset_locks_nkp(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
-            erts_lcnt_update_cio_locks_nkp(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
-        }
-#else
-        erts_lcnt_update_pollset_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
        erts_lcnt_update_cio_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
-#endif
-
        erts_lcnt_update_driver_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
        erts_lcnt_update_port_locks(mask & ERTS_LOCK_FLAGS_CATEGORY_IO);
    }
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index b30c4a49d7..abf194cf94 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -170,7 +170,7 @@ erts_cleanup_offheap(ErlOffHeap *offheap)
	    erts_bin_release(u.pb->val);
	    break;
	case FUN_SUBTAG:
-	    if (erts_smp_refc_dectest(&u.fun->fe->refc, 0) == 0) {
+	    if (erts_refc_dectest(&u.fun->fe->refc, 0) == 0) {
		erts_erase_fun_entry(u.fun->fe);
	    }
	    break;
@@ -265,11 +265,9 @@ erts_queue_dist_message(Process *rcvr,
    Sint tok_lastcnt = 0;
    Sint tok_serial = 0;
#endif
-#ifdef ERTS_SMP
    erts_aint_t state;
-#endif

-    ERTS_SMP_LC_ASSERT(rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));
+    ERTS_LC_ASSERT(rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));

    mp = erts_alloc_message(0, NULL);
    mp->data.dist_ext = dist_ext;
@@ -283,36 +281,34 @@ erts_queue_dist_message(Process *rcvr,
#endif
    ERL_MESSAGE_TOKEN(mp) = token;

-#ifdef ERTS_SMP
    if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ)) {
-	if (erts_smp_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
+	if (erts_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
	    ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
	    ErtsProcLocks unlocks =
		rcvr_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ);
	    if (unlocks) {
-		erts_smp_proc_unlock(rcvr, unlocks);
+		erts_proc_unlock(rcvr, unlocks);
		need_locks |= unlocks;
	    }
-	    erts_smp_proc_lock(rcvr, need_locks);
+	    erts_proc_lock(rcvr, need_locks);
	}
    }

-    state = erts_smp_atomic32_read_acqb(&rcvr->state);
+    state = erts_atomic32_read_acqb(&rcvr->state);
    if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
	if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
-	    erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
+	    erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
	/* Drop message if receiver is exiting or has a pending exit ... */
	erts_cleanup_messages(mp);
    }
    else
-#endif
    if (IS_TRACED_FL(rcvr, F_TRACE_RECEIVE)) {
	if (from == am_Empty)
	    from = dist_ext->dep->sysname;

	/* Ahh... need to decode it in order to trace it... */
	if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
-	    erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
+	    erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
	if (!erts_decode_dist_message(rcvr, rcvr_locks, mp, 0))
	    erts_free_message(mp);
	else {
@@ -361,14 +357,10 @@ erts_queue_dist_message(Process *rcvr,

	LINK_MESSAGE(rcvr, mp, &mp->next, 1);

	if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
-	    erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
+	    erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);

	erts_proc_notify_new_message(rcvr,
-#ifdef ERTS_SMP
				     rcvr_locks
-#else
-				     0
-#endif
	    );
    }
}
@@ -393,50 +385,45 @@ queue_messages(Process* receiver,
	   ERL_MESSAGE_TOKEN(first) == NIL ||
	   is_tuple(ERL_MESSAGE_TOKEN(first)));

-#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
-    ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(receiver) < ERTS_PROC_LOCK_MSGQ ||
+    ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(receiver) < ERTS_PROC_LOCK_MSGQ ||
		   receiver_locks == erts_proc_lc_my_proc_locks(receiver));
#endif

    if (!(receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
-	if (erts_smp_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
+	if (erts_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
	    ErtsProcLocks need_locks;

	    if (receiver_state)
		state = *receiver_state;
	    else
-		state = erts_smp_atomic32_read_nob(&receiver->state);
+		state = erts_atomic32_read_nob(&receiver->state);
	    if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
		goto exiting;

	    need_locks = receiver_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ);
	    if (need_locks) {
-		erts_smp_proc_unlock(receiver, need_locks);
+		erts_proc_unlock(receiver, need_locks);
	    }
	    need_locks |= ERTS_PROC_LOCK_MSGQ;
-	    erts_smp_proc_lock(receiver, need_locks);
+	    erts_proc_lock(receiver, need_locks);
	}
	locked_msgq = 1;
    }
-#endif

-    state = erts_smp_atomic32_read_nob(&receiver->state);
+    state = erts_atomic32_read_nob(&receiver->state);

    if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
-#ifdef ERTS_SMP
    exiting:
-#endif
	/* Drop message if receiver is exiting or has a pending exit... */
	if (locked_msgq)
-	    erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
+	    erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
	erts_cleanup_messages(first);
	return 0;
    }

    res = receiver->msg.len;
-#ifdef ERTS_SMP
    if (receiver_locks & ERTS_PROC_LOCK_MAIN) {
	/*
	 * We move 'in queue' to 'private queue' and place
@@ -447,11 +434,10 @@ queue_messages(Process* receiver,
	 * the root set when garbage collecting.
	 */
	res += receiver->msg_inq.len;
-	ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver);
+	ERTS_MSGQ_MV_INQ2PRIVQ(receiver);
	LINK_MESSAGE_PRIVQ(receiver, first, last, len);
    }
    else
-#endif
    {
	LINK_MESSAGE(receiver, first, last, len);
    }
@@ -489,14 +475,10 @@ queue_messages(Process* receiver,
    }

    if (locked_msgq) {
-	erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
+	erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
    }

-#ifdef ERTS_SMP
    erts_proc_notify_new_message(receiver, receiver_locks);
-#else
-    erts_proc_notify_new_message(receiver, 0);
-#endif
    return res;
}
@@ -594,9 +576,7 @@ erts_try_alloc_message_on_heap(Process *pp,
			       ErlOffHeap **ohpp,
			       int *on_heap_p)
{
-#ifdef ERTS_SMP
    int locked_main = 0;
-#endif
    ErtsMessage *mp;

    ASSERT(!(*psp & ERTS_PSFLG_OFF_HEAP_MSGQ));
@@ -604,15 +584,9 @@ erts_try_alloc_message_on_heap(Process *pp,
    if ((*psp) & ERTS_PSFLGS_VOLATILE_HEAP)
	goto in_message_fragment;
    else if (
-#if defined(ERTS_SMP)
	*plp & ERTS_PROC_LOCK_MAIN
-#else
-	pp
-#endif
	) {
-#ifdef ERTS_SMP
    try_on_heap:
-#endif
	if (((*psp) & ERTS_PSFLGS_VOLATILE_HEAP)
	    || (pp->flags & F_DISABLE_GC)
	    || HEAP_LIMIT(pp) - HEAP_TOP(pp) <= sz) {
	    /*
	     * The heap is either potentially in an inconsistent
	     * state, or not large enough.
	     */
-#ifdef ERTS_SMP
	    if (locked_main) {
		*plp &= ~ERTS_PROC_LOCK_MAIN;
-		erts_smp_proc_unlock(pp, ERTS_PROC_LOCK_MAIN);
+		erts_proc_unlock(pp, ERTS_PROC_LOCK_MAIN);
	    }
-#endif
	    goto in_message_fragment;
	}

@@ -636,14 +608,12 @@ erts_try_alloc_message_on_heap(Process *pp,
	mp->data.attached = NULL;
	*on_heap_p = !0;
    }
-#ifdef ERTS_SMP
-    else if (pp && erts_smp_proc_trylock(pp, ERTS_PROC_LOCK_MAIN) == 0) {
+    else if (pp && erts_proc_trylock(pp, ERTS_PROC_LOCK_MAIN) == 0) {
	locked_main = 1;
-	*psp = erts_smp_atomic32_read_nob(&pp->state);
+	*psp = erts_atomic32_read_nob(&pp->state);
	*plp |= ERTS_PROC_LOCK_MAIN;
	goto try_on_heap;
    }
-#endif
    else {
    in_message_fragment:
	if (!((*psp) & ERTS_PSFLG_ON_HEAP_MSGQ)) {
@@ -712,7 +682,7 @@ erts_send_message(Process* sender,
    }
#endif

-    receiver_state = erts_smp_atomic32_read_nob(&receiver->state);
+    receiver_state = erts_atomic32_read_nob(&receiver->state);

    if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) {
	Eterm* hp;
@@ -961,7 +931,7 @@ erts_move_messages_off_heap(Process *c_p)

    reds += c_p->msg.len / 10;

-    ASSERT(erts_smp_atomic32_read_nob(&c_p->state)
+    ASSERT(erts_atomic32_read_nob(&c_p->state)
	   & ERTS_PSFLG_OFF_HEAP_MSGQ);
    ASSERT(c_p->flags & F_OFF_HEAP_MSGQ_CHNG);
@@ -1026,9 +996,9 @@ erts_complete_off_heap_message_queue_change(Process *c_p)
{
    int reds = 1;

-    ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
+    ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
    ASSERT(c_p->flags & F_OFF_HEAP_MSGQ_CHNG);
-    ASSERT(erts_smp_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_OFF_HEAP_MSGQ);
+    ASSERT(erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_OFF_HEAP_MSGQ);

    /*
     * This job was first initiated when the process changed to off heap
@@ -1040,13 +1010,13 @@ erts_complete_off_heap_message_queue_change(Process *c_p)
     */

    if (!(c_p->flags & F_OFF_HEAP_MSGQ))
-	erts_smp_atomic32_read_band_nob(&c_p->state,
+	erts_atomic32_read_band_nob(&c_p->state,
					~ERTS_PSFLG_OFF_HEAP_MSGQ);
    else {
	reds += 2;
-	erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
-	ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
-	erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
+	erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
+	ERTS_MSGQ_MV_INQ2PRIVQ(c_p);
+	erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
	reds += erts_move_messages_off_heap(c_p);
    }
    c_p->flags &= ~F_OFF_HEAP_MSGQ_CHNG;
@@ -1083,16 +1053,16 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state)

#ifdef DEBUG
    if (c_p->flags & F_OFF_HEAP_MSGQ) {
-	ASSERT(erts_smp_atomic32_read_nob(&c_p->state)
+	ASSERT(erts_atomic32_read_nob(&c_p->state)
	       & ERTS_PSFLG_OFF_HEAP_MSGQ);
    }
    else {
	if (c_p->flags & F_OFF_HEAP_MSGQ_CHNG) {
-	    ASSERT(erts_smp_atomic32_read_nob(&c_p->state)
+	    ASSERT(erts_atomic32_read_nob(&c_p->state)
		   & ERTS_PSFLG_OFF_HEAP_MSGQ);
	}
	else {
-	    ASSERT(!(erts_smp_atomic32_read_nob(&c_p->state)
+	    ASSERT(!(erts_atomic32_read_nob(&c_p->state)
		     & ERTS_PSFLG_OFF_HEAP_MSGQ));
	}
    }
@@ -1109,7 +1079,7 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state)
	case am_on_heap:
	    c_p->flags |= F_ON_HEAP_MSGQ;
	    c_p->flags &= ~F_OFF_HEAP_MSGQ;
-	    erts_smp_atomic32_read_bor_nob(&c_p->state,
+	    erts_atomic32_read_bor_nob(&c_p->state,
					   ERTS_PSFLG_ON_HEAP_MSGQ);
	    /*
	     * We are not allowed to clear ERTS_PSFLG_OFF_HEAP_MSGQ
@@ -1118,7 +1088,7 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state)
	     */
	    if (!(c_p->flags & F_OFF_HEAP_MSGQ_CHNG)) {
		/* Safe to clear ERTS_PSFLG_OFF_HEAP_MSGQ... */
-		erts_smp_atomic32_read_band_nob(&c_p->state,
+		erts_atomic32_read_band_nob(&c_p->state,
						~ERTS_PSFLG_OFF_HEAP_MSGQ);
	    }
	    break;
@@ -1136,7 +1106,7 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state)
	    break;
	case am_off_heap:
	    c_p->flags &= ~F_ON_HEAP_MSGQ;
-	    erts_smp_atomic32_read_band_nob(&c_p->state,
+	    erts_atomic32_read_band_nob(&c_p->state,
					    ~ERTS_PSFLG_ON_HEAP_MSGQ);
	    goto change_to_off_heap;
	default:
@@ -1171,7 +1141,7 @@ change_to_off_heap:
	 * change has completed, GC does not need to inspect
	 * the message queue at all.
	 */
-	erts_smp_atomic32_read_bor_nob(&c_p->state,
+	erts_atomic32_read_bor_nob(&c_p->state,
				       ERTS_PSFLG_OFF_HEAP_MSGQ);
	c_p->flags |= F_OFF_HEAP_MSGQ_CHNG;
	cohmq = erts_alloc(ERTS_ALC_T_MSGQ_CHNG,
@@ -1452,7 +1422,7 @@ erts_factory_message_create(ErtsHeapFactory* factory,
    int on_heap;
    erts_aint32_t state;

-    state = proc ? erts_smp_atomic32_read_nob(&proc->state) : 0;
+    state = proc ? erts_atomic32_read_nob(&proc->state) : 0;

    if (state & ERTS_PSFLG_OFF_HEAP_MSGQ) {
	msgp = erts_alloc_message(sz, &hp);
@@ -1467,7 +1437,7 @@ erts_factory_message_create(ErtsHeapFactory* factory,
    }

    if (on_heap) {
-	ERTS_SMP_ASSERT(*proc_locksp & ERTS_PROC_LOCK_MAIN);
+	ERTS_ASSERT(*proc_locksp & ERTS_PROC_LOCK_MAIN);
	ASSERT(ohp == &proc->off_heap);
	factory->mode = FACTORY_HALLOC;
	factory->p = proc;

diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index 42ed14e69c..9c8cf84e43 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -174,7 +174,6 @@ typedef struct {
    ErtsMessage** saved_last;	/* saved last pointer */
} ErlMessageQueue;

-#ifdef ERTS_SMP

typedef struct {
    ErtsMessage* first;
@@ -190,7 +189,6 @@ typedef struct erl_trace_message_queue__ {
    Sint len;            /* queue length */
} ErlTraceMessageQueue;

-#endif

/* Get "current" message */
#define PEEK_MESSAGE(p)  (*(p)->msg.save)
@@ -207,7 +205,6 @@ typedef struct erl_trace_message_queue__ {
	(p)->where.len += (num_msgs);				\
    } while(0)

-#ifdef ERTS_SMP

/* Add message last in private message queue */
#define LINK_MESSAGE_PRIVQ(p, first_msg, last_msg, len)	\
@@ -219,7 +216,7 @@ typedef struct erl_trace_message_queue__ {
#define LINK_MESSAGE(p, first_msg, last_msg, len) \
    LINK_MESSAGE_IMPL(p, first_msg, last_msg, len, msg_inq)

-#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p)				\
+#define ERTS_MSGQ_MV_INQ2PRIVQ(p)				\
    do {							\
	if (p->msg_inq.first) {					\
	    *p->msg.last = p->msg_inq.first;			\
@@ -231,17 +228,6 @@ typedef struct erl_trace_message_queue__ {
	}							\
    } while (0)

-#else
-
-#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p)
-
-/* Add message last_msg in message queue */
-#define LINK_MESSAGE(p, first_msg, last_msg, len)		\
-    do {							\
-	LINK_MESSAGE_IMPL(p, first_msg, last_msg, len, msg);	\
-    } while(0)
-
-#endif

/* Unlink current message */
#define UNLINK_MESSAGE(p,msgp) do { \
diff --git a/erts/emulator/beam/erl_monitors.c b/erts/emulator/beam/erl_monitors.c
index 3994800ba7..1c840d89f6 100644
--- a/erts/emulator/beam/erl_monitors.c
+++ b/erts/emulator/beam/erl_monitors.c
@@ -54,7 +54,7 @@
#define DIR_RIGHT 1
#define DIR_END 2

-static erts_smp_atomic_t tot_link_lh_size;
+static erts_atomic_t tot_link_lh_size;

/* Implements the sort order in monitor trees, which is different from
   the ordinary term order.
@@ -123,7 +123,7 @@ do {									\
	(*((Hp)++)) = boxed_val((From))[i__];				\
	if (is_external((To))) {					\
	    external_thing_ptr((To))->next = NULL;			\
-	    erts_smp_refc_inc(&(external_thing_ptr((To))->node->refc), 2);\
+	    erts_refc_inc(&(external_thing_ptr((To))->node->refc), 2);\
	}								\
    }									\
} while (0)
@@ -145,7 +145,7 @@ static ErtsMonitor *create_monitor(Uint type, Eterm ref, UWord entity, Eterm nam
    } else {
	n = (ErtsMonitor *) erts_alloc(ERTS_ALC_T_MONITOR_LH,
				       mon_size*sizeof(Uint));
-	erts_smp_atomic_add_nob(&tot_link_lh_size, mon_size*sizeof(Uint));
+	erts_atomic_add_nob(&tot_link_lh_size, mon_size*sizeof(Uint));
    }
    hp = n->heap;
@@ -179,7 +179,7 @@ static ErtsLink *create_link(Uint type, Eterm pid)
    } else {
	n = (ErtsLink *) erts_alloc(ERTS_ALC_T_NLINK_LH,
				    lnk_size*sizeof(Uint));
-	erts_smp_atomic_add_nob(&tot_link_lh_size, lnk_size*sizeof(Uint));
+	erts_atomic_add_nob(&tot_link_lh_size, lnk_size*sizeof(Uint));
    }
    hp = n->heap;
@@ -214,13 +214,13 @@ static ErtsSuspendMonitor *create_suspend_monitor(Eterm pid)
void
erts_init_monitors(void)
{
-    erts_smp_atomic_init_nob(&tot_link_lh_size, 0);
+    erts_atomic_init_nob(&tot_link_lh_size, 0);
}

Uint
erts_tot_link_lh_size(void)
{
-    return (Uint) erts_smp_atomic_read_nob(&tot_link_lh_size);
+    return (Uint) erts_atomic_read_nob(&tot_link_lh_size);
}

void erts_destroy_monitor(ErtsMonitor *mon)
@@ -245,7 +245,7 @@ void erts_destroy_monitor(ErtsMonitor *mon)
	erts_free(ERTS_ALC_T_MONITOR_SH, (void *) mon);
    } else {
	erts_free(ERTS_ALC_T_MONITOR_LH, (void *) mon);
-	erts_smp_atomic_add_nob(&tot_link_lh_size, -1*mon_size*sizeof(Uint));
+	erts_atomic_add_nob(&tot_link_lh_size, -1*mon_size*sizeof(Uint));
    }
}
@@ -267,7 +267,7 @@ void erts_destroy_link(ErtsLink *lnk)
	erts_free(ERTS_ALC_T_NLINK_SH, (void *) lnk);
    } else {
	erts_free(ERTS_ALC_T_NLINK_LH, (void *) lnk);
-	erts_smp_atomic_add_nob(&tot_link_lh_size, -1*lnk_size*sizeof(Uint));
+	erts_atomic_add_nob(&tot_link_lh_size, -1*lnk_size*sizeof(Uint));
    }
}
@@ -985,15 +985,14 @@ Eterm erts_debug_dump_monitors_1(BIF_ALIST_1)
    DistEntry *dep;
    rp = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, pid, ERTS_PROC_LOCK_LINK);
    if (!rp) {
-	ERTS_SMP_ASSERT_IS_NOT_EXITING(p);
+	ERTS_ASSERT_IS_NOT_EXITING(p);
	if (is_atom(pid) && is_node_name_atom(pid) &&
	    (dep = erts_find_dist_entry(pid)) != NULL) {
	    erts_printf("Dumping dist monitors-------------------\n");
-	    erts_smp_de_links_lock(dep);
+	    erts_de_links_lock(dep);
	    erts_dump_monitors(dep->monitors,0);
-	    erts_smp_de_links_unlock(dep);
+	    erts_de_links_unlock(dep);
	    erts_printf("Monitors dumped-------------------------\n");
-	    erts_deref_dist_entry(dep);
	    BIF_RET(am_true);
	} else {
	    BIF_ERROR(p,BADARG);
@@ -1002,7 +1001,7 @@ Eterm erts_debug_dump_monitors_1(BIF_ALIST_1)
	erts_printf("Dumping pid monitors--------------------\n");
	erts_dump_monitors(ERTS_P_MONITORS(rp),0);
	erts_printf("Monitors dumped-------------------------\n");
-	erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+	erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
	BIF_RET(am_true);
    }
}
@@ -1030,15 +1029,14 @@ Eterm erts_debug_dump_links_1(BIF_ALIST_1)
    } else {
	rp = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, pid, ERTS_PROC_LOCK_LINK);
	if (!rp) {
-	    ERTS_SMP_ASSERT_IS_NOT_EXITING(p);
+	    ERTS_ASSERT_IS_NOT_EXITING(p);
	    if (is_atom(pid) && is_node_name_atom(pid) &&
		(dep = erts_find_dist_entry(pid)) != NULL) {
		erts_printf("Dumping dist links----------------------\n");
-		erts_smp_de_links_lock(dep);
+		erts_de_links_lock(dep);
		erts_dump_links(dep->nlinks,0);
-		erts_smp_de_links_unlock(dep);
+		erts_de_links_unlock(dep);
		erts_printf("Links dumped----------------------------\n");
-		erts_deref_dist_entry(dep);
		BIF_RET(am_true);
	    } else {
		BIF_ERROR(p,BADARG);
@@ -1048,7 +1046,7 @@ Eterm erts_debug_dump_links_1(BIF_ALIST_1)
	    erts_printf("Dumping pid links-----------------------\n");
	    erts_dump_links(ERTS_P_LINKS(rp), 0);
	    erts_printf("Links dumped----------------------------\n");
-	    erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
+	    erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
	    BIF_RET(am_true);
	}
    }

diff --git a/erts/emulator/beam/erl_msacc.c b/erts/emulator/beam/erl_msacc.c
index 6c477be615..d659842b7e 100644
--- a/erts/emulator/beam/erl_msacc.c
+++ b/erts/emulator/beam/erl_msacc.c
@@ -48,11 +48,7 @@ static Eterm erts_msacc_gather_stats(ErtsMsAcc *msacc, ErtsHeapFactory *factory)
static void erts_msacc_reset(ErtsMsAcc *msacc);
static ErtsMsAcc* get_msacc(void);

-#ifdef USE_THREADS
erts_tsd_key_t ERTS_WRITE_UNLIKELY(erts_msacc_key);
-#else
-ErtsMsAcc *ERTS_WRITE_UNLIKELY(erts_msacc) = NULL;
-#endif
#ifndef ERTS_MSACC_ALWAYS_ON
int ERTS_WRITE_UNLIKELY(erts_msacc_enabled);
#endif
@@ -60,10 +56,8 @@ int ERTS_WRITE_UNLIKELY(erts_msacc_enabled);
static Eterm *erts_msacc_state_atoms = NULL;
static erts_rwmtx_t msacc_mutex;
static ErtsMsAcc *msacc_managed = NULL;
-#ifdef USE_THREADS
static ErtsMsAcc *msacc_unmanaged = NULL;
static Uint msacc_unmanaged_count = 0;
-#endif

#if ERTS_MSACC_STATE_COUNT < MAP_SMALL_MAP_LIMIT
#define DEFAULT_MSACC_MSG_SIZE (3 + 1 + ERTS_MSACC_STATE_COUNT * 2 + 3 + ERTS_REF_THING_SIZE)
@@ -78,11 +72,7 @@ void erts_msacc_early_init(void) {
#endif
    erts_rwmtx_init(&msacc_mutex, "msacc_list_mutex", NIL,
	ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
-#ifdef USE_THREADS
    erts_tsd_key_create(&erts_msacc_key,"erts_msacc_key");
-#else
-    erts_msacc = NULL;
-#endif
}

void erts_msacc_init(void) {
@@ -107,7 +97,6 @@ void erts_msacc_init_thread(char *type, int id, int managed) {
    msacc->tid = erts_thr_self();
    msacc->perf_counter = 0;

-#ifdef USE_THREADS
    erts_rwmtx_rwlock(&msacc_mutex);
    if (!managed) {
	erts_mtx_init(&msacc->mtx, "msacc_unmanaged_mutex", NIL,
@@ -121,9 +110,6 @@ void erts_msacc_init_thread(char *type, int id, int managed) {
	msacc_managed = msacc;
    }
    erts_rwmtx_rwunlock(&msacc_mutex);
-#else
-    msacc_managed = msacc;
-#endif

    erts_msacc_reset(msacc);
@@ -216,7 +202,7 @@ typedef struct {
    Eterm ref;
    Eterm ref_heap[ERTS_REF_THING_SIZE];
    Uint req_sched;
-    erts_smp_atomic32_t refc;
+    erts_atomic32_t refc;
} ErtsMSAccReq;

static ErtsMsAcc* get_msacc(void) {
@@ -267,7 +253,7 @@ static void send_reply(ErtsMsAcc *msacc, ErtsMSAccReq *msaccrp) {
	rp_locks &= ~ERTS_PROC_LOCK_MAIN;

    if (rp_locks)
-	erts_smp_proc_unlock(rp, rp_locks);
+	erts_proc_unlock(rp, rp_locks);

}
@@ -303,7 +289,7 @@ reply_msacc(void *vmsaccrp)

    erts_proc_dec_refc(msaccrp->proc);

-    if (erts_smp_atomic32_dec_read_nob(&msaccrp->refc) == 0)
+    if (erts_atomic32_dec_read_nob(&msaccrp->refc) == 0)
	erts_free(ERTS_ALC_T_MSACC, vmsaccrp);
}
@@ -370,14 +356,10 @@ erts_msacc_request(Process *c_p, int action, Eterm *threads)
    msaccrp->ref = STORE_NC(&hp, NULL, ref);
    msaccrp->req_sched = esdp->no;

-#ifdef ERTS_SMP
    *threads = erts_no_schedulers;
    *threads += 1; /* aux thread */
-#else
-    *threads = 1;
-#endif

-    erts_smp_atomic32_init_nob(&msaccrp->refc,(erts_aint32_t)*threads);
+    erts_atomic32_init_nob(&msaccrp->refc,(erts_aint32_t)*threads);

    erts_proc_add_refc(c_p, *threads);

@@ -386,12 +368,9 @@ erts_msacc_request(Process *c_p, int action, Eterm *threads)
					  erts_no_schedulers,
					  reply_msacc,
					  (void *) msaccrp);
-#ifdef ERTS_SMP
    /* aux thread */
    erts_schedule_misc_aux_work(0, reply_msacc, (void *) msaccrp);
-#endif

-#ifdef USE_THREADS
    /* Manage unmanaged threads */
    switch (action) {
    case ERTS_MSACC_GATHER: {
@@ -468,7 +447,6 @@ erts_msacc_request(Process *c_p, int action, Eterm *threads)
    default: { ASSERT(0); }
    }
-#endif

    *threads = make_small(*threads);
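The msacc request above fans out reply_msacc() to every scheduler plus the aux thread; the refc initialised to the number of repliers makes the last replier free the shared request. A hedged sketch of that pattern, using C11 atomics instead of erts_atomic32_t:

#include <stdatomic.h>
#include <stdlib.h>

typedef struct {
    atomic_int refc;
    /* ... request payload (ref, requesting pid, ...) ... */
} shared_req;

static shared_req *req_create(int repliers)
{
    shared_req *r = malloc(sizeof *r);
    if (r)
        atomic_init(&r->refc, repliers); /* e.g. schedulers + aux thread */
    return r;
}

static void req_reply_done(shared_req *r)
{
    /* fetch_sub returns the previous value; whoever takes it from
     * 1 to 0 owns the free, so the block is released exactly once */
    if (atomic_fetch_sub(&r->refc, 1) - 1 == 0)
        free(r);
}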
diff --git a/erts/emulator/beam/erl_msacc.h b/erts/emulator/beam/erl_msacc.h
index d64ef8c8b9..2d4637f800 100644
--- a/erts/emulator/beam/erl_msacc.h
+++ b/erts/emulator/beam/erl_msacc.h
@@ -159,11 +159,7 @@ struct erl_msacc_t_ {

#ifdef ERTS_ENABLE_MSACC

-#ifdef USE_THREADS
extern erts_tsd_key_t erts_msacc_key;
-#else
-extern ErtsMsAcc *erts_msacc;
-#endif

#ifdef ERTS_MSACC_ALWAYS_ON
#define erts_msacc_enabled 1
@@ -171,13 +167,8 @@ extern ErtsMsAcc *erts_msacc;
extern int erts_msacc_enabled;
#endif

-#ifdef USE_THREADS
#define ERTS_MSACC_TSD_GET() erts_tsd_get(erts_msacc_key)
#define ERTS_MSACC_TSD_SET(tsd) erts_tsd_set(erts_msacc_key,tsd)
-#else
-#define ERTS_MSACC_TSD_GET() erts_msacc
-#define ERTS_MSACC_TSD_SET(tsd) erts_msacc = tsd
-#endif

void erts_msacc_early_init(void);
void erts_msacc_init(void);
@@ -327,8 +318,8 @@ ERTS_GLB_INLINE void
erts_msacc_set_state_um__(ErtsMsAcc *msacc, Uint new_state, int increment) {
    if (ERTS_UNLIKELY(msacc->unmanaged)) {
	erts_mtx_lock(&msacc->mtx);
-	msacc->state = new_state;
	if (ERTS_LIKELY(!msacc->perf_counter)) {
+	    msacc->state = new_state;
	    erts_mtx_unlock(&msacc->mtx);
	    return;
	}

diff --git a/erts/emulator/beam/erl_nfunc_sched.c b/erts/emulator/beam/erl_nfunc_sched.c
index 1bebc1eda4..f97e86bf95 100644
--- a/erts/emulator/beam/erl_nfunc_sched.c
+++ b/erts/emulator/beam/erl_nfunc_sched.c
@@ -113,7 +113,7 @@ erts_nif_export_schedule(Process *c_p, Process *dirty_shadow_proc,
    NifExport* nep;
    int i;

-    ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+    ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
		       & ERTS_PROC_LOCK_MAIN);

    if (dirty_shadow_proc) {

diff --git a/erts/emulator/beam/erl_nfunc_sched.h b/erts/emulator/beam/erl_nfunc_sched.h
index 55a3a6dbf6..b8a4e4ebc3 100644
--- a/erts/emulator/beam/erl_nfunc_sched.h
+++ b/erts/emulator/beam/erl_nfunc_sched.h
@@ -144,9 +144,9 @@ ERTS_GLB_INLINE void
erts_nif_export_restore(Process *c_p, NifExport *ep, Eterm result)
{
    ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
-    ERTS_SMP_LC_ASSERT(!(c_p->static_flags
+    ERTS_LC_ASSERT(!(c_p->static_flags
		     & ERTS_STC_FLG_SHADOW_PROC));
-    ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+    ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
		       & ERTS_PROC_LOCK_MAIN);

    c_p->current = ep->current;
@@ -193,7 +193,6 @@ erts_nif_export_check_save_trace(Process *c_p, Eterm result,
ERTS_GLB_INLINE Process *
erts_proc_shadow2real(Process *c_p)
{
-#ifdef ERTS_DIRTY_SCHEDULERS
    if (c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
	Process *real_c_p = c_p->next;
	ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
@@ -201,7 +200,6 @@ erts_proc_shadow2real(Process *c_p)
	return real_c_p;
    }
    ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
-#endif
    return c_p;
}
@@ -213,11 +211,10 @@ erts_proc_shadow2real(Process *c_p)
#define ERTS_NFUNC_SCHED_INTERNALS__

#define ERTS_I_BEAM_OP_TO_NIF_EXPORT(I)					\
-    (ASSERT(BeamOp(op_apply_bif) == (BeamInstr *) (*(I))		\
-	    || BeamOp(op_call_nif) == (BeamInstr *) (*(I))),		\
+    (ASSERT(BeamIsOpCode(*(I), op_apply_bif) ||				\
+	    BeamIsOpCode(*(I), op_call_nif)),				\
     ((NifExport *) (((char *) (I)) - offsetof(NifExport, exp.beam[0]))))

-#ifdef ERTS_DIRTY_SCHEDULERS

#include "erl_message.h"
#include <stddef.h>
@@ -235,7 +232,7 @@ erts_flush_dirty_shadow_proc(Process *sproc)
    Process *c_p = sproc->next;

    ASSERT(sproc->common.id == c_p->common.id);
-    ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+    ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
		       & ERTS_PROC_LOCK_MAIN);

    ASSERT(c_p->stop == sproc->stop);
@@ -283,7 +280,7 @@ erts_cache_dirty_shadow_proc(Process *sproc)
    Process *c_p = sproc->next;
    ASSERT(c_p);
    ASSERT(sproc->common.id == c_p->common.id);
-    ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+    ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
		       & ERTS_PROC_LOCK_MAIN);

    sproc->htop = c_p->htop;
@@ -311,7 +308,7 @@ erts_make_dirty_shadow_proc(ErtsSchedulerData *esdp, Process *c_p)
    sproc = esdp->dirty_shadow_process;
    ASSERT(sproc);
    ASSERT(sproc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
-    ASSERT(erts_smp_atomic32_read_nob(&sproc->state)
+    ASSERT(erts_atomic32_read_nob(&sproc->state)
	   == (ERTS_PSFLG_ACTIVE
	       | ERTS_PSFLG_DIRTY_RUNNING
	       | ERTS_PSFLG_PROXY));
@@ -326,7 +323,6 @@ erts_make_dirty_shadow_proc(ErtsSchedulerData *esdp, Process *c_p)

#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */

-#endif /* ERTS_DIRTY_SCHEDULERS */

#endif /* defined(ERTS_WANT_NFUNC_SCHED_INTERNALS__) && !defined(ERTS_NFUNC_SCHED_INTERNALS__) */

diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index ac4ecd77e5..f7f12efe28 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -138,7 +138,7 @@ execution_state(ErlNifEnv *env, Process **c_pp, int *schedp)
	    Process *c_p = env->proc;

	    if (!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC)) {
-		ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
+		ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p)
				   & ERTS_PROC_LOCK_MAIN);
	    }
	    else {
@@ -220,7 +220,7 @@ void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
    ASSERT(esdp);

    if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
-	erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
+	erts_aint32_t state = erts_atomic32_read_nob(&p->state);

	ASSERT(p->scheduler_data == esdp);
	ASSERT((state & (ERTS_PSFLG_RUNNING
@@ -287,12 +287,12 @@ schedule(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
    else
	dirty_shadow_proc = env->proc;

-    ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p));
+    ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p));

    ep = erts_nif_export_schedule(c_p, dirty_shadow_proc,
				  c_p->current,
				  c_p->cp,
-				  (BeamInstr) em_call_nif,
+				  BeamOpCodeAddr(op_call_nif),
				  direct_fp, indirect_fp,
				  mod, func_name,
				  argc, (const Eterm *) argv);
@@ -304,7 +304,6 @@ schedule(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
    return (ERL_NIF_TERM) THE_NON_VALUE;
}

-#ifdef ERTS_DIRTY_SCHEDULERS

static ERL_NIF_TERM dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
@@ -320,7 +319,7 @@ erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *
    ErlNifEnv env;
    ERL_NIF_TERM result;
#ifdef DEBUG
-    erts_aint32_t state = erts_smp_atomic32_read_nob(&c_p->state);
+    erts_aint32_t state = erts_atomic32_read_nob(&c_p->state);

    ASSERT(nep == ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p));
@@ -343,14 +342,14 @@ erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *

    ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p)));

-    erts_smp_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
+    erts_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
						  | ERTS_PSFLG_DIRTY_IO_PROC));

-    erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+    erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);

    result = (*dirty_nif)(&env, codemfa->arity, argv); /* Call dirty NIF */

-    erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+    erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);

    ASSERT(env.proc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
    ASSERT(env.proc->next == c_p);
@@ -394,24 +393,19 @@ erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *
    return exiting;
}

-#endif

static void full_flush_env(ErlNifEnv* env)
{
    flush_env(env);
-#ifdef ERTS_DIRTY_SCHEDULERS
    if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)
	/* Dirty nif call using shadow process struct */
	erts_flush_dirty_shadow_proc(env->proc);
-#endif
}

static void full_cache_env(ErlNifEnv* env)
{
-#ifdef ERTS_DIRTY_SCHEDULERS
    if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)
	erts_cache_dirty_shadow_proc(env->proc);
-#endif
    cache_env(env);
}
@@ -564,7 +558,6 @@ void enif_clear_env(ErlNifEnv* env)
    free_tmp_objs(env);
}

-#ifdef ERTS_SMP
#ifdef DEBUG
static int enif_send_delay = 0;
#define ERTS_FORCE_ENIF_SEND_DELAY() (enif_send_delay++ % 2 == 0)
@@ -590,9 +583,9 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)

    /* Only one thread at a time is allowed to flush trace messages,
       so we require the main lock to be held when doing the flush */
-    ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);
+    ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p);

-    erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
+    erts_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);

    msgq = c_p->trace_msg_q;

@@ -611,7 +604,7 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
	    msgq->first = NULL;
	    msgq->last = &msgq->first;
	    msgq->len = 0;
-	    erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);
+	    erts_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);

	    ASSERT(len != 0);

@@ -624,13 +617,13 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
		if (rp->common.id == c_p->common.id)
		    rp_locks &= ~c_p_locks;
		if (rp_locks)
-		    erts_smp_proc_unlock(rp, rp_locks);
+		    erts_proc_unlock(rp, rp_locks);
		reds += len;
	    } else {
		erts_cleanup_messages(first);
	    }
	    reds += 1;
-	    erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
+	    erts_proc_lock(c_p, ERTS_PROC_LOCK_TRACE);
	    msgq = msgq->next;
	} while (msgq);

@@ -647,21 +640,18 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
    }

error:
-    erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);
+    erts_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE);

    return reds;
}

-#endif

int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
	      ErlNifEnv* msg_env, ERL_NIF_TERM msg)
{
    struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)msg_env;
    ErtsProcLocks rp_locks = 0;
-#ifdef ERTS_SMP
    ErtsProcLocks lc_locks = 0;
-#endif
    Process* rp;
    Process* c_p;
    ErtsMessage *mp;
@@ -670,13 +660,6 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,

    execution_state(env, &c_p, &scheduler);

-#ifndef ERTS_SMP
-    if (!scheduler) {
-	erts_exit(ERTS_ABORT_EXIT,
-		  "enif_send: called from non-scheduler thread on non-SMP VM");
-	return 0;
-    }
-#endif

    if (scheduler > 0) { /* Normal scheduler */
	rp = erts_proc_lookup(receiver);
@@ -690,7 +673,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
	    return 0;

	if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
-	    erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+	    erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
	}
    }

@@ -699,7 +682,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
			       ERTS_P2P_FLG_INC_REFC);
	if (!rp) {
	    if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC))
-		erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+		erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
	    return 0;
	}
    }
@@ -734,7 +717,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
	    full_cache_env(env);
	}
	else {
-	    erts_aint_t state = erts_smp_atomic32_read_nob(&rp->state);
+	    erts_aint_t state = erts_atomic32_read_nob(&rp->state);
	    if (state & ERTS_PSFLG_OFF_HEAP_MSGQ) {
		mp = erts_alloc_message(sz, &hp);
		ohp = sz == 0 ? NULL : &mp->hfrag.off_heap;
@@ -760,7 +743,6 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
	    full_cache_env(env);
	}
    }
-#ifdef ERTS_SMP
    else {
	/* This clause is taken when the nif is called in the context
	   of a traced process. We do not know which locks we have
@@ -771,7 +753,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,

	Process *t_p = env->tracee;

-	erts_smp_proc_lock(t_p, ERTS_PROC_LOCK_TRACE);
+	erts_proc_lock(t_p, ERTS_PROC_LOCK_TRACE);

	msgq = t_p->trace_msg_q;

@@ -788,7 +770,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
#endif
	if (ERTS_FORCE_ENIF_SEND_DELAY() || msgq ||
	    rp_locks & ERTS_PROC_LOCK_MSGQ ||
-	    erts_smp_proc_trylock(rp, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
+	    erts_proc_trylock(rp, ERTS_PROC_LOCK_MSGQ) == EBUSY) {

	    if (!msgq) {
		msgq = erts_alloc(ERTS_ALC_T_TRACE_MSG_QUEUE,
@@ -802,36 +784,33 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
		msgq->next = t_p->trace_msg_q;
		t_p->trace_msg_q = msgq;

-		erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+		erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);

		erts_schedule_flush_trace_messages(t_p, 0);
	    } else {
		msgq->len++;
		*msgq->last = mp;
		msgq->last = &mp->next;
-		erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+		erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
	    }
	    goto done;
	} else {
-	    erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+	    erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
	    rp_locks &= ~ERTS_PROC_LOCK_TRACE;
	    rp_locks |= ERTS_PROC_LOCK_MSGQ;
	}
    }
-#endif /* ERTS_SMP */

    erts_queue_message(rp, rp_locks, mp, msg,
		       c_p ? c_p->common.id : am_undefined);

-#ifdef ERTS_SMP
done:

    if (c_p == rp)
	rp_locks &= ~ERTS_PROC_LOCK_MAIN;
    if (rp_locks & ~lc_locks)
-	erts_smp_proc_unlock(rp, rp_locks & ~lc_locks);
+	erts_proc_unlock(rp, rp_locks & ~lc_locks);
    if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC))
-	erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
-#endif
+	erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);

    if (scheduler <= 0)
	erts_proc_dec_refc(rp);
@@ -861,15 +840,9 @@ enif_port_command(ErlNifEnv *env, const ErlNifPort* to_port,
    if (scheduler > 0)
	prt = erts_port_lookup(to_port->port_id, iflags);
    else {
-#ifdef ERTS_SMP
	if (ERTS_PROC_IS_EXITING(c_p))
	    return 0;
	prt = erts_thr_port_lookup(to_port->port_id, iflags);
-#else
-	erts_exit(ERTS_ABORT_EXIT,
-		  "enif_port_command: called from non-scheduler "
-		  "thread on non-SMP VM");
-#endif
    }

    if (!prt)
@@ -1852,18 +1825,11 @@ int enif_is_process_alive(ErlNifEnv* env, ErlNifPid *proc)
    if (scheduler > 0)
	return !!erts_proc_lookup(proc->pid);
    else {
-#ifdef ERTS_SMP
	Process* rp = erts_pid2proc_opt(NULL, 0, proc->pid, 0,
					ERTS_P2P_FLG_INC_REFC);
	if (rp)
	    erts_proc_dec_refc(rp);
	return !!rp;
-#else
-	erts_exit(ERTS_ABORT_EXIT, "enif_is_process_alive: "
-		  "called from non-scheduler thread "
-		  "in non-smp emulator");
-	return 0;
-#endif
    }
}
@@ -1879,17 +1845,10 @@ int enif_is_port_alive(ErlNifEnv *env, ErlNifPort *port)
    if (scheduler > 0)
	return !!erts_port_lookup(port->port_id, iflags);
    else {
-#ifdef ERTS_SMP
	Port *prt = erts_thr_port_lookup(port->port_id, iflags);
	if (prt)
	    erts_port_dec_refc(prt);
	return !!prt;
-#else
-	erts_exit(ERTS_ABORT_EXIT, "enif_is_port_alive: "
-		  "called from non-scheduler thread "
-		  "in non-smp emulator");
-	return 0;
-#endif
    }
}
@@ -2097,7 +2056,7 @@ ErlNifResourceType* open_resource_type(ErlNifEnv* env,
    ErlNifResourceFlags op = flags;
    Eterm module_am, name_am;

-    ASSERT(erts_smp_thr_progress_is_blocking());
+    ASSERT(erts_thr_progress_is_blocking());
    module_am = make_atom(env->mod_nif->mod->module);
    name_am = enif_make_atom(env, name_str);
@@ -2236,19 +2195,14 @@ static void destroy_one_monitor(ErtsMonitor* mon, void* context)
	rp = erts_proc_lookup(mon->u.pid);
    }
    else {
-#ifdef ERTS_SMP
	rp = erts_proc_lookup_inc_refc(mon->u.pid);
-#else
-	ASSERT(!"nif monitor destruction in non-scheduler thread");
-	rp = NULL;
-#endif
    }

    if (!rp) {
	is_exiting = 1;
    }
    if (rp) {
-	erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK);
+	erts_proc_lock(rp, ERTS_PROC_LOCK_LINK);
	if (ERTS_PROC_IS_EXITING(rp)) {
	    is_exiting = 1;
	} else {
@@ -2256,11 +2210,9 @@ static void destroy_one_monitor(ErtsMonitor* mon, void* context)
	    ASSERT(rmon);
	    is_exiting = 0;
	}
-	erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
-#ifdef ERTS_SMP
+	erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
	if (ctx->scheduler <= 0)
	    erts_proc_dec_refc(rp);
-#endif
    }
    if (is_exiting) {
	ctx->resource->monitors->pending_failed_fire++;
@@ -2286,34 +2238,7 @@ static void destroy_all_monitors(ErtsMonitor* monitors, ErtsResource* resource)
}

-#ifdef ERTS_SMP
#  define NIF_RESOURCE_DTOR &nif_resource_dtor
-#else
-#  define NIF_RESOURCE_DTOR &nosmp_nif_resource_dtor_prologue
-
-/*
- * NO-SMP: Always run resource destructor on scheduler thread
- * as we may have to remove process monitors.
- */ -static int nif_resource_dtor(Binary*); - -static void nosmp_nif_resource_dtor_scheduled(void* vbin) -{ - erts_bin_free((Binary*)vbin); -} - -static int nosmp_nif_resource_dtor_prologue(Binary* bin) -{ - if (is_scheduler()) { - return nif_resource_dtor(bin); - } - else { - erts_schedule_misc_aux_work(1, nosmp_nif_resource_dtor_scheduled, bin); - return 0; /* do not free */ - } -} - -#endif /* !ERTS_SMP */ static int nif_resource_dtor(Binary* bin) { @@ -2325,7 +2250,7 @@ static int nif_resource_dtor(Binary* bin) ErtsResourceMonitors* rm = resource->monitors; ASSERT(type->down); - erts_smp_mtx_lock(&rm->lock); + erts_mtx_lock(&rm->lock); ASSERT(erts_refc_read(&bin->intern.refc, 0) == 0); if (rm->root) { ASSERT(!rm->is_dying); @@ -2347,11 +2272,11 @@ static int nif_resource_dtor(Binary* bin) */ ASSERT(!rm->is_dying); rm->is_dying = 1; - erts_smp_mtx_unlock(&rm->lock); + erts_mtx_unlock(&rm->lock); return 0; } - erts_smp_mtx_unlock(&rm->lock); - erts_smp_mtx_destroy(&rm->lock); + erts_mtx_unlock(&rm->lock); + erts_mtx_destroy(&rm->lock); } if (type->dtor != NULL) { @@ -2392,12 +2317,12 @@ void erts_fire_nif_monitor(ErtsResource* resource, Eterm pid, Eterm ref) ASSERT(rmp); ASSERT(resource->type->down); - erts_smp_mtx_lock(&rmp->lock); + erts_mtx_lock(&rmp->lock); rmon = erts_remove_monitor(&rmp->root, ref); if (!rmon) { int free_me = (--rmp->pending_failed_fire == 0) && rmp->is_dying; ASSERT(rmp->pending_failed_fire >= 0); - erts_smp_mtx_unlock(&rmp->lock); + erts_mtx_unlock(&rmp->lock); if (free_me) { ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) == 0); @@ -2413,10 +2338,10 @@ void erts_fire_nif_monitor(ErtsResource* resource, Eterm pid, Eterm ref) * we avoid calling 'down' and just silently remove the monitor. * This can happen even for non smp as destructor calls may be scheduled. */ - erts_smp_mtx_unlock(&rmp->lock); + erts_mtx_unlock(&rmp->lock); } else { - erts_smp_mtx_unlock(&rmp->lock); + erts_mtx_unlock(&rmp->lock); ASSERT(rmon->u.pid == pid); erts_ref_to_driver_monitor(ref, &nif_monitor); @@ -2461,7 +2386,7 @@ void* enif_alloc_resource(ErlNifResourceType* type, size_t data_sz) erts_refc_inc(&resource->type->refc, 2); if (type->down) { resource->monitors = (ErtsResourceMonitors*) (resource->data + monitors_offs); - erts_smp_mtx_init(&resource->monitors->lock, "resource_monitors", NIL, + erts_mtx_init(&resource->monitors->lock, "resource_monitors", NIL, ERTS_LOCK_FLAGS_CATEGORY_GENERIC); resource->monitors->root = NULL; resource->monitors->pending_failed_fire = 0; @@ -2660,7 +2585,6 @@ nif_export_restore(Process *c_p, NifExport *ep, Eterm res) } -#ifdef ERTS_DIRTY_SCHEDULERS /* * Finalize a dirty NIF call. 
This function is scheduled to cause the VM to @@ -2730,7 +2654,7 @@ schedule_dirty_nif(ErlNifEnv* env, int flags, NativeFunPtr fp, execution_state(env, &proc, NULL); - (void) erts_smp_atomic32_read_bset_nob(&proc->state, + (void) erts_atomic32_read_bset_nob(&proc->state, (ERTS_PSFLG_DIRTY_CPU_PROC | ERTS_PSFLG_DIRTY_IO_PROC), (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND @@ -2768,7 +2692,7 @@ static_schedule_dirty_nif(ErlNifEnv* env, erts_aint32_t dirty_psflg, ASSERT(is_atom(mod) && is_atom(func)); ASSERT(fp); - (void) erts_smp_atomic32_read_bset_nob(&proc->state, + (void) erts_atomic32_read_bset_nob(&proc->state, (ERTS_PSFLG_DIRTY_CPU_PROC | ERTS_PSFLG_DIRTY_IO_PROC), dirty_psflg); @@ -2788,7 +2712,6 @@ static_schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[ return static_schedule_dirty_nif(env, ERTS_PSFLG_DIRTY_CPU_PROC, argc, argv); } -#endif /* ERTS_DIRTY_SCHEDULERS */ /* * NIF execution wrapper used by enif_schedule_nif() for regular NIFs. It @@ -2862,24 +2785,20 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags, if (scheduler <= 0) { if (scheduler == 0) enif_make_badarg(env); - erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN); } if (flags == 0) result = schedule(env, execute_nif, fp, proc->current->module, fun_name_atom, argc, argv); else if (!(flags & ~(ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND))) { -#ifdef ERTS_DIRTY_SCHEDULERS result = schedule_dirty_nif(env, flags, fp, fun_name_atom, argc, argv); -#else - result = enif_raise_exception(env, am_notsup); -#endif } else result = enif_make_badarg(env); if (scheduler < 0) - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); return result; } @@ -2895,12 +2814,10 @@ enif_thread_type(void) switch (esdp->type) { case ERTS_SCHED_NORMAL: return ERL_NIF_THR_NORMAL_SCHEDULER; -#ifdef ERTS_DIRTY_SCHEDULERS case ERTS_SCHED_DIRTY_CPU: return ERL_NIF_THR_DIRTY_CPU_SCHEDULER; case ERTS_SCHED_DIRTY_IO: return ERL_NIF_THR_DIRTY_IO_SCHEDULER; -#endif default: ERTS_INTERNAL_ERROR("Invalid scheduler type"); return -1; @@ -3221,27 +3138,19 @@ int enif_monitor_process(ErlNifEnv* env, void* obj, const ErlNifPid* target_pid, execution_state(env, NULL, &scheduler); -#ifdef ERTS_SMP if (scheduler > 0) /* Normal scheduler */ rp = erts_proc_lookup_raw(target_pid->pid); else rp = erts_proc_lookup_raw_inc_refc(target_pid->pid); -#else - if (scheduler <= 0) { - erts_exit(ERTS_ABORT_EXIT, "enif_monitor_process: called from " - "non-scheduler thread on non-SMP VM"); - } - rp = erts_proc_lookup(target_pid->pid); -#endif if (!rp) return 1; ref = erts_make_ref_in_buffer(tmp); - erts_smp_mtx_lock(&rsrc->monitors->lock); - erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK); - if (ERTS_PSFLG_FREE & erts_smp_atomic32_read_nob(&rp->state)) { + erts_mtx_lock(&rsrc->monitors->lock); + erts_proc_lock(rp, ERTS_PROC_LOCK_LINK); + if (ERTS_PSFLG_FREE & erts_atomic32_read_nob(&rp->state)) { retval = 1; } else { @@ -3249,13 +3158,11 @@ int enif_monitor_process(ErlNifEnv* env, void* obj, const ErlNifPid* target_pid, erts_add_monitor(&ERTS_P_MONITORS(rp), MON_NIF_TARGET, ref, (UWord)rsrc, NIL); retval = 0; } - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); - erts_smp_mtx_unlock(&rsrc->monitors->lock); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_mtx_unlock(&rsrc->monitors->lock); -#ifdef ERTS_SMP if (scheduler <= 0) erts_proc_dec_refc(rp); -#endif if (monitor) erts_ref_to_driver_monitor(ref,monitor); @@ -3283,35 +3190,27 @@ int 
enif_demonitor_process(ErlNifEnv* env, void* obj, const ErlNifMonitor* monit ref = erts_driver_monitor_to_ref(ref_heap, monitor); - erts_smp_mtx_lock(&rsrc->monitors->lock); + erts_mtx_lock(&rsrc->monitors->lock); mon = erts_remove_monitor(&rsrc->monitors->root, ref); if (mon == NULL) { - erts_smp_mtx_unlock(&rsrc->monitors->lock); + erts_mtx_unlock(&rsrc->monitors->lock); return 1; } ASSERT(mon->type == MON_ORIGIN); ASSERT(is_internal_pid(mon->u.pid)); -#ifdef ERTS_SMP if (scheduler > 0) /* Normal scheduler */ rp = erts_proc_lookup(mon->u.pid); else rp = erts_proc_lookup_inc_refc(mon->u.pid); -#else - if (scheduler <= 0) { - erts_exit(ERTS_ABORT_EXIT, "enif_demonitor_process: called from " - "non-scheduler thread on non-SMP VM"); - } - rp = erts_proc_lookup(mon->u.pid); -#endif if (!rp) { is_exiting = 1; } else { - erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_lock(rp, ERTS_PROC_LOCK_LINK); if (ERTS_PROC_IS_EXITING(rp)) { is_exiting = 1; } else { @@ -3319,17 +3218,15 @@ int enif_demonitor_process(ErlNifEnv* env, void* obj, const ErlNifMonitor* monit ASSERT(rmon); is_exiting = 0; } - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); -#ifdef ERTS_SMP if (scheduler <= 0) erts_proc_dec_refc(rp); -#endif } if (is_exiting) { rsrc->monitors->pending_failed_fire++; } - erts_smp_mtx_unlock(&rsrc->monitors->lock); + erts_mtx_unlock(&rsrc->monitors->lock); if (rmon) { ASSERT(rmon->type == MON_NIF_TARGET); @@ -3716,7 +3613,7 @@ static ErtsCodeInfo** get_func_pp(BeamCodeHeader* mod_code, Eterm f_atom, unsign int j; for (j = 0; j < n; ++j) { ErtsCodeInfo* ci = mod_code->functions[j]; - ASSERT(ci->op == (BeamInstr) BeamOp(op_i_func_info_IaaI)); + ASSERT(BeamIsOpCode(ci->op, op_i_func_info_IaaI)); if (f_atom == ci->mfa.function && arity == ci->mfa.arity) { return mod_code->functions+j; @@ -3912,8 +3809,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) } /* Block system (is this the right place to do it?) */ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); /* Find calling module */ ASSERT(BIF_P->current != NULL); @@ -4018,14 +3915,9 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) * dirty scheduler support, treat a non-zero flags field as * a load error. */ -#ifdef ERTS_DIRTY_SCHEDULERS if (f->flags != ERL_NIF_DIRTY_JOB_IO_BOUND && f->flags != ERL_NIF_DIRTY_JOB_CPU_BOUND) ret = load_nif_error(BIF_P, bad_lib, "Illegal flags field value %d for NIF %T:%s/%u", f->flags, mod_atom, f->name, f->arity); -#else - ret = load_nif_error(BIF_P, bad_lib, "NIF %T:%s/%u requires a runtime with dirty scheduler support.", - mod_atom, f->name, f->arity); -#endif } else if (erts_codeinfo_to_code(ci_pp[1]) - erts_codeinfo_to_code(ci_pp[0]) < BEAM_NIF_MIN_FUNC_SZ) @@ -4090,15 +3982,13 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) code_ptr = erts_codeinfo_to_code(ci); if (ci->u.gen_bp == NULL) { - code_ptr[0] = (BeamInstr) BeamOp(op_call_nif); + code_ptr[0] = BeamOpCodeAddr(op_call_nif); } else { /* Function traced, patch the original instruction word */ GenericBp* g = ci->u.gen_bp; - ASSERT(code_ptr[0] == - (BeamInstr) BeamOp(op_i_generic_breakpoint)); - g->orig_instr = (BeamInstr) BeamOp(op_call_nif); + ASSERT(BeamIsOpCode(code_ptr[0], op_i_generic_breakpoint)); + g->orig_instr = BeamOpCodeAddr(op_call_nif); } -#ifdef ERTS_DIRTY_SCHEDULERS if (f->flags) { code_ptr[3] = (BeamInstr) f->fptr; code_ptr[1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ? 
@@ -4106,7 +3996,6 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) (BeamInstr) static_schedule_dirty_cpu_nif; } else -#endif code_ptr[1] = (BeamInstr) f->fptr; code_ptr[2] = (BeamInstr) lib; } @@ -4124,8 +4013,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) erts_sys_ddll_free_error(&errdesc); } - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_release_code_write_permission(); erts_free(ERTS_ALC_T_TMP, lib_name); @@ -4138,7 +4027,7 @@ erts_unload_nif(struct erl_module_nif* lib) { ErlNifResourceType* rt; ErlNifResourceType* next; - ASSERT(erts_smp_thr_progress_is_blocking()); + ASSERT(erts_thr_progress_is_blocking()); ASSERT(lib != NULL); ASSERT(lib->mod != NULL); @@ -4210,8 +4099,8 @@ Eterm erts_nif_call_function(Process *p, Process *tracee, break; ASSERT(i < mod->entry.num_of_funcs); if (p) - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN - || erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN + || erts_thr_progress_is_blocking()); #endif if (p) { /* This is almost a normal nif call like in beam_emu, diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c index deadf435e9..0f3dfa797c 100644 --- a/erts/emulator/beam/erl_node_tables.c +++ b/erts/emulator/beam/erl_node_tables.c @@ -29,11 +29,13 @@ #include "error.h" #include "erl_thr_progress.h" #include "dtrace-wrapper.h" +#include "erl_binary.h" +#include "erl_bif_unique.h" Hash erts_dist_table; Hash erts_node_table; -erts_smp_rwmtx_t erts_dist_table_rwmtx; -erts_smp_rwmtx_t erts_node_table_rwmtx; +erts_rwmtx_t erts_dist_table_rwmtx; +erts_rwmtx_t erts_node_table_rwmtx; DistEntry *erts_hidden_dist_entries; DistEntry *erts_visible_dist_entries; @@ -57,6 +59,58 @@ static ErtsMonotonicTime node_tab_delete_delay; /* -- The distribution table ---------------------------------------------- */ +#define ErtsBin2DistEntry(B) \ + ((DistEntry *) ERTS_MAGIC_BIN_DATA((B))) +#define ErtsDistEntry2Bin(DEP) \ + ((Binary *) ERTS_MAGIC_BIN_FROM_DATA((DEP))) + +static ERTS_INLINE erts_aint_t +de_refc_read(DistEntry *dep, erts_aint_t min) +{ + return erts_refc_read(&ErtsDistEntry2Bin(dep)->intern.refc, min); +} + +static ERTS_INLINE erts_aint_t +de_refc_inc_read(DistEntry *dep, erts_aint_t min) +{ + return erts_refc_inctest(&ErtsDistEntry2Bin(dep)->intern.refc, min); +} + +static ERTS_INLINE void +de_refc_inc(DistEntry *dep, erts_aint_t min) +{ + erts_refc_inc(&ErtsDistEntry2Bin(dep)->intern.refc, min); +} + +static ERTS_INLINE void +de_refc_dec(DistEntry *dep, erts_aint_t min) +{ +#ifdef DEBUG + (void) erts_refc_read(&ErtsDistEntry2Bin(dep)->intern.refc, min+1); +#endif + erts_bin_release(ErtsDistEntry2Bin(dep)); +} + +static ERTS_INLINE erts_aint_t +de_refc_dec_read(DistEntry *dep, erts_aint_t min) +{ + return erts_refc_dectest(&ErtsDistEntry2Bin(dep)->intern.refc, min); +} + +void +erts_ref_dist_entry(DistEntry *dep) +{ + ASSERT(dep); + de_refc_inc(dep, 1); +} + +void +erts_deref_dist_entry(DistEntry *dep) +{ + ASSERT(dep); + de_refc_dec(dep, 0); +} + #ifdef DEBUG static int is_in_de_list(DistEntry *dep, DistEntry *dep_list) @@ -85,45 +139,66 @@ dist_table_cmp(void *dep1, void *dep2) static void* dist_table_alloc(void *dep_tmpl) { +#ifdef DEBUG + erts_aint_t refc; +#endif Eterm sysname; + Binary *bin; DistEntry *dep; - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; + 
erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; + rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; sysname = ((DistEntry *) dep_tmpl)->sysname; - dep = (DistEntry *) erts_alloc(ERTS_ALC_T_DIST_ENTRY, sizeof(DistEntry)); + + bin = erts_create_magic_binary_x(sizeof(DistEntry), + erts_dist_entry_destructor, + ERTS_ALC_T_DIST_ENTRY, + 0); + dep = ErtsBin2DistEntry(bin); dist_entries++; +#ifdef DEBUG + refc = +#else + (void) +#endif + de_refc_dec_read(dep, -1); + ASSERT(refc == -1); + dep->prev = NULL; - erts_smp_refc_init(&dep->refc, -1); - erts_smp_rwmtx_init_opt(&dep->rwmtx, &rwmtx_opt, "dist_entry", sysname, + erts_rwmtx_init_opt(&dep->rwmtx, &rwmtx_opt, "dist_entry", sysname, ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION); dep->sysname = sysname; dep->cid = NIL; + erts_atomic_init_nob(&dep->input_handler, (erts_aint_t) NIL); dep->connection_id = 0; dep->status = 0; dep->flags = 0; dep->version = 0; - erts_smp_mtx_init(&dep->lnk_mtx, "dist_entry_links", sysname, + erts_mtx_init(&dep->lnk_mtx, "dist_entry_links", sysname, ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION); dep->node_links = NULL; dep->nlinks = NULL; dep->monitors = NULL; - erts_smp_mtx_init(&dep->qlock, "dist_entry_out_queue", sysname, + erts_mtx_init(&dep->qlock, "dist_entry_out_queue", sysname, ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION); - dep->qflgs = 0; - dep->qsize = 0; + erts_atomic32_init_nob(&dep->qflgs, 0); + erts_atomic_init_nob(&dep->qsize, 0); + erts_atomic64_init_nob(&dep->in, 0); + erts_atomic64_init_nob(&dep->out, 0); dep->out_queue.first = NULL; dep->out_queue.last = NULL; dep->suspended = NULL; + dep->tmp_out_queue.first = NULL; + dep->tmp_out_queue.last = NULL; dep->finalized_out_queue.first = NULL; dep->finalized_out_queue.last = NULL; - erts_smp_atomic_init_nob(&dep->dist_cmd_scheduled, 0); + erts_atomic_init_nob(&dep->dist_cmd_scheduled, 0); erts_port_task_handle_init(&dep->dist_cmd); dep->send = NULL; dep->cache = NULL; @@ -174,14 +249,14 @@ dist_table_free(void *vdep) erts_no_of_not_connected_dist_entries--; ASSERT(!dep->cache); - erts_smp_rwmtx_destroy(&dep->rwmtx); - erts_smp_mtx_destroy(&dep->lnk_mtx); - erts_smp_mtx_destroy(&dep->qlock); + erts_rwmtx_destroy(&dep->rwmtx); + erts_mtx_destroy(&dep->lnk_mtx); + erts_mtx_destroy(&dep->qlock); #ifdef DEBUG sys_memset(vdep, 0x77, sizeof(DistEntry)); #endif - erts_free(ERTS_ALC_T_DIST_ENTRY, (void *) dep); + erts_bin_free(ErtsDistEntry2Bin(dep)); ASSERT(dist_entries > 0); dist_entries--; @@ -193,25 +268,58 @@ erts_dist_table_info(fmtfn_t to, void *to_arg) { int lock = !ERTS_IS_CRASH_DUMPING; if (lock) - erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); + erts_rwmtx_rlock(&erts_dist_table_rwmtx); hash_info(to, to_arg, &erts_dist_table); if (lock) - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); +} + +static ERTS_INLINE DistEntry *find_dist_entry(Eterm sysname, + int inc_refc, + int connected_only) +{ + DistEntry *res; + DistEntry de; + de.sysname = sysname; + erts_rwmtx_rlock(&erts_dist_table_rwmtx); + res = hash_get(&erts_dist_table, (void *) &de); + if (res) { + if (connected_only && is_nil(res->cid)) + res = NULL; + else { + int pend_delete; + erts_aint_t refc; + if (inc_refc) { + refc = de_refc_inc_read(res, 1); + pend_delete = refc < 2; + } + else { + refc = de_refc_read(res, 0); + pend_delete = refc < 1; + } + if (pend_delete) /* Pending delete */ + de_refc_inc(res, 1); + } + } + erts_rwmtx_runlock(&erts_dist_table_rwmtx); + return res; } DistEntry * erts_channel_no_to_dist_entry(Uint cno) { + /* + * Does NOT increase 
@@ -220,83 +328,100 @@ erts_channel_no_to_dist_entry(Uint cno)
 
     /* cno is a valid atom index; find corresponding dist entry (if there
        is one) */
-    return erts_find_dist_entry(make_atom(cno));
+    return find_dist_entry(make_atom(cno), 0, 0);
 }
 
-
 DistEntry *
 erts_sysname_to_connected_dist_entry(Eterm sysname)
 {
-    DistEntry de;
-    DistEntry *res_dep;
-    de.sysname = sysname;
-
-    if(erts_this_dist_entry->sysname == sysname) {
-        erts_smp_refc_inc(&erts_this_dist_entry->refc, 2);
+    /*
+     * Does NOT increase reference count!
+     */
+    if(erts_this_dist_entry->sysname == sysname)
         return erts_this_dist_entry;
-    }
-
-    erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
-    res_dep = (DistEntry *) hash_get(&erts_dist_table, (void *) &de);
-    if (res_dep) {
-        erts_aint_t refc = erts_smp_refc_inctest(&res_dep->refc, 1);
-        if (refc < 2) /* Pending delete */
-            erts_smp_refc_inc(&res_dep->refc, 1);
-    }
-    erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
-    if (res_dep) {
-        int deref;
-        erts_smp_rwmtx_rlock(&res_dep->rwmtx);
-        deref = is_nil(res_dep->cid);
-        erts_smp_rwmtx_runlock(&res_dep->rwmtx);
-        if (deref) {
-            erts_deref_dist_entry(res_dep);
-            res_dep = NULL;
-        }
-    }
-    return res_dep;
+    return find_dist_entry(sysname, 0, 1);
 }
 
 DistEntry *erts_find_or_insert_dist_entry(Eterm sysname)
 {
+    /*
+     * This function DOES increase reference count!
+     */
     DistEntry *res;
     DistEntry de;
     erts_aint_t refc;
-    res = erts_find_dist_entry(sysname);
+    res = find_dist_entry(sysname, 1, 0);
     if (res)
         return res;
     de.sysname = sysname;
-    erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+    erts_rwmtx_rwlock(&erts_dist_table_rwmtx);
     res = hash_put(&erts_dist_table, (void *) &de);
-    refc = erts_smp_refc_inctest(&res->refc, 0);
+    refc = de_refc_inc_read(res, 0);
     if (refc < 2) /* New or pending delete */
-        erts_smp_refc_inc(&res->refc, 1);
-    erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+        de_refc_inc(res, 1);
+    erts_rwmtx_rwunlock(&erts_dist_table_rwmtx);
     return res;
 }
 
 DistEntry *erts_find_dist_entry(Eterm sysname)
 {
-    DistEntry *res;
-    DistEntry de;
-    de.sysname = sysname;
-    erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
-    res = hash_get(&erts_dist_table, (void *) &de);
-    if (res) {
-        erts_aint_t refc = erts_smp_refc_inctest(&res->refc, 1);
-        if (refc < 2) /* Pending delete */
-            erts_smp_refc_inc(&res->refc, 1);
-    }
-    erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
-    return res;
+    /*
+     * Does NOT increase reference count!
+     */
+    return find_dist_entry(sysname, 0, 0);
 }
 
-static void try_delete_dist_entry(void *vdep)
+DistEntry *
+erts_dhandle_to_dist_entry(Eterm dhandle)
 {
-    DistEntry *dep = (DistEntry *) vdep;
+    Binary *bin;
+    if (!is_internal_magic_ref(dhandle))
+        return NULL;
+    bin = erts_magic_ref2bin(dhandle);
+    if (ERTS_MAGIC_BIN_DESTRUCTOR(bin) != erts_dist_entry_destructor)
+        return NULL;
+    return ErtsBin2DistEntry(bin);
+}
+
+Eterm
+erts_make_dhandle(Process *c_p, DistEntry *dep)
+{
+    Binary *bin;
+    Eterm *hp;
+
+    bin = ErtsDistEntry2Bin(dep);
+    ASSERT(bin);
+    ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(bin) == erts_dist_entry_destructor);
+    hp = HAlloc(c_p, ERTS_MAGIC_REF_THING_SIZE);
+    return erts_mk_magic_ref(&hp, &c_p->off_heap, bin);
+}
+
+static void try_delete_dist_entry(void *vbin);
+
+static void
+prepare_try_delete_dist_entry(void *vbin)
+{
+    Binary *bin = (Binary *) vbin;
+    DistEntry *dep = ErtsBin2DistEntry(bin);
+    Uint size;
     erts_aint_t refc;
 
-    erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+    refc = de_refc_read(dep, 0);
+    if (refc > 0)
+        return;
+
+    size = ERTS_MAGIC_BIN_SIZE(sizeof(DistEntry));
+    erts_schedule_thr_prgr_later_cleanup_op(try_delete_dist_entry,
+                                            vbin, &dep->later_op, size);
+}
+
+static void try_delete_dist_entry(void *vbin)
+{
+    Binary *bin = (Binary *) vbin;
+    DistEntry *dep = ErtsBin2DistEntry(bin);
+    erts_aint_t refc;
+
+    erts_rwmtx_rwlock(&erts_dist_table_rwmtx);
     /*
      * Another thread might have looked up this dist entry after
     * we decided to delete it (refc became zero). If so, the other
@@ -312,26 +437,39 @@ static void try_delete_dist_entry(void *vdep)
      *
      * If refc > 0, the entry is in use. Keep the entry.
      */
-    refc = erts_smp_refc_dectest(&dep->refc, -1);
+    refc = de_refc_dec_read(dep, -1);
     if (refc == -1)
         (void) hash_erase(&erts_dist_table, (void *) dep);
-    erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
-
-    if (refc == 0)
-        erts_schedule_delete_dist_entry(dep);
+    erts_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+
+    if (refc == 0) {
+        if (node_tab_delete_delay == 0)
+            prepare_try_delete_dist_entry(vbin);
+        else if (node_tab_delete_delay > 0)
+            erts_start_timer_callback(node_tab_delete_delay,
+                                      prepare_try_delete_dist_entry,
+                                      vbin);
+    }
 }
 
-void erts_schedule_delete_dist_entry(DistEntry *dep)
+int erts_dist_entry_destructor(Binary *bin)
 {
-    ASSERT(dep != erts_this_dist_entry);
-    if (dep != erts_this_dist_entry) {
-        if (node_tab_delete_delay == 0)
-            try_delete_dist_entry((void *) dep);
-        else if (node_tab_delete_delay > 0)
-            erts_start_timer_callback(node_tab_delete_delay,
-                                      try_delete_dist_entry,
-                                      (void *) dep);
-    }
+    DistEntry *dep = ErtsBin2DistEntry(bin);
+    erts_aint_t refc;
+
+    refc = de_refc_read(dep, -1);
+
+    if (refc == -1)
+        return 1; /* Allow deallocation of structure... */
+
+    if (node_tab_delete_delay == 0)
+        prepare_try_delete_dist_entry((void *) bin);
+    else if (node_tab_delete_delay > 0)
+        erts_start_timer_callback(node_tab_delete_delay,
+                                  prepare_try_delete_dist_entry,
+                                  (void *) bin);
+
+    return 0;
 }
 
 Uint
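prepare_try_delete_dist_entry() above does not free anything itself: it hands the binary to erts_schedule_thr_prgr_later_cleanup_op(), which runs try_delete_dist_entry() only after all managed threads have made thread progress. A toy epoch model of that guarantee; the per-thread counters and names are invented, not the ERTS thread-progress machinery:

#include <stdatomic.h>
#include <stdlib.h>

#define NTHREADS 4

static atomic_ulong thread_epoch[NTHREADS]; /* bumped at each sync point */

typedef struct { unsigned long wait_for[NTHREADS]; void *block; } later_op_t;

static later_op_t schedule_free(void *block)
{
    later_op_t op = { .block = block };
    for (int i = 0; i < NTHREADS; i++)
        op.wait_for[i] = atomic_load(&thread_epoch[i]) + 1;
    return op;
}

static int try_cleanup(later_op_t *op)
{
    for (int i = 0; i < NTHREADS; i++)
        if (atomic_load(&thread_epoch[i]) < op->wait_for[i])
            return 0;          /* some thread may still hold a reference */
    free(op->block);           /* every thread has passed the sync point */
    return 1;
}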
@@ -346,7 +484,7 @@ erts_dist_table_size(void)
     int lock = !ERTS_IS_CRASH_DUMPING;
 
     if (lock)
-        erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
+        erts_rwmtx_rlock(&erts_dist_table_rwmtx);
 #ifdef DEBUG
     hash_get_info(&hi, &erts_dist_table);
     ASSERT(dist_entries == hi.objs);
@@ -373,18 +511,18 @@ erts_dist_table_size(void)
            + dist_entries*sizeof(DistEntry)
            + erts_dist_cache_size());
     if (lock)
-        erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
+        erts_rwmtx_runlock(&erts_dist_table_rwmtx);
     return res;
 }
 
 void
 erts_set_dist_entry_not_connected(DistEntry *dep)
 {
-    ERTS_SMP_LC_ASSERT(erts_lc_is_de_rwlocked(dep));
-    erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+    ERTS_LC_ASSERT(erts_lc_is_de_rwlocked(dep));
+    erts_rwmtx_rwlock(&erts_dist_table_rwmtx);
 
     ASSERT(dep != erts_this_dist_entry);
-    ASSERT(is_internal_port(dep->cid));
+    ASSERT(is_internal_port(dep->cid) || is_internal_pid(dep->cid));
 
     if(dep->flags & DFLAG_PUBLISHED) {
         if(dep->prev) {
@@ -428,18 +566,18 @@ erts_set_dist_entry_not_connected(DistEntry *dep)
     }
     erts_not_connected_dist_entries = dep;
     erts_no_of_not_connected_dist_entries++;
-    erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+    erts_rwmtx_rwunlock(&erts_dist_table_rwmtx);
 }
 
 void
 erts_set_dist_entry_connected(DistEntry *dep, Eterm cid, Uint flags)
 {
-    ERTS_SMP_LC_ASSERT(erts_lc_is_de_rwlocked(dep));
-    erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx);
+    ERTS_LC_ASSERT(erts_lc_is_de_rwlocked(dep));
+    erts_rwmtx_rwlock(&erts_dist_table_rwmtx);
 
     ASSERT(dep != erts_this_dist_entry);
     ASSERT(is_nil(dep->cid));
-    ASSERT(is_internal_port(cid));
+    ASSERT(is_internal_port(cid) || is_internal_pid(cid));
 
     if(dep->prev) {
         ASSERT(is_in_de_list(dep, erts_not_connected_dist_entries));
@@ -459,10 +597,19 @@ erts_set_dist_entry_connected(DistEntry *dep, Eterm cid, Uint flags)
     dep->status |= ERTS_DE_SFLG_CONNECTED;
     dep->flags = flags;
     dep->cid = cid;
+    erts_atomic_set_nob(&dep->input_handler,
+                        (erts_aint_t) cid);
+
     dep->connection_id++;
     dep->connection_id &= ERTS_DIST_EXT_CON_ID_MASK;
     dep->prev = NULL;
+
+    erts_atomic64_set_nob(&dep->in, 0);
+    erts_atomic64_set_nob(&dep->out, 0);
+    erts_atomic32_set_nob(&dep->qflgs,
+                          (is_internal_port(cid)
+                           ? ERTS_DE_QFLG_PORT_CTRL
+                           : ERTS_DE_QFLG_PROC_CTRL));
+
     if(flags & DFLAG_PUBLISHED) {
         dep->next = erts_visible_dist_entries;
         if(erts_visible_dist_entries) {
@@ -481,7 +628,7 @@ erts_set_dist_entry_connected(DistEntry *dep, Eterm cid, Uint flags)
         erts_hidden_dist_entries = dep;
         erts_no_of_hidden_dist_entries++;
     }
-    erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx);
+    erts_rwmtx_rwunlock(&erts_dist_table_rwmtx);
 }
 
 /* -- Node table --------------------------------------------------------- */
@@ -519,7 +666,7 @@ node_table_alloc(void *venp_tmpl)
 
     node_entries++;
 
-    erts_smp_refc_init(&enp->refc, -1);
+    erts_refc_init(&enp->refc, -1);
     enp->creation = ((ErlNode *) venp_tmpl)->creation;
     enp->sysname = ((ErlNode *) venp_tmpl)->sysname;
     enp->dist_entry = erts_find_or_insert_dist_entry(((ErlNode *) venp_tmpl)->sysname);
@@ -532,7 +679,7 @@ node_table_free(void *venp)
 {
     ErlNode *enp = (ErlNode *) venp;
 
-    ERTS_SMP_LC_ASSERT(enp != erts_this_node || erts_thr_progress_is_blocking());
+    ERTS_LC_ASSERT(enp != erts_this_node || erts_thr_progress_is_blocking());
 
     erts_deref_dist_entry(enp->dist_entry);
 #ifdef DEBUG
@@ -553,14 +700,14 @@ erts_node_table_size(void)
 #endif
     int lock = !ERTS_IS_CRASH_DUMPING;
     if (lock)
-        erts_smp_rwmtx_rlock(&erts_node_table_rwmtx);
+        erts_rwmtx_rlock(&erts_node_table_rwmtx);
 #ifdef DEBUG
     hash_get_info(&hi, &erts_node_table);
     ASSERT(node_entries == hi.objs);
 #endif
     res = hash_table_sz(&erts_node_table) + node_entries*sizeof(ErlNode);
     if (lock)
-        erts_smp_rwmtx_runlock(&erts_node_table_rwmtx);
+        erts_rwmtx_runlock(&erts_node_table_rwmtx);
     return res;
 }
 
@@ -569,10 +716,10 @@ erts_node_table_info(fmtfn_t to, void *to_arg)
 {
     int lock = !ERTS_IS_CRASH_DUMPING;
     if (lock)
-        erts_smp_rwmtx_rlock(&erts_node_table_rwmtx);
+        erts_rwmtx_rlock(&erts_node_table_rwmtx);
     hash_info(to, to_arg, &erts_node_table);
     if (lock)
-        erts_smp_rwmtx_runlock(&erts_node_table_rwmtx);
+        erts_rwmtx_runlock(&erts_node_table_rwmtx);
 }
 
 
@@ -583,26 +730,26 @@ ErlNode *erts_find_or_insert_node(Eterm sysname, Uint32 creation)
     ne.sysname = sysname;
     ne.creation = creation;
 
-    erts_smp_rwmtx_rlock(&erts_node_table_rwmtx);
+    erts_rwmtx_rlock(&erts_node_table_rwmtx);
     res = hash_get(&erts_node_table, (void *) &ne);
     if (res && res != erts_this_node) {
-        erts_aint_t refc = erts_smp_refc_inctest(&res->refc, 0);
+        erts_aint_t refc = erts_refc_inctest(&res->refc, 0);
         if (refc < 2) /* New or pending delete */
-            erts_smp_refc_inc(&res->refc, 1);
+            erts_refc_inc(&res->refc, 1);
     }
-    erts_smp_rwmtx_runlock(&erts_node_table_rwmtx);
+    erts_rwmtx_runlock(&erts_node_table_rwmtx);
     if (res)
         return res;
 
-    erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
+    erts_rwmtx_rwlock(&erts_node_table_rwmtx);
     res = hash_put(&erts_node_table, (void *) &ne);
     ASSERT(res);
     if (res != erts_this_node) {
-        erts_aint_t refc = erts_smp_refc_inctest(&res->refc, 0);
+        erts_aint_t refc = erts_refc_inctest(&res->refc, 0);
         if (refc < 2) /* New or pending delete */
-            erts_smp_refc_inc(&res->refc, 1);
+            erts_refc_inc(&res->refc, 1);
     }
-    erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
+    erts_rwmtx_rwunlock(&erts_node_table_rwmtx);
     return res;
 }
 
@@ -611,7 +758,7 @@ static void try_delete_node(void *venp)
     ErlNode *enp = (ErlNode *) venp;
     erts_aint_t refc;
 
-    erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx);
+    erts_rwmtx_rwlock(&erts_node_table_rwmtx);
     /*
      * Another thread might have looked up this node after we
      * decided to delete it (refc became zero). If so, the other
@@ -627,10 +774,10 @@ static void try_delete_node(void *venp)
      *
     * If refc > 0, the entry is in use. Keep the entry.
      */
-    refc = erts_smp_refc_dectest(&enp->refc, -1);
+    refc = erts_refc_dectest(&enp->refc, -1);
    if (refc == -1)
        (void) hash_erase(&erts_node_table, (void *) enp);
-    erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx);
+    erts_rwmtx_rwunlock(&erts_node_table_rwmtx);
 
     if (refc == 0)
         erts_schedule_delete_node(enp);
@@ -673,7 +820,7 @@ static void print_node(void *venp, void *vpndp)
         erts_print(pndp->to, pndp->to_arg, " %d", enp->creation);
 #ifdef DEBUG
         erts_print(pndp->to, pndp->to_arg, " (refc=%ld)",
-                   erts_smp_refc_read(&enp->refc, 0));
+                   erts_refc_read(&enp->refc, 0));
 #endif
         pndp->no_sysname++;
     }
@@ -696,13 +843,13 @@ void erts_print_node_info(fmtfn_t to,
     pnd.no_total = 0;
 
     if (lock)
-        erts_smp_rwmtx_rlock(&erts_node_table_rwmtx);
+        erts_rwmtx_rlock(&erts_node_table_rwmtx);
     hash_foreach(&erts_node_table, print_node, (void *) &pnd);
     if (pnd.no_sysname != 0) {
         erts_print(to, to_arg, "\n");
     }
     if (lock)
-        erts_smp_rwmtx_runlock(&erts_node_table_rwmtx);
+        erts_rwmtx_runlock(&erts_node_table_rwmtx);
 
     if(no_sysname)
         *no_sysname = pnd.no_sysname;
@@ -715,20 +862,19 @@ void erts_print_node_info(fmtfn_t to,
 void
 erts_set_this_node(Eterm sysname, Uint creation)
 {
-    ERTS_SMP_LC_ASSERT(erts_thr_progress_is_blocking());
-    ASSERT(erts_smp_refc_read(&erts_this_dist_entry->refc, 2));
+    ERTS_LC_ASSERT(erts_thr_progress_is_blocking());
+    ASSERT(2 <= de_refc_read(erts_this_dist_entry, 2));
 
-    if (erts_smp_refc_dectest(&erts_this_node->refc, 0) == 0)
+    if (erts_refc_dectest(&erts_this_node->refc, 0) == 0)
         try_delete_node(erts_this_node);
 
-    if (erts_smp_refc_dectest(&erts_this_dist_entry->refc, 0) == 0)
-        try_delete_dist_entry(erts_this_dist_entry);
+    erts_deref_dist_entry(erts_this_dist_entry);
 
     erts_this_node = NULL; /* to make sure refc is bumped for this node */
     erts_this_node = erts_find_or_insert_node(sysname, creation);
     erts_this_dist_entry = erts_this_node->dist_entry;
 
-    erts_smp_refc_inc(&erts_this_dist_entry->refc, 2);
+    erts_ref_dist_entry(erts_this_dist_entry);
 
     erts_this_node_sysname = erts_this_node_sysname_BUFFER;
     erts_snprintf(erts_this_node_sysname, sizeof(erts_this_node_sysname_BUFFER),
@@ -747,7 +893,7 @@ erts_delayed_node_table_gc(void)
 
 void erts_init_node_tables(int dd_sec)
 {
-    erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+    erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
     HashFunctions f;
     ErlNode node_tmpl;
 
@@ -758,12 +904,12 @@ void erts_init_node_tables(int dd_sec)
 
     orig_node_tab_delete_delay = node_tab_delete_delay;
 
-    rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
-    rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+    rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
+    rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED;
 
-    erts_smp_rwmtx_init_opt(&erts_node_table_rwmtx, &rwmtx_opt, "node_table", NIL,
+    erts_rwmtx_init_opt(&erts_node_table_rwmtx, &rwmtx_opt, "node_table", NIL,
         ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
-    erts_smp_rwmtx_init_opt(&erts_dist_table_rwmtx, &rwmtx_opt, "dist_table", NIL,
+    erts_rwmtx_init_opt(&erts_dist_table_rwmtx, &rwmtx_opt, "dist_table", NIL,
        ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION);
 
     f.hash = (H_FUN) dist_table_hash;
@@ -792,14 +938,14 @@ void erts_init_node_tables(int dd_sec)
     node_tmpl.creation = 0;
     erts_this_node = hash_put(&erts_node_table, &node_tmpl);
     /* +1 for erts_this_node */
-    erts_smp_refc_init(&erts_this_node->refc, 1);
+    erts_refc_init(&erts_this_node->refc, 1);
 
     ASSERT(erts_this_node->dist_entry != NULL);
     erts_this_dist_entry = erts_this_node->dist_entry;
     /* +1 for erts_this_dist_entry */
-    /* +1 for erts_this_node->dist_entry */
-    erts_smp_refc_init(&erts_this_dist_entry->refc, 2);
+    erts_ref_dist_entry(erts_this_dist_entry);
+    ASSERT(2 == de_refc_read(erts_this_dist_entry, 2));
 
     erts_this_node_sysname = erts_this_node_sysname_BUFFER;
     erts_snprintf(erts_this_node_sysname, sizeof(erts_this_node_sysname_BUFFER),
@@ -808,18 +954,16 @@ void erts_init_node_tables(int dd_sec)
     references_atoms_need_init = 1;
 }
 
-#ifdef ERTS_SMP
 #ifdef ERTS_ENABLE_LOCK_CHECK
 int erts_lc_is_de_rwlocked(DistEntry *dep)
 {
-    return erts_smp_lc_rwmtx_is_rwlocked(&dep->rwmtx);
+    return erts_lc_rwmtx_is_rwlocked(&dep->rwmtx);
 }
 int erts_lc_is_de_rlocked(DistEntry *dep)
 {
-    return erts_smp_lc_rwmtx_is_rlocked(&dep->rwmtx);
+    return erts_lc_rwmtx_is_rlocked(&dep->rwmtx);
 }
 #endif
-#endif
 
 #ifdef ERTS_ENABLE_LOCK_COUNT
 
@@ -841,10 +985,10 @@ static void erts_lcnt_enable_dist_lock_count(void *dep_raw, void *enable) {
 }
 
 void erts_lcnt_update_distribution_locks(int enable) {
-    erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx);
+    erts_rwmtx_rlock(&erts_dist_table_rwmtx);
     hash_foreach(&erts_dist_table, erts_lcnt_enable_dist_lock_count,
                  (void*)(UWord)enable);
-    erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx);
+    erts_rwmtx_runlock(&erts_dist_table_rwmtx);
 }
 #endif
 
@@ -878,6 +1022,7 @@ static Eterm AM_node_references;
 static Eterm AM_system;
 static Eterm AM_timer;
 static Eterm AM_delayed_delete_timer;
+static Eterm AM_thread_progress_delete_timer;
 
 static void setup_reference_table(void);
 static Eterm reference_table_term(Uint **hpp, ErlOffHeap *ohp, Uint *szp);
@@ -946,8 +1091,8 @@ erts_get_node_and_dist_references(struct process *proc)
     Uint *endp;
 #endif
 
-    erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
-    erts_smp_thr_progress_block();
+    erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+    erts_thr_progress_block();
 
     /* No need to lock any thing since we are alone... */
 
     if (references_atoms_need_init) {
@@ -967,6 +1112,7 @@ erts_get_node_and_dist_references(struct process *proc)
         INIT_AM(timer);
         INIT_AM(system);
         INIT_AM(delayed_delete_timer);
+        INIT_AM(thread_progress_delete_timer);
         references_atoms_need_init = 0;
     }
@@ -989,8 +1135,8 @@ erts_get_node_and_dist_references(struct process *proc)
 
     delete_reference_table();
 
-    erts_smp_thr_progress_unblock();
-    erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+    erts_thr_progress_unblock();
+    erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
     return res;
 }
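erts_get_node_and_dist_references() above uses the heaviest tool available: it releases its own main lock, blocks thread progress so every other managed thread is parked, and then walks the tables with no further locking. The idiom in schematic form, with placeholder block/unblock bodies rather than the ERTS implementation:

#include <pthread.h>

static void thr_progress_block(void)   { /* stand-in: park managed threads */ }
static void thr_progress_unblock(void) { /* stand-in: resume them */ }

static void with_world_stopped(pthread_mutex_t *my_lock, void (*walk)(void))
{
    pthread_mutex_unlock(my_lock);  /* do not hold locks others may need */
    thr_progress_block();
    walk();                         /* alone now: no locking required    */
    thr_progress_unblock();
    pthread_mutex_lock(my_lock);    /* restore the caller's lock state   */
}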
@@ -1150,6 +1296,10 @@ insert_offheap2(ErlOffHeap *oh, void *arg)
     insert_offheap(oh, a->type, a->id);
 }
 
+#define ErtsIsDistEntryBinary(Bin)                                      \
+    (((Bin)->intern.flags & BIN_FLAG_MAGIC)                             \
+     && ERTS_MAGIC_BIN_DESTRUCTOR((Bin)) == erts_dist_entry_destructor)
+
 static void
 insert_offheap(ErlOffHeap *oh, int type, Eterm id)
 {
@@ -1160,7 +1310,10 @@ insert_offheap(ErlOffHeap *oh, int type, Eterm id)
     for (u.hdr = oh->first; u.hdr; u.hdr = u.hdr->next) {
         switch (thing_subtag(u.hdr->thing_word)) {
         case REF_SUBTAG:
-            if(IsMatchProgBinary(u.mref->mb)) {
+            if (ErtsIsDistEntryBinary(u.mref->mb))
+                insert_dist_entry(ErtsBin2DistEntry(u.mref->mb),
+                                  type, id, 0);
+            else if(IsMatchProgBinary(u.mref->mb)) {
                 InsertedBin *ib;
                 int insert_bin = 1;
                 for (ib = inserted_bins; ib; ib = ib->next)
@@ -1292,39 +1445,45 @@ init_referred_dist(void *dist, void *unused)
     no_referred_dists++;
 }
 
-#ifdef ERTS_SMP
 static void
 insert_sys_msg(Eterm from, Eterm to, Eterm msg, ErlHeapFragment *bp)
 {
     insert_offheap(&bp->off_heap, HEAP_REF, to);
 }
-#endif
 
 static void
 insert_delayed_delete_node(void *state,
                            ErtsMonotonicTime timeout_pos,
                            void *vnp)
 {
-    DeclareTmpHeapNoproc(heap,3);
-    UseTmpHeapNoproc(3);
+    Eterm heap[3];
     insert_node((ErlNode *) vnp,
                 SYSTEM_REF,
                 TUPLE2(&heap[0], AM_system, AM_delayed_delete_timer));
-    UnUseTmpHeapNoproc(3);
+}
+
+static void
+insert_thr_prgr_delete_dist_entry(void *arg, ErtsThrPrgrVal thr_prgr, void *vbin)
+{
+    DistEntry *dep = ErtsBin2DistEntry(vbin);
+    Eterm heap[3];
+    insert_dist_entry(dep,
+                      SYSTEM_REF,
+                      TUPLE2(&heap[0], AM_system, AM_thread_progress_delete_timer),
+                      0);
 }
 
 static void
 insert_delayed_delete_dist_entry(void *state,
                                  ErtsMonotonicTime timeout_pos,
-                                 void *vdep)
+                                 void *vbin)
 {
-    DeclareTmpHeapNoproc(heap,3);
-    UseTmpHeapNoproc(3);
-    insert_dist_entry((DistEntry *) vdep,
+    DistEntry *dep = ErtsBin2DistEntry(vbin);
+    Eterm heap[3];
+    insert_dist_entry(dep,
                       SYSTEM_REF,
                       TUPLE2(&heap[0], AM_system, AM_delayed_delete_timer),
                       0);
-    UnUseTmpHeapNoproc(3);
 }
 
 static void
@@ -1358,9 +1517,12 @@ setup_reference_table(void)
 
     erts_debug_callback_timer_foreach(try_delete_node,
                                       insert_delayed_delete_node,
                                       NULL);
-    erts_debug_callback_timer_foreach(try_delete_dist_entry,
+    erts_debug_callback_timer_foreach(prepare_try_delete_dist_entry,
                                       insert_delayed_delete_dist_entry,
                                       NULL);
+    erts_debug_later_op_foreach(try_delete_dist_entry,
+                                insert_thr_prgr_delete_dist_entry,
+                                NULL);
 
     UseTmpHeapNoproc(3);
     insert_node(erts_this_node,
@@ -1381,9 +1543,7 @@ setup_reference_table(void)
             int mli;
             ErtsMessage *msg_list[] = {
                 proc->msg.first,
-#ifdef ERTS_SMP
                 proc->msg_inq.first,
-#endif
                 proc->msg_frag};
 
             /* Insert Heap */
@@ -1427,12 +1587,18 @@ setup_reference_table(void)
                 insert_links(ERTS_P_LINKS(proc), proc->common.id);
             if (ERTS_P_MONITORS(proc))
                 insert_monitors(ERTS_P_MONITORS(proc), proc->common.id);
+            {
+                DistEntry *dep = ERTS_PROC_GET_DIST_ENTRY(proc);
+                if (dep)
+                    insert_dist_entry(dep,
+                                      CTRL_REF,
+                                      proc->common.id,
+                                      0);
+            }
         }
     }
 
-#ifdef ERTS_SMP
     erts_foreach_sys_msg_in_q(insert_sys_msg);
-#endif
 
     /* Insert all ports */
     max = erts_ptab_max(&erts_port);
@@ -1666,7 +1832,7 @@ reference_table_term(Uint **hpp, ErlOffHeap *ohp, Uint *szp)
 
             tup = MK_2TUP(referred_nodes[i].node->sysname,
                           MK_UINT(referred_nodes[i].node->creation));
-            tup = MK_3TUP(tup, MK_UINT(erts_smp_refc_read(&referred_nodes[i].node->refc, 0)), nril);
+            tup = MK_3TUP(tup, MK_UINT(erts_refc_read(&referred_nodes[i].node->refc, 0)), nril);
             nl = MK_CONS(tup, nl);
         }
@@ -1727,7 +1893,7 @@ reference_table_term(Uint **hpp, ErlOffHeap *ohp, Uint *szp)
 
         /* DistList = [{Dist, Refc, ReferenceIdList}] */
         tup = MK_3TUP(referred_dists[i].dist->sysname,
-                      MK_UINT(erts_smp_refc_read(&referred_dists[i].dist->refc, 0)),
+                      MK_UINT(de_refc_read(referred_dists[i].dist, 0)),
                       dril);
         dl = MK_CONS(tup, dl);
     }
@@ -1786,12 +1952,12 @@ delete_reference_table(void)
 
 void
 erts_debug_test_node_tab_delayed_delete(Sint64 millisecs)
 {
-    erts_smp_thr_progress_block();
+    erts_thr_progress_block();
 
     if (millisecs < 0)
         node_tab_delete_delay = orig_node_tab_delete_delay;
     else
         node_tab_delete_delay = millisecs;
 
-    erts_smp_thr_progress_unblock();
+    erts_thr_progress_unblock();
 }
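ErtsIsDistEntryBinary() above recognizes a dist-entry handle among off-heap magic refs purely by its destructor pointer, so the destructor doubles as a type tag. A minimal sketch of the trick with an invented magic_bin layout (not the real Binary struct):

#include <stddef.h>

enum { BIN_FLAG_MAGIC = 1 };

typedef struct magic_bin {
    unsigned flags;
    int (*destructor)(struct magic_bin *);
    char data[sizeof(long)];          /* payload: the DistEntry in ERTS */
} magic_bin_t;

static int dist_entry_destructor(magic_bin_t *b) { (void) b; return 1; }

static void *bin2dist_entry(magic_bin_t *b)
{
    if (!(b->flags & BIN_FLAG_MAGIC)
        || b->destructor != dist_entry_destructor)
        return NULL;                  /* a binary, but not a dist handle */
    return (void *) b->data;
}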
diff --git a/erts/emulator/beam/erl_node_tables.h b/erts/emulator/beam/erl_node_tables.h
index 91bcb4fce1..3bba673435 100644
--- a/erts/emulator/beam/erl_node_tables.h
+++ b/erts/emulator/beam/erl_node_tables.h
@@ -44,10 +44,12 @@
 #include "erl_alloc.h"
 #include "erl_process.h"
 #include "erl_monitors.h"
-#include "erl_smp.h"
 #define ERTS_PORT_TASK_ONLY_BASIC_TYPES__
 #include "erl_port_task.h"
 #undef ERTS_PORT_TASK_ONLY_BASIC_TYPES__
+#define ERTS_BINARY_TYPES_ONLY__
+#include "erl_binary.h"
+#undef ERTS_BINARY_TYPES_ONLY__
 
 #define ERTS_NODE_TAB_DELAY_GC_DEFAULT (60)
 #define ERTS_NODE_TAB_DELAY_GC_MAX (100*1000*1000)
@@ -61,11 +63,17 @@
 #define ERTS_DE_SFLGS_ALL (ERTS_DE_SFLG_CONNECTED \
                            | ERTS_DE_SFLG_EXITING)
 
-#define ERTS_DE_QFLG_BUSY (((Uint32) 1) << 0)
-#define ERTS_DE_QFLG_EXIT (((Uint32) 1) << 1)
+#define ERTS_DE_QFLG_BUSY (((erts_aint32_t) 1) << 0)
+#define ERTS_DE_QFLG_EXIT (((erts_aint32_t) 1) << 1)
+#define ERTS_DE_QFLG_REQ_INFO (((erts_aint32_t) 1) << 2)
+#define ERTS_DE_QFLG_PORT_CTRL (((erts_aint32_t) 1) << 3)
+#define ERTS_DE_QFLG_PROC_CTRL (((erts_aint32_t) 1) << 4)
 
 #define ERTS_DE_QFLGS_ALL (ERTS_DE_QFLG_BUSY \
-                           | ERTS_DE_QFLG_EXIT)
+                           | ERTS_DE_QFLG_EXIT \
+                           | ERTS_DE_QFLG_REQ_INFO \
+                           | ERTS_DE_QFLG_PORT_CTRL \
+                           | ERTS_DE_QFLG_PROC_CTRL)
 
 #if defined(ARCH_64)
 #define ERTS_DIST_OUTPUT_BUF_DBG_PATTERN ((Uint) 0xf713f713f713f713UL)
@@ -107,12 +115,13 @@ typedef struct dist_entry_ {
     HashBucket hash_bucket;     /* Hash bucket */
     struct dist_entry_ *next;   /* Next entry in dist_table (not sorted) */
     struct dist_entry_ *prev;   /* Previous entry in dist_table (not sorted) */
-    erts_smp_refc_t refc;       /* Reference count */
 
-    erts_smp_rwmtx_t rwmtx;     /* Protects all fields below until lck_mtx. */
+    erts_rwmtx_t rwmtx;         /* Protects all fields below until lck_mtx. */
     Eterm sysname;              /* name@host atom for efficiency */
     Uint32 creation;            /* creation of connected node */
-    Eterm cid;                  /* connection handler (pid or port), NIL == free */
+    erts_atomic_t input_handler; /* Input handler */
+    Eterm cid;                  /* connection handler (pid or port),
                                    NIL == free */
     Uint32 connection_id;       /* Connection id incremented on connect */
     Uint32 status;              /* Slot status, like exiting reserved etc */
     Uint32 flags;               /* Distribution flags, like hidden,
@@ -120,7 +129,7 @@ typedef struct dist_entry_ {
     unsigned long version;      /* Protocol version */
 
-    erts_smp_mtx_t lnk_mtx;     /* Protects node_links, nlinks, and
+    erts_mtx_t lnk_mtx;         /* Protects node_links, nlinks, and
                                    monitors. */
     ErtsLink *node_links;       /* In a dist entry, node links are kept
                                    in a separate tree, while they are
@@ -132,24 +141,29 @@ typedef struct dist_entry_ {
     ErtsLink *nlinks;           /* Link tree with subtrees */
     ErtsMonitor *monitors;      /* Monitor tree */
 
-    erts_smp_mtx_t qlock;       /* Protects qflgs and out_queue */
-    Uint32 qflgs;
-    Sint qsize;
+    erts_mtx_t qlock;           /* Protects qflgs and out_queue */
+    erts_atomic32_t qflgs;
+    erts_atomic_t qsize;
+    erts_atomic64_t in;
+    erts_atomic64_t out;
     ErtsDistOutputQueue out_queue;
     struct ErtsProcList_ *suspended;
 
+    ErtsDistOutputQueue tmp_out_queue;
     ErtsDistOutputQueue finalized_out_queue;
-    erts_smp_atomic_t dist_cmd_scheduled;
+    erts_atomic_t dist_cmd_scheduled;
     ErtsPortTaskHandle dist_cmd;
 
     Uint (*send)(Port *prt, ErtsDistOutputBuf *obuf);
     struct cache* cache;        /* The atom cache */
+
+    ErtsThrPrgrLaterOp later_op;
 } DistEntry;
 
 typedef struct erl_node_ {
     HashBucket hash_bucket;     /* Hash bucket */
-    erts_smp_refc_t refc;       /* Reference count */
+    erts_refc_t refc;           /* Reference count */
     Eterm sysname;              /* name@host atom for efficiency */
     Uint32 creation;            /* Creation */
     DistEntry *dist_entry;      /* Corresponding dist entry */
@@ -158,8 +172,8 @@ typedef struct erl_node_ {
 
 extern Hash erts_dist_table;
 extern Hash erts_node_table;
-extern erts_smp_rwmtx_t erts_dist_table_rwmtx;
-extern erts_smp_rwmtx_t erts_node_table_rwmtx;
+extern erts_rwmtx_t erts_dist_table_rwmtx;
+extern erts_rwmtx_t erts_node_table_rwmtx;
 
 extern DistEntry *erts_hidden_dist_entries;
 extern DistEntry *erts_visible_dist_entries;
@@ -190,76 +204,68 @@ void erts_init_node_tables(int);
 void erts_node_table_info(fmtfn_t, void *);
 void erts_print_node_info(fmtfn_t, void *, Eterm, int*, int*);
 Eterm erts_get_node_and_dist_references(struct process *);
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
 int erts_lc_is_de_rwlocked(DistEntry *);
 int erts_lc_is_de_rlocked(DistEntry *);
 #endif
+int erts_dist_entry_destructor(Binary *bin);
+DistEntry *erts_dhandle_to_dist_entry(Eterm dhandle);
+Eterm erts_make_dhandle(Process *c_p, DistEntry *dep);
+void erts_ref_dist_entry(DistEntry *dep);
+void erts_deref_dist_entry(DistEntry *dep);
 
-#ifdef ERTS_ENABLE_LOCK_COUNT
-void erts_lcnt_update_distribution_locks(int enable);
-#endif
-
-ERTS_GLB_INLINE void erts_deref_dist_entry(DistEntry *dep);
 ERTS_GLB_INLINE void erts_deref_node_entry(ErlNode *np);
-ERTS_GLB_INLINE void erts_smp_de_rlock(DistEntry *dep);
-ERTS_GLB_INLINE void erts_smp_de_runlock(DistEntry *dep);
-ERTS_GLB_INLINE void erts_smp_de_rwlock(DistEntry *dep);
-ERTS_GLB_INLINE void erts_smp_de_rwunlock(DistEntry *dep);
-ERTS_GLB_INLINE void erts_smp_de_links_lock(DistEntry *dep);
-ERTS_GLB_INLINE void erts_smp_de_links_unlock(DistEntry *dep);
+ERTS_GLB_INLINE void erts_de_rlock(DistEntry *dep);
+ERTS_GLB_INLINE void erts_de_runlock(DistEntry *dep);
+ERTS_GLB_INLINE void erts_de_rwlock(DistEntry *dep);
+ERTS_GLB_INLINE void erts_de_rwunlock(DistEntry *dep);
+ERTS_GLB_INLINE void erts_de_links_lock(DistEntry *dep);
+ERTS_GLB_INLINE void erts_de_links_unlock(DistEntry *dep);
 
 #if ERTS_GLB_INLINE_INCL_FUNC_DEF
 
 ERTS_GLB_INLINE void
-erts_deref_dist_entry(DistEntry *dep)
-{
-    ASSERT(dep);
-    if (erts_smp_refc_dectest(&dep->refc, 0) == 0)
-        erts_schedule_delete_dist_entry(dep);
-}
-
-ERTS_GLB_INLINE void
 erts_deref_node_entry(ErlNode *np)
 {
     ASSERT(np);
-    if (erts_smp_refc_dectest(&np->refc, 0) == 0)
+    if (erts_refc_dectest(&np->refc, 0) == 0)
         erts_schedule_delete_node(np);
 }
 
 ERTS_GLB_INLINE void
-erts_smp_de_rlock(DistEntry *dep)
+erts_de_rlock(DistEntry *dep)
 {
-    erts_smp_rwmtx_rlock(&dep->rwmtx);
+    erts_rwmtx_rlock(&dep->rwmtx);
 }
 
 ERTS_GLB_INLINE void
-erts_smp_de_runlock(DistEntry *dep)
 {
-    erts_smp_rwmtx_runlock(&dep->rwmtx);
+erts_de_runlock(DistEntry *dep)
+{
+    erts_rwmtx_runlock(&dep->rwmtx);
 }
 
 ERTS_GLB_INLINE void
-erts_smp_de_rwlock(DistEntry *dep)
+erts_de_rwlock(DistEntry *dep)
 {
-    erts_smp_rwmtx_rwlock(&dep->rwmtx);
+    erts_rwmtx_rwlock(&dep->rwmtx);
 }
 
 ERTS_GLB_INLINE void
-erts_smp_de_rwunlock(DistEntry *dep)
+erts_de_rwunlock(DistEntry *dep)
 {
-    erts_smp_rwmtx_rwunlock(&dep->rwmtx);
+    erts_rwmtx_rwunlock(&dep->rwmtx);
 }
 
 ERTS_GLB_INLINE void
-erts_smp_de_links_lock(DistEntry *dep)
+erts_de_links_lock(DistEntry *dep)
 {
-    erts_smp_mtx_lock(&dep->lnk_mtx);
+    erts_mtx_lock(&dep->lnk_mtx);
 }
 
 ERTS_GLB_INLINE void
-erts_smp_de_links_unlock(DistEntry *dep)
+erts_de_links_unlock(DistEntry *dep)
 {
-    erts_smp_mtx_unlock(&dep->lnk_mtx);
+    erts_mtx_unlock(&dep->lnk_mtx);
 }
 
 #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
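The queue flags that used to be a plain Uint32 behind the qlock are now bits in an atomic word, manipulated with read_bor/read_band style operations throughout erl_port_task.c. A minimal C11 equivalent of that flag discipline (flag names abbreviated, layout invented):

#include <stdatomic.h>

#define QFLG_BUSY      (1u << 0)
#define QFLG_EXIT      (1u << 1)
#define QFLG_REQ_INFO  (1u << 2)

static atomic_uint qflgs;

static unsigned set_flags(unsigned bits)    /* like ..._read_bor_nob() */
{
    return atomic_fetch_or(&qflgs, bits);   /* returns the old value   */
}

static unsigned clear_flags(unsigned bits)  /* like ..._read_band_nob() */
{
    return atomic_fetch_and(&qflgs, ~bits);
}

static int is_busy(void)
{
    return (atomic_load(&qflgs) & QFLG_BUSY) != 0;
}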
diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h
index b64de624dd..9117eb1f72 100644
--- a/erts/emulator/beam/erl_port.h
+++ b/erts/emulator/beam/erl_port.h
@@ -119,9 +119,7 @@ typedef struct {
     void *data[ERTS_PRTSD_SIZE];
 } ErtsPrtSD;
 
-#ifdef ERTS_SMP
 typedef struct ErtsXPortsList_ ErtsXPortsList;
-#endif
 
 /*
  * Port locking:
@@ -146,17 +144,12 @@ struct _erl_drv_port {
     ErtsPortTaskSched sched;
     ErtsPortTaskHandle timeout_task;
-#ifdef ERTS_SMP
     erts_mtx_t *lock;
     ErtsXPortsList *xports;
-    erts_smp_atomic_t run_queue;
-#else
-    erts_atomic32_t refc;
-    int cleanup;
-#endif
+    erts_atomic_t run_queue;
     erts_atomic_t connected;    /* A connected process */
     Eterm caller;               /* Current caller. */
-    erts_smp_atomic_t data;     /* Data associated with port. */
+    erts_atomic_t data;         /* Data associated with port. */
     Uint bytes_in;              /* Number of bytes read */
     Uint bytes_out;             /* Number of bytes written */
@@ -173,7 +166,7 @@ struct _erl_drv_port {
     int control_flags;          /* Flags for port_control() */
     ErlDrvPDL port_data_lock;
 
-    erts_smp_atomic_t psd;      /* Port specific data */
+    erts_atomic_t psd;          /* Port specific data */
     int reds;                   /* Only used while executing driver callbacks */
 
     struct {
@@ -209,24 +202,20 @@ ERTS_GLB_INLINE ErtsRunQueue *erts_port_runq(Port *prt);
 ERTS_GLB_INLINE ErtsRunQueue *
 erts_port_runq(Port *prt)
 {
-#ifdef ERTS_SMP
     ErtsRunQueue *rq1, *rq2;
-    rq1 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue);
+    rq1 = (ErtsRunQueue *) erts_atomic_read_nob(&prt->run_queue);
     if (!rq1)
         return NULL;
     while (1) {
-        erts_smp_runq_lock(rq1);
-        rq2 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue);
+        erts_runq_lock(rq1);
+        rq2 = (ErtsRunQueue *) erts_atomic_read_nob(&prt->run_queue);
         if (rq1 == rq2)
             return rq1;
-        erts_smp_runq_unlock(rq1);
+        erts_runq_unlock(rq1);
         rq1 = rq2;
         if (!rq1)
            return NULL;
     }
-#else
-    return ERTS_RUNQ_IX(0);
-#endif
 }
 
 #endif
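erts_port_runq() above must lock a run queue that migration can swap out from under it between reading the pointer and taking the lock, so it re-reads after locking and retries until both reads agree. The same loop in a self-contained C11/pthreads sketch:

#include <stdatomic.h>
#include <pthread.h>

typedef struct runq { pthread_mutex_t mtx; } runq_t;

typedef struct { _Atomic(runq_t *) run_queue; } port_t;

static runq_t *lock_owning_runq(port_t *p)
{
    runq_t *rq = atomic_load(&p->run_queue);
    while (rq) {
        runq_t *rq2;
        pthread_mutex_lock(&rq->mtx);
        rq2 = atomic_load(&p->run_queue);
        if (rq == rq2)
            return rq;                 /* locked the current owner */
        pthread_mutex_unlock(&rq->mtx);
        rq = rq2;                      /* port migrated: retry */
    }
    return NULL;
}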
@@ -240,10 +229,10 @@ ERTS_GLB_INLINE void *erts_prtsd_set(Port *p, int ix, void *new);
 ERTS_GLB_INLINE void *
 erts_prtsd_get(Port *prt, int ix)
 {
-    ErtsPrtSD *psd = (ErtsPrtSD *) erts_smp_atomic_read_nob(&prt->psd);
+    ErtsPrtSD *psd = (ErtsPrtSD *) erts_atomic_read_nob(&prt->psd);
     if (!psd)
         return NULL;
-    ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
+    ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
     return psd->data[ix];
 }
 
@@ -254,16 +243,14 @@ erts_prtsd_set(Port *prt, int ix, void *data)
     void *old;
     int i;
 
-    psd = (ErtsPrtSD *) erts_smp_atomic_read_nob(&prt->psd);
+    psd = (ErtsPrtSD *) erts_atomic_read_nob(&prt->psd);
 
     if (psd) {
-#ifdef ERTS_SMP
 #ifdef ETHR_ORDERED_READ_DEPEND
         ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
 #else
         ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreStore);
 #endif
-#endif
         old = psd->data[ix];
         psd->data[ix] = data;
         return old;
@@ -275,7 +262,7 @@ erts_prtsd_set(Port *prt, int ix, void *data)
     new_psd = erts_alloc(ERTS_ALC_T_PRTSD, sizeof(ErtsPrtSD));
     for (i = 0; i < ERTS_PRTSD_SIZE; i++)
         new_psd->data[i] = NULL;
-    psd = (ErtsPrtSD *) erts_smp_atomic_cmpxchg_mb(&prt->psd,
+    psd = (ErtsPrtSD *) erts_atomic_cmpxchg_mb(&prt->psd,
                                                (erts_aint_t) new_psd,
                                                (erts_aint_t) NULL);
     if (psd)
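erts_prtsd_set() installs the per-port data block lazily: allocate a candidate, publish it with a compare-exchange, and discard the candidate if another thread won the race. A stripped-down C11 sketch of that pattern (names invented):

#include <stdatomic.h>
#include <stdlib.h>

typedef struct { void *data[4]; } psd_t;

static _Atomic(psd_t *) psd_slot;

static psd_t *get_or_install_psd(void)
{
    psd_t *expected = NULL;
    psd_t *fresh = calloc(1, sizeof(psd_t));
    if (!fresh)
        return NULL;
    if (atomic_compare_exchange_strong(&psd_slot, &expected, fresh))
        return fresh;    /* we published it */
    free(fresh);         /* lost the race: use the winner's block */
    return expected;
}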
@@ -359,15 +346,10 @@ Eterm erts_request_io_bytes(Process *c_p);
 void print_port_info(Port *, fmtfn_t, void *);
 void erts_port_free(Port *);
-#ifndef ERTS_SMP
-void erts_port_cleanup(Port *);
-#endif
 void erts_fire_port_monitor(Port *prt, Eterm ref);
-#ifdef ERTS_SMP
 int erts_port_handle_xports(Port *);
-#endif
 
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+#if defined(ERTS_ENABLE_LOCK_CHECK)
 int erts_lc_is_port_locked(Port *);
 #endif
 
@@ -376,9 +358,9 @@ ERTS_GLB_INLINE void erts_port_dec_refc(Port *prt);
 ERTS_GLB_INLINE void erts_port_add_refc(Port *prt, Sint32 add_refc);
 ERTS_GLB_INLINE Sint erts_port_read_refc(Port *prt);
 
-ERTS_GLB_INLINE int erts_smp_port_trylock(Port *prt);
-ERTS_GLB_INLINE void erts_smp_port_lock(Port *prt);
-ERTS_GLB_INLINE void erts_smp_port_unlock(Port *prt);
+ERTS_GLB_INLINE int erts_port_trylock(Port *prt);
+ERTS_GLB_INLINE void erts_port_lock(Port *prt);
+ERTS_GLB_INLINE void erts_port_unlock(Port *prt);
 
 #if ERTS_GLB_INLINE_INCL_FUNC_DEF
 
@@ -407,35 +389,27 @@ ERTS_GLB_INLINE Sint erts_port_read_refc(Port *prt)
 }
 
 ERTS_GLB_INLINE int
-erts_smp_port_trylock(Port *prt)
+erts_port_trylock(Port *prt)
 {
-#ifdef ERTS_SMP
     /* *Need* to be a managed thread */
-    ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+    ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread());
     return erts_mtx_trylock(prt->lock);
-#else
-    return 0;
-#endif
 }
 
 ERTS_GLB_INLINE void
-erts_smp_port_lock(Port *prt)
+erts_port_lock(Port *prt)
 {
-#ifdef ERTS_SMP
     /* *Need* to be a managed thread */
-    ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+    ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread());
     erts_mtx_lock(prt->lock);
-#endif
 }
 
 ERTS_GLB_INLINE void
-erts_smp_port_unlock(Port *prt)
+erts_port_unlock(Port *prt)
 {
-#ifdef ERTS_SMP
     /* *Need* to be a managed thread */
-    ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+    ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread());
     erts_mtx_unlock(prt->lock);
-#endif
 }
 
 #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
@@ -466,9 +440,7 @@ extern const Port erts_invalid_port;
 int erts_is_port_ioq_empty(Port *);
 void erts_terminate_port(Port *);
 
-#ifdef ERTS_SMP
 Port *erts_de2port(DistEntry *, Process *, ErtsProcLocks);
-#endif
 
 ERTS_GLB_INLINE Port *erts_pix2port(int);
 ERTS_GLB_INLINE Port *erts_port_lookup_raw(Eterm);
@@ -476,11 +448,9 @@ ERTS_GLB_INLINE Port *erts_port_lookup(Eterm, Uint32);
 ERTS_GLB_INLINE Port*erts_id2port(Eterm id);
 ERTS_GLB_INLINE Port *erts_id2port_sflgs(Eterm, Process *, ErtsProcLocks, Uint32);
 ERTS_GLB_INLINE void erts_port_release(Port *);
-#ifdef ERTS_SMP
 ERTS_GLB_INLINE Port *erts_thr_port_lookup(Eterm id, Uint32 invalid_sflgs);
 ERTS_GLB_INLINE Port *erts_thr_id2port_sflgs(Eterm id, Uint32 invalid_sflgs);
 ERTS_GLB_INLINE void erts_thr_port_release(Port *prt);
-#endif
 ERTS_GLB_INLINE Port *erts_thr_drvport2port(ErlDrvPort, int);
 ERTS_GLB_INLINE Port *erts_drvport2port_state(ErlDrvPort, erts_aint32_t *);
 ERTS_GLB_INLINE Eterm erts_drvport2id(ErlDrvPort);
@@ -506,7 +476,7 @@ erts_port_lookup_raw(Eterm id)
 {
     Port *prt;
 
-    ERTS_SMP_LC_ASSERT(erts_thr_progress_lc_is_delaying());
+    ERTS_LC_ASSERT(erts_thr_progress_lc_is_delaying());
 
     if (is_not_internal_port(id))
         return NULL;
@@ -535,7 +505,7 @@ erts_id2port(Eterm id)
     Port *prt;
 
     /* Only allowed to be called from managed threads */
-    ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+    ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread());
 
     if (is_not_internal_port(id))
         return NULL;
@@ -546,10 +516,10 @@ erts_id2port(Eterm id)
     if (!prt || prt->common.id != id)
         return NULL;
 
-    erts_smp_port_lock(prt);
+    erts_port_lock(prt);
     state = erts_atomic32_read_nob(&prt->state);
     if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP) {
-        erts_smp_port_unlock(prt);
+        erts_port_unlock(prt);
         return NULL;
     }
 
@@ -562,14 +532,12 @@ erts_id2port_sflgs(Eterm id,
                    Process *c_p, ErtsProcLocks c_p_locks,
                    Uint32 invalid_sflgs)
 {
-#ifdef ERTS_SMP
     int no_proc_locks = !c_p || !c_p_locks;
-#endif
     erts_aint32_t state;
     Port *prt;
 
     /* Only allowed to be called from managed threads */
-    ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
+    ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread());
 
     if (is_not_internal_port(id))
         return NULL;
@@ -580,21 +548,17 @@ erts_id2port_sflgs(Eterm id,
     if (!prt || prt->common.id != id)
         return NULL;
 
-#ifdef ERTS_SMP
     if (no_proc_locks)
-        erts_smp_port_lock(prt);
-    else if (erts_smp_port_trylock(prt) == EBUSY) {
+        erts_port_lock(prt);
+    else if (erts_port_trylock(prt) == EBUSY) {
         /* Unlock process locks, and acquire locks in lock order... */
-        erts_smp_proc_unlock(c_p, c_p_locks);
-        erts_smp_port_lock(prt);
-        erts_smp_proc_lock(c_p, c_p_locks);
+        erts_proc_unlock(c_p, c_p_locks);
+        erts_port_lock(prt);
+        erts_proc_lock(c_p, c_p_locks);
     }
-#endif
     state = erts_atomic32_read_nob(&prt->state);
     if (state & invalid_sflgs) {
-#ifdef ERTS_SMP
-        erts_smp_port_unlock(prt);
-#endif
+        erts_port_unlock(prt);
         return NULL;
     }
 
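The EBUSY branch in erts_id2port_sflgs() above is the standard deadlock-avoidance move: if the port lock cannot be taken opportunistically, release the already-held process locks and then take everything in the fixed lock order. In miniature, with plain pthreads rather than the ERTS lock checker:

#include <pthread.h>

/* Lock order rule (assumed here): port lock before re-taking proc lock. */
static void lock_port_with_proc_locked(pthread_mutex_t *proc_lock,
                                       pthread_mutex_t *port_lock)
{
    if (pthread_mutex_trylock(port_lock) != 0) {  /* contended */
        pthread_mutex_unlock(proc_lock);   /* give up what we hold     */
        pthread_mutex_lock(port_lock);     /* block in the safe order  */
        pthread_mutex_lock(proc_lock);     /* re-acquire our own locks */
    }
}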
@@ -605,18 +569,10 @@ ERTS_GLB_INLINE void
 erts_port_release(Port *prt)
 {
     /* Only allowed to be called from managed threads */
-    ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
-#ifdef ERTS_SMP
-    erts_smp_port_unlock(prt);
-#else
-    if (prt->cleanup) {
-        prt->cleanup = 0;
-        erts_port_cleanup(prt);
-    }
-#endif
+    ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread());
+    erts_port_unlock(prt);
 }
 
-#ifdef ERTS_SMP
 /*
  * erts_thr_id2port_sflgs() and erts_port_dec_refc(prt) can
  * be used by unmanaged threads in the SMP case.
@@ -702,13 +658,10 @@ ERTS_GLB_INLINE void
 erts_thr_port_release(Port *prt)
 {
     erts_mtx_unlock(prt->lock);
-#ifdef ERTS_SMP
     if (!erts_thr_progress_is_managed_thread())
         erts_port_dec_refc(prt);
-#endif
 }
 
-#endif
 
 ERTS_GLB_INLINE Port *
 erts_thr_drvport2port(ErlDrvPort drvport, int lock_pdl)
@@ -724,7 +677,7 @@ erts_thr_drvport2port(ErlDrvPort drvport, int lock_pdl)
 #ifdef ERTS_ENABLE_LOCK_CHECK
     if (!ERTS_IS_CRASH_DUMPING) {
         if (erts_lc_is_emu_thr()) {
-            ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+            ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
             ERTS_LC_ASSERT(!prt->port_data_lock
                            || erts_lc_mtx_is_locked(&prt->port_data_lock->mtx));
         }
@@ -753,7 +706,7 @@ erts_drvport2port_state(ErlDrvPort drvport, erts_aint32_t *statep)
 //    ERTS_LC_ASSERT(erts_lc_is_emu_thr());
     if (prt == ERTS_INVALID_ERL_DRV_PORT)
         return ERTS_INVALID_ERL_DRV_PORT;
-    ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)
+    ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)
                    || ERTS_IS_CRASH_DUMPING);
     /*
      * This state check is only needed since a driver callback
@@ -810,23 +763,21 @@ erts_port_driver_callback_epilogue(Port *prt, erts_aint32_t *statep)
     int reds = 0;
     erts_aint32_t state;
 
-    ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+    ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
 
     state = erts_atomic32_read_nob(&prt->state);
     if ((state & ERTS_PORT_SFLG_CLOSING) && erts_is_port_ioq_empty(prt)) {
         reds += ERTS_PORT_REDS_TERMINATE;
         erts_terminate_port(prt);
         state = erts_atomic32_read_nob(&prt->state);
-        ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+        ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
     }
 
-#ifdef ERTS_SMP
     if (prt->xports) {
         reds += erts_port_handle_xports(prt);
-        ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
+        ERTS_LC_ASSERT(erts_lc_is_port_locked(prt));
         ASSERT(!prt->xports);
     }
-#endif
 
     if (statep)
         *statep = state;
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 4d7a86398a..a588477320 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -36,6 +36,7 @@
 #include "erl_check_io.h"
 #include "dtrace-wrapper.h"
 #include "lttng-wrapper.h"
+#include "erl_check_io.h"
 #include <stdarg.h>
 
 /*
@@ -83,15 +84,13 @@ static void chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_q
 #define LTTNG_DRIVER(TRACEPOINT, PP) do {} while(0)
 #endif
 
-#define ERTS_SMP_LC_VERIFY_RQ(RQ, PP)                                   \
+#define ERTS_LC_VERIFY_RQ(RQ, PP)                                       \
     do {                                                                \
-        ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));           \
-        ERTS_SMP_LC_ASSERT((RQ) == ((ErtsRunQueue *)                    \
-                       erts_smp_atomic_read_nob(&(PP)->run_queue)));    \
+        ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq));                   \
+        ERTS_LC_ASSERT((RQ) == ((ErtsRunQueue *)                        \
+                       erts_atomic_read_nob(&(PP)->run_queue)));        \
     } while (0)
 
-erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
-
 #define ERTS_PT_STATE_SCHEDULED         0
 #define ERTS_PT_STATE_ABORTED           1
 #define ERTS_PT_STATE_EXECUTING         2
@@ -99,7 +98,6 @@ erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
 typedef union {
     struct { /* I/O tasks */
         ErlDrvEvent event;
-        ErlDrvEventData event_data;
     } io;
     struct {
        ErtsProc2PortSigCallback callback;
@@ -108,7 +106,7 @@ typedef union {
 } ErtsPortTaskTypeData;
 
 struct ErtsPortTask_ {
-    erts_smp_atomic32_t state;
+    erts_atomic32_t state;
     ErtsPortTaskType type;
     union {
         struct {
@@ -126,9 +124,7 @@ struct ErtsPortTaskHandleList_ {
     ErtsPortTaskHandle handle;
     union {
         ErtsPortTaskHandleList *next;
-#ifdef ERTS_SMP
         ErtsThrPrgrLaterOp release;
-#endif
     } u;
 };
 
@@ -151,35 +147,29 @@ static void begin_port_cleanup(Port *pp,
                                ErtsPortTask **execq,
                                int *processing_busy_q_p);
 
-ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(port_task,
-                                 ErtsPortTask,
-                                 1000,
-                                 ERTS_ALC_T_PORT_TASK)
+ERTS_THR_PREF_QUICK_ALLOC_IMPL(port_task,
+                               ErtsPortTask,
+                               1000,
+                               ERTS_ALC_T_PORT_TASK)
 ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(busy_caller_table,
                                  ErtsPortTaskBusyCallerTable,
                                  50,
                                  ERTS_ALC_T_BUSY_CALLER_TAB)
 
-#ifdef ERTS_SMP
 static void
 call_port_task_free(void *vptp)
 {
     port_task_free((ErtsPortTask *) vptp);
 }
-#endif
 
 static ERTS_INLINE void
 schedule_port_task_free(ErtsPortTask *ptp)
 {
-#ifdef ERTS_SMP
     erts_schedule_thr_prgr_later_cleanup_op(call_port_task_free,
                                             (void *) ptp,
                                             &ptp->u.release,
                                             sizeof(ErtsPortTask));
-#else
-    port_task_free(ptp);
-#endif
 }
 
 static ERTS_INLINE ErtsPortTask *
@@ -199,7 +189,7 @@ p2p_sig_data_init(ErtsPortTask *ptp)
 
     ptp->type = ERTS_PORT_TASK_PROC_SIG;
     ptp->u.alive.flags = ERTS_PT_FLG_SIG_DEP;
-    erts_smp_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED);
+    erts_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED);
 
     ASSERT(ptp == p2p_sig_data_to_task(&ptp->u.alive.td.psig.data));
 
@@ -290,7 +280,7 @@ popped_from_busy_queue(Port *pp, ErtsPortTask *ptp, int last)
 #ifdef DEBUG
         erts_aint32_t flags =
 #endif
-            erts_smp_atomic32_read_band_nob(
+            erts_atomic32_read_band_nob(
                 &pp->sched.flags,
                 ~ERTS_PTS_FLG_HAVE_BUSY_TASKS);
         ASSERT(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS);
@@ -337,7 +327,7 @@ busy_wait_move_to_busy_queue(Port *pp, ErtsPortTask *ptp)
 #ifdef DEBUG
     flags =
 #endif
-        erts_smp_atomic32_read_bor_nob(&pp->sched.flags,
+        erts_atomic32_read_bor_nob(&pp->sched.flags,
                                    ERTS_PTS_FLG_HAVE_BUSY_TASKS);
     ASSERT(!(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS));
@@ -477,7 +467,7 @@ no_sig_dep_move_from_busyq(Port *pp)
     int bix;
     erts_aint32_t flags =
 #endif
-        erts_smp_atomic32_read_band_nob(
+        erts_atomic32_read_band_nob(
             &pp->sched.flags,
             ~ERTS_PTS_FLG_HAVE_BUSY_TASKS);
     ASSERT(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS);
@@ -510,11 +500,11 @@ chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_queue)
     if (!first) {
         ASSERT(!tabp);
         ASSERT(!pp->sched.taskq.local.busy.last);
-        ASSERT(!(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS));
+        ASSERT(!(erts_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS));
         return;
     }
 
-    ASSERT(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS);
+    ASSERT(erts_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS);
     ASSERT(tabp);
 
     tot_count = 0;
@@ -570,13 +560,13 @@ chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_queue)
 static ERTS_INLINE void
 reset_port_task_handle(ErtsPortTaskHandle *pthp)
 {
-    erts_smp_atomic_set_relb(pthp, (erts_aint_t) NULL);
+    erts_atomic_set_relb(pthp, (erts_aint_t) NULL);
 }
 
 static ERTS_INLINE ErtsPortTask *
 handle2task(ErtsPortTaskHandle *pthp)
 {
-    return (ErtsPortTask *) erts_smp_atomic_read_acqb(pthp);
+    return (ErtsPortTask *) erts_atomic_read_acqb(pthp);
 }
 
 static ERTS_INLINE void
@@ -593,8 +583,9 @@ reset_executed_io_task_handle(ErtsPortTask *ptp)
 {
     if (ptp->u.alive.handle) {
         ASSERT(ptp == handle2task(ptp->u.alive.handle));
-        erts_io_notify_port_task_executed(ptp->u.alive.handle);
-        reset_port_task_handle(ptp->u.alive.handle);
+        /* The port task handle is reset inside task_executed */
+        erts_io_notify_port_task_executed(ptp->type, ptp->u.alive.handle,
+                                          reset_port_task_handle);
     }
 }
 
@@ -603,7 +594,7 @@ set_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp)
 {
     ptp->u.alive.handle = pthp;
     if (pthp) {
-        erts_smp_atomic_set_relb(pthp, (erts_aint_t) ptp);
+        erts_atomic_set_relb(pthp, (erts_aint_t) ptp);
         ASSERT(ptp == handle2task(ptp->u.alive.handle));
     }
 }
@@ -617,7 +608,7 @@ set_tmp_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp)
         * IMPORTANT! Task either need to be aborted, or task handle
         * need to be detached before thread progress has been made.
         */
-        erts_smp_atomic_set_relb(pthp, (erts_aint_t) ptp);
+        erts_atomic_set_relb(pthp, (erts_aint_t) ptp);
     }
 }
 
@@ -635,20 +626,20 @@ check_unset_busy_port_q(Port *pp,
     int resume_procs = 0;
 
     ASSERT(bpq);
-    ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
+    ERTS_LC_ASSERT(erts_lc_is_port_locked(pp));
 
     erts_port_task_sched_lock(&pp->sched);
-    qsize = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->size);
-    low = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low);
+    qsize = (ErlDrvSizeT) erts_atomic_read_nob(&bpq->size);
+    low = (ErlDrvSizeT) erts_atomic_read_nob(&bpq->low);
     if (qsize < low) {
         erts_aint32_t mask = ~(ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q
                                | ERTS_PTS_FLG_BUSY_PORT_Q);
-        flags = erts_smp_atomic32_read_band_relb(&pp->sched.flags, mask);
+        flags = erts_atomic32_read_band_relb(&pp->sched.flags, mask);
         if ((flags & ERTS_PTS_FLGS_BUSY) == ERTS_PTS_FLG_BUSY_PORT_Q)
             resume_procs = 1;
     }
     else if (flags & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q) {
-        flags = erts_smp_atomic32_read_band_relb(&pp->sched.flags,
+        flags = erts_atomic32_read_band_relb(&pp->sched.flags,
                                              ~ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q);
         flags &= ~ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q;
     }
@@ -673,16 +664,16 @@ aborted_proc2port_data(Port *pp, ErlDrvSizeT size)
 
     bpq = pp->sched.taskq.bpq;
 
-    qsz = (ErlDrvSizeT) erts_smp_atomic_add_read_acqb(&bpq->size,
+    qsz = (ErlDrvSizeT) erts_atomic_add_read_acqb(&bpq->size,
                                                   (erts_aint_t) -size);
     ASSERT(qsz + size > qsz);
 
-    flags = erts_smp_atomic32_read_nob(&pp->sched.flags);
+    flags = erts_atomic32_read_nob(&pp->sched.flags);
     ASSERT(pp->sched.taskq.bpq);
     if ((flags & (ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q
                   | ERTS_PTS_FLG_BUSY_PORT_Q)) != ERTS_PTS_FLG_BUSY_PORT_Q)
         return;
-    if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low))
-        erts_smp_atomic32_read_bor_nob(&pp->sched.flags,
+    if (qsz < (ErlDrvSizeT) erts_atomic_read_nob(&bpq->low))
+        erts_atomic32_read_bor_nob(&pp->sched.flags,
                                    ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q);
 }
 
@@ -700,13 +691,13 @@ dequeued_proc2port_data(Port *pp, ErlDrvSizeT size)
 
     bpq = pp->sched.taskq.bpq;
 
-    qsz = (ErlDrvSizeT) erts_smp_atomic_add_read_acqb(&bpq->size,
+    qsz = (ErlDrvSizeT) erts_atomic_add_read_acqb(&bpq->size,
                                                   (erts_aint_t) -size);
    ASSERT(qsz + size > qsz);
 
-    flags = erts_smp_atomic32_read_nob(&pp->sched.flags);
+    flags = erts_atomic32_read_nob(&pp->sched.flags);
     if (!(flags & ERTS_PTS_FLG_BUSY_PORT_Q))
         return;
-    if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_acqb(&bpq->low))
+    if (qsz < (ErlDrvSizeT) erts_atomic_read_acqb(&bpq->low))
         check_unset_busy_port_q(pp, flags, bpq);
 }
 
@@ -719,19 +710,19 @@ enqueue_proc2port_data(Port *pp,
     if (sigdp && bpq) {
         ErlDrvSizeT size = erts_proc2port_sig_command_data_size(sigdp);
         if (size) {
-            erts_aint_t asize = erts_smp_atomic_add_read_acqb(&bpq->size,
+            erts_aint_t asize = erts_atomic_add_read_acqb(&bpq->size,
                                                           (erts_aint_t) size);
             ErlDrvSizeT qsz = (ErlDrvSizeT) asize;
 
             ASSERT(qsz - size < qsz);
 
             if (!(flags & ERTS_PTS_FLG_BUSY_PORT_Q) && qsz > bpq->high) {
-                flags = erts_smp_atomic32_read_bor_acqb(&pp->sched.flags,
+                flags = erts_atomic32_read_bor_acqb(&pp->sched.flags,
                                                     ERTS_PTS_FLG_BUSY_PORT_Q);
                 flags |= ERTS_PTS_FLG_BUSY_PORT_Q;
-                qsz = (ErlDrvSizeT) erts_smp_atomic_read_acqb(&bpq->size);
-                if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low)) {
-                    flags = (erts_smp_atomic32_read_bor_relb(
+                qsz = (ErlDrvSizeT) erts_atomic_read_acqb(&bpq->size);
+                if (qsz < (ErlDrvSizeT) erts_atomic_read_nob(&bpq->low)) {
+                    flags = (erts_atomic32_read_bor_relb(
                                  &pp->sched.flags,
                                  ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q));
                     flags |= ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q;
@@ -779,18 +770,18 @@ erl_drv_busy_msgq_limits(ErlDrvPort dport, ErlDrvSizeT *lowp, ErlDrvSizeT *highp
         erts_aint32_t flags;
         pp->sched.taskq.bpq = NULL;
         flags = ~(ERTS_PTS_FLG_BUSY_PORT_Q|ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q);
-        flags = erts_smp_atomic32_read_band_acqb(&pp->sched.flags, flags);
+        flags = erts_atomic32_read_band_acqb(&pp->sched.flags, flags);
         if ((flags & ERTS_PTS_FLGS_BUSY) == ERTS_PTS_FLG_BUSY_PORT_Q)
             resume_procs = 1;
     }
     else {
        if (!low)
-            low = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low);
+            low = (ErlDrvSizeT) erts_atomic_read_nob(&bpq->low);
        else {
            if (bpq->high < low)
                bpq->high = low;
-            erts_smp_atomic_set_relb(&bpq->low, (erts_aint_t) low);
+            erts_atomic_set_relb(&bpq->low, (erts_aint_t) low);
            written = 1;
        }
 
@@ -799,19 +790,19 @@ erl_drv_busy_msgq_limits(ErlDrvPort dport, ErlDrvSizeT *lowp, ErlDrvSizeT *highp
        else {
            if (low > high) {
                low = high;
-                erts_smp_atomic_set_relb(&bpq->low, (erts_aint_t) low);
+                erts_atomic_set_relb(&bpq->low, (erts_aint_t) low);
            }
            bpq->high = high;
            written = 1;
        }
 
        if (written) {
-            ErlDrvSizeT size = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->size);
+            ErlDrvSizeT size = (ErlDrvSizeT) erts_atomic_read_nob(&bpq->size);
            if (size > high)
-                erts_smp_atomic32_read_bor_relb(&pp->sched.flags,
+                erts_atomic32_read_bor_relb(&pp->sched.flags,
                                             ERTS_PTS_FLG_BUSY_PORT_Q);
            else if (size < low)
-                erts_smp_atomic32_read_bor_relb(&pp->sched.flags,
+                erts_atomic32_read_bor_relb(&pp->sched.flags,
                                             ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q);
        }
     }
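The busy-queue machinery above is a classic two-watermark hysteresis: crossing the high mark sets the busy flag, and the flag is only cleared once the size has fallen below the low mark again, so producers are not toggled on and off around a single threshold. A compact C11 model (thresholds and flag layout invented):

#include <stdatomic.h>

#define FLG_BUSY 1u

static atomic_long qsize;
static atomic_uint qflags;
static long low = 4096, high = 8192;

static void on_enqueue(long sz)
{
    if (atomic_fetch_add(&qsize, sz) + sz > high)
        atomic_fetch_or(&qflags, FLG_BUSY);        /* suspend senders */
}

static void on_dequeue(long sz)
{
    if (atomic_fetch_sub(&qsize, sz) - sz < low)
        atomic_fetch_and(&qflags, ~FLG_BUSY);      /* resume senders  */
}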
@@ -830,25 +821,19 @@ erl_drv_busy_msgq_limits(ErlDrvPort dport, ErlDrvSizeT *lowp, ErlDrvSizeT *highp
  * No-suspend handles.
  */
 
-#ifdef ERTS_SMP
 static void
 free_port_task_handle_list(void *vpthlp)
 {
     erts_free(ERTS_ALC_T_PT_HNDL_LIST, vpthlp);
 }
-#endif
 
 static void
 schedule_port_task_handle_list_free(ErtsPortTaskHandleList *pthlp)
 {
-#ifdef ERTS_SMP
     erts_schedule_thr_prgr_later_cleanup_op(free_port_task_handle_list,
                                             (void *) pthlp,
                                             &pthlp->u.release,
                                             sizeof(ErtsPortTaskHandleList));
-#else
-    erts_free(ERTS_ALC_T_PT_HNDL_LIST, pthlp);
-#endif
 }
 
 static ERTS_INLINE void
@@ -891,7 +876,7 @@ get_free_nosuspend_handles(Port *pp)
 {
     ErtsPortTaskHandleList *nshp, *last_nshp = NULL;
 
-    ERTS_SMP_LC_ASSERT(erts_port_task_sched_lock_is_locked(&pp->sched));
+    ERTS_LC_ASSERT(erts_port_task_sched_lock_is_locked(&pp->sched));
 
     nshp = pp->sched.taskq.local.busy.nosuspend;
 
@@ -907,7 +892,7 @@ get_free_nosuspend_handles(Port *pp)
         pp->sched.taskq.local.busy.nosuspend = last_nshp->u.next;
         last_nshp->u.next = NULL;
         if (!pp->sched.taskq.local.busy.nosuspend)
-            erts_smp_atomic32_read_band_nob(&pp->sched.flags,
+            erts_atomic32_read_band_nob(&pp->sched.flags,
                                         ~ERTS_PTS_FLG_HAVE_NS_TASKS);
     }
     return nshp;
@@ -930,7 +915,7 @@ free_nosuspend_handles(ErtsPortTaskHandleList *free_nshp)
 static ERTS_INLINE void
 enqueue_port(ErtsRunQueue *runq, Port *pp)
 {
-    ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+    ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq));
     pp->sched.next = NULL;
     if (runq->ports.end) {
         ASSERT(runq->ports.start);
@@ -944,19 +929,17 @@ enqueue_port(ErtsRunQueue *runq, Port *pp)
     runq->ports.end = pp;
     ASSERT(runq->ports.start && runq->ports.end);
 
-    erts_smp_inc_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL);
+    erts_inc_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL);
 
-#ifdef ERTS_SMP
     if (ERTS_RUNQ_FLGS_GET_NOB(runq) & ERTS_RUNQ_FLG_HALTING)
         erts_non_empty_runq(runq);
-#endif
 }
 
 static ERTS_INLINE Port *
 pop_port(ErtsRunQueue *runq)
 {
     Port *pp = runq->ports.start;
-    ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));
+    ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq));
     if (!pp) {
         ASSERT(!runq->ports.end);
     }
@@ -966,7 +949,7 @@ pop_port(ErtsRunQueue *runq)
             ASSERT(runq->ports.end == pp);
             runq->ports.end = NULL;
         }
-        erts_smp_dec_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL);
+        erts_dec_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL);
     }
 
     ASSERT(runq->ports.start || !runq->ports.end);
@@ -993,7 +976,7 @@ enqueue_task(Port *pp,
     if (ns_pthlp)
         fail_flags |= ERTS_PTS_FLG_BUSY_PORT;
     erts_port_task_sched_lock(&pp->sched);
-    flags = erts_smp_atomic32_read_nob(&pp->sched.flags);
+    flags = erts_atomic32_read_nob(&pp->sched.flags);
     if (flags & fail_flags)
         res = 0;
     else {
@@ -1024,7 +1007,7 @@ enqueue_task(Port *pp,
 static ERTS_INLINE void
 prepare_exec(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
 {
-    erts_aint32_t act = erts_smp_atomic32_read_nob(&pp->sched.flags);
+    erts_aint32_t act = erts_atomic32_read_nob(&pp->sched.flags);
 
     if (!pp->sched.taskq.local.busy.first || (act & ERTS_PTS_FLG_BUSY_PORT)) {
         *execqp = pp->sched.taskq.local.first;
@@ -1045,7 +1028,7 @@ prepare_exec(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
         new &= ~ERTS_PTS_FLG_IN_RUNQ;
         new |= ERTS_PTS_FLG_EXEC;
 
-        act = erts_smp_atomic32_cmpxchg_nob(&pp->sched.flags, new, exp);
+        act = erts_atomic32_cmpxchg_nob(&pp->sched.flags, new, exp);
 
         ASSERT(act & ERTS_PTS_FLG_IN_RUNQ);
 
@@ -1072,7 +1055,7 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
 
     *execq = NULL;
 
-    act = erts_smp_atomic32_read_nob(&pp->sched.flags);
+    act = erts_atomic32_read_nob(&pp->sched.flags);
     if (act & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q)
         act = check_unset_busy_port_q(pp, act, pp->sched.taskq.bpq);
@@ -1089,7 +1072,7 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
         if (act & ERTS_PTS_FLG_HAVE_TASKS)
             new |= ERTS_PTS_FLG_IN_RUNQ;
 
-        act = erts_smp_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp);
+        act = erts_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp);
 
         ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_IN_RUNQ));
         ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_EXEC_IMM));
@@ -1115,7 +1098,7 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
 static ERTS_INLINE erts_aint32_t
 select_queue_for_exec(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
 {
-    erts_aint32_t flags = erts_smp_atomic32_read_nob(&pp->sched.flags);
+    erts_aint32_t flags = erts_atomic32_read_nob(&pp->sched.flags);
 
     if (flags & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q)
         flags = check_unset_busy_port_q(pp, flags, pp->sched.taskq.bpq);
@@ -1225,7 +1208,7 @@ fetch_in_queue(Port *pp, ErtsPortTask **execqp)
     if (ptp)
         *execqp = ptp->u.alive.next;
     else
-        erts_smp_atomic32_read_band_nob(&pp->sched.flags,
+        erts_atomic32_read_band_nob(&pp->sched.flags,
                                     ~ERTS_PTS_FLG_HAVE_TASKS);
 
@@ -1288,7 +1271,7 @@ erl_drv_consume_timeslice(ErlDrvPort dprt, int percent)
 void
 erts_port_task_tmp_handle_detach(ErtsPortTaskHandle *pthp)
 {
-    ERTS_SMP_LC_ASSERT(erts_thr_progress_lc_is_delaying());
+    ERTS_LC_ASSERT(erts_thr_progress_lc_is_delaying());
     reset_port_task_handle(pthp);
 }
 
@@ -1301,9 +1284,7 @@ erts_port_task_abort(ErtsPortTaskHandle *pthp)
 {
     int res;
     ErtsPortTask *ptp;
-#ifdef ERTS_SMP
     ErtsThrPrgrDelayHandle dhndl = erts_thr_progress_unmanaged_delay();
-#endif
 
     ptp = handle2task(pthp);
     if (!ptp)
@@ -1313,41 +1294,25 @@ erts_port_task_abort(ErtsPortTaskHandle *pthp)
 #ifdef DEBUG
         ErtsPortTaskHandle *saved_pthp = ptp->u.alive.handle;
-        ERTS_SMP_READ_MEMORY_BARRIER;
-        old_state = erts_smp_atomic32_read_nob(&ptp->state);
+        ERTS_THR_READ_MEMORY_BARRIER;
+        old_state = erts_atomic32_read_nob(&ptp->state);
         if (old_state == ERTS_PT_STATE_SCHEDULED) {
             ASSERT(!saved_pthp || saved_pthp == pthp);
         }
 #endif
 
-        old_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state,
+        old_state = erts_atomic32_cmpxchg_nob(&ptp->state,
                                               ERTS_PT_STATE_ABORTED,
                                               ERTS_PT_STATE_SCHEDULED);
         if (old_state != ERTS_PT_STATE_SCHEDULED)
            res = -1; /* Task already aborted, executing, or executed */
        else {
-            reset_port_task_handle(pthp);
-
-            switch (ptp->type) {
-            case ERTS_PORT_TASK_INPUT:
-            case ERTS_PORT_TASK_OUTPUT:
-            case ERTS_PORT_TASK_EVENT:
-                ASSERT(erts_smp_atomic_read_nob(
-                           &erts_port_task_outstanding_io_tasks) > 0);
-                erts_smp_atomic_dec_relb(&erts_port_task_outstanding_io_tasks);
-                break;
-            default:
-                break;
-            }
-
            res = 0;
        }
     }
 
-#ifdef ERTS_SMP
     erts_thr_progress_unmanaged_continue(dhndl);
-#endif
 
     return res;
 }
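erts_port_task_abort() above claims a task with a single compare-exchange from SCHEDULED to ABORTED; any other outcome means another thread already aborted it or it is executing or done. The protocol reduced to a C11 sketch:

#include <stdatomic.h>

enum { SCHEDULED, ABORTED, EXECUTING };

typedef struct { atomic_int state; } task_t;

static int task_abort(task_t *t)
{
    int expected = SCHEDULED;
    if (!atomic_compare_exchange_strong(&t->state, &expected, ABORTED))
        return -1;   /* lost: already aborted, executing, or executed */
    return 0;        /* we own the abort; safe to reclaim the handle  */
}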
ERTS_THR_PRGR_DHANDLE_MANAGED) erts_thr_progress_unmanaged_continue(dhndl); -#endif schedule_port_task_handle_list_free(pthlp); continue; } #ifdef DEBUG saved_pthp = ptp->u.alive.handle; - ERTS_SMP_READ_MEMORY_BARRIER; - old_state = erts_smp_atomic32_read_nob(&ptp->state); + ERTS_THR_READ_MEMORY_BARRIER; + old_state = erts_atomic32_read_nob(&ptp->state); if (old_state == ERTS_PT_STATE_SCHEDULED) { ASSERT(saved_pthp == pthp); } #endif - old_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state, + old_state = erts_atomic32_cmpxchg_nob(&ptp->state, ERTS_PT_STATE_ABORTED, ERTS_PT_STATE_SCHEDULED); if (old_state != ERTS_PT_STATE_SCHEDULED) { /* Task already aborted, executing, or executed */ -#ifdef ERTS_SMP if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) erts_thr_progress_unmanaged_continue(dhndl); -#endif schedule_port_task_handle_list_free(pthlp); continue; } @@ -1424,10 +1381,8 @@ erts_port_task_abort_nosuspend_tasks(Port *pp) type = ptp->type; td = ptp->u.alive.td; -#ifdef ERTS_SMP if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) erts_thr_progress_unmanaged_continue(dhndl); -#endif schedule_port_task_handle_list_free(pthlp); abort_nosuspend_task(pp, type, &td, pp->sched.taskq.bpq != NULL); @@ -1446,10 +1401,8 @@ erts_port_task_schedule(Eterm id, { ErtsProc2PortSigData *sigdp = NULL; ErtsPortTaskHandleList *ns_pthlp = NULL; -#ifdef ERTS_SMP ErtsRunQueue *xrunq; ErtsThrPrgrDelayHandle dhndl; -#endif ErtsRunQueue *runq; Port *pp; ErtsPortTask *ptp = NULL; @@ -1460,19 +1413,15 @@ erts_port_task_schedule(Eterm id, ASSERT(is_internal_port(id)); -#ifdef ERTS_SMP dhndl = erts_thr_progress_unmanaged_delay(); -#endif pp = erts_port_lookup_raw(id); -#ifdef ERTS_SMP if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) { if (pp) erts_port_inc_refc(pp); erts_thr_progress_unmanaged_continue(dhndl); } -#endif if (type != ERTS_PORT_TASK_PROC_SIG) { if (!pp) @@ -1483,7 +1432,7 @@ erts_port_task_schedule(Eterm id, ptp->type = type; ptp->u.alive.flags = 0; - erts_smp_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED); + erts_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED); set_handle(ptp, pthp); } @@ -1495,16 +1444,6 @@ erts_port_task_schedule(Eterm id, va_start(argp, type); ptp->u.alive.td.io.event = va_arg(argp, ErlDrvEvent); va_end(argp); - erts_smp_atomic_inc_relb(&erts_port_task_outstanding_io_tasks); - break; - } - case ERTS_PORT_TASK_EVENT: { - va_list argp; - va_start(argp, type); - ptp->u.alive.td.io.event = va_arg(argp, ErlDrvEvent); - ptp->u.alive.td.io.event_data = va_arg(argp, ErlDrvEventData); - va_end(argp); - erts_smp_atomic_inc_relb(&erts_port_task_outstanding_io_tasks); break; } case ERTS_PORT_TASK_PROC_SIG: { @@ -1559,7 +1498,7 @@ erts_port_task_schedule(Eterm id, if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC))) new |= ERTS_PTS_FLG_IN_RUNQ; - act = erts_smp_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp); + act = erts_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp); if (exp == act) { if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC))) @@ -1584,44 +1523,38 @@ erts_port_task_schedule(Eterm id, if (!runq) ERTS_INTERNAL_ERROR("Missing run-queue"); -#ifdef ERTS_SMP xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL); - ERTS_SMP_LC_ASSERT(runq != xrunq); - ERTS_SMP_LC_VERIFY_RQ(runq, pp); + ERTS_LC_ASSERT(runq != xrunq); + ERTS_LC_VERIFY_RQ(runq, pp); if (xrunq) { /* Emigrate port ... 
*/ - erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); - erts_smp_runq_unlock(runq); + erts_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); + erts_runq_unlock(runq); runq = erts_port_runq(pp); if (!runq) ERTS_INTERNAL_ERROR("Missing run-queue"); } -#endif enqueue_port(runq, pp); - erts_smp_runq_unlock(runq); + erts_runq_unlock(runq); - erts_smp_notify_inc_runq(runq); + erts_notify_inc_runq(runq); done: if (prof_runnable_ports) erts_port_task_sched_unlock(&pp->sched); -#ifdef ERTS_SMP if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) erts_port_dec_refc(pp); -#endif return 0; abort_nosuspend: -#ifdef ERTS_SMP if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) erts_port_dec_refc(pp); -#endif abort_nosuspend_task(pp, ptp->type, &ptp->u.alive.td, 0); @@ -1635,10 +1568,8 @@ abort_nosuspend: fail: -#ifdef ERTS_SMP if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) erts_port_dec_refc(pp); -#endif if (ptp) { abort_signal_task(pp, ERTS_PROC2PORT_SIG_ABORT, @@ -1658,14 +1589,14 @@ erts_port_task_free_port(Port *pp) erts_aint32_t flags; ErtsRunQueue *runq; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(pp)); ASSERT(!(erts_atomic32_read_nob(&pp->state) & ERTS_PORT_SFLGS_DEAD)); runq = erts_port_runq(pp); if (!runq) ERTS_INTERNAL_ERROR("Missing run-queue"); erts_port_task_sched_lock(&pp->sched); - flags = erts_smp_atomic32_read_bor_relb(&pp->sched.flags, + flags = erts_atomic32_read_bor_relb(&pp->sched.flags, ERTS_PTS_FLG_EXIT); erts_port_task_sched_unlock(&pp->sched); erts_atomic32_read_bset_relb(&pp->state, @@ -1675,7 +1606,7 @@ erts_port_task_free_port(Port *pp) | ERTS_PORT_SFLG_FREE), ERTS_PORT_SFLG_FREE); - erts_smp_runq_unlock(runq); + erts_runq_unlock(runq); if (!(flags & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC))) begin_port_cleanup(pp, NULL, NULL); @@ -1688,13 +1619,12 @@ erts_port_task_free_port(Port *pp) * scheduling of processes. Run-queue lock should be held by caller. 
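/*
 * Reviewer note, not part of the patch: with the non-SMP build gone, every
 * pp->sched.flags update in this file maps the old erts_smp_atomic32_* call
 * directly onto erts_atomic32_*, and the optimistic cmpxchg-retry shape used
 * by prepare_exec()/finalize_exec() is unchanged. A minimal stand-alone model
 * of that shape in C11 stdatomic; the PTS_FLG_* values and claim_exec() are
 * illustrative names, not ERTS APIs:
 */
#include <stdatomic.h>
#include <stdint.h>

#define PTS_FLG_IN_RUNQ (1u << 0)
#define PTS_FLG_EXEC    (1u << 1)

/* Atomically trade IN_RUNQ for EXEC, retrying until no other thread
 * modified the flag word between our read and our cmpxchg. */
static uint32_t claim_exec(_Atomic uint32_t *flags)
{
    uint32_t act = atomic_load_explicit(flags, memory_order_relaxed);
    uint32_t new;
    do {
        new = (act & ~PTS_FLG_IN_RUNQ) | PTS_FLG_EXEC;
    } while (!atomic_compare_exchange_weak_explicit(flags, &act, new,
                                                    memory_order_relaxed,
                                                    memory_order_relaxed));
    return new;
}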
*/ -int +void erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) { Port *pp; ErtsPortTask *execq; int processing_busy_q; - int res = 0; int vreds = 0; int reds = 0; erts_aint_t io_tasks_executed = 0; @@ -1705,17 +1635,16 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) ErtsSchedulerData *esdp = runq->scheduler; ERTS_MSACC_PUSH_STATE_M(); - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); pp = pop_port(runq); if (!pp) { - res = 0; goto done; } - ERTS_SMP_LC_VERIFY_RQ(runq, pp); + ERTS_LC_VERIFY_RQ(runq, pp); - erts_smp_runq_unlock(runq); + erts_runq_unlock(runq); *curr_port_pp = pp; @@ -1723,19 +1652,19 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) Uint old = ERTS_PORT_SCHED_ID(pp, esdp->no); int migrated = old && old != esdp->no; - erts_smp_spin_lock(&erts_sched_stat.lock); + erts_spin_lock(&erts_sched_stat.lock); erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_executed++; erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].executed++; if (migrated) { erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_migrated++; erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].migrated++; } - erts_smp_spin_unlock(&erts_sched_stat.lock); + erts_spin_unlock(&erts_sched_stat.lock); } prepare_exec(pp, &execq, &processing_busy_q); - erts_smp_port_lock(pp); + erts_port_lock(pp); /* trace port scheduling, in */ if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) { @@ -1757,7 +1686,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) if (!ptp) break; - task_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state, + task_state = erts_atomic32_cmpxchg_nob(&ptp->state, ERTS_PT_STATE_EXECUTING, ERTS_PT_STATE_SCHEDULED); if (task_state != ERTS_PT_STATE_SCHEDULED) { @@ -1769,8 +1698,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) start_time = erts_timestamp_millis(); } - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(pp)); + ERTS_CHK_NO_PROC_LOCKS; ASSERT(pp->drv_ptr); switch (ptp->type) { @@ -1812,17 +1741,6 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) reset_executed_io_task_handle(ptp); io_tasks_executed++; break; - case ERTS_PORT_TASK_EVENT: - reds = ERTS_PORT_REDS_EVENT; - ASSERT((state & ERTS_PORT_SFLGS_DEAD) == 0); - DTRACE_DRIVER(driver_event, pp); - LTTNG_DRIVER(driver_event, pp); - (*pp->drv_ptr->event)((ErlDrvData) pp->drv_data, - ptp->u.alive.td.io.event, - ptp->u.alive.td.io.event_data); - reset_executed_io_task_handle(ptp); - io_tasks_executed++; - break; case ERTS_PORT_TASK_PROC_SIG: { ErtsProc2PortSigData *sigdp = &ptp->u.alive.td.psig.data; reset_handle(ptp); @@ -1887,17 +1805,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) erts_unblock_fpe(fpe_was_unmasked); ERTS_MSACC_POP_STATE_M(); - - if (io_tasks_executed) { - ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) - >= io_tasks_executed); - erts_smp_atomic_add_relb(&erts_port_task_outstanding_io_tasks, - -1*io_tasks_executed); - } - -#ifdef ERTS_SMP - ASSERT(runq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue)); -#endif + ASSERT(runq == (ErtsRunQueue *) erts_atomic_read_nob(&pp->run_queue)); active = finalize_exec(pp, &execq, processing_busy_q); @@ -1907,54 +1815,43 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) *curr_port_pp = NULL; - erts_smp_runq_lock(runq); + erts_runq_lock(runq); if (active) { -#ifdef ERTS_SMP ErtsRunQueue *xrunq; -#endif 
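/*
 * Reviewer note, not part of the patch: erts_port_task_execute() now returns
 * void because the erts_port_task_outstanding_io_tasks bookkeeping it used to
 * report on is deleted; with I/O checking moved to per-scheduler pollsets and
 * dedicated poll threads (see erts_no_poll_threads later in the patch) nothing
 * consumes that information anymore. Claiming a task still races against
 * erts_port_task_abort(): each side cmpxchgs the task state and the loser
 * skips the task. A stand-alone C11 sketch of that handshake; pt_state,
 * try_claim() and try_abort() are illustrative names:
 */
#include <stdatomic.h>

enum pt_state { PT_SCHEDULED, PT_ABORTED, PT_EXECUTING };

/* Executor side: returns nonzero if we own the task; zero means it was
 * aborted or taken concurrently, so the caller just fetches the next one. */
static int try_claim(_Atomic int *state)
{
    int exp = PT_SCHEDULED;
    return atomic_compare_exchange_strong(state, &exp, PT_EXECUTING);
}

/* Abort side: the mirror transition; at most one of the two cmpxchg
 * calls can succeed for any given task. */
static int try_abort(_Atomic int *state)
{
    int exp = PT_SCHEDULED;
    return atomic_compare_exchange_strong(state, &exp, PT_ABORTED);
}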
ASSERT(!(erts_atomic32_read_nob(&pp->state) & ERTS_PORT_SFLGS_DEAD)); -#ifdef ERTS_SMP xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL); - ERTS_SMP_LC_ASSERT(runq != xrunq); - ERTS_SMP_LC_VERIFY_RQ(runq, pp); + ERTS_LC_ASSERT(runq != xrunq); + ERTS_LC_VERIFY_RQ(runq, pp); if (!xrunq) { -#endif enqueue_port(runq, pp); /* No need to notify ourselves about inc in runq. */ -#ifdef ERTS_SMP } else { /* Emigrate port... */ - erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); - erts_smp_runq_unlock(runq); + erts_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); + erts_runq_unlock(runq); xrunq = erts_port_runq(pp); ASSERT(xrunq); enqueue_port(xrunq, pp); - erts_smp_runq_unlock(xrunq); - erts_smp_notify_inc_runq(xrunq); + erts_runq_unlock(xrunq); + erts_notify_inc_runq(xrunq); - erts_smp_runq_lock(runq); + erts_runq_lock(runq); } -#endif } done: - res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) - != (erts_aint_t) 0); runq->scheduler->reductions += reds; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); ERTS_PORT_REDUCTIONS_EXECUTED(esdp, runq, reds); - - return res; } -#ifdef ERTS_SMP static void release_port(void *vport) { @@ -1970,7 +1867,6 @@ schedule_release_port(void *vport) { &pp->common.u.release); } -#endif static void begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) @@ -1981,7 +1877,7 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) ErtsPortTaskHandleList *free_nshp = NULL; ErtsProcList *plp; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(pp)); /* * Abort remaining tasks... @@ -2054,11 +1950,11 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) qs[i] = ptp->u.alive.next; /* Normal case here is aborted tasks... */ - state = erts_smp_atomic32_read_nob(&ptp->state); + state = erts_atomic32_read_nob(&ptp->state); if (state == ERTS_PT_STATE_ABORTED) goto aborted_port_task; - state = erts_smp_atomic32_cmpxchg_nob(&ptp->state, + state = erts_atomic32_cmpxchg_nob(&ptp->state, ERTS_PT_STATE_EXECUTING, ERTS_PT_STATE_SCHEDULED); if (state != ERTS_PT_STATE_SCHEDULED) { @@ -2085,13 +1981,6 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) DO_WRITE, 1); break; - case ERTS_PORT_TASK_EVENT: - erts_stale_drv_select(pp->common.id, - ERTS_Port2ErlDrvPort(pp), - ptp->u.alive.td.io.event, - 0, - 1); - break; case ERTS_PORT_TASK_DIST_CMD: break; case ERTS_PORT_TASK_PROC_SIG: { @@ -2122,7 +2011,7 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) } } - erts_smp_atomic32_read_band_nob(&pp->sched.flags, + erts_atomic32_read_band_nob(&pp->sched.flags, ~(ERTS_PTS_FLG_HAVE_BUSY_TASKS |ERTS_PTS_FLG_HAVE_TASKS |ERTS_PTS_FLGS_BUSY)); @@ -2164,7 +2053,6 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) /* * Schedule cleanup of port structure... */ -#ifdef ERTS_SMP /* We might not be a scheduler, eg. 
tracing to port we are sys_msg_dispatcher */ if (!erts_get_scheduler_data()) { erts_schedule_misc_aux_work(1, schedule_release_port, (void*)pp); @@ -2174,19 +2062,15 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) (void *) pp, &pp->common.u.release); } -#else - pp->cleanup = 1; -#endif } -#ifdef ERTS_SMP void erts_enqueue_port(ErtsRunQueue *rq, Port *pp) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); - ASSERT(rq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue)); - ASSERT(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_IN_RUNQ); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); + ASSERT(rq == (ErtsRunQueue *) erts_atomic_read_nob(&pp->run_queue)); + ASSERT(erts_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_IN_RUNQ); enqueue_port(rq, pp); } @@ -2194,16 +2078,15 @@ Port * erts_dequeue_port(ErtsRunQueue *rq) { Port *pp; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); pp = pop_port(rq); ASSERT(!pp - || rq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue)); - ASSERT(!pp || (erts_smp_atomic32_read_nob(&pp->sched.flags) + || rq == (ErtsRunQueue *) erts_atomic_read_nob(&pp->run_queue)); + ASSERT(!pp || (erts_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_IN_RUNQ)); return pp; } -#endif /* * Initialize the module. @@ -2211,8 +2094,7 @@ erts_dequeue_port(ErtsRunQueue *rq) void erts_port_task_init(void) { - erts_smp_atomic_init_nob(&erts_port_task_outstanding_io_tasks, - (erts_aint_t) 0); - init_port_task_alloc(); + init_port_task_alloc(erts_no_schedulers + erts_no_poll_threads + + 1); /* aux_thread */ init_busy_caller_table_alloc(); } diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h index 39f403b443..ae78a7d8a3 100644 --- a/erts/emulator/beam/erl_port_task.h +++ b/erts/emulator/beam/erl_port_task.h @@ -27,11 +27,11 @@ #ifndef ERTS_PORT_TASK_H_BASIC_TYPES__ #define ERTS_PORT_TASK_H_BASIC_TYPES__ #include "erl_sys_driver.h" -#include "erl_smp.h" +#include "erl_threads.h" #define ERL_PORT_GET_PORT_TYPE_ONLY__ #include "erl_port.h" #undef ERL_PORT_GET_PORT_TYPE_ONLY__ -typedef erts_smp_atomic_t ErtsPortTaskHandle; +typedef erts_atomic_t ErtsPortTaskHandle; #endif #ifndef ERTS_PORT_TASK_ONLY_BASIC_TYPES__ @@ -56,17 +56,11 @@ typedef erts_smp_atomic_t ErtsPortTaskHandle; typedef enum { ERTS_PORT_TASK_INPUT, ERTS_PORT_TASK_OUTPUT, - ERTS_PORT_TASK_EVENT, ERTS_PORT_TASK_TIMEOUT, ERTS_PORT_TASK_DIST_CMD, ERTS_PORT_TASK_PROC_SIG } ErtsPortTaskType; -#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS -/* NOTE: Do not access any of the exported variables directly */ -extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks; -#endif - #define ERTS_PTS_FLG_IN_RUNQ (((erts_aint32_t) 1) << 0) #define ERTS_PTS_FLG_EXEC (((erts_aint32_t) 1) << 1) #define ERTS_PTS_FLG_HAVE_TASKS (((erts_aint32_t) 1) << 2) @@ -98,8 +92,8 @@ extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks; typedef struct { ErlDrvSizeT high; - erts_smp_atomic_t low; - erts_smp_atomic_t size; + erts_atomic_t low; + erts_atomic_t size; } ErtsPortTaskBusyPortQ; typedef struct ErtsPortTask_ ErtsPortTask; @@ -124,10 +118,8 @@ typedef struct { } in; ErtsPortTaskBusyPortQ *bpq; } taskq; - erts_smp_atomic32_t flags; -#ifdef ERTS_SMP + erts_atomic32_t flags; erts_mtx_t mtx; -#endif } ErtsPortTaskSched; ERTS_GLB_INLINE void erts_port_task_handle_init(ErtsPortTaskHandle *pthp); @@ -142,22 +134,18 @@ ERTS_GLB_INLINE void erts_port_task_sched_unlock(ErtsPortTaskSched *ptsp); ERTS_GLB_INLINE int
erts_port_task_sched_lock_is_locked(ErtsPortTaskSched *ptsp); ERTS_GLB_INLINE void erts_port_task_sched_enter_exiting_state(ErtsPortTaskSched *ptsp); -#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS -ERTS_GLB_INLINE int erts_port_task_have_outstanding_io_tasks(void); -#endif - #if ERTS_GLB_INLINE_INCL_FUNC_DEF ERTS_GLB_INLINE void erts_port_task_handle_init(ErtsPortTaskHandle *pthp) { - erts_smp_atomic_init_nob(pthp, (erts_aint_t) NULL); + erts_atomic_init_nob(pthp, (erts_aint_t) NULL); } ERTS_GLB_INLINE int erts_port_task_is_scheduled(ErtsPortTaskHandle *pthp) { - return ((void *) erts_smp_atomic_read_acqb(pthp)) != NULL; + return ((void *) erts_atomic_read_acqb(pthp)) != NULL; } ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp, @@ -165,9 +153,9 @@ ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp, { if (bpq) { erts_aint_t low = (erts_aint_t) ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_LOW; - erts_smp_atomic_init_nob(&bpq->low, low); + erts_atomic_init_nob(&bpq->low, low); bpq->high = (ErlDrvSizeT) ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_HIGH; - erts_smp_atomic_init_nob(&bpq->size, (erts_aint_t) 0); + erts_atomic_init_nob(&bpq->size, (erts_aint_t) 0); } ptsp->taskq.bpq = bpq; } @@ -175,9 +163,7 @@ ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp, ERTS_GLB_INLINE void erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id) { -#ifdef ERTS_SMP char *lock_str = "port_sched_lock"; -#endif ptsp->next = NULL; ptsp->taskq.local.busy.first = NULL; ptsp->taskq.local.busy.last = NULL; @@ -186,32 +172,26 @@ erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id) ptsp->taskq.local.first = NULL; ptsp->taskq.in.first = NULL; ptsp->taskq.in.last = NULL; - erts_smp_atomic32_init_nob(&ptsp->flags, 0); -#ifdef ERTS_SMP + erts_atomic32_init_nob(&ptsp->flags, 0); erts_mtx_init(&ptsp->mtx, lock_str, instr_id, ERTS_LOCK_FLAGS_CATEGORY_IO); -#endif } ERTS_GLB_INLINE void erts_port_task_sched_lock(ErtsPortTaskSched *ptsp) { -#ifdef ERTS_SMP erts_mtx_lock(&ptsp->mtx); -#endif } ERTS_GLB_INLINE void erts_port_task_sched_unlock(ErtsPortTaskSched *ptsp) { -#ifdef ERTS_SMP erts_mtx_unlock(&ptsp->mtx); -#endif } ERTS_GLB_INLINE int erts_port_task_sched_lock_is_locked(ErtsPortTaskSched *ptsp) { -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) return erts_lc_mtx_is_locked(&ptsp->mtx); #else return 0; @@ -222,35 +202,25 @@ erts_port_task_sched_lock_is_locked(ErtsPortTaskSched *ptsp) ERTS_GLB_INLINE void erts_port_task_fini_sched(ErtsPortTaskSched *ptsp) { -#ifdef ERTS_SMP erts_mtx_destroy(&ptsp->mtx); -#endif } ERTS_GLB_INLINE void erts_port_task_sched_enter_exiting_state(ErtsPortTaskSched *ptsp) { - erts_smp_atomic32_read_bor_nob(&ptsp->flags, ERTS_PTS_FLG_EXITING); -} - -#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS - -ERTS_GLB_INLINE int -erts_port_task_have_outstanding_io_tasks(void) -{ - return (erts_smp_atomic_read_acqb(&erts_port_task_outstanding_io_tasks) - != 0); + erts_atomic32_read_bor_nob(&ptsp->flags, ERTS_PTS_FLG_EXITING); } -#endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */ - #endif #ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS -int erts_port_task_execute(ErtsRunQueue *, Port **); +void erts_port_task_execute(ErtsRunQueue *, Port **); void erts_port_task_init(void); #endif +/* generated for 'port_task' quick allocator */ +void erts_port_task_pre_alloc_init_thread(void); + void erts_port_task_tmp_handle_detach(ErtsPortTaskHandle *); int erts_port_task_abort(ErtsPortTaskHandle *); @@ -265,10 +235,8 @@ 
ErtsProc2PortSigData *erts_port_task_alloc_p2p_sig_data(void); ErtsProc2PortSigData *erts_port_task_alloc_p2p_sig_data_extra(size_t extra, void **extra_ptr); void erts_port_task_free_p2p_sig_data(ErtsProc2PortSigData *sigdp); -#ifdef ERTS_SMP void erts_enqueue_port(ErtsRunQueue *rq, Port *pp); Port *erts_dequeue_port(ErtsRunQueue *rq); -#endif #undef ERTS_INCLUDE_SCHEDULER_INTERNALS #endif /* ERL_PORT_TASK_H__ */ #endif /* ERTS_PORT_TASK_ONLY_BASIC_TYPES__ */ diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index b72bac00c1..61fdf86a56 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -24,6 +24,8 @@ # include "config.h" #endif +#define ERTS_WANT_BREAK_HANDLING + #include <stddef.h> /* offsetof() */ #include "sys.h" #include "erl_vm.h" @@ -49,6 +51,8 @@ #define ERTS_WANT_TIMER_WHEEL_API #include "erl_time.h" #include "erl_nfunc_sched.h" +#include "erl_check_io.h" +#include "erl_poll.h" #define ERTS_CHECK_TIME_REDS CONTEXT_REDS #define ERTS_DELAYED_WAKEUP_INFINITY (~(Uint64) 0) @@ -127,18 +131,16 @@ runq_got_work_to_execute_flags(Uint32 flags) return !ERTS_IS_RUNQ_EMPTY_FLGS(flags); } -#ifdef ERTS_SMP static ERTS_INLINE int runq_got_work_to_execute(ErtsRunQueue *rq) { return runq_got_work_to_execute_flags(ERTS_RUNQ_FLGS_GET_NOB(rq)); } -#endif #undef RUNQ_READ_RQ #undef RUNQ_SET_RQ -#define RUNQ_READ_RQ(X) ((ErtsRunQueue *) erts_smp_atomic_read_nob((X))) -#define RUNQ_SET_RQ(X, RQ) erts_smp_atomic_set_nob((X), (erts_aint_t) (RQ)) +#define RUNQ_READ_RQ(X) ((ErtsRunQueue *) erts_atomic_read_nob((X))) +#define RUNQ_SET_RQ(X, RQ) erts_atomic_set_nob((X), (erts_aint_t) (RQ)) #ifdef DEBUG # if defined(ARCH_64) @@ -172,7 +174,6 @@ extern BeamInstr beam_exit[]; extern BeamInstr beam_continue_exit[]; int ERTS_WRITE_UNLIKELY(erts_default_spo_flags) = SPO_ON_HEAP_MSGQ; -int ERTS_WRITE_UNLIKELY(erts_eager_check_io) = 1; int ERTS_WRITE_UNLIKELY(erts_sched_compact_load); int ERTS_WRITE_UNLIKELY(erts_sched_balance_util) = 0; Uint ERTS_WRITE_UNLIKELY(erts_no_schedulers); @@ -194,57 +195,51 @@ static UWord thr_prgr_later_cleanup_op_threshold = ERTS_THR_PRGR_LATER_CLEANUP_O ErtsPTab erts_proc erts_align_attribute(ERTS_CACHE_LINE_SIZE); int erts_sched_thread_suggested_stack_size = -1; -#ifdef ERTS_DIRTY_SCHEDULERS int erts_dcpu_sched_thread_suggested_stack_size = -1; int erts_dio_sched_thread_suggested_stack_size = -1; -#endif #ifdef ERTS_ENABLE_LOCK_CHECK ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE]; #endif -static struct { +static struct ErtsSchedBusyWait_ { int aux_work; int tse; int sys_schedule; } sched_busy_wait; -#ifdef ERTS_SMP int erts_disable_proc_not_running_opt; static ErtsAuxWorkData *aux_thread_aux_work_data; +static ErtsAuxWorkData *poll_thread_aux_work_data; #define ERTS_SCHDLR_SSPND_CHNG_NMSB (((erts_aint32_t) 1) << 0) #define ERTS_SCHDLR_SSPND_CHNG_MSB (((erts_aint32_t) 1) << 1) #define ERTS_SCHDLR_SSPND_CHNG_ONLN (((erts_aint32_t) 1) << 2) #define ERTS_SCHDLR_SSPND_CHNG_DCPU_ONLN (((erts_aint32_t) 1) << 3) -typedef struct { +typedef struct ErtsMultiSchedulingBlock_ { int ongoing; ErtsProcList *blckrs; ErtsProcList *chngq; } ErtsMultiSchedulingBlock; -typedef struct { +typedef struct ErtsSchedTypeCounters_ { Uint32 normal; -#ifdef ERTS_DIRTY_SCHEDULERS Uint32 dirty_cpu; Uint32 dirty_io; -#endif } ErtsSchedTypeCounters; -static struct { - erts_smp_mtx_t mtx; +static struct ErtsSchedSuspend_ { + erts_mtx_t mtx; ErtsSchedTypeCounters online; ErtsSchedTypeCounters curr_online; ErtsSchedTypeCounters active; - 
erts_smp_atomic32_t changing; + erts_atomic32_t changing; ErtsProcList *chngq; Eterm changer; ErtsMultiSchedulingBlock nmsb; /* Normal multi Scheduling Block */ ErtsMultiSchedulingBlock msb; /* Multi Scheduling Block */ -#ifdef ERTS_DIRTY_SCHEDULERS ErtsSchedType last_msb_dirty_type; -#endif } schdlr_sspnd; static void init_scheduler_suspend(void); @@ -253,10 +248,8 @@ static ERTS_INLINE Uint32 schdlr_sspnd_eq_nscheds(ErtsSchedTypeCounters *val1p, ErtsSchedTypeCounters *val2p) { int res = val1p->normal == val2p->normal; -#ifdef ERTS_DIRTY_SCHEDULERS res &= val1p->dirty_cpu == val2p->dirty_cpu; res &= val1p->dirty_io == val2p->dirty_io; -#endif return res; } @@ -267,16 +260,10 @@ schdlr_sspnd_get_nscheds(ErtsSchedTypeCounters *valp, switch (type) { case ERTS_SCHED_NORMAL: return valp->normal; -#ifdef ERTS_DIRTY_SCHEDULERS case ERTS_SCHED_DIRTY_CPU: return valp->dirty_cpu; case ERTS_SCHED_DIRTY_IO: return valp->dirty_io; -#else - case ERTS_SCHED_DIRTY_CPU: - case ERTS_SCHED_DIRTY_IO: - return 0; -#endif default: ERTS_INTERNAL_ERROR("Invalid scheduler type"); return 0; @@ -288,10 +275,8 @@ static ERTS_INLINE Uint32 schdlr_sspnd_get_nscheds_tot(ErtsSchedTypeCounters *valp) { Uint32 res = valp->normal; -#ifdef ERTS_DIRTY_SCHEDULERS res += valp->dirty_cpu; res += valp->dirty_io; -#endif return res; } #endif @@ -306,14 +291,12 @@ schdlr_sspnd_dec_nscheds(ErtsSchedTypeCounters *valp, case ERTS_SCHED_NORMAL: valp->normal--; break; -#ifdef ERTS_DIRTY_SCHEDULERS case ERTS_SCHED_DIRTY_CPU: valp->dirty_cpu--; break; case ERTS_SCHED_DIRTY_IO: valp->dirty_io--; break; -#endif default: ERTS_INTERNAL_ERROR("Invalid scheduler type"); } @@ -327,14 +310,12 @@ schdlr_sspnd_inc_nscheds(ErtsSchedTypeCounters *valp, case ERTS_SCHED_NORMAL: valp->normal++; break; -#ifdef ERTS_DIRTY_SCHEDULERS case ERTS_SCHED_DIRTY_CPU: valp->dirty_cpu++; break; case ERTS_SCHED_DIRTY_IO: valp->dirty_io++; break; -#endif default: ERTS_INTERNAL_ERROR("Invalid scheduler type"); } @@ -348,25 +329,23 @@ schdlr_sspnd_set_nscheds(ErtsSchedTypeCounters *valp, case ERTS_SCHED_NORMAL: valp->normal = no; break; -#ifdef ERTS_DIRTY_SCHEDULERS case ERTS_SCHED_DIRTY_CPU: valp->dirty_cpu = no; break; case ERTS_SCHED_DIRTY_IO: valp->dirty_io = no; break; -#endif default: ERTS_INTERNAL_ERROR("Invalid scheduler type"); } } static struct { - erts_smp_mtx_t update_mtx; - erts_smp_atomic32_t no_runqs; + erts_mtx_t update_mtx; + erts_atomic32_t no_runqs; int last_active_runqs; int forced_check_balance; - erts_smp_atomic32_t checking_balance; + erts_atomic32_t checking_balance; int halftime; int full_reds_history_index; struct { @@ -384,51 +363,38 @@ do { \ balance_info.prev_rise.reds = (REDS); \ } while (0) -#endif erts_sched_stat_t erts_sched_stat; -#ifdef USE_THREADS static erts_tsd_key_t ERTS_WRITE_UNLIKELY(sched_data_key); -#endif -static erts_smp_atomic32_t function_calls; - -#ifdef ERTS_SMP -static erts_smp_atomic32_t doing_sys_schedule; -static erts_smp_atomic32_t no_empty_run_queues; +static erts_atomic32_t no_empty_run_queues; long erts_runq_supervision_interval = 0; static ethr_event runq_supervision_event; static erts_tid_t runq_supervisor_tid; static erts_atomic_t runq_supervisor_sleeping; -#else /* !ERTS_SMP */ -ErtsSchedulerData *erts_scheduler_data; -#endif ErtsAlignedRunQueue * ERTS_WRITE_UNLIKELY(erts_aligned_run_queues); Uint ERTS_WRITE_UNLIKELY(erts_no_run_queues); -#ifdef ERTS_DIRTY_SCHEDULERS struct { union { - erts_smp_atomic32_t active; + erts_atomic32_t active; char align__[ERTS_CACHE_LINE_SIZE]; } cpu; union { - 
erts_smp_atomic32_t active; + erts_atomic32_t active; char align__[ERTS_CACHE_LINE_SIZE]; } io; } dirty_count erts_align_attribute(ERTS_CACHE_LINE_SIZE); -#endif static ERTS_INLINE void dirty_active(ErtsSchedulerData *esdp, erts_aint32_t add) { -#ifdef ERTS_DIRTY_SCHEDULERS erts_aint32_t val; - erts_smp_atomic32_t *ap; + erts_atomic32_t *ap; switch (esdp->type) { case ERTS_SCHED_DIRTY_CPU: ap = &dirty_count.cpu.active; @@ -446,23 +412,20 @@ dirty_active(ErtsSchedulerData *esdp, erts_aint32_t add) * All updates done under run-queue lock, so * no inc or dec needed... */ - ERTS_SMP_ASSERT(erts_smp_lc_runq_is_locked(esdp->run_queue)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(esdp->run_queue)); - val = erts_smp_atomic32_read_nob(ap); + val = erts_atomic32_read_nob(ap); val += add; - erts_smp_atomic32_set_nob(ap, val); -#endif + erts_atomic32_set_nob(ap, val); } ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_scheduler_data); -#ifdef ERTS_DIRTY_SCHEDULERS ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_dirty_cpu_scheduler_data); ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_dirty_io_scheduler_data); typedef union { Process dsp; char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(Process))]; } ErtsAlignedDirtyShadowProcess; -#endif typedef union { ErtsSchedulerSleepInfo ssi; @@ -470,12 +433,9 @@ typedef union { } ErtsAlignedSchedulerSleepInfo; static ErtsAlignedSchedulerSleepInfo *aligned_sched_sleep_info; -#ifdef ERTS_DIRTY_SCHEDULERS -#ifdef ERTS_SMP static ErtsAlignedSchedulerSleepInfo *aligned_dirty_cpu_sched_sleep_info; static ErtsAlignedSchedulerSleepInfo *aligned_dirty_io_sched_sleep_info; -#endif -#endif +static ErtsAlignedSchedulerSleepInfo *aligned_poll_thread_sleep_info; static Uint last_reductions; static Uint last_exact_reductions; @@ -543,11 +503,14 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist, 200, ERTS_ALC_T_PROC_LIST) +#define ERTS_POLL_THREAD_SLEEP_INFO_IX(IX) \ + (ASSERT(0 <= ((int) (IX)) \ + && ((int) (IX)) < ((int) erts_no_poll_threads)), \ + &aligned_poll_thread_sleep_info[(IX)].ssi) #define ERTS_SCHED_SLEEP_INFO_IX(IX) \ - (ASSERT(-1 <= ((int) (IX)) \ - && ((int) (IX)) < ((int) erts_no_schedulers)), \ + (ASSERT(((int)-1) <= ((int) (IX)) \ + && ((int) (IX)) < ((int) erts_no_schedulers)), \ &aligned_sched_sleep_info[(IX)].ssi) -#ifdef ERTS_DIRTY_SCHEDULERS #define ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(IX) \ (ASSERT(0 <= ((int) (IX)) \ && ((int) (IX)) < ((int) erts_no_dirty_cpu_schedulers)), \ @@ -556,7 +519,6 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist, (ASSERT(0 <= ((int) (IX)) \ && ((int) (IX)) < ((int) erts_no_dirty_io_schedulers)), \ &aligned_dirty_io_sched_sleep_info[(IX)].ssi) -#endif #define ERTS_FOREACH_RUNQ(RQVAR, DO) \ do { \ @@ -564,9 +526,9 @@ do { \ int ix__; \ for (ix__ = 0; ix__ < erts_no_run_queues; ix__++) { \ RQVAR = ERTS_RUNQ_IX(ix__); \ - erts_smp_runq_lock(RQVAR); \ + erts_runq_lock(RQVAR); \ { DO; } \ - erts_smp_runq_unlock(RQVAR); \ + erts_runq_unlock(RQVAR); \ } \ } while (0) @@ -576,12 +538,12 @@ do { \ int ix__; \ int online__ = (int) schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, \ ERTS_SCHED_NORMAL); \ - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&schdlr_sspnd.mtx)); \ + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&schdlr_sspnd.mtx)); \ for (ix__ = 0; ix__ < online__; ix__++) { \ RQVAR = ERTS_RUNQ_IX(ix__); \ - erts_smp_runq_lock(RQVAR); \ + erts_runq_lock(RQVAR); \ { DO; } \ - erts_smp_runq_unlock(RQVAR); \ + erts_runq_unlock(RQVAR); \ } \ } while (0) @@ -592,12 +554,12 @@ do { \ int ix__; \ for (ix__ = 0; ix__ < nrqs; 
ix__++) { \ RQVAR = ERTS_RUNQ_IX(ix__); \ - erts_smp_runq_lock(RQVAR); \ + erts_runq_lock(RQVAR); \ { DO; } \ } \ { DOX; } \ for (ix__ = 0; ix__ < nrqs; ix__++) \ - erts_smp_runq_unlock(ERTS_RUNQ_IX(ix__)); \ + erts_runq_unlock(ERTS_RUNQ_IX(ix__)); \ } while (0) #define ERTS_ATOMIC_FOREACH_RUNQ(RQVAR, DO) \ @@ -638,11 +600,8 @@ dbg_chk_aux_work_val(erts_aint32_t value) valid |= ERTS_SSI_AUX_WORK_MISC; valid |= ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM; valid |= ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC; -#if ERTS_USE_ASYNC_READY_Q valid |= ERTS_SSI_AUX_WORK_ASYNC_READY; valid |= ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN; -#endif -#ifdef ERTS_SMP valid |= ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP; valid |= ERTS_SSI_AUX_WORK_MISC_THR_PRGR; valid |= ERTS_SSI_AUX_WORK_DD; @@ -651,7 +610,6 @@ dbg_chk_aux_work_val(erts_aint32_t value) valid |= ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR; valid |= ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP; valid |= ERTS_SSI_AUX_WORK_PENDING_EXITERS; -#endif #if HAVE_ERTS_MSEG valid |= ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK; #endif @@ -673,16 +631,14 @@ dbg_chk_aux_work_val(erts_aint32_t value) #define ERTS_DBG_CHK_SSI_AUX_WORK(SSI) #endif -#ifdef ERTS_SMP static void do_handle_pending_exiters(ErtsProcList *); static void wake_scheduler(ErtsRunQueue *rq); -#endif -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) int -erts_smp_lc_runq_is_locked(ErtsRunQueue *runq) +erts_lc_runq_is_locked(ErtsRunQueue *runq) { - return erts_smp_lc_mtx_is_locked(&runq->mtx); + return erts_lc_mtx_is_locked(&runq->mtx); } #endif @@ -690,13 +646,13 @@ erts_smp_lc_runq_is_locked(ErtsRunQueue *runq) static ERTS_INLINE Uint64 ensure_later_proc_interval(Uint64 interval) { - return erts_smp_ensure_later_interval_nob(erts_ptab_interval(&erts_proc), interval); + return erts_ensure_later_interval_nob(erts_ptab_interval(&erts_proc), interval); } Uint64 erts_get_proc_interval(void) { - return erts_smp_current_interval_nob(erts_ptab_interval(&erts_proc)); + return erts_current_interval_nob(erts_ptab_interval(&erts_proc)); } Uint64 @@ -708,15 +664,13 @@ erts_ensure_later_proc_interval(Uint64 interval) Uint64 erts_step_proc_interval(void) { - return erts_smp_step_interval_nob(erts_ptab_interval(&erts_proc)); + return erts_step_interval_nob(erts_ptab_interval(&erts_proc)); } void erts_pre_init_process(void) { -#ifdef USE_THREADS erts_tsd_key_create(&sched_data_key, "erts_sched_data_key"); -#endif erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP_IX] = "DELAYED_AW_WAKEUP"; @@ -796,6 +750,11 @@ erts_pre_init_process(void) = ERTS_PSD_ETS_FIXED_TABLES_GET_LOCKS; erts_psd_required_locks[ERTS_PSD_ETS_FIXED_TABLES].set_locks = ERTS_PSD_ETS_FIXED_TABLES_SET_LOCKS; + + erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].get_locks + = ERTS_PSD_DIST_ENTRY_GET_LOCKS; + erts_psd_required_locks[ERTS_PSD_DIST_ENTRY].set_locks + = ERTS_PSD_DIST_ENTRY_SET_LOCKS; #endif } @@ -810,10 +769,8 @@ void erts_init_process(int ncpu, int proc_tab_size, int legacy_proc_tab) { -#ifdef ERTS_SMP erts_disable_proc_not_running_opt = 0; erts_init_proc_lock(ncpu); -#endif init_proclist_alloc(); @@ -825,11 +782,7 @@ erts_init_process(int ncpu, int proc_tab_size, int legacy_proc_tab) sizeof(Process), "process_table", legacy_proc_tab, -#ifdef ERTS_SMP 1 -#else - 0 -#endif ); last_reductions = 0; @@ -841,7 +794,7 @@ erts_late_init_process(void) { int ix; - erts_smp_spinlock_init(&erts_sched_stat.lock, "sched_stat", NIL, + erts_spinlock_init(&erts_sched_stat.lock, "sched_stat", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | 
ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER); for (ix = 0; ix < ERTS_NO_PRIO_LEVELS; ix++) { @@ -883,7 +836,6 @@ erts_late_init_process(void) static void init_sched_wall_time(ErtsSchedulerData *esdp, Uint64 time_stamp) { -#ifdef ERTS_DIRTY_SCHEDULERS if (esdp->type != ERTS_SCHED_NORMAL) { erts_atomic32_init_nob(&esdp->sched_wall_time.u.mod, 0); esdp->sched_wall_time.enabled = 1; @@ -892,7 +844,6 @@ init_sched_wall_time(ErtsSchedulerData *esdp, Uint64 time_stamp) esdp->sched_wall_time.working.start = ERTS_SCHED_WTIME_IDLE; } else -#endif { esdp->sched_wall_time.u.need = erts_sched_balance_util; esdp->sched_wall_time.enabled = 0; @@ -1041,14 +992,14 @@ erts_get_sched_util(ErtsRunQueue *rq, int initially_locked, int short_interval) if (!locked) { if (++try >= ERTS_GET_AVG_MAX_UNLOCKED_TRY) { /* Writer will eventually block on runq-lock */ - erts_smp_runq_lock(rq); + erts_runq_lock(rq); locked = 1; } } } if (!initially_locked && locked) - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); now = sched_wall_time_ts(); worktime = calc_sched_worktime(is_working, now, last, interval, old_worktime); @@ -1090,7 +1041,6 @@ init_runq_sched_util(ErtsRunQueueSchedUtil *rqsu, int enabled) #endif /* ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT */ -#ifdef ERTS_DIRTY_SCHEDULERS typedef struct { Uint64 working; @@ -1143,9 +1093,7 @@ read_dirty_sched_wall_time(ErtsSchedulerData *esdp, ErtsDirtySchedWallTime *info info->working = info->total; } -#endif -#ifdef ERTS_SMP static void dirty_sched_wall_time_change(ErtsSchedulerData *esdp, int working) @@ -1193,16 +1141,13 @@ dirty_sched_wall_time_change(ErtsSchedulerData *esdp, int working) mod++; erts_atomic32_set_nob(&esdp->sched_wall_time.u.mod, mod); -#if 0 if (!working) { - ERTS_MSACC_SET_STATE_M_X(ERTS_MSACC_STATE_BUSY_WAIT); + ERTS_MSACC_SET_STATE_X(ERTS_MSACC_STATE_BUSY_WAIT); } else { - ERTS_MSACC_SET_STATE_M_X(ERTS_MSACC_STATE_OTHER); + ERTS_MSACC_SET_STATE_X(ERTS_MSACC_STATE_OTHER); } -#endif } -#endif /* ERTS_SMP */ static void sched_wall_time_change(ErtsSchedulerData *esdp, int working) @@ -1247,11 +1192,9 @@ typedef struct { Eterm ref; Eterm ref_heap[ERTS_REF_THING_SIZE]; Uint req_sched; - erts_smp_atomic32_t refc; -#ifdef ERTS_DIRTY_SCHEDULERS + erts_atomic32_t refc; int want_dirty_cpu; int want_dirty_io; -#endif } ErtsSchedWallTimeReq; typedef struct { @@ -1259,7 +1202,7 @@ typedef struct { Eterm ref; Eterm ref_heap[ERTS_REF_THING_SIZE]; Uint req_sched; - erts_smp_atomic32_t refc; + erts_atomic32_t refc; } ErtsSystemCheckReq; @@ -1291,10 +1234,8 @@ reply_sched_wall_time(void *vswtrp) ErlOffHeap *ohp = NULL; ErtsMessage *mp = NULL; - ASSERT(esdp); -#ifdef ERTS_DIRTY_SCHEDULERS - ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp)); -#endif + ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp)); + if (swtrp->set) { if (!swtrp->enable && esdp->sched_wall_time.enabled) { esdp->sched_wall_time.u.need = erts_sched_balance_util; @@ -1324,7 +1265,6 @@ reply_sched_wall_time(void *vswtrp) hpp = NULL; szp = &sz; -#ifdef ERTS_DIRTY_SCHEDULERS if (esdp->sched_wall_time.enabled && swtrp->req_sched == esdp->no && (swtrp->want_dirty_cpu || swtrp->want_dirty_io)) { @@ -1406,7 +1346,6 @@ reply_sched_wall_time(void *vswtrp) erts_free(ERTS_ALC_T_TMP, dswt); } else -#endif { /* Reply with info about this scheduler only... 
*/ @@ -1443,11 +1382,11 @@ reply_sched_wall_time(void *vswtrp) rp_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); erts_proc_dec_refc(rp); - if (erts_smp_atomic32_dec_read_nob(&swtrp->refc) == 0) + if (erts_atomic32_dec_read_nob(&swtrp->refc) == 0) swtreq_free(vswtrp); } @@ -1460,11 +1399,10 @@ erts_sched_wall_time_request(Process *c_p, int set, int enable, ErtsSchedWallTimeReq *swtrp; Eterm *hp; + ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp)); + if (!set && !esdp->sched_wall_time.enabled) return THE_NON_VALUE; -#ifdef ERTS_DIRTY_SCHEDULERS - ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp)); -#endif swtrp = swtreq_alloc(); ref = erts_make_ref(c_p); @@ -1475,22 +1413,18 @@ erts_sched_wall_time_request(Process *c_p, int set, int enable, swtrp->proc = c_p; swtrp->ref = STORE_NC(&hp, NULL, ref); swtrp->req_sched = esdp->no; -#ifdef ERTS_DIRTY_SCHEDULERS swtrp->want_dirty_cpu = want_dirty_cpu; swtrp->want_dirty_io = want_dirty_io; -#endif - erts_smp_atomic32_init_nob(&swtrp->refc, + erts_atomic32_init_nob(&swtrp->refc, (erts_aint32_t) erts_no_schedulers); erts_proc_add_refc(c_p, (Sint32) erts_no_schedulers); -#ifdef ERTS_SMP if (erts_no_schedulers > 1) erts_schedule_multi_misc_aux_work(1, erts_no_schedulers, reply_sched_wall_time, (void *) swtrp); -#endif reply_sched_wall_time((void *) swtrp); @@ -1511,10 +1445,7 @@ reply_system_check(void *vscrp) ErlOffHeap *ohp = NULL; ErtsMessage *mp = NULL; - ASSERT(esdp); -#ifdef ERTS_DIRTY_SCHEDULERS - ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp)); -#endif + ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp)); sz = ERTS_REF_THING_SIZE; mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp); @@ -1527,11 +1458,11 @@ reply_system_check(void *vscrp) rp_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); erts_proc_dec_refc(rp); - if (erts_smp_atomic32_dec_read_nob(&scrp->refc) == 0) + if (erts_atomic32_dec_read_nob(&scrp->refc) == 0) screq_free(vscrp); } @@ -1549,17 +1480,15 @@ Eterm erts_system_check_request(Process *c_p) { scrp->proc = c_p; scrp->ref = STORE_NC(&hp, NULL, ref); scrp->req_sched = esdp->no; - erts_smp_atomic32_init_nob(&scrp->refc, (erts_aint32_t) erts_no_schedulers); + erts_atomic32_init_nob(&scrp->refc, (erts_aint32_t) erts_no_schedulers); erts_proc_add_refc(c_p, (Sint) erts_no_schedulers); -#ifdef ERTS_SMP if (erts_no_schedulers > 1) erts_schedule_multi_misc_aux_work(1, erts_no_schedulers, reply_system_check, (void *) scrp); -#endif reply_system_check((void *) scrp); @@ -1620,7 +1549,7 @@ erts_psd_set_init(Process *p, int ix, void *data) for (i = 0; i < ERTS_PSD_SIZE; i++) new_psd->data[i] = NULL; - psd = (ErtsPSD *) erts_smp_atomic_cmpxchg_mb(&p->psd, + psd = (ErtsPSD *) erts_atomic_cmpxchg_mb(&p->psd, (erts_aint_t) new_psd, (erts_aint_t) NULL); if (psd) @@ -1632,21 +1561,21 @@ erts_psd_set_init(Process *p, int ix, void *data) return old; } -#ifdef ERTS_SMP void -erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flags) +erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, + erts_aint32_t flags) { switch (flags & ERTS_SSI_FLGS_SLEEP_TYPE) { case ERTS_SSI_FLG_POLL_SLEEPING: - erts_sys_schedule_interrupt(1); + erts_check_io_interrupt(ssi->psi, 1); break; case ERTS_SSI_FLG_POLL_SLEEPING|ERTS_SSI_FLG_TSE_SLEEPING: /* * Thread progress blocking while poll sleeping; need * to signal on both... 
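/*
 * Reviewer note, not part of the patch: the global
 * erts_sys_schedule_interrupt() is gone, so waking a sleeper now dispatches
 * on its recorded sleep type: poll sleepers are interrupted through their own
 * pollset handle (ssi->psi), TSE sleepers through their event, and a thread
 * registered with both types gets both signals. A compact stand-alone model;
 * the types and the poll_interrupt()/event_set() stubs are illustrative
 * stand-ins for erts_check_io_interrupt() and erts_tse_set():
 */
#include <stdint.h>
#include <stdio.h>

#define SSI_FLG_TSE_SLEEPING  (1u << 0)
#define SSI_FLG_POLL_SLEEPING (1u << 1)

struct sleeper { void *psi; void *event; };

static void poll_interrupt(void *psi) { (void) psi; puts("poke pollset"); }
static void event_set(void *event)    { (void) event; puts("poke tse"); }

/* Mirrors the switch fall-through in erts_sched_finish_poke(): the
 * combined POLL|TSE case signals both wake-up channels. */
static void finish_poke(struct sleeper *s, uint32_t sleep_type)
{
    if (sleep_type & SSI_FLG_POLL_SLEEPING)
        poll_interrupt(s->psi);
    if (sleep_type & SSI_FLG_TSE_SLEEPING)
        event_set(s->event);
}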
*/ - erts_sys_schedule_interrupt(1); + erts_check_io_interrupt(ssi->psi, 1); /* fall through */ case ERTS_SSI_FLG_TSE_SLEEPING: erts_tse_set(ssi->event); @@ -1660,7 +1589,6 @@ erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flags) } } -#endif static ERTS_INLINE void set_aux_work_flags_wakeup_nob(ErtsSchedulerSleepInfo *ssi, @@ -1676,11 +1604,7 @@ set_aux_work_flags_wakeup_nob(ErtsSchedulerSleepInfo *ssi, old_flgs = erts_atomic32_read_bor_nob(&ssi->aux_work, flgs); if ((old_flgs & flgs) != flgs) { -#ifdef ERTS_SMP erts_sched_poke(ssi); -#else - erts_sys_schedule_interrupt(1); -#endif } } } @@ -1696,11 +1620,7 @@ set_aux_work_flags_wakeup_relb(ErtsSchedulerSleepInfo *ssi, old_flgs = erts_atomic32_read_bor_relb(&ssi->aux_work, flgs); if ((old_flgs & flgs) != flgs) { -#ifdef ERTS_SMP erts_sched_poke(ssi); -#else - erts_sys_schedule_interrupt(1); -#endif } } @@ -1716,7 +1636,6 @@ unset_aux_work_flags(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flgs) return erts_atomic32_read_band_nob(&ssi->aux_work, ~flgs); } -#ifdef ERTS_SMP static ERTS_INLINE void haw_chk_later_cleanup_op_wakeup(ErtsAuxWorkData *awdp, ErtsThrPrgrVal val) @@ -1786,9 +1705,9 @@ static ERTS_INLINE void haw_thr_prgr_current_check_progress(ErtsAuxWorkData *awdp) { ErtsThrPrgrVal current = awdp->current_thr_prgr; -#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif + if (current != ERTS_THR_PRGR_INVALID && !erts_thr_progress_equal(current, erts_thr_progress_current())) { /* @@ -1805,9 +1724,7 @@ handle_delayed_aux_work_wakeup(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, in { int jix, max_jix; -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif ASSERT(awdp->delayed_wakeup.next != ERTS_DELAYED_WAKEUP_INFINITY); @@ -1865,7 +1782,6 @@ schedule_aux_work_wakeup(ErtsAuxWorkData *awdp, } } -#endif typedef struct erts_misc_aux_work_t_ erts_misc_aux_work_t; struct erts_misc_aux_work_t_ { @@ -1906,11 +1822,7 @@ init_misc_aux_work(void) sizeof(erts_algnd_misc_aux_work_q_t) * (erts_no_schedulers+1)); -#ifdef ERTS_SMP ix = 0; /* aux_thread + schedulers */ -#else - ix = 1; /* scheduler only */ -#endif for (; ix <= erts_no_schedulers; ix++) { qinit.arg = (void *) ERTS_SCHED_SLEEP_INFO_IX(ix-1); @@ -1928,10 +1840,8 @@ misc_aux_work_clean(ErtsThrQ_t *q, set_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MISC); return aux_work | ERTS_SSI_AUX_WORK_MISC; case ERTS_THR_Q_NEED_THR_PRGR: -#ifdef ERTS_SMP set_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MISC_THR_PRGR); haw_thr_prgr_soft_wakeup(awdp, erts_thr_q_need_thr_progress(q)); -#endif case ERTS_THR_Q_CLEAN: break; } @@ -1957,16 +1867,14 @@ handle_misc_aux_work(ErtsAuxWorkData *awdp, return misc_aux_work_clean(q, awdp, aux_work & ~ERTS_SSI_AUX_WORK_MISC); } -#ifdef ERTS_SMP static ERTS_INLINE erts_aint32_t handle_misc_aux_work_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) { -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif + if (!erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp), awdp->misc.thr_prgr)) return aux_work & ~ERTS_SSI_AUX_WORK_MISC_THR_PRGR; @@ -1978,7 +1886,6 @@ handle_misc_aux_work_thr_prgr(ErtsAuxWorkData *awdp, aux_work & ~ERTS_SSI_AUX_WORK_MISC_THR_PRGR); } -#endif static ERTS_INLINE void schedule_misc_aux_work(int sched_id, @@ -1988,11 +1895,7 @@ schedule_misc_aux_work(int sched_id, ErtsThrQ_t *q; erts_misc_aux_work_t *mawp; -#ifdef ERTS_SMP ASSERT(0 <= sched_id && sched_id <= 
erts_no_schedulers); -#else - ASSERT(sched_id == 1); -#endif q = &misc_aux_work_queues[sched_id].q; mawp = misc_aux_work_alloc(); @@ -2036,7 +1939,6 @@ erts_schedule_multi_misc_aux_work(int ignore_self, } } -#if ERTS_USE_ASYNC_READY_Q void erts_notify_check_async_ready_queue(void *vno) @@ -2052,9 +1954,9 @@ handle_async_ready(ErtsAuxWorkData *awdp, int waiting) { ErtsSchedulerSleepInfo *ssi = awdp->ssi; -#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif + unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY); if (erts_check_async_ready(awdp->async_ready.queue)) { if (set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY) @@ -2064,9 +1966,7 @@ handle_async_ready(ErtsAuxWorkData *awdp, } return aux_work; } -#ifdef ERTS_SMP awdp->async_ready.need_thr_prgr = 0; -#endif set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN); return ((aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY) | ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN); @@ -2079,10 +1979,8 @@ handle_async_ready_clean(ErtsAuxWorkData *awdp, { void *thr_prgr_p; -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif -#ifdef ERTS_SMP + if (awdp->async_ready.need_thr_prgr && !erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp), awdp->async_ready.thr_prgr)) { @@ -2091,26 +1989,20 @@ handle_async_ready_clean(ErtsAuxWorkData *awdp, awdp->async_ready.need_thr_prgr = 0; thr_prgr_p = (void *) &awdp->async_ready.thr_prgr; -#else - thr_prgr_p = NULL; -#endif switch (erts_async_ready_clean(awdp->async_ready.queue, thr_prgr_p)) { case ERTS_ASYNC_READY_CLEAN: unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN); return aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN; -#ifdef ERTS_SMP case ERTS_ASYNC_READY_NEED_THR_PRGR: haw_thr_prgr_soft_wakeup(awdp, awdp->async_ready.thr_prgr); awdp->async_ready.need_thr_prgr = 1; return aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN; -#endif default: return aux_work; } } -#endif /* ERTS_USE_ASYNC_READY_Q */ static ERTS_INLINE erts_aint32_t @@ -2119,9 +2011,8 @@ handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) ErtsSchedulerSleepInfo *ssi = awdp->ssi; erts_aint32_t res; -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif + unset_aux_work_flags(ssi, (ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC)); aux_work &= ~(ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM @@ -2135,7 +2026,6 @@ handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) return aux_work; } -#ifdef ERTS_SMP void erts_alloc_notify_delayed_dealloc(int ix) @@ -2169,9 +2059,9 @@ handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID; int more_work = 0; ERTS_MSACC_PUSH_STATE_M_X(); -#ifdef ERTS_DIRTY_SCHEDULERS + ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif + unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD); ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_ALLOC); erts_alloc_scheduler_handle_delayed_dealloc((void *) awdp->esdp, @@ -2208,9 +2098,8 @@ handle_delayed_dealloc_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, i ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID; ErtsThrPrgrVal current = haw_thr_prgr_current(awdp); -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif + if (!erts_thr_progress_has_reached_this(current, awdp->dd.thr_prgr)) return aux_work & 
~ERTS_SSI_AUX_WORK_DD_THR_PRGR; @@ -2267,9 +2156,8 @@ handle_canceled_timers(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID; int more_work = 0; -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif + unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_CNCLD_TMRS); erts_handle_canceled_timers((void *) awdp->esdp, &need_thr_progress, @@ -2303,9 +2191,8 @@ handle_canceled_timers_thr_prgr(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, i ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID; ErtsThrPrgrVal current = haw_thr_prgr_current(awdp); -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif + if (!erts_thr_progress_has_reached_this(current, awdp->cncld_tmrs.thr_prgr)) return aux_work & ~ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR; @@ -2348,9 +2235,8 @@ handle_thr_prgr_later_op(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int wait int lops; ErtsThrPrgrVal current = haw_thr_prgr_current(awdp); -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif + for (lops = 0; lops < ERTS_MAX_THR_PRGR_LATER_OPS; lops++) { ErtsThrPrgrLaterOp *lop = awdp->later_op.first; @@ -2380,7 +2266,7 @@ enqueue_later_op(ErtsSchedulerData *esdp, ErtsThrPrgrLaterOp *lop) { ErtsThrPrgrVal later = erts_thr_progress_later(esdp); - ASSERT(esdp); + ASSERT(esdp && !ERTS_SCHEDULER_IS_DIRTY(esdp)); lop->func = later_func; lop->data = later_data; @@ -2396,20 +2282,15 @@ enqueue_later_op(ErtsSchedulerData *esdp, return later; } -#endif /* ERTS_SMP */ void erts_schedule_thr_prgr_later_op(void (*later_func)(void *), void *later_data, ErtsThrPrgrLaterOp *lop) { -#ifndef ERTS_SMP - later_func(later_data); -#else ErtsSchedulerData *esdp = erts_get_scheduler_data(); ErtsThrPrgrVal later = enqueue_later_op(esdp, later_func, later_data, lop); haw_thr_prgr_wakeup(&esdp->aux_work_data, later); -#endif } void @@ -2418,13 +2299,9 @@ erts_schedule_thr_prgr_later_cleanup_op(void (*later_func)(void *), ErtsThrPrgrLaterOp *lop, UWord size) { -#ifndef ERTS_SMP - later_func(later_data); -#else ErtsSchedulerData *esdp = erts_get_scheduler_data(); ErtsThrPrgrVal later = enqueue_later_op(esdp, later_func, later_data, lop); haw_thr_prgr_later_cleanup_op_wakeup(&esdp->aux_work_data, later, size); -#endif } static ERTS_INLINE erts_aint32_t @@ -2433,9 +2310,7 @@ handle_debug_wait_completed(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int w ErtsSchedulerSleepInfo *ssi = awdp->ssi; erts_aint32_t saved_aux_work, flags; -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#endif flags = awdp->debug.wait_completed.flags; @@ -2476,11 +2351,7 @@ setup_thr_debug_wait_completed(void *vproc) ErtsSchedulerData *esdp = erts_get_scheduler_data(); ErtsAuxWorkData *awdp; erts_aint32_t wait_flags, aux_work_flags; -#ifdef ERTS_SMP awdp = esdp ? 
&esdp->aux_work_data : aux_thread_aux_work_data; -#else - awdp = &esdp->aux_work_data; -#endif wait_flags = 0; aux_work_flags = ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED; @@ -2489,18 +2360,14 @@ setup_thr_debug_wait_completed(void *vproc) erts_alloc_fix_alloc_shrink(awdp->sched_id, 0); wait_flags |= (ERTS_SSI_AUX_WORK_DD | ERTS_SSI_AUX_WORK_DD_THR_PRGR); -#ifdef ERTS_SMP aux_work_flags |= ERTS_SSI_AUX_WORK_DD; -#endif } if (debug_wait_completed_flags & ERTS_DEBUG_WAIT_COMPLETED_TIMER_CANCELLATIONS) { wait_flags |= (ERTS_SSI_AUX_WORK_CNCLD_TMRS | ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR); -#ifdef ERTS_SMP if (awdp->esdp && !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)) aux_work_flags |= ERTS_SSI_AUX_WORK_CNCLD_TMRS; -#endif } set_aux_work_flags_wakeup_nob(awdp->ssi, aux_work_flags); @@ -2519,21 +2386,17 @@ static void later_thr_debug_wait_completed(void *vlop) { struct debug_lop *lop = vlop; erts_aint32_t count = (erts_aint32_t) erts_no_schedulers; -#ifdef ERTS_SMP count += 1; /* aux thread */ -#endif if (erts_atomic32_dec_read_mb(&debug_wait_completed_count) == count) { /* scheduler threads */ erts_schedule_multi_misc_aux_work(0, erts_no_schedulers, setup_thr_debug_wait_completed, lop->proc); -#ifdef ERTS_SMP /* aux_thread */ erts_schedule_misc_aux_work(0, setup_thr_debug_wait_completed, lop->proc); -#endif } erts_free(ERTS_ALC_T_DEBUG, lop); } @@ -2554,9 +2417,7 @@ erts_debug_wait_completed(Process *c_p, int flags) { /* Only one process at a time can do this */ erts_aint32_t count = (erts_aint32_t) (2*erts_no_schedulers); -#ifdef ERTS_SMP count += 1; /* aux thread */ -#endif if (0 == erts_atomic32_cmpxchg_mb(&debug_wait_completed_count, count, 0)) { @@ -2585,7 +2446,7 @@ notify_reap_ports_relb(void) } } -erts_smp_atomic32_t erts_halt_progress; +erts_atomic32_t erts_halt_progress; int erts_halt_code; static ERTS_INLINE erts_aint32_t @@ -2594,9 +2455,9 @@ handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_REAP_PORTS); ERTS_RUNQ_FLGS_SET(awdp->esdp->run_queue, ERTS_RUNQ_FLG_HALTING); - if (erts_smp_atomic32_dec_read_acqb(&erts_halt_progress) == 0) { + if (erts_atomic32_dec_read_acqb(&erts_halt_progress) == 0) { int i, max = erts_ptab_max(&erts_port); - erts_smp_atomic32_set_nob(&erts_halt_progress, 1); + erts_atomic32_set_nob(&erts_halt_progress, 1); for (i = 0; i < max; i++) { erts_aint32_t state; Port *prt = erts_pix2port(i); @@ -2609,21 +2470,21 @@ handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) /* We need to set the halt flag - get the port lock */ - erts_smp_port_lock(prt); + erts_port_lock(prt); state = erts_atomic32_read_nob(&prt->state); if (!(state & (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP | ERTS_PORT_SFLG_HALT))) { state = erts_atomic32_read_bor_relb(&prt->state, ERTS_PORT_SFLG_HALT); - erts_smp_atomic32_inc_nob(&erts_halt_progress); + erts_atomic32_inc_nob(&erts_halt_progress); if (!(state & (ERTS_PORT_SFLG_EXITING|ERTS_PORT_SFLG_CLOSING))) erts_deliver_port_exit(prt, prt->common.id, am_killed, 0, 1); } erts_port_release(prt); } - if (erts_smp_atomic32_dec_read_nob(&erts_halt_progress) == 0) { + if (erts_atomic32_dec_read_nob(&erts_halt_progress) == 0) { erts_flush_async_exit(erts_halt_code, ""); } } @@ -2684,7 +2545,6 @@ handle_mseg_cache_check(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiti #endif -#ifdef ERTS_SMP static ERTS_INLINE erts_aint32_t handle_pending_exiters(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) @@ -2695,10 +2555,10 @@ 
handle_pending_exiters(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin rq = awdp->esdp->run_queue; unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_PENDING_EXITERS); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); pnd_xtrs = rq->procs.pending_exiters; rq->procs.pending_exiters = NULL; - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); if (erts_proclist_fetch(&pnd_xtrs, NULL)) do_handle_pending_exiters(pnd_xtrs); @@ -2706,7 +2566,6 @@ handle_pending_exiters(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin return aux_work & ~ERTS_SSI_AUX_WORK_PENDING_EXITERS; } -#endif static ERTS_INLINE erts_aint32_t handle_setup_aux_work_timer(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) @@ -2738,9 +2597,7 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting) ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_AUX); ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp)); -#ifdef ERTS_SMP haw_thr_prgr_current_reset(awdp); -#endif ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); ASSERT(aux_work); @@ -2759,7 +2616,6 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting) * Keep ERTS_SSI_AUX_WORK flags in expected frequency order relative * to each other. Most frequent first. */ -#ifdef ERTS_SMP HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP, handle_delayed_aux_work_wakeup); HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DD, @@ -2767,13 +2623,11 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting) /* DD must be before DD_THR_PRGR */ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DD_THR_PRGR, handle_delayed_dealloc_thr_prgr); -#endif HANDLE_AUX_WORK((ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM | ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC), handle_fix_alloc); -#ifdef ERTS_SMP HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP, handle_thr_prgr_later_op); HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_CNCLD_TMRS, @@ -2781,28 +2635,21 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting) /* CNCLD_TMRS must be before CNCLD_TMRS_THR_PRGR */ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR, handle_canceled_timers_thr_prgr); -#endif -#if ERTS_USE_ASYNC_READY_Q HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_ASYNC_READY, handle_async_ready); /* ASYNC_READY must be before ASYNC_READY_CLEAN */ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN, handle_async_ready_clean); -#endif -#ifdef ERTS_SMP HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_MISC_THR_PRGR, handle_misc_aux_work_thr_prgr); -#endif /* MISC_THR_PRGR must be before MISC */ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_MISC, handle_misc_aux_work); -#ifdef ERTS_SMP HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_PENDING_EXITERS, handle_pending_exiters); -#endif HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_SET_TMO, handle_setup_aux_work_timer); @@ -2828,10 +2675,8 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting) ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); -#ifdef ERTS_SMP if (waiting && !aux_work) haw_thr_prgr_current_check_progress(awdp); -#endif ERTS_MSACC_UPDATE_CACHE(); ERTS_MSACC_POP_STATE_M(); @@ -2930,11 +2775,7 @@ aux_work_timeout(void *vesdp) ASSERT(esdp == (ErtsSchedulerData *) vesdp); #endif -#ifdef ERTS_SMP i = 0; -#else - i = 1; -#endif for (; i <= erts_no_schedulers; i++) { erts_aint32_t type; @@ -2968,9 +2809,6 @@ erts_set_aux_work_timeout(int ix, erts_aint32_t type, int enable) { erts_aint32_t old, refc; -#ifndef ERTS_SMP - ix = 1; -#endif ERTS_DBG_CHK_AUX_WORK_VAL(type); ERTS_DBG_CHK_AUX_WORK_VAL(erts_atomic32_read_nob(&aux_work_tmo->type[ix])); @@ -2997,36 +2835,6 @@
erts_set_aux_work_timeout(int ix, erts_aint32_t type, int enable) return old; } - - -static ERTS_INLINE void -sched_waiting_sys(Uint no, ErtsRunQueue *rq) -{ - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); - ASSERT(rq->waiting >= 0); - (void) ERTS_RUNQ_FLGS_SET(rq, (ERTS_RUNQ_FLG_OUT_OF_WORK - | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK)); - rq->waiting++; - rq->waiting *= -1; - rq->woken = 0; - if (erts_system_profile_flags.scheduler) - profile_scheduler(make_small(no), am_inactive); -} - -static ERTS_INLINE void -sched_active_sys(Uint no, ErtsRunQueue *rq) -{ - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); -#ifdef ERTS_DIRTY_SCHEDULERS - ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); -#endif - ASSERT(rq->waiting < 0); - rq->waiting *= -1; - rq->waiting--; - if (erts_system_profile_flags.scheduler) - profile_scheduler(make_small(no), am_active); -} - Uint erts_active_schedulers(void) { @@ -3037,64 +2845,10 @@ erts_active_schedulers(void) return as; } -#ifdef ERTS_SMP - -static ERTS_INLINE void -clear_sys_scheduling(void) -{ - erts_smp_atomic32_set_mb(&doing_sys_schedule, 0); -} - -static ERTS_INLINE int -try_set_sys_scheduling(void) -{ - return 0 == erts_smp_atomic32_cmpxchg_acqb(&doing_sys_schedule, 1, 0); -} - -#endif - -static ERTS_INLINE int -prepare_for_sys_schedule(int non_blocking) -{ - if (non_blocking && erts_eager_check_io) { -#ifdef ERTS_SMP - return try_set_sys_scheduling(); -#else - return 1; -#endif - } - else { -#ifdef ERTS_SMP - while (!erts_port_task_have_outstanding_io_tasks() - && try_set_sys_scheduling()) { - if (!erts_port_task_have_outstanding_io_tasks()) - return 1; - clear_sys_scheduling(); - } - return 0; -#else - return !erts_port_task_have_outstanding_io_tasks(); -#endif - } -} - -#ifdef ERTS_SMP - -static ERTS_INLINE void -sched_change_waiting_sys_to_waiting(Uint no, ErtsRunQueue *rq) -{ - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); -#ifdef ERTS_DIRTY_SCHEDULERS - ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); -#endif - ASSERT(rq->waiting < 0); - rq->waiting *= -1; -} - static ERTS_INLINE void sched_waiting(Uint no, ErtsRunQueue *rq) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); (void) ERTS_RUNQ_FLGS_SET(rq, (ERTS_RUNQ_FLG_OUT_OF_WORK | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK)); if (rq->waiting < 0) @@ -3109,7 +2863,7 @@ sched_waiting(Uint no, ErtsRunQueue *rq) static ERTS_INLINE void sched_active(Uint no, ErtsRunQueue *rq) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); if (rq->waiting < 0) rq->waiting++; else @@ -3123,7 +2877,7 @@ empty_runq_aux(ErtsRunQueue *rq, Uint32 old_flags) { if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && old_flags & ERTS_RUNQ_FLG_NONEMPTY) { #ifdef DEBUG - erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues); + erts_aint32_t empty = erts_atomic32_read_nob(&no_empty_run_queues); /* * For a short period of time no_empty_run_queues may have * been increased twice for a specific run queue. 
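/*
 * [Editorial sketch] The hunks around this point only rename the
 * no_empty_run_queues updates from erts_smp_atomic32_* to erts_atomic32_*;
 * the underlying pattern is unchanged: a global atomic counts empty run
 * queues, and when run-queue supervision is enabled the increment uses a
 * full memory barrier so that a sleeping supervisor thread is reliably
 * woken. A minimal stand-alone illustration of that pattern follows, using
 * C11 atomics and made-up names (wake_supervisor() stands in for the
 * ethr_event_set() call in the diff); it is a sketch of the idea, not the
 * emulator's implementation.
 */
#include <stdatomic.h>

static atomic_int no_empty_run_queues;   /* how many run queues are empty  */
static atomic_int supervisor_sleeping;   /* nonzero while supervisor waits */

static void wake_supervisor(void)
{
    /* stand-in for signalling the supervision event (ethr_event_set) */
}

static void mark_runq_empty(int supervision_enabled)
{
    if (!supervision_enabled) {
        /* release ordering suffices when no thread sleeps on the counter,
         * mirroring the inc_relb variant in the diff */
        atomic_fetch_add_explicit(&no_empty_run_queues, 1,
                                  memory_order_release);
    } else {
        /* full barrier: the increment must be globally visible before the
         * sleeping flag is read, mirroring the inc_mb/read_nob pair */
        atomic_fetch_add(&no_empty_run_queues, 1);
        if (atomic_load_explicit(&supervisor_sleeping, memory_order_relaxed))
            wake_supervisor();
    }
}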
@@ -3131,9 +2885,9 @@ empty_runq_aux(ErtsRunQueue *rq, Uint32 old_flags) ASSERT(0 <= empty && empty < 2*erts_no_run_queues); #endif if (!erts_runq_supervision_interval) - erts_smp_atomic32_inc_relb(&no_empty_run_queues); + erts_atomic32_inc_relb(&no_empty_run_queues); else { - erts_smp_atomic32_inc_mb(&no_empty_run_queues); + erts_atomic32_inc_mb(&no_empty_run_queues); if (erts_atomic_read_nob(&runq_supervisor_sleeping)) ethr_event_set(&runq_supervision_event); } @@ -3163,7 +2917,7 @@ non_empty_runq(ErtsRunQueue *rq) Uint32 old_flags = ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_NONEMPTY); if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && (!(old_flags & ERTS_RUNQ_FLG_NONEMPTY))) { #ifdef DEBUG - erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues); + erts_aint32_t empty = erts_atomic32_read_nob(&no_empty_run_queues); /* * For a short period of time no_empty_run_queues may have * been increased twice for a specific run queue. @@ -3171,10 +2925,10 @@ non_empty_runq(ErtsRunQueue *rq) ASSERT(0 < empty && empty <= 2*erts_no_run_queues); #endif if (!erts_runq_supervision_interval) - erts_smp_atomic32_dec_relb(&no_empty_run_queues); + erts_atomic32_dec_relb(&no_empty_run_queues); else { erts_aint32_t no; - no = erts_smp_atomic32_dec_read_mb(&no_empty_run_queues); + no = erts_atomic32_dec_read_mb(&no_empty_run_queues); if (no > 0 && erts_atomic_read_nob(&runq_supervisor_sleeping)) ethr_event_set(&runq_supervision_event); } @@ -3203,7 +2957,7 @@ sched_prep_spin_wait(ErtsSchedulerSleepInfo *ssi) do { nflgs = (xflgs & ERTS_SSI_FLG_MSB_EXEC); nflgs |= ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING; - oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); + oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return nflgs; xflgs = oflgs; @@ -3220,7 +2974,7 @@ sched_prep_cont_spin_wait(ErtsSchedulerSleepInfo *ssi) erts_aint32_t xflgs = ERTS_SSI_FLG_WAITING; do { - oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); + oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return nflgs; xflgs = oflgs; @@ -3237,7 +2991,7 @@ sched_spin_wait(ErtsSchedulerSleepInfo *ssi, int spincount) erts_aint32_t flgs; do { - flgs = erts_smp_atomic32_read_acqb(&ssi->flags); + flgs = erts_atomic32_read_acqb(&ssi->flags); if ((flgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) != (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) { break; @@ -3262,11 +3016,11 @@ sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, erts_aint32_t sleep_type) erts_tse_reset(ssi->event); else { ASSERT(sleep_type == ERTS_SSI_FLG_POLL_SLEEPING); - erts_sys_schedule_interrupt(0); + erts_check_io_interrupt(ssi->psi, 0); } while (1) { - oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); + oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return nflgs; if ((oflgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) @@ -3293,7 +3047,7 @@ static void thr_prgr_prep_wait(void *vssi) { ErtsSchedulerSleepInfo *ssi = (ErtsSchedulerSleepInfo *) vssi; - erts_smp_atomic32_read_bor_acqb(&ssi->flags, + erts_atomic32_read_bor_acqb(&ssi->flags, ERTS_SSI_FLG_SLEEPING); } @@ -3308,7 +3062,7 @@ thr_prgr_wait(void *vssi) while (1) { erts_aint32_t aflgs, nflgs; nflgs = xflgs | ERTS_SSI_FLG_TSE_SLEEPING; - aflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); + aflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); if (aflgs == xflgs) { erts_tse_wait(ssi->event); break; @@ -3323,13 +3077,19 @@ static void thr_prgr_fin_wait(void *vssi) 
{ ErtsSchedulerSleepInfo *ssi = (ErtsSchedulerSleepInfo *) vssi; - erts_smp_atomic32_read_band_nob(&ssi->flags, + erts_atomic32_read_band_nob(&ssi->flags, ~(ERTS_SSI_FLG_SLEEPING | ERTS_SSI_FLG_TSE_SLEEPING)); } static void init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp); +void +erts_aux_thread_poke() +{ + erts_sched_poke(ERTS_SCHED_SLEEP_INFO_IX(-1)); +} + static void * aux_thread(void *unused) { @@ -3338,6 +3098,7 @@ aux_thread(void *unused) erts_aint32_t aux_work; ErtsThrPrgrCallbacks callbacks; int thr_prgr_active = 1; + ERTS_MSACC_DECLARE_CACHE(); #ifdef ERTS_ENABLE_LOCK_CHECK { @@ -3346,6 +3107,7 @@ aux_thread(void *unused) } #endif + erts_port_task_pre_alloc_init_thread(); ssi->event = erts_tse_fetch(); erts_msacc_init_thread("aux", 1, 1); @@ -3360,9 +3122,14 @@ aux_thread(void *unused) init_aux_work_data(awdp, NULL, NULL); awdp->ssi = ssi; +#if ERTS_POLL_USE_FALLBACK + ssi->psi = erts_create_pollset_thread(-1); +#endif sched_prep_spin_wait(ssi); + ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_OTHER); + while (1) { erts_aint32_t flgs; @@ -3371,30 +3138,54 @@ aux_thread(void *unused) if (!thr_prgr_active) erts_thr_progress_active(NULL, thr_prgr_active = 1); aux_work = handle_aux_work(awdp, aux_work, 1); + ERTS_MSACC_UPDATE_CACHE(); if (aux_work && erts_thr_progress_update(NULL)) erts_thr_progress_leader_update(NULL); } if (!aux_work) { + +#ifdef ERTS_BREAK_REQUESTED + if (ERTS_BREAK_REQUESTED) + erts_do_break_handling(); +#endif + if (thr_prgr_active) erts_thr_progress_active(NULL, thr_prgr_active = 0); - erts_thr_progress_prepare_wait(NULL); + +#if ERTS_POLL_USE_FALLBACK flgs = sched_spin_wait(ssi, 0); if (flgs & ERTS_SSI_FLG_SLEEPING) { ASSERT(flgs & ERTS_SSI_FLG_WAITING); + flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING); + if (flgs & ERTS_SSI_FLG_SLEEPING) { + ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING); + ASSERT(flgs & ERTS_SSI_FLG_WAITING); + erts_check_io(ssi->psi); + } + } +#else + erts_thr_progress_prepare_wait(NULL); + + flgs = sched_spin_wait(ssi, 0); + + if (flgs & ERTS_SSI_FLG_SLEEPING) { flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING); if (flgs & ERTS_SSI_FLG_SLEEPING) { - int res; + int res; ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING); ASSERT(flgs & ERTS_SSI_FLG_WAITING); - do { - res = erts_tse_wait(ssi->event); - } while (res == EINTR); + ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_SLEEP); + do { + res = erts_tse_wait(ssi->event); + } while (res == EINTR); + ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_OTHER); } - } - erts_thr_progress_finalize_wait(NULL); + } + erts_thr_progress_finalize_wait(NULL); +#endif } flgs = sched_prep_spin_wait(ssi); @@ -3402,9 +3193,79 @@ aux_thread(void *unused) return NULL; } -static void suspend_scheduler(ErtsSchedulerData *esdp); +static void * +poll_thread(void *arg) +{ + int id = (int)(UWord)arg; + ErtsAuxWorkData *awdp = poll_thread_aux_work_data+id; + ErtsSchedulerSleepInfo *ssi = ERTS_POLL_THREAD_SLEEP_INFO_IX(id); + erts_aint32_t aux_work; + ErtsThrPrgrCallbacks callbacks; + int thr_prgr_active = 1; + struct erts_poll_thread *psi = erts_create_pollset_thread(id); + ERTS_MSACC_DECLARE_CACHE(); -#endif /* ERTS_SMP */ +#ifdef ERTS_ENABLE_LOCK_CHECK + { + char buf[] = "poll_thread"; + erts_lc_set_thread_name(buf); + } +#endif + + erts_port_task_pre_alloc_init_thread(); + ssi->event = erts_tse_fetch(); + + erts_msacc_init_thread("poll", id, 0); + + callbacks.arg = (void *) ssi; + callbacks.wakeup = thr_prgr_wakeup; + callbacks.prepare_wait = thr_prgr_prep_wait; + callbacks.wait = 
thr_prgr_wait; + callbacks.finalize_wait = thr_prgr_fin_wait; + + erts_thr_progress_register_managed_thread(NULL, &callbacks, 0); + init_aux_work_data(awdp, NULL, NULL); + awdp->ssi = ssi; + ssi->psi = psi; + + sched_prep_spin_wait(ssi); + + ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_OTHER); + + while (1) { + erts_aint32_t flgs; + + aux_work = erts_atomic32_read_acqb(&ssi->aux_work); + if (aux_work) { + if (!thr_prgr_active) + erts_thr_progress_active(NULL, thr_prgr_active = 1); + aux_work = handle_aux_work(awdp, aux_work, 1); + ERTS_MSACC_UPDATE_CACHE(); + if (aux_work && erts_thr_progress_update(NULL)) + erts_thr_progress_leader_update(NULL); + } + + if (!aux_work) { + if (thr_prgr_active) + erts_thr_progress_active(NULL, thr_prgr_active = 0); + + flgs = sched_spin_wait(ssi, 0); + + if (flgs & ERTS_SSI_FLG_SLEEPING) { + ASSERT(flgs & ERTS_SSI_FLG_WAITING); + flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING); + if (flgs & ERTS_SSI_FLG_SLEEPING) { + ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING); + ASSERT(flgs & ERTS_SSI_FLG_WAITING); + erts_check_io(psi); + } + } + } + + flgs = sched_prep_spin_wait(ssi); + } + return NULL; +} static void scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) @@ -3413,386 +3274,168 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) ErtsSchedulerSleepInfo *ssi = esdp->ssi; int spincount; erts_aint32_t aux_work = 0; -#ifdef ERTS_SMP int thr_prgr_active = 1; erts_aint32_t flgs; -#endif ERTS_MSACC_PUSH_STATE_M(); -#ifdef ERTS_SMP - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); -#ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) - erts_smp_spin_lock(&rq->sleepers.lock); -#endif + erts_spin_lock(&rq->sleepers.lock); flgs = sched_prep_spin_wait(ssi); if (flgs & ERTS_SSI_FLG_SUSPENDED) { /* Go suspend instead... 
*/ -#ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) - erts_smp_spin_unlock(&rq->sleepers.lock); -#endif + erts_spin_unlock(&rq->sleepers.lock); return; } -#ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) { ssi->prev = NULL; ssi->next = rq->sleepers.list; if (rq->sleepers.list) rq->sleepers.list->prev = ssi; rq->sleepers.list = ssi; - erts_smp_spin_unlock(&rq->sleepers.lock); + erts_spin_unlock(&rq->sleepers.lock); dirty_active(esdp, -1); } -#endif - /* - * If all schedulers are waiting, one of them *should* - * be waiting in erl_sys_schedule() - */ - - if (ERTS_SCHEDULER_IS_DIRTY(esdp) || !prepare_for_sys_schedule(0)) { - - sched_waiting(esdp->no, rq); - - erts_smp_runq_unlock(rq); - - spincount = sched_busy_wait.tse; + sched_waiting(esdp->no, rq); - tse_wait: + erts_runq_unlock(rq); - if (ERTS_SCHEDULER_IS_DIRTY(esdp)) - dirty_sched_wall_time_change(esdp, working = 0); - else if (thr_prgr_active != working) - sched_wall_time_change(esdp, working = thr_prgr_active); + spincount = sched_busy_wait.tse; - while (1) { - ErtsMonotonicTime current_time = 0; - - aux_work = erts_atomic32_read_acqb(&ssi->aux_work); - if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)) { - if (!thr_prgr_active) { - erts_thr_progress_active(esdp, thr_prgr_active = 1); - sched_wall_time_change(esdp, 1); - } - aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1); - ERTS_MSACC_UPDATE_CACHE(); - if (aux_work && erts_thr_progress_update(esdp)) - erts_thr_progress_leader_update(esdp); - } - - if (aux_work) { - if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { - flgs = erts_smp_atomic32_read_acqb(&ssi->flags); - current_time = erts_get_monotonic_time(esdp); - if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) { - if (!thr_prgr_active) { - erts_thr_progress_active(esdp, thr_prgr_active = 1); - sched_wall_time_change(esdp, 1); - } - erts_bump_timers(esdp->timer_wheel, current_time); - } - } - } - else { - ErtsMonotonicTime timeout_time; - int do_timeout = 0; - if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { - timeout_time = erts_check_next_timeout_time(esdp); - current_time = erts_get_monotonic_time(esdp); - do_timeout = (current_time >= timeout_time); - } else { - current_time = 0; - timeout_time = ERTS_MONOTONIC_TIME_MAX; - } - if (do_timeout) { - if (!thr_prgr_active) { - erts_thr_progress_active(esdp, thr_prgr_active = 1); - sched_wall_time_change(esdp, 1); - } - } - else { - if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { - if (thr_prgr_active) { - erts_thr_progress_active(esdp, thr_prgr_active = 0); - sched_wall_time_change(esdp, 0); - } - erts_thr_progress_prepare_wait(esdp); - } + if (ERTS_SCHEDULER_IS_DIRTY(esdp)) + dirty_sched_wall_time_change(esdp, working = 0); + else if (thr_prgr_active != working) + sched_wall_time_change(esdp, working = thr_prgr_active); - flgs = sched_spin_wait(ssi, spincount); - if (flgs & ERTS_SSI_FLG_SLEEPING) { - ASSERT(flgs & ERTS_SSI_FLG_WAITING); - flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING); - if (flgs & ERTS_SSI_FLG_SLEEPING) { - int res; - ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING); - ASSERT(flgs & ERTS_SSI_FLG_WAITING); - current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 
0 : - erts_get_monotonic_time(esdp); - do { - Sint64 timeout; - if (current_time >= timeout_time) - break; - if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { - timeout = ERTS_MONOTONIC_TO_NSEC(timeout_time - - current_time - - 1) + 1; - } else - timeout = -1; - ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); - res = erts_tse_twait(ssi->event, timeout); - ERTS_MSACC_POP_STATE_M(); - current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 : - erts_get_monotonic_time(esdp); - } while (res == EINTR); - } - } - if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) - erts_thr_progress_finalize_wait(esdp); - } - if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && current_time >= timeout_time) - erts_bump_timers(esdp->timer_wheel, current_time); - } + while (1) { + ErtsMonotonicTime current_time = 0; - if (!(flgs & ERTS_SSI_FLG_WAITING)) { - ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING)); - break; - } + aux_work = erts_atomic32_read_acqb(&ssi->aux_work); + if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)) { + if (!thr_prgr_active) { + erts_thr_progress_active(esdp, thr_prgr_active = 1); + sched_wall_time_change(esdp, 1); + } + aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1); + ERTS_MSACC_UPDATE_CACHE(); + if (aux_work && erts_thr_progress_update(esdp)) + erts_thr_progress_leader_update(esdp); + } - flgs = sched_prep_cont_spin_wait(ssi); - spincount = sched_busy_wait.aux_work; + if (aux_work) { + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { + flgs = erts_atomic32_read_acqb(&ssi->flags); + current_time = erts_get_monotonic_time(esdp); + if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) { + if (!thr_prgr_active) { + erts_thr_progress_active(esdp, thr_prgr_active = 1); + sched_wall_time_change(esdp, 1); + } + erts_bump_timers(esdp->timer_wheel, current_time); + } + } + } + else { + ErtsMonotonicTime timeout_time; + int do_timeout = 0; + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { + timeout_time = erts_check_next_timeout_time(esdp); + current_time = erts_get_monotonic_time(esdp); + do_timeout = (current_time >= timeout_time); + } else { + current_time = 0; + timeout_time = ERTS_MONOTONIC_TIME_MAX; + } + if (do_timeout) { + if (!thr_prgr_active) { + erts_thr_progress_active(esdp, thr_prgr_active = 1); + sched_wall_time_change(esdp, 1); + } + } + else { + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { + if (thr_prgr_active) { + erts_thr_progress_active(esdp, thr_prgr_active = 0); + sched_wall_time_change(esdp, 0); + } + erts_thr_progress_prepare_wait(esdp); + } - if (!(flgs & ERTS_SSI_FLG_WAITING)) { - ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING)); - break; - } + flgs = sched_spin_wait(ssi, spincount); + if (flgs & ERTS_SSI_FLG_SLEEPING) { + ASSERT(flgs & ERTS_SSI_FLG_WAITING); + flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING); + if (flgs & ERTS_SSI_FLG_SLEEPING) { + int res; + ASSERT(flgs & ERTS_SSI_FLG_TSE_SLEEPING); + ASSERT(flgs & ERTS_SSI_FLG_WAITING); + current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 : + erts_get_monotonic_time(esdp); + do { + Sint64 timeout; + if (current_time >= timeout_time) + break; + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { + timeout = ERTS_MONOTONIC_TO_NSEC(timeout_time + - current_time + - 1) + 1; + } else + timeout = -1; + ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP); + res = erts_tse_twait(ssi->event, timeout); + ERTS_MSACC_POP_STATE_M(); + current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 
0 : + erts_get_monotonic_time(esdp); + } while (res == EINTR); + } + } + if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) + erts_thr_progress_finalize_wait(esdp); + } + if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && current_time >= timeout_time) + erts_bump_timers(esdp->timer_wheel, current_time); + } - } + if (!(flgs & ERTS_SSI_FLG_WAITING)) { + ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING)); + break; + } - if (flgs & ~(ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC)) - erts_smp_atomic32_read_band_nob(&ssi->flags, - (ERTS_SSI_FLG_SUSPENDED - | ERTS_SSI_FLG_MSB_EXEC)); + flgs = sched_prep_cont_spin_wait(ssi); + spincount = sched_busy_wait.aux_work; - if (ERTS_SCHEDULER_IS_DIRTY(esdp)) - dirty_sched_wall_time_change(esdp, working = 1); - else if (!thr_prgr_active) { - erts_thr_progress_active(esdp, thr_prgr_active = 1); - sched_wall_time_change(esdp, 1); + if (!(flgs & ERTS_SSI_FLG_WAITING)) { + ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING)); + break; } - erts_smp_runq_lock(rq); - sched_active(esdp->no, rq); - } - else -#endif - { - - erts_smp_atomic32_set_relb(&function_calls, 0); - *fcalls = 0; -#ifdef ERTS_DIRTY_SCHEDULERS - ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp)); -#endif - sched_waiting_sys(esdp->no, rq); - - erts_smp_runq_unlock(rq); - - ASSERT(working); - sched_wall_time_change(esdp, working = 0); - - spincount = sched_busy_wait.sys_schedule; - if (spincount == 0) - goto sys_aux_work; - - while (spincount-- > 0) { - ErtsMonotonicTime current_time; - - sys_poll_aux_work: + if (flgs & ~(ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC)) + erts_atomic32_read_band_nob(&ssi->flags, + (ERTS_SSI_FLG_SUSPENDED + | ERTS_SSI_FLG_MSB_EXEC)); - if (working) - sched_wall_time_change(esdp, working = 0); - - ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_CHECK_IO); - - ASSERT(!erts_port_task_have_outstanding_io_tasks()); - LTTNG2(scheduler_poll, esdp->no, 1); - erl_sys_schedule(1); /* Might give us something to do */ - - ERTS_MSACC_POP_STATE_M(); - - if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { - current_time = erts_get_monotonic_time(esdp); - if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) - erts_bump_timers(esdp->timer_wheel, current_time); - } - - sys_aux_work: -#ifndef ERTS_SMP - erts_sys_schedule_interrupt(0); -#endif - - aux_work = erts_atomic32_read_acqb(&ssi->aux_work); - if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)) { - if (!working) - sched_wall_time_change(esdp, working = 1); -#ifdef ERTS_SMP - if (!thr_prgr_active) - erts_thr_progress_active(esdp, thr_prgr_active = 1); -#endif - aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1); - ERTS_MSACC_UPDATE_CACHE(); -#ifdef ERTS_SMP - if (aux_work && erts_thr_progress_update(esdp)) - erts_thr_progress_leader_update(esdp); -#endif - } - -#ifndef ERTS_SMP - if (erts_smp_atomic32_read_dirty(&rq->len) != 0 || rq->misc.start) - goto sys_woken; -#else - flgs = erts_smp_atomic32_read_acqb(&ssi->flags); - if (!(flgs & ERTS_SSI_FLG_WAITING)) { - ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING)); - goto sys_woken; - } - - /* - * If we got new I/O tasks we aren't allowed to - * call erl_sys_schedule() until it is handled. - */ - if (erts_port_task_have_outstanding_io_tasks()) { - clear_sys_scheduling(); - /* - * Got to check that we still got I/O tasks; otherwise - * we have to continue checking for I/O... - */ - if (!prepare_for_sys_schedule(0)) { - spincount *= ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT; - goto tse_wait; - } - } -#endif - } - - erts_smp_runq_lock(rq); - -#ifdef ERTS_SMP - /* - * If we got new I/O tasks we aren't allowed to - * sleep in erl_sys_schedule(). 
- */ - if (erts_port_task_have_outstanding_io_tasks()) { - clear_sys_scheduling(); - - /* - * Got to check that we still got I/O tasks; otherwise - * we have to wait in erl_sys_schedule() after all... - */ - if (!prepare_for_sys_schedule(0)) { - /* - * Not allowed to wait in erl_sys_schedule; - * do tse wait instead... - */ - sched_change_waiting_sys_to_waiting(esdp->no, rq); - erts_smp_runq_unlock(rq); - spincount = 0; - goto tse_wait; - } - } -#endif - if (aux_work) { - erts_smp_runq_unlock(rq); - goto sys_poll_aux_work; - } -#ifdef ERTS_SMP - flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING); - if (!(flgs & ERTS_SSI_FLG_SLEEPING)) { - if (!(flgs & ERTS_SSI_FLG_WAITING)) { - ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING)); - goto sys_locked_woken; - } - erts_smp_runq_unlock(rq); - flgs = sched_prep_cont_spin_wait(ssi); - if (!(flgs & ERTS_SSI_FLG_WAITING)) { - ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING)); - goto sys_woken; - } - ASSERT(!erts_port_task_have_outstanding_io_tasks()); - goto sys_poll_aux_work; - } - - ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING); - ASSERT(flgs & ERTS_SSI_FLG_WAITING); -#endif - - erts_smp_runq_unlock(rq); - - if (working) - sched_wall_time_change(esdp, working = 0); - -#ifdef ERTS_SMP - if (thr_prgr_active) - erts_thr_progress_active(esdp, thr_prgr_active = 0); -#endif - - ASSERT(!erts_port_task_have_outstanding_io_tasks()); - - ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_CHECK_IO); - LTTNG2(scheduler_poll, esdp->no, 0); - - erl_sys_schedule(0); - - ERTS_MSACC_POP_STATE_M(); - - if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { - ErtsMonotonicTime current_time = erts_get_monotonic_time(esdp); - if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) - erts_bump_timers(esdp->timer_wheel, current_time); - } - -#ifndef ERTS_SMP - if (erts_smp_atomic32_read_dirty(&rq->len) == 0 && !rq->misc.start) - goto sys_aux_work; - sys_woken: -#else - flgs = sched_prep_cont_spin_wait(ssi); - if (flgs & ERTS_SSI_FLG_WAITING) - goto sys_aux_work; - - sys_woken: - if (!thr_prgr_active) - erts_thr_progress_active(esdp, thr_prgr_active = 1); - erts_smp_runq_lock(rq); - sys_locked_woken: - if (!thr_prgr_active) { - erts_smp_runq_unlock(rq); - erts_thr_progress_active(esdp, thr_prgr_active = 1); - erts_smp_runq_lock(rq); - } - clear_sys_scheduling(); - if (flgs & ~(ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC)) - erts_smp_atomic32_read_band_nob(&ssi->flags, - (ERTS_SSI_FLG_SUSPENDED - | ERTS_SSI_FLG_MSB_EXEC)); -#endif - if (!working) - sched_wall_time_change(esdp, working = 1); - sched_active_sys(esdp->no, rq); + if (ERTS_SCHEDULER_IS_DIRTY(esdp)) + dirty_sched_wall_time_change(esdp, working = 1); + else if (!thr_prgr_active) { + erts_thr_progress_active(esdp, thr_prgr_active = 1); + sched_wall_time_change(esdp, 1); } + erts_runq_lock(rq); + sched_active(esdp->no, rq); + if (ERTS_SCHEDULER_IS_DIRTY(esdp)) dirty_active(esdp, 1); - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); } -#ifdef ERTS_SMP static ERTS_INLINE erts_aint32_t ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi) @@ -3802,7 +3445,7 @@ ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi) erts_aint32_t nflgs = 0; erts_aint32_t xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING; while (1) { - oflgs = erts_smp_atomic32_cmpxchg_relb(&ssi->flags, nflgs, xflgs); + oflgs = erts_atomic32_cmpxchg_relb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return oflgs; nflgs = oflgs & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC); @@ -3816,13 +3459,12 @@ ssi_wake(ErtsSchedulerSleepInfo *ssi) 
erts_sched_finish_poke(ssi, ssi_flags_set_wake(ssi)); } -#ifdef ERTS_DIRTY_SCHEDULERS static void dcpu_sched_ix_suspend_wake(Uint ix) { ErtsSchedulerSleepInfo* ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix); - erts_smp_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED); + erts_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED); ssi_wake(ssi); } @@ -3830,7 +3472,7 @@ static void dio_sched_ix_suspend_wake(Uint ix) { ErtsSchedulerSleepInfo* ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix); - erts_smp_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED); + erts_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED); ssi_wake(ssi); } @@ -3848,7 +3490,6 @@ dio_sched_ix_wake(Uint ix) } #endif -#endif static void wake_scheduler(ErtsRunQueue *rq) @@ -3861,13 +3502,12 @@ wake_scheduler(ErtsRunQueue *rq) * so all code *should* handle this without having * the lock on the run queue. */ - ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked(rq) + ERTS_LC_ASSERT(!erts_lc_runq_is_locked(rq) || ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); ssi_wake(rq->scheduler->ssi); } -#ifdef ERTS_DIRTY_SCHEDULERS static void wake_dirty_schedulers(ErtsRunQueue *rq, int one) { @@ -3877,10 +3517,10 @@ wake_dirty_schedulers(ErtsRunQueue *rq, int one) ASSERT(ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); sl = &rq->sleepers; - erts_smp_spin_lock(&sl->lock); + erts_spin_lock(&sl->lock); ssi = sl->list; if (!ssi) { - erts_smp_spin_unlock(&sl->lock); + erts_spin_unlock(&sl->lock); if (one) wake_scheduler(rq); } else if (one) { @@ -3894,14 +3534,14 @@ wake_dirty_schedulers(ErtsRunQueue *rq, int one) if (ssi->next) ssi->next->prev = ssi->prev; - erts_smp_spin_unlock(&sl->lock); + erts_spin_unlock(&sl->lock); ERTS_THR_MEMORY_BARRIER; flgs = ssi_flags_set_wake(ssi); erts_sched_finish_poke(ssi, flgs); } else { sl->list = NULL; - erts_smp_spin_unlock(&sl->lock); + erts_spin_unlock(&sl->lock); ERTS_THR_MEMORY_BARRIER; do { @@ -3918,7 +3558,6 @@ wake_dirty_scheduler(ErtsRunQueue *rq) wake_dirty_schedulers(rq, 1); } -#endif #define ERTS_NO_USED_RUNQS_SHIFT 16 #define ERTS_NO_RUNQS_MASK 0xffffU @@ -3932,13 +3571,13 @@ init_no_runqs(int active, int used) { erts_aint32_t no_runqs = (erts_aint32_t) (active & ERTS_NO_RUNQS_MASK); no_runqs |= (erts_aint32_t) ((used & ERTS_NO_RUNQS_MASK) << ERTS_NO_USED_RUNQS_SHIFT); - erts_smp_atomic32_init_nob(&balance_info.no_runqs, no_runqs); + erts_atomic32_init_nob(&balance_info.no_runqs, no_runqs); } static ERTS_INLINE void get_no_runqs(int *active, int *used) { - erts_aint32_t no_runqs = erts_smp_atomic32_read_nob(&balance_info.no_runqs); + erts_aint32_t no_runqs = erts_atomic32_read_nob(&balance_info.no_runqs); if (active) *active = (int) (no_runqs & ERTS_NO_RUNQS_MASK); if (used) @@ -3948,12 +3587,12 @@ get_no_runqs(int *active, int *used) static ERTS_INLINE void set_no_used_runqs(int used) { - erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs); + erts_aint32_t exp = erts_atomic32_read_nob(&balance_info.no_runqs); while (1) { erts_aint32_t act, new; new = (used & ERTS_NO_RUNQS_MASK) << ERTS_NO_USED_RUNQS_SHIFT; new |= exp & ERTS_NO_RUNQS_MASK; - act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp); + act = erts_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp); if (act == exp) break; exp = act; @@ -3963,14 +3602,14 @@ set_no_used_runqs(int used) static ERTS_INLINE void set_no_active_runqs(int active) { - erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs); + erts_aint32_t exp = erts_atomic32_read_nob(&balance_info.no_runqs); while (1) { erts_aint32_t act, 
new; if ((exp & ERTS_NO_RUNQS_MASK) == active) break; new = exp & (ERTS_NO_RUNQS_MASK << ERTS_NO_USED_RUNQS_SHIFT); new |= active & ERTS_NO_RUNQS_MASK; - act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp); + act = erts_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp); if (act == exp) break; exp = act; @@ -3980,14 +3619,14 @@ set_no_active_runqs(int active) static ERTS_INLINE int try_inc_no_active_runqs(int active) { - erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs); + erts_aint32_t exp = erts_atomic32_read_nob(&balance_info.no_runqs); if (((exp >> ERTS_NO_USED_RUNQS_SHIFT) & ERTS_NO_RUNQS_MASK) < active) return 0; if ((exp & ERTS_NO_RUNQS_MASK) + 1 == active) { erts_aint32_t new, act; new = exp & (ERTS_NO_RUNQS_MASK << ERTS_NO_USED_RUNQS_SHIFT); new |= active & ERTS_NO_RUNQS_MASK; - act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp); + act = erts_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp); if (act == exp) return 1; } @@ -4049,25 +3688,20 @@ wake_scheduler_on_empty_runq(ErtsRunQueue *crq) } } -#endif /* ERTS_SMP */ static ERTS_INLINE void smp_notify_inc_runq(ErtsRunQueue *runq) { -#ifdef ERTS_SMP if (runq) { -#ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix)) wake_dirty_scheduler(runq); else -#endif wake_scheduler(runq); } -#endif } void -erts_smp_notify_inc_runq(ErtsRunQueue *runq) +erts_notify_inc_runq(ErtsRunQueue *runq) { smp_notify_inc_runq(runq); } @@ -4075,16 +3709,12 @@ erts_smp_notify_inc_runq(ErtsRunQueue *runq) void erts_sched_notify_check_cpu_bind(void) { -#ifdef ERTS_SMP int ix; for (ix = 0; ix < erts_no_run_queues; ix++) { ErtsRunQueue *rq = ERTS_RUNQ_IX(ix); (void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND); wake_scheduler(rq); } -#else - erts_sched_check_cpu_bind(erts_get_scheduler_data()); -#endif } @@ -4093,9 +3723,9 @@ enqueue_process(ErtsRunQueue *runq, int prio, Process *p) { ErtsRunPrioQueue *rpq; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); - erts_smp_inc_runq_len(runq, &runq->procs.prio_info[prio], prio); + erts_inc_runq_len(runq, &runq->procs.prio_info[prio], prio); if (prio == PRIORITY_LOW) { p->schedule_count = RESCHEDULE_LOW; @@ -4123,7 +3753,7 @@ unqueue_process(ErtsRunQueue *runq, Process *prev_proc, Process *proc) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); if (prev_proc) prev_proc->next = proc->next; @@ -4135,7 +3765,7 @@ unqueue_process(ErtsRunQueue *runq, if (!rpq->first) rpq->last = NULL; - erts_smp_dec_runq_len(runq, rqi, prio); + erts_dec_runq_len(runq, rqi, prio); } @@ -4148,7 +3778,7 @@ dequeue_process(ErtsRunQueue *runq, int prio_q, erts_aint32_t *statep) ErtsRunQueueInfo *rqi; Process *p; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); ASSERT(PRIORITY_NORMAL == prio_q || PRIORITY_HIGH == prio_q @@ -4159,9 +3789,9 @@ dequeue_process(ErtsRunQueue *runq, int prio_q, erts_aint32_t *statep) if (!p) return NULL; - ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER; + ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER; - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); if (statep) *statep = state; @@ -4194,11 +3824,10 @@ check_requeue_process(ErtsRunQueue *rq, int prio_q) static ERTS_INLINE void free_proxy_proc(Process *proxy) { - ASSERT(erts_smp_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY); + ASSERT(erts_atomic32_read_nob(&proxy->state) & 
ERTS_PSFLG_PROXY); erts_free(ERTS_ALC_T_PROC, proxy); } -#ifdef ERTS_SMP static ErtsRunQueue * check_immigration_need(ErtsRunQueue *c_rq, ErtsMigrationPath *mp, int prio) @@ -4251,7 +3880,7 @@ static void immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp) { Uint32 iflags, iflag; - erts_smp_runq_unlock(c_rq); + erts_runq_unlock(c_rq); ASSERT(erts_thr_progress_is_managed_thread()); @@ -4292,13 +3921,13 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp) rq = check_immigration_need(c_rq, mp, prio); if (rq) { - erts_smp_runq_lock(rq); + erts_runq_lock(rq); if (prio == ERTS_PORT_PRIO_LEVEL) { Port *prt; prt = erts_dequeue_port(rq); if (prt) RUNQ_SET_RQ(&prt->run_queue, c_rq); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); if (prt) { /* port might terminate while we have no lock... */ rq = erts_port_runq(prt); @@ -4310,7 +3939,7 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp) erts_enqueue_port(c_rq, prt); if (!iflag) return; /* done */ - erts_smp_runq_unlock(c_rq); + erts_runq_unlock(c_rq); } } } @@ -4324,38 +3953,38 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp) while (proc) { erts_aint32_t state; - state = erts_smp_atomic32_read_acqb(&proc->state); + state = erts_atomic32_read_acqb(&proc->state); if (!(ERTS_PSFLG_BOUND & state) && (prio == (int) ERTS_PSFLGS_GET_PRQ_PRIO(state))) { ErtsRunQueueInfo *rqi = &rq->procs.prio_info[prio]; unqueue_process(rq, rpq, rqi, prio, prev_proc, proc); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); RUNQ_SET_RQ(&proc->run_queue, c_rq); rq_locked = 0; - erts_smp_runq_lock(c_rq); + erts_runq_lock(c_rq); enqueue_process(c_rq, prio, proc); if (!iflag) return; /* done */ - erts_smp_runq_unlock(c_rq); + erts_runq_unlock(c_rq); break; } prev_proc = proc; proc = proc->next; } if (rq_locked) - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); } } } - erts_smp_runq_lock(c_rq); + erts_runq_lock(c_rq); } static ERTS_INLINE void suspend_run_queue(ErtsRunQueue *rq) { - erts_smp_atomic32_read_bor_nob(&rq->scheduler->ssi->flags, + erts_atomic32_read_bor_nob(&rq->scheduler->ssi->flags, ERTS_SSI_FLG_SUSPENDED); (void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_SUSPENDED); @@ -4372,7 +4001,7 @@ resume_run_queue(ErtsRunQueue *rq) ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); oflgs = ERTS_RUNQ_FLGS_READ_BSET(rq, (ERTS_RUNQ_FLG_OUT_OF_WORK @@ -4387,19 +4016,19 @@ resume_run_queue(ErtsRunQueue *rq) rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS; for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) { - len = erts_smp_atomic32_read_dirty(&rq->procs.prio_info[pix].len); + len = erts_atomic32_read_dirty(&rq->procs.prio_info[pix].len); rq->procs.prio_info[pix].max_len = len; rq->procs.prio_info[pix].reds = 0; } - len = erts_smp_atomic32_read_dirty(&rq->ports.info.len); + len = erts_atomic32_read_dirty(&rq->ports.info.len); rq->ports.info.max_len = len; rq->ports.info.reds = 0; - len = erts_smp_atomic32_read_dirty(&rq->len); + len = erts_atomic32_read_dirty(&rq->len); rq->max_len = len; } - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); nrml_sched_ix_resume_wake(rq->ix); } @@ -4414,18 +4043,17 @@ schedule_bound_processes(ErtsRunQueue *rq, ErtsStuckBoundProcesses *sbpp) { Process *proc, *next; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); proc = sbpp->first; while (proc) { - erts_aint32_t state = erts_smp_atomic32_read_acqb(&proc->state); + erts_aint32_t state = erts_atomic32_read_acqb(&proc->state); next = proc->next; enqueue_process(rq, (int) 
ERTS_PSFLGS_GET_PRQ_PRIO(state), proc); proc = next; } } -#ifdef ERTS_DIRTY_SCHEDULERS static ERTS_INLINE void clear_proc_dirty_queue_bit(Process *p, ErtsRunQueue *rq, int prio_bit) @@ -4445,11 +4073,10 @@ clear_proc_dirty_queue_bit(Process *p, ErtsRunQueue *rq, int prio_bit) #else (void) #endif - erts_smp_atomic32_read_band_mb(&p->dirty_state, ~qb); + erts_atomic32_read_band_mb(&p->dirty_state, ~qb); ASSERT(old & qb); } -#endif /* ERTS_DIRTY_SCHEDULERS */ static void @@ -4461,7 +4088,7 @@ evacuate_run_queue(ErtsRunQueue *rq, ErtsMigrationPaths *mps; ErtsMigrationPath *mp; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); @@ -4484,9 +4111,9 @@ evacuate_run_queue(ErtsRunQueue *rq, rq->misc.start = NULL; rq->misc.end = NULL; ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_MISC_OP); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); - erts_smp_runq_lock(to_rq); + erts_runq_lock(to_rq); if (to_rq->misc.end) to_rq->misc.end->next = start; else @@ -4496,9 +4123,9 @@ evacuate_run_queue(ErtsRunQueue *rq, non_empty_runq(to_rq); - erts_smp_runq_unlock(to_rq); + erts_runq_unlock(to_rq); smp_notify_inc_runq(to_rq); - erts_smp_runq_lock(to_rq); + erts_runq_lock(to_rq); } if (rq->ports.start) { @@ -4514,7 +4141,7 @@ evacuate_run_queue(ErtsRunQueue *rq, ErtsRunQueue *prt_rq; prt = erts_dequeue_port(rq); RUNQ_SET_RQ(&prt->run_queue, to_rq); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); /* * The port might terminate while * we have no lock on it... @@ -4526,9 +4153,9 @@ evacuate_run_queue(ErtsRunQueue *rq, "%s:%d:%s() internal error\n", __FILE__, __LINE__, __func__); erts_enqueue_port(to_rq, prt); - erts_smp_runq_unlock(to_rq); + erts_runq_unlock(to_rq); } - erts_smp_runq_lock(rq); + erts_runq_lock(rq); prt = rq->ports.start; } smp_notify_inc_runq(to_rq); @@ -4565,7 +4192,7 @@ evacuate_run_queue(ErtsRunQueue *rq, free_proxy_proc(proc); goto handle_next_proc; } - real_state = erts_smp_atomic32_read_acqb(&real_proc->state); + real_state = erts_atomic32_read_acqb(&real_proc->state); } max_qbit = (state >> ERTS_PSFLGS_IN_PRQ_MASK_OFFSET); @@ -4591,7 +4218,7 @@ evacuate_run_queue(ErtsRunQueue *rq, #else (void) #endif - erts_smp_atomic32_read_band_mb(&proc->state, + erts_atomic32_read_band_mb(&proc->state, ~clr_bits); ASSERT((old & clr_bits) == clr_bits); @@ -4611,17 +4238,17 @@ evacuate_run_queue(ErtsRunQueue *rq, } else { int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); to_rq = mp->prio[prio].runq; RUNQ_SET_RQ(&proc->run_queue, to_rq); - erts_smp_runq_lock(to_rq); + erts_runq_lock(to_rq); enqueue_process(to_rq, prio, proc); - erts_smp_runq_unlock(to_rq); + erts_runq_unlock(to_rq); notify = 1; - erts_smp_runq_lock(rq); + erts_runq_lock(rq); } handle_next_proc: @@ -4640,13 +4267,13 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq, ErtsRunPrioQueue *rpq; if (*rq_lockedp) { - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); *rq_lockedp = 0; } - ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(!erts_lc_runq_is_locked(rq)); - erts_smp_runq_lock(vrq); + erts_runq_lock(vrq); if (ERTS_RUNQ_FLGS_GET_NOB(rq) & ERTS_RUNQ_FLG_HALTING) goto no_procs; @@ -4682,16 +4309,16 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq, proc = rpq->first; while (proc) { - erts_aint32_t state = erts_smp_atomic32_read_acqb(&proc->state); + erts_aint32_t state = 
erts_atomic32_read_acqb(&proc->state); if (!(ERTS_PSFLG_BOUND & state)) { /* Steal process */ int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state); ErtsRunQueueInfo *rqi = &vrq->procs.prio_info[prio]; unqueue_process(vrq, rpq, rqi, prio, prev_proc, proc); - erts_smp_runq_unlock(vrq); + erts_runq_unlock(vrq); RUNQ_SET_RQ(&proc->run_queue, rq); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); *rq_lockedp = 1; enqueue_process(rq, prio, proc); return !0; @@ -4705,7 +4332,7 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq, no_procs: - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(vrq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(vrq)); /* * Check for a runnable port to steal... @@ -4715,7 +4342,7 @@ no_procs: ErtsRunQueue *prt_rq; Port *prt = erts_dequeue_port(vrq); RUNQ_SET_RQ(&prt->run_queue, rq); - erts_smp_runq_unlock(vrq); + erts_runq_unlock(vrq); /* * The port might terminate while @@ -4736,7 +4363,7 @@ no_procs: } } - erts_smp_runq_unlock(vrq); + erts_runq_unlock(vrq); return 0; } @@ -4768,7 +4395,7 @@ try_steal_task(ErtsRunQueue *rq) res = 0; rq_locked = 1; - ERTS_SMP_LC_CHK_RUNQ_LOCK(rq, rq_locked); + ERTS_LC_CHK_RUNQ_LOCK(rq, rq_locked); get_no_runqs(&active_rqs, &blnc_rqs); @@ -4781,7 +4408,7 @@ try_steal_task(ErtsRunQueue *rq) if (active_rqs < blnc_rqs) { int no = blnc_rqs - active_rqs; int stop_ix = vix = active_rqs + rq->ix % no; - while (erts_smp_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) { + while (erts_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) { res = check_possible_steal_victim(rq, &rq_locked, vix); if (res) goto done; @@ -4796,7 +4423,7 @@ try_steal_task(ErtsRunQueue *rq) vix = rq->ix; /* ... then try to steal a job from another active queue... */ - while (erts_smp_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) { + while (erts_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) { vix++; if (vix >= active_rqs) vix = 0; @@ -4813,7 +4440,7 @@ try_steal_task(ErtsRunQueue *rq) done: if (!rq_locked) - erts_smp_runq_lock(rq); + erts_runq_lock(rq); if (res) return res; @@ -4939,7 +4566,7 @@ alloc_mpaths(void) { void *block; ErtsMigrationPaths *res; - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&balance_info.update_mtx)); + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&balance_info.update_mtx)); res = mpaths.freelist; if (res) { @@ -4962,7 +4589,7 @@ retire_mpaths(ErtsMigrationPaths *mps) { ErtsThrPrgrVal current; - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&balance_info.update_mtx)); + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&balance_info.update_mtx)); current = erts_thr_progress_current(); @@ -5008,7 +4635,7 @@ check_balance(ErtsRunQueue *c_rq) int sched_util_balancing; #endif - if (erts_smp_atomic32_xchg_nob(&balance_info.checking_balance, 1)) { + if (erts_atomic32_xchg_nob(&balance_info.checking_balance, 1)) { c_rq->check_balance_reds = INT_MAX; return; } @@ -5016,15 +4643,15 @@ check_balance(ErtsRunQueue *c_rq) get_no_runqs(NULL, &blnc_no_rqs); if (blnc_no_rqs == 1) { c_rq->check_balance_reds = INT_MAX; - erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0); + erts_atomic32_set_nob(&balance_info.checking_balance, 0); return; } - erts_smp_runq_unlock(c_rq); + erts_runq_unlock(c_rq); if (balance_info.halftime) { balance_info.halftime = 0; - erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0); + erts_atomic32_set_nob(&balance_info.checking_balance, 0); ERTS_FOREACH_RUNQ(rq, { if (rq->waiting) @@ -5034,7 +4661,7 @@ check_balance(ErtsRunQueue *c_rq) rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS; }); - 
erts_smp_runq_lock(c_rq); + erts_runq_lock(c_rq); return; } @@ -5047,7 +4674,7 @@ check_balance(ErtsRunQueue *c_rq) * is manipulated. Such updates of the migration information * might clash with balancing. */ - erts_smp_mtx_lock(&balance_info.update_mtx); + erts_mtx_lock(&balance_info.update_mtx); forced = balance_info.forced_check_balance; balance_info.forced_check_balance = 0; @@ -5055,10 +4682,10 @@ check_balance(ErtsRunQueue *c_rq) get_no_runqs(¤t_active, &blnc_no_rqs); if (blnc_no_rqs == 1) { - erts_smp_mtx_unlock(&balance_info.update_mtx); - erts_smp_runq_lock(c_rq); + erts_mtx_unlock(&balance_info.update_mtx); + erts_runq_lock(c_rq); c_rq->check_balance_reds = INT_MAX; - erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0); + erts_atomic32_set_nob(&balance_info.checking_balance, 0); return; } @@ -5074,7 +4701,7 @@ check_balance(ErtsRunQueue *c_rq) /* Read balance information for all run queues */ for (qix = 0; qix < blnc_no_rqs; qix++) { ErtsRunQueue *rq = ERTS_RUNQ_IX(qix); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); run_queue_info[qix].flags = ERTS_RUNQ_FLGS_GET_NOB(rq); for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) { @@ -5102,7 +4729,7 @@ check_balance(ErtsRunQueue *c_rq) run_queue_info[qix].sched_util = erts_get_sched_util(rq, 1, 0); #endif - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); } full_scheds = 0; @@ -5541,7 +5168,7 @@ erts_fprintf(stderr, "--------------------------------\n"); Uint32 flags = run_queue_info[qix].flags; ErtsRunQueue *rq = ERTS_RUNQ_IX(qix); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); ASSERT(!(flags & ERTS_RUNQ_FLG_OUT_OF_WORK)); if (rq->waiting) flags |= ERTS_RUNQ_FLG_OUT_OF_WORK; @@ -5556,27 +5183,27 @@ erts_fprintf(stderr, "--------------------------------\n"); rq->out_of_work_count = 0; (void) ERTS_RUNQ_FLGS_READ_BSET(rq, ERTS_RUNQ_FLGS_MIGRATION_INFO, flags); - rq->max_len = erts_smp_atomic32_read_dirty(&rq->len); + rq->max_len = erts_atomic32_read_dirty(&rq->len); for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) { ErtsRunQueueInfo *rqi; rqi = (pix == ERTS_PORT_PRIO_LEVEL ? &rq->ports.info : &rq->procs.prio_info[pix]); - erts_smp_reset_max_len(rq, rqi); + erts_reset_max_len(rq, rqi); rqi->reds = 0; } rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS; - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); } - erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0); + erts_atomic32_set_nob(&balance_info.checking_balance, 0); balance_info.n++; retire_mpaths(old_mpaths); - erts_smp_mtx_unlock(&balance_info.update_mtx); + erts_mtx_unlock(&balance_info.update_mtx); - erts_smp_runq_lock(c_rq); + erts_runq_lock(c_rq); } static void @@ -5584,7 +5211,7 @@ change_no_used_runqs(int used) { ErtsMigrationPaths *new_mpaths, *old_mpaths; int qix; - erts_smp_mtx_lock(&balance_info.update_mtx); + erts_mtx_lock(&balance_info.update_mtx); set_no_used_runqs(used); old_mpaths = erts_get_migration_paths_managed(); @@ -5631,28 +5258,23 @@ change_no_used_runqs(int used) /* Make sure that we balance soon... 
*/ balance_info.forced_check_balance = 1; - erts_smp_mtx_unlock(&balance_info.update_mtx); + erts_mtx_unlock(&balance_info.update_mtx); - erts_smp_runq_lock(ERTS_RUNQ_IX(0)); + erts_runq_lock(ERTS_RUNQ_IX(0)); ERTS_RUNQ_IX(0)->check_balance_reds = 0; - erts_smp_runq_unlock(ERTS_RUNQ_IX(0)); + erts_runq_unlock(ERTS_RUNQ_IX(0)); } -#endif /* #ifdef ERTS_SMP */ Uint erts_debug_nbalance(void) { -#ifdef ERTS_SMP Uint n; - erts_smp_mtx_lock(&balance_info.update_mtx); + erts_mtx_lock(&balance_info.update_mtx); n = balance_info.n; - erts_smp_mtx_unlock(&balance_info.update_mtx); + erts_mtx_unlock(&balance_info.update_mtx); return n; -#else - return 0; -#endif } /* Wakeup other schedulers */ @@ -5698,7 +5320,6 @@ typedef enum { #define ERTS_WAKEUP_OTHER_DEC_LEGACY 10 #define ERTS_WAKEUP_OTHER_FIXED_INC_LEGACY (CONTEXT_REDS/10) -#ifdef ERTS_SMP static struct { ErtsSchedWakeupOtherThreshold threshold; @@ -5714,7 +5335,7 @@ wakeup_other_check(ErtsRunQueue *rq, Uint32 flags) { int wo_reds = rq->wakeup_other_reds; if (wo_reds) { - int left_len = erts_smp_atomic32_read_dirty(&rq->len) - 1; + int left_len = erts_atomic32_read_dirty(&rq->len) - 1; if (left_len < 1) { int wo_reduce = wo_reds << wakeup_other.dec_shift; wo_reduce &= wakeup_other.dec_mask; @@ -5726,16 +5347,14 @@ wakeup_other_check(ErtsRunQueue *rq, Uint32 flags) rq->wakeup_other += (left_len*wo_reds + ERTS_WAKEUP_OTHER_FIXED_INC); if (rq->wakeup_other > wakeup_other.limit) { -#ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) { if (rq->waiting) { wake_dirty_scheduler(rq); } } else -#endif { int empty_rqs = - erts_smp_atomic32_read_acqb(&no_empty_run_queues); + erts_atomic32_read_acqb(&no_empty_run_queues); if (flags & ERTS_RUNQ_FLG_PROTECTED) (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); if (empty_rqs != 0) @@ -5787,7 +5406,7 @@ wakeup_other_check_legacy(ErtsRunQueue *rq, Uint32 flags) { int wo_reds = rq->wakeup_other_reds; if (wo_reds) { - erts_aint32_t len = erts_smp_atomic32_read_dirty(&rq->len); + erts_aint32_t len = erts_atomic32_read_dirty(&rq->len); if (len < 2) { rq->wakeup_other -= ERTS_WAKEUP_OTHER_DEC_LEGACY*wo_reds; if (rq->wakeup_other < 0) @@ -5798,7 +5417,7 @@ wakeup_other_check_legacy(ErtsRunQueue *rq, Uint32 flags) else { if (flags & ERTS_RUNQ_FLG_PROTECTED) (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); - if (erts_smp_atomic32_read_acqb(&no_empty_run_queues) != 0) { + if (erts_atomic32_read_acqb(&no_empty_run_queues) != 0) { wake_scheduler_on_empty_runq(rq); rq->wakeup_other = 0; } @@ -5849,7 +5468,7 @@ static int no_runqs_to_supervise(void) { int used; - erts_aint32_t nerq = erts_smp_atomic32_read_acqb(&no_empty_run_queues); + erts_aint32_t nerq = erts_atomic32_read_acqb(&no_empty_run_queues); if (nerq <= 0) return 0; get_no_runqs(NULL, &used); @@ -5882,26 +5501,23 @@ runq_supervisor(void *unused) for (ix = 0; ix < no_rqs; ix++) { ErtsRunQueue *rq = ERTS_RUNQ_IX(ix); if (ERTS_RUNQ_FLGS_GET(rq) & ERTS_RUNQ_FLG_NONEMPTY) { - erts_smp_runq_lock(rq); - if (erts_smp_atomic32_read_dirty(&rq->len) != 0) + erts_runq_lock(rq); + if (erts_atomic32_read_dirty(&rq->len) != 0) wake_scheduler_on_empty_runq(rq); /* forced wakeup... 
*/ - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); } } } return NULL; } -#endif void erts_early_init_scheduling(int no_schedulers) { aux_work_timeout_early_init(no_schedulers); -#ifdef ERTS_SMP wakeup_other.threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM; wakeup_other.type = ERTS_SCHED_WAKEUP_OTHER_TYPE_DEFAULT; -#endif sched_busy_wait.sys_schedule = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM; sched_busy_wait.tse = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM * ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT); @@ -5912,7 +5528,6 @@ erts_early_init_scheduling(int no_schedulers) int erts_sched_set_wakeup_other_thresold(char *str) { -#ifdef ERTS_SMP ErtsSchedWakeupOtherThreshold threshold; if (sys_strcmp(str, "very_high") == 0) threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_HIGH; @@ -5929,20 +5544,11 @@ erts_sched_set_wakeup_other_thresold(char *str) wakeup_other.threshold = threshold; set_wakeup_other_data(); return 0; -#else - if (sys_strcmp(str, "very_high") == 0 || sys_strcmp(str, "high") == 0 || - sys_strcmp(str, "medium") == 0 || sys_strcmp(str, "low") == 0 || - sys_strcmp(str, "very_low") == 0) { - return 0; - } - return EINVAL; -#endif } int erts_sched_set_wakeup_other_type(char *str) { -#ifdef ERTS_SMP ErtsSchedWakeupOtherType type; if (sys_strcmp(str, "default") == 0) type = ERTS_SCHED_WAKEUP_OTHER_TYPE_DEFAULT; @@ -5952,12 +5558,6 @@ erts_sched_set_wakeup_other_type(char *str) return EINVAL; wakeup_other.type = type; return 0; -#else - if (sys_strcmp(str, "default") == 0 || sys_strcmp(str, "legacy") == 0) { - return 0; - } - return EINVAL; -#endif } int @@ -6028,7 +5628,6 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp) case ERTS_SCHED_NORMAL: id = (int) esdp->no; break; -#ifdef ERTS_DIRTY_SCHEDULERS case ERTS_SCHED_DIRTY_CPU: id = (int) erts_no_schedulers; id += (int) esdp->dirty_no; @@ -6038,7 +5637,6 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp) id += (int) erts_no_dirty_cpu_schedulers; id += (int) esdp->dirty_no; break; -#endif default: ERTS_INTERNAL_ERROR("Invalid scheduler type"); break; @@ -6048,7 +5646,6 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp) awdp->sched_id = id; awdp->esdp = esdp; awdp->ssi = esdp ? 
esdp->ssi : NULL; -#ifdef ERTS_SMP awdp->latest_wakeup = ERTS_THR_PRGR_VAL_FIRST; awdp->misc.thr_prgr = ERTS_THR_PRGR_VAL_WAITING; awdp->dd.thr_prgr = ERTS_THR_PRGR_VAL_WAITING; @@ -6057,15 +5654,9 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp) awdp->later_op.size = 0; awdp->later_op.first = NULL; awdp->later_op.last = NULL; -#endif -#ifdef ERTS_USE_ASYNC_READY_Q -#ifdef ERTS_SMP awdp->async_ready.need_thr_prgr = 0; awdp->async_ready.thr_prgr = ERTS_THR_PRGR_VAL_WAITING; -#endif awdp->async_ready.queue = NULL; -#endif -#ifdef ERTS_SMP awdp->delayed_wakeup.next = ERTS_DELAYED_WAKEUP_INFINITY; if (!dawwp) { awdp->delayed_wakeup.job = NULL; @@ -6081,7 +5672,6 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp) for (i = 0; i <= erts_no_schedulers; i++) awdp->delayed_wakeup.sched2jix[i] = -1; } -#endif awdp->debug.wait_completed.flags = 0; awdp->debug.wait_completed.callback = NULL; awdp->debug.wait_completed.arg = NULL; @@ -6096,11 +5686,9 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num, Uint64 time_stamp) { esdp->timer_wheel = NULL; -#ifdef ERTS_SMP erts_bits_init_state(&esdp->erl_bits_state); esdp->match_pseudo_process = NULL; esdp->free_process = NULL; -#endif esdp->x_reg_array = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER, ERTS_X_REGS_ALLOCATED * @@ -6108,7 +5696,6 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num, esdp->f_reg_array = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER, MAX_REG * sizeof(FloatDef)); -#ifdef ERTS_DIRTY_SCHEDULERS esdp->run_queue = runq; if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix)) { esdp->no = 0; @@ -6136,19 +5723,14 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num, esdp->dirty_shadow_process = shadow_proc; if (shadow_proc) { erts_init_empty_process(shadow_proc); - erts_smp_atomic32_init_nob(&shadow_proc->state, + erts_atomic32_init_nob(&shadow_proc->state, (ERTS_PSFLG_ACTIVE | ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_PROXY)); shadow_proc->static_flags = ERTS_STC_FLG_SHADOW_PROC; } -#else - runq->scheduler = esdp; - esdp->run_queue = runq; - esdp->no = (Uint) num; - esdp->type = ERTS_SCHED_NORMAL; -#endif + ssi->esdp = esdp; esdp->ssi = ssi; esdp->current_process = NULL; esdp->current_port = NULL; @@ -6169,9 +5751,7 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num, if (daww_ptr) { init_aux_work_data(&esdp->aux_work_data, esdp, *daww_ptr); -#ifdef ERTS_SMP *daww_ptr += daww_sz; -#endif } esdp->reductions = 0; @@ -6181,27 +5761,21 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num, } void -erts_init_scheduling(int no_schedulers, int no_schedulers_online -#ifdef ERTS_DIRTY_SCHEDULERS - , int no_dirty_cpu_schedulers, int no_dirty_cpu_schedulers_online, +erts_init_scheduling(int no_schedulers, int no_schedulers_online, int no_poll_threads, + int no_dirty_cpu_schedulers, int no_dirty_cpu_schedulers_online, int no_dirty_io_schedulers -#endif ) { int ix, n, no_ssi, tot_rqs; char *daww_ptr; size_t daww_sz; size_t size_runqs; -#ifdef ERTS_SMP erts_aint32_t set_schdlr_sspnd_change_flags; -#endif init_misc_op_list_alloc(); init_proc_sys_task_queues_alloc(); -#ifdef ERTS_SMP set_wakeup_other_data(); -#endif #if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT if (erts_sched_balance_util) @@ -6211,12 +5785,11 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online ASSERT(no_schedulers_online <= no_schedulers); ASSERT(no_schedulers_online >= 1); ASSERT(no_schedulers >= 1); -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(no_dirty_cpu_schedulers <= no_schedulers); 
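/*
 * [Editorial sketch] The init_aux_work_data() hunk further up flattens all
 * scheduler types into a single aux-work id space: normal schedulers keep
 * their own numbers, dirty CPU schedulers are offset by the number of
 * normal schedulers, and dirty I/O schedulers by both counts. A hedged
 * illustration of that numbering with illustrative names (not the
 * emulator's API):
 */
enum sched_type { SCHED_NORMAL, SCHED_DIRTY_CPU, SCHED_DIRTY_IO };

/* Map (type, per-type number) to a unique flat id, as in the diff:
 *   normal    -> no
 *   dirty cpu -> n_normal + no
 *   dirty io  -> n_normal + n_dirty_cpu + no
 */
static int flat_aux_work_id(enum sched_type type, int no,
                            int n_normal, int n_dirty_cpu)
{
    switch (type) {
    case SCHED_NORMAL:    return no;
    case SCHED_DIRTY_CPU: return n_normal + no;
    case SCHED_DIRTY_IO:  return n_normal + n_dirty_cpu + no;
    }
    return -1; /* invalid scheduler type */
}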
ASSERT(no_dirty_cpu_schedulers >= 1); ASSERT(no_dirty_cpu_schedulers_online <= no_schedulers_online); ASSERT(no_dirty_cpu_schedulers_online >= 1); -#endif + ASSERT(erts_no_poll_threads == no_poll_threads); /* Create and initialize run queues */ @@ -6225,9 +5798,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online size_runqs = sizeof(ErtsAlignedRunQueue) * tot_rqs; erts_aligned_run_queues = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_RUNQS, size_runqs); -#ifdef ERTS_SMP - erts_smp_atomic32_init_nob(&no_empty_run_queues, 0); -#endif + erts_atomic32_init_nob(&no_empty_run_queues, 0); erts_no_run_queues = n; @@ -6241,20 +5812,16 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online * id if the esdp->no <-> ix+1 mapping change. */ - erts_smp_mtx_init(&rq->mtx, "run_queue", make_small(ix + 1), + erts_mtx_init(&rq->mtx, "run_queue", make_small(ix + 1), ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER); - erts_smp_cnd_init(&rq->cnd); + erts_cnd_init(&rq->cnd); -#ifdef ERTS_DIRTY_SCHEDULERS -#ifdef ERTS_SMP if (ERTS_RUNQ_IX_IS_DIRTY(ix)) { - erts_smp_spinlock_init(&rq->sleepers.lock, "dirty_run_queue_sleep_list", + erts_spinlock_init(&rq->sleepers.lock, "dirty_run_queue_sleep_list", make_small(ix + 1), ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER); } rq->sleepers.list = NULL; -#endif -#endif rq->waiting = 0; rq->woken = 0; @@ -6267,7 +5834,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online } rq->out_of_work_count = 0; rq->max_len = 0; - erts_smp_atomic32_set_nob(&rq->len, 0); + erts_atomic32_set_nob(&rq->len, 0); rq->wakeup_other = 0; rq->wakeup_other_reds = 0; @@ -6276,7 +5843,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online rq->procs.reductions = 0; for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) { - erts_smp_atomic32_init_nob(&rq->procs.prio_info[pix].len, 0); + erts_atomic32_init_nob(&rq->procs.prio_info[pix].len, 0); rq->procs.prio_info[pix].max_len = 0; rq->procs.prio_info[pix].reds = 0; if (pix < ERTS_NO_PROC_PRIO_LEVELS - 1) { @@ -6288,7 +5855,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online rq->misc.start = NULL; rq->misc.end = NULL; - erts_smp_atomic32_init_nob(&rq->ports.info.len, 0); + erts_atomic32_init_nob(&rq->ports.info.len, 0); rq->ports.info.max_len = 0; rq->ports.info.reds = 0; rq->ports.start = NULL; @@ -6300,7 +5867,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online } -#ifdef ERTS_SMP if (erts_no_run_queues != 1) { run_queue_info = erts_alloc(ERTS_ALC_T_RUNQ_BLNS, @@ -6311,52 +5877,42 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online * erts_no_run_queues)); } -#endif n = (int) no_schedulers; erts_no_schedulers = n; erts_no_total_schedulers = n; -#ifdef ERTS_DIRTY_SCHEDULERS erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers; erts_no_total_schedulers += no_dirty_cpu_schedulers; erts_no_dirty_io_schedulers = no_dirty_io_schedulers; erts_no_total_schedulers += no_dirty_io_schedulers; -#endif /* Create and initialize scheduler sleep info */ -#ifdef ERTS_SMP - no_ssi = n+1; -#else - no_ssi = 1; -#endif + no_ssi = n + 1 /* aux thread */; aligned_sched_sleep_info = erts_alloc_permanent_cache_aligned( ERTS_ALC_T_SCHDLR_SLP_INFO, no_ssi*sizeof(ErtsAlignedSchedulerSleepInfo)); for (ix = 0; ix < no_ssi; ix++) { ErtsSchedulerSleepInfo *ssi = &aligned_sched_sleep_info[ix].ssi; -#ifdef ERTS_SMP #if 0 /* no need to initialize these... 
*/ ssi->next = NULL; ssi->prev = NULL; #endif - erts_smp_atomic32_init_nob(&ssi->flags, 0); + ssi->esdp = NULL; + erts_atomic32_init_nob(&ssi->flags, 0); ssi->event = NULL; /* initialized in sched_thread_func */ -#endif erts_atomic32_init_nob(&ssi->aux_work, 0); } -#ifdef ERTS_SMP - aligned_sched_sleep_info++; + aligned_sched_sleep_info += 1 /* aux thread */; -#ifdef ERTS_DIRTY_SCHEDULERS aligned_dirty_cpu_sched_sleep_info = erts_alloc_permanent_cache_aligned( ERTS_ALC_T_SCHDLR_SLP_INFO, no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo)); for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) { ErtsSchedulerSleepInfo *ssi = &aligned_dirty_cpu_sched_sleep_info[ix].ssi; - erts_smp_atomic32_init_nob(&ssi->flags, 0); + erts_atomic32_init_nob(&ssi->flags, 0); ssi->event = NULL; /* initialized in sched_dirty_cpu_thread_func */ erts_atomic32_init_nob(&ssi->aux_work, 0); } @@ -6366,24 +5922,29 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo)); for (ix = 0; ix < no_dirty_io_schedulers; ix++) { ErtsSchedulerSleepInfo *ssi = &aligned_dirty_io_sched_sleep_info[ix].ssi; - erts_smp_atomic32_init_nob(&ssi->flags, 0); + erts_atomic32_init_nob(&ssi->flags, 0); ssi->event = NULL; /* initialized in sched_dirty_io_thread_func */ erts_atomic32_init_nob(&ssi->aux_work, 0); } -#endif -#endif + + aligned_poll_thread_sleep_info = + erts_alloc_permanent_cache_aligned( + ERTS_ALC_T_SCHDLR_SLP_INFO, + no_poll_threads*sizeof(ErtsAlignedSchedulerSleepInfo)); + for (ix = 0; ix < no_poll_threads; ix++) { + ErtsSchedulerSleepInfo *ssi = &aligned_poll_thread_sleep_info[ix].ssi; + ssi->esdp = NULL; + erts_atomic32_init_nob(&ssi->flags, 0); + ssi->event = NULL; /* initialized in poll_thread */ + erts_atomic32_init_nob(&ssi->aux_work, 0); + } /* Create and initialize scheduler specific data */ -#ifdef ERTS_SMP daww_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE((sizeof(ErtsDelayedAuxWorkWakeupJob) + sizeof(int))*(n+1)); daww_ptr = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA, daww_sz*n); -#else - daww_sz = 0; - daww_ptr = NULL; -#endif erts_aligned_scheduler_data = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA, @@ -6396,7 +5957,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online NULL, 0); } -#ifdef ERTS_DIRTY_SCHEDULERS { Uint64 ts = sched_wall_time_ts(); int dirty_scheds = no_dirty_cpu_schedulers + no_dirty_io_schedulers; @@ -6427,7 +5987,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online &adsp[adspix++].dsp, ts); } } -#endif init_misc_aux_work(); init_swtreq_alloc(); @@ -6436,20 +5995,22 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online erts_atomic32_init_nob(&debug_wait_completed_count, 0); /* debug only */ debug_wait_completed_flags = 0; -#ifdef ERTS_SMP - aux_thread_aux_work_data = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA, sizeof(ErtsAuxWorkData)); + poll_thread_aux_work_data = + erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA, + no_poll_threads * sizeof(ErtsAuxWorkData)); + init_no_runqs(no_schedulers_online, no_schedulers_online); balance_info.last_active_runqs = no_schedulers; - erts_smp_mtx_init(&balance_info.update_mtx, "migration_info_update", NIL, + erts_mtx_init(&balance_info.update_mtx, "migration_info_update", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER); balance_info.forced_check_balance = 0; balance_info.halftime = 1; balance_info.full_reds_history_index = 0; - 
erts_smp_atomic32_init_nob(&balance_info.checking_balance, 0); + erts_atomic32_init_nob(&balance_info.checking_balance, 0); balance_info.prev_rise.active_runqs = 0; balance_info.prev_rise.max_len = 0; balance_info.prev_rise.reds = 0; @@ -6480,7 +6041,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online suspend_run_queue(ERTS_RUNQ_IX(ix)); } -#ifdef ERTS_DIRTY_SCHEDULERS schdlr_sspnd_set_nscheds(&schdlr_sspnd.online, ERTS_SCHED_DIRTY_CPU, @@ -6497,7 +6057,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online set_schdlr_sspnd_change_flags |= ERTS_SCHDLR_SSPND_CHNG_DCPU_ONLN; for (ix = no_dirty_cpu_schedulers_online; ix < no_dirty_cpu_schedulers; ix++) { ErtsSchedulerData* esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix); - erts_smp_atomic32_read_bor_nob(&esdp->ssi->flags, ERTS_SSI_FLG_SUSPENDED); + erts_atomic32_read_bor_nob(&esdp->ssi->flags, ERTS_SSI_FLG_SUSPENDED); } } @@ -6511,54 +6071,26 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online ERTS_SCHED_DIRTY_IO, no_dirty_io_schedulers); - erts_smp_atomic32_init_nob(&dirty_count.cpu.active, + erts_atomic32_init_nob(&dirty_count.cpu.active, (erts_aint32_t) no_dirty_cpu_schedulers); - erts_smp_atomic32_init_nob(&dirty_count.io.active, + erts_atomic32_init_nob(&dirty_count.io.active, (erts_aint32_t) no_dirty_io_schedulers); -#endif if (set_schdlr_sspnd_change_flags) - erts_smp_atomic32_set_nob(&schdlr_sspnd.changing, + erts_atomic32_set_nob(&schdlr_sspnd.changing, set_schdlr_sspnd_change_flags); - erts_smp_atomic32_init_nob(&doing_sys_schedule, 0); - init_misc_aux_work(); -#else /* !ERTS_SMP */ - { - ErtsSchedulerData *esdp; - esdp = ERTS_SCHEDULER_IX(0); - erts_scheduler_data = esdp; -#ifdef USE_THREADS - erts_tsd_set(sched_data_key, (void *) esdp); -#endif - } - erts_no_dirty_cpu_schedulers = 0; - erts_no_dirty_io_schedulers = 0; -#endif - - erts_smp_atomic32_init_nob(&function_calls, 0); /* init port tasks */ erts_port_task_init(); -#ifndef ERTS_SMP -#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC - erts_scheduler_data->verify_unused_temp_alloc - = erts_alloc_get_verify_unused_temp_alloc( - &erts_scheduler_data->verify_unused_temp_alloc_data); - ERTS_VERIFY_UNUSED_TEMP_ALLOC(NULL); -#endif -#endif - erts_smp_atomic32_init_relb(&erts_halt_progress, -1); + erts_atomic32_init_relb(&erts_halt_progress, -1); erts_halt_code = 0; -#if !defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) - erts_lc_set_thread_name("scheduler 1"); -#endif } @@ -6571,7 +6103,6 @@ erts_schedid2runq(Uint id) return ERTS_RUNQ_IX(ix); } -#ifdef USE_THREADS ErtsSchedulerData * erts_get_scheduler_data(void) @@ -6579,16 +6110,13 @@ erts_get_scheduler_data(void) return (ErtsSchedulerData *) erts_tsd_get(sched_data_key); } -#endif static Process * make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio) { erts_aint32_t state; Process *proxy; -#ifdef ERTS_SMP ErtsRunQueue *rq = RUNQ_READ_RQ(&proc->run_queue); -#endif state = (ERTS_PSFLG_PROXY | ERTS_PSFLG_IN_RUNQ @@ -6599,11 +6127,9 @@ make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio) if (prev_proxy) { proxy = prev_proxy; - ASSERT(erts_smp_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY); - erts_smp_atomic32_set_nob(&proxy->state, state); -#ifdef ERTS_SMP + ASSERT(erts_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY); + erts_atomic32_set_nob(&proxy->state, state); RUNQ_SET_RQ(&proc->run_queue, rq); -#endif } else { proxy = erts_alloc(ERTS_ALC_T_PROC, sizeof(Process)); @@ -6615,11 +6141,9 @@ make_proxy_proc(Process *prev_proxy, Process *proc, 
erts_aint32_t prio) ui32[i] = (Uint32) 0xdeadbeef; } #endif - erts_smp_atomic32_init_nob(&proxy->state, state); -#ifdef ERTS_SMP - erts_smp_atomic_init_nob(&proxy->run_queue, - erts_smp_atomic_read_nob(&proc->run_queue)); -#endif + erts_atomic32_init_nob(&proxy->state, state); + erts_atomic_init_nob(&proxy->run_queue, + erts_atomic_read_nob(&proc->run_queue)); } proxy->common.id = proc->common.id; @@ -6632,7 +6156,6 @@ make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio) #define ERTS_ENQUEUE_DIRTY_CPU_QUEUE 2 #define ERTS_ENQUEUE_DIRTY_IO_QUEUE 3 -#ifdef ERTS_DIRTY_SCHEDULERS static int check_dirty_enqueue_in_prio_queue(Process *c_p, @@ -6663,7 +6186,7 @@ check_dirty_enqueue_in_prio_queue(Process *c_p, if ((*newp) & ERTS_PSFLG_ACTIVE_SYS) return ERTS_ENQUEUE_NORMAL_QUEUE; - dact = erts_smp_atomic32_read_mb(&c_p->dirty_state); + dact = erts_atomic32_read_mb(&c_p->dirty_state); if (actual & (ERTS_PSFLG_DIRTY_ACTIVE_SYS | ERTS_PSFLG_DIRTY_CPU_PROC)) { max_qbit = ((dact >> ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET) @@ -6708,7 +6231,7 @@ fin_dirty_enq_s_change(Process *p, erts_aint32_t qbit = 1 << enq_prio; qbit <<= qmask_offset; - if (qbit & erts_smp_atomic32_read_bor_mb(&p->dirty_state, qbit)) { + if (qbit & erts_atomic32_read_bor_mb(&p->dirty_state, qbit)) { /* Already enqueued by someone else... */ if (pstruct_reserved) { /* We reserved process struct for enqueue; clear it... */ @@ -6717,7 +6240,7 @@ fin_dirty_enq_s_change(Process *p, #else (void) #endif - erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_IN_RUNQ); + erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_IN_RUNQ); ASSERT(old & ERTS_PSFLG_IN_RUNQ); } return 0; @@ -6726,7 +6249,6 @@ fin_dirty_enq_s_change(Process *p, return !0; } -#endif /* ERTS_DIRTY_SCHEDULERS */ static ERTS_INLINE int check_enqueue_in_prio_queue(Process *c_p, @@ -6741,14 +6263,12 @@ check_enqueue_in_prio_queue(Process *c_p, *prq_prio_p = aprio; -#ifdef ERTS_DIRTY_SCHEDULERS if (actual & ERTS_PSFLGS_DIRTY_WORK) { int res = check_dirty_enqueue_in_prio_queue(c_p, newp, actual, aprio, qbit); if (res != ERTS_ENQUEUE_NORMAL_QUEUE) return res; } -#endif max_qbit = (actual >> ERTS_PSFLGS_IN_PRQ_MASK_OFFSET) & ERTS_PSFLGS_QMASK; max_qbit |= 1 << ERTS_PSFLGS_QMASK_BITS; @@ -6791,7 +6311,6 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st return NULL; -#ifdef ERTS_DIRTY_SCHEDULERS case ERTS_ENQUEUE_DIRTY_CPU_QUEUE: case -ERTS_ENQUEUE_DIRTY_CPU_QUEUE: @@ -6812,7 +6331,6 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st return NULL; -#endif default: { ErtsRunQueue* runq; @@ -6822,7 +6340,6 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st runq = erts_get_runq_proc(p); -#ifdef ERTS_SMP if (!(ERTS_PSFLG_BOUND & state)) { ErtsRunQueue *new_runq = erts_check_emigration_need(runq, enq_prio); if (new_runq) { @@ -6830,7 +6347,6 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st runq = new_runq; } } -#endif ASSERT(runq); @@ -6858,7 +6374,6 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, running_flgs = ERTS_PSFLG_DIRTY_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS; else { running_flgs = ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS; -#ifdef ERTS_DIRTY_SCHEDULERS if (state & ERTS_PSFLG_DIRTY_ACTIVE_SYS && (p->flags & (F_DELAY_GC|F_DISABLE_GC))) { /* @@ -6872,11 +6387,10 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, */ ASSERT(!(p->flags & (F_DIRTY_CLA | F_DIRTY_GC_HIBERNATE))); - state =
erts_smp_atomic32_read_band_nob(&p->state, + state = erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_DIRTY_ACTIVE_SYS); state &= ~ERTS_PSFLG_DIRTY_ACTIVE_SYS; } -#endif } a = state; @@ -6893,7 +6407,7 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, || (a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) { enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a); } - a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); + a = erts_atomic32_cmpxchg_mb(&p->state, n, e); if (a == e) break; } @@ -6905,7 +6419,7 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, if (erts_system_profile_flags.runnable_procs) { /* Status lock prevents out of order "runnable proc" trace msgs */ - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); if (!(a & (ERTS_PSFLG_ACTIVE_SYS|ERTS_PSFLG_DIRTY_ACTIVE_SYS)) && (!(a & ERTS_PSFLG_ACTIVE) || (a & ERTS_PSFLG_SUSPENDED))) { @@ -6917,15 +6431,10 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, if (proxy) free_proxy_proc(proxy); - erts_smp_runq_lock(c_rq); + erts_runq_lock(c_rq); -#if !defined(ERTS_SMP) - /* Decrement refc if process struct is free... */ - return !!(n & ERTS_PSFLG_FREE); -#else /* Decrement refc if scheduled out from dirty scheduler... */ return !is_normal_sched; -#endif } else { Process* sched_p; @@ -6944,7 +6453,7 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, ASSERT(runq); - erts_smp_runq_lock(runq); + erts_runq_lock(runq); if (is_normal_sched && sched_p == p && ERTS_RUNQ_IX_IS_DIRTY(runq->ix)) erts_proc_inc_refc(p); /* Needs to be done before enqueue_process() */ @@ -6955,11 +6464,11 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, if (runq == c_rq) return 0; - erts_smp_runq_unlock(runq); + erts_runq_unlock(runq); smp_notify_inc_runq(runq); - erts_smp_runq_lock(c_rq); + erts_runq_lock(c_rq); /* * Decrement refc if process is scheduled out by a @@ -7007,12 +6516,12 @@ add2runq(int enqueue, erts_aint32_t prio, sched_p = make_proxy_proc(pxy, proc, prio); } - erts_smp_runq_lock(runq); + erts_runq_lock(runq); /* Enqueue the process */ enqueue_process(runq, (int) prio, sched_p); - erts_smp_runq_unlock(runq); + erts_runq_unlock(runq); smp_notify_inc_runq(runq); } } @@ -7037,7 +6546,7 @@ change_proc_schedule_state(Process *p, unsigned int lock_status = (prof_runnable_procs && !(locks & ERTS_PROC_LOCK_STATUS)); - ERTS_SMP_LC_ASSERT(locks == erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(locks == erts_proc_lc_my_proc_locks(p)); ASSERT(!(a & ERTS_PSFLG_PROXY)); ASSERT((clear_state_flags & (ERTS_PSFLG_RUNNING @@ -7052,7 +6561,7 @@ change_proc_schedule_state(Process *p, | ERTS_PSFLG_ACTIVE_SYS)) == 0); if (lock_status) - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); while (1) { erts_aint32_t e; @@ -7076,11 +6585,9 @@ change_proc_schedule_state(Process *p, | ERTS_PSFLG_DIRTY_RUNNING_SYS | ERTS_PSFLG_IN_RUNQ | ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE -#ifdef ERTS_DIRTY_SCHEDULERS || (n & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_RUNNING_SYS | ERTS_PSFLG_EXITING)) == ERTS_PSFLG_EXITING -#endif ) { /* * Active and seemingly need to be enqueued, but @@ -7090,7 +6597,7 @@ change_proc_schedule_state(Process *p, enqueue = check_enqueue_in_prio_queue(p, enq_prio_p, &n, a); } - a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); + a = erts_atomic32_cmpxchg_mb(&p->state, n, e); if (a == 
e) break; if (enqueue == ERTS_ENQUEUE_NOT && n == a) @@ -7115,7 +6622,7 @@ change_proc_schedule_state(Process *p, } if (lock_status) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } @@ -7159,7 +6666,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st, res = 1; /* prepare for success */ st->next = st->prev = st; /* Prep for empty prio queue */ - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); prof_runnable_procs = erts_system_profile_flags.runnable_procs; locked = 0; free_stqs = NULL; @@ -7179,9 +6686,9 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st, if (!locked) { locked = 1; - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); if (state & fail_state) { *fail_state_p = (state & fail_state); free_stqs = stqs; @@ -7225,7 +6732,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st, n = e = a; n &= ~ERTS_PSFLGS_ACT_PRIO_MASK; n |= (prio << ERTS_PSFLGS_ACT_PRIO_OFFSET); - a = erts_smp_atomic32_cmpxchg_nob(&p->state, n, e); + a = erts_atomic32_cmpxchg_nob(&p->state, n, e); } while (a != e); state = n; } @@ -7235,10 +6742,10 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st, enq_prio = -1; /* Status lock prevents out of order "runnable proc" trace msgs */ - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); if (!prof_runnable_procs) { - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); locked = 0; } @@ -7258,7 +6765,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st, | ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_DIRTY_RUNNING_SYS))) enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a); - a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); + a = erts_atomic32_cmpxchg_mb(&p->state, n, e); if (a == e) break; if (a == n && enqueue == ERTS_ENQUEUE_NOT) @@ -7277,7 +6784,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st, profile_runnable_proc(p, am_active); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); locked = 0; } @@ -7286,12 +6793,12 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st, cleanup: if (locked) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); if (free_stqs) proc_sys_task_queues_free(free_stqs); - ERTS_SMP_LC_ASSERT(!(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p))); + ERTS_LC_ASSERT(!(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p))); return res; } @@ -7301,15 +6808,15 @@ suspend_process(Process *c_p, Process *p) { erts_aint32_t state; int suspended = 0; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); - state = erts_smp_atomic32_read_acqb(&p->state); + state = erts_atomic32_read_acqb(&p->state); if ((state & ERTS_PSFLG_SUSPENDED)) suspended = -1; else { if (c_p == p) { - state = erts_smp_atomic32_read_bor_relb(&p->state, + state = erts_atomic32_read_bor_relb(&p->state, ERTS_PSFLG_SUSPENDED); ASSERT(state & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_RUNNING_SYS @@ -7325,7 +6832,7 @@ suspend_process(Process *c_p, 
Process *p) n = e = state; n |= ERTS_PSFLG_SUSPENDED; - state = erts_smp_atomic32_cmpxchg_relb(&p->state, n, e); + state = erts_atomic32_cmpxchg_relb(&p->state, n, e); if (state == e) { suspended = 1; break; @@ -7370,14 +6877,14 @@ resume_process(Process *p, ErtsProcLocks locks) erts_aint32_t state, enq_prio = -1; int enqueue; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); ASSERT(p->rcount > 0); if (--p->rcount > 0) /* multiple suspend */ return; - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); enqueue = change_proc_schedule_state(p, ERTS_PSFLG_SUSPENDED, 0, @@ -7387,7 +6894,6 @@ resume_process(Process *p, ErtsProcLocks locks) add2runq(enqueue, enq_prio, p, state, NULL); } -#ifdef ERTS_SMP static ERTS_INLINE void sched_resume_wake__(ErtsSchedulerSleepInfo *ssi) @@ -7398,7 +6904,7 @@ sched_resume_wake__(ErtsSchedulerSleepInfo *ssi) | ERTS_SSI_FLG_SUSPENDED); erts_aint32_t oflgs; do { - oflgs = erts_smp_atomic32_cmpxchg_relb(&ssi->flags, 0, xflgs); + oflgs = erts_atomic32_cmpxchg_relb(&ssi->flags, 0, xflgs); if (oflgs == xflgs) { erts_sched_finish_poke(ssi, oflgs); break; @@ -7413,7 +6919,6 @@ nrml_sched_ix_resume_wake(Uint ix) sched_resume_wake__(ERTS_SCHED_SLEEP_INFO_IX(ix)); } -#ifdef ERTS_DIRTY_SCHEDULERS static void dcpu_sched_ix_resume_wake(Uint ix) @@ -7427,7 +6932,6 @@ dio_sched_ix_resume_wake(Uint ix) sched_resume_wake__(ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix)); } -#endif static erts_aint32_t sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, erts_aint32_t xpct) @@ -7439,7 +6943,7 @@ sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, erts_aint32_t xpct) erts_aint32_t xflgs = xpct; do { - oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); + oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return nflgs; xflgs = oflgs; @@ -7456,7 +6960,7 @@ sched_spin_suspended(ErtsSchedulerSleepInfo *ssi, int spincount) erts_aint32_t flgs; do { - flgs = erts_smp_atomic32_read_acqb(&ssi->flags); + flgs = erts_atomic32_read_acqb(&ssi->flags); if ((flgs & (ERTS_SSI_FLG_SLEEPING | ERTS_SSI_FLG_WAITING | ERTS_SSI_FLG_SUSPENDED)) @@ -7475,21 +6979,23 @@ sched_spin_suspended(ErtsSchedulerSleepInfo *ssi, int spincount) } static erts_aint32_t -sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi) +sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi, + erts_aint32_t sleep_type) { erts_aint32_t oflgs; - erts_aint32_t nflgs = (ERTS_SSI_FLG_SLEEPING - | ERTS_SSI_FLG_TSE_SLEEPING - | ERTS_SSI_FLG_WAITING - | ERTS_SSI_FLG_SUSPENDED); + erts_aint32_t nflgs = ((ERTS_SSI_FLG_SLEEPING + | ERTS_SSI_FLG_WAITING + | ERTS_SSI_FLG_SUSPENDED) + | sleep_type); erts_aint32_t xflgs = (ERTS_SSI_FLG_SLEEPING | ERTS_SSI_FLG_WAITING | ERTS_SSI_FLG_SUSPENDED); + ASSERT(sleep_type == ERTS_SSI_FLG_TSE_SLEEPING); erts_tse_reset(ssi->event); while (1) { - oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); + oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return nflgs; if ((oflgs & (ERTS_SSI_FLG_SLEEPING @@ -7507,12 +7013,11 @@ sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi) static void init_scheduler_suspend(void) { - erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd", NIL, + erts_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER); schdlr_sspnd.online.normal = 1; schdlr_sspnd.curr_online.normal = 
1; schdlr_sspnd.active.normal = 1; -#ifdef ERTS_DIRTY_SCHEDULERS schdlr_sspnd.online.dirty_cpu = 0; schdlr_sspnd.curr_online.dirty_cpu = 0; schdlr_sspnd.active.dirty_cpu = 0; @@ -7520,8 +7025,7 @@ init_scheduler_suspend(void) schdlr_sspnd.curr_online.dirty_io = 0; schdlr_sspnd.active.dirty_io = 0; schdlr_sspnd.last_msb_dirty_type = ERTS_SCHED_DIRTY_IO; -#endif - erts_smp_atomic32_init_nob(&schdlr_sspnd.changing, 0); + erts_atomic32_init_nob(&schdlr_sspnd.changing, 0); schdlr_sspnd.chngq = NULL; schdlr_sspnd.changer = am_false; schdlr_sspnd.nmsb.ongoing = 0; @@ -7552,7 +7056,7 @@ schdlr_sspnd_resume_proc(ErtsSchedType sched_type, Eterm pid) : 0)); if (p) { resume_process(p, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); if (sched_type != ERTS_SCHED_NORMAL) erts_proc_dec_refc(p); } @@ -7581,7 +7085,6 @@ schdlr_sspnd_resume_procs(ErtsSchedType sched_type, } } -#ifdef ERTS_DIRTY_SCHEDULERS static ERTS_INLINE int have_dirty_work(void) @@ -7741,17 +7244,6 @@ msb_scheduler_type_switch(ErtsSchedType sched_type, if (exec_type != ERTS_SCHED_NORMAL) schdlr_sspnd.last_msb_dirty_type = exec_type; else { - erts_aint32_t calls; - /* - * Going back to normal scheduler after - * dirty execution; make sure it will check - * for I/O... - */ - if (ERTS_USE_MODIFIED_TIMING()) - calls = ERTS_MODIFIED_TIMING_INPUT_REDS + 1; - else - calls = INPUT_REDUCTIONS + 1; - erts_smp_atomic32_set_nob(&function_calls, calls); if ((nrml_prio == ERTS_MSB_NONE_PRIO_BIT) & ((dcpu_prio != ERTS_MSB_NONE_PRIO_BIT) @@ -7786,7 +7278,7 @@ msb_scheduler_type_switch(ErtsSchedType sched_type, #else (void) #endif - erts_smp_atomic32_read_bset_mb(&esdp->ssi->flags, + erts_atomic32_read_bset_mb(&esdp->ssi->flags, (ERTS_SSI_FLG_SUSPENDED | ERTS_SSI_FLG_MSB_EXEC), ERTS_SSI_FLG_SUSPENDED); @@ -7813,7 +7305,7 @@ msb_scheduler_type_switch(ErtsSchedType sched_type, #else (void) #endif - erts_smp_atomic32_read_bset_mb(&exec_rq->scheduler->ssi->flags, + erts_atomic32_read_bset_mb(&exec_rq->scheduler->ssi->flags, (ERTS_SSI_FLG_SUSPENDED | ERTS_SSI_FLG_MSB_EXEC), ERTS_SSI_FLG_MSB_EXEC); @@ -7825,7 +7317,34 @@ msb_scheduler_type_switch(ErtsSchedType sched_type, } -#endif +static ERTS_INLINE void +suspend_normal_scheduler_sleep(ErtsSchedulerData *esdp) +{ + ErtsSchedulerSleepInfo *ssi = esdp->ssi; + erts_aint32_t flgs = sched_spin_suspended(ssi, + ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT); + if (flgs == (ERTS_SSI_FLG_SLEEPING + | ERTS_SSI_FLG_WAITING + | ERTS_SSI_FLG_SUSPENDED)) { + flgs = sched_set_suspended_sleeptype(ssi, ERTS_SSI_FLG_TSE_SLEEPING); + if (flgs == (ERTS_SSI_FLG_SLEEPING + | ERTS_SSI_FLG_TSE_SLEEPING + | ERTS_SSI_FLG_WAITING + | ERTS_SSI_FLG_SUSPENDED)) { + int res; + + do { + res = erts_tse_wait(ssi->event); + } while (res == EINTR); + } + } +} + +static ERTS_INLINE void +suspend_dirty_scheduler_sleep(ErtsSchedulerData *esdp) +{ + suspend_normal_scheduler_sleep(esdp); +} static void suspend_scheduler(ErtsSchedulerData *esdp) @@ -7853,14 +7372,6 @@ suspend_scheduler(ErtsSchedulerData *esdp) */ -#if !defined(ERTS_DIRTY_SCHEDULERS) - - sched_type = ERTS_SCHED_NORMAL; - online_flag = ERTS_SCHDLR_SSPND_CHNG_ONLN; - no = esdp->no; - ASSERT(no != 1); - -#else sched_type = esdp->type; switch (sched_type) { @@ -7881,25 +7392,24 @@ suspend_scheduler(ErtsSchedulerData *esdp) return; } - if (erts_smp_atomic32_read_nob(&ssi->flags) & ERTS_SSI_FLG_MSB_EXEC) { + if (erts_atomic32_read_nob(&ssi->flags) & ERTS_SSI_FLG_MSB_EXEC) { ASSERT(no == 1); if 
(!msb_scheduler_type_switch(sched_type, esdp, no)) return; /* Suspend and let scheduler 1 of another type execute... */ } -#endif if (sched_type != ERTS_SCHED_NORMAL) { dirty_active(esdp, -1); - erts_smp_runq_unlock(esdp->run_queue); + erts_runq_unlock(esdp->run_queue); dirty_sched_wall_time_change(esdp, 0); } else { if (no != 1) evacuate_run_queue(esdp->run_queue, &sbp); - erts_smp_runq_unlock(esdp->run_queue); + erts_runq_unlock(esdp->run_queue); erts_sched_check_cpu_bind_prep_suspend(esdp); @@ -7907,14 +7417,14 @@ suspend_scheduler(ErtsSchedulerData *esdp) profile_scheduler(make_small(esdp->no), am_inactive); } - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_mtx_lock(&schdlr_sspnd.mtx); flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED); if (flgs & ERTS_SSI_FLG_SUSPENDED) { schdlr_sspnd_dec_nscheds(&schdlr_sspnd.active, sched_type); - changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); + changing = erts_atomic32_read_nob(&schdlr_sspnd.changing); while (1) { @@ -7946,7 +7456,7 @@ suspend_scheduler(ErtsSchedulerData *esdp) if (clr_flg) { ErtsProcList *plp, *end_plp; - changing = erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing, + changing = erts_atomic32_read_band_nob(&schdlr_sspnd.changing, ~clr_flg); changing &= ~clr_flg; (void) erts_proclist_fetch(&msb[i]->chngq, &end_plp); @@ -7992,7 +7502,7 @@ suspend_scheduler(ErtsSchedulerData *esdp) == schdlr_sspnd_get_nscheds(&schdlr_sspnd.curr_online, sched_type))) { ErtsProcList *plp; - changing = erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing, + changing = erts_atomic32_read_band_nob(&schdlr_sspnd.changing, ~online_flag); changing &= ~online_flag; if (sched_type == ERTS_SCHED_NORMAL) { @@ -8014,23 +7524,24 @@ suspend_scheduler(ErtsSchedulerData *esdp) } if (curr_online) { - flgs = erts_smp_atomic32_read_acqb(&ssi->flags); + flgs = erts_atomic32_read_acqb(&ssi->flags); if (!(flgs & ERTS_SSI_FLG_SUSPENDED)) break; } - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); schdlr_sspnd_resume_procs(sched_type, &resume); while (1) { - ErtsMonotonicTime current_time; - erts_aint32_t flgs; - if (sched_type != ERTS_SCHED_NORMAL) - aux_work = 0; - else { + suspend_dirty_scheduler_sleep(esdp); + else + { + ErtsMonotonicTime current_time, timeout_time; int evacuate = no == 1 ? 
0 : !ERTS_EMPTY_RUNQ(esdp->run_queue); + ASSERT(sched_type == ERTS_SCHED_NORMAL); + aux_work = erts_atomic32_read_acqb(&ssi->aux_work); if (aux_work|evacuate) { @@ -8046,123 +7557,62 @@ suspend_scheduler(ErtsSchedulerData *esdp) if (aux_work && erts_thr_progress_update(esdp)) erts_thr_progress_leader_update(esdp); if (evacuate) { - erts_smp_runq_lock(esdp->run_queue); + erts_runq_lock(esdp->run_queue); evacuate_run_queue(esdp->run_queue, &sbp); - erts_smp_runq_unlock(esdp->run_queue); + erts_runq_unlock(esdp->run_queue); } } - } - if (aux_work) { - ASSERT(sched_type == ERTS_SCHED_NORMAL); - current_time = erts_get_monotonic_time(esdp); - if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) { - if (!thr_prgr_active) { - erts_thr_progress_active(esdp, thr_prgr_active = 1); - sched_wall_time_change(esdp, 1); - } - erts_bump_timers(esdp->timer_wheel, current_time); - } - } - else { - ErtsMonotonicTime timeout_time; - int do_timeout; + if (aux_work) + timeout_time = erts_next_timeout_time(esdp->next_tmo_ref); + else + timeout_time = erts_check_next_timeout_time(esdp); - if (sched_type == ERTS_SCHED_NORMAL) { - timeout_time = erts_check_next_timeout_time(esdp); - current_time = erts_get_monotonic_time(esdp); - do_timeout = (current_time >= timeout_time); - } - else { - timeout_time = ERTS_MONOTONIC_TIME_MAX; - current_time = 0; - do_timeout = 0; - } + current_time = erts_get_monotonic_time(esdp); - if (do_timeout) { - ASSERT(sched_type == ERTS_SCHED_NORMAL); - if (!thr_prgr_active) { - erts_thr_progress_active(esdp, thr_prgr_active = 1); - sched_wall_time_change(esdp, 1); - } - } - else { - if (sched_type == ERTS_SCHED_NORMAL) { - if (thr_prgr_active) { - erts_thr_progress_active(esdp, thr_prgr_active = 0); - sched_wall_time_change(esdp, 0); - } - erts_thr_progress_prepare_wait(esdp); - } - flgs = sched_spin_suspended(ssi, - ERTS_SCHED_SUSPEND_SLEEP_SPINCOUNT); - if (flgs == (ERTS_SSI_FLG_SLEEPING - | ERTS_SSI_FLG_WAITING - | ERTS_SSI_FLG_SUSPENDED)) { - flgs = sched_set_suspended_sleeptype(ssi); - if (flgs == (ERTS_SSI_FLG_SLEEPING - | ERTS_SSI_FLG_TSE_SLEEPING - | ERTS_SSI_FLG_WAITING - | ERTS_SSI_FLG_SUSPENDED)) { - int res; - - if (sched_type == ERTS_SCHED_NORMAL) - current_time = erts_get_monotonic_time(esdp); - else - current_time = 0; - - do { - Sint64 timeout; - if (current_time >= timeout_time) - break; - if (sched_type != ERTS_SCHED_NORMAL) - timeout = -1; - else - timeout = ERTS_MONOTONIC_TO_NSEC(timeout_time - - current_time - - 1) + 1; - res = erts_tse_twait(ssi->event, timeout); - - if (sched_type == ERTS_SCHED_NORMAL) - current_time = erts_get_monotonic_time(esdp); - else - current_time = 0; - - } while (res == EINTR); - } - } - if (sched_type == ERTS_SCHED_NORMAL) - erts_thr_progress_finalize_wait(esdp); - } + if (!aux_work && current_time < timeout_time) { + /* go to sleep... 
*/ + if (thr_prgr_active) { + erts_thr_progress_active(esdp, thr_prgr_active = 0); + sched_wall_time_change(esdp, 0); + } + erts_thr_progress_prepare_wait(NULL); + suspend_normal_scheduler_sleep(esdp); + erts_thr_progress_finalize_wait(NULL); + current_time = erts_get_monotonic_time(esdp); + } - if (current_time >= timeout_time) { - ASSERT(sched_type == ERTS_SCHED_NORMAL); - erts_bump_timers(esdp->timer_wheel, current_time); - } - } + if (current_time >= timeout_time) { + if (!thr_prgr_active) { + erts_thr_progress_active(esdp, thr_prgr_active = 1); + sched_wall_time_change(esdp, 1); + } + erts_bump_timers(esdp->timer_wheel, current_time); + } + } flgs = sched_prep_spin_suspended(ssi, (ERTS_SSI_FLG_WAITING | ERTS_SSI_FLG_SUSPENDED)); if (!(flgs & ERTS_SSI_FLG_SUSPENDED)) break; - changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); + changing = erts_atomic32_read_nob(&schdlr_sspnd.changing); if (changing) break; } - erts_smp_mtx_lock(&schdlr_sspnd.mtx); - changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); + erts_mtx_lock(&schdlr_sspnd.mtx); + changing = erts_atomic32_read_nob(&schdlr_sspnd.changing); } schdlr_sspnd_inc_nscheds(&schdlr_sspnd.active, sched_type); - changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); + changing = erts_atomic32_read_nob(&schdlr_sspnd.changing); if (changing) { if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB) && !schdlr_sspnd.msb.ongoing && schdlr_sspnd_eq_nscheds(&schdlr_sspnd.online, &schdlr_sspnd.active)) { - erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing, + erts_atomic32_read_band_nob(&schdlr_sspnd.changing, ~ERTS_SCHDLR_SSPND_CHNG_MSB); } if ((changing & ERTS_SCHDLR_SSPND_CHNG_NMSB) @@ -8171,14 +7621,14 @@ suspend_scheduler(ErtsSchedulerData *esdp) ERTS_SCHED_NORMAL) == schdlr_sspnd_get_nscheds(&schdlr_sspnd.active, ERTS_SCHED_NORMAL))) { - erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing, + erts_atomic32_read_band_nob(&schdlr_sspnd.changing, ~ERTS_SCHDLR_SSPND_CHNG_NMSB); } } ASSERT(no <= schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, sched_type)); } - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); schdlr_sspnd_resume_procs(sched_type, &resume); @@ -8197,7 +7647,7 @@ suspend_scheduler(ErtsSchedulerData *esdp) } } - erts_smp_runq_lock(esdp->run_queue); + erts_runq_lock(esdp->run_queue); non_empty_runq(esdp->run_queue); if (sched_type != ERTS_SCHED_NORMAL) @@ -8221,7 +7671,7 @@ erts_schedulers_state(Uint *total, { if (active || online || dirty_cpu_online || dirty_cpu_active || dirty_io_active) { - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_mtx_lock(&schdlr_sspnd.mtx); if (active) *active = schdlr_sspnd_get_nscheds(&schdlr_sspnd.active, ERTS_SCHED_NORMAL); @@ -8237,7 +7687,7 @@ erts_schedulers_state(Uint *total, if (dirty_io_active) *dirty_io_active = schdlr_sspnd_get_nscheds(&schdlr_sspnd.active, ERTS_SCHED_DIRTY_IO); - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); } if (total) @@ -8253,7 +7703,7 @@ abort_sched_onln_chng_waitq(Process *p) { Eterm resume = NIL; - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_mtx_lock(&schdlr_sspnd.mtx); #ifdef DEBUG { @@ -8303,7 +7753,7 @@ abort_sched_onln_chng_waitq(Process *p) } } - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); if (is_internal_pid(resume)) schdlr_sspnd_resume_proc(ERTS_SCHED_NORMAL, resume); @@ -8320,11 +7770,7 @@ erts_set_schedulers_online(Process *p, erts_aint32_t changing = 0, change_flags; int online, increase; ErtsProcList *plp; -#ifdef ERTS_DIRTY_SCHEDULERS int 
dirty_no, change_dirty, dirty_online; -#else - ASSERT(!dirty_only); -#endif if (new_no < 1) return ERTS_SCHDLR_SSPND_EINVAL; @@ -8333,11 +7779,9 @@ erts_set_schedulers_online(Process *p, else if (erts_no_schedulers < new_no) return ERTS_SCHDLR_SSPND_EINVAL; -#ifdef ERTS_DIRTY_SCHEDULERS if (dirty_only) resume_proc = 0; else -#endif { resume_proc = 1; /* @@ -8346,23 +7790,21 @@ erts_set_schedulers_online(Process *p, * race... */ if (!(plocks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); suspend_process(p, p); if (!(plocks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_mtx_lock(&schdlr_sspnd.mtx); change_flags = 0; have_unlocked_plocks = 0; no = (int) new_no; -#ifdef ERTS_DIRTY_SCHEDULERS if (!dirty_only) -#endif { - changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); + changing = erts_atomic32_read_nob(&schdlr_sspnd.changing); if (changing & ERTS_SCHDLR_SSPND_CHNG_ONLN) { enqueue_wait: p->flags |= F_SCHDLR_ONLN_WAITQ; @@ -8388,12 +7830,6 @@ erts_set_schedulers_online(Process *p, *old_no = online = schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, ERTS_SCHED_NORMAL); -#ifndef ERTS_DIRTY_SCHEDULERS - if (no == online) { - res = ERTS_SCHDLR_SSPND_DONE; - goto done; - } -#else /* ERTS_DIRTY_SCHEDULERS */ dirty_online = schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, ERTS_SCHED_DIRTY_CPU); if (dirty_only) @@ -8445,7 +7881,6 @@ erts_set_schedulers_online(Process *p, if (dirty_only) increase = (dirty_no > dirty_online); else -#endif /* ERTS_DIRTY_SCHEDULERS */ { change_flags |= ERTS_SCHDLR_SSPND_CHNG_ONLN; schdlr_sspnd_set_nscheds(&schdlr_sspnd.online, @@ -8454,12 +7889,11 @@ erts_set_schedulers_online(Process *p, increase = (no > online); } - erts_smp_atomic32_read_bor_nob(&schdlr_sspnd.changing, change_flags); + erts_atomic32_read_bor_nob(&schdlr_sspnd.changing, change_flags); res = ERTS_SCHDLR_SSPND_DONE; if (increase) { int ix; -#ifdef ERTS_DIRTY_SCHEDULERS if (change_dirty) { ErtsSchedulerSleepInfo* ssi; if (schdlr_sspnd.msb.ongoing) { @@ -8473,7 +7907,6 @@ erts_set_schedulers_online(Process *p, } } if (!dirty_only) -#endif { if (schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing) { for (ix = online; ix < no; ix++) @@ -8482,7 +7915,7 @@ erts_set_schedulers_online(Process *p, else { if (plocks) { have_unlocked_plocks = 1; - erts_smp_proc_unlock(p, plocks); + erts_proc_unlock(p, plocks); } change_no_used_runqs(no); @@ -8495,7 +7928,6 @@ erts_set_schedulers_online(Process *p, } } else /* if decrease */ { -#ifdef ERTS_DIRTY_SCHEDULERS if (change_dirty) { if (schdlr_sspnd.msb.ongoing) { for (ix = dirty_no; ix < dirty_online; ix++) @@ -8513,7 +7945,6 @@ erts_set_schedulers_online(Process *p, } } if (!dirty_only) -#endif { if (schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing) { for (ix = no; ix < online; ix++) @@ -8522,7 +7953,7 @@ erts_set_schedulers_online(Process *p, else { if (plocks) { have_unlocked_plocks = 1; - erts_smp_proc_unlock(p, plocks); + erts_proc_unlock(p, plocks); } change_no_used_runqs(no); @@ -8551,17 +7982,17 @@ done: <= schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, ERTS_SCHED_NORMAL)); - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); if (have_unlocked_plocks) - erts_smp_proc_lock(p, plocks); + erts_proc_lock(p, plocks); if (resume_proc) { if (!(plocks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, 
ERTS_PROC_LOCK_STATUS); resume_process(p, plocks|ERTS_PROC_LOCK_STATUS); if (!(plocks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } return res; @@ -8599,13 +8030,13 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal else { resume_proc = 1; if (!(plocks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); suspend_process(p, p); if (!(plocks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_mtx_lock(&schdlr_sspnd.mtx); if (on) { /* ------ BLOCK ------ */ if (msbp->chngq) { ASSERT(msbp->ongoing); @@ -8635,12 +8066,12 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal p->flags |= have_blckd_flg; if (plocks) { have_unlocked_plocks = 1; - erts_smp_proc_unlock(p, plocks); + erts_proc_unlock(p, plocks); } ASSERT(!msbp->ongoing); msbp->ongoing = 1; - erts_smp_atomic32_read_bor_nob(&schdlr_sspnd.changing, + erts_atomic32_read_bor_nob(&schdlr_sspnd.changing, chng_flg); change_no_used_runqs(1); for (ix = 1; ix < erts_no_run_queues; ix++) @@ -8651,21 +8082,19 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal wake_scheduler(rq); } -#ifdef ERTS_DIRTY_SCHEDULERS if (!normal) { ERTS_RUNQ_FLGS_SET_NOB(ERTS_RUNQ_IX(0), ERTS_RUNQ_FLG_MSB_EXEC); - erts_smp_atomic32_read_bor_nob(&ERTS_RUNQ_IX(0)->scheduler->ssi->flags, + erts_atomic32_read_bor_nob(&ERTS_RUNQ_IX(0)->scheduler->ssi->flags, ERTS_SSI_FLG_MSB_EXEC); for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) dcpu_sched_ix_suspend_wake(ix); for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) dio_sched_ix_suspend_wake(ix); } -#endif wait_until_msb: - ASSERT(chng_flg & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)); + ASSERT(chng_flg & erts_atomic32_read_nob(&schdlr_sspnd.changing)); plp = proclist_create(p); erts_proclist_store_last(&msbp->chngq, plp); @@ -8706,14 +8135,14 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal } if (!msbp->blckrs && !msbp->chngq) { int online; - erts_smp_atomic32_read_bor_nob(&schdlr_sspnd.changing, + erts_atomic32_read_bor_nob(&schdlr_sspnd.changing, chng_flg); p->flags &= ~have_blckd_flg; msbp->ongoing = 0; if (!(schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing)) { if (plocks) { have_unlocked_plocks = 1; - erts_smp_proc_unlock(p, plocks); + erts_proc_unlock(p, plocks); } online = (int) schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, @@ -8727,7 +8156,6 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal for (ix = online; ix < erts_no_run_queues; ix++) suspend_run_queue(ERTS_RUNQ_IX(ix)); } -#ifdef ERTS_DIRTY_SCHEDULERS if (!schdlr_sspnd.msb.ongoing) { /* Get rid of msb-exec flag in run-queue of scheduler 1 */ resume_run_queue(ERTS_RUNQ_IX(0)); @@ -8738,7 +8166,6 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) dio_sched_ix_resume_wake(ix); } -#endif } unblock_res: @@ -8750,17 +8177,17 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal res = ERTS_SCHDLR_SSPND_DONE; } - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); if (have_unlocked_plocks) - erts_smp_proc_lock(p, plocks); + erts_proc_lock(p, plocks); if (resume_proc) { if (!(plocks & 
ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); resume_process(p, plocks|ERTS_PROC_LOCK_STATUS); if (!(plocks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } return res; @@ -8770,14 +8197,14 @@ int erts_is_multi_scheduling_blocked(void) { int res; - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_mtx_lock(&schdlr_sspnd.mtx); if (schdlr_sspnd.msb.blckrs) res = 1; else if (schdlr_sspnd.nmsb.blckrs) res = -1; else res = 0; - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); return res; } @@ -8789,7 +8216,7 @@ erts_multi_scheduling_blockers(Process *p, int normal) msbp = normal ? &schdlr_sspnd.nmsb : &schdlr_sspnd.msb; - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_mtx_lock(&schdlr_sspnd.mtx); if (!erts_proclist_is_empty(msbp->blckrs)) { Eterm *hp, *hp_end; ErtsProcList *plp1, *plp2; @@ -8817,7 +8244,7 @@ erts_multi_scheduling_blockers(Process *p, int normal) } HRelease(p, hp_end, hp); } - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); return res; } @@ -8827,10 +8254,9 @@ sched_thread_func(void *vesdp) ErtsThrPrgrCallbacks callbacks; ErtsSchedulerData *esdp = vesdp; Uint no = esdp->no; -#ifdef ERTS_SMP erts_tse_t *tse; -#endif + erts_port_task_pre_alloc_init_thread(); erts_sched_init_time_sup(esdp); if (no == 1) @@ -8839,7 +8265,6 @@ sched_thread_func(void *vesdp) (void) ERTS_RUNQ_FLGS_SET_NOB(esdp->run_queue, ERTS_RUNQ_FLG_EXEC); -#ifdef ERTS_SMP tse = erts_tse_fetch(); erts_tse_prepare_timed(tse); ERTS_SCHED_SLEEP_INFO_IX(no - 1)->event = tse; @@ -8853,7 +8278,6 @@ sched_thread_func(void *vesdp) erts_thr_progress_register_managed_thread(esdp, &callbacks, 0); erts_alloc_register_scheduler(vesdp); -#endif #ifdef ERTS_ENABLE_LOCK_CHECK { char buf[31]; @@ -8862,18 +8286,14 @@ sched_thread_func(void *vesdp) } #endif erts_tsd_set(sched_data_key, vesdp); -#ifdef ERTS_SMP #if HAVE_ERTS_MSEG erts_mseg_late_init(); #endif -#if ERTS_USE_ASYNC_READY_Q esdp->aux_work_data.async_ready.queue = erts_get_async_ready_queue(no); -#endif erts_sched_init_check_cpu_bind(esdp); erts_proc_lock_prepare_proc_lock_waiter(); -#endif #ifdef HIPE hipe_thread_signal_init(); @@ -8898,8 +8318,6 @@ sched_thread_func(void *vesdp) return NULL; } -#ifdef ERTS_DIRTY_SCHEDULERS -#ifdef ERTS_SMP static void* sched_dirty_cpu_thread_func(void *vesdp) { @@ -8929,9 +8347,7 @@ sched_dirty_cpu_thread_func(void *vesdp) } #endif erts_tsd_set(sched_data_key, vesdp); -#if ERTS_USE_ASYNC_READY_Q esdp->aux_work_data.async_ready.queue = NULL; -#endif erts_proc_lock_prepare_proc_lock_waiter(); @@ -8977,9 +8393,7 @@ sched_dirty_io_thread_func(void *vesdp) } #endif erts_tsd_set(sched_data_key, vesdp); -#if ERTS_USE_ASYNC_READY_Q esdp->aux_work_data.async_ready.queue = NULL; -#endif erts_proc_lock_prepare_proc_lock_waiter(); @@ -8995,40 +8409,38 @@ sched_dirty_io_thread_func(void *vesdp) no); return NULL; } -#endif -#endif - -static ethr_tid aux_tid; void erts_start_schedulers(void) { + ethr_tid tid; int res = 0; Uint actual; Uint wanted = erts_no_schedulers; Uint wanted_no_schedulers = erts_no_schedulers; char name[16]; ethr_thr_opts opts = ETHR_THR_OPTS_DEFAULT_INITER; + int ix; opts.detached = 1; opts.name = name; -#ifdef ERTS_SMP if (erts_runq_supervision_interval) { opts.suggested_stack_size = 16; erts_snprintf(opts.name, 16, "runq_supervisor"); erts_atomic_init_nob(&runq_supervisor_sleeping, 0); if (0 != ethr_event_init(&runq_supervision_event)) 
erts_exit(ERTS_ERROR_EXIT, "Failed to create run-queue supervision event\n"); - if (0 != ethr_thr_create(&runq_supervisor_tid, - runq_supervisor, - NULL, - &opts)) - erts_exit(ERTS_ERROR_EXIT, "Failed to create run-queue supervision thread\n"); + res = ethr_thr_create(&runq_supervisor_tid, + runq_supervisor, + NULL, + &opts); + if (0 != res) + erts_exit(ERTS_ERROR_EXIT, "Failed to create run-queue supervision thread, " + "error = %d\n", res); } -#endif opts.suggested_stack_size = erts_sched_thread_suggested_stack_size; @@ -9054,17 +8466,14 @@ erts_start_schedulers(void) } erts_no_schedulers = actual; -#ifdef ERTS_DIRTY_SCHEDULERS -#ifdef ERTS_SMP { - int ix; for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) { ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix); erts_snprintf(opts.name, 16, "%d_dirty_cpu_scheduler", ix + 1); opts.suggested_stack_size = erts_dcpu_sched_thread_suggested_stack_size; res = ethr_thr_create(&esdp->tid,sched_dirty_cpu_thread_func,(void*)esdp,&opts); if (res != 0) - erts_exit(ERTS_ERROR_EXIT, "Failed to create dirty cpu scheduler thread %d\n", ix); + erts_exit(ERTS_ERROR_EXIT, "Failed to create dirty cpu scheduler thread %d, error = %d\n", ix, res); } for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) { ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix); @@ -9072,19 +8481,25 @@ erts_start_schedulers(void) opts.suggested_stack_size = erts_dio_sched_thread_suggested_stack_size; res = ethr_thr_create(&esdp->tid,sched_dirty_io_thread_func,(void*)esdp,&opts); if (res != 0) - erts_exit(ERTS_ERROR_EXIT, "Failed to create dirty io scheduler thread %d\n", ix); + erts_exit(ERTS_ERROR_EXIT, "Failed to create dirty io scheduler thread %d, error = %d\n", ix, res); } } -#endif -#endif ERTS_THR_MEMORY_BARRIER; erts_snprintf(opts.name, 16, "aux"); - res = ethr_thr_create(&aux_tid, aux_thread, NULL, &opts); + res = ethr_thr_create(&tid, aux_thread, NULL, &opts); if (res != 0) - erts_exit(ERTS_ERROR_EXIT, "Failed to create aux thread\n"); + erts_exit(ERTS_ERROR_EXIT, "Failed to create aux thread, error = %d\n", res); + + for (ix = 0; ix < erts_no_poll_threads; ix++) { + erts_snprintf(opts.name, 16, "%d_poller", ix); + + res = ethr_thr_create(&tid, poll_thread, (void*)(UWord)ix, &opts); + if (res != 0) + erts_exit(ERTS_ERROR_EXIT, "Failed to create poll thread\n"); + } if (actual < 1) erts_exit(ERTS_ERROR_EXIT, @@ -9103,9 +8518,7 @@ erts_start_schedulers(void) } } -#endif /* ERTS_SMP */ -#ifdef ERTS_SMP static void add_pend_suspend(Process *suspendee, @@ -9141,7 +8554,7 @@ handle_pending_suspend(Process *p, ErtsProcLocks p_locks) ErtsPendingSuspend *psp; int is_alive = !ERTS_PROC_IS_EXITING(p); - ERTS_SMP_LC_ASSERT(p_locks & ERTS_PROC_LOCK_STATUS); + ERTS_LC_ASSERT(p_locks & ERTS_PROC_LOCK_STATUS); /* * New pending suspenders might appear while we are processing @@ -9167,15 +8580,15 @@ cancel_suspend_of_suspendee(Process *p, ErtsProcLocks p_locks) if (is_not_nil(p->suspendee)) { Process *rp; if (!(p_locks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); rp = erts_pid2proc(p, p_locks|ERTS_PROC_LOCK_STATUS, p->suspendee, ERTS_PROC_LOCK_STATUS); if (rp) { erts_resume(rp, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); } if (!(p_locks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); p->suspendee = NIL; } } @@ -9188,7 +8601,7 @@ 
handle_pend_sync_suspend(Process *suspendee, { Process *suspender; - ERTS_SMP_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS); + ERTS_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS); suspender = erts_pid2proc(suspendee, suspendee_locks, @@ -9204,7 +8617,7 @@ handle_pend_sync_suspend(Process *suspendee, resume suspender */ ASSERT(suspendee != suspender); resume_process(suspender, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS); } } @@ -9215,10 +8628,10 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, Process *rp; int unlock_c_p_status; - ERTS_SMP_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p)); - ERTS_SMP_LC_ASSERT(c_p_locks & ERTS_PROC_LOCK_MAIN); - ERTS_SMP_LC_ASSERT(pid_locks & (ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS)); + ERTS_LC_ASSERT(c_p_locks & ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(pid_locks & (ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS)); if (c_p->common.id == pid) return erts_pid2proc(c_p, c_p_locks, pid, pid_locks); @@ -9227,7 +8640,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, unlock_c_p_status = 0; else { unlock_c_p_status = 1; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); } if (c_p->suspendee == pid) { @@ -9266,21 +8679,19 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, /* Other process running */ ASSERT((ERTS_PSFLG_RUNNING | ERTS_PSFLG_DIRTY_RUNNING) - & erts_smp_atomic32_read_nob(&rp->state)); + & erts_atomic32_read_nob(&rp->state)); -#ifdef ERTS_DIRTY_SCHEDULERS if (!suspend - && (erts_smp_atomic32_read_nob(&rp->state) + && (erts_atomic32_read_nob(&rp->state) & ERTS_PSFLG_DIRTY_RUNNING)) { ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS; - if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) { - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + if (need_locks && erts_proc_trylock(rp, need_locks) == EBUSY) { + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS, pid, pid_locks|ERTS_PROC_LOCK_STATUS); } goto done; } -#endif running: @@ -9300,19 +8711,19 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, c_p->flags |= F_P2PNR_RESCHED; } /* Yield (caller is assumed to yield immediately in bif). */ - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); rp = ERTS_PROC_LOCK_BUSY; } else { ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS; - if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) { + if (need_locks && erts_proc_trylock(rp, need_locks) == EBUSY) { if ((ERTS_PSFLG_RUNNING_SYS|ERTS_PSFLG_DIRTY_RUNNING_SYS) - & erts_smp_atomic32_read_nob(&rp->state)) { + & erts_atomic32_read_nob(&rp->state)) { /* Executing system task... */ resume_process(rp, ERTS_PROC_LOCK_STATUS); goto running; } - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); /* * If we are unlucky, the process just got selected for * execution of a system task. 
In this case we may be @@ -9336,7 +8747,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, #ifdef DEBUG { erts_aint32_t state; - state = erts_smp_atomic32_read_nob(&rp->state); + state = erts_atomic32_read_nob(&rp->state); ASSERT((state & ERTS_PSFLG_PENDING_EXIT) || !(state & ERTS_PSFLG_RUNNING)); } @@ -9350,9 +8761,9 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, done: if (rp && rp != ERTS_PROC_LOCK_BUSY && !(pid_locks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); if (unlock_c_p_status) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); return rp; } @@ -9398,7 +8809,7 @@ do_bif_suspend_process(Process *c_p, { ASSERT(suspendee); ASSERT(!ERTS_PROC_IS_EXITING(suspendee)); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(suspendee)); if (smon) { if (!smon->active) { @@ -9421,7 +8832,7 @@ handle_pend_bif_sync_suspend(Process *suspendee, { Process *suspender; - ERTS_SMP_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS); + ERTS_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS); suspender = erts_pid2proc(suspendee, suspendee_locks, @@ -9450,7 +8861,7 @@ handle_pend_bif_sync_suspend(Process *suspendee, resume suspender */ ASSERT(suspender != suspendee); resume_process(suspender, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(suspender, + erts_proc_unlock(suspender, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); } } @@ -9464,7 +8875,7 @@ handle_pend_bif_async_suspend(Process *suspendee, Process *suspender; - ERTS_SMP_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS); + ERTS_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS); suspender = erts_pid2proc(suspendee, suspendee_locks, @@ -9488,11 +8899,10 @@ handle_pend_bif_async_suspend(Process *suspendee, do_bif_suspend_process(suspendee, smon, suspendee); ASSERT(!smon || res != 0); } - erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(suspender, ERTS_PROC_LOCK_LINK); } } -#endif /* ERTS_SMP */ /* * The erlang:suspend_process/2 BIF @@ -9541,7 +8951,7 @@ suspend_process_2(BIF_ALIST_2) ? (ErtsProcLocks) 0 : ERTS_PROC_LOCK_STATUS); - erts_smp_proc_lock(BIF_P, xlocks); + erts_proc_lock(BIF_P, xlocks); suspendee = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN|xlocks, @@ -9552,34 +8962,15 @@ suspend_process_2(BIF_ALIST_2) smon = erts_add_or_lookup_suspend_monitor(&BIF_P->suspend_monitors, BIF_ARG_1); -#ifndef ERTS_SMP /* no ERTS_SMP */ - - /* This is really a piece of cake without SMP support... */ - if (!smon->active) { - erts_smp_atomic32_read_bor_nob(&suspendee->state, ERTS_PSFLG_SUSPENDED); - suspend_process(BIF_P, suspendee); - smon->active++; - res = am_true; - } - else if (unless_suspending) - res = am_false; - else if (smon->active == INT_MAX) - goto system_limit; - else { - smon->active++; - res = am_true; - } - -#else /* ERTS_SMP */ /* ... but a little trickier with SMP support ... 
*/ if (asynchronous) { /* --- Asynchronous suspend begin ---------------------------------- */ - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_LINK + ERTS_LC_ASSERT(ERTS_PROC_LOCK_LINK & erts_proc_lc_my_proc_locks(BIF_P)); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS == erts_proc_lc_my_proc_locks(suspendee)); if (smon->active) { @@ -9619,10 +9010,10 @@ suspend_process_2(BIF_ALIST_2) else /* if (!asynchronous) */ { /* --- Synchronous suspend begin ----------------------------------- */ - ERTS_SMP_LC_ASSERT(((ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS) + ERTS_LC_ASSERT(((ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS) & erts_proc_lc_my_proc_locks(BIF_P)) == (ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS)); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS == erts_proc_lc_my_proc_locks(suspendee)); if (BIF_P->suspendee == BIF_ARG_1) { @@ -9688,10 +9079,9 @@ suspend_process_2(BIF_ALIST_2) /* --- Synchronous suspend end ------------------------------------- */ } -#endif /* ERTS_SMP */ #ifdef DEBUG { - erts_aint32_t state = erts_smp_atomic32_read_acqb(&suspendee->state); + erts_aint32_t state = erts_atomic32_read_acqb(&suspendee->state); ASSERT((state & ERTS_PSFLG_SUSPENDED) || (asynchronous && smon->pending)); ASSERT((state & ERTS_PSFLG_SUSPENDED) @@ -9699,8 +9089,8 @@ suspend_process_2(BIF_ALIST_2) } #endif - erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(BIF_P, xlocks); + erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(BIF_P, xlocks); BIF_RET(res); system_limit: @@ -9708,26 +9098,22 @@ suspend_process_2(BIF_ALIST_2) goto do_return; no_suspendee: -#ifdef ERTS_SMP BIF_P->suspendee = NIL; -#endif erts_delete_suspend_monitor(&BIF_P->suspend_monitors, BIF_ARG_1); badarg: ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG); -#ifdef ERTS_SMP goto do_return; yield: ERTS_BIF_PREP_YIELD2(res, bif_export[BIF_suspend_process_2], BIF_P, BIF_ARG_1, BIF_ARG_2); -#endif do_return: if (suspendee) - erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); if (xlocks) - erts_smp_proc_unlock(BIF_P, xlocks); + erts_proc_unlock(BIF_P, xlocks); return res; } @@ -9747,7 +9133,7 @@ resume_process_1(BIF_ALIST_1) if (BIF_P->common.id == BIF_ARG_1) BIF_ERROR(BIF_P, BADARG); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); smon = erts_lookup_suspend_monitor(BIF_P->suspend_monitors, BIF_ARG_1); if (!smon) { @@ -9793,17 +9179,17 @@ resume_process_1(BIF_ALIST_1) goto no_suspendee; ASSERT(ERTS_PSFLG_SUSPENDED - & erts_smp_atomic32_read_nob(&suspendee->state)); + & erts_atomic32_read_nob(&suspendee->state)); ASSERT(BIF_P != suspendee); resume_process(suspendee, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); } if (!smon->active && !smon->pending) erts_delete_suspend_monitor(&BIF_P->suspend_monitors, BIF_ARG_1); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); BIF_RET(am_true); @@ -9812,7 +9198,7 @@ resume_process_1(BIF_ALIST_1) erts_delete_suspend_monitor(&BIF_P->suspend_monitors, BIF_ARG_1); error: - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); BIF_ERROR(BIF_P, BADARG); } @@ -9821,18 +9207,16 @@ erts_internal_is_process_executing_dirty_1(BIF_ALIST_1) { if (is_not_internal_pid(BIF_ARG_1)) BIF_ERROR(BIF_P, BADARG); -#ifdef ERTS_DIRTY_SCHEDULERS else { 
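Both the suspend machinery above and erts_set_process_priority() further below update p->state with the same optimistic pattern: read the flag word, build the desired new value, try erts_atomic32_cmpxchg_*(), and loop when another thread won the race. A self-contained C11 model of that retry loop; PSFLG_SUSPENDED is an invented bit, and the acq_rel/acquire orders only approximate the relb/acqb pairs used in the real code:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PSFLG_SUSPENDED (1u << 4)   /* hypothetical flag bit */

    /* Returns the state observed before the flag was set, in the style
       of suspend_process(). */
    static uint_fast32_t set_suspended(atomic_uint_fast32_t *state)
    {
        uint_fast32_t e = atomic_load_explicit(state, memory_order_acquire);
        for (;;) {
            uint_fast32_t n = e | PSFLG_SUSPENDED;
            /* On failure, e is reloaded with the current value, mirroring
               "state = erts_atomic32_cmpxchg_relb(&p->state, n, e)". */
            if (atomic_compare_exchange_weak_explicit(state, &e, n,
                                                      memory_order_acq_rel,
                                                      memory_order_acquire))
                return e;
        }
    }

    int main(void)
    {
        atomic_uint_fast32_t st;
        atomic_init(&st, 0);
        uint_fast32_t old = set_suspended(&st);
        printf("already suspended: %s\n", (old & PSFLG_SUSPENDED) ? "yes" : "no");
        return 0;
    }

The weak variant may fail spuriously, which the unconditional retry absorbs, just as the ERTS loops re-check after every failed cmpxchg.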
Process *rp = erts_proc_lookup(BIF_ARG_1); if (rp) { - erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state); + erts_aint32_t state = erts_atomic32_read_nob(&rp->state); if (state & (ERTS_PSFLG_DIRTY_RUNNING |ERTS_PSFLG_DIRTY_RUNNING_SYS)) { BIF_RET(am_true); } } } -#endif BIF_RET(am_false); } @@ -9842,28 +9226,26 @@ run_queues_len_aux(ErtsRunQueue *rq, Uint *tot_len, Uint *qlen, int *ip, int inc Sint rq_len; if (locked) - rq_len = (Sint) erts_smp_atomic32_read_dirty(&rq->len); + rq_len = (Sint) erts_atomic32_read_dirty(&rq->len); else - rq_len = (Sint) erts_smp_atomic32_read_nob(&rq->len); + rq_len = (Sint) erts_atomic32_read_nob(&rq->len); ASSERT(rq_len >= 0); if (incl_active_sched) { -#ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) { erts_aint32_t dcnt; if (ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(rq)) { - dcnt = erts_smp_atomic32_read_nob(&dirty_count.cpu.active); + dcnt = erts_atomic32_read_nob(&dirty_count.cpu.active); ASSERT(0 <= dcnt && dcnt <= erts_no_dirty_cpu_schedulers); } else { ASSERT(ERTS_RUNQ_IS_DIRTY_IO_RUNQ(rq)); - dcnt = erts_smp_atomic32_read_nob(&dirty_count.io.active); + dcnt = erts_atomic32_read_nob(&dirty_count.io.active); ASSERT(0 <= dcnt && dcnt <= erts_no_dirty_io_schedulers); } rq_len += (Sint) dcnt; } else -#endif { if (ERTS_RUNQ_FLGS_GET_NOB(rq) & ERTS_RUNQ_FLG_EXEC) rq_len++; @@ -9882,12 +9264,10 @@ erts_run_queues_len(Uint *qlen, int atomic_queues_read, int incl_active_sched, Uint len = 0; int no_rqs = erts_no_run_queues; -#ifdef ERTS_DIRTY_SCHEDULERS if (incl_dirty_io) no_rqs += ERTS_NUM_DIRTY_RUNQS; else no_rqs += ERTS_NUM_DIRTY_CPU_RUNQS; -#endif if (atomic_queues_read) { ERTS_ATOMIC_FOREACH_RUNQ_X(rq, no_rqs, @@ -9941,27 +9321,25 @@ erts_process_status(Process *rp, Eterm rpid) Process *p = rp ? rp : erts_proc_lookup_raw(rpid); if (p) { - erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state); + erts_aint32_t state = erts_atomic32_read_acqb(&p->state); res = erts_process_state2status(state); } -#ifdef ERTS_SMP else { int i; ErtsSchedulerData *esdp; for (i = 0; i < erts_no_schedulers; i++) { esdp = ERTS_SCHEDULER_IX(i); - erts_smp_runq_lock(esdp->run_queue); + erts_runq_lock(esdp->run_queue); if (esdp->free_process && esdp->free_process->common.id == rpid) { res = am_free; - erts_smp_runq_unlock(esdp->run_queue); + erts_runq_unlock(esdp->run_queue); break; } - erts_smp_runq_unlock(esdp->run_queue); + erts_runq_unlock(esdp->run_queue); } } -#endif return res; } @@ -9977,9 +9355,9 @@ erts_suspend(Process* c_p, ErtsProcLocks c_p_locks, Port *busy_port) int suspend; ASSERT(c_p == erts_get_current_process()); - ERTS_SMP_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p)); if (!(c_p_locks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); if (busy_port) suspend = erts_save_suspend_process_on_port(busy_port, c_p); @@ -9995,7 +9373,7 @@ erts_suspend(Process* c_p, ErtsProcLocks c_p_locks, Port *busy_port) } if (!(c_p_locks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); if (suspend && busy_port && erts_system_monitor_flags.busy_port) monitor_generic(c_p, am_busy_port, busy_port->common.id); @@ -10004,12 +9382,12 @@ erts_suspend(Process* c_p, ErtsProcLocks c_p_locks, Port *busy_port) void erts_resume(Process* process, ErtsProcLocks process_locks) { - ERTS_SMP_LC_ASSERT(process_locks == erts_proc_lc_my_proc_locks(process)); + 
ERTS_LC_ASSERT(process_locks == erts_proc_lc_my_proc_locks(process)); if (!(process_locks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_lock(process, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(process, ERTS_PROC_LOCK_STATUS); resume_process(process, process_locks|ERTS_PROC_LOCK_STATUS); if (!(process_locks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(process, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(process, ERTS_PROC_LOCK_STATUS); } int @@ -10029,7 +9407,7 @@ erts_resume_processes(ErtsProcList *list) resume_process(proc, ERTS_PROC_LOCK_STATUS); nresumed++; } - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(proc, ERTS_PROC_LOCK_STATUS); } fplp = plp; plp = plp->next; @@ -10041,7 +9419,7 @@ erts_resume_processes(ErtsProcList *list) Eterm erts_get_process_priority(Process *p) { - erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state); + erts_aint32_t state = erts_atomic32_read_nob(&p->state); switch (ERTS_PSFLGS_GET_USR_PRIO(state)) { case PRIORITY_MAX: return am_max; case PRIORITY_HIGH: return am_high; @@ -10064,7 +9442,7 @@ erts_set_process_priority(Process *p, Eterm value) default: return THE_NON_VALUE; break; } - a = erts_smp_atomic32_read_nob(&p->state); + a = erts_atomic32_read_nob(&p->state); if (nprio == ERTS_PSFLGS_GET_USR_PRIO(a)) oprio = nprio; else { @@ -10072,7 +9450,7 @@ erts_set_process_priority(Process *p, Eterm value) erts_aint32_t e, n, aprio; if (a & ERTS_PSFLG_ACTIVE_SYS) { - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); slocked = 1; } @@ -10086,7 +9464,7 @@ erts_set_process_priority(Process *p, Eterm value) int max_qbit; if (!slocked) { - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); slocked = 1; } @@ -10127,11 +9505,11 @@ erts_set_process_priority(Process *p, Eterm value) n |= ((nprio << ERTS_PSFLGS_USR_PRIO_OFFSET) | (aprio << ERTS_PSFLGS_ACT_PRIO_OFFSET)); - a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); + a = erts_atomic32_cmpxchg_mb(&p->state, n, e); } while (a != e); if (slocked) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } @@ -10189,7 +9567,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) ErtsRunQueue *rq; int context_reds; int fcalls; - int input_reductions; int actual_reds; int reds; Uint32 flags; @@ -10209,21 +9586,18 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) if (ERTS_USE_MODIFIED_TIMING()) { context_reds = ERTS_MODIFIED_TIMING_CONTEXT_REDS; - input_reductions = ERTS_MODIFIED_TIMING_INPUT_REDS; } else { context_reds = CONTEXT_REDS; - input_reductions = INPUT_REDUCTIONS; } - ERTS_SMP_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()) + ERTS_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()) || !erts_thr_progress_is_blocking()); /* * Clean up after the process being scheduled out. 
*/ if (!p) { /* NULL in the very first schedule() call */ -#ifdef ERTS_DIRTY_SCHEDULERS is_normal_sched = !esdp; if (is_normal_sched) { esdp = erts_get_scheduler_data(); @@ -10232,18 +9606,11 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) else { ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp)); } -#else - esdp = erts_get_scheduler_data(); - is_normal_sched = 1; -#endif rq = erts_get_runq_current(esdp); ASSERT(esdp); - fcalls = (int) erts_smp_atomic32_read_acqb(&function_calls); actual_reds = reds = 0; - erts_smp_runq_lock(rq); + erts_runq_lock(rq); } else { -#ifdef ERTS_SMP -#ifdef ERTS_DIRTY_SCHEDULERS is_normal_sched = !esdp; if (is_normal_sched) { esdp = p->scheduler_data; @@ -10252,21 +9619,12 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) else { ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp)); } -#else - esdp = p->scheduler_data; - is_normal_sched = 1; -#endif ASSERT(esdp->current_process == p || esdp->free_process == p); -#else - esdp = erts_scheduler_data; - ASSERT(esdp->current_process == p); - is_normal_sched = 1; -#endif sched_out_proc: - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); reds = actual_reds = calls - esdp->virtual_reds; @@ -10275,14 +9633,13 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) reds = ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST; esdp->virtual_reds = 0; - fcalls = (int) erts_smp_atomic32_add_read_acqb(&function_calls, reds); ASSERT(esdp && esdp == erts_get_scheduler_data()); rq = erts_get_runq_current(esdp); p->reds += actual_reds; - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); if (IS_TRACED(p)) { if (IS_TRACED_FL(p, F_TRACE_CALLS) && !(state & ERTS_PSFLG_FREE)) @@ -10301,20 +9658,17 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) } } - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); -#ifdef ERTS_SMP if (p->trace_msg_q) { - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); erts_schedule_flush_trace_messages(p, 1); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); } -#endif /* have to re-read state after taking lock */ - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); -#ifdef ERTS_SMP if (is_normal_sched && (state & ERTS_PSFLG_PENDING_EXIT)) erts_handle_pending_exit(p, (ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_TRACE @@ -10323,7 +9677,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) handle_pending_suspend(p, (ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_TRACE | ERTS_PROC_LOCK_STATUS)); -#endif esdp->reductions += reds; @@ -10341,18 +9694,15 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) actual_reds); esdp->current_process = NULL; -#ifdef ERTS_SMP if (is_normal_sched) p->scheduler_data = NULL; -#endif - erts_smp_proc_unlock(p, (ERTS_PROC_LOCK_MAIN + erts_proc_unlock(p, (ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_STATUS | ERTS_PROC_LOCK_TRACE)); ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER); -#ifdef ERTS_SMP if (state & ERTS_PSFLG_FREE) { if (!is_normal_sched) { ASSERT(p->flags & F_DELAYED_DEL_PROC); @@ -10362,44 +9712,40 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) esdp->free_process = NULL; } } -#endif if (dec_refc) 
erts_proc_dec_refc(p); } -#ifdef ERTS_SMP ASSERT(!esdp->free_process); -#endif ASSERT(!esdp->current_process); - ERTS_SMP_CHK_NO_PROC_LOCKS; - - if (is_normal_sched) { - if (esdp->check_time_reds >= ERTS_CHECK_TIME_REDS) - (void) erts_get_monotonic_time(esdp); + ERTS_CHK_NO_PROC_LOCKS; - if (esdp->last_monotonic_time >= erts_next_timeout_time(esdp->next_tmo_ref)) { - erts_smp_runq_unlock(rq); - erts_bump_timers(esdp->timer_wheel, esdp->last_monotonic_time); - erts_smp_runq_lock(rq); - } - } } - ERTS_SMP_LC_ASSERT(!is_normal_sched || !erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(!is_normal_sched || !erts_thr_progress_is_blocking()); check_activities_to_run: { erts_aint32_t psflg_running, psflg_running_sys; -#ifdef ERTS_SMP ErtsMigrationPaths *mps; ErtsMigrationPath *mp; if (is_normal_sched) { + + if (esdp->check_time_reds >= ERTS_CHECK_TIME_REDS) + (void) erts_get_monotonic_time(esdp); + + if (esdp->last_monotonic_time >= erts_next_timeout_time(esdp->next_tmo_ref)) { + erts_runq_unlock(rq); + erts_bump_timers(esdp->timer_wheel, esdp->last_monotonic_time); + erts_runq_lock(rq); + } + if (rq->check_balance_reds <= 0) check_balance(rq); - ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(!erts_thr_progress_is_blocking()); mps = erts_get_migration_paths_managed(); mp = &mps->mpath[rq->ix]; @@ -10408,14 +9754,14 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) immigrate(rq, mp); } - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); continue_check_activities_to_run: flags = ERTS_RUNQ_FLGS_GET_NOB(rq); continue_check_activities_to_run_known_flags: ASSERT(!is_normal_sched || (flags & ERTS_RUNQ_FLG_NONEMPTY)); if (!is_normal_sched) { - if (erts_smp_atomic32_read_acqb(&esdp->ssi->flags) + if (erts_atomic32_read_acqb(&esdp->ssi->flags) & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC)) { suspend_scheduler(esdp); } @@ -10445,26 +9791,18 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) leader_update = erts_thr_progress_update(esdp); aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work); if (aux_work | leader_update) { - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); if (leader_update) erts_thr_progress_leader_update(esdp); if (aux_work) handle_aux_work(&esdp->aux_work_data, aux_work, 0); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); } - ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(!erts_thr_progress_is_blocking()); } - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); -#else /* ERTS_SMP */ - { - erts_aint32_t aux_work; - aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work); - if (aux_work) - handle_aux_work(&esdp->aux_work_data, aux_work, 0); - } -#endif /* ERTS_SMP */ flags = ERTS_RUNQ_FLGS_GET_NOB(rq); @@ -10475,8 +9813,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) } else if (!runq_got_work_to_execute_flags(flags)) { /* Prepare for scheduler wait */ -#ifdef ERTS_SMP - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); rq->wakeup_other = 0; rq->wakeup_other_reds = 0; @@ -10490,7 +9827,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) ASSERT(!runq_got_work_to_execute(rq)); if (!is_normal_sched) { /* Dirty scheduler */ - if (erts_smp_atomic32_read_acqb(&esdp->ssi->flags) + if (erts_atomic32_read_acqb(&esdp->ssi->flags) & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC)) { /* Go suspend... 
*/ goto continue_check_activities_to_run_known_flags; @@ -10506,7 +9843,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) */ flags = ERTS_RUNQ_FLGS_GET_NOB(rq); if ((flags & ERTS_RUNQ_FLG_SUSPENDED) -#ifdef ERTS_DIRTY_SCHEDULERS /* If multi scheduling block and we have * dirty work, suspend and let dirty * scheduler handle work... */ @@ -10514,7 +9850,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) | ERTS_RUNQ_FLG_MSB_EXEC)) == ERTS_RUNQ_FLG_MSB_EXEC)) && have_dirty_work()) -#endif ) { non_empty_runq(rq); flags |= ERTS_RUNQ_FLG_NONEMPTY; @@ -10526,62 +9861,21 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) } empty_runq(rq); } -#endif (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_EXEC); scheduler_wait(&fcalls, esdp, rq); flags = ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_EXEC); flags |= ERTS_RUNQ_FLG_EXEC; ERTS_MSACC_UPDATE_CACHE(); -#ifdef ERTS_SMP non_empty_runq(rq); -#endif goto check_activities_to_run; } - else if (is_normal_sched - && (fcalls > input_reductions - && prepare_for_sys_schedule(!0))) { - ErtsMonotonicTime current_time; - /* - * Schedule system-level activities. - */ - - ERTS_MSACC_PUSH_STATE_CACHED_M(); - - erts_smp_atomic32_set_relb(&function_calls, 0); - fcalls = 0; - -#if 0 /* Not needed since we wont wait in sys schedule */ - erts_sys_schedule_interrupt(0); -#endif - erts_smp_runq_unlock(rq); - - ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_CHECK_IO); - LTTNG2(scheduler_poll, esdp->no, 1); - - erl_sys_schedule(1); - ERTS_MSACC_POP_STATE_M(); - - current_time = erts_get_monotonic_time(esdp); - if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) - erts_bump_timers(esdp->timer_wheel, current_time); - -#ifdef ERTS_SMP - erts_smp_runq_lock(rq); - clear_sys_scheduling(); - goto continue_check_activities_to_run; -#else - goto check_activities_to_run; -#endif - } if (flags & ERTS_RUNQ_FLG_MISC_OP) exec_misc_ops(rq); -#ifdef ERTS_SMP wakeup_other.check(rq, flags); -#endif /* * Find a new port to run. @@ -10590,29 +9884,9 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) flags = ERTS_RUNQ_FLGS_GET_NOB(rq); if (flags & PORT_BIT) { - int have_outstanding_io; - have_outstanding_io = erts_port_task_execute(rq, &esdp->current_port); - if ((!erts_eager_check_io - && have_outstanding_io - && fcalls > 2*input_reductions) - || (flags & ERTS_RUNQ_FLG_HALTING)) { - /* - * If we have performed more than 2*INPUT_REDUCTIONS since - * last call to erl_sys_schedule() and we still haven't - * handled all I/O tasks we stop running processes and - * focus completely on ports. - * - * One could argue that this is a strange behavior. The - * reason for doing it this way is that it is similar - * to the behavior before port tasks were introduced. - * We don't want to change the behavior too much, at - * least not at the time of writing. This behavior - * might change in the future. 
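The branch deleted above is the old reduction-budget I/O trigger: reductions consumed by scheduled-out processes were accumulated in the global function_calls counter, and once the total passed INPUT_REDUCTIONS the scheduler reset the counter and forced one erl_sys_schedule() poll. A minimal single-file sketch of that scheme — not the OTP code itself; the counter, budget value, and poll stub are all illustrative:

    /*
     * Sketch of a reduction-budget poll trigger, assuming a global
     * reduction counter and a fixed budget. Names are hypothetical.
     */
    #include <stdatomic.h>
    #include <stdio.h>

    #define INPUT_REDUCTIONS 2800        /* assumed budget */

    static atomic_int function_calls;

    static void sys_schedule(void)       /* stand-in for erl_sys_schedule(1) */
    {
        puts("check I/O");
    }

    static void after_slice(int reds)
    {
        int fcalls = atomic_fetch_add(&function_calls, reds) + reds;
        if (fcalls > INPUT_REDUCTIONS) {
            atomic_store(&function_calls, 0);
            sys_schedule();
        }
    }

    int main(void)
    {
        for (int i = 0; i < 6; i++)
            after_slice(1000);           /* forces a poll roughly every third slice */
        return 0;
    }

The hunk removes the trigger together with the fcalls/input_reductions bookkeeping deleted earlier in erts_schedule(), consistent with the dropped "Not needed since we wont wait in sys schedule" comment.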
- * - * /rickard - */ - goto check_activities_to_run; - } + erts_port_task_execute(rq, &esdp->current_port); + if (flags & ERTS_RUNQ_FLG_HALTING) + goto check_activities_to_run; } /* @@ -10679,13 +9953,11 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) proxy_p = NULL; goto pick_next_process; } - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); } -#ifdef ERTS_DIRTY_SCHEDULERS if (!is_normal_sched) clear_proc_dirty_queue_bit(p, rq, qbit); -#endif while (1) { erts_aint32_t exp, new; @@ -10703,7 +9975,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) | ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_DIRTY_RUNNING_SYS | ERTS_PSFLG_FREE))) -#ifdef ERTS_DIRTY_SCHEDULERS | (((state & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_FREE @@ -10712,7 +9983,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) | ERTS_PSFLG_EXITING)) == ERTS_PSFLG_EXITING) & (!!is_normal_sched)) -#endif ) & ((state & (ERTS_PSFLG_SUSPENDED | ERTS_PSFLG_EXITING @@ -10721,11 +9991,9 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) | ERTS_PSFLG_ACTIVE_SYS | ERTS_PSFLG_DIRTY_ACTIVE_SYS)) != ERTS_PSFLG_SUSPENDED) -#ifdef ERTS_DIRTY_SCHEDULERS & (!(state & (ERTS_PSFLG_EXITING | ERTS_PSFLG_PENDING_EXIT)) | (!!is_normal_sched)) -#endif ); if (run_process) { @@ -10735,7 +10003,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) else new |= psflg_running; } - state = erts_smp_atomic32_cmpxchg_relb(&p->state, new, exp); + state = erts_atomic32_cmpxchg_relb(&p->state, new, exp); if (state == exp) { if (!run_process) { if (proxy_p) { @@ -10762,22 +10030,21 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) calls = 0; reds = context_reds; - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); } ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_EMULATOR); -#ifdef ERTS_SMP if (flags & ERTS_RUNQ_FLG_PROTECTED) (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); if (erts_sched_stat.enabled) { int prio; @@ -10788,28 +10055,24 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) prio = (int) ERTS_PSFLGS_GET_USR_PRIO(state); - erts_smp_spin_lock(&erts_sched_stat.lock); + erts_spin_lock(&erts_sched_stat.lock); erts_sched_stat.prio[prio].total_executed++; erts_sched_stat.prio[prio].executed++; if (migrated) { erts_sched_stat.prio[prio].total_migrated++; erts_sched_stat.prio[prio].migrated++; } - erts_smp_spin_unlock(&erts_sched_stat.lock); + erts_spin_unlock(&erts_sched_stat.lock); } - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); -#ifndef ERTS_DIRTY_SCHEDULERS - ASSERT(!p->scheduler_data); - p->scheduler_data = esdp; -#else /* ERTS_DIRTY_SCHEDULERS */ if (is_normal_sched) { if ((!!(state & ERTS_PSFLGS_DIRTY_WORK)) & (!(state & ERTS_PSFLG_ACTIVE_SYS))) { /* Migrate to dirty scheduler... 
*/ sunlock_sched_out_proc: - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); goto sched_out_proc; } ASSERT(!p->scheduler_data); @@ -10839,17 +10102,15 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) : (rq == ERTS_DIRTY_IO_RUNQ && (state & ERTS_PSFLG_DIRTY_IO_PROC))); } -#endif if (state & ERTS_PSFLG_PENDING_EXIT) { erts_handle_pending_exit(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); } -#endif /* ERTS_SMP */ - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); /* Clear tracer if it has been removed */ if (IS_TRACED(p) && erts_is_tracer_proc_enabled( @@ -10885,10 +10146,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) reds -= cost; if (reds <= 0) goto sched_out_proc; -#ifdef ERTS_DIRTY_SCHEDULERS if (state & ERTS_PSFLGS_DIRTY_WORK) goto sched_out_proc; -#endif } ASSERT(state & psflg_running_sys); @@ -10907,7 +10166,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) n &= ~psflg_running_sys; n |= psflg_running; - state = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); + state = erts_atomic32_cmpxchg_mb(&p->state, n, e); if (state == e) { state = n; break; @@ -10926,10 +10185,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) reds -= cost; if (reds <= 0) goto sched_out_proc; -#ifdef ERTS_DIRTY_SCHEDULERS if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) goto sched_out_proc; -#endif } } } @@ -10941,12 +10198,12 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) p->fcalls = reds; - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); /* Never run a suspended process */ #ifdef DEBUG { - erts_aint32_t dstate = erts_smp_atomic32_read_nob(&p->state); + erts_aint32_t dstate = erts_atomic32_read_nob(&p->state); ASSERT(!(ERTS_PSFLG_SUSPENDED & dstate) || (ERTS_PSFLG_DIRTY_RUNNING_SYS & dstate)); } @@ -10956,9 +10213,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) if (!(state & ERTS_PSFLG_EXITING) && ERTS_PTMR_IS_TIMED_OUT(p)) { BeamInstr** pi; -#ifdef ERTS_SMP ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore); -#endif pi = (BeamInstr **) p->def_arg_reg; p->i = *pi; p->flags &= ~F_INSLPQUEUE; @@ -10975,13 +10230,11 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, Eterm st_result, int normal_sched) { Process *rp; -#ifdef ERTS_DIRTY_SCHEDULERS if (!normal_sched) rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN, st->requester, 0, ERTS_P2P_FLG_INC_REFC); else -#endif rp = erts_proc_lookup(st->requester); if (rp) { ErtsProcLocks rp_locks; @@ -11029,12 +10282,10 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, rp_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); -#ifdef ERTS_DIRTY_SCHEDULERS if (!normal_sched) erts_proc_dec_refc(rp); -#endif } erts_cleanup_offheap(&st->off_heap); @@ -11053,7 +10304,7 @@ fetch_sys_task(Process *c_p, erts_aint32_t state, int *qmaskp, int *priop) *priop = -1; /* Shut up annoying erroneous warning */ - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); if (!c_p->sys_task_qs) { qmask = 0; @@ -11173,13 +10424,13 @@ fetch_sys_task(Process *c_p, erts_aint32_t state, int *qmaskp, int *priop) if (a == n) break; - a = erts_smp_atomic32_cmpxchg_nob(&c_p->state, n, e); + a = 
erts_atomic32_cmpxchg_nob(&c_p->state, n, e); } while (a != e); } done: - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); if (unused_qs) proc_sys_task_queues_free(unused_qs); @@ -11190,9 +10441,7 @@ done: } static void save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio); -#ifdef ERTS_DIRTY_SCHEDULERS static void save_dirty_task(Process *c_p, ErtsProcSysTask *st); -#endif static int execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) @@ -11203,7 +10452,7 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) int qmask = 0; ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p))); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); do { ErtsProcSysTaskType type; @@ -11212,10 +10461,8 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) Eterm st_res; if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) { -#ifdef ERTS_SMP if (state & ERTS_PSFLG_PENDING_EXIT) erts_handle_pending_exit(c_p, ERTS_PROC_LOCK_MAIN); -#endif ASSERT(ERTS_PROC_IS_EXITING(c_p)); break; } @@ -11242,13 +10489,11 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) FLAGS(c_p) |= F_NEED_FULLSWEEP; } reds -= scheduler_gc_proc(c_p, reds); -#ifdef ERTS_DIRTY_SCHEDULERS if (c_p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) { save_dirty_task(c_p, st); st = NULL; break; } -#endif if (type == ERTS_PSTT_GC_MAJOR) minor_gc = major_gc = 1; else @@ -11290,13 +10535,11 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) fcalls, do_gc); reds -= cla_reds; if (is_non_value(st_res)) { -#ifdef ERTS_DIRTY_SCHEDULERS if (c_p->flags & F_DIRTY_CLA) { save_dirty_task(c_p, st); st = NULL; break; } -#endif /* Needed gc, but gc was disabled */ save_gc_task(c_p, st, st_prio); st = NULL; @@ -11310,18 +10553,14 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) reds -= erts_complete_off_heap_message_queue_change(c_p); st_res = am_true; break; -#ifdef ERTS_SMP case ERTS_PSTT_FTMQ: reds -= erts_flush_trace_messages(c_p, ERTS_PROC_LOCK_MAIN); st_res = am_true; break; -#endif -#ifdef ERTS_SMP case ERTS_PSTT_ETS_FREE_FIXATION: reds -= erts_db_execute_free_fixation(c_p, (DbFixation*)st->arg[0]); st_res = am_true; break; -#endif default: ERTS_INTERNAL_ERROR("Invalid process sys task type"); st_res = am_false; @@ -11330,7 +10569,7 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) if (st) reds += notify_sys_task_executed(c_p, st, st_res, 1); - state = erts_smp_atomic32_read_acqb(&c_p->state); + state = erts_atomic32_read_acqb(&c_p->state); } while (qmask && reds > 0); *statep = state; @@ -11351,20 +10590,18 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds) * are dirty tasks. 
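cleanup_sys_tasks() below drains c_p->dirty_sys_tasks before consulting the ordinary task queues, and save_dirty_task() further down is a two-line LIFO push onto the same list. A toy version of that list discipline, with hypothetical types and names:

    /* LIFO task list in the shape of dirty_sys_tasks; sketch only. */
    #include <stdio.h>
    #include <stdlib.h>

    struct sys_task { int id; struct sys_task *next; };

    static void save_dirty(struct sys_task **head, struct sys_task *st)
    {
        st->next = *head;                 /* same shape as save_dirty_task() */
        *head = st;
    }

    static struct sys_task *fetch_dirty(struct sys_task **head)
    {
        struct sys_task *st = *head;
        if (st)
            *head = st->next;
        return st;
    }

    int main(void)
    {
        struct sys_task *head = NULL, *st;
        for (int i = 0; i < 3; i++) {
            st = malloc(sizeof *st);
            st->id = i;
            save_dirty(&head, st);
        }
        while ((st = fetch_dirty(&head))) {  /* pops 2, 1, 0 */
            printf("task %d\n", st->id);
            free(st);
        }
        return 0;
    }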
*/ - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); do { ErtsProcSysTask *st; Eterm st_res; int st_prio; -#ifdef ERTS_DIRTY_SCHEDULERS if (c_p->dirty_sys_tasks) { st = c_p->dirty_sys_tasks; c_p->dirty_sys_tasks = st->next; } else -#endif { st = fetch_sys_task(c_p, state, &qmask, &st_prio); if (!st) @@ -11382,12 +10619,10 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds) case ERTS_PSTT_CLA: st_res = am_ok; break; -#ifdef ERTS_SMP case ERTS_PSTT_FTMQ: reds -= erts_flush_trace_messages(c_p, ERTS_PROC_LOCK_MAIN); st_res = am_true; break; -#endif default: ERTS_INTERNAL_ERROR("Invalid process sys task type"); st_res = am_false; @@ -11396,13 +10631,12 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds) reds += notify_sys_task_executed(c_p, st, st_res, 1); - state = erts_smp_atomic32_read_acqb(&c_p->state); + state = erts_atomic32_read_acqb(&c_p->state); } while (qmask && reds < max_reds); return reds; } -#ifdef ERTS_DIRTY_SCHEDULERS void erts_execute_dirty_system_task(Process *c_p) @@ -11431,19 +10665,19 @@ erts_execute_dirty_system_task(Process *c_p) } if (c_p->flags & F_DIRTY_GC_HIBERNATE) { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); if (c_p->msg.len) c_p->flags &= ~F_DIRTY_GC_HIBERNATE; /* operation aborted... */ else { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); c_p->fvalue = NIL; erts_garbage_collect_hibernate(c_p); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); ASSERT(!ERTS_PROC_IS_EXITING(c_p)); } - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); } if (c_p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) { @@ -11487,7 +10721,7 @@ erts_execute_dirty_system_task(Process *c_p) } - erts_smp_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_DIRTY_ACTIVE_SYS); + erts_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_DIRTY_ACTIVE_SYS); } static BIF_RETTYPE @@ -11540,12 +10774,11 @@ dispatch_system_task(Process *c_p, erts_aint_t fail_state, erts_queue_message(rp, rp_locks, mp, msg, st->requester); if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); return ret; } -#endif static BIF_RETTYPE request_system_task(Process *c_p, Eterm requester, Eterm target, @@ -11650,7 +10883,6 @@ request_system_task(Process *c_p, Eterm requester, Eterm target, st->type = ERTS_PSTT_CPC; if (!rp) goto noproc; -#ifdef ERTS_DIRTY_SCHEDULERS /* * If the process should start executing dirty * code it is important that this task is @@ -11658,7 +10890,6 @@ request_system_task(Process *c_p, Eterm requester, Eterm target, */ fail_state |= (ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_DIRTY_RUNNING_SYS); -#endif break; case am_copy_literals: @@ -11680,14 +10911,12 @@ request_system_task(Process *c_p, Eterm requester, Eterm target, noproc: failure = noproc_res; } -#ifdef ERTS_DIRTY_SCHEDULERS else if (fail_state & (ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_DIRTY_RUNNING_SYS)) { ret = dispatch_system_task(c_p, fail_state, st, target, priority, operation); goto cleanup_return; } -#endif else { 
ERTS_INTERNAL_ERROR("Unknown failure schedule_process_sys_task()"); failure = am_internal_error; @@ -11703,9 +10932,7 @@ badarg: ERTS_BIF_PREP_ERROR(ret, c_p, BADARG); -#ifdef ERTS_DIRTY_SCHEDULERS cleanup_return: -#endif if (st) { erts_cleanup_offheap(&st->off_heap); @@ -11746,7 +10973,7 @@ erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type, void* arg) st->req_id_sz = 0; st->arg[0] = (Eterm)arg; ERTS_INIT_OFF_HEAP(&st->off_heap); - state = erts_smp_atomic32_read_nob(&rp->state); + state = erts_atomic32_read_nob(&rp->state); fail_state = ERTS_PSFLG_EXITING; @@ -11769,7 +10996,6 @@ erts_schedule_ets_free_fixation(Eterm pid, DbFixation* fix) erts_schedule_generic_sys_task(pid, ERTS_PSTT_ETS_FREE_FIXATION, fix); } -#ifdef ERTS_DIRTY_SCHEDULERS static void flush_dirty_trace_messages(void *vpid) @@ -11786,45 +11012,35 @@ flush_dirty_trace_messages(void *vpid) proc = erts_pid2proc_opt(NULL, 0, pid, ERTS_PROC_LOCK_MAIN, 0); if (proc) { (void) erts_flush_trace_messages(proc, ERTS_PROC_LOCK_MAIN); - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); } } -#endif /* ERTS_DIRTY_SCHEDULERS */ void erts_schedule_flush_trace_messages(Process *proc, int force_on_proc) { -#ifdef ERTS_SMP ErtsThrPrgrDelayHandle dhndl; -#endif Eterm pid = proc->common.id; -#ifdef ERTS_DIRTY_SCHEDULERS erts_aint32_t state; if (!force_on_proc) { - state = erts_smp_atomic32_read_nob(&proc->state); + state = erts_atomic32_read_nob(&proc->state); if (state & (ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_DIRTY_RUNNING_SYS)) { goto sched_flush_dirty; } } -#endif -#ifdef ERTS_SMP dhndl = erts_thr_progress_unmanaged_delay(); -#endif erts_schedule_generic_sys_task(pid, ERTS_PSTT_FTMQ, NULL); -#ifdef ERTS_SMP erts_thr_progress_unmanaged_continue(dhndl); -#endif -#ifdef ERTS_DIRTY_SCHEDULERS if (!force_on_proc) { - state = erts_smp_atomic32_read_mb(&proc->state); + state = erts_atomic32_read_mb(&proc->state); if (state & (ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_DIRTY_RUNNING_SYS)) { void *vargp; @@ -11852,7 +11068,6 @@ erts_schedule_flush_trace_messages(Process *proc, int force_on_proc) erts_schedule_misc_aux_work(1, flush_dirty_trace_messages, vargp); } } -#endif } static void @@ -11861,7 +11076,7 @@ save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio) erts_aint32_t state; ErtsProcSysTaskQs *qs; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); qs = ERTS_PROC_GET_DELAYED_GC_TASK_QS(c_p); if (!qs) { @@ -11891,7 +11106,7 @@ save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio) } } - state = erts_smp_atomic32_read_nob(&c_p->state); + state = erts_atomic32_read_nob(&c_p->state); ASSERT((ERTS_PSFLG_RUNNING | ERTS_PSFLG_RUNNING_SYS | ERTS_PSFLG_DIRTY_RUNNING @@ -11907,20 +11122,18 @@ save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio) n &= ~ERTS_PSFLGS_ACT_PRIO_MASK; n |= prio << ERTS_PSFLGS_ACT_PRIO_OFFSET; } - state = erts_smp_atomic32_cmpxchg_relb(&c_p->state, n, e); + state = erts_atomic32_cmpxchg_relb(&c_p->state, n, e); if (state == e) break; } } -#ifdef ERTS_DIRTY_SCHEDULERS static void save_dirty_task(Process *c_p, ErtsProcSysTask *st) { st->next = c_p->dirty_sys_tasks; c_p->dirty_sys_tasks = st; } -#endif int erts_set_gc_state(Process *c_p, int enable) @@ -11928,8 +11141,8 @@ erts_set_gc_state(Process *c_p, int enable) ErtsProcSysTaskQs *dgc_tsk_qs; ASSERT(c_p == erts_get_current_process()); ASSERT((ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS) - & 
erts_smp_atomic32_read_nob(&c_p->state)); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); + & erts_atomic32_read_nob(&c_p->state)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); if (!enable) { c_p->flags |= F_DISABLE_GC; @@ -11944,7 +11157,7 @@ erts_set_gc_state(Process *c_p, int enable) /* Move delayed gc tasks into sys tasks queues. */ - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); if (!c_p->sys_task_qs) { c_p->sys_task_qs = dgc_tsk_qs; @@ -12017,7 +11230,7 @@ erts_set_gc_state(Process *c_p, int enable) erts_aint32_t aprio, state = #endif - erts_smp_atomic32_read_bset_nob(&c_p->state, + erts_atomic32_read_bset_nob(&c_p->state, (ERTS_PSFLG_DELAYED_SYS | ERTS_PSFLG_ACTIVE_SYS), ERTS_PSFLG_ACTIVE_SYS); @@ -12031,7 +11244,7 @@ erts_set_gc_state(Process *c_p, int enable) } #endif - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); (void) ERTS_PROC_SET_DELAYED_GC_TASK_QS(c_p, NULL); @@ -12047,24 +11260,24 @@ erts_sched_stat_modify(int what) int ix; switch (what) { case ERTS_SCHED_STAT_MODIFY_ENABLE: - erts_smp_thr_progress_block(); + erts_thr_progress_block(); erts_sched_stat.enabled = 1; - erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); break; case ERTS_SCHED_STAT_MODIFY_DISABLE: - erts_smp_thr_progress_block(); + erts_thr_progress_block(); erts_sched_stat.enabled = 0; - erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); break; case ERTS_SCHED_STAT_MODIFY_CLEAR: - erts_smp_spin_lock(&erts_sched_stat.lock); + erts_spin_lock(&erts_sched_stat.lock); for (ix = 0; ix < ERTS_NO_PRIO_LEVELS; ix++) { erts_sched_stat.prio[ix].total_executed = 0; erts_sched_stat.prio[ix].executed = 0; erts_sched_stat.prio[ix].total_migrated = 0; erts_sched_stat.prio[ix].migrated = 0; } - erts_smp_spin_unlock(&erts_sched_stat.lock); + erts_spin_unlock(&erts_sched_stat.lock); break; } } @@ -12078,7 +11291,7 @@ erts_sched_stat_term(Process *p, int total) Uint executed[ERTS_NO_PRIO_LEVELS]; Uint migrated[ERTS_NO_PRIO_LEVELS]; - erts_smp_spin_lock(&erts_sched_stat.lock); + erts_spin_lock(&erts_sched_stat.lock); if (total) { int i; for (i = 0; i < ERTS_NO_PRIO_LEVELS; i++) { @@ -12097,7 +11310,7 @@ erts_sched_stat_term(Process *p, int total) erts_sched_stat.prio[i].migrated = 0; } } - erts_smp_spin_unlock(&erts_sched_stat.lock); + erts_spin_unlock(&erts_sched_stat.lock); sz = 0; (void) erts_bld_atom_2uint_3tup_list(NULL, &sz, ERTS_NO_PRIO_LEVELS, @@ -12117,7 +11330,6 @@ erts_schedule_misc_op(void (*func)(void *), void *arg) ErtsSchedulerData *esdp = erts_get_scheduler_data(); ErtsRunQueue *rq = esdp ? 
esdp->run_queue : ERTS_RUNQ_IX(0); ErtsMiscOpList *molp = misc_op_list_alloc(); -#ifdef ERTS_SMP ErtsMigrationPaths *mpaths = erts_get_migration_paths(); if (!mpaths) @@ -12127,9 +11339,8 @@ erts_schedule_misc_op(void (*func)(void *), void *arg) if (erq) rq = erq; } -#endif - erts_smp_runq_lock(rq); + erts_runq_lock(rq); molp->next = NULL; molp->func = func; @@ -12140,13 +11351,11 @@ erts_schedule_misc_op(void (*func)(void *), void *arg) rq->misc.start = molp; rq->misc.end = molp; -#ifdef ERTS_SMP non_empty_runq(rq); -#endif ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_MISC_OP); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); smp_notify_inc_runq(rq); } @@ -12179,7 +11388,7 @@ exec_misc_ops(ErtsRunQueue *rq) if (!rq->misc.start) ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_MISC_OP); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); while (molp) { tmp_molp = molp; @@ -12188,7 +11397,7 @@ exec_misc_ops(ErtsRunQueue *rq) misc_op_list_free(tmp_molp); } - erts_smp_runq_lock(rq); + erts_runq_lock(rq); } Uint @@ -12219,12 +11428,12 @@ erts_get_exact_total_reductions(Process *c_p, Uint *redsp, Uint *diffp) { Uint reds = erts_current_reductions(c_p, c_p); int ix; - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); /* * Wait for other schedulers to schedule out their processes * and update 'reductions'. */ - erts_smp_thr_progress_block(); + erts_thr_progress_block(); for (reds = 0, ix = 0; ix < erts_no_run_queues; ix++) reds += ERTS_RUNQ_IX(ix)->procs.reductions; if (redsp) @@ -12232,8 +11441,8 @@ erts_get_exact_total_reductions(Process *c_p, Uint *redsp, Uint *diffp) if (diffp) *diffp = reds - last_exact_reductions; last_exact_reductions = reds; - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } static void delete_process(Process* p); @@ -12241,10 +11450,8 @@ static void delete_process(Process* p); void erts_free_proc(Process *p) { -#ifdef ERTS_SMP erts_proc_lock_fin(p); -#endif - ASSERT(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_FREE); + ASSERT(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_FREE); ASSERT(0 == erts_proc_read_refc(p)); if (p->flags & F_DELAYED_DEL_PROC) delete_process(p); @@ -12263,17 +11470,13 @@ static void early_init_process_struct(void *varg, Eterm data) Process *proc = arg->proc; proc->common.id = make_internal_pid(data); -#ifdef ERTS_DIRTY_SCHEDULERS - erts_smp_atomic32_init_nob(&proc->dirty_state, 0); + erts_atomic32_init_nob(&proc->dirty_state, 0); proc->dirty_sys_tasks = NULL; -#endif - erts_smp_atomic32_init_relb(&proc->state, arg->state); + erts_atomic32_init_relb(&proc->state, arg->state); -#ifdef ERTS_SMP RUNQ_SET_RQ(&proc->run_queue, arg->run_queue); erts_proc_lock_init(proc); /* All locks locked */ -#endif } @@ -12346,7 +11549,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). INITIALIZE_LITERAL_PURGE_AREA(litarea); #endif - erts_smp_proc_lock(parent, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(parent, ERTS_PROC_LOCKS_ALL_MINOR); /* * Check for errors. @@ -12394,9 +11597,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). 
goto error; } - ASSERT((erts_smp_atomic32_read_nob(&p->state) + ASSERT((erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_ON_HEAP_MSGQ) - || (erts_smp_atomic32_read_nob(&p->state) + || (erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_OFF_HEAP_MSGQ)); #ifdef SHCOPY_SPAWN @@ -12422,7 +11625,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). p->min_vheap_size = BIN_VH_MIN_SIZE; MAX_HEAP_SIZE_SET(p, H_MAX_SIZE); MAX_HEAP_SIZE_FLAGS_SET(p, H_MAX_FLAGS); - p->max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); + p->max_gen_gcs = (Uint16) erts_atomic32_read_nob(&erts_max_gen_gcs); } p->schedule_count = 0; ASSERT(p->min_heap_size == erts_next_heap_size(p->min_heap_size, 0)); @@ -12448,10 +11651,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). #ifdef HIPE hipe_init_process(&p->hipe); -#ifdef ERTS_SMP hipe_init_process_smp(&p->hipe_smp); #endif -#endif p->heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP, sizeof(Eterm)*sz); p->old_hend = p->old_htop = p->old_heap = NULL; p->high_water = p->heap; @@ -12520,16 +11721,14 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). p->msg.save = &p->msg.first; p->msg.saved_last = &p->msg.first; p->msg.len = 0; -#ifdef ERTS_SMP p->msg_inq.first = NULL; p->msg_inq.last = &p->msg_inq.first; p->msg_inq.len = 0; -#endif p->bif_timers = NULL; p->mbuf = NULL; p->msg_frag = NULL; p->mbuf_sz = 0; - erts_smp_atomic_init_nob(&p->psd, (erts_aint_t) NULL); + erts_atomic_init_nob(&p->psd, (erts_aint_t) NULL); p->dictionary = NULL; p->seq_trace_lastcnt = 0; p->seq_trace_clock = 0; @@ -12547,14 +11746,12 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). p->last_old_htop = NULL; #endif -#ifdef ERTS_SMP p->trace_msg_q = NULL; p->scheduler_data = NULL; p->suspendee = NIL; p->pending_suspenders = NULL; p->pending_exit.reason = THE_NON_VALUE; p->pending_exit.bp = NULL; -#endif #if !defined(NO_FPE_SIGNALS) || defined(HIPE) p->fp_exception = 0; @@ -12582,8 +11779,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). } if (ARE_TRACE_FLAGS_ON(parent, F_TRACE_PROCS)) { locks &= ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); - erts_smp_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); trace_proc_spawn(parent, am_spawn, p->common.id, mod, func, args); if (so->flags & SPO_LINK) trace_proc(parent, locks, parent, am_link, p->common.id); @@ -12595,8 +11792,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). == (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE)) { /* This happens when parent was not traced, but child is */ locks &= ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); - erts_smp_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); } trace_proc_spawn(p, am_spawned, parent->common.id, mod, func, args); if (so->flags & SPO_LINK) @@ -12635,7 +11832,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). 
so->mref = mref; } - erts_smp_proc_unlock(p, locks); + erts_proc_unlock(p, locks); res = p->common.id; @@ -12643,7 +11840,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). * Schedule process for execution. */ - erts_smp_proc_unlock(parent, locks & ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(parent, locks & ERTS_PROC_LOCKS_ALL_MINOR); schedule_process(p, state, 0); @@ -12663,7 +11860,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). error: - erts_smp_proc_unlock(parent, locks & ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(parent, locks & ERTS_PROC_LOCKS_ALL_MINOR); return res; } @@ -12714,7 +11911,7 @@ void erts_init_empty_process(Process *p) p->mbuf = NULL; p->msg_frag = NULL; p->mbuf_sz = 0; - erts_smp_atomic_init_nob(&p->psd, (erts_aint_t) NULL); + erts_atomic_init_nob(&p->psd, (erts_aint_t) NULL); ERTS_P_MONITORS(p) = NULL; ERTS_P_LINKS(p) = NULL; /* List of links */ p->nodes_monitors = NULL; @@ -12757,23 +11954,18 @@ void erts_init_empty_process(Process *p) #ifdef HIPE hipe_init_process(&p->hipe); -#ifdef ERTS_SMP hipe_init_process_smp(&p->hipe_smp); #endif -#endif INIT_HOLE_CHECK(p); #ifdef DEBUG p->last_old_htop = NULL; #endif -#ifdef ERTS_DIRTY_SCHEDULERS - erts_smp_atomic32_init_nob(&p->dirty_state, 0); + erts_atomic32_init_nob(&p->dirty_state, 0); p->dirty_sys_tasks = NULL; -#endif - erts_smp_atomic32_init_nob(&p->state, (erts_aint32_t) PRIORITY_NORMAL); + erts_atomic32_init_nob(&p->state, (erts_aint32_t) PRIORITY_NORMAL); -#ifdef ERTS_SMP p->scheduler_data = NULL; p->msg_inq.first = NULL; p->msg_inq.last = &p->msg_inq.first; @@ -12783,9 +11975,8 @@ void erts_init_empty_process(Process *p) p->pending_exit.reason = THE_NON_VALUE; p->pending_exit.bp = NULL; erts_proc_lock_init(p); - erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL); + erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL); RUNQ_SET_RQ(&p->run_queue, ERTS_RUNQ_IX(0)); -#endif #if !defined(NO_FPE_SIGNALS) || defined(HIPE) p->fp_exception = 0; @@ -12832,14 +12023,12 @@ erts_debug_verify_clean_empty_process(Process* p) ASSERT(p->parent == NIL); -#ifdef ERTS_SMP ASSERT(p->msg_inq.first == NULL); ASSERT(p->msg_inq.len == 0); ASSERT(p->suspendee == NIL); ASSERT(p->pending_suspenders == NULL); ASSERT(p->pending_exit.reason == THE_NON_VALUE); ASSERT(p->pending_exit.bp == NULL); -#endif /* Thing that erts_cleanup_empty_process() cleans up */ @@ -12864,9 +12053,7 @@ erts_cleanup_empty_process(Process* p) free_message_buffer(p->mbuf); p->mbuf = NULL; } -#ifdef ERTS_SMP erts_proc_lock_fin(p); -#endif #ifdef DEBUG erts_debug_verify_clean_empty_process(p); #endif @@ -12898,10 +12085,10 @@ delete_process(Process* p) /* Cleanup psd */ - psd = (ErtsPSD *) erts_smp_atomic_read_nob(&p->psd); + psd = (ErtsPSD *) erts_atomic_read_nob(&p->psd); if (psd) { - erts_smp_atomic_set_nob(&p->psd, (erts_aint_t) NULL); /* Reduction counting depends on this... */ + erts_atomic_set_nob(&p->psd, (erts_aint_t) NULL); /* Reduction counting depends on this... 
*/ erts_free(ERTS_ALC_T_PSD, psd); } @@ -12959,7 +12146,7 @@ set_proc_exiting(Process *p, { erts_aint32_t state = in_state, enq_prio = -1; int enqueue; - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCKS_ALL); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCKS_ALL); enqueue = change_proc_schedule_state(p, (ERTS_PSFLG_SUSPENDED @@ -12982,22 +12169,6 @@ set_proc_exiting(Process *p, KILL_CATCHES(p); p->i = (BeamInstr *) beam_exit; -#ifndef ERTS_SMP - if (state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS) - && !(state & ERTS_PSFLG_GC)) { - /* - * I non smp case: - * - * Currently executing process might be sent an exit - * signal if it is traced by a port that it also is - * linked to, and the port terminates during the - * trace. In this case we want schedule out the - * process as quickly as possible in order to detect - * the event as fast as possible. - */ - ERTS_VBUMP_ALL_REDS(p); - } -#endif add2runq(enqueue, enq_prio, p, state, NULL); } @@ -13010,9 +12181,9 @@ set_proc_self_exiting(Process *c_p) #endif erts_aint32_t state, enq_prio = -1; - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCKS_ALL); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCKS_ALL); - state = erts_smp_atomic32_read_nob(&c_p->state); + state = erts_atomic32_read_nob(&c_p->state); ASSERT(state & (ERTS_PSFLG_RUNNING |ERTS_PSFLG_RUNNING_SYS | ERTS_PSFLG_DIRTY_RUNNING @@ -13032,38 +12203,37 @@ set_proc_self_exiting(Process *c_p) return state; } -#ifdef ERTS_SMP void erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks) { ErtsProcLocks xlocks; ASSERT(is_value(c_p->pending_exit.reason)); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == locks); - ERTS_SMP_LC_ASSERT(locks & ERTS_PROC_LOCK_MAIN); - ERTS_SMP_LC_ASSERT(!((ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE) - & erts_smp_atomic32_read_nob(&c_p->state))); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == locks); + ERTS_LC_ASSERT(locks & ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(!((ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE) + & erts_atomic32_read_nob(&c_p->state))); /* Ensure that all locks on c_p are locked before proceeding... */ if (locks == ERTS_PROC_LOCKS_ALL) xlocks = 0; else { xlocks = ~locks & ERTS_PROC_LOCKS_ALL; - if (erts_smp_proc_trylock(c_p, xlocks) == EBUSY) { - erts_smp_proc_unlock(c_p, locks & ~ERTS_PROC_LOCK_MAIN); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + if (erts_proc_trylock(c_p, xlocks) == EBUSY) { + erts_proc_unlock(c_p, locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); } } set_proc_exiting(c_p, - erts_smp_atomic32_read_acqb(&c_p->state), + erts_atomic32_read_acqb(&c_p->state), c_p->pending_exit.reason, c_p->pending_exit.bp); c_p->pending_exit.reason = THE_NON_VALUE; c_p->pending_exit.bp = NULL; if (xlocks) - erts_smp_proc_unlock(c_p, xlocks); + erts_proc_unlock(c_p, xlocks); } static void save_pending_exiter(Process *p, ErtsProcList *plp); @@ -13084,9 +12254,9 @@ do_handle_pending_exiters(ErtsProcList *pnd_xtrs) * pending exit will soon be detected and handled by the * scheduler running the process (at schedule in/out). 
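do_handle_pending_exiters() below leans on erts_proc_trylock(p, ERTS_PROC_LOCKS_ALL): if all of a process's locks cannot be taken at once, the caller must not block — the holder may be the very scheduler about to run the process — so the work is deferred instead. The same trylock-or-defer shape, sketched with plain pthreads; the real ERTS lock set and its ordering rules are considerably more elaborate:

    /* Trylock-all with rollback; defer the work on EBUSY. Sketch only. */
    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t locks[4] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
    };

    static int trylock_all(void)
    {
        for (int i = 0; i < 4; i++) {
            if (pthread_mutex_trylock(&locks[i]) == EBUSY) {
                while (--i >= 0)          /* roll back what we already hold */
                    pthread_mutex_unlock(&locks[i]);
                return EBUSY;
            }
        }
        return 0;
    }

    static void unlock_all(void)
    {
        for (int i = 3; i >= 0; i--)
            pthread_mutex_unlock(&locks[i]);
    }

    int main(void)
    {
        if (trylock_all() == 0) {
            puts("handle pending exit now");
            unlock_all();
        } else {
            puts("busy: requeue and let the scheduler detect it");
        }
        return 0;
    }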
*/ - if (erts_smp_proc_trylock(p, ERTS_PROC_LOCKS_ALL) != EBUSY) { + if (erts_proc_trylock(p, ERTS_PROC_LOCKS_ALL) != EBUSY) { if (erts_proclist_same(plp, p)) { - state = erts_smp_atomic32_read_acqb(&p->state); + state = erts_atomic32_read_acqb(&p->state); if (!(state & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_RUNNING_SYS | ERTS_PSFLG_EXITING))) { @@ -13094,12 +12264,12 @@ do_handle_pending_exiters(ErtsProcList *pnd_xtrs) erts_handle_pending_exit(p, ERTS_PROC_LOCKS_ALL); } } - erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL); + erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL); } else { - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); if (erts_proclist_same(plp, p)) { - state = erts_smp_atomic32_read_acqb(&p->state); + state = erts_atomic32_read_acqb(&p->state); if (!(state & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_RUNNING_SYS | ERTS_PSFLG_EXITING))) { @@ -13111,7 +12281,7 @@ do_handle_pending_exiters(ErtsProcList *pnd_xtrs) plp = NULL; } } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } } if (plp) @@ -13126,7 +12296,7 @@ save_pending_exiter(Process *p, ErtsProcList *plp) ErtsSchedulerSleepInfo *ssi; ErtsRunQueue *rq; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); rq = RUNQ_READ_RQ(&p->run_queue); ASSERT(rq && !ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); @@ -13134,7 +12304,7 @@ save_pending_exiter(Process *p, ErtsProcList *plp) if (!plp) plp = proclist_create(p); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); erts_proclist_store_last(&rq->procs.pending_exiters, plp); @@ -13142,12 +12312,11 @@ save_pending_exiter(Process *p, ErtsProcList *plp) ssi = rq->scheduler->ssi; - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); set_aux_work_flags_wakeup_nob(ssi, ERTS_SSI_AUX_WORK_PENDING_EXITERS); } -#endif /* * This function delivers an EXIT message to a process @@ -13283,11 +12452,11 @@ send_exit_signal(Process *c_p, /* current process if and only Uint32 flags /* flags */ ) { - erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state); + erts_aint32_t state = erts_atomic32_read_nob(&rp->state); Eterm rsn = reason == am_kill ? 
am_killed : reason; - ERTS_SMP_LC_ASSERT(*rp_locks == erts_proc_lc_my_proc_locks(rp)); - ERTS_SMP_LC_ASSERT((*rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) + ERTS_LC_ASSERT(*rp_locks == erts_proc_lc_my_proc_locks(rp)); + ERTS_LC_ASSERT((*rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) == ERTS_PROC_LOCKS_XSIG_SEND); ASSERT(reason != THE_NON_VALUE); @@ -13308,7 +12477,7 @@ send_exit_signal(Process *c_p, /* current process if and only if ((state & ERTS_PSFLG_TRAP_EXIT) && (reason != am_kill || (flags & ERTS_XSIG_FLG_IGN_KILL))) { /* have to release the status lock in order to send the exit message */ - erts_smp_proc_unlock(rp, *rp_locks & ERTS_PROC_LOCKS_XSIG_SEND); + erts_proc_unlock(rp, *rp_locks & ERTS_PROC_LOCKS_XSIG_SEND); *rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND; if (have_seqtrace(token) && token_update) seq_trace_update_send(token_update); @@ -13319,7 +12488,6 @@ send_exit_signal(Process *c_p, /* current process if and only return 1; /* Receiver will get a message */ } else if (reason != am_normal || (flags & ERTS_XSIG_FLG_NO_IGN_NORMAL)) { -#ifdef ERTS_SMP if (!(state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))) { ASSERT(!rp->pending_exit.bp); @@ -13329,10 +12497,10 @@ send_exit_signal(Process *c_p, /* current process if and only if (*rp_locks != ERTS_PROC_LOCKS_ALL) { ErtsProcLocks need_locks = (~(*rp_locks) & ERTS_PROC_LOCKS_ALL); - if (erts_smp_proc_trylock(c_p, need_locks) == EBUSY) { - erts_smp_proc_unlock(c_p, + if (erts_proc_trylock(c_p, need_locks) == EBUSY) { + erts_proc_unlock(c_p, *rp_locks & ~ERTS_PROC_LOCK_MAIN); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); } *rp_locks = ERTS_PROC_LOCKS_ALL; } @@ -13346,7 +12514,7 @@ send_exit_signal(Process *c_p, /* current process if and only ErlHeapFragment *bp = NULL; Eterm rsn_cpy; if (need_locks - && erts_smp_proc_trylock(rp, need_locks) == EBUSY) { + && erts_proc_trylock(rp, need_locks) == EBUSY) { /* ... but we havn't got all locks on it ... */ save_pending_exiter(rp, NULL); /* @@ -13358,7 +12526,7 @@ send_exit_signal(Process *c_p, /* current process if and only /* ...and we have all locks on it... */ *rp_locks = ERTS_PROC_LOCKS_ALL; - state = erts_smp_atomic32_read_nob(&rp->state); + state = erts_atomic32_read_nob(&rp->state); if (is_immed(rsn)) rsn_cpy = rsn; @@ -13366,14 +12534,12 @@ send_exit_signal(Process *c_p, /* current process if and only Eterm *hp; ErlOffHeap *ohp; Uint rsn_sz = size_object(rsn); -#ifdef ERTS_DIRTY_SCHEDULERS if (state & ERTS_PSFLG_DIRTY_RUNNING) { bp = new_message_buffer(rsn_sz); ohp = &bp->off_heap; hp = &bp->mem[0]; } else -#endif { hp = HAlloc(rp, rsn_sz); ohp = &rp->off_heap; @@ -13413,12 +12579,8 @@ send_exit_signal(Process *c_p, /* current process if and only * has been scheduled, we may need to add it to a normal run * queue... 
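When the exit reason is not an immediate term, the code above chooses where to copy it: straight onto the receiver's heap via HAlloc() when that is safe, or into a detached heap fragment (new_message_buffer()) when the receiver is executing on a dirty scheduler and its heap must not be touched. A schematic stand-in for that decision, with all names hypothetical:

    /* Choose a copy target based on the receiver's run state. Sketch only. */
    #include <stdio.h>
    #include <stdlib.h>

    #define PSFLG_DIRTY_RUNNING (1u << 0)

    typedef unsigned long Term;

    struct fragment { Term mem[8]; };

    static Term *alloc_for_reason(unsigned state, Term *target_heap,
                                  struct fragment **bpp)
    {
        if (state & PSFLG_DIRTY_RUNNING) {
            *bpp = malloc(sizeof **bpp);  /* detached fragment, attached later */
            return (*bpp)->mem;
        }
        *bpp = NULL;
        return target_heap;               /* safe to write to its heap directly */
    }

    int main(void)
    {
        Term heap[8];
        struct fragment *bp;
        Term *hp = alloc_for_reason(PSFLG_DIRTY_RUNNING, heap, &bp);
        hp[0] = 42;                       /* "copy" the reason term */
        printf("fragment used: %s\n", bp ? "yes" : "no");
        free(bp);
        return 0;
    }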
*/ -#ifndef ERTS_DIRTY_SCHEDULERS - (void) erts_smp_atomic32_read_bor_relb(&rp->state, - ERTS_PSFLG_PENDING_EXIT); -#else { - erts_aint32_t a = erts_smp_atomic32_read_nob(&rp->state); + erts_aint32_t a = erts_atomic32_read_nob(&rp->state); while (1) { erts_aint32_t n, e; int dwork; @@ -13426,7 +12588,7 @@ send_exit_signal(Process *c_p, /* current process if and only n |= ERTS_PSFLG_PENDING_EXIT; dwork = !!(n & ERTS_PSFLGS_DIRTY_WORK); n &= ~ERTS_PSFLGS_DIRTY_WORK; - a = erts_smp_atomic32_cmpxchg_mb(&rp->state, n, e); + a = erts_atomic32_cmpxchg_mb(&rp->state, n, e); if (a == e) { if (dwork) erts_schedule_process(rp, n, *rp_locks); @@ -13434,7 +12596,6 @@ send_exit_signal(Process *c_p, /* current process if and only } } } -#endif } } /* else: @@ -13446,17 +12607,6 @@ send_exit_signal(Process *c_p, /* current process if and only * that the receiver *will* exit; either on the pending * exit or by itself before seeing the pending exit. */ -#else /* !ERTS_SMP */ - erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state); - if (!(state & ERTS_PSFLG_EXITING)) { - set_proc_exiting(rp, - state, - (is_immed(rsn) || c_p == rp - ? rsn - : copy_object(rsn, rp)), - NULL); - } -#endif return -1; /* Receiver will exit */ } @@ -13504,9 +12654,9 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) ASSERT(is_node_name_atom(mon->u.pid)); dep = erts_sysname_to_connected_dist_entry(mon->u.pid); if (dep) { - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); rmon = erts_remove_monitor(&(dep->monitors), mon->ref); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); if (rmon) { ErtsDSigData dsd; int code = erts_dsig_prepare(&dsd, dep, NULL, @@ -13521,7 +12671,6 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) } erts_destroy_monitor(rmon); } - erts_deref_dist_entry(dep); } } else { ASSERT(is_pid(mon->u.pid) || is_port(mon->u.pid)); @@ -13532,7 +12681,7 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) goto done; } rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (rmon == NULL) { goto done; } @@ -13551,9 +12700,9 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) dep = external_pid_dist_entry(mon->u.pid); ASSERT(dep != NULL); if (dep) { - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); rmon = erts_remove_monitor(&(dep->monitors), mon->ref); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); if (rmon) { ErtsDSigData dsd; int code = erts_dsig_prepare(&dsd, dep, NULL, @@ -13604,15 +12753,15 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) } UnUseTmpHeapNoproc(3); /* else: demonitor while we exited, i.e. do nothing... 
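The loop above is a classic compare-and-swap retry: build the desired state word from the last observed one (raise ERTS_PSFLG_PENDING_EXIT, strip the dirty-work bits), attempt the exchange, and retry if another thread raced in between; only the winning thread kicks the process back to a normal scheduler. Its generic shape in C11 atomics, with illustrative flag names:

    /* CAS retry loop over a packed state word. Sketch only. */
    #include <stdatomic.h>
    #include <stdio.h>

    #define PENDING_EXIT (1u << 0)
    #define DIRTY_WORK   (1u << 1)

    static _Atomic unsigned state;

    static void set_pending_exit(void)
    {
        unsigned e = atomic_load_explicit(&state, memory_order_relaxed);
        for (;;) {
            unsigned n = (e | PENDING_EXIT) & ~DIRTY_WORK;
            int dwork = (e & DIRTY_WORK) != 0;
            if (atomic_compare_exchange_strong(&state, &e, n)) {
                if (dwork)
                    puts("kick normal scheduler");  /* erts_schedule_process() in the real code */
                return;
            }
            /* e now holds the current value; recompute and retry */
        }
    }

    int main(void)
    {
        atomic_store(&state, DIRTY_WORK);
        set_pending_exit();
        printf("state = %#x\n", atomic_load(&state));
        return 0;
    }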
*/ - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } else { /* external by pid or name */ ASSERT(is_external_pid(mon->u.pid)); dep = external_pid_dist_entry(mon->u.pid); ASSERT(dep != NULL); if (dep) { - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); rmon = erts_remove_monitor(&(dep->monitors), mon->ref); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); if (rmon) { ErtsDSigData dsd; int code = erts_dsig_prepare(&dsd, dep, NULL, @@ -13720,7 +12869,7 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext) /* We didn't exit the process and it is traced */ if (IS_TRACED_FL(rp, F_TRACE_PROCS)) { if (rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) { - erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND); + erts_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND); rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND; } trace_proc(NULL, 0, rp, am_getting_unlinked, p->common.id); @@ -13728,7 +12877,7 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext) } } ASSERT(rp != p); - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } } else if (is_external_pid(item)) { @@ -13738,14 +12887,14 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext) int code; ErtsDistLinkData dld; erts_remove_dist_link(&dld, p->common.id, item, dep); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); code = erts_dsig_prepare(&dsd, dep, p, ERTS_DSP_NO_LOCK, 0); if (code == ERTS_DSIG_PREP_CONNECTED) { code = erts_dsig_send_exit_tt(&dsd, p->common.id, item, reason, SEQ_TRACE_TOKEN(p)); ASSERT(code == ERTS_DSIG_SEND_OK); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); erts_destroy_dist_link(&dld); } } @@ -13756,12 +12905,11 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext) if(dep) { /* dist entries have node links in a separate structure to avoid confusion */ - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); rlnk = erts_remove_link(&(dep->node_links), p->common.id); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); if (rlnk) erts_destroy_link(rlnk); - erts_deref_dist_entry(dep); } break; @@ -13781,7 +12929,7 @@ resume_suspend_monitor(ErtsSuspendMonitor *smon, void *vc_p) ASSERT(suspendee != vc_p); if (smon->active) resume_process(suspendee, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); } erts_destroy_suspend_monitor(smon); } @@ -13810,18 +12958,13 @@ erts_do_exit_process(Process* p, Eterm reason) erts_exit(ERTS_DUMP_EXIT, "System process %T terminated: %T\n", p->common.id, reason); -#ifdef ERTS_SMP - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); /* By locking all locks (main lock is already locked) when going to exiting state (ERTS_PSFLG_EXITING), it is enough to take any lock when looking up a process (erts_pid2proc()) to prevent the looked up process from exiting until the lock has been released. */ - erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); -#endif + erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); -#ifndef ERTS_SMP - set_proc_self_exiting(p); -#else if (ERTS_PSFLG_PENDING_EXIT & set_proc_self_exiting(p)) { /* Process exited before pending exit was received... 
*/ p->pending_exit.reason = THE_NON_VALUE; @@ -13833,8 +12976,7 @@ erts_do_exit_process(Process* p, Eterm reason) cancel_suspend_of_suspendee(p, ERTS_PROC_LOCKS_ALL); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p); -#endif + ERTS_MSGQ_MV_INQ2PRIVQ(p); if (IS_TRACED(p)) { if (IS_TRACED_FL(p, F_TRACE_CALLS)) @@ -13853,7 +12995,7 @@ erts_do_exit_process(Process* p, Eterm reason) ASSERT(erts_proc_read_refc(p) > 0); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR); if (IS_TRACED_FL(p,F_TRACE_PROCS)) trace_proc(p, ERTS_PROC_LOCK_MAIN, p, am_exit, reason); @@ -13875,7 +13017,7 @@ erts_continue_exit_process(Process *p) ErtsMonitor *mon; ErtsProcLocks curr_locks = ERTS_PROC_LOCK_MAIN; Eterm reason = p->fvalue; - DistEntry *dep; + DistEntry *dep = NULL; erts_aint32_t state; int delay_del_proc = 0; @@ -13883,7 +13025,7 @@ erts_continue_exit_process(Process *p) int yield_allowed = 1; #endif - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p)); ASSERT(ERTS_PROC_IS_EXITING(p)); @@ -13897,7 +13039,6 @@ erts_continue_exit_process(Process *p) p->bif_timers = NULL; } -#ifdef ERTS_SMP if (p->flags & F_SCHDLR_ONLN_WAITQ) abort_sched_onln_chng_waitq(p); @@ -13941,7 +13082,6 @@ erts_continue_exit_process(Process *p) __FILE__, __LINE__, (int) ssr); } } -#endif if (p->flags & F_USING_DB) { if (erts_db_process_exiting(p, ERTS_PROC_LOCK_MAIN)) @@ -13950,24 +13090,20 @@ erts_continue_exit_process(Process *p) } erts_set_gc_state(p, 1); - state = erts_smp_atomic32_read_acqb(&p->state); + state = erts_atomic32_read_acqb(&p->state); if (state & ERTS_PSFLG_ACTIVE_SYS -#ifdef ERTS_DIRTY_SCHEDULERS || p->dirty_sys_tasks -#endif ) { if (cleanup_sys_tasks(p, state, CONTEXT_REDS) >= CONTEXT_REDS/2) goto yield; } #ifdef DEBUG - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); ASSERT(p->sys_task_qs == NULL); ASSERT(ERTS_PROC_GET_DELAYED_GC_TASK_QS(p) == NULL); -#ifdef ERTS_DIRTY_SCHEDULERS ASSERT(p->dirty_sys_tasks == NULL); -#endif - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); #endif if (p->flags & F_USING_DDLL) { @@ -14000,7 +13136,7 @@ erts_continue_exit_process(Process *p) if (IS_TRACED_FL(p, F_TRACE_SCHED_EXIT)) trace_sched(p, curr_locks, am_out_exited); - erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); curr_locks = ERTS_PROC_LOCKS_ALL; /* @@ -14023,23 +13159,19 @@ erts_continue_exit_process(Process *p) ErtsRunQueue *rq; rq = erts_get_runq_current(erts_proc_sched_data(p)); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); -#ifdef ERTS_SMP ASSERT(p->scheduler_data); ASSERT(p->scheduler_data->current_process == p); ASSERT(p->scheduler_data->free_process == NULL); p->scheduler_data->current_process = NULL; p->scheduler_data->free_process = p; -#else - erts_proc_inc_refc(p); /* Decremented in schedule() */ -#endif /* Time of death! 
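The comment in the hunk above carries the key invariant: because the exiting process takes all of its process locks before completing the transition to the exiting state, a looker-up holding any single lock is guaranteed the process cannot finish exiting underneath it. A toy pthread sketch of that invariant, assuming four lock slots with made-up names:

#include <pthread.h>

#define N_PROC_LOCKS 4   /* main, link, msgq, status -- illustrative */

typedef struct { pthread_mutex_t lck[N_PROC_LOCKS]; int exiting; } Proc;

/* Exiting side: already holds lock 0 (main); grabs all minor locks,
 * flips the state, then releases the minor locks again. */
void begin_exit(Proc *p)
{
    for (int i = 1; i < N_PROC_LOCKS; i++)
        pthread_mutex_lock(&p->lck[i]);
    p->exiting = 1;                      /* ERTS_PSFLG_EXITING */
    for (int i = N_PROC_LOCKS - 1; i > 0; i--)
        pthread_mutex_unlock(&p->lck[i]);
}

/* Lookup side: holding any one lock is enough -- begin_exit() cannot
 * complete while we hold it, so 'exiting' cannot flip under our feet. */
int lookup_and_use(Proc *p, int lock_ix)
{
    pthread_mutex_lock(&p->lck[lock_ix]);
    int ok = !p->exiting;
    /* ... use the process while the lock is held ... */
    pthread_mutex_unlock(&p->lck[lock_ix]);
    return ok;
}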
*/ erts_ptab_delete_element(&erts_proc, &p->common); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); } /* @@ -14051,7 +13183,7 @@ erts_continue_exit_process(Process *p) { /* Inactivate and notify free */ - erts_aint32_t n, e, a = erts_smp_atomic32_read_nob(&p->state); + erts_aint32_t n, e, a = erts_atomic32_read_nob(&p->state); int refc_inced = 0; while (1) { n = e = a; @@ -14062,12 +13194,11 @@ erts_continue_exit_process(Process *p) erts_proc_inc_refc(p); refc_inced = 1; } - a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); + a = erts_atomic32_cmpxchg_mb(&p->state, n, e); if (a == e) break; } -#ifdef ERTS_DIRTY_SCHEDULERS if (a & (ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_DIRTY_RUNNING_SYS)) { p->flags |= F_DELAYED_DEL_PROC; @@ -14077,18 +13208,20 @@ erts_continue_exit_process(Process *p) * when done with the process... */ } -#endif if (refc_inced && !(n & ERTS_PSFLG_IN_RUNQ)) erts_proc_dec_refc(p); } - - dep = (p->flags & F_DISTRIBUTION) ? erts_this_dist_entry : NULL; - erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL); + dep = ((p->flags & F_DISTRIBUTION) + ? ERTS_PROC_SET_DIST_ENTRY(p, NULL) + : NULL); + + erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL); if (dep) { - erts_do_net_exits(dep, reason); + erts_do_net_exits(dep, (reason == am_kill) ? am_killed : reason); + erts_deref_dist_entry(dep); } /* @@ -14120,12 +13253,10 @@ erts_continue_exit_process(Process *p) have none here */ } - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); -#ifdef ERTS_SMP erts_flush_trace_messages(p, ERTS_PROC_LOCK_MAIN); -#endif ERTS_TRACER_CLEAR(&ERTS_TRACER(p)); @@ -14140,20 +13271,20 @@ erts_continue_exit_process(Process *p) ASSERT(yield_allowed); #endif - ERTS_SMP_LC_ASSERT(curr_locks == erts_proc_lc_my_proc_locks(p)); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & curr_locks); + ERTS_LC_ASSERT(curr_locks == erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & curr_locks); p->i = (BeamInstr *) beam_continue_exit; if (!(curr_locks & ERTS_PROC_LOCK_STATUS)) { - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); curr_locks |= ERTS_PROC_LOCK_STATUS; } if (curr_locks != ERTS_PROC_LOCK_MAIN) - erts_smp_proc_unlock(p, ~ERTS_PROC_LOCK_MAIN & curr_locks); + erts_proc_unlock(p, ~ERTS_PROC_LOCK_MAIN & curr_locks); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p)); BUMP_ALL_REDS(p); } @@ -14189,7 +13320,7 @@ erts_program_counter_info(fmtfn_t to, void *to_arg, Process *p) erts_print(to, to_arg, "CP: %p (", p->cp); print_function_from_pc(to, to_arg, p->cp); erts_print(to, to_arg, ")\n"); - state = erts_smp_atomic32_read_acqb(&p->state); + state = erts_atomic32_read_acqb(&p->state); if (!(state & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_RUNNING_SYS | ERTS_PSFLG_GC))) { @@ -14270,8 +13401,7 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) { erts_print(to, to_arg, "=scheduler:%u\n", esdp->no); -#ifdef ERTS_SMP - flg = erts_smp_atomic32_read_dirty(&esdp->ssi->flags); + flg = erts_atomic32_read_dirty(&esdp->ssi->flags); erts_print(to, to_arg, "Scheduler Sleep Info Flags: "); for (i = 0; i < ERTS_SSI_FLGS_MAX && flg; i++) { erts_aint32_t chk = (1 << i); @@ -14298,7 +13428,6 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) { } } erts_print(to, to_arg, "\n"); -#endif flg = 
erts_atomic32_read_dirty(&esdp->ssi->aux_work); erts_print(to, to_arg, "Scheduler Sleep Info Aux Work: "); @@ -14341,12 +13470,12 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) { break; } erts_print(to, to_arg, "Length: %d\n", - erts_smp_atomic32_read_dirty(&esdp->run_queue->procs.prio_info[i].len)); + erts_atomic32_read_dirty(&esdp->run_queue->procs.prio_info[i].len)); } erts_print(to, to_arg, "Run Queue Port Length: %d\n", - erts_smp_atomic32_read_dirty(&esdp->run_queue->ports.info.len)); + erts_atomic32_read_dirty(&esdp->run_queue->ports.info.len)); - flg = erts_smp_atomic32_read_dirty(&esdp->run_queue->flags); + flg = erts_atomic32_read_dirty(&esdp->run_queue->flags); erts_print(to, to_arg, "Run Queue Flags: "); for (i = 0; i < ERTS_RUNQ_FLG_MAX && flg; i++) { erts_aint32_t chk = (1 << i); @@ -14418,7 +13547,7 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) { p = esdp->current_process; erts_print(to, to_arg, "Current Process: "); if (esdp->current_process && !(ERTS_TRACE_FLAGS(p) & F_SENSITIVE)) { - flg = erts_smp_atomic32_read_dirty(&p->state); + flg = erts_atomic32_read_dirty(&p->state); erts_print(to, to_arg, "%T\n", p->common.id); erts_print(to, to_arg, "Current Process State: "); @@ -14468,19 +13597,17 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) { */ void erts_halt(int code) { - if (-1 == erts_smp_atomic32_cmpxchg_acqb(&erts_halt_progress, + if (-1 == erts_atomic32_cmpxchg_acqb(&erts_halt_progress, erts_no_schedulers, -1)) { -#ifdef ERTS_DIRTY_SCHEDULERS ERTS_RUNQ_FLGS_SET(ERTS_DIRTY_CPU_RUNQ, ERTS_RUNQ_FLG_HALTING); ERTS_RUNQ_FLGS_SET(ERTS_DIRTY_IO_RUNQ, ERTS_RUNQ_FLG_HALTING); -#endif erts_halt_code = code; notify_reap_ports_relb(); } } -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) int erts_dbg_check_halloc_lock(Process *p) { @@ -14500,3 +13627,24 @@ erts_dbg_check_halloc_lock(Process *p) return 0; } #endif + +void +erts_debug_later_op_foreach(void (*callback)(void*), + void (*func)(void *, ErtsThrPrgrVal, void *), + void *arg) +{ + int six; + if (!erts_thr_progress_is_blocking()) + ERTS_INTERNAL_ERROR("Not blocking thread progress"); + + for (six = 0; six < erts_no_schedulers; six++) { + ErtsSchedulerData *esdp = &erts_aligned_scheduler_data[six].esd; + ErtsThrPrgrLaterOp *lop = esdp->aux_work_data.later_op.first; + + while (lop) { + if (lop->func == callback) + func(arg, lop->later, lop->data); + lop = lop->next; + } + } +} diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h index 639818c20c..66d7848f89 100644 --- a/erts/emulator/beam/erl_process.h +++ b/erts/emulator/beam/erl_process.h @@ -47,7 +47,6 @@ typedef struct process Process; #include "erl_port.h" #undef ERL_PORT_GET_PORT_TYPE_ONLY__ #include "erl_vm.h" -#include "erl_smp.h" #include "erl_message.h" #include "erl_process_dict.h" #include "erl_node_container_utils.h" @@ -106,27 +105,20 @@ struct saved_calls { }; extern Export exp_send, exp_receive, exp_timeout; -extern int erts_eager_check_io; extern int erts_sched_compact_load; extern int erts_sched_balance_util; extern Uint erts_no_schedulers; extern Uint erts_no_total_schedulers; -#ifdef ERTS_DIRTY_SCHEDULERS extern Uint erts_no_dirty_cpu_schedulers; extern Uint erts_no_dirty_io_schedulers; -#endif extern Uint erts_no_run_queues; extern int erts_sched_thread_suggested_stack_size; -#ifdef ERTS_DIRTY_SCHEDULERS extern int erts_dcpu_sched_thread_suggested_stack_size; extern int 
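The sleep-info and run-queue printers above all share one decoding idiom: walk the flag word bit by bit, print a name per set bit, and clear bits as you go so the loop can terminate early. A self-contained sketch of the idiom, with flag names invented for the example:

#include <stdio.h>

static const char *flag_names[] = { "SLEEPING", "POLL_SLEEPING",
                                    "TSE_SLEEPING", "WAITING", "SUSPENDED" };

/* Test each bit, print its name, clear it; once 'flg' is empty the
 * loop condition stops the walk early. */
void print_flags(unsigned flg)
{
    for (int i = 0; i < 5 && flg; i++) {
        unsigned chk = 1u << i;
        if (flg & chk) {
            printf("%s%s", flag_names[i], (flg & ~chk) ? " | " : "\n");
            flg &= ~chk;
        }
    }
}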
erts_dio_sched_thread_suggested_stack_size; -#endif #define ERTS_SCHED_THREAD_MIN_STACK_SIZE 20 /* Kilo words */ #define ERTS_SCHED_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */ -#ifdef ERTS_SMP #include "erl_bits.h" -#endif /* process priorities */ #define PRIORITY_MAX 0 @@ -224,31 +216,31 @@ extern int erts_dio_sched_thread_suggested_stack_size; ((FLGS) &= ~ERTS_RUNQ_FLG_EVACUATE((PRIO))) #define ERTS_RUNQ_FLGS_INIT(RQ, INIT) \ - erts_smp_atomic32_init_nob(&(RQ)->flags, (erts_aint32_t) (INIT)) + erts_atomic32_init_nob(&(RQ)->flags, (erts_aint32_t) (INIT)) #define ERTS_RUNQ_FLGS_SET(RQ, FLGS) \ - ((Uint32) erts_smp_atomic32_read_bor_relb(&(RQ)->flags, \ + ((Uint32) erts_atomic32_read_bor_relb(&(RQ)->flags, \ (erts_aint32_t) (FLGS))) #define ERTS_RUNQ_FLGS_SET_NOB(RQ, FLGS) \ - ((Uint32) erts_smp_atomic32_read_bor_nob(&(RQ)->flags, \ + ((Uint32) erts_atomic32_read_bor_nob(&(RQ)->flags, \ (erts_aint32_t) (FLGS))) #define ERTS_RUNQ_FLGS_BSET(RQ, MSK, FLGS) \ - ((Uint32) erts_smp_atomic32_read_bset_relb(&(RQ)->flags, \ + ((Uint32) erts_atomic32_read_bset_relb(&(RQ)->flags, \ (erts_aint32_t) (MSK), \ (erts_aint32_t) (FLGS))) #define ERTS_RUNQ_FLGS_UNSET(RQ, FLGS) \ - ((Uint32) erts_smp_atomic32_read_band_relb(&(RQ)->flags, \ + ((Uint32) erts_atomic32_read_band_relb(&(RQ)->flags, \ (erts_aint32_t) ~(FLGS))) #define ERTS_RUNQ_FLGS_UNSET_NOB(RQ, FLGS) \ - ((Uint32) erts_smp_atomic32_read_band_nob(&(RQ)->flags, \ + ((Uint32) erts_atomic32_read_band_nob(&(RQ)->flags, \ (erts_aint32_t) ~(FLGS))) #define ERTS_RUNQ_FLGS_GET(RQ) \ - ((Uint32) erts_smp_atomic32_read_acqb(&(RQ)->flags)) + ((Uint32) erts_atomic32_read_acqb(&(RQ)->flags)) #define ERTS_RUNQ_FLGS_GET_NOB(RQ) \ - ((Uint32) erts_smp_atomic32_read_nob(&(RQ)->flags)) + ((Uint32) erts_atomic32_read_nob(&(RQ)->flags)) #define ERTS_RUNQ_FLGS_GET_MB(RQ) \ - ((Uint32) erts_smp_atomic32_read_mb(&(RQ)->flags)) + ((Uint32) erts_atomic32_read_mb(&(RQ)->flags)) #define ERTS_RUNQ_FLGS_READ_BSET(RQ, MSK, FLGS) \ - ((Uint32) erts_smp_atomic32_read_bset_relb(&(RQ)->flags, \ + ((Uint32) erts_atomic32_read_bset_relb(&(RQ)->flags, \ (erts_aint32_t) (MSK), \ (erts_aint32_t) (FLGS))) @@ -365,20 +357,18 @@ typedef enum { typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo; -#ifdef ERTS_DIRTY_SCHEDULERS typedef struct { - erts_smp_spinlock_t lock; + erts_spinlock_t lock; ErtsSchedulerSleepInfo *list; } ErtsSchedulerSleepList; -#endif struct ErtsSchedulerSleepInfo_ { -#ifdef ERTS_SMP + struct ErtsSchedulerData_ *esdp; ErtsSchedulerSleepInfo *next; ErtsSchedulerSleepInfo *prev; - erts_smp_atomic32_t flags; + erts_atomic32_t flags; erts_tse_t *event; -#endif + struct erts_poll_thread *psi; erts_atomic32_t aux_work; }; @@ -422,7 +412,7 @@ typedef struct ErtsSchedulerData_ ErtsSchedulerData; typedef struct ErtsRunQueue_ ErtsRunQueue; typedef struct { - erts_smp_atomic32_t len; + erts_atomic32_t len; erts_aint32_t max_len; int reds; } ErtsRunQueueInfo; @@ -433,7 +423,6 @@ typedef struct { # define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT 1 #endif -#ifdef ERTS_SMP #undef ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT #define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT @@ -476,30 +465,25 @@ struct ErtsMigrationPaths_ { ErtsMigrationPath mpath[1]; }; -#endif /* ERTS_SMP */ struct ErtsRunQueue_ { int ix; - erts_smp_mtx_t mtx; - erts_smp_cnd_t cnd; + erts_mtx_t mtx; + erts_cnd_t cnd; -#ifdef ERTS_DIRTY_SCHEDULERS -#ifdef ERTS_SMP ErtsSchedulerSleepList sleepers; -#endif -#endif ErtsSchedulerData *scheduler; int waiting; /* < 0 in sys 
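The rewritten ERTS_RUNQ_FLGS_* macros above bottom out in atomic read-or/read-and operations that return the value the flags had *before* the update. A small C11 model showing why that return value matters; RQ_FLG_NONEMPTY is a made-up flag for the sketch:

#include <stdatomic.h>

typedef atomic_uint rq_flags_t;

/* Counterparts of ERTS_RUNQ_FLGS_SET/UNSET: the fetch_* ops hand back
 * the previous flag word, which lets callers test "was it already set?". */
static unsigned runq_flgs_set(rq_flags_t *f, unsigned flgs)
{
    return atomic_fetch_or_explicit(f, flgs, memory_order_release);
}

static unsigned runq_flgs_unset(rq_flags_t *f, unsigned flgs)
{
    return atomic_fetch_and_explicit(f, ~flgs, memory_order_release);
}

/* Typical use: only wake sleepers on the 0 -> 1 transition. */
#define RQ_FLG_NONEMPTY (1u << 0)
static int mark_nonempty(rq_flags_t *f)
{
    return !(runq_flgs_set(f, RQ_FLG_NONEMPTY) & RQ_FLG_NONEMPTY);
}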
schedule; > 0 on cnd variable */ int woken; - erts_smp_atomic32_t flags; + erts_atomic32_t flags; int check_balance_reds; int full_reds_history_sum; int full_reds_history[ERTS_FULL_REDS_HISTORY_SIZE]; int out_of_work_count; erts_aint32_t max_len; - erts_smp_atomic32_t len; + erts_atomic32_t len; int wakeup_other; int wakeup_other_reds; @@ -518,7 +502,7 @@ struct ErtsRunQueue_ { struct { ErtsMiscOpList *start; ErtsMiscOpList *end; - erts_smp_atomic_t evac_runq; + erts_atomic_t evac_runq; } misc; struct { @@ -531,9 +515,7 @@ struct ErtsRunQueue_ { #endif }; -#ifdef ERTS_SMP extern long erts_runq_supervision_interval; -#endif typedef union { ErtsRunQueue runq; @@ -581,17 +563,12 @@ typedef struct { int sched_id; ErtsSchedulerData *esdp; ErtsSchedulerSleepInfo *ssi; -#ifdef ERTS_SMP ErtsThrPrgrVal current_thr_prgr; ErtsThrPrgrVal latest_wakeup; -#endif struct { int ix; -#ifdef ERTS_SMP ErtsThrPrgrVal thr_prgr; -#endif } misc; -#ifdef ERTS_SMP struct { ErtsThrPrgrVal thr_prgr; } dd; @@ -604,24 +581,17 @@ typedef struct { ErtsThrPrgrLaterOp *first; ErtsThrPrgrLaterOp *last; } later_op; -#endif -#ifdef ERTS_USE_ASYNC_READY_Q struct { -#ifdef ERTS_SMP int need_thr_prgr; ErtsThrPrgrVal thr_prgr; -#endif void *queue; } async_ready; -#endif -#ifdef ERTS_SMP struct { Uint64 next; int *sched2jix; int jix; ErtsDelayedAuxWorkWakeupJob *job; } delayed_wakeup; -#endif struct { ErtsEtsAllYieldData ets_all; /* Other yielding operations... */ @@ -639,13 +609,11 @@ typedef struct { (&(ESDP)->aux_work_data.yield.NAME) void erts_notify_new_aux_yield_work(ErtsSchedulerData *esdp); -#ifdef ERTS_DIRTY_SCHEDULERS typedef enum { ERTS_DIRTY_CPU_SCHEDULER, ERTS_DIRTY_IO_SCHEDULER } ErtsDirtySchedulerType; -#endif struct ErtsSchedulerData_ { /* @@ -659,21 +627,17 @@ struct ErtsSchedulerData_ { ErtsTimerWheel *timer_wheel; ErtsNextTimeoutRef next_tmo_ref; ErtsHLTimerService *timer_service; -#ifdef ERTS_SMP ethr_tid tid; /* Thread id */ struct erl_bits_state erl_bits_state; /* erl_bits.c state */ void *match_pseudo_process; /* erl_db_util.c:db_prog_match() */ Process *free_process; ErtsThrPrgrData thr_progress_data; -#endif ErtsSchedulerSleepInfo *ssi; Process *current_process; ErtsSchedType type; Uint no; /* Scheduler number for normal schedulers */ -#ifdef ERTS_DIRTY_SCHEDULERS Uint dirty_no; /* Scheduler number for dirty schedulers */ Process *dirty_shadow_process; -#endif Port *current_port; ErtsRunQueue *run_queue; int virtual_reds; @@ -712,25 +676,23 @@ typedef union { } ErtsAlignedSchedulerData; extern ErtsAlignedSchedulerData *erts_aligned_scheduler_data; -#ifdef ERTS_DIRTY_SCHEDULERS extern ErtsAlignedSchedulerData *erts_aligned_dirty_cpu_scheduler_data; extern ErtsAlignedSchedulerData *erts_aligned_dirty_io_scheduler_data; -#endif -#ifndef ERTS_SMP -extern ErtsSchedulerData *erts_scheduler_data; -#endif -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) -int erts_smp_lc_runq_is_locked(ErtsRunQueue *); +#if defined(ERTS_ENABLE_LOCK_CHECK) +int erts_lc_runq_is_locked(ErtsRunQueue *); #endif +void +erts_debug_later_op_foreach(void (*callback)(void*), + void (*func)(void *, ErtsThrPrgrVal, void *), + void *arg); + #ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS -#ifdef ERTS_SMP void erts_empty_runq(ErtsRunQueue *rq); void erts_non_empty_runq(ErtsRunQueue *rq); -#endif /* @@ -738,86 +700,84 @@ void erts_non_empty_runq(ErtsRunQueue *rq); * other threads peek at values without run queue lock. 
*/ -ERTS_GLB_INLINE void erts_smp_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio); -ERTS_GLB_INLINE void erts_smp_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio); -ERTS_GLB_INLINE void erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi); +ERTS_GLB_INLINE void erts_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio); +ERTS_GLB_INLINE void erts_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio); +ERTS_GLB_INLINE void erts_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi); #if ERTS_GLB_INLINE_INCL_FUNC_DEF ERTS_GLB_INLINE void -erts_smp_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio) +erts_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio) { erts_aint32_t len; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); - len = erts_smp_atomic32_read_dirty(&rq->len); + len = erts_atomic32_read_dirty(&rq->len); -#ifdef ERTS_SMP if (len == 0) erts_non_empty_runq(rq); -#endif len++; if (rq->max_len < len) rq->max_len = len; ASSERT(len > 0); - erts_smp_atomic32_set_nob(&rq->len, len); + erts_atomic32_set_nob(&rq->len, len); - len = erts_smp_atomic32_read_dirty(&rqi->len); + len = erts_atomic32_read_dirty(&rqi->len); ASSERT(len >= 0); if (len == 0) { - ASSERT((erts_smp_atomic32_read_nob(&rq->flags) + ASSERT((erts_atomic32_read_nob(&rq->flags) & ((erts_aint32_t) (1 << prio))) == 0); - erts_smp_atomic32_read_bor_nob(&rq->flags, + erts_atomic32_read_bor_nob(&rq->flags, (erts_aint32_t) (1 << prio)); } len++; if (rqi->max_len < len) rqi->max_len = len; - erts_smp_atomic32_set_relb(&rqi->len, len); + erts_atomic32_set_relb(&rqi->len, len); } ERTS_GLB_INLINE void -erts_smp_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio) +erts_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio) { erts_aint32_t len; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); - len = erts_smp_atomic32_read_dirty(&rq->len); + len = erts_atomic32_read_dirty(&rq->len); len--; ASSERT(len >= 0); - erts_smp_atomic32_set_nob(&rq->len, len); + erts_atomic32_set_nob(&rq->len, len); - len = erts_smp_atomic32_read_dirty(&rqi->len); + len = erts_atomic32_read_dirty(&rqi->len); len--; ASSERT(len >= 0); if (len == 0) { - ASSERT((erts_smp_atomic32_read_nob(&rq->flags) + ASSERT((erts_atomic32_read_nob(&rq->flags) & ((erts_aint32_t) (1 << prio)))); - erts_smp_atomic32_read_band_nob(&rq->flags, + erts_atomic32_read_band_nob(&rq->flags, ~((erts_aint32_t) (1 << prio))); } - erts_smp_atomic32_set_relb(&rqi->len, len); + erts_atomic32_set_relb(&rqi->len, len); } ERTS_GLB_INLINE void -erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi) +erts_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi) { erts_aint32_t len; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); - len = erts_smp_atomic32_read_dirty(&rqi->len); + len = erts_atomic32_read_dirty(&rqi->len); ASSERT(rqi->max_len >= len); rqi->max_len = len; } #endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ -#define RUNQ_READ_LEN(X) erts_smp_atomic32_read_nob((X)) +#define RUNQ_READ_LEN(X) erts_atomic32_read_nob((X)) #endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */ @@ -835,14 +795,15 @@ erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi) #define ERTS_PSD_NIF_TRAP_EXPORT 5 #define ERTS_PSD_ETS_OWNED_TABLES 6 #define ERTS_PSD_ETS_FIXED_TABLES 7 -#define ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 8 +#define ERTS_PSD_DIST_ENTRY 8 +#define 
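erts_inc_runq_len()/erts_dec_runq_len() above combine two pieces of state: a length written with plain atomic stores (safe because the run-queue lock is held) and a per-priority bit in the queue flags maintained on the 0→1 and 1→0 transitions. A compact model of the same bookkeeping, assuming it is likewise only called under a lock:

#include <stdatomic.h>

typedef struct {
    atomic_int len[4];      /* one counter per priority, illustrative */
    atomic_uint flags;      /* bit (1 << prio) set while len[prio] > 0 */
} RunQ;

static void inc_runq_len(RunQ *rq, int prio)
{
    int len = atomic_load_explicit(&rq->len[prio], memory_order_relaxed);
    if (len == 0)   /* 0 -> 1: advertise the priority as non-empty */
        atomic_fetch_or_explicit(&rq->flags, 1u << prio,
                                 memory_order_relaxed);
    atomic_store_explicit(&rq->len[prio], len + 1, memory_order_release);
}

static void dec_runq_len(RunQ *rq, int prio)
{
    int len = atomic_load_explicit(&rq->len[prio], memory_order_relaxed);
    if (len == 1)   /* 1 -> 0: clear the non-empty bit */
        atomic_fetch_and_explicit(&rq->flags, ~(1u << prio),
                                  memory_order_relaxed);
    atomic_store_explicit(&rq->len[prio], len - 1, memory_order_release);
}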
ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 9 /* keep last... */ -#define ERTS_PSD_SIZE 9 +#define ERTS_PSD_SIZE 10 #if !defined(HIPE) # undef ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF # undef ERTS_PSD_SIZE -# define ERTS_PSD_SIZE 8 +# define ERTS_PSD_SIZE 9 #endif typedef struct { @@ -876,6 +837,9 @@ typedef struct { #define ERTS_PSD_ETS_FIXED_TABLES_GET_LOCKS ERTS_PROC_LOCK_MAIN #define ERTS_PSD_ETS_FIXED_TABLES_SET_LOCKS ERTS_PROC_LOCK_MAIN +#define ERTS_PSD_DIST_ENTRY_GET_LOCKS ERTS_PROC_LOCK_MAIN +#define ERTS_PSD_DIST_ENTRY_SET_LOCKS ERTS_PROC_LOCK_MAIN + typedef struct { ErtsProcLocks get_locks; ErtsProcLocks set_locks; @@ -890,7 +854,7 @@ extern ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE]; #define ERTS_SCHED_STAT_MODIFY_CLEAR 3 typedef struct { - erts_smp_spinlock_t lock; + erts_spinlock_t lock; int enabled; struct { Eterm name; @@ -911,7 +875,6 @@ typedef struct { typedef struct ErtsProcSysTask_ ErtsProcSysTask; typedef struct ErtsProcSysTaskQs_ ErtsProcSysTaskQs; -#ifdef ERTS_SMP typedef struct ErtsPendingSuspend_ ErtsPendingSuspend; struct ErtsPendingSuspend_ { @@ -924,7 +887,6 @@ struct ErtsPendingSuspend_ { Eterm pid); }; -#endif /* Defines to ease the change of memory architecture */ @@ -1076,23 +1038,18 @@ struct process { ErlHeapFragment* live_hf_end; ErtsMessage *msg_frag; /* Pointer to message fragment list */ Uint mbuf_sz; /* Total size of heap fragments and message fragments */ - erts_smp_atomic_t psd; /* Rarely used process specific data */ + erts_atomic_t psd; /* Rarely used process specific data */ Uint64 bin_vheap_sz; /* Virtual heap block size for binaries */ Uint64 bin_old_vheap_sz; /* Virtual old heap block size for binaries */ Uint64 bin_old_vheap; /* Virtual old heap size for binaries */ ErtsProcSysTaskQs *sys_task_qs; -#ifdef ERTS_DIRTY_SCHEDULERS ErtsProcSysTask *dirty_sys_tasks; -#endif - erts_smp_atomic32_t state; /* Process state flags (see ERTS_PSFLG_*) */ -#ifdef ERTS_DIRTY_SCHEDULERS - erts_smp_atomic32_t dirty_state; /* Process dirty state flags (see ERTS_PDSFLG_*) */ -#endif + erts_atomic32_t state; /* Process state flags (see ERTS_PSFLG_*) */ + erts_atomic32_t dirty_state; /* Process dirty state flags (see ERTS_PDSFLG_*) */ -#ifdef ERTS_SMP ErlMessageInQueue msg_inq; ErlTraceMessageQueue *trace_msg_q; ErtsPendExit pending_exit; @@ -1100,11 +1057,10 @@ struct process { ErtsSchedulerData *scheduler_data; Eterm suspendee; ErtsPendingSuspend *pending_suspenders; - erts_smp_atomic_t run_queue; + erts_atomic_t run_queue; #ifdef HIPE struct hipe_process_state_smp hipe_smp; #endif -#endif #ifdef CHECK_FOR_HOLES Eterm* last_htop; /* No need to scan the heap below this point. */ @@ -1249,7 +1205,6 @@ void erts_check_for_holes(Process* p); #define ERTS_PSFLGS_GET_PRQ_PRIO(PSFLGS) \ (((PSFLGS) >> ERTS_PSFLGS_PRQ_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK) -#ifdef ERTS_DIRTY_SCHEDULERS /* * Flags in the dirty_state field. @@ -1276,7 +1231,6 @@ void erts_check_for_holes(Process* p); | ERTS_PDSFLG_IN_CPU_PRQ_HIGH \ | ERTS_PDSFLG_IN_CPU_PRQ_NORMAL\ | ERTS_PDSFLG_IN_CPU_PRQ_LOW) -#endif /* @@ -1367,7 +1321,7 @@ Eterm* erts_heap_alloc(Process* p, Uint need, Uint xtra); Eterm* erts_set_hole_marker(Eterm* ptr, Uint sz); #endif -extern erts_smp_rwmtx_t erts_cpu_bind_rwmtx; +extern erts_rwmtx_t erts_cpu_bind_rwmtx; /* If any of the erts_system_monitor_* variables are set (enabled), ** erts_system_monitor must be != NIL, to allow testing on just ** the erts_system_monitor_* variables. 
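The hunk above grows the per-process PSD table: the dist entry gets slot 8, the HIPE-only saved-calls buffer moves to slot 9, and ERTS_PSD_SIZE becomes 10 (9 when HIPE is disabled and the last slot is dropped). A miniature of the slot table; note that set returns the previous value, which is what lets erts_continue_exit_process() take over the DistEntry reference with ERTS_PROC_SET_DIST_ENTRY(p, NULL):

#include <stddef.h>

enum {
    PSD_ERROR_HANDLER,    /* ... slots 0..7 elided ... */
    PSD_DIST_ENTRY = 8,   /* new in this patch */
    PSD_SUSPENDED_SAVED_CALLS_BUF = 9,
    PSD_SIZE = 10
};

typedef struct { void *data[PSD_SIZE]; } Psd;

/* Swap a slot and hand ownership of the old value to the caller. */
static void *psd_set(Psd *psd, int ix, void *val)
{
    void *old = psd->data[ix];
    psd->data[ix] = val;
    return old;
}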
@@ -1541,20 +1495,14 @@ extern int erts_system_profile_ts_type; } \ } while (0) -#if defined(ERTS_DIRTY_SCHEDULERS) && defined(ERTS_SMP) #define ERTS_NUM_DIRTY_CPU_RUNQS 1 #define ERTS_NUM_DIRTY_IO_RUNQS 1 -#else -#define ERTS_NUM_DIRTY_CPU_RUNQS 0 -#define ERTS_NUM_DIRTY_IO_RUNQS 0 -#endif #define ERTS_NUM_DIRTY_RUNQS (ERTS_NUM_DIRTY_CPU_RUNQS+ERTS_NUM_DIRTY_IO_RUNQS) #define ERTS_RUNQ_IX(IX) \ (ASSERT(0 <= (IX) && (IX) < erts_no_run_queues+ERTS_NUM_DIRTY_RUNQS), \ &erts_aligned_run_queues[(IX)].runq) -#ifdef ERTS_DIRTY_SCHEDULERS #define ERTS_RUNQ_IX_IS_DIRTY(IX) \ (ASSERT(0 <= (IX) && (IX) < erts_no_run_queues+ERTS_NUM_DIRTY_RUNQS), \ (erts_no_run_queues <= (IX))) @@ -1565,13 +1513,9 @@ extern int erts_system_profile_ts_type; #define ERTS_DIRTY_IO_RUNQ (&erts_aligned_run_queues[erts_no_run_queues+1].runq) #define ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(RQ) ((RQ) == ERTS_DIRTY_CPU_RUNQ) #define ERTS_RUNQ_IS_DIRTY_IO_RUNQ(RQ) ((RQ) == ERTS_DIRTY_IO_RUNQ) -#else -#define ERTS_RUNQ_IX_IS_DIRTY(IX) 0 -#endif #define ERTS_SCHEDULER_IX(IX) \ (ASSERT(0 <= (IX) && (IX) < erts_no_schedulers), \ &erts_aligned_scheduler_data[(IX)].esd) -#ifdef ERTS_DIRTY_SCHEDULERS #define ERTS_DIRTY_CPU_SCHEDULER_IX(IX) \ (ASSERT(0 <= (IX) && (IX) < erts_no_dirty_cpu_schedulers), \ &erts_aligned_dirty_cpu_scheduler_data[(IX)].esd) @@ -1584,24 +1528,12 @@ extern int erts_system_profile_ts_type; ((ESDP)->type == ERTS_SCHED_DIRTY_CPU) #define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) \ ((ESDP)->type == ERTS_SCHED_DIRTY_IO) -#else /* !ERTS_DIRTY_SCHEDULERS */ -#define ERTS_RUNQ_IX_IS_DIRTY(IX) 0 -#define ERTS_SCHEDULER_IS_DIRTY(ESDP) 0 -#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) 0 -#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) 0 -#endif void erts_pre_init_process(void); void erts_late_init_process(void); void erts_early_init_scheduling(int); -void erts_init_scheduling(int, int -#ifdef ERTS_DIRTY_SCHEDULERS - , int, int, int -#endif - ); -#ifdef ERTS_DIRTY_SCHEDULERS +void erts_init_scheduling(int, int, int, int, int, int); void erts_execute_dirty_system_task(Process *c_p); -#endif int erts_set_gc_state(Process *c_p, int enable); Eterm erts_sched_wall_time_request(Process *c_p, int set, int enable, int dirty_cpu, int want_dirty_io); @@ -1799,14 +1731,11 @@ void erts_schedule_ets_free_fixation(Eterm pid, struct db_fixation*); void erts_schedule_flush_trace_messages(Process *proc, int force_on_proc); int erts_flush_trace_messages(Process *c_p, ErtsProcLocks locks); -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) int erts_dbg_check_halloc_lock(Process *p); #endif -#if defined(ERTS_SMP) || defined(ERTS_DIRTY_SCHEDULERS) void erts_schedulers_state(Uint *, Uint *, Uint *, Uint *, Uint *, Uint *, Uint *, Uint *); -#endif -#ifdef ERTS_SMP ErtsSchedSuspendResult erts_set_schedulers_online(Process *p, ErtsProcLocks plocks, @@ -1821,14 +1750,9 @@ void erts_start_schedulers(void); void erts_alloc_notify_delayed_dealloc(int); void erts_alloc_ensure_handle_delayed_dealloc_call(int); void erts_notify_canceled_timer(ErtsSchedulerData *, int); -#endif -#if ERTS_USE_ASYNC_READY_Q void erts_notify_check_async_ready_queue(void *); -#endif -#ifdef ERTS_SMP void erts_notify_code_ix_activation(Process* p, ErtsThrPrgrVal later); void erts_notify_finish_breakpointing(Process* p); -#endif void erts_schedule_misc_aux_work(int sched_id, void (*func)(void *), void *arg); @@ -1897,13 +1821,9 @@ int erts_send_exit_signal(Process *, Eterm, Process *, Uint32); -#ifdef ERTS_SMP void erts_handle_pending_exit(Process *, 
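With the conditional variants gone, the two dirty run queues are unconditionally appended after the normal ones, which is what the ERTS_DIRTY_*_RUNQ and ERTS_RUNQ_IX_IS_DIRTY macros above encode. A sketch of that index layout, where n_normal stands for erts_no_run_queues:

#include <assert.h>

enum rq_kind { RQ_NORMAL, RQ_DIRTY_CPU, RQ_DIRTY_IO };

/* Normal run queues occupy [0, n_normal); dirty CPU sits at n_normal
 * and dirty IO at n_normal + 1. */
static enum rq_kind runq_kind(int ix, int n_normal)
{
    assert(0 <= ix && ix < n_normal + 2);
    if (ix < n_normal)  return RQ_NORMAL;
    if (ix == n_normal) return RQ_DIRTY_CPU;
    return RQ_DIRTY_IO;
}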
ErtsProcLocks); #define ERTS_PROC_PENDING_EXIT(P) \ - (ERTS_PSFLG_PENDING_EXIT & erts_smp_atomic32_read_acqb(&(P)->state)) -#else -#define ERTS_PROC_PENDING_EXIT(P) 0 -#endif + (ERTS_PSFLG_PENDING_EXIT & erts_atomic32_read_acqb(&(P)->state)) void erts_deep_process_dump(fmtfn_t, void *); @@ -1933,19 +1853,7 @@ do { \ # define ERTS_VERIFY_UNUSED_TEMP_ALLOC(ESDP) #endif -#if defined(ERTS_SMP) || defined(USE_THREADS) ErtsSchedulerData *erts_get_scheduler_data(void); -#else -ERTS_GLB_INLINE ErtsSchedulerData *erts_get_scheduler_data(void); -#if ERTS_GLB_INLINE_INCL_FUNC_DEF - -ERTS_GLB_INLINE -ErtsSchedulerData *erts_get_scheduler_data(void) -{ - return erts_scheduler_data; -} -#endif -#endif void erts_schedule_process(Process *, erts_aint32_t, ErtsProcLocks); @@ -1957,7 +1865,7 @@ ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p, ErtsProcLocks locks) { /* No barrier needed, due to msg lock */ - erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state); + erts_aint32_t state = erts_atomic32_read_nob(&p->state); if (!(state & ERTS_PSFLG_ACTIVE)) erts_schedule_process(p, state, locks); } @@ -1967,7 +1875,7 @@ erts_schedule_dirty_sys_execution(Process *c_p) { erts_aint32_t a, n, e; - a = erts_smp_atomic32_read_nob(&c_p->state); + a = erts_atomic32_read_nob(&c_p->state); /* * Only a currently executing process schedules @@ -1983,7 +1891,7 @@ erts_schedule_dirty_sys_execution(Process *c_p) | ERTS_PSFLG_PENDING_EXIT))) { e = a; n = a | ERTS_PSFLG_DIRTY_ACTIVE_SYS; - a = erts_smp_atomic32_cmpxchg_mb(&c_p->state, n, e); + a = erts_atomic32_cmpxchg_mb(&c_p->state, n, e); if (a == e) break; /* dirty-active-sys set */ } @@ -1991,21 +1899,21 @@ erts_schedule_dirty_sys_execution(Process *c_p) #endif -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) #define ERTS_PROCESS_LOCK_ONLY_LOCK_CHECK_PROTO__ #include "erl_process_lock.h" #undef ERTS_PROCESS_LOCK_ONLY_LOCK_CHECK_PROTO__ -#define ERTS_SMP_LC_CHK_RUNQ_LOCK(RQ, L) \ +#define ERTS_LC_CHK_RUNQ_LOCK(RQ, L) \ do { \ if ((L)) \ - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked((RQ))); \ + ERTS_LC_ASSERT(erts_lc_runq_is_locked((RQ))); \ else \ - ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked((RQ))); \ + ERTS_LC_ASSERT(!erts_lc_runq_is_locked((RQ))); \ } while (0) #else -#define ERTS_SMP_LC_CHK_RUNQ_LOCK(RQ, L) +#define ERTS_LC_CHK_RUNQ_LOCK(RQ, L) #endif void *erts_psd_set_init(Process *p, int ix, void *data); @@ -2021,22 +1929,22 @@ ERTS_GLB_INLINE void * erts_psd_get(Process *p, int ix) { ErtsPSD *psd; -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) ErtsProcLocks locks = erts_proc_lc_my_proc_locks(p); if (ERTS_LC_PSD_ANY_LOCK == erts_psd_required_locks[ix].get_locks) - ERTS_SMP_LC_ASSERT(locks || erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(locks || erts_thr_progress_is_blocking()); else { locks &= erts_psd_required_locks[ix].get_locks; - ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].get_locks == locks + ERTS_LC_ASSERT(erts_psd_required_locks[ix].get_locks == locks || erts_thr_progress_is_blocking()); } #endif - psd = (ErtsPSD *) erts_smp_atomic_read_nob(&p->psd); + psd = (ErtsPSD *) erts_atomic_read_nob(&p->psd); ASSERT(0 <= ix && ix < ERTS_PSD_SIZE); if (!psd) return NULL; - ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER; + ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER; return psd->data[ix]; } @@ -2044,30 +1952,28 @@ ERTS_GLB_INLINE void * erts_psd_set(Process *p, int ix, void *data) { ErtsPSD *psd; -#if defined(ERTS_SMP) && 
defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) ErtsProcLocks locks = erts_proc_lc_my_proc_locks(p); - erts_aint32_t state = state = erts_smp_atomic32_read_nob(&p->state); + erts_aint32_t state = erts_atomic32_read_nob(&p->state); if (!(state & ERTS_PSFLG_FREE)) { if (ERTS_LC_PSD_ANY_LOCK == erts_psd_required_locks[ix].set_locks) - ERTS_SMP_LC_ASSERT(locks || erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(locks || erts_thr_progress_is_blocking()); else { locks &= erts_psd_required_locks[ix].set_locks; - ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].set_locks == locks + ERTS_LC_ASSERT(erts_psd_required_locks[ix].set_locks == locks || erts_thr_progress_is_blocking()); } } #endif - psd = (ErtsPSD *) erts_smp_atomic_read_nob(&p->psd); + psd = (ErtsPSD *) erts_atomic_read_nob(&p->psd); ASSERT(0 <= ix && ix < ERTS_PSD_SIZE); if (psd) { void *old; -#ifdef ERTS_SMP #ifdef ETHR_ORDERED_READ_DEPEND ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore); #else ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreStore); #endif -#endif old = psd->data[ix]; psd->data[ix] = data; return old; @@ -2104,6 +2010,11 @@ erts_psd_set(Process *p, int ix, void *data) #define ERTS_PROC_SET_NIF_TRAP_EXPORT(P, NTE) \ erts_psd_set((P), ERTS_PSD_NIF_TRAP_EXPORT, (void *) (NTE)) +#define ERTS_PROC_GET_DIST_ENTRY(P) \ + ((DistEntry *) erts_psd_get((P), ERTS_PSD_DIST_ENTRY)) +#define ERTS_PROC_SET_DIST_ENTRY(P, DE) \ + ((DistEntry *) erts_psd_set((P), ERTS_PSD_DIST_ENTRY, (void *) (DE))) + #ifdef HIPE #define ERTS_PROC_GET_SUSPENDED_SAVED_CALLS_BUF(P) \ ((struct saved_calls *) erts_psd_get((P), ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF)) @@ -2147,7 +2058,6 @@ erts_proc_set_error_handler(Process *p, Eterm handler) #ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS -#ifdef ERTS_SMP #include "erl_thr_progress.h" @@ -2249,7 +2159,6 @@ erts_check_emigration_need(ErtsRunQueue *c_rq, int prio) #endif -#endif #endif @@ -2260,13 +2169,13 @@ ERTS_GLB_INLINE Eterm erts_get_current_pid(void); ERTS_GLB_INLINE Uint erts_get_scheduler_id(void); ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_proc(Process *p); ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_current(ErtsSchedulerData *esdp); -ERTS_GLB_INLINE void erts_smp_runq_lock(ErtsRunQueue *rq); -ERTS_GLB_INLINE int erts_smp_runq_trylock(ErtsRunQueue *rq); -ERTS_GLB_INLINE void erts_smp_runq_unlock(ErtsRunQueue *rq); -ERTS_GLB_INLINE void erts_smp_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq); -ERTS_GLB_INLINE void erts_smp_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq); -ERTS_GLB_INLINE void erts_smp_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2); -ERTS_GLB_INLINE void erts_smp_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2); +ERTS_GLB_INLINE void erts_runq_lock(ErtsRunQueue *rq); +ERTS_GLB_INLINE int erts_runq_trylock(ErtsRunQueue *rq); +ERTS_GLB_INLINE void erts_runq_unlock(ErtsRunQueue *rq); +ERTS_GLB_INLINE void erts_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq); +ERTS_GLB_INLINE void erts_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq); +ERTS_GLB_INLINE void erts_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2); +ERTS_GLB_INLINE void erts_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2); ERTS_GLB_INLINE ErtsMessage *erts_alloc_message_heap_state(Process *pp, erts_aint32_t *psp, @@ -2291,11 +2200,7 @@ ErtsSchedulerData *erts_proc_sched_data(Process *c_p) { ErtsSchedulerData *esdp; ASSERT(c_p); -#if !defined(ERTS_SMP) - esdp = erts_get_scheduler_data(); -#else esdp = c_p->scheduler_data; -# if defined(ERTS_DIRTY_SCHEDULERS) if (esdp) { ASSERT(esdp ==
erts_get_scheduler_data()); ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp)); @@ -2305,8 +2210,6 @@ ErtsSchedulerData *erts_proc_sched_data(Process *c_p) ASSERT(esdp); ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp)); } -# endif -#endif ASSERT(esdp); return esdp; } @@ -2337,124 +2240,94 @@ Eterm erts_get_current_pid(void) ERTS_GLB_INLINE Uint erts_get_scheduler_id(void) { -#ifdef ERTS_SMP ErtsSchedulerData *esdp = erts_get_scheduler_data(); -#ifdef ERTS_DIRTY_SCHEDULERS if (esdp && ERTS_SCHEDULER_IS_DIRTY(esdp)) return 0; else -#endif return esdp ? esdp->no : (Uint) 0; -#else - return erts_get_scheduler_data() ? (Uint) 1 : (Uint) 0; -#endif } ERTS_GLB_INLINE ErtsRunQueue * erts_get_runq_proc(Process *p) { -#ifdef ERTS_SMP ASSERT(ERTS_AINT_NULL != erts_atomic_read_nob(&p->run_queue)); return (ErtsRunQueue *) erts_atomic_read_nob(&p->run_queue); -#else - return ERTS_RUNQ_IX(0); -#endif } ERTS_GLB_INLINE ErtsRunQueue * erts_get_runq_current(ErtsSchedulerData *esdp) { ASSERT(!esdp || esdp == erts_get_scheduler_data()); -#ifdef ERTS_SMP if (!esdp) esdp = erts_get_scheduler_data(); return esdp->run_queue; -#else - return ERTS_RUNQ_IX(0); -#endif } ERTS_GLB_INLINE void -erts_smp_runq_lock(ErtsRunQueue *rq) +erts_runq_lock(ErtsRunQueue *rq) { -#ifdef ERTS_SMP - erts_smp_mtx_lock(&rq->mtx); -#endif + erts_mtx_lock(&rq->mtx); } ERTS_GLB_INLINE int -erts_smp_runq_trylock(ErtsRunQueue *rq) +erts_runq_trylock(ErtsRunQueue *rq) { -#ifdef ERTS_SMP - return erts_smp_mtx_trylock(&rq->mtx); -#else - return 0; -#endif + return erts_mtx_trylock(&rq->mtx); } ERTS_GLB_INLINE void -erts_smp_runq_unlock(ErtsRunQueue *rq) +erts_runq_unlock(ErtsRunQueue *rq) { -#ifdef ERTS_SMP - erts_smp_mtx_unlock(&rq->mtx); -#endif + erts_mtx_unlock(&rq->mtx); } ERTS_GLB_INLINE void -erts_smp_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq) +erts_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq) { -#ifdef ERTS_SMP - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&rq->mtx)); + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&rq->mtx)); if (xrq != rq) { - if (erts_smp_mtx_trylock(&xrq->mtx) == EBUSY) { + if (erts_mtx_trylock(&xrq->mtx) == EBUSY) { if (rq < xrq) - erts_smp_mtx_lock(&xrq->mtx); + erts_mtx_lock(&xrq->mtx); else { - erts_smp_mtx_unlock(&rq->mtx); - erts_smp_mtx_lock(&xrq->mtx); - erts_smp_mtx_lock(&rq->mtx); + erts_mtx_unlock(&rq->mtx); + erts_mtx_lock(&xrq->mtx); + erts_mtx_lock(&rq->mtx); } } } -#endif } ERTS_GLB_INLINE void -erts_smp_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq) +erts_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq) { -#ifdef ERTS_SMP if (xrq != rq) - erts_smp_mtx_unlock(&xrq->mtx); -#endif + erts_mtx_unlock(&xrq->mtx); } ERTS_GLB_INLINE void -erts_smp_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2) +erts_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2) { -#ifdef ERTS_SMP ASSERT(rq1 && rq2); if (rq1 == rq2) - erts_smp_mtx_lock(&rq1->mtx); + erts_mtx_lock(&rq1->mtx); else if (rq1 < rq2) { - erts_smp_mtx_lock(&rq1->mtx); - erts_smp_mtx_lock(&rq2->mtx); + erts_mtx_lock(&rq1->mtx); + erts_mtx_lock(&rq2->mtx); } else { - erts_smp_mtx_lock(&rq2->mtx); - erts_smp_mtx_lock(&rq1->mtx); + erts_mtx_lock(&rq2->mtx); + erts_mtx_lock(&rq1->mtx); } -#endif } ERTS_GLB_INLINE void -erts_smp_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2) +erts_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2) { -#ifdef ERTS_SMP ASSERT(rq1 && rq2); - erts_smp_mtx_unlock(&rq1->mtx); + erts_mtx_unlock(&rq1->mtx); if (rq1 != rq2) - erts_smp_mtx_unlock(&rq2->mtx); -#endif + erts_mtx_unlock(&rq2->mtx); } ERTS_GLB_INLINE ErtsMessage * @@ -2486,7 +2359,7 @@ 
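erts_xrunq_lock() and erts_runqs_lock() above avoid deadlock by ordering run-queue mutexes by address: a trylock is attempted first, and only if the caller sits on the wrong side of the order does it back out and re-acquire. The same discipline as a standalone pthread sketch:

#include <pthread.h>

typedef struct { pthread_mutex_t mtx; } RQ;

/* Model of erts_xrunq_lock(): we already hold rq, want xrq too. */
void xrunq_lock(RQ *rq, RQ *xrq)
{
    if (xrq == rq)
        return;
    if (pthread_mutex_trylock(&xrq->mtx) != 0) {
        if (rq < xrq)
            pthread_mutex_lock(&xrq->mtx);      /* order already ok */
        else {
            pthread_mutex_unlock(&rq->mtx);     /* back out ... */
            pthread_mutex_lock(&xrq->mtx);      /* ... and retake in */
            pthread_mutex_lock(&rq->mtx);       /* address order */
        }
    }
}

/* Model of erts_runqs_lock(): take both from scratch, lower address first. */
void runqs_lock(RQ *rq1, RQ *rq2)
{
    if (rq1 == rq2)
        pthread_mutex_lock(&rq1->mtx);
    else if (rq1 < rq2) {
        pthread_mutex_lock(&rq1->mtx);
        pthread_mutex_lock(&rq2->mtx);
    } else {
        pthread_mutex_lock(&rq2->mtx);
        pthread_mutex_lock(&rq1->mtx);
    }
}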
erts_alloc_message_heap(Process *pp, Eterm **hpp, ErlOffHeap **ohpp) { - erts_aint32_t state = pp ? erts_smp_atomic32_read_nob(&pp->state) : 0; + erts_aint32_t state = pp ? erts_atomic32_read_nob(&pp->state) : 0; return erts_alloc_message_heap_state(pp, &state, plp, sz, hpp, ohpp); } @@ -2500,7 +2373,7 @@ erts_shrink_message_heap(ErtsMessage **msgpp, Process *pp, *msgpp = erts_shrink_message(*msgpp, used_hp - start_hp, brefs, brefs_size); else if (!(*msgpp)->data.attached) { - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(pp)); HRelease(pp, end_hp, used_hp); } @@ -2562,7 +2435,6 @@ ERTS_TIME2REDS_IMPL__(ErtsMonotonicTime start, ErtsMonotonicTime end) } #endif -#ifdef ERTS_SMP Process *erts_pid2proc_not_running(Process *, ErtsProcLocks, @@ -2575,37 +2447,29 @@ Process *erts_pid2proc_nropt(Process *c_p, extern int erts_disable_proc_not_running_opt; #ifdef DEBUG -#define ERTS_SMP_ASSERT_IS_NOT_EXITING(P) \ +#define ERTS_ASSERT_IS_NOT_EXITING(P) \ do { ASSERT(!ERTS_PROC_IS_EXITING((P))); } while (0) #else -#define ERTS_SMP_ASSERT_IS_NOT_EXITING(P) +#define ERTS_ASSERT_IS_NOT_EXITING(P) #endif -#else /* !ERTS_SMP */ - -#define ERTS_SMP_ASSERT_IS_NOT_EXITING(P) - -#define erts_pid2proc_not_running erts_pid2proc -#define erts_pid2proc_nropt erts_pid2proc - -#endif #define ERTS_PROC_IS_EXITING(P) \ - (ERTS_PSFLG_EXITING & erts_smp_atomic32_read_acqb(&(P)->state)) + (ERTS_PSFLG_EXITING & erts_atomic32_read_acqb(&(P)->state)) /* Minimum NUMBER of processes for a small system to start */ #define ERTS_MIN_PROCESSES 1024 -#if defined(ERTS_SMP) && ERTS_MIN_PROCESSES < ERTS_NO_OF_PIX_LOCKS +#if ERTS_MIN_PROCESSES < ERTS_NO_OF_PIX_LOCKS #undef ERTS_MIN_PROCESSES #define ERTS_MIN_PROCESSES ERTS_NO_OF_PIX_LOCKS #endif -void erts_smp_notify_inc_runq(ErtsRunQueue *runq); +void erts_notify_inc_runq(ErtsRunQueue *runq); -#ifdef ERTS_SMP void erts_sched_finish_poke(ErtsSchedulerSleepInfo *, erts_aint32_t); ERTS_GLB_INLINE void erts_sched_poke(ErtsSchedulerSleepInfo *ssi); +void erts_aux_thread_poke(void); #if ERTS_GLB_INLINE_INCL_FUNC_DEF @@ -2614,9 +2478,9 @@ erts_sched_poke(ErtsSchedulerSleepInfo *ssi) { erts_aint32_t flags; ERTS_THR_MEMORY_BARRIER; - flags = erts_smp_atomic32_read_nob(&ssi->flags); + flags = erts_atomic32_read_nob(&ssi->flags); if (flags & ERTS_SSI_FLG_SLEEPING) { - flags = erts_smp_atomic32_read_band_nob(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP); + flags = erts_atomic32_read_band_nob(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP); erts_sched_finish_poke(ssi, flags); } } @@ -2624,7 +2488,6 @@ erts_sched_poke(ErtsSchedulerSleepInfo *ssi) #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ -#endif /* #ifdef ERTS_SMP */ #include "erl_process_lock.h" @@ -2634,5 +2497,5 @@ erts_sched_poke(ErtsSchedulerSleepInfo *ssi) void erts_halt(int code); -extern erts_smp_atomic32_t erts_halt_progress; +extern erts_atomic32_t erts_halt_progress; extern int erts_halt_code; diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c index b826e6c5d3..5a2c262ff1 100644 --- a/erts/emulator/beam/erl_process_dump.c +++ b/erts/emulator/beam/erl_process_dump.c @@ -69,7 +69,7 @@ erts_deep_process_dump(fmtfn_t to, void *to_arg) for (i = 0; i < max; i++) { Process *p = erts_pix2proc(i); if (p && p->i != ENULL) { - erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state); + erts_aint32_t state = erts_atomic32_read_acqb(&p->state); if (!(state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_GC))) dump_process_info(to, to_arg, p); } @@ -85,7 +85,7 @@ Uint 
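erts_sched_poke(), at the end of the erl_process.h hunk above, first does a cheap plain read and only performs the atomic clear when the sleeping bit is actually set, passing the pre-clear flags on to the wakeup path. A C11 model of that fast path, with illustrative bit values:

#include <stdatomic.h>

#define SSI_FLG_SLEEPING (1u << 0)   /* illustrative bit values */
#define SSI_FLGS_SLEEP   (SSI_FLG_SLEEPING | (1u << 1) | (1u << 2))

/* Only pay for the read-modify-write when the scheduler might be
 * sleeping; hand the previous flags to the wakeup path so it can
 * tell how deeply the scheduler was sleeping. */
void sched_poke(atomic_uint *flags, void (*finish_poke)(unsigned))
{
    atomic_thread_fence(memory_order_seq_cst);
    unsigned flg = atomic_load_explicit(flags, memory_order_relaxed);
    if (flg & SSI_FLG_SLEEPING) {
        flg = atomic_fetch_and_explicit(flags, ~SSI_FLGS_SLEEP,
                                        memory_order_relaxed);
        finish_poke(flg);
    }
}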
erts_process_memory(Process *p, int incl_msg_inq) { size += sizeof(Process); if (incl_msg_inq) - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p); + ERTS_MSGQ_MV_INQ2PRIVQ(p); erts_doforall_links(ERTS_P_LINKS(p), &erts_one_link_size, &size); erts_doforall_monitors(ERTS_P_MONITORS(p), &erts_one_mon_size, &size); @@ -106,7 +106,7 @@ Uint erts_process_memory(Process *p, int incl_msg_inq) { size += p->arity * sizeof(p->arg_reg[0]); } - if (erts_smp_atomic_read_nob(&p->psd) != (erts_aint_t) NULL) + if (erts_atomic_read_nob(&p->psd) != (erts_aint_t) NULL) size += sizeof(ErtsPSD); scb = ERTS_PROC_GET_SAVED_CALLS_BUF(p); @@ -126,7 +126,7 @@ dump_process_info(fmtfn_t to, void *to_arg, Process *p) ErtsMessage* mp; int yreg = -1; - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p); + ERTS_MSGQ_MV_INQ2PRIVQ(p); if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0 && p->msg.first) { erts_print(to, to_arg, "=proc_messages:%T\n", p->common.id); diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c index ff124d5ba7..431867f27e 100644 --- a/erts/emulator/beam/erl_process_lock.c +++ b/erts/emulator/beam/erl_process_lock.c @@ -56,9 +56,9 @@ * Note that wait flags may be read without the pix lock, but * it is important that wait flags only are modified when the pix * lock is held. - * This implementation assumes that erts_smp_atomic_or_retold() + * This implementation assumes that erts_atomic_or_retold() * provides necessary memory barriers for a lock operation, and that - * erts_smp_atomic_and_retold() provides necessary memorybarriers + * erts_atomic_and_retold() provides necessary memory barriers * for an unlock operation. */ @@ -69,7 +69,6 @@ #include "erl_process.h" #include "erl_thr_progress.h" -#ifdef ERTS_SMP #if ERTS_PROC_LOCK_OWN_IMPL @@ -464,7 +463,7 @@ wait_for_locks(Process *p, } /* - * erts_proc_lock_failed() is called when erts_smp_proc_lock() + * erts_proc_lock_failed() is called when erts_proc_lock() * wasn't able to lock all locks. We may need to transfer locks * to waiters and wait for our turn on locks. * @@ -543,7 +542,7 @@ erts_proc_lock_failed(Process *p, } /* - * erts_proc_unlock_failed() is called when erts_smp_proc_unlock() + * erts_proc_unlock_failed() is called when erts_proc_unlock() * wasn't able to unlock all locks. We may need to transfer locks * to waiters. */ @@ -709,7 +708,7 @@ proc_safelock(int is_managed, refc1 = 1; erts_proc_inc_refc(p1); } - erts_smp_proc_unlock(p1, unlock_locks); + erts_proc_unlock(p1, unlock_locks); } unlock_locks = unlock_mask & have_locks2; if (unlock_locks) { @@ -719,7 +718,7 @@ proc_safelock(int is_managed, refc2 = 1; erts_proc_inc_refc(p2); } - erts_smp_proc_unlock(p2, unlock_locks); + erts_proc_unlock(p2, unlock_locks); } } @@ -750,7 +749,7 @@ proc_safelock(int is_managed, if (need_locks2 & lock) lock_no--; locks = need_locks1 & lock_mask; - erts_smp_proc_lock(p1, locks); + erts_proc_lock(p1, locks); have_locks1 |= locks; need_locks1 &= ~locks; } @@ -761,7 +760,7 @@ proc_safelock(int is_managed, lock = (1 << ++lock_no); } locks = need_locks2 & lock_mask; - erts_smp_proc_lock(p2, locks); + erts_proc_lock(p2, locks); have_locks2 |= locks; need_locks2 &= ~locks; } @@ -898,7 +897,7 @@ erts_pid2proc_opt(Process *c_p, #endif /* ERTS_PROC_LOCK_OWN_IMPL */ { /* Try a quick trylock to grab all the locks we need.
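The comment at the top of erl_process_lock.c spells out the barrier contract for the bit-field process locks: the or-operation that takes a lock must give at least acquire ordering, and the and-operation that releases it at least release ordering. The same contract written out with C11 atomics as a minimal test-and-set bit lock (the real implementation queues waiters instead of spinning):

#include <stdatomic.h>
#include <sched.h>

#define LOCK_BIT 1u

/* Lock: fetch_or with acquire ordering, as the comment requires. */
static void bit_lock(atomic_uint *flags)
{
    while (atomic_fetch_or_explicit(flags, LOCK_BIT,
                                    memory_order_acquire) & LOCK_BIT)
        sched_yield();   /* someone else holds it; the real code queues */
}

/* Unlock: fetch_and with release ordering. */
static void bit_unlock(atomic_uint *flags)
{
    atomic_fetch_and_explicit(flags, ~LOCK_BIT, memory_order_release);
}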
*/ - busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks); + busy = (int) erts_proc_raw_trylock__(proc, need_locks); #if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK) erts_proc_lc_trylock(proc, need_locks, !busy, __FILE__,__LINE__); @@ -976,7 +975,7 @@ erts_pid2proc_opt(Process *c_p, : (proc != (Process *) erts_ptab_pix2intptr_nob(&erts_proc, pix)))) { - erts_smp_proc_unlock(proc, need_locks); + erts_proc_unlock(proc, need_locks); if (flags & ERTS_P2P_FLG_INC_REFC) dec_refc_proc = proc; @@ -1002,11 +1001,9 @@ static ERTS_INLINE Process *proc_lookup_inc_refc(Eterm pid, int allow_exit) { Process *proc; -#ifdef ERTS_SMP ErtsThrPrgrDelayHandle dhndl; dhndl = erts_thr_progress_unmanaged_delay(); -#endif proc = erts_proc_lookup_raw(pid); if (proc) { @@ -1016,9 +1013,7 @@ Process *proc_lookup_inc_refc(Eterm pid, int allow_exit) erts_proc_inc_refc(proc); } -#ifdef ERTS_SMP erts_thr_progress_unmanaged_continue(dhndl); -#endif return proc; } @@ -1042,7 +1037,7 @@ erts_proc_lock_init(Process *p) #if ERTS_PROC_LOCK_OWN_IMPL /* We always start with all locks locked */ #if ERTS_PROC_LOCK_ATOMIC_IMPL - erts_smp_atomic32_init_nob(&p->lock.flags, + erts_atomic32_init_nob(&p->lock.flags, (erts_aint32_t) ERTS_PROC_LOCKS_ALL); #else p->lock.flags = ERTS_PROC_LOCKS_ALL; @@ -1093,7 +1088,7 @@ erts_proc_lock_init(Process *p) #endif #ifdef ERTS_PROC_LOCK_DEBUG for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++) - erts_smp_atomic32_init_nob(&p->lock.locked[i], (erts_aint32_t) 1); + erts_atomic32_init_nob(&p->lock.locked[i], (erts_aint32_t) 1); #endif #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_proc_lock_init(p); @@ -1113,7 +1108,7 @@ erts_proc_lock_fin(Process *p) erts_mtx_destroy(&p->lock.status); erts_mtx_destroy(&p->lock.trace); #endif -#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP) +#if defined(ERTS_ENABLE_LOCK_COUNT) erts_lcnt_proc_lock_destroy(p); #endif } @@ -1785,4 +1780,3 @@ check_queue(erts_proc_lock_t *lck) } #endif -#endif /* ERTS_SMP */ diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h index 023ba4d4ae..9d5691d3c4 100644 --- a/erts/emulator/beam/erl_process_lock.h +++ b/erts/emulator/beam/erl_process_lock.h @@ -36,7 +36,7 @@ #include "erl_lock_count.h" #endif -#include "erl_smp.h" +#include "erl_threads.h" #if defined(VALGRIND) || defined(ETHR_DISABLE_NATIVE_IMPLS) # define ERTS_PROC_LOCK_OWN_IMPL 0 @@ -73,7 +73,7 @@ typedef erts_aint32_t ErtsProcLocks; typedef struct erts_proc_lock_t_ { #if ERTS_PROC_LOCK_OWN_IMPL #if ERTS_PROC_LOCK_ATOMIC_IMPL - erts_smp_atomic32_t flags; + erts_atomic32_t flags; #else ErtsProcLocks flags; #endif @@ -103,7 +103,7 @@ typedef struct erts_proc_lock_t_ { # error "no implementation" #endif #ifdef ERTS_PROC_LOCK_DEBUG - erts_smp_atomic32_t locked[ERTS_PROC_LOCK_MAX_BIT+1]; + erts_atomic32_t locked[ERTS_PROC_LOCK_MAX_BIT+1]; #endif } erts_proc_lock_t; @@ -243,11 +243,11 @@ typedef struct erts_proc_lock_t_ { /* Lock counter implementation */ #ifdef ERTS_ENABLE_LOCK_POSITION -#define erts_smp_proc_lock__(P,I,L) erts_smp_proc_lock_x__(P,I,L,__FILE__,__LINE__) -#define erts_smp_proc_lock(P,L) erts_smp_proc_lock_x(P,L,__FILE__,__LINE__) +#define erts_proc_lock__(P,I,L) erts_proc_lock_x__(P,I,L,__FILE__,__LINE__) +#define erts_proc_lock(P,L) erts_proc_lock_x(P,L,__FILE__,__LINE__) #endif -#if defined(ERTS_SMP) && defined (ERTS_ENABLE_LOCK_COUNT) +#if defined (ERTS_ENABLE_LOCK_COUNT) void erts_lcnt_proc_lock_init(Process *p); void erts_lcnt_proc_lock_destroy(Process *p); @@ -421,10 +421,10 @@ void
erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res /* --- Process lock checking ----------------------------------------------- */ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) -#define ERTS_SMP_CHK_NO_PROC_LOCKS \ +#if defined(ERTS_ENABLE_LOCK_CHECK) +#define ERTS_CHK_NO_PROC_LOCKS \ erts_proc_lc_chk_no_proc_locks(__FILE__, __LINE__) -#define ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P) \ +#define ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P) \ erts_proc_lc_chk_only_proc_main((P)) void erts_proc_lc_lock(Process *p, ErtsProcLocks locks, char *file, unsigned int line); @@ -443,8 +443,8 @@ void erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks, char* file, unsigned int line); void erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks); #else -#define ERTS_SMP_CHK_NO_PROC_LOCKS -#define ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P) +#define ERTS_CHK_NO_PROC_LOCKS +#define ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P) #endif #endif /* #ifndef ERTS_PROC_LOCK_LOCK_CHECK__ */ @@ -455,7 +455,6 @@ void erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks); #ifndef ERTS_PROCESS_LOCK_H__ #define ERTS_PROCESS_LOCK_H__ -#ifdef ERTS_SMP typedef struct { union { @@ -472,21 +471,21 @@ typedef struct { #if ERTS_PROC_LOCK_ATOMIC_IMPL #define ERTS_PROC_LOCK_FLGS_BAND_(L, MSK) \ - ((ErtsProcLocks) erts_smp_atomic32_read_band_nob(&(L)->flags, \ + ((ErtsProcLocks) erts_atomic32_read_band_nob(&(L)->flags, \ (erts_aint32_t) (MSK))) #define ERTS_PROC_LOCK_FLGS_BOR_ACQB_(L, MSK) \ - ((ErtsProcLocks) erts_smp_atomic32_read_bor_acqb(&(L)->flags, \ + ((ErtsProcLocks) erts_atomic32_read_bor_acqb(&(L)->flags, \ (erts_aint32_t) (MSK))) #define ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(L, NEW, EXPECTED) \ - ((ErtsProcLocks) erts_smp_atomic32_cmpxchg_acqb(&(L)->flags, \ + ((ErtsProcLocks) erts_atomic32_cmpxchg_acqb(&(L)->flags, \ (erts_aint32_t) (NEW), \ (erts_aint32_t) (EXPECTED))) #define ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(L, NEW, EXPECTED) \ - ((ErtsProcLocks) erts_smp_atomic32_cmpxchg_relb(&(L)->flags, \ + ((ErtsProcLocks) erts_atomic32_cmpxchg_relb(&(L)->flags, \ (erts_aint32_t) (NEW), \ (erts_aint32_t) (EXPECTED))) #define ERTS_PROC_LOCK_FLGS_READ_(L) \ - ((ErtsProcLocks) erts_smp_atomic32_read_nob(&(L)->flags)) + ((ErtsProcLocks) erts_atomic32_read_nob(&(L)->flags)) #else /* no opt atomic ops */ @@ -557,22 +556,22 @@ ERTS_GLB_INLINE void erts_pix_lock(erts_pix_lock_t *); ERTS_GLB_INLINE void erts_pix_unlock(erts_pix_lock_t *); ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *); -ERTS_GLB_INLINE ErtsProcLocks erts_smp_proc_raw_trylock__(Process *p, +ERTS_GLB_INLINE ErtsProcLocks erts_proc_raw_trylock__(Process *p, ErtsProcLocks locks); #ifdef ERTS_ENABLE_LOCK_POSITION -ERTS_GLB_INLINE void erts_smp_proc_lock_x__(Process *, +ERTS_GLB_INLINE void erts_proc_lock_x__(Process *, erts_pix_lock_t *, ErtsProcLocks, char *file, unsigned int line); #else -ERTS_GLB_INLINE void erts_smp_proc_lock__(Process *, +ERTS_GLB_INLINE void erts_proc_lock__(Process *, erts_pix_lock_t *, ErtsProcLocks); #endif -ERTS_GLB_INLINE void erts_smp_proc_unlock__(Process *, +ERTS_GLB_INLINE void erts_proc_unlock__(Process *, erts_pix_lock_t *, ErtsProcLocks); -ERTS_GLB_INLINE int erts_smp_proc_trylock__(Process *, +ERTS_GLB_INLINE int erts_proc_trylock__(Process *, erts_pix_lock_t *, ErtsProcLocks); @@ -600,7 +599,7 @@ ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *pixlck) } /* - * Helper function for erts_smp_proc_lock__ and erts_smp_proc_trylock__. 
+ * Helper function for erts_proc_lock__ and erts_proc_trylock__. * * Attempts to grab all of 'locks' simultaneously. * @@ -613,7 +612,7 @@ ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *pixlck) * Does not release the pix lock. */ ERTS_GLB_INLINE ErtsProcLocks -erts_smp_proc_raw_trylock__(Process *p, ErtsProcLocks locks) +erts_proc_raw_trylock__(Process *p, ErtsProcLocks locks) { #if ERTS_PROC_LOCK_OWN_IMPL ErtsProcLocks expct_lflgs = 0; @@ -682,12 +681,12 @@ busy_main: ERTS_GLB_INLINE void #ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_proc_lock_x__(Process *p, +erts_proc_lock_x__(Process *p, erts_pix_lock_t *pix_lck, ErtsProcLocks locks, char *file, unsigned int line) #else -erts_smp_proc_lock__(Process *p, +erts_proc_lock__(Process *p, erts_pix_lock_t *pix_lck, ErtsProcLocks locks) #endif @@ -709,7 +708,7 @@ erts_smp_proc_lock__(Process *p, erts_proc_lc_lock(p, locks, file, line); #endif - old_lflgs = erts_smp_proc_raw_trylock__(p, locks); + old_lflgs = erts_proc_raw_trylock__(p, locks); if (old_lflgs != 0) { /* @@ -761,7 +760,7 @@ erts_smp_proc_lock__(Process *p, } ERTS_GLB_INLINE void -erts_smp_proc_unlock__(Process *p, +erts_proc_unlock__(Process *p, erts_pix_lock_t *pix_lck, ErtsProcLocks locks) { @@ -854,7 +853,7 @@ erts_smp_proc_unlock__(Process *p, } ERTS_GLB_INLINE int -erts_smp_proc_trylock__(Process *p, +erts_proc_trylock__(Process *p, erts_pix_lock_t *pix_lck, ErtsProcLocks locks) { @@ -875,7 +874,7 @@ erts_smp_proc_trylock__(Process *p, erts_pix_lock(pix_lck); #endif - if (erts_smp_proc_raw_trylock__(p, locks) != 0) { + if (erts_proc_raw_trylock__(p, locks) != 0) { /* Didn't get all locks... */ res = EBUSY; @@ -912,7 +911,7 @@ erts_smp_proc_trylock__(Process *p, return res; #elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL - if (erts_smp_proc_raw_trylock__(p, locks) != 0) + if (erts_proc_raw_trylock__(p, locks) != 0) return EBUSY; else { #ifdef ERTS_PROC_LOCK_DEBUG @@ -933,11 +932,11 @@ erts_proc_lock_op_debug(Process *p, ErtsProcLocks locks, int locked) if (locks & lock) { erts_aint32_t lock_count; if (locked) { - lock_count = erts_smp_atomic32_inc_read_nob(&p->lock.locked[i]); + lock_count = erts_atomic32_inc_read_nob(&p->lock.locked[i]); ERTS_LC_ASSERT(lock_count == 1); } else { - lock_count = erts_smp_atomic32_dec_read_nob(&p->lock.locked[i]); + lock_count = erts_atomic32_dec_read_nob(&p->lock.locked[i]); ERTS_LC_ASSERT(lock_count == 0); } } @@ -947,15 +946,14 @@ erts_proc_lock_op_debug(Process *p, ErtsProcLocks locks, int locked) #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ -#endif /* ERTS_SMP */ #ifdef ERTS_ENABLE_LOCK_POSITION -ERTS_GLB_INLINE void erts_smp_proc_lock_x(Process *, ErtsProcLocks, char *file, unsigned int line); +ERTS_GLB_INLINE void erts_proc_lock_x(Process *, ErtsProcLocks, char *file, unsigned int line); #else -ERTS_GLB_INLINE void erts_smp_proc_lock(Process *, ErtsProcLocks); +ERTS_GLB_INLINE void erts_proc_lock(Process *, ErtsProcLocks); #endif -ERTS_GLB_INLINE void erts_smp_proc_unlock(Process *, ErtsProcLocks); -ERTS_GLB_INLINE int erts_smp_proc_trylock(Process *, ErtsProcLocks); +ERTS_GLB_INLINE void erts_proc_unlock(Process *, ErtsProcLocks); +ERTS_GLB_INLINE int erts_proc_trylock(Process *, ErtsProcLocks); ERTS_GLB_INLINE void erts_proc_inc_refc(Process *); ERTS_GLB_INLINE void erts_proc_dec_refc(Process *); @@ -966,79 +964,65 @@ ERTS_GLB_INLINE Sint erts_proc_read_refc(Process *); ERTS_GLB_INLINE void #ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_proc_lock_x(Process *p, ErtsProcLocks locks, char *file, unsigned int line) 
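erts_proc_raw_trylock__() above grabs all requested lock bits with a single compare-and-exchange and fails as soon as any of them is already held. A standalone C11 model of that all-or-nothing attempt:

#include <stdatomic.h>

/* Returns 0 on success, the observed flag word on failure (so callers
 * can inspect which locks are in the way), mirroring the helper above. */
static unsigned proc_raw_trylock(atomic_uint *lflgs, unsigned locks)
{
    unsigned expct = 0;
    for (;;) {
        if (atomic_compare_exchange_strong_explicit(
                lflgs, &expct, expct | locks,
                memory_order_acquire, memory_order_relaxed))
            return 0;                  /* got all of 'locks' at once */
        if (expct & locks)
            return expct;              /* some requested lock is busy */
        /* only unrelated bits changed; retry with the fresh value */
    }
}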
+erts_proc_lock_x(Process *p, ErtsProcLocks locks, char *file, unsigned int line) #else -erts_smp_proc_lock(Process *p, ErtsProcLocks locks) +erts_proc_lock(Process *p, ErtsProcLocks locks) #endif { -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) - erts_smp_proc_lock_x__(p, +#if defined(ERTS_ENABLE_LOCK_POSITION) + erts_proc_lock_x__(p, #if ERTS_PROC_LOCK_ATOMIC_IMPL NULL, #else ERTS_PID2PIXLOCK(p->common.id), #endif /*ERTS_PROC_LOCK_ATOMIC_IMPL*/ locks, file, line); -#elif defined(ERTS_SMP) - erts_smp_proc_lock__(p, +#else + erts_proc_lock__(p, #if ERTS_PROC_LOCK_ATOMIC_IMPL NULL, #else ERTS_PID2PIXLOCK(p->common.id), #endif /*ERTS_PROC_LOCK_ATOMIC_IMPL*/ locks); -#endif /*ERTS_SMP*/ +#endif /*ERTS_ENABLE_LOCK_POSITION*/ } ERTS_GLB_INLINE void -erts_smp_proc_unlock(Process *p, ErtsProcLocks locks) +erts_proc_unlock(Process *p, ErtsProcLocks locks) { -#ifdef ERTS_SMP - erts_smp_proc_unlock__(p, + erts_proc_unlock__(p, #if ERTS_PROC_LOCK_ATOMIC_IMPL NULL, #else ERTS_PID2PIXLOCK(p->common.id), #endif locks); -#endif } ERTS_GLB_INLINE int -erts_smp_proc_trylock(Process *p, ErtsProcLocks locks) +erts_proc_trylock(Process *p, ErtsProcLocks locks) { -#ifndef ERTS_SMP - return 0; -#else - return erts_smp_proc_trylock__(p, + return erts_proc_trylock__(p, #if ERTS_PROC_LOCK_ATOMIC_IMPL NULL, #else ERTS_PID2PIXLOCK(p->common.id), #endif locks); -#endif } ERTS_GLB_INLINE void erts_proc_inc_refc(Process *p) { - ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); -#ifdef ERTS_SMP + ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); erts_ptab_atmc_inc_refc(&p->common); -#else - erts_ptab_inc_refc(&p->common); -#endif } ERTS_GLB_INLINE void erts_proc_dec_refc(Process *p) { Sint referred; - ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); -#ifdef ERTS_SMP + ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); referred = erts_ptab_atmc_dec_test_refc(&p->common); -#else - referred = erts_ptab_dec_test_refc(&p->common); -#endif if (!referred) { ASSERT(ERTS_PROC_IS_EXITING(p)); erts_free_proc(p); @@ -1048,12 +1032,8 @@ ERTS_GLB_INLINE void erts_proc_dec_refc(Process *p) ERTS_GLB_INLINE void erts_proc_add_refc(Process *p, Sint add_refc) { Sint referred; - ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); -#ifdef ERTS_SMP + ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); referred = erts_ptab_atmc_add_test_refc(&p->common, add_refc); -#else - referred = erts_ptab_add_test_refc(&p->common, add_refc); -#endif if (!referred) { ASSERT(ERTS_PROC_IS_EXITING(p)); erts_free_proc(p); @@ -1062,17 +1042,12 @@ ERTS_GLB_INLINE void erts_proc_add_refc(Process *p, Sint add_refc) ERTS_GLB_INLINE Sint erts_proc_read_refc(Process *p) { - ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); -#ifdef ERTS_SMP + ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); return erts_ptab_atmc_read_refc(&p->common); -#else - return erts_ptab_read_refc(&p->common); -#endif } #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ -#ifdef ERTS_SMP void erts_proc_lock_init(Process *); void erts_proc_lock_fin(Process *); void erts_proc_safelock(Process *a_proc, @@ -1081,7 +1056,6 @@ void erts_proc_safelock(Process *a_proc, Process *b_proc, ErtsProcLocks b_have_locks, ErtsProcLocks b_need_locks); -#endif /* * --- Process table lookup ------------------------------------------------ @@ -1113,9 +1087,6 @@ ERTS_GLB_INLINE Process *erts_pix2proc(int ix); ERTS_GLB_INLINE Process *erts_proc_lookup_raw(Eterm pid); 
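erts_proc_dec_refc() above frees the process when the last reference is dropped; the interesting part is that the decrement itself decides which thread frees. A minimal model with a C11 atomic counter:

#include <stdatomic.h>
#include <stdlib.h>

typedef struct { atomic_long refc; /* ... payload elided ... */ } PRef;

/* The thread that observes the count reach 0 frees; acq_rel ordering
 * makes writes published before earlier decrements visible to it. */
static void pref_dec(PRef *p)
{
    long referred =
        atomic_fetch_sub_explicit(&p->refc, 1, memory_order_acq_rel) - 1;
    if (referred == 0)
        free(p);
}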
ERTS_GLB_INLINE Process *erts_proc_lookup(Eterm pid); -#ifndef ERTS_SMP -ERTS_GLB_INLINE -#endif Process *erts_pid2proc_opt(Process *, ErtsProcLocks, Eterm, ErtsProcLocks, int); #if ERTS_GLB_INLINE_INCL_FUNC_DEF @@ -1132,7 +1103,7 @@ ERTS_GLB_INLINE Process *erts_proc_lookup_raw(Eterm pid) { Process *proc; - ERTS_SMP_LC_ASSERT(erts_thr_progress_lc_is_delaying()); + ERTS_LC_ASSERT(erts_thr_progress_lc_is_delaying()); if (is_not_internal_pid(pid)) return NULL; @@ -1152,25 +1123,6 @@ ERTS_GLB_INLINE Process *erts_proc_lookup(Eterm pid) return proc; } -#ifndef ERTS_SMP -ERTS_GLB_INLINE Process * -erts_pid2proc_opt(Process *c_p_unused, - ErtsProcLocks c_p_have_locks_unused, - Eterm pid, - ErtsProcLocks pid_need_locks_unused, - int flags) -{ - Process *proc = erts_proc_lookup_raw(pid); - if (!proc) - return NULL; - if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X) - && ERTS_PROC_IS_EXITING(proc)) - return NULL; - if (flags & ERTS_P2P_FLG_INC_REFC) - erts_proc_inc_refc(proc); - return proc; -} -#endif /* !ERTS_SMP */ #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ diff --git a/erts/emulator/beam/erl_ptab.c b/erts/emulator/beam/erl_ptab.c index b3bcb3af3f..38c095fb4a 100644 --- a/erts/emulator/beam/erl_ptab.c +++ b/erts/emulator/beam/erl_ptab.c @@ -284,31 +284,31 @@ struct ErtsPTabListBifData_ { static ERTS_INLINE void last_data_init_nob(ErtsPTab *ptab, Uint64 val) { - erts_smp_atomic64_init_nob(&ptab->vola.tile.last_data, (erts_aint64_t) val); + erts_atomic64_init_nob(&ptab->vola.tile.last_data, (erts_aint64_t) val); } static ERTS_INLINE void last_data_set_relb(ErtsPTab *ptab, Uint64 val) { - erts_smp_atomic64_set_relb(&ptab->vola.tile.last_data, (erts_aint64_t) val); + erts_atomic64_set_relb(&ptab->vola.tile.last_data, (erts_aint64_t) val); } static ERTS_INLINE Uint64 last_data_read_nob(ErtsPTab *ptab) { - return (Uint64) erts_smp_atomic64_read_nob(&ptab->vola.tile.last_data); + return (Uint64) erts_atomic64_read_nob(&ptab->vola.tile.last_data); } static ERTS_INLINE Uint64 last_data_read_acqb(ErtsPTab *ptab) { - return (Uint64) erts_smp_atomic64_read_acqb(&ptab->vola.tile.last_data); + return (Uint64) erts_atomic64_read_acqb(&ptab->vola.tile.last_data); } static ERTS_INLINE Uint64 last_data_cmpxchg_relb(ErtsPTab *ptab, Uint64 new, Uint64 exp) { - return (Uint64) erts_smp_atomic64_cmpxchg_relb(&ptab->vola.tile.last_data, + return (Uint64) erts_atomic64_cmpxchg_relb(&ptab->vola.tile.last_data, (erts_aint64_t) new, (erts_aint64_t) exp); } @@ -346,9 +346,9 @@ ix_to_free_id_data_ix(ErtsPTab *ptab, Uint32 ix) UWord erts_ptab_mem_size(ErtsPTab *ptab) { - UWord size = ptab->r.o.max*sizeof(erts_smp_atomic_t); + UWord size = ptab->r.o.max*sizeof(erts_atomic_t); if (ptab->r.o.free_id_data) - size += ptab->r.o.max*sizeof(erts_smp_atomic32_t); + size += ptab->r.o.max*sizeof(erts_atomic32_t); return size; } @@ -367,14 +367,14 @@ erts_ptab_init_table(ErtsPTab *ptab, size_t tab_sz, alloc_sz; Uint32 bits, cl, cli, ix, ix_per_cache_line, tab_cache_lines; char *tab_end; - erts_smp_atomic_t *tab_entry; - erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; - rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED; + erts_atomic_t *tab_entry; + erts_rwmtx_opt_t rwmtx_opts = ERTS_RWMTX_OPT_DEFAULT_INITER; + rwmtx_opts.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; + rwmtx_opts.lived = ERTS_RWMTX_LONG_LIVED; - erts_smp_rwmtx_init_opt(&ptab->list.data.rwmtx, &rwmtx_opts, name, NIL, + erts_rwmtx_init_opt(&ptab->list.data.rwmtx, &rwmtx_opts, name, NIL, 
ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); - erts_smp_atomic32_init_nob(&ptab->vola.tile.count, 0); + erts_atomic32_init_nob(&ptab->vola.tile.count, 0); last_data_init_nob(ptab, ~((Uint64) 0)); /* A size that is a power of 2 is to prefer performance wise */ @@ -388,20 +388,20 @@ erts_ptab_init_table(ErtsPTab *ptab, ptab->r.o.element_size = element_size; ptab->r.o.max = size; - tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_smp_atomic_t)); + tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_atomic_t)); alloc_sz = tab_sz; if (!legacy) - alloc_sz += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_smp_atomic32_t)); + alloc_sz += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_atomic32_t)); ptab->r.o.tab = erts_alloc_permanent_cache_aligned(atype, alloc_sz); tab_end = ((char *) ptab->r.o.tab) + tab_sz; tab_entry = ptab->r.o.tab; while (tab_end > ((char *) tab_entry)) { - erts_smp_atomic_init_nob(tab_entry, ERTS_AINT_NULL); + erts_atomic_init_nob(tab_entry, ERTS_AINT_NULL); tab_entry++; } tab_cache_lines = tab_sz/ERTS_CACHE_LINE_SIZE; - ix_per_cache_line = (ERTS_CACHE_LINE_SIZE/sizeof(erts_smp_atomic_t)); + ix_per_cache_line = (ERTS_CACHE_LINE_SIZE/sizeof(erts_atomic_t)); ASSERT((ptab->r.o.max & (ptab->r.o.max - 1)) == 0); /* power of 2 */ ASSERT((ix_per_cache_line & (ix_per_cache_line - 1)) == 0); /* power of 2 */ ASSERT((tab_cache_lines & (tab_cache_lines - 1)) == 0); /* power of 2 */ @@ -429,11 +429,11 @@ erts_ptab_init_table(ErtsPTab *ptab, } else { - tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_smp_atomic32_t)); - ptab->r.o.free_id_data = (erts_smp_atomic32_t *) tab_end; + tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_atomic32_t)); + ptab->r.o.free_id_data = (erts_atomic32_t *) tab_end; tab_cache_lines = tab_sz/ERTS_CACHE_LINE_SIZE; - ix_per_cache_line = (ERTS_CACHE_LINE_SIZE/sizeof(erts_smp_atomic32_t)); + ix_per_cache_line = (ERTS_CACHE_LINE_SIZE/sizeof(erts_atomic32_t)); ptab->r.o.dix_cl_mask = tab_cache_lines-1; ptab->r.o.dix_cl_shift = erts_fit_in_bits_int32(ix_per_cache_line-1); @@ -448,19 +448,19 @@ erts_ptab_init_table(ErtsPTab *ptab, ix = 0; for (cl = 0; cl < tab_cache_lines; cl++) { for (cli = 0; cli < ix_per_cache_line; cli++) { - erts_smp_atomic32_init_nob(&ptab->r.o.free_id_data[ix], + erts_atomic32_init_nob(&ptab->r.o.free_id_data[ix], cli*tab_cache_lines+cl); - ASSERT(erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[ix]) != ptab->r.o.invalid_data); + ASSERT(erts_atomic32_read_nob(&ptab->r.o.free_id_data[ix]) != ptab->r.o.invalid_data); ix++; } } - erts_smp_atomic32_init_nob(&ptab->vola.tile.aid_ix, -1); - erts_smp_atomic32_init_nob(&ptab->vola.tile.fid_ix, -1); + erts_atomic32_init_nob(&ptab->vola.tile.aid_ix, -1); + erts_atomic32_init_nob(&ptab->vola.tile.fid_ix, -1); } - erts_smp_interval_init(&ptab->list.data.interval); + erts_interval_init(&ptab->list.data.interval); ptab->list.data.deleted.start = NULL; ptab->list.data.deleted.end = NULL; ptab->list.data.chunks = (((ptab->r.o.max - 1) @@ -480,9 +480,9 @@ erts_ptab_init_table(ErtsPTab *ptab, * have ERTS_PTAB_MAX_SIZE-1 valid elements in the table while * still having a table size of the power of 2. 
*/ - erts_smp_atomic32_inc_nob(&ptab->vola.tile.count); + erts_atomic32_inc_nob(&ptab->vola.tile.count); pix = erts_ptab_data2pix(ptab, ptab->r.o.invalid_data); - erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], + erts_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab->r.o.invalid_element); } @@ -506,12 +506,12 @@ erts_ptab_new_element(ErtsPTab *ptab, erts_ptab_rlock(ptab); - count = erts_smp_atomic32_inc_read_acqb(&ptab->vola.tile.count); + count = erts_atomic32_inc_read_acqb(&ptab->vola.tile.count); if (count > ptab->r.o.max) { while (1) { erts_aint32_t act_count; - act_count = erts_smp_atomic32_cmpxchg_relb(&ptab->vola.tile.count, + act_count = erts_atomic32_cmpxchg_relb(&ptab->vola.tile.count, count-1, count); if (act_count == count) { @@ -525,14 +525,14 @@ erts_ptab_new_element(ErtsPTab *ptab, } ptab_el->u.alive.started_interval - = erts_smp_current_interval_nob(erts_ptab_interval(ptab)); + = erts_current_interval_nob(erts_ptab_interval(ptab)); if (ptab->r.o.free_id_data) { do { - ix = (Uint32) erts_smp_atomic32_inc_read_acqb(&ptab->vola.tile.aid_ix); + ix = (Uint32) erts_atomic32_inc_read_acqb(&ptab->vola.tile.aid_ix); ix = ix_to_free_id_data_ix(ptab, ix); - data = erts_smp_atomic32_xchg_nob(&ptab->r.o.free_id_data[ix], + data = erts_atomic32_xchg_nob(&ptab->r.o.free_id_data[ix], (erts_aint32_t)ptab->r.o.invalid_data); }while ((Eterm)data == ptab->r.o.invalid_data); @@ -546,10 +546,10 @@ erts_ptab_new_element(ErtsPTab *ptab, pix = erts_ptab_data2pix(ptab, (Eterm) data); #ifdef DEBUG - ASSERT(ERTS_AINT_NULL == erts_smp_atomic_xchg_relb(&ptab->r.o.tab[pix], + ASSERT(ERTS_AINT_NULL == erts_atomic_xchg_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el)); #else - erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el); + erts_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el); #endif erts_ptab_runlock(ptab); @@ -563,7 +563,7 @@ erts_ptab_new_element(ErtsPTab *ptab, restart: ptab_el->u.alive.started_interval - = erts_smp_current_interval_nob(erts_ptab_interval(ptab)); + = erts_current_interval_nob(erts_ptab_interval(ptab)); ld = last_data_read_acqb(ptab); @@ -571,10 +571,10 @@ erts_ptab_new_element(ErtsPTab *ptab, while (1) { ld++; pix = erts_ptab_data2pix(ptab, ERTS_PTAB_LastData2EtermData(ld)); - if (erts_smp_atomic_read_nob(&ptab->r.o.tab[pix]) + if (erts_atomic_read_nob(&ptab->r.o.tab[pix]) == ERTS_AINT_NULL) { erts_aint_t val; - val = erts_smp_atomic_cmpxchg_relb(&ptab->r.o.tab[pix], + val = erts_atomic_cmpxchg_relb(&ptab->r.o.tab[pix], invalid, ERTS_AINT_NULL); @@ -621,10 +621,10 @@ erts_ptab_new_element(ErtsPTab *ptab, /* Move into slot reserved */ #ifdef DEBUG - ASSERT(invalid == erts_smp_atomic_xchg_relb(&ptab->r.o.tab[pix], + ASSERT(invalid == erts_atomic_xchg_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el)); #else - erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el); + erts_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el); #endif if (rlocked) @@ -644,7 +644,7 @@ save_deleted_element(ErtsPTab *ptab, ErtsPTabElementCommon *ptab_el) sizeof(ErtsPTabDeletedElement)); ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start && ptab->list.data.deleted.end); - ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab)); + ERTS_LC_ASSERT(erts_lc_ptab_is_rwlocked(ptab)); ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab); @@ -654,7 +654,7 @@ save_deleted_element(ErtsPTab *ptab, ErtsPTabElementCommon *ptab_el) ptdep->u.element.id = ptab_el->id; ptdep->u.element.inserted = ptab_el->u.alive.started_interval; ptdep->u.element.deleted = - 
erts_smp_current_interval_nob(erts_ptab_interval(ptab)); + erts_current_interval_nob(erts_ptab_interval(ptab)); ptab->list.data.deleted.end->next = ptdep; ptab->list.data.deleted.end = ptdep; @@ -678,7 +678,7 @@ erts_ptab_delete_element(ErtsPTab *ptab, pix = erts_ptab_id2pix(ptab, ptab_el->id); /* *Need* to be an managed thread */ - ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread()); + ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread()); erts_ptab_rlock(ptab); maybe_save = ptab->list.data.deleted.end != NULL; @@ -687,7 +687,7 @@ erts_ptab_delete_element(ErtsPTab *ptab, erts_ptab_rwlock(ptab); } - erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], ERTS_AINT_NULL); + erts_atomic_set_relb(&ptab->r.o.tab[pix], ERTS_AINT_NULL); if (ptab->r.o.free_id_data) { Uint32 prev_data; @@ -703,17 +703,17 @@ erts_ptab_delete_element(ErtsPTab *ptab, ASSERT(pix == erts_ptab_data2pix(ptab, data)); do { - ix = (Uint32) erts_smp_atomic32_inc_read_relb(&ptab->vola.tile.fid_ix); + ix = (Uint32) erts_atomic32_inc_read_relb(&ptab->vola.tile.fid_ix); ix = ix_to_free_id_data_ix(ptab, ix); - prev_data = erts_smp_atomic32_cmpxchg_nob(&ptab->r.o.free_id_data[ix], + prev_data = erts_atomic32_cmpxchg_nob(&ptab->r.o.free_id_data[ix], data, ptab->r.o.invalid_data); }while ((Eterm)prev_data != ptab->r.o.invalid_data); } - ASSERT(erts_smp_atomic32_read_nob(&ptab->vola.tile.count) > 0); - erts_smp_atomic32_dec_relb(&ptab->vola.tile.count); + ASSERT(erts_atomic32_read_nob(&ptab->vola.tile.count) > 0); + erts_atomic32_dec_relb(&ptab->vola.tile.count); if (!maybe_save) erts_ptab_runlock(ptab); @@ -927,7 +927,7 @@ ptab_list_bif_engine(Process *c_p, Eterm *res_accp, Binary *mbp) sizeof(ErtsPTabDeletedElement)); ptlbdp->bif_invocation->ix = -1; ptlbdp->bif_invocation->u.bif_invocation.interval - = erts_smp_step_interval_nob(erts_ptab_interval(ptab)); + = erts_step_interval_nob(erts_ptab_interval(ptab)); ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab); ptlbdp->bif_invocation->next = NULL; @@ -968,12 +968,12 @@ ptab_list_bif_engine(Process *c_p, Eterm *res_accp, Binary *mbp) locked = 1; } - ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab)); + ERTS_LC_ASSERT(erts_lc_ptab_is_rwlocked(ptab)); ERTS_PTAB_LIST_DBG_TRACE(p->common.id, insp_table); if (cix != 0) ptlbdp->chunk[cix].interval - = erts_smp_step_interval_nob(erts_ptab_interval(ptab)); + = erts_step_interval_nob(erts_ptab_interval(ptab)); else if (ptlbdp->bif_invocation) ptlbdp->chunk[0].interval = *invocation_interval_p; /* else: interval is irrelevant */ @@ -1331,18 +1331,18 @@ static void assert_ptab_consistency(ErtsPTab *ptab) int null_slots = 0; for (ix=0; ix < ptab->r.o.max; ix++) { - if (erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[ix]) != ptab->r.o.invalid_data) { + if (erts_atomic32_read_nob(&ptab->r.o.free_id_data[ix]) != ptab->r.o.invalid_data) { ++free_pids; - data = erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[ix]); + data = erts_atomic32_read_nob(&ptab->r.o.free_id_data[ix]); pix = erts_ptab_data2pix(ptab, (Eterm) data); ASSERT(erts_ptab_pix2intptr_nob(ptab, pix) == ERTS_AINT_NULL); } - if (erts_smp_atomic_read_nob(&ptab->r.o.tab[ix]) == ERTS_AINT_NULL) { + if (erts_atomic_read_nob(&ptab->r.o.tab[ix]) == ERTS_AINT_NULL) { ++null_slots; } } ASSERT(free_pids == null_slots); - ASSERT(free_pids == ptab->r.o.max - erts_smp_atomic32_read_nob(&ptab->vola.tile.count)); + ASSERT(free_pids == ptab->r.o.max - erts_atomic32_read_nob(&ptab->vola.tile.count)); } #endif } @@ -1366,7 +1366,7 @@ erts_ptab_test_next_id(ErtsPTab *ptab, int set, Uint next) Uint32 i, max_ix, 
num, stop_id_ix; max_ix = ptab->r.o.max - 1; num = next; - id_ix = (Uint32) erts_smp_atomic32_read_nob(&ptab->vola.tile.aid_ix); + id_ix = (Uint32) erts_atomic32_read_nob(&ptab->vola.tile.aid_ix); for (i=0; i <= max_ix; ++i) { Uint32 pix; @@ -1380,26 +1380,26 @@ erts_ptab_test_next_id(ErtsPTab *ptab, int set, Uint next) if (ERTS_AINT_NULL == erts_ptab_pix2intptr_nob(ptab, pix)) { ++id_ix; dix = ix_to_free_id_data_ix(ptab, id_ix); - erts_smp_atomic32_set_nob(&ptab->r.o.free_id_data[dix], num); + erts_atomic32_set_nob(&ptab->r.o.free_id_data[dix], num); ASSERT(pix == erts_ptab_data2pix(ptab, num)); } } - erts_smp_atomic32_set_nob(&ptab->vola.tile.fid_ix, id_ix); + erts_atomic32_set_nob(&ptab->vola.tile.fid_ix, id_ix); /* Write invalid_data in rest of free_id_data[]: */ - stop_id_ix = (1 + erts_smp_atomic32_read_nob(&ptab->vola.tile.aid_ix)) & max_ix; + stop_id_ix = (1 + erts_atomic32_read_nob(&ptab->vola.tile.aid_ix)) & max_ix; while (1) { id_ix = (id_ix+1) & max_ix; if (id_ix == stop_id_ix) break; dix = ix_to_free_id_data_ix(ptab, id_ix); - erts_smp_atomic32_set_nob(&ptab->r.o.free_id_data[dix], + erts_atomic32_set_nob(&ptab->r.o.free_id_data[dix], ptab->r.o.invalid_data); } } - id_ix = (Uint32) erts_smp_atomic32_read_nob(&ptab->vola.tile.aid_ix) + 1; + id_ix = (Uint32) erts_atomic32_read_nob(&ptab->vola.tile.aid_ix) + 1; dix = ix_to_free_id_data_ix(ptab, id_ix); - res = (Sint) erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[dix]); + res = (Sint) erts_atomic32_read_nob(&ptab->r.o.free_id_data[dix]); } else { /* Deprecated legacy algorithm... */ @@ -1616,11 +1616,11 @@ debug_ptab_list_verify_all_pids(ErtsPTabListBifData *ptlbdp) static void debug_ptab_list_check_del_list(ErtsPTab *ptab) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab)); + ERTS_LC_ASSERT(erts_lc_ptab_is_rwlocked(ptab)); if (!ptab->list.data.deleted.start) ERTS_PTAB_LIST_ASSERT(!ptab->list.data.deleted.end); else { - Uint64 curr_interval = erts_smp_current_interval_nob(erts_ptab_interval(ptab)); + Uint64 curr_interval = erts_current_interval_nob(erts_ptab_interval(ptab)); Uint64 *prev_x_interval_p = NULL; ErtsPTabDeletedElement *ptdep; diff --git a/erts/emulator/beam/erl_ptab.h b/erts/emulator/beam/erl_ptab.h index fecfd96ab0..4858cc8ab8 100644 --- a/erts/emulator/beam/erl_ptab.h +++ b/erts/emulator/beam/erl_ptab.h @@ -60,7 +60,7 @@ typedef struct { } refc; ErtsTracer tracer; Uint trace_flags; - erts_smp_atomic_t timer; + erts_atomic_t timer; union { /* --- While being alive --- */ struct { @@ -78,7 +78,7 @@ typedef struct { typedef struct ErtsPTabDeletedElement_ ErtsPTabDeletedElement; typedef struct { - erts_smp_rwmtx_t rwmtx; + erts_rwmtx_t rwmtx; erts_interval_t interval; struct { ErtsPTabDeletedElement *start; @@ -88,15 +88,15 @@ typedef struct { } ErtsPTabListData; typedef struct { - erts_smp_atomic64_t last_data; - erts_smp_atomic32_t count; - erts_smp_atomic32_t aid_ix; - erts_smp_atomic32_t fid_ix; + erts_atomic64_t last_data; + erts_atomic32_t count; + erts_atomic32_t aid_ix; + erts_atomic32_t fid_ix; } ErtsPTabVolatileData; typedef struct { - erts_smp_atomic_t *tab; - erts_smp_atomic32_t *free_id_data; + erts_atomic_t *tab; + erts_atomic32_t *free_id_data; Uint32 max; Uint32 pix_mask; Uint32 pix_cl_mask; @@ -223,8 +223,8 @@ ERTS_GLB_INLINE void erts_ptab_runlock(ErtsPTab *ptab); ERTS_GLB_INLINE void erts_ptab_rwlock(ErtsPTab *ptab); ERTS_GLB_INLINE int erts_ptab_tryrwlock(ErtsPTab *ptab); ERTS_GLB_INLINE void erts_ptab_rwunlock(ErtsPTab *ptab); -ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rlocked(ErtsPTab 
*ptab); -ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rwlocked(ErtsPTab *ptab); +ERTS_GLB_INLINE int erts_lc_ptab_is_rlocked(ErtsPTab *ptab); +ERTS_GLB_INLINE int erts_lc_ptab_is_rwlocked(ErtsPTab *ptab); #if ERTS_GLB_INLINE_INCL_FUNC_DEF @@ -245,7 +245,7 @@ ERTS_GLB_INLINE int erts_ptab_count(ErtsPTab *ptab) { int max = ptab->r.o.max; - erts_aint32_t res = erts_smp_atomic32_read_nob(&ptab->vola.tile.count); + erts_aint32_t res = erts_atomic32_read_nob(&ptab->vola.tile.count); if (max == ERTS_PTAB_MAX_SIZE) { max--; res--; @@ -352,25 +352,25 @@ erts_ptab_id2data(ErtsPTab *ptab, Eterm id) ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_nob(ErtsPTab *ptab, int ix) { ASSERT(0 <= ix && ix < ptab->r.o.max); - return erts_smp_atomic_read_nob(&ptab->r.o.tab[ix]); + return erts_atomic_read_nob(&ptab->r.o.tab[ix]); } ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_ddrb(ErtsPTab *ptab, int ix) { ASSERT(0 <= ix && ix < ptab->r.o.max); - return erts_smp_atomic_read_ddrb(&ptab->r.o.tab[ix]); + return erts_atomic_read_ddrb(&ptab->r.o.tab[ix]); } ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_rb(ErtsPTab *ptab, int ix) { ASSERT(0 <= ix && ix < ptab->r.o.max); - return erts_smp_atomic_read_rb(&ptab->r.o.tab[ix]); + return erts_atomic_read_rb(&ptab->r.o.tab[ix]); } ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_acqb(ErtsPTab *ptab, int ix) { ASSERT(0 <= ix && ix < ptab->r.o.max); - return erts_smp_atomic_read_acqb(&ptab->r.o.tab[ix]); + return erts_atomic_read_acqb(&ptab->r.o.tab[ix]); } ERTS_GLB_INLINE void erts_ptab_atmc_inc_refc(ErtsPTabElementCommon *ptab_el) @@ -386,11 +386,9 @@ ERTS_GLB_INLINE void erts_ptab_atmc_inc_refc(ErtsPTabElementCommon *ptab_el) ERTS_GLB_INLINE Sint erts_ptab_atmc_dec_test_refc(ErtsPTabElementCommon *ptab_el) { erts_aint_t refc = erts_atomic_dec_read_relb(&ptab_el->refc.atmc); - ERTS_SMP_LC_ASSERT(refc >= 0); -#ifdef ERTS_SMP + ERTS_LC_ASSERT(refc >= 0); if (refc == 0) ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore); -#endif return (Sint) refc; } @@ -399,7 +397,7 @@ ERTS_GLB_INLINE Sint erts_ptab_atmc_add_test_refc(ErtsPTabElementCommon *ptab_el { erts_aint_t refc = erts_atomic_add_read_mb(&ptab_el->refc.atmc, (erts_aint_t) add_refc); - ERTS_SMP_LC_ASSERT(refc >= 0); + ERTS_LC_ASSERT(refc >= 0); return (Sint) refc; } @@ -417,7 +415,7 @@ ERTS_GLB_INLINE void erts_ptab_inc_refc(ErtsPTabElementCommon *ptab_el) ERTS_GLB_INLINE Sint erts_ptab_dec_test_refc(ErtsPTabElementCommon *ptab_el) { Sint refc = --ptab_el->refc.sint; - ERTS_SMP_LC_ASSERT(refc >= 0); + ERTS_LC_ASSERT(refc >= 0); return refc; } @@ -425,7 +423,7 @@ ERTS_GLB_INLINE Sint erts_ptab_add_test_refc(ErtsPTabElementCommon *ptab_el, Sint add_refc) { ptab_el->refc.sint += add_refc; - ERTS_SMP_LC_ASSERT(ptab_el->refc.sint >= 0); + ERTS_LC_ASSERT(ptab_el->refc.sint >= 0); return (Sint) ptab_el->refc.sint; } @@ -436,42 +434,42 @@ ERTS_GLB_INLINE Sint erts_ptab_read_refc(ErtsPTabElementCommon *ptab_el) ERTS_GLB_INLINE void erts_ptab_rlock(ErtsPTab *ptab) { - erts_smp_rwmtx_rlock(&ptab->list.data.rwmtx); + erts_rwmtx_rlock(&ptab->list.data.rwmtx); } ERTS_GLB_INLINE int erts_ptab_tryrlock(ErtsPTab *ptab) { - return erts_smp_rwmtx_tryrlock(&ptab->list.data.rwmtx); + return erts_rwmtx_tryrlock(&ptab->list.data.rwmtx); } ERTS_GLB_INLINE void erts_ptab_runlock(ErtsPTab *ptab) { - erts_smp_rwmtx_runlock(&ptab->list.data.rwmtx); + erts_rwmtx_runlock(&ptab->list.data.rwmtx); } ERTS_GLB_INLINE void erts_ptab_rwlock(ErtsPTab *ptab) { - erts_smp_rwmtx_rwlock(&ptab->list.data.rwmtx); + erts_rwmtx_rwlock(&ptab->list.data.rwmtx); } 
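Note: the erts_ptab_new_element and erts_ptab_delete_element hunks above manage free ids through a lock-free ring. Allocation bumps aid_ix and swaps invalid_data into slots until it pulls out a real id; deletion bumps fid_ix and cmpxchg-es the id back into a slot that still holds invalid_data. A self-contained sketch of both sides under simplifying assumptions (fixed power-of-two size, no cache-line striping, invented names):

    #include <stdatomic.h>
    #include <stdint.h>

    #define RING_SIZE 1024u             /* power of two, illustrative */
    #define INVALID   UINT32_MAX        /* stands in for invalid_data */

    typedef struct {
        _Atomic uint32_t aid_ix;        /* next allocation index */
        _Atomic uint32_t fid_ix;        /* next free/return index */
        _Atomic uint32_t slot[RING_SIZE];
    } id_ring;

    /* Allocation side (cf. erts_ptab_new_element): keep advancing
       until we manage to swap a real id out of a slot. */
    static uint32_t ring_take(id_ring *r)
    {
        uint32_t data;
        do {
            uint32_t ix = atomic_fetch_add_explicit(&r->aid_ix, 1,
                                                    memory_order_acquire);
            data = atomic_exchange_explicit(&r->slot[(ix + 1) % RING_SIZE],
                                            INVALID, memory_order_relaxed);
        } while (data == INVALID);      /* slot not refilled yet */
        return data;
    }

    /* Release side (cf. erts_ptab_delete_element): keep advancing
       until we find a slot still holding INVALID to park the id in. */
    static void ring_give(id_ring *r, uint32_t data)
    {
        uint32_t prev;
        do {
            uint32_t ix = atomic_fetch_add_explicit(&r->fid_ix, 1,
                                                    memory_order_release);
            prev = INVALID;
            atomic_compare_exchange_strong_explicit(
                &r->slot[(ix + 1) % RING_SIZE], &prev, data,
                memory_order_relaxed, memory_order_relaxed);
        } while (prev != INVALID);      /* slot still occupied */
    }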
ERTS_GLB_INLINE int erts_ptab_tryrwlock(ErtsPTab *ptab) { - return erts_smp_rwmtx_tryrwlock(&ptab->list.data.rwmtx); + return erts_rwmtx_tryrwlock(&ptab->list.data.rwmtx); } ERTS_GLB_INLINE void erts_ptab_rwunlock(ErtsPTab *ptab) { - erts_smp_rwmtx_rwunlock(&ptab->list.data.rwmtx); + erts_rwmtx_rwunlock(&ptab->list.data.rwmtx); } -ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rlocked(ErtsPTab *ptab) +ERTS_GLB_INLINE int erts_lc_ptab_is_rlocked(ErtsPTab *ptab) { - return erts_smp_lc_rwmtx_is_rlocked(&ptab->list.data.rwmtx); + return erts_lc_rwmtx_is_rlocked(&ptab->list.data.rwmtx); } -ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rwlocked(ErtsPTab *ptab) +ERTS_GLB_INLINE int erts_lc_ptab_is_rwlocked(ErtsPTab *ptab) { - return erts_smp_lc_rwmtx_is_rwlocked(&ptab->list.data.rwmtx); + return erts_lc_rwmtx_is_rwlocked(&ptab->list.data.rwmtx); } #endif diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.c b/erts/emulator/beam/erl_sched_spec_pre_alloc.c index 96238318c9..ab204303d7 100644 --- a/erts/emulator/beam/erl_sched_spec_pre_alloc.c +++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.c @@ -32,13 +32,12 @@ # include "config.h" #endif -#ifdef ERTS_SMP #include "erl_process.h" #include "erl_thr_progress.h" erts_sspa_data_t * -erts_sspa_create(size_t blk_sz, int pa_size) +erts_sspa_create(size_t blk_sz, int pa_size, int nthreads, const char* name) { erts_sspa_data_t *data; size_t tot_size; @@ -49,22 +48,30 @@ erts_sspa_create(size_t blk_sz, int pa_size) int no_blocks = pa_size; int no_blocks_per_chunk; - if (erts_no_schedulers == 1) + if (!name) { /* schedulers only variant */ + ASSERT(!nthreads); + nthreads = erts_no_schedulers; + } + else { + ASSERT(nthreads > 0); + } + + if (nthreads == 1) no_blocks_per_chunk = no_blocks; else { int extra = (no_blocks - 1)/4 + 1; if (extra == 0) extra = 1; no_blocks_per_chunk = no_blocks; - no_blocks_per_chunk += extra*erts_no_schedulers; - no_blocks_per_chunk /= erts_no_schedulers; + no_blocks_per_chunk += extra * nthreads; + no_blocks_per_chunk /= nthreads; } - no_blocks = no_blocks_per_chunk * erts_no_schedulers; + no_blocks = no_blocks_per_chunk * nthreads; chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_chunk_header_t)); chunk_mem_size += blk_sz * no_blocks_per_chunk; chunk_mem_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(chunk_mem_size); tot_size = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_sspa_data_t)); - tot_size += chunk_mem_size*erts_no_schedulers; + tot_size += chunk_mem_size * nthreads; p = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_PRE_ALLOC_DATA, tot_size); data = (erts_sspa_data_t *) p; @@ -73,10 +80,16 @@ erts_sspa_create(size_t blk_sz, int pa_size) data->chunks_mem_size = chunk_mem_size; data->start = chunk_start; - data->end = chunk_start + chunk_mem_size*erts_no_schedulers; + data->end = chunk_start + chunk_mem_size * nthreads; + data->nthreads = nthreads; + + if (name) { /* thread variant */ + erts_tsd_key_create(&data->tsd_key, (char*)name); + erts_atomic_init_nob(&data->id_generator, 0); + } /* Initialize all chunks */ - for (cix = 0; cix < erts_no_schedulers; cix++) { + for (cix = 0; cix < nthreads; cix++) { erts_sspa_chunk_t *chnk = erts_sspa_cix2chunk(data, cix); erts_sspa_chunk_header_t *chdr = &chnk->aligned.header; erts_sspa_blk_t *blk; @@ -325,4 +338,3 @@ erts_sspa_process_remote_frees(erts_sspa_chunk_header_t *chdr, return res; } -#endif /* ERTS_SMP */ diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.h b/erts/emulator/beam/erl_sched_spec_pre_alloc.h index 7808d7d438..d232db0e69 100644 --- 
a/erts/emulator/beam/erl_sched_spec_pre_alloc.h +++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.h @@ -31,7 +31,6 @@ #ifndef ERTS_SCHED_SPEC_PRE_ALLOC_H__ #define ERTS_SCHED_SPEC_PRE_ALLOC_H__ -#ifdef ERTS_SMP #undef ERL_THR_PROGRESS_TSD_TYPE_ONLY #define ERL_THR_PROGRESS_TSD_TYPE_ONLY @@ -60,6 +59,11 @@ typedef struct { char *start; char *end; int chunks_mem_size; + int nthreads; + + /* Used only by thread variant: */ + erts_tsd_key_t tsd_key; + erts_atomic_t id_generator; } erts_sspa_data_t; typedef union erts_sspa_blk_t_ erts_sspa_blk_t; @@ -141,7 +145,9 @@ check_local_list(erts_sspa_chunk_header_t *chdr) #endif erts_sspa_data_t *erts_sspa_create(size_t blk_sz, - int pa_size); + int pa_size, + int nthreads, + const char* name); void erts_sspa_remote_free(erts_sspa_chunk_header_t *chdr, erts_sspa_blk_t *blk, int cinit); @@ -159,7 +165,7 @@ ERTS_GLB_INLINE int erts_sspa_free(erts_sspa_data_t *data, int cix, char *blk); ERTS_GLB_INLINE erts_sspa_chunk_t * erts_sspa_cix2chunk(erts_sspa_data_t *data, int cix) { - ASSERT(0 <= cix && cix < erts_no_schedulers); + ASSERT(0 <= cix && cix < data->nthreads); return (erts_sspa_chunk_t *) (data->start + cix*data->chunks_mem_size); } @@ -172,7 +178,7 @@ erts_sspa_ptr2cix(erts_sspa_data_t *data, void *ptr) return -1; diff = ((char *) ptr) - data->start; cix = (int) diff / data->chunks_mem_size; - ASSERT(0 <= cix && cix < erts_no_schedulers); + ASSERT(0 <= cix && cix < data->nthreads); return cix; } @@ -236,6 +242,5 @@ erts_sspa_free(erts_sspa_data_t *data, int cix, char *cblk) #endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ -#endif /* ERTS_SMP */ #endif /* ERTS_SCHED_SPEC_PRE_ALLOC_H__ */ diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h deleted file mode 100644 index 696bdbdaf1..0000000000 --- a/erts/emulator/beam/erl_smp.h +++ /dev/null @@ -1,1585 +0,0 @@ -/* - * %CopyrightBegin% - * - * Copyright Ericsson AB 2005-2017. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * %CopyrightEnd% - */ -/* - * SMP interface to ethread library. - * This is essentially "sed s/erts_/erts_smp_/g < erl_threads.h > erl_smp.h", - * plus changes to NOP operations when ERTS_SMP is disabled. 
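Note on the erl_sched_spec_pre_alloc changes above, before the deleted erl_smp.h continues: erts_sspa_create is generalized from one chunk per scheduler to one chunk per thread, each chunk getting an equal share of the requested blocks plus roughly 25% headroom, and erts_sspa_ptr2cix recovers a block's owning chunk by plain pointer arithmetic. A small standalone sketch of both calculations (function names invented):

    #include <stddef.h>

    /* Block count per chunk, as computed in erts_sspa_create above. */
    static int blocks_per_chunk(int no_blocks, int nthreads)
    {
        int extra;
        if (nthreads == 1)
            return no_blocks;
        extra = (no_blocks - 1) / 4 + 1;   /* ~25% headroom, at least 1 */
        return (no_blocks + extra * nthreads) / nthreads;
    }

    /* Chunk index from a block pointer, as in erts_sspa_ptr2cix above. */
    static int ptr2cix(const char *start, const char *end,
                       size_t chunk_mem_size, const void *ptr)
    {
        const char *p = (const char *) ptr;
        if (p < start || p >= end)
            return -1;                     /* pointer not from this pool */
        return (int) ((size_t) (p - start) / chunk_mem_size);
    }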
- * Author: Mikael Pettersson - */ -#ifndef ERL_SMP_H -#define ERL_SMP_H -#include "erl_threads.h" - -#ifdef ERTS_ENABLE_LOCK_POSITION -#define erts_smp_mtx_lock(L) erts_smp_mtx_lock_x(L, __FILE__, __LINE__) -#define erts_smp_mtx_trylock(L) erts_smp_mtx_trylock_x(L, __FILE__, __LINE__) -#define erts_smp_spin_lock(L) erts_smp_spin_lock_x(L, __FILE__, __LINE__) -#define erts_smp_rwmtx_tryrlock(L) erts_smp_rwmtx_tryrlock_x(L, __FILE__, __LINE__) -#define erts_smp_rwmtx_rlock(L) erts_smp_rwmtx_rlock_x(L, __FILE__, __LINE__) -#define erts_smp_rwmtx_tryrwlock(L) erts_smp_rwmtx_tryrwlock_x(L, __FILE__, __LINE__) -#define erts_smp_rwmtx_rwlock(L) erts_smp_rwmtx_rwlock_x(L, __FILE__, __LINE__) -#define erts_smp_read_lock(L) erts_smp_read_lock_x(L, __FILE__, __LINE__) -#define erts_smp_write_lock(L) erts_smp_write_lock_x(L, __FILE__, __LINE__) -#endif - - -#ifdef ERTS_SMP -#define ERTS_SMP_THR_OPTS_DEFAULT_INITER ERTS_THR_OPTS_DEFAULT_INITER -typedef erts_thr_opts_t erts_smp_thr_opts_t; -typedef erts_thr_init_data_t erts_smp_thr_init_data_t; -typedef erts_tid_t erts_smp_tid_t; -typedef erts_mtx_t erts_smp_mtx_t; -typedef erts_cnd_t erts_smp_cnd_t; -#define ERTS_SMP_RWMTX_OPT_DEFAULT_INITER ERTS_RWMTX_OPT_DEFAULT_INITER -#define ERTS_SMP_RWMTX_TYPE_NORMAL ERTS_RWMTX_TYPE_NORMAL -#define ERTS_SMP_RWMTX_TYPE_FREQUENT_READ ERTS_RWMTX_TYPE_FREQUENT_READ -#define ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ \ - ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ -#define ERTS_SMP_RWMTX_LONG_LIVED ERTS_RWMTX_LONG_LIVED -#define ERTS_SMP_RWMTX_SHORT_LIVED ERTS_RWMTX_SHORT_LIVED -#define ERTS_SMP_RWMTX_UNKNOWN_LIVED ERTS_RWMTX_UNKNOWN_LIVED -typedef erts_rwmtx_opt_t erts_smp_rwmtx_opt_t; -typedef erts_rwmtx_t erts_smp_rwmtx_t; -typedef erts_tsd_key_t erts_smp_tsd_key_t; -#define erts_smp_dw_atomic_t erts_dw_atomic_t -#define erts_smp_atomic_t erts_atomic_t -#define erts_smp_atomic32_t erts_atomic32_t -#define erts_smp_atomic64_t erts_atomic64_t -typedef erts_spinlock_t erts_smp_spinlock_t; -typedef erts_rwlock_t erts_smp_rwlock_t; -void erts_thr_fatal_error(int, char *); /* implemented in erl_init.c */ - -#define ERTS_SMP_MEMORY_BARRIER ERTS_THR_MEMORY_BARRIER -#define ERTS_SMP_WRITE_MEMORY_BARRIER ERTS_THR_WRITE_MEMORY_BARRIER -#define ERTS_SMP_READ_MEMORY_BARRIER ERTS_THR_READ_MEMORY_BARRIER -#define ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER - -#else /* #ifdef ERTS_SMP */ - -#define ERTS_SMP_THR_OPTS_DEFAULT_INITER {0} -typedef int erts_smp_thr_opts_t; -typedef int erts_smp_thr_init_data_t; -typedef int erts_smp_tid_t; -typedef int erts_smp_mtx_t; -typedef int erts_smp_cnd_t; -#define ERTS_SMP_RWMTX_OPT_DEFAULT_INITER {0} -#define ERTS_SMP_RWMTX_TYPE_NORMAL 0 -#define ERTS_SMP_RWMTX_TYPE_FREQUENT_READ 0 -#define ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ 0 -#define ERTS_SMP_RWMTX_LONG_LIVED 0 -#define ERTS_SMP_RWMTX_SHORT_LIVED 0 -#define ERTS_SMP_RWMTX_UNKNOWN_LIVED 0 -typedef struct { - char type; - char lived; - int main_spincount; - int aux_spincount; -} erts_smp_rwmtx_opt_t; -typedef int erts_smp_rwmtx_t; -typedef int erts_smp_tsd_key_t; -#define erts_smp_dw_atomic_t erts_no_dw_atomic_t -#define erts_smp_atomic_t erts_no_atomic_t -#define erts_smp_atomic32_t erts_no_atomic32_t -#define erts_smp_atomic64_t erts_no_atomic64_t -#if __GNUC__ > 2 -typedef struct { } erts_smp_spinlock_t; -typedef struct { } erts_smp_rwlock_t; -#else -typedef struct { int gcc_is_buggy; } erts_smp_spinlock_t; -typedef struct { int gcc_is_buggy; } erts_smp_rwlock_t; -#endif - -#define 
ERTS_SMP_MEMORY_BARRIER -#define ERTS_SMP_WRITE_MEMORY_BARRIER -#define ERTS_SMP_READ_MEMORY_BARRIER -#define ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER - -#endif /* #ifdef ERTS_SMP */ - -ERTS_GLB_INLINE void erts_smp_thr_init(erts_smp_thr_init_data_t *id); -ERTS_GLB_INLINE void erts_smp_thr_create(erts_smp_tid_t *tid, - void * (*func)(void *), - void *arg, - erts_smp_thr_opts_t *opts); -ERTS_GLB_INLINE void erts_smp_thr_join(erts_smp_tid_t tid, void **thr_res); -ERTS_GLB_INLINE void erts_smp_thr_detach(erts_smp_tid_t tid); -ERTS_GLB_INLINE void erts_smp_thr_exit(void *res); -ERTS_GLB_INLINE void erts_smp_install_exit_handler(void (*exit_handler)(void)); -ERTS_GLB_INLINE erts_smp_tid_t erts_smp_thr_self(void); -ERTS_GLB_INLINE int erts_smp_equal_tids(erts_smp_tid_t x, erts_smp_tid_t y); -#ifdef ERTS_HAVE_REC_MTX_INIT -#define ERTS_SMP_HAVE_REC_MTX_INIT 1 -ERTS_GLB_INLINE void erts_smp_rec_mtx_init(erts_smp_mtx_t *mtx); -#endif -ERTS_GLB_INLINE void erts_smp_mtx_init(erts_smp_mtx_t *mtx, - char *name, - Eterm extra, - erts_lock_flags_t flags); -ERTS_GLB_INLINE void erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, - char *name, - Eterm extra, - erts_lock_flags_t flags); -ERTS_GLB_INLINE void erts_smp_mtx_destroy(erts_smp_mtx_t *mtx); -#ifdef ERTS_ENABLE_LOCK_POSITION -ERTS_GLB_INLINE int erts_smp_mtx_trylock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line); -ERTS_GLB_INLINE void erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line); -#else -ERTS_GLB_INLINE int erts_smp_mtx_trylock(erts_smp_mtx_t *mtx); -ERTS_GLB_INLINE void erts_smp_mtx_lock(erts_smp_mtx_t *mtx); -#endif -ERTS_GLB_INLINE void erts_smp_mtx_unlock(erts_smp_mtx_t *mtx); -ERTS_GLB_INLINE int erts_smp_lc_mtx_is_locked(erts_smp_mtx_t *mtx); -ERTS_GLB_INLINE void erts_smp_cnd_init(erts_smp_cnd_t *cnd); -ERTS_GLB_INLINE void erts_smp_cnd_destroy(erts_smp_cnd_t *cnd); -ERTS_GLB_INLINE void erts_smp_cnd_wait(erts_smp_cnd_t *cnd, - erts_smp_mtx_t *mtx); -ERTS_GLB_INLINE void erts_smp_cnd_signal(erts_smp_cnd_t *cnd); -ERTS_GLB_INLINE void erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd); -ERTS_GLB_INLINE void erts_smp_rwmtx_set_reader_group(int no); -ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx, - erts_smp_rwmtx_opt_t *opt, - char *name, - Eterm extra, - erts_lock_flags_t flags); -ERTS_GLB_INLINE void erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx, - char *name, - Eterm extra, - erts_lock_flags_t flags); -ERTS_GLB_INLINE void erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx); -#ifdef ERTS_ENABLE_LOCK_POSITION -ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line); -ERTS_GLB_INLINE void erts_smp_rwmtx_rlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line); -ERTS_GLB_INLINE void erts_smp_rwmtx_rwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line); -ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line); -#else -ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx); -ERTS_GLB_INLINE void erts_smp_rwmtx_rlock(erts_smp_rwmtx_t *rwmtx); -ERTS_GLB_INLINE void erts_smp_rwmtx_rwlock(erts_smp_rwmtx_t *rwmtx); -ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx); -#endif -ERTS_GLB_INLINE void erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx); -ERTS_GLB_INLINE void erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx); -ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rlocked(erts_smp_rwmtx_t *mtx); -ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx); 
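Note: as the deleted header's own comment states, erl_smp.h was essentially a renamed copy of the erl_threads.h API whose operations degraded to no-ops or dummy types (typedef int erts_smp_mtx_t, and so on) in a non-SMP build. With the non-SMP emulator gone, the whole indirection layer can be removed. The scheme in miniature, with invented names:

    #ifdef MY_SMP
    #  include <pthread.h>
    typedef pthread_mutex_t my_smp_mtx;           /* real lock */
    #  define my_smp_mtx_lock(M)   pthread_mutex_lock(M)
    #  define my_smp_mtx_unlock(M) pthread_mutex_unlock(M)
    #else
    typedef int my_smp_mtx;                       /* dummy, as in erl_smp.h */
    #  define my_smp_mtx_lock(M)   ((void) (M))   /* no-op without threads */
    #  define my_smp_mtx_unlock(M) ((void) (M))
    #endif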
-ERTS_GLB_INLINE void erts_smp_spinlock_init(erts_smp_spinlock_t *lock, - char *name, - Eterm extra, - erts_lock_flags_t flags); -ERTS_GLB_INLINE void erts_smp_spinlock_destroy(erts_smp_spinlock_t *lock); -ERTS_GLB_INLINE void erts_smp_spin_unlock(erts_smp_spinlock_t *lock); -#ifdef ERTS_ENABLE_LOCK_POSITION -ERTS_GLB_INLINE void erts_smp_spin_lock_x(erts_smp_spinlock_t *lock, char *file, unsigned int line); -#else -ERTS_GLB_INLINE void erts_smp_spin_lock(erts_smp_spinlock_t *lock); -#endif -ERTS_GLB_INLINE int erts_smp_lc_spinlock_is_locked(erts_smp_spinlock_t *lock); -ERTS_GLB_INLINE void erts_smp_rwlock_init(erts_smp_rwlock_t *lock, - char *name, - Eterm extra, - erts_lock_flags_t flags); -ERTS_GLB_INLINE void erts_smp_rwlock_destroy(erts_smp_rwlock_t *lock); -ERTS_GLB_INLINE void erts_smp_read_unlock(erts_smp_rwlock_t *lock); -#ifdef ERTS_ENABLE_LOCK_POSITION -ERTS_GLB_INLINE void erts_smp_read_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line); -ERTS_GLB_INLINE void erts_smp_write_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line); -#else -ERTS_GLB_INLINE void erts_smp_read_lock(erts_smp_rwlock_t *lock); -ERTS_GLB_INLINE void erts_smp_write_lock(erts_smp_rwlock_t *lock); -#endif -ERTS_GLB_INLINE void erts_smp_write_unlock(erts_smp_rwlock_t *lock); -ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rlocked(erts_smp_rwlock_t *lock); -ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock); -ERTS_GLB_INLINE void erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp, - char *keyname); -ERTS_GLB_INLINE void erts_smp_tsd_key_delete(erts_smp_tsd_key_t key); -ERTS_GLB_INLINE void erts_smp_tsd_set(erts_smp_tsd_key_t key, void *value); -ERTS_GLB_INLINE void * erts_smp_tsd_get(erts_smp_tsd_key_t key); - -#ifdef ERTS_THR_HAVE_SIG_FUNCS -#define ERTS_SMP_THR_HAVE_SIG_FUNCS 1 -ERTS_GLB_INLINE void erts_smp_thr_sigmask(int how, - const sigset_t *set, - sigset_t *oset); -ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); -#endif /* #ifdef ERTS_THR_HAVE_SIG_FUNCS */ - -/* - * See "Documentation of atomics and memory barriers" at the top - * of erl_threads.h for info on atomics. 
- */ - -#ifdef ERTS_SMP - -/* Double word size atomics */ - -#define erts_smp_dw_atomic_init_nob erts_dw_atomic_init_nob -#define erts_smp_dw_atomic_set_nob erts_dw_atomic_set_nob -#define erts_smp_dw_atomic_read_nob erts_dw_atomic_read_nob -#define erts_smp_dw_atomic_cmpxchg_nob erts_dw_atomic_cmpxchg_nob - -#define erts_smp_dw_atomic_init_mb erts_dw_atomic_init_mb -#define erts_smp_dw_atomic_set_mb erts_dw_atomic_set_mb -#define erts_smp_dw_atomic_read_mb erts_dw_atomic_read_mb -#define erts_smp_dw_atomic_cmpxchg_mb erts_dw_atomic_cmpxchg_mb - -#define erts_smp_dw_atomic_init_acqb erts_dw_atomic_init_acqb -#define erts_smp_dw_atomic_set_acqb erts_dw_atomic_set_acqb -#define erts_smp_dw_atomic_read_acqb erts_dw_atomic_read_acqb -#define erts_smp_dw_atomic_cmpxchg_acqb erts_dw_atomic_cmpxchg_acqb - -#define erts_smp_dw_atomic_init_relb erts_dw_atomic_init_relb -#define erts_smp_dw_atomic_set_relb erts_dw_atomic_set_relb -#define erts_smp_dw_atomic_read_relb erts_dw_atomic_read_relb -#define erts_smp_dw_atomic_cmpxchg_relb erts_dw_atomic_cmpxchg_relb - -#define erts_smp_dw_atomic_init_ddrb erts_dw_atomic_init_ddrb -#define erts_smp_dw_atomic_set_ddrb erts_dw_atomic_set_ddrb -#define erts_smp_dw_atomic_read_ddrb erts_dw_atomic_read_ddrb -#define erts_smp_dw_atomic_cmpxchg_ddrb erts_dw_atomic_cmpxchg_ddrb - -#define erts_smp_dw_atomic_init_rb erts_dw_atomic_init_rb -#define erts_smp_dw_atomic_set_rb erts_dw_atomic_set_rb -#define erts_smp_dw_atomic_read_rb erts_dw_atomic_read_rb -#define erts_smp_dw_atomic_cmpxchg_rb erts_dw_atomic_cmpxchg_rb - -#define erts_smp_dw_atomic_init_wb erts_dw_atomic_init_wb -#define erts_smp_dw_atomic_set_wb erts_dw_atomic_set_wb -#define erts_smp_dw_atomic_read_wb erts_dw_atomic_read_wb -#define erts_smp_dw_atomic_cmpxchg_wb erts_dw_atomic_cmpxchg_wb - -#define erts_smp_dw_atomic_set_dirty erts_dw_atomic_set_dirty -#define erts_smp_dw_atomic_read_dirty erts_dw_atomic_read_dirty - -/* Word size atomics */ - -#define erts_smp_atomic_init_nob erts_atomic_init_nob -#define erts_smp_atomic_set_nob erts_atomic_set_nob -#define erts_smp_atomic_read_nob erts_atomic_read_nob -#define erts_smp_atomic_inc_read_nob erts_atomic_inc_read_nob -#define erts_smp_atomic_dec_read_nob erts_atomic_dec_read_nob -#define erts_smp_atomic_inc_nob erts_atomic_inc_nob -#define erts_smp_atomic_dec_nob erts_atomic_dec_nob -#define erts_smp_atomic_add_read_nob erts_atomic_add_read_nob -#define erts_smp_atomic_add_nob erts_atomic_add_nob -#define erts_smp_atomic_read_bor_nob erts_atomic_read_bor_nob -#define erts_smp_atomic_read_band_nob erts_atomic_read_band_nob -#define erts_smp_atomic_xchg_nob erts_atomic_xchg_nob -#define erts_smp_atomic_cmpxchg_nob erts_atomic_cmpxchg_nob -#define erts_smp_atomic_read_bset_nob erts_atomic_read_bset_nob - -#define erts_smp_atomic_init_mb erts_atomic_init_mb -#define erts_smp_atomic_set_mb erts_atomic_set_mb -#define erts_smp_atomic_read_mb erts_atomic_read_mb -#define erts_smp_atomic_inc_read_mb erts_atomic_inc_read_mb -#define erts_smp_atomic_dec_read_mb erts_atomic_dec_read_mb -#define erts_smp_atomic_inc_mb erts_atomic_inc_mb -#define erts_smp_atomic_dec_mb erts_atomic_dec_mb -#define erts_smp_atomic_add_read_mb erts_atomic_add_read_mb -#define erts_smp_atomic_add_mb erts_atomic_add_mb -#define erts_smp_atomic_read_bor_mb erts_atomic_read_bor_mb -#define erts_smp_atomic_read_band_mb erts_atomic_read_band_mb -#define erts_smp_atomic_xchg_mb erts_atomic_xchg_mb -#define erts_smp_atomic_cmpxchg_mb erts_atomic_cmpxchg_mb -#define 
erts_smp_atomic_read_bset_mb erts_atomic_read_bset_mb - -#define erts_smp_atomic_init_acqb erts_atomic_init_acqb -#define erts_smp_atomic_set_acqb erts_atomic_set_acqb -#define erts_smp_atomic_read_acqb erts_atomic_read_acqb -#define erts_smp_atomic_inc_read_acqb erts_atomic_inc_read_acqb -#define erts_smp_atomic_dec_read_acqb erts_atomic_dec_read_acqb -#define erts_smp_atomic_inc_acqb erts_atomic_inc_acqb -#define erts_smp_atomic_dec_acqb erts_atomic_dec_acqb -#define erts_smp_atomic_add_read_acqb erts_atomic_add_read_acqb -#define erts_smp_atomic_add_acqb erts_atomic_add_acqb -#define erts_smp_atomic_read_bor_acqb erts_atomic_read_bor_acqb -#define erts_smp_atomic_read_band_acqb erts_atomic_read_band_acqb -#define erts_smp_atomic_xchg_acqb erts_atomic_xchg_acqb -#define erts_smp_atomic_cmpxchg_acqb erts_atomic_cmpxchg_acqb -#define erts_smp_atomic_read_bset_acqb erts_atomic_read_bset_acqb - -#define erts_smp_atomic_init_relb erts_atomic_init_relb -#define erts_smp_atomic_set_relb erts_atomic_set_relb -#define erts_smp_atomic_read_relb erts_atomic_read_relb -#define erts_smp_atomic_inc_read_relb erts_atomic_inc_read_relb -#define erts_smp_atomic_dec_read_relb erts_atomic_dec_read_relb -#define erts_smp_atomic_inc_relb erts_atomic_inc_relb -#define erts_smp_atomic_dec_relb erts_atomic_dec_relb -#define erts_smp_atomic_add_read_relb erts_atomic_add_read_relb -#define erts_smp_atomic_add_relb erts_atomic_add_relb -#define erts_smp_atomic_read_bor_relb erts_atomic_read_bor_relb -#define erts_smp_atomic_read_band_relb erts_atomic_read_band_relb -#define erts_smp_atomic_xchg_relb erts_atomic_xchg_relb -#define erts_smp_atomic_cmpxchg_relb erts_atomic_cmpxchg_relb -#define erts_smp_atomic_read_bset_relb erts_atomic_read_bset_relb - -#define erts_smp_atomic_init_ddrb erts_atomic_init_ddrb -#define erts_smp_atomic_set_ddrb erts_atomic_set_ddrb -#define erts_smp_atomic_read_ddrb erts_atomic_read_ddrb -#define erts_smp_atomic_inc_read_ddrb erts_atomic_inc_read_ddrb -#define erts_smp_atomic_dec_read_ddrb erts_atomic_dec_read_ddrb -#define erts_smp_atomic_inc_ddrb erts_atomic_inc_ddrb -#define erts_smp_atomic_dec_ddrb erts_atomic_dec_ddrb -#define erts_smp_atomic_add_read_ddrb erts_atomic_add_read_ddrb -#define erts_smp_atomic_add_ddrb erts_atomic_add_ddrb -#define erts_smp_atomic_read_bor_ddrb erts_atomic_read_bor_ddrb -#define erts_smp_atomic_read_band_ddrb erts_atomic_read_band_ddrb -#define erts_smp_atomic_xchg_ddrb erts_atomic_xchg_ddrb -#define erts_smp_atomic_cmpxchg_ddrb erts_atomic_cmpxchg_ddrb -#define erts_smp_atomic_read_bset_ddrb erts_atomic_read_bset_ddrb - -#define erts_smp_atomic_init_rb erts_atomic_init_rb -#define erts_smp_atomic_set_rb erts_atomic_set_rb -#define erts_smp_atomic_read_rb erts_atomic_read_rb -#define erts_smp_atomic_inc_read_rb erts_atomic_inc_read_rb -#define erts_smp_atomic_dec_read_rb erts_atomic_dec_read_rb -#define erts_smp_atomic_inc_rb erts_atomic_inc_rb -#define erts_smp_atomic_dec_rb erts_atomic_dec_rb -#define erts_smp_atomic_add_read_rb erts_atomic_add_read_rb -#define erts_smp_atomic_add_rb erts_atomic_add_rb -#define erts_smp_atomic_read_bor_rb erts_atomic_read_bor_rb -#define erts_smp_atomic_read_band_rb erts_atomic_read_band_rb -#define erts_smp_atomic_xchg_rb erts_atomic_xchg_rb -#define erts_smp_atomic_cmpxchg_rb erts_atomic_cmpxchg_rb -#define erts_smp_atomic_read_bset_rb erts_atomic_read_bset_rb - -#define erts_smp_atomic_init_wb erts_atomic_init_wb -#define erts_smp_atomic_set_wb erts_atomic_set_wb -#define erts_smp_atomic_read_wb 
erts_atomic_read_wb -#define erts_smp_atomic_inc_read_wb erts_atomic_inc_read_wb -#define erts_smp_atomic_dec_read_wb erts_atomic_dec_read_wb -#define erts_smp_atomic_inc_wb erts_atomic_inc_wb -#define erts_smp_atomic_dec_wb erts_atomic_dec_wb -#define erts_smp_atomic_add_read_wb erts_atomic_add_read_wb -#define erts_smp_atomic_add_wb erts_atomic_add_wb -#define erts_smp_atomic_read_bor_wb erts_atomic_read_bor_wb -#define erts_smp_atomic_read_band_wb erts_atomic_read_band_wb -#define erts_smp_atomic_xchg_wb erts_atomic_xchg_wb -#define erts_smp_atomic_cmpxchg_wb erts_atomic_cmpxchg_wb -#define erts_smp_atomic_read_bset_wb erts_atomic_read_bset_wb - -#define erts_smp_atomic_set_dirty erts_atomic_set_dirty -#define erts_smp_atomic_read_dirty erts_atomic_read_dirty - -/* 32-bit atomics */ - -#define erts_smp_atomic32_init_nob erts_atomic32_init_nob -#define erts_smp_atomic32_set_nob erts_atomic32_set_nob -#define erts_smp_atomic32_read_nob erts_atomic32_read_nob -#define erts_smp_atomic32_inc_read_nob erts_atomic32_inc_read_nob -#define erts_smp_atomic32_dec_read_nob erts_atomic32_dec_read_nob -#define erts_smp_atomic32_inc_nob erts_atomic32_inc_nob -#define erts_smp_atomic32_dec_nob erts_atomic32_dec_nob -#define erts_smp_atomic32_add_read_nob erts_atomic32_add_read_nob -#define erts_smp_atomic32_add_nob erts_atomic32_add_nob -#define erts_smp_atomic32_read_bor_nob erts_atomic32_read_bor_nob -#define erts_smp_atomic32_read_band_nob erts_atomic32_read_band_nob -#define erts_smp_atomic32_xchg_nob erts_atomic32_xchg_nob -#define erts_smp_atomic32_cmpxchg_nob erts_atomic32_cmpxchg_nob -#define erts_smp_atomic32_read_bset_nob erts_atomic32_read_bset_nob - -#define erts_smp_atomic32_init_mb erts_atomic32_init_mb -#define erts_smp_atomic32_set_mb erts_atomic32_set_mb -#define erts_smp_atomic32_read_mb erts_atomic32_read_mb -#define erts_smp_atomic32_inc_read_mb erts_atomic32_inc_read_mb -#define erts_smp_atomic32_dec_read_mb erts_atomic32_dec_read_mb -#define erts_smp_atomic32_inc_mb erts_atomic32_inc_mb -#define erts_smp_atomic32_dec_mb erts_atomic32_dec_mb -#define erts_smp_atomic32_add_read_mb erts_atomic32_add_read_mb -#define erts_smp_atomic32_add_mb erts_atomic32_add_mb -#define erts_smp_atomic32_read_bor_mb erts_atomic32_read_bor_mb -#define erts_smp_atomic32_read_band_mb erts_atomic32_read_band_mb -#define erts_smp_atomic32_xchg_mb erts_atomic32_xchg_mb -#define erts_smp_atomic32_cmpxchg_mb erts_atomic32_cmpxchg_mb -#define erts_smp_atomic32_read_bset_mb erts_atomic32_read_bset_mb - -#define erts_smp_atomic32_init_acqb erts_atomic32_init_acqb -#define erts_smp_atomic32_set_acqb erts_atomic32_set_acqb -#define erts_smp_atomic32_read_acqb erts_atomic32_read_acqb -#define erts_smp_atomic32_inc_read_acqb erts_atomic32_inc_read_acqb -#define erts_smp_atomic32_dec_read_acqb erts_atomic32_dec_read_acqb -#define erts_smp_atomic32_inc_acqb erts_atomic32_inc_acqb -#define erts_smp_atomic32_dec_acqb erts_atomic32_dec_acqb -#define erts_smp_atomic32_add_read_acqb erts_atomic32_add_read_acqb -#define erts_smp_atomic32_add_acqb erts_atomic32_add_acqb -#define erts_smp_atomic32_read_bor_acqb erts_atomic32_read_bor_acqb -#define erts_smp_atomic32_read_band_acqb erts_atomic32_read_band_acqb -#define erts_smp_atomic32_xchg_acqb erts_atomic32_xchg_acqb -#define erts_smp_atomic32_cmpxchg_acqb erts_atomic32_cmpxchg_acqb -#define erts_smp_atomic32_read_bset_acqb erts_atomic32_read_bset_acqb - -#define erts_smp_atomic32_init_relb erts_atomic32_init_relb -#define erts_smp_atomic32_set_relb erts_atomic32_set_relb 
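Note on the suffix zoo in these alias lists: every atomic operation comes in memory-ordering variants. On my reading of the erl_threads.h conventions (treat this as an approximation, not an official mapping), the rough C11 equivalents are:

    #include <stdatomic.h>

    /* Approximate C11 counterparts of the ERTS ordering suffixes:
     *   _nob   -> memory_order_relaxed   (no barrier)
     *   _mb    -> memory_order_seq_cst   (full barrier)
     *   _acqb  -> memory_order_acquire
     *   _relb  -> memory_order_release
     *   _ddrb  -> memory_order_consume   (data-dependent reads)
     *   _rb/_wb have no direct C11 order; fences are the closest fit. */
    static long read_acqb(_Atomic long *p)
    {
        return atomic_load_explicit(p, memory_order_acquire);
    }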
-#define erts_smp_atomic32_read_relb erts_atomic32_read_relb -#define erts_smp_atomic32_inc_read_relb erts_atomic32_inc_read_relb -#define erts_smp_atomic32_dec_read_relb erts_atomic32_dec_read_relb -#define erts_smp_atomic32_inc_relb erts_atomic32_inc_relb -#define erts_smp_atomic32_dec_relb erts_atomic32_dec_relb -#define erts_smp_atomic32_add_read_relb erts_atomic32_add_read_relb -#define erts_smp_atomic32_add_relb erts_atomic32_add_relb -#define erts_smp_atomic32_read_bor_relb erts_atomic32_read_bor_relb -#define erts_smp_atomic32_read_band_relb erts_atomic32_read_band_relb -#define erts_smp_atomic32_xchg_relb erts_atomic32_xchg_relb -#define erts_smp_atomic32_cmpxchg_relb erts_atomic32_cmpxchg_relb -#define erts_smp_atomic32_read_bset_relb erts_atomic32_read_bset_relb - -#define erts_smp_atomic32_init_ddrb erts_atomic32_init_ddrb -#define erts_smp_atomic32_set_ddrb erts_atomic32_set_ddrb -#define erts_smp_atomic32_read_ddrb erts_atomic32_read_ddrb -#define erts_smp_atomic32_inc_read_ddrb erts_atomic32_inc_read_ddrb -#define erts_smp_atomic32_dec_read_ddrb erts_atomic32_dec_read_ddrb -#define erts_smp_atomic32_inc_ddrb erts_atomic32_inc_ddrb -#define erts_smp_atomic32_dec_ddrb erts_atomic32_dec_ddrb -#define erts_smp_atomic32_add_read_ddrb erts_atomic32_add_read_ddrb -#define erts_smp_atomic32_add_ddrb erts_atomic32_add_ddrb -#define erts_smp_atomic32_read_bor_ddrb erts_atomic32_read_bor_ddrb -#define erts_smp_atomic32_read_band_ddrb erts_atomic32_read_band_ddrb -#define erts_smp_atomic32_xchg_ddrb erts_atomic32_xchg_ddrb -#define erts_smp_atomic32_cmpxchg_ddrb erts_atomic32_cmpxchg_ddrb -#define erts_smp_atomic32_read_bset_ddrb erts_atomic32_read_bset_ddrb - -#define erts_smp_atomic32_init_rb erts_atomic32_init_rb -#define erts_smp_atomic32_set_rb erts_atomic32_set_rb -#define erts_smp_atomic32_read_rb erts_atomic32_read_rb -#define erts_smp_atomic32_inc_read_rb erts_atomic32_inc_read_rb -#define erts_smp_atomic32_dec_read_rb erts_atomic32_dec_read_rb -#define erts_smp_atomic32_inc_rb erts_atomic32_inc_rb -#define erts_smp_atomic32_dec_rb erts_atomic32_dec_rb -#define erts_smp_atomic32_add_read_rb erts_atomic32_add_read_rb -#define erts_smp_atomic32_add_rb erts_atomic32_add_rb -#define erts_smp_atomic32_read_bor_rb erts_atomic32_read_bor_rb -#define erts_smp_atomic32_read_band_rb erts_atomic32_read_band_rb -#define erts_smp_atomic32_xchg_rb erts_atomic32_xchg_rb -#define erts_smp_atomic32_cmpxchg_rb erts_atomic32_cmpxchg_rb -#define erts_smp_atomic32_read_bset_rb erts_atomic32_read_bset_rb - -#define erts_smp_atomic32_init_wb erts_atomic32_init_wb -#define erts_smp_atomic32_set_wb erts_atomic32_set_wb -#define erts_smp_atomic32_read_wb erts_atomic32_read_wb -#define erts_smp_atomic32_inc_read_wb erts_atomic32_inc_read_wb -#define erts_smp_atomic32_dec_read_wb erts_atomic32_dec_read_wb -#define erts_smp_atomic32_inc_wb erts_atomic32_inc_wb -#define erts_smp_atomic32_dec_wb erts_atomic32_dec_wb -#define erts_smp_atomic32_add_read_wb erts_atomic32_add_read_wb -#define erts_smp_atomic32_add_wb erts_atomic32_add_wb -#define erts_smp_atomic32_read_bor_wb erts_atomic32_read_bor_wb -#define erts_smp_atomic32_read_band_wb erts_atomic32_read_band_wb -#define erts_smp_atomic32_xchg_wb erts_atomic32_xchg_wb -#define erts_smp_atomic32_cmpxchg_wb erts_atomic32_cmpxchg_wb -#define erts_smp_atomic32_read_bset_wb erts_atomic32_read_bset_wb - -#define erts_smp_atomic32_set_dirty erts_atomic32_set_dirty -#define erts_smp_atomic32_read_dirty erts_atomic32_read_dirty - -/* 64-bit atomics */ - -#define 
erts_smp_atomic64_init_nob erts_atomic64_init_nob -#define erts_smp_atomic64_set_nob erts_atomic64_set_nob -#define erts_smp_atomic64_read_nob erts_atomic64_read_nob -#define erts_smp_atomic64_inc_read_nob erts_atomic64_inc_read_nob -#define erts_smp_atomic64_dec_read_nob erts_atomic64_dec_read_nob -#define erts_smp_atomic64_inc_nob erts_atomic64_inc_nob -#define erts_smp_atomic64_dec_nob erts_atomic64_dec_nob -#define erts_smp_atomic64_add_read_nob erts_atomic64_add_read_nob -#define erts_smp_atomic64_add_nob erts_atomic64_add_nob -#define erts_smp_atomic64_read_bor_nob erts_atomic64_read_bor_nob -#define erts_smp_atomic64_read_band_nob erts_atomic64_read_band_nob -#define erts_smp_atomic64_xchg_nob erts_atomic64_xchg_nob -#define erts_smp_atomic64_cmpxchg_nob erts_atomic64_cmpxchg_nob -#define erts_smp_atomic64_read_bset_nob erts_atomic64_read_bset_nob - -#define erts_smp_atomic64_init_mb erts_atomic64_init_mb -#define erts_smp_atomic64_set_mb erts_atomic64_set_mb -#define erts_smp_atomic64_read_mb erts_atomic64_read_mb -#define erts_smp_atomic64_inc_read_mb erts_atomic64_inc_read_mb -#define erts_smp_atomic64_dec_read_mb erts_atomic64_dec_read_mb -#define erts_smp_atomic64_inc_mb erts_atomic64_inc_mb -#define erts_smp_atomic64_dec_mb erts_atomic64_dec_mb -#define erts_smp_atomic64_add_read_mb erts_atomic64_add_read_mb -#define erts_smp_atomic64_add_mb erts_atomic64_add_mb -#define erts_smp_atomic64_read_bor_mb erts_atomic64_read_bor_mb -#define erts_smp_atomic64_read_band_mb erts_atomic64_read_band_mb -#define erts_smp_atomic64_xchg_mb erts_atomic64_xchg_mb -#define erts_smp_atomic64_cmpxchg_mb erts_atomic64_cmpxchg_mb -#define erts_smp_atomic64_read_bset_mb erts_atomic64_read_bset_mb - -#define erts_smp_atomic64_init_acqb erts_atomic64_init_acqb -#define erts_smp_atomic64_set_acqb erts_atomic64_set_acqb -#define erts_smp_atomic64_read_acqb erts_atomic64_read_acqb -#define erts_smp_atomic64_inc_read_acqb erts_atomic64_inc_read_acqb -#define erts_smp_atomic64_dec_read_acqb erts_atomic64_dec_read_acqb -#define erts_smp_atomic64_inc_acqb erts_atomic64_inc_acqb -#define erts_smp_atomic64_dec_acqb erts_atomic64_dec_acqb -#define erts_smp_atomic64_add_read_acqb erts_atomic64_add_read_acqb -#define erts_smp_atomic64_add_acqb erts_atomic64_add_acqb -#define erts_smp_atomic64_read_bor_acqb erts_atomic64_read_bor_acqb -#define erts_smp_atomic64_read_band_acqb erts_atomic64_read_band_acqb -#define erts_smp_atomic64_xchg_acqb erts_atomic64_xchg_acqb -#define erts_smp_atomic64_cmpxchg_acqb erts_atomic64_cmpxchg_acqb -#define erts_smp_atomic64_read_bset_acqb erts_atomic64_read_bset_acqb - -#define erts_smp_atomic64_init_relb erts_atomic64_init_relb -#define erts_smp_atomic64_set_relb erts_atomic64_set_relb -#define erts_smp_atomic64_read_relb erts_atomic64_read_relb -#define erts_smp_atomic64_inc_read_relb erts_atomic64_inc_read_relb -#define erts_smp_atomic64_dec_read_relb erts_atomic64_dec_read_relb -#define erts_smp_atomic64_inc_relb erts_atomic64_inc_relb -#define erts_smp_atomic64_dec_relb erts_atomic64_dec_relb -#define erts_smp_atomic64_add_read_relb erts_atomic64_add_read_relb -#define erts_smp_atomic64_add_relb erts_atomic64_add_relb -#define erts_smp_atomic64_read_bor_relb erts_atomic64_read_bor_relb -#define erts_smp_atomic64_read_band_relb erts_atomic64_read_band_relb -#define erts_smp_atomic64_xchg_relb erts_atomic64_xchg_relb -#define erts_smp_atomic64_cmpxchg_relb erts_atomic64_cmpxchg_relb -#define erts_smp_atomic64_read_bset_relb erts_atomic64_read_bset_relb - -#define 
erts_smp_atomic64_init_ddrb erts_atomic64_init_ddrb -#define erts_smp_atomic64_set_ddrb erts_atomic64_set_ddrb -#define erts_smp_atomic64_read_ddrb erts_atomic64_read_ddrb -#define erts_smp_atomic64_inc_read_ddrb erts_atomic64_inc_read_ddrb -#define erts_smp_atomic64_dec_read_ddrb erts_atomic64_dec_read_ddrb -#define erts_smp_atomic64_inc_ddrb erts_atomic64_inc_ddrb -#define erts_smp_atomic64_dec_ddrb erts_atomic64_dec_ddrb -#define erts_smp_atomic64_add_read_ddrb erts_atomic64_add_read_ddrb -#define erts_smp_atomic64_add_ddrb erts_atomic64_add_ddrb -#define erts_smp_atomic64_read_bor_ddrb erts_atomic64_read_bor_ddrb -#define erts_smp_atomic64_read_band_ddrb erts_atomic64_read_band_ddrb -#define erts_smp_atomic64_xchg_ddrb erts_atomic64_xchg_ddrb -#define erts_smp_atomic64_cmpxchg_ddrb erts_atomic64_cmpxchg_ddrb -#define erts_smp_atomic64_read_bset_ddrb erts_atomic64_read_bset_ddrb - -#define erts_smp_atomic64_init_rb erts_atomic64_init_rb -#define erts_smp_atomic64_set_rb erts_atomic64_set_rb -#define erts_smp_atomic64_read_rb erts_atomic64_read_rb -#define erts_smp_atomic64_inc_read_rb erts_atomic64_inc_read_rb -#define erts_smp_atomic64_dec_read_rb erts_atomic64_dec_read_rb -#define erts_smp_atomic64_inc_rb erts_atomic64_inc_rb -#define erts_smp_atomic64_dec_rb erts_atomic64_dec_rb -#define erts_smp_atomic64_add_read_rb erts_atomic64_add_read_rb -#define erts_smp_atomic64_add_rb erts_atomic64_add_rb -#define erts_smp_atomic64_read_bor_rb erts_atomic64_read_bor_rb -#define erts_smp_atomic64_read_band_rb erts_atomic64_read_band_rb -#define erts_smp_atomic64_xchg_rb erts_atomic64_xchg_rb -#define erts_smp_atomic64_cmpxchg_rb erts_atomic64_cmpxchg_rb -#define erts_smp_atomic64_read_bset_rb erts_atomic64_read_bset_rb - -#define erts_smp_atomic64_init_wb erts_atomic64_init_wb -#define erts_smp_atomic64_set_wb erts_atomic64_set_wb -#define erts_smp_atomic64_read_wb erts_atomic64_read_wb -#define erts_smp_atomic64_inc_read_wb erts_atomic64_inc_read_wb -#define erts_smp_atomic64_dec_read_wb erts_atomic64_dec_read_wb -#define erts_smp_atomic64_inc_wb erts_atomic64_inc_wb -#define erts_smp_atomic64_dec_wb erts_atomic64_dec_wb -#define erts_smp_atomic64_add_read_wb erts_atomic64_add_read_wb -#define erts_smp_atomic64_add_wb erts_atomic64_add_wb -#define erts_smp_atomic64_read_bor_wb erts_atomic64_read_bor_wb -#define erts_smp_atomic64_read_band_wb erts_atomic64_read_band_wb -#define erts_smp_atomic64_xchg_wb erts_atomic64_xchg_wb -#define erts_smp_atomic64_cmpxchg_wb erts_atomic64_cmpxchg_wb -#define erts_smp_atomic64_read_bset_wb erts_atomic64_read_bset_wb - -#define erts_smp_atomic64_set_dirty erts_atomic64_set_dirty -#define erts_smp_atomic64_read_dirty erts_atomic64_read_dirty - -#else /* !ERTS_SMP */ - -/* Double word size atomics */ - -#define erts_smp_dw_atomic_init_nob erts_no_dw_atomic_set -#define erts_smp_dw_atomic_set_nob erts_no_dw_atomic_set -#define erts_smp_dw_atomic_read_nob erts_no_dw_atomic_read -#define erts_smp_dw_atomic_cmpxchg_nob erts_no_dw_atomic_cmpxchg - -#define erts_smp_dw_atomic_init_mb erts_no_dw_atomic_init -#define erts_smp_dw_atomic_set_mb erts_no_dw_atomic_set -#define erts_smp_dw_atomic_read_mb erts_no_dw_atomic_read -#define erts_smp_dw_atomic_cmpxchg_mb erts_no_dw_atomic_cmpxchg - -#define erts_smp_dw_atomic_init_acqb erts_no_dw_atomic_init -#define erts_smp_dw_atomic_set_acqb erts_no_dw_atomic_set -#define erts_smp_dw_atomic_read_acqb erts_no_dw_atomic_read -#define erts_smp_dw_atomic_cmpxchg_acqb erts_no_dw_atomic_cmpxchg - -#define 
erts_smp_dw_atomic_init_relb erts_no_dw_atomic_init -#define erts_smp_dw_atomic_set_relb erts_no_dw_atomic_set -#define erts_smp_dw_atomic_read_relb erts_no_dw_atomic_read -#define erts_smp_dw_atomic_cmpxchg_relb erts_no_dw_atomic_cmpxchg - -#define erts_smp_dw_atomic_init_ddrb erts_no_dw_atomic_init -#define erts_smp_dw_atomic_set_ddrb erts_no_dw_atomic_set -#define erts_smp_dw_atomic_read_ddrb erts_no_dw_atomic_read -#define erts_smp_dw_atomic_cmpxchg_ddrb erts_no_dw_atomic_cmpxchg - -#define erts_smp_dw_atomic_init_rb erts_no_dw_atomic_init -#define erts_smp_dw_atomic_set_rb erts_no_dw_atomic_set -#define erts_smp_dw_atomic_read_rb erts_no_dw_atomic_read -#define erts_smp_dw_atomic_cmpxchg_rb erts_no_dw_atomic_cmpxchg - -#define erts_smp_dw_atomic_init_wb erts_no_dw_atomic_init -#define erts_smp_dw_atomic_set_wb erts_no_dw_atomic_set -#define erts_smp_dw_atomic_read_wb erts_no_dw_atomic_read -#define erts_smp_dw_atomic_cmpxchg_wb erts_no_dw_atomic_cmpxchg - -#define erts_smp_dw_atomic_set_dirty erts_no_dw_atomic_set -#define erts_smp_dw_atomic_read_dirty erts_no_dw_atomic_read - -/* Word size atomics */ - -#define erts_smp_atomic_init_nob erts_no_atomic_set -#define erts_smp_atomic_set_nob erts_no_atomic_set -#define erts_smp_atomic_read_nob erts_no_atomic_read -#define erts_smp_atomic_inc_read_nob erts_no_atomic_inc_read -#define erts_smp_atomic_dec_read_nob erts_no_atomic_dec_read -#define erts_smp_atomic_inc_nob erts_no_atomic_inc -#define erts_smp_atomic_dec_nob erts_no_atomic_dec -#define erts_smp_atomic_add_read_nob erts_no_atomic_add_read -#define erts_smp_atomic_add_nob erts_no_atomic_add -#define erts_smp_atomic_read_bor_nob erts_no_atomic_read_bor -#define erts_smp_atomic_read_band_nob erts_no_atomic_read_band -#define erts_smp_atomic_xchg_nob erts_no_atomic_xchg -#define erts_smp_atomic_cmpxchg_nob erts_no_atomic_cmpxchg -#define erts_smp_atomic_read_bset_nob erts_no_atomic_read_bset - -#define erts_smp_atomic_init_mb erts_no_atomic_set -#define erts_smp_atomic_set_mb erts_no_atomic_set -#define erts_smp_atomic_read_mb erts_no_atomic_read -#define erts_smp_atomic_inc_read_mb erts_no_atomic_inc_read -#define erts_smp_atomic_dec_read_mb erts_no_atomic_dec_read -#define erts_smp_atomic_inc_mb erts_no_atomic_inc -#define erts_smp_atomic_dec_mb erts_no_atomic_dec -#define erts_smp_atomic_add_read_mb erts_no_atomic_add_read -#define erts_smp_atomic_add_mb erts_no_atomic_add -#define erts_smp_atomic_read_bor_mb erts_no_atomic_read_bor -#define erts_smp_atomic_read_band_mb erts_no_atomic_read_band -#define erts_smp_atomic_xchg_mb erts_no_atomic_xchg -#define erts_smp_atomic_cmpxchg_mb erts_no_atomic_cmpxchg -#define erts_smp_atomic_read_bset_mb erts_no_atomic_read_bset - -#define erts_smp_atomic_init_acqb erts_no_atomic_set -#define erts_smp_atomic_set_acqb erts_no_atomic_set -#define erts_smp_atomic_read_acqb erts_no_atomic_read -#define erts_smp_atomic_inc_read_acqb erts_no_atomic_inc_read -#define erts_smp_atomic_dec_read_acqb erts_no_atomic_dec_read -#define erts_smp_atomic_inc_acqb erts_no_atomic_inc -#define erts_smp_atomic_dec_acqb erts_no_atomic_dec -#define erts_smp_atomic_add_read_acqb erts_no_atomic_add_read -#define erts_smp_atomic_add_acqb erts_no_atomic_add -#define erts_smp_atomic_read_bor_acqb erts_no_atomic_read_bor -#define erts_smp_atomic_read_band_acqb erts_no_atomic_read_band -#define erts_smp_atomic_xchg_acqb erts_no_atomic_xchg -#define erts_smp_atomic_cmpxchg_acqb erts_no_atomic_cmpxchg -#define erts_smp_atomic_read_bset_acqb erts_no_atomic_read_bset - 
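/*
 * A minimal sketch (hypothetical my_* names, not the real ERTS API) of
 * the idiom repeated by the mapping blocks above and below: in a build
 * without SMP support only one scheduler thread runs, so every barrier
 * variant (_nob, _mb, _acqb, _relb, _ddrb, _rb, _wb) of an operation can
 * collapse to the same plain, non-atomic helper.
 */
typedef long my_aint_t;

static inline my_aint_t
my_no_atomic_inc_read(my_aint_t *p)
{
    return ++*p; /* plain increment: not atomic, implies no barrier */
}

/* All barrier flavours alias the one helper, mirroring the defines above. */
#define my_atomic_inc_read_nob  my_no_atomic_inc_read
#define my_atomic_inc_read_mb   my_no_atomic_inc_read
#define my_atomic_inc_read_acqb my_no_atomic_inc_read
#define my_atomic_inc_read_relb my_no_atomic_inc_read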
-#define erts_smp_atomic_init_relb erts_no_atomic_set -#define erts_smp_atomic_set_relb erts_no_atomic_set -#define erts_smp_atomic_read_relb erts_no_atomic_read -#define erts_smp_atomic_inc_read_relb erts_no_atomic_inc_read -#define erts_smp_atomic_dec_read_relb erts_no_atomic_dec_read -#define erts_smp_atomic_inc_relb erts_no_atomic_inc -#define erts_smp_atomic_dec_relb erts_no_atomic_dec -#define erts_smp_atomic_add_read_relb erts_no_atomic_add_read -#define erts_smp_atomic_add_relb erts_no_atomic_add -#define erts_smp_atomic_read_bor_relb erts_no_atomic_read_bor -#define erts_smp_atomic_read_band_relb erts_no_atomic_read_band -#define erts_smp_atomic_xchg_relb erts_no_atomic_xchg -#define erts_smp_atomic_cmpxchg_relb erts_no_atomic_cmpxchg -#define erts_smp_atomic_read_bset_relb erts_no_atomic_read_bset - -#define erts_smp_atomic_init_ddrb erts_no_atomic_set -#define erts_smp_atomic_set_ddrb erts_no_atomic_set -#define erts_smp_atomic_read_ddrb erts_no_atomic_read -#define erts_smp_atomic_inc_read_ddrb erts_no_atomic_inc_read -#define erts_smp_atomic_dec_read_ddrb erts_no_atomic_dec_read -#define erts_smp_atomic_inc_ddrb erts_no_atomic_inc -#define erts_smp_atomic_dec_ddrb erts_no_atomic_dec -#define erts_smp_atomic_add_read_ddrb erts_no_atomic_add_read -#define erts_smp_atomic_add_ddrb erts_no_atomic_add -#define erts_smp_atomic_read_bor_ddrb erts_no_atomic_read_bor -#define erts_smp_atomic_read_band_ddrb erts_no_atomic_read_band -#define erts_smp_atomic_xchg_ddrb erts_no_atomic_xchg -#define erts_smp_atomic_cmpxchg_ddrb erts_no_atomic_cmpxchg -#define erts_smp_atomic_read_bset_ddrb erts_no_atomic_read_bset - -#define erts_smp_atomic_init_rb erts_no_atomic_set -#define erts_smp_atomic_set_rb erts_no_atomic_set -#define erts_smp_atomic_read_rb erts_no_atomic_read -#define erts_smp_atomic_inc_read_rb erts_no_atomic_inc_read -#define erts_smp_atomic_dec_read_rb erts_no_atomic_dec_read -#define erts_smp_atomic_inc_rb erts_no_atomic_inc -#define erts_smp_atomic_dec_rb erts_no_atomic_dec -#define erts_smp_atomic_add_read_rb erts_no_atomic_add_read -#define erts_smp_atomic_add_rb erts_no_atomic_add -#define erts_smp_atomic_read_bor_rb erts_no_atomic_read_bor -#define erts_smp_atomic_read_band_rb erts_no_atomic_read_band -#define erts_smp_atomic_xchg_rb erts_no_atomic_xchg -#define erts_smp_atomic_cmpxchg_rb erts_no_atomic_cmpxchg -#define erts_smp_atomic_read_bset_rb erts_no_atomic_read_bset - -#define erts_smp_atomic_init_wb erts_no_atomic_set -#define erts_smp_atomic_set_wb erts_no_atomic_set -#define erts_smp_atomic_read_wb erts_no_atomic_read -#define erts_smp_atomic_inc_read_wb erts_no_atomic_inc_read -#define erts_smp_atomic_dec_read_wb erts_no_atomic_dec_read -#define erts_smp_atomic_inc_wb erts_no_atomic_inc -#define erts_smp_atomic_dec_wb erts_no_atomic_dec -#define erts_smp_atomic_add_read_wb erts_no_atomic_add_read -#define erts_smp_atomic_add_wb erts_no_atomic_add -#define erts_smp_atomic_read_bor_wb erts_no_atomic_read_bor -#define erts_smp_atomic_read_band_wb erts_no_atomic_read_band -#define erts_smp_atomic_xchg_wb erts_no_atomic_xchg -#define erts_smp_atomic_cmpxchg_wb erts_no_atomic_cmpxchg -#define erts_smp_atomic_read_bset_wb erts_no_atomic_read_bset - -#define erts_smp_atomic_set_dirty erts_no_atomic_set -#define erts_smp_atomic_read_dirty erts_no_atomic_read - -/* 32-bit atomics */ - -#define erts_smp_atomic32_init_nob erts_no_atomic32_set -#define erts_smp_atomic32_set_nob erts_no_atomic32_set -#define erts_smp_atomic32_read_nob erts_no_atomic32_read -#define 
erts_smp_atomic32_inc_read_nob erts_no_atomic32_inc_read -#define erts_smp_atomic32_dec_read_nob erts_no_atomic32_dec_read -#define erts_smp_atomic32_inc_nob erts_no_atomic32_inc -#define erts_smp_atomic32_dec_nob erts_no_atomic32_dec -#define erts_smp_atomic32_add_read_nob erts_no_atomic32_add_read -#define erts_smp_atomic32_add_nob erts_no_atomic32_add -#define erts_smp_atomic32_read_bor_nob erts_no_atomic32_read_bor -#define erts_smp_atomic32_read_band_nob erts_no_atomic32_read_band -#define erts_smp_atomic32_xchg_nob erts_no_atomic32_xchg -#define erts_smp_atomic32_cmpxchg_nob erts_no_atomic32_cmpxchg -#define erts_smp_atomic32_read_bset_nob erts_no_atomic32_read_bset - -#define erts_smp_atomic32_init_mb erts_no_atomic32_set -#define erts_smp_atomic32_set_mb erts_no_atomic32_set -#define erts_smp_atomic32_read_mb erts_no_atomic32_read -#define erts_smp_atomic32_inc_read_mb erts_no_atomic32_inc_read -#define erts_smp_atomic32_dec_read_mb erts_no_atomic32_dec_read -#define erts_smp_atomic32_inc_mb erts_no_atomic32_inc -#define erts_smp_atomic32_dec_mb erts_no_atomic32_dec -#define erts_smp_atomic32_add_read_mb erts_no_atomic32_add_read -#define erts_smp_atomic32_add_mb erts_no_atomic32_add -#define erts_smp_atomic32_read_bor_mb erts_no_atomic32_read_bor -#define erts_smp_atomic32_read_band_mb erts_no_atomic32_read_band -#define erts_smp_atomic32_xchg_mb erts_no_atomic32_xchg -#define erts_smp_atomic32_cmpxchg_mb erts_no_atomic32_cmpxchg -#define erts_smp_atomic32_read_bset_mb erts_no_atomic32_read_bset - -#define erts_smp_atomic32_init_acqb erts_no_atomic32_set -#define erts_smp_atomic32_set_acqb erts_no_atomic32_set -#define erts_smp_atomic32_read_acqb erts_no_atomic32_read -#define erts_smp_atomic32_inc_read_acqb erts_no_atomic32_inc_read -#define erts_smp_atomic32_dec_read_acqb erts_no_atomic32_dec_read -#define erts_smp_atomic32_inc_acqb erts_no_atomic32_inc -#define erts_smp_atomic32_dec_acqb erts_no_atomic32_dec -#define erts_smp_atomic32_add_read_acqb erts_no_atomic32_add_read -#define erts_smp_atomic32_add_acqb erts_no_atomic32_add -#define erts_smp_atomic32_read_bor_acqb erts_no_atomic32_read_bor -#define erts_smp_atomic32_read_band_acqb erts_no_atomic32_read_band -#define erts_smp_atomic32_xchg_acqb erts_no_atomic32_xchg -#define erts_smp_atomic32_cmpxchg_acqb erts_no_atomic32_cmpxchg -#define erts_smp_atomic32_read_bset_acqb erts_no_atomic32_read_bset - -#define erts_smp_atomic32_init_relb erts_no_atomic32_set -#define erts_smp_atomic32_set_relb erts_no_atomic32_set -#define erts_smp_atomic32_read_relb erts_no_atomic32_read -#define erts_smp_atomic32_inc_read_relb erts_no_atomic32_inc_read -#define erts_smp_atomic32_dec_read_relb erts_no_atomic32_dec_read -#define erts_smp_atomic32_inc_relb erts_no_atomic32_inc -#define erts_smp_atomic32_dec_relb erts_no_atomic32_dec -#define erts_smp_atomic32_add_read_relb erts_no_atomic32_add_read -#define erts_smp_atomic32_add_relb erts_no_atomic32_add -#define erts_smp_atomic32_read_bor_relb erts_no_atomic32_read_bor -#define erts_smp_atomic32_read_band_relb erts_no_atomic32_read_band -#define erts_smp_atomic32_xchg_relb erts_no_atomic32_xchg -#define erts_smp_atomic32_cmpxchg_relb erts_no_atomic32_cmpxchg -#define erts_smp_atomic32_read_bset_relb erts_no_atomic32_read_bset - -#define erts_smp_atomic32_init_ddrb erts_no_atomic32_set -#define erts_smp_atomic32_set_ddrb erts_no_atomic32_set -#define erts_smp_atomic32_read_ddrb erts_no_atomic32_read -#define erts_smp_atomic32_inc_read_ddrb erts_no_atomic32_inc_read -#define 
erts_smp_atomic32_dec_read_ddrb erts_no_atomic32_dec_read -#define erts_smp_atomic32_inc_ddrb erts_no_atomic32_inc -#define erts_smp_atomic32_dec_ddrb erts_no_atomic32_dec -#define erts_smp_atomic32_add_read_ddrb erts_no_atomic32_add_read -#define erts_smp_atomic32_add_ddrb erts_no_atomic32_add -#define erts_smp_atomic32_read_bor_ddrb erts_no_atomic32_read_bor -#define erts_smp_atomic32_read_band_ddrb erts_no_atomic32_read_band -#define erts_smp_atomic32_xchg_ddrb erts_no_atomic32_xchg -#define erts_smp_atomic32_cmpxchg_ddrb erts_no_atomic32_cmpxchg -#define erts_smp_atomic32_read_bset_ddrb erts_no_atomic32_read_bset - -#define erts_smp_atomic32_init_rb erts_no_atomic32_set -#define erts_smp_atomic32_set_rb erts_no_atomic32_set -#define erts_smp_atomic32_read_rb erts_no_atomic32_read -#define erts_smp_atomic32_inc_read_rb erts_no_atomic32_inc_read -#define erts_smp_atomic32_dec_read_rb erts_no_atomic32_dec_read -#define erts_smp_atomic32_inc_rb erts_no_atomic32_inc -#define erts_smp_atomic32_dec_rb erts_no_atomic32_dec -#define erts_smp_atomic32_add_read_rb erts_no_atomic32_add_read -#define erts_smp_atomic32_add_rb erts_no_atomic32_add -#define erts_smp_atomic32_read_bor_rb erts_no_atomic32_read_bor -#define erts_smp_atomic32_read_band_rb erts_no_atomic32_read_band -#define erts_smp_atomic32_xchg_rb erts_no_atomic32_xchg -#define erts_smp_atomic32_cmpxchg_rb erts_no_atomic32_cmpxchg -#define erts_smp_atomic32_read_bset_rb erts_no_atomic32_read_bset - -#define erts_smp_atomic32_init_wb erts_no_atomic32_set -#define erts_smp_atomic32_set_wb erts_no_atomic32_set -#define erts_smp_atomic32_read_wb erts_no_atomic32_read -#define erts_smp_atomic32_inc_read_wb erts_no_atomic32_inc_read -#define erts_smp_atomic32_dec_read_wb erts_no_atomic32_dec_read -#define erts_smp_atomic32_inc_wb erts_no_atomic32_inc -#define erts_smp_atomic32_dec_wb erts_no_atomic32_dec -#define erts_smp_atomic32_add_read_wb erts_no_atomic32_add_read -#define erts_smp_atomic32_add_wb erts_no_atomic32_add -#define erts_smp_atomic32_read_bor_wb erts_no_atomic32_read_bor -#define erts_smp_atomic32_read_band_wb erts_no_atomic32_read_band -#define erts_smp_atomic32_xchg_wb erts_no_atomic32_xchg -#define erts_smp_atomic32_cmpxchg_wb erts_no_atomic32_cmpxchg -#define erts_smp_atomic32_read_bset_wb erts_no_atomic32_read_bset - -#define erts_smp_atomic32_set_dirty erts_no_atomic32_set -#define erts_smp_atomic32_read_dirty erts_no_atomic32_read - -/* 64-bit atomics */ - -#define erts_smp_atomic64_init_nob erts_no_atomic64_set -#define erts_smp_atomic64_set_nob erts_no_atomic64_set -#define erts_smp_atomic64_read_nob erts_no_atomic64_read -#define erts_smp_atomic64_inc_read_nob erts_no_atomic64_inc_read -#define erts_smp_atomic64_dec_read_nob erts_no_atomic64_dec_read -#define erts_smp_atomic64_inc_nob erts_no_atomic64_inc -#define erts_smp_atomic64_dec_nob erts_no_atomic64_dec -#define erts_smp_atomic64_add_read_nob erts_no_atomic64_add_read -#define erts_smp_atomic64_add_nob erts_no_atomic64_add -#define erts_smp_atomic64_read_bor_nob erts_no_atomic64_read_bor -#define erts_smp_atomic64_read_band_nob erts_no_atomic64_read_band -#define erts_smp_atomic64_xchg_nob erts_no_atomic64_xchg -#define erts_smp_atomic64_cmpxchg_nob erts_no_atomic64_cmpxchg -#define erts_smp_atomic64_read_bset_nob erts_no_atomic64_read_bset - -#define erts_smp_atomic64_init_mb erts_no_atomic64_set -#define erts_smp_atomic64_set_mb erts_no_atomic64_set -#define erts_smp_atomic64_read_mb erts_no_atomic64_read -#define erts_smp_atomic64_inc_read_mb 
erts_no_atomic64_inc_read -#define erts_smp_atomic64_dec_read_mb erts_no_atomic64_dec_read -#define erts_smp_atomic64_inc_mb erts_no_atomic64_inc -#define erts_smp_atomic64_dec_mb erts_no_atomic64_dec -#define erts_smp_atomic64_add_read_mb erts_no_atomic64_add_read -#define erts_smp_atomic64_add_mb erts_no_atomic64_add -#define erts_smp_atomic64_read_bor_mb erts_no_atomic64_read_bor -#define erts_smp_atomic64_read_band_mb erts_no_atomic64_read_band -#define erts_smp_atomic64_xchg_mb erts_no_atomic64_xchg -#define erts_smp_atomic64_cmpxchg_mb erts_no_atomic64_cmpxchg -#define erts_smp_atomic64_read_bset_mb erts_no_atomic64_read_bset - -#define erts_smp_atomic64_init_acqb erts_no_atomic64_set -#define erts_smp_atomic64_set_acqb erts_no_atomic64_set -#define erts_smp_atomic64_read_acqb erts_no_atomic64_read -#define erts_smp_atomic64_inc_read_acqb erts_no_atomic64_inc_read -#define erts_smp_atomic64_dec_read_acqb erts_no_atomic64_dec_read -#define erts_smp_atomic64_inc_acqb erts_no_atomic64_inc -#define erts_smp_atomic64_dec_acqb erts_no_atomic64_dec -#define erts_smp_atomic64_add_read_acqb erts_no_atomic64_add_read -#define erts_smp_atomic64_add_acqb erts_no_atomic64_add -#define erts_smp_atomic64_read_bor_acqb erts_no_atomic64_read_bor -#define erts_smp_atomic64_read_band_acqb erts_no_atomic64_read_band -#define erts_smp_atomic64_xchg_acqb erts_no_atomic64_xchg -#define erts_smp_atomic64_cmpxchg_acqb erts_no_atomic64_cmpxchg -#define erts_smp_atomic64_read_bset_acqb erts_no_atomic64_read_bset - -#define erts_smp_atomic64_init_relb erts_no_atomic64_set -#define erts_smp_atomic64_set_relb erts_no_atomic64_set -#define erts_smp_atomic64_read_relb erts_no_atomic64_read -#define erts_smp_atomic64_inc_read_relb erts_no_atomic64_inc_read -#define erts_smp_atomic64_dec_read_relb erts_no_atomic64_dec_read -#define erts_smp_atomic64_inc_relb erts_no_atomic64_inc -#define erts_smp_atomic64_dec_relb erts_no_atomic64_dec -#define erts_smp_atomic64_add_read_relb erts_no_atomic64_add_read -#define erts_smp_atomic64_add_relb erts_no_atomic64_add -#define erts_smp_atomic64_read_bor_relb erts_no_atomic64_read_bor -#define erts_smp_atomic64_read_band_relb erts_no_atomic64_read_band -#define erts_smp_atomic64_xchg_relb erts_no_atomic64_xchg -#define erts_smp_atomic64_cmpxchg_relb erts_no_atomic64_cmpxchg -#define erts_smp_atomic64_read_bset_relb erts_no_atomic64_read_bset - -#define erts_smp_atomic64_init_ddrb erts_no_atomic64_set -#define erts_smp_atomic64_set_ddrb erts_no_atomic64_set -#define erts_smp_atomic64_read_ddrb erts_no_atomic64_read -#define erts_smp_atomic64_inc_read_ddrb erts_no_atomic64_inc_read -#define erts_smp_atomic64_dec_read_ddrb erts_no_atomic64_dec_read -#define erts_smp_atomic64_inc_ddrb erts_no_atomic64_inc -#define erts_smp_atomic64_dec_ddrb erts_no_atomic64_dec -#define erts_smp_atomic64_add_read_ddrb erts_no_atomic64_add_read -#define erts_smp_atomic64_add_ddrb erts_no_atomic64_add -#define erts_smp_atomic64_read_bor_ddrb erts_no_atomic64_read_bor -#define erts_smp_atomic64_read_band_ddrb erts_no_atomic64_read_band -#define erts_smp_atomic64_xchg_ddrb erts_no_atomic64_xchg -#define erts_smp_atomic64_cmpxchg_ddrb erts_no_atomic64_cmpxchg -#define erts_smp_atomic64_read_bset_ddrb erts_no_atomic64_read_bset - -#define erts_smp_atomic64_init_rb erts_no_atomic64_set -#define erts_smp_atomic64_set_rb erts_no_atomic64_set -#define erts_smp_atomic64_read_rb erts_no_atomic64_read -#define erts_smp_atomic64_inc_read_rb erts_no_atomic64_inc_read -#define erts_smp_atomic64_dec_read_rb 
erts_no_atomic64_dec_read -#define erts_smp_atomic64_inc_rb erts_no_atomic64_inc -#define erts_smp_atomic64_dec_rb erts_no_atomic64_dec -#define erts_smp_atomic64_add_read_rb erts_no_atomic64_add_read -#define erts_smp_atomic64_add_rb erts_no_atomic64_add -#define erts_smp_atomic64_read_bor_rb erts_no_atomic64_read_bor -#define erts_smp_atomic64_read_band_rb erts_no_atomic64_read_band -#define erts_smp_atomic64_xchg_rb erts_no_atomic64_xchg -#define erts_smp_atomic64_cmpxchg_rb erts_no_atomic64_cmpxchg -#define erts_smp_atomic64_read_bset_rb erts_no_atomic64_read_bset - -#define erts_smp_atomic64_init_wb erts_no_atomic64_set -#define erts_smp_atomic64_set_wb erts_no_atomic64_set -#define erts_smp_atomic64_read_wb erts_no_atomic64_read -#define erts_smp_atomic64_inc_read_wb erts_no_atomic64_inc_read -#define erts_smp_atomic64_dec_read_wb erts_no_atomic64_dec_read -#define erts_smp_atomic64_inc_wb erts_no_atomic64_inc -#define erts_smp_atomic64_dec_wb erts_no_atomic64_dec -#define erts_smp_atomic64_add_read_wb erts_no_atomic64_add_read -#define erts_smp_atomic64_add_wb erts_no_atomic64_add -#define erts_smp_atomic64_read_bor_wb erts_no_atomic64_read_bor -#define erts_smp_atomic64_read_band_wb erts_no_atomic64_read_band -#define erts_smp_atomic64_xchg_wb erts_no_atomic64_xchg -#define erts_smp_atomic64_cmpxchg_wb erts_no_atomic64_cmpxchg -#define erts_smp_atomic64_read_bset_wb erts_no_atomic64_read_bset - -#define erts_smp_atomic64_set_dirty erts_no_atomic64_set -#define erts_smp_atomic64_read_dirty erts_no_atomic64_read - -#endif /* !ERTS_SMP */ - -#if ERTS_GLB_INLINE_INCL_FUNC_DEF - -ERTS_GLB_INLINE void -erts_smp_thr_init(erts_smp_thr_init_data_t *id) -{ -#ifdef ERTS_SMP - erts_thr_init(id); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_thr_create(erts_smp_tid_t *tid, void * (*func)(void *), void *arg, - erts_smp_thr_opts_t *opts) -{ -#ifdef ERTS_SMP - erts_thr_create(tid, func, arg, opts); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_thr_join(erts_smp_tid_t tid, void **thr_res) -{ -#ifdef ERTS_SMP - erts_thr_join(tid, thr_res); -#endif -} - - -ERTS_GLB_INLINE void -erts_smp_thr_detach(erts_smp_tid_t tid) -{ -#ifdef ERTS_SMP - erts_thr_detach(tid); -#endif -} - - -ERTS_GLB_INLINE void -erts_smp_thr_exit(void *res) -{ -#ifdef ERTS_SMP - erts_thr_exit(res); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_install_exit_handler(void (*exit_handler)(void)) -{ -#ifdef ERTS_SMP - erts_thr_install_exit_handler(exit_handler); -#endif -} - -ERTS_GLB_INLINE erts_smp_tid_t -erts_smp_thr_self(void) -{ -#ifdef ERTS_SMP - return erts_thr_self(); -#else - return 0; -#endif -} - - -ERTS_GLB_INLINE int -erts_smp_equal_tids(erts_smp_tid_t x, erts_smp_tid_t y) -{ -#ifdef ERTS_SMP - return erts_equal_tids(x, y); -#else - return 1; -#endif -} - - -#ifdef ERTS_HAVE_REC_MTX_INIT -ERTS_GLB_INLINE void -erts_smp_rec_mtx_init(erts_smp_mtx_t *mtx) -{ -#ifdef ERTS_SMP - erts_rec_mtx_init(mtx); -#endif -} -#endif - -ERTS_GLB_INLINE void -erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags) -{ -#ifdef ERTS_SMP - erts_mtx_init(mtx, name, extra, flags); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags) -{ -#ifdef ERTS_SMP - erts_mtx_init_locked(mtx, name, extra, flags); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_mtx_destroy(erts_smp_mtx_t *mtx) -{ -#ifdef ERTS_SMP - erts_mtx_destroy(mtx); -#endif -} - -ERTS_GLB_INLINE int -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_mtx_trylock_x(erts_smp_mtx_t *mtx, char 
*file, unsigned int line)
-#else
-erts_smp_mtx_trylock(erts_smp_mtx_t *mtx)
-#endif
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
-    return erts_mtx_trylock_x(mtx,file,line);
-#elif defined(ERTS_SMP)
-    return erts_mtx_trylock(mtx);
-#else
-    return 0;
-#endif
-
-}
-
-
-ERTS_GLB_INLINE void
-#ifdef ERTS_ENABLE_LOCK_POSITION
-erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line)
-#else
-erts_smp_mtx_lock(erts_smp_mtx_t *mtx)
-#endif
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
-    erts_mtx_lock_x(mtx, file, line);
-#elif defined(ERTS_SMP)
-    erts_mtx_lock(mtx);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_mtx_unlock(erts_smp_mtx_t *mtx)
-{
-#ifdef ERTS_SMP
-    erts_mtx_unlock(mtx);
-#endif
-}
-
-ERTS_GLB_INLINE int
-erts_smp_lc_mtx_is_locked(erts_smp_mtx_t *mtx)
-{
-#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
-    return erts_lc_mtx_is_locked(mtx);
-#else
-    return 0;
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_cnd_init(erts_smp_cnd_t *cnd)
-{
-#ifdef ERTS_SMP
-    erts_cnd_init(cnd);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_cnd_destroy(erts_smp_cnd_t *cnd)
-{
-#ifdef ERTS_SMP
-    erts_cnd_destroy(cnd);
-#endif
-}
-
-ERTS_GLB_INLINE void
-erts_smp_cnd_wait(erts_smp_cnd_t *cnd, erts_smp_mtx_t *mtx)
-{
-#ifdef ERTS_SMP
-    erts_cnd_wait(cnd, mtx);
-#endif
-}
-
-/*
- * IMPORTANT note about erts_smp_cnd_signal() and erts_smp_cnd_broadcast()
- *
- * POSIX allows a call to `pthread_cond_signal' or `pthread_cond_broadcast'
- * even though the associated mutex or mutexes are not locked by the
- * caller. Our implementation does not allow that, in order to avoid a
- * performance penalty. That is, all associated mutexes *need* to be
- * locked by the caller of erts_smp_cnd_signal()/erts_smp_cnd_broadcast()!
- */ - -ERTS_GLB_INLINE void -erts_smp_cnd_signal(erts_smp_cnd_t *cnd) -{ -#ifdef ERTS_SMP - erts_cnd_signal(cnd); -#endif -} - - -ERTS_GLB_INLINE void -erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd) -{ -#ifdef ERTS_SMP - erts_cnd_broadcast(cnd); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_rwmtx_set_reader_group(int no) -{ -#ifdef ERTS_SMP - erts_rwmtx_set_reader_group(no); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx, - char *name, - Eterm extra, - erts_lock_flags_t flags) -{ -#ifdef ERTS_SMP - erts_smp_rwmtx_init_opt(rwmtx, NULL, name, extra, flags); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx, - erts_smp_rwmtx_opt_t *opt, - char *name, - Eterm extra, - erts_lock_flags_t flags) -{ -#ifdef ERTS_SMP - erts_rwmtx_init_opt(rwmtx, opt, name, extra, flags); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx) -{ -#ifdef ERTS_SMP - erts_rwmtx_destroy(rwmtx); -#endif -} - -ERTS_GLB_INLINE int -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_rwmtx_tryrlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line) -#else -erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx) -#endif -{ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) - return erts_rwmtx_tryrlock_x(rwmtx, file, line); -#elif defined(ERTS_SMP) - return erts_rwmtx_tryrlock(rwmtx); -#else - return 0; -#endif -} - -ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_rwmtx_rlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line) -#else -erts_smp_rwmtx_rlock(erts_smp_rwmtx_t *rwmtx) -#endif -{ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) - erts_rwmtx_rlock_x(rwmtx, file, line); -#elif defined(ERTS_SMP) - erts_rwmtx_rlock(rwmtx); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx) -{ -#ifdef ERTS_SMP - erts_rwmtx_runlock(rwmtx); -#endif -} - - -ERTS_GLB_INLINE int -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_rwmtx_tryrwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line) -#else -erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx) -#endif -{ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) - return erts_rwmtx_tryrwlock_x(rwmtx, file, line); -#elif defined(ERTS_SMP) - return erts_rwmtx_tryrwlock(rwmtx); -#else - return 0; -#endif -} - -ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_rwmtx_rwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line) -#else -erts_smp_rwmtx_rwlock(erts_smp_rwmtx_t *rwmtx) -#endif -{ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) - erts_rwmtx_rwlock_x(rwmtx, file, line); -#elif defined(ERTS_SMP) - erts_rwmtx_rwlock(rwmtx); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx) -{ -#ifdef ERTS_SMP - erts_rwmtx_rwunlock(rwmtx); -#endif -} - -#if 0 /* The following rwmtx function names are - reserved for potential future use. 
*/ - -/* Try upgrade from r-locked state to rw-locked state */ -ERTS_GLB_INLINE int -erts_smp_rwmtx_trywlock(erts_smp_rwmtx_t *rwmtx) -{ - return 0; -} - -/* Upgrade from r-locked state to rw-locked state */ -ERTS_GLB_INLINE void -erts_smp_rwmtx_wlock(erts_smp_rwmtx_t *rwmtx) -{ - -} - -/* Downgrade from rw-locked state to r-locked state */ -ERTS_GLB_INLINE void -erts_smp_rwmtx_wunlock(erts_smp_rwmtx_t *rwmtx) -{ - -} - -#endif - -ERTS_GLB_INLINE int -erts_smp_lc_rwmtx_is_rlocked(erts_smp_rwmtx_t *mtx) -{ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) - return erts_lc_rwmtx_is_rlocked(mtx); -#else - return 0; -#endif -} - -ERTS_GLB_INLINE int -erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx) -{ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) - return erts_lc_rwmtx_is_rwlocked(mtx); -#else - return 0; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_spinlock_init(erts_smp_spinlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags) -{ -#ifdef ERTS_SMP - erts_spinlock_init(lock, name, extra, flags); -#else - (void)lock; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_spinlock_destroy(erts_smp_spinlock_t *lock) -{ -#ifdef ERTS_SMP - erts_spinlock_destroy(lock); -#else - (void)lock; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_spin_unlock(erts_smp_spinlock_t *lock) -{ -#ifdef ERTS_SMP - erts_spin_unlock(lock); -#else - (void)lock; -#endif -} - -ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_spin_lock_x(erts_smp_spinlock_t *lock, char *file, unsigned int line) -#else -erts_smp_spin_lock(erts_smp_spinlock_t *lock) -#endif -{ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) - erts_spin_lock_x(lock, file, line); -#elif defined(ERTS_SMP) - erts_spin_lock(lock); -#else - (void)lock; -#endif -} - -ERTS_GLB_INLINE int -erts_smp_lc_spinlock_is_locked(erts_smp_spinlock_t *lock) -{ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) - return erts_lc_spinlock_is_locked(lock); -#else - return 0; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_rwlock_init(erts_smp_rwlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags) -{ -#ifdef ERTS_SMP - erts_rwlock_init(lock, name, extra, flags); -#else - (void)lock; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_rwlock_destroy(erts_smp_rwlock_t *lock) -{ -#ifdef ERTS_SMP - erts_rwlock_destroy(lock); -#else - (void)lock; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_read_unlock(erts_smp_rwlock_t *lock) -{ -#ifdef ERTS_SMP - erts_read_unlock(lock); -#else - (void)lock; -#endif -} - -ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_read_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line) -#else -erts_smp_read_lock(erts_smp_rwlock_t *lock) -#endif -{ -#if defined(ERTS_ENABLE_LOCK_POSITION) && defined(ERTS_SMP) - erts_read_lock_x(lock, file, line); -#elif defined(ERTS_SMP) - erts_read_lock(lock); -#else - (void)lock; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_write_unlock(erts_smp_rwlock_t *lock) -{ -#ifdef ERTS_SMP - erts_write_unlock(lock); -#else - (void)lock; -#endif -} - -ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_write_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line) -#else -erts_smp_write_lock(erts_smp_rwlock_t *lock) -#endif -{ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION) - erts_write_lock_x(lock, file, line); -#elif defined(ERTS_SMP) - erts_write_lock(lock); -#else - (void)lock; -#endif -} - -ERTS_GLB_INLINE int -erts_smp_lc_rwlock_is_rlocked(erts_smp_rwlock_t *lock) -{ -#if defined(ERTS_SMP) && 
defined(ERTS_ENABLE_LOCK_CHECK) - return erts_lc_rwlock_is_rlocked(lock); -#else - return 0; -#endif -} - -ERTS_GLB_INLINE int -erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock) -{ -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) - return erts_lc_rwlock_is_rwlocked(lock); -#else - return 0; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp, char* keyname) -{ -#ifdef ERTS_SMP - erts_tsd_key_create(keyp,keyname); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_tsd_key_delete(erts_smp_tsd_key_t key) -{ -#ifdef ERTS_SMP - erts_tsd_key_delete(key); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_tsd_set(erts_smp_tsd_key_t key, void *value) -{ -#ifdef ERTS_SMP - erts_tsd_set(key, value); -#endif -} - -ERTS_GLB_INLINE void * -erts_smp_tsd_get(erts_smp_tsd_key_t key) -{ -#ifdef ERTS_SMP - return erts_tsd_get(key); -#else - return NULL; -#endif -} - -#ifdef ERTS_THR_HAVE_SIG_FUNCS -#define ERTS_SMP_THR_HAVE_SIG_FUNCS 1 - -ERTS_GLB_INLINE void -erts_smp_thr_sigmask(int how, const sigset_t *set, sigset_t *oset) -{ -#ifdef ERTS_SMP - erts_thr_sigmask(how, set, oset); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_thr_sigwait(const sigset_t *set, int *sig) -{ -#ifdef ERTS_SMP - erts_thr_sigwait(set, sig); -#endif -} - -#endif /* #ifdef ERTS_THR_HAVE_SIG_FUNCS */ - -#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ - -#endif /* ERL_SMP_H */ - -#ifdef ERTS_UNDEF_DEPRECATED_ATOMICS - -/* Deprecated functions to replace */ - -#undef erts_smp_atomic_init -#undef erts_smp_atomic_set -#undef erts_smp_atomic_read -#undef erts_smp_atomic_inctest -#undef erts_smp_atomic_dectest -#undef erts_smp_atomic_inc -#undef erts_smp_atomic_dec -#undef erts_smp_atomic_addtest -#undef erts_smp_atomic_add -#undef erts_smp_atomic_xchg -#undef erts_smp_atomic_cmpxchg -#undef erts_smp_atomic_bor -#undef erts_smp_atomic_band - -#undef erts_smp_atomic32_init -#undef erts_smp_atomic32_set -#undef erts_smp_atomic32_read -#undef erts_smp_atomic32_inctest -#undef erts_smp_atomic32_dectest -#undef erts_smp_atomic32_inc -#undef erts_smp_atomic32_dec -#undef erts_smp_atomic32_addtest -#undef erts_smp_atomic32_add -#undef erts_smp_atomic32_xchg -#undef erts_smp_atomic32_cmpxchg -#undef erts_smp_atomic32_bor -#undef erts_smp_atomic32_band - -#endif diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h index 842802f8d9..6daf043117 100644 --- a/erts/emulator/beam/erl_term.h +++ b/erts/emulator/beam/erl_term.h @@ -270,7 +270,6 @@ _ET_DECLARE_CHECKED(Eterm*,list_val,Wterm) #define is_byte(x) (((x) & ((~(Uint)0 << (_TAG_IMMED1_SIZE+8)) + _TAG_IMMED1_MASK)) == _TAG_IMMED1_SMALL) #define is_valid_bit_size(x) (((Sint)(x)) >= 0 && ((x) & 0x7F) == _TAG_IMMED1_SMALL) #define is_not_valid_bit_size(x) (!is_valid_bit_size((x))) -#define MY_IS_SSMALL(x) (((Uint) ((((x)) >> (SMALL_BITS-1)) + 1)) < 2) #define _unchecked_unsigned_val(x) ((x) >> _TAG_IMMED1_SIZE) _ET_DECLARE_CHECKED(Uint,unsigned_val,Eterm) #define unsigned_val(x) _ET_APPLY(unsigned_val,(x)) diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c index 2a9f276e02..96824dc06e 100644 --- a/erts/emulator/beam/erl_thr_progress.c +++ b/erts/emulator/beam/erl_thr_progress.c @@ -80,7 +80,6 @@ #include "erl_thr_progress.h" #include "global.h" -#ifdef ERTS_SMP #define ERTS_THR_PRGR_DBG_CHK_WAKEUP_REQUEST_VALUE 0 @@ -1513,4 +1512,3 @@ void erts_thr_progress_dbg_print_state(void) } -#endif diff --git a/erts/emulator/beam/erl_thr_progress.h b/erts/emulator/beam/erl_thr_progress.h index b2894ba1fe..fa936b5707 
100644 --- a/erts/emulator/beam/erl_thr_progress.h +++ b/erts/emulator/beam/erl_thr_progress.h @@ -33,18 +33,6 @@ #include "sys.h" -#ifndef ERTS_SMP - -#define erts_smp_thr_progress_block() ((void) 0) -#define erts_smp_thr_progress_unblock() ((void) 0) -#define erts_smp_thr_progress_is_blocking() 1 - -#else /* ERTS_SMP */ - -#define erts_smp_thr_progress_block erts_thr_progress_block -#define erts_smp_thr_progress_unblock erts_thr_progress_unblock -#define erts_smp_thr_progress_is_blocking erts_thr_progress_is_blocking - void erts_thr_progress_block(void); void erts_thr_progress_unblock(void); int erts_thr_progress_is_blocking(void); @@ -87,13 +75,10 @@ typedef struct { int erts_thr_progress_fatal_error_block(ErtsThrPrgrData *tmp_tpd_bufp); void erts_thr_progress_fatal_error_wait(SWord timeout); -#endif /* ERTS_SMP */ typedef struct ErtsThrPrgrLaterOp_ ErtsThrPrgrLaterOp; struct ErtsThrPrgrLaterOp_ { -#ifdef ERTS_SMP ErtsThrPrgrVal later; -#endif void (*func)(void *); void *data; ErtsThrPrgrLaterOp *next; @@ -107,7 +92,6 @@ struct ErtsThrPrgrLaterOp_ { #include "erl_threads.h" #include "erl_process.h" -#ifdef ERTS_SMP /* ERTS_THR_PRGR_VAL_FIRST should only be used when initializing... */ #define ERTS_THR_PRGR_VAL_FIRST ((ErtsThrPrgrVal) 0) @@ -324,6 +308,5 @@ erts_thr_progress_has_reached(ErtsThrPrgrVal val) #endif -#endif /* ERTS_SMP */ #endif diff --git a/erts/emulator/beam/erl_thr_queue.c b/erts/emulator/beam/erl_thr_queue.c index f56d0828dd..548c2768e5 100644 --- a/erts/emulator/beam/erl_thr_queue.c +++ b/erts/emulator/beam/erl_thr_queue.c @@ -87,32 +87,10 @@ #define ERTS_THR_Q_MAX_FINI_DEQ_OPS 50 -#ifdef ERTS_SMP ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(sl_element, ErtsThrQElement_t, 1000, ERTS_ALC_T_THR_Q_EL_SL) -#else - -static void -init_sl_element_alloc(void) -{ -} - -static ErtsThrQElement_t * -sl_element_alloc(void) -{ - return erts_alloc(ERTS_ALC_T_THR_Q_EL_SL, - sizeof(ErtsThrQElement_t)); -} - -static void -sl_element_free(ErtsThrQElement_t *p) -{ - erts_free(ERTS_ALC_T_THR_Q_EL_SL, p); -} - -#endif #define ErtsThrQDirtyReadEl(A) \ ((ErtsThrQElement_t *) erts_atomic_read_dirty((A))) @@ -135,14 +113,6 @@ static void noop_callback(void *arg) { } void erts_thr_q_initialize(ErtsThrQ_t *q, ErtsThrQInit_t *qi) { -#ifndef USE_THREADS - q->init = *qi; - if (!q->init.notify) - q->init.notify = noop_callback; - q->first = NULL; - q->last = NULL; - q->q.blk = NULL; -#else erts_atomic_init_nob(&q->tail.data.marker.next, ERTS_AINT_NULL); q->tail.data.marker.data.ptr = NULL; erts_atomic_init_nob(&q->tail.data.last, @@ -164,10 +134,8 @@ erts_thr_q_initialize(ErtsThrQ_t *q, ErtsThrQInit_t *qi) q->head.deq_fini.automatic = qi->auto_finalize_dequeue; q->head.deq_fini.start = NULL; q->head.deq_fini.end = NULL; -#ifdef ERTS_SMP q->head.next.thr_progress = erts_thr_progress_current(); q->head.next.thr_progress_reached = 1; -#endif q->head.next.um_refc_ix = 1; q->head.next.unref_end = &q->tail.data.marker; q->head.used_marker = 1; @@ -176,15 +144,12 @@ erts_thr_q_initialize(ErtsThrQ_t *q, ErtsThrQInit_t *qi) q->q.finalizing = 0; q->q.live = qi->live.queue; q->q.blk = NULL; -#endif } ErtsThrQCleanState_t erts_thr_q_finalize(ErtsThrQ_t *q) { -#ifdef USE_THREADS q->q.finalizing = 1; -#endif while (erts_thr_q_dequeue(q)); return erts_thr_q_clean(q); } @@ -229,7 +194,6 @@ erts_thr_q_destroy(ErtsThrQ_t *q) return erts_thr_q_finalize(q); } -#ifdef USE_THREADS static void destroy(ErtsThrQ_t *q) @@ -249,7 +213,6 @@ destroy(ErtsThrQ_t *q) erts_free(atype, q->q.blk); } -#endif static ERTS_INLINE ErtsThrQElement_t * 
element_live_alloc(ErtsThrQLive_t live) @@ -267,11 +230,7 @@ static ERTS_INLINE ErtsThrQElement_t * element_alloc(ErtsThrQ_t *q) { ErtsThrQLive_t live; -#ifdef USE_THREADS live = q->tail.data.live; -#else - live = q->init.live.objects; -#endif return element_live_alloc(live); } @@ -291,15 +250,10 @@ static ERTS_INLINE void element_free(ErtsThrQ_t *q, ErtsThrQElement_t *el) { ErtsThrQLive_t live; -#ifdef USE_THREADS live = q->head.live; -#else - live = q->init.live.objects; -#endif element_live_free(live, el); } -#ifdef USE_THREADS static ERTS_INLINE ErtsThrQElement_t * enqueue_managed(ErtsThrQ_t *q, ErtsThrQElement_t *this) @@ -423,11 +377,9 @@ clean(ErtsThrQ_t *q, int max_ops, int do_notify) return ERTS_THR_Q_CLEAN; } -#ifdef ERTS_SMP if (q->head.next.thr_progress_reached || erts_thr_progress_has_reached(q->head.next.thr_progress)) { q->head.next.thr_progress_reached = 1; -#endif um_refc_ix = q->head.next.um_refc_ix; if (erts_atomic_read_acqb(&q->tail.data.um_refc[um_refc_ix]) == 0) { /* Move unreferenced end pointer forward... */ @@ -439,23 +391,17 @@ clean(ErtsThrQ_t *q, int max_ops, int do_notify) ilast = (erts_aint_t) enqueue_marker(q, NULL); if (q->head.unref_end == (ErtsThrQElement_t *) ilast) - ERTS_SMP_MEMORY_BARRIER; + ERTS_THR_MEMORY_BARRIER; else { q->head.next.unref_end = (ErtsThrQElement_t *) ilast; -#ifdef ERTS_SMP q->head.next.thr_progress = erts_thr_progress_later(NULL); -#endif erts_atomic32_set_relb(&q->tail.data.um_refc_ix, um_refc_ix); q->head.next.um_refc_ix = um_refc_ix == 0 ? 1 : 0; -#ifdef ERTS_SMP q->head.next.thr_progress_reached = 0; -#endif } } -#ifdef ERTS_SMP } -#endif head = ErtsThrQDirtyReadEl(&q->head.head); if (q->head.first == head) { @@ -489,9 +435,7 @@ clean(ErtsThrQ_t *q, int max_ops, int do_notify) check_thr_progress: -#ifdef ERTS_SMP if (q->head.next.thr_progress_reached) -#endif { int um_refc_ix = q->head.next.um_refc_ix; if (erts_atomic_read_acqb(&q->tail.data.um_refc[um_refc_ix]) == 0) { @@ -505,24 +449,16 @@ check_thr_progress: return ERTS_THR_Q_NEED_THR_PRGR; } -#endif ErtsThrQCleanState_t erts_thr_q_clean(ErtsThrQ_t *q) { -#ifdef USE_THREADS return clean(q, ERTS_THR_Q_MAX_SCHED_CLEAN_OPS, 0); -#else - return ERTS_THR_Q_CLEAN; -#endif } ErtsThrQCleanState_t erts_thr_q_inspect(ErtsThrQ_t *q, int ensure_empty) { -#ifndef USE_THREADS - return ERTS_THR_Q_CLEAN; -#else ErtsThrQElement_t *head = ErtsThrQDirtyReadEl(&q->head.head); if (ensure_empty) { erts_aint_t inext; @@ -553,39 +489,21 @@ erts_thr_q_inspect(ErtsThrQ_t *q, int ensure_empty) if (q->head.first != q->head.unref_end) return ERTS_THR_Q_DIRTY; -#ifdef ERTS_SMP if (q->head.next.thr_progress_reached) -#endif { int um_refc_ix = q->head.next.um_refc_ix; if (erts_atomic_read_acqb(&q->tail.data.um_refc[um_refc_ix]) == 0) return ERTS_THR_Q_DIRTY; } return ERTS_THR_Q_NEED_THR_PRGR; -#endif } static void enqueue(ErtsThrQ_t *q, void *data, ErtsThrQElement_t *this) { -#ifndef USE_THREADS - ASSERT(data); - - this->next = NULL; - this->data.ptr = data; - - if (q->last) - q->last->next = this; - else { - q->first = q->last = this; - q->init.notify(q->init.arg); - } -#else int notify; int um_refc_ix = 0; -#ifdef ERTS_SMP int unmanaged_thread; -#endif #if ERTS_THR_Q_DBG_CHK_DATA if (!data) @@ -596,10 +514,8 @@ enqueue(ErtsThrQ_t *q, void *data, ErtsThrQElement_t *this) this->data.ptr = data; -#ifdef ERTS_SMP unmanaged_thread = !erts_thr_progress_is_managed_thread(); if (unmanaged_thread) -#endif { um_refc_ix = erts_atomic32_read_acqb(&q->tail.data.um_refc_ix); while (1) { @@ -616,9 +532,7 @@ 
enqueue(ErtsThrQ_t *q, void *data, ErtsThrQElement_t *this) notify = this == enqueue_managed(q, this); -#ifdef ERTS_SMP if (unmanaged_thread) -#endif { if (notify) erts_atomic_dec_relb(&q->tail.data.um_refc[um_refc_ix]); @@ -627,7 +541,6 @@ enqueue(ErtsThrQ_t *q, void *data, ErtsThrQElement_t *this) } if (notify) q->tail.data.notify(q->tail.data.arg); -#endif } void @@ -645,9 +558,6 @@ erts_thr_q_prepare_enqueue(ErtsThrQ_t *q) int erts_thr_q_get_finalize_dequeue_data(ErtsThrQ_t *q, ErtsThrQFinDeQ_t *fdp) { -#ifndef USE_THREADS - return 0; -#else #ifdef DEBUG if (!q->head.deq_fini.start) { ASSERT(!q->head.deq_fini.end); @@ -670,14 +580,12 @@ erts_thr_q_get_finalize_dequeue_data(ErtsThrQ_t *q, ErtsThrQFinDeQ_t *fdp) q->head.deq_fini.start = NULL; q->head.deq_fini.end = NULL; return fdp->start != NULL; -#endif } void erts_thr_q_append_finalize_dequeue_data(ErtsThrQFinDeQ_t *fdp0, ErtsThrQFinDeQ_t *fdp1) { -#ifdef USE_THREADS if (fdp1->start) { if (fdp0->end) ErtsThrQDirtySetEl(&fdp0->end->next, fdp1->start); @@ -685,13 +593,11 @@ erts_thr_q_append_finalize_dequeue_data(ErtsThrQFinDeQ_t *fdp0, fdp0->start = fdp1->start; fdp0->end = fdp1->end; } -#endif } int erts_thr_q_finalize_dequeue(ErtsThrQFinDeQ_t *state) { -#ifdef USE_THREADS ErtsThrQElement_t *start = state->start; if (start) { ErtsThrQLive_t live; @@ -710,17 +616,14 @@ int erts_thr_q_finalize_dequeue(ErtsThrQFinDeQ_t *state) return 1; /* More to do */ state->end = NULL; } -#endif return 0; } void erts_thr_q_finalize_dequeue_state_init(ErtsThrQFinDeQ_t *state) { -#ifdef USE_THREADS state->start = NULL; state->end = NULL; -#endif } @@ -734,22 +637,6 @@ erts_thr_q_enqueue_prepared(ErtsThrQ_t *q, void *data, ErtsThrQPrepEnQ_t *prep) void * erts_thr_q_dequeue(ErtsThrQ_t *q) { -#ifndef USE_THREADS - void *res; - ErtsThrQElement_t *tmp; - - if (!q->first) - return NULL; - tmp = q->first; - res = tmp->data.ptr; - q->first = tmp->next; - if (!q->first) - q->last = NULL; - - element_free(q, tmp); - - return res; -#else ErtsThrQElement_t *head; erts_aint_t inext; void *res; @@ -778,7 +665,6 @@ erts_thr_q_dequeue(ErtsThrQ_t *q) ? 
ERTS_THR_Q_MAX_DEQUEUE_CLEAN_OPS
	  : ERTS_THR_Q_MAX_SCHED_CLEAN_OPS), 1);
    return res;
-#endif
}

#ifdef USE_LTTNG_VM_TRACEPOINTS
@@ -786,14 +672,6 @@ int
erts_thr_q_length_dirty(ErtsThrQ_t *q)
{
    int n = 0;
-#ifndef USE_THREADS
-    void *res;
-    ErtsThrQElement_t *tmp;
-
-    for (tmp = q->first; tmp != NULL; tmp = tmp->next) {
-        n++;
-    }
-#else
    ErtsThrQElement_t *e;
    erts_aint_t inext;
@@ -808,7 +686,6 @@ erts_thr_q_length_dirty(ErtsThrQ_t *q)
    }
	inext = erts_atomic_read_acqb(&e->next);
    }
-#endif
    return n;
}
#endif
diff --git a/erts/emulator/beam/erl_thr_queue.h b/erts/emulator/beam/erl_thr_queue.h
index 705a67af4c..163a25318d 100644
--- a/erts/emulator/beam/erl_thr_queue.h
+++ b/erts/emulator/beam/erl_thr_queue.h
@@ -78,11 +78,7 @@ typedef struct ErtsThrQElement_t_ ErtsThrQElement_t;
typedef struct ErtsThrQElement_t ErtsThrQPrepEnQ_t;

struct ErtsThrQElement_t_ {
-#ifdef USE_THREADS
    erts_atomic_t next;
-#else
-    ErtsThrQElement_t *next;
-#endif
    union {
	erts_atomic_t atmc;
	void *ptr;
    } data;
@@ -100,7 +96,6 @@ typedef enum {
    ERTS_THR_Q_DIRTY,
} ErtsThrQCleanState_t;

-#ifdef USE_THREADS
typedef struct {
    ErtsThrQElement_t marker;
@@ -108,9 +103,7 @@ typedef struct {
    erts_atomic_t um_refc[2];
    erts_atomic32_t um_refc_ix;
    ErtsThrQLive_t live;
-#ifdef ERTS_SMP
    erts_atomic32_t thr_prgr_clean_scheduled;
-#endif
    void *arg;
    void (*notify)(void *);
} ErtsThrQTail_t;
@@ -141,10 +134,8 @@ struct ErtsThrQ_t_ {
	ErtsThrQElement_t *end;
    } deq_fini;
    struct {
-#ifdef ERTS_SMP
	ErtsThrPrgrVal thr_progress;
	int thr_progress_reached;
-#endif
	int um_refc_ix;
	ErtsThrQElement_t *unref_end;
    } next;
@@ -159,18 +150,6 @@ struct ErtsThrQ_t_ {
    } q;
};

-#else /* !USE_THREADS */
-
-struct ErtsThrQ_t_ {
-    ErtsThrQInit_t init;
-    ErtsThrQElement_t *first;
-    ErtsThrQElement_t *last;
-    struct {
-	void *blk;
-    } q;
-};
-
-#endif

void erts_thr_q_init(void);
void erts_thr_q_initialize(ErtsThrQ_t *, ErtsThrQInit_t *);
@@ -194,19 +173,15 @@ void erts_thr_q_finalize_dequeue_state_init(ErtsThrQFinDeQ_t *);
int erts_thr_q_length_dirty(ErtsThrQ_t *);
#endif

-#ifdef ERTS_SMP
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_q_need_thr_progress(ErtsThrQ_t *q);
-#endif

#if ERTS_GLB_INLINE_INCL_FUNC_DEF

-#ifdef ERTS_SMP
ERTS_GLB_INLINE ErtsThrPrgrVal
erts_thr_q_need_thr_progress(ErtsThrQ_t *q)
{
    return q->head.next.thr_progress;
}
-#endif

#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index 8b5c17d739..aedceb6fc2 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -45,11 +45,6 @@
 * Data dependency read barrier. Orders *only* loads
 * according to data dependency across the barrier.
 *
- * If thread support has been disabled, these barriers will become no-ops.
- *
- * If the prefix ERTS_THR_ is replaced with ERTS_SMP_, the barriers will
- * be enabled only in the SMP enabled runtime system.
- *
 * --- Atomic operations ---
 *
 * Atomic operations exist for 32-bit, word size, and double word size
@@ -86,20 +81,6 @@
 * barrier. Load in atomic operation is ordered
 * before the barrier.
 *
- * If thread support has been disabled, these functions are mapped to
- * functions that perform the same operation, but aren't atomic
- * and don't imply any memory barriers.
- *
- * If the atomic operations are prefixed with erts_smp_ instead of only
- * erts_ the atomic operations will only be atomic in the SMP enabled
- * runtime system, and will be mapped to non-atomic operations without
- * memory barriers in the runtime system without SMP support.
Atomic - * operations with erts_smp_ prefix should use the atomic types - * erts_smp_atomic32_t, erts_smp_atomic_t, and erts_smp_dw_atomic_t - * instead of erts_atomic32_t, erts_atomic_t, and erts_dw_atomic_t. The - * integer data types erts_aint32_t, erts_aint_t, and erts_dw_atomic_t - * are the same. - * * --- 32-bit atomic operations --- * * The following 32-bit atomic operations exist. <B> should be @@ -262,7 +243,6 @@ #include "erl_lock_flags.h" #include "erl_term.h" -#ifdef USE_THREADS #define ETHR_TRY_INLINE_FUNCS #include "ethread.h" @@ -405,76 +385,6 @@ __decl_noreturn void __noreturn erts_thr_fatal_error(int, char *); # define ERTS_HAVE_REC_MTX_INIT ETHR_HAVE_ETHR_REC_MUTEX_INIT #endif -#else /* #ifdef USE_THREADS */ - -#define ERTS_THR_MEMORY_BARRIER -#define ERTS_THR_WRITE_MEMORY_BARRIER -#define ERTS_THR_READ_MEMORY_BARRIER -#define ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER - -#define ERTS_THR_OPTS_DEFAULT_INITER 0 -typedef int erts_thr_opts_t; -typedef int erts_thr_init_data_t; -typedef int erts_thr_late_init_data_t; -typedef int erts_tid_t; -typedef int erts_mtx_t; -typedef int erts_cnd_t; -#define ERTS_RWMTX_OPT_DEFAULT_INITER {0} -#define ERTS_RWMTX_TYPE_NORMAL 0 -#define ERTS_RWMTX_TYPE_FREQUENT_READ 0 -#define ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ 0 -#define ERTS_RWMTX_LONG_LIVED 0 -#define ERTS_RWMTX_SHORT_LIVED 0 -#define ERTS_RWMTX_UNKNOWN_LIVED 0 -typedef struct { - char type; - char lived; - int main_spincount; - int aux_spincount; -} erts_rwmtx_opt_t; -typedef int erts_rwmtx_t; -typedef int erts_tsd_key_t; -typedef int erts_tse_t; - -typedef struct { SWord sint[2]; } erts_dw_aint_t; -typedef SWord erts_aint_t; -typedef Sint32 erts_aint32_t; -typedef Sint64 erts_aint64_t; - -#define erts_dw_atomic_t erts_dw_aint_t -#define erts_atomic_t erts_aint_t -#define erts_atomic32_t erts_aint32_t -#define erts_atomic64_t erts_aint64_t - -#if __GNUC__ > 2 -typedef struct { } erts_spinlock_t; -typedef struct { } erts_rwlock_t; -#else -typedef struct { int gcc_is_buggy; } erts_spinlock_t; -typedef struct { int gcc_is_buggy; } erts_rwlock_t; -#endif - -#ifdef WORDS_BIGENDIAN -#define ERTS_DW_AINT_LOW_WORD 1 -#define ERTS_DW_AINT_HIGH_WORD 0 -#else -#define ERTS_DW_AINT_LOW_WORD 0 -#define ERTS_DW_AINT_HIGH_WORD 1 -#endif - -#define ERTS_MTX_INITER 0 -#define ERTS_CND_INITER 0 -#define ERTS_THR_INIT_DATA_DEF_INITER 0 - -#define ERTS_HAVE_REC_MTX_INIT 1 - -#endif /* #ifdef USE_THREADS */ - -#define erts_no_dw_atomic_t erts_dw_aint_t -#define erts_no_atomic_t erts_aint_t -#define erts_no_atomic32_t erts_aint32_t -#define erts_no_atomic64_t erts_aint64_t - #define ERTS_AINT_NULL ((erts_aint_t) NULL) #define ERTS_AINT_T_MAX (~(((erts_aint_t) 1) << (sizeof(erts_aint_t)*8-1))) @@ -544,79 +454,6 @@ ERTS_GLB_INLINE void erts_rwmtx_runlock(erts_rwmtx_t *rwmtx); ERTS_GLB_INLINE void erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx); ERTS_GLB_INLINE int erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx); ERTS_GLB_INLINE int erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx); - -ERTS_GLB_INLINE void erts_no_dw_atomic_set(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val); -ERTS_GLB_INLINE void erts_no_dw_atomic_read(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val); -ERTS_GLB_INLINE int erts_no_dw_atomic_cmpxchg(erts_no_dw_atomic_t *var, - erts_no_dw_atomic_t *val, - erts_no_dw_atomic_t *old_val); -ERTS_GLB_INLINE void erts_no_atomic_set(erts_no_atomic_t *var, erts_aint_t i); -ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read(erts_no_atomic_t *var); -ERTS_GLB_INLINE erts_aint_t 
erts_no_atomic_inc_read(erts_no_atomic_t *incp); -ERTS_GLB_INLINE erts_aint_t erts_no_atomic_dec_read(erts_no_atomic_t *decp); -ERTS_GLB_INLINE void erts_no_atomic_inc(erts_no_atomic_t *incp); -ERTS_GLB_INLINE void erts_no_atomic_dec(erts_no_atomic_t *decp); -ERTS_GLB_INLINE erts_aint_t erts_no_atomic_add_read(erts_no_atomic_t *addp, - erts_aint_t i); -ERTS_GLB_INLINE void erts_no_atomic_add(erts_no_atomic_t *addp, erts_aint_t i); -ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read_bor(erts_no_atomic_t *var, - erts_aint_t mask); -ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read_band(erts_no_atomic_t *var, - erts_aint_t mask); -ERTS_GLB_INLINE erts_aint_t erts_no_atomic_xchg(erts_no_atomic_t *xchgp, - erts_aint_t new); -ERTS_GLB_INLINE erts_aint_t erts_no_atomic_cmpxchg(erts_no_atomic_t *xchgp, - erts_aint_t new, - erts_aint_t expected); -ERTS_GLB_INLINE erts_aint_t erts_no_atomic_read_bset(erts_no_atomic_t *var, - erts_aint_t mask, - erts_aint_t set); -ERTS_GLB_INLINE void erts_no_atomic32_set(erts_no_atomic32_t *var, - erts_aint32_t i); -ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read(erts_no_atomic32_t *var); -ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_inc_read(erts_no_atomic32_t *incp); -ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_dec_read(erts_no_atomic32_t *decp); -ERTS_GLB_INLINE void erts_no_atomic32_inc(erts_no_atomic32_t *incp); -ERTS_GLB_INLINE void erts_no_atomic32_dec(erts_no_atomic32_t *decp); -ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_add_read(erts_no_atomic32_t *addp, - erts_aint32_t i); -ERTS_GLB_INLINE void erts_no_atomic32_add(erts_no_atomic32_t *addp, - erts_aint32_t i); -ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read_bor(erts_no_atomic32_t *var, - erts_aint32_t mask); -ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read_band(erts_no_atomic32_t *var, - erts_aint32_t mask); -ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_xchg(erts_no_atomic32_t *xchgp, - erts_aint32_t new); -ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_cmpxchg(erts_no_atomic32_t *xchgp, - erts_aint32_t new, - erts_aint32_t expected); -ERTS_GLB_INLINE erts_aint32_t erts_no_atomic32_read_bset(erts_no_atomic32_t *var, - erts_aint32_t mask, - erts_aint32_t set); -ERTS_GLB_INLINE void erts_no_atomic64_set(erts_no_atomic64_t *var, - erts_aint64_t i); -ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_read(erts_no_atomic64_t *var); -ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_inc_read(erts_no_atomic64_t *incp); -ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_dec_read(erts_no_atomic64_t *decp); -ERTS_GLB_INLINE void erts_no_atomic64_inc(erts_no_atomic64_t *incp); -ERTS_GLB_INLINE void erts_no_atomic64_dec(erts_no_atomic64_t *decp); -ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_add_read(erts_no_atomic64_t *addp, - erts_aint64_t i); -ERTS_GLB_INLINE void erts_no_atomic64_add(erts_no_atomic64_t *addp, - erts_aint64_t i); -ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_read_bor(erts_no_atomic64_t *var, - erts_aint64_t mask); -ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_read_band(erts_no_atomic64_t *var, - erts_aint64_t mask); -ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_xchg(erts_no_atomic64_t *xchgp, - erts_aint64_t new); -ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_cmpxchg(erts_no_atomic64_t *xchgp, - erts_aint64_t new, - erts_aint64_t expected); -ERTS_GLB_INLINE erts_aint64_t erts_no_atomic64_read_bset(erts_no_atomic64_t *var, - erts_aint64_t mask, - erts_aint64_t set); ERTS_GLB_INLINE void erts_spinlock_init(erts_spinlock_t *lock, char *name, Eterm extra, @@ -670,13 +507,10 @@ 
ERTS_GLB_INLINE void erts_thr_sigmask(int how, const sigset_t *set, sigset_t *oset); ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig); -#ifdef USE_THREADS ERTS_GLB_INLINE void erts_thr_kill(erts_tid_t tid, int sig); -#endif #endif /* #ifdef HAVE_ETHR_SIG_FUNCS */ -#ifdef USE_THREADS ERTS_GLB_INLINE erts_aint_t erts_atomic_read_bset_nob(erts_atomic_t *var, @@ -1684,379 +1518,6 @@ erts_atomic64_read_dirty(erts_atomic64_t *var) #endif /* ARCH_32 */ -#else /* !USE_THREADS */ - -/* Double word size atomics */ - -#define erts_dw_atomic_init_nob erts_no_dw_atomic_set -#define erts_dw_atomic_set_nob erts_no_dw_atomic_set -#define erts_dw_atomic_read_nob erts_no_dw_atomic_read -#define erts_dw_atomic_cmpxchg_nob erts_no_dw_atomic_cmpxchg - -#define erts_dw_atomic_init_mb erts_no_dw_atomic_init -#define erts_dw_atomic_set_mb erts_no_dw_atomic_set -#define erts_dw_atomic_read_mb erts_no_dw_atomic_read -#define erts_dw_atomic_cmpxchg_mb erts_no_dw_atomic_cmpxchg - -#define erts_dw_atomic_init_acqb erts_no_dw_atomic_init -#define erts_dw_atomic_set_acqb erts_no_dw_atomic_set -#define erts_dw_atomic_read_acqb erts_no_dw_atomic_read -#define erts_dw_atomic_cmpxchg_acqb erts_no_dw_atomic_cmpxchg - -#define erts_dw_atomic_init_relb erts_no_dw_atomic_init -#define erts_dw_atomic_set_relb erts_no_dw_atomic_set -#define erts_dw_atomic_read_relb erts_no_dw_atomic_read -#define erts_dw_atomic_cmpxchg_relb erts_no_dw_atomic_cmpxchg - -#define erts_dw_atomic_init_ddrb erts_no_dw_atomic_init -#define erts_dw_atomic_set_ddrb erts_no_dw_atomic_set -#define erts_dw_atomic_read_ddrb erts_no_dw_atomic_read -#define erts_dw_atomic_cmpxchg_ddrb erts_no_dw_atomic_cmpxchg - -#define erts_dw_atomic_init_rb erts_no_dw_atomic_init -#define erts_dw_atomic_set_rb erts_no_dw_atomic_set -#define erts_dw_atomic_read_rb erts_no_dw_atomic_read -#define erts_dw_atomic_cmpxchg_rb erts_no_dw_atomic_cmpxchg - -#define erts_dw_atomic_init_wb erts_no_dw_atomic_init -#define erts_dw_atomic_set_wb erts_no_dw_atomic_set -#define erts_dw_atomic_read_wb erts_no_dw_atomic_read -#define erts_dw_atomic_cmpxchg_wb erts_no_dw_atomic_cmpxchg - -#define erts_dw_atomic_set_dirty erts_no_dw_atomic_set -#define erts_dw_atomic_read_dirty erts_no_dw_atomic_read - -/* Word size atomics */ - -#define erts_atomic_init_nob erts_no_atomic_set -#define erts_atomic_set_nob erts_no_atomic_set -#define erts_atomic_read_nob erts_no_atomic_read -#define erts_atomic_inc_read_nob erts_no_atomic_inc_read -#define erts_atomic_dec_read_nob erts_no_atomic_dec_read -#define erts_atomic_inc_nob erts_no_atomic_inc -#define erts_atomic_dec_nob erts_no_atomic_dec -#define erts_atomic_add_read_nob erts_no_atomic_add_read -#define erts_atomic_add_nob erts_no_atomic_add -#define erts_atomic_read_bor_nob erts_no_atomic_read_bor -#define erts_atomic_read_band_nob erts_no_atomic_read_band -#define erts_atomic_xchg_nob erts_no_atomic_xchg -#define erts_atomic_cmpxchg_nob erts_no_atomic_cmpxchg -#define erts_atomic_read_bset_nob erts_no_atomic_read_bset - -#define erts_atomic_init_mb erts_no_atomic_set -#define erts_atomic_set_mb erts_no_atomic_set -#define erts_atomic_read_mb erts_no_atomic_read -#define erts_atomic_inc_read_mb erts_no_atomic_inc_read -#define erts_atomic_dec_read_mb erts_no_atomic_dec_read -#define erts_atomic_inc_mb erts_no_atomic_inc -#define erts_atomic_dec_mb erts_no_atomic_dec -#define erts_atomic_add_read_mb erts_no_atomic_add_read -#define erts_atomic_add_mb erts_no_atomic_add -#define erts_atomic_read_bor_mb erts_no_atomic_read_bor 
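/*
 * A sketch for orientation only, assuming C11 <stdatomic.h> as an
 * analogy: the barrier suffixes used by these mappings correspond
 * roughly to the C11 memory orders below. ethread implements the real
 * operations with its own primitives, so this illustrates the naming
 * scheme, not the actual ERTS implementation.
 */
#include <stdatomic.h>

static inline long inc_read_nob(atomic_long *p)  /* no barrier */
{
    return atomic_fetch_add_explicit(p, 1, memory_order_relaxed) + 1;
}

static inline long inc_read_acqb(atomic_long *p) /* acquire barrier */
{
    return atomic_fetch_add_explicit(p, 1, memory_order_acquire) + 1;
}

static inline long inc_read_relb(atomic_long *p) /* release barrier */
{
    return atomic_fetch_add_explicit(p, 1, memory_order_release) + 1;
}

static inline long inc_read_mb(atomic_long *p)   /* full barrier */
{
    return atomic_fetch_add_explicit(p, 1, memory_order_seq_cst) + 1;
}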
-#define erts_atomic_read_band_mb erts_no_atomic_read_band -#define erts_atomic_xchg_mb erts_no_atomic_xchg -#define erts_atomic_cmpxchg_mb erts_no_atomic_cmpxchg -#define erts_atomic_read_bset_mb erts_no_atomic_read_bset - -#define erts_atomic_init_acqb erts_no_atomic_set -#define erts_atomic_set_acqb erts_no_atomic_set -#define erts_atomic_read_acqb erts_no_atomic_read -#define erts_atomic_inc_read_acqb erts_no_atomic_inc_read -#define erts_atomic_dec_read_acqb erts_no_atomic_dec_read -#define erts_atomic_inc_acqb erts_no_atomic_inc -#define erts_atomic_dec_acqb erts_no_atomic_dec -#define erts_atomic_add_read_acqb erts_no_atomic_add_read -#define erts_atomic_add_acqb erts_no_atomic_add -#define erts_atomic_read_bor_acqb erts_no_atomic_read_bor -#define erts_atomic_read_band_acqb erts_no_atomic_read_band -#define erts_atomic_xchg_acqb erts_no_atomic_xchg -#define erts_atomic_cmpxchg_acqb erts_no_atomic_cmpxchg -#define erts_atomic_read_bset_acqb erts_no_atomic_read_bset - -#define erts_atomic_init_relb erts_no_atomic_set -#define erts_atomic_set_relb erts_no_atomic_set -#define erts_atomic_read_relb erts_no_atomic_read -#define erts_atomic_inc_read_relb erts_no_atomic_inc_read -#define erts_atomic_dec_read_relb erts_no_atomic_dec_read -#define erts_atomic_inc_relb erts_no_atomic_inc -#define erts_atomic_dec_relb erts_no_atomic_dec -#define erts_atomic_add_read_relb erts_no_atomic_add_read -#define erts_atomic_add_relb erts_no_atomic_add -#define erts_atomic_read_bor_relb erts_no_atomic_read_bor -#define erts_atomic_read_band_relb erts_no_atomic_read_band -#define erts_atomic_xchg_relb erts_no_atomic_xchg -#define erts_atomic_cmpxchg_relb erts_no_atomic_cmpxchg -#define erts_atomic_read_bset_relb erts_no_atomic_read_bset - -#define erts_atomic_init_ddrb erts_no_atomic_set -#define erts_atomic_set_ddrb erts_no_atomic_set -#define erts_atomic_read_ddrb erts_no_atomic_read -#define erts_atomic_inc_read_ddrb erts_no_atomic_inc_read -#define erts_atomic_dec_read_ddrb erts_no_atomic_dec_read -#define erts_atomic_inc_ddrb erts_no_atomic_inc -#define erts_atomic_dec_ddrb erts_no_atomic_dec -#define erts_atomic_add_read_ddrb erts_no_atomic_add_read -#define erts_atomic_add_ddrb erts_no_atomic_add -#define erts_atomic_read_bor_ddrb erts_no_atomic_read_bor -#define erts_atomic_read_band_ddrb erts_no_atomic_read_band -#define erts_atomic_xchg_ddrb erts_no_atomic_xchg -#define erts_atomic_cmpxchg_ddrb erts_no_atomic_cmpxchg -#define erts_atomic_read_bset_ddrb erts_no_atomic_read_bset - -#define erts_atomic_init_rb erts_no_atomic_set -#define erts_atomic_set_rb erts_no_atomic_set -#define erts_atomic_read_rb erts_no_atomic_read -#define erts_atomic_inc_read_rb erts_no_atomic_inc_read -#define erts_atomic_dec_read_rb erts_no_atomic_dec_read -#define erts_atomic_inc_rb erts_no_atomic_inc -#define erts_atomic_dec_rb erts_no_atomic_dec -#define erts_atomic_add_read_rb erts_no_atomic_add_read -#define erts_atomic_add_rb erts_no_atomic_add -#define erts_atomic_read_bor_rb erts_no_atomic_read_bor -#define erts_atomic_read_band_rb erts_no_atomic_read_band -#define erts_atomic_xchg_rb erts_no_atomic_xchg -#define erts_atomic_cmpxchg_rb erts_no_atomic_cmpxchg -#define erts_atomic_read_bset_rb erts_no_atomic_read_bset - -#define erts_atomic_init_wb erts_no_atomic_set -#define erts_atomic_set_wb erts_no_atomic_set -#define erts_atomic_read_wb erts_no_atomic_read -#define erts_atomic_inc_read_wb erts_no_atomic_inc_read -#define erts_atomic_dec_read_wb erts_no_atomic_dec_read -#define erts_atomic_inc_wb 
erts_no_atomic_inc -#define erts_atomic_dec_wb erts_no_atomic_dec -#define erts_atomic_add_read_wb erts_no_atomic_add_read -#define erts_atomic_add_wb erts_no_atomic_add -#define erts_atomic_read_bor_wb erts_no_atomic_read_bor -#define erts_atomic_read_band_wb erts_no_atomic_read_band -#define erts_atomic_xchg_wb erts_no_atomic_xchg -#define erts_atomic_cmpxchg_wb erts_no_atomic_cmpxchg -#define erts_atomic_read_bset_wb erts_no_atomic_read_bset - -#define erts_atomic_set_dirty erts_no_atomic_set -#define erts_atomic_read_dirty erts_no_atomic_read - -/* 32-bit atomics */ - -#define erts_atomic32_init_nob erts_no_atomic32_set -#define erts_atomic32_set_nob erts_no_atomic32_set -#define erts_atomic32_read_nob erts_no_atomic32_read -#define erts_atomic32_inc_read_nob erts_no_atomic32_inc_read -#define erts_atomic32_dec_read_nob erts_no_atomic32_dec_read -#define erts_atomic32_inc_nob erts_no_atomic32_inc -#define erts_atomic32_dec_nob erts_no_atomic32_dec -#define erts_atomic32_add_read_nob erts_no_atomic32_add_read -#define erts_atomic32_add_nob erts_no_atomic32_add -#define erts_atomic32_read_bor_nob erts_no_atomic32_read_bor -#define erts_atomic32_read_band_nob erts_no_atomic32_read_band -#define erts_atomic32_xchg_nob erts_no_atomic32_xchg -#define erts_atomic32_cmpxchg_nob erts_no_atomic32_cmpxchg -#define erts_atomic32_read_bset_nob erts_no_atomic32_read_bset - -#define erts_atomic32_init_mb erts_no_atomic32_set -#define erts_atomic32_set_mb erts_no_atomic32_set -#define erts_atomic32_read_mb erts_no_atomic32_read -#define erts_atomic32_inc_read_mb erts_no_atomic32_inc_read -#define erts_atomic32_dec_read_mb erts_no_atomic32_dec_read -#define erts_atomic32_inc_mb erts_no_atomic32_inc -#define erts_atomic32_dec_mb erts_no_atomic32_dec -#define erts_atomic32_add_read_mb erts_no_atomic32_add_read -#define erts_atomic32_add_mb erts_no_atomic32_add -#define erts_atomic32_read_bor_mb erts_no_atomic32_read_bor -#define erts_atomic32_read_band_mb erts_no_atomic32_read_band -#define erts_atomic32_xchg_mb erts_no_atomic32_xchg -#define erts_atomic32_cmpxchg_mb erts_no_atomic32_cmpxchg -#define erts_atomic32_read_bset_mb erts_no_atomic32_read_bset - -#define erts_atomic32_init_acqb erts_no_atomic32_set -#define erts_atomic32_set_acqb erts_no_atomic32_set -#define erts_atomic32_read_acqb erts_no_atomic32_read -#define erts_atomic32_inc_read_acqb erts_no_atomic32_inc_read -#define erts_atomic32_dec_read_acqb erts_no_atomic32_dec_read -#define erts_atomic32_inc_acqb erts_no_atomic32_inc -#define erts_atomic32_dec_acqb erts_no_atomic32_dec -#define erts_atomic32_add_read_acqb erts_no_atomic32_add_read -#define erts_atomic32_add_acqb erts_no_atomic32_add -#define erts_atomic32_read_bor_acqb erts_no_atomic32_read_bor -#define erts_atomic32_read_band_acqb erts_no_atomic32_read_band -#define erts_atomic32_xchg_acqb erts_no_atomic32_xchg -#define erts_atomic32_cmpxchg_acqb erts_no_atomic32_cmpxchg -#define erts_atomic32_read_bset_acqb erts_no_atomic32_read_bset - -#define erts_atomic32_init_relb erts_no_atomic32_set -#define erts_atomic32_set_relb erts_no_atomic32_set -#define erts_atomic32_read_relb erts_no_atomic32_read -#define erts_atomic32_inc_read_relb erts_no_atomic32_inc_read -#define erts_atomic32_dec_read_relb erts_no_atomic32_dec_read -#define erts_atomic32_inc_relb erts_no_atomic32_inc -#define erts_atomic32_dec_relb erts_no_atomic32_dec -#define erts_atomic32_add_read_relb erts_no_atomic32_add_read -#define erts_atomic32_add_relb erts_no_atomic32_add -#define erts_atomic32_read_bor_relb 
erts_no_atomic32_read_bor -#define erts_atomic32_read_band_relb erts_no_atomic32_read_band -#define erts_atomic32_xchg_relb erts_no_atomic32_xchg -#define erts_atomic32_cmpxchg_relb erts_no_atomic32_cmpxchg -#define erts_atomic32_read_bset_relb erts_no_atomic32_read_bset - -#define erts_atomic32_init_ddrb erts_no_atomic32_set -#define erts_atomic32_set_ddrb erts_no_atomic32_set -#define erts_atomic32_read_ddrb erts_no_atomic32_read -#define erts_atomic32_inc_read_ddrb erts_no_atomic32_inc_read -#define erts_atomic32_dec_read_ddrb erts_no_atomic32_dec_read -#define erts_atomic32_inc_ddrb erts_no_atomic32_inc -#define erts_atomic32_dec_ddrb erts_no_atomic32_dec -#define erts_atomic32_add_read_ddrb erts_no_atomic32_add_read -#define erts_atomic32_add_ddrb erts_no_atomic32_add -#define erts_atomic32_read_bor_ddrb erts_no_atomic32_read_bor -#define erts_atomic32_read_band_ddrb erts_no_atomic32_read_band -#define erts_atomic32_xchg_ddrb erts_no_atomic32_xchg -#define erts_atomic32_cmpxchg_ddrb erts_no_atomic32_cmpxchg -#define erts_atomic32_read_bset_ddrb erts_no_atomic32_read_bset - -#define erts_atomic32_init_rb erts_no_atomic32_set -#define erts_atomic32_set_rb erts_no_atomic32_set -#define erts_atomic32_read_rb erts_no_atomic32_read -#define erts_atomic32_inc_read_rb erts_no_atomic32_inc_read -#define erts_atomic32_dec_read_rb erts_no_atomic32_dec_read -#define erts_atomic32_inc_rb erts_no_atomic32_inc -#define erts_atomic32_dec_rb erts_no_atomic32_dec -#define erts_atomic32_add_read_rb erts_no_atomic32_add_read -#define erts_atomic32_add_rb erts_no_atomic32_add -#define erts_atomic32_read_bor_rb erts_no_atomic32_read_bor -#define erts_atomic32_read_band_rb erts_no_atomic32_read_band -#define erts_atomic32_xchg_rb erts_no_atomic32_xchg -#define erts_atomic32_cmpxchg_rb erts_no_atomic32_cmpxchg -#define erts_atomic32_read_bset_rb erts_no_atomic32_read_bset - -#define erts_atomic32_init_wb erts_no_atomic32_set -#define erts_atomic32_set_wb erts_no_atomic32_set -#define erts_atomic32_read_wb erts_no_atomic32_read -#define erts_atomic32_inc_read_wb erts_no_atomic32_inc_read -#define erts_atomic32_dec_read_wb erts_no_atomic32_dec_read -#define erts_atomic32_inc_wb erts_no_atomic32_inc -#define erts_atomic32_dec_wb erts_no_atomic32_dec -#define erts_atomic32_add_read_wb erts_no_atomic32_add_read -#define erts_atomic32_add_wb erts_no_atomic32_add -#define erts_atomic32_read_bor_wb erts_no_atomic32_read_bor -#define erts_atomic32_read_band_wb erts_no_atomic32_read_band -#define erts_atomic32_xchg_wb erts_no_atomic32_xchg -#define erts_atomic32_cmpxchg_wb erts_no_atomic32_cmpxchg -#define erts_atomic32_read_bset_wb erts_no_atomic32_read_bset - -#define erts_atomic32_set_dirty erts_no_atomic32_set -#define erts_atomic32_read_dirty erts_no_atomic32_read - -/* 64-bit atomics */ - -#define erts_atomic64_init_nob erts_no_atomic64_set -#define erts_atomic64_set_nob erts_no_atomic64_set -#define erts_atomic64_read_nob erts_no_atomic64_read -#define erts_atomic64_inc_read_nob erts_no_atomic64_inc_read -#define erts_atomic64_dec_read_nob erts_no_atomic64_dec_read -#define erts_atomic64_inc_nob erts_no_atomic64_inc -#define erts_atomic64_dec_nob erts_no_atomic64_dec -#define erts_atomic64_add_read_nob erts_no_atomic64_add_read -#define erts_atomic64_add_nob erts_no_atomic64_add -#define erts_atomic64_read_bor_nob erts_no_atomic64_read_bor -#define erts_atomic64_read_band_nob erts_no_atomic64_read_band -#define erts_atomic64_xchg_nob erts_no_atomic64_xchg -#define erts_atomic64_cmpxchg_nob 
erts_no_atomic64_cmpxchg -#define erts_atomic64_read_bset_nob erts_no_atomic64_read_bset - -#define erts_atomic64_init_mb erts_no_atomic64_set -#define erts_atomic64_set_mb erts_no_atomic64_set -#define erts_atomic64_read_mb erts_no_atomic64_read -#define erts_atomic64_inc_read_mb erts_no_atomic64_inc_read -#define erts_atomic64_dec_read_mb erts_no_atomic64_dec_read -#define erts_atomic64_inc_mb erts_no_atomic64_inc -#define erts_atomic64_dec_mb erts_no_atomic64_dec -#define erts_atomic64_add_read_mb erts_no_atomic64_add_read -#define erts_atomic64_add_mb erts_no_atomic64_add -#define erts_atomic64_read_bor_mb erts_no_atomic64_read_bor -#define erts_atomic64_read_band_mb erts_no_atomic64_read_band -#define erts_atomic64_xchg_mb erts_no_atomic64_xchg -#define erts_atomic64_cmpxchg_mb erts_no_atomic64_cmpxchg -#define erts_atomic64_read_bset_mb erts_no_atomic64_read_bset - -#define erts_atomic64_init_acqb erts_no_atomic64_set -#define erts_atomic64_set_acqb erts_no_atomic64_set -#define erts_atomic64_read_acqb erts_no_atomic64_read -#define erts_atomic64_inc_read_acqb erts_no_atomic64_inc_read -#define erts_atomic64_dec_read_acqb erts_no_atomic64_dec_read -#define erts_atomic64_inc_acqb erts_no_atomic64_inc -#define erts_atomic64_dec_acqb erts_no_atomic64_dec -#define erts_atomic64_add_read_acqb erts_no_atomic64_add_read -#define erts_atomic64_add_acqb erts_no_atomic64_add -#define erts_atomic64_read_bor_acqb erts_no_atomic64_read_bor -#define erts_atomic64_read_band_acqb erts_no_atomic64_read_band -#define erts_atomic64_xchg_acqb erts_no_atomic64_xchg -#define erts_atomic64_cmpxchg_acqb erts_no_atomic64_cmpxchg -#define erts_atomic64_read_bset_acqb erts_no_atomic64_read_bset - -#define erts_atomic64_init_relb erts_no_atomic64_set -#define erts_atomic64_set_relb erts_no_atomic64_set -#define erts_atomic64_read_relb erts_no_atomic64_read -#define erts_atomic64_inc_read_relb erts_no_atomic64_inc_read -#define erts_atomic64_dec_read_relb erts_no_atomic64_dec_read -#define erts_atomic64_inc_relb erts_no_atomic64_inc -#define erts_atomic64_dec_relb erts_no_atomic64_dec -#define erts_atomic64_add_read_relb erts_no_atomic64_add_read -#define erts_atomic64_add_relb erts_no_atomic64_add -#define erts_atomic64_read_bor_relb erts_no_atomic64_read_bor -#define erts_atomic64_read_band_relb erts_no_atomic64_read_band -#define erts_atomic64_xchg_relb erts_no_atomic64_xchg -#define erts_atomic64_cmpxchg_relb erts_no_atomic64_cmpxchg -#define erts_atomic64_read_bset_relb erts_no_atomic64_read_bset - -#define erts_atomic64_init_ddrb erts_no_atomic64_set -#define erts_atomic64_set_ddrb erts_no_atomic64_set -#define erts_atomic64_read_ddrb erts_no_atomic64_read -#define erts_atomic64_inc_read_ddrb erts_no_atomic64_inc_read -#define erts_atomic64_dec_read_ddrb erts_no_atomic64_dec_read -#define erts_atomic64_inc_ddrb erts_no_atomic64_inc -#define erts_atomic64_dec_ddrb erts_no_atomic64_dec -#define erts_atomic64_add_read_ddrb erts_no_atomic64_add_read -#define erts_atomic64_add_ddrb erts_no_atomic64_add -#define erts_atomic64_read_bor_ddrb erts_no_atomic64_read_bor -#define erts_atomic64_read_band_ddrb erts_no_atomic64_read_band -#define erts_atomic64_xchg_ddrb erts_no_atomic64_xchg -#define erts_atomic64_cmpxchg_ddrb erts_no_atomic64_cmpxchg -#define erts_atomic64_read_bset_ddrb erts_no_atomic64_read_bset - -#define erts_atomic64_init_rb erts_no_atomic64_set -#define erts_atomic64_set_rb erts_no_atomic64_set -#define erts_atomic64_read_rb erts_no_atomic64_read -#define erts_atomic64_inc_read_rb 
erts_no_atomic64_inc_read -#define erts_atomic64_dec_read_rb erts_no_atomic64_dec_read -#define erts_atomic64_inc_rb erts_no_atomic64_inc -#define erts_atomic64_dec_rb erts_no_atomic64_dec -#define erts_atomic64_add_read_rb erts_no_atomic64_add_read -#define erts_atomic64_add_rb erts_no_atomic64_add -#define erts_atomic64_read_bor_rb erts_no_atomic64_read_bor -#define erts_atomic64_read_band_rb erts_no_atomic64_read_band -#define erts_atomic64_xchg_rb erts_no_atomic64_xchg -#define erts_atomic64_cmpxchg_rb erts_no_atomic64_cmpxchg -#define erts_atomic64_read_bset_rb erts_no_atomic64_read_bset - -#define erts_atomic64_init_wb erts_no_atomic64_set -#define erts_atomic64_set_wb erts_no_atomic64_set -#define erts_atomic64_read_wb erts_no_atomic64_read -#define erts_atomic64_inc_read_wb erts_no_atomic64_inc_read -#define erts_atomic64_dec_read_wb erts_no_atomic64_dec_read -#define erts_atomic64_inc_wb erts_no_atomic64_inc -#define erts_atomic64_dec_wb erts_no_atomic64_dec -#define erts_atomic64_add_read_wb erts_no_atomic64_add_read -#define erts_atomic64_add_wb erts_no_atomic64_add -#define erts_atomic64_read_bor_wb erts_no_atomic64_read_bor -#define erts_atomic64_read_band_wb erts_no_atomic64_read_band -#define erts_atomic64_xchg_wb erts_no_atomic64_xchg -#define erts_atomic64_cmpxchg_wb erts_no_atomic64_cmpxchg -#define erts_atomic64_read_bset_wb erts_no_atomic64_read_bset - -#define erts_atomic64_set_dirty erts_no_atomic64_set -#define erts_atomic64_read_dirty erts_no_atomic64_read - -#endif /* !USE_THREADS */ #include "erl_msacc.h" @@ -2065,110 +1526,83 @@ erts_atomic64_read_dirty(erts_atomic64_t *var) ERTS_GLB_INLINE void erts_thr_init(erts_thr_init_data_t *id) { -#ifdef USE_THREADS int res = ethr_init(id); if (res) erts_thr_fatal_error(res, "initialize thread library"); -#endif } ERTS_GLB_INLINE void erts_thr_late_init(erts_thr_late_init_data_t *id) { -#ifdef USE_THREADS int res = ethr_late_init(id); if (res) erts_thr_fatal_error(res, "complete initialization of thread library"); -#endif } ERTS_GLB_INLINE void erts_thr_create(erts_tid_t *tid, void * (*func)(void *), void *arg, erts_thr_opts_t *opts) { -#ifdef USE_THREADS int res = ethr_thr_create(tid, func, arg, opts); if (res) erts_thr_fatal_error(res, "create thread"); -#endif } ERTS_GLB_INLINE void erts_thr_join(erts_tid_t tid, void **thr_res) { -#ifdef USE_THREADS int res = ethr_thr_join(tid, thr_res); if (res) erts_thr_fatal_error(res, "join thread"); -#endif } ERTS_GLB_INLINE void erts_thr_detach(erts_tid_t tid) { -#ifdef USE_THREADS int res = ethr_thr_detach(tid); if (res) erts_thr_fatal_error(res, "detach thread"); -#endif } ERTS_GLB_INLINE void erts_thr_exit(void *res) { -#ifdef USE_THREADS ethr_thr_exit(res); erts_thr_fatal_error(0, "terminate thread"); -#endif } ERTS_GLB_INLINE void erts_thr_install_exit_handler(void (*exit_handler)(void)) { -#ifdef USE_THREADS int res = ethr_install_exit_handler(exit_handler); if (res != 0) erts_thr_fatal_error(res, "install thread exit handler"); -#endif } ERTS_GLB_INLINE erts_tid_t erts_thr_self(void) { -#ifdef USE_THREADS return ethr_self(); -#else - return 0; -#endif } ERTS_GLB_INLINE int erts_thr_getname(erts_tid_t tid, char *buf, size_t len) { -#ifdef USE_THREADS return ethr_getname(tid, buf, len); -#else - return -1; -#endif } ERTS_GLB_INLINE int erts_equal_tids(erts_tid_t x, erts_tid_t y) { -#ifdef USE_THREADS return ethr_equal_tids(x, y); -#else - return 1; -#endif } ERTS_GLB_INLINE void erts_mtx_init(erts_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags) { -#ifdef 
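Editor's note: the deleted !USE_THREADS block above mapped every memory-order flavour (_nob, _mb, _acqb, _relb, _ddrb, _rb, _wb) of each atomic operation onto one plain erts_no_* helper; with no second thread there is nothing to order against, so a single unguarded implementation serves them all. A minimal sketch of the idea, with hypothetical names:

#include <stdint.h>

typedef int64_t no_atomic_t;                 /* plain word, not _Atomic */

/* Single-threaded compare-and-swap: compare, then store. */
static inline int64_t
no_atomic_cmpxchg(no_atomic_t *p, int64_t new_val, int64_t expected)
{
    int64_t old = *p;
    if (old == expected)
        *p = new_val;
    return old;                              /* caller checks old == expected */
}

/* Every barrier flavour aliases the same implementation: */
#define no_atomic_cmpxchg_nob  no_atomic_cmpxchg
#define no_atomic_cmpxchg_mb   no_atomic_cmpxchg
#define no_atomic_cmpxchg_acqb no_atomic_cmpxchg
#define no_atomic_cmpxchg_relb no_atomic_cmpxchg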
USE_THREADS int res = ethr_mutex_init(&mtx->mtx); if (res) { erts_thr_fatal_error(res, "initialize mutex"); @@ -2185,13 +1619,11 @@ erts_mtx_init(erts_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags) #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_init_ref_x(&mtx->lcnt, name, extra, flags); #endif -#endif /* USE_THREADS */ } ERTS_GLB_INLINE void erts_mtx_init_locked(erts_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags) { -#ifdef USE_THREADS erts_mtx_init(mtx, name, extra, flags); ethr_mutex_lock(&mtx->mtx); @@ -2201,13 +1633,11 @@ erts_mtx_init_locked(erts_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_trylock(&mtx->lcnt, 1); #endif -#endif } ERTS_GLB_INLINE void erts_mtx_destroy(erts_mtx_t *mtx) { -#ifdef USE_THREADS int res; ASSERT(!(mtx->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC)); @@ -2230,7 +1660,6 @@ erts_mtx_destroy(erts_mtx_t *mtx) #endif erts_thr_fatal_error(res, "destroy mutex"); } -#endif } ERTS_GLB_INLINE int @@ -2240,7 +1669,6 @@ erts_mtx_trylock_x(erts_mtx_t *mtx, char *file, unsigned int line) erts_mtx_trylock(erts_mtx_t *mtx) #endif { -#ifdef USE_THREADS int res; #ifdef ERTS_ENABLE_LOCK_CHECK @@ -2262,9 +1690,6 @@ erts_mtx_trylock(erts_mtx_t *mtx) erts_lcnt_trylock(&mtx->lcnt, res); #endif return res; -#else - return 0; -#endif } @@ -2275,7 +1700,6 @@ erts_mtx_lock_x(erts_mtx_t *mtx, char *file, unsigned int line) erts_mtx_lock(erts_mtx_t *mtx) #endif { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK #ifdef ERTS_ENABLE_LOCK_POSITION erts_lc_lock_x(&mtx->lc, file, line); @@ -2290,13 +1714,11 @@ erts_mtx_lock(erts_mtx_t *mtx) #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock_post_x(&mtx->lcnt, file, line); #endif -#endif } ERTS_GLB_INLINE void erts_mtx_unlock(erts_mtx_t *mtx) { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_unlock(&mtx->lc); #endif @@ -2304,13 +1726,12 @@ erts_mtx_unlock(erts_mtx_t *mtx) erts_lcnt_unlock(&mtx->lcnt); #endif ethr_mutex_unlock(&mtx->mtx); -#endif } ERTS_GLB_INLINE int erts_lc_mtx_is_locked(erts_mtx_t *mtx) { -#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) int res; erts_lc_lock_t lc = mtx->lc; lc.flags = ERTS_LOCK_FLAGS_TYPE_MUTEX; @@ -2325,17 +1746,14 @@ erts_lc_mtx_is_locked(erts_mtx_t *mtx) ERTS_GLB_INLINE void erts_cnd_init(erts_cnd_t *cnd) { -#ifdef USE_THREADS int res = ethr_cond_init(cnd); if (res) erts_thr_fatal_error(res, "initialize condition variable"); -#endif } ERTS_GLB_INLINE void erts_cnd_destroy(erts_cnd_t *cnd) { -#ifdef USE_THREADS int res = ethr_cond_destroy(cnd); if (res != 0) { #ifdef ERTS_THR_HAVE_BUSY_DESTROY_BUG @@ -2348,13 +1766,11 @@ erts_cnd_destroy(erts_cnd_t *cnd) #endif erts_thr_fatal_error(res, "destroy condition variable"); } -#endif } ERTS_GLB_INLINE void erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx) { -#ifdef USE_THREADS int res; ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP); #ifdef ERTS_ENABLE_LOCK_CHECK @@ -2376,7 +1792,6 @@ erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx) if (res != 0 && res != EINTR) erts_thr_fatal_error(res, "wait on condition variable"); ERTS_MSACC_POP_STATE(); -#endif } /* @@ -2392,18 +1807,14 @@ erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx) ERTS_GLB_INLINE void erts_cnd_signal(erts_cnd_t *cnd) { -#ifdef USE_THREADS ethr_cond_signal(cnd); -#endif } ERTS_GLB_INLINE void erts_cnd_broadcast(erts_cnd_t *cnd) { -#ifdef USE_THREADS ethr_cond_broadcast(cnd); -#endif } /* rwmutex */ @@ -2411,7 +1822,6 @@ erts_cnd_broadcast(erts_cnd_t *cnd) ERTS_GLB_INLINE void 
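Editor's note: erts_mtx_init_locked above creates a mutex that is born already held (and records the acquisition with the lock checker/counter). A sketch of one reason this is useful, with hypothetical helpers: an object can be made visible to other threads before its initialization finishes, as long as every consumer must take its lock first.

typedef struct { erts_mtx_t lock; /* ...fields... */ } my_obj;

my_obj *create_obj(void)
{
    my_obj *o = alloc_obj();                     /* hypothetical allocator  */
    erts_mtx_init_locked(&o->lock, "my_obj", NIL,
                         ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
    publish_obj(o);       /* hypothetical: others can now find o...         */
    finish_setup(o);      /* ...but block on o->lock until setup completes  */
    erts_mtx_unlock(&o->lock);
    return o;
}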
erts_rwmtx_set_reader_group(int no) { -#ifdef USE_THREADS int res; #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_check_no_locked_of_type(ERTS_LOCK_TYPE_RWMUTEX); @@ -2419,13 +1829,11 @@ erts_rwmtx_set_reader_group(int no) res = ethr_rwmutex_set_reader_group(no); if (res != 0) erts_thr_fatal_error(res, "set reader group"); -#endif } ERTS_GLB_INLINE void erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx, erts_rwmtx_opt_t *opt, char *name, Eterm extra, erts_lock_flags_t flags) { -#ifdef USE_THREADS int res = ethr_rwmutex_init_opt(&rwmtx->rwmtx, opt); if (res != 0) { erts_thr_fatal_error(res, "initialize rwmutex"); @@ -2442,7 +1850,6 @@ erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx, erts_rwmtx_opt_t *opt, #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_init_ref_x(&rwmtx->lcnt, name, extra, flags); #endif -#endif /* USE_THREADS */ } ERTS_GLB_INLINE void @@ -2454,7 +1861,6 @@ erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name, Eterm extra, ERTS_GLB_INLINE void erts_rwmtx_destroy(erts_rwmtx_t *rwmtx) { -#ifdef USE_THREADS int res; ASSERT(!(rwmtx->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC)); @@ -2477,7 +1883,6 @@ erts_rwmtx_destroy(erts_rwmtx_t *rwmtx) #endif erts_thr_fatal_error(res, "destroy rwmutex"); } -#endif } ERTS_GLB_INLINE int @@ -2487,7 +1892,6 @@ erts_rwmtx_tryrlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line) erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx) #endif { -#ifdef USE_THREADS int res; #ifdef ERTS_ENABLE_LOCK_CHECK @@ -2510,9 +1914,6 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx) #endif return res; -#else - return 0; -#endif } ERTS_GLB_INLINE void @@ -2522,7 +1923,6 @@ erts_rwmtx_rlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line) erts_rwmtx_rlock(erts_rwmtx_t *rwmtx) #endif { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK #ifdef ERTS_ENABLE_LOCK_POSITION erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ,file,line); @@ -2537,13 +1937,11 @@ erts_rwmtx_rlock(erts_rwmtx_t *rwmtx) #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock_post_x(&rwmtx->lcnt, file, line); #endif -#endif } ERTS_GLB_INLINE void erts_rwmtx_runlock(erts_rwmtx_t *rwmtx) { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_unlock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_READ); #endif @@ -2551,7 +1949,6 @@ erts_rwmtx_runlock(erts_rwmtx_t *rwmtx) erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_READ); #endif ethr_rwmutex_runlock(&rwmtx->rwmtx); -#endif } @@ -2562,7 +1959,6 @@ erts_rwmtx_tryrwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line) erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx) #endif { -#ifdef USE_THREADS int res; #ifdef ERTS_ENABLE_LOCK_CHECK @@ -2585,9 +1981,6 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx) #endif return res; -#else - return 0; -#endif } ERTS_GLB_INLINE void @@ -2597,7 +1990,6 @@ erts_rwmtx_rwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line) erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx) #endif { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK #ifdef ERTS_ENABLE_LOCK_POSITION erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR,file,line); @@ -2612,13 +2004,11 @@ erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx) #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock_post_x(&rwmtx->lcnt, file, line); #endif -#endif } ERTS_GLB_INLINE void erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx) { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_unlock_flg(&rwmtx->lc, ERTS_LOCK_OPTIONS_RDWR); #endif @@ -2626,7 +2016,6 @@ erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx) erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTIONS_RDWR); #endif ethr_rwmutex_rwunlock(&rwmtx->rwmtx); -#endif } #if 0 /* The following rwmtx function names are @@ -2658,7 
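Editor's note: the rwmutex wrappers above all follow one discipline: readers bracket access with erts_rwmtx_rlock/erts_rwmtx_runlock, writers with erts_rwmtx_rwlock/erts_rwmtx_rwunlock, and each wrapper feeds the optional lock checker (erts_lc_*) and lock counter (erts_lcnt_*) before and after touching the underlying ethr_rwmutex. A hypothetical caller looks like:

static erts_rwmtx_t tab_lock;        /* assumed initialized elsewhere */
static int tab_size;

int read_size(void)
{
    int sz;
    erts_rwmtx_rlock(&tab_lock);     /* shared: many readers at once */
    sz = tab_size;
    erts_rwmtx_runlock(&tab_lock);
    return sz;
}

void set_size(int sz)
{
    erts_rwmtx_rwlock(&tab_lock);    /* exclusive: a single writer */
    tab_size = sz;
    erts_rwmtx_rwunlock(&tab_lock);
}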
+2047,7 @@ erts_rwmtx_wunlock(erts_rwmtx_t *rwmtx) ERTS_GLB_INLINE int erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx) { -#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) int res; erts_lc_lock_t lc = mtx->lc; lc.flags = ERTS_LOCK_TYPE_RWMUTEX; @@ -2673,7 +2062,7 @@ erts_lc_rwmtx_is_rlocked(erts_rwmtx_t *mtx) ERTS_GLB_INLINE int erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx) { -#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) int res; erts_lc_lock_t lc = mtx->lc; lc.flags = ERTS_LOCK_TYPE_RWMUTEX; @@ -2685,334 +2074,11 @@ erts_lc_rwmtx_is_rwlocked(erts_rwmtx_t *mtx) #endif } -/* No atomic ops */ - -ERTS_GLB_INLINE void -erts_no_dw_atomic_set(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val) -{ - var->sint[0] = val->sint[0]; - var->sint[1] = val->sint[1]; -} - -ERTS_GLB_INLINE void -erts_no_dw_atomic_read(erts_no_dw_atomic_t *var, erts_no_dw_atomic_t *val) -{ - val->sint[0] = var->sint[0]; - val->sint[1] = var->sint[1]; -} - -ERTS_GLB_INLINE int erts_no_dw_atomic_cmpxchg(erts_no_dw_atomic_t *var, - erts_no_dw_atomic_t *new_val, - erts_no_dw_atomic_t *old_val) -{ - if (var->sint[0] != old_val->sint[0] || var->sint[1] != old_val->sint[1]) { - erts_no_dw_atomic_read(var, old_val); - return 0; - } - else { - erts_no_dw_atomic_set(var, new_val); - return !0; - } -} - -ERTS_GLB_INLINE void -erts_no_atomic_set(erts_no_atomic_t *var, erts_aint_t i) -{ - *var = i; -} - -ERTS_GLB_INLINE erts_aint_t -erts_no_atomic_read(erts_no_atomic_t *var) -{ - return *var; -} - -ERTS_GLB_INLINE erts_aint_t -erts_no_atomic_inc_read(erts_no_atomic_t *incp) -{ - return ++(*incp); -} - -ERTS_GLB_INLINE erts_aint_t -erts_no_atomic_dec_read(erts_no_atomic_t *decp) -{ - return --(*decp); -} - -ERTS_GLB_INLINE void -erts_no_atomic_inc(erts_no_atomic_t *incp) -{ - ++(*incp); -} - -ERTS_GLB_INLINE void -erts_no_atomic_dec(erts_no_atomic_t *decp) -{ - --(*decp); -} - -ERTS_GLB_INLINE erts_aint_t -erts_no_atomic_add_read(erts_no_atomic_t *addp, erts_aint_t i) -{ - return *addp += i; -} - -ERTS_GLB_INLINE void -erts_no_atomic_add(erts_no_atomic_t *addp, erts_aint_t i) -{ - *addp += i; -} - -ERTS_GLB_INLINE erts_aint_t -erts_no_atomic_read_bor(erts_no_atomic_t *var, erts_aint_t mask) -{ - erts_aint_t old; - old = *var; - *var |= mask; - return old; -} - -ERTS_GLB_INLINE erts_aint_t -erts_no_atomic_read_band(erts_no_atomic_t *var, erts_aint_t mask) -{ - erts_aint_t old; - old = *var; - *var &= mask; - return old; -} - -ERTS_GLB_INLINE erts_aint_t -erts_no_atomic_xchg(erts_no_atomic_t *xchgp, erts_aint_t new) -{ - erts_aint_t old = *xchgp; - *xchgp = new; - return old; -} - -ERTS_GLB_INLINE erts_aint_t -erts_no_atomic_cmpxchg(erts_no_atomic_t *xchgp, - erts_aint_t new, - erts_aint_t expected) -{ - erts_aint_t old = *xchgp; - if (old == expected) - *xchgp = new; - return old; -} - -ERTS_GLB_INLINE erts_aint_t -erts_no_atomic_read_bset(erts_no_atomic_t *var, - erts_aint_t mask, - erts_aint_t set) -{ - erts_aint_t old = *var; - *var &= ~mask; - *var |= (mask & set); - return old; -} - -/* atomic32 */ - -ERTS_GLB_INLINE void -erts_no_atomic32_set(erts_no_atomic32_t *var, erts_aint32_t i) -{ - *var = i; -} - -ERTS_GLB_INLINE erts_aint32_t -erts_no_atomic32_read(erts_no_atomic32_t *var) -{ - return *var; -} - -ERTS_GLB_INLINE erts_aint32_t -erts_no_atomic32_inc_read(erts_no_atomic32_t *incp) -{ - return ++(*incp); -} - -ERTS_GLB_INLINE erts_aint32_t -erts_no_atomic32_dec_read(erts_no_atomic32_t *decp) -{ - return --(*decp); -} - -ERTS_GLB_INLINE 
void -erts_no_atomic32_inc(erts_no_atomic32_t *incp) -{ - ++(*incp); -} - -ERTS_GLB_INLINE void -erts_no_atomic32_dec(erts_no_atomic32_t *decp) -{ - --(*decp); -} - -ERTS_GLB_INLINE erts_aint32_t -erts_no_atomic32_add_read(erts_no_atomic32_t *addp, erts_aint32_t i) -{ - return *addp += i; -} - -ERTS_GLB_INLINE void -erts_no_atomic32_add(erts_no_atomic32_t *addp, erts_aint32_t i) -{ - *addp += i; -} - -ERTS_GLB_INLINE erts_aint32_t -erts_no_atomic32_read_bor(erts_no_atomic32_t *var, erts_aint32_t mask) -{ - erts_aint32_t old; - old = *var; - *var |= mask; - return old; -} - -ERTS_GLB_INLINE erts_aint32_t -erts_no_atomic32_read_band(erts_no_atomic32_t *var, erts_aint32_t mask) -{ - erts_aint32_t old; - old = *var; - *var &= mask; - return old; -} - -ERTS_GLB_INLINE erts_aint32_t -erts_no_atomic32_xchg(erts_no_atomic32_t *xchgp, erts_aint32_t new) -{ - erts_aint32_t old = *xchgp; - *xchgp = new; - return old; -} - -ERTS_GLB_INLINE erts_aint32_t -erts_no_atomic32_cmpxchg(erts_no_atomic32_t *xchgp, - erts_aint32_t new, - erts_aint32_t expected) -{ - erts_aint32_t old = *xchgp; - if (old == expected) - *xchgp = new; - return old; -} - -ERTS_GLB_INLINE erts_aint32_t -erts_no_atomic32_read_bset(erts_no_atomic32_t *var, - erts_aint32_t mask, - erts_aint32_t set) -{ - erts_aint32_t old = *var; - *var &= ~mask; - *var |= (mask & set); - return old; -} - -/* atomic64 */ - -ERTS_GLB_INLINE void -erts_no_atomic64_set(erts_no_atomic64_t *var, erts_aint64_t i) -{ - *var = i; -} - -ERTS_GLB_INLINE erts_aint64_t -erts_no_atomic64_read(erts_no_atomic64_t *var) -{ - return *var; -} - -ERTS_GLB_INLINE erts_aint64_t -erts_no_atomic64_inc_read(erts_no_atomic64_t *incp) -{ - return ++(*incp); -} - -ERTS_GLB_INLINE erts_aint64_t -erts_no_atomic64_dec_read(erts_no_atomic64_t *decp) -{ - return --(*decp); -} - -ERTS_GLB_INLINE void -erts_no_atomic64_inc(erts_no_atomic64_t *incp) -{ - ++(*incp); -} - -ERTS_GLB_INLINE void -erts_no_atomic64_dec(erts_no_atomic64_t *decp) -{ - --(*decp); -} - -ERTS_GLB_INLINE erts_aint64_t -erts_no_atomic64_add_read(erts_no_atomic64_t *addp, erts_aint64_t i) -{ - return *addp += i; -} - -ERTS_GLB_INLINE void -erts_no_atomic64_add(erts_no_atomic64_t *addp, erts_aint64_t i) -{ - *addp += i; -} - -ERTS_GLB_INLINE erts_aint64_t -erts_no_atomic64_read_bor(erts_no_atomic64_t *var, erts_aint64_t mask) -{ - erts_aint64_t old; - old = *var; - *var |= mask; - return old; -} - -ERTS_GLB_INLINE erts_aint64_t -erts_no_atomic64_read_band(erts_no_atomic64_t *var, erts_aint64_t mask) -{ - erts_aint64_t old; - old = *var; - *var &= mask; - return old; -} - -ERTS_GLB_INLINE erts_aint64_t -erts_no_atomic64_xchg(erts_no_atomic64_t *xchgp, erts_aint64_t new) -{ - erts_aint64_t old = *xchgp; - *xchgp = new; - return old; -} - -ERTS_GLB_INLINE erts_aint64_t -erts_no_atomic64_cmpxchg(erts_no_atomic64_t *xchgp, - erts_aint64_t new, - erts_aint64_t expected) -{ - erts_aint64_t old = *xchgp; - if (old == expected) - *xchgp = new; - return old; -} - -ERTS_GLB_INLINE erts_aint64_t -erts_no_atomic64_read_bset(erts_no_atomic64_t *var, - erts_aint64_t mask, - erts_aint64_t set) -{ - erts_aint64_t old = *var; - *var &= ~mask; - *var |= (mask & set); - return old; -} - /* spinlock */ ERTS_GLB_INLINE void erts_spinlock_init(erts_spinlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags) { -#ifdef USE_THREADS int res = ethr_spinlock_init(&lock->slck); if (res) { erts_thr_fatal_error(res, "init spinlock"); @@ -3029,13 +2095,11 @@ erts_spinlock_init(erts_spinlock_t *lock, char *name, Eterm extra, erts_lock_fla 
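Editor's note: read_bset, the least self-describing operation among the fallbacks removed above, replaces only the bits selected by mask with the corresponding bits of set and returns the previous value; bits outside mask are preserved. Equivalent standalone form:

/* *var takes set's bits where mask is 1 and keeps its own bits
 * where mask is 0; the prior value is returned. */
long read_bset(long *var, long mask, long set)
{
    long old = *var;
    *var = (old & ~mask) | (set & mask);
    return old;
}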
#ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_init_ref_x(&lock->lcnt, name, extra, flags); #endif -#endif /* USE_THREADS */ } ERTS_GLB_INLINE void erts_spinlock_destroy(erts_spinlock_t *lock) { -#ifdef USE_THREADS int res; ASSERT(!(lock->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC)); @@ -3058,15 +2122,11 @@ erts_spinlock_destroy(erts_spinlock_t *lock) #endif erts_thr_fatal_error(res, "destroy rwlock"); } -#else - (void)lock; -#endif } ERTS_GLB_INLINE void erts_spin_unlock(erts_spinlock_t *lock) { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_unlock(&lock->lc); #endif @@ -3074,9 +2134,6 @@ erts_spin_unlock(erts_spinlock_t *lock) erts_lcnt_unlock(&lock->lcnt); #endif ethr_spin_unlock(&lock->slck); -#else - (void)lock; -#endif } ERTS_GLB_INLINE void @@ -3086,7 +2143,6 @@ erts_spin_lock_x(erts_spinlock_t *lock, char *file, unsigned int line) erts_spin_lock(erts_spinlock_t *lock) #endif { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK #ifdef ERTS_ENABLE_LOCK_POSITION erts_lc_lock_x(&lock->lc,file,line); @@ -3101,15 +2157,12 @@ erts_spin_lock(erts_spinlock_t *lock) #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock_post_x(&lock->lcnt, file, line); #endif -#else - (void)lock; -#endif } ERTS_GLB_INLINE int erts_lc_spinlock_is_locked(erts_spinlock_t *lock) { -#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) int res; erts_lc_lock_t lc = lock->lc; lc.flags = ERTS_LOCK_TYPE_SPINLOCK; @@ -3126,7 +2179,6 @@ erts_lc_spinlock_is_locked(erts_spinlock_t *lock) ERTS_GLB_INLINE void erts_rwlock_init(erts_rwlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags) { -#ifdef USE_THREADS int res = ethr_rwlock_init(&lock->rwlck); if (res) { erts_thr_fatal_error(res, "init rwlock"); @@ -3143,13 +2195,11 @@ erts_rwlock_init(erts_rwlock_t *lock, char *name, Eterm extra, erts_lock_flags_t #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_init_ref_x(&lock->lcnt, name, extra, flags); #endif -#endif /* USE_THREADS */ } ERTS_GLB_INLINE void erts_rwlock_destroy(erts_rwlock_t *lock) { -#ifdef USE_THREADS int res; ASSERT(!(lock->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC)); @@ -3172,15 +2222,11 @@ erts_rwlock_destroy(erts_rwlock_t *lock) #endif erts_thr_fatal_error(res, "destroy rwlock"); } -#else - (void)lock; -#endif } ERTS_GLB_INLINE void erts_read_unlock(erts_rwlock_t *lock) { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_unlock_flg(&lock->lc, ERTS_LOCK_OPTIONS_READ); #endif @@ -3188,9 +2234,6 @@ erts_read_unlock(erts_rwlock_t *lock) erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_READ); #endif ethr_read_unlock(&lock->rwlck); -#else - (void)lock; -#endif } ERTS_GLB_INLINE void @@ -3200,7 +2243,6 @@ erts_read_lock_x(erts_rwlock_t *lock, char *file, unsigned int line) erts_read_lock(erts_rwlock_t *lock) #endif { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK #ifdef ERTS_ENABLE_LOCK_POSITION erts_lc_lock_flg_x(&lock->lc, ERTS_LOCK_OPTIONS_READ,file,line); @@ -3215,15 +2257,11 @@ erts_read_lock(erts_rwlock_t *lock) #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock_post_x(&lock->lcnt, file, line); #endif -#else - (void)lock; -#endif } ERTS_GLB_INLINE void erts_write_unlock(erts_rwlock_t *lock) { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_unlock_flg(&lock->lc, ERTS_LOCK_OPTIONS_RDWR); #endif @@ -3231,9 +2269,6 @@ erts_write_unlock(erts_rwlock_t *lock) erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LOCK_OPTIONS_RDWR); #endif ethr_write_unlock(&lock->rwlck); -#else - (void)lock; -#endif } ERTS_GLB_INLINE void @@ -3243,7 +2278,6 @@ erts_write_lock_x(erts_rwlock_t 
*lock, char *file, unsigned int line) erts_write_lock(erts_rwlock_t *lock) #endif { -#ifdef USE_THREADS #ifdef ERTS_ENABLE_LOCK_CHECK #ifdef ERTS_ENABLE_LOCK_POSITION erts_lc_lock_flg_x(&lock->lc, ERTS_LOCK_OPTIONS_RDWR,file,line); @@ -3258,15 +2292,12 @@ erts_write_lock(erts_rwlock_t *lock) #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_lock_post_x(&lock->lcnt, file, line); #endif -#else - (void)lock; -#endif } ERTS_GLB_INLINE int erts_lc_rwlock_is_rlocked(erts_rwlock_t *lock) { -#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) int res; erts_lc_lock_t lc = lock->lc; lc.flags = ERTS_LOCK_TYPE_RWSPINLOCK; @@ -3281,7 +2312,7 @@ erts_lc_rwlock_is_rlocked(erts_rwlock_t *lock) ERTS_GLB_INLINE int erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock) { -#if defined(USE_THREADS) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) int res; erts_lc_lock_t lc = lock->lc; lc.flags = ERTS_LOCK_TYPE_RWSPINLOCK; @@ -3296,125 +2327,90 @@ erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock) ERTS_GLB_INLINE void erts_tsd_key_create(erts_tsd_key_t *keyp, char *keyname) { -#ifdef USE_THREADS int res = ethr_tsd_key_create(keyp, keyname); if (res) erts_thr_fatal_error(res, "create thread specific data key"); -#endif } ERTS_GLB_INLINE void erts_tsd_key_delete(erts_tsd_key_t key) { -#ifdef USE_THREADS int res = ethr_tsd_key_delete(key); if (res) erts_thr_fatal_error(res, "delete thread specific data key"); -#endif } ERTS_GLB_INLINE void erts_tsd_set(erts_tsd_key_t key, void *value) { -#ifdef USE_THREADS int res = ethr_tsd_set(key, value); if (res) erts_thr_fatal_error(res, "set thread specific data"); -#endif } ERTS_GLB_INLINE void * erts_tsd_get(erts_tsd_key_t key) { -#ifdef USE_THREADS return ethr_tsd_get(key); -#else - return NULL; -#endif } ERTS_GLB_INLINE erts_tse_t *erts_tse_fetch(void) { -#ifdef USE_THREADS return (erts_tse_t *) ethr_get_ts_event(); -#else - return (erts_tse_t *) NULL; -#endif } ERTS_GLB_INLINE void erts_tse_return(erts_tse_t *ep) { -#ifdef USE_THREADS ethr_leave_ts_event(ep); -#endif } ERTS_GLB_INLINE void erts_tse_prepare_timed(erts_tse_t *ep) { -#ifdef USE_THREADS int res = ethr_event_prepare_timed(&((ethr_ts_event *) ep)->event); if (res != 0) erts_thr_fatal_error(res, "prepare timed"); -#endif } ERTS_GLB_INLINE void erts_tse_set(erts_tse_t *ep) { -#ifdef USE_THREADS ethr_event_set(&((ethr_ts_event *) ep)->event); -#endif } ERTS_GLB_INLINE void erts_tse_reset(erts_tse_t *ep) { -#ifdef USE_THREADS ethr_event_reset(&((ethr_ts_event *) ep)->event); -#endif } ERTS_GLB_INLINE int erts_tse_wait(erts_tse_t *ep) { -#ifdef USE_THREADS int res; ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP); res = ethr_event_wait(&((ethr_ts_event *) ep)->event); ERTS_MSACC_POP_STATE(); return res; -#else - return ENOTSUP; -#endif } ERTS_GLB_INLINE int erts_tse_swait(erts_tse_t *ep, int spincount) { -#ifdef USE_THREADS int res; ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP); res = ethr_event_swait(&((ethr_ts_event *) ep)->event, spincount); ERTS_MSACC_POP_STATE(); return res; -#else - return ENOTSUP; -#endif } ERTS_GLB_INLINE int erts_tse_twait(erts_tse_t *ep, Sint64 tmo) { -#ifdef USE_THREADS int res; ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP); res = ethr_event_twait(&((ethr_ts_event *) ep)->event, (ethr_sint64_t) tmo); ERTS_MSACC_POP_STATE(); return res; -#else - return ENOTSUP; -#endif } ERTS_GLB_INLINE int erts_tse_stwait(erts_tse_t *ep, int spincount, Sint64 tmo) { -#ifdef USE_THREADS int res; 
ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP); res = ethr_event_stwait(&((ethr_ts_event *) ep)->event, @@ -3422,49 +2418,34 @@ ERTS_GLB_INLINE int erts_tse_stwait(erts_tse_t *ep, int spincount, Sint64 tmo) (ethr_sint64_t) tmo); ERTS_MSACC_POP_STATE(); return res; -#else - return ENOTSUP; -#endif } ERTS_GLB_INLINE int erts_tse_is_tmp(erts_tse_t *ep) { -#ifdef USE_THREADS return (ep->iflgs & ETHR_TS_EV_TMP) == ETHR_TS_EV_TMP; -#else - return 0; -#endif } ERTS_GLB_INLINE void erts_thr_set_main_status(int on, int no) { -#ifdef USE_THREADS int res = ethr_set_main_thr_status(on, no); if (res != 0) erts_thr_fatal_error(res, "set thread main status"); -#endif } ERTS_GLB_INLINE int erts_thr_get_main_status(void) { -#ifdef USE_THREADS int main_status; int res = ethr_get_main_thr_status(&main_status); if (res != 0) erts_thr_fatal_error(res, "get thread main status"); return main_status; -#else - return 1; -#endif } ERTS_GLB_INLINE void erts_thr_yield(void) { -#ifdef USE_THREADS int res = ETHR_YIELD(); if (res != 0) erts_thr_fatal_error(res, "yield"); -#endif } @@ -3472,34 +2453,28 @@ ERTS_GLB_INLINE void erts_thr_yield(void) ERTS_GLB_INLINE void erts_thr_kill(erts_tid_t tid, int sig) { -#ifdef USE_THREADS int res = ethr_kill((ethr_tid)tid, sig); if (res) erts_thr_fatal_error(res, "killing thread"); -#endif } ERTS_GLB_INLINE void erts_thr_sigmask(int how, const sigset_t *set, sigset_t *oset) { -#ifdef USE_THREADS int res = ethr_sigmask(how, set, oset); if (res) erts_thr_fatal_error(res, "get or set signal mask"); -#endif } ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig) { -#ifdef USE_THREADS int res; do { res = ethr_sigwait(set, sig); } while (res == EINTR); if (res) erts_thr_fatal_error(res, "to wait for signal"); -#endif } #endif /* #ifdef HAVE_ETHR_SIG_FUNCS */ @@ -3507,37 +2482,3 @@ erts_thr_sigwait(const sigset_t *set, int *sig) #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ #endif /* #ifndef ERL_THREAD_H__ */ - -#ifdef ERTS_UNDEF_DEPRECATED_ATOMICS - -/* Deprecated functions to replace */ - -#undef erts_atomic_init -#undef erts_atomic_set -#undef erts_atomic_read -#undef erts_atomic_inctest -#undef erts_atomic_dectest -#undef erts_atomic_inc -#undef erts_atomic_dec -#undef erts_atomic_addtest -#undef erts_atomic_add -#undef erts_atomic_xchg -#undef erts_atomic_cmpxchg -#undef erts_atomic_bor -#undef erts_atomic_band - -#undef erts_atomic32_init -#undef erts_atomic32_set -#undef erts_atomic32_read -#undef erts_atomic32_inctest -#undef erts_atomic32_dectest -#undef erts_atomic32_inc -#undef erts_atomic32_dec -#undef erts_atomic32_addtest -#undef erts_atomic32_add -#undef erts_atomic32_xchg -#undef erts_atomic32_cmpxchg -#undef erts_atomic32_bor -#undef erts_atomic32_band - -#endif diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c index 979c03fd43..f2e0900fec 100644 --- a/erts/emulator/beam/erl_time_sup.c +++ b/erts/emulator/beam/erl_time_sup.c @@ -36,11 +36,11 @@ #include "erl_driver.h" #include "erl_nif.h" -static erts_smp_mtx_t erts_get_time_mtx; +static erts_mtx_t erts_get_time_mtx; /* used by erts_runtime_elapsed_both */ typedef struct { - erts_smp_mtx_t mtx; + erts_mtx_t mtx; ErtsMonotonicTime user; ErtsMonotonicTime sys; } ErtsRunTimePrevData; @@ -51,13 +51,13 @@ static union { } runtime_prev erts_align_attribute(ERTS_CACHE_LINE_SIZE); static union { - erts_smp_atomic64_t time; - char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_smp_atomic64_t))]; + erts_atomic64_t time; + char 
align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_atomic64_t))]; } wall_clock_prev erts_align_attribute(ERTS_CACHE_LINE_SIZE); static union { - erts_smp_atomic64_t time; - char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_smp_atomic64_t))]; + erts_atomic64_t time; + char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_atomic64_t))]; } now_prev erts_align_attribute(ERTS_CACHE_LINE_SIZE); static ErtsMonitor *time_offset_monitors = NULL; @@ -157,7 +157,7 @@ typedef struct { struct time_sup_infrequently_changed__ { #ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT struct { - erts_smp_rwmtx_t rwmtx; + erts_rwmtx_t rwmtx; ErtsTWheelTimer timer; ErtsMonotonicCorrectionData cdata; } parmon; @@ -165,9 +165,9 @@ struct time_sup_infrequently_changed__ { #endif ErtsSystemTime sinit; ErtsMonotonicTime not_corrected_moffset; - erts_smp_atomic64_t offset; + erts_atomic64_t offset; ErtsMonotonicTime shadow_offset; - erts_smp_atomic32_t preliminary_offset; + erts_atomic32_t preliminary_offset; }; struct time_sup_frequently_changed__ { @@ -205,19 +205,19 @@ erts_get_approx_time(void) static ERTS_INLINE void init_time_offset(ErtsMonotonicTime offset) { - erts_smp_atomic64_init_nob(&time_sup.inf.c.offset, (erts_aint64_t) offset); + erts_atomic64_init_nob(&time_sup.inf.c.offset, (erts_aint64_t) offset); } static ERTS_INLINE void set_time_offset(ErtsMonotonicTime offset) { - erts_smp_atomic64_set_relb(&time_sup.inf.c.offset, (erts_aint64_t) offset); + erts_atomic64_set_relb(&time_sup.inf.c.offset, (erts_aint64_t) offset); } static ERTS_INLINE ErtsMonotonicTime get_time_offset(void) { - return (ErtsMonotonicTime) erts_smp_atomic64_read_acqb(&time_sup.inf.c.offset); + return (ErtsMonotonicTime) erts_atomic64_read_acqb(&time_sup.inf.c.offset); } static ERTS_INLINE void @@ -298,7 +298,7 @@ read_corrected_time(int os_drift_corrected) ErtsMonotonicTime os_mtime; ErtsMonotonicCorrectionInstance ci; - erts_smp_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx); os_mtime = erts_os_monotonic_time(); @@ -311,7 +311,7 @@ read_corrected_time(int os_drift_corrected) ci = time_sup.inf.c.parmon.cdata.insts.prev; } - erts_smp_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx); return calc_corrected_erl_mtime(os_mtime, &ci, NULL, os_drift_corrected); @@ -389,13 +389,13 @@ check_time_correction(void *vesdp) int os_drift_corrected = time_sup.r.o.os_corrected_monotonic_time; int set_new_correction = 0, begin_short_intervals = 0; - erts_smp_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx); erts_os_times(&os_mtime, &os_stime); ci = time_sup.inf.c.parmon.cdata.insts.curr; - erts_smp_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx); if (os_mtime < ci.os_mtime) erts_exit(ERTS_ABORT_EXIT, @@ -410,7 +410,7 @@ check_time_correction(void *vesdp) if (time_sup.inf.c.shadow_offset) { ERTS_TIME_ASSERT(time_sup.r.o.warp_mode == ERTS_SINGLE_TIME_WARP_MODE); - if (erts_smp_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) + if (erts_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) sdiff += time_sup.inf.c.shadow_offset; else time_sup.inf.c.shadow_offset = 0; @@ -433,7 +433,7 @@ check_time_correction(void *vesdp) } } else if ((time_sup.r.o.warp_mode == ERTS_SINGLE_TIME_WARP_MODE - && erts_smp_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) + && erts_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) && (sdiff < -2*time_sup.r.o.adj.small_diff || 
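Editor's note: the runtime_prev, wall_clock_prev and now_prev globals converted above keep their union-with-padding shape so that each frequently written counter owns a whole cache line and updates do not false-share with neighbours. A generic sketch (GCC/Clang attribute syntax, hypothetical names; 64 is an assumed line size):

#include <stdint.h>

#define CACHE_LINE 64

typedef union {
    _Atomic int64_t time;
    char align__[CACHE_LINE];        /* pad the union out to a full line */
} padded_time __attribute__((aligned(CACHE_LINE)));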
2*time_sup.r.o.adj.small_diff < sdiff)) { /* @@ -658,7 +658,7 @@ check_time_correction(void *vesdp) #endif if (set_new_correction) { - erts_smp_rwmtx_rwlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_rwlock(&time_sup.inf.c.parmon.rwmtx); os_mtime = erts_os_monotonic_time(); @@ -686,7 +686,7 @@ check_time_correction(void *vesdp) time_sup.inf.c.parmon.cdata.insts.curr.os_mtime = os_mtime; time_sup.inf.c.parmon.cdata.insts.curr.correction = new_correction; - erts_smp_rwmtx_rwunlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_rwunlock(&time_sup.inf.c.parmon.rwmtx); } if (!esdp) @@ -804,13 +804,13 @@ finalize_corrected_time_offset(ErtsSystemTime *stimep) ErtsMonotonicCorrectionInstance ci; int os_drift_corrected = time_sup.r.o.os_corrected_monotonic_time; - erts_smp_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx); erts_os_times(&os_mtime, stimep); ci = time_sup.inf.c.parmon.cdata.insts.curr; - erts_smp_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx); if (os_mtime < ci.os_mtime) erts_exit(ERTS_ABORT_EXIT, @@ -863,7 +863,7 @@ static ErtsMonotonicTime get_not_corrected_time(void) { ErtsMonotonicTime stime, mtime; - erts_smp_mtx_lock(&erts_get_time_mtx); + erts_mtx_lock(&erts_get_time_mtx); stime = erts_os_system_time(); @@ -889,7 +889,7 @@ static ErtsMonotonicTime get_not_corrected_time(void) ASSERT(stime == mtime + time_sup.inf.c.not_corrected_moffset); - erts_smp_mtx_unlock(&erts_get_time_mtx); + erts_mtx_unlock(&erts_get_time_mtx); return mtime; } @@ -971,9 +971,9 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode) ASSERT(ERTS_MONOTONIC_TIME_MIN < ERTS_MONOTONIC_TIME_MAX); - erts_smp_mtx_init(&erts_get_time_mtx, "get_time", NIL, + erts_mtx_init(&erts_get_time_mtx, "get_time", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); - erts_smp_mtx_init(&runtime_prev.data.mtx, "runtime", NIL, + erts_mtx_init(&runtime_prev.data.mtx, "runtime", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); runtime_prev.data.user = 0; runtime_prev.data.sys = 0; @@ -982,9 +982,9 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode) time_sup.r.o.warp_mode = time_warp_mode; if (time_warp_mode == ERTS_SINGLE_TIME_WARP_MODE) - erts_smp_atomic32_init_nob(&time_sup.inf.c.preliminary_offset, 1); + erts_atomic32_init_nob(&time_sup.inf.c.preliminary_offset, 1); else - erts_smp_atomic32_init_nob(&time_sup.inf.c.preliminary_offset, 0); + erts_atomic32_init_nob(&time_sup.inf.c.preliminary_offset, 0); time_sup.inf.c.shadow_offset = 0; #if !ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT @@ -1128,7 +1128,7 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode) if (time_sup.r.o.correction) { ErtsMonotonicCorrectionData *cdatap; - erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; + erts_rwmtx_opt_t rwmtx_opts = ERTS_RWMTX_OPT_DEFAULT_INITER; ErtsMonotonicTime offset; erts_os_times(&time_sup.inf.c.minit, &time_sup.inf.c.sinit); @@ -1138,10 +1138,10 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode) offset -= ERTS_MONOTONIC_BEGIN; init_time_offset(offset); - rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; - rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED; + rwmtx_opts.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; + rwmtx_opts.lived = ERTS_RWMTX_LONG_LIVED; - erts_smp_rwmtx_init_opt(&time_sup.inf.c.parmon.rwmtx, &rwmtx_opts, + erts_rwmtx_init_opt(&time_sup.inf.c.parmon.rwmtx, 
&rwmtx_opts, "get_corrected_time", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); @@ -1176,10 +1176,10 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode) time_sup.f.c.last_not_corrected_time = 0; } - erts_smp_atomic64_init_nob(&wall_clock_prev.time, - (erts_aint64_t) 0); + erts_atomic64_init_nob(&wall_clock_prev.time, + (erts_aint64_t) 0); - erts_smp_atomic64_init_nob( + erts_atomic64_init_nob( &now_prev.time, (erts_aint64_t) ERTS_MONOTONIC_TO_USEC(get_time_offset())); @@ -1223,7 +1223,7 @@ ErtsTimeOffsetState erts_time_offset_state(void) case ERTS_NO_TIME_WARP_MODE: return ERTS_TIME_OFFSET_FINAL; case ERTS_SINGLE_TIME_WARP_MODE: - if (erts_smp_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) + if (erts_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) return ERTS_TIME_OFFSET_PRELIMINARY; return ERTS_TIME_OFFSET_FINAL; case ERTS_MULTI_TIME_WARP_MODE: @@ -1256,9 +1256,9 @@ erts_finalize_time_offset(void) case ERTS_SINGLE_TIME_WARP_MODE: { ErtsTimeOffsetState res = ERTS_TIME_OFFSET_FINAL; - erts_smp_mtx_lock(&erts_get_time_mtx); + erts_mtx_lock(&erts_get_time_mtx); - if (erts_smp_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) { + if (erts_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) { ErtsMonotonicTime mtime, new_offset; #ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT @@ -1295,11 +1295,11 @@ erts_finalize_time_offset(void) set_time_offset(new_offset); schedule_send_time_offset_changed_notifications(new_offset); - erts_smp_atomic32_set_nob(&time_sup.inf.c.preliminary_offset, 0); + erts_atomic32_set_nob(&time_sup.inf.c.preliminary_offset, 0); res = ERTS_TIME_OFFSET_PRELIMINARY; } - erts_smp_mtx_unlock(&erts_get_time_mtx); + erts_mtx_unlock(&erts_get_time_mtx); return res; } @@ -1358,14 +1358,14 @@ erts_runtime_elapsed_both(ErtsMonotonicTime *ms_user, ErtsMonotonicTime *ms_sys, if (ms_user_diff || ms_sys_diff) { - erts_smp_mtx_lock(&runtime_prev.data.mtx); + erts_mtx_lock(&runtime_prev.data.mtx); prev_user = runtime_prev.data.user; prev_sys = runtime_prev.data.sys; runtime_prev.data.user = user; runtime_prev.data.sys = sys; - erts_smp_mtx_unlock(&runtime_prev.data.mtx); + erts_mtx_unlock(&runtime_prev.data.mtx); if (ms_user_diff) *ms_user_diff = user - prev_user; @@ -1394,8 +1394,8 @@ erts_wall_clock_elapsed_both(ErtsMonotonicTime *ms_total, ErtsMonotonicTime *ms_ ErtsMonotonicTime prev; prev = ((ErtsMonotonicTime) - erts_smp_atomic64_xchg_mb(&wall_clock_prev.time, - (erts_aint64_t) elapsed)); + erts_atomic64_xchg_mb(&wall_clock_prev.time, + (erts_aint64_t) elapsed)); *ms_diff = elapsed - prev; } @@ -1784,15 +1784,15 @@ get_now(Uint* megasec, Uint* sec, Uint* microsec) now = ERTS_MONOTONIC_TO_USEC(mtime + time_offset); /* Make sure now time is later than last time */ - prev = erts_smp_atomic64_read_nob(&now_prev.time); + prev = erts_atomic64_read_nob(&now_prev.time); while (1) { ErtsMonotonicTime act; if (now <= prev) now = prev + 1; act = ((ErtsMonotonicTime) - erts_smp_atomic64_cmpxchg_mb(&now_prev.time, - (erts_aint64_t) now, - (erts_aint64_t) prev)); + erts_atomic64_cmpxchg_mb(&now_prev.time, + (erts_aint64_t) now, + (erts_aint64_t) prev)); if (act == prev) break; prev = act; @@ -1883,10 +1883,10 @@ void erts_get_now_cpu(Uint* megasec, Uint* sec, Uint* microsec) { void erts_monitor_time_offset(Eterm id, Eterm ref) { - erts_smp_mtx_lock(&erts_get_time_mtx); + erts_mtx_lock(&erts_get_time_mtx); erts_add_monitor(&time_offset_monitors, MON_TIME_OFFSET, ref, id, NIL); no_time_offset_monitors++; - 
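Editor's note: the get_now() hunk above is the classic strictly-monotonic clock pattern: read the last value handed out, bump the candidate past it if needed, and publish with a compare-and-swap, retrying when another caller won the race. The same loop in portable C11, with hypothetical names:

#include <stdatomic.h>
#include <stdint.h>

static _Atomic int64_t now_prev_us;

int64_t unique_now_us(int64_t now_us)
{
    int64_t prev = atomic_load(&now_prev_us);
    for (;;) {
        if (now_us <= prev)
            now_us = prev + 1;        /* never hand out a repeated value */
        /* on failure, prev is reloaded with the current value */
        if (atomic_compare_exchange_strong(&now_prev_us, &prev, now_us))
            return now_us;
    }
}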
erts_smp_mtx_unlock(&erts_get_time_mtx); + erts_mtx_unlock(&erts_get_time_mtx); } int @@ -1895,7 +1895,7 @@ erts_demonitor_time_offset(Eterm ref) int res; ErtsMonitor *mon; ASSERT(is_internal_ref(ref)); - erts_smp_mtx_lock(&erts_get_time_mtx); + erts_mtx_lock(&erts_get_time_mtx); if (is_internal_ordinary_ref(ref)) mon = erts_remove_monitor(&time_offset_monitors, ref); else @@ -1907,7 +1907,7 @@ erts_demonitor_time_offset(Eterm ref) no_time_offset_monitors--; res = 1; } - erts_smp_mtx_unlock(&erts_get_time_mtx); + erts_mtx_unlock(&erts_get_time_mtx); if (res) erts_destroy_monitor(mon); return res; @@ -1965,7 +1965,7 @@ send_time_offset_changed_notifications(void *new_offsetp) #endif new_offset -= ERTS_MONOTONIC_OFFSET_NATIVE; - erts_smp_mtx_lock(&erts_get_time_mtx); + erts_mtx_lock(&erts_get_time_mtx); no_monitors = no_time_offset_monitors; if (no_monitors) { @@ -1990,7 +1990,7 @@ send_time_offset_changed_notifications(void *new_offsetp) ASSERT(cntxt.ix == no_monitors); } - erts_smp_mtx_unlock(&erts_get_time_mtx); + erts_mtx_unlock(&erts_get_time_mtx); if (no_monitors) { Eterm *hp, *patch_refp, new_offset_term, message_template; @@ -2023,7 +2023,7 @@ send_time_offset_changed_notifications(void *new_offsetp) if (rp) { Eterm ref = to_mon_info[mix].ref; ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK; - erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_lock(rp, ERTS_PROC_LOCK_LINK); if (erts_lookup_monitor(ERTS_P_MONITORS(rp), ref)) { ErtsMessage *mp; ErlOffHeap *ohp; @@ -2036,7 +2036,7 @@ send_time_offset_changed_notifications(void *new_offsetp) message = copy_struct(message_template, hsz, &hp, ohp); erts_queue_message(rp, rp_locks, mp, message, am_clock_service); } - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } } diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c index db7d0ac449..4b996d8fc2 100644 --- a/erts/emulator/beam/erl_trace.c +++ b/erts/emulator/beam/erl_trace.c @@ -77,8 +77,8 @@ static Eterm system_profile; int erts_cpu_timestamp; #endif -static erts_smp_mtx_t smq_mtx; -static erts_smp_rwmtx_t sys_trace_rwmtx; +static erts_mtx_t smq_mtx; +static erts_rwmtx_t sys_trace_rwmtx; enum ErtsSysMsgType { SYS_MSG_TYPE_UNDEFINED, @@ -237,7 +237,6 @@ write_timestamp(ErtsTraceTimeStamp *tsp, Eterm **hpp) } } -#ifdef ERTS_SMP static ERTS_INLINE Uint patch_ts_size(int ts_type) @@ -257,7 +256,6 @@ patch_ts_size(int ts_type) return 0; } } -#endif /* ERTS_SMP */ /* * Write a timestamp. 
The timestamp MUST be the last @@ -298,18 +296,11 @@ write_ts(int ts_type, Eterm *hp, ErlHeapFragment *bp, Process *tracer) if (shrink) { if (bp) bp->used_size -= shrink; -#ifndef ERTS_SMP - else if (tracer) { - Eterm *endp = ts_hp + shrink; - HRelease(tracer, endp, ts_hp); - } -#endif } return res; } -#ifdef ERTS_SMP static void enqueue_sys_msg_unlocked(enum ErtsSysMsgType type, Eterm from, Eterm to, @@ -321,7 +312,6 @@ static void enqueue_sys_msg(enum ErtsSysMsgType type, Eterm msg, ErlHeapFragment *bp); static void init_sys_msg_dispatcher(void); -#endif static void init_tracer_nif(void); static int tracer_cmp_fun(void*, void*); @@ -332,11 +322,11 @@ static void tracer_free_fun(void*); typedef struct ErtsTracerNif_ ErtsTracerNif; void erts_init_trace(void) { - erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; - rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED; + erts_rwmtx_opt_t rwmtx_opts = ERTS_RWMTX_OPT_DEFAULT_INITER; + rwmtx_opts.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; + rwmtx_opts.lived = ERTS_RWMTX_LONG_LIVED; - erts_smp_rwmtx_init_opt(&sys_trace_rwmtx, &rwmtx_opts, "sys_tracers", NIL, + erts_rwmtx_init_opt(&sys_trace_rwmtx, &rwmtx_opts, "sys_tracers", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG); #ifdef HAVE_ERTS_NOW_CPU @@ -350,9 +340,7 @@ void erts_init_trace(void) { default_port_trace_flags = F_INITIAL_TRACE_FLAGS; default_port_tracer = erts_tracer_nil; system_seq_tracer = erts_tracer_nil; -#ifdef ERTS_SMP init_sys_msg_dispatcher(); -#endif init_tracer_nif(); } @@ -412,43 +400,35 @@ static Uint active_sched; void erts_system_profile_setup_active_schedulers(void) { - ERTS_SMP_LC_ASSERT(erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); active_sched = erts_active_schedulers(); } static void exiting_reset(Eterm exiting) { - erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); + erts_rwmtx_rwlock(&sys_trace_rwmtx); if (exiting == system_monitor) { -#ifdef ERTS_SMP system_monitor = NIL; /* Let the trace message dispatcher clear flags, etc */ -#else - erts_system_monitor_clear(NULL); -#endif } if (exiting == system_profile) { -#ifdef ERTS_SMP system_profile = NIL; /* Let the trace message dispatcher clear flags, etc */ -#else - erts_system_profile_clear(NULL); -#endif } - erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); + erts_rwmtx_rwunlock(&sys_trace_rwmtx); } void erts_trace_check_exiting(Eterm exiting) { int reset = 0; - erts_smp_rwmtx_rlock(&sys_trace_rwmtx); + erts_rwmtx_rlock(&sys_trace_rwmtx); if (exiting == system_monitor) reset = 1; else if (exiting == system_profile) reset = 1; - erts_smp_rwmtx_runlock(&sys_trace_rwmtx); + erts_rwmtx_runlock(&sys_trace_rwmtx); if (reset) exiting_reset(exiting); } @@ -468,7 +448,7 @@ erts_set_system_seq_tracer(Process *c_p, ErtsProcLocks c_p_locks, ErtsTracer new } } - erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); + erts_rwmtx_rwlock(&sys_trace_rwmtx); old = system_seq_tracer; system_seq_tracer = erts_tracer_nil; erts_tracer_update(&system_seq_tracer, new); @@ -476,7 +456,7 @@ erts_set_system_seq_tracer(Process *c_p, ErtsProcLocks c_p_locks, ErtsTracer new #ifdef DEBUG_PRINTOUTS erts_fprintf(stderr, "set seq tracer new=%T old=%T\n", new, old); #endif - erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); + erts_rwmtx_rwunlock(&sys_trace_rwmtx); return old; } @@ -484,12 +464,12 @@ ErtsTracer erts_get_system_seq_tracer(void) { ErtsTracer st; - erts_smp_rwmtx_rlock(&sys_trace_rwmtx); + erts_rwmtx_rlock(&sys_trace_rwmtx); st = 
system_seq_tracer; #ifdef DEBUG_PRINTOUTS erts_fprintf(stderr, "get seq tracer %T\n", st); #endif - erts_smp_rwmtx_runlock(&sys_trace_rwmtx); + erts_rwmtx_runlock(&sys_trace_rwmtx); if (st != erts_tracer_nil && call_enabled_tracer(st, NULL, TRACE_FUN_ENABLED, @@ -522,8 +502,8 @@ get_default_tracing(Uint *flagsp, ErtsTracer *tracerp, ErtsTracer curr_default_tracer = *default_tracer; if (tracerp) { /* we only have a rlock, so we have to unlock and then rwlock */ - erts_smp_rwmtx_runlock(&sys_trace_rwmtx); - erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); + erts_rwmtx_runlock(&sys_trace_rwmtx); + erts_rwmtx_rwlock(&sys_trace_rwmtx); } /* check if someone else changed default tracer while we got the write lock, if so we don't do @@ -533,8 +513,8 @@ get_default_tracing(Uint *flagsp, ErtsTracer *tracerp, ERTS_TRACER_CLEAR(default_tracer); } if (tracerp) { - erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); - erts_smp_rwmtx_rlock(&sys_trace_rwmtx); + erts_rwmtx_rwunlock(&sys_trace_rwmtx); + erts_rwmtx_rlock(&sys_trace_rwmtx); } } } @@ -567,81 +547,81 @@ void erts_change_default_proc_tracing(int setflags, Uint flagsp, const ErtsTracer tracer) { - erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); + erts_rwmtx_rwlock(&sys_trace_rwmtx); erts_change_default_tracing( setflags, flagsp, tracer, &default_proc_trace_flags, &default_proc_tracer); - erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); + erts_rwmtx_rwunlock(&sys_trace_rwmtx); } void erts_change_default_port_tracing(int setflags, Uint flagsp, const ErtsTracer tracer) { - erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); + erts_rwmtx_rwlock(&sys_trace_rwmtx); erts_change_default_tracing( setflags, flagsp, tracer, &default_port_trace_flags, &default_port_tracer); - erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); + erts_rwmtx_rwunlock(&sys_trace_rwmtx); } void erts_get_default_proc_tracing(Uint *flagsp, ErtsTracer *tracerp) { - erts_smp_rwmtx_rlock(&sys_trace_rwmtx); + erts_rwmtx_rlock(&sys_trace_rwmtx); *tracerp = erts_tracer_nil; /* initialize */ get_default_tracing( flagsp, tracerp, &default_proc_trace_flags, &default_proc_tracer); - erts_smp_rwmtx_runlock(&sys_trace_rwmtx); + erts_rwmtx_runlock(&sys_trace_rwmtx); } void erts_get_default_port_tracing(Uint *flagsp, ErtsTracer *tracerp) { - erts_smp_rwmtx_rlock(&sys_trace_rwmtx); + erts_rwmtx_rlock(&sys_trace_rwmtx); *tracerp = erts_tracer_nil; /* initialize */ get_default_tracing( flagsp, tracerp, &default_port_trace_flags, &default_port_tracer); - erts_smp_rwmtx_runlock(&sys_trace_rwmtx); + erts_rwmtx_runlock(&sys_trace_rwmtx); } void erts_set_system_monitor(Eterm monitor) { - erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); + erts_rwmtx_rwlock(&sys_trace_rwmtx); system_monitor = monitor; - erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); + erts_rwmtx_rwunlock(&sys_trace_rwmtx); } Eterm erts_get_system_monitor(void) { Eterm monitor; - erts_smp_rwmtx_rlock(&sys_trace_rwmtx); + erts_rwmtx_rlock(&sys_trace_rwmtx); monitor = system_monitor; - erts_smp_rwmtx_runlock(&sys_trace_rwmtx); + erts_rwmtx_runlock(&sys_trace_rwmtx); return monitor; } /* Performance monitoring */ void erts_set_system_profile(Eterm profile) { - erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); + erts_rwmtx_rwlock(&sys_trace_rwmtx); system_profile = profile; - erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); + erts_rwmtx_rwunlock(&sys_trace_rwmtx); } Eterm erts_get_system_profile(void) { Eterm profile; - erts_smp_rwmtx_rlock(&sys_trace_rwmtx); + erts_rwmtx_rlock(&sys_trace_rwmtx); profile = system_profile; - erts_smp_rwmtx_runlock(&sys_trace_rwmtx); + erts_rwmtx_runlock(&sys_trace_rwmtx); return 
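Editor's note: get_default_tracing above spells out the one subtle locking move in this file: the caller holds only a read lock, these rwmutexes cannot be upgraded in place, so it releases the read lock, takes the write lock, and then re-checks its snapshot before clearing anything, because another writer may have slipped in between the two acquisitions. The shape, as a pthreads sketch with hypothetical names:

#include <pthread.h>
#include <stddef.h>

static pthread_rwlock_t lck = PTHREAD_RWLOCK_INITIALIZER;
static void *slot;

void clear_if_unchanged(void *snapshot)
{
    /* entered with the read lock held */
    pthread_rwlock_unlock(&lck);        /* drop read access            */
    pthread_rwlock_wrlock(&lck);        /* contend for write access    */
    if (slot == snapshot)               /* did anyone race us?         */
        slot = NULL;                    /* act only on an intact state */
    pthread_rwlock_unlock(&lck);
    pthread_rwlock_rdlock(&lck);        /* leave as we entered         */
}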
profile; } @@ -679,71 +659,11 @@ write_sys_msg_to_port(Eterm unused_to, erts_exit(ERTS_ERROR_EXIT, "Internal error in do_send_to_port: %d\n", ptr-buffer); } -#ifndef ERTS_SMP - if (!INVALID_TRACER_PORT(trace_port, trace_port->common.id)) -#endif erts_raw_port_command(trace_port, buffer, ptr-buffer); erts_free(ERTS_ALC_T_TMP, (void *) buffer); } -#ifndef ERTS_SMP -/* Profile send - * Checks if profiler is port or process - * Eterm msg is local, need copying. - */ - -static void -profile_send(Eterm from, Eterm message) { - Uint sz = 0; - Uint *hp = NULL; - Eterm msg = NIL; - Process *profile_p = NULL; - - Eterm profiler = erts_get_system_profile(); - - /* do not profile profiler pid */ - if (from == profiler) return; - - if (is_internal_port(profiler)) { - Port *profiler_port = NULL; - - /* not smp */ - - profiler_port = erts_id2port_sflgs(profiler, - NULL, - 0, - ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP); - if (profiler_port) { - write_sys_msg_to_port(profiler, - profiler_port, - NIL, /* or current process->common.id */ - SYS_MSG_TYPE_SYSPROF, - message); - erts_port_release(profiler_port); - } - - } else { - ErtsMessage *mp; - ASSERT(is_internal_pid(profiler)); - - profile_p = erts_proc_lookup(profiler); - - if (!profile_p) - return; - - sz = size_object(message); - mp = erts_alloc_message(sz, &hp); - if (sz == 0) - msg = message; - else - msg = copy_struct(message, sz, &hp, &mp->hfrag.off_heap); - - erts_queue_message(profile_p, 0, mp, msg, from); - } -} - -#endif static void trace_sched_aux(Process *p, ErtsProcLocks locks, Eterm what) @@ -815,9 +735,7 @@ trace_send(Process *p, Eterm to, Eterm msg) ErtsTracerNif *tnif = NULL; ErtsTracingEvent* te; Eterm pam_result; -#ifdef ERTS_SMP ErtsThrPrgrDelayHandle dhndl; -#endif ASSERT(ARE_TRACE_FLAGS_ON(p, F_TRACE_SEND)); @@ -842,9 +760,7 @@ trace_send(Process *p, Eterm to, Eterm msg) } else pam_result = am_true; -#ifdef ERTS_SMP dhndl = erts_thr_progress_unmanaged_delay(); -#endif if (is_internal_pid(to)) { if (!erts_proc_lookup(to)) @@ -862,9 +778,7 @@ trace_send(Process *p, Eterm to, Eterm msg) operation, msg, to, pam_result); } -#ifdef ERTS_SMP erts_thr_progress_unmanaged_continue(dhndl); -#endif erts_match_set_release_result_trace(p, pam_result); } @@ -1179,7 +1093,7 @@ erts_call_trace(Process* p, ErtsCodeInfo *info, Binary *match_spec, Eterm transformed_args[MAX_ARG]; ErtsTracer pre_ms_tracer = erts_tracer_nil; - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN); ASSERT(tracer); if (ERTS_TRACER_COMPARE(*tracer, erts_tracer_true)) { @@ -1467,21 +1381,11 @@ monitor_long_schedule_proc(Process *p, ErtsCodeMFA *in_fp, { ErlHeapFragment *bp; ErlOffHeap *off_heap; -#ifndef ERTS_SMP - Process *monitor_p; -#endif Uint hsz; Eterm *hp, list, in_mfa = am_undefined, out_mfa = am_undefined; Eterm in_tpl, out_tpl, tmo_tpl, tmo, msg; -#ifndef ERTS_SMP - ASSERT(is_internal_pid(system_monitor)); - monitor_p = erts_proc_lookup(system_monitor); - if (!monitor_p || p == monitor_p) { - return; - } -#endif /* * Size: {monitor, pid, long_schedule, [{timeout, T}, {in, {M,F,A}},{out,{M,F,A}}]} -> * 5 (top tuple of 4), (3 (elements) * 2 (cons)) + 3 (timeout tuple of 2) + size of Timeout + @@ -1517,36 +1421,18 @@ monitor_long_schedule_proc(Process *p, ErtsCodeMFA *in_fp, hp += 2; msg = TUPLE4(hp, am_monitor, p->common.id, am_long_schedule, list); hp += 5; -#ifdef ERTS_SMP enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp); -#else - { - ErtsMessage *mp = 
erts_alloc_message(0, NULL); - mp->data.heap_frag = bp; - erts_queue_message(monitor_p, 0, mp, msg, am_system); - } -#endif } void monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time) { ErlHeapFragment *bp; ErlOffHeap *off_heap; -#ifndef ERTS_SMP - Process *monitor_p; -#endif Uint hsz; Eterm *hp, list, op; Eterm op_tpl, tmo_tpl, tmo, msg; -#ifndef ERTS_SMP - ASSERT(is_internal_pid(system_monitor)); - monitor_p = erts_proc_lookup(system_monitor); - if (!monitor_p) { - return; - } -#endif /* * Size: {monitor, port, long_schedule, [{timeout, T}, {op, Operation}]} -> * 5 (top tuple of 4), (2 (elements) * 2 (cons)) + 3 (timeout tuple of 2) @@ -1563,7 +1449,6 @@ monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time) case ERTS_PORT_TASK_TIMEOUT: op = am_timeout; break; case ERTS_PORT_TASK_INPUT: op = am_input; break; case ERTS_PORT_TASK_OUTPUT: op = am_output; break; - case ERTS_PORT_TASK_EVENT: op = am_event; break; case ERTS_PORT_TASK_DIST_CMD: op = am_dist_cmd; break; default: op = am_undefined; break; } @@ -1582,24 +1467,13 @@ monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time) hp += 2; msg = TUPLE4(hp, am_monitor, pp->common.id, am_long_schedule, list); hp += 5; -#ifdef ERTS_SMP enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, pp->common.id, NIL, msg, bp); -#else - { - ErtsMessage *mp = erts_alloc_message(0, NULL); - mp->data.heap_frag = bp; - erts_queue_message(monitor_p, 0, mp, msg, am_system); - } -#endif } void monitor_long_gc(Process *p, Uint time) { ErlHeapFragment *bp; ErlOffHeap *off_heap; -#ifndef ERTS_SMP - Process *monitor_p; -#endif Uint hsz; Eterm *hp, list, msg; Eterm tags[] = { @@ -1624,12 +1498,6 @@ monitor_long_gc(Process *p, Uint time) { Eterm *hp_end; #endif -#ifndef ERTS_SMP - ASSERT(is_internal_pid(system_monitor)); - monitor_p = erts_proc_lookup(system_monitor); - if (!monitor_p || p == monitor_p) - return; -#endif hsz = 0; (void) erts_bld_atom_uword_2tup_list(NULL, @@ -1657,24 +1525,13 @@ monitor_long_gc(Process *p, Uint time) { ASSERT(hp == hp_end); #endif -#ifdef ERTS_SMP enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp); -#else - { - ErtsMessage *mp = erts_alloc_message(0, NULL); - mp->data.heap_frag = bp; - erts_queue_message(monitor_p, 0, mp, msg, am_system); - } -#endif } void monitor_large_heap(Process *p) { ErlHeapFragment *bp; ErlOffHeap *off_heap; -#ifndef ERTS_SMP - Process *monitor_p; -#endif Uint hsz; Eterm *hp, list, msg; Eterm tags[] = { @@ -1698,13 +1555,6 @@ monitor_large_heap(Process *p) { #endif -#ifndef ERTS_SMP - ASSERT(is_internal_pid(system_monitor)); - monitor_p = erts_proc_lookup(system_monitor); - if (!monitor_p || p == monitor_p) { - return; - } -#endif hsz = 0; (void) erts_bld_atom_uword_2tup_list(NULL, @@ -1732,47 +1582,22 @@ monitor_large_heap(Process *p) { ASSERT(hp == hp_end); #endif -#ifdef ERTS_SMP enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp); -#else - { - ErtsMessage *mp = erts_alloc_message(0, NULL); - mp->data.heap_frag = bp; - erts_queue_message(monitor_p, 0, mp, msg, am_system); - } -#endif } void monitor_generic(Process *p, Eterm type, Eterm spec) { ErlHeapFragment *bp; ErlOffHeap *off_heap; -#ifndef ERTS_SMP - Process *monitor_p; -#endif Eterm *hp, msg; -#ifndef ERTS_SMP - ASSERT(is_internal_pid(system_monitor)); - monitor_p = erts_proc_lookup(system_monitor); - if (!monitor_p || p == monitor_p) - return; -#endif hp = ERTS_ALLOC_SYSMSG_HEAP(5, &bp, &off_heap, monitor_p); msg = TUPLE4(hp, am_monitor, p->common.id, type, spec); hp += 5; -#ifdef ERTS_SMP 
enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp); -#else - { - ErtsMessage *mp = erts_alloc_message(0, NULL); - mp->data.heap_frag = bp; - erts_queue_message(monitor_p, 0, mp, msg, am_system); - } -#endif } @@ -1785,21 +1610,14 @@ profile_scheduler(Eterm scheduler_id, Eterm state) { Eterm *hp, msg; ErlHeapFragment *bp = NULL; -#ifndef ERTS_SMP -#define LOCAL_HEAP_SIZE (7 + ERTS_TRACE_PATCH_TS_MAX_SIZE) - DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE); - UseTmpHeapNoproc(LOCAL_HEAP_SIZE); - hp = local_heap; -#else Uint hsz; hsz = 7 + patch_ts_size(erts_system_profile_ts_type)-1; bp = new_message_buffer(hsz); hp = bp->mem; -#endif - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); switch (state) { case am_active: @@ -1821,14 +1639,8 @@ profile_scheduler(Eterm scheduler_id, Eterm state) { /* Write timestamp in element 6 of the 'msg' tuple */ hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL); -#ifndef ERTS_SMP - profile_send(NIL, msg); - UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE); -#undef LOCAL_HEAP_SIZE -#else enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, NIL, NIL, msg, bp); -#endif - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); } @@ -1837,7 +1649,7 @@ profile_scheduler(Eterm scheduler_id, Eterm state) { void trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) { ErtsTracerNif *tnif = NULL; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (is_tracer_enabled(NULL, 0, &p->common, &tnif, TRACE_FUN_E_PORTS, am_open)) send_to_tracer_nif(NULL, &p->common, p->common.id, tnif, TRACE_FUN_T_PORTS, am_open, calling_pid, drv_name, am_true); @@ -1854,9 +1666,9 @@ void trace_port(Port *t_p, Eterm what, Eterm data) { ErtsTracerNif *tnif = NULL; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p) + ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p) || erts_thr_progress_is_blocking()); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_PORTS, what)) send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_PORTS, what, data, THE_NON_VALUE, am_true); @@ -1899,9 +1711,9 @@ void trace_port_receive(Port *t_p, Eterm caller, Eterm what, ...) { ErtsTracerNif *tnif = NULL; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p) + ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p) || erts_thr_progress_is_blocking()); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_RECEIVE, am_receive)) { /* We can use a stack heap here, as the nif is called in the context of a port */ @@ -2016,9 +1828,9 @@ trace_port_send(Port *t_p, Eterm receiver, Eterm msg, int exists) { ErtsTracerNif *tnif = NULL; Eterm op = exists ? 
am_send : am_send_to_non_existing_process; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p) + ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p) || erts_thr_progress_is_blocking()); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_SEND, op)) send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_SEND, op, msg, receiver, am_true); @@ -2027,9 +1839,9 @@ trace_port_send(Port *t_p, Eterm receiver, Eterm msg, int exists) void trace_port_send_binary(Port *t_p, Eterm to, Eterm what, char *bin, Sint sz) { ErtsTracerNif *tnif = NULL; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p) + ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p) || erts_thr_progress_is_blocking()); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_SEND, am_send)) { Eterm msg; Binary* bptr = NULL; @@ -2075,9 +1887,9 @@ trace_sched_ports(Port *p, Eterm what) { void trace_sched_ports_where(Port *t_p, Eterm what, Eterm where) { ErtsTracerNif *tnif = NULL; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p) + ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p) || erts_thr_progress_is_blocking()); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_SCHED_PORT, what)) send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_SCHED_PORT, @@ -2092,24 +1904,14 @@ profile_runnable_port(Port *p, Eterm status) { ErlHeapFragment *bp = NULL; Eterm count = make_small(0); -#ifndef ERTS_SMP -#define LOCAL_HEAP_SIZE (6 + ERTS_TRACE_PATCH_TS_MAX_SIZE) - - DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE); - UseTmpHeapNoproc(LOCAL_HEAP_SIZE); - - hp = local_heap; - -#else Uint hsz; hsz = 6 + patch_ts_size(erts_system_profile_ts_type)-1; bp = new_message_buffer(hsz); hp = bp->mem; -#endif - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); msg = TUPLE5(hp, am_profile, p->common.id, status, count, NIL /* Will be overwritten by timestamp */); @@ -2118,14 +1920,8 @@ profile_runnable_port(Port *p, Eterm status) { /* Write timestamp in element 5 of the 'msg' tuple */ hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL); -#ifndef ERTS_SMP - profile_send(p->common.id, msg); - UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE); -#undef LOCAL_HEAP_SIZE -#else enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->common.id, NIL, msg, bp); -#endif - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); } /* Process profiling */ @@ -2136,23 +1932,13 @@ profile_runnable_proc(Process *p, Eterm status){ ErlHeapFragment *bp = NULL; ErtsCodeMFA *cmfa = NULL; -#ifndef ERTS_SMP -#define LOCAL_HEAP_SIZE (4 + 6 + ERTS_TRACE_PATCH_TS_MAX_SIZE) - DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE); - UseTmpHeapNoproc(LOCAL_HEAP_SIZE); - - hp = local_heap; -#else ErtsThrPrgrDelayHandle dhndl; Uint hsz = 4 + 6 + patch_ts_size(erts_system_profile_ts_type)-1; -#endif /* Assumptions: * We possibly don't have the MAIN_LOCK for the process p here. 
* We assume that we can read from p->current and p->i atomically */ -#ifdef ERTS_SMP dhndl = erts_thr_progress_unmanaged_delay(); /* suspend purge operations */ -#endif if (!ERTS_PROC_IS_EXITING(p)) { if (p->current) { @@ -2162,14 +1948,12 @@ profile_runnable_proc(Process *p, Eterm status){ } } -#ifdef ERTS_SMP if (!cmfa) { hsz -= 4; } bp = new_message_buffer(hsz); hp = bp->mem; -#endif if (cmfa) { where = TUPLE3(hp, cmfa->module, cmfa->function, @@ -2179,11 +1963,9 @@ profile_runnable_proc(Process *p, Eterm status){ where = make_small(0); } -#ifdef ERTS_SMP erts_thr_progress_unmanaged_continue(dhndl); -#endif - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); msg = TUPLE5(hp, am_profile, p->common.id, status, where, NIL /* Will be overwritten by timestamp */); @@ -2192,20 +1974,13 @@ profile_runnable_proc(Process *p, Eterm status){ /* Write timestamp in element 5 of the 'msg' tuple */ hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL); -#ifndef ERTS_SMP - profile_send(p->common.id, msg); - UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE); -#undef LOCAL_HEAP_SIZE -#else enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->common.id, NIL, msg, bp); -#endif - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); } /* End system_profile tracing */ -#ifdef ERTS_SMP typedef struct ErtsSysMsgQ_ ErtsSysMsgQ; struct ErtsSysMsgQ_ { @@ -2251,7 +2026,7 @@ enqueue_sys_msg_unlocked(enum ErtsSysMsgType type, sys_message_queue = smqp; } sys_message_queue_end = smqp; - erts_smp_cnd_signal(&smq_cnd); + erts_cnd_signal(&smq_cnd); } static void @@ -2261,9 +2036,9 @@ enqueue_sys_msg(enum ErtsSysMsgType type, Eterm msg, ErlHeapFragment *bp) { - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); enqueue_sys_msg_unlocked(type, from, to, msg, bp); - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); } void @@ -2315,10 +2090,10 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver) && !erts_system_monitor_flags.busy_port && !erts_system_monitor_flags.busy_dist_port) break; /* Everything is disabled */ - erts_smp_thr_progress_block(); + erts_thr_progress_block(); if (system_monitor == receiver || receiver == NIL) erts_system_monitor_clear(NULL); - erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); break; case SYS_MSG_TYPE_SYSPROF: if (receiver == NIL @@ -2328,11 +2103,11 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver) && !erts_system_profile_flags.scheduler) break; /* Block system to clear flags */ - erts_smp_thr_progress_block(); + erts_thr_progress_block(); if (system_profile == receiver || receiver == NIL) { erts_system_profile_clear(NULL); } - erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); break; case SYS_MSG_TYPE_ERRLGR: { char *no_elgger = "(no error logger present)"; @@ -2377,38 +2152,38 @@ static void sys_msg_dispatcher_wakeup(void *vwait_p) { int *wait_p = (int *) vwait_p; - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); *wait_p = 0; - erts_smp_cnd_signal(&smq_cnd); - erts_smp_mtx_unlock(&smq_mtx); + erts_cnd_signal(&smq_cnd); + erts_mtx_unlock(&smq_mtx); } static void sys_msg_dispatcher_prep_wait(void *vwait_p) { int *wait_p = (int *) vwait_p; - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); *wait_p = 1; - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); } static void sys_msg_dispatcher_fin_wait(void *vwait_p) { int *wait_p = (int *) vwait_p; - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); *wait_p = 0; - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); } static void 
sys_msg_dispatcher_wait(void *vwait_p) { int *wait_p = (int *) vwait_p; - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); while (*wait_p) - erts_smp_cnd_wait(&smq_cnd, &smq_mtx); - erts_smp_mtx_unlock(&smq_mtx); + erts_cnd_wait(&smq_cnd, &smq_mtx); + erts_mtx_unlock(&smq_mtx); } static void * @@ -2434,9 +2209,9 @@ sys_msg_dispatcher_func(void *unused) int end_wait = 0; ErtsSysMsgQ *smqp; - ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(!erts_thr_progress_is_blocking()); - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); /* Free previously used queue ... */ while (local_sys_message_queue) { @@ -2447,21 +2222,21 @@ sys_msg_dispatcher_func(void *unused) /* Fetch current trace message queue ... */ if (!sys_message_queue) { - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); end_wait = 1; erts_thr_progress_active(NULL, 0); erts_thr_progress_prepare_wait(NULL); - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); } while (!sys_message_queue) - erts_smp_cnd_wait(&smq_cnd, &smq_mtx); + erts_cnd_wait(&smq_cnd, &smq_mtx); local_sys_message_queue = sys_message_queue; sys_message_queue = NULL; sys_message_queue_end = NULL; - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); if (end_wait) { erts_thr_progress_finalize_wait(NULL); @@ -2535,7 +2310,7 @@ sys_msg_dispatcher_func(void *unused) #ifdef DEBUG_PRINTOUTS erts_fprintf(stderr, "delivered\n"); #endif - erts_smp_proc_unlock(proc, proc_locks); + erts_proc_unlock(proc, proc_locks); } } else if (receiver == am_error_logger) { @@ -2573,7 +2348,7 @@ sys_msg_dispatcher_func(void *unused) sys_msg_disp_failure(smqp, receiver); drop_sys_msg: if (proc) - erts_smp_proc_unlock(proc, proc_locks); + erts_proc_unlock(proc, proc_locks); if (smqp->bp) free_message_buffer(smqp->bp); #ifdef DEBUG_PRINTOUTS @@ -2593,7 +2368,7 @@ erts_foreach_sys_msg_in_q(void (*func)(Eterm, ErlHeapFragment *)) { ErtsSysMsgQ *sm; - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); for (sm = sys_message_queue; sm; sm = sm->next) { Eterm to; switch (sm->type) { @@ -2612,29 +2387,28 @@ erts_foreach_sys_msg_in_q(void (*func)(Eterm, } (*func)(sm->from, to, sm->msg, sm->bp); } - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); } static void init_sys_msg_dispatcher(void) { - erts_smp_thr_opts_t thr_opts = ERTS_SMP_THR_OPTS_DEFAULT_INITER; + erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER; thr_opts.detached = 1; thr_opts.name = "sys_msg_dispatcher"; init_smq_element_alloc(); sys_message_queue = NULL; sys_message_queue_end = NULL; - erts_smp_cnd_init(&smq_cnd); - erts_smp_mtx_init(&smq_mtx, "sys_msg_q", NIL, + erts_cnd_init(&smq_cnd); + erts_mtx_init(&smq_mtx, "sys_msg_q", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG); - erts_smp_thr_create(&sys_msg_dispatcher_tid, + erts_thr_create(&sys_msg_dispatcher_tid, sys_msg_dispatcher_func, NULL, &thr_opts); } -#endif #include "erl_nif.h" @@ -2730,7 +2504,7 @@ static void init_tracer_template(ErtsTracerNif *tnif) { } static Hash *tracer_hash = NULL; -static erts_smp_rwmtx_t tracer_mtx; +static erts_rwmtx_t tracer_mtx; static ErtsTracerNif * load_tracer_nif(const ErtsTracer tracer) @@ -2770,9 +2544,9 @@ load_tracer_nif(const ErtsTracer tracer) return NULL; } - erts_smp_rwmtx_rwlock(&tracer_mtx); + erts_rwmtx_rwlock(&tracer_mtx); tnif = hash_put(tracer_hash, &tnif_tmpl); - erts_smp_rwmtx_rwunlock(&tracer_mtx); + erts_rwmtx_rwunlock(&tracer_mtx); return tnif; } @@ -2783,14 +2557,14 @@ lookup_tracer_nif(const ErtsTracer tracer) ErtsTracerNif 
tnif_tmpl; ErtsTracerNif *tnif; tnif_tmpl.module = ERTS_TRACER_MODULE(tracer); - erts_smp_rwmtx_rlock(&tracer_mtx); + erts_rwmtx_rlock(&tracer_mtx); if ((tnif = hash_get(tracer_hash, &tnif_tmpl)) == NULL) { - erts_smp_rwmtx_runlock(&tracer_mtx); + erts_rwmtx_runlock(&tracer_mtx); tnif = load_tracer_nif(tracer); ASSERT(!tnif || tnif->nif_mod); return tnif; } - erts_smp_rwmtx_runlock(&tracer_mtx); + erts_rwmtx_runlock(&tracer_mtx); ASSERT(tnif->nif_mod); return tnif; } @@ -2928,17 +2702,17 @@ send_to_tracer_nif(Process *c_p, ErtsPTabElementCommon *t_p, Eterm t_p_id, ErtsTracerNif *tnif, enum ErtsTracerOpt topt, Eterm tag, Eterm msg, Eterm extra, Eterm pam_result) { -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) if (c_p) { /* We have to hold the main lock of the currently executing process */ erts_proc_lc_chk_have_proc_locks(c_p, ERTS_PROC_LOCK_MAIN); } if (is_internal_pid(t_p->id)) { /* We have to have at least one lock */ - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL); } else { ASSERT(is_internal_port(t_p->id)); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p)); + ERTS_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p)); } #endif @@ -2981,17 +2755,17 @@ is_tracer_enabled(Process* c_p, ErtsProcLocks c_p_locks, enum ErtsTracerOpt topt, Eterm tag) { Eterm nif_result; -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) if (c_p) - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == c_p_locks + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == c_p_locks || erts_thr_progress_is_blocking()); if (is_internal_pid(t_p->id)) { /* We have to have at least one lock */ - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL || erts_thr_progress_is_blocking()); } else { ASSERT(is_internal_port(t_p->id)); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p) + ERTS_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p) || erts_thr_progress_is_blocking()); } #endif @@ -3013,12 +2787,12 @@ is_tracer_enabled(Process* c_p, ErtsProcLocks c_p_locks, if (is_internal_port(t_p->id) || (c_p && c_p->common.id == t_p->id)) { ErtsProcLocks c_p_xlocks = 0; if (is_internal_pid(t_p->id)) { - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); if (c_p_locks != ERTS_PROC_LOCKS_ALL) { c_p_xlocks = ~c_p_locks & ERTS_PROC_LOCKS_ALL; - if (erts_smp_proc_trylock(c_p, c_p_xlocks) == EBUSY) { - erts_smp_proc_unlock(c_p, c_p_locks & ~ERTS_PROC_LOCK_MAIN); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + if (erts_proc_trylock(c_p, c_p_xlocks) == EBUSY) { + erts_proc_unlock(c_p, c_p_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); } } } @@ -3026,7 +2800,7 @@ is_tracer_enabled(Process* c_p, ErtsProcLocks c_p_locks, t_p->trace_flags &= ~TRACEE_FLAGS; if (c_p_xlocks) - erts_smp_proc_unlock(c_p, c_p_xlocks); + erts_proc_unlock(c_p, c_p_xlocks); } return 0; @@ -3066,7 +2840,7 @@ int erts_is_tracer_proc_enabled_send(Process* c_p, ErtsProcLocks c_p_locks, void erts_tracer_replace(ErtsPTabElementCommon *t_p, const ErtsTracer tracer) { -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) if (is_internal_pid(t_p->id) && !erts_thr_progress_is_blocking()) { 
erts_proc_lc_chk_have_proc_locks((Process*)t_p, ERTS_PROC_LOCKS_ALL); } else if (is_internal_port(t_p->id)) { @@ -3184,11 +2958,11 @@ erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer) static void init_tracer_nif() { - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; - rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; + rwmtx_opt.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; + rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED; - erts_smp_rwmtx_init_opt(&tracer_mtx, &rwmtx_opt, "tracer_mtx", NIL, + erts_rwmtx_init_opt(&tracer_mtx, &rwmtx_opt, "tracer_mtx", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG); erts_tracer_nif_clear(); @@ -3198,7 +2972,7 @@ static void init_tracer_nif() int erts_tracer_nif_clear() { - erts_smp_rwmtx_rlock(&tracer_mtx); + erts_rwmtx_rlock(&tracer_mtx); if (!tracer_hash || tracer_hash->nobjs) { HashFunctions hf; @@ -3210,19 +2984,19 @@ int erts_tracer_nif_clear() hf.meta_free = (HMFREE_FUN) erts_free; hf.meta_print = (HMPRINT_FUN) erts_print; - erts_smp_rwmtx_runlock(&tracer_mtx); - erts_smp_rwmtx_rwlock(&tracer_mtx); + erts_rwmtx_runlock(&tracer_mtx); + erts_rwmtx_rwlock(&tracer_mtx); if (tracer_hash) hash_delete(tracer_hash); tracer_hash = hash_new(ERTS_ALC_T_TRACER_NIF, "tracer_hash", 10, hf); - erts_smp_rwmtx_rwunlock(&tracer_mtx); + erts_rwmtx_rwunlock(&tracer_mtx); return 1; } - erts_smp_rwmtx_runlock(&tracer_mtx); + erts_rwmtx_runlock(&tracer_mtx); return 0; } diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h index 01fe1e5e23..dbf7ebd2a1 100644 --- a/erts/emulator/beam/erl_trace.h +++ b/erts/emulator/beam/erl_trace.h @@ -87,7 +87,6 @@ void erts_set_system_monitor(Eterm monitor); Eterm erts_get_system_monitor(void); int erts_is_tracer_valid(Process* p); -#ifdef ERTS_SMP void erts_check_my_tracer_proc(Process *); void erts_block_sys_msg_dispatcher(void); void erts_release_sys_msg_dispatcher(void); @@ -97,7 +96,6 @@ void erts_foreach_sys_msg_in_q(void (*func)(Eterm, ErlHeapFragment *)); void erts_queue_error_logger_message(Eterm, Eterm, ErlHeapFragment *); void erts_send_sys_msg_proc(Eterm, Eterm, Eterm, ErlHeapFragment *); -#endif void trace_send(Process*, Eterm, Eterm); void trace_receive(Process*, Eterm, Eterm, ErtsTracingEvent*); @@ -149,16 +147,12 @@ erts_bif_trace_epilogue(Process *p, Eterm result, int applying, Uint32 flags_meta, BeamInstr* I, ErtsTracer meta_tracer); -#ifdef ERTS_SMP void erts_send_pending_trace_msgs(ErtsSchedulerData *esdp); -#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP) \ +#define ERTS_CHK_PEND_TRACE_MSGS(ESDP) \ do { \ if ((ESDP)->pending_trace_msgs) \ erts_send_pending_trace_msgs((ESDP)); \ } while (0) -#else -#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP) -#endif #define seq_trace_output(token, msg, type, receiver, process) \ seq_trace_output_generic((token), (msg), (type), (receiver), (process), NIL) diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h index 3d28b05752..44d8c85867 100644 --- a/erts/emulator/beam/erl_utils.h +++ b/erts/emulator/beam/erl_utils.h @@ -22,15 +22,11 @@ #define ERL_UTILS_H__ #include "sys.h" -#include "erl_smp.h" #include "erl_printf.h" struct process; typedef struct { -#ifdef DEBUG - int smp_api; -#endif union { Uint64 not_atomic; erts_atomic64_t atomic; @@ -38,70 +34,25 @@ typedef struct { } erts_interval_t; void erts_interval_init(erts_interval_t *); -void 
erts_smp_interval_init(erts_interval_t *); Uint64 erts_step_interval_nob(erts_interval_t *); Uint64 erts_step_interval_relb(erts_interval_t *); -Uint64 erts_smp_step_interval_nob(erts_interval_t *); -Uint64 erts_smp_step_interval_relb(erts_interval_t *); Uint64 erts_ensure_later_interval_nob(erts_interval_t *, Uint64); Uint64 erts_ensure_later_interval_acqb(erts_interval_t *, Uint64); -Uint64 erts_smp_ensure_later_interval_nob(erts_interval_t *, Uint64); -Uint64 erts_smp_ensure_later_interval_acqb(erts_interval_t *, Uint64); -ERTS_GLB_INLINE Uint64 erts_current_interval_nob__(erts_interval_t *); -ERTS_GLB_INLINE Uint64 erts_current_interval_acqb__(erts_interval_t *); ERTS_GLB_INLINE Uint64 erts_current_interval_nob(erts_interval_t *); ERTS_GLB_INLINE Uint64 erts_current_interval_acqb(erts_interval_t *); -ERTS_GLB_INLINE Uint64 erts_smp_current_interval_nob(erts_interval_t *); -ERTS_GLB_INLINE Uint64 erts_smp_current_interval_acqb(erts_interval_t *); #if ERTS_GLB_INLINE_INCL_FUNC_DEF ERTS_GLB_INLINE Uint64 -erts_current_interval_nob__(erts_interval_t *icp) -{ - return (Uint64) erts_atomic64_read_nob(&icp->counter.atomic); -} - -ERTS_GLB_INLINE Uint64 -erts_current_interval_acqb__(erts_interval_t *icp) -{ - return (Uint64) erts_atomic64_read_acqb(&icp->counter.atomic); -} - -ERTS_GLB_INLINE Uint64 erts_current_interval_nob(erts_interval_t *icp) { - ASSERT(!icp->smp_api); - return erts_current_interval_nob__(icp); + return (Uint64) erts_atomic64_read_nob(&icp->counter.atomic); } ERTS_GLB_INLINE Uint64 erts_current_interval_acqb(erts_interval_t *icp) { - ASSERT(!icp->smp_api); - return erts_current_interval_acqb__(icp); -} - -ERTS_GLB_INLINE Uint64 -erts_smp_current_interval_nob(erts_interval_t *icp) -{ - ASSERT(icp->smp_api); -#ifdef ERTS_SMP - return erts_current_interval_nob__(icp); -#else - return icp->counter.not_atomic; -#endif -} - -ERTS_GLB_INLINE Uint64 -erts_smp_current_interval_acqb(erts_interval_t *icp) -{ - ASSERT(icp->smp_api); -#ifdef ERTS_SMP - return erts_current_interval_acqb__(icp); -#else - return icp->counter.not_atomic; -#endif + return (Uint64) erts_atomic64_read_acqb(&icp->counter.atomic); } #endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h index 0b8d78c469..661538eadd 100644 --- a/erts/emulator/beam/erl_vm.h +++ b/erts/emulator/beam/erl_vm.h @@ -46,8 +46,6 @@ */ #define ERTS_X_REGS_ALLOCATED (MAX_REG+3) -#define INPUT_REDUCTIONS (2 * CONTEXT_REDS) - #define H_DEFAULT_SIZE 233 /* default (heap + stack) min size */ #define VH_DEFAULT_SIZE 32768 /* default virtual (bin) heap min size (words) */ #define H_DEFAULT_MAX_SIZE 0 /* default max heap size is off */ @@ -55,7 +53,7 @@ #define CP_SIZE 1 #define ErtsHAllocLockCheck(P) \ - ERTS_SMP_LC_ASSERT(erts_dbg_check_halloc_lock((P))) + ERTS_LC_ASSERT(erts_dbg_check_halloc_lock((P))) #ifdef DEBUG @@ -102,9 +100,11 @@ if ((ptr) == (endp)) { \ ; \ } else if (HEAP_START(p) <= (ptr) && (ptr) < HEAP_TOP(p)) { \ + ASSERT(HEAP_TOP(p) == (endp)); \ HEAP_TOP(p) = (ptr); \ } else { \ - erts_heap_frag_shrink(p, ptr); \ + ASSERT(MBUF(p)->mem + MBUF(p)->used_size == (endp)); \ + erts_heap_frag_shrink(p, ptr); \ } #define HeapWordsLeft(p) (HEAP_LIMIT(p) - HEAP_TOP(p)) @@ -157,6 +157,7 @@ typedef struct op_entry { Uint32 mask[3]; /* Signature mask. */ unsigned involves_r; /* Needs special attention when matching. */ int sz; /* Number of loaded words. */ + int adjust; /* Adjustment for start of instruction. */ char* pack; /* Instructions for packing engine. 
*/ char* sign; /* Signature string. */ } OpEntry; @@ -199,11 +200,15 @@ extern int erts_pd_initial_size;/* Initial Process dictionary table size */ #include "erl_term.h" -#ifdef NO_JUMP_TABLE -#define BeamOp(Op) (Op) +#if defined(NO_JUMP_TABLE) +# define BeamOpsAreInitialized() (1) +# define BeamOpCodeAddr(OpCode) ((BeamInstr)(OpCode)) #else extern void** beam_ops; -#define BeamOp(Op) beam_ops[(Op)] +# define BeamOpsAreInitialized() (beam_ops != 0) +# define BeamOpCodeAddr(OpCode) ((BeamInstr)beam_ops[(OpCode)]) #endif +#define BeamIsOpCode(InstrWord, OpCode) ((InstrWord) == BeamOpCodeAddr(OpCode)) + #endif /* __ERL_VM_H__ */ diff --git a/erts/emulator/beam/erlang_lttng.h b/erts/emulator/beam/erlang_lttng.h index 4e869671f7..feb05f4f4c 100644 --- a/erts/emulator/beam/erlang_lttng.h +++ b/erts/emulator/beam/erlang_lttng.h @@ -159,21 +159,6 @@ TRACEPOINT_EVENT( TRACEPOINT_EVENT( org_erlang_otp, - driver_event, - TP_ARGS( - char*, pid, - char*, port, - char*, driver - ), - TP_FIELDS( - ctf_string(pid, pid) - ctf_string(port, port) - ctf_string(driver, driver) - ) -) - -TRACEPOINT_EVENT( - org_erlang_otp, driver_timeout, TP_ARGS( char*, pid, diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c index 828c833ffc..946ffeffb8 100644 --- a/erts/emulator/beam/export.c +++ b/erts/emulator/beam/export.c @@ -41,16 +41,13 @@ static IndexTable export_tables[ERTS_NUM_CODE_IX]; /* Active not locked */ -static erts_smp_atomic_t total_entries_bytes; - -#include "erl_smp.h" +static erts_atomic_t total_entries_bytes; /* This lock protects the staging export table from concurrent access * AND it protects the staging table from becoming active. */ -erts_smp_mtx_t export_staging_lock; +erts_mtx_t export_staging_lock; -extern BeamInstr* em_call_error_handler; extern BeamInstr* em_call_traced_function; struct export_entry @@ -85,17 +82,13 @@ static struct export_blob* entry_to_blob(struct export_entry* ee) void export_info(fmtfn_t to, void *to_arg) { -#ifdef ERTS_SMP int lock = !ERTS_IS_CRASH_DUMPING; if (lock) export_staging_lock(); -#endif index_info(to, to_arg, &export_tables[erts_active_code_ix()]); hash_info(to, to_arg, &export_tables[erts_staging_code_ix()].htable); -#ifdef ERTS_SMP if (lock) export_staging_unlock(); -#endif } @@ -129,14 +122,17 @@ export_alloc(struct export_entry* tmpl_e) Export* obj; blob = (struct export_blob*) erts_alloc(ERTS_ALC_T_EXPORT, sizeof(*blob)); - erts_smp_atomic_add_nob(&total_entries_bytes, sizeof(*blob)); + erts_atomic_add_nob(&total_entries_bytes, sizeof(*blob)); obj = &blob->exp; obj->info.op = 0; obj->info.u.gen_bp = NULL; obj->info.mfa.module = tmpl->info.mfa.module; obj->info.mfa.function = tmpl->info.mfa.function; obj->info.mfa.arity = tmpl->info.mfa.arity; - obj->beam[0] = (BeamInstr) em_call_error_handler; + obj->beam[0] = 0; + if (BeamOpsAreInitialized()) { + obj->beam[0] = BeamOpCodeAddr(op_call_error_handler); + } obj->beam[1] = 0; for (ix=0; ix<ERTS_NUM_CODE_IX; ix++) { @@ -173,7 +169,7 @@ export_free(struct export_entry* obj) } DBG_TRACE_MFA_P(&blob->exp.info.mfa, "export blob deallocation at %p", &blob->exp); erts_free(ERTS_ALC_T_EXPORT, blob); - erts_smp_atomic_add_nob(&total_entries_bytes, -sizeof(*blob)); + erts_atomic_add_nob(&total_entries_bytes, -sizeof(*blob)); } void @@ -182,9 +178,9 @@ init_export_table(void) HashFunctions f; int i; - erts_smp_mtx_init(&export_staging_lock, "export_tab", NIL, + erts_mtx_init(&export_staging_lock, "export_tab", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); - 
erts_smp_atomic_init_nob(&total_entries_bytes, 0); + erts_atomic_init_nob(&total_entries_bytes, 0); f.hash = (H_FUN) export_hash; f.cmp = (HCMP_FUN) export_cmp; @@ -273,7 +269,7 @@ erts_find_function(Eterm m, Eterm f, unsigned int a, ErtsCodeIndex code_ix) ee = hash_get(&export_tables[code_ix].htable, init_template(&templ, m, f, a)); if (ee == NULL || (ee->ep->addressv[code_ix] == ee->ep->beam && - ee->ep->beam[0] != (BeamInstr) BeamOp(op_i_generic_breakpoint))) { + ! BeamIsOpCode(ee->ep->beam[0], op_i_generic_breakpoint))) { return NULL; } return ee->ep; @@ -373,7 +369,7 @@ int export_table_sz(void) } int export_entries_sz(void) { - return erts_smp_atomic_read_nob(&total_entries_bytes); + return erts_atomic_read_nob(&total_entries_bytes); } Export *export_get(Export *e) { diff --git a/erts/emulator/beam/export.h b/erts/emulator/beam/export.h index 7c812b306c..194e514b12 100644 --- a/erts/emulator/beam/export.h +++ b/erts/emulator/beam/export.h @@ -66,14 +66,14 @@ Export *export_get(Export*); void export_start_staging(void); void export_end_staging(int commit); -extern erts_smp_mtx_t export_staging_lock; -#define export_staging_lock() erts_smp_mtx_lock(&export_staging_lock) -#define export_staging_unlock() erts_smp_mtx_unlock(&export_staging_lock) +extern erts_mtx_t export_staging_lock; +#define export_staging_lock() erts_mtx_lock(&export_staging_lock) +#define export_staging_unlock() erts_mtx_unlock(&export_staging_lock) #include "beam_load.h" /* For em_* extern declarations */ #define ExportIsBuiltIn(EntryPtr) \ (((EntryPtr)->addressv[erts_active_code_ix()] == (EntryPtr)->beam) && \ - ((EntryPtr)->beam[0] == (BeamInstr) em_apply_bif)) + (BeamIsOpCode((EntryPtr)->beam[0], op_apply_bif))) #if ERTS_GLB_INLINE_INCL_FUNC_DEF diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c index c0a3838d42..970158933f 100644 --- a/erts/emulator/beam/external.c +++ b/erts/emulator/beam/external.c @@ -616,7 +616,7 @@ erts_make_dist_ext_copy(ErtsDistExternal *edep, Uint xsize) sys_memcpy((void *) ep, (void *) edep, dist_ext_sz); ep += dist_ext_sz; if (new_edep->dep) - erts_smp_refc_inc(&new_edep->dep->refc, 1); + erts_ref_dist_entry(new_edep->dep); new_edep->extp = ep; new_edep->ext_endp = ep + ext_sz; new_edep->heap_size = -1; @@ -629,7 +629,8 @@ erts_prepare_dist_ext(ErtsDistExternal *edep, byte *ext, Uint size, DistEntry *dep, - ErtsAtomCache *cache) + ErtsAtomCache *cache, + Uint32 *connection_id) { #undef ERTS_EXT_FAIL #undef ERTS_EXT_HDR_FAIL @@ -650,33 +651,36 @@ erts_prepare_dist_ext(ErtsDistExternal *edep, if (size < 2) ERTS_EXT_FAIL; + if (!dep) + ERTS_INTERNAL_ERROR("Invalid use"); + if (ep[0] != VERSION_MAGIC) { erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); - if (dep) - erts_dsprintf(dsbufp, - "** Got message from incompatible erlang on " - "channel %d\n", - dist_entry_channel_no(dep)); - else - erts_dsprintf(dsbufp, - "** Attempt to convert old incompatible " - "binary %d\n", - *ep); + erts_dsprintf(dsbufp, + "** Got message from incompatible erlang on " + "channel %d\n", + dist_entry_channel_no(dep)); erts_send_error_to_logger_nogl(dsbufp); ERTS_EXT_FAIL; } edep->flags = 0; edep->dep = dep; - if (dep) { - erts_smp_de_rlock(dep); - if (dep->flags & DFLAG_DIST_HDR_ATOM_CACHE) - edep->flags |= ERTS_DIST_EXT_DFLAG_HDR; - - edep->flags |= (dep->connection_id & ERTS_DIST_EXT_CON_ID_MASK); - erts_smp_de_runlock(dep); + + erts_de_rlock(dep); + + if ((dep->status & (ERTS_DE_SFLG_EXITING|ERTS_DE_SFLG_CONNECTED)) + != ERTS_DE_SFLG_CONNECTED) { + erts_de_runlock(dep); + 
return ERTS_PREP_DIST_EXT_CLOSED; } + if (dep->flags & DFLAG_DIST_HDR_ATOM_CACHE) + edep->flags |= ERTS_DIST_EXT_DFLAG_HDR; + + *connection_id = dep->connection_id; + edep->flags |= (dep->connection_id & ERTS_DIST_EXT_CON_ID_MASK); + if (ep[1] != DIST_HEADER) { if (edep->flags & ERTS_DIST_EXT_DFLAG_HDR) ERTS_EXT_HDR_FAIL; @@ -835,14 +839,15 @@ erts_prepare_dist_ext(ErtsDistExternal *edep, ERTS_EXT_FAIL; #endif - return 0; + erts_de_runlock(dep); + + return ERTS_PREP_DIST_EXT_SUCCESS; #undef CHKSIZE #undef ERTS_EXT_FAIL #undef ERTS_EXT_HDR_FAIL - bad_hdr: - if (dep) { + bad_hdr: { erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); erts_dsprintf(dsbufp, "%T got a corrupted distribution header from %T " @@ -855,10 +860,11 @@ erts_prepare_dist_ext(ErtsDistExternal *edep, erts_dsprintf(dsbufp, ">>"); erts_send_warning_to_logger_nogl(dsbufp); } - fail: - if (dep) - erts_kill_dist_connection(dep, dep->connection_id); - return -1; + fail: { + erts_de_runlock(dep); + erts_kill_dist_connection(dep, *connection_id); + } + return ERTS_PREP_DIST_EXT_FAILED; } static void @@ -3109,7 +3115,7 @@ dec_term(ErtsDistExternal *edep, #if defined(ARCH_64) *objp = make_small(sn); #else - if (MY_IS_SSMALL(sn)) { + if (IS_SSMALL(sn)) { *objp = make_small(sn); } else { *objp = small_to_big(sn, hp); diff --git a/erts/emulator/beam/external.h b/erts/emulator/beam/external.h index f00426cc16..3c61d013da 100644 --- a/erts/emulator/beam/external.h +++ b/erts/emulator/beam/external.h @@ -185,8 +185,13 @@ ERTS_GLB_INLINE void *erts_dist_ext_trailer(ErtsDistExternal *); ErtsDistExternal *erts_make_dist_ext_copy(ErtsDistExternal *, Uint); void *erts_dist_ext_trailer(ErtsDistExternal *); void erts_destroy_dist_ext_copy(ErtsDistExternal *); + +#define ERTS_PREP_DIST_EXT_FAILED (-1) +#define ERTS_PREP_DIST_EXT_SUCCESS (0) +#define ERTS_PREP_DIST_EXT_CLOSED (1) + int erts_prepare_dist_ext(ErtsDistExternal *, byte *, Uint, - DistEntry *, ErtsAtomCache *); + DistEntry *, ErtsAtomCache *, Uint32 *); Sint erts_decode_dist_ext_size(ErtsDistExternal *); Eterm erts_decode_dist_ext(ErtsHeapFactory* factory, ErtsDistExternal *); diff --git a/erts/emulator/beam/float_instrs.tab b/erts/emulator/beam/float_instrs.tab new file mode 100644 index 0000000000..3d4db77892 --- /dev/null +++ b/erts/emulator/beam/float_instrs.tab @@ -0,0 +1,88 @@ +// -*- c -*- +// +// %CopyrightBegin% +// +// Copyright Ericsson AB 2017. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// %CopyrightEnd% +// + +LOAD_DOUBLE(Src, Dst) { + GET_DOUBLE($Src, *(FloatDef *) &$Dst); +} + +fload(Reg, Dst) { + $LOAD_DOUBLE($Reg, $Dst); +} + +fstore(Float, Dst) { + PUT_DOUBLE(*((FloatDef *) &$Float), HTOP); + $Dst = make_float(HTOP); + HTOP += FLOAT_SIZE_OBJECT; +} + +fconv(Src, Dst) { + Eterm src = $Src; + + if (is_small(src)) { + $Dst = (double) signed_val(src); + } else if (is_big(src)) { + if (big_to_double(src, &$Dst) < 0) { + $BADARITH0(); + } + } else if (is_float(src)) { + $LOAD_DOUBLE(src, $Dst); + } else { + $BADARITH0(); + } +} + +FLOAT_OP(Src1, OP, Src2, Dst) { + ERTS_NO_FPE_CHECK_INIT(c_p); + $Dst = $Src1 $OP $Src2; + ERTS_NO_FPE_ERROR(c_p, $Dst, $BADARITH0()); +} + +i_fadd(Src1, Src2, Dst) { + $FLOAT_OP($Src1, +, $Src2, $Dst); +} + +i_fsub(Src1, Src2, Dst) { + $FLOAT_OP($Src1, -, $Src2, $Dst); +} + +i_fmul(Src1, Src2, Dst) { + $FLOAT_OP($Src1, *, $Src2, $Dst); +} + +i_fdiv(Src1, Src2, Dst) { + $FLOAT_OP($Src1, /, $Src2, $Dst); +} + +i_fnegate(Src, Dst) { + ERTS_NO_FPE_CHECK_INIT(c_p); + $Dst = -$Src; + ERTS_NO_FPE_ERROR(c_p, $Dst, $BADARITH0()); +} + +%unless NO_FPE_SIGNALS +fclearerror() { + ERTS_FP_CHECK_INIT(c_p); +} + +i_fcheckerror() { + ERTS_FP_ERROR(c_p, freg[0].fd, $BADARITH0()); +} +%endif diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h index ddf7f03265..604172857a 100644 --- a/erts/emulator/beam/global.h +++ b/erts/emulator/beam/global.h @@ -85,7 +85,7 @@ struct enif_resource_type_t typedef struct { - erts_smp_mtx_t lock; + erts_mtx_t lock; ErtsMonitor* root; int pending_failed_fire; int is_dying; @@ -128,10 +128,8 @@ extern Eterm erts_nif_call_function(Process *p, Process *tracee, struct enif_func_t *, int argc, Eterm *argv); -#ifdef ERTS_DIRTY_SCHEDULERS int erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *reg); -#endif /* ERTS_DIRTY_SCHEDULERS */ /* Driver handle (wrapper for old plain handle) */ @@ -183,9 +181,9 @@ typedef struct { void *handle; /* Handle for DLL or SO (for dyn. drivers). 
*/ DE_ProcEntry *procs; /* List of pids that have loaded this driver, or that wait for it to change state */ - erts_smp_refc_t refc; /* Number of ports/processes having + erts_refc_t refc; /* Number of ports/processes having references to the driver */ - erts_smp_atomic32_t port_count; /* Number of ports using the driver */ + erts_atomic32_t port_count; /* Number of ports using the driver */ Uint flags; /* ERL_DE_FL_KILL_PORTS */ int status; /* ERL_DE_xxx */ char *full_path; /* Full path of the driver */ @@ -209,9 +207,7 @@ struct erts_driver_t_ { } version; int flags; DE_Handle *handle; -#ifdef ERTS_SMP - erts_smp_mtx_t *lock; -#endif + erts_mtx_t *lock; ErlDrvEntry *entry; ErlDrvData (*start)(ErlDrvPort port, char *command, SysDriverOpts* opts); void (*stop)(ErlDrvData drv_data); @@ -226,8 +222,6 @@ struct erts_driver_t_ { char *buf, ErlDrvSizeT len, char **rbuf, ErlDrvSizeT rlen, /* Might be NULL */ unsigned int *flags); - void (*event)(ErlDrvData drv_data, ErlDrvEvent event, - ErlDrvEventData event_data); void (*ready_input)(ErlDrvData drv_data, ErlDrvEvent event); void (*ready_output)(ErlDrvData drv_data, ErlDrvEvent event); void (*timeout)(ErlDrvData drv_data); @@ -238,7 +232,7 @@ struct erts_driver_t_ { }; extern erts_driver_t *driver_list; -extern erts_smp_rwmtx_t erts_driver_list_lock; +extern erts_rwmtx_t erts_driver_list_lock; extern void erts_ddll_init(void); extern void erts_ddll_lock_driver(DE_Handle *dh, char *name); @@ -299,7 +293,7 @@ extern Eterm node_cookie; extern Uint display_items; /* no of items to display in traces etc */ extern int erts_backtrace_depth; -extern erts_smp_atomic32_t erts_max_gen_gcs; +extern erts_atomic32_t erts_max_gen_gcs; extern int bif_reductions; /* reductions + fcalls (when doing call_bif) */ extern int stackdump_on_exit; @@ -909,13 +903,11 @@ typedef struct ErtsLiteralArea_ { #define ERTS_LITERAL_AREA_ALLOC_SIZE(N) \ (sizeof(ErtsLiteralArea) + sizeof(Eterm)*((N) - 1)) -extern erts_smp_atomic_t erts_copy_literal_area__; +extern erts_atomic_t erts_copy_literal_area__; #define ERTS_COPY_LITERAL_AREA() \ - ((ErtsLiteralArea *) erts_smp_atomic_read_nob(&erts_copy_literal_area__)) + ((ErtsLiteralArea *) erts_atomic_read_nob(&erts_copy_literal_area__)) extern Process *erts_literal_area_collector; -#ifdef ERTS_DIRTY_SCHEDULERS extern Process *erts_dirty_process_code_checker; -#endif extern Process *erts_code_purger; @@ -1109,7 +1101,6 @@ void erts_save_stacktrace(Process* p, struct StackTrace* s, int depth); typedef struct { Eterm delay_time; int context_reds; - int input_reds; } ErtsModifiedTimings; extern Export *erts_delay_trap; @@ -1127,18 +1118,12 @@ extern ErtsModifiedTimings erts_modified_timings[]; extern int erts_no_line_info; extern Eterm erts_error_logger_warnings; extern int erts_initialized; -#if defined(USE_THREADS) && !defined(ERTS_SMP) -extern erts_tid_t erts_main_thread; -#endif extern int erts_compat_rel; extern int erts_use_sender_punish; void erl_start(int, char**); void erts_usage(void); Eterm erts_preloaded(Process* p); -#ifndef ERTS_SMP -extern void *erts_scheduler_stack_limit; -#endif /* erl_md5.c */ @@ -1182,7 +1167,7 @@ void erts_emergency_close_ports(void); void erts_ref_to_driver_monitor(Eterm ref, ErlDrvMonitor *mon); Eterm erts_driver_monitor_to_ref(Eterm* hp, const ErlDrvMonitor *mon); -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_COUNT) +#if defined(ERTS_ENABLE_LOCK_COUNT) void erts_lcnt_update_driver_locks(int enable); void erts_lcnt_update_port_locks(int enable); #endif @@ -1394,7 +1379,7 @@ Uint 
erts_current_reductions(Process* current, Process *p); int erts_print_system_version(fmtfn_t to, void *arg, Process *c_p); -int erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* reg); +int erts_hibernate(Process* c_p, Eterm* reg); ERTS_GLB_FORCE_INLINE int erts_is_literal(Eterm tptr, Eterm *ptr); diff --git a/erts/emulator/beam/index.c b/erts/emulator/beam/index.c index a1f6f54543..93d1111904 100644 --- a/erts/emulator/beam/index.c +++ b/erts/emulator/beam/index.c @@ -98,7 +98,7 @@ index_put_entry(IndexTable* t, void* tmpl) * Do a write barrier here to allow readers to do lock free iteration. * erts_index_num_entries() does matching read barrier. */ - ERTS_SMP_WRITE_MEMORY_BARRIER; + ERTS_THR_WRITE_MEMORY_BARRIER; t->entries++; return p; diff --git a/erts/emulator/beam/index.h b/erts/emulator/beam/index.h index 6c07571df6..30bc6a1121 100644 --- a/erts/emulator/beam/index.h +++ b/erts/emulator/beam/index.h @@ -88,7 +88,7 @@ ERTS_GLB_INLINE int erts_index_num_entries(IndexTable* t) * on tables where entries are never erased. * index_put_entry() does matching write barrier. */ - ERTS_SMP_READ_MEMORY_BARRIER; + ERTS_THR_READ_MEMORY_BARRIER; return ret; } diff --git a/erts/emulator/beam/instrs.tab b/erts/emulator/beam/instrs.tab new file mode 100644 index 0000000000..07cc4bd527 --- /dev/null +++ b/erts/emulator/beam/instrs.tab @@ -0,0 +1,922 @@ +// -*- c -*- +// +// %CopyrightBegin% +// +// Copyright Ericsson AB 2017. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// %CopyrightEnd% +// + +// Stack manipulation instructions + +allocate(NeedStack, Live) { + $AH($NeedStack, 0, $Live); +} + +allocate_heap(NeedStack, NeedHeap, Live) { + $AH($NeedStack, $NeedHeap, $Live); +} + +allocate_init(NeedStack, Live, Y) { + $AH($NeedStack, 0, $Live); + make_blank($Y); +} + +allocate_zero(NeedStack, Live) { + Eterm* ptr; + int i = $NeedStack; + $AH(i, 0, $Live); + for (ptr = E + i; ptr > E; ptr--) { + make_blank(*ptr); + } +} + +allocate_heap_zero(NeedStack, NeedHeap, Live) { + Eterm* ptr; + int i = $NeedStack; + $AH(i, $NeedHeap, $Live); + for (ptr = E + i; ptr > E; ptr--) { + make_blank(*ptr); + } +} + +// This instruction is probably never used (because it is combined with +// a return). However, a future compiler might for some reason emit a +// deallocate not followed by a return, and that should work.
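+//
+// Illustrative sketch, assuming the loader has already scaled the
+// Deallocate operand to a byte offset that covers the saved CP slot
+// (note ADD_BYTE_OFFSET below, despite the local name "words_to_pop"
+// in deallocate_return): with E pointing at the saved CP, popping a
+// frame amounts to
+//
+//    SET_CP(c_p, (BeamInstr *) cp_val(*E)); /* reload the saved CP    */
+//    E = ADD_BYTE_OFFSET(E, Deallocate);    /* release frame incl. CP */
+//
+// which is what deallocate spells out below; deallocate_return instead
+// loads the saved address straight into I (SET_I) and dispatches.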
+ +deallocate(Deallocate) { + //| -no_prefetch + SET_CP(c_p, (BeamInstr *) cp_val(*E)); + E = ADD_BYTE_OFFSET(E, $Deallocate); +} + +deallocate_return(Deallocate) { + //| -no_next + int words_to_pop = $Deallocate; + SET_I((BeamInstr *) cp_val(*E)); + E = ADD_BYTE_OFFSET(E, words_to_pop); + CHECK_TERM(x(0)); + DispatchReturn; +} + +move_deallocate_return(Src, Deallocate) { + x(0) = $Src; + $deallocate_return($Deallocate); +} + +// Call instructions + +DISPATCH_REL(CallDest) { + //| -no_next + $SET_I_REL($CallDest); + DTRACE_LOCAL_CALL(c_p, erts_code_to_codemfa(I)); + Dispatch(); +} + +DISPATCH_ABS(CallDest) { + //| -no_next + SET_I((BeamInstr *) $CallDest); + DTRACE_LOCAL_CALL(c_p, erts_code_to_codemfa(I)); + Dispatch(); +} + +i_call(CallDest) { + SET_CP(c_p, $NEXT_INSTRUCTION); + $DISPATCH_REL($CallDest); +} + +move_call(Src, CallDest) { + x(0) = $Src; + SET_CP(c_p, $NEXT_INSTRUCTION); + $DISPATCH_REL($CallDest); +} + +i_call_last(CallDest, Deallocate) { + $deallocate($Deallocate); + $DISPATCH_REL($CallDest); +} + +move_call_last(Src, CallDest, Deallocate) { + x(0) = $Src; + $i_call_last($CallDest, $Deallocate); +} + +i_call_only(CallDest) { + $DISPATCH_REL($CallDest); +} + +move_call_only(Src, CallDest) { + x(0) = $Src; + $i_call_only($CallDest); +} + +DISPATCHX(Dest) { + //| -no_next + DTRACE_GLOBAL_CALL_FROM_EXPORT(c_p, $Dest); + // Dispatchx assumes the Export* is in Arg(0) + I = (&$Dest) - 1; + Dispatchx(); +} + +i_call_ext(Dest) { + SET_CP(c_p, $NEXT_INSTRUCTION); + $DISPATCHX($Dest); +} + +i_move_call_ext(Src, Dest) { + x(0) = $Src; + $i_call_ext($Dest); +} + +i_call_ext_only(Dest) { + $DISPATCHX($Dest); +} + +i_move_call_ext_only(Dest, Src) { + x(0) = $Src; + $i_call_ext_only($Dest); +} + +i_call_ext_last(Dest, Deallocate) { + $deallocate($Deallocate); + $DISPATCHX($Dest); +} + +i_move_call_ext_last(Dest, StackOffset, Src) { + x(0) = $Src; + $i_call_ext_last($Dest, $StackOffset); +} + +APPLY(I, Deallocate, Next) { + //| -no_next + HEAVY_SWAPOUT; + $Next = apply(c_p, reg, $I, $Deallocate); + HEAVY_SWAPIN; +} + +HANDLE_APPLY_ERROR() { + I = handle_error(c_p, I, reg, &bif_export[BIF_apply_3]->info.mfa); + goto post_error_handling; +} + +i_apply() { + BeamInstr *next; + $APPLY(NULL, 0, next); + if (ERTS_LIKELY(next != NULL)) { + SET_CP(c_p, $NEXT_INSTRUCTION); + $DISPATCH_ABS(next); + } + $HANDLE_APPLY_ERROR(); +} + +i_apply_last(Deallocate) { + BeamInstr *next; + $APPLY(I, $Deallocate, next); + if (ERTS_LIKELY(next != NULL)) { + $deallocate($Deallocate); + $DISPATCH_ABS(next); + } + $HANDLE_APPLY_ERROR(); +} + +i_apply_only() { + BeamInstr *next; + $APPLY(I, 0, next); + if (ERTS_LIKELY(next != NULL)) { + $DISPATCH_ABS(next); + } + $HANDLE_APPLY_ERROR(); +} + +FIXED_APPLY(Arity, I, Deallocate, Next) { + //| -no_next + HEAVY_SWAPOUT; + $Next = fixed_apply(c_p, reg, $Arity, $I, $Deallocate); + HEAVY_SWAPIN; +} + +apply(Arity) { + BeamInstr *next; + $FIXED_APPLY($Arity, NULL, 0, next); + if (ERTS_LIKELY(next != NULL)) { + SET_CP(c_p, $NEXT_INSTRUCTION); + $DISPATCH_ABS(next); + } + $HANDLE_APPLY_ERROR(); +} + +apply_last(Arity, Deallocate) { + BeamInstr *next; + $FIXED_APPLY($Arity, I, $Deallocate, next); + if (ERTS_LIKELY(next != NULL)) { + $deallocate($Deallocate); + $DISPATCH_ABS(next); + } + $HANDLE_APPLY_ERROR(); +} + +APPLY_FUN(Next) { + HEAVY_SWAPOUT; + $Next = apply_fun(c_p, r(0), x(1), reg); + HEAVY_SWAPIN; +} + +HANDLE_APPLY_FUN_ERROR() { + goto find_func_info; +} + +DISPATCH_FUN(I) { + SET_I($I); + Dispatchfun(); +} + +i_apply_fun() { + BeamInstr *next; + $APPLY_FUN(next); + if 
(ERTS_LIKELY(next != NULL)) { + SET_CP(c_p, $NEXT_INSTRUCTION); + $DISPATCH_FUN(next); + } + $HANDLE_APPLY_FUN_ERROR(); +} + +i_apply_fun_last(Deallocate) { + BeamInstr *next; + $APPLY_FUN(next); + if (ERTS_LIKELY(next != NULL)) { + $deallocate($Deallocate); + $DISPATCH_FUN(next); + } + $HANDLE_APPLY_FUN_ERROR(); +} + +i_apply_fun_only() { + BeamInstr *next; + $APPLY_FUN(next); + if (ERTS_LIKELY(next != NULL)) { + $DISPATCH_FUN(next); + } + $HANDLE_APPLY_FUN_ERROR(); +} + +CALL_FUN(Fun, Next) { + //| -no_next + HEAVY_SWAPOUT; + $Next = call_fun(c_p, $Fun, reg, THE_NON_VALUE); + HEAVY_SWAPIN; +} + +i_call_fun(Fun) { + BeamInstr *next; + $CALL_FUN($Fun, next); + if (ERTS_LIKELY(next != NULL)) { + SET_CP(c_p, $NEXT_INSTRUCTION); + $DISPATCH_FUN(next); + } + $HANDLE_APPLY_FUN_ERROR(); +} + +i_call_fun_last(Fun, Deallocate) { + BeamInstr *next; + $CALL_FUN($Fun, next); + if (ERTS_LIKELY(next != NULL)) { + $deallocate($Deallocate); + $DISPATCH_FUN(next); + } + $HANDLE_APPLY_FUN_ERROR(); +} + +return() { + SET_I(c_p->cp); + DTRACE_RETURN_FROM_PC(c_p); + + /* + * We must clear the CP to make sure that a stale value does not + * create a false module dependency preventing code upgrading. + * It also means that we can use the CP in stack backtraces. + */ + c_p->cp = 0; + CHECK_TERM(r(0)); + HEAP_SPACE_VERIFIED(0); + DispatchReturn; +} + +get_list(Src, Hd, Tl) { + Eterm* tmp_ptr = list_val($Src); + Eterm hd, tl; + hd = CAR(tmp_ptr); + tl = CDR(tmp_ptr); + $Hd = hd; + $Tl = tl; +} + +i_get(Src, Dst) { + $Dst = erts_pd_hash_get(c_p, $Src); +} + +i_get_hash(Src, Hash, Dst) { + $Dst = erts_pd_hash_get_with_hx(c_p, $Hash, $Src); +} + +i_get_tuple_element(Src, Element, Dst) { + Eterm* src = ADD_BYTE_OFFSET(tuple_val($Src), $Element); + $Dst = *src; +} + +i_get_tuple_element2(Src, Element, Dst) { + Eterm* src; + Eterm* dst; + Eterm E1, E2; + src = ADD_BYTE_OFFSET(tuple_val($Src), $Element); + dst = &($Dst); + E1 = src[0]; + E2 = src[1]; + dst[0] = E1; + dst[1] = E2; +} + +i_get_tuple_element2y(Src, Element, D1, D2) { + Eterm* src; + Eterm E1, E2; + src = ADD_BYTE_OFFSET(tuple_val($Src), $Element); + E1 = src[0]; + E2 = src[1]; + $D1 = E1; + $D2 = E2; +} + +i_get_tuple_element3(Src, Element, Dst) { + Eterm* src; + Eterm* dst; + Eterm E1, E2, E3; + src = ADD_BYTE_OFFSET(tuple_val($Src), $Element); + dst = &($Dst); + E1 = src[0]; + E2 = src[1]; + E3 = src[2]; + dst[0] = E1; + dst[1] = E2; + dst[2] = E3; +} + +i_element := element_group.fetch.execute; + + +element_group.head() { + Eterm element_tuple; +} + +element_group.fetch(Src) { + element_tuple = $Src; +} + +element_group.execute(Fail, Index, Dst) { + Eterm element_index = $Index; + if (ERTS_LIKELY(is_small(element_index) && is_tuple(element_tuple))) { + Eterm* tp = tuple_val(element_tuple); + + if ((signed_val(element_index) >= 1) && + (signed_val(element_index) <= arityval(*tp))) { + $Dst = tp[signed_val(element_index)]; + $NEXT0(); + } + } + c_p->freason = BADARG; + $BIF_ERROR_ARITY_2($Fail, BIF_element_2, element_index, element_tuple); +} + +i_fast_element := fast_element_group.fetch.execute; + +fast_element_group.head() { + Eterm fast_element_tuple; +} + +fast_element_group.fetch(Src) { + fast_element_tuple = $Src; +} + +fast_element_group.execute(Fail, Index, Dst) { + if (ERTS_LIKELY(is_tuple(fast_element_tuple))) { + Eterm* tp = tuple_val(fast_element_tuple); + Eterm pos = $Index; /* Untagged integer >= 1 */ + if (pos <= arityval(*tp)) { + $Dst = tp[pos]; + $NEXT0(); + } + } + c_p->freason = BADARG; + $BIF_ERROR_ARITY_2($Fail, BIF_element_2, 
make_small($Index), fast_element_tuple); +} + +init(Y) { + make_blank($Y); +} + +init2(Y1, Y2) { + make_blank($Y1); + make_blank($Y2); +} + +init3(Y1, Y2, Y3) { + make_blank($Y1); + make_blank($Y2); + make_blank($Y3); +} + +i_make_fun(FunP, NumFree) { + HEAVY_SWAPOUT; + x(0) = new_fun(c_p, reg, (ErlFunEntry *) $FunP, $NumFree); + HEAVY_SWAPIN; +} + +i_trim(Words) { + Uint cp = E[0]; + E += $Words; + E[0] = cp; +} + +move(Src, Dst) { + $Dst = $Src; +} + +move3(S1, D1, S2, D2, S3, D3) { + $D1 = $S1; + $D2 = $S2; + $D3 = $S3; +} + +move_dup(Src, D1, D2) { + $D1 = $D2 = $Src; +} + +move2_par(S1, D1, S2, D2) { + Eterm V1, V2; + V1 = $S1; + V2 = $S2; + $D1 = V1; + $D2 = V2; +} + +move_shift(Src, SD, D) { + Eterm V; + V = $Src; + $D = $SD; + $SD = V; +} + +move_window3(S1, S2, S3, D) { + Eterm xt0, xt1, xt2; + Eterm* y = &$D; + xt0 = $S1; + xt1 = $S2; + xt2 = $S3; + y[0] = xt0; + y[1] = xt1; + y[2] = xt2; +} + +move_window4(S1, S2, S3, S4, D) { + Eterm xt0, xt1, xt2, xt3; + Eterm* y = &$D; + xt0 = $S1; + xt1 = $S2; + xt2 = $S3; + xt3 = $S4; + y[0] = xt0; + y[1] = xt1; + y[2] = xt2; + y[3] = xt3; +} + +move_window5(S1, S2, S3, S4, S5, D) { + Eterm xt0, xt1, xt2, xt3, xt4; + Eterm *y = &$D; + xt0 = $S1; + xt1 = $S2; + xt2 = $S3; + xt3 = $S4; + xt4 = $S5; + y[0] = xt0; + y[1] = xt1; + y[2] = xt2; + y[3] = xt3; + y[4] = xt4; +} + +move_return(Src) { + //| -no_next + x(0) = $Src; + SET_I(c_p->cp); + c_p->cp = 0; + DispatchReturn; +} + +move_x1(Src) { + x(1) = $Src; +} + +move_x2(Src) { + x(2) = $Src; +} + +node(Dst) { + $Dst = erts_this_node->sysname; +} + +put_list(Hd, Tl, Dst) { + HTOP[0] = $Hd; + HTOP[1] = $Tl; + $Dst = make_list(HTOP); + HTOP += 2; +} + +i_put_tuple := i_put_tuple.make.fill; + +i_put_tuple.make(Dst) { + $Dst = make_tuple(HTOP); +} + +i_put_tuple.fill(Arity) { + Eterm* hp = HTOP; + Eterm arity = $Arity; + + //| -no_next + *hp++ = make_arityval(arity); + I = $NEXT_INSTRUCTION; + do { + Eterm term = *I++; + switch (loader_tag(term)) { + case LOADER_X_REG: + *hp++ = x(loader_x_reg_index(term)); + break; + case LOADER_Y_REG: + *hp++ = y(loader_y_reg_index(term)); + break; + default: + *hp++ = term; + break; + } + } while (--arity != 0); + HTOP = hp; + ASSERT(VALID_INSTR(* (Eterm *)I)); + Goto(*I); +} + +self(Dst) { + $Dst = c_p->common.id; +} + +set_tuple_element(Element, Tuple, Offset) { + Eterm* p; + + ASSERT(is_tuple($Tuple)); + p = (Eterm *) ((unsigned char *) tuple_val($Tuple) + $Offset); + *p = $Element; +} + +swap(R1, R2) { + Eterm V = $R1; + $R1 = $R2; + $R2 = V; +} + +swap_temp(R1, R2, Tmp) { + Eterm V = $R1; + $R1 = $R2; + $R2 = $Tmp = V; +} + +test_heap(Nh, Live) { + $GC_TEST(0, $Nh, $Live); +} + +test_heap_1_put_list(Nh, Reg) { + $test_heap($Nh, 1); + $put_list($Reg, x(0), x(0)); +} + +is_integer_allocate(Fail, Src, NeedStack, Live) { + //| -no_prefetch + $is_integer($Fail, $Src); + $AH($NeedStack, 0, $Live); +} + +is_nonempty_list(Fail, Src) { + //| -no_prefetch + if (is_not_list($Src)) { + $FAIL($Fail); + } +} + +is_nonempty_list_test_heap(Fail, Need, Live) { + //| -no_prefetch + $is_nonempty_list($Fail, x(0)); + $test_heap($Need, $Live); +} + +is_nonempty_list_allocate(Fail, Src, Need, Live) { + //| -no_prefetch + $is_nonempty_list($Fail, $Src); + $AH($Need, 0, $Live); +} + +is_nonempty_list_get_list(Fail, Src, Hd, Tl) { + //| -no_prefetch + $is_nonempty_list($Fail, $Src); + $get_list($Src, $Hd, $Tl); +} + +jump(Fail) { + $JUMP($Fail); +} + +move_jump(Fail, Src) { + x(0) = $Src; + $jump($Fail); +} + +// +// Test instructions. 
+// + +is_atom(Fail, Src) { + if (is_not_atom($Src)) { + $FAIL($Fail); + } +} + +is_boolean(Fail, Src) { + if (($Src) != am_true && ($Src) != am_false) { + $FAIL($Fail); + } +} + +is_binary(Fail, Src) { + if (is_not_binary($Src) || binary_bitsize($Src) != 0) { + $FAIL($Fail); + } +} + +is_bitstring(Fail, Src) { + if (is_not_binary($Src)) { + $FAIL($Fail); + } +} + +is_float(Fail, Src) { + if (is_not_float($Src)) { + $FAIL($Fail); + } +} + +is_function(Fail, Src) { + if ( !(is_any_fun($Src)) ) { + $FAIL($Fail); + } +} + +is_function2(Fail, Fun, Arity) { + if (erl_is_function(c_p, $Fun, $Arity) != am_true ) { + $FAIL($Fail); + } +} + +is_integer(Fail, Src) { + if (is_not_integer($Src)) { + $FAIL($Fail); + } +} + +is_list(Fail, Src) { + if (is_not_list($Src) && is_not_nil($Src)) { + $FAIL($Fail); + } +} + +is_map(Fail, Src) { + if (is_not_map($Src)) { + $FAIL($Fail); + } +} + +is_nil(Fail, Src) { + if (is_not_nil($Src)) { + $FAIL($Fail); + } +} + +is_number(Fail, Src) { + if (is_not_integer($Src) && is_not_float($Src)) { + $FAIL($Fail); + } +} + +is_pid(Fail, Src) { + if (is_not_pid($Src)) { + $FAIL($Fail); + } +} + +is_port(Fail, Src) { + if (is_not_port($Src)) { + $FAIL($Fail); + } +} + +is_reference(Fail, Src) { + if (is_not_ref($Src)) { + $FAIL($Fail); + } +} + +is_tagged_tuple(Fail, Src, Arityval, Tag) { + Eterm term = $Src; + if (!(BEAM_IS_TUPLE(term) && + (tuple_val(term))[0] == $Arityval && + (tuple_val(term))[1] == $Tag)) { + $FAIL($Fail); + } +} + +is_tuple(Fail, Src) { + if (is_not_tuple($Src)) { + $FAIL($Fail); + } +} + +is_tuple_of_arity(Fail, Src, Arityval) { + Eterm term = $Src; + if (!(BEAM_IS_TUPLE(term) && *tuple_val(term) == $Arityval)) { + $FAIL($Fail); + } +} + +test_arity(Fail, Pointer, Arity) { + if (*tuple_val($Pointer) != $Arity) { + $FAIL($Fail); + } +} + +i_is_eq_exact_immed(Fail, X, Y) { + if ($X != $Y) { + $FAIL($Fail); + } +} + +i_is_ne_exact_immed(Fail, X, Y) { + if ($X == $Y) { + $FAIL($Fail); + } +} + +is_eq_exact(Fail, X, Y) { + if (!EQ($X, $Y)) { + $FAIL($Fail); + } +} + +i_is_eq_exact_literal(Fail, Src, Literal) { + if (!eq($Src, $Literal)) { + $FAIL($Fail); + } +} + +is_ne_exact(Fail, X, Y) { + if (EQ($X, $Y)) { + $FAIL($Fail); + } +} + +i_is_ne_exact_literal(Fail, Src, Literal) { + if (eq($Src, $Literal)) { + $FAIL($Fail); + } +} + +is_eq(Fail, X, Y) { + CMP_EQ_ACTION($X, $Y, $FAIL($Fail)); +} + +is_ne(Fail, X, Y) { + CMP_NE_ACTION($X, $Y, $FAIL($Fail)); +} + +is_lt(Fail, X, Y) { + CMP_LT_ACTION($X, $Y, $FAIL($Fail)); +} + +is_ge(Fail, X, Y) { + CMP_GE_ACTION($X, $Y, $FAIL($Fail)); +} + +badarg(Fail) { + $BADARG($Fail); + //| -no_next; +} + +badmatch(Src) { + c_p->fvalue = $Src; + c_p->freason = BADMATCH; + goto find_func_info; + //| -no_next; +} + +case_end(Src) { + c_p->fvalue = $Src; + c_p->freason = EXC_CASE_CLAUSE; + goto find_func_info; + //| -no_next; +} + +if_end() { + c_p->freason = EXC_IF_CLAUSE; + goto find_func_info; + //| -no_next; +} + +system_limit(Fail) { + $SYSTEM_LIMIT($Fail); + //| -no_next; +} + +catch(Y, Fail) { + c_p->catches++; + $Y = $Fail; +} + +catch_end(Y) { + c_p->catches--; + make_blank($Y); + if (is_non_value(r(0))) { + c_p->fvalue = NIL; + if (x(1) == am_throw) { + r(0) = x(2); + } else { + if (x(1) == am_error) { + SWAPOUT; + x(2) = add_stacktrace(c_p, x(2), x(3)); + SWAPIN; + } + /* only x(2) is included in the rootset here */ + if (E - HTOP < 3) { + SWAPOUT; + PROCESS_MAIN_CHK_LOCKS(c_p); + FCALLS -= erts_garbage_collect_nobump(c_p, 3, reg+2, 1, FCALLS); + ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); + PROCESS_MAIN_CHK_LOCKS(c_p); 
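+ /* SWAPIN below reloads HTOP and E after the collection; the heap is now guaranteed to hold the 3 words needed for the {'EXIT',Reason} tuple built below. */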
+ SWAPIN; + } + r(0) = TUPLE2(HTOP, am_EXIT, x(2)); + HTOP += 3; + } + } + CHECK_TERM(r(0)); +} + +try_end(Y) { + c_p->catches--; + make_blank($Y); + if (is_non_value(r(0))) { + c_p->fvalue = NIL; + r(0) = x(1); + x(1) = x(2); + x(2) = x(3); + } +} + +try_case_end(Src) { + c_p->fvalue = $Src; + c_p->freason = EXC_TRY_CLAUSE; + goto find_func_info; + //| -no_next; +} + +i_raise() { + Eterm raise_trace = x(2); + Eterm raise_value = x(1); + struct StackTrace *s; + + c_p->fvalue = raise_value; + c_p->ftrace = raise_trace; + s = get_trace_from_exc(raise_trace); + if (s == NULL) { + c_p->freason = EXC_ERROR; + } else { + c_p->freason = PRIMARY_EXCEPTION(s->freason); + } + goto find_func_info; + //| -no_next +} + diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c index 3a5ddde5f4..85013af3ad 100644 --- a/erts/emulator/beam/io.c +++ b/erts/emulator/beam/io.c @@ -63,10 +63,10 @@ extern ErlDrvEntry forker_driver_entry; extern ErlDrvEntry *driver_tab[]; /* table of static drivers, only used during initialization */ erts_driver_t *driver_list; /* List of all drivers, static and dynamic. */ -erts_smp_rwmtx_t erts_driver_list_lock; /* Mutex for driver list */ -static erts_smp_tsd_key_t driver_list_lock_status_key; /*stop recursive locks when calling +erts_rwmtx_t erts_driver_list_lock; /* Mutex for driver list */ +static erts_tsd_key_t driver_list_lock_status_key; /*stop recursive locks when calling driver init */ -static erts_smp_tsd_key_t driver_list_last_error_key; /* Save last DDLL error on a +static erts_tsd_key_t driver_list_last_error_key; /* Save last DDLL error on a per thread basis (for BC interfaces) */ ErtsPTab erts_port erts_align_attribute(ERTS_CACHE_LINE_SIZE); /* The port table */ @@ -94,17 +94,11 @@ static int init_driver(erts_driver_t *, ErlDrvEntry *, DE_Handle *); static void terminate_port(Port *p); static void pdl_init(void); static int driver_failure_term(ErlDrvPort ix, Eterm term, int eof); -#ifdef ERTS_SMP static void driver_monitor_lock_pdl(Port *p); static void driver_monitor_unlock_pdl(Port *p); #define DRV_MONITOR_LOOKUP_PORT_LOCK_PDL(Port) erts_thr_drvport2port((Port), 1) #define DRV_MONITOR_LOCK_PDL(Port) driver_monitor_lock_pdl(Port) #define DRV_MONITOR_UNLOCK_PDL(Port) driver_monitor_unlock_pdl(Port) -#else -#define DRV_MONITOR_LOOKUP_PORT_LOCK_PDL(Port) erts_thr_drvport2port((Port), 0) -#define DRV_MONITOR_LOCK_PDL(Port) /* nothing */ -#define DRV_MONITOR_UNLOCK_PDL(Port) /* nothing */ -#endif #define ERL_SMALL_IO_BIN_LIMIT (4*ERL_ONHEAP_BIN_LIMIT) #define SMALL_WRITE_VEC 16 @@ -122,7 +116,7 @@ static ERTS_INLINE int is_port_ioq_empty(Port *pp) { int res; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(pp)); if (!pp->port_data_lock) res = (erts_ioq_size(&pp->ioq) == 0); else { @@ -144,7 +138,7 @@ Uint erts_port_ioq_size(Port *pp) { ErlDrvSizeT res; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(pp)); if (!pp->port_data_lock) res = erts_ioq_size(&pp->ioq); else { @@ -212,14 +206,13 @@ dtrace_drvport_str(ErlDrvPort drvport, char *port_buf) static ERTS_INLINE void kill_port(Port *pp) { - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(pp)); ERTS_TRACER_CLEAR(&ERTS_TRACER(pp)); erts_ptab_delete_element(&erts_port, &pp->common); /* Time of death */ erts_port_task_free_port(pp); /* In non-smp case the port structure may have been deallocated now */ } -#ifdef ERTS_SMP #ifdef ERTS_ENABLE_LOCK_CHECK int @@ -227,12 +220,11 @@ 
erts_lc_is_port_locked(Port *prt) { if (!prt) return 0; - ERTS_SMP_LC_ASSERT(prt->lock); - return erts_smp_lc_mtx_is_locked(prt->lock); + ERTS_LC_ASSERT(prt->lock); + return erts_lc_mtx_is_locked(prt->lock); } #endif -#endif /* #ifdef ERTS_SMP */ static void initq(Port* prt); @@ -256,25 +248,21 @@ static ERTS_INLINE void port_init_instr(Port *prt * Stuff that needs to be initialized with the port id * in the instrumented case, but not in the normal case. */ -#ifdef ERTS_SMP ASSERT(prt->drv_ptr && prt->lock); if (!prt->drv_ptr->lock) { erts_mtx_init_locked(prt->lock, "port_lock", id, ERTS_LOCK_FLAGS_CATEGORY_IO); } -#endif erts_port_task_init_sched(&prt->sched, id); } #if !ERTS_PORT_INIT_INSTR_NEED_ID static ERTS_INLINE void port_init_instr_abort(Port *prt) { -#ifdef ERTS_SMP ASSERT(prt->drv_ptr && prt->lock); if (!prt->drv_ptr->lock) { erts_mtx_unlock(prt->lock); erts_mtx_destroy(prt->lock); } -#endif erts_port_task_fini_sched(&prt->sched); } #endif @@ -310,7 +298,6 @@ static Port *create_port(char *name, erts_aint32_t state = ERTS_PORT_SFLG_CONNECTED; erts_aint32_t x_pts_flgs = 0; -#ifdef ERTS_SMP ErtsRunQueue *runq; if (!driver_lock) { /* Align size for mutex following port struct */ @@ -318,7 +305,6 @@ static Port *create_port(char *name, size += sizeof(erts_mtx_t); } else -#endif port_size = size = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(Port)); #ifdef DEBUG @@ -352,7 +338,6 @@ static Port *create_port(char *name, p += busy_port_queue_size; } -#ifdef ERTS_SMP if (driver_lock) { prt->lock = driver_lock; erts_mtx_lock(driver_lock); @@ -366,13 +351,9 @@ static Port *create_port(char *name, runq = erts_get_runq_current(NULL); else runq = ERTS_RUNQ_IX(0); - erts_smp_atomic_set_nob(&prt->run_queue, (erts_aint_t) runq); + erts_atomic_set_nob(&prt->run_queue, (erts_aint_t) runq); prt->xports = NULL; -#else - erts_atomic32_init_nob(&prt->refc, 1); - prt->cleanup = 0; -#endif erts_port_task_pre_init_sched(&prt->sched, busy_port_queue); @@ -393,7 +374,7 @@ static Port *create_port(char *name, prt->common.u.alive.reg = NULL; ERTS_PTMR_INIT(prt); erts_port_task_handle_init(&prt->timeout_task); - erts_smp_atomic_init_nob(&prt->psd, (erts_aint_t) NULL); + erts_atomic_init_nob(&prt->psd, (erts_aint_t) NULL); prt->async_open_port = NULL; prt->drv_data = (SWord) 0; prt->os_pid = -1; @@ -420,10 +401,8 @@ static Port *create_port(char *name, #if !ERTS_PORT_INIT_INSTR_NEED_ID port_init_instr_abort(prt); #endif -#ifdef ERTS_SMP if (driver_lock) erts_mtx_unlock(driver_lock); -#endif if (enop) *enop = 0; erts_free(ERTS_ALC_T_PORT, prt); @@ -436,7 +415,7 @@ static Port *create_port(char *name, initq(prt); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (erts_port_schedule_all_ops) x_pts_flgs |= ERTS_PTS_FLG_FORCE_SCHED; @@ -445,29 +424,17 @@ static Port *create_port(char *name, x_pts_flgs |= ERTS_PTS_FLG_PARALLELISM; if (x_pts_flgs) - erts_smp_atomic32_read_bor_nob(&prt->sched.flags, x_pts_flgs); + erts_atomic32_read_bor_nob(&prt->sched.flags, x_pts_flgs); erts_atomic32_set_relb(&prt->state, state); return prt; } -#ifndef ERTS_SMP -void -erts_port_cleanup(Port *prt) -{ - if (prt->drv_ptr && prt->drv_ptr->handle) - erts_ddll_dereference_driver(prt->drv_ptr->handle); - prt->drv_ptr = NULL; - erts_port_dec_refc(prt); -} -#endif void erts_port_free(Port *prt) { -#if defined(ERTS_SMP) || defined(DEBUG) || defined(ERTS_ENABLE_LOCK_CHECK) erts_aint32_t state = erts_atomic32_read_nob(&prt->state); -#endif ERTS_LC_ASSERT(state & (ERTS_PORT_SFLG_INITIALIZING | 
ERTS_PORT_SFLG_FREE)); ASSERT(state & ERTS_PORT_SFLG_PORT_DEBUG); @@ -481,7 +448,6 @@ erts_port_free(Port *prt) prt->async_open_port = NULL; } -#ifdef ERTS_SMP ASSERT(prt->lock); if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) erts_mtx_destroy(prt->lock); @@ -498,7 +464,6 @@ erts_port_free(Port *prt) */ if (prt->drv_ptr->handle) erts_ddll_dereference_driver(prt->drv_ptr->handle); -#endif erts_free(ERTS_ALC_T_PORT, prt); } @@ -533,7 +498,7 @@ erts_save_suspend_process_on_port(Port *prt, Process *process) int saved; erts_aint32_t flags; erts_port_task_sched_lock(&prt->sched); - flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + flags = erts_atomic32_read_nob(&prt->sched.flags); saved = (flags & ERTS_PTS_FLGS_BUSY) && !(flags & ERTS_PTS_FLG_EXIT); if (saved) erts_proclist_store_last(&prt->suspended, erts_proclist_create(process)); @@ -577,16 +542,16 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */ erts_mtx_t *driver_lock = NULL; int cprt_flgs = 0; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; - erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_rwmtx_rlock(&erts_driver_list_lock); if (!driver) { for (driver = driver_list; driver; driver = driver->next) { if (sys_strcmp(driver->name, name) == 0) break; } if (!driver) { - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); ERTS_OPEN_DRIVER_RET(NULL, -3, BADARG); } } @@ -631,19 +596,17 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */ } if (driver == NULL || (driver != &spawn_driver && opts->exit_status)) { - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); ERTS_OPEN_DRIVER_RET(NULL, -3, BADARG); } -#ifdef ERTS_SMP driver_lock = driver->lock; -#endif if (driver->handle != NULL) { erts_ddll_increment_port_count(driver->handle); erts_ddll_reference_driver(driver->handle); } - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); /* * We'll set up the port before calling the start function, @@ -656,9 +619,9 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */ port = create_port(name, driver, driver_lock, cprt_flgs, pid, &port_errno); if (!port) { if (driver->handle) { - erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_rwmtx_rlock(&erts_driver_list_lock); erts_ddll_decrement_port_count(driver->handle); - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); erts_ddll_dereference_driver(driver->handle); } if (port_errno) @@ -726,11 +689,9 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */ if (IS_TRACED_FL(port, F_TRACE_SCHED_PORTS)) { trace_sched_ports_where(port, am_out, am_open); } -#ifdef ERTS_SMP if (port->xports) erts_port_handle_xports(port); ASSERT(!port->xports); -#endif } if (error_type) { @@ -745,9 +706,9 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */ port->linebuf = NULL; } if (driver->handle != NULL) { - erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_rwmtx_rlock(&erts_driver_list_lock); erts_ddll_decrement_port_count(driver->handle); - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); } kill_port(port); erts_port_release(port); @@ -759,7 +720,6 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. 
*/ #undef ERTS_OPEN_DRIVER_RET } -#ifdef ERTS_SMP struct ErtsXPortsList_ { ErtsXPortsList *next; @@ -768,7 +728,6 @@ struct ErtsXPortsList_ { ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(xports_list, ErtsXPortsList, 50, ERTS_ALC_T_XPORTS_LIST) -#endif /* * Driver function to create new instances of a driver @@ -788,7 +747,7 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */ Process *rp; erts_mtx_t *driver_lock = NULL; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; /* Need to be called from a scheduler thread */ if (!erts_get_scheduler_id()) @@ -802,12 +761,12 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */ if (!rp) return ERTS_INVALID_ERL_DRV_PORT; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(creator_port)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(creator_port)); driver = creator_port->drv_ptr; - erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_rwmtx_rlock(&erts_driver_list_lock); if (!erts_ddll_driver_ok(driver->handle)) { - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); return ERTS_INVALID_ERL_DRV_PORT; } @@ -816,35 +775,33 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */ erts_ddll_reference_referenced_driver(driver->handle); } -#ifdef ERTS_SMP driver_lock = driver->lock; -#endif - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); /* Inherit parallelism flag from parent */ if (ERTS_PTS_FLG_PARALLELISM & - erts_smp_atomic32_read_nob(&creator_port->sched.flags)) + erts_atomic32_read_nob(&creator_port->sched.flags)) cprt_flgs |= ERTS_CREATE_PORT_FLAG_PARALLELISM; port = create_port(name, driver, driver_lock, cprt_flgs, pid, NULL); if (!port) { if (driver->handle) { - erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_rwmtx_rlock(&erts_driver_list_lock); erts_ddll_decrement_port_count(driver->handle); - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); erts_ddll_dereference_driver(driver->handle); } return ERTS_INVALID_ERL_DRV_PORT; } - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(port)); - erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_lock(rp, ERTS_PROC_LOCK_LINK); if (ERTS_PROC_IS_EXITING(rp)) { - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (driver->handle) { - erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_rwmtx_rlock(&erts_driver_list_lock); erts_ddll_decrement_port_count(driver->handle); - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); } kill_port(port); erts_port_release(port); @@ -853,23 +810,20 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */ erts_add_link(&ERTS_P_LINKS(port), LINK_PID, pid); erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, port->common.id); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); -#ifdef ERTS_SMP if (!driver_lock) { ErtsXPortsList *xplp = xports_list_alloc(); xplp->port = port; xplp->next = creator_port->xports; creator_port->xports = xplp; } -#endif port->drv_data = (UWord) drv_data; return ERTS_Port2ErlDrvPort(port); } -#ifdef ERTS_SMP int erts_port_handle_xports(Port *prt) { int reds = 0; @@ -898,7 +852,6 @@ int erts_port_handle_xports(Port *prt) prt->xports = NULL; return reds; } -#endif typedef enum { ERTS_TRY_IMM_DRV_CALL_OK, @@ -945,12 +898,12 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp) invalid_sched_flags |= ERTS_PTS_FLG_PARALLELISM; 
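 /* With pre_chk_sched_flags set, an invalid scheduler state fails here, before we attempt to take the port lock. */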
if (sp->pre_chk_sched_flags) { - sp->sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + sp->sched_flags = erts_atomic32_read_nob(&prt->sched.flags); if (sp->sched_flags & invalid_sched_flags) return ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS; } - if (erts_smp_port_trylock(prt) == EBUSY) + if (erts_port_trylock(prt) == EBUSY) return ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK; invalid_state = sp->state; @@ -964,7 +917,7 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp) if (prof_runnable_ports) erts_port_task_sched_lock(&prt->sched); - act = erts_smp_atomic32_read_nob(&prt->sched.flags); + act = erts_atomic32_read_nob(&prt->sched.flags); do { erts_aint32_t new; @@ -976,7 +929,7 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp) } exp = act; new = act | ERTS_PTS_FLG_EXEC_IMM; - act = erts_smp_atomic32_cmpxchg_mb(&prt->sched.flags, new, exp); + act = erts_atomic32_cmpxchg_mb(&prt->sched.flags, new, exp); } while (act != exp); sp->sched_flags = act; @@ -998,14 +951,14 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp) profile_runnable_proc(c_p, am_inactive); reds_left_in = ERTS_BIF_REDS_LEFT(c_p); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); } ASSERT(0 <= reds_left_in && reds_left_in <= CONTEXT_REDS); sp->reds_left_in = reds_left_in; prt->reds = CONTEXT_REDS - reds_left_in; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) { if (prof_runnable_ports && !(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC))) @@ -1043,9 +996,9 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp) if (prof_runnable_ports) erts_port_task_sched_lock(&prt->sched); - act = erts_smp_atomic32_read_band_mb(&prt->sched.flags, + act = erts_atomic32_read_band_mb(&prt->sched.flags, ~ERTS_PTS_FLG_EXEC_IMM); - ERTS_SMP_LC_ASSERT(act & ERTS_PTS_FLG_EXEC_IMM); + ERTS_LC_ASSERT(act & ERTS_PTS_FLG_EXEC_IMM); if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) { if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) @@ -1060,7 +1013,7 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp) erts_port_release(prt); if (c_p) { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); if (reds != (CONTEXT_REDS - sp->reds_left_in)) { int bump_reds = reds - (CONTEXT_REDS - sp->reds_left_in); @@ -1171,7 +1124,7 @@ port_sched_op_reply(Eterm to, Uint32 *ref_num, Eterm msg, Port* prt) prt); if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } } @@ -1189,7 +1142,7 @@ erts_schedule_proc2port_signal(Process *c_p, int sched_res; if (!refp) { if (c_p) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); } else { ASSERT(c_p); @@ -1210,20 +1163,20 @@ erts_schedule_proc2port_signal(Process *c_p, * otherwise, next receive will *not* work * as expected! 
*/ - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); if (ERTS_PROC_PENDING_EXIT(c_p)) { /* need to exit caller instead */ - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); KILL_CATCHES(c_p); c_p->freason = EXC_EXIT; return ERTS_PORT_OP_CALLER_EXIT; } - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); c_p->msg.save = c_p->msg.last; - erts_smp_proc_unlock(c_p, (ERTS_PROC_LOCKS_MSG_RECEIVE + erts_proc_unlock(c_p, (ERTS_PROC_LOCKS_MSG_RECEIVE | ERTS_PROC_LOCK_MAIN)); } @@ -1239,7 +1192,7 @@ erts_schedule_proc2port_signal(Process *c_p, task_flags); if (c_p) - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); if (sched_res != 0) { if (refp) { @@ -1250,9 +1203,9 @@ erts_schedule_proc2port_signal(Process *c_p, * containing the reference created above... */ ASSERT(c_p); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); JOIN_MESSAGE(c_p); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); *refp = NIL; } return ERTS_PORT_OP_DROPPED; @@ -1285,14 +1238,14 @@ send_badsig(Port *prt) { ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND; Process* rp; Eterm connected = ERTS_PORT_GET_CONNECTED(prt); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; ERTS_LC_ASSERT(erts_get_scheduler_id()); ASSERT(is_internal_pid(connected)); rp = erts_proc_lookup_raw(connected); if (rp) { - erts_smp_proc_lock(rp, rp_locks); + erts_proc_lock(rp, rp_locks); if (!ERTS_PROC_IS_EXITING(rp)) (void) erts_send_exit_signal(NULL, prt->common.id, @@ -1303,7 +1256,7 @@ send_badsig(Port *prt) { NULL, 0); if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } /* exit sent */ } /* send_badsig */ @@ -1426,7 +1379,7 @@ call_driver_outputv(int bang_op, ErlDrvSizeT size = evp->size; ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_PORT); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt) + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt) || ERTS_IS_CRASH_DUMPING); @@ -1483,7 +1436,7 @@ port_sig_outputv(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *s case ERTS_PROC2PORT_SIG_EXEC: /* Execution of a scheduled outputv() call */ - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP) reply = am_badarg; @@ -1539,7 +1492,7 @@ call_driver_output(int bang_op, else { ErtsSchedulerData *esdp = erts_get_scheduler_data(); ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_PORT); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt) + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt) || ERTS_IS_CRASH_DUMPING); #ifdef USE_VM_PROBES @@ -1590,7 +1543,7 @@ port_sig_output(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *si case ERTS_PROC2PORT_SIG_EXEC: /* Execution of a scheduled output() call */ - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP) reply = am_badarg; @@ -1805,7 +1758,7 @@ erts_port_output(Process *c_p, * Assumes caller have checked that port is valid... */ - sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + sched_flags = erts_atomic32_read_nob(&prt->sched.flags); if (sched_flags & (busy_flgs|ERTS_PTS_FLG_EXIT)) return ((sched_flags & ERTS_PTS_FLG_EXIT) ? 
ERTS_PORT_OP_DROPPED @@ -2188,7 +2141,7 @@ erts_port_output(Process *c_p, } if (!(flags & ERTS_PORT_SIG_FLG_FORCE)) { - sched_flags = erts_smp_atomic32_read_acqb(&prt->sched.flags); + sched_flags = erts_atomic32_read_acqb(&prt->sched.flags); if (!(sched_flags & ERTS_PTS_FLG_BUSY_PORT)) { if (async_nosuspend) erts_port_task_tmp_handle_detach(ns_pthp); @@ -2413,9 +2366,9 @@ set_port_connected(int bang_op, Process *rp = erts_proc_lookup_raw(connect); if (!rp) return ERTS_PORT_OP_DROPPED; - erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_lock(rp, ERTS_PROC_LOCK_LINK); if (ERTS_PROC_IS_EXITING(rp)) { - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); return ERTS_PORT_OP_DROPPED; } @@ -2427,7 +2380,7 @@ set_port_connected(int bang_op, ERTS_PORT_SET_CONNECTED(prt, connect); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (IS_TRACED_FL(prt, F_TRACE_PORTS)) trace_port(prt, am_getting_linked, connect); @@ -2620,7 +2573,7 @@ port_link_failure(Eterm port_id, Eterm linker) trace_proc(NULL, 0, rp, am_getting_unlinked, port_id); } if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } } } @@ -2706,7 +2659,7 @@ port_monitor_failure(Eterm port_id, Eterm origin, Eterm ref_DOWN) * caller has never seen it yet. */ erts_queue_monitor_message(origin_p, &p_locks, ref_DOWN, am_port, port_id, am_noproc); - erts_smp_proc_unlock(origin_p, p_locks); + erts_proc_unlock(origin_p, p_locks); } /* Origin wants to monitor port Prt. State contains possible error, which has @@ -2734,7 +2687,7 @@ port_monitor(Port *prt, erts_aint32_t state, Eterm origin, erts_add_monitor(&ERTS_P_MONITORS(prt), MON_TARGET, ref, origin, name_or_nil); - erts_smp_proc_unlock(origin_p, p_locks); + erts_proc_unlock(origin_p, p_locks); } else { failure: port_monitor_failure(prt->common.id, origin, ref); @@ -2825,7 +2778,7 @@ port_demonitor_failure(Eterm port_id, Eterm origin, Eterm ref) erts_destroy_monitor(mon1); } - erts_smp_proc_unlock(origin_p, rp_locks); + erts_proc_unlock(origin_p, rp_locks); } /* Origin wants to demonitor port Prt. 
State contains possible error, which has @@ -2855,7 +2808,7 @@ port_demonitor(Port *port, erts_aint32_t state, Eterm origin, Eterm ref) } } if (origin_p) { /* when origin is dying, it won't be found */ - erts_smp_proc_unlock(origin_p, p_locks); + erts_proc_unlock(origin_p, p_locks); } } else { port_demonitor_failure(port->common.id, origin, ref); @@ -2942,10 +2895,10 @@ init_ack_send_reply(Port *port, Eterm resp) if (!is_internal_port(resp)) { Process *rp = erts_proc_lookup_raw(port->async_open_port->to); - erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_lock(rp, ERTS_PROC_LOCK_LINK); erts_remove_link(&ERTS_P_LINKS(port), port->async_open_port->to); erts_remove_link(&ERTS_P_LINKS(rp), port->common.id); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); } port_sched_op_reply(port->async_open_port->to, port->async_open_port->ref, @@ -3009,9 +2962,9 @@ void erts_init_io(int port_tab_size, { ErlDrvEntry** dp; UWord common_element_size; - erts_smp_rwmtx_opt_t drv_list_rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - drv_list_rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; - drv_list_rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED; + erts_rwmtx_opt_t drv_list_rwmtx_opts = ERTS_RWMTX_OPT_DEFAULT_INITER; + drv_list_rwmtx_opts.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; + drv_list_rwmtx_opts.lived = ERTS_RWMTX_LONG_LIVED; erts_atomic64_init_nob(&bytes_in, 0); erts_atomic64_init_nob(&bytes_out, 0); @@ -3019,11 +2972,9 @@ void erts_init_io(int port_tab_size, common_element_size = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(Port)); common_element_size += ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErtsPortTaskBusyPortQ)); common_element_size += 10; /* name */ -#ifdef ERTS_SMP common_element_size += sizeof(erts_mtx_t); init_xports_list_alloc(); -#endif pdl_init(); @@ -3038,12 +2989,12 @@ void erts_init_io(int port_tab_size, else if (port_tab_size < ERTS_MIN_PORTS) port_tab_size = ERTS_MIN_PORTS; - erts_smp_rwmtx_init_opt(&erts_driver_list_lock, &drv_list_rwmtx_opts, "driver_list", NIL, + erts_rwmtx_init_opt(&erts_driver_list_lock, &drv_list_rwmtx_opts, "driver_list", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO); driver_list = NULL; - erts_smp_tsd_key_create(&driver_list_lock_status_key, + erts_tsd_key_create(&driver_list_lock_status_key, "erts_driver_list_lock_status_key"); - erts_smp_tsd_key_create(&driver_list_last_error_key, + erts_tsd_key_create(&driver_list_last_error_key, "erts_driver_list_last_error_key"); erts_ptab_init_table(&erts_port, @@ -3058,8 +3009,8 @@ void erts_init_io(int port_tab_size, sys_init_io(); - erts_smp_tsd_set(driver_list_lock_status_key, (void *) 1); - erts_smp_rwmtx_rwlock(&erts_driver_list_lock); + erts_tsd_set(driver_list_lock_status_key, (void *) 1); + erts_rwmtx_rwlock(&erts_driver_list_lock); init_driver(&fd_driver, &fd_driver_entry, NULL); init_driver(&vanilla_driver, &vanilla_driver_entry, NULL); @@ -3071,11 +3022,11 @@ void erts_init_io(int port_tab_size, for (dp = driver_tab; *dp != NULL; dp++) erts_add_driver_entry(*dp, NULL, 1); - erts_smp_tsd_set(driver_list_lock_status_key, NULL); - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_tsd_set(driver_list_lock_status_key, NULL); + erts_rwmtx_rwunlock(&erts_driver_list_lock); } -#if defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP) +#if defined(ERTS_ENABLE_LOCK_COUNT) static void lcnt_enable_driver_lock_count(erts_driver_t *dp, int enable) { if (dp->lock) { @@ -3164,7 +3115,8 @@ void erts_lcnt_update_port_locks(int enable) { } } -#endif /* 
defined(ERTS_ENABLE_LOCK_COUNT) && defined(ERTS_SMP) */ +#endif /* defined(ERTS_ENABLE_LOCK_COUNT) */ + /* * Buffering of data when using line oriented I/O on ports */ @@ -3343,12 +3295,10 @@ deliver_result(Port *prt, Eterm sender, Eterm pid, Eterm res) ErtsProcLocks rp_locks = 0; int scheduler = erts_get_scheduler_id() != 0; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; ASSERT(!prt || prt->common.id == sender); -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) - ASSERT(!prt || erts_lc_is_port_locked(prt)); -#endif + ERTS_LC_ASSERT(!prt || erts_lc_is_port_locked(prt)); ASSERT(is_internal_port(sender) && is_internal_pid(pid)); @@ -3376,7 +3326,7 @@ deliver_result(Port *prt, Eterm sender, Eterm pid, Eterm res) erts_queue_message(rp, rp_locks, mp, tuple, sender); if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); if (!scheduler) erts_proc_dec_refc(rp); @@ -3407,8 +3357,8 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to, int scheduler = erts_get_scheduler_id() != 0; int trace_send = IS_TRACED_FL(prt, F_TRACE_SEND); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_CHK_NO_PROC_LOCKS; need = 3 + 3 + 2*hlen; @@ -3475,7 +3425,7 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to, ERL_MESSAGE_TOKEN(mp) = am_undefined; erts_queue_message(rp, rp_locks, mp, tuple, prt->common.id); if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); if (!scheduler) erts_proc_dec_refc(rp); } @@ -3510,7 +3460,7 @@ static void flush_linebuf_messages(Port *prt, erts_aint32_t state) LineBufContext lc; int ret; - ERTS_SMP_LC_ASSERT(!prt || erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(!prt || erts_lc_is_port_locked(prt)); if (!prt) return; @@ -3554,8 +3504,8 @@ deliver_vec_message(Port* prt, /* Port */ erts_aint32_t state; int trace_send = IS_TRACED_FL(prt, F_TRACE_SEND); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_CHK_NO_PROC_LOCKS; /* * Check arguments for validity. 
@@ -3646,7 +3596,7 @@ deliver_vec_message(Port* prt, /* Port */ ERL_MESSAGE_TOKEN(mp) = am_undefined; erts_queue_message(rp, rp_locks, mp, tuple, prt->common.id); - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); if (!scheduler) erts_proc_dec_refc(rp); } @@ -3681,8 +3631,8 @@ static void flush_port(Port *p) { int fpe_was_unmasked; - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p)); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(p)); if (p->drv_ptr->flush != NULL) { ERTS_MSACC_PUSH_STATE_M(); @@ -3714,11 +3664,9 @@ static void flush_port(Port *p) if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) { trace_sched_ports_where(p, am_out, am_flush); } -#ifdef ERTS_SMP if (p->xports) erts_port_handle_xports(p); ASSERT(!p->xports); -#endif } if ((erts_atomic32_read_nob(&p->state) & ERTS_PORT_SFLGS_DEAD) == 0 && is_port_ioq_empty(p)) { @@ -3736,8 +3684,8 @@ terminate_port(Port *prt) erts_aint32_t state; ErtsPrtSD *psd; - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); ASSERT(!ERTS_P_LINKS(prt)); ASSERT(!ERTS_P_MONITORS(prt)); @@ -3779,11 +3727,9 @@ terminate_port(Port *prt) (*drv->stop)((ErlDrvData)prt->drv_data); erts_unblock_fpe(fpe_was_unmasked); ERTS_MSACC_POP_STATE_M(); -#ifdef ERTS_SMP if (prt->xports) erts_port_handle_xports(prt); ASSERT(!prt->xports); -#endif } if (is_internal_port(send_closed_port_id) @@ -3791,9 +3737,9 @@ terminate_port(Port *prt) trace_port_send(prt, connected_id, am_closed, 1); if(drv->handle != NULL) { - erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_rwmtx_rlock(&erts_driver_list_lock); erts_ddll_decrement_port_count(drv->handle); - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); } stopq(prt); /* clear queue memory */ if(prt->linebuf != NULL){ @@ -3803,7 +3749,7 @@ terminate_port(Port *prt) erts_cleanup_port_data(prt); - psd = (ErtsPrtSD *) erts_smp_atomic_read_nob(&prt->psd); + psd = (ErtsPrtSD *) erts_atomic_read_nob(&prt->psd); if (psd) erts_free(ERTS_ALC_T_PRTSD, psd); @@ -3816,7 +3762,7 @@ terminate_port(Port *prt) * port has been removed from the port table (in kill_port()). 
*/ if ((state & ERTS_PORT_SFLG_HALT) - && (erts_smp_atomic32_dec_read_nob(&erts_halt_progress) == 0)) { + && (erts_atomic32_dec_read_nob(&erts_halt_progress) == 0)) { erts_port_release(prt); /* We will exit and never return */ erts_flush_async_exit(erts_halt_code, ""); } @@ -3844,7 +3790,7 @@ static void sweep_one_monitor(ErtsMonitor *mon, void *vpsc) goto done; } rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (rmon == NULL) { goto done; } @@ -3918,7 +3864,7 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc) 0); if (xres >= 0) { if (rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) { - erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND); + erts_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND); rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND; } /* We didn't exit the process and it is traced */ @@ -3928,7 +3874,7 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc) erts_destroy_link(rlnk); } - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } } erts_destroy_link(lnk); @@ -3963,7 +3909,7 @@ port_fire_one_monitor(ErtsMonitor *mon, void *ctx0) UnUseTmpHeapNoproc(3); rmon = erts_remove_monitor(&ERTS_P_MONITORS(origin), mon->ref); - erts_smp_proc_unlock(origin, origin_locks); + erts_proc_unlock(origin, origin_locks); if (rmon) { erts_destroy_monitor(rmon); @@ -3989,8 +3935,8 @@ erts_deliver_port_exit(Port *prt, Eterm from, Eterm reason, int send_closed, Eterm modified_reason; erts_aint32_t state, set_state_flags; - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); modified_reason = (reason == am_kill) ? am_killed : reason; @@ -4351,7 +4297,7 @@ port_sig_control(Port *prt, prt); if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); goto done; } } @@ -4404,7 +4350,7 @@ erts_port_control(Process* c_p, int copy; ErtsProc2PortSigData *sigdp; - sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + sched_flags = erts_atomic32_read_nob(&prt->sched.flags); if (sched_flags & ERTS_PTS_FLG_EXIT) return ERTS_PORT_OP_BADARG; @@ -4716,11 +4662,11 @@ port_sig_call(Port *prt, prt); if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); goto done; } if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } } } @@ -4754,7 +4700,7 @@ erts_port_call(Process* c_p, erts_aint32_t sched_flags; ErtsProc2PortSigData *sigdp; - sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + sched_flags = erts_atomic32_read_nob(&prt->sched.flags); if (sched_flags & ERTS_PTS_FLG_EXIT) { return ERTS_PORT_OP_BADARG; } @@ -4972,7 +4918,7 @@ port_sig_info(Port *prt, prt); } if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } return ERTS_PORT_REDS_INFO; } @@ -5041,7 +4987,7 @@ typedef struct { Uint sched_id; Eterm pid; Uint32 refn[ERTS_REF_NUMBERS]; - erts_smp_atomic32_t refc; + erts_atomic32_t refc; } ErtsIOBytesReq; static void @@ -5091,10 +5037,10 @@ reply_io_bytes(void *vreq) if (req->sched_id == sched_id) rp_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } - if (erts_smp_atomic32_dec_read_nob(&req->refc) == 0) + if (erts_atomic32_dec_read_nob(&req->refc) == 0) erts_free(ERTS_ALC_T_IOB_REQ, req); } @@ -5117,16 +5063,14 @@ erts_request_io_bytes(Process *c_p) req->refn[0] = refn[0]; req->refn[1] = refn[1]; 
req->refn[2] = refn[2]; - erts_smp_atomic32_init_nob(&req->refc, + erts_atomic32_init_nob(&req->refc, (erts_aint32_t) erts_no_schedulers); -#ifdef ERTS_SMP if (erts_no_schedulers > 1) erts_schedule_multi_misc_aux_work(1, erts_no_schedulers, reply_io_bytes, (void *) req); -#endif reply_io_bytes((void *) req); @@ -5211,14 +5155,14 @@ set_busy_port(ErlDrvPort dprt, int on) DTRACE_CHARBUF(port_str, 16); #endif - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; prt = erts_drvport2port(dprt); if (prt == ERTS_INVALID_ERL_DRV_PORT) return; if (on) { - flags = erts_smp_atomic32_read_bor_acqb(&prt->sched.flags, + flags = erts_atomic32_read_bor_acqb(&prt->sched.flags, ERTS_PTS_FLG_BUSY_PORT); if (flags & ERTS_PTS_FLG_BUSY_PORT) return; /* Already busy */ @@ -5234,7 +5178,7 @@ set_busy_port(ErlDrvPort dprt, int on) } #endif } else { - flags = erts_smp_atomic32_read_band_acqb(&prt->sched.flags, + flags = erts_atomic32_read_band_acqb(&prt->sched.flags, ~ERTS_PTS_FLG_BUSY_PORT); if (!(flags & ERTS_PTS_FLG_BUSY_PORT)) return; /* Already non-busy */ @@ -5328,7 +5272,7 @@ int get_port_flags(ErlDrvPort ix) if (prt == ERTS_INVALID_ERL_DRV_PORT) return 0; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); flags = 0; if (state & ERTS_PORT_SFLG_BINARY_IO) @@ -5344,8 +5288,8 @@ void erts_raw_port_command(Port* p, byte* buf, Uint len) int fpe_was_unmasked; ERTS_MSACC_PUSH_STATE_M(); - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p)); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(p)); if (len > (Uint) INT_MAX) erts_exit(ERTS_ABORT_EXIT, @@ -5374,10 +5318,10 @@ int async_ready(Port *p, void* data) { int need_free = 1; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (p) { - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(p)); if (p->drv_ptr->ready_async != NULL) { ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_PORT); #ifdef USE_VM_PROBES @@ -5446,24 +5390,17 @@ erts_stale_drv_select(Eterm port, switch (mode) { case ERL_DRV_READ | ERL_DRV_WRITE: type = "Input/Output"; - goto deselect; + break; case ERL_DRV_WRITE: type = "Output"; - goto deselect; + break; case ERL_DRV_READ: type = "Input"; + break; - deselect: - if (deselect) { - driver_select(drv_port, hndl, - mode | ERL_DRV_USE_NO_CALLBACK, - 0); - } - break; default: - type = "Event"; - if (deselect) - driver_event(drv_port, hndl, NULL); - break; + type = ""; + } + if (deselect) { + driver_select(drv_port, hndl, + mode | ERL_DRV_USE_NO_CALLBACK, + 0); } dsbufp = erts_create_logger_dsbuf(); @@ -5562,8 +5499,8 @@ void driver_report_exit(ErlDrvPort ix, int status) if (prt == ERTS_INVALID_ERL_DRV_PORT) return; - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); pid = ERTS_PORT_GET_CONNECTED(prt); ASSERT(is_internal_pid(pid)); @@ -5586,7 +5523,7 @@ void driver_report_exit(ErlDrvPort ix, int status) ERL_MESSAGE_TOKEN(mp) = am_undefined; erts_queue_message(rp, rp_locks, mp, tuple, prt->common.id); - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); if (!scheduler) erts_proc_dec_refc(rp); } @@ -6226,7 +6163,7 @@ driver_deliver_term(Port *prt, Eterm to, ErlDrvTermData* data, int len) } if (rp) { if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); if (!scheduler) erts_proc_dec_refc(rp); } @@ -6241,9 +6178,7 @@ static ERTS_INLINE int deliver_term_check_port(ErlDrvTermData port_id, Eterm *connected_p, 
Port **trace_prt) { -#ifdef ERTS_SMP ErtsThrPrgrDelayHandle dhndl = erts_thr_progress_unmanaged_delay(); -#endif erts_aint32_t state; int res = 1; Port *prt = erts_port_lookup_raw((Eterm) port_id); @@ -6261,24 +6196,20 @@ deliver_term_check_port(ErlDrvTermData port_id, Eterm *connected_p, goto done; } if (connected_p) { -#ifdef ERTS_SMP if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) ETHR_MEMBAR(ETHR_LoadLoad); -#endif *connected_p = ERTS_PORT_GET_CONNECTED(prt); } done: -#ifdef ERTS_SMP if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) { - ERTS_SMP_LC_ASSERT(!prt || !erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(!prt || !erts_lc_is_port_locked(prt)); erts_thr_progress_unmanaged_continue(dhndl); ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore); } else -#endif if (res == 1) { - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); *trace_prt = prt; } return res; @@ -6306,13 +6237,13 @@ driver_output_term(ErlDrvPort drvport, ErlDrvTermData* data, int len) erts_aint32_t state; Port* prt; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; /* NOTE! It is *not* safe to access 'drvport' from unmanaged threads. */ prt = erts_drvport2port_state(drvport, &state); if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; /* invalid (dead) */ - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (state & ERTS_PORT_SFLG_CLOSING) return 0; @@ -6349,16 +6280,14 @@ driver_send_term(ErlDrvPort drvport, * internal data representation for ErlDrvPort. */ Port* prt = NULL; - ERTS_SMP_CHK_NO_PROC_LOCKS; -#ifdef ERTS_SMP + ERTS_CHK_NO_PROC_LOCKS; if (erts_thr_progress_is_managed_thread()) -#endif { erts_aint32_t state; prt = erts_drvport2port_state(drvport, &state); if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; /* invalid (dead) */ - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (state & ERTS_PORT_SFLG_CLOSING) return 0; } @@ -6378,11 +6307,11 @@ int driver_output_binary(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, Port* prt = erts_drvport2port_state(ix, &state); ErtsSchedulerData *esdp = erts_get_scheduler_data(); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (state & ERTS_PORT_SFLG_CLOSING) return 0; @@ -6392,6 +6321,7 @@ int driver_output_binary(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, else erts_atomic64_add_nob(&bytes_in, (erts_aint64_t) (hlen + len)); if (state & ERTS_PORT_SFLG_DISTRIBUTION) { + erts_atomic64_inc_nob(&prt->dist_entry->in); return erts_net_message(prt, prt->dist_entry, (byte*) hbuf, hlen, @@ -6417,12 +6347,12 @@ int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, Port* prt = erts_drvport2port_state(ix, &state); ErtsSchedulerData *esdp = erts_get_scheduler_data(); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (state & ERTS_PORT_SFLG_CLOSING) return 0; @@ -6432,6 +6362,7 @@ int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, else erts_atomic64_add_nob(&bytes_in, (erts_aint64_t) (hlen + len)); if (state & ERTS_PORT_SFLG_DISTRIBUTION) { + erts_atomic64_inc_nob(&prt->dist_entry->in); if (len == 0) return erts_net_message(prt, prt->dist_entry, @@ -6456,7 +6387,7 @@ int 
driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, int driver_output(ErlDrvPort ix, char* buf, ErlDrvSizeT len) { - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; return driver_output2(ix, NULL, 0, buf, len); } @@ -6472,7 +6403,7 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, erts_aint32_t state; ErtsSchedulerData *esdp = erts_get_scheduler_data(); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; ASSERT(vec->size >= skip); if (vec->size <= skip) @@ -6483,7 +6414,7 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (state & ERTS_PORT_SFLG_CLOSING) return 0; @@ -6697,7 +6628,6 @@ static ERTS_INLINE void pdl_destroy(ErlDrvPDL pdl) erts_free(ERTS_ALC_T_PORT_DATA_LOCK, pdl); } -#ifdef ERTS_SMP static void driver_monitor_lock_pdl(Port *p) { if (p->port_data_lock) { @@ -6706,7 +6636,7 @@ static void driver_monitor_lock_pdl(Port *p) { /* Now we either have the port lock or the port_data_lock */ ERTS_LC_ASSERT(!p->port_data_lock || erts_lc_mtx_is_locked(&(p->port_data_lock->mtx))); - ERTS_SMP_LC_ASSERT(p->port_data_lock + ERTS_LC_ASSERT(p->port_data_lock || erts_lc_is_port_locked(p)); } @@ -6714,14 +6644,13 @@ static void driver_monitor_unlock_pdl(Port *p) { /* We should either have the port lock or the port_data_lock */ ERTS_LC_ASSERT(!p->port_data_lock || erts_lc_mtx_is_locked(&(p->port_data_lock->mtx))); - ERTS_SMP_LC_ASSERT(p->port_data_lock + ERTS_LC_ASSERT(p->port_data_lock || erts_lc_is_port_locked(p)); if (p->port_data_lock) { driver_pdl_unlock(p->port_data_lock); } } -#endif /* * exported driver_pdl_* functions ... @@ -6924,7 +6853,7 @@ int driver_set_timer(ErlDrvPort ix, unsigned long t) { Port* prt = erts_drvport2port(ix); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; @@ -6941,7 +6870,7 @@ int driver_cancel_timer(ErlDrvPort ix) Port* prt = erts_drvport2port(ix); if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); erts_cancel_port_timer(prt); return 0; } @@ -6952,11 +6881,11 @@ driver_read_timer(ErlDrvPort ix, unsigned long* t) Port* prt = erts_drvport2port(ix); Sint64 left; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); left = erts_read_port_timer(prt); if (left < 0) @@ -6971,7 +6900,7 @@ int driver_get_now(ErlDrvNowData *now_data) { Uint mega,secs,micro; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (now_data == NULL) { return -1; @@ -7045,7 +6974,7 @@ static int do_driver_monitor_process(Port *prt, erts_add_monitor(&ERTS_P_MONITORS(prt), MON_ORIGIN, ref, rp->common.id, NIL); erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, ref, prt->common.id, NIL); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); erts_ref_to_driver_monitor(ref,monitor); return 0; } @@ -7059,7 +6988,7 @@ int driver_monitor_process(ErlDrvPort drvport, { Port *prt; int ret; -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) ErtsSchedulerData *sched = erts_get_scheduler_data(); #endif @@ -7069,7 +6998,7 @@ int driver_monitor_process(ErlDrvPort drvport, /* Now (in SMP) we should have either the port lock (if we 
have a scheduler) or the port data lock (if we're a driver thread) */ - ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock)); + ERTS_LC_ASSERT((sched != NULL || prt->port_data_lock)); ret = do_driver_monitor_process(prt,process,monitor); DRV_MONITOR_UNLOCK_PDL(prt); return ret; @@ -7104,7 +7033,7 @@ static int do_driver_demonitor_process(Port *prt, const ErlDrvMonitor *monitor) if (rp) { ErtsMonitor *rmon; rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (rmon != NULL) { erts_destroy_monitor(rmon); } @@ -7117,7 +7046,7 @@ int driver_demonitor_process(ErlDrvPort drvport, { Port *prt; int ret; -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) ErtsSchedulerData *sched = erts_get_scheduler_data(); #endif @@ -7127,7 +7056,7 @@ int driver_demonitor_process(ErlDrvPort drvport, /* Now we should have either the port lock (if we have a scheduler) or the port data lock (if we're a driver thread) */ - ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock)); + ERTS_LC_ASSERT((sched != NULL || prt->port_data_lock)); ret = do_driver_demonitor_process(prt,monitor); DRV_MONITOR_UNLOCK_PDL(prt); return ret; @@ -7158,7 +7087,7 @@ ErlDrvTermData driver_get_monitored_process(ErlDrvPort drvport, { Port *prt; ErlDrvTermData ret; -#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK) +#if defined(ERTS_ENABLE_LOCK_CHECK) ErtsSchedulerData *sched = erts_get_scheduler_data(); #endif @@ -7168,7 +7097,7 @@ ErlDrvTermData driver_get_monitored_process(ErlDrvPort drvport, /* Now we should have either the port lock (if we have a scheduler) or the port data lock (if we're a driver thread) */ - ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock)); + ERTS_LC_ASSERT((sched != NULL || prt->port_data_lock)); ret = do_driver_get_monitored_process(prt,monitor); DRV_MONITOR_UNLOCK_PDL(prt); return ret; @@ -7189,7 +7118,7 @@ void erts_fire_port_monitor(Port *prt, Eterm ref) int fpe_was_unmasked; ERTS_MSACC_PUSH_STATE_M(); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); ASSERT(prt->drv_ptr != NULL); DRV_MONITOR_LOCK_PDL(prt); if (erts_lookup_monitor(ERTS_P_MONITORS(prt), ref) == NULL) { @@ -7236,11 +7165,11 @@ driver_failure_term(ErlDrvPort ix, Eterm term, int eof) erts_aint32_t state; Port* prt = erts_drvport2port_state(ix, &state); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (prt->async_open_port) init_ack_send_reply(prt, prt->common.id); @@ -7275,7 +7204,7 @@ int driver_exit(ErlDrvPort ix, int err) ErtsLink *lnk, *rlnk = NULL; Eterm connected; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; @@ -7288,10 +7217,8 @@ int driver_exit(ErlDrvPort ix, int err) lnk = erts_remove_link(&ERTS_P_LINKS(prt), connected); -#ifdef ERTS_SMP if (rp) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); -#endif + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (rlnk != NULL) { erts_destroy_link(rlnk); @@ -7345,7 +7272,7 @@ ErlDrvTermData driver_mk_atom(char* string) sys_strlen(string), ERTS_ATOM_ENC_LATIN1, 1); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; return (ErlDrvTermData) am; } @@ -7354,27 +7281,27 @@ ErlDrvTermData driver_mk_port(ErlDrvPort ix) Port* prt = erts_drvport2port(ix); if (prt == 
ERTS_INVALID_ERL_DRV_PORT) return (ErlDrvTermData) NIL; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); return (ErlDrvTermData) prt->common.id; } ErlDrvTermData driver_connected(ErlDrvPort ix) { Port* prt = erts_drvport2port(ix); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return NIL; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); return ERTS_PORT_GET_CONNECTED(prt); } ErlDrvTermData driver_caller(ErlDrvPort ix) { Port* prt = erts_drvport2port(ix); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return NIL; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); return prt->caller; } @@ -7383,20 +7310,20 @@ int driver_lock_driver(ErlDrvPort ix) Port* prt = erts_drvport2port(ix); DE_Handle* dh; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - erts_smp_rwmtx_rwlock(&erts_driver_list_lock); + erts_rwmtx_rwlock(&erts_driver_list_lock); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if ((dh = (DE_Handle*)prt->drv_ptr->handle ) == NULL) { - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_rwmtx_rwunlock(&erts_driver_list_lock); return -1; } erts_ddll_lock_driver(dh, prt->drv_ptr->name); - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_rwmtx_rwunlock(&erts_driver_list_lock); return 0; } @@ -7404,9 +7331,9 @@ int driver_lock_driver(ErlDrvPort ix) static int maybe_lock_driver_list(void) { void *rec_lock; - rec_lock = erts_smp_tsd_get(driver_list_lock_status_key); + rec_lock = erts_tsd_get(driver_list_lock_status_key); if (rec_lock == 0) { - erts_smp_rwmtx_rwlock(&erts_driver_list_lock); + erts_rwmtx_rwlock(&erts_driver_list_lock); return 1; } return 0; @@ -7414,7 +7341,7 @@ static int maybe_lock_driver_list(void) static void maybe_unlock_driver_list(int doit) { if (doit) { - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_rwmtx_rwunlock(&erts_driver_list_lock); } } /* @@ -7437,7 +7364,7 @@ void *driver_dl_open(char * path) { void *ptr; int res; - int *last_error_p = erts_smp_tsd_get(driver_list_last_error_key); + int *last_error_p = erts_tsd_get(driver_list_last_error_key); int locked = maybe_lock_driver_list(); if ((res = erts_sys_ddll_open(path, &ptr, NULL)) == 0) { maybe_unlock_driver_list(locked); @@ -7445,7 +7372,7 @@ void *driver_dl_open(char * path) } else { if (!last_error_p) { last_error_p = erts_alloc(ERTS_ALC_T_DDLL_ERRCODES, sizeof(int)); - erts_smp_tsd_set(driver_list_last_error_key,last_error_p); + erts_tsd_set(driver_list_last_error_key,last_error_p); } *last_error_p = res; maybe_unlock_driver_list(locked); @@ -7457,7 +7384,7 @@ void *driver_dl_sym(void * handle, char *func_name) { void *ptr; int res; - int *last_error_p = erts_smp_tsd_get(driver_list_lock_status_key); + int *last_error_p = erts_tsd_get(driver_list_lock_status_key); int locked = maybe_lock_driver_list(); if ((res = erts_sys_ddll_sym(handle, func_name, &ptr)) == 0) { maybe_unlock_driver_list(locked); @@ -7465,7 +7392,7 @@ void *driver_dl_sym(void * handle, char *func_name) } else { if (!last_error_p) { last_error_p = erts_alloc(ERTS_ALC_T_DDLL_ERRCODES, sizeof(int)); - erts_smp_tsd_set(driver_list_lock_status_key,last_error_p); + erts_tsd_set(driver_list_lock_status_key,last_error_p); } *last_error_p = res; maybe_unlock_driver_list(locked); @@ -7485,7 
+7412,7 @@ int driver_dl_close(void *handle) char *driver_dl_error(void) { char *res; - int *last_error_p = erts_smp_tsd_get(driver_list_lock_status_key); + int *last_error_p = erts_tsd_get(driver_list_lock_status_key); int locked = maybe_lock_driver_list(); res = erts_ddll_error((last_error_p != NULL) ? (*last_error_p) : ERL_DE_ERROR_UNSPECIFIED); maybe_unlock_driver_list(locked); @@ -7523,20 +7450,8 @@ driver_system_info(ErlDrvSysInfo *sip, size_t si_size) sip->driver_minor_version = ERL_DRV_EXTENDED_MINOR_VERSION; sip->erts_version = ERLANG_VERSION; sip->otp_release = ERLANG_OTP_RELEASE; - sip->thread_support = -#ifdef USE_THREADS - 1 -#else - 0 -#endif - ; - sip->smp_support = -#ifdef ERTS_SMP - 1 -#else - 0 -#endif - ; + sip->thread_support = 1; + sip->smp_support = 1; } @@ -7562,11 +7477,7 @@ driver_system_info(ErlDrvSysInfo *sip, size_t si_size) */ if (si_size >= ERL_DRV_SYS_INFO_SIZE(dirty_scheduler_support)) { sip->dirty_scheduler_support = -#ifdef ERTS_DIRTY_SCHEDULERS 1 -#else - 0 -#endif ; } @@ -7593,14 +7504,6 @@ no_output_callback(ErlDrvData drv_data, char *buf, ErlDrvSizeT len) } static void -no_event_callback(ErlDrvData drv_data, ErlDrvEvent event, ErlDrvEventData event_data) -{ - Port *prt = get_current_port(); - report_missing_drv_callback(prt, "Event", "event()"); - driver_event(ERTS_Port2ErlDrvPort(prt), event, NULL); -} - -static void no_ready_input_callback(ErlDrvData drv_data, ErlDrvEvent event) { Port *prt = get_current_port(); @@ -7646,7 +7549,6 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle) drv->version.minor = de->minor_version; drv->flags = de->driver_flags; drv->handle = handle; -#ifdef ERTS_SMP if (drv->flags & ERL_DRV_FLAG_USE_PORT_LOCKING) { drv->lock = NULL; } else { @@ -7658,7 +7560,6 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle) erts_mtx_init(drv->lock, "driver_lock", driver_id, ERTS_LOCK_FLAGS_CATEGORY_IO); } -#endif drv->entry = de; drv->start = de->start; @@ -7669,7 +7570,6 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle) drv->outputv = de->outputv; drv->control = de->control; drv->call = de->call; - drv->event = de->event ? de->event : no_event_callback; drv->ready_input = de->ready_input ? de->ready_input : no_ready_input_callback; drv->ready_output = de->ready_output ? de->ready_output : no_ready_output_callback; drv->timeout = de->timeout ? de->timeout : no_timeout_callback; @@ -7701,12 +7601,10 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle) void erts_destroy_driver(erts_driver_t *drv) { -#ifdef ERTS_SMP if (drv->lock) { - erts_smp_mtx_destroy(drv->lock); + erts_mtx_destroy(drv->lock); erts_free(ERTS_ALC_T_DRIVER_LOCK, drv->lock); } -#endif erts_free(ERTS_ALC_T_DRIVER, drv); } @@ -7717,7 +7615,7 @@ erts_destroy_driver(erts_driver_t *drv) void add_driver_entry(ErlDrvEntry *drv){ void *rec_lock; - rec_lock = erts_smp_tsd_get(driver_list_lock_status_key); + rec_lock = erts_tsd_get(driver_list_lock_status_key); /* * Ignore result of erts_add_driver_entry, the init is not * allowed to fail when drivers are added by drivers. 
@@ -7731,7 +7629,7 @@ int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle, int driver_list_lo int res; if (!driver_list_locked) { - erts_smp_rwmtx_rwlock(&erts_driver_list_lock); + erts_rwmtx_rwlock(&erts_driver_list_lock); } dp->next = driver_list; @@ -7742,7 +7640,7 @@ int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle, int driver_list_lo driver_list = dp; if (!driver_list_locked) { - erts_smp_tsd_set(driver_list_lock_status_key, (void *) 1); + erts_tsd_set(driver_list_lock_status_key, (void *) 1); } res = init_driver(dp, de, handle); @@ -7759,8 +7657,8 @@ int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle, int driver_list_lo } if (!driver_list_locked) { - erts_smp_tsd_set(driver_list_lock_status_key, NULL); - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_tsd_set(driver_list_lock_status_key, NULL); + erts_rwmtx_rwunlock(&erts_driver_list_lock); } return res; } @@ -7771,9 +7669,9 @@ int remove_driver_entry(ErlDrvEntry *drv) erts_driver_t *dp; void *rec_lock; - rec_lock = erts_smp_tsd_get(driver_list_lock_status_key); + rec_lock = erts_tsd_get(driver_list_lock_status_key); if (rec_lock == NULL) { - erts_smp_rwmtx_rwlock(&erts_driver_list_lock); + erts_rwmtx_rwlock(&erts_driver_list_lock); } dp = driver_list; while (dp && dp->entry != drv) @@ -7781,7 +7679,7 @@ int remove_driver_entry(ErlDrvEntry *drv) if (dp) { if (dp->handle) { if (rec_lock == NULL) { - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_rwmtx_rwunlock(&erts_driver_list_lock); } return -1; } @@ -7795,12 +7693,12 @@ int remove_driver_entry(ErlDrvEntry *drv) } erts_destroy_driver(dp); if (rec_lock == NULL) { - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_rwmtx_rwunlock(&erts_driver_list_lock); } return 1; } if (rec_lock == NULL) { - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_rwmtx_rwunlock(&erts_driver_list_lock); } return 0; } diff --git a/erts/emulator/beam/macros.tab b/erts/emulator/beam/macros.tab new file mode 100644 index 0000000000..0d175a7ec6 --- /dev/null +++ b/erts/emulator/beam/macros.tab @@ -0,0 +1,174 @@ +// -*- c -*- +// +// %CopyrightBegin% +// +// Copyright Ericsson AB 2017. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// %CopyrightEnd% +// + +// +// Use if there is a garbage collection before storing to a +// general destination (either X or Y register). +// + +REFRESH_GEN_DEST() { + dst_ptr = REG_TARGET_PTR(dst); +} + +// $Offset is relative to the start of the instruction (not to the +// location of the failure label reference). Since combined +// instructions may increment the instruction pointer (e.g. in +// 'increment') for some of the instructions in the group, we actually +// use a virtual start position common to all instructions in the +// group. To calculate the correct virtual position, we will need to +// add $IP_ADJUSTMENT to the offset. ($IP_ADJUSTMENT will usually be +// zero, except in a few bit syntax instructions.) 
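Before the macro definitions that follow, a hedged illustration of this relative-addressing convention; the operand slot and variable names are invented, and IP_ADJUSTMENT is shown as a plain variable rather than the generated constant:

    /* Sketch only: a fail label is encoded as a word offset relative to
     * the virtual start position shared by the whole instruction group. */
    BeamInstr *virtual_start = I + ip_adjustment; /* ip_adjustment is usually 0 */
    Sint fail_offset = (Sint) I[1];               /* invented operand slot */
    I = virtual_start + fail_offset;              /* same as I += fail_offset + ip_adjustment */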
+
+SET_I_REL(Offset) {
+    ASSERT(VALID_INSTR(*(I + ($Offset))));
+    I += $Offset + $IP_ADJUSTMENT;
+}
+
+SET_CP_I_ABS(Target) {
+    c_p->i = $Target;
+    ASSERT(VALID_INSTR(*c_p->i));
+}
+
+SET_REL_I(Dst, Offset) {
+    $Dst = I + ($Offset);
+    ASSERT(VALID_INSTR(*$Dst));
+}
+
+FAIL(Fail) {
+    //| -no_prefetch
+    $SET_I_REL($Fail);
+    Goto(*I);
+}
+
+JUMP(Fail) {
+    //| -no_next
+    $SET_I_REL($Fail);
+    Goto(*I);
+}
+
+GC_TEST(Ns, Nh, Live) {
+    Uint need = $Nh + $Ns;
+    if (ERTS_UNLIKELY(E - HTOP < need)) {
+        SWAPOUT;
+        PROCESS_MAIN_CHK_LOCKS(c_p);
+        FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, $Live, FCALLS);
+        ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+        PROCESS_MAIN_CHK_LOCKS(c_p);
+        SWAPIN;
+    }
+    HEAP_SPACE_VERIFIED($Nh);
+}
+
+GC_TEST_PRESERVE(NeedHeap, Live, PreserveTerm) {
+    Uint need = $NeedHeap;
+    if (ERTS_UNLIKELY(E - HTOP < need)) {
+        SWAPOUT;
+        reg[$Live] = $PreserveTerm;
+        PROCESS_MAIN_CHK_LOCKS(c_p);
+        FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, $Live+1, FCALLS);
+        ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+        PROCESS_MAIN_CHK_LOCKS(c_p);
+        $PreserveTerm = reg[$Live];
+        SWAPIN;
+    }
+    HEAP_SPACE_VERIFIED($NeedHeap);
+}
+
+
+// Make sure that there are NeedStack + NeedHeap + 1 words available
+// on the combined heap/stack segment, then allocate NeedStack + 1
+// words on the stack and save CP.
+AH(NeedStack, NeedHeap, Live) {
+    unsigned needed = $NeedStack + 1;
+    $GC_TEST(needed, $NeedHeap, $Live);
+    E -= needed;
+    *E = make_cp(c_p->cp);
+    c_p->cp = 0;
+}
+
+NEXT0() {
+    //| -no_next
+    SET_I((BeamInstr *) $NEXT_INSTRUCTION);
+    Goto(*I);
+}
+
+NEXT(Addr) {
+    //| -no_next
+    SET_I((BeamInstr *) $Addr);
+    Goto(*I);
+}
+
+FAIL_BODY() {
+    //| -no_prefetch
+    goto find_func_info;
+}
+
+FAIL_HEAD_OR_BODY(Fail) {
+    //| -no_prefetch
+
+    /*
+     * In a correctly working program, we expect failures in
+     * guards to be more likely than failures in bodies.
+     */
+
+    if (ERTS_LIKELY($Fail)) {
+        $FAIL($Fail);
+    }
+    goto find_func_info;
+}
+
+BADARG(Fail) {
+    c_p->freason = BADARG;
+    $FAIL_HEAD_OR_BODY($Fail);
+}
+
+BADARITH0() {
+    c_p->freason = BADARITH;
+    goto find_func_info;
+}
+
+SYSTEM_LIMIT(Fail) {
+    c_p->freason = SYSTEM_LIMIT;
+    $FAIL_HEAD_OR_BODY($Fail);
+}
+
+BIF_ERROR_ARITY_1(Fail, BIF, Op1) {
+    //| -no_prefetch
+    if (ERTS_LIKELY($Fail)) {
+        $FAIL($Fail);
+    }
+    reg[0] = $Op1;
+    SWAPOUT;
+    I = handle_error(c_p, I, reg, &bif_export[$BIF]->info.mfa);
+    goto post_error_handling;
+}
+
+BIF_ERROR_ARITY_2(Fail, BIF, Op1, Op2) {
+    //| -no_prefetch
+    if (ERTS_LIKELY($Fail)) {
+        $FAIL($Fail);
+    }
+    reg[0] = $Op1;
+    reg[1] = $Op2;
+    SWAPOUT;
+    I = handle_error(c_p, I, reg, &bif_export[$BIF]->info.mfa);
+    goto post_error_handling;
+}
diff --git a/erts/emulator/beam/map_instrs.tab b/erts/emulator/beam/map_instrs.tab
new file mode 100644
index 0000000000..bbb2f49b66
--- /dev/null
+++ b/erts/emulator/beam/map_instrs.tab
@@ -0,0 +1,159 @@
+// -*- c -*-
+//
+// %CopyrightBegin%
+//
+// Copyright Ericsson AB 2017. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// +// %CopyrightEnd% +// + +ensure_map(Map) { + if (is_not_map($Map)) { + c_p->freason = BADMAP; + c_p->fvalue = $Map; + $FAIL_BODY(); + } +} + +new_map(Dst, Live, N) { + Eterm res; + + HEAVY_SWAPOUT; + res = new_map(c_p, reg, $Live, $N, $NEXT_INSTRUCTION); + HEAVY_SWAPIN; + $REFRESH_GEN_DEST(); + $Dst = res; + $NEXT($NEXT_INSTRUCTION+$N); +} + +i_new_small_map_lit(Dst, Live, Keys) { + Eterm res; + Uint n; + Eterm keys = $Keys; + + HEAVY_SWAPOUT; + res = new_small_map_lit(c_p, reg, keys, $Live, $NEXT_INSTRUCTION); + HEAVY_SWAPIN; + $REFRESH_GEN_DEST(); + $Dst = res; + n = arityval(*tuple_val(keys)); + $NEXT($NEXT_INSTRUCTION+n); +} + +i_get_map_element(Fail, Src, Key, Dst) { + Eterm res = get_map_element($Src, $Key); + if (is_non_value(res)) { + $FAIL($Fail); + } + $Dst = res; +} + +i_get_map_element_hash(Fail, Src, Key, Hx, Dst) { + Eterm res = get_map_element_hash($Src, $Key, $Hx); + if (is_non_value(res)) { + $FAIL($Fail); + } + $Dst = res; +} + +i_get_map_elements(Fail, Src, N) { + Eterm map; + BeamInstr *fs; + Uint sz, n; + + map = $Src; + + /* This instruction assumes Arg1 is a map, + * i.e. that it follows a test is_map if needed. + */ + + n = (Uint)$N / 3; + fs = $NEXT_INSTRUCTION; + + if (is_flatmap(map)) { + flatmap_t *mp; + Eterm *ks; + Eterm *vs; + + mp = (flatmap_t *)flatmap_val(map); + sz = flatmap_get_size(mp); + + if (sz == 0) { + $FAIL($Fail); + } + + ks = flatmap_get_keys(mp); + vs = flatmap_get_values(mp); + + while(sz) { + if (EQ((Eterm) fs[0], *ks)) { + PUT_TERM_REG(*vs, fs[1]); + n--; + fs += 3; + /* no more values to fetch, we are done */ + if (n == 0) { + $NEXT(fs); + } + } + ks++, sz--, vs++; + } + $FAIL($Fail); + } else { + const Eterm *v; + Uint32 hx; + ASSERT(is_hashmap(map)); + while(n--) { + hx = fs[2]; + ASSERT(hx == hashmap_make_hash((Eterm)fs[0])); + if ((v = erts_hashmap_get(hx, (Eterm)fs[0], map)) == NULL) { + $FAIL($Fail); + } + PUT_TERM_REG(*v, fs[1]); + fs += 3; + } + $NEXT(fs); + } +} + +update_map_assoc(Src, Dst, Live, N) { + Eterm res; + Uint live = $Live; + + reg[live] = $Src; + HEAVY_SWAPOUT; + res = update_map_assoc(c_p, reg, live, $N, $NEXT_INSTRUCTION); + HEAVY_SWAPIN; + ASSERT(is_value(res)); + $REFRESH_GEN_DEST(); + $Dst = res; + $NEXT($NEXT_INSTRUCTION+$N); +} + +update_map_exact(Fail, Src, Dst, Live, N) { + Eterm res; + Uint live = $Live; + + reg[live] = $Src; + HEAVY_SWAPOUT; + res = update_map_exact(c_p, reg, live, $N, $NEXT_INSTRUCTION); + HEAVY_SWAPIN; + if (is_value(res)) { + $REFRESH_GEN_DEST(); + $Dst = res; + $NEXT($NEXT_INSTRUCTION+$N); + } else { + $FAIL_HEAD_OR_BODY($Fail); + } +} diff --git a/erts/emulator/beam/module.c b/erts/emulator/beam/module.c index 7987cb2eb5..baeec115ea 100644 --- a/erts/emulator/beam/module.c +++ b/erts/emulator/beam/module.c @@ -39,9 +39,9 @@ static IndexTable module_tables[ERTS_NUM_CODE_IX]; -erts_smp_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX]; +erts_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX]; -static erts_smp_atomic_t tot_module_bytes; +static erts_atomic_t tot_module_bytes; /* SMP note: Active module table lookup and current module instance can be * read without any locks. Old module instances are protected by @@ -49,8 +49,6 @@ static erts_smp_atomic_t tot_module_bytes; * Staging table is protected by the "code_ix lock". 
*/ -#include "erl_smp.h" - void module_info(fmtfn_t to, void *to_arg) { index_info(to, to_arg, &module_tables[erts_active_code_ix()]); @@ -84,7 +82,7 @@ void erts_module_instance_init(struct erl_module_instance* modi) static Module* module_alloc(Module* tmpl) { Module* obj = (Module*) erts_alloc(ERTS_ALC_T_MODULE, sizeof(Module)); - erts_smp_atomic_add_nob(&tot_module_bytes, sizeof(Module)); + erts_atomic_add_nob(&tot_module_bytes, sizeof(Module)); obj->module = tmpl->module; obj->slot.index = -1; @@ -98,7 +96,7 @@ static Module* module_alloc(Module* tmpl) static void module_free(Module* mod) { erts_free(ERTS_ALC_T_MODULE, mod); - erts_smp_atomic_add_nob(&tot_module_bytes, -sizeof(Module)); + erts_atomic_add_nob(&tot_module_bytes, -sizeof(Module)); } void init_module_table(void) @@ -120,10 +118,10 @@ void init_module_table(void) } for (i=0; i<ERTS_NUM_CODE_IX; i++) { - erts_smp_rwmtx_init(&the_old_code_rwlocks[i], "old_code", make_small(i), + erts_rwmtx_init(&the_old_code_rwlocks[i], "old_code", make_small(i), ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); } - erts_smp_atomic_init_nob(&tot_module_bytes, 0); + erts_atomic_init_nob(&tot_module_bytes, 0); } @@ -159,14 +157,14 @@ static Module* put_module(Eterm mod, IndexTable* mod_tab) oldsz = index_table_sz(mod_tab); res = (Module*) index_put_entry(mod_tab, (void*) &e); newsz = index_table_sz(mod_tab); - erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz)); + erts_atomic_add_nob(&tot_module_bytes, (newsz - oldsz)); return res; } Module* erts_put_module(Eterm mod) { - ERTS_SMP_LC_ASSERT(erts_initialized == 0 + ERTS_LC_ASSERT(erts_initialized == 0 || erts_has_code_write_permission()); return put_module(mod, &module_tables[erts_staging_code_ix()]); @@ -184,7 +182,7 @@ int module_code_size(ErtsCodeIndex code_ix) int module_table_sz(void) { - return erts_smp_atomic_read_nob(&tot_module_bytes); + return erts_atomic_read_nob(&tot_module_bytes); } #ifdef DEBUG @@ -233,7 +231,7 @@ void module_start_staging(void) copy_module(dst_mod, src_mod); } newsz = index_table_sz(dst); - erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz)); + erts_atomic_add_nob(&tot_module_bytes, (newsz - oldsz)); entries_at_start_staging = dst->entries; IF_DEBUG(dbg_load_code_ix = erts_staging_code_ix()); @@ -251,7 +249,7 @@ void module_end_staging(int commit) oldsz = index_table_sz(tab); index_erase_latest_from(tab, entries_at_start_staging); newsz = index_table_sz(tab); - erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz)); + erts_atomic_add_nob(&tot_module_bytes, (newsz - oldsz)); } IF_DEBUG(dbg_load_code_ix = -1); diff --git a/erts/emulator/beam/module.h b/erts/emulator/beam/module.h index 9d258d5dbf..9a81e6035b 100644 --- a/erts/emulator/beam/module.h +++ b/erts/emulator/beam/module.h @@ -72,29 +72,29 @@ int erts_is_old_code_rlocked(ErtsCodeIndex); #if ERTS_GLB_INLINE_INCL_FUNC_DEF -extern erts_smp_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX]; +extern erts_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX]; ERTS_GLB_INLINE void erts_rwlock_old_code(ErtsCodeIndex code_ix) { - erts_smp_rwmtx_rwlock(&the_old_code_rwlocks[code_ix]); + erts_rwmtx_rwlock(&the_old_code_rwlocks[code_ix]); } ERTS_GLB_INLINE void erts_rwunlock_old_code(ErtsCodeIndex code_ix) { - erts_smp_rwmtx_rwunlock(&the_old_code_rwlocks[code_ix]); + erts_rwmtx_rwunlock(&the_old_code_rwlocks[code_ix]); } ERTS_GLB_INLINE void erts_rlock_old_code(ErtsCodeIndex code_ix) { - erts_smp_rwmtx_rlock(&the_old_code_rwlocks[code_ix]); + 
erts_rwmtx_rlock(&the_old_code_rwlocks[code_ix]);
 }
 
 ERTS_GLB_INLINE void erts_runlock_old_code(ErtsCodeIndex code_ix)
 {
-    erts_smp_rwmtx_runlock(&the_old_code_rwlocks[code_ix]);
+    erts_rwmtx_runlock(&the_old_code_rwlocks[code_ix]);
 }
 
 #ifdef ERTS_ENABLE_LOCK_CHECK
 ERTS_GLB_INLINE int erts_is_old_code_rlocked(ErtsCodeIndex code_ix)
 {
-    return erts_smp_lc_rwmtx_is_rlocked(&the_old_code_rwlocks[code_ix]);
+    return erts_lc_rwmtx_is_rlocked(&the_old_code_rwlocks[code_ix]);
 }
 #endif
diff --git a/erts/emulator/beam/msg_instrs.tab b/erts/emulator/beam/msg_instrs.tab
new file mode 100644
index 0000000000..8055a8616f
--- /dev/null
+++ b/erts/emulator/beam/msg_instrs.tab
@@ -0,0 +1,390 @@
+// -*- c -*-
+//
+// %CopyrightBegin%
+//
+// Copyright Ericsson AB 2017. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// %CopyrightEnd%
+//
+
+// /*
+// * Skeleton for receive statement:
+// *
+// *      recv_mark L1                Optional
+// *      call make_ref/monitor       Optional
+// *      ...
+// *      recv_set L1                 Optional
+// * L1:          <-------------------+
+// *              <-----------+       |
+// *                          |       |
+// *      loop_rec L2 --------+---+   |
+// *      ...                 |   |   |
+// *      remove_message      |   |   |
+// *      jump L3             |   |   |
+// *      ...                 |   |   |
+// *      loop_rec_end L1 ----+   |   |
+// * L2:          <---------------+   |
+// *      wait L1 --------------------+     or wait_timeout
+// *      timeout
+// *
+// * L3:  Code after receive...
+// *
+// */
+
+recv_mark(Dest) {
+    /*
+     * Save the current position in the message buffer and the
+     * label for the loop_rec/2 instruction for the
+     * receive statement.
+     */
+    $SET_REL_I(c_p->msg.mark, $Dest);
+    c_p->msg.saved_last = c_p->msg.last;
+}
+
+i_recv_set() {
+    /*
+     * If the mark is valid (points to the loop_rec/2
+     * instruction that follows), we know that the saved
+     * position points to the first message that could
+     * possibly be matched out.
+     *
+     * If the mark is invalid, we do nothing, meaning that
+     * we will look through all messages in the message queue.
+     */
+    if (c_p->msg.mark == (BeamInstr *) ($NEXT_INSTRUCTION)) {
+        c_p->msg.save = c_p->msg.saved_last;
+    }
+    SET_I($NEXT_INSTRUCTION);
+    goto loop_rec_top__;
+    //| -no_next
+}
+
+i_loop_rec(Dest) {
+    //| -no_prefetch
+
+    /*
+     * Pick up the next message and place it in x(0).
+     * If no message, jump to a wait or wait_timeout instruction.
+     */
+
+    ErtsMessage* msgp;
+
+    /* Entry point from recv_set */
+    loop_rec_top__:
+    ;
+
+    /*
+     * We need to disable GC while matching messages
+     * in the queue. This is because messages with data outside
+     * the heap would be corrupted by a GC.
+     */
+    ASSERT(!(c_p->flags & F_DELAY_GC));
+    c_p->flags |= F_DELAY_GC;
+
+    /* Entry point from loop_rec_end */
+    loop_rec__:
+
+    PROCESS_MAIN_CHK_LOCKS(c_p);
+
+    msgp = PEEK_MESSAGE(c_p);
+
+    if (!msgp) {
+        erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+        /* Make sure messages won't pass exit signals...
*/ + if (ERTS_PROC_PENDING_EXIT(c_p)) { + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + SWAPOUT; + c_p->flags &= ~F_DELAY_GC; + c_p->arity = 0; + goto do_schedule; /* Will be rescheduled for exit */ + } + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); + msgp = PEEK_MESSAGE(c_p); + if (msgp) { + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + } else { + c_p->flags &= ~F_DELAY_GC; + $SET_I_REL($Dest); + Goto(*I); /* Jump to a wait or wait_timeout instruction */ + } + } + if (is_non_value(ERL_MESSAGE_TERM(msgp))) { + SWAPOUT; /* erts_decode_dist_message() may write to heap... */ + if (!erts_decode_dist_message(c_p, ERTS_PROC_LOCK_MAIN, msgp, 0)) { + /* + * A corrupt distribution message that we weren't able to decode; + * remove it... + */ + /* No swapin should be needed */ + ASSERT(HTOP == c_p->htop && E == c_p->stop); + /* TODO: Add DTrace probe for this bad message situation? */ + UNLINK_MESSAGE(c_p, msgp); + msgp->next = NULL; + erts_cleanup_messages(msgp); + goto loop_rec__; + } + SWAPIN; + } + r(0) = ERL_MESSAGE_TERM(msgp); +} + +remove_message() { + //| -no_prefetch + + /* + * Remove a (matched) message from the message queue. + */ + + ErtsMessage* msgp; + PROCESS_MAIN_CHK_LOCKS(c_p); + + ERTS_CHK_MBUF_SZ(c_p); + + msgp = PEEK_MESSAGE(c_p); + + if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) { + save_calls(c_p, &exp_receive); + } + if (ERL_MESSAGE_TOKEN(msgp) == NIL) { +#ifdef USE_VM_PROBES + if (DT_UTAG(c_p) != NIL) { + if (DT_UTAG_FLAGS(c_p) & DT_UTAG_PERMANENT) { + SEQ_TRACE_TOKEN(c_p) = am_have_dt_utag; + } else { + DT_UTAG(c_p) = NIL; + SEQ_TRACE_TOKEN(c_p) = NIL; + } + } else { +#endif + SEQ_TRACE_TOKEN(c_p) = NIL; +#ifdef USE_VM_PROBES + } + DT_UTAG_FLAGS(c_p) &= ~DT_UTAG_SPREADING; +#endif + } else if (ERL_MESSAGE_TOKEN(msgp) != am_undefined) { + Eterm msg; + SEQ_TRACE_TOKEN(c_p) = ERL_MESSAGE_TOKEN(msgp); +#ifdef USE_VM_PROBES + if (ERL_MESSAGE_TOKEN(msgp) == am_have_dt_utag) { + if (DT_UTAG(c_p) == NIL) { + DT_UTAG(c_p) = ERL_MESSAGE_DT_UTAG(msgp); + } + DT_UTAG_FLAGS(c_p) |= DT_UTAG_SPREADING; + } else { +#endif + ASSERT(is_tuple(SEQ_TRACE_TOKEN(c_p))); + ASSERT(SEQ_TRACE_TOKEN_ARITY(c_p) == 5); + ASSERT(is_small(SEQ_TRACE_TOKEN_SERIAL(c_p))); + ASSERT(is_small(SEQ_TRACE_TOKEN_LASTCNT(c_p))); + ASSERT(is_small(SEQ_TRACE_TOKEN_FLAGS(c_p))); + ASSERT(is_pid(SEQ_TRACE_TOKEN_SENDER(c_p))); + c_p->seq_trace_lastcnt = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p)); + if (c_p->seq_trace_clock < unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p))) { + c_p->seq_trace_clock = unsigned_val(SEQ_TRACE_TOKEN_SERIAL(c_p)); + } + msg = ERL_MESSAGE_TERM(msgp); + seq_trace_output(SEQ_TRACE_TOKEN(c_p), msg, SEQ_TRACE_RECEIVE, + c_p->common.id, c_p); +#ifdef USE_VM_PROBES + } +#endif + } +#ifdef USE_VM_PROBES + if (DTRACE_ENABLED(message_receive)) { + Eterm token2 = NIL; + DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE); + Sint tok_label = 0; + Sint tok_lastcnt = 0; + Sint tok_serial = 0; + + dtrace_proc_str(c_p, receiver_name); + token2 = SEQ_TRACE_TOKEN(c_p); + if (have_seqtrace(token2)) { + tok_label = signed_val(SEQ_TRACE_T_LABEL(token2)); + tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token2)); + tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token2)); + } + DTRACE6(message_receive, + receiver_name, size_object(ERL_MESSAGE_TERM(msgp)), + c_p->msg.len - 1, tok_label, tok_lastcnt, tok_serial); + } +#endif + UNLINK_MESSAGE(c_p, msgp); + JOIN_MESSAGE(c_p); + CANCEL_TIMER(c_p); + + erts_save_message_in_proc(c_p, msgp); + c_p->flags &= ~F_DELAY_GC; + + if (ERTS_IS_GC_DESIRED_INTERNAL(c_p, HTOP, E)) { + /* + * We want to 
GC soon but we leave a few
+         * reductions giving the message some time
+         * to turn into garbage.
+         */
+        ERTS_VBUMP_LEAVE_REDS_INTERNAL(c_p, 5, FCALLS);
+    }
+
+    ERTS_DBG_CHK_REDS(c_p, FCALLS);
+    ERTS_CHK_MBUF_SZ(c_p);
+
+    ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+    PROCESS_MAIN_CHK_LOCKS(c_p);
+}
+
+loop_rec_end(Dest) {
+    //| -no_next
+    /*
+     * Advance the save pointer to the next message (the current
+     * message didn't match), then jump to the loop_rec instruction.
+     */
+
+    ASSERT(c_p->flags & F_DELAY_GC);
+
+    $SET_I_REL($Dest);
+    SAVE_MESSAGE(c_p);
+    if (FCALLS > 0 || FCALLS > neg_o_reds) {
+        FCALLS--;
+        goto loop_rec__;
+    }
+
+    c_p->flags &= ~F_DELAY_GC;
+    $SET_CP_I_ABS(I);
+    SWAPOUT;
+    c_p->arity = 0;
+    c_p->current = NULL;
+    goto do_schedule;
+}
+
+timeout_locked() {
+    /*
+     * A timeout has occurred. Reset the save pointer so that the next
+     * receive statement will examine the first message first.
+     */
+
+    erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+    $timeout();
+}
+
+timeout() {
+    if (IS_TRACED_FL(c_p, F_TRACE_RECEIVE)) {
+        trace_receive(c_p, am_clock_service, am_timeout, NULL);
+    }
+    if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
+        save_calls(c_p, &exp_timeout);
+    }
+    c_p->flags &= ~F_TIMO;
+    JOIN_MESSAGE(c_p);
+}
+
+TIMEOUT_VALUE() {
+    c_p->freason = EXC_TIMEOUT_VALUE;
+    goto find_func_info;
+    //| -no_next
+}
+
+i_wait_error_locked() {
+    erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+    $TIMEOUT_VALUE();
+}
+
+i_wait_error() {
+    $TIMEOUT_VALUE();
+}
+
+wait_timeout_unlocked_int := wait.lock.int.execute;
+wait_timeout_locked_int := wait.int.execute;
+
+wait_timeout_unlocked := wait.lock.src.execute;
+wait_timeout_locked := wait.src.execute;
+
+wait_unlocked := wait.lock.execute;
+wait_locked := wait.unlocked.execute;
+
+wait.lock() {
+    erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+}
+
+wait.unlocked() {
+}
+
+wait.int(Int) {
+    /*
+     * If we have already set the timer, we must NOT set it again. Therefore,
+     * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag.
+     */
+    if ((c_p->flags & (F_INSLPQUEUE | F_TIMO)) == 0) {
+        BeamInstr** pi = (BeamInstr **) c_p->def_arg_reg;
+        *pi = $NEXT_INSTRUCTION;
+        erts_set_proc_timer_uword(c_p, $Int);
+    }
+}
+
+wait.src(Src) {
+    /*
+     * If we have already set the timer, we must NOT set it again. Therefore,
+     * we must test the F_INSLPQUEUE flag as well as the F_TIMO flag.
+     */
+    if ((c_p->flags & (F_INSLPQUEUE | F_TIMO)) == 0) {
+        Eterm timeout_value = $Src;
+        if (timeout_value == make_small(0)) {
+            erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+            $NEXT0();
+        } else if (timeout_value == am_infinity) {
+            c_p->flags |= F_TIMO;
+        } else {
+            int tres = erts_set_proc_timer_term(c_p, timeout_value);
+            if (tres == 0) {
+                /*
+                 * The timer routine will set c_p->i to the value in
+                 * c_p->def_arg_reg[0]. Note that it is safe to use this
+                 * location because there are no living x registers in
+                 * a receive statement.
+                 */
+                BeamInstr** pi = (BeamInstr**) c_p->def_arg_reg;
+                *pi = $NEXT_INSTRUCTION;
+            } else { /* Wrong time */
+                erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+                c_p->freason = EXC_TIMEOUT_VALUE;
+                goto find_func_info;
+            }
+        }
+    }
+}
+
+//
+// Prepare to wait indefinitely for a new message to arrive
+// (or the time set above if falling through from above).
+// When a new message arrives, control will be transferred
+// to the loop_rec instruction (at label L1). In case of
+// timeout, control will be transferred to the timeout
+// instruction following the wait_timeout instruction.
+//
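The continuation trick documented in wait.int/wait.src above has a counterpart on the timer side: when the timeout fires, the timer callback reads the saved instruction pointer back out of def_arg_reg. A hedged sketch of that callback (the real one lives elsewhere in the runtime and must also make the process runnable again):

    /* Hedged sketch, not the actual ERTS timer callback: resume the
     * process at the continuation that wait.int/wait.src stored in
     * def_arg_reg, and flag the timeout for the timeout instruction. */
    static void timeout_proc_sketch(Process *p)
    {
        p->i = *((BeamInstr **) p->def_arg_reg); /* saved $NEXT_INSTRUCTION */
        p->flags |= F_TIMO;                      /* cleared later by timeout() */
        p->flags &= ~F_INSLPQUEUE;
        /* ...enqueue p on a run queue... */
    }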
+
+wait.execute(JumpTarget) {
+    $SET_REL_I(c_p->i, $JumpTarget); /* L1 */
+    SWAPOUT;
+    c_p->arity = 0;
+
+    if (!ERTS_PTMR_IS_TIMED_OUT(c_p)) {
+        erts_atomic32_read_band_relb(&c_p->state,
+                                     ~ERTS_PSFLG_ACTIVE);
+    }
+    ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+    erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+    c_p->current = NULL;
+    goto do_schedule;
+    //| -no_next
+}
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index 44613c7d85..75ff40606b 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -59,6 +59,7 @@ put_tuple u==0 d => too_old_compiler
 # All the other instructions.
 #
 
+%cold
 label L
 i_func_info I a a I
 int_code_end
@@ -68,6 +69,8 @@
 i_debug_breakpoint
 i_return_time_trace
 i_return_to_trace
 i_yield
+trace_jump W
+%hot
 
 return
 
@@ -96,22 +99,19 @@ line Loc | func_info M F A => func_info M F A | line Loc
 
 line I
 
-
-%macro: allocate Allocate -pack
-%macro: allocate_zero AllocateZero -pack
-%macro: allocate_heap AllocateHeap -pack
-%macro: allocate_heap_zero AllocateHeapZero -pack
-%macro: test_heap TestHeap -pack
-
 allocate t t
 allocate_heap t I t
-deallocate I
+
+%cold
+deallocate Q
+%hot
+
 init y
 allocate_zero t t
 allocate_heap_zero t I t
 
 trim N Remaining => i_trim N
-i_trim I
+i_trim t
 
 test_heap I t
 
@@ -122,8 +122,6 @@ init2 y y
 init3 y y y
 init Y1 | init Y2 | init Y3 => init3 Y1 Y2 Y3
 init Y1 | init Y2 => init2 Y1 Y2
-%macro: init2 Init2 -pack
-%macro: init3 Init3 -pack
 
 # Selecting values
 
@@ -160,28 +158,20 @@ is_tuple Fail=f S | select_tuple_arity S=d Fail=f Size=u Rest=* => \
 select_tuple_arity S=d Fail=f Size=u Rest=* => \
   gen_select_tuple_arity(S, Fail, Size, Rest)
 
-i_select_val_bins x f I
-i_select_val_bins y f I
+i_select_val_bins xy f I
 
-i_select_val_lins x f I
-i_select_val_lins y f I
+i_select_val_lins xy f I
 
-i_select_val2 x f c c f f
-i_select_val2 y f c c f f
+i_select_val2 xy f c c
 
-i_select_tuple_arity x f I
-i_select_tuple_arity y f I
+i_select_tuple_arity xy f I
 
-i_select_tuple_arity2 x f A A f f
-i_select_tuple_arity2 y f A A f f
+i_select_tuple_arity2 xy f A A
 
-i_jump_on_val_zero x f I
-i_jump_on_val_zero y f I
+i_jump_on_val_zero xy f I
 
-i_jump_on_val x f I I
-i_jump_on_val y f I I
+i_jump_on_val xy f I W
 
-%macro: get_list GetList -pack
 get_list xy xy xy
 
 # The following get_list instructions using x(0) are frequently used.
@@ -201,31 +191,27 @@ try Y F => catch Y F
 try_case Y => try_end Y
 try_end y
 
+%cold
 try_case_end s
+%hot
 
 # Destructive set tuple element
 
-set_tuple_element s d P
+set_tuple_element s S P
 
 # Get tuple element
 
-%macro: i_get_tuple_element GetTupleElement -pack
 i_get_tuple_element xy P x
 
 %cold
 i_get_tuple_element xy P y
 %hot
 
-%macro: i_get_tuple_element2 GetTupleElement2 -pack
 i_get_tuple_element2 x P x
-
-%macro: i_get_tuple_element2y GetTupleElement2Y -pack
 i_get_tuple_element2y x P y y
 
-%macro: i_get_tuple_element3 GetTupleElement3 -pack
 i_get_tuple_element3 x P x
 
-%macro: is_number IsNumber -fail_action
 %cold
 is_number f x
 is_number f y
@@ -236,6 +222,11 @@ is_number Fail Literal=q => move Literal x | is_number Fail x
 
 jump f
 
+#
+# Exception raising instructions. Infrequently executed.
+#
+
+%cold
 case_end NotInX=cy => move NotInX x | case_end x
 badmatch NotInX=cy => move NotInX x | badmatch x
 
@@ -257,9 +248,14 @@ i_raise
 badarg j
 system_limit j
 
+%hot
+
+#
+# Move instructions.
+# + move C=cxy x==0 | jump Lbl => move_jump Lbl C -%macro: move_jump MoveJump -nonext move_jump f ncxy # Movement to and from the stack is common @@ -283,10 +279,6 @@ move_window X1=x X2=x X3=x X4=x Y1=y Y4=y | move X5=x Y5=y | succ(Y4,Y5) => \ move_window X1=x X2=x X3=x Y1=y Y3=y => move_window3 X1 X2 X3 Y1 move_window X1=x X2=x X3=x X4=x Y1=y Y4=y => move_window4 X1 X2 X3 X4 Y1 -%macro: move_window3 MoveWindow3 -pack -%macro: move_window4 MoveWindow4 -pack -%macro: move_window5 MoveWindow5 -pack - move_window3 x x x y move_window4 x x x x y move_window5 x x x x x y @@ -311,10 +303,8 @@ swap_temp R1 R2 Tmp | line Loc | call_ext_only Live Addr | \ swap_temp R1 R2 Tmp | line Loc | call_ext_last Live Addr D | \ is_killed(Tmp, Live) => swap R1 R2 | line Loc | call_ext_last Live Addr D -%macro: swap_temp SwapTemp -pack swap_temp x xy x -%macro: swap Swap -pack swap x xy move Src=x D1=x | move Src=x D2=x => move_dup Src D1 D2 @@ -358,17 +348,13 @@ move C=aiq X=x==2 => move_x2 C move_x1 c move_x2 c -%macro: move_shift MoveShift -pack move_shift x x x move_shift y x x move_shift x y x move_shift x x y -%macro: move_dup MoveDup -pack move_dup xy x xy -%macro: move2_par Move2Par -pack - move2_par x y x y move2_par y x y x move2_par x x x x @@ -380,7 +366,6 @@ move2_par y x x y move2_par x x y x move2_par y x x x -%macro: move3 Move3 -pack move3 x y x y x y move3 y x y x y x move3 x x x x x x @@ -390,7 +375,6 @@ move3 x x x x x x move S=n D=y => init D move S=c D=y => move S x | move x D -%macro:move Move -pack move x x move x y move y x @@ -410,13 +394,15 @@ move r y loop_rec Fail x==0 | smp_mark_target_label(Fail) => i_loop_rec Fail -label L | wait_timeout Fail Src | smp_already_locked(L) => label L | i_wait_timeout_locked Fail Src -wait_timeout Fail Src => i_wait_timeout Fail Src -i_wait_timeout Fail Src=aiq => gen_literal_timeout(Fail, Src) -i_wait_timeout_locked Fail Src=aiq => gen_literal_timeout_locked(Fail, Src) +label L | wait_timeout Fail Src | smp_already_locked(L) => \ + label L | wait_timeout_locked Src Fail +wait_timeout Fail Src => wait_timeout_unlocked Src Fail + +wait_timeout_unlocked Src=aiq Fail => gen_literal_timeout(Fail, Src) +wait_timeout_locked Src=aiq Fail => gen_literal_timeout_locked(Fail, Src) label L | wait Fail | smp_already_locked(L) => label L | wait_locked Fail -wait Fail | smp() => wait_unlocked Fail +wait Fail => wait_unlocked Fail label L | timeout | smp_already_locked(L) => label L | timeout_locked @@ -425,15 +411,19 @@ timeout timeout_locked i_loop_rec f loop_rec_end f -wait f wait_locked f wait_unlocked f -i_wait_timeout f I -i_wait_timeout f s -i_wait_timeout_locked f I -i_wait_timeout_locked f s + +# Note that a timeout value must fit in 32 bits. +wait_timeout_unlocked_int I f +wait_timeout_unlocked s f +wait_timeout_locked_int I f +wait_timeout_locked s f + +%cold i_wait_error i_wait_error_locked +%hot send @@ -441,33 +431,35 @@ send # Optimized comparisons with one immediate/literal operand. 
#
 
-is_eq_exact Lbl R=xy C=ian => i_is_eq_exact_immed Lbl R C
+is_eq_exact Lbl S S =>
+is_eq_exact Lbl C1=c C2=c => move C1 x | is_eq_exact Lbl x C2
+is_eq_exact Lbl C=c R=xy => is_eq_exact Lbl R C
+
+is_eq_exact Lbl R=xy n => is_nil Lbl R
+is_eq_exact Lbl R=xy C=ia => i_is_eq_exact_immed Lbl R C
 is_eq_exact Lbl R=xy C=q => i_is_eq_exact_literal Lbl R C
 
+is_ne_exact Lbl S S => jump Lbl
+is_ne_exact Lbl C1=c C2=c => move C1 x | is_ne_exact Lbl x C2
+is_ne_exact Lbl C=c R=xy => is_ne_exact Lbl R C
+
 is_ne_exact Lbl R=xy C=ian => i_is_ne_exact_immed Lbl R C
 is_ne_exact Lbl R=xy C=q => i_is_ne_exact_literal Lbl R C
 
-%macro: i_is_eq_exact_immed EqualImmed -fail_action
-i_is_eq_exact_immed f r c
-i_is_eq_exact_immed f x c
-i_is_eq_exact_immed f y c
+i_is_eq_exact_immed f rxy c
 
-i_is_eq_exact_literal f x c
-i_is_eq_exact_literal f y c
+i_is_eq_exact_literal f xy c
 
-%macro: i_is_ne_exact_immed NotEqualImmed -fail_action
-i_is_ne_exact_immed f x c
-i_is_ne_exact_immed f y c
+i_is_ne_exact_immed f xy c
 
-i_is_ne_exact_literal f x c
-i_is_ne_exact_literal f y c
+i_is_ne_exact_literal f xy c
 
 is_eq_exact Lbl Y=y X=x => is_eq_exact Lbl X Y
-%macro: is_eq_exact EqualExact -fail_action -pack
 is_eq_exact f x xy
-is_eq_exact f s s
+is_eq_exact f y y
+
+is_ne_exact f S S
 
-%macro: is_lt IsLessThan -fail_action
 is_lt f x x
 is_lt f x c
 is_lt f c x
@@ -475,7 +467,6 @@ is_lt f c x
 is_lt f s s
 %hot
 
-%macro: is_ge IsGreaterEqual -fail_action
 is_ge f x x
 is_ge f x c
 is_ge f c x
@@ -483,13 +474,8 @@ is_ge f c x
 is_ge f s s
 %hot
 
-%macro: is_ne_exact NotEqualExact -fail_action
-is_ne_exact f s s
-
-%macro: is_eq Equal -fail_action
 is_eq f s s
 
-%macro: is_ne NotEqual -fail_action
 is_ne f s s
 
 #
@@ -507,9 +493,7 @@ i_put_tuple Dst Arity Puts=* | put S => \
 
 i_put_tuple/2
 
-%macro:i_put_tuple PutTuple -pack -goto:do_put_tuple
-i_put_tuple x I
-i_put_tuple y I
+i_put_tuple xy I
 
 #
 # The instruction "put_list Const [] Dst" was generated in rare
 #
 
 put_list Const=c n Dst => move Const x | put_list x n Dst
 
-%macro:put_list PutList -pack
-
 put_list x n x
 put_list y n x
 put_list x x x
@@ -560,6 +542,7 @@ put_list s s d
 
 # Some more only used by the emulator
 #
 
+%cold
 normal_exit
 continue_exit
 apply_bif
@@ -567,6 +550,7 @@ call_nif
 call_error_handler
 error_action_code
 return_trace
+%hot
 
 #
 # Instruction transformations & folded instructions.
@@ -577,27 +561,18 @@ return_trace move S x==0 | return => move_return S -%macro: move_return MoveReturn -nonext -move_return x -move_return c -move_return n +move_return xcn move S x==0 | deallocate D | return => move_deallocate_return S D -%macro: move_deallocate_return MoveDeallocateReturn -pack -nonext -move_deallocate_return x Q -move_deallocate_return y Q -move_deallocate_return c Q -move_deallocate_return n Q +move_deallocate_return xycn Q deallocate D | return => deallocate_return D -%macro: deallocate_return DeallocateReturn -nonext deallocate_return Q test_heap Need u==1 | put_list Y=y x==0 x==0 => test_heap_1_put_list Need Y -%macro: test_heap_1_put_list TestHeapPutList -pack test_heap_1_put_list I y # @@ -608,8 +583,6 @@ is_tagged_tuple Fail Literal=q Arity Atom => \ move Literal x | is_tagged_tuple Fail x Arity Atom is_tagged_tuple Fail=f c Arity Atom => jump Fail -%macro:is_tagged_tuple IsTaggedTuple -fail_action - is_tagged_tuple f rxy A a # Test tuple & arity (head) @@ -618,17 +591,13 @@ is_tuple Fail Literal=q => move Literal x | is_tuple Fail x is_tuple Fail=f c => jump Fail is_tuple Fail=f S=xy | test_arity Fail=f S=xy Arity => is_tuple_of_arity Fail S Arity -%macro:is_tuple_of_arity IsTupleOfArity -fail_action - is_tuple_of_arity f rxy A -%macro: is_tuple IsTuple -fail_action is_tuple f rxy test_arity Fail Literal=q Arity => move Literal x | test_arity Fail x Arity test_arity Fail=f c Arity => jump Fail -%macro: test_arity IsArity -fail_action test_arity f xy A get_tuple_element Reg=x P1 D1=x | get_tuple_element Reg=x P2 D2=x | \ @@ -650,16 +619,13 @@ is_integer Fail Literal=q => move Literal x | is_integer Fail x is_integer Fail=f S=x | allocate Need Regs => is_integer_allocate Fail S Need Regs -%macro: is_integer_allocate IsIntegerAllocate -fail_action -is_integer_allocate f x I I +is_integer_allocate f x t t -%macro: is_integer IsInteger -fail_action is_integer f xy is_list Fail=f n => is_list Fail Literal=q => move Literal x | is_list Fail x is_list Fail=f c => jump Fail -%macro: is_list IsList -fail_action is_list f x %cold is_list f y @@ -667,24 +633,16 @@ is_list f y is_nonempty_list Fail=f S=x | allocate Need Rs => is_nonempty_list_allocate Fail S Need Rs -%macro:is_nonempty_list_allocate IsNonemptyListAllocate -fail_action -pack -is_nonempty_list_allocate f rx I t - -is_nonempty_list F=f x==0 | test_heap I1 I2 => is_non_empty_list_test_heap F I1 I2 - -%macro: is_non_empty_list_test_heap IsNonemptyListTestHeap -fail_action -pack -is_non_empty_list_test_heap f I t +is_nonempty_list F=f x==0 | test_heap I1 I2 => is_nonempty_list_test_heap F I1 I2 is_nonempty_list Fail=f S=x | get_list S D1=x D2=x => \ is_nonempty_list_get_list Fail S D1 D2 -%macro: is_nonempty_list_get_list IsNonemptyListGetList -fail_action -pack +is_nonempty_list_allocate f rx t t +is_nonempty_list_test_heap f I t is_nonempty_list_get_list f rx x x - -%macro: is_nonempty_list IsNonemptyList -fail_action is_nonempty_list f xy -%macro: is_atom IsAtom -fail_action is_atom f x %cold is_atom f y @@ -692,7 +650,6 @@ is_atom f y is_atom Fail=f a => is_atom Fail=f niq => jump Fail -%macro: is_float IsFloat -fail_action is_float f x %cold is_float f y @@ -703,12 +660,10 @@ is_float Fail Literal=q => move Literal x | is_float Fail x is_nil Fail=f n => is_nil Fail=f qia => jump Fail -%macro: is_nil IsNil -fail_action is_nil f xy is_binary Fail Literal=q => move Literal x | is_binary Fail x is_binary Fail=f c => jump Fail -%macro: is_binary IsBinary -fail_action is_binary f x %cold is_binary f y @@ -719,28 
+674,24 @@ is_bitstr Fail Term => is_bitstring Fail Term is_bitstring Fail Literal=q => move Literal x | is_bitstring Fail x is_bitstring Fail=f c => jump Fail -%macro: is_bitstring IsBitstring -fail_action is_bitstring f x %cold is_bitstring f y %hot is_reference Fail=f cq => jump Fail -%macro: is_reference IsRef -fail_action is_reference f x %cold is_reference f y %hot is_pid Fail=f cq => jump Fail -%macro: is_pid IsPid -fail_action is_pid f x %cold is_pid f y %hot is_port Fail=f cq => jump Fail -%macro: is_port IsPort -fail_action is_port f x %cold is_port f y @@ -751,22 +702,19 @@ is_boolean Fail=f a==am_false => is_boolean Fail=f ac => jump Fail %cold -%macro: is_boolean IsBoolean -fail_action is_boolean f xy %hot is_function2 Fail=f acq Arity => jump Fail is_function2 Fail=f Fun a => jump Fail -is_function2 f s s -%macro: is_function2 IsFunction2 -fail_action +is_function2 f S s # Allocating & initializing. allocate Need Regs | init Y => allocate_init Need Regs Y init Y1 | init Y2 => init2 Y1 Y2 -%macro: allocate_init AllocateInit -pack -allocate_init t I y +allocate_init t t y ################################################################# # External function and bif calls. @@ -1013,16 +961,18 @@ call_ext_last Ar Func D => i_call_ext_last Func D call_ext_only Ar Func => i_call_ext_only Func i_apply -i_apply_last P +i_apply_last Q i_apply_only i_apply_fun -i_apply_fun_last P +i_apply_fun_last Q i_apply_fun_only +%cold i_hibernate i_perf_counter +%hot call_bif e @@ -1045,19 +995,18 @@ bif2 Fail Bif S1 S2 Dst => i_bif2 Fail Bif S1 S2 Dst i_get_hash c I d i_get s d -%macro: self Self self xy -%macro: node Node node x %cold node y %hot -i_fast_element j x I d -i_fast_element j y I d +# Note: 'I' is sufficient because this instruction will only be used +# if the arity fits in 24 bits. +i_fast_element xy j I d -i_element j xy s d +i_element xy j s d bif1 f b s d bif1_body b s d @@ -1068,50 +1017,35 @@ i_bif2_body b s s d # Internal calls. # -move S=c x==0 | call Ar P=f => i_move_call S P -move S=s x==0 | call Ar P=f => move_call S P - -i_move_call c f +move S=cxy x==0 | call Ar P=f => move_call S P -%macro:move_call MoveCall -arg_f -size -nonext move_call/2 +move_call cxy f -move_call xy f - -move S=c x==0 | call_last Ar P=f D => i_move_call_last P D S move S x==0 | call_last Ar P=f D => move_call_last S P D -i_move_call_last f P c - -%macro:move_call_last MoveCallLast -arg_f -nonext -pack - move_call_last/3 -move_call_last xy f Q +move_call_last cxy f Q -move S=c x==0 | call_only Ar P=f => i_move_call_only P S -move S=x x==0 | call_only Ar P=f => move_call_only S P +move S=cx x==0 | call_only Ar P=f => move_call_only S P -i_move_call_only f c - -%macro:move_call_only MoveCallOnly -arg_f -nonext move_call_only/2 - -move_call_only x f +move_call_only cx f call Ar Func => i_call Func call_last Ar Func D => i_call_last Func D call_only Ar Func => i_call_only Func i_call f -i_call_last f P +i_call_last f Q i_call_only f i_call_ext e -i_call_ext_last e P +i_call_ext_last e Q i_call_ext_only e i_move_call_ext c e -i_move_call_ext_last e P c +i_move_call_ext_last e Q c i_move_call_ext_only e c # Fun calls. 
@@ -1119,17 +1053,15 @@ i_move_call_ext_only e c
 
 call_fun Arity | deallocate D | return => i_call_fun_last Arity D
 call_fun Arity => i_call_fun Arity
 
-i_call_fun I
-i_call_fun_last I P
+i_call_fun t
+i_call_fun_last t Q
 
 make_fun2 OldIndex=u => gen_make_fun2(OldIndex)
 
-%macro: i_make_fun MakeFun -pack
 %cold
-i_make_fun I t
+i_make_fun W t
 %hot
 
-%macro: is_function IsFunction -fail_action
 is_function f xy
 is_function Fail=f c => jump Fail
 
@@ -1139,45 +1071,44 @@ func_info M F A => i_func_info u M F A
 # New bit syntax matching (R11B).
 # ================================================================
 
-%cold
+%warm
 
 bs_start_match2 Fail=f ica X Y D => jump Fail
 bs_start_match2 Fail Bin X Y D => i_bs_start_match2 Bin Fail X Y D
-i_bs_start_match2 xy f I I d
+i_bs_start_match2 xy f t t x
 
 bs_save2 Reg Index => gen_bs_save(Reg, Index)
-i_bs_save2 x I
+i_bs_save2 x t
 
 bs_restore2 Reg Index => gen_bs_restore(Reg, Index)
-i_bs_restore2 x I
+i_bs_restore2 x t
 
 # Matching integers
 bs_match_string Fail Ms Bits Val => i_bs_match_string Ms Fail Bits Val
 
-i_bs_match_string x f I I
+i_bs_match_string x f W W
 
 # Fetching integers from binaries.
 bs_get_integer2 Fail=f Ms=x Live=u Sz=sq Unit=u Flags=u Dst=d => \
   gen_get_integer2(Fail, Ms, Live, Sz, Unit, Flags, Dst)
 
-i_bs_get_integer_small_imm x I f I d
-i_bs_get_integer_imm x I I f I d
-i_bs_get_integer f I I s s d
-i_bs_get_integer_8 x f d
-i_bs_get_integer_16 x f d
-i_bs_get_integer_32 x f I d
+i_bs_get_integer_small_imm x W f t x
+i_bs_get_integer_imm x W t f t x
+i_bs_get_integer f t t x s x
+i_bs_get_integer_8 x f x
+i_bs_get_integer_16 x f x
+
+%if ARCH_64
+i_bs_get_integer_32 x f x
+%endif
 
 # Fetching binaries from binaries.
 bs_get_binary2 Fail=f Ms=x Live=u Sz=sq Unit=u Flags=u Dst=d => \
   gen_get_binary2(Fail, Ms, Live, Sz, Unit, Flags, Dst)
 
-%macro: i_bs_get_binary_imm2 BsGetBinaryImm_2 -fail_action
-%macro: i_bs_get_binary2 BsGetBinary_2 -fail_action
-%macro: i_bs_get_binary_all2 BsGetBinaryAll_2 -fail_action
-
-i_bs_get_binary_imm2 f x I I I x
-i_bs_get_binary2 f x I s I x
-i_bs_get_binary_all2 f x I I x
-i_bs_get_binary_all_reuse x f I
+i_bs_get_binary_imm2 f x t W t x
+i_bs_get_binary2 f x t s t x
+i_bs_get_binary_all2 f x t t x
+i_bs_get_binary_all_reuse x f t
 
 # Fetching float from binaries.
 bs_get_float2 Fail=f Ms=x Live=u Sz=s Unit=u Flags=u Dst=d => \
@@ -1185,30 +1116,24 @@
 
 bs_get_float2 Fail=f Ms=x Live=u Sz=q Unit=u Flags=u Dst=d => jump Fail
 
-%macro: i_bs_get_float2 BsGetFloat2 -fail_action
-i_bs_get_float2 f x I s I x
+i_bs_get_float2 f x t s t x
 
 # Miscellaneous
 
 bs_skip_bits2 Fail=f Ms=x Sz=sq Unit=u Flags=u => \
   gen_skip_bits2(Fail, Ms, Sz, Unit, Flags)
 
-%macro: i_bs_skip_bits_imm2 BsSkipBitsImm2 -fail_action
-i_bs_skip_bits_imm2 f x I
-
-%macro: i_bs_skip_bits2 BsSkipBits2 -fail_action
-i_bs_skip_bits2 f x xy I
-
-%macro: i_bs_skip_bits_all2 BsSkipBitsAll2 -fail_action
-i_bs_skip_bits_all2 f x I
+i_bs_skip_bits_imm2 f x W
+i_bs_skip_bits2 f x xy t
+i_bs_skip_bits_all2 f x t
 
 bs_test_tail2 Fail=f Ms=x Bits=u==0 => bs_test_zero_tail2 Fail Ms
 bs_test_tail2 Fail=f Ms=x Bits=u => bs_test_tail_imm2 Fail Ms Bits
 
 bs_test_zero_tail2 f x
-bs_test_tail_imm2 f x I
+bs_test_tail_imm2 f x W
 
 bs_test_unit F Ms Unit=u==8 => bs_test_unit8 F Ms
-bs_test_unit f x I
+bs_test_unit f x t
 bs_test_unit8 f x
 
 # A y register operand for bs_context_to_binary is rare,
@@ -1222,14 +1147,14 @@
 bs_context_to_binary x
 
 # Utf8/utf16/utf32 support.
(R12B-5) # bs_get_utf8 Fail=f Ms=x u u Dst=d => i_bs_get_utf8 Ms Fail Dst -i_bs_get_utf8 x f d +i_bs_get_utf8 x f x bs_skip_utf8 Fail=f Ms=x u u => i_bs_get_utf8 Ms Fail x bs_get_utf16 Fail=f Ms=x u Flags=u Dst=d => i_bs_get_utf16 Ms Fail Flags Dst bs_skip_utf16 Fail=f Ms=x u Flags=u => i_bs_get_utf16 Ms Fail Flags x -i_bs_get_utf16 x f I d +i_bs_get_utf16 x f t x bs_get_utf32 Fail=f Ms=x Live=u Flags=u Dst=d => \ bs_get_integer2 Fail Ms Live i=32 u=1 Flags Dst | \ @@ -1238,22 +1163,18 @@ bs_skip_utf32 Fail=f Ms=x Live=u Flags=u => \ bs_get_integer2 Fail Ms Live i=32 u=1 Flags x | \ i_bs_validate_unicode_retract Fail x Ms -i_bs_validate_unicode_retract j s s +i_bs_validate_unicode_retract j s S %hot # # Constructing binaries # -%cold +%warm bs_init2 Fail Sz Words Regs Flags Dst | binary_too_big(Sz) => system_limit Fail -bs_init2 Fail Sz=u Words=u==0 Regs Flags Dst | should_gen_heap_bin(Sz) => \ - i_bs_init_heap_bin Sz Regs Dst bs_init2 Fail Sz=u Words=u==0 Regs Flags Dst => i_bs_init Sz Regs Dst -bs_init2 Fail Sz=u Words Regs Flags Dst | should_gen_heap_bin(Sz) => \ - i_bs_init_heap_bin_heap Sz Words Regs Dst bs_init2 Fail Sz=u Words Regs Flags Dst => \ i_bs_init_heap Sz Words Regs Dst @@ -1262,15 +1183,13 @@ bs_init2 Fail Sz Words=u==0 Regs Flags Dst => \ bs_init2 Fail Sz Words Regs Flags Dst => \ i_bs_init_fail_heap Sz Words Fail Regs Dst -i_bs_init_fail xy j I d +i_bs_init_fail xy j t x -i_bs_init_fail_heap s I j I d +i_bs_init_fail_heap s I j t x -i_bs_init I I d -i_bs_init_heap_bin I I d +i_bs_init W t x -i_bs_init_heap I I I d -i_bs_init_heap_bin_heap I I I d +i_bs_init_heap W I t x bs_init_bits Fail Sz=o Words Regs Flags Dst => system_limit Fail @@ -1283,16 +1202,16 @@ bs_init_bits Fail Sz Words=u==0 Regs Flags Dst => \ bs_init_bits Fail Sz Words Regs Flags Dst => \ i_bs_init_bits_fail_heap Sz Words Fail Regs Dst -i_bs_init_bits_fail xy j I d +i_bs_init_bits_fail xy j t x -i_bs_init_bits_fail_heap s I j I d +i_bs_init_bits_fail_heap s I j t x -i_bs_init_bits I I d -i_bs_init_bits_heap I I I d +i_bs_init_bits W t x +i_bs_init_bits_heap W I t x bs_add Fail S1=i==0 S2 Unit=u==1 D => move S2 D -bs_add j s s I d +bs_add j s s t x bs_append Fail Size Extra Live Unit Bin Flags Dst => \ move Bin x | i_bs_append Fail Extra Live Unit Size Dst @@ -1302,8 +1221,8 @@ bs_private_append Fail Size Unit Bin Flags Dst => \ bs_init_writable -i_bs_append j I I I s d -i_bs_private_append j I s s d +i_bs_append j I t t s x +i_bs_private_append j t s S x # # Storing integers into binaries. @@ -1312,11 +1231,8 @@ i_bs_private_append j I s s d bs_put_integer Fail=j Sz=sq Unit=u Flags=u Src=s => \ gen_put_integer(Fail, Sz, Unit, Flags, Src) -%macro: i_new_bs_put_integer NewBsPutInteger -%macro: i_new_bs_put_integer_imm NewBsPutIntegerImm - -i_new_bs_put_integer j s I s -i_new_bs_put_integer_imm j I I s +i_new_bs_put_integer j s t s +i_new_bs_put_integer_imm j W t s # # Utf8/utf16/utf32 support. 
(R12B-5) @@ -1324,17 +1240,17 @@ i_new_bs_put_integer_imm j I I s bs_utf8_size j Src=s Dst=d => i_bs_utf8_size Src Dst -i_bs_utf8_size s d +i_bs_utf8_size s x bs_utf16_size j Src=s Dst=d => i_bs_utf16_size Src Dst -i_bs_utf16_size s d +i_bs_utf16_size s x bs_put_utf8 Fail u Src=s => i_bs_put_utf8 Fail Src i_bs_put_utf8 j s -bs_put_utf16 j I s +bs_put_utf16 j t s bs_put_utf32 Fail=j Flags=u Src=s => \ i_bs_validate_unicode Fail Src | bs_put_integer Fail i=32 u=1 Flags Src @@ -1349,11 +1265,8 @@ bs_put_float Fail Sz=q Unit Flags Val => badarg Fail bs_put_float Fail=j Sz=s Unit=u Flags=u Src=s => \ gen_put_float(Fail, Sz, Unit, Flags, Src) -%macro: i_new_bs_put_float NewBsPutFloat -%macro: i_new_bs_put_float_imm NewBsPutFloatImm - -i_new_bs_put_float j s I s -i_new_bs_put_float_imm j I I s +i_new_bs_put_float j s t s +i_new_bs_put_float_imm j W t s # # Storing binaries into binaries. @@ -1362,14 +1275,9 @@ i_new_bs_put_float_imm j I I s bs_put_binary Fail=j Sz=s Unit=u Flags=u Src=s => \ gen_put_binary(Fail, Sz, Unit, Flags, Src) -%macro: i_new_bs_put_binary NewBsPutBinary -i_new_bs_put_binary j s I s - -%macro: i_new_bs_put_binary_imm NewBsPutBinaryImm -i_new_bs_put_binary_imm j I s - -%macro: i_new_bs_put_binary_all NewBsPutBinaryAll -i_new_bs_put_binary_all j s I +i_new_bs_put_binary j s t s +i_new_bs_put_binary_imm j W s +i_new_bs_put_binary_all j s t # # Warning: The i_bs_put_string and i_new_bs_put_string instructions @@ -1377,9 +1285,7 @@ i_new_bs_put_binary_all j s I # Don't change the instruction format unless you change the loader too. # -bs_put_string I I - -%hot +bs_put_string W W # # New floating point instructions (R8). @@ -1393,11 +1299,13 @@ fnegate p FR1 FR2 => i_fnegate FR1 FR2 fconv Arg=iqan Dst=l => move Arg x | fconv x Dst -fmove q l -fmove d l -fmove l d +fmove Arg=l Dst=d => fstore Arg Dst +fmove Arg=dq Dst=l => fload Arg Dst -fconv d l +fstore l d +fload Sq l + +fconv S l i_fadd l l l i_fsub l l l @@ -1407,50 +1315,87 @@ i_fnegate l l fclearerror | no_fpe_signals() => fcheckerror p | no_fpe_signals() => + +%unless NO_FPE_SIGNALS fcheckerror p => i_fcheckerror i_fcheckerror fclearerror +%endif + +%hot # # New apply instructions in R10B. # -apply I -apply_last I P +apply t +apply_last t Q # -# Map instructions in R17. +# Handle compatibility with OTP 17 here. # -sorted_put_map_assoc/5 -put_map_assoc F Map Dst Live Size Rest=* | map_key_sort(Size, Rest) => \ - sorted_put_map_assoc F Map Dst Live Size Rest +i_put_map_assoc/4 + +# We KNOW that in OTP 20 (actually OTP 18 and higher), a put_map_assoc instruction +# is always preceded by an is_map test. That means that put_map_assoc can never +# fail and does not need any failure label. + +put_map_assoc Fail Map Dst Live Size Rest=* | compiled_with_otp_20_or_higher() => \ + i_put_map_assoc Map Dst Live Size Rest + +# Translate the put_map_assoc instruction if the module was compiled by a compiler +# before 20. This is only necessary if the OTP 17 compiler was used, but we +# have no safe and relatively easy way to know whether OTP 18/19 was used. + +put_map_assoc Fail=p Map Dst Live Size Rest=* => \ + ensure_map Map | i_put_map_assoc Map Dst Live Size Rest +put_map_assoc Fail=f Map Dst Live Size Rest=* => \ + is_map Fail Map | i_put_map_assoc Map Dst Live Size Rest + +ensure_map Lit=q | literal_is_map(Lit) => +ensure_map Src=cqy => move Src x | ensure_map x + +%cold +ensure_map x +%hot + +# +# Map instructions. First introduced in R17. 
+# + +sorted_put_map_assoc/4 +i_put_map_assoc Map Dst Live Size Rest=* | map_key_sort(Size, Rest) => \ + sorted_put_map_assoc Map Dst Live Size Rest sorted_put_map_exact/5 put_map_exact F Map Dst Live Size Rest=* | map_key_sort(Size, Rest) => \ sorted_put_map_exact F Map Dst Live Size Rest -sorted_put_map_assoc j Map Dst Live Size Rest=* | is_empty_map(Map) => \ +sorted_put_map_assoc Map Dst Live Size Rest=* | is_empty_map(Map) => \ new_map Dst Live Size Rest -sorted_put_map_assoc F Src=s Dst Live Size Rest=* => \ - update_map_assoc F Src Dst Live Size Rest -sorted_put_map_assoc F Src Dst Live Size Rest=* => \ - move Src x | update_map_assoc F x Dst Live Size Rest +sorted_put_map_assoc Src=s Dst Live Size Rest=* => \ + update_map_assoc Src Dst Live Size Rest +sorted_put_map_assoc Src Dst Live Size Rest=* => \ + move Src x | update_map_assoc x Dst Live Size Rest sorted_put_map_exact F Src=s Dst Live Size Rest=* => \ update_map_exact F Src Dst Live Size Rest sorted_put_map_exact F Src Dst Live Size Rest=* => \ move Src x | update_map_exact F x Dst Live Size Rest -new_map d I I -update_map_assoc j s d I I -update_map_exact j s d I I +new_map Dst Live Size Rest=* | is_small_map_literal_keys(Size, Rest) => \ + gen_new_small_map_lit(Dst, Live, Size, Rest) + +new_map d t I +i_new_small_map_lit d t q +update_map_assoc s d t I +update_map_exact j s d t I is_map Fail Lit=q | literal_is_map(Lit) => is_map Fail cq => jump Fail -%macro: is_map IsMap -fail_action is_map f xy ## Transform has_map_fields #{ K1 := _, K2 := _ } to has_map_elements @@ -1470,10 +1415,8 @@ i_get_map_elements f s I i_get_map_element Fail Src=xy Key=y Dst => \ move Key x | i_get_map_element Fail Src x Dst -%macro: i_get_map_element_hash GetMapElementHash -fail_action i_get_map_element_hash f xy c I xy -%macro: i_get_map_element GetMapElement -fail_action i_get_map_element f xy x xy # @@ -1509,9 +1452,9 @@ gen_minus p Live Reg=d Int=i Dst | negation_is_small(Int) => \ # GCing arithmetic instructions. 
#
 
-gen_plus Fail Live S1 S2 Dst => i_plus Fail Live S1 S2 Dst
+gen_plus Fail Live S1 S2 Dst => i_plus S1 S2 Fail Live Dst
 
-gen_minus Fail Live S1 S2 Dst => i_minus Fail Live S1 S2 Dst
+gen_minus Fail Live S1 S2 Dst => i_minus S1 S2 Fail Live Dst
 
 gc_bif2 Fail Live u$bif:erlang:stimes/2 S1 S2 Dst => \
 	i_times Fail Live S1 S2 Dst
 
 gc_bif2 Fail Live u$bif:erlang:div/2 S1 S2 Dst => \
 	i_m_div Fail Live S1 S2 Dst
 gc_bif2 Fail Live u$bif:erlang:intdiv/2 S1 S2 Dst => \
 	i_int_div Fail Live S1 S2 Dst
 
 gc_bif2 Fail Live u$bif:erlang:rem/2 S1 S2 Dst => \
-	i_rem Fail Live S1 S2 Dst
+	i_rem S1 S2 Fail Live Dst
 
 gc_bif2 Fail Live u$bif:erlang:bsl/2 S1 S2 Dst => \
-	i_bsl Fail Live S1 S2 Dst
+	i_bsl S1 S2 Fail Live Dst
 gc_bif2 Fail Live u$bif:erlang:bsr/2 S1 S2 Dst => \
-	i_bsr Fail Live S1 S2 Dst
+	i_bsr S1 S2 Fail Live Dst
@@ -1522,15 +1465,15 @@
 gc_bif2 Fail Live u$bif:erlang:band/2 S1 S2 Dst => \
-	i_band Fail Live S1 S2 Dst
+	i_band S1 S2 Fail Live Dst
 
 gc_bif2 Fail Live u$bif:erlang:bor/2 S1 S2 Dst => \
 	i_bor Fail Live S1 S2 Dst
@@ -1540,32 +1483,34 @@ gc_bif2 Fail Live u$bif:erlang:bxor/2 S1 S2 Dst => \
 
 gc_bif1 Fail I u$bif:erlang:bnot/1 Src Dst=d => i_int_bnot Fail Src I Dst
 
-i_increment rxy I I d
+i_increment rxy W t d
 
-i_plus j I x xy d
-i_plus j I s s d
+i_plus x xy j t d
+i_plus s s j t d
 
-i_minus j I x x d
-i_minus j I s s d
+i_minus x x j t d
+i_minus s s j t d
 
-i_times j I s s d
+i_times j t s s d
 
-i_m_div j I s s d
-i_int_div j I s s d
+i_m_div j t s s d
+i_int_div j t s s d
 
-i_rem j I x x d
-i_rem j I s s d
+i_rem x x j t d
+i_rem s s j t d
 
-i_bsl j I s s d
-i_bsr j I s s d
+i_bsl s s j t d
+i_bsr s s j t d
 
-i_band j I x c d
-i_band j I s s d
+i_band x c j t d
+i_band s s j t d
 
 i_bor j I s s d
 i_bxor j I s s d
 
-i_int_bnot j s I d
+i_int_bnot Fail Src=c Live Dst => move Src x | i_int_bnot Fail x Live Dst
+
+i_int_bnot j S t d
 
 #
 # Old guard BIFs that create heap fragments are no longer allowed.
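Circling back to the i_int_bnot rules just above: the actual instruction body lives in arith_instrs.tab, but the small-integer fast path it can take is worth sketching. This is an assumption-laden illustration, not the real instruction code:

    /* Hedged sketch of an i_int_bnot-style fast path. For small
     * integers, bnot is the C complement of the untagged value:
     * ~n == -n - 1, which always fits back into a small. */
    static Eterm bnot_small_sketch(Eterm src)
    {
        if (is_small(src)) {
            return make_small(~signed_val(src));
        }
        return THE_NON_VALUE; /* caller falls back to the GCing bignum path */
    }

Note also that the constant-operand case is first rewritten by the transformation above into a move to an x register, so the instruction body itself only ever sees a register operand.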
@@ -1589,9 +1534,9 @@ gc_bif2 Fail I Bif S1 S2 Dst => \ gc_bif3 Fail I Bif S1 S2 S3 Dst => \ gen_guard_bif3(Fail, I, Bif, S1, S2, S3, Dst) -i_gc_bif1 j I s I d +i_gc_bif1 j W s t d -i_gc_bif2 j I I s s d +i_gc_bif2 j W t s s d ii_gc_bif3/7 @@ -1600,7 +1545,7 @@ ii_gc_bif3/7 ii_gc_bif3 Fail Bif Live S1 S2 S3 Dst => \ move S1 x | i_gc_bif3 Fail Bif Live S2 S3 Dst -i_gc_bif3 j I I s s d +i_gc_bif3 j W t s s d # # The following instruction is specially handled in beam_load.c diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c index bf3267cff1..92a0854ad3 100644 --- a/erts/emulator/beam/register.c +++ b/erts/emulator/beam/register.c @@ -38,16 +38,15 @@ static Hash process_reg; #define REG_HASH(term) ((HashValue) atom_val(term)) -static erts_smp_rwmtx_t regtab_rwmtx; +static erts_rwmtx_t regtab_rwmtx; -#define reg_try_read_lock() erts_smp_rwmtx_tryrlock(®tab_rwmtx) -#define reg_try_write_lock() erts_smp_rwmtx_tryrwlock(®tab_rwmtx) -#define reg_read_lock() erts_smp_rwmtx_rlock(®tab_rwmtx) -#define reg_write_lock() erts_smp_rwmtx_rwlock(®tab_rwmtx) -#define reg_read_unlock() erts_smp_rwmtx_runlock(®tab_rwmtx) -#define reg_write_unlock() erts_smp_rwmtx_rwunlock(®tab_rwmtx) +#define reg_try_read_lock() erts_rwmtx_tryrlock(®tab_rwmtx) +#define reg_try_write_lock() erts_rwmtx_tryrwlock(®tab_rwmtx) +#define reg_read_lock() erts_rwmtx_rlock(®tab_rwmtx) +#define reg_write_lock() erts_rwmtx_rwlock(®tab_rwmtx) +#define reg_read_unlock() erts_rwmtx_runlock(®tab_rwmtx) +#define reg_write_unlock() erts_rwmtx_rwunlock(®tab_rwmtx) -#ifdef ERTS_SMP static ERTS_INLINE void reg_safe_read_lock(Process *c_p, ErtsProcLocks *c_p_locks) { @@ -64,7 +63,7 @@ reg_safe_read_lock(Process *c_p, ErtsProcLocks *c_p_locks) } /* Release process locks in order to avoid deadlock */ - erts_smp_proc_unlock(c_p, *c_p_locks); + erts_proc_unlock(c_p, *c_p_locks); *c_p_locks = 0; } @@ -87,14 +86,13 @@ reg_safe_write_lock(Process *c_p, ErtsProcLocks *c_p_locks) } /* Release process locks in order to avoid deadlock */ - erts_smp_proc_unlock(c_p, *c_p_locks); + erts_proc_unlock(c_p, *c_p_locks); *c_p_locks = 0; } reg_write_lock(); } -#endif static ERTS_INLINE int is_proc_alive(Process *p) @@ -141,11 +139,11 @@ static void reg_free(RegProc *obj) void init_register_table(void) { HashFunctions f; - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; - rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; + rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; + rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED; - erts_smp_rwmtx_init_opt(®tab_rwmtx, &rwmtx_opt, "reg_tab", NIL, + erts_rwmtx_init_opt(®tab_rwmtx, &rwmtx_opt, "reg_tab", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); f.hash = (H_FUN) reg_hash; @@ -175,7 +173,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id) Process *proc = NULL; Port *port = NULL; RegProc r, *rp; - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p); if (is_not_atom(name) || name == am_undefined) return res; @@ -185,7 +183,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id) else { if (is_not_internal_pid(id) && is_not_internal_port(id)) return res; - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); if (is_internal_port(id)) { port = erts_id2port(id); if (!port) @@ -193,15 +191,13 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id) } } -#ifdef 
ERTS_SMP { ErtsProcLocks proc_locks = proc ? ERTS_PROC_LOCK_MAIN : 0; reg_safe_write_lock(proc, &proc_locks); if (proc && !proc_locks) - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } -#endif if (is_internal_pid(id)) { if (!proc) @@ -215,7 +211,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id) } else { ASSERT(!INVALID_PORT(port, id)); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(port)); r.pt = port; if (r.pt->common.u.alive.reg) goto done; @@ -250,8 +246,8 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id) erts_port_release(port); if (c_p != proc) { if (proc) - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } return res; } @@ -272,17 +268,15 @@ erts_whereis_name_to_id(Process *c_p, Eterm name) HashValue hval; int ix; HashBucket* b; -#ifdef ERTS_SMP ErtsProcLocks c_p_locks = 0; if (c_p) { c_p_locks = ERTS_PROC_LOCK_MAIN; - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p); } reg_safe_read_lock(c_p, &c_p_locks); if (c_p && !c_p_locks) - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); -#endif + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); hval = REG_HASH(name); ix = hval % process_reg.size; @@ -331,7 +325,6 @@ erts_whereis_name(Process *c_p, HashValue hval; int ix; HashBucket* b; -#ifdef ERTS_SMP ErtsProcLocks current_c_p_locks; Port *pending_port = NULL; @@ -348,7 +341,6 @@ erts_whereis_name(Process *c_p, * - read reg lock * - current_c_p_locks (either c_p_locks or 0) on c_p */ -#endif hval = REG_HASH(name); ix = hval % process_reg.size; @@ -370,7 +362,6 @@ erts_whereis_name(Process *c_p, if (!rp) *proc = NULL; else { -#ifdef ERTS_SMP if (!rp->p) *proc = NULL; else { @@ -387,17 +378,10 @@ erts_whereis_name(Process *c_p, *proc = rp->p; else { if (need_locks) - erts_smp_proc_unlock(rp->p, need_locks); + erts_proc_unlock(rp->p, need_locks); *proc = NULL; } } -#else - if (rp->p - && ((flags & ERTS_P2P_FLG_ALLOW_OTHER_X) || is_proc_alive(rp->p))) - *proc = rp->p; - else - *proc = NULL; -#endif if (*proc && (flags & ERTS_P2P_FLG_INC_REFC)) erts_proc_inc_refc(*proc); } @@ -407,7 +391,6 @@ erts_whereis_name(Process *c_p, if (!rp || !rp->pt) *port = NULL; else { -#ifdef ERTS_SMP if (lock_port) { if (pending_port == rp->pt) pending_port = NULL; @@ -419,11 +402,11 @@ erts_whereis_name(Process *c_p, pending_port = NULL; } - if (erts_smp_port_trylock(rp->pt) == EBUSY) { + if (erts_port_trylock(rp->pt) == EBUSY) { Eterm id = rp->pt->common.id; /* id read only... */ /* Unlock all locks, acquire port lock, and restart... 
*/ if (current_c_p_locks) { - erts_smp_proc_unlock(c_p, current_c_p_locks); + erts_proc_unlock(c_p, current_c_p_locks); current_c_p_locks = 0; } reg_read_unlock(); @@ -431,19 +414,16 @@ erts_whereis_name(Process *c_p, goto restart; } } - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(rp->pt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(rp->pt)); } -#endif *port = rp->pt; } } -#ifdef ERTS_SMP if (c_p && !current_c_p_locks) - erts_smp_proc_lock(c_p, c_p_locks); + erts_proc_lock(c_p, c_p_locks); if (pending_port) erts_port_release(pending_port); -#endif reg_read_unlock(); } @@ -476,7 +456,6 @@ int erts_unregister_name(Process *c_p, RegProc r, *rp; Port *port = c_prt; ErtsProcLocks current_c_p_locks = 0; -#ifdef ERTS_SMP /* * SMP note: If 'c_prt != NULL' and 'c_prt->reg->name == name', @@ -492,18 +471,15 @@ int erts_unregister_name(Process *c_p, restart: reg_safe_write_lock(c_p, ¤t_c_p_locks); -#endif r.name = name; if (is_non_value(name)) { /* Unregister current process name */ ASSERT(c_p); -#ifdef ERTS_SMP if (current_c_p_locks != c_p_locks) { - erts_smp_proc_lock(c_p, c_p_locks); + erts_proc_lock(c_p, c_p_locks); current_c_p_locks = c_p_locks; } -#endif if (c_p->common.u.alive.reg) { r.name = c_p->common.u.alive.reg->name; } else { @@ -516,36 +492,34 @@ int erts_unregister_name(Process *c_p, if ((rp = (RegProc*) hash_get(&process_reg, (void*) &r)) != NULL) { if (rp->pt) { if (port != rp->pt) { -#ifdef ERTS_SMP if (port) { ASSERT(port != c_prt); erts_port_release(port); port = NULL; } - if (erts_smp_port_trylock(rp->pt) == EBUSY) { + if (erts_port_trylock(rp->pt) == EBUSY) { Eterm id = rp->pt->common.id; /* id read only... */ /* Unlock all locks, acquire port lock, and restart... */ if (current_c_p_locks) { - erts_smp_proc_unlock(c_p, current_c_p_locks); + erts_proc_unlock(c_p, current_c_p_locks); current_c_p_locks = 0; } reg_write_unlock(); port = erts_id2port(id); goto restart; } -#endif port = rp->pt; } ASSERT(rp->pt == port); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(port)); rp->pt->common.u.alive.reg = NULL; if (IS_TRACED_FL(port, F_TRACE_PORTS)) { if (current_c_p_locks) { - erts_smp_proc_unlock(c_p, current_c_p_locks); + erts_proc_unlock(c_p, current_c_p_locks); current_c_p_locks = 0; } trace_port(port, am_unregister, r.name); @@ -553,7 +527,6 @@ int erts_unregister_name(Process *c_p, } else if (rp->p) { -#ifdef ERTS_SMP erts_proc_safelock(c_p, current_c_p_locks, c_p_locks, @@ -561,17 +534,14 @@ int erts_unregister_name(Process *c_p, (c_p == rp->p) ? current_c_p_locks : 0, ERTS_PROC_LOCK_MAIN); current_c_p_locks = c_p_locks; -#endif rp->p->common.u.alive.reg = NULL; if (IS_TRACED_FL(rp->p, F_TRACE_PROCS)) { trace_proc(rp->p, (c_p == rp->p) ? 
c_p_locks : ERTS_PROC_LOCK_MAIN, rp->p, am_unregister, r.name); } -#ifdef ERTS_SMP if (rp->p != c_p) { - erts_smp_proc_unlock(rp->p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(rp->p, ERTS_PROC_LOCK_MAIN); } -#endif } hash_erase(&process_reg, (void*) &r); res = 1; @@ -585,14 +555,12 @@ int erts_unregister_name(Process *c_p, erts_port_release(port); } if (c_prt) { - erts_smp_port_lock(c_prt); + erts_port_lock(c_prt); } } -#ifdef ERTS_SMP if (c_p && !current_c_p_locks) { - erts_smp_proc_lock(c_p, c_p_locks); + erts_proc_lock(c_p, c_p_locks); } -#endif return res; } @@ -633,14 +601,12 @@ BIF_RETTYPE registered_0(BIF_ALIST_0) Uint need; Eterm* hp; HashBucket **bucket; -#ifdef ERTS_SMP ErtsProcLocks proc_locks = ERTS_PROC_LOCK_MAIN; - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(BIF_P); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(BIF_P); reg_safe_read_lock(BIF_P, &proc_locks); if (!proc_locks) - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); -#endif + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); bucket = process_reg.bucket; diff --git a/erts/emulator/beam/safe_hash.c b/erts/emulator/beam/safe_hash.c index 527c9efeca..73306030ae 100644 --- a/erts/emulator/beam/safe_hash.c +++ b/erts/emulator/beam/safe_hash.c @@ -62,7 +62,7 @@ static ERTS_INLINE int align_up_pow2(int val) */ static void rehash(SafeHash* h, int grow_limit) { - if (erts_smp_atomic_xchg_acqb(&h->is_rehashing, 1) != 0) { + if (erts_atomic_xchg_acqb(&h->is_rehashing, 1) != 0) { return; /* already in progress */ } if (h->grow_limit == grow_limit) { @@ -77,7 +77,7 @@ static void rehash(SafeHash* h, int grow_limit) sys_memzero(new_tab, bytes); for (i=0; i<SAFE_HASH_LOCK_CNT; i++) { /* stop all traffic */ - erts_smp_mtx_lock(&h->lock_vec[i].mtx); + erts_mtx_lock(&h->lock_vec[i].mtx); } h->tab = new_tab; @@ -95,12 +95,12 @@ static void rehash(SafeHash* h, int grow_limit) } for (i=0; i<SAFE_HASH_LOCK_CNT; i++) { - erts_smp_mtx_unlock(&h->lock_vec[i].mtx); + erts_mtx_unlock(&h->lock_vec[i].mtx); } erts_free(h->type, (void *) old_tab); } /*else already done */ - erts_smp_atomic_set_relb(&h->is_rehashing, 0); + erts_atomic_set_relb(&h->is_rehashing, 0); } @@ -115,7 +115,7 @@ void safe_hash_get_info(SafeHashInfo *hi, SafeHash *h) int objects = 0; for (lock_ix=0; lock_ix<SAFE_HASH_LOCK_CNT; lock_ix++) { - erts_smp_mtx_lock(&h->lock_vec[lock_ix].mtx); + erts_mtx_lock(&h->lock_vec[lock_ix].mtx); size = h->size_mask + 1; for (i = lock_ix; i < size; i += SAFE_HASH_LOCK_CNT) { int depth = 0; @@ -128,7 +128,7 @@ void safe_hash_get_info(SafeHashInfo *hi, SafeHash *h) if (depth > max_depth) max_depth = depth; } - erts_smp_mtx_unlock(&h->lock_vec[lock_ix].mtx); + erts_mtx_unlock(&h->lock_vec[lock_ix].mtx); } hi->name = h->name; @@ -145,9 +145,9 @@ int safe_hash_table_sz(SafeHash *h) int i, size; for(i=0; h->name[i]; i++); i++; - erts_smp_mtx_lock(&h->lock_vec[0].mtx); /* any lock will do to read size */ + erts_mtx_lock(&h->lock_vec[0].mtx); /* any lock will do to read size */ size = h->size_mask + 1; - erts_smp_mtx_unlock(&h->lock_vec[0].mtx); + erts_mtx_unlock(&h->lock_vec[0].mtx); return sizeof(SafeHash) + size*sizeof(SafeHashBucket*) + i; } @@ -168,10 +168,10 @@ SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, erts_lock_ h->name = name; h->fun = fun; set_size(h,size); - erts_smp_atomic_init_nob(&h->is_rehashing, 0); - erts_smp_atomic_init_nob(&h->nitems, 0); + erts_atomic_init_nob(&h->is_rehashing, 0); + erts_atomic_init_nob(&h->nitems, 0); for (i=0; i<SAFE_HASH_LOCK_CNT; i++) { - erts_smp_mtx_init(&h->lock_vec[i].mtx, "safe_hash", NIL, + 
erts_mtx_init(&h->lock_vec[i].mtx, "safe_hash", NIL, flags); } return h; @@ -185,8 +185,8 @@ void* safe_hash_get(SafeHash* h, void* tmpl) { SafeHashValue hval = h->fun.hash(tmpl); SafeHashBucket* b; - erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx; - erts_smp_mtx_lock(lock); + erts_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx; + erts_mtx_lock(lock); b = h->tab[hval & h->size_mask]; while(b != NULL) { @@ -194,7 +194,7 @@ void* safe_hash_get(SafeHash* h, void* tmpl) break; b = b->next; } - erts_smp_mtx_unlock(lock); + erts_mtx_unlock(lock); return (void*) b; } @@ -207,13 +207,13 @@ void* safe_hash_put(SafeHash* h, void* tmpl) SafeHashValue hval = h->fun.hash(tmpl); SafeHashBucket* b; SafeHashBucket** head; - erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx; - erts_smp_mtx_lock(lock); + erts_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx; + erts_mtx_lock(lock); head = &h->tab[hval & h->size_mask]; b = *head; while(b != NULL) { if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) { - erts_smp_mtx_unlock(lock); + erts_mtx_unlock(lock); return b; } b = b->next; @@ -224,8 +224,8 @@ void* safe_hash_put(SafeHash* h, void* tmpl) b->next = *head; *head = b; grow_limit = h->grow_limit; - erts_smp_mtx_unlock(lock); - if (erts_smp_atomic_inc_read_nob(&h->nitems) > grow_limit) { + erts_mtx_unlock(lock); + if (erts_atomic_inc_read_nob(&h->nitems) > grow_limit) { rehash(h, grow_limit); } return (void*) b; @@ -240,36 +240,37 @@ void* safe_hash_erase(SafeHash* h, void* tmpl) SafeHashValue hval = h->fun.hash(tmpl); SafeHashBucket* b; SafeHashBucket** prevp; - erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx; - erts_smp_mtx_lock(lock); + erts_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx; + erts_mtx_lock(lock); prevp = &h->tab[hval & h->size_mask]; b = *prevp; while(b != NULL) { if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) { *prevp = b->next; - erts_smp_mtx_unlock(lock); - erts_smp_atomic_dec_nob(&h->nitems); + erts_mtx_unlock(lock); + erts_atomic_dec_nob(&h->nitems); h->fun.free((void*)b); return tmpl; } prevp = &b->next; b = b->next; } - erts_smp_mtx_unlock(lock); + erts_mtx_unlock(lock); return NULL; } /* -** Call 'func(obj,func_arg2)' for all objects in table. NOT SAFE!!! +** Call 'func(obj,func_arg2,func_arg3)' for all objects in table. NOT SAFE!!! 
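safe_hash avoids a single table lock by striping: each key hashes to one of SAFE_HASH_LOCK_CNT mutexes, and only rehash() stops the world by taking every stripe. A stripped-down sketch of a lookup under that scheme (striped_hash and striped_get are illustrative; the stripe count must divide the slot count so that a given slot is always guarded by the same lock):

    #include <pthread.h>
    #include <stddef.h>

    #define STRIPES 16 /* power of two, like the slot count */

    struct bucket { unsigned hval; struct bucket *next; };

    struct striped_hash {
        pthread_mutex_t lock[STRIPES];
        struct bucket **tab;
        unsigned size_mask; /* slots - 1 */
    };

    struct bucket *striped_get(struct striped_hash *h, unsigned hval)
    {
        pthread_mutex_t *lock = &h->lock[hval % STRIPES];
        struct bucket *b;

        pthread_mutex_lock(lock); /* only 1/STRIPES of lookups contend */
        for (b = h->tab[hval & h->size_mask]; b; b = b->next)
            if (b->hval == hval)
                break;
        pthread_mutex_unlock(lock);
        return b;
    }

The real rehash() then serialises against all readers simply by locking every stripe in order before swapping in the grown table.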
*/ -void safe_hash_for_each(SafeHash* h, void (*func)(void *, void *), void *func_arg2) +void safe_hash_for_each(SafeHash* h, void (*func)(void *, void *, void *), + void *func_arg2, void *func_arg3) { int i; for (i = 0; i <= h->size_mask; i++) { SafeHashBucket* b = h->tab[i]; while (b != NULL) { - (*func)((void *) b, func_arg2); + (*func)((void *) b, func_arg2, func_arg3); b = b->next; } } @@ -280,7 +281,7 @@ void erts_lcnt_enable_hash_lock_count(SafeHash *h, erts_lock_flags_t flags, int int i; for(i = 0; i < SAFE_HASH_LOCK_CNT; i++) { - erts_smp_mtx_t *lock = &h->lock_vec[i].mtx; + erts_mtx_t *lock = &h->lock_vec[i].mtx; if(enable) { erts_lcnt_install_new_lock_info(&lock->lcnt, "safe_hash", NIL, diff --git a/erts/emulator/beam/safe_hash.h b/erts/emulator/beam/safe_hash.h index dde48a6de8..af97b4cb4d 100644 --- a/erts/emulator/beam/safe_hash.h +++ b/erts/emulator/beam/safe_hash.h @@ -73,11 +73,11 @@ typedef struct int size_mask; /* (RW) Number of slots - 1 */ SafeHashBucket** tab; /* (RW) Vector of bucket pointers (objects) */ int grow_limit; /* (RW) Threshold for growing table */ - erts_smp_atomic_t nitems; /* (A) Number of items in table */ - erts_smp_atomic_t is_rehashing; /* (A) Table rehashing in progress */ + erts_atomic_t nitems; /* (A) Number of items in table */ + erts_atomic_t is_rehashing; /* (A) Table rehashing in progress */ union { - erts_smp_mtx_t mtx; + erts_mtx_t mtx; byte __cache_line__[64]; }lock_vec[SAFE_HASH_LOCK_CNT]; @@ -95,7 +95,7 @@ void* safe_hash_get(SafeHash*, void*); void* safe_hash_put(SafeHash*, void*); void* safe_hash_erase(SafeHash*, void*); -void safe_hash_for_each(SafeHash*, void (*func)(void *, void *), void *); +void safe_hash_for_each(SafeHash*, void (*func)(void *, void *, void *), void *, void *); #ifdef ERTS_ENABLE_LOCK_COUNT void erts_lcnt_enable_hash_lock_count(SafeHash*, erts_lock_flags_t, int); diff --git a/erts/emulator/beam/select_instrs.tab b/erts/emulator/beam/select_instrs.tab new file mode 100644 index 0000000000..2951949d38 --- /dev/null +++ b/erts/emulator/beam/select_instrs.tab @@ -0,0 +1,190 @@ +// -*- c -*- +// +// %CopyrightBegin% +// +// Copyright Ericsson AB 2017. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// %CopyrightEnd% +// + +i_select_val_bins := select_val_bins.fetch.select; + +select_val_bins.head() { + Eterm select_val; +} + +select_val_bins.fetch(Src) { + select_val = $Src; +} + +select_val_bins.select(Fail, NumElements) { + struct Singleton { + BeamInstr val; + }; + struct Singleton* low; + struct Singleton* high; + struct Singleton* mid; + int bdiff; /* int not long because the arrays aren't that large */ + + low = (struct Singleton *) ($NEXT_INSTRUCTION); + high = low + $NumElements; + + /* The pointer subtraction (high-low) below must produce + * a signed result, because high could be < low. That + * requires the compiler to insert quite a bit of code. + * + * However, high will be > low so the result will be + * positive. 
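The optimisation being described replaces the signed division hidden in `mid = low + (high-low)/2` (pointer subtraction yields a signed ptrdiff_t) with an unsigned shift masked down to an element boundary, which is safe once the loop condition has proved the byte difference positive. A self-contained rendition over a plain int array (bsearch_int is illustrative, not the emulator's Singleton layout):

    #include <stddef.h>

    const int *bsearch_int(const int *low, const int *high, int key)
    {
        int bdiff; /* byte difference; int suffices for small arrays */

        /* Classic control: while (low < high) { mid = low + (high-low)/2; }
         * The comparison below proves bdiff > 0, so an unsigned shift,
         * masked to a multiple of the element size, yields the same
         * midpoint without a signed division. */
        while ((bdiff = (int)((const char *)high - (const char *)low)) > 0) {
            size_t boffset = ((unsigned)bdiff >> 1) & ~(sizeof(int) - 1);
            const int *mid = (const int *)((const char *)low + boffset);

            if (key < *mid)
                high = mid;
            else if (key > *mid)
                low = mid + 1;
            else
                return mid;  /* found */
        }
        return NULL;         /* not found */
    }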
We can use that knowledge to optimise the + * entire sequence, from the initial comparison to the + * computation of mid. + * + * -- Mikael Pettersson, Acumem AB + * + * Original loop control code: + * + * while (low < high) { + * mid = low + (high-low) / 2; + * + */ + while ((bdiff = (int)((char*)high - (char*)low)) > 0) { + unsigned int boffset = ((unsigned int)bdiff >> 1) & ~(sizeof(struct Singleton)-1); + + mid = (struct Singleton*)((char*)low + boffset); + if (select_val < mid->val) { + high = mid; + } else if (select_val > mid->val) { + low = mid + 1; + } else { + Sint32* jump_tab = (Sint32 *) ($NEXT_INSTRUCTION + $NumElements); + Sint32 offset = jump_tab[mid - (struct Singleton *)($NEXT_INSTRUCTION)]; + $JUMP(offset); + } + } + $JUMP($Fail); +} + +i_select_tuple_arity2 := select_val2.src.get_arity.execute; +i_select_val2 := select_val2.src.execute; + +select_val2.head() { + Eterm select_val2; +} + +select_val2.src(Src) { + select_val2 = $Src; +} + +select_val2.get_arity() { + if (ERTS_LIKELY(is_tuple(select_val2))) { + select_val2 = *tuple_val(select_val2); + } else { + select_val2 = NIL; + } +} + +select_val2.execute(Fail, T1, T2) { + Sint32* jump_tab = (Sint32 *) ($NEXT_INSTRUCTION); + + if (select_val2 == $T1) { + $JUMP(jump_tab[0]); + } else if (select_val2 == $T2) { + $JUMP(jump_tab[1]); + } else { + $FAIL($Fail); + } +} + +i_select_tuple_arity := select_val_lin.fetch.get_arity.execute; +i_select_val_lins := select_val_lin.fetch.execute; + +select_val_lin.head() { + Eterm select_val; +} + +select_val_lin.fetch(Src) { + select_val = $Src; +} + +select_val_lin.get_arity() { + if (ERTS_LIKELY(is_tuple(select_val))) { + select_val = *tuple_val(select_val); + } else { + select_val = NIL; + } +} + +select_val_lin.execute(Fail, N) { + BeamInstr* vs = $NEXT_INSTRUCTION; + int ix = 0; + + for (;;) { + if (vs[ix+0] >= select_val) { + ix += 0; + break; + } + if (vs[ix+1] >= select_val) { + ix += 1; + break; + } + ix += 2; + } + + if (vs[ix] == select_val) { + Sint32* jump_tab = (Sint32 *) ($NEXT_INSTRUCTION + $N); + Eterm offset = jump_tab[ix]; + $JUMP(offset); + } else { + $JUMP($Fail); + } +} + +JUMP_ON_VAL(Fail, Index, N, Base) { + if (is_small($Index)) { + $Index = (Uint) (signed_val($Index) - $Base); + if ($Index < $N) { + Sint32* jump_tab = (Sint32 *) ($NEXT_INSTRUCTION); + $JUMP(jump_tab[$Index]); + } + } + $FAIL($Fail); +} + +i_jump_on_val_zero := jump_on_val_zero.fetch.execute; + +jump_on_val_zero.head() { + Eterm index; +} + +jump_on_val_zero.fetch(Src) { + index = $Src; +} + +jump_on_val_zero.execute(Fail, N) { + $JUMP_ON_VAL($Fail, index, $N, 0); +} + +i_jump_on_val := jump_on_val.fetch.execute; + +jump_on_val.head() { + Eterm index; +} + +jump_on_val.fetch(Src) { + index = $Src; +} + +jump_on_val.execute(Fail, N, Base) { + $JUMP_ON_VAL($Fail, index, $N, $Base); +} diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h index 704d567337..9106091ca6 100644 --- a/erts/emulator/beam/sys.h +++ b/erts/emulator/beam/sys.h @@ -21,7 +21,7 @@ #ifndef __SYS_H__ #define __SYS_H__ -#if !defined(__GNUC__) +#if !defined(__GNUC__) || defined(__e2k__) # define ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) 0 #elif !defined(__GNUC_MINOR__) # define ERTS_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \ @@ -34,9 +34,6 @@ (((__GNUC__ << 24) | (__GNUC_MINOR__ << 12) | __GNUC_PATCHLEVEL__) >= (((MAJ) << 24) | ((MIN) << 12) | (PL))) #endif -#if defined(ERTS_DIRTY_SCHEDULERS) && !defined(ERTS_SMP) -# error "Dirty schedulers not supported without smp support" -#endif #ifdef ERTS_INLINE # ifndef ERTS_CAN_INLINE @@ 
-221,12 +218,6 @@ __decl_noreturn void __noreturn erl_assert_error(const char* expr, const char *f # define ASSERT(e) ((void) 1) #endif -#ifdef ERTS_SMP -# define ERTS_SMP_ASSERT(e) ASSERT(e) -#else -# define ERTS_SMP_ASSERT(e) ((void)1) -#endif - /* ERTS_UNDEF can be used to silence false warnings about * "variable may be used uninitialized" while keeping the variable * marked as undefined by valgrind. @@ -327,9 +318,9 @@ __decl_noreturn void __noreturn erl_assert_error(const char* expr, const char *f #endif #if SIZEOF_VOID_P == SIZEOF_LONG -typedef unsigned long Eterm; -typedef unsigned long Uint; -typedef long Sint; +typedef unsigned long Eterm erts_align_attribute(sizeof(long)); +typedef unsigned long Uint erts_align_attribute(sizeof(long)); +typedef long Sint erts_align_attribute(sizeof(long)); #define SWORD_CONSTANT(Const) Const##L #define UWORD_CONSTANT(Const) Const##UL #define ERTS_UWORD_MAX ULONG_MAX @@ -337,9 +328,9 @@ typedef long Sint; #define ERTS_SIZEOF_ETERM SIZEOF_LONG #define ErtsStrToSint strtol #elif SIZEOF_VOID_P == SIZEOF_INT -typedef unsigned int Eterm; -typedef unsigned int Uint; -typedef int Sint; +typedef unsigned int Eterm erts_align_attribute(sizeof(int)); +typedef unsigned int Uint erts_align_attribute(sizeof(int)); +typedef int Sint erts_align_attribute(sizeof(int)); #define SWORD_CONSTANT(Const) Const #define UWORD_CONSTANT(Const) Const##U #define ERTS_UWORD_MAX UINT_MAX @@ -347,9 +338,9 @@ typedef int Sint; #define ERTS_SIZEOF_ETERM SIZEOF_INT #define ErtsStrToSint strtol #elif SIZEOF_VOID_P == SIZEOF_LONG_LONG -typedef unsigned long long Eterm; -typedef unsigned long long Uint; -typedef long long Sint; +typedef unsigned long long Eterm erts_align_attribute(sizeof(long long)); +typedef unsigned long long Uint erts_align_attribute(sizeof(long long)); +typedef long long Sint erts_align_attribute(sizeof(long long)); #define SWORD_CONSTANT(Const) Const##LL #define UWORD_CONSTANT(Const) Const##ULL #define ERTS_UWORD_MAX ULLONG_MAX @@ -470,41 +461,25 @@ typedef union { #include "erl_lock_check.h" -/* needed by erl_smp.h */ +/* needed by erl_threads.h */ int erts_send_warning_to_logger_str_nogl(char *); -#include "erl_smp.h" +#include "erl_threads.h" #ifdef ERTS_WANT_BREAK_HANDLING -# ifdef ERTS_SMP -extern erts_smp_atomic32_t erts_break_requested; +extern erts_atomic32_t erts_break_requested; # define ERTS_BREAK_REQUESTED \ - ((int) erts_smp_atomic32_read_nob(&erts_break_requested)) -# else -extern volatile int erts_break_requested; -# define ERTS_BREAK_REQUESTED erts_break_requested -# endif + ((int) erts_atomic32_read_nob(&erts_break_requested)) void erts_do_break_handling(void); #endif -#if !defined(ERTS_SMP) && !defined(__WIN32__) -extern volatile Uint erts_signal_state; -#define ERTS_SIGNAL_STATE erts_signal_state -void erts_handle_signal_state(void); -#endif -#ifdef ERTS_SMP -extern erts_smp_atomic32_t erts_writing_erl_crash_dump; +extern erts_atomic32_t erts_writing_erl_crash_dump; extern erts_tsd_key_t erts_is_crash_dumping_key; #define ERTS_SOMEONE_IS_CRASH_DUMPING \ - ((int) erts_smp_atomic32_read_mb(&erts_writing_erl_crash_dump)) + ((int) erts_atomic32_read_mb(&erts_writing_erl_crash_dump)) #define ERTS_IS_CRASH_DUMPING \ ((int) (SWord) erts_tsd_get(erts_is_crash_dumping_key)) -#else -extern volatile int erts_writing_erl_crash_dump; -#define ERTS_SOMEONE_IS_CRASH_DUMPING erts_writing_erl_crash_dump -#define ERTS_IS_CRASH_DUMPING erts_writing_erl_crash_dump -#endif /* Deal with memcpy() vs bcopy() etc. 
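The new erts_align_attribute() on the Eterm/Uint/Sint typedefs pins the term types to word alignment, the property the tagged-word scheme relies on: any pointer stored in a boxed term has zero low bits, so those bits can carry a tag. A hedged illustration (generic GCC/Clang attribute; TAG_BOXED is an illustrative value, not ERTS's actual tag layout):

    #include <assert.h>
    #include <stdint.h>

    /* Word-aligned term type: boxed values store a pointer whose low
     * bits are guaranteed zero by alignment, leaving room for a tag. */
    typedef uintptr_t term_t __attribute__((aligned(sizeof(uintptr_t))));

    #define TAG_MASK  ((uintptr_t)0x3)
    #define TAG_BOXED ((uintptr_t)0x2)  /* illustrative, not ERTS's value */

    static term_t make_boxed(const term_t *p)
    {
        assert(((uintptr_t)p & TAG_MASK) == 0); /* holds by alignment */
        return (term_t)((uintptr_t)p | TAG_BOXED);
    }

    static const term_t *boxed_val(term_t t)
    {
        return (const term_t *)(t & ~TAG_MASK);
    }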
We want to use the mem*() functions, but be able to fall back on bcopy() etc on systems that don't have @@ -600,8 +575,6 @@ __decl_noreturn void __noreturn erts_exit(int n, char*, ...); erts_exit(ERTS_ABORT_EXIT, "%s:%d:%s(): Internal error: %s\n", \ __FILE__, __LINE__, __func__, What) -Eterm erts_check_io_info(void *p); - UWord erts_sys_get_page_size(void); /* Size of misc memory allocated from system dependent code */ @@ -643,7 +616,7 @@ int erts_send_info_to_logger_nogl(erts_dsprintf_buf_t *); int erts_send_warning_to_logger_nogl(erts_dsprintf_buf_t *); int erts_send_error_to_logger_nogl(erts_dsprintf_buf_t *); int erts_send_info_to_logger_str_nogl(char *); -/* needed by erl_smp.h (declared above) +/* needed by erl_threads.h (declared above) int erts_send_warning_to_logger_str_nogl(char *); */ int erts_send_error_to_logger_str_nogl(char *); @@ -764,11 +737,7 @@ extern char *erts_sys_ddll_error(int code); /* * System interfaces for startup. */ -void erts_sys_schedule_interrupt(int set); -#ifdef ERTS_SMP -void erts_sys_schedule_interrupt_timed(int, ErtsMonotonicTime); void erts_sys_main_thread(void); -#endif extern int erts_sys_prepare_crash_dump(int secs); extern void erts_sys_pre_init(void); @@ -821,7 +790,7 @@ void fini_getenv_state(GETENV_STATE *); typedef struct { int no_used_fds; int no_driver_select_structs; - int no_driver_event_structs; + int no_enif_select_structs; } ErtsCheckIoDebugInfo; int erts_check_io_debug(ErtsCheckIoDebugInfo *ip); @@ -855,13 +824,11 @@ int erts_sys_unsetenv(char *key); char *erts_read_env(char *key); void erts_free_read_env(void *value); -#if defined(ERTS_SMP) #if defined(ERTS_THR_HAVE_SIG_FUNCS) && !defined(ETHR_UNUSABLE_SIGUSRX) extern void sys_thr_resume(erts_tid_t tid); extern void sys_thr_suspend(erts_tid_t tid); #define ERTS_SYS_SUSPEND_SIGNAL SIGUSR2 #endif -#endif /* utils.c */ @@ -1025,144 +992,6 @@ erts_refc_read(erts_refc_t *refcp, erts_aint_t min_val) #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ -typedef erts_smp_atomic_t erts_smp_refc_t; - -ERTS_GLB_INLINE void erts_smp_refc_init(erts_smp_refc_t *refcp, erts_aint_t val); -ERTS_GLB_INLINE void erts_smp_refc_inc(erts_smp_refc_t *refcp, erts_aint_t min_val); -ERTS_GLB_INLINE erts_aint_t erts_smp_refc_inc_unless(erts_smp_refc_t *refcp, - erts_aint_t unless_val, - erts_aint_t min_val); -ERTS_GLB_INLINE erts_aint_t erts_smp_refc_inctest(erts_smp_refc_t *refcp, - erts_aint_t min_val); -ERTS_GLB_INLINE void erts_smp_refc_dec(erts_smp_refc_t *refcp, erts_aint_t min_val); -ERTS_GLB_INLINE erts_aint_t erts_smp_refc_dectest(erts_smp_refc_t *refcp, - erts_aint_t min_val); -ERTS_GLB_INLINE void erts_smp_refc_add(erts_smp_refc_t *refcp, erts_aint_t diff, - erts_aint_t min_val); -ERTS_GLB_INLINE erts_aint_t erts_smp_refc_read(erts_smp_refc_t *refcp, - erts_aint_t min_val); - -#if ERTS_GLB_INLINE_INCL_FUNC_DEF - -ERTS_GLB_INLINE void -erts_smp_refc_init(erts_smp_refc_t *refcp, erts_aint_t val) -{ - erts_smp_atomic_init_nob((erts_smp_atomic_t *) refcp, val); -} - -ERTS_GLB_INLINE void -erts_smp_refc_inc(erts_smp_refc_t *refcp, erts_aint_t min_val) -{ -#ifdef ERTS_REFC_DEBUG - erts_aint_t val = erts_smp_atomic_inc_read_nob((erts_smp_atomic_t *) refcp); - if (val < min_val) - erts_exit(ERTS_ABORT_EXIT, - "erts_smp_refc_inc(): Bad refc found (refc=%ld < %ld)!\n", - val, min_val); -#else - erts_smp_atomic_inc_nob((erts_smp_atomic_t *) refcp); -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_smp_refc_inc_unless(erts_smp_refc_t *refcp, - erts_aint_t unless_val, - erts_aint_t min_val) -{ - erts_aint_t val = 
erts_smp_atomic_read_nob((erts_smp_atomic_t *) refcp); - while (1) { - erts_aint_t exp, new; -#ifdef ERTS_REFC_DEBUG - if (val < 0) - erts_exit(ERTS_ABORT_EXIT, - "erts_smp_refc_inc_unless(): Bad refc found (refc=%ld < %ld)!\n", - val, min_val); -#endif - if (val == unless_val) - return val; - new = val + 1; - exp = val; - val = erts_smp_atomic_cmpxchg_nob((erts_smp_atomic_t *) refcp, new, exp); - if (val == exp) - return new; - } -} - - -ERTS_GLB_INLINE erts_aint_t -erts_smp_refc_inctest(erts_smp_refc_t *refcp, erts_aint_t min_val) -{ - erts_aint_t val = erts_smp_atomic_inc_read_nob((erts_smp_atomic_t *) refcp); -#ifdef ERTS_REFC_DEBUG - if (val < min_val) - erts_exit(ERTS_ABORT_EXIT, - "erts_smp_refc_inctest(): Bad refc found (refc=%ld < %ld)!\n", - val, min_val); -#endif - return val; -} - -ERTS_GLB_INLINE void -erts_smp_refc_dec(erts_smp_refc_t *refcp, erts_aint_t min_val) -{ -#ifdef ERTS_REFC_DEBUG - erts_aint_t val = erts_smp_atomic_dec_read_nob((erts_smp_atomic_t *) refcp); - if (val < min_val) - erts_exit(ERTS_ABORT_EXIT, - "erts_smp_refc_dec(): Bad refc found (refc=%ld < %ld)!\n", - val, min_val); -#else - erts_smp_atomic_dec_nob((erts_smp_atomic_t *) refcp); -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_smp_refc_dectest(erts_smp_refc_t *refcp, erts_aint_t min_val) -{ - erts_aint_t val = erts_smp_atomic_dec_read_nob((erts_smp_atomic_t *) refcp); -#ifdef ERTS_REFC_DEBUG - if (val < min_val) - erts_exit(ERTS_ABORT_EXIT, - "erts_smp_refc_dectest(): Bad refc found (refc=%ld < %ld)!\n", - val, min_val); -#endif - return val; -} - -ERTS_GLB_INLINE void -erts_smp_refc_add(erts_smp_refc_t *refcp, erts_aint_t diff, erts_aint_t min_val) -{ -#ifdef ERTS_REFC_DEBUG - erts_aint_t val = erts_smp_atomic_add_read_nob((erts_smp_atomic_t *) refcp, diff); - if (val < min_val) - erts_exit(ERTS_ABORT_EXIT, - "erts_smp_refc_add(%ld): Bad refc found (refc=%ld < %ld)!\n", - diff, val, min_val); -#else - erts_smp_atomic_add_nob((erts_smp_atomic_t *) refcp, diff); -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_smp_refc_read(erts_smp_refc_t *refcp, erts_aint_t min_val) -{ - erts_aint_t val = erts_smp_atomic_read_nob((erts_smp_atomic_t *) refcp); -#ifdef ERTS_REFC_DEBUG - if (val < min_val) - erts_exit(ERTS_ABORT_EXIT, - "erts_smp_refc_read(): Bad refc found (refc=%ld < %ld)!\n", - val, min_val); -#endif - return val; -} - -#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ - - -#ifdef ERTS_ENABLE_KERNEL_POLL -extern int erts_use_kernel_poll; -#endif - #define sys_memcpy(s1,s2,n) memcpy(s1,s2,n) #define sys_memmove(s1,s2,n) memmove(s1,s2,n) #define sys_memcmp(s1,s2,n) memcmp(s1,s2,n) diff --git a/erts/emulator/beam/trace_instrs.tab b/erts/emulator/beam/trace_instrs.tab new file mode 100644 index 0000000000..3eee81c053 --- /dev/null +++ b/erts/emulator/beam/trace_instrs.tab @@ -0,0 +1,168 @@ +// -*- c -*- +// +// %CopyrightBegin% +// +// Copyright Ericsson AB 2017. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
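The erts_smp_refc_* family deleted above duplicated the plain erts_refc_* one, and both follow the same shape: a bare atomic operation in the fast path, upgraded to a read-back plus bounds check that aborts the VM when reference-count debugging is compiled in. In C11 atomics the pattern reduces to roughly this (refc_t, refc_dec and REFC_DEBUG are hypothetical stand-ins):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef atomic_long refc_t;

    static void refc_dec(refc_t *refcp, long min_val)
    {
    #ifdef REFC_DEBUG
        long val = atomic_fetch_sub(refcp, 1) - 1; /* new value */
        if (val < min_val) {
            fprintf(stderr, "bad refc %ld < %ld\n", val, min_val);
            abort();
        }
    #else
        atomic_fetch_sub(refcp, 1); /* no read-back in the fast path */
    #endif
    }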
+// +// %CopyrightEnd% +// + +return_trace() { + ErtsCodeMFA* mfa = (ErtsCodeMFA *)(E[0]); + + SWAPOUT; /* Needed for shared heap */ + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); + erts_trace_return(c_p, mfa, r(0), ERTS_TRACER_FROM_ETERM(E+1)/* tracer */); + ERTS_REQ_PROC_MAIN_LOCK(c_p); + SWAPIN; + c_p->cp = NULL; + SET_I((BeamInstr *) cp_val(E[2])); + E += 3; + Goto(*I); + //| -no_next +} + +i_generic_breakpoint() { + BeamInstr real_I; + HEAVY_SWAPOUT; + real_I = erts_generic_breakpoint(c_p, erts_code_to_codeinfo(I), reg); + HEAVY_SWAPIN; + ASSERT(VALID_INSTR(real_I)); + Goto(real_I); + //| -no_next +} + +i_return_time_trace() { + BeamInstr *pc = (BeamInstr *) (UWord) E[0]; + SWAPOUT; + erts_trace_time_return(c_p, erts_code_to_codeinfo(pc)); + SWAPIN; + c_p->cp = NULL; + SET_I((BeamInstr *) cp_val(E[1])); + E += 2; + Goto(*I); + //| -no_next +} + +i_return_to_trace() { + if (IS_TRACED_FL(c_p, F_TRACE_RETURN_TO)) { + Uint *cpp = (Uint*) E; + for(;;) { + ASSERT(is_CP(*cpp)); + if (IsOpCode(*cp_val(*cpp), return_trace)) { + do + ++cpp; + while (is_not_CP(*cpp)); + cpp += 2; + } else if (IsOpCode(*cp_val(*cpp), i_return_to_trace)) { + do + ++cpp; + while (is_not_CP(*cpp)); + } else { + break; + } + } + SWAPOUT; /* Needed for shared heap */ + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); + erts_trace_return_to(c_p, cp_val(*cpp)); + ERTS_REQ_PROC_MAIN_LOCK(c_p); + SWAPIN; + } + c_p->cp = NULL; + SET_I((BeamInstr *) cp_val(E[0])); + E += 1; + Goto(*I); + //| -no_next +} + +i_yield() { + /* This is safe as long as REDS_IN(c_p) is never stored + * in c_p->arg_reg[0]. It is currently stored in c_p->def_arg_reg[5]. + */ + c_p->arg_reg[0] = am_true; + c_p->arity = 1; /* One living register (the 'true' return value) */ + SWAPOUT; + $SET_CP_I_ABS($NEXT_INSTRUCTION); + c_p->current = NULL; + goto do_schedule; + //| -no_next +} + +i_hibernate() { + HEAVY_SWAPOUT; + if (erts_hibernate(c_p, reg)) { + FCALLS = c_p->fcalls; + c_p->flags &= ~F_HIBERNATE_SCHED; + goto do_schedule; + } else { + HEAVY_SWAPIN; + I = handle_error(c_p, I, reg, &bif_export[BIF_hibernate_3]->info.mfa); + goto post_error_handling; + } + //| -no_next +} + +// This is optimised as an instruction because +// it has to be very very fast. + +i_perf_counter() { + ErtsSysPerfCounter ts; + + ts = erts_sys_perf_counter(); + if (IS_SSMALL(ts)) { + r(0) = make_small((Sint)ts); + } else { + $GC_TEST(0, ERTS_SINT64_HEAP_SIZE(ts), 0); + r(0) = make_big(HTOP); +#if defined(ARCH_32) + if (ts >= (((Uint64) 1) << 32)) { + *HTOP = make_pos_bignum_header(2); + BIG_DIGIT(HTOP, 0) = (Uint) (ts & ((Uint) 0xffffffff)); + BIG_DIGIT(HTOP, 1) = (Uint) ((ts >> 32) & ((Uint) 0xffffffff)); + HTOP += 3; + } + else +#endif + { + *HTOP = make_pos_bignum_header(1); + BIG_DIGIT(HTOP, 0) = (Uint) ts; + HTOP += 2; + } + } +} + +i_debug_breakpoint() { + HEAVY_SWAPOUT; + I = call_error_handler(c_p, erts_code_to_codemfa(I), reg, am_breakpoint); + HEAVY_SWAPIN; + if (I) { + Goto(*I); + } + goto handle_error; + //| -no_next +} + + + +// +// Special jump instruction used for tracing. Takes an absolute +// failure address. 
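i_perf_counter above also illustrates the general rule for handing a 64-bit machine value to Erlang: if it fits a tagged small it is stored immediately, otherwise it becomes a heap bignum of one or two digits, with two 32-bit digits needed on 32-bit targets. A simplified version of that size decision (SMALL_BITS assumes four tag bits, mirroring the IS_SSMALL idea; the exact width should be taken from the real term layout, not this sketch):

    #include <stdint.h>

    #define SMALL_BITS ((int)(sizeof(intptr_t) * 8 - 4))
    #define FITS_SMALL(v) \
        ((v) >= -((int64_t)1 << (SMALL_BITS - 1)) && \
         (v) <   ((int64_t)1 << (SMALL_BITS - 1)))

    /* Heap words needed on a 32-bit target: none for a small,
     * otherwise a bignum header plus one or two 32-bit digits. */
    static int heap_words_needed32(int64_t v)
    {
        if (FITS_SMALL(v))
            return 0;
        return ((uint64_t)v >> 32) ? 3 : 2;
    }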
+// + +trace_jump(Fail) { + //| -no_next + SET_I((BeamInstr *) $Fail); + Goto(*I); +} diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c index d7116bd2c3..993585be10 100644 --- a/erts/emulator/beam/utils.c +++ b/erts/emulator/beam/utils.c @@ -43,7 +43,6 @@ #include "erl_printf.h" #include "erl_threads.h" #include "erl_lock_count.h" -#include "erl_smp.h" #include "erl_time.h" #include "erl_thr_progress.h" #include "erl_thr_queue.h" @@ -1932,27 +1931,6 @@ do_allocate_logger_message(Eterm gleader, Eterm **hp, ErlOffHeap **ohp, gl_sz = IS_CONST(gleader) ? 0 : size_object(gleader); sz = sz + gl_sz; -#ifndef ERTS_SMP -#ifdef USE_THREADS - if (!erts_get_scheduler_data()) /* Must be scheduler thread */ - *p = NULL; - else -#endif - { - *p = erts_whereis_process(NULL, 0, am_error_logger, 0, 0); - if (*p) { - erts_aint32_t state = erts_smp_atomic32_read_acqb(&(*p)->state); - if (state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)) - *p = NULL; - } - } - - if (!*p) { - return NIL; - } - - /* So we have an error logger, lets build the message */ -#endif *bp = new_message_buffer(sz); *ohp = &(*bp)->off_heap; *hp = (*bp)->mem; @@ -1970,20 +1948,12 @@ static void do_send_logger_message(Eterm *hp, ErlOffHeap *ohp, ErlHeapFragment * #ifdef HARDDEBUG erts_fprintf(stderr, "%T\n", message); #endif -#ifdef ERTS_SMP { Eterm from = erts_get_current_pid(); if (is_not_internal_pid(from)) from = NIL; erts_queue_error_logger_message(from, message, bp); } -#else - { - ErtsMessage *mp = erts_alloc_message(0, NULL); - mp->data.heap_frag = bp; - erts_queue_message(p, 0, mp, message, am_system); - } -#endif } /* error_logger ! @@ -3557,7 +3527,7 @@ store_external_or_ref_(Uint **hpp, ErlOffHeap* oh, Eterm ns) if (is_external_header(*from_hp)) { ExternalThing *etp = (ExternalThing *) from_hp; ASSERT(is_external(ns)); - erts_smp_refc_inc(&etp->node->refc, 2); + erts_refc_inc(&etp->node->refc, 2); } else if (is_ordinary_ref_thing(from_hp)) return make_internal_ref(to_hp); @@ -4712,22 +4682,6 @@ void erts_interval_init(erts_interval_t *icp) { erts_atomic64_init_nob(&icp->counter.atomic, 0); -#ifdef DEBUG - icp->smp_api = 0; -#endif -} - -void -erts_smp_interval_init(erts_interval_t *icp) -{ -#ifdef ERTS_SMP - erts_interval_init(icp); -#else - icp->counter.not_atomic = 0; -#endif -#ifdef DEBUG - icp->smp_api = 1; -#endif } static ERTS_INLINE Uint64 @@ -4767,79 +4721,25 @@ ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic) Uint64 erts_step_interval_nob(erts_interval_t *icp) { - ASSERT(!icp->smp_api); return step_interval_nob(icp); } Uint64 erts_step_interval_relb(erts_interval_t *icp) { - ASSERT(!icp->smp_api); return step_interval_relb(icp); } Uint64 -erts_smp_step_interval_nob(erts_interval_t *icp) -{ - ASSERT(icp->smp_api); -#ifdef ERTS_SMP - return step_interval_nob(icp); -#else - return ++icp->counter.not_atomic; -#endif -} - -Uint64 -erts_smp_step_interval_relb(erts_interval_t *icp) -{ - ASSERT(icp->smp_api); -#ifdef ERTS_SMP - return step_interval_relb(icp); -#else - return ++icp->counter.not_atomic; -#endif -} - -Uint64 erts_ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic) { - ASSERT(!icp->smp_api); return ensure_later_interval_nob(icp, ic); } Uint64 erts_ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic) { - ASSERT(!icp->smp_api); - return ensure_later_interval_acqb(icp, ic); -} - -Uint64 -erts_smp_ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic) -{ - ASSERT(icp->smp_api); -#ifdef ERTS_SMP - return ensure_later_interval_nob(icp, ic); -#else - if 
(icp->counter.not_atomic > ic) - return icp->counter.not_atomic; - else - return ++icp->counter.not_atomic; -#endif -} - -Uint64 -erts_smp_ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic) -{ - ASSERT(icp->smp_api); -#ifdef ERTS_SMP return ensure_later_interval_acqb(icp, ic); -#else - if (icp->counter.not_atomic > ic) - return icp->counter.not_atomic; - else - return ++icp->counter.not_atomic; -#endif } /* |
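With the non-SMP fallbacks removed, erts_step_interval_*() and erts_ensure_later_interval_*() always operate on the atomic64 counter, so the two primitives reduce to a fetch-add and a compare-and-swap loop. Roughly, in C11 atomics (interval_t and the function names are sketch-only):

    #include <stdatomic.h>
    #include <stdint.h>

    typedef _Atomic uint64_t interval_t;

    static uint64_t step_interval(interval_t *icp)
    {
        return atomic_fetch_add(icp, 1) + 1; /* return the new value */
    }

    /* Make the counter at least ic+1 and report a value that is
     * now guaranteed to be later than ic. */
    static uint64_t ensure_later_interval(interval_t *icp, uint64_t ic)
    {
        uint64_t cur = atomic_load(icp);
        while (cur <= ic) {
            if (atomic_compare_exchange_weak(icp, &cur, ic + 1))
                return ic + 1;
            /* cur was reloaded by the failed CAS; retry */
        }
        return cur;
    }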