 105 files changed, 3202 insertions(+), 4547 deletions(-)
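The change is almost entirely mechanical: with only the SMP-enabled runtime left, each erts_smp_* symbol is renamed to its plain erts_* counterpart. As a hypothetical illustration only (the commit renames every call site directly and keeps no such shim), the mapping the diff applies throughout could be written as:

/* Hypothetical compatibility shim, for illustration -- not part of this commit.
 * Every mapping below is taken verbatim from old/new pairs in the diff. */
#define erts_smp_mtx_t          erts_mtx_t
#define erts_smp_mtx_init       erts_mtx_init
#define erts_smp_mtx_lock       erts_mtx_lock
#define erts_smp_mtx_unlock     erts_mtx_unlock
#define erts_smp_rwmtx_t        erts_rwmtx_t
#define erts_smp_rwmtx_rlock    erts_rwmtx_rlock
#define erts_smp_rwmtx_runlock  erts_rwmtx_runlock
#define erts_smp_atomic_t       erts_atomic_t
#define erts_smp_atomic32_t     erts_atomic32_t
#define erts_smp_refc_t         erts_refc_t
#define erts_smp_proc_lock      erts_proc_lock
#define erts_smp_proc_unlock    erts_proc_unlock
#define ERTS_SMP_LC_ASSERT      ERTS_LC_ASSERT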
diff --git a/erts/emulator/beam/atom.c b/erts/emulator/beam/atom.c index 8023414821..1b691386eb 100644 --- a/erts/emulator/beam/atom.c +++ b/erts/emulator/beam/atom.c @@ -34,20 +34,18 @@ IndexTable erts_atom_table; /* The index table */ -#include "erl_smp.h" +static erts_rwmtx_t atom_table_lock; -static erts_smp_rwmtx_t atom_table_lock; - -#define atom_read_lock() erts_smp_rwmtx_rlock(&atom_table_lock) -#define atom_read_unlock() erts_smp_rwmtx_runlock(&atom_table_lock) -#define atom_write_lock() erts_smp_rwmtx_rwlock(&atom_table_lock) -#define atom_write_unlock() erts_smp_rwmtx_rwunlock(&atom_table_lock) +#define atom_read_lock() erts_rwmtx_rlock(&atom_table_lock) +#define atom_read_unlock() erts_rwmtx_runlock(&atom_table_lock) +#define atom_write_lock() erts_rwmtx_rwlock(&atom_table_lock) +#define atom_write_unlock() erts_rwmtx_rwunlock(&atom_table_lock) #if 0 #define ERTS_ATOM_PUT_OPS_STAT #endif #ifdef ERTS_ATOM_PUT_OPS_STAT -static erts_smp_atomic_t atom_put_ops; +static erts_atomic_t atom_put_ops; #endif /* Functions for allocating space for the ext of atoms. We do not @@ -76,7 +74,7 @@ void atom_info(fmtfn_t to, void *to_arg) index_info(to, to_arg, &erts_atom_table); #ifdef ERTS_ATOM_PUT_OPS_STAT erts_print(to, to_arg, "atom_put_ops: %ld\n", - erts_smp_atomic_read_nob(&atom_put_ops)); + erts_atomic_read_nob(&atom_put_ops)); #endif if (lock) @@ -246,7 +244,7 @@ erts_atom_put_index(const byte *name, int len, ErtsAtomEncoding enc, int trunc) int aix; #ifdef ERTS_ATOM_PUT_OPS_STAT - erts_smp_atomic_inc_nob(&atom_put_ops); + erts_atomic_inc_nob(&atom_put_ops); #endif if (tlen < 0) { @@ -421,16 +419,16 @@ init_atom_table(void) HashFunctions f; int i; Atom a; - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; - rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED; + rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; + rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED; #ifdef ERTS_ATOM_PUT_OPS_STAT - erts_smp_atomic_init_nob(&atom_put_ops, 0); + erts_atomic_init_nob(&atom_put_ops, 0); #endif - erts_smp_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab", NIL, + erts_rwmtx_init_opt(&atom_table_lock, &rwmtx_opt, "atom_tab", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); f.hash = (H_FUN) atom_hash; @@ -493,4 +491,4 @@ Uint erts_get_atom_limit(void) { return erts_atom_table.limit; -}
\ No newline at end of file +} diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c index fb22d28af0..b78f617560 100644 --- a/erts/emulator/beam/beam_bif_load.c +++ b/erts/emulator/beam/beam_bif_load.c @@ -50,7 +50,7 @@ static struct { Eterm module; - erts_smp_mtx_t mtx; + erts_mtx_t mtx; Export *pending_purge_lambda; Eterm *sprocs; Eterm def_sprocs[10]; @@ -68,9 +68,9 @@ Process *erts_code_purger = NULL; #ifdef ERTS_DIRTY_SCHEDULERS Process *erts_dirty_process_code_checker; #endif -erts_smp_atomic_t erts_copy_literal_area__; +erts_atomic_t erts_copy_literal_area__; #define ERTS_SET_COPY_LITERAL_AREA(LA) \ - erts_smp_atomic_set_nob(&erts_copy_literal_area__, \ + erts_atomic_set_nob(&erts_copy_literal_area__, \ (erts_aint_t) (LA)) Process *erts_literal_area_collector = NULL; @@ -81,7 +81,7 @@ struct ErtsLiteralAreaRef_ { }; struct { - erts_smp_mtx_t mtx; + erts_mtx_t mtx; ErtsLiteralAreaRef *first; ErtsLiteralAreaRef *last; } release_literal_areas; @@ -97,7 +97,7 @@ init_purge_state(void) { purge_state.module = THE_NON_VALUE; - erts_smp_mtx_init(&purge_state.mtx, "purge_state", NIL, + erts_mtx_init(&purge_state.mtx, "purge_state", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); purge_state.pending_purge_lambda = @@ -119,12 +119,12 @@ init_purge_state(void) void erts_beam_bif_load_init(void) { - erts_smp_mtx_init(&release_literal_areas.mtx, "release_literal_areas", NIL, + erts_mtx_init(&release_literal_areas.mtx, "release_literal_areas", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); release_literal_areas.first = NULL; release_literal_areas.last = NULL; - erts_smp_atomic_init_nob(&erts_copy_literal_area__, + erts_atomic_init_nob(&erts_copy_literal_area__, (erts_aint_t) NULL); init_purge_state(); @@ -172,8 +172,8 @@ BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3) BIF_P, BIF_ARG_1, BIF_ARG_2, BIF_ARG_3); } - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); modp = erts_get_module(mod, erts_active_code_ix()); @@ -197,8 +197,8 @@ BIF_RETTYPE code_make_stub_module_3(BIF_ALIST_3) else { erts_abort_staging_code_ix(); } - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_release_code_write_permission(); return res; #endif @@ -399,8 +399,8 @@ finish_loading_1(BIF_ALIST_1) erts_is_default_trace_enabled() || IF_HIPE(hipe_need_blocking(p[i].modp))) { /* tracing or hipe need thread blocking */ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); is_blocking = 1; break; } @@ -486,8 +486,8 @@ staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking, erts_free(ERTS_ALC_T_LOADER_TMP, mods); } if (is_blocking) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } erts_release_code_write_permission(); return res; @@ -528,11 +528,11 @@ static void smp_code_ix_commiter(void* null) committer_state.stager = NULL; #endif erts_release_code_write_permission(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); if (!ERTS_PROC_IS_EXITING(p)) { erts_resume(p, ERTS_PROC_LOCK_STATUS); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + 
erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); erts_proc_dec_refc(p); } @@ -631,7 +631,7 @@ BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2) res = erts_check_process_code(rp, BIF_ARG_2, &reds, BIF_P->fcalls); if (BIF_P != rp) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); ASSERT(is_value(res)); @@ -674,8 +674,8 @@ BIF_RETTYPE delete_module_1(BIF_ALIST_1) modp->curr.num_traced_exports > 0 || IF_HIPE(hipe_need_blocking(modp))) { /* tracing or hipe need to go single threaded */ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); is_blocking = 1; if (modp->curr.num_breakpoints) { erts_clear_module_break(modp); @@ -779,16 +779,16 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2) /* ToDo: Use code_ix staging instead of thread blocking */ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); code_ix = erts_active_code_ix(); modp = erts_get_module(BIF_ARG_1, code_ix); if (!modp || !modp->on_load || !modp->on_load->code_hdr) { error: - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_release_code_write_permission(); BIF_ERROR(BIF_P, BADARG); } @@ -859,8 +859,8 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2) ep->beam[1] = 0; } } - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_release_code_write_permission(); BIF_RET(am_true); } @@ -921,9 +921,9 @@ erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed * any other heap than the message it self. 
*/ - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ); for (msgp = c_p->msg.first; msgp; msgp = msgp->next) { ErlHeapFragment *hf; @@ -1325,9 +1325,9 @@ static void complete_literal_area_switch(void *literal_area) { Process *p = erts_literal_area_collector; - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); erts_resume(p, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); if (literal_area) erts_release_literal_area((ErtsLiteralArea *) literal_area); } @@ -1340,7 +1340,7 @@ BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0) if (BIF_P != erts_literal_area_collector) BIF_ERROR(BIF_P, EXC_NOTSUP); - erts_smp_mtx_lock(&release_literal_areas.mtx); + erts_mtx_lock(&release_literal_areas.mtx); la_ref = release_literal_areas.first; if (la_ref) { @@ -1349,7 +1349,7 @@ BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0) release_literal_areas.last = NULL; } - erts_smp_mtx_unlock(&release_literal_areas.mtx); + erts_mtx_unlock(&release_literal_areas.mtx); unused_la = ERTS_COPY_LITERAL_AREA(); @@ -1407,7 +1407,7 @@ erts_purge_state_add_fun(ErlFunEntry *fe) Export * erts_suspend_process_on_pending_purge_lambda(Process *c_p, ErlFunEntry* fe) { - erts_smp_mtx_lock(&purge_state.mtx); + erts_mtx_lock(&purge_state.mtx); if (purge_state.module == fe->module) { /* * The process c_p is about to call a fun in the code @@ -1433,7 +1433,7 @@ erts_suspend_process_on_pending_purge_lambda(Process *c_p, ErlFunEntry* fe) erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL); ERTS_VBUMP_ALL_REDS(c_p); } - erts_smp_mtx_unlock(&purge_state.mtx); + erts_mtx_unlock(&purge_state.mtx); return purge_state.pending_purge_lambda; } @@ -1443,9 +1443,9 @@ finalize_purge_operation(Process *c_p, int succeded) Uint ix; if (c_p) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); - erts_smp_mtx_lock(&purge_state.mtx); + erts_mtx_lock(&purge_state.mtx); ASSERT(purge_state.module != THE_NON_VALUE); @@ -1461,14 +1461,14 @@ finalize_purge_operation(Process *c_p, int succeded) ERTS_PROC_LOCK_STATUS); if (rp) { erts_resume(rp, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); } } - erts_smp_mtx_unlock(&purge_state.mtx); + erts_mtx_unlock(&purge_state.mtx); if (c_p) - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); if (purge_state.sprocs != &purge_state.def_sprocs[0]) { erts_free(ERTS_ALC_T_PURGE_DATA, purge_state.sprocs); @@ -1494,9 +1494,9 @@ static void resume_purger(void *unused) { Process *p = erts_code_purger; - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); erts_resume(p, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } static void @@ -1567,9 +1567,9 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) else { BeamInstr* code; BeamInstr* end; - erts_smp_mtx_lock(&purge_state.mtx); + erts_mtx_lock(&purge_state.mtx); purge_state.module = BIF_ARG_1; - erts_smp_mtx_unlock(&purge_state.mtx); + erts_mtx_unlock(&purge_state.mtx); res = am_true; code = (BeamInstr*) modp->old.code_hdr; end = (BeamInstr *)((char *)code + modp->old.code_length); 
@@ -1680,8 +1680,8 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) || IF_HIPE(hipe_purge_need_blocking(modp))) { /* ToDo: Do unload nif without blocking */ erts_rwunlock_old_code(code_ix); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); is_blocking = 1; erts_rwlock_old_code(code_ix); if (modp->old.nif) { @@ -1719,8 +1719,8 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) erts_rwunlock_old_code(code_ix); } if (is_blocking) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); } erts_release_code_write_permission(); @@ -1733,7 +1733,7 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) sizeof(ErtsLiteralAreaRef)); ref->literal_area = literals; ref->next = NULL; - erts_smp_mtx_lock(&release_literal_areas.mtx); + erts_mtx_lock(&release_literal_areas.mtx); if (release_literal_areas.last) { release_literal_areas.last->next = ref; release_literal_areas.last = ref; @@ -1742,7 +1742,7 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2) release_literal_areas.first = ref; release_literal_areas.last = ref; } - erts_smp_mtx_unlock(&release_literal_areas.mtx); + erts_mtx_unlock(&release_literal_areas.mtx); erts_queue_message(erts_literal_area_collector, 0, erts_alloc_message(0, NULL), @@ -1779,7 +1779,7 @@ delete_code(Module* modp) } else if (ep->beam[0] == (BeamInstr) BeamOp(op_i_generic_breakpoint)) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); ASSERT(modp->curr.num_traced_exports > 0); DBG_TRACE_MFA_P(&ep->info.mfa, "export trace cleared, code_ix=%d", code_ix); diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c index 5b24db2e24..79a75f6698 100644 --- a/erts/emulator/beam/beam_bp.c +++ b/erts/emulator/beam/beam_bp.c @@ -47,14 +47,14 @@ #define Free(P) erts_free(ERTS_ALC_T_BPD, (P)) #if defined(ERTS_ENABLE_LOCK_CHECK) -# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \ +# define ERTS_REQ_PROC_MAIN_LOCK(P) \ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN,\ __FILE__, __LINE__) -# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \ +# define ERTS_UNREQ_PROC_MAIN_LOCK(P) \ if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN) #else -# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) -# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) +# define ERTS_REQ_PROC_MAIN_LOCK(P) +# define ERTS_UNREQ_PROC_MAIN_LOCK(P) #endif #define ERTS_BPF_LOCAL_TRACE 0x01 @@ -73,10 +73,10 @@ extern BeamInstr beam_return_trace[1]; /* OpCode(i_return_trace) */ extern BeamInstr beam_exception_trace[1]; /* OpCode(i_exception_trace) */ extern BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */ -erts_smp_atomic32_t erts_active_bp_index; -erts_smp_atomic32_t erts_staging_bp_index; +erts_atomic32_t erts_active_bp_index; +erts_atomic32_t erts_staging_bp_index; #ifdef ERTS_DIRTY_SCHEDULERS -erts_smp_mtx_t erts_dirty_bp_ix_mtx; +erts_mtx_t erts_dirty_bp_ix_mtx; #endif /* @@ -96,7 +96,7 @@ acquire_bp_sched_ix(Process *c_p) ASSERT(esdp); #ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_SCHEDULER_IS_DIRTY(esdp)) { - erts_smp_mtx_lock(&erts_dirty_bp_ix_mtx); + erts_mtx_lock(&erts_dirty_bp_ix_mtx); return (Uint32) erts_no_schedulers; } #endif @@ -108,7 +108,7 @@ release_bp_sched_ix(Uint32 ix) { #ifdef ERTS_DIRTY_SCHEDULERS if (ix == (Uint32) erts_no_schedulers) - erts_smp_mtx_unlock(&erts_dirty_bp_ix_mtx); + 
erts_mtx_unlock(&erts_dirty_bp_ix_mtx); #endif } @@ -162,10 +162,10 @@ static void bp_hash_delete(bp_time_hash_t *hash); void erts_bp_init(void) { - erts_smp_atomic32_init_nob(&erts_active_bp_index, 0); - erts_smp_atomic32_init_nob(&erts_staging_bp_index, 1); + erts_atomic32_init_nob(&erts_active_bp_index, 0); + erts_atomic32_init_nob(&erts_staging_bp_index, 1); #ifdef ERTS_DIRTY_SCHEDULERS - erts_smp_mtx_init(&erts_dirty_bp_ix_mtx, "dirty_break_point_index", NIL, + erts_mtx_init(&erts_dirty_bp_ix_mtx, "dirty_break_point_index", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG); #endif } @@ -306,7 +306,7 @@ erts_consolidate_bp_data(BpFunctions* f, int local) Uint i; Uint n = f->matched; - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); + ERTS_LC_ASSERT(erts_has_code_write_permission()); for (i = 0; i < n; i++) { consolidate_bp_data(fs[i].mod, fs[i].ci, local); @@ -318,7 +318,7 @@ erts_consolidate_bif_bp_data(void) { int i; - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); + ERTS_LC_ASSERT(erts_has_code_write_permission()); for (i = 0; i < BIF_SIZE; i++) { Export *ep = bif_export[i]; consolidate_bp_data(0, &ep->info, 0); @@ -393,17 +393,17 @@ consolidate_bp_data(Module* modp, ErtsCodeInfo *ci, int local) } if (flags & ERTS_BPF_META_TRACE) { dst->meta_tracer = src->meta_tracer; - erts_smp_refc_inc(&dst->meta_tracer->refc, 1); + erts_refc_inc(&dst->meta_tracer->refc, 1); dst->meta_ms = src->meta_ms; MatchSetRef(dst->meta_ms); } if (flags & ERTS_BPF_COUNT) { dst->count = src->count; - erts_smp_refc_inc(&dst->count->refc, 1); + erts_refc_inc(&dst->count->refc, 1); } if (flags & ERTS_BPF_TIME_TRACE) { dst->time = src->time; - erts_smp_refc_inc(&dst->time->refc, 1); + erts_refc_inc(&dst->time->refc, 1); ASSERT(dst->time->hash); } } @@ -414,8 +414,8 @@ erts_commit_staged_bp(void) ErtsBpIndex staging = erts_staging_bp_ix(); ErtsBpIndex active = erts_active_bp_ix(); - erts_smp_atomic32_set_nob(&erts_active_bp_index, staging); - erts_smp_atomic32_set_nob(&erts_staging_bp_index, active); + erts_atomic32_set_nob(&erts_active_bp_index, staging); + erts_atomic32_set_nob(&erts_staging_bp_index, active); } void @@ -575,7 +575,7 @@ erts_clear_mtrace_bif(ErtsCodeInfo *ci) void erts_clear_debug_break(BpFunctions* f) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); clear_break(f, ERTS_BPF_DEBUG); } @@ -603,7 +603,7 @@ erts_clear_module_break(Module *modp) { Uint n; Uint i; - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); ASSERT(modp); code_hdr = modp->curr.code_hdr; if (!code_hdr) { @@ -633,7 +633,7 @@ erts_clear_module_break(Module *modp) { void erts_clear_export_break(Module* modp, ErtsCodeInfo *ci) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); clear_function_break(ci, ERTS_BPF_ALL); erts_commit_staged_bp(); @@ -679,12 +679,12 @@ erts_generic_breakpoint(Process* c_p, ErtsCodeInfo *info, Eterm* reg) if (bp_flags & ERTS_BPF_META_TRACE) { ErtsTracer old_tracer, new_tracer; - old_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer); + old_tracer = erts_atomic_read_nob(&bp->meta_tracer->tracer); new_tracer = do_call_trace(c_p, info, reg, 1, bp->meta_ms, old_tracer); if (!ERTS_TRACER_COMPARE(new_tracer, old_tracer)) { - if (old_tracer == erts_smp_atomic_cmpxchg_acqb( + if (old_tracer == erts_atomic_cmpxchg_acqb( &bp->meta_tracer->tracer, (erts_aint_t)new_tracer, (erts_aint_t)old_tracer)) 
{ @@ -696,7 +696,7 @@ erts_generic_breakpoint(Process* c_p, ErtsCodeInfo *info, Eterm* reg) } if (bp_flags & ERTS_BPF_COUNT_ACTIVE) { - erts_smp_atomic_inc_nob(&bp->count->acount); + erts_atomic_inc_nob(&bp->count->acount); } if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE) { @@ -753,7 +753,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I) GenericBpData* bp = NULL; Uint bp_flags = 0; - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); g = ep->info.u.gen_bp; if (g) { @@ -777,7 +777,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I) if (bp_flags & ERTS_BPF_META_TRACE) { ErtsTracer old_tracer; - meta_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer); + meta_tracer = erts_atomic_read_nob(&bp->meta_tracer->tracer); old_tracer = meta_tracer; flags_meta = erts_call_trace(p, &ep->info, bp->meta_ms, args, 0, &meta_tracer); @@ -785,7 +785,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I) if (!ERTS_TRACER_COMPARE(old_tracer, meta_tracer)) { ErtsTracer new_tracer = erts_tracer_nil; erts_tracer_update(&new_tracer, meta_tracer); - if (old_tracer == erts_smp_atomic_cmpxchg_acqb( + if (old_tracer == erts_atomic_cmpxchg_acqb( &bp->meta_tracer->tracer, (erts_aint_t)new_tracer, (erts_aint_t)old_tracer)) { @@ -912,9 +912,9 @@ erts_bif_trace_epilogue(Process *p, Eterm result, int applying, } } if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) { - erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); ERTS_TRACE_FLAGS(p) |= F_EXCEPTION_TRACE; - erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR); } } } else { @@ -937,7 +937,7 @@ erts_bif_trace_epilogue(Process *p, Eterm result, int applying, } } } - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); return result; } @@ -982,9 +982,9 @@ do_call_trace(Process* c_p, ErtsCodeInfo* info, Eterm* reg, c_p->cp = (BeamInstr *) cp_val(*cpp); ASSERT(is_CP(*cpp)); } - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); flags = erts_call_trace(c_p, info, ms, reg, local, &tracer); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); if (cpp) { c_p->cp = cp_save; } @@ -1024,9 +1024,9 @@ do_call_trace(Process* c_p, ErtsCodeInfo* info, Eterm* reg, the funcinfo is above i. */ c_p->cp = (flags & MATCH_SET_EXCEPTION_TRACE) ? 
beam_exception_trace : beam_return_trace; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); ERTS_TRACE_FLAGS(c_p) |= F_EXCEPTION_TRACE; - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); } else c_p->stop = E; return tracer; @@ -1043,7 +1043,7 @@ erts_trace_time_call(Process* c_p, ErtsCodeInfo *info, BpDataTime* bdt) Uint32 six = acquire_bp_sched_ix(c_p); ASSERT(c_p); - ASSERT(erts_smp_atomic32_read_acqb(&c_p->state) & (ERTS_PSFLG_RUNNING + ASSERT(erts_atomic32_read_acqb(&c_p->state) & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_DIRTY_RUNNING)); /* get previous timestamp and breakpoint @@ -1124,7 +1124,7 @@ erts_trace_time_return(Process *p, ErtsCodeInfo *ci) Uint32 six = acquire_bp_sched_ix(p); ASSERT(p); - ASSERT(erts_smp_atomic32_read_acqb(&p->state) & (ERTS_PSFLG_RUNNING + ASSERT(erts_atomic32_read_acqb(&p->state) & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_DIRTY_RUNNING)); /* get previous timestamp and breakpoint @@ -1206,7 +1206,7 @@ erts_is_mtrace_break(ErtsCodeInfo *ci, Binary **match_spec_ret, *match_spec_ret = bp->meta_ms; } if (tracer_ret) { - *tracer_ret = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer); + *tracer_ret = erts_atomic_read_nob(&bp->meta_tracer->tracer); } return 1; } @@ -1220,7 +1220,7 @@ erts_is_count_break(ErtsCodeInfo *ci, Uint *count_ret) if (bp) { if (count_ret) { - *count_ret = (Uint) erts_smp_atomic_read_nob(&bp->count->acount); + *count_ret = (Uint) erts_atomic_read_nob(&bp->count->acount); } return 1; } @@ -1500,7 +1500,7 @@ set_function_break(ErtsCodeInfo *ci, Binary *match_spec, Uint break_flags, Uint common; ErtsBpIndex ix = erts_staging_bp_ix(); - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); + ERTS_LC_ASSERT(erts_has_code_write_permission()); g = ci->u.gen_bp; if (g == 0) { int i; @@ -1532,7 +1532,7 @@ set_function_break(ErtsCodeInfo *ci, Binary *match_spec, Uint break_flags, bp->flags &= ~ERTS_BPF_COUNT_ACTIVE; } else { bp->flags |= ERTS_BPF_COUNT_ACTIVE; - erts_smp_atomic_set_nob(&bp->count->acount, 0); + erts_atomic_set_nob(&bp->count->acount, 0); } ASSERT((bp->flags & ~ERTS_BPF_ALL) == 0); return; @@ -1566,17 +1566,17 @@ set_function_break(ErtsCodeInfo *ci, Binary *match_spec, Uint break_flags, MatchSetRef(match_spec); bp->meta_ms = match_spec; bmt = Alloc(sizeof(BpMetaTracer)); - erts_smp_refc_init(&bmt->refc, 1); + erts_refc_init(&bmt->refc, 1); erts_tracer_update(&meta_tracer, tracer); /* copy tracer */ - erts_smp_atomic_init_nob(&bmt->tracer, (erts_aint_t)meta_tracer); + erts_atomic_init_nob(&bmt->tracer, (erts_aint_t)meta_tracer); bp->meta_tracer = bmt; } else if (break_flags & ERTS_BPF_COUNT) { BpCount* bcp; ASSERT((bp->flags & ERTS_BPF_COUNT) == 0); bcp = Alloc(sizeof(BpCount)); - erts_smp_refc_init(&bcp->refc, 1); - erts_smp_atomic_init_nob(&bcp->acount, 0); + erts_refc_init(&bcp->refc, 1); + erts_atomic_init_nob(&bcp->acount, 0); bp->count = bcp; } else if (break_flags & ERTS_BPF_TIME_TRACE) { BpDataTime* bdt; @@ -1584,7 +1584,7 @@ set_function_break(ErtsCodeInfo *ci, Binary *match_spec, Uint break_flags, ASSERT((bp->flags & ERTS_BPF_TIME_TRACE) == 0); bdt = Alloc(sizeof(BpDataTime)); - erts_smp_refc_init(&bdt->refc, 1); + erts_refc_init(&bdt->refc, 1); #ifdef ERTS_DIRTY_SCHEDULERS bdt->n = erts_no_schedulers + 1; #else @@ -1621,7 +1621,7 @@ clear_function_break(ErtsCodeInfo *ci, Uint break_flags) Uint common; ErtsBpIndex ix = erts_staging_bp_ix(); - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); + 
ERTS_LC_ASSERT(erts_has_code_write_permission()); if ((g = ci->u.gen_bp) == NULL) { return 1; @@ -1654,8 +1654,8 @@ clear_function_break(ErtsCodeInfo *ci, Uint break_flags) static void bp_meta_unref(BpMetaTracer* bmt) { - if (erts_smp_refc_dectest(&bmt->refc, 0) <= 0) { - ErtsTracer trc = erts_smp_atomic_read_nob(&bmt->tracer); + if (erts_refc_dectest(&bmt->refc, 0) <= 0) { + ErtsTracer trc = erts_atomic_read_nob(&bmt->tracer); ERTS_TRACER_CLEAR(&trc); Free(bmt); } @@ -1664,7 +1664,7 @@ bp_meta_unref(BpMetaTracer* bmt) static void bp_count_unref(BpCount* bcp) { - if (erts_smp_refc_dectest(&bcp->refc, 0) <= 0) { + if (erts_refc_dectest(&bcp->refc, 0) <= 0) { Free(bcp); } } @@ -1672,7 +1672,7 @@ bp_count_unref(BpCount* bcp) static void bp_time_unref(BpDataTime* bdt) { - if (erts_smp_refc_dectest(&bdt->refc, 0) <= 0) { + if (erts_refc_dectest(&bdt->refc, 0) <= 0) { Uint i = 0; Uint j = 0; Process *h_p = NULL; @@ -1696,7 +1696,7 @@ bp_time_unref(BpDataTime* bdt) if (pbt) { Free(pbt); } - erts_smp_proc_unlock(h_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(h_p, ERTS_PROC_LOCK_MAIN); } } } diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h index 56fa82b912..1e1f6a7534 100644 --- a/erts/emulator/beam/beam_bp.h +++ b/erts/emulator/beam/beam_bp.h @@ -41,7 +41,7 @@ typedef struct { typedef struct bp_data_time { /* Call time */ Uint n; bp_time_hash_t *hash; - erts_smp_refc_t refc; + erts_refc_t refc; } BpDataTime; typedef struct { @@ -50,13 +50,13 @@ typedef struct { } process_breakpoint_time_t; /* used within psd */ typedef struct { - erts_smp_atomic_t acount; - erts_smp_refc_t refc; + erts_atomic_t acount; + erts_refc_t refc; } BpCount; typedef struct { - erts_smp_atomic_t tracer; - erts_smp_refc_t refc; + erts_atomic_t tracer; + erts_refc_t refc; } BpMetaTracer; typedef struct generic_bp_data { @@ -80,7 +80,7 @@ typedef struct generic_bp { #define ERTS_BP_CALL_TIME_SCHEDULE_EXITING (2) #ifdef ERTS_DIRTY_SCHEDULERS -extern erts_smp_mtx_t erts_dirty_bp_ix_mtx; +extern erts_mtx_t erts_dirty_bp_ix_mtx; #endif enum erts_break_op{ @@ -173,17 +173,17 @@ ErtsCodeInfo *erts_find_local_func(ErtsCodeMFA *mfa); #if ERTS_GLB_INLINE_INCL_FUNC_DEF -extern erts_smp_atomic32_t erts_active_bp_index; -extern erts_smp_atomic32_t erts_staging_bp_index; +extern erts_atomic32_t erts_active_bp_index; +extern erts_atomic32_t erts_staging_bp_index; ERTS_GLB_INLINE ErtsBpIndex erts_active_bp_ix(void) { - return erts_smp_atomic32_read_nob(&erts_active_bp_index); + return erts_atomic32_read_nob(&erts_active_bp_index); } ERTS_GLB_INLINE ErtsBpIndex erts_staging_bp_ix(void) { - return erts_smp_atomic32_read_nob(&erts_staging_bp_index); + return erts_atomic32_read_nob(&erts_staging_bp_index); } #endif diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c index 5f47a7c99f..fa912e52e9 100644 --- a/erts/emulator/beam/beam_debug.c +++ b/erts/emulator/beam/beam_debug.c @@ -157,8 +157,8 @@ erts_debug_breakpoint_2(BIF_ALIST_2) ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_breakpoint_2], BIF_P, BIF_ARG_1, BIF_ARG_2); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); erts_bp_match_functions(&f, &mfa, specified); if (boolean == am_true) { @@ -174,8 +174,8 @@ erts_debug_breakpoint_2(BIF_ALIST_2) res = make_small(f.matched); erts_bp_free_matched_functions(&f); - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, 
ERTS_PROC_LOCK_MAIN); erts_release_code_write_permission(); return res; @@ -1096,7 +1096,7 @@ dirty_send_message(Process *c_p, Eterm to, Eterm tag) if (rp == real_c_p) rp_locks &= ~c_p_locks; if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); erts_proc_dec_refc(rp); diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c index 0b4a69411a..25e16764ab 100644 --- a/erts/emulator/beam/beam_emu.c +++ b/erts/emulator/beam/beam_emu.c @@ -66,23 +66,23 @@ do { \ if ((P)) \ erts_proc_lc_chk_only_proc_main((P)); \ - ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); \ + ERTS_LC_ASSERT(!erts_thr_progress_is_blocking()); \ } while (0) -# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \ +# define ERTS_REQ_PROC_MAIN_LOCK(P) \ do { \ if ((P)) \ erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN, \ __FILE__, __LINE__); \ } while (0) -# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \ +# define ERTS_UNREQ_PROC_MAIN_LOCK(P) \ do { \ if ((P)) \ erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN); \ } while (0) #else # define PROCESS_MAIN_CHK_LOCKS(P) -# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) -# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) +# define ERTS_REQ_PROC_MAIN_LOCK(P) +# define ERTS_UNREQ_PROC_MAIN_LOCK(P) #endif /* @@ -294,7 +294,7 @@ void** beam_ops; HEAP_TOP((P)) = HTOP; \ (P)->stop = E; \ PROCESS_MAIN_CHK_LOCKS((P)); \ - ERTS_SMP_UNREQ_PROC_MAIN_LOCK((P)) + ERTS_UNREQ_PROC_MAIN_LOCK((P)) #define db(N) (N) #define tb(N) (N) @@ -1358,7 +1358,7 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array) } PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); c_p = erts_schedule(NULL, c_p, reds_used); ASSERT(!(c_p->flags & F_HIPE_MODE)); @@ -1367,7 +1367,7 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array) #ifdef DEBUG pid = c_p->common.id; /* Save for debugging purposes */ #endif - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); PROCESS_MAIN_CHK_LOCKS(c_p); ERTS_MSACC_UPDATE_CACHE_X(); @@ -1740,7 +1740,7 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array) result = erl_send(c_p, r(0), x(1)); PreFetch(0, next); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); PROCESS_MAIN_CHK_LOCKS(c_p); HTOP = HEAP_TOP(c_p); FCALLS = c_p->fcalls; @@ -1937,19 +1937,19 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array) msgp = PEEK_MESSAGE(c_p); if (!msgp) { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); /* Make sure messages wont pass exit signals... 
*/ if (ERTS_PROC_PENDING_EXIT(c_p)) { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); SWAPOUT; c_p->flags &= ~F_DELAY_GC; c_p->arity = 0; goto do_schedule; /* Will be rescheduled for exit */ } - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); msgp = PEEK_MESSAGE(c_p); if (msgp) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); else { c_p->flags &= ~F_DELAY_GC; @@ -2115,7 +2115,7 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array) OpCase(i_wait_timeout_fs): { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); /* Fall through */ } @@ -2148,7 +2148,7 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array) } else { /* Wrong time */ OpCase(i_wait_error_locked): { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); /* Fall through */ } OpCase(i_wait_error): { @@ -2177,24 +2177,24 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array) c_p->arity = 0; if (!ERTS_PTMR_IS_TIMED_OUT(c_p)) - erts_smp_atomic32_read_band_relb(&c_p->state, + erts_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE); ASSERT(!ERTS_PROC_IS_EXITING(c_p)); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); c_p->current = NULL; goto do_schedule; } OpCase(wait_unlocked_f): { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); goto wait2; } } - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); Next(2); } OpCase(i_wait_timeout_fI): { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); } OpCase(i_wait_timeout_locked_fI): @@ -2216,7 +2216,7 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array) * receive statement will examine the first message first. 
*/ OpCase(timeout_locked): { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); } OpCase(timeout): { @@ -2672,12 +2672,12 @@ do { \ c_p->fcalls = FCALLS; SWAPOUT; PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); ERTS_CHK_MBUF_SZ(c_p); result = (*bf)(c_p, reg, live); ERTS_CHK_MBUF_SZ(c_p); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); PROCESS_MAIN_CHK_LOCKS(c_p); SWAPIN; ERTS_HOLE_CHECK(c_p); @@ -2715,12 +2715,12 @@ do { \ c_p->fcalls = FCALLS; SWAPOUT; PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); ERTS_CHK_MBUF_SZ(c_p); result = (*bf)(c_p, reg, live); ERTS_CHK_MBUF_SZ(c_p); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); PROCESS_MAIN_CHK_LOCKS(c_p); SWAPIN; ERTS_HOLE_CHECK(c_p); @@ -2760,12 +2760,12 @@ do { \ c_p->fcalls = FCALLS; SWAPOUT; PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); ERTS_CHK_MBUF_SZ(c_p); result = (*bf)(c_p, reg, live); ERTS_CHK_MBUF_SZ(c_p); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); PROCESS_MAIN_CHK_LOCKS(c_p); SWAPIN; ERTS_HOLE_CHECK(c_p); @@ -2890,7 +2890,7 @@ do { \ ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(result)); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); ERTS_HOLE_CHECK(c_p); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); if (ERTS_IS_GC_DESIRED(c_p)) { Uint arity = GET_BIF_ARITY(export); result = erts_gc_after_bif_call_lhf(c_p, live_hf_end, result, reg, arity); @@ -3400,7 +3400,7 @@ do { \ Eterm* argp; int i; - if (erts_smp_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_EXITING) { + if (erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_EXITING) { c_p->i = beam_exit; c_p->arity = 0; c_p->current = NULL; @@ -3476,16 +3476,16 @@ do { \ SWAPOUT; c_p->freason = EXC_NORMAL; c_p->arity = 0; /* In case this process will never be garbed again. 
*/ - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); erts_do_exit_process(c_p, am_normal); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); goto do_schedule; } OpCase(continue_exit): { - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); erts_continue_exit_process(c_p); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); goto do_schedule; } @@ -3590,7 +3590,7 @@ do { \ PROCESS_MAIN_CHK_LOCKS(c_p); bif_nif_arity = codemfa->arity; - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); ASSERT(!ERTS_PROC_IS_EXITING(c_p)); { @@ -3655,7 +3655,7 @@ do { \ PROCESS_MAIN_CHK_LOCKS(c_p); bif_nif_arity = codemfa->arity; ASSERT(bif_nif_arity <= 4); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); { ErtsBifFunc bf = vbf; @@ -3678,7 +3678,7 @@ do { \ DTRACE_BIF_RETURN(c_p, codemfa); apply_bif_or_nif_epilogue: - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); ERTS_HOLE_CHECK(c_p); if (ERTS_IS_GC_DESIRED(c_p)) { nif_bif_result = erts_gc_after_bif_call_lhf(c_p, live_hf_end, @@ -4751,9 +4751,9 @@ do { \ ErtsCodeMFA* mfa = (ErtsCodeMFA *)(E[0]); SWAPOUT; /* Needed for shared heap */ - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); erts_trace_return(c_p, mfa, r(0), ERTS_TRACER_FROM_ETERM(E+1)/* tracer */); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); SWAPIN; c_p->cp = NULL; SET_I((BeamInstr *) cp_val(E[2])); @@ -4794,9 +4794,9 @@ do { \ } else break; } SWAPOUT; /* Needed for shared heap */ - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); erts_trace_return_to(c_p, cp_val(*cpp)); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); SWAPIN; } c_p->cp = NULL; @@ -5331,7 +5331,7 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp) } PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); c_p = erts_schedule(esdp, c_p, reds_used); @@ -5345,7 +5345,7 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp) #ifdef DEBUG pid = c_p->common.id; /* Save for debugging purposes */ #endif - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); PROCESS_MAIN_CHK_LOCKS(c_p); ASSERT(!(c_p->flags & F_HIPE_MODE)); @@ -5360,7 +5360,7 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp) else c_p->fcalls = CONTEXT_REDS; - if (erts_smp_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_DIRTY_RUNNING_SYS) { + if (erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_DIRTY_RUNNING_SYS) { erts_execute_dirty_system_task(c_p); goto do_dirty_schedule; } @@ -5431,7 +5431,7 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp) c_p->current = codemfa; SWAPOUT; PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); ASSERT(!ERTS_PROC_IS_EXITING(c_p)); if (em_apply_bif == (BeamInstr *) *I) { @@ -5445,7 +5445,7 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp) ASSERT(!(c_p->flags & F_HIBERNATE_SCHED)); PROCESS_MAIN_CHK_LOCKS(c_p); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR); if (exiting) @@ -5624,9 +5624,9 @@ handle_error(Process* c_p, BeamInstr* pc, Eterm* reg, ErtsCodeMFA *bif_mfa) } if (c_p->catches > 0) erts_exit(ERTS_ERROR_EXIT, "Catch not found"); } - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p); + ERTS_UNREQ_PROC_MAIN_LOCK(c_p); 
terminate_proc(c_p, Value); - ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p); + ERTS_REQ_PROC_MAIN_LOCK(c_p); return NULL; } @@ -6579,22 +6579,22 @@ erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* re * If there are no waiting messages, garbage collect and * shrink the heap. */ - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); if (!c_p->msg.len) { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); c_p->fvalue = NIL; PROCESS_MAIN_CHK_LOCKS(c_p); erts_garbage_collect_hibernate(c_p); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); PROCESS_MAIN_CHK_LOCKS(c_p); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); if (!c_p->msg.len) - erts_smp_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE); + erts_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE); ASSERT(!ERTS_PROC_IS_EXITING(c_p)); } - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); c_p->current = &bif_export[BIF_hibernate_3]->info.mfa; c_p->flags |= F_HIBERNATE_SCHED; /* Needed also when woken! */ return 1; @@ -6684,7 +6684,7 @@ call_fun(Process* p, /* Current process. */ module = fe->module; - ERTS_SMP_READ_MEMORY_BARRIER; + ERTS_THR_READ_MEMORY_BARRIER; if (fe->pend_purge_address) { /* * The system is currently trying to purge the @@ -6815,7 +6815,7 @@ new_fun(Process* p, Eterm* reg, ErlFunEntry* fe, int num_free) p->htop = hp + needed; funp = (ErlFunThing *) hp; hp = funp->env; - erts_smp_refc_inc(&fe->refc, 2); + erts_refc_inc(&fe->refc, 2); funp->thing_word = HEADER_FUN; funp->next = MSO(p).first; MSO(p).first = (struct erl_off_heap_header*) funp; diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c index 9bb4525306..5429a61d7b 100644 --- a/erts/emulator/beam/beam_load.c +++ b/erts/emulator/beam/beam_load.c @@ -794,8 +794,8 @@ erts_finish_loading(Binary* magic, Process* c_p, * table which is not protected by any locks. */ - ERTS_SMP_LC_ASSERT(erts_initialized == 0 || erts_has_code_write_permission() || - erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_initialized == 0 || erts_has_code_write_permission() || + erts_thr_progress_is_blocking()); /* * Make current code for the module old and insert the new code * as current. This will fail if there already exists old code @@ -830,7 +830,7 @@ erts_finish_loading(Binary* magic, Process* c_p, continue; } else if (ep->beam[0] == (BeamInstr) BeamOp(op_i_generic_breakpoint)) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); ASSERT(mod_tab_p->curr.num_traced_exports > 0); erts_clear_export_break(mod_tab_p, &ep->info); ep->addressv[code_ix] = (BeamInstr *) ep->beam[1]; @@ -4934,7 +4934,7 @@ final_touch(LoaderState* stp, struct erl_module_instance* inst_p) /* * We are hiding a pointer into older code. 
*/ - erts_smp_refc_dec(&fe->refc, 1); + erts_refc_dec(&fe->refc, 1); } fe->address = code_ptr; #ifdef HIPE @@ -6367,7 +6367,7 @@ patch_funentries(Eterm Patchlist) fe = erts_get_fun_entry(Mod, uniq, index); fe->native_address = (Uint *)native_address; - erts_smp_refc_dec(&fe->refc, 1); + erts_refc_dec(&fe->refc, 1); if (!patch(Addresses, (Uint) fe)) return 0; diff --git a/erts/emulator/beam/beam_ranges.c b/erts/emulator/beam/beam_ranges.c index 9b0335e83d..6e373a3480 100644 --- a/erts/emulator/beam/beam_ranges.c +++ b/erts/emulator/beam/beam_ranges.c @@ -29,12 +29,12 @@ typedef struct { BeamInstr* start; /* Pointer to start of module. */ - erts_smp_atomic_t end; /* (BeamInstr*) Points one word beyond last function in module. */ + erts_atomic_t end; /* (BeamInstr*) Points one word beyond last function in module. */ } Range; /* Range 'end' needs to be atomic as we purge module by setting end=start in active code_ix */ -#define RANGE_END(R) ((BeamInstr*)erts_smp_atomic_read_nob(&(R)->end)) +#define RANGE_END(R) ((BeamInstr*)erts_atomic_read_nob(&(R)->end)) static Range* find_range(BeamInstr* pc); static void lookup_loc(FunctionInfo* fi, const BeamInstr* pc, @@ -49,10 +49,10 @@ struct ranges { Range* modules; /* Sorted lists of module addresses. */ Sint n; /* Number of range entries. */ Sint allocated; /* Number of allocated entries. */ - erts_smp_atomic_t mid; /* Cached search start point */ + erts_atomic_t mid; /* Cached search start point */ }; static struct ranges r[ERTS_NUM_CODE_IX]; -static erts_smp_atomic_t mem_used; +static erts_atomic_t mem_used; static Range* write_ptr; #ifdef HARD_DEBUG @@ -90,12 +90,12 @@ erts_init_ranges(void) { Sint i; - erts_smp_atomic_init_nob(&mem_used, 0); + erts_atomic_init_nob(&mem_used, 0); for (i = 0; i < ERTS_NUM_CODE_IX; i++) { r[i].modules = 0; r[i].n = 0; r[i].allocated = 0; - erts_smp_atomic_init_nob(&r[i].mid, 0); + erts_atomic_init_nob(&r[i].mid, 0); } } @@ -107,12 +107,12 @@ erts_start_staging_ranges(int num_new) Sint need; if (r[dst].modules) { - erts_smp_atomic_add_nob(&mem_used, -r[dst].allocated); + erts_atomic_add_nob(&mem_used, -r[dst].allocated); erts_free(ERTS_ALC_T_MODULE_REFS, r[dst].modules); } need = r[dst].allocated = r[src].n + num_new; - erts_smp_atomic_add_nob(&mem_used, need); + erts_atomic_add_nob(&mem_used, need); write_ptr = erts_alloc(ERTS_ALC_T_MODULE_REFS, need * sizeof(Range)); r[dst].modules = write_ptr; @@ -135,7 +135,7 @@ erts_end_staging_ranges(int commit) if (rp->start < RANGE_END(rp)) { /* Only insert a module that has not been purged. 
*/ write_ptr->start = rp->start; - erts_smp_atomic_init_nob(&write_ptr->end, + erts_atomic_init_nob(&write_ptr->end, (erts_aint_t)(RANGE_END(rp))); write_ptr++; } @@ -161,7 +161,7 @@ erts_end_staging_ranges(int commit) } r[dst].modules = mp; CHECK(&r[dst]); - erts_smp_atomic_set_nob(&r[dst].mid, + erts_atomic_set_nob(&r[dst].mid, (erts_aint_t) (r[dst].modules + r[dst].n / 2)); } @@ -182,7 +182,7 @@ erts_update_ranges(BeamInstr* code, Uint size) */ if (r[dst].modules == NULL) { Sint need = 128; - erts_smp_atomic_add_nob(&mem_used, need); + erts_atomic_add_nob(&mem_used, need); r[dst].modules = erts_alloc(ERTS_ALC_T_MODULE_REFS, need * sizeof(Range)); r[dst].allocated = need; @@ -192,7 +192,7 @@ erts_update_ranges(BeamInstr* code, Uint size) ASSERT(r[dst].modules); write_ptr->start = code; - erts_smp_atomic_init_nob(&(write_ptr->end), + erts_atomic_init_nob(&(write_ptr->end), (erts_aint_t)(((byte *)code) + size)); write_ptr++; } @@ -201,13 +201,13 @@ void erts_remove_from_ranges(BeamInstr* code) { Range* rp = find_range(code); - erts_smp_atomic_set_nob(&rp->end, (erts_aint_t)rp->start); + erts_atomic_set_nob(&rp->end, (erts_aint_t)rp->start); } UWord erts_ranges_sz(void) { - return erts_smp_atomic_read_nob(&mem_used) * sizeof(Range); + return erts_atomic_read_nob(&mem_used) * sizeof(Range); } /* @@ -262,7 +262,7 @@ find_range(BeamInstr* pc) ErtsCodeIndex active = erts_active_code_ix(); Range* low = r[active].modules; Range* high = low + r[active].n; - Range* mid = (Range *) erts_smp_atomic_read_nob(&r[active].mid); + Range* mid = (Range *) erts_atomic_read_nob(&r[active].mid); CHECK(&r[active]); while (low < high) { @@ -271,7 +271,7 @@ find_range(BeamInstr* pc) } else if (pc >= RANGE_END(mid)) { low = mid + 1; } else { - erts_smp_atomic_set_nob(&r[active].mid, (erts_aint_t) mid); + erts_atomic_set_nob(&r[active].mid, (erts_aint_t) mid); return mid; } mid = low + (high-low) / 2; diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c index e9a0668e2d..4b45e98685 100644 --- a/erts/emulator/beam/bif.c +++ b/erts/emulator/beam/bif.c @@ -57,10 +57,10 @@ static Export dsend_continue_trap_export; Export *erts_convert_time_unit_trap = NULL; static Export *await_msacc_mod_trap = NULL; -static erts_smp_atomic32_t msacc; +static erts_atomic32_t msacc; static Export *await_sched_wall_time_mod_trap; -static erts_smp_atomic32_t sched_wall_time; +static erts_atomic32_t sched_wall_time; #define DECL_AM(S) Eterm AM_ ## S = am_atom_put(#S, sizeof(#S) - 1) @@ -103,7 +103,7 @@ static int insert_internal_link(Process* p, Eterm rpid) rp_locks = ERTS_PROC_LOCKS_ALL; } - erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK); + erts_proc_lock(p, ERTS_PROC_LOCK_LINK); /* get a pointer to the process struct of the linked process */ rp = erts_pid2proc_opt(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK, @@ -111,7 +111,7 @@ static int insert_internal_link(Process* p, Eterm rpid) ERTS_P2P_FLG_ALLOW_OTHER_X); if (!rp) { - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); return 0; } @@ -137,10 +137,10 @@ static int insert_internal_link(Process* p, Eterm rpid) rp, am_getting_linked, p->common.id); if (p == rp) - erts_smp_proc_unlock(p, rp_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(p, rp_locks & ~ERTS_PROC_LOCK_MAIN); else { - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, rp_locks); } return 1; @@ -176,13 +176,13 @@ BIF_RETTYPE link_1(BIF_ALIST_1) goto res_no_proc; } - 
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); if (erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, BIF_ARG_1) >= 0) send_link_signal = 1; /* else: already linked */ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); if (send_link_signal) { Eterm ref; @@ -210,11 +210,11 @@ BIF_RETTYPE link_1(BIF_ALIST_1) if (is_external_pid(BIF_ARG_1)) { - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); /* We may earn time by checking first that we're not linked already */ if (erts_lookup_link(ERTS_P_LINKS(BIF_P), BIF_ARG_1) != NULL) { - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); BIF_RET(am_true); } else { @@ -223,7 +223,7 @@ BIF_RETTYPE link_1(BIF_ALIST_1) ErtsDSigData dsd; dep = external_pid_dist_entry(BIF_ARG_1); if (dep == erts_this_dist_entry) { - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); goto res_no_proc; } @@ -232,13 +232,13 @@ BIF_RETTYPE link_1(BIF_ALIST_1) case ERTS_DSIG_PREP_NOT_ALIVE: /* Let the dlink trap handle it */ case ERTS_DSIG_PREP_NOT_CONNECTED: - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); BIF_TRAP1(dlink_trap, BIF_P, BIF_ARG_1); case ERTS_DSIG_PREP_CONNECTED: /* We are connected. Setup link and send link signal */ - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); erts_add_link(&ERTS_P_LINKS(BIF_P), LINK_PID, BIF_ARG_1); lnk = erts_add_or_lookup_link(&(dep->nlinks), @@ -247,9 +247,9 @@ BIF_RETTYPE link_1(BIF_ALIST_1) ASSERT(lnk != NULL); erts_add_link(&ERTS_LINK_ROOT(lnk), LINK_PID, BIF_ARG_1); - erts_smp_de_links_unlock(dep); - erts_smp_de_runlock(dep); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_de_links_unlock(dep); + erts_de_runlock(dep); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); code = erts_dsig_send_link(&dsd, BIF_P->common.id, BIF_ARG_1); if (code == ERTS_DSIG_SEND_YIELD) @@ -265,11 +265,11 @@ BIF_RETTYPE link_1(BIF_ALIST_1) BIF_ERROR(BIF_P, BADARG); res_no_proc: { - erts_aint32_t state = erts_smp_atomic32_read_nob(&BIF_P->state); + erts_aint32_t state = erts_atomic32_read_nob(&BIF_P->state); if (state & ERTS_PSFLG_TRAP_EXIT) { ErtsProcLocks locks = ERTS_PROC_LOCK_MAIN; erts_deliver_exit_message(BIF_ARG_1, BIF_P, &locks, am_noproc, NIL); - erts_smp_proc_unlock(BIF_P, ~ERTS_PROC_LOCK_MAIN & locks); + erts_proc_unlock(BIF_P, ~ERTS_PROC_LOCK_MAIN & locks); BIF_RET(am_true); } else @@ -289,7 +289,7 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to) int code; Eterm res = am_false; - ERTS_SMP_LC_ASSERT((ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK) + ERTS_LC_ASSERT((ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK) == erts_proc_lc_my_proc_locks(c_p)); code = erts_dsig_prepare(&dsd, dep, c_p, ERTS_DSP_RLOCK, 0); @@ -301,26 +301,26 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to) * down just before the call to demonitor. 
*/ if (dep) { - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); dmon = erts_remove_monitor(&dep->monitors, ref); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); if (dmon) erts_destroy_monitor(dmon); } mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_LINK); res = am_true; break; case ERTS_DSIG_PREP_CONNECTED: - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref); dmon = erts_remove_monitor(&dep->monitors, ref); - erts_smp_de_links_unlock(dep); - erts_smp_de_runlock(dep); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK); + erts_de_links_unlock(dep); + erts_de_runlock(dep); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_LINK); if (!dmon) { /* @@ -360,7 +360,7 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to) lookup and remove */ erts_destroy_monitor(mon); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); return res; } @@ -385,12 +385,12 @@ demonitor_local_process(Process *c_p, Eterm ref, Eterm to, Eterm *res) ErtsMonitor *rmon; rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref); if (rp != c_p) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (rmon != NULL) erts_destroy_monitor(rmon); } else { - ERTS_SMP_ASSERT_IS_NOT_EXITING(c_p); + ERTS_ASSERT_IS_NOT_EXITING(c_p); } } @@ -403,7 +403,7 @@ demonitor_local_port(Process *origin, Eterm ref, Eterm target) if (!port) { BIF_ERROR(origin, BADARG); } - erts_smp_proc_unlock(origin, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(origin, ERTS_PROC_LOCK_LINK); if (port) { Eterm trap_ref; @@ -423,7 +423,7 @@ demonitor_local_port(Process *origin, Eterm ref, Eterm target) } } else { - ERTS_SMP_ASSERT_IS_NOT_EXITING(origin); + ERTS_ASSERT_IS_NOT_EXITING(origin); } BIF_RET(res); } @@ -441,7 +441,7 @@ BIF_RETTYPE demonitor(Process *c_p, Eterm ref, Eterm *multip) BIF_RETTYPE res = am_false; int unlock_link = 1; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_LINK); + erts_proc_lock(c_p, ERTS_PROC_LOCK_LINK); if (is_not_internal_ref(ref)) { res = am_badarg; @@ -503,14 +503,14 @@ badarg: done: if (unlock_link) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_LINK); if (deref_de) { ASSERT(dep); erts_deref_dist_entry(dep); } - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); BIF_RET(res); } @@ -633,12 +633,12 @@ local_pid_monitor(Process *p, Eterm target, Eterm mon_ref, int boolean) return ret; } - erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK); + erts_proc_lock(p, ERTS_PROC_LOCK_LINK); rp = erts_pid2proc_opt(p, p_locks, target, ERTS_PROC_LOCK_LINK, ERTS_P2P_FLG_ALLOW_OTHER_X); if (!rp) { - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); p_locks &= ~ERTS_PROC_LOCK_LINK; if (boolean) ret = am_false; @@ -655,10 +655,10 @@ local_pid_monitor(Process *p, Eterm target, Eterm mon_ref, int boolean) erts_add_monitor(&ERTS_P_MONITORS(p), MON_ORIGIN, mon_ref, target, NIL); erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, mon_ref, p->common.id, NIL); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); } - erts_smp_proc_unlock(p, p_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(p, p_locks & ~ERTS_PROC_LOCK_MAIN); return ret; } @@ 
-692,7 +692,7 @@ res_no_proc: break; } } - erts_smp_proc_unlock(origin, p_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(origin, p_locks & ~ERTS_PROC_LOCK_MAIN); BIF_RET(ref); } @@ -707,7 +707,7 @@ local_name_monitor(Process *self, Eterm type, Eterm target_name) Process *proc = NULL; Port *port = NULL; - erts_smp_proc_lock(self, ERTS_PROC_LOCK_LINK); + erts_proc_lock(self, ERTS_PROC_LOCK_LINK); erts_whereis_name(self, p_locks, target_name, &proc, ERTS_PROC_LOCK_LINK, @@ -726,7 +726,7 @@ local_name_monitor(Process *self, Eterm type, Eterm target_name) Eterm item; UseTmpHeap(3,self); - erts_smp_proc_unlock(self, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(self, ERTS_PROC_LOCK_LINK); p_locks &= ~ERTS_PROC_LOCK_LINK; item = TUPLE2(lhp, target_name, erts_this_dist_entry->sysname); @@ -737,7 +737,7 @@ local_name_monitor(Process *self, Eterm type, Eterm target_name) UnUseTmpHeap(3,self); } else if (port) { - erts_smp_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN); p_locks &= ~ERTS_PROC_LOCK_MAIN; switch (erts_port_monitor(self, port, target_name, &ret)) { @@ -758,16 +758,16 @@ local_name_monitor(Process *self, Eterm type, Eterm target_name) proc->common.id, target_name); erts_add_monitor(&ERTS_P_MONITORS(proc), MON_TARGET, ret, self->common.id, target_name); - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(proc, ERTS_PROC_LOCK_LINK); } if (p_locks) { - erts_smp_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN); } BIF_RET(ret); badarg: if (p_locks) { - erts_smp_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(self, p_locks & ~ERTS_PROC_LOCK_MAIN); } BIF_ERROR(self, BADARG); } @@ -780,20 +780,20 @@ remote_monitor(Process *p, Eterm bifarg1, Eterm bifarg2, BIF_RETTYPE ret; int code; - erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK); + erts_proc_lock(p, ERTS_PROC_LOCK_LINK); code = erts_dsig_prepare(&dsd, dep, p, ERTS_DSP_RLOCK, 0); switch (code) { case ERTS_DSIG_PREP_NOT_ALIVE: /* Let the dmonitor_p trap handle it */ case ERTS_DSIG_PREP_NOT_CONNECTED: - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); ERTS_BIF_PREP_TRAP2(ret, dmonitor_p_trap, p, bifarg1, bifarg2); break; case ERTS_DSIG_PREP_CONNECTED: if (!(dep->flags & DFLAG_DIST_MONITOR) || (byname && !(dep->flags & DFLAG_DIST_MONITOR_NAME))) { - erts_smp_de_runlock(dep); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_de_runlock(dep); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); ERTS_BIF_PREP_ERROR(ret, p, BADARG); } else { @@ -812,16 +812,16 @@ remote_monitor(Process *p, Eterm bifarg1, Eterm bifarg2, d_name = NIL; } - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); erts_add_monitor(&ERTS_P_MONITORS(p), MON_ORIGIN, mon_ref, p_trgt, p_name); erts_add_monitor(&(dep->monitors), MON_TARGET, mon_ref, p->common.id, d_name); - erts_smp_de_links_unlock(dep); - erts_smp_de_runlock(dep); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_de_links_unlock(dep); + erts_de_runlock(dep); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); code = erts_dsig_send_monitor(&dsd, p->common.id, target, mon_ref); if (code == ERTS_DSIG_SEND_YIELD) @@ -854,10 +854,10 @@ BIF_RETTYPE monitor_2(BIF_ALIST_2) goto badarg; } ref = erts_make_ref(BIF_P); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); erts_add_monitor(&ERTS_P_MONITORS(BIF_P), MON_TIME_OFFSET, ref, am_clock_service, NIL); - erts_smp_proc_unlock(BIF_P, 
ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); erts_monitor_time_offset(BIF_P->common.id, ref); BIF_RET(ref); } @@ -974,7 +974,7 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1) so.max_heap_size = H_MAX_SIZE; so.max_heap_flags = H_MAX_FLAGS; so.priority = PRIORITY_NORMAL; - so.max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); + so.max_gen_gcs = (Uint16) erts_atomic32_read_nob(&erts_max_gen_gcs); so.scheduler = 0; /* @@ -1115,13 +1115,13 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) } if (is_internal_port(BIF_ARG_1)) { - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); if (ERTS_PROC_PENDING_EXIT(BIF_P)) goto handle_pending_exit; l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); if (l) { Port *prt; @@ -1163,12 +1163,12 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) /* Blind removal, we might have trapped or anything, this leaves us in a state where monitors might be inconsistent, but the dist code should take care of it. */ - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); if (ERTS_PROC_PENDING_EXIT(BIF_P)) goto handle_pending_exit; l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1); - erts_smp_proc_unlock(BIF_P, + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); if (l) @@ -1210,7 +1210,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) /* Internal pid... */ - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); cp_locks |= ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS; @@ -1221,7 +1221,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) if (ERTS_PROC_PENDING_EXIT(BIF_P)) { if (rp && rp != BIF_P) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); goto handle_pending_exit; } @@ -1231,7 +1231,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) erts_destroy_link(l); if (!rp) { - ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P); + ERTS_ASSERT_IS_NOT_EXITING(BIF_P); } else { rl = erts_remove_link(&ERTS_P_LINKS(rp), BIF_P->common.id); @@ -1239,17 +1239,17 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) erts_destroy_link(rl); if (IS_TRACED_FL(rp, F_TRACE_PROCS) && rl != NULL) { - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS); cp_locks &= ~ERTS_PROC_LOCK_STATUS; trace_proc(BIF_P, (ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_LINK), rp, am_getting_unlinked, BIF_P->common.id); } if (rp != BIF_P) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); } - erts_smp_proc_unlock(BIF_P, cp_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, cp_locks & ~ERTS_PROC_LOCK_MAIN); BIF_RET(am_true); @@ -1258,7 +1258,7 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1) | ERTS_PROC_LOCK_LINK | ERTS_PROC_LOCK_STATUS)); ASSERT(ERTS_PROC_IS_EXITING(BIF_P)); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); ERTS_BIF_EXITED(BIF_P); } @@ -1592,7 +1592,7 @@ BIF_RETTYPE exit_2(BIF_ALIST_2) if (BIF_ARG_1 == BIF_P->common.id) { rp_locks = ERTS_PROC_LOCKS_ALL; rp = BIF_P; - erts_smp_proc_lock(rp, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(rp, ERTS_PROC_LOCKS_ALL_MINOR); } else { rp_locks = ERTS_PROC_LOCKS_XSIG_SEND; @@ -1617,7 +1617,7 @@ 
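Every hunk above follows the same mechanical pattern: an erts_smp_* call becomes its erts_* counterpart with identical arguments, whether it is a process lock (erts_smp_proc_lock), a distribution-entry lock (erts_smp_de_links_lock), or an atomic (erts_smp_atomic32_read_nob). Below is a minimal sketch of why such a rename is behavior-preserving, assuming the old names were thin aliases in a build where SMP support is unconditional; the pthread-based erts_mtx_t is a stand-in for illustration, not the real ERTS type:

    #include <pthread.h>

    /* Illustrative stand-in for the real ERTS mutex type. */
    typedef pthread_mutex_t erts_mtx_t;

    static void erts_mtx_lock(erts_mtx_t *m)   { pthread_mutex_lock(m); }
    static void erts_mtx_unlock(erts_mtx_t *m) { pthread_mutex_unlock(m); }

    /* Assumed shape of the retired compatibility layer: with SMP support
     * always enabled, each erts_smp_* name simply forwards to the plain
     * primitive, so a tree-wide s/erts_smp_/erts_/ cannot change behavior. */
    #define erts_smp_mtx_lock(M)   erts_mtx_lock(M)
    #define erts_smp_mtx_unlock(M) erts_mtx_unlock(M)

The atomic renames (erts_smp_atomic32_* to erts_atomic32_*, erts_smp_refc_* to erts_refc_*) follow the same one-to-one mapping, which is why every hunk leaves arguments and ordering untouched.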
BIF_RETTYPE exit_2(BIF_ALIST_2) if (rp == BIF_P) rp_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); /* * We may have exited ourselves and may have to take action. */ @@ -1729,14 +1729,14 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2) * true. For more info, see implementation of * erts_send_exit_signal(). */ - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_XSIG_SEND); + erts_proc_lock(BIF_P, ERTS_PROC_LOCKS_XSIG_SEND); if (trap_exit) - state = erts_smp_atomic32_read_bor_mb(&BIF_P->state, + state = erts_atomic32_read_bor_mb(&BIF_P->state, ERTS_PSFLG_TRAP_EXIT); else - state = erts_smp_atomic32_read_band_mb(&BIF_P->state, + state = erts_atomic32_read_band_mb(&BIF_P->state, ~ERTS_PSFLG_TRAP_EXIT); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCKS_XSIG_SEND); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCKS_XSIG_SEND); if (state & ERTS_PSFLG_PENDING_EXIT) { erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN); @@ -1759,13 +1759,13 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2) if (sched == 0) { new = NULL; - state = erts_smp_atomic32_read_band_mb(&BIF_P->state, + state = erts_atomic32_read_band_mb(&BIF_P->state, ~ERTS_PSFLG_BOUND); } else { new = erts_schedid2runq(sched); erts_atomic_set_nob(&BIF_P->run_queue, (erts_aint_t) new); - state = erts_smp_atomic32_read_bor_mb(&BIF_P->state, + state = erts_atomic32_read_bor_mb(&BIF_P->state, ERTS_PSFLG_BOUND); } @@ -1846,7 +1846,7 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2) } else { goto error; } - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR); old_value = (ERTS_TRACE_FLAGS(BIF_P) & F_SENSITIVE ? am_true : am_false); @@ -1855,7 +1855,7 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2) } else { ERTS_TRACE_FLAGS(BIF_P) &= ~F_SENSITIVE; } - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR); /* make sure to bump all reds so that we get rescheduled immediately so setting takes effect */ BIF_RET2(old_value, CONTEXT_REDS); @@ -1909,7 +1909,7 @@ BIF_RETTYPE process_flag_3(BIF_ALIST_3) res = process_flag_aux(BIF_P, rp, BIF_ARG_2, BIF_ARG_3); if (rp != BIF_P) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); return res; } @@ -2251,7 +2251,7 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext *ctx) res *= 4; else res = 0; - erts_smp_proc_unlock(rp, + erts_proc_unlock(rp, p == rp ? 
(rp_locks & ~ERTS_PROC_LOCK_MAIN) : rp_locks); @@ -3954,14 +3954,14 @@ BIF_RETTYPE halt_2(BIF_ALIST_2) ERTS_BIF_YIELD2(bif_export[BIF_halt_2], BIF_P, am_undefined, am_undefined); } else { - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_exit(pos_int_code, ""); } } else if (ERTS_IS_ATOM_STR("abort", BIF_ARG_1)) { VERBOSE(DEBUG_SYSTEM, ("System halted by BIF halt(%T, %T)\n", BIF_ARG_1, BIF_ARG_2)); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_exit(ERTS_ABORT_EXIT, ""); } else if (is_string(BIF_ARG_1) || BIF_ARG_1 == NIL) { @@ -3976,7 +3976,7 @@ BIF_RETTYPE halt_2(BIF_ALIST_2) halt_msg[i] = '\0'; VERBOSE(DEBUG_SYSTEM, ("System halted by BIF halt(%T, %T)\n", BIF_ARG_1, BIF_ARG_2)); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_exit(ERTS_DUMP_EXIT, "%s\n", halt_msg); } else @@ -4450,9 +4450,9 @@ BIF_RETTYPE group_leader_2(BIF_ALIST_2) new_member->group_leader = BIF_ARG_1; else { locks &= ~ERTS_PROC_LOCK_STATUS; - erts_smp_proc_unlock(new_member, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(new_member, ERTS_PROC_LOCK_STATUS); if (new_member == BIF_P - || !(erts_smp_atomic32_read_nob(&new_member->state) + || !(erts_atomic32_read_nob(&new_member->state) & ERTS_PSFLG_DIRTY_RUNNING)) { new_member->group_leader = STORE_NC_IN_PROC(new_member, BIF_ARG_1); @@ -4481,7 +4481,7 @@ BIF_RETTYPE group_leader_2(BIF_ALIST_2) if (new_member == BIF_P) locks &= ~ERTS_PROC_LOCK_MAIN; if (locks) - erts_smp_proc_unlock(new_member, locks); + erts_proc_unlock(new_member, locks); if (await_x) { /* Wait for new_member to terminate; then badarg */ @@ -4575,7 +4575,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) goto error; } nval = (n > (Sint) ((Uint16) -1)) ? 
((Uint16) -1) : ((Uint16) n); - oval = (Uint) erts_smp_atomic32_xchg_nob(&erts_max_gen_gcs, + oval = (Uint) erts_atomic32_xchg_nob(&erts_max_gen_gcs, (erts_aint32_t) nval); BIF_RET(make_small(oval)); } else if (BIF_ARG_1 == am_min_heap_size) { @@ -4585,13 +4585,13 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) goto error; } - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); H_MIN_SIZE = erts_next_heap_size(n, 0); - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(make_small(oval)); } else if (BIF_ARG_1 == am_min_bin_vheap_size) { @@ -4601,13 +4601,13 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) goto error; } - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); BIN_VH_MIN_SIZE = erts_next_heap_size(n, 0); - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(make_small(oval)); } else if (BIF_ARG_1 == am_max_heap_size) { @@ -4625,14 +4625,14 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) hp = HAlloc(BIF_P, sz); old_value = erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, &hp, NULL); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); H_MAX_SIZE = max_heap_size; H_MAX_FLAGS = max_heap_flags; - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(old_value); } else if (BIF_ARG_1 == am_display_items) { @@ -4686,8 +4686,8 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) } else if (BIF_ARG_1 == make_small(1)) { int i, max; ErtsMessage* mp; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); max = erts_ptab_max(&erts_proc); for (i = 0; i < max; i++) { @@ -4700,7 +4700,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) #endif p->seq_trace_clock = 0; p->seq_trace_lastcnt = 0; - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p); + ERTS_MSGQ_MV_INQ2PRIVQ(p); mp = p->msg.first; while(mp != NULL) { #ifdef USE_VM_PROBES @@ -4713,14 +4713,14 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) } } - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(am_true); } else if (BIF_ARG_1 == am_scheduler_wall_time) { if (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) { erts_aint32_t new = BIF_ARG_2 == am_true ? 
1 : 0; - erts_aint32_t old = erts_smp_atomic32_xchg_nob(&sched_wall_time, + erts_aint32_t old = erts_atomic32_xchg_nob(&sched_wall_time, new); Eterm ref = erts_sched_wall_time_request(BIF_P, 1, new, 0, 0); ASSERT(is_value(ref)); @@ -4759,9 +4759,9 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) } else if (BIF_ARG_1 == am_time_offset && ERTS_IS_ATOM_STR("finalize", BIF_ARG_2)) { ErtsTimeOffsetState res; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); res = erts_finalize_time_offset(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); switch (res) { case ERTS_TIME_OFFSET_PRELIMINARY: { DECL_AM(preliminary); @@ -4783,7 +4783,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) Eterm threads; if (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false) { erts_aint32_t new = BIF_ARG_2 == am_true ? ERTS_MSACC_ENABLE : ERTS_MSACC_DISABLE; - erts_aint32_t old = erts_smp_atomic32_xchg_nob(&msacc, new); + erts_aint32_t old = erts_atomic32_xchg_nob(&msacc, new); Eterm ref = erts_msacc_request(BIF_P, new, &threads); if (is_non_value(ref)) BIF_RET(old ? am_true : am_false); @@ -4794,7 +4794,7 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) threads); } else if (BIF_ARG_2 == am_reset) { Eterm ref = erts_msacc_request(BIF_P, ERTS_MSACC_RESET, &threads); - erts_aint32_t old = erts_smp_atomic32_read_nob(&msacc); + erts_aint32_t old = erts_atomic32_read_nob(&msacc); ASSERT(is_value(ref)); BIF_TRAP3(await_msacc_mod_trap, BIF_P, @@ -4813,9 +4813,9 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2) what = ERTS_SCHED_STAT_MODIFY_CLEAR; else goto error; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_sched_stat_modify(what); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(am_true); } else if (ERTS_IS_ATOM_STR("internal_cpu_topology", BIF_ARG_1)) { Eterm res = erts_set_cpu_topology(BIF_P, BIF_ARG_2); @@ -4991,18 +4991,18 @@ skip_current_msgq(Process *c_p) erts_proc_lc_chk_only_proc_main(c_p); #endif - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); if (ERTS_PROC_PENDING_EXIT(c_p)) { KILL_CATCHES(c_p); c_p->freason = EXC_EXIT; res = 0; } else { - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); c_p->msg.save = c_p->msg.last; res = 1; } - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); return res; } @@ -5105,8 +5105,8 @@ void erts_init_bif(void) await_msacc_mod_trap = erts_export_put(am_erts_internal, am_await_microstate_accounting_modifications, 3); - erts_smp_atomic32_init_nob(&sched_wall_time, 0); - erts_smp_atomic32_init_nob(&msacc, ERTS_MSACC_IS_ENABLED()); + erts_atomic32_init_nob(&sched_wall_time, 0); + erts_atomic32_init_nob(&msacc, ERTS_MSACC_IS_ENABLED()); } /* @@ -5124,7 +5124,7 @@ schedule(Process *c_p, Process *dirty_shadow_proc, Eterm module, Eterm function, int argc, Eterm *argv) { - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p)); (void) erts_nif_export_schedule(c_p, dirty_shadow_proc, mfa, pc, (BeamInstr) em_apply_bif, dfunc, ifunc, @@ -5198,7 +5198,7 @@ erts_schedule_bif(Process *proc, dirty_shadow_proc = proc; c_p = proc->next; ASSERT(c_p->common.id == dirty_shadow_proc->common.id); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } else #endif @@ -5240,7 
+5240,7 @@ erts_schedule_bif(Process *proc, break; } - (void) erts_smp_atomic32_read_bset_nob(&c_p->state, mask, set); + (void) erts_atomic32_read_bset_nob(&c_p->state, mask, set); #else dbif = call_bif; ibif = bif; @@ -5285,7 +5285,7 @@ erts_schedule_bif(Process *proc, } if (dirty_shadow_proc) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); return THE_NON_VALUE; } @@ -5335,7 +5335,7 @@ erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm * erts_aint32_t state; ASSERT(!c_p->scheduler_data); - state = erts_smp_atomic32_read_nob(&c_p->state); + state = erts_atomic32_read_nob(&c_p->state); ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING) && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))); ASSERT(esdp); @@ -5349,7 +5349,7 @@ erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm * bf = (ErtsBifFunc) I[1]; - erts_smp_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC + erts_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC | ERTS_PSFLG_DIRTY_IO_PROC)); dirty_shadow_proc = erts_make_dirty_shadow_proc(esdp, c_p); @@ -5364,11 +5364,11 @@ erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm * c_p_htop = c_p->htop; #endif - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); result = (*bf)(dirty_shadow_proc, reg, I); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); ASSERT(c_p_htop == c_p->htop); ASSERT(dirty_shadow_proc->static_flags & ERTS_STC_FLG_SHADOW_PROC); @@ -5391,7 +5391,7 @@ erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm * } else if (nep->func == ERTS_SCHED_BIF_TRAP_MARKER) { /* Dirty BIF did an ordinary trap... 
*/ - ASSERT(!(erts_smp_atomic32_read_nob(&c_p->state) + ASSERT(!(erts_atomic32_read_nob(&c_p->state) & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC))); schedule(c_p, dirty_shadow_proc, NULL, NULL, dirty_bif_trap, (void *) dirty_shadow_proc->i, diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h index 01cca90a7a..9b0870dee2 100644 --- a/erts/emulator/beam/bif.h +++ b/erts/emulator/beam/bif.h @@ -93,7 +93,7 @@ do { \ #define BUMP_REDS(p, gc) do { \ ASSERT(p); \ - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));\ + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p));\ (p)->fcalls -= (gc); \ if ((p)->fcalls < 0) { \ if (!ERTS_PROC_GET_SAVED_CALLS_BUF((p))) \ diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c index 9bcf50653d..35b2365655 100644 --- a/erts/emulator/beam/break.c +++ b/erts/emulator/beam/break.c @@ -109,8 +109,8 @@ process_killer(void) ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND; erts_aint32_t state; erts_proc_inc_refc(rp); - erts_smp_proc_lock(rp, rp_locks); - state = erts_smp_atomic32_read_acqb(&rp->state); + erts_proc_lock(rp, rp_locks); + state = erts_atomic32_read_acqb(&rp->state); if (state & (ERTS_PSFLG_FREE | ERTS_PSFLG_EXITING | ERTS_PSFLG_ACTIVE @@ -132,7 +132,7 @@ process_killer(void) NULL, 0); } - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); erts_proc_dec_refc(rp); } case 'n': br = 1; break; @@ -219,7 +219,7 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p) /* Display the state */ erts_print(to, to_arg, "State: "); - state = erts_smp_atomic32_read_acqb(&p->state); + state = erts_atomic32_read_acqb(&p->state); erts_dump_process_state(to, to_arg, state); if (state & ERTS_PSFLG_GC) { garbing = 1; @@ -258,7 +258,7 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p) erts_print(to, to_arg, "Spawned by: %T\n", p->parent); approx_started = (time_t) p->approx_started; erts_print(to, to_arg, "Started: %s", ctime(&approx_started)); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p); + ERTS_MSGQ_MV_INQ2PRIVQ(p); erts_print(to, to_arg, "Message queue length: %d\n", p->msg.len); /* display the message queue only if there is anything in it */ @@ -508,7 +508,7 @@ do_break(void) erts_free_read_env(mode); #endif /* __WIN32__ */ - ASSERT(erts_smp_thr_progress_is_blocking()); + ASSERT(erts_thr_progress_is_blocking()); erts_printf("\n" "BREAK: (a)bort (c)ontinue (p)roc info (i)nfo (l)oaded\n" @@ -734,8 +734,8 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args) #endif /* Allow us to pass certain places without locking... 
*/ - erts_smp_atomic32_set_mb(&erts_writing_erl_crash_dump, 1); - erts_smp_tsd_set(erts_is_crash_dumping_key, (void *) 1); + erts_atomic32_set_mb(&erts_writing_erl_crash_dump, 1); + erts_tsd_set(erts_is_crash_dumping_key, (void *) 1); envsz = sizeof(env); diff --git a/erts/emulator/beam/code_ix.c b/erts/emulator/beam/code_ix.c index ba68d612e3..34e46f5f33 100644 --- a/erts/emulator/beam/code_ix.c +++ b/erts/emulator/beam/code_ix.c @@ -34,8 +34,8 @@ # define CIX_TRACE(text) #endif -erts_smp_atomic32_t the_active_code_index; -erts_smp_atomic32_t the_staging_code_index; +erts_atomic32_t the_active_code_index; +erts_atomic32_t the_staging_code_index; static Process* code_writing_process = NULL; struct code_write_queue_item { @@ -43,7 +43,7 @@ struct code_write_queue_item { struct code_write_queue_item* next; }; static struct code_write_queue_item* code_write_queue = NULL; -static erts_smp_mtx_t code_write_permission_mtx; +static erts_mtx_t code_write_permission_mtx; #ifdef ERTS_ENABLE_LOCK_CHECK static erts_tsd_key_t has_code_write_permission; @@ -55,9 +55,9 @@ void erts_code_ix_init(void) * single threaded with active and staging set both to zero. * Preloading is finished by a commit that will set things straight. */ - erts_smp_atomic32_init_nob(&the_active_code_index, 0); - erts_smp_atomic32_init_nob(&the_staging_code_index, 0); - erts_smp_mtx_init(&code_write_permission_mtx, "code_write_permission", NIL, + erts_atomic32_init_nob(&the_active_code_index, 0); + erts_atomic32_init_nob(&the_staging_code_index, 0); + erts_mtx_init(&code_write_permission_mtx, "code_write_permission", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); #ifdef ERTS_ENABLE_LOCK_CHECK erts_tsd_key_create(&has_code_write_permission, @@ -91,9 +91,9 @@ void erts_commit_staging_code_ix(void) /* We need to this lock as we are now making the staging export table active */ export_staging_lock(); ix = erts_staging_code_ix(); - erts_smp_atomic32_set_nob(&the_active_code_index, ix); + erts_atomic32_set_nob(&the_active_code_index, ix); ix = (ix + 1) % ERTS_NUM_CODE_IX; - erts_smp_atomic32_set_nob(&the_staging_code_index, ix); + erts_atomic32_set_nob(&the_staging_code_index, ix); export_staging_unlock(); erts_tracer_nif_clear(); CIX_TRACE("activate"); @@ -115,10 +115,10 @@ void erts_abort_staging_code_ix(void) int erts_try_seize_code_write_permission(Process* c_p) { int success; - ASSERT(!erts_smp_thr_progress_is_blocking()); /* to avoid deadlock */ + ASSERT(!erts_thr_progress_is_blocking()); /* to avoid deadlock */ ASSERT(c_p != NULL); - erts_smp_mtx_lock(&code_write_permission_mtx); + erts_mtx_lock(&code_write_permission_mtx); success = (code_writing_process == NULL); if (success) { code_writing_process = c_p; @@ -136,21 +136,21 @@ int erts_try_seize_code_write_permission(Process* c_p) code_write_queue = qitem; erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL); } - erts_smp_mtx_unlock(&code_write_permission_mtx); + erts_mtx_unlock(&code_write_permission_mtx); return success; } void erts_release_code_write_permission(void) { - erts_smp_mtx_lock(&code_write_permission_mtx); - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); + erts_mtx_lock(&code_write_permission_mtx); + ERTS_LC_ASSERT(erts_has_code_write_permission()); while (code_write_queue != NULL) { /* unleash the entire herd */ struct code_write_queue_item* qitem = code_write_queue; - erts_smp_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS); if (!ERTS_PROC_IS_EXITING(qitem->p)) { erts_resume(qitem->p, 
ERTS_PROC_LOCK_STATUS); } - erts_smp_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS); code_write_queue = qitem->next; erts_proc_dec_refc(qitem->p); erts_free(ERTS_ALC_T_CODE_IX_LOCK_Q, qitem); @@ -159,7 +159,7 @@ void erts_release_code_write_permission(void) #ifdef ERTS_ENABLE_LOCK_CHECK erts_tsd_set(has_code_write_permission, (void *) 0); #endif - erts_smp_mtx_unlock(&code_write_permission_mtx); + erts_mtx_unlock(&code_write_permission_mtx); } #ifdef ERTS_ENABLE_LOCK_CHECK diff --git a/erts/emulator/beam/code_ix.h b/erts/emulator/beam/code_ix.h index a28b0cd36e..9e3280cd98 100644 --- a/erts/emulator/beam/code_ix.h +++ b/erts/emulator/beam/code_ix.h @@ -205,16 +205,16 @@ ErtsCodeMFA *erts_code_to_codemfa(BeamInstr *I) return mfa; } -extern erts_smp_atomic32_t the_active_code_index; -extern erts_smp_atomic32_t the_staging_code_index; +extern erts_atomic32_t the_active_code_index; +extern erts_atomic32_t the_staging_code_index; ERTS_GLB_INLINE ErtsCodeIndex erts_active_code_ix(void) { - return erts_smp_atomic32_read_nob(&the_active_code_index); + return erts_atomic32_read_nob(&the_active_code_index); } ERTS_GLB_INLINE ErtsCodeIndex erts_staging_code_ix(void) { - return erts_smp_atomic32_read_nob(&the_staging_code_index); + return erts_atomic32_read_nob(&the_staging_code_index); } #endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ diff --git a/erts/emulator/beam/copy.c b/erts/emulator/beam/copy.c index fefde256d7..10bf197405 100644 --- a/erts/emulator/beam/copy.c +++ b/erts/emulator/beam/copy.c @@ -845,7 +845,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint funp = (ErlFunThing *) tp; funp->next = off_heap->first; off_heap->first = (struct erl_off_heap_header*) funp; - erts_smp_refc_inc(&funp->fe->refc, 2); + erts_refc_inc(&funp->fe->refc, 2); *argp = make_fun(tp); } break; @@ -854,7 +854,7 @@ Eterm copy_struct_x(Eterm obj, Uint sz, Eterm** hpp, ErlOffHeap* off_heap, Uint case EXTERNAL_REF_SUBTAG: { ExternalThing *etp = (ExternalThing *) objp; - erts_smp_refc_inc(&etp->node->refc, 2); + erts_refc_inc(&etp->node->refc, 2); } L_off_heap_node_container_common: { @@ -1531,7 +1531,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info, } funp->next = off_heap->first; off_heap->first = (struct erl_off_heap_header*) funp; - erts_smp_refc_inc(&funp->fe->refc, 2); + erts_refc_inc(&funp->fe->refc, 2); goto cleanup_next; } case MAP_SUBTAG: @@ -1658,7 +1658,7 @@ Uint copy_shared_perform(Eterm obj, Uint size, erts_shcopy_t *info, case EXTERNAL_REF_SUBTAG: { ExternalThing *etp = (ExternalThing *) ptr; - erts_smp_refc_inc(&etp->node->refc, 2); + erts_refc_inc(&etp->node->refc, 2); } off_heap_node_container_common: { @@ -1855,7 +1855,7 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap) case FUN_SUBTAG: { ErlFunThing* funp = (ErlFunThing *) (tp-1); - erts_smp_refc_inc(&funp->fe->refc, 2); + erts_refc_inc(&funp->fe->refc, 2); } goto off_heap_common; case EXTERNAL_PID_SUBTAG: @@ -1863,7 +1863,7 @@ Eterm copy_shallow(Eterm* ptr, Uint sz, Eterm** hpp, ErlOffHeap* off_heap) case EXTERNAL_REF_SUBTAG: { ExternalThing* etp = (ExternalThing *) (tp-1); - erts_smp_refc_inc(&etp->node->refc, 2); + erts_refc_inc(&etp->node->refc, 2); } off_heap_common: { diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c index 2be05eed29..491c4d378e 100644 --- a/erts/emulator/beam/dist.c +++ b/erts/emulator/beam/dist.c @@ -130,8 +130,8 @@ static int dsig_send_ctl(ErtsDSigData* dsdp, Eterm ctl, int 
force_busy); static void send_nodes_mon_msgs(Process *, Eterm, Eterm, Eterm, Eterm); static void init_nodes_monitors(void); -static erts_smp_atomic_t no_caches; -static erts_smp_atomic_t no_nodes; +static erts_atomic_t no_caches; +static erts_atomic_t no_nodes; struct { Eterm reason; @@ -144,8 +144,8 @@ delete_cache(ErtsAtomCache *cache) { if (cache) { erts_free(ERTS_ALC_T_DCACHE, (void *) cache); - ASSERT(erts_smp_atomic_read_nob(&no_caches) > 0); - erts_smp_atomic_dec_nob(&no_caches); + ASSERT(erts_atomic_read_nob(&no_caches) > 0); + erts_atomic_dec_nob(&no_caches); } } @@ -156,14 +156,14 @@ create_cache(DistEntry *dep) int i; ErtsAtomCache *cp; - ERTS_SMP_LC_ASSERT( + ERTS_LC_ASSERT( is_internal_port(dep->cid) && erts_lc_is_port_locked(erts_port_lookup_raw(dep->cid))); ASSERT(!dep->cache); dep->cache = cp = (ErtsAtomCache*) erts_alloc(ERTS_ALC_T_DCACHE, sizeof(ErtsAtomCache)); - erts_smp_atomic_inc_nob(&no_caches); + erts_atomic_inc_nob(&no_caches); for (i = 0; i < sizeof(cp->in_arr)/sizeof(cp->in_arr[0]); i++) { cp->in_arr[i] = THE_NON_VALUE; cp->out_arr[i] = THE_NON_VALUE; @@ -172,13 +172,13 @@ create_cache(DistEntry *dep) Uint erts_dist_cache_size(void) { - return (Uint) erts_smp_atomic_read_mb(&no_caches)*sizeof(ErtsAtomCache); + return (Uint) erts_atomic_read_mb(&no_caches)*sizeof(ErtsAtomCache); } static ErtsProcList * get_suspended_on_de(DistEntry *dep, Uint32 unset_qflgs) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&dep->qlock)); + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&dep->qlock)); dep->qflgs &= ~unset_qflgs; if (dep->qflgs & ERTS_DE_QFLG_EXIT) { /* No resume when exit has been scheduled */ @@ -284,14 +284,14 @@ static void doit_monitor_net_exits(ErtsMonitor *mon, void *vnecp) ? TUPLE2(lhp, rmon->name, dep->sysname) : rmon->u.pid); rp_locks |= ERTS_PROC_LOCKS_MSG_SEND; - erts_smp_proc_lock(rp, ERTS_PROC_LOCKS_MSG_SEND); + erts_proc_lock(rp, ERTS_PROC_LOCKS_MSG_SEND); erts_queue_monitor_message(rp, &rp_locks, mon->ref, am_process, watched, am_noconnection); erts_destroy_monitor(rmon); } UnUseTmpHeapNoproc(3); } - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); done: erts_destroy_monitor(mon); } @@ -340,7 +340,7 @@ static void doit_link_net_exits_sub(ErtsLink *sublnk, void *vlnecp) trace_proc(NULL, 0, rp, am_getting_unlinked, sublnk->pid); } } - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } done: erts_destroy_link(sublnk); @@ -382,7 +382,7 @@ static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp) rp = erts_proc_lookup(lnk->pid); if (!rp) goto done; - erts_smp_proc_lock(rp, rp_locks); + erts_proc_lock(rp, rp_locks); rlnk = erts_remove_link(&ERTS_P_LINKS(rp), name); if (rlnk != NULL) { ASSERT(is_atom(rlnk->pid) && (rlnk->type == LINK_NODE)); @@ -399,7 +399,7 @@ static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp) tup = TUPLE2(hp, am_nodedown, name); erts_queue_message(rp, rp_locks, msgp, tup, am_system); } - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } done: erts_destroy_link(lnk); @@ -411,16 +411,16 @@ set_node_not_alive(void *unused) ErlHeapFragment *bp; Eterm nodename = erts_this_dist_entry->sysname; - ASSERT(erts_smp_atomic_read_nob(&no_nodes) == 0); + ASSERT(erts_atomic_read_nob(&no_nodes) == 0); - erts_smp_thr_progress_block(); + erts_thr_progress_block(); erts_set_this_node(am_Noname, 0); erts_is_alive = 0; send_nodes_mon_msgs(NULL, am_nodedown, nodename, am_visible, nodedown.reason); nodedown.reason = NIL; bp = nodedown.bp; nodedown.bp = NULL; - 
erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); if (bp) free_message_buffer(bp); } @@ -428,7 +428,7 @@ set_node_not_alive(void *unused) static ERTS_INLINE void dec_no_nodes(void) { - erts_aint_t no = erts_smp_atomic_dec_read_mb(&no_nodes); + erts_aint_t no = erts_atomic_dec_read_mb(&no_nodes); ASSERT(no >= 0); ASSERT(erts_get_scheduler_id()); /* Need to be a scheduler */ if (no == 0) @@ -441,10 +441,10 @@ static ERTS_INLINE void inc_no_nodes(void) { #ifdef DEBUG - erts_aint_t no = erts_smp_atomic_read_nob(&no_nodes); + erts_aint_t no = erts_atomic_read_nob(&no_nodes); ASSERT(erts_is_alive ? no > 0 : no == 0); #endif - erts_smp_atomic_inc_mb(&no_nodes); + erts_atomic_inc_mb(&no_nodes); } /* @@ -460,7 +460,7 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason) Eterm nd_reason = (reason == am_no_network ? am_no_network : am_net_kernel_terminated); - erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); + erts_rwmtx_rlock(&erts_dist_table_rwmtx); for (tdep = erts_hidden_dist_entries; tdep; tdep = tdep->next) no_dist_port++; @@ -469,7 +469,7 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason) /* KILL all port controllers */ if (no_dist_port == 0) - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); else { Eterm def_buf[128]; int i = 0; @@ -488,7 +488,7 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason) ASSERT(is_internal_port(tdep->cid)); dist_port[i++] = tdep->cid; } - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); for (i = 0; i < no_dist_port; i++) { Port *prt = erts_port_lookup(dist_port[i], @@ -531,10 +531,10 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason) ErtsMonitor *monitors; Uint32 flags; - erts_smp_atomic_set_mb(&dep->dist_cmd_scheduled, 1); - erts_smp_de_rwlock(dep); + erts_atomic_set_mb(&dep->dist_cmd_scheduled, 1); + erts_de_rwlock(dep); - ERTS_SMP_LC_ASSERT(is_internal_port(dep->cid) + ERTS_LC_ASSERT(is_internal_port(dep->cid) && erts_lc_is_port_locked(erts_port_lookup_raw(dep->cid))); if (erts_port_task_is_scheduled(&dep->dist_cmd)) @@ -542,34 +542,34 @@ int erts_do_net_exits(DistEntry *dep, Eterm reason) if (dep->status & ERTS_DE_SFLG_EXITING) { #ifdef DEBUG - erts_smp_mtx_lock(&dep->qlock); + erts_mtx_lock(&dep->qlock); ASSERT(dep->qflgs & ERTS_DE_QFLG_EXIT); - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_unlock(&dep->qlock); #endif } else { dep->status |= ERTS_DE_SFLG_EXITING; - erts_smp_mtx_lock(&dep->qlock); + erts_mtx_lock(&dep->qlock); ASSERT(!(dep->qflgs & ERTS_DE_QFLG_EXIT)); dep->qflgs |= ERTS_DE_QFLG_EXIT; - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_unlock(&dep->qlock); } - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); monitors = dep->monitors; nlinks = dep->nlinks; node_links = dep->node_links; dep->monitors = NULL; dep->nlinks = NULL; dep->node_links = NULL; - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); nodename = dep->sysname; flags = dep->flags; erts_set_dist_entry_not_connected(dep); - erts_smp_de_rwunlock(dep); + erts_de_rwunlock(dep); erts_sweep_monitors(monitors, &doit_monitor_net_exits, (void *) &nec); erts_sweep_links(nlinks, &doit_link_net_exits, (void *) &nec); @@ -603,8 +603,8 @@ void init_dist(void) nodedown.reason = NIL; nodedown.bp = NULL; - erts_smp_atomic_init_nob(&no_nodes, 0); - erts_smp_atomic_init_nob(&no_caches, 0); + erts_atomic_init_nob(&no_nodes, 0); + erts_atomic_init_nob(&no_caches, 0); /* Lookup/Install all references to trap functions */ dsend2_trap = trap_function(am_dsend,2); @@ -657,19 +657,19 @@ 
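The dist.c hunks above and below all enforce one queue discipline: dep->qlock guards the output queue together with its qsize and qflgs bookkeeping, and a sender that pushes qsize past erts_dist_buf_busy_limit sets ERTS_DE_QFLG_BUSY and suspends itself instead of letting the queue grow without bound. A freestanding sketch of that enqueue step, using pthread stand-ins in place of the real ErtsDistOutputBuf machinery (dist_queue and enqueue_obuf are illustrative names, not ERTS API):

    #include <pthread.h>
    #include <stddef.h>

    /* Illustrative analogues of dep->qlock, dep->qsize and ERTS_DE_QFLG_BUSY. */
    typedef struct {
        pthread_mutex_t qlock;
        size_t qsize;       /* bytes queued towards the dist port */
        size_t busy_limit;  /* erts_dist_buf_busy_limit analogue  */
        int    busy;        /* back-pressure flag                 */
    } dist_queue;

    /* Account the buffer first, then decide about back-pressure; returns
     * nonzero when the caller should suspend, mirroring the qlock critical
     * section in erts_dsig_send. */
    static int enqueue_obuf(dist_queue *q, size_t obuf_size)
    {
        int suspend;
        pthread_mutex_lock(&q->qlock);
        q->qsize += obuf_size;
        if (q->qsize >= q->busy_limit)
            q->busy = 1;
        suspend = q->busy;
        pthread_mutex_unlock(&q->qlock);
        return suspend;
    }

erts_dist_command drains the queue from the port side and, once qsize drops back below the limit, clears the busy flag and resumes the suspended senders — the get_suspended_on_de/erts_resume_processes pairing visible in the hunks that follow.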
static void clear_dist_entry(DistEntry *dep) ErtsProcList *suspendees; ErtsDistOutputBuf *obuf; - erts_smp_de_rwlock(dep); + erts_de_rwlock(dep); cache = dep->cache; dep->cache = NULL; #ifdef DEBUG - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); ASSERT(!dep->nlinks); ASSERT(!dep->node_links); ASSERT(!dep->monitors); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); #endif - erts_smp_mtx_lock(&dep->qlock); + erts_mtx_lock(&dep->qlock); if (!dep->out_queue.last) obuf = dep->finalized_out_queue.first; @@ -685,10 +685,10 @@ static void clear_dist_entry(DistEntry *dep) dep->status = 0; suspendees = get_suspended_on_de(dep, ERTS_DE_QFLGS_ALL); - erts_smp_mtx_unlock(&dep->qlock); - erts_smp_atomic_set_nob(&dep->dist_cmd_scheduled, 0); + erts_mtx_unlock(&dep->qlock); + erts_atomic_set_nob(&dep->dist_cmd_scheduled, 0); dep->send = NULL; - erts_smp_de_rwunlock(dep); + erts_de_rwunlock(dep); erts_resume_processes(suspendees); @@ -703,10 +703,10 @@ static void clear_dist_entry(DistEntry *dep) } if (obufsize) { - erts_smp_mtx_lock(&dep->qlock); + erts_mtx_lock(&dep->qlock); ASSERT(dep->qsize >= obufsize); dep->qsize -= obufsize; - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_unlock(&dep->qlock); } } @@ -811,9 +811,9 @@ erts_dsig_send_m_exit(ErtsDSigData *dsdp, Eterm watcher, Eterm watched, watched, watcher, ref, reason); #ifdef DEBUG - erts_smp_de_links_lock(dsdp->dep); + erts_de_links_lock(dsdp->dep); ASSERT(!erts_lookup_monitor(dsdp->dep->monitors, ref)); - erts_smp_de_links_unlock(dsdp->dep); + erts_de_links_unlock(dsdp->dep); #endif res = dsig_send_ctl(dsdp, ctl, 1); @@ -1151,9 +1151,9 @@ int erts_net_message(Port *prt, UseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (!erts_is_alive) { UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE); @@ -1261,23 +1261,23 @@ int erts_net_message(Port *prt, break; } - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); res = erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, from); if (res < 0) { /* It was already there! Lets skip the rest... 
*/ - erts_smp_de_links_unlock(dep); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_de_links_unlock(dep); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); break; } lnk = erts_add_or_lookup_link(&(dep->nlinks), LINK_PID, rp->common.id); erts_add_link(&(ERTS_LINK_ROOT(lnk)), LINK_PID, from); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); if (IS_TRACED_FL(rp, F_TRACE_PROCS)) trace_proc(NULL, 0, rp, am_getting_linked, from); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); break; case DOP_UNLINK: { @@ -1303,7 +1303,7 @@ int erts_net_message(Port *prt, trace_proc(NULL, 0, rp, am_getting_unlinked, from); } - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); erts_remove_dist_link(&dld, to, from, dep); erts_destroy_dist_link(&dld); @@ -1355,11 +1355,11 @@ int erts_net_message(Port *prt, else { if (is_atom(watched)) watched = rp->common.id; - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); erts_add_monitor(&(dep->monitors), MON_ORIGIN, ref, watched, name); erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, ref, watcher, name); - erts_smp_de_links_unlock(dep); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_de_links_unlock(dep); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); } break; @@ -1381,9 +1381,9 @@ int erts_net_message(Port *prt, goto invalid_message; } - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); mon = erts_remove_monitor(&(dep->monitors),ref); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); /* ASSERT(mon != NULL); can happen in case of broken dist message */ if (mon == NULL) { break; @@ -1397,7 +1397,7 @@ int erts_net_message(Port *prt, break; } mon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); ASSERT(mon != NULL); if (mon == NULL) { break; @@ -1458,7 +1458,7 @@ int erts_net_message(Port *prt, erts_queue_dist_message(rp, locks, ede_copy, token, from); if (locks) - erts_smp_proc_unlock(rp, locks); + erts_proc_unlock(rp, locks); } break; @@ -1507,7 +1507,7 @@ int erts_net_message(Port *prt, erts_queue_dist_message(rp, locks, ede_copy, token, tuple[2]); if (locks) - erts_smp_proc_unlock(rp, locks); + erts_proc_unlock(rp, locks); } break; @@ -1533,7 +1533,7 @@ int erts_net_message(Port *prt, goto invalid_message; } - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); sysname = dep->sysname; mon = erts_remove_monitor(&(dep->monitors), ref); /* @@ -1542,7 +1542,7 @@ int erts_net_message(Port *prt, * removed info about monitor. In this case, do nothing * and everything will be as it should. 
*/ - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); if (mon == NULL) { break; } @@ -1556,7 +1556,7 @@ int erts_net_message(Port *prt, mon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref); if (mon == NULL) { - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); break; } UseTmpHeapNoproc(3); @@ -1567,7 +1567,7 @@ int erts_net_message(Port *prt, erts_queue_monitor_message(rp, &rp_locks, ref, am_process, watched, reason); - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); erts_destroy_monitor(mon); UnUseTmpHeapNoproc(3); break; @@ -1629,13 +1629,13 @@ int erts_net_message(Port *prt, if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) { /* We didn't exit the process and it is traced */ if (rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) { - erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND); + erts_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND); rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND; } trace_proc(NULL, 0, rp, am_getting_unlinked, from); } } - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } erts_remove_dist_link(&dld, to, from, dep); if (lnk) @@ -1677,7 +1677,7 @@ int erts_net_message(Port *prt, token, NULL, 0); - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } break; } @@ -1695,7 +1695,7 @@ int erts_net_message(Port *prt, if (!rp) break; rp->group_leader = STORE_NC_IN_PROC(rp, from); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); break; default: @@ -1707,7 +1707,7 @@ int erts_net_message(Port *prt, erts_free(ERTS_ALC_T_DCTRL_BUF, (void *) ctl); } UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; return 0; invalid_message: { @@ -1724,7 +1724,7 @@ decode_error: data_error: UnUseTmpHeapNoproc(DIST_CTL_DEFAULT_SIZE); erts_deliver_port_exit(prt, dep->cid, am_killed, 0, 1); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; return -1; } @@ -1760,7 +1760,7 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx) if (!ctx->c_p || dsdp->no_suspend) ctx->force_busy = 1; - ERTS_SMP_LC_ASSERT(!ctx->c_p + ERTS_LC_ASSERT(!ctx->c_p || (ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(ctx->c_p))); @@ -1849,28 +1849,28 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx) * and if so enqueue the signal and schedule it for send. */ ctx->obuf->next = NULL; - erts_smp_de_rlock(dep); + erts_de_rlock(dep); cid = dep->cid; if (cid != dsdp->cid || dep->connection_id != dsdp->connection_id || dep->status & ERTS_DE_SFLG_EXITING) { /* Not the same connection as when we started; drop message... 
*/ - erts_smp_de_runlock(dep); + erts_de_runlock(dep); free_dist_obuf(ctx->obuf); } else { ErtsProcList *plp = NULL; - erts_smp_mtx_lock(&dep->qlock); + erts_mtx_lock(&dep->qlock); dep->qsize += size_obuf(ctx->obuf); if (dep->qsize >= erts_dist_buf_busy_limit) dep->qflgs |= ERTS_DE_QFLG_BUSY; if (!ctx->force_busy && (dep->qflgs & ERTS_DE_QFLG_BUSY)) { - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_unlock(&dep->qlock); plp = erts_proclist_create(ctx->c_p); erts_suspend(ctx->c_p, ERTS_PROC_LOCK_MAIN, NULL); suspended = 1; - erts_smp_mtx_lock(&dep->qlock); + erts_mtx_lock(&dep->qlock); } /* Enqueue obuf on dist entry */ @@ -1905,9 +1905,9 @@ erts_dsig_send(ErtsDSigData *dsdp, struct erts_dsig_send_context* ctx) } } - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_unlock(&dep->qlock); erts_schedule_dist_command(NULL, dep); - erts_smp_de_runlock(dep); + erts_de_runlock(dep); if (resume) { erts_resume(ctx->c_p, ERTS_PROC_LOCK_MAIN); @@ -1963,8 +1963,8 @@ dist_port_command(Port *prt, ErtsDistOutputBuf *obuf) int fpe_was_unmasked; Uint size = obuf->ext_endp - obuf->extp; - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (size > (Uint) INT_MAX) erts_exit(ERTS_DUMP_EXIT, @@ -2003,8 +2003,8 @@ dist_port_commandv(Port *prt, ErtsDistOutputBuf *obuf) ErlDrvBinary* bv[2]; ErlIOVec eiov; - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (size > (Uint) INT_MAX) erts_exit(ERTS_DUMP_EXIT, @@ -2079,18 +2079,18 @@ erts_dist_command(Port *prt, int reds_limit) erts_aint32_t sched_flags; ErtsSchedulerData *esdp = erts_get_scheduler_data(); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); - erts_smp_refc_inc(&dep->refc, 1); /* Otherwise dist_entry might be + erts_refc_inc(&dep->refc, 1); /* Otherwise dist_entry might be removed if port command fails */ - erts_smp_atomic_set_mb(&dep->dist_cmd_scheduled, 0); + erts_atomic_set_mb(&dep->dist_cmd_scheduled, 0); - erts_smp_de_rlock(dep); + erts_de_rlock(dep); flags = dep->flags; status = dep->status; send = dep->send; - erts_smp_de_runlock(dep); + erts_de_runlock(dep); if (status & ERTS_DE_SFLG_EXITING) { erts_deliver_port_exit(prt, prt->common.id, am_killed, 0, 1); @@ -2108,19 +2108,19 @@ erts_dist_command(Port *prt, int reds_limit) * a mess. 
*/ - erts_smp_mtx_lock(&dep->qlock); + erts_mtx_lock(&dep->qlock); oq.first = dep->out_queue.first; oq.last = dep->out_queue.last; dep->out_queue.first = NULL; dep->out_queue.last = NULL; - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_unlock(&dep->qlock); foq.first = dep->finalized_out_queue.first; foq.last = dep->finalized_out_queue.last; dep->finalized_out_queue.first = NULL; dep->finalized_out_queue.last = NULL; - sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + sched_flags = erts_atomic32_read_nob(&prt->sched.flags); if (reds > reds_limit) goto preempted; @@ -2142,7 +2142,7 @@ erts_dist_command(Port *prt, int reds_limit) obufsize += size_obuf(fob); foq.first = foq.first->next; free_dist_obuf(fob); - sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + sched_flags = erts_atomic32_read_nob(&prt->sched.flags); preempt = reds > reds_limit || (sched_flags & ERTS_PTS_FLG_EXIT); if (sched_flags & ERTS_PTS_FLG_BUSY_PORT) break; @@ -2227,7 +2227,7 @@ erts_dist_command(Port *prt, int reds_limit) obufsize += size_obuf(fob); oq.first = oq.first->next; free_dist_obuf(fob); - sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + sched_flags = erts_atomic32_read_nob(&prt->sched.flags); preempt = reds > reds_limit || (sched_flags & ERTS_PTS_FLG_EXIT); if ((sched_flags & ERTS_PTS_FLG_BUSY_PORT) && oq.first && !preempt) goto finalize_only; @@ -2254,7 +2254,7 @@ erts_dist_command(Port *prt, int reds_limit) * dist entry in a non-busy state and resume suspended * processes. */ - erts_smp_mtx_lock(&dep->qlock); + erts_mtx_lock(&dep->qlock); ASSERT(dep->qsize >= obufsize); dep->qsize -= obufsize; obufsize = 0; @@ -2264,13 +2264,13 @@ erts_dist_command(Port *prt, int reds_limit) ErtsProcList *suspendees; int resumed; suspendees = get_suspended_on_de(dep, ERTS_DE_QFLG_BUSY); - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_unlock(&dep->qlock); resumed = erts_resume_processes(suspendees); reds += resumed*ERTS_PORT_REDS_DIST_CMD_RESUMED; } else - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_unlock(&dep->qlock); } ASSERT(!oq.first && !oq.last); @@ -2279,10 +2279,10 @@ erts_dist_command(Port *prt, int reds_limit) if (obufsize != 0) { ASSERT(obufsize > 0); - erts_smp_mtx_lock(&dep->qlock); + erts_mtx_lock(&dep->qlock); ASSERT(dep->qsize >= obufsize); dep->qsize -= obufsize; - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_unlock(&dep->qlock); } ASSERT(foq.first || !foq.last); @@ -2336,9 +2336,9 @@ erts_dist_command(Port *prt, int reds_limit) foq.last = NULL; #ifdef DEBUG - erts_smp_mtx_lock(&dep->qlock); + erts_mtx_lock(&dep->qlock); ASSERT(dep->qsize == obufsize); - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_unlock(&dep->qlock); #endif } else { @@ -2347,14 +2347,14 @@ erts_dist_command(Port *prt, int reds_limit) * Unhandle buffers need to be put back first * in out_queue. 
*/ - erts_smp_mtx_lock(&dep->qlock); + erts_mtx_lock(&dep->qlock); dep->qsize -= obufsize; obufsize = 0; oq.last->next = dep->out_queue.first; dep->out_queue.first = oq.first; if (!dep->out_queue.last) dep->out_queue.last = oq.last; - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_unlock(&dep->qlock); } erts_schedule_dist_command(prt, NULL); @@ -2384,21 +2384,21 @@ erts_dist_port_not_busy(Port *prt) void erts_kill_dist_connection(DistEntry *dep, Uint32 connection_id) { - erts_smp_de_rwlock(dep); + erts_de_rwlock(dep); if (is_internal_port(dep->cid) && connection_id == dep->connection_id && !(dep->status & ERTS_DE_SFLG_EXITING)) { dep->status |= ERTS_DE_SFLG_EXITING; - erts_smp_mtx_lock(&dep->qlock); + erts_mtx_lock(&dep->qlock); ASSERT(!(dep->qflgs & ERTS_DE_QFLG_EXIT)); dep->qflgs |= ERTS_DE_QFLG_EXIT; - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_unlock(&dep->qlock); erts_schedule_dist_command(NULL, dep); } - erts_smp_de_rwunlock(dep); + erts_de_rwunlock(dep); } struct print_to_data { @@ -2514,7 +2514,7 @@ info_dist_entry(fmtfn_t to, void *arg, DistEntry *dep, int visible, int connecte erts_print(to, arg, "Name: %T", dep->sysname); #ifdef DEBUG - erts_print(to, arg, " (refc=%d)", erts_smp_refc_read(&dep->refc, 0)); + erts_print(to, arg, " (refc=%d)", erts_refc_read(&dep->refc, 0)); #endif erts_print(to, arg, "\n"); if (!connected && is_nil(dep->cid)) { @@ -2645,22 +2645,22 @@ BIF_RETTYPE setnode_2(BIF_ALIST_2) net_kernel->flags |= F_DISTRIBUTION; if (net_kernel != BIF_P) - erts_smp_proc_unlock(net_kernel, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(net_kernel, ERTS_PROC_LOCK_MAIN); #ifdef DEBUG - erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); + erts_rwmtx_rlock(&erts_dist_table_rwmtx); ASSERT(!erts_visible_dist_entries && !erts_hidden_dist_entries); - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); #endif - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); inc_no_nodes(); erts_set_this_node(BIF_ARG_1, (Uint32) creation); erts_is_alive = 1; send_nodes_mon_msgs(NULL, am_nodeup, BIF_ARG_1, am_visible, NIL); - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(am_true); @@ -2750,7 +2750,7 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) BIF_P, ERTS_PROC_LOCK_MAIN, ERTS_PORT_SFLGS_INVALID_LOOKUP); - erts_smp_de_rwlock(dep); + erts_de_rwlock(dep); if (!pp || (erts_atomic32_read_nob(&pp->state) & ERTS_PORT_SFLG_EXITING)) @@ -2767,9 +2767,9 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) ErtsProcList *plp = erts_proclist_create(BIF_P); plp->next = NULL; erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL); - erts_smp_mtx_lock(&dep->qlock); + erts_mtx_lock(&dep->qlock); erts_proclist_store_last(&dep->suspended, plp); - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_unlock(&dep->qlock); goto yield; } @@ -2806,9 +2806,9 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) ASSERT(dep->send); #ifdef DEBUG - erts_smp_mtx_lock(&dep->qlock); + erts_mtx_lock(&dep->qlock); ASSERT(dep->qsize == 0); - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_unlock(&dep->qlock); #endif erts_set_dist_entry_connected(dep, BIF_ARG_2, flags); @@ -2816,7 +2816,7 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) if (flags & DFLAG_DIST_HDR_ATOM_CACHE) create_cache(dep); - erts_smp_de_rwunlock(dep); + erts_de_rwunlock(dep); dep = NULL; /* inc of refc transferred to port (dist_entry field) */ inc_no_nodes(); @@ -2829,7 
+2829,7 @@ BIF_RETTYPE setnode_3(BIF_ALIST_3) done: if (dep && dep != erts_this_dist_entry) { - erts_smp_de_rwunlock(dep); + erts_de_rwunlock(dep); erts_deref_dist_entry(dep); } @@ -2881,7 +2881,7 @@ BIF_RETTYPE dist_exit_3(BIF_ALIST_3) if (BIF_P->common.id == local) { lp_locks = ERTS_PROC_LOCKS_ALL; lp = BIF_P; - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(BIF_P, ERTS_PROC_LOCKS_ALL_MINOR); } else { lp_locks = ERTS_PROC_LOCKS_XSIG_SEND; @@ -2902,9 +2902,9 @@ BIF_RETTYPE dist_exit_3(BIF_ALIST_3) 0); if (lp == BIF_P) lp_locks &= ~ERTS_PROC_LOCK_MAIN; - erts_smp_proc_unlock(lp, lp_locks); + erts_proc_unlock(lp, lp_locks); if (lp == BIF_P) { - erts_aint32_t state = erts_smp_atomic32_read_acqb(&BIF_P->state); + erts_aint32_t state = erts_atomic32_read_acqb(&BIF_P->state); /* * We may have exited current process and may have to take action. */ @@ -2996,7 +2996,7 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1) length = 0; - erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); + erts_rwmtx_rlock(&erts_dist_table_rwmtx); ASSERT(erts_no_of_not_connected_dist_entries > 0); ASSERT(erts_no_of_hidden_dist_entries >= 0); @@ -3013,7 +3013,7 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1) result = NIL; if (length == 0) { - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); goto done; } @@ -3044,7 +3044,7 @@ BIF_RETTYPE nodes_1(BIF_ALIST_1) hp += 2; } ASSERT(endp == hp); - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); done: UnUseTmpHeap(2,BIF_P); @@ -3099,15 +3099,15 @@ monitor_node(Process* p, Eterm Node, Eterm Bool, Eterm Options) if (dep == erts_this_dist_entry) goto done; - erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK); - erts_smp_de_rlock(dep); + erts_proc_lock(p, ERTS_PROC_LOCK_LINK); + erts_de_rlock(dep); if (ERTS_DE_IS_NOT_CONNECTED(dep)) { - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); - erts_smp_de_runlock(dep); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_de_runlock(dep); goto do_trap; } - erts_smp_de_links_lock(dep); - erts_smp_de_runlock(dep); + erts_de_links_lock(dep); + erts_de_runlock(dep); if (Bool == am_true) { ASSERT(dep->cid != NIL); @@ -3134,8 +3134,8 @@ monitor_node(Process* p, Eterm Node, Eterm Bool, Eterm Options) } } - erts_smp_de_links_unlock(dep); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_de_links_unlock(dep); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); done: erts_deref_dist_entry(dep); @@ -3167,9 +3167,9 @@ BIF_RETTYPE net_kernel_dflag_unicode_io_1(BIF_ALIST_1) if (de == erts_this_dist_entry) { BIF_RET(am_true); } - erts_smp_de_rlock(de); + erts_de_rlock(de); f = de->flags; - erts_smp_de_runlock(de); + erts_de_runlock(de); BIF_RET(((f & DFLAG_UNICODE_IO) ? 
am_true : am_false)); } @@ -3199,7 +3199,7 @@ struct ErtsNodesMonitor_ { Uint16 no; }; -static erts_smp_mtx_t nodes_monitors_mtx; +static erts_mtx_t nodes_monitors_mtx; static ErtsNodesMonitor *nodes_monitors; static ErtsNodesMonitor *nodes_monitors_end; @@ -3217,7 +3217,7 @@ static ErtsNodesMonitor *nodes_monitors_end; static void init_nodes_monitors(void) { - erts_smp_mtx_init(&nodes_monitors_mtx, "nodes_monitors", NIL, + erts_mtx_init(&nodes_monitors_mtx, "nodes_monitors", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION); nodes_monitors = NULL; nodes_monitors_end = NULL; @@ -3343,10 +3343,10 @@ send_nodes_mon_msgs(Process *c_p, Eterm what, Eterm node, Eterm type, Eterm reas } #endif - ERTS_SMP_LC_ASSERT(!c_p + ERTS_LC_ASSERT(!c_p || (erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN)); - erts_smp_mtx_lock(&nodes_monitors_mtx); + erts_mtx_lock(&nodes_monitors_mtx); for (nmp = nodes_monitors; nmp; nmp = nmp->next) { int i; @@ -3379,7 +3379,7 @@ send_nodes_mon_msgs(Process *c_p, Eterm what, Eterm node, Eterm type, Eterm reas if (rp) { if (rp == c_p) rp_locks &= ~ERTS_PROC_LOCK_MAIN; - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } rp = nmp->proc; @@ -3406,10 +3406,10 @@ send_nodes_mon_msgs(Process *c_p, Eterm what, Eterm node, Eterm type, Eterm reas if (rp) { if (rp == c_p) rp_locks &= ~ERTS_PROC_LOCK_MAIN; - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } - erts_smp_mtx_unlock(&nodes_monitors_mtx); + erts_mtx_unlock(&nodes_monitors_mtx); } static Eterm @@ -3419,8 +3419,8 @@ insert_nodes_monitor(Process *c_p, Uint32 opts) Eterm res = am_false; ErtsNodesMonitor *xnmp, *nmp; - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&nodes_monitors_mtx)); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&nodes_monitors_mtx)); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); xnmp = c_p->nodes_monitors; if (xnmp) { @@ -3504,8 +3504,8 @@ remove_nodes_monitors(Process *c_p, Uint32 opts, int all) Eterm res = am_false; ErtsNodesMonitor *nmp; - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&nodes_monitors_mtx)); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&nodes_monitors_mtx)); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); nmp = c_p->nodes_monitors; ASSERT(!nmp || !nmp->prev || nmp->prev->proc != c_p); @@ -3554,16 +3554,16 @@ erts_delete_nodes_monitors(Process *c_p, ErtsProcLocks locks) erts_proc_lc_might_unlock(c_p, might_unlock); } #endif - if (erts_smp_mtx_trylock(&nodes_monitors_mtx) == EBUSY) { + if (erts_mtx_trylock(&nodes_monitors_mtx) == EBUSY) { ErtsProcLocks unlock_locks = locks & ~ERTS_PROC_LOCK_MAIN; if (c_p && unlock_locks) - erts_smp_proc_unlock(c_p, unlock_locks); - erts_smp_mtx_lock(&nodes_monitors_mtx); + erts_proc_unlock(c_p, unlock_locks); + erts_mtx_lock(&nodes_monitors_mtx); if (c_p && unlock_locks) - erts_smp_proc_lock(c_p, unlock_locks); + erts_proc_lock(c_p, unlock_locks); } remove_nodes_monitors(c_p, 0, 1); - erts_smp_mtx_unlock(&nodes_monitors_mtx); + erts_mtx_unlock(&nodes_monitors_mtx); } Eterm @@ -3574,7 +3574,7 @@ erts_monitor_nodes(Process *c_p, Eterm on, Eterm olist) Uint16 opts = (Uint16) 0; ASSERT(c_p); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); if (on != am_true && on != am_false) return THE_NON_VALUE; 
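The erts_delete_nodes_monitors hunk above is careful about lock order: it never blocks on nodes_monitors_mtx while still holding other process locks. It tries the mutex first and, on EBUSY, temporarily releases the conflicting locks, blocks, and then reacquires them. The same pattern in isolation, with pthread stand-ins (acquire_without_inversion and its parameters are illustrative, not ERTS API):

    #include <errno.h>
    #include <pthread.h>

    /* Take `wanted` without ever blocking on it while `held` is owned,
     * which would risk inverting the global lock order. */
    static void acquire_without_inversion(pthread_mutex_t *wanted,
                                          pthread_mutex_t *held)
    {
        if (pthread_mutex_trylock(wanted) == EBUSY) {
            pthread_mutex_unlock(held);  /* back off before blocking  */
            pthread_mutex_lock(wanted);  /* now safe to wait          */
            pthread_mutex_lock(held);    /* restore the caller's lock */
        }
    }

In general, anything guarded by `held` must be revalidated after such a gap, since other threads may have run while it was released; here the monitor list is only touched once nodes_monitors_mtx is safely owned.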
@@ -3630,14 +3630,14 @@ erts_monitor_nodes(Process *c_p, Eterm on, Eterm olist) return THE_NON_VALUE; } - erts_smp_mtx_lock(&nodes_monitors_mtx); + erts_mtx_lock(&nodes_monitors_mtx); if (on == am_true) res = insert_nodes_monitor(c_p, opts); else res = remove_nodes_monitors(c_p, opts, 0); - erts_smp_mtx_unlock(&nodes_monitors_mtx); + erts_mtx_unlock(&nodes_monitors_mtx); return res; } @@ -3660,8 +3660,8 @@ erts_processes_monitoring_nodes(Process *c_p) #endif ASSERT(c_p); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); - erts_smp_mtx_lock(&nodes_monitors_mtx); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); + erts_mtx_lock(&nodes_monitors_mtx); sz = 0; szp = &sz; @@ -3710,7 +3710,7 @@ erts_processes_monitoring_nodes(Process *c_p) ASSERT(hp == hend); - erts_smp_mtx_unlock(&nodes_monitors_mtx); + erts_mtx_unlock(&nodes_monitors_mtx); return res; } diff --git a/erts/emulator/beam/dist.h b/erts/emulator/beam/dist.h index 3e17645997..05016cafc5 100644 --- a/erts/emulator/beam/dist.h +++ b/erts/emulator/beam/dist.h @@ -100,7 +100,7 @@ typedef struct { } ErtsDSigData; #define ERTS_DE_IS_NOT_CONNECTED(DEP) \ - (ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&(DEP)->rwmtx) \ + (ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&(DEP)->rwmtx) \ || erts_lc_rwmtx_is_rwlocked(&(DEP)->rwmtx)), \ (is_nil((DEP)->cid) || ((DEP)->status & ERTS_DE_SFLG_EXITING))) @@ -153,19 +153,19 @@ erts_dsig_prepare(ErtsDSigData *dsdp, if (!dep) return ERTS_DSIG_PREP_NOT_CONNECTED; if (dspl == ERTS_DSP_RWLOCK) - erts_smp_de_rwlock(dep); + erts_de_rwlock(dep); else - erts_smp_de_rlock(dep); + erts_de_rlock(dep); if (ERTS_DE_IS_NOT_CONNECTED(dep)) { failure = ERTS_DSIG_PREP_NOT_CONNECTED; goto fail; } if (no_suspend) { failure = ERTS_DSIG_PREP_CONNECTED; - erts_smp_mtx_lock(&dep->qlock); + erts_mtx_lock(&dep->qlock); if (dep->qflgs & ERTS_DE_QFLG_BUSY) failure = ERTS_DSIG_PREP_WOULD_SUSPEND; - erts_smp_mtx_unlock(&dep->qlock); + erts_mtx_unlock(&dep->qlock); if (failure == ERTS_DSIG_PREP_WOULD_SUSPEND) goto fail; } @@ -175,14 +175,14 @@ erts_dsig_prepare(ErtsDSigData *dsdp, dsdp->connection_id = dep->connection_id; dsdp->no_suspend = no_suspend; if (dspl == ERTS_DSP_NO_LOCK) - erts_smp_de_runlock(dep); + erts_de_runlock(dep); return ERTS_DSIG_PREP_CONNECTED; fail: if (dspl == ERTS_DSP_RWLOCK) - erts_smp_de_rwunlock(dep); + erts_de_rwunlock(dep); else - erts_smp_de_runlock(dep); + erts_de_runlock(dep); return failure; } @@ -194,7 +194,7 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry) Eterm id; if (prt) { - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); ASSERT((erts_atomic32_read_nob(&prt->state) & ERTS_PORT_SFLGS_DEAD) == 0); ASSERT(prt->dist_entry); @@ -204,7 +204,7 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry) } else { ASSERT(dist_entry); - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&dist_entry->rwmtx) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&dist_entry->rwmtx) || erts_lc_rwmtx_is_rwlocked(&dist_entry->rwmtx)); ASSERT(is_internal_port(dist_entry->cid)); @@ -212,7 +212,7 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry) id = dep->cid; } - if (!erts_smp_atomic_xchg_mb(&dep->dist_cmd_scheduled, 1)) + if (!erts_atomic_xchg_mb(&dep->dist_cmd_scheduled, 1)) erts_port_task_schedule(id, &dep->dist_cmd, ERTS_PORT_TASK_DIST_CMD); } @@ -238,7 +238,7 @@ erts_remove_dist_link(ErtsDistLinkData *dldp, Eterm rid, DistEntry *dep) { - erts_smp_de_links_lock(dep); + 
erts_de_links_lock(dep); dldp->d_lnk = erts_lookup_link(dep->nlinks, lid); if (!dldp->d_lnk) dldp->d_sub_lnk = NULL; @@ -248,7 +248,7 @@ erts_remove_dist_link(ErtsDistLinkData *dldp, ? NULL : erts_remove_link(&dep->nlinks, lid)); } - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); } ERTS_GLB_INLINE int diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c index 80ee3d7f33..aee54ad0a8 100644 --- a/erts/emulator/beam/erl_alloc.c +++ b/erts/emulator/beam/erl_alloc.c @@ -140,7 +140,7 @@ enum { }; typedef struct { - erts_smp_atomic32_t refc; + erts_atomic32_t refc; int only_sz; int internal; Uint req_sched; @@ -2103,7 +2103,7 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg) int only_one_value = 0; ErtsAlcUFixInfo_t fi[ERTS_ALC_NO_FIXED_SIZES] = {{0,0}}; - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); /* Figure out whats wanted... */ @@ -2276,10 +2276,10 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg) if (proc) { - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(proc)); /* We'll need locks early in the lock order */ - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); } /* Calculate values needed... */ @@ -2437,7 +2437,7 @@ erts_memory(fmtfn_t *print_to_p, void *print_to_arg, void *proc, Eterm earg) Uint *hp; Uint hsz; - erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN); if (only_one_value) { ASSERT(length == 1); @@ -2486,11 +2486,11 @@ erts_allocated_areas(fmtfn_t *print_to_p, void *print_to_arg, void *proc) Uint reserved_atom_space, atom_space; if (proc) { - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(proc)); /* We'll need locks early in the lock order */ - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); } i = 0; @@ -2642,7 +2642,7 @@ erts_allocated_areas(fmtfn_t *print_to_p, void *print_to_arg, void *proc) Uint hsz; Uint *hszp; - erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN); hpp = NULL; hsz = 0; @@ -2730,7 +2730,7 @@ erts_allocator_info(fmtfn_t to, void *arg) { ErtsAlcType_t a; - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); for (a = ERTS_ALC_A_MIN; a <= ERTS_ALC_A_MAX; a++) { int ai; @@ -3298,10 +3298,10 @@ reply_alloc_info(void *vair) if (air->req_sched == sched_id) rp_locks &= ~ERTS_PROC_LOCK_MAIN; - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); erts_proc_dec_refc(rp); - if (erts_smp_atomic32_dec_read_nob(&air->refc) == 0) { + if (erts_atomic32_dec_read_nob(&air->refc) == 0) { erts_iref_storage_clean(&air->iref); aireq_free(air); } @@ -3380,7 +3380,7 @@ erts_request_alloc_info(struct process *c_p, air->allocs[airix] = ERTS_ALC_A_INVALID; - erts_smp_atomic32_init_nob(&air->refc, + erts_atomic32_init_nob(&air->refc, (erts_aint32_t) erts_no_schedulers); erts_proc_add_refc(c_p, (Sint) erts_no_schedulers); diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h index ed5ff4a2ff..c661d0b226 100644 --- a/erts/emulator/beam/erl_alloc.h +++ b/erts/emulator/beam/erl_alloc.h @@ -334,24 +334,10 @@ erts_alloc_get_verify_unused_temp_alloc(Allctr_t **allctr); (((((SZ) - 1) / ERTS_CACHE_LINE_SIZE) + 1) * ERTS_CACHE_LINE_SIZE) #define 
ERTS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \ -ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \ - (void) 0, (void) 0, (void) 0) - -#define ERTS_SMP_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \ -static erts_smp_spinlock_t NAME##_lck; \ -ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \ - erts_smp_spinlock_init(&NAME##_lck, #NAME "_alloc_lock", NIL, \ - ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR),\ - erts_smp_spin_lock(&NAME##_lck), \ - erts_smp_spin_unlock(&NAME##_lck)) - + ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, (void) 0, (void) 0, (void) 0) #define ERTS_TS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \ -ERTS_SMP_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) - - -#define ERTS_PALLOC_IMPL(NAME, TYPE, PASZ) \ -ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, (void) 0, (void) 0, (void) 0) +ERTS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) #define ERTS_TS_PALLOC_IMPL(NAME, TYPE, PASZ) \ static erts_spinlock_t NAME##_lck; \ @@ -362,7 +348,7 @@ ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, \ erts_spin_unlock(&NAME##_lck)) -#define ERTS_SMP_PALLOC_IMPL(NAME, TYPE, PASZ) \ +#define ERTS_PALLOC_IMPL(NAME, TYPE, PASZ) \ ERTS_TS_PALLOC_IMPL(NAME, TYPE, PASZ) diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c index f4c88438f5..4d4bddb93f 100644 --- a/erts/emulator/beam/erl_alloc_util.c +++ b/erts/emulator/beam/erl_alloc_util.c @@ -382,7 +382,7 @@ do { \ #define SET_CARRIER_HDR(C, Sz, F, AP) \ (ASSERT(((Sz) & FLG_MASK) == 0), (C)->chdr = ((Sz) | (F)), \ - erts_smp_atomic_init_nob(&(C)->allctr, (erts_aint_t) (AP))) + erts_atomic_init_nob(&(C)->allctr, (erts_aint_t) (AP))) #define BLK_TO_SBC(B) \ ((Carrier_t *) (((char *) (B)) - SBC_HEADER_SIZE)) @@ -670,7 +670,7 @@ do { \ (A)->debug.saved_tid = 1; \ } \ else { \ - ERTS_SMP_LC_ASSERT( \ + ERTS_LC_ASSERT( \ ethr_equal_tids((A)->debug.tid, erts_thr_self())); \ } \ } \ @@ -826,7 +826,7 @@ erts_alcu_literal_32_mseg_alloc(Allctr_t *allctr, Uint *size_p, Uint flags) Uint sz = ERTS_SUPERALIGNED_CEILING(*size_p); ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL && allctr->t == 0); - ERTS_SMP_LC_ASSERT(allctr->thread_safe); + ERTS_LC_ASSERT(allctr->thread_safe); res = erts_alcu_mseg_alloc(allctr, &sz, flags); if (res) { @@ -844,7 +844,7 @@ erts_alcu_literal_32_mseg_realloc(Allctr_t *allctr, void *seg, Uint new_sz = ERTS_SUPERALIGNED_CEILING(*new_size_p); ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL && allctr->t == 0); - ERTS_SMP_LC_ASSERT(allctr->thread_safe); + ERTS_LC_ASSERT(allctr->thread_safe); if (seg && old_size) clear_literal_range(seg, old_size); @@ -862,7 +862,7 @@ erts_alcu_literal_32_mseg_dealloc(Allctr_t *allctr, void *seg, Uint size, { ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL && allctr->t == 0); - ERTS_SMP_LC_ASSERT(allctr->thread_safe); + ERTS_LC_ASSERT(allctr->thread_safe); erts_alcu_mseg_dealloc(allctr, seg, size, flags); @@ -1022,7 +1022,7 @@ erts_alcu_literal_32_sys_alloc(Allctr_t *allctr, Uint* size_p, int superalign) Uint size = ERTS_SUPERALIGNED_CEILING(*size_p); ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL && allctr->t == 0); - ERTS_SMP_LC_ASSERT(allctr->thread_safe); + ERTS_LC_ASSERT(allctr->thread_safe); res = erts_alcu_sys_alloc(allctr, &size, 1); if (res) { @@ -1040,7 +1040,7 @@ erts_alcu_literal_32_sys_realloc(Allctr_t *allctr, void *ptr, Uint* size_p, Uint ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL && allctr->t == 0); - ERTS_SMP_LC_ASSERT(allctr->thread_safe); + ERTS_LC_ASSERT(allctr->thread_safe); if (ptr && old_size) clear_literal_range(ptr, old_size); @@ -1057,7 +1057,7 @@ 
erts_alcu_literal_32_sys_dealloc(Allctr_t *allctr, void *ptr, Uint size, int sup { ERTS_LC_ASSERT(allctr->alloc_no == ERTS_ALC_A_LITERAL && allctr->t == 0); - ERTS_SMP_LC_ASSERT(allctr->thread_safe); + ERTS_LC_ASSERT(allctr->thread_safe); erts_alcu_sys_dealloc(allctr, ptr, size, 1); @@ -1255,11 +1255,11 @@ clear_busy_pool_carrier(Allctr_t *allctr, Carrier_t *crr) erts_aint_t old_val = new_val|ERTS_CRR_ALCTR_FLG_BUSY; ERTS_ALC_CPOOL_ASSERT(old_val - == erts_smp_atomic_xchg_relb(&crr->allctr, + == erts_atomic_xchg_relb(&crr->allctr, new_val)); } #else - erts_smp_atomic_set_relb(&crr->allctr, new_val); + erts_atomic_set_relb(&crr->allctr, new_val); #endif } } @@ -1697,7 +1697,7 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep, crr = BLK_TO_SBC(blk); if (sizep) *sizep = SBC_BLK_SZ(blk) - ABLK_HDR_SZ; - iallctr = erts_smp_atomic_read_dirty(&crr->allctr); + iallctr = erts_atomic_read_dirty(&crr->allctr); } else { crr = ABLK_TO_MBC(blk); @@ -1705,10 +1705,10 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep, if (sizep) *sizep = MBC_ABLK_SZ(blk) - ABLK_HDR_SZ; if (!ERTS_ALC_IS_CPOOL_ENABLED(pref_allctr)) - iallctr = erts_smp_atomic_read_dirty(&crr->allctr); + iallctr = erts_atomic_read_dirty(&crr->allctr); else { int locked_pref_allctr = 0; - iallctr = erts_smp_atomic_read_ddrb(&crr->allctr); + iallctr = erts_atomic_read_ddrb(&crr->allctr); if (ERTS_ALC_TS_PREF_LOCK_IF_USED == pref_lock && pref_allctr->thread_safe) { @@ -1724,7 +1724,7 @@ get_used_allctr(Allctr_t *pref_allctr, int pref_lock, void *p, UWord *sizep, erts_aint_t act; ERTS_ALC_CPOOL_ASSERT(!(iallctr & ERTS_CRR_ALCTR_FLG_BUSY)); - act = erts_smp_atomic_cmpxchg_ddrb(&crr->allctr, + act = erts_atomic_cmpxchg_ddrb(&crr->allctr, iallctr|ERTS_CRR_ALCTR_FLG_BUSY, iallctr); if (act == iallctr) { @@ -2099,10 +2099,10 @@ handle_delayed_dealloc(Allctr_t *allctr, ERTS_ALC_CPOOL_ASSERT(ERTS_ALC_IS_CPOOL_ENABLED(allctr)); ERTS_ALC_CPOOL_ASSERT(allctr == crr->cpool.orig_allctr); ERTS_ALC_CPOOL_ASSERT(((erts_aint_t) allctr) - != (erts_smp_atomic_read_nob(&crr->allctr) + != (erts_atomic_read_nob(&crr->allctr) & ~ERTS_CRR_ALCTR_FLG_MASK)); - erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); + erts_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); schedule_dealloc_carrier(allctr, crr); } @@ -2220,7 +2220,7 @@ dealloc_block(Allctr_t *allctr, void *ptr, ErtsAlcFixList_t *fix, int dec_cc_on_ { Block_t *blk = UMEM2BLK(ptr); - ERTS_SMP_LC_ASSERT(!allctr->thread_safe + ERTS_LC_ASSERT(!allctr->thread_safe || erts_lc_mtx_is_locked(&allctr->mutex)); if (IS_SBC_BLK(blk)) { @@ -3048,7 +3048,7 @@ cpool_insert(Allctr_t *allctr, Carrier_t *crr) ERTS_ALC_CPOOL_ASSERT(allctr->alloc_no == ERTS_ALC_A_INVALID /* testcase */ || erts_thr_progress_is_managed_thread()); - ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_read_nob(&crr->allctr) + ERTS_ALC_CPOOL_ASSERT(erts_atomic_read_nob(&crr->allctr) == (erts_aint_t) allctr); erts_atomic_add_nob(&allctr->cpool.stat.blocks_size, @@ -3118,7 +3118,7 @@ cpool_insert(Allctr_t *allctr, Carrier_t *crr) (erts_aint_t) &crr->cpool, (erts_aint_t) cpd1p); - erts_smp_atomic_set_wb(&crr->allctr, + erts_atomic_set_wb(&crr->allctr, ((erts_aint_t) allctr)|ERTS_CRR_ALCTR_FLG_IN_POOL); LTTNG3(carrier_pool_put, ERTS_ALC_A2AD(allctr->alloc_no), allctr->ix, CARRIER_SZ(crr)); } @@ -3250,11 +3250,11 @@ cpool_fetch(Allctr_t *allctr, UWord size) ASSERT(!is_in_list(&allctr->cpool.traitor_list, dl)); ASSERT(crr->cpool.orig_allctr == allctr); dl = dl->next; - exp = 
erts_smp_atomic_read_rb(&crr->allctr); + exp = erts_atomic_read_rb(&crr->allctr); if ((exp & ERTS_CRR_ALCTR_FLG_MASK) == ERTS_CRR_ALCTR_FLG_IN_POOL && erts_atomic_read_nob(&crr->cpool.max_size) >= size) { /* Try to fetch it... */ - act = erts_smp_atomic_cmpxchg_mb(&crr->allctr, + act = erts_atomic_cmpxchg_mb(&crr->allctr, (erts_aint_t) allctr, exp); if (act == exp) { @@ -3296,12 +3296,12 @@ cpool_fetch(Allctr_t *allctr, UWord size) ASSERT(dl != &allctr->cpool.pooled_list); ASSERT(crr->cpool.orig_allctr == allctr); dl = dl->next; - exp = erts_smp_atomic_read_rb(&crr->allctr); + exp = erts_atomic_read_rb(&crr->allctr); if (exp & ERTS_CRR_ALCTR_FLG_IN_POOL) { if (!(exp & ERTS_CRR_ALCTR_FLG_BUSY) && erts_atomic_read_nob(&crr->cpool.max_size) >= size) { /* Try to fetch it... */ - act = erts_smp_atomic_cmpxchg_mb(&crr->allctr, + act = erts_atomic_cmpxchg_mb(&crr->allctr, (erts_aint_t) allctr, exp); if (act == exp) { @@ -3377,12 +3377,12 @@ cpool_fetch(Allctr_t *allctr, UWord size) has_passed_sentinel = 1; } crr = (Carrier_t *)(((char *)cpdp) - offsetof(Carrier_t, cpool)); - exp = erts_smp_atomic_read_rb(&crr->allctr); + exp = erts_atomic_read_rb(&crr->allctr); if (((exp & (ERTS_CRR_ALCTR_FLG_MASK)) == ERTS_CRR_ALCTR_FLG_IN_POOL) && (erts_atomic_read_nob(&cpdp->max_size) >= size)) { erts_aint_t act; /* Try to fetch it... */ - act = erts_smp_atomic_cmpxchg_mb(&crr->allctr, + act = erts_atomic_cmpxchg_mb(&crr->allctr, (erts_aint_t) allctr, exp); if (act == exp) { @@ -3405,11 +3405,11 @@ check_dc_list: Block_t* blk; unlink_carrier(&allctr->cpool.dc_list, crr); #ifdef ERTS_ALC_CPOOL_DEBUG - ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_xchg_nob(&crr->allctr, + ERTS_ALC_CPOOL_ASSERT(erts_atomic_xchg_nob(&crr->allctr, ((erts_aint_t) allctr)) == (((erts_aint_t) allctr) & ~ERTS_CRR_ALCTR_FLG_MASK)); #else - erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); + erts_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); #endif blk = MBC_TO_FIRST_BLK(allctr, crr); ASSERT(FBLK_TO_MBC(blk) == crr); @@ -3512,7 +3512,7 @@ schedule_dealloc_carrier(Allctr_t *allctr, Carrier_t *crr) ERTS_ALC_CPOOL_ASSERT(crr == FBLK_TO_MBC(blk)); ERTS_ALC_CPOOL_ASSERT(crr == FIRST_BLK_TO_MBC(allctr, blk)); ERTS_ALC_CPOOL_ASSERT(((erts_aint_t) allctr) - == (erts_smp_atomic_read_nob(&crr->allctr) + == (erts_atomic_read_nob(&crr->allctr) & ~ERTS_CRR_ALCTR_FLG_MASK)); if (ddq_enqueue(&orig_allctr->dd.q, BLK2UMEM(blk), cinit)) @@ -4130,11 +4130,11 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp) if (busy_pcrr_pp && *busy_pcrr_pp) { ERTS_ALC_CPOOL_ASSERT(*busy_pcrr_pp == crr); *busy_pcrr_pp = NULL; - ERTS_ALC_CPOOL_ASSERT(erts_smp_atomic_read_nob(&crr->allctr) + ERTS_ALC_CPOOL_ASSERT(erts_atomic_read_nob(&crr->allctr) == (((erts_aint_t) allctr) | ERTS_CRR_ALCTR_FLG_IN_POOL | ERTS_CRR_ALCTR_FLG_BUSY)); - erts_smp_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); + erts_atomic_set_nob(&crr->allctr, ((erts_aint_t) allctr)); cpool_delete(allctr, allctr, crr); } else @@ -5280,7 +5280,7 @@ do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size) ASSERT(allctr); - ERTS_SMP_LC_ASSERT(!allctr->thread_safe + ERTS_LC_ASSERT(!allctr->thread_safe || erts_lc_mtx_is_locked(&allctr->mutex)); ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr); @@ -5408,7 +5408,7 @@ do_erts_alcu_free(ErtsAlcType_t type, void *extra, void *p, ASSERT(allctr); - ERTS_SMP_LC_ASSERT(!allctr->thread_safe + ERTS_LC_ASSERT(!allctr->thread_safe || erts_lc_mtx_is_locked(&allctr->mutex)); ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr); @@ -5517,7 +5517,7 @@ 
do_erts_alcu_realloc(ErtsAlcType_t type, ASSERT(allctr); - ERTS_SMP_LC_ASSERT(!allctr->thread_safe + ERTS_LC_ASSERT(!allctr->thread_safe || erts_lc_mtx_is_locked(&allctr->mutex)); ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr); diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h index 0cce4c9125..30d7baf769 100644 --- a/erts/emulator/beam/erl_alloc_util.h +++ b/erts/emulator/beam/erl_alloc_util.h @@ -324,12 +324,12 @@ struct Carrier_t_ { UWord chdr; Carrier_t *next; Carrier_t *prev; - erts_smp_atomic_t allctr; + erts_atomic_t allctr; ErtsAlcCPoolData_t cpool; /* Overwritten by block if sbc */ }; #define ERTS_ALC_CARRIER_TO_ALLCTR(C) \ - ((Allctr_t *) (erts_smp_atomic_read_nob(&(C)->allctr) & ~FLG_MASK)) + ((Allctr_t *) (erts_atomic_read_nob(&(C)->allctr) & ~FLG_MASK)) typedef struct { Carrier_t *first; diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c index eafa7be738..3ceb2fd368 100644 --- a/erts/emulator/beam/erl_async.c +++ b/erts/emulator/beam/erl_async.c @@ -601,7 +601,7 @@ long driver_async(ErlDrvPort ix, unsigned int* key, if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); a = (ErtsAsync*) erts_alloc(ERTS_ALC_T_ASYNC, sizeof(ErtsAsync)); diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c index fa41505a83..f673ef3194 100644 --- a/erts/emulator/beam/erl_bif_ddll.c +++ b/erts/emulator/beam/erl_bif_ddll.c @@ -100,18 +100,18 @@ static void dereference_all_processes(DE_Handle *dh); static void restore_process_references(DE_Handle *dh); static void ddll_no_more_references(void *vdh); -#define lock_drv_list() erts_smp_rwmtx_rwlock(&erts_driver_list_lock) -#define unlock_drv_list() erts_smp_rwmtx_rwunlock(&erts_driver_list_lock) +#define lock_drv_list() erts_rwmtx_rwlock(&erts_driver_list_lock) +#define unlock_drv_list() erts_rwmtx_rwunlock(&erts_driver_list_lock) #define assert_drv_list_locked() \ - ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \ - || erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock)) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \ + || erts_lc_rwmtx_is_rlocked(&erts_driver_list_lock)) #define assert_drv_list_rwlocked() \ - ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock)) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&erts_driver_list_lock)) #define assert_drv_list_rlocked() \ - ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock)) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&erts_driver_list_lock)) #define assert_drv_list_not_locked() \ - ERTS_SMP_LC_ASSERT(!erts_smp_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \ - && !erts_smp_lc_rwmtx_is_rlocked(&erts_driver_list_lock)) + ERTS_LC_ASSERT(!erts_lc_rwmtx_is_rwlocked(&erts_driver_list_lock) \ + && !erts_lc_rwmtx_is_rlocked(&erts_driver_list_lock)) #define FREE_PORT_FLAGS (ERTS_PORT_SFLGS_DEAD & (~ERTS_PORT_SFLG_INITIALIZING)) @@ -127,13 +127,13 @@ kill_ports_driver_unloaded(DE_Handle *dh) if (!prt) continue; - ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER; + ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER; state = erts_atomic32_read_nob(&prt->state); if (state & FREE_PORT_FLAGS) continue; - erts_smp_port_lock(prt); + erts_port_lock(prt); state = erts_atomic32_read_nob(&prt->state); if (!(state & ERTS_PORT_SFLGS_DEAD) && prt->drv_ptr->handle == dh) @@ -273,7 +273,7 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3) path[path_len++] = '/'; 
sys_strcpy(path+path_len,name); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); lock_drv_list(); if ((drv = lookup_driver(name)) != NULL) { if (drv->handle == NULL) { @@ -404,7 +404,7 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3) erts_ddll_reference_driver(dh); unlock_drv_list(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); lock_drv_list(); erts_ddll_dereference_driver(dh); @@ -420,11 +420,11 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3) unlock_drv_list(); erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path); erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); BIF_RET(t); soft_error: unlock_drv_list(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); if (do_build_load_error) { soft_error_term = build_load_error(BIF_P, build_this_load_error); } @@ -433,11 +433,11 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3) t = TUPLE2(hp, am_error, soft_error_term); erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path); erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); BIF_RET(t); error: assert_drv_list_not_locked(); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); if (path != NULL) { erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path); } @@ -499,7 +499,7 @@ Eterm erl_ddll_try_unload_2(BIF_ALIST_2) Eterm l; int kill_ports = 0; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); for(l = options; is_list(l); l = CDR(list_val(l))) { Eterm opt = CAR(list_val(l)); @@ -576,7 +576,7 @@ Eterm erl_ddll_try_unload_2(BIF_ALIST_2) dh->reload_full_path = dh->reload_driver_name = NULL; dh->reload_flags = 0; } - if (erts_smp_atomic32_read_nob(&dh->port_count) > 0) { + if (erts_atomic32_read_nob(&dh->port_count) > 0) { ++kill_ports; } dh->status = ERL_DE_UNLOAD; @@ -595,7 +595,7 @@ done: erts_ddll_reference_driver(dh); unlock_drv_list(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); lock_drv_list(); erts_ddll_dereference_driver(dh); erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name); @@ -617,7 +617,7 @@ done: soft_error: unlock_drv_list(); erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); hp = HAlloc(BIF_P, 3); t = TUPLE2(hp, am_error, soft_error_term); BIF_RET(t); @@ -627,7 +627,7 @@ soft_error: if (name != NULL) { erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name); } - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_ERROR(BIF_P, BADARG); } @@ -742,7 +742,7 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2) } else if (drv->handle->status == ERL_DE_PERMANENT) { res = am_permanent; } else { - res = make_small(erts_smp_atomic32_read_nob(&drv->handle->port_count)); + res = make_small(erts_atomic32_read_nob(&drv->handle->port_count)); } goto done; case am_linked_in_driver: @@ -921,7 +921,7 @@ Eterm erts_ddll_monitor_driver(Process *p, void erts_ddll_remove_monitor(Process *p, Eterm ref, ErtsProcLocks plocks) { erts_driver_t *drv; - 
erts_smp_proc_unlock(p, plocks); + erts_proc_unlock(p, plocks); lock_drv_list(); drv = driver_list; while (drv != NULL) { @@ -946,7 +946,7 @@ void erts_ddll_remove_monitor(Process *p, Eterm ref, ErtsProcLocks plocks) } done: unlock_drv_list(); - erts_smp_proc_lock(p, plocks); + erts_proc_lock(p, plocks); } /* @@ -955,7 +955,7 @@ void erts_ddll_remove_monitor(Process *p, Eterm ref, ErtsProcLocks plocks) void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks) { erts_driver_t *drv; - erts_smp_proc_unlock(p, plocks); + erts_proc_unlock(p, plocks); lock_drv_list(); drv = driver_list; while (drv != NULL) { @@ -993,7 +993,7 @@ void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks) dh->status = ERL_DE_UNLOAD; } if (!left - && erts_smp_atomic32_read_nob(&drv->handle->port_count) > 0) { + && erts_atomic32_read_nob(&drv->handle->port_count) > 0) { if (kill_ports) { DE_Handle *dh = drv->handle; erts_ddll_reference_driver(dh); @@ -1014,7 +1014,7 @@ void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks) } } unlock_drv_list(); - erts_smp_proc_lock(p, plocks); + erts_proc_lock(p, plocks); } void erts_ddll_lock_driver(DE_Handle *dh, char *name) { @@ -1042,41 +1042,41 @@ void erts_ddll_lock_driver(DE_Handle *dh, char *name) void erts_ddll_increment_port_count(DE_Handle *dh) { assert_drv_list_locked(); - erts_smp_atomic32_inc_nob(&dh->port_count); + erts_atomic32_inc_nob(&dh->port_count); } void erts_ddll_decrement_port_count(DE_Handle *dh) { assert_drv_list_locked(); #ifdef DEBUG - ASSERT(erts_smp_atomic32_dec_read_nob(&dh->port_count) >= 0); + ASSERT(erts_atomic32_dec_read_nob(&dh->port_count) >= 0); #else - erts_smp_atomic32_dec_nob(&dh->port_count); + erts_atomic32_dec_nob(&dh->port_count); #endif } static void first_ddll_reference(DE_Handle *dh) { assert_drv_list_rwlocked(); - erts_smp_refc_init(&(dh->refc),1); + erts_refc_init(&(dh->refc),1); } void erts_ddll_reference_driver(DE_Handle *dh) { assert_drv_list_locked(); - if (erts_smp_refc_inctest(&(dh->refc),1) == 1) { - erts_smp_refc_inc(&(dh->refc),2); /* add a reference for the scheduled operation */ + if (erts_refc_inctest(&(dh->refc),1) == 1) { + erts_refc_inc(&(dh->refc),2); /* add a reference for the scheduled operation */ } } void erts_ddll_reference_referenced_driver(DE_Handle *dh) { - erts_smp_refc_inc(&(dh->refc),2); + erts_refc_inc(&(dh->refc),2); } void erts_ddll_dereference_driver(DE_Handle *dh) { - if (erts_smp_refc_dectest(&(dh->refc),0) == 0) { + if (erts_refc_dectest(&(dh->refc),0) == 0) { /* No lock here, but if the driver is referenced again, the scheduled deletion is added as a reference too, see above */ erts_schedule_misc_op(ddll_no_more_references, (void *) dh); @@ -1099,11 +1099,11 @@ static void restore_process_references(DE_Handle *dh) { DE_ProcEntry *p; assert_drv_list_rwlocked(); - ASSERT(erts_smp_refc_read(&(dh->refc),0) == 0); + ASSERT(erts_refc_read(&(dh->refc),0) == 0); for(p = dh->procs;p != NULL; p = p->next) { if (p->awaiting_status == ERL_DE_PROC_LOADED) { ASSERT(p->flags & ERL_DE_FL_DEREFERENCED); - erts_smp_refc_inc(&(dh->refc),1); + erts_refc_inc(&(dh->refc),1); p->flags &= ~ERL_DE_FL_DEREFERENCED; } } @@ -1125,9 +1125,9 @@ static void ddll_no_more_references(void *vdh) lock_drv_list(); - x = erts_smp_refc_read(&(dh->refc),0); + x = erts_refc_read(&(dh->refc),0); if (x > 0) { - x = erts_smp_refc_dectest(&(dh->refc),0); /* delete the reference added for me */ + x = erts_refc_dectest(&(dh->refc),0); /* delete the reference added for me */ } @@ -1230,7 +1230,7 @@ static Eterm notify_when_loaded(Process *p, 
Eterm name_term, char *name, ErtsPro Eterm immediate_type = NIL; erts_driver_t *drv; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks); lock_drv_list(); if ((drv = lookup_driver(name)) == NULL) { immediate_tag = am_unloaded; @@ -1265,10 +1265,10 @@ static Eterm notify_when_loaded(Process *p, Eterm name_term, char *name, ErtsPro BIF_RET(r); immediate: r = erts_make_ref(p); - erts_smp_proc_unlock(p, plocks); + erts_proc_unlock(p, plocks); notify_proc(p, r, name_term, immediate_type, immediate_tag, 0); unlock_drv_list(); - erts_smp_proc_lock(p, plocks); + erts_proc_lock(p, plocks); BIF_RET(r); } @@ -1279,7 +1279,7 @@ static Eterm notify_when_unloaded(Process *p, Eterm name_term, char *name, ErtsP Eterm immediate_type = NIL; erts_driver_t *drv; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks); lock_drv_list(); if ((drv = lookup_driver(name)) == NULL) { immediate_tag = am_unloaded; @@ -1298,10 +1298,10 @@ static Eterm notify_when_unloaded(Process *p, Eterm name_term, char *name, ErtsP BIF_RET(r); immediate: r = erts_make_ref(p); - erts_smp_proc_unlock(p, plocks); + erts_proc_unlock(p, plocks); notify_proc(p, r, name_term, immediate_type, immediate_tag, 0); unlock_drv_list(); - erts_smp_proc_lock(p, plocks); + erts_proc_lock(p, plocks); BIF_RET(r); } @@ -1505,8 +1505,8 @@ static int do_load_driver_entry(DE_Handle *dh, char *path, char *name) res = ERL_DE_LOAD_ERROR_BAD_NAME; goto error; } - erts_smp_atomic_init_nob(&(dh->refc), (erts_aint_t) 0); - erts_smp_atomic32_init_nob(&dh->port_count, 0); + erts_atomic_init_nob(&(dh->refc), (erts_aint_t) 0); + erts_atomic32_init_nob(&dh->port_count, 0); dh->full_path = erts_alloc(ERTS_ALC_T_DDLL_HANDLE, sys_strlen(path) + 1); sys_strcpy(dh->full_path, path); dh->flags = 0; @@ -1577,8 +1577,8 @@ static int load_driver_entry(DE_Handle **dhp, char *path, char *name) dh->handle = NULL; dh->procs = NULL; - erts_smp_atomic32_init_nob(&dh->port_count, 0); - erts_smp_refc_init(&(dh->refc), (erts_aint_t) 0); + erts_atomic32_init_nob(&dh->port_count, 0); + erts_refc_init(&(dh->refc), (erts_aint_t) 0); dh->status = -1; dh->reload_full_path = NULL; dh->reload_driver_name = NULL; @@ -1616,7 +1616,7 @@ static int reload_driver_entry(DE_Handle *dh) dh->reload_full_path = NULL; dh->reload_driver_name = NULL; - ASSERT(erts_smp_refc_read(&(dh->refc),0) == 0); + ASSERT(erts_refc_read(&(dh->refc),0) == 0); ASSERT(dh->full_path != NULL); erts_free(ERTS_ALC_T_DDLL_HANDLE, (void *) dh->full_path); dh->full_path = NULL; @@ -1647,7 +1647,7 @@ static void notify_proc(Process *proc, Eterm ref, Eterm driver_name, Eterm type, ErtsMessage *mp; ErtsProcLocks rp_locks = 0; ErlOffHeap *ohp; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; assert_drv_list_rwlocked(); if (errcode != 0) { @@ -1673,8 +1673,8 @@ static void notify_proc(Process *proc, Eterm ref, Eterm driver_name, Eterm type, mess = TUPLE5(hp,type,r,am_driver,driver_name,tag); } erts_queue_message(proc, rp_locks, mp, mess, am_system); - erts_smp_proc_unlock(proc, rp_locks); - ERTS_SMP_CHK_NO_PROC_LOCKS; + erts_proc_unlock(proc, rp_locks); + ERTS_CHK_NO_PROC_LOCKS; } static void notify_all(DE_Handle *dh, char *name, Uint awaiting, Eterm type, Eterm tag) @@ -1746,7 +1746,7 @@ static Eterm build_load_error(Process *p, int code) { int need = load_error_need(code); Eterm *hp = NULL; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p)); if 
(need) { hp = HAlloc(p,need); } diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index dcbef9ce2d..2ff95a3338 100644 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -887,13 +887,13 @@ process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap, * is being inspected... */ ASSERT(locks & ERTS_PROC_LOCK_MAIN); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp); + ERTS_MSGQ_MV_INQ2PRIVQ(rp); locks &= ~ERTS_PROC_LOCK_MSGQ; unlock_locks |= ERTS_PROC_LOCK_MSGQ; } if (unlock_locks) - erts_smp_proc_unlock(rp, unlock_locks); + erts_proc_unlock(rp, unlock_locks); } @@ -951,7 +951,7 @@ process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap, if (c_p == rp) locks &= ~ERTS_PROC_LOCK_MAIN; if (locks && rp) - erts_smp_proc_unlock(rp, locks); + erts_proc_unlock(rp, locks); if (res_elem_ix != &def_res_elem_ix_buf[0]) erts_free(ERTS_ALC_T_TMP, res_elem_ix); @@ -1042,7 +1042,7 @@ BIF_RETTYPE process_info_2(BIF_ALIST_2) ERTS_BIF_YIELD2(bif_export[BIF_process_info_2], BIF_P, BIF_ARG_1, BIF_ARG_2); else if (rp != BIF_P && ERTS_PROC_PENDING_EXIT(rp)) { - erts_smp_proc_unlock(rp, info_locks|ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, info_locks|ERTS_PROC_LOCK_STATUS); ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_undefined); } else { @@ -1062,13 +1062,13 @@ BIF_RETTYPE process_info_2(BIF_ALIST_2) * is being inspected... */ ASSERT(info_locks & ERTS_PROC_LOCK_MAIN); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(rp); + ERTS_MSGQ_MV_INQ2PRIVQ(rp); info_locks &= ~ERTS_PROC_LOCK_MSGQ; unlock_locks |= ERTS_PROC_LOCK_MSGQ; } if (unlock_locks) - erts_smp_proc_unlock(rp, unlock_locks); + erts_proc_unlock(rp, unlock_locks); res = process_info_aux(BIF_P, rp, info_locks, pid, BIF_ARG_2, 0); } @@ -1077,7 +1077,7 @@ BIF_RETTYPE process_info_2(BIF_ALIST_2) if (BIF_P == rp) info_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp && info_locks) - erts_smp_proc_unlock(rp, info_locks); + erts_proc_unlock(rp, info_locks); ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED)); BIF_RET(res); @@ -1364,7 +1364,7 @@ process_info_aux(Process *BIF_P, break; case am_trap_exit: { - erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state); + erts_aint32_t state = erts_atomic32_read_nob(&rp->state); hp = HAlloc(BIF_P, 3); if (state & ERTS_PSFLG_TRAP_EXIT) res = am_true; @@ -2252,7 +2252,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) res = TUPLE2(hp, am_sequential_tracer, val); BIF_RET(res); } else if (BIF_ARG_1 == am_garbage_collection){ - Uint val = (Uint) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); + Uint val = (Uint) erts_atomic32_read_nob(&erts_max_gen_gcs); Eterm tup; hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2); @@ -2270,7 +2270,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) BIF_RET(res); } else if (BIF_ARG_1 == am_fullsweep_after){ - Uint val = (Uint) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); + Uint val = (Uint) erts_atomic32_read_nob(&erts_max_gen_gcs); hp = HAlloc(BIF_P, 3); res = TUPLE2(hp, am_fullsweep_after, make_small(val)); BIF_RET(res); @@ -2303,8 +2303,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) erts_dsprintf_buf_t *dsbufp = erts_create_info_dsbuf(0); /* Need to be the only thread running... 
*/ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); if (BIF_ARG_1 == am_info) info(ERTS_PRINT_DSBUF, (void *) dsbufp); @@ -2315,8 +2315,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) else distribution_info(ERTS_PRINT_DSBUF, (void *) dsbufp); - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); ASSERT(dsbufp && dsbufp->str); res = new_binary(BIF_P, (byte *) dsbufp->str, dsbufp->str_len); @@ -2325,7 +2325,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) } else if (ERTS_IS_ATOM_STR("dist_ctrl", BIF_ARG_1)) { DistEntry *dep; i = 0; - erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); + erts_rwmtx_rlock(&erts_dist_table_rwmtx); for (dep = erts_visible_dist_entries; dep; dep = dep->next) ++i; for (dep = erts_hidden_dist_entries; dep; dep = dep->next) @@ -2348,7 +2348,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) res = CONS(hp, tpl, res); hp += 2; } - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); BIF_RET(res); } else if (BIF_ARG_1 == am_system_version) { erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0); @@ -2906,7 +2906,7 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, { Eterm res = THE_NON_VALUE; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (item == am_id) { if (hpp) @@ -3122,7 +3122,7 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt, goto done; } res = ((ERTS_PTS_FLG_PARALLELISM & - erts_smp_atomic32_read_nob(&prt->sched.flags)) + erts_atomic32_read_nob(&prt->sched.flags)) ? am_true : am_false); } @@ -3198,7 +3198,7 @@ fun_info_2(BIF_ALIST_2) } break; case am_refc: - val = erts_make_integer(erts_smp_atomic_read_nob(&funp->fe->refc), p); + val = erts_make_integer(erts_atomic_read_nob(&funp->fe->refc), p); hp = HAlloc(p, 3); break; case am_arity: @@ -3303,7 +3303,7 @@ BIF_RETTYPE is_process_alive_1(BIF_ALIST_1) BIF_RET(am_false); } else { - if (erts_smp_atomic32_read_acqb(&rp->state) + if (erts_atomic32_read_acqb(&rp->state) & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) ERTS_BIF_AWAIT_X_DATA_TRAP(BIF_P, BIF_ARG_1, am_false); else @@ -3338,7 +3338,7 @@ BIF_RETTYPE process_display_2(BIF_ALIST_2) BIF_ARG_1, BIF_ARG_2); if (rp != BIF_P && ERTS_PROC_PENDING_EXIT(rp)) { Eterm args[2] = {BIF_ARG_1, BIF_ARG_2}; - erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_ALL); + erts_proc_unlock(rp, ERTS_PROC_LOCKS_ALL); ERTS_BIF_AWAIT_X_APPLY_TRAP(BIF_P, BIF_ARG_1, am_erlang, @@ -3347,7 +3347,7 @@ BIF_RETTYPE process_display_2(BIF_ALIST_2) 2); } erts_stack_dump(ERTS_PRINT_STDERR, NULL, rp); - erts_smp_proc_unlock(rp, (BIF_P == rp + erts_proc_unlock(rp, (BIF_P == rp ? ERTS_PROC_LOCKS_ALL_MINOR : ERTS_PROC_LOCKS_ALL)); BIF_RET(am_true); @@ -3527,7 +3527,7 @@ BIF_RETTYPE error_logger_warning_map_0(BIF_ALIST_0) BIF_RET(erts_error_logger_warnings); } -static erts_smp_atomic_t available_internal_state; +static erts_atomic_t available_internal_state; static int empty_magic_ref_destructor(Binary *bin) { @@ -3540,7 +3540,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) * NOTE: Only supposed to be used for testing, and debugging. 
*/ - if (!erts_smp_atomic_read_nob(&available_internal_state)) { + if (!erts_atomic_read_nob(&available_internal_state)) { BIF_ERROR(BIF_P, EXC_UNDEF); } @@ -3583,9 +3583,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) int no_errors; ErtsCheckIoDebugInfo ciodi = {0}; #ifdef HAVE_ERTS_CHECK_IO_DEBUG - erts_smp_proc_unlock(BIF_P,ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P,ERTS_PROC_LOCK_MAIN); no_errors = erts_check_io_debug(&ciodi); - erts_smp_proc_lock(BIF_P,ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P,ERTS_PROC_LOCK_MAIN); #else no_errors = 0; #endif @@ -3635,9 +3635,9 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) } else if (ERTS_IS_ATOM_STR("nbalance", BIF_ARG_1)) { Uint n; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); n = erts_debug_nbalance(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(erts_make_integer(n, BIF_P)); } else if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1)) { @@ -3652,11 +3652,11 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) } else if (ERTS_IS_ATOM_STR("memory", BIF_ARG_1)) { Eterm res; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); res = erts_memory(NULL, NULL, BIF_P, THE_NON_VALUE); - erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); BIF_RET(res); } else if (ERTS_IS_ATOM_STR("mmap", BIF_ARG_1)) { @@ -3723,11 +3723,11 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) tp[2], ERTS_PROC_LOCK_LINK); if (!p) { - ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P); + ERTS_ASSERT_IS_NOT_EXITING(BIF_P); BIF_RET(am_undefined); } res = make_link_list(BIF_P, ERTS_P_LINKS(p), NIL); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); BIF_RET(res); } else if(is_internal_port(tp[2])) { @@ -3746,10 +3746,10 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) DistEntry *dep = erts_find_dist_entry(tp[2]); if(dep) { Eterm subres; - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); subres = make_link_list(BIF_P, dep->nlinks, NIL); subres = make_link_list(BIF_P, dep->node_links, subres); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); erts_deref_dist_entry(dep); BIF_RET(subres); } else { @@ -3768,19 +3768,19 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) tp[2], ERTS_PROC_LOCK_LINK); if (!p) { - ERTS_SMP_ASSERT_IS_NOT_EXITING(BIF_P); + ERTS_ASSERT_IS_NOT_EXITING(BIF_P); BIF_RET(am_undefined); } res = make_monitor_list(BIF_P, ERTS_P_MONITORS(p)); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(p, ERTS_PROC_LOCK_LINK); BIF_RET(res); } else if(is_node_name_atom(tp[2])) { DistEntry *dep = erts_find_dist_entry(tp[2]); if(dep) { Eterm ml; - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); ml = make_monitor_list(BIF_P, dep->monitors); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); erts_deref_dist_entry(dep); BIF_RET(ml); } else { @@ -3808,7 +3808,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) } else { Eterm res = ERTS_PROC_PENDING_EXIT(rp) ? 
am_true : am_false; - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); BIF_RET(res); } } @@ -3862,10 +3862,10 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1) Eterm res = am_undefined; DistEntry *dep = erts_sysname_to_connected_dist_entry(tp[2]); if (dep) { - erts_smp_de_rlock(dep); + erts_de_rlock(dep); if (is_internal_port(dep->cid)) res = dep->cid; - erts_smp_de_runlock(dep); + erts_de_runlock(dep); erts_deref_dist_entry(dep); } BIF_RET(res); @@ -4010,7 +4010,7 @@ BIF_RETTYPE erts_internal_system_check_1(BIF_ALIST_1) BIF_ERROR(BIF_P, BADARG); } -static erts_smp_atomic_t hipe_test_reschedule_flag; +static erts_atomic_t hipe_test_reschedule_flag; #if defined(VALGRIND) && defined(__GNUC__) /* Force noinline for valgrind suppression */ @@ -4034,7 +4034,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) if (ERTS_IS_ATOM_STR("available_internal_state", BIF_ARG_1) && (BIF_ARG_2 == am_true || BIF_ARG_2 == am_false)) { erts_aint_t on = (erts_aint_t) (BIF_ARG_2 == am_true); - erts_aint_t prev_on = erts_smp_atomic_xchg_nob(&available_internal_state, on); + erts_aint_t prev_on = erts_atomic_xchg_nob(&available_internal_state, on); if (on) { erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); erts_dsprintf(dsbufp, "Process %T ", BIF_P->common.id); @@ -4050,7 +4050,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) BIF_RET(prev_on ? am_true : am_false); } - if (!erts_smp_atomic_read_nob(&available_internal_state)) { + if (!erts_atomic_read_nob(&available_internal_state)) { BIF_ERROR(BIF_P, EXC_UNDEF); } @@ -4074,13 +4074,13 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) Sint ms; if (term_to_Sint(BIF_ARG_2, &ms) != 0) { if (ms > 0) { - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); if (block) - erts_smp_thr_progress_block(); + erts_thr_progress_block(); while (erts_milli_sleep((long) ms) != 0); if (block) - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); } BIF_RET(am_true); } @@ -4089,9 +4089,9 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) Sint ms; if (term_to_Sint(BIF_ARG_2, &ms) != 0) { if (ms > 0) { - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); while (erts_milli_sleep((long) ms) != 0); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); } BIF_RET(am_true); } @@ -4174,7 +4174,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) 0); if (BIF_P == rp) rp_locks &= ~ERTS_PROC_LOCK_MAIN; - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); if (xres > 1) { DECL_AM(message); BIF_RET(AM_message); @@ -4236,14 +4236,14 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) } else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_suspend", BIF_ARG_1)) { /* Used by hipe test suites */ - erts_aint_t flag = erts_smp_atomic_read_nob(&hipe_test_reschedule_flag); + erts_aint_t flag = erts_atomic_read_nob(&hipe_test_reschedule_flag); if (!flag && BIF_ARG_2 != am_false) { - erts_smp_atomic_set_nob(&hipe_test_reschedule_flag, 1); + erts_atomic_set_nob(&hipe_test_reschedule_flag, 1); erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL); ERTS_BIF_YIELD2(bif_export[BIF_erts_debug_set_internal_state_2], BIF_P, BIF_ARG_1, BIF_ARG_2); } - erts_smp_atomic_set_nob(&hipe_test_reschedule_flag, !flag); + 
erts_atomic_set_nob(&hipe_test_reschedule_flag, !flag); BIF_RET(NIL); } else if (ERTS_IS_ATOM_STR("hipe_test_reschedule_resume", BIF_ARG_1)) { @@ -4254,7 +4254,7 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) if (rp) { erts_resume(rp, ERTS_PROC_LOCK_STATUS); res = am_true; - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); } BIF_RET(res); } @@ -4271,9 +4271,9 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) BIF_RET(am_false); else { Uint32 con_id; - erts_smp_de_rlock(dep); + erts_de_rlock(dep); con_id = dep->connection_id; - erts_smp_de_runlock(dep); + erts_de_runlock(dep); erts_kill_dist_connection(dep, con_id); erts_deref_dist_entry(dep); BIF_RET(am_true); @@ -4292,12 +4292,12 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) BIF_ERROR(BIF_P, BADARG); } - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); old_use_opt = !erts_disable_proc_not_running_opt; erts_disable_proc_not_running_opt = !use_opt; - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(old_use_opt ? am_true : am_false); } else if (ERTS_IS_ATOM_STR("wait", BIF_ARG_1)) { @@ -4326,9 +4326,9 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2) Sint64 msecs; if (term_to_Sint64(BIF_ARG_2, &msecs)) { /* Negative value restore original value... */ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_debug_test_node_tab_delayed_delete(msecs); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); BIF_RET(am_ok); } } @@ -4776,8 +4776,8 @@ static void os_info_init(void) void erts_bif_info_init(void) { - erts_smp_atomic_init_nob(&available_internal_state, 0); - erts_smp_atomic_init_nob(&hipe_test_reschedule_flag, 0); + erts_atomic_init_nob(&available_internal_state, 0); + erts_atomic_init_nob(&hipe_test_reschedule_flag, 0); alloc_info_trap = erts_export_put(am_erlang, am_alloc_info, 1); alloc_sizes_trap = erts_export_put(am_erlang, am_alloc_sizes, 1); diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c index 106f18b747..4b73be55c6 100644 --- a/erts/emulator/beam/erl_bif_port.c +++ b/erts/emulator/beam/erl_bif_port.c @@ -86,25 +86,25 @@ BIF_RETTYPE erts_internal_open_port_2(BIF_ALIST_2) erts_make_ref_in_array(port->async_open_port->ref); port->async_open_port->to = BIF_P->common.id; - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE | ERTS_PROC_LOCK_LINK); + erts_proc_lock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE | ERTS_PROC_LOCK_LINK); if (ERTS_PROC_PENDING_EXIT(BIF_P)) { /* need to exit caller instead */ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE | ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE | ERTS_PROC_LOCK_LINK); KILL_CATCHES(BIF_P); BIF_P->freason = EXC_EXIT; erts_port_release(port); BIF_RET(am_badarg); } - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(BIF_P); + ERTS_MSGQ_MV_INQ2PRIVQ(BIF_P); BIF_P->msg.save = BIF_P->msg.last; - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCKS_MSG_RECEIVE); res = erts_proc_store_ref(BIF_P, port->async_open_port->ref); } else { res = port->common.id; - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); } erts_add_link(&ERTS_P_LINKS(port), LINK_PID, 
BIF_P->common.id); @@ -114,7 +114,7 @@ BIF_RETTYPE erts_internal_open_port_2(BIF_ALIST_2) trace_proc(BIF_P, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK, BIF_P, am_link, port->common.id); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); erts_port_release(port); @@ -271,7 +271,7 @@ BIF_RETTYPE erts_internal_port_call_3(BIF_ALIST_3) break; } - state = erts_smp_atomic32_read_acqb(&BIF_P->state); + state = erts_atomic32_read_acqb(&BIF_P->state); if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) { if (state & ERTS_PSFLG_PENDING_EXIT) erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN); @@ -319,7 +319,7 @@ BIF_RETTYPE erts_internal_port_control_3(BIF_ALIST_3) break; } - state = erts_smp_atomic32_read_acqb(&BIF_P->state); + state = erts_atomic32_read_acqb(&BIF_P->state); if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) { if (state & ERTS_PSFLG_PENDING_EXIT) erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN); @@ -509,7 +509,7 @@ cleanup_old_port_data(erts_aint_t data) else { ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data; size_t size; - ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER; + ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER; size = sizeof(ErtsPortDataHeap) + (pdhp->hsize-1)*sizeof(Eterm); erts_schedule_thr_prgr_later_cleanup_op(free_port_data_heap, (void *) pdhp, @@ -521,21 +521,21 @@ cleanup_old_port_data(erts_aint_t data) void erts_init_port_data(Port *prt) { - erts_smp_atomic_init_nob(&prt->data, (erts_aint_t) am_undefined); + erts_atomic_init_nob(&prt->data, (erts_aint_t) am_undefined); } void erts_cleanup_port_data(Port *prt) { ASSERT(erts_atomic32_read_nob(&prt->state) & ERTS_PORT_SFLGS_INVALID_LOOKUP); - cleanup_old_port_data(erts_smp_atomic_xchg_nob(&prt->data, + cleanup_old_port_data(erts_atomic_xchg_nob(&prt->data, (erts_aint_t) NULL)); } Uint erts_port_data_size(Port *prt) { - erts_aint_t data = erts_smp_atomic_read_ddrb(&prt->data); + erts_aint_t data = erts_atomic_read_ddrb(&prt->data); if ((data & 0x3) != 0) { ASSERT(is_immed((Eterm) (UWord) data)); @@ -550,7 +550,7 @@ erts_port_data_size(Port *prt) ErlOffHeap * erts_port_data_offheap(Port *prt) { - erts_aint_t data = erts_smp_atomic_read_ddrb(&prt->data); + erts_aint_t data = erts_atomic_read_ddrb(&prt->data); if ((data & 0x3) != 0) { ASSERT(is_immed((Eterm) (UWord) data)); @@ -595,11 +595,11 @@ BIF_RETTYPE port_set_data_2(BIF_ALIST_2) ASSERT((data & 0x3) == 0); } - data = erts_smp_atomic_xchg_wb(&prt->data, data); + data = erts_atomic_xchg_wb(&prt->data, data); if (data == (erts_aint_t)NULL) { /* Port terminated by racing thread */ - data = erts_smp_atomic_xchg_wb(&prt->data, data); + data = erts_atomic_xchg_wb(&prt->data, data); ASSERT(data != (erts_aint_t)NULL); cleanup_old_port_data(data); BIF_ERROR(BIF_P, BADARG); @@ -622,7 +622,7 @@ BIF_RETTYPE port_get_data_1(BIF_ALIST_1) if (!prt) BIF_ERROR(BIF_P, BADARG); - data = erts_smp_atomic_read_ddrb(&prt->data); + data = erts_atomic_read_ddrb(&prt->data); if (data == (erts_aint_t)NULL) BIF_ERROR(BIF_P, BADARG); /* Port terminated by racing thread */ @@ -917,7 +917,7 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump) } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); port = erts_open_driver(driver, p->common.id, name_buf, &opts, err_typep, err_nump); #ifdef USE_VM_PROBES @@ -934,7 +934,7 @@ open_port(Process* p, Eterm name, Eterm settings, int *err_typep, int *err_nump) if (port && IS_TRACED_FL(port, F_TRACE_PORTS)) trace_port(port, 
am_getting_linked, p->common.id); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); if (IS_TRACED_FL(p, F_TRACE_SCHED_PROCS)) { trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in); diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c index c476e18c33..b02f966558 100644 --- a/erts/emulator/beam/erl_bif_trace.c +++ b/erts/emulator/beam/erl_bif_trace.c @@ -373,11 +373,11 @@ static void smp_bp_finisher(void* null) finish_bp.stager = NULL; #endif erts_release_code_write_permission(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); if (!ERTS_PROC_IS_EXITING(p)) { erts_resume(p, ERTS_PROC_LOCK_STATUS); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); erts_proc_dec_refc(p); } } @@ -389,8 +389,8 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on, struct trace_pattern_flags *trace_pattern_flags, ErtsTracer *meta_tracer) { - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission() || - erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_has_code_write_permission() || + erts_thr_progress_is_blocking()); if (trace_pattern_is_on) *trace_pattern_is_on = erts_default_trace_pattern_is_on; if (match_spec) @@ -405,8 +405,8 @@ erts_get_default_trace_pattern(int *trace_pattern_is_on, int erts_is_default_trace_enabled(void) { - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission() || - erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_has_code_write_permission() || + erts_thr_progress_is_blocking()); return erts_default_trace_pattern_is_on; } @@ -610,13 +610,13 @@ Eterm erts_internal_trace_3(BIF_ALIST_3) goto error; if (start_trace(tracee_p, tracer, &tracee_p->common, on, mask)) { - erts_smp_proc_unlock(tracee_p, + erts_proc_unlock(tracee_p, (tracee_p == p ? ERTS_PROC_LOCKS_ALL_MINOR : ERTS_PROC_LOCKS_ALL)); goto already_traced; } - erts_smp_proc_unlock(tracee_p, + erts_proc_unlock(tracee_p, (tracee_p == p ? 
ERTS_PROC_LOCKS_ALL_MINOR : ERTS_PROC_LOCKS_ALL)); @@ -689,8 +689,8 @@ Eterm erts_internal_trace_3(BIF_ALIST_3) mods = 1; } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); system_blocked = 1; ok = 1; @@ -755,8 +755,8 @@ Eterm erts_internal_trace_3(BIF_ALIST_3) } if (system_blocked) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); } erts_release_code_write_permission(); ERTS_TRACER_CLEAR(&tracer); @@ -772,8 +772,8 @@ Eterm erts_internal_trace_3(BIF_ALIST_3) ERTS_TRACER_CLEAR(&tracer); if (system_blocked) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); } erts_release_code_write_permission(); @@ -862,7 +862,7 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key) trace_flags = ERTS_TRACE_FLAGS(tracee); if (tracee != p) - erts_smp_proc_unlock(tracee, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(tracee, ERTS_PROC_LOCK_MAIN); } else if (is_external_pid(pid_spec) && external_pid_dist_entry(pid_spec) == erts_this_dist_entry) { return am_undefined; @@ -1040,22 +1040,22 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key) mfa[2] = signed_val(tp[3]); if ( (key == am_call_time) || (key == am_all)) { - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); } #ifdef ERTS_DIRTY_SCHEDULERS - erts_smp_mtx_lock(&erts_dirty_bp_ix_mtx); + erts_mtx_lock(&erts_dirty_bp_ix_mtx); #endif r = function_is_traced(p, mfa, &ms, &ms_meta, &meta, &count, &call_time); #ifdef ERTS_DIRTY_SCHEDULERS - erts_smp_mtx_unlock(&erts_dirty_bp_ix_mtx); + erts_mtx_unlock(&erts_dirty_bp_ix_mtx); #endif if ( (key == am_call_time) || (key == am_all)) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); } switch (r) { @@ -1507,7 +1507,7 @@ erts_set_trace_pattern(Process*p, ErtsCodeMFA *mfa, int specified, finish_bp.local = flags.breakpoint; if (is_blocking) { - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); while (erts_finish_breakpointing()) { /* Empty loop body */ } @@ -1565,7 +1565,7 @@ consolidate_event_tracing(ErtsTracingEvent te[]) int erts_finish_breakpointing(void) { - ERTS_SMP_LC_ASSERT(erts_has_code_write_permission()); + ERTS_LC_ASSERT(erts_has_code_write_permission()); /* * Memory barriers will be issued for all schedulers *before* @@ -1987,8 +1987,8 @@ BIF_RETTYPE seq_trace_print_2(BIF_ALIST_2) void erts_system_monitor_clear(Process *c_p) { if (c_p) { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); } erts_set_system_monitor(NIL); erts_system_monitor_long_gc = 0; @@ -1997,8 +1997,8 @@ void erts_system_monitor_clear(Process *c_p) { erts_system_monitor_flags.busy_port = 0; erts_system_monitor_flags.busy_dist_port = 0; if (c_p) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } } @@ -2109,8 +2109,8 @@ system_monitor(Process *p, Eterm monitor_pid, Eterm list) int busy_port, busy_dist_port; system_blocked = 1; - erts_smp_proc_unlock(p, 
ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); if (!erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, monitor_pid, 0)) goto error; @@ -2149,16 +2149,16 @@ system_monitor(Process *p, Eterm monitor_pid, Eterm list) erts_system_monitor_flags.busy_port = !!busy_port; erts_system_monitor_flags.busy_dist_port = !!busy_dist_port; - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); BIF_RET(prev); } error: if (system_blocked) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); } BIF_ERROR(p, BADARG); @@ -2168,8 +2168,8 @@ system_monitor(Process *p, Eterm monitor_pid, Eterm list) void erts_system_profile_clear(Process *c_p) { if (c_p) { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); } erts_set_system_profile(NIL); erts_system_profile_flags.scheduler = 0; @@ -2177,8 +2177,8 @@ void erts_system_profile_clear(Process *c_p) { erts_system_profile_flags.runnable_ports = 0; erts_system_profile_flags.exclusive = 0; if (c_p) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } } @@ -2241,8 +2241,8 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2) int scheduler, runnable_procs, runnable_ports, exclusive; system_blocked = 1; - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); /* Check if valid process, no locks are taken */ @@ -2293,8 +2293,8 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2) erts_system_profile_flags.runnable_procs = !!runnable_procs; erts_system_profile_flags.exclusive = !!exclusive; erts_system_profile_ts_type = ts; - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); BIF_RET(prev); @@ -2302,8 +2302,8 @@ BIF_RETTYPE system_profile_2(BIF_ALIST_2) error: if (system_blocked) { - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); } BIF_ERROR(p, BADARG); @@ -2328,7 +2328,7 @@ typedef struct { Eterm ref; Eterm ref_heap[ERTS_REF_THING_SIZE]; Eterm target; - erts_smp_atomic32_t refc; + erts_atomic32_t refc; } ErtsTraceDeliveredAll; static void @@ -2336,7 +2336,7 @@ reply_trace_delivered_all(void *vtdarp) { ErtsTraceDeliveredAll *tdarp = (ErtsTraceDeliveredAll *) vtdarp; - if (erts_smp_atomic32_dec_read_nob(&tdarp->refc) == 0) { + if (erts_atomic32_dec_read_nob(&tdarp->refc) == 0) { Eterm ref_copy, msg; Process *rp = tdarp->proc; Eterm *hp = NULL; @@ -2370,7 +2370,7 @@ trace_delivered_1(BIF_ALIST_1) hp = &tdarp->ref_heap[0]; tdarp->ref = STORE_NC(&hp, NULL, ref); tdarp->target = BIF_ARG_1; - erts_smp_atomic32_init_nob(&tdarp->refc, + erts_atomic32_init_nob(&tdarp->refc, (erts_aint32_t) erts_no_schedulers); erts_proc_add_refc(BIF_P, 1); erts_schedule_multi_misc_aux_work(0, diff --git a/erts/emulator/beam/erl_bif_unique.c b/erts/emulator/beam/erl_bif_unique.c index 2f8adc87d5..aa79503819 100644 --- a/erts/emulator/beam/erl_bif_unique.c +++ b/erts/emulator/beam/erl_bif_unique.c @@ -136,7 +136,7 @@ Eterm erts_make_ref(Process 
*c_p) Eterm* hp; Uint32 ref[ERTS_REF_NUMBERS]; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p)); hp = HAlloc(c_p, ERTS_REF_THING_SIZE); @@ -803,7 +803,7 @@ BIF_RETTYPE make_ref_0(BIF_ALIST_0) BIF_RETTYPE res; Eterm* hp; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P)); hp = HAlloc(BIF_P, ERTS_REF_THING_SIZE); diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h index b036b28dbf..05007e864e 100644 --- a/erts/emulator/beam/erl_binary.h +++ b/erts/emulator/beam/erl_binary.h @@ -291,7 +291,7 @@ typedef union { * atomics are used they might * differ in size. */ - erts_smp_atomic_t smp_atomic_word; + erts_atomic_t smp_atomic_word; erts_atomic_t atomic_word; } ErtsMagicIndirectionWord; @@ -326,7 +326,7 @@ ERTS_GLB_INLINE Binary *erts_create_magic_binary_x(Uint size, ERTS_GLB_INLINE Binary *erts_create_magic_binary(Uint size, int (*destructor)(Binary *)); ERTS_GLB_INLINE Binary *erts_create_magic_indirection(int (*destructor)(Binary *)); -ERTS_GLB_INLINE erts_smp_atomic_t *erts_smp_binary_to_magic_indirection(Binary *bp); +ERTS_GLB_INLINE erts_atomic_t *erts_binary_to_magic_indirection(Binary *bp); ERTS_GLB_INLINE erts_atomic_t *erts_binary_to_magic_indirection(Binary *bp); #if ERTS_GLB_INLINE_INCL_FUNC_DEF @@ -519,16 +519,6 @@ erts_create_magic_indirection(int (*destructor)(Binary *)) but word aligned */ } -ERTS_GLB_INLINE erts_smp_atomic_t * -erts_smp_binary_to_magic_indirection(Binary *bp) -{ - ErtsMagicIndirectionWord *mip; - ASSERT(bp->intern.flags & BIN_FLAG_MAGIC); - ASSERT(ERTS_MAGIC_BIN_ATYPE(bp) == ERTS_ALC_T_MINDIRECTION); - mip = ERTS_MAGIC_BIN_UNALIGNED_DATA(bp); - return &mip->smp_atomic_word; -} - ERTS_GLB_INLINE erts_atomic_t * erts_binary_to_magic_indirection(Binary *bp) { @@ -536,7 +526,7 @@ erts_binary_to_magic_indirection(Binary *bp) ASSERT(bp->intern.flags & BIN_FLAG_MAGIC); ASSERT(ERTS_MAGIC_BIN_ATYPE(bp) == ERTS_ALC_T_MINDIRECTION); mip = ERTS_MAGIC_BIN_UNALIGNED_DATA(bp); - return &mip->atomic_word; + return &mip->smp_atomic_word; } #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c index e52b5b4bc5..2035b56eb5 100644 --- a/erts/emulator/beam/erl_bits.c +++ b/erts/emulator/beam/erl_bits.c @@ -69,12 +69,12 @@ static byte get_bit(byte b, size_t a_offs); #define byte_buf (ErlBitsState.byte_buf_) #define byte_buf_len (ErlBitsState.byte_buf_len_) -static erts_smp_atomic_t bits_bufs_size; +static erts_atomic_t bits_bufs_size; Uint erts_bits_bufs_size(void) { - return (Uint) erts_smp_atomic_read_nob(&bits_bufs_size); + return (Uint) erts_atomic_read_nob(&bits_bufs_size); } void @@ -100,7 +100,7 @@ erts_init_bits(void) ERTS_CT_ASSERT(offsetof(ErtsBinary,driver.binary.orig_bytes) == offsetof(Binary,orig_bytes)); - erts_smp_atomic_init_nob(&bits_bufs_size, 0); + erts_atomic_init_nob(&bits_bufs_size, 0); /* erl_process.c calls erts_bits_init_state() on all state instances */ } @@ -735,7 +735,7 @@ static void ERTS_INLINE need_byte_buf(ERL_BITS_PROTO_1(int need)) { if (byte_buf_len < need) { - erts_smp_atomic_add_nob(&bits_bufs_size, need - byte_buf_len); + erts_atomic_add_nob(&bits_bufs_size, need - byte_buf_len); byte_buf_len = need; byte_buf = erts_realloc(ERTS_ALC_T_BITS_BUF, byte_buf, byte_buf_len); } diff --git a/erts/emulator/beam/erl_cpu_topology.c 
b/erts/emulator/beam/erl_cpu_topology.c index 48b02c839e..49f9beb19f 100644 --- a/erts/emulator/beam/erl_cpu_topology.c +++ b/erts/emulator/beam/erl_cpu_topology.c @@ -60,7 +60,7 @@ static int max_main_threads; static int reader_groups; static ErtsCpuBindData *scheduler2cpu_map; -static erts_smp_rwmtx_t cpuinfo_rwmtx; +static erts_rwmtx_t cpuinfo_rwmtx; typedef enum { ERTS_CPU_BIND_UNDEFINED, @@ -441,7 +441,7 @@ erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp) int cgcc_ix; /* Unbind from cpu */ - erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx); + erts_rwmtx_rwlock(&cpuinfo_rwmtx); if (scheduler2cpu_map[esdp->no].bound_id >= 0 && erts_unbind_from_cpu(cpuinfo) == 0) { esdp->cpu_id = scheduler2cpu_map[esdp->no].bound_id = -1; @@ -460,7 +460,7 @@ erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp) } } ASSERT(no_cpu_groups_callbacks == cgcc_ix); - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); for (cgcc_ix = 0; cgcc_ix < no_cpu_groups_callbacks; cgcc_ix++) cgcc[cgcc_ix].callback(1, @@ -478,7 +478,7 @@ erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp) void erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(esdp->run_queue)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(esdp->run_queue)); if (esdp->no <= max_main_threads) erts_thr_set_main_status(1, (int) esdp->no); @@ -495,8 +495,8 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp) erts_cpu_groups_map_t *cgm; erts_cpu_groups_callback_list_t *cgcl; erts_cpu_groups_callback_call_t *cgcc; - erts_smp_runq_unlock(esdp->run_queue); - erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx); + erts_runq_unlock(esdp->run_queue); + erts_rwmtx_rwlock(&cpuinfo_rwmtx); cpu_id = scheduler2cpu_map[esdp->no].bind_id; if (cpu_id >= 0 && cpu_id != scheduler2cpu_map[esdp->no].bound_id) { res = erts_bind_to_cpu(cpuinfo, cpu_id); @@ -539,7 +539,7 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp) } ASSERT(no_cpu_groups_callbacks == cgcc_ix); - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); for (cgcc_ix = 0; cgcc_ix < no_cpu_groups_callbacks; cgcc_ix++) cgcc[cgcc_ix].callback(0, @@ -549,7 +549,7 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp) erts_free(ERTS_ALC_T_TMP, cgcc); - erts_smp_runq_lock(esdp->run_queue); + erts_runq_lock(esdp->run_queue); } void @@ -560,7 +560,7 @@ erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp) erts_cpu_groups_callback_list_t *cgcl; erts_cpu_groups_callback_call_t *cgcc; - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); cgcc = erts_alloc(ERTS_ALC_T_TMP, (no_cpu_groups_callbacks @@ -576,7 +576,7 @@ erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp) } ASSERT(no_cpu_groups_callbacks == cgcc_ix); - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); for (cgcc_ix = 0; cgcc_ix < no_cpu_groups_callbacks; cgcc_ix++) cgcc[cgcc_ix].callback(0, @@ -596,7 +596,7 @@ write_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size) int s_ix = 1; int cpu_ix; - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); if (cpu_bind_order != ERTS_CPU_BIND_NONE && size) { @@ -696,9 +696,9 @@ Eterm erts_bound_schedulers_term(Process *c_p) { ErtsCpuBindOrder order; - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); order = cpu_bind_order; - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); return bound_schedulers_term(order); } @@ 
-711,7 +711,7 @@ erts_bind_schedulers(Process *c_p, Eterm how) int cpudata_size; ErtsCpuBindOrder old_cpu_bind_order; - erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx); + erts_rwmtx_rwlock(&cpuinfo_rwmtx); if (erts_bind_to_cpu(cpuinfo, -1) == -ENOTSUP) { if (cpu_bind_order == ERTS_CPU_BIND_NONE @@ -767,7 +767,7 @@ erts_bind_schedulers(Process *c_p, Eterm how) done: - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); if (notify) erts_sched_notify_check_cpu_bind(); @@ -787,9 +787,9 @@ erts_sched_bind_atthrcreate_child(int unbind) { int res = 0; if (unbind) { - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); res = erts_unbind_from_cpu(cpuinfo); - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); } return res; } @@ -806,7 +806,7 @@ erts_sched_bind_atfork_prepare(void) ErtsSchedulerData *esdp = erts_get_scheduler_data(); int unbind = esdp != NULL && erts_is_scheduler_bound(esdp); if (unbind) - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); return unbind; } @@ -814,7 +814,7 @@ int erts_sched_bind_atfork_child(int unbind) { if (unbind) { - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&cpuinfo_rwmtx) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&cpuinfo_rwmtx) || erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); return erts_unbind_from_cpu(cpuinfo); } @@ -825,7 +825,7 @@ void erts_sched_bind_atfork_parent(int unbind) { if (unbind) - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); } Eterm @@ -859,9 +859,9 @@ erts_fake_scheduler_bindings(Process *p, Eterm how) return res; } - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); create_tmp_cpu_topology_copy(&cpudata, &cpudata_size); - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); if (!cpudata || fake_cpu_bind_order == ERTS_CPU_BIND_NONE) ERTS_BIF_PREP_RET(res, am_false); @@ -924,12 +924,12 @@ erts_get_schedulers_binds(Process *c_p) Eterm res = make_tuple(hp); *(hp++) = make_arityval(erts_no_schedulers); - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); for (ix = 1; ix <= erts_no_schedulers; ix++) *(hp++) = (scheduler2cpu_map[ix].bound_id >= 0 ? 
make_small(scheduler2cpu_map[ix].bound_id) : AM_unbound); - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); return res; } @@ -1340,7 +1340,7 @@ erts_set_cpu_topology(Process *c_p, Eterm term) int cpudata_size = 0; Eterm res; - erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx); + erts_rwmtx_rwlock(&cpuinfo_rwmtx); res = get_cpu_topology_term(c_p, ERTS_GET_USED_CPU_TOPOLOGY); if (term == am_undefined) { if (user_cpudata) @@ -1361,7 +1361,7 @@ erts_set_cpu_topology(Process *c_p, Eterm term) } else if (is_not_list(term)) { error: - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); res = THE_NON_VALUE; goto done; } @@ -1455,7 +1455,7 @@ erts_set_cpu_topology(Process *c_p, Eterm term) write_schedulers_bind_change(cpudata, cpudata_size); - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); erts_sched_notify_check_cpu_bind(); done: @@ -1609,7 +1609,7 @@ erts_get_cpu_topology_term(Process *c_p, Eterm which) { Eterm res; int type; - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); if (ERTS_IS_ATOM_STR("used", which)) type = ERTS_GET_USED_CPU_TOPOLOGY; else if (ERTS_IS_ATOM_STR("detected", which)) @@ -1622,7 +1622,7 @@ erts_get_cpu_topology_term(Process *c_p, Eterm which) res = THE_NON_VALUE; else res = get_cpu_topology_term(c_p, type); - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); return res; } @@ -1640,9 +1640,9 @@ get_logical_processors(int *conf, int *onln, int *avail) void erts_get_logical_processors(int *conf, int *onln, int *avail) { - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); get_logical_processors(conf, onln, avail); - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); } void @@ -1700,9 +1700,9 @@ erts_init_cpu_topology(void) { int ix; - erts_smp_rwmtx_init(&cpuinfo_rwmtx, "cpu_info", NIL, + erts_rwmtx_init(&cpuinfo_rwmtx, "cpu_info", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); - erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx); + erts_rwmtx_rwlock(&cpuinfo_rwmtx); scheduler2cpu_map = erts_alloc(ERTS_ALC_T_CPUDATA, (sizeof(ErtsCpuBindData) @@ -1720,13 +1720,13 @@ erts_init_cpu_topology(void) NULL); if (cpu_bind_order == ERTS_CPU_BIND_NONE) - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); else { erts_cpu_topology_t *cpudata; int cpudata_size; create_tmp_cpu_topology_copy(&cpudata, &cpudata_size); write_schedulers_bind_change(cpudata, cpudata_size); - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); erts_sched_notify_check_cpu_bind(); destroy_tmp_cpu_topology_copy(cpudata); } @@ -1736,7 +1736,7 @@ int erts_update_cpu_info(void) { int changed; - erts_smp_rwmtx_rwlock(&cpuinfo_rwmtx); + erts_rwmtx_rwlock(&cpuinfo_rwmtx); changed = erts_cpu_info_update(cpuinfo); if (changed) { erts_cpu_topology_t *cpudata; @@ -1769,7 +1769,7 @@ erts_update_cpu_info(void) write_schedulers_bind_change(cpudata, cpudata_size); destroy_tmp_cpu_topology_copy(cpudata); } - erts_smp_rwmtx_rwunlock(&cpuinfo_rwmtx); + erts_rwmtx_rwunlock(&cpuinfo_rwmtx); if (changed) erts_sched_notify_check_cpu_bind(); return changed; @@ -1786,7 +1786,7 @@ reader_groups_callback(int suspending, void *unused) { if (reader_groups && esdp->no <= max_main_threads) - erts_smp_rwmtx_set_reader_group(suspending ? 0 : group+1); + erts_rwmtx_set_reader_group(suspending ? 
0 : group+1); } static Eterm get_cpu_groups_map(Process *c_p, @@ -1815,9 +1815,9 @@ Eterm erts_get_reader_groups_map(Process *c_p) { Eterm res; - erts_smp_rwmtx_rlock(&cpuinfo_rwmtx); + erts_rwmtx_rlock(&cpuinfo_rwmtx); res = get_cpu_groups_map(c_p, reader_groups_map, 1); - erts_smp_rwmtx_runlock(&cpuinfo_rwmtx); + erts_rwmtx_runlock(&cpuinfo_rwmtx); return res; } @@ -2197,7 +2197,7 @@ add_cpu_groups(int groups, erts_cpu_groups_callback_list_t *cgcl; erts_cpu_groups_map_t *cgm; - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); if (use_groups > max_main_threads) use_groups = max_main_threads; @@ -2244,7 +2244,7 @@ cpu_groups_lookup(erts_cpu_groups_map_t *map, { int start, logical, ix; - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&cpuinfo_rwmtx) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&cpuinfo_rwmtx) || erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); if (esdp->cpu_id < 0) @@ -2272,7 +2272,7 @@ static void update_cpu_groups_maps(void) { erts_cpu_groups_map_t *cgm; - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&cpuinfo_rwmtx)); for (cgm = cpu_groups_maps; cgm; cgm = cgm->next) make_cpu_groups_map(cgm, 0); diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c index 2e5d401ca4..a21b9b9c0c 100644 --- a/erts/emulator/beam/erl_db.c +++ b/erts/emulator/beam/erl_db.c @@ -44,7 +44,7 @@ #include "erl_binary.h" -erts_smp_atomic_t erts_ets_misc_mem_size; +erts_atomic_t erts_ets_misc_mem_size; /* ** Utility macros @@ -189,7 +189,7 @@ static void delete_sched_table(Process *c_p, DbTable *tb); static void table_dec_refc(DbTable *tb, erts_aint_t min_val) { - if (erts_smp_refc_dectest(&tb->common.refc, min_val) == 0) + if (erts_refc_dectest(&tb->common.refc, min_val) == 0) schedule_free_dbtable(tb); } @@ -203,21 +203,21 @@ static ERTS_INLINE void make_btid(DbTable *tb) { Binary *btid = erts_create_magic_indirection(db_table_tid_destructor); - erts_smp_atomic_t *tbref = erts_smp_binary_to_magic_indirection(btid); - erts_smp_atomic_init_nob(tbref, (erts_aint_t) tb); + erts_atomic_t *tbref = erts_binary_to_magic_indirection(btid); + erts_atomic_init_nob(tbref, (erts_aint_t) tb); tb->common.btid = btid; /* * Table and magic indirection refer eachother, * and table is refered once by being alive... 
*/ - erts_smp_refc_init(&tb->common.refc, 2); + erts_refc_init(&tb->common.refc, 2); erts_refc_inc(&btid->intern.refc, 1); } static ERTS_INLINE DbTable* btid2tab(Binary* btid) { - erts_smp_atomic_t *tbref = erts_smp_binary_to_magic_indirection(btid); - return (DbTable *) erts_smp_atomic_read_nob(tbref); + erts_atomic_t *tbref = erts_binary_to_magic_indirection(btid); + return (DbTable *) erts_atomic_read_nob(tbref); } static DbTable * @@ -225,7 +225,7 @@ tid2tab(Eterm tid) { DbTable *tb; Binary *btid; - erts_smp_atomic_t *tbref; + erts_atomic_t *tbref; if (!is_internal_magic_ref(tid)) return NULL; @@ -233,8 +233,8 @@ tid2tab(Eterm tid) if (ERTS_MAGIC_BIN_DESTRUCTOR(btid) != db_table_tid_destructor) return NULL; - tbref = erts_smp_binary_to_magic_indirection(btid); - tb = (DbTable *) erts_smp_atomic_read_nob(tbref); + tbref = erts_binary_to_magic_indirection(btid); + tb = (DbTable *) erts_atomic_read_nob(tbref); ASSERT(!tb || tb->common.btid == btid); @@ -244,11 +244,11 @@ tid2tab(Eterm tid) static ERTS_INLINE int is_table_alive(DbTable *tb) { - erts_smp_atomic_t *tbref; + erts_atomic_t *tbref; DbTable *rtb; - tbref = erts_smp_binary_to_magic_indirection(tb->common.btid); - rtb = (DbTable *) erts_smp_atomic_read_nob(tbref); + tbref = erts_binary_to_magic_indirection(tb->common.btid); + rtb = (DbTable *) erts_atomic_read_nob(tbref); ASSERT(!rtb || rtb == tb); @@ -267,8 +267,8 @@ tid_clear(Process *c_p, DbTable *tb) { DbTable *rtb; Binary *btid = tb->common.btid; - erts_smp_atomic_t *tbref = erts_smp_binary_to_magic_indirection(btid); - rtb = (DbTable *) erts_smp_atomic_xchg_nob(tbref, (erts_aint_t) NULL); + erts_atomic_t *tbref = erts_binary_to_magic_indirection(btid); + rtb = (DbTable *) erts_atomic_xchg_nob(tbref, (erts_aint_t) NULL); ASSERT(!rtb || tb == rtb); if (rtb) { table_dec_refc(tb, 1); @@ -289,7 +289,7 @@ make_tid(Process *c_p, DbTable *tb) */ # define META_NAME_TAB_LOCK_CNT 16 union { - erts_smp_rwmtx_t lck; + erts_rwmtx_t lck; byte _cache_line_alignment[64]; }meta_name_tab_rwlocks[META_NAME_TAB_LOCK_CNT]; static struct meta_name_tab_entry { @@ -307,7 +307,7 @@ static unsigned meta_name_tab_mask; static ERTS_INLINE struct meta_name_tab_entry* meta_name_tab_bucket(Eterm name, - erts_smp_rwmtx_t** lockp) + erts_rwmtx_t** lockp) { unsigned bix = atom_val(name) & meta_name_tab_mask; struct meta_name_tab_entry* bucket = &meta_name_tab[bix]; @@ -376,14 +376,14 @@ free_dbtable(void *vtb) { DbTable *tb = (DbTable *) vtb; #ifdef HARDDEBUG - if (erts_smp_atomic_read_nob(&tb->common.memory_size) != sizeof(DbTable)) { + if (erts_atomic_read_nob(&tb->common.memory_size) != sizeof(DbTable)) { erts_fprintf(stderr, "ets: free_dbtable memory remain=%ld fix=%x\n", - erts_smp_atomic_read_nob(&tb->common.memory_size)-sizeof(DbTable), + erts_atomic_read_nob(&tb->common.memory_size)-sizeof(DbTable), tb->common.fixations); } #endif - erts_smp_rwmtx_destroy(&tb->common.rwlock); - erts_smp_mtx_destroy(&tb->common.fixlock); + erts_rwmtx_destroy(&tb->common.rwlock); + erts_mtx_destroy(&tb->common.fixlock); ASSERT(is_immed(tb->common.heir_data)); if (tb->common.btid) @@ -403,8 +403,8 @@ static void schedule_free_dbtable(DbTable* tb) * Caller is *not* allowed to access the specialized part * (hash or tree) of *tb after this function has returned. 
*/ - ASSERT(erts_smp_refc_read(&tb->common.refc, 0) == 0); - ASSERT(erts_smp_refc_read(&tb->common.fix_count, 0) == 0); + ASSERT(erts_refc_read(&tb->common.refc, 0) == 0); + ASSERT(erts_refc_read(&tb->common.fix_count, 0) == 0); erts_schedule_thr_prgr_later_cleanup_op(free_dbtable, (void *) tb, &tb->release.data, @@ -419,7 +419,7 @@ save_sched_table(Process *c_p, DbTable *tb) ASSERT(esdp); esdp->ets_tables.count++; - erts_smp_refc_inc(&tb->common.refc, 1); + erts_refc_inc(&tb->common.refc, 1); first = esdp->ets_tables.clist; if (!first) { @@ -509,11 +509,11 @@ save_owned_table(Process *c_p, DbTable *tb) { DbTable *first; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); first = (DbTable*) erts_psd_get(c_p, ERTS_PSD_ETS_OWNED_TABLES); - erts_smp_refc_inc(&tb->common.refc, 1); + erts_refc_inc(&tb->common.refc, 1); if (!first) { tb->common.owned.next = tb->common.owned.prev = tb; @@ -525,13 +525,13 @@ save_owned_table(Process *c_p, DbTable *tb) tb->common.owned.prev->common.owned.next = tb; first->common.owned.prev = tb; } - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); } static ERTS_INLINE void delete_owned_table(Process *p, DbTable *tb) { - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); if (tb->common.owned.next == tb) { DbTable* old; ASSERT(tb->common.owned.prev == tb); @@ -554,21 +554,21 @@ delete_owned_table(Process *p, DbTable *tb) if (tb == first) erts_psd_set(p, ERTS_PSD_ETS_OWNED_TABLES, tb->common.owned.next); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); table_dec_refc(tb, 1); } static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock) { - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; if (use_frequent_read_lock) - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; + rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; if (erts_ets_rwmtx_spin_count >= 0) rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count; - erts_smp_rwmtx_init_opt(&tb->common.rwlock, &rwmtx_opt, "db_tab", + erts_rwmtx_init_opt(&tb->common.rwlock, &rwmtx_opt, "db_tab", tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB); - erts_smp_mtx_init(&tb->common.fixlock, "db_tab_fix", + erts_mtx_init(&tb->common.fixlock, "db_tab_fix", tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB); tb->common.is_thread_safe = !(tb->common.status & DB_FINE_LOCKED); } @@ -577,10 +577,10 @@ static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind) { if (tb->common.type & DB_FINE_LOCKED) { if (kind == LCK_WRITE) { - erts_smp_rwmtx_rwlock(&tb->common.rwlock); + erts_rwmtx_rwlock(&tb->common.rwlock); tb->common.is_thread_safe = 1; } else { - erts_smp_rwmtx_rlock(&tb->common.rwlock); + erts_rwmtx_rlock(&tb->common.rwlock); ASSERT(!tb->common.is_thread_safe); } } @@ -589,10 +589,10 @@ static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind) switch (kind) { case LCK_WRITE: case LCK_WRITE_REC: - erts_smp_rwmtx_rwlock(&tb->common.rwlock); + erts_rwmtx_rwlock(&tb->common.rwlock); break; default: - erts_smp_rwmtx_rlock(&tb->common.rwlock); + erts_rwmtx_rlock(&tb->common.rwlock); } ASSERT(tb->common.is_thread_safe); } @@ -609,11 +609,11 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind) if (kind == LCK_WRITE) { ASSERT(tb->common.is_thread_safe); tb->common.is_thread_safe = 0; - erts_smp_rwmtx_rwunlock(&tb->common.rwlock); + 
erts_rwmtx_rwunlock(&tb->common.rwlock); } else { ASSERT(!tb->common.is_thread_safe); - erts_smp_rwmtx_runlock(&tb->common.rwlock); + erts_rwmtx_runlock(&tb->common.rwlock); } } else { @@ -621,10 +621,10 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind) switch (kind) { case LCK_WRITE: case LCK_WRITE_REC: - erts_smp_rwmtx_rwunlock(&tb->common.rwlock); + erts_rwmtx_rwunlock(&tb->common.rwlock); break; default: - erts_smp_rwmtx_runlock(&tb->common.rwlock); + erts_rwmtx_runlock(&tb->common.rwlock); } } } @@ -637,7 +637,7 @@ DbTable* db_get_table_aux(Process *p, int meta_already_locked) { DbTable *tb; - erts_smp_rwmtx_t *mtl = NULL; + erts_rwmtx_t *mtl = NULL; /* * IMPORTANT: Only scheduler threads are allowed @@ -649,9 +649,9 @@ DbTable* db_get_table_aux(Process *p, if (is_atom(id)) { struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&mtl); if (!meta_already_locked) - erts_smp_rwmtx_rlock(mtl); + erts_rwmtx_rlock(mtl); else{ - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(mtl) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(mtl) || erts_lc_rwmtx_is_rwlocked(mtl)); mtl = NULL; } @@ -685,7 +685,7 @@ DbTable* db_get_table_aux(Process *p, } } if (mtl) - erts_smp_rwmtx_runlock(mtl); + erts_rwmtx_runlock(mtl); return tb; } @@ -701,12 +701,12 @@ DbTable* db_get_table(Process *p, static int insert_named_tab(Eterm name_atom, DbTable* tb, int have_lock) { int ret = 0; - erts_smp_rwmtx_t* rwlock; + erts_rwmtx_t* rwlock; struct meta_name_tab_entry* new_entry; struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom, &rwlock); if (!have_lock) - erts_smp_rwmtx_rwlock(rwlock); + erts_rwmtx_rwlock(rwlock); if (bucket->pu.tb == NULL) { /* empty */ new_entry = bucket; @@ -754,25 +754,25 @@ static int insert_named_tab(Eterm name_atom, DbTable* tb, int have_lock) done: if (!have_lock) - erts_smp_rwmtx_rwunlock(rwlock); + erts_rwmtx_rwunlock(rwlock); return ret; } static int remove_named_tab(DbTable *tb, int have_lock) { int ret = 0; - erts_smp_rwmtx_t* rwlock; + erts_rwmtx_t* rwlock; Eterm name_atom = tb->common.the_name; struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom, &rwlock); ASSERT(is_table_named(tb)); - if (!have_lock && erts_smp_rwmtx_tryrwlock(rwlock) == EBUSY) { + if (!have_lock && erts_rwmtx_tryrwlock(rwlock) == EBUSY) { db_unlock(tb, LCK_WRITE); - erts_smp_rwmtx_rwlock(rwlock); + erts_rwmtx_rwlock(rwlock); db_lock(tb, LCK_WRITE); } - ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(rwlock)); + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(rwlock)); if (bucket->pu.tb == NULL) { goto done; @@ -825,7 +825,7 @@ static int remove_named_tab(DbTable *tb, int have_lock) done: if (!have_lock) - erts_smp_rwmtx_rwunlock(rwlock); + erts_rwmtx_rwunlock(rwlock); return ret; } @@ -834,11 +834,11 @@ done: */ static ERTS_INLINE void local_fix_table(DbTable* tb) { - erts_smp_refc_inc(&tb->common.fix_count, 1); + erts_refc_inc(&tb->common.fix_count, 1); } static ERTS_INLINE void local_unfix_table(DbTable* tb) { - if (erts_smp_refc_dectest(&tb->common.fix_count, 0) == 0) { + if (erts_refc_dectest(&tb->common.fix_count, 0) == 0) { ASSERT(IS_HASH_TABLE(tb->common.status)); db_unfix_table_hash(&(tb->hash)); } @@ -1479,7 +1479,7 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2) DbTable* tb; Eterm ret; Eterm old_name; - erts_smp_rwmtx_t *lck1, *lck2; + erts_rwmtx_t *lck1, *lck2; #ifdef HARDDEBUG erts_fprintf(stderr, @@ -1502,7 +1502,7 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2) if (lck1 == lck2) lck2 = NULL; else if (lck1 > lck2) { - erts_smp_rwmtx_t *tmp = lck1; + erts_rwmtx_t *tmp = lck1; 
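The swap being performed here in ets_rename_2 is the classic deadlock-avoidance idiom: when two meta-name-table bucket locks must be held at once, every caller acquires them in one global order (ascending address), so no cycle of waiters can form. A minimal sketch of the idiom, using POSIX rwlocks and illustrative names rather than the emulator's erts_rwmtx_t:

    #include <pthread.h>
    #include <stdint.h>

    /* Acquire two rwlocks for writing without risking deadlock: order the
     * acquisitions by address so all threads agree which lock is "first".
     * Sketch only; ERTS does the same with erts_rwmtx_rwlock on its own
     * lock type. */
    static void wrlock_pair(pthread_rwlock_t *a, pthread_rwlock_t *b)
    {
        if (a == b) {                      /* both names hash to one bucket */
            pthread_rwlock_wrlock(a);
            return;
        }
        if ((uintptr_t)a > (uintptr_t)b) { /* normalize: lower address first */
            pthread_rwlock_t *tmp = a;
            a = b;
            b = tmp;
        }
        pthread_rwlock_wrlock(a);
        pthread_rwlock_wrlock(b);
    }

Unlock order does not matter for deadlock avoidance, which is why the badarg path in this hunk can release lck1 and lck2 unconditionally.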
lck1 = lck2; lck2 = tmp; } @@ -1520,9 +1520,9 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2) } } - erts_smp_rwmtx_rwlock(lck1); + erts_rwmtx_rwlock(lck1); if (lck2) - erts_smp_rwmtx_rwlock(lck2); + erts_rwmtx_rwlock(lck2); tb = db_get_table_aux(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE, 1); if (!tb) @@ -1542,16 +1542,16 @@ BIF_RETTYPE ets_rename_2(BIF_ALIST_2) tb->common.the_name = BIF_ARG_2; db_unlock(tb, LCK_WRITE); - erts_smp_rwmtx_rwunlock(lck1); + erts_rwmtx_rwunlock(lck1); if (lck2) - erts_smp_rwmtx_rwunlock(lck2); + erts_rwmtx_rwunlock(lck2); BIF_RET(ret); badarg: if (tb) db_unlock(tb, LCK_WRITE); - erts_smp_rwmtx_rwunlock(lck1); + erts_rwmtx_rwunlock(lck1); if (lck2) - erts_smp_rwmtx_rwunlock(lck2); + erts_rwmtx_rwunlock(lck2); BIF_ERROR(BIF_P, BADARG); } @@ -1691,11 +1691,11 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) { DbTable init_tb; - erts_smp_atomic_init_nob(&init_tb.common.memory_size, 0); + erts_atomic_init_nob(&init_tb.common.memory_size, 0); tb = (DbTable*) erts_db_alloc(ERTS_ALC_T_DB_TABLE, &init_tb, sizeof(DbTable)); - erts_smp_atomic_init_nob(&tb->common.memory_size, - erts_smp_atomic_read_nob(&init_tb.common.memory_size)); + erts_atomic_init_nob(&tb->common.memory_size, + erts_atomic_read_nob(&init_tb.common.memory_size)); } tb->common.meth = meth; @@ -1703,13 +1703,13 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2) tb->common.status = status; tb->common.type = status & ERTS_ETS_TABLE_TYPES; /* Note, 'type' is *read only* from now on... */ - erts_smp_refc_init(&tb->common.fix_count, 0); + erts_refc_init(&tb->common.fix_count, 0); db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ)); tb->common.keypos = keypos; tb->common.owner = BIF_P->common.id; set_heir(BIF_P, tb, heir, heir_data); - erts_smp_atomic_init_nob(&tb->common.nitems, 0); + erts_atomic_init_nob(&tb->common.nitems, 0); tb->common.fixing_procs = NULL; tb->common.compress = is_compressed; @@ -1892,7 +1892,7 @@ BIF_RETTYPE ets_delete_1(BIF_ALIST_1) * Process 'rp' might be exiting, but our table lock prevents it * from terminating as it cannot complete erts_db_process_exiting(). 
*/ - ASSERT(!(ERTS_PSFLG_FREE & erts_smp_atomic32_read_nob(&rp->state))); + ASSERT(!(ERTS_PSFLG_FREE & erts_atomic32_read_nob(&rp->state))); delete_owned_table(rp, tb); BIF_P->flags |= F_USING_DB; @@ -1967,12 +1967,12 @@ BIF_RETTYPE ets_give_away_3(BIF_ALIST_3) db_unlock(tb,LCK_WRITE); send_ets_transfer_message(BIF_P, to_proc, &to_locks, tb, BIF_ARG_3); - erts_smp_proc_unlock(to_proc, to_locks); + erts_proc_unlock(to_proc, to_locks); UnUseTmpHeap(5,BIF_P); BIF_RET(am_true); badarg: - if (to_proc != NULL && to_proc != BIF_P) erts_smp_proc_unlock(to_proc, to_locks); + if (to_proc != NULL && to_proc != BIF_P) erts_proc_unlock(to_proc, to_locks); if (tb != NULL) db_unlock(tb, LCK_WRITE); BIF_ERROR(BIF_P, BADARG); } @@ -2196,7 +2196,7 @@ BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2) if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) { BIF_ERROR(BIF_P, BADARG); } - nitems = erts_smp_atomic_read_nob(&tb->common.nitems); + nitems = erts_atomic_read_nob(&tb->common.nitems); tb->common.meth->db_delete_all_objects(BIF_P, tb); db_unlock(tb, LCK_WRITE); BIF_RET(erts_make_integer(nitems,BIF_P)); @@ -2247,7 +2247,7 @@ BIF_RETTYPE ets_select_delete_2(BIF_ALIST_2) */ struct ErtsEtsAllReq_ { - erts_smp_atomic32_t refc; + erts_atomic32_t refc; Process *proc; ErtsOIRefStorage ref; ErtsEtsAllReqList list[1]; /* one per scheduler */ @@ -2380,7 +2380,7 @@ ets_all_reply(ErtsSchedulerData *esdp, ErtsEtsAllReq **reqpp, erts_proc_dec_refc(reqp->proc); - if (erts_smp_atomic32_dec_read_nob(&reqp->refc) == 0) + if (erts_atomic32_dec_read_nob(&reqp->refc) == 0) erts_free(ERTS_ALC_T_ETS_ALL_REQ, reqp); *reqpp = NULL; @@ -2468,7 +2468,7 @@ BIF_RETTYPE ets_internal_request_all_0(BIF_ALIST_0) Eterm ref = erts_make_ref(BIF_P); ErtsEtsAllReq *req = erts_alloc(ERTS_ALC_T_ETS_ALL_REQ, ERTS_ETS_ALL_REQ_SIZE); - erts_smp_atomic32_init_nob(&req->refc, + erts_atomic32_init_nob(&req->refc, (erts_aint32_t) erts_no_schedulers); erts_oiref_storage_save(&req->ref, ref); req->proc = BIF_P; @@ -3162,7 +3162,7 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1) if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_INFO, LCK_READ)) == NULL || tb->common.owner != owner) { if (BIF_P != rp) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN); if (is_atom(BIF_ARG_1) || is_small(BIF_ARG_1)) { BIF_RET(am_undefined); } @@ -3176,7 +3176,7 @@ BIF_RETTYPE ets_info_1(BIF_ALIST_1) db_unlock(tb, LCK_READ); /*if (rp != NULL && rp != BIF_P) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);*/ + erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);*/ hp = HAlloc(BIF_P, 5*sizeof(fields)/sizeof(Eterm)); res = NIL; @@ -3296,9 +3296,9 @@ void init_db(ErtsDbSpinCount db_spin_count) size_t size; int max_spin_count = (1 << 15) - 1; /* internal limit */ - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; - rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; + rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; + rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED; switch (db_spin_count) { case ERTS_DB_SPNCNT_NONE: @@ -3338,12 +3338,12 @@ void init_db(ErtsDbSpinCount db_spin_count) rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count; for (i=0; i<META_NAME_TAB_LOCK_CNT; i++) { - erts_smp_rwmtx_init_opt(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt, + erts_rwmtx_init_opt(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt, "meta_name_tab", make_small(i), ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DB); } - 
erts_smp_atomic_init_nob(&erts_ets_misc_mem_size, 0); + erts_atomic_init_nob(&erts_ets_misc_mem_size, 0); db_initialize_util(); if (user_requested_db_max_tabs < DB_DEF_MAX_TABS) @@ -3444,14 +3444,14 @@ retry: if (tb->common.owner != p->common.id) { if (to_proc != NULL ) { - erts_smp_proc_unlock(to_proc, to_locks); + erts_proc_unlock(to_proc, to_locks); } db_unlock(tb,LCK_WRITE); return !0; /* ok, someone already gave my table away */ } if (tb->common.heir != to_pid) { /* someone changed the heir */ if (to_proc != NULL ) { - erts_smp_proc_unlock(to_proc, to_locks); + erts_proc_unlock(to_proc, to_locks); } if (to_pid == p->common.id || to_pid == am_none) { return 0; /* no real heir, table still mine */ @@ -3464,7 +3464,7 @@ retry: } if (to_proc->common.u.alive.started_interval != tb->common.heir_started_interval) { - erts_smp_proc_unlock(to_proc, to_locks); + erts_proc_unlock(to_proc, to_locks); return 0; /* heir dead and pid reused, table still mine */ } @@ -3481,7 +3481,7 @@ retry: heir_data = tpv[1]; } send_ets_transfer_message(p, to_proc, &to_locks, tb, heir_data); - erts_smp_proc_unlock(to_proc, to_locks); + erts_proc_unlock(to_proc, to_locks); return !0; } @@ -3532,17 +3532,17 @@ static SWord proc_cleanup_fixed_table(Process* p, DbFixation* fix) db_lock(tb, LCK_WRITE_REC); if (!(tb->common.status & DB_DELETE)) { erts_aint_t diff; - erts_smp_mtx_lock(&tb->common.fixlock); + erts_mtx_lock(&tb->common.fixlock); ASSERT(fixing_procs_rbt_lookup(tb->common.fixing_procs, p)); diff = -((erts_aint_t) fix->counter); - erts_smp_refc_add(&tb->common.fix_count,diff,0); + erts_refc_add(&tb->common.fix_count,diff,0); fix->counter = 0; fixing_procs_rbt_delete(&tb->common.fixing_procs, fix); - erts_smp_mtx_unlock(&tb->common.fixlock); + erts_mtx_unlock(&tb->common.fixlock); if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)) { work += db_unfix_table_hash(&(tb->hash)); } @@ -3605,9 +3605,9 @@ erts_db_process_exiting(Process *c_p, ErtsProcLocks c_p_locks) switch (state->op) { case GET_OWNED_TABLE: { DbTable* tb; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); tb = (DbTable*) erts_psd_get(c_p, ERTS_PSD_ETS_OWNED_TABLES); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); if (!tb) { /* Done with owned tables; now fixations */ @@ -3698,8 +3698,8 @@ static void fix_table_locked(Process* p, DbTable* tb) { DbFixation *fix; - erts_smp_mtx_lock(&tb->common.fixlock); - erts_smp_refc_inc(&tb->common.fix_count,1); + erts_mtx_lock(&tb->common.fixlock); + erts_refc_inc(&tb->common.fix_count,1); fix = tb->common.fixing_procs; if (fix == NULL) { tb->common.time.monotonic @@ -3712,7 +3712,7 @@ static void fix_table_locked(Process* p, DbTable* tb) ASSERT(fixed_tabs_find(NULL, fix)); ++(fix->counter); - erts_smp_mtx_unlock(&tb->common.fixlock); + erts_mtx_unlock(&tb->common.fixlock); return; } } @@ -3725,7 +3725,7 @@ static void fix_table_locked(Process* p, DbTable* tb) fix->counter = 1; fixing_procs_rbt_insert(&tb->common.fixing_procs, fix); - erts_smp_mtx_unlock(&tb->common.fixlock); + erts_mtx_unlock(&tb->common.fixlock); p->flags |= F_USING_DB; fixed_tabs_insert(p, fix); @@ -3738,16 +3738,16 @@ static void unfix_table_locked(Process* p, DbTable* tb, { DbFixation* fix; - erts_smp_mtx_lock(&tb->common.fixlock); + erts_mtx_lock(&tb->common.fixlock); fix = fixing_procs_rbt_lookup(tb->common.fixing_procs, p); if (fix) { - erts_smp_refc_dec(&tb->common.fix_count,0); + erts_refc_dec(&tb->common.fix_count,0); --(fix->counter); 
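unfix_table_locked pairs a per-process counter (guarded by the fixlock mutex) with the table-wide fix_count refcount; when the latter drops to zero the table is no longer fixed and pseudo-deleted hash entries may be purged. A sketch of the dec-and-test half with C11 atomics — the erts_refc_dectest calls elsewhere in this patch wrap the same idea:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Returns true iff this call performed the final decrement, i.e. the
     * caller now owns the deferred cleanup (for ETS hash tables:
     * db_unfix_table_hash purging INVALID_HASH entries). Sketch only. */
    static bool refc_dectest_is_zero(atomic_long *refc)
    {
        /* fetch_sub returns the value *before* the subtraction. */
        return atomic_fetch_sub_explicit(refc, 1, memory_order_acq_rel) == 1;
    }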
ASSERT(fix->counter >= 0); if (fix->counter == 0) { fixing_procs_rbt_delete(&tb->common.fixing_procs, fix); - erts_smp_mtx_unlock(&tb->common.fixlock); + erts_mtx_unlock(&tb->common.fixlock); fixed_tabs_delete(p, fix); erts_refc_dec(&fix->tabs.btid->intern.refc, 1); @@ -3758,15 +3758,15 @@ static void unfix_table_locked(Process* p, DbTable* tb, goto unlocked; } } - erts_smp_mtx_unlock(&tb->common.fixlock); + erts_mtx_unlock(&tb->common.fixlock); unlocked: if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status) - && erts_smp_atomic_read_nob(&tb->hash.fixdel) != (erts_aint_t)NULL) { + && erts_atomic_read_nob(&tb->hash.fixdel) != (erts_aint_t)NULL) { if (*kind_p == LCK_READ && tb->common.is_thread_safe) { /* Must have write lock while purging pseudo-deleted (OTP-8166) */ - erts_smp_rwmtx_runlock(&tb->common.rwlock); - erts_smp_rwmtx_rwlock(&tb->common.rwlock); + erts_rwmtx_runlock(&tb->common.rwlock); + erts_rwmtx_rwlock(&tb->common.rwlock); *kind_p = LCK_WRITE; if (tb->common.status & DB_DELETE) return; } @@ -3794,7 +3794,7 @@ static void free_fixations_op(DbFixation* fix, void* vctx) ASSERT(ctx->tb->common.status & DB_DELETE); diff = -((erts_aint_t) fix->counter); - erts_smp_refc_add(&ctx->tb->common.fix_count, diff, 0); + erts_refc_add(&ctx->tb->common.fix_count, diff, 0); if (fix->procs.p != ctx->p) { /* Fixated by other process */ fix->counter = 0; @@ -3839,7 +3839,7 @@ static SWord free_fixations_locked(Process* p, DbTable *tb) { struct free_fixations_ctx ctx; - ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&tb->common.rwlock)); + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock)); ctx.p = p; ctx.tb = tb; @@ -3982,7 +3982,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) int use_monotonic; if (What == am_size) { - ret = make_small(erts_smp_atomic_read_nob(&tb->common.nitems)); + ret = make_small(erts_atomic_read_nob(&tb->common.nitems)); } else if (What == am_type) { if (tb->common.status & DB_SET) { ret = am_set; @@ -3995,7 +3995,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) ret = am_bag; } } else if (What == am_memory) { - Uint words = (Uint) ((erts_smp_atomic_read_nob(&tb->common.memory_size) + Uint words = (Uint) ((erts_atomic_read_nob(&tb->common.memory_size) + sizeof(Uint) - 1) / sizeof(Uint)); @@ -4041,7 +4041,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) = ERTS_IS_ATOM_STR("safe_fixed_monotonic_time", What)) || ERTS_IS_ATOM_STR("safe_fixed", What)) { - erts_smp_mtx_lock(&tb->common.fixlock); + erts_mtx_lock(&tb->common.fixlock); if (IS_FIXED(tb)) { Uint need; Eterm *hp; @@ -4083,7 +4083,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) } else { ret = am_false; } - erts_smp_mtx_unlock(&tb->common.fixlock); + erts_mtx_unlock(&tb->common.fixlock); } else if (What == am_atom_put("stats",5)) { if (IS_HASH_TABLE(tb->common.status)) { FloatDef f; @@ -4107,7 +4107,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What) std_dev_exp = make_float(hp); PUT_DOUBLE(f, hp); hp += FLOAT_SIZE_OBJECT; - ret = TUPLE7(hp, make_small(erts_smp_atomic_read_nob(&tb->hash.nactive)), + ret = TUPLE7(hp, make_small(erts_atomic_read_nob(&tb->hash.nactive)), avg, std_dev_real, std_dev_exp, make_small(stats.min_chain_len), make_small(stats.max_chain_len), @@ -4139,9 +4139,9 @@ static void print_table(fmtfn_t to, void *to_arg, int show, DbTable* tb) tb->common.meth->db_print(to, to_arg, show, tb); - erts_print(to, to_arg, "Objects: %d\n", (int)erts_smp_atomic_read_nob(&tb->common.nitems)); + erts_print(to, to_arg, 
"Objects: %d\n", (int)erts_atomic_read_nob(&tb->common.nitems)); erts_print(to, to_arg, "Words: %bpu\n", - (Uint) ((erts_smp_atomic_read_nob(&tb->common.memory_size) + (Uint) ((erts_atomic_read_nob(&tb->common.memory_size) + sizeof(Uint) - 1) / sizeof(Uint))); @@ -4181,9 +4181,9 @@ void db_info(fmtfn_t to, void *to_arg, int show) /* Called by break handler * Uint erts_get_ets_misc_mem_size(void) { - ERTS_SMP_MEMORY_BARRIER; + ERTS_THR_MEMORY_BARRIER; /* Memory not allocated in ets_alloc */ - return (Uint) erts_smp_atomic_read_nob(&erts_ets_misc_mem_size); + return (Uint) erts_atomic_read_nob(&erts_ets_misc_mem_size); } /* SMP Note: May only be used when system is locked */ @@ -4192,7 +4192,7 @@ erts_db_foreach_table(void (*func)(DbTable *, void *), void *arg) { int ix; - ASSERT(erts_smp_thr_progress_is_blocking()); + ASSERT(erts_thr_progress_is_blocking()); for (ix = 0; ix < erts_no_schedulers; ix++) { ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix); @@ -4298,4 +4298,4 @@ void erts_lcnt_update_db_locks(int enable) { &lcnt_update_db_locks_per_sched, (void*)(UWord)enable); } -#endif /* ERTS_ENABLE_LOCK_COUNT */
\ No newline at end of file +#endif /* ERTS_ENABLE_LOCK_COUNT */ diff --git a/erts/emulator/beam/erl_db.h b/erts/emulator/beam/erl_db.h index d83126b3a2..318e90cb28 100644 --- a/erts/emulator/beam/erl_db.h +++ b/erts/emulator/beam/erl_db.h @@ -124,7 +124,7 @@ extern Export ets_select_delete_continue_exp; extern Export ets_select_count_continue_exp; extern Export ets_select_replace_continue_exp; extern Export ets_select_continue_exp; -extern erts_smp_atomic_t erts_ets_misc_mem_size; +extern erts_atomic_t erts_ets_misc_mem_size; Eterm erts_ets_colliding_names(Process*, Eterm name, Uint cnt); Uint erts_db_get_max_tabs(void); @@ -151,11 +151,11 @@ do { \ erts_aint_t sz__ = (((erts_aint_t) (ALLOC_SZ)) \ - ((erts_aint_t) (FREE_SZ))); \ ASSERT((TAB)); \ - erts_smp_atomic_add_nob(&(TAB)->common.memory_size, sz__); \ + erts_atomic_add_nob(&(TAB)->common.memory_size, sz__); \ } while (0) #define ERTS_ETS_MISC_MEM_ADD(SZ) \ - erts_smp_atomic_add_nob(&erts_ets_misc_mem_size, (SZ)); + erts_atomic_add_nob(&erts_ets_misc_mem_size, (SZ)); ERTS_GLB_INLINE void *erts_db_alloc(ErtsAlcType_t type, DbTable *tab, @@ -292,7 +292,7 @@ erts_db_free(ErtsAlcType_t type, DbTable *tab, void *ptr, Uint size) ERTS_DB_ALC_MEM_UPDATE_(tab, size, 0); ASSERT(((void *) tab) != ptr - || erts_smp_atomic_read_nob(&tab->common.memory_size) == 0); + || erts_atomic_read_nob(&tab->common.memory_size) == 0); erts_free(type, ptr); } diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c index 2945afe275..25072ede97 100644 --- a/erts/emulator/beam/erl_db_hash.c +++ b/erts/emulator/beam/erl_db_hash.c @@ -112,15 +112,15 @@ # define DB_USING_FINE_LOCKING(TB) (((TB))->common.type & DB_FINE_LOCKED) #ifdef ETHR_ORDERED_READ_DEPEND -#define SEGTAB(tb) ((struct segment**) erts_smp_atomic_read_nob(&(tb)->segtab)) +#define SEGTAB(tb) ((struct segment**) erts_atomic_read_nob(&(tb)->segtab)) #else #define SEGTAB(tb) \ (DB_USING_FINE_LOCKING(tb) \ - ? ((struct segment**) erts_smp_atomic_read_ddrb(&(tb)->segtab)) \ - : ((struct segment**) erts_smp_atomic_read_nob(&(tb)->segtab))) + ? ((struct segment**) erts_atomic_read_ddrb(&(tb)->segtab)) \ + : ((struct segment**) erts_atomic_read_nob(&(tb)->segtab))) #endif -#define NACTIVE(tb) ((int)erts_smp_atomic_read_nob(&(tb)->nactive)) -#define NITEMS(tb) ((int)erts_smp_atomic_read_nob(&(tb)->common.nitems)) +#define NACTIVE(tb) ((int)erts_atomic_read_nob(&(tb)->nactive)) +#define NITEMS(tb) ((int)erts_atomic_read_nob(&(tb)->common.nitems)) #define SLOT_IX_TO_SEG_IX(i) (((i)+(EXT_SEGSZ-FIRST_SEGSZ)) >> EXT_SEGSZ_EXP) @@ -138,12 +138,12 @@ static ERTS_INLINE Uint hash_to_ix(DbTableHash* tb, HashValue hval) { Uint mask = (DB_USING_FINE_LOCKING(tb) - ? erts_smp_atomic_read_acqb(&tb->szm) - : erts_smp_atomic_read_nob(&tb->szm)); + ? 
erts_atomic_read_acqb(&tb->szm) + : erts_atomic_read_nob(&tb->szm)); Uint ix = hval & mask; - if (ix >= erts_smp_atomic_read_nob(&tb->nactive)) { + if (ix >= erts_atomic_read_nob(&tb->nactive)) { ix &= mask>>1; - ASSERT(ix < erts_smp_atomic_read_nob(&tb->nactive)); + ASSERT(ix < erts_atomic_read_nob(&tb->nactive)); } return ix; } @@ -162,7 +162,7 @@ static ERTS_INLINE int add_fixed_deletion(DbTableHash* tb, int ix, sizeof(FixedDeletion)); ERTS_ETS_MISC_MEM_ADD(sizeof(FixedDeletion)); fixd->slot = ix; - was_next = erts_smp_atomic_read_acqb(&tb->fixdel); + was_next = erts_atomic_read_acqb(&tb->fixdel); do { /* Lockless atomic insertion in linked list: */ if (NFIXED(tb) <= fixated_by_me) { erts_db_free(ERTS_ALC_T_DB_FIX_DEL, (DbTable*)tb, @@ -171,7 +171,7 @@ static ERTS_INLINE int add_fixed_deletion(DbTableHash* tb, int ix, } exp_next = was_next; fixd->next = (FixedDeletion*) exp_next; - was_next = erts_smp_atomic_cmpxchg_mb(&tb->fixdel, + was_next = erts_atomic_cmpxchg_mb(&tb->fixdel, (erts_aint_t) fixd, exp_next); }while (was_next != exp_next); @@ -192,50 +192,50 @@ static ERTS_INLINE int add_fixed_deletion(DbTableHash* tb, int ix, # define GET_LOCK_MAYBE(tb,hval) ((tb)->common.is_thread_safe ? NULL : GET_LOCK(tb,hval)) /* Fine grained read lock */ -static ERTS_INLINE erts_smp_rwmtx_t* RLOCK_HASH(DbTableHash* tb, HashValue hval) +static ERTS_INLINE erts_rwmtx_t* RLOCK_HASH(DbTableHash* tb, HashValue hval) { if (tb->common.is_thread_safe) { return NULL; } else { - erts_smp_rwmtx_t* lck = GET_LOCK(tb,hval); + erts_rwmtx_t* lck = GET_LOCK(tb,hval); ASSERT(tb->common.type & DB_FINE_LOCKED); - erts_smp_rwmtx_rlock(lck); + erts_rwmtx_rlock(lck); return lck; } } /* Fine grained write lock */ -static ERTS_INLINE erts_smp_rwmtx_t* WLOCK_HASH(DbTableHash* tb, HashValue hval) +static ERTS_INLINE erts_rwmtx_t* WLOCK_HASH(DbTableHash* tb, HashValue hval) { if (tb->common.is_thread_safe) { return NULL; } else { - erts_smp_rwmtx_t* lck = GET_LOCK(tb,hval); + erts_rwmtx_t* lck = GET_LOCK(tb,hval); ASSERT(tb->common.type & DB_FINE_LOCKED); - erts_smp_rwmtx_rwlock(lck); + erts_rwmtx_rwlock(lck); return lck; } } -static ERTS_INLINE void RUNLOCK_HASH(erts_smp_rwmtx_t* lck) +static ERTS_INLINE void RUNLOCK_HASH(erts_rwmtx_t* lck) { if (lck != NULL) { - erts_smp_rwmtx_runlock(lck); + erts_rwmtx_runlock(lck); } } -static ERTS_INLINE void WUNLOCK_HASH(erts_smp_rwmtx_t* lck) +static ERTS_INLINE void WUNLOCK_HASH(erts_rwmtx_t* lck) { if (lck != NULL) { - erts_smp_rwmtx_rwunlock(lck); + erts_rwmtx_rwunlock(lck); } } #ifdef ERTS_ENABLE_LOCK_CHECK # define IFN_EXCL(tb,cmd) (((tb)->common.is_thread_safe) || (cmd)) -# define IS_HASH_RLOCKED(tb,hval) IFN_EXCL(tb,erts_smp_lc_rwmtx_is_rlocked(GET_LOCK(tb,hval))) -# define IS_HASH_WLOCKED(tb,lck) IFN_EXCL(tb,erts_smp_lc_rwmtx_is_rwlocked(lck)) -# define IS_TAB_WLOCKED(tb) erts_smp_lc_rwmtx_is_rwlocked(&(tb)->common.rwlock) +# define IS_HASH_RLOCKED(tb,hval) IFN_EXCL(tb,erts_lc_rwmtx_is_rlocked(GET_LOCK(tb,hval))) +# define IS_HASH_WLOCKED(tb,lck) IFN_EXCL(tb,erts_lc_rwmtx_is_rwlocked(lck)) +# define IS_TAB_WLOCKED(tb) erts_lc_rwmtx_is_rwlocked(&(tb)->common.rwlock) #else # define IS_HASH_RLOCKED(tb,hval) (1) # define IS_HASH_WLOCKED(tb,hval) (1) @@ -248,7 +248,7 @@ static ERTS_INLINE void WUNLOCK_HASH(erts_smp_rwmtx_t* lck) ** Slot READ locks updated accordingly, unlocked if EOT. 
*/ static ERTS_INLINE Sint next_slot(DbTableHash* tb, Uint ix, - erts_smp_rwmtx_t** lck_ptr) + erts_rwmtx_t** lck_ptr) { ix += DB_HASH_LOCK_CNT; if (ix < NACTIVE(tb)) return ix; @@ -259,7 +259,7 @@ static ERTS_INLINE Sint next_slot(DbTableHash* tb, Uint ix, } /* Same as next_slot but with WRITE locking */ static ERTS_INLINE Sint next_slot_w(DbTableHash* tb, Uint ix, - erts_smp_rwmtx_t** lck_ptr) + erts_rwmtx_t** lck_ptr) { ix += DB_HASH_LOCK_CNT; if (ix < NACTIVE(tb)) return ix; @@ -329,9 +329,9 @@ static ERTS_INLINE void SET_SEGTAB(DbTableHash* tb, struct segment** segtab) { if (DB_USING_FINE_LOCKING(tb)) - erts_smp_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab); + erts_atomic_set_wb(&tb->segtab, (erts_aint_t) segtab); else - erts_smp_atomic_set_nob(&tb->segtab, (erts_aint_t) segtab); + erts_atomic_set_nob(&tb->segtab, (erts_aint_t) segtab); } /* Used by select_replace on analyze_pattern */ @@ -343,7 +343,7 @@ typedef int (*extra_match_validator_t)(int keypos, Eterm match, Eterm guard, Ete static struct ext_segtab* alloc_ext_segtab(DbTableHash* tb, unsigned seg_ix); static void alloc_seg(DbTableHash *tb); static int free_seg(DbTableHash *tb, int free_records); -static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr, +static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_rwmtx_t** lck_ptr, HashDbTerm *list); static HashDbTerm* search_list(DbTableHash* tb, Eterm key, HashValue hval, HashDbTerm *list); @@ -541,7 +541,7 @@ static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel) { /*int tries = 0;*/ DEBUG_WAIT(); - if (erts_smp_atomic_cmpxchg_relb(&tb->fixdel, + if (erts_atomic_cmpxchg_relb(&tb->fixdel, (erts_aint_t) fixdel, (erts_aint_t) NULL) != (erts_aint_t) NULL) { /* Oboy, must join lists */ @@ -550,13 +550,13 @@ static void restore_fixdel(DbTableHash* tb, FixedDeletion* fixdel) erts_aint_t exp_tail; while (last->next != NULL) last = last->next; - was_tail = erts_smp_atomic_read_acqb(&tb->fixdel); + was_tail = erts_atomic_read_acqb(&tb->fixdel); do { /* Lockless atomic list insertion */ exp_tail = was_tail; last->next = (FixedDeletion*) exp_tail; /*++tries;*/ DEBUG_WAIT(); - was_tail = erts_smp_atomic_cmpxchg_relb(&tb->fixdel, + was_tail = erts_atomic_cmpxchg_relb(&tb->fixdel, (erts_aint_t) fixdel, exp_tail); }while (was_tail != exp_tail); @@ -572,18 +572,18 @@ SWord db_unfix_table_hash(DbTableHash *tb) FixedDeletion* fixdel; SWord work = 0; - ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rwlocked(&tb->common.rwlock) - || (erts_smp_lc_rwmtx_is_rlocked(&tb->common.rwlock) + ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock) + || (erts_lc_rwmtx_is_rlocked(&tb->common.rwlock) && !tb->common.is_thread_safe)); restart: - fixdel = (FixedDeletion*) erts_smp_atomic_xchg_mb(&tb->fixdel, + fixdel = (FixedDeletion*) erts_atomic_xchg_mb(&tb->fixdel, (erts_aint_t) NULL); while (fixdel != NULL) { FixedDeletion *fx = fixdel; int ix = fx->slot; HashDbTerm **bp; HashDbTerm *b; - erts_smp_rwmtx_t* lck = WLOCK_HASH(tb,ix); + erts_rwmtx_t* lck = WLOCK_HASH(tb,ix); if (IS_FIXED(tb)) { /* interrupted by fixer */ WUNLOCK_HASH(lck); @@ -629,10 +629,10 @@ int db_create_hash(Process *p, DbTable *tbl) { DbTableHash *tb = &tbl->hash; - erts_smp_atomic_init_nob(&tb->szm, FIRST_SEGSZ_MASK); - erts_smp_atomic_init_nob(&tb->nactive, FIRST_SEGSZ); - erts_smp_atomic_init_nob(&tb->fixdel, (erts_aint_t)NULL); - erts_smp_atomic_init_nob(&tb->segtab, (erts_aint_t)NULL); + erts_atomic_init_nob(&tb->szm, FIRST_SEGSZ_MASK); + erts_atomic_init_nob(&tb->nactive, FIRST_SEGSZ); + 
erts_atomic_init_nob(&tb->fixdel, (erts_aint_t)NULL); + erts_atomic_init_nob(&tb->segtab, (erts_aint_t)NULL); SET_SEGTAB(tb, tb->first_segtab); tb->nsegs = NSEG_1; tb->nslots = FIRST_SEGSZ; @@ -641,25 +641,25 @@ int db_create_hash(Process *p, DbTable *tbl) SIZEOF_SEGMENT(FIRST_SEGSZ)); sys_memset(tb->first_segtab[0], 0, SIZEOF_SEGMENT(FIRST_SEGSZ)); - erts_smp_atomic_init_nob(&tb->is_resizing, 0); + erts_atomic_init_nob(&tb->is_resizing, 0); if (tb->common.type & DB_FINE_LOCKED) { - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; int i; if (tb->common.type & DB_FREQ_READ) - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; + rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; if (erts_ets_rwmtx_spin_count >= 0) rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count; tb->locks = (DbTableHashFineLocks*) erts_db_alloc_fnf(ERTS_ALC_T_DB_SEG, /* Other type maybe? */ (DbTable *) tb, sizeof(DbTableHashFineLocks)); for (i=0; i<DB_HASH_LOCK_CNT; ++i) { - erts_smp_rwmtx_init_opt(&tb->locks->lck_vec[i].lck, &rwmtx_opt, + erts_rwmtx_init_opt(&tb->locks->lck_vec[i].lck, &rwmtx_opt, "db_hash_slot", tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB); } /* This important property is needed to guarantee the two buckets * involved in a grow/shrink operation it protected by the same lock: */ - ASSERT(erts_smp_atomic_read_nob(&tb->nactive) % DB_HASH_LOCK_CNT == 0); + ASSERT(erts_atomic_read_nob(&tb->nactive) % DB_HASH_LOCK_CNT == 0); } else { /* coarse locking */ tb->locks = NULL; @@ -672,7 +672,7 @@ static int db_first_hash(Process *p, DbTable *tbl, Eterm *ret) { DbTableHash *tb = &tbl->hash; Uint ix = 0; - erts_smp_rwmtx_t* lck = RLOCK_HASH(tb,ix); + erts_rwmtx_t* lck = RLOCK_HASH(tb,ix); HashDbTerm* list; for (;;) { @@ -705,7 +705,7 @@ static int db_next_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret) HashValue hval; Uint ix; HashDbTerm* b; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; hval = MAKE_HASH(key); lck = RLOCK_HASH(tb,hval); @@ -752,7 +752,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail) HashDbTerm** bp; HashDbTerm* b; HashDbTerm* q; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int nitems; int ret = DB_ERROR_NONE; @@ -778,7 +778,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail) if (tb->common.status & DB_SET) { HashDbTerm* bnext = b->next; if (b->hvalue == INVALID_HASH) { - erts_smp_atomic_inc_nob(&tb->common.nitems); + erts_atomic_inc_nob(&tb->common.nitems); } else if (key_clash_fail) { ret = DB_ERROR_BADKEY; @@ -806,7 +806,7 @@ int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail) do { if (db_eq(&tb->common,obj,&q->dbterm)) { if (q->hvalue == INVALID_HASH) { - erts_smp_atomic_inc_nob(&tb->common.nitems); + erts_atomic_inc_nob(&tb->common.nitems); q->hvalue = hval; if (q != b) { /* must move to preserve key insertion order */ *qp = q->next; @@ -827,7 +827,7 @@ Lnew: q->hvalue = hval; q->next = b; *bp = q; - nitems = erts_smp_atomic_inc_read_nob(&tb->common.nitems); + nitems = erts_atomic_inc_read_nob(&tb->common.nitems); WUNLOCK_HASH(lck); { int nactive = NACTIVE(tb); @@ -871,7 +871,7 @@ int db_get_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret) HashValue hval; int ix; HashDbTerm* b; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; hval = MAKE_HASH(key); lck = RLOCK_HASH(tb,hval); @@ -897,7 +897,7 @@ static int db_member_hash(DbTable *tbl, Eterm key, Eterm *ret) HashValue hval; int ix; HashDbTerm* b1; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; hval = MAKE_HASH(key); 
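Every read path in this file follows the same two steps visible here: hash the key, then map the hash to a slot with hash_to_ix. Because the table grows one bucket at a time (linear hashing), only the first nactive slots exist; a hash that lands beyond them falls back to the previous, half-size mask. A sketch of that address computation without the atomics, mirroring hash_to_ix from earlier in this diff:

    /* Map a hash value to a bucket index in a linear-hashing table.
     * szm is the current size mask (2^k - 1); nactive is the number of
     * buckets activated so far, with 2^(k-1) < nactive <= 2^k. */
    static unsigned hash_to_slot(unsigned hval, unsigned szm, unsigned nactive)
    {
        unsigned ix = hval & szm;   /* try the current (large) mask        */
        if (ix >= nactive)
            ix &= szm >> 1;         /* slot not yet split: use parent slot */
        return ix;
    }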
ix = hash_to_ix(tb, hval); @@ -926,7 +926,7 @@ static int db_get_element_hash(Process *p, DbTable *tbl, HashValue hval; int ix; HashDbTerm* b1; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int retval; hval = MAKE_HASH(key); @@ -991,7 +991,7 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret) int ix; HashDbTerm** bp; HashDbTerm* b; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int nitems_diff = 0; hval = MAKE_HASH(key); @@ -1023,7 +1023,7 @@ int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret) } WUNLOCK_HASH(lck); if (nitems_diff) { - erts_smp_atomic_add_nob(&tb->common.nitems, nitems_diff); + erts_atomic_add_nob(&tb->common.nitems, nitems_diff); try_shrink(tb); } *ret = am_true; @@ -1040,7 +1040,7 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret) int ix; HashDbTerm** bp; HashDbTerm* b; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int nitems_diff = 0; int nkeys = 0; Eterm key; @@ -1081,7 +1081,7 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret) } WUNLOCK_HASH(lck); if (nitems_diff) { - erts_smp_atomic_add_nob(&tb->common.nitems, nitems_diff); + erts_atomic_add_nob(&tb->common.nitems, nitems_diff); try_shrink(tb); } *ret = am_true; @@ -1092,7 +1092,7 @@ static int db_erase_object_hash(DbTable *tbl, Eterm object, Eterm *ret) static int db_slot_hash(Process *p, DbTable *tbl, Eterm slot_term, Eterm *ret) { DbTableHash *tb = &tbl->hash; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; Sint slot; int retval; int nactive; @@ -1207,13 +1207,13 @@ static int match_traverse(Process* p, DbTableHash* tb, unsigned current_list_pos = 0; /* Prefound buckets list index */ Eterm match_res; Sint got = 0; /* Matched terms counter */ - erts_smp_rwmtx_t* lck; /* Slot lock */ + erts_rwmtx_t* lck; /* Slot lock */ int ret_value; - erts_smp_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue) + erts_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue) = (lock_for_write ? WLOCK_HASH : RLOCK_HASH); - void (*unlock_hash_function)(erts_smp_rwmtx_t*) + void (*unlock_hash_function)(erts_rwmtx_t*) = (lock_for_write ? WUNLOCK_HASH : RUNLOCK_HASH); - Sint (*next_slot_function)(DbTableHash*, Uint, erts_smp_rwmtx_t**) + Sint (*next_slot_function)(DbTableHash*, Uint, erts_rwmtx_t**) = (lock_for_write ? next_slot_w : next_slot); if ((ret_value = analyze_pattern(tb, pattern, extra_match_validator, &mpi)) @@ -1357,13 +1357,13 @@ static int match_traverse_continue(Process* p, DbTableHash* tb, */ HashDbTerm* saved_current; /* Helper to avoid double skip on match */ Eterm match_res; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int ret_value; - erts_smp_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue) + erts_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue) = (lock_for_write ? WLOCK_HASH : RLOCK_HASH); - void (*unlock_hash_function)(erts_smp_rwmtx_t*) + void (*unlock_hash_function)(erts_rwmtx_t*) = (lock_for_write ? WUNLOCK_HASH : RUNLOCK_HASH); - Sint (*next_slot_function)(DbTableHash* tb, Uint ix, erts_smp_rwmtx_t** lck_ptr) + Sint (*next_slot_function)(DbTableHash* tb, Uint ix, erts_rwmtx_t** lck_ptr) = (lock_for_write ? 
next_slot_w : next_slot); if (got < 0) { @@ -1970,7 +1970,7 @@ static int mtraversal_select_delete_on_match_res(void* context_ptr, Sint slot_ix *current_ptr = (*current_ptr)->next; // replace pointer to term using next free_term(sd_context_ptr->tb, del); } - erts_smp_atomic_dec_nob(&sd_context_ptr->tb->common.nitems); + erts_atomic_dec_nob(&sd_context_ptr->tb->common.nitems); return 1; } @@ -2209,7 +2209,7 @@ static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret) DbTableHash *tb = &tbl->hash; HashDbTerm **bp, *b; HashValue hval = MAKE_HASH(key); - erts_smp_rwmtx_t *lck = WLOCK_HASH(tb, hval); + erts_rwmtx_t *lck = WLOCK_HASH(tb, hval); int ix = hash_to_ix(tb, hval); int nitems_diff = 0; @@ -2238,7 +2238,7 @@ static int db_take_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret) } WUNLOCK_HASH(lck); if (nitems_diff) { - erts_smp_atomic_add_nob(&tb->common.nitems, nitems_diff); + erts_atomic_add_nob(&tb->common.nitems, nitems_diff); try_shrink(tb); } return DB_ERROR_NONE; @@ -2260,7 +2260,7 @@ int db_mark_all_deleted_hash(DbTable *tbl) HashDbTerm* list; int i; - ERTS_SMP_LC_ASSERT(IS_TAB_WLOCKED(tb)); + ERTS_LC_ASSERT(IS_TAB_WLOCKED(tb)); for (i = 0; i < NACTIVE(tb); i++) { if ((list = BUCKET(tb,i)) != NULL) { @@ -2271,7 +2271,7 @@ int db_mark_all_deleted_hash(DbTable *tbl) }while(list != NULL); } } - erts_smp_atomic_set_nob(&tb->common.nitems, 0); + erts_atomic_set_nob(&tb->common.nitems, 0); return DB_ERROR_NONE; } @@ -2345,8 +2345,8 @@ static int db_free_table_hash(DbTable *tbl) static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds) { DbTableHash *tb = &tbl->hash; - FixedDeletion* fixdel = (FixedDeletion*) erts_smp_atomic_read_acqb(&tb->fixdel); - ERTS_SMP_LC_ASSERT(IS_TAB_WLOCKED(tb) || (tb->common.status & DB_DELETE)); + FixedDeletion* fixdel = (FixedDeletion*) erts_atomic_read_acqb(&tb->fixdel); + ERTS_LC_ASSERT(IS_TAB_WLOCKED(tb) || (tb->common.status & DB_DELETE)); while (fixdel != NULL) { FixedDeletion *fx = fixdel; @@ -2358,11 +2358,11 @@ static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds) sizeof(FixedDeletion)); ERTS_ETS_MISC_MEM_ADD(-sizeof(FixedDeletion)); if (--reds < 0) { - erts_smp_atomic_set_relb(&tb->fixdel, (erts_aint_t)fixdel); + erts_atomic_set_relb(&tb->fixdel, (erts_aint_t)fixdel); return reds; /* Not done */ } } - erts_smp_atomic_set_relb(&tb->fixdel, (erts_aint_t)NULL); + erts_atomic_set_relb(&tb->fixdel, (erts_aint_t)NULL); while(tb->nslots != 0) { reds -= EXT_SEGSZ/64 + free_seg(tb, 1); @@ -2383,7 +2383,7 @@ static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds) (void*)tb->locks, sizeof(DbTableHashFineLocks)); tb->locks = NULL; } - ASSERT(erts_smp_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable)); + ASSERT(erts_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable)); return reds; /* Done */ } @@ -2482,7 +2482,7 @@ static int analyze_pattern(DbTableHash *tb, Eterm pattern, if (!db_has_variable(key)) { /* Bound key */ int ix, search_slot; HashDbTerm** bp; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; hval = MAKE_HASH(key); lck = RLOCK_HASH(tb,hval); ix = hash_to_ix(tb, hval); @@ -2729,7 +2729,7 @@ static void grow(DbTableHash* tb, int nitems) HashDbTerm** pnext; HashDbTerm** to_pnext; HashDbTerm* p; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int nactive; int from_ix, to_ix; int szm; @@ -2751,7 +2751,7 @@ static void grow(DbTableHash* tb, int nitems) } ASSERT(nactive < tb->nslots); - szm = erts_smp_atomic_read_nob(&tb->szm); + szm = erts_atomic_read_nob(&tb->szm); if (nactive <= szm) { 
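The branch grow() is opening here picks the bucket to split: activating slot nactive (the new to_ix) rehashes exactly the bucket whose index equals nactive under the previous, half-size mask, and once nactive outgrows the current mask a new doubling round starts at bucket 0 with the mask widened by one bit. The same computation extracted into a standalone sketch:

    /* Given the slot about to be activated (nactive, the new to_ix),
     * return the bucket it splits and, when a new doubling round begins,
     * widen the size mask. Mirrors the branch in grow(); sketch only. */
    static unsigned split_from_ix(unsigned nactive, unsigned *szm)
    {
        if (nactive <= *szm)
            return nactive & (*szm >> 1); /* split partner, current round  */
        *szm = (*szm << 1) | 1;           /* new round: widen mask one bit */
        return 0;                         /* first split of the new round  */
    }

This is also why both the old and the new bucket are guarded by one lock in the hunk that follows: DB_HASH_LOCK_CNT divides nactive, so from_ix and to_ix select the same lock in the fine-grained lock vector.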
from_ix = nactive & (szm >> 1); } else { @@ -2762,7 +2762,7 @@ static void grow(DbTableHash* tb, int nitems) to_ix = nactive; lck = WLOCK_HASH(tb, from_ix); - ERTS_SMP_ASSERT(lck == GET_LOCK_MAYBE(tb,to_ix)); + ERTS_ASSERT(lck == GET_LOCK_MAYBE(tb,to_ix)); /* Now a final double check (with the from_ix lock held) * that we did not get raced by a table fixer. */ @@ -2770,12 +2770,12 @@ static void grow(DbTableHash* tb, int nitems) WUNLOCK_HASH(lck); goto abort; } - erts_smp_atomic_set_nob(&tb->nactive, ++nactive); + erts_atomic_set_nob(&tb->nactive, ++nactive); if (from_ix == 0) { if (DB_USING_FINE_LOCKING(tb)) - erts_smp_atomic_set_relb(&tb->szm, szm); + erts_atomic_set_relb(&tb->szm, szm); else - erts_smp_atomic_set_nob(&tb->szm, szm); + erts_atomic_set_nob(&tb->szm, szm); } done_resizing(tb); @@ -2823,7 +2823,7 @@ static void shrink(DbTableHash* tb, int nitems) HashDbTerm** src_bp; HashDbTerm** dst_bp; HashDbTerm** bp; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int src_ix, dst_ix, low_szm; int nactive; int loop_limit = 5; @@ -2836,13 +2836,13 @@ static void shrink(DbTableHash* tb, int nitems) goto abort; /* already done (race) */ } src_ix = nactive - 1; - low_szm = erts_smp_atomic_read_nob(&tb->szm) >> 1; + low_szm = erts_atomic_read_nob(&tb->szm) >> 1; dst_ix = src_ix & low_szm; ASSERT(dst_ix < src_ix); ASSERT(nactive > FIRST_SEGSZ); lck = WLOCK_HASH(tb, dst_ix); - ERTS_SMP_ASSERT(lck == GET_LOCK_MAYBE(tb,src_ix)); + ERTS_ASSERT(lck == GET_LOCK_MAYBE(tb,src_ix)); /* Double check for racing table fixers */ if (IS_FIXED(tb)) { WUNLOCK_HASH(lck); @@ -2871,9 +2871,9 @@ static void shrink(DbTableHash* tb, int nitems) *src_bp = NULL; nactive = src_ix; - erts_smp_atomic_set_nob(&tb->nactive, nactive); + erts_atomic_set_nob(&tb->nactive, nactive); if (dst_ix == 0) { - erts_smp_atomic_set_relb(&tb->szm, low_szm); + erts_atomic_set_relb(&tb->szm, low_szm); } WUNLOCK_HASH(lck); @@ -2908,12 +2908,12 @@ static HashDbTerm* search_list(DbTableHash* tb, Eterm key, /* It return the next live object in a table, NULL if no more */ /* In-bucket: RLOCKED */ /* Out-bucket: RLOCKED unless NULL */ -static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_smp_rwmtx_t** lck_ptr, +static HashDbTerm* next(DbTableHash *tb, Uint *iptr, erts_rwmtx_t** lck_ptr, HashDbTerm *list) { int i; - ERTS_SMP_LC_ASSERT(IS_HASH_RLOCKED(tb,*iptr)); + ERTS_LC_ASSERT(IS_HASH_RLOCKED(tb,*iptr)); for (list = list->next; list != NULL; list = list->next) { if (list->hvalue != INVALID_HASH) @@ -2943,7 +2943,7 @@ db_lookup_dbterm_hash(Process *p, DbTable *tbl, Eterm key, Eterm obj, DbTableHash *tb = &tbl->hash; HashValue hval; HashDbTerm **bp, *b; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int flags = 0; ASSERT(tb->common.status & DB_SET); @@ -2999,7 +2999,7 @@ db_lookup_dbterm_hash(Process *p, DbTable *tbl, Eterm key, Eterm obj, q->next = next; q->hvalue = hval; *bp = b = q; - erts_smp_atomic_inc_nob(&tb->common.nitems); + erts_atomic_inc_nob(&tb->common.nitems); } HRelease(p, hend, htop); @@ -3025,10 +3025,10 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle) DbTableHash *tb = &tbl->hash; HashDbTerm **bp = (HashDbTerm **) handle->bp; HashDbTerm *b = *bp; - erts_smp_rwmtx_t* lck = (erts_smp_rwmtx_t*) handle->lck; + erts_rwmtx_t* lck = (erts_rwmtx_t*) handle->lck; HashDbTerm* free_me = NULL; - ERTS_SMP_LC_ASSERT(IS_HASH_WLOCKED(tb, lck)); /* locked by db_lookup_dbterm_hash */ + ERTS_LC_ASSERT(IS_HASH_WLOCKED(tb, lck)); /* locked by db_lookup_dbterm_hash */ ASSERT((&b->dbterm == handle->dbterm) == !(tb->common.compress && 
handle->flags & DB_MUST_RESIZE)); @@ -3042,7 +3042,7 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle) } WUNLOCK_HASH(lck); - erts_smp_atomic_dec_nob(&tb->common.nitems); + erts_atomic_dec_nob(&tb->common.nitems); try_shrink(tb); } else { if (handle->flags & DB_MUST_RESIZE) { @@ -3051,7 +3051,7 @@ db_finalize_dbterm_hash(int cret, DbUpdateHandle* handle) } if (handle->flags & DB_INC_TRY_GROW) { int nactive; - int nitems = erts_smp_atomic_inc_read_nob(&tb->common.nitems); + int nitems = erts_atomic_inc_read_nob(&tb->common.nitems); WUNLOCK_HASH(lck); nactive = NACTIVE(tb); @@ -3079,7 +3079,7 @@ static int db_delete_all_objects_hash(Process* p, DbTable* tbl) } else { db_free_table_hash(tbl); db_create_hash(p, tbl); - erts_smp_atomic_set_nob(&tbl->hash.common.nitems, 0); + erts_atomic_set_nob(&tbl->hash.common.nitems, 0); } return 0; } @@ -3109,7 +3109,7 @@ void db_foreach_offheap_hash(DbTable *tbl, void db_calc_stats_hash(DbTableHash* tb, DbHashStats* stats) { HashDbTerm* b; - erts_smp_rwmtx_t* lck; + erts_rwmtx_t* lck; int sum = 0; int sq_sum = 0; int kept_items = 0; diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h index 319d563859..7d27609825 100644 --- a/erts/emulator/beam/erl_db_hash.h +++ b/erts/emulator/beam/erl_db_hash.h @@ -42,8 +42,8 @@ typedef struct hash_db_term { typedef struct db_table_hash_fine_locks { union { - erts_smp_rwmtx_t lck; - byte _cache_line_alignment[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_smp_rwmtx_t))]; + erts_rwmtx_t lck; + byte _cache_line_alignment[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(erts_rwmtx_t))]; }lck_vec[DB_HASH_LOCK_CNT]; } DbTableHashFineLocks; @@ -51,10 +51,10 @@ typedef struct db_table_hash { DbTableCommon common; /* SMP: szm and nactive are write-protected by is_resizing or table write lock */ - erts_smp_atomic_t szm; /* current size mask. */ - erts_smp_atomic_t nactive; /* Number of "active" slots */ + erts_atomic_t szm; /* current size mask. 
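[Editor's aside, not part of the commit: the hunks above mechanically swap erts_smp_rwmtx_t for erts_rwmtx_t in the hash table's striped bucket locks. A minimal stand-alone sketch of the same striping idea, written with POSIX rwlocks and hypothetical helper names rather than the ERTS API:]

/* Stand-in sketch: one rwlock per stripe of buckets, readers use
 * rdlock, resizers/writers use wrlock (cf. RLOCK_HASH/WLOCK_HASH). */
#include <pthread.h>
#include <stdio.h>

#define LOCK_CNT 16                        /* cf. DB_HASH_LOCK_CNT */

static pthread_rwlock_t locks[LOCK_CNT];   /* one lock per stripe */

static pthread_rwlock_t *rlock_hash(unsigned hval)
{
    pthread_rwlock_t *l = &locks[hval % LOCK_CNT];
    pthread_rwlock_rdlock(l);              /* many readers may hold this */
    return l;
}

static void runlock_hash(pthread_rwlock_t *l)
{
    pthread_rwlock_unlock(l);
}

int main(void)
{
    for (int i = 0; i < LOCK_CNT; i++)
        pthread_rwlock_init(&locks[i], NULL);
    pthread_rwlock_t *l = rlock_hash(42u); /* a lookup would run here */
    puts("bucket stripe read-locked");
    runlock_hash(l);
    return 0;
}

[Lookups on different stripes never contend, which is the point of keeping DB_HASH_LOCK_CNT locks cache-line aligned in DbTableHashFineLocks above.]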
diff --git a/erts/emulator/beam/erl_db_tree.c b/erts/emulator/beam/erl_db_tree.c
index d7deadacf0..038f6602bf 100644
--- a/erts/emulator/beam/erl_db_tree.c
+++ b/erts/emulator/beam/erl_db_tree.c
@@ -50,7 +50,7 @@
 #include "erl_db_tree.h"

 #define GETKEY_WITH_POS(Keypos, Tplp) (*((Tplp) + Keypos))
-#define NITEMS(tb) ((int)erts_smp_atomic_read_nob(&(tb)->common.nitems))
+#define NITEMS(tb) ((int)erts_atomic_read_nob(&(tb)->common.nitems))

 /*
 ** A stack of this size is enough for an AVL tree with more than
@@ -94,7 +94,7 @@
  */
 static DbTreeStack* get_static_stack(DbTableTree* tb)
 {
-    if (!erts_smp_atomic_xchg_acqb(&tb->is_stack_busy, 1)) {
+    if (!erts_atomic_xchg_acqb(&tb->is_stack_busy, 1)) {
         return &tb->static_stack;
     }
     return NULL;
 }
@@ -106,7 +106,7 @@ static DbTreeStack* get_static_stack(DbTableTree* tb)
 static DbTreeStack* get_any_stack(DbTableTree* tb)
 {
     DbTreeStack* stack;
-    if (!erts_smp_atomic_xchg_acqb(&tb->is_stack_busy, 1)) {
+    if (!erts_atomic_xchg_acqb(&tb->is_stack_busy, 1)) {
         return &tb->static_stack;
     }
     stack = erts_db_alloc(ERTS_ALC_T_DB_STK, (DbTable *) tb,
@@ -120,8 +120,8 @@ static DbTreeStack* get_any_stack(DbTableTree* tb)
 static void release_stack(DbTableTree* tb, DbTreeStack* stack)
 {
     if (stack == &tb->static_stack) {
-        ASSERT(erts_smp_atomic_read_nob(&tb->is_stack_busy) == 1);
-        erts_smp_atomic_set_relb(&tb->is_stack_busy, 0);
+        ASSERT(erts_atomic_read_nob(&tb->is_stack_busy) == 1);
+        erts_atomic_set_relb(&tb->is_stack_busy, 0);
     }
     else {
         erts_db_free(ERTS_ALC_T_DB_STK, (DbTable *) tb,
@@ -517,7 +517,7 @@ int db_create_tree(Process *p, DbTable *tbl)
                                            sizeof(TreeDbTerm *) * STACK_NEED);
     tb->static_stack.pos = 0;
     tb->static_stack.slot = 0;
-    erts_smp_atomic_init_nob(&tb->is_stack_busy, 0);
+    erts_atomic_init_nob(&tb->is_stack_busy, 0);
     tb->deletion = 0;
     return DB_ERROR_NONE;
 }
@@ -646,8 +646,8 @@ static int db_put_tree(DbTable *tbl, Eterm obj, int key_clash_fail)
     for (;;)
         if (!*this) { /* Found our place */
             state = 1;
-            if (erts_smp_atomic_inc_read_nob(&tb->common.nitems) >= TREE_MAX_ELEMENTS) {
-                erts_smp_atomic_dec_nob(&tb->common.nitems);
+            if (erts_atomic_inc_read_nob(&tb->common.nitems) >= TREE_MAX_ELEMENTS) {
+                erts_atomic_dec_nob(&tb->common.nitems);
                 return DB_ERROR_SYSRES;
             }
             *this = new_dbterm(tb, obj);
@@ -1608,7 +1608,7 @@ static int db_select_delete_continue_tree(Process *p,
     sc.max = 1000;
     sc.keypos = tb->common.keypos;

-    ASSERT(!erts_smp_atomic_read_nob(&tb->is_stack_busy));
+    ASSERT(!erts_atomic_read_nob(&tb->is_stack_busy));
     traverse_backwards(tb, &tb->static_stack, lastkey, &doit_select_delete, &sc);

     BUMP_REDS(p, 1000 - sc.max);
@@ -2020,7 +2020,7 @@ static SWord db_free_table_continue_tree(DbTable *tbl, SWord reds)
                      (DbTable *) tb,
                      (void *) tb->static_stack.array,
                      sizeof(TreeDbTerm *) * STACK_NEED);
-        ASSERT(erts_smp_atomic_read_nob(&tb->common.memory_size)
+        ASSERT(erts_atomic_read_nob(&tb->common.memory_size)
                == sizeof(DbTable));
     }
     return reds;
@@ -2030,7 +2030,7 @@ static int db_delete_all_objects_tree(Process* p, DbTable* tbl)
 {
     db_free_table_tree(tbl);
     db_create_tree(p, tbl);
-    erts_smp_atomic_set_nob(&tbl->tree.common.nitems, 0);
+    erts_atomic_set_nob(&tbl->tree.common.nitems, 0);
     return 0;
 }
@@ -2110,7 +2110,7 @@ static TreeDbTerm *linkout_tree(DbTableTree *tb, Eterm key) {
                 tstack[tpos++] = this;
                 state = delsub(this);
             }
-            erts_smp_atomic_dec_nob(&tb->common.nitems);
+            erts_atomic_dec_nob(&tb->common.nitems);
             break;
         }
     }
@@ -2177,7 +2177,7 @@ static TreeDbTerm *linkout_object_tree(DbTableTree *tb,
                 tstack[tpos++] = this;
                 state = delsub(this);
             }
-            erts_smp_atomic_dec_nob(&tb->common.nitems);
+            erts_atomic_dec_nob(&tb->common.nitems);
             break;
         }
     }
diff --git a/erts/emulator/beam/erl_db_tree.h b/erts/emulator/beam/erl_db_tree.h
index 72749ead1e..dc1b93d410 100644
--- a/erts/emulator/beam/erl_db_tree.h
+++ b/erts/emulator/beam/erl_db_tree.h
@@ -41,7 +41,7 @@ typedef struct db_table_tree {
     /* Tree-specific fields */
     TreeDbTerm *root;          /* The tree root */
     Uint deletion;             /* Being deleted */
-    erts_smp_atomic_t is_stack_busy;
+    erts_atomic_t is_stack_busy;
     DbTreeStack static_stack;
 } DbTableTree;
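[Editor's aside, not part of the commit: get_static_stack()/release_stack() above claim a preallocated scratch stack with an acquire exchange and give it back with a release store. A self-contained C11 sketch of the same idiom, with hypothetical names:]

/* Sketch: claim a shared buffer with acquire-xchg, free it with a
 * release store (cf. erts_atomic_xchg_acqb / erts_atomic_set_relb). */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int is_stack_busy;     /* 0 = free, 1 = claimed */
static int static_stack[128];        /* shared scratch buffer */

static int *try_claim_stack(void)
{
    /* acquire: the winner also sees all prior writes to the buffer */
    if (!atomic_exchange_explicit(&is_stack_busy, 1, memory_order_acquire))
        return static_stack;
    return NULL;                     /* busy: caller allocates its own */
}

static void release_stack_sketch(void)
{
    /* release: our buffer writes become visible to the next claimer */
    atomic_store_explicit(&is_stack_busy, 0, memory_order_release);
}

int main(void)
{
    int *stk = try_claim_stack();
    if (stk) { stk[0] = 1; release_stack_sketch(); }
    puts(stk ? "used static stack" : "fell back to allocated stack");
    return 0;
}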
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index 7310694e63..e017b9552b 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -170,7 +170,7 @@ static Eterm
 set_match_trace(Process *tracee_p, Eterm fail_term, ErtsTracer tracer,
                 Uint d_flags, Uint e_flags)
 {
-    ERTS_SMP_LC_ASSERT(
+    ERTS_LC_ASSERT(
         ERTS_PROC_LOCKS_ALL == erts_proc_lc_my_proc_locks(tracee_p)
         || erts_thr_progress_is_blocking());
@@ -361,7 +361,7 @@ typedef struct {
 } ErtsMatchPseudoProcess;

-static erts_smp_tsd_key_t match_pseudo_process_key;
+static erts_tsd_key_t match_pseudo_process_key;

 static ERTS_INLINE void
 cleanup_match_pseudo_process(ErtsMatchPseudoProcess *mpsp, int keep_heap)
@@ -415,21 +415,21 @@ get_match_pseudo_process(Process *c_p, Uint heap_size)
     esdp = c_p ? c_p->scheduler_data : erts_get_scheduler_data();
     mpsp = esdp ? esdp->match_pseudo_process :
-        (ErtsMatchPseudoProcess*) erts_smp_tsd_get(match_pseudo_process_key);
+        (ErtsMatchPseudoProcess*) erts_tsd_get(match_pseudo_process_key);
     if (mpsp) {
-        ASSERT(mpsp == erts_smp_tsd_get(match_pseudo_process_key));
+        ASSERT(mpsp == erts_tsd_get(match_pseudo_process_key));
         ASSERT(mpsp->process.scheduler_data == esdp);
         cleanup_match_pseudo_process(mpsp, 0);
     }
     else {
-        ASSERT(erts_smp_tsd_get(match_pseudo_process_key) == NULL);
+        ASSERT(erts_tsd_get(match_pseudo_process_key) == NULL);
         mpsp = create_match_pseudo_process();
         if (esdp) {
             esdp->match_pseudo_process = (void *) mpsp;
         }
         mpsp->process.scheduler_data = esdp;
-        erts_smp_tsd_set(match_pseudo_process_key, (void *) mpsp);
+        erts_tsd_set(match_pseudo_process_key, (void *) mpsp);
     }
     if (heap_size > ERTS_DEFAULT_MS_HEAP_SIZE*sizeof(Eterm)) {
         mpsp->u.heap = (Eterm*) erts_alloc(ERTS_ALC_T_DB_MS_RUN_HEAP, heap_size);
@@ -444,11 +444,11 @@ static void
 destroy_match_pseudo_process(void)
 {
     ErtsMatchPseudoProcess *mpsp;
-    mpsp = (ErtsMatchPseudoProcess *)erts_smp_tsd_get(match_pseudo_process_key);
+    mpsp = (ErtsMatchPseudoProcess *)erts_tsd_get(match_pseudo_process_key);
     if (mpsp) {
         cleanup_match_pseudo_process(mpsp, 0);
         erts_free(ERTS_ALC_T_DB_MS_PSDO_PROC, (void *) mpsp);
-        erts_smp_tsd_set(match_pseudo_process_key, (void *) NULL);
+        erts_tsd_set(match_pseudo_process_key, (void *) NULL);
     }
 }
@@ -456,9 +456,9 @@ static
 void
 match_pseudo_process_init(void)
 {
-    erts_smp_tsd_key_create(&match_pseudo_process_key,
+    erts_tsd_key_create(&match_pseudo_process_key,
                             "erts_match_pseudo_process_key");
-    erts_smp_install_exit_handler(destroy_match_pseudo_process);
+    erts_thr_install_exit_handler(destroy_match_pseudo_process);
 }

 void
@@ -469,7 +469,7 @@ erts_match_set_release_result(Process* c_p)

 /* The trace control word. */

-static erts_smp_atomic32_t trace_control_word;
+static erts_atomic32_t trace_control_word;

 /* This needs to be here, before the bif table... */
@@ -908,7 +908,7 @@ static void db_free_tmp_uncompressed(DbTerm* obj);
 */
 BIF_RETTYPE db_get_trace_control_word(Process *p)
 {
-    Uint32 tcw = (Uint32) erts_smp_atomic32_read_acqb(&trace_control_word);
+    Uint32 tcw = (Uint32) erts_atomic32_read_acqb(&trace_control_word);
     BIF_RET(erts_make_integer((Uint) tcw, p));
 }
@@ -926,7 +926,7 @@ BIF_RETTYPE db_set_trace_control_word(Process *p, Eterm new)
     if (val != ((Uint32)val))
         BIF_ERROR(p, BADARG);

-    old_tcw = (Uint32) erts_smp_atomic32_xchg_relb(&trace_control_word,
+    old_tcw = (Uint32) erts_atomic32_xchg_relb(&trace_control_word,
                                                    (erts_aint32_t) val);
     BIF_RET(erts_make_integer((Uint) old_tcw, p));
 }
@@ -1451,7 +1451,7 @@ void db_initialize_util(void){
           sizeof(DMCGuardBif),
           (int (*)(const void *, const void *)) &cmp_guard_bif);
     match_pseudo_process_init();
-    erts_smp_atomic32_init_nob(&trace_control_word, 0);
+    erts_atomic32_init_nob(&trace_control_word, 0);
 }
@@ -2513,9 +2513,9 @@ restart:
         case matchEnableTrace:
             ASSERT(c_p == self);
             if ( (n = erts_trace_flag2bit(esp[-1]))) {
-                erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+                erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
                 set_tracee_flags(c_p, ERTS_TRACER(c_p), 0, n);
-                erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+                erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
                 esp[-1] = am_true;
             } else {
                 esp[-1] = FAIL_TERM;
@@ -2530,9 +2530,9 @@ restart:
                     /* Always take over the tracer of the current process */
                     set_tracee_flags(tmpp, ERTS_TRACER(c_p), 0, n);
                     if (tmpp == c_p)
-                        erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL_MINOR);
+                        erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL_MINOR);
                     else
-                        erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL);
+                        erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL);
                     esp[-1] = am_true;
                 }
             }
@@ -2540,9 +2540,9 @@ restart:
         case matchDisableTrace:
             ASSERT(c_p == self);
             if ( (n = erts_trace_flag2bit(esp[-1]))) {
-                erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+                erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
                 set_tracee_flags(c_p, ERTS_TRACER(c_p), n, 0);
-                erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+                erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
                 esp[-1] = am_true;
             } else {
                 esp[-1] = FAIL_TERM;
@@ -2557,9 +2557,9 @@ restart:
                     /* Always take over the tracer of the current process */
                     set_tracee_flags(tmpp, ERTS_TRACER(c_p), n, 0);
                     if (tmpp == c_p)
-                        erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL_MINOR);
+                        erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL_MINOR);
                     else
-                        erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL);
+                        erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL);
                     esp[-1] = am_true;
                 }
             }
@@ -2583,14 +2583,14 @@ restart:
             if (in_flags & ERTS_PAM_IGNORE_TRACE_SILENT)
                 break;
             if (*esp == am_true) {
-                erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+                erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
                 ERTS_TRACE_FLAGS(c_p) |= F_TRACE_SILENT;
-                erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+                erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
             }
             else if (*esp == am_false) {
-                erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+                erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
                 ERTS_TRACE_FLAGS(c_p) &= ~F_TRACE_SILENT;
-                erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+                erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
             }
             break;
         case matchTrace2:
@@ -2619,10 +2619,10 @@ restart:
                     ERTS_TRACER_CLEAR(&tracer);
                     break;
                 }
-                erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+                erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
                 (--esp)[-1] = set_match_trace(c_p, FAIL_TERM, tracer,
                                               d_flags, e_flags);
-                erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+                erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
                 ERTS_TRACER_CLEAR(&tracer);
             }
             break;
@@ -2652,13 +2652,13 @@ restart:
                 if (tmpp == c_p) {
                     (--esp)[-1] = set_match_trace(c_p, FAIL_TERM, tracer,
                                                   d_flags, e_flags);
-                    erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+                    erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
                 } else {
-                    erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+                    erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
                     (--esp)[-1] = set_match_trace(tmpp, FAIL_TERM, tracer,
                                                   d_flags, e_flags);
-                    erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL);
-                    erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+                    erts_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL);
+                    erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
                 }
                 ERTS_TRACER_CLEAR(&tracer);
             }
@@ -3262,7 +3262,7 @@ void db_cleanup_offheap_comp(DbTerm* obj)
             break;
         case FUN_SUBTAG:
             ASSERT(u.pb != &tmp);
-            if (erts_smp_refc_dectest(&u.fun->fe->refc, 0) == 0) {
+            if (erts_refc_dectest(&u.fun->fe->refc, 0) == 0) {
                 erts_erase_fun_entry(u.fun->fe);
             }
             break;
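[Editor's aside, not part of the commit: db_get_trace_control_word()/db_set_trace_control_word() above pair an acquire read with a release exchange that returns the previous value. A minimal C11 sketch of that get/swap pattern, with stand-in names:]

/* Sketch: acquire load for readers, release xchg for the setter
 * (cf. erts_atomic32_read_acqb / erts_atomic32_xchg_relb above). */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t trace_control_word;

static uint32_t get_tcw(void)
{
    return atomic_load_explicit(&trace_control_word, memory_order_acquire);
}

static uint32_t set_tcw(uint32_t new_val)
{
    /* returns the old value, like the xchg in the BIF above */
    return atomic_exchange_explicit(&trace_control_word, new_val,
                                    memory_order_release);
}

int main(void)
{
    uint32_t old = set_tcw(7);
    printf("old=%u new=%u\n", (unsigned) old, (unsigned) get_tcw());
    return 0;
}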
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index fb9db3b010..1c99b661e4 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -237,12 +237,12 @@ typedef struct {
  */
 typedef struct db_table_common {
-    erts_smp_refc_t refc;     /* reference count of table struct */
-    erts_smp_refc_t fix_count;/* fixation counter */
+    erts_refc_t refc;     /* reference count of table struct */
+    erts_refc_t fix_count;/* fixation counter */
     DbTableList all;
     DbTableList owned;
-    erts_smp_rwmtx_t rwlock;  /* rw lock on table */
-    erts_smp_mtx_t fixlock;   /* Protects fixing_procs and time */
+    erts_rwmtx_t rwlock;  /* rw lock on table */
+    erts_mtx_t fixlock;   /* Protects fixing_procs and time */
     int is_thread_safe;       /* No fine locking inside table needed */
     Uint32 type;              /* table type, *read only* after creation */
     Eterm owner;              /* Pid of the creator */
@@ -252,8 +252,8 @@ typedef struct db_table_common {
     Eterm the_name;           /* an atom */
     Binary *btid;
     DbTableMethod* meth;      /* table methods */
-    erts_smp_atomic_t nitems; /* Total number of items in table */
-    erts_smp_atomic_t memory_size;/* Total memory size. NOTE: in bytes! */
+    erts_atomic_t nitems;     /* Total number of items in table */
+    erts_atomic_t memory_size;/* Total memory size. NOTE: in bytes! */
     struct {                  /* Last fixation time */
         ErtsMonotonicTime monotonic;
         ErtsMonotonicTime offset;
@@ -286,7 +286,7 @@ typedef struct db_table_common {
                                                    (DB_BAG | DB_SET | DB_DUPLICATE_BAG)))
 #define IS_TREE_TABLE(Status) (!!((Status) & \
                                   DB_ORDERED_SET))
-#define NFIXED(T) (erts_smp_refc_read(&(T)->common.fix_count,0))
+#define NFIXED(T) (erts_refc_read(&(T)->common.fix_count,0))
 #define IS_FIXED(T) (NFIXED(T) != 0)

 /*
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index 7b19724814..ff9f2c0d22 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -141,7 +141,7 @@ void erl_drv_thr_init(void)
 /*
  * These functions implement the driver thread interface in erl_driver.h.
  * NOTE: Only use this interface from drivers. From within the emulator use
- * either the erl_threads.h, the erl_smp.h or the ethread.h interface.
+ * either the erl_threads.h or the ethread.h interface.
  */

 ErlDrvMutex *
diff --git a/erts/emulator/beam/erl_fun.c b/erts/emulator/beam/erl_fun.c
index bfecb7a85f..9c866250bb 100644
--- a/erts/emulator/beam/erl_fun.c
+++ b/erts/emulator/beam/erl_fun.c
@@ -30,17 +30,16 @@

 static Hash erts_fun_table;

-#include "erl_smp.h"
 #ifdef HIPE
 # include "hipe_mode_switch.h"
 #endif

-static erts_smp_rwmtx_t erts_fun_table_lock;
+static erts_rwmtx_t erts_fun_table_lock;

-#define erts_fun_read_lock()    erts_smp_rwmtx_rlock(&erts_fun_table_lock)
-#define erts_fun_read_unlock()  erts_smp_rwmtx_runlock(&erts_fun_table_lock)
-#define erts_fun_write_lock()   erts_smp_rwmtx_rwlock(&erts_fun_table_lock)
-#define erts_fun_write_unlock() erts_smp_rwmtx_rwunlock(&erts_fun_table_lock)
+#define erts_fun_read_lock()    erts_rwmtx_rlock(&erts_fun_table_lock)
+#define erts_fun_read_unlock()  erts_rwmtx_runlock(&erts_fun_table_lock)
+#define erts_fun_write_lock()   erts_rwmtx_rwlock(&erts_fun_table_lock)
+#define erts_fun_write_unlock() erts_rwmtx_rwunlock(&erts_fun_table_lock)

 static HashValue fun_hash(ErlFunEntry* obj);
 static int fun_cmp(ErlFunEntry* obj1, ErlFunEntry* obj2);
@@ -59,11 +58,11 @@ void
 erts_init_fun_table(void)
 {
     HashFunctions f;
-    erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
-    rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
-    rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+    erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER;
+    rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ;
+    rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED;

-    erts_smp_rwmtx_init_opt(&erts_fun_table_lock, &rwmtx_opt, "fun_tab", NIL,
+    erts_rwmtx_init_opt(&erts_fun_table_lock, &rwmtx_opt, "fun_tab", NIL,
         ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);

     f.hash = (H_FUN) fun_hash;
@@ -114,9 +113,9 @@ erts_put_fun_entry(Eterm mod, int uniq, int index)
     fe = (ErlFunEntry *) hash_put(&erts_fun_table, (void*) &template);
     sys_memset(fe->uniq, 0, sizeof(fe->uniq));
     fe->index = 0;
-    refc = erts_smp_refc_inctest(&fe->refc, 0);
+    refc = erts_refc_inctest(&fe->refc, 0);
     if (refc < 2) /* New or pending delete */
-        erts_smp_refc_inc(&fe->refc, 1);
+        erts_refc_inc(&fe->refc, 1);
     erts_fun_write_unlock();
     return fe;
 }
@@ -138,9 +137,9 @@ erts_put_fun_entry2(Eterm mod, int old_uniq, int old_index,
     sys_memcpy(fe->uniq, uniq, sizeof(fe->uniq));
     fe->index = index;
     fe->arity = arity;
-    refc = erts_smp_refc_inctest(&fe->refc, 0);
+    refc = erts_refc_inctest(&fe->refc, 0);
     if (refc < 2) /* New or pending delete */
-        erts_smp_refc_inc(&fe->refc, 1);
+        erts_refc_inc(&fe->refc, 1);
     erts_fun_write_unlock();
     return fe;
 }
@@ -165,9 +164,9 @@ erts_get_fun_entry(Eterm mod, int uniq, int index)
     erts_fun_read_lock();
     ret = (ErlFunEntry *) hash_get(&erts_fun_table, (void*) &template);
     if (ret) {
-        erts_aint_t refc = erts_smp_refc_inctest(&ret->refc, 1);
+        erts_aint_t refc = erts_refc_inctest(&ret->refc, 1);
         if (refc < 2) /* Pending delete */
-            erts_smp_refc_inc(&ret->refc, 1);
+            erts_refc_inc(&ret->refc, 1);
     }
     erts_fun_read_unlock();
     return ret;
@@ -187,7 +186,7 @@ erts_erase_fun_entry(ErlFunEntry* fe)
      * We have to check refc again since someone might have looked up
      * the fun entry and incremented refc after last check.
      */
-    if (erts_smp_refc_dectest(&fe->refc, -1) <= 0)
+    if (erts_refc_dectest(&fe->refc, -1) <= 0)
     {
         if (fe->address != unloaded_fun)
             erts_exit(ERTS_ERROR_EXIT,
@@ -219,7 +218,7 @@ erts_fun_purge_prepare(BeamInstr* start, BeamInstr* end)

             if (start <= addr && addr < end) {
                 fe->pend_purge_address = addr;
-                ERTS_SMP_WRITE_MEMORY_BARRIER;
+                ERTS_THR_WRITE_MEMORY_BARRIER;
                 fe->address = unloaded_fun;
 #ifdef HIPE
                 fe->pend_purge_native_address = fe->native_address;
@@ -273,10 +272,10 @@ erts_fun_purge_complete(ErlFunEntry **funs, Uint no)
 #ifdef HIPE
         fe->pend_purge_native_address = NULL;
 #endif
-        if (erts_smp_refc_dectest(&fe->refc, 0) == 0)
+        if (erts_refc_dectest(&fe->refc, 0) == 0)
             erts_erase_fun_entry(fe);
     }
-    ERTS_SMP_WRITE_MEMORY_BARRIER;
+    ERTS_THR_WRITE_MEMORY_BARRIER;
 }

 void
@@ -305,7 +304,7 @@ erts_dump_fun_entries(fmtfn_t to, void *to_arg)
 #ifdef HIPE
             erts_print(to, to_arg, "Native_address: %p\n", fe->native_address);
 #endif
            erts_print(to, to_arg, "Refc: %ld\n", erts_refc_read(&fe->refc, 1));
-            erts_print(to, to_arg, "Refc: %ld\n", erts_smp_refc_read(&fe->refc, 1));
+            erts_print(to, to_arg, "Refc: %ld\n", erts_refc_read(&fe->refc, 1));
             b = b->next;
         }
     }
@@ -336,7 +335,7 @@ fun_alloc(ErlFunEntry* template)
     obj->old_uniq = template->old_uniq;
     obj->old_index = template->old_index;
     obj->module = template->module;
-    erts_smp_refc_init(&obj->refc, -1);
+    erts_refc_init(&obj->refc, -1);
     obj->address = unloaded_fun;
     obj->pend_purge_address = NULL;
 #ifdef HIPE
diff --git a/erts/emulator/beam/erl_fun.h b/erts/emulator/beam/erl_fun.h
index 289d0d0b28..fb2901d866 100644
--- a/erts/emulator/beam/erl_fun.h
+++ b/erts/emulator/beam/erl_fun.h
@@ -21,7 +21,7 @@
 #ifndef __ERLFUNTABLE_H__
 #define __ERLFUNTABLE_H__

-#include "erl_smp.h"
+#include "erl_threads.h"

 /*
  * Fun entry.
  */
@@ -42,7 +42,7 @@ typedef struct erl_fun_entry {

     Uint arity;            /* The arity of the fun. */
     Eterm module;          /* Tagged atom for module. */
-    erts_smp_refc_t refc;  /* Reference count: One for code + one for each
+    erts_refc_t refc;      /* Reference count: One for code + one for each
                               fun object in each process. */
     BeamInstr *pend_purge_address;  /* address stored during a pending purge */
 #ifdef HIPE
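[Editor's aside, not part of the commit: erts_put_fun_entry() above uses the "increment-and-test, then resurrect" refcount idiom — if the post-increment count is below 2 the entry was new or had a delete pending, so an extra reference is taken on its behalf. A self-contained C11 sketch of that idiom, with hypothetical types:]

/* Sketch: inctest + resurrect-on-pending-delete refcounting
 * (cf. erts_refc_inctest / erts_refc_inc in the hunks above). */
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_long refc; } fun_entry;

static void grab_entry(fun_entry *fe)
{
    long refc = atomic_fetch_add(&fe->refc, 1) + 1;  /* "inctest" */
    if (refc < 2)                  /* new, or a delete was pending */
        atomic_fetch_add(&fe->refc, 1);
}

int main(void)
{
    fun_entry fe;
    atomic_init(&fe.refc, -1);     /* cf. erts_refc_init(&obj->refc, -1) */
    grab_entry(&fe);
    printf("refc now %ld\n", atomic_load(&fe.refc));
    return 0;
}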
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 14eeeaf70a..bc3bcdc9ad 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -180,7 +180,7 @@ typedef struct {
     Eterm ref;
     Eterm ref_heap[ERTS_REF_THING_SIZE];
     Uint req_sched;
-    erts_smp_atomic32_t refc;
+    erts_atomic32_t refc;
 } ErtsGCInfoReq;

 #ifdef ERTS_DIRTY_SCHEDULERS
@@ -274,7 +274,7 @@ erts_init_gc(void)
     }

 #ifdef ERTS_DIRTY_SCHEDULERS
-    erts_smp_mtx_init(&dirty_gc.mtx, "dirty_gc_info", NIL,
+    erts_mtx_init(&dirty_gc.mtx, "dirty_gc_info", NIL,
         ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
     init_gc_info(&dirty_gc.info);
 #endif
@@ -672,7 +672,7 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,

     ASSERT(CONTEXT_REDS - ERTS_REDS_LEFT(p, fcalls) >= esdp->virtual_reds);

-    state = erts_smp_atomic32_read_nob(&p->state);
+    state = erts_atomic32_read_nob(&p->state);

     if ((p->flags & (F_DISABLE_GC|F_DELAY_GC)) || state & ERTS_PSFLG_EXITING) {
 #ifdef ERTS_DIRTY_SCHEDULERS
@@ -698,7 +698,7 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,

     ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_GC);

-    erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
+    erts_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
     if (erts_system_monitor_long_gc != 0)
         start_time = erts_get_monotonic_time(esdp);
@@ -779,17 +779,17 @@ do_major_collection:
         ErtsProcLocks locks = ERTS_PROC_LOCKS_ALL;
         int res;

-        erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+        erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
         erts_send_exit_signal(p, p->common.id, p, &locks,
                               am_kill, NIL, NULL, 0);
-        erts_smp_proc_unlock(p, locks & ERTS_PROC_LOCKS_ALL_MINOR);
+        erts_proc_unlock(p, locks & ERTS_PROC_LOCKS_ALL_MINOR);

 #ifdef ERTS_DIRTY_SCHEDULERS
     delay_gc_after_start:
 #endif
         /* erts_send_exit_signal looks for ERTS_PSFLG_GC, so
            we have to remove it after the signal is sent */
-        erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
+        erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);

         /* We have to make sure that we have space for need on the heap */
         res = delay_garbage_collection(p, live_hf_end, need, fcalls);
@@ -797,7 +797,7 @@ do_major_collection:
         return res;
     }

-    erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
+    erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);

     if (IS_TRACED_FL(p, F_TRACE_GC)) {
         trace_gc(p, gc_trace_end_tag, reclaimed_now, THE_NON_VALUE);
@@ -924,7 +924,7 @@ garbage_collect_hibernate(Process* p, int check_long_gc)
     /*
      * Preliminaries.
      */
-    erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
+    erts_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
     ErtsGcQuickSanityCheck(p);
     ASSERT(p->stop == p->hend);	/* Stack must be empty. */
@@ -1015,7 +1015,7 @@ garbage_collect_hibernate(Process* p, int check_long_gc)

     p->flags |= F_HIBERNATED;

-    erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
+    erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);

     reds = gc_cost(actual_size, actual_size);
     return reds;
@@ -1137,7 +1137,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
     /*
      * Set GC state.
      */
-    erts_smp_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);
+    erts_atomic32_read_bor_nob(&p->state, ERTS_PSFLG_GC);

     /*
      * Just did a major collection (which has discarded the old heap),
@@ -1284,7 +1284,7 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
     /*
     * Restore status.
     */
-    erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
+    erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);

     reds += (Sint64) gc_cost((p->htop - p->heap) + byte_lit_size/sizeof(Uint), 0);
@@ -2914,7 +2914,7 @@ sweep_off_heap(Process *p, int fullsweep)
             case FUN_SUBTAG:
                 {
                     ErlFunEntry* fe = ((ErlFunThing*)ptr)->fe;
-                    if (erts_smp_refc_dectest(&fe->refc, 0) == 0) {
+                    if (erts_refc_dectest(&fe->refc, 0) == 0) {
                         erts_erase_fun_entry(fe);
                     }
                     break;
                 }
@@ -3274,11 +3274,11 @@ reply_gc_info(void *vgcirp)
     rp_locks &= ~ERTS_PROC_LOCK_MAIN;

     if (rp_locks)
-        erts_smp_proc_unlock(rp, rp_locks);
+        erts_proc_unlock(rp, rp_locks);

     erts_proc_dec_refc(rp);

-    if (erts_smp_atomic32_dec_read_nob(&gcirp->refc) == 0)
+    if (erts_atomic32_dec_read_nob(&gcirp->refc) == 0)
         gcireq_free(vgcirp);
 }
@@ -3330,7 +3330,7 @@ erts_gc_info_request(Process *c_p)
     gcirp->proc = c_p;
     gcirp->ref = STORE_NC(&hp, NULL, ref);
     gcirp->req_sched = esdp->no;
-    erts_smp_atomic32_init_nob(&gcirp->refc,
+    erts_atomic32_init_nob(&gcirp->refc,
                                (erts_aint32_t) erts_no_schedulers);

     erts_proc_add_refc(c_p, (Sint) erts_no_schedulers);
@@ -3626,12 +3626,12 @@ erts_check_off_heap2(Process *p, Eterm *htop)
             refc = erts_refc_read(&u.pb->val->intern.refc, 1);
             break;
         case FUN_SUBTAG:
-            refc = erts_smp_refc_read(&u.fun->fe->refc, 1);
+            refc = erts_refc_read(&u.fun->fe->refc, 1);
             break;
         case EXTERNAL_PID_SUBTAG:
         case EXTERNAL_PORT_SUBTAG:
         case EXTERNAL_REF_SUBTAG:
-            refc = erts_smp_refc_read(&u.ext->node->refc, 1);
+            refc = erts_refc_read(&u.ext->node->refc, 1);
             break;
         case REF_SUBTAG:
             ASSERT(is_magic_ref_thing(u.hdr));
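[Editor's aside, not part of the commit: erl_gc.c above brackets a collection by setting ERTS_PSFLG_GC with an atomic read-bor and clearing it with a read-band. A minimal C11 sketch of that flag-bracketing, using a made-up bit value:]

/* Sketch: set/clear one bit in a shared state word with fetch_or /
 * fetch_and (cf. erts_atomic32_read_bor_nob / read_band_nob above). */
#include <stdatomic.h>
#include <stdio.h>

#define PSFLG_GC (1u << 5)          /* hypothetical bit, not the real value */

static _Atomic unsigned proc_state;

int main(void)
{
    atomic_fetch_or(&proc_state, PSFLG_GC);    /* enter GC */
    printf("in gc: %d\n", !!(atomic_load(&proc_state) & PSFLG_GC));
    atomic_fetch_and(&proc_state, ~PSFLG_GC);  /* leave GC */
    printf("in gc: %d\n", !!(atomic_load(&proc_state) & PSFLG_GC));
    return 0;
}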
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
index 80f3aa04ab..f8cbe6f49a 100644
--- a/erts/emulator/beam/erl_hl_timer.c
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -152,7 +152,7 @@ typedef struct {

 typedef struct {
     Uint32 roflgs;
-    erts_smp_atomic32_t refc;
+    erts_atomic32_t refc;
     union {
         void *arg;
         erts_atomic_t next;
@@ -193,7 +193,7 @@ struct ErtsBifTimer_ {
         ErtsTWTimer twt;
     } type;
     struct {
-        erts_smp_atomic32_t state;
+        erts_atomic32_t state;
 #ifdef ERTS_MAGIC_REF_BIF_TIMERS
         ErtsMagicBinary *mbin;
         ErtsHLTimerList proc_list;
@@ -776,13 +776,13 @@ get_time_left(ErtsSchedulerData *esdp, ErtsMonotonicTime timeout_pos)
 static ERTS_INLINE int
 proc_timeout_common(Process *proc, void *tmr)
 {
-    if (tmr == (void *) erts_smp_atomic_cmpxchg_mb(&proc->common.timer,
+    if (tmr == (void *) erts_atomic_cmpxchg_mb(&proc->common.timer,
                                                    ERTS_PTMR_TIMEDOUT,
                                                    (erts_aint_t) tmr)) {
         erts_aint32_t state;
-        erts_smp_proc_lock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE);
-        state = erts_smp_atomic32_read_acqb(&proc->state);
-        erts_smp_proc_unlock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE);
+        erts_proc_lock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE);
+        state = erts_atomic32_read_acqb(&proc->state);
+        erts_proc_unlock(proc, ERTS_PROC_LOCKS_MSG_RECEIVE);
         if (!(state & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_EXITING)))
             erts_schedule_process(proc, state, 0);
         return 1;
@@ -793,7 +793,7 @@ proc_timeout_common(Process *proc, void *tmr)
 static ERTS_INLINE int
 port_timeout_common(Port *port, void *tmr)
 {
-    if (tmr == (void *) erts_smp_atomic_cmpxchg_mb(&port->common.timer,
+    if (tmr == (void *) erts_atomic_cmpxchg_mb(&port->common.timer,
                                                    ERTS_PTMR_TIMEDOUT,
                                                    (erts_aint_t) tmr)) {
         erts_port_task_schedule(port->common.id,
@@ -806,24 +806,24 @@ port_timeout_common(Port *port, void *tmr)

 #ifdef ERTS_MAGIC_REF_BIF_TIMERS

-static erts_smp_atomic_t *
+static erts_atomic_t *
 mbin_to_btmref__(ErtsMagicBinary *mbin)
 {
-    return erts_smp_binary_to_magic_indirection((Binary *) mbin);
+    return erts_binary_to_magic_indirection((Binary *) mbin);
 }

 static ERTS_INLINE void
 magic_binary_init(ErtsMagicBinary *mbin, ErtsBifTimer *tmr)
 {
-    erts_smp_atomic_t *aptr = mbin_to_btmref__(mbin);
-    erts_smp_atomic_init_nob(aptr, (erts_aint_t) tmr);
+    erts_atomic_t *aptr = mbin_to_btmref__(mbin);
+    erts_atomic_init_nob(aptr, (erts_aint_t) tmr);
 }

 static ERTS_INLINE ErtsBifTimer *
 magic_binary_to_btm(ErtsMagicBinary *mbin)
 {
-    erts_smp_atomic_t *aptr = mbin_to_btmref__(mbin);
-    ErtsBifTimer *tmr = (ErtsBifTimer *) erts_smp_atomic_read_nob(aptr);
+    erts_atomic_t *aptr = mbin_to_btmref__(mbin);
+    ErtsBifTimer *tmr = (ErtsBifTimer *) erts_atomic_read_nob(aptr);
     ERTS_HLT_ASSERT(!tmr || tmr->btm.mbin == mbin);
     return tmr;
 }
@@ -869,7 +869,7 @@ init_btm_specifics(ErtsSchedulerData *esdp,
     btm_rbt_insert(&esdp->timer_service->btm_tree, tmr);
 #endif

-    erts_smp_atomic32_init_nob(&tmr->btm.state, ERTS_TMR_STATE_ACTIVE);
+    erts_atomic32_init_nob(&tmr->btm.state, ERTS_TMR_STATE_ACTIVE);
     return refc; /* refc from magic binary... */
 }
@@ -902,10 +902,10 @@ timer_pre_dec_refc(ErtsTimer *tmr)
 {
 #ifdef ERTS_HLT_DEBUG
     erts_aint_t refc;
-    refc = erts_smp_atomic32_dec_read_nob(&tmr->head.refc);
+    refc = erts_atomic32_dec_read_nob(&tmr->head.refc);
     ERTS_HLT_ASSERT(refc > 0);
 #else
-    erts_smp_atomic32_dec_nob(&tmr->head.refc);
+    erts_atomic32_dec_nob(&tmr->head.refc);
 #endif
 }
@@ -954,7 +954,7 @@ schedule_tw_timer_destroy(ErtsTWTimer *tmr)
 static ERTS_INLINE void
 tw_timer_dec_refc(ErtsTWTimer *tmr)
 {
-    if (erts_smp_atomic32_dec_read_relb(&tmr->head.refc) == 0) {
+    if (erts_atomic32_dec_read_relb(&tmr->head.refc) == 0) {
         ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
         schedule_tw_timer_destroy(tmr);
     }
@@ -1099,7 +1099,7 @@ create_tw_timer(ErtsSchedulerData *esdp,
             return NULL;
     }

-    erts_smp_atomic32_init_nob(&tmr->head.refc, refc);
+    erts_atomic32_init_nob(&tmr->head.refc, refc);

     erts_twheel_set_timer(esdp->timer_wheel,
                           &tmr->u.tw_tmr,
@@ -1132,7 +1132,7 @@ schedule_hl_timer_destroy(ErtsHLTimer *tmr, Uint32 roflgs)
      * at once...
      */

-    ERTS_HLT_ASSERT(erts_smp_atomic32_read_nob(&tmr->head.refc) == 0);
+    ERTS_HLT_ASSERT(erts_atomic32_read_nob(&tmr->head.refc) == 0);

     if (roflgs & ERTS_TMR_ROFLG_REG_NAME) {
         ERTS_HLT_ASSERT(is_atom(tmr->head.receiver.name));
@@ -1164,7 +1164,7 @@ schedule_hl_timer_destroy(ErtsHLTimer *tmr, Uint32 roflgs)
 static ERTS_INLINE void
 hl_timer_dec_refc(ErtsHLTimer *tmr, Uint32 roflgs)
 {
-    if (erts_smp_atomic32_dec_read_relb(&tmr->head.refc) == 0) {
+    if (erts_atomic32_dec_read_relb(&tmr->head.refc) == 0) {
         ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
         schedule_hl_timer_destroy(tmr, roflgs);
     }
@@ -1202,14 +1202,14 @@ bif_timer_ref_destructor(Binary *unused)
 static ERTS_INLINE void
 btm_clear_magic_binary(ErtsBifTimer *tmr)
 {
-    erts_smp_atomic_t *aptr = mbin_to_btmref__(tmr->btm.mbin);
+    erts_atomic_t *aptr = mbin_to_btmref__(tmr->btm.mbin);
     Uint32 roflgs = tmr->type.head.roflgs;
 #ifdef ERTS_HLT_DEBUG
-    erts_aint_t tval = erts_smp_atomic_xchg_nob(aptr,
-                                                (erts_aint_t) NULL);
+    erts_aint_t tval = erts_atomic_xchg_nob(aptr,
+                                            (erts_aint_t) NULL);
     ERTS_HLT_ASSERT(tval == (erts_aint_t) tmr);
 #else
-    erts_smp_atomic_set_nob(aptr, (erts_aint_t) NULL);
+    erts_atomic_set_nob(aptr, (erts_aint_t) NULL);
 #endif
     if (roflgs & ERTS_TMR_ROFLG_HLT)
         hl_timer_dec_refc(&tmr->type.hlt, roflgs);
@@ -1229,7 +1229,7 @@ bif_timer_timeout(ErtsHLTimerService *srv,
     ERTS_HLT_ASSERT(tmr->type.head.roflgs == roflgs);
     ERTS_HLT_ASSERT(roflgs & ERTS_TMR_ROFLG_BIF_TMR);

-    state = erts_smp_atomic32_cmpxchg_acqb(&tmr->btm.state,
+    state = erts_atomic32_cmpxchg_acqb(&tmr->btm.state,
                                            ERTS_TMR_STATE_TIMED_OUT,
                                            ERTS_TMR_STATE_ACTIVE);
@@ -1263,7 +1263,7 @@ bif_timer_timeout(ErtsHLTimerService *srv,
                 tmr->btm.bp = NULL;
                 erts_queue_message(proc, 0, mp, tmr->btm.message, am_clock_service);

-                erts_smp_proc_lock(proc, ERTS_PROC_LOCK_BTM);
+                erts_proc_lock(proc, ERTS_PROC_LOCK_BTM);
 #ifdef ERTS_MAGIC_REF_BIF_TIMERS
                 if (tmr->btm.proc_list.next) {
                     proc_btm_list_delete(&proc->bif_timers, tmr);
@@ -1276,7 +1276,7 @@ bif_timer_timeout(ErtsHLTimerService *srv,
                     dec_refc = 1;
                 }
 #endif
-                erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
+                erts_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
                 if (dec_refc)
                     timer_pre_dec_refc((ErtsTimer *) tmr);
             }
@@ -1414,7 +1414,7 @@ create_hl_timer(ErtsSchedulerData *esdp,
     }

     tmr->head.roflgs = roflgs;
-    erts_smp_atomic32_init_nob(&tmr->head.refc, refc);
+    erts_atomic32_init_nob(&tmr->head.refc, refc);

     if (!srv->next_timeout
         || tmr->timeout < srv->next_timeout->timeout) {
@@ -1971,7 +1971,7 @@ setup_bif_timer(Process *c_p, int twheel, ErtsMonotonicTime timeout_pos,
 #else
             proc_btm_rbt_insert(&proc->bif_timers, tmr);
 #endif
-            erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
+            erts_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
             tmr->type.head.receiver.proc = proc;
         }
     }
@@ -1992,7 +1992,7 @@ cancel_bif_timer(ErtsBifTimer *tmr)
     Uint32 roflgs;
     int res;

-    state = erts_smp_atomic32_cmpxchg_acqb(&tmr->btm.state,
+    state = erts_atomic32_cmpxchg_acqb(&tmr->btm.state,
                                            ERTS_TMR_STATE_CANCELED,
                                            ERTS_TMR_STATE_ACTIVE);
     if (state != ERTS_TMR_STATE_ACTIVE)
@@ -2014,7 +2014,7 @@ cancel_bif_timer(ErtsBifTimer *tmr)
         proc = tmr->type.head.receiver.proc;
         ERTS_HLT_ASSERT(!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_REG_NAME));

-        erts_smp_proc_lock(proc, ERTS_PROC_LOCK_BTM);
+        erts_proc_lock(proc, ERTS_PROC_LOCK_BTM);
         /*
          * If process is exiting, let it clean up
          * the btm tree by itself (it may be in
@@ -2033,7 +2033,7 @@ cancel_bif_timer(ErtsBifTimer *tmr)
             res = 1;
         }
 #endif
-        erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
+        erts_proc_unlock(proc, ERTS_PROC_LOCK_BTM);
     }

     return res;
@@ -2056,7 +2056,7 @@ access_btm(ErtsBifTimer *tmr, Uint32 sid, ErtsSchedulerData *esdp, int cancel)
                : erts_tweel_read_timeout(&tmr->type.twt.u.tw_tmr));

     if (!cancel) {
-        erts_aint32_t state = erts_smp_atomic32_read_acqb(&tmr->btm.state);
+        erts_aint32_t state = erts_atomic32_read_acqb(&tmr->btm.state);
         if (state == ERTS_TMR_STATE_ACTIVE)
             return get_time_left(esdp, timeout);
         return -1;
@@ -2150,7 +2150,7 @@ send_async_info(Process *proc, ErtsProcLocks initial_locks,
     locks &= ~initial_locks;

     if (locks)
-        erts_smp_proc_unlock(proc, locks);
+        erts_proc_unlock(proc, locks);

     return am_ok;
 }
@@ -2236,7 +2236,7 @@ send_sync_info(Process *proc, ErtsProcLocks initial_locks,
     locks &= ~initial_locks;

     if (locks)
-        erts_smp_proc_unlock(proc, locks);
+        erts_proc_unlock(proc, locks);

     return am_ok;
 }
@@ -2350,9 +2350,9 @@ try_access_sched_remote_btm(ErtsSchedulerData *esdp,
      * Check if the timer is aimed at current
      * process...
      */
-    erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_BTM);
+    erts_proc_lock(c_p, ERTS_PROC_LOCK_BTM);
     tmr = proc_btm_rbt_lookup(c_p->bif_timers, trefn);
-    erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_BTM);
+    erts_proc_unlock(c_p, ERTS_PROC_LOCK_BTM);
     if (!tmr)
         return 0;
@@ -2393,7 +2393,7 @@ no_timer_result(Process *c_p, Eterm tref, int cancel, int async, int info)
         erts_queue_message(c_p, locks, mp, msg, am_clock_service);
         locks &= ~ERTS_PROC_LOCK_MAIN;
         if (locks)
-            erts_smp_proc_unlock(c_p, locks);
+            erts_proc_unlock(c_p, locks);
     }
     return am_ok;
 }
@@ -2469,7 +2469,7 @@ access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info)
         req->rrefn[1] = rrefn[1];
         req->rrefn[2] = rrefn[2];

-        erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+        erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);

         if (ERTS_PROC_PENDING_EXIT(c_p))
             ERTS_VBUMP_ALL_REDS(c_p);
@@ -2487,10 +2487,10 @@ access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info)
              * otherwise, next receive will *not* work
              * as expected!
              */
-            ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
+            ERTS_MSGQ_MV_INQ2PRIVQ(c_p);
             c_p->msg.save = c_p->msg.last;
         }
-        erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
+        erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);

         ERTS_BIF_PREP_TRAP1(ret, erts_await_result, c_p, rref);
     }
@@ -2580,7 +2580,7 @@ exit_cancel_bif_timer(ErtsBifTimer *tmr, void *vesdp)
     erts_aint_t state;
     int is_hlt;

-    state = erts_smp_atomic32_cmpxchg_acqb(&tmr->btm.state,
+    state = erts_atomic32_cmpxchg_acqb(&tmr->btm.state,
                                            ERTS_TMR_STATE_CANCELED,
                                            ERTS_TMR_STATE_ACTIVE);
@@ -2966,7 +2966,7 @@ set_proc_timer_common(Process *c_p, ErtsSchedulerData *esdp, Sint64 tmo,
                                     ERTS_TMR_PROC, (void *) c_p,
                                     c_p->common.id, THE_NON_VALUE,
                                     NULL, NULL, NULL);
-        erts_smp_atomic_set_relb(&c_p->common.timer, (erts_aint_t) tmr);
+        erts_atomic_set_relb(&c_p->common.timer, (erts_aint_t) tmr);
     }
 }
@@ -2977,7 +2977,7 @@ erts_set_proc_timer_term(Process *c_p, Eterm etmo)
     ErtsMonotonicTime tmo, timeout_pos;
     int short_time, tres;

-    ERTS_HLT_ASSERT(erts_smp_atomic_read_nob(&c_p->common.timer)
+    ERTS_HLT_ASSERT(erts_atomic_read_nob(&c_p->common.timer)
                     == ERTS_PTMR_NONE);

     tres = parse_timeout_pos(esdp, etmo, &tmo, 0,
@@ -2997,7 +2997,7 @@ erts_set_proc_timer_uword(Process *c_p, UWord tmo)
 {
     ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);

-    ERTS_HLT_ASSERT(erts_smp_atomic_read_nob(&c_p->common.timer)
+    ERTS_HLT_ASSERT(erts_atomic_read_nob(&c_p->common.timer)
                     == ERTS_PTMR_NONE);

 #ifndef ARCH_32
@@ -3020,13 +3020,13 @@ void
 erts_cancel_proc_timer(Process *c_p)
 {
     erts_aint_t tval;
-    tval = erts_smp_atomic_xchg_acqb(&c_p->common.timer,
+    tval = erts_atomic_xchg_acqb(&c_p->common.timer,
                                      ERTS_PTMR_NONE);
     c_p->flags &= ~(F_INSLPQUEUE|F_TIMO);
     if (tval == ERTS_PTMR_NONE)
         return;
     if (tval == ERTS_PTMR_TIMEDOUT) {
-        erts_smp_atomic_set_nob(&c_p->common.timer, ERTS_PTMR_NONE);
+        erts_atomic_set_nob(&c_p->common.timer, ERTS_PTMR_NONE);
         return;
     }
     continue_cancel_ptimer(erts_proc_sched_data(c_p),
@@ -3041,7 +3041,7 @@ erts_set_port_timer(Port *c_prt, Sint64 tmo)
     ErtsMonotonicTime timeout_pos;
     ErtsCreateTimerFunc create_timer;

-    if (erts_smp_atomic_read_nob(&c_prt->common.timer) != ERTS_PTMR_NONE)
+    if (erts_atomic_read_nob(&c_prt->common.timer) != ERTS_PTMR_NONE)
         erts_cancel_port_timer(c_prt);

     check_canceled_queue(esdp, esdp->timer_service);
@@ -3054,14 +3054,14 @@ erts_set_port_timer(Port *c_prt, Sint64 tmo)
     tmr = (void *) create_timer(esdp, timeout_pos, 0, ERTS_TMR_PORT,
                                 (void *) c_prt, c_prt->common.id,
                                 THE_NON_VALUE, NULL, NULL, NULL);
-    erts_smp_atomic_set_relb(&c_prt->common.timer, (erts_aint_t) tmr);
+    erts_atomic_set_relb(&c_prt->common.timer, (erts_aint_t) tmr);
 }

 void
 erts_cancel_port_timer(Port *c_prt)
 {
     erts_aint_t tval;
-    tval = erts_smp_atomic_xchg_acqb(&c_prt->common.timer,
+    tval = erts_atomic_xchg_acqb(&c_prt->common.timer,
                                      ERTS_PTMR_NONE);
     if (tval == ERTS_PTMR_NONE)
         return;
@@ -3069,7 +3069,7 @@ erts_cancel_port_timer(Port *c_prt)
         while (!erts_port_task_is_scheduled(&c_prt->timeout_task))
             erts_thr_yield();
         erts_port_task_abort(&c_prt->timeout_task);
-        erts_smp_atomic_set_nob(&c_prt->common.timer, ERTS_PTMR_NONE);
+        erts_atomic_set_nob(&c_prt->common.timer, ERTS_PTMR_NONE);
         return;
     }
     continue_cancel_ptimer(erts_get_scheduler_data(),
@@ -3083,7 +3083,7 @@ erts_read_port_timer(Port *c_prt)
     erts_aint_t itmr;
     ErtsMonotonicTime timeout_pos;

-    itmr = erts_smp_atomic_read_acqb(&c_prt->common.timer);
+    itmr = erts_atomic_read_acqb(&c_prt->common.timer);
     if (itmr == ERTS_PTMR_NONE)
         return (Sint64) -1;
     if (itmr == ERTS_PTMR_TIMEDOUT)
@@ -3220,7 +3220,7 @@ debug_btm_foreach(ErtsBifTimer *tmr, void *vbtmfd)
     if (!(tmr->type.head.roflgs & ERTS_TMR_ROFLG_BIF_TMR))
         return;
 #endif
-    if (erts_smp_atomic32_read_nob(&tmr->btm.state) == ERTS_TMR_STATE_ACTIVE) {
+    if (erts_atomic32_read_nob(&tmr->btm.state) == ERTS_TMR_STATE_ACTIVE) {
         ErtsBTMForeachDebug *btmfd = (ErtsBTMForeachDebug *) vbtmfd;
         Eterm id = ((tmr->type.head.roflgs & ERTS_TMR_ROFLG_REG_NAME)
                     ? tmr->type.head.receiver.name
@@ -3258,7 +3258,7 @@ erts_debug_bif_timer_foreach(void (*func)(Eterm,
     btmfd.func = func;
     btmfd.arg = arg;

-    if (!erts_smp_thr_progress_is_blocking())
+    if (!erts_thr_progress_is_blocking())
         ERTS_INTERNAL_ERROR("Not blocking thread progress");

     for (six = 0; six < erts_no_schedulers; six++) {
@@ -3349,7 +3349,7 @@ erts_debug_callback_timer_foreach(void (*tclbk)(void *),
     dfct.func = func;
     dfct.arg = arg;

-    if (!erts_smp_thr_progress_is_blocking())
+    if (!erts_thr_progress_is_blocking())
         ERTS_INTERNAL_ERROR("Not blocking thread progress");

     for (six = 0; six < erts_no_schedulers; six++) {
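[Editor's aside, not part of the commit: cancel_bif_timer() and bif_timer_timeout() above race each other through a single compare-exchange on btm.state — exactly one of them moves the timer out of ACTIVE and thereby owns its cleanup. A minimal C11 sketch of that race resolution, with stand-in state names:]

/* Sketch: exactly one of cancel/timeout wins the ACTIVE state
 * (cf. erts_atomic32_cmpxchg_acqb above); the loser backs off. */
#include <stdatomic.h>
#include <stdio.h>

enum { TMR_ACTIVE, TMR_CANCELED, TMR_TIMED_OUT };

static atomic_int tmr_state = TMR_ACTIVE;

static int cancel_timer(void)
{
    int expected = TMR_ACTIVE;
    /* acquire on success: the winner now owns the timer's cleanup */
    return atomic_compare_exchange_strong_explicit(
        &tmr_state, &expected, TMR_CANCELED,
        memory_order_acquire, memory_order_relaxed);
}

int main(void)
{
    printf("first cancel %s\n", cancel_timer() ? "won" : "lost");
    printf("second cancel %s\n", cancel_timer() ? "won" : "lost");
    return 0;
}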
diff --git a/erts/emulator/beam/erl_hl_timer.h b/erts/emulator/beam/erl_hl_timer.h
index 852bf88174..e6f5e8b67d 100644
--- a/erts/emulator/beam/erl_hl_timer.h
+++ b/erts/emulator/beam/erl_hl_timer.h
@@ -36,16 +36,16 @@ typedef struct ErtsHLTimerService_ ErtsHLTimerService;
 #define ERTS_PTMR_TIMEDOUT (ERTS_PTMR_NONE + ((erts_aint_t) 1))

 #define ERTS_PTMR_INIT(P) \
-    erts_smp_atomic_init_nob(&(P)->common.timer, ERTS_PTMR_NONE)
+    erts_atomic_init_nob(&(P)->common.timer, ERTS_PTMR_NONE)
 #define ERTS_PTMR_IS_SET(P) \
-    (ERTS_PTMR_NONE != erts_smp_atomic_read_nob(&(P)->common.timer))
+    (ERTS_PTMR_NONE != erts_atomic_read_nob(&(P)->common.timer))
 #define ERTS_PTMR_IS_TIMED_OUT(P) \
-    (ERTS_PTMR_TIMEDOUT == erts_smp_atomic_read_nob(&(P)->common.timer))
+    (ERTS_PTMR_TIMEDOUT == erts_atomic_read_nob(&(P)->common.timer))

 #define ERTS_PTMR_CLEAR(P)                              \
     do {                                                \
         ASSERT(ERTS_PTMR_IS_TIMED_OUT((P)));            \
-        erts_smp_atomic_set_nob(&(P)->common.timer,     \
+        erts_atomic_set_nob(&(P)->common.timer,         \
                                 ERTS_PTMR_NONE);        \
     } while (0)
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index fe7f09e5fa..34affaa015 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -148,7 +148,7 @@ static void erl_init(int ncpu,

 static erts_atomic_t exiting;

-erts_smp_atomic32_t erts_writing_erl_crash_dump;
+erts_atomic32_t erts_writing_erl_crash_dump;
 erts_tsd_key_t erts_is_crash_dumping_key;

 int erts_initialized = 0;
@@ -170,7 +170,7 @@ int erts_backtrace_depth;  /* How many functions to show in a backtrace
                             * in error codes.
                             */

-erts_smp_atomic32_t erts_max_gen_gcs;
+erts_atomic32_t erts_max_gen_gcs;

 Eterm erts_error_logger_warnings; /* What to map warning logs to,
                                      am_error, am_info or am_warning, am_error is
@@ -395,7 +395,7 @@ erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char**
      */
     erts_init_empty_process(&parent);

-    erts_smp_proc_lock(&parent, ERTS_PROC_LOCK_MAIN);
+    erts_proc_lock(&parent, ERTS_PROC_LOCK_MAIN);
     hp = HAlloc(&parent, argc*2 + 4);
     args = NIL;
     for (i = argc-1; i >= 0; i--) {
@@ -410,7 +410,7 @@ erl_first_process_otp(char* modname, void* code, unsigned size, int argc, char**
     so.flags = erts_default_spo_flags|SPO_SYSTEM_PROC;
     res = erl_create_process(&parent, start_mod, am_start, args, &so);
-    erts_smp_proc_unlock(&parent, ERTS_PROC_LOCK_MAIN);
+    erts_proc_unlock(&parent, ERTS_PROC_LOCK_MAIN);
     erts_cleanup_empty_process(&parent);
     return res;
 }
@@ -435,7 +435,7 @@ erl_system_process_otp(Eterm parent_pid, char* modname, int off_heap_msgq)
     if (off_heap_msgq)
         so.flags |= SPO_OFF_HEAP_MSGQ;
     res = erl_create_process(parent, start_mod, am_start, NIL, &so);
-    erts_smp_proc_unlock(parent, ERTS_PROC_LOCK_MAIN);
+    erts_proc_unlock(parent, ERTS_PROC_LOCK_MAIN);
     return res;
 }
@@ -774,10 +774,10 @@ early_init(int *argc, char **argv) /*
     erts_atomic_init_nob(&exiting, 0);
     erts_thr_progress_pre_init();

-    erts_smp_atomic32_init_nob(&erts_writing_erl_crash_dump, 0L);
+    erts_atomic32_init_nob(&erts_writing_erl_crash_dump, 0L);
     erts_tsd_key_create(&erts_is_crash_dumping_key,"erts_is_crash_dumping_key");

-    erts_smp_atomic32_init_nob(&erts_max_gen_gcs,
+    erts_atomic32_init_nob(&erts_max_gen_gcs,
                                (erts_aint32_t) ((Uint16) -1));

     erts_pre_init_process();
@@ -1222,7 +1222,7 @@ erl_start(int argc, char **argv)
     envbufsz = sizeof(envbuf);
     if (erts_sys_getenv_raw("ERL_FULLSWEEP_AFTER", envbuf, &envbufsz) == 0) {
         Uint16 max_gen_gcs = atoi(envbuf);
-        erts_smp_atomic32_set_nob(&erts_max_gen_gcs,
+        erts_atomic32_set_nob(&erts_max_gen_gcs,
                                   (erts_aint32_t) max_gen_gcs);
     }
diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
index ca5a72b183..cad9bf74c7 100644
--- a/erts/emulator/beam/erl_lock_check.h
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -116,9 +116,7 @@ int erts_lc_is_emu_thr(void);
 #define ERTS_LC_ASSERT(A) \
     ((void) (((A) || ERTS_SOMEONE_IS_CRASH_DUMPING) ? 1 : erts_lc_assert_failed(__FILE__, __LINE__, #A)))
-#define ERTS_SMP_LC_ASSERT(A) ERTS_LC_ASSERT(A)
 #else /* #ifdef ERTS_ENABLE_LOCK_CHECK */
-#define ERTS_SMP_LC_ASSERT(A) ((void) 1)
 #define ERTS_LC_ASSERT(A) ((void) 1)
 #endif /* #ifdef ERTS_ENABLE_LOCK_CHECK */
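[Editor's aside, not part of the commit: with the ERTS_SMP_LC_ASSERT alias gone above, only ERTS_LC_ASSERT remains — an assert that reports and dies when lock checking is compiled in and costs nothing otherwise. A small generic sketch of that shape, with stand-in macro names:]

/* Sketch: a compile-time-switchable assert like ERTS_LC_ASSERT. */
#include <stdio.h>
#include <stdlib.h>

#ifdef ENABLE_LOCK_CHECK   /* stand-in for ERTS_ENABLE_LOCK_CHECK */
#define LC_ASSERT(A) \
    ((void) ((A) ? 1 : (fprintf(stderr, "%s:%d: assertion failed: %s\n", \
                                __FILE__, __LINE__, #A), abort(), 0)))
#else
#define LC_ASSERT(A) ((void) 1)   /* compiles away in normal builds */
#endif

int main(void)
{
    LC_ASSERT(1 + 1 == 2);        /* only checked when enabled */
    puts("ok");
    return 0;
}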
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index d4e9ff7d18..3418a7f4df 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -170,7 +170,7 @@ erts_cleanup_offheap(ErlOffHeap *offheap)
             erts_bin_release(u.pb->val);
             break;
         case FUN_SUBTAG:
-            if (erts_smp_refc_dectest(&u.fun->fe->refc, 0) == 0) {
+            if (erts_refc_dectest(&u.fun->fe->refc, 0) == 0) {
                 erts_erase_fun_entry(u.fun->fe);
             }
             break;
@@ -267,7 +267,7 @@ erts_queue_dist_message(Process *rcvr,
 #endif
     erts_aint_t state;

-    ERTS_SMP_LC_ASSERT(rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));
+    ERTS_LC_ASSERT(rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));

     mp = erts_alloc_message(0, NULL);
     mp->data.dist_ext = dist_ext;
@@ -282,22 +282,22 @@ erts_queue_dist_message(Process *rcvr,
         ERL_MESSAGE_TOKEN(mp) = token;

     if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ)) {
-        if (erts_smp_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
+        if (erts_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
             ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
             ErtsProcLocks unlocks =
                 rcvr_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ);
             if (unlocks) {
-                erts_smp_proc_unlock(rcvr, unlocks);
+                erts_proc_unlock(rcvr, unlocks);
                 need_locks |= unlocks;
             }
-            erts_smp_proc_lock(rcvr, need_locks);
+            erts_proc_lock(rcvr, need_locks);
         }
     }

-    state = erts_smp_atomic32_read_acqb(&rcvr->state);
+    state = erts_atomic32_read_acqb(&rcvr->state);
     if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
         if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
-            erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
+            erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
         /* Drop message if receiver is exiting or has a pending exit ... */
         erts_cleanup_messages(mp);
     }
@@ -308,7 +308,7 @@ erts_queue_dist_message(Process *rcvr,
         /* Ahh... need to decode it in order to trace it... */
         if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
-            erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
+            erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
         if (!erts_decode_dist_message(rcvr, rcvr_locks, mp, 0))
             erts_free_message(mp);
         else {
@@ -357,7 +357,7 @@ erts_queue_dist_message(Process *rcvr,
         LINK_MESSAGE(rcvr, mp, &mp->next, 1);

         if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
-            erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
+            erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);

         erts_proc_notify_new_message(rcvr,
                                      rcvr_locks
@@ -386,39 +386,39 @@ queue_messages(Process* receiver,
            is_tuple(ERL_MESSAGE_TOKEN(first)));

 #ifdef ERTS_ENABLE_LOCK_CHECK
-    ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(receiver) < ERTS_PROC_LOCK_MSGQ ||
+    ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(receiver) < ERTS_PROC_LOCK_MSGQ ||
                        receiver_locks == erts_proc_lc_my_proc_locks(receiver));
 #endif

     if (!(receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
-        if (erts_smp_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
+        if (erts_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
             ErtsProcLocks need_locks;

             if (receiver_state)
                 state = *receiver_state;
             else
-                state = erts_smp_atomic32_read_nob(&receiver->state);
+                state = erts_atomic32_read_nob(&receiver->state);
             if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
                 goto exiting;

             need_locks = receiver_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ);
             if (need_locks) {
-                erts_smp_proc_unlock(receiver, need_locks);
+                erts_proc_unlock(receiver, need_locks);
             }
             need_locks |= ERTS_PROC_LOCK_MSGQ;
-            erts_smp_proc_lock(receiver, need_locks);
+            erts_proc_lock(receiver, need_locks);
         }
         locked_msgq = 1;
     }

-    state = erts_smp_atomic32_read_nob(&receiver->state);
+    state = erts_atomic32_read_nob(&receiver->state);

     if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
     exiting:
         /* Drop message if receiver is exiting or has a pending exit... */
         if (locked_msgq)
-            erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
+            erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
         erts_cleanup_messages(first);
         return 0;
     }
@@ -434,7 +434,7 @@ queue_messages(Process* receiver,
          * the root set when garbage collecting.
          */
         res += receiver->msg_inq.len;
-        ERTS_SMP_MSGQ_MV_INQ2PRIVQ(receiver);
+        ERTS_MSGQ_MV_INQ2PRIVQ(receiver);
         LINK_MESSAGE_PRIVQ(receiver, first, last, len);
     }
     else
@@ -475,7 +475,7 @@ queue_messages(Process* receiver,
     }

     if (locked_msgq) {
-        erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
+        erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
     }

     erts_proc_notify_new_message(receiver, receiver_locks);
@@ -599,7 +599,7 @@ erts_try_alloc_message_on_heap(Process *pp,
          */
         if (locked_main) {
             *plp &= ~ERTS_PROC_LOCK_MAIN;
-            erts_smp_proc_unlock(pp, ERTS_PROC_LOCK_MAIN);
+            erts_proc_unlock(pp, ERTS_PROC_LOCK_MAIN);
         }
         goto in_message_fragment;
     }
@@ -611,9 +611,9 @@ erts_try_alloc_message_on_heap(Process *pp,
         mp->data.attached = NULL;
         *on_heap_p = !0;
     }
-    else if (pp && erts_smp_proc_trylock(pp, ERTS_PROC_LOCK_MAIN) == 0) {
+    else if (pp && erts_proc_trylock(pp, ERTS_PROC_LOCK_MAIN) == 0) {
         locked_main = 1;
-        *psp = erts_smp_atomic32_read_nob(&pp->state);
+        *psp = erts_atomic32_read_nob(&pp->state);
         *plp |= ERTS_PROC_LOCK_MAIN;
         goto try_on_heap;
     }
@@ -685,7 +685,7 @@ erts_send_message(Process* sender,
     }
 #endif

-    receiver_state = erts_smp_atomic32_read_nob(&receiver->state);
+    receiver_state = erts_atomic32_read_nob(&receiver->state);

     if (SEQ_TRACE_TOKEN(sender) != NIL && !(flags & ERTS_SND_FLG_NO_SEQ_TRACE)) {
         Eterm* hp;
@@ -934,7 +934,7 @@ erts_move_messages_off_heap(Process *c_p)

     reds += c_p->msg.len / 10;

-    ASSERT(erts_smp_atomic32_read_nob(&c_p->state)
+    ASSERT(erts_atomic32_read_nob(&c_p->state)
            & ERTS_PSFLG_OFF_HEAP_MSGQ);
     ASSERT(c_p->flags & F_OFF_HEAP_MSGQ_CHNG);
@@ -999,9 +999,9 @@ erts_complete_off_heap_message_queue_change(Process *c_p)
 {
     int reds = 1;

-    ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
+    ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
     ASSERT(c_p->flags & F_OFF_HEAP_MSGQ_CHNG);
-    ASSERT(erts_smp_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_OFF_HEAP_MSGQ);
+    ASSERT(erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_OFF_HEAP_MSGQ);

     /*
      * This job was first initiated when the process changed to off heap
@@ -1013,13 +1013,13 @@ erts_complete_off_heap_message_queue_change(Process *c_p)
      */

     if (!(c_p->flags & F_OFF_HEAP_MSGQ))
-        erts_smp_atomic32_read_band_nob(&c_p->state,
+        erts_atomic32_read_band_nob(&c_p->state,
                                         ~ERTS_PSFLG_OFF_HEAP_MSGQ);
     else {
         reds += 2;
-        erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
-        ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
-        erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
+        erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
+        ERTS_MSGQ_MV_INQ2PRIVQ(c_p);
+        erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
         reds += erts_move_messages_off_heap(c_p);
     }
     c_p->flags &= ~F_OFF_HEAP_MSGQ_CHNG;
@@ -1056,16 +1056,16 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state)

 #ifdef DEBUG
     if (c_p->flags & F_OFF_HEAP_MSGQ) {
-        ASSERT(erts_smp_atomic32_read_nob(&c_p->state)
+        ASSERT(erts_atomic32_read_nob(&c_p->state)
                & ERTS_PSFLG_OFF_HEAP_MSGQ);
     }
     else {
         if (c_p->flags & F_OFF_HEAP_MSGQ_CHNG) {
-            ASSERT(erts_smp_atomic32_read_nob(&c_p->state)
+            ASSERT(erts_atomic32_read_nob(&c_p->state)
                    & ERTS_PSFLG_OFF_HEAP_MSGQ);
         }
         else {
-            ASSERT(!(erts_smp_atomic32_read_nob(&c_p->state)
+            ASSERT(!(erts_atomic32_read_nob(&c_p->state)
                      & ERTS_PSFLG_OFF_HEAP_MSGQ));
         }
     }
@@ -1082,7 +1082,7 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state)
         case am_on_heap:
             c_p->flags |= F_ON_HEAP_MSGQ;
             c_p->flags &= ~F_OFF_HEAP_MSGQ;
-            erts_smp_atomic32_read_bor_nob(&c_p->state,
+            erts_atomic32_read_bor_nob(&c_p->state,
                                            ERTS_PSFLG_ON_HEAP_MSGQ);
             /*
              * We are not allowed to clear ERTS_PSFLG_OFF_HEAP_MSGQ
@@ -1091,7 +1091,7 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state)
              */
             if (!(c_p->flags & F_OFF_HEAP_MSGQ_CHNG)) {
                 /* Safe to clear ERTS_PSFLG_OFF_HEAP_MSGQ... */
-                erts_smp_atomic32_read_band_nob(&c_p->state,
+                erts_atomic32_read_band_nob(&c_p->state,
                                                 ~ERTS_PSFLG_OFF_HEAP_MSGQ);
             }
             break;
@@ -1109,7 +1109,7 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state)
             break;
         case am_off_heap:
             c_p->flags &= ~F_ON_HEAP_MSGQ;
-            erts_smp_atomic32_read_band_nob(&c_p->state,
+            erts_atomic32_read_band_nob(&c_p->state,
                                             ~ERTS_PSFLG_ON_HEAP_MSGQ);
             goto change_to_off_heap;
         default:
@@ -1144,7 +1144,7 @@ change_to_off_heap:
          * change has completed, GC does not need to inspect
          * the message queue at all.
          */
-        erts_smp_atomic32_read_bor_nob(&c_p->state,
+        erts_atomic32_read_bor_nob(&c_p->state,
                                        ERTS_PSFLG_OFF_HEAP_MSGQ);
         c_p->flags |= F_OFF_HEAP_MSGQ_CHNG;
         cohmq = erts_alloc(ERTS_ALC_T_MSGQ_CHNG,
@@ -1425,7 +1425,7 @@ erts_factory_message_create(ErtsHeapFactory* factory,
     int on_heap;
     erts_aint32_t state;

-    state = proc ? erts_smp_atomic32_read_nob(&proc->state) : 0;
+    state = proc ? erts_atomic32_read_nob(&proc->state) : 0;

     if (state & ERTS_PSFLG_OFF_HEAP_MSGQ) {
         msgp = erts_alloc_message(sz, &hp);
@@ -1440,7 +1440,7 @@ erts_factory_message_create(ErtsHeapFactory* factory,
     }

     if (on_heap) {
-        ERTS_SMP_ASSERT(*proc_locksp & ERTS_PROC_LOCK_MAIN);
+        ERTS_ASSERT(*proc_locksp & ERTS_PROC_LOCK_MAIN);
         ASSERT(ohp == &proc->off_heap);
         factory->mode = FACTORY_HALLOC;
         factory->p = proc;
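[Editor's aside, not part of the commit: queue_messages() above shows the deadlock-avoidance dance — trylock the MSGQ lock and, on EBUSY, release the locks that rank above it and reacquire everything in rank order. A minimal POSIX-mutex sketch of that pattern with a hypothetical two-lock hierarchy:]

/* Sketch: trylock the lower-ranked lock; on failure drop higher-ranked
 * locks and take everything in rank order (cf. erts_proc_trylock /
 * ERTS_PROC_LOCKS_HIGHER_THAN above). */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t msgq_lock   = PTHREAD_MUTEX_INITIALIZER; /* rank 1 */
static pthread_mutex_t status_lock = PTHREAD_MUTEX_INITIALIZER; /* rank 2 */

static void lock_msgq(int holds_status)
{
    if (pthread_mutex_trylock(&msgq_lock) != 0) {
        if (holds_status)
            pthread_mutex_unlock(&status_lock); /* drop higher rank */
        pthread_mutex_lock(&msgq_lock);         /* reacquire in order */
        if (holds_status)
            pthread_mutex_lock(&status_lock);
    }
}

int main(void)
{
    pthread_mutex_lock(&status_lock);  /* we hold a higher-ranked lock */
    lock_msgq(1);                      /* safe either way */
    puts("both locks held");
    pthread_mutex_unlock(&msgq_lock);
    pthread_mutex_unlock(&status_lock);
    return 0;
}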
* We are not allowed to clear ERTS_PSFLG_OFF_HEAP_MSGQ @@ -1091,7 +1091,7 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state) */ if (!(c_p->flags & F_OFF_HEAP_MSGQ_CHNG)) { /* Safe to clear ERTS_PSFLG_OFF_HEAP_MSGQ... */ - erts_smp_atomic32_read_band_nob(&c_p->state, + erts_atomic32_read_band_nob(&c_p->state, ~ERTS_PSFLG_OFF_HEAP_MSGQ); } break; @@ -1109,7 +1109,7 @@ erts_change_message_queue_management(Process *c_p, Eterm new_state) break; case am_off_heap: c_p->flags &= ~F_ON_HEAP_MSGQ; - erts_smp_atomic32_read_band_nob(&c_p->state, + erts_atomic32_read_band_nob(&c_p->state, ~ERTS_PSFLG_ON_HEAP_MSGQ); goto change_to_off_heap; default: @@ -1144,7 +1144,7 @@ change_to_off_heap: * change has completed, GC does not need to inspect * the message queue at all. */ - erts_smp_atomic32_read_bor_nob(&c_p->state, + erts_atomic32_read_bor_nob(&c_p->state, ERTS_PSFLG_OFF_HEAP_MSGQ); c_p->flags |= F_OFF_HEAP_MSGQ_CHNG; cohmq = erts_alloc(ERTS_ALC_T_MSGQ_CHNG, @@ -1425,7 +1425,7 @@ erts_factory_message_create(ErtsHeapFactory* factory, int on_heap; erts_aint32_t state; - state = proc ? erts_smp_atomic32_read_nob(&proc->state) : 0; + state = proc ? erts_atomic32_read_nob(&proc->state) : 0; if (state & ERTS_PSFLG_OFF_HEAP_MSGQ) { msgp = erts_alloc_message(sz, &hp); @@ -1440,7 +1440,7 @@ erts_factory_message_create(ErtsHeapFactory* factory, } if (on_heap) { - ERTS_SMP_ASSERT(*proc_locksp & ERTS_PROC_LOCK_MAIN); + ERTS_ASSERT(*proc_locksp & ERTS_PROC_LOCK_MAIN); ASSERT(ohp == &proc->off_heap); factory->mode = FACTORY_HALLOC; factory->p = proc; diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h index 1f95ffaa5a..9c8cf84e43 100644 --- a/erts/emulator/beam/erl_message.h +++ b/erts/emulator/beam/erl_message.h @@ -216,7 +216,7 @@ typedef struct erl_trace_message_queue__ { #define LINK_MESSAGE(p, first_msg, last_msg, len) \ LINK_MESSAGE_IMPL(p, first_msg, last_msg, len, msg_inq) -#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p) \ +#define ERTS_MSGQ_MV_INQ2PRIVQ(p) \ do { \ if (p->msg_inq.first) { \ *p->msg.last = p->msg_inq.first; \ diff --git a/erts/emulator/beam/erl_monitors.c b/erts/emulator/beam/erl_monitors.c index 3994800ba7..67c552b364 100644 --- a/erts/emulator/beam/erl_monitors.c +++ b/erts/emulator/beam/erl_monitors.c @@ -54,7 +54,7 @@ #define DIR_RIGHT 1 #define DIR_END 2 -static erts_smp_atomic_t tot_link_lh_size; +static erts_atomic_t tot_link_lh_size; /* Implements the sort order in monitor trees, which is different from the ordinary term order. 
@@ -123,7 +123,7 @@ do { \ (*((Hp)++)) = boxed_val((From))[i__]; \ if (is_external((To))) { \ external_thing_ptr((To))->next = NULL; \ - erts_smp_refc_inc(&(external_thing_ptr((To))->node->refc), 2);\ + erts_refc_inc(&(external_thing_ptr((To))->node->refc), 2);\ } \ } \ } while (0) @@ -145,7 +145,7 @@ static ErtsMonitor *create_monitor(Uint type, Eterm ref, UWord entity, Eterm nam } else { n = (ErtsMonitor *) erts_alloc(ERTS_ALC_T_MONITOR_LH, mon_size*sizeof(Uint)); - erts_smp_atomic_add_nob(&tot_link_lh_size, mon_size*sizeof(Uint)); + erts_atomic_add_nob(&tot_link_lh_size, mon_size*sizeof(Uint)); } hp = n->heap; @@ -179,7 +179,7 @@ static ErtsLink *create_link(Uint type, Eterm pid) } else { n = (ErtsLink *) erts_alloc(ERTS_ALC_T_NLINK_LH, lnk_size*sizeof(Uint)); - erts_smp_atomic_add_nob(&tot_link_lh_size, lnk_size*sizeof(Uint)); + erts_atomic_add_nob(&tot_link_lh_size, lnk_size*sizeof(Uint)); } hp = n->heap; @@ -214,13 +214,13 @@ static ErtsSuspendMonitor *create_suspend_monitor(Eterm pid) void erts_init_monitors(void) { - erts_smp_atomic_init_nob(&tot_link_lh_size, 0); + erts_atomic_init_nob(&tot_link_lh_size, 0); } Uint erts_tot_link_lh_size(void) { - return (Uint) erts_smp_atomic_read_nob(&tot_link_lh_size); + return (Uint) erts_atomic_read_nob(&tot_link_lh_size); } void erts_destroy_monitor(ErtsMonitor *mon) @@ -245,7 +245,7 @@ void erts_destroy_monitor(ErtsMonitor *mon) erts_free(ERTS_ALC_T_MONITOR_SH, (void *) mon); } else { erts_free(ERTS_ALC_T_MONITOR_LH, (void *) mon); - erts_smp_atomic_add_nob(&tot_link_lh_size, -1*mon_size*sizeof(Uint)); + erts_atomic_add_nob(&tot_link_lh_size, -1*mon_size*sizeof(Uint)); } } @@ -267,7 +267,7 @@ void erts_destroy_link(ErtsLink *lnk) erts_free(ERTS_ALC_T_NLINK_SH, (void *) lnk); } else { erts_free(ERTS_ALC_T_NLINK_LH, (void *) lnk); - erts_smp_atomic_add_nob(&tot_link_lh_size, -1*lnk_size*sizeof(Uint)); + erts_atomic_add_nob(&tot_link_lh_size, -1*lnk_size*sizeof(Uint)); } } @@ -985,13 +985,13 @@ Eterm erts_debug_dump_monitors_1(BIF_ALIST_1) DistEntry *dep; rp = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, pid, ERTS_PROC_LOCK_LINK); if (!rp) { - ERTS_SMP_ASSERT_IS_NOT_EXITING(p); + ERTS_ASSERT_IS_NOT_EXITING(p); if (is_atom(pid) && is_node_name_atom(pid) && (dep = erts_find_dist_entry(pid)) != NULL) { erts_printf("Dumping dist monitors-------------------\n"); - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); erts_dump_monitors(dep->monitors,0); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); erts_printf("Monitors dumped-------------------------\n"); erts_deref_dist_entry(dep); BIF_RET(am_true); @@ -1002,7 +1002,7 @@ Eterm erts_debug_dump_monitors_1(BIF_ALIST_1) erts_printf("Dumping pid monitors--------------------\n"); erts_dump_monitors(ERTS_P_MONITORS(rp),0); erts_printf("Monitors dumped-------------------------\n"); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); BIF_RET(am_true); } } @@ -1030,13 +1030,13 @@ Eterm erts_debug_dump_links_1(BIF_ALIST_1) } else { rp = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN, pid, ERTS_PROC_LOCK_LINK); if (!rp) { - ERTS_SMP_ASSERT_IS_NOT_EXITING(p); + ERTS_ASSERT_IS_NOT_EXITING(p); if (is_atom(pid) && is_node_name_atom(pid) && (dep = erts_find_dist_entry(pid)) != NULL) { erts_printf("Dumping dist links----------------------\n"); - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); erts_dump_links(dep->nlinks,0); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); erts_printf("Links dumped----------------------------\n"); 
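/*
 * [Editor's note] Calls such as erts_refc_inc(&...->node->refc, 2) above
 * take an expected-minimum argument: in debug builds the reference count
 * is checked never to fall below it after the operation. A sketch of
 * that idea under C11 <stdatomic.h>; refc_t and its functions are
 * illustrative stand-ins, not the ERTS implementation.
 */
#include <assert.h>
#include <stdatomic.h>

typedef struct { atomic_long counter; } refc_t;

static void refc_init(refc_t *r, long initial)
{
    atomic_init(&r->counter, initial);
}

static void refc_inc(refc_t *r, long min_val)
{
    long val = atomic_fetch_add(&r->counter, 1) + 1;
    assert(val >= min_val);  /* e.g. '2' when caller + table both hold refs */
    (void) val;
    (void) min_val;
}

/* Decrement and return the new value, checking the same lower bound. */
static long refc_dectest(refc_t *r, long min_val)
{
    long val = atomic_fetch_sub(&r->counter, 1) - 1;
    assert(val >= min_val);
    (void) min_val;
    return val;
}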
erts_deref_dist_entry(dep); BIF_RET(am_true); @@ -1048,7 +1048,7 @@ Eterm erts_debug_dump_links_1(BIF_ALIST_1) erts_printf("Dumping pid links-----------------------\n"); erts_dump_links(ERTS_P_LINKS(rp), 0); erts_printf("Links dumped----------------------------\n"); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); BIF_RET(am_true); } } diff --git a/erts/emulator/beam/erl_msacc.c b/erts/emulator/beam/erl_msacc.c index 957358bcf5..d659842b7e 100644 --- a/erts/emulator/beam/erl_msacc.c +++ b/erts/emulator/beam/erl_msacc.c @@ -202,7 +202,7 @@ typedef struct { Eterm ref; Eterm ref_heap[ERTS_REF_THING_SIZE]; Uint req_sched; - erts_smp_atomic32_t refc; + erts_atomic32_t refc; } ErtsMSAccReq; static ErtsMsAcc* get_msacc(void) { @@ -253,7 +253,7 @@ static void send_reply(ErtsMsAcc *msacc, ErtsMSAccReq *msaccrp) { rp_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } @@ -289,7 +289,7 @@ reply_msacc(void *vmsaccrp) erts_proc_dec_refc(msaccrp->proc); - if (erts_smp_atomic32_dec_read_nob(&msaccrp->refc) == 0) + if (erts_atomic32_dec_read_nob(&msaccrp->refc) == 0) erts_free(ERTS_ALC_T_MSACC, vmsaccrp); } @@ -359,7 +359,7 @@ erts_msacc_request(Process *c_p, int action, Eterm *threads) *threads = erts_no_schedulers; *threads += 1; /* aux thread */ - erts_smp_atomic32_init_nob(&msaccrp->refc,(erts_aint32_t)*threads); + erts_atomic32_init_nob(&msaccrp->refc,(erts_aint32_t)*threads); erts_proc_add_refc(c_p, *threads); diff --git a/erts/emulator/beam/erl_nfunc_sched.c b/erts/emulator/beam/erl_nfunc_sched.c index 1bebc1eda4..f97e86bf95 100644 --- a/erts/emulator/beam/erl_nfunc_sched.c +++ b/erts/emulator/beam/erl_nfunc_sched.c @@ -113,7 +113,7 @@ erts_nif_export_schedule(Process *c_p, Process *dirty_shadow_proc, NifExport* nep; int i; - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); if (dirty_shadow_proc) { diff --git a/erts/emulator/beam/erl_nfunc_sched.h b/erts/emulator/beam/erl_nfunc_sched.h index 55a3a6dbf6..69008084df 100644 --- a/erts/emulator/beam/erl_nfunc_sched.h +++ b/erts/emulator/beam/erl_nfunc_sched.h @@ -144,9 +144,9 @@ ERTS_GLB_INLINE void erts_nif_export_restore(Process *c_p, NifExport *ep, Eterm result) { ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data())); - ERTS_SMP_LC_ASSERT(!(c_p->static_flags + ERTS_LC_ASSERT(!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC)); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); c_p->current = ep->current; @@ -235,7 +235,7 @@ erts_flush_dirty_shadow_proc(Process *sproc) Process *c_p = sproc->next; ASSERT(sproc->common.id == c_p->common.id); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); ASSERT(c_p->stop == sproc->stop); @@ -283,7 +283,7 @@ erts_cache_dirty_shadow_proc(Process *sproc) Process *c_p = sproc->next; ASSERT(c_p); ASSERT(sproc->common.id == c_p->common.id); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); sproc->htop = c_p->htop; @@ -311,7 +311,7 @@ erts_make_dirty_shadow_proc(ErtsSchedulerData *esdp, Process *c_p) sproc = esdp->dirty_shadow_process; ASSERT(sproc); ASSERT(sproc->static_flags & ERTS_STC_FLG_SHADOW_PROC); - ASSERT(erts_smp_atomic32_read_nob(&sproc->state) + ASSERT(erts_atomic32_read_nob(&sproc->state) == 
(ERTS_PSFLG_ACTIVE | ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_PROXY)); diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c index b586a4636a..af25971130 100644 --- a/erts/emulator/beam/erl_nif.c +++ b/erts/emulator/beam/erl_nif.c @@ -138,7 +138,7 @@ execution_state(ErlNifEnv *env, Process **c_pp, int *schedp) Process *c_p = env->proc; if (!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC)) { - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); } else { @@ -220,7 +220,7 @@ void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif, ASSERT(esdp); if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { - erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state); + erts_aint32_t state = erts_atomic32_read_nob(&p->state); ASSERT(p->scheduler_data == esdp); ASSERT((state & (ERTS_PSFLG_RUNNING @@ -287,7 +287,7 @@ schedule(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp, else dirty_shadow_proc = env->proc; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(c_p)); ep = erts_nif_export_schedule(c_p, dirty_shadow_proc, c_p->current, @@ -320,7 +320,7 @@ erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm * ErlNifEnv env; ERL_NIF_TERM result; #ifdef DEBUG - erts_aint32_t state = erts_smp_atomic32_read_nob(&c_p->state); + erts_aint32_t state = erts_atomic32_read_nob(&c_p->state); ASSERT(nep == ERTS_PROC_GET_NIF_TRAP_EXPORT(c_p)); @@ -343,14 +343,14 @@ erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm * ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p))); - erts_smp_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC + erts_atomic32_read_band_mb(&c_p->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC | ERTS_PSFLG_DIRTY_IO_PROC)); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); result = (*dirty_nif)(&env, codemfa->arity, argv); /* Call dirty NIF */ - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); ASSERT(env.proc->static_flags & ERTS_STC_FLG_SHADOW_PROC); ASSERT(env.proc->next == c_p); @@ -587,7 +587,7 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks) ErlTraceMessageQueue *msgq, **last_msgq; int reds = 0; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_TRACE); + erts_proc_lock(c_p, ERTS_PROC_LOCK_TRACE); msgq = c_p->trace_msg_q; @@ -606,7 +606,7 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks) msgq->first = NULL; msgq->last = &msgq->first; msgq->len = 0; - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE); ASSERT(len != 0); @@ -619,13 +619,13 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks) if (rp->common.id == c_p->common.id) rp_locks &= ~c_p_locks; if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); reds += len; } else { erts_cleanup_messages(first); } reds += 1; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_TRACE); + erts_proc_lock(c_p, ERTS_PROC_LOCK_TRACE); msgq = msgq->next; } while (msgq); @@ -642,7 +642,7 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks) } error: - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_TRACE); return reds; } @@ -675,7 +675,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, return 0; if (env->proc->static_flags & 
ERTS_STC_FLG_SHADOW_PROC) { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } } @@ -684,7 +684,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, ERTS_P2P_FLG_INC_REFC); if (!rp) { if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); return 0; } } @@ -719,7 +719,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, full_cache_env(env); } else { - erts_aint_t state = erts_smp_atomic32_read_nob(&rp->state); + erts_aint_t state = erts_atomic32_read_nob(&rp->state); if (state & ERTS_PSFLG_OFF_HEAP_MSGQ) { mp = erts_alloc_message(sz, &hp); ohp = sz == 0 ? NULL : &mp->hfrag.off_heap; @@ -755,7 +755,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, Process *t_p = env->tracee; - erts_smp_proc_lock(t_p, ERTS_PROC_LOCK_TRACE); + erts_proc_lock(t_p, ERTS_PROC_LOCK_TRACE); msgq = t_p->trace_msg_q; @@ -772,7 +772,7 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, #endif if (ERTS_FORCE_ENIF_SEND_DELAY() || msgq || rp_locks & ERTS_PROC_LOCK_MSGQ || - erts_smp_proc_trylock(rp, ERTS_PROC_LOCK_MSGQ) == EBUSY) { + erts_proc_trylock(rp, ERTS_PROC_LOCK_MSGQ) == EBUSY) { if (!msgq) { msgq = erts_alloc(ERTS_ALC_T_TRACE_MSG_QUEUE, @@ -786,18 +786,18 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid, msgq->next = t_p->trace_msg_q; t_p->trace_msg_q = msgq; - erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE); erts_schedule_flush_trace_messages(t_p, 0); } else { msgq->len++; *msgq->last = mp; msgq->last = &mp->next; - erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE); } goto done; } else { - erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE); rp_locks &= ~ERTS_PROC_LOCK_TRACE; rp_locks |= ERTS_PROC_LOCK_MSGQ; } @@ -810,9 +810,9 @@ done: if (c_p == rp) rp_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp_locks & ~lc_locks) - erts_smp_proc_unlock(rp, rp_locks & ~lc_locks); + erts_proc_unlock(rp, rp_locks & ~lc_locks); if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); if (scheduler <= 0) erts_proc_dec_refc(rp); @@ -880,14 +880,14 @@ static Eterm call_whereis(ErlNifEnv *env, Eterm name) return 0; if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); unlock = 1; } } res = erts_whereis_name_to_id(c_p, name); if (unlock) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); return res; } @@ -2057,7 +2057,7 @@ ErlNifResourceType* open_resource_type(ErlNifEnv* env, ErlNifResourceFlags op = flags; Eterm module_am, name_am; - ASSERT(erts_smp_thr_progress_is_blocking()); + ASSERT(erts_thr_progress_is_blocking()); module_am = make_atom(env->mod_nif->mod->module); name_am = enif_make_atom(env, name_str); @@ -2203,7 +2203,7 @@ static void destroy_one_monitor(ErtsMonitor* mon, void* context) is_exiting = 1; } if (rp) { - erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_lock(rp, ERTS_PROC_LOCK_LINK); if (ERTS_PROC_IS_EXITING(rp)) { is_exiting = 1; } else { @@ -2211,7 +2211,7 @@ static void destroy_one_monitor(ErtsMonitor* mon, void* context) ASSERT(rmon); is_exiting = 0; } - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if 
(ctx->scheduler <= 0) erts_proc_dec_refc(rp); } @@ -2251,7 +2251,7 @@ static int nif_resource_dtor(Binary* bin) ErtsResourceMonitors* rm = resource->monitors; ASSERT(type->down); - erts_smp_mtx_lock(&rm->lock); + erts_mtx_lock(&rm->lock); ASSERT(erts_refc_read(&bin->intern.refc, 0) == 0); if (rm->root) { ASSERT(!rm->is_dying); @@ -2273,11 +2273,11 @@ static int nif_resource_dtor(Binary* bin) */ ASSERT(!rm->is_dying); rm->is_dying = 1; - erts_smp_mtx_unlock(&rm->lock); + erts_mtx_unlock(&rm->lock); return 0; } - erts_smp_mtx_unlock(&rm->lock); - erts_smp_mtx_destroy(&rm->lock); + erts_mtx_unlock(&rm->lock); + erts_mtx_destroy(&rm->lock); } if (type->dtor != NULL) { @@ -2318,12 +2318,12 @@ void erts_fire_nif_monitor(ErtsResource* resource, Eterm pid, Eterm ref) ASSERT(rmp); ASSERT(resource->type->down); - erts_smp_mtx_lock(&rmp->lock); + erts_mtx_lock(&rmp->lock); rmon = erts_remove_monitor(&rmp->root, ref); if (!rmon) { int free_me = (--rmp->pending_failed_fire == 0) && rmp->is_dying; ASSERT(rmp->pending_failed_fire >= 0); - erts_smp_mtx_unlock(&rmp->lock); + erts_mtx_unlock(&rmp->lock); if (free_me) { ASSERT(erts_refc_read(&bin->binary.intern.refc, 0) == 0); @@ -2339,10 +2339,10 @@ void erts_fire_nif_monitor(ErtsResource* resource, Eterm pid, Eterm ref) * we avoid calling 'down' and just silently remove the monitor. * This can happen even for non smp as destructor calls may be scheduled. */ - erts_smp_mtx_unlock(&rmp->lock); + erts_mtx_unlock(&rmp->lock); } else { - erts_smp_mtx_unlock(&rmp->lock); + erts_mtx_unlock(&rmp->lock); ASSERT(rmon->u.pid == pid); erts_ref_to_driver_monitor(ref, &nif_monitor); @@ -2387,7 +2387,7 @@ void* enif_alloc_resource(ErlNifResourceType* type, size_t data_sz) erts_refc_inc(&resource->type->refc, 2); if (type->down) { resource->monitors = (ErtsResourceMonitors*) (resource->data + monitors_offs); - erts_smp_mtx_init(&resource->monitors->lock, "resource_monitors", NIL, + erts_mtx_init(&resource->monitors->lock, "resource_monitors", NIL, ERTS_LOCK_FLAGS_CATEGORY_GENERIC); resource->monitors->root = NULL; resource->monitors->pending_failed_fire = 0; @@ -2656,7 +2656,7 @@ schedule_dirty_nif(ErlNifEnv* env, int flags, NativeFunPtr fp, execution_state(env, &proc, NULL); - (void) erts_smp_atomic32_read_bset_nob(&proc->state, + (void) erts_atomic32_read_bset_nob(&proc->state, (ERTS_PSFLG_DIRTY_CPU_PROC | ERTS_PSFLG_DIRTY_IO_PROC), (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND @@ -2694,7 +2694,7 @@ static_schedule_dirty_nif(ErlNifEnv* env, erts_aint32_t dirty_psflg, ASSERT(is_atom(mod) && is_atom(func)); ASSERT(fp); - (void) erts_smp_atomic32_read_bset_nob(&proc->state, + (void) erts_atomic32_read_bset_nob(&proc->state, (ERTS_PSFLG_DIRTY_CPU_PROC | ERTS_PSFLG_DIRTY_IO_PROC), dirty_psflg); @@ -2788,7 +2788,7 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags, if (scheduler <= 0) { if (scheduler == 0) enif_make_badarg(env); - erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN); } if (flags == 0) @@ -2805,7 +2805,7 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags, result = enif_make_badarg(env); if (scheduler < 0) - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); return result; } @@ -3157,9 +3157,9 @@ int enif_monitor_process(ErlNifEnv* env, void* obj, const ErlNifPid* target_pid, ref = erts_make_ref_in_buffer(tmp); - erts_smp_mtx_lock(&rsrc->monitors->lock); - erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK); - if (ERTS_PSFLG_FREE & 
erts_smp_atomic32_read_nob(&rp->state)) { + erts_mtx_lock(&rsrc->monitors->lock); + erts_proc_lock(rp, ERTS_PROC_LOCK_LINK); + if (ERTS_PSFLG_FREE & erts_atomic32_read_nob(&rp->state)) { retval = 1; } else { @@ -3167,8 +3167,8 @@ int enif_monitor_process(ErlNifEnv* env, void* obj, const ErlNifPid* target_pid, erts_add_monitor(&ERTS_P_MONITORS(rp), MON_NIF_TARGET, ref, (UWord)rsrc, NIL); retval = 0; } - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); - erts_smp_mtx_unlock(&rsrc->monitors->lock); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_mtx_unlock(&rsrc->monitors->lock); if (scheduler <= 0) erts_proc_dec_refc(rp); @@ -3199,11 +3199,11 @@ int enif_demonitor_process(ErlNifEnv* env, void* obj, const ErlNifMonitor* monit ref = erts_driver_monitor_to_ref(ref_heap, monitor); - erts_smp_mtx_lock(&rsrc->monitors->lock); + erts_mtx_lock(&rsrc->monitors->lock); mon = erts_remove_monitor(&rsrc->monitors->root, ref); if (mon == NULL) { - erts_smp_mtx_unlock(&rsrc->monitors->lock); + erts_mtx_unlock(&rsrc->monitors->lock); return 1; } @@ -3219,7 +3219,7 @@ int enif_demonitor_process(ErlNifEnv* env, void* obj, const ErlNifMonitor* monit is_exiting = 1; } else { - erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_lock(rp, ERTS_PROC_LOCK_LINK); if (ERTS_PROC_IS_EXITING(rp)) { is_exiting = 1; } else { @@ -3227,7 +3227,7 @@ int enif_demonitor_process(ErlNifEnv* env, void* obj, const ErlNifMonitor* monit ASSERT(rmon); is_exiting = 0; } - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (scheduler <= 0) erts_proc_dec_refc(rp); @@ -3235,7 +3235,7 @@ int enif_demonitor_process(ErlNifEnv* env, void* obj, const ErlNifMonitor* monit if (is_exiting) { rsrc->monitors->pending_failed_fire++; } - erts_smp_mtx_unlock(&rsrc->monitors->lock); + erts_mtx_unlock(&rsrc->monitors->lock); if (rmon) { ASSERT(rmon->type == MON_NIF_TARGET); @@ -3461,8 +3461,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) } /* Block system (is this the right place to do it?) 
*/ - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); /* Find calling module */ ASSERT(BIF_P->current != NULL); @@ -3673,8 +3673,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) erts_sys_ddll_free_error(&errdesc); } - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); erts_release_code_write_permission(); erts_free(ERTS_ALC_T_TMP, lib_name); @@ -3687,7 +3687,7 @@ erts_unload_nif(struct erl_module_nif* lib) { ErlNifResourceType* rt; ErlNifResourceType* next; - ASSERT(erts_smp_thr_progress_is_blocking()); + ASSERT(erts_thr_progress_is_blocking()); ASSERT(lib != NULL); ASSERT(lib->mod != NULL); @@ -3759,8 +3759,8 @@ Eterm erts_nif_call_function(Process *p, Process *tracee, break; ASSERT(i < mod->entry.num_of_funcs); if (p) - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN - || erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN + || erts_thr_progress_is_blocking()); #endif if (p) { /* This is almost a normal nif call like in beam_emu, diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c index c12f231a3b..f8e9fec27a 100644 --- a/erts/emulator/beam/erl_node_tables.c +++ b/erts/emulator/beam/erl_node_tables.c @@ -32,8 +32,8 @@ Hash erts_dist_table; Hash erts_node_table; -erts_smp_rwmtx_t erts_dist_table_rwmtx; -erts_smp_rwmtx_t erts_node_table_rwmtx; +erts_rwmtx_t erts_dist_table_rwmtx; +erts_rwmtx_t erts_node_table_rwmtx; DistEntry *erts_hidden_dist_entries; DistEntry *erts_visible_dist_entries; @@ -87,8 +87,8 @@ dist_table_alloc(void *dep_tmpl) { Eterm sysname; DistEntry *dep; - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; + rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; sysname = ((DistEntry *) dep_tmpl)->sysname; dep = (DistEntry *) erts_alloc(ERTS_ALC_T_DIST_ENTRY, sizeof(DistEntry)); @@ -96,8 +96,8 @@ dist_table_alloc(void *dep_tmpl) dist_entries++; dep->prev = NULL; - erts_smp_refc_init(&dep->refc, -1); - erts_smp_rwmtx_init_opt(&dep->rwmtx, &rwmtx_opt, "dist_entry", sysname, + erts_refc_init(&dep->refc, -1); + erts_rwmtx_init_opt(&dep->rwmtx, &rwmtx_opt, "dist_entry", sysname, ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION); dep->sysname = sysname; dep->cid = NIL; @@ -106,13 +106,13 @@ dist_table_alloc(void *dep_tmpl) dep->flags = 0; dep->version = 0; - erts_smp_mtx_init(&dep->lnk_mtx, "dist_entry_links", sysname, + erts_mtx_init(&dep->lnk_mtx, "dist_entry_links", sysname, ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION); dep->node_links = NULL; dep->nlinks = NULL; dep->monitors = NULL; - erts_smp_mtx_init(&dep->qlock, "dist_entry_out_queue", sysname, + erts_mtx_init(&dep->qlock, "dist_entry_out_queue", sysname, ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION); dep->qflgs = 0; dep->qsize = 0; @@ -123,7 +123,7 @@ dist_table_alloc(void *dep_tmpl) dep->finalized_out_queue.first = NULL; dep->finalized_out_queue.last = NULL; - erts_smp_atomic_init_nob(&dep->dist_cmd_scheduled, 0); + erts_atomic_init_nob(&dep->dist_cmd_scheduled, 0); erts_port_task_handle_init(&dep->dist_cmd); dep->send = NULL; dep->cache = NULL; @@ -174,9 +174,9 @@ dist_table_free(void *vdep) erts_no_of_not_connected_dist_entries--; ASSERT(!dep->cache); - erts_smp_rwmtx_destroy(&dep->rwmtx); - 
erts_smp_mtx_destroy(&dep->lnk_mtx); - erts_smp_mtx_destroy(&dep->qlock); + erts_rwmtx_destroy(&dep->rwmtx); + erts_mtx_destroy(&dep->lnk_mtx); + erts_mtx_destroy(&dep->qlock); #ifdef DEBUG sys_memset(vdep, 0x77, sizeof(DistEntry)); @@ -193,10 +193,10 @@ erts_dist_table_info(fmtfn_t to, void *to_arg) { int lock = !ERTS_IS_CRASH_DUMPING; if (lock) - erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); + erts_rwmtx_rlock(&erts_dist_table_rwmtx); hash_info(to, to_arg, &erts_dist_table); if (lock) - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); } DistEntry * @@ -209,7 +209,7 @@ erts_channel_no_to_dist_entry(Uint cno) * to the node name is used as channel no. */ if(cno == ERST_INTERNAL_CHANNEL_NO) { - erts_smp_refc_inc(&erts_this_dist_entry->refc, 2); + erts_refc_inc(&erts_this_dist_entry->refc, 2); return erts_this_dist_entry; } @@ -232,23 +232,23 @@ erts_sysname_to_connected_dist_entry(Eterm sysname) de.sysname = sysname; if(erts_this_dist_entry->sysname == sysname) { - erts_smp_refc_inc(&erts_this_dist_entry->refc, 2); + erts_refc_inc(&erts_this_dist_entry->refc, 2); return erts_this_dist_entry; } - erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); + erts_rwmtx_rlock(&erts_dist_table_rwmtx); res_dep = (DistEntry *) hash_get(&erts_dist_table, (void *) &de); if (res_dep) { - erts_aint_t refc = erts_smp_refc_inctest(&res_dep->refc, 1); + erts_aint_t refc = erts_refc_inctest(&res_dep->refc, 1); if (refc < 2) /* Pending delete */ - erts_smp_refc_inc(&res_dep->refc, 1); + erts_refc_inc(&res_dep->refc, 1); } - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); if (res_dep) { int deref; - erts_smp_rwmtx_rlock(&res_dep->rwmtx); + erts_rwmtx_rlock(&res_dep->rwmtx); deref = is_nil(res_dep->cid); - erts_smp_rwmtx_runlock(&res_dep->rwmtx); + erts_rwmtx_runlock(&res_dep->rwmtx); if (deref) { erts_deref_dist_entry(res_dep); res_dep = NULL; @@ -266,12 +266,12 @@ DistEntry *erts_find_or_insert_dist_entry(Eterm sysname) if (res) return res; de.sysname = sysname; - erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx); + erts_rwmtx_rwlock(&erts_dist_table_rwmtx); res = hash_put(&erts_dist_table, (void *) &de); - refc = erts_smp_refc_inctest(&res->refc, 0); + refc = erts_refc_inctest(&res->refc, 0); if (refc < 2) /* New or pending delete */ - erts_smp_refc_inc(&res->refc, 1); - erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx); + erts_refc_inc(&res->refc, 1); + erts_rwmtx_rwunlock(&erts_dist_table_rwmtx); return res; } @@ -280,14 +280,14 @@ DistEntry *erts_find_dist_entry(Eterm sysname) DistEntry *res; DistEntry de; de.sysname = sysname; - erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); + erts_rwmtx_rlock(&erts_dist_table_rwmtx); res = hash_get(&erts_dist_table, (void *) &de); if (res) { - erts_aint_t refc = erts_smp_refc_inctest(&res->refc, 1); + erts_aint_t refc = erts_refc_inctest(&res->refc, 1); if (refc < 2) /* Pending delete */ - erts_smp_refc_inc(&res->refc, 1); + erts_refc_inc(&res->refc, 1); } - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); return res; } @@ -296,7 +296,7 @@ static void try_delete_dist_entry(void *vdep) DistEntry *dep = (DistEntry *) vdep; erts_aint_t refc; - erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx); + erts_rwmtx_rwlock(&erts_dist_table_rwmtx); /* * Another thread might have looked up this dist entry after * we decided to delete it (refc became zero). 
If so, the other @@ -312,10 +312,10 @@ static void try_delete_dist_entry(void *vdep) * * If refc > 0, the entry is in use. Keep the entry. */ - refc = erts_smp_refc_dectest(&dep->refc, -1); + refc = erts_refc_dectest(&dep->refc, -1); if (refc == -1) (void) hash_erase(&erts_dist_table, (void *) dep); - erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx); + erts_rwmtx_rwunlock(&erts_dist_table_rwmtx); if (refc == 0) erts_schedule_delete_dist_entry(dep); @@ -346,7 +346,7 @@ erts_dist_table_size(void) int lock = !ERTS_IS_CRASH_DUMPING; if (lock) - erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); + erts_rwmtx_rlock(&erts_dist_table_rwmtx); #ifdef DEBUG hash_get_info(&hi, &erts_dist_table); ASSERT(dist_entries == hi.objs); @@ -373,15 +373,15 @@ erts_dist_table_size(void) + dist_entries*sizeof(DistEntry) + erts_dist_cache_size()); if (lock) - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); return res; } void erts_set_dist_entry_not_connected(DistEntry *dep) { - ERTS_SMP_LC_ASSERT(erts_lc_is_de_rwlocked(dep)); - erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx); + ERTS_LC_ASSERT(erts_lc_is_de_rwlocked(dep)); + erts_rwmtx_rwlock(&erts_dist_table_rwmtx); ASSERT(dep != erts_this_dist_entry); ASSERT(is_internal_port(dep->cid)); @@ -428,14 +428,14 @@ erts_set_dist_entry_not_connected(DistEntry *dep) } erts_not_connected_dist_entries = dep; erts_no_of_not_connected_dist_entries++; - erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx); + erts_rwmtx_rwunlock(&erts_dist_table_rwmtx); } void erts_set_dist_entry_connected(DistEntry *dep, Eterm cid, Uint flags) { - ERTS_SMP_LC_ASSERT(erts_lc_is_de_rwlocked(dep)); - erts_smp_rwmtx_rwlock(&erts_dist_table_rwmtx); + ERTS_LC_ASSERT(erts_lc_is_de_rwlocked(dep)); + erts_rwmtx_rwlock(&erts_dist_table_rwmtx); ASSERT(dep != erts_this_dist_entry); ASSERT(is_nil(dep->cid)); @@ -481,7 +481,7 @@ erts_set_dist_entry_connected(DistEntry *dep, Eterm cid, Uint flags) erts_hidden_dist_entries = dep; erts_no_of_hidden_dist_entries++; } - erts_smp_rwmtx_rwunlock(&erts_dist_table_rwmtx); + erts_rwmtx_rwunlock(&erts_dist_table_rwmtx); } /* -- Node table --------------------------------------------------------- */ @@ -519,7 +519,7 @@ node_table_alloc(void *venp_tmpl) node_entries++; - erts_smp_refc_init(&enp->refc, -1); + erts_refc_init(&enp->refc, -1); enp->creation = ((ErlNode *) venp_tmpl)->creation; enp->sysname = ((ErlNode *) venp_tmpl)->sysname; enp->dist_entry = erts_find_or_insert_dist_entry(((ErlNode *) venp_tmpl)->sysname); @@ -532,7 +532,7 @@ node_table_free(void *venp) { ErlNode *enp = (ErlNode *) venp; - ERTS_SMP_LC_ASSERT(enp != erts_this_node || erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(enp != erts_this_node || erts_thr_progress_is_blocking()); erts_deref_dist_entry(enp->dist_entry); #ifdef DEBUG @@ -553,14 +553,14 @@ erts_node_table_size(void) #endif int lock = !ERTS_IS_CRASH_DUMPING; if (lock) - erts_smp_rwmtx_rlock(&erts_node_table_rwmtx); + erts_rwmtx_rlock(&erts_node_table_rwmtx); #ifdef DEBUG hash_get_info(&hi, &erts_node_table); ASSERT(node_entries == hi.objs); #endif res = hash_table_sz(&erts_node_table) + node_entries*sizeof(ErlNode); if (lock) - erts_smp_rwmtx_runlock(&erts_node_table_rwmtx); + erts_rwmtx_runlock(&erts_node_table_rwmtx); return res; } @@ -569,10 +569,10 @@ erts_node_table_info(fmtfn_t to, void *to_arg) { int lock = !ERTS_IS_CRASH_DUMPING; if (lock) - erts_smp_rwmtx_rlock(&erts_node_table_rwmtx); + erts_rwmtx_rlock(&erts_node_table_rwmtx); hash_info(to, to_arg, &erts_node_table); if (lock) - 
erts_smp_rwmtx_runlock(&erts_node_table_rwmtx); + erts_rwmtx_runlock(&erts_node_table_rwmtx); } @@ -583,26 +583,26 @@ ErlNode *erts_find_or_insert_node(Eterm sysname, Uint32 creation) ne.sysname = sysname; ne.creation = creation; - erts_smp_rwmtx_rlock(&erts_node_table_rwmtx); + erts_rwmtx_rlock(&erts_node_table_rwmtx); res = hash_get(&erts_node_table, (void *) &ne); if (res && res != erts_this_node) { - erts_aint_t refc = erts_smp_refc_inctest(&res->refc, 0); + erts_aint_t refc = erts_refc_inctest(&res->refc, 0); if (refc < 2) /* New or pending delete */ - erts_smp_refc_inc(&res->refc, 1); + erts_refc_inc(&res->refc, 1); } - erts_smp_rwmtx_runlock(&erts_node_table_rwmtx); + erts_rwmtx_runlock(&erts_node_table_rwmtx); if (res) return res; - erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx); + erts_rwmtx_rwlock(&erts_node_table_rwmtx); res = hash_put(&erts_node_table, (void *) &ne); ASSERT(res); if (res != erts_this_node) { - erts_aint_t refc = erts_smp_refc_inctest(&res->refc, 0); + erts_aint_t refc = erts_refc_inctest(&res->refc, 0); if (refc < 2) /* New or pending delete */ - erts_smp_refc_inc(&res->refc, 1); + erts_refc_inc(&res->refc, 1); } - erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx); + erts_rwmtx_rwunlock(&erts_node_table_rwmtx); return res; } @@ -611,7 +611,7 @@ static void try_delete_node(void *venp) ErlNode *enp = (ErlNode *) venp; erts_aint_t refc; - erts_smp_rwmtx_rwlock(&erts_node_table_rwmtx); + erts_rwmtx_rwlock(&erts_node_table_rwmtx); /* * Another thread might have looked up this node after we * decided to delete it (refc became zero). If so, the other @@ -627,10 +627,10 @@ static void try_delete_node(void *venp) * * If refc > 0, the entry is in use. Keep the entry. */ - refc = erts_smp_refc_dectest(&enp->refc, -1); + refc = erts_refc_dectest(&enp->refc, -1); if (refc == -1) (void) hash_erase(&erts_node_table, (void *) enp); - erts_smp_rwmtx_rwunlock(&erts_node_table_rwmtx); + erts_rwmtx_rwunlock(&erts_node_table_rwmtx); if (refc == 0) erts_schedule_delete_node(enp); @@ -673,7 +673,7 @@ static void print_node(void *venp, void *vpndp) erts_print(pndp->to, pndp->to_arg, " %d", enp->creation); #ifdef DEBUG erts_print(pndp->to, pndp->to_arg, " (refc=%ld)", - erts_smp_refc_read(&enp->refc, 0)); + erts_refc_read(&enp->refc, 0)); #endif pndp->no_sysname++; } @@ -696,13 +696,13 @@ void erts_print_node_info(fmtfn_t to, pnd.no_total = 0; if (lock) - erts_smp_rwmtx_rlock(&erts_node_table_rwmtx); + erts_rwmtx_rlock(&erts_node_table_rwmtx); hash_foreach(&erts_node_table, print_node, (void *) &pnd); if (pnd.no_sysname != 0) { erts_print(to, to_arg, "\n"); } if (lock) - erts_smp_rwmtx_runlock(&erts_node_table_rwmtx); + erts_rwmtx_runlock(&erts_node_table_rwmtx); if(no_sysname) *no_sysname = pnd.no_sysname; @@ -715,20 +715,20 @@ void erts_print_node_info(fmtfn_t to, void erts_set_this_node(Eterm sysname, Uint creation) { - ERTS_SMP_LC_ASSERT(erts_thr_progress_is_blocking()); - ASSERT(erts_smp_refc_read(&erts_this_dist_entry->refc, 2)); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); + ASSERT(erts_refc_read(&erts_this_dist_entry->refc, 2)); - if (erts_smp_refc_dectest(&erts_this_node->refc, 0) == 0) + if (erts_refc_dectest(&erts_this_node->refc, 0) == 0) try_delete_node(erts_this_node); - if (erts_smp_refc_dectest(&erts_this_dist_entry->refc, 0) == 0) + if (erts_refc_dectest(&erts_this_dist_entry->refc, 0) == 0) try_delete_dist_entry(erts_this_dist_entry); erts_this_node = NULL; /* to make sure refc is bumped for this node */ erts_this_node = erts_find_or_insert_node(sysname, creation); 
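/*
 * [Editor's note] The try_delete_node()/try_delete_dist_entry() pattern
 * above re-checks the reference count under the table write lock, because
 * a concurrent lookup may have resurrected the entry between the count
 * hitting zero and the scheduled deletion actually running. A compressed
 * sketch of that verdict logic, assuming C11 atomics and that the caller
 * holds the table's write lock; Entry and the enum are illustrative
 * names, not the ERTS API.
 */
#include <stdatomic.h>

typedef struct {
    atomic_long refc;     /* base value is -1, as in the node/dist tables */
    /* hash bucket and payload elided */
} Entry;

typedef enum {
    DEL_ERASE,        /* nobody resurrected it: hash_erase() and free */
    DEL_RESCHEDULE,   /* refc fell back to zero: caller schedules a new
                       * deletion attempt, which will retry this check */
    DEL_KEEP          /* refc > 0: entry is in use again, keep it */
} DeleteVerdict;

/* Called from the scheduled deletion, table write lock held. Decrements
 * one step past the usual floor of 0 down to the base value -1, exactly
 * as erts_refc_dectest(&enp->refc, -1) does above. */
static DeleteVerdict try_delete(Entry *entry)
{
    long refc = atomic_fetch_sub(&entry->refc, 1) - 1;
    if (refc == -1)
        return DEL_ERASE;
    if (refc == 0)
        return DEL_RESCHEDULE;
    return DEL_KEEP;
}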
erts_this_dist_entry = erts_this_node->dist_entry; - erts_smp_refc_inc(&erts_this_dist_entry->refc, 2); + erts_refc_inc(&erts_this_dist_entry->refc, 2); erts_this_node_sysname = erts_this_node_sysname_BUFFER; erts_snprintf(erts_this_node_sysname, sizeof(erts_this_node_sysname_BUFFER), @@ -747,7 +747,7 @@ erts_delayed_node_table_gc(void) void erts_init_node_tables(int dd_sec) { - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; HashFunctions f; ErlNode node_tmpl; @@ -758,12 +758,12 @@ void erts_init_node_tables(int dd_sec) orig_node_tab_delete_delay = node_tab_delete_delay; - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; - rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED; + rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; + rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED; - erts_smp_rwmtx_init_opt(&erts_node_table_rwmtx, &rwmtx_opt, "node_table", NIL, + erts_rwmtx_init_opt(&erts_node_table_rwmtx, &rwmtx_opt, "node_table", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION); - erts_smp_rwmtx_init_opt(&erts_dist_table_rwmtx, &rwmtx_opt, "dist_table", NIL, + erts_rwmtx_init_opt(&erts_dist_table_rwmtx, &rwmtx_opt, "dist_table", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DISTRIBUTION); f.hash = (H_FUN) dist_table_hash; @@ -792,13 +792,13 @@ void erts_init_node_tables(int dd_sec) node_tmpl.creation = 0; erts_this_node = hash_put(&erts_node_table, &node_tmpl); /* +1 for erts_this_node */ - erts_smp_refc_init(&erts_this_node->refc, 1); + erts_refc_init(&erts_this_node->refc, 1); ASSERT(erts_this_node->dist_entry != NULL); erts_this_dist_entry = erts_this_node->dist_entry; /* +1 for erts_this_dist_entry */ /* +1 for erts_this_node->dist_entry */ - erts_smp_refc_init(&erts_this_dist_entry->refc, 2); + erts_refc_init(&erts_this_dist_entry->refc, 2); erts_this_node_sysname = erts_this_node_sysname_BUFFER; @@ -811,11 +811,11 @@ void erts_init_node_tables(int dd_sec) #ifdef ERTS_ENABLE_LOCK_CHECK int erts_lc_is_de_rwlocked(DistEntry *dep) { - return erts_smp_lc_rwmtx_is_rwlocked(&dep->rwmtx); + return erts_lc_rwmtx_is_rwlocked(&dep->rwmtx); } int erts_lc_is_de_rlocked(DistEntry *dep) { - return erts_smp_lc_rwmtx_is_rlocked(&dep->rwmtx); + return erts_lc_rwmtx_is_rlocked(&dep->rwmtx); } #endif @@ -839,10 +839,10 @@ static void erts_lcnt_enable_dist_lock_count(void *dep_raw, void *enable) { } void erts_lcnt_update_distribution_locks(int enable) { - erts_smp_rwmtx_rlock(&erts_dist_table_rwmtx); + erts_rwmtx_rlock(&erts_dist_table_rwmtx); hash_foreach(&erts_dist_table, erts_lcnt_enable_dist_lock_count, (void*)(UWord)enable); - erts_smp_rwmtx_runlock(&erts_dist_table_rwmtx); + erts_rwmtx_runlock(&erts_dist_table_rwmtx); } #endif @@ -944,8 +944,8 @@ erts_get_node_and_dist_references(struct process *proc) Uint *endp; #endif - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); - erts_smp_thr_progress_block(); + erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_block(); /* No need to lock any thing since we are alone... 
*/ if (references_atoms_need_init) { @@ -987,8 +987,8 @@ erts_get_node_and_dist_references(struct process *proc) delete_reference_table(); - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(proc, ERTS_PROC_LOCK_MAIN); return res; } @@ -1658,7 +1658,7 @@ reference_table_term(Uint **hpp, ErlOffHeap *ohp, Uint *szp) tup = MK_2TUP(referred_nodes[i].node->sysname, MK_UINT(referred_nodes[i].node->creation)); - tup = MK_3TUP(tup, MK_UINT(erts_smp_refc_read(&referred_nodes[i].node->refc, 0)), nril); + tup = MK_3TUP(tup, MK_UINT(erts_refc_read(&referred_nodes[i].node->refc, 0)), nril); nl = MK_CONS(tup, nl); } @@ -1719,7 +1719,7 @@ reference_table_term(Uint **hpp, ErlOffHeap *ohp, Uint *szp) /* DistList = [{Dist, Refc, ReferenceIdList}] */ tup = MK_3TUP(referred_dists[i].dist->sysname, - MK_UINT(erts_smp_refc_read(&referred_dists[i].dist->refc, 0)), + MK_UINT(erts_refc_read(&referred_dists[i].dist->refc, 0)), dril); dl = MK_CONS(tup, dl); } @@ -1778,12 +1778,12 @@ delete_reference_table(void) void erts_debug_test_node_tab_delayed_delete(Sint64 millisecs) { - erts_smp_thr_progress_block(); + erts_thr_progress_block(); if (millisecs < 0) node_tab_delete_delay = orig_node_tab_delete_delay; else node_tab_delete_delay = millisecs; - erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); } diff --git a/erts/emulator/beam/erl_node_tables.h b/erts/emulator/beam/erl_node_tables.h index b036a55609..7974b25444 100644 --- a/erts/emulator/beam/erl_node_tables.h +++ b/erts/emulator/beam/erl_node_tables.h @@ -44,7 +44,6 @@ #include "erl_alloc.h" #include "erl_process.h" #include "erl_monitors.h" -#include "erl_smp.h" #define ERTS_PORT_TASK_ONLY_BASIC_TYPES__ #include "erl_port_task.h" #undef ERTS_PORT_TASK_ONLY_BASIC_TYPES__ @@ -107,9 +106,9 @@ typedef struct dist_entry_ { HashBucket hash_bucket; /* Hash bucket */ struct dist_entry_ *next; /* Next entry in dist_table (not sorted) */ struct dist_entry_ *prev; /* Previous entry in dist_table (not sorted) */ - erts_smp_refc_t refc; /* Reference count */ + erts_refc_t refc; /* Reference count */ - erts_smp_rwmtx_t rwmtx; /* Protects all fields below until lck_mtx. */ + erts_rwmtx_t rwmtx; /* Protects all fields below until lck_mtx. */ Eterm sysname; /* name@host atom for efficiency */ Uint32 creation; /* creation of connected node */ Eterm cid; /* connection handler (pid or port), NIL == free */ @@ -120,7 +119,7 @@ typedef struct dist_entry_ { unsigned long version; /* Protocol version */ - erts_smp_mtx_t lnk_mtx; /* Protects node_links, nlinks, and + erts_mtx_t lnk_mtx; /* Protects node_links, nlinks, and monitors. 
*/ ErtsLink *node_links; /* In a dist entry, node links are kept in a separate tree, while they are @@ -132,14 +131,14 @@ typedef struct dist_entry_ { ErtsLink *nlinks; /* Link tree with subtrees */ ErtsMonitor *monitors; /* Monitor tree */ - erts_smp_mtx_t qlock; /* Protects qflgs and out_queue */ + erts_mtx_t qlock; /* Protects qflgs and out_queue */ Uint32 qflgs; Sint qsize; ErtsDistOutputQueue out_queue; struct ErtsProcList_ *suspended; ErtsDistOutputQueue finalized_out_queue; - erts_smp_atomic_t dist_cmd_scheduled; + erts_atomic_t dist_cmd_scheduled; ErtsPortTaskHandle dist_cmd; Uint (*send)(Port *prt, ErtsDistOutputBuf *obuf); @@ -149,7 +148,7 @@ typedef struct dist_entry_ { typedef struct erl_node_ { HashBucket hash_bucket; /* Hash bucket */ - erts_smp_refc_t refc; /* Reference count */ + erts_refc_t refc; /* Reference count */ Eterm sysname; /* name@host atom for efficiency */ Uint32 creation; /* Creation */ DistEntry *dist_entry; /* Corresponding dist entry */ @@ -158,8 +157,8 @@ typedef struct erl_node_ { extern Hash erts_dist_table; extern Hash erts_node_table; -extern erts_smp_rwmtx_t erts_dist_table_rwmtx; -extern erts_smp_rwmtx_t erts_node_table_rwmtx; +extern erts_rwmtx_t erts_dist_table_rwmtx; +extern erts_rwmtx_t erts_node_table_rwmtx; extern DistEntry *erts_hidden_dist_entries; extern DistEntry *erts_visible_dist_entries; @@ -201,12 +200,12 @@ void erts_lcnt_update_distribution_locks(int enable); ERTS_GLB_INLINE void erts_deref_dist_entry(DistEntry *dep); ERTS_GLB_INLINE void erts_deref_node_entry(ErlNode *np); -ERTS_GLB_INLINE void erts_smp_de_rlock(DistEntry *dep); -ERTS_GLB_INLINE void erts_smp_de_runlock(DistEntry *dep); -ERTS_GLB_INLINE void erts_smp_de_rwlock(DistEntry *dep); -ERTS_GLB_INLINE void erts_smp_de_rwunlock(DistEntry *dep); -ERTS_GLB_INLINE void erts_smp_de_links_lock(DistEntry *dep); -ERTS_GLB_INLINE void erts_smp_de_links_unlock(DistEntry *dep); +ERTS_GLB_INLINE void erts_de_rlock(DistEntry *dep); +ERTS_GLB_INLINE void erts_de_runlock(DistEntry *dep); +ERTS_GLB_INLINE void erts_de_rwlock(DistEntry *dep); +ERTS_GLB_INLINE void erts_de_rwunlock(DistEntry *dep); +ERTS_GLB_INLINE void erts_de_links_lock(DistEntry *dep); +ERTS_GLB_INLINE void erts_de_links_unlock(DistEntry *dep); #if ERTS_GLB_INLINE_INCL_FUNC_DEF @@ -214,7 +213,7 @@ ERTS_GLB_INLINE void erts_deref_dist_entry(DistEntry *dep) { ASSERT(dep); - if (erts_smp_refc_dectest(&dep->refc, 0) == 0) + if (erts_refc_dectest(&dep->refc, 0) == 0) erts_schedule_delete_dist_entry(dep); } @@ -222,44 +221,44 @@ ERTS_GLB_INLINE void erts_deref_node_entry(ErlNode *np) { ASSERT(np); - if (erts_smp_refc_dectest(&np->refc, 0) == 0) + if (erts_refc_dectest(&np->refc, 0) == 0) erts_schedule_delete_node(np); } ERTS_GLB_INLINE void -erts_smp_de_rlock(DistEntry *dep) +erts_de_rlock(DistEntry *dep) { - erts_smp_rwmtx_rlock(&dep->rwmtx); + erts_rwmtx_rlock(&dep->rwmtx); } ERTS_GLB_INLINE void -erts_smp_de_runlock(DistEntry *dep) +erts_de_runlock(DistEntry *dep) { - erts_smp_rwmtx_runlock(&dep->rwmtx); + erts_rwmtx_runlock(&dep->rwmtx); } ERTS_GLB_INLINE void -erts_smp_de_rwlock(DistEntry *dep) +erts_de_rwlock(DistEntry *dep) { - erts_smp_rwmtx_rwlock(&dep->rwmtx); + erts_rwmtx_rwlock(&dep->rwmtx); } ERTS_GLB_INLINE void -erts_smp_de_rwunlock(DistEntry *dep) +erts_de_rwunlock(DistEntry *dep) { - erts_smp_rwmtx_rwunlock(&dep->rwmtx); + erts_rwmtx_rwunlock(&dep->rwmtx); } ERTS_GLB_INLINE void -erts_smp_de_links_lock(DistEntry *dep) +erts_de_links_lock(DistEntry *dep) { - erts_smp_mtx_lock(&dep->lnk_mtx); + 
erts_mtx_lock(&dep->lnk_mtx); } ERTS_GLB_INLINE void -erts_smp_de_links_unlock(DistEntry *dep) +erts_de_links_unlock(DistEntry *dep) { - erts_smp_mtx_unlock(&dep->lnk_mtx); + erts_mtx_unlock(&dep->lnk_mtx); } #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h index 8adf56e9fe..98e9b9ccaf 100644 --- a/erts/emulator/beam/erl_port.h +++ b/erts/emulator/beam/erl_port.h @@ -158,10 +158,10 @@ struct _erl_drv_port { ErtsPortTaskHandle timeout_task; erts_mtx_t *lock; ErtsXPortsList *xports; - erts_smp_atomic_t run_queue; + erts_atomic_t run_queue; erts_atomic_t connected; /* A connected process */ Eterm caller; /* Current caller. */ - erts_smp_atomic_t data; /* Data associated with port. */ + erts_atomic_t data; /* Data associated with port. */ Uint bytes_in; /* Number of bytes read */ Uint bytes_out; /* Number of bytes written */ @@ -178,7 +178,7 @@ struct _erl_drv_port { int control_flags; /* Flags for port_control() */ ErlDrvPDL port_data_lock; - erts_smp_atomic_t psd; /* Port specific data */ + erts_atomic_t psd; /* Port specific data */ int reds; /* Only used while executing driver callbacks */ struct { @@ -215,15 +215,15 @@ ERTS_GLB_INLINE ErtsRunQueue * erts_port_runq(Port *prt) { ErtsRunQueue *rq1, *rq2; - rq1 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue); + rq1 = (ErtsRunQueue *) erts_atomic_read_nob(&prt->run_queue); if (!rq1) return NULL; while (1) { - erts_smp_runq_lock(rq1); - rq2 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue); + erts_runq_lock(rq1); + rq2 = (ErtsRunQueue *) erts_atomic_read_nob(&prt->run_queue); if (rq1 == rq2) return rq1; - erts_smp_runq_unlock(rq1); + erts_runq_unlock(rq1); rq1 = rq2; if (!rq1) return NULL; @@ -241,10 +241,10 @@ ERTS_GLB_INLINE void *erts_prtsd_set(Port *p, int ix, void *new); ERTS_GLB_INLINE void * erts_prtsd_get(Port *prt, int ix) { - ErtsPrtSD *psd = (ErtsPrtSD *) erts_smp_atomic_read_nob(&prt->psd); + ErtsPrtSD *psd = (ErtsPrtSD *) erts_atomic_read_nob(&prt->psd); if (!psd) return NULL; - ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER; + ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER; return psd->data[ix]; } @@ -255,7 +255,7 @@ erts_prtsd_set(Port *prt, int ix, void *data) void *old; int i; - psd = (ErtsPrtSD *) erts_smp_atomic_read_nob(&prt->psd); + psd = (ErtsPrtSD *) erts_atomic_read_nob(&prt->psd); if (psd) { #ifdef ETHR_ORDERED_READ_DEPEND @@ -274,7 +274,7 @@ erts_prtsd_set(Port *prt, int ix, void *data) new_psd = erts_alloc(ERTS_ALC_T_PRTSD, sizeof(ErtsPrtSD)); for (i = 0; i < ERTS_PRTSD_SIZE; i++) new_psd->data[i] = NULL; - psd = (ErtsPrtSD *) erts_smp_atomic_cmpxchg_mb(&prt->psd, + psd = (ErtsPrtSD *) erts_atomic_cmpxchg_mb(&prt->psd, (erts_aint_t) new_psd, (erts_aint_t) NULL); if (psd) @@ -370,9 +370,9 @@ ERTS_GLB_INLINE void erts_port_dec_refc(Port *prt); ERTS_GLB_INLINE void erts_port_add_refc(Port *prt, Sint32 add_refc); ERTS_GLB_INLINE Sint erts_port_read_refc(Port *prt); -ERTS_GLB_INLINE int erts_smp_port_trylock(Port *prt); -ERTS_GLB_INLINE void erts_smp_port_lock(Port *prt); -ERTS_GLB_INLINE void erts_smp_port_unlock(Port *prt); +ERTS_GLB_INLINE int erts_port_trylock(Port *prt); +ERTS_GLB_INLINE void erts_port_lock(Port *prt); +ERTS_GLB_INLINE void erts_port_unlock(Port *prt); #if ERTS_GLB_INLINE_INCL_FUNC_DEF @@ -401,26 +401,26 @@ ERTS_GLB_INLINE Sint erts_port_read_refc(Port *prt) } ERTS_GLB_INLINE int -erts_smp_port_trylock(Port *prt) +erts_port_trylock(Port *prt) { /* *Need* to be a managed thread */ - 
ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread()); + ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread()); return erts_mtx_trylock(prt->lock); } ERTS_GLB_INLINE void -erts_smp_port_lock(Port *prt) +erts_port_lock(Port *prt) { /* *Need* to be a managed thread */ - ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread()); + ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread()); erts_mtx_lock(prt->lock); } ERTS_GLB_INLINE void -erts_smp_port_unlock(Port *prt) +erts_port_unlock(Port *prt) { /* *Need* to be a managed thread */ - ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread()); + ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread()); erts_mtx_unlock(prt->lock); } @@ -488,7 +488,7 @@ erts_port_lookup_raw(Eterm id) { Port *prt; - ERTS_SMP_LC_ASSERT(erts_thr_progress_lc_is_delaying()); + ERTS_LC_ASSERT(erts_thr_progress_lc_is_delaying()); if (is_not_internal_port(id)) return NULL; @@ -517,7 +517,7 @@ erts_id2port(Eterm id) Port *prt; /* Only allowed to be called from managed threads */ - ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread()); + ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread()); if (is_not_internal_port(id)) return NULL; @@ -528,10 +528,10 @@ erts_id2port(Eterm id) if (!prt || prt->common.id != id) return NULL; - erts_smp_port_lock(prt); + erts_port_lock(prt); state = erts_atomic32_read_nob(&prt->state); if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP) { - erts_smp_port_unlock(prt); + erts_port_unlock(prt); return NULL; } @@ -549,7 +549,7 @@ erts_id2port_sflgs(Eterm id, Port *prt; /* Only allowed to be called from managed threads */ - ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread()); + ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread()); if (is_not_internal_port(id)) return NULL; @@ -561,16 +561,16 @@ erts_id2port_sflgs(Eterm id, return NULL; if (no_proc_locks) - erts_smp_port_lock(prt); - else if (erts_smp_port_trylock(prt) == EBUSY) { + erts_port_lock(prt); + else if (erts_port_trylock(prt) == EBUSY) { /* Unlock process locks, and acquire locks in lock order... 
*/ - erts_smp_proc_unlock(c_p, c_p_locks); - erts_smp_port_lock(prt); - erts_smp_proc_lock(c_p, c_p_locks); + erts_proc_unlock(c_p, c_p_locks); + erts_port_lock(prt); + erts_proc_lock(c_p, c_p_locks); } state = erts_atomic32_read_nob(&prt->state); if (state & invalid_sflgs) { - erts_smp_port_unlock(prt); + erts_port_unlock(prt); return NULL; } @@ -581,8 +581,8 @@ ERTS_GLB_INLINE void erts_port_release(Port *prt) { /* Only allowed to be called from managed threads */ - ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread()); - erts_smp_port_unlock(prt); + ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread()); + erts_port_unlock(prt); } /* @@ -689,7 +689,7 @@ erts_thr_drvport2port(ErlDrvPort drvport, int lock_pdl) #ifdef ERTS_ENABLE_LOCK_CHECK if (!ERTS_IS_CRASH_DUMPING) { if (erts_lc_is_emu_thr()) { - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); ERTS_LC_ASSERT(!prt->port_data_lock || erts_lc_mtx_is_locked(&prt->port_data_lock->mtx)); } @@ -718,7 +718,7 @@ erts_drvport2port_state(ErlDrvPort drvport, erts_aint32_t *statep) // ERTS_LC_ASSERT(erts_lc_is_emu_thr()); if (prt == ERTS_INVALID_ERL_DRV_PORT) return ERTS_INVALID_ERL_DRV_PORT; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt) + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt) || ERTS_IS_CRASH_DUMPING); /* * This state check is only needed since a driver callback @@ -775,19 +775,19 @@ erts_port_driver_callback_epilogue(Port *prt, erts_aint32_t *statep) int reds = 0; erts_aint32_t state; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); state = erts_atomic32_read_nob(&prt->state); if ((state & ERTS_PORT_SFLG_CLOSING) && erts_is_port_ioq_empty(prt)) { reds += ERTS_PORT_REDS_TERMINATE; erts_terminate_port(prt); state = erts_atomic32_read_nob(&prt->state); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); } if (prt->xports) { reds += erts_port_handle_xports(prt); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); ASSERT(!prt->xports); } diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c index b13d43af91..1420fb9c06 100644 --- a/erts/emulator/beam/erl_port_task.c +++ b/erts/emulator/beam/erl_port_task.c @@ -83,14 +83,14 @@ static void chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_q #define LTTNG_DRIVER(TRACEPOINT, PP) do {} while(0) #endif -#define ERTS_SMP_LC_VERIFY_RQ(RQ, PP) \ +#define ERTS_LC_VERIFY_RQ(RQ, PP) \ do { \ - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); \ - ERTS_SMP_LC_ASSERT((RQ) == ((ErtsRunQueue *) \ - erts_smp_atomic_read_nob(&(PP)->run_queue))); \ + ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); \ + ERTS_LC_ASSERT((RQ) == ((ErtsRunQueue *) \ + erts_atomic_read_nob(&(PP)->run_queue))); \ } while (0) -erts_smp_atomic_t erts_port_task_outstanding_io_tasks; +erts_atomic_t erts_port_task_outstanding_io_tasks; #define ERTS_PT_STATE_SCHEDULED 0 #define ERTS_PT_STATE_ABORTED 1 @@ -108,7 +108,7 @@ typedef union { } ErtsPortTaskTypeData; struct ErtsPortTask_ { - erts_smp_atomic32_t state; + erts_atomic32_t state; ErtsPortTaskType type; union { struct { @@ -191,7 +191,7 @@ p2p_sig_data_init(ErtsPortTask *ptp) ptp->type = ERTS_PORT_TASK_PROC_SIG; ptp->u.alive.flags = ERTS_PT_FLG_SIG_DEP; - erts_smp_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED); + erts_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED); ASSERT(ptp == p2p_sig_data_to_task(&ptp->u.alive.td.psig.data)); @@ 
-282,7 +282,7 @@ popped_from_busy_queue(Port *pp, ErtsPortTask *ptp, int last) #ifdef DEBUG erts_aint32_t flags = #endif - erts_smp_atomic32_read_band_nob( + erts_atomic32_read_band_nob( &pp->sched.flags, ~ERTS_PTS_FLG_HAVE_BUSY_TASKS); ASSERT(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS); @@ -329,7 +329,7 @@ busy_wait_move_to_busy_queue(Port *pp, ErtsPortTask *ptp) #ifdef DEBUG flags = #endif - erts_smp_atomic32_read_bor_nob(&pp->sched.flags, + erts_atomic32_read_bor_nob(&pp->sched.flags, ERTS_PTS_FLG_HAVE_BUSY_TASKS); ASSERT(!(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS)); @@ -469,7 +469,7 @@ no_sig_dep_move_from_busyq(Port *pp) int bix; erts_aint32_t flags = #endif - erts_smp_atomic32_read_band_nob( + erts_atomic32_read_band_nob( &pp->sched.flags, ~ERTS_PTS_FLG_HAVE_BUSY_TASKS); ASSERT(flags & ERTS_PTS_FLG_HAVE_BUSY_TASKS); @@ -502,11 +502,11 @@ chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_queue) if (!first) { ASSERT(!tabp); ASSERT(!pp->sched.taskq.local.busy.last); - ASSERT(!(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS)); + ASSERT(!(erts_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS)); return; } - ASSERT(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS); + ASSERT(erts_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_HAVE_BUSY_TASKS); ASSERT(tabp); tot_count = 0; @@ -562,13 +562,13 @@ chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_queue) static ERTS_INLINE void reset_port_task_handle(ErtsPortTaskHandle *pthp) { - erts_smp_atomic_set_relb(pthp, (erts_aint_t) NULL); + erts_atomic_set_relb(pthp, (erts_aint_t) NULL); } static ERTS_INLINE ErtsPortTask * handle2task(ErtsPortTaskHandle *pthp) { - return (ErtsPortTask *) erts_smp_atomic_read_acqb(pthp); + return (ErtsPortTask *) erts_atomic_read_acqb(pthp); } static ERTS_INLINE void @@ -595,7 +595,7 @@ set_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp) { ptp->u.alive.handle = pthp; if (pthp) { - erts_smp_atomic_set_relb(pthp, (erts_aint_t) ptp); + erts_atomic_set_relb(pthp, (erts_aint_t) ptp); ASSERT(ptp == handle2task(ptp->u.alive.handle)); } } @@ -609,7 +609,7 @@ set_tmp_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp) * IMPORTANT! Task either need to be aborted, or task handle * need to be detached before thread progress has been made. 
*/ - erts_smp_atomic_set_relb(pthp, (erts_aint_t) ptp); + erts_atomic_set_relb(pthp, (erts_aint_t) ptp); } } @@ -627,20 +627,20 @@ check_unset_busy_port_q(Port *pp, int resume_procs = 0; ASSERT(bpq); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(pp)); erts_port_task_sched_lock(&pp->sched); - qsize = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->size); - low = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low); + qsize = (ErlDrvSizeT) erts_atomic_read_nob(&bpq->size); + low = (ErlDrvSizeT) erts_atomic_read_nob(&bpq->low); if (qsize < low) { erts_aint32_t mask = ~(ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q | ERTS_PTS_FLG_BUSY_PORT_Q); - flags = erts_smp_atomic32_read_band_relb(&pp->sched.flags, mask); + flags = erts_atomic32_read_band_relb(&pp->sched.flags, mask); if ((flags & ERTS_PTS_FLGS_BUSY) == ERTS_PTS_FLG_BUSY_PORT_Q) resume_procs = 1; } else if (flags & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q) { - flags = erts_smp_atomic32_read_band_relb(&pp->sched.flags, + flags = erts_atomic32_read_band_relb(&pp->sched.flags, ~ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q); flags &= ~ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q; } @@ -665,16 +665,16 @@ aborted_proc2port_data(Port *pp, ErlDrvSizeT size) bpq = pp->sched.taskq.bpq; - qsz = (ErlDrvSizeT) erts_smp_atomic_add_read_acqb(&bpq->size, + qsz = (ErlDrvSizeT) erts_atomic_add_read_acqb(&bpq->size, (erts_aint_t) -size); ASSERT(qsz + size > qsz); - flags = erts_smp_atomic32_read_nob(&pp->sched.flags); + flags = erts_atomic32_read_nob(&pp->sched.flags); ASSERT(pp->sched.taskq.bpq); if ((flags & (ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q | ERTS_PTS_FLG_BUSY_PORT_Q)) != ERTS_PTS_FLG_BUSY_PORT_Q) return; - if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low)) - erts_smp_atomic32_read_bor_nob(&pp->sched.flags, + if (qsz < (ErlDrvSizeT) erts_atomic_read_nob(&bpq->low)) + erts_atomic32_read_bor_nob(&pp->sched.flags, ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q); } @@ -692,13 +692,13 @@ dequeued_proc2port_data(Port *pp, ErlDrvSizeT size) bpq = pp->sched.taskq.bpq; - qsz = (ErlDrvSizeT) erts_smp_atomic_add_read_acqb(&bpq->size, + qsz = (ErlDrvSizeT) erts_atomic_add_read_acqb(&bpq->size, (erts_aint_t) -size); ASSERT(qsz + size > qsz); - flags = erts_smp_atomic32_read_nob(&pp->sched.flags); + flags = erts_atomic32_read_nob(&pp->sched.flags); if (!(flags & ERTS_PTS_FLG_BUSY_PORT_Q)) return; - if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_acqb(&bpq->low)) + if (qsz < (ErlDrvSizeT) erts_atomic_read_acqb(&bpq->low)) check_unset_busy_port_q(pp, flags, bpq); } @@ -711,19 +711,19 @@ enqueue_proc2port_data(Port *pp, if (sigdp && bpq) { ErlDrvSizeT size = erts_proc2port_sig_command_data_size(sigdp); if (size) { - erts_aint_t asize = erts_smp_atomic_add_read_acqb(&bpq->size, + erts_aint_t asize = erts_atomic_add_read_acqb(&bpq->size, (erts_aint_t) size); ErlDrvSizeT qsz = (ErlDrvSizeT) asize; ASSERT(qsz - size < qsz); if (!(flags & ERTS_PTS_FLG_BUSY_PORT_Q) && qsz > bpq->high) { - flags = erts_smp_atomic32_read_bor_acqb(&pp->sched.flags, + flags = erts_atomic32_read_bor_acqb(&pp->sched.flags, ERTS_PTS_FLG_BUSY_PORT_Q); flags |= ERTS_PTS_FLG_BUSY_PORT_Q; - qsz = (ErlDrvSizeT) erts_smp_atomic_read_acqb(&bpq->size); - if (qsz < (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low)) { - flags = (erts_smp_atomic32_read_bor_relb( + qsz = (ErlDrvSizeT) erts_atomic_read_acqb(&bpq->size); + if (qsz < (ErlDrvSizeT) erts_atomic_read_nob(&bpq->low)) { + flags = (erts_atomic32_read_bor_relb( &pp->sched.flags, ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q)); flags |= 
ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q; @@ -771,18 +771,18 @@ erl_drv_busy_msgq_limits(ErlDrvPort dport, ErlDrvSizeT *lowp, ErlDrvSizeT *highp erts_aint32_t flags; pp->sched.taskq.bpq = NULL; flags = ~(ERTS_PTS_FLG_BUSY_PORT_Q|ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q); - flags = erts_smp_atomic32_read_band_acqb(&pp->sched.flags, flags); + flags = erts_atomic32_read_band_acqb(&pp->sched.flags, flags); if ((flags & ERTS_PTS_FLGS_BUSY) == ERTS_PTS_FLG_BUSY_PORT_Q) resume_procs = 1; } else { if (!low) - low = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->low); + low = (ErlDrvSizeT) erts_atomic_read_nob(&bpq->low); else { if (bpq->high < low) bpq->high = low; - erts_smp_atomic_set_relb(&bpq->low, (erts_aint_t) low); + erts_atomic_set_relb(&bpq->low, (erts_aint_t) low); written = 1; } @@ -791,19 +791,19 @@ erl_drv_busy_msgq_limits(ErlDrvPort dport, ErlDrvSizeT *lowp, ErlDrvSizeT *highp else { if (low > high) { low = high; - erts_smp_atomic_set_relb(&bpq->low, (erts_aint_t) low); + erts_atomic_set_relb(&bpq->low, (erts_aint_t) low); } bpq->high = high; written = 1; } if (written) { - ErlDrvSizeT size = (ErlDrvSizeT) erts_smp_atomic_read_nob(&bpq->size); + ErlDrvSizeT size = (ErlDrvSizeT) erts_atomic_read_nob(&bpq->size); if (size > high) - erts_smp_atomic32_read_bor_relb(&pp->sched.flags, + erts_atomic32_read_bor_relb(&pp->sched.flags, ERTS_PTS_FLG_BUSY_PORT_Q); else if (size < low) - erts_smp_atomic32_read_bor_relb(&pp->sched.flags, + erts_atomic32_read_bor_relb(&pp->sched.flags, ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q); } } @@ -877,7 +877,7 @@ get_free_nosuspend_handles(Port *pp) { ErtsPortTaskHandleList *nshp, *last_nshp = NULL; - ERTS_SMP_LC_ASSERT(erts_port_task_sched_lock_is_locked(&pp->sched)); + ERTS_LC_ASSERT(erts_port_task_sched_lock_is_locked(&pp->sched)); nshp = pp->sched.taskq.local.busy.nosuspend; @@ -893,7 +893,7 @@ get_free_nosuspend_handles(Port *pp) pp->sched.taskq.local.busy.nosuspend = last_nshp->u.next; last_nshp->u.next = NULL; if (!pp->sched.taskq.local.busy.nosuspend) - erts_smp_atomic32_read_band_nob(&pp->sched.flags, + erts_atomic32_read_band_nob(&pp->sched.flags, ~ERTS_PTS_FLG_HAVE_NS_TASKS); } return nshp; @@ -916,7 +916,7 @@ free_nosuspend_handles(ErtsPortTaskHandleList *free_nshp) static ERTS_INLINE void enqueue_port(ErtsRunQueue *runq, Port *pp) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); pp->sched.next = NULL; if (runq->ports.end) { ASSERT(runq->ports.start); @@ -930,7 +930,7 @@ enqueue_port(ErtsRunQueue *runq, Port *pp) runq->ports.end = pp; ASSERT(runq->ports.start && runq->ports.end); - erts_smp_inc_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL); + erts_inc_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL); if (ERTS_RUNQ_FLGS_GET_NOB(runq) & ERTS_RUNQ_FLG_HALTING) erts_non_empty_runq(runq); @@ -940,7 +940,7 @@ static ERTS_INLINE Port * pop_port(ErtsRunQueue *runq) { Port *pp = runq->ports.start; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); if (!pp) { ASSERT(!runq->ports.end); } @@ -950,7 +950,7 @@ pop_port(ErtsRunQueue *runq) ASSERT(runq->ports.end == pp); runq->ports.end = NULL; } - erts_smp_dec_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL); + erts_dec_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL); } ASSERT(runq->ports.start || !runq->ports.end); @@ -977,7 +977,7 @@ enqueue_task(Port *pp, if (ns_pthlp) fail_flags |= ERTS_PTS_FLG_BUSY_PORT; erts_port_task_sched_lock(&pp->sched); - flags = 
erts_smp_atomic32_read_nob(&pp->sched.flags); + flags = erts_atomic32_read_nob(&pp->sched.flags); if (flags & fail_flags) res = 0; else { @@ -1008,7 +1008,7 @@ enqueue_task(Port *pp, static ERTS_INLINE void prepare_exec(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) { - erts_aint32_t act = erts_smp_atomic32_read_nob(&pp->sched.flags); + erts_aint32_t act = erts_atomic32_read_nob(&pp->sched.flags); if (!pp->sched.taskq.local.busy.first || (act & ERTS_PTS_FLG_BUSY_PORT)) { *execqp = pp->sched.taskq.local.first; @@ -1029,7 +1029,7 @@ prepare_exec(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) new &= ~ERTS_PTS_FLG_IN_RUNQ; new |= ERTS_PTS_FLG_EXEC; - act = erts_smp_atomic32_cmpxchg_nob(&pp->sched.flags, new, exp); + act = erts_atomic32_cmpxchg_nob(&pp->sched.flags, new, exp); ASSERT(act & ERTS_PTS_FLG_IN_RUNQ); @@ -1056,7 +1056,7 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q) *execq = NULL; - act = erts_smp_atomic32_read_nob(&pp->sched.flags); + act = erts_atomic32_read_nob(&pp->sched.flags); if (act & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q) act = check_unset_busy_port_q(pp, act, pp->sched.taskq.bpq); @@ -1073,7 +1073,7 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q) if (act & ERTS_PTS_FLG_HAVE_TASKS) new |= ERTS_PTS_FLG_IN_RUNQ; - act = erts_smp_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp); + act = erts_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp); ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_IN_RUNQ)); ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_EXEC_IMM)); @@ -1099,7 +1099,7 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q) static ERTS_INLINE erts_aint32_t select_queue_for_exec(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) { - erts_aint32_t flags = erts_smp_atomic32_read_nob(&pp->sched.flags); + erts_aint32_t flags = erts_atomic32_read_nob(&pp->sched.flags); if (flags & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q) flags = check_unset_busy_port_q(pp, flags, pp->sched.taskq.bpq); @@ -1209,7 +1209,7 @@ fetch_in_queue(Port *pp, ErtsPortTask **execqp) if (ptp) *execqp = ptp->u.alive.next; else - erts_smp_atomic32_read_band_nob(&pp->sched.flags, + erts_atomic32_read_band_nob(&pp->sched.flags, ~ERTS_PTS_FLG_HAVE_TASKS); @@ -1272,7 +1272,7 @@ erl_drv_consume_timeslice(ErlDrvPort dprt, int percent) void erts_port_task_tmp_handle_detach(ErtsPortTaskHandle *pthp) { - ERTS_SMP_LC_ASSERT(erts_thr_progress_lc_is_delaying()); + ERTS_LC_ASSERT(erts_thr_progress_lc_is_delaying()); reset_port_task_handle(pthp); } @@ -1295,14 +1295,14 @@ erts_port_task_abort(ErtsPortTaskHandle *pthp) #ifdef DEBUG ErtsPortTaskHandle *saved_pthp = ptp->u.alive.handle; - ERTS_SMP_READ_MEMORY_BARRIER; - old_state = erts_smp_atomic32_read_nob(&ptp->state); + ERTS_THR_READ_MEMORY_BARRIER; + old_state = erts_atomic32_read_nob(&ptp->state); if (old_state == ERTS_PT_STATE_SCHEDULED) { ASSERT(!saved_pthp || saved_pthp == pthp); } #endif - old_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state, + old_state = erts_atomic32_cmpxchg_nob(&ptp->state, ERTS_PT_STATE_ABORTED, ERTS_PT_STATE_SCHEDULED); if (old_state != ERTS_PT_STATE_SCHEDULED) @@ -1315,9 +1315,9 @@ erts_port_task_abort(ErtsPortTaskHandle *pthp) case ERTS_PORT_TASK_INPUT: case ERTS_PORT_TASK_OUTPUT: case ERTS_PORT_TASK_EVENT: - ASSERT(erts_smp_atomic_read_nob( + ASSERT(erts_atomic_read_nob( &erts_port_task_outstanding_io_tasks) > 0); - erts_smp_atomic_dec_relb(&erts_port_task_outstanding_io_tasks); + erts_atomic_dec_relb(&erts_port_task_outstanding_io_tasks); break; default: break; @@ -1339,7 
+1339,7 @@ erts_port_task_abort_nosuspend_tasks(Port *pp) ErtsThrPrgrDelayHandle dhndl = ERTS_THR_PRGR_DHANDLE_INVALID; erts_port_task_sched_lock(&pp->sched); - erts_smp_atomic32_read_band_nob(&pp->sched.flags, + erts_atomic32_read_band_nob(&pp->sched.flags, ~ERTS_PTS_FLG_HAVE_NS_TASKS); abort_list = pp->sched.taskq.local.busy.nosuspend; pp->sched.taskq.local.busy.nosuspend = NULL; @@ -1373,14 +1373,14 @@ erts_port_task_abort_nosuspend_tasks(Port *pp) #ifdef DEBUG saved_pthp = ptp->u.alive.handle; - ERTS_SMP_READ_MEMORY_BARRIER; - old_state = erts_smp_atomic32_read_nob(&ptp->state); + ERTS_THR_READ_MEMORY_BARRIER; + old_state = erts_atomic32_read_nob(&ptp->state); if (old_state == ERTS_PT_STATE_SCHEDULED) { ASSERT(saved_pthp == pthp); } #endif - old_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state, + old_state = erts_atomic32_cmpxchg_nob(&ptp->state, ERTS_PT_STATE_ABORTED, ERTS_PT_STATE_SCHEDULED); if (old_state != ERTS_PT_STATE_SCHEDULED) { @@ -1447,7 +1447,7 @@ erts_port_task_schedule(Eterm id, ptp->type = type; ptp->u.alive.flags = 0; - erts_smp_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED); + erts_atomic32_init_nob(&ptp->state, ERTS_PT_STATE_SCHEDULED); set_handle(ptp, pthp); } @@ -1459,7 +1459,7 @@ erts_port_task_schedule(Eterm id, va_start(argp, type); ptp->u.alive.td.io.event = va_arg(argp, ErlDrvEvent); va_end(argp); - erts_smp_atomic_inc_relb(&erts_port_task_outstanding_io_tasks); + erts_atomic_inc_relb(&erts_port_task_outstanding_io_tasks); break; } case ERTS_PORT_TASK_EVENT: { @@ -1468,7 +1468,7 @@ erts_port_task_schedule(Eterm id, ptp->u.alive.td.io.event = va_arg(argp, ErlDrvEvent); ptp->u.alive.td.io.event_data = va_arg(argp, ErlDrvEventData); va_end(argp); - erts_smp_atomic_inc_relb(&erts_port_task_outstanding_io_tasks); + erts_atomic_inc_relb(&erts_port_task_outstanding_io_tasks); break; } case ERTS_PORT_TASK_PROC_SIG: { @@ -1520,7 +1520,7 @@ erts_port_task_schedule(Eterm id, if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC))) new |= ERTS_PTS_FLG_IN_RUNQ; - act = erts_smp_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp); + act = erts_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp); if (exp == act) { if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC))) @@ -1546,12 +1546,12 @@ erts_port_task_schedule(Eterm id, ERTS_INTERNAL_ERROR("Missing run-queue"); xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL); - ERTS_SMP_LC_ASSERT(runq != xrunq); - ERTS_SMP_LC_VERIFY_RQ(runq, pp); + ERTS_LC_ASSERT(runq != xrunq); + ERTS_LC_VERIFY_RQ(runq, pp); if (xrunq) { /* Emigrate port ... 
*/ - erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); - erts_smp_runq_unlock(runq); + erts_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); + erts_runq_unlock(runq); runq = erts_port_runq(pp); if (!runq) ERTS_INTERNAL_ERROR("Missing run-queue"); @@ -1559,9 +1559,9 @@ erts_port_task_schedule(Eterm id, enqueue_port(runq, pp); - erts_smp_runq_unlock(runq); + erts_runq_unlock(runq); - erts_smp_notify_inc_runq(runq); + erts_notify_inc_runq(runq); done: @@ -1611,14 +1611,14 @@ erts_port_task_free_port(Port *pp) erts_aint32_t flags; ErtsRunQueue *runq; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(pp)); ASSERT(!(erts_atomic32_read_nob(&pp->state) & ERTS_PORT_SFLGS_DEAD)); runq = erts_port_runq(pp); if (!runq) ERTS_INTERNAL_ERROR("Missing run-queue"); erts_port_task_sched_lock(&pp->sched); - flags = erts_smp_atomic32_read_bor_relb(&pp->sched.flags, + flags = erts_atomic32_read_bor_relb(&pp->sched.flags, ERTS_PTS_FLG_EXIT); erts_port_task_sched_unlock(&pp->sched); erts_atomic32_read_bset_relb(&pp->state, @@ -1628,7 +1628,7 @@ erts_port_task_free_port(Port *pp) | ERTS_PORT_SFLG_FREE), ERTS_PORT_SFLG_FREE); - erts_smp_runq_unlock(runq); + erts_runq_unlock(runq); if (!(flags & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC))) begin_port_cleanup(pp, NULL, NULL); @@ -1658,7 +1658,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) ErtsSchedulerData *esdp = runq->scheduler; ERTS_MSACC_PUSH_STATE_M(); - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); pp = pop_port(runq); if (!pp) { @@ -1666,9 +1666,9 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) goto done; } - ERTS_SMP_LC_VERIFY_RQ(runq, pp); + ERTS_LC_VERIFY_RQ(runq, pp); - erts_smp_runq_unlock(runq); + erts_runq_unlock(runq); *curr_port_pp = pp; @@ -1676,19 +1676,19 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) Uint old = ERTS_PORT_SCHED_ID(pp, esdp->no); int migrated = old && old != esdp->no; - erts_smp_spin_lock(&erts_sched_stat.lock); + erts_spin_lock(&erts_sched_stat.lock); erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_executed++; erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].executed++; if (migrated) { erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_migrated++; erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].migrated++; } - erts_smp_spin_unlock(&erts_sched_stat.lock); + erts_spin_unlock(&erts_sched_stat.lock); } prepare_exec(pp, &execq, &processing_busy_q); - erts_smp_port_lock(pp); + erts_port_lock(pp); /* trace port scheduling, in */ if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) { @@ -1710,7 +1710,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) if (!ptp) break; - task_state = erts_smp_atomic32_cmpxchg_nob(&ptp->state, + task_state = erts_atomic32_cmpxchg_nob(&ptp->state, ERTS_PT_STATE_EXECUTING, ERTS_PT_STATE_SCHEDULED); if (task_state != ERTS_PT_STATE_SCHEDULED) { @@ -1722,8 +1722,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) start_time = erts_timestamp_millis(); } - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(pp)); + ERTS_CHK_NO_PROC_LOCKS; ASSERT(pp->drv_ptr); switch (ptp->type) { @@ -1842,13 +1842,13 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) if (io_tasks_executed) { - ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) + ASSERT(erts_atomic_read_nob(&erts_port_task_outstanding_io_tasks) >= io_tasks_executed); - 
erts_smp_atomic_add_relb(&erts_port_task_outstanding_io_tasks, + erts_atomic_add_relb(&erts_port_task_outstanding_io_tasks, -1*io_tasks_executed); } - ASSERT(runq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue)); + ASSERT(runq == (ErtsRunQueue *) erts_atomic_read_nob(&pp->run_queue)); active = finalize_exec(pp, &execq, processing_busy_q); @@ -1858,7 +1858,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) *curr_port_pp = NULL; - erts_smp_runq_lock(runq); + erts_runq_lock(runq); if (active) { ErtsRunQueue *xrunq; @@ -1866,34 +1866,34 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) ASSERT(!(erts_atomic32_read_nob(&pp->state) & ERTS_PORT_SFLGS_DEAD)); xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL); - ERTS_SMP_LC_ASSERT(runq != xrunq); - ERTS_SMP_LC_VERIFY_RQ(runq, pp); + ERTS_LC_ASSERT(runq != xrunq); + ERTS_LC_VERIFY_RQ(runq, pp); if (!xrunq) { enqueue_port(runq, pp); /* No need to notify ourselves about inc in runq. */ } else { /* Emigrate port... */ - erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); - erts_smp_runq_unlock(runq); + erts_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); + erts_runq_unlock(runq); xrunq = erts_port_runq(pp); ASSERT(xrunq); enqueue_port(xrunq, pp); - erts_smp_runq_unlock(xrunq); - erts_smp_notify_inc_runq(xrunq); + erts_runq_unlock(xrunq); + erts_notify_inc_runq(xrunq); - erts_smp_runq_lock(runq); + erts_runq_lock(runq); } } done: - res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) + res = (erts_atomic_read_nob(&erts_port_task_outstanding_io_tasks) != (erts_aint_t) 0); runq->scheduler->reductions += reds; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); ERTS_PORT_REDUCTIONS_EXECUTED(esdp, runq, reds); return res; @@ -1924,7 +1924,7 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) ErtsPortTaskHandleList *free_nshp = NULL; ErtsProcList *plp; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(pp)); /* * Abort remaining tasks... @@ -1997,11 +1997,11 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) qs[i] = ptp->u.alive.next; /* Normal case here is aborted tasks... 
*/ - state = erts_smp_atomic32_read_nob(&ptp->state); + state = erts_atomic32_read_nob(&ptp->state); if (state == ERTS_PT_STATE_ABORTED) goto aborted_port_task; - state = erts_smp_atomic32_cmpxchg_nob(&ptp->state, + state = erts_atomic32_cmpxchg_nob(&ptp->state, ERTS_PT_STATE_EXECUTING, ERTS_PT_STATE_SCHEDULED); if (state != ERTS_PT_STATE_SCHEDULED) { @@ -2065,7 +2065,7 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) } } - erts_smp_atomic32_read_band_nob(&pp->sched.flags, + erts_atomic32_read_band_nob(&pp->sched.flags, ~(ERTS_PTS_FLG_HAVE_BUSY_TASKS |ERTS_PTS_FLG_HAVE_TASKS |ERTS_PTS_FLGS_BUSY)); @@ -2122,9 +2122,9 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p) void erts_enqueue_port(ErtsRunQueue *rq, Port *pp) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); - ASSERT(rq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue)); - ASSERT(erts_smp_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_IN_RUNQ); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); + ASSERT(rq == (ErtsRunQueue *) erts_atomic_read_nob(&pp->run_queue)); + ASSERT(erts_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_IN_RUNQ); enqueue_port(rq, pp); } @@ -2132,11 +2132,11 @@ Port * erts_dequeue_port(ErtsRunQueue *rq) { Port *pp; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); pp = pop_port(rq); ASSERT(!pp - || rq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue)); - ASSERT(!pp || (erts_smp_atomic32_read_nob(&pp->sched.flags) + || rq == (ErtsRunQueue *) erts_atomic_read_nob(&pp->run_queue)); + ASSERT(!pp || (erts_atomic32_read_nob(&pp->sched.flags) & ERTS_PTS_FLG_IN_RUNQ)); return pp; } @@ -2148,7 +2148,7 @@ erts_dequeue_port(ErtsRunQueue *rq) void erts_port_task_init(void) { - erts_smp_atomic_init_nob(&erts_port_task_outstanding_io_tasks, + erts_atomic_init_nob(&erts_port_task_outstanding_io_tasks, (erts_aint_t) 0); init_port_task_alloc(); init_busy_caller_table_alloc(); diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h index 1a06041d8e..561f4ca936 100644 --- a/erts/emulator/beam/erl_port_task.h +++ b/erts/emulator/beam/erl_port_task.h @@ -27,11 +27,11 @@ #ifndef ERTS_PORT_TASK_H_BASIC_TYPES__ #define ERTS_PORT_TASK_H_BASIC_TYPES__ #include "erl_sys_driver.h" -#include "erl_smp.h" +#include "erl_threads.h" #define ERL_PORT_GET_PORT_TYPE_ONLY__ #include "erl_port.h" #undef ERL_PORT_GET_PORT_TYPE_ONLY__ -typedef erts_smp_atomic_t ErtsPortTaskHandle; +typedef erts_atomic_t ErtsPortTaskHandle; #endif #ifndef ERTS_PORT_TASK_ONLY_BASIC_TYPES__ @@ -64,7 +64,7 @@ typedef enum { #ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS /* NOTE: Do not access any of the exported variables directly */ -extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks; +extern erts_atomic_t erts_port_task_outstanding_io_tasks; #endif #define ERTS_PTS_FLG_IN_RUNQ (((erts_aint32_t) 1) << 0) @@ -98,8 +98,8 @@ extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks; typedef struct { ErlDrvSizeT high; - erts_smp_atomic_t low; - erts_smp_atomic_t size; + erts_atomic_t low; + erts_atomic_t size; } ErtsPortTaskBusyPortQ; typedef struct ErtsPortTask_ ErtsPortTask; @@ -124,7 +124,7 @@ typedef struct { } in; ErtsPortTaskBusyPortQ *bpq; } taskq; - erts_smp_atomic32_t flags; + erts_atomic32_t flags; erts_mtx_t mtx; } ErtsPortTaskSched; @@ -149,13 +149,13 @@ ERTS_GLB_INLINE int erts_port_task_have_outstanding_io_tasks(void); ERTS_GLB_INLINE void erts_port_task_handle_init(ErtsPortTaskHandle 
*pthp) { - erts_smp_atomic_init_nob(pthp, (erts_aint_t) NULL); + erts_atomic_init_nob(pthp, (erts_aint_t) NULL); } ERTS_GLB_INLINE int erts_port_task_is_scheduled(ErtsPortTaskHandle *pthp) { - return ((void *) erts_smp_atomic_read_acqb(pthp)) != NULL; + return ((void *) erts_atomic_read_acqb(pthp)) != NULL; } ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp, @@ -163,9 +163,9 @@ ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp, { if (bpq) { erts_aint_t low = (erts_aint_t) ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_LOW; - erts_smp_atomic_init_nob(&bpq->low, low); + erts_atomic_init_nob(&bpq->low, low); bpq->high = (ErlDrvSizeT) ERTS_PORT_TASK_DEFAULT_BUSY_PORT_Q_HIGH; - erts_smp_atomic_init_nob(&bpq->size, (erts_aint_t) 0); + erts_atomic_init_nob(&bpq->size, (erts_aint_t) 0); } ptsp->taskq.bpq = bpq; } @@ -182,7 +182,7 @@ erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id) ptsp->taskq.local.first = NULL; ptsp->taskq.in.first = NULL; ptsp->taskq.in.last = NULL; - erts_smp_atomic32_init_nob(&ptsp->flags, 0); + erts_atomic32_init_nob(&ptsp->flags, 0); erts_mtx_init(&ptsp->mtx, lock_str, instr_id, ERTS_LOCK_FLAGS_CATEGORY_IO); } @@ -218,7 +218,7 @@ erts_port_task_fini_sched(ErtsPortTaskSched *ptsp) ERTS_GLB_INLINE void erts_port_task_sched_enter_exiting_state(ErtsPortTaskSched *ptsp) { - erts_smp_atomic32_read_bor_nob(&ptsp->flags, ERTS_PTS_FLG_EXITING); + erts_atomic32_read_bor_nob(&ptsp->flags, ERTS_PTS_FLG_EXITING); } #ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS @@ -226,7 +226,7 @@ erts_port_task_sched_enter_exiting_state(ErtsPortTaskSched *ptsp) ERTS_GLB_INLINE int erts_port_task_have_outstanding_io_tasks(void) { - return (erts_smp_atomic_read_acqb(&erts_port_task_outstanding_io_tasks) + return (erts_atomic_read_acqb(&erts_port_task_outstanding_io_tasks) != 0); } diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index 8a218d9d69..88b2bda59c 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -135,8 +135,8 @@ runq_got_work_to_execute(ErtsRunQueue *rq) #undef RUNQ_READ_RQ #undef RUNQ_SET_RQ -#define RUNQ_READ_RQ(X) ((ErtsRunQueue *) erts_smp_atomic_read_nob((X))) -#define RUNQ_SET_RQ(X, RQ) erts_smp_atomic_set_nob((X), (erts_aint_t) (RQ)) +#define RUNQ_READ_RQ(X) ((ErtsRunQueue *) erts_atomic_read_nob((X))) +#define RUNQ_SET_RQ(X, RQ) erts_atomic_set_nob((X), (erts_aint_t) (RQ)) #ifdef DEBUG # if defined(ARCH_64) @@ -230,11 +230,11 @@ typedef struct { } ErtsSchedTypeCounters; static struct { - erts_smp_mtx_t mtx; + erts_mtx_t mtx; ErtsSchedTypeCounters online; ErtsSchedTypeCounters curr_online; ErtsSchedTypeCounters active; - erts_smp_atomic32_t changing; + erts_atomic32_t changing; ErtsProcList *chngq; Eterm changer; ErtsMultiSchedulingBlock nmsb; /* Normal multi Scheduling Block */ @@ -359,11 +359,11 @@ schdlr_sspnd_set_nscheds(ErtsSchedTypeCounters *valp, } static struct { - erts_smp_mtx_t update_mtx; - erts_smp_atomic32_t no_runqs; + erts_mtx_t update_mtx; + erts_atomic32_t no_runqs; int last_active_runqs; int forced_check_balance; - erts_smp_atomic32_t checking_balance; + erts_atomic32_t checking_balance; int halftime; int full_reds_history_index; struct { @@ -386,10 +386,10 @@ erts_sched_stat_t erts_sched_stat; static erts_tsd_key_t ERTS_WRITE_UNLIKELY(sched_data_key); -static erts_smp_atomic32_t function_calls; +static erts_atomic32_t function_calls; -static erts_smp_atomic32_t doing_sys_schedule; -static erts_smp_atomic32_t no_empty_run_queues; +static 
erts_atomic32_t doing_sys_schedule; +static erts_atomic32_t no_empty_run_queues; long erts_runq_supervision_interval = 0; static ethr_event runq_supervision_event; static erts_tid_t runq_supervisor_tid; @@ -402,11 +402,11 @@ Uint ERTS_WRITE_UNLIKELY(erts_no_run_queues); struct { union { - erts_smp_atomic32_t active; + erts_atomic32_t active; char align__[ERTS_CACHE_LINE_SIZE]; } cpu; union { - erts_smp_atomic32_t active; + erts_atomic32_t active; char align__[ERTS_CACHE_LINE_SIZE]; } io; } dirty_count erts_align_attribute(ERTS_CACHE_LINE_SIZE); @@ -418,7 +418,7 @@ dirty_active(ErtsSchedulerData *esdp, erts_aint32_t add) { #ifdef ERTS_DIRTY_SCHEDULERS erts_aint32_t val; - erts_smp_atomic32_t *ap; + erts_atomic32_t *ap; switch (esdp->type) { case ERTS_SCHED_DIRTY_CPU: ap = &dirty_count.cpu.active; @@ -436,11 +436,11 @@ dirty_active(ErtsSchedulerData *esdp, erts_aint32_t add) * All updates done under run-queue lock, so * no inc or dec needed... */ - ERTS_SMP_ASSERT(erts_smp_lc_runq_is_locked(esdp->run_queue)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(esdp->run_queue)); - val = erts_smp_atomic32_read_nob(ap); + val = erts_atomic32_read_nob(ap); val += add; - erts_smp_atomic32_set_nob(ap, val); + erts_atomic32_set_nob(ap, val); #endif } @@ -552,9 +552,9 @@ do { \ int ix__; \ for (ix__ = 0; ix__ < erts_no_run_queues; ix__++) { \ RQVAR = ERTS_RUNQ_IX(ix__); \ - erts_smp_runq_lock(RQVAR); \ + erts_runq_lock(RQVAR); \ { DO; } \ - erts_smp_runq_unlock(RQVAR); \ + erts_runq_unlock(RQVAR); \ } \ } while (0) @@ -564,12 +564,12 @@ do { \ int ix__; \ int online__ = (int) schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, \ ERTS_SCHED_NORMAL); \ - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&schdlr_sspnd.mtx)); \ + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&schdlr_sspnd.mtx)); \ for (ix__ = 0; ix__ < online__; ix__++) { \ RQVAR = ERTS_RUNQ_IX(ix__); \ - erts_smp_runq_lock(RQVAR); \ + erts_runq_lock(RQVAR); \ { DO; } \ - erts_smp_runq_unlock(RQVAR); \ + erts_runq_unlock(RQVAR); \ } \ } while (0) @@ -580,12 +580,12 @@ do { \ int ix__; \ for (ix__ = 0; ix__ < nrqs; ix__++) { \ RQVAR = ERTS_RUNQ_IX(ix__); \ - erts_smp_runq_lock(RQVAR); \ + erts_runq_lock(RQVAR); \ { DO; } \ } \ { DOX; } \ for (ix__ = 0; ix__ < nrqs; ix__++) \ - erts_smp_runq_unlock(ERTS_RUNQ_IX(ix__)); \ + erts_runq_unlock(ERTS_RUNQ_IX(ix__)); \ } while (0) #define ERTS_ATOMIC_FOREACH_RUNQ(RQVAR, DO) \ @@ -662,9 +662,9 @@ static void wake_scheduler(ErtsRunQueue *rq); #if defined(ERTS_ENABLE_LOCK_CHECK) int -erts_smp_lc_runq_is_locked(ErtsRunQueue *runq) +erts_lc_runq_is_locked(ErtsRunQueue *runq) { - return erts_smp_lc_mtx_is_locked(&runq->mtx); + return erts_lc_mtx_is_locked(&runq->mtx); } #endif @@ -672,13 +672,13 @@ erts_smp_lc_runq_is_locked(ErtsRunQueue *runq) static ERTS_INLINE Uint64 ensure_later_proc_interval(Uint64 interval) { - return erts_smp_ensure_later_interval_nob(erts_ptab_interval(&erts_proc), interval); + return erts_ensure_later_interval_nob(erts_ptab_interval(&erts_proc), interval); } Uint64 erts_get_proc_interval(void) { - return erts_smp_current_interval_nob(erts_ptab_interval(&erts_proc)); + return erts_current_interval_nob(erts_ptab_interval(&erts_proc)); } Uint64 @@ -690,7 +690,7 @@ erts_ensure_later_proc_interval(Uint64 interval) Uint64 erts_step_proc_interval(void) { - return erts_smp_step_interval_nob(erts_ptab_interval(&erts_proc)); + return erts_step_interval_nob(erts_ptab_interval(&erts_proc)); } void @@ -815,7 +815,7 @@ erts_late_init_process(void) { int ix; - erts_smp_spinlock_init(&erts_sched_stat.lock, "sched_stat", 
NIL, + erts_spinlock_init(&erts_sched_stat.lock, "sched_stat", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER); for (ix = 0; ix < ERTS_NO_PRIO_LEVELS; ix++) { @@ -1015,14 +1015,14 @@ erts_get_sched_util(ErtsRunQueue *rq, int initially_locked, int short_interval) if (!locked) { if (++try >= ERTS_GET_AVG_MAX_UNLOCKED_TRY) { /* Writer will eventually block on runq-lock */ - erts_smp_runq_lock(rq); + erts_runq_lock(rq); locked = 1; } } } if (!initially_locked && locked) - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); now = sched_wall_time_ts(); worktime = calc_sched_worktime(is_working, now, last, interval, old_worktime); @@ -1219,7 +1219,7 @@ typedef struct { Eterm ref; Eterm ref_heap[ERTS_REF_THING_SIZE]; Uint req_sched; - erts_smp_atomic32_t refc; + erts_atomic32_t refc; #ifdef ERTS_DIRTY_SCHEDULERS int want_dirty_cpu; int want_dirty_io; @@ -1231,7 +1231,7 @@ typedef struct { Eterm ref; Eterm ref_heap[ERTS_REF_THING_SIZE]; Uint req_sched; - erts_smp_atomic32_t refc; + erts_atomic32_t refc; } ErtsSystemCheckReq; @@ -1413,11 +1413,11 @@ reply_sched_wall_time(void *vswtrp) rp_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); erts_proc_dec_refc(rp); - if (erts_smp_atomic32_dec_read_nob(&swtrp->refc) == 0) + if (erts_atomic32_dec_read_nob(&swtrp->refc) == 0) swtreq_free(vswtrp); } @@ -1448,7 +1448,7 @@ erts_sched_wall_time_request(Process *c_p, int set, int enable, swtrp->want_dirty_cpu = want_dirty_cpu; swtrp->want_dirty_io = want_dirty_io; #endif - erts_smp_atomic32_init_nob(&swtrp->refc, + erts_atomic32_init_nob(&swtrp->refc, (erts_aint32_t) erts_no_schedulers); erts_proc_add_refc(c_p, (Sint32) erts_no_schedulers); @@ -1491,11 +1491,11 @@ reply_system_check(void *vscrp) rp_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); erts_proc_dec_refc(rp); - if (erts_smp_atomic32_dec_read_nob(&scrp->refc) == 0) + if (erts_atomic32_dec_read_nob(&scrp->refc) == 0) screq_free(vscrp); } @@ -1513,7 +1513,7 @@ Eterm erts_system_check_request(Process *c_p) { scrp->proc = c_p; scrp->ref = STORE_NC(&hp, NULL, ref); scrp->req_sched = esdp->no; - erts_smp_atomic32_init_nob(&scrp->refc, (erts_aint32_t) erts_no_schedulers); + erts_atomic32_init_nob(&scrp->refc, (erts_aint32_t) erts_no_schedulers); erts_proc_add_refc(c_p, (Sint) erts_no_schedulers); @@ -1576,7 +1576,7 @@ erts_psd_set_init(Process *p, int ix, void *data) for (i = 0; i < ERTS_PSD_SIZE; i++) new_psd->data[i] = NULL; - psd = (ErtsPSD *) erts_smp_atomic_cmpxchg_mb(&p->psd, + psd = (ErtsPSD *) erts_atomic_cmpxchg_mb(&p->psd, (erts_aint_t) new_psd, (erts_aint_t) NULL); if (psd) @@ -2472,7 +2472,7 @@ notify_reap_ports_relb(void) } } -erts_smp_atomic32_t erts_halt_progress; +erts_atomic32_t erts_halt_progress; int erts_halt_code; static ERTS_INLINE erts_aint32_t @@ -2481,9 +2481,9 @@ handle_reap_ports(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting) unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_REAP_PORTS); ERTS_RUNQ_FLGS_SET(awdp->esdp->run_queue, ERTS_RUNQ_FLG_HALTING); - if (erts_smp_atomic32_dec_read_acqb(&erts_halt_progress) == 0) { + if (erts_atomic32_dec_read_acqb(&erts_halt_progress) == 0) { int i, max = erts_ptab_max(&erts_port); - erts_smp_atomic32_set_nob(&erts_halt_progress, 1); + erts_atomic32_set_nob(&erts_halt_progress, 1); for (i = 0; i < max; i++) { erts_aint32_t state; Port *prt = erts_pix2port(i); @@ -2496,21 +2496,21 @@ handle_reap_ports(ErtsAuxWorkData *awdp, 
erts_aint32_t aux_work, int waiting) /* We need to set the halt flag - get the port lock */ - erts_smp_port_lock(prt); + erts_port_lock(prt); state = erts_atomic32_read_nob(&prt->state); if (!(state & (ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP | ERTS_PORT_SFLG_HALT))) { state = erts_atomic32_read_bor_relb(&prt->state, ERTS_PORT_SFLG_HALT); - erts_smp_atomic32_inc_nob(&erts_halt_progress); + erts_atomic32_inc_nob(&erts_halt_progress); if (!(state & (ERTS_PORT_SFLG_EXITING|ERTS_PORT_SFLG_CLOSING))) erts_deliver_port_exit(prt, prt->common.id, am_killed, 0, 1); } erts_port_release(prt); } - if (erts_smp_atomic32_dec_read_nob(&erts_halt_progress) == 0) { + if (erts_atomic32_dec_read_nob(&erts_halt_progress) == 0) { erts_flush_async_exit(erts_halt_code, ""); } } @@ -2581,10 +2581,10 @@ handle_pending_exiters(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin rq = awdp->esdp->run_queue; unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_PENDING_EXITERS); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); pnd_xtrs = rq->procs.pending_exiters; rq->procs.pending_exiters = NULL; - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); if (erts_proclist_fetch(&pnd_xtrs, NULL)) do_handle_pending_exiters(pnd_xtrs); @@ -2866,7 +2866,7 @@ erts_set_aux_work_timeout(int ix, erts_aint32_t type, int enable) static ERTS_INLINE void sched_waiting_sys(Uint no, ErtsRunQueue *rq) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); ASSERT(rq->waiting >= 0); (void) ERTS_RUNQ_FLGS_SET(rq, (ERTS_RUNQ_FLG_OUT_OF_WORK | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK)); @@ -2880,7 +2880,7 @@ sched_waiting_sys(Uint no, ErtsRunQueue *rq) static ERTS_INLINE void sched_active_sys(Uint no, ErtsRunQueue *rq) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); @@ -2905,13 +2905,13 @@ erts_active_schedulers(void) static ERTS_INLINE void clear_sys_scheduling(void) { - erts_smp_atomic32_set_mb(&doing_sys_schedule, 0); + erts_atomic32_set_mb(&doing_sys_schedule, 0); } static ERTS_INLINE int try_set_sys_scheduling(void) { - return 0 == erts_smp_atomic32_cmpxchg_acqb(&doing_sys_schedule, 1, 0); + return 0 == erts_atomic32_cmpxchg_acqb(&doing_sys_schedule, 1, 0); } @@ -2936,7 +2936,7 @@ prepare_for_sys_schedule(int non_blocking) static ERTS_INLINE void sched_change_waiting_sys_to_waiting(Uint no, ErtsRunQueue *rq) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); @@ -2947,7 +2947,7 @@ sched_change_waiting_sys_to_waiting(Uint no, ErtsRunQueue *rq) static ERTS_INLINE void sched_waiting(Uint no, ErtsRunQueue *rq) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); (void) ERTS_RUNQ_FLGS_SET(rq, (ERTS_RUNQ_FLG_OUT_OF_WORK | ERTS_RUNQ_FLG_HALFTIME_OUT_OF_WORK)); if (rq->waiting < 0) @@ -2962,7 +2962,7 @@ sched_waiting(Uint no, ErtsRunQueue *rq) static ERTS_INLINE void sched_active(Uint no, ErtsRunQueue *rq) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); if (rq->waiting < 0) rq->waiting++; else @@ -2976,7 +2976,7 @@ empty_runq_aux(ErtsRunQueue *rq, Uint32 old_flags) { if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && old_flags & ERTS_RUNQ_FLG_NONEMPTY) { #ifdef DEBUG - erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues); + erts_aint32_t empty = erts_atomic32_read_nob(&no_empty_run_queues); /* * For a short period of time 
no_empty_run_queues may have * been increased twice for a specific run queue. @@ -2984,9 +2984,9 @@ empty_runq_aux(ErtsRunQueue *rq, Uint32 old_flags) ASSERT(0 <= empty && empty < 2*erts_no_run_queues); #endif if (!erts_runq_supervision_interval) - erts_smp_atomic32_inc_relb(&no_empty_run_queues); + erts_atomic32_inc_relb(&no_empty_run_queues); else { - erts_smp_atomic32_inc_mb(&no_empty_run_queues); + erts_atomic32_inc_mb(&no_empty_run_queues); if (erts_atomic_read_nob(&runq_supervisor_sleeping)) ethr_event_set(&runq_supervision_event); } @@ -3016,7 +3016,7 @@ non_empty_runq(ErtsRunQueue *rq) Uint32 old_flags = ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_NONEMPTY); if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && (!(old_flags & ERTS_RUNQ_FLG_NONEMPTY))) { #ifdef DEBUG - erts_aint32_t empty = erts_smp_atomic32_read_nob(&no_empty_run_queues); + erts_aint32_t empty = erts_atomic32_read_nob(&no_empty_run_queues); /* * For a short period of time no_empty_run_queues may have * been increased twice for a specific run queue. @@ -3024,10 +3024,10 @@ non_empty_runq(ErtsRunQueue *rq) ASSERT(0 < empty && empty <= 2*erts_no_run_queues); #endif if (!erts_runq_supervision_interval) - erts_smp_atomic32_dec_relb(&no_empty_run_queues); + erts_atomic32_dec_relb(&no_empty_run_queues); else { erts_aint32_t no; - no = erts_smp_atomic32_dec_read_mb(&no_empty_run_queues); + no = erts_atomic32_dec_read_mb(&no_empty_run_queues); if (no > 0 && erts_atomic_read_nob(&runq_supervisor_sleeping)) ethr_event_set(&runq_supervision_event); } @@ -3056,7 +3056,7 @@ sched_prep_spin_wait(ErtsSchedulerSleepInfo *ssi) do { nflgs = (xflgs & ERTS_SSI_FLG_MSB_EXEC); nflgs |= ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING; - oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); + oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return nflgs; xflgs = oflgs; @@ -3073,7 +3073,7 @@ sched_prep_cont_spin_wait(ErtsSchedulerSleepInfo *ssi) erts_aint32_t xflgs = ERTS_SSI_FLG_WAITING; do { - oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); + oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return nflgs; xflgs = oflgs; @@ -3090,7 +3090,7 @@ sched_spin_wait(ErtsSchedulerSleepInfo *ssi, int spincount) erts_aint32_t flgs; do { - flgs = erts_smp_atomic32_read_acqb(&ssi->flags); + flgs = erts_atomic32_read_acqb(&ssi->flags); if ((flgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) != (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) { break; @@ -3119,7 +3119,7 @@ sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, erts_aint32_t sleep_type) } while (1) { - oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); + oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return nflgs; if ((oflgs & (ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING)) @@ -3146,7 +3146,7 @@ static void thr_prgr_prep_wait(void *vssi) { ErtsSchedulerSleepInfo *ssi = (ErtsSchedulerSleepInfo *) vssi; - erts_smp_atomic32_read_bor_acqb(&ssi->flags, + erts_atomic32_read_bor_acqb(&ssi->flags, ERTS_SSI_FLG_SLEEPING); } @@ -3161,7 +3161,7 @@ thr_prgr_wait(void *vssi) while (1) { erts_aint32_t aflgs, nflgs; nflgs = xflgs | ERTS_SSI_FLG_TSE_SLEEPING; - aflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); + aflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); if (aflgs == xflgs) { erts_tse_wait(ssi->event); break; @@ -3176,7 +3176,7 @@ static void thr_prgr_fin_wait(void *vssi) { ErtsSchedulerSleepInfo *ssi = (ErtsSchedulerSleepInfo *) vssi; - 
erts_smp_atomic32_read_band_nob(&ssi->flags, + erts_atomic32_read_band_nob(&ssi->flags, ~(ERTS_SSI_FLG_SLEEPING | ERTS_SSI_FLG_TSE_SLEEPING)); } @@ -3269,18 +3269,18 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) erts_aint32_t flgs; ERTS_MSACC_PUSH_STATE_M(); - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); #ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) - erts_smp_spin_lock(&rq->sleepers.lock); + erts_spin_lock(&rq->sleepers.lock); #endif flgs = sched_prep_spin_wait(ssi); if (flgs & ERTS_SSI_FLG_SUSPENDED) { /* Go suspend instead... */ #ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) - erts_smp_spin_unlock(&rq->sleepers.lock); + erts_spin_unlock(&rq->sleepers.lock); #endif return; } @@ -3292,7 +3292,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) if (rq->sleepers.list) rq->sleepers.list->prev = ssi; rq->sleepers.list = ssi; - erts_smp_spin_unlock(&rq->sleepers.lock); + erts_spin_unlock(&rq->sleepers.lock); dirty_active(esdp, -1); } #endif @@ -3306,7 +3306,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) sched_waiting(esdp->no, rq); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); spincount = sched_busy_wait.tse; @@ -3334,7 +3334,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) if (aux_work) { if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) { - flgs = erts_smp_atomic32_read_acqb(&ssi->flags); + flgs = erts_atomic32_read_acqb(&ssi->flags); current_time = erts_get_monotonic_time(esdp); if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) { if (!thr_prgr_active) { @@ -3422,7 +3422,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) } if (flgs & ~(ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC)) - erts_smp_atomic32_read_band_nob(&ssi->flags, + erts_atomic32_read_band_nob(&ssi->flags, (ERTS_SSI_FLG_SUSPENDED | ERTS_SSI_FLG_MSB_EXEC)); @@ -3433,21 +3433,21 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) sched_wall_time_change(esdp, 1); } - erts_smp_runq_lock(rq); + erts_runq_lock(rq); sched_active(esdp->no, rq); } else { - erts_smp_atomic32_set_relb(&function_calls, 0); + erts_atomic32_set_relb(&function_calls, 0); *fcalls = 0; ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp)); sched_waiting_sys(esdp->no, rq); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); ASSERT(working); sched_wall_time_change(esdp, working = 0); @@ -3492,7 +3492,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) erts_thr_progress_leader_update(esdp); } - flgs = erts_smp_atomic32_read_acqb(&ssi->flags); + flgs = erts_atomic32_read_acqb(&ssi->flags); if (!(flgs & ERTS_SSI_FLG_WAITING)) { ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING)); goto sys_woken; @@ -3515,7 +3515,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) } } - erts_smp_runq_lock(rq); + erts_runq_lock(rq); /* * If we got new I/O tasks we aren't allowed to @@ -3534,13 +3534,13 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) * do tse wait instead... 
*/ sched_change_waiting_sys_to_waiting(esdp->no, rq); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); spincount = 0; goto tse_wait; } } if (aux_work) { - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); goto sys_poll_aux_work; } flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING); @@ -3549,7 +3549,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING)); goto sys_locked_woken; } - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); flgs = sched_prep_cont_spin_wait(ssi); if (!(flgs & ERTS_SSI_FLG_WAITING)) { ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING)); @@ -3562,7 +3562,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING); ASSERT(flgs & ERTS_SSI_FLG_WAITING); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); if (working) sched_wall_time_change(esdp, working = 0); @@ -3592,16 +3592,16 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) sys_woken: if (!thr_prgr_active) erts_thr_progress_active(esdp, thr_prgr_active = 1); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); sys_locked_woken: if (!thr_prgr_active) { - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); erts_thr_progress_active(esdp, thr_prgr_active = 1); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); } clear_sys_scheduling(); if (flgs & ~(ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC)) - erts_smp_atomic32_read_band_nob(&ssi->flags, + erts_atomic32_read_band_nob(&ssi->flags, (ERTS_SSI_FLG_SUSPENDED | ERTS_SSI_FLG_MSB_EXEC)); if (!working) @@ -3612,7 +3612,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq) if (ERTS_SCHEDULER_IS_DIRTY(esdp)) dirty_active(esdp, 1); - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); } @@ -3624,7 +3624,7 @@ ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi) erts_aint32_t nflgs = 0; erts_aint32_t xflgs = ERTS_SSI_FLG_SLEEPING|ERTS_SSI_FLG_WAITING; while (1) { - oflgs = erts_smp_atomic32_cmpxchg_relb(&ssi->flags, nflgs, xflgs); + oflgs = erts_atomic32_cmpxchg_relb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return oflgs; nflgs = oflgs & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC); @@ -3644,7 +3644,7 @@ static void dcpu_sched_ix_suspend_wake(Uint ix) { ErtsSchedulerSleepInfo* ssi = ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix); - erts_smp_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED); + erts_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED); ssi_wake(ssi); } @@ -3652,7 +3652,7 @@ static void dio_sched_ix_suspend_wake(Uint ix) { ErtsSchedulerSleepInfo* ssi = ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix); - erts_smp_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED); + erts_atomic32_read_bor_nob(&ssi->flags, ERTS_SSI_FLG_SUSPENDED); ssi_wake(ssi); } @@ -3683,7 +3683,7 @@ wake_scheduler(ErtsRunQueue *rq) * so all code *should* handle this without having * the lock on the run queue. 
*/ - ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked(rq) + ERTS_LC_ASSERT(!erts_lc_runq_is_locked(rq) || ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); ssi_wake(rq->scheduler->ssi); @@ -3699,10 +3699,10 @@ wake_dirty_schedulers(ErtsRunQueue *rq, int one) ASSERT(ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); sl = &rq->sleepers; - erts_smp_spin_lock(&sl->lock); + erts_spin_lock(&sl->lock); ssi = sl->list; if (!ssi) { - erts_smp_spin_unlock(&sl->lock); + erts_spin_unlock(&sl->lock); if (one) wake_scheduler(rq); } else if (one) { @@ -3716,14 +3716,14 @@ wake_dirty_schedulers(ErtsRunQueue *rq, int one) if (ssi->next) ssi->next->prev = ssi->prev; - erts_smp_spin_unlock(&sl->lock); + erts_spin_unlock(&sl->lock); ERTS_THR_MEMORY_BARRIER; flgs = ssi_flags_set_wake(ssi); erts_sched_finish_poke(ssi, flgs); } else { sl->list = NULL; - erts_smp_spin_unlock(&sl->lock); + erts_spin_unlock(&sl->lock); ERTS_THR_MEMORY_BARRIER; do { @@ -3754,13 +3754,13 @@ init_no_runqs(int active, int used) { erts_aint32_t no_runqs = (erts_aint32_t) (active & ERTS_NO_RUNQS_MASK); no_runqs |= (erts_aint32_t) ((used & ERTS_NO_RUNQS_MASK) << ERTS_NO_USED_RUNQS_SHIFT); - erts_smp_atomic32_init_nob(&balance_info.no_runqs, no_runqs); + erts_atomic32_init_nob(&balance_info.no_runqs, no_runqs); } static ERTS_INLINE void get_no_runqs(int *active, int *used) { - erts_aint32_t no_runqs = erts_smp_atomic32_read_nob(&balance_info.no_runqs); + erts_aint32_t no_runqs = erts_atomic32_read_nob(&balance_info.no_runqs); if (active) *active = (int) (no_runqs & ERTS_NO_RUNQS_MASK); if (used) @@ -3770,12 +3770,12 @@ get_no_runqs(int *active, int *used) static ERTS_INLINE void set_no_used_runqs(int used) { - erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs); + erts_aint32_t exp = erts_atomic32_read_nob(&balance_info.no_runqs); while (1) { erts_aint32_t act, new; new = (used & ERTS_NO_RUNQS_MASK) << ERTS_NO_USED_RUNQS_SHIFT; new |= exp & ERTS_NO_RUNQS_MASK; - act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp); + act = erts_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp); if (act == exp) break; exp = act; @@ -3785,14 +3785,14 @@ set_no_used_runqs(int used) static ERTS_INLINE void set_no_active_runqs(int active) { - erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs); + erts_aint32_t exp = erts_atomic32_read_nob(&balance_info.no_runqs); while (1) { erts_aint32_t act, new; if ((exp & ERTS_NO_RUNQS_MASK) == active) break; new = exp & (ERTS_NO_RUNQS_MASK << ERTS_NO_USED_RUNQS_SHIFT); new |= active & ERTS_NO_RUNQS_MASK; - act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp); + act = erts_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp); if (act == exp) break; exp = act; @@ -3802,14 +3802,14 @@ set_no_active_runqs(int active) static ERTS_INLINE int try_inc_no_active_runqs(int active) { - erts_aint32_t exp = erts_smp_atomic32_read_nob(&balance_info.no_runqs); + erts_aint32_t exp = erts_atomic32_read_nob(&balance_info.no_runqs); if (((exp >> ERTS_NO_USED_RUNQS_SHIFT) & ERTS_NO_RUNQS_MASK) < active) return 0; if ((exp & ERTS_NO_RUNQS_MASK) + 1 == active) { erts_aint32_t new, act; new = exp & (ERTS_NO_RUNQS_MASK << ERTS_NO_USED_RUNQS_SHIFT); new |= active & ERTS_NO_RUNQS_MASK; - act = erts_smp_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp); + act = erts_atomic32_cmpxchg_nob(&balance_info.no_runqs, new, exp); if (act == exp) return 1; } @@ -3886,7 +3886,7 @@ smp_notify_inc_runq(ErtsRunQueue *runq) } void -erts_smp_notify_inc_runq(ErtsRunQueue *runq) +erts_notify_inc_runq(ErtsRunQueue *runq) { 
smp_notify_inc_runq(runq); } @@ -3908,9 +3908,9 @@ enqueue_process(ErtsRunQueue *runq, int prio, Process *p) { ErtsRunPrioQueue *rpq; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); - erts_smp_inc_runq_len(runq, &runq->procs.prio_info[prio], prio); + erts_inc_runq_len(runq, &runq->procs.prio_info[prio], prio); if (prio == PRIORITY_LOW) { p->schedule_count = RESCHEDULE_LOW; @@ -3938,7 +3938,7 @@ unqueue_process(ErtsRunQueue *runq, Process *prev_proc, Process *proc) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); if (prev_proc) prev_proc->next = proc->next; @@ -3950,7 +3950,7 @@ unqueue_process(ErtsRunQueue *runq, if (!rpq->first) rpq->last = NULL; - erts_smp_dec_runq_len(runq, rqi, prio); + erts_dec_runq_len(runq, rqi, prio); } @@ -3963,7 +3963,7 @@ dequeue_process(ErtsRunQueue *runq, int prio_q, erts_aint32_t *statep) ErtsRunQueueInfo *rqi; Process *p; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(runq)); ASSERT(PRIORITY_NORMAL == prio_q || PRIORITY_HIGH == prio_q @@ -3974,9 +3974,9 @@ dequeue_process(ErtsRunQueue *runq, int prio_q, erts_aint32_t *statep) if (!p) return NULL; - ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER; + ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER; - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); if (statep) *statep = state; @@ -4009,7 +4009,7 @@ check_requeue_process(ErtsRunQueue *rq, int prio_q) static ERTS_INLINE void free_proxy_proc(Process *proxy) { - ASSERT(erts_smp_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY); + ASSERT(erts_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY); erts_free(ERTS_ALC_T_PROC, proxy); } @@ -4065,7 +4065,7 @@ static void immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp) { Uint32 iflags, iflag; - erts_smp_runq_unlock(c_rq); + erts_runq_unlock(c_rq); ASSERT(erts_thr_progress_is_managed_thread()); @@ -4106,13 +4106,13 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp) rq = check_immigration_need(c_rq, mp, prio); if (rq) { - erts_smp_runq_lock(rq); + erts_runq_lock(rq); if (prio == ERTS_PORT_PRIO_LEVEL) { Port *prt; prt = erts_dequeue_port(rq); if (prt) RUNQ_SET_RQ(&prt->run_queue, c_rq); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); if (prt) { /* port might terminate while we have no lock... 
*/ rq = erts_port_runq(prt); @@ -4124,7 +4124,7 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp) erts_enqueue_port(c_rq, prt); if (!iflag) return; /* done */ - erts_smp_runq_unlock(c_rq); + erts_runq_unlock(c_rq); } } } @@ -4138,38 +4138,38 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp) while (proc) { erts_aint32_t state; - state = erts_smp_atomic32_read_acqb(&proc->state); + state = erts_atomic32_read_acqb(&proc->state); if (!(ERTS_PSFLG_BOUND & state) && (prio == (int) ERTS_PSFLGS_GET_PRQ_PRIO(state))) { ErtsRunQueueInfo *rqi = &rq->procs.prio_info[prio]; unqueue_process(rq, rpq, rqi, prio, prev_proc, proc); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); RUNQ_SET_RQ(&proc->run_queue, c_rq); rq_locked = 0; - erts_smp_runq_lock(c_rq); + erts_runq_lock(c_rq); enqueue_process(c_rq, prio, proc); if (!iflag) return; /* done */ - erts_smp_runq_unlock(c_rq); + erts_runq_unlock(c_rq); break; } prev_proc = proc; proc = proc->next; } if (rq_locked) - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); } } } - erts_smp_runq_lock(c_rq); + erts_runq_lock(c_rq); } static ERTS_INLINE void suspend_run_queue(ErtsRunQueue *rq) { - erts_smp_atomic32_read_bor_nob(&rq->scheduler->ssi->flags, + erts_atomic32_read_bor_nob(&rq->scheduler->ssi->flags, ERTS_SSI_FLG_SUSPENDED); (void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_SUSPENDED); @@ -4186,7 +4186,7 @@ resume_run_queue(ErtsRunQueue *rq) ASSERT(!ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); oflgs = ERTS_RUNQ_FLGS_READ_BSET(rq, (ERTS_RUNQ_FLG_OUT_OF_WORK @@ -4201,19 +4201,19 @@ resume_run_queue(ErtsRunQueue *rq) rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS; for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) { - len = erts_smp_atomic32_read_dirty(&rq->procs.prio_info[pix].len); + len = erts_atomic32_read_dirty(&rq->procs.prio_info[pix].len); rq->procs.prio_info[pix].max_len = len; rq->procs.prio_info[pix].reds = 0; } - len = erts_smp_atomic32_read_dirty(&rq->ports.info.len); + len = erts_atomic32_read_dirty(&rq->ports.info.len); rq->ports.info.max_len = len; rq->ports.info.reds = 0; - len = erts_smp_atomic32_read_dirty(&rq->len); + len = erts_atomic32_read_dirty(&rq->len); rq->max_len = len; } - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); nrml_sched_ix_resume_wake(rq->ix); } @@ -4228,11 +4228,11 @@ schedule_bound_processes(ErtsRunQueue *rq, ErtsStuckBoundProcesses *sbpp) { Process *proc, *next; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); proc = sbpp->first; while (proc) { - erts_aint32_t state = erts_smp_atomic32_read_acqb(&proc->state); + erts_aint32_t state = erts_atomic32_read_acqb(&proc->state); next = proc->next; enqueue_process(rq, (int) ERTS_PSFLGS_GET_PRQ_PRIO(state), proc); proc = next; @@ -4259,7 +4259,7 @@ clear_proc_dirty_queue_bit(Process *p, ErtsRunQueue *rq, int prio_bit) #else (void) #endif - erts_smp_atomic32_read_band_mb(&p->dirty_state, ~qb); + erts_atomic32_read_band_mb(&p->dirty_state, ~qb); ASSERT(old & qb); } @@ -4275,7 +4275,7 @@ evacuate_run_queue(ErtsRunQueue *rq, ErtsMigrationPaths *mps; ErtsMigrationPath *mp; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); @@ -4298,9 +4298,9 @@ evacuate_run_queue(ErtsRunQueue *rq, rq->misc.start = NULL; rq->misc.end = NULL; ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_MISC_OP); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); - erts_smp_runq_lock(to_rq); + 
erts_runq_lock(to_rq); if (to_rq->misc.end) to_rq->misc.end->next = start; else @@ -4310,9 +4310,9 @@ evacuate_run_queue(ErtsRunQueue *rq, non_empty_runq(to_rq); - erts_smp_runq_unlock(to_rq); + erts_runq_unlock(to_rq); smp_notify_inc_runq(to_rq); - erts_smp_runq_lock(to_rq); + erts_runq_lock(to_rq); } if (rq->ports.start) { @@ -4328,7 +4328,7 @@ evacuate_run_queue(ErtsRunQueue *rq, ErtsRunQueue *prt_rq; prt = erts_dequeue_port(rq); RUNQ_SET_RQ(&prt->run_queue, to_rq); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); /* * The port might terminate while * we have no lock on it... @@ -4340,9 +4340,9 @@ evacuate_run_queue(ErtsRunQueue *rq, "%s:%d:%s() internal error\n", __FILE__, __LINE__, __func__); erts_enqueue_port(to_rq, prt); - erts_smp_runq_unlock(to_rq); + erts_runq_unlock(to_rq); } - erts_smp_runq_lock(rq); + erts_runq_lock(rq); prt = rq->ports.start; } smp_notify_inc_runq(to_rq); @@ -4379,7 +4379,7 @@ evacuate_run_queue(ErtsRunQueue *rq, free_proxy_proc(proc); goto handle_next_proc; } - real_state = erts_smp_atomic32_read_acqb(&real_proc->state); + real_state = erts_atomic32_read_acqb(&real_proc->state); } max_qbit = (state >> ERTS_PSFLGS_IN_PRQ_MASK_OFFSET); @@ -4405,7 +4405,7 @@ evacuate_run_queue(ErtsRunQueue *rq, #else (void) #endif - erts_smp_atomic32_read_band_mb(&proc->state, + erts_atomic32_read_band_mb(&proc->state, ~clr_bits); ASSERT((old & clr_bits) == clr_bits); @@ -4425,17 +4425,17 @@ evacuate_run_queue(ErtsRunQueue *rq, } else { int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); to_rq = mp->prio[prio].runq; RUNQ_SET_RQ(&proc->run_queue, to_rq); - erts_smp_runq_lock(to_rq); + erts_runq_lock(to_rq); enqueue_process(to_rq, prio, proc); - erts_smp_runq_unlock(to_rq); + erts_runq_unlock(to_rq); notify = 1; - erts_smp_runq_lock(rq); + erts_runq_lock(rq); } handle_next_proc: @@ -4454,13 +4454,13 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq, ErtsRunPrioQueue *rpq; if (*rq_lockedp) { - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); *rq_lockedp = 0; } - ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(!erts_lc_runq_is_locked(rq)); - erts_smp_runq_lock(vrq); + erts_runq_lock(vrq); if (ERTS_RUNQ_FLGS_GET_NOB(rq) & ERTS_RUNQ_FLG_HALTING) goto no_procs; @@ -4496,16 +4496,16 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq, proc = rpq->first; while (proc) { - erts_aint32_t state = erts_smp_atomic32_read_acqb(&proc->state); + erts_aint32_t state = erts_atomic32_read_acqb(&proc->state); if (!(ERTS_PSFLG_BOUND & state)) { /* Steal process */ int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state); ErtsRunQueueInfo *rqi = &vrq->procs.prio_info[prio]; unqueue_process(vrq, rpq, rqi, prio, prev_proc, proc); - erts_smp_runq_unlock(vrq); + erts_runq_unlock(vrq); RUNQ_SET_RQ(&proc->run_queue, rq); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); *rq_lockedp = 1; enqueue_process(rq, prio, proc); return !0; @@ -4519,7 +4519,7 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq, no_procs: - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(vrq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(vrq)); /* * Check for a runnable port to steal... 
@@ -4529,7 +4529,7 @@ no_procs: ErtsRunQueue *prt_rq; Port *prt = erts_dequeue_port(vrq); RUNQ_SET_RQ(&prt->run_queue, rq); - erts_smp_runq_unlock(vrq); + erts_runq_unlock(vrq); /* * The port might terminate while @@ -4550,7 +4550,7 @@ no_procs: } } - erts_smp_runq_unlock(vrq); + erts_runq_unlock(vrq); return 0; } @@ -4582,7 +4582,7 @@ try_steal_task(ErtsRunQueue *rq) res = 0; rq_locked = 1; - ERTS_SMP_LC_CHK_RUNQ_LOCK(rq, rq_locked); + ERTS_LC_CHK_RUNQ_LOCK(rq, rq_locked); get_no_runqs(&active_rqs, &blnc_rqs); @@ -4595,7 +4595,7 @@ try_steal_task(ErtsRunQueue *rq) if (active_rqs < blnc_rqs) { int no = blnc_rqs - active_rqs; int stop_ix = vix = active_rqs + rq->ix % no; - while (erts_smp_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) { + while (erts_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) { res = check_possible_steal_victim(rq, &rq_locked, vix); if (res) goto done; @@ -4610,7 +4610,7 @@ try_steal_task(ErtsRunQueue *rq) vix = rq->ix; /* ... then try to steal a job from another active queue... */ - while (erts_smp_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) { + while (erts_atomic32_read_acqb(&no_empty_run_queues) < blnc_rqs) { vix++; if (vix >= active_rqs) vix = 0; @@ -4627,7 +4627,7 @@ try_steal_task(ErtsRunQueue *rq) done: if (!rq_locked) - erts_smp_runq_lock(rq); + erts_runq_lock(rq); if (res) return res; @@ -4753,7 +4753,7 @@ alloc_mpaths(void) { void *block; ErtsMigrationPaths *res; - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&balance_info.update_mtx)); + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&balance_info.update_mtx)); res = mpaths.freelist; if (res) { @@ -4776,7 +4776,7 @@ retire_mpaths(ErtsMigrationPaths *mps) { ErtsThrPrgrVal current; - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&balance_info.update_mtx)); + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&balance_info.update_mtx)); current = erts_thr_progress_current(); @@ -4822,7 +4822,7 @@ check_balance(ErtsRunQueue *c_rq) int sched_util_balancing; #endif - if (erts_smp_atomic32_xchg_nob(&balance_info.checking_balance, 1)) { + if (erts_atomic32_xchg_nob(&balance_info.checking_balance, 1)) { c_rq->check_balance_reds = INT_MAX; return; } @@ -4830,15 +4830,15 @@ check_balance(ErtsRunQueue *c_rq) get_no_runqs(NULL, &blnc_no_rqs); if (blnc_no_rqs == 1) { c_rq->check_balance_reds = INT_MAX; - erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0); + erts_atomic32_set_nob(&balance_info.checking_balance, 0); return; } - erts_smp_runq_unlock(c_rq); + erts_runq_unlock(c_rq); if (balance_info.halftime) { balance_info.halftime = 0; - erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0); + erts_atomic32_set_nob(&balance_info.checking_balance, 0); ERTS_FOREACH_RUNQ(rq, { if (rq->waiting) @@ -4848,7 +4848,7 @@ check_balance(ErtsRunQueue *c_rq) rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS; }); - erts_smp_runq_lock(c_rq); + erts_runq_lock(c_rq); return; } @@ -4861,7 +4861,7 @@ check_balance(ErtsRunQueue *c_rq) * is manipulated. Such updates of the migration information * might clash with balancing. 
*/ - erts_smp_mtx_lock(&balance_info.update_mtx); + erts_mtx_lock(&balance_info.update_mtx); forced = balance_info.forced_check_balance; balance_info.forced_check_balance = 0; @@ -4869,10 +4869,10 @@ check_balance(ErtsRunQueue *c_rq) get_no_runqs(¤t_active, &blnc_no_rqs); if (blnc_no_rqs == 1) { - erts_smp_mtx_unlock(&balance_info.update_mtx); - erts_smp_runq_lock(c_rq); + erts_mtx_unlock(&balance_info.update_mtx); + erts_runq_lock(c_rq); c_rq->check_balance_reds = INT_MAX; - erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0); + erts_atomic32_set_nob(&balance_info.checking_balance, 0); return; } @@ -4888,7 +4888,7 @@ check_balance(ErtsRunQueue *c_rq) /* Read balance information for all run queues */ for (qix = 0; qix < blnc_no_rqs; qix++) { ErtsRunQueue *rq = ERTS_RUNQ_IX(qix); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); run_queue_info[qix].flags = ERTS_RUNQ_FLGS_GET_NOB(rq); for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) { @@ -4916,7 +4916,7 @@ check_balance(ErtsRunQueue *c_rq) run_queue_info[qix].sched_util = erts_get_sched_util(rq, 1, 0); #endif - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); } full_scheds = 0; @@ -5355,7 +5355,7 @@ erts_fprintf(stderr, "--------------------------------\n"); Uint32 flags = run_queue_info[qix].flags; ErtsRunQueue *rq = ERTS_RUNQ_IX(qix); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); ASSERT(!(flags & ERTS_RUNQ_FLG_OUT_OF_WORK)); if (rq->waiting) flags |= ERTS_RUNQ_FLG_OUT_OF_WORK; @@ -5370,27 +5370,27 @@ erts_fprintf(stderr, "--------------------------------\n"); rq->out_of_work_count = 0; (void) ERTS_RUNQ_FLGS_READ_BSET(rq, ERTS_RUNQ_FLGS_MIGRATION_INFO, flags); - rq->max_len = erts_smp_atomic32_read_dirty(&rq->len); + rq->max_len = erts_atomic32_read_dirty(&rq->len); for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) { ErtsRunQueueInfo *rqi; rqi = (pix == ERTS_PORT_PRIO_LEVEL ? &rq->ports.info : &rq->procs.prio_info[pix]); - erts_smp_reset_max_len(rq, rqi); + erts_reset_max_len(rq, rqi); rqi->reds = 0; } rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS; - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); } - erts_smp_atomic32_set_nob(&balance_info.checking_balance, 0); + erts_atomic32_set_nob(&balance_info.checking_balance, 0); balance_info.n++; retire_mpaths(old_mpaths); - erts_smp_mtx_unlock(&balance_info.update_mtx); + erts_mtx_unlock(&balance_info.update_mtx); - erts_smp_runq_lock(c_rq); + erts_runq_lock(c_rq); } static void @@ -5398,7 +5398,7 @@ change_no_used_runqs(int used) { ErtsMigrationPaths *new_mpaths, *old_mpaths; int qix; - erts_smp_mtx_lock(&balance_info.update_mtx); + erts_mtx_lock(&balance_info.update_mtx); set_no_used_runqs(used); old_mpaths = erts_get_migration_paths_managed(); @@ -5445,11 +5445,11 @@ change_no_used_runqs(int used) /* Make sure that we balance soon... 
*/ balance_info.forced_check_balance = 1; - erts_smp_mtx_unlock(&balance_info.update_mtx); + erts_mtx_unlock(&balance_info.update_mtx); - erts_smp_runq_lock(ERTS_RUNQ_IX(0)); + erts_runq_lock(ERTS_RUNQ_IX(0)); ERTS_RUNQ_IX(0)->check_balance_reds = 0; - erts_smp_runq_unlock(ERTS_RUNQ_IX(0)); + erts_runq_unlock(ERTS_RUNQ_IX(0)); } @@ -5458,9 +5458,9 @@ Uint erts_debug_nbalance(void) { Uint n; - erts_smp_mtx_lock(&balance_info.update_mtx); + erts_mtx_lock(&balance_info.update_mtx); n = balance_info.n; - erts_smp_mtx_unlock(&balance_info.update_mtx); + erts_mtx_unlock(&balance_info.update_mtx); return n; } @@ -5522,7 +5522,7 @@ wakeup_other_check(ErtsRunQueue *rq, Uint32 flags) { int wo_reds = rq->wakeup_other_reds; if (wo_reds) { - int left_len = erts_smp_atomic32_read_dirty(&rq->len) - 1; + int left_len = erts_atomic32_read_dirty(&rq->len) - 1; if (left_len < 1) { int wo_reduce = wo_reds << wakeup_other.dec_shift; wo_reduce &= wakeup_other.dec_mask; @@ -5543,7 +5543,7 @@ wakeup_other_check(ErtsRunQueue *rq, Uint32 flags) #endif { int empty_rqs = - erts_smp_atomic32_read_acqb(&no_empty_run_queues); + erts_atomic32_read_acqb(&no_empty_run_queues); if (flags & ERTS_RUNQ_FLG_PROTECTED) (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); if (empty_rqs != 0) @@ -5595,7 +5595,7 @@ wakeup_other_check_legacy(ErtsRunQueue *rq, Uint32 flags) { int wo_reds = rq->wakeup_other_reds; if (wo_reds) { - erts_aint32_t len = erts_smp_atomic32_read_dirty(&rq->len); + erts_aint32_t len = erts_atomic32_read_dirty(&rq->len); if (len < 2) { rq->wakeup_other -= ERTS_WAKEUP_OTHER_DEC_LEGACY*wo_reds; if (rq->wakeup_other < 0) @@ -5606,7 +5606,7 @@ wakeup_other_check_legacy(ErtsRunQueue *rq, Uint32 flags) else { if (flags & ERTS_RUNQ_FLG_PROTECTED) (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); - if (erts_smp_atomic32_read_acqb(&no_empty_run_queues) != 0) { + if (erts_atomic32_read_acqb(&no_empty_run_queues) != 0) { wake_scheduler_on_empty_runq(rq); rq->wakeup_other = 0; } @@ -5657,7 +5657,7 @@ static int no_runqs_to_supervise(void) { int used; - erts_aint32_t nerq = erts_smp_atomic32_read_acqb(&no_empty_run_queues); + erts_aint32_t nerq = erts_atomic32_read_acqb(&no_empty_run_queues); if (nerq <= 0) return 0; get_no_runqs(NULL, &used); @@ -5690,10 +5690,10 @@ runq_supervisor(void *unused) for (ix = 0; ix < no_rqs; ix++) { ErtsRunQueue *rq = ERTS_RUNQ_IX(ix); if (ERTS_RUNQ_FLGS_GET(rq) & ERTS_RUNQ_FLG_NONEMPTY) { - erts_smp_runq_lock(rq); - if (erts_smp_atomic32_read_dirty(&rq->len) != 0) + erts_runq_lock(rq); + if (erts_atomic32_read_dirty(&rq->len) != 0) wake_scheduler_on_empty_runq(rq); /* forced wakeup... */ - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); } } } @@ -5915,7 +5915,7 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num, esdp->dirty_shadow_process = shadow_proc; if (shadow_proc) { erts_init_empty_process(shadow_proc); - erts_smp_atomic32_init_nob(&shadow_proc->state, + erts_atomic32_init_nob(&shadow_proc->state, (ERTS_PSFLG_ACTIVE | ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_PROXY)); @@ -5998,7 +5998,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online size_runqs = sizeof(ErtsAlignedRunQueue) * tot_rqs; erts_aligned_run_queues = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_RUNQS, size_runqs); - erts_smp_atomic32_init_nob(&no_empty_run_queues, 0); + erts_atomic32_init_nob(&no_empty_run_queues, 0); erts_no_run_queues = n; @@ -6012,13 +6012,13 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online * id if the esdp->no <-> ix+1 mapping change. 
*/ - erts_smp_mtx_init(&rq->mtx, "run_queue", make_small(ix + 1), + erts_mtx_init(&rq->mtx, "run_queue", make_small(ix + 1), ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER); - erts_smp_cnd_init(&rq->cnd); + erts_cnd_init(&rq->cnd); #ifdef ERTS_DIRTY_SCHEDULERS if (ERTS_RUNQ_IX_IS_DIRTY(ix)) { - erts_smp_spinlock_init(&rq->sleepers.lock, "dirty_run_queue_sleep_list", + erts_spinlock_init(&rq->sleepers.lock, "dirty_run_queue_sleep_list", make_small(ix + 1), ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER); } @@ -6036,7 +6036,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online } rq->out_of_work_count = 0; rq->max_len = 0; - erts_smp_atomic32_set_nob(&rq->len, 0); + erts_atomic32_set_nob(&rq->len, 0); rq->wakeup_other = 0; rq->wakeup_other_reds = 0; @@ -6045,7 +6045,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online rq->procs.reductions = 0; for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++) { - erts_smp_atomic32_init_nob(&rq->procs.prio_info[pix].len, 0); + erts_atomic32_init_nob(&rq->procs.prio_info[pix].len, 0); rq->procs.prio_info[pix].max_len = 0; rq->procs.prio_info[pix].reds = 0; if (pix < ERTS_NO_PROC_PRIO_LEVELS - 1) { @@ -6057,7 +6057,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online rq->misc.start = NULL; rq->misc.end = NULL; - erts_smp_atomic32_init_nob(&rq->ports.info.len, 0); + erts_atomic32_init_nob(&rq->ports.info.len, 0); rq->ports.info.max_len = 0; rq->ports.info.reds = 0; rq->ports.start = NULL; @@ -6102,7 +6102,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online ssi->next = NULL; ssi->prev = NULL; #endif - erts_smp_atomic32_init_nob(&ssi->flags, 0); + erts_atomic32_init_nob(&ssi->flags, 0); ssi->event = NULL; /* initialized in sched_thread_func */ erts_atomic32_init_nob(&ssi->aux_work, 0); } @@ -6116,7 +6116,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo)); for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) { ErtsSchedulerSleepInfo *ssi = &aligned_dirty_cpu_sched_sleep_info[ix].ssi; - erts_smp_atomic32_init_nob(&ssi->flags, 0); + erts_atomic32_init_nob(&ssi->flags, 0); ssi->event = NULL; /* initialized in sched_dirty_cpu_thread_func */ erts_atomic32_init_nob(&ssi->aux_work, 0); } @@ -6126,7 +6126,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerSleepInfo)); for (ix = 0; ix < no_dirty_io_schedulers; ix++) { ErtsSchedulerSleepInfo *ssi = &aligned_dirty_io_sched_sleep_info[ix].ssi; - erts_smp_atomic32_init_nob(&ssi->flags, 0); + erts_atomic32_init_nob(&ssi->flags, 0); ssi->event = NULL; /* initialized in sched_dirty_io_thread_func */ erts_atomic32_init_nob(&ssi->aux_work, 0); } @@ -6197,12 +6197,12 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online init_no_runqs(no_schedulers_online, no_schedulers_online); balance_info.last_active_runqs = no_schedulers; - erts_smp_mtx_init(&balance_info.update_mtx, "migration_info_update", NIL, + erts_mtx_init(&balance_info.update_mtx, "migration_info_update", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER); balance_info.forced_check_balance = 0; balance_info.halftime = 1; balance_info.full_reds_history_index = 0; - erts_smp_atomic32_init_nob(&balance_info.checking_balance, 0); + erts_atomic32_init_nob(&balance_info.checking_balance, 0); balance_info.prev_rise.active_runqs = 0; balance_info.prev_rise.max_len = 0; 
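The `erts_mtx_init()`/`erts_cnd_init()`/`erts_atomic32_init_nob()` sequence in `erts_init_scheduling()` above sets up each run queue's lock, condition variable, and counters before any scheduler thread can observe the queue, which is why the unordered ("nob", no barrier) initializers suffice. A rough C11/pthreads analogue, with `mini_runq_t` and `mini_runq_init()` as hypothetical names:

```c
#include <pthread.h>
#include <stdatomic.h>

/* Hypothetical miniature of a run queue's concurrency fields. */
typedef struct {
    pthread_mutex_t mtx;
    pthread_cond_t  cnd;
    atomic_int      len;   /* queue length, read both locked and unlocked */
    atomic_int      flags; /* run-queue flag bits */
} mini_runq_t;

static void mini_runq_init(mini_runq_t *rq)
{
    pthread_mutex_init(&rq->mtx, NULL);
    pthread_cond_init(&rq->cnd, NULL);
    /* A "nob" init corresponds to a plain store: safe here because the
     * structure has not yet been published to other threads. */
    atomic_init(&rq->len, 0);
    atomic_init(&rq->flags, 0);
}
```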
balance_info.prev_rise.reds = 0; @@ -6250,7 +6250,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online set_schdlr_sspnd_change_flags |= ERTS_SCHDLR_SSPND_CHNG_DCPU_ONLN; for (ix = no_dirty_cpu_schedulers_online; ix < no_dirty_cpu_schedulers; ix++) { ErtsSchedulerData* esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix); - erts_smp_atomic32_read_bor_nob(&esdp->ssi->flags, ERTS_SSI_FLG_SUSPENDED); + erts_atomic32_read_bor_nob(&esdp->ssi->flags, ERTS_SSI_FLG_SUSPENDED); } } @@ -6264,29 +6264,29 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online ERTS_SCHED_DIRTY_IO, no_dirty_io_schedulers); - erts_smp_atomic32_init_nob(&dirty_count.cpu.active, + erts_atomic32_init_nob(&dirty_count.cpu.active, (erts_aint32_t) no_dirty_cpu_schedulers); - erts_smp_atomic32_init_nob(&dirty_count.io.active, + erts_atomic32_init_nob(&dirty_count.io.active, (erts_aint32_t) no_dirty_io_schedulers); #endif if (set_schdlr_sspnd_change_flags) - erts_smp_atomic32_set_nob(&schdlr_sspnd.changing, + erts_atomic32_set_nob(&schdlr_sspnd.changing, set_schdlr_sspnd_change_flags); - erts_smp_atomic32_init_nob(&doing_sys_schedule, 0); + erts_atomic32_init_nob(&doing_sys_schedule, 0); init_misc_aux_work(); - erts_smp_atomic32_init_nob(&function_calls, 0); + erts_atomic32_init_nob(&function_calls, 0); /* init port tasks */ erts_port_task_init(); - erts_smp_atomic32_init_relb(&erts_halt_progress, -1); + erts_atomic32_init_relb(&erts_halt_progress, -1); erts_halt_code = 0; @@ -6325,8 +6325,8 @@ make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio) if (prev_proxy) { proxy = prev_proxy; - ASSERT(erts_smp_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY); - erts_smp_atomic32_set_nob(&proxy->state, state); + ASSERT(erts_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY); + erts_atomic32_set_nob(&proxy->state, state); RUNQ_SET_RQ(&proc->run_queue, rq); } else { @@ -6339,9 +6339,9 @@ make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio) ui32[i] = (Uint32) 0xdeadbeef; } #endif - erts_smp_atomic32_init_nob(&proxy->state, state); - erts_smp_atomic_init_nob(&proxy->run_queue, - erts_smp_atomic_read_nob(&proc->run_queue)); + erts_atomic32_init_nob(&proxy->state, state); + erts_atomic_init_nob(&proxy->run_queue, + erts_atomic_read_nob(&proc->run_queue)); } proxy->common.id = proc->common.id; @@ -6385,7 +6385,7 @@ check_dirty_enqueue_in_prio_queue(Process *c_p, if ((*newp) & ERTS_PSFLG_ACTIVE_SYS) return ERTS_ENQUEUE_NORMAL_QUEUE; - dact = erts_smp_atomic32_read_mb(&c_p->dirty_state); + dact = erts_atomic32_read_mb(&c_p->dirty_state); if (actual & (ERTS_PSFLG_DIRTY_ACTIVE_SYS | ERTS_PSFLG_DIRTY_CPU_PROC)) { max_qbit = ((dact >> ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET) @@ -6430,7 +6430,7 @@ fin_dirty_enq_s_change(Process *p, erts_aint32_t qbit = 1 << enq_prio; qbit <<= qmask_offset; - if (qbit & erts_smp_atomic32_read_bor_mb(&p->dirty_state, qbit)) { + if (qbit & erts_atomic32_read_bor_mb(&p->dirty_state, qbit)) { /* Already enqueue by someone else... */ if (pstruct_reserved) { /* We reserved process struct for enqueue; clear it... 
*/ @@ -6439,7 +6439,7 @@ fin_dirty_enq_s_change(Process *p, #else (void) #endif - erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_IN_RUNQ); + erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_IN_RUNQ); ASSERT(old & ERTS_PSFLG_IN_RUNQ); } return 0; @@ -6592,7 +6592,7 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, */ ASSERT(!(p->flags & (F_DIRTY_CLA | F_DIRTY_GC_HIBERNATE))); - state = erts_smp_atomic32_read_band_nob(&p->state, + state = erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_DIRTY_ACTIVE_SYS); state &= ~ERTS_PSFLG_DIRTY_ACTIVE_SYS; } @@ -6613,7 +6613,7 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, || (a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) { enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a); } - a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); + a = erts_atomic32_cmpxchg_mb(&p->state, n, e); if (a == e) break; } @@ -6625,7 +6625,7 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, if (erts_system_profile_flags.runnable_procs) { /* Status lock prevents out of order "runnable proc" trace msgs */ - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); if (!(a & (ERTS_PSFLG_ACTIVE_SYS|ERTS_PSFLG_DIRTY_ACTIVE_SYS)) && (!(a & ERTS_PSFLG_ACTIVE) || (a & ERTS_PSFLG_SUSPENDED))) { @@ -6637,7 +6637,7 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, if (proxy) free_proxy_proc(proxy); - erts_smp_runq_lock(c_rq); + erts_runq_lock(c_rq); /* Decrement refc if scheduled out from dirty scheduler... */ return !is_normal_sched; @@ -6659,7 +6659,7 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, ASSERT(runq); - erts_smp_runq_lock(runq); + erts_runq_lock(runq); if (is_normal_sched && sched_p == p && ERTS_RUNQ_IX_IS_DIRTY(runq->ix)) erts_proc_inc_refc(p); /* Needs to be done before enqueue_process() */ @@ -6670,11 +6670,11 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, if (runq == c_rq) return 0; - erts_smp_runq_unlock(runq); + erts_runq_unlock(runq); smp_notify_inc_runq(runq); - erts_smp_runq_lock(c_rq); + erts_runq_lock(c_rq); /* * Decrement refc if process is scheduled out by a @@ -6722,12 +6722,12 @@ add2runq(int enqueue, erts_aint32_t prio, sched_p = make_proxy_proc(pxy, proc, prio); } - erts_smp_runq_lock(runq); + erts_runq_lock(runq); /* Enqueue the process */ enqueue_process(runq, (int) prio, sched_p); - erts_smp_runq_unlock(runq); + erts_runq_unlock(runq); smp_notify_inc_runq(runq); } } @@ -6752,7 +6752,7 @@ change_proc_schedule_state(Process *p, unsigned int lock_status = (prof_runnable_procs && !(locks & ERTS_PROC_LOCK_STATUS)); - ERTS_SMP_LC_ASSERT(locks == erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(locks == erts_proc_lc_my_proc_locks(p)); ASSERT(!(a & ERTS_PSFLG_PROXY)); ASSERT((clear_state_flags & (ERTS_PSFLG_RUNNING @@ -6767,7 +6767,7 @@ change_proc_schedule_state(Process *p, | ERTS_PSFLG_ACTIVE_SYS)) == 0); if (lock_status) - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); while (1) { erts_aint32_t e; @@ -6805,7 +6805,7 @@ change_proc_schedule_state(Process *p, enqueue = check_enqueue_in_prio_queue(p, enq_prio_p, &n, a); } - a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); + a = erts_atomic32_cmpxchg_mb(&p->state, n, e); if (a == e) break; if (enqueue == ERTS_ENQUEUE_NOT && n == a) @@ -6830,7 +6830,7 @@ 
change_proc_schedule_state(Process *p, } if (lock_status) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } @@ -6874,7 +6874,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st, res = 1; /* prepare for success */ st->next = st->prev = st; /* Prep for empty prio queue */ - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); prof_runnable_procs = erts_system_profile_flags.runnable_procs; locked = 0; free_stqs = NULL; @@ -6894,9 +6894,9 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st, if (!locked) { locked = 1; - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); if (state & fail_state) { *fail_state_p = (state & fail_state); free_stqs = stqs; @@ -6940,7 +6940,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st, n = e = a; n &= ~ERTS_PSFLGS_ACT_PRIO_MASK; n |= (prio << ERTS_PSFLGS_ACT_PRIO_OFFSET); - a = erts_smp_atomic32_cmpxchg_nob(&p->state, n, e); + a = erts_atomic32_cmpxchg_nob(&p->state, n, e); } while (a != e); state = n; } @@ -6950,10 +6950,10 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st, enq_prio = -1; /* Status lock prevents out of order "runnable proc" trace msgs */ - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); if (!prof_runnable_procs) { - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); locked = 0; } @@ -6973,7 +6973,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st, | ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_DIRTY_RUNNING_SYS))) enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a); - a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); + a = erts_atomic32_cmpxchg_mb(&p->state, n, e); if (a == e) break; if (a == n && enqueue == ERTS_ENQUEUE_NOT) @@ -6992,7 +6992,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st, profile_runnable_proc(p, am_active); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); locked = 0; } @@ -7001,12 +7001,12 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st, cleanup: if (locked) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); if (free_stqs) proc_sys_task_queues_free(free_stqs); - ERTS_SMP_LC_ASSERT(!(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p))); + ERTS_LC_ASSERT(!(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p))); return res; } @@ -7016,15 +7016,15 @@ suspend_process(Process *c_p, Process *p) { erts_aint32_t state; int suspended = 0; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); - state = erts_smp_atomic32_read_acqb(&p->state); + state = erts_atomic32_read_acqb(&p->state); if ((state & ERTS_PSFLG_SUSPENDED)) suspended = -1; else { if (c_p == p) { - state = erts_smp_atomic32_read_bor_relb(&p->state, + state = erts_atomic32_read_bor_relb(&p->state, ERTS_PSFLG_SUSPENDED); ASSERT(state & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_RUNNING_SYS @@ -7040,7 +7040,7 @@ suspend_process(Process *c_p, Process *p) n = e = state; n |= ERTS_PSFLG_SUSPENDED; - state = 
erts_smp_atomic32_cmpxchg_relb(&p->state, n, e); + state = erts_atomic32_cmpxchg_relb(&p->state, n, e); if (state == e) { suspended = 1; break; @@ -7085,14 +7085,14 @@ resume_process(Process *p, ErtsProcLocks locks) erts_aint32_t state, enq_prio = -1; int enqueue; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); ASSERT(p->rcount > 0); if (--p->rcount > 0) /* multiple suspend */ return; - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); enqueue = change_proc_schedule_state(p, ERTS_PSFLG_SUSPENDED, 0, @@ -7112,7 +7112,7 @@ sched_resume_wake__(ErtsSchedulerSleepInfo *ssi) | ERTS_SSI_FLG_SUSPENDED); erts_aint32_t oflgs; do { - oflgs = erts_smp_atomic32_cmpxchg_relb(&ssi->flags, 0, xflgs); + oflgs = erts_atomic32_cmpxchg_relb(&ssi->flags, 0, xflgs); if (oflgs == xflgs) { erts_sched_finish_poke(ssi, oflgs); break; @@ -7153,7 +7153,7 @@ sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, erts_aint32_t xpct) erts_aint32_t xflgs = xpct; do { - oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); + oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return nflgs; xflgs = oflgs; @@ -7170,7 +7170,7 @@ sched_spin_suspended(ErtsSchedulerSleepInfo *ssi, int spincount) erts_aint32_t flgs; do { - flgs = erts_smp_atomic32_read_acqb(&ssi->flags); + flgs = erts_atomic32_read_acqb(&ssi->flags); if ((flgs & (ERTS_SSI_FLG_SLEEPING | ERTS_SSI_FLG_WAITING | ERTS_SSI_FLG_SUSPENDED)) @@ -7203,7 +7203,7 @@ sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi) erts_tse_reset(ssi->event); while (1) { - oflgs = erts_smp_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); + oflgs = erts_atomic32_cmpxchg_acqb(&ssi->flags, nflgs, xflgs); if (oflgs == xflgs) return nflgs; if ((oflgs & (ERTS_SSI_FLG_SLEEPING @@ -7221,7 +7221,7 @@ sched_set_suspended_sleeptype(ErtsSchedulerSleepInfo *ssi) static void init_scheduler_suspend(void) { - erts_smp_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd", NIL, + erts_mtx_init(&schdlr_sspnd.mtx, "schdlr_sspnd", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER); schdlr_sspnd.online.normal = 1; schdlr_sspnd.curr_online.normal = 1; @@ -7235,7 +7235,7 @@ init_scheduler_suspend(void) schdlr_sspnd.active.dirty_io = 0; schdlr_sspnd.last_msb_dirty_type = ERTS_SCHED_DIRTY_IO; #endif - erts_smp_atomic32_init_nob(&schdlr_sspnd.changing, 0); + erts_atomic32_init_nob(&schdlr_sspnd.changing, 0); schdlr_sspnd.chngq = NULL; schdlr_sspnd.changer = am_false; schdlr_sspnd.nmsb.ongoing = 0; @@ -7266,7 +7266,7 @@ schdlr_sspnd_resume_proc(ErtsSchedType sched_type, Eterm pid) : 0)); if (p) { resume_process(p, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); if (sched_type != ERTS_SCHED_NORMAL) erts_proc_dec_refc(p); } @@ -7465,7 +7465,7 @@ msb_scheduler_type_switch(ErtsSchedType sched_type, calls = ERTS_MODIFIED_TIMING_INPUT_REDS + 1; else calls = INPUT_REDUCTIONS + 1; - erts_smp_atomic32_set_nob(&function_calls, calls); + erts_atomic32_set_nob(&function_calls, calls); if ((nrml_prio == ERTS_MSB_NONE_PRIO_BIT) & ((dcpu_prio != ERTS_MSB_NONE_PRIO_BIT) @@ -7500,7 +7500,7 @@ msb_scheduler_type_switch(ErtsSchedType sched_type, #else (void) #endif - erts_smp_atomic32_read_bset_mb(&esdp->ssi->flags, + erts_atomic32_read_bset_mb(&esdp->ssi->flags, (ERTS_SSI_FLG_SUSPENDED | ERTS_SSI_FLG_MSB_EXEC), ERTS_SSI_FLG_SUSPENDED); @@ -7527,7 +7527,7 
@@ msb_scheduler_type_switch(ErtsSchedType sched_type, #else (void) #endif - erts_smp_atomic32_read_bset_mb(&exec_rq->scheduler->ssi->flags, + erts_atomic32_read_bset_mb(&exec_rq->scheduler->ssi->flags, (ERTS_SSI_FLG_SUSPENDED | ERTS_SSI_FLG_MSB_EXEC), ERTS_SSI_FLG_MSB_EXEC); @@ -7595,7 +7595,7 @@ suspend_scheduler(ErtsSchedulerData *esdp) return; } - if (erts_smp_atomic32_read_nob(&ssi->flags) & ERTS_SSI_FLG_MSB_EXEC) { + if (erts_atomic32_read_nob(&ssi->flags) & ERTS_SSI_FLG_MSB_EXEC) { ASSERT(no == 1); if (!msb_scheduler_type_switch(sched_type, esdp, no)) return; @@ -7606,14 +7606,14 @@ suspend_scheduler(ErtsSchedulerData *esdp) if (sched_type != ERTS_SCHED_NORMAL) { dirty_active(esdp, -1); - erts_smp_runq_unlock(esdp->run_queue); + erts_runq_unlock(esdp->run_queue); dirty_sched_wall_time_change(esdp, 0); } else { if (no != 1) evacuate_run_queue(esdp->run_queue, &sbp); - erts_smp_runq_unlock(esdp->run_queue); + erts_runq_unlock(esdp->run_queue); erts_sched_check_cpu_bind_prep_suspend(esdp); @@ -7621,14 +7621,14 @@ suspend_scheduler(ErtsSchedulerData *esdp) profile_scheduler(make_small(esdp->no), am_inactive); } - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_mtx_lock(&schdlr_sspnd.mtx); flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED); if (flgs & ERTS_SSI_FLG_SUSPENDED) { schdlr_sspnd_dec_nscheds(&schdlr_sspnd.active, sched_type); - changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); + changing = erts_atomic32_read_nob(&schdlr_sspnd.changing); while (1) { @@ -7660,7 +7660,7 @@ suspend_scheduler(ErtsSchedulerData *esdp) if (clr_flg) { ErtsProcList *plp, *end_plp; - changing = erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing, + changing = erts_atomic32_read_band_nob(&schdlr_sspnd.changing, ~clr_flg); changing &= ~clr_flg; (void) erts_proclist_fetch(&msb[i]->chngq, &end_plp); @@ -7706,7 +7706,7 @@ suspend_scheduler(ErtsSchedulerData *esdp) == schdlr_sspnd_get_nscheds(&schdlr_sspnd.curr_online, sched_type))) { ErtsProcList *plp; - changing = erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing, + changing = erts_atomic32_read_band_nob(&schdlr_sspnd.changing, ~online_flag); changing &= ~online_flag; if (sched_type == ERTS_SCHED_NORMAL) { @@ -7728,11 +7728,11 @@ suspend_scheduler(ErtsSchedulerData *esdp) } if (curr_online) { - flgs = erts_smp_atomic32_read_acqb(&ssi->flags); + flgs = erts_atomic32_read_acqb(&ssi->flags); if (!(flgs & ERTS_SSI_FLG_SUSPENDED)) break; } - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); schdlr_sspnd_resume_procs(sched_type, &resume); @@ -7760,9 +7760,9 @@ suspend_scheduler(ErtsSchedulerData *esdp) if (aux_work && erts_thr_progress_update(esdp)) erts_thr_progress_leader_update(esdp); if (evacuate) { - erts_smp_runq_lock(esdp->run_queue); + erts_runq_lock(esdp->run_queue); evacuate_run_queue(esdp->run_queue, &sbp); - erts_smp_runq_unlock(esdp->run_queue); + erts_runq_unlock(esdp->run_queue); } } @@ -7860,23 +7860,23 @@ suspend_scheduler(ErtsSchedulerData *esdp) | ERTS_SSI_FLG_SUSPENDED)); if (!(flgs & ERTS_SSI_FLG_SUSPENDED)) break; - changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); + changing = erts_atomic32_read_nob(&schdlr_sspnd.changing); if (changing) break; } - erts_smp_mtx_lock(&schdlr_sspnd.mtx); - changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); + erts_mtx_lock(&schdlr_sspnd.mtx); + changing = erts_atomic32_read_nob(&schdlr_sspnd.changing); } schdlr_sspnd_inc_nscheds(&schdlr_sspnd.active, sched_type); - changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); + 
changing = erts_atomic32_read_nob(&schdlr_sspnd.changing); if (changing) { if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB) && !schdlr_sspnd.msb.ongoing && schdlr_sspnd_eq_nscheds(&schdlr_sspnd.online, &schdlr_sspnd.active)) { - erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing, + erts_atomic32_read_band_nob(&schdlr_sspnd.changing, ~ERTS_SCHDLR_SSPND_CHNG_MSB); } if ((changing & ERTS_SCHDLR_SSPND_CHNG_NMSB) @@ -7885,14 +7885,14 @@ suspend_scheduler(ErtsSchedulerData *esdp) ERTS_SCHED_NORMAL) == schdlr_sspnd_get_nscheds(&schdlr_sspnd.active, ERTS_SCHED_NORMAL))) { - erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing, + erts_atomic32_read_band_nob(&schdlr_sspnd.changing, ~ERTS_SCHDLR_SSPND_CHNG_NMSB); } } ASSERT(no <= schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, sched_type)); } - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); schdlr_sspnd_resume_procs(sched_type, &resume); @@ -7911,7 +7911,7 @@ suspend_scheduler(ErtsSchedulerData *esdp) } } - erts_smp_runq_lock(esdp->run_queue); + erts_runq_lock(esdp->run_queue); non_empty_runq(esdp->run_queue); if (sched_type != ERTS_SCHED_NORMAL) @@ -7935,7 +7935,7 @@ erts_schedulers_state(Uint *total, { if (active || online || dirty_cpu_online || dirty_cpu_active || dirty_io_active) { - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_mtx_lock(&schdlr_sspnd.mtx); if (active) *active = schdlr_sspnd_get_nscheds(&schdlr_sspnd.active, ERTS_SCHED_NORMAL); @@ -7951,7 +7951,7 @@ erts_schedulers_state(Uint *total, if (dirty_io_active) *dirty_io_active = schdlr_sspnd_get_nscheds(&schdlr_sspnd.active, ERTS_SCHED_DIRTY_IO); - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); } if (total) @@ -7967,7 +7967,7 @@ abort_sched_onln_chng_waitq(Process *p) { Eterm resume = NIL; - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_mtx_lock(&schdlr_sspnd.mtx); #ifdef DEBUG { @@ -8017,7 +8017,7 @@ abort_sched_onln_chng_waitq(Process *p) } } - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); if (is_internal_pid(resume)) schdlr_sspnd_resume_proc(ERTS_SCHED_NORMAL, resume); @@ -8060,13 +8060,13 @@ erts_set_schedulers_online(Process *p, * race... 
*/ if (!(plocks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); suspend_process(p, p); if (!(plocks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_mtx_lock(&schdlr_sspnd.mtx); change_flags = 0; have_unlocked_plocks = 0; @@ -8076,7 +8076,7 @@ erts_set_schedulers_online(Process *p, if (!dirty_only) #endif { - changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing); + changing = erts_atomic32_read_nob(&schdlr_sspnd.changing); if (changing & ERTS_SCHDLR_SSPND_CHNG_ONLN) { enqueue_wait: p->flags |= F_SCHDLR_ONLN_WAITQ; @@ -8168,7 +8168,7 @@ erts_set_schedulers_online(Process *p, increase = (no > online); } - erts_smp_atomic32_read_bor_nob(&schdlr_sspnd.changing, change_flags); + erts_atomic32_read_bor_nob(&schdlr_sspnd.changing, change_flags); res = ERTS_SCHDLR_SSPND_DONE; if (increase) { @@ -8196,7 +8196,7 @@ erts_set_schedulers_online(Process *p, else { if (plocks) { have_unlocked_plocks = 1; - erts_smp_proc_unlock(p, plocks); + erts_proc_unlock(p, plocks); } change_no_used_runqs(no); @@ -8236,7 +8236,7 @@ erts_set_schedulers_online(Process *p, else { if (plocks) { have_unlocked_plocks = 1; - erts_smp_proc_unlock(p, plocks); + erts_proc_unlock(p, plocks); } change_no_used_runqs(no); @@ -8265,17 +8265,17 @@ done: <= schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, ERTS_SCHED_NORMAL)); - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); if (have_unlocked_plocks) - erts_smp_proc_lock(p, plocks); + erts_proc_lock(p, plocks); if (resume_proc) { if (!(plocks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); resume_process(p, plocks|ERTS_PROC_LOCK_STATUS); if (!(plocks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } return res; @@ -8313,13 +8313,13 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal else { resume_proc = 1; if (!(plocks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); suspend_process(p, p); if (!(plocks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_mtx_lock(&schdlr_sspnd.mtx); if (on) { /* ------ BLOCK ------ */ if (msbp->chngq) { ASSERT(msbp->ongoing); @@ -8349,12 +8349,12 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal p->flags |= have_blckd_flg; if (plocks) { have_unlocked_plocks = 1; - erts_smp_proc_unlock(p, plocks); + erts_proc_unlock(p, plocks); } ASSERT(!msbp->ongoing); msbp->ongoing = 1; - erts_smp_atomic32_read_bor_nob(&schdlr_sspnd.changing, + erts_atomic32_read_bor_nob(&schdlr_sspnd.changing, chng_flg); change_no_used_runqs(1); for (ix = 1; ix < erts_no_run_queues; ix++) @@ -8368,7 +8368,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal #ifdef ERTS_DIRTY_SCHEDULERS if (!normal) { ERTS_RUNQ_FLGS_SET_NOB(ERTS_RUNQ_IX(0), ERTS_RUNQ_FLG_MSB_EXEC); - erts_smp_atomic32_read_bor_nob(&ERTS_RUNQ_IX(0)->scheduler->ssi->flags, + erts_atomic32_read_bor_nob(&ERTS_RUNQ_IX(0)->scheduler->ssi->flags, ERTS_SSI_FLG_MSB_EXEC); for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) dcpu_sched_ix_suspend_wake(ix); @@ -8379,7 +8379,7 @@ 
erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal wait_until_msb: - ASSERT(chng_flg & erts_smp_atomic32_read_nob(&schdlr_sspnd.changing)); + ASSERT(chng_flg & erts_atomic32_read_nob(&schdlr_sspnd.changing)); plp = proclist_create(p); erts_proclist_store_last(&msbp->chngq, plp); @@ -8420,14 +8420,14 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal } if (!msbp->blckrs && !msbp->chngq) { int online; - erts_smp_atomic32_read_bor_nob(&schdlr_sspnd.changing, + erts_atomic32_read_bor_nob(&schdlr_sspnd.changing, chng_flg); p->flags &= ~have_blckd_flg; msbp->ongoing = 0; if (!(schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing)) { if (plocks) { have_unlocked_plocks = 1; - erts_smp_proc_unlock(p, plocks); + erts_proc_unlock(p, plocks); } online = (int) schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, @@ -8464,17 +8464,17 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal res = ERTS_SCHDLR_SSPND_DONE; } - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); if (have_unlocked_plocks) - erts_smp_proc_lock(p, plocks); + erts_proc_lock(p, plocks); if (resume_proc) { if (!(plocks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); resume_process(p, plocks|ERTS_PROC_LOCK_STATUS); if (!(plocks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } return res; @@ -8484,14 +8484,14 @@ int erts_is_multi_scheduling_blocked(void) { int res; - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_mtx_lock(&schdlr_sspnd.mtx); if (schdlr_sspnd.msb.blckrs) res = 1; else if (schdlr_sspnd.nmsb.blckrs) res = -1; else res = 0; - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); return res; } @@ -8503,7 +8503,7 @@ erts_multi_scheduling_blockers(Process *p, int normal) msbp = normal ? 
&schdlr_sspnd.nmsb : &schdlr_sspnd.msb; - erts_smp_mtx_lock(&schdlr_sspnd.mtx); + erts_mtx_lock(&schdlr_sspnd.mtx); if (!erts_proclist_is_empty(msbp->blckrs)) { Eterm *hp, *hp_end; ErtsProcList *plp1, *plp2; @@ -8531,7 +8531,7 @@ erts_multi_scheduling_blockers(Process *p, int normal) } HRelease(p, hp_end, hp); } - erts_smp_mtx_unlock(&schdlr_sspnd.mtx); + erts_mtx_unlock(&schdlr_sspnd.mtx); return res; } @@ -8835,7 +8835,7 @@ handle_pending_suspend(Process *p, ErtsProcLocks p_locks) ErtsPendingSuspend *psp; int is_alive = !ERTS_PROC_IS_EXITING(p); - ERTS_SMP_LC_ASSERT(p_locks & ERTS_PROC_LOCK_STATUS); + ERTS_LC_ASSERT(p_locks & ERTS_PROC_LOCK_STATUS); /* * New pending suspenders might appear while we are processing @@ -8861,15 +8861,15 @@ cancel_suspend_of_suspendee(Process *p, ErtsProcLocks p_locks) if (is_not_nil(p->suspendee)) { Process *rp; if (!(p_locks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); rp = erts_pid2proc(p, p_locks|ERTS_PROC_LOCK_STATUS, p->suspendee, ERTS_PROC_LOCK_STATUS); if (rp) { erts_resume(rp, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); } if (!(p_locks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); p->suspendee = NIL; } } @@ -8882,7 +8882,7 @@ handle_pend_sync_suspend(Process *suspendee, { Process *suspender; - ERTS_SMP_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS); + ERTS_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS); suspender = erts_pid2proc(suspendee, suspendee_locks, @@ -8898,7 +8898,7 @@ handle_pend_sync_suspend(Process *suspendee, resume suspender */ ASSERT(suspendee != suspender); resume_process(suspender, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS); } } @@ -8909,10 +8909,10 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, Process *rp; int unlock_c_p_status; - ERTS_SMP_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p)); - ERTS_SMP_LC_ASSERT(c_p_locks & ERTS_PROC_LOCK_MAIN); - ERTS_SMP_LC_ASSERT(pid_locks & (ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS)); + ERTS_LC_ASSERT(c_p_locks & ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(pid_locks & (ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS)); if (c_p->common.id == pid) return erts_pid2proc(c_p, c_p_locks, pid, pid_locks); @@ -8921,7 +8921,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, unlock_c_p_status = 0; else { unlock_c_p_status = 1; - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); } if (c_p->suspendee == pid) { @@ -8960,15 +8960,15 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, /* Other process running */ ASSERT((ERTS_PSFLG_RUNNING | ERTS_PSFLG_DIRTY_RUNNING) - & erts_smp_atomic32_read_nob(&rp->state)); + & erts_atomic32_read_nob(&rp->state)); #ifdef ERTS_DIRTY_SCHEDULERS if (!suspend - && (erts_smp_atomic32_read_nob(&rp->state) + && (erts_atomic32_read_nob(&rp->state) & ERTS_PSFLG_DIRTY_RUNNING)) { ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS; - if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) { - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + if (need_locks && erts_proc_trylock(rp, need_locks) == EBUSY) { + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); rp = erts_pid2proc(c_p, 
c_p_locks|ERTS_PROC_LOCK_STATUS, pid, pid_locks|ERTS_PROC_LOCK_STATUS); } @@ -8994,19 +8994,19 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, c_p->flags |= F_P2PNR_RESCHED; } /* Yield (caller is assumed to yield immediately in bif). */ - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); rp = ERTS_PROC_LOCK_BUSY; } else { ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS; - if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) { + if (need_locks && erts_proc_trylock(rp, need_locks) == EBUSY) { if ((ERTS_PSFLG_RUNNING_SYS|ERTS_PSFLG_DIRTY_RUNNING_SYS) - & erts_smp_atomic32_read_nob(&rp->state)) { + & erts_atomic32_read_nob(&rp->state)) { /* Executing system task... */ resume_process(rp, ERTS_PROC_LOCK_STATUS); goto running; } - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); /* * If we are unlucky, the process just got selected for * execution of a system task. In this case we may be @@ -9030,7 +9030,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, #ifdef DEBUG { erts_aint32_t state; - state = erts_smp_atomic32_read_nob(&rp->state); + state = erts_atomic32_read_nob(&rp->state); ASSERT((state & ERTS_PSFLG_PENDING_EXIT) || !(state & ERTS_PSFLG_RUNNING)); } @@ -9044,9 +9044,9 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, done: if (rp && rp != ERTS_PROC_LOCK_BUSY && !(pid_locks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); if (unlock_c_p_status) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); return rp; } @@ -9092,7 +9092,7 @@ do_bif_suspend_process(Process *c_p, { ASSERT(suspendee); ASSERT(!ERTS_PROC_IS_EXITING(suspendee)); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(suspendee)); if (smon) { if (!smon->active) { @@ -9115,7 +9115,7 @@ handle_pend_bif_sync_suspend(Process *suspendee, { Process *suspender; - ERTS_SMP_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS); + ERTS_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS); suspender = erts_pid2proc(suspendee, suspendee_locks, @@ -9144,7 +9144,7 @@ handle_pend_bif_sync_suspend(Process *suspendee, resume suspender */ ASSERT(suspender != suspendee); resume_process(suspender, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(suspender, + erts_proc_unlock(suspender, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); } } @@ -9158,7 +9158,7 @@ handle_pend_bif_async_suspend(Process *suspendee, Process *suspender; - ERTS_SMP_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS); + ERTS_LC_ASSERT(suspendee_locks & ERTS_PROC_LOCK_STATUS); suspender = erts_pid2proc(suspendee, suspendee_locks, @@ -9182,7 +9182,7 @@ handle_pend_bif_async_suspend(Process *suspendee, do_bif_suspend_process(suspendee, smon, suspendee); ASSERT(!smon || res != 0); } - erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(suspender, ERTS_PROC_LOCK_LINK); } } @@ -9234,7 +9234,7 @@ suspend_process_2(BIF_ALIST_2) ? 
(ErtsProcLocks) 0 : ERTS_PROC_LOCK_STATUS); - erts_smp_proc_lock(BIF_P, xlocks); + erts_proc_lock(BIF_P, xlocks); suspendee = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN|xlocks, @@ -9251,9 +9251,9 @@ suspend_process_2(BIF_ALIST_2) if (asynchronous) { /* --- Asynchronous suspend begin ---------------------------------- */ - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_LINK + ERTS_LC_ASSERT(ERTS_PROC_LOCK_LINK & erts_proc_lc_my_proc_locks(BIF_P)); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS == erts_proc_lc_my_proc_locks(suspendee)); if (smon->active) { @@ -9293,10 +9293,10 @@ suspend_process_2(BIF_ALIST_2) else /* if (!asynchronous) */ { /* --- Synchronous suspend begin ----------------------------------- */ - ERTS_SMP_LC_ASSERT(((ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS) + ERTS_LC_ASSERT(((ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS) & erts_proc_lc_my_proc_locks(BIF_P)) == (ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS)); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS == erts_proc_lc_my_proc_locks(suspendee)); if (BIF_P->suspendee == BIF_ARG_1) { @@ -9364,7 +9364,7 @@ suspend_process_2(BIF_ALIST_2) #ifdef DEBUG { - erts_aint32_t state = erts_smp_atomic32_read_acqb(&suspendee->state); + erts_aint32_t state = erts_atomic32_read_acqb(&suspendee->state); ASSERT((state & ERTS_PSFLG_SUSPENDED) || (asynchronous && smon->pending)); ASSERT((state & ERTS_PSFLG_SUSPENDED) @@ -9372,8 +9372,8 @@ suspend_process_2(BIF_ALIST_2) } #endif - erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(BIF_P, xlocks); + erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(BIF_P, xlocks); BIF_RET(res); system_limit: @@ -9394,9 +9394,9 @@ suspend_process_2(BIF_ALIST_2) do_return: if (suspendee) - erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); if (xlocks) - erts_smp_proc_unlock(BIF_P, xlocks); + erts_proc_unlock(BIF_P, xlocks); return res; } @@ -9416,7 +9416,7 @@ resume_process_1(BIF_ALIST_1) if (BIF_P->common.id == BIF_ARG_1) BIF_ERROR(BIF_P, BADARG); - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK); smon = erts_lookup_suspend_monitor(BIF_P->suspend_monitors, BIF_ARG_1); if (!smon) { @@ -9462,17 +9462,17 @@ resume_process_1(BIF_ALIST_1) goto no_suspendee; ASSERT(ERTS_PSFLG_SUSPENDED - & erts_smp_atomic32_read_nob(&suspendee->state)); + & erts_atomic32_read_nob(&suspendee->state)); ASSERT(BIF_P != suspendee); resume_process(suspendee, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); } if (!smon->active && !smon->pending) erts_delete_suspend_monitor(&BIF_P->suspend_monitors, BIF_ARG_1); - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); BIF_RET(am_true); @@ -9481,7 +9481,7 @@ resume_process_1(BIF_ALIST_1) erts_delete_suspend_monitor(&BIF_P->suspend_monitors, BIF_ARG_1); error: - erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK); BIF_ERROR(BIF_P, BADARG); } @@ -9494,7 +9494,7 @@ erts_internal_is_process_executing_dirty_1(BIF_ALIST_1) else { Process *rp = erts_proc_lookup(BIF_ARG_1); if (rp) { - erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state); + erts_aint32_t state = erts_atomic32_read_nob(&rp->state); if (state & (ERTS_PSFLG_DIRTY_RUNNING |ERTS_PSFLG_DIRTY_RUNNING_SYS)) { BIF_RET(am_true); @@ -9511,9 +9511,9 @@ 
run_queues_len_aux(ErtsRunQueue *rq, Uint *tot_len, Uint *qlen, int *ip, int inc Sint rq_len; if (locked) - rq_len = (Sint) erts_smp_atomic32_read_dirty(&rq->len); + rq_len = (Sint) erts_atomic32_read_dirty(&rq->len); else - rq_len = (Sint) erts_smp_atomic32_read_nob(&rq->len); + rq_len = (Sint) erts_atomic32_read_nob(&rq->len); ASSERT(rq_len >= 0); if (incl_active_sched) { @@ -9521,12 +9521,12 @@ run_queues_len_aux(ErtsRunQueue *rq, Uint *tot_len, Uint *qlen, int *ip, int inc if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) { erts_aint32_t dcnt; if (ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(rq)) { - dcnt = erts_smp_atomic32_read_nob(&dirty_count.cpu.active); + dcnt = erts_atomic32_read_nob(&dirty_count.cpu.active); ASSERT(0 <= dcnt && dcnt <= erts_no_dirty_cpu_schedulers); } else { ASSERT(ERTS_RUNQ_IS_DIRTY_IO_RUNQ(rq)); - dcnt = erts_smp_atomic32_read_nob(&dirty_count.io.active); + dcnt = erts_atomic32_read_nob(&dirty_count.io.active); ASSERT(0 <= dcnt && dcnt <= erts_no_dirty_io_schedulers); } rq_len += (Sint) dcnt; @@ -9610,7 +9610,7 @@ erts_process_status(Process *rp, Eterm rpid) Process *p = rp ? rp : erts_proc_lookup_raw(rpid); if (p) { - erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state); + erts_aint32_t state = erts_atomic32_read_acqb(&p->state); res = erts_process_state2status(state); } else { @@ -9619,14 +9619,14 @@ erts_process_status(Process *rp, Eterm rpid) for (i = 0; i < erts_no_schedulers; i++) { esdp = ERTS_SCHEDULER_IX(i); - erts_smp_runq_lock(esdp->run_queue); + erts_runq_lock(esdp->run_queue); if (esdp->free_process && esdp->free_process->common.id == rpid) { res = am_free; - erts_smp_runq_unlock(esdp->run_queue); + erts_runq_unlock(esdp->run_queue); break; } - erts_smp_runq_unlock(esdp->run_queue); + erts_runq_unlock(esdp->run_queue); } } return res; @@ -9644,9 +9644,9 @@ erts_suspend(Process* c_p, ErtsProcLocks c_p_locks, Port *busy_port) int suspend; ASSERT(c_p == erts_get_current_process()); - ERTS_SMP_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(c_p_locks == erts_proc_lc_my_proc_locks(c_p)); if (!(c_p_locks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); if (busy_port) suspend = erts_save_suspend_process_on_port(busy_port, c_p); @@ -9662,7 +9662,7 @@ erts_suspend(Process* c_p, ErtsProcLocks c_p_locks, Port *busy_port) } if (!(c_p_locks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); if (suspend && busy_port && erts_system_monitor_flags.busy_port) monitor_generic(c_p, am_busy_port, busy_port->common.id); @@ -9671,12 +9671,12 @@ erts_suspend(Process* c_p, ErtsProcLocks c_p_locks, Port *busy_port) void erts_resume(Process* process, ErtsProcLocks process_locks) { - ERTS_SMP_LC_ASSERT(process_locks == erts_proc_lc_my_proc_locks(process)); + ERTS_LC_ASSERT(process_locks == erts_proc_lc_my_proc_locks(process)); if (!(process_locks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_lock(process, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(process, ERTS_PROC_LOCK_STATUS); resume_process(process, process_locks|ERTS_PROC_LOCK_STATUS); if (!(process_locks & ERTS_PROC_LOCK_STATUS)) - erts_smp_proc_unlock(process, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(process, ERTS_PROC_LOCK_STATUS); } int @@ -9696,7 +9696,7 @@ erts_resume_processes(ErtsProcList *list) resume_process(proc, ERTS_PROC_LOCK_STATUS); nresumed++; } - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(proc, ERTS_PROC_LOCK_STATUS); } fplp = plp; 
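`run_queues_len_aux()` above picks between `erts_atomic32_read_dirty()` (caller holds the run-queue lock) and `erts_atomic32_read_nob()` (lock-free snapshot). In plain C11 both would collapse to a relaxed atomic load; the sketch below, with hypothetical `mini_runq_t`/`runq_len()` names, is an approximate mapping and assumes the distinction mainly documents the caller's locking contract.

```c
#include <pthread.h>
#include <stdatomic.h>

/* Hypothetical miniature of a run queue for this example. */
typedef struct {
    pthread_mutex_t mtx;
    atomic_int len;
} mini_runq_t;

/* Approximate C11 rendering of run_queues_len_aux()'s two read paths.
 * Both flavors become a relaxed atomic load here; ERTS keeps separate
 * entry points so each call site states whether rq->mtx is held, and
 * the unlocked snapshot is understood to be possibly stale (the value
 * feeds queue-length statistics). */
static int runq_len(mini_runq_t *rq, int locked)
{
    (void)locked; /* the contract differs; the generated load need not */
    return atomic_load_explicit(&rq->len, memory_order_relaxed);
}
```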
plp = plp->next; @@ -9708,7 +9708,7 @@ erts_resume_processes(ErtsProcList *list) Eterm erts_get_process_priority(Process *p) { - erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state); + erts_aint32_t state = erts_atomic32_read_nob(&p->state); switch (ERTS_PSFLGS_GET_USR_PRIO(state)) { case PRIORITY_MAX: return am_max; case PRIORITY_HIGH: return am_high; @@ -9731,7 +9731,7 @@ erts_set_process_priority(Process *p, Eterm value) default: return THE_NON_VALUE; break; } - a = erts_smp_atomic32_read_nob(&p->state); + a = erts_atomic32_read_nob(&p->state); if (nprio == ERTS_PSFLGS_GET_USR_PRIO(a)) oprio = nprio; else { @@ -9739,7 +9739,7 @@ erts_set_process_priority(Process *p, Eterm value) erts_aint32_t e, n, aprio; if (a & ERTS_PSFLG_ACTIVE_SYS) { - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); slocked = 1; } @@ -9753,7 +9753,7 @@ erts_set_process_priority(Process *p, Eterm value) int max_qbit; if (!slocked) { - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); slocked = 1; } @@ -9794,11 +9794,11 @@ erts_set_process_priority(Process *p, Eterm value) n |= ((nprio << ERTS_PSFLGS_USR_PRIO_OFFSET) | (aprio << ERTS_PSFLGS_ACT_PRIO_OFFSET)); - a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); + a = erts_atomic32_cmpxchg_mb(&p->state, n, e); } while (a != e); if (slocked) - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } @@ -9883,7 +9883,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) input_reductions = INPUT_REDUCTIONS; } - ERTS_SMP_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()) + ERTS_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()) || !erts_thr_progress_is_blocking()); /* @@ -9905,9 +9905,9 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) #endif rq = erts_get_runq_current(esdp); ASSERT(esdp); - fcalls = (int) erts_smp_atomic32_read_acqb(&function_calls); + fcalls = (int) erts_atomic32_read_acqb(&function_calls); actual_reds = reds = 0; - erts_smp_runq_lock(rq); + erts_runq_lock(rq); } else { #ifdef ERTS_DIRTY_SCHEDULERS is_normal_sched = !esdp; @@ -9927,7 +9927,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) sched_out_proc: - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); reds = actual_reds = calls - esdp->virtual_reds; @@ -9936,14 +9936,14 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) reds = ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST; esdp->virtual_reds = 0; - fcalls = (int) erts_smp_atomic32_add_read_acqb(&function_calls, reds); + fcalls = (int) erts_atomic32_add_read_acqb(&function_calls, reds); ASSERT(esdp && esdp == erts_get_scheduler_data()); rq = erts_get_runq_current(esdp); p->reds += actual_reds; - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); if (IS_TRACED(p)) { if (IS_TRACED_FL(p, F_TRACE_CALLS) && !(state & ERTS_PSFLG_FREE)) @@ -9962,16 +9962,16 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) } } - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); if (p->trace_msg_q) { - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); erts_schedule_flush_trace_messages(p, 1); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_lock(p, 
ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); } /* have to re-read state after taking lock */ - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); if (is_normal_sched && (state & ERTS_PSFLG_PENDING_EXIT)) erts_handle_pending_exit(p, (ERTS_PROC_LOCK_MAIN @@ -10001,7 +10001,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) if (is_normal_sched) p->scheduler_data = NULL; - erts_smp_proc_unlock(p, (ERTS_PROC_LOCK_MAIN + erts_proc_unlock(p, (ERTS_PROC_LOCK_MAIN | ERTS_PROC_LOCK_STATUS | ERTS_PROC_LOCK_TRACE)); @@ -10024,21 +10024,21 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) ASSERT(!esdp->free_process); ASSERT(!esdp->current_process); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (is_normal_sched) { if (esdp->check_time_reds >= ERTS_CHECK_TIME_REDS) (void) erts_get_monotonic_time(esdp); if (esdp->last_monotonic_time >= erts_next_timeout_time(esdp->next_tmo_ref)) { - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); erts_bump_timers(esdp->timer_wheel, esdp->last_monotonic_time); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); } } } - ERTS_SMP_LC_ASSERT(!is_normal_sched || !erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(!is_normal_sched || !erts_thr_progress_is_blocking()); check_activities_to_run: { erts_aint32_t psflg_running, psflg_running_sys; @@ -10049,7 +10049,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) if (rq->check_balance_reds <= 0) check_balance(rq); - ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(!erts_thr_progress_is_blocking()); mps = erts_get_migration_paths_managed(); mp = &mps->mpath[rq->ix]; @@ -10058,14 +10058,14 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) immigrate(rq, mp); } - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); continue_check_activities_to_run: flags = ERTS_RUNQ_FLGS_GET_NOB(rq); continue_check_activities_to_run_known_flags: ASSERT(!is_normal_sched || (flags & ERTS_RUNQ_FLG_NONEMPTY)); if (!is_normal_sched) { - if (erts_smp_atomic32_read_acqb(&esdp->ssi->flags) + if (erts_atomic32_read_acqb(&esdp->ssi->flags) & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC)) { suspend_scheduler(esdp); } @@ -10095,17 +10095,17 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) leader_update = erts_thr_progress_update(esdp); aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work); if (aux_work | leader_update) { - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); if (leader_update) erts_thr_progress_leader_update(esdp); if (aux_work) handle_aux_work(&esdp->aux_work_data, aux_work, 0); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); } - ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(!erts_thr_progress_is_blocking()); } - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); flags = ERTS_RUNQ_FLGS_GET_NOB(rq); @@ -10117,7 +10117,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) } else if (!runq_got_work_to_execute_flags(flags)) { /* Prepare for scheduler wait */ - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); rq->wakeup_other = 0; rq->wakeup_other_reds = 0; @@ -10131,7 +10131,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) ASSERT(!runq_got_work_to_execute(rq)); if (!is_normal_sched) { /* Dirty scheduler */ - if 
(erts_smp_atomic32_read_acqb(&esdp->ssi->flags) + if (erts_atomic32_read_acqb(&esdp->ssi->flags) & (ERTS_SSI_FLG_SUSPENDED|ERTS_SSI_FLG_MSB_EXEC)) { /* Go suspend... */ goto continue_check_activities_to_run_known_flags; @@ -10187,13 +10187,13 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) ERTS_MSACC_PUSH_STATE_CACHED_M(); - erts_smp_atomic32_set_relb(&function_calls, 0); + erts_atomic32_set_relb(&function_calls, 0); fcalls = 0; #if 0 /* Not needed since we wont wait in sys schedule */ erts_sys_schedule_interrupt(0); #endif - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_CHECK_IO); LTTNG2(scheduler_poll, esdp->no, 1); @@ -10205,7 +10205,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref)) erts_bump_timers(esdp->timer_wheel, current_time); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); clear_sys_scheduling(); goto continue_check_activities_to_run; } @@ -10311,7 +10311,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) proxy_p = NULL; goto pick_next_process; } - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); } #ifdef ERTS_DIRTY_SCHEDULERS @@ -10367,7 +10367,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) else new |= psflg_running; } - state = erts_smp_atomic32_cmpxchg_relb(&p->state, new, exp); + state = erts_atomic32_cmpxchg_relb(&p->state, new, exp); if (state == exp) { if (!run_process) { if (proxy_p) { @@ -10394,7 +10394,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) calls = 0; reds = context_reds; - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); } @@ -10404,11 +10404,11 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) if (flags & ERTS_RUNQ_FLG_PROTECTED) (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); if (erts_sched_stat.enabled) { int prio; @@ -10419,17 +10419,17 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) prio = (int) ERTS_PSFLGS_GET_USR_PRIO(state); - erts_smp_spin_lock(&erts_sched_stat.lock); + erts_spin_lock(&erts_sched_stat.lock); erts_sched_stat.prio[prio].total_executed++; erts_sched_stat.prio[prio].executed++; if (migrated) { erts_sched_stat.prio[prio].total_migrated++; erts_sched_stat.prio[prio].migrated++; } - erts_smp_spin_unlock(&erts_sched_stat.lock); + erts_spin_unlock(&erts_sched_stat.lock); } - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); #ifndef ERTS_DIRTY_SCHEDULERS ASSERT(!p->scheduler_data); @@ -10440,7 +10440,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) & (!(state & ERTS_PSFLG_ACTIVE_SYS))) { /* Migrate to dirty scheduler... 
*/ sunlock_sched_out_proc: - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); goto sched_out_proc; } ASSERT(!p->scheduler_data); @@ -10475,11 +10475,11 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) if (state & ERTS_PSFLG_PENDING_EXIT) { erts_handle_pending_exit(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); - state = erts_smp_atomic32_read_nob(&p->state); + state = erts_atomic32_read_nob(&p->state); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); /* Clear tracer if it has been removed */ if (IS_TRACED(p) && erts_is_tracer_proc_enabled( @@ -10537,7 +10537,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) n &= ~psflg_running_sys; n |= psflg_running; - state = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); + state = erts_atomic32_cmpxchg_mb(&p->state, n, e); if (state == e) { state = n; break; @@ -10571,12 +10571,12 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls) p->fcalls = reds; - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); /* Never run a suspended process */ #ifdef DEBUG { - erts_aint32_t dstate = erts_smp_atomic32_read_nob(&p->state); + erts_aint32_t dstate = erts_atomic32_read_nob(&p->state); ASSERT(!(ERTS_PSFLG_SUSPENDED & dstate) || (ERTS_PSFLG_DIRTY_RUNNING_SYS & dstate)); } @@ -10657,7 +10657,7 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, rp_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); #ifdef ERTS_DIRTY_SCHEDULERS if (!normal_sched) @@ -10681,7 +10681,7 @@ fetch_sys_task(Process *c_p, erts_aint32_t state, int *qmaskp, int *priop) *priop = -1; /* Shut up annoying erroneous warning */ - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); if (!c_p->sys_task_qs) { qmask = 0; @@ -10801,13 +10801,13 @@ fetch_sys_task(Process *c_p, erts_aint32_t state, int *qmaskp, int *priop) if (a == n) break; - a = erts_smp_atomic32_cmpxchg_nob(&c_p->state, n, e); + a = erts_atomic32_cmpxchg_nob(&c_p->state, n, e); } while (a != e); } done: - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); if (unused_qs) proc_sys_task_queues_free(unused_qs); @@ -10831,7 +10831,7 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) int qmask = 0; ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p))); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); do { ErtsProcSysTaskType type; @@ -10952,7 +10952,7 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds) if (st) reds += notify_sys_task_executed(c_p, st, st_res, 1); - state = erts_smp_atomic32_read_acqb(&c_p->state); + state = erts_atomic32_read_acqb(&c_p->state); } while (qmask && reds > 0); *statep = state; @@ -10973,7 +10973,7 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds) * are dirty tasks. 
*/ - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN); do { ErtsProcSysTask *st; @@ -11016,7 +11016,7 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds) reds += notify_sys_task_executed(c_p, st, st_res, 1); - state = erts_smp_atomic32_read_acqb(&c_p->state); + state = erts_atomic32_read_acqb(&c_p->state); } while (qmask && reds < max_reds); return reds; @@ -11051,19 +11051,19 @@ erts_execute_dirty_system_task(Process *c_p) } if (c_p->flags & F_DIRTY_GC_HIBERNATE) { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); if (c_p->msg.len) c_p->flags &= ~F_DIRTY_GC_HIBERNATE; /* operation aborted... */ else { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); c_p->fvalue = NIL; erts_garbage_collect_hibernate(c_p); ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); ASSERT(!ERTS_PROC_IS_EXITING(c_p)); } - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS); } if (c_p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) { @@ -11107,7 +11107,7 @@ erts_execute_dirty_system_task(Process *c_p) } - erts_smp_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_DIRTY_ACTIVE_SYS); + erts_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_DIRTY_ACTIVE_SYS); } static BIF_RETTYPE @@ -11160,7 +11160,7 @@ dispatch_system_task(Process *c_p, erts_aint_t fail_state, erts_queue_message(rp, rp_locks, mp, msg, st->requester); if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); return ret; } @@ -11366,7 +11366,7 @@ erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type, void* arg) st->req_id_sz = 0; st->arg[0] = (Eterm)arg; ERTS_INIT_OFF_HEAP(&st->off_heap); - state = erts_smp_atomic32_read_nob(&rp->state); + state = erts_atomic32_read_nob(&rp->state); fail_state = ERTS_PSFLG_EXITING; @@ -11420,7 +11420,7 @@ erts_schedule_flush_trace_messages(Process *proc, int force_on_proc) erts_aint32_t state; if (!force_on_proc) { - state = erts_smp_atomic32_read_nob(&proc->state); + state = erts_atomic32_read_nob(&proc->state); if (state & (ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_DIRTY_RUNNING_SYS)) { goto sched_flush_dirty; @@ -11436,7 +11436,7 @@ erts_schedule_flush_trace_messages(Process *proc, int force_on_proc) #ifdef ERTS_DIRTY_SCHEDULERS if (!force_on_proc) { - state = erts_smp_atomic32_read_mb(&proc->state); + state = erts_atomic32_read_mb(&proc->state); if (state & (ERTS_PSFLG_DIRTY_RUNNING | ERTS_PSFLG_DIRTY_RUNNING_SYS)) { void *vargp; @@ -11473,7 +11473,7 @@ save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio) erts_aint32_t state; ErtsProcSysTaskQs *qs; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); qs = ERTS_PROC_GET_DELAYED_GC_TASK_QS(c_p); if (!qs) { @@ -11503,7 +11503,7 @@ save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio) } } - state = erts_smp_atomic32_read_nob(&c_p->state); + state = erts_atomic32_read_nob(&c_p->state); ASSERT((ERTS_PSFLG_RUNNING | ERTS_PSFLG_RUNNING_SYS | ERTS_PSFLG_DIRTY_RUNNING @@ -11519,7 
+11519,7 @@ save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio) n &= ~ERTS_PSFLGS_ACT_PRIO_MASK; n |= prio << ERTS_PSFLGS_ACT_PRIO_OFFSET; } - state = erts_smp_atomic32_cmpxchg_relb(&c_p->state, n, e); + state = erts_atomic32_cmpxchg_relb(&c_p->state, n, e); if (state == e) break; } @@ -11540,8 +11540,8 @@ erts_set_gc_state(Process *c_p, int enable) ErtsProcSysTaskQs *dgc_tsk_qs; ASSERT(c_p == erts_get_current_process()); ASSERT((ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS) - & erts_smp_atomic32_read_nob(&c_p->state)); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); + & erts_atomic32_read_nob(&c_p->state)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p)); if (!enable) { c_p->flags |= F_DISABLE_GC; @@ -11556,7 +11556,7 @@ erts_set_gc_state(Process *c_p, int enable) /* Move delayed gc tasks into sys tasks queues. */ - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(c_p, ERTS_PROC_LOCK_STATUS); if (!c_p->sys_task_qs) { c_p->sys_task_qs = dgc_tsk_qs; @@ -11629,7 +11629,7 @@ erts_set_gc_state(Process *c_p, int enable) erts_aint32_t aprio, state = #endif - erts_smp_atomic32_read_bset_nob(&c_p->state, + erts_atomic32_read_bset_nob(&c_p->state, (ERTS_PSFLG_DELAYED_SYS | ERTS_PSFLG_ACTIVE_SYS), ERTS_PSFLG_ACTIVE_SYS); @@ -11643,7 +11643,7 @@ erts_set_gc_state(Process *c_p, int enable) } #endif - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS); (void) ERTS_PROC_SET_DELAYED_GC_TASK_QS(c_p, NULL); @@ -11659,24 +11659,24 @@ erts_sched_stat_modify(int what) int ix; switch (what) { case ERTS_SCHED_STAT_MODIFY_ENABLE: - erts_smp_thr_progress_block(); + erts_thr_progress_block(); erts_sched_stat.enabled = 1; - erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); break; case ERTS_SCHED_STAT_MODIFY_DISABLE: - erts_smp_thr_progress_block(); + erts_thr_progress_block(); erts_sched_stat.enabled = 0; - erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); break; case ERTS_SCHED_STAT_MODIFY_CLEAR: - erts_smp_spin_lock(&erts_sched_stat.lock); + erts_spin_lock(&erts_sched_stat.lock); for (ix = 0; ix < ERTS_NO_PRIO_LEVELS; ix++) { erts_sched_stat.prio[ix].total_executed = 0; erts_sched_stat.prio[ix].executed = 0; erts_sched_stat.prio[ix].total_migrated = 0; erts_sched_stat.prio[ix].migrated = 0; } - erts_smp_spin_unlock(&erts_sched_stat.lock); + erts_spin_unlock(&erts_sched_stat.lock); break; } } @@ -11690,7 +11690,7 @@ erts_sched_stat_term(Process *p, int total) Uint executed[ERTS_NO_PRIO_LEVELS]; Uint migrated[ERTS_NO_PRIO_LEVELS]; - erts_smp_spin_lock(&erts_sched_stat.lock); + erts_spin_lock(&erts_sched_stat.lock); if (total) { int i; for (i = 0; i < ERTS_NO_PRIO_LEVELS; i++) { @@ -11709,7 +11709,7 @@ erts_sched_stat_term(Process *p, int total) erts_sched_stat.prio[i].migrated = 0; } } - erts_smp_spin_unlock(&erts_sched_stat.lock); + erts_spin_unlock(&erts_sched_stat.lock); sz = 0; (void) erts_bld_atom_2uint_3tup_list(NULL, &sz, ERTS_NO_PRIO_LEVELS, @@ -11739,7 +11739,7 @@ erts_schedule_misc_op(void (*func)(void *), void *arg) rq = erq; } - erts_smp_runq_lock(rq); + erts_runq_lock(rq); molp->next = NULL; molp->func = func; @@ -11754,7 +11754,7 @@ erts_schedule_misc_op(void (*func)(void *), void *arg) ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_MISC_OP); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); smp_notify_inc_runq(rq); } @@ -11787,7 +11787,7 @@ exec_misc_ops(ErtsRunQueue *rq) if (!rq->misc.start) ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_MISC_OP); - 
erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); while (molp) { tmp_molp = molp; @@ -11796,7 +11796,7 @@ exec_misc_ops(ErtsRunQueue *rq) misc_op_list_free(tmp_molp); } - erts_smp_runq_lock(rq); + erts_runq_lock(rq); } Uint @@ -11827,12 +11827,12 @@ erts_get_exact_total_reductions(Process *c_p, Uint *redsp, Uint *diffp) { Uint reds = erts_current_reductions(c_p, c_p); int ix; - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); /* * Wait for other schedulers to schedule out their processes * and update 'reductions'. */ - erts_smp_thr_progress_block(); + erts_thr_progress_block(); for (reds = 0, ix = 0; ix < erts_no_run_queues; ix++) reds += ERTS_RUNQ_IX(ix)->procs.reductions; if (redsp) @@ -11840,8 +11840,8 @@ erts_get_exact_total_reductions(Process *c_p, Uint *redsp, Uint *diffp) if (diffp) *diffp = reds - last_exact_reductions; last_exact_reductions = reds; - erts_smp_thr_progress_unblock(); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_thr_progress_unblock(); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } static void delete_process(Process* p); @@ -11850,7 +11850,7 @@ void erts_free_proc(Process *p) { erts_proc_lock_fin(p); - ASSERT(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_FREE); + ASSERT(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_FREE); ASSERT(0 == erts_proc_read_refc(p)); if (p->flags & F_DELAYED_DEL_PROC) delete_process(p); @@ -11870,10 +11870,10 @@ static void early_init_process_struct(void *varg, Eterm data) proc->common.id = make_internal_pid(data); #ifdef ERTS_DIRTY_SCHEDULERS - erts_smp_atomic32_init_nob(&proc->dirty_state, 0); + erts_atomic32_init_nob(&proc->dirty_state, 0); proc->dirty_sys_tasks = NULL; #endif - erts_smp_atomic32_init_relb(&proc->state, arg->state); + erts_atomic32_init_relb(&proc->state, arg->state); RUNQ_SET_RQ(&proc->run_queue, arg->run_queue); @@ -11950,7 +11950,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). INITIALIZE_LITERAL_PURGE_AREA(litarea); #endif - erts_smp_proc_lock(parent, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(parent, ERTS_PROC_LOCKS_ALL_MINOR); /* * Check for errors. @@ -11998,9 +11998,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). goto error; } - ASSERT((erts_smp_atomic32_read_nob(&p->state) + ASSERT((erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_ON_HEAP_MSGQ) - || (erts_smp_atomic32_read_nob(&p->state) + || (erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_OFF_HEAP_MSGQ)); #ifdef SHCOPY_SPAWN @@ -12026,7 +12026,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). p->min_vheap_size = BIN_VH_MIN_SIZE; MAX_HEAP_SIZE_SET(p, H_MAX_SIZE); MAX_HEAP_SIZE_FLAGS_SET(p, H_MAX_FLAGS); - p->max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs); + p->max_gen_gcs = (Uint16) erts_atomic32_read_nob(&erts_max_gen_gcs); } p->schedule_count = 0; ASSERT(p->min_heap_size == erts_next_heap_size(p->min_heap_size, 0)); @@ -12128,7 +12128,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). p->mbuf = NULL; p->msg_frag = NULL; p->mbuf_sz = 0; - erts_smp_atomic_init_nob(&p->psd, (erts_aint_t) NULL); + erts_atomic_init_nob(&p->psd, (erts_aint_t) NULL); p->dictionary = NULL; p->seq_trace_lastcnt = 0; p->seq_trace_clock = 0; @@ -12179,8 +12179,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). 
} if (ARE_TRACE_FLAGS_ON(parent, F_TRACE_PROCS)) { locks &= ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); - erts_smp_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); trace_proc_spawn(parent, am_spawn, p->common.id, mod, func, args); if (so->flags & SPO_LINK) trace_proc(parent, locks, parent, am_link, p->common.id); @@ -12192,8 +12192,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). == (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE)) { /* This happens when parent was not traced, but child is */ locks &= ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); - erts_smp_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); + erts_proc_unlock(parent, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE); } trace_proc_spawn(p, am_spawned, parent->common.id, mod, func, args); if (so->flags & SPO_LINK) @@ -12232,7 +12232,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). so->mref = mref; } - erts_smp_proc_unlock(p, locks); + erts_proc_unlock(p, locks); res = p->common.id; @@ -12240,7 +12240,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). * Schedule process for execution. */ - erts_smp_proc_unlock(parent, locks & ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(parent, locks & ERTS_PROC_LOCKS_ALL_MINOR); schedule_process(p, state, 0); @@ -12260,7 +12260,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). error: - erts_smp_proc_unlock(parent, locks & ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(parent, locks & ERTS_PROC_LOCKS_ALL_MINOR); return res; } @@ -12311,7 +12311,7 @@ void erts_init_empty_process(Process *p) p->mbuf = NULL; p->msg_frag = NULL; p->mbuf_sz = 0; - erts_smp_atomic_init_nob(&p->psd, (erts_aint_t) NULL); + erts_atomic_init_nob(&p->psd, (erts_aint_t) NULL); ERTS_P_MONITORS(p) = NULL; ERTS_P_LINKS(p) = NULL; /* List of links */ p->nodes_monitors = NULL; @@ -12363,10 +12363,10 @@ void erts_init_empty_process(Process *p) #endif #ifdef ERTS_DIRTY_SCHEDULERS - erts_smp_atomic32_init_nob(&p->dirty_state, 0); + erts_atomic32_init_nob(&p->dirty_state, 0); p->dirty_sys_tasks = NULL; #endif - erts_smp_atomic32_init_nob(&p->state, (erts_aint32_t) PRIORITY_NORMAL); + erts_atomic32_init_nob(&p->state, (erts_aint32_t) PRIORITY_NORMAL); p->scheduler_data = NULL; p->msg_inq.first = NULL; @@ -12377,7 +12377,7 @@ void erts_init_empty_process(Process *p) p->pending_exit.reason = THE_NON_VALUE; p->pending_exit.bp = NULL; erts_proc_lock_init(p); - erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL); + erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL); RUNQ_SET_RQ(&p->run_queue, ERTS_RUNQ_IX(0)); #if !defined(NO_FPE_SIGNALS) || defined(HIPE) @@ -12487,10 +12487,10 @@ delete_process(Process* p) /* Cleanup psd */ - psd = (ErtsPSD *) erts_smp_atomic_read_nob(&p->psd); + psd = (ErtsPSD *) erts_atomic_read_nob(&p->psd); if (psd) { - erts_smp_atomic_set_nob(&p->psd, (erts_aint_t) NULL); /* Reduction counting depends on this... */ + erts_atomic_set_nob(&p->psd, (erts_aint_t) NULL); /* Reduction counting depends on this... 
*/ erts_free(ERTS_ALC_T_PSD, psd); } @@ -12548,7 +12548,7 @@ set_proc_exiting(Process *p, { erts_aint32_t state = in_state, enq_prio = -1; int enqueue; - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCKS_ALL); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCKS_ALL); enqueue = change_proc_schedule_state(p, (ERTS_PSFLG_SUSPENDED @@ -12583,9 +12583,9 @@ set_proc_self_exiting(Process *c_p) #endif erts_aint32_t state, enq_prio = -1; - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCKS_ALL); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCKS_ALL); - state = erts_smp_atomic32_read_nob(&c_p->state); + state = erts_atomic32_read_nob(&c_p->state); ASSERT(state & (ERTS_PSFLG_RUNNING |ERTS_PSFLG_RUNNING_SYS | ERTS_PSFLG_DIRTY_RUNNING @@ -12611,31 +12611,31 @@ erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks) { ErtsProcLocks xlocks; ASSERT(is_value(c_p->pending_exit.reason)); - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == locks); - ERTS_SMP_LC_ASSERT(locks & ERTS_PROC_LOCK_MAIN); - ERTS_SMP_LC_ASSERT(!((ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE) - & erts_smp_atomic32_read_nob(&c_p->state))); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == locks); + ERTS_LC_ASSERT(locks & ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(!((ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE) + & erts_atomic32_read_nob(&c_p->state))); /* Ensure that all locks on c_p are locked before proceeding... */ if (locks == ERTS_PROC_LOCKS_ALL) xlocks = 0; else { xlocks = ~locks & ERTS_PROC_LOCKS_ALL; - if (erts_smp_proc_trylock(c_p, xlocks) == EBUSY) { - erts_smp_proc_unlock(c_p, locks & ~ERTS_PROC_LOCK_MAIN); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + if (erts_proc_trylock(c_p, xlocks) == EBUSY) { + erts_proc_unlock(c_p, locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); } } set_proc_exiting(c_p, - erts_smp_atomic32_read_acqb(&c_p->state), + erts_atomic32_read_acqb(&c_p->state), c_p->pending_exit.reason, c_p->pending_exit.bp); c_p->pending_exit.reason = THE_NON_VALUE; c_p->pending_exit.bp = NULL; if (xlocks) - erts_smp_proc_unlock(c_p, xlocks); + erts_proc_unlock(c_p, xlocks); } static void save_pending_exiter(Process *p, ErtsProcList *plp); @@ -12656,9 +12656,9 @@ do_handle_pending_exiters(ErtsProcList *pnd_xtrs) * pending exit will soon be detected and handled by the * scheduler running the process (at schedule in/out). 
*/ - if (erts_smp_proc_trylock(p, ERTS_PROC_LOCKS_ALL) != EBUSY) { + if (erts_proc_trylock(p, ERTS_PROC_LOCKS_ALL) != EBUSY) { if (erts_proclist_same(plp, p)) { - state = erts_smp_atomic32_read_acqb(&p->state); + state = erts_atomic32_read_acqb(&p->state); if (!(state & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_RUNNING_SYS | ERTS_PSFLG_EXITING))) { @@ -12666,12 +12666,12 @@ do_handle_pending_exiters(ErtsProcList *pnd_xtrs) erts_handle_pending_exit(p, ERTS_PROC_LOCKS_ALL); } } - erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL); + erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL); } else { - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); if (erts_proclist_same(plp, p)) { - state = erts_smp_atomic32_read_acqb(&p->state); + state = erts_atomic32_read_acqb(&p->state); if (!(state & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_RUNNING_SYS | ERTS_PSFLG_EXITING))) { @@ -12683,7 +12683,7 @@ do_handle_pending_exiters(ErtsProcList *pnd_xtrs) plp = NULL; } } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } } if (plp) @@ -12698,7 +12698,7 @@ save_pending_exiter(Process *p, ErtsProcList *plp) ErtsSchedulerSleepInfo *ssi; ErtsRunQueue *rq; - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); rq = RUNQ_READ_RQ(&p->run_queue); ASSERT(rq && !ERTS_RUNQ_IX_IS_DIRTY(rq->ix)); @@ -12706,7 +12706,7 @@ save_pending_exiter(Process *p, ErtsProcList *plp) if (!plp) plp = proclist_create(p); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); erts_proclist_store_last(&rq->procs.pending_exiters, plp); @@ -12714,7 +12714,7 @@ save_pending_exiter(Process *p, ErtsProcList *plp) ssi = rq->scheduler->ssi; - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); set_aux_work_flags_wakeup_nob(ssi, ERTS_SSI_AUX_WORK_PENDING_EXITERS); } @@ -12854,11 +12854,11 @@ send_exit_signal(Process *c_p, /* current process if and only Uint32 flags /* flags */ ) { - erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state); + erts_aint32_t state = erts_atomic32_read_nob(&rp->state); Eterm rsn = reason == am_kill ? 
am_killed : reason; - ERTS_SMP_LC_ASSERT(*rp_locks == erts_proc_lc_my_proc_locks(rp)); - ERTS_SMP_LC_ASSERT((*rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) + ERTS_LC_ASSERT(*rp_locks == erts_proc_lc_my_proc_locks(rp)); + ERTS_LC_ASSERT((*rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) == ERTS_PROC_LOCKS_XSIG_SEND); ASSERT(reason != THE_NON_VALUE); @@ -12879,7 +12879,7 @@ send_exit_signal(Process *c_p, /* current process if and only if ((state & ERTS_PSFLG_TRAP_EXIT) && (reason != am_kill || (flags & ERTS_XSIG_FLG_IGN_KILL))) { /* have to release the status lock in order to send the exit message */ - erts_smp_proc_unlock(rp, *rp_locks & ERTS_PROC_LOCKS_XSIG_SEND); + erts_proc_unlock(rp, *rp_locks & ERTS_PROC_LOCKS_XSIG_SEND); *rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND; if (have_seqtrace(token) && token_update) seq_trace_update_send(token_update); @@ -12899,10 +12899,10 @@ send_exit_signal(Process *c_p, /* current process if and only if (*rp_locks != ERTS_PROC_LOCKS_ALL) { ErtsProcLocks need_locks = (~(*rp_locks) & ERTS_PROC_LOCKS_ALL); - if (erts_smp_proc_trylock(c_p, need_locks) == EBUSY) { - erts_smp_proc_unlock(c_p, + if (erts_proc_trylock(c_p, need_locks) == EBUSY) { + erts_proc_unlock(c_p, *rp_locks & ~ERTS_PROC_LOCK_MAIN); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); } *rp_locks = ERTS_PROC_LOCKS_ALL; } @@ -12916,7 +12916,7 @@ send_exit_signal(Process *c_p, /* current process if and only ErlHeapFragment *bp = NULL; Eterm rsn_cpy; if (need_locks - && erts_smp_proc_trylock(rp, need_locks) == EBUSY) { + && erts_proc_trylock(rp, need_locks) == EBUSY) { /* ... but we haven't got all locks on it ... */ save_pending_exiter(rp, NULL); /* @@ -12928,7 +12928,7 @@ send_exit_signal(Process *c_p, /* current process if and only /* ...and we have all locks on it... */ *rp_locks = ERTS_PROC_LOCKS_ALL; - state = erts_smp_atomic32_read_nob(&rp->state); + state = erts_atomic32_read_nob(&rp->state); if (is_immed(rsn)) rsn_cpy = rsn; @@ -12984,11 +12984,11 @@ send_exit_signal(Process *c_p, /* current process if and only * queue... 
*/ #ifndef ERTS_DIRTY_SCHEDULERS - (void) erts_smp_atomic32_read_bor_relb(&rp->state, + (void) erts_atomic32_read_bor_relb(&rp->state, ERTS_PSFLG_PENDING_EXIT); #else { - erts_aint32_t a = erts_smp_atomic32_read_nob(&rp->state); + erts_aint32_t a = erts_atomic32_read_nob(&rp->state); while (1) { erts_aint32_t n, e; int dwork; @@ -12996,7 +12996,7 @@ send_exit_signal(Process *c_p, /* current process if and only n |= ERTS_PSFLG_PENDING_EXIT; dwork = !!(n & ERTS_PSFLGS_DIRTY_WORK); n &= ~ERTS_PSFLGS_DIRTY_WORK; - a = erts_smp_atomic32_cmpxchg_mb(&rp->state, n, e); + a = erts_atomic32_cmpxchg_mb(&rp->state, n, e); if (a == e) { if (dwork) erts_schedule_process(rp, n, *rp_locks); @@ -13063,9 +13063,9 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) ASSERT(is_node_name_atom(mon->u.pid)); dep = erts_sysname_to_connected_dist_entry(mon->u.pid); if (dep) { - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); rmon = erts_remove_monitor(&(dep->monitors), mon->ref); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); if (rmon) { ErtsDSigData dsd; int code = erts_dsig_prepare(&dsd, dep, NULL, @@ -13091,7 +13091,7 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) goto done; } rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (rmon == NULL) { goto done; } @@ -13110,9 +13110,9 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) dep = external_pid_dist_entry(mon->u.pid); ASSERT(dep != NULL); if (dep) { - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); rmon = erts_remove_monitor(&(dep->monitors), mon->ref); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); if (rmon) { ErtsDSigData dsd; int code = erts_dsig_prepare(&dsd, dep, NULL, @@ -13163,15 +13163,15 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext) } UnUseTmpHeapNoproc(3); /* else: demonitor while we exited, i.e. do nothing... 
*/ - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } else { /* external by pid or name */ ASSERT(is_external_pid(mon->u.pid)); dep = external_pid_dist_entry(mon->u.pid); ASSERT(dep != NULL); if (dep) { - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); rmon = erts_remove_monitor(&(dep->monitors), mon->ref); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); if (rmon) { ErtsDSigData dsd; int code = erts_dsig_prepare(&dsd, dep, NULL, @@ -13279,7 +13279,7 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext) /* We didn't exit the process and it is traced */ if (IS_TRACED_FL(rp, F_TRACE_PROCS)) { if (rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) { - erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND); + erts_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND); rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND; } trace_proc(NULL, 0, rp, am_getting_unlinked, p->common.id); @@ -13287,7 +13287,7 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext) } } ASSERT(rp != p); - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } } else if (is_external_pid(item)) { @@ -13297,14 +13297,14 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext) int code; ErtsDistLinkData dld; erts_remove_dist_link(&dld, p->common.id, item, dep); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); code = erts_dsig_prepare(&dsd, dep, p, ERTS_DSP_NO_LOCK, 0); if (code == ERTS_DSIG_PREP_CONNECTED) { code = erts_dsig_send_exit_tt(&dsd, p->common.id, item, reason, SEQ_TRACE_TOKEN(p)); ASSERT(code == ERTS_DSIG_SEND_OK); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN); erts_destroy_dist_link(&dld); } } @@ -13315,9 +13315,9 @@ static void doit_exit_link(ErtsLink *lnk, void *vpcontext) if(dep) { /* dist entries have node links in a separate structure to avoid confusion */ - erts_smp_de_links_lock(dep); + erts_de_links_lock(dep); rlnk = erts_remove_link(&(dep->node_links), p->common.id); - erts_smp_de_links_unlock(dep); + erts_de_links_unlock(dep); if (rlnk) erts_destroy_link(rlnk); erts_deref_dist_entry(dep); @@ -13340,7 +13340,7 @@ resume_suspend_monitor(ErtsSuspendMonitor *smon, void *vc_p) ASSERT(suspendee != vc_p); if (smon->active) resume_process(suspendee, ERTS_PROC_LOCK_STATUS); - erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); } erts_destroy_suspend_monitor(smon); } @@ -13369,12 +13369,12 @@ erts_do_exit_process(Process* p, Eterm reason) erts_exit(ERTS_DUMP_EXIT, "System process %T terminated: %T\n", p->common.id, reason); - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); /* By locking all locks (main lock is already locked) when going to exiting state (ERTS_PSFLG_EXITING), it is enough to take any lock when looking up a process (erts_pid2proc()) to prevent the looked up process from exiting until the lock has been released. */ - erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); if (ERTS_PSFLG_PENDING_EXIT & set_proc_self_exiting(p)) { /* Process exited before pending exit was received... 
*/ @@ -13387,7 +13387,7 @@ erts_do_exit_process(Process* p, Eterm reason) cancel_suspend_of_suspendee(p, ERTS_PROC_LOCKS_ALL); - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p); + ERTS_MSGQ_MV_INQ2PRIVQ(p); if (IS_TRACED(p)) { if (IS_TRACED_FL(p, F_TRACE_CALLS)) @@ -13406,7 +13406,7 @@ erts_do_exit_process(Process* p, Eterm reason) ASSERT(erts_proc_read_refc(p) > 0); } - erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR); if (IS_TRACED_FL(p,F_TRACE_PROCS)) trace_proc(p, ERTS_PROC_LOCK_MAIN, p, am_exit, reason); @@ -13436,7 +13436,7 @@ erts_continue_exit_process(Process *p) int yield_allowed = 1; #endif - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p)); ASSERT(ERTS_PROC_IS_EXITING(p)); @@ -13501,7 +13501,7 @@ erts_continue_exit_process(Process *p) } erts_set_gc_state(p, 1); - state = erts_smp_atomic32_read_acqb(&p->state); + state = erts_atomic32_read_acqb(&p->state); if (state & ERTS_PSFLG_ACTIVE_SYS #ifdef ERTS_DIRTY_SCHEDULERS || p->dirty_sys_tasks @@ -13512,13 +13512,13 @@ erts_continue_exit_process(Process *p) } #ifdef DEBUG - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); ASSERT(p->sys_task_qs == NULL); ASSERT(ERTS_PROC_GET_DELAYED_GC_TASK_QS(p) == NULL); #ifdef ERTS_DIRTY_SCHEDULERS ASSERT(p->dirty_sys_tasks == NULL); #endif - erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS); #endif if (p->flags & F_USING_DDLL) { @@ -13551,7 +13551,7 @@ erts_continue_exit_process(Process *p) if (IS_TRACED_FL(p, F_TRACE_SCHED_EXIT)) trace_sched(p, curr_locks, am_out_exited); - erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); + erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR); curr_locks = ERTS_PROC_LOCKS_ALL; /* @@ -13574,7 +13574,7 @@ erts_continue_exit_process(Process *p) ErtsRunQueue *rq; rq = erts_get_runq_current(erts_proc_sched_data(p)); - erts_smp_runq_lock(rq); + erts_runq_lock(rq); ASSERT(p->scheduler_data); ASSERT(p->scheduler_data->current_process == p); @@ -13586,7 +13586,7 @@ erts_continue_exit_process(Process *p) /* Time of death! */ erts_ptab_delete_element(&erts_proc, &p->common); - erts_smp_runq_unlock(rq); + erts_runq_unlock(rq); } /* @@ -13598,7 +13598,7 @@ erts_continue_exit_process(Process *p) { /* Inactivate and notify free */ - erts_aint32_t n, e, a = erts_smp_atomic32_read_nob(&p->state); + erts_aint32_t n, e, a = erts_atomic32_read_nob(&p->state); int refc_inced = 0; while (1) { n = e = a; @@ -13609,7 +13609,7 @@ erts_continue_exit_process(Process *p) erts_proc_inc_refc(p); refc_inced = 1; } - a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e); + a = erts_atomic32_cmpxchg_mb(&p->state, n, e); if (a == e) break; } @@ -13632,7 +13632,7 @@ erts_continue_exit_process(Process *p) dep = (p->flags & F_DISTRIBUTION) ? 
erts_this_dist_entry : NULL; - erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL); + erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL); if (dep) { erts_do_net_exits(dep, reason); @@ -13674,8 +13674,8 @@ erts_continue_exit_process(Process *p) if (!delay_del_proc) delete_process(p); - erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN); - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); + erts_proc_lock(p, ERTS_PROC_LOCK_MAIN); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p); return; @@ -13685,20 +13685,20 @@ erts_continue_exit_process(Process *p) ASSERT(yield_allowed); #endif - ERTS_SMP_LC_ASSERT(curr_locks == erts_proc_lc_my_proc_locks(p)); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & curr_locks); + ERTS_LC_ASSERT(curr_locks == erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & curr_locks); p->i = (BeamInstr *) beam_continue_exit; if (!(curr_locks & ERTS_PROC_LOCK_STATUS)) { - erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + erts_proc_lock(p, ERTS_PROC_LOCK_STATUS); curr_locks |= ERTS_PROC_LOCK_STATUS; } if (curr_locks != ERTS_PROC_LOCK_MAIN) - erts_smp_proc_unlock(p, ~ERTS_PROC_LOCK_MAIN & curr_locks); + erts_proc_unlock(p, ~ERTS_PROC_LOCK_MAIN & curr_locks); - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p)); + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p)); BUMP_ALL_REDS(p); } @@ -13734,7 +13734,7 @@ erts_program_counter_info(fmtfn_t to, void *to_arg, Process *p) erts_print(to, to_arg, "CP: %p (", p->cp); print_function_from_pc(to, to_arg, p->cp); erts_print(to, to_arg, ")\n"); - state = erts_smp_atomic32_read_acqb(&p->state); + state = erts_atomic32_read_acqb(&p->state); if (!(state & (ERTS_PSFLG_RUNNING | ERTS_PSFLG_RUNNING_SYS | ERTS_PSFLG_GC))) { @@ -13815,7 +13815,7 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) { erts_print(to, to_arg, "=scheduler:%u\n", esdp->no); - flg = erts_smp_atomic32_read_dirty(&esdp->ssi->flags); + flg = erts_atomic32_read_dirty(&esdp->ssi->flags); erts_print(to, to_arg, "Scheduler Sleep Info Flags: "); for (i = 0; i < ERTS_SSI_FLGS_MAX && flg; i++) { erts_aint32_t chk = (1 << i); @@ -13884,12 +13884,12 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) { break; } erts_print(to, to_arg, "Length: %d\n", - erts_smp_atomic32_read_dirty(&esdp->run_queue->procs.prio_info[i].len)); + erts_atomic32_read_dirty(&esdp->run_queue->procs.prio_info[i].len)); } erts_print(to, to_arg, "Run Queue Port Length: %d\n", - erts_smp_atomic32_read_dirty(&esdp->run_queue->ports.info.len)); + erts_atomic32_read_dirty(&esdp->run_queue->ports.info.len)); - flg = erts_smp_atomic32_read_dirty(&esdp->run_queue->flags); + flg = erts_atomic32_read_dirty(&esdp->run_queue->flags); erts_print(to, to_arg, "Run Queue Flags: "); for (i = 0; i < ERTS_RUNQ_FLG_MAX && flg; i++) { erts_aint32_t chk = (1 << i); @@ -13961,7 +13961,7 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) { p = esdp->current_process; erts_print(to, to_arg, "Current Process: "); if (esdp->current_process && !(ERTS_TRACE_FLAGS(p) & F_SENSITIVE)) { - flg = erts_smp_atomic32_read_dirty(&p->state); + flg = erts_atomic32_read_dirty(&p->state); erts_print(to, to_arg, "%T\n", p->common.id); erts_print(to, to_arg, "Current Process State: "); @@ -14011,7 +14011,7 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) { */ void erts_halt(int code) { - if (-1 == erts_smp_atomic32_cmpxchg_acqb(&erts_halt_progress, + if (-1 == erts_atomic32_cmpxchg_acqb(&erts_halt_progress, erts_no_schedulers, -1)) 
{ #ifdef ERTS_DIRTY_SCHEDULERS diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h index 513397ef3f..7ca37882c2 100644 --- a/erts/emulator/beam/erl_process.h +++ b/erts/emulator/beam/erl_process.h @@ -47,7 +47,6 @@ typedef struct process Process; #include "erl_port.h" #undef ERL_PORT_GET_PORT_TYPE_ONLY__ #include "erl_vm.h" -#include "erl_smp.h" #include "erl_message.h" #include "erl_process_dict.h" #include "erl_node_container_utils.h" @@ -222,31 +221,31 @@ extern int erts_dio_sched_thread_suggested_stack_size; ((FLGS) &= ~ERTS_RUNQ_FLG_EVACUATE((PRIO))) #define ERTS_RUNQ_FLGS_INIT(RQ, INIT) \ - erts_smp_atomic32_init_nob(&(RQ)->flags, (erts_aint32_t) (INIT)) + erts_atomic32_init_nob(&(RQ)->flags, (erts_aint32_t) (INIT)) #define ERTS_RUNQ_FLGS_SET(RQ, FLGS) \ - ((Uint32) erts_smp_atomic32_read_bor_relb(&(RQ)->flags, \ + ((Uint32) erts_atomic32_read_bor_relb(&(RQ)->flags, \ (erts_aint32_t) (FLGS))) #define ERTS_RUNQ_FLGS_SET_NOB(RQ, FLGS) \ - ((Uint32) erts_smp_atomic32_read_bor_nob(&(RQ)->flags, \ + ((Uint32) erts_atomic32_read_bor_nob(&(RQ)->flags, \ (erts_aint32_t) (FLGS))) #define ERTS_RUNQ_FLGS_BSET(RQ, MSK, FLGS) \ - ((Uint32) erts_smp_atomic32_read_bset_relb(&(RQ)->flags, \ + ((Uint32) erts_atomic32_read_bset_relb(&(RQ)->flags, \ (erts_aint32_t) (MSK), \ (erts_aint32_t) (FLGS))) #define ERTS_RUNQ_FLGS_UNSET(RQ, FLGS) \ - ((Uint32) erts_smp_atomic32_read_band_relb(&(RQ)->flags, \ + ((Uint32) erts_atomic32_read_band_relb(&(RQ)->flags, \ (erts_aint32_t) ~(FLGS))) #define ERTS_RUNQ_FLGS_UNSET_NOB(RQ, FLGS) \ - ((Uint32) erts_smp_atomic32_read_band_nob(&(RQ)->flags, \ + ((Uint32) erts_atomic32_read_band_nob(&(RQ)->flags, \ (erts_aint32_t) ~(FLGS))) #define ERTS_RUNQ_FLGS_GET(RQ) \ - ((Uint32) erts_smp_atomic32_read_acqb(&(RQ)->flags)) + ((Uint32) erts_atomic32_read_acqb(&(RQ)->flags)) #define ERTS_RUNQ_FLGS_GET_NOB(RQ) \ - ((Uint32) erts_smp_atomic32_read_nob(&(RQ)->flags)) + ((Uint32) erts_atomic32_read_nob(&(RQ)->flags)) #define ERTS_RUNQ_FLGS_GET_MB(RQ) \ - ((Uint32) erts_smp_atomic32_read_mb(&(RQ)->flags)) + ((Uint32) erts_atomic32_read_mb(&(RQ)->flags)) #define ERTS_RUNQ_FLGS_READ_BSET(RQ, MSK, FLGS) \ - ((Uint32) erts_smp_atomic32_read_bset_relb(&(RQ)->flags, \ + ((Uint32) erts_atomic32_read_bset_relb(&(RQ)->flags, \ (erts_aint32_t) (MSK), \ (erts_aint32_t) (FLGS))) @@ -365,7 +364,7 @@ typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo; #ifdef ERTS_DIRTY_SCHEDULERS typedef struct { - erts_smp_spinlock_t lock; + erts_spinlock_t lock; ErtsSchedulerSleepInfo *list; } ErtsSchedulerSleepList; #endif @@ -373,7 +372,7 @@ typedef struct { struct ErtsSchedulerSleepInfo_ { ErtsSchedulerSleepInfo *next; ErtsSchedulerSleepInfo *prev; - erts_smp_atomic32_t flags; + erts_atomic32_t flags; erts_tse_t *event; erts_atomic32_t aux_work; }; @@ -418,7 +417,7 @@ typedef struct ErtsSchedulerData_ ErtsSchedulerData; typedef struct ErtsRunQueue_ ErtsRunQueue; typedef struct { - erts_smp_atomic32_t len; + erts_atomic32_t len; erts_aint32_t max_len; int reds; } ErtsRunQueueInfo; @@ -475,8 +474,8 @@ struct ErtsMigrationPaths_ { struct ErtsRunQueue_ { int ix; - erts_smp_mtx_t mtx; - erts_smp_cnd_t cnd; + erts_mtx_t mtx; + erts_cnd_t cnd; #ifdef ERTS_DIRTY_SCHEDULERS ErtsSchedulerSleepList sleepers; @@ -485,13 +484,13 @@ struct ErtsRunQueue_ { ErtsSchedulerData *scheduler; int waiting; /* < 0 in sys schedule; > 0 on cnd variable */ int woken; - erts_smp_atomic32_t flags; + erts_atomic32_t flags; int check_balance_reds; int full_reds_history_sum; int 
full_reds_history[ERTS_FULL_REDS_HISTORY_SIZE]; int out_of_work_count; erts_aint32_t max_len; - erts_smp_atomic32_t len; + erts_atomic32_t len; int wakeup_other; int wakeup_other_reds; @@ -510,7 +509,7 @@ struct ErtsRunQueue_ { struct { ErtsMiscOpList *start; ErtsMiscOpList *end; - erts_smp_atomic_t evac_runq; + erts_atomic_t evac_runq; } misc; struct { @@ -695,7 +694,7 @@ extern ErtsAlignedSchedulerData *erts_aligned_dirty_io_scheduler_data; #if defined(ERTS_ENABLE_LOCK_CHECK) -int erts_smp_lc_runq_is_locked(ErtsRunQueue *); +int erts_lc_runq_is_locked(ErtsRunQueue *); #endif #ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS @@ -709,20 +708,20 @@ void erts_non_empty_runq(ErtsRunQueue *rq); * other threads peek at values without run queue lock. */ -ERTS_GLB_INLINE void erts_smp_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio); -ERTS_GLB_INLINE void erts_smp_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio); -ERTS_GLB_INLINE void erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi); +ERTS_GLB_INLINE void erts_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio); +ERTS_GLB_INLINE void erts_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio); +ERTS_GLB_INLINE void erts_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi); #if ERTS_GLB_INLINE_INCL_FUNC_DEF ERTS_GLB_INLINE void -erts_smp_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio) +erts_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio) { erts_aint32_t len; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); - len = erts_smp_atomic32_read_dirty(&rq->len); + len = erts_atomic32_read_dirty(&rq->len); if (len == 0) erts_non_empty_runq(rq); @@ -730,63 +729,63 @@ erts_smp_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio) if (rq->max_len < len) rq->max_len = len; ASSERT(len > 0); - erts_smp_atomic32_set_nob(&rq->len, len); + erts_atomic32_set_nob(&rq->len, len); - len = erts_smp_atomic32_read_dirty(&rqi->len); + len = erts_atomic32_read_dirty(&rqi->len); ASSERT(len >= 0); if (len == 0) { - ASSERT((erts_smp_atomic32_read_nob(&rq->flags) + ASSERT((erts_atomic32_read_nob(&rq->flags) & ((erts_aint32_t) (1 << prio))) == 0); - erts_smp_atomic32_read_bor_nob(&rq->flags, + erts_atomic32_read_bor_nob(&rq->flags, (erts_aint32_t) (1 << prio)); } len++; if (rqi->max_len < len) rqi->max_len = len; - erts_smp_atomic32_set_relb(&rqi->len, len); + erts_atomic32_set_relb(&rqi->len, len); } ERTS_GLB_INLINE void -erts_smp_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio) +erts_dec_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio) { erts_aint32_t len; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); - len = erts_smp_atomic32_read_dirty(&rq->len); + len = erts_atomic32_read_dirty(&rq->len); len--; ASSERT(len >= 0); - erts_smp_atomic32_set_nob(&rq->len, len); + erts_atomic32_set_nob(&rq->len, len); - len = erts_smp_atomic32_read_dirty(&rqi->len); + len = erts_atomic32_read_dirty(&rqi->len); len--; ASSERT(len >= 0); if (len == 0) { - ASSERT((erts_smp_atomic32_read_nob(&rq->flags) + ASSERT((erts_atomic32_read_nob(&rq->flags) & ((erts_aint32_t) (1 << prio)))); - erts_smp_atomic32_read_band_nob(&rq->flags, + erts_atomic32_read_band_nob(&rq->flags, ~((erts_aint32_t) (1 << prio))); } - erts_smp_atomic32_set_relb(&rqi->len, len); + erts_atomic32_set_relb(&rqi->len, len); } ERTS_GLB_INLINE void -erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi) 
+erts_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi) { erts_aint32_t len; - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq)); + ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq)); - len = erts_smp_atomic32_read_dirty(&rqi->len); + len = erts_atomic32_read_dirty(&rqi->len); ASSERT(rqi->max_len >= len); rqi->max_len = len; } #endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ -#define RUNQ_READ_LEN(X) erts_smp_atomic32_read_nob((X)) +#define RUNQ_READ_LEN(X) erts_atomic32_read_nob((X)) #endif /* ERTS_INCLUDE_SCHEDULER_INTERNALS */ @@ -859,7 +858,7 @@ extern ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE]; #define ERTS_SCHED_STAT_MODIFY_CLEAR 3 typedef struct { - erts_smp_spinlock_t lock; + erts_spinlock_t lock; int enabled; struct { Eterm name; @@ -1043,7 +1042,7 @@ struct process { ErlHeapFragment* live_hf_end; ErtsMessage *msg_frag; /* Pointer to message fragment list */ Uint mbuf_sz; /* Total size of heap fragments and message fragments */ - erts_smp_atomic_t psd; /* Rarely used process specific data */ + erts_atomic_t psd; /* Rarely used process specific data */ Uint64 bin_vheap_sz; /* Virtual heap block size for binaries */ Uint64 bin_old_vheap_sz; /* Virtual old heap block size for binaries */ @@ -1054,9 +1053,9 @@ struct process { ErtsProcSysTask *dirty_sys_tasks; #endif - erts_smp_atomic32_t state; /* Process state flags (see ERTS_PSFLG_*) */ + erts_atomic32_t state; /* Process state flags (see ERTS_PSFLG_*) */ #ifdef ERTS_DIRTY_SCHEDULERS - erts_smp_atomic32_t dirty_state; /* Process dirty state flags (see ERTS_PDSFLG_*) */ + erts_atomic32_t dirty_state; /* Process dirty state flags (see ERTS_PDSFLG_*) */ #endif ErlMessageInQueue msg_inq; @@ -1066,7 +1065,7 @@ struct process { ErtsSchedulerData *scheduler_data; Eterm suspendee; ErtsPendingSuspend *pending_suspenders; - erts_smp_atomic_t run_queue; + erts_atomic_t run_queue; #ifdef HIPE struct hipe_process_state_smp hipe_smp; #endif @@ -1332,7 +1331,7 @@ Eterm* erts_heap_alloc(Process* p, Uint need, Uint xtra); Eterm* erts_set_hole_marker(Eterm* ptr, Uint sz); #endif -extern erts_smp_rwmtx_t erts_cpu_bind_rwmtx; +extern erts_rwmtx_t erts_cpu_bind_rwmtx; /* If any of the erts_system_monitor_* variables are set (enabled), ** erts_system_monitor must be != NIL, to allow testing on just ** the erts_system_monitor_* variables. 
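
The erts_inc_runq_len()/erts_dec_runq_len() helpers above keep one bit per priority in the run queue's flags word in sync with that priority's queue length: the bit is set on the 0 -> 1 length transition and cleared on the 1 -> 0 transition, so peers can test for runnable work at a given priority with a single atomic read of flags instead of taking the run-queue mutex. A minimal standalone sketch of the same bookkeeping pattern, using C11 atomics and hypothetical mini_* names rather than the ERTS wrappers (callers are assumed to hold an external run-queue lock, as the ERTS_LC_ASSERT in the real code enforces):

#include <assert.h>
#include <stdatomic.h>

#define NPRIO 4

/* Miniature run queue: a length counter per priority plus a flags
 * word whose low NPRIO bits mirror "len[prio] > 0". */
typedef struct {
    atomic_int  len[NPRIO];
    atomic_uint flags;
} mini_runq;

/* Call with the run-queue lock held: lengths are only written under
 * that lock, so the relaxed re-read of our own counter is safe; the
 * flag updates are atomic RMWs so other threads may peek at flags
 * without the lock. */
static void mini_inc_len(mini_runq *rq, int prio)
{
    int len = atomic_load_explicit(&rq->len[prio], memory_order_relaxed);
    if (len == 0)  /* 0 -> 1: announce work at this priority */
        atomic_fetch_or_explicit(&rq->flags, 1u << prio,
                                 memory_order_relaxed);
    atomic_store_explicit(&rq->len[prio], len + 1, memory_order_release);
}

static void mini_dec_len(mini_runq *rq, int prio)
{
    int len = atomic_load_explicit(&rq->len[prio], memory_order_relaxed);
    assert(len > 0);
    if (len == 1)  /* 1 -> 0: no work left at this priority */
        atomic_fetch_and_explicit(&rq->flags, ~(1u << prio),
                                  memory_order_relaxed);
    atomic_store_explicit(&rq->len[prio], len - 1, memory_order_release);
}

As in the patched helpers, the release store on the length pairs with acquire reads elsewhere, while the unsynchronized "dirty" reads are legitimate only because all writers serialize on the run-queue lock.
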
@@ -1855,7 +1854,7 @@ int erts_send_exit_signal(Process *, Uint32); void erts_handle_pending_exit(Process *, ErtsProcLocks); #define ERTS_PROC_PENDING_EXIT(P) \ - (ERTS_PSFLG_PENDING_EXIT & erts_smp_atomic32_read_acqb(&(P)->state)) + (ERTS_PSFLG_PENDING_EXIT & erts_atomic32_read_acqb(&(P)->state)) void erts_deep_process_dump(fmtfn_t, void *); @@ -1897,7 +1896,7 @@ ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p, ErtsProcLocks locks) { /* No barrier needed, due to msg lock */ - erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state); + erts_aint32_t state = erts_atomic32_read_nob(&p->state); if (!(state & ERTS_PSFLG_ACTIVE)) erts_schedule_process(p, state, locks); } @@ -1907,7 +1906,7 @@ erts_schedule_dirty_sys_execution(Process *c_p) { erts_aint32_t a, n, e; - a = erts_smp_atomic32_read_nob(&c_p->state); + a = erts_atomic32_read_nob(&c_p->state); /* * Only a currently executing process schedules @@ -1923,7 +1922,7 @@ erts_schedule_dirty_sys_execution(Process *c_p) | ERTS_PSFLG_PENDING_EXIT))) { e = a; n = a | ERTS_PSFLG_DIRTY_ACTIVE_SYS; - a = erts_smp_atomic32_cmpxchg_mb(&c_p->state, n, e); + a = erts_atomic32_cmpxchg_mb(&c_p->state, n, e); if (a == e) break; /* dirty-active-sys set */ } @@ -1937,15 +1936,15 @@ erts_schedule_dirty_sys_execution(Process *c_p) #include "erl_process_lock.h" #undef ERTS_PROCESS_LOCK_ONLY_LOCK_CHECK_PROTO__ -#define ERTS_SMP_LC_CHK_RUNQ_LOCK(RQ, L) \ +#define ERTS_LC_CHK_RUNQ_LOCK(RQ, L) \ do { \ if ((L)) \ - ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked((RQ))); \ + ERTS_LC_ASSERT(erts_lc_runq_is_locked((RQ))); \ else \ - ERTS_SMP_LC_ASSERT(!erts_smp_lc_runq_is_locked((RQ))); \ + ERTS_LC_ASSERT(!erts_lc_runq_is_locked((RQ))); \ } while (0) #else -#define ERTS_SMP_LC_CHK_RUNQ_LOCK(RQ, L) +#define ERTS_LC_CHK_RUNQ_LOCK(RQ, L) #endif void *erts_psd_set_init(Process *p, int ix, void *data); @@ -1964,19 +1963,19 @@ erts_psd_get(Process *p, int ix) #if defined(ERTS_ENABLE_LOCK_CHECK) ErtsProcLocks locks = erts_proc_lc_my_proc_locks(p); if (ERTS_LC_PSD_ANY_LOCK == erts_psd_required_locks[ix].get_locks) - ERTS_SMP_LC_ASSERT(locks || erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(locks || erts_thr_progress_is_blocking()); else { locks &= erts_psd_required_locks[ix].get_locks; - ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].get_locks == locks + ERTS_LC_ASSERT(erts_psd_required_locks[ix].get_locks == locks || erts_thr_progress_is_blocking()); } #endif - psd = (ErtsPSD *) erts_smp_atomic_read_nob(&p->psd); + psd = (ErtsPSD *) erts_atomic_read_nob(&p->psd); ASSERT(0 <= ix && ix < ERTS_PSD_SIZE); if (!psd) return NULL; - ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER; + ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER; return psd->data[ix]; } @@ -1986,18 +1985,18 @@ erts_psd_set(Process *p, int ix, void *data) ErtsPSD *psd; #if defined(ERTS_ENABLE_LOCK_CHECK) ErtsProcLocks locks = erts_proc_lc_my_proc_locks(p); - erts_aint32_t state = state = erts_smp_atomic32_read_nob(&p->state); + erts_aint32_t state = state = erts_atomic32_read_nob(&p->state); if (!(state & ERTS_PSFLG_FREE)) { if (ERTS_LC_PSD_ANY_LOCK == erts_psd_required_locks[ix].set_locks) - ERTS_SMP_LC_ASSERT(locks || erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(locks || erts_thr_progress_is_blocking()); else { locks &= erts_psd_required_locks[ix].set_locks; - ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].set_locks == locks + ERTS_LC_ASSERT(erts_psd_required_locks[ix].set_locks == locks || erts_thr_progress_is_blocking()); } } #endif - psd = (ErtsPSD *) erts_smp_atomic_read_nob(&p->psd); + psd 
= (ErtsPSD *) erts_atomic_read_nob(&p->psd); ASSERT(0 <= ix && ix < ERTS_PSD_SIZE); if (psd) { void *old; @@ -2196,13 +2195,13 @@ ERTS_GLB_INLINE Eterm erts_get_current_pid(void); ERTS_GLB_INLINE Uint erts_get_scheduler_id(void); ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_proc(Process *p); ERTS_GLB_INLINE ErtsRunQueue *erts_get_runq_current(ErtsSchedulerData *esdp); -ERTS_GLB_INLINE void erts_smp_runq_lock(ErtsRunQueue *rq); -ERTS_GLB_INLINE int erts_smp_runq_trylock(ErtsRunQueue *rq); -ERTS_GLB_INLINE void erts_smp_runq_unlock(ErtsRunQueue *rq); -ERTS_GLB_INLINE void erts_smp_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq); -ERTS_GLB_INLINE void erts_smp_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq); -ERTS_GLB_INLINE void erts_smp_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2); -ERTS_GLB_INLINE void erts_smp_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2); +ERTS_GLB_INLINE void erts_runq_lock(ErtsRunQueue *rq); +ERTS_GLB_INLINE int erts_runq_trylock(ErtsRunQueue *rq); +ERTS_GLB_INLINE void erts_runq_unlock(ErtsRunQueue *rq); +ERTS_GLB_INLINE void erts_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq); +ERTS_GLB_INLINE void erts_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq); +ERTS_GLB_INLINE void erts_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2); +ERTS_GLB_INLINE void erts_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2); ERTS_GLB_INLINE ErtsMessage *erts_alloc_message_heap_state(Process *pp, erts_aint32_t *psp, @@ -2295,70 +2294,70 @@ erts_get_runq_current(ErtsSchedulerData *esdp) } ERTS_GLB_INLINE void -erts_smp_runq_lock(ErtsRunQueue *rq) +erts_runq_lock(ErtsRunQueue *rq) { - erts_smp_mtx_lock(&rq->mtx); + erts_mtx_lock(&rq->mtx); } ERTS_GLB_INLINE int -erts_smp_runq_trylock(ErtsRunQueue *rq) +erts_runq_trylock(ErtsRunQueue *rq) { - return erts_smp_mtx_trylock(&rq->mtx); + return erts_mtx_trylock(&rq->mtx); } ERTS_GLB_INLINE void -erts_smp_runq_unlock(ErtsRunQueue *rq) +erts_runq_unlock(ErtsRunQueue *rq) { - erts_smp_mtx_unlock(&rq->mtx); + erts_mtx_unlock(&rq->mtx); } ERTS_GLB_INLINE void -erts_smp_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq) +erts_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&rq->mtx)); + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&rq->mtx)); if (xrq != rq) { - if (erts_smp_mtx_trylock(&xrq->mtx) == EBUSY) { + if (erts_mtx_trylock(&xrq->mtx) == EBUSY) { if (rq < xrq) - erts_smp_mtx_lock(&xrq->mtx); + erts_mtx_lock(&xrq->mtx); else { - erts_smp_mtx_unlock(&rq->mtx); - erts_smp_mtx_lock(&xrq->mtx); - erts_smp_mtx_lock(&rq->mtx); + erts_mtx_unlock(&rq->mtx); + erts_mtx_lock(&xrq->mtx); + erts_mtx_lock(&rq->mtx); } } } } ERTS_GLB_INLINE void -erts_smp_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq) +erts_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq) { if (xrq != rq) - erts_smp_mtx_unlock(&xrq->mtx); + erts_mtx_unlock(&xrq->mtx); } ERTS_GLB_INLINE void -erts_smp_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2) +erts_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2) { ASSERT(rq1 && rq2); if (rq1 == rq2) - erts_smp_mtx_lock(&rq1->mtx); + erts_mtx_lock(&rq1->mtx); else if (rq1 < rq2) { - erts_smp_mtx_lock(&rq1->mtx); - erts_smp_mtx_lock(&rq2->mtx); + erts_mtx_lock(&rq1->mtx); + erts_mtx_lock(&rq2->mtx); } else { - erts_smp_mtx_lock(&rq2->mtx); - erts_smp_mtx_lock(&rq1->mtx); + erts_mtx_lock(&rq2->mtx); + erts_mtx_lock(&rq1->mtx); } } ERTS_GLB_INLINE void -erts_smp_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2) +erts_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2) { ASSERT(rq1 && rq2); - 
erts_smp_mtx_unlock(&rq1->mtx); + erts_mtx_unlock(&rq1->mtx); if (rq1 != rq2) - erts_smp_mtx_unlock(&rq2->mtx); + erts_mtx_unlock(&rq2->mtx); } ERTS_GLB_INLINE ErtsMessage * @@ -2390,7 +2389,7 @@ erts_alloc_message_heap(Process *pp, Eterm **hpp, ErlOffHeap **ohpp) { - erts_aint32_t state = pp ? erts_smp_atomic32_read_nob(&pp->state) : 0; + erts_aint32_t state = pp ? erts_atomic32_read_nob(&pp->state) : 0; return erts_alloc_message_heap_state(pp, &state, plp, sz, hpp, ohpp); } @@ -2404,7 +2403,7 @@ erts_shrink_message_heap(ErtsMessage **msgpp, Process *pp, *msgpp = erts_shrink_message(*msgpp, used_hp - start_hp, brefs, brefs_size); else if (!(*msgpp)->data.attached) { - ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN + ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(pp)); HRelease(pp, end_hp, used_hp); } @@ -2478,15 +2477,15 @@ Process *erts_pid2proc_nropt(Process *c_p, extern int erts_disable_proc_not_running_opt; #ifdef DEBUG -#define ERTS_SMP_ASSERT_IS_NOT_EXITING(P) \ +#define ERTS_ASSERT_IS_NOT_EXITING(P) \ do { ASSERT(!ERTS_PROC_IS_EXITING((P))); } while (0) #else -#define ERTS_SMP_ASSERT_IS_NOT_EXITING(P) +#define ERTS_ASSERT_IS_NOT_EXITING(P) #endif #define ERTS_PROC_IS_EXITING(P) \ - (ERTS_PSFLG_EXITING & erts_smp_atomic32_read_acqb(&(P)->state)) + (ERTS_PSFLG_EXITING & erts_atomic32_read_acqb(&(P)->state)) /* Minimum NUMBER of processes for a small system to start */ @@ -2496,7 +2495,7 @@ extern int erts_disable_proc_not_running_opt; #define ERTS_MIN_PROCESSES ERTS_NO_OF_PIX_LOCKS #endif -void erts_smp_notify_inc_runq(ErtsRunQueue *runq); +void erts_notify_inc_runq(ErtsRunQueue *runq); void erts_sched_finish_poke(ErtsSchedulerSleepInfo *, erts_aint32_t); ERTS_GLB_INLINE void erts_sched_poke(ErtsSchedulerSleepInfo *ssi); @@ -2508,9 +2507,9 @@ erts_sched_poke(ErtsSchedulerSleepInfo *ssi) { erts_aint32_t flags; ERTS_THR_MEMORY_BARRIER; - flags = erts_smp_atomic32_read_nob(&ssi->flags); + flags = erts_atomic32_read_nob(&ssi->flags); if (flags & ERTS_SSI_FLG_SLEEPING) { - flags = erts_smp_atomic32_read_band_nob(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP); + flags = erts_atomic32_read_band_nob(&ssi->flags, ~ERTS_SSI_FLGS_SLEEP); erts_sched_finish_poke(ssi, flags); } } @@ -2527,5 +2526,5 @@ erts_sched_poke(ErtsSchedulerSleepInfo *ssi) void erts_halt(int code); -extern erts_smp_atomic32_t erts_halt_progress; +extern erts_atomic32_t erts_halt_progress; extern int erts_halt_code; diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c index b826e6c5d3..5a2c262ff1 100644 --- a/erts/emulator/beam/erl_process_dump.c +++ b/erts/emulator/beam/erl_process_dump.c @@ -69,7 +69,7 @@ erts_deep_process_dump(fmtfn_t to, void *to_arg) for (i = 0; i < max; i++) { Process *p = erts_pix2proc(i); if (p && p->i != ENULL) { - erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state); + erts_aint32_t state = erts_atomic32_read_acqb(&p->state); if (!(state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_GC))) dump_process_info(to, to_arg, p); } @@ -85,7 +85,7 @@ Uint erts_process_memory(Process *p, int incl_msg_inq) { size += sizeof(Process); if (incl_msg_inq) - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p); + ERTS_MSGQ_MV_INQ2PRIVQ(p); erts_doforall_links(ERTS_P_LINKS(p), &erts_one_link_size, &size); erts_doforall_monitors(ERTS_P_MONITORS(p), &erts_one_mon_size, &size); @@ -106,7 +106,7 @@ Uint erts_process_memory(Process *p, int incl_msg_inq) { size += p->arity * sizeof(p->arg_reg[0]); } - if (erts_smp_atomic_read_nob(&p->psd) != (erts_aint_t) NULL) + if (erts_atomic_read_nob(&p->psd) != (erts_aint_t) 
NULL) size += sizeof(ErtsPSD); scb = ERTS_PROC_GET_SAVED_CALLS_BUF(p); @@ -126,7 +126,7 @@ dump_process_info(fmtfn_t to, void *to_arg, Process *p) ErtsMessage* mp; int yreg = -1; - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p); + ERTS_MSGQ_MV_INQ2PRIVQ(p); if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0 && p->msg.first) { erts_print(to, to_arg, "=proc_messages:%T\n", p->common.id); diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c index 0894c5233a..9d535646df 100644 --- a/erts/emulator/beam/erl_process_lock.c +++ b/erts/emulator/beam/erl_process_lock.c @@ -56,9 +56,9 @@ * Note that wait flags may be read without the pix lock, but * it is important that wait flags only are modified when the pix * lock is held. - * This implementation assumes that erts_smp_atomic_or_retold() + * This implementation assumes that erts_atomic_or_retold() * provides necessary memory barriers for a lock operation, and that - * erts_smp_atomic_and_retold() provides necessary memory barriers + * erts_atomic_and_retold() provides necessary memory barriers * for an unlock operation. */ @@ -463,7 +463,7 @@ wait_for_locks(Process *p, } /* - * erts_proc_lock_failed() is called when erts_smp_proc_lock() + * erts_proc_lock_failed() is called when erts_proc_lock() * wasn't able to lock all locks. We may need to transfer locks * to waiters and wait for our turn on locks. * @@ -542,7 +542,7 @@ erts_proc_lock_failed(Process *p, } /* - * erts_proc_unlock_failed() is called when erts_smp_proc_unlock() + * erts_proc_unlock_failed() is called when erts_proc_unlock() * wasn't able to unlock all locks. We may need to transfer locks * to waiters. */ @@ -708,7 +708,7 @@ proc_safelock(int is_managed, refc1 = 1; erts_proc_inc_refc(p1); } - erts_smp_proc_unlock(p1, unlock_locks); + erts_proc_unlock(p1, unlock_locks); } unlock_locks = unlock_mask & have_locks2; if (unlock_locks) { @@ -718,7 +718,7 @@ proc_safelock(int is_managed, refc2 = 1; erts_proc_inc_refc(p2); } - erts_smp_proc_unlock(p2, unlock_locks); + erts_proc_unlock(p2, unlock_locks); } } @@ -749,7 +749,7 @@ proc_safelock(int is_managed, if (need_locks2 & lock) lock_no--; locks = need_locks1 & lock_mask; - erts_smp_proc_lock(p1, locks); + erts_proc_lock(p1, locks); have_locks1 |= locks; need_locks1 &= ~locks; } @@ -760,7 +760,7 @@ proc_safelock(int is_managed, lock = (1 << ++lock_no); } locks = need_locks2 & lock_mask; - erts_smp_proc_lock(p2, locks); + erts_proc_lock(p2, locks); have_locks2 |= locks; need_locks2 &= ~locks; } @@ -897,7 +897,7 @@ erts_pid2proc_opt(Process *c_p, #endif /* ERTS_PROC_LOCK_OWN_IMPL */ { /* Try a quick trylock to grab all the locks we need. 
*/ - busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks); + busy = (int) erts_proc_raw_trylock__(proc, need_locks); #if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK) erts_proc_lc_trylock(proc, need_locks, !busy, __FILE__,__LINE__); @@ -975,7 +975,7 @@ erts_pid2proc_opt(Process *c_p, : (proc != (Process *) erts_ptab_pix2intptr_nob(&erts_proc, pix)))) { - erts_smp_proc_unlock(proc, need_locks); + erts_proc_unlock(proc, need_locks); if (flags & ERTS_P2P_FLG_INC_REFC) dec_refc_proc = proc; @@ -1037,7 +1037,7 @@ erts_proc_lock_init(Process *p) #if ERTS_PROC_LOCK_OWN_IMPL /* We always start with all locks locked */ #if ERTS_PROC_LOCK_ATOMIC_IMPL - erts_smp_atomic32_init_nob(&p->lock.flags, + erts_atomic32_init_nob(&p->lock.flags, (erts_aint32_t) ERTS_PROC_LOCKS_ALL); #else p->lock.flags = ERTS_PROC_LOCKS_ALL; @@ -1088,7 +1088,7 @@ erts_proc_lock_init(Process *p) #endif #ifdef ERTS_PROC_LOCK_DEBUG for (i = 0; i <= ERTS_PROC_LOCK_MAX_BIT; i++) - erts_smp_atomic32_init_nob(&p->lock.locked[i], (erts_aint32_t) 1); + erts_atomic32_init_nob(&p->lock.locked[i], (erts_aint32_t) 1); #endif #ifdef ERTS_ENABLE_LOCK_COUNT erts_lcnt_proc_lock_init(p); diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h index 062dbbe2a6..9d5691d3c4 100644 --- a/erts/emulator/beam/erl_process_lock.h +++ b/erts/emulator/beam/erl_process_lock.h @@ -36,7 +36,7 @@ #include "erl_lock_count.h" #endif -#include "erl_smp.h" +#include "erl_threads.h" #if defined(VALGRIND) || defined(ETHR_DISABLE_NATIVE_IMPLS) # define ERTS_PROC_LOCK_OWN_IMPL 0 @@ -73,7 +73,7 @@ typedef erts_aint32_t ErtsProcLocks; typedef struct erts_proc_lock_t_ { #if ERTS_PROC_LOCK_OWN_IMPL #if ERTS_PROC_LOCK_ATOMIC_IMPL - erts_smp_atomic32_t flags; + erts_atomic32_t flags; #else ErtsProcLocks flags; #endif @@ -103,7 +103,7 @@ typedef struct erts_proc_lock_t_ { # error "no implementation" #endif #ifdef ERTS_PROC_LOCK_DEBUG - erts_smp_atomic32_t locked[ERTS_PROC_LOCK_MAX_BIT+1]; + erts_atomic32_t locked[ERTS_PROC_LOCK_MAX_BIT+1]; #endif } erts_proc_lock_t; @@ -243,8 +243,8 @@ typedef struct erts_proc_lock_t_ { /* Lock counter implemetation */ #ifdef ERTS_ENABLE_LOCK_POSITION -#define erts_smp_proc_lock__(P,I,L) erts_smp_proc_lock_x__(P,I,L,__FILE__,__LINE__) -#define erts_smp_proc_lock(P,L) erts_smp_proc_lock_x(P,L,__FILE__,__LINE__) +#define erts_proc_lock__(P,I,L) erts_proc_lock_x__(P,I,L,__FILE__,__LINE__) +#define erts_proc_lock(P,L) erts_proc_lock_x(P,L,__FILE__,__LINE__) #endif #if defined (ERTS_ENABLE_LOCK_COUNT) @@ -422,9 +422,9 @@ void erts_lcnt_proc_trylock(erts_proc_lock_t *lock, ErtsProcLocks locks, int res /* --- Process lock checking ----------------------------------------------- */ #if defined(ERTS_ENABLE_LOCK_CHECK) -#define ERTS_SMP_CHK_NO_PROC_LOCKS \ +#define ERTS_CHK_NO_PROC_LOCKS \ erts_proc_lc_chk_no_proc_locks(__FILE__, __LINE__) -#define ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P) \ +#define ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P) \ erts_proc_lc_chk_only_proc_main((P)) void erts_proc_lc_lock(Process *p, ErtsProcLocks locks, char *file, unsigned int line); @@ -443,8 +443,8 @@ void erts_proc_lc_require_lock(Process *p, ErtsProcLocks locks, char* file, unsigned int line); void erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks); #else -#define ERTS_SMP_CHK_NO_PROC_LOCKS -#define ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P) +#define ERTS_CHK_NO_PROC_LOCKS +#define ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(P) #endif #endif /* #ifndef ERTS_PROC_LOCK_LOCK_CHECK__ */ @@ -471,21 +471,21 @@ typedef struct 
{ #if ERTS_PROC_LOCK_ATOMIC_IMPL #define ERTS_PROC_LOCK_FLGS_BAND_(L, MSK) \ - ((ErtsProcLocks) erts_smp_atomic32_read_band_nob(&(L)->flags, \ + ((ErtsProcLocks) erts_atomic32_read_band_nob(&(L)->flags, \ (erts_aint32_t) (MSK))) #define ERTS_PROC_LOCK_FLGS_BOR_ACQB_(L, MSK) \ - ((ErtsProcLocks) erts_smp_atomic32_read_bor_acqb(&(L)->flags, \ + ((ErtsProcLocks) erts_atomic32_read_bor_acqb(&(L)->flags, \ (erts_aint32_t) (MSK))) #define ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(L, NEW, EXPECTED) \ - ((ErtsProcLocks) erts_smp_atomic32_cmpxchg_acqb(&(L)->flags, \ + ((ErtsProcLocks) erts_atomic32_cmpxchg_acqb(&(L)->flags, \ (erts_aint32_t) (NEW), \ (erts_aint32_t) (EXPECTED))) #define ERTS_PROC_LOCK_FLGS_CMPXCHG_RELB_(L, NEW, EXPECTED) \ - ((ErtsProcLocks) erts_smp_atomic32_cmpxchg_relb(&(L)->flags, \ + ((ErtsProcLocks) erts_atomic32_cmpxchg_relb(&(L)->flags, \ (erts_aint32_t) (NEW), \ (erts_aint32_t) (EXPECTED))) #define ERTS_PROC_LOCK_FLGS_READ_(L) \ - ((ErtsProcLocks) erts_smp_atomic32_read_nob(&(L)->flags)) + ((ErtsProcLocks) erts_atomic32_read_nob(&(L)->flags)) #else /* no opt atomic ops */ @@ -556,22 +556,22 @@ ERTS_GLB_INLINE void erts_pix_lock(erts_pix_lock_t *); ERTS_GLB_INLINE void erts_pix_unlock(erts_pix_lock_t *); ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *); -ERTS_GLB_INLINE ErtsProcLocks erts_smp_proc_raw_trylock__(Process *p, +ERTS_GLB_INLINE ErtsProcLocks erts_proc_raw_trylock__(Process *p, ErtsProcLocks locks); #ifdef ERTS_ENABLE_LOCK_POSITION -ERTS_GLB_INLINE void erts_smp_proc_lock_x__(Process *, +ERTS_GLB_INLINE void erts_proc_lock_x__(Process *, erts_pix_lock_t *, ErtsProcLocks, char *file, unsigned int line); #else -ERTS_GLB_INLINE void erts_smp_proc_lock__(Process *, +ERTS_GLB_INLINE void erts_proc_lock__(Process *, erts_pix_lock_t *, ErtsProcLocks); #endif -ERTS_GLB_INLINE void erts_smp_proc_unlock__(Process *, +ERTS_GLB_INLINE void erts_proc_unlock__(Process *, erts_pix_lock_t *, ErtsProcLocks); -ERTS_GLB_INLINE int erts_smp_proc_trylock__(Process *, +ERTS_GLB_INLINE int erts_proc_trylock__(Process *, erts_pix_lock_t *, ErtsProcLocks); @@ -599,7 +599,7 @@ ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *pixlck) } /* - * Helper function for erts_smp_proc_lock__ and erts_smp_proc_trylock__. + * Helper function for erts_proc_lock__ and erts_proc_trylock__. * * Attempts to grab all of 'locks' simultaneously. * @@ -612,7 +612,7 @@ ERTS_GLB_INLINE int erts_lc_pix_lock_is_locked(erts_pix_lock_t *pixlck) * Does not release the pix lock. 
*/ ERTS_GLB_INLINE ErtsProcLocks -erts_smp_proc_raw_trylock__(Process *p, ErtsProcLocks locks) +erts_proc_raw_trylock__(Process *p, ErtsProcLocks locks) { #if ERTS_PROC_LOCK_OWN_IMPL ErtsProcLocks expct_lflgs = 0; @@ -681,12 +681,12 @@ busy_main: ERTS_GLB_INLINE void #ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_proc_lock_x__(Process *p, +erts_proc_lock_x__(Process *p, erts_pix_lock_t *pix_lck, ErtsProcLocks locks, char *file, unsigned int line) #else -erts_smp_proc_lock__(Process *p, +erts_proc_lock__(Process *p, erts_pix_lock_t *pix_lck, ErtsProcLocks locks) #endif @@ -708,7 +708,7 @@ erts_smp_proc_lock__(Process *p, erts_proc_lc_lock(p, locks, file, line); #endif - old_lflgs = erts_smp_proc_raw_trylock__(p, locks); + old_lflgs = erts_proc_raw_trylock__(p, locks); if (old_lflgs != 0) { /* @@ -760,7 +760,7 @@ erts_smp_proc_lock__(Process *p, } ERTS_GLB_INLINE void -erts_smp_proc_unlock__(Process *p, +erts_proc_unlock__(Process *p, erts_pix_lock_t *pix_lck, ErtsProcLocks locks) { @@ -853,7 +853,7 @@ erts_smp_proc_unlock__(Process *p, } ERTS_GLB_INLINE int -erts_smp_proc_trylock__(Process *p, +erts_proc_trylock__(Process *p, erts_pix_lock_t *pix_lck, ErtsProcLocks locks) { @@ -874,7 +874,7 @@ erts_smp_proc_trylock__(Process *p, erts_pix_lock(pix_lck); #endif - if (erts_smp_proc_raw_trylock__(p, locks) != 0) { + if (erts_proc_raw_trylock__(p, locks) != 0) { /* Didn't get all locks... */ res = EBUSY; @@ -911,7 +911,7 @@ erts_smp_proc_trylock__(Process *p, return res; #elif ERTS_PROC_LOCK_RAW_MUTEX_IMPL - if (erts_smp_proc_raw_trylock__(p, locks) != 0) + if (erts_proc_raw_trylock__(p, locks) != 0) return EBUSY; else { #ifdef ERTS_PROC_LOCK_DEBUG @@ -932,11 +932,11 @@ erts_proc_lock_op_debug(Process *p, ErtsProcLocks locks, int locked) if (locks & lock) { erts_aint32_t lock_count; if (locked) { - lock_count = erts_smp_atomic32_inc_read_nob(&p->lock.locked[i]); + lock_count = erts_atomic32_inc_read_nob(&p->lock.locked[i]); ERTS_LC_ASSERT(lock_count == 1); } else { - lock_count = erts_smp_atomic32_dec_read_nob(&p->lock.locked[i]); + lock_count = erts_atomic32_dec_read_nob(&p->lock.locked[i]); ERTS_LC_ASSERT(lock_count == 0); } } @@ -948,12 +948,12 @@ erts_proc_lock_op_debug(Process *p, ErtsProcLocks locks, int locked) #ifdef ERTS_ENABLE_LOCK_POSITION -ERTS_GLB_INLINE void erts_smp_proc_lock_x(Process *, ErtsProcLocks, char *file, unsigned int line); +ERTS_GLB_INLINE void erts_proc_lock_x(Process *, ErtsProcLocks, char *file, unsigned int line); #else -ERTS_GLB_INLINE void erts_smp_proc_lock(Process *, ErtsProcLocks); +ERTS_GLB_INLINE void erts_proc_lock(Process *, ErtsProcLocks); #endif -ERTS_GLB_INLINE void erts_smp_proc_unlock(Process *, ErtsProcLocks); -ERTS_GLB_INLINE int erts_smp_proc_trylock(Process *, ErtsProcLocks); +ERTS_GLB_INLINE void erts_proc_unlock(Process *, ErtsProcLocks); +ERTS_GLB_INLINE int erts_proc_trylock(Process *, ErtsProcLocks); ERTS_GLB_INLINE void erts_proc_inc_refc(Process *); ERTS_GLB_INLINE void erts_proc_dec_refc(Process *); @@ -964,13 +964,13 @@ ERTS_GLB_INLINE Sint erts_proc_read_refc(Process *); ERTS_GLB_INLINE void #ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_proc_lock_x(Process *p, ErtsProcLocks locks, char *file, unsigned int line) +erts_proc_lock_x(Process *p, ErtsProcLocks locks, char *file, unsigned int line) #else -erts_smp_proc_lock(Process *p, ErtsProcLocks locks) +erts_proc_lock(Process *p, ErtsProcLocks locks) #endif { #if defined(ERTS_ENABLE_LOCK_POSITION) - erts_smp_proc_lock_x__(p, + erts_proc_lock_x__(p, #if ERTS_PROC_LOCK_ATOMIC_IMPL NULL, #else @@ 
-978,7 +978,7 @@ erts_smp_proc_lock(Process *p, ErtsProcLocks locks) #endif /*ERTS_PROC_LOCK_ATOMIC_IMPL*/ locks, file, line); #else - erts_smp_proc_lock__(p, + erts_proc_lock__(p, #if ERTS_PROC_LOCK_ATOMIC_IMPL NULL, #else @@ -989,9 +989,9 @@ erts_smp_proc_lock(Process *p, ErtsProcLocks locks) } ERTS_GLB_INLINE void -erts_smp_proc_unlock(Process *p, ErtsProcLocks locks) +erts_proc_unlock(Process *p, ErtsProcLocks locks) { - erts_smp_proc_unlock__(p, + erts_proc_unlock__(p, #if ERTS_PROC_LOCK_ATOMIC_IMPL NULL, #else @@ -1001,9 +1001,9 @@ erts_smp_proc_unlock(Process *p, ErtsProcLocks locks) } ERTS_GLB_INLINE int -erts_smp_proc_trylock(Process *p, ErtsProcLocks locks) +erts_proc_trylock(Process *p, ErtsProcLocks locks) { - return erts_smp_proc_trylock__(p, + return erts_proc_trylock__(p, #if ERTS_PROC_LOCK_ATOMIC_IMPL NULL, #else @@ -1014,14 +1014,14 @@ erts_smp_proc_trylock(Process *p, ErtsProcLocks locks) ERTS_GLB_INLINE void erts_proc_inc_refc(Process *p) { - ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); + ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); erts_ptab_atmc_inc_refc(&p->common); } ERTS_GLB_INLINE void erts_proc_dec_refc(Process *p) { Sint referred; - ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); + ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); referred = erts_ptab_atmc_dec_test_refc(&p->common); if (!referred) { ASSERT(ERTS_PROC_IS_EXITING(p)); @@ -1032,7 +1032,7 @@ ERTS_GLB_INLINE void erts_proc_dec_refc(Process *p) ERTS_GLB_INLINE void erts_proc_add_refc(Process *p, Sint add_refc) { Sint referred; - ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); + ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); referred = erts_ptab_atmc_add_test_refc(&p->common, add_refc); if (!referred) { ASSERT(ERTS_PROC_IS_EXITING(p)); @@ -1042,7 +1042,7 @@ ERTS_GLB_INLINE void erts_proc_add_refc(Process *p, Sint add_refc) ERTS_GLB_INLINE Sint erts_proc_read_refc(Process *p) { - ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); + ASSERT(!(erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY)); return erts_ptab_atmc_read_refc(&p->common); } @@ -1103,7 +1103,7 @@ ERTS_GLB_INLINE Process *erts_proc_lookup_raw(Eterm pid) { Process *proc; - ERTS_SMP_LC_ASSERT(erts_thr_progress_lc_is_delaying()); + ERTS_LC_ASSERT(erts_thr_progress_lc_is_delaying()); if (is_not_internal_pid(pid)) return NULL; diff --git a/erts/emulator/beam/erl_ptab.c b/erts/emulator/beam/erl_ptab.c index b3bcb3af3f..38c095fb4a 100644 --- a/erts/emulator/beam/erl_ptab.c +++ b/erts/emulator/beam/erl_ptab.c @@ -284,31 +284,31 @@ struct ErtsPTabListBifData_ { static ERTS_INLINE void last_data_init_nob(ErtsPTab *ptab, Uint64 val) { - erts_smp_atomic64_init_nob(&ptab->vola.tile.last_data, (erts_aint64_t) val); + erts_atomic64_init_nob(&ptab->vola.tile.last_data, (erts_aint64_t) val); } static ERTS_INLINE void last_data_set_relb(ErtsPTab *ptab, Uint64 val) { - erts_smp_atomic64_set_relb(&ptab->vola.tile.last_data, (erts_aint64_t) val); + erts_atomic64_set_relb(&ptab->vola.tile.last_data, (erts_aint64_t) val); } static ERTS_INLINE Uint64 last_data_read_nob(ErtsPTab *ptab) { - return (Uint64) erts_smp_atomic64_read_nob(&ptab->vola.tile.last_data); + return (Uint64) erts_atomic64_read_nob(&ptab->vola.tile.last_data); } static ERTS_INLINE Uint64 last_data_read_acqb(ErtsPTab *ptab) { - return (Uint64) erts_smp_atomic64_read_acqb(&ptab->vola.tile.last_data); + return (Uint64) 
erts_atomic64_read_acqb(&ptab->vola.tile.last_data); } static ERTS_INLINE Uint64 last_data_cmpxchg_relb(ErtsPTab *ptab, Uint64 new, Uint64 exp) { - return (Uint64) erts_smp_atomic64_cmpxchg_relb(&ptab->vola.tile.last_data, + return (Uint64) erts_atomic64_cmpxchg_relb(&ptab->vola.tile.last_data, (erts_aint64_t) new, (erts_aint64_t) exp); } @@ -346,9 +346,9 @@ ix_to_free_id_data_ix(ErtsPTab *ptab, Uint32 ix) UWord erts_ptab_mem_size(ErtsPTab *ptab) { - UWord size = ptab->r.o.max*sizeof(erts_smp_atomic_t); + UWord size = ptab->r.o.max*sizeof(erts_atomic_t); if (ptab->r.o.free_id_data) - size += ptab->r.o.max*sizeof(erts_smp_atomic32_t); + size += ptab->r.o.max*sizeof(erts_atomic32_t); return size; } @@ -367,14 +367,14 @@ erts_ptab_init_table(ErtsPTab *ptab, size_t tab_sz, alloc_sz; Uint32 bits, cl, cli, ix, ix_per_cache_line, tab_cache_lines; char *tab_end; - erts_smp_atomic_t *tab_entry; - erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; - rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED; + erts_atomic_t *tab_entry; + erts_rwmtx_opt_t rwmtx_opts = ERTS_RWMTX_OPT_DEFAULT_INITER; + rwmtx_opts.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; + rwmtx_opts.lived = ERTS_RWMTX_LONG_LIVED; - erts_smp_rwmtx_init_opt(&ptab->list.data.rwmtx, &rwmtx_opts, name, NIL, + erts_rwmtx_init_opt(&ptab->list.data.rwmtx, &rwmtx_opts, name, NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); - erts_smp_atomic32_init_nob(&ptab->vola.tile.count, 0); + erts_atomic32_init_nob(&ptab->vola.tile.count, 0); last_data_init_nob(ptab, ~((Uint64) 0)); /* A size that is a power of 2 is to prefer performance wise */ @@ -388,20 +388,20 @@ erts_ptab_init_table(ErtsPTab *ptab, ptab->r.o.element_size = element_size; ptab->r.o.max = size; - tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_smp_atomic_t)); + tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_atomic_t)); alloc_sz = tab_sz; if (!legacy) - alloc_sz += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_smp_atomic32_t)); + alloc_sz += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_atomic32_t)); ptab->r.o.tab = erts_alloc_permanent_cache_aligned(atype, alloc_sz); tab_end = ((char *) ptab->r.o.tab) + tab_sz; tab_entry = ptab->r.o.tab; while (tab_end > ((char *) tab_entry)) { - erts_smp_atomic_init_nob(tab_entry, ERTS_AINT_NULL); + erts_atomic_init_nob(tab_entry, ERTS_AINT_NULL); tab_entry++; } tab_cache_lines = tab_sz/ERTS_CACHE_LINE_SIZE; - ix_per_cache_line = (ERTS_CACHE_LINE_SIZE/sizeof(erts_smp_atomic_t)); + ix_per_cache_line = (ERTS_CACHE_LINE_SIZE/sizeof(erts_atomic_t)); ASSERT((ptab->r.o.max & (ptab->r.o.max - 1)) == 0); /* power of 2 */ ASSERT((ix_per_cache_line & (ix_per_cache_line - 1)) == 0); /* power of 2 */ ASSERT((tab_cache_lines & (tab_cache_lines - 1)) == 0); /* power of 2 */ @@ -429,11 +429,11 @@ erts_ptab_init_table(ErtsPTab *ptab, } else { - tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_smp_atomic32_t)); - ptab->r.o.free_id_data = (erts_smp_atomic32_t *) tab_end; + tab_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE(size*sizeof(erts_atomic32_t)); + ptab->r.o.free_id_data = (erts_atomic32_t *) tab_end; tab_cache_lines = tab_sz/ERTS_CACHE_LINE_SIZE; - ix_per_cache_line = (ERTS_CACHE_LINE_SIZE/sizeof(erts_smp_atomic32_t)); + ix_per_cache_line = (ERTS_CACHE_LINE_SIZE/sizeof(erts_atomic32_t)); ptab->r.o.dix_cl_mask = tab_cache_lines-1; ptab->r.o.dix_cl_shift = erts_fit_in_bits_int32(ix_per_cache_line-1); @@ -448,19 +448,19 @@ 
erts_ptab_init_table(ErtsPTab *ptab, ix = 0; for (cl = 0; cl < tab_cache_lines; cl++) { for (cli = 0; cli < ix_per_cache_line; cli++) { - erts_smp_atomic32_init_nob(&ptab->r.o.free_id_data[ix], + erts_atomic32_init_nob(&ptab->r.o.free_id_data[ix], cli*tab_cache_lines+cl); - ASSERT(erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[ix]) != ptab->r.o.invalid_data); + ASSERT(erts_atomic32_read_nob(&ptab->r.o.free_id_data[ix]) != ptab->r.o.invalid_data); ix++; } } - erts_smp_atomic32_init_nob(&ptab->vola.tile.aid_ix, -1); - erts_smp_atomic32_init_nob(&ptab->vola.tile.fid_ix, -1); + erts_atomic32_init_nob(&ptab->vola.tile.aid_ix, -1); + erts_atomic32_init_nob(&ptab->vola.tile.fid_ix, -1); } - erts_smp_interval_init(&ptab->list.data.interval); + erts_interval_init(&ptab->list.data.interval); ptab->list.data.deleted.start = NULL; ptab->list.data.deleted.end = NULL; ptab->list.data.chunks = (((ptab->r.o.max - 1) @@ -480,9 +480,9 @@ erts_ptab_init_table(ErtsPTab *ptab, * have ERTS_PTAB_MAX_SIZE-1 valid elements in the table while * still having a table size of the power of 2. */ - erts_smp_atomic32_inc_nob(&ptab->vola.tile.count); + erts_atomic32_inc_nob(&ptab->vola.tile.count); pix = erts_ptab_data2pix(ptab, ptab->r.o.invalid_data); - erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], + erts_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab->r.o.invalid_element); } @@ -506,12 +506,12 @@ erts_ptab_new_element(ErtsPTab *ptab, erts_ptab_rlock(ptab); - count = erts_smp_atomic32_inc_read_acqb(&ptab->vola.tile.count); + count = erts_atomic32_inc_read_acqb(&ptab->vola.tile.count); if (count > ptab->r.o.max) { while (1) { erts_aint32_t act_count; - act_count = erts_smp_atomic32_cmpxchg_relb(&ptab->vola.tile.count, + act_count = erts_atomic32_cmpxchg_relb(&ptab->vola.tile.count, count-1, count); if (act_count == count) { @@ -525,14 +525,14 @@ erts_ptab_new_element(ErtsPTab *ptab, } ptab_el->u.alive.started_interval - = erts_smp_current_interval_nob(erts_ptab_interval(ptab)); + = erts_current_interval_nob(erts_ptab_interval(ptab)); if (ptab->r.o.free_id_data) { do { - ix = (Uint32) erts_smp_atomic32_inc_read_acqb(&ptab->vola.tile.aid_ix); + ix = (Uint32) erts_atomic32_inc_read_acqb(&ptab->vola.tile.aid_ix); ix = ix_to_free_id_data_ix(ptab, ix); - data = erts_smp_atomic32_xchg_nob(&ptab->r.o.free_id_data[ix], + data = erts_atomic32_xchg_nob(&ptab->r.o.free_id_data[ix], (erts_aint32_t)ptab->r.o.invalid_data); }while ((Eterm)data == ptab->r.o.invalid_data); @@ -546,10 +546,10 @@ erts_ptab_new_element(ErtsPTab *ptab, pix = erts_ptab_data2pix(ptab, (Eterm) data); #ifdef DEBUG - ASSERT(ERTS_AINT_NULL == erts_smp_atomic_xchg_relb(&ptab->r.o.tab[pix], + ASSERT(ERTS_AINT_NULL == erts_atomic_xchg_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el)); #else - erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el); + erts_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el); #endif erts_ptab_runlock(ptab); @@ -563,7 +563,7 @@ erts_ptab_new_element(ErtsPTab *ptab, restart: ptab_el->u.alive.started_interval - = erts_smp_current_interval_nob(erts_ptab_interval(ptab)); + = erts_current_interval_nob(erts_ptab_interval(ptab)); ld = last_data_read_acqb(ptab); @@ -571,10 +571,10 @@ erts_ptab_new_element(ErtsPTab *ptab, while (1) { ld++; pix = erts_ptab_data2pix(ptab, ERTS_PTAB_LastData2EtermData(ld)); - if (erts_smp_atomic_read_nob(&ptab->r.o.tab[pix]) + if (erts_atomic_read_nob(&ptab->r.o.tab[pix]) == ERTS_AINT_NULL) { erts_aint_t val; - val = erts_smp_atomic_cmpxchg_relb(&ptab->r.o.tab[pix], + val = 
erts_atomic_cmpxchg_relb(&ptab->r.o.tab[pix], invalid, ERTS_AINT_NULL); @@ -621,10 +621,10 @@ erts_ptab_new_element(ErtsPTab *ptab, /* Move into slot reserved */ #ifdef DEBUG - ASSERT(invalid == erts_smp_atomic_xchg_relb(&ptab->r.o.tab[pix], + ASSERT(invalid == erts_atomic_xchg_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el)); #else - erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el); + erts_atomic_set_relb(&ptab->r.o.tab[pix], (erts_aint_t) ptab_el); #endif if (rlocked) @@ -644,7 +644,7 @@ save_deleted_element(ErtsPTab *ptab, ErtsPTabElementCommon *ptab_el) sizeof(ErtsPTabDeletedElement)); ERTS_PTAB_LIST_ASSERT(ptab->list.data.deleted.start && ptab->list.data.deleted.end); - ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab)); + ERTS_LC_ASSERT(erts_lc_ptab_is_rwlocked(ptab)); ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab); @@ -654,7 +654,7 @@ save_deleted_element(ErtsPTab *ptab, ErtsPTabElementCommon *ptab_el) ptdep->u.element.id = ptab_el->id; ptdep->u.element.inserted = ptab_el->u.alive.started_interval; ptdep->u.element.deleted = - erts_smp_current_interval_nob(erts_ptab_interval(ptab)); + erts_current_interval_nob(erts_ptab_interval(ptab)); ptab->list.data.deleted.end->next = ptdep; ptab->list.data.deleted.end = ptdep; @@ -678,7 +678,7 @@ erts_ptab_delete_element(ErtsPTab *ptab, pix = erts_ptab_id2pix(ptab, ptab_el->id); /* *Need* to be an managed thread */ - ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread()); + ERTS_LC_ASSERT(erts_thr_progress_is_managed_thread()); erts_ptab_rlock(ptab); maybe_save = ptab->list.data.deleted.end != NULL; @@ -687,7 +687,7 @@ erts_ptab_delete_element(ErtsPTab *ptab, erts_ptab_rwlock(ptab); } - erts_smp_atomic_set_relb(&ptab->r.o.tab[pix], ERTS_AINT_NULL); + erts_atomic_set_relb(&ptab->r.o.tab[pix], ERTS_AINT_NULL); if (ptab->r.o.free_id_data) { Uint32 prev_data; @@ -703,17 +703,17 @@ erts_ptab_delete_element(ErtsPTab *ptab, ASSERT(pix == erts_ptab_data2pix(ptab, data)); do { - ix = (Uint32) erts_smp_atomic32_inc_read_relb(&ptab->vola.tile.fid_ix); + ix = (Uint32) erts_atomic32_inc_read_relb(&ptab->vola.tile.fid_ix); ix = ix_to_free_id_data_ix(ptab, ix); - prev_data = erts_smp_atomic32_cmpxchg_nob(&ptab->r.o.free_id_data[ix], + prev_data = erts_atomic32_cmpxchg_nob(&ptab->r.o.free_id_data[ix], data, ptab->r.o.invalid_data); }while ((Eterm)prev_data != ptab->r.o.invalid_data); } - ASSERT(erts_smp_atomic32_read_nob(&ptab->vola.tile.count) > 0); - erts_smp_atomic32_dec_relb(&ptab->vola.tile.count); + ASSERT(erts_atomic32_read_nob(&ptab->vola.tile.count) > 0); + erts_atomic32_dec_relb(&ptab->vola.tile.count); if (!maybe_save) erts_ptab_runlock(ptab); @@ -927,7 +927,7 @@ ptab_list_bif_engine(Process *c_p, Eterm *res_accp, Binary *mbp) sizeof(ErtsPTabDeletedElement)); ptlbdp->bif_invocation->ix = -1; ptlbdp->bif_invocation->u.bif_invocation.interval - = erts_smp_step_interval_nob(erts_ptab_interval(ptab)); + = erts_step_interval_nob(erts_ptab_interval(ptab)); ERTS_PTAB_LIST_DBG_CHK_DEL_LIST(ptab); ptlbdp->bif_invocation->next = NULL; @@ -968,12 +968,12 @@ ptab_list_bif_engine(Process *c_p, Eterm *res_accp, Binary *mbp) locked = 1; } - ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab)); + ERTS_LC_ASSERT(erts_lc_ptab_is_rwlocked(ptab)); ERTS_PTAB_LIST_DBG_TRACE(p->common.id, insp_table); if (cix != 0) ptlbdp->chunk[cix].interval - = erts_smp_step_interval_nob(erts_ptab_interval(ptab)); + = erts_step_interval_nob(erts_ptab_interval(ptab)); else if (ptlbdp->bif_invocation) ptlbdp->chunk[0].interval = *invocation_interval_p; /* else: 
interval is irrelevant */ @@ -1331,18 +1331,18 @@ static void assert_ptab_consistency(ErtsPTab *ptab) int null_slots = 0; for (ix=0; ix < ptab->r.o.max; ix++) { - if (erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[ix]) != ptab->r.o.invalid_data) { + if (erts_atomic32_read_nob(&ptab->r.o.free_id_data[ix]) != ptab->r.o.invalid_data) { ++free_pids; - data = erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[ix]); + data = erts_atomic32_read_nob(&ptab->r.o.free_id_data[ix]); pix = erts_ptab_data2pix(ptab, (Eterm) data); ASSERT(erts_ptab_pix2intptr_nob(ptab, pix) == ERTS_AINT_NULL); } - if (erts_smp_atomic_read_nob(&ptab->r.o.tab[ix]) == ERTS_AINT_NULL) { + if (erts_atomic_read_nob(&ptab->r.o.tab[ix]) == ERTS_AINT_NULL) { ++null_slots; } } ASSERT(free_pids == null_slots); - ASSERT(free_pids == ptab->r.o.max - erts_smp_atomic32_read_nob(&ptab->vola.tile.count)); + ASSERT(free_pids == ptab->r.o.max - erts_atomic32_read_nob(&ptab->vola.tile.count)); } #endif } @@ -1366,7 +1366,7 @@ erts_ptab_test_next_id(ErtsPTab *ptab, int set, Uint next) Uint32 i, max_ix, num, stop_id_ix; max_ix = ptab->r.o.max - 1; num = next; - id_ix = (Uint32) erts_smp_atomic32_read_nob(&ptab->vola.tile.aid_ix); + id_ix = (Uint32) erts_atomic32_read_nob(&ptab->vola.tile.aid_ix); for (i=0; i <= max_ix; ++i) { Uint32 pix; @@ -1380,26 +1380,26 @@ erts_ptab_test_next_id(ErtsPTab *ptab, int set, Uint next) if (ERTS_AINT_NULL == erts_ptab_pix2intptr_nob(ptab, pix)) { ++id_ix; dix = ix_to_free_id_data_ix(ptab, id_ix); - erts_smp_atomic32_set_nob(&ptab->r.o.free_id_data[dix], num); + erts_atomic32_set_nob(&ptab->r.o.free_id_data[dix], num); ASSERT(pix == erts_ptab_data2pix(ptab, num)); } } - erts_smp_atomic32_set_nob(&ptab->vola.tile.fid_ix, id_ix); + erts_atomic32_set_nob(&ptab->vola.tile.fid_ix, id_ix); /* Write invalid_data in rest of free_id_data[]: */ - stop_id_ix = (1 + erts_smp_atomic32_read_nob(&ptab->vola.tile.aid_ix)) & max_ix; + stop_id_ix = (1 + erts_atomic32_read_nob(&ptab->vola.tile.aid_ix)) & max_ix; while (1) { id_ix = (id_ix+1) & max_ix; if (id_ix == stop_id_ix) break; dix = ix_to_free_id_data_ix(ptab, id_ix); - erts_smp_atomic32_set_nob(&ptab->r.o.free_id_data[dix], + erts_atomic32_set_nob(&ptab->r.o.free_id_data[dix], ptab->r.o.invalid_data); } } - id_ix = (Uint32) erts_smp_atomic32_read_nob(&ptab->vola.tile.aid_ix) + 1; + id_ix = (Uint32) erts_atomic32_read_nob(&ptab->vola.tile.aid_ix) + 1; dix = ix_to_free_id_data_ix(ptab, id_ix); - res = (Sint) erts_smp_atomic32_read_nob(&ptab->r.o.free_id_data[dix]); + res = (Sint) erts_atomic32_read_nob(&ptab->r.o.free_id_data[dix]); } else { /* Deprecated legacy algorithm... 
*/ @@ -1616,11 +1616,11 @@ debug_ptab_list_verify_all_pids(ErtsPTabListBifData *ptlbdp) static void debug_ptab_list_check_del_list(ErtsPTab *ptab) { - ERTS_SMP_LC_ASSERT(erts_smp_lc_ptab_is_rwlocked(ptab)); + ERTS_LC_ASSERT(erts_lc_ptab_is_rwlocked(ptab)); if (!ptab->list.data.deleted.start) ERTS_PTAB_LIST_ASSERT(!ptab->list.data.deleted.end); else { - Uint64 curr_interval = erts_smp_current_interval_nob(erts_ptab_interval(ptab)); + Uint64 curr_interval = erts_current_interval_nob(erts_ptab_interval(ptab)); Uint64 *prev_x_interval_p = NULL; ErtsPTabDeletedElement *ptdep; diff --git a/erts/emulator/beam/erl_ptab.h b/erts/emulator/beam/erl_ptab.h index 285d325cc4..4858cc8ab8 100644 --- a/erts/emulator/beam/erl_ptab.h +++ b/erts/emulator/beam/erl_ptab.h @@ -60,7 +60,7 @@ typedef struct { } refc; ErtsTracer tracer; Uint trace_flags; - erts_smp_atomic_t timer; + erts_atomic_t timer; union { /* --- While being alive --- */ struct { @@ -78,7 +78,7 @@ typedef struct { typedef struct ErtsPTabDeletedElement_ ErtsPTabDeletedElement; typedef struct { - erts_smp_rwmtx_t rwmtx; + erts_rwmtx_t rwmtx; erts_interval_t interval; struct { ErtsPTabDeletedElement *start; @@ -88,15 +88,15 @@ typedef struct { } ErtsPTabListData; typedef struct { - erts_smp_atomic64_t last_data; - erts_smp_atomic32_t count; - erts_smp_atomic32_t aid_ix; - erts_smp_atomic32_t fid_ix; + erts_atomic64_t last_data; + erts_atomic32_t count; + erts_atomic32_t aid_ix; + erts_atomic32_t fid_ix; } ErtsPTabVolatileData; typedef struct { - erts_smp_atomic_t *tab; - erts_smp_atomic32_t *free_id_data; + erts_atomic_t *tab; + erts_atomic32_t *free_id_data; Uint32 max; Uint32 pix_mask; Uint32 pix_cl_mask; @@ -223,8 +223,8 @@ ERTS_GLB_INLINE void erts_ptab_runlock(ErtsPTab *ptab); ERTS_GLB_INLINE void erts_ptab_rwlock(ErtsPTab *ptab); ERTS_GLB_INLINE int erts_ptab_tryrwlock(ErtsPTab *ptab); ERTS_GLB_INLINE void erts_ptab_rwunlock(ErtsPTab *ptab); -ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rlocked(ErtsPTab *ptab); -ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rwlocked(ErtsPTab *ptab); +ERTS_GLB_INLINE int erts_lc_ptab_is_rlocked(ErtsPTab *ptab); +ERTS_GLB_INLINE int erts_lc_ptab_is_rwlocked(ErtsPTab *ptab); #if ERTS_GLB_INLINE_INCL_FUNC_DEF @@ -245,7 +245,7 @@ ERTS_GLB_INLINE int erts_ptab_count(ErtsPTab *ptab) { int max = ptab->r.o.max; - erts_aint32_t res = erts_smp_atomic32_read_nob(&ptab->vola.tile.count); + erts_aint32_t res = erts_atomic32_read_nob(&ptab->vola.tile.count); if (max == ERTS_PTAB_MAX_SIZE) { max--; res--; @@ -352,25 +352,25 @@ erts_ptab_id2data(ErtsPTab *ptab, Eterm id) ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_nob(ErtsPTab *ptab, int ix) { ASSERT(0 <= ix && ix < ptab->r.o.max); - return erts_smp_atomic_read_nob(&ptab->r.o.tab[ix]); + return erts_atomic_read_nob(&ptab->r.o.tab[ix]); } ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_ddrb(ErtsPTab *ptab, int ix) { ASSERT(0 <= ix && ix < ptab->r.o.max); - return erts_smp_atomic_read_ddrb(&ptab->r.o.tab[ix]); + return erts_atomic_read_ddrb(&ptab->r.o.tab[ix]); } ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_rb(ErtsPTab *ptab, int ix) { ASSERT(0 <= ix && ix < ptab->r.o.max); - return erts_smp_atomic_read_rb(&ptab->r.o.tab[ix]); + return erts_atomic_read_rb(&ptab->r.o.tab[ix]); } ERTS_GLB_INLINE erts_aint_t erts_ptab_pix2intptr_acqb(ErtsPTab *ptab, int ix) { ASSERT(0 <= ix && ix < ptab->r.o.max); - return erts_smp_atomic_read_acqb(&ptab->r.o.tab[ix]); + return erts_atomic_read_acqb(&ptab->r.o.tab[ix]); } ERTS_GLB_INLINE void erts_ptab_atmc_inc_refc(ErtsPTabElementCommon 
*ptab_el) @@ -386,7 +386,7 @@ ERTS_GLB_INLINE void erts_ptab_atmc_inc_refc(ErtsPTabElementCommon *ptab_el) ERTS_GLB_INLINE Sint erts_ptab_atmc_dec_test_refc(ErtsPTabElementCommon *ptab_el) { erts_aint_t refc = erts_atomic_dec_read_relb(&ptab_el->refc.atmc); - ERTS_SMP_LC_ASSERT(refc >= 0); + ERTS_LC_ASSERT(refc >= 0); if (refc == 0) ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore); return (Sint) refc; @@ -397,7 +397,7 @@ ERTS_GLB_INLINE Sint erts_ptab_atmc_add_test_refc(ErtsPTabElementCommon *ptab_el { erts_aint_t refc = erts_atomic_add_read_mb(&ptab_el->refc.atmc, (erts_aint_t) add_refc); - ERTS_SMP_LC_ASSERT(refc >= 0); + ERTS_LC_ASSERT(refc >= 0); return (Sint) refc; } @@ -415,7 +415,7 @@ ERTS_GLB_INLINE void erts_ptab_inc_refc(ErtsPTabElementCommon *ptab_el) ERTS_GLB_INLINE Sint erts_ptab_dec_test_refc(ErtsPTabElementCommon *ptab_el) { Sint refc = --ptab_el->refc.sint; - ERTS_SMP_LC_ASSERT(refc >= 0); + ERTS_LC_ASSERT(refc >= 0); return refc; } @@ -423,7 +423,7 @@ ERTS_GLB_INLINE Sint erts_ptab_add_test_refc(ErtsPTabElementCommon *ptab_el, Sint add_refc) { ptab_el->refc.sint += add_refc; - ERTS_SMP_LC_ASSERT(ptab_el->refc.sint >= 0); + ERTS_LC_ASSERT(ptab_el->refc.sint >= 0); return (Sint) ptab_el->refc.sint; } @@ -434,42 +434,42 @@ ERTS_GLB_INLINE Sint erts_ptab_read_refc(ErtsPTabElementCommon *ptab_el) ERTS_GLB_INLINE void erts_ptab_rlock(ErtsPTab *ptab) { - erts_smp_rwmtx_rlock(&ptab->list.data.rwmtx); + erts_rwmtx_rlock(&ptab->list.data.rwmtx); } ERTS_GLB_INLINE int erts_ptab_tryrlock(ErtsPTab *ptab) { - return erts_smp_rwmtx_tryrlock(&ptab->list.data.rwmtx); + return erts_rwmtx_tryrlock(&ptab->list.data.rwmtx); } ERTS_GLB_INLINE void erts_ptab_runlock(ErtsPTab *ptab) { - erts_smp_rwmtx_runlock(&ptab->list.data.rwmtx); + erts_rwmtx_runlock(&ptab->list.data.rwmtx); } ERTS_GLB_INLINE void erts_ptab_rwlock(ErtsPTab *ptab) { - erts_smp_rwmtx_rwlock(&ptab->list.data.rwmtx); + erts_rwmtx_rwlock(&ptab->list.data.rwmtx); } ERTS_GLB_INLINE int erts_ptab_tryrwlock(ErtsPTab *ptab) { - return erts_smp_rwmtx_tryrwlock(&ptab->list.data.rwmtx); + return erts_rwmtx_tryrwlock(&ptab->list.data.rwmtx); } ERTS_GLB_INLINE void erts_ptab_rwunlock(ErtsPTab *ptab) { - erts_smp_rwmtx_rwunlock(&ptab->list.data.rwmtx); + erts_rwmtx_rwunlock(&ptab->list.data.rwmtx); } -ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rlocked(ErtsPTab *ptab) +ERTS_GLB_INLINE int erts_lc_ptab_is_rlocked(ErtsPTab *ptab) { - return erts_smp_lc_rwmtx_is_rlocked(&ptab->list.data.rwmtx); + return erts_lc_rwmtx_is_rlocked(&ptab->list.data.rwmtx); } -ERTS_GLB_INLINE int erts_smp_lc_ptab_is_rwlocked(ErtsPTab *ptab) +ERTS_GLB_INLINE int erts_lc_ptab_is_rwlocked(ErtsPTab *ptab) { - return erts_smp_lc_rwmtx_is_rwlocked(&ptab->list.data.rwmtx); + return erts_lc_rwmtx_is_rwlocked(&ptab->list.data.rwmtx); } #endif diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h deleted file mode 100644 index dabf8702c8..0000000000 --- a/erts/emulator/beam/erl_smp.h +++ /dev/null @@ -1,1063 +0,0 @@ -/* - * %CopyrightBegin% - * - * Copyright Ericsson AB 2005-2017. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - * %CopyrightEnd% - */ -/* - * SMP interface to ethread library. - * This is essentially "sed s/erts_/erts_smp_/g < erl_threads.h > erl_smp.h", - * plus changes to NOP operations when ERTS_SMP is disabled. - * Author: Mikael Pettersson - */ -#ifndef ERL_SMP_H -#define ERL_SMP_H -#include "erl_threads.h" - -#ifdef ERTS_ENABLE_LOCK_POSITION -#define erts_smp_mtx_lock(L) erts_smp_mtx_lock_x(L, __FILE__, __LINE__) -#define erts_smp_mtx_trylock(L) erts_smp_mtx_trylock_x(L, __FILE__, __LINE__) -#define erts_smp_spin_lock(L) erts_smp_spin_lock_x(L, __FILE__, __LINE__) -#define erts_smp_rwmtx_tryrlock(L) erts_smp_rwmtx_tryrlock_x(L, __FILE__, __LINE__) -#define erts_smp_rwmtx_rlock(L) erts_smp_rwmtx_rlock_x(L, __FILE__, __LINE__) -#define erts_smp_rwmtx_tryrwlock(L) erts_smp_rwmtx_tryrwlock_x(L, __FILE__, __LINE__) -#define erts_smp_rwmtx_rwlock(L) erts_smp_rwmtx_rwlock_x(L, __FILE__, __LINE__) -#define erts_smp_read_lock(L) erts_smp_read_lock_x(L, __FILE__, __LINE__) -#define erts_smp_write_lock(L) erts_smp_write_lock_x(L, __FILE__, __LINE__) -#endif - - -#define ERTS_SMP_THR_OPTS_DEFAULT_INITER ERTS_THR_OPTS_DEFAULT_INITER -typedef erts_thr_opts_t erts_smp_thr_opts_t; -typedef erts_thr_init_data_t erts_smp_thr_init_data_t; -typedef erts_tid_t erts_smp_tid_t; -typedef erts_mtx_t erts_smp_mtx_t; -typedef erts_cnd_t erts_smp_cnd_t; -#define ERTS_SMP_RWMTX_OPT_DEFAULT_INITER ERTS_RWMTX_OPT_DEFAULT_INITER -#define ERTS_SMP_RWMTX_TYPE_NORMAL ERTS_RWMTX_TYPE_NORMAL -#define ERTS_SMP_RWMTX_TYPE_FREQUENT_READ ERTS_RWMTX_TYPE_FREQUENT_READ -#define ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ \ - ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ -#define ERTS_SMP_RWMTX_LONG_LIVED ERTS_RWMTX_LONG_LIVED -#define ERTS_SMP_RWMTX_SHORT_LIVED ERTS_RWMTX_SHORT_LIVED -#define ERTS_SMP_RWMTX_UNKNOWN_LIVED ERTS_RWMTX_UNKNOWN_LIVED -typedef erts_rwmtx_opt_t erts_smp_rwmtx_opt_t; -typedef erts_rwmtx_t erts_smp_rwmtx_t; -typedef erts_tsd_key_t erts_smp_tsd_key_t; -#define erts_smp_dw_atomic_t erts_dw_atomic_t -#define erts_smp_atomic_t erts_atomic_t -#define erts_smp_atomic32_t erts_atomic32_t -#define erts_smp_atomic64_t erts_atomic64_t -typedef erts_spinlock_t erts_smp_spinlock_t; -typedef erts_rwlock_t erts_smp_rwlock_t; -void erts_thr_fatal_error(int, char *); /* implemented in erl_init.c */ - -#define ERTS_SMP_MEMORY_BARRIER ERTS_THR_MEMORY_BARRIER -#define ERTS_SMP_WRITE_MEMORY_BARRIER ERTS_THR_WRITE_MEMORY_BARRIER -#define ERTS_SMP_READ_MEMORY_BARRIER ERTS_THR_READ_MEMORY_BARRIER -#define ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER - - -ERTS_GLB_INLINE void erts_smp_thr_init(erts_smp_thr_init_data_t *id); -ERTS_GLB_INLINE void erts_smp_thr_create(erts_smp_tid_t *tid, - void * (*func)(void *), - void *arg, - erts_smp_thr_opts_t *opts); -ERTS_GLB_INLINE void erts_smp_thr_join(erts_smp_tid_t tid, void **thr_res); -ERTS_GLB_INLINE void erts_smp_thr_detach(erts_smp_tid_t tid); -ERTS_GLB_INLINE void erts_smp_thr_exit(void *res); -ERTS_GLB_INLINE void erts_smp_install_exit_handler(void (*exit_handler)(void)); -ERTS_GLB_INLINE erts_smp_tid_t erts_smp_thr_self(void); -ERTS_GLB_INLINE int erts_smp_equal_tids(erts_smp_tid_t x, erts_smp_tid_t y); -#ifdef ERTS_HAVE_REC_MTX_INIT -#define ERTS_SMP_HAVE_REC_MTX_INIT 1 -ERTS_GLB_INLINE void erts_smp_rec_mtx_init(erts_smp_mtx_t *mtx); -#endif -ERTS_GLB_INLINE void erts_smp_mtx_init(erts_smp_mtx_t *mtx, - char *name, - Eterm extra, - 
erts_lock_flags_t flags); -ERTS_GLB_INLINE void erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, - char *name, - Eterm extra, - erts_lock_flags_t flags); -ERTS_GLB_INLINE void erts_smp_mtx_destroy(erts_smp_mtx_t *mtx); -#ifdef ERTS_ENABLE_LOCK_POSITION -ERTS_GLB_INLINE int erts_smp_mtx_trylock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line); -ERTS_GLB_INLINE void erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line); -#else -ERTS_GLB_INLINE int erts_smp_mtx_trylock(erts_smp_mtx_t *mtx); -ERTS_GLB_INLINE void erts_smp_mtx_lock(erts_smp_mtx_t *mtx); -#endif -ERTS_GLB_INLINE void erts_smp_mtx_unlock(erts_smp_mtx_t *mtx); -ERTS_GLB_INLINE int erts_smp_lc_mtx_is_locked(erts_smp_mtx_t *mtx); -ERTS_GLB_INLINE void erts_smp_cnd_init(erts_smp_cnd_t *cnd); -ERTS_GLB_INLINE void erts_smp_cnd_destroy(erts_smp_cnd_t *cnd); -ERTS_GLB_INLINE void erts_smp_cnd_wait(erts_smp_cnd_t *cnd, - erts_smp_mtx_t *mtx); -ERTS_GLB_INLINE void erts_smp_cnd_signal(erts_smp_cnd_t *cnd); -ERTS_GLB_INLINE void erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd); -ERTS_GLB_INLINE void erts_smp_rwmtx_set_reader_group(int no); -ERTS_GLB_INLINE void erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx, - erts_smp_rwmtx_opt_t *opt, - char *name, - Eterm extra, - erts_lock_flags_t flags); -ERTS_GLB_INLINE void erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx, - char *name, - Eterm extra, - erts_lock_flags_t flags); -ERTS_GLB_INLINE void erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx); -#ifdef ERTS_ENABLE_LOCK_POSITION -ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line); -ERTS_GLB_INLINE void erts_smp_rwmtx_rlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line); -ERTS_GLB_INLINE void erts_smp_rwmtx_rwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line); -ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line); -#else -ERTS_GLB_INLINE int erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx); -ERTS_GLB_INLINE void erts_smp_rwmtx_rlock(erts_smp_rwmtx_t *rwmtx); -ERTS_GLB_INLINE void erts_smp_rwmtx_rwlock(erts_smp_rwmtx_t *rwmtx); -ERTS_GLB_INLINE int erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx); -#endif -ERTS_GLB_INLINE void erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx); -ERTS_GLB_INLINE void erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx); -ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rlocked(erts_smp_rwmtx_t *mtx); -ERTS_GLB_INLINE int erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx); -ERTS_GLB_INLINE void erts_smp_spinlock_init(erts_smp_spinlock_t *lock, - char *name, - Eterm extra, - erts_lock_flags_t flags); -ERTS_GLB_INLINE void erts_smp_spinlock_destroy(erts_smp_spinlock_t *lock); -ERTS_GLB_INLINE void erts_smp_spin_unlock(erts_smp_spinlock_t *lock); -#ifdef ERTS_ENABLE_LOCK_POSITION -ERTS_GLB_INLINE void erts_smp_spin_lock_x(erts_smp_spinlock_t *lock, char *file, unsigned int line); -#else -ERTS_GLB_INLINE void erts_smp_spin_lock(erts_smp_spinlock_t *lock); -#endif -ERTS_GLB_INLINE int erts_smp_lc_spinlock_is_locked(erts_smp_spinlock_t *lock); -ERTS_GLB_INLINE void erts_smp_rwlock_init(erts_smp_rwlock_t *lock, - char *name, - Eterm extra, - erts_lock_flags_t flags); -ERTS_GLB_INLINE void erts_smp_rwlock_destroy(erts_smp_rwlock_t *lock); -ERTS_GLB_INLINE void erts_smp_read_unlock(erts_smp_rwlock_t *lock); -#ifdef ERTS_ENABLE_LOCK_POSITION -ERTS_GLB_INLINE void erts_smp_read_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line); -ERTS_GLB_INLINE void erts_smp_write_lock_x(erts_smp_rwlock_t 
*lock, char *file, unsigned int line); -#else -ERTS_GLB_INLINE void erts_smp_read_lock(erts_smp_rwlock_t *lock); -ERTS_GLB_INLINE void erts_smp_write_lock(erts_smp_rwlock_t *lock); -#endif -ERTS_GLB_INLINE void erts_smp_write_unlock(erts_smp_rwlock_t *lock); -ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rlocked(erts_smp_rwlock_t *lock); -ERTS_GLB_INLINE int erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock); -ERTS_GLB_INLINE void erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp, - char *keyname); -ERTS_GLB_INLINE void erts_smp_tsd_key_delete(erts_smp_tsd_key_t key); -ERTS_GLB_INLINE void erts_smp_tsd_set(erts_smp_tsd_key_t key, void *value); -ERTS_GLB_INLINE void * erts_smp_tsd_get(erts_smp_tsd_key_t key); - -#ifdef ERTS_THR_HAVE_SIG_FUNCS -#define ERTS_SMP_THR_HAVE_SIG_FUNCS 1 -ERTS_GLB_INLINE void erts_smp_thr_sigmask(int how, - const sigset_t *set, - sigset_t *oset); -ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig); -#endif /* #ifdef ERTS_THR_HAVE_SIG_FUNCS */ - -/* - * See "Documentation of atomics and memory barriers" at the top - * of erl_threads.h for info on atomics. - */ - - -/* Double word size atomics */ - -#define erts_smp_dw_atomic_init_nob erts_dw_atomic_init_nob -#define erts_smp_dw_atomic_set_nob erts_dw_atomic_set_nob -#define erts_smp_dw_atomic_read_nob erts_dw_atomic_read_nob -#define erts_smp_dw_atomic_cmpxchg_nob erts_dw_atomic_cmpxchg_nob - -#define erts_smp_dw_atomic_init_mb erts_dw_atomic_init_mb -#define erts_smp_dw_atomic_set_mb erts_dw_atomic_set_mb -#define erts_smp_dw_atomic_read_mb erts_dw_atomic_read_mb -#define erts_smp_dw_atomic_cmpxchg_mb erts_dw_atomic_cmpxchg_mb - -#define erts_smp_dw_atomic_init_acqb erts_dw_atomic_init_acqb -#define erts_smp_dw_atomic_set_acqb erts_dw_atomic_set_acqb -#define erts_smp_dw_atomic_read_acqb erts_dw_atomic_read_acqb -#define erts_smp_dw_atomic_cmpxchg_acqb erts_dw_atomic_cmpxchg_acqb - -#define erts_smp_dw_atomic_init_relb erts_dw_atomic_init_relb -#define erts_smp_dw_atomic_set_relb erts_dw_atomic_set_relb -#define erts_smp_dw_atomic_read_relb erts_dw_atomic_read_relb -#define erts_smp_dw_atomic_cmpxchg_relb erts_dw_atomic_cmpxchg_relb - -#define erts_smp_dw_atomic_init_ddrb erts_dw_atomic_init_ddrb -#define erts_smp_dw_atomic_set_ddrb erts_dw_atomic_set_ddrb -#define erts_smp_dw_atomic_read_ddrb erts_dw_atomic_read_ddrb -#define erts_smp_dw_atomic_cmpxchg_ddrb erts_dw_atomic_cmpxchg_ddrb - -#define erts_smp_dw_atomic_init_rb erts_dw_atomic_init_rb -#define erts_smp_dw_atomic_set_rb erts_dw_atomic_set_rb -#define erts_smp_dw_atomic_read_rb erts_dw_atomic_read_rb -#define erts_smp_dw_atomic_cmpxchg_rb erts_dw_atomic_cmpxchg_rb - -#define erts_smp_dw_atomic_init_wb erts_dw_atomic_init_wb -#define erts_smp_dw_atomic_set_wb erts_dw_atomic_set_wb -#define erts_smp_dw_atomic_read_wb erts_dw_atomic_read_wb -#define erts_smp_dw_atomic_cmpxchg_wb erts_dw_atomic_cmpxchg_wb - -#define erts_smp_dw_atomic_set_dirty erts_dw_atomic_set_dirty -#define erts_smp_dw_atomic_read_dirty erts_dw_atomic_read_dirty - -/* Word size atomics */ - -#define erts_smp_atomic_init_nob erts_atomic_init_nob -#define erts_smp_atomic_set_nob erts_atomic_set_nob -#define erts_smp_atomic_read_nob erts_atomic_read_nob -#define erts_smp_atomic_inc_read_nob erts_atomic_inc_read_nob -#define erts_smp_atomic_dec_read_nob erts_atomic_dec_read_nob -#define erts_smp_atomic_inc_nob erts_atomic_inc_nob -#define erts_smp_atomic_dec_nob erts_atomic_dec_nob -#define erts_smp_atomic_add_read_nob erts_atomic_add_read_nob -#define 
erts_smp_atomic_add_nob erts_atomic_add_nob -#define erts_smp_atomic_read_bor_nob erts_atomic_read_bor_nob -#define erts_smp_atomic_read_band_nob erts_atomic_read_band_nob -#define erts_smp_atomic_xchg_nob erts_atomic_xchg_nob -#define erts_smp_atomic_cmpxchg_nob erts_atomic_cmpxchg_nob -#define erts_smp_atomic_read_bset_nob erts_atomic_read_bset_nob - -#define erts_smp_atomic_init_mb erts_atomic_init_mb -#define erts_smp_atomic_set_mb erts_atomic_set_mb -#define erts_smp_atomic_read_mb erts_atomic_read_mb -#define erts_smp_atomic_inc_read_mb erts_atomic_inc_read_mb -#define erts_smp_atomic_dec_read_mb erts_atomic_dec_read_mb -#define erts_smp_atomic_inc_mb erts_atomic_inc_mb -#define erts_smp_atomic_dec_mb erts_atomic_dec_mb -#define erts_smp_atomic_add_read_mb erts_atomic_add_read_mb -#define erts_smp_atomic_add_mb erts_atomic_add_mb -#define erts_smp_atomic_read_bor_mb erts_atomic_read_bor_mb -#define erts_smp_atomic_read_band_mb erts_atomic_read_band_mb -#define erts_smp_atomic_xchg_mb erts_atomic_xchg_mb -#define erts_smp_atomic_cmpxchg_mb erts_atomic_cmpxchg_mb -#define erts_smp_atomic_read_bset_mb erts_atomic_read_bset_mb - -#define erts_smp_atomic_init_acqb erts_atomic_init_acqb -#define erts_smp_atomic_set_acqb erts_atomic_set_acqb -#define erts_smp_atomic_read_acqb erts_atomic_read_acqb -#define erts_smp_atomic_inc_read_acqb erts_atomic_inc_read_acqb -#define erts_smp_atomic_dec_read_acqb erts_atomic_dec_read_acqb -#define erts_smp_atomic_inc_acqb erts_atomic_inc_acqb -#define erts_smp_atomic_dec_acqb erts_atomic_dec_acqb -#define erts_smp_atomic_add_read_acqb erts_atomic_add_read_acqb -#define erts_smp_atomic_add_acqb erts_atomic_add_acqb -#define erts_smp_atomic_read_bor_acqb erts_atomic_read_bor_acqb -#define erts_smp_atomic_read_band_acqb erts_atomic_read_band_acqb -#define erts_smp_atomic_xchg_acqb erts_atomic_xchg_acqb -#define erts_smp_atomic_cmpxchg_acqb erts_atomic_cmpxchg_acqb -#define erts_smp_atomic_read_bset_acqb erts_atomic_read_bset_acqb - -#define erts_smp_atomic_init_relb erts_atomic_init_relb -#define erts_smp_atomic_set_relb erts_atomic_set_relb -#define erts_smp_atomic_read_relb erts_atomic_read_relb -#define erts_smp_atomic_inc_read_relb erts_atomic_inc_read_relb -#define erts_smp_atomic_dec_read_relb erts_atomic_dec_read_relb -#define erts_smp_atomic_inc_relb erts_atomic_inc_relb -#define erts_smp_atomic_dec_relb erts_atomic_dec_relb -#define erts_smp_atomic_add_read_relb erts_atomic_add_read_relb -#define erts_smp_atomic_add_relb erts_atomic_add_relb -#define erts_smp_atomic_read_bor_relb erts_atomic_read_bor_relb -#define erts_smp_atomic_read_band_relb erts_atomic_read_band_relb -#define erts_smp_atomic_xchg_relb erts_atomic_xchg_relb -#define erts_smp_atomic_cmpxchg_relb erts_atomic_cmpxchg_relb -#define erts_smp_atomic_read_bset_relb erts_atomic_read_bset_relb - -#define erts_smp_atomic_init_ddrb erts_atomic_init_ddrb -#define erts_smp_atomic_set_ddrb erts_atomic_set_ddrb -#define erts_smp_atomic_read_ddrb erts_atomic_read_ddrb -#define erts_smp_atomic_inc_read_ddrb erts_atomic_inc_read_ddrb -#define erts_smp_atomic_dec_read_ddrb erts_atomic_dec_read_ddrb -#define erts_smp_atomic_inc_ddrb erts_atomic_inc_ddrb -#define erts_smp_atomic_dec_ddrb erts_atomic_dec_ddrb -#define erts_smp_atomic_add_read_ddrb erts_atomic_add_read_ddrb -#define erts_smp_atomic_add_ddrb erts_atomic_add_ddrb -#define erts_smp_atomic_read_bor_ddrb erts_atomic_read_bor_ddrb -#define erts_smp_atomic_read_band_ddrb erts_atomic_read_band_ddrb -#define erts_smp_atomic_xchg_ddrb 
erts_atomic_xchg_ddrb -#define erts_smp_atomic_cmpxchg_ddrb erts_atomic_cmpxchg_ddrb -#define erts_smp_atomic_read_bset_ddrb erts_atomic_read_bset_ddrb - -#define erts_smp_atomic_init_rb erts_atomic_init_rb -#define erts_smp_atomic_set_rb erts_atomic_set_rb -#define erts_smp_atomic_read_rb erts_atomic_read_rb -#define erts_smp_atomic_inc_read_rb erts_atomic_inc_read_rb -#define erts_smp_atomic_dec_read_rb erts_atomic_dec_read_rb -#define erts_smp_atomic_inc_rb erts_atomic_inc_rb -#define erts_smp_atomic_dec_rb erts_atomic_dec_rb -#define erts_smp_atomic_add_read_rb erts_atomic_add_read_rb -#define erts_smp_atomic_add_rb erts_atomic_add_rb -#define erts_smp_atomic_read_bor_rb erts_atomic_read_bor_rb -#define erts_smp_atomic_read_band_rb erts_atomic_read_band_rb -#define erts_smp_atomic_xchg_rb erts_atomic_xchg_rb -#define erts_smp_atomic_cmpxchg_rb erts_atomic_cmpxchg_rb -#define erts_smp_atomic_read_bset_rb erts_atomic_read_bset_rb - -#define erts_smp_atomic_init_wb erts_atomic_init_wb -#define erts_smp_atomic_set_wb erts_atomic_set_wb -#define erts_smp_atomic_read_wb erts_atomic_read_wb -#define erts_smp_atomic_inc_read_wb erts_atomic_inc_read_wb -#define erts_smp_atomic_dec_read_wb erts_atomic_dec_read_wb -#define erts_smp_atomic_inc_wb erts_atomic_inc_wb -#define erts_smp_atomic_dec_wb erts_atomic_dec_wb -#define erts_smp_atomic_add_read_wb erts_atomic_add_read_wb -#define erts_smp_atomic_add_wb erts_atomic_add_wb -#define erts_smp_atomic_read_bor_wb erts_atomic_read_bor_wb -#define erts_smp_atomic_read_band_wb erts_atomic_read_band_wb -#define erts_smp_atomic_xchg_wb erts_atomic_xchg_wb -#define erts_smp_atomic_cmpxchg_wb erts_atomic_cmpxchg_wb -#define erts_smp_atomic_read_bset_wb erts_atomic_read_bset_wb - -#define erts_smp_atomic_set_dirty erts_atomic_set_dirty -#define erts_smp_atomic_read_dirty erts_atomic_read_dirty - -/* 32-bit atomics */ - -#define erts_smp_atomic32_init_nob erts_atomic32_init_nob -#define erts_smp_atomic32_set_nob erts_atomic32_set_nob -#define erts_smp_atomic32_read_nob erts_atomic32_read_nob -#define erts_smp_atomic32_inc_read_nob erts_atomic32_inc_read_nob -#define erts_smp_atomic32_dec_read_nob erts_atomic32_dec_read_nob -#define erts_smp_atomic32_inc_nob erts_atomic32_inc_nob -#define erts_smp_atomic32_dec_nob erts_atomic32_dec_nob -#define erts_smp_atomic32_add_read_nob erts_atomic32_add_read_nob -#define erts_smp_atomic32_add_nob erts_atomic32_add_nob -#define erts_smp_atomic32_read_bor_nob erts_atomic32_read_bor_nob -#define erts_smp_atomic32_read_band_nob erts_atomic32_read_band_nob -#define erts_smp_atomic32_xchg_nob erts_atomic32_xchg_nob -#define erts_smp_atomic32_cmpxchg_nob erts_atomic32_cmpxchg_nob -#define erts_smp_atomic32_read_bset_nob erts_atomic32_read_bset_nob - -#define erts_smp_atomic32_init_mb erts_atomic32_init_mb -#define erts_smp_atomic32_set_mb erts_atomic32_set_mb -#define erts_smp_atomic32_read_mb erts_atomic32_read_mb -#define erts_smp_atomic32_inc_read_mb erts_atomic32_inc_read_mb -#define erts_smp_atomic32_dec_read_mb erts_atomic32_dec_read_mb -#define erts_smp_atomic32_inc_mb erts_atomic32_inc_mb -#define erts_smp_atomic32_dec_mb erts_atomic32_dec_mb -#define erts_smp_atomic32_add_read_mb erts_atomic32_add_read_mb -#define erts_smp_atomic32_add_mb erts_atomic32_add_mb -#define erts_smp_atomic32_read_bor_mb erts_atomic32_read_bor_mb -#define erts_smp_atomic32_read_band_mb erts_atomic32_read_band_mb -#define erts_smp_atomic32_xchg_mb erts_atomic32_xchg_mb -#define erts_smp_atomic32_cmpxchg_mb erts_atomic32_cmpxchg_mb -#define 
erts_smp_atomic32_read_bset_mb erts_atomic32_read_bset_mb - -#define erts_smp_atomic32_init_acqb erts_atomic32_init_acqb -#define erts_smp_atomic32_set_acqb erts_atomic32_set_acqb -#define erts_smp_atomic32_read_acqb erts_atomic32_read_acqb -#define erts_smp_atomic32_inc_read_acqb erts_atomic32_inc_read_acqb -#define erts_smp_atomic32_dec_read_acqb erts_atomic32_dec_read_acqb -#define erts_smp_atomic32_inc_acqb erts_atomic32_inc_acqb -#define erts_smp_atomic32_dec_acqb erts_atomic32_dec_acqb -#define erts_smp_atomic32_add_read_acqb erts_atomic32_add_read_acqb -#define erts_smp_atomic32_add_acqb erts_atomic32_add_acqb -#define erts_smp_atomic32_read_bor_acqb erts_atomic32_read_bor_acqb -#define erts_smp_atomic32_read_band_acqb erts_atomic32_read_band_acqb -#define erts_smp_atomic32_xchg_acqb erts_atomic32_xchg_acqb -#define erts_smp_atomic32_cmpxchg_acqb erts_atomic32_cmpxchg_acqb -#define erts_smp_atomic32_read_bset_acqb erts_atomic32_read_bset_acqb - -#define erts_smp_atomic32_init_relb erts_atomic32_init_relb -#define erts_smp_atomic32_set_relb erts_atomic32_set_relb -#define erts_smp_atomic32_read_relb erts_atomic32_read_relb -#define erts_smp_atomic32_inc_read_relb erts_atomic32_inc_read_relb -#define erts_smp_atomic32_dec_read_relb erts_atomic32_dec_read_relb -#define erts_smp_atomic32_inc_relb erts_atomic32_inc_relb -#define erts_smp_atomic32_dec_relb erts_atomic32_dec_relb -#define erts_smp_atomic32_add_read_relb erts_atomic32_add_read_relb -#define erts_smp_atomic32_add_relb erts_atomic32_add_relb -#define erts_smp_atomic32_read_bor_relb erts_atomic32_read_bor_relb -#define erts_smp_atomic32_read_band_relb erts_atomic32_read_band_relb -#define erts_smp_atomic32_xchg_relb erts_atomic32_xchg_relb -#define erts_smp_atomic32_cmpxchg_relb erts_atomic32_cmpxchg_relb -#define erts_smp_atomic32_read_bset_relb erts_atomic32_read_bset_relb - -#define erts_smp_atomic32_init_ddrb erts_atomic32_init_ddrb -#define erts_smp_atomic32_set_ddrb erts_atomic32_set_ddrb -#define erts_smp_atomic32_read_ddrb erts_atomic32_read_ddrb -#define erts_smp_atomic32_inc_read_ddrb erts_atomic32_inc_read_ddrb -#define erts_smp_atomic32_dec_read_ddrb erts_atomic32_dec_read_ddrb -#define erts_smp_atomic32_inc_ddrb erts_atomic32_inc_ddrb -#define erts_smp_atomic32_dec_ddrb erts_atomic32_dec_ddrb -#define erts_smp_atomic32_add_read_ddrb erts_atomic32_add_read_ddrb -#define erts_smp_atomic32_add_ddrb erts_atomic32_add_ddrb -#define erts_smp_atomic32_read_bor_ddrb erts_atomic32_read_bor_ddrb -#define erts_smp_atomic32_read_band_ddrb erts_atomic32_read_band_ddrb -#define erts_smp_atomic32_xchg_ddrb erts_atomic32_xchg_ddrb -#define erts_smp_atomic32_cmpxchg_ddrb erts_atomic32_cmpxchg_ddrb -#define erts_smp_atomic32_read_bset_ddrb erts_atomic32_read_bset_ddrb - -#define erts_smp_atomic32_init_rb erts_atomic32_init_rb -#define erts_smp_atomic32_set_rb erts_atomic32_set_rb -#define erts_smp_atomic32_read_rb erts_atomic32_read_rb -#define erts_smp_atomic32_inc_read_rb erts_atomic32_inc_read_rb -#define erts_smp_atomic32_dec_read_rb erts_atomic32_dec_read_rb -#define erts_smp_atomic32_inc_rb erts_atomic32_inc_rb -#define erts_smp_atomic32_dec_rb erts_atomic32_dec_rb -#define erts_smp_atomic32_add_read_rb erts_atomic32_add_read_rb -#define erts_smp_atomic32_add_rb erts_atomic32_add_rb -#define erts_smp_atomic32_read_bor_rb erts_atomic32_read_bor_rb -#define erts_smp_atomic32_read_band_rb erts_atomic32_read_band_rb -#define erts_smp_atomic32_xchg_rb erts_atomic32_xchg_rb -#define erts_smp_atomic32_cmpxchg_rb 
erts_atomic32_cmpxchg_rb -#define erts_smp_atomic32_read_bset_rb erts_atomic32_read_bset_rb - -#define erts_smp_atomic32_init_wb erts_atomic32_init_wb -#define erts_smp_atomic32_set_wb erts_atomic32_set_wb -#define erts_smp_atomic32_read_wb erts_atomic32_read_wb -#define erts_smp_atomic32_inc_read_wb erts_atomic32_inc_read_wb -#define erts_smp_atomic32_dec_read_wb erts_atomic32_dec_read_wb -#define erts_smp_atomic32_inc_wb erts_atomic32_inc_wb -#define erts_smp_atomic32_dec_wb erts_atomic32_dec_wb -#define erts_smp_atomic32_add_read_wb erts_atomic32_add_read_wb -#define erts_smp_atomic32_add_wb erts_atomic32_add_wb -#define erts_smp_atomic32_read_bor_wb erts_atomic32_read_bor_wb -#define erts_smp_atomic32_read_band_wb erts_atomic32_read_band_wb -#define erts_smp_atomic32_xchg_wb erts_atomic32_xchg_wb -#define erts_smp_atomic32_cmpxchg_wb erts_atomic32_cmpxchg_wb -#define erts_smp_atomic32_read_bset_wb erts_atomic32_read_bset_wb - -#define erts_smp_atomic32_set_dirty erts_atomic32_set_dirty -#define erts_smp_atomic32_read_dirty erts_atomic32_read_dirty - -/* 64-bit atomics */ - -#define erts_smp_atomic64_init_nob erts_atomic64_init_nob -#define erts_smp_atomic64_set_nob erts_atomic64_set_nob -#define erts_smp_atomic64_read_nob erts_atomic64_read_nob -#define erts_smp_atomic64_inc_read_nob erts_atomic64_inc_read_nob -#define erts_smp_atomic64_dec_read_nob erts_atomic64_dec_read_nob -#define erts_smp_atomic64_inc_nob erts_atomic64_inc_nob -#define erts_smp_atomic64_dec_nob erts_atomic64_dec_nob -#define erts_smp_atomic64_add_read_nob erts_atomic64_add_read_nob -#define erts_smp_atomic64_add_nob erts_atomic64_add_nob -#define erts_smp_atomic64_read_bor_nob erts_atomic64_read_bor_nob -#define erts_smp_atomic64_read_band_nob erts_atomic64_read_band_nob -#define erts_smp_atomic64_xchg_nob erts_atomic64_xchg_nob -#define erts_smp_atomic64_cmpxchg_nob erts_atomic64_cmpxchg_nob -#define erts_smp_atomic64_read_bset_nob erts_atomic64_read_bset_nob - -#define erts_smp_atomic64_init_mb erts_atomic64_init_mb -#define erts_smp_atomic64_set_mb erts_atomic64_set_mb -#define erts_smp_atomic64_read_mb erts_atomic64_read_mb -#define erts_smp_atomic64_inc_read_mb erts_atomic64_inc_read_mb -#define erts_smp_atomic64_dec_read_mb erts_atomic64_dec_read_mb -#define erts_smp_atomic64_inc_mb erts_atomic64_inc_mb -#define erts_smp_atomic64_dec_mb erts_atomic64_dec_mb -#define erts_smp_atomic64_add_read_mb erts_atomic64_add_read_mb -#define erts_smp_atomic64_add_mb erts_atomic64_add_mb -#define erts_smp_atomic64_read_bor_mb erts_atomic64_read_bor_mb -#define erts_smp_atomic64_read_band_mb erts_atomic64_read_band_mb -#define erts_smp_atomic64_xchg_mb erts_atomic64_xchg_mb -#define erts_smp_atomic64_cmpxchg_mb erts_atomic64_cmpxchg_mb -#define erts_smp_atomic64_read_bset_mb erts_atomic64_read_bset_mb - -#define erts_smp_atomic64_init_acqb erts_atomic64_init_acqb -#define erts_smp_atomic64_set_acqb erts_atomic64_set_acqb -#define erts_smp_atomic64_read_acqb erts_atomic64_read_acqb -#define erts_smp_atomic64_inc_read_acqb erts_atomic64_inc_read_acqb -#define erts_smp_atomic64_dec_read_acqb erts_atomic64_dec_read_acqb -#define erts_smp_atomic64_inc_acqb erts_atomic64_inc_acqb -#define erts_smp_atomic64_dec_acqb erts_atomic64_dec_acqb -#define erts_smp_atomic64_add_read_acqb erts_atomic64_add_read_acqb -#define erts_smp_atomic64_add_acqb erts_atomic64_add_acqb -#define erts_smp_atomic64_read_bor_acqb erts_atomic64_read_bor_acqb -#define erts_smp_atomic64_read_band_acqb erts_atomic64_read_band_acqb -#define 
erts_smp_atomic64_xchg_acqb erts_atomic64_xchg_acqb -#define erts_smp_atomic64_cmpxchg_acqb erts_atomic64_cmpxchg_acqb -#define erts_smp_atomic64_read_bset_acqb erts_atomic64_read_bset_acqb - -#define erts_smp_atomic64_init_relb erts_atomic64_init_relb -#define erts_smp_atomic64_set_relb erts_atomic64_set_relb -#define erts_smp_atomic64_read_relb erts_atomic64_read_relb -#define erts_smp_atomic64_inc_read_relb erts_atomic64_inc_read_relb -#define erts_smp_atomic64_dec_read_relb erts_atomic64_dec_read_relb -#define erts_smp_atomic64_inc_relb erts_atomic64_inc_relb -#define erts_smp_atomic64_dec_relb erts_atomic64_dec_relb -#define erts_smp_atomic64_add_read_relb erts_atomic64_add_read_relb -#define erts_smp_atomic64_add_relb erts_atomic64_add_relb -#define erts_smp_atomic64_read_bor_relb erts_atomic64_read_bor_relb -#define erts_smp_atomic64_read_band_relb erts_atomic64_read_band_relb -#define erts_smp_atomic64_xchg_relb erts_atomic64_xchg_relb -#define erts_smp_atomic64_cmpxchg_relb erts_atomic64_cmpxchg_relb -#define erts_smp_atomic64_read_bset_relb erts_atomic64_read_bset_relb - -#define erts_smp_atomic64_init_ddrb erts_atomic64_init_ddrb -#define erts_smp_atomic64_set_ddrb erts_atomic64_set_ddrb -#define erts_smp_atomic64_read_ddrb erts_atomic64_read_ddrb -#define erts_smp_atomic64_inc_read_ddrb erts_atomic64_inc_read_ddrb -#define erts_smp_atomic64_dec_read_ddrb erts_atomic64_dec_read_ddrb -#define erts_smp_atomic64_inc_ddrb erts_atomic64_inc_ddrb -#define erts_smp_atomic64_dec_ddrb erts_atomic64_dec_ddrb -#define erts_smp_atomic64_add_read_ddrb erts_atomic64_add_read_ddrb -#define erts_smp_atomic64_add_ddrb erts_atomic64_add_ddrb -#define erts_smp_atomic64_read_bor_ddrb erts_atomic64_read_bor_ddrb -#define erts_smp_atomic64_read_band_ddrb erts_atomic64_read_band_ddrb -#define erts_smp_atomic64_xchg_ddrb erts_atomic64_xchg_ddrb -#define erts_smp_atomic64_cmpxchg_ddrb erts_atomic64_cmpxchg_ddrb -#define erts_smp_atomic64_read_bset_ddrb erts_atomic64_read_bset_ddrb - -#define erts_smp_atomic64_init_rb erts_atomic64_init_rb -#define erts_smp_atomic64_set_rb erts_atomic64_set_rb -#define erts_smp_atomic64_read_rb erts_atomic64_read_rb -#define erts_smp_atomic64_inc_read_rb erts_atomic64_inc_read_rb -#define erts_smp_atomic64_dec_read_rb erts_atomic64_dec_read_rb -#define erts_smp_atomic64_inc_rb erts_atomic64_inc_rb -#define erts_smp_atomic64_dec_rb erts_atomic64_dec_rb -#define erts_smp_atomic64_add_read_rb erts_atomic64_add_read_rb -#define erts_smp_atomic64_add_rb erts_atomic64_add_rb -#define erts_smp_atomic64_read_bor_rb erts_atomic64_read_bor_rb -#define erts_smp_atomic64_read_band_rb erts_atomic64_read_band_rb -#define erts_smp_atomic64_xchg_rb erts_atomic64_xchg_rb -#define erts_smp_atomic64_cmpxchg_rb erts_atomic64_cmpxchg_rb -#define erts_smp_atomic64_read_bset_rb erts_atomic64_read_bset_rb - -#define erts_smp_atomic64_init_wb erts_atomic64_init_wb -#define erts_smp_atomic64_set_wb erts_atomic64_set_wb -#define erts_smp_atomic64_read_wb erts_atomic64_read_wb -#define erts_smp_atomic64_inc_read_wb erts_atomic64_inc_read_wb -#define erts_smp_atomic64_dec_read_wb erts_atomic64_dec_read_wb -#define erts_smp_atomic64_inc_wb erts_atomic64_inc_wb -#define erts_smp_atomic64_dec_wb erts_atomic64_dec_wb -#define erts_smp_atomic64_add_read_wb erts_atomic64_add_read_wb -#define erts_smp_atomic64_add_wb erts_atomic64_add_wb -#define erts_smp_atomic64_read_bor_wb erts_atomic64_read_bor_wb -#define erts_smp_atomic64_read_band_wb erts_atomic64_read_band_wb -#define erts_smp_atomic64_xchg_wb 
erts_atomic64_xchg_wb -#define erts_smp_atomic64_cmpxchg_wb erts_atomic64_cmpxchg_wb -#define erts_smp_atomic64_read_bset_wb erts_atomic64_read_bset_wb - -#define erts_smp_atomic64_set_dirty erts_atomic64_set_dirty -#define erts_smp_atomic64_read_dirty erts_atomic64_read_dirty - - -#if ERTS_GLB_INLINE_INCL_FUNC_DEF - -ERTS_GLB_INLINE void -erts_smp_thr_init(erts_smp_thr_init_data_t *id) -{ - erts_thr_init(id); -} - -ERTS_GLB_INLINE void -erts_smp_thr_create(erts_smp_tid_t *tid, void * (*func)(void *), void *arg, - erts_smp_thr_opts_t *opts) -{ - erts_thr_create(tid, func, arg, opts); -} - -ERTS_GLB_INLINE void -erts_smp_thr_join(erts_smp_tid_t tid, void **thr_res) -{ - erts_thr_join(tid, thr_res); -} - - -ERTS_GLB_INLINE void -erts_smp_thr_detach(erts_smp_tid_t tid) -{ - erts_thr_detach(tid); -} - - -ERTS_GLB_INLINE void -erts_smp_thr_exit(void *res) -{ - erts_thr_exit(res); -} - -ERTS_GLB_INLINE void -erts_smp_install_exit_handler(void (*exit_handler)(void)) -{ - erts_thr_install_exit_handler(exit_handler); -} - -ERTS_GLB_INLINE erts_smp_tid_t -erts_smp_thr_self(void) -{ - return erts_thr_self(); -} - - -ERTS_GLB_INLINE int -erts_smp_equal_tids(erts_smp_tid_t x, erts_smp_tid_t y) -{ - return erts_equal_tids(x, y); -} - - -#ifdef ERTS_HAVE_REC_MTX_INIT -ERTS_GLB_INLINE void -erts_smp_rec_mtx_init(erts_smp_mtx_t *mtx) -{ - erts_rec_mtx_init(mtx); -} -#endif - -ERTS_GLB_INLINE void -erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags) -{ - erts_mtx_init(mtx, name, extra, flags); -} - -ERTS_GLB_INLINE void -erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags) -{ - erts_mtx_init_locked(mtx, name, extra, flags); -} - -ERTS_GLB_INLINE void -erts_smp_mtx_destroy(erts_smp_mtx_t *mtx) -{ - erts_mtx_destroy(mtx); -} - -ERTS_GLB_INLINE int -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_mtx_trylock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line) -#else -erts_smp_mtx_trylock(erts_smp_mtx_t *mtx) -#endif -{ -#if defined(ERTS_ENABLE_LOCK_POSITION) - return erts_mtx_trylock_x(mtx,file,line); -#else - return erts_mtx_trylock(mtx); -#endif - -} - - -ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_mtx_lock_x(erts_smp_mtx_t *mtx, char *file, unsigned int line) -#else -erts_smp_mtx_lock(erts_smp_mtx_t *mtx) -#endif -{ -#if defined(ERTS_ENABLE_LOCK_POSITION) - erts_mtx_lock_x(mtx, file, line); -#else - erts_mtx_lock(mtx); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_mtx_unlock(erts_smp_mtx_t *mtx) -{ - erts_mtx_unlock(mtx); -} - -ERTS_GLB_INLINE int -erts_smp_lc_mtx_is_locked(erts_smp_mtx_t *mtx) -{ -#if defined(ERTS_ENABLE_LOCK_CHECK) - return erts_lc_mtx_is_locked(mtx); -#else - return 0; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_cnd_init(erts_smp_cnd_t *cnd) -{ - erts_cnd_init(cnd); -} - -ERTS_GLB_INLINE void -erts_smp_cnd_destroy(erts_smp_cnd_t *cnd) -{ - erts_cnd_destroy(cnd); -} - -ERTS_GLB_INLINE void -erts_smp_cnd_wait(erts_smp_cnd_t *cnd, erts_smp_mtx_t *mtx) -{ - erts_cnd_wait(cnd, mtx); -} - -/* - * IMPORTANT note about erts_smp_cnd_signal() and erts_smp_cnd_broadcast() - * - * POSIX allow a call to `pthread_cond_signal' or `pthread_cond_broadcast' - * even though the associated mutex/mutexes isn't/aren't locked by the - * caller. Our implementation do not allow that in order to avoid a - * performance penalty. That is, all associated mutexes *need* to be - * locked by the caller of erts_smp_cnd_signal()/erts_smp_cnd_broadcast()! 
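[Editorial example] The rule in the note above is stricter than POSIX and easy to violate, so a minimal sketch of the required pattern may help. It assumes the ERTS thread API from erl_threads.h; the names q_mtx, q_cnd, and q_nonempty are illustrative, not from this patch:

    /* Hedged sketch: per the note above, the mutex guarding the shared
     * state must still be held when erts_cnd_signal() is called. */
    static erts_mtx_t q_mtx;
    static erts_cnd_t q_cnd;
    static int q_nonempty;

    static void producer_put(void)
    {
        erts_mtx_lock(&q_mtx);
        q_nonempty = 1;               /* update shared state under the lock */
        erts_cnd_signal(&q_cnd);      /* mutex still held: required by ERTS */
        erts_mtx_unlock(&q_mtx);
    }

    static void consumer_take(void)
    {
        erts_mtx_lock(&q_mtx);
        while (!q_nonempty)           /* loop: waits may wake spuriously */
            erts_cnd_wait(&q_cnd, &q_mtx);
        q_nonempty = 0;
        erts_mtx_unlock(&q_mtx);
    }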
- */ - -ERTS_GLB_INLINE void -erts_smp_cnd_signal(erts_smp_cnd_t *cnd) -{ - erts_cnd_signal(cnd); -} - - -ERTS_GLB_INLINE void -erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd) -{ - erts_cnd_broadcast(cnd); -} - -ERTS_GLB_INLINE void -erts_smp_rwmtx_set_reader_group(int no) -{ - erts_rwmtx_set_reader_group(no); -} - -ERTS_GLB_INLINE void -erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx, - char *name, - Eterm extra, - erts_lock_flags_t flags) -{ - erts_smp_rwmtx_init_opt(rwmtx, NULL, name, extra, flags); -} - -ERTS_GLB_INLINE void -erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx, - erts_smp_rwmtx_opt_t *opt, - char *name, - Eterm extra, - erts_lock_flags_t flags) -{ - erts_rwmtx_init_opt(rwmtx, opt, name, extra, flags); -} - -ERTS_GLB_INLINE void -erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx) -{ - erts_rwmtx_destroy(rwmtx); -} - -ERTS_GLB_INLINE int -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_rwmtx_tryrlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line) -#else -erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx) -#endif -{ -#if defined(ERTS_ENABLE_LOCK_POSITION) - return erts_rwmtx_tryrlock_x(rwmtx, file, line); -#else - return erts_rwmtx_tryrlock(rwmtx); -#endif -} - -ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_rwmtx_rlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line) -#else -erts_smp_rwmtx_rlock(erts_smp_rwmtx_t *rwmtx) -#endif -{ -#if defined(ERTS_ENABLE_LOCK_POSITION) - erts_rwmtx_rlock_x(rwmtx, file, line); -#else - erts_rwmtx_rlock(rwmtx); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx) -{ - erts_rwmtx_runlock(rwmtx); -} - - -ERTS_GLB_INLINE int -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_rwmtx_tryrwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line) -#else -erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx) -#endif -{ -#if defined(ERTS_ENABLE_LOCK_POSITION) - return erts_rwmtx_tryrwlock_x(rwmtx, file, line); -#else - return erts_rwmtx_tryrwlock(rwmtx); -#endif -} - -ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_rwmtx_rwlock_x(erts_smp_rwmtx_t *rwmtx, char *file, unsigned int line) -#else -erts_smp_rwmtx_rwlock(erts_smp_rwmtx_t *rwmtx) -#endif -{ -#if defined(ERTS_ENABLE_LOCK_POSITION) - erts_rwmtx_rwlock_x(rwmtx, file, line); -#else - erts_rwmtx_rwlock(rwmtx); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx) -{ - erts_rwmtx_rwunlock(rwmtx); -} - -#if 0 /* The following rwmtx function names are - reserved for potential future use. 
*/ - -/* Try upgrade from r-locked state to rw-locked state */ -ERTS_GLB_INLINE int -erts_smp_rwmtx_trywlock(erts_smp_rwmtx_t *rwmtx) -{ - return 0; -} - -/* Upgrade from r-locked state to rw-locked state */ -ERTS_GLB_INLINE void -erts_smp_rwmtx_wlock(erts_smp_rwmtx_t *rwmtx) -{ - -} - -/* Downgrade from rw-locked state to r-locked state */ -ERTS_GLB_INLINE void -erts_smp_rwmtx_wunlock(erts_smp_rwmtx_t *rwmtx) -{ - -} - -#endif - -ERTS_GLB_INLINE int -erts_smp_lc_rwmtx_is_rlocked(erts_smp_rwmtx_t *mtx) -{ -#if defined(ERTS_ENABLE_LOCK_CHECK) - return erts_lc_rwmtx_is_rlocked(mtx); -#else - return 0; -#endif -} - -ERTS_GLB_INLINE int -erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx) -{ -#if defined(ERTS_ENABLE_LOCK_CHECK) - return erts_lc_rwmtx_is_rwlocked(mtx); -#else - return 0; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_spinlock_init(erts_smp_spinlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags) -{ - erts_spinlock_init(lock, name, extra, flags); -} - -ERTS_GLB_INLINE void -erts_smp_spinlock_destroy(erts_smp_spinlock_t *lock) -{ - erts_spinlock_destroy(lock); -} - -ERTS_GLB_INLINE void -erts_smp_spin_unlock(erts_smp_spinlock_t *lock) -{ - erts_spin_unlock(lock); -} - -ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_spin_lock_x(erts_smp_spinlock_t *lock, char *file, unsigned int line) -#else -erts_smp_spin_lock(erts_smp_spinlock_t *lock) -#endif -{ -#if defined(ERTS_ENABLE_LOCK_POSITION) - erts_spin_lock_x(lock, file, line); -#else - erts_spin_lock(lock); -#endif -} - -ERTS_GLB_INLINE int -erts_smp_lc_spinlock_is_locked(erts_smp_spinlock_t *lock) -{ -#if defined(ERTS_ENABLE_LOCK_CHECK) - return erts_lc_spinlock_is_locked(lock); -#else - return 0; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_rwlock_init(erts_smp_rwlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags) -{ - erts_rwlock_init(lock, name, extra, flags); -} - -ERTS_GLB_INLINE void -erts_smp_rwlock_destroy(erts_smp_rwlock_t *lock) -{ - erts_rwlock_destroy(lock); -} - -ERTS_GLB_INLINE void -erts_smp_read_unlock(erts_smp_rwlock_t *lock) -{ - erts_read_unlock(lock); -} - -ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_read_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line) -#else -erts_smp_read_lock(erts_smp_rwlock_t *lock) -#endif -{ -#if defined(ERTS_ENABLE_LOCK_POSITION) - erts_read_lock_x(lock, file, line); -#else - erts_read_lock(lock); -#endif -} - -ERTS_GLB_INLINE void -erts_smp_write_unlock(erts_smp_rwlock_t *lock) -{ - erts_write_unlock(lock); -} - -ERTS_GLB_INLINE void -#ifdef ERTS_ENABLE_LOCK_POSITION -erts_smp_write_lock_x(erts_smp_rwlock_t *lock, char *file, unsigned int line) -#else -erts_smp_write_lock(erts_smp_rwlock_t *lock) -#endif -{ -#if defined(ERTS_ENABLE_LOCK_POSITION) - erts_write_lock_x(lock, file, line); -#else - erts_write_lock(lock); -#endif -} - -ERTS_GLB_INLINE int -erts_smp_lc_rwlock_is_rlocked(erts_smp_rwlock_t *lock) -{ -#if defined(ERTS_ENABLE_LOCK_CHECK) - return erts_lc_rwlock_is_rlocked(lock); -#else - return 0; -#endif -} - -ERTS_GLB_INLINE int -erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock) -{ -#if defined(ERTS_ENABLE_LOCK_CHECK) - return erts_lc_rwlock_is_rwlocked(lock); -#else - return 0; -#endif -} - -ERTS_GLB_INLINE void -erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp, char* keyname) -{ - erts_tsd_key_create(keyp,keyname); -} - -ERTS_GLB_INLINE void -erts_smp_tsd_key_delete(erts_smp_tsd_key_t key) -{ - erts_tsd_key_delete(key); -} - -ERTS_GLB_INLINE void -erts_smp_tsd_set(erts_smp_tsd_key_t key, void 
*value) -{ - erts_tsd_set(key, value); -} - -ERTS_GLB_INLINE void * -erts_smp_tsd_get(erts_smp_tsd_key_t key) -{ - return erts_tsd_get(key); -} - -#ifdef ERTS_THR_HAVE_SIG_FUNCS -#define ERTS_SMP_THR_HAVE_SIG_FUNCS 1 - -ERTS_GLB_INLINE void -erts_smp_thr_sigmask(int how, const sigset_t *set, sigset_t *oset) -{ - erts_thr_sigmask(how, set, oset); -} - -ERTS_GLB_INLINE void -erts_smp_thr_sigwait(const sigset_t *set, int *sig) -{ - erts_thr_sigwait(set, sig); -} - -#endif /* #ifdef ERTS_THR_HAVE_SIG_FUNCS */ - -#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ - -#endif /* ERL_SMP_H */ - -#ifdef ERTS_UNDEF_DEPRECATED_ATOMICS - -/* Deprecated functions to replace */ - -#undef erts_smp_atomic_init -#undef erts_smp_atomic_set -#undef erts_smp_atomic_read -#undef erts_smp_atomic_inctest -#undef erts_smp_atomic_dectest -#undef erts_smp_atomic_inc -#undef erts_smp_atomic_dec -#undef erts_smp_atomic_addtest -#undef erts_smp_atomic_add -#undef erts_smp_atomic_xchg -#undef erts_smp_atomic_cmpxchg -#undef erts_smp_atomic_bor -#undef erts_smp_atomic_band - -#undef erts_smp_atomic32_init -#undef erts_smp_atomic32_set -#undef erts_smp_atomic32_read -#undef erts_smp_atomic32_inctest -#undef erts_smp_atomic32_dectest -#undef erts_smp_atomic32_inc -#undef erts_smp_atomic32_dec -#undef erts_smp_atomic32_addtest -#undef erts_smp_atomic32_add -#undef erts_smp_atomic32_xchg -#undef erts_smp_atomic32_cmpxchg -#undef erts_smp_atomic32_bor -#undef erts_smp_atomic32_band - -#endif diff --git a/erts/emulator/beam/erl_thr_progress.h b/erts/emulator/beam/erl_thr_progress.h index 80ede05d73..fa936b5707 100644 --- a/erts/emulator/beam/erl_thr_progress.h +++ b/erts/emulator/beam/erl_thr_progress.h @@ -33,11 +33,6 @@ #include "sys.h" - -#define erts_smp_thr_progress_block erts_thr_progress_block -#define erts_smp_thr_progress_unblock erts_thr_progress_unblock -#define erts_smp_thr_progress_is_blocking erts_thr_progress_is_blocking - void erts_thr_progress_block(void); void erts_thr_progress_unblock(void); int erts_thr_progress_is_blocking(void); diff --git a/erts/emulator/beam/erl_thr_queue.c b/erts/emulator/beam/erl_thr_queue.c index 66566edec5..548c2768e5 100644 --- a/erts/emulator/beam/erl_thr_queue.c +++ b/erts/emulator/beam/erl_thr_queue.c @@ -391,7 +391,7 @@ clean(ErtsThrQ_t *q, int max_ops, int do_notify) ilast = (erts_aint_t) enqueue_marker(q, NULL); if (q->head.unref_end == (ErtsThrQElement_t *) ilast) - ERTS_SMP_MEMORY_BARRIER; + ERTS_THR_MEMORY_BARRIER; else { q->head.next.unref_end = (ErtsThrQElement_t *) ilast; q->head.next.thr_progress = erts_thr_progress_later(NULL); diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h index 22a359554e..52af77e303 100644 --- a/erts/emulator/beam/erl_threads.h +++ b/erts/emulator/beam/erl_threads.h @@ -45,11 +45,6 @@ * Data dependency read barrier. Orders *only* loads * according to data dependency across the barrier. * - * If thread support has been disabled, these barriers will become no-ops. - * - * If the prefix ERTS_THR_ is replaced with ERTS_SMP_, the barriers will - * be enabled only in the SMP enabled runtime system. - * * --- Atomic operations --- * * Atomics operations exist for 32-bit, word size, and double word size @@ -86,20 +81,6 @@ * barrier. Load in atomic operation is ordered * before the barrier. * - * If thread support has been disabled, these functions are mapped to - * functions that performs the same operation, but aren't atomic - * and don't imply any memory barriers. 
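[Editorial example] The alias tables deleted above all follow one naming scheme, erts_atomic<width>_<op>_<barrier>, where the suffix selects the implied barrier (nob: none, mb: full, acqb: acquire, relb: release, ddrb: data-dependency read, rb: read, wb: write). A minimal sketch of the common release/acquire pairing, assuming the ERTS atomics from erl_threads.h; "ready" and "payload" are illustrative names:

    /* Hedged sketch of erts_atomic32_<op>_<barrier> usage. */
    static erts_atomic32_t ready;
    static int payload;

    static void init(void)
    {
        erts_atomic32_init_nob(&ready, 0);   /* no ordering needed at init */
    }

    static void publish(void)
    {
        payload = 42;
        erts_atomic32_set_relb(&ready, 1);   /* release: payload stored first */
    }

    static int try_consume(void)
    {
        if (erts_atomic32_read_acqb(&ready)) /* acquire: pairs with set_relb */
            return payload;
        return -1;
    }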
- * - * If the atomic operations are prefixed with erts_smp_ instead of only - * erts_ the atomic operations will only be atomic in the SMP enabled - * runtime system, and will be mapped to non-atomic operations without - * memory barriers in the runtime system without SMP support. Atomic - * operations with erts_smp_ prefix should use the atomic types - * erts_smp_atomic32_t, erts_smp_atomic_t, and erts_smp_dw_atomic_t - * instead of erts_atomic32_t, erts_atomic_t, and erts_dw_atomic_t. The - * integer data types erts_aint32_t, erts_aint_t, and erts_dw_atomic_t - * are the same. - * * --- 32-bit atomic operations --- * * The following 32-bit atomic operations exist. <B> should be diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c index f6bb52dde1..c06b464458 100644 --- a/erts/emulator/beam/erl_time_sup.c +++ b/erts/emulator/beam/erl_time_sup.c @@ -36,8 +36,8 @@ #include "erl_driver.h" #include "erl_nif.h" -static erts_smp_mtx_t erts_timeofday_mtx; -static erts_smp_mtx_t erts_get_time_mtx; +static erts_mtx_t erts_timeofday_mtx; +static erts_mtx_t erts_get_time_mtx; static SysTimes t_start; /* Used in elapsed_time_both */ static ErtsMonotonicTime prev_wall_clock_elapsed; /* Used in wall_clock_elapsed_time_both */ @@ -140,7 +140,7 @@ typedef struct { struct time_sup_infrequently_changed__ { #ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT struct { - erts_smp_rwmtx_t rwmtx; + erts_rwmtx_t rwmtx; ErtsTWheelTimer timer; ErtsMonotonicCorrectionData cdata; } parmon; @@ -148,9 +148,9 @@ struct time_sup_infrequently_changed__ { #endif ErtsSystemTime sinit; ErtsMonotonicTime not_corrected_moffset; - erts_smp_atomic64_t offset; + erts_atomic64_t offset; ErtsMonotonicTime shadow_offset; - erts_smp_atomic32_t preliminary_offset; + erts_atomic32_t preliminary_offset; }; struct time_sup_frequently_changed__ { @@ -188,19 +188,19 @@ erts_get_approx_time(void) static ERTS_INLINE void init_time_offset(ErtsMonotonicTime offset) { - erts_smp_atomic64_init_nob(&time_sup.inf.c.offset, (erts_aint64_t) offset); + erts_atomic64_init_nob(&time_sup.inf.c.offset, (erts_aint64_t) offset); } static ERTS_INLINE void set_time_offset(ErtsMonotonicTime offset) { - erts_smp_atomic64_set_relb(&time_sup.inf.c.offset, (erts_aint64_t) offset); + erts_atomic64_set_relb(&time_sup.inf.c.offset, (erts_aint64_t) offset); } static ERTS_INLINE ErtsMonotonicTime get_time_offset(void) { - return (ErtsMonotonicTime) erts_smp_atomic64_read_acqb(&time_sup.inf.c.offset); + return (ErtsMonotonicTime) erts_atomic64_read_acqb(&time_sup.inf.c.offset); } static ERTS_INLINE void @@ -281,7 +281,7 @@ read_corrected_time(int os_drift_corrected) ErtsMonotonicTime os_mtime; ErtsMonotonicCorrectionInstance ci; - erts_smp_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx); os_mtime = erts_os_monotonic_time(); @@ -294,7 +294,7 @@ read_corrected_time(int os_drift_corrected) ci = time_sup.inf.c.parmon.cdata.insts.prev; } - erts_smp_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx); return calc_corrected_erl_mtime(os_mtime, &ci, NULL, os_drift_corrected); @@ -372,13 +372,13 @@ check_time_correction(void *vesdp) int os_drift_corrected = time_sup.r.o.os_corrected_monotonic_time; int set_new_correction = 0, begin_short_intervals = 0; - erts_smp_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx); erts_os_times(&os_mtime, &os_stime); ci = time_sup.inf.c.parmon.cdata.insts.curr; - 
erts_smp_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx); if (os_mtime < ci.os_mtime) erts_exit(ERTS_ABORT_EXIT, @@ -393,7 +393,7 @@ check_time_correction(void *vesdp) if (time_sup.inf.c.shadow_offset) { ERTS_TIME_ASSERT(time_sup.r.o.warp_mode == ERTS_SINGLE_TIME_WARP_MODE); - if (erts_smp_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) + if (erts_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) sdiff += time_sup.inf.c.shadow_offset; else time_sup.inf.c.shadow_offset = 0; @@ -416,7 +416,7 @@ check_time_correction(void *vesdp) } } else if ((time_sup.r.o.warp_mode == ERTS_SINGLE_TIME_WARP_MODE - && erts_smp_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) + && erts_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) && (sdiff < -2*time_sup.r.o.adj.small_diff || 2*time_sup.r.o.adj.small_diff < sdiff)) { /* @@ -641,7 +641,7 @@ check_time_correction(void *vesdp) #endif if (set_new_correction) { - erts_smp_rwmtx_rwlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_rwlock(&time_sup.inf.c.parmon.rwmtx); os_mtime = erts_os_monotonic_time(); @@ -669,7 +669,7 @@ check_time_correction(void *vesdp) time_sup.inf.c.parmon.cdata.insts.curr.os_mtime = os_mtime; time_sup.inf.c.parmon.cdata.insts.curr.correction = new_correction; - erts_smp_rwmtx_rwunlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_rwunlock(&time_sup.inf.c.parmon.rwmtx); } if (!esdp) @@ -787,13 +787,13 @@ finalize_corrected_time_offset(ErtsSystemTime *stimep) ErtsMonotonicCorrectionInstance ci; int os_drift_corrected = time_sup.r.o.os_corrected_monotonic_time; - erts_smp_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_rlock(&time_sup.inf.c.parmon.rwmtx); erts_os_times(&os_mtime, stimep); ci = time_sup.inf.c.parmon.cdata.insts.curr; - erts_smp_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx); + erts_rwmtx_runlock(&time_sup.inf.c.parmon.rwmtx); if (os_mtime < ci.os_mtime) erts_exit(ERTS_ABORT_EXIT, @@ -846,7 +846,7 @@ static ErtsMonotonicTime get_not_corrected_time(void) { ErtsMonotonicTime stime, mtime; - erts_smp_mtx_lock(&erts_get_time_mtx); + erts_mtx_lock(&erts_get_time_mtx); stime = erts_os_system_time(); @@ -872,7 +872,7 @@ static ErtsMonotonicTime get_not_corrected_time(void) ASSERT(stime == mtime + time_sup.inf.c.not_corrected_moffset); - erts_smp_mtx_unlock(&erts_get_time_mtx); + erts_mtx_unlock(&erts_get_time_mtx); return mtime; } @@ -954,18 +954,18 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode) ASSERT(ERTS_MONOTONIC_TIME_MIN < ERTS_MONOTONIC_TIME_MAX); - erts_smp_mtx_init(&erts_timeofday_mtx, "timeofday", NIL, + erts_mtx_init(&erts_timeofday_mtx, "timeofday", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); - erts_smp_mtx_init(&erts_get_time_mtx, "get_time", NIL, + erts_mtx_init(&erts_get_time_mtx, "get_time", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); time_sup.r.o.correction = time_correction; time_sup.r.o.warp_mode = time_warp_mode; if (time_warp_mode == ERTS_SINGLE_TIME_WARP_MODE) - erts_smp_atomic32_init_nob(&time_sup.inf.c.preliminary_offset, 1); + erts_atomic32_init_nob(&time_sup.inf.c.preliminary_offset, 1); else - erts_smp_atomic32_init_nob(&time_sup.inf.c.preliminary_offset, 0); + erts_atomic32_init_nob(&time_sup.inf.c.preliminary_offset, 0); time_sup.inf.c.shadow_offset = 0; #if !ERTS_COMPILE_TIME_MONOTONIC_TIME_UNIT @@ -1109,7 +1109,7 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode) if (time_sup.r.o.correction) { ErtsMonotonicCorrectionData 
*cdatap; - erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; + erts_rwmtx_opt_t rwmtx_opts = ERTS_RWMTX_OPT_DEFAULT_INITER; ErtsMonotonicTime offset; erts_os_times(&time_sup.inf.c.minit, &time_sup.inf.c.sinit); @@ -1119,10 +1119,10 @@ erts_init_time_sup(int time_correction, ErtsTimeWarpMode time_warp_mode) offset -= ERTS_MONOTONIC_BEGIN; init_time_offset(offset); - rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; - rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED; + rwmtx_opts.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; + rwmtx_opts.lived = ERTS_RWMTX_LONG_LIVED; - erts_smp_rwmtx_init_opt(&time_sup.inf.c.parmon.rwmtx, &rwmtx_opts, + erts_rwmtx_init_opt(&time_sup.inf.c.parmon.rwmtx, &rwmtx_opts, "get_corrected_time", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); @@ -1200,7 +1200,7 @@ ErtsTimeOffsetState erts_time_offset_state(void) case ERTS_NO_TIME_WARP_MODE: return ERTS_TIME_OFFSET_FINAL; case ERTS_SINGLE_TIME_WARP_MODE: - if (erts_smp_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) + if (erts_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) return ERTS_TIME_OFFSET_PRELIMINARY; return ERTS_TIME_OFFSET_FINAL; case ERTS_MULTI_TIME_WARP_MODE: @@ -1233,9 +1233,9 @@ erts_finalize_time_offset(void) case ERTS_SINGLE_TIME_WARP_MODE: { ErtsTimeOffsetState res = ERTS_TIME_OFFSET_FINAL; - erts_smp_mtx_lock(&erts_get_time_mtx); + erts_mtx_lock(&erts_get_time_mtx); - if (erts_smp_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) { + if (erts_atomic32_read_nob(&time_sup.inf.c.preliminary_offset)) { ErtsMonotonicTime mtime, new_offset; #ifdef ERTS_HAVE_OS_MONOTONIC_TIME_SUPPORT @@ -1272,11 +1272,11 @@ erts_finalize_time_offset(void) set_time_offset(new_offset); schedule_send_time_offset_changed_notifications(new_offset); - erts_smp_atomic32_set_nob(&time_sup.inf.c.preliminary_offset, 0); + erts_atomic32_set_nob(&time_sup.inf.c.preliminary_offset, 0); res = ERTS_TIME_OFFSET_PRELIMINARY; } - erts_smp_mtx_unlock(&erts_get_time_mtx); + erts_mtx_unlock(&erts_get_time_mtx); return res; } @@ -1306,13 +1306,13 @@ elapsed_time_both(ErtsMonotonicTime *ms_user, ErtsMonotonicTime *ms_sys, *ms_sys = total_sys; if (ms_user_diff || ms_sys_diff) { - erts_smp_mtx_lock(&erts_timeofday_mtx); + erts_mtx_lock(&erts_timeofday_mtx); prev_total_user = (ErtsMonotonicTime) ((t_start.tms_utime * 1000) / SYS_CLK_TCK); prev_total_sys = (ErtsMonotonicTime) ((t_start.tms_stime * 1000) / SYS_CLK_TCK); t_start = now; - erts_smp_mtx_unlock(&erts_timeofday_mtx); + erts_mtx_unlock(&erts_timeofday_mtx); if (ms_user_diff != NULL) *ms_user_diff = total_user - prev_total_user; @@ -1338,12 +1338,12 @@ wall_clock_elapsed_time_both(ErtsMonotonicTime *ms_total, ErtsMonotonicTime *ms_ *ms_total = elapsed; if (ms_diff) { - erts_smp_mtx_lock(&erts_timeofday_mtx); + erts_mtx_lock(&erts_timeofday_mtx); *ms_diff = elapsed - prev_wall_clock_elapsed; prev_wall_clock_elapsed = elapsed; - erts_smp_mtx_unlock(&erts_timeofday_mtx); + erts_mtx_unlock(&erts_timeofday_mtx); } } @@ -1729,7 +1729,7 @@ get_now(Uint* megasec, Uint* sec, Uint* microsec) update_last_mtime(NULL, mtime); now = ERTS_MONOTONIC_TO_USEC(mtime + time_offset); - erts_smp_mtx_lock(&erts_timeofday_mtx); + erts_mtx_lock(&erts_timeofday_mtx); /* Make sure now time is later than last time */ if (now <= previous_now) @@ -1737,7 +1737,7 @@ get_now(Uint* megasec, Uint* sec, Uint* microsec) previous_now = now; - erts_smp_mtx_unlock(&erts_timeofday_mtx); + erts_mtx_unlock(&erts_timeofday_mtx); now_megasec = now / 
ERTS_MONOTONIC_TIME_TERA; now_sec = now / ERTS_MONOTONIC_TIME_MEGA; @@ -1824,10 +1824,10 @@ void erts_get_now_cpu(Uint* megasec, Uint* sec, Uint* microsec) { void erts_monitor_time_offset(Eterm id, Eterm ref) { - erts_smp_mtx_lock(&erts_get_time_mtx); + erts_mtx_lock(&erts_get_time_mtx); erts_add_monitor(&time_offset_monitors, MON_TIME_OFFSET, ref, id, NIL); no_time_offset_monitors++; - erts_smp_mtx_unlock(&erts_get_time_mtx); + erts_mtx_unlock(&erts_get_time_mtx); } int @@ -1836,7 +1836,7 @@ erts_demonitor_time_offset(Eterm ref) int res; ErtsMonitor *mon; ASSERT(is_internal_ref(ref)); - erts_smp_mtx_lock(&erts_get_time_mtx); + erts_mtx_lock(&erts_get_time_mtx); if (is_internal_ordinary_ref(ref)) mon = erts_remove_monitor(&time_offset_monitors, ref); else @@ -1848,7 +1848,7 @@ erts_demonitor_time_offset(Eterm ref) no_time_offset_monitors--; res = 1; } - erts_smp_mtx_unlock(&erts_get_time_mtx); + erts_mtx_unlock(&erts_get_time_mtx); if (res) erts_destroy_monitor(mon); return res; @@ -1906,7 +1906,7 @@ send_time_offset_changed_notifications(void *new_offsetp) #endif new_offset -= ERTS_MONOTONIC_OFFSET_NATIVE; - erts_smp_mtx_lock(&erts_get_time_mtx); + erts_mtx_lock(&erts_get_time_mtx); no_monitors = no_time_offset_monitors; if (no_monitors) { @@ -1931,7 +1931,7 @@ send_time_offset_changed_notifications(void *new_offsetp) ASSERT(cntxt.ix == no_monitors); } - erts_smp_mtx_unlock(&erts_get_time_mtx); + erts_mtx_unlock(&erts_get_time_mtx); if (no_monitors) { Eterm *hp, *patch_refp, new_offset_term, message_template; @@ -1964,7 +1964,7 @@ send_time_offset_changed_notifications(void *new_offsetp) if (rp) { Eterm ref = to_mon_info[mix].ref; ErtsProcLocks rp_locks = ERTS_PROC_LOCK_LINK; - erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_lock(rp, ERTS_PROC_LOCK_LINK); if (erts_lookup_monitor(ERTS_P_MONITORS(rp), ref)) { ErtsMessage *mp; ErlOffHeap *ohp; @@ -1977,7 +1977,7 @@ send_time_offset_changed_notifications(void *new_offsetp) message = copy_struct(message_template, hsz, &hp, ohp); erts_queue_message(rp, rp_locks, mp, message, am_clock_service); } - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } } diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c index 069cc27d1b..a07e3642f6 100644 --- a/erts/emulator/beam/erl_trace.c +++ b/erts/emulator/beam/erl_trace.c @@ -77,8 +77,8 @@ static Eterm system_profile; int erts_cpu_timestamp; #endif -static erts_smp_mtx_t smq_mtx; -static erts_smp_rwmtx_t sys_trace_rwmtx; +static erts_mtx_t smq_mtx; +static erts_rwmtx_t sys_trace_rwmtx; enum ErtsSysMsgType { SYS_MSG_TYPE_UNDEFINED, @@ -322,11 +322,11 @@ static void tracer_free_fun(void*); typedef struct ErtsTracerNif_ ErtsTracerNif; void erts_init_trace(void) { - erts_smp_rwmtx_opt_t rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; - rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED; + erts_rwmtx_opt_t rwmtx_opts = ERTS_RWMTX_OPT_DEFAULT_INITER; + rwmtx_opts.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; + rwmtx_opts.lived = ERTS_RWMTX_LONG_LIVED; - erts_smp_rwmtx_init_opt(&sys_trace_rwmtx, &rwmtx_opts, "sys_tracers", NIL, + erts_rwmtx_init_opt(&sys_trace_rwmtx, &rwmtx_opts, "sys_tracers", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG); #ifdef HAVE_ERTS_NOW_CPU @@ -400,14 +400,14 @@ static Uint active_sched; void erts_system_profile_setup_active_schedulers(void) { - ERTS_SMP_LC_ASSERT(erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); 
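[Editorial example] Several initializers in this patch (erts_init_trace() above, and the parmon and atom-table locks earlier) configure read-mostly rwlocks the same way. A minimal sketch of that pattern, with the illustrative names example_lock/"example_tab":

    /* Hedged sketch of setting up a long-lived, read-dominated rwlock. */
    static erts_rwmtx_t example_lock;

    static void init_example_lock(void)
    {
        erts_rwmtx_opt_t opts = ERTS_RWMTX_OPT_DEFAULT_INITER;

        opts.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ;
        opts.lived = ERTS_RWMTX_LONG_LIVED;
        erts_rwmtx_init_opt(&example_lock, &opts, "example_tab", NIL,
                            ERTS_LOCK_FLAGS_PROPERTY_STATIC
                            | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
    }

Readers then take erts_rwmtx_rlock()/erts_rwmtx_runlock(); writers take erts_rwmtx_rwlock()/erts_rwmtx_rwunlock(), as throughout the hunks above.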
active_sched = erts_active_schedulers(); } static void exiting_reset(Eterm exiting) { - erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); + erts_rwmtx_rwlock(&sys_trace_rwmtx); if (exiting == system_monitor) { system_monitor = NIL; /* Let the trace message dispatcher clear flags, etc */ @@ -416,19 +416,19 @@ exiting_reset(Eterm exiting) system_profile = NIL; /* Let the trace message dispatcher clear flags, etc */ } - erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); + erts_rwmtx_rwunlock(&sys_trace_rwmtx); } void erts_trace_check_exiting(Eterm exiting) { int reset = 0; - erts_smp_rwmtx_rlock(&sys_trace_rwmtx); + erts_rwmtx_rlock(&sys_trace_rwmtx); if (exiting == system_monitor) reset = 1; else if (exiting == system_profile) reset = 1; - erts_smp_rwmtx_runlock(&sys_trace_rwmtx); + erts_rwmtx_runlock(&sys_trace_rwmtx); if (reset) exiting_reset(exiting); } @@ -448,7 +448,7 @@ erts_set_system_seq_tracer(Process *c_p, ErtsProcLocks c_p_locks, ErtsTracer new } } - erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); + erts_rwmtx_rwlock(&sys_trace_rwmtx); old = system_seq_tracer; system_seq_tracer = erts_tracer_nil; erts_tracer_update(&system_seq_tracer, new); @@ -456,7 +456,7 @@ erts_set_system_seq_tracer(Process *c_p, ErtsProcLocks c_p_locks, ErtsTracer new #ifdef DEBUG_PRINTOUTS erts_fprintf(stderr, "set seq tracer new=%T old=%T\n", new, old); #endif - erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); + erts_rwmtx_rwunlock(&sys_trace_rwmtx); return old; } @@ -464,12 +464,12 @@ ErtsTracer erts_get_system_seq_tracer(void) { ErtsTracer st; - erts_smp_rwmtx_rlock(&sys_trace_rwmtx); + erts_rwmtx_rlock(&sys_trace_rwmtx); st = system_seq_tracer; #ifdef DEBUG_PRINTOUTS erts_fprintf(stderr, "get seq tracer %T\n", st); #endif - erts_smp_rwmtx_runlock(&sys_trace_rwmtx); + erts_rwmtx_runlock(&sys_trace_rwmtx); if (st != erts_tracer_nil && call_enabled_tracer(st, NULL, TRACE_FUN_ENABLED, @@ -502,8 +502,8 @@ get_default_tracing(Uint *flagsp, ErtsTracer *tracerp, ErtsTracer curr_default_tracer = *default_tracer; if (tracerp) { /* we only have a rlock, so we have to unlock and then rwlock */ - erts_smp_rwmtx_runlock(&sys_trace_rwmtx); - erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); + erts_rwmtx_runlock(&sys_trace_rwmtx); + erts_rwmtx_rwlock(&sys_trace_rwmtx); } /* check if someone else changed default tracer while we got the write lock, if so we don't do @@ -513,8 +513,8 @@ get_default_tracing(Uint *flagsp, ErtsTracer *tracerp, ERTS_TRACER_CLEAR(default_tracer); } if (tracerp) { - erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); - erts_smp_rwmtx_rlock(&sys_trace_rwmtx); + erts_rwmtx_rwunlock(&sys_trace_rwmtx); + erts_rwmtx_rlock(&sys_trace_rwmtx); } } } @@ -547,81 +547,81 @@ void erts_change_default_proc_tracing(int setflags, Uint flagsp, const ErtsTracer tracer) { - erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); + erts_rwmtx_rwlock(&sys_trace_rwmtx); erts_change_default_tracing( setflags, flagsp, tracer, &default_proc_trace_flags, &default_proc_tracer); - erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); + erts_rwmtx_rwunlock(&sys_trace_rwmtx); } void erts_change_default_port_tracing(int setflags, Uint flagsp, const ErtsTracer tracer) { - erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); + erts_rwmtx_rwlock(&sys_trace_rwmtx); erts_change_default_tracing( setflags, flagsp, tracer, &default_port_trace_flags, &default_port_tracer); - erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); + erts_rwmtx_rwunlock(&sys_trace_rwmtx); } void erts_get_default_proc_tracing(Uint *flagsp, ErtsTracer *tracerp) { - erts_smp_rwmtx_rlock(&sys_trace_rwmtx); + erts_rwmtx_rlock(&sys_trace_rwmtx); *tracerp = 
erts_tracer_nil; /* initialize */ get_default_tracing( flagsp, tracerp, &default_proc_trace_flags, &default_proc_tracer); - erts_smp_rwmtx_runlock(&sys_trace_rwmtx); + erts_rwmtx_runlock(&sys_trace_rwmtx); } void erts_get_default_port_tracing(Uint *flagsp, ErtsTracer *tracerp) { - erts_smp_rwmtx_rlock(&sys_trace_rwmtx); + erts_rwmtx_rlock(&sys_trace_rwmtx); *tracerp = erts_tracer_nil; /* initialize */ get_default_tracing( flagsp, tracerp, &default_port_trace_flags, &default_port_tracer); - erts_smp_rwmtx_runlock(&sys_trace_rwmtx); + erts_rwmtx_runlock(&sys_trace_rwmtx); } void erts_set_system_monitor(Eterm monitor) { - erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); + erts_rwmtx_rwlock(&sys_trace_rwmtx); system_monitor = monitor; - erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); + erts_rwmtx_rwunlock(&sys_trace_rwmtx); } Eterm erts_get_system_monitor(void) { Eterm monitor; - erts_smp_rwmtx_rlock(&sys_trace_rwmtx); + erts_rwmtx_rlock(&sys_trace_rwmtx); monitor = system_monitor; - erts_smp_rwmtx_runlock(&sys_trace_rwmtx); + erts_rwmtx_runlock(&sys_trace_rwmtx); return monitor; } /* Performance monitoring */ void erts_set_system_profile(Eterm profile) { - erts_smp_rwmtx_rwlock(&sys_trace_rwmtx); + erts_rwmtx_rwlock(&sys_trace_rwmtx); system_profile = profile; - erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx); + erts_rwmtx_rwunlock(&sys_trace_rwmtx); } Eterm erts_get_system_profile(void) { Eterm profile; - erts_smp_rwmtx_rlock(&sys_trace_rwmtx); + erts_rwmtx_rlock(&sys_trace_rwmtx); profile = system_profile; - erts_smp_rwmtx_runlock(&sys_trace_rwmtx); + erts_rwmtx_runlock(&sys_trace_rwmtx); return profile; } @@ -1093,7 +1093,7 @@ erts_call_trace(Process* p, ErtsCodeInfo *info, Binary *match_spec, Eterm transformed_args[MAX_ARG]; ErtsTracer pre_ms_tracer = erts_tracer_nil; - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) & ERTS_PROC_LOCK_MAIN); ASSERT(tracer); if (ERTS_TRACER_COMPARE(*tracer, erts_tracer_true)) { @@ -1618,7 +1618,7 @@ profile_scheduler(Eterm scheduler_id, Eterm state) { bp = new_message_buffer(hsz); hp = bp->mem; - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); switch (state) { case am_active: @@ -1641,7 +1641,7 @@ profile_scheduler(Eterm scheduler_id, Eterm state) { hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL); enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, NIL, NIL, msg, bp); - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); } @@ -1650,7 +1650,7 @@ profile_scheduler(Eterm scheduler_id, Eterm state) { void trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) { ErtsTracerNif *tnif = NULL; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (is_tracer_enabled(NULL, 0, &p->common, &tnif, TRACE_FUN_E_PORTS, am_open)) send_to_tracer_nif(NULL, &p->common, p->common.id, tnif, TRACE_FUN_T_PORTS, am_open, calling_pid, drv_name, am_true); @@ -1667,9 +1667,9 @@ void trace_port(Port *t_p, Eterm what, Eterm data) { ErtsTracerNif *tnif = NULL; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p) + ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p) || erts_thr_progress_is_blocking()); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_PORTS, what)) send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_PORTS, what, data, THE_NON_VALUE, am_true); @@ -1712,9 +1712,9 @@ void trace_port_receive(Port *t_p, Eterm caller, Eterm what, ...) 
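[Editorial example] The comment in get_default_tracing() above ("we only have a rlock, so we have to unlock and then rwlock") describes a pattern also used by erts_tracer_nif_clear() later in this file: an rwmtx read lock cannot be upgraded in place, so the holder drops it, takes the write lock, and re-checks the state in case another thread raced in between. A minimal sketch, with illustrative names tab_lock and dirty:

    /* Hedged sketch of the drop-and-retake lock "upgrade" with re-check. */
    static erts_rwmtx_t tab_lock;
    static int dirty;

    static void repair_if_dirty(void)
    {
        erts_rwmtx_rlock(&tab_lock);
        if (!dirty) {
            erts_rwmtx_runlock(&tab_lock);
            return;
        }
        erts_rwmtx_runlock(&tab_lock);  /* must release before write-locking */
        erts_rwmtx_rwlock(&tab_lock);
        if (dirty)                      /* re-check under the write lock */
            dirty = 0;
        erts_rwmtx_rwunlock(&tab_lock);
    }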
{ ErtsTracerNif *tnif = NULL; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p) + ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p) || erts_thr_progress_is_blocking()); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_RECEIVE, am_receive)) { /* We can use a stack heap here, as the nif is called in the context of a port */ @@ -1829,9 +1829,9 @@ trace_port_send(Port *t_p, Eterm receiver, Eterm msg, int exists) { ErtsTracerNif *tnif = NULL; Eterm op = exists ? am_send : am_send_to_non_existing_process; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p) + ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p) || erts_thr_progress_is_blocking()); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_SEND, op)) send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_SEND, op, msg, receiver, am_true); @@ -1840,9 +1840,9 @@ trace_port_send(Port *t_p, Eterm receiver, Eterm msg, int exists) void trace_port_send_binary(Port *t_p, Eterm to, Eterm what, char *bin, Sint sz) { ErtsTracerNif *tnif = NULL; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p) + ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p) || erts_thr_progress_is_blocking()); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_SEND, am_send)) { Eterm msg; Binary* bptr = NULL; @@ -1888,9 +1888,9 @@ trace_sched_ports(Port *p, Eterm what) { void trace_sched_ports_where(Port *t_p, Eterm what, Eterm where) { ErtsTracerNif *tnif = NULL; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(t_p) + ERTS_LC_ASSERT(erts_lc_is_port_locked(t_p) || erts_thr_progress_is_blocking()); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_SCHED_PORT, what)) send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_SCHED_PORT, @@ -1912,7 +1912,7 @@ profile_runnable_port(Port *p, Eterm status) { bp = new_message_buffer(hsz); hp = bp->mem; - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); msg = TUPLE5(hp, am_profile, p->common.id, status, count, NIL /* Will be overwritten by timestamp */); @@ -1922,7 +1922,7 @@ profile_runnable_port(Port *p, Eterm status) { hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL); enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->common.id, NIL, msg, bp); - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); } /* Process profiling */ @@ -1966,7 +1966,7 @@ profile_runnable_proc(Process *p, Eterm status){ erts_thr_progress_unmanaged_continue(dhndl); - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); msg = TUPLE5(hp, am_profile, p->common.id, status, where, NIL /* Will be overwritten by timestamp */); @@ -1976,7 +1976,7 @@ profile_runnable_proc(Process *p, Eterm status){ hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL); enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->common.id, NIL, msg, bp); - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); } /* End system_profile tracing */ @@ -2027,7 +2027,7 @@ enqueue_sys_msg_unlocked(enum ErtsSysMsgType type, sys_message_queue = smqp; } sys_message_queue_end = smqp; - erts_smp_cnd_signal(&smq_cnd); + erts_cnd_signal(&smq_cnd); } static void @@ -2037,9 +2037,9 @@ enqueue_sys_msg(enum ErtsSysMsgType type, Eterm msg, ErlHeapFragment *bp) { - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); enqueue_sys_msg_unlocked(type, from, to, msg, bp); - erts_smp_mtx_unlock(&smq_mtx); + 
erts_mtx_unlock(&smq_mtx); } void @@ -2091,10 +2091,10 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver) && !erts_system_monitor_flags.busy_port && !erts_system_monitor_flags.busy_dist_port) break; /* Everything is disabled */ - erts_smp_thr_progress_block(); + erts_thr_progress_block(); if (system_monitor == receiver || receiver == NIL) erts_system_monitor_clear(NULL); - erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); break; case SYS_MSG_TYPE_SYSPROF: if (receiver == NIL @@ -2104,11 +2104,11 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver) && !erts_system_profile_flags.scheduler) break; /* Block system to clear flags */ - erts_smp_thr_progress_block(); + erts_thr_progress_block(); if (system_profile == receiver || receiver == NIL) { erts_system_profile_clear(NULL); } - erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); break; case SYS_MSG_TYPE_ERRLGR: { char *no_elgger = "(no error logger present)"; @@ -2153,38 +2153,38 @@ static void sys_msg_dispatcher_wakeup(void *vwait_p) { int *wait_p = (int *) vwait_p; - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); *wait_p = 0; - erts_smp_cnd_signal(&smq_cnd); - erts_smp_mtx_unlock(&smq_mtx); + erts_cnd_signal(&smq_cnd); + erts_mtx_unlock(&smq_mtx); } static void sys_msg_dispatcher_prep_wait(void *vwait_p) { int *wait_p = (int *) vwait_p; - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); *wait_p = 1; - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); } static void sys_msg_dispatcher_fin_wait(void *vwait_p) { int *wait_p = (int *) vwait_p; - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); *wait_p = 0; - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); } static void sys_msg_dispatcher_wait(void *vwait_p) { int *wait_p = (int *) vwait_p; - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); while (*wait_p) - erts_smp_cnd_wait(&smq_cnd, &smq_mtx); - erts_smp_mtx_unlock(&smq_mtx); + erts_cnd_wait(&smq_cnd, &smq_mtx); + erts_mtx_unlock(&smq_mtx); } static void * @@ -2210,9 +2210,9 @@ sys_msg_dispatcher_func(void *unused) int end_wait = 0; ErtsSysMsgQ *smqp; - ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); + ERTS_LC_ASSERT(!erts_thr_progress_is_blocking()); - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); /* Free previously used queue ... */ while (local_sys_message_queue) { @@ -2223,21 +2223,21 @@ sys_msg_dispatcher_func(void *unused) /* Fetch current trace message queue ... 
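[Editorial example] sys_msg_disp_failure() above shows the heavyweight fallback used when no finer-grained lock covers some global state: erts_thr_progress_block() brings all managed threads to a standstill, the global is cleared, and erts_thr_progress_unblock() resumes them. A minimal sketch; clear_global_trace_state() is an illustrative stub, not a function from this patch:

    /* Hedged sketch of updating rarely-touched globals under
     * thread-progress blocking. */
    static void clear_global_trace_state(void) { /* illustrative stub */ }

    static void reset_globals_blocking(void)
    {
        erts_thr_progress_block();      /* quiesce all managed threads */
        clear_global_trace_state();
        erts_thr_progress_unblock();    /* resume normal execution */
    }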
*/ if (!sys_message_queue) { - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); end_wait = 1; erts_thr_progress_active(NULL, 0); erts_thr_progress_prepare_wait(NULL); - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); } while (!sys_message_queue) - erts_smp_cnd_wait(&smq_cnd, &smq_mtx); + erts_cnd_wait(&smq_cnd, &smq_mtx); local_sys_message_queue = sys_message_queue; sys_message_queue = NULL; sys_message_queue_end = NULL; - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); if (end_wait) { erts_thr_progress_finalize_wait(NULL); @@ -2311,7 +2311,7 @@ sys_msg_dispatcher_func(void *unused) #ifdef DEBUG_PRINTOUTS erts_fprintf(stderr, "delivered\n"); #endif - erts_smp_proc_unlock(proc, proc_locks); + erts_proc_unlock(proc, proc_locks); } } else if (receiver == am_error_logger) { @@ -2349,7 +2349,7 @@ sys_msg_dispatcher_func(void *unused) sys_msg_disp_failure(smqp, receiver); drop_sys_msg: if (proc) - erts_smp_proc_unlock(proc, proc_locks); + erts_proc_unlock(proc, proc_locks); if (smqp->bp) free_message_buffer(smqp->bp); #ifdef DEBUG_PRINTOUTS @@ -2369,7 +2369,7 @@ erts_foreach_sys_msg_in_q(void (*func)(Eterm, ErlHeapFragment *)) { ErtsSysMsgQ *sm; - erts_smp_mtx_lock(&smq_mtx); + erts_mtx_lock(&smq_mtx); for (sm = sys_message_queue; sm; sm = sm->next) { Eterm to; switch (sm->type) { @@ -2388,23 +2388,23 @@ erts_foreach_sys_msg_in_q(void (*func)(Eterm, } (*func)(sm->from, to, sm->msg, sm->bp); } - erts_smp_mtx_unlock(&smq_mtx); + erts_mtx_unlock(&smq_mtx); } static void init_sys_msg_dispatcher(void) { - erts_smp_thr_opts_t thr_opts = ERTS_SMP_THR_OPTS_DEFAULT_INITER; + erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER; thr_opts.detached = 1; thr_opts.name = "sys_msg_dispatcher"; init_smq_element_alloc(); sys_message_queue = NULL; sys_message_queue_end = NULL; - erts_smp_cnd_init(&smq_cnd); - erts_smp_mtx_init(&smq_mtx, "sys_msg_q", NIL, + erts_cnd_init(&smq_cnd); + erts_mtx_init(&smq_mtx, "sys_msg_q", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG); - erts_smp_thr_create(&sys_msg_dispatcher_tid, + erts_thr_create(&sys_msg_dispatcher_tid, sys_msg_dispatcher_func, NULL, &thr_opts); @@ -2505,7 +2505,7 @@ static void init_tracer_template(ErtsTracerNif *tnif) { } static Hash *tracer_hash = NULL; -static erts_smp_rwmtx_t tracer_mtx; +static erts_rwmtx_t tracer_mtx; static ErtsTracerNif * load_tracer_nif(const ErtsTracer tracer) @@ -2545,9 +2545,9 @@ load_tracer_nif(const ErtsTracer tracer) return NULL; } - erts_smp_rwmtx_rwlock(&tracer_mtx); + erts_rwmtx_rwlock(&tracer_mtx); tnif = hash_put(tracer_hash, &tnif_tmpl); - erts_smp_rwmtx_rwunlock(&tracer_mtx); + erts_rwmtx_rwunlock(&tracer_mtx); return tnif; } @@ -2558,14 +2558,14 @@ lookup_tracer_nif(const ErtsTracer tracer) ErtsTracerNif tnif_tmpl; ErtsTracerNif *tnif; tnif_tmpl.module = ERTS_TRACER_MODULE(tracer); - erts_smp_rwmtx_rlock(&tracer_mtx); + erts_rwmtx_rlock(&tracer_mtx); if ((tnif = hash_get(tracer_hash, &tnif_tmpl)) == NULL) { - erts_smp_rwmtx_runlock(&tracer_mtx); + erts_rwmtx_runlock(&tracer_mtx); tnif = load_tracer_nif(tracer); ASSERT(!tnif || tnif->nif_mod); return tnif; } - erts_smp_rwmtx_runlock(&tracer_mtx); + erts_rwmtx_runlock(&tracer_mtx); ASSERT(tnif->nif_mod); return tnif; } @@ -2710,10 +2710,10 @@ send_to_tracer_nif(Process *c_p, ErtsPTabElementCommon *t_p, } if (is_internal_pid(t_p->id)) { /* We have to have at least one lock */ - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL); + 
ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL); } else { ASSERT(is_internal_port(t_p->id)); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p)); + ERTS_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p)); } #endif @@ -2758,15 +2758,15 @@ is_tracer_enabled(Process* c_p, ErtsProcLocks c_p_locks, #if defined(ERTS_ENABLE_LOCK_CHECK) if (c_p) - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == c_p_locks + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == c_p_locks || erts_thr_progress_is_blocking()); if (is_internal_pid(t_p->id)) { /* We have to have at least one lock */ - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks((Process*)t_p) & ERTS_PROC_LOCKS_ALL || erts_thr_progress_is_blocking()); } else { ASSERT(is_internal_port(t_p->id)); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p) + ERTS_LC_ASSERT(erts_lc_is_port_locked((Port*)t_p) || erts_thr_progress_is_blocking()); } #endif @@ -2788,12 +2788,12 @@ is_tracer_enabled(Process* c_p, ErtsProcLocks c_p_locks, if (is_internal_port(t_p->id) || (c_p && c_p->common.id == t_p->id)) { ErtsProcLocks c_p_xlocks = 0; if (is_internal_pid(t_p->id)) { - ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); + ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) & ERTS_PROC_LOCK_MAIN); if (c_p_locks != ERTS_PROC_LOCKS_ALL) { c_p_xlocks = ~c_p_locks & ERTS_PROC_LOCKS_ALL; - if (erts_smp_proc_trylock(c_p, c_p_xlocks) == EBUSY) { - erts_smp_proc_unlock(c_p, c_p_locks & ~ERTS_PROC_LOCK_MAIN); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); + if (erts_proc_trylock(c_p, c_p_xlocks) == EBUSY) { + erts_proc_unlock(c_p, c_p_locks & ~ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR); } } } @@ -2801,7 +2801,7 @@ is_tracer_enabled(Process* c_p, ErtsProcLocks c_p_locks, t_p->trace_flags &= ~TRACEE_FLAGS; if (c_p_xlocks) - erts_smp_proc_unlock(c_p, c_p_xlocks); + erts_proc_unlock(c_p, c_p_xlocks); } return 0; @@ -2959,11 +2959,11 @@ erts_tracer_update(ErtsTracer *tracer, const ErtsTracer new_tracer) static void init_tracer_nif() { - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; - rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; + rwmtx_opt.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; + rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED; - erts_smp_rwmtx_init_opt(&tracer_mtx, &rwmtx_opt, "tracer_mtx", NIL, + erts_rwmtx_init_opt(&tracer_mtx, &rwmtx_opt, "tracer_mtx", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG); erts_tracer_nif_clear(); @@ -2973,7 +2973,7 @@ static void init_tracer_nif() int erts_tracer_nif_clear() { - erts_smp_rwmtx_rlock(&tracer_mtx); + erts_rwmtx_rlock(&tracer_mtx); if (!tracer_hash || tracer_hash->nobjs) { HashFunctions hf; @@ -2985,19 +2985,19 @@ int erts_tracer_nif_clear() hf.meta_free = (HMFREE_FUN) erts_free; hf.meta_print = (HMPRINT_FUN) erts_print; - erts_smp_rwmtx_runlock(&tracer_mtx); - erts_smp_rwmtx_rwlock(&tracer_mtx); + erts_rwmtx_runlock(&tracer_mtx); + erts_rwmtx_rwlock(&tracer_mtx); if (tracer_hash) hash_delete(tracer_hash); tracer_hash = hash_new(ERTS_ALC_T_TRACER_NIF, "tracer_hash", 10, hf); - erts_smp_rwmtx_rwunlock(&tracer_mtx); + erts_rwmtx_rwunlock(&tracer_mtx); return 1; } - erts_smp_rwmtx_runlock(&tracer_mtx); + erts_rwmtx_runlock(&tracer_mtx); return 0; } diff --git a/erts/emulator/beam/erl_trace.h 
b/erts/emulator/beam/erl_trace.h index 7bbd93713d..dbf7ebd2a1 100644 --- a/erts/emulator/beam/erl_trace.h +++ b/erts/emulator/beam/erl_trace.h @@ -148,7 +148,7 @@ erts_bif_trace_epilogue(Process *p, Eterm result, int applying, ErtsTracer meta_tracer); void erts_send_pending_trace_msgs(ErtsSchedulerData *esdp); -#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP) \ +#define ERTS_CHK_PEND_TRACE_MSGS(ESDP) \ do { \ if ((ESDP)->pending_trace_msgs) \ erts_send_pending_trace_msgs((ESDP)); \ diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h index b1ba47df7b..44d8c85867 100644 --- a/erts/emulator/beam/erl_utils.h +++ b/erts/emulator/beam/erl_utils.h @@ -22,15 +22,11 @@ #define ERL_UTILS_H__ #include "sys.h" -#include "erl_smp.h" #include "erl_printf.h" struct process; typedef struct { -#ifdef DEBUG - int smp_api; -#endif union { Uint64 not_atomic; erts_atomic64_t atomic; @@ -38,62 +34,25 @@ typedef struct { } erts_interval_t; void erts_interval_init(erts_interval_t *); -void erts_smp_interval_init(erts_interval_t *); Uint64 erts_step_interval_nob(erts_interval_t *); Uint64 erts_step_interval_relb(erts_interval_t *); -Uint64 erts_smp_step_interval_nob(erts_interval_t *); -Uint64 erts_smp_step_interval_relb(erts_interval_t *); Uint64 erts_ensure_later_interval_nob(erts_interval_t *, Uint64); Uint64 erts_ensure_later_interval_acqb(erts_interval_t *, Uint64); -Uint64 erts_smp_ensure_later_interval_nob(erts_interval_t *, Uint64); -Uint64 erts_smp_ensure_later_interval_acqb(erts_interval_t *, Uint64); -ERTS_GLB_INLINE Uint64 erts_current_interval_nob__(erts_interval_t *); -ERTS_GLB_INLINE Uint64 erts_current_interval_acqb__(erts_interval_t *); ERTS_GLB_INLINE Uint64 erts_current_interval_nob(erts_interval_t *); ERTS_GLB_INLINE Uint64 erts_current_interval_acqb(erts_interval_t *); -ERTS_GLB_INLINE Uint64 erts_smp_current_interval_nob(erts_interval_t *); -ERTS_GLB_INLINE Uint64 erts_smp_current_interval_acqb(erts_interval_t *); #if ERTS_GLB_INLINE_INCL_FUNC_DEF ERTS_GLB_INLINE Uint64 -erts_current_interval_nob__(erts_interval_t *icp) -{ - return (Uint64) erts_atomic64_read_nob(&icp->counter.atomic); -} - -ERTS_GLB_INLINE Uint64 -erts_current_interval_acqb__(erts_interval_t *icp) -{ - return (Uint64) erts_atomic64_read_acqb(&icp->counter.atomic); -} - -ERTS_GLB_INLINE Uint64 erts_current_interval_nob(erts_interval_t *icp) { - ASSERT(!icp->smp_api); - return erts_current_interval_nob__(icp); + return (Uint64) erts_atomic64_read_nob(&icp->counter.atomic); } ERTS_GLB_INLINE Uint64 erts_current_interval_acqb(erts_interval_t *icp) { - ASSERT(!icp->smp_api); - return erts_current_interval_acqb__(icp); -} - -ERTS_GLB_INLINE Uint64 -erts_smp_current_interval_nob(erts_interval_t *icp) -{ - ASSERT(icp->smp_api); - return erts_current_interval_nob__(icp); -} - -ERTS_GLB_INLINE Uint64 -erts_smp_current_interval_acqb(erts_interval_t *icp) -{ - ASSERT(icp->smp_api); - return erts_current_interval_acqb__(icp); + return (Uint64) erts_atomic64_read_acqb(&icp->counter.atomic); } #endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h index 0b8d78c469..42082f9c3e 100644 --- a/erts/emulator/beam/erl_vm.h +++ b/erts/emulator/beam/erl_vm.h @@ -55,7 +55,7 @@ #define CP_SIZE 1 #define ErtsHAllocLockCheck(P) \ - ERTS_SMP_LC_ASSERT(erts_dbg_check_halloc_lock((P))) + ERTS_LC_ASSERT(erts_dbg_check_halloc_lock((P))) #ifdef DEBUG diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c index 2524ad40cb..c81503f722 100644 --- 
a/erts/emulator/beam/export.c +++ b/erts/emulator/beam/export.c @@ -41,14 +41,12 @@ static IndexTable export_tables[ERTS_NUM_CODE_IX]; /* Active not locked */ -static erts_smp_atomic_t total_entries_bytes; - -#include "erl_smp.h" +static erts_atomic_t total_entries_bytes; /* This lock protects the staging export table from concurrent access * AND it protects the staging table from becoming active. */ -erts_smp_mtx_t export_staging_lock; +erts_mtx_t export_staging_lock; extern BeamInstr* em_call_error_handler; extern BeamInstr* em_call_traced_function; @@ -125,7 +123,7 @@ export_alloc(struct export_entry* tmpl_e) Export* obj; blob = (struct export_blob*) erts_alloc(ERTS_ALC_T_EXPORT, sizeof(*blob)); - erts_smp_atomic_add_nob(&total_entries_bytes, sizeof(*blob)); + erts_atomic_add_nob(&total_entries_bytes, sizeof(*blob)); obj = &blob->exp; obj->info.op = 0; obj->info.u.gen_bp = NULL; @@ -169,7 +167,7 @@ export_free(struct export_entry* obj) } DBG_TRACE_MFA_P(&blob->exp.info.mfa, "export blob deallocation at %p", &blob->exp); erts_free(ERTS_ALC_T_EXPORT, blob); - erts_smp_atomic_add_nob(&total_entries_bytes, -sizeof(*blob)); + erts_atomic_add_nob(&total_entries_bytes, -sizeof(*blob)); } void @@ -178,9 +176,9 @@ init_export_table(void) HashFunctions f; int i; - erts_smp_mtx_init(&export_staging_lock, "export_tab", NIL, + erts_mtx_init(&export_staging_lock, "export_tab", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); - erts_smp_atomic_init_nob(&total_entries_bytes, 0); + erts_atomic_init_nob(&total_entries_bytes, 0); f.hash = (H_FUN) export_hash; f.cmp = (HCMP_FUN) export_cmp; @@ -369,7 +367,7 @@ int export_table_sz(void) } int export_entries_sz(void) { - return erts_smp_atomic_read_nob(&total_entries_bytes); + return erts_atomic_read_nob(&total_entries_bytes); } Export *export_get(Export *e) { diff --git a/erts/emulator/beam/export.h b/erts/emulator/beam/export.h index 7c812b306c..be6cce07bf 100644 --- a/erts/emulator/beam/export.h +++ b/erts/emulator/beam/export.h @@ -66,9 +66,9 @@ Export *export_get(Export*); void export_start_staging(void); void export_end_staging(int commit); -extern erts_smp_mtx_t export_staging_lock; -#define export_staging_lock() erts_smp_mtx_lock(&export_staging_lock) -#define export_staging_unlock() erts_smp_mtx_unlock(&export_staging_lock) +extern erts_mtx_t export_staging_lock; +#define export_staging_lock() erts_mtx_lock(&export_staging_lock) +#define export_staging_unlock() erts_mtx_unlock(&export_staging_lock) #include "beam_load.h" /* For em_* extern declarations */ #define ExportIsBuiltIn(EntryPtr) \ diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c index 1560844521..0874be7250 100644 --- a/erts/emulator/beam/external.c +++ b/erts/emulator/beam/external.c @@ -616,7 +616,7 @@ erts_make_dist_ext_copy(ErtsDistExternal *edep, Uint xsize) sys_memcpy((void *) ep, (void *) edep, dist_ext_sz); ep += dist_ext_sz; if (new_edep->dep) - erts_smp_refc_inc(&new_edep->dep->refc, 1); + erts_refc_inc(&new_edep->dep->refc, 1); new_edep->extp = ep; new_edep->ext_endp = ep + ext_sz; new_edep->heap_size = -1; @@ -669,12 +669,12 @@ erts_prepare_dist_ext(ErtsDistExternal *edep, edep->flags = 0; edep->dep = dep; if (dep) { - erts_smp_de_rlock(dep); + erts_de_rlock(dep); if (dep->flags & DFLAG_DIST_HDR_ATOM_CACHE) edep->flags |= ERTS_DIST_EXT_DFLAG_HDR; edep->flags |= (dep->connection_id & ERTS_DIST_EXT_CON_ID_MASK); - erts_smp_de_runlock(dep); + erts_de_runlock(dep); } if (ep[1] != DIST_HEADER) { diff --git 
a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h index 97054b2ee4..7cb94ba3d2 100644 --- a/erts/emulator/beam/global.h +++ b/erts/emulator/beam/global.h @@ -85,7 +85,7 @@ struct enif_resource_type_t typedef struct { - erts_smp_mtx_t lock; + erts_mtx_t lock; ErtsMonitor* root; int pending_failed_fire; int is_dying; @@ -183,9 +183,9 @@ typedef struct { void *handle; /* Handle for DLL or SO (for dyn. drivers). */ DE_ProcEntry *procs; /* List of pids that have loaded this driver, or that wait for it to change state */ - erts_smp_refc_t refc; /* Number of ports/processes having + erts_refc_t refc; /* Number of ports/processes having references to the driver */ - erts_smp_atomic32_t port_count; /* Number of ports using the driver */ + erts_atomic32_t port_count; /* Number of ports using the driver */ Uint flags; /* ERL_DE_FL_KILL_PORTS */ int status; /* ERL_DE_xxx */ char *full_path; /* Full path of the driver */ @@ -209,7 +209,7 @@ struct erts_driver_t_ { } version; int flags; DE_Handle *handle; - erts_smp_mtx_t *lock; + erts_mtx_t *lock; ErlDrvEntry *entry; ErlDrvData (*start)(ErlDrvPort port, char *command, SysDriverOpts* opts); void (*stop)(ErlDrvData drv_data); @@ -236,7 +236,7 @@ struct erts_driver_t_ { }; extern erts_driver_t *driver_list; -extern erts_smp_rwmtx_t erts_driver_list_lock; +extern erts_rwmtx_t erts_driver_list_lock; extern void erts_ddll_init(void); extern void erts_ddll_lock_driver(DE_Handle *dh, char *name); @@ -297,7 +297,7 @@ extern Eterm node_cookie; extern Uint display_items; /* no of items to display in traces etc */ extern int erts_backtrace_depth; -extern erts_smp_atomic32_t erts_max_gen_gcs; +extern erts_atomic32_t erts_max_gen_gcs; extern int bif_reductions; /* reductions + fcalls (when doing call_bif) */ extern int stackdump_on_exit; @@ -907,9 +907,9 @@ typedef struct ErtsLiteralArea_ { #define ERTS_LITERAL_AREA_ALLOC_SIZE(N) \ (sizeof(ErtsLiteralArea) + sizeof(Eterm)*((N) - 1)) -extern erts_smp_atomic_t erts_copy_literal_area__; +extern erts_atomic_t erts_copy_literal_area__; #define ERTS_COPY_LITERAL_AREA() \ - ((ErtsLiteralArea *) erts_smp_atomic_read_nob(&erts_copy_literal_area__)) + ((ErtsLiteralArea *) erts_atomic_read_nob(&erts_copy_literal_area__)) extern Process *erts_literal_area_collector; #ifdef ERTS_DIRTY_SCHEDULERS extern Process *erts_dirty_process_code_checker; diff --git a/erts/emulator/beam/index.c b/erts/emulator/beam/index.c index a1f6f54543..93d1111904 100644 --- a/erts/emulator/beam/index.c +++ b/erts/emulator/beam/index.c @@ -98,7 +98,7 @@ index_put_entry(IndexTable* t, void* tmpl) * Do a write barrier here to allow readers to do lock free iteration. * erts_index_num_entries() does matching read barrier. */ - ERTS_SMP_WRITE_MEMORY_BARRIER; + ERTS_THR_WRITE_MEMORY_BARRIER; t->entries++; return p; diff --git a/erts/emulator/beam/index.h b/erts/emulator/beam/index.h index 6c07571df6..30bc6a1121 100644 --- a/erts/emulator/beam/index.h +++ b/erts/emulator/beam/index.h @@ -88,7 +88,7 @@ ERTS_GLB_INLINE int erts_index_num_entries(IndexTable* t) * on tables where entries are never erased. * index_put_entry() does matching write barrier. 
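This barrier pairing is a publish/consume protocol: the writer completes the new entry, issues the write barrier, and only then bumps the entry count, while a lock-free reader loads the count, issues the matching read barrier just below, and may then dereference any entry under that count. A minimal sketch of the same shape in portable C11 atomics (assuming, for illustration only, that the ERTS_THR_WRITE/READ_MEMORY_BARRIER macros behave like release/acquire fences):

    #include <stdatomic.h>

    #define CAP 1024

    static void      *slots[CAP];  /* entries, each written once       */
    static atomic_int nentries;    /* published count, grows only      */

    /* Writer side (serialized externally, as index_put_entry() is by
     * the table lock): fill the slot first, then publish it with a
     * release store. */
    void publish(void *entry)
    {
        int n = atomic_load_explicit(&nentries, memory_order_relaxed);
        slots[n] = entry;
        atomic_store_explicit(&nentries, n + 1, memory_order_release);
    }

    /* Reader side, lock free: the acquire load guarantees that every
     * slot below the observed count is fully initialized. */
    int num_entries(void)
    {
        return atomic_load_explicit(&nentries, memory_order_acquire);
    }

The scheme leans on the count only ever growing, which is why the comment restricts it to tables whose entries are never erased.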
*/ - ERTS_SMP_READ_MEMORY_BARRIER; + ERTS_THR_READ_MEMORY_BARRIER; return ret; } diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c index cc7c717e6d..c8925e159e 100644 --- a/erts/emulator/beam/io.c +++ b/erts/emulator/beam/io.c @@ -62,10 +62,10 @@ extern ErlDrvEntry forker_driver_entry; extern ErlDrvEntry *driver_tab[]; /* table of static drivers, only used during initialization */ erts_driver_t *driver_list; /* List of all drivers, static and dynamic. */ -erts_smp_rwmtx_t erts_driver_list_lock; /* Mutex for driver list */ -static erts_smp_tsd_key_t driver_list_lock_status_key; /*stop recursive locks when calling +erts_rwmtx_t erts_driver_list_lock; /* Mutex for driver list */ +static erts_tsd_key_t driver_list_lock_status_key; /*stop recursive locks when calling driver init */ -static erts_smp_tsd_key_t driver_list_last_error_key; /* Save last DDLL error on a +static erts_tsd_key_t driver_list_last_error_key; /* Save last DDLL error on a per thread basis (for BC interfaces) */ ErtsPTab erts_port erts_align_attribute(ERTS_CACHE_LINE_SIZE); /* The port table */ @@ -115,7 +115,7 @@ static ERTS_INLINE int is_port_ioq_empty(Port *pp) { int res; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(pp)); if (!pp->port_data_lock) res = (pp->ioq.size == 0); else { @@ -137,7 +137,7 @@ Uint erts_port_ioq_size(Port *pp) { int res; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(pp)); if (!pp->port_data_lock) res = pp->ioq.size; else { @@ -205,7 +205,7 @@ dtrace_drvport_str(ErlDrvPort drvport, char *port_buf) static ERTS_INLINE void kill_port(Port *pp) { - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(pp)); ERTS_TRACER_CLEAR(&ERTS_TRACER(pp)); erts_ptab_delete_element(&erts_port, &pp->common); /* Time of death */ erts_port_task_free_port(pp); @@ -219,8 +219,8 @@ erts_lc_is_port_locked(Port *prt) { if (!prt) return 0; - ERTS_SMP_LC_ASSERT(prt->lock); - return erts_smp_lc_mtx_is_locked(prt->lock); + ERTS_LC_ASSERT(prt->lock); + return erts_lc_mtx_is_locked(prt->lock); } #endif @@ -350,7 +350,7 @@ static Port *create_port(char *name, runq = erts_get_runq_current(NULL); else runq = ERTS_RUNQ_IX(0); - erts_smp_atomic_set_nob(&prt->run_queue, (erts_aint_t) runq); + erts_atomic_set_nob(&prt->run_queue, (erts_aint_t) runq); prt->xports = NULL; @@ -373,7 +373,7 @@ static Port *create_port(char *name, prt->common.u.alive.reg = NULL; ERTS_PTMR_INIT(prt); erts_port_task_handle_init(&prt->timeout_task); - erts_smp_atomic_init_nob(&prt->psd, (erts_aint_t) NULL); + erts_atomic_init_nob(&prt->psd, (erts_aint_t) NULL); prt->async_open_port = NULL; prt->drv_data = (SWord) 0; prt->os_pid = -1; @@ -414,7 +414,7 @@ static Port *create_port(char *name, initq(prt); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (erts_port_schedule_all_ops) x_pts_flgs |= ERTS_PTS_FLG_FORCE_SCHED; @@ -423,7 +423,7 @@ static Port *create_port(char *name, x_pts_flgs |= ERTS_PTS_FLG_PARALLELISM; if (x_pts_flgs) - erts_smp_atomic32_read_bor_nob(&prt->sched.flags, x_pts_flgs); + erts_atomic32_read_bor_nob(&prt->sched.flags, x_pts_flgs); erts_atomic32_set_relb(&prt->state, state); return prt; @@ -521,7 +521,7 @@ erts_save_suspend_process_on_port(Port *prt, Process *process) int saved; erts_aint32_t flags; erts_port_task_sched_lock(&prt->sched); - flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + flags = erts_atomic32_read_nob(&prt->sched.flags); saved = 
(flags & ERTS_PTS_FLGS_BUSY) && !(flags & ERTS_PTS_FLG_EXIT); if (saved) erts_proclist_store_last(&prt->suspended, erts_proclist_create(process)); @@ -565,16 +565,16 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */ erts_mtx_t *driver_lock = NULL; int cprt_flgs = 0; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; - erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_rwmtx_rlock(&erts_driver_list_lock); if (!driver) { for (driver = driver_list; driver; driver = driver->next) { if (sys_strcmp(driver->name, name) == 0) break; } if (!driver) { - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); ERTS_OPEN_DRIVER_RET(NULL, -3, BADARG); } } @@ -619,7 +619,7 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */ } if (driver == NULL || (driver != &spawn_driver && opts->exit_status)) { - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); ERTS_OPEN_DRIVER_RET(NULL, -3, BADARG); } @@ -629,7 +629,7 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */ erts_ddll_increment_port_count(driver->handle); erts_ddll_reference_driver(driver->handle); } - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); /* * We'll set up the port before calling the start function, @@ -642,9 +642,9 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */ port = create_port(name, driver, driver_lock, cprt_flgs, pid, &port_errno); if (!port) { if (driver->handle) { - erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_rwmtx_rlock(&erts_driver_list_lock); erts_ddll_decrement_port_count(driver->handle); - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); erts_ddll_dereference_driver(driver->handle); } if (port_errno) @@ -729,9 +729,9 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. 
*/ port->linebuf = NULL; } if (driver->handle != NULL) { - erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_rwmtx_rlock(&erts_driver_list_lock); erts_ddll_decrement_port_count(driver->handle); - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); } kill_port(port); erts_port_release(port); @@ -770,7 +770,7 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */ Process *rp; erts_mtx_t *driver_lock = NULL; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; /* Need to be called from a scheduler thread */ if (!erts_get_scheduler_id()) @@ -784,12 +784,12 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */ if (!rp) return ERTS_INVALID_ERL_DRV_PORT; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(creator_port)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(creator_port)); driver = creator_port->drv_ptr; - erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_rwmtx_rlock(&erts_driver_list_lock); if (!erts_ddll_driver_ok(driver->handle)) { - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); return ERTS_INVALID_ERL_DRV_PORT; } @@ -800,31 +800,31 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */ driver_lock = driver->lock; - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); /* Inherit parallelism flag from parent */ if (ERTS_PTS_FLG_PARALLELISM & - erts_smp_atomic32_read_nob(&creator_port->sched.flags)) + erts_atomic32_read_nob(&creator_port->sched.flags)) cprt_flgs |= ERTS_CREATE_PORT_FLAG_PARALLELISM; port = create_port(name, driver, driver_lock, cprt_flgs, pid, NULL); if (!port) { if (driver->handle) { - erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_rwmtx_rlock(&erts_driver_list_lock); erts_ddll_decrement_port_count(driver->handle); - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); erts_ddll_dereference_driver(driver->handle); } return ERTS_INVALID_ERL_DRV_PORT; } - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(port)); - erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_lock(rp, ERTS_PROC_LOCK_LINK); if (ERTS_PROC_IS_EXITING(rp)) { - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (driver->handle) { - erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_rwmtx_rlock(&erts_driver_list_lock); erts_ddll_decrement_port_count(driver->handle); - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); } kill_port(port); erts_port_release(port); @@ -833,7 +833,7 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */ erts_add_link(&ERTS_P_LINKS(port), LINK_PID, pid); erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, port->common.id); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (!driver_lock) { ErtsXPortsList *xplp = xports_list_alloc(); @@ -1226,12 +1226,12 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp) invalid_sched_flags |= ERTS_PTS_FLG_PARALLELISM; if (sp->pre_chk_sched_flags) { - sp->sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + sp->sched_flags = erts_atomic32_read_nob(&prt->sched.flags); if (sp->sched_flags & invalid_sched_flags) return ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS; } - if (erts_smp_port_trylock(prt) == EBUSY) + if (erts_port_trylock(prt) == EBUSY) return ERTS_TRY_IMM_DRV_CALL_BUSY_LOCK; invalid_state = sp->state; @@ -1245,7 +1245,7 @@ 
try_imm_drv_call(ErtsTryImmDrvCallState *sp) if (prof_runnable_ports) erts_port_task_sched_lock(&prt->sched); - act = erts_smp_atomic32_read_nob(&prt->sched.flags); + act = erts_atomic32_read_nob(&prt->sched.flags); do { erts_aint32_t new; @@ -1257,7 +1257,7 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp) } exp = act; new = act | ERTS_PTS_FLG_EXEC_IMM; - act = erts_smp_atomic32_cmpxchg_mb(&prt->sched.flags, new, exp); + act = erts_atomic32_cmpxchg_mb(&prt->sched.flags, new, exp); } while (act != exp); sp->sched_flags = act; @@ -1279,14 +1279,14 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp) profile_runnable_proc(c_p, am_inactive); reds_left_in = ERTS_BIF_REDS_LEFT(c_p); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); } ASSERT(0 <= reds_left_in && reds_left_in <= CONTEXT_REDS); sp->reds_left_in = reds_left_in; prt->reds = CONTEXT_REDS - reds_left_in; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) { if (prof_runnable_ports && !(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC))) @@ -1324,9 +1324,9 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp) if (prof_runnable_ports) erts_port_task_sched_lock(&prt->sched); - act = erts_smp_atomic32_read_band_mb(&prt->sched.flags, + act = erts_atomic32_read_band_mb(&prt->sched.flags, ~ERTS_PTS_FLG_EXEC_IMM); - ERTS_SMP_LC_ASSERT(act & ERTS_PTS_FLG_EXEC_IMM); + ERTS_LC_ASSERT(act & ERTS_PTS_FLG_EXEC_IMM); if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) { if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) @@ -1341,7 +1341,7 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp) erts_port_release(prt); if (c_p) { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); if (reds != (CONTEXT_REDS - sp->reds_left_in)) { int bump_reds = reds - (CONTEXT_REDS - sp->reds_left_in); @@ -1452,7 +1452,7 @@ port_sched_op_reply(Eterm to, Uint32 *ref_num, Eterm msg, Port* prt) prt); if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } } @@ -1470,7 +1470,7 @@ erts_schedule_proc2port_signal(Process *c_p, int sched_res; if (!refp) { if (c_p) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); } else { ASSERT(c_p); @@ -1491,20 +1491,20 @@ erts_schedule_proc2port_signal(Process *c_p, * otherwise, next receive will *not* work * as expected! */ - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); if (ERTS_PROC_PENDING_EXIT(c_p)) { /* need to exit caller instead */ - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); KILL_CATCHES(c_p); c_p->freason = EXC_EXIT; return ERTS_PORT_OP_CALLER_EXIT; } - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); c_p->msg.save = c_p->msg.last; - erts_smp_proc_unlock(c_p, (ERTS_PROC_LOCKS_MSG_RECEIVE + erts_proc_unlock(c_p, (ERTS_PROC_LOCKS_MSG_RECEIVE | ERTS_PROC_LOCK_MAIN)); } @@ -1520,7 +1520,7 @@ erts_schedule_proc2port_signal(Process *c_p, task_flags); if (c_p) - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); if (sched_res != 0) { if (refp) { @@ -1531,9 +1531,9 @@ erts_schedule_proc2port_signal(Process *c_p, * containing the reference created above... 
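The c_p->msg.save = c_p->msg.last assignment above is a receive-marker optimization: before the signal is scheduled, the caller records the current tail of its private message queue so that the later receive of the reply reference starts scanning at the marker rather than at the head of a possibly long mailbox, and the JOIN_MESSAGE() just below drops the marker again when scheduling fails and no reply will ever arrive. A simplified, single-threaded sketch of the marker idea (the Msg and Mailbox types are invented for illustration and are not the ERTS message queue):

    #include <stddef.h>

    typedef struct Msg { struct Msg *next; unsigned long ref; } Msg;
    typedef struct { Msg *first, *last, *save; } Mailbox;

    void enqueue(Mailbox *mb, Msg *m)
    {
        m->next = NULL;
        if (mb->last) mb->last->next = m; else mb->first = m;
        mb->last = m;
    }

    /* Remember the tail before sending a request... */
    void mark_before_request(Mailbox *mb) { mb->save = mb->last; }

    /* ...so the reply lookup only scans messages that arrived later. */
    Msg *receive_reply(Mailbox *mb, unsigned long ref)
    {
        Msg *m = mb->save ? mb->save->next : mb->first;
        for (; m; m = m->next)
            if (m->ref == ref)
                return m;
        return NULL;
    }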
*/ ASSERT(c_p); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); JOIN_MESSAGE(c_p); - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); *refp = NIL; } return ERTS_PORT_OP_DROPPED; @@ -1566,14 +1566,14 @@ send_badsig(Port *prt) { ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND; Process* rp; Eterm connected = ERTS_PORT_GET_CONNECTED(prt); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; ERTS_LC_ASSERT(erts_get_scheduler_id()); ASSERT(is_internal_pid(connected)); rp = erts_proc_lookup_raw(connected); if (rp) { - erts_smp_proc_lock(rp, rp_locks); + erts_proc_lock(rp, rp_locks); if (!ERTS_PROC_IS_EXITING(rp)) (void) erts_send_exit_signal(NULL, prt->common.id, @@ -1584,7 +1584,7 @@ send_badsig(Port *prt) { NULL, 0); if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } /* exit sent */ } /* send_badsig */ @@ -1707,7 +1707,7 @@ call_driver_outputv(int bang_op, ErlDrvSizeT size = evp->size; ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_PORT); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt) + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt) || ERTS_IS_CRASH_DUMPING); @@ -1765,7 +1765,7 @@ port_sig_outputv(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *s case ERTS_PROC2PORT_SIG_EXEC: /* Execution of a scheduled outputv() call */ - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP) reply = am_badarg; @@ -1821,7 +1821,7 @@ call_driver_output(int bang_op, else { ErtsSchedulerData *esdp = erts_get_scheduler_data(); ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_PORT); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt) + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt) || ERTS_IS_CRASH_DUMPING); #ifdef USE_VM_PROBES @@ -1872,7 +1872,7 @@ port_sig_output(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *si case ERTS_PROC2PORT_SIG_EXEC: /* Execution of a scheduled output() call */ - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (state & ERTS_PORT_SFLGS_INVALID_LOOKUP) reply = am_badarg; @@ -2116,7 +2116,7 @@ erts_port_output(Process *c_p, * Assumes caller have checked that port is valid... */ - sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + sched_flags = erts_atomic32_read_nob(&prt->sched.flags); if (sched_flags & (busy_flgs|ERTS_PTS_FLG_EXIT)) return ((sched_flags & ERTS_PTS_FLG_EXIT) ? 
ERTS_PORT_OP_DROPPED @@ -2495,7 +2495,7 @@ erts_port_output(Process *c_p, } if (!(flags & ERTS_PORT_SIG_FLG_FORCE)) { - sched_flags = erts_smp_atomic32_read_acqb(&prt->sched.flags); + sched_flags = erts_atomic32_read_acqb(&prt->sched.flags); if (!(sched_flags & ERTS_PTS_FLG_BUSY_PORT)) { if (async_nosuspend) erts_port_task_tmp_handle_detach(ns_pthp); @@ -2720,9 +2720,9 @@ set_port_connected(int bang_op, Process *rp = erts_proc_lookup_raw(connect); if (!rp) return ERTS_PORT_OP_DROPPED; - erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_lock(rp, ERTS_PROC_LOCK_LINK); if (ERTS_PROC_IS_EXITING(rp)) { - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); return ERTS_PORT_OP_DROPPED; } @@ -2734,7 +2734,7 @@ set_port_connected(int bang_op, ERTS_PORT_SET_CONNECTED(prt, connect); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (IS_TRACED_FL(prt, F_TRACE_PORTS)) trace_port(prt, am_getting_linked, connect); @@ -2927,7 +2927,7 @@ port_link_failure(Eterm port_id, Eterm linker) trace_proc(NULL, 0, rp, am_getting_unlinked, port_id); } if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } } } @@ -3013,7 +3013,7 @@ port_monitor_failure(Eterm port_id, Eterm origin, Eterm ref_DOWN) * caller has never seen it yet. */ erts_queue_monitor_message(origin_p, &p_locks, ref_DOWN, am_port, port_id, am_noproc); - erts_smp_proc_unlock(origin_p, p_locks); + erts_proc_unlock(origin_p, p_locks); } /* Origin wants to monitor port Prt. State contains possible error, which has @@ -3041,7 +3041,7 @@ port_monitor(Port *prt, erts_aint32_t state, Eterm origin, erts_add_monitor(&ERTS_P_MONITORS(prt), MON_TARGET, ref, origin, name_or_nil); - erts_smp_proc_unlock(origin_p, p_locks); + erts_proc_unlock(origin_p, p_locks); } else { failure: port_monitor_failure(prt->common.id, origin, ref); @@ -3132,7 +3132,7 @@ port_demonitor_failure(Eterm port_id, Eterm origin, Eterm ref) erts_destroy_monitor(mon1); } - erts_smp_proc_unlock(origin_p, rp_locks); + erts_proc_unlock(origin_p, rp_locks); } /* Origin wants to demonitor port Prt. 
State contains possible error, which has @@ -3162,7 +3162,7 @@ port_demonitor(Port *port, erts_aint32_t state, Eterm origin, Eterm ref) } } if (origin_p) { /* when origin is dying, it won't be found */ - erts_smp_proc_unlock(origin_p, p_locks); + erts_proc_unlock(origin_p, p_locks); } } else { port_demonitor_failure(port->common.id, origin, ref); @@ -3249,10 +3249,10 @@ init_ack_send_reply(Port *port, Eterm resp) if (!is_internal_port(resp)) { Process *rp = erts_proc_lookup_raw(port->async_open_port->to); - erts_smp_proc_lock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_lock(rp, ERTS_PROC_LOCK_LINK); erts_remove_link(&ERTS_P_LINKS(port), port->async_open_port->to); erts_remove_link(&ERTS_P_LINKS(rp), port->common.id); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); } port_sched_op_reply(port->async_open_port->to, port->async_open_port->ref, @@ -3316,9 +3316,9 @@ void erts_init_io(int port_tab_size, { ErlDrvEntry** dp; UWord common_element_size; - erts_smp_rwmtx_opt_t drv_list_rwmtx_opts = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - drv_list_rwmtx_opts.type = ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; - drv_list_rwmtx_opts.lived = ERTS_SMP_RWMTX_LONG_LIVED; + erts_rwmtx_opt_t drv_list_rwmtx_opts = ERTS_RWMTX_OPT_DEFAULT_INITER; + drv_list_rwmtx_opts.type = ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ; + drv_list_rwmtx_opts.lived = ERTS_RWMTX_LONG_LIVED; erts_atomic64_init_nob(&bytes_in, 0); erts_atomic64_init_nob(&bytes_out, 0); @@ -3343,12 +3343,12 @@ void erts_init_io(int port_tab_size, else if (port_tab_size < ERTS_MIN_PORTS) port_tab_size = ERTS_MIN_PORTS; - erts_smp_rwmtx_init_opt(&erts_driver_list_lock, &drv_list_rwmtx_opts, "driver_list", NIL, + erts_rwmtx_init_opt(&erts_driver_list_lock, &drv_list_rwmtx_opts, "driver_list", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO); driver_list = NULL; - erts_smp_tsd_key_create(&driver_list_lock_status_key, + erts_tsd_key_create(&driver_list_lock_status_key, "erts_driver_list_lock_status_key"); - erts_smp_tsd_key_create(&driver_list_last_error_key, + erts_tsd_key_create(&driver_list_last_error_key, "erts_driver_list_last_error_key"); erts_ptab_init_table(&erts_port, @@ -3363,8 +3363,8 @@ void erts_init_io(int port_tab_size, sys_init_io(); - erts_smp_tsd_set(driver_list_lock_status_key, (void *) 1); - erts_smp_rwmtx_rwlock(&erts_driver_list_lock); + erts_tsd_set(driver_list_lock_status_key, (void *) 1); + erts_rwmtx_rwlock(&erts_driver_list_lock); init_driver(&fd_driver, &fd_driver_entry, NULL); init_driver(&vanilla_driver, &vanilla_driver_entry, NULL); @@ -3376,8 +3376,8 @@ void erts_init_io(int port_tab_size, for (dp = driver_tab; *dp != NULL; dp++) erts_add_driver_entry(*dp, NULL, 1); - erts_smp_tsd_set(driver_list_lock_status_key, NULL); - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_tsd_set(driver_list_lock_status_key, NULL); + erts_rwmtx_rwunlock(&erts_driver_list_lock); } #if defined(ERTS_ENABLE_LOCK_COUNT) @@ -3649,7 +3649,7 @@ deliver_result(Port *prt, Eterm sender, Eterm pid, Eterm res) ErtsProcLocks rp_locks = 0; int scheduler = erts_get_scheduler_id() != 0; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; ASSERT(!prt || prt->common.id == sender); ERTS_LC_ASSERT(!prt || erts_lc_is_port_locked(prt)); @@ -3680,7 +3680,7 @@ deliver_result(Port *prt, Eterm sender, Eterm pid, Eterm res) erts_queue_message(rp, rp_locks, mp, tuple, sender); if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); if (!scheduler) erts_proc_dec_refc(rp); @@ -3711,8 
+3711,8 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to, int scheduler = erts_get_scheduler_id() != 0; int trace_send = IS_TRACED_FL(prt, F_TRACE_SEND); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_CHK_NO_PROC_LOCKS; need = 3 + 3 + 2*hlen; @@ -3779,7 +3779,7 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to, ERL_MESSAGE_TOKEN(mp) = am_undefined; erts_queue_message(rp, rp_locks, mp, tuple, prt->common.id); if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); if (!scheduler) erts_proc_dec_refc(rp); } @@ -3814,7 +3814,7 @@ static void flush_linebuf_messages(Port *prt, erts_aint32_t state) LineBufContext lc; int ret; - ERTS_SMP_LC_ASSERT(!prt || erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(!prt || erts_lc_is_port_locked(prt)); if (!prt) return; @@ -3858,8 +3858,8 @@ deliver_vec_message(Port* prt, /* Port */ erts_aint32_t state; int trace_send = IS_TRACED_FL(prt, F_TRACE_SEND); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_CHK_NO_PROC_LOCKS; /* * Check arguments for validity. @@ -3950,7 +3950,7 @@ deliver_vec_message(Port* prt, /* Port */ ERL_MESSAGE_TOKEN(mp) = am_undefined; erts_queue_message(rp, rp_locks, mp, tuple, prt->common.id); - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); if (!scheduler) erts_proc_dec_refc(rp); } @@ -3985,8 +3985,8 @@ static void flush_port(Port *p) { int fpe_was_unmasked; - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p)); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(p)); if (p->drv_ptr->flush != NULL) { ERTS_MSACC_PUSH_STATE_M(); @@ -4038,8 +4038,8 @@ terminate_port(Port *prt) erts_aint32_t state; ErtsPrtSD *psd; - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); ASSERT(!ERTS_P_LINKS(prt)); ASSERT(!ERTS_P_MONITORS(prt)); @@ -4091,9 +4091,9 @@ terminate_port(Port *prt) trace_port_send(prt, connected_id, am_closed, 1); if(drv->handle != NULL) { - erts_smp_rwmtx_rlock(&erts_driver_list_lock); + erts_rwmtx_rlock(&erts_driver_list_lock); erts_ddll_decrement_port_count(drv->handle); - erts_smp_rwmtx_runlock(&erts_driver_list_lock); + erts_rwmtx_runlock(&erts_driver_list_lock); } stopq(prt); /* clear queue memory */ if(prt->linebuf != NULL){ @@ -4103,7 +4103,7 @@ terminate_port(Port *prt) erts_cleanup_port_data(prt); - psd = (ErtsPrtSD *) erts_smp_atomic_read_nob(&prt->psd); + psd = (ErtsPrtSD *) erts_atomic_read_nob(&prt->psd); if (psd) erts_free(ERTS_ALC_T_PRTSD, psd); @@ -4116,7 +4116,7 @@ terminate_port(Port *prt) * port has been removed from the port table (in kill_port()). 
*/ if ((state & ERTS_PORT_SFLG_HALT) - && (erts_smp_atomic32_dec_read_nob(&erts_halt_progress) == 0)) { + && (erts_atomic32_dec_read_nob(&erts_halt_progress) == 0)) { erts_port_release(prt); /* We will exit and never return */ erts_flush_async_exit(erts_halt_code, ""); } @@ -4144,7 +4144,7 @@ static void sweep_one_monitor(ErtsMonitor *mon, void *vpsc) goto done; } rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (rmon == NULL) { goto done; } @@ -4218,7 +4218,7 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc) 0); if (xres >= 0) { if (rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) { - erts_smp_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND); + erts_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND); rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND; } /* We didn't exit the process and it is traced */ @@ -4228,7 +4228,7 @@ static void sweep_one_link(ErtsLink *lnk, void *vpsc) erts_destroy_link(rlnk); } - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } } erts_destroy_link(lnk); @@ -4263,7 +4263,7 @@ port_fire_one_monitor(ErtsMonitor *mon, void *ctx0) UnUseTmpHeapNoproc(3); rmon = erts_remove_monitor(&ERTS_P_MONITORS(origin), mon->ref); - erts_smp_proc_unlock(origin, origin_locks); + erts_proc_unlock(origin, origin_locks); if (rmon) { erts_destroy_monitor(rmon); @@ -4289,8 +4289,8 @@ erts_deliver_port_exit(Port *prt, Eterm from, Eterm reason, int send_closed, Eterm modified_reason; erts_aint32_t state, set_state_flags; - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); modified_reason = (reason == am_kill) ? am_killed : reason; @@ -4651,7 +4651,7 @@ port_sig_control(Port *prt, prt); if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); goto done; } } @@ -4704,7 +4704,7 @@ erts_port_control(Process* c_p, int copy; ErtsProc2PortSigData *sigdp; - sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + sched_flags = erts_atomic32_read_nob(&prt->sched.flags); if (sched_flags & ERTS_PTS_FLG_EXIT) return ERTS_PORT_OP_BADARG; @@ -5016,11 +5016,11 @@ port_sig_call(Port *prt, prt); if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); goto done; } if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } } } @@ -5054,7 +5054,7 @@ erts_port_call(Process* c_p, erts_aint32_t sched_flags; ErtsProc2PortSigData *sigdp; - sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); + sched_flags = erts_atomic32_read_nob(&prt->sched.flags); if (sched_flags & ERTS_PTS_FLG_EXIT) { return ERTS_PORT_OP_BADARG; } @@ -5272,7 +5272,7 @@ port_sig_info(Port *prt, prt); } if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } return ERTS_PORT_REDS_INFO; } @@ -5341,7 +5341,7 @@ typedef struct { Uint sched_id; Eterm pid; Uint32 refn[ERTS_REF_NUMBERS]; - erts_smp_atomic32_t refc; + erts_atomic32_t refc; } ErtsIOBytesReq; static void @@ -5391,10 +5391,10 @@ reply_io_bytes(void *vreq) if (req->sched_id == sched_id) rp_locks &= ~ERTS_PROC_LOCK_MAIN; if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } - if (erts_smp_atomic32_dec_read_nob(&req->refc) == 0) + if (erts_atomic32_dec_read_nob(&req->refc) == 0) erts_free(ERTS_ALC_T_IOB_REQ, req); } @@ -5417,7 +5417,7 @@ erts_request_io_bytes(Process *c_p) req->refn[0] = refn[0]; req->refn[1] = refn[1]; 
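The refc field initialized just below is the usual fan-out/fan-in idiom: the request block is preset to erts_no_schedulers references, every scheduler replies and decrements, and whichever decrement reaches zero frees the block; the erts_halt_progress countdown at the top of this stretch relies on the same dec-and-test shape. A compact, self-contained sketch of the pattern with C11 atomics and POSIX threads (worker count and payload are made up for the example):

    #include <stdatomic.h>
    #include <stdlib.h>
    #include <pthread.h>

    #define NWORKERS 4

    typedef struct {
        atomic_int  refc;   /* preset to the number of repliers       */
        atomic_long bytes;  /* aggregated result, every worker adds   */
    } Req;

    static void *worker(void *arg)
    {
        Req *req = arg;
        /* contribute this worker's share (dummy value here) */
        atomic_fetch_add_explicit(&req->bytes, 42, memory_order_relaxed);
        if (atomic_fetch_sub_explicit(&req->refc, 1,
                                      memory_order_acq_rel) == 1) {
            /* last decrement owns the block: consume and free it */
            long total = atomic_load_explicit(&req->bytes,
                                              memory_order_relaxed);
            (void) total;
            free(req);
        }
        return NULL;
    }

    int main(void)
    {
        Req *req = calloc(1, sizeof *req);
        pthread_t tid[NWORKERS];
        atomic_init(&req->refc, NWORKERS);
        for (int i = 0; i < NWORKERS; i++)
            pthread_create(&tid[i], NULL, worker, req);
        for (int i = 0; i < NWORKERS; i++)
            pthread_join(tid[i], NULL);
        return 0;
    }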
req->refn[2] = refn[2]; - erts_smp_atomic32_init_nob(&req->refc, + erts_atomic32_init_nob(&req->refc, (erts_aint32_t) erts_no_schedulers); if (erts_no_schedulers > 1) @@ -5509,14 +5509,14 @@ set_busy_port(ErlDrvPort dprt, int on) DTRACE_CHARBUF(port_str, 16); #endif - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; prt = erts_drvport2port(dprt); if (prt == ERTS_INVALID_ERL_DRV_PORT) return; if (on) { - flags = erts_smp_atomic32_read_bor_acqb(&prt->sched.flags, + flags = erts_atomic32_read_bor_acqb(&prt->sched.flags, ERTS_PTS_FLG_BUSY_PORT); if (flags & ERTS_PTS_FLG_BUSY_PORT) return; /* Already busy */ @@ -5532,7 +5532,7 @@ set_busy_port(ErlDrvPort dprt, int on) } #endif } else { - flags = erts_smp_atomic32_read_band_acqb(&prt->sched.flags, + flags = erts_atomic32_read_band_acqb(&prt->sched.flags, ~ERTS_PTS_FLG_BUSY_PORT); if (!(flags & ERTS_PTS_FLG_BUSY_PORT)) return; /* Already non-busy */ @@ -5626,7 +5626,7 @@ int get_port_flags(ErlDrvPort ix) if (prt == ERTS_INVALID_ERL_DRV_PORT) return 0; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); flags = 0; if (state & ERTS_PORT_SFLG_BINARY_IO) @@ -5642,8 +5642,8 @@ void erts_raw_port_command(Port* p, byte* buf, Uint len) int fpe_was_unmasked; ERTS_MSACC_PUSH_STATE_M(); - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p)); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(p)); if (len > (Uint) INT_MAX) erts_exit(ERTS_ABORT_EXIT, @@ -5672,10 +5672,10 @@ int async_ready(Port *p, void* data) { int need_free = 1; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (p) { - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(p)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(p)); if (p->drv_ptr->ready_async != NULL) { ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_PORT); #ifdef USE_VM_PROBES @@ -5860,8 +5860,8 @@ void driver_report_exit(ErlDrvPort ix, int status) if (prt == ERTS_INVALID_ERL_DRV_PORT) return; - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); pid = ERTS_PORT_GET_CONNECTED(prt); ASSERT(is_internal_pid(pid)); @@ -5884,7 +5884,7 @@ void driver_report_exit(ErlDrvPort ix, int status) ERL_MESSAGE_TOKEN(mp) = am_undefined; erts_queue_message(rp, rp_locks, mp, tuple, prt->common.id); - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); if (!scheduler) erts_proc_dec_refc(rp); } @@ -6524,7 +6524,7 @@ driver_deliver_term(Port *prt, Eterm to, ErlDrvTermData* data, int len) } if (rp) { if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); if (!scheduler) erts_proc_dec_refc(rp); } @@ -6565,12 +6565,12 @@ deliver_term_check_port(ErlDrvTermData port_id, Eterm *connected_p, done: if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) { - ERTS_SMP_LC_ASSERT(!prt || !erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(!prt || !erts_lc_is_port_locked(prt)); erts_thr_progress_unmanaged_continue(dhndl); ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore); } else if (res == 1) { - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); *trace_prt = prt; } return res; @@ -6598,13 +6598,13 @@ driver_output_term(ErlDrvPort drvport, ErlDrvTermData* data, int len) erts_aint32_t state; Port* prt; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; /* NOTE! It *not* safe to access 'drvport' from unmanaged threads. 
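Note how set_busy_port() earlier in this stretch depends on the atomic read-bor/read-band operations returning the previous flag word; that old value is what detects the "Already busy" and "Already non-busy" cases without taking any lock. The same shape in portable C11 (the flag bit and names are illustrative, not the real ERTS_PTS_* encoding):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define FLG_BUSY (1u << 3)        /* illustrative bit only */

    static atomic_uint sched_flags;

    /* Returns false when the port was already busy, mirroring the
     * early return in set_busy_port(). */
    bool mark_busy(void)
    {
        unsigned old = atomic_fetch_or_explicit(&sched_flags, FLG_BUSY,
                                                memory_order_acquire);
        return !(old & FLG_BUSY);
    }

    /* Returns false when the port was already non-busy. */
    bool clear_busy(void)
    {
        unsigned old = atomic_fetch_and_explicit(&sched_flags, ~FLG_BUSY,
                                                 memory_order_acquire);
        return (old & FLG_BUSY) != 0;
    }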
*/ prt = erts_drvport2port_state(drvport, &state); if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; /* invalid (dead) */ - ERTS_SMP_CHK_NO_PROC_LOCKS; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_CHK_NO_PROC_LOCKS; + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (state & ERTS_PORT_SFLG_CLOSING) return 0; @@ -6641,14 +6641,14 @@ driver_send_term(ErlDrvPort drvport, * internal data representation for ErlDrvPort. */ Port* prt = NULL; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (erts_thr_progress_is_managed_thread()) { erts_aint32_t state; prt = erts_drvport2port_state(drvport, &state); if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; /* invalid (dead) */ - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (state & ERTS_PORT_SFLG_CLOSING) return 0; } @@ -6668,11 +6668,11 @@ int driver_output_binary(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, Port* prt = erts_drvport2port_state(ix, &state); ErtsSchedulerData *esdp = erts_get_scheduler_data(); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (state & ERTS_PORT_SFLG_CLOSING) return 0; @@ -6707,12 +6707,12 @@ int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, Port* prt = erts_drvport2port_state(ix, &state); ErtsSchedulerData *esdp = erts_get_scheduler_data(); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (state & ERTS_PORT_SFLG_CLOSING) return 0; @@ -6746,7 +6746,7 @@ int driver_output2(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, int driver_output(ErlDrvPort ix, char* buf, ErlDrvSizeT len) { - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; return driver_output2(ix, NULL, 0, buf, len); } @@ -6762,7 +6762,7 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, erts_aint32_t state; ErtsSchedulerData *esdp = erts_get_scheduler_data(); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; ASSERT(vec->size >= skip); if (vec->size <= skip) @@ -6773,7 +6773,7 @@ int driver_outputv(ErlDrvPort ix, char* hbuf, ErlDrvSizeT hlen, if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (state & ERTS_PORT_SFLG_CLOSING) return 0; @@ -6995,7 +6995,7 @@ static void driver_monitor_lock_pdl(Port *p) { /* Now we either have the port lock or the port_data_lock */ ERTS_LC_ASSERT(!p->port_data_lock || erts_lc_mtx_is_locked(&(p->port_data_lock->mtx))); - ERTS_SMP_LC_ASSERT(p->port_data_lock + ERTS_LC_ASSERT(p->port_data_lock || erts_lc_is_port_locked(p)); } @@ -7003,7 +7003,7 @@ static void driver_monitor_unlock_pdl(Port *p) { /* We should either have the port lock or the port_data_lock */ ERTS_LC_ASSERT(!p->port_data_lock || erts_lc_mtx_is_locked(&(p->port_data_lock->mtx))); - ERTS_SMP_LC_ASSERT(p->port_data_lock + ERTS_LC_ASSERT(p->port_data_lock || erts_lc_is_port_locked(p)); if (p->port_data_lock) { driver_pdl_unlock(p->port_data_lock); @@ -7468,7 +7468,7 @@ int driver_set_timer(ErlDrvPort ix, unsigned long t) { Port* prt = erts_drvport2port(ix); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; @@ -7485,7 +7485,7 @@ int driver_cancel_timer(ErlDrvPort ix) Port* prt = erts_drvport2port(ix); if (prt 
== ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); erts_cancel_port_timer(prt); return 0; } @@ -7496,11 +7496,11 @@ driver_read_timer(ErlDrvPort ix, unsigned long* t) Port* prt = erts_drvport2port(ix); Sint64 left; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); left = erts_read_port_timer(prt); if (left < 0) @@ -7515,7 +7515,7 @@ int driver_get_now(ErlDrvNowData *now_data) { Uint mega,secs,micro; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (now_data == NULL) { return -1; @@ -7589,7 +7589,7 @@ static int do_driver_monitor_process(Port *prt, erts_add_monitor(&ERTS_P_MONITORS(prt), MON_ORIGIN, ref, rp->common.id, NIL); erts_add_monitor(&ERTS_P_MONITORS(rp), MON_TARGET, ref, prt->common.id, NIL); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); erts_ref_to_driver_monitor(ref,monitor); return 0; } @@ -7613,7 +7613,7 @@ int driver_monitor_process(ErlDrvPort drvport, /* Now (in SMP) we should have either the port lock (if we have a scheduler) or the port data lock (if we're a driver thread) */ - ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock)); + ERTS_LC_ASSERT((sched != NULL || prt->port_data_lock)); ret = do_driver_monitor_process(prt,process,monitor); DRV_MONITOR_UNLOCK_PDL(prt); return ret; @@ -7648,7 +7648,7 @@ static int do_driver_demonitor_process(Port *prt, const ErlDrvMonitor *monitor) if (rp) { ErtsMonitor *rmon; rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), ref); - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (rmon != NULL) { erts_destroy_monitor(rmon); } @@ -7671,7 +7671,7 @@ int driver_demonitor_process(ErlDrvPort drvport, /* Now we should have either the port lock (if we have a scheduler) or the port data lock (if we're a driver thread) */ - ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock)); + ERTS_LC_ASSERT((sched != NULL || prt->port_data_lock)); ret = do_driver_demonitor_process(prt,monitor); DRV_MONITOR_UNLOCK_PDL(prt); return ret; @@ -7712,7 +7712,7 @@ ErlDrvTermData driver_get_monitored_process(ErlDrvPort drvport, /* Now we should have either the port lock (if we have a scheduler) or the port data lock (if we're a driver thread) */ - ERTS_SMP_LC_ASSERT((sched != NULL || prt->port_data_lock)); + ERTS_LC_ASSERT((sched != NULL || prt->port_data_lock)); ret = do_driver_get_monitored_process(prt,monitor); DRV_MONITOR_UNLOCK_PDL(prt); return ret; @@ -7733,7 +7733,7 @@ void erts_fire_port_monitor(Port *prt, Eterm ref) int fpe_was_unmasked; ERTS_MSACC_PUSH_STATE_M(); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); ASSERT(prt->drv_ptr != NULL); DRV_MONITOR_LOCK_PDL(prt); if (erts_lookup_monitor(ERTS_P_MONITORS(prt), ref) == NULL) { @@ -7780,11 +7780,11 @@ driver_failure_term(ErlDrvPort ix, Eterm term, int eof) erts_aint32_t state; Port* prt = erts_drvport2port_state(ix, &state); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if (prt->async_open_port) init_ack_send_reply(prt, prt->common.id); @@ -7819,7 +7819,7 @@ int driver_exit(ErlDrvPort ix, int err) ErtsLink *lnk, *rlnk = NULL; Eterm connected; - 
ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; @@ -7833,7 +7833,7 @@ int driver_exit(ErlDrvPort ix, int err) lnk = erts_remove_link(&ERTS_P_LINKS(prt), connected); if (rp) - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK); + erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK); if (rlnk != NULL) { erts_destroy_link(rlnk); @@ -7887,7 +7887,7 @@ ErlDrvTermData driver_mk_atom(char* string) sys_strlen(string), ERTS_ATOM_ENC_LATIN1, 1); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; return (ErlDrvTermData) am; } @@ -7896,27 +7896,27 @@ ErlDrvTermData driver_mk_port(ErlDrvPort ix) Port* prt = erts_drvport2port(ix); if (prt == ERTS_INVALID_ERL_DRV_PORT) return (ErlDrvTermData) NIL; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); return (ErlDrvTermData) prt->common.id; } ErlDrvTermData driver_connected(ErlDrvPort ix) { Port* prt = erts_drvport2port(ix); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return NIL; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); return ERTS_PORT_GET_CONNECTED(prt); } ErlDrvTermData driver_caller(ErlDrvPort ix) { Port* prt = erts_drvport2port(ix); - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return NIL; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); return prt->caller; } @@ -7925,20 +7925,20 @@ int driver_lock_driver(ErlDrvPort ix) Port* prt = erts_drvport2port(ix); DE_Handle* dh; - ERTS_SMP_CHK_NO_PROC_LOCKS; + ERTS_CHK_NO_PROC_LOCKS; if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - erts_smp_rwmtx_rwlock(&erts_driver_list_lock); + erts_rwmtx_rwlock(&erts_driver_list_lock); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); if ((dh = (DE_Handle*)prt->drv_ptr->handle ) == NULL) { - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_rwmtx_rwunlock(&erts_driver_list_lock); return -1; } erts_ddll_lock_driver(dh, prt->drv_ptr->name); - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_rwmtx_rwunlock(&erts_driver_list_lock); return 0; } @@ -7946,9 +7946,9 @@ int driver_lock_driver(ErlDrvPort ix) static int maybe_lock_driver_list(void) { void *rec_lock; - rec_lock = erts_smp_tsd_get(driver_list_lock_status_key); + rec_lock = erts_tsd_get(driver_list_lock_status_key); if (rec_lock == 0) { - erts_smp_rwmtx_rwlock(&erts_driver_list_lock); + erts_rwmtx_rwlock(&erts_driver_list_lock); return 1; } return 0; @@ -7956,7 +7956,7 @@ static int maybe_lock_driver_list(void) static void maybe_unlock_driver_list(int doit) { if (doit) { - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_rwmtx_rwunlock(&erts_driver_list_lock); } } /* @@ -7979,7 +7979,7 @@ void *driver_dl_open(char * path) { void *ptr; int res; - int *last_error_p = erts_smp_tsd_get(driver_list_last_error_key); + int *last_error_p = erts_tsd_get(driver_list_last_error_key); int locked = maybe_lock_driver_list(); if ((res = erts_sys_ddll_open(path, &ptr, NULL)) == 0) { maybe_unlock_driver_list(locked); @@ -7987,7 +7987,7 @@ void *driver_dl_open(char * path) } else { if (!last_error_p) { last_error_p = erts_alloc(ERTS_ALC_T_DDLL_ERRCODES, sizeof(int)); - erts_smp_tsd_set(driver_list_last_error_key,last_error_p); + erts_tsd_set(driver_list_last_error_key,last_error_p); } *last_error_p = res; maybe_unlock_driver_list(locked); @@ -7999,7 +7999,7 @@ void 
*driver_dl_sym(void * handle, char *func_name) { void *ptr; int res; - int *last_error_p = erts_smp_tsd_get(driver_list_lock_status_key); + int *last_error_p = erts_tsd_get(driver_list_lock_status_key); int locked = maybe_lock_driver_list(); if ((res = erts_sys_ddll_sym(handle, func_name, &ptr)) == 0) { maybe_unlock_driver_list(locked); @@ -8007,7 +8007,7 @@ void *driver_dl_sym(void * handle, char *func_name) } else { if (!last_error_p) { last_error_p = erts_alloc(ERTS_ALC_T_DDLL_ERRCODES, sizeof(int)); - erts_smp_tsd_set(driver_list_lock_status_key,last_error_p); + erts_tsd_set(driver_list_lock_status_key,last_error_p); } *last_error_p = res; maybe_unlock_driver_list(locked); @@ -8027,7 +8027,7 @@ int driver_dl_close(void *handle) char *driver_dl_error(void) { char *res; - int *last_error_p = erts_smp_tsd_get(driver_list_lock_status_key); + int *last_error_p = erts_tsd_get(driver_list_lock_status_key); int locked = maybe_lock_driver_list(); res = erts_ddll_error((last_error_p != NULL) ? (*last_error_p) : ERL_DE_ERROR_UNSPECIFIED); maybe_unlock_driver_list(locked); @@ -8230,7 +8230,7 @@ void erts_destroy_driver(erts_driver_t *drv) { if (drv->lock) { - erts_smp_mtx_destroy(drv->lock); + erts_mtx_destroy(drv->lock); erts_free(ERTS_ALC_T_DRIVER_LOCK, drv->lock); } erts_free(ERTS_ALC_T_DRIVER, drv); @@ -8243,7 +8243,7 @@ erts_destroy_driver(erts_driver_t *drv) void add_driver_entry(ErlDrvEntry *drv){ void *rec_lock; - rec_lock = erts_smp_tsd_get(driver_list_lock_status_key); + rec_lock = erts_tsd_get(driver_list_lock_status_key); /* * Ignore result of erts_add_driver_entry, the init is not * allowed to fail when drivers are added by drivers. @@ -8257,7 +8257,7 @@ int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle, int driver_list_lo int res; if (!driver_list_locked) { - erts_smp_rwmtx_rwlock(&erts_driver_list_lock); + erts_rwmtx_rwlock(&erts_driver_list_lock); } dp->next = driver_list; @@ -8268,7 +8268,7 @@ int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle, int driver_list_lo driver_list = dp; if (!driver_list_locked) { - erts_smp_tsd_set(driver_list_lock_status_key, (void *) 1); + erts_tsd_set(driver_list_lock_status_key, (void *) 1); } res = init_driver(dp, de, handle); @@ -8285,8 +8285,8 @@ int erts_add_driver_entry(ErlDrvEntry *de, DE_Handle *handle, int driver_list_lo } if (!driver_list_locked) { - erts_smp_tsd_set(driver_list_lock_status_key, NULL); - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_tsd_set(driver_list_lock_status_key, NULL); + erts_rwmtx_rwunlock(&erts_driver_list_lock); } return res; } @@ -8297,9 +8297,9 @@ int remove_driver_entry(ErlDrvEntry *drv) erts_driver_t *dp; void *rec_lock; - rec_lock = erts_smp_tsd_get(driver_list_lock_status_key); + rec_lock = erts_tsd_get(driver_list_lock_status_key); if (rec_lock == NULL) { - erts_smp_rwmtx_rwlock(&erts_driver_list_lock); + erts_rwmtx_rwlock(&erts_driver_list_lock); } dp = driver_list; while (dp && dp->entry != drv) @@ -8307,7 +8307,7 @@ int remove_driver_entry(ErlDrvEntry *drv) if (dp) { if (dp->handle) { if (rec_lock == NULL) { - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_rwmtx_rwunlock(&erts_driver_list_lock); } return -1; } @@ -8321,12 +8321,12 @@ int remove_driver_entry(ErlDrvEntry *drv) } erts_destroy_driver(dp); if (rec_lock == NULL) { - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_rwmtx_rwunlock(&erts_driver_list_lock); } return 1; } if (rec_lock == NULL) { - erts_smp_rwmtx_rwunlock(&erts_driver_list_lock); + erts_rwmtx_rwunlock(&erts_driver_list_lock); } return 
0; } diff --git a/erts/emulator/beam/module.c b/erts/emulator/beam/module.c index 7987cb2eb5..baeec115ea 100644 --- a/erts/emulator/beam/module.c +++ b/erts/emulator/beam/module.c @@ -39,9 +39,9 @@ static IndexTable module_tables[ERTS_NUM_CODE_IX]; -erts_smp_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX]; +erts_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX]; -static erts_smp_atomic_t tot_module_bytes; +static erts_atomic_t tot_module_bytes; /* SMP note: Active module table lookup and current module instance can be * read without any locks. Old module instances are protected by @@ -49,8 +49,6 @@ static erts_smp_atomic_t tot_module_bytes; * Staging table is protected by the "code_ix lock". */ -#include "erl_smp.h" - void module_info(fmtfn_t to, void *to_arg) { index_info(to, to_arg, &module_tables[erts_active_code_ix()]); @@ -84,7 +82,7 @@ void erts_module_instance_init(struct erl_module_instance* modi) static Module* module_alloc(Module* tmpl) { Module* obj = (Module*) erts_alloc(ERTS_ALC_T_MODULE, sizeof(Module)); - erts_smp_atomic_add_nob(&tot_module_bytes, sizeof(Module)); + erts_atomic_add_nob(&tot_module_bytes, sizeof(Module)); obj->module = tmpl->module; obj->slot.index = -1; @@ -98,7 +96,7 @@ static Module* module_alloc(Module* tmpl) static void module_free(Module* mod) { erts_free(ERTS_ALC_T_MODULE, mod); - erts_smp_atomic_add_nob(&tot_module_bytes, -sizeof(Module)); + erts_atomic_add_nob(&tot_module_bytes, -sizeof(Module)); } void init_module_table(void) @@ -120,10 +118,10 @@ void init_module_table(void) } for (i=0; i<ERTS_NUM_CODE_IX; i++) { - erts_smp_rwmtx_init(&the_old_code_rwlocks[i], "old_code", make_small(i), + erts_rwmtx_init(&the_old_code_rwlocks[i], "old_code", make_small(i), ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); } - erts_smp_atomic_init_nob(&tot_module_bytes, 0); + erts_atomic_init_nob(&tot_module_bytes, 0); } @@ -159,14 +157,14 @@ static Module* put_module(Eterm mod, IndexTable* mod_tab) oldsz = index_table_sz(mod_tab); res = (Module*) index_put_entry(mod_tab, (void*) &e); newsz = index_table_sz(mod_tab); - erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz)); + erts_atomic_add_nob(&tot_module_bytes, (newsz - oldsz)); return res; } Module* erts_put_module(Eterm mod) { - ERTS_SMP_LC_ASSERT(erts_initialized == 0 + ERTS_LC_ASSERT(erts_initialized == 0 || erts_has_code_write_permission()); return put_module(mod, &module_tables[erts_staging_code_ix()]); @@ -184,7 +182,7 @@ int module_code_size(ErtsCodeIndex code_ix) int module_table_sz(void) { - return erts_smp_atomic_read_nob(&tot_module_bytes); + return erts_atomic_read_nob(&tot_module_bytes); } #ifdef DEBUG @@ -233,7 +231,7 @@ void module_start_staging(void) copy_module(dst_mod, src_mod); } newsz = index_table_sz(dst); - erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz)); + erts_atomic_add_nob(&tot_module_bytes, (newsz - oldsz)); entries_at_start_staging = dst->entries; IF_DEBUG(dbg_load_code_ix = erts_staging_code_ix()); @@ -251,7 +249,7 @@ void module_end_staging(int commit) oldsz = index_table_sz(tab); index_erase_latest_from(tab, entries_at_start_staging); newsz = index_table_sz(tab); - erts_smp_atomic_add_nob(&tot_module_bytes, (newsz - oldsz)); + erts_atomic_add_nob(&tot_module_bytes, (newsz - oldsz)); } IF_DEBUG(dbg_load_code_ix = -1); diff --git a/erts/emulator/beam/module.h b/erts/emulator/beam/module.h index 9d258d5dbf..9a81e6035b 100644 --- a/erts/emulator/beam/module.h +++ b/erts/emulator/beam/module.h @@ -72,29 +72,29 @@ int 
erts_is_old_code_rlocked(ErtsCodeIndex); #if ERTS_GLB_INLINE_INCL_FUNC_DEF -extern erts_smp_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX]; +extern erts_rwmtx_t the_old_code_rwlocks[ERTS_NUM_CODE_IX]; ERTS_GLB_INLINE void erts_rwlock_old_code(ErtsCodeIndex code_ix) { - erts_smp_rwmtx_rwlock(&the_old_code_rwlocks[code_ix]); + erts_rwmtx_rwlock(&the_old_code_rwlocks[code_ix]); } ERTS_GLB_INLINE void erts_rwunlock_old_code(ErtsCodeIndex code_ix) { - erts_smp_rwmtx_rwunlock(&the_old_code_rwlocks[code_ix]); + erts_rwmtx_rwunlock(&the_old_code_rwlocks[code_ix]); } ERTS_GLB_INLINE void erts_rlock_old_code(ErtsCodeIndex code_ix) { - erts_smp_rwmtx_rlock(&the_old_code_rwlocks[code_ix]); + erts_rwmtx_rlock(&the_old_code_rwlocks[code_ix]); } ERTS_GLB_INLINE void erts_runlock_old_code(ErtsCodeIndex code_ix) { - erts_smp_rwmtx_runlock(&the_old_code_rwlocks[code_ix]); + erts_rwmtx_runlock(&the_old_code_rwlocks[code_ix]); } #ifdef ERTS_ENABLE_LOCK_CHECK ERTS_GLB_INLINE int erts_is_old_code_rlocked(ErtsCodeIndex code_ix) { - return erts_smp_lc_rwmtx_is_rlocked(&the_old_code_rwlocks[code_ix]); + return erts_lc_rwmtx_is_rlocked(&the_old_code_rwlocks[code_ix]); } #endif diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c index aec8777e8b..92a0854ad3 100644 --- a/erts/emulator/beam/register.c +++ b/erts/emulator/beam/register.c @@ -38,14 +38,14 @@ static Hash process_reg; #define REG_HASH(term) ((HashValue) atom_val(term)) -static erts_smp_rwmtx_t regtab_rwmtx; -#define reg_try_read_lock() erts_smp_rwmtx_tryrlock(&regtab_rwmtx) -#define reg_try_write_lock() erts_smp_rwmtx_tryrwlock(&regtab_rwmtx) -#define reg_read_lock() erts_smp_rwmtx_rlock(&regtab_rwmtx) -#define reg_write_lock() erts_smp_rwmtx_rwlock(&regtab_rwmtx) -#define reg_read_unlock() erts_smp_rwmtx_runlock(&regtab_rwmtx) -#define reg_write_unlock() erts_smp_rwmtx_rwunlock(&regtab_rwmtx) +#define reg_try_read_lock() erts_rwmtx_tryrlock(&regtab_rwmtx) +#define reg_try_write_lock() erts_rwmtx_tryrwlock(&regtab_rwmtx) +#define reg_read_lock() erts_rwmtx_rlock(&regtab_rwmtx) +#define reg_write_lock() erts_rwmtx_rwlock(&regtab_rwmtx) +#define reg_read_unlock() erts_rwmtx_runlock(&regtab_rwmtx) +#define reg_write_unlock() erts_rwmtx_rwunlock(&regtab_rwmtx) static ERTS_INLINE void reg_safe_read_lock(Process *c_p, ErtsProcLocks *c_p_locks) @@ -63,7 +63,7 @@ reg_safe_read_lock(Process *c_p, ErtsProcLocks *c_p_locks) } /* Release process locks in order to avoid deadlock */ - erts_smp_proc_unlock(c_p, *c_p_locks); + erts_proc_unlock(c_p, *c_p_locks); *c_p_locks = 0; } @@ -86,7 +86,7 @@ reg_safe_write_lock(Process *c_p, ErtsProcLocks *c_p_locks) } /* Release process locks in order to avoid deadlock */ - erts_smp_proc_unlock(c_p, *c_p_locks); + erts_proc_unlock(c_p, *c_p_locks); *c_p_locks = 0; } @@ -139,11 +139,11 @@ static void reg_free(RegProc *obj) void init_register_table(void) { HashFunctions f; - erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER; - rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ; - rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED; + erts_rwmtx_opt_t rwmtx_opt = ERTS_RWMTX_OPT_DEFAULT_INITER; + rwmtx_opt.type = ERTS_RWMTX_TYPE_FREQUENT_READ; + rwmtx_opt.lived = ERTS_RWMTX_LONG_LIVED; - erts_smp_rwmtx_init_opt(&regtab_rwmtx, &rwmtx_opt, "reg_tab", NIL, + erts_rwmtx_init_opt(&regtab_rwmtx, &rwmtx_opt, "reg_tab", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); f.hash = (H_FUN) reg_hash; @@ -173,7 +173,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id) Process *proc = NULL;
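The registry lock configured in init_register_table() above is tuned as ERTS_RWMTX_TYPE_FREQUENT_READ because name lookups vastly outnumber register/unregister operations: readers should proceed in parallel while writers stay exclusive. A minimal read-mostly registry with the same locking shape, sketched with POSIX rwlocks (the table layout and names are invented for the example):

    #include <pthread.h>
    #include <string.h>

    #define MAX_REG 64

    static pthread_rwlock_t regtab = PTHREAD_RWLOCK_INITIALIZER;
    static struct { char name[32]; int id; } tab[MAX_REG];
    static int nreg;

    /* Hot path: taken under the read lock so lookups run concurrently. */
    int whereis(const char *name)
    {
        int id = -1;
        pthread_rwlock_rdlock(&regtab);
        for (int i = 0; i < nreg; i++)
            if (strcmp(tab[i].name, name) == 0) { id = tab[i].id; break; }
        pthread_rwlock_unlock(&regtab);
        return id;
    }

    /* Cold path: registration takes the write lock and re-checks for a
     * duplicate name while holding it. Returns 0 on failure. */
    int reg_name(const char *name, int id)
    {
        int ok = 0;
        pthread_rwlock_wrlock(&regtab);
        if (nreg < MAX_REG) {
            int i;
            for (i = 0; i < nreg; i++)
                if (strcmp(tab[i].name, name) == 0) break;
            if (i == nreg) {
                strncpy(tab[nreg].name, name, sizeof tab[nreg].name - 1);
                tab[nreg].id = id;
                nreg++;
                ok = 1;
            }
        }
        pthread_rwlock_unlock(&regtab);
        return ok;
    }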
Port *port = NULL; RegProc r, *rp; - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p); if (is_not_atom(name) || name == am_undefined) return res; @@ -183,7 +183,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id) else { if (is_not_internal_pid(id) && is_not_internal_port(id)) return res; - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN); if (is_internal_port(id)) { port = erts_id2port(id); if (!port) @@ -196,7 +196,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id) reg_safe_write_lock(proc, &proc_locks); if (proc && !proc_locks) - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } if (is_internal_pid(id)) { @@ -211,7 +211,7 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id) } else { ASSERT(!INVALID_PORT(port, id)); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(port)); r.pt = port; if (r.pt->common.u.alive.reg) goto done; @@ -246,8 +246,8 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id) erts_port_release(port); if (c_p != proc) { if (proc) - erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(proc, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); } return res; } @@ -271,12 +271,12 @@ erts_whereis_name_to_id(Process *c_p, Eterm name) ErtsProcLocks c_p_locks = 0; if (c_p) { c_p_locks = ERTS_PROC_LOCK_MAIN; - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(c_p); } reg_safe_read_lock(c_p, &c_p_locks); if (c_p && !c_p_locks) - erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN); hval = REG_HASH(name); ix = hval % process_reg.size; @@ -378,7 +378,7 @@ erts_whereis_name(Process *c_p, *proc = rp->p; else { if (need_locks) - erts_smp_proc_unlock(rp->p, need_locks); + erts_proc_unlock(rp->p, need_locks); *proc = NULL; } } @@ -402,11 +402,11 @@ erts_whereis_name(Process *c_p, pending_port = NULL; } - if (erts_smp_port_trylock(rp->pt) == EBUSY) { + if (erts_port_trylock(rp->pt) == EBUSY) { Eterm id = rp->pt->common.id; /* id read only... */ /* Unlock all locks, acquire port lock, and restart... */ if (current_c_p_locks) { - erts_smp_proc_unlock(c_p, current_c_p_locks); + erts_proc_unlock(c_p, current_c_p_locks); current_c_p_locks = 0; } reg_read_unlock(); @@ -414,14 +414,14 @@ erts_whereis_name(Process *c_p, goto restart; } } - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(rp->pt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(rp->pt)); } *port = rp->pt; } } if (c_p && !current_c_p_locks) - erts_smp_proc_lock(c_p, c_p_locks); + erts_proc_lock(c_p, c_p_locks); if (pending_port) erts_port_release(pending_port); @@ -477,7 +477,7 @@ int erts_unregister_name(Process *c_p, /* Unregister current process name */ ASSERT(c_p); if (current_c_p_locks != c_p_locks) { - erts_smp_proc_lock(c_p, c_p_locks); + erts_proc_lock(c_p, c_p_locks); current_c_p_locks = c_p_locks; } if (c_p->common.u.alive.reg) { @@ -498,11 +498,11 @@ int erts_unregister_name(Process *c_p, port = NULL; } - if (erts_smp_port_trylock(rp->pt) == EBUSY) { + if (erts_port_trylock(rp->pt) == EBUSY) { Eterm id = rp->pt->common.id; /* id read only... */ /* Unlock all locks, acquire port lock, and restart... 
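This unlock-everything-and-restart dance enforces lock ordering: the registry lock may not be held while blocking on a port lock, so on a failed trylock the code releases all of its locks, blocks on the port lock alone (keeping it around as the pending port), and restarts the lookup to revalidate. A stripped-down sketch of the back-off, without the pending-lock optimization, using POSIX mutexes:

    #include <pthread.h>

    static pthread_mutex_t reg_lock  = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Take reg_lock then port_lock without ever blocking on port_lock
     * while reg_lock is held; on contention, back off and retry. */
    void lock_both(void)
    {
        for (;;) {
            pthread_mutex_lock(&reg_lock);
            if (pthread_mutex_trylock(&port_lock) == 0)
                return;                       /* got both, in order   */
            pthread_mutex_unlock(&reg_lock);  /* back off completely  */
            pthread_mutex_lock(&port_lock);   /* wait out the holder  */
            pthread_mutex_unlock(&port_lock); /* then restart cleanly */
        }
    }

    void unlock_both(void)
    {
        pthread_mutex_unlock(&port_lock);
        pthread_mutex_unlock(&reg_lock);
    }

Blocking on port_lock and immediately releasing it merely waits out the current holder; correctness comes from re-running the whole lookup afterwards, exactly as the restart label does here.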
*/ if (current_c_p_locks) { - erts_smp_proc_unlock(c_p, current_c_p_locks); + erts_proc_unlock(c_p, current_c_p_locks); current_c_p_locks = 0; } reg_write_unlock(); @@ -513,13 +513,13 @@ int erts_unregister_name(Process *c_p, } ASSERT(rp->pt == port); - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(port)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(port)); rp->pt->common.u.alive.reg = NULL; if (IS_TRACED_FL(port, F_TRACE_PORTS)) { if (current_c_p_locks) { - erts_smp_proc_unlock(c_p, current_c_p_locks); + erts_proc_unlock(c_p, current_c_p_locks); current_c_p_locks = 0; } trace_port(port, am_unregister, r.name); @@ -540,7 +540,7 @@ int erts_unregister_name(Process *c_p, rp->p, am_unregister, r.name); } if (rp->p != c_p) { - erts_smp_proc_unlock(rp->p, ERTS_PROC_LOCK_MAIN); + erts_proc_unlock(rp->p, ERTS_PROC_LOCK_MAIN); } } hash_erase(&process_reg, (void*) &r); @@ -555,11 +555,11 @@ int erts_unregister_name(Process *c_p, erts_port_release(port); } if (c_prt) { - erts_smp_port_lock(c_prt); + erts_port_lock(c_prt); } } if (c_p && !current_c_p_locks) { - erts_smp_proc_lock(c_p, c_p_locks); + erts_proc_lock(c_p, c_p_locks); } return res; } @@ -603,10 +603,10 @@ BIF_RETTYPE registered_0(BIF_ALIST_0) HashBucket **bucket; ErtsProcLocks proc_locks = ERTS_PROC_LOCK_MAIN; - ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(BIF_P); + ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(BIF_P); reg_safe_read_lock(BIF_P, &proc_locks); if (!proc_locks) - erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); + erts_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN); bucket = process_reg.bucket; diff --git a/erts/emulator/beam/safe_hash.c b/erts/emulator/beam/safe_hash.c index 527c9efeca..ac9ebd4714 100644 --- a/erts/emulator/beam/safe_hash.c +++ b/erts/emulator/beam/safe_hash.c @@ -62,7 +62,7 @@ static ERTS_INLINE int align_up_pow2(int val) */ static void rehash(SafeHash* h, int grow_limit) { - if (erts_smp_atomic_xchg_acqb(&h->is_rehashing, 1) != 0) { + if (erts_atomic_xchg_acqb(&h->is_rehashing, 1) != 0) { return; /* already in progress */ } if (h->grow_limit == grow_limit) { @@ -77,7 +77,7 @@ static void rehash(SafeHash* h, int grow_limit) sys_memzero(new_tab, bytes); for (i=0; i<SAFE_HASH_LOCK_CNT; i++) { /* stop all traffic */ - erts_smp_mtx_lock(&h->lock_vec[i].mtx); + erts_mtx_lock(&h->lock_vec[i].mtx); } h->tab = new_tab; @@ -95,12 +95,12 @@ static void rehash(SafeHash* h, int grow_limit) } for (i=0; i<SAFE_HASH_LOCK_CNT; i++) { - erts_smp_mtx_unlock(&h->lock_vec[i].mtx); + erts_mtx_unlock(&h->lock_vec[i].mtx); } erts_free(h->type, (void *) old_tab); } /*else already done */ - erts_smp_atomic_set_relb(&h->is_rehashing, 0); + erts_atomic_set_relb(&h->is_rehashing, 0); } @@ -115,7 +115,7 @@ void safe_hash_get_info(SafeHashInfo *hi, SafeHash *h) int objects = 0; for (lock_ix=0; lock_ix<SAFE_HASH_LOCK_CNT; lock_ix++) { - erts_smp_mtx_lock(&h->lock_vec[lock_ix].mtx); + erts_mtx_lock(&h->lock_vec[lock_ix].mtx); size = h->size_mask + 1; for (i = lock_ix; i < size; i += SAFE_HASH_LOCK_CNT) { int depth = 0; @@ -128,7 +128,7 @@ void safe_hash_get_info(SafeHashInfo *hi, SafeHash *h) if (depth > max_depth) max_depth = depth; } - erts_smp_mtx_unlock(&h->lock_vec[lock_ix].mtx); + erts_mtx_unlock(&h->lock_vec[lock_ix].mtx); } hi->name = h->name; @@ -145,9 +145,9 @@ int safe_hash_table_sz(SafeHash *h) int i, size; for(i=0; h->name[i]; i++); i++; - erts_smp_mtx_lock(&h->lock_vec[0].mtx); /* any lock will do to read size */ + erts_mtx_lock(&h->lock_vec[0].mtx); /* any lock will do to read size */ size = h->size_mask + 1; - erts_smp_mtx_unlock(&h->lock_vec[0].mtx); + 
erts_mtx_unlock(&h->lock_vec[0].mtx); return sizeof(SafeHash) + size*sizeof(SafeHashBucket*) + i; } @@ -168,10 +168,10 @@ SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, erts_lock_ h->name = name; h->fun = fun; set_size(h,size); - erts_smp_atomic_init_nob(&h->is_rehashing, 0); - erts_smp_atomic_init_nob(&h->nitems, 0); + erts_atomic_init_nob(&h->is_rehashing, 0); + erts_atomic_init_nob(&h->nitems, 0); for (i=0; i<SAFE_HASH_LOCK_CNT; i++) { - erts_smp_mtx_init(&h->lock_vec[i].mtx, "safe_hash", NIL, + erts_mtx_init(&h->lock_vec[i].mtx, "safe_hash", NIL, flags); } return h; @@ -185,8 +185,8 @@ void* safe_hash_get(SafeHash* h, void* tmpl) { SafeHashValue hval = h->fun.hash(tmpl); SafeHashBucket* b; - erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx; - erts_smp_mtx_lock(lock); + erts_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx; + erts_mtx_lock(lock); b = h->tab[hval & h->size_mask]; while(b != NULL) { @@ -194,7 +194,7 @@ void* safe_hash_get(SafeHash* h, void* tmpl) break; b = b->next; } - erts_smp_mtx_unlock(lock); + erts_mtx_unlock(lock); return (void*) b; } @@ -207,13 +207,13 @@ void* safe_hash_put(SafeHash* h, void* tmpl) SafeHashValue hval = h->fun.hash(tmpl); SafeHashBucket* b; SafeHashBucket** head; - erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx; - erts_smp_mtx_lock(lock); + erts_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx; + erts_mtx_lock(lock); head = &h->tab[hval & h->size_mask]; b = *head; while(b != NULL) { if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) { - erts_smp_mtx_unlock(lock); + erts_mtx_unlock(lock); return b; } b = b->next; @@ -224,8 +224,8 @@ void* safe_hash_put(SafeHash* h, void* tmpl) b->next = *head; *head = b; grow_limit = h->grow_limit; - erts_smp_mtx_unlock(lock); - if (erts_smp_atomic_inc_read_nob(&h->nitems) > grow_limit) { + erts_mtx_unlock(lock); + if (erts_atomic_inc_read_nob(&h->nitems) > grow_limit) { rehash(h, grow_limit); } return (void*) b; @@ -240,22 +240,22 @@ void* safe_hash_erase(SafeHash* h, void* tmpl) SafeHashValue hval = h->fun.hash(tmpl); SafeHashBucket* b; SafeHashBucket** prevp; - erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx; - erts_smp_mtx_lock(lock); + erts_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx; + erts_mtx_lock(lock); prevp = &h->tab[hval & h->size_mask]; b = *prevp; while(b != NULL) { if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) { *prevp = b->next; - erts_smp_mtx_unlock(lock); - erts_smp_atomic_dec_nob(&h->nitems); + erts_mtx_unlock(lock); + erts_atomic_dec_nob(&h->nitems); h->fun.free((void*)b); return tmpl; } prevp = &b->next; b = b->next; } - erts_smp_mtx_unlock(lock); + erts_mtx_unlock(lock); return NULL; } @@ -280,7 +280,7 @@ void erts_lcnt_enable_hash_lock_count(SafeHash *h, erts_lock_flags_t flags, int int i; for(i = 0; i < SAFE_HASH_LOCK_CNT; i++) { - erts_smp_mtx_t *lock = &h->lock_vec[i].mtx; + erts_mtx_t *lock = &h->lock_vec[i].mtx; if(enable) { erts_lcnt_install_new_lock_info(&lock->lcnt, "safe_hash", NIL, diff --git a/erts/emulator/beam/safe_hash.h b/erts/emulator/beam/safe_hash.h index dde48a6de8..259c58cff9 100644 --- a/erts/emulator/beam/safe_hash.h +++ b/erts/emulator/beam/safe_hash.h @@ -73,11 +73,11 @@ typedef struct int size_mask; /* (RW) Number of slots - 1 */ SafeHashBucket** tab; /* (RW) Vector of bucket pointers (objects) */ int grow_limit; /* (RW) Threshold for growing table */ - erts_smp_atomic_t nitems; /* (A) Number of items in table */ - erts_smp_atomic_t 
is_rehashing; /* (A) Table rehashing in progress */ + erts_atomic_t nitems; /* (A) Number of items in table */ + erts_atomic_t is_rehashing; /* (A) Table rehashing in progress */ union { - erts_smp_mtx_t mtx; + erts_mtx_t mtx; byte __cache_line__[64]; }lock_vec[SAFE_HASH_LOCK_CNT]; diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h index 848f104871..615f44364b 100644 --- a/erts/emulator/beam/sys.h +++ b/erts/emulator/beam/sys.h @@ -218,8 +218,6 @@ __decl_noreturn void __noreturn erl_assert_error(const char* expr, const char *f # define ASSERT(e) ((void) 1) #endif -# define ERTS_SMP_ASSERT(e) ASSERT(e) - /* ERTS_UNDEF can be used to silence false warnings about * "variable may be used uninitialized" while keeping the variable * marked as undefined by valgrind. @@ -463,23 +461,23 @@ typedef union { #include "erl_lock_check.h" -/* needed by erl_smp.h */ +/* needed by erl_threads.h */ int erts_send_warning_to_logger_str_nogl(char *); -#include "erl_smp.h" +#include "erl_threads.h" #ifdef ERTS_WANT_BREAK_HANDLING -extern erts_smp_atomic32_t erts_break_requested; +extern erts_atomic32_t erts_break_requested; # define ERTS_BREAK_REQUESTED \ - ((int) erts_smp_atomic32_read_nob(&erts_break_requested)) + ((int) erts_atomic32_read_nob(&erts_break_requested)) void erts_do_break_handling(void); #endif -extern erts_smp_atomic32_t erts_writing_erl_crash_dump; +extern erts_atomic32_t erts_writing_erl_crash_dump; extern erts_tsd_key_t erts_is_crash_dumping_key; #define ERTS_SOMEONE_IS_CRASH_DUMPING \ - ((int) erts_smp_atomic32_read_mb(&erts_writing_erl_crash_dump)) + ((int) erts_atomic32_read_mb(&erts_writing_erl_crash_dump)) #define ERTS_IS_CRASH_DUMPING \ ((int) (SWord) erts_tsd_get(erts_is_crash_dumping_key)) @@ -620,7 +618,7 @@ int erts_send_info_to_logger_nogl(erts_dsprintf_buf_t *); int erts_send_warning_to_logger_nogl(erts_dsprintf_buf_t *); int erts_send_error_to_logger_nogl(erts_dsprintf_buf_t *); int erts_send_info_to_logger_str_nogl(char *); -/* needed by erl_smp.h (declared above) +/* needed by erl_threads.h (declared above) int erts_send_warning_to_logger_str_nogl(char *); */ int erts_send_error_to_logger_str_nogl(char *); @@ -1002,140 +1000,6 @@ erts_refc_read(erts_refc_t *refcp, erts_aint_t min_val) #endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ -typedef erts_smp_atomic_t erts_smp_refc_t; - -ERTS_GLB_INLINE void erts_smp_refc_init(erts_smp_refc_t *refcp, erts_aint_t val); -ERTS_GLB_INLINE void erts_smp_refc_inc(erts_smp_refc_t *refcp, erts_aint_t min_val); -ERTS_GLB_INLINE erts_aint_t erts_smp_refc_inc_unless(erts_smp_refc_t *refcp, - erts_aint_t unless_val, - erts_aint_t min_val); -ERTS_GLB_INLINE erts_aint_t erts_smp_refc_inctest(erts_smp_refc_t *refcp, - erts_aint_t min_val); -ERTS_GLB_INLINE void erts_smp_refc_dec(erts_smp_refc_t *refcp, erts_aint_t min_val); -ERTS_GLB_INLINE erts_aint_t erts_smp_refc_dectest(erts_smp_refc_t *refcp, - erts_aint_t min_val); -ERTS_GLB_INLINE void erts_smp_refc_add(erts_smp_refc_t *refcp, erts_aint_t diff, - erts_aint_t min_val); -ERTS_GLB_INLINE erts_aint_t erts_smp_refc_read(erts_smp_refc_t *refcp, - erts_aint_t min_val); - -#if ERTS_GLB_INLINE_INCL_FUNC_DEF - -ERTS_GLB_INLINE void -erts_smp_refc_init(erts_smp_refc_t *refcp, erts_aint_t val) -{ - erts_smp_atomic_init_nob((erts_smp_atomic_t *) refcp, val); -} - -ERTS_GLB_INLINE void -erts_smp_refc_inc(erts_smp_refc_t *refcp, erts_aint_t min_val) -{ -#ifdef ERTS_REFC_DEBUG - erts_aint_t val = erts_smp_atomic_inc_read_nob((erts_smp_atomic_t *) refcp); - if (val < min_val) - 
erts_exit(ERTS_ABORT_EXIT, - "erts_smp_refc_inc(): Bad refc found (refc=%ld < %ld)!\n", - val, min_val); -#else - erts_smp_atomic_inc_nob((erts_smp_atomic_t *) refcp); -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_smp_refc_inc_unless(erts_smp_refc_t *refcp, - erts_aint_t unless_val, - erts_aint_t min_val) -{ - erts_aint_t val = erts_smp_atomic_read_nob((erts_smp_atomic_t *) refcp); - while (1) { - erts_aint_t exp, new; -#ifdef ERTS_REFC_DEBUG - if (val < 0) - erts_exit(ERTS_ABORT_EXIT, - "erts_smp_refc_inc_unless(): Bad refc found (refc=%ld < %ld)!\n", - val, min_val); -#endif - if (val == unless_val) - return val; - new = val + 1; - exp = val; - val = erts_smp_atomic_cmpxchg_nob((erts_smp_atomic_t *) refcp, new, exp); - if (val == exp) - return new; - } -} - - -ERTS_GLB_INLINE erts_aint_t -erts_smp_refc_inctest(erts_smp_refc_t *refcp, erts_aint_t min_val) -{ - erts_aint_t val = erts_smp_atomic_inc_read_nob((erts_smp_atomic_t *) refcp); -#ifdef ERTS_REFC_DEBUG - if (val < min_val) - erts_exit(ERTS_ABORT_EXIT, - "erts_smp_refc_inctest(): Bad refc found (refc=%ld < %ld)!\n", - val, min_val); -#endif - return val; -} - -ERTS_GLB_INLINE void -erts_smp_refc_dec(erts_smp_refc_t *refcp, erts_aint_t min_val) -{ -#ifdef ERTS_REFC_DEBUG - erts_aint_t val = erts_smp_atomic_dec_read_nob((erts_smp_atomic_t *) refcp); - if (val < min_val) - erts_exit(ERTS_ABORT_EXIT, - "erts_smp_refc_dec(): Bad refc found (refc=%ld < %ld)!\n", - val, min_val); -#else - erts_smp_atomic_dec_nob((erts_smp_atomic_t *) refcp); -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_smp_refc_dectest(erts_smp_refc_t *refcp, erts_aint_t min_val) -{ - erts_aint_t val = erts_smp_atomic_dec_read_nob((erts_smp_atomic_t *) refcp); -#ifdef ERTS_REFC_DEBUG - if (val < min_val) - erts_exit(ERTS_ABORT_EXIT, - "erts_smp_refc_dectest(): Bad refc found (refc=%ld < %ld)!\n", - val, min_val); -#endif - return val; -} - -ERTS_GLB_INLINE void -erts_smp_refc_add(erts_smp_refc_t *refcp, erts_aint_t diff, erts_aint_t min_val) -{ -#ifdef ERTS_REFC_DEBUG - erts_aint_t val = erts_smp_atomic_add_read_nob((erts_smp_atomic_t *) refcp, diff); - if (val < min_val) - erts_exit(ERTS_ABORT_EXIT, - "erts_smp_refc_add(%ld): Bad refc found (refc=%ld < %ld)!\n", - diff, val, min_val); -#else - erts_smp_atomic_add_nob((erts_smp_atomic_t *) refcp, diff); -#endif -} - -ERTS_GLB_INLINE erts_aint_t -erts_smp_refc_read(erts_smp_refc_t *refcp, erts_aint_t min_val) -{ - erts_aint_t val = erts_smp_atomic_read_nob((erts_smp_atomic_t *) refcp); -#ifdef ERTS_REFC_DEBUG - if (val < min_val) - erts_exit(ERTS_ABORT_EXIT, - "erts_smp_refc_read(): Bad refc found (refc=%ld < %ld)!\n", - val, min_val); -#endif - return val; -} - -#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */ - - #ifdef ERTS_ENABLE_KERNEL_POLL extern int erts_use_kernel_poll; #endif diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c index 3713b756e8..dcb1468d60 100644 --- a/erts/emulator/beam/utils.c +++ b/erts/emulator/beam/utils.c @@ -43,7 +43,6 @@ #include "erl_printf.h" #include "erl_threads.h" #include "erl_lock_count.h" -#include "erl_smp.h" #include "erl_time.h" #include "erl_thr_progress.h" #include "erl_thr_queue.h" @@ -3527,7 +3526,7 @@ store_external_or_ref_(Uint **hpp, ErlOffHeap* oh, Eterm ns) if (is_external_header(*from_hp)) { ExternalThing *etp = (ExternalThing *) from_hp; ASSERT(is_external(ns)); - erts_smp_refc_inc(&etp->node->refc, 2); + erts_refc_inc(&etp->node->refc, 2); } else if (is_ordinary_ref_thing(from_hp)) return make_internal_ref(to_hp); @@ -4628,18 +4627,6 @@ void 
erts_interval_init(erts_interval_t *icp) { erts_atomic64_init_nob(&icp->counter.atomic, 0); -#ifdef DEBUG - icp->smp_api = 0; -#endif -} - -void -erts_smp_interval_init(erts_interval_t *icp) -{ - erts_interval_init(icp); -#ifdef DEBUG - icp->smp_api = 1; -#endif } static ERTS_INLINE Uint64 @@ -4679,56 +4666,24 @@ ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic) Uint64 erts_step_interval_nob(erts_interval_t *icp) { - ASSERT(!icp->smp_api); return step_interval_nob(icp); } Uint64 erts_step_interval_relb(erts_interval_t *icp) { - ASSERT(!icp->smp_api); - return step_interval_relb(icp); -} - -Uint64 -erts_smp_step_interval_nob(erts_interval_t *icp) -{ - ASSERT(icp->smp_api); - return step_interval_nob(icp); -} - -Uint64 -erts_smp_step_interval_relb(erts_interval_t *icp) -{ - ASSERT(icp->smp_api); return step_interval_relb(icp); } Uint64 erts_ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic) { - ASSERT(!icp->smp_api); return ensure_later_interval_nob(icp, ic); } Uint64 erts_ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic) { - ASSERT(!icp->smp_api); - return ensure_later_interval_acqb(icp, ic); -} - -Uint64 -erts_smp_ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic) -{ - ASSERT(icp->smp_api); - return ensure_later_interval_nob(icp, ic); -} - -Uint64 -erts_smp_ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic) -{ - ASSERT(icp->smp_api); return ensure_later_interval_acqb(icp, ic); } diff --git a/erts/emulator/hipe/hipe_bif0.c b/erts/emulator/hipe/hipe_bif0.c index 94bc563fda..05663648e9 100644 --- a/erts/emulator/hipe/hipe_bif0.c +++ b/erts/emulator/hipe/hipe_bif0.c @@ -1000,7 +1000,7 @@ BIF_RETTYPE hipe_bifs_set_native_address_in_fe_2(BIF_ALIST_2) BIF_ERROR(BIF_P, BADARG); fe->native_address = native_address; - if (erts_smp_refc_dectest(&fe->refc, 0) == 0) + if (erts_refc_dectest(&fe->refc, 0) == 0) erts_erase_fun_entry(fe); BIF_RET(am_true); } @@ -1048,7 +1048,7 @@ static struct { * they create a new stub for the mfa, which forces locking. * XXX: Redesign apply et al to avoid those updates. 
*/ - erts_smp_rwmtx_t lock; + erts_rwmtx_t lock; } hipe_mfa_info_table; Hash mod2mfa_tab; /* map from module atom to list of hipe_mfa_info */ @@ -1129,28 +1129,28 @@ struct hipe_ref { static inline void hipe_mfa_info_table_init_lock(void) { - erts_smp_rwmtx_init(&hipe_mfa_info_table.lock, "hipe_mfait_lock", NIL, + erts_rwmtx_init(&hipe_mfa_info_table.lock, "hipe_mfait_lock", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); } static inline void hipe_mfa_info_table_rlock(void) { - erts_smp_rwmtx_rlock(&hipe_mfa_info_table.lock); + erts_rwmtx_rlock(&hipe_mfa_info_table.lock); } static inline void hipe_mfa_info_table_runlock(void) { - erts_smp_rwmtx_runlock(&hipe_mfa_info_table.lock); + erts_rwmtx_runlock(&hipe_mfa_info_table.lock); } static inline void hipe_mfa_info_table_rwlock(void) { - erts_smp_rwmtx_rwlock(&hipe_mfa_info_table.lock); + erts_rwmtx_rwlock(&hipe_mfa_info_table.lock); } static inline void hipe_mfa_info_table_rwunlock(void) { - erts_smp_rwmtx_rwunlock(&hipe_mfa_info_table.lock); + erts_rwmtx_rwunlock(&hipe_mfa_info_table.lock); } static ERTS_INLINE @@ -1636,7 +1636,7 @@ void hipe_purge_refs(struct hipe_ref* first_ref, Eterm caller_module, { struct hipe_ref* ref = first_ref; - ERTS_SMP_LC_ASSERT(is_blocking == erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(is_blocking == erts_thr_progress_is_blocking()); while (ref) { struct hipe_ref* free_ref = ref; @@ -1682,9 +1682,9 @@ void hipe_purge_sdescs(struct hipe_sdesc* first_sdesc, Eterm module, { struct hipe_sdesc* sdesc = first_sdesc; - ERTS_SMP_LC_ASSERT(is_blocking == erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(is_blocking == erts_thr_progress_is_blocking()); - ERTS_SMP_LC_ASSERT(is_blocking); /*XXX Fix safe sdesc destruction */ + ERTS_LC_ASSERT(is_blocking); /*XXX Fix safe sdesc destruction */ while (sdesc) { struct hipe_sdesc* free_sdesc = sdesc; @@ -1702,7 +1702,7 @@ void hipe_purge_module(Module* modp, int is_blocking) { ASSERT(modp); - ERTS_SMP_LC_ASSERT(is_blocking == erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(is_blocking == erts_thr_progress_is_blocking()); DBG_TRACE_MFA(make_atom(modp->module), 0, 0, "hipe_purge_module"); @@ -1711,7 +1711,7 @@ void hipe_purge_module(Module* modp, int is_blocking) * Remove all hipe_ref's (external calls) from the old module instance */ if (modp->old.hipe_code->first_hipe_ref) { - ERTS_SMP_LC_ASSERT(is_blocking); + ERTS_LC_ASSERT(is_blocking); hipe_purge_refs(modp->old.hipe_code->first_hipe_ref, make_atom(modp->module), is_blocking); @@ -1722,7 +1722,7 @@ void hipe_purge_module(Module* modp, int is_blocking) * Remove all hipe_sdesc's for the old module instance */ if (modp->old.hipe_code->first_hipe_sdesc) { - ERTS_SMP_LC_ASSERT(is_blocking); + ERTS_LC_ASSERT(is_blocking); hipe_purge_sdescs(modp->old.hipe_code->first_hipe_sdesc, make_atom(modp->module), is_blocking); @@ -1773,7 +1773,7 @@ void hipe_redirect_to_module(Module* modp) struct hipe_mfa_info *p; struct hipe_ref_head* refh; - ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking()); + ERTS_LC_ASSERT(erts_thr_progress_is_blocking()); for (p = mod2mfa_get(modp); p; p = p->next_in_mod) { if (p->new_address) { diff --git a/erts/emulator/hipe/hipe_bif2.c b/erts/emulator/hipe/hipe_bif2.c index 9cac733db5..9ebbb22846 100644 --- a/erts/emulator/hipe/hipe_bif2.c +++ b/erts/emulator/hipe/hipe_bif2.c @@ -45,7 +45,7 @@ static void proc_unlock(Process* c_p, Process* rp) locks &= ~ERTS_PROC_LOCK_MAIN; } if (rp && locks) { - erts_smp_proc_unlock(rp, locks); + erts_proc_unlock(rp, locks); } } 
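/*
 * Context sketch, assumed from the erl_smp.h layer this patch deletes
 * (not authoritative): the erts_smp_* names were thin aliases that
 * compiled to the real lock operations in SMP builds and to no-ops in
 * the non-SMP emulator. With the non-SMP build gone, the alias layer
 * carries no information, so call sites switch to erts_* directly.
 * Roughly:
 */
#ifdef ERTS_SMP
#  define erts_smp_mtx_lock(L)   erts_mtx_lock(L)
#  define erts_smp_mtx_unlock(L) erts_mtx_unlock(L)
#else
#  define erts_smp_mtx_lock(L)   ((void) 0)  /* single-threaded: no-op */
#  define erts_smp_mtx_unlock(L) ((void) 0)
#endif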
@@ -157,10 +157,10 @@ BIF_RETTYPE hipe_bifs_modeswitch_debug_off_0(BIF_ALIST_0) BIF_RETTYPE hipe_debug_bif_wrapper(NBIF_ALIST_1); -# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \ +# define ERTS_REQ_PROC_MAIN_LOCK(P) \ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN,\ __FILE__, __LINE__) -# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \ +# define ERTS_UNREQ_PROC_MAIN_LOCK(P) \ if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN) BIF_RETTYPE hipe_debug_bif_wrapper(NBIF_ALIST_1) @@ -168,9 +168,9 @@ BIF_RETTYPE hipe_debug_bif_wrapper(NBIF_ALIST_1) typedef BIF_RETTYPE nBif(NBIF_ALIST_1); nBif* fp = (nBif*) (BIF_P->hipe.bif_callee); BIF_RETTYPE res; - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(BIF_P); + ERTS_UNREQ_PROC_MAIN_LOCK(BIF_P); res = (*fp)(NBIF_CALL_ARGS); - ERTS_SMP_REQ_PROC_MAIN_LOCK(BIF_P); + ERTS_REQ_PROC_MAIN_LOCK(BIF_P); return res; } diff --git a/erts/emulator/hipe/hipe_mode_switch.c b/erts/emulator/hipe/hipe_mode_switch.c index fd90c46fa8..b7f81fc4a6 100644 --- a/erts/emulator/hipe/hipe_mode_switch.c +++ b/erts/emulator/hipe/hipe_mode_switch.c @@ -37,14 +37,14 @@ #include "hipe_bif0.h" /* hipe_mfa_info_table_init() */ #if defined(ERTS_ENABLE_LOCK_CHECK) -# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \ +# define ERTS_REQ_PROC_MAIN_LOCK(P) \ if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN, \ __FILE__, __LINE__) -# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \ +# define ERTS_UNREQ_PROC_MAIN_LOCK(P) \ if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN) #else -# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) -# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) +# define ERTS_REQ_PROC_MAIN_LOCK(P) +# define ERTS_UNREQ_PROC_MAIN_LOCK(P) #endif @@ -394,7 +394,7 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[]) goto do_schedule; } - if (!(erts_smp_atomic32_read_acqb(&p->state) & ERTS_PSFLG_ACTIVE)) { + if (!(erts_atomic32_read_acqb(&p->state) & ERTS_PSFLG_ACTIVE)) { for (i = 0; i < p->arity; ++i) p->arg_reg[i] = reg[i]; goto do_schedule; @@ -495,12 +495,12 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[]) if (p->hipe_smp.have_receive_locks) p->hipe_smp.have_receive_locks = 0; else - erts_smp_proc_lock(p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_lock(p, ERTS_PROC_LOCKS_MSG_RECEIVE); p->i = hipe_beam_pc_resume; p->arity = 0; - erts_smp_atomic32_read_band_relb(&p->state, + erts_atomic32_read_band_relb(&p->state, ~ERTS_PSFLG_ACTIVE); - erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(p, ERTS_PROC_LOCKS_MSG_RECEIVE); do_schedule: { struct saved_calls *scb; @@ -511,16 +511,16 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[]) /* The process may have died while it was executing, if so we return out from native code to the interpreter */ - if (erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_EXITING) + if (erts_atomic32_read_nob(&p->state) & ERTS_PSFLG_EXITING) p->i = beam_exit; #ifdef DEBUG ASSERT(p->debug_reds_in == reds_in); #endif p->flags &= ~F_HIPE_MODE; - ERTS_SMP_UNREQ_PROC_MAIN_LOCK(p); + ERTS_UNREQ_PROC_MAIN_LOCK(p); p = erts_schedule(NULL, p, reds_in - p->fcalls); - ERTS_SMP_REQ_PROC_MAIN_LOCK(p); + ERTS_REQ_PROC_MAIN_LOCK(p); ASSERT(!(p->flags & F_HIPE_MODE)); p->hipe_smp.have_receive_locks = 0; reg = p->scheduler_data->x_reg_array; diff --git a/erts/emulator/hipe/hipe_native_bif.c b/erts/emulator/hipe/hipe_native_bif.c index f6ecf841ba..23f64a6991 100644 --- a/erts/emulator/hipe/hipe_native_bif.c +++ b/erts/emulator/hipe/hipe_native_bif.c @@ -145,7 +145,7 @@ BIF_RETTYPE nbif_impl_hipe_set_timeout(NBIF_ALIST_1) 
if (tres != 0) { /* Wrong time */ if (p->hipe_smp.have_receive_locks) { p->hipe_smp.have_receive_locks = 0; - erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(p, ERTS_PROC_LOCKS_MSG_RECEIVE); } BIF_ERROR(p, EXC_TIMEOUT_VALUE); } @@ -526,16 +526,16 @@ Eterm hipe_check_get_msg(Process *c_p) msgp = PEEK_MESSAGE(c_p); if (!msgp) { - erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); /* Make sure messages wont pass exit signals... */ if (ERTS_PROC_PENDING_EXIT(c_p)) { - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); return THE_NON_VALUE; /* Will be rescheduled for exit */ } - ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p); + ERTS_MSGQ_MV_INQ2PRIVQ(c_p); msgp = PEEK_MESSAGE(c_p); if (msgp) - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); else { /* XXX: BEAM doesn't need this */ c_p->hipe_smp.have_receive_locks = 1; @@ -579,7 +579,7 @@ void hipe_clear_timeout(Process *c_p) cases. HiPE doesn't, so we must check dynamically. */ if (c_p->hipe_smp.have_receive_locks) { c_p->hipe_smp.have_receive_locks = 0; - erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); + erts_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE); } if (IS_TRACED_FL(c_p, F_TRACE_RECEIVE)) { trace_receive(c_p, am_clock_service, am_timeout, NULL); @@ -590,6 +590,6 @@ void hipe_clear_timeout(Process *c_p) void hipe_atomic_inc(int *counter) { - erts_smp_atomic_inc_nob((erts_smp_atomic_t*)counter); + erts_atomic_inc_nob((erts_atomic_t*)counter); } diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c index 3e029303a7..834b77eb58 100644 --- a/erts/emulator/sys/common/erl_check_io.c +++ b/erts/emulator/sys/common/erl_check_io.c @@ -97,16 +97,16 @@ typedef char EventStateFlags; static struct pollset_info { ErtsPollSet ps; - erts_smp_atomic_t in_poll_wait; /* set while doing poll */ + erts_atomic_t in_poll_wait; /* set while doing poll */ struct { int six; /* start index */ int eix; /* end index */ - erts_smp_atomic32_t no; + erts_atomic32_t no; int size; ErtsSysFdType *array; } active_fd; struct removed_fd* removed_list; /* list of deselected fd's*/ - erts_smp_spinlock_t removed_list_lock; + erts_spinlock_t removed_list_lock; }pollset; #define NUM_OF_POLLSETS 1 @@ -150,11 +150,11 @@ static int max_fds = -1; #endif #define DRV_EV_STATE_LOCK_CNT 16 static union { - erts_smp_mtx_t lck; + erts_mtx_t lck; byte _cache_line_alignment[64]; }drv_ev_state_locks[DRV_EV_STATE_LOCK_CNT]; -static ERTS_INLINE erts_smp_mtx_t* fd_mtx(ErtsSysFdType fd) +static ERTS_INLINE erts_mtx_t* fd_mtx(ErtsSysFdType fd) { int hash = (int)fd; # ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS @@ -165,15 +165,15 @@ static ERTS_INLINE erts_smp_mtx_t* fd_mtx(ErtsSysFdType fd) #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS -static erts_smp_atomic_t drv_ev_state_len; +static erts_atomic_t drv_ev_state_len; static ErtsDrvEventState *drv_ev_state; -static erts_smp_mtx_t drv_ev_state_grow_lock; /* prevent lock-hogging of racing growers */ +static erts_mtx_t drv_ev_state_grow_lock; /* prevent lock-hogging of racing growers */ #else static SafeHash drv_ev_state_tab; static int num_state_prealloc; static ErtsDrvEventState *state_prealloc_first; -erts_smp_spinlock_t state_prealloc_lock; +erts_spinlock_t state_prealloc_lock; static ERTS_INLINE ErtsDrvEventState *hash_get_drv_ev_state(ErtsSysFdType fd) { @@ -244,7 +244,7 @@ static ERTS_INLINE void init_iotask(ErtsIoTask 
*io_task) { erts_port_task_handle_init(&io_task->task); - erts_smp_atomic_init_nob(&io_task->executed_time, ~((erts_aint_t) 0)); + erts_atomic_init_nob(&io_task->executed_time, ~((erts_aint_t) 0)); } static ERTS_INLINE int @@ -252,7 +252,7 @@ is_iotask_active(ErtsIoTask *io_task, erts_aint_t current_cio_time) { if (erts_port_task_is_scheduled(&io_task->task)) return 1; - if (erts_smp_atomic_read_nob(&io_task->executed_time) == current_cio_time) + if (erts_atomic_read_nob(&io_task->executed_time) == current_cio_time) return 1; return 0; } @@ -325,8 +325,8 @@ static ERTS_INLINE void remember_removed(ErtsDrvEventState *state, struct pollset_info* psi) { struct removed_fd *fdlp; - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(fd_mtx(state->fd))); - if (erts_smp_atomic_read_nob(&psi->in_poll_wait)) { + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(fd_mtx(state->fd))); + if (erts_atomic_read_nob(&psi->in_poll_wait)) { state->remove_cnt++; ASSERT(state->remove_cnt > 0); fdlp = removed_fd_alloc(); @@ -336,10 +336,10 @@ remember_removed(ErtsDrvEventState *state, struct pollset_info* psi) #ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS fdlp->state = state; #endif - erts_smp_spin_lock(&psi->removed_list_lock); + erts_spin_lock(&psi->removed_list_lock); fdlp->next = psi->removed_list; psi->removed_list = fdlp; - erts_smp_spin_unlock(&psi->removed_list_lock); + erts_spin_unlock(&psi->removed_list_lock); } } @@ -363,29 +363,29 @@ forget_removed(struct pollset_info* psi) /* Fast track: if (atomic_ptr(removed_list)==NULL) return; */ - erts_smp_spin_lock(&psi->removed_list_lock); + erts_spin_lock(&psi->removed_list_lock); fdlp = psi->removed_list; psi->removed_list = NULL; - erts_smp_spin_unlock(&psi->removed_list_lock); + erts_spin_unlock(&psi->removed_list_lock); while (fdlp) { ErtsResource* resource = NULL; erts_driver_t* drv_ptr = NULL; - erts_smp_mtx_t* mtx; + erts_mtx_t* mtx; ErtsSysFdType fd; ErtsDrvEventState *state; #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS fd = fdlp->fd; mtx = fd_mtx(fd); - erts_smp_mtx_lock(mtx); + erts_mtx_lock(mtx); state = &drv_ev_state[(int) fd]; #else state = fdlp->state; fd = state->fd; ASSERT(fd == fdlp->fd); mtx = fd_mtx(fd); - erts_smp_mtx_lock(mtx); + erts_mtx_lock(mtx); #endif ASSERT(state->remove_cnt > 0); if (--state->remove_cnt == 0) { @@ -420,7 +420,7 @@ forget_removed(struct pollset_info* psi) ASSERT(0); } } - erts_smp_mtx_unlock(mtx); + erts_mtx_unlock(mtx); if (drv_ptr) { int was_unmasked = erts_block_fpe(); DTRACE1(driver_stop_select, drv_ptr->name); @@ -450,15 +450,15 @@ grow_drv_ev_state(int min_ix) int old_len; int new_len; - erts_smp_mtx_lock(&drv_ev_state_grow_lock); - old_len = erts_smp_atomic_read_nob(&drv_ev_state_len); + erts_mtx_lock(&drv_ev_state_grow_lock); + old_len = erts_atomic_read_nob(&drv_ev_state_len); if (min_ix >= old_len) { new_len = erts_poll_new_table_len(old_len, min_ix + 1); if (new_len > max_fds) new_len = max_fds; for (i=0; i<DRV_EV_STATE_LOCK_CNT; i++) { /* lock all fd's */ - erts_smp_mtx_lock(&drv_ev_state_locks[i].lck); + erts_mtx_lock(&drv_ev_state_locks[i].lck); } drv_ev_state = (drv_ev_state ? 
erts_realloc(ERTS_ALC_T_DRV_EV_STATE, @@ -479,14 +479,14 @@ grow_drv_ev_state(int min_ix) drv_ev_state[i].type = ERTS_EV_TYPE_NONE; drv_ev_state[i].flags = 0; } - erts_smp_atomic_set_nob(&drv_ev_state_len, new_len); + erts_atomic_set_nob(&drv_ev_state_len, new_len); for (i=0; i<DRV_EV_STATE_LOCK_CNT; i++) { - erts_smp_mtx_unlock(&drv_ev_state_locks[i].lck); + erts_mtx_unlock(&drv_ev_state_locks[i].lck); } } /*else already grown by racing thread */ - erts_smp_mtx_unlock(&drv_ev_state_grow_lock); + erts_mtx_unlock(&drv_ev_state_grow_lock); } #endif /* ERTS_SYS_CONTINOUS_FD_NUMBERS */ @@ -544,7 +544,7 @@ deselect(ErtsDrvEventState *state, int mode) { int do_wake = 0; ErtsPollEvents rm_events; - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(fd_mtx(state->fd))); + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(fd_mtx(state->fd))); ASSERT(state->events); abort_tasks(state, mode); @@ -616,9 +616,9 @@ check_fd_cleanup(ErtsDrvEventState *state, { erts_aint_t current_cio_time; - ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(fd_mtx(state->fd))); + ERTS_LC_ASSERT(erts_lc_mtx_is_locked(fd_mtx(state->fd))); - current_cio_time = erts_smp_atomic_read_acqb(&erts_check_io_time); + current_cio_time = erts_atomic_read_acqb(&erts_check_io_time); *free_select = NULL; if (state->driver.select && (state->type != ERTS_EV_TYPE_DRV_SEL) @@ -671,7 +671,7 @@ check_cleanup_active_fd(ErtsSysFdType fd, { ErtsDrvEventState *state; int active = 0; - erts_smp_mtx_t *mtx = fd_mtx(fd); + erts_mtx_t *mtx = fd_mtx(fd); void *free_select = NULL; void *free_nif = NULL; #if ERTS_CIO_HAVE_DRV_EVENT @@ -681,7 +681,7 @@ check_cleanup_active_fd(ErtsSysFdType fd, ErtsPollEvents evon = 0, evoff = 0; #endif - erts_smp_mtx_lock(mtx); + erts_mtx_lock(mtx); #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS state = &drv_ev_state[(int) fd]; @@ -802,7 +802,7 @@ check_cleanup_active_fd(ErtsSysFdType fd, } - erts_smp_mtx_unlock(mtx); + erts_mtx_unlock(mtx); if (free_select) free_drv_select_data(free_select); @@ -836,7 +836,7 @@ check_cleanup_active_fds(erts_aint_t current_cio_time, int may_sleep) { int six = pollset.active_fd.six; int eix = pollset.active_fd.eix; - erts_aint32_t no = erts_smp_atomic32_read_dirty(&pollset.active_fd.no); + erts_aint32_t no = erts_atomic32_read_dirty(&pollset.active_fd.no); int size = pollset.active_fd.size; int ix = six; #if ERTS_CIO_DEFER_ACTIVE_EVENTS @@ -891,7 +891,7 @@ check_cleanup_active_fds(erts_aint_t current_cio_time, int may_sleep) pollset.active_fd.six = six; pollset.active_fd.eix = eix; - erts_smp_atomic32_set_relb(&pollset.active_fd.no, no); + erts_atomic32_set_relb(&pollset.active_fd.no, no); } static void grow_active_fds(void) @@ -920,8 +920,8 @@ add_active_fd(ErtsSysFdType fd) pollset.active_fd.array[eix] = fd; - erts_smp_atomic32_set_relb(&pollset.active_fd.no, - (erts_smp_atomic32_read_dirty(&pollset.active_fd.no) + erts_atomic32_set_relb(&pollset.active_fd.no, + (erts_atomic32_read_dirty(&pollset.active_fd.no) + 1)); eix++; @@ -961,10 +961,10 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix, if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS - if ((unsigned)fd >= (unsigned)erts_smp_atomic_read_nob(&drv_ev_state_len)) { + if ((unsigned)fd >= (unsigned)erts_atomic_read_nob(&drv_ev_state_len)) { if (fd < 0) { return -1; } @@ -976,7 +976,7 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix, } #endif - erts_smp_mtx_lock(fd_mtx(fd)); + erts_mtx_lock(fd_mtx(fd)); #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS 
state = &drv_ev_state[(int) fd]; @@ -1157,7 +1157,7 @@ done: &free_nif); done_unknown: - erts_smp_mtx_unlock(fd_mtx(fd)); + erts_mtx_unlock(fd_mtx(fd)); if (stop_select_fn) { int was_unmasked = erts_block_fpe(); DTRACE1(driver_stop_select, name); @@ -1206,7 +1206,7 @@ ERTS_CIO_EXPORT(enif_select)(ErlNifEnv* env, ASSERT(!(resource->monitors && resource->monitors->is_dying)); #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS - if ((unsigned)fd >= (unsigned)erts_smp_atomic_read_nob(&drv_ev_state_len)) { + if ((unsigned)fd >= (unsigned)erts_atomic_read_nob(&drv_ev_state_len)) { if (fd < 0) { return INT_MIN | ERL_NIF_SELECT_INVALID_EVENT; } @@ -1218,7 +1218,7 @@ ERTS_CIO_EXPORT(enif_select)(ErlNifEnv* env, } #endif - erts_smp_mtx_lock(fd_mtx(fd)); + erts_mtx_lock(fd_mtx(fd)); #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS state = &drv_ev_state[(int) fd]; @@ -1412,7 +1412,7 @@ done: &free_nif); done_unknown: - erts_smp_mtx_unlock(fd_mtx(fd)); + erts_mtx_unlock(fd_mtx(fd)); if (call_stop) { erts_resource_stop(resource, (ErlNifEvent)fd, 1); if (call_stop == CALL_STOP_AND_RELEASE) { @@ -1458,10 +1458,10 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix, if (prt == ERTS_INVALID_ERL_DRV_PORT) return -1; - ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt)); + ERTS_LC_ASSERT(erts_lc_is_port_locked(prt)); #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS - if ((unsigned)fd >= (unsigned)erts_smp_atomic_read_nob(&drv_ev_state_len)) { + if ((unsigned)fd >= (unsigned)erts_atomic_read_nob(&drv_ev_state_len)) { if (fd < 0) return -1; if (fd >= max_fds) { @@ -1472,7 +1472,7 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix, } #endif - erts_smp_mtx_lock(fd_mtx(fd)); + erts_mtx_lock(fd_mtx(fd)); #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS state = &drv_ev_state[(int) fd]; @@ -1565,7 +1565,7 @@ done: &free_select, &free_nif); - erts_smp_mtx_unlock(fd_mtx(fd)); + erts_mtx_unlock(fd_mtx(fd)); if (free_select) free_drv_select_data(free_select); @@ -1995,7 +1995,7 @@ iready(Eterm id, ErtsDrvEventState *state, erts_aint_t current_cio_time) ERTS_PORT_TASK_INPUT, current_cio_time)) { ErtsIoTask *iotask = &state->driver.select->iniotask; - erts_smp_atomic_set_nob(&iotask->executed_time, current_cio_time); + erts_atomic_set_nob(&iotask->executed_time, current_cio_time); if (erts_port_task_schedule(id, &iotask->task, ERTS_PORT_TASK_INPUT, @@ -2013,7 +2013,7 @@ oready(Eterm id, ErtsDrvEventState *state, erts_aint_t current_cio_time) ERTS_PORT_TASK_OUTPUT, current_cio_time)) { ErtsIoTask *iotask = &state->driver.select->outiotask; - erts_smp_atomic_set_nob(&iotask->executed_time, current_cio_time); + erts_atomic_set_nob(&iotask->executed_time, current_cio_time); if (erts_port_task_schedule(id, &iotask->task, ERTS_PORT_TASK_OUTPUT, @@ -2069,7 +2069,7 @@ send_event_tuple(struct erts_nif_select_event* e, ErtsResource* resource, erts_queue_message(rp, rp_locks, mp, tuple, am_system); if (rp_locks) - erts_smp_proc_unlock(rp, rp_locks); + erts_proc_unlock(rp, rp_locks); } @@ -2082,7 +2082,7 @@ eready(Eterm id, ErtsDrvEventState *state, ErlDrvEventData event_data, ERTS_PORT_TASK_EVENT, current_cio_time)) { ErtsIoTask *iotask = &state->driver.event->iotask; - erts_smp_atomic_set_nob(&iotask->executed_time, current_cio_time); + erts_atomic_set_nob(&iotask->executed_time, current_cio_time); if (erts_port_task_schedule(id, &iotask->task, ERTS_PORT_TASK_EVENT, @@ -2157,9 +2157,9 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait) * erts_check_io_time, since only one thread can * check io at a time. 
*/ - current_cio_time = erts_smp_atomic_read_dirty(&erts_check_io_time); + current_cio_time = erts_atomic_read_dirty(&erts_check_io_time); current_cio_time++; - erts_smp_atomic_set_relb(&erts_check_io_time, current_cio_time); + erts_atomic_set_relb(&erts_check_io_time, current_cio_time); check_cleanup_active_fds(current_cio_time, timeout_time != ERTS_POLL_NO_TIMEOUT); @@ -2168,11 +2168,11 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait) erts_lc_check_exact(NULL, 0); /* No locks should be locked */ #endif - pollres_len = erts_smp_atomic32_read_dirty(&pollset.active_fd.no) + ERTS_CHECK_IO_POLL_RES_LEN; + pollres_len = erts_atomic32_read_dirty(&pollset.active_fd.no) + ERTS_CHECK_IO_POLL_RES_LEN; pollres = erts_alloc(ERTS_ALC_T_TMP, sizeof(ErtsPollResFd)*pollres_len); - erts_smp_atomic_set_nob(&pollset.in_poll_wait, 1); + erts_atomic_set_nob(&pollset.in_poll_wait, 1); poll_ret = ERTS_CIO_POLL_WAIT(pollset.ps, pollres, &pollres_len, timeout_time); @@ -2194,7 +2194,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait) if (poll_ret != 0) { - erts_smp_atomic_set_nob(&pollset.in_poll_wait, 0); + erts_atomic_set_nob(&pollset.in_poll_wait, 0); forget_removed(&pollset); erts_free(ERTS_ALC_T_TMP, pollres); if (poll_ret == EAGAIN) { @@ -2220,7 +2220,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait) ErtsSysFdType fd = (ErtsSysFdType) pollres[i].fd; ErtsDrvEventState *state; - erts_smp_mtx_lock(fd_mtx(fd)); + erts_mtx_lock(fd_mtx(fd)); #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS state = &drv_ev_state[ (int) fd]; @@ -2327,7 +2327,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait) add_active_fd(state->fd); } - erts_smp_mtx_unlock(fd_mtx(fd)); + erts_mtx_unlock(fd_mtx(fd)); if (is_not_nil(in.pid)) { send_event_tuple(&in, resource, am_ready_input); } @@ -2374,11 +2374,11 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait) } next_pollres:; - erts_smp_mtx_unlock(fd_mtx(fd)); + erts_mtx_unlock(fd_mtx(fd)); next_pollres_unlocked:; } - erts_smp_atomic_set_nob(&pollset.in_poll_wait, 0); + erts_atomic_set_nob(&pollset.in_poll_wait, 0); erts_free(ERTS_ALC_T_TMP, pollres); forget_removed(&pollset); } @@ -2474,16 +2474,16 @@ static int drv_ev_state_cmp(void *des1, void *des2) static void *drv_ev_state_alloc(void *des_tmpl) { ErtsDrvEventState *evstate; - erts_smp_spin_lock(&state_prealloc_lock); + erts_spin_lock(&state_prealloc_lock); if (state_prealloc_first == NULL) { - erts_smp_spin_unlock(&state_prealloc_lock); + erts_spin_unlock(&state_prealloc_lock); evstate = (ErtsDrvEventState *) erts_alloc(ERTS_ALC_T_DRV_EV_STATE, sizeof(ErtsDrvEventState)); } else { evstate = state_prealloc_first; state_prealloc_first = (ErtsDrvEventState *) evstate->hb.next; --num_state_prealloc; - erts_smp_spin_unlock(&state_prealloc_lock); + erts_spin_unlock(&state_prealloc_lock); } /* XXX: Already valid data if prealloced, could ignore template! 
*/ *evstate = *((ErtsDrvEventState *) des_tmpl); @@ -2493,11 +2493,11 @@ static void *drv_ev_state_alloc(void *des_tmpl) static void drv_ev_state_free(void *des) { - erts_smp_spin_lock(&state_prealloc_lock); + erts_spin_lock(&state_prealloc_lock); ((ErtsDrvEventState *) des)->hb.next = &state_prealloc_first->hb; state_prealloc_first = (ErtsDrvEventState *) des; ++num_state_prealloc; - erts_smp_spin_unlock(&state_prealloc_lock); + erts_spin_unlock(&state_prealloc_lock); } #endif @@ -2508,15 +2508,15 @@ ERTS_CIO_EXPORT(erts_init_check_io)(void) ERL_NIF_SELECT_STOP_SCHEDULED | ERL_NIF_SELECT_INVALID_EVENT | ERL_NIF_SELECT_FAILED)) == 0); - erts_smp_atomic_init_nob(&erts_check_io_time, 0); - erts_smp_atomic_init_nob(&pollset.in_poll_wait, 0); + erts_atomic_init_nob(&erts_check_io_time, 0); + erts_atomic_init_nob(&pollset.in_poll_wait, 0); ERTS_CIO_POLL_INIT(); pollset.ps = ERTS_CIO_NEW_POLLSET(); pollset.active_fd.six = 0; pollset.active_fd.eix = 0; - erts_smp_atomic32_init_nob(&pollset.active_fd.no, 0); + erts_atomic32_init_nob(&pollset.active_fd.no, 0); pollset.active_fd.size = ERTS_ACTIVE_FD_INC; pollset.active_fd.array = erts_alloc(ERTS_ALC_T_ACTIVE_FD_ARR, sizeof(ErtsSysFdType)*ERTS_ACTIVE_FD_INC); @@ -2531,20 +2531,20 @@ ERTS_CIO_EXPORT(erts_init_check_io)(void) init_removed_fd_alloc(); pollset.removed_list = NULL; - erts_smp_spinlock_init(&pollset.removed_list_lock, "pollset_rm_list", NIL, + erts_spinlock_init(&pollset.removed_list_lock, "pollset_rm_list", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO); { int i; for (i=0; i<DRV_EV_STATE_LOCK_CNT; i++) { - erts_smp_mtx_init(&drv_ev_state_locks[i].lck, "drv_ev_state", make_small(i), + erts_mtx_init(&drv_ev_state_locks[i].lck, "drv_ev_state", make_small(i), ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO); } } #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS max_fds = ERTS_CIO_POLL_MAX_FDS(); - erts_smp_atomic_init_nob(&drv_ev_state_len, 0); + erts_atomic_init_nob(&drv_ev_state_len, 0); drv_ev_state = NULL; - erts_smp_mtx_init(&drv_ev_state_grow_lock, "drv_ev_state_grow", NIL, + erts_mtx_init(&drv_ev_state_grow_lock, "drv_ev_state_grow", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO); #else { @@ -2555,7 +2555,7 @@ ERTS_CIO_EXPORT(erts_init_check_io)(void) hf.free = &drv_ev_state_free; num_state_prealloc = 0; state_prealloc_first = NULL; - erts_smp_spinlock_init(&state_prealloc_lock,"state_prealloc", NIL, + erts_spinlock_init(&state_prealloc_lock,"state_prealloc", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO); safe_hash_init(ERTS_ALC_T_DRV_EV_STATE, &drv_ev_state_tab, "drv_ev_state_tab", @@ -2582,7 +2582,7 @@ ERTS_CIO_EXPORT(erts_check_io_size)(void) ERTS_CIO_POLL_INFO(pollset.ps, &pi); res = pi.memory_size; #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS - res += sizeof(ErtsDrvEventState) * erts_smp_atomic_read_nob(&drv_ev_state_len); + res += sizeof(ErtsDrvEventState) * erts_atomic_read_nob(&drv_ev_state_len); #else res += safe_hash_table_sz(&drv_ev_state_tab); { @@ -2590,9 +2590,9 @@ ERTS_CIO_EXPORT(erts_check_io_size)(void) safe_hash_get_info(&hi, &drv_ev_state_tab); res += hi.objs * sizeof(ErtsDrvEventState); } - erts_smp_spin_lock(&state_prealloc_lock); + erts_spin_lock(&state_prealloc_lock); res += num_state_prealloc * sizeof(ErtsDrvEventState); - erts_smp_spin_unlock(&state_prealloc_lock); + erts_spin_unlock(&state_prealloc_lock); #endif return res; } @@ -2605,8 +2605,8 @@ ERTS_CIO_EXPORT(erts_check_io_info)(void *proc) Uint sz, *szp, *hp, **hpp, memory_size; Sint i; 
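/*
 * Illustrative sketch of the retry loop that follows (hypothetical
 * stand-ins, C11 atomics): erts_check_io_info() snapshots the check-io
 * counters without locking, re-reads them after gathering the poll
 * info, and retries until both reads agree -- a seqlock-style pattern.
 */
#include <stdatomic.h>

extern atomic_long check_io_time;   /* stand-in for erts_check_io_time */
extern long gather_poll_info(void); /* hypothetical snapshot helper */

static long consistent_info(void)
{
    long info, t0, t1;
    do {
        t0 = atomic_load_explicit(&check_io_time, memory_order_acquire);
        info = gather_poll_info();
        t1 = atomic_load_explicit(&check_io_time, memory_order_seq_cst);
    } while (t0 != t1); /* counter moved: snapshot may be torn, retry */
    return info;
}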
ErtsPollInfo pi; - erts_aint_t cio_time = erts_smp_atomic_read_acqb(&erts_check_io_time); - int active_fds = (int) erts_smp_atomic32_read_acqb(&pollset.active_fd.no); + erts_aint_t cio_time = erts_atomic_read_acqb(&erts_check_io_time); + int active_fds = (int) erts_atomic32_read_acqb(&pollset.active_fd.no); while (1) { erts_aint_t post_cio_time; @@ -2614,8 +2614,8 @@ ERTS_CIO_EXPORT(erts_check_io_info)(void *proc) ERTS_CIO_POLL_INFO(pollset.ps, &pi); - post_cio_time = erts_smp_atomic_read_mb(&erts_check_io_time); - post_active_fds = (int) erts_smp_atomic32_read_acqb(&pollset.active_fd.no); + post_cio_time = erts_atomic_read_mb(&erts_check_io_time); + post_active_fds = (int) erts_atomic32_read_acqb(&pollset.active_fd.no); if (cio_time == post_cio_time && active_fds == post_active_fds) break; cio_time = post_cio_time; @@ -2624,7 +2624,7 @@ ERTS_CIO_EXPORT(erts_check_io_info)(void *proc) memory_size = pi.memory_size; #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS - memory_size += sizeof(ErtsDrvEventState) * erts_smp_atomic_read_nob(&drv_ev_state_len); + memory_size += sizeof(ErtsDrvEventState) * erts_atomic_read_nob(&drv_ev_state_len); #else memory_size += safe_hash_table_sz(&drv_ev_state_tab); { @@ -2632,9 +2632,9 @@ ERTS_CIO_EXPORT(erts_check_io_info)(void *proc) safe_hash_get_info(&hi, &drv_ev_state_tab); memory_size += hi.objs * sizeof(ErtsDrvEventState); } - erts_smp_spin_lock(&state_prealloc_lock); + erts_spin_lock(&state_prealloc_lock); memory_size += num_state_prealloc * sizeof(ErtsDrvEventState); - erts_smp_spin_unlock(&state_prealloc_lock); + erts_spin_unlock(&state_prealloc_lock); #endif hpp = NULL; @@ -3046,7 +3046,7 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(ErtsCheckIoDebugInfo *ciodip) erts_lc_check_exact(NULL, 0); /* No locks should be locked */ #endif - erts_smp_thr_progress_block(); /* stop the world to avoid messy locking */ + erts_thr_progress_block(); /* stop the world to avoid messy locking */ #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS counters.epep = erts_alloc(ERTS_ALC_T_TMP, sizeof(ErtsPollEvents)*max_fds); @@ -3059,7 +3059,7 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(ErtsCheckIoDebugInfo *ciodip) counters.no_driver_event_structs = 0; #ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS - len = erts_smp_atomic_read_nob(&drv_ev_state_len); + len = erts_atomic_read_nob(&drv_ev_state_len); for (fd = 0; fd < len; fd++) { doit_erts_check_io_debug((void *) &drv_ev_state[fd], (void *) &counters); } @@ -3071,7 +3071,7 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(ErtsCheckIoDebugInfo *ciodip) safe_hash_for_each(&drv_ev_state_tab, &doit_erts_check_io_debug, (void *) &counters); #endif - erts_smp_thr_progress_unblock(); + erts_thr_progress_unblock(); ciodip->no_used_fds = counters.used_fds; ciodip->no_driver_select_structs = counters.no_driver_select_structs; diff --git a/erts/emulator/sys/common/erl_check_io.h b/erts/emulator/sys/common/erl_check_io.h index d74eb764d2..777942a473 100644 --- a/erts/emulator/sys/common/erl_check_io.h +++ b/erts/emulator/sys/common/erl_check_io.h @@ -76,11 +76,11 @@ void erts_lcnt_update_cio_locks(int enable); #endif -extern erts_smp_atomic_t erts_check_io_time; +extern erts_atomic_t erts_check_io_time; typedef struct { ErtsPortTaskHandle task; - erts_smp_atomic_t executed_time; + erts_atomic_t executed_time; } ErtsIoTask; ERTS_GLB_INLINE void erts_io_notify_port_task_executed(ErtsPortTaskHandle *pthp); @@ -91,8 +91,8 @@ ERTS_GLB_INLINE void erts_io_notify_port_task_executed(ErtsPortTaskHandle *pthp) { ErtsIoTask *itp = (ErtsIoTask *) (((char *) pthp) - offsetof(ErtsIoTask, task)); - 
erts_aint_t ci_time = erts_smp_atomic_read_acqb(&erts_check_io_time); - erts_smp_atomic_set_relb(&itp->executed_time, ci_time); + erts_aint_t ci_time = erts_atomic_read_acqb(&erts_check_io_time); + erts_atomic_set_relb(&itp->executed_time, ci_time); } #endif diff --git a/erts/emulator/sys/common/erl_mmap.c b/erts/emulator/sys/common/erl_mmap.c index 214ed01c82..a9c6e72c5f 100644 --- a/erts/emulator/sys/common/erl_mmap.c +++ b/erts/emulator/sys/common/erl_mmap.c @@ -24,7 +24,6 @@ #define ERTS_WANT_MEM_MAPPERS #include "sys.h" #include "erl_process.h" -#include "erl_smp.h" #include "atom.h" #include "erl_mmap.h" #include <stddef.h> @@ -62,11 +61,11 @@ (((UWord) (PTR)) - ((UWord) mm->sa.bot) \ < ((UWord) mm->sua.top) - ((UWord) mm->sa.bot)) #define ERTS_MMAP_IN_SUPERALIGNED_AREA(PTR) \ - (ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&mm->mtx)), \ + (ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mm->mtx)), \ (((UWord) (PTR)) - ((UWord) mm->sa.bot) \ < ((UWord) mm->sa.top) - ((UWord) mm->sa.bot))) #define ERTS_MMAP_IN_SUPERUNALIGNED_AREA(PTR) \ - (ERTS_SMP_LC_ASSERT(erts_lc_mtx_is_locked(&mm->mtx)), \ + (ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mm->mtx)), \ (((UWord) (PTR)) - ((UWord) mm->sua.bot) \ < ((UWord) mm->sua.top) - ((UWord) mm->sua.bot))) @@ -199,10 +198,10 @@ static ErtsMMapOp mmap_ops[ERTS_MMAP_OP_RINGBUF_SZ]; #define ERTS_MMAP_OP_LCK(RES, IN_SZ, OUT_SZ) \ do { \ - erts_smp_mtx_lock(&mm->mtx); \ + erts_mtx_lock(&mm->mtx); \ ERTS_MMAP_OP_START((IN_SZ)); \ ERTS_MMAP_OP_END((RES), (OUT_SZ)); \ - erts_smp_mtx_unlock(&mm->mtx); \ + erts_mtx_unlock(&mm->mtx); \ } while (0) #define ERTS_MUNMAP_OP(PTR, SZ) \ @@ -221,9 +220,9 @@ static ErtsMMapOp mmap_ops[ERTS_MMAP_OP_RINGBUF_SZ]; #define ERTS_MUNMAP_OP_LCK(PTR, SZ) \ do { \ - erts_smp_mtx_lock(&mm->mtx); \ + erts_mtx_lock(&mm->mtx); \ ERTS_MUNMAP_OP((PTR), (SZ)); \ - erts_smp_mtx_unlock(&mm->mtx); \ + erts_mtx_unlock(&mm->mtx); \ } while (0) #define ERTS_MREMAP_OP_START(OLD_PTR, OLD_SZ, IN_SZ) \ @@ -249,10 +248,10 @@ static ErtsMMapOp mmap_ops[ERTS_MMAP_OP_RINGBUF_SZ]; #define ERTS_MREMAP_OP_LCK(RES, OLD_PTR, OLD_SZ, IN_SZ, OUT_SZ) \ do { \ - erts_smp_mtx_lock(&mm->mtx); \ + erts_mtx_lock(&mm->mtx); \ ERTS_MREMAP_OP_START((OLD_PTR), (OLD_SZ), (IN_SZ)); \ ERTS_MREMAP_OP_END((RES), (OUT_SZ)); \ - erts_smp_mtx_unlock(&mm->mtx); \ + erts_mtx_unlock(&mm->mtx); \ } while (0) #define ERTS_MMAP_OP_ABORT() \ @@ -321,7 +320,7 @@ struct ErtsMemMapper_ { #if HAVE_MMAP && (!defined(MAP_ANON) && !defined(MAP_ANONYMOUS)) int mmap_fd; #endif - erts_smp_mtx_t mtx; + erts_mtx_t mtx; struct { char *free_list; char *unused_start; @@ -1536,7 +1535,7 @@ erts_mmap(ErtsMemMapper* mm, Uint32 flags, UWord *sizep) ErtsFreeSegDesc *desc; Uint32 superaligned = (ERTS_MMAPFLG_SUPERALIGNED & flags); - erts_smp_mtx_lock(&mm->mtx); + erts_mtx_lock(&mm->mtx); ERTS_MMAP_OP_START(*sizep); @@ -1660,7 +1659,7 @@ erts_mmap(ErtsMemMapper* mm, Uint32 flags, UWord *sizep) } ERTS_MMAP_OP_ABORT(); - erts_smp_mtx_unlock(&mm->mtx); + erts_mtx_unlock(&mm->mtx); } #if ERTS_HAVE_OS_MMAP @@ -1724,13 +1723,13 @@ supercarrier_success: #endif ERTS_MMAP_OP_END(seg, asize); - erts_smp_mtx_unlock(&mm->mtx); + erts_mtx_unlock(&mm->mtx); *sizep = asize; return (void *) seg; supercarrier_reserve_failure: - erts_smp_mtx_unlock(&mm->mtx); + erts_mtx_unlock(&mm->mtx); *sizep = 0; return NULL; } @@ -1760,7 +1759,7 @@ erts_munmap(ErtsMemMapper* mm, Uint32 flags, void *ptr, UWord size) start = (char *) ptr; end = start + size; - erts_smp_mtx_lock(&mm->mtx); + erts_mtx_lock(&mm->mtx); ERTS_MUNMAP_OP(ptr, size); @@ 
-1829,7 +1828,7 @@ erts_munmap(ErtsMemMapper* mm, Uint32 flags, void *ptr, UWord size) if (unres_sz) mm->unreserve_physical(((char *) ptr) + ad_sz, unres_sz); - erts_smp_mtx_unlock(&mm->mtx); + erts_mtx_unlock(&mm->mtx); } } } @@ -1948,12 +1947,12 @@ erts_mremap(ErtsMemMapper* mm, ? ERTS_SUPERALIGNED_CEILING(*sizep) : ERTS_PAGEALIGNED_CEILING(*sizep)); - erts_smp_mtx_lock(&mm->mtx); + erts_mtx_lock(&mm->mtx); if (ERTS_MMAP_IN_SUPERALIGNED_AREA(ptr) ? (!superaligned && lookup_free_seg(&mm->sua.map, asize)) : (superaligned && lookup_free_seg(&mm->sa.map, asize))) { - erts_smp_mtx_unlock(&mm->mtx); + erts_mtx_unlock(&mm->mtx); /* * Segment currently in wrong area (due to a previous memory * shortage), move it to the right area. @@ -2068,7 +2067,7 @@ erts_mremap(ErtsMemMapper* mm, } ERTS_MMAP_OP_ABORT(); - erts_smp_mtx_unlock(&mm->mtx); + erts_mtx_unlock(&mm->mtx); /* Failed to resize... */ } @@ -2090,14 +2089,14 @@ supercarrier_resize_success: #endif ERTS_MREMAP_OP_END(new_ptr, asize); - erts_smp_mtx_unlock(&mm->mtx); + erts_mtx_unlock(&mm->mtx); *sizep = asize; return new_ptr; supercarrier_reserve_failure: ERTS_MREMAP_OP_END(NULL, old_size); - erts_smp_mtx_unlock(&mm->mtx); + erts_mtx_unlock(&mm->mtx); *sizep = old_size; return NULL; @@ -2212,7 +2211,7 @@ erts_mmap_init(ErtsMemMapper* mm, ErtsMMapInit *init, int executable) erts_exit(1, "erts_mmap: Failed to open /dev/zero\n"); #endif - erts_smp_mtx_init(&mm->mtx, "erts_mmap", NIL, + erts_mtx_init(&mm->mtx, "erts_mmap", NIL, ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC); if (is_first_call) { erts_mtx_init(&am.init_mutex, "mmap_init_atoms", NIL, @@ -2407,7 +2406,7 @@ Eterm erts_mmap_info(ErtsMemMapper* mm, Eterm res = THE_NON_VALUE; if (!hpp) { - erts_smp_mtx_lock(&mm->mtx); + erts_mtx_lock(&mm->mtx); emis->sizes[0] = mm->size.supercarrier.total; emis->sizes[1] = mm->sa.top - mm->sa.bot; emis->sizes[2] = mm->sua.top - mm->sua.bot; @@ -2423,7 +2422,7 @@ Eterm erts_mmap_info(ErtsMemMapper* mm, emis->segs[5] = mm->sua.map.nseg; emis->os_used = mm->size.os.used; - erts_smp_mtx_unlock(&mm->mtx); + erts_mtx_unlock(&mm->mtx); } list[lix] = erts_mmap_info_options(mm, "option ", print_to_p, print_to_arg, @@ -2543,14 +2542,14 @@ Eterm erts_mmap_debug_info(Process* p) Eterm *hp, *hp_end; Uint may_need; - erts_smp_mtx_lock(&mm->mtx); + erts_mtx_lock(&mm->mtx); values[0] = (UWord)mm->sa.bot; values[1] = (UWord)mm->sa.top; values[2] = (UWord)mm->sua.bot; values[3] = (UWord)mm->sua.top; sa_list = build_free_seg_list(p, &mm->sa.map); sua_list = build_free_seg_list(p, &mm->sua.map); - erts_smp_mtx_unlock(&mm->mtx); + erts_mtx_unlock(&mm->mtx); may_need = 4*(2+3+2) + 2*(2+3); hp = HAlloc(p, may_need); diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c index d5b2fa87e4..bf6de9b13a 100644 --- a/erts/emulator/sys/common/erl_mseg.c +++ b/erts/emulator/sys/common/erl_mseg.c @@ -339,11 +339,11 @@ mseg_recreate(ErtsMsegAllctr_t *ma, Uint flags, void *old_seg, UWord old_size, U do { \ if ((MA)->is_thread_safe) \ ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&(MA)->mtx) \ - || erts_smp_thr_progress_is_blocking() \ + || erts_thr_progress_is_blocking() \ || ERTS_IS_CRASH_DUMPING); \ else \ ERTS_LC_ASSERT((MA)->ix == (int) erts_get_scheduler_id() \ - || erts_smp_thr_progress_is_blocking() \ + || erts_thr_progress_is_blocking() \ || ERTS_IS_CRASH_DUMPING); \ } while (0) #else diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c index 4171770f02..7d26839b0f 100644 --- 
a/erts/emulator/sys/common/erl_poll.c +++ b/erts/emulator/sys/common/erl_poll.c @@ -150,9 +150,9 @@ int ERTS_SELECT(int nfds, ERTS_fd_set *readfds, ERTS_fd_set *writefds, #define ERTS_POLL_COALESCE_KP_RES (ERTS_POLL_USE_KQUEUE || ERTS_POLL_USE_EPOLL) #define ERTS_POLLSET_LOCK(PS) \ - erts_smp_mtx_lock(&(PS)->mtx) + erts_mtx_lock(&(PS)->mtx) #define ERTS_POLLSET_UNLOCK(PS) \ - erts_smp_mtx_unlock(&(PS)->mtx) + erts_mtx_unlock(&(PS)->mtx) #define ERTS_POLLSET_SET_POLLED_CHK(PS) \ ((int) erts_atomic32_xchg_nob(&(PS)->polled, (erts_aint32_t) 1)) @@ -163,11 +163,11 @@ int ERTS_SELECT(int nfds, ERTS_fd_set *readfds, ERTS_fd_set *writefds, #define ERTS_POLLSET_SET_HAVE_UPDATE_REQUESTS(PS) \ - erts_smp_atomic32_set_nob(&(PS)->have_update_requests, (erts_aint32_t) 1) + erts_atomic32_set_nob(&(PS)->have_update_requests, (erts_aint32_t) 1) #define ERTS_POLLSET_UNSET_HAVE_UPDATE_REQUESTS(PS) \ - erts_smp_atomic32_set_nob(&(PS)->have_update_requests, (erts_aint32_t) 0) + erts_atomic32_set_nob(&(PS)->have_update_requests, (erts_aint32_t) 0) #define ERTS_POLLSET_HAVE_UPDATE_REQUESTS(PS) \ - ((int) erts_smp_atomic32_read_nob(&(PS)->have_update_requests)) + ((int) erts_atomic32_read_nob(&(PS)->have_update_requests)) #if ERTS_POLL_USE_FALLBACK # if ERTS_POLL_USE_POLL @@ -232,7 +232,7 @@ struct ErtsPollSet_ { ErtsPollSet next; int internal_fd_limit; ErtsFdStatus *fds_status; - erts_smp_atomic_t no_of_user_fds; + erts_atomic_t no_of_user_fds; int fds_status_len; #if ERTS_POLL_USE_KERNEL_POLL int kp_fd; @@ -263,9 +263,9 @@ struct ErtsPollSet_ { #endif ErtsPollSetUpdateRequestsBlock update_requests; ErtsPollSetUpdateRequestsBlock *curr_upd_req_block; - erts_smp_atomic32_t have_update_requests; + erts_atomic32_t have_update_requests; erts_atomic32_t polled; - erts_smp_mtx_t mtx; + erts_mtx_t mtx; int wake_fds[2]; #if ERTS_POLL_USE_TIMERFD int timer_fd; @@ -276,9 +276,9 @@ struct ErtsPollSet_ { erts_atomic32_t wakeup_state; erts_atomic64_t timeout_time; #ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS - erts_smp_atomic_t no_avoided_wakeups; - erts_smp_atomic_t no_avoided_interrupts; - erts_smp_atomic_t no_interrupt_timed; + erts_atomic_t no_avoided_wakeups; + erts_atomic_t no_avoided_interrupts; + erts_atomic_t no_interrupt_timed; #endif }; @@ -288,7 +288,7 @@ static void fatal_error_async_signal_safe(char *error_str); static int max_fds = -1; static ErtsPollSet pollsets; -static erts_smp_mtx_t pollsets_lock; +static erts_mtx_t pollsets_lock; #if ERTS_POLL_USE_POLL @@ -955,7 +955,7 @@ write_batch_buf(ErtsPollSet ps, ErtsPollBatchBuf *bbp) ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_USEFLBCK; ASSERT(ps->fds_status[fd].used_events); ps->fds_status[fd].used_events = 0; - erts_smp_atomic_dec_nob(&ps->no_of_user_fds); + erts_atomic_dec_nob(&ps->no_of_user_fds); update_fallback_pollset(ps, fd); ASSERT(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK); break; @@ -1005,11 +1005,11 @@ batch_update_pollset(ErtsPollSet ps, int fd, ErtsPollBatchBuf *bbp) events = ERTS_POLL_EV_E2N(ps->fds_status[fd].events); if (!events) { buf[buf_len].events = POLLREMOVE; - erts_smp_atomic_dec_nob(&ps->no_of_user_fds); + erts_atomic_dec_nob(&ps->no_of_user_fds); } else if (!ps->fds_status[fd].used_events) { buf[buf_len].events = events; - erts_smp_atomic_inc_nob(&ps->no_of_user_fds); + erts_atomic_inc_nob(&ps->no_of_user_fds); } else { if ((ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_RST) @@ -1099,12 +1099,12 @@ batch_update_pollset(ErtsPollSet ps, int fd, ErtsPollBatchBuf *bbp) } if (used_events) { if (!events) { - 
-               erts_smp_atomic_dec_nob(&ps->no_of_user_fds);
+               erts_atomic_dec_nob(&ps->no_of_user_fds);
            }
        }
        else {
            if (events)
-               erts_smp_atomic_inc_nob(&ps->no_of_user_fds);
+               erts_atomic_inc_nob(&ps->no_of_user_fds);
        }
        ASSERT((events & ~(ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) == 0);
        ASSERT((used_events & ~(ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) == 0);
@@ -1178,7 +1178,7 @@ update_pollset(ErtsPollSet ps, int fd)
            epe.data.fd = epe_templ.data.fd;
            res = epoll_ctl(ps->kp_fd, EPOLL_CTL_DEL, fd, &epe);
        } while (res != 0 && errno == EINTR);
-       erts_smp_atomic_dec_nob(&ps->no_of_user_fds);
+       erts_atomic_dec_nob(&ps->no_of_user_fds);
        ps->fds_status[fd].used_events = 0;
    }
@@ -1186,11 +1186,11 @@ update_pollset(ErtsPollSet ps, int fd)
        /* A note on EPOLL_CTL_DEL: linux kernel versions before 2.6.9
           need a non-NULL event pointer even though it is ignored... */
        op = EPOLL_CTL_DEL;
-       erts_smp_atomic_dec_nob(&ps->no_of_user_fds);
+       erts_atomic_dec_nob(&ps->no_of_user_fds);
    }
    else if (!ps->fds_status[fd].used_events) {
        op = EPOLL_CTL_ADD;
-       erts_smp_atomic_inc_nob(&ps->no_of_user_fds);
+       erts_atomic_inc_nob(&ps->no_of_user_fds);
    }
    else {
        op = EPOLL_CTL_MOD;
@@ -1240,7 +1240,7 @@ update_pollset(ErtsPollSet ps, int fd)
        /* Fall through ... */
    case EPOLL_CTL_ADD: {
        ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_USEFLBCK;
-       erts_smp_atomic_dec_nob(&ps->no_of_user_fds);
+       erts_atomic_dec_nob(&ps->no_of_user_fds);
#if ERTS_POLL_USE_CONCURRENT_UPDATE
        if (!*update_fallback) {
            *update_fallback = 1;
@@ -1328,7 +1328,7 @@ static int update_pollset(ErtsPollSet ps, int fd)
#if ERTS_POLL_USE_FALLBACK
        ASSERT(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK);
#endif
-       erts_smp_atomic_dec_nob(&ps->no_of_user_fds);
+       erts_atomic_dec_nob(&ps->no_of_user_fds);
        last_pix = --ps->no_poll_fds;
        if (pix != last_pix) {
            /* Move last pix to this pix */
@@ -1355,7 +1355,7 @@ static int update_pollset(ErtsPollSet ps, int fd)
        ASSERT(!(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_INFLBCK)
               || fd == ps->kp_fd);
#endif
-       erts_smp_atomic_inc_nob(&ps->no_of_user_fds);
+       erts_atomic_inc_nob(&ps->no_of_user_fds);
        ps->fds_status[fd].pix = pix = ps->no_poll_fds++;
        if (pix >= ps->poll_fds_len)
            grow_poll_fds(ps, pix);
@@ -1407,7 +1407,7 @@ static int update_pollset(ErtsPollSet ps, int fd)

        if (!ps->fds_status[fd].used_events) {
            ASSERT(events);
-           erts_smp_atomic_inc_nob(&ps->no_of_user_fds);
+           erts_atomic_inc_nob(&ps->no_of_user_fds);
#if ERTS_POLL_USE_FALLBACK
            ps->no_select_fds++;
            ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_INFLBCK;
@@ -1415,7 +1415,7 @@ static int update_pollset(ErtsPollSet ps, int fd)
        }
        else if (!events) {
            ASSERT(ps->fds_status[fd].used_events);
-           erts_smp_atomic_dec_nob(&ps->no_of_user_fds);
+           erts_atomic_dec_nob(&ps->no_of_user_fds);
            ps->fds_status[fd].events = events;
#if ERTS_POLL_USE_FALLBACK
            ps->no_select_fds--;
@@ -2139,7 +2139,7 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res)
{
    int res;
    ERTS_MSACC_PUSH_STATE_M();
-   if (erts_smp_atomic_read_nob(&ps->no_of_user_fds) == 0
+   if (erts_atomic_read_nob(&ps->no_of_user_fds) == 0
        && timeout_time == ERTS_POLL_NO_TIMEOUT) {
        /* Nothing to poll and zero timeout; done... */
        return 0;
@@ -2197,7 +2197,7 @@
         * the maximum number of file descriptors in the poll set.
         */
        struct dvpoll poll_res;
-       int nfds = (int) erts_smp_atomic_read_nob(&ps->no_of_user_fds);
+       int nfds = (int) erts_atomic_read_nob(&ps->no_of_user_fds);
        nfds++; /* Wakeup pipe */
        timeout = (int) get_timeout(ps, 1000, timeout_time);
        poll_res.dp_nfds = nfds < max_res ? nfds : max_res;
@@ -2416,10 +2416,10 @@ ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet ps,
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
        else {
            if (ERTS_POLLSET_IS_POLLED(ps))
-               erts_smp_atomic_inc_nob(&ps->no_avoided_wakeups);
-           erts_smp_atomic_inc_nob(&ps->no_avoided_interrupts);
+               erts_atomic_inc_nob(&ps->no_avoided_wakeups);
+           erts_atomic_inc_nob(&ps->no_avoided_interrupts);
        }
-       erts_smp_atomic_inc_nob(&ps->no_interrupt_timed);
+       erts_atomic_inc_nob(&ps->no_interrupt_timed);
#endif
    }
}
@@ -2436,7 +2436,7 @@ ERTS_POLL_EXPORT(erts_poll_max_fds)(void)
void
ERTS_POLL_EXPORT(erts_poll_init)(void)
{
-   erts_smp_mtx_init(&pollsets_lock, "pollsets_lock", NIL,
+   erts_mtx_init(&pollsets_lock, "pollsets_lock", NIL,
        ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO);

    pollsets = NULL;
@@ -2476,7 +2476,7 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
    ps->internal_fd_limit = 0;
    ps->fds_status = NULL;
    ps->fds_status_len = 0;
-   erts_smp_atomic_init_nob(&ps->no_of_user_fds, 0);
+   erts_atomic_init_nob(&ps->no_of_user_fds, 0);
#if ERTS_POLL_USE_KERNEL_POLL
    ps->kp_fd = -1;
#if ERTS_POLL_USE_EPOLL
@@ -2538,9 +2538,9 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
    ps->update_requests.next = NULL;
    ps->update_requests.len = 0;
    ps->curr_upd_req_block = &ps->update_requests;
-   erts_smp_atomic32_init_nob(&ps->have_update_requests, 0);
+   erts_atomic32_init_nob(&ps->have_update_requests, 0);
    erts_atomic32_init_nob(&ps->polled, 0);
-   erts_smp_mtx_init(&ps->mtx, "pollset", NIL, ERTS_LOCK_FLAGS_CATEGORY_IO);
+   erts_mtx_init(&ps->mtx, "pollset", NIL, ERTS_LOCK_FLAGS_CATEGORY_IO);
    erts_atomic32_init_nob(&ps->wakeup_state, (erts_aint32_t) 0);
    create_wakeup_pipe(ps);
#if ERTS_POLL_USE_TIMERFD
@@ -2565,20 +2565,20 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
#endif
    init_timeout_time(ps);
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
-   erts_smp_atomic_init_nob(&ps->no_avoided_wakeups, 0);
-   erts_smp_atomic_init_nob(&ps->no_avoided_interrupts, 0);
-   erts_smp_atomic_init_nob(&ps->no_interrupt_timed, 0);
+   erts_atomic_init_nob(&ps->no_avoided_wakeups, 0);
+   erts_atomic_init_nob(&ps->no_avoided_interrupts, 0);
+   erts_atomic_init_nob(&ps->no_interrupt_timed, 0);
#endif
    handle_update_requests(ps);
#if ERTS_POLL_USE_FALLBACK
    ps->fallback_used = 0;
#endif
-   erts_smp_atomic_set_nob(&ps->no_of_user_fds, 0); /* Don't count wakeup pipe and fallback fd */
+   erts_atomic_set_nob(&ps->no_of_user_fds, 0); /* Don't count wakeup pipe and fallback fd */

-   erts_smp_mtx_lock(&pollsets_lock);
+   erts_mtx_lock(&pollsets_lock);
    ps->next = pollsets;
    pollsets = ps;
-   erts_smp_mtx_unlock(&pollsets_lock);
+   erts_mtx_unlock(&pollsets_lock);

    return ps;
}
@@ -2623,7 +2623,7 @@ ERTS_POLL_EXPORT(erts_poll_destroy_pollset)(ErtsPollSet ps)
            free_update_requests_block(ps, free_urqbp);
        }
    }
-   erts_smp_mtx_destroy(&ps->mtx);
+   erts_mtx_destroy(&ps->mtx);
    if (ps->wake_fds[0] >= 0)
        close(ps->wake_fds[0]);
    if (ps->wake_fds[1] >= 0)
@@ -2633,7 +2633,7 @@ ERTS_POLL_EXPORT(erts_poll_destroy_pollset)(ErtsPollSet ps)
        close(ps->timer_fd);
#endif

-   erts_smp_mtx_lock(&pollsets_lock);
+   erts_mtx_lock(&pollsets_lock);
    if (ps == pollsets)
        pollsets = pollsets->next;
    else {
@@ -2643,7 +2643,7 @@ ERTS_POLL_EXPORT(erts_poll_destroy_pollset)(ErtsPollSet ps)
        ASSERT(ps == prev_ps->next);
        prev_ps->next = ps->next;
    }
-   erts_smp_mtx_unlock(&pollsets_lock);
+   erts_mtx_unlock(&pollsets_lock);

    erts_free(ERTS_ALC_T_POLLSET, (void *) ps);
}
@@ -2728,7 +2728,7 @@ ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip)

    pip->memory_size = size;

-   pip->poll_set_size = (int) erts_smp_atomic_read_nob(&ps->no_of_user_fds);
+   pip->poll_set_size = (int) erts_atomic_read_nob(&ps->no_of_user_fds);
    pip->poll_set_size++; /* Wakeup pipe */
#if ERTS_POLL_USE_TIMERFD
    pip->poll_set_size++; /* timerfd */
@@ -2779,9 +2779,9 @@ ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip)
    pip->max_fds = max_fds;

#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
-   pip->no_avoided_wakeups = erts_smp_atomic_read_nob(&ps->no_avoided_wakeups);
-   pip->no_avoided_interrupts = erts_smp_atomic_read_nob(&ps->no_avoided_interrupts);
-   pip->no_interrupt_timed = erts_smp_atomic_read_nob(&ps->no_interrupt_timed);
+   pip->no_avoided_wakeups = erts_atomic_read_nob(&ps->no_avoided_wakeups);
+   pip->no_avoided_interrupts = erts_atomic_read_nob(&ps->no_avoided_interrupts);
+   pip->no_interrupt_timed = erts_atomic_read_nob(&ps->no_interrupt_timed);
#endif

    ERTS_POLLSET_UNLOCK(ps);
@@ -2980,12 +2980,12 @@ static void erts_lcnt_enable_pollset_lock_count(ErtsPollSet pollset, int enable)
void ERTS_POLL_EXPORT(erts_lcnt_update_pollset_locks)(int enable) {
    ErtsPollSet iterator;

-   erts_smp_mtx_lock(&pollsets_lock);
+   erts_mtx_lock(&pollsets_lock);

    for(iterator = pollsets; iterator != NULL; iterator = iterator->next) {
        erts_lcnt_enable_pollset_lock_count(iterator, enable);
    }

-   erts_smp_mtx_unlock(&pollsets_lock);
+   erts_mtx_unlock(&pollsets_lock);
}
#endif
diff --git a/erts/emulator/sys/common/erl_sys_common_misc.c b/erts/emulator/sys/common/erl_sys_common_misc.c
index 79f87eb3a9..09237c81ce 100644
--- a/erts/emulator/sys/common/erl_sys_common_misc.c
+++ b/erts/emulator/sys/common/erl_sys_common_misc.c
@@ -51,7 +51,7 @@
 * (often) exist two versions of erl_check_io (kernel-poll and
 * non-kernel-poll), and we dont want two versions of this variable.
 */
-erts_smp_atomic_t erts_check_io_time;
+erts_atomic_t erts_check_io_time;

/* Written once and only once */
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index 83edca1c2f..237614b0fb 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -63,7 +63,7 @@
#include "erl_mseg.h"

extern char **environ;
-erts_smp_rwmtx_t environ_rwmtx;
+erts_rwmtx_t environ_rwmtx;

#define MAX_VSIZE 16 /* Max number of entries allowed in an I/O
                      * vector sock_sendv().
@@ -92,11 +92,11 @@ extern void erts_sys_init_float(void);
static int debug_log = 0;
#endif

-static erts_smp_atomic32_t have_prepared_crash_dump;
+static erts_atomic32_t have_prepared_crash_dump;
#define ERTS_PREPARED_CRASH_DUMP \
-  ((int) erts_smp_atomic32_xchg_nob(&have_prepared_crash_dump, 1))
+  ((int) erts_atomic32_xchg_nob(&have_prepared_crash_dump, 1))

-erts_smp_atomic_t sys_misc_mem_sz;
+erts_atomic_t sys_misc_mem_sz;

static void smp_sig_notify(int signum);
static int sig_notify_fds[2] = {-1, -1};
@@ -118,11 +118,11 @@ static int max_files = -1;

/*
 * a few variables used by the break handler
 */
-erts_smp_atomic32_t erts_break_requested;
+erts_atomic32_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
-  erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 1)
+  erts_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
-  erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 0)
+  erts_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 0)

/* set early so the break handler has access to initial mode */
@@ -262,7 +262,7 @@ Uint
erts_sys_misc_mem_sz(void)
{
    Uint res = ERTS_CHK_IO_SZ();
-   res += erts_smp_atomic_read_mb(&sys_misc_mem_sz);
+   res += erts_atomic_read_mb(&sys_misc_mem_sz);
    return res;
}
@@ -399,11 +399,11 @@ erts_sys_pre_init(void)

    erts_init_sys_time_sup();

-   erts_smp_atomic32_init_nob(&erts_break_requested, 0);
-   erts_smp_atomic32_init_nob(&have_prepared_crash_dump, 0);
+   erts_atomic32_init_nob(&erts_break_requested, 0);
+   erts_atomic32_init_nob(&have_prepared_crash_dump, 0);

-   erts_smp_atomic_init_nob(&sys_misc_mem_sz, 0);
+   erts_atomic_init_nob(&sys_misc_mem_sz, 0);

    {
        /*
@@ -605,7 +605,7 @@ static void signal_notify_requested(Eterm type) {
        erts_queue_message(p, locks, msgp, msg, am_system);

        if (locks)
-           erts_smp_proc_unlock(p, locks);
+           erts_proc_unlock(p, locks);
        erts_proc_dec_refc(p);
    }
}
@@ -869,7 +869,7 @@ void os_version(int *pMajor, int *pMinor, int *pBuild) {

void init_getenv_state(GETENV_STATE *state)
{
-   erts_smp_rwmtx_rlock(&environ_rwmtx);
+   erts_rwmtx_rlock(&environ_rwmtx);
    *state = NULL;
}
@@ -878,7 +878,7 @@ char *getenv_string(GETENV_STATE *state0)
    char **state = (char **) *state0;
    char *cp;

-   ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rlocked(&environ_rwmtx));
+   ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&environ_rwmtx));

    if (state == NULL)
        state = environ;
@@ -892,7 +892,7 @@ char *getenv_string(GETENV_STATE *state0)
void fini_getenv_state(GETENV_STATE *state)
{
    *state = NULL;
-   erts_smp_rwmtx_runlock(&environ_rwmtx);
+   erts_rwmtx_runlock(&environ_rwmtx);
}

void erts_do_break_handling(void)
@@ -905,7 +905,7 @@ void erts_do_break_handling(void)
     * therefore, make sure that all threads but this one are blocked before
     * proceeding!
     */
-   erts_smp_thr_progress_block();
+   erts_thr_progress_block();

    /* during break we revert to initial settings */
    /* this is done differently for oldshell */
@@ -933,7 +933,7 @@ void erts_do_break_handling(void)
        tcsetattr(0,TCSANOW,&temp_mode);
    }

-   erts_smp_thr_progress_unblock();
+   erts_thr_progress_unblock();
}
@@ -963,14 +963,14 @@ erts_sys_putenv(char *key, char *value)
    env = erts_alloc(ERTS_ALC_T_TMP, need);
#else
    env = erts_alloc(ERTS_ALC_T_PUTENV_STR, need);
-   erts_smp_atomic_add_nob(&sys_misc_mem_sz, need);
+   erts_atomic_add_nob(&sys_misc_mem_sz, need);
#endif
    strcpy(env,key);
    strcat(env,"=");
    strcat(env,value);
-   erts_smp_rwmtx_rwlock(&environ_rwmtx);
+   erts_rwmtx_rwlock(&environ_rwmtx);
    res = putenv(env);
-   erts_smp_rwmtx_rwunlock(&environ_rwmtx);
+   erts_rwmtx_rwunlock(&environ_rwmtx);
#ifdef HAVE_COPYING_PUTENV
    erts_free(ERTS_ALC_T_TMP, env);
#endif
@@ -1017,9 +1017,9 @@ int
erts_sys_getenv(char *key, char *value, size_t *size)
{
    int res;
-   erts_smp_rwmtx_rlock(&environ_rwmtx);
+   erts_rwmtx_rlock(&environ_rwmtx);
    res = erts_sys_getenv__(key, value, size);
-   erts_smp_rwmtx_runlock(&environ_rwmtx);
+   erts_rwmtx_runlock(&environ_rwmtx);
    return res;
}
@@ -1027,9 +1027,9 @@ int
erts_sys_unsetenv(char *key)
{
    int res;
-   erts_smp_rwmtx_rwlock(&environ_rwmtx);
+   erts_rwmtx_rwlock(&environ_rwmtx);
    res = unsetenv(key);
-   erts_smp_rwmtx_rwunlock(&environ_rwmtx);
+   erts_rwmtx_rwunlock(&environ_rwmtx);
    return res;
}
@@ -1200,12 +1200,12 @@ void
erl_sys_schedule(int runnable)
{
    ERTS_CHK_IO(!runnable);
-   ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
+   ERTS_LC_ASSERT(!erts_thr_progress_is_blocking());
}

-static erts_smp_tid_t sig_dispatcher_tid;
+static erts_tid_t sig_dispatcher_tid;

static void
smp_sig_notify(int signum)
@@ -1279,7 +1279,7 @@ signal_dispatcher_thread_func(void *unused)
            }
            signal_notify_requested(signal);
        }
-       ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
+       ERTS_LC_ASSERT(!erts_thr_progress_is_blocking());
    }
    return NULL;
}
@@ -1287,7 +1287,7 @@ signal_dispatcher_thread_func(void *unused)
static void
init_smp_sig_notify(void)
{
-   erts_smp_thr_opts_t thr_opts = ERTS_SMP_THR_OPTS_DEFAULT_INITER;
+   erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER;
    thr_opts.detached = 1;
    thr_opts.name = "sys_sig_dispatcher";
@@ -1299,7 +1299,7 @@ init_smp_sig_notify(void)
    }
    /* Start signal handler thread */
-   erts_smp_thr_create(&sig_dispatcher_tid,
+   erts_thr_create(&sig_dispatcher_tid,
                        signal_dispatcher_thread_func,
                        NULL,
                        &thr_opts);
@@ -1425,7 +1425,7 @@ erl_sys_args(int* argc, char** argv)
{
    int i, j;

-   erts_smp_rwmtx_init(&environ_rwmtx, "environ", NIL,
+   erts_rwmtx_init(&environ_rwmtx, "environ", NIL,
        ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);

    i = 1;
diff --git a/erts/emulator/sys/unix/sys_drivers.c b/erts/emulator/sys/unix/sys_drivers.c
index c451a0a674..7c9a532fed 100644
--- a/erts/emulator/sys/unix/sys_drivers.c
+++ b/erts/emulator/sys/unix/sys_drivers.c
@@ -56,9 +56,9 @@
#include "erl_threads.h"

extern char **environ;
-extern erts_smp_rwmtx_t environ_rwmtx;
+extern erts_rwmtx_t environ_rwmtx;

-extern erts_smp_atomic_t sys_misc_mem_sz;
+extern erts_atomic_t sys_misc_mem_sz;

static Eterm forker_port;
@@ -343,7 +343,7 @@ static int set_blocking_data(ErtsSysDriverData *dd) {

    dd->blocking = erts_alloc(ERTS_ALC_T_SYS_BLOCKING, sizeof(ErtsSysBlocking));
-   erts_smp_atomic_add_nob(&sys_misc_mem_sz, sizeof(ErtsSysBlocking));
+   erts_atomic_add_nob(&sys_misc_mem_sz, sizeof(ErtsSysBlocking));

    dd->blocking->pdl = driver_pdl_create(dd->port_num);
    dd->blocking->res = 0;
@@ -386,7 +386,7 @@ create_driver_data(ErlDrvPort port_num,
        size += sizeof(ErtsSysFdData);

    data = erts_alloc(ERTS_ALC_T_DRV_TAB,size);
-   erts_smp_atomic_add_nob(&sys_misc_mem_sz, size);
+   erts_atomic_add_nob(&sys_misc_mem_sz, size);

    driver_data = (ErtsSysDriverData*)data;
    data += sizeof(*driver_data);
@@ -452,7 +452,7 @@ static char **build_unix_environment(char *block)
    char **cpp;
    char** old_env;

-   ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rlocked(&environ_rwmtx));
+   ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&environ_rwmtx));

    cp = block;
    len = 0;
@@ -600,12 +600,12 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name,
        len = CMD_LINE_PREFIX_STR_SZ + len + 1;
    }

-   erts_smp_rwmtx_rlock(&environ_rwmtx);
+   erts_rwmtx_rlock(&environ_rwmtx);

    if (opts->envir == NULL) {
        new_environ = environ;
    } else if ((new_environ = build_unix_environment(opts->envir)) == NULL) {
-       erts_smp_rwmtx_runlock(&environ_rwmtx);
+       erts_rwmtx_runlock(&environ_rwmtx);
        close_pipes(ifd, ofd);
        erts_free(ERTS_ALC_T_TMP, (void *) cmd_line);
        errno = ENOMEM;
@@ -621,7 +621,7 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name,
        erts_free(ERTS_ALC_T_TMP, (void *) cmd_line);
        if (new_environ != environ)
            erts_free(ERTS_ALC_T_ENVIRONMENT, (void *) new_environ);
-       erts_smp_rwmtx_runlock(&environ_rwmtx);
+       erts_rwmtx_runlock(&environ_rwmtx);
        errno = err;
        return ERL_DRV_ERROR_ERRNO;
    }
@@ -661,7 +661,7 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name,

    if (!io_vector) {
        close_pipes(ifd, ofd);
-       erts_smp_rwmtx_runlock(&environ_rwmtx);
+       erts_rwmtx_runlock(&environ_rwmtx);
        erts_free(ERTS_ALC_T_TMP, (void *) cmd_line);
        if (new_environ != environ)
            erts_free(ERTS_ALC_T_ENVIRONMENT, (void *) new_environ);
@@ -746,7 +746,7 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name,
        erts_free(ERTS_ALC_T_TMP, io_vector);
        if (new_environ != environ)
            erts_free(ERTS_ALC_T_ENVIRONMENT, (void *) new_environ);
-       erts_smp_rwmtx_runlock(&environ_rwmtx);
+       erts_rwmtx_runlock(&environ_rwmtx);
        erts_free(ERTS_ALC_T_TMP, (void *) cmd_line);
        errno = err;
        return ERL_DRV_ERROR_ERRNO;
@@ -775,7 +775,7 @@ static ErlDrvData spawn_start(ErlDrvPort port_num, char* name,

    if (new_environ != environ)
        erts_free(ERTS_ALC_T_ENVIRONMENT, (void *) new_environ);
-   erts_smp_rwmtx_runlock(&environ_rwmtx);
+   erts_rwmtx_runlock(&environ_rwmtx);

    dd = create_driver_data(port_num, ifd[0], ofd[1], opts->packet_bytes,
                            DO_WRITE | DO_READ, opts->exit_status,
@@ -1048,8 +1048,8 @@ static void clear_fd_data(ErtsSysFdData *fdd)
{
    if (fdd->sz > 0) {
        erts_free(ERTS_ALC_T_FD_ENTRY_BUF, (void *) fdd->buf);
-       ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= fdd->sz);
-       erts_smp_atomic_add_nob(&sys_misc_mem_sz, -1*fdd->sz);
+       ASSERT(erts_atomic_read_nob(&sys_misc_mem_sz) >= fdd->sz);
+       erts_atomic_add_nob(&sys_misc_mem_sz, -1*fdd->sz);
    }
    fdd->buf = NULL;
    fdd->sz = 0;
@@ -1088,7 +1088,7 @@ static void fd_stop(ErlDrvData ev)  /* Does not close the fds */
    }

    erts_free(ERTS_ALC_T_DRV_TAB, dd);
-   erts_smp_atomic_add_nob(&sys_misc_mem_sz, -sz);
+   erts_atomic_add_nob(&sys_misc_mem_sz, -sz);
}

static void fd_flush(ErlDrvData ev)
@@ -1384,7 +1384,7 @@ static void ready_input(ErlDrvData e, ErlDrvEvent ready_fd)

    if (dd->ifd->fd < 0) {
        driver_select(port_num, abs(dd->ifd->fd), ERL_DRV_READ|ERL_DRV_USE, 0);
-       erts_smp_atomic_add_nob(&sys_misc_mem_sz, -sizeof(ErtsSysFdData));
+       erts_atomic_add_nob(&sys_misc_mem_sz, -sizeof(ErtsSysFdData));
        dd->ifd = NULL;
    }
@@ -1490,7 +1490,7 @@ static void ready_input(ErlDrvData e, ErlDrvEvent ready_fd)
                    port_inp_failure(dd, -1);
                }
                else {
-                   erts_smp_atomic_add_nob(&sys_misc_mem_sz, h);
+                   erts_atomic_add_nob(&sys_misc_mem_sz, h);
                    sys_memcpy(buf, cpos, bytes_left);
                    dd->ifd->buf = buf;
                    dd->ifd->sz = h;
@@ -1525,7 +1525,7 @@ static void ready_output(ErlDrvData e, ErlDrvEvent ready_fd)
               should close the output fd as soon as the command has
               been sent. */
            driver_select(ix, ready_fd, ERL_DRV_WRITE|ERL_DRV_USE, 0);
-           erts_smp_atomic_add_nob(&sys_misc_mem_sz, -sizeof(ErtsSysFdData));
+           erts_atomic_add_nob(&sys_misc_mem_sz, -sizeof(ErtsSysFdData));
            dd->ofd = NULL;
        }
        if (dd->terminating)
diff --git a/erts/emulator/sys/unix/sys_time.c b/erts/emulator/sys/unix/sys_time.c
index 60afb3b672..ef05380d17 100644
--- a/erts/emulator/sys/unix/sys_time.c
+++ b/erts/emulator/sys/unix/sys_time.c
@@ -160,7 +160,7 @@ struct sys_time_internal_state_read_mostly__ {
#ifdef ERTS_SYS_TIME_INTERNAL_STATE_WRITE_FREQ__
struct sys_time_internal_state_write_freq__ {
-   erts_smp_mtx_t mtx;
+   erts_mtx_t mtx;
#if defined(__linux__) && defined(OS_MONOTONIC_TIME_USING_CLOCK_GETTIME)
    ErtsMonotonicTime last_delivered;
#endif
@@ -304,7 +304,7 @@ sys_init_time(ErtsSysInitTimeResult *init_resp)
        erts_sys_time_data__.r.o.os_times = clock_gettime_times_verified;
#endif
-       erts_smp_mtx_init(&internal_state.w.f.mtx, "os_monotonic_time", NIL,
+       erts_mtx_init(&internal_state.w.f.mtx, "os_monotonic_time", NIL,
            ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO);
        internal_state.w.f.last_delivered = clock_gettime_monotonic();
@@ -525,12 +525,12 @@ static ErtsMonotonicTime clock_gettime_monotonic_verified(void)
    mtime = (ErtsMonotonicTime) posix_clock_gettime(MONOTONIC_CLOCK_ID,
                                                    MONOTONIC_CLOCK_ID_STR);

-   erts_smp_mtx_lock(&internal_state.w.f.mtx);
+   erts_mtx_lock(&internal_state.w.f.mtx);
    if (mtime < internal_state.w.f.last_delivered)
        mtime = internal_state.w.f.last_delivered;
    else
        internal_state.w.f.last_delivered = mtime;
-   erts_smp_mtx_unlock(&internal_state.w.f.mtx);
+   erts_mtx_unlock(&internal_state.w.f.mtx);

    return mtime;
}
@@ -547,12 +547,12 @@ static void clock_gettime_times_verified(ErtsMonotonicTime *mtimep,
                                         WALL_CLOCK_ID_STR,
                                         stimep);

-   erts_smp_mtx_lock(&internal_state.w.f.mtx);
+   erts_mtx_lock(&internal_state.w.f.mtx);
    if (*mtimep < internal_state.w.f.last_delivered)
        *mtimep = internal_state.w.f.last_delivered;
    else
        internal_state.w.f.last_delivered = *mtimep;
-   erts_smp_mtx_unlock(&internal_state.w.f.mtx);
+   erts_mtx_unlock(&internal_state.w.f.mtx);
}

#endif /* defined(OS_SYSTEM_TIME_USING_CLOCK_GETTIME) */
diff --git a/erts/emulator/sys/win32/erl_poll.c b/erts/emulator/sys/win32/erl_poll.c
index 41ff186c44..0bd43bb4fb 100644
--- a/erts/emulator/sys/win32/erl_poll.c
+++ b/erts/emulator/sys/win32/erl_poll.c
@@ -286,26 +286,26 @@ struct ErtsPollSet_ {
    CRITICAL_SECTION standby_crit;  /* CS to guard the counter */
    HANDLE standby_wait_event;      /* Event signalled when counte == 0 */
    erts_atomic32_t wakeup_state;
-   erts_smp_mtx_t mtx;
+   erts_mtx_t mtx;
    erts_atomic64_t timeout_time;
};

#define ERTS_POLLSET_LOCK(PS) \
-  erts_smp_mtx_lock(&(PS)->mtx)
+  erts_mtx_lock(&(PS)->mtx)
#define ERTS_POLLSET_UNLOCK(PS) \
-  erts_smp_mtx_unlock(&(PS)->mtx)
+  erts_mtx_unlock(&(PS)->mtx)

/*
 * Communication with sys_interrupt
 */

-extern erts_smp_atomic32_t erts_break_requested;
+extern erts_atomic32_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
-  erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 1)
+  erts_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
-  erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 0)
+  erts_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 0)

static erts_mtx_t break_waiter_lock;
static HANDLE break_happened_event;
@@ -1340,7 +1340,7 @@ ErtsPollSet erts_poll_create_pollset(void)
    ps->restore_events = 0;

    erts_atomic32_init_nob(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
-   erts_smp_mtx_init(&ps->mtx, "pollset", NIL, ERTS_LOCK_FLAGS_CATEGORY_IO);
+   erts_mtx_init(&ps->mtx, "pollset", NIL, ERTS_LOCK_FLAGS_CATEGORY_IO);
    init_timeout_time(ps);

    HARDTRACEF(("Out erts_poll_create_pollset"));
@@ -1370,7 +1370,7 @@ void erts_poll_destroy_pollset(ErtsPollSet ps)
    CloseHandle(ps->event_io_ready);
    CloseHandle(ps->standby_wait_event);
    ERTS_POLLSET_UNLOCK(ps);
-   erts_smp_mtx_destroy(&ps->mtx);
+   erts_mtx_destroy(&ps->mtx);
    SEL_FREE(ERTS_ALC_T_POLLSET, (void *) ps);
    HARDTRACEF(("Out erts_poll_destroy_pollset"));
}
diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c
index 4c06535e4e..b23dbecbac 100644
--- a/erts/emulator/sys/win32/sys.c
+++ b/erts/emulator/sys/win32/sys.c
@@ -80,9 +80,9 @@ static int application_type(const wchar_t* originalName, wchar_t fullPath[MAX_PA

HANDLE erts_service_event;

-static erts_smp_tsd_key_t win32_errstr_key;
+static erts_tsd_key_t win32_errstr_key;

-static erts_smp_atomic_t pipe_creation_counter;
+static erts_atomic_t pipe_creation_counter;

/* Results from application_type(_w) is one of */
#define APPL_NONE 0
@@ -141,7 +141,7 @@ static BOOL (WINAPI *fpCancelIoEx)(HANDLE,LPOVERLAPPED);
   - call erl_start() to parse arguments and do other init
*/

-static erts_smp_atomic_t sys_misc_mem_sz;
+static erts_atomic_t sys_misc_mem_sz;

HMODULE beam_module = NULL;
@@ -192,7 +192,7 @@ Uint
erts_sys_misc_mem_sz(void)
{
    Uint res = (Uint) erts_check_io_size();
-   res += (Uint) erts_smp_atomic_read_mb(&sys_misc_mem_sz);
+   res += (Uint) erts_atomic_read_mb(&sys_misc_mem_sz);
    return res;
}
@@ -659,7 +659,7 @@ new_driver_data(ErlDrvPort port_num, int packet_bytes, int wait_objs_required, i
    dp->inbuf = DRV_BUF_ALLOC(dp->inBufSize);
    if (dp->inbuf == NULL)
        goto buf_alloc_error;
-   erts_smp_atomic_add_nob(&sys_misc_mem_sz, dp->inBufSize);
+   erts_atomic_add_nob(&sys_misc_mem_sz, dp->inBufSize);
    dp->outBufSize = 0;
    dp->outbuf = NULL;
    dp->port_num = port_num;
@@ -729,8 +729,8 @@ release_driver_data(DriverData* dp)
    }

    if (dp->inbuf != NULL) {
-       ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= dp->inBufSize);
-       erts_smp_atomic_add_nob(&sys_misc_mem_sz, -1*dp->inBufSize);
+       ASSERT(erts_atomic_read_nob(&sys_misc_mem_sz) >= dp->inBufSize);
+       erts_atomic_add_nob(&sys_misc_mem_sz, -1*dp->inBufSize);
        DRV_BUF_FREE(dp->inbuf);
        dp->inBufSize = 0;
        dp->inbuf = NULL;
@@ -738,8 +738,8 @@ release_driver_data(DriverData* dp)
    ASSERT(dp->inBufSize == 0);

    if (dp->outbuf != NULL) {
-       ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= dp->outBufSize);
-       erts_smp_atomic_add_nob(&sys_misc_mem_sz, -1*dp->outBufSize);
+       ASSERT(erts_atomic_read_nob(&sys_misc_mem_sz) >= dp->outBufSize);
+       erts_atomic_add_nob(&sys_misc_mem_sz, -1*dp->outBufSize);
        DRV_BUF_FREE(dp->outbuf);
        dp->outBufSize = 0;
        dp->outbuf = NULL;
@@ -1737,7 +1737,7 @@ static int create_pipe(HANDLE *phRead, HANDLE *phWrite, BOOL inheritRead, BOOL o
     * Otherwise, create named pipes.
     */

-   calls = (UWord) erts_smp_atomic_inc_read_nob(&pipe_creation_counter);
+   calls = (UWord) erts_atomic_inc_read_nob(&pipe_creation_counter);
    erts_snprintf(pipe_name, sizeof(pipe_name), "\\\\.\\pipe\\erlang44_%d_%bpu", getpid(), calls);
@@ -2422,7 +2422,7 @@ output(ErlDrvData drv_data, char* buf, ErlDrvSizeT len)
    }

    dp->outBufSize = pb+len;
-   erts_smp_atomic_add_nob(&sys_misc_mem_sz, dp->outBufSize);
+   erts_atomic_add_nob(&sys_misc_mem_sz, dp->outBufSize);

    /*
     * Store header bytes (if any).
@@ -2451,8 +2451,8 @@ output(ErlDrvData drv_data, char* buf, ErlDrvSizeT len)
    } else {
        dp->out.ov.Offset += pb+len; /* For vanilla driver. */
        /* XXX OffsetHigh should be changed too. */
-       ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= dp->outBufSize);
-       erts_smp_atomic_add_nob(&sys_misc_mem_sz, -1*dp->outBufSize);
+       ASSERT(erts_atomic_read_nob(&sys_misc_mem_sz) >= dp->outBufSize);
+       erts_atomic_add_nob(&sys_misc_mem_sz, -1*dp->outBufSize);
        DRV_BUF_FREE(dp->outbuf);
        dp->outBufSize = 0;
        dp->outbuf = NULL;
@@ -2563,8 +2563,8 @@ ready_input(ErlDrvData drv_data, ErlDrvEvent ready_event)
                error = ERROR_NOT_ENOUGH_MEMORY;
                break; /* Break out of loop into error handler. */
            }
-           ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= dp->inBufSize);
-           erts_smp_atomic_add_nob(&sys_misc_mem_sz,
+           ASSERT(erts_atomic_read_nob(&sys_misc_mem_sz) >= dp->inBufSize);
+           erts_atomic_add_nob(&sys_misc_mem_sz,
                                dp->totalNeeded - dp->inBufSize);
            dp->inBufSize = dp->totalNeeded;
            dp->inbuf = new_buf;
@@ -2663,8 +2663,8 @@ ready_output(ErlDrvData drv_data, ErlDrvEvent ready_event)
               write... */
            return;
        }
-       ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= dp->outBufSize);
-       erts_smp_atomic_add_nob(&sys_misc_mem_sz, -1*dp->outBufSize);
+       ASSERT(erts_atomic_read_nob(&sys_misc_mem_sz) >= dp->outBufSize);
+       erts_atomic_add_nob(&sys_misc_mem_sz, -1*dp->outBufSize);
        DRV_BUF_FREE(dp->outbuf);
        dp->outBufSize = 0;
        dp->outbuf = NULL;
@@ -2812,7 +2812,7 @@ Preload* sys_preloaded(void)
                              (num_preloaded+1)*sizeof(Preload));
    res_name = erts_alloc(ERTS_ALC_T_PRELOADED,
                          (num_preloaded+1)*sizeof(unsigned));
-   erts_smp_atomic_add_nob(&sys_misc_mem_sz,
+   erts_atomic_add_nob(&sys_misc_mem_sz,
                        (num_preloaded+1)*sizeof(Preload)
                        + (num_preloaded+1)*sizeof(unsigned));
    for (i = 0; i < num_preloaded; i++) {
@@ -2825,7 +2825,7 @@ Preload* sys_preloaded(void)
        n = GETWORD(data);
        data += 2;
        preloaded[i].name = erts_alloc(ERTS_ALC_T_PRELOADED, n+1);
-       erts_smp_atomic_add_nob(&sys_misc_mem_sz, n+1);
+       erts_atomic_add_nob(&sys_misc_mem_sz, n+1);
        sys_memcpy(preloaded[i].name, data, n);
        preloaded[i].name[n] = '\0';
        data += n;
@@ -2907,7 +2907,7 @@ sys_get_key(int fd)
char* win32_errorstr(int error)
{
-   LPTSTR lpBufPtr = erts_smp_tsd_get(win32_errstr_key);
+   LPTSTR lpBufPtr = erts_tsd_get(win32_errstr_key);
    if (lpBufPtr) {
        LocalFree(lpBufPtr);
    }
@@ -2921,7 +2921,7 @@ char* win32_errorstr(int error)
                  0,
                  NULL);
    SetLastError(error);
-   erts_smp_tsd_set(win32_errstr_key,lpBufPtr);
+   erts_tsd_set(win32_errstr_key,lpBufPtr);
    return lpBufPtr;
}
@@ -3170,7 +3170,7 @@ erts_sys_pre_init(void)

    erts_init_sys_time_sup();

-   erts_smp_atomic_init_nob(&sys_misc_mem_sz, 0);
+   erts_atomic_init_nob(&sys_misc_mem_sz, 0);
}

void noinherit_std_handle(DWORD type)
@@ -3190,9 +3190,9 @@ void erl_sys_init(void)
    noinherit_std_handle(STD_INPUT_HANDLE);
    noinherit_std_handle(STD_ERROR_HANDLE);

-   erts_smp_tsd_key_create(&win32_errstr_key,"win32_errstr_key");
+   erts_tsd_key_create(&win32_errstr_key,"win32_errstr_key");
    InitializeCriticalSection(&htbc_lock);
-   erts_smp_atomic_init_nob(&pipe_creation_counter,0);
+   erts_atomic_init_nob(&pipe_creation_counter,0);
    /*
     * Test if we have named pipes or not.
     */
@@ -3269,6 +3269,6 @@ void erl_sys_schedule(int runnable)
{
    erts_check_io(!runnable);
-   ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
+   ERTS_LC_ASSERT(!erts_thr_progress_is_blocking());
}
diff --git a/erts/emulator/sys/win32/sys_env.c b/erts/emulator/sys/win32/sys_env.c
index 8fcee1cbb6..5792816267 100644
--- a/erts/emulator/sys/win32/sys_env.c
+++ b/erts/emulator/sys/win32/sys_env.c
@@ -32,12 +32,12 @@ static WCHAR **env_to_arg(WCHAR *env);
static WCHAR **find_arg(WCHAR **arg, WCHAR *str);
static int compare(const void *a, const void *b);
-static erts_smp_rwmtx_t environ_rwmtx;
+static erts_rwmtx_t environ_rwmtx;

void
erts_sys_env_init(void)
{
- erts_smp_rwmtx_init(&environ_rwmtx, "environ", NIL,
+    erts_rwmtx_init(&environ_rwmtx, "environ", NIL,
        ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
}
@@ -45,10 +45,10 @@ int erts_sys_putenv_raw(char *key, char *value)
{
int res;
- erts_smp_rwmtx_rwlock(&environ_rwmtx);
+    erts_rwmtx_rwlock(&environ_rwmtx);
    res = (SetEnvironmentVariable((LPCTSTR) key,
(LPCTSTR) value) ? 0 : 1);
- erts_smp_rwmtx_rwunlock(&environ_rwmtx);
+    erts_rwmtx_rwunlock(&environ_rwmtx);
    return res;
}
@@ -58,10 +58,10 @@ erts_sys_putenv(char *key, char *value)
    int res;
WCHAR *wkey = (WCHAR *) key;
WCHAR *wvalue = (WCHAR *) value;
- erts_smp_rwmtx_rwlock(&environ_rwmtx);
+    erts_rwmtx_rwlock(&environ_rwmtx);
    res = (SetEnvironmentVariableW(wkey,
wvalue) ? 0 : 1);
- erts_smp_rwmtx_rwunlock(&environ_rwmtx);
+    erts_rwmtx_rwunlock(&environ_rwmtx);
    return res;
}
@@ -76,12 +76,12 @@ erts_sys_getenv(char *key, char *value, size_t *size)
    DWORD wsize = *size / (sizeof(WCHAR) / sizeof(char));

    SetLastError(0);
SetLastError(0);
- erts_smp_rwmtx_rlock(&environ_rwmtx);
+    erts_rwmtx_rlock(&environ_rwmtx);
    new_size = GetEnvironmentVariableW(wkey,
wvalue,
(DWORD) wsize);
res = !new_size && GetLastError() == ERROR_ENVVAR_NOT_FOUND ? -1 : 0;
- erts_smp_rwmtx_runlock(&environ_rwmtx);
+    erts_rwmtx_runlock(&environ_rwmtx);
    if (res < 0)
return res;
res = new_size > wsize ? 1 : 0;
@@ -111,22 +111,22 @@ int erts_sys_getenv_raw(char *key, char *value, size_t *size)
{
int res;
- erts_smp_rwmtx_rlock(&environ_rwmtx);
+    erts_rwmtx_rlock(&environ_rwmtx);
    res = erts_sys_getenv__(key, value, size);
- erts_smp_rwmtx_runlock(&environ_rwmtx);
+    erts_rwmtx_runlock(&environ_rwmtx);
    return res;
}
void init_getenv_state(GETENV_STATE *state)
{
- erts_smp_rwmtx_rlock(&environ_rwmtx);
+    erts_rwmtx_rlock(&environ_rwmtx);
    state->environment_strings = GetEnvironmentStringsW();
state->next_string = state->environment_strings;
}
char *getenv_string(GETENV_STATE *state)
{
- ERTS_SMP_LC_ASSERT(erts_smp_lc_rwmtx_is_rlocked(&environ_rwmtx));
+    ERTS_LC_ASSERT(erts_lc_rwmtx_is_rlocked(&environ_rwmtx));

    if (state->next_string[0] == L'\0') {
return NULL;
} else {
@@ -140,7 +140,7 @@ void fini_getenv_state(GETENV_STATE *state)
{
FreeEnvironmentStringsW(state->environment_strings);
state->environment_strings = state->next_string = NULL;
- erts_smp_rwmtx_runlock(&environ_rwmtx);
+ erts_rwmtx_runlock(&environ_rwmtx); }
int erts_sys_unsetenv(char *key)
@@ -149,7 +149,7 @@ int erts_sys_unsetenv(char *key)
    WCHAR *wkey = (WCHAR *) key;
SetLastError(0);
- erts_smp_rwmtx_rlock(&environ_rwmtx);
+    erts_rwmtx_rlock(&environ_rwmtx);
    GetEnvironmentVariableW(wkey,
NULL,
0);
@@ -157,7 +157,7 @@ int erts_sys_unsetenv(char *key)
        res = (SetEnvironmentVariableW(wkey,
NULL) ? 0 : 1);
}
- erts_smp_rwmtx_runlock(&environ_rwmtx);
+    erts_rwmtx_runlock(&environ_rwmtx);
    return res;
}
@@ -171,12 +171,12 @@ win_build_environment(char* new_env)
        tmp_new = (WCHAR *) new_env;
- erts_smp_rwmtx_rlock(&environ_rwmtx);
+        erts_rwmtx_rlock(&environ_rwmtx);
        tmp = GetEnvironmentStringsW();
merged = merge_environment(tmp, tmp_new);
FreeEnvironmentStringsW(tmp);
- erts_smp_rwmtx_runlock(&environ_rwmtx);
+        erts_rwmtx_runlock(&environ_rwmtx);
        return (char *) merged;
}
}
diff --git a/erts/emulator/sys/win32/sys_interrupt.c b/erts/emulator/sys/win32/sys_interrupt.c
index 4632ab52b1..02aa50500f 100644
--- a/erts/emulator/sys/win32/sys_interrupt.c
+++ b/erts/emulator/sys/win32/sys_interrupt.c
@@ -35,11 +35,11 @@
# define WIN_SYS_INLINE __forceinline
#endif

-erts_smp_atomic32_t erts_break_requested;
+erts_atomic32_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
-  erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 1)
+  erts_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
-  erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 0)
+  erts_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 0)

extern int nohup;
HANDLE erts_sys_break_event = NULL;
@@ -51,14 +51,14 @@ void erts_do_break_handling(void)
     * therefore, make sure that all threads but this one are blocked before
     * proceeding!
     */
-   erts_smp_thr_progress_block();
+   erts_thr_progress_block();

    /* call the break handling function, reset the flag */
    do_break();

    ResetEvent(erts_sys_break_event);
    ERTS_UNSET_BREAK_REQUESTED;

-   erts_smp_thr_progress_unblock();
+   erts_thr_progress_unblock();
}
diff --git a/erts/emulator/sys/win32/sys_time.c b/erts/emulator/sys/win32/sys_time.c
index 359010e9f1..25c2ad385c 100644
--- a/erts/emulator/sys/win32/sys_time.c
+++ b/erts/emulator/sys/win32/sys_time.c
@@ -95,7 +95,7 @@ struct sys_time_internal_state_read_mostly__ {
};
struct sys_time_internal_state_write_freq__ {
-   erts_smp_mtx_t mtime_mtx;
+   erts_mtx_t mtime_mtx;
    ULONGLONG wrap;
    ULONGLONG last_tick_count;
};
@@ -294,7 +294,7 @@ sys_init_time(ErtsSysInitTimeResult *init_resp)
    module = GetModuleHandle(kernel_dll_name);
    if (!module) {
get_tick_count:
-       erts_smp_mtx_init(&internal_state.w.f.mtime_mtx, "os_monotonic_time", NIL,
+       erts_mtx_init(&internal_state.w.f.mtime_mtx, "os_monotonic_time", NIL,
            ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
        internal_state.w.f.wrap = 0;
        internal_state.w.f.last_tick_count = 0;