-rw-r--r--  erts/emulator/beam/atom.c  12
-rw-r--r--  erts/emulator/beam/beam_bif_load.c  31
-rw-r--r--  erts/emulator/beam/beam_debug.c  4
-rw-r--r--  erts/emulator/beam/beam_emu.c  54
-rw-r--r--  erts/emulator/beam/beam_load.c  14
-rw-r--r--  erts/emulator/beam/bif.c  68
-rw-r--r--  erts/emulator/beam/break.c  16
-rw-r--r--  erts/emulator/beam/code_ix.c  2
-rw-r--r--  erts/emulator/beam/dist.c  6
-rw-r--r--  erts/emulator/beam/erl_alloc.c  88
-rw-r--r--  erts/emulator/beam/erl_alloc.h  34
-rw-r--r--  erts/emulator/beam/erl_alloc_util.c  180
-rw-r--r--  erts/emulator/beam/erl_alloc_util.h  22
-rw-r--r--  erts/emulator/beam/erl_async.c  75
-rw-r--r--  erts/emulator/beam/erl_async.h  18
-rw-r--r--  erts/emulator/beam/erl_bif_ddll.c  64
-rw-r--r--  erts/emulator/beam/erl_bif_info.c  88
-rw-r--r--  erts/emulator/beam/erl_bif_port.c  8
-rw-r--r--  erts/emulator/beam/erl_bif_re.c  4
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c  48
-rw-r--r--  erts/emulator/beam/erl_bits.c  18
-rw-r--r--  erts/emulator/beam/erl_bits.h  36
-rw-r--r--  erts/emulator/beam/erl_cpu_topology.c  6
-rw-r--r--  erts/emulator/beam/erl_cpu_topology.h  2
-rw-r--r--  erts/emulator/beam/erl_db.c  78
-rw-r--r--  erts/emulator/beam/erl_db_hash.c  59
-rw-r--r--  erts/emulator/beam/erl_db_hash.h  2
-rw-r--r--  erts/emulator/beam/erl_db_util.c  15
-rw-r--r--  erts/emulator/beam/erl_db_util.h  2
-rw-r--r--  erts/emulator/beam/erl_drv_thread.c  105
-rw-r--r--  erts/emulator/beam/erl_fun.c  2
-rw-r--r--  erts/emulator/beam/erl_gc.c  5
-rw-r--r--  erts/emulator/beam/erl_hl_timer.c  22
-rw-r--r--  erts/emulator/beam/erl_hl_timer.h  2
-rw-r--r--  erts/emulator/beam/erl_init.c  119
-rw-r--r--  erts/emulator/beam/erl_lock_check.c  14
-rw-r--r--  erts/emulator/beam/erl_lock_check.h  4
-rw-r--r--  erts/emulator/beam/erl_message.c  34
-rw-r--r--  erts/emulator/beam/erl_message.h  14
-rw-r--r--  erts/emulator/beam/erl_msacc.c  22
-rw-r--r--  erts/emulator/beam/erl_msacc.h  9
-rw-r--r--  erts/emulator/beam/erl_nif.c  89
-rw-r--r--  erts/emulator/beam/erl_node_tables.c  8
-rw-r--r--  erts/emulator/beam/erl_port.h  49
-rw-r--r--  erts/emulator/beam/erl_port_task.c  64
-rw-r--r--  erts/emulator/beam/erl_port_task.h  14
-rw-r--r--  erts/emulator/beam/erl_process.c  416
-rw-r--r--  erts/emulator/beam/erl_process.h  106
-rw-r--r--  erts/emulator/beam/erl_process_lock.c  6
-rw-r--r--  erts/emulator/beam/erl_process_lock.h  50
-rw-r--r--  erts/emulator/beam/erl_ptab.h  2
-rw-r--r--  erts/emulator/beam/erl_sched_spec_pre_alloc.c  2
-rw-r--r--  erts/emulator/beam/erl_sched_spec_pre_alloc.h  2
-rw-r--r--  erts/emulator/beam/erl_smp.h  540
-rw-r--r--  erts/emulator/beam/erl_thr_progress.c  2
-rw-r--r--  erts/emulator/beam/erl_thr_progress.h  12
-rw-r--r--  erts/emulator/beam/erl_thr_queue.c  123
-rw-r--r--  erts/emulator/beam/erl_thr_queue.h  25
-rw-r--r--  erts/emulator/beam/erl_threads.h  605
-rw-r--r--  erts/emulator/beam/erl_trace.c  225
-rw-r--r--  erts/emulator/beam/erl_trace.h  6
-rw-r--r--  erts/emulator/beam/erl_utils.h  8
-rw-r--r--  erts/emulator/beam/export.c  4
-rw-r--r--  erts/emulator/beam/global.h  8
-rw-r--r--  erts/emulator/beam/io.c  79
-rw-r--r--  erts/emulator/beam/register.c  34
-rw-r--r--  erts/emulator/beam/sys.h  27
-rw-r--r--  erts/emulator/beam/utils.c  55
-rw-r--r--  erts/emulator/drivers/common/efile_drv.c  9
-rw-r--r--  erts/emulator/hipe/hipe_mkliterals.c  6
-rw-r--r--  erts/emulator/hipe/hipe_mode_switch.c  26
-rw-r--r--  erts/emulator/hipe/hipe_native_bif.c  12
-rw-r--r--  erts/emulator/hipe/hipe_native_bif.h  6
-rw-r--r--  erts/emulator/hipe/hipe_primops.h  2
-rw-r--r--  erts/emulator/hipe/hipe_process.h  2
-rw-r--r--  erts/emulator/hipe/hipe_stack.h  5
-rw-r--r--  erts/emulator/hipe/hipe_x86_signal.c  8
-rw-r--r--  erts/emulator/sys/common/erl_check_io.c  34
-rw-r--r--  erts/emulator/sys/common/erl_check_io.h  7
-rw-r--r--  erts/emulator/sys/common/erl_mseg.c  17
-rw-r--r--  erts/emulator/sys/common/erl_os_monotonic_time_extender.c  10
-rw-r--r--  erts/emulator/sys/common/erl_os_monotonic_time_extender.h  19
-rw-r--r--  erts/emulator/sys/common/erl_poll.c  184
-rw-r--r--  erts/emulator/sys/common/erl_poll.h  3
-rw-r--r--  erts/emulator/sys/unix/erl_unix_sys.h  10
-rw-r--r--  erts/emulator/sys/unix/sys.c  126
-rw-r--r--  erts/emulator/sys/unix/sys_drivers.c  24
-rw-r--r--  erts/emulator/sys/unix/sys_float.c  8
-rw-r--r--  erts/emulator/sys/win32/erl_poll.c  23
-rw-r--r--  erts/emulator/sys/win32/erl_win_sys.h  2
-rw-r--r--  erts/emulator/sys/win32/sys.c  47
-rw-r--r--  erts/emulator/sys/win32/sys_interrupt.c  6
-rw-r--r--  erts/etc/common/erlexec.c  21
-rw-r--r--  erts/include/internal/ethread.h  3
-rw-r--r--  erts/lib_src/common/erl_printf.c  3
95 files changed, 15 insertions(+), 4575 deletions(-)
diff --git a/erts/emulator/beam/atom.c b/erts/emulator/beam/atom.c
index c2d78aaccb..8023414821 100644
--- a/erts/emulator/beam/atom.c
+++ b/erts/emulator/beam/atom.c
@@ -359,32 +359,24 @@ am_atom_put(const char* name, int len)
int atom_table_size(void)
{
int ret;
-#ifdef ERTS_SMP
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
atom_read_lock();
-#endif
ret = erts_atom_table.entries;
-#ifdef ERTS_SMP
if (lock)
atom_read_unlock();
-#endif
return ret;
}
int atom_table_sz(void)
{
int ret;
-#ifdef ERTS_SMP
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
atom_read_lock();
-#endif
ret = index_table_sz(&erts_atom_table);
-#ifdef ERTS_SMP
if (lock)
atom_read_unlock();
-#endif
return ret;
}
@@ -412,19 +404,15 @@ erts_atom_get(const char *name, int len, Eterm* ap, ErtsAtomEncoding enc)
void
erts_atom_get_text_space_sizes(Uint *reserved, Uint *used)
{
-#ifdef ERTS_SMP
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
atom_read_lock();
-#endif
if (reserved)
*reserved = reserved_atom_space;
if (used)
*used = atom_space;
-#ifdef ERTS_SMP
if (lock)
atom_read_unlock();
-#endif
}
void
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 14ddb74324..fb22d28af0 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -265,7 +265,6 @@ struct m {
};
static Eterm staging_epilogue(Process* c_p, int, Eterm res, int, struct m*, int, int);
-#ifdef ERTS_SMP
static void smp_code_ix_commiter(void*);
static struct /* Protected by code_write_permission */
@@ -273,7 +272,6 @@ static struct /* Protected by code_write_permission */
Process* stager;
ErtsThrPrgrLaterOp lop;
} committer_state;
-#endif
static Eterm
exception_list(Process* p, Eterm tag, struct m* mp, Sint exceptions)
@@ -465,9 +463,7 @@ static Eterm
staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
struct m* mods, int nmods, int free_mods)
{
-#ifdef ERTS_SMP
if (is_blocking || !commit)
-#endif
{
if (commit) {
int i;
@@ -496,7 +492,6 @@ staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
erts_release_code_write_permission();
return res;
}
-#ifdef ERTS_SMP
else {
ASSERT(is_value(res));
@@ -521,11 +516,9 @@ staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
*/
ERTS_BIF_YIELD_RETURN(c_p, res);
}
-#endif
}
-#ifdef ERTS_SMP
static void smp_code_ix_commiter(void* null)
{
Process* p = committer_state.stager;
@@ -542,7 +535,6 @@ static void smp_code_ix_commiter(void* null)
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
erts_proc_dec_refc(p);
}
-#endif /* ERTS_SMP */
@@ -1312,7 +1304,6 @@ hfrag_literal_copy(Eterm **hpp, ErlOffHeap *ohp,
}
}
-#ifdef ERTS_SMP
ErtsThrPrgrLaterOp later_literal_area_switch;
@@ -1340,7 +1331,6 @@ complete_literal_area_switch(void *literal_area)
if (literal_area)
erts_release_literal_area((ErtsLiteralArea *) literal_area);
}
-#endif
BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0)
{
@@ -1366,7 +1356,6 @@ BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0)
if (!la_ref) {
ERTS_SET_COPY_LITERAL_AREA(NULL);
if (unused_la) {
-#ifdef ERTS_SMP
ErtsLaterReleasLiteralArea *lrlap;
lrlap = erts_alloc(ERTS_ALC_T_RELEASE_LAREA,
sizeof(ErtsLaterReleasLiteralArea));
@@ -1380,9 +1369,6 @@ BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0)
+ ((unused_la->end
- &unused_la->start[0])
- 1)*(sizeof(Eterm))));
-#else
- erts_release_literal_area(unused_la);
-#endif
}
BIF_RET(am_false);
}
@@ -1391,16 +1377,11 @@ BIF_RETTYPE erts_internal_release_literal_area_switch_0(BIF_ALIST_0)
erts_free(ERTS_ALC_T_LITERAL_REF, la_ref);
-#ifdef ERTS_SMP
erts_schedule_thr_prgr_later_op(complete_literal_area_switch,
unused_la,
&later_literal_area_switch);
erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
-#else
- erts_release_literal_area(unused_la);
- BIF_RET(am_true);
-#endif
}
@@ -1506,7 +1487,6 @@ finalize_purge_operation(Process *c_p, int succeded)
purge_state.fe_ix = 0;
}
-#ifdef ERTS_SMP
static ErtsThrPrgrLaterOp purger_lop_data;
@@ -1529,7 +1509,6 @@ finalize_purge_abort(void *unused)
resume_purger(NULL);
}
-#endif /* ERTS_SMP */
BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
{
@@ -1604,9 +1583,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
}
}
-#ifndef ERTS_SMP
- BIF_RET(res);
-#else
if (res != am_true)
BIF_RET(res);
else {
@@ -1625,7 +1601,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
}
-#endif
}
case am_abort: {
@@ -1639,11 +1614,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
erts_fun_purge_abort_prepare(purge_state.funs, purge_state.fe_ix);
-#ifndef ERTS_SMP
- erts_fun_purge_abort_finalize(purge_state.funs, purge_state.fe_ix);
- finalize_purge_operation(BIF_P, 0);
- BIF_RET(am_false);
-#else
/*
* We need to restore the code addresses of the funs in
* two stages in order to ensure that we do not get any
@@ -1659,7 +1629,6 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
&purger_lop_data);
erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
ERTS_BIF_YIELD_RETURN(BIF_P, am_false);
-#endif
}
case am_complete: {
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index 74e6ab8bc5..5f47a7c99f 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -1148,11 +1148,7 @@ ms_wait(Process *c_p, Eterm etimeout, int busy)
#endif /* ERTS_DIRTY_SCHEDULERS */
-#ifdef ERTS_SMP
# define ERTS_STACK_LIMIT ((char *) ethr_get_stacklimit())
-#else
-# define ERTS_STACK_LIMIT ((char *) erts_scheduler_stack_limit)
-#endif
/*
* The below functions is for testing of the stack
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 377b8e651d..0b4a69411a 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -62,7 +62,6 @@
#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
-# ifdef ERTS_SMP
# define PROCESS_MAIN_CHK_LOCKS(P) \
do { \
if ((P)) \
@@ -80,11 +79,6 @@ do { \
if ((P)) \
erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN); \
} while (0)
-# else
-# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
-# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
-# define PROCESS_MAIN_CHK_LOCKS(P) erts_lc_check_exact(NULL, 0)
-# endif
#else
# define PROCESS_MAIN_CHK_LOCKS(P)
# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
@@ -1943,7 +1937,6 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array)
msgp = PEEK_MESSAGE(c_p);
if (!msgp) {
-#ifdef ERTS_SMP
erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
/* Make sure messages wont pass exit signals... */
if (ERTS_PROC_PENDING_EXIT(c_p)) {
@@ -1958,7 +1951,6 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array)
if (msgp)
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
else
-#endif
{
c_p->flags &= ~F_DELAY_GC;
SET_I((BeamInstr *) Arg(0));
@@ -2180,23 +2172,6 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array)
OpCase(wait_f):
wait2: {
-#ifndef ERTS_SMP
- if (ERTS_PROC_IS_EXITING(c_p)) {
- /*
- * I non smp case:
- *
- * Currently executing process might be sent an exit
- * signal if it is traced by a port that it also is
- * linked to, and the port terminates during the
- * trace. In this case we do *not* want to clear
- * the active flag, which will make the process hang
- * in limbo forever.
- */
- SWAPOUT;
- c_p->arity = 0;
- goto do_schedule;
- }
-#endif
c_p->i = (BeamInstr *) Arg(0); /* L1 */
SWAPOUT;
c_p->arity = 0;
@@ -3622,9 +3597,7 @@ do { \
typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]);
NifF* fp = vbf = (NifF*) I[1];
struct enif_environment_t env;
-#ifdef ERTS_SMP
ASSERT(c_p->scheduler_data);
-#endif
live_hf_end = c_p->mbuf;
ERTS_CHK_MBUF_SZ(c_p);
erts_pre_nif(&env, c_p, (struct erl_module_nif*)I[2], NULL);
@@ -6539,22 +6512,6 @@ erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* re
int arity;
Eterm tmp;
-#ifndef ERTS_SMP
- if (ERTS_PROC_IS_EXITING(c_p)) {
- /*
- * I non smp case:
- *
- * Currently executing process might be sent an exit
- * signal if it is traced by a port that it also is
- * linked to, and the port terminates during the
- * trace. In this case we do *not* want to clear
- * the active flag, which will make the process hang
- * in limbo forever. Get out of here and terminate
- * the process...
- */
- return -1;
- }
-#endif
if (is_not_atom(module) || is_not_atom(function)) {
/*
@@ -6632,19 +6589,8 @@ erts_hibernate(Process* c_p, Eterm module, Eterm function, Eterm args, Eterm* re
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
-#ifndef ERTS_SMP
- if (ERTS_PROC_IS_EXITING(c_p)) {
- /*
- * See comment in the beginning of the function...
- *
- * This second test is needed since gc might be traced.
- */
- return -1;
- }
-#else /* ERTS_SMP */
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(c_p);
if (!c_p->msg.len)
-#endif
erts_smp_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
}
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index 71637cf4d6..9bb4525306 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -86,11 +86,9 @@ typedef struct {
Sint patches; /* Index (into code buffer) to first location
* which must be patched with the value of this label.
*/
-#ifdef ERTS_SMP
Uint looprec_targeted; /* Non-zero if this label is the target of a loop_rec
* instruction.
*/
-#endif
} Label;
/*
@@ -1875,9 +1873,7 @@ read_code_header(LoaderState* stp)
for (i = 0; i < stp->num_labels; i++) {
stp->labels[i].value = 0;
stp->labels[i].patches = -1;
-#ifdef ERTS_SMP
stp->labels[i].looprec_targeted = 0;
-#endif
}
stp->catches = 0;
@@ -3389,11 +3385,7 @@ negation_is_small(LoaderState* stp, GenOpArg Int)
static int
smp(LoaderState* stp)
{
-#ifdef ERTS_SMP
return 1;
-#else
- return 0;
-#endif
}
/*
@@ -3402,10 +3394,8 @@ smp(LoaderState* stp)
static int
smp_mark_target_label(LoaderState* stp, GenOpArg L)
{
-#ifdef ERTS_SMP
ASSERT(L.type == TAG_f);
stp->labels[L.val].looprec_targeted = 1;
-#endif
return 1;
}
@@ -3416,12 +3406,8 @@ smp_mark_target_label(LoaderState* stp, GenOpArg L)
static int
smp_already_locked(LoaderState* stp, GenOpArg L)
{
-#ifdef ERTS_SMP
ASSERT(L.type == TAG_u);
return stp->labels[L.val].looprec_targeted;
-#else
- return 0;
-#endif
}
/*
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 890277a3ba..dbd09ef126 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -98,14 +98,12 @@ static int insert_internal_link(Process* p, Eterm rpid)
ASSERT(is_internal_pid(rpid));
-#ifdef ERTS_SMP
if (IS_TRACED(p)
&& (ERTS_TRACE_FLAGS(p) & (F_TRACE_SOL|F_TRACE_SOL1))) {
rp_locks = ERTS_PROC_LOCKS_ALL;
}
erts_smp_proc_lock(p, ERTS_PROC_LOCK_LINK);
-#endif
/* get a pointer to the process struct of the linked process */
rp = erts_pid2proc_opt(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK,
@@ -290,9 +288,6 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to)
ErtsMonitor *mon;
int code;
Eterm res = am_false;
-#ifndef ERTS_SMP
- int stale_mon = 0;
-#endif
ERTS_SMP_LC_ASSERT((ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK)
== erts_proc_lc_my_proc_locks(c_p));
@@ -301,13 +296,6 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to)
switch (code) {
case ERTS_DSIG_PREP_NOT_ALIVE:
case ERTS_DSIG_PREP_NOT_CONNECTED:
-#ifndef ERTS_SMP
- /* XXX Is this possible? Shouldn't this link
- previously have been removed if the node
- had previously been disconnected. */
- ASSERT(0);
- stale_mon = 1;
-#endif
/*
* In the smp case this is possible if the node goes
* down just before the call to demonitor.
@@ -335,13 +323,6 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to)
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_LINK);
if (!dmon) {
-#ifndef ERTS_SMP
- /* XXX How is this possible? Shouldn't this link
- previously have been removed when the distributed
- end was removed. */
- ASSERT(0);
- stale_mon = 1;
-#endif
/*
* This is possible when smp support is enabled.
* 'DOWN' message just arrived.
@@ -370,18 +351,6 @@ remote_demonitor(Process *c_p, DistEntry *dep, Eterm ref, Eterm to)
return am_internal_error;
}
-#ifndef ERTS_SMP
- if (stale_mon) {
- erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp, "Stale process monitor %T to ", ref);
- if (is_atom(to))
- erts_dsprintf(dsbufp, "{%T, %T}", to, dep->sysname);
- else
- erts_dsprintf(dsbufp, "%T", to);
- erts_dsprintf(dsbufp, " found\n");
- erts_send_error_to_logger(c_p->group_leader, dsbufp);
- }
-#endif
/*
* We aren't allowed to destroy 'mon' until now, since 'to'
@@ -405,13 +374,9 @@ demonitor_local_process(Process *c_p, Eterm ref, Eterm to, Eterm *res)
ERTS_P2P_FLG_ALLOW_OTHER_X);
ErtsMonitor *mon = erts_remove_monitor(&ERTS_P_MONITORS(c_p), ref);
-#ifndef ERTS_SMP
- ASSERT(mon);
-#else
if (!mon)
*res = am_false;
else
-#endif
{
*res = am_true;
erts_destroy_monitor(mon);
@@ -1151,10 +1116,8 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
if (is_internal_port(BIF_ARG_1)) {
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
-#ifdef ERTS_SMP
if (ERTS_PROC_PENDING_EXIT(BIF_P))
goto handle_pending_exit;
-#endif
l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1);
@@ -1201,10 +1164,8 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
us in a state where monitors might be inconsistent, but the dist
code should take care of it. */
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
-#ifdef ERTS_SMP
if (ERTS_PROC_PENDING_EXIT(BIF_P))
goto handle_pending_exit;
-#endif
l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1);
erts_smp_proc_unlock(BIF_P,
@@ -1258,13 +1219,11 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
BIF_ARG_1, ERTS_PROC_LOCK_LINK,
ERTS_P2P_FLG_ALLOW_OTHER_X);
-#ifdef ERTS_SMP
if (ERTS_PROC_PENDING_EXIT(BIF_P)) {
if (rp && rp != BIF_P)
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
goto handle_pending_exit;
}
-#endif
/* unlink and ignore errors */
l = erts_remove_link(&ERTS_P_LINKS(BIF_P), BIF_ARG_1);
@@ -1294,7 +1253,6 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
BIF_RET(am_true);
-#ifdef ERTS_SMP
handle_pending_exit:
erts_handle_pending_exit(BIF_P, (ERTS_PROC_LOCK_MAIN
| ERTS_PROC_LOCK_LINK
@@ -1302,7 +1260,6 @@ BIF_RETTYPE unlink_1(BIF_ALIST_1)
ASSERT(ERTS_PROC_IS_EXITING(BIF_P));
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
ERTS_BIF_EXITED(BIF_P);
-#endif
}
BIF_RETTYPE hibernate_3(BIF_ALIST_3)
@@ -1657,12 +1614,10 @@ BIF_RETTYPE exit_2(BIF_ALIST_2)
NIL,
NULL,
BIF_P == rp ? ERTS_XSIG_FLG_NO_IGN_NORMAL : 0);
-#ifdef ERTS_SMP
if (rp == BIF_P)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
if (rp_locks)
erts_smp_proc_unlock(rp, rp_locks);
-#endif
/*
* We may have exited ourselves and may have to take action.
*/
@@ -1783,12 +1738,10 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
~ERTS_PSFLG_TRAP_EXIT);
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCKS_XSIG_SEND);
-#ifdef ERTS_SMP
if (state & ERTS_PSFLG_PENDING_EXIT) {
erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN);
ERTS_BIF_EXITED(BIF_P);
}
-#endif
old_value = (state & ERTS_PSFLG_TRAP_EXIT) ? am_true : am_false;
BIF_RET(old_value);
@@ -1811,9 +1764,7 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
}
else {
new = erts_schedid2runq(sched);
-#ifdef ERTS_SMP
erts_atomic_set_nob(&BIF_P->run_queue, (erts_aint_t) new);
-#endif
state = erts_smp_atomic32_read_bor_mb(&BIF_P->state,
ERTS_PSFLG_BOUND);
}
@@ -1946,15 +1897,11 @@ BIF_RETTYPE process_flag_3(BIF_ALIST_3)
Process *rp;
Eterm res;
-#ifdef ERTS_SMP
rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
if (rp == ERTS_PROC_LOCK_BUSY)
ERTS_BIF_YIELD3(bif_export[BIF_process_flag_3], BIF_P,
BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
-#else
- rp = erts_proc_lookup(BIF_ARG_1);
-#endif
if (!rp)
BIF_ERROR(BIF_P, BADARG);
@@ -2296,10 +2243,8 @@ do_send(Process *p, Eterm to, Eterm msg, Eterm *refp, ErtsSendContext *ctx)
send_message: {
ErtsProcLocks rp_locks = 0;
Sint res;
-#ifdef ERTS_SMP
if (p == rp)
rp_locks |= ERTS_PROC_LOCK_MAIN;
-#endif
/* send to local process */
res = erts_send_message(p, rp, &rp_locks, msg, 0);
if (erts_use_sender_punish)
@@ -4563,9 +4508,6 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
if (BIF_ARG_1 == am_multi_scheduling) {
if (BIF_ARG_2 == am_block || BIF_ARG_2 == am_unblock
|| BIF_ARG_2 == am_block_normal || BIF_ARG_2 == am_unblock_normal) {
-#ifndef ERTS_SMP
- BIF_RET(am_disabled);
-#else
int block = (BIF_ARG_2 == am_block
|| BIF_ARG_2 == am_block_normal);
int normal = (BIF_ARG_2 == am_block_normal
@@ -4601,15 +4543,8 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
break;
}
-#endif
}
} else if (BIF_ARG_1 == am_schedulers_online) {
-#ifndef ERTS_SMP
- if (BIF_ARG_2 != make_small(1))
- goto error;
- else
- BIF_RET(make_small(1));
-#else
Sint old_no;
if (!is_small(BIF_ARG_2))
goto error;
@@ -4633,7 +4568,6 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
break;
}
-#endif
} else if (BIF_ARG_1 == am_fullsweep_after) {
Uint16 nval;
Uint oval;
@@ -5019,7 +4953,6 @@ static BIF_RETTYPE bif_return_trap(BIF_ALIST_2)
Eterm res = BIF_ARG_1;
switch (BIF_ARG_2) {
-#ifdef ERTS_SMP
case am_multi_scheduling: {
int msb = erts_is_multi_scheduling_blocked();
if (msb > 0)
@@ -5030,7 +4963,6 @@ static BIF_RETTYPE bif_return_trap(BIF_ALIST_2)
ERTS_INTERNAL_ERROR("Unexpected multi scheduling block state");
break;
}
-#endif
default:
break;
}
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 76a0c5c716..9bcf50653d 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -344,9 +344,7 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p)
erts_program_counter_info(to, to_arg, p);
} else {
erts_print(to, to_arg, "Stack dump:\n");
-#ifdef ERTS_SMP
if (!garbing)
-#endif
erts_stack_dump(to, to_arg, p);
}
@@ -358,11 +356,9 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p)
static void
print_garb_info(fmtfn_t to, void *to_arg, Process* p)
{
-#ifdef ERTS_SMP
/* ERTS_SMP: A scheduler is probably concurrently doing gc... */
if (!ERTS_IS_CRASH_DUMPING)
return;
-#endif
erts_print(to, to_arg, "New heap start: %bpX\n", p->heap);
erts_print(to, to_arg, "New heap top: %bpX\n", p->htop);
erts_print(to, to_arg, "Stack top: %bpX\n", p->stop);
@@ -698,9 +694,7 @@ static int crash_dump_limited_writer(void* vfdp, char* buf, size_t len)
void
erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
{
-#ifdef ERTS_SMP
ErtsThrPrgrData tpd_buf; /* in case we aren't a managed thread... */
-#endif
int fd;
size_t envsz;
time_t now;
@@ -717,7 +711,6 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
if (ERTS_SOMEONE_IS_CRASH_DUMPING)
return;
-#ifdef ERTS_SMP
/* Order all managed threads to block, this has to be done
first to guarantee that this is the only thread to generate
crash dump. */
@@ -744,9 +737,6 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
erts_smp_atomic32_set_mb(&erts_writing_erl_crash_dump, 1);
erts_smp_tsd_set(erts_is_crash_dumping_key, (void *) 1);
-#else /* !ERTS_SMP */
- erts_writing_erl_crash_dump = 1;
-#endif /* ERTS_SMP */
envsz = sizeof(env);
/* ERL_CRASH_DUMP_SECONDS not set
@@ -841,7 +831,6 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
erts_print_nif_taints(to, to_arg);
erts_cbprintf(to, to_arg, "Atoms: %d\n", atom_table_size());
-#ifdef USE_THREADS
/* We want to note which thread it was that called erts_exit */
if (erts_get_scheduler_data()) {
erts_cbprintf(to, to_arg, "Calling Thread: scheduler:%d\n",
@@ -852,9 +841,6 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
else
erts_cbprintf(to, to_arg, "Calling Thread: %p\n", erts_thr_self());
}
-#else
- erts_cbprintf(to, to_arg, "Calling Thread: scheduler:1\n");
-#endif
#if defined(ERTS_HAVE_TRY_CATCH)
@@ -873,7 +859,6 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
}
#endif
-#ifdef ERTS_SMP
#ifdef ERTS_SYS_SUSPEND_SIGNAL
@@ -895,7 +880,6 @@ erl_crash_dump_v(char *file, int line, char* fmt, va_list args)
*/
erts_thr_progress_fatal_error_wait(60000);
/* Either worked or not... */
-#endif
#ifndef ERTS_HAVE_TRY_CATCH
/* This is safe to call here, as all schedulers are blocked */
diff --git a/erts/emulator/beam/code_ix.c b/erts/emulator/beam/code_ix.c
index 8a3d1b20b4..ba68d612e3 100644
--- a/erts/emulator/beam/code_ix.c
+++ b/erts/emulator/beam/code_ix.c
@@ -115,9 +115,7 @@ void erts_abort_staging_code_ix(void)
int erts_try_seize_code_write_permission(Process* c_p)
{
int success;
-#ifdef ERTS_SMP
ASSERT(!erts_smp_thr_progress_is_blocking()); /* to avoid deadlock */
-#endif
ASSERT(c_p != NULL);
erts_smp_mtx_lock(&code_write_permission_mtx);
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index 09fdb897f5..e19ff2be68 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -283,10 +283,8 @@ static void doit_monitor_net_exits(ErtsMonitor *mon, void *vnecp)
watched = (is_atom(rmon->name)
? TUPLE2(lhp, rmon->name, dep->sysname)
: rmon->u.pid);
-#ifdef ERTS_SMP
rp_locks |= ERTS_PROC_LOCKS_MSG_SEND;
erts_smp_proc_lock(rp, ERTS_PROC_LOCKS_MSG_SEND);
-#endif
erts_queue_monitor_message(rp, &rp_locks, mon->ref, am_process,
watched, am_noconnection);
erts_destroy_monitor(rmon);
@@ -2902,10 +2900,8 @@ BIF_RETTYPE dist_exit_3(BIF_ALIST_3)
NIL,
NULL,
0);
-#ifdef ERTS_SMP
if (lp == BIF_P)
lp_locks &= ~ERTS_PROC_LOCK_MAIN;
-#endif
erts_smp_proc_unlock(lp, lp_locks);
if (lp == BIF_P) {
erts_aint32_t state = erts_smp_atomic32_read_acqb(&BIF_P->state);
@@ -2913,10 +2909,8 @@ BIF_RETTYPE dist_exit_3(BIF_ALIST_3)
* We may have exited current process and may have to take action.
*/
if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) {
-#ifdef ERTS_SMP
if (state & ERTS_PSFLG_PENDING_EXIT)
erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN);
-#endif
ERTS_BIF_EXITED(BIF_P);
}
}
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index c7ab444c96..bf7f71ebde 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -83,14 +83,6 @@
#define ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_EHEAP_ALLOC
#define ERTS_ALC_DEFAULT_ACUL_LL_ALLOC ERTS_ALC_DEFAULT_ENABLED_ACUL_LL_ALLOC
-#ifndef ERTS_SMP
-# undef ERTS_ALC_DEFAULT_ACUL
-# define ERTS_ALC_DEFAULT_ACUL 0
-# undef ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC
-# define ERTS_ALC_DEFAULT_ACUL_EHEAP_ALLOC 0
-# undef ERTS_ALC_DEFAULT_ACUL_LL_ALLOC
-# define ERTS_ALC_DEFAULT_ACUL_LL_ALLOC 0
-#endif
#ifdef DEBUG
static Uint install_debug_functions(void);
@@ -528,7 +520,6 @@ set_default_test_alloc_opts(struct au_init *ip)
}
-#ifdef ERTS_SMP
static void
adjust_tpref(struct au_init *ip, int no_sched)
@@ -551,7 +542,6 @@ adjust_tpref(struct au_init *ip, int no_sched)
}
}
-#endif
static void handle_args(int *, char **, erts_alc_hndl_args_init_t *);
@@ -580,7 +570,6 @@ static void adjust_fix_alloc_sizes(UWord extra_block_size)
if (extra_block_size && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].enabled) {
int j;
-#ifdef ERTS_SMP
if (erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].thr_spec) {
int i;
ErtsAllocatorThrSpec_t* tspec;
@@ -596,7 +585,6 @@ static void adjust_fix_alloc_sizes(UWord extra_block_size)
}
}
else
-#endif
{
Allctr_t* allctr = erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra;
for (j=0; j < ERTS_ALC_NO_FIXED_SIZES; ++j) {
@@ -619,7 +607,6 @@ strategy_support_carrier_migration(struct au_init *auip)
static ERTS_INLINE void
adjust_carrier_migration_support(struct au_init *auip)
{
-#ifdef ERTS_SMP
if (auip->init.util.acul) {
auip->thr_spec = -1; /* Need thread preferred */
@@ -633,9 +620,6 @@ adjust_carrier_migration_support(struct au_init *auip)
auip->init.aoff.flavor = AOFF_BF;
}
}
-#else
- auip->init.util.acul = 0;
-#endif
}
void
@@ -668,10 +652,8 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
= sizeof(ErtsNifSelectDataState);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_MSG_REF)]
= sizeof(ErtsMessageRef);
-#ifdef ERTS_SMP
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_THR_Q_EL_SL)]
= sizeof(ErtsThrQElement_t);
-#endif
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_LL_PTIMER)]
= erts_timer_type_size(ERTS_ALC_T_LL_PTIMER);
fix_type_sizes[ERTS_ALC_FIX_TYPE_IX(ERTS_ALC_T_HL_PTIMER)]
@@ -734,20 +716,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
#endif
}
-#ifndef ERTS_SMP
- init.sl_alloc.thr_spec = 0;
- init.std_alloc.thr_spec = 0;
- init.ll_alloc.thr_spec = 0;
- init.eheap_alloc.thr_spec = 0;
- init.binary_alloc.thr_spec = 0;
- init.ets_alloc.thr_spec = 0;
- init.driver_alloc.thr_spec = 0;
- init.fix_alloc.thr_spec = 0;
- init.literal_alloc.thr_spec = 0;
-#ifdef ERTS_ALC_A_EXEC
- init.exec_alloc.thr_spec = 0;
-#endif
-#endif
/* Make adjustments for carrier migration support */
init.temp_alloc.init.util.acul = 0;
@@ -798,7 +766,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
#endif
}
-#ifdef ERTS_SMP
/* Only temp_alloc can use thread specific interface */
if (init.temp_alloc.thr_spec)
init.temp_alloc.thr_spec = erts_no_schedulers;
@@ -817,10 +784,6 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
adjust_tpref(&init.exec_alloc, erts_no_schedulers);
#endif
-#else
- /* No thread specific if not smp */
- init.temp_alloc.thr_spec = 0;
-#endif
/*
* The following allocators cannot be run with afit strategy.
@@ -839,10 +802,8 @@ erts_alloc_init(int *argc, char **argv, ErtsAllocInitOpts *eaiop)
refuse_af_strategy(&init.exec_alloc);
#endif
-#ifdef ERTS_SMP
if (!init.temp_alloc.thr_spec)
refuse_af_strategy(&init.temp_alloc);
-#endif
erts_mtrace_pre_init();
#if HAVE_ERTS_MSEG
@@ -1006,8 +967,6 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu)
return;
}
-#ifdef USE_THREADS
-#ifdef ERTS_SMP
if (init->thr_spec) {
if (init->thr_spec > 0) {
af->alloc = erts_alcu_alloc_thr_spec;
@@ -1037,7 +996,6 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu)
ai->thr_spec = tspec->size;
}
else
-#endif
if (init->init.util.ts) {
af->alloc = erts_alcu_alloc_ts;
if (init->init.util.fix_type_size)
@@ -1049,21 +1007,9 @@ set_au_allocator(ErtsAlcType_t alctr_n, struct au_init *init, int ncpu)
af->free = erts_alcu_free_ts;
}
else
-#endif
{
-#ifdef ERTS_SMP
erts_exit(ERTS_ABORT_EXIT, "%salloc is not thread safe\n",
init->init.util.name_prefix);
-#else
- af->alloc = erts_alcu_alloc;
- if (init->init.util.fix_type_size)
- af->realloc = erts_realloc_fixed_size;
- else if (init->init.util.ramv)
- af->realloc = erts_alcu_realloc_mv;
- else
- af->realloc = erts_alcu_realloc;
- af->free = erts_alcu_free;
-#endif
}
af->extra = NULL;
ai->alloc_util = 1;
@@ -1915,7 +1861,6 @@ erts_alloc_register_scheduler(void *vesdp)
}
}
-#ifdef ERTS_SMP
void
erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp,
int *need_thr_progress,
@@ -1944,12 +1889,10 @@ erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp,
}
}
}
-#endif
erts_aint32_t
erts_alloc_fix_alloc_shrink(int ix, erts_aint32_t flgs)
{
-#ifdef ERTS_SMP
ErtsAllocatorThrSpec_t *tspec;
tspec = &erts_allctr_thr_spec[ERTS_ALC_A_FIXED_SIZE];
if (erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].thr_spec && tspec->enabled)
@@ -1957,11 +1900,6 @@ erts_alloc_fix_alloc_shrink(int ix, erts_aint32_t flgs)
if (ix == 0 && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra)
return erts_alcu_fix_alloc_shrink(
erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra, flgs);
-#else
- if (ix == 1 && erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra)
- return erts_alcu_fix_alloc_shrink(
- erts_allctrs_info[ERTS_ALC_A_FIXED_SIZE].extra, flgs);
-#endif
return 0;
}
@@ -2845,11 +2783,7 @@ erts_allocator_info(fmtfn_t to, void *arg)
#if HAVE_ERTS_MSEG
{
struct erts_mmap_info_struct emis;
-#ifdef ERTS_SMP
int max = (int) erts_no_schedulers;
-#else
- int max = 0;
-#endif
int i;
for (i = 0; i <= max; i++) {
erts_print(to, arg, "=allocator:mseg_alloc[%d]\n", i);
@@ -3451,13 +3385,11 @@ erts_request_alloc_info(struct process *c_p,
erts_proc_add_refc(c_p, (Sint) erts_no_schedulers);
-#ifdef ERTS_SMP
if (erts_no_schedulers > 1)
erts_schedule_multi_misc_aux_work(1,
erts_no_schedulers,
reply_alloc_info,
(void *) air);
-#endif
reply_alloc_info((void *) air);
@@ -3532,35 +3464,29 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
case 0xf:
switch (op) {
case 0xf00:
-#ifdef USE_THREADS
if (((Allctr_t *) a1)->thread_safe)
return (UWord) erts_alcu_alloc_ts(ERTS_ALC_T_UNDEF,
(void *) a1,
(Uint) a2);
else
-#endif
return (UWord) erts_alcu_alloc(ERTS_ALC_T_UNDEF,
(void *) a1,
(Uint) a2);
case 0xf01:
-#ifdef USE_THREADS
if (((Allctr_t *) a1)->thread_safe)
return (UWord) erts_alcu_realloc_ts(ERTS_ALC_T_UNDEF,
(void *) a1,
(void *) a2,
(Uint) a3);
else
-#endif
return (UWord) erts_alcu_realloc(ERTS_ALC_T_UNDEF,
(void *) a1,
(void *) a2,
(Uint) a3);
case 0xf02:
-#ifdef USE_THREADS
if (((Allctr_t *) a1)->thread_safe)
erts_alcu_free_ts(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2);
else
-#endif
erts_alcu_free(ERTS_ALC_T_UNDEF, (void *) a1, (void *) a2);
return 0;
case 0xf03: {
@@ -3571,11 +3497,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
init.enable = 1;
init.atype = GOODFIT;
init.init.util.name_prefix = (char *) a1;
-#ifdef ERTS_SMP
init.init.util.ts = 1;
-#else
- init.init.util.ts = a2 ? 1 : 0;
-#endif
if ((char **) a3) {
char **argv = (char **) a3;
int i = 0;
@@ -3630,7 +3552,6 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
erts_alcu_stop((Allctr_t *) a1);
erts_free(ERTS_ALC_T_UNDEF, (void *) a1);
break;
-#ifdef USE_THREADS
case 0xf05: return (UWord) 1;
case 0xf06: return (UWord) ((Allctr_t *) a1)->thread_safe;
#ifdef ETHR_NO_FORKSAFETY
@@ -3700,12 +3621,7 @@ UWord erts_alc_test(UWord op, UWord a1, UWord a2, UWord a3)
ethr_thr_exit((void *) a1);
ERTS_ALC_TEST_ABORT;
break;
-#endif /* #ifdef USE_THREADS */
-#ifdef ERTS_SMP
case 0xf13: return (UWord) 1;
-#else
- case 0xf13: return (UWord) 0;
-#endif
case 0xf14: return (UWord) erts_alloc(ERTS_ALC_T_TEST, (Uint)a1);
case 0xf15: erts_free(ERTS_ALC_T_TEST, (void*)a1); return 0;
@@ -3909,10 +3825,8 @@ void check_allocators(void)
ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) erts_allctrs[i].extra;
Allctr_t *allctr = real_af->extra;
Carrier_t *ct;
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
-#endif
if (allctr->check_mbc) {
for (ct = allctr->mbc_list.first; ct; ct = ct->next) {
@@ -3920,10 +3834,8 @@ void check_allocators(void)
allctr->check_mbc(allctr,ct);
}
}
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
-#endif
}
}
}
diff --git a/erts/emulator/beam/erl_alloc.h b/erts/emulator/beam/erl_alloc.h
index 97a1cf1308..ed5ff4a2ff 100644
--- a/erts/emulator/beam/erl_alloc.h
+++ b/erts/emulator/beam/erl_alloc.h
@@ -27,9 +27,7 @@
#include "erl_thr_progress.h"
#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
#include "erl_alloc_util.h"
-#ifdef USE_THREADS
#include "erl_threads.h"
-#endif
#include "erl_mmap.h"
#ifdef DEBUG
@@ -154,12 +152,10 @@ void erts_allctr_wrapper_pre_lock(void);
void erts_allctr_wrapper_pre_unlock(void);
void erts_alloc_register_scheduler(void *vesdp);
-#ifdef ERTS_SMP
void erts_alloc_scheduler_handle_delayed_dealloc(void *vesdp,
int *need_thr_progress,
ErtsThrPrgrVal *thr_prgr_p,
int *more_work);
-#endif
erts_aint32_t erts_alloc_fix_alloc_shrink(int ix, erts_aint32_t flgs);
__decl_noreturn void erts_alloc_enomem(ErtsAlcType_t,Uint)
@@ -349,23 +345,10 @@ ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \
erts_smp_spin_lock(&NAME##_lck), \
erts_smp_spin_unlock(&NAME##_lck))
-#ifdef ERTS_SMP
#define ERTS_TS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
ERTS_SMP_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT)
-#else /* !ERTS_SMP */
-
-#define ERTS_TS_QUALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
-static erts_mtx_t NAME##_lck; \
-ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, \
- erts_mtx_init(NAME##_lck, #NAME "_alloc_lock", NIL, \
- ERTS_LOCK_FLAGS_CATEGORY_ALLOCATOR),\
- erts_mtx_lock(&NAME##_lck), \
- erts_mtx_unlock(&NAME##_lck))
-
-
-#endif
#define ERTS_PALLOC_IMPL(NAME, TYPE, PASZ) \
ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, (void) 0, (void) 0, (void) 0)
@@ -378,17 +361,10 @@ ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, \
erts_spin_lock(&NAME##_lck), \
erts_spin_unlock(&NAME##_lck))
-#ifdef ERTS_SMP
#define ERTS_SMP_PALLOC_IMPL(NAME, TYPE, PASZ) \
ERTS_TS_PALLOC_IMPL(NAME, TYPE, PASZ)
-#else /* !ERTS_SMP */
-
-#define ERTS_SMP_PALLOC_IMPL(NAME, TYPE, PASZ) \
- ERTS_PALLOC_IMPL(NAME, TYPE, PASZ)
-
-#endif
#define ERTS_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT, ILCK, LCK, ULCK) \
ERTS_PRE_ALLOC_IMPL(NAME##_pre, TYPE, PASZ, ILCK, LCK, ULCK) \
@@ -412,21 +388,11 @@ NAME##_free(TYPE *p) \
erts_free(ALCT, (void *) p); \
}
-#ifdef ERTS_SMP
#define ERTS_SCHED_PREF_PALLOC_IMPL(NAME, TYPE, PASZ) \
ERTS_SCHED_PREF_PRE_ALLOC_IMPL(NAME, TYPE, PASZ)
-#else
-#define ERTS_SCHED_PREF_PALLOC_IMPL(NAME, TYPE, PASZ) \
- ERTS_PRE_ALLOC_IMPL(NAME, TYPE, PASZ, (void) 0, (void) 0, (void) 0)
-#endif
-#ifdef ERTS_SMP
#define ERTS_SCHED_PREF_AUX(NAME, TYPE, PASZ) \
ERTS_SCHED_PREF_PRE_ALLOC_IMPL(NAME##_pre, TYPE, PASZ)
-#else
-#define ERTS_SCHED_PREF_AUX(NAME, TYPE, PASZ) \
-ERTS_PRE_ALLOC_IMPL(NAME##_pre, TYPE, PASZ, (void) 0, (void) 0, (void) 0)
-#endif
#define ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(NAME, TYPE, PASZ, ALCT) \
ERTS_SCHED_PREF_AUX(NAME, TYPE, PASZ) \
diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c
index af86ad0548..f4c88438f5 100644
--- a/erts/emulator/beam/erl_alloc_util.c
+++ b/erts/emulator/beam/erl_alloc_util.c
@@ -305,9 +305,6 @@ MBC after deallocating first block:
# define ERTS_ALC_CPOOL_DEBUG
#endif
-#ifndef ERTS_SMP
-# undef ERTS_ALC_CPOOL_DEBUG
-#endif
#ifdef ERTS_ALC_CPOOL_DEBUG
# define ERTS_ALC_CPOOL_ASSERT(A) \
@@ -322,13 +319,8 @@ MBC after deallocating first block:
# define ERTS_ALC_CPOOL_ASSERT(A) ((void) 1)
#endif
-#ifdef ERTS_SMP
#define ERTS_ALC_IS_CPOOL_ENABLED(A) ((A)->cpool.util_limit)
-#else
-#define ERTS_ALC_IS_CPOOL_ENABLED(A) (0)
-#endif
-#ifdef ERTS_SMP
#define ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON 1000
#define ERTS_ALC_CPOOL_ALLOC_OP_INC 8
@@ -367,28 +359,16 @@ do { \
} \
} while (0)
-#else
-#define ERTS_ALC_CPOOL_ALLOC_OP(A)
-#define ERTS_ALC_CPOOL_REALLOC_OP(A)
-#define ERTS_ALC_CPOOL_FREE_OP(A)
-#endif
#define ERTS_CRR_ALCTR_FLG_IN_POOL (((erts_aint_t) 1) << 0)
#define ERTS_CRR_ALCTR_FLG_BUSY (((erts_aint_t) 1) << 1)
#define ERTS_CRR_ALCTR_FLG_MASK (ERTS_CRR_ALCTR_FLG_IN_POOL | \
ERTS_CRR_ALCTR_FLG_BUSY)
-#ifdef ERTS_SMP
#define SBC_HEADER_SIZE \
(UNIT_CEILING(offsetof(Carrier_t, cpool) \
+ ABLK_HDR_SZ) \
- ABLK_HDR_SZ)
-#else
-#define SBC_HEADER_SIZE \
- (UNIT_CEILING(sizeof(Carrier_t) \
- + ABLK_HDR_SZ) \
- - ABLK_HDR_SZ)
-#endif
#define MBC_HEADER_SIZE(AP) ((AP)->mbc_header_size)
@@ -598,15 +578,11 @@ do { \
(AP)->mbcs.blocks.curr.size -= (CRR)->cpool.blocks_size; \
} while (0)
-#ifdef ERTS_SMP
#define STAT_MBC_BLK_ALLOC_CRR(CRR, BSZ) \
do { \
(CRR)->cpool.blocks++; \
(CRR)->cpool.blocks_size += (BSZ); \
} while (0)
-#else
-#define STAT_MBC_BLK_ALLOC_CRR(CRR, BSZ) ((void) (CRR)) /* Get rid of warning */
-#endif
#define STAT_MBC_BLK_ALLOC(AP, CRR, BSZ, FLGS) \
do { \
@@ -626,7 +602,6 @@ stat_cpool_mbc_blk_free(Allctr_t *allctr,
Carrier_t **busy_pcrr_pp,
UWord blksz)
{
-#ifdef ERTS_SMP
ERTS_ALC_CPOOL_ASSERT(crr->cpool.blocks > 0);
crr->cpool.blocks--;
@@ -651,9 +626,6 @@ stat_cpool_mbc_blk_free(Allctr_t *allctr,
#endif
return 1;
-#else
- return 0;
-#endif
}
#define STAT_MBC_BLK_FREE(AP, CRR, BPCRRPP, BSZ, FLGS) \
@@ -689,12 +661,7 @@ do { \
#endif
#ifdef DEBUG
-#ifdef USE_THREADS
-# ifdef ERTS_SMP
# define IS_ACTUALLY_BLOCKING (erts_thr_progress_is_blocking())
-# else
-# define IS_ACTUALLY_BLOCKING 0
-# endif
#define ERTS_ALCU_DBG_CHK_THR_ACCESS(A) \
do { \
if (!(A)->thread_safe && !IS_ACTUALLY_BLOCKING) { \
@@ -711,9 +678,6 @@ do { \
#else
#define ERTS_ALCU_DBG_CHK_THR_ACCESS(A)
#endif
-#else
-#define ERTS_ALCU_DBG_CHK_THR_ACCESS(A)
-#endif
static void make_name_atoms(Allctr_t *allctr);
@@ -1191,7 +1155,6 @@ unlink_carrier(CarrierList_t *cl, Carrier_t *crr)
}
}
-#ifdef ERTS_SMP
#ifdef DEBUG
static int is_in_list(ErtsDoubleLink_t* sentinel, ErtsDoubleLink_t* node)
@@ -1301,7 +1264,6 @@ clear_busy_pool_carrier(Allctr_t *allctr, Carrier_t *crr)
}
}
-#endif /* ERTS_SMP */
#if 0
#define ERTS_DBG_CHK_FIX_LIST(A, FIX, IX, B) \
@@ -1325,12 +1287,10 @@ chk_fix_list(Allctr_t *allctr, ErtsAlcFixList_t *fix, int ix, int before)
static void *mbc_alloc(Allctr_t *allctr, Uint size);
-#ifdef ERTS_SMP
typedef struct {
ErtsAllctrDDBlock_t ddblock__; /* must be first */
ErtsAlcType_t fix_type;
} ErtsAllctrFixDDBlock_t;
-#endif
#define ERTS_ALC_FIX_NO_UNUSE (((ErtsAlcType_t) 1) << ERTS_ALC_N_BITS)
@@ -1341,11 +1301,9 @@ dealloc_fix_block(Allctr_t *allctr,
ErtsAlcFixList_t *fix,
int dec_cc_on_redirect)
{
-#ifdef ERTS_SMP
/* May be redirected... */
ASSERT((type & ERTS_ALC_FIX_NO_UNUSE) == 0);
((ErtsAllctrFixDDBlock_t *) ptr)->fix_type = type | ERTS_ALC_FIX_NO_UNUSE;
-#endif
dealloc_block(allctr, ptr, fix, dec_cc_on_redirect);
}
@@ -1379,12 +1337,10 @@ fix_cpool_check_shrink(Allctr_t *allctr,
fix->u.cpool.shrink_list = 0;
else {
void *p;
-#ifdef ERTS_SMP
if (busy_pcrr_pp) {
clear_busy_pool_carrier(allctr, *busy_pcrr_pp);
*busy_pcrr_pp = NULL;
}
-#endif
fix->u.cpool.shrink_list--;
p = fix->list;
fix->list = *((void **) p);
@@ -1477,10 +1433,8 @@ fix_cpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
int ix, o;
int flush = flgs == 0;
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
-#endif
for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) {
ErtsAlcFixList_t *fix = &allctr->fix[ix];
@@ -1520,10 +1474,8 @@ fix_cpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
if (all_empty)
sched_fix_shrink(allctr, 0);
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
-#endif
return res;
}
@@ -1635,10 +1587,8 @@ fix_nocpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
int ix, o;
int flush = flgs == 0;
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
-#endif
for (ix = 0; ix < ERTS_ALC_NO_FIXED_SIZES; ix++) {
ErtsAlcFixList_t *fix = &allctr->fix[ix];
@@ -1680,10 +1630,8 @@ fix_nocpool_alloc_shrink(Allctr_t *allctr, erts_aint32_t flgs)
if (all_empty)
sched_fix_shrink(allctr, 0);
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
-#endif
return res;
}
@@ -1709,7 +1657,6 @@ dealloc_mbc(Allctr_t *allctr, Carrier_t *crr)
dealloc_carrier(allctr, crr, 1);
}
-#ifdef ERTS_SMP
static ERTS_INLINE Allctr_t*
get_pref_allctr(void *extra)
@@ -2201,9 +2148,7 @@ enqueue_dealloc_other_instance(ErtsAlcType_t type,
erts_alloc_notify_delayed_dealloc(allctr->ix);
}
-#endif
-#ifdef ERTS_SMP
static void
set_new_allctr_abandon_limit(Allctr_t *allctr);
static void
@@ -2265,7 +2210,6 @@ erts_alcu_check_delayed_dealloc(Allctr_t *allctr,
thr_prgr_p,
more_work);
}
-#endif
#define ERTS_ALCU_HANDLE_DD_IN_OP(Allctr, Locked) \
handle_delayed_dealloc((Allctr), (Locked), 1, \
@@ -2281,19 +2225,13 @@ dealloc_block(Allctr_t *allctr, void *ptr, ErtsAlcFixList_t *fix, int dec_cc_on_
if (IS_SBC_BLK(blk)) {
destroy_carrier(allctr, blk, NULL);
-#ifdef ERTS_SMP
if (fix && ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
ErtsAlcType_t type = ((ErtsAllctrFixDDBlock_t *) ptr)->fix_type;
if (!(type & ERTS_ALC_FIX_NO_UNUSE))
fix->u.cpool.used--;
fix->u.cpool.allocated--;
}
-#endif
}
-#ifndef ERTS_SMP
- else
- mbc_free(allctr, ptr, NULL);
-#else
else if (!ERTS_ALC_IS_CPOOL_ENABLED(allctr))
mbc_free(allctr, ptr, NULL);
else {
@@ -2323,7 +2261,6 @@ dealloc_block(Allctr_t *allctr, void *ptr, ErtsAlcFixList_t *fix, int dec_cc_on_
erts_alloc_notify_delayed_dealloc(used_allctr->ix);
}
}
-#endif
}
/* Multi block carrier alloc/realloc/free ... */
@@ -2571,9 +2508,7 @@ mbc_free(Allctr_t *allctr, void *p, Carrier_t **busy_pcrr_pp)
else {
(*allctr->link_free_block)(allctr, blk);
HARD_CHECK_BLK_CARRIER(allctr, blk);
-#ifdef ERTS_SMP
check_abandon_carrier(allctr, blk, busy_pcrr_pp);
-#endif
}
}
@@ -2607,10 +2542,8 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
return NULL;
#else /* !MBC_REALLOC_ALWAYS_MOVES */
-#ifdef ERTS_SMP
if (busy_pcrr_pp && *busy_pcrr_pp)
goto realloc_move; /* Don't want to use carrier in pool */
-#endif
get_blk_sz = blk_sz = UMEMSZ2BLKSZ(allctr, size);
@@ -2731,9 +2664,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
HARD_CHECK_BLK_CARRIER(allctr, blk);
-#ifdef ERTS_SMP
check_abandon_carrier(allctr, nxt_blk, NULL);
-#endif
return p;
}
@@ -2845,9 +2776,7 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
if (cand_blk_sz < get_blk_sz) {
/* We wont fit in cand_blk get a new one */
-#ifdef ERTS_SMP
realloc_move:
-#endif
#endif /* !MBC_REALLOC_ALWAYS_MOVES */
new_p = mbc_alloc(allctr, size);
@@ -2949,7 +2878,6 @@ mbc_realloc(Allctr_t *allctr, void *p, Uint size, Uint32 alcu_flgs,
#endif /* !MBC_REALLOC_ALWAYS_MOVES */
}
-#ifdef ERTS_SMP
#define ERTS_ALC_MAX_DEALLOC_CARRIER 10
#define ERTS_ALC_CPOOL_MAX_FETCH_INSPECT 20
@@ -3735,7 +3663,6 @@ cpool_read_stat(Allctr_t *allctr, UWord *nocp, UWord *cszp, UWord *nobp, UWord *
}
-#endif /* ERTS_SMP */
#ifdef DEBUG
@@ -3836,7 +3763,6 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
blk_sz = UMEMSZ2BLKSZ(allctr, umem_sz);
}
-#ifdef ERTS_SMP
allctr->cpool.disable_abandon = ERTS_ALC_CPOOL_MAX_DISABLE_ABANDON;
if ((flags & (CFLG_MBC|CFLG_NO_CPOOL)) == CFLG_MBC
@@ -3852,7 +3778,6 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
return blk;
}
}
-#endif
#if HAVE_ERTS_MSEG
@@ -3982,9 +3907,7 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags)
allctr->main_carrier = crr;
}
-#ifdef ERTS_SMP
cpool_init_carrier_data(allctr, crr);
-#endif
link_carrier(&allctr->mbc_list, crr);
@@ -4204,7 +4127,6 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp)
}
#endif
-#ifdef ERTS_SMP
if (busy_pcrr_pp && *busy_pcrr_pp) {
ERTS_ALC_CPOOL_ASSERT(*busy_pcrr_pp == crr);
*busy_pcrr_pp = NULL;
@@ -4216,7 +4138,6 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp)
cpool_delete(allctr, allctr, crr);
}
else
-#endif
{
unlink_carrier(&allctr->mbc_list, crr);
#if HAVE_ERTS_MSEG
@@ -4247,11 +4168,7 @@ destroy_carrier(Allctr_t *allctr, Block_t *blk, Carrier_t **busy_pcrr_pp)
}
#endif
-#ifdef ERTS_SMP
schedule_dealloc_carrier(allctr, crr);
-#else
- dealloc_mbc(allctr, crr);
-#endif
}
}
@@ -4294,9 +4211,7 @@ static struct {
Eterm fix_types;
Eterm mbcs;
-#ifdef ERTS_SMP
Eterm mbcs_pool;
-#endif
Eterm sbcs;
Eterm sys_alloc_carriers_size;
@@ -4384,9 +4299,7 @@ init_atoms(Allctr_t *allctr)
AM_INIT(fix_types);
AM_INIT(mbcs);
-#ifdef ERTS_SMP
AM_INIT(mbcs_pool);
-#endif
AM_INIT(sbcs);
AM_INIT(sys_alloc_carriers_size);
@@ -4636,7 +4549,6 @@ sz_info_carriers(Allctr_t *allctr,
return res;
}
-#ifdef ERTS_SMP
static Eterm
info_cpool(Allctr_t *allctr,
@@ -4690,7 +4602,6 @@ info_cpool(Allctr_t *allctr,
return res;
}
-#endif /* ERTS_SMP */
static Eterm
info_carriers(Allctr_t *allctr,
@@ -4945,11 +4856,7 @@ info_options(Allctr_t *allctr,
return res;
}
-#ifdef ERTS_SMP
acul = allctr->cpool.util_limit;
-#else
- acul = 0;
-#endif
if (print_to_p) {
char topt[21]; /* Enough for any 64-bit integer */
@@ -5132,19 +5039,15 @@ erts_alcu_info_options(Allctr_t *allctr,
if (hpp || szp)
ensure_atoms_initialized(allctr);
-#ifdef USE_THREADS
if (allctr->thread_safe) {
erts_allctr_wrapper_pre_lock();
erts_mtx_lock(&allctr->mutex);
}
-#endif
res = info_options(allctr, print_to_p, print_to_arg, hpp, szp);
-#ifdef USE_THREADS
if (allctr->thread_safe) {
erts_mtx_unlock(&allctr->mutex);
erts_allctr_wrapper_pre_unlock();
}
-#endif
return res;
}
@@ -5160,9 +5063,7 @@ erts_alcu_sz_info(Allctr_t *allctr,
Uint *szp)
{
Eterm res, mbcs, sbcs, fix = THE_NON_VALUE;
-#ifdef ERTS_SMP
Eterm mbcs_pool;
-#endif
res = THE_NON_VALUE;
@@ -5177,12 +5078,10 @@ erts_alcu_sz_info(Allctr_t *allctr,
if (hpp || szp)
ensure_atoms_initialized(allctr);
-#ifdef USE_THREADS
if (allctr->thread_safe) {
erts_allctr_wrapper_pre_lock();
erts_mtx_lock(&allctr->mutex);
}
-#endif
ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
@@ -5198,23 +5097,19 @@ erts_alcu_sz_info(Allctr_t *allctr,
fix = sz_info_fix(allctr, internal, print_to_p, print_to_arg, hpp, szp);
mbcs = sz_info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p,
print_to_arg, hpp, szp);
-#ifdef ERTS_SMP
if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
mbcs_pool = info_cpool(allctr, 1, "mbcs_pool ", print_to_p,
print_to_arg, hpp, szp);
else
mbcs_pool = THE_NON_VALUE; /* shut up annoying warning... */
-#endif
sbcs = sz_info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p,
print_to_arg, hpp, szp);
if (hpp || szp) {
res = NIL;
add_2tup(hpp, szp, &res, am.sbcs, sbcs);
-#ifdef ERTS_SMP
if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
add_2tup(hpp, szp, &res, am.mbcs_pool, mbcs_pool);
-#endif
add_2tup(hpp, szp, &res, am.mbcs, mbcs);
add_fix_types(allctr, internal, hpp, szp, &res, fix);
}
@@ -5225,12 +5120,10 @@ erts_alcu_sz_info(Allctr_t *allctr,
}
-#ifdef USE_THREADS
if (allctr->thread_safe) {
erts_mtx_unlock(&allctr->mutex);
erts_allctr_wrapper_pre_unlock();
}
-#endif
return res;
}
@@ -5246,9 +5139,7 @@ erts_alcu_info(Allctr_t *allctr,
Uint *szp)
{
Eterm res, sett, mbcs, sbcs, calls, fix = THE_NON_VALUE;
-#ifdef ERTS_SMP
Eterm mbcs_pool;
-#endif
res = THE_NON_VALUE;
@@ -5263,12 +5154,10 @@ erts_alcu_info(Allctr_t *allctr,
if (hpp || szp)
ensure_atoms_initialized(allctr);
-#ifdef USE_THREADS
if (allctr->thread_safe) {
erts_allctr_wrapper_pre_lock();
erts_mtx_lock(&allctr->mutex);
}
-#endif
ERTS_ALCU_DBG_CHK_THR_ACCESS(allctr);
@@ -5293,13 +5182,11 @@ erts_alcu_info(Allctr_t *allctr,
fix = sz_info_fix(allctr, internal, print_to_p, print_to_arg, hpp, szp);
mbcs = info_carriers(allctr, &allctr->mbcs, "mbcs ", print_to_p,
print_to_arg, hpp, szp);
-#ifdef ERTS_SMP
if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
mbcs_pool = info_cpool(allctr, 0, "mbcs_pool ", print_to_p,
print_to_arg, hpp, szp);
else
mbcs_pool = THE_NON_VALUE; /* shut up annoying warning... */
-#endif
sbcs = info_carriers(allctr, &allctr->sbcs, "sbcs ", print_to_p,
print_to_arg, hpp, szp);
calls = info_calls(allctr, print_to_p, print_to_arg, hpp, szp);
@@ -5309,10 +5196,8 @@ erts_alcu_info(Allctr_t *allctr,
add_2tup(hpp, szp, &res, am.calls, calls);
add_2tup(hpp, szp, &res, am.sbcs, sbcs);
-#ifdef ERTS_SMP
if (ERTS_ALC_IS_CPOOL_ENABLED(allctr))
add_2tup(hpp, szp, &res, am.mbcs_pool, mbcs_pool);
-#endif
add_2tup(hpp, szp, &res, am.mbcs, mbcs);
add_fix_types(allctr, internal, hpp, szp, &res, fix);
add_2tup(hpp, szp, &res, am.options, sett);
@@ -5328,12 +5213,10 @@ erts_alcu_info(Allctr_t *allctr,
}
-#ifdef USE_THREADS
if (allctr->thread_safe) {
erts_mtx_unlock(&allctr->mutex);
erts_allctr_wrapper_pre_unlock();
}
-#endif
return res;
}
@@ -5343,10 +5226,8 @@ void
erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t *fi, int fisz)
{
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_lock(&allctr->mutex);
-#endif
size->carriers = allctr->mbcs.curr.norm.mseg.size;
size->carriers += allctr->mbcs.curr.norm.sys_alloc.size;
@@ -5356,14 +5237,12 @@ erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t *
size->blocks = allctr->mbcs.blocks.curr.size;
size->blocks += allctr->sbcs.blocks.curr.size;
-#ifdef ERTS_SMP
if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
UWord csz, bsz;
cpool_read_stat(allctr, NULL, &csz, NULL, &bsz);
size->blocks += bsz;
size->carriers += csz;
}
-#endif
if (fi) {
int ix;
@@ -5385,10 +5264,8 @@ erts_alcu_current_size(Allctr_t *allctr, AllctrSize_t *size, ErtsAlcUFixInfo_t *
}
}
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_unlock(&allctr->mutex);
-#endif
}
/* ----------------------------------------------------------------------- */
@@ -5436,18 +5313,13 @@ do_erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
void *erts_alcu_alloc(ErtsAlcType_t type, void *extra, Uint size)
{
void *res;
-#ifdef ERTS_SMP
ASSERT(!"This is not thread safe");
-#elif defined(USE_THREADS)
- ASSERT(erts_equal_tids(erts_main_thread, erts_thr_self()));
-#endif
res = do_erts_alcu_alloc(type, extra, size);
DEBUG_CHECK_ALIGNMENT(res);
return res;
}
-#ifdef USE_THREADS
void *
erts_alcu_alloc_ts(ErtsAlcType_t type, void *extra, Uint size)
@@ -5463,7 +5335,6 @@ erts_alcu_alloc_ts(ErtsAlcType_t type, void *extra, Uint size)
return res;
}
-#ifdef ERTS_SMP
void *
erts_alcu_alloc_thr_spec(ErtsAlcType_t type, void *extra, Uint size)
@@ -5503,21 +5374,17 @@ erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size)
if (pref_allctr->thread_safe)
erts_mtx_lock(&pref_allctr->mutex);
-#ifdef ERTS_SMP
ASSERT(pref_allctr->dd.use);
ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1);
-#endif
ERTS_ALCU_DBG_CHK_THR_ACCESS(pref_allctr);
res = do_erts_alcu_alloc(type, pref_allctr, size);
-#ifdef ERTS_SMP
if (!res && ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1)) {
/* Cleaned up a bit more; try one more time... */
res = do_erts_alcu_alloc(type, pref_allctr, size);
}
-#endif
if (pref_allctr->thread_safe)
erts_mtx_unlock(&pref_allctr->mutex);
@@ -5528,9 +5395,7 @@ erts_alcu_alloc_thr_pref(ErtsAlcType_t type, void *extra, Uint size)
return res;
}
-#endif
-#endif
/* ------------------------------------------------------------------------- */
@@ -5573,7 +5438,6 @@ void erts_alcu_free(ErtsAlcType_t type, void *extra, void *p)
do_erts_alcu_free(type, extra, p, NULL);
}
-#ifdef USE_THREADS
void
erts_alcu_free_ts(ErtsAlcType_t type, void *extra, void *p)
@@ -5584,7 +5448,6 @@ erts_alcu_free_ts(ErtsAlcType_t type, void *extra, void *p)
erts_mtx_unlock(&allctr->mutex);
}
-#ifdef ERTS_SMP
void
erts_alcu_free_thr_spec(ErtsAlcType_t type, void *extra, void *p)
@@ -5634,9 +5497,7 @@ erts_alcu_free_thr_pref(ErtsAlcType_t type, void *extra, void *p)
}
}
-#endif
-#endif
/* ------------------------------------------------------------------------- */
@@ -5785,7 +5646,6 @@ erts_alcu_realloc_mv(ErtsAlcType_t type, void *extra, void *p, Uint size)
}
-#ifdef USE_THREADS
void *
erts_alcu_realloc_ts(ErtsAlcType_t type, void *extra, void *ptr, Uint size)
@@ -5824,7 +5684,6 @@ erts_alcu_realloc_mv_ts(ErtsAlcType_t type, void *extra, void *p, Uint size)
return res;
}
-#ifdef ERTS_SMP
void *
erts_alcu_realloc_thr_spec(ErtsAlcType_t type, void *extra,
@@ -5905,9 +5764,7 @@ realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size,
Allctr_t *pref_allctr, *used_allctr;
UWord old_user_size;
Carrier_t *busy_pcrr_p;
-#ifdef ERTS_SMP
int retried;
-#endif
if (!p)
return erts_alcu_alloc_thr_pref(type, extra, size);
@@ -5917,12 +5774,10 @@ realloc_thr_pref(ErtsAlcType_t type, void *extra, void *p, Uint size,
if (pref_allctr->thread_safe)
erts_mtx_lock(&pref_allctr->mutex);
-#ifdef ERTS_SMP
ASSERT(pref_allctr->dd.use);
ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1);
retried = 0;
restart:
-#endif
used_allctr = get_used_allctr(pref_allctr, ERTS_ALC_TS_PREF_LOCK_NO,
p, &old_user_size, &busy_pcrr_p);
@@ -5938,13 +5793,11 @@ restart:
0,
&busy_pcrr_p);
clear_busy_pool_carrier(used_allctr, busy_pcrr_p);
-#ifdef ERTS_SMP
if (!res && !retried && ERTS_ALCU_HANDLE_DD_IN_OP(pref_allctr, 1)) {
/* Cleaned up a bit more; try one more time... */
retried = 1;
goto restart;
}
-#endif
if (pref_allctr->thread_safe)
erts_mtx_unlock(&pref_allctr->mutex);
}
@@ -5999,9 +5852,7 @@ erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t type, void *extra,
return realloc_thr_pref(type, extra, p, size, 1);
}
-#endif
-#endif
/* ------------------------------------------------------------------------- */
@@ -6022,10 +5873,8 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
sys_memcpy((void *) &allctr->mseg_opt,
(void *) &erts_mseg_default_opt,
sizeof(ErtsMsegOpt_t));
-#ifdef ERTS_SMP
if (init->tspec || init->tpref)
allctr->mseg_opt.sched_spec = 1;
-#endif /* ERTS_SMP */
#endif /* HAVE_ERTS_MSEG */
allctr->name_prefix = init->name_prefix;
@@ -6083,7 +5932,6 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
goto error;
allctr->min_block_size = UNIT_CEILING(allctr->min_block_size
+ sizeof(FreeBlkFtr_t));
-#ifdef ERTS_SMP
if (init->tpref) {
Uint sz = ABLK_HDR_SZ;
sz += (init->fix ?
@@ -6107,7 +5955,6 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
erts_atomic_init_nob(&allctr->cpool.stat.no_carriers, 0);
allctr->cpool.check_limit_count = ERTS_ALC_CPOOL_CHECK_LIMIT_COUNT;
allctr->cpool.util_limit = init->ts ? 0 : init->acul;
-#endif
allctr->sbc_threshold = init->sbct;
#ifndef ARCH_64
@@ -6131,7 +5978,6 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->mseg_opt.abs_shrink_th = ~((UWord) 0) / 100;
#endif
-#ifdef USE_THREADS
if (init->ts) {
allctr->thread_safe = 1;
@@ -6142,7 +5988,6 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->debug.saved_tid = 0;
#endif
}
-#endif
if(!allctr->get_free_block
|| !allctr->link_free_block
@@ -6155,14 +6000,12 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
if (allctr->mbc_header_size < sizeof(Carrier_t))
goto error;
-#ifdef ERTS_SMP
allctr->dd.use = 0;
if (init->tpref) {
allctr->dd.use = 1;
init_dd_queue(&allctr->dd.q);
allctr->dd.ix = init->ix;
}
-#endif
allctr->mbc_header_size = (UNIT_CEILING(allctr->mbc_header_size
+ ABLK_HDR_SZ)
- ABLK_HDR_SZ);
@@ -6216,10 +6059,8 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
| CFLG_NO_CPOOL
| CFLG_MAIN_CARRIER);
if (!blk) {
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_destroy(&allctr->mutex);
-#endif
erts_exit(ERTS_ABORT_EXIT,
"Failed to create main carrier for %salloc\n",
init->name_prefix);
@@ -6239,9 +6080,7 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
allctr->fix[i].type_size = init->fix_type_size[i];
allctr->fix[i].list_size = 0;
allctr->fix[i].list = NULL;
-#ifdef ERTS_SMP
ASSERT(allctr->fix[i].type_size >= sizeof(ErtsAllctrFixDDBlock_t));
-#endif
if (ERTS_ALC_IS_CPOOL_ENABLED(allctr)) {
allctr->fix[i].u.cpool.min_list_size = 0;
allctr->fix[i].u.cpool.shrink_list = 0;
@@ -6261,10 +6100,8 @@ erts_alcu_start(Allctr_t *allctr, AllctrInit_t *init)
error:
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_destroy(&allctr->mutex);
-#endif
return 0;
@@ -6282,10 +6119,8 @@ erts_alcu_stop(Allctr_t *allctr)
while (allctr->mbc_list.first)
destroy_carrier(allctr, MBC_TO_FIRST_BLK(allctr, allctr->mbc_list.first), NULL);
-#ifdef USE_THREADS
if (allctr->thread_safe)
erts_mtx_destroy(&allctr->mutex);
-#endif
}
@@ -6294,14 +6129,12 @@ erts_alcu_stop(Allctr_t *allctr)
void
erts_alcu_init(AlcUInit_t *init)
{
-#ifdef ERTS_SMP
int i;
for (i = 0; i <= ERTS_ALC_A_MAX; i++) {
ErtsAlcCPoolData_t *sentinel = &carrier_pool[i].sentinel;
erts_atomic_init_nob(&sentinel->next, (erts_aint_t) sentinel);
erts_atomic_init_nob(&sentinel->prev, (erts_aint_t) sentinel);
}
-#endif
ERTS_CT_ASSERT(SBC_BLK_SZ_MASK == MBC_FBLK_SZ_MASK); /* see BLK_SZ */
#if HAVE_ERTS_MSEG
ASSERT(erts_mseg_unit_size() == ERTS_SACRR_UNIT_SZ);
@@ -6369,7 +6202,6 @@ erts_alcu_test(UWord op, UWord a1, UWord a2)
case 0x01c: return (UWord) BLK_TO_MBC((Block_t*) a1);
case 0x01d: ((Allctr_t*) a1)->add_mbc((Allctr_t*)a1, (Carrier_t*)a2); break;
case 0x01e: ((Allctr_t*) a1)->remove_mbc((Allctr_t*)a1, (Carrier_t*)a2); break;
-#ifdef ERTS_SMP
case 0x01f: return (UWord) sizeof(ErtsAlcCrrPool_t);
case 0x020:
SET_CARRIER_HDR((Carrier_t *) a2, 0, SCH_SYS_ALLOC|SCH_MBC, (Allctr_t *) a1);
@@ -6383,14 +6215,6 @@ erts_alcu_test(UWord op, UWord a1, UWord a2)
return (UWord) a2;
case 0x023: return (UWord) cpool_is_empty((Allctr_t *) a1);
case 0x024: return (UWord) cpool_dbg_is_in_pool((Allctr_t *) a1, (Carrier_t *) a2);
-#else
- case 0x01f: return (UWord) 0;
- case 0x020: return (UWord) 0;
- case 0x021: return (UWord) 0;
- case 0x022: return (UWord) 0;
- case 0x023: return (UWord) 0;
- case 0x024: return (UWord) 0;
-#endif
case 0x025: /* UMEM2BLK_TEST*/
#ifdef DEBUG
# ifdef HARD_DEBUG
@@ -6448,13 +6272,9 @@ erts_alcu_verify_unused(Allctr_t *allctr)
void
erts_alcu_verify_unused_ts(Allctr_t *allctr)
{
-#ifdef USE_THREADS
erts_mtx_lock(&allctr->mutex);
-#endif
erts_alcu_verify_unused(allctr);
-#ifdef USE_THREADS
erts_mtx_unlock(&allctr->mutex);
-#endif
}
#ifdef DEBUG
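For reference, the `_ts` entry points kept above do nothing more than serialize the plain operation on the allocator's own mutex (see `erts_alcu_free_ts`), and the thread-preferred realloc additionally retries once after handling delayed deallocations. A minimal stand-alone sketch of the wrapper pattern, assuming pthreads and libc `malloc`/`free` in place of the ERTS primitives (the `toy_*` names are illustrative only, not ERTS code):

    #include <pthread.h>
    #include <stdlib.h>

    typedef struct {
        pthread_mutex_t mutex;   /* per-allocator lock, cf. Allctr_t's mutex field */
        int             thread_safe;
    } toy_allctr_t;

    /* stands in for the unlocked operation (do_erts_alcu_free above) */
    static void toy_free(toy_allctr_t *a, void *p)
    {
        (void) a;
        free(p);
    }

    /* the "_ts" variant: the same operation, serialized on the mutex */
    static void toy_free_ts(toy_allctr_t *a, void *p)
    {
        pthread_mutex_lock(&a->mutex);
        toy_free(a, p);
        pthread_mutex_unlock(&a->mutex);
    }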
diff --git a/erts/emulator/beam/erl_alloc_util.h b/erts/emulator/beam/erl_alloc_util.h
index 73c467aa0a..0cce4c9125 100644
--- a/erts/emulator/beam/erl_alloc_util.h
+++ b/erts/emulator/beam/erl_alloc_util.h
@@ -24,10 +24,8 @@
#define ERTS_ALCU_VSN_STR "3.0"
#include "erl_alloc_types.h"
-#ifdef USE_THREADS
#define ERL_THREADS_EMU_INTERNAL__
#include "erl_threads.h"
-#endif
#include "erl_mseg.h"
#include "lttng-wrapper.h"
@@ -162,12 +160,10 @@ void * erts_alcu_alloc(ErtsAlcType_t, void *, Uint);
void * erts_alcu_realloc(ErtsAlcType_t, void *, void *, Uint);
void * erts_alcu_realloc_mv(ErtsAlcType_t, void *, void *, Uint);
void erts_alcu_free(ErtsAlcType_t, void *, void *);
-#ifdef USE_THREADS
void * erts_alcu_alloc_ts(ErtsAlcType_t, void *, Uint);
void * erts_alcu_realloc_ts(ErtsAlcType_t, void *, void *, Uint);
void * erts_alcu_realloc_mv_ts(ErtsAlcType_t, void *, void *, Uint);
void erts_alcu_free_ts(ErtsAlcType_t, void *, void *);
-#ifdef ERTS_SMP
void * erts_alcu_alloc_thr_spec(ErtsAlcType_t, void *, Uint);
void * erts_alcu_realloc_thr_spec(ErtsAlcType_t, void *, void *, Uint);
void * erts_alcu_realloc_mv_thr_spec(ErtsAlcType_t, void *, void *, Uint);
@@ -176,8 +172,6 @@ void * erts_alcu_alloc_thr_pref(ErtsAlcType_t, void *, Uint);
void * erts_alcu_realloc_thr_pref(ErtsAlcType_t, void *, void *, Uint);
void * erts_alcu_realloc_mv_thr_pref(ErtsAlcType_t, void *, void *, Uint);
void erts_alcu_free_thr_pref(ErtsAlcType_t, void *, void *);
-#endif
-#endif
Eterm erts_alcu_au_info_options(fmtfn_t *, void *, Uint **, Uint *);
Eterm erts_alcu_info_options(Allctr_t *, fmtfn_t *, void *, Uint **, Uint *);
Eterm erts_alcu_sz_info(Allctr_t *, int, int, fmtfn_t *, void *, Uint **, Uint *);
@@ -185,9 +179,7 @@ Eterm erts_alcu_info(Allctr_t *, int, int, fmtfn_t *, void *, Uint **, Uint *);
void erts_alcu_init(AlcUInit_t *);
void erts_alcu_current_size(Allctr_t *, AllctrSize_t *,
ErtsAlcUFixInfo_t *, int);
-#ifdef ERTS_SMP
void erts_alcu_check_delayed_dealloc(Allctr_t *, int, int *, ErtsThrPrgrVal *, int *);
-#endif
erts_aint32_t erts_alcu_fix_alloc_shrink(Allctr_t *, erts_aint32_t);
#ifdef ARCH_32
@@ -308,7 +300,6 @@ void erts_lcnt_update_allocator_locks(int enable);
typedef union {char c[ERTS_ALLOC_ALIGN_BYTES]; long l; double d;} Unit_t;
-#ifdef ERTS_SMP
typedef struct ErtsDoubleLink_t_ {
struct ErtsDoubleLink_t_ *next;
@@ -327,7 +318,6 @@ typedef struct {
ErtsDoubleLink_t abandoned; /* node in pooled_list or traitor_list */
} ErtsAlcCPoolData_t;
-#endif
typedef struct Carrier_t_ Carrier_t;
struct Carrier_t_ {
@@ -335,9 +325,7 @@ struct Carrier_t_ {
Carrier_t *next;
Carrier_t *prev;
erts_smp_atomic_t allctr;
-#ifdef ERTS_SMP
ErtsAlcCPoolData_t cpool; /* Overwritten by block if sbc */
-#endif
};
#define ERTS_ALC_CARRIER_TO_ALLCTR(C) \
@@ -434,7 +422,6 @@ typedef struct {
} while (0)
#endif
-#ifdef ERTS_SMP
typedef union ErtsAllctrDDBlock_t_ ErtsAllctrDDBlock_t;
@@ -477,7 +464,6 @@ typedef struct {
} head;
} ErtsAllctrDDQueue_t;
-#endif
typedef struct {
size_t type_size;
@@ -500,7 +486,6 @@ typedef struct {
} ErtsAlcFixList_t;
struct Allctr_t_ {
-#ifdef ERTS_SMP
struct {
/*
* We want the queue at the beginning of
@@ -511,7 +496,6 @@ struct Allctr_t_ {
int use;
int ix;
} dd;
-#endif
/* Allocator name prefix */
char * name_prefix;
@@ -560,7 +544,6 @@ struct Allctr_t_ {
/* Carriers */
CarrierList_t mbc_list;
CarrierList_t sbc_list;
-#ifdef ERTS_SMP
struct {
/* pooled_list, traitor list and dc_list contain only
carriers _created_ by this allocator */
@@ -579,7 +562,6 @@ struct Allctr_t_ {
erts_atomic_t no_carriers;
} stat;
} cpool;
-#endif
/* Main carrier (if there is one) */
Carrier_t * main_carrier;
@@ -622,7 +604,6 @@ struct Allctr_t_ {
int fix_shrink_scheduled;
ErtsAlcFixList_t *fix;
-#ifdef USE_THREADS
/* Mutex for this allocator */
erts_mtx_t mutex;
int thread_safe;
@@ -631,7 +612,6 @@ struct Allctr_t_ {
Allctr_t *next;
} ts_list;
-#endif
int atoms_initialized;
@@ -654,13 +634,11 @@ struct Allctr_t_ {
CarriersStats_t mbcs;
#ifdef DEBUG
-#ifdef USE_THREADS
struct {
int saved_tid;
erts_tid_t tid;
} debug;
#endif
-#endif
};
int erts_alcu_start(Allctr_t *, AllctrInit_t *);
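The carrier-pool data declared above (`ErtsAlcCPoolData_t`, now embedded unconditionally in every `Carrier_t`) is linked into a circular list whose empty state is a sentinel pointing at itself, as the `erts_alcu_init` hunk earlier shows. A stand-alone sketch of that sentinel setup using C11 atomics (the `toy_*` names are illustrative assumptions, not ERTS code):

    #include <stdatomic.h>
    #include <stdint.h>

    struct toy_pool_node {
        _Atomic uintptr_t next;
        _Atomic uintptr_t prev;
    };

    /* an empty pool is a circular list containing only its sentinel */
    static void toy_pool_sentinel_init(struct toy_pool_node *sentinel)
    {
        atomic_store_explicit(&sentinel->next, (uintptr_t) sentinel,
                              memory_order_relaxed);
        atomic_store_explicit(&sentinel->prev, (uintptr_t) sentinel,
                              memory_order_relaxed);
    }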
diff --git a/erts/emulator/beam/erl_async.c b/erts/emulator/beam/erl_async.c
index 9a93034fcb..75e88afc5e 100644
--- a/erts/emulator/beam/erl_async.c
+++ b/erts/emulator/beam/erl_async.c
@@ -34,9 +34,6 @@
#define ERTS_ASYNC_PRINT_JOB 0
-#if !defined(ERTS_SMP) && defined(USE_THREADS) && !ERTS_USE_ASYNC_READY_Q
-# error "Need async ready queue in non-smp case"
-#endif
typedef struct _erl_async {
DE_Handle* hndl; /* The DE_Handle is needed when port is gone */
@@ -46,16 +43,13 @@ typedef struct _erl_async {
ErlDrvPDL pdl;
void (*async_invoke)(void*);
void (*async_free)(void*);
-#if ERTS_USE_ASYNC_READY_Q
Uint sched_id;
union {
ErtsThrQPrepEnQ_t *prep_enq;
ErtsThrQFinDeQ_t fin_deq;
} q;
-#endif
} ErtsAsync;
-#if ERTS_USE_ASYNC_READY_Q
/*
* We can do without the enqueue mutex since it isn't needed for
@@ -94,7 +88,6 @@ typedef union {
char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncReadyQ))];
} ErtsAlgndAsyncReadyQ;
-#endif /* ERTS_USE_ASYNC_READY_Q */
typedef struct {
ErtsThrQ_t thr_q;
@@ -119,9 +112,7 @@ typedef struct {
char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncInit))];
} init;
ErtsAlgndAsyncQ *queue;
-#if ERTS_USE_ASYNC_READY_Q
ErtsAlgndAsyncReadyQ *ready_queue;
-#endif
} ErtsAsyncData;
#if defined(USE_THREADS) && defined(USE_VM_PROBES)
@@ -140,15 +131,6 @@ int erts_async_thread_suggested_stack_size; /* Initialized by erl_init.c */
static ErtsAsyncData *async;
-#ifndef USE_THREADS
-
-void
-erts_init_async(void)
-{
-
-}
-
-#else
static void *async_main(void *);
@@ -158,7 +140,6 @@ async_q(int i)
return &async->queue[i].aq;
}
-#if ERTS_USE_ASYNC_READY_Q
static ERTS_INLINE ErtsAsyncReadyQ *
async_ready_q(Uint sched_id)
@@ -166,16 +147,13 @@ async_ready_q(Uint sched_id)
return &async->ready_queue[((int)sched_id)-1].arq;
}
-#endif
void
erts_init_async(void)
{
async = NULL;
if (erts_async_max_threads > 0) {
-#if ERTS_USE_ASYNC_READY_Q
ErtsThrQInit_t qinit = ERTS_THR_Q_INIT_DEFAULT;
-#endif
erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER;
char *ptr, thr_name[16];
size_t tot_size = 0;
@@ -183,9 +161,7 @@ erts_init_async(void)
tot_size += ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsAsyncData));
tot_size += sizeof(ErtsAlgndAsyncQ)*erts_async_max_threads;
-#if ERTS_USE_ASYNC_READY_Q
tot_size += sizeof(ErtsAlgndAsyncReadyQ)*erts_no_schedulers;
-#endif
ptr = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_ASYNC_DATA,
tot_size);
@@ -202,7 +178,6 @@ erts_init_async(void)
async->queue = (ErtsAlgndAsyncQ *) ptr;
ptr += sizeof(ErtsAlgndAsyncQ)*erts_async_max_threads;
-#if ERTS_USE_ASYNC_READY_Q
qinit.live.queue = ERTS_THR_Q_LIVE_LONG;
qinit.live.objects = ERTS_THR_Q_LIVE_SHORT;
@@ -222,7 +197,6 @@ erts_init_async(void)
erts_thr_q_initialize(&arq->thr_q, &qinit);
}
-#endif
/* Create async threads... */
@@ -253,7 +227,6 @@ erts_init_async(void)
}
}
-#if ERTS_USE_ASYNC_READY_Q
void *
erts_get_async_ready_queue(Uint sched_id)
@@ -261,7 +234,6 @@ erts_get_async_ready_queue(Uint sched_id)
return (void *) async ? async_ready_q(sched_id) : NULL;
}
-#endif
static ERTS_INLINE void async_add(ErtsAsync *a, ErtsAsyncQ* q)
{
@@ -270,10 +242,8 @@ static ERTS_INLINE void async_add(ErtsAsync *a, ErtsAsyncQ* q)
#endif
if (is_internal_port(a->port)) {
-#if ERTS_USE_ASYNC_READY_Q
ErtsAsyncReadyQ *arq = async_ready_q(a->sched_id);
a->q.prep_enq = erts_thr_q_prepare_enqueue(&arq->thr_q);
-#endif
/* make sure the driver will stay around */
if (a->hndl)
erts_ddll_reference_referenced_driver(a->hndl);
@@ -309,10 +279,8 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
erts_tse_t *tse,
ErtsThrQPrepEnQ_t **prep_enq)
{
-#if ERTS_USE_ASYNC_READY_Q
int saved_fin_deq = 0;
ErtsThrQFinDeQ_t fin_deq;
-#endif
#ifdef USE_VM_PROBES
int len;
#endif
@@ -321,12 +289,10 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
ErtsAsync *a = (ErtsAsync *) erts_thr_q_dequeue(q);
if (a) {
-#if ERTS_USE_ASYNC_READY_Q
*prep_enq = a->q.prep_enq;
erts_thr_q_get_finalize_dequeue_data(q, &a->q.fin_deq);
if (saved_fin_deq)
erts_thr_q_append_finalize_dequeue_data(&a->q.fin_deq, &fin_deq);
-#endif
#ifdef USE_LTTNG_VM_TRACEPOINTS
if (LTTNG_ENABLED(aio_pool_get)) {
lttng_decl_portbuf(port_str);
@@ -354,7 +320,6 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
erts_tse_reset(tse);
-#if ERTS_USE_ASYNC_READY_Q
chk_fin_deq:
if (erts_thr_q_get_finalize_dequeue_data(q, &tmp_fin_deq)) {
if (!saved_fin_deq) {
@@ -364,13 +329,11 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
erts_thr_q_append_finalize_dequeue_data(&fin_deq,
&tmp_fin_deq);
}
-#endif
switch (erts_thr_q_inspect(q, 1)) {
case ERTS_THR_Q_DIRTY:
break;
case ERTS_THR_Q_NEED_THR_PRGR:
-#ifdef ERTS_SMP
{
ErtsThrPrgrVal prgr = erts_thr_q_need_thr_progress(q);
erts_thr_progress_wakeup(NULL, prgr);
@@ -382,17 +345,14 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
erts_tse_wait(tse);
break;
}
-#endif
case ERTS_THR_Q_CLEAN:
-#if ERTS_USE_ASYNC_READY_Q
if (saved_fin_deq) {
if (erts_thr_q_finalize_dequeue(&fin_deq))
goto chk_fin_deq;
else
saved_fin_deq = 0;
}
-#endif
erts_tse_wait(tse);
break;
@@ -408,15 +368,10 @@ static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
static ERTS_INLINE void call_async_ready(ErtsAsync *a)
{
-#if ERTS_USE_ASYNC_READY_Q
Port *p = erts_id2port_sflgs(a->port,
NULL,
0,
ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
-#else
- Port *p = erts_thr_id2port_sflgs(a->port,
- ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
-#endif
if (!p) {
if (a->async_free) {
ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_PORT);
@@ -432,11 +387,7 @@ static ERTS_INLINE void call_async_ready(ErtsAsync *a)
ERTS_MSACC_POP_STATE();
}
}
-#if ERTS_USE_ASYNC_READY_Q
erts_port_release(p);
-#else
- erts_thr_port_release(p);
-#endif
}
if (a->pdl)
driver_pdl_dec_refc(a->pdl);
@@ -446,7 +397,6 @@ static ERTS_INLINE void call_async_ready(ErtsAsync *a)
static ERTS_INLINE void async_reply(ErtsAsync *a, ErtsThrQPrepEnQ_t *prep_enq)
{
-#if ERTS_USE_ASYNC_READY_Q
ErtsAsyncReadyQ *arq;
#if ERTS_ASYNC_PRINT_JOB
@@ -465,12 +415,6 @@ static ERTS_INLINE void async_reply(ErtsAsync *a, ErtsThrQPrepEnQ_t *prep_enq)
erts_mtx_unlock(&arq->x.data.enq_mtx);
#endif
-#else /* ERTS_USE_ASYNC_READY_Q */
-
- call_async_ready(a);
- erts_free(ERTS_ALC_T_ASYNC, (void *) a);
-
-#endif /* ERTS_USE_ASYNC_READY_Q */
}
@@ -486,7 +430,6 @@ static erts_tse_t *async_thread_init(ErtsAsyncQ *aq)
erts_tse_t *tse = erts_tse_fetch();
ERTS_DECLARE_DUMMY(Uint no);
-#ifdef ERTS_SMP
ErtsThrPrgrCallbacks callbacks;
callbacks.arg = (void *) tse;
@@ -495,15 +438,12 @@ static erts_tse_t *async_thread_init(ErtsAsyncQ *aq)
callbacks.wait = NULL;
erts_thr_progress_register_unmanaged_thread(&callbacks);
-#endif
qinit.live.queue = ERTS_THR_Q_LIVE_LONG;
qinit.live.objects = ERTS_THR_Q_LIVE_SHORT;
qinit.arg = (void *) tse;
qinit.notify = async_wakeup;
-#if ERTS_USE_ASYNC_READY_Q
qinit.auto_finalize_dequeue = 0;
-#endif
erts_thr_q_initialize(&aq->thr_q, &qinit);
@@ -545,12 +485,10 @@ static void *async_main(void* arg)
return NULL;
}
-#endif /* USE_THREADS */
void
erts_exit_flush_async(void)
{
-#ifdef USE_THREADS
int i;
ErtsAsync a;
a.port = NIL;
@@ -564,10 +502,8 @@ erts_exit_flush_async(void)
async_add(&a, async_q(i));
for (i = 0; i < erts_async_max_threads; i++)
erts_thr_join(async->queue[i].aq.thr_id, NULL);
-#endif
}
-#if defined(USE_THREADS) && ERTS_USE_ASYNC_READY_Q
int erts_check_async_ready(void *varq)
{
@@ -609,18 +545,15 @@ int erts_async_ready_clean(void *varq, void *val)
case ERTS_THR_Q_DIRTY:
return ERTS_ASYNC_READY_DIRTY;
case ERTS_THR_Q_NEED_THR_PRGR:
-#ifdef ERTS_SMP
*((ErtsThrPrgrVal *) val)
= erts_thr_q_need_thr_progress(&arq->thr_q);
return ERTS_ASYNC_READY_NEED_THR_PRGR;
-#endif
case ERTS_THR_Q_CLEAN:
break;
}
return ERTS_ASYNC_READY_CLEAN;
}
-#endif
/*
** Generate a fair async key from an ErlDrvPort
@@ -658,16 +591,12 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
Port* prt;
long id;
unsigned int qix;
-#if ERTS_USE_ASYNC_READY_Q
Uint sched_id;
ERTS_MSACC_PUSH_STATE();
sched_id = erts_get_scheduler_id();
if (!sched_id)
sched_id = 1;
-#else
- ERTS_MSACC_PUSH_STATE();
-#endif
prt = erts_drvport2port(ix);
if (prt == ERTS_INVALID_ERL_DRV_PORT)
@@ -677,9 +606,7 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
a = (ErtsAsync*) erts_alloc(ERTS_ALC_T_ASYNC, sizeof(ErtsAsync));
-#if ERTS_USE_ASYNC_READY_Q
a->sched_id = sched_id;
-#endif
a->hndl = (DE_Handle*)prt->drv_ptr->handle;
a->port = prt->common.id;
a->pdl = NULL;
@@ -709,7 +636,6 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
(*key % erts_async_max_threads) : 0;
*key = qix;
}
-#ifdef USE_THREADS
if (erts_async_max_threads > 0) {
if (prt->port_data_lock) {
driver_pdl_inc_refc(prt->port_data_lock);
@@ -718,7 +644,6 @@ long driver_async(ErlDrvPort ix, unsigned int* key,
async_add(a, async_q(qix));
return id;
}
-#endif
ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_PORT);
(*a->async_invoke)(a->async_data);
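Two small index computations carry most of the dispatch logic above: a keyed job always lands in the same worker queue (`*key % erts_async_max_threads`), so jobs sharing a key are serialized, and a finished job is posted back to the ready queue of the scheduler that issued it (`ready_queue[sched_id - 1]`). A sketch of just those mappings, with hypothetical `toy`-style helpers rather than the ERTS functions:

    #include <assert.h>

    /* jobs with equal keys are always dispatched to the same worker queue */
    static unsigned worker_queue_ix(unsigned key, unsigned n_async_threads)
    {
        assert(n_async_threads > 0);
        return key % n_async_threads;
    }

    /* ready queues are indexed by scheduler id, which starts at 1 */
    static unsigned ready_queue_ix(unsigned sched_id)
    {
        assert(sched_id >= 1);
        return sched_id - 1;
    }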
diff --git a/erts/emulator/beam/erl_async.h b/erts/emulator/beam/erl_async.h
index 4b470e7679..76fc61b560 100644
--- a/erts/emulator/beam/erl_async.h
+++ b/erts/emulator/beam/erl_async.h
@@ -28,38 +28,20 @@ extern int erts_async_max_threads;
extern int erts_async_thread_suggested_stack_size;
-#ifdef ERTS_SMP
/*
* With smp support we can choose to have, or not to
* have an async ready queue.
*/
#define ERTS_USE_ASYNC_READY_Q 1
-#endif
-#ifndef ERTS_SMP
-/* In non-smp case we *need* the async ready queue */
-# undef ERTS_USE_ASYNC_READY_Q
-# define ERTS_USE_ASYNC_READY_Q 1
-#endif
-#ifndef ERTS_USE_ASYNC_READY_Q
-# define ERTS_USE_ASYNC_READY_Q 0
-#endif
-#ifndef USE_THREADS
-# undef ERTS_USE_ASYNC_READY_Q
-# define ERTS_USE_ASYNC_READY_Q 0
-#endif /* !USE_THREADS */
-#if ERTS_USE_ASYNC_READY_Q
int erts_check_async_ready(void *);
int erts_async_ready_clean(void *, void *);
void *erts_get_async_ready_queue(Uint sched_id);
#define ERTS_ASYNC_READY_CLEAN 0
#define ERTS_ASYNC_READY_DIRTY 1
-#ifdef ERTS_SMP
#define ERTS_ASYNC_READY_NEED_THR_PRGR 2
-#endif
-#endif /* ERTS_USE_ASYNC_READY_Q */
void erts_init_async(void);
void erts_exit_flush_async(void);
diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c
index e9bfb39035..cea8b2200c 100644
--- a/erts/emulator/beam/erl_bif_ddll.c
+++ b/erts/emulator/beam/erl_bif_ddll.c
@@ -50,11 +50,7 @@
#include "dtrace-wrapper.h"
#include "lttng-wrapper.h"
-#ifdef ERTS_SMP
#define DDLL_SMP 1
-#else
-#define DDLL_SMP 0
-#endif
/*
@@ -280,10 +276,8 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3)
path[path_len++] = '/';
sys_strcpy(path+path_len,name);
-#if DDLL_SMP
erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
lock_drv_list();
-#endif
if ((drv = lookup_driver(name)) != NULL) {
if (drv->handle == NULL) {
/* static_driver */
@@ -404,24 +398,18 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3)
erts_ddll_reference_driver(dh);
ASSERT(dh->status == ERL_DE_RELOAD);
dh->status = ERL_DE_FORCE_RELOAD;
-#if DDLL_SMP
unlock_drv_list();
-#endif
kill_ports_driver_unloaded(dh);
/* Dereference, eventually causing driver destruction */
-#if DDLL_SMP
lock_drv_list();
-#endif
erts_ddll_dereference_driver(dh);
}
-#if DDLL_SMP
erts_ddll_reference_driver(dh);
unlock_drv_list();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
lock_drv_list();
erts_ddll_dereference_driver(dh);
-#endif
BIF_P->flags |= F_USING_DDLL;
if (monitor) {
@@ -432,18 +420,14 @@ BIF_RETTYPE erl_ddll_try_load_3(BIF_ALIST_3)
hp = HAlloc(BIF_P, 3);
t = TUPLE2(hp, am_ok, ok_term);
}
-#if DDLL_SMP
unlock_drv_list();
-#endif
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) path);
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(BIF_P));
BIF_RET(t);
soft_error:
-#if DDLL_SMP
unlock_drv_list();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
-#endif
if (do_build_load_error) {
soft_error_term = build_load_error(BIF_P, build_this_load_error);
}
@@ -551,9 +535,7 @@ Eterm erl_ddll_try_unload_2(BIF_ALIST_2)
goto error;
}
-#if DDLL_SMP
lock_drv_list();
-#endif
if ((drv = lookup_driver(name)) == NULL) {
soft_error_term = am_not_loaded;
@@ -608,23 +590,17 @@ done:
/* Avoid closing the driver by referencing it */
erts_ddll_reference_driver(dh);
dh->status = ERL_DE_FORCE_UNLOAD;
-#if DDLL_SMP
unlock_drv_list();
-#endif
kill_ports_driver_unloaded(dh);
-#if DDLL_SMP
lock_drv_list();
-#endif
erts_ddll_dereference_driver(dh);
}
-#if DDLL_SMP
erts_ddll_reference_driver(dh);
unlock_drv_list();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
lock_drv_list();
erts_ddll_dereference_driver(dh);
-#endif
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
BIF_P->flags |= F_USING_DDLL;
if (monitor > 0) {
@@ -638,15 +614,11 @@ done:
if (kill_ports > 1) {
ERTS_BIF_CHK_EXITED(BIF_P); /* May be exited by port killing */
}
-#if DDLL_SMP
unlock_drv_list();
-#endif
BIF_RET(t);
soft_error:
-#if DDLL_SMP
unlock_drv_list();
-#endif
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
hp = HAlloc(BIF_P, 3);
@@ -697,9 +669,7 @@ BIF_RETTYPE erl_ddll_loaded_drivers_0(BIF_ALIST_0)
int need = 3;
Eterm res = NIL;
erts_driver_t *drv;
-#if DDLL_SMP
lock_drv_list();
-#endif
for (drv = driver_list; drv; drv = drv->next) {
need += sys_strlen(drv->name)*2+2;
}
@@ -712,9 +682,7 @@ BIF_RETTYPE erl_ddll_loaded_drivers_0(BIF_ALIST_0)
}
res = TUPLE2(hp,am_ok,res);
/* hp += 3 */
-#if DDLL_SMP
unlock_drv_list();
-#endif
BIF_RET(res);
}
@@ -736,9 +704,7 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2)
Eterm *hp;
int i;
Uint filter;
-#if DDLL_SMP
int have_lock = 0;
-#endif
if ((name = pick_list_or_atom(name_term)) == NULL) {
goto error;
@@ -748,10 +714,8 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2)
goto error;
}
-#if DDLL_SMP
lock_drv_list();
have_lock = 1;
-#endif
if ((drv = lookup_driver(name)) == NULL) {
goto error;
}
@@ -827,9 +791,7 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2)
hp += 2;
}
done:
-#if DDLL_SMP
unlock_drv_list();
-#endif
if (pei)
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, pei);
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
@@ -838,11 +800,9 @@ BIF_RETTYPE erl_ddll_info_2(BIF_ALIST_2)
if (name != NULL) {
erts_free(ERTS_ALC_T_DDLL_TMP_BUF, (void *) name);
}
-#if DDLL_SMP
if (have_lock) {
unlock_drv_list();
}
-#endif
BIF_ERROR(p,BADARG);
}
@@ -899,13 +859,9 @@ BIF_RETTYPE erl_ddll_format_error_int_1(BIF_ALIST_1)
if (errdesc_to_code(code_term,&errint) != 0) {
goto error;
}
-#if DDLL_SMP
lock_drv_list();
-#endif
errstring = erts_ddll_error(errint);
-#if DDLL_SMP
unlock_drv_list();
-#endif
break;
}
if (errstring == NULL) {
@@ -1045,13 +1001,9 @@ void erts_ddll_proc_dead(Process *p, ErtsProcLocks plocks)
DE_Handle *dh = drv->handle;
erts_ddll_reference_driver(dh);
dh->status = ERL_DE_FORCE_UNLOAD;
-#if DDLL_SMP
unlock_drv_list();
-#endif
kill_ports_driver_unloaded(dh);
-#if DDLL_SMP
lock_drv_list(); /* Needed for future list operations */
-#endif
drv = drv->next; /* before allowing destruction */
erts_ddll_dereference_driver(dh);
} else {
@@ -1282,9 +1234,7 @@ static Eterm notify_when_loaded(Process *p, Eterm name_term, char *name, ErtsPro
erts_driver_t *drv;
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks);
-#if DDLL_SMP
lock_drv_list();
-#endif
if ((drv = lookup_driver(name)) == NULL) {
immediate_tag = am_unloaded;
immediate_type = am_DOWN;
@@ -1314,20 +1264,14 @@ static Eterm notify_when_loaded(Process *p, Eterm name_term, char *name, ErtsPro
}
p->flags |= F_USING_DDLL;
r = add_monitor(p, drv->handle, ERL_DE_PROC_AWAIT_LOAD);
-#if DDLL_SMP
unlock_drv_list();
-#endif
BIF_RET(r);
immediate:
r = erts_make_ref(p);
-#if DDLL_SMP
erts_smp_proc_unlock(p, plocks);
-#endif
notify_proc(p, r, name_term, immediate_type, immediate_tag, 0);
-#if DDLL_SMP
unlock_drv_list();
erts_smp_proc_lock(p, plocks);
-#endif
BIF_RET(r);
}
@@ -1339,9 +1283,7 @@ static Eterm notify_when_unloaded(Process *p, Eterm name_term, char *name, ErtsP
erts_driver_t *drv;
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN & plocks);
-#if DDLL_SMP
lock_drv_list();
-#endif
if ((drv = lookup_driver(name)) == NULL) {
immediate_tag = am_unloaded;
immediate_type = am_DOWN;
@@ -1355,20 +1297,14 @@ static Eterm notify_when_unloaded(Process *p, Eterm name_term, char *name, ErtsP
p->flags |= F_USING_DDLL;
r = add_monitor(p, drv->handle, flag);
-#if DDLL_SMP
unlock_drv_list();
-#endif
BIF_RET(r);
immediate:
r = erts_make_ref(p);
-#if DDLL_SMP
erts_smp_proc_unlock(p, plocks);
-#endif
notify_proc(p, r, name_term, immediate_type, immediate_tag, 0);
-#if DDLL_SMP
unlock_drv_list();
erts_smp_proc_lock(p, plocks);
-#endif
BIF_RET(r);
}
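The recurring shape in this file is: take a reference on the driver handle, drop the global driver-list lock so port killing can run without lock-order problems, then re-take the lock and drop the reference. A minimal model of that "pin, unlock, work, relock, unpin" pattern, assuming pthreads and C11 atomics rather than the ERTS lock and refc primitives (all `toy_*` names are invented):

    #include <pthread.h>
    #include <stdatomic.h>

    typedef struct {
        atomic_int refc;                 /* keeps the handle alive        */
    } toy_handle_t;

    static pthread_mutex_t toy_list_lock = PTHREAD_MUTEX_INITIALIZER;

    static void toy_blocking_work(toy_handle_t *h) { (void) h; }

    /* caller holds toy_list_lock on entry and on return */
    static void toy_work_outside_list_lock(toy_handle_t *h)
    {
        atomic_fetch_add(&h->refc, 1);       /* pin the handle            */
        pthread_mutex_unlock(&toy_list_lock);

        toy_blocking_work(h);                /* e.g. kill affected ports  */

        pthread_mutex_lock(&toy_list_lock);  /* restore caller's locking  */
        atomic_fetch_sub(&h->refc, 1);       /* unpin                     */
    }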
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index e5d7efcc72..df02554d83 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -87,10 +87,7 @@ static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE
#ifdef ARCH_64
" [64-bit]"
#endif
-#ifdef ERTS_SMP
" [smp:%beu:%beu]"
-#endif
-#ifdef USE_THREADS
#if defined(ERTS_DIRTY_SCHEDULERS) && defined(ERTS_SMP)
" [ds:%beu:%beu:%beu]"
#endif
@@ -98,7 +95,6 @@ static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE
" [dirty-schedulers-TEST]"
#endif
" [async-threads:%d]"
-#endif
#ifdef HIPE
" [hipe]"
#endif
@@ -353,14 +349,12 @@ erts_print_system_version(fmtfn_t to, void *arg, Process *c_p)
char *rc_str = "";
char rc_buf[100];
char *ov = otp_version;
-#ifdef ERTS_SMP
Uint total, online, active;
Uint dirty_cpu, dirty_cpu_onln, dirty_io;
erts_schedulers_state(&total, &online, &active,
&dirty_cpu, &dirty_cpu_onln, NULL,
&dirty_io, NULL);
-#endif
for (i = 0; i < sizeof(otp_version)-4; i++) {
if (ov[i] == '-' && ov[i+1] == 'r' && ov[i+2] == 'c')
rc = atoi(&ov[i+3]);
@@ -375,15 +369,11 @@ erts_print_system_version(fmtfn_t to, void *arg, Process *c_p)
}
return erts_print(to, arg, erts_system_version,
rc_str
-#ifdef ERTS_SMP
, total, online
#ifdef ERTS_DIRTY_SCHEDULERS
, dirty_cpu, dirty_cpu_onln, dirty_io
#endif
-#endif
-#ifdef USE_THREADS
, erts_async_max_threads
-#endif
#ifdef ERTS_ENABLE_KERNEL_POLL
, erts_use_kernel_poll ? "true" : "false"
#endif
@@ -763,7 +753,6 @@ process_info_init(void)
static ERTS_INLINE Process *
pi_pid2proc(Process *c_p, Eterm pid, ErtsProcLocks info_locks)
{
-#ifdef ERTS_SMP
/*
* If the main lock is needed, we use erts_pid2proc_not_running()
* instead of erts_pid2proc() for two reasons:
@@ -781,7 +770,6 @@ pi_pid2proc(Process *c_p, Eterm pid, ErtsProcLocks info_locks)
return erts_pid2proc_not_running(c_p, ERTS_PROC_LOCK_MAIN,
pid, info_locks);
else
-#endif
return erts_pid2proc(c_p, ERTS_PROC_LOCK_MAIN,
pid, info_locks);
}
@@ -1086,12 +1074,10 @@ BIF_RETTYPE process_info_2(BIF_ALIST_2)
}
ASSERT(is_value(res));
-#ifdef ERTS_SMP
if (BIF_P == rp)
info_locks &= ~ERTS_PROC_LOCK_MAIN;
if (rp && info_locks)
erts_smp_proc_unlock(rp, info_locks);
-#endif
ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
BIF_RET(res);
@@ -2144,9 +2130,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
ASSERT(erts_compat_rel > 0);
BIF_RET(make_small(erts_compat_rel));
} else if (BIF_ARG_1 == am_multi_scheduling) {
-#ifndef ERTS_SMP
- BIF_RET(am_disabled);
-#else
#ifndef ERTS_DIRTY_SCHEDULERS
if (erts_no_schedulers == 1)
BIF_RET(am_disabled);
@@ -2160,7 +2143,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
? am_blocked
: am_blocked_normal));
}
-#endif
} else if (BIF_ARG_1 == am_build_type) {
#if defined(DEBUG)
ERTS_DECL_AM(debug);
@@ -2392,16 +2374,10 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(erts_allocator_options((void *) BIF_P));
}
else if (BIF_ARG_1 == am_thread_pool_size) {
-#ifdef USE_THREADS
extern int erts_async_max_threads;
-#endif
int n;
-#ifdef USE_THREADS
n = erts_async_max_threads;
-#else
- n = 0;
-#endif
BIF_RET(make_small(n));
}
else if (BIF_ARG_1 == am_alloc_util_allocators) {
@@ -2549,11 +2525,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(res);
#endif
} else if (BIF_ARG_1 == am_threads) {
-#ifdef USE_THREADS
return am_true;
-#else
- return am_false;
-#endif
} else if (BIF_ARG_1 == am_creation) {
return make_small(erts_this_node->creation);
} else if (BIF_ARG_1 == am_break_ignored) {
@@ -2612,11 +2584,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
hp = HAlloc(BIF_P, 2*n);
BIF_RET(buf_to_intlist(&hp, buf, n, NIL));
} else if (ERTS_IS_ATOM_STR("smp_support", BIF_ARG_1)) {
-#ifdef ERTS_SMP
BIF_RET(am_true);
-#else
- BIF_RET(am_false);
-#endif
} else if (ERTS_IS_ATOM_STR("scheduler_bind_type", BIF_ARG_1)) {
BIF_RET(erts_bound_schedulers_term(BIF_P));
} else if (ERTS_IS_ATOM_STR("scheduler_bindings", BIF_ARG_1)) {
@@ -2628,11 +2596,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
res = make_small(erts_no_schedulers);
BIF_RET(res);
} else if (ERTS_IS_ATOM_STR("schedulers_state", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- Eterm *hp = HAlloc(BIF_P, 4);
- res = TUPLE3(hp, make_small(1), make_small(1), make_small(1));
- BIF_RET(res);
-#else
Eterm *hp;
Uint total, online, active;
erts_schedulers_state(&total, &online, &active,
@@ -2643,13 +2606,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
make_small(online),
make_small(active));
BIF_RET(res);
-#endif
} else if (ERTS_IS_ATOM_STR("schedulers_state", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- Eterm *hp = HAlloc(BIF_P, 4);
- res = TUPLE3(hp, make_small(1), make_small(1), make_small(1));
- BIF_RET(res);
-#else
Eterm *hp;
Uint total, online, active;
erts_schedulers_state(&total, &online, &active,
@@ -2660,19 +2617,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
make_small(online),
make_small(active));
BIF_RET(res);
-#endif
} else if (ERTS_IS_ATOM_STR("all_schedulers_state", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- Eterm *hp = HAlloc(BIF_P, 2+5);
- res = CONS(hp+5,
- TUPLE4(hp,
- am_normal,
- make_small(1),
- make_small(1),
- make_small(1)),
- NIL);
- BIF_RET(res);
-#else
Eterm *hp, tpl;
Uint sz, total, online, active,
dirty_cpu_total, dirty_cpu_online, dirty_cpu_active,
@@ -2718,23 +2663,14 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
hp += 5;
res = CONS(hp, tpl, res);
BIF_RET(res);
-#endif
} else if (ERTS_IS_ATOM_STR("schedulers_online", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- BIF_RET(make_small(1));
-#else
Uint online;
erts_schedulers_state(NULL, &online, NULL, NULL, NULL, NULL, NULL, NULL);
BIF_RET(make_small(online));
-#endif
} else if (ERTS_IS_ATOM_STR("schedulers_active", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- BIF_RET(make_small(1));
-#else
Uint active;
erts_schedulers_state(NULL, NULL, &active, NULL, NULL, NULL, NULL, NULL);
BIF_RET(make_small(active));
-#endif
} else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers", BIF_ARG_1)) {
Uint dirty_cpu;
#ifdef ERTS_DIRTY_SCHEDULERS
@@ -2804,23 +2740,15 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
} else if (ERTS_IS_ATOM_STR("check_io", BIF_ARG_1)) {
BIF_RET(erts_check_io_info(BIF_P));
} else if (ERTS_IS_ATOM_STR("multi_scheduling_blockers", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- BIF_RET(NIL);
-#else
if (erts_no_schedulers == 1)
BIF_RET(NIL);
else
BIF_RET(erts_multi_scheduling_blockers(BIF_P, 0));
-#endif
} else if (ERTS_IS_ATOM_STR("normal_multi_scheduling_blockers", BIF_ARG_1)) {
-#ifndef ERTS_SMP
- BIF_RET(NIL);
-#else
if (erts_no_schedulers == 1)
BIF_RET(NIL);
else
BIF_RET(erts_multi_scheduling_blockers(BIF_P, 1));
-#endif
} else if (ERTS_IS_ATOM_STR("modified_timing_level", BIF_ARG_1)) {
BIF_RET(ERTS_USE_MODIFIED_TIMING()
? make_small(erts_modified_timing_level)
@@ -2883,12 +2811,10 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(am_false);
#endif
}
-#ifdef ERTS_SMP
else if (ERTS_IS_ATOM_STR("thread_progress", BIF_ARG_1)) {
erts_thr_progress_dbg_print_state();
BIF_RET(am_true);
}
-#endif
else if (BIF_ARG_1 == am_message_queue_data) {
switch (erts_default_spo_flags & (SPO_ON_HEAP_MSGQ|SPO_OFF_HEAP_MSGQ)) {
case SPO_OFF_HEAP_MSGQ:
@@ -3171,9 +3097,6 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
}
else if (ERTS_IS_ATOM_STR("locking", item)) {
if (hpp) {
-#ifndef ERTS_SMP
- res = am_false;
-#else
if (erts_atomic32_read_nob(&prt->state)
& ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK) {
DECL_AM(port_level);
@@ -3187,7 +3110,6 @@ erts_bld_port_info(Eterm **hpp, ErlOffHeap *ohp, Uint *szp, Port *prt,
& ERL_DRV_FLAG_USE_PORT_LOCKING));
res = AM_driver_level;
}
-#endif
}
if (szp) {
res = am_true;
@@ -3425,11 +3347,9 @@ BIF_RETTYPE process_display_2(BIF_ALIST_2)
2);
}
erts_stack_dump(ERTS_PRINT_STDERR, NULL, rp);
-#ifdef ERTS_SMP
erts_smp_proc_unlock(rp, (BIF_P == rp
? ERTS_PROC_LOCKS_ALL_MINOR
: ERTS_PROC_LOCKS_ALL));
-#endif
BIF_RET(am_true);
}
@@ -4239,10 +4159,8 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
BIF_RET(AM_dead);
}
-#ifdef ERTS_SMP
if (BIF_P == rp)
rp_locks |= ERTS_PROC_LOCK_MAIN;
-#endif
xres = erts_send_exit_signal(NULL, /* NULL in order to
force a pending exit
when we send to our
@@ -4254,10 +4172,8 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
NIL,
NULL,
0);
-#ifdef ERTS_SMP
if (BIF_P == rp)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
-#endif
erts_smp_proc_unlock(rp, rp_locks);
if (xres > 1) {
DECL_AM(message);
@@ -4364,7 +4280,6 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
}
}
else if (ERTS_IS_ATOM_STR("not_running_optimization", BIF_ARG_1)) {
-#ifdef ERTS_SMP
int old_use_opt, use_opt;
switch (BIF_ARG_2) {
case am_true:
@@ -4384,9 +4299,6 @@ BIF_RETTYPE erts_debug_set_internal_state_2(BIF_ALIST_2)
erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(old_use_opt ? am_true : am_false);
-#else
- BIF_ERROR(BIF_P, EXC_NOTSUP);
-#endif
}
else if (ERTS_IS_ATOM_STR("wait", BIF_ARG_1)) {
if (ERTS_IS_ATOM_STR("deallocations", BIF_ARG_2)) {
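The version banner earlier in this file is assembled from adjacent string literals chosen by the preprocessor, so every conversion that survives in the final string must still have a matching argument in the `erts_print` call. A reduced illustration of that coupling, using plain `printf`/`%u` in place of `erts_print`/`%beu` (the `toy_`/`BANNER_` names are made up for the example):

    #include <stdio.h>

    #define BANNER_BASE  "Toy/RTS 1.0"
    #define BANNER_SMP   " [smp:%u:%u]"
    #define BANNER_ASYNC " [async-threads:%d]"

    static const char toy_banner[] = BANNER_BASE BANNER_SMP BANNER_ASYNC "\n";

    static void toy_print_banner(unsigned total, unsigned online, int async_threads)
    {
        /* arguments must appear in the same order as the conversions above */
        printf(toy_banner, total, online, async_threads);
    }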
diff --git a/erts/emulator/beam/erl_bif_port.c b/erts/emulator/beam/erl_bif_port.c
index ff03151619..106f18b747 100644
--- a/erts/emulator/beam/erl_bif_port.c
+++ b/erts/emulator/beam/erl_bif_port.c
@@ -273,10 +273,8 @@ BIF_RETTYPE erts_internal_port_call_3(BIF_ALIST_3)
state = erts_smp_atomic32_read_acqb(&BIF_P->state);
if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) {
-#ifdef ERTS_SMP
if (state & ERTS_PSFLG_PENDING_EXIT)
erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN);
-#endif
ERTS_BIF_EXITED(BIF_P);
}
@@ -323,10 +321,8 @@ BIF_RETTYPE erts_internal_port_control_3(BIF_ALIST_3)
state = erts_smp_atomic32_read_acqb(&BIF_P->state);
if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) {
-#ifdef ERTS_SMP
if (state & ERTS_PSFLG_PENDING_EXIT)
erts_handle_pending_exit(BIF_P, ERTS_PROC_LOCK_MAIN);
-#endif
ERTS_BIF_EXITED(BIF_P);
}
@@ -511,7 +507,6 @@ cleanup_old_port_data(erts_aint_t data)
ASSERT(is_immed((Eterm) data));
}
else {
-#ifdef ERTS_SMP
ErtsPortDataHeap *pdhp = (ErtsPortDataHeap *) data;
size_t size;
ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER;
@@ -520,9 +515,6 @@ cleanup_old_port_data(erts_aint_t data)
(void *) pdhp,
&pdhp->later_op,
size);
-#else
- free_port_data_heap((void *) data);
-#endif
}
}
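The port-data code above keeps small terms directly in one atomic word and larger ones behind a pointer to a separately allocated heap; an old heap is only reclaimed via a scheduled later-op, once no reader can still be following the pointer. A simplified model of the tagged-word part, with an invented tag bit and `toy_*` names:

    #include <stdatomic.h>
    #include <stdint.h>

    #define TOY_IMMED_TAG ((uintptr_t) 1)

    /* tag bit set => the value itself is stored in the word */
    static int toy_is_immed(uintptr_t word)
    {
        return (word & TOY_IMMED_TAG) != 0;
    }

    /* acquire load pairs with a release store by the writer, so heap
     * contents are visible before the pointer is dereferenced */
    static uintptr_t toy_load_port_data(_Atomic(uintptr_t) *slot)
    {
        return atomic_load_explicit(slot, memory_order_acquire);
    }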
diff --git a/erts/emulator/beam/erl_bif_re.c b/erts/emulator/beam/erl_bif_re.c
index ad124fd979..bc819505e7 100644
--- a/erts/emulator/beam/erl_bif_re.c
+++ b/erts/emulator/beam/erl_bif_re.c
@@ -66,11 +66,7 @@ static void erts_erts_pcre_stack_free(void *ptr) {
#define ERTS_PCRE_STACK_MARGIN (10*1024)
-#ifdef ERTS_SMP
# define ERTS_STACK_LIMIT ((char *) ethr_get_stacklimit())
-#else
-# define ERTS_STACK_LIMIT ((char *) erts_scheduler_stack_limit)
-#endif
static int
stack_guard_downwards(void)
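`stack_guard_downwards` compares the current stack position against a saved limit plus a safety margin before letting PCRE recurse further. A self-contained sketch of a downward-growing stack guard (the margin value and names are illustrative, not the ERTS constants):

    #define TOY_STACK_MARGIN (10 * 1024)

    /* returns non-zero when recursion must stop: the address of a local
     * variable has come within the margin of the stack limit */
    static int toy_stack_guard_downwards(const char *stack_limit)
    {
        char here;
        return &here < stack_limit + TOY_STACK_MARGIN;
    }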
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index 45159c4392..c476e18c33 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -60,10 +60,8 @@ static struct { /* Protected by code write permission */
int local;
BpFunctions f; /* Local functions */
BpFunctions e; /* Export entries */
-#ifdef ERTS_SMP
Process* stager;
ErtsThrPrgrLaterOp lop;
-#endif
} finish_bp;
static Eterm
@@ -71,9 +69,7 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist);
static int
erts_set_tracing_event_pattern(Eterm event, Binary*, int on);
-#ifdef ERTS_SMP
static void smp_bp_finisher(void* arg);
-#endif
static BIF_RETTYPE
system_monitor(Process *p, Eterm monitor_pid, Eterm list);
@@ -345,7 +341,6 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
ERTS_TRACER_CLEAR(&meta_tracer);
-#ifdef ERTS_SMP
if (finish_bp.current >= 0) {
ASSERT(matches >= 0);
ASSERT(finish_bp.stager == NULL);
@@ -355,7 +350,6 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
erts_suspend(p, ERTS_PROC_LOCK_MAIN, NULL);
ERTS_BIF_YIELD_RETURN(p, make_small(matches));
}
-#endif
erts_release_code_write_permission();
@@ -367,7 +361,6 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
}
}
-#ifdef ERTS_SMP
static void smp_bp_finisher(void* null)
{
if (erts_finish_breakpointing()) { /* Not done */
@@ -388,7 +381,6 @@ static void smp_bp_finisher(void* null)
erts_proc_dec_refc(p);
}
}
-#endif /* ERTS_SMP */
void
erts_get_default_trace_pattern(int *trace_pattern_is_on,
@@ -543,9 +535,7 @@ Eterm erts_internal_trace_3(BIF_ALIST_3)
int matches = 0;
Uint mask = 0;
int cpu_ts = 0;
-#ifdef ERTS_SMP
int system_blocked = 0;
-#endif
if (! erts_trace_flags(list, &mask, &tracer, &cpu_ts)) {
BIF_ERROR(p, BADARG);
@@ -699,11 +689,9 @@ Eterm erts_internal_trace_3(BIF_ALIST_3)
mods = 1;
}
-#ifdef ERTS_SMP
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
system_blocked = 1;
-#endif
ok = 1;
if (procs || mods) {
@@ -766,12 +754,10 @@ Eterm erts_internal_trace_3(BIF_ALIST_3)
goto error;
}
-#ifdef ERTS_SMP
if (system_blocked) {
erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
-#endif
erts_release_code_write_permission();
ERTS_TRACER_CLEAR(&tracer);
@@ -785,12 +771,10 @@ Eterm erts_internal_trace_3(BIF_ALIST_3)
ERTS_TRACER_CLEAR(&tracer);
-#ifdef ERTS_SMP
if (system_blocked) {
erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
-#endif
erts_release_code_write_permission();
BIF_ERROR(p, BADARG);
@@ -1055,12 +1039,10 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
mfa[1] = tp[2];
mfa[2] = signed_val(tp[3]);
-#ifdef ERTS_SMP
if ( (key == am_call_time) || (key == am_all)) {
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
}
-#endif
#ifdef ERTS_DIRTY_SCHEDULERS
erts_smp_mtx_lock(&erts_dirty_bp_ix_mtx);
#endif
@@ -1071,12 +1053,10 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
#ifdef ERTS_DIRTY_SCHEDULERS
erts_smp_mtx_unlock(&erts_dirty_bp_ix_mtx);
#endif
-#ifdef ERTS_SMP
if ( (key == am_call_time) || (key == am_all)) {
erts_smp_thr_progress_unblock();
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
}
-#endif
switch (r) {
case FUNC_TRACE_NOEXIST:
@@ -1526,17 +1506,13 @@ erts_set_trace_pattern(Process*p, ErtsCodeMFA *mfa, int specified,
finish_bp.install = on;
finish_bp.local = flags.breakpoint;
-#ifdef ERTS_SMP
if (is_blocking) {
ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
-#endif
while (erts_finish_breakpointing()) {
/* Empty loop body */
}
-#ifdef ERTS_SMP
finish_bp.current = -1;
}
-#endif
if (flags.breakpoint) {
matches += finish_bp.f.matched;
@@ -1571,11 +1547,6 @@ erts_set_tracing_event_pattern(Eterm event, Binary* match_spec, int on)
finish_bp.f.matched = 0;
finish_bp.f.matching = NULL;
-#ifndef ERTS_SMP
- while (erts_finish_breakpointing()) {
- /* Empty loop body */
- }
-#endif
return 1;
}
@@ -2015,24 +1986,20 @@ BIF_RETTYPE seq_trace_print_2(BIF_ALIST_2)
}
void erts_system_monitor_clear(Process *c_p) {
-#ifdef ERTS_SMP
if (c_p) {
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
}
-#endif
erts_set_system_monitor(NIL);
erts_system_monitor_long_gc = 0;
erts_system_monitor_long_schedule = 0;
erts_system_monitor_large_heap = 0;
erts_system_monitor_flags.busy_port = 0;
erts_system_monitor_flags.busy_dist_port = 0;
-#ifdef ERTS_SMP
if (c_p) {
erts_smp_thr_progress_unblock();
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
-#endif
}
@@ -2200,23 +2167,19 @@ system_monitor(Process *p, Eterm monitor_pid, Eterm list)
/* Begin: Trace for System Profiling */
void erts_system_profile_clear(Process *c_p) {
-#ifdef ERTS_SMP
if (c_p) {
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
erts_smp_thr_progress_block();
}
-#endif
erts_set_system_profile(NIL);
erts_system_profile_flags.scheduler = 0;
erts_system_profile_flags.runnable_procs = 0;
erts_system_profile_flags.runnable_ports = 0;
erts_system_profile_flags.exclusive = 0;
-#ifdef ERTS_SMP
if (c_p) {
erts_smp_thr_progress_unblock();
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
-#endif
}
static Eterm system_profile_get(Process *p) {
@@ -2378,26 +2341,15 @@ reply_trace_delivered_all(void *vtdarp)
Process *rp = tdarp->proc;
Eterm *hp = NULL;
ErlOffHeap *ohp;
-#ifdef ERTS_SMP
ErlHeapFragment *bp;
bp = new_message_buffer(4 + NC_HEAP_SIZE(tdarp->ref));
hp = &bp->mem[0];
ohp = &bp->off_heap;
-#else
- ErtsProcLocks rp_locks = 0;
- ErtsMessage *mp;
- mp = erts_alloc_message_heap(
- rp, &rp_locks, 4 + NC_HEAP_SIZE(tdarp->ref), &hp, &ohp);
-#endif
ref_copy = STORE_NC(&hp, ohp, tdarp->ref);
msg = TUPLE3(hp, am_trace_delivered, tdarp->target, ref_copy);
-#ifdef ERTS_SMP
erts_send_sys_msg_proc(rp->common.id, rp->common.id, msg, bp);
-#else
- erts_queue_message(rp, rp_locks, mp, msg, am_system);
-#endif
erts_free(ERTS_ALC_T_MISC_AUX_WORK, vtdarp);
erts_proc_dec_refc(rp);
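Both `erts_system_monitor_clear` and `erts_system_profile_clear` above follow the same shape: release the caller's main lock, block all other scheduler threads, mutate the global trace settings, then unblock and re-take the lock. A stand-alone model of that shape, with a pthread rwlock standing in for the thread-progress blocking machinery (all `toy_*` names are invented):

    #include <pthread.h>

    static pthread_rwlock_t toy_world_lock = PTHREAD_RWLOCK_INITIALIZER;
    static pthread_mutex_t  toy_main_lock  = PTHREAD_MUTEX_INITIALIZER;
    static struct { int long_gc; int large_heap; } toy_monitor_state;

    /* caller holds toy_main_lock on entry and on return */
    static void toy_monitor_clear(void)
    {
        pthread_mutex_unlock(&toy_main_lock);   /* avoid lock-order issues    */
        pthread_rwlock_wrlock(&toy_world_lock); /* "block" every other thread */

        toy_monitor_state.long_gc    = 0;       /* safe global mutation       */
        toy_monitor_state.large_heap = 0;

        pthread_rwlock_unlock(&toy_world_lock);
        pthread_mutex_lock(&toy_main_lock);     /* restore caller's locking   */
    }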
diff --git a/erts/emulator/beam/erl_bits.c b/erts/emulator/beam/erl_bits.c
index 71c64997c1..e52b5b4bc5 100644
--- a/erts/emulator/beam/erl_bits.c
+++ b/erts/emulator/beam/erl_bits.c
@@ -64,15 +64,7 @@
static byte get_bit(byte b, size_t a_offs);
-#if defined(ERTS_SMP)
/* the state resides in the current process' scheduler data */
-#elif defined(ERL_BITS_REENTRANT)
-/* reentrant API but with a hidden single global state, for testing only */
-struct erl_bits_state ErlBitsState_;
-#else
-/* non-reentrant API with a single global state */
-struct erl_bits_state ErlBitsState;
-#endif
#define byte_buf (ErlBitsState.byte_buf_)
#define byte_buf_len (ErlBitsState.byte_buf_len_)
@@ -85,9 +77,6 @@ erts_bits_bufs_size(void)
return (Uint) erts_smp_atomic_read_nob(&bits_bufs_size);
}
-#if !defined(ERTS_SMP)
-static
-#endif
void
erts_bits_init_state(ERL_BITS_PROTO_0)
{
@@ -97,13 +86,11 @@ erts_bits_init_state(ERL_BITS_PROTO_0)
erts_bin_offset = 0;
}
-#if defined(ERTS_SMP)
void
erts_bits_destroy_state(ERL_BITS_PROTO_0)
{
erts_free(ERTS_ALC_T_BITS_BUF, byte_buf);
}
-#endif
void
erts_init_bits(void)
@@ -114,12 +101,7 @@ erts_init_bits(void)
== offsetof(Binary,orig_bytes));
erts_smp_atomic_init_nob(&bits_bufs_size, 0);
-#if defined(ERTS_SMP)
/* erl_process.c calls erts_bits_init_state() on all state instances */
-#else
- ERL_BITS_DECLARE_STATEP;
- erts_bits_init_state(ERL_BITS_ARGS_0);
-#endif
}
/*****************************************************************
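With the non-reentrant variant removed above, the bit-syntax state is always a per-scheduler structure whose scratch byte buffer is set up by `erts_bits_init_state` and released by `erts_bits_destroy_state`. A reduced sketch of that pairing using libc allocation (the `toy_*` names and the initial buffer size are assumptions):

    #include <stdlib.h>

    struct toy_bits_state {
        unsigned char *byte_buf;      /* scratch buffer for building binaries */
        size_t         byte_buf_len;
    };

    static void toy_bits_init_state(struct toy_bits_state *ebs)
    {
        ebs->byte_buf_len = 1;
        ebs->byte_buf     = malloc(ebs->byte_buf_len);
    }

    static void toy_bits_destroy_state(struct toy_bits_state *ebs)
    {
        free(ebs->byte_buf);          /* released when the scheduler goes away */
    }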
diff --git a/erts/emulator/beam/erl_bits.h b/erts/emulator/beam/erl_bits.h
index 5da2b28a89..1445a38772 100644
--- a/erts/emulator/beam/erl_bits.h
+++ b/erts/emulator/beam/erl_bits.h
@@ -84,31 +84,17 @@ typedef struct erl_bin_match_struct{
#define ms_matchbuffer(_Ms) &(((ErlBinMatchState*) boxed_val(_Ms))->mb)
-#if defined(ERTS_SMP)
#define ERL_BITS_REENTRANT
-#else
-/* uncomment to test the reentrant API in the non-SMP runtime system */
-/* #define ERL_BITS_REENTRANT */
-#endif
-#ifdef ERL_BITS_REENTRANT
/*
* Reentrant API with the state passed as a parameter.
* (Except when the current Process* already is a parameter.)
*/
-#ifdef ERTS_SMP
/* the state resides in the current process' scheduler data */
#define ERL_BITS_DECLARE_STATEP struct erl_bits_state *EBS
#define ERL_BITS_RELOAD_STATEP(P) do{EBS = &erts_proc_sched_data((P))->erl_bits_state;}while(0)
#define ERL_BITS_DEFINE_STATEP(P) struct erl_bits_state *EBS = &erts_proc_sched_data((P))->erl_bits_state
-#else
-/* reentrant API but with a hidden single global state, for testing only */
-extern struct erl_bits_state ErlBitsState_;
-#define ERL_BITS_DECLARE_STATEP struct erl_bits_state *EBS = &ErlBitsState_
-#define ERL_BITS_RELOAD_STATEP(P) do{}while(0)
-#define ERL_BITS_DEFINE_STATEP(P) ERL_BITS_DECLARE_STATEP
-#endif
#define ErlBitsState (*EBS)
#define ERL_BITS_PROTO_0 struct erl_bits_state *EBS
@@ -120,26 +106,6 @@ extern struct erl_bits_state ErlBitsState_;
#define ERL_BITS_ARGS_2(ARG1,ARG2) EBS, ARG1, ARG2
#define ERL_BITS_ARGS_3(ARG1,ARG2,ARG3) EBS, ARG1, ARG2, ARG3
-#else /* ERL_BITS_REENTRANT */
-
-/*
- * Non-reentrant API with a single global state.
- */
-extern struct erl_bits_state ErlBitsState;
-#define ERL_BITS_DECLARE_STATEP /*empty*/
-#define ERL_BITS_RELOAD_STATEP(P) do{}while(0)
-#define ERL_BITS_DEFINE_STATEP(P) /*empty*/
-
-#define ERL_BITS_PROTO_0 void
-#define ERL_BITS_PROTO_1(PARM1) PARM1
-#define ERL_BITS_PROTO_2(PARM1,PARM2) PARM1, PARM2
-#define ERL_BITS_PROTO_3(PARM1,PARM2,PARM3) PARM1, PARM2, PARM3
-#define ERL_BITS_ARGS_0 /*empty*/
-#define ERL_BITS_ARGS_1(ARG1) ARG1
-#define ERL_BITS_ARGS_2(ARG1,ARG2) ARG1, ARG2
-#define ERL_BITS_ARGS_3(ARG1,ARG2,ARG3) ARG1, ARG2, ARG3
-
-#endif /* ERL_BITS_REENTRANT */
#define erts_bin_offset (ErlBitsState.erts_bin_offset_)
#define erts_current_bin (ErlBitsState.erts_current_bin_)
@@ -158,10 +124,8 @@ extern struct erl_bits_state ErlBitsState;
} while (0)
void erts_init_bits(void); /* Initialization once. */
-#ifdef ERTS_SMP
void erts_bits_init_state(ERL_BITS_PROTO_0);
void erts_bits_destroy_state(ERL_BITS_PROTO_0);
-#endif
/*
diff --git a/erts/emulator/beam/erl_cpu_topology.c b/erts/emulator/beam/erl_cpu_topology.c
index f8b2fa744f..48b02c839e 100644
--- a/erts/emulator/beam/erl_cpu_topology.c
+++ b/erts/emulator/beam/erl_cpu_topology.c
@@ -131,13 +131,11 @@ static erts_cpu_groups_map_t *reader_groups_map;
#define ERTS_MAX_CPU_TOPOLOGY_ID ((int) 0xffff)
-#ifdef ERTS_SMP
static void cpu_bind_order_sort(erts_cpu_topology_t *cpudata,
int size,
ErtsCpuBindOrder bind_order,
int mk_seq);
static void write_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size);
-#endif
static void reader_groups_callback(int, ErtsSchedulerData *, int, void *);
static erts_cpu_groups_map_t *add_cpu_groups(int groups,
@@ -434,7 +432,6 @@ processor_order_cmp(const void *vx, const void *vy)
return 0;
}
-#ifdef ERTS_SMP
void
erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp)
{
@@ -490,7 +487,6 @@ erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp)
(void) ERTS_RUNQ_FLGS_SET(esdp->run_queue, ERTS_RUNQ_FLG_CHK_CPU_BIND);
}
-#endif
void
erts_sched_check_cpu_bind(ErtsSchedulerData *esdp)
@@ -556,7 +552,6 @@ erts_sched_check_cpu_bind(ErtsSchedulerData *esdp)
erts_smp_runq_lock(esdp->run_queue);
}
-#ifdef ERTS_SMP
void
erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp)
{
@@ -594,7 +589,6 @@ erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp)
if (esdp->no <= max_main_threads)
erts_thr_set_main_status(1, (int) esdp->no);
}
-#endif
static void
write_schedulers_bind_change(erts_cpu_topology_t *cpudata, int size)
diff --git a/erts/emulator/beam/erl_cpu_topology.h b/erts/emulator/beam/erl_cpu_topology.h
index c922214702..88bcad79ab 100644
--- a/erts/emulator/beam/erl_cpu_topology.h
+++ b/erts/emulator/beam/erl_cpu_topology.h
@@ -60,11 +60,9 @@ int erts_init_scheduler_bind_type_string(char *how);
int erts_init_cpu_topology_string(char *topology_str);
void erts_sched_check_cpu_bind(ErtsSchedulerData *esdp);
-#ifdef ERTS_SMP
void erts_sched_init_check_cpu_bind(ErtsSchedulerData *esdp);
void erts_sched_check_cpu_bind_prep_suspend(ErtsSchedulerData *esdp);
void erts_sched_check_cpu_bind_post_suspend(ErtsSchedulerData *esdp);
-#endif
int erts_update_cpu_info(void);
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index b83134c79c..2e5d401ca4 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -61,15 +61,9 @@ enum DbIterSafety {
ITER_SAFE_LOCKED, /* Safe while table is locked, not between trap calls */
ITER_SAFE /* No need to fixate at all */
};
-#ifdef ERTS_SMP
# define ITERATION_SAFETY(Proc,Tab) \
((IS_TREE_TABLE((Tab)->common.status) || ONLY_WRITER(Proc,Tab)) ? ITER_SAFE \
: (((Tab)->common.status & DB_FINE_LOCKED) ? ITER_UNSAFE : ITER_SAFE_LOCKED))
-#else
-# define ITERATION_SAFETY(Proc,Tab) \
- ((IS_TREE_TABLE((Tab)->common.status) || ONLY_WRITER(Proc,Tab)) \
- ? ITER_SAFE : ITER_SAFE_LOCKED)
-#endif
#define DID_TRAP(P,Ret) (!is_value(Ret) && ((P)->freason == TRAP))
@@ -264,11 +258,7 @@ is_table_alive(DbTable *tb)
static ERTS_INLINE int
is_table_named(DbTable *tb)
{
-#ifdef ERTS_SMP
return tb->common.type & DB_NAMED_TABLE;
-#else
- return tb->common.status & DB_NAMED_TABLE;
-#endif
}
@@ -297,13 +287,11 @@ make_tid(Process *c_p, DbTable *tb)
/*
** The meta hash table of all NAMED ets tables
*/
-#ifdef ERTS_SMP
# define META_NAME_TAB_LOCK_CNT 16
union {
erts_smp_rwmtx_t lck;
byte _cache_line_alignment[64];
}meta_name_tab_rwlocks[META_NAME_TAB_LOCK_CNT];
-#endif
static struct meta_name_tab_entry {
union {
Eterm name_atom;
@@ -323,9 +311,7 @@ struct meta_name_tab_entry* meta_name_tab_bucket(Eterm name,
{
unsigned bix = atom_val(name) & meta_name_tab_mask;
struct meta_name_tab_entry* bucket = &meta_name_tab[bix];
-#ifdef ERTS_SMP
*lockp = &meta_name_tab_rwlocks[bix % META_NAME_TAB_LOCK_CNT].lck;
-#endif
return bucket;
}
@@ -396,10 +382,8 @@ free_dbtable(void *vtb)
tb->common.fixations);
}
#endif
-#ifdef ERTS_SMP
erts_smp_rwmtx_destroy(&tb->common.rwlock);
erts_smp_mtx_destroy(&tb->common.fixlock);
-#endif
ASSERT(is_immed(tb->common.heir_data));
if (tb->common.btid)
@@ -577,25 +561,20 @@ delete_owned_table(Process *p, DbTable *tb)
static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock)
{
-#ifdef ERTS_SMP
erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
if (use_frequent_read_lock)
rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
if (erts_ets_rwmtx_spin_count >= 0)
rwmtx_opt.main_spincount = erts_ets_rwmtx_spin_count;
-#endif
-#ifdef ERTS_SMP
erts_smp_rwmtx_init_opt(&tb->common.rwlock, &rwmtx_opt, "db_tab",
tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
erts_smp_mtx_init(&tb->common.fixlock, "db_tab_fix",
tb->common.the_name, ERTS_LOCK_FLAGS_CATEGORY_DB);
tb->common.is_thread_safe = !(tb->common.status & DB_FINE_LOCKED);
-#endif
}
static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind)
{
-#ifdef ERTS_SMP
if (tb->common.type & DB_FINE_LOCKED) {
if (kind == LCK_WRITE) {
erts_smp_rwmtx_rwlock(&tb->common.rwlock);
@@ -617,7 +596,6 @@ static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind)
}
ASSERT(tb->common.is_thread_safe);
}
-#endif
}
static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
@@ -627,7 +605,6 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
* DbTable structure. That is, ONLY the SMP case is allowed
* to follow the tb pointer!
*/
-#ifdef ERTS_SMP
if (tb->common.type & DB_FINE_LOCKED) {
if (kind == LCK_WRITE) {
ASSERT(tb->common.is_thread_safe);
@@ -650,7 +627,6 @@ static ERTS_INLINE void db_unlock(DbTable* tb, db_lock_kind_t kind)
erts_smp_rwmtx_runlock(&tb->common.rwlock);
}
}
-#endif
}
static ERTS_INLINE
@@ -790,13 +766,11 @@ static int remove_named_tab(DbTable *tb, int have_lock)
struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
&rwlock);
ASSERT(is_table_named(tb));
-#ifdef ERTS_SMP
if (!have_lock && erts_smp_rwmtx_tryrwlock(rwlock) == EBUSY) {
db_unlock(tb, LCK_WRITE);
erts_smp_rwmtx_rwlock(rwlock);
db_lock(tb, LCK_WRITE);
}
-#endif
ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(rwlock));
@@ -1598,9 +1572,7 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
Uint32 status;
Sint keypos;
int is_named, is_compressed;
-#ifdef ERTS_SMP
int is_fine_locked, frequent_read;
-#endif
#ifdef DEBUG
int cret;
#endif
@@ -1616,10 +1588,8 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
status = DB_SET | DB_PROTECTED;
keypos = 1;
is_named = 0;
-#ifdef ERTS_SMP
is_fine_locked = 0;
frequent_read = 0;
-#endif
heir = am_none;
heir_data = (UWord) am_undefined;
is_compressed = erts_ets_always_compress;
@@ -1647,30 +1617,18 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
keypos = signed_val(tp[2]);
}
else if (tp[1] == am_write_concurrency) {
-#ifdef ERTS_SMP
if (tp[2] == am_true) {
is_fine_locked = 1;
} else if (tp[2] == am_false) {
is_fine_locked = 0;
} else break;
-#else
- if ((tp[2] != am_true) && (tp[2] != am_false)) {
- break;
- }
-#endif
}
else if (tp[1] == am_read_concurrency) {
-#ifdef ERTS_SMP
if (tp[2] == am_true) {
frequent_read = 1;
} else if (tp[2] == am_false) {
frequent_read = 0;
} else break;
-#else
- if ((tp[2] != am_true) && (tp[2] != am_false)) {
- break;
- }
-#endif
}
else if (tp[1] == am_heir && tp[2] == am_none) {
@@ -1712,11 +1670,9 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
}
if (IS_HASH_TABLE(status)) {
meth = &db_hash;
-#ifdef ERTS_SMP
if (is_fine_locked && !(status & DB_PRIVATE)) {
status |= DB_FINE_LOCKED;
}
-#endif
}
else if (IS_TREE_TABLE(status)) {
meth = &db_tree;
@@ -1725,10 +1681,8 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, BADARG);
}
-#ifdef ERTS_SMP
if (frequent_read && !(status & DB_PRIVATE))
status |= DB_FREQ_READ;
-#endif
/* we create table outside any table lock
* and take the unusual cost of destroy table if it
@@ -1747,10 +1701,8 @@ BIF_RETTYPE ets_new_2(BIF_ALIST_2)
tb->common.meth = meth;
tb->common.the_name = BIF_ARG_1;
tb->common.status = status;
-#ifdef ERTS_SMP
tb->common.type = status & ERTS_ETS_TABLE_TYPES;
/* Note, 'type' is *read only* from now on... */
-#endif
erts_smp_refc_init(&tb->common.fix_count, 0);
db_init_lock(tb, status & (DB_FINE_LOCKED|DB_FREQ_READ));
tb->common.keypos = keypos;
@@ -2522,13 +2474,11 @@ BIF_RETTYPE ets_internal_request_all_0(BIF_ALIST_0)
req->proc = BIF_P;
erts_proc_add_refc(BIF_P, (Sint) erts_no_schedulers);
-#ifdef ERTS_SMP
if (erts_no_schedulers > 1)
erts_schedule_multi_misc_aux_work(1,
erts_no_schedulers,
handle_ets_all_request,
(void *) req);
-#endif
handle_ets_all_request((void *) req);
BIF_RET(ref);
@@ -3345,7 +3295,6 @@ void init_db(ErtsDbSpinCount db_spin_count)
unsigned bits;
size_t size;
-#ifdef ERTS_SMP
int max_spin_count = (1 << 15) - 1; /* internal limit */
erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
@@ -3393,7 +3342,6 @@ void init_db(ErtsDbSpinCount db_spin_count)
"meta_name_tab", make_small(i),
ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DB);
}
-#endif
erts_smp_atomic_init_nob(&erts_ets_misc_mem_size, 0);
db_initialize_util();
@@ -3584,9 +3532,7 @@ static SWord proc_cleanup_fixed_table(Process* p, DbFixation* fix)
db_lock(tb, LCK_WRITE_REC);
if (!(tb->common.status & DB_DELETE)) {
erts_aint_t diff;
- #ifdef ERTS_SMP
erts_smp_mtx_lock(&tb->common.fixlock);
- #endif
ASSERT(fixing_procs_rbt_lookup(tb->common.fixing_procs, p));
@@ -3596,9 +3542,7 @@ static SWord proc_cleanup_fixed_table(Process* p, DbFixation* fix)
fixing_procs_rbt_delete(&tb->common.fixing_procs, fix);
- #ifdef ERTS_SMP
erts_smp_mtx_unlock(&tb->common.fixlock);
- #endif
if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)) {
work += db_unfix_table_hash(&(tb->hash));
}
@@ -3754,9 +3698,7 @@ static void fix_table_locked(Process* p, DbTable* tb)
{
DbFixation *fix;
-#ifdef ERTS_SMP
erts_smp_mtx_lock(&tb->common.fixlock);
-#endif
erts_smp_refc_inc(&tb->common.fix_count,1);
fix = tb->common.fixing_procs;
if (fix == NULL) {
@@ -3770,9 +3712,7 @@ static void fix_table_locked(Process* p, DbTable* tb)
ASSERT(fixed_tabs_find(NULL, fix));
++(fix->counter);
-#ifdef ERTS_SMP
erts_smp_mtx_unlock(&tb->common.fixlock);
-#endif
return;
}
}
@@ -3785,9 +3725,7 @@ static void fix_table_locked(Process* p, DbTable* tb)
fix->counter = 1;
fixing_procs_rbt_insert(&tb->common.fixing_procs, fix);
-#ifdef ERTS_SMP
erts_smp_mtx_unlock(&tb->common.fixlock);
-#endif
p->flags |= F_USING_DB;
fixed_tabs_insert(p, fix);
@@ -3800,9 +3738,7 @@ static void unfix_table_locked(Process* p, DbTable* tb,
{
DbFixation* fix;
-#ifdef ERTS_SMP
erts_smp_mtx_lock(&tb->common.fixlock);
-#endif
fix = fixing_procs_rbt_lookup(tb->common.fixing_procs, p);
if (fix) {
@@ -3811,9 +3747,7 @@ static void unfix_table_locked(Process* p, DbTable* tb,
ASSERT(fix->counter >= 0);
if (fix->counter == 0) {
fixing_procs_rbt_delete(&tb->common.fixing_procs, fix);
-#ifdef ERTS_SMP
erts_smp_mtx_unlock(&tb->common.fixlock);
-#endif
fixed_tabs_delete(p, fix);
erts_refc_dec(&fix->tabs.btid->intern.refc, 1);
@@ -3824,14 +3758,11 @@ static void unfix_table_locked(Process* p, DbTable* tb,
goto unlocked;
}
}
-#ifdef ERTS_SMP
erts_smp_mtx_unlock(&tb->common.fixlock);
-#endif
unlocked:
if (!IS_FIXED(tb) && IS_HASH_TABLE(tb->common.status)
&& erts_smp_atomic_read_nob(&tb->hash.fixdel) != (erts_aint_t)NULL) {
-#ifdef ERTS_SMP
if (*kind_p == LCK_READ && tb->common.is_thread_safe) {
/* Must have write lock while purging pseudo-deleted (OTP-8166) */
erts_smp_rwmtx_runlock(&tb->common.rwlock);
@@ -3839,7 +3770,6 @@ unlocked:
*kind_p = LCK_WRITE;
if (tb->common.status & DB_DELETE) return;
}
-#endif
db_unfix_table_hash(&(tb->hash));
}
}
@@ -3866,7 +3796,6 @@ static void free_fixations_op(DbFixation* fix, void* vctx)
diff = -((erts_aint_t) fix->counter);
erts_smp_refc_add(&ctx->tb->common.fix_count, diff, 0);
-#ifdef ERTS_SMP
if (fix->procs.p != ctx->p) { /* Fixated by other process */
fix->counter = 0;
@@ -3882,7 +3811,6 @@ static void free_fixations_op(DbFixation* fix, void* vctx)
*/
}
else
-#endif
{
fixed_tabs_delete(fix->procs.p, fix);
@@ -3895,7 +3823,6 @@ static void free_fixations_op(DbFixation* fix, void* vctx)
ctx->cnt++;
}
-#ifdef ERTS_SMP
int erts_db_execute_free_fixation(Process* p, DbFixation* fix)
{
ASSERT(fix->counter == 0);
@@ -3907,7 +3834,6 @@ int erts_db_execute_free_fixation(Process* p, DbFixation* fix)
ERTS_ETS_MISC_MEM_ADD(-sizeof(DbFixation));
return 1;
}
-#endif
static SWord free_fixations_locked(Process* p, DbTable *tb)
{
@@ -4115,9 +4041,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
= ERTS_IS_ATOM_STR("safe_fixed_monotonic_time",
What))
|| ERTS_IS_ATOM_STR("safe_fixed", What)) {
-#ifdef ERTS_SMP
erts_smp_mtx_lock(&tb->common.fixlock);
-#endif
if (IS_FIXED(tb)) {
Uint need;
Eterm *hp;
@@ -4159,9 +4083,7 @@ static Eterm table_info(Process* p, DbTable* tb, Eterm What)
} else {
ret = am_false;
}
-#ifdef ERTS_SMP
erts_smp_mtx_unlock(&tb->common.fixlock);
-#endif
} else if (What == am_atom_put("stats",5)) {
if (IS_HASH_TABLE(tb->common.status)) {
FloatDef f;
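
Throughout the erl_db.c hunks above, the removed ERTS_SMP guards leave common.fixlock as the unconditional guard around table fixation state. A minimal standalone sketch of that shape, assuming hypothetical names, plain pthreads instead of erts_smp_mtx_t, and a linked list instead of the fixing_procs red-black tree:

/* Sketch only: a per-table mutex guarding a fixation counter, in the
 * spirit of fix_table_locked() above. */
#include <pthread.h>
#include <stdlib.h>

struct fixation { struct fixation *next; long pid; long counter; };

struct table {
    pthread_mutex_t fixlock;     /* protects fix_count and the fixing list */
    long fix_count;
    struct fixation *fixing;
};

static void fix_table(struct table *tb, long pid)
{
    struct fixation *fix;

    pthread_mutex_lock(&tb->fixlock);
    tb->fix_count++;
    for (fix = tb->fixing; fix; fix = fix->next) {
        if (fix->pid == pid) {               /* already fixing: bump counter */
            fix->counter++;
            pthread_mutex_unlock(&tb->fixlock);
            return;
        }
    }
    fix = malloc(sizeof *fix);               /* first fixation by this pid */
    if (!fix)
        abort();                             /* keep the sketch simple */
    fix->pid = pid;
    fix->counter = 1;
    fix->next = tb->fixing;
    tb->fixing = fix;
    pthread_mutex_unlock(&tb->fixlock);
}

The real fix_table_locked() additionally bumps the shared fix_count refc and records the fixation on the fixing process, but the discipline is the same: every mutation of the fixation data happens under common.fixlock.
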
diff --git a/erts/emulator/beam/erl_db_hash.c b/erts/emulator/beam/erl_db_hash.c
index ae9322dfd3..2945afe275 100644
--- a/erts/emulator/beam/erl_db_hash.c
+++ b/erts/emulator/beam/erl_db_hash.c
@@ -109,11 +109,7 @@
#define NSEG_2 256 /* Size of second segment table */
#define NSEG_INC 128 /* Number of segments to grow after that */
-#ifdef ERTS_SMP
# define DB_USING_FINE_LOCKING(TB) (((TB))->common.type & DB_FINE_LOCKED)
-#else
-# define DB_USING_FINE_LOCKING(TB) 0
-#endif
#ifdef ETHR_ORDERED_READ_DEPEND
#define SEGTAB(tb) ((struct segment**) erts_smp_atomic_read_nob(&(tb)->segtab))
@@ -191,7 +187,6 @@ static ERTS_INLINE int add_fixed_deletion(DbTableHash* tb, int ix,
((is_atom(term) ? (atom_tab(atom_val(term))->slot.bucket.hvalue) : \
make_internal_hash(term, 0)) % MAX_HASH)
-#ifdef ERTS_SMP
# define DB_HASH_LOCK_MASK (DB_HASH_LOCK_CNT-1)
# define GET_LOCK(tb,hval) (&(tb)->locks->lck_vec[(hval) & DB_HASH_LOCK_MASK].lck)
# define GET_LOCK_MAYBE(tb,hval) ((tb)->common.is_thread_safe ? NULL : GET_LOCK(tb,hval))
@@ -234,12 +229,6 @@ static ERTS_INLINE void WUNLOCK_HASH(erts_smp_rwmtx_t* lck)
erts_smp_rwmtx_rwunlock(lck);
}
}
-#else /* ERTS_SMP */
-# define RLOCK_HASH(tb,hval) NULL
-# define WLOCK_HASH(tb,hval) NULL
-# define RUNLOCK_HASH(lck) ((void)lck)
-# define WUNLOCK_HASH(lck) ((void)lck)
-#endif /* ERTS_SMP */
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -261,31 +250,23 @@ static ERTS_INLINE void WUNLOCK_HASH(erts_smp_rwmtx_t* lck)
static ERTS_INLINE Sint next_slot(DbTableHash* tb, Uint ix,
erts_smp_rwmtx_t** lck_ptr)
{
-#ifdef ERTS_SMP
ix += DB_HASH_LOCK_CNT;
if (ix < NACTIVE(tb)) return ix;
RUNLOCK_HASH(*lck_ptr);
ix = (ix + 1) & DB_HASH_LOCK_MASK;
if (ix != 0) *lck_ptr = RLOCK_HASH(tb,ix);
return ix;
-#else
- return (++ix < NACTIVE(tb)) ? ix : 0;
-#endif
}
/* Same as next_slot but with WRITE locking */
static ERTS_INLINE Sint next_slot_w(DbTableHash* tb, Uint ix,
erts_smp_rwmtx_t** lck_ptr)
{
-#ifdef ERTS_SMP
ix += DB_HASH_LOCK_CNT;
if (ix < NACTIVE(tb)) return ix;
WUNLOCK_HASH(*lck_ptr);
ix = (ix + 1) & DB_HASH_LOCK_MASK;
if (ix != 0) *lck_ptr = WLOCK_HASH(tb,ix);
return ix;
-#else
- return next_slot(tb,ix,lck_ptr);
-#endif
}
#ifndef MIN
@@ -334,9 +315,7 @@ struct segment {
/* An extended segment table */
struct ext_segtab {
-#ifdef ERTS_SMP
ErtsThrPrgrLaterOp lop;
-#endif
struct segment** prev_segtab; /* Used when table is shrinking */
int prev_nsegs; /* Size of prev_segtab */
int nsegs; /* Size of this segtab */
@@ -662,7 +641,6 @@ int db_create_hash(Process *p, DbTable *tbl)
SIZEOF_SEGMENT(FIRST_SEGSZ));
sys_memset(tb->first_segtab[0], 0, SIZEOF_SEGMENT(FIRST_SEGSZ));
-#ifdef ERTS_SMP
erts_smp_atomic_init_nob(&tb->is_resizing, 0);
if (tb->common.type & DB_FINE_LOCKED) {
erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
@@ -687,7 +665,6 @@ int db_create_hash(Process *p, DbTable *tbl)
tb->locks = NULL;
}
ERTS_THR_MEMORY_BARRIER;
-#endif /* ERST_SMP */
return DB_ERROR_NONE;
}
@@ -1232,15 +1209,10 @@ static int match_traverse(Process* p, DbTableHash* tb,
Sint got = 0; /* Matched terms counter */
erts_smp_rwmtx_t* lck; /* Slot lock */
int ret_value;
-#ifdef ERTS_SMP
erts_smp_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue)
= (lock_for_write ? WLOCK_HASH : RLOCK_HASH);
void (*unlock_hash_function)(erts_smp_rwmtx_t*)
= (lock_for_write ? WUNLOCK_HASH : RUNLOCK_HASH);
-#else
- #define lock_hash_function(tb, hval) NULL
- #define unlock_hash_function(lck) ((void)lck)
-#endif
Sint (*next_slot_function)(DbTableHash*, Uint, erts_smp_rwmtx_t**)
= (lock_for_write ? next_slot_w : next_slot);
@@ -1359,10 +1331,6 @@ done:
}
return ret_value;
-#ifndef SMP
-#undef lock_hash_function
-#undef unlock_hash_function
-#endif
}
/*
@@ -1391,15 +1359,10 @@ static int match_traverse_continue(Process* p, DbTableHash* tb,
Eterm match_res;
erts_smp_rwmtx_t* lck;
int ret_value;
-#ifdef ERTS_SMP
erts_smp_rwmtx_t* (*lock_hash_function)(DbTableHash*, HashValue)
= (lock_for_write ? WLOCK_HASH : RLOCK_HASH);
void (*unlock_hash_function)(erts_smp_rwmtx_t*)
= (lock_for_write ? WUNLOCK_HASH : RUNLOCK_HASH);
-#else
- #define lock_hash_function(tb, hval) NULL
- #define unlock_hash_function(lck) ((void)lck)
-#endif
Sint (*next_slot_function)(DbTableHash* tb, Uint ix, erts_smp_rwmtx_t** lck_ptr)
= (lock_for_write ? next_slot_w : next_slot);
@@ -1475,10 +1438,6 @@ done:
*/
return ret_value;
-#ifndef SMP
-#undef lock_hash_function
-#undef unlock_hash_function
-#endif
}
@@ -2051,11 +2010,7 @@ static int db_select_delete_hash(Process *p, DbTable *tbl, Eterm tid, Eterm patt
sd_context.tid = tid;
sd_context.hp = NULL;
sd_context.prev_continuation_tptr = NULL;
-#ifdef ERTS_SMP
sd_context.fixated_by_me = sd_context.tb->common.is_thread_safe ? 0 : 1; /* TODO: something nicer */
-#else
- sd_context.fixated_by_me = 0;
-#endif
sd_context.last_pseudo_delete = (Uint) -1;
return match_traverse(
@@ -2330,7 +2285,6 @@ static void db_print_hash(fmtfn_t to, void *to_arg, int show, DbTable *tbl)
erts_print(to, to_arg, "Buckets: %d\n", NACTIVE(tb));
-#ifdef ERTS_SMP
i = tbl->common.is_thread_safe;
/* If crash dumping we set table to thread safe in order to
avoid taking any locks */
@@ -2340,9 +2294,6 @@ static void db_print_hash(fmtfn_t to, void *to_arg, int show, DbTable *tbl)
db_calc_stats_hash(&tbl->hash, &stats);
tbl->common.is_thread_safe = i;
-#else
- db_calc_stats_hash(&tbl->hash, &stats);
-#endif
erts_print(to, to_arg, "Chain Length Avg: %f\n", stats.avg_chain_len);
erts_print(to, to_arg, "Chain Length Max: %d\n", stats.max_chain_len);
@@ -2423,7 +2374,6 @@ static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds)
return reds; /* Not done */
}
}
-#ifdef ERTS_SMP
if (tb->locks != NULL) {
int i;
for (i=0; i<DB_HASH_LOCK_CNT; ++i) {
@@ -2433,7 +2383,6 @@ static SWord db_free_table_continue_hash(DbTable *tbl, SWord reds)
(void*)tb->locks, sizeof(DbTableHashFineLocks));
tb->locks = NULL;
}
-#endif
ASSERT(erts_smp_atomic_read_nob(&tb->common.memory_size) == sizeof(DbTable));
return reds; /* Done */
}
@@ -2637,14 +2586,12 @@ static void alloc_seg(DbTableHash *tb)
tb->nslots += EXT_SEGSZ;
}
-#ifdef ERTS_SMP
static void dealloc_ext_segtab(void* lop_data)
{
struct ext_segtab* est = (struct ext_segtab*) lop_data;
erts_free(ERTS_ALC_T_DB_SEG, est);
}
-#endif
/* Shrink table by freeing the top segment
** free_records: 1=free any records in segment, 0=assume segment is empty
@@ -2683,7 +2630,6 @@ static int free_seg(DbTableHash *tb, int free_records)
SET_SEGTAB(tb, est->prev_segtab);
tb->nsegs = est->prev_nsegs;
-#ifdef ERTS_SMP
if (!tb->common.is_thread_safe) {
/*
* Table is doing a graceful shrink operation and we must avoid
@@ -2701,7 +2647,6 @@ static int free_seg(DbTableHash *tb, int free_records)
sz);
}
else
-#endif
erts_db_free(ERTS_ALC_T_DB_SEG, (DbTable*)tb, est,
SIZEOF_EXT_SEGTAB(est->nsegs));
}
@@ -2762,22 +2707,18 @@ static Eterm build_term_list(Process* p, HashDbTerm* ptr1, HashDbTerm* ptr2,
static ERTS_INLINE int
begin_resizing(DbTableHash* tb)
{
-#ifdef ERTS_SMP
if (DB_USING_FINE_LOCKING(tb))
return !erts_atomic_xchg_acqb(&tb->is_resizing, 1);
else
ERTS_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(&tb->common.rwlock));
-#endif
return 1;
}
static ERTS_INLINE void
done_resizing(DbTableHash* tb)
{
-#ifdef ERTS_SMP
if (DB_USING_FINE_LOCKING(tb))
erts_atomic_set_relb(&tb->is_resizing, 0);
-#endif
}
/* Grow table with one or more new buckets.
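
In erl_db_hash.c the fine-locking macros (GET_LOCK, RLOCK_HASH/WLOCK_HASH and friends) become the only implementation: a bucket's rwlock is selected by masking the hash value with DB_HASH_LOCK_MASK, and next_slot()/next_slot_w() release one stripe's lock before acquiring the next. A sketch of the stripe-selection part, using pthread rwlocks and hypothetical names rather than the ERTS lock types:

/* Sketch only: lock striping over a hash table. */
#include <pthread.h>

#define N_LOCKS 64                       /* power of two, cf. DB_HASH_LOCK_CNT */
#define LOCK_MASK (N_LOCKS - 1)

struct striped_table {
    pthread_rwlock_t locks[N_LOCKS];
    /* ... bucket array ... */
};

/* Pick and take the read lock covering a hash value (cf. GET_LOCK/RLOCK_HASH). */
static pthread_rwlock_t *rlock_hash(struct striped_table *tb, unsigned hval)
{
    pthread_rwlock_t *lck = &tb->locks[hval & LOCK_MASK];
    pthread_rwlock_rdlock(lck);
    return lck;
}

static void runlock_hash(pthread_rwlock_t *lck)
{
    pthread_rwlock_unlock(lck);
}
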
diff --git a/erts/emulator/beam/erl_db_hash.h b/erts/emulator/beam/erl_db_hash.h
index 523ed7860e..319d563859 100644
--- a/erts/emulator/beam/erl_db_hash.h
+++ b/erts/emulator/beam/erl_db_hash.h
@@ -63,10 +63,8 @@ typedef struct db_table_hash {
/* List of slots where elements have been deleted while table was fixed */
erts_smp_atomic_t fixdel; /* (FixedDeletion*) */
-#ifdef ERTS_SMP
erts_smp_atomic_t is_resizing; /* grow/shrink in progress */
DbTableHashFineLocks* locks;
-#endif
} DbTableHash;
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index 13eacaa8a9..7310694e63 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -361,11 +361,7 @@ typedef struct {
} ErtsMatchPseudoProcess;
-#ifdef ERTS_SMP
static erts_smp_tsd_key_t match_pseudo_process_key;
-#else
-static ErtsMatchPseudoProcess *match_pseudo_process;
-#endif
static ERTS_INLINE void
cleanup_match_pseudo_process(ErtsMatchPseudoProcess *mpsp, int keep_heap)
@@ -414,7 +410,6 @@ static ERTS_INLINE ErtsMatchPseudoProcess *
get_match_pseudo_process(Process *c_p, Uint heap_size)
{
ErtsMatchPseudoProcess *mpsp;
-#ifdef ERTS_SMP
ErtsSchedulerData *esdp;
esdp = c_p ? c_p->scheduler_data : erts_get_scheduler_data();
@@ -436,10 +431,6 @@ get_match_pseudo_process(Process *c_p, Uint heap_size)
mpsp->process.scheduler_data = esdp;
erts_smp_tsd_set(match_pseudo_process_key, (void *) mpsp);
}
-#else
- mpsp = match_pseudo_process;
- cleanup_match_pseudo_process(mpsp, 0);
-#endif
if (heap_size > ERTS_DEFAULT_MS_HEAP_SIZE*sizeof(Eterm)) {
mpsp->u.heap = (Eterm*) erts_alloc(ERTS_ALC_T_DB_MS_RUN_HEAP, heap_size);
}
@@ -449,7 +440,6 @@ get_match_pseudo_process(Process *c_p, Uint heap_size)
return mpsp;
}
-#ifdef ERTS_SMP
static void
destroy_match_pseudo_process(void)
{
@@ -461,19 +451,14 @@ destroy_match_pseudo_process(void)
erts_smp_tsd_set(match_pseudo_process_key, (void *) NULL);
}
}
-#endif
static
void
match_pseudo_process_init(void)
{
-#ifdef ERTS_SMP
erts_smp_tsd_key_create(&match_pseudo_process_key,
"erts_match_pseudo_process_key");
erts_smp_install_exit_handler(destroy_match_pseudo_process);
-#else
- match_pseudo_process = create_match_pseudo_process();
-#endif
}
void
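
The erl_db_util.c change keeps only the thread-specific-data variant of the match pseudo process: each scheduler thread lazily creates its own, and an installed exit handler tears it down. A self-contained sketch of the same pattern with POSIX TSD and hypothetical names (the real code uses erts_smp_tsd_* plus erts_smp_install_exit_handler):

/* Sketch only: per-thread lazily created context with a destructor. */
#include <pthread.h>
#include <stdlib.h>

static pthread_key_t match_ctx_key;

struct match_ctx { void *heap; size_t heap_size; };

static void destroy_match_ctx(void *p)       /* runs when a thread exits */
{
    struct match_ctx *ctx = p;
    if (ctx) {
        free(ctx->heap);
        free(ctx);
    }
}

static void match_ctx_init(void)             /* cf. match_pseudo_process_init() */
{
    pthread_key_create(&match_ctx_key, destroy_match_ctx);
}

static struct match_ctx *get_match_ctx(void) /* cf. get_match_pseudo_process() */
{
    struct match_ctx *ctx = pthread_getspecific(match_ctx_key);
    if (!ctx) {                              /* first use on this thread */
        ctx = calloc(1, sizeof *ctx);
        pthread_setspecific(match_ctx_key, ctx);
    }
    return ctx;
}
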
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 19055c6110..fb9db3b010 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -241,12 +241,10 @@ typedef struct db_table_common {
erts_smp_refc_t fix_count;/* fixation counter */
DbTableList all;
DbTableList owned;
-#ifdef ERTS_SMP
erts_smp_rwmtx_t rwlock; /* rw lock on table */
erts_smp_mtx_t fixlock; /* Protects fixing_procs and time */
int is_thread_safe; /* No fine locking inside table needed */
Uint32 type; /* table type, *read only* after creation */
-#endif
Eterm owner; /* Pid of the creator */
Eterm heir; /* Pid of the heir */
UWord heir_data; /* To send in ETS-TRANSFER (is_immed or (DbTerm*) */
diff --git a/erts/emulator/beam/erl_drv_thread.c b/erts/emulator/beam/erl_drv_thread.c
index 49bbab55a8..4238d161ec 100644
--- a/erts/emulator/beam/erl_drv_thread.c
+++ b/erts/emulator/beam/erl_drv_thread.c
@@ -50,7 +50,6 @@ fatal_error(int err, char *func)
#define ERL_DRV_TSD_EXTRA 10
#define ERL_DRV_INVALID_TSD_KEY INT_MAX
-#ifdef USE_THREADS
struct ErlDrvMutex_ {
ethr_mutex mtx;
@@ -85,10 +84,6 @@ struct ErlDrvTid_ {
static ethr_tsd_key tid_key;
-#else /* USE_THREADS */
-static Uint tsd_len;
-static void **tsd;
-#endif
static ErlDrvTSDKey next_tsd_key;
static ErlDrvTSDKey max_used_tsd_key;
@@ -97,7 +92,6 @@ static char **used_tsd_keys;
static erts_mtx_t tsd_mtx;
static char *no_name;
-#ifdef USE_THREADS
static void
thread_exit_handler(void)
@@ -122,21 +116,15 @@ erl_drv_thread_wrapper(void *vdtid)
return (*dtid->func)(dtid->arg);
}
-#endif
void erl_drv_thr_init(void)
{
int i;
-#ifdef USE_THREADS
int res = ethr_tsd_key_create(&tid_key,"erts_tid_key");
if (res == 0)
res = ethr_install_exit_handler(thread_exit_handler);
if (res != 0)
fatal_error(res, "erl_drv_thr_init()");
-#else
- tsd_len = 0;
- tsd = NULL;
-#endif
no_name = "unknown";
next_tsd_key = 0;
@@ -159,7 +147,6 @@ void erl_drv_thr_init(void)
ErlDrvMutex *
erl_drv_mutex_create(char *name)
{
-#ifdef USE_THREADS
ErlDrvMutex *dmtx = erts_alloc_fnf(ERTS_ALC_T_DRV_MTX,
(sizeof(ErlDrvMutex)
+ (name ? sys_strlen(name) + 1 : 0)));
@@ -182,15 +169,11 @@ erl_drv_mutex_create(char *name)
#endif
}
return dmtx;
-#else
- return (ErlDrvMutex *) NULL;
-#endif
}
void
erl_drv_mutex_destroy(ErlDrvMutex *dmtx)
{
-#ifdef USE_THREADS
int res;
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_uninstall(&dmtx->lcnt);
@@ -199,24 +182,18 @@ erl_drv_mutex_destroy(ErlDrvMutex *dmtx)
if (res != 0)
fatal_error(res, "erl_drv_mutex_destroy()");
erts_free(ERTS_ALC_T_DRV_MTX, (void *) dmtx);
-#endif
}
char *
erl_drv_mutex_name(ErlDrvMutex *dmtx)
{
-#ifdef USE_THREADS
return dmtx ? dmtx->name : NULL;
-#else
- return NULL;
-#endif
}
int
erl_drv_mutex_trylock(ErlDrvMutex *dmtx)
{
-#ifdef USE_THREADS
int res;
if (!dmtx)
fatal_error(EINVAL, "erl_drv_mutex_trylock()");
@@ -225,22 +202,17 @@ erl_drv_mutex_trylock(ErlDrvMutex *dmtx)
erts_lcnt_trylock(&dmtx->lcnt, res);
#endif
return res;
-#else
- return 0;
-#endif
}
void
erl_drv_mutex_lock(ErlDrvMutex *dmtx)
{
-#ifdef USE_THREADS
if (!dmtx)
fatal_error(EINVAL, "erl_drv_mutex_lock()");
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock(&dmtx->lcnt);
#endif
ethr_mutex_lock(&dmtx->mtx);
-#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post(&dmtx->lcnt);
#endif
@@ -249,20 +221,17 @@ erl_drv_mutex_lock(ErlDrvMutex *dmtx)
void
erl_drv_mutex_unlock(ErlDrvMutex *dmtx)
{
-#ifdef USE_THREADS
if (!dmtx)
fatal_error(EINVAL, "erl_drv_mutex_unlock()");
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock(&dmtx->lcnt);
#endif
ethr_mutex_unlock(&dmtx->mtx);
-#endif
}
ErlDrvCond *
erl_drv_cond_create(char *name)
{
-#ifdef USE_THREADS
ErlDrvCond *dcnd = erts_alloc_fnf(ERTS_ALC_T_DRV_CND,
(sizeof(ErlDrvCond)
+ (name ? sys_strlen(name) + 1 : 0)));
@@ -281,57 +250,43 @@ erl_drv_cond_create(char *name)
}
}
return dcnd;
-#else
- return (ErlDrvCond *) NULL;
-#endif
}
void
erl_drv_cond_destroy(ErlDrvCond *dcnd)
{
-#ifdef USE_THREADS
int res = dcnd ? ethr_cond_destroy(&dcnd->cnd) : EINVAL;
if (res != 0)
fatal_error(res, "erl_drv_cond_destroy()");
erts_free(ERTS_ALC_T_DRV_CND, (void *) dcnd);
-#endif
}
char *
erl_drv_cond_name(ErlDrvCond *dcnd)
{
-#ifdef USE_THREADS
return dcnd ? dcnd->name : NULL;
-#else
- return NULL;
-#endif
}
void
erl_drv_cond_signal(ErlDrvCond *dcnd)
{
-#ifdef USE_THREADS
if (!dcnd)
fatal_error(EINVAL, "erl_drv_cond_signal()");
ethr_cond_signal(&dcnd->cnd);
-#endif
}
void
erl_drv_cond_broadcast(ErlDrvCond *dcnd)
{
-#ifdef USE_THREADS
if (!dcnd)
fatal_error(EINVAL, "erl_drv_cond_broadcast()");
ethr_cond_broadcast(&dcnd->cnd);
-#endif
}
void
erl_drv_cond_wait(ErlDrvCond *dcnd, ErlDrvMutex *dmtx)
{
-#ifdef USE_THREADS
if (!dcnd || !dmtx) {
fatal_error(EINVAL, "erl_drv_cond_wait()");
}
@@ -348,13 +303,11 @@ erl_drv_cond_wait(ErlDrvCond *dcnd, ErlDrvMutex *dmtx)
break;
}
}
-#endif
}
ErlDrvRWLock *
erl_drv_rwlock_create(char *name)
{
-#ifdef USE_THREADS
ErlDrvRWLock *drwlck = erts_alloc_fnf(ERTS_ALC_T_DRV_RWLCK,
(sizeof(ErlDrvRWLock)
+ (name ? sys_strlen(name) + 1 : 0)));
@@ -375,15 +328,11 @@ erl_drv_rwlock_create(char *name)
#endif
}
return drwlck;
-#else
- return (ErlDrvRWLock *) NULL;
-#endif
}
void
erl_drv_rwlock_destroy(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
int res;
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_uninstall(&drwlck->lcnt);
@@ -392,23 +341,17 @@ erl_drv_rwlock_destroy(ErlDrvRWLock *drwlck)
if (res != 0)
fatal_error(res, "erl_drv_rwlock_destroy()");
erts_free(ERTS_ALC_T_DRV_RWLCK, (void *) drwlck);
-#endif
}
char *
erl_drv_rwlock_name(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
return drwlck ? drwlck->name : NULL;
-#else
- return NULL;
-#endif
}
int
erl_drv_rwlock_tryrlock(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
int res;
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_tryrlock()");
@@ -417,15 +360,11 @@ erl_drv_rwlock_tryrlock(ErlDrvRWLock *drwlck)
erts_lcnt_trylock_opt(&drwlck->lcnt, res, ERTS_LOCK_OPTION_READ);
#endif
return res;
-#else
- return 0;
-#endif
}
void
erl_drv_rwlock_rlock(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_rlock()");
#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -435,26 +374,22 @@ erl_drv_rwlock_rlock(ErlDrvRWLock *drwlck)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post(&drwlck->lcnt);
#endif
-#endif
}
void
erl_drv_rwlock_runlock(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_runlock()");
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock_opt(&drwlck->lcnt, ERTS_LOCK_OPTION_READ);
#endif
ethr_rwmutex_runlock(&drwlck->rwmtx);
-#endif
}
int
erl_drv_rwlock_tryrwlock(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
int res;
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_tryrwlock()");
@@ -463,15 +398,11 @@ erl_drv_rwlock_tryrwlock(ErlDrvRWLock *drwlck)
erts_lcnt_trylock_opt(&drwlck->lcnt, res, ERTS_LOCK_OPTION_RDWR);
#endif
return res;
-#else
- return 0;
-#endif
}
void
erl_drv_rwlock_rwlock(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_rwlock()");
#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -481,20 +412,17 @@ erl_drv_rwlock_rwlock(ErlDrvRWLock *drwlck)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post(&drwlck->lcnt);
#endif
-#endif
}
void
erl_drv_rwlock_rwunlock(ErlDrvRWLock *drwlck)
{
-#ifdef USE_THREADS
if (!drwlck)
fatal_error(EINVAL, "erl_drv_rwlock_rwunlock()");
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_unlock_opt(&drwlck->lcnt, ERTS_LOCK_OPTION_RDWR);
#endif
ethr_rwmutex_rwunlock(&drwlck->rwmtx);
-#endif
}
int
@@ -588,20 +516,13 @@ erl_drv_tsd_key_destroy(ErlDrvTSDKey key)
}
-#ifdef USE_THREADS
#define ERL_DRV_TSD__ (dtid->tsd)
#define ERL_DRV_TSD_LEN__ (dtid->tsd_len)
-#else
-#define ERL_DRV_TSD__ (tsd)
-#define ERL_DRV_TSD_LEN__ (tsd_len)
-#endif
void
erl_drv_tsd_set(ErlDrvTSDKey key, void *data)
{
-#ifdef USE_THREADS
struct ErlDrvTid_ *dtid = (struct ErlDrvTid_ *) erl_drv_thread_self();
-#endif
if (key < 0 || max_used_tsd_key < key || !used_tsd_keys[key])
fatal_error(EINVAL, "erl_drv_tsd_set()");
@@ -629,15 +550,11 @@ erl_drv_tsd_set(ErlDrvTSDKey key, void *data)
void *
erl_drv_tsd_get(ErlDrvTSDKey key)
{
-#ifdef USE_THREADS
struct ErlDrvTid_ *dtid = ethr_tsd_get(tid_key);
-#endif
if (key < 0 || max_used_tsd_key < key || !used_tsd_keys[key])
fatal_error(EINVAL, "erl_drv_tsd_get()");
-#ifdef USE_THREADS
if (!dtid)
return NULL;
-#endif
if (ERL_DRV_TSD_LEN__ <= key)
return NULL;
return ERL_DRV_TSD__[key];
@@ -672,7 +589,6 @@ erl_drv_thread_create(char *name,
void* arg,
ErlDrvThreadOpts *opts)
{
-#ifdef USE_THREADS
int res;
struct ErlDrvTid_ *dtid;
ethr_thr_opts ethr_opts = ETHR_THR_OPTS_DEFAULT_INITER;
@@ -714,27 +630,19 @@ erl_drv_thread_create(char *name,
*tid = (ErlDrvTid) dtid;
return 0;
-#else
- return ENOTSUP;
-#endif
}
char *
erl_drv_thread_name(ErlDrvTid tid)
{
-#ifdef USE_THREADS
struct ErlDrvTid_ *dtid = (struct ErlDrvTid_ *) tid;
return dtid ? dtid->name : NULL;
-#else
- return NULL;
-#endif
}
ErlDrvTid
erl_drv_thread_self(void)
{
-#ifdef USE_THREADS
struct ErlDrvTid_ *dtid = ethr_tsd_get(tid_key);
if (!dtid) {
int res;
@@ -753,15 +661,11 @@ erl_drv_thread_self(void)
fatal_error(res, "erl_drv_thread_self()");
}
return (ErlDrvTid) dtid;
-#else
- return (ErlDrvTid) NULL;
-#endif
}
int
erl_drv_equal_tids(ErlDrvTid tid1, ErlDrvTid tid2)
{
-#ifdef USE_THREADS
int res;
struct ErlDrvTid_ *dtid1 = (struct ErlDrvTid_ *) tid1;
struct ErlDrvTid_ *dtid2 = (struct ErlDrvTid_ *) tid2;
@@ -775,28 +679,22 @@ erl_drv_equal_tids(ErlDrvTid tid1, ErlDrvTid tid2)
: !ethr_equal_tids(dtid1->tid, dtid2->tid));
return res;
-#else
- return 1;
-#endif
}
void
erl_drv_thread_exit(void *res)
{
-#ifdef USE_THREADS
struct ErlDrvTid_ *dtid = ethr_tsd_get(tid_key);
if (dtid && dtid->drv_thr) {
ethr_thr_exit(res);
fatal_error(0, "erl_drv_thread_exit()");
}
-#endif
fatal_error(EACCES, "erl_drv_thread_exit()");
}
int
erl_drv_thread_join(ErlDrvTid tid, void **respp)
{
-#ifdef USE_THREADS
int res;
struct ErlDrvTid_ *dtid = (struct ErlDrvTid_ *) tid;
@@ -809,9 +707,6 @@ erl_drv_thread_join(ErlDrvTid tid, void **respp)
if (res == 0)
erts_free(ERTS_ALC_T_DRV_TID, dtid);
return res;
-#else
- return ENOTSUP;
-#endif
}
#if defined(__DARWIN__) && defined(USE_THREADS) && defined(ERTS_SMP)
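
With USE_THREADS gone, every erl_drv_thread.c wrapper unconditionally forwards to the ethread primitives and treats a NULL handle as a fatal usage error. The following sketch mirrors that wrapper shape with pthreads and hypothetical names; it is not the ErlDrv API itself:

/* Sketch only: thin wrapper with name storage and fatal misuse checks,
 * no lock counting. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { pthread_mutex_t mtx; char *name; } drv_mutex;

static void fatal(int err, const char *func)
{
    fprintf(stderr, "%s: fatal error (%d)\n", func, err);
    abort();
}

static drv_mutex *drv_mutex_create(const char *name)
{
    drv_mutex *m = malloc(sizeof(drv_mutex) + (name ? strlen(name) + 1 : 0));
    if (m) {
        if (pthread_mutex_init(&m->mtx, NULL) != 0) {
            free(m);
            return NULL;
        }
        if (name) {
            m->name = (char *) (m + 1);      /* name stored right after the struct */
            strcpy(m->name, name);
        } else {
            m->name = "unknown";
        }
    }
    return m;
}

static void drv_mutex_lock(drv_mutex *m)
{
    if (!m)
        fatal(EINVAL, "drv_mutex_lock()");   /* mirrors erl_drv_mutex_lock() */
    pthread_mutex_lock(&m->mtx);
}
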
diff --git a/erts/emulator/beam/erl_fun.c b/erts/emulator/beam/erl_fun.c
index 535f677bb3..bfecb7a85f 100644
--- a/erts/emulator/beam/erl_fun.c
+++ b/erts/emulator/beam/erl_fun.c
@@ -183,13 +183,11 @@ void
erts_erase_fun_entry(ErlFunEntry* fe)
{
erts_fun_write_lock();
-#ifdef ERTS_SMP
/*
* We have to check refc again since someone might have looked up
* the fun entry and incremented refc after last check.
*/
if (erts_smp_refc_dectest(&fe->refc, -1) <= 0)
-#endif
{
if (fe->address != unloaded_fun)
erts_exit(ERTS_ERROR_EXIT,
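
The erl_fun.c hunk keeps the SMP-era double check: the reference count is examined again after the fun table write lock is taken, because a concurrent lookup may have resurrected the entry in the meantime. A simplified sketch of that re-check-under-lock pattern (hypothetical names, C11 atomics, a plain load standing in for the dectest):

/* Sketch only: re-validate the refcount after acquiring the write lock. */
#include <stdatomic.h>
#include <pthread.h>
#include <stdlib.h>

struct entry { atomic_long refc; };

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

static void erase_entry(struct entry *e)
{
    pthread_rwlock_wrlock(&table_lock);
    /* Re-check: a concurrent lookup may have taken a new reference between
     * the decision to erase and this lock acquisition. */
    if (atomic_load(&e->refc) <= 0) {
        /* ... unlink from the table ... */
        free(e);
    }
    pthread_rwlock_unlock(&table_lock);
}
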
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 8cb977a7f3..d49564731b 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -60,9 +60,6 @@
# define ERTS_GC_ASSERT(B) ((void) 1)
#endif
-#if defined(DEBUG) && 0
-# define HARDDEBUG 1
-#endif
/*
* Returns number of elements in an array.
@@ -3335,13 +3332,11 @@ erts_gc_info_request(Process *c_p)
erts_proc_add_refc(c_p, (Sint) erts_no_schedulers);
-#ifdef ERTS_SMP
if (erts_no_schedulers > 1)
erts_schedule_multi_misc_aux_work(1,
erts_no_schedulers,
reply_gc_info,
(void *) gcirp);
-#endif
reply_gc_info((void *) gcirp);
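
erts_gc_info_request() now always takes the multi-scheduler path: the request record carries a reference count sized to the number of repliers, work is scheduled on the other schedulers, and the caller handles its own share inline. A compact sketch of that fan-out/refcount shape with hypothetical names:

/* Sketch only: the request is freed by whoever drops the count to zero. */
#include <stdatomic.h>
#include <stdlib.h>

struct info_request { atomic_int refc; /* ... gathered results ... */ };

static struct info_request *request_new(int n_repliers)
{
    struct info_request *rp = malloc(sizeof *rp);
    if (rp)
        atomic_init(&rp->refc, n_repliers);
    return rp;
}

static void reply_done(struct info_request *rp)
{
    if (atomic_fetch_sub(&rp->refc, 1) == 1)
        free(rp);                    /* last replier cleans up */
}
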
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
index 99995be464..5e9b7fe42e 100644
--- a/erts/emulator/beam/erl_hl_timer.c
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -96,12 +96,8 @@ typedef enum {
#define ERTS_BIF_TIMER_SHORT_TIME 5000
-#ifdef ERTS_SMP
# define ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore \
ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore)
-#else
-# define ERTS_HLT_SMP_MEMBAR_LoadLoad_LoadStore
-#endif
/* Bit 0 to 9 contains scheduler id (see mask below) */
#define ERTS_TMR_ROFLG_HLT (((Uint32) 1) << 10)
@@ -269,7 +265,6 @@ typedef struct {
erts_atomic_t last;
} ErtsHLTCncldTmrQTail;
-#ifdef ERTS_SMP
typedef struct {
/*
@@ -301,7 +296,6 @@ typedef struct {
} head;
} ErtsHLTCncldTmrQ;
-#endif /* ERTS_SMP */
typedef struct {
ErtsHLTimer *root;
@@ -309,9 +303,7 @@ typedef struct {
} ErtsYieldingTimeoutState;
struct ErtsHLTimerService_ {
-#ifdef ERTS_SMP
ErtsHLTCncldTmrQ canceled_queue;
-#endif
ErtsHLTimer *time_tree;
#ifndef ERTS_MAGIC_REF_BIF_TIMERS
ErtsBifTimer *btm_tree;
@@ -720,9 +712,7 @@ proc_btm_list_foreach_destroy_yielding(ErtsBifTimer **list,
#endif /* !ERTS_MAGIC_REF_BIF_TIMERS */
-#ifdef ERTS_SMP
static void init_canceled_queue(ErtsHLTCncldTmrQ *cq);
-#endif
void
erts_hl_timer_init(void)
@@ -747,9 +737,7 @@ erts_create_timer_service(void)
srv->yield = init_yield;
erts_twheel_init_timer(&srv->service_timer);
-#ifdef ERTS_SMP
init_canceled_queue(&srv->canceled_queue);
-#endif
return srv;
}
@@ -1186,7 +1174,6 @@ hl_timer_dec_refc(ErtsHLTimer *tmr, Uint32 roflgs)
}
static void hlt_service_timeout(void *vesdp);
-#ifdef ERTS_SMP
static void handle_canceled_queue(ErtsSchedulerData *esdp,
ErtsHLTCncldTmrQ *cq,
int use_limit,
@@ -1194,7 +1181,6 @@ static void handle_canceled_queue(ErtsSchedulerData *esdp,
int *need_thr_progress,
ErtsThrPrgrVal *thr_prgr_p,
int *need_more_work);
-#endif
static ERTS_INLINE void
check_canceled_queue(ErtsSchedulerData *esdp, ErtsHLTimerService *srv)
@@ -1663,7 +1649,6 @@ cleanup_sched_local_canceled_timer(ErtsSchedulerData *esdp,
}
}
-#ifdef ERTS_SMP
static void
init_canceled_queue(ErtsHLTCncldTmrQ *cq)
@@ -1886,31 +1871,24 @@ erts_handle_canceled_timers(void *vesdp,
need_more_work);
}
-#endif /* ERTS_SMP */
static void
queue_canceled_timer(ErtsSchedulerData *esdp, int rsched_id, ErtsTimer *tmr)
{
-#ifdef ERTS_SMP
ErtsHLTCncldTmrQ *cq;
cq = &ERTS_SCHEDULER_IX(rsched_id-1)->timer_service->canceled_queue;
if (cq_enqueue(cq, tmr, rsched_id - (int) esdp->no))
erts_notify_canceled_timer(esdp, rsched_id);
-#else
- ERTS_INTERNAL_ERROR("Unexpected enqueue of canceled timer");
-#endif
}
static void
continue_cancel_ptimer(ErtsSchedulerData *esdp, ErtsTimer *tmr)
{
-#ifdef ERTS_SMP
Uint32 sid = (tmr->head.roflgs & ERTS_TMR_ROFLG_SID_MASK);
if (esdp->no != sid)
queue_canceled_timer(esdp, sid, tmr);
else
-#endif
cleanup_sched_local_canceled_timer(esdp, tmr);
}
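
In erl_hl_timer.c the canceled-timer queue is now unconditional: continue_cancel_ptimer() cleans a timer up locally when it is canceled on its owning scheduler and otherwise enqueues it on that scheduler's canceled_queue. A simplified standalone analogue, assuming hypothetical names and a mutex-protected list in place of the lock-free queue:

/* Sketch only: route cleanup of a canceled timer to its owning scheduler. */
#include <pthread.h>
#include <stdlib.h>

#define N_SCHED 4

struct timer { struct timer *next; unsigned owner; /* owning scheduler id */ };

struct sched {
    unsigned id;
    pthread_mutex_t cq_mtx;
    struct timer *canceled;          /* drained later by its owning scheduler */
};

static struct sched scheds[N_SCHED];

static void sched_init(void)
{
    unsigned i;
    for (i = 0; i < N_SCHED; i++) {
        scheds[i].id = i;
        pthread_mutex_init(&scheds[i].cq_mtx, NULL);
        scheds[i].canceled = NULL;
    }
}

/* Analogous to continue_cancel_ptimer(): local timers are cleaned up in
 * place, remote ones are queued for the scheduler that owns them. */
static void cancel_timer(struct sched *self, struct timer *tmr)
{
    if (tmr->owner != self->id) {
        struct sched *owner = &scheds[tmr->owner];
        pthread_mutex_lock(&owner->cq_mtx);
        tmr->next = owner->canceled;
        owner->canceled = tmr;
        pthread_mutex_unlock(&owner->cq_mtx);
    } else {
        free(tmr);                   /* stand-in for the local cleanup path */
    }
}
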
diff --git a/erts/emulator/beam/erl_hl_timer.h b/erts/emulator/beam/erl_hl_timer.h
index ff31f04cb9..852bf88174 100644
--- a/erts/emulator/beam/erl_hl_timer.h
+++ b/erts/emulator/beam/erl_hl_timer.h
@@ -63,13 +63,11 @@ void erts_hl_timer_init(void);
void erts_start_timer_callback(ErtsMonotonicTime,
void (*)(void *),
void *);
-#ifdef ERTS_SMP
void
erts_handle_canceled_timers(void *vesdp,
int *need_thr_progress,
ErtsThrPrgrVal *thr_prgr_p,
int *need_more_work);
-#endif
Uint erts_bif_timer_memory_size(void);
void erts_print_bif_timer_info(fmtfn_t to, void *to_arg);
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 5206d7564f..fe7f09e5fa 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -69,16 +69,8 @@
* The variables below (prefixed with etp_) are for erts/etc/unix/etp-commands
* only. Do not remove even though they aren't used elsewhere in the emulator!
*/
-#ifdef ERTS_SMP
const int etp_smp_compiled = 1;
-#else
-const int etp_smp_compiled = 0;
-#endif
-#ifdef USE_THREADS
const int etp_thread_compiled = 1;
-#else
-const int etp_thread_compiled = 0;
-#endif
const char etp_erts_version[] = ERLANG_VERSION;
const char etp_otp_release[] = ERLANG_OTP_RELEASE;
const char etp_compile_date[] = ERLANG_COMPILE_DATE;
@@ -156,17 +148,10 @@ static void erl_init(int ncpu,
static erts_atomic_t exiting;
-#ifdef ERTS_SMP
erts_smp_atomic32_t erts_writing_erl_crash_dump;
erts_tsd_key_t erts_is_crash_dumping_key;
-#else
-volatile int erts_writing_erl_crash_dump = 0;
-#endif
int erts_initialized = 0;
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
-erts_tid_t erts_main_thread;
-#endif
int erts_use_sender_punish;
@@ -682,7 +667,6 @@ void erts_usage(void)
erts_exit(1, "");
}
-#ifdef USE_THREADS
/*
* allocators for thread lib
*/
@@ -724,7 +708,6 @@ static void ethr_ll_free(void *ptr)
erts_free(ERTS_ALC_T_ETHR_LL, ptr);
}
-#endif
static int
early_init(int *argc, char **argv) /*
@@ -754,9 +737,6 @@ early_init(int *argc, char **argv) /*
char envbuf[21]; /* enough for any 64-bit integer */
size_t envbufsz;
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
- erts_main_thread = erts_thr_self();
-#endif
erts_save_emu_args(*argc, argv);
@@ -781,11 +761,6 @@ early_init(int *argc, char **argv) /*
&ncpu,
&ncpuonln,
&ncpuavail);
-#ifndef ERTS_SMP
- ncpu = 1;
- ncpuonln = 1;
- ncpuavail = 1;
-#endif
ignore_break = 0;
replace_intr = 0;
@@ -797,16 +772,10 @@ early_init(int *argc, char **argv) /*
erts_sys_pre_init();
erts_atomic_init_nob(&exiting, 0);
-#ifdef ERTS_SMP
erts_thr_progress_pre_init();
-#endif
-#ifdef ERTS_SMP
erts_smp_atomic32_init_nob(&erts_writing_erl_crash_dump, 0L);
erts_tsd_key_create(&erts_is_crash_dumping_key,"erts_is_crash_dumping_key");
-#else
- erts_writing_erl_crash_dump = 0;
-#endif
erts_smp_atomic32_init_nob(&erts_max_gen_gcs,
(erts_aint32_t) ((Uint16) -1));
@@ -1093,7 +1062,6 @@ early_init(int *argc, char **argv) /*
i++;
}
-#ifdef ERTS_SMP
/* apply any scheduler percentages */
if (schdlrs_percentage != 100 || schdlrs_onln_percentage != 100) {
schdlrs = schdlrs * schdlrs_percentage / 100;
@@ -1117,11 +1085,6 @@ early_init(int *argc, char **argv) /*
erts_usage();
}
}
-#else
- /* Silence gcc warnings */
- (void)schdlrs_percentage;
- (void)schdlrs_onln_percentage;
-#endif
#ifdef ERTS_DIRTY_SCHEDULERS
/* apply any dirty scheduler percentages */
if (dirty_cpu_scheds_pctg != 100 || dirty_cpu_scheds_onln_pctg != 100) {
@@ -1139,18 +1102,11 @@ early_init(int *argc, char **argv) /*
#endif
}
-#ifndef USE_THREADS
- erts_async_max_threads = 0;
-#endif
-#ifdef ERTS_SMP
no_schedulers = schdlrs;
no_schedulers_online = schdlrs_onln;
erts_no_schedulers = (Uint) no_schedulers;
-#else
- erts_no_schedulers = 1;
-#endif
#ifdef ERTS_DIRTY_SCHEDULERS
erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers = dirty_cpu_scheds;
no_dirty_cpu_schedulers_online = dirty_cpu_scheds_online;
@@ -1162,7 +1118,6 @@ early_init(int *argc, char **argv) /*
erts_alloc_init(argc, argv, &alloc_opts); /* Handles (and removes)
-M flags. */
/* Require allocators */
-#ifdef ERTS_SMP
/*
* Thread progress management:
*
@@ -1185,7 +1140,6 @@ early_init(int *argc, char **argv) /*
erts_no_dirty_io_schedulers
#endif
);
-#endif
erts_thr_q_init();
erts_init_utils();
erts_early_init_cpu_topology(no_schedulers,
@@ -1193,7 +1147,6 @@ early_init(int *argc, char **argv) /*
max_reader_groups,
&reader_groups);
-#ifdef USE_THREADS
{
erts_thr_late_init_data_t elid = ERTS_THR_LATE_INIT_DATA_DEF_INITER;
elid.mem.std.alloc = ethr_std_alloc;
@@ -1210,7 +1163,6 @@ early_init(int *argc, char **argv) /*
erts_thr_late_init(&elid);
}
-#endif
erts_msacc_early_init();
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -1237,40 +1189,6 @@ early_init(int *argc, char **argv) /*
return ncpu;
}
-#ifndef ERTS_SMP
-
-void *erts_scheduler_stack_limit;
-
-
-static void set_main_stack_size(void)
-{
- char c;
- UWord stacksize;
-# if HAVE_DECL_GETRLIMIT && HAVE_DECL_SETRLIMIT && HAVE_DECL_RLIMIT_STACK
- struct rlimit rl;
- int bytes;
- stacksize = erts_sched_thread_suggested_stack_size * sizeof(Uint) * 1024;
- /* Add some extra pages... neede by some systems... */
- bytes = (int) stacksize + 3*erts_sys_get_page_size();
- if (getrlimit(RLIMIT_STACK, &rl) != 0 ||
- (rl.rlim_cur = bytes, setrlimit(RLIMIT_STACK, &rl) != 0)) {
- erts_fprintf(stderr, "failed to set stack size for scheduler "
- "thread to %d bytes\n", bytes);
- erts_usage();
- }
-# else
- if (modified_sched_thread_suggested_stack_size) {
- erts_fprintf(stderr, "no OS support for dynamic stack size limit\n");
- erts_usage();
- }
- /* Be conservative and hope it is not more than 64 kWords... */
- stacksize = 64*1024*sizeof(void *);
-# endif
-
- erts_scheduler_stack_limit = erts_calc_stacklimit(&c, stacksize);
-}
-
-#endif
void
erl_start(int argc, char **argv)
@@ -1490,12 +1408,8 @@ erl_start(int argc, char **argv)
#ifdef DEBUG
strcat(tmp, ",DEBUG");
#endif
-#ifdef ERTS_SMP
strcat(tmp, ",SMP");
-#endif
-#ifdef USE_THREADS
strcat(tmp, ",ASYNC_THREADS");
-#endif
#ifdef HIPE
strcat(tmp, ",HIPE");
#endif
@@ -2007,9 +1921,7 @@ erl_start(int argc, char **argv)
arg);
erts_usage();
}
-#ifdef ERTS_SMP
erts_runq_supervision_interval = val;
-#endif
}
else {
erts_fprintf(stderr, "bad scheduling option %s\n", argv[i]);
@@ -2355,7 +2267,6 @@ erl_start(int argc, char **argv)
}
-#ifdef ERTS_SMP
erts_start_schedulers();
#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -2364,31 +2275,9 @@ erl_start(int argc, char **argv)
/* Let system specific code decide what to do with the main thread... */
erts_sys_main_thread(); /* May or may not return! */
-#else
- {
- ErtsSchedulerData *esdp = erts_get_scheduler_data();
- erts_msacc_init_thread("scheduler", 1, 1);
- erts_thr_set_main_status(1, 1);
-#if ERTS_USE_ASYNC_READY_Q
- esdp->aux_work_data.async_ready.queue
- = erts_get_async_ready_queue(1);
-#endif
- set_main_stack_size();
- erts_sched_init_time_sup(esdp);
- erts_ets_sched_spec_data_init(esdp);
- erts_aux_work_timeout_late_init(esdp);
-
-#ifdef ERTS_ENABLE_LOCK_COUNT
- erts_lcnt_post_startup();
-#endif
-
- process_main(esdp->x_reg_array, esdp->f_reg_array);
- }
-#endif
}
-#ifdef USE_THREADS
__decl_noreturn void erts_thr_fatal_error(int err, char *what)
{
@@ -2402,7 +2291,6 @@ __decl_noreturn void erts_thr_fatal_error(int err, char *what)
abort();
}
-#endif
static void
system_cleanup(int flush_async)
@@ -2415,7 +2303,6 @@ system_cleanup(int flush_async)
* Another thread is currently exiting the system;
* wait for it to do its job.
*/
-#ifdef ERTS_SMP
if (erts_thr_progress_is_managed_thread()) {
/*
* The exiting thread might be waiting for
@@ -2424,7 +2311,6 @@ system_cleanup(int flush_async)
erts_thr_progress_active(NULL, 0);
erts_thr_progress_prepare_wait(NULL);
}
-#endif
/* Wait forever... */
while (1)
erts_milli_sleep(10000000);
@@ -2439,17 +2325,12 @@ system_cleanup(int flush_async)
if (!flush_async
|| !erts_initialized
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
- || !erts_equal_tids(erts_main_thread, erts_thr_self())
-#endif
)
return;
-#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_exact(NULL, 0);
#endif
-#endif
erts_exit_flush_async();
}
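
One small piece of early_init() worth spelling out is the scheduler-percentage arithmetic that now applies unconditionally: the configured counts are scaled by the percentage options and rejected if the result drops below one scheduler. A hypothetical helper showing the same calculation:

/* Hypothetical helper; early_init() does the same integer scaling inline
 * and calls erts_usage() when the result is invalid. */
static int apply_percentage(int schedulers, int percentage, int *bad)
{
    int n = schedulers * percentage / 100;
    if (n < 1)
        *bad = 1;
    return n;
}

/* Example: 8 configured schedulers scaled to 50% -> 4 schedulers online. */
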
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 85ee703c99..35f259f7c8 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -75,12 +75,9 @@ static erts_lc_lock_order_t erts_lock_order[] = {
* if only one lock use
* the lock name)"
*/
-#ifdef ERTS_SMP
{ "driver_lock", "driver_name" },
{ "port_lock", "port_id" },
-#endif
{ "port_data_lock", "address" },
-#ifdef ERTS_SMP
{ "bif_timers", NULL },
{ "reg_tab", NULL },
{ "proc_main", "pid" },
@@ -89,9 +86,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "hipe_mfait_lock", NULL },
#endif
{ "nodes_monitors", NULL },
-#ifdef ERTS_SMP
{ "resource_monitors", "address" },
-#endif
{ "driver_list", NULL },
{ "proc_link", "pid" },
{ "proc_msgq", "pid" },
@@ -114,7 +109,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "fun_tab", NULL },
{ "environ", NULL },
{ "release_literal_areas", NULL },
-#endif
{ "efile_drv", "address" },
{ "drv_ev_state_grow", NULL, },
{ "drv_ev_state", "address" },
@@ -144,7 +138,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "async_enq_mtx", NULL },
{ "msacc_list_mutex", NULL },
{ "msacc_unmanaged_mutex", NULL },
-#ifdef ERTS_SMP
{ "atom_tab", NULL },
{ "misc_op_list_pre_alloc_lock", "address" },
{ "message_pre_alloc_lock", "address" },
@@ -155,17 +148,13 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "sys_msg_q", NULL },
{ "tracer_mtx", NULL },
{ "port_table", NULL },
-#endif
{ "magic_ref_table", "address" },
{ "mtrace_op", NULL },
{ "instr_x", NULL },
{ "instr", NULL },
-#ifdef ERTS_SMP
{ "pollsets_lock", NULL },
-#endif
{ "alcu_allocator", "index" },
{ "mseg", NULL },
-#ifdef ERTS_SMP
{ "port_task_pre_alloc_lock", "address" },
{ "proclist_pre_alloc_lock", "address" },
{ "xports_list_pre_alloc_lock", "address" },
@@ -178,7 +167,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "pix_lock", "address" },
{ "run_queues_lists", NULL },
{ "sched_stat", NULL },
-#endif
{ "async_init_mtx", NULL },
#ifdef __WIN32__
#ifdef DEBUG
@@ -189,9 +177,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "efile_drv dtrace mutex", NULL },
#endif
{ "mtrace_buf", NULL },
-#ifdef ERTS_SMP
{ "os_monotonic_time", NULL },
-#endif
{ "erts_alloc_hard_debug", NULL },
{ "hard_dbg_mseg", NULL },
{ "erts_mmap", NULL }
diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
index 18296d1fec..ca5a72b183 100644
--- a/erts/emulator/beam/erl_lock_check.h
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -116,11 +116,7 @@ int erts_lc_is_emu_thr(void);
#define ERTS_LC_ASSERT(A) \
((void) (((A) || ERTS_SOMEONE_IS_CRASH_DUMPING) ? 1 : erts_lc_assert_failed(__FILE__, __LINE__, #A)))
-#ifdef ERTS_SMP
#define ERTS_SMP_LC_ASSERT(A) ERTS_LC_ASSERT(A)
-#else
-#define ERTS_SMP_LC_ASSERT(A) ((void) 1)
-#endif
#else /* #ifdef ERTS_ENABLE_LOCK_CHECK */
#define ERTS_SMP_LC_ASSERT(A) ((void) 1)
#define ERTS_LC_ASSERT(A) ((void) 1)
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index c1af70592a..b35c14bfc8 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -39,11 +39,7 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(message_ref,
ERL_MESSAGE_BUF_SZ,
ERTS_ALC_T_MSG_REF)
-#if defined(DEBUG) && 0
-#define HARD_DEBUG
-#else
#undef HARD_DEBUG
-#endif
void
init_message(void)
@@ -265,9 +261,7 @@ erts_queue_dist_message(Process *rcvr,
Sint tok_lastcnt = 0;
Sint tok_serial = 0;
#endif
-#ifdef ERTS_SMP
erts_aint_t state;
-#endif
ERTS_SMP_LC_ASSERT(rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));
@@ -283,7 +277,6 @@ erts_queue_dist_message(Process *rcvr,
#endif
ERL_MESSAGE_TOKEN(mp) = token;
-#ifdef ERTS_SMP
if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ)) {
if (erts_smp_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
@@ -305,7 +298,6 @@ erts_queue_dist_message(Process *rcvr,
erts_cleanup_messages(mp);
}
else
-#endif
if (IS_TRACED_FL(rcvr, F_TRACE_RECEIVE)) {
if (from == am_Empty)
from = dist_ext->dep->sysname;
@@ -364,11 +356,7 @@ erts_queue_dist_message(Process *rcvr,
erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
erts_proc_notify_new_message(rcvr,
-#ifdef ERTS_SMP
rcvr_locks
-#else
- 0
-#endif
);
}
}
@@ -393,7 +381,6 @@ queue_messages(Process* receiver,
ERL_MESSAGE_TOKEN(first) == NIL ||
is_tuple(ERL_MESSAGE_TOKEN(first)));
-#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(receiver) < ERTS_PROC_LOCK_MSGQ ||
receiver_locks == erts_proc_lc_my_proc_locks(receiver));
@@ -420,14 +407,11 @@ queue_messages(Process* receiver,
locked_msgq = 1;
}
-#endif
state = erts_smp_atomic32_read_nob(&receiver->state);
if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
-#ifdef ERTS_SMP
exiting:
-#endif
/* Drop message if receiver is exiting or has a pending exit... */
if (locked_msgq)
erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
@@ -436,7 +420,6 @@ queue_messages(Process* receiver,
}
res = receiver->msg.len;
-#ifdef ERTS_SMP
if (receiver_locks & ERTS_PROC_LOCK_MAIN) {
/*
* We move 'in queue' to 'private queue' and place
@@ -451,7 +434,6 @@ queue_messages(Process* receiver,
LINK_MESSAGE_PRIVQ(receiver, first, last, len);
}
else
-#endif
{
LINK_MESSAGE(receiver, first, last, len);
}
@@ -492,11 +474,7 @@ queue_messages(Process* receiver,
erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
}
-#ifdef ERTS_SMP
erts_proc_notify_new_message(receiver, receiver_locks);
-#else
- erts_proc_notify_new_message(receiver, 0);
-#endif
return res;
}
@@ -597,9 +575,7 @@ erts_try_alloc_message_on_heap(Process *pp,
ErlOffHeap **ohpp,
int *on_heap_p)
{
-#ifdef ERTS_SMP
int locked_main = 0;
-#endif
ErtsMessage *mp;
ASSERT(!(*psp & ERTS_PSFLG_OFF_HEAP_MSGQ));
@@ -607,15 +583,9 @@ erts_try_alloc_message_on_heap(Process *pp,
if ((*psp) & ERTS_PSFLGS_VOLATILE_HEAP)
goto in_message_fragment;
else if (
-#if defined(ERTS_SMP)
*plp & ERTS_PROC_LOCK_MAIN
-#else
- pp
-#endif
) {
-#ifdef ERTS_SMP
try_on_heap:
-#endif
if (((*psp) & ERTS_PSFLGS_VOLATILE_HEAP)
|| (pp->flags & F_DISABLE_GC)
|| HEAP_LIMIT(pp) - HEAP_TOP(pp) <= sz) {
@@ -623,12 +593,10 @@ erts_try_alloc_message_on_heap(Process *pp,
* The heap is either potentially in an inconsistent
* state, or not large enough.
*/
-#ifdef ERTS_SMP
if (locked_main) {
*plp &= ~ERTS_PROC_LOCK_MAIN;
erts_smp_proc_unlock(pp, ERTS_PROC_LOCK_MAIN);
}
-#endif
goto in_message_fragment;
}
@@ -639,14 +607,12 @@ erts_try_alloc_message_on_heap(Process *pp,
mp->data.attached = NULL;
*on_heap_p = !0;
}
-#ifdef ERTS_SMP
else if (pp && erts_smp_proc_trylock(pp, ERTS_PROC_LOCK_MAIN) == 0) {
locked_main = 1;
*psp = erts_smp_atomic32_read_nob(&pp->state);
*plp |= ERTS_PROC_LOCK_MAIN;
goto try_on_heap;
}
-#endif
else {
in_message_fragment:
if (!((*psp) & ERTS_PSFLG_ON_HEAP_MSGQ)) {
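
queue_messages() and ERTS_SMP_MSGQ_MV_INQ2PRIVQ keep the two-queue design as the only one: senders append to a shared in-queue under ERTS_PROC_LOCK_MSGQ, and the receiving process periodically splices that queue onto its private one while holding its main lock. A self-contained sketch of the same split with pthreads and hypothetical names:

/* Sketch only: public in-queue plus owner-only private queue. */
#include <pthread.h>
#include <stddef.h>

struct msg { struct msg *next; };

struct proc {
    pthread_mutex_t msgq_mtx;                   /* protects the public in-queue */
    struct msg *inq;   struct msg **inq_end;    /* senders append here */
    struct msg *privq; struct msg **privq_end;  /* owner-only private queue */
};

static void proc_init(struct proc *p)
{
    pthread_mutex_init(&p->msgq_mtx, NULL);
    p->inq = p->privq = NULL;
    p->inq_end = &p->inq;
    p->privq_end = &p->privq;
}

/* Sender side: append under the in-queue lock, as queue_messages() does. */
static void send_msg(struct proc *rcvr, struct msg *m)
{
    m->next = NULL;
    pthread_mutex_lock(&rcvr->msgq_mtx);
    *rcvr->inq_end = m;
    rcvr->inq_end = &m->next;
    pthread_mutex_unlock(&rcvr->msgq_mtx);
}

/* Receiver side: splice the public queue onto the private one, in the
 * spirit of ERTS_SMP_MSGQ_MV_INQ2PRIVQ. */
static void fetch_msgs(struct proc *p)
{
    pthread_mutex_lock(&p->msgq_mtx);
    if (p->inq) {
        *p->privq_end = p->inq;
        p->privq_end = p->inq_end;
        p->inq = NULL;
        p->inq_end = &p->inq;
    }
    pthread_mutex_unlock(&p->msgq_mtx);
}

The split keeps senders off the receiver's working set: only fetch_msgs() ever touches the private queue, so ordinary receive processing needs no lock at all.
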
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index 42ed14e69c..1f95ffaa5a 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -174,7 +174,6 @@ typedef struct {
ErtsMessage** saved_last; /* saved last pointer */
} ErlMessageQueue;
-#ifdef ERTS_SMP
typedef struct {
ErtsMessage* first;
@@ -190,7 +189,6 @@ typedef struct erl_trace_message_queue__ {
Sint len; /* queue length */
} ErlTraceMessageQueue;
-#endif
/* Get "current" message */
#define PEEK_MESSAGE(p) (*(p)->msg.save)
@@ -207,7 +205,6 @@ typedef struct erl_trace_message_queue__ {
(p)->where.len += (num_msgs); \
} while(0)
-#ifdef ERTS_SMP
/* Add message last in private message queue */
#define LINK_MESSAGE_PRIVQ(p, first_msg, last_msg, len) \
@@ -231,17 +228,6 @@ typedef struct erl_trace_message_queue__ {
} \
} while (0)
-#else
-
-#define ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p)
-
-/* Add message last_msg in message queue */
-#define LINK_MESSAGE(p, first_msg, last_msg, len) \
- do { \
- LINK_MESSAGE_IMPL(p, first_msg, last_msg, len, msg); \
- } while(0)
-
-#endif
/* Unlink current message */
#define UNLINK_MESSAGE(p,msgp) do { \
diff --git a/erts/emulator/beam/erl_msacc.c b/erts/emulator/beam/erl_msacc.c
index 6c477be615..957358bcf5 100644
--- a/erts/emulator/beam/erl_msacc.c
+++ b/erts/emulator/beam/erl_msacc.c
@@ -48,11 +48,7 @@ static Eterm erts_msacc_gather_stats(ErtsMsAcc *msacc, ErtsHeapFactory *factory)
static void erts_msacc_reset(ErtsMsAcc *msacc);
static ErtsMsAcc* get_msacc(void);
-#ifdef USE_THREADS
erts_tsd_key_t ERTS_WRITE_UNLIKELY(erts_msacc_key);
-#else
-ErtsMsAcc *ERTS_WRITE_UNLIKELY(erts_msacc) = NULL;
-#endif
#ifndef ERTS_MSACC_ALWAYS_ON
int ERTS_WRITE_UNLIKELY(erts_msacc_enabled);
#endif
@@ -60,10 +56,8 @@ int ERTS_WRITE_UNLIKELY(erts_msacc_enabled);
static Eterm *erts_msacc_state_atoms = NULL;
static erts_rwmtx_t msacc_mutex;
static ErtsMsAcc *msacc_managed = NULL;
-#ifdef USE_THREADS
static ErtsMsAcc *msacc_unmanaged = NULL;
static Uint msacc_unmanaged_count = 0;
-#endif
#if ERTS_MSACC_STATE_COUNT < MAP_SMALL_MAP_LIMIT
#define DEFAULT_MSACC_MSG_SIZE (3 + 1 + ERTS_MSACC_STATE_COUNT * 2 + 3 + ERTS_REF_THING_SIZE)
@@ -78,11 +72,7 @@ void erts_msacc_early_init(void) {
#endif
erts_rwmtx_init(&msacc_mutex, "msacc_list_mutex", NIL,
ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
-#ifdef USE_THREADS
erts_tsd_key_create(&erts_msacc_key,"erts_msacc_key");
-#else
- erts_msacc = NULL;
-#endif
}
void erts_msacc_init(void) {
@@ -107,7 +97,6 @@ void erts_msacc_init_thread(char *type, int id, int managed) {
msacc->tid = erts_thr_self();
msacc->perf_counter = 0;
-#ifdef USE_THREADS
erts_rwmtx_rwlock(&msacc_mutex);
if (!managed) {
erts_mtx_init(&msacc->mtx, "msacc_unmanaged_mutex", NIL,
@@ -121,9 +110,6 @@ void erts_msacc_init_thread(char *type, int id, int managed) {
msacc_managed = msacc;
}
erts_rwmtx_rwunlock(&msacc_mutex);
-#else
- msacc_managed = msacc;
-#endif
erts_msacc_reset(msacc);
@@ -370,12 +356,8 @@ erts_msacc_request(Process *c_p, int action, Eterm *threads)
msaccrp->ref = STORE_NC(&hp, NULL, ref);
msaccrp->req_sched = esdp->no;
-#ifdef ERTS_SMP
*threads = erts_no_schedulers;
*threads += 1; /* aux thread */
-#else
- *threads = 1;
-#endif
erts_smp_atomic32_init_nob(&msaccrp->refc,(erts_aint32_t)*threads);
@@ -386,12 +368,9 @@ erts_msacc_request(Process *c_p, int action, Eterm *threads)
erts_no_schedulers,
reply_msacc,
(void *) msaccrp);
-#ifdef ERTS_SMP
/* aux thread */
erts_schedule_misc_aux_work(0, reply_msacc, (void *) msaccrp);
-#endif
-#ifdef USE_THREADS
/* Manage unmanaged threads */
switch (action) {
case ERTS_MSACC_GATHER: {
@@ -468,7 +447,6 @@ erts_msacc_request(Process *c_p, int action, Eterm *threads)
default: { ASSERT(0); }
}
-#endif
*threads = make_small(*threads);
diff --git a/erts/emulator/beam/erl_msacc.h b/erts/emulator/beam/erl_msacc.h
index d64ef8c8b9..8349a7e297 100644
--- a/erts/emulator/beam/erl_msacc.h
+++ b/erts/emulator/beam/erl_msacc.h
@@ -159,11 +159,7 @@ struct erl_msacc_t_ {
#ifdef ERTS_ENABLE_MSACC
-#ifdef USE_THREADS
extern erts_tsd_key_t erts_msacc_key;
-#else
-extern ErtsMsAcc *erts_msacc;
-#endif
#ifdef ERTS_MSACC_ALWAYS_ON
#define erts_msacc_enabled 1
@@ -171,13 +167,8 @@ extern ErtsMsAcc *erts_msacc;
extern int erts_msacc_enabled;
#endif
-#ifdef USE_THREADS
#define ERTS_MSACC_TSD_GET() erts_tsd_get(erts_msacc_key)
#define ERTS_MSACC_TSD_SET(tsd) erts_tsd_set(erts_msacc_key,tsd)
-#else
-#define ERTS_MSACC_TSD_GET() erts_msacc
-#define ERTS_MSACC_TSD_SET(tsd) erts_msacc = tsd
-#endif
void erts_msacc_early_init(void);
void erts_msacc_init(void);
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 848e116621..b586a4636a 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -564,7 +564,6 @@ void enif_clear_env(ErlNifEnv* env)
free_tmp_objs(env);
}
-#ifdef ERTS_SMP
#ifdef DEBUG
static int enif_send_delay = 0;
#define ERTS_FORCE_ENIF_SEND_DELAY() (enif_send_delay++ % 2 == 0)
@@ -648,16 +647,13 @@ error:
return reds;
}
-#endif
int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
ErlNifEnv* msg_env, ERL_NIF_TERM msg)
{
struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)msg_env;
ErtsProcLocks rp_locks = 0;
-#ifdef ERTS_SMP
ErtsProcLocks lc_locks = 0;
-#endif
Process* rp;
Process* c_p;
ErtsMessage *mp;
@@ -666,13 +662,6 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
execution_state(env, &c_p, &scheduler);
-#ifndef ERTS_SMP
- if (!scheduler) {
- erts_exit(ERTS_ABORT_EXIT,
- "enif_send: called from non-scheduler thread on non-SMP VM");
- return 0;
- }
-#endif
if (scheduler > 0) { /* Normal scheduler */
rp = erts_proc_lookup(receiver);
@@ -756,7 +745,6 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
full_cache_env(env);
}
}
-#ifdef ERTS_SMP
else {
/* This clause is taken when the nif is called in the context
of a traced process. We do not know which locks we have
@@ -814,12 +802,10 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
rp_locks |= ERTS_PROC_LOCK_MSGQ;
}
}
-#endif /* ERTS_SMP */
erts_queue_message(rp, rp_locks, mp, msg,
c_p ? c_p->common.id : am_undefined);
-#ifdef ERTS_SMP
done:
if (c_p == rp)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -827,7 +813,6 @@ done:
erts_smp_proc_unlock(rp, rp_locks & ~lc_locks);
if (c_p && (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC))
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
-#endif
if (scheduler <= 0)
erts_proc_dec_refc(rp);
@@ -857,15 +842,9 @@ enif_port_command(ErlNifEnv *env, const ErlNifPort* to_port,
if (scheduler > 0)
prt = erts_port_lookup(to_port->port_id, iflags);
else {
-#ifdef ERTS_SMP
if (ERTS_PROC_IS_EXITING(c_p))
return 0;
prt = erts_thr_port_lookup(to_port->port_id, iflags);
-#else
- erts_exit(ERTS_ABORT_EXIT,
- "enif_port_command: called from non-scheduler "
- "thread on non-SMP VM");
-#endif
}
if (!prt)
@@ -1847,18 +1826,11 @@ int enif_is_process_alive(ErlNifEnv* env, ErlNifPid *proc)
if (scheduler > 0)
return !!erts_proc_lookup(proc->pid);
else {
-#ifdef ERTS_SMP
Process* rp = erts_pid2proc_opt(NULL, 0, proc->pid, 0,
ERTS_P2P_FLG_INC_REFC);
if (rp)
erts_proc_dec_refc(rp);
return !!rp;
-#else
- erts_exit(ERTS_ABORT_EXIT, "enif_is_process_alive: "
- "called from non-scheduler thread "
- "in non-smp emulator");
- return 0;
-#endif
}
}
@@ -1874,17 +1846,10 @@ int enif_is_port_alive(ErlNifEnv *env, ErlNifPort *port)
if (scheduler > 0)
return !!erts_port_lookup(port->port_id, iflags);
else {
-#ifdef ERTS_SMP
Port *prt = erts_thr_port_lookup(port->port_id, iflags);
if (prt)
erts_port_dec_refc(prt);
return !!prt;
-#else
- erts_exit(ERTS_ABORT_EXIT, "enif_is_port_alive: "
- "called from non-scheduler thread "
- "in non-smp emulator");
- return 0;
-#endif
}
}
@@ -2231,12 +2196,7 @@ static void destroy_one_monitor(ErtsMonitor* mon, void* context)
rp = erts_proc_lookup(mon->u.pid);
}
else {
-#ifdef ERTS_SMP
rp = erts_proc_lookup_inc_refc(mon->u.pid);
-#else
- ASSERT(!"nif monitor destruction in non-scheduler thread");
- rp = NULL;
-#endif
}
if (!rp) {
@@ -2252,10 +2212,8 @@ static void destroy_one_monitor(ErtsMonitor* mon, void* context)
is_exiting = 0;
}
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
-#ifdef ERTS_SMP
if (ctx->scheduler <= 0)
erts_proc_dec_refc(rp);
-#endif
}
if (is_exiting) {
ctx->resource->monitors->pending_failed_fire++;
@@ -2281,34 +2239,7 @@ static void destroy_all_monitors(ErtsMonitor* monitors, ErtsResource* resource)
}
-#ifdef ERTS_SMP
# define NIF_RESOURCE_DTOR &nif_resource_dtor
-#else
-# define NIF_RESOURCE_DTOR &nosmp_nif_resource_dtor_prologue
-
-/*
- * NO-SMP: Always run resource destructor on scheduler thread
- * as we may have to remove process monitors.
- */
-static int nif_resource_dtor(Binary*);
-
-static void nosmp_nif_resource_dtor_scheduled(void* vbin)
-{
- erts_bin_free((Binary*)vbin);
-}
-
-static int nosmp_nif_resource_dtor_prologue(Binary* bin)
-{
- if (is_scheduler()) {
- return nif_resource_dtor(bin);
- }
- else {
- erts_schedule_misc_aux_work(1, nosmp_nif_resource_dtor_scheduled, bin);
- return 0; /* do not free */
- }
-}
-
-#endif /* !ERTS_SMP */
static int nif_resource_dtor(Binary* bin)
{
@@ -3216,18 +3147,10 @@ int enif_monitor_process(ErlNifEnv* env, void* obj, const ErlNifPid* target_pid,
execution_state(env, NULL, &scheduler);
-#ifdef ERTS_SMP
if (scheduler > 0) /* Normal scheduler */
rp = erts_proc_lookup_raw(target_pid->pid);
else
rp = erts_proc_lookup_raw_inc_refc(target_pid->pid);
-#else
- if (scheduler <= 0) {
- erts_exit(ERTS_ABORT_EXIT, "enif_monitor_process: called from "
- "non-scheduler thread on non-SMP VM");
- }
- rp = erts_proc_lookup(target_pid->pid);
-#endif
if (!rp)
return 1;
@@ -3247,10 +3170,8 @@ int enif_monitor_process(ErlNifEnv* env, void* obj, const ErlNifPid* target_pid,
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
erts_smp_mtx_unlock(&rsrc->monitors->lock);
-#ifdef ERTS_SMP
if (scheduler <= 0)
erts_proc_dec_refc(rp);
-#endif
if (monitor)
erts_ref_to_driver_monitor(ref,monitor);
@@ -3289,18 +3210,10 @@ int enif_demonitor_process(ErlNifEnv* env, void* obj, const ErlNifMonitor* monit
ASSERT(mon->type == MON_ORIGIN);
ASSERT(is_internal_pid(mon->u.pid));
-#ifdef ERTS_SMP
if (scheduler > 0) /* Normal scheduler */
rp = erts_proc_lookup(mon->u.pid);
else
rp = erts_proc_lookup_inc_refc(mon->u.pid);
-#else
- if (scheduler <= 0) {
- erts_exit(ERTS_ABORT_EXIT, "enif_demonitor_process: called from "
- "non-scheduler thread on non-SMP VM");
- }
- rp = erts_proc_lookup(mon->u.pid);
-#endif
if (!rp) {
is_exiting = 1;
@@ -3316,10 +3229,8 @@ int enif_demonitor_process(ErlNifEnv* env, void* obj, const ErlNifMonitor* monit
}
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
-#ifdef ERTS_SMP
if (scheduler <= 0)
erts_proc_dec_refc(rp);
-#endif
}
if (is_exiting) {
rsrc->monitors->pending_failed_fire++;
diff --git a/erts/emulator/beam/erl_node_tables.c b/erts/emulator/beam/erl_node_tables.c
index deadf435e9..c12f231a3b 100644
--- a/erts/emulator/beam/erl_node_tables.c
+++ b/erts/emulator/beam/erl_node_tables.c
@@ -808,7 +808,6 @@ void erts_init_node_tables(int dd_sec)
references_atoms_need_init = 1;
}
-#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
int erts_lc_is_de_rwlocked(DistEntry *dep)
{
@@ -819,7 +818,6 @@ int erts_lc_is_de_rlocked(DistEntry *dep)
return erts_smp_lc_rwmtx_is_rlocked(&dep->rwmtx);
}
#endif
-#endif
#ifdef ERTS_ENABLE_LOCK_COUNT
@@ -1292,13 +1290,11 @@ init_referred_dist(void *dist, void *unused)
no_referred_dists++;
}
-#ifdef ERTS_SMP
static void
insert_sys_msg(Eterm from, Eterm to, Eterm msg, ErlHeapFragment *bp)
{
insert_offheap(&bp->off_heap, HEAP_REF, to);
}
-#endif
static void
insert_delayed_delete_node(void *state,
@@ -1381,9 +1377,7 @@ setup_reference_table(void)
int mli;
ErtsMessage *msg_list[] = {
proc->msg.first,
-#ifdef ERTS_SMP
proc->msg_inq.first,
-#endif
proc->msg_frag};
/* Insert Heap */
@@ -1430,9 +1424,7 @@ setup_reference_table(void)
}
}
-#ifdef ERTS_SMP
erts_foreach_sys_msg_in_q(insert_sys_msg);
-#endif
/* Insert all ports */
max = erts_ptab_max(&erts_port);
diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h
index 6a3213ec52..4c8e4197aa 100644
--- a/erts/emulator/beam/erl_port.h
+++ b/erts/emulator/beam/erl_port.h
@@ -131,9 +131,7 @@ typedef struct {
void *data[ERTS_PRTSD_SIZE];
} ErtsPrtSD;
-#ifdef ERTS_SMP
typedef struct ErtsXPortsList_ ErtsXPortsList;
-#endif
/*
* Port locking:
@@ -158,14 +156,9 @@ struct _erl_drv_port {
ErtsPortTaskSched sched;
ErtsPortTaskHandle timeout_task;
-#ifdef ERTS_SMP
erts_mtx_t *lock;
ErtsXPortsList *xports;
erts_smp_atomic_t run_queue;
-#else
- erts_atomic32_t refc;
- int cleanup;
-#endif
erts_atomic_t connected; /* A connected process */
Eterm caller; /* Current caller. */
erts_smp_atomic_t data; /* Data associated with port. */
@@ -221,7 +214,6 @@ ERTS_GLB_INLINE ErtsRunQueue *erts_port_runq(Port *prt);
ERTS_GLB_INLINE ErtsRunQueue *
erts_port_runq(Port *prt)
{
-#ifdef ERTS_SMP
ErtsRunQueue *rq1, *rq2;
rq1 = (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue);
if (!rq1)
@@ -236,9 +228,6 @@ erts_port_runq(Port *prt)
if (!rq1)
return NULL;
}
-#else
- return ERTS_RUNQ_IX(0);
-#endif
}
#endif
@@ -269,13 +258,11 @@ erts_prtsd_set(Port *prt, int ix, void *data)
psd = (ErtsPrtSD *) erts_smp_atomic_read_nob(&prt->psd);
if (psd) {
-#ifdef ERTS_SMP
#ifdef ETHR_ORDERED_READ_DEPEND
ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
#else
ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreStore);
#endif
-#endif
old = psd->data[ix];
psd->data[ix] = data;
return old;
@@ -371,13 +358,8 @@ Eterm erts_request_io_bytes(Process *c_p);
void print_port_info(Port *, fmtfn_t, void *);
void erts_port_free(Port *);
-#ifndef ERTS_SMP
-void erts_port_cleanup(Port *);
-#endif
void erts_fire_port_monitor(Port *prt, Eterm ref);
-#ifdef ERTS_SMP
int erts_port_handle_xports(Port *);
-#endif
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
int erts_lc_is_port_locked(Port *);
@@ -421,33 +403,25 @@ ERTS_GLB_INLINE Sint erts_port_read_refc(Port *prt)
ERTS_GLB_INLINE int
erts_smp_port_trylock(Port *prt)
{
-#ifdef ERTS_SMP
/* *Need* to be a managed thread */
ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
return erts_mtx_trylock(prt->lock);
-#else
- return 0;
-#endif
}
ERTS_GLB_INLINE void
erts_smp_port_lock(Port *prt)
{
-#ifdef ERTS_SMP
/* *Need* to be a managed thread */
ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
erts_mtx_lock(prt->lock);
-#endif
}
ERTS_GLB_INLINE void
erts_smp_port_unlock(Port *prt)
{
-#ifdef ERTS_SMP
/* *Need* to be a managed thread */
ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
erts_mtx_unlock(prt->lock);
-#endif
}
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
@@ -478,9 +452,7 @@ extern const Port erts_invalid_port;
int erts_is_port_ioq_empty(Port *);
void erts_terminate_port(Port *);
-#ifdef ERTS_SMP
Port *erts_de2port(DistEntry *, Process *, ErtsProcLocks);
-#endif
ERTS_GLB_INLINE Port *erts_pix2port(int);
ERTS_GLB_INLINE Port *erts_port_lookup_raw(Eterm);
@@ -488,11 +460,9 @@ ERTS_GLB_INLINE Port *erts_port_lookup(Eterm, Uint32);
ERTS_GLB_INLINE Port*erts_id2port(Eterm id);
ERTS_GLB_INLINE Port *erts_id2port_sflgs(Eterm, Process *, ErtsProcLocks, Uint32);
ERTS_GLB_INLINE void erts_port_release(Port *);
-#ifdef ERTS_SMP
ERTS_GLB_INLINE Port *erts_thr_port_lookup(Eterm id, Uint32 invalid_sflgs);
ERTS_GLB_INLINE Port *erts_thr_id2port_sflgs(Eterm id, Uint32 invalid_sflgs);
ERTS_GLB_INLINE void erts_thr_port_release(Port *prt);
-#endif
ERTS_GLB_INLINE Port *erts_thr_drvport2port(ErlDrvPort, int);
ERTS_GLB_INLINE Port *erts_drvport2port_state(ErlDrvPort, erts_aint32_t *);
ERTS_GLB_INLINE Eterm erts_drvport2id(ErlDrvPort);
@@ -574,9 +544,7 @@ erts_id2port_sflgs(Eterm id,
Process *c_p, ErtsProcLocks c_p_locks,
Uint32 invalid_sflgs)
{
-#ifdef ERTS_SMP
int no_proc_locks = !c_p || !c_p_locks;
-#endif
erts_aint32_t state;
Port *prt;
@@ -592,7 +560,6 @@ erts_id2port_sflgs(Eterm id,
if (!prt || prt->common.id != id)
return NULL;
-#ifdef ERTS_SMP
if (no_proc_locks)
erts_smp_port_lock(prt);
else if (erts_smp_port_trylock(prt) == EBUSY) {
@@ -601,12 +568,9 @@ erts_id2port_sflgs(Eterm id,
erts_smp_port_lock(prt);
erts_smp_proc_lock(c_p, c_p_locks);
}
-#endif
state = erts_atomic32_read_nob(&prt->state);
if (state & invalid_sflgs) {
-#ifdef ERTS_SMP
erts_smp_port_unlock(prt);
-#endif
return NULL;
}
@@ -618,17 +582,9 @@ erts_port_release(Port *prt)
{
/* Only allowed to be called from managed threads */
ERTS_SMP_LC_ASSERT(erts_thr_progress_is_managed_thread());
-#ifdef ERTS_SMP
erts_smp_port_unlock(prt);
-#else
- if (prt->cleanup) {
- prt->cleanup = 0;
- erts_port_cleanup(prt);
- }
-#endif
}
-#ifdef ERTS_SMP
/*
* erts_thr_id2port_sflgs() and erts_port_dec_refc(prt) can
* be used by unmanaged threads in the SMP case.
@@ -714,13 +670,10 @@ ERTS_GLB_INLINE void
erts_thr_port_release(Port *prt)
{
erts_mtx_unlock(prt->lock);
-#ifdef ERTS_SMP
if (!erts_thr_progress_is_managed_thread())
erts_port_dec_refc(prt);
-#endif
}
-#endif
ERTS_GLB_INLINE Port *
erts_thr_drvport2port(ErlDrvPort drvport, int lock_pdl)
@@ -832,13 +785,11 @@ erts_port_driver_callback_epilogue(Port *prt, erts_aint32_t *statep)
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
}
-#ifdef ERTS_SMP
if (prt->xports) {
reds += erts_port_handle_xports(prt);
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
ASSERT(!prt->xports);
}
-#endif
if (statep)
*statep = state;
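
erts_port_runq() shows the lock-then-revalidate idiom: read the port's run-queue pointer, lock that queue, and re-read the pointer to make sure the port was not migrated in the meantime, retrying if it was. A standalone sketch of the loop with C11 atomics, a pthread mutex per run queue, and hypothetical names:

/* Sketch only: lock the queue, then confirm the port still belongs to it. */
#include <stdatomic.h>
#include <pthread.h>
#include <stddef.h>

struct runq { pthread_mutex_t mtx; };

struct port { _Atomic(struct runq *) run_queue; };

static struct runq *port_runq(struct port *prt)
{
    for (;;) {
        struct runq *rq = atomic_load(&prt->run_queue);
        if (!rq)
            return NULL;                 /* port is going away */
        pthread_mutex_lock(&rq->mtx);
        if (rq == atomic_load(&prt->run_queue))
            return rq;                   /* still this port's queue: done */
        pthread_mutex_unlock(&rq->mtx);  /* migrated meanwhile: retry */
    }
}

Taking the lock before the final comparison is what makes the check stable: once the queue is locked, the port can no longer be migrated away from it.
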
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 1ab1e47254..5f333ea8d8 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -43,11 +43,7 @@
*/
#define ERTS_PORT_CALLBACK_VREDS (CONTEXT_REDS/20)
-#if defined(DEBUG) && 0
-#define ERTS_HARD_DEBUG_TASK_QUEUES
-#else
#undef ERTS_HARD_DEBUG_TASK_QUEUES
-#endif
#ifdef ERTS_HARD_DEBUG_TASK_QUEUES
static void chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_queue);
@@ -126,9 +122,7 @@ struct ErtsPortTaskHandleList_ {
ErtsPortTaskHandle handle;
union {
ErtsPortTaskHandleList *next;
-#ifdef ERTS_SMP
ErtsThrPrgrLaterOp release;
-#endif
} u;
};
@@ -161,25 +155,19 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(busy_caller_table,
50,
ERTS_ALC_T_BUSY_CALLER_TAB)
-#ifdef ERTS_SMP
static void
call_port_task_free(void *vptp)
{
port_task_free((ErtsPortTask *) vptp);
}
-#endif
static ERTS_INLINE void
schedule_port_task_free(ErtsPortTask *ptp)
{
-#ifdef ERTS_SMP
erts_schedule_thr_prgr_later_cleanup_op(call_port_task_free,
(void *) ptp,
&ptp->u.release,
sizeof(ErtsPortTask));
-#else
- port_task_free(ptp);
-#endif
}
static ERTS_INLINE ErtsPortTask *
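After this hunk there is only one way to free a port task: the free is handed to the thread-progress machinery, so it happens only once every managed thread has passed a progress point and can no longer hold a reference to the task. The resulting code, reconstructed from the context lines above:

/* Reconstructed from the hunk above: port tasks are always freed via a
 * deferred cleanup operation rather than immediately. */
static void
call_port_task_free(void *vptp)
{
    port_task_free((ErtsPortTask *) vptp);
}

static ERTS_INLINE void
schedule_port_task_free(ErtsPortTask *ptp)
{
    /* Runs call_port_task_free() once thread progress guarantees that no
     * thread can still be referencing *ptp. */
    erts_schedule_thr_prgr_later_cleanup_op(call_port_task_free,
                                            (void *) ptp,
                                            &ptp->u.release,
                                            sizeof(ErtsPortTask));
}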
@@ -830,25 +818,19 @@ erl_drv_busy_msgq_limits(ErlDrvPort dport, ErlDrvSizeT *lowp, ErlDrvSizeT *highp
* No-suspend handles.
*/
-#ifdef ERTS_SMP
static void
free_port_task_handle_list(void *vpthlp)
{
erts_free(ERTS_ALC_T_PT_HNDL_LIST, vpthlp);
}
-#endif
static void
schedule_port_task_handle_list_free(ErtsPortTaskHandleList *pthlp)
{
-#ifdef ERTS_SMP
erts_schedule_thr_prgr_later_cleanup_op(free_port_task_handle_list,
(void *) pthlp,
&pthlp->u.release,
sizeof(ErtsPortTaskHandleList));
-#else
- erts_free(ERTS_ALC_T_PT_HNDL_LIST, pthlp);
-#endif
}
static ERTS_INLINE void
@@ -946,10 +928,8 @@ enqueue_port(ErtsRunQueue *runq, Port *pp)
erts_smp_inc_runq_len(runq, &runq->ports.info, ERTS_PORT_PRIO_LEVEL);
-#ifdef ERTS_SMP
if (ERTS_RUNQ_FLGS_GET_NOB(runq) & ERTS_RUNQ_FLG_HALTING)
erts_non_empty_runq(runq);
-#endif
}
static ERTS_INLINE Port *
@@ -1301,9 +1281,7 @@ erts_port_task_abort(ErtsPortTaskHandle *pthp)
{
int res;
ErtsPortTask *ptp;
-#ifdef ERTS_SMP
ErtsThrPrgrDelayHandle dhndl = erts_thr_progress_unmanaged_delay();
-#endif
ptp = handle2task(pthp);
if (!ptp)
@@ -1345,9 +1323,7 @@ erts_port_task_abort(ErtsPortTaskHandle *pthp)
}
}
-#ifdef ERTS_SMP
erts_thr_progress_unmanaged_continue(dhndl);
-#endif
return res;
}
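erts_port_task_abort() above now always brackets the handle-to-task dereference with an unmanaged thread-progress delay. A condensed sketch of that pattern, using only the calls visible in the hunks (the abort logic in between is elided):

/* Sketch: delaying thread progress keeps deferred cleanups (such as
 * schedule_port_task_free() above) from freeing the task while this
 * thread, which may be unmanaged, is still dereferencing the handle. */
ErtsThrPrgrDelayHandle dhndl = erts_thr_progress_unmanaged_delay();
ErtsPortTask *ptp = handle2task(pthp);
if (ptp) {
    /* ... try to move the task from scheduled to aborted ... */
}
erts_thr_progress_unmanaged_continue(dhndl);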
@@ -1356,9 +1332,7 @@ void
erts_port_task_abort_nosuspend_tasks(Port *pp)
{
ErtsPortTaskHandleList *abort_list;
-#ifdef ERTS_SMP
ErtsThrPrgrDelayHandle dhndl = ERTS_THR_PRGR_DHANDLE_INVALID;
-#endif
erts_port_task_sched_lock(&pp->sched);
erts_smp_atomic32_read_band_nob(&pp->sched.flags,
@@ -1381,18 +1355,14 @@ erts_port_task_abort_nosuspend_tasks(Port *pp)
pthlp = abort_list;
abort_list = pthlp->u.next;
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
dhndl = erts_thr_progress_unmanaged_delay();
-#endif
pthp = &pthlp->handle;
ptp = handle2task(pthp);
if (!ptp) {
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
erts_thr_progress_unmanaged_continue(dhndl);
-#endif
schedule_port_task_handle_list_free(pthlp);
continue;
}
@@ -1411,10 +1381,8 @@ erts_port_task_abort_nosuspend_tasks(Port *pp)
ERTS_PT_STATE_SCHEDULED);
if (old_state != ERTS_PT_STATE_SCHEDULED) {
/* Task already aborted, executing, or executed */
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
erts_thr_progress_unmanaged_continue(dhndl);
-#endif
schedule_port_task_handle_list_free(pthlp);
continue;
}
@@ -1424,10 +1392,8 @@ erts_port_task_abort_nosuspend_tasks(Port *pp)
type = ptp->type;
td = ptp->u.alive.td;
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
erts_thr_progress_unmanaged_continue(dhndl);
-#endif
schedule_port_task_handle_list_free(pthlp);
abort_nosuspend_task(pp, type, &td, pp->sched.taskq.bpq != NULL);
@@ -1446,10 +1412,8 @@ erts_port_task_schedule(Eterm id,
{
ErtsProc2PortSigData *sigdp = NULL;
ErtsPortTaskHandleList *ns_pthlp = NULL;
-#ifdef ERTS_SMP
ErtsRunQueue *xrunq;
ErtsThrPrgrDelayHandle dhndl;
-#endif
ErtsRunQueue *runq;
Port *pp;
ErtsPortTask *ptp = NULL;
@@ -1460,19 +1424,15 @@ erts_port_task_schedule(Eterm id,
ASSERT(is_internal_port(id));
-#ifdef ERTS_SMP
dhndl = erts_thr_progress_unmanaged_delay();
-#endif
pp = erts_port_lookup_raw(id);
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) {
if (pp)
erts_port_inc_refc(pp);
erts_thr_progress_unmanaged_continue(dhndl);
}
-#endif
if (!pp)
goto fail;
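The lookup at the top of erts_port_task_schedule() shows how an unmanaged caller keeps the port alive: while thread progress is delayed it bumps the port reference count, and the matching decrements appear in the done, abort_nosuspend and fail paths further down in this file. Reconstructed from the context lines above:

/* Reconstructed from the hunk above. */
dhndl = erts_thr_progress_unmanaged_delay();
pp = erts_port_lookup_raw(id);
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) {
    /* Unmanaged caller: pin the port with a reference before letting
     * thread progress continue; dropped again at done/fail below. */
    if (pp)
        erts_port_inc_refc(pp);
    erts_thr_progress_unmanaged_continue(dhndl);
}
if (!pp)
    goto fail;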
@@ -1581,7 +1541,6 @@ erts_port_task_schedule(Eterm id,
if (!runq)
ERTS_INTERNAL_ERROR("Missing run-queue");
-#ifdef ERTS_SMP
xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
ERTS_SMP_LC_ASSERT(runq != xrunq);
ERTS_SMP_LC_VERIFY_RQ(runq, pp);
@@ -1593,7 +1552,6 @@ erts_port_task_schedule(Eterm id,
if (!runq)
ERTS_INTERNAL_ERROR("Missing run-queue");
}
-#endif
enqueue_port(runq, pp);
@@ -1606,19 +1564,15 @@ done:
if (prof_runnable_ports)
erts_port_task_sched_unlock(&pp->sched);
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
erts_port_dec_refc(pp);
-#endif
return 0;
abort_nosuspend:
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
erts_port_dec_refc(pp);
-#endif
abort_nosuspend_task(pp, ptp->type, &ptp->u.alive.td, 0);
@@ -1632,10 +1586,8 @@ abort_nosuspend:
fail:
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
erts_port_dec_refc(pp);
-#endif
if (ptp) {
abort_signal_task(pp, ERTS_PROC2PORT_SIG_ABORT,
@@ -1892,9 +1844,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
-1*io_tasks_executed);
}
-#ifdef ERTS_SMP
ASSERT(runq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue));
-#endif
active = finalize_exec(pp, &execq, processing_busy_q);
@@ -1907,21 +1857,16 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_smp_runq_lock(runq);
if (active) {
-#ifdef ERTS_SMP
ErtsRunQueue *xrunq;
-#endif
ASSERT(!(erts_atomic32_read_nob(&pp->state) & ERTS_PORT_SFLGS_DEAD));
-#ifdef ERTS_SMP
xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
ERTS_SMP_LC_ASSERT(runq != xrunq);
ERTS_SMP_LC_VERIFY_RQ(runq, pp);
if (!xrunq) {
-#endif
enqueue_port(runq, pp);
/* No need to notify ourselves about inc in runq. */
-#ifdef ERTS_SMP
}
else {
/* Emigrate port... */
@@ -1936,7 +1881,6 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_smp_runq_lock(runq);
}
-#endif
}
done:
@@ -1951,7 +1895,6 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
return res;
}
-#ifdef ERTS_SMP
static void
release_port(void *vport)
{
@@ -1967,7 +1910,6 @@ schedule_release_port(void *vport) {
&pp->common.u.release);
}
-#endif
static void
begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
@@ -2161,7 +2103,6 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
/*
* Schedule cleanup of port structure...
*/
-#ifdef ERTS_SMP
/* We might not be a scheduler, e.g. when tracing to a port we are the sys_msg_dispatcher */
if (!erts_get_scheduler_data()) {
erts_schedule_misc_aux_work(1, schedule_release_port, (void*)pp);
@@ -2171,12 +2112,8 @@ begin_port_cleanup(Port *pp, ErtsPortTask **execqp, int *processing_busy_q_p)
(void *) pp,
&pp->common.u.release);
}
-#else
- pp->cleanup = 1;
-#endif
}
-#ifdef ERTS_SMP
void
erts_enqueue_port(ErtsRunQueue *rq, Port *pp)
@@ -2200,7 +2137,6 @@ erts_dequeue_port(ErtsRunQueue *rq)
return pp;
}
-#endif
/*
* Initialize the module.
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
index 39f403b443..14db641bb6 100644
--- a/erts/emulator/beam/erl_port_task.h
+++ b/erts/emulator/beam/erl_port_task.h
@@ -125,9 +125,7 @@ typedef struct {
ErtsPortTaskBusyPortQ *bpq;
} taskq;
erts_smp_atomic32_t flags;
-#ifdef ERTS_SMP
erts_mtx_t mtx;
-#endif
} ErtsPortTaskSched;
ERTS_GLB_INLINE void erts_port_task_handle_init(ErtsPortTaskHandle *pthp);
@@ -175,9 +173,7 @@ ERTS_GLB_INLINE void erts_port_task_pre_init_sched(ErtsPortTaskSched *ptsp,
ERTS_GLB_INLINE void
erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id)
{
-#ifdef ERTS_SMP
char *lock_str = "port_sched_lock";
-#endif
ptsp->next = NULL;
ptsp->taskq.local.busy.first = NULL;
ptsp->taskq.local.busy.last = NULL;
@@ -187,25 +183,19 @@ erts_port_task_init_sched(ErtsPortTaskSched *ptsp, Eterm instr_id)
ptsp->taskq.in.first = NULL;
ptsp->taskq.in.last = NULL;
erts_smp_atomic32_init_nob(&ptsp->flags, 0);
-#ifdef ERTS_SMP
erts_mtx_init(&ptsp->mtx, lock_str, instr_id, ERTS_LOCK_FLAGS_CATEGORY_IO);
-#endif
}
ERTS_GLB_INLINE void
erts_port_task_sched_lock(ErtsPortTaskSched *ptsp)
{
-#ifdef ERTS_SMP
erts_mtx_lock(&ptsp->mtx);
-#endif
}
ERTS_GLB_INLINE void
erts_port_task_sched_unlock(ErtsPortTaskSched *ptsp)
{
-#ifdef ERTS_SMP
erts_mtx_unlock(&ptsp->mtx);
-#endif
}
ERTS_GLB_INLINE int
@@ -222,9 +212,7 @@ erts_port_task_sched_lock_is_locked(ErtsPortTaskSched *ptsp)
ERTS_GLB_INLINE void
erts_port_task_fini_sched(ErtsPortTaskSched *ptsp)
{
-#ifdef ERTS_SMP
erts_mtx_destroy(&ptsp->mtx);
-#endif
}
ERTS_GLB_INLINE void
@@ -265,10 +253,8 @@ ErtsProc2PortSigData *erts_port_task_alloc_p2p_sig_data(void);
ErtsProc2PortSigData *erts_port_task_alloc_p2p_sig_data_extra(size_t extra, void **extra_ptr);
void erts_port_task_free_p2p_sig_data(ErtsProc2PortSigData *sigdp);
-#ifdef ERTS_SMP
void erts_enqueue_port(ErtsRunQueue *rq, Port *pp);
Port *erts_dequeue_port(ErtsRunQueue *rq);
-#endif
#undef ERTS_INCLUDE_SCHEDULER_INTERNALS
#endif /* ERL_PORT_TASK_H__ */
#endif /* ERTS_PORT_TASK_ONLY_BASIC_TYPES__ */
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index d58203e9e1..ba6d56269d 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -82,11 +82,7 @@
#define ERTS_FAKE_SCHED_BIND_PRINT_SORTED_CPU_DATA
#endif
-#if defined(DEBUG) && 0
-#define HARDDEBUG
-#else
#undef HARDDEBUG
-#endif
#ifdef HARDDEBUG
#define HARDDEBUG_RUNQS
@@ -127,13 +123,11 @@ runq_got_work_to_execute_flags(Uint32 flags)
return !ERTS_IS_RUNQ_EMPTY_FLGS(flags);
}
-#ifdef ERTS_SMP
static ERTS_INLINE int
runq_got_work_to_execute(ErtsRunQueue *rq)
{
return runq_got_work_to_execute_flags(ERTS_RUNQ_FLGS_GET_NOB(rq));
}
-#endif
#undef RUNQ_READ_RQ
#undef RUNQ_SET_RQ
@@ -208,7 +202,6 @@ static struct {
int sys_schedule;
} sched_busy_wait;
-#ifdef ERTS_SMP
int erts_disable_proc_not_running_opt;
static ErtsAuxWorkData *aux_thread_aux_work_data;
@@ -384,26 +377,19 @@ do { \
balance_info.prev_rise.reds = (REDS); \
} while (0)
-#endif
erts_sched_stat_t erts_sched_stat;
-#ifdef USE_THREADS
static erts_tsd_key_t ERTS_WRITE_UNLIKELY(sched_data_key);
-#endif
static erts_smp_atomic32_t function_calls;
-#ifdef ERTS_SMP
static erts_smp_atomic32_t doing_sys_schedule;
static erts_smp_atomic32_t no_empty_run_queues;
long erts_runq_supervision_interval = 0;
static ethr_event runq_supervision_event;
static erts_tid_t runq_supervisor_tid;
static erts_atomic_t runq_supervisor_sleeping;
-#else /* !ERTS_SMP */
-ErtsSchedulerData *erts_scheduler_data;
-#endif
ErtsAlignedRunQueue * ERTS_WRITE_UNLIKELY(erts_aligned_run_queues);
Uint ERTS_WRITE_UNLIKELY(erts_no_run_queues);
@@ -471,11 +457,9 @@ typedef union {
static ErtsAlignedSchedulerSleepInfo *aligned_sched_sleep_info;
#ifdef ERTS_DIRTY_SCHEDULERS
-#ifdef ERTS_SMP
static ErtsAlignedSchedulerSleepInfo *aligned_dirty_cpu_sched_sleep_info;
static ErtsAlignedSchedulerSleepInfo *aligned_dirty_io_sched_sleep_info;
#endif
-#endif
static Uint last_reductions;
static Uint last_exact_reductions;
@@ -638,11 +622,8 @@ dbg_chk_aux_work_val(erts_aint32_t value)
valid |= ERTS_SSI_AUX_WORK_MISC;
valid |= ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM;
valid |= ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC;
-#if ERTS_USE_ASYNC_READY_Q
valid |= ERTS_SSI_AUX_WORK_ASYNC_READY;
valid |= ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
-#endif
-#ifdef ERTS_SMP
valid |= ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP;
valid |= ERTS_SSI_AUX_WORK_MISC_THR_PRGR;
valid |= ERTS_SSI_AUX_WORK_DD;
@@ -651,7 +632,6 @@ dbg_chk_aux_work_val(erts_aint32_t value)
valid |= ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR;
valid |= ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP;
valid |= ERTS_SSI_AUX_WORK_PENDING_EXITERS;
-#endif
#if HAVE_ERTS_MSEG
valid |= ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK;
#endif
@@ -673,10 +653,8 @@ dbg_chk_aux_work_val(erts_aint32_t value)
#define ERTS_DBG_CHK_SSI_AUX_WORK(SSI)
#endif
-#ifdef ERTS_SMP
static void do_handle_pending_exiters(ErtsProcList *);
static void wake_scheduler(ErtsRunQueue *rq);
-#endif
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
int
@@ -714,9 +692,7 @@ erts_step_proc_interval(void)
void
erts_pre_init_process(void)
{
-#ifdef USE_THREADS
erts_tsd_key_create(&sched_data_key, "erts_sched_data_key");
-#endif
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP_IX]
= "DELAYED_AW_WAKEUP";
@@ -810,10 +786,8 @@ void
erts_init_process(int ncpu, int proc_tab_size, int legacy_proc_tab)
{
-#ifdef ERTS_SMP
erts_disable_proc_not_running_opt = 0;
erts_init_proc_lock(ncpu);
-#endif
init_proclist_alloc();
@@ -825,11 +799,7 @@ erts_init_process(int ncpu, int proc_tab_size, int legacy_proc_tab)
sizeof(Process),
"process_table",
legacy_proc_tab,
-#ifdef ERTS_SMP
1
-#else
- 0
-#endif
);
last_reductions = 0;
@@ -1145,7 +1115,6 @@ read_dirty_sched_wall_time(ErtsSchedulerData *esdp, ErtsDirtySchedWallTime *info
#endif
-#ifdef ERTS_SMP
static void
dirty_sched_wall_time_change(ErtsSchedulerData *esdp, int working)
@@ -1202,7 +1171,6 @@ dirty_sched_wall_time_change(ErtsSchedulerData *esdp, int working)
#endif
}
-#endif /* ERTS_SMP */
static void
sched_wall_time_change(ErtsSchedulerData *esdp, int working)
@@ -1481,13 +1449,11 @@ erts_sched_wall_time_request(Process *c_p, int set, int enable,
erts_proc_add_refc(c_p, (Sint32) erts_no_schedulers);
-#ifdef ERTS_SMP
if (erts_no_schedulers > 1)
erts_schedule_multi_misc_aux_work(1,
erts_no_schedulers,
reply_sched_wall_time,
(void *) swtrp);
-#endif
reply_sched_wall_time((void *) swtrp);
@@ -1547,13 +1513,11 @@ Eterm erts_system_check_request(Process *c_p) {
erts_proc_add_refc(c_p, (Sint) erts_no_schedulers);
-#ifdef ERTS_SMP
if (erts_no_schedulers > 1)
erts_schedule_multi_misc_aux_work(1,
erts_no_schedulers,
reply_system_check,
(void *) scrp);
-#endif
reply_system_check((void *) scrp);
@@ -1620,7 +1584,6 @@ erts_psd_set_init(Process *p, int ix, void *data)
return old;
}
-#ifdef ERTS_SMP
void
erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flags)
@@ -1648,7 +1611,6 @@ erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flags)
}
}
-#endif
static ERTS_INLINE void
set_aux_work_flags_wakeup_nob(ErtsSchedulerSleepInfo *ssi,
@@ -1664,11 +1626,7 @@ set_aux_work_flags_wakeup_nob(ErtsSchedulerSleepInfo *ssi,
old_flgs = erts_atomic32_read_bor_nob(&ssi->aux_work, flgs);
if ((old_flgs & flgs) != flgs) {
-#ifdef ERTS_SMP
erts_sched_poke(ssi);
-#else
- erts_sys_schedule_interrupt(1);
-#endif
}
}
}
@@ -1684,11 +1642,7 @@ set_aux_work_flags_wakeup_relb(ErtsSchedulerSleepInfo *ssi,
old_flgs = erts_atomic32_read_bor_relb(&ssi->aux_work, flgs);
if ((old_flgs & flgs) != flgs) {
-#ifdef ERTS_SMP
erts_sched_poke(ssi);
-#else
- erts_sys_schedule_interrupt(1);
-#endif
}
}
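Both wakeup helpers above (the nob and relb variants) follow the same idea once the non-SMP branch is gone: OR the new flags into ssi->aux_work and poke the sleeping scheduler only if at least one of them was not already set. A sketch of that core, reconstructed from the context lines:

/* Reconstructed: set aux-work flags and wake the scheduler only when a
 * flag was newly set, so repeated requests do not cause repeated pokes. */
old_flgs = erts_atomic32_read_bor_nob(&ssi->aux_work, flgs);
if ((old_flgs & flgs) != flgs)
    erts_sched_poke(ssi);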
@@ -1704,7 +1658,6 @@ unset_aux_work_flags(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flgs)
return erts_atomic32_read_band_nob(&ssi->aux_work, ~flgs);
}
-#ifdef ERTS_SMP
static ERTS_INLINE void
haw_chk_later_cleanup_op_wakeup(ErtsAuxWorkData *awdp, ErtsThrPrgrVal val)
@@ -1851,7 +1804,6 @@ schedule_aux_work_wakeup(ErtsAuxWorkData *awdp,
}
}
-#endif
typedef struct erts_misc_aux_work_t_ erts_misc_aux_work_t;
struct erts_misc_aux_work_t_ {
@@ -1892,11 +1844,7 @@ init_misc_aux_work(void)
sizeof(erts_algnd_misc_aux_work_q_t)
* (erts_no_schedulers+1));
-#ifdef ERTS_SMP
ix = 0; /* aux_thread + schedulers */
-#else
- ix = 1; /* scheduler only */
-#endif
for (; ix <= erts_no_schedulers; ix++) {
qinit.arg = (void *) ERTS_SCHED_SLEEP_INFO_IX(ix-1);
@@ -1914,10 +1862,8 @@ misc_aux_work_clean(ErtsThrQ_t *q,
set_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MISC);
return aux_work | ERTS_SSI_AUX_WORK_MISC;
case ERTS_THR_Q_NEED_THR_PRGR:
-#ifdef ERTS_SMP
set_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_MISC_THR_PRGR);
haw_thr_prgr_soft_wakeup(awdp, erts_thr_q_need_thr_progress(q));
-#endif
case ERTS_THR_Q_CLEAN:
break;
}
@@ -1943,7 +1889,6 @@ handle_misc_aux_work(ErtsAuxWorkData *awdp,
return misc_aux_work_clean(q, awdp, aux_work & ~ERTS_SSI_AUX_WORK_MISC);
}
-#ifdef ERTS_SMP
static ERTS_INLINE erts_aint32_t
handle_misc_aux_work_thr_prgr(ErtsAuxWorkData *awdp,
@@ -1963,7 +1908,6 @@ handle_misc_aux_work_thr_prgr(ErtsAuxWorkData *awdp,
aux_work & ~ERTS_SSI_AUX_WORK_MISC_THR_PRGR);
}
-#endif
static ERTS_INLINE void
schedule_misc_aux_work(int sched_id,
@@ -1973,11 +1917,7 @@ schedule_misc_aux_work(int sched_id,
ErtsThrQ_t *q;
erts_misc_aux_work_t *mawp;
-#ifdef ERTS_SMP
ASSERT(0 <= sched_id && sched_id <= erts_no_schedulers);
-#else
- ASSERT(sched_id == 1);
-#endif
q = &misc_aux_work_queues[sched_id].q;
mawp = misc_aux_work_alloc();
@@ -2021,7 +1961,6 @@ erts_schedule_multi_misc_aux_work(int ignore_self,
}
}
-#if ERTS_USE_ASYNC_READY_Q
void
erts_notify_check_async_ready_queue(void *vno)
@@ -2049,9 +1988,7 @@ handle_async_ready(ErtsAuxWorkData *awdp,
}
return aux_work;
}
-#ifdef ERTS_SMP
awdp->async_ready.need_thr_prgr = 0;
-#endif
set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN);
return ((aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY)
| ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN);
@@ -2066,7 +2003,6 @@ handle_async_ready_clean(ErtsAuxWorkData *awdp,
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#ifdef ERTS_SMP
if (awdp->async_ready.need_thr_prgr
&& !erts_thr_progress_has_reached_this(haw_thr_prgr_current(awdp),
awdp->async_ready.thr_prgr)) {
@@ -2075,26 +2011,20 @@ handle_async_ready_clean(ErtsAuxWorkData *awdp,
awdp->async_ready.need_thr_prgr = 0;
thr_prgr_p = (void *) &awdp->async_ready.thr_prgr;
-#else
- thr_prgr_p = NULL;
-#endif
switch (erts_async_ready_clean(awdp->async_ready.queue, thr_prgr_p)) {
case ERTS_ASYNC_READY_CLEAN:
unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN);
return aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
-#ifdef ERTS_SMP
case ERTS_ASYNC_READY_NEED_THR_PRGR:
haw_thr_prgr_soft_wakeup(awdp, awdp->async_ready.thr_prgr);
awdp->async_ready.need_thr_prgr = 1;
return aux_work & ~ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN;
-#endif
default:
return aux_work;
}
}
-#endif /* ERTS_USE_ASYNC_READY_Q */
static ERTS_INLINE erts_aint32_t
@@ -2118,7 +2048,6 @@ handle_fix_alloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
return aux_work;
}
-#ifdef ERTS_SMP
void
erts_alloc_notify_delayed_dealloc(int ix)
@@ -2375,20 +2304,15 @@ enqueue_later_op(ErtsSchedulerData *esdp,
return later;
}
-#endif /* ERTS_SMP */
void
erts_schedule_thr_prgr_later_op(void (*later_func)(void *),
void *later_data,
ErtsThrPrgrLaterOp *lop)
{
-#ifndef ERTS_SMP
- later_func(later_data);
-#else
ErtsSchedulerData *esdp = erts_get_scheduler_data();
ErtsThrPrgrVal later = enqueue_later_op(esdp, later_func, later_data, lop);
haw_thr_prgr_wakeup(&esdp->aux_work_data, later);
-#endif
}
void
@@ -2397,13 +2321,9 @@ erts_schedule_thr_prgr_later_cleanup_op(void (*later_func)(void *),
ErtsThrPrgrLaterOp *lop,
UWord size)
{
-#ifndef ERTS_SMP
- later_func(later_data);
-#else
ErtsSchedulerData *esdp = erts_get_scheduler_data();
ErtsThrPrgrVal later = enqueue_later_op(esdp, later_func, later_data, lop);
haw_thr_prgr_later_cleanup_op_wakeup(&esdp->aux_work_data, later, size);
-#endif
}
static ERTS_INLINE erts_aint32_t
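erts_schedule_thr_prgr_later_op() and its cleanup variant above are the generic entry points for the deferred-work pattern used throughout this diff. A hypothetical caller sketch (struct my_obj, my_free_cb and the later_op field placement are illustrative, not from the source):

/* Illustrative only: defer freeing an object until all managed threads
 * have passed the thread-progress value chosen by enqueue_later_op(). */
struct my_obj {
    ErtsThrPrgrLaterOp later_op;   /* storage for the pending operation */
    /* ... payload ... */
};

static void
my_free_cb(void *arg)
{
    struct my_obj *obj = arg;
    /* ... free or finalize obj here; no thread can reference it anymore ... */
    (void) obj;
}

static void
defer_free(struct my_obj *obj)   /* must run on a scheduler thread */
{
    /* The cleanup variant also accounts the size, so pending cleanup
     * volume can be used to drive thread progress forward. */
    erts_schedule_thr_prgr_later_cleanup_op(my_free_cb,
                                            (void *) obj,
                                            &obj->later_op,
                                            sizeof(struct my_obj));
}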
@@ -2453,11 +2373,7 @@ setup_thr_debug_wait_completed(void *vproc)
ErtsSchedulerData *esdp = erts_get_scheduler_data();
ErtsAuxWorkData *awdp;
erts_aint32_t wait_flags, aux_work_flags;
-#ifdef ERTS_SMP
awdp = esdp ? &esdp->aux_work_data : aux_thread_aux_work_data;
-#else
- awdp = &esdp->aux_work_data;
-#endif
wait_flags = 0;
aux_work_flags = ERTS_SSI_AUX_WORK_DEBUG_WAIT_COMPLETED;
@@ -2466,18 +2382,14 @@ setup_thr_debug_wait_completed(void *vproc)
erts_alloc_fix_alloc_shrink(awdp->sched_id, 0);
wait_flags |= (ERTS_SSI_AUX_WORK_DD
| ERTS_SSI_AUX_WORK_DD_THR_PRGR);
-#ifdef ERTS_SMP
aux_work_flags |= ERTS_SSI_AUX_WORK_DD;
-#endif
}
if (debug_wait_completed_flags & ERTS_DEBUG_WAIT_COMPLETED_TIMER_CANCELLATIONS) {
wait_flags |= (ERTS_SSI_AUX_WORK_CNCLD_TMRS
| ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR);
-#ifdef ERTS_SMP
if (awdp->esdp && !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp))
aux_work_flags |= ERTS_SSI_AUX_WORK_CNCLD_TMRS;
-#endif
}
set_aux_work_flags_wakeup_nob(awdp->ssi, aux_work_flags);
@@ -2496,21 +2408,17 @@ static void later_thr_debug_wait_completed(void *vlop)
{
struct debug_lop *lop = vlop;
erts_aint32_t count = (erts_aint32_t) erts_no_schedulers;
-#ifdef ERTS_SMP
count += 1; /* aux thread */
-#endif
if (erts_atomic32_dec_read_mb(&debug_wait_completed_count) == count) {
/* scheduler threads */
erts_schedule_multi_misc_aux_work(0,
erts_no_schedulers,
setup_thr_debug_wait_completed,
lop->proc);
-#ifdef ERTS_SMP
/* aux_thread */
erts_schedule_misc_aux_work(0,
setup_thr_debug_wait_completed,
lop->proc);
-#endif
}
erts_free(ERTS_ALC_T_DEBUG, lop);
}
@@ -2531,9 +2439,7 @@ erts_debug_wait_completed(Process *c_p, int flags)
{
/* Only one process at a time can do this */
erts_aint32_t count = (erts_aint32_t) (2*erts_no_schedulers);
-#ifdef ERTS_SMP
count += 1; /* aux thread */
-#endif
if (0 == erts_atomic32_cmpxchg_mb(&debug_wait_completed_count,
count,
0)) {
@@ -2661,7 +2567,6 @@ handle_mseg_cache_check(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiti
#endif
-#ifdef ERTS_SMP
static ERTS_INLINE erts_aint32_t
handle_pending_exiters(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
@@ -2683,7 +2588,6 @@ handle_pending_exiters(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin
return aux_work & ~ERTS_SSI_AUX_WORK_PENDING_EXITERS;
}
-#endif
static ERTS_INLINE erts_aint32_t
handle_setup_aux_work_timer(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
@@ -2715,9 +2619,7 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_AUX);
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
-#ifdef ERTS_SMP
haw_thr_prgr_current_reset(awdp);
-#endif
ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
ASSERT(aux_work);
@@ -2736,7 +2638,6 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
* Keep ERTS_SSI_AUX_WORK flags in expected frequency order relative
* to each other. Most frequent first.
*/
-#ifdef ERTS_SMP
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DELAYED_AW_WAKEUP,
handle_delayed_aux_work_wakeup);
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DD,
@@ -2744,13 +2645,11 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
/* DD must be before DD_THR_PRGR */
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_DD_THR_PRGR,
handle_delayed_dealloc_thr_prgr);
-#endif
HANDLE_AUX_WORK((ERTS_SSI_AUX_WORK_FIX_ALLOC_LOWER_LIM
| ERTS_SSI_AUX_WORK_FIX_ALLOC_DEALLOC),
handle_fix_alloc);
-#ifdef ERTS_SMP
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP,
handle_thr_prgr_later_op);
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_CNCLD_TMRS,
@@ -2758,28 +2657,21 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
/* CNCLD_TMRS must be before CNCLD_TMRS_THR_PRGR */
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR,
handle_canceled_timers_thr_prgr);
-#endif
-#if ERTS_USE_ASYNC_READY_Q
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_ASYNC_READY,
handle_async_ready);
/* ASYNC_READY must be before ASYNC_READY_CLEAN */
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN,
handle_async_ready_clean);
-#endif
-#ifdef ERTS_SMP
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_MISC_THR_PRGR,
handle_misc_aux_work_thr_prgr);
-#endif
/* MISC_THR_PRGR must be before MISC */
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_MISC,
handle_misc_aux_work);
-#ifdef ERTS_SMP
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_PENDING_EXITERS,
handle_pending_exiters);
-#endif
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_SET_TMO,
handle_setup_aux_work_timer);
@@ -2805,10 +2697,8 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
ERTS_DBG_CHK_AUX_WORK_VAL(aux_work);
-#ifdef ERTS_SMP
if (waiting && !aux_work)
haw_thr_prgr_current_check_progress(awdp);
-#endif
ERTS_MSACC_UPDATE_CACHE();
ERTS_MSACC_POP_STATE_M();
@@ -2907,11 +2797,7 @@ aux_work_timeout(void *vesdp)
ASSERT(esdp == (ErtsSchedulerData *) vesdp);
#endif
-#ifdef ERTS_SMP
i = 0;
-#else
- i = 1;
-#endif
for (; i <= erts_no_schedulers; i++) {
erts_aint32_t type;
@@ -2945,9 +2831,6 @@ erts_set_aux_work_timeout(int ix, erts_aint32_t type, int enable)
{
erts_aint32_t old, refc;
-#ifndef ERTS_SMP
- ix = 1;
-#endif
ERTS_DBG_CHK_AUX_WORK_VAL(type);
ERTS_DBG_CHK_AUX_WORK_VAL(erts_atomic32_read_nob(&aux_work_tmo->type[ix]));
@@ -3014,7 +2897,6 @@ erts_active_schedulers(void)
return as;
}
-#ifdef ERTS_SMP
static ERTS_INLINE void
clear_sys_scheduling(void)
@@ -3028,20 +2910,14 @@ try_set_sys_scheduling(void)
return 0 == erts_smp_atomic32_cmpxchg_acqb(&doing_sys_schedule, 1, 0);
}
-#endif
static ERTS_INLINE int
prepare_for_sys_schedule(int non_blocking)
{
if (non_blocking && erts_eager_check_io) {
-#ifdef ERTS_SMP
return try_set_sys_scheduling();
-#else
- return 1;
-#endif
}
else {
-#ifdef ERTS_SMP
while (!erts_port_task_have_outstanding_io_tasks()
&& try_set_sys_scheduling()) {
if (!erts_port_task_have_outstanding_io_tasks())
@@ -3049,13 +2925,9 @@ prepare_for_sys_schedule(int non_blocking)
clear_sys_scheduling();
}
return 0;
-#else
- return !erts_port_task_have_outstanding_io_tasks();
-#endif
}
}
-#ifdef ERTS_SMP
static ERTS_INLINE void
sched_change_waiting_sys_to_waiting(Uint no, ErtsRunQueue *rq)
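The two hunks above reduce prepare_for_sys_schedule() to a single code path built on try_set_sys_scheduling()/clear_sys_scheduling(). A reconstruction from the context lines, with the race that the second check closes spelled out in comments:

/* Reconstructed from the hunks above. */
static ERTS_INLINE int
prepare_for_sys_schedule(int non_blocking)
{
    if (non_blocking && erts_eager_check_io) {
        return try_set_sys_scheduling();
    }
    else {
        while (!erts_port_task_have_outstanding_io_tasks()
               && try_set_sys_scheduling()) {
            /* Re-check after claiming doing_sys_schedule: an I/O task may
             * have been scheduled between the first check and the claim. */
            if (!erts_port_task_have_outstanding_io_tasks())
                return 1;
            clear_sys_scheduling();   /* give up the claim and retry */
        }
        return 0;
    }
}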
@@ -3381,7 +3253,6 @@ aux_thread(void *unused)
static void suspend_scheduler(ErtsSchedulerData *esdp);
-#endif /* ERTS_SMP */
static void
scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
@@ -3390,12 +3261,9 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
ErtsSchedulerSleepInfo *ssi = esdp->ssi;
int spincount;
erts_aint32_t aux_work = 0;
-#ifdef ERTS_SMP
int thr_prgr_active = 1;
erts_aint32_t flgs;
-#endif
ERTS_MSACC_PUSH_STATE_M();
-#ifdef ERTS_SMP
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
@@ -3566,7 +3434,6 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
}
else
-#endif
{
erts_smp_atomic32_set_relb(&function_calls, 0);
@@ -3608,30 +3475,19 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
}
sys_aux_work:
-#ifndef ERTS_SMP
- erts_sys_schedule_interrupt(0);
-#endif
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
if (!working)
sched_wall_time_change(esdp, working = 1);
-#ifdef ERTS_SMP
if (!thr_prgr_active)
erts_thr_progress_active(esdp, thr_prgr_active = 1);
-#endif
aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
ERTS_MSACC_UPDATE_CACHE();
-#ifdef ERTS_SMP
if (aux_work && erts_thr_progress_update(esdp))
erts_thr_progress_leader_update(esdp);
-#endif
}
-#ifndef ERTS_SMP
- if (erts_smp_atomic32_read_dirty(&rq->len) != 0 || rq->misc.start)
- goto sys_woken;
-#else
flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
if (!(flgs & ERTS_SSI_FLG_WAITING)) {
ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
@@ -3653,12 +3509,10 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
goto tse_wait;
}
}
-#endif
}
erts_smp_runq_lock(rq);
-#ifdef ERTS_SMP
/*
* If we got new I/O tasks we aren't allowed to
* sleep in erl_sys_schedule().
@@ -3681,12 +3535,10 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
goto tse_wait;
}
}
-#endif
if (aux_work) {
erts_smp_runq_unlock(rq);
goto sys_poll_aux_work;
}
-#ifdef ERTS_SMP
flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
if (!(flgs & ERTS_SSI_FLG_SLEEPING)) {
if (!(flgs & ERTS_SSI_FLG_WAITING)) {
@@ -3705,17 +3557,14 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
-#endif
erts_smp_runq_unlock(rq);
if (working)
sched_wall_time_change(esdp, working = 0);
-#ifdef ERTS_SMP
if (thr_prgr_active)
erts_thr_progress_active(esdp, thr_prgr_active = 0);
-#endif
ASSERT(!erts_port_task_have_outstanding_io_tasks());
@@ -3732,11 +3581,6 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
erts_bump_timers(esdp->timer_wheel, current_time);
}
-#ifndef ERTS_SMP
- if (erts_smp_atomic32_read_dirty(&rq->len) == 0 && !rq->misc.start)
- goto sys_aux_work;
- sys_woken:
-#else
flgs = sched_prep_cont_spin_wait(ssi);
if (flgs & ERTS_SSI_FLG_WAITING)
goto sys_aux_work;
@@ -3756,7 +3600,6 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
erts_smp_atomic32_read_band_nob(&ssi->flags,
(ERTS_SSI_FLG_SUSPENDED
| ERTS_SSI_FLG_MSB_EXEC));
-#endif
if (!working)
sched_wall_time_change(esdp, working = 1);
sched_active_sys(esdp->no, rq);
@@ -3768,7 +3611,6 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
}
-#ifdef ERTS_SMP
static ERTS_INLINE erts_aint32_t
ssi_flags_set_wake(ErtsSchedulerSleepInfo *ssi)
@@ -4025,12 +3867,10 @@ wake_scheduler_on_empty_runq(ErtsRunQueue *crq)
}
}
-#endif /* ERTS_SMP */
static ERTS_INLINE void
smp_notify_inc_runq(ErtsRunQueue *runq)
{
-#ifdef ERTS_SMP
if (runq) {
#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
@@ -4039,7 +3879,6 @@ smp_notify_inc_runq(ErtsRunQueue *runq)
#endif
wake_scheduler(runq);
}
-#endif
}
void
@@ -4051,16 +3890,12 @@ erts_smp_notify_inc_runq(ErtsRunQueue *runq)
void
erts_sched_notify_check_cpu_bind(void)
{
-#ifdef ERTS_SMP
int ix;
for (ix = 0; ix < erts_no_run_queues; ix++) {
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
(void) ERTS_RUNQ_FLGS_SET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND);
wake_scheduler(rq);
}
-#else
- erts_sched_check_cpu_bind(erts_get_scheduler_data());
-#endif
}
@@ -4174,7 +4009,6 @@ free_proxy_proc(Process *proxy)
erts_free(ERTS_ALC_T_PROC, proxy);
}
-#ifdef ERTS_SMP
static ErtsRunQueue *
check_immigration_need(ErtsRunQueue *c_rq, ErtsMigrationPath *mp, int prio)
@@ -5615,20 +5449,15 @@ change_no_used_runqs(int used)
}
-#endif /* #ifdef ERTS_SMP */
Uint
erts_debug_nbalance(void)
{
-#ifdef ERTS_SMP
Uint n;
erts_smp_mtx_lock(&balance_info.update_mtx);
n = balance_info.n;
erts_smp_mtx_unlock(&balance_info.update_mtx);
return n;
-#else
- return 0;
-#endif
}
/* Wakeup other schedulers */
@@ -5674,7 +5503,6 @@ typedef enum {
#define ERTS_WAKEUP_OTHER_DEC_LEGACY 10
#define ERTS_WAKEUP_OTHER_FIXED_INC_LEGACY (CONTEXT_REDS/10)
-#ifdef ERTS_SMP
static struct {
ErtsSchedWakeupOtherThreshold threshold;
@@ -5868,16 +5696,13 @@ runq_supervisor(void *unused)
return NULL;
}
-#endif
void
erts_early_init_scheduling(int no_schedulers)
{
aux_work_timeout_early_init(no_schedulers);
-#ifdef ERTS_SMP
wakeup_other.threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM;
wakeup_other.type = ERTS_SCHED_WAKEUP_OTHER_TYPE_DEFAULT;
-#endif
sched_busy_wait.sys_schedule = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM;
sched_busy_wait.tse = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM
* ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT);
@@ -5888,7 +5713,6 @@ erts_early_init_scheduling(int no_schedulers)
int
erts_sched_set_wakeup_other_thresold(char *str)
{
-#ifdef ERTS_SMP
ErtsSchedWakeupOtherThreshold threshold;
if (sys_strcmp(str, "very_high") == 0)
threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_VERY_HIGH;
@@ -5905,20 +5729,11 @@ erts_sched_set_wakeup_other_thresold(char *str)
wakeup_other.threshold = threshold;
set_wakeup_other_data();
return 0;
-#else
- if (sys_strcmp(str, "very_high") == 0 || sys_strcmp(str, "high") == 0 ||
- sys_strcmp(str, "medium") == 0 || sys_strcmp(str, "low") == 0 ||
- sys_strcmp(str, "very_low") == 0) {
- return 0;
- }
- return EINVAL;
-#endif
}
int
erts_sched_set_wakeup_other_type(char *str)
{
-#ifdef ERTS_SMP
ErtsSchedWakeupOtherType type;
if (sys_strcmp(str, "default") == 0)
type = ERTS_SCHED_WAKEUP_OTHER_TYPE_DEFAULT;
@@ -5928,12 +5743,6 @@ erts_sched_set_wakeup_other_type(char *str)
return EINVAL;
wakeup_other.type = type;
return 0;
-#else
- if (sys_strcmp(str, "default") == 0 || sys_strcmp(str, "legacy") == 0) {
- return 0;
- }
- return EINVAL;
-#endif
}
int
@@ -6024,7 +5833,6 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp)
awdp->sched_id = id;
awdp->esdp = esdp;
awdp->ssi = esdp ? esdp->ssi : NULL;
-#ifdef ERTS_SMP
awdp->latest_wakeup = ERTS_THR_PRGR_VAL_FIRST;
awdp->misc.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
awdp->dd.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
@@ -6033,15 +5841,9 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp)
awdp->later_op.size = 0;
awdp->later_op.first = NULL;
awdp->later_op.last = NULL;
-#endif
-#ifdef ERTS_USE_ASYNC_READY_Q
-#ifdef ERTS_SMP
awdp->async_ready.need_thr_prgr = 0;
awdp->async_ready.thr_prgr = ERTS_THR_PRGR_VAL_WAITING;
-#endif
awdp->async_ready.queue = NULL;
-#endif
-#ifdef ERTS_SMP
awdp->delayed_wakeup.next = ERTS_DELAYED_WAKEUP_INFINITY;
if (!dawwp) {
awdp->delayed_wakeup.job = NULL;
@@ -6057,7 +5859,6 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp)
for (i = 0; i <= erts_no_schedulers; i++)
awdp->delayed_wakeup.sched2jix[i] = -1;
}
-#endif
awdp->debug.wait_completed.flags = 0;
awdp->debug.wait_completed.callback = NULL;
awdp->debug.wait_completed.arg = NULL;
@@ -6072,11 +5873,9 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
Uint64 time_stamp)
{
esdp->timer_wheel = NULL;
-#ifdef ERTS_SMP
erts_bits_init_state(&esdp->erl_bits_state);
esdp->match_pseudo_process = NULL;
esdp->free_process = NULL;
-#endif
esdp->x_reg_array =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER,
ERTS_X_REGS_ALLOCATED *
@@ -6145,9 +5944,7 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
if (daww_ptr) {
init_aux_work_data(&esdp->aux_work_data, esdp, *daww_ptr);
-#ifdef ERTS_SMP
*daww_ptr += daww_sz;
-#endif
}
esdp->reductions = 0;
@@ -6168,16 +5965,12 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
char *daww_ptr;
size_t daww_sz;
size_t size_runqs;
-#ifdef ERTS_SMP
erts_aint32_t set_schdlr_sspnd_change_flags;
-#endif
init_misc_op_list_alloc();
init_proc_sys_task_queues_alloc();
-#ifdef ERTS_SMP
set_wakeup_other_data();
-#endif
#if ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
if (erts_sched_balance_util)
@@ -6201,9 +5994,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
size_runqs = sizeof(ErtsAlignedRunQueue) * tot_rqs;
erts_aligned_run_queues =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_RUNQS, size_runqs);
-#ifdef ERTS_SMP
erts_smp_atomic32_init_nob(&no_empty_run_queues, 0);
-#endif
erts_no_run_queues = n;
@@ -6222,7 +6013,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
erts_smp_cnd_init(&rq->cnd);
#ifdef ERTS_DIRTY_SCHEDULERS
-#ifdef ERTS_SMP
if (ERTS_RUNQ_IX_IS_DIRTY(ix)) {
erts_smp_spinlock_init(&rq->sleepers.lock, "dirty_run_queue_sleep_list",
make_small(ix + 1),
@@ -6230,7 +6020,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
}
rq->sleepers.list = NULL;
#endif
-#endif
rq->waiting = 0;
rq->woken = 0;
@@ -6276,7 +6065,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
}
-#ifdef ERTS_SMP
if (erts_no_run_queues != 1) {
run_queue_info = erts_alloc(ERTS_ALC_T_RUNQ_BLNS,
@@ -6287,7 +6075,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
* erts_no_run_queues));
}
-#endif
n = (int) no_schedulers;
erts_no_schedulers = n;
@@ -6300,29 +6087,22 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
#endif
/* Create and initialize scheduler sleep info */
-#ifdef ERTS_SMP
no_ssi = n+1;
-#else
- no_ssi = 1;
-#endif
aligned_sched_sleep_info =
erts_alloc_permanent_cache_aligned(
ERTS_ALC_T_SCHDLR_SLP_INFO,
no_ssi*sizeof(ErtsAlignedSchedulerSleepInfo));
for (ix = 0; ix < no_ssi; ix++) {
ErtsSchedulerSleepInfo *ssi = &aligned_sched_sleep_info[ix].ssi;
-#ifdef ERTS_SMP
#if 0 /* no need to initialize these... */
ssi->next = NULL;
ssi->prev = NULL;
#endif
erts_smp_atomic32_init_nob(&ssi->flags, 0);
ssi->event = NULL; /* initialized in sched_thread_func */
-#endif
erts_atomic32_init_nob(&ssi->aux_work, 0);
}
-#ifdef ERTS_SMP
aligned_sched_sleep_info++;
#ifdef ERTS_DIRTY_SCHEDULERS
@@ -6347,19 +6127,13 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
erts_atomic32_init_nob(&ssi->aux_work, 0);
}
#endif
-#endif
/* Create and initialize scheduler specific data */
-#ifdef ERTS_SMP
daww_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE((sizeof(ErtsDelayedAuxWorkWakeupJob)
+ sizeof(int))*(n+1));
daww_ptr = erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
daww_sz*n);
-#else
- daww_sz = 0;
- daww_ptr = NULL;
-#endif
erts_aligned_scheduler_data =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
@@ -6412,7 +6186,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
erts_atomic32_init_nob(&debug_wait_completed_count, 0); /* debug only */
debug_wait_completed_flags = 0;
-#ifdef ERTS_SMP
aux_thread_aux_work_data =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
@@ -6502,39 +6275,16 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
init_misc_aux_work();
-#else /* !ERTS_SMP */
- {
- ErtsSchedulerData *esdp;
- esdp = ERTS_SCHEDULER_IX(0);
- erts_scheduler_data = esdp;
-#ifdef USE_THREADS
- erts_tsd_set(sched_data_key, (void *) esdp);
-#endif
- }
- erts_no_dirty_cpu_schedulers = 0;
- erts_no_dirty_io_schedulers = 0;
-#endif
erts_smp_atomic32_init_nob(&function_calls, 0);
/* init port tasks */
erts_port_task_init();
-#ifndef ERTS_SMP
-#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
- erts_scheduler_data->verify_unused_temp_alloc
- = erts_alloc_get_verify_unused_temp_alloc(
- &erts_scheduler_data->verify_unused_temp_alloc_data);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(NULL);
-#endif
-#endif
erts_smp_atomic32_init_relb(&erts_halt_progress, -1);
erts_halt_code = 0;
-#if !defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
- erts_lc_set_thread_name("scheduler 1");
-#endif
}
@@ -6547,7 +6297,6 @@ erts_schedid2runq(Uint id)
return ERTS_RUNQ_IX(ix);
}
-#ifdef USE_THREADS
ErtsSchedulerData *
erts_get_scheduler_data(void)
@@ -6555,16 +6304,13 @@ erts_get_scheduler_data(void)
return (ErtsSchedulerData *) erts_tsd_get(sched_data_key);
}
-#endif
static Process *
make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio)
{
erts_aint32_t state;
Process *proxy;
-#ifdef ERTS_SMP
ErtsRunQueue *rq = RUNQ_READ_RQ(&proc->run_queue);
-#endif
state = (ERTS_PSFLG_PROXY
| ERTS_PSFLG_IN_RUNQ
@@ -6577,9 +6323,7 @@ make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio)
proxy = prev_proxy;
ASSERT(erts_smp_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY);
erts_smp_atomic32_set_nob(&proxy->state, state);
-#ifdef ERTS_SMP
RUNQ_SET_RQ(&proc->run_queue, rq);
-#endif
}
else {
proxy = erts_alloc(ERTS_ALC_T_PROC, sizeof(Process));
@@ -6592,10 +6336,8 @@ make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio)
}
#endif
erts_smp_atomic32_init_nob(&proxy->state, state);
-#ifdef ERTS_SMP
erts_smp_atomic_init_nob(&proxy->run_queue,
erts_smp_atomic_read_nob(&proc->run_queue));
-#endif
}
proxy->common.id = proc->common.id;
@@ -6798,7 +6540,6 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st
runq = erts_get_runq_proc(p);
-#ifdef ERTS_SMP
if (!(ERTS_PSFLG_BOUND & state)) {
ErtsRunQueue *new_runq = erts_check_emigration_need(runq, enq_prio);
if (new_runq) {
@@ -6806,7 +6547,6 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st
runq = new_runq;
}
}
-#endif
ASSERT(runq);
@@ -6895,13 +6635,8 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
erts_smp_runq_lock(c_rq);
-#if !defined(ERTS_SMP)
- /* Decrement refc if process struct is free... */
- return !!(n & ERTS_PSFLG_FREE);
-#else
/* Decrement refc if scheduled out from dirty scheduler... */
return !is_normal_sched;
-#endif
}
else {
Process* sched_p;
@@ -7363,7 +7098,6 @@ resume_process(Process *p, ErtsProcLocks locks)
add2runq(enqueue, enq_prio, p, state, NULL);
}
-#ifdef ERTS_SMP
static ERTS_INLINE void
sched_resume_wake__(ErtsSchedulerSleepInfo *ssi)
@@ -8803,9 +8537,7 @@ sched_thread_func(void *vesdp)
ErtsThrPrgrCallbacks callbacks;
ErtsSchedulerData *esdp = vesdp;
Uint no = esdp->no;
-#ifdef ERTS_SMP
erts_tse_t *tse;
-#endif
erts_sched_init_time_sup(esdp);
@@ -8815,7 +8547,6 @@ sched_thread_func(void *vesdp)
(void) ERTS_RUNQ_FLGS_SET_NOB(esdp->run_queue,
ERTS_RUNQ_FLG_EXEC);
-#ifdef ERTS_SMP
tse = erts_tse_fetch();
erts_tse_prepare_timed(tse);
ERTS_SCHED_SLEEP_INFO_IX(no - 1)->event = tse;
@@ -8829,7 +8560,6 @@ sched_thread_func(void *vesdp)
erts_thr_progress_register_managed_thread(esdp, &callbacks, 0);
erts_alloc_register_scheduler(vesdp);
-#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
{
char buf[31];
@@ -8838,18 +8568,14 @@ sched_thread_func(void *vesdp)
}
#endif
erts_tsd_set(sched_data_key, vesdp);
-#ifdef ERTS_SMP
#if HAVE_ERTS_MSEG
erts_mseg_late_init();
#endif
-#if ERTS_USE_ASYNC_READY_Q
esdp->aux_work_data.async_ready.queue = erts_get_async_ready_queue(no);
-#endif
erts_sched_init_check_cpu_bind(esdp);
erts_proc_lock_prepare_proc_lock_waiter();
-#endif
#ifdef HIPE
hipe_thread_signal_init();
@@ -8875,7 +8601,6 @@ sched_thread_func(void *vesdp)
}
#ifdef ERTS_DIRTY_SCHEDULERS
-#ifdef ERTS_SMP
static void*
sched_dirty_cpu_thread_func(void *vesdp)
{
@@ -8905,9 +8630,7 @@ sched_dirty_cpu_thread_func(void *vesdp)
}
#endif
erts_tsd_set(sched_data_key, vesdp);
-#if ERTS_USE_ASYNC_READY_Q
esdp->aux_work_data.async_ready.queue = NULL;
-#endif
erts_proc_lock_prepare_proc_lock_waiter();
@@ -8953,9 +8676,7 @@ sched_dirty_io_thread_func(void *vesdp)
}
#endif
erts_tsd_set(sched_data_key, vesdp);
-#if ERTS_USE_ASYNC_READY_Q
esdp->aux_work_data.async_ready.queue = NULL;
-#endif
erts_proc_lock_prepare_proc_lock_waiter();
@@ -8972,7 +8693,6 @@ sched_dirty_io_thread_func(void *vesdp)
return NULL;
}
#endif
-#endif
static ethr_tid aux_tid;
@@ -8990,7 +8710,6 @@ erts_start_schedulers(void)
opts.name = name;
-#ifdef ERTS_SMP
if (erts_runq_supervision_interval) {
opts.suggested_stack_size = 16;
erts_snprintf(opts.name, 16, "runq_supervisor");
@@ -9004,7 +8723,6 @@ erts_start_schedulers(void)
erts_exit(ERTS_ERROR_EXIT, "Failed to create run-queue supervision thread\n");
}
-#endif
opts.suggested_stack_size = erts_sched_thread_suggested_stack_size;
@@ -9031,7 +8749,6 @@ erts_start_schedulers(void)
erts_no_schedulers = actual;
#ifdef ERTS_DIRTY_SCHEDULERS
-#ifdef ERTS_SMP
{
int ix;
for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
@@ -9052,7 +8769,6 @@ erts_start_schedulers(void)
}
}
#endif
-#endif
ERTS_THR_MEMORY_BARRIER;
@@ -9079,9 +8795,7 @@ erts_start_schedulers(void)
}
}
-#endif /* ERTS_SMP */
-#ifdef ERTS_SMP
static void
add_pend_suspend(Process *suspendee,
@@ -9468,7 +9182,6 @@ handle_pend_bif_async_suspend(Process *suspendee,
}
}
-#endif /* ERTS_SMP */
/*
* The erlang:suspend_process/2 BIF
@@ -9684,20 +9397,16 @@ suspend_process_2(BIF_ALIST_2)
goto do_return;
no_suspendee:
-#ifdef ERTS_SMP
BIF_P->suspendee = NIL;
-#endif
erts_delete_suspend_monitor(&BIF_P->suspend_monitors, BIF_ARG_1);
badarg:
ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
-#ifdef ERTS_SMP
goto do_return;
yield:
ERTS_BIF_PREP_YIELD2(res, bif_export[BIF_suspend_process_2],
BIF_P, BIF_ARG_1, BIF_ARG_2);
-#endif
do_return:
if (suspendee)
@@ -9920,7 +9629,6 @@ erts_process_status(Process *rp, Eterm rpid)
erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
res = erts_process_state2status(state);
}
-#ifdef ERTS_SMP
else {
int i;
ErtsSchedulerData *esdp;
@@ -9937,7 +9645,6 @@ erts_process_status(Process *rp, Eterm rpid)
erts_smp_runq_unlock(esdp->run_queue);
}
}
-#endif
return res;
}
@@ -10218,7 +9925,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
actual_reds = reds = 0;
erts_smp_runq_lock(rq);
} else {
-#ifdef ERTS_SMP
#ifdef ERTS_DIRTY_SCHEDULERS
is_normal_sched = !esdp;
if (is_normal_sched) {
@@ -10234,11 +9940,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
#endif
ASSERT(esdp->current_process == p
|| esdp->free_process == p);
-#else
- esdp = erts_scheduler_data;
- ASSERT(esdp->current_process == p);
- is_normal_sched = 1;
-#endif
sched_out_proc:
@@ -10279,18 +9980,15 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
-#ifdef ERTS_SMP
if (p->trace_msg_q) {
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
erts_schedule_flush_trace_messages(p, 1);
erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
}
-#endif
/* have to re-read state after taking lock */
state = erts_smp_atomic32_read_nob(&p->state);
-#ifdef ERTS_SMP
if (is_normal_sched && (state & ERTS_PSFLG_PENDING_EXIT))
erts_handle_pending_exit(p, (ERTS_PROC_LOCK_MAIN
| ERTS_PROC_LOCK_TRACE
@@ -10299,7 +9997,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
handle_pending_suspend(p, (ERTS_PROC_LOCK_MAIN
| ERTS_PROC_LOCK_TRACE
| ERTS_PROC_LOCK_STATUS));
-#endif
esdp->reductions += reds;
@@ -10317,10 +10014,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
actual_reds);
esdp->current_process = NULL;
-#ifdef ERTS_SMP
if (is_normal_sched)
p->scheduler_data = NULL;
-#endif
erts_smp_proc_unlock(p, (ERTS_PROC_LOCK_MAIN
| ERTS_PROC_LOCK_STATUS
@@ -10328,7 +10023,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER);
-#ifdef ERTS_SMP
if (state & ERTS_PSFLG_FREE) {
if (!is_normal_sched) {
ASSERT(p->flags & F_DELAYED_DEL_PROC);
@@ -10338,15 +10032,12 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
esdp->free_process = NULL;
}
}
-#endif
if (dec_refc)
erts_proc_dec_refc(p);
}
-#ifdef ERTS_SMP
ASSERT(!esdp->free_process);
-#endif
ASSERT(!esdp->current_process);
ERTS_SMP_CHK_NO_PROC_LOCKS;
@@ -10367,7 +10058,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
check_activities_to_run: {
erts_aint32_t psflg_running, psflg_running_sys;
-#ifdef ERTS_SMP
ErtsMigrationPaths *mps;
ErtsMigrationPath *mp;
@@ -10433,14 +10123,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
}
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
-#else /* ERTS_SMP */
- {
- erts_aint32_t aux_work;
- aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work);
- if (aux_work)
- handle_aux_work(&esdp->aux_work_data, aux_work, 0);
- }
-#endif /* ERTS_SMP */
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
@@ -10451,7 +10133,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
}
else if (!runq_got_work_to_execute_flags(flags)) {
/* Prepare for scheduler wait */
-#ifdef ERTS_SMP
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
rq->wakeup_other = 0;
@@ -10502,16 +10183,13 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
}
empty_runq(rq);
}
-#endif
(void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_EXEC);
scheduler_wait(&fcalls, esdp, rq);
flags = ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
flags |= ERTS_RUNQ_FLG_EXEC;
ERTS_MSACC_UPDATE_CACHE();
-#ifdef ERTS_SMP
non_empty_runq(rq);
-#endif
goto check_activities_to_run;
}
@@ -10543,21 +10221,15 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
erts_bump_timers(esdp->timer_wheel, current_time);
-#ifdef ERTS_SMP
erts_smp_runq_lock(rq);
clear_sys_scheduling();
goto continue_check_activities_to_run;
-#else
- goto check_activities_to_run;
-#endif
}
if (flags & ERTS_RUNQ_FLG_MISC_OP)
exec_misc_ops(rq);
-#ifdef ERTS_SMP
wakeup_other.check(rq, flags);
-#endif
/*
* Find a new port to run.
@@ -10744,7 +10416,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_EMULATOR);
-#ifdef ERTS_SMP
if (flags & ERTS_RUNQ_FLG_PROTECTED)
(void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
@@ -10823,7 +10494,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
state = erts_smp_atomic32_read_nob(&p->state);
}
-#endif /* ERTS_SMP */
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
@@ -10932,9 +10602,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
if (!(state & ERTS_PSFLG_EXITING) && ERTS_PTMR_IS_TIMED_OUT(p)) {
BeamInstr** pi;
-#ifdef ERTS_SMP
ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
-#endif
pi = (BeamInstr **) p->def_arg_reg;
p->i = *pi;
p->flags &= ~F_INSLPQUEUE;
@@ -11188,10 +10856,8 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
Eterm st_res;
if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) {
-#ifdef ERTS_SMP
if (state & ERTS_PSFLG_PENDING_EXIT)
erts_handle_pending_exit(c_p, ERTS_PROC_LOCK_MAIN);
-#endif
ASSERT(ERTS_PROC_IS_EXITING(c_p));
break;
}
@@ -11286,18 +10952,14 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
reds -= erts_complete_off_heap_message_queue_change(c_p);
st_res = am_true;
break;
-#ifdef ERTS_SMP
case ERTS_PSTT_FTMQ:
reds -= erts_flush_trace_messages(c_p, ERTS_PROC_LOCK_MAIN);
st_res = am_true;
break;
-#endif
-#ifdef ERTS_SMP
case ERTS_PSTT_ETS_FREE_FIXATION:
reds -= erts_db_execute_free_fixation(c_p, (DbFixation*)st->arg[0]);
st_res = am_true;
break;
-#endif
default:
ERTS_INTERNAL_ERROR("Invalid process sys task type");
st_res = am_false;
@@ -11358,12 +11020,10 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds)
case ERTS_PSTT_CLA:
st_res = am_ok;
break;
-#ifdef ERTS_SMP
case ERTS_PSTT_FTMQ:
reds -= erts_flush_trace_messages(c_p, ERTS_PROC_LOCK_MAIN);
st_res = am_true;
break;
-#endif
default:
ERTS_INTERNAL_ERROR("Invalid process sys task type");
st_res = am_false;
@@ -11769,9 +11429,7 @@ flush_dirty_trace_messages(void *vpid)
void
erts_schedule_flush_trace_messages(Process *proc, int force_on_proc)
{
-#ifdef ERTS_SMP
ErtsThrPrgrDelayHandle dhndl;
-#endif
Eterm pid = proc->common.id;
#ifdef ERTS_DIRTY_SCHEDULERS
@@ -11786,15 +11444,11 @@ erts_schedule_flush_trace_messages(Process *proc, int force_on_proc)
}
#endif
-#ifdef ERTS_SMP
dhndl = erts_thr_progress_unmanaged_delay();
-#endif
erts_schedule_generic_sys_task(pid, ERTS_PSTT_FTMQ, NULL);
-#ifdef ERTS_SMP
erts_thr_progress_unmanaged_continue(dhndl);
-#endif
#ifdef ERTS_DIRTY_SCHEDULERS
if (!force_on_proc) {
@@ -12091,7 +11745,6 @@ erts_schedule_misc_op(void (*func)(void *), void *arg)
ErtsSchedulerData *esdp = erts_get_scheduler_data();
ErtsRunQueue *rq = esdp ? esdp->run_queue : ERTS_RUNQ_IX(0);
ErtsMiscOpList *molp = misc_op_list_alloc();
-#ifdef ERTS_SMP
ErtsMigrationPaths *mpaths = erts_get_migration_paths();
if (!mpaths)
@@ -12101,7 +11754,6 @@ erts_schedule_misc_op(void (*func)(void *), void *arg)
if (erq)
rq = erq;
}
-#endif
erts_smp_runq_lock(rq);
@@ -12114,9 +11766,7 @@ erts_schedule_misc_op(void (*func)(void *), void *arg)
rq->misc.start = molp;
rq->misc.end = molp;
-#ifdef ERTS_SMP
non_empty_runq(rq);
-#endif
ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_MISC_OP);
@@ -12215,9 +11865,7 @@ static void delete_process(Process* p);
void
erts_free_proc(Process *p)
{
-#ifdef ERTS_SMP
erts_proc_lock_fin(p);
-#endif
ASSERT(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_FREE);
ASSERT(0 == erts_proc_read_refc(p));
if (p->flags & F_DELAYED_DEL_PROC)
@@ -12243,11 +11891,9 @@ static void early_init_process_struct(void *varg, Eterm data)
#endif
erts_smp_atomic32_init_relb(&proc->state, arg->state);
-#ifdef ERTS_SMP
RUNQ_SET_RQ(&proc->run_queue, arg->run_queue);
erts_proc_lock_init(proc); /* All locks locked */
-#endif
}
@@ -12422,10 +12068,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
#ifdef HIPE
hipe_init_process(&p->hipe);
-#ifdef ERTS_SMP
hipe_init_process_smp(&p->hipe_smp);
#endif
-#endif
p->heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP, sizeof(Eterm)*sz);
p->old_hend = p->old_htop = p->old_heap = NULL;
p->high_water = p->heap;
@@ -12493,11 +12137,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->msg.last = &p->msg.first;
p->msg.save = &p->msg.first;
p->msg.len = 0;
-#ifdef ERTS_SMP
p->msg_inq.first = NULL;
p->msg_inq.last = &p->msg_inq.first;
p->msg_inq.len = 0;
-#endif
p->bif_timers = NULL;
p->mbuf = NULL;
p->msg_frag = NULL;
@@ -12520,14 +12162,12 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->last_old_htop = NULL;
#endif
-#ifdef ERTS_SMP
p->trace_msg_q = NULL;
p->scheduler_data = NULL;
p->suspendee = NIL;
p->pending_suspenders = NULL;
p->pending_exit.reason = THE_NON_VALUE;
p->pending_exit.bp = NULL;
-#endif
#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
p->fp_exception = 0;
@@ -12730,10 +12370,8 @@ void erts_init_empty_process(Process *p)
#ifdef HIPE
hipe_init_process(&p->hipe);
-#ifdef ERTS_SMP
hipe_init_process_smp(&p->hipe_smp);
#endif
-#endif
INIT_HOLE_CHECK(p);
#ifdef DEBUG
@@ -12746,7 +12384,6 @@ void erts_init_empty_process(Process *p)
#endif
erts_smp_atomic32_init_nob(&p->state, (erts_aint32_t) PRIORITY_NORMAL);
-#ifdef ERTS_SMP
p->scheduler_data = NULL;
p->msg_inq.first = NULL;
p->msg_inq.last = &p->msg_inq.first;
@@ -12758,7 +12395,6 @@ void erts_init_empty_process(Process *p)
erts_proc_lock_init(p);
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
RUNQ_SET_RQ(&p->run_queue, ERTS_RUNQ_IX(0));
-#endif
#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
p->fp_exception = 0;
@@ -12805,14 +12441,12 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->parent == NIL);
-#ifdef ERTS_SMP
ASSERT(p->msg_inq.first == NULL);
ASSERT(p->msg_inq.len == 0);
ASSERT(p->suspendee == NIL);
ASSERT(p->pending_suspenders == NULL);
ASSERT(p->pending_exit.reason == THE_NON_VALUE);
ASSERT(p->pending_exit.bp == NULL);
-#endif
/* Thing that erts_cleanup_empty_process() cleans up */
@@ -12837,9 +12471,7 @@ erts_cleanup_empty_process(Process* p)
free_message_buffer(p->mbuf);
p->mbuf = NULL;
}
-#ifdef ERTS_SMP
erts_proc_lock_fin(p);
-#endif
#ifdef DEBUG
erts_debug_verify_clean_empty_process(p);
#endif
@@ -12955,22 +12587,6 @@ set_proc_exiting(Process *p,
KILL_CATCHES(p);
p->i = (BeamInstr *) beam_exit;
-#ifndef ERTS_SMP
- if (state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)
- && !(state & ERTS_PSFLG_GC)) {
- /*
- * I non smp case:
- *
- * Currently executing process might be sent an exit
- * signal if it is traced by a port that it also is
- * linked to, and the port terminates during the
- * trace. In this case we want schedule out the
- * process as quickly as possible in order to detect
- * the event as fast as possible.
- */
- ERTS_VBUMP_ALL_REDS(p);
- }
-#endif
add2runq(enqueue, enq_prio, p, state, NULL);
}
@@ -13005,7 +12621,6 @@ set_proc_self_exiting(Process *c_p)
return state;
}
-#ifdef ERTS_SMP
void
erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks)
@@ -13120,7 +12735,6 @@ save_pending_exiter(Process *p, ErtsProcList *plp)
set_aux_work_flags_wakeup_nob(ssi, ERTS_SSI_AUX_WORK_PENDING_EXITERS);
}
-#endif
/*
* This function delivers an EXIT message to a process
@@ -13292,7 +12906,6 @@ send_exit_signal(Process *c_p, /* current process if and only
return 1; /* Receiver will get a message */
}
else if (reason != am_normal || (flags & ERTS_XSIG_FLG_NO_IGN_NORMAL)) {
-#ifdef ERTS_SMP
if (!(state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))) {
ASSERT(!rp->pending_exit.bp);
@@ -13419,17 +13032,6 @@ send_exit_signal(Process *c_p, /* current process if and only
* that the receiver *will* exit; either on the pending
* exit or by itself before seeing the pending exit.
*/
-#else /* !ERTS_SMP */
- erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state);
- if (!(state & ERTS_PSFLG_EXITING)) {
- set_proc_exiting(rp,
- state,
- (is_immed(rsn) || c_p == rp
- ? rsn
- : copy_object(rsn, rp)),
- NULL);
- }
-#endif
return -1; /* Receiver will exit */
}
@@ -13783,18 +13385,13 @@ erts_do_exit_process(Process* p, Eterm reason)
erts_exit(ERTS_DUMP_EXIT, "System process %T terminated: %T\n",
p->common.id, reason);
-#ifdef ERTS_SMP
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
/* By locking all locks (main lock is already locked) when going
to exiting state (ERTS_PSFLG_EXITING), it is enough to take any lock when
looking up a process (erts_pid2proc()) to prevent the looked up
process from exiting until the lock has been released. */
erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
-#endif
-#ifndef ERTS_SMP
- set_proc_self_exiting(p);
-#else
if (ERTS_PSFLG_PENDING_EXIT & set_proc_self_exiting(p)) {
/* Process exited before pending exit was received... */
p->pending_exit.reason = THE_NON_VALUE;
@@ -13807,7 +13404,6 @@ erts_do_exit_process(Process* p, Eterm reason)
cancel_suspend_of_suspendee(p, ERTS_PROC_LOCKS_ALL);
ERTS_SMP_MSGQ_MV_INQ2PRIVQ(p);
-#endif
if (IS_TRACED(p)) {
if (IS_TRACED_FL(p, F_TRACE_CALLS))
@@ -13870,7 +13466,6 @@ erts_continue_exit_process(Process *p)
p->bif_timers = NULL;
}
-#ifdef ERTS_SMP
if (p->flags & F_SCHDLR_ONLN_WAITQ)
abort_sched_onln_chng_waitq(p);
@@ -13914,7 +13509,6 @@ erts_continue_exit_process(Process *p)
__FILE__, __LINE__, (int) ssr);
}
}
-#endif
if (p->flags & F_USING_DB) {
if (erts_db_process_exiting(p, ERTS_PROC_LOCK_MAIN))
@@ -13998,16 +13592,12 @@ erts_continue_exit_process(Process *p)
erts_smp_runq_lock(rq);
-#ifdef ERTS_SMP
ASSERT(p->scheduler_data);
ASSERT(p->scheduler_data->current_process == p);
ASSERT(p->scheduler_data->free_process == NULL);
p->scheduler_data->current_process = NULL;
p->scheduler_data->free_process = p;
-#else
- erts_proc_inc_refc(p); /* Decremented in schedule() */
-#endif
/* Time of death! */
erts_ptab_delete_element(&erts_proc, &p->common);
@@ -14093,19 +13683,15 @@ erts_continue_exit_process(Process *p)
have none here */
}
-#ifdef ERTS_SMP
erts_flush_trace_messages(p, 0);
-#endif
ERTS_TRACER_CLEAR(&ERTS_TRACER(p));
if (!delay_del_proc)
delete_process(p);
-#ifdef ERTS_SMP
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
-#endif
return;
@@ -14245,7 +13831,6 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) {
erts_print(to, to_arg, "=scheduler:%u\n", esdp->no);
-#ifdef ERTS_SMP
flg = erts_smp_atomic32_read_dirty(&esdp->ssi->flags);
erts_print(to, to_arg, "Scheduler Sleep Info Flags: ");
for (i = 0; i < ERTS_SSI_FLGS_MAX && flg; i++) {
@@ -14273,7 +13858,6 @@ erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) {
}
}
erts_print(to, to_arg, "\n");
-#endif
flg = erts_atomic32_read_dirty(&esdp->ssi->aux_work);
erts_print(to, to_arg, "Scheduler Sleep Info Aux Work: ");
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 9d7ba27c50..5ed1b4e975 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -124,9 +124,7 @@ extern int erts_dio_sched_thread_suggested_stack_size;
#define ERTS_SCHED_THREAD_MIN_STACK_SIZE 20 /* Kilo words */
#define ERTS_SCHED_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */
-#ifdef ERTS_SMP
#include "erl_bits.h"
-#endif
/* process priorities */
#define PRIORITY_MAX 0
@@ -373,12 +371,10 @@ typedef struct {
#endif
struct ErtsSchedulerSleepInfo_ {
-#ifdef ERTS_SMP
ErtsSchedulerSleepInfo *next;
ErtsSchedulerSleepInfo *prev;
erts_smp_atomic32_t flags;
erts_tse_t *event;
-#endif
erts_atomic32_t aux_work;
};
@@ -433,7 +429,6 @@ typedef struct {
# define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT 1
#endif
-#ifdef ERTS_SMP
#undef ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT
#define ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT_OPT
@@ -476,7 +471,6 @@ struct ErtsMigrationPaths_ {
ErtsMigrationPath mpath[1];
};
-#endif /* ERTS_SMP */
struct ErtsRunQueue_ {
int ix;
@@ -485,10 +479,8 @@ struct ErtsRunQueue_ {
erts_smp_cnd_t cnd;
#ifdef ERTS_DIRTY_SCHEDULERS
-#ifdef ERTS_SMP
ErtsSchedulerSleepList sleepers;
#endif
-#endif
ErtsSchedulerData *scheduler;
int waiting; /* < 0 in sys schedule; > 0 on cnd variable */
@@ -531,9 +523,7 @@ struct ErtsRunQueue_ {
#endif
};
-#ifdef ERTS_SMP
extern long erts_runq_supervision_interval;
-#endif
typedef union {
ErtsRunQueue runq;
@@ -581,17 +571,12 @@ typedef struct {
int sched_id;
ErtsSchedulerData *esdp;
ErtsSchedulerSleepInfo *ssi;
-#ifdef ERTS_SMP
ErtsThrPrgrVal current_thr_prgr;
ErtsThrPrgrVal latest_wakeup;
-#endif
struct {
int ix;
-#ifdef ERTS_SMP
ErtsThrPrgrVal thr_prgr;
-#endif
} misc;
-#ifdef ERTS_SMP
struct {
ErtsThrPrgrVal thr_prgr;
} dd;
@@ -604,24 +589,17 @@ typedef struct {
ErtsThrPrgrLaterOp *first;
ErtsThrPrgrLaterOp *last;
} later_op;
-#endif
-#ifdef ERTS_USE_ASYNC_READY_Q
struct {
-#ifdef ERTS_SMP
int need_thr_prgr;
ErtsThrPrgrVal thr_prgr;
-#endif
void *queue;
} async_ready;
-#endif
-#ifdef ERTS_SMP
struct {
Uint64 next;
int *sched2jix;
int jix;
ErtsDelayedAuxWorkWakeupJob *job;
} delayed_wakeup;
-#endif
struct {
ErtsEtsAllYieldData ets_all;
/* Other yielding operations... */
@@ -659,13 +637,11 @@ struct ErtsSchedulerData_ {
ErtsTimerWheel *timer_wheel;
ErtsNextTimeoutRef next_tmo_ref;
ErtsHLTimerService *timer_service;
-#ifdef ERTS_SMP
ethr_tid tid; /* Thread id */
struct erl_bits_state erl_bits_state; /* erl_bits.c state */
void *match_pseudo_process; /* erl_db_util.c:db_prog_match() */
Process *free_process;
ErtsThrPrgrData thr_progress_data;
-#endif
ErtsSchedulerSleepInfo *ssi;
Process *current_process;
ErtsSchedType type;
@@ -717,9 +693,6 @@ extern ErtsAlignedSchedulerData *erts_aligned_dirty_cpu_scheduler_data;
extern ErtsAlignedSchedulerData *erts_aligned_dirty_io_scheduler_data;
#endif
-#ifndef ERTS_SMP
-extern ErtsSchedulerData *erts_scheduler_data;
-#endif
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
int erts_smp_lc_runq_is_locked(ErtsRunQueue *);
@@ -727,10 +700,8 @@ int erts_smp_lc_runq_is_locked(ErtsRunQueue *);
#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
-#ifdef ERTS_SMP
void erts_empty_runq(ErtsRunQueue *rq);
void erts_non_empty_runq(ErtsRunQueue *rq);
-#endif
/*
@@ -753,10 +724,8 @@ erts_smp_inc_runq_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi, int prio)
len = erts_smp_atomic32_read_dirty(&rq->len);
-#ifdef ERTS_SMP
if (len == 0)
erts_non_empty_runq(rq);
-#endif
len++;
if (rq->max_len < len)
rq->max_len = len;
@@ -911,7 +880,6 @@ typedef struct {
typedef struct ErtsProcSysTask_ ErtsProcSysTask;
typedef struct ErtsProcSysTaskQs_ ErtsProcSysTaskQs;
-#ifdef ERTS_SMP
typedef struct ErtsPendingSuspend_ ErtsPendingSuspend;
struct ErtsPendingSuspend_ {
@@ -924,7 +892,6 @@ struct ErtsPendingSuspend_ {
Eterm pid);
};
-#endif
/* Defines to ease the change of memory architecture */
@@ -1092,7 +1059,6 @@ struct process {
erts_smp_atomic32_t dirty_state; /* Process dirty state flags (see ERTS_PDSFLG_*) */
#endif
-#ifdef ERTS_SMP
ErlMessageInQueue msg_inq;
ErlTraceMessageQueue *trace_msg_q;
ErtsPendExit pending_exit;
@@ -1104,7 +1070,6 @@ struct process {
#ifdef HIPE
struct hipe_process_state_smp hipe_smp;
#endif
-#endif
#ifdef CHECK_FOR_HOLES
Eterm* last_htop; /* No need to scan the heap below this point. */
@@ -1801,11 +1766,8 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks locks);
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
int erts_dbg_check_halloc_lock(Process *p);
#endif
-#if defined(ERTS_SMP) || defined(ERTS_DIRTY_SCHEDULERS)
void
erts_schedulers_state(Uint *, Uint *, Uint *, Uint *, Uint *, Uint *, Uint *, Uint *);
-#endif
-#ifdef ERTS_SMP
ErtsSchedSuspendResult
erts_set_schedulers_online(Process *p,
ErtsProcLocks plocks,
@@ -1820,14 +1782,9 @@ void erts_start_schedulers(void);
void erts_alloc_notify_delayed_dealloc(int);
void erts_alloc_ensure_handle_delayed_dealloc_call(int);
void erts_notify_canceled_timer(ErtsSchedulerData *, int);
-#endif
-#if ERTS_USE_ASYNC_READY_Q
void erts_notify_check_async_ready_queue(void *);
-#endif
-#ifdef ERTS_SMP
void erts_notify_code_ix_activation(Process* p, ErtsThrPrgrVal later);
void erts_notify_finish_breakpointing(Process* p);
-#endif
void erts_schedule_misc_aux_work(int sched_id,
void (*func)(void *),
void *arg);
@@ -1896,13 +1853,9 @@ int erts_send_exit_signal(Process *,
Eterm,
Process *,
Uint32);
-#ifdef ERTS_SMP
void erts_handle_pending_exit(Process *, ErtsProcLocks);
#define ERTS_PROC_PENDING_EXIT(P) \
(ERTS_PSFLG_PENDING_EXIT & erts_smp_atomic32_read_acqb(&(P)->state))
-#else
-#define ERTS_PROC_PENDING_EXIT(P) 0
-#endif
void erts_deep_process_dump(fmtfn_t, void *);
@@ -1932,19 +1885,7 @@ do { \
# define ERTS_VERIFY_UNUSED_TEMP_ALLOC(ESDP)
#endif
-#if defined(ERTS_SMP) || defined(USE_THREADS)
ErtsSchedulerData *erts_get_scheduler_data(void);
-#else
-ERTS_GLB_INLINE ErtsSchedulerData *erts_get_scheduler_data(void);
-#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-
-ERTS_GLB_INLINE
-ErtsSchedulerData *erts_get_scheduler_data(void)
-{
- return erts_scheduler_data;
-}
-#endif
-#endif
void erts_schedule_process(Process *, erts_aint32_t, ErtsProcLocks);
@@ -2060,13 +2001,11 @@ erts_psd_set(Process *p, int ix, void *data)
ASSERT(0 <= ix && ix < ERTS_PSD_SIZE);
if (psd) {
void *old;
-#ifdef ERTS_SMP
#ifdef ETHR_ORDERED_READ_DEPEND
ETHR_MEMBAR(ETHR_LoadStore|ETHR_StoreStore);
#else
ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore|ETHR_StoreStore);
#endif
-#endif
old = psd->data[ix];
psd->data[ix] = data;
return old;
@@ -2146,7 +2085,6 @@ erts_proc_set_error_handler(Process *p, Eterm handler)
#ifdef ERTS_INCLUDE_SCHEDULER_INTERNALS
-#ifdef ERTS_SMP
#include "erl_thr_progress.h"
@@ -2248,7 +2186,6 @@ erts_check_emigration_need(ErtsRunQueue *c_rq, int prio)
#endif
-#endif
#endif
@@ -2290,9 +2227,6 @@ ErtsSchedulerData *erts_proc_sched_data(Process *c_p)
{
ErtsSchedulerData *esdp;
ASSERT(c_p);
-#if !defined(ERTS_SMP)
- esdp = erts_get_scheduler_data();
-#else
esdp = c_p->scheduler_data;
# if defined(ERTS_DIRTY_SCHEDULERS)
if (esdp) {
@@ -2305,7 +2239,6 @@ ErtsSchedulerData *erts_proc_sched_data(Process *c_p)
ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
}
# endif
-#endif
ASSERT(esdp);
return esdp;
}
@@ -2336,7 +2269,6 @@ Eterm erts_get_current_pid(void)
ERTS_GLB_INLINE
Uint erts_get_scheduler_id(void)
{
-#ifdef ERTS_SMP
ErtsSchedulerData *esdp = erts_get_scheduler_data();
#ifdef ERTS_DIRTY_SCHEDULERS
if (esdp && ERTS_SCHEDULER_IS_DIRTY(esdp))
@@ -2344,65 +2276,45 @@ Uint erts_get_scheduler_id(void)
else
#endif
return esdp ? esdp->no : (Uint) 0;
-#else
- return erts_get_scheduler_data() ? (Uint) 1 : (Uint) 0;
-#endif
}
ERTS_GLB_INLINE ErtsRunQueue *
erts_get_runq_proc(Process *p)
{
-#ifdef ERTS_SMP
ASSERT(ERTS_AINT_NULL != erts_atomic_read_nob(&p->run_queue));
return (ErtsRunQueue *) erts_atomic_read_nob(&p->run_queue);
-#else
- return ERTS_RUNQ_IX(0);
-#endif
}
ERTS_GLB_INLINE ErtsRunQueue *
erts_get_runq_current(ErtsSchedulerData *esdp)
{
ASSERT(!esdp || esdp == erts_get_scheduler_data());
-#ifdef ERTS_SMP
if (!esdp)
esdp = erts_get_scheduler_data();
return esdp->run_queue;
-#else
- return ERTS_RUNQ_IX(0);
-#endif
}
ERTS_GLB_INLINE void
erts_smp_runq_lock(ErtsRunQueue *rq)
{
-#ifdef ERTS_SMP
erts_smp_mtx_lock(&rq->mtx);
-#endif
}
ERTS_GLB_INLINE int
erts_smp_runq_trylock(ErtsRunQueue *rq)
{
-#ifdef ERTS_SMP
return erts_smp_mtx_trylock(&rq->mtx);
-#else
- return 0;
-#endif
}
ERTS_GLB_INLINE void
erts_smp_runq_unlock(ErtsRunQueue *rq)
{
-#ifdef ERTS_SMP
erts_smp_mtx_unlock(&rq->mtx);
-#endif
}
ERTS_GLB_INLINE void
erts_smp_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq)
{
-#ifdef ERTS_SMP
ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(&rq->mtx));
if (xrq != rq) {
if (erts_smp_mtx_trylock(&xrq->mtx) == EBUSY) {
@@ -2415,22 +2327,18 @@ erts_smp_xrunq_lock(ErtsRunQueue *rq, ErtsRunQueue *xrq)
}
}
}
-#endif
}
ERTS_GLB_INLINE void
erts_smp_xrunq_unlock(ErtsRunQueue *rq, ErtsRunQueue *xrq)
{
-#ifdef ERTS_SMP
if (xrq != rq)
erts_smp_mtx_unlock(&xrq->mtx);
-#endif
}
ERTS_GLB_INLINE void
erts_smp_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2)
{
-#ifdef ERTS_SMP
ASSERT(rq1 && rq2);
if (rq1 == rq2)
erts_smp_mtx_lock(&rq1->mtx);
@@ -2442,18 +2350,15 @@ erts_smp_runqs_lock(ErtsRunQueue *rq1, ErtsRunQueue *rq2)
erts_smp_mtx_lock(&rq2->mtx);
erts_smp_mtx_lock(&rq1->mtx);
}
-#endif
}
ERTS_GLB_INLINE void
erts_smp_runqs_unlock(ErtsRunQueue *rq1, ErtsRunQueue *rq2)
{
-#ifdef ERTS_SMP
ASSERT(rq1 && rq2);
erts_smp_mtx_unlock(&rq1->mtx);
if (rq1 != rq2)
erts_smp_mtx_unlock(&rq2->mtx);
-#endif
}
ERTS_GLB_INLINE ErtsMessage *
@@ -2561,7 +2466,6 @@ ERTS_TIME2REDS_IMPL__(ErtsMonotonicTime start, ErtsMonotonicTime end)
}
#endif
-#ifdef ERTS_SMP
Process *erts_pid2proc_not_running(Process *,
ErtsProcLocks,
@@ -2580,14 +2484,6 @@ extern int erts_disable_proc_not_running_opt;
#define ERTS_SMP_ASSERT_IS_NOT_EXITING(P)
#endif
-#else /* !ERTS_SMP */
-
-#define ERTS_SMP_ASSERT_IS_NOT_EXITING(P)
-
-#define erts_pid2proc_not_running erts_pid2proc
-#define erts_pid2proc_nropt erts_pid2proc
-
-#endif
#define ERTS_PROC_IS_EXITING(P) \
(ERTS_PSFLG_EXITING & erts_smp_atomic32_read_acqb(&(P)->state))
@@ -2602,7 +2498,6 @@ extern int erts_disable_proc_not_running_opt;
void erts_smp_notify_inc_runq(ErtsRunQueue *runq);
-#ifdef ERTS_SMP
void erts_sched_finish_poke(ErtsSchedulerSleepInfo *, erts_aint32_t);
ERTS_GLB_INLINE void erts_sched_poke(ErtsSchedulerSleepInfo *ssi);
@@ -2623,7 +2518,6 @@ erts_sched_poke(ErtsSchedulerSleepInfo *ssi)
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#endif /* #ifdef ERTS_SMP */
#include "erl_process_lock.h"
diff --git a/erts/emulator/beam/erl_process_lock.c b/erts/emulator/beam/erl_process_lock.c
index 23c7414901..c824724cc9 100644
--- a/erts/emulator/beam/erl_process_lock.c
+++ b/erts/emulator/beam/erl_process_lock.c
@@ -69,7 +69,6 @@
#include "erl_process.h"
#include "erl_thr_progress.h"
-#ifdef ERTS_SMP
#if ERTS_PROC_LOCK_OWN_IMPL
@@ -1002,11 +1001,9 @@ static ERTS_INLINE
Process *proc_lookup_inc_refc(Eterm pid, int allow_exit)
{
Process *proc;
-#ifdef ERTS_SMP
ErtsThrPrgrDelayHandle dhndl;
dhndl = erts_thr_progress_unmanaged_delay();
-#endif
proc = erts_proc_lookup_raw(pid);
if (proc) {
@@ -1016,9 +1013,7 @@ Process *proc_lookup_inc_refc(Eterm pid, int allow_exit)
erts_proc_inc_refc(proc);
}
-#ifdef ERTS_SMP
erts_thr_progress_unmanaged_continue(dhndl);
-#endif
return proc;
}
@@ -1785,4 +1780,3 @@ check_queue(erts_proc_lock_t *lck)
}
#endif
-#endif /* ERTS_SMP */
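
The proc_lookup_inc_refc() hunk above keeps only the threaded variant: a raw table lookup plus reference-count bump bracketed by an unmanaged-thread-progress delay handle. The sketch below restates that pattern with the reasoning as comments; the claim that the delay handle is what holds off thread-progress-driven reclamation is our reading of the API, not something stated in the diff.

/* Hedged sketch of the lookup-and-pin pattern used above. */
static Process *example_lookup_inc_refc(Eterm pid)
{
    Process *proc;
    ErtsThrPrgrDelayHandle dhndl = erts_thr_progress_unmanaged_delay();
    proc = erts_proc_lookup_raw(pid);            /* raw lookup, no lock taken */
    if (proc)
        erts_proc_inc_refc(proc);                /* pin before releasing ...  */
    erts_thr_progress_unmanaged_continue(dhndl); /* ... the delay handle      */
    return proc;
}
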
diff --git a/erts/emulator/beam/erl_process_lock.h b/erts/emulator/beam/erl_process_lock.h
index 023ba4d4ae..16cdebb791 100644
--- a/erts/emulator/beam/erl_process_lock.h
+++ b/erts/emulator/beam/erl_process_lock.h
@@ -455,7 +455,6 @@ void erts_proc_lc_unrequire_lock(Process *p, ErtsProcLocks locks);
#ifndef ERTS_PROCESS_LOCK_H__
#define ERTS_PROCESS_LOCK_H__
-#ifdef ERTS_SMP
typedef struct {
union {
@@ -947,7 +946,6 @@ erts_proc_lock_op_debug(Process *p, ErtsProcLocks locks, int locked)
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#endif /* ERTS_SMP */
#ifdef ERTS_ENABLE_LOCK_POSITION
ERTS_GLB_INLINE void erts_smp_proc_lock_x(Process *, ErtsProcLocks, char *file, unsigned int line);
@@ -979,7 +977,7 @@ erts_smp_proc_lock(Process *p, ErtsProcLocks locks)
ERTS_PID2PIXLOCK(p->common.id),
#endif /*ERTS_PROC_LOCK_ATOMIC_IMPL*/
locks, file, line);
-#elif defined(ERTS_SMP)
+#else
erts_smp_proc_lock__(p,
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
@@ -993,7 +991,6 @@ erts_smp_proc_lock(Process *p, ErtsProcLocks locks)
ERTS_GLB_INLINE void
erts_smp_proc_unlock(Process *p, ErtsProcLocks locks)
{
-#ifdef ERTS_SMP
erts_smp_proc_unlock__(p,
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
@@ -1001,15 +998,11 @@ erts_smp_proc_unlock(Process *p, ErtsProcLocks locks)
ERTS_PID2PIXLOCK(p->common.id),
#endif
locks);
-#endif
}
ERTS_GLB_INLINE int
erts_smp_proc_trylock(Process *p, ErtsProcLocks locks)
{
-#ifndef ERTS_SMP
- return 0;
-#else
return erts_smp_proc_trylock__(p,
#if ERTS_PROC_LOCK_ATOMIC_IMPL
NULL,
@@ -1017,28 +1010,19 @@ erts_smp_proc_trylock(Process *p, ErtsProcLocks locks)
ERTS_PID2PIXLOCK(p->common.id),
#endif
locks);
-#endif
}
ERTS_GLB_INLINE void erts_proc_inc_refc(Process *p)
{
ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
-#ifdef ERTS_SMP
erts_ptab_atmc_inc_refc(&p->common);
-#else
- erts_ptab_inc_refc(&p->common);
-#endif
}
ERTS_GLB_INLINE void erts_proc_dec_refc(Process *p)
{
Sint referred;
ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
-#ifdef ERTS_SMP
referred = erts_ptab_atmc_dec_test_refc(&p->common);
-#else
- referred = erts_ptab_dec_test_refc(&p->common);
-#endif
if (!referred) {
ASSERT(ERTS_PROC_IS_EXITING(p));
erts_free_proc(p);
@@ -1049,11 +1033,7 @@ ERTS_GLB_INLINE void erts_proc_add_refc(Process *p, Sint add_refc)
{
Sint referred;
ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
-#ifdef ERTS_SMP
referred = erts_ptab_atmc_add_test_refc(&p->common, add_refc);
-#else
- referred = erts_ptab_add_test_refc(&p->common, add_refc);
-#endif
if (!referred) {
ASSERT(ERTS_PROC_IS_EXITING(p));
erts_free_proc(p);
@@ -1063,16 +1043,11 @@ ERTS_GLB_INLINE void erts_proc_add_refc(Process *p, Sint add_refc)
ERTS_GLB_INLINE Sint erts_proc_read_refc(Process *p)
{
ASSERT(!(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_PROXY));
-#ifdef ERTS_SMP
return erts_ptab_atmc_read_refc(&p->common);
-#else
- return erts_ptab_read_refc(&p->common);
-#endif
}
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#ifdef ERTS_SMP
void erts_proc_lock_init(Process *);
void erts_proc_lock_fin(Process *);
void erts_proc_safelock(Process *a_proc,
@@ -1081,7 +1056,6 @@ void erts_proc_safelock(Process *a_proc,
Process *b_proc,
ErtsProcLocks b_have_locks,
ErtsProcLocks b_need_locks);
-#endif
/*
* --- Process table lookup ------------------------------------------------
@@ -1113,9 +1087,6 @@ ERTS_GLB_INLINE Process *erts_pix2proc(int ix);
ERTS_GLB_INLINE Process *erts_proc_lookup_raw(Eterm pid);
ERTS_GLB_INLINE Process *erts_proc_lookup(Eterm pid);
-#ifndef ERTS_SMP
-ERTS_GLB_INLINE
-#endif
Process *erts_pid2proc_opt(Process *, ErtsProcLocks, Eterm, ErtsProcLocks, int);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
@@ -1152,25 +1123,6 @@ ERTS_GLB_INLINE Process *erts_proc_lookup(Eterm pid)
return proc;
}
-#ifndef ERTS_SMP
-ERTS_GLB_INLINE Process *
-erts_pid2proc_opt(Process *c_p_unused,
- ErtsProcLocks c_p_have_locks_unused,
- Eterm pid,
- ErtsProcLocks pid_need_locks_unused,
- int flags)
-{
- Process *proc = erts_proc_lookup_raw(pid);
- if (!proc)
- return NULL;
- if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
- && ERTS_PROC_IS_EXITING(proc))
- return NULL;
- if (flags & ERTS_P2P_FLG_INC_REFC)
- erts_proc_inc_refc(proc);
- return proc;
-}
-#endif /* !ERTS_SMP */
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
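
After the hunks above, the process reference-count helpers always use the atomic ptab operations, so the inc/dec pairing behaves identically in every build: the final decrement, which the assertion says can only happen on an exiting process, is what frees the struct. A hedged usage sketch; example_hold_process() is illustrative.

/* Bump the count while keeping a Process pointer without holding its locks,
 * drop it when done. */
static void example_hold_process(Process *p)
{
    erts_proc_inc_refc(p);   /* 'p' stays valid after its locks are dropped */
    /* ... use p without holding any of its locks ... */
    erts_proc_dec_refc(p);   /* last reference ends in erts_free_proc()     */
}
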
diff --git a/erts/emulator/beam/erl_ptab.h b/erts/emulator/beam/erl_ptab.h
index fecfd96ab0..285d325cc4 100644
--- a/erts/emulator/beam/erl_ptab.h
+++ b/erts/emulator/beam/erl_ptab.h
@@ -387,10 +387,8 @@ ERTS_GLB_INLINE Sint erts_ptab_atmc_dec_test_refc(ErtsPTabElementCommon *ptab_el
{
erts_aint_t refc = erts_atomic_dec_read_relb(&ptab_el->refc.atmc);
ERTS_SMP_LC_ASSERT(refc >= 0);
-#ifdef ERTS_SMP
if (refc == 0)
ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
-#endif
return (Sint) refc;
}
diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.c b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
index 96238318c9..6cb7ccab8d 100644
--- a/erts/emulator/beam/erl_sched_spec_pre_alloc.c
+++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.c
@@ -32,7 +32,6 @@
# include "config.h"
#endif
-#ifdef ERTS_SMP
#include "erl_process.h"
#include "erl_thr_progress.h"
@@ -325,4 +324,3 @@ erts_sspa_process_remote_frees(erts_sspa_chunk_header_t *chdr,
return res;
}
-#endif /* ERTS_SMP */
diff --git a/erts/emulator/beam/erl_sched_spec_pre_alloc.h b/erts/emulator/beam/erl_sched_spec_pre_alloc.h
index 7808d7d438..1307e65962 100644
--- a/erts/emulator/beam/erl_sched_spec_pre_alloc.h
+++ b/erts/emulator/beam/erl_sched_spec_pre_alloc.h
@@ -31,7 +31,6 @@
#ifndef ERTS_SCHED_SPEC_PRE_ALLOC_H__
#define ERTS_SCHED_SPEC_PRE_ALLOC_H__
-#ifdef ERTS_SMP
#undef ERL_THR_PROGRESS_TSD_TYPE_ONLY
#define ERL_THR_PROGRESS_TSD_TYPE_ONLY
@@ -236,6 +235,5 @@ erts_sspa_free(erts_sspa_data_t *data, int cix, char *cblk)
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#endif /* ERTS_SMP */
#endif /* ERTS_SCHED_SPEC_PRE_ALLOC_H__ */
diff --git a/erts/emulator/beam/erl_smp.h b/erts/emulator/beam/erl_smp.h
index 696bdbdaf1..c1f0fa18cc 100644
--- a/erts/emulator/beam/erl_smp.h
+++ b/erts/emulator/beam/erl_smp.h
@@ -40,7 +40,6 @@
#endif
-#ifdef ERTS_SMP
#define ERTS_SMP_THR_OPTS_DEFAULT_INITER ERTS_THR_OPTS_DEFAULT_INITER
typedef erts_thr_opts_t erts_smp_thr_opts_t;
typedef erts_thr_init_data_t erts_smp_thr_init_data_t;
@@ -71,47 +70,6 @@ void erts_thr_fatal_error(int, char *); /* implemented in erl_init.c */
#define ERTS_SMP_READ_MEMORY_BARRIER ERTS_THR_READ_MEMORY_BARRIER
#define ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER
-#else /* #ifdef ERTS_SMP */
-
-#define ERTS_SMP_THR_OPTS_DEFAULT_INITER {0}
-typedef int erts_smp_thr_opts_t;
-typedef int erts_smp_thr_init_data_t;
-typedef int erts_smp_tid_t;
-typedef int erts_smp_mtx_t;
-typedef int erts_smp_cnd_t;
-#define ERTS_SMP_RWMTX_OPT_DEFAULT_INITER {0}
-#define ERTS_SMP_RWMTX_TYPE_NORMAL 0
-#define ERTS_SMP_RWMTX_TYPE_FREQUENT_READ 0
-#define ERTS_SMP_RWMTX_TYPE_EXTREMELY_FREQUENT_READ 0
-#define ERTS_SMP_RWMTX_LONG_LIVED 0
-#define ERTS_SMP_RWMTX_SHORT_LIVED 0
-#define ERTS_SMP_RWMTX_UNKNOWN_LIVED 0
-typedef struct {
- char type;
- char lived;
- int main_spincount;
- int aux_spincount;
-} erts_smp_rwmtx_opt_t;
-typedef int erts_smp_rwmtx_t;
-typedef int erts_smp_tsd_key_t;
-#define erts_smp_dw_atomic_t erts_no_dw_atomic_t
-#define erts_smp_atomic_t erts_no_atomic_t
-#define erts_smp_atomic32_t erts_no_atomic32_t
-#define erts_smp_atomic64_t erts_no_atomic64_t
-#if __GNUC__ > 2
-typedef struct { } erts_smp_spinlock_t;
-typedef struct { } erts_smp_rwlock_t;
-#else
-typedef struct { int gcc_is_buggy; } erts_smp_spinlock_t;
-typedef struct { int gcc_is_buggy; } erts_smp_rwlock_t;
-#endif
-
-#define ERTS_SMP_MEMORY_BARRIER
-#define ERTS_SMP_WRITE_MEMORY_BARRIER
-#define ERTS_SMP_READ_MEMORY_BARRIER
-#define ERTS_SMP_DATA_DEPENDENCY_READ_MEMORY_BARRIER
-
-#endif /* #ifdef ERTS_SMP */
ERTS_GLB_INLINE void erts_smp_thr_init(erts_smp_thr_init_data_t *id);
ERTS_GLB_INLINE void erts_smp_thr_create(erts_smp_tid_t *tid,
@@ -225,7 +183,6 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
* of erl_threads.h for info on atomics.
*/
-#ifdef ERTS_SMP
/* Double word size atomics */
@@ -597,452 +554,59 @@ ERTS_GLB_INLINE void erts_smp_thr_sigwait(const sigset_t *set, int *sig);
#define erts_smp_atomic64_set_dirty erts_atomic64_set_dirty
#define erts_smp_atomic64_read_dirty erts_atomic64_read_dirty
-#else /* !ERTS_SMP */
-
-/* Double word size atomics */
-
-#define erts_smp_dw_atomic_init_nob erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_set_nob erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_nob erts_no_dw_atomic_read
-#define erts_smp_dw_atomic_cmpxchg_nob erts_no_dw_atomic_cmpxchg
-
-#define erts_smp_dw_atomic_init_mb erts_no_dw_atomic_init
-#define erts_smp_dw_atomic_set_mb erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_mb erts_no_dw_atomic_read
-#define erts_smp_dw_atomic_cmpxchg_mb erts_no_dw_atomic_cmpxchg
-
-#define erts_smp_dw_atomic_init_acqb erts_no_dw_atomic_init
-#define erts_smp_dw_atomic_set_acqb erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_acqb erts_no_dw_atomic_read
-#define erts_smp_dw_atomic_cmpxchg_acqb erts_no_dw_atomic_cmpxchg
-
-#define erts_smp_dw_atomic_init_relb erts_no_dw_atomic_init
-#define erts_smp_dw_atomic_set_relb erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_relb erts_no_dw_atomic_read
-#define erts_smp_dw_atomic_cmpxchg_relb erts_no_dw_atomic_cmpxchg
-
-#define erts_smp_dw_atomic_init_ddrb erts_no_dw_atomic_init
-#define erts_smp_dw_atomic_set_ddrb erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_ddrb erts_no_dw_atomic_read
-#define erts_smp_dw_atomic_cmpxchg_ddrb erts_no_dw_atomic_cmpxchg
-
-#define erts_smp_dw_atomic_init_rb erts_no_dw_atomic_init
-#define erts_smp_dw_atomic_set_rb erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_rb erts_no_dw_atomic_read
-#define erts_smp_dw_atomic_cmpxchg_rb erts_no_dw_atomic_cmpxchg
-
-#define erts_smp_dw_atomic_init_wb erts_no_dw_atomic_init
-#define erts_smp_dw_atomic_set_wb erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_wb erts_no_dw_atomic_read
-#define erts_smp_dw_atomic_cmpxchg_wb erts_no_dw_atomic_cmpxchg
-
-#define erts_smp_dw_atomic_set_dirty erts_no_dw_atomic_set
-#define erts_smp_dw_atomic_read_dirty erts_no_dw_atomic_read
-
-/* Word size atomics */
-
-#define erts_smp_atomic_init_nob erts_no_atomic_set
-#define erts_smp_atomic_set_nob erts_no_atomic_set
-#define erts_smp_atomic_read_nob erts_no_atomic_read
-#define erts_smp_atomic_inc_read_nob erts_no_atomic_inc_read
-#define erts_smp_atomic_dec_read_nob erts_no_atomic_dec_read
-#define erts_smp_atomic_inc_nob erts_no_atomic_inc
-#define erts_smp_atomic_dec_nob erts_no_atomic_dec
-#define erts_smp_atomic_add_read_nob erts_no_atomic_add_read
-#define erts_smp_atomic_add_nob erts_no_atomic_add
-#define erts_smp_atomic_read_bor_nob erts_no_atomic_read_bor
-#define erts_smp_atomic_read_band_nob erts_no_atomic_read_band
-#define erts_smp_atomic_xchg_nob erts_no_atomic_xchg
-#define erts_smp_atomic_cmpxchg_nob erts_no_atomic_cmpxchg
-#define erts_smp_atomic_read_bset_nob erts_no_atomic_read_bset
-
-#define erts_smp_atomic_init_mb erts_no_atomic_set
-#define erts_smp_atomic_set_mb erts_no_atomic_set
-#define erts_smp_atomic_read_mb erts_no_atomic_read
-#define erts_smp_atomic_inc_read_mb erts_no_atomic_inc_read
-#define erts_smp_atomic_dec_read_mb erts_no_atomic_dec_read
-#define erts_smp_atomic_inc_mb erts_no_atomic_inc
-#define erts_smp_atomic_dec_mb erts_no_atomic_dec
-#define erts_smp_atomic_add_read_mb erts_no_atomic_add_read
-#define erts_smp_atomic_add_mb erts_no_atomic_add
-#define erts_smp_atomic_read_bor_mb erts_no_atomic_read_bor
-#define erts_smp_atomic_read_band_mb erts_no_atomic_read_band
-#define erts_smp_atomic_xchg_mb erts_no_atomic_xchg
-#define erts_smp_atomic_cmpxchg_mb erts_no_atomic_cmpxchg
-#define erts_smp_atomic_read_bset_mb erts_no_atomic_read_bset
-
-#define erts_smp_atomic_init_acqb erts_no_atomic_set
-#define erts_smp_atomic_set_acqb erts_no_atomic_set
-#define erts_smp_atomic_read_acqb erts_no_atomic_read
-#define erts_smp_atomic_inc_read_acqb erts_no_atomic_inc_read
-#define erts_smp_atomic_dec_read_acqb erts_no_atomic_dec_read
-#define erts_smp_atomic_inc_acqb erts_no_atomic_inc
-#define erts_smp_atomic_dec_acqb erts_no_atomic_dec
-#define erts_smp_atomic_add_read_acqb erts_no_atomic_add_read
-#define erts_smp_atomic_add_acqb erts_no_atomic_add
-#define erts_smp_atomic_read_bor_acqb erts_no_atomic_read_bor
-#define erts_smp_atomic_read_band_acqb erts_no_atomic_read_band
-#define erts_smp_atomic_xchg_acqb erts_no_atomic_xchg
-#define erts_smp_atomic_cmpxchg_acqb erts_no_atomic_cmpxchg
-#define erts_smp_atomic_read_bset_acqb erts_no_atomic_read_bset
-
-#define erts_smp_atomic_init_relb erts_no_atomic_set
-#define erts_smp_atomic_set_relb erts_no_atomic_set
-#define erts_smp_atomic_read_relb erts_no_atomic_read
-#define erts_smp_atomic_inc_read_relb erts_no_atomic_inc_read
-#define erts_smp_atomic_dec_read_relb erts_no_atomic_dec_read
-#define erts_smp_atomic_inc_relb erts_no_atomic_inc
-#define erts_smp_atomic_dec_relb erts_no_atomic_dec
-#define erts_smp_atomic_add_read_relb erts_no_atomic_add_read
-#define erts_smp_atomic_add_relb erts_no_atomic_add
-#define erts_smp_atomic_read_bor_relb erts_no_atomic_read_bor
-#define erts_smp_atomic_read_band_relb erts_no_atomic_read_band
-#define erts_smp_atomic_xchg_relb erts_no_atomic_xchg
-#define erts_smp_atomic_cmpxchg_relb erts_no_atomic_cmpxchg
-#define erts_smp_atomic_read_bset_relb erts_no_atomic_read_bset
-
-#define erts_smp_atomic_init_ddrb erts_no_atomic_set
-#define erts_smp_atomic_set_ddrb erts_no_atomic_set
-#define erts_smp_atomic_read_ddrb erts_no_atomic_read
-#define erts_smp_atomic_inc_read_ddrb erts_no_atomic_inc_read
-#define erts_smp_atomic_dec_read_ddrb erts_no_atomic_dec_read
-#define erts_smp_atomic_inc_ddrb erts_no_atomic_inc
-#define erts_smp_atomic_dec_ddrb erts_no_atomic_dec
-#define erts_smp_atomic_add_read_ddrb erts_no_atomic_add_read
-#define erts_smp_atomic_add_ddrb erts_no_atomic_add
-#define erts_smp_atomic_read_bor_ddrb erts_no_atomic_read_bor
-#define erts_smp_atomic_read_band_ddrb erts_no_atomic_read_band
-#define erts_smp_atomic_xchg_ddrb erts_no_atomic_xchg
-#define erts_smp_atomic_cmpxchg_ddrb erts_no_atomic_cmpxchg
-#define erts_smp_atomic_read_bset_ddrb erts_no_atomic_read_bset
-
-#define erts_smp_atomic_init_rb erts_no_atomic_set
-#define erts_smp_atomic_set_rb erts_no_atomic_set
-#define erts_smp_atomic_read_rb erts_no_atomic_read
-#define erts_smp_atomic_inc_read_rb erts_no_atomic_inc_read
-#define erts_smp_atomic_dec_read_rb erts_no_atomic_dec_read
-#define erts_smp_atomic_inc_rb erts_no_atomic_inc
-#define erts_smp_atomic_dec_rb erts_no_atomic_dec
-#define erts_smp_atomic_add_read_rb erts_no_atomic_add_read
-#define erts_smp_atomic_add_rb erts_no_atomic_add
-#define erts_smp_atomic_read_bor_rb erts_no_atomic_read_bor
-#define erts_smp_atomic_read_band_rb erts_no_atomic_read_band
-#define erts_smp_atomic_xchg_rb erts_no_atomic_xchg
-#define erts_smp_atomic_cmpxchg_rb erts_no_atomic_cmpxchg
-#define erts_smp_atomic_read_bset_rb erts_no_atomic_read_bset
-
-#define erts_smp_atomic_init_wb erts_no_atomic_set
-#define erts_smp_atomic_set_wb erts_no_atomic_set
-#define erts_smp_atomic_read_wb erts_no_atomic_read
-#define erts_smp_atomic_inc_read_wb erts_no_atomic_inc_read
-#define erts_smp_atomic_dec_read_wb erts_no_atomic_dec_read
-#define erts_smp_atomic_inc_wb erts_no_atomic_inc
-#define erts_smp_atomic_dec_wb erts_no_atomic_dec
-#define erts_smp_atomic_add_read_wb erts_no_atomic_add_read
-#define erts_smp_atomic_add_wb erts_no_atomic_add
-#define erts_smp_atomic_read_bor_wb erts_no_atomic_read_bor
-#define erts_smp_atomic_read_band_wb erts_no_atomic_read_band
-#define erts_smp_atomic_xchg_wb erts_no_atomic_xchg
-#define erts_smp_atomic_cmpxchg_wb erts_no_atomic_cmpxchg
-#define erts_smp_atomic_read_bset_wb erts_no_atomic_read_bset
-
-#define erts_smp_atomic_set_dirty erts_no_atomic_set
-#define erts_smp_atomic_read_dirty erts_no_atomic_read
-
-/* 32-bit atomics */
-
-#define erts_smp_atomic32_init_nob erts_no_atomic32_set
-#define erts_smp_atomic32_set_nob erts_no_atomic32_set
-#define erts_smp_atomic32_read_nob erts_no_atomic32_read
-#define erts_smp_atomic32_inc_read_nob erts_no_atomic32_inc_read
-#define erts_smp_atomic32_dec_read_nob erts_no_atomic32_dec_read
-#define erts_smp_atomic32_inc_nob erts_no_atomic32_inc
-#define erts_smp_atomic32_dec_nob erts_no_atomic32_dec
-#define erts_smp_atomic32_add_read_nob erts_no_atomic32_add_read
-#define erts_smp_atomic32_add_nob erts_no_atomic32_add
-#define erts_smp_atomic32_read_bor_nob erts_no_atomic32_read_bor
-#define erts_smp_atomic32_read_band_nob erts_no_atomic32_read_band
-#define erts_smp_atomic32_xchg_nob erts_no_atomic32_xchg
-#define erts_smp_atomic32_cmpxchg_nob erts_no_atomic32_cmpxchg
-#define erts_smp_atomic32_read_bset_nob erts_no_atomic32_read_bset
-
-#define erts_smp_atomic32_init_mb erts_no_atomic32_set
-#define erts_smp_atomic32_set_mb erts_no_atomic32_set
-#define erts_smp_atomic32_read_mb erts_no_atomic32_read
-#define erts_smp_atomic32_inc_read_mb erts_no_atomic32_inc_read
-#define erts_smp_atomic32_dec_read_mb erts_no_atomic32_dec_read
-#define erts_smp_atomic32_inc_mb erts_no_atomic32_inc
-#define erts_smp_atomic32_dec_mb erts_no_atomic32_dec
-#define erts_smp_atomic32_add_read_mb erts_no_atomic32_add_read
-#define erts_smp_atomic32_add_mb erts_no_atomic32_add
-#define erts_smp_atomic32_read_bor_mb erts_no_atomic32_read_bor
-#define erts_smp_atomic32_read_band_mb erts_no_atomic32_read_band
-#define erts_smp_atomic32_xchg_mb erts_no_atomic32_xchg
-#define erts_smp_atomic32_cmpxchg_mb erts_no_atomic32_cmpxchg
-#define erts_smp_atomic32_read_bset_mb erts_no_atomic32_read_bset
-
-#define erts_smp_atomic32_init_acqb erts_no_atomic32_set
-#define erts_smp_atomic32_set_acqb erts_no_atomic32_set
-#define erts_smp_atomic32_read_acqb erts_no_atomic32_read
-#define erts_smp_atomic32_inc_read_acqb erts_no_atomic32_inc_read
-#define erts_smp_atomic32_dec_read_acqb erts_no_atomic32_dec_read
-#define erts_smp_atomic32_inc_acqb erts_no_atomic32_inc
-#define erts_smp_atomic32_dec_acqb erts_no_atomic32_dec
-#define erts_smp_atomic32_add_read_acqb erts_no_atomic32_add_read
-#define erts_smp_atomic32_add_acqb erts_no_atomic32_add
-#define erts_smp_atomic32_read_bor_acqb erts_no_atomic32_read_bor
-#define erts_smp_atomic32_read_band_acqb erts_no_atomic32_read_band
-#define erts_smp_atomic32_xchg_acqb erts_no_atomic32_xchg
-#define erts_smp_atomic32_cmpxchg_acqb erts_no_atomic32_cmpxchg
-#define erts_smp_atomic32_read_bset_acqb erts_no_atomic32_read_bset
-
-#define erts_smp_atomic32_init_relb erts_no_atomic32_set
-#define erts_smp_atomic32_set_relb erts_no_atomic32_set
-#define erts_smp_atomic32_read_relb erts_no_atomic32_read
-#define erts_smp_atomic32_inc_read_relb erts_no_atomic32_inc_read
-#define erts_smp_atomic32_dec_read_relb erts_no_atomic32_dec_read
-#define erts_smp_atomic32_inc_relb erts_no_atomic32_inc
-#define erts_smp_atomic32_dec_relb erts_no_atomic32_dec
-#define erts_smp_atomic32_add_read_relb erts_no_atomic32_add_read
-#define erts_smp_atomic32_add_relb erts_no_atomic32_add
-#define erts_smp_atomic32_read_bor_relb erts_no_atomic32_read_bor
-#define erts_smp_atomic32_read_band_relb erts_no_atomic32_read_band
-#define erts_smp_atomic32_xchg_relb erts_no_atomic32_xchg
-#define erts_smp_atomic32_cmpxchg_relb erts_no_atomic32_cmpxchg
-#define erts_smp_atomic32_read_bset_relb erts_no_atomic32_read_bset
-
-#define erts_smp_atomic32_init_ddrb erts_no_atomic32_set
-#define erts_smp_atomic32_set_ddrb erts_no_atomic32_set
-#define erts_smp_atomic32_read_ddrb erts_no_atomic32_read
-#define erts_smp_atomic32_inc_read_ddrb erts_no_atomic32_inc_read
-#define erts_smp_atomic32_dec_read_ddrb erts_no_atomic32_dec_read
-#define erts_smp_atomic32_inc_ddrb erts_no_atomic32_inc
-#define erts_smp_atomic32_dec_ddrb erts_no_atomic32_dec
-#define erts_smp_atomic32_add_read_ddrb erts_no_atomic32_add_read
-#define erts_smp_atomic32_add_ddrb erts_no_atomic32_add
-#define erts_smp_atomic32_read_bor_ddrb erts_no_atomic32_read_bor
-#define erts_smp_atomic32_read_band_ddrb erts_no_atomic32_read_band
-#define erts_smp_atomic32_xchg_ddrb erts_no_atomic32_xchg
-#define erts_smp_atomic32_cmpxchg_ddrb erts_no_atomic32_cmpxchg
-#define erts_smp_atomic32_read_bset_ddrb erts_no_atomic32_read_bset
-
-#define erts_smp_atomic32_init_rb erts_no_atomic32_set
-#define erts_smp_atomic32_set_rb erts_no_atomic32_set
-#define erts_smp_atomic32_read_rb erts_no_atomic32_read
-#define erts_smp_atomic32_inc_read_rb erts_no_atomic32_inc_read
-#define erts_smp_atomic32_dec_read_rb erts_no_atomic32_dec_read
-#define erts_smp_atomic32_inc_rb erts_no_atomic32_inc
-#define erts_smp_atomic32_dec_rb erts_no_atomic32_dec
-#define erts_smp_atomic32_add_read_rb erts_no_atomic32_add_read
-#define erts_smp_atomic32_add_rb erts_no_atomic32_add
-#define erts_smp_atomic32_read_bor_rb erts_no_atomic32_read_bor
-#define erts_smp_atomic32_read_band_rb erts_no_atomic32_read_band
-#define erts_smp_atomic32_xchg_rb erts_no_atomic32_xchg
-#define erts_smp_atomic32_cmpxchg_rb erts_no_atomic32_cmpxchg
-#define erts_smp_atomic32_read_bset_rb erts_no_atomic32_read_bset
-
-#define erts_smp_atomic32_init_wb erts_no_atomic32_set
-#define erts_smp_atomic32_set_wb erts_no_atomic32_set
-#define erts_smp_atomic32_read_wb erts_no_atomic32_read
-#define erts_smp_atomic32_inc_read_wb erts_no_atomic32_inc_read
-#define erts_smp_atomic32_dec_read_wb erts_no_atomic32_dec_read
-#define erts_smp_atomic32_inc_wb erts_no_atomic32_inc
-#define erts_smp_atomic32_dec_wb erts_no_atomic32_dec
-#define erts_smp_atomic32_add_read_wb erts_no_atomic32_add_read
-#define erts_smp_atomic32_add_wb erts_no_atomic32_add
-#define erts_smp_atomic32_read_bor_wb erts_no_atomic32_read_bor
-#define erts_smp_atomic32_read_band_wb erts_no_atomic32_read_band
-#define erts_smp_atomic32_xchg_wb erts_no_atomic32_xchg
-#define erts_smp_atomic32_cmpxchg_wb erts_no_atomic32_cmpxchg
-#define erts_smp_atomic32_read_bset_wb erts_no_atomic32_read_bset
-
-#define erts_smp_atomic32_set_dirty erts_no_atomic32_set
-#define erts_smp_atomic32_read_dirty erts_no_atomic32_read
-
-/* 64-bit atomics */
-
-#define erts_smp_atomic64_init_nob erts_no_atomic64_set
-#define erts_smp_atomic64_set_nob erts_no_atomic64_set
-#define erts_smp_atomic64_read_nob erts_no_atomic64_read
-#define erts_smp_atomic64_inc_read_nob erts_no_atomic64_inc_read
-#define erts_smp_atomic64_dec_read_nob erts_no_atomic64_dec_read
-#define erts_smp_atomic64_inc_nob erts_no_atomic64_inc
-#define erts_smp_atomic64_dec_nob erts_no_atomic64_dec
-#define erts_smp_atomic64_add_read_nob erts_no_atomic64_add_read
-#define erts_smp_atomic64_add_nob erts_no_atomic64_add
-#define erts_smp_atomic64_read_bor_nob erts_no_atomic64_read_bor
-#define erts_smp_atomic64_read_band_nob erts_no_atomic64_read_band
-#define erts_smp_atomic64_xchg_nob erts_no_atomic64_xchg
-#define erts_smp_atomic64_cmpxchg_nob erts_no_atomic64_cmpxchg
-#define erts_smp_atomic64_read_bset_nob erts_no_atomic64_read_bset
-
-#define erts_smp_atomic64_init_mb erts_no_atomic64_set
-#define erts_smp_atomic64_set_mb erts_no_atomic64_set
-#define erts_smp_atomic64_read_mb erts_no_atomic64_read
-#define erts_smp_atomic64_inc_read_mb erts_no_atomic64_inc_read
-#define erts_smp_atomic64_dec_read_mb erts_no_atomic64_dec_read
-#define erts_smp_atomic64_inc_mb erts_no_atomic64_inc
-#define erts_smp_atomic64_dec_mb erts_no_atomic64_dec
-#define erts_smp_atomic64_add_read_mb erts_no_atomic64_add_read
-#define erts_smp_atomic64_add_mb erts_no_atomic64_add
-#define erts_smp_atomic64_read_bor_mb erts_no_atomic64_read_bor
-#define erts_smp_atomic64_read_band_mb erts_no_atomic64_read_band
-#define erts_smp_atomic64_xchg_mb erts_no_atomic64_xchg
-#define erts_smp_atomic64_cmpxchg_mb erts_no_atomic64_cmpxchg
-#define erts_smp_atomic64_read_bset_mb erts_no_atomic64_read_bset
-
-#define erts_smp_atomic64_init_acqb erts_no_atomic64_set
-#define erts_smp_atomic64_set_acqb erts_no_atomic64_set
-#define erts_smp_atomic64_read_acqb erts_no_atomic64_read
-#define erts_smp_atomic64_inc_read_acqb erts_no_atomic64_inc_read
-#define erts_smp_atomic64_dec_read_acqb erts_no_atomic64_dec_read
-#define erts_smp_atomic64_inc_acqb erts_no_atomic64_inc
-#define erts_smp_atomic64_dec_acqb erts_no_atomic64_dec
-#define erts_smp_atomic64_add_read_acqb erts_no_atomic64_add_read
-#define erts_smp_atomic64_add_acqb erts_no_atomic64_add
-#define erts_smp_atomic64_read_bor_acqb erts_no_atomic64_read_bor
-#define erts_smp_atomic64_read_band_acqb erts_no_atomic64_read_band
-#define erts_smp_atomic64_xchg_acqb erts_no_atomic64_xchg
-#define erts_smp_atomic64_cmpxchg_acqb erts_no_atomic64_cmpxchg
-#define erts_smp_atomic64_read_bset_acqb erts_no_atomic64_read_bset
-
-#define erts_smp_atomic64_init_relb erts_no_atomic64_set
-#define erts_smp_atomic64_set_relb erts_no_atomic64_set
-#define erts_smp_atomic64_read_relb erts_no_atomic64_read
-#define erts_smp_atomic64_inc_read_relb erts_no_atomic64_inc_read
-#define erts_smp_atomic64_dec_read_relb erts_no_atomic64_dec_read
-#define erts_smp_atomic64_inc_relb erts_no_atomic64_inc
-#define erts_smp_atomic64_dec_relb erts_no_atomic64_dec
-#define erts_smp_atomic64_add_read_relb erts_no_atomic64_add_read
-#define erts_smp_atomic64_add_relb erts_no_atomic64_add
-#define erts_smp_atomic64_read_bor_relb erts_no_atomic64_read_bor
-#define erts_smp_atomic64_read_band_relb erts_no_atomic64_read_band
-#define erts_smp_atomic64_xchg_relb erts_no_atomic64_xchg
-#define erts_smp_atomic64_cmpxchg_relb erts_no_atomic64_cmpxchg
-#define erts_smp_atomic64_read_bset_relb erts_no_atomic64_read_bset
-
-#define erts_smp_atomic64_init_ddrb erts_no_atomic64_set
-#define erts_smp_atomic64_set_ddrb erts_no_atomic64_set
-#define erts_smp_atomic64_read_ddrb erts_no_atomic64_read
-#define erts_smp_atomic64_inc_read_ddrb erts_no_atomic64_inc_read
-#define erts_smp_atomic64_dec_read_ddrb erts_no_atomic64_dec_read
-#define erts_smp_atomic64_inc_ddrb erts_no_atomic64_inc
-#define erts_smp_atomic64_dec_ddrb erts_no_atomic64_dec
-#define erts_smp_atomic64_add_read_ddrb erts_no_atomic64_add_read
-#define erts_smp_atomic64_add_ddrb erts_no_atomic64_add
-#define erts_smp_atomic64_read_bor_ddrb erts_no_atomic64_read_bor
-#define erts_smp_atomic64_read_band_ddrb erts_no_atomic64_read_band
-#define erts_smp_atomic64_xchg_ddrb erts_no_atomic64_xchg
-#define erts_smp_atomic64_cmpxchg_ddrb erts_no_atomic64_cmpxchg
-#define erts_smp_atomic64_read_bset_ddrb erts_no_atomic64_read_bset
-
-#define erts_smp_atomic64_init_rb erts_no_atomic64_set
-#define erts_smp_atomic64_set_rb erts_no_atomic64_set
-#define erts_smp_atomic64_read_rb erts_no_atomic64_read
-#define erts_smp_atomic64_inc_read_rb erts_no_atomic64_inc_read
-#define erts_smp_atomic64_dec_read_rb erts_no_atomic64_dec_read
-#define erts_smp_atomic64_inc_rb erts_no_atomic64_inc
-#define erts_smp_atomic64_dec_rb erts_no_atomic64_dec
-#define erts_smp_atomic64_add_read_rb erts_no_atomic64_add_read
-#define erts_smp_atomic64_add_rb erts_no_atomic64_add
-#define erts_smp_atomic64_read_bor_rb erts_no_atomic64_read_bor
-#define erts_smp_atomic64_read_band_rb erts_no_atomic64_read_band
-#define erts_smp_atomic64_xchg_rb erts_no_atomic64_xchg
-#define erts_smp_atomic64_cmpxchg_rb erts_no_atomic64_cmpxchg
-#define erts_smp_atomic64_read_bset_rb erts_no_atomic64_read_bset
-
-#define erts_smp_atomic64_init_wb erts_no_atomic64_set
-#define erts_smp_atomic64_set_wb erts_no_atomic64_set
-#define erts_smp_atomic64_read_wb erts_no_atomic64_read
-#define erts_smp_atomic64_inc_read_wb erts_no_atomic64_inc_read
-#define erts_smp_atomic64_dec_read_wb erts_no_atomic64_dec_read
-#define erts_smp_atomic64_inc_wb erts_no_atomic64_inc
-#define erts_smp_atomic64_dec_wb erts_no_atomic64_dec
-#define erts_smp_atomic64_add_read_wb erts_no_atomic64_add_read
-#define erts_smp_atomic64_add_wb erts_no_atomic64_add
-#define erts_smp_atomic64_read_bor_wb erts_no_atomic64_read_bor
-#define erts_smp_atomic64_read_band_wb erts_no_atomic64_read_band
-#define erts_smp_atomic64_xchg_wb erts_no_atomic64_xchg
-#define erts_smp_atomic64_cmpxchg_wb erts_no_atomic64_cmpxchg
-#define erts_smp_atomic64_read_bset_wb erts_no_atomic64_read_bset
-
-#define erts_smp_atomic64_set_dirty erts_no_atomic64_set
-#define erts_smp_atomic64_read_dirty erts_no_atomic64_read
-
-#endif /* !ERTS_SMP */
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
erts_smp_thr_init(erts_smp_thr_init_data_t *id)
{
-#ifdef ERTS_SMP
erts_thr_init(id);
-#endif
}
ERTS_GLB_INLINE void
erts_smp_thr_create(erts_smp_tid_t *tid, void * (*func)(void *), void *arg,
erts_smp_thr_opts_t *opts)
{
-#ifdef ERTS_SMP
erts_thr_create(tid, func, arg, opts);
-#endif
}
ERTS_GLB_INLINE void
erts_smp_thr_join(erts_smp_tid_t tid, void **thr_res)
{
-#ifdef ERTS_SMP
erts_thr_join(tid, thr_res);
-#endif
}
ERTS_GLB_INLINE void
erts_smp_thr_detach(erts_smp_tid_t tid)
{
-#ifdef ERTS_SMP
erts_thr_detach(tid);
-#endif
}
ERTS_GLB_INLINE void
erts_smp_thr_exit(void *res)
{
-#ifdef ERTS_SMP
erts_thr_exit(res);
-#endif
}
ERTS_GLB_INLINE void
erts_smp_install_exit_handler(void (*exit_handler)(void))
{
-#ifdef ERTS_SMP
erts_thr_install_exit_handler(exit_handler);
-#endif
}
ERTS_GLB_INLINE erts_smp_tid_t
erts_smp_thr_self(void)
{
-#ifdef ERTS_SMP
return erts_thr_self();
-#else
- return 0;
-#endif
}
ERTS_GLB_INLINE int
erts_smp_equal_tids(erts_smp_tid_t x, erts_smp_tid_t y)
{
-#ifdef ERTS_SMP
return erts_equal_tids(x, y);
-#else
- return 1;
-#endif
}
@@ -1050,34 +614,26 @@ erts_smp_equal_tids(erts_smp_tid_t x, erts_smp_tid_t y)
ERTS_GLB_INLINE void
erts_smp_rec_mtx_init(erts_smp_mtx_t *mtx)
{
-#ifdef ERTS_SMP
erts_rec_mtx_init(mtx);
-#endif
}
#endif
ERTS_GLB_INLINE void
erts_smp_mtx_init(erts_smp_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags)
{
-#ifdef ERTS_SMP
erts_mtx_init(mtx, name, extra, flags);
-#endif
}
ERTS_GLB_INLINE void
erts_smp_mtx_init_locked(erts_smp_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags)
{
-#ifdef ERTS_SMP
erts_mtx_init_locked(mtx, name, extra, flags);
-#endif
}
ERTS_GLB_INLINE void
erts_smp_mtx_destroy(erts_smp_mtx_t *mtx)
{
-#ifdef ERTS_SMP
erts_mtx_destroy(mtx);
-#endif
}
ERTS_GLB_INLINE int
@@ -1089,10 +645,8 @@ erts_smp_mtx_trylock(erts_smp_mtx_t *mtx)
{
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
return erts_mtx_trylock_x(mtx,file,line);
-#elif defined(ERTS_SMP)
- return erts_mtx_trylock(mtx);
#else
- return 0;
+ return erts_mtx_trylock(mtx);
#endif
}
@@ -1107,7 +661,7 @@ erts_smp_mtx_lock(erts_smp_mtx_t *mtx)
{
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
erts_mtx_lock_x(mtx, file, line);
-#elif defined(ERTS_SMP)
+#else
erts_mtx_lock(mtx);
#endif
}
@@ -1115,9 +669,7 @@ erts_smp_mtx_lock(erts_smp_mtx_t *mtx)
ERTS_GLB_INLINE void
erts_smp_mtx_unlock(erts_smp_mtx_t *mtx)
{
-#ifdef ERTS_SMP
erts_mtx_unlock(mtx);
-#endif
}
ERTS_GLB_INLINE int
@@ -1133,25 +685,19 @@ erts_smp_lc_mtx_is_locked(erts_smp_mtx_t *mtx)
ERTS_GLB_INLINE void
erts_smp_cnd_init(erts_smp_cnd_t *cnd)
{
-#ifdef ERTS_SMP
erts_cnd_init(cnd);
-#endif
}
ERTS_GLB_INLINE void
erts_smp_cnd_destroy(erts_smp_cnd_t *cnd)
{
-#ifdef ERTS_SMP
erts_cnd_destroy(cnd);
-#endif
}
ERTS_GLB_INLINE void
erts_smp_cnd_wait(erts_smp_cnd_t *cnd, erts_smp_mtx_t *mtx)
{
-#ifdef ERTS_SMP
erts_cnd_wait(cnd, mtx);
-#endif
}
/*
@@ -1167,26 +713,20 @@ erts_smp_cnd_wait(erts_smp_cnd_t *cnd, erts_smp_mtx_t *mtx)
ERTS_GLB_INLINE void
erts_smp_cnd_signal(erts_smp_cnd_t *cnd)
{
-#ifdef ERTS_SMP
erts_cnd_signal(cnd);
-#endif
}
ERTS_GLB_INLINE void
erts_smp_cnd_broadcast(erts_smp_cnd_t *cnd)
{
-#ifdef ERTS_SMP
erts_cnd_broadcast(cnd);
-#endif
}
ERTS_GLB_INLINE void
erts_smp_rwmtx_set_reader_group(int no)
{
-#ifdef ERTS_SMP
erts_rwmtx_set_reader_group(no);
-#endif
}
ERTS_GLB_INLINE void
@@ -1195,9 +735,7 @@ erts_smp_rwmtx_init(erts_smp_rwmtx_t *rwmtx,
Eterm extra,
erts_lock_flags_t flags)
{
-#ifdef ERTS_SMP
erts_smp_rwmtx_init_opt(rwmtx, NULL, name, extra, flags);
-#endif
}
ERTS_GLB_INLINE void
@@ -1207,17 +745,13 @@ erts_smp_rwmtx_init_opt(erts_smp_rwmtx_t *rwmtx,
Eterm extra,
erts_lock_flags_t flags)
{
-#ifdef ERTS_SMP
erts_rwmtx_init_opt(rwmtx, opt, name, extra, flags);
-#endif
}
ERTS_GLB_INLINE void
erts_smp_rwmtx_destroy(erts_smp_rwmtx_t *rwmtx)
{
-#ifdef ERTS_SMP
erts_rwmtx_destroy(rwmtx);
-#endif
}
ERTS_GLB_INLINE int
@@ -1229,10 +763,8 @@ erts_smp_rwmtx_tryrlock(erts_smp_rwmtx_t *rwmtx)
{
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
return erts_rwmtx_tryrlock_x(rwmtx, file, line);
-#elif defined(ERTS_SMP)
- return erts_rwmtx_tryrlock(rwmtx);
#else
- return 0;
+ return erts_rwmtx_tryrlock(rwmtx);
#endif
}
@@ -1245,7 +777,7 @@ erts_smp_rwmtx_rlock(erts_smp_rwmtx_t *rwmtx)
{
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
erts_rwmtx_rlock_x(rwmtx, file, line);
-#elif defined(ERTS_SMP)
+#else
erts_rwmtx_rlock(rwmtx);
#endif
}
@@ -1253,9 +785,7 @@ erts_smp_rwmtx_rlock(erts_smp_rwmtx_t *rwmtx)
ERTS_GLB_INLINE void
erts_smp_rwmtx_runlock(erts_smp_rwmtx_t *rwmtx)
{
-#ifdef ERTS_SMP
erts_rwmtx_runlock(rwmtx);
-#endif
}
@@ -1268,10 +798,8 @@ erts_smp_rwmtx_tryrwlock(erts_smp_rwmtx_t *rwmtx)
{
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
return erts_rwmtx_tryrwlock_x(rwmtx, file, line);
-#elif defined(ERTS_SMP)
- return erts_rwmtx_tryrwlock(rwmtx);
#else
- return 0;
+ return erts_rwmtx_tryrwlock(rwmtx);
#endif
}
@@ -1284,7 +812,7 @@ erts_smp_rwmtx_rwlock(erts_smp_rwmtx_t *rwmtx)
{
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
erts_rwmtx_rwlock_x(rwmtx, file, line);
-#elif defined(ERTS_SMP)
+#else
erts_rwmtx_rwlock(rwmtx);
#endif
}
@@ -1292,9 +820,7 @@ erts_smp_rwmtx_rwlock(erts_smp_rwmtx_t *rwmtx)
ERTS_GLB_INLINE void
erts_smp_rwmtx_rwunlock(erts_smp_rwmtx_t *rwmtx)
{
-#ifdef ERTS_SMP
erts_rwmtx_rwunlock(rwmtx);
-#endif
}
#if 0 /* The following rwmtx function names are
@@ -1346,31 +872,19 @@ erts_smp_lc_rwmtx_is_rwlocked(erts_smp_rwmtx_t *mtx)
ERTS_GLB_INLINE void
erts_smp_spinlock_init(erts_smp_spinlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags)
{
-#ifdef ERTS_SMP
erts_spinlock_init(lock, name, extra, flags);
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
erts_smp_spinlock_destroy(erts_smp_spinlock_t *lock)
{
-#ifdef ERTS_SMP
erts_spinlock_destroy(lock);
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
erts_smp_spin_unlock(erts_smp_spinlock_t *lock)
{
-#ifdef ERTS_SMP
erts_spin_unlock(lock);
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
@@ -1382,10 +896,8 @@ erts_smp_spin_lock(erts_smp_spinlock_t *lock)
{
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
erts_spin_lock_x(lock, file, line);
-#elif defined(ERTS_SMP)
- erts_spin_lock(lock);
#else
- (void)lock;
+ erts_spin_lock(lock);
#endif
}
@@ -1402,31 +914,19 @@ erts_smp_lc_spinlock_is_locked(erts_smp_spinlock_t *lock)
ERTS_GLB_INLINE void
erts_smp_rwlock_init(erts_smp_rwlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags)
{
-#ifdef ERTS_SMP
erts_rwlock_init(lock, name, extra, flags);
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
erts_smp_rwlock_destroy(erts_smp_rwlock_t *lock)
{
-#ifdef ERTS_SMP
erts_rwlock_destroy(lock);
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
erts_smp_read_unlock(erts_smp_rwlock_t *lock)
{
-#ifdef ERTS_SMP
erts_read_unlock(lock);
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
@@ -1438,21 +938,15 @@ erts_smp_read_lock(erts_smp_rwlock_t *lock)
{
#if defined(ERTS_ENABLE_LOCK_POSITION) && defined(ERTS_SMP)
erts_read_lock_x(lock, file, line);
-#elif defined(ERTS_SMP)
- erts_read_lock(lock);
#else
- (void)lock;
+ erts_read_lock(lock);
#endif
}
ERTS_GLB_INLINE void
erts_smp_write_unlock(erts_smp_rwlock_t *lock)
{
-#ifdef ERTS_SMP
erts_write_unlock(lock);
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
@@ -1464,10 +958,8 @@ erts_smp_write_lock(erts_smp_rwlock_t *lock)
{
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_POSITION)
erts_write_lock_x(lock, file, line);
-#elif defined(ERTS_SMP)
- erts_write_lock(lock);
#else
- (void)lock;
+ erts_write_lock(lock);
#endif
}
@@ -1494,35 +986,25 @@ erts_smp_lc_rwlock_is_rwlocked(erts_smp_rwlock_t *lock)
ERTS_GLB_INLINE void
erts_smp_tsd_key_create(erts_smp_tsd_key_t *keyp, char* keyname)
{
-#ifdef ERTS_SMP
erts_tsd_key_create(keyp,keyname);
-#endif
}
ERTS_GLB_INLINE void
erts_smp_tsd_key_delete(erts_smp_tsd_key_t key)
{
-#ifdef ERTS_SMP
erts_tsd_key_delete(key);
-#endif
}
ERTS_GLB_INLINE void
erts_smp_tsd_set(erts_smp_tsd_key_t key, void *value)
{
-#ifdef ERTS_SMP
erts_tsd_set(key, value);
-#endif
}
ERTS_GLB_INLINE void *
erts_smp_tsd_get(erts_smp_tsd_key_t key)
{
-#ifdef ERTS_SMP
return erts_tsd_get(key);
-#else
- return NULL;
-#endif
}
#ifdef ERTS_THR_HAVE_SIG_FUNCS
@@ -1531,17 +1013,13 @@ erts_smp_tsd_get(erts_smp_tsd_key_t key)
ERTS_GLB_INLINE void
erts_smp_thr_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
-#ifdef ERTS_SMP
erts_thr_sigmask(how, set, oset);
-#endif
}
ERTS_GLB_INLINE void
erts_smp_thr_sigwait(const sigset_t *set, int *sig)
{
-#ifdef ERTS_SMP
erts_thr_sigwait(set, sig);
-#endif
}
#endif /* #ifdef ERTS_THR_HAVE_SIG_FUNCS */
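
With the single-threaded stubs deleted from erl_smp.h, every erts_smp_* object is backed by a real ethread object and the wrappers above forward unconditionally. The classic mutex/condition-variable wait loop therefore behaves the same in every build; a hedged usage sketch follows, with the example_* names being illustrative and initialization via erts_smp_mtx_init()/erts_smp_cnd_init() assumed to have happened elsewhere.

static erts_smp_mtx_t example_mtx;   /* init with erts_smp_mtx_init()  */
static erts_smp_cnd_t example_cnd;   /* init with erts_smp_cnd_init()  */
static int example_ready;            /* protected by example_mtx       */

static void example_wait_until_ready(void)
{
    erts_smp_mtx_lock(&example_mtx);
    while (!example_ready)                        /* tolerate spurious wakeups */
        erts_smp_cnd_wait(&example_cnd, &example_mtx);
    erts_smp_mtx_unlock(&example_mtx);
}

static void example_signal_ready(void)
{
    erts_smp_mtx_lock(&example_mtx);
    example_ready = 1;
    erts_smp_cnd_signal(&example_cnd);            /* wake one waiter */
    erts_smp_mtx_unlock(&example_mtx);
}
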
diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c
index 2a9f276e02..96824dc06e 100644
--- a/erts/emulator/beam/erl_thr_progress.c
+++ b/erts/emulator/beam/erl_thr_progress.c
@@ -80,7 +80,6 @@
#include "erl_thr_progress.h"
#include "global.h"
-#ifdef ERTS_SMP
#define ERTS_THR_PRGR_DBG_CHK_WAKEUP_REQUEST_VALUE 0
@@ -1513,4 +1512,3 @@ void erts_thr_progress_dbg_print_state(void)
}
-#endif
diff --git a/erts/emulator/beam/erl_thr_progress.h b/erts/emulator/beam/erl_thr_progress.h
index b2894ba1fe..80ede05d73 100644
--- a/erts/emulator/beam/erl_thr_progress.h
+++ b/erts/emulator/beam/erl_thr_progress.h
@@ -33,13 +33,6 @@
#include "sys.h"
-#ifndef ERTS_SMP
-
-#define erts_smp_thr_progress_block() ((void) 0)
-#define erts_smp_thr_progress_unblock() ((void) 0)
-#define erts_smp_thr_progress_is_blocking() 1
-
-#else /* ERTS_SMP */
#define erts_smp_thr_progress_block erts_thr_progress_block
#define erts_smp_thr_progress_unblock erts_thr_progress_unblock
@@ -87,13 +80,10 @@ typedef struct {
int erts_thr_progress_fatal_error_block(ErtsThrPrgrData *tmp_tpd_bufp);
void erts_thr_progress_fatal_error_wait(SWord timeout);
-#endif /* ERTS_SMP */
typedef struct ErtsThrPrgrLaterOp_ ErtsThrPrgrLaterOp;
struct ErtsThrPrgrLaterOp_ {
-#ifdef ERTS_SMP
ErtsThrPrgrVal later;
-#endif
void (*func)(void *);
void *data;
ErtsThrPrgrLaterOp *next;
@@ -107,7 +97,6 @@ struct ErtsThrPrgrLaterOp_ {
#include "erl_threads.h"
#include "erl_process.h"
-#ifdef ERTS_SMP
/* ERTS_THR_PRGR_VAL_FIRST should only be used when initializing... */
#define ERTS_THR_PRGR_VAL_FIRST ((ErtsThrPrgrVal) 0)
@@ -324,6 +313,5 @@ erts_thr_progress_has_reached(ErtsThrPrgrVal val)
#endif
-#endif /* ERTS_SMP */
#endif
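
The erl_thr_progress.h hunks above remove the no-op fallbacks, so erts_smp_thr_progress_block()/unblock() always map to the real blocking calls. The sketch below shows the pattern such blocking is typically used for; the statement that managed threads are held at progress points while blocked is our reading of the API and should be treated as an assumption.

/* Block thread progress, perform a global mutation, unblock.
 * example_global_update() and 'mutate' are illustrative names. */
static void example_global_update(void (*mutate)(void))
{
    erts_smp_thr_progress_block();    /* == erts_thr_progress_block()      */
    mutate();                         /* assumed: managed threads quiesced */
    erts_smp_thr_progress_unblock();  /* == erts_thr_progress_unblock()    */
}
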
diff --git a/erts/emulator/beam/erl_thr_queue.c b/erts/emulator/beam/erl_thr_queue.c
index f56d0828dd..66566edec5 100644
--- a/erts/emulator/beam/erl_thr_queue.c
+++ b/erts/emulator/beam/erl_thr_queue.c
@@ -87,32 +87,10 @@
#define ERTS_THR_Q_MAX_FINI_DEQ_OPS 50
-#ifdef ERTS_SMP
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(sl_element,
ErtsThrQElement_t,
1000,
ERTS_ALC_T_THR_Q_EL_SL)
-#else
-
-static void
-init_sl_element_alloc(void)
-{
-}
-
-static ErtsThrQElement_t *
-sl_element_alloc(void)
-{
- return erts_alloc(ERTS_ALC_T_THR_Q_EL_SL,
- sizeof(ErtsThrQElement_t));
-}
-
-static void
-sl_element_free(ErtsThrQElement_t *p)
-{
- erts_free(ERTS_ALC_T_THR_Q_EL_SL, p);
-}
-
-#endif
#define ErtsThrQDirtyReadEl(A) \
((ErtsThrQElement_t *) erts_atomic_read_dirty((A)))
@@ -135,14 +113,6 @@ static void noop_callback(void *arg) { }
void
erts_thr_q_initialize(ErtsThrQ_t *q, ErtsThrQInit_t *qi)
{
-#ifndef USE_THREADS
- q->init = *qi;
- if (!q->init.notify)
- q->init.notify = noop_callback;
- q->first = NULL;
- q->last = NULL;
- q->q.blk = NULL;
-#else
erts_atomic_init_nob(&q->tail.data.marker.next, ERTS_AINT_NULL);
q->tail.data.marker.data.ptr = NULL;
erts_atomic_init_nob(&q->tail.data.last,
@@ -164,10 +134,8 @@ erts_thr_q_initialize(ErtsThrQ_t *q, ErtsThrQInit_t *qi)
q->head.deq_fini.automatic = qi->auto_finalize_dequeue;
q->head.deq_fini.start = NULL;
q->head.deq_fini.end = NULL;
-#ifdef ERTS_SMP
q->head.next.thr_progress = erts_thr_progress_current();
q->head.next.thr_progress_reached = 1;
-#endif
q->head.next.um_refc_ix = 1;
q->head.next.unref_end = &q->tail.data.marker;
q->head.used_marker = 1;
@@ -176,15 +144,12 @@ erts_thr_q_initialize(ErtsThrQ_t *q, ErtsThrQInit_t *qi)
q->q.finalizing = 0;
q->q.live = qi->live.queue;
q->q.blk = NULL;
-#endif
}
ErtsThrQCleanState_t
erts_thr_q_finalize(ErtsThrQ_t *q)
{
-#ifdef USE_THREADS
q->q.finalizing = 1;
-#endif
while (erts_thr_q_dequeue(q));
return erts_thr_q_clean(q);
}
@@ -229,7 +194,6 @@ erts_thr_q_destroy(ErtsThrQ_t *q)
return erts_thr_q_finalize(q);
}
-#ifdef USE_THREADS
static void
destroy(ErtsThrQ_t *q)
@@ -249,7 +213,6 @@ destroy(ErtsThrQ_t *q)
erts_free(atype, q->q.blk);
}
-#endif
static ERTS_INLINE ErtsThrQElement_t *
element_live_alloc(ErtsThrQLive_t live)
@@ -267,11 +230,7 @@ static ERTS_INLINE ErtsThrQElement_t *
element_alloc(ErtsThrQ_t *q)
{
ErtsThrQLive_t live;
-#ifdef USE_THREADS
live = q->tail.data.live;
-#else
- live = q->init.live.objects;
-#endif
return element_live_alloc(live);
}
@@ -291,15 +250,10 @@ static ERTS_INLINE void
element_free(ErtsThrQ_t *q, ErtsThrQElement_t *el)
{
ErtsThrQLive_t live;
-#ifdef USE_THREADS
live = q->head.live;
-#else
- live = q->init.live.objects;
-#endif
element_live_free(live, el);
}
-#ifdef USE_THREADS
static ERTS_INLINE ErtsThrQElement_t *
enqueue_managed(ErtsThrQ_t *q, ErtsThrQElement_t *this)
@@ -423,11 +377,9 @@ clean(ErtsThrQ_t *q, int max_ops, int do_notify)
return ERTS_THR_Q_CLEAN;
}
-#ifdef ERTS_SMP
if (q->head.next.thr_progress_reached
|| erts_thr_progress_has_reached(q->head.next.thr_progress)) {
q->head.next.thr_progress_reached = 1;
-#endif
um_refc_ix = q->head.next.um_refc_ix;
if (erts_atomic_read_acqb(&q->tail.data.um_refc[um_refc_ix]) == 0) {
/* Move unreferenced end pointer forward... */
@@ -442,20 +394,14 @@ clean(ErtsThrQ_t *q, int max_ops, int do_notify)
ERTS_SMP_MEMORY_BARRIER;
else {
q->head.next.unref_end = (ErtsThrQElement_t *) ilast;
-#ifdef ERTS_SMP
q->head.next.thr_progress = erts_thr_progress_later(NULL);
-#endif
erts_atomic32_set_relb(&q->tail.data.um_refc_ix,
um_refc_ix);
q->head.next.um_refc_ix = um_refc_ix == 0 ? 1 : 0;
-#ifdef ERTS_SMP
q->head.next.thr_progress_reached = 0;
-#endif
}
}
-#ifdef ERTS_SMP
}
-#endif
head = ErtsThrQDirtyReadEl(&q->head.head);
if (q->head.first == head) {
@@ -489,9 +435,7 @@ clean(ErtsThrQ_t *q, int max_ops, int do_notify)
check_thr_progress:
-#ifdef ERTS_SMP
if (q->head.next.thr_progress_reached)
-#endif
{
int um_refc_ix = q->head.next.um_refc_ix;
if (erts_atomic_read_acqb(&q->tail.data.um_refc[um_refc_ix]) == 0) {
@@ -505,24 +449,16 @@ check_thr_progress:
return ERTS_THR_Q_NEED_THR_PRGR;
}
-#endif
ErtsThrQCleanState_t
erts_thr_q_clean(ErtsThrQ_t *q)
{
-#ifdef USE_THREADS
return clean(q, ERTS_THR_Q_MAX_SCHED_CLEAN_OPS, 0);
-#else
- return ERTS_THR_Q_CLEAN;
-#endif
}
ErtsThrQCleanState_t
erts_thr_q_inspect(ErtsThrQ_t *q, int ensure_empty)
{
-#ifndef USE_THREADS
- return ERTS_THR_Q_CLEAN;
-#else
ErtsThrQElement_t *head = ErtsThrQDirtyReadEl(&q->head.head);
if (ensure_empty) {
erts_aint_t inext;
@@ -553,39 +489,21 @@ erts_thr_q_inspect(ErtsThrQ_t *q, int ensure_empty)
if (q->head.first != q->head.unref_end)
return ERTS_THR_Q_DIRTY;
-#ifdef ERTS_SMP
if (q->head.next.thr_progress_reached)
-#endif
{
int um_refc_ix = q->head.next.um_refc_ix;
if (erts_atomic_read_acqb(&q->tail.data.um_refc[um_refc_ix]) == 0)
return ERTS_THR_Q_DIRTY;
}
return ERTS_THR_Q_NEED_THR_PRGR;
-#endif
}
static void
enqueue(ErtsThrQ_t *q, void *data, ErtsThrQElement_t *this)
{
-#ifndef USE_THREADS
- ASSERT(data);
-
- this->next = NULL;
- this->data.ptr = data;
-
- if (q->last)
- q->last->next = this;
- else {
- q->first = q->last = this;
- q->init.notify(q->init.arg);
- }
-#else
int notify;
int um_refc_ix = 0;
-#ifdef ERTS_SMP
int unmanaged_thread;
-#endif
#if ERTS_THR_Q_DBG_CHK_DATA
if (!data)
@@ -596,10 +514,8 @@ enqueue(ErtsThrQ_t *q, void *data, ErtsThrQElement_t *this)
this->data.ptr = data;
-#ifdef ERTS_SMP
unmanaged_thread = !erts_thr_progress_is_managed_thread();
if (unmanaged_thread)
-#endif
{
um_refc_ix = erts_atomic32_read_acqb(&q->tail.data.um_refc_ix);
while (1) {
@@ -616,9 +532,7 @@ enqueue(ErtsThrQ_t *q, void *data, ErtsThrQElement_t *this)
notify = this == enqueue_managed(q, this);
-#ifdef ERTS_SMP
if (unmanaged_thread)
-#endif
{
if (notify)
erts_atomic_dec_relb(&q->tail.data.um_refc[um_refc_ix]);
@@ -627,7 +541,6 @@ enqueue(ErtsThrQ_t *q, void *data, ErtsThrQElement_t *this)
}
if (notify)
q->tail.data.notify(q->tail.data.arg);
-#endif
}
void
@@ -645,9 +558,6 @@ erts_thr_q_prepare_enqueue(ErtsThrQ_t *q)
int
erts_thr_q_get_finalize_dequeue_data(ErtsThrQ_t *q, ErtsThrQFinDeQ_t *fdp)
{
-#ifndef USE_THREADS
- return 0;
-#else
#ifdef DEBUG
if (!q->head.deq_fini.start) {
ASSERT(!q->head.deq_fini.end);
@@ -670,14 +580,12 @@ erts_thr_q_get_finalize_dequeue_data(ErtsThrQ_t *q, ErtsThrQFinDeQ_t *fdp)
q->head.deq_fini.start = NULL;
q->head.deq_fini.end = NULL;
return fdp->start != NULL;
-#endif
}
void
erts_thr_q_append_finalize_dequeue_data(ErtsThrQFinDeQ_t *fdp0,
ErtsThrQFinDeQ_t *fdp1)
{
-#ifdef USE_THREADS
if (fdp1->start) {
if (fdp0->end)
ErtsThrQDirtySetEl(&fdp0->end->next, fdp1->start);
@@ -685,13 +593,11 @@ erts_thr_q_append_finalize_dequeue_data(ErtsThrQFinDeQ_t *fdp0,
fdp0->start = fdp1->start;
fdp0->end = fdp1->end;
}
-#endif
}
int erts_thr_q_finalize_dequeue(ErtsThrQFinDeQ_t *state)
{
-#ifdef USE_THREADS
ErtsThrQElement_t *start = state->start;
if (start) {
ErtsThrQLive_t live;
@@ -710,17 +616,14 @@ int erts_thr_q_finalize_dequeue(ErtsThrQFinDeQ_t *state)
return 1; /* More to do */
state->end = NULL;
}
-#endif
return 0;
}
void
erts_thr_q_finalize_dequeue_state_init(ErtsThrQFinDeQ_t *state)
{
-#ifdef USE_THREADS
state->start = NULL;
state->end = NULL;
-#endif
}
@@ -734,22 +637,6 @@ erts_thr_q_enqueue_prepared(ErtsThrQ_t *q, void *data, ErtsThrQPrepEnQ_t *prep)
void *
erts_thr_q_dequeue(ErtsThrQ_t *q)
{
-#ifndef USE_THREADS
- void *res;
- ErtsThrQElement_t *tmp;
-
- if (!q->first)
- return NULL;
- tmp = q->first;
- res = tmp->data.ptr;
- q->first = tmp->next;
- if (!q->first)
- q->last = NULL;
-
- element_free(q, tmp);
-
- return res;
-#else
ErtsThrQElement_t *head;
erts_aint_t inext;
void *res;
@@ -778,7 +665,6 @@ erts_thr_q_dequeue(ErtsThrQ_t *q)
? ERTS_THR_Q_MAX_DEQUEUE_CLEAN_OPS
: ERTS_THR_Q_MAX_SCHED_CLEAN_OPS), 1);
return res;
-#endif
}
#ifdef USE_LTTNG_VM_TRACEPOINTS
@@ -786,14 +672,6 @@ int
erts_thr_q_length_dirty(ErtsThrQ_t *q)
{
int n = 0;
-#ifndef USE_THREADS
- void *res;
- ErtsThrQElement_t *tmp;
-
- for (tmp = q->first; tmp != NULL; tmp = tmp->next) {
- n++;
- }
-#else
ErtsThrQElement_t *e;
erts_aint_t inext;
@@ -808,7 +686,6 @@ erts_thr_q_length_dirty(ErtsThrQ_t *q)
}
inext = erts_atomic_read_acqb(&e->next);
}
-#endif
return n;
}
#endif
diff --git a/erts/emulator/beam/erl_thr_queue.h b/erts/emulator/beam/erl_thr_queue.h
index 705a67af4c..163a25318d 100644
--- a/erts/emulator/beam/erl_thr_queue.h
+++ b/erts/emulator/beam/erl_thr_queue.h
@@ -78,11 +78,7 @@ typedef struct ErtsThrQElement_t_ ErtsThrQElement_t;
typedef struct ErtsThrQElement_t ErtsThrQPrepEnQ_t;
struct ErtsThrQElement_t_ {
-#ifdef USE_THREADS
erts_atomic_t next;
-#else
- ErtsThrQElement_t *next;
-#endif
union {
erts_atomic_t atmc;
void *ptr;
@@ -100,7 +96,6 @@ typedef enum {
ERTS_THR_Q_DIRTY,
} ErtsThrQCleanState_t;
-#ifdef USE_THREADS
typedef struct {
ErtsThrQElement_t marker;
@@ -108,9 +103,7 @@ typedef struct {
erts_atomic_t um_refc[2];
erts_atomic32_t um_refc_ix;
ErtsThrQLive_t live;
-#ifdef ERTS_SMP
erts_atomic32_t thr_prgr_clean_scheduled;
-#endif
void *arg;
void (*notify)(void *);
} ErtsThrQTail_t;
@@ -141,10 +134,8 @@ struct ErtsThrQ_t_ {
ErtsThrQElement_t *end;
} deq_fini;
struct {
-#ifdef ERTS_SMP
ErtsThrPrgrVal thr_progress;
int thr_progress_reached;
-#endif
int um_refc_ix;
ErtsThrQElement_t *unref_end;
} next;
@@ -159,18 +150,6 @@ struct ErtsThrQ_t_ {
} q;
};
-#else /* !USE_THREADS */
-
-struct ErtsThrQ_t_ {
- ErtsThrQInit_t init;
- ErtsThrQElement_t *first;
- ErtsThrQElement_t *last;
- struct {
- void *blk;
- } q;
-};
-
-#endif
void erts_thr_q_init(void);
void erts_thr_q_initialize(ErtsThrQ_t *, ErtsThrQInit_t *);
@@ -194,19 +173,15 @@ void erts_thr_q_finalize_dequeue_state_init(ErtsThrQFinDeQ_t *);
int erts_thr_q_length_dirty(ErtsThrQ_t *);
#endif
-#ifdef ERTS_SMP
ERTS_GLB_INLINE ErtsThrPrgrVal erts_thr_q_need_thr_progress(ErtsThrQ_t *q);
-#endif
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
-#ifdef ERTS_SMP
ERTS_GLB_INLINE ErtsThrPrgrVal
erts_thr_q_need_thr_progress(ErtsThrQ_t *q)
{
return q->head.next.thr_progress;
}
-#endif
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
diff --git a/erts/emulator/beam/erl_threads.h b/erts/emulator/beam/erl_threads.h
index 3fdf29d678..55e8254de2 100644
--- a/erts/emulator/beam/erl_threads.h
+++ b/erts/emulator/beam/erl_threads.h
@@ -262,7 +262,6 @@
#include "erl_lock_flags.h"
#include "erl_term.h"
-#ifdef USE_THREADS
#define ETHR_TRY_INLINE_FUNCS
#include "ethread.h"
@@ -405,70 +404,6 @@ __decl_noreturn void __noreturn erts_thr_fatal_error(int, char *);
# define ERTS_HAVE_REC_MTX_INIT ETHR_HAVE_ETHR_REC_MUTEX_INIT
#endif
-#else /* #ifdef USE_THREADS */
-
-#define ERTS_THR_MEMORY_BARRIER
-#define ERTS_THR_WRITE_MEMORY_BARRIER
-#define ERTS_THR_READ_MEMORY_BARRIER
-#define ERTS_THR_DATA_DEPENDENCY_READ_MEMORY_BARRIER
-
-#define ERTS_THR_OPTS_DEFAULT_INITER 0
-typedef int erts_thr_opts_t;
-typedef int erts_thr_init_data_t;
-typedef int erts_thr_late_init_data_t;
-typedef int erts_tid_t;
-typedef int erts_mtx_t;
-typedef int erts_cnd_t;
-#define ERTS_RWMTX_OPT_DEFAULT_INITER {0}
-#define ERTS_RWMTX_TYPE_NORMAL 0
-#define ERTS_RWMTX_TYPE_FREQUENT_READ 0
-#define ERTS_RWMTX_TYPE_EXTREMELY_FREQUENT_READ 0
-#define ERTS_RWMTX_LONG_LIVED 0
-#define ERTS_RWMTX_SHORT_LIVED 0
-#define ERTS_RWMTX_UNKNOWN_LIVED 0
-typedef struct {
- char type;
- char lived;
- int main_spincount;
- int aux_spincount;
-} erts_rwmtx_opt_t;
-typedef int erts_rwmtx_t;
-typedef int erts_tsd_key_t;
-typedef int erts_tse_t;
-
-typedef struct { SWord sint[2]; } erts_dw_aint_t;
-typedef SWord erts_aint_t;
-typedef Sint32 erts_aint32_t;
-typedef Sint64 erts_aint64_t;
-
-#define erts_dw_atomic_t erts_dw_aint_t
-#define erts_atomic_t erts_aint_t
-#define erts_atomic32_t erts_aint32_t
-#define erts_atomic64_t erts_aint64_t
-
-#if __GNUC__ > 2
-typedef struct { } erts_spinlock_t;
-typedef struct { } erts_rwlock_t;
-#else
-typedef struct { int gcc_is_buggy; } erts_spinlock_t;
-typedef struct { int gcc_is_buggy; } erts_rwlock_t;
-#endif
-
-#ifdef WORDS_BIGENDIAN
-#define ERTS_DW_AINT_LOW_WORD 1
-#define ERTS_DW_AINT_HIGH_WORD 0
-#else
-#define ERTS_DW_AINT_LOW_WORD 0
-#define ERTS_DW_AINT_HIGH_WORD 1
-#endif
-
-#define ERTS_MTX_INITER 0
-#define ERTS_CND_INITER 0
-#define ERTS_THR_INIT_DATA_DEF_INITER 0
-
-#define ERTS_HAVE_REC_MTX_INIT 1
-
-#endif /* #ifdef USE_THREADS */
#define erts_no_dw_atomic_t erts_dw_aint_t
#define erts_no_atomic_t erts_aint_t
@@ -670,13 +605,10 @@ ERTS_GLB_INLINE void erts_thr_sigmask(int how, const sigset_t *set,
sigset_t *oset);
ERTS_GLB_INLINE void erts_thr_sigwait(const sigset_t *set, int *sig);
-#ifdef USE_THREADS
ERTS_GLB_INLINE void erts_thr_kill(erts_tid_t tid, int sig);
-#endif
#endif /* #ifdef HAVE_ETHR_SIG_FUNCS */
-#ifdef USE_THREADS
ERTS_GLB_INLINE erts_aint_t
erts_atomic_read_bset_nob(erts_atomic_t *var,
@@ -1684,379 +1616,6 @@ erts_atomic64_read_dirty(erts_atomic64_t *var)
#endif /* ARCH_32 */
-#else /* !USE_THREADS */
-
-/* Double word size atomics */
-
-#define erts_dw_atomic_init_nob erts_no_dw_atomic_set
-#define erts_dw_atomic_set_nob erts_no_dw_atomic_set
-#define erts_dw_atomic_read_nob erts_no_dw_atomic_read
-#define erts_dw_atomic_cmpxchg_nob erts_no_dw_atomic_cmpxchg
-
-#define erts_dw_atomic_init_mb erts_no_dw_atomic_init
-#define erts_dw_atomic_set_mb erts_no_dw_atomic_set
-#define erts_dw_atomic_read_mb erts_no_dw_atomic_read
-#define erts_dw_atomic_cmpxchg_mb erts_no_dw_atomic_cmpxchg
-
-#define erts_dw_atomic_init_acqb erts_no_dw_atomic_init
-#define erts_dw_atomic_set_acqb erts_no_dw_atomic_set
-#define erts_dw_atomic_read_acqb erts_no_dw_atomic_read
-#define erts_dw_atomic_cmpxchg_acqb erts_no_dw_atomic_cmpxchg
-
-#define erts_dw_atomic_init_relb erts_no_dw_atomic_init
-#define erts_dw_atomic_set_relb erts_no_dw_atomic_set
-#define erts_dw_atomic_read_relb erts_no_dw_atomic_read
-#define erts_dw_atomic_cmpxchg_relb erts_no_dw_atomic_cmpxchg
-
-#define erts_dw_atomic_init_ddrb erts_no_dw_atomic_init
-#define erts_dw_atomic_set_ddrb erts_no_dw_atomic_set
-#define erts_dw_atomic_read_ddrb erts_no_dw_atomic_read
-#define erts_dw_atomic_cmpxchg_ddrb erts_no_dw_atomic_cmpxchg
-
-#define erts_dw_atomic_init_rb erts_no_dw_atomic_init
-#define erts_dw_atomic_set_rb erts_no_dw_atomic_set
-#define erts_dw_atomic_read_rb erts_no_dw_atomic_read
-#define erts_dw_atomic_cmpxchg_rb erts_no_dw_atomic_cmpxchg
-
-#define erts_dw_atomic_init_wb erts_no_dw_atomic_init
-#define erts_dw_atomic_set_wb erts_no_dw_atomic_set
-#define erts_dw_atomic_read_wb erts_no_dw_atomic_read
-#define erts_dw_atomic_cmpxchg_wb erts_no_dw_atomic_cmpxchg
-
-#define erts_dw_atomic_set_dirty erts_no_dw_atomic_set
-#define erts_dw_atomic_read_dirty erts_no_dw_atomic_read
-
-/* Word size atomics */
-
-#define erts_atomic_init_nob erts_no_atomic_set
-#define erts_atomic_set_nob erts_no_atomic_set
-#define erts_atomic_read_nob erts_no_atomic_read
-#define erts_atomic_inc_read_nob erts_no_atomic_inc_read
-#define erts_atomic_dec_read_nob erts_no_atomic_dec_read
-#define erts_atomic_inc_nob erts_no_atomic_inc
-#define erts_atomic_dec_nob erts_no_atomic_dec
-#define erts_atomic_add_read_nob erts_no_atomic_add_read
-#define erts_atomic_add_nob erts_no_atomic_add
-#define erts_atomic_read_bor_nob erts_no_atomic_read_bor
-#define erts_atomic_read_band_nob erts_no_atomic_read_band
-#define erts_atomic_xchg_nob erts_no_atomic_xchg
-#define erts_atomic_cmpxchg_nob erts_no_atomic_cmpxchg
-#define erts_atomic_read_bset_nob erts_no_atomic_read_bset
-
-#define erts_atomic_init_mb erts_no_atomic_set
-#define erts_atomic_set_mb erts_no_atomic_set
-#define erts_atomic_read_mb erts_no_atomic_read
-#define erts_atomic_inc_read_mb erts_no_atomic_inc_read
-#define erts_atomic_dec_read_mb erts_no_atomic_dec_read
-#define erts_atomic_inc_mb erts_no_atomic_inc
-#define erts_atomic_dec_mb erts_no_atomic_dec
-#define erts_atomic_add_read_mb erts_no_atomic_add_read
-#define erts_atomic_add_mb erts_no_atomic_add
-#define erts_atomic_read_bor_mb erts_no_atomic_read_bor
-#define erts_atomic_read_band_mb erts_no_atomic_read_band
-#define erts_atomic_xchg_mb erts_no_atomic_xchg
-#define erts_atomic_cmpxchg_mb erts_no_atomic_cmpxchg
-#define erts_atomic_read_bset_mb erts_no_atomic_read_bset
-
-#define erts_atomic_init_acqb erts_no_atomic_set
-#define erts_atomic_set_acqb erts_no_atomic_set
-#define erts_atomic_read_acqb erts_no_atomic_read
-#define erts_atomic_inc_read_acqb erts_no_atomic_inc_read
-#define erts_atomic_dec_read_acqb erts_no_atomic_dec_read
-#define erts_atomic_inc_acqb erts_no_atomic_inc
-#define erts_atomic_dec_acqb erts_no_atomic_dec
-#define erts_atomic_add_read_acqb erts_no_atomic_add_read
-#define erts_atomic_add_acqb erts_no_atomic_add
-#define erts_atomic_read_bor_acqb erts_no_atomic_read_bor
-#define erts_atomic_read_band_acqb erts_no_atomic_read_band
-#define erts_atomic_xchg_acqb erts_no_atomic_xchg
-#define erts_atomic_cmpxchg_acqb erts_no_atomic_cmpxchg
-#define erts_atomic_read_bset_acqb erts_no_atomic_read_bset
-
-#define erts_atomic_init_relb erts_no_atomic_set
-#define erts_atomic_set_relb erts_no_atomic_set
-#define erts_atomic_read_relb erts_no_atomic_read
-#define erts_atomic_inc_read_relb erts_no_atomic_inc_read
-#define erts_atomic_dec_read_relb erts_no_atomic_dec_read
-#define erts_atomic_inc_relb erts_no_atomic_inc
-#define erts_atomic_dec_relb erts_no_atomic_dec
-#define erts_atomic_add_read_relb erts_no_atomic_add_read
-#define erts_atomic_add_relb erts_no_atomic_add
-#define erts_atomic_read_bor_relb erts_no_atomic_read_bor
-#define erts_atomic_read_band_relb erts_no_atomic_read_band
-#define erts_atomic_xchg_relb erts_no_atomic_xchg
-#define erts_atomic_cmpxchg_relb erts_no_atomic_cmpxchg
-#define erts_atomic_read_bset_relb erts_no_atomic_read_bset
-
-#define erts_atomic_init_ddrb erts_no_atomic_set
-#define erts_atomic_set_ddrb erts_no_atomic_set
-#define erts_atomic_read_ddrb erts_no_atomic_read
-#define erts_atomic_inc_read_ddrb erts_no_atomic_inc_read
-#define erts_atomic_dec_read_ddrb erts_no_atomic_dec_read
-#define erts_atomic_inc_ddrb erts_no_atomic_inc
-#define erts_atomic_dec_ddrb erts_no_atomic_dec
-#define erts_atomic_add_read_ddrb erts_no_atomic_add_read
-#define erts_atomic_add_ddrb erts_no_atomic_add
-#define erts_atomic_read_bor_ddrb erts_no_atomic_read_bor
-#define erts_atomic_read_band_ddrb erts_no_atomic_read_band
-#define erts_atomic_xchg_ddrb erts_no_atomic_xchg
-#define erts_atomic_cmpxchg_ddrb erts_no_atomic_cmpxchg
-#define erts_atomic_read_bset_ddrb erts_no_atomic_read_bset
-
-#define erts_atomic_init_rb erts_no_atomic_set
-#define erts_atomic_set_rb erts_no_atomic_set
-#define erts_atomic_read_rb erts_no_atomic_read
-#define erts_atomic_inc_read_rb erts_no_atomic_inc_read
-#define erts_atomic_dec_read_rb erts_no_atomic_dec_read
-#define erts_atomic_inc_rb erts_no_atomic_inc
-#define erts_atomic_dec_rb erts_no_atomic_dec
-#define erts_atomic_add_read_rb erts_no_atomic_add_read
-#define erts_atomic_add_rb erts_no_atomic_add
-#define erts_atomic_read_bor_rb erts_no_atomic_read_bor
-#define erts_atomic_read_band_rb erts_no_atomic_read_band
-#define erts_atomic_xchg_rb erts_no_atomic_xchg
-#define erts_atomic_cmpxchg_rb erts_no_atomic_cmpxchg
-#define erts_atomic_read_bset_rb erts_no_atomic_read_bset
-
-#define erts_atomic_init_wb erts_no_atomic_set
-#define erts_atomic_set_wb erts_no_atomic_set
-#define erts_atomic_read_wb erts_no_atomic_read
-#define erts_atomic_inc_read_wb erts_no_atomic_inc_read
-#define erts_atomic_dec_read_wb erts_no_atomic_dec_read
-#define erts_atomic_inc_wb erts_no_atomic_inc
-#define erts_atomic_dec_wb erts_no_atomic_dec
-#define erts_atomic_add_read_wb erts_no_atomic_add_read
-#define erts_atomic_add_wb erts_no_atomic_add
-#define erts_atomic_read_bor_wb erts_no_atomic_read_bor
-#define erts_atomic_read_band_wb erts_no_atomic_read_band
-#define erts_atomic_xchg_wb erts_no_atomic_xchg
-#define erts_atomic_cmpxchg_wb erts_no_atomic_cmpxchg
-#define erts_atomic_read_bset_wb erts_no_atomic_read_bset
-
-#define erts_atomic_set_dirty erts_no_atomic_set
-#define erts_atomic_read_dirty erts_no_atomic_read
-
-/* 32-bit atomics */
-
-#define erts_atomic32_init_nob erts_no_atomic32_set
-#define erts_atomic32_set_nob erts_no_atomic32_set
-#define erts_atomic32_read_nob erts_no_atomic32_read
-#define erts_atomic32_inc_read_nob erts_no_atomic32_inc_read
-#define erts_atomic32_dec_read_nob erts_no_atomic32_dec_read
-#define erts_atomic32_inc_nob erts_no_atomic32_inc
-#define erts_atomic32_dec_nob erts_no_atomic32_dec
-#define erts_atomic32_add_read_nob erts_no_atomic32_add_read
-#define erts_atomic32_add_nob erts_no_atomic32_add
-#define erts_atomic32_read_bor_nob erts_no_atomic32_read_bor
-#define erts_atomic32_read_band_nob erts_no_atomic32_read_band
-#define erts_atomic32_xchg_nob erts_no_atomic32_xchg
-#define erts_atomic32_cmpxchg_nob erts_no_atomic32_cmpxchg
-#define erts_atomic32_read_bset_nob erts_no_atomic32_read_bset
-
-#define erts_atomic32_init_mb erts_no_atomic32_set
-#define erts_atomic32_set_mb erts_no_atomic32_set
-#define erts_atomic32_read_mb erts_no_atomic32_read
-#define erts_atomic32_inc_read_mb erts_no_atomic32_inc_read
-#define erts_atomic32_dec_read_mb erts_no_atomic32_dec_read
-#define erts_atomic32_inc_mb erts_no_atomic32_inc
-#define erts_atomic32_dec_mb erts_no_atomic32_dec
-#define erts_atomic32_add_read_mb erts_no_atomic32_add_read
-#define erts_atomic32_add_mb erts_no_atomic32_add
-#define erts_atomic32_read_bor_mb erts_no_atomic32_read_bor
-#define erts_atomic32_read_band_mb erts_no_atomic32_read_band
-#define erts_atomic32_xchg_mb erts_no_atomic32_xchg
-#define erts_atomic32_cmpxchg_mb erts_no_atomic32_cmpxchg
-#define erts_atomic32_read_bset_mb erts_no_atomic32_read_bset
-
-#define erts_atomic32_init_acqb erts_no_atomic32_set
-#define erts_atomic32_set_acqb erts_no_atomic32_set
-#define erts_atomic32_read_acqb erts_no_atomic32_read
-#define erts_atomic32_inc_read_acqb erts_no_atomic32_inc_read
-#define erts_atomic32_dec_read_acqb erts_no_atomic32_dec_read
-#define erts_atomic32_inc_acqb erts_no_atomic32_inc
-#define erts_atomic32_dec_acqb erts_no_atomic32_dec
-#define erts_atomic32_add_read_acqb erts_no_atomic32_add_read
-#define erts_atomic32_add_acqb erts_no_atomic32_add
-#define erts_atomic32_read_bor_acqb erts_no_atomic32_read_bor
-#define erts_atomic32_read_band_acqb erts_no_atomic32_read_band
-#define erts_atomic32_xchg_acqb erts_no_atomic32_xchg
-#define erts_atomic32_cmpxchg_acqb erts_no_atomic32_cmpxchg
-#define erts_atomic32_read_bset_acqb erts_no_atomic32_read_bset
-
-#define erts_atomic32_init_relb erts_no_atomic32_set
-#define erts_atomic32_set_relb erts_no_atomic32_set
-#define erts_atomic32_read_relb erts_no_atomic32_read
-#define erts_atomic32_inc_read_relb erts_no_atomic32_inc_read
-#define erts_atomic32_dec_read_relb erts_no_atomic32_dec_read
-#define erts_atomic32_inc_relb erts_no_atomic32_inc
-#define erts_atomic32_dec_relb erts_no_atomic32_dec
-#define erts_atomic32_add_read_relb erts_no_atomic32_add_read
-#define erts_atomic32_add_relb erts_no_atomic32_add
-#define erts_atomic32_read_bor_relb erts_no_atomic32_read_bor
-#define erts_atomic32_read_band_relb erts_no_atomic32_read_band
-#define erts_atomic32_xchg_relb erts_no_atomic32_xchg
-#define erts_atomic32_cmpxchg_relb erts_no_atomic32_cmpxchg
-#define erts_atomic32_read_bset_relb erts_no_atomic32_read_bset
-
-#define erts_atomic32_init_ddrb erts_no_atomic32_set
-#define erts_atomic32_set_ddrb erts_no_atomic32_set
-#define erts_atomic32_read_ddrb erts_no_atomic32_read
-#define erts_atomic32_inc_read_ddrb erts_no_atomic32_inc_read
-#define erts_atomic32_dec_read_ddrb erts_no_atomic32_dec_read
-#define erts_atomic32_inc_ddrb erts_no_atomic32_inc
-#define erts_atomic32_dec_ddrb erts_no_atomic32_dec
-#define erts_atomic32_add_read_ddrb erts_no_atomic32_add_read
-#define erts_atomic32_add_ddrb erts_no_atomic32_add
-#define erts_atomic32_read_bor_ddrb erts_no_atomic32_read_bor
-#define erts_atomic32_read_band_ddrb erts_no_atomic32_read_band
-#define erts_atomic32_xchg_ddrb erts_no_atomic32_xchg
-#define erts_atomic32_cmpxchg_ddrb erts_no_atomic32_cmpxchg
-#define erts_atomic32_read_bset_ddrb erts_no_atomic32_read_bset
-
-#define erts_atomic32_init_rb erts_no_atomic32_set
-#define erts_atomic32_set_rb erts_no_atomic32_set
-#define erts_atomic32_read_rb erts_no_atomic32_read
-#define erts_atomic32_inc_read_rb erts_no_atomic32_inc_read
-#define erts_atomic32_dec_read_rb erts_no_atomic32_dec_read
-#define erts_atomic32_inc_rb erts_no_atomic32_inc
-#define erts_atomic32_dec_rb erts_no_atomic32_dec
-#define erts_atomic32_add_read_rb erts_no_atomic32_add_read
-#define erts_atomic32_add_rb erts_no_atomic32_add
-#define erts_atomic32_read_bor_rb erts_no_atomic32_read_bor
-#define erts_atomic32_read_band_rb erts_no_atomic32_read_band
-#define erts_atomic32_xchg_rb erts_no_atomic32_xchg
-#define erts_atomic32_cmpxchg_rb erts_no_atomic32_cmpxchg
-#define erts_atomic32_read_bset_rb erts_no_atomic32_read_bset
-
-#define erts_atomic32_init_wb erts_no_atomic32_set
-#define erts_atomic32_set_wb erts_no_atomic32_set
-#define erts_atomic32_read_wb erts_no_atomic32_read
-#define erts_atomic32_inc_read_wb erts_no_atomic32_inc_read
-#define erts_atomic32_dec_read_wb erts_no_atomic32_dec_read
-#define erts_atomic32_inc_wb erts_no_atomic32_inc
-#define erts_atomic32_dec_wb erts_no_atomic32_dec
-#define erts_atomic32_add_read_wb erts_no_atomic32_add_read
-#define erts_atomic32_add_wb erts_no_atomic32_add
-#define erts_atomic32_read_bor_wb erts_no_atomic32_read_bor
-#define erts_atomic32_read_band_wb erts_no_atomic32_read_band
-#define erts_atomic32_xchg_wb erts_no_atomic32_xchg
-#define erts_atomic32_cmpxchg_wb erts_no_atomic32_cmpxchg
-#define erts_atomic32_read_bset_wb erts_no_atomic32_read_bset
-
-#define erts_atomic32_set_dirty erts_no_atomic32_set
-#define erts_atomic32_read_dirty erts_no_atomic32_read
-
-/* 64-bit atomics */
-
-#define erts_atomic64_init_nob erts_no_atomic64_set
-#define erts_atomic64_set_nob erts_no_atomic64_set
-#define erts_atomic64_read_nob erts_no_atomic64_read
-#define erts_atomic64_inc_read_nob erts_no_atomic64_inc_read
-#define erts_atomic64_dec_read_nob erts_no_atomic64_dec_read
-#define erts_atomic64_inc_nob erts_no_atomic64_inc
-#define erts_atomic64_dec_nob erts_no_atomic64_dec
-#define erts_atomic64_add_read_nob erts_no_atomic64_add_read
-#define erts_atomic64_add_nob erts_no_atomic64_add
-#define erts_atomic64_read_bor_nob erts_no_atomic64_read_bor
-#define erts_atomic64_read_band_nob erts_no_atomic64_read_band
-#define erts_atomic64_xchg_nob erts_no_atomic64_xchg
-#define erts_atomic64_cmpxchg_nob erts_no_atomic64_cmpxchg
-#define erts_atomic64_read_bset_nob erts_no_atomic64_read_bset
-
-#define erts_atomic64_init_mb erts_no_atomic64_set
-#define erts_atomic64_set_mb erts_no_atomic64_set
-#define erts_atomic64_read_mb erts_no_atomic64_read
-#define erts_atomic64_inc_read_mb erts_no_atomic64_inc_read
-#define erts_atomic64_dec_read_mb erts_no_atomic64_dec_read
-#define erts_atomic64_inc_mb erts_no_atomic64_inc
-#define erts_atomic64_dec_mb erts_no_atomic64_dec
-#define erts_atomic64_add_read_mb erts_no_atomic64_add_read
-#define erts_atomic64_add_mb erts_no_atomic64_add
-#define erts_atomic64_read_bor_mb erts_no_atomic64_read_bor
-#define erts_atomic64_read_band_mb erts_no_atomic64_read_band
-#define erts_atomic64_xchg_mb erts_no_atomic64_xchg
-#define erts_atomic64_cmpxchg_mb erts_no_atomic64_cmpxchg
-#define erts_atomic64_read_bset_mb erts_no_atomic64_read_bset
-
-#define erts_atomic64_init_acqb erts_no_atomic64_set
-#define erts_atomic64_set_acqb erts_no_atomic64_set
-#define erts_atomic64_read_acqb erts_no_atomic64_read
-#define erts_atomic64_inc_read_acqb erts_no_atomic64_inc_read
-#define erts_atomic64_dec_read_acqb erts_no_atomic64_dec_read
-#define erts_atomic64_inc_acqb erts_no_atomic64_inc
-#define erts_atomic64_dec_acqb erts_no_atomic64_dec
-#define erts_atomic64_add_read_acqb erts_no_atomic64_add_read
-#define erts_atomic64_add_acqb erts_no_atomic64_add
-#define erts_atomic64_read_bor_acqb erts_no_atomic64_read_bor
-#define erts_atomic64_read_band_acqb erts_no_atomic64_read_band
-#define erts_atomic64_xchg_acqb erts_no_atomic64_xchg
-#define erts_atomic64_cmpxchg_acqb erts_no_atomic64_cmpxchg
-#define erts_atomic64_read_bset_acqb erts_no_atomic64_read_bset
-
-#define erts_atomic64_init_relb erts_no_atomic64_set
-#define erts_atomic64_set_relb erts_no_atomic64_set
-#define erts_atomic64_read_relb erts_no_atomic64_read
-#define erts_atomic64_inc_read_relb erts_no_atomic64_inc_read
-#define erts_atomic64_dec_read_relb erts_no_atomic64_dec_read
-#define erts_atomic64_inc_relb erts_no_atomic64_inc
-#define erts_atomic64_dec_relb erts_no_atomic64_dec
-#define erts_atomic64_add_read_relb erts_no_atomic64_add_read
-#define erts_atomic64_add_relb erts_no_atomic64_add
-#define erts_atomic64_read_bor_relb erts_no_atomic64_read_bor
-#define erts_atomic64_read_band_relb erts_no_atomic64_read_band
-#define erts_atomic64_xchg_relb erts_no_atomic64_xchg
-#define erts_atomic64_cmpxchg_relb erts_no_atomic64_cmpxchg
-#define erts_atomic64_read_bset_relb erts_no_atomic64_read_bset
-
-#define erts_atomic64_init_ddrb erts_no_atomic64_set
-#define erts_atomic64_set_ddrb erts_no_atomic64_set
-#define erts_atomic64_read_ddrb erts_no_atomic64_read
-#define erts_atomic64_inc_read_ddrb erts_no_atomic64_inc_read
-#define erts_atomic64_dec_read_ddrb erts_no_atomic64_dec_read
-#define erts_atomic64_inc_ddrb erts_no_atomic64_inc
-#define erts_atomic64_dec_ddrb erts_no_atomic64_dec
-#define erts_atomic64_add_read_ddrb erts_no_atomic64_add_read
-#define erts_atomic64_add_ddrb erts_no_atomic64_add
-#define erts_atomic64_read_bor_ddrb erts_no_atomic64_read_bor
-#define erts_atomic64_read_band_ddrb erts_no_atomic64_read_band
-#define erts_atomic64_xchg_ddrb erts_no_atomic64_xchg
-#define erts_atomic64_cmpxchg_ddrb erts_no_atomic64_cmpxchg
-#define erts_atomic64_read_bset_ddrb erts_no_atomic64_read_bset
-
-#define erts_atomic64_init_rb erts_no_atomic64_set
-#define erts_atomic64_set_rb erts_no_atomic64_set
-#define erts_atomic64_read_rb erts_no_atomic64_read
-#define erts_atomic64_inc_read_rb erts_no_atomic64_inc_read
-#define erts_atomic64_dec_read_rb erts_no_atomic64_dec_read
-#define erts_atomic64_inc_rb erts_no_atomic64_inc
-#define erts_atomic64_dec_rb erts_no_atomic64_dec
-#define erts_atomic64_add_read_rb erts_no_atomic64_add_read
-#define erts_atomic64_add_rb erts_no_atomic64_add
-#define erts_atomic64_read_bor_rb erts_no_atomic64_read_bor
-#define erts_atomic64_read_band_rb erts_no_atomic64_read_band
-#define erts_atomic64_xchg_rb erts_no_atomic64_xchg
-#define erts_atomic64_cmpxchg_rb erts_no_atomic64_cmpxchg
-#define erts_atomic64_read_bset_rb erts_no_atomic64_read_bset
-
-#define erts_atomic64_init_wb erts_no_atomic64_set
-#define erts_atomic64_set_wb erts_no_atomic64_set
-#define erts_atomic64_read_wb erts_no_atomic64_read
-#define erts_atomic64_inc_read_wb erts_no_atomic64_inc_read
-#define erts_atomic64_dec_read_wb erts_no_atomic64_dec_read
-#define erts_atomic64_inc_wb erts_no_atomic64_inc
-#define erts_atomic64_dec_wb erts_no_atomic64_dec
-#define erts_atomic64_add_read_wb erts_no_atomic64_add_read
-#define erts_atomic64_add_wb erts_no_atomic64_add
-#define erts_atomic64_read_bor_wb erts_no_atomic64_read_bor
-#define erts_atomic64_read_band_wb erts_no_atomic64_read_band
-#define erts_atomic64_xchg_wb erts_no_atomic64_xchg
-#define erts_atomic64_cmpxchg_wb erts_no_atomic64_cmpxchg
-#define erts_atomic64_read_bset_wb erts_no_atomic64_read_bset
-
-#define erts_atomic64_set_dirty erts_no_atomic64_set
-#define erts_atomic64_read_dirty erts_no_atomic64_read
-
-#endif /* !USE_THREADS */
#include "erl_msacc.h"
@@ -2065,110 +1624,83 @@ erts_atomic64_read_dirty(erts_atomic64_t *var)
ERTS_GLB_INLINE void
erts_thr_init(erts_thr_init_data_t *id)
{
-#ifdef USE_THREADS
int res = ethr_init(id);
if (res)
erts_thr_fatal_error(res, "initialize thread library");
-#endif
}
ERTS_GLB_INLINE void
erts_thr_late_init(erts_thr_late_init_data_t *id)
{
-#ifdef USE_THREADS
int res = ethr_late_init(id);
if (res)
erts_thr_fatal_error(res, "complete initialization of thread library");
-#endif
}
ERTS_GLB_INLINE void
erts_thr_create(erts_tid_t *tid, void * (*func)(void *), void *arg,
erts_thr_opts_t *opts)
{
-#ifdef USE_THREADS
int res = ethr_thr_create(tid, func, arg, opts);
if (res)
erts_thr_fatal_error(res, "create thread");
-#endif
}
ERTS_GLB_INLINE void
erts_thr_join(erts_tid_t tid, void **thr_res)
{
-#ifdef USE_THREADS
int res = ethr_thr_join(tid, thr_res);
if (res)
erts_thr_fatal_error(res, "join thread");
-#endif
}
ERTS_GLB_INLINE void
erts_thr_detach(erts_tid_t tid)
{
-#ifdef USE_THREADS
int res = ethr_thr_detach(tid);
if (res)
erts_thr_fatal_error(res, "detach thread");
-#endif
}
ERTS_GLB_INLINE void
erts_thr_exit(void *res)
{
-#ifdef USE_THREADS
ethr_thr_exit(res);
erts_thr_fatal_error(0, "terminate thread");
-#endif
}
ERTS_GLB_INLINE void
erts_thr_install_exit_handler(void (*exit_handler)(void))
{
-#ifdef USE_THREADS
int res = ethr_install_exit_handler(exit_handler);
if (res != 0)
erts_thr_fatal_error(res, "install thread exit handler");
-#endif
}
ERTS_GLB_INLINE erts_tid_t
erts_thr_self(void)
{
-#ifdef USE_THREADS
return ethr_self();
-#else
- return 0;
-#endif
}
ERTS_GLB_INLINE int
erts_thr_getname(erts_tid_t tid, char *buf, size_t len)
{
-#ifdef USE_THREADS
return ethr_getname(tid, buf, len);
-#else
- return -1;
-#endif
}
ERTS_GLB_INLINE int
erts_equal_tids(erts_tid_t x, erts_tid_t y)
{
-#ifdef USE_THREADS
return ethr_equal_tids(x, y);
-#else
- return 1;
-#endif
}
ERTS_GLB_INLINE void
erts_mtx_init(erts_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags)
{
-#ifdef USE_THREADS
int res = ethr_mutex_init(&mtx->mtx);
if (res) {
erts_thr_fatal_error(res, "initialize mutex");
@@ -2185,13 +1717,11 @@ erts_mtx_init(erts_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_init_ref_x(&mtx->lcnt, name, extra, flags);
#endif
-#endif /* USE_THREADS */
}
ERTS_GLB_INLINE void
erts_mtx_init_locked(erts_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t flags)
{
-#ifdef USE_THREADS
erts_mtx_init(mtx, name, extra, flags);
ethr_mutex_lock(&mtx->mtx);
@@ -2201,13 +1731,11 @@ erts_mtx_init_locked(erts_mtx_t *mtx, char *name, Eterm extra, erts_lock_flags_t
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_trylock(&mtx->lcnt, 1);
#endif
-#endif
}
ERTS_GLB_INLINE void
erts_mtx_destroy(erts_mtx_t *mtx)
{
-#ifdef USE_THREADS
int res;
ASSERT(!(mtx->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
@@ -2230,7 +1758,6 @@ erts_mtx_destroy(erts_mtx_t *mtx)
#endif
erts_thr_fatal_error(res, "destroy mutex");
}
-#endif
}
ERTS_GLB_INLINE int
@@ -2240,7 +1767,6 @@ erts_mtx_trylock_x(erts_mtx_t *mtx, char *file, unsigned int line)
erts_mtx_trylock(erts_mtx_t *mtx)
#endif
{
-#ifdef USE_THREADS
int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -2262,9 +1788,6 @@ erts_mtx_trylock(erts_mtx_t *mtx)
erts_lcnt_trylock(&mtx->lcnt, res);
#endif
return res;
-#else
- return 0;
-#endif
}
@@ -2275,7 +1798,6 @@ erts_mtx_lock_x(erts_mtx_t *mtx, char *file, unsigned int line)
erts_mtx_lock(erts_mtx_t *mtx)
#endif
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
erts_lc_lock_x(&mtx->lc, file, line);
@@ -2290,13 +1812,11 @@ erts_mtx_lock(erts_mtx_t *mtx)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&mtx->lcnt, file, line);
#endif
-#endif
}
ERTS_GLB_INLINE void
erts_mtx_unlock(erts_mtx_t *mtx)
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock(&mtx->lc);
#endif
@@ -2304,7 +1824,6 @@ erts_mtx_unlock(erts_mtx_t *mtx)
erts_lcnt_unlock(&mtx->lcnt);
#endif
ethr_mutex_unlock(&mtx->mtx);
-#endif
}
ERTS_GLB_INLINE int
@@ -2324,17 +1843,14 @@ erts_lc_mtx_is_locked(erts_mtx_t *mtx)
ERTS_GLB_INLINE void
erts_cnd_init(erts_cnd_t *cnd)
{
-#ifdef USE_THREADS
int res = ethr_cond_init(cnd);
if (res)
erts_thr_fatal_error(res, "initialize condition variable");
-#endif
}
ERTS_GLB_INLINE void
erts_cnd_destroy(erts_cnd_t *cnd)
{
-#ifdef USE_THREADS
int res = ethr_cond_destroy(cnd);
if (res != 0) {
#ifdef ERTS_THR_HAVE_BUSY_DESTROY_BUG
@@ -2347,13 +1863,11 @@ erts_cnd_destroy(erts_cnd_t *cnd)
#endif
erts_thr_fatal_error(res, "destroy condition variable");
}
-#endif
}
ERTS_GLB_INLINE void
erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx)
{
-#ifdef USE_THREADS
int res;
ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP);
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -2375,7 +1889,6 @@ erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx)
if (res != 0 && res != EINTR)
erts_thr_fatal_error(res, "wait on condition variable");
ERTS_MSACC_POP_STATE();
-#endif
}
/*
@@ -2391,18 +1904,14 @@ erts_cnd_wait(erts_cnd_t *cnd, erts_mtx_t *mtx)
ERTS_GLB_INLINE void
erts_cnd_signal(erts_cnd_t *cnd)
{
-#ifdef USE_THREADS
ethr_cond_signal(cnd);
-#endif
}
ERTS_GLB_INLINE void
erts_cnd_broadcast(erts_cnd_t *cnd)
{
-#ifdef USE_THREADS
ethr_cond_broadcast(cnd);
-#endif
}
/* rwmutex */
@@ -2410,7 +1919,6 @@ erts_cnd_broadcast(erts_cnd_t *cnd)
ERTS_GLB_INLINE void
erts_rwmtx_set_reader_group(int no)
{
-#ifdef USE_THREADS
int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_no_locked_of_type(ERTS_LC_FLG_LT_RWMUTEX);
@@ -2418,13 +1926,11 @@ erts_rwmtx_set_reader_group(int no)
res = ethr_rwmutex_set_reader_group(no);
if (res != 0)
erts_thr_fatal_error(res, "set reader group");
-#endif
}
ERTS_GLB_INLINE void
erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx, erts_rwmtx_opt_t *opt,
char *name, Eterm extra, erts_lock_flags_t flags) {
-#ifdef USE_THREADS
int res = ethr_rwmutex_init_opt(&rwmtx->rwmtx, opt);
if (res != 0) {
erts_thr_fatal_error(res, "initialize rwmutex");
@@ -2441,7 +1947,6 @@ erts_rwmtx_init_opt(erts_rwmtx_t *rwmtx, erts_rwmtx_opt_t *opt,
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_init_ref_x(&rwmtx->lcnt, name, extra, flags);
#endif
-#endif /* USE_THREADS */
}
ERTS_GLB_INLINE void
@@ -2453,7 +1958,6 @@ erts_rwmtx_init(erts_rwmtx_t *rwmtx, char *name, Eterm extra,
ERTS_GLB_INLINE void
erts_rwmtx_destroy(erts_rwmtx_t *rwmtx)
{
-#ifdef USE_THREADS
int res;
ASSERT(!(rwmtx->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
@@ -2476,7 +1980,6 @@ erts_rwmtx_destroy(erts_rwmtx_t *rwmtx)
#endif
erts_thr_fatal_error(res, "destroy rwmutex");
}
-#endif
}
ERTS_GLB_INLINE int
@@ -2486,7 +1989,6 @@ erts_rwmtx_tryrlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
#endif
{
-#ifdef USE_THREADS
int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -2509,9 +2011,6 @@ erts_rwmtx_tryrlock(erts_rwmtx_t *rwmtx)
#endif
return res;
-#else
- return 0;
-#endif
}
ERTS_GLB_INLINE void
@@ -2521,7 +2020,6 @@ erts_rwmtx_rlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
erts_rwmtx_rlock(erts_rwmtx_t *rwmtx)
#endif
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LC_FLG_LO_READ,file,line);
@@ -2536,13 +2034,11 @@ erts_rwmtx_rlock(erts_rwmtx_t *rwmtx)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&rwmtx->lcnt, file, line);
#endif
-#endif
}
ERTS_GLB_INLINE void
erts_rwmtx_runlock(erts_rwmtx_t *rwmtx)
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ);
#endif
@@ -2550,7 +2046,6 @@ erts_rwmtx_runlock(erts_rwmtx_t *rwmtx)
erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTION_READ);
#endif
ethr_rwmutex_runlock(&rwmtx->rwmtx);
-#endif
}
@@ -2561,7 +2056,6 @@ erts_rwmtx_tryrwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
#endif
{
-#ifdef USE_THREADS
int res;
#ifdef ERTS_ENABLE_LOCK_CHECK
@@ -2584,9 +2078,6 @@ erts_rwmtx_tryrwlock(erts_rwmtx_t *rwmtx)
#endif
return res;
-#else
- return 0;
-#endif
}
ERTS_GLB_INLINE void
@@ -2596,7 +2087,6 @@ erts_rwmtx_rwlock_x(erts_rwmtx_t *rwmtx, char *file, unsigned int line)
erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx)
#endif
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
erts_lc_lock_flg_x(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line);
@@ -2611,13 +2101,11 @@ erts_rwmtx_rwlock(erts_rwmtx_t *rwmtx)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&rwmtx->lcnt, file, line);
#endif
-#endif
}
ERTS_GLB_INLINE void
erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx)
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&rwmtx->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
@@ -2625,7 +2113,6 @@ erts_rwmtx_rwunlock(erts_rwmtx_t *rwmtx)
erts_lcnt_unlock_opt(&rwmtx->lcnt, ERTS_LOCK_OPTION_RDWR);
#endif
ethr_rwmutex_rwunlock(&rwmtx->rwmtx);
-#endif
}
#if 0 /* The following rwmtx function names are
@@ -3009,7 +2496,6 @@ erts_no_atomic64_read_bset(erts_no_atomic64_t *var,
ERTS_GLB_INLINE void
erts_spinlock_init(erts_spinlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags)
{
-#ifdef USE_THREADS
int res = ethr_spinlock_init(&lock->slck);
if (res) {
erts_thr_fatal_error(res, "init spinlock");
@@ -3026,13 +2512,11 @@ erts_spinlock_init(erts_spinlock_t *lock, char *name, Eterm extra, erts_lock_fla
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_init_ref_x(&lock->lcnt, name, extra, flags);
#endif
-#endif /* USE_THREADS */
}
ERTS_GLB_INLINE void
erts_spinlock_destroy(erts_spinlock_t *lock)
{
-#ifdef USE_THREADS
int res;
ASSERT(!(lock->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
@@ -3055,15 +2539,11 @@ erts_spinlock_destroy(erts_spinlock_t *lock)
#endif
erts_thr_fatal_error(res, "destroy rwlock");
}
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
erts_spin_unlock(erts_spinlock_t *lock)
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock(&lock->lc);
#endif
@@ -3071,9 +2551,6 @@ erts_spin_unlock(erts_spinlock_t *lock)
erts_lcnt_unlock(&lock->lcnt);
#endif
ethr_spin_unlock(&lock->slck);
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
@@ -3083,7 +2560,6 @@ erts_spin_lock_x(erts_spinlock_t *lock, char *file, unsigned int line)
erts_spin_lock(erts_spinlock_t *lock)
#endif
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
erts_lc_lock_x(&lock->lc,file,line);
@@ -3098,9 +2574,6 @@ erts_spin_lock(erts_spinlock_t *lock)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE int
@@ -3122,7 +2595,6 @@ erts_lc_spinlock_is_locked(erts_spinlock_t *lock)
ERTS_GLB_INLINE void
erts_rwlock_init(erts_rwlock_t *lock, char *name, Eterm extra, erts_lock_flags_t flags)
{
-#ifdef USE_THREADS
int res = ethr_rwlock_init(&lock->rwlck);
if (res) {
erts_thr_fatal_error(res, "init rwlock");
@@ -3139,13 +2611,11 @@ erts_rwlock_init(erts_rwlock_t *lock, char *name, Eterm extra, erts_lock_flags_t
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_init_ref_x(&lock->lcnt, name, extra, flags);
#endif
-#endif /* USE_THREADS */
}
ERTS_GLB_INLINE void
erts_rwlock_destroy(erts_rwlock_t *lock)
{
-#ifdef USE_THREADS
int res;
ASSERT(!(lock->flags & ERTS_LOCK_FLAGS_PROPERTY_STATIC));
@@ -3168,15 +2638,11 @@ erts_rwlock_destroy(erts_rwlock_t *lock)
#endif
erts_thr_fatal_error(res, "destroy rwlock");
}
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
erts_read_unlock(erts_rwlock_t *lock)
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ);
#endif
@@ -3184,9 +2650,6 @@ erts_read_unlock(erts_rwlock_t *lock)
erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LOCK_OPTION_READ);
#endif
ethr_read_unlock(&lock->rwlck);
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
@@ -3196,7 +2659,6 @@ erts_read_lock_x(erts_rwlock_t *lock, char *file, unsigned int line)
erts_read_lock(erts_rwlock_t *lock)
#endif
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
erts_lc_lock_flg_x(&lock->lc, ERTS_LC_FLG_LO_READ,file,line);
@@ -3211,15 +2673,11 @@ erts_read_lock(erts_rwlock_t *lock)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
erts_write_unlock(erts_rwlock_t *lock)
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_unlock_flg(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE);
#endif
@@ -3227,9 +2685,6 @@ erts_write_unlock(erts_rwlock_t *lock)
erts_lcnt_unlock_opt(&lock->lcnt, ERTS_LOCK_OPTION_RDWR);
#endif
ethr_write_unlock(&lock->rwlck);
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE void
@@ -3239,7 +2694,6 @@ erts_write_lock_x(erts_rwlock_t *lock, char *file, unsigned int line)
erts_write_lock(erts_rwlock_t *lock)
#endif
{
-#ifdef USE_THREADS
#ifdef ERTS_ENABLE_LOCK_CHECK
#ifdef ERTS_ENABLE_LOCK_POSITION
erts_lc_lock_flg_x(&lock->lc, ERTS_LC_FLG_LO_READ_WRITE,file,line);
@@ -3254,9 +2708,6 @@ erts_write_lock(erts_rwlock_t *lock)
#ifdef ERTS_ENABLE_LOCK_COUNT
erts_lcnt_lock_post_x(&lock->lcnt, file, line);
#endif
-#else
- (void)lock;
-#endif
}
ERTS_GLB_INLINE int
@@ -3290,125 +2741,90 @@ erts_lc_rwlock_is_rwlocked(erts_rwlock_t *lock)
ERTS_GLB_INLINE void
erts_tsd_key_create(erts_tsd_key_t *keyp, char *keyname)
{
-#ifdef USE_THREADS
int res = ethr_tsd_key_create(keyp, keyname);
if (res)
erts_thr_fatal_error(res, "create thread specific data key");
-#endif
}
ERTS_GLB_INLINE void
erts_tsd_key_delete(erts_tsd_key_t key)
{
-#ifdef USE_THREADS
int res = ethr_tsd_key_delete(key);
if (res)
erts_thr_fatal_error(res, "delete thread specific data key");
-#endif
}
ERTS_GLB_INLINE void
erts_tsd_set(erts_tsd_key_t key, void *value)
{
-#ifdef USE_THREADS
int res = ethr_tsd_set(key, value);
if (res)
erts_thr_fatal_error(res, "set thread specific data");
-#endif
}
ERTS_GLB_INLINE void *
erts_tsd_get(erts_tsd_key_t key)
{
-#ifdef USE_THREADS
return ethr_tsd_get(key);
-#else
- return NULL;
-#endif
}
ERTS_GLB_INLINE erts_tse_t *erts_tse_fetch(void)
{
-#ifdef USE_THREADS
return (erts_tse_t *) ethr_get_ts_event();
-#else
- return (erts_tse_t *) NULL;
-#endif
}
ERTS_GLB_INLINE void erts_tse_return(erts_tse_t *ep)
{
-#ifdef USE_THREADS
ethr_leave_ts_event(ep);
-#endif
}
ERTS_GLB_INLINE void erts_tse_prepare_timed(erts_tse_t *ep)
{
-#ifdef USE_THREADS
int res = ethr_event_prepare_timed(&((ethr_ts_event *) ep)->event);
if (res != 0)
erts_thr_fatal_error(res, "prepare timed");
-#endif
}
ERTS_GLB_INLINE void erts_tse_set(erts_tse_t *ep)
{
-#ifdef USE_THREADS
ethr_event_set(&((ethr_ts_event *) ep)->event);
-#endif
}
ERTS_GLB_INLINE void erts_tse_reset(erts_tse_t *ep)
{
-#ifdef USE_THREADS
ethr_event_reset(&((ethr_ts_event *) ep)->event);
-#endif
}
ERTS_GLB_INLINE int erts_tse_wait(erts_tse_t *ep)
{
-#ifdef USE_THREADS
int res;
ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP);
res = ethr_event_wait(&((ethr_ts_event *) ep)->event);
ERTS_MSACC_POP_STATE();
return res;
-#else
- return ENOTSUP;
-#endif
}
ERTS_GLB_INLINE int erts_tse_swait(erts_tse_t *ep, int spincount)
{
-#ifdef USE_THREADS
int res;
ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP);
res = ethr_event_swait(&((ethr_ts_event *) ep)->event, spincount);
ERTS_MSACC_POP_STATE();
return res;
-#else
- return ENOTSUP;
-#endif
}
ERTS_GLB_INLINE int erts_tse_twait(erts_tse_t *ep, Sint64 tmo)
{
-#ifdef USE_THREADS
int res;
ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP);
res = ethr_event_twait(&((ethr_ts_event *) ep)->event,
(ethr_sint64_t) tmo);
ERTS_MSACC_POP_STATE();
return res;
-#else
- return ENOTSUP;
-#endif
}
ERTS_GLB_INLINE int erts_tse_stwait(erts_tse_t *ep, int spincount, Sint64 tmo)
{
-#ifdef USE_THREADS
int res;
ERTS_MSACC_PUSH_AND_SET_STATE(ERTS_MSACC_STATE_SLEEP);
res = ethr_event_stwait(&((ethr_ts_event *) ep)->event,
@@ -3416,49 +2832,34 @@ ERTS_GLB_INLINE int erts_tse_stwait(erts_tse_t *ep, int spincount, Sint64 tmo)
(ethr_sint64_t) tmo);
ERTS_MSACC_POP_STATE();
return res;
-#else
- return ENOTSUP;
-#endif
}
ERTS_GLB_INLINE int erts_tse_is_tmp(erts_tse_t *ep)
{
-#ifdef USE_THREADS
return (ep->iflgs & ETHR_TS_EV_TMP) == ETHR_TS_EV_TMP;
-#else
- return 0;
-#endif
}
ERTS_GLB_INLINE void erts_thr_set_main_status(int on, int no)
{
-#ifdef USE_THREADS
int res = ethr_set_main_thr_status(on, no);
if (res != 0)
erts_thr_fatal_error(res, "set thread main status");
-#endif
}
ERTS_GLB_INLINE int erts_thr_get_main_status(void)
{
-#ifdef USE_THREADS
int main_status;
int res = ethr_get_main_thr_status(&main_status);
if (res != 0)
erts_thr_fatal_error(res, "get thread main status");
return main_status;
-#else
- return 1;
-#endif
}
ERTS_GLB_INLINE void erts_thr_yield(void)
{
-#ifdef USE_THREADS
int res = ETHR_YIELD();
if (res != 0)
erts_thr_fatal_error(res, "yield");
-#endif
}
@@ -3466,34 +2867,28 @@ ERTS_GLB_INLINE void erts_thr_yield(void)
ERTS_GLB_INLINE void
erts_thr_kill(erts_tid_t tid, int sig) {
-#ifdef USE_THREADS
int res = ethr_kill((ethr_tid)tid, sig);
if (res)
erts_thr_fatal_error(res, "killing thread");
-#endif
}
ERTS_GLB_INLINE void
erts_thr_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
-#ifdef USE_THREADS
int res = ethr_sigmask(how, set, oset);
if (res)
erts_thr_fatal_error(res, "get or set signal mask");
-#endif
}
ERTS_GLB_INLINE void
erts_thr_sigwait(const sigset_t *set, int *sig)
{
-#ifdef USE_THREADS
int res;
do {
res = ethr_sigwait(set, sig);
} while (res == EINTR);
if (res)
erts_thr_fatal_error(res, "to wait for signal");
-#endif
}
#endif /* #ifdef HAVE_ETHR_SIG_FUNCS */
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index db7d0ac449..efb3de2942 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -237,7 +237,6 @@ write_timestamp(ErtsTraceTimeStamp *tsp, Eterm **hpp)
}
}
-#ifdef ERTS_SMP
static ERTS_INLINE Uint
patch_ts_size(int ts_type)
@@ -257,7 +256,6 @@ patch_ts_size(int ts_type)
return 0;
}
}
-#endif /* ERTS_SMP */
/*
* Write a timestamp. The timestamp MUST be the last
@@ -298,18 +296,11 @@ write_ts(int ts_type, Eterm *hp, ErlHeapFragment *bp, Process *tracer)
if (shrink) {
if (bp)
bp->used_size -= shrink;
-#ifndef ERTS_SMP
- else if (tracer) {
- Eterm *endp = ts_hp + shrink;
- HRelease(tracer, endp, ts_hp);
- }
-#endif
}
return res;
}
-#ifdef ERTS_SMP
static void enqueue_sys_msg_unlocked(enum ErtsSysMsgType type,
Eterm from,
Eterm to,
@@ -321,7 +312,6 @@ static void enqueue_sys_msg(enum ErtsSysMsgType type,
Eterm msg,
ErlHeapFragment *bp);
static void init_sys_msg_dispatcher(void);
-#endif
static void init_tracer_nif(void);
static int tracer_cmp_fun(void*, void*);
@@ -350,9 +340,7 @@ void erts_init_trace(void) {
default_port_trace_flags = F_INITIAL_TRACE_FLAGS;
default_port_tracer = erts_tracer_nil;
system_seq_tracer = erts_tracer_nil;
-#ifdef ERTS_SMP
init_sys_msg_dispatcher();
-#endif
init_tracer_nif();
}
@@ -421,20 +409,12 @@ exiting_reset(Eterm exiting)
{
erts_smp_rwmtx_rwlock(&sys_trace_rwmtx);
if (exiting == system_monitor) {
-#ifdef ERTS_SMP
system_monitor = NIL;
/* Let the trace message dispatcher clear flags, etc */
-#else
- erts_system_monitor_clear(NULL);
-#endif
}
if (exiting == system_profile) {
-#ifdef ERTS_SMP
system_profile = NIL;
/* Let the trace message dispatcher clear flags, etc */
-#else
- erts_system_profile_clear(NULL);
-#endif
}
erts_smp_rwmtx_rwunlock(&sys_trace_rwmtx);
}
@@ -679,71 +659,11 @@ write_sys_msg_to_port(Eterm unused_to,
erts_exit(ERTS_ERROR_EXIT, "Internal error in do_send_to_port: %d\n", ptr-buffer);
}
-#ifndef ERTS_SMP
- if (!INVALID_TRACER_PORT(trace_port, trace_port->common.id))
-#endif
erts_raw_port_command(trace_port, buffer, ptr-buffer);
erts_free(ERTS_ALC_T_TMP, (void *) buffer);
}
-#ifndef ERTS_SMP
-/* Profile send
- * Checks if profiler is port or process
- * Eterm msg is local, need copying.
- */
-
-static void
-profile_send(Eterm from, Eterm message) {
- Uint sz = 0;
- Uint *hp = NULL;
- Eterm msg = NIL;
- Process *profile_p = NULL;
-
- Eterm profiler = erts_get_system_profile();
-
- /* do not profile profiler pid */
- if (from == profiler) return;
-
- if (is_internal_port(profiler)) {
- Port *profiler_port = NULL;
-
- /* not smp */
-
- profiler_port = erts_id2port_sflgs(profiler,
- NULL,
- 0,
- ERTS_PORT_SFLGS_INVALID_TRACER_LOOKUP);
- if (profiler_port) {
- write_sys_msg_to_port(profiler,
- profiler_port,
- NIL, /* or current process->common.id */
- SYS_MSG_TYPE_SYSPROF,
- message);
- erts_port_release(profiler_port);
- }
-
- } else {
- ErtsMessage *mp;
- ASSERT(is_internal_pid(profiler));
-
- profile_p = erts_proc_lookup(profiler);
-
- if (!profile_p)
- return;
-
- sz = size_object(message);
- mp = erts_alloc_message(sz, &hp);
- if (sz == 0)
- msg = message;
- else
- msg = copy_struct(message, sz, &hp, &mp->hfrag.off_heap);
-
- erts_queue_message(profile_p, 0, mp, msg, from);
- }
-}
-
-#endif
static void
trace_sched_aux(Process *p, ErtsProcLocks locks, Eterm what)
@@ -815,9 +735,7 @@ trace_send(Process *p, Eterm to, Eterm msg)
ErtsTracerNif *tnif = NULL;
ErtsTracingEvent* te;
Eterm pam_result;
-#ifdef ERTS_SMP
ErtsThrPrgrDelayHandle dhndl;
-#endif
ASSERT(ARE_TRACE_FLAGS_ON(p, F_TRACE_SEND));
@@ -842,9 +760,7 @@ trace_send(Process *p, Eterm to, Eterm msg)
} else
pam_result = am_true;
-#ifdef ERTS_SMP
dhndl = erts_thr_progress_unmanaged_delay();
-#endif
if (is_internal_pid(to)) {
if (!erts_proc_lookup(to))
@@ -862,9 +778,7 @@ trace_send(Process *p, Eterm to, Eterm msg)
operation, msg, to, pam_result);
}
-#ifdef ERTS_SMP
erts_thr_progress_unmanaged_continue(dhndl);
-#endif
erts_match_set_release_result_trace(p, pam_result);
}
@@ -1467,21 +1381,11 @@ monitor_long_schedule_proc(Process *p, ErtsCodeMFA *in_fp,
{
ErlHeapFragment *bp;
ErlOffHeap *off_heap;
-#ifndef ERTS_SMP
- Process *monitor_p;
-#endif
Uint hsz;
Eterm *hp, list, in_mfa = am_undefined, out_mfa = am_undefined;
Eterm in_tpl, out_tpl, tmo_tpl, tmo, msg;
-#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor));
- monitor_p = erts_proc_lookup(system_monitor);
- if (!monitor_p || p == monitor_p) {
- return;
- }
-#endif
/*
* Size: {monitor, pid, long_schedule, [{timeout, T}, {in, {M,F,A}},{out,{M,F,A}}]} ->
* 5 (top tuple of 4), (3 (elements) * 2 (cons)) + 3 (timeout tuple of 2) + size of Timeout +
@@ -1517,36 +1421,18 @@ monitor_long_schedule_proc(Process *p, ErtsCodeMFA *in_fp,
hp += 2;
msg = TUPLE4(hp, am_monitor, p->common.id, am_long_schedule, list);
hp += 5;
-#ifdef ERTS_SMP
enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp);
-#else
- {
- ErtsMessage *mp = erts_alloc_message(0, NULL);
- mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, 0, mp, msg, am_system);
- }
-#endif
}
void
monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time)
{
ErlHeapFragment *bp;
ErlOffHeap *off_heap;
-#ifndef ERTS_SMP
- Process *monitor_p;
-#endif
Uint hsz;
Eterm *hp, list, op;
Eterm op_tpl, tmo_tpl, tmo, msg;
-#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor));
- monitor_p = erts_proc_lookup(system_monitor);
- if (!monitor_p) {
- return;
- }
-#endif
/*
* Size: {monitor, port, long_schedule, [{timeout, T}, {op, Operation}]} ->
* 5 (top tuple of 4), (2 (elements) * 2 (cons)) + 3 (timeout tuple of 2)
@@ -1582,24 +1468,13 @@ monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time)
hp += 2;
msg = TUPLE4(hp, am_monitor, pp->common.id, am_long_schedule, list);
hp += 5;
-#ifdef ERTS_SMP
enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, pp->common.id, NIL, msg, bp);
-#else
- {
- ErtsMessage *mp = erts_alloc_message(0, NULL);
- mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, 0, mp, msg, am_system);
- }
-#endif
}
void
monitor_long_gc(Process *p, Uint time) {
ErlHeapFragment *bp;
ErlOffHeap *off_heap;
-#ifndef ERTS_SMP
- Process *monitor_p;
-#endif
Uint hsz;
Eterm *hp, list, msg;
Eterm tags[] = {
@@ -1624,12 +1499,6 @@ monitor_long_gc(Process *p, Uint time) {
Eterm *hp_end;
#endif
-#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor));
- monitor_p = erts_proc_lookup(system_monitor);
- if (!monitor_p || p == monitor_p)
- return;
-#endif
hsz = 0;
(void) erts_bld_atom_uword_2tup_list(NULL,
@@ -1657,24 +1526,13 @@ monitor_long_gc(Process *p, Uint time) {
ASSERT(hp == hp_end);
#endif
-#ifdef ERTS_SMP
enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp);
-#else
- {
- ErtsMessage *mp = erts_alloc_message(0, NULL);
- mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, 0, mp, msg, am_system);
- }
-#endif
}
void
monitor_large_heap(Process *p) {
ErlHeapFragment *bp;
ErlOffHeap *off_heap;
-#ifndef ERTS_SMP
- Process *monitor_p;
-#endif
Uint hsz;
Eterm *hp, list, msg;
Eterm tags[] = {
@@ -1698,13 +1556,6 @@ monitor_large_heap(Process *p) {
#endif
-#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor));
- monitor_p = erts_proc_lookup(system_monitor);
- if (!monitor_p || p == monitor_p) {
- return;
- }
-#endif
hsz = 0;
(void) erts_bld_atom_uword_2tup_list(NULL,
@@ -1732,47 +1583,22 @@ monitor_large_heap(Process *p) {
ASSERT(hp == hp_end);
#endif
-#ifdef ERTS_SMP
enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp);
-#else
- {
- ErtsMessage *mp = erts_alloc_message(0, NULL);
- mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, 0, mp, msg, am_system);
- }
-#endif
}
void
monitor_generic(Process *p, Eterm type, Eterm spec) {
ErlHeapFragment *bp;
ErlOffHeap *off_heap;
-#ifndef ERTS_SMP
- Process *monitor_p;
-#endif
Eterm *hp, msg;
-#ifndef ERTS_SMP
- ASSERT(is_internal_pid(system_monitor));
- monitor_p = erts_proc_lookup(system_monitor);
- if (!monitor_p || p == monitor_p)
- return;
-#endif
hp = ERTS_ALLOC_SYSMSG_HEAP(5, &bp, &off_heap, monitor_p);
msg = TUPLE4(hp, am_monitor, p->common.id, type, spec);
hp += 5;
-#ifdef ERTS_SMP
enqueue_sys_msg(SYS_MSG_TYPE_SYSMON, p->common.id, NIL, msg, bp);
-#else
- {
- ErtsMessage *mp = erts_alloc_message(0, NULL);
- mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, 0, mp, msg, am_system);
- }
-#endif
}
@@ -1785,19 +1611,12 @@ profile_scheduler(Eterm scheduler_id, Eterm state) {
Eterm *hp, msg;
ErlHeapFragment *bp = NULL;
-#ifndef ERTS_SMP
-#define LOCAL_HEAP_SIZE (7 + ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
- hp = local_heap;
-#else
Uint hsz;
hsz = 7 + patch_ts_size(erts_system_profile_ts_type)-1;
bp = new_message_buffer(hsz);
hp = bp->mem;
-#endif
erts_smp_mtx_lock(&smq_mtx);
@@ -1821,13 +1640,7 @@ profile_scheduler(Eterm scheduler_id, Eterm state) {
/* Write timestamp in element 6 of the 'msg' tuple */
hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL);
-#ifndef ERTS_SMP
- profile_send(NIL, msg);
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
-#else
enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, NIL, NIL, msg, bp);
-#endif
erts_smp_mtx_unlock(&smq_mtx);
}
@@ -2092,22 +1905,12 @@ profile_runnable_port(Port *p, Eterm status) {
ErlHeapFragment *bp = NULL;
Eterm count = make_small(0);
-#ifndef ERTS_SMP
-#define LOCAL_HEAP_SIZE (6 + ERTS_TRACE_PATCH_TS_MAX_SIZE)
-
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
-
-#else
Uint hsz;
hsz = 6 + patch_ts_size(erts_system_profile_ts_type)-1;
bp = new_message_buffer(hsz);
hp = bp->mem;
-#endif
erts_smp_mtx_lock(&smq_mtx);
@@ -2118,13 +1921,7 @@ profile_runnable_port(Port *p, Eterm status) {
/* Write timestamp in element 5 of the 'msg' tuple */
hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL);
-#ifndef ERTS_SMP
- profile_send(p->common.id, msg);
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
-#else
enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->common.id, NIL, msg, bp);
-#endif
erts_smp_mtx_unlock(&smq_mtx);
}
@@ -2136,23 +1933,13 @@ profile_runnable_proc(Process *p, Eterm status){
ErlHeapFragment *bp = NULL;
ErtsCodeMFA *cmfa = NULL;
-#ifndef ERTS_SMP
-#define LOCAL_HEAP_SIZE (4 + 6 + ERTS_TRACE_PATCH_TS_MAX_SIZE)
- DeclareTmpHeapNoproc(local_heap,LOCAL_HEAP_SIZE);
- UseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-
- hp = local_heap;
-#else
ErtsThrPrgrDelayHandle dhndl;
Uint hsz = 4 + 6 + patch_ts_size(erts_system_profile_ts_type)-1;
-#endif
/* Assumptions:
* We possibly don't have the MAIN_LOCK for the process p here.
* We assume that we can read from p->current and p->i atomically
*/
-#ifdef ERTS_SMP
dhndl = erts_thr_progress_unmanaged_delay(); /* suspend purge operations */
-#endif
if (!ERTS_PROC_IS_EXITING(p)) {
if (p->current) {
@@ -2162,14 +1949,12 @@ profile_runnable_proc(Process *p, Eterm status){
}
}
-#ifdef ERTS_SMP
if (!cmfa) {
hsz -= 4;
}
bp = new_message_buffer(hsz);
hp = bp->mem;
-#endif
if (cmfa) {
where = TUPLE3(hp, cmfa->module, cmfa->function,
@@ -2179,9 +1964,7 @@ profile_runnable_proc(Process *p, Eterm status){
where = make_small(0);
}
-#ifdef ERTS_SMP
erts_thr_progress_unmanaged_continue(dhndl);
-#endif
erts_smp_mtx_lock(&smq_mtx);
@@ -2192,20 +1975,13 @@ profile_runnable_proc(Process *p, Eterm status){
/* Write timestamp in element 5 of the 'msg' tuple */
hp[-1] = write_ts(erts_system_profile_ts_type, hp, bp, NULL);
-#ifndef ERTS_SMP
- profile_send(p->common.id, msg);
- UnUseTmpHeapNoproc(LOCAL_HEAP_SIZE);
-#undef LOCAL_HEAP_SIZE
-#else
enqueue_sys_msg_unlocked(SYS_MSG_TYPE_SYSPROF, p->common.id, NIL, msg, bp);
-#endif
erts_smp_mtx_unlock(&smq_mtx);
}
/* End system_profile tracing */
-#ifdef ERTS_SMP
typedef struct ErtsSysMsgQ_ ErtsSysMsgQ;
struct ErtsSysMsgQ_ {
@@ -2634,7 +2410,6 @@ init_sys_msg_dispatcher(void)
&thr_opts);
}
-#endif
#include "erl_nif.h"
diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h
index 01fe1e5e23..7bbd93713d 100644
--- a/erts/emulator/beam/erl_trace.h
+++ b/erts/emulator/beam/erl_trace.h
@@ -87,7 +87,6 @@ void erts_set_system_monitor(Eterm monitor);
Eterm erts_get_system_monitor(void);
int erts_is_tracer_valid(Process* p);
-#ifdef ERTS_SMP
void erts_check_my_tracer_proc(Process *);
void erts_block_sys_msg_dispatcher(void);
void erts_release_sys_msg_dispatcher(void);
@@ -97,7 +96,6 @@ void erts_foreach_sys_msg_in_q(void (*func)(Eterm,
ErlHeapFragment *));
void erts_queue_error_logger_message(Eterm, Eterm, ErlHeapFragment *);
void erts_send_sys_msg_proc(Eterm, Eterm, Eterm, ErlHeapFragment *);
-#endif
void trace_send(Process*, Eterm, Eterm);
void trace_receive(Process*, Eterm, Eterm, ErtsTracingEvent*);
@@ -149,16 +147,12 @@ erts_bif_trace_epilogue(Process *p, Eterm result, int applying,
Uint32 flags_meta, BeamInstr* I,
ErtsTracer meta_tracer);
-#ifdef ERTS_SMP
void erts_send_pending_trace_msgs(ErtsSchedulerData *esdp);
#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP) \
do { \
if ((ESDP)->pending_trace_msgs) \
erts_send_pending_trace_msgs((ESDP)); \
} while (0)
-#else
-#define ERTS_SMP_CHK_PEND_TRACE_MSGS(ESDP)
-#endif
#define seq_trace_output(token, msg, type, receiver, process) \
seq_trace_output_generic((token), (msg), (type), (receiver), (process), NIL)
diff --git a/erts/emulator/beam/erl_utils.h b/erts/emulator/beam/erl_utils.h
index 3d28b05752..b1ba47df7b 100644
--- a/erts/emulator/beam/erl_utils.h
+++ b/erts/emulator/beam/erl_utils.h
@@ -86,22 +86,14 @@ ERTS_GLB_INLINE Uint64
erts_smp_current_interval_nob(erts_interval_t *icp)
{
ASSERT(icp->smp_api);
-#ifdef ERTS_SMP
return erts_current_interval_nob__(icp);
-#else
- return icp->counter.not_atomic;
-#endif
}
ERTS_GLB_INLINE Uint64
erts_smp_current_interval_acqb(erts_interval_t *icp)
{
ASSERT(icp->smp_api);
-#ifdef ERTS_SMP
return erts_current_interval_acqb__(icp);
-#else
- return icp->counter.not_atomic;
-#endif
}
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
diff --git a/erts/emulator/beam/export.c b/erts/emulator/beam/export.c
index 828c833ffc..2524ad40cb 100644
--- a/erts/emulator/beam/export.c
+++ b/erts/emulator/beam/export.c
@@ -85,17 +85,13 @@ static struct export_blob* entry_to_blob(struct export_entry* ee)
void
export_info(fmtfn_t to, void *to_arg)
{
-#ifdef ERTS_SMP
int lock = !ERTS_IS_CRASH_DUMPING;
if (lock)
export_staging_lock();
-#endif
index_info(to, to_arg, &export_tables[erts_active_code_ix()]);
hash_info(to, to_arg, &export_tables[erts_staging_code_ix()].htable);
-#ifdef ERTS_SMP
if (lock)
export_staging_unlock();
-#endif
}
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 2105ee7a6c..a598709984 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -209,9 +209,7 @@ struct erts_driver_t_ {
} version;
int flags;
DE_Handle *handle;
-#ifdef ERTS_SMP
erts_smp_mtx_t *lock;
-#endif
ErlDrvEntry *entry;
ErlDrvData (*start)(ErlDrvPort port, char *command, SysDriverOpts* opts);
void (*stop)(ErlDrvData drv_data);
@@ -1127,18 +1125,12 @@ extern ErtsModifiedTimings erts_modified_timings[];
extern int erts_no_line_info;
extern Eterm erts_error_logger_warnings;
extern int erts_initialized;
-#if defined(USE_THREADS) && !defined(ERTS_SMP)
-extern erts_tid_t erts_main_thread;
-#endif
extern int erts_compat_rel;
extern int erts_use_sender_punish;
void erl_start(int, char**);
void erts_usage(void);
Eterm erts_preloaded(Process* p);
-#ifndef ERTS_SMP
-extern void *erts_scheduler_stack_limit;
-#endif
/* erl_md5.c */
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index b609f6de39..a65ff03fd8 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -93,17 +93,11 @@ static int init_driver(erts_driver_t *, ErlDrvEntry *, DE_Handle *);
static void terminate_port(Port *p);
static void pdl_init(void);
static int driver_failure_term(ErlDrvPort ix, Eterm term, int eof);
-#ifdef ERTS_SMP
static void driver_monitor_lock_pdl(Port *p);
static void driver_monitor_unlock_pdl(Port *p);
#define DRV_MONITOR_LOOKUP_PORT_LOCK_PDL(Port) erts_thr_drvport2port((Port), 1)
#define DRV_MONITOR_LOCK_PDL(Port) driver_monitor_lock_pdl(Port)
#define DRV_MONITOR_UNLOCK_PDL(Port) driver_monitor_unlock_pdl(Port)
-#else
-#define DRV_MONITOR_LOOKUP_PORT_LOCK_PDL(Port) erts_thr_drvport2port((Port), 0)
-#define DRV_MONITOR_LOCK_PDL(Port) /* nothing */
-#define DRV_MONITOR_UNLOCK_PDL(Port) /* nothing */
-#endif
#define ERL_SMALL_IO_BIN_LIMIT (4*ERL_ONHEAP_BIN_LIMIT)
#define SMALL_WRITE_VEC 16
@@ -218,7 +212,6 @@ kill_port(Port *pp)
/* In non-smp case the port structure may have been deallocated now */
}
-#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
int
@@ -231,7 +224,6 @@ erts_lc_is_port_locked(Port *prt)
}
#endif
-#endif /* #ifdef ERTS_SMP */
static void initq(Port* prt);
@@ -255,25 +247,21 @@ static ERTS_INLINE void port_init_instr(Port *prt
* Stuff that need to be initialized with the port id
* in the instrumented case, but not in the normal case.
*/
-#ifdef ERTS_SMP
ASSERT(prt->drv_ptr && prt->lock);
if (!prt->drv_ptr->lock) {
erts_mtx_init_locked(prt->lock, "port_lock", id, ERTS_LOCK_FLAGS_CATEGORY_IO);
}
-#endif
erts_port_task_init_sched(&prt->sched, id);
}
#if !ERTS_PORT_INIT_INSTR_NEED_ID
static ERTS_INLINE void port_init_instr_abort(Port *prt)
{
-#ifdef ERTS_SMP
ASSERT(prt->drv_ptr && prt->lock);
if (!prt->drv_ptr->lock) {
erts_mtx_unlock(prt->lock);
erts_mtx_destroy(prt->lock);
}
-#endif
erts_port_task_fini_sched(&prt->sched);
}
#endif
@@ -309,7 +297,6 @@ static Port *create_port(char *name,
erts_aint32_t state = ERTS_PORT_SFLG_CONNECTED;
erts_aint32_t x_pts_flgs = 0;
-#ifdef ERTS_SMP
ErtsRunQueue *runq;
if (!driver_lock) {
/* Align size for mutex following port struct */
@@ -317,7 +304,6 @@ static Port *create_port(char *name,
size += sizeof(erts_mtx_t);
}
else
-#endif
port_size = size = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(Port));
#ifdef DEBUG
@@ -351,7 +337,6 @@ static Port *create_port(char *name,
p += busy_port_queue_size;
}
-#ifdef ERTS_SMP
if (driver_lock) {
prt->lock = driver_lock;
erts_mtx_lock(driver_lock);
@@ -368,10 +353,6 @@ static Port *create_port(char *name,
erts_smp_atomic_set_nob(&prt->run_queue, (erts_aint_t) runq);
prt->xports = NULL;
-#else
- erts_atomic32_init_nob(&prt->refc, 1);
- prt->cleanup = 0;
-#endif
erts_port_task_pre_init_sched(&prt->sched, busy_port_queue);
@@ -419,10 +400,8 @@ static Port *create_port(char *name,
#if !ERTS_PORT_INIT_INSTR_NEED_ID
port_init_instr_abort(prt);
#endif
-#ifdef ERTS_SMP
if (driver_lock)
erts_mtx_unlock(driver_lock);
-#endif
if (enop)
*enop = 0;
erts_free(ERTS_ALC_T_PORT, prt);
@@ -450,23 +429,11 @@ static Port *create_port(char *name,
return prt;
}
-#ifndef ERTS_SMP
-void
-erts_port_cleanup(Port *prt)
-{
- if (prt->drv_ptr && prt->drv_ptr->handle)
- erts_ddll_dereference_driver(prt->drv_ptr->handle);
- prt->drv_ptr = NULL;
- erts_port_dec_refc(prt);
-}
-#endif
void
erts_port_free(Port *prt)
{
-#if defined(ERTS_SMP) || defined(DEBUG) || defined(ERTS_ENABLE_LOCK_CHECK)
erts_aint32_t state = erts_atomic32_read_nob(&prt->state);
-#endif
ERTS_LC_ASSERT(state & (ERTS_PORT_SFLG_INITIALIZING
| ERTS_PORT_SFLG_FREE));
ASSERT(state & ERTS_PORT_SFLG_PORT_DEBUG);
@@ -480,7 +447,6 @@ erts_port_free(Port *prt)
prt->async_open_port = NULL;
}
-#ifdef ERTS_SMP
ASSERT(prt->lock);
if (state & ERTS_PORT_SFLG_PORT_SPECIFIC_LOCK)
erts_mtx_destroy(prt->lock);
@@ -497,7 +463,6 @@ erts_port_free(Port *prt)
*/
if (prt->drv_ptr->handle)
erts_ddll_dereference_driver(prt->drv_ptr->handle);
-#endif
erts_free(ERTS_ALC_T_PORT, prt);
}
@@ -658,9 +623,7 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
ERTS_OPEN_DRIVER_RET(NULL, -3, BADARG);
}
-#ifdef ERTS_SMP
driver_lock = driver->lock;
-#endif
if (driver->handle != NULL) {
erts_ddll_increment_port_count(driver->handle);
@@ -749,11 +712,9 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
if (IS_TRACED_FL(port, F_TRACE_SCHED_PORTS)) {
trace_sched_ports_where(port, am_out, am_open);
}
-#ifdef ERTS_SMP
if (port->xports)
erts_port_handle_xports(port);
ASSERT(!port->xports);
-#endif
}
if (error_type) {
@@ -782,7 +743,6 @@ erts_open_driver(erts_driver_t* driver, /* Pointer to driver. */
#undef ERTS_OPEN_DRIVER_RET
}
-#ifdef ERTS_SMP
struct ErtsXPortsList_ {
ErtsXPortsList *next;
@@ -791,7 +751,6 @@ struct ErtsXPortsList_ {
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(xports_list, ErtsXPortsList, 50, ERTS_ALC_T_XPORTS_LIST)
-#endif
/*
* Driver function to create new instances of a driver
@@ -839,9 +798,7 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */
erts_ddll_reference_referenced_driver(driver->handle);
}
-#ifdef ERTS_SMP
driver_lock = driver->lock;
-#endif
erts_smp_rwmtx_runlock(&erts_driver_list_lock);
@@ -878,21 +835,18 @@ driver_create_port(ErlDrvPort creator_port_ix, /* Creating port */
erts_add_link(&ERTS_P_LINKS(rp), LINK_PID, port->common.id);
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
-#ifdef ERTS_SMP
if (!driver_lock) {
ErtsXPortsList *xplp = xports_list_alloc();
xplp->port = port;
xplp->next = creator_port->xports;
creator_port->xports = xplp;
}
-#endif
port->drv_data = (UWord) drv_data;
return ERTS_Port2ErlDrvPort(port);
}
-#ifdef ERTS_SMP
int erts_port_handle_xports(Port *prt)
{
int reds = 0;
@@ -921,7 +875,6 @@ int erts_port_handle_xports(Port *prt)
prt->xports = NULL;
return reds;
}
-#endif
/* Fills a possibly deep list of chars and binaries into vec
** Small characters are first stored in the buffer buf of length ln
@@ -3373,11 +3326,9 @@ void erts_init_io(int port_tab_size,
common_element_size = ERTS_ALC_DATA_ALIGN_SIZE(sizeof(Port));
common_element_size += ERTS_ALC_DATA_ALIGN_SIZE(sizeof(ErtsPortTaskBusyPortQ));
common_element_size += 10; /* name */
-#ifdef ERTS_SMP
common_element_size += sizeof(erts_mtx_t);
init_xports_list_alloc();
-#endif
pdl_init();
@@ -4068,11 +4019,9 @@ static void flush_port(Port *p)
if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
trace_sched_ports_where(p, am_out, am_flush);
}
-#ifdef ERTS_SMP
if (p->xports)
erts_port_handle_xports(p);
ASSERT(!p->xports);
-#endif
}
if ((erts_atomic32_read_nob(&p->state) & ERTS_PORT_SFLGS_DEAD) == 0
&& is_port_ioq_empty(p)) {
@@ -4133,11 +4082,9 @@ terminate_port(Port *prt)
(*drv->stop)((ErlDrvData)prt->drv_data);
erts_unblock_fpe(fpe_was_unmasked);
ERTS_MSACC_POP_STATE_M();
-#ifdef ERTS_SMP
if (prt->xports)
erts_port_handle_xports(prt);
ASSERT(!prt->xports);
-#endif
}
if (is_internal_port(send_closed_port_id)
@@ -5474,13 +5421,11 @@ erts_request_io_bytes(Process *c_p)
erts_smp_atomic32_init_nob(&req->refc,
(erts_aint32_t) erts_no_schedulers);
-#ifdef ERTS_SMP
if (erts_no_schedulers > 1)
erts_schedule_multi_misc_aux_work(1,
erts_no_schedulers,
reply_io_bytes,
(void *) req);
-#endif
reply_io_bytes((void *) req);
@@ -6595,9 +6540,7 @@ static ERTS_INLINE int
deliver_term_check_port(ErlDrvTermData port_id, Eterm *connected_p,
Port **trace_prt)
{
-#ifdef ERTS_SMP
ErtsThrPrgrDelayHandle dhndl = erts_thr_progress_unmanaged_delay();
-#endif
erts_aint32_t state;
int res = 1;
Port *prt = erts_port_lookup_raw((Eterm) port_id);
@@ -6615,22 +6558,18 @@ deliver_term_check_port(ErlDrvTermData port_id, Eterm *connected_p,
goto done;
}
if (connected_p) {
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
ETHR_MEMBAR(ETHR_LoadLoad);
-#endif
*connected_p = ERTS_PORT_GET_CONNECTED(prt);
}
done:
-#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) {
ERTS_SMP_LC_ASSERT(!prt || !erts_lc_is_port_locked(prt));
erts_thr_progress_unmanaged_continue(dhndl);
ETHR_MEMBAR(ETHR_LoadLoad|ETHR_LoadStore);
} else
-#endif
if (res == 1) {
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
*trace_prt = prt;
@@ -6704,9 +6643,7 @@ driver_send_term(ErlDrvPort drvport,
*/
Port* prt = NULL;
ERTS_SMP_CHK_NO_PROC_LOCKS;
-#ifdef ERTS_SMP
if (erts_thr_progress_is_managed_thread())
-#endif
{
erts_aint32_t state;
prt = erts_drvport2port_state(drvport, &state);
@@ -7051,7 +6988,6 @@ static ERTS_INLINE void pdl_destroy(ErlDrvPDL pdl)
erts_free(ERTS_ALC_T_PORT_DATA_LOCK, pdl);
}
-#ifdef ERTS_SMP
static void driver_monitor_lock_pdl(Port *p) {
if (p->port_data_lock) {
@@ -7075,7 +7011,6 @@ static void driver_monitor_unlock_pdl(Port *p) {
}
}
-#endif
/*
* exported driver_pdl_* functions ...
@@ -7898,10 +7833,8 @@ int driver_exit(ErlDrvPort ix, int err)
lnk = erts_remove_link(&ERTS_P_LINKS(prt), connected);
-#ifdef ERTS_SMP
if (rp)
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
-#endif
if (rlnk != NULL) {
erts_destroy_link(rlnk);
@@ -8134,18 +8067,10 @@ driver_system_info(ErlDrvSysInfo *sip, size_t si_size)
sip->erts_version = ERLANG_VERSION;
sip->otp_release = ERLANG_OTP_RELEASE;
sip->thread_support =
-#ifdef USE_THREADS
1
-#else
- 0
-#endif
;
sip->smp_support =
-#ifdef ERTS_SMP
1
-#else
- 0
-#endif
;
}
@@ -8256,7 +8181,6 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
drv->version.minor = de->minor_version;
drv->flags = de->driver_flags;
drv->handle = handle;
-#ifdef ERTS_SMP
if (drv->flags & ERL_DRV_FLAG_USE_PORT_LOCKING) {
drv->lock = NULL;
} else {
@@ -8268,7 +8192,6 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
erts_mtx_init(drv->lock, "driver_lock", driver_id, ERTS_LOCK_FLAGS_CATEGORY_IO);
}
-#endif
drv->entry = de;
drv->start = de->start;
@@ -8311,12 +8234,10 @@ init_driver(erts_driver_t *drv, ErlDrvEntry *de, DE_Handle *handle)
void
erts_destroy_driver(erts_driver_t *drv)
{
-#ifdef ERTS_SMP
if (drv->lock) {
erts_smp_mtx_destroy(drv->lock);
erts_free(ERTS_ALC_T_DRIVER_LOCK, drv->lock);
}
-#endif
erts_free(ERTS_ALC_T_DRIVER, drv);
}
diff --git a/erts/emulator/beam/register.c b/erts/emulator/beam/register.c
index bf3267cff1..aec8777e8b 100644
--- a/erts/emulator/beam/register.c
+++ b/erts/emulator/beam/register.c
@@ -47,7 +47,6 @@ static erts_smp_rwmtx_t regtab_rwmtx;
#define reg_read_unlock() erts_smp_rwmtx_runlock(&regtab_rwmtx)
#define reg_write_unlock() erts_smp_rwmtx_rwunlock(&regtab_rwmtx)
-#ifdef ERTS_SMP
static ERTS_INLINE void
reg_safe_read_lock(Process *c_p, ErtsProcLocks *c_p_locks)
{
@@ -94,7 +93,6 @@ reg_safe_write_lock(Process *c_p, ErtsProcLocks *c_p_locks)
reg_write_lock();
}
-#endif
static ERTS_INLINE int
is_proc_alive(Process *p)
@@ -193,7 +191,6 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
}
}
-#ifdef ERTS_SMP
{
ErtsProcLocks proc_locks = proc ? ERTS_PROC_LOCK_MAIN : 0;
reg_safe_write_lock(proc, &proc_locks);
@@ -201,7 +198,6 @@ int erts_register_name(Process *c_p, Eterm name, Eterm id)
if (proc && !proc_locks)
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
-#endif
if (is_internal_pid(id)) {
if (!proc)
@@ -272,7 +268,6 @@ erts_whereis_name_to_id(Process *c_p, Eterm name)
HashValue hval;
int ix;
HashBucket* b;
-#ifdef ERTS_SMP
ErtsProcLocks c_p_locks = 0;
if (c_p) {
c_p_locks = ERTS_PROC_LOCK_MAIN;
@@ -282,7 +277,6 @@ erts_whereis_name_to_id(Process *c_p, Eterm name)
if (c_p && !c_p_locks)
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
-#endif
hval = REG_HASH(name);
ix = hval % process_reg.size;
@@ -331,7 +325,6 @@ erts_whereis_name(Process *c_p,
HashValue hval;
int ix;
HashBucket* b;
-#ifdef ERTS_SMP
ErtsProcLocks current_c_p_locks;
Port *pending_port = NULL;
@@ -348,7 +341,6 @@ erts_whereis_name(Process *c_p,
* - read reg lock
* - current_c_p_locks (either c_p_locks or 0) on c_p
*/
-#endif
hval = REG_HASH(name);
ix = hval % process_reg.size;
@@ -370,7 +362,6 @@ erts_whereis_name(Process *c_p,
if (!rp)
*proc = NULL;
else {
-#ifdef ERTS_SMP
if (!rp->p)
*proc = NULL;
else {
@@ -391,13 +382,6 @@ erts_whereis_name(Process *c_p,
*proc = NULL;
}
}
-#else
- if (rp->p
- && ((flags & ERTS_P2P_FLG_ALLOW_OTHER_X) || is_proc_alive(rp->p)))
- *proc = rp->p;
- else
- *proc = NULL;
-#endif
if (*proc && (flags & ERTS_P2P_FLG_INC_REFC))
erts_proc_inc_refc(*proc);
}
@@ -407,7 +391,6 @@ erts_whereis_name(Process *c_p,
if (!rp || !rp->pt)
*port = NULL;
else {
-#ifdef ERTS_SMP
if (lock_port) {
if (pending_port == rp->pt)
pending_port = NULL;
@@ -433,17 +416,14 @@ erts_whereis_name(Process *c_p,
}
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(rp->pt));
}
-#endif
*port = rp->pt;
}
}
-#ifdef ERTS_SMP
if (c_p && !current_c_p_locks)
erts_smp_proc_lock(c_p, c_p_locks);
if (pending_port)
erts_port_release(pending_port);
-#endif
reg_read_unlock();
}
@@ -476,7 +456,6 @@ int erts_unregister_name(Process *c_p,
RegProc r, *rp;
Port *port = c_prt;
ErtsProcLocks current_c_p_locks = 0;
-#ifdef ERTS_SMP
/*
* SMP note: If 'c_prt != NULL' and 'c_prt->reg->name == name',
@@ -492,18 +471,15 @@ int erts_unregister_name(Process *c_p,
restart:
reg_safe_write_lock(c_p, &current_c_p_locks);
-#endif
r.name = name;
if (is_non_value(name)) {
/* Unregister current process name */
ASSERT(c_p);
-#ifdef ERTS_SMP
if (current_c_p_locks != c_p_locks) {
erts_smp_proc_lock(c_p, c_p_locks);
current_c_p_locks = c_p_locks;
}
-#endif
if (c_p->common.u.alive.reg) {
r.name = c_p->common.u.alive.reg->name;
} else {
@@ -516,7 +492,6 @@ int erts_unregister_name(Process *c_p,
if ((rp = (RegProc*) hash_get(&process_reg, (void*) &r)) != NULL) {
if (rp->pt) {
if (port != rp->pt) {
-#ifdef ERTS_SMP
if (port) {
ASSERT(port != c_prt);
erts_port_release(port);
@@ -534,7 +509,6 @@ int erts_unregister_name(Process *c_p,
port = erts_id2port(id);
goto restart;
}
-#endif
port = rp->pt;
}
@@ -553,7 +527,6 @@ int erts_unregister_name(Process *c_p,
} else if (rp->p) {
-#ifdef ERTS_SMP
erts_proc_safelock(c_p,
current_c_p_locks,
c_p_locks,
@@ -561,17 +534,14 @@ int erts_unregister_name(Process *c_p,
(c_p == rp->p) ? current_c_p_locks : 0,
ERTS_PROC_LOCK_MAIN);
current_c_p_locks = c_p_locks;
-#endif
rp->p->common.u.alive.reg = NULL;
if (IS_TRACED_FL(rp->p, F_TRACE_PROCS)) {
trace_proc(rp->p, (c_p == rp->p) ? c_p_locks : ERTS_PROC_LOCK_MAIN,
rp->p, am_unregister, r.name);
}
-#ifdef ERTS_SMP
if (rp->p != c_p) {
erts_smp_proc_unlock(rp->p, ERTS_PROC_LOCK_MAIN);
}
-#endif
}
hash_erase(&process_reg, (void*) &r);
res = 1;
@@ -588,11 +558,9 @@ int erts_unregister_name(Process *c_p,
erts_smp_port_lock(c_prt);
}
}
-#ifdef ERTS_SMP
if (c_p && !current_c_p_locks) {
erts_smp_proc_lock(c_p, c_p_locks);
}
-#endif
return res;
}
@@ -633,14 +601,12 @@ BIF_RETTYPE registered_0(BIF_ALIST_0)
Uint need;
Eterm* hp;
HashBucket **bucket;
-#ifdef ERTS_SMP
ErtsProcLocks proc_locks = ERTS_PROC_LOCK_MAIN;
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(BIF_P);
reg_safe_read_lock(BIF_P, &proc_locks);
if (!proc_locks)
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
-#endif
bucket = process_reg.bucket;
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 1e09752d87..848f104871 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -34,9 +34,6 @@
(((__GNUC__ << 24) | (__GNUC_MINOR__ << 12) | __GNUC_PATCHLEVEL__) >= (((MAJ) << 24) | ((MIN) << 12) | (PL)))
#endif
-#if defined(ERTS_DIRTY_SCHEDULERS) && !defined(ERTS_SMP)
-# error "Dirty schedulers not supported without smp support"
-#endif
#ifdef ERTS_INLINE
# ifndef ERTS_CAN_INLINE
@@ -221,11 +218,7 @@ __decl_noreturn void __noreturn erl_assert_error(const char* expr, const char *f
# define ASSERT(e) ((void) 1)
#endif
-#ifdef ERTS_SMP
# define ERTS_SMP_ASSERT(e) ASSERT(e)
-#else
-# define ERTS_SMP_ASSERT(e) ((void)1)
-#endif
/* ERTS_UNDEF can be used to silence false warnings about
* "variable may be used uninitialized" while keeping the variable
@@ -476,35 +469,19 @@ int erts_send_warning_to_logger_str_nogl(char *);
#include "erl_smp.h"
#ifdef ERTS_WANT_BREAK_HANDLING
-# ifdef ERTS_SMP
extern erts_smp_atomic32_t erts_break_requested;
# define ERTS_BREAK_REQUESTED \
((int) erts_smp_atomic32_read_nob(&erts_break_requested))
-# else
-extern volatile int erts_break_requested;
-# define ERTS_BREAK_REQUESTED erts_break_requested
-# endif
void erts_do_break_handling(void);
#endif
-#if !defined(ERTS_SMP) && !defined(__WIN32__)
-extern volatile Uint erts_signal_state;
-#define ERTS_SIGNAL_STATE erts_signal_state
-void erts_handle_signal_state(void);
-#endif
-#ifdef ERTS_SMP
extern erts_smp_atomic32_t erts_writing_erl_crash_dump;
extern erts_tsd_key_t erts_is_crash_dumping_key;
#define ERTS_SOMEONE_IS_CRASH_DUMPING \
((int) erts_smp_atomic32_read_mb(&erts_writing_erl_crash_dump))
#define ERTS_IS_CRASH_DUMPING \
((int) (SWord) erts_tsd_get(erts_is_crash_dumping_key))
-#else
-extern volatile int erts_writing_erl_crash_dump;
-#define ERTS_SOMEONE_IS_CRASH_DUMPING erts_writing_erl_crash_dump
-#define ERTS_IS_CRASH_DUMPING erts_writing_erl_crash_dump
-#endif
/* Deal with memcpy() vs bcopy() etc. We want to use the mem*() functions,
but be able to fall back on bcopy() etc on systems that don't have
@@ -765,10 +742,8 @@ extern char *erts_sys_ddll_error(int code);
* System interfaces for startup.
*/
void erts_sys_schedule_interrupt(int set);
-#ifdef ERTS_SMP
void erts_sys_schedule_interrupt_timed(int, ErtsMonotonicTime);
void erts_sys_main_thread(void);
-#endif
extern int erts_sys_prepare_crash_dump(int secs);
extern void erts_sys_pre_init(void);
@@ -859,13 +834,11 @@ int erts_sys_unsetenv(char *key);
char *erts_read_env(char *key);
void erts_free_read_env(void *value);
-#if defined(ERTS_SMP)
#if defined(ERTS_THR_HAVE_SIG_FUNCS) && !defined(ETHR_UNUSABLE_SIGUSRX)
extern void sys_thr_resume(erts_tid_t tid);
extern void sys_thr_suspend(erts_tid_t tid);
#define ERTS_SYS_SUSPEND_SIGNAL SIGUSR2
#endif
-#endif
/* utils.c */
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 0fb25c2082..3713b756e8 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -1931,27 +1931,6 @@ do_allocate_logger_message(Eterm gleader, Eterm **hp, ErlOffHeap **ohp,
gl_sz = IS_CONST(gleader) ? 0 : size_object(gleader);
sz = sz + gl_sz;
-#ifndef ERTS_SMP
-#ifdef USE_THREADS
- if (!erts_get_scheduler_data()) /* Must be scheduler thread */
- *p = NULL;
- else
-#endif
- {
- *p = erts_whereis_process(NULL, 0, am_error_logger, 0, 0);
- if (*p) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&(*p)->state);
- if (state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))
- *p = NULL;
- }
- }
-
- if (!*p) {
- return NIL;
- }
-
- /* So we have an error logger, lets build the message */
-#endif
*bp = new_message_buffer(sz);
*ohp = &(*bp)->off_heap;
*hp = (*bp)->mem;
@@ -1969,20 +1948,12 @@ static void do_send_logger_message(Eterm *hp, ErlOffHeap *ohp, ErlHeapFragment *
#ifdef HARDDEBUG
erts_fprintf(stderr, "%T\n", message);
#endif
-#ifdef ERTS_SMP
{
Eterm from = erts_get_current_pid();
if (is_not_internal_pid(from))
from = NIL;
erts_queue_error_logger_message(from, message, bp);
}
-#else
- {
- ErtsMessage *mp = erts_alloc_message(0, NULL);
- mp->data.heap_frag = bp;
- erts_queue_message(p, 0, mp, message, am_system);
- }
-#endif
}
/* error_logger !
@@ -4665,11 +4636,7 @@ erts_interval_init(erts_interval_t *icp)
void
erts_smp_interval_init(erts_interval_t *icp)
{
-#ifdef ERTS_SMP
erts_interval_init(icp);
-#else
- icp->counter.not_atomic = 0;
-#endif
#ifdef DEBUG
icp->smp_api = 1;
#endif
@@ -4727,22 +4694,14 @@ Uint64
erts_smp_step_interval_nob(erts_interval_t *icp)
{
ASSERT(icp->smp_api);
-#ifdef ERTS_SMP
return step_interval_nob(icp);
-#else
- return ++icp->counter.not_atomic;
-#endif
}
Uint64
erts_smp_step_interval_relb(erts_interval_t *icp)
{
ASSERT(icp->smp_api);
-#ifdef ERTS_SMP
return step_interval_relb(icp);
-#else
- return ++icp->counter.not_atomic;
-#endif
}
Uint64
@@ -4763,28 +4722,14 @@ Uint64
erts_smp_ensure_later_interval_nob(erts_interval_t *icp, Uint64 ic)
{
ASSERT(icp->smp_api);
-#ifdef ERTS_SMP
return ensure_later_interval_nob(icp, ic);
-#else
- if (icp->counter.not_atomic > ic)
- return icp->counter.not_atomic;
- else
- return ++icp->counter.not_atomic;
-#endif
}
Uint64
erts_smp_ensure_later_interval_acqb(erts_interval_t *icp, Uint64 ic)
{
ASSERT(icp->smp_api);
-#ifdef ERTS_SMP
return ensure_later_interval_acqb(icp, ic);
-#else
- if (icp->counter.not_atomic > ic)
- return icp->counter.not_atomic;
- else
- return ++icp->counter.not_atomic;
-#endif
}
/*
diff --git a/erts/emulator/drivers/common/efile_drv.c b/erts/emulator/drivers/common/efile_drv.c
index 3a7b3bb50c..a1b15a2199 100644
--- a/erts/emulator/drivers/common/efile_drv.c
+++ b/erts/emulator/drivers/common/efile_drv.c
@@ -167,7 +167,6 @@ dt_private *get_dt_private(int);
#endif
-#ifdef USE_THREADS
#define THRDS_AVAILABLE (sys_info.async_threads > 0)
#ifdef HARDDEBUG /* HARDDEBUG in io.c is expected too */
#define TRACE_DRIVER fprintf(stderr, "Efile: ")
@@ -177,12 +176,6 @@ dt_private *get_dt_private(int);
#define MUTEX_INIT(m, p) do { IF_THRDS { TRACE_DRIVER; (m = driver_pdl_create(p)); } } while (0)
#define MUTEX_LOCK(m) do { IF_THRDS { TRACE_DRIVER; driver_pdl_lock(m); } } while (0)
#define MUTEX_UNLOCK(m) do { IF_THRDS { TRACE_DRIVER; driver_pdl_unlock(m); } } while (0)
-#else
-#define THRDS_AVAILABLE (0)
-#define MUTEX_INIT(m, p)
-#define MUTEX_LOCK(m)
-#define MUTEX_UNLOCK(m)
-#endif
#define IF_THRDS if (THRDS_AVAILABLE)
@@ -2715,7 +2708,6 @@ file_output(ErlDrvData e, char* buf, ErlDrvSizeT count)
}
case FILE_READDIR:
-#ifdef USE_THREADS
if (sys_info.async_threads > 0)
{
d = EF_SAFE_ALLOC(sizeof(struct t_data) - 1 + FILENAME_BYTELEN(name) +
@@ -2736,7 +2728,6 @@ file_output(ErlDrvData e, char* buf, ErlDrvSizeT count)
goto done;
}
else
-#endif
{
size_t resbufsize;
size_t n = 0, total = 0;
diff --git a/erts/emulator/hipe/hipe_mkliterals.c b/erts/emulator/hipe/hipe_mkliterals.c
index 4573980e1e..2e9d29eede 100644
--- a/erts/emulator/hipe/hipe_mkliterals.c
+++ b/erts/emulator/hipe/hipe_mkliterals.c
@@ -441,9 +441,7 @@ static const struct rts_param rts_params[] = {
{ 11, "ERL_FUN_SIZE", 1, ERL_FUN_SIZE },
{ 12, "P_SCHED_DATA",
-#ifdef ERTS_SMP
1, offsetof(struct process, scheduler_data)
-#endif
},
{ 14, "P_FP_EXCEPTION",
#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
@@ -453,11 +451,7 @@ static const struct rts_param rts_params[] = {
/* This flag is always defined, but its value is configuration-dependent. */
{ 15, "ERTS_IS_SMP",
1,
-#if defined(ERTS_SMP)
1
-#else
- 0
-#endif
},
/* This flag is always defined, but its value is configuration-dependent. */
{ 16, "ERTS_NO_FPE_SIGNALS",
diff --git a/erts/emulator/hipe/hipe_mode_switch.c b/erts/emulator/hipe/hipe_mode_switch.c
index ba7ae1e6a8..5775f0730f 100644
--- a/erts/emulator/hipe/hipe_mode_switch.c
+++ b/erts/emulator/hipe/hipe_mode_switch.c
@@ -490,14 +490,12 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
case HIPE_MODE_SWITCH_RES_WAIT:
case HIPE_MODE_SWITCH_RES_WAIT_TIMEOUT: {
/* same semantics, different debug trace messages */
-#ifdef ERTS_SMP
/* XXX: BEAM has different entries for the locked and unlocked
cases. HiPE doesn't, so we must check dynamically. */
if (p->hipe_smp.have_receive_locks)
p->hipe_smp.have_receive_locks = 0;
else
erts_smp_proc_lock(p, ERTS_PROC_LOCKS_MSG_RECEIVE);
-#endif
p->i = hipe_beam_pc_resume;
p->arity = 0;
erts_smp_atomic32_read_band_relb(&p->state,
@@ -524,10 +522,8 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
p = erts_schedule(NULL, p, reds_in - p->fcalls);
ERTS_SMP_REQ_PROC_MAIN_LOCK(p);
ASSERT(!(p->flags & F_HIPE_MODE));
-#ifdef ERTS_SMP
p->hipe_smp.have_receive_locks = 0;
reg = p->scheduler_data->x_reg_array;
-#endif
}
{
Eterm *argp;
@@ -637,28 +633,6 @@ static unsigned hipe_next_nstack_size(unsigned size)
return size ? size * 2 : HIPE_INITIAL_NSTACK_SIZE;
}
-#if 0 && defined(HIPE_NSTACK_GROWS_UP)
-void hipe_inc_nstack(Process *p)
-{
- Eterm *old_nstack = p->hipe.nstack;
- unsigned old_size = p->hipe.nstend - old_nstack;
- unsigned new_size = hipe_next_nstack_size(old_size);
- Eterm *new_nstack = erts_realloc(ERTS_ALC_T_HIPE,
- (char *) old_nstack,
- new_size*sizeof(Eterm));
- p->hipe.nstend = new_nstack + new_size;
- if (new_nstack != old_nstack) {
- p->hipe.nsp = new_nstack + (p->hipe.nsp - old_nstack);
- p->hipe.nstack = new_nstack;
- if (p->hipe.nstgraylim)
- p->hipe.nstgraylim =
- new_nstack + (p->hipe.nstgraylim - old_nstack);
- if (p->hipe.nstblacklim)
- p->hipe.nstblacklim =
- new_nstack + (p->hipe.nstblacklim - old_nstack);
- }
-}
-#endif
#if defined(HIPE_NSTACK_GROWS_DOWN)
void hipe_inc_nstack(Process *p)
diff --git a/erts/emulator/hipe/hipe_native_bif.c b/erts/emulator/hipe/hipe_native_bif.c
index d8044fe6da..f6ecf841ba 100644
--- a/erts/emulator/hipe/hipe_native_bif.c
+++ b/erts/emulator/hipe/hipe_native_bif.c
@@ -143,12 +143,10 @@ BIF_RETTYPE nbif_impl_hipe_set_timeout(NBIF_ALIST_1)
else {
int tres = erts_set_proc_timer_term(p, timeout_value);
if (tres != 0) { /* Wrong time */
-#ifdef ERTS_SMP
if (p->hipe_smp.have_receive_locks) {
p->hipe_smp.have_receive_locks = 0;
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_MSG_RECEIVE);
}
-#endif
BIF_ERROR(p, EXC_TIMEOUT_VALUE);
}
}
@@ -335,9 +333,7 @@ Binary *hipe_bs_reallocate(Binary* oldbptr, int newsize)
}
int hipe_bs_put_big_integer(
-#ifdef ERTS_SMP
Process *p,
-#endif
Eterm arg, Uint num_bits, byte* base, unsigned offset, unsigned flags)
{
byte *save_bin_buf;
@@ -530,7 +526,6 @@ Eterm hipe_check_get_msg(Process *c_p)
msgp = PEEK_MESSAGE(c_p);
if (!msgp) {
-#ifdef ERTS_SMP
erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
/* Make sure messages wont pass exit signals... */
if (ERTS_PROC_PENDING_EXIT(c_p)) {
@@ -544,12 +539,9 @@ Eterm hipe_check_get_msg(Process *c_p)
else {
/* XXX: BEAM doesn't need this */
c_p->hipe_smp.have_receive_locks = 1;
-#endif
c_p->flags &= ~F_DELAY_GC;
return THE_NON_VALUE;
-#ifdef ERTS_SMP
}
-#endif
}
if (is_non_value(ERL_MESSAGE_TERM(msgp))
@@ -573,7 +565,6 @@ Eterm hipe_check_get_msg(Process *c_p)
/*
* SMP-specific stuff
*/
-#ifdef ERTS_SMP
/*
* This is like the timeout BEAM instruction.
@@ -584,14 +575,12 @@ void hipe_clear_timeout(Process *c_p)
* A timeout has occurred. Reset the save pointer so that the next
* receive statement will examine the first message first.
*/
-#ifdef ERTS_SMP
/* XXX: BEAM has different entries for the locked and unlocked
cases. HiPE doesn't, so we must check dynamically. */
if (c_p->hipe_smp.have_receive_locks) {
c_p->hipe_smp.have_receive_locks = 0;
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_MSG_RECEIVE);
}
-#endif
if (IS_TRACED_FL(c_p, F_TRACE_RECEIVE)) {
trace_receive(c_p, am_clock_service, am_timeout, NULL);
}
@@ -604,4 +593,3 @@ void hipe_atomic_inc(int *counter)
erts_smp_atomic_inc_nob((erts_smp_atomic_t*)counter);
}
-#endif
diff --git a/erts/emulator/hipe/hipe_native_bif.h b/erts/emulator/hipe/hipe_native_bif.h
index 38f874888b..cbc7ab8dc6 100644
--- a/erts/emulator/hipe/hipe_native_bif.h
+++ b/erts/emulator/hipe/hipe_native_bif.h
@@ -107,11 +107,7 @@ void hipe_emasculate_binary(Eterm);
/*
* Stuff that is different in SMP and non-SMP.
*/
-#ifdef ERTS_SMP
int hipe_bs_put_big_integer(Process*, Eterm, Uint, byte*, unsigned, unsigned);
-#else
-int hipe_bs_put_big_integer(Eterm, Uint, byte*, unsigned, unsigned);
-#endif
AEXTERN(Eterm,nbif_check_get_msg,(Process*));
Eterm hipe_check_get_msg(Process*);
@@ -122,12 +118,10 @@ BIF_RETTYPE hipe_bifs_debug_native_called_2(BIF_ALIST_2);
/*
* SMP-specific stuff
*/
-#ifdef ERTS_SMP
AEXTERN(void,nbif_atomic_inc,(void));
AEXTERN(void,nbif_clear_timeout,(Process*));
void hipe_atomic_inc(int*);
void hipe_clear_timeout(Process*);
-#endif
#define BIF_LIST(M,F,A,B,C,I) AEXTERN(Eterm,nbif_##C,(void));
#include "erl_bif_list.h"
diff --git a/erts/emulator/hipe/hipe_primops.h b/erts/emulator/hipe/hipe_primops.h
index 4fcbc9df38..6aac5e6205 100644
--- a/erts/emulator/hipe/hipe_primops.h
+++ b/erts/emulator/hipe/hipe_primops.h
@@ -41,10 +41,8 @@ PRIMOP_LIST(am_bnot, &nbif_bnot_1)
PRIMOP_LIST(am_gc_1, &nbif_gc_1)
PRIMOP_LIST(am_check_get_msg, &nbif_check_get_msg)
-#ifdef ERTS_SMP
PRIMOP_LIST(am_atomic_inc, &nbif_atomic_inc)
PRIMOP_LIST(am_clear_timeout, &nbif_clear_timeout)
-#endif
PRIMOP_LIST(am_select_msg, &nbif_select_msg)
PRIMOP_LIST(am_set_timeout, &nbif_set_timeout)
PRIMOP_LIST(am_rethrow, &nbif_rethrow)
diff --git a/erts/emulator/hipe/hipe_process.h b/erts/emulator/hipe/hipe_process.h
index cc92bf653c..2cc7fd5f4e 100644
--- a/erts/emulator/hipe/hipe_process.h
+++ b/erts/emulator/hipe/hipe_process.h
@@ -82,7 +82,6 @@ static __inline__ void hipe_delete_process(struct hipe_process_state *p)
erts_free(ERTS_ALC_T_HIPE_STK, (void*)p->nstack);
}
-#ifdef ERTS_SMP
struct hipe_process_state_smp {
int have_receive_locks;
};
@@ -91,6 +90,5 @@ static __inline__ void hipe_init_process_smp(struct hipe_process_state_smp *p)
{
p->have_receive_locks = 0;
}
-#endif
#endif /* HIPE_PROCESS_H */
diff --git a/erts/emulator/hipe/hipe_stack.h b/erts/emulator/hipe/hipe_stack.h
index 7e30358767..0d3019cfd0 100644
--- a/erts/emulator/hipe/hipe_stack.h
+++ b/erts/emulator/hipe/hipe_stack.h
@@ -110,11 +110,6 @@ extern void (*hipe_handle_stack_trap(Process*))(void);
extern void hipe_update_stack_trap(Process*, const struct hipe_sdesc*);
extern int hipe_fill_stacktrace(Process*, int, Eterm**);
-#if 0 && defined(HIPE_NSTACK_GROWS_UP)
-#define hipe_nstack_start(p) ((p)->hipe.nstack)
-#define hipe_nstack_used(p) ((p)->hipe.nsp - (p)->hipe.nstack)
-#define hipe_nstack_avail(p) ((p)->hipe.nstend - (p)->hipe.nsp)
-#endif
#if defined(HIPE_NSTACK_GROWS_DOWN)
#define hipe_nstack_start(p) ((p)->hipe.nsp)
#define hipe_nstack_used(p) ((p)->hipe.nstend - (p)->hipe.nsp)
diff --git a/erts/emulator/hipe/hipe_x86_signal.c b/erts/emulator/hipe/hipe_x86_signal.c
index be68d7d463..d3b6933155 100644
--- a/erts/emulator/hipe/hipe_x86_signal.c
+++ b/erts/emulator/hipe/hipe_x86_signal.c
@@ -45,10 +45,8 @@
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
-#ifdef ERTS_SMP
#include "sys.h"
#include "erl_alloc.h"
-#endif
#include "hipe_signal.h"
#if defined(__GLIBC__) && __GLIBC__ == 2 && (__GLIBC_MINOR__ >= 3)
@@ -259,7 +257,6 @@ static void hipe_sigaltstack(void *ss_sp)
}
}
-#ifdef ERTS_SMP
/*
* Set up alternate signal stack for an Erlang process scheduler thread.
*/
@@ -269,7 +266,6 @@ void hipe_thread_signal_init(void)
We use it to suppress false leak report from valgrind */
hipe_sigaltstack(erts_alloc_permanent_cache_aligned(ERTS_ALC_T_HIPE_LL, SIGSTKSZ));
}
-#endif
/*
* Set up alternate signal stack for the main thread,
@@ -277,10 +273,6 @@ void hipe_thread_signal_init(void)
*/
static void hipe_sigaltstack_init(void)
{
-#if !defined(ERTS_SMP)
- static unsigned long my_sigstack[SIGSTKSZ/sizeof(long)];
- hipe_sigaltstack(my_sigstack);
-#endif
}
/*
diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c
index 799f67fc45..ce9f6e3f06 100644
--- a/erts/emulator/sys/common/erl_check_io.c
+++ b/erts/emulator/sys/common/erl_check_io.c
@@ -84,9 +84,6 @@ typedef char EventStateFlags;
#define ERTS_CIO_POLL_CTL ERTS_POLL_EXPORT(erts_poll_control)
#define ERTS_CIO_POLL_CTLV ERTS_POLL_EXPORT(erts_poll_controlv)
#define ERTS_CIO_POLL_WAIT ERTS_POLL_EXPORT(erts_poll_wait)
-#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
-#define ERTS_CIO_POLL_AS_INTR ERTS_POLL_EXPORT(erts_poll_async_sig_interrupt)
-#endif
#define ERTS_CIO_POLL_INTR ERTS_POLL_EXPORT(erts_poll_interrupt)
#define ERTS_CIO_POLL_INTR_TMD ERTS_POLL_EXPORT(erts_poll_interrupt_timed)
#define ERTS_CIO_NEW_POLLSET ERTS_POLL_EXPORT(erts_poll_create_pollset)
@@ -108,10 +105,8 @@ static struct pollset_info
int size;
ErtsSysFdType *array;
} active_fd;
-#ifdef ERTS_SMP
struct removed_fd* removed_list; /* list of deselected fd's*/
erts_smp_spinlock_t removed_list_lock;
-#endif
}pollset;
#define NUM_OF_POLLSETS 1
@@ -137,7 +132,6 @@ typedef struct {
EventStateFlags flags;
} ErtsDrvEventState;
-#ifdef ERTS_SMP
struct removed_fd {
struct removed_fd *next;
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
@@ -150,7 +144,6 @@ struct removed_fd {
#endif
};
-#endif
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
static int max_fds = -1;
@@ -161,7 +154,6 @@ static union {
byte _cache_line_alignment[64];
}drv_ev_state_locks[DRV_EV_STATE_LOCK_CNT];
-#ifdef ERTS_SMP
static ERTS_INLINE erts_smp_mtx_t* fd_mtx(ErtsSysFdType fd)
{
int hash = (int)fd;
@@ -170,9 +162,6 @@ static ERTS_INLINE erts_smp_mtx_t* fd_mtx(ErtsSysFdType fd)
# endif
return &drv_ev_state_locks[hash % DRV_EV_STATE_LOCK_CNT].lck;
}
-#else
-# define fd_mtx(fd) NULL
-#endif
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
@@ -249,9 +238,7 @@ static void
steal_pending_stop_nif(erts_dsprintf_buf_t *dsbufp, ErtsResource*,
ErtsDrvEventState *state, int mode, int on);
-#ifdef ERTS_SMP
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(removed_fd, struct removed_fd, 64, ERTS_ALC_T_FD_LIST)
-#endif
static ERTS_INLINE void
init_iotask(ErtsIoTask *io_task)
@@ -337,7 +324,6 @@ free_drv_event_data(ErtsDrvEventDataState *dep)
static ERTS_INLINE void
remember_removed(ErtsDrvEventState *state, struct pollset_info* psi)
{
-#ifdef ERTS_SMP
struct removed_fd *fdlp;
ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(fd_mtx(state->fd)));
if (erts_smp_atomic_read_nob(&psi->in_poll_wait)) {
@@ -355,29 +341,23 @@ remember_removed(ErtsDrvEventState *state, struct pollset_info* psi)
psi->removed_list = fdlp;
erts_smp_spin_unlock(&psi->removed_list_lock);
}
-#endif
}
static ERTS_INLINE int
is_removed(ErtsDrvEventState *state)
{
-#ifdef ERTS_SMP
/* Note that there is a possible race here, where an fd is removed
(increasing remove_cnt) and then added again just before erts_poll_wait
is called by erts_check_io. Any polled event on the re-added fd will then
be falsely ignored. But that does not matter, as the event will trigger
again next time erl_check_io is called. */
return state->remove_cnt > 0;
-#else
- return 0;
-#endif
}
static void
forget_removed(struct pollset_info* psi)
{
-#ifdef ERTS_SMP
struct removed_fd* fdlp;
struct removed_fd* tofree;
@@ -460,7 +440,6 @@ forget_removed(struct pollset_info* psi)
fdlp = fdlp->next;
removed_fd_free(tofree);
}
-#endif /* ERTS_SMP */
}
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
@@ -2118,13 +2097,6 @@ eready(Eterm id, ErtsDrvEventState *state, ErlDrvEventData event_data,
static void bad_fd_in_pollset(ErtsDrvEventState *, Eterm inport, Eterm outport);
-#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
-void
-ERTS_CIO_EXPORT(erts_check_io_async_sig_interrupt)(void)
-{
- ERTS_CIO_POLL_AS_INTR(pollset.ps);
-}
-#endif
void
ERTS_CIO_EXPORT(erts_check_io_interrupt)(int set)
@@ -2355,9 +2327,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
add_active_fd(state->fd);
}
-#ifdef ERTS_SMP
erts_smp_mtx_unlock(fd_mtx(fd));
-#endif
if (is_not_nil(in.pid)) {
send_event_tuple(&in, resource, am_ready_input);
}
@@ -2404,9 +2374,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
}
next_pollres:;
-#ifdef ERTS_SMP
erts_smp_mtx_unlock(fd_mtx(fd));
-#endif
next_pollres_unlocked:;
}
@@ -2561,7 +2529,6 @@ ERTS_CIO_EXPORT(erts_init_check_io)(void)
#endif
-#ifdef ERTS_SMP
init_removed_fd_alloc();
pollset.removed_list = NULL;
erts_smp_spinlock_init(&pollset.removed_list_lock, "pollset_rm_list", NIL,
@@ -2573,7 +2540,6 @@ ERTS_CIO_EXPORT(erts_init_check_io)(void)
ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_IO);
}
}
-#endif
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
max_fds = ERTS_CIO_POLL_MAX_FDS();
erts_smp_atomic_init_nob(&drv_ev_state_len, 0);
diff --git a/erts/emulator/sys/common/erl_check_io.h b/erts/emulator/sys/common/erl_check_io.h
index 2d3bb98afa..d74eb764d2 100644
--- a/erts/emulator/sys/common/erl_check_io.h
+++ b/erts/emulator/sys/common/erl_check_io.h
@@ -44,10 +44,6 @@ Eterm erts_check_io_info_kp(void *);
Eterm erts_check_io_info_nkp(void *);
int erts_check_io_max_files_kp(void);
int erts_check_io_max_files_nkp(void);
-#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
-void erts_check_io_async_sig_interrupt_kp(void);
-void erts_check_io_async_sig_interrupt_nkp(void);
-#endif
void erts_check_io_interrupt_kp(int);
void erts_check_io_interrupt_nkp(int);
void erts_check_io_interrupt_timed_kp(int, ErtsMonotonicTime);
@@ -69,9 +65,6 @@ void erts_lcnt_update_cio_locks_nkp(int enable);
Uint erts_check_io_size(void);
Eterm erts_check_io_info(void *);
int erts_check_io_max_files(void);
-#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
-void erts_check_io_async_sig_interrupt(void);
-#endif
void erts_check_io_interrupt(int);
void erts_check_io_interrupt_timed(int, ErtsMonotonicTime);
void erts_check_io(int);
diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index d69a79dc2a..d5b2fa87e4 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -188,7 +188,6 @@ typedef union {
static int no_mseg_allocators;
static ErtsAlgndMsegAllctr_t *aligned_mseg_allctr;
-#ifdef ERTS_SMP
#define ERTS_MSEG_ALLCTR_IX(IX) \
(&aligned_mseg_allctr[(IX)].mseg_alloc)
@@ -199,18 +198,6 @@ static ErtsAlgndMsegAllctr_t *aligned_mseg_allctr;
#define ERTS_MSEG_ALLCTR_OPT(OPT) \
((OPT)->sched_spec ? ERTS_MSEG_ALLCTR_SS() : ERTS_MSEG_ALLCTR_IX(0))
-#else
-
-#define ERTS_MSEG_ALLCTR_IX(IX) \
- (&aligned_mseg_allctr[0].mseg_alloc)
-
-#define ERTS_MSEG_ALLCTR_SS() \
- (&aligned_mseg_allctr[0].mseg_alloc)
-
-#define ERTS_MSEG_ALLCTR_OPT(OPT) \
- (&aligned_mseg_allctr[0].mseg_alloc)
-
-#endif
#define ERTS_MSEG_LOCK(MA) \
do { \
@@ -1404,11 +1391,7 @@ erts_mseg_init(ErtsMsegInit_t *init)
int i;
UWord x;
-#ifdef ERTS_SMP
no_mseg_allocators = init->nos + 1;
-#else
- no_mseg_allocators = 1;
-#endif
x = (UWord) malloc(sizeof(ErtsAlgndMsegAllctr_t)
*no_mseg_allocators
diff --git a/erts/emulator/sys/common/erl_os_monotonic_time_extender.c b/erts/emulator/sys/common/erl_os_monotonic_time_extender.c
index d53190fdd5..341845cc2a 100644
--- a/erts/emulator/sys/common/erl_os_monotonic_time_extender.c
+++ b/erts/emulator/sys/common/erl_os_monotonic_time_extender.c
@@ -23,7 +23,6 @@
#endif
#include "erl_os_monotonic_time_extender.h"
-#ifdef USE_THREADS
static void *os_monotonic_time_extender(void *vstatep)
{
@@ -49,30 +48,22 @@ static void *os_monotonic_time_extender(void *vstatep)
}
static erts_tid_t os_monotonic_extender_tid;
-#endif
void
erts_init_os_monotonic_time_extender(ErtsOsMonotonicTimeExtendState *statep,
Uint32 (*raw_os_monotonic_time)(void),
int check_seconds)
{
-#ifdef USE_THREADS
statep->raw_os_monotonic_time = raw_os_monotonic_time;
erts_atomic32_init_nob(&statep->extend[0], (erts_aint32_t) 0);
erts_atomic32_init_nob(&statep->extend[1], (erts_aint32_t) 0);
statep->check_interval = check_seconds;
-#else
- statep->extend[0] = (Uint32) 0;
- statep->extend[1] = (Uint32) 0;
- statep->last_msb = (ErtsMonotonicTime) 0;
-#endif
}
void
erts_late_init_os_monotonic_time_extender(ErtsOsMonotonicTimeExtendState *statep)
{
-#ifdef USE_THREADS
erts_thr_opts_t thr_opts = ERTS_THR_OPTS_DEFAULT_INITER;
thr_opts.detached = 1;
thr_opts.suggested_stack_size = 4;
@@ -85,5 +76,4 @@ erts_late_init_os_monotonic_time_extender(ErtsOsMonotonicTimeExtendState *statep
os_monotonic_time_extender,
(void*) statep,
&thr_opts);
-#endif
}
diff --git a/erts/emulator/sys/common/erl_os_monotonic_time_extender.h b/erts/emulator/sys/common/erl_os_monotonic_time_extender.h
index 8089c9aed9..2266f26e24 100644
--- a/erts/emulator/sys/common/erl_os_monotonic_time_extender.h
+++ b/erts/emulator/sys/common/erl_os_monotonic_time_extender.h
@@ -25,36 +25,17 @@
#include "erl_threads.h"
typedef struct {
-#ifdef USE_THREADS
Uint32 (*raw_os_monotonic_time)(void);
erts_atomic32_t extend[2];
int check_interval;
-#else
- Uint32 extend[2];
- ErtsMonotonicTime last_msb;
-#endif
} ErtsOsMonotonicTimeExtendState;
-#ifdef USE_THREADS
# define ERTS_CHK_EXTEND_OS_MONOTONIC_TIME(S, RT) ((void) 1)
# define ERTS_EXTEND_OS_MONOTONIC_TIME(S, RT) \
((((ErtsMonotonicTime) \
erts_atomic32_read_nob(&((S)->extend[((int) ((RT) >> 31)) & 1]))) \
<< 32) \
+ (RT))
-#else
-# define ERTS_CHK_EXTEND_OS_MONOTONIC_TIME(S, RT) \
- do { \
- Uint32 msb__ = (RT) & (((Uint32) 1) << 31); \
- if (msb__ != (S)->last_msb) { \
- int ix__ = ((int) ((S)->last_msb >> 31)) & 1; \
- (S)->extend[ix__]++; \
- (S)->last_msb = msb; \
- } \
- } while (0)
-# define ERTS_EXTEND_OS_MONOTONIC_TIME(S, RT) \
- ((((ErtsMonotonicTime) (S)->extend[((int) ((RT) >> 31)) & 1]) << 32) + (RT))
-#endif
void
erts_init_os_monotonic_time_extender(ErtsOsMonotonicTimeExtendState *statep,
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index 52a8b6a53f..3a6ae85b42 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -95,9 +95,6 @@
#define ERTS_POLL_DEBUG_PRINT
#endif
-#if defined(DEBUG) && 0
-#define HARD_DEBUG
-#endif
#ifdef _DARWIN_UNLIMITED_SELECT
typedef struct {
@@ -155,16 +152,11 @@ int ERTS_SELECT(int nfds, ERTS_fd_set *readfds, ERTS_fd_set *writefds,
#define ERTS_POLL_COALESCE_KP_RES (ERTS_POLL_USE_KQUEUE || ERTS_POLL_USE_EPOLL)
-#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
-# define ERTS_POLL_ASYNC_INTERRUPT_SUPPORT 1
-#else
# define ERTS_POLL_ASYNC_INTERRUPT_SUPPORT 0
-#endif
#define ERTS_POLL_USE_WAKEUP_PIPE \
(ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(USE_THREADS))
-#ifdef ERTS_SMP
#define ERTS_POLLSET_LOCK(PS) \
erts_smp_mtx_lock(&(PS)->mtx)
@@ -178,28 +170,13 @@ int ERTS_SELECT(int nfds, ERTS_fd_set *readfds, ERTS_fd_set *writefds,
#define ERTS_POLLSET_IS_POLLED(PS) \
((int) erts_atomic32_read_nob(&(PS)->polled))
-#else
-
-#define ERTS_POLLSET_LOCK(PS)
-#define ERTS_POLLSET_UNLOCK(PS)
-#define ERTS_POLLSET_SET_POLLED_CHK(PS) 0
-#define ERTS_POLLSET_UNSET_POLLED(PS)
-#define ERTS_POLLSET_IS_POLLED(PS) 0
-#endif
-
-#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
#define ERTS_POLLSET_SET_HAVE_UPDATE_REQUESTS(PS) \
erts_smp_atomic32_set_nob(&(PS)->have_update_requests, (erts_aint32_t) 1)
#define ERTS_POLLSET_UNSET_HAVE_UPDATE_REQUESTS(PS) \
erts_smp_atomic32_set_nob(&(PS)->have_update_requests, (erts_aint32_t) 0)
#define ERTS_POLLSET_HAVE_UPDATE_REQUESTS(PS) \
((int) erts_smp_atomic32_read_nob(&(PS)->have_update_requests))
-#else
-#define ERTS_POLLSET_SET_HAVE_UPDATE_REQUESTS(PS)
-#define ERTS_POLLSET_UNSET_HAVE_UPDATE_REQUESTS(PS)
-#define ERTS_POLLSET_HAVE_UPDATE_REQUESTS(PS) 0
-#endif
#if ERTS_POLL_USE_FALLBACK
# if ERTS_POLL_USE_POLL
@@ -212,7 +189,6 @@ int ERTS_SELECT(int nfds, ERTS_fd_set *readfds, ERTS_fd_set *writefds,
* --- Data types ------------------------------------------------------------
*/
-#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
#define ERTS_POLLSET_UPDATE_REQ_BLOCK_SIZE 128
typedef struct ErtsPollSetUpdateRequestsBlock_ ErtsPollSetUpdateRequestsBlock;
@@ -222,19 +198,14 @@ struct ErtsPollSetUpdateRequestsBlock_ {
int fds[ERTS_POLLSET_UPDATE_REQ_BLOCK_SIZE];
};
-#endif
-#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
# define ERTS_POLL_FD_FLG_INURQ (((unsigned short) 1) << 0)
-#endif
#if ERTS_POLL_USE_FALLBACK
# define ERTS_POLL_FD_FLG_INFLBCK (((unsigned short) 1) << 1)
# define ERTS_POLL_FD_FLG_USEFLBCK (((unsigned short) 1) << 2)
#endif
-#if ERTS_POLL_USE_KERNEL_POLL || defined(ERTS_SMP)
# define ERTS_POLL_FD_FLG_RST (((unsigned short) 1) << 3)
-#endif
typedef struct {
#if ERTS_POLL_USE_POLL
int pix;
@@ -244,9 +215,7 @@ typedef struct {
#if ERTS_POLL_COALESCE_KP_RES
unsigned short res_ev_ix;
#endif
-#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE || ERTS_POLL_USE_FALLBACK
unsigned short flags;
-#endif
} ErtsFdStatus;
@@ -301,27 +270,19 @@ struct ErtsPollSet_ {
ERTS_fd_set output_fds;
ERTS_fd_set res_output_fds;
#endif
-#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
ErtsPollSetUpdateRequestsBlock update_requests;
ErtsPollSetUpdateRequestsBlock *curr_upd_req_block;
erts_smp_atomic32_t have_update_requests;
-#endif
-#ifdef ERTS_SMP
erts_atomic32_t polled;
erts_smp_mtx_t mtx;
-#endif
-#if ERTS_POLL_USE_WAKEUP_PIPE
int wake_fds[2];
-#endif
#if ERTS_POLL_USE_TIMERFD
int timer_fd;
#endif
#if ERTS_POLL_USE_FALLBACK
int fallback_used;
#endif
-#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
erts_atomic32_t wakeup_state;
-#endif
erts_atomic64_t timeout_time;
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
erts_smp_atomic_t no_avoided_wakeups;
@@ -413,50 +374,37 @@ get_timeout_time(ErtsPollSet ps)
static ERTS_INLINE void
reset_wakeup_state(ErtsPollSet ps)
{
-#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
erts_atomic32_set_mb(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
-#endif
}
static ERTS_INLINE int
is_woken(ErtsPollSet ps)
{
-#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
return erts_atomic32_read_acqb(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN;
-#else
- return 0;
-#endif
}
static ERTS_INLINE int
is_interrupted_reset(ErtsPollSet ps)
{
-#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
return (erts_atomic32_xchg_acqb(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN)
== ERTS_POLL_WOKEN_INTR);
-#else
- return 0;
-#endif
}
static ERTS_INLINE void
woke_up(ErtsPollSet ps)
{
-#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
erts_aint32_t wakeup_state = erts_atomic32_read_acqb(&ps->wakeup_state);
if (wakeup_state == ERTS_POLL_NOT_WOKEN)
(void) erts_atomic32_cmpxchg_nob(&ps->wakeup_state,
ERTS_POLL_WOKEN,
ERTS_POLL_NOT_WOKEN);
ASSERT(erts_atomic32_read_nob(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN);
-#endif
}
/*
* --- Wakeup pipe -----------------------------------------------------------
*/
-#if ERTS_POLL_USE_WAKEUP_PIPE
static ERTS_INLINE void
wake_poller(ErtsPollSet ps, int interrupted, int async_signal_safe)
@@ -507,18 +455,11 @@ wake_poller(ErtsPollSet ps, int interrupted, int async_signal_safe)
static ERTS_INLINE void
cleanup_wakeup_pipe(ErtsPollSet ps)
{
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- int intr = 0;
-#endif
int fd = ps->wake_fds[0];
int res;
do {
char buf[32];
res = read(fd, buf, sizeof(buf));
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- if (res > 0)
- intr = 1;
-#endif
} while (res > 0 || (res < 0 && errno == EINTR));
if (res < 0 && errno != ERRNO_BLOCK) {
fatal_error("%s:%d:cleanup_wakeup_pipe(): "
@@ -528,10 +469,6 @@ cleanup_wakeup_pipe(ErtsPollSet ps)
fd,
erl_errno_id(errno), errno);
}
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
- if (intr)
- erts_atomic32_set_nob(&ps->wakeup_state, ERTS_POLL_WOKEN_INTR);
-#endif
}
static void
@@ -574,7 +511,6 @@ create_wakeup_pipe(ErtsPollSet ps)
ps->wake_fds[1] = wake_fds[1];
}
-#endif /* ERTS_POLL_USE_WAKEUP_PIPE */
/*
* --- timer fd -----------------------------------------------------------
@@ -648,7 +584,6 @@ timerfd_clear(ErtsPollSet ps, int res, int max_res) {
/*
* --- Poll set update requests ----------------------------------------------
*/
-#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
static ERTS_INLINE void
enqueue_update_request(ErtsPollSet ps, int fd)
@@ -691,7 +626,6 @@ free_update_requests_block(ErtsPollSet ps,
}
}
-#endif /* ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE */
/*
* --- Growing poll set structures -------------------------------------------
@@ -819,9 +753,7 @@ grow_fds_status(ErtsPollSet ps, int min_fd)
#if ERTS_POLL_COALESCE_KP_RES
ps->fds_status[i].res_ev_ix = (unsigned short) ERTS_POLL_MAX_RES;
#endif
-#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE || ERTS_POLL_USE_FALLBACK
ps->fds_status[i].flags = (unsigned short) 0;
-#endif
}
ps->fds_status_len = new_len;
}
@@ -849,7 +781,7 @@ need_update(ErtsPollSet ps, int fd)
ps->fds_status[fd].flags &= ~ERTS_POLL_FD_FLG_RST;
reset = 0;
}
-#elif defined(ERTS_SMP)
+#else
ps->fds_status[fd].flags &= ~ERTS_POLL_FD_FLG_RST;
#endif
@@ -1518,7 +1450,6 @@ static int update_pollset(ErtsPollSet ps, int fd)
#endif /* ERTS_POLL_USE_POLL || ERTS_POLL_USE_SELECT || ERTS_POLL_USE_FALLBACK */
-#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
static void
handle_update_requests(ErtsPollSet ps)
@@ -1565,7 +1496,6 @@ handle_update_requests(ErtsPollSet ps)
ERTS_POLLSET_UNSET_HAVE_UPDATE_REQUESTS(ps);
}
-#endif /* ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE */
static ERTS_INLINE ErtsPollEvents
poll_control(ErtsPollSet ps, int fd, ErtsPollEvents events, int on, int *do_wake)
@@ -1583,12 +1513,10 @@ poll_control(ErtsPollSet ps, int fd, ErtsPollEvents events, int on, int *do_wake
goto done;
}
#endif
-#if ERTS_POLL_USE_WAKEUP_PIPE
if (fd == ps->wake_fds[0] || fd == ps->wake_fds[1]) {
new_events = ERTS_POLL_EV_NVAL;
goto done;
}
-#endif
#if ERTS_POLL_USE_TIMERFD
if (fd == ps->timer_fd) {
new_events = ERTS_POLL_EV_NVAL;
@@ -1615,9 +1543,7 @@ poll_control(ErtsPollSet ps, int fd, ErtsPollEvents events, int on, int *do_wake
new_events &= ~events;
if (new_events == (ErtsPollEvents) 0) {
-#if ERTS_POLL_USE_KERNEL_POLL || defined(ERTS_SMP)
ps->fds_status[fd].flags |= ERTS_POLL_FD_FLG_RST;
-#endif
#if ERTS_POLL_USE_FALLBACK
ps->fds_status[fd].flags &= ~ERTS_POLL_FD_FLG_USEFLBCK;
#endif
@@ -1626,18 +1552,12 @@ poll_control(ErtsPollSet ps, int fd, ErtsPollEvents events, int on, int *do_wake
ps->fds_status[fd].events = new_events;
if (new_events == ps->fds_status[fd].used_events
-#if ERTS_POLL_USE_KERNEL_POLL || defined(ERTS_SMP)
&& !(ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_RST)
-#endif
) {
*do_wake = 0;
goto done;
}
-#if !ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
- if (update_pollset(ps, fd) != 0)
- new_events = ERTS_POLL_EV_ERR;
-#else /* ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE */
#if ERTS_POLL_USE_CONCURRENT_UPDATE
if (ERTS_POLLSET_IS_POLLED(ps)) {
@@ -1652,7 +1572,6 @@ poll_control(ErtsPollSet ps, int fd, ErtsPollEvents events, int on, int *do_wake
enqueue_update_request(ps, fd);
-#ifdef ERTS_SMP
/*
* If new events have been added, we need to wake up the
* polling thread, but if events have been removed we don't.
@@ -1660,9 +1579,7 @@ poll_control(ErtsPollSet ps, int fd, ErtsPollEvents events, int on, int *do_wake
if ((new_events && (ps->fds_status[fd].flags & ERTS_POLL_FD_FLG_RST))
|| (~ps->fds_status[fd].used_events & new_events))
*do_wake = 1;
-#endif /* ERTS_SMP */
-#endif /* ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE */
done:
#ifdef ERTS_POLL_DEBUG_PRINT
@@ -1695,10 +1612,8 @@ ERTS_POLL_EXPORT(erts_poll_controlv)(ErtsPollSet ps,
ERTS_POLLSET_UNLOCK(ps);
-#ifdef ERTS_SMP
if (final_do_wake)
wake_poller(ps, 0, 0);
-#endif /* ERTS_SMP */
}
@@ -1718,11 +1633,9 @@ ERTS_POLL_EXPORT(erts_poll_control)(ErtsPollSet ps,
ERTS_POLLSET_UNLOCK(ps);
-#ifdef ERTS_SMP
if (*do_wake) {
wake_poller(ps, 0, 0);
}
-#endif /* ERTS_SMP */
return res;
}
@@ -1739,9 +1652,7 @@ save_kp_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res, int chk_fds_res)
int res = 0;
int i;
int n = chk_fds_res < max_res ? chk_fds_res : max_res;
-#if ERTS_POLL_USE_WAKEUP_PIPE
int wake_fd = ps->wake_fds[0];
-#endif
#if ERTS_POLL_USE_TIMERFD
int timer_fd = ps->timer_fd;
#endif
@@ -1754,12 +1665,10 @@ save_kp_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res, int chk_fds_res)
int fd = ps->res_events[i].data.fd;
int ix;
ErtsPollEvents revents;
-#if ERTS_POLL_USE_WAKEUP_PIPE
if (fd == wake_fd) {
cleanup_wakeup_pipe(ps);
continue;
}
-#endif
#if ERTS_POLL_USE_TIMERFD
if (fd == timer_fd) {
continue;
@@ -1805,12 +1714,10 @@ save_kp_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res, int chk_fds_res)
}
if (ev->filter == EVFILT_READ) {
-#if ERTS_POLL_USE_WAKEUP_PIPE
if (fd == wake_fd) {
cleanup_wakeup_pipe(ps);
continue;
}
-#endif
pr[ix].events |= ERTS_POLL_EV_IN;
}
else if (ev->filter == EVFILT_WRITE)
@@ -1833,12 +1740,10 @@ save_kp_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res, int chk_fds_res)
if (ps->res_events[i].revents) {
int fd = ps->res_events[i].fd;
ErtsPollEvents revents;
-#if ERTS_POLL_USE_WAKEUP_PIPE
if (fd == wake_fd) {
cleanup_wakeup_pipe(ps);
continue;
}
-#endif
#if ERTS_POLL_USE_TIMERFD
if (fd == timer_fd) {
continue;
@@ -1938,7 +1843,7 @@ save_poll_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res,
i++;
continue;
}
-#elif ERTS_POLL_USE_WAKEUP_PIPE
+#else
if (fd == wake_fd) {
cleanup_wakeup_pipe(ps);
i++;
@@ -1988,7 +1893,7 @@ save_poll_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res,
fd++;
continue;
}
-#elif ERTS_POLL_USE_WAKEUP_PIPE
+#else
if (fd == wake_fd) {
cleanup_wakeup_pipe(ps);
fd++;
@@ -2051,7 +1956,7 @@ save_poll_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res,
fd++;
continue;
}
-#elif ERTS_POLL_USE_WAKEUP_PIPE
+#else
if (fd == wake_fd) {
cleanup_wakeup_pipe(ps);
fd++;
@@ -2073,7 +1978,7 @@ save_poll_result(ErtsPollSet ps, ErtsPollResFd pr[], int max_res,
fd++;
continue;
}
-#elif ERTS_POLL_USE_WAKEUP_PIPE
+#else
if (fd == wake_fd) {
cleanup_wakeup_pipe(ps);
fd++;
@@ -2261,9 +2166,7 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res)
struct itimerspec its;
timeout = get_timeout_itimerspec(ps, &its, timeout_time);
if (timeout) {
-#ifdef ERTS_SMP
erts_thr_progress_prepare_wait(NULL);
-#endif
ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP);
timerfd_set(ps, &its);
res = epoll_wait(ps->kp_fd, ps->res_events, max_res, -1);
@@ -2275,9 +2178,7 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res)
#else /* !ERTS_POLL_USE_TIMERFD */
timeout = (int) get_timeout(ps, 1000, timeout_time);
if (timeout) {
-#ifdef ERTS_SMP
erts_thr_progress_prepare_wait(NULL);
-#endif
ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP);
}
res = epoll_wait(ps->kp_fd, ps->res_events, max_res, timeout);
@@ -2288,9 +2189,7 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res)
grow_res_events(ps, max_res);
timeout = get_timeout_timespec(ps, &ts, timeout_time);
if (timeout) {
-#ifdef ERTS_SMP
erts_thr_progress_prepare_wait(NULL);
-#endif
ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP);
}
res = kevent(ps->kp_fd, NULL, 0, ps->res_events, max_res, &ts);
@@ -2308,18 +2207,14 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res)
*/
struct dvpoll poll_res;
int nfds = (int) erts_smp_atomic_read_nob(&ps->no_of_user_fds);
-#if ERTS_POLL_USE_WAKEUP_PIPE
nfds++; /* Wakeup pipe */
-#endif
timeout = (int) get_timeout(ps, 1000, timeout_time);
poll_res.dp_nfds = nfds < max_res ? nfds : max_res;
if (poll_res.dp_nfds > ps->res_events_len)
grow_res_events(ps, poll_res.dp_nfds);
poll_res.dp_fds = ps->res_events;
if (timeout) {
-#ifdef ERTS_SMP
erts_thr_progress_prepare_wait(NULL);
-#endif
ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP);
}
poll_res.dp_timeout = timeout;
@@ -2328,9 +2223,7 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res)
struct timespec ts;
timeout = get_timeout_timespec(ps, &ts, timeout_time);
if (timeout) {
-#ifdef ERTS_SMP
erts_thr_progress_prepare_wait(NULL);
-#endif
ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP);
}
res = ppoll(ps->poll_fds, ps->no_poll_fds, &ts, NULL);
@@ -2338,9 +2231,7 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res)
timeout = (int) get_timeout(ps, 1000, timeout_time);
if (timeout) {
-#ifdef ERTS_SMP
erts_thr_progress_prepare_wait(NULL);
-#endif
ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP);
}
res = poll(ps->poll_fds, ps->no_poll_fds, timeout);
@@ -2352,9 +2243,7 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res)
ERTS_FD_COPY(&ps->output_fds, &ps->res_output_fds);
if (timeout) {
-#ifdef ERTS_SMP
erts_thr_progress_prepare_wait(NULL);
-#endif
ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP);
}
res = ERTS_SELECT(ps->max_fd + 1,
@@ -2362,7 +2251,6 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res)
&ps->res_output_fds,
NULL,
&to);
-#ifdef ERTS_SMP
if (timeout) {
erts_thr_progress_finalize_wait(NULL);
ERTS_MSACC_POP_STATE_M();
@@ -2397,14 +2285,11 @@ check_fd_events(ErtsPollSet ps, ErtsMonotonicTime timeout_time, int max_res)
res = -1;
}
}
-#endif /* ERTS_SMP */
return res;
#endif /* ----------------------------------------- */
}
if (timeout) {
-#ifdef ERTS_SMP
erts_thr_progress_finalize_wait(NULL);
-#endif
ERTS_MSACC_POP_STATE_M();
}
return res;
@@ -2420,9 +2305,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
ErtsMonotonicTime to;
int res, no_fds;
int ebadf = 0;
-#ifdef ERTS_SMP
int ps_locked = 0;
-#endif
no_fds = *len;
#ifdef ERTS_POLL_MAX_RES
@@ -2447,13 +2330,11 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
? ERTS_POLL_NO_TIMEOUT /* Use zero timeout */
: timeout_time);
-#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
if (ERTS_POLLSET_HAVE_UPDATE_REQUESTS(ps)) {
ERTS_POLLSET_LOCK(ps);
handle_update_requests(ps);
ERTS_POLLSET_UNLOCK(ps);
}
-#endif
while (1) {
res = check_fd_events(ps, to, no_fds);
@@ -2484,10 +2365,8 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
save_results:
#endif
-#ifdef ERTS_SMP
ps_locked = 1;
ERTS_POLLSET_LOCK(ps);
-#endif
no_fds = save_poll_result(ps, pr, no_fds, res, ebadf);
@@ -2499,11 +2378,9 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
*len = no_fds;
}
-#ifdef ERTS_SMP
if (ps_locked)
ERTS_POLLSET_UNLOCK(ps);
ERTS_POLLSET_UNSET_POLLED(ps);
-#endif
done:
set_timeout_time(ps, ERTS_MONOTONIC_TIME_MAX);
@@ -2522,25 +2399,12 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
void
ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet ps, int set)
{
-#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
if (!set)
reset_wakeup_state(ps);
else
wake_poller(ps, 1, 0);
-#endif
}
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
-void
-ERTS_POLL_EXPORT(erts_poll_async_sig_interrupt)(ErtsPollSet ps)
-{
- /*
- * NOTE: This function is called from signal handlers, it,
- * therefore, it has to be async-signal safe.
- */
- wake_poller(ps, 1, 1);
-}
-#endif
/*
* erts_poll_interrupt_timed():
@@ -2552,7 +2416,6 @@ ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet ps,
int set,
ErtsMonotonicTime timeout_time)
{
-#if ERTS_POLL_ASYNC_INTERRUPT_SUPPORT || defined(ERTS_SMP)
if (!set)
reset_wakeup_state(ps);
else {
@@ -2568,7 +2431,6 @@ ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet ps,
erts_smp_atomic_inc_nob(&ps->no_interrupt_timed);
#endif
}
-#endif
}
int
@@ -2682,22 +2544,14 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
ERTS_FD_ZERO(&ps->res_output_fds);
#endif
#endif
-#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
ps->update_requests.next = NULL;
ps->update_requests.len = 0;
ps->curr_upd_req_block = &ps->update_requests;
erts_smp_atomic32_init_nob(&ps->have_update_requests, 0);
-#endif
-#ifdef ERTS_SMP
erts_atomic32_init_nob(&ps->polled, 0);
erts_smp_mtx_init(&ps->mtx, "pollset", NIL, ERTS_LOCK_FLAGS_CATEGORY_IO);
-#endif
-#if defined(USE_THREADS) || ERTS_POLL_ASYNC_INTERRUPT_SUPPORT
erts_atomic32_init_nob(&ps->wakeup_state, (erts_aint32_t) 0);
-#endif
-#if ERTS_POLL_USE_WAKEUP_PIPE
create_wakeup_pipe(ps);
-#endif
#if ERTS_POLL_USE_TIMERFD
create_timerfd(ps);
#endif
@@ -2724,9 +2578,7 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(void)
erts_smp_atomic_init_nob(&ps->no_avoided_interrupts, 0);
erts_smp_atomic_init_nob(&ps->no_interrupt_timed, 0);
#endif
-#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
handle_update_requests(ps);
-#endif
#if ERTS_POLL_USE_FALLBACK
ps->fallback_used = 0;
#endif
@@ -2772,7 +2624,6 @@ ERTS_POLL_EXPORT(erts_poll_destroy_pollset)(ErtsPollSet ps)
erts_free(ERTS_ALC_T_SELECT_FDS, (void *) ps->res_output_fds.ptr);
#endif
#endif
-#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
{
ErtsPollSetUpdateRequestsBlock *urqbp = ps->update_requests.next;
while (urqbp) {
@@ -2781,16 +2632,11 @@ ERTS_POLL_EXPORT(erts_poll_destroy_pollset)(ErtsPollSet ps)
free_update_requests_block(ps, free_urqbp);
}
}
-#endif
-#ifdef ERTS_SMP
erts_smp_mtx_destroy(&ps->mtx);
-#endif
-#if ERTS_POLL_USE_WAKEUP_PIPE
if (ps->wake_fds[0] >= 0)
close(ps->wake_fds[0]);
if (ps->wake_fds[1] >= 0)
close(ps->wake_fds[1]);
-#endif
#if ERTS_POLL_USE_TIMERFD
if (ps->timer_fd >= 0)
close(ps->timer_fd);
@@ -2818,9 +2664,7 @@ ERTS_POLL_EXPORT(erts_poll_destroy_pollset)(ErtsPollSet ps)
void
ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip)
{
-#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
int pending_updates;
-#endif
Uint size = 0;
ERTS_POLLSET_LOCK(ps);
@@ -2845,7 +2689,6 @@ ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip)
#endif
#endif
-#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
{
ErtsPollSetUpdateRequestsBlock *urqbp = ps->update_requests.next;
pending_updates = ps->update_requests.len;
@@ -2855,7 +2698,6 @@ ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip)
urqbp = urqbp->next;
}
}
-#endif
pip->primary =
#if ERTS_POLL_USE_KQUEUE
@@ -2896,9 +2738,7 @@ ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip)
pip->memory_size = size;
pip->poll_set_size = (int) erts_smp_atomic_read_nob(&ps->no_of_user_fds);
-#if ERTS_POLL_USE_WAKEUP_PIPE
pip->poll_set_size++; /* Wakeup pipe */
-#endif
#if ERTS_POLL_USE_TIMERFD
pip->poll_set_size++; /* timerfd */
#endif
@@ -2922,19 +2762,11 @@ ERTS_POLL_EXPORT(erts_poll_info)(ErtsPollSet ps, ErtsPollInfo *pip)
#endif
pip->lazy_updates =
-#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
1
-#else
- 0
-#endif
;
pip->pending_updates =
-#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
pending_updates
-#else
- 0
-#endif
;
pip->batch_updates =
@@ -3031,9 +2863,7 @@ ERTS_POLL_EXPORT(erts_poll_get_selected_events)(ErtsPollSet ps,
else {
ev[fd] = ps->fds_status[fd].events;
if (
-#if ERTS_POLL_USE_WAKEUP_PIPE
fd == ps->wake_fds[0] || fd == ps->wake_fds[1] ||
-#endif
#if ERTS_POLL_USE_TIMERFD
fd == ps->timer_fd ||
#endif
@@ -3121,11 +2951,7 @@ print_misc_debug_info(void)
"select"
#endif
,
-#if ERTS_POLL_USE_UPDATE_REQUESTS_QUEUE
"true"
-#else
- "false"
-#endif
,
#if ERTS_POLL_USE_BATCH_UPDATE_POLLSET
"true"
diff --git a/erts/emulator/sys/common/erl_poll.h b/erts/emulator/sys/common/erl_poll.h
index b3b4d79984..a57dc51e5b 100644
--- a/erts/emulator/sys/common/erl_poll.h
+++ b/erts/emulator/sys/common/erl_poll.h
@@ -227,9 +227,6 @@ typedef struct {
#endif
} ErtsPollInfo;
-#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
-void ERTS_POLL_EXPORT(erts_poll_async_sig_interrupt)(ErtsPollSet);
-#endif
void ERTS_POLL_EXPORT(erts_poll_interrupt)(ErtsPollSet,
int);
void ERTS_POLL_EXPORT(erts_poll_interrupt_timed)(ErtsPollSet,
diff --git a/erts/emulator/sys/unix/erl_unix_sys.h b/erts/emulator/sys/unix/erl_unix_sys.h
index 22059d21d5..cbbd0e6f40 100644
--- a/erts/emulator/sys/unix/erl_unix_sys.h
+++ b/erts/emulator/sys/unix/erl_unix_sys.h
@@ -128,10 +128,6 @@
/* File descriptors are numbers and are consecutively allocated on Unix */
#define ERTS_SYS_CONTINOUS_FD_NUMBERS
-#ifndef ERTS_SMP
-# undef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
-# define ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
-#endif
typedef void *GETENV_STATE;
@@ -354,9 +350,7 @@ extern void erts_sys_unix_later_init(void);
#ifdef NO_FPE_SIGNALS
#define erts_get_current_fp_exception() NULL
-#ifdef ERTS_SMP
#define erts_thread_init_fp_exception() do{}while(0)
-#endif
# define __ERTS_FP_CHECK_INIT(fpexnp) do {} while (0)
# define __ERTS_FP_ERROR(fpexnp, f, Action) if (!isfinite(f)) { Action; } else {}
# define __ERTS_FP_ERROR_THOROUGH(fpexnp, f, Action) __ERTS_FP_ERROR(fpexnp, f, Action)
@@ -369,9 +363,7 @@ extern void erts_sys_unix_later_init(void);
#else /* !NO_FPE_SIGNALS */
extern volatile unsigned long *erts_get_current_fp_exception(void);
-#ifdef ERTS_SMP
extern void erts_thread_init_fp_exception(void);
-#endif
# if (defined(__i386__) || defined(__x86_64__)) && defined(__GNUC__)
# define erts_fwait(fpexnp,f) \
__asm__ __volatile__("fwait" : "=m"(*(fpexnp)) : "m"(f))
@@ -438,10 +430,8 @@ void erts_sys_unblock_fpe(int);
/* Threads */
-#ifdef USE_THREADS
extern int init_async(int);
extern int exit_async(void);
-#endif
#define ERTS_EXIT_AFTER_DUMP _exit
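
The __ERTS_FP_ERROR macro kept unconditional above (NO_FPE_SIGNALS case) checks a result with isfinite() instead of relying on a SIGFPE handler. A small self-contained illustration of that style of software float-error checking (illustrative names, not the ERTS macros):

#include <math.h>
#include <stdio.h>

#define FP_ERROR_CHECK(f, Action) \
    do { if (!isfinite(f)) { Action; } } while (0)

static int checked_div(double a, double b, double *res)
{
    *res = a / b;
    FP_ERROR_CHECK(*res, return -1);    /* Inf or NaN: report badarith */
    return 0;
}

int main(void)
{
    double r;
    if (checked_div(1.0, 0.0, &r) < 0)
        printf("badarith\n");
    return 0;
}
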
diff --git a/erts/emulator/sys/unix/sys.c b/erts/emulator/sys/unix/sys.c
index 50d8a35217..83edca1c2f 100644
--- a/erts/emulator/sys/unix/sys.c
+++ b/erts/emulator/sys/unix/sys.c
@@ -58,9 +58,7 @@
#define __DARWIN__ 1
#endif
-#ifdef USE_THREADS
#include "erl_threads.h"
-#endif
#include "erl_mseg.h"
@@ -94,19 +92,12 @@ extern void erts_sys_init_float(void);
static int debug_log = 0;
#endif
-#ifdef ERTS_SMP
static erts_smp_atomic32_t have_prepared_crash_dump;
#define ERTS_PREPARED_CRASH_DUMP \
((int) erts_smp_atomic32_xchg_nob(&have_prepared_crash_dump, 1))
-#else
-static volatile int have_prepared_crash_dump;
-#define ERTS_PREPARED_CRASH_DUMP \
- (have_prepared_crash_dump++)
-#endif
erts_smp_atomic_t sys_misc_mem_sz;
-#if defined(ERTS_SMP)
static void smp_sig_notify(int signum);
static int sig_notify_fds[2] = {-1, -1};
@@ -114,7 +105,6 @@ static int sig_notify_fds[2] = {-1, -1};
static int sig_suspend_fds[2] = {-1, -1};
#endif
-#endif
jmp_buf erts_sys_sigsegv_jmp;
@@ -128,38 +118,12 @@ static int max_files = -1;
/*
* a few variables used by the break handler
*/
-#ifdef ERTS_SMP
erts_smp_atomic32_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 0)
-#else
-volatile int erts_break_requested = 0;
-#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
-#define ERTS_UNSET_BREAK_REQUESTED (erts_break_requested = 0)
-#endif
-#ifndef ERTS_SMP
-static Eterm signalstate_sigterm[] = {
- am_sigint, /* 0 */
- am_sighup, /* 1 */
- am_sigquit, /* 2 */
- am_sigabrt, /* 3 */
- am_sigalrm, /* 4 */
- am_sigterm, /* 5 */
- am_sigusr1, /* 6 */
- am_sigusr2, /* 7 */
- am_sigchld, /* 8 */
- am_sigstop, /* 9 */
- am_sigtstp /* 10 */
-};
-
-volatile Uint erts_signal_state = 0;
-#define ERTS_SET_SIGNAL_STATE(S) (erts_signal_state |= signum_to_signalstate(S))
-#define ERTS_CLEAR_SIGNAL_STATE (erts_signal_state = 0)
-static ERTS_INLINE Uint signum_to_signalstate(int signum);
-#endif
/* set early so the break handler has access to initial mode */
static struct termios initial_tty_mode;
@@ -223,9 +187,6 @@ init_check_io(void)
io_func.select = driver_select_kp;
io_func.enif_select = enif_select_kp;
io_func.event = driver_event_kp;
-#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
- io_func.check_io_as_interrupt = erts_check_io_async_sig_interrupt_kp;
-#endif
io_func.check_io_interrupt = erts_check_io_interrupt_kp;
io_func.check_io_interrupt_tmd = erts_check_io_interrupt_timed_kp;
io_func.check_io = erts_check_io_kp;
@@ -239,9 +200,6 @@ init_check_io(void)
io_func.select = driver_select_nkp;
io_func.enif_select = enif_select_nkp;
io_func.event = driver_event_nkp;
-#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
- io_func.check_io_as_interrupt = erts_check_io_async_sig_interrupt_nkp;
-#endif
io_func.check_io_interrupt = erts_check_io_interrupt_nkp;
io_func.check_io_interrupt_tmd = erts_check_io_interrupt_timed_nkp;
io_func.check_io = erts_check_io_nkp;
@@ -253,11 +211,7 @@ init_check_io(void)
}
}
-#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
-#define ERTS_CHK_IO_AS_INTR() (*io_func.check_io_as_interrupt)()
-#else
#define ERTS_CHK_IO_AS_INTR() (*io_func.check_io_interrupt)(1)
-#endif
#define ERTS_CHK_IO_INTR (*io_func.check_io_interrupt)
#define ERTS_CHK_IO_INTR_TMD (*io_func.check_io_interrupt_tmd)
#define ERTS_CHK_IO (*io_func.check_io)
@@ -272,11 +226,7 @@ init_check_io(void)
max_files = erts_check_io_max_files();
}
-#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
-#define ERTS_CHK_IO_AS_INTR() erts_check_io_async_sig_interrupt()
-#else
#define ERTS_CHK_IO_AS_INTR() erts_check_io_interrupt(1)
-#endif
#define ERTS_CHK_IO_INTR erts_check_io_interrupt
#define ERTS_CHK_IO_INTR_TMD erts_check_io_interrupt_timed
#define ERTS_CHK_IO erts_check_io
@@ -290,13 +240,11 @@ erts_sys_schedule_interrupt(int set)
ERTS_CHK_IO_INTR(set);
}
-#ifdef ERTS_SMP
void
erts_sys_schedule_interrupt_timed(int set, ErtsMonotonicTime timeout_time)
{
ERTS_CHK_IO_INTR_TMD(set, timeout_time);
}
-#endif
UWord
erts_sys_get_page_size(void)
@@ -339,7 +287,6 @@ MALLOC_USE_HASH(1);
#endif
#endif
-#ifdef USE_THREADS
#ifdef ERTS_THR_HAVE_SIG_FUNCS
@@ -418,19 +365,15 @@ thr_create_prepare_child(void *vtcdp)
erts_sched_bind_atthrcreate_child(tcdp->sched_bind_data);
}
-#endif /* #ifdef USE_THREADS */
void
erts_sys_pre_init(void)
{
-#ifdef USE_THREADS
erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;
-#endif
erts_printf_add_cr_to_stdout = 1;
erts_printf_add_cr_to_stderr = 1;
-#ifdef USE_THREADS
eid.thread_create_child_func = thr_create_prepare_child;
/* Before creation in parent */
@@ -452,21 +395,13 @@ erts_sys_pre_init(void)
erts_lc_init();
#endif
-#endif /* USE_THREADS */
erts_init_sys_time_sup();
-#ifdef USE_THREADS
-#ifdef ERTS_SMP
erts_smp_atomic32_init_nob(&erts_break_requested, 0);
erts_smp_atomic32_init_nob(&have_prepared_crash_dump, 0);
-#else
- erts_break_requested = 0;
- have_prepared_crash_dump = 0;
-#endif
-#endif /* USE_THREADS */
erts_smp_atomic_init_nob(&sys_misc_mem_sz, 0);
@@ -531,10 +466,8 @@ SIGFUNC sys_signal(int sig, SIGFUNC func)
return(oact.sa_handler);
}
-#ifdef USE_THREADS
#undef sigprocmask
#define sigprocmask erts_thr_sigmask
-#endif
void sys_sigblock(int sig)
{
@@ -697,11 +630,7 @@ break_requested(void)
static RETSIGTYPE request_break(int signum)
{
-#ifdef ERTS_SMP
smp_sig_notify(signum);
-#else
- break_requested();
-#endif
}
#ifdef ETHR_UNUSABLE_SIGUSRX
@@ -810,35 +739,10 @@ signum_to_signalterm(int signum)
}
}
-#ifndef ERTS_SMP
-static ERTS_INLINE Uint
-signum_to_signalstate(int signum)
-{
- switch (signum) {
- case SIGINT: return (1 << 0);
- case SIGHUP: return (1 << 1);
- case SIGQUIT: return (1 << 2);
- case SIGABRT: return (1 << 3);
- case SIGALRM: return (1 << 4);
- case SIGTERM: return (1 << 5);
- case SIGUSR1: return (1 << 6);
- case SIGUSR2: return (1 << 7);
- case SIGCHLD: return (1 << 8);
- case SIGSTOP: return (1 << 9);
- case SIGTSTP: return (1 << 10);
- default: return 0;
- }
-}
-#endif
static RETSIGTYPE generic_signal_handler(int signum)
{
-#ifdef ERTS_SMP
smp_sig_notify(signum);
-#else
- ERTS_SET_SIGNAL_STATE(signum);
- ERTS_CHK_IO_AS_INTR(); /* Make sure we don't sleep in poll */
-#endif
}
int erts_set_signal(Eterm signal, Eterm type) {
@@ -1032,22 +936,6 @@ void erts_do_break_handling(void)
erts_smp_thr_progress_unblock();
}
-#ifdef ERTS_SIGNAL_STATE
-void erts_handle_signal_state(void) {
- Uint signal_state = ERTS_SIGNAL_STATE;
- Uint i = 0;
-
- ERTS_CLEAR_SIGNAL_STATE;
-
- while (signal_state) {
- if (signal_state & 0x1) {
- signal_notify_requested(signalstate_sigterm[i]);
- }
- i++;
- signal_state = signal_state >> 1;
- }
-}
-#endif
/* Fills in the systems representation of the jam/beam process identifier.
** The Pid is put in STRING representation in the supplied buffer,
@@ -1282,16 +1170,6 @@ erl_assert_error(const char* expr, const char* func, const char* file, int line)
fprintf(stderr, "%s:%d:%s() Assertion failed: %s\n",
file, line, func, expr);
fflush(stderr);
-#if !defined(ERTS_SMP) && 0
- /* Writing a crashdump from a failed assertion when smp support
- * is enabled almost a guaranteed deadlocking, don't even bother.
- *
- * It could maybe be useful (but I'm not convinced) to write the
- * crashdump if smp support is disabled...
- */
- if (erts_initialized)
- erl_crash_dump(file, line, "Assertion failed: %s\n", expr);
-#endif
abort();
}
@@ -1326,7 +1204,6 @@ erl_sys_schedule(int runnable)
}
-#ifdef ERTS_SMP
static erts_smp_tid_t sig_dispatcher_tid;
@@ -1515,7 +1392,6 @@ erts_sys_main_thread(void)
}
}
-#endif /* ERTS_SMP */
#ifdef ERTS_ENABLE_KERNEL_POLL /* get_value() is currently only used when
kernel-poll is enabled */
@@ -1602,10 +1478,8 @@ erl_sys_args(int* argc, char** argv)
init_check_io();
-#ifdef ERTS_SMP
init_smp_sig_notify();
init_smp_sig_suspend();
-#endif
/* Handled arguments have been marked with NULL. Slide arguments
not handled towards the beginning of argv. */
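
With the non-SMP branches removed, request_break() and generic_signal_handler() above always go through smp_sig_notify(): the signal handler only forwards the signal number over a pipe to a dispatcher thread, since write() is async-signal-safe while most other work is not. A simplified stand-alone version of that pattern (plain pthreads and signal(), not the ERTS thread wrappers):

#include <errno.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static int sig_notify_fds[2];   /* [0] read end (dispatcher), [1] write end (handler) */

static void forward_signal(int signum)
{
    char c = (char) signum;
    /* write(2) is async-signal safe; the handler does nothing else */
    while (write(sig_notify_fds[1], &c, 1) < 0 && errno == EINTR)
        ;
}

static void *signal_dispatcher(void *arg)
{
    char c;
    (void) arg;
    while (read(sig_notify_fds[0], &c, 1) == 1)
        printf("dispatcher thread handling signal %d\n", (int) c);
    return NULL;                /* EOF: all write ends closed */
}

int main(void)
{
    pthread_t tid;
    if (pipe(sig_notify_fds) < 0)
        return 1;
    pthread_create(&tid, NULL, signal_dispatcher, NULL);
    signal(SIGUSR1, forward_signal);
    raise(SIGUSR1);             /* pretend a user sent SIGUSR1 */
    sleep(1);                   /* give the dispatcher time to print */
    close(sig_notify_fds[1]);   /* EOF on the pipe stops the dispatcher */
    pthread_join(tid, NULL);
    return 0;
}
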
diff --git a/erts/emulator/sys/unix/sys_drivers.c b/erts/emulator/sys/unix/sys_drivers.c
index 834706d86f..03d948c6e9 100644
--- a/erts/emulator/sys/unix/sys_drivers.c
+++ b/erts/emulator/sys/unix/sys_drivers.c
@@ -53,9 +53,7 @@
#define WANT_NONBLOCKING /* must define this to pull in defs from sys.h */
#include "sys.h"
-#ifdef USE_THREADS
#include "erl_threads.h"
-#endif
extern char **environ;
extern erts_smp_rwmtx_t environ_rwmtx;
@@ -86,11 +84,7 @@ static Eterm forker_port;
#define MAXIOV 16
#endif
-#ifdef USE_THREADS
# define FDBLOCK 1
-#else
-# define FDBLOCK 0
-#endif
/* Used by the fd driver iff the fd could not be set to non-blocking */
typedef struct ErtsSysBlocking_ {
@@ -178,9 +172,7 @@ void
erl_sys_late_init(void)
{
SysDriverOpts opts;
-#ifdef ERTS_SMP
Port *port;
-#endif
sys_signal(SIGPIPE, SIG_IGN); /* Ignore - we'll handle the write failure */
@@ -197,13 +189,9 @@ erl_sys_late_init(void)
opts.argv = NULL;
opts.parallelism = erts_port_parallelism;
-#ifdef ERTS_SMP
port =
-#endif
erts_open_driver(&forker_driver, make_internal_pid(0), "forker", &opts, NULL, NULL);
-#ifdef ERTS_SMP
erts_mtx_unlock(port->lock);
-#endif
erts_sys_unix_later_init(); /* Need to be called after forker has been started */
}
@@ -220,10 +208,8 @@ static ErlDrvData vanilla_start(ErlDrvPort, char*, SysDriverOpts*);
/* II.III FD prototypes */
static ErlDrvData fd_start(ErlDrvPort, char*, SysDriverOpts*);
-#if FDBLOCK
static void fd_async(void *);
static void fd_ready_async(ErlDrvData drv_data, ErlDrvThreadData thread_data);
-#endif
static ErlDrvSSizeT fd_control(ErlDrvData, unsigned int, char *, ErlDrvSizeT,
char **, ErlDrvSizeT);
static void fd_stop(ErlDrvData);
@@ -287,11 +273,7 @@ struct erl_drv_entry fd_driver_entry = {
fd_control,
NULL,
outputv,
-#if FDBLOCK
fd_ready_async, /* ready_async */
-#else
- NULL,
-#endif
fd_flush, /* flush */
NULL, /* call */
NULL, /* event */
@@ -1092,13 +1074,11 @@ static void fd_stop(ErlDrvData ev) /* Does not close the fds */
ErlDrvPort prt = dd->port_num;
int sz = sizeof(ErtsSysDriverData);
-#if FDBLOCK
if (dd->blocking) {
erts_free(ERTS_ALC_T_SYS_BLOCKING, dd->blocking);
dd->blocking = NULL;
sz += sizeof(ErtsSysBlocking);
}
-#endif
if (dd->ifd) {
sz += sizeof(ErtsSysFdData);
@@ -1220,7 +1200,6 @@ static void outputv(ErlDrvData e, ErlIOVec* ev)
driver_enqv(ix, ev, n); /* n is the skip value */
driver_select(ix, ofd, ERL_DRV_WRITE|ERL_DRV_USE, 1);
}
-#if FDBLOCK
else {
if (ev->size != 0) {
driver_enqv(ix, ev, 0);
@@ -1231,7 +1210,6 @@ static void outputv(ErlDrvData e, ErlIOVec* ev)
driver_pdl_unlock(dd->blocking->pdl);
}
}
-#endif
/* return 0;*/
}
@@ -1579,7 +1557,6 @@ static void stop_select(ErlDrvEvent fd, void* _)
close((int)fd);
}
-#if FDBLOCK
static void
fd_async(void *async_data)
@@ -1658,7 +1635,6 @@ void fd_ready_async(ErlDrvData drv_data,
return; /* 0; */
}
-#endif
/* Forker driver */
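
FDBLOCK is now always 1: the fd driver keeps its fallback where a descriptor that stays in blocking mode has its writes done on an async thread (the fd_async()/fd_ready_async() path above). The normal path remains a non-blocking descriptor, set up with the usual fcntl() dance, sketched here for contrast (generic POSIX, not the driver code):

#include <fcntl.h>

static int set_nonblocking(int fd)
{
    int flags = fcntl(fd, F_GETFL, 0);
    if (flags < 0)
        return -1;              /* caller falls back to the blocking path */
    return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}
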
diff --git a/erts/emulator/sys/unix/sys_float.c b/erts/emulator/sys/unix/sys_float.c
index 6435da086f..a82c15bd32 100644
--- a/erts/emulator/sys/unix/sys_float.c
+++ b/erts/emulator/sys/unix/sys_float.c
@@ -39,7 +39,6 @@ erts_sys_init_float(void)
#else /* !NO_FPE_SIGNALS */
-#ifdef ERTS_SMP
static erts_tsd_key_t fpe_key;
/* once-only initialisation early in the main thread (via erts_sys_init_float()) */
@@ -61,11 +60,6 @@ static ERTS_INLINE volatile unsigned long *erts_thread_get_fp_exception(void)
{
return (volatile unsigned long*)erts_tsd_get(fpe_key);
}
-#else /* !SMP */
-#define erts_init_fp_exception() /*empty*/
-static volatile unsigned long fp_exception;
-#define erts_thread_get_fp_exception() (&fp_exception)
-#endif /* SMP */
volatile unsigned long *erts_get_current_fp_exception(void)
{
@@ -659,11 +653,9 @@ void erts_sys_init_float(void)
void erts_thread_init_float(void)
{
-#ifdef ERTS_SMP
/* This allows Erlang schedulers to leave Erlang-process context
and still have working FP exceptions. XXX: is this needed? */
erts_thread_init_fp_exception();
-#endif
#ifndef NO_FPE_SIGNALS
/* NOTE:
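
In sys_float.c the per-thread FP-exception word is now always stored under a tsd key (fpe_key); the single-threaded static variable is gone. The underlying pattern is plain pthread thread-specific data; a simplified stand-alone sketch (not the erts_tsd wrappers):

#include <pthread.h>
#include <stdlib.h>

static pthread_key_t fpe_key;

static void init_fp_exception(void)          /* once, in the main thread */
{
    pthread_key_create(&fpe_key, free);
}

static void thread_init_fp_exception(void)   /* once per thread */
{
    unsigned long *fpe = calloc(1, sizeof *fpe);
    pthread_setspecific(fpe_key, fpe);
}

static volatile unsigned long *get_current_fp_exception(void)
{
    return (volatile unsigned long *) pthread_getspecific(fpe_key);
}
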
diff --git a/erts/emulator/sys/win32/erl_poll.c b/erts/emulator/sys/win32/erl_poll.c
index 8743f83a50..41ff186c44 100644
--- a/erts/emulator/sys/win32/erl_poll.c
+++ b/erts/emulator/sys/win32/erl_poll.c
@@ -286,41 +286,26 @@ struct ErtsPollSet_ {
CRITICAL_SECTION standby_crit; /* CS to guard the counter */
HANDLE standby_wait_event; /* Event signalled when counter == 0 */
erts_atomic32_t wakeup_state;
-#ifdef ERTS_SMP
erts_smp_mtx_t mtx;
-#endif
erts_atomic64_t timeout_time;
};
-#ifdef ERTS_SMP
#define ERTS_POLLSET_LOCK(PS) \
erts_smp_mtx_lock(&(PS)->mtx)
#define ERTS_POLLSET_UNLOCK(PS) \
erts_smp_mtx_unlock(&(PS)->mtx)
-#else
-
-#define ERTS_POLLSET_LOCK(PS)
-#define ERTS_POLLSET_UNLOCK(PS)
-
-#endif
/*
* Communication with sys_interrupt
*/
-#ifdef ERTS_SMP
extern erts_smp_atomic32_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 0)
-#else
-extern volatile int erts_break_requested;
-#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
-#define ERTS_UNSET_BREAK_REQUESTED (erts_break_requested = 0)
-#endif
static erts_mtx_t break_waiter_lock;
static HANDLE break_happened_event;
@@ -1193,14 +1178,10 @@ int erts_poll_wait(ErtsPollSet ps,
HARDDEBUGF(("Start waiting %d [%d]",num_h, (int) timeout));
ERTS_POLLSET_UNLOCK(ps);
-#ifdef ERTS_SMP
erts_thr_progress_prepare_wait(NULL);
-#endif
ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP);
WaitForMultipleObjects(num_h, harr, FALSE, timeout);
-#ifdef ERTS_SMP
erts_thr_progress_finalize_wait(NULL);
-#endif
ERTS_MSACC_POP_STATE_M();
ERTS_POLLSET_LOCK(ps);
HARDDEBUGF(("Stop waiting %d [%d]",num_h, (int) timeout));
@@ -1359,9 +1340,7 @@ ErtsPollSet erts_poll_create_pollset(void)
ps->restore_events = 0;
erts_atomic32_init_nob(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN);
-#ifdef ERTS_SMP
erts_smp_mtx_init(&ps->mtx, "pollset", NIL, ERTS_LOCK_FLAGS_CATEGORY_IO);
-#endif
init_timeout_time(ps);
HARDTRACEF(("Out erts_poll_create_pollset"));
@@ -1391,9 +1370,7 @@ void erts_poll_destroy_pollset(ErtsPollSet ps)
CloseHandle(ps->event_io_ready);
CloseHandle(ps->standby_wait_event);
ERTS_POLLSET_UNLOCK(ps);
-#ifdef ERTS_SMP
erts_smp_mtx_destroy(&ps->mtx);
-#endif
SEL_FREE(ERTS_ALC_T_POLLSET, (void *) ps);
HARDTRACEF(("Out erts_poll_destroy_pollset"));
}
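
The Windows pollset keeps its wakeup_state and now always takes the mutex-protected path. Its wakeup mechanism is the event-object analogue of the Unix wakeup pipe: the waiter always includes a dedicated event HANDLE in the array passed to WaitForMultipleObjects(), and another thread interrupts the wait with SetEvent(). A minimal sketch (plain Win32 API, not the ERTS code):

#include <windows.h>
#include <stdio.h>

static HANDLE wake_event;

static void wake_poller(void)
{
    SetEvent(wake_event);               /* safe to call from any thread */
}

static DWORD wait_for_events(HANDLE *handles, DWORD num, DWORD timeout_ms)
{
    DWORD res;
    handles[0] = wake_event;            /* slot 0 is reserved for wakeups */
    res = WaitForMultipleObjects(num, handles, FALSE, timeout_ms);
    if (res == WAIT_OBJECT_0)
        ResetEvent(wake_event);         /* consume the wakeup */
    return res;
}

int main(void)
{
    HANDLE handles[1];
    wake_event = CreateEvent(NULL, TRUE, FALSE, NULL);  /* manual reset */
    wake_poller();
    if (wait_for_events(handles, 1, 1000) == WAIT_OBJECT_0)
        printf("woken by wake_event\n");
    CloseHandle(wake_event);
    return 0;
}
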
diff --git a/erts/emulator/sys/win32/erl_win_sys.h b/erts/emulator/sys/win32/erl_win_sys.h
index 78005aada9..1f53452d17 100644
--- a/erts/emulator/sys/win32/erl_win_sys.h
+++ b/erts/emulator/sys/win32/erl_win_sys.h
@@ -311,10 +311,8 @@ typedef long ssize_t;
#endif
/* Threads */
-#ifdef USE_THREADS
int init_async(int);
int exit_async(void);
-#endif
#define ERTS_HAVE_TRY_CATCH 1
diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c
index 15c59109b1..7fddc9fa99 100644
--- a/erts/emulator/sys/win32/sys.c
+++ b/erts/emulator/sys/win32/sys.c
@@ -80,9 +80,7 @@ static int application_type(const wchar_t* originalName, wchar_t fullPath[MAX_PA
HANDLE erts_service_event;
-#ifdef ERTS_SMP
static erts_smp_tsd_key_t win32_errstr_key;
-#endif
static erts_smp_atomic_t pipe_creation_counter;
@@ -94,10 +92,8 @@ static erts_smp_atomic_t pipe_creation_counter;
static int driver_write(long, HANDLE, byte*, int);
static int create_file_thread(struct async_io* aio, int mode);
-#ifdef ERTS_SMP
static void close_active_handle(DriverData *, HANDLE handle);
static DWORD WINAPI threaded_handle_closer(LPVOID param);
-#endif
static DWORD WINAPI threaded_reader(LPVOID param);
static DWORD WINAPI threaded_writer(LPVOID param);
static DWORD WINAPI threaded_exiter(LPVOID param);
@@ -450,9 +446,7 @@ typedef struct async_io {
* the console for Windows NT).
*/
HANDLE fd; /* Handle for file or pipe. */
-#ifdef ERTS_SMP
int async_io_active; /* if true, a close of the file will signal the event in ov */
-#endif
OVERLAPPED ov; /* Control structure for overlapped reading.
* When overlapped reading is simulated with
* a thread, the fields are used as follows:
@@ -691,7 +685,6 @@ buf_alloc_error:
static void
release_driver_data(DriverData* dp)
{
-#ifdef ERTS_SMP
#ifdef USE_CANCELIOEX
if (fpCancelIoEx != NULL) {
if (dp->in.thread == (HANDLE) -1 && dp->in.fd != INVALID_HANDLE_VALUE) {
@@ -734,14 +727,6 @@ release_driver_data(DriverData* dp)
DEBUGF(("...done\n"));
}
}
-#else
- if (dp->in.thread == (HANDLE) -1 && dp->in.fd != INVALID_HANDLE_VALUE) {
- CancelIo(dp->in.fd);
- }
- if (dp->out.thread == (HANDLE) -1 && dp->out.fd != INVALID_HANDLE_VALUE) {
- CancelIo(dp->out.fd);
- }
-#endif
if (dp->inbuf != NULL) {
ASSERT(erts_smp_atomic_read_nob(&sys_misc_mem_sz) >= dp->inBufSize);
@@ -777,7 +762,6 @@ release_driver_data(DriverData* dp)
unrefer_driver_data(dp);
}
-#ifdef ERTS_SMP
struct handles_to_be_closed {
HANDLE handles[MAXIMUM_WAIT_OBJECTS];
@@ -870,7 +854,6 @@ threaded_handle_closer(LPVOID param)
DEBUGF(("threaded_handle_closer %p terminating\r\n", htbc));
return 0;
}
-#endif /* ERTS_SMP */
/*
* Stores input and output file descriptors in the DriverData structure,
@@ -946,9 +929,7 @@ init_async_io(DriverData *dp, AsyncIo* aio, int use_threads)
aio->flushReplyEvent = NULL;
aio->pendingError = 0;
aio->bytesTransferred = 0;
-#ifdef ERTS_SMP
aio->async_io_active = 0;
-#endif
aio->ov.hEvent = CreateManualEvent(FALSE);
if (aio->ov.hEvent == NULL)
return -1;
@@ -1029,9 +1010,7 @@ async_read_file(AsyncIo* aio, LPVOID buf, DWORD numToRead)
ResetEvent(aio->ov.hEvent);
SetEvent(aio->ioAllowed);
} else {
-#ifdef ERTS_SMP
aio->async_io_active = 1; /* Will get 0 when the event actually happened */
-#endif
if (ReadFile(aio->fd, buf, numToRead,
&aio->bytesTransferred, &aio->ov)) {
DEBUGF(("async_read_file: ReadFile() suceeded: %d bytes\n",
@@ -1079,16 +1058,12 @@ async_write_file(AsyncIo* aio, /* Pointer to async control block. */
ResetEvent(aio->ov.hEvent);
SetEvent(aio->ioAllowed);
} else {
-#ifdef ERTS_SMP
aio->async_io_active = 1; /* Will get 0 when the event actually happened */
-#endif
if (WriteFile(aio->fd, buf, numToWrite,
&aio->bytesTransferred, &aio->ov)) {
DEBUGF(("async_write_file: WriteFile() suceeded: %d bytes\n",
aio->bytesTransferred));
-#ifdef ERTS_SMP
aio->async_io_active = 0; /* The event will not be signalled */
-#endif
ResetEvent(aio->ov.hEvent);
return TRUE;
} else {
@@ -2511,11 +2486,9 @@ ready_input(ErlDrvData drv_data, ErlDrvEvent ready_event)
int pb;
pb = dp->packet_bytes;
-#ifdef ERTS_SMP
if(dp->in.thread == (HANDLE) -1) {
dp->in.async_io_active = 0;
}
-#endif
DEBUGF(("ready_input: dp %p, event 0x%x\n", dp, ready_event));
/*
@@ -2680,11 +2653,9 @@ ready_output(ErlDrvData drv_data, ErlDrvEvent ready_event)
DriverData *dp = (DriverData *) drv_data;
int error;
-#ifdef ERTS_SMP
if(dp->out.thread == (HANDLE) -1) {
dp->out.async_io_active = 0;
}
-#endif
DEBUGF(("ready_output(%p, 0x%x)\n", drv_data, ready_event));
set_busy_port(dp->port_num, 0);
if (!(dp->outbuf)) {
@@ -2743,7 +2714,6 @@ sys_init_io(void)
max_files = 2*erts_ptab_max(&erts_port);
}
-#ifdef ERTS_SMP
void
erts_sys_main_thread(void)
{
@@ -2756,7 +2726,6 @@ erts_sys_main_thread(void)
WaitForSingleObject(dummy, INFINITE);
}
}
-#endif
void erts_sys_alloc_init(void)
{
@@ -2938,11 +2907,7 @@ sys_get_key(int fd)
char* win32_errorstr(int error)
{
-#ifdef SMP
LPTSTR lpBufPtr = erts_smp_tsd_get(win32_errstr_key);
-#else
- static LPTSTR lpBufPtr = NULL;
-#endif
if (lpBufPtr) {
LocalFree(lpBufPtr);
}
@@ -2956,9 +2921,7 @@ char* win32_errorstr(int error)
0,
NULL);
SetLastError(error);
-#ifdef ERTS_SMP
erts_smp_tsd_set(win32_errstr_key,lpBufPtr);
-#endif
return lpBufPtr;
}
@@ -3131,7 +3094,6 @@ check_supported_os_version(void)
#endif
}
-#ifdef USE_THREADS
typedef struct {
int sched_bind_data;
@@ -3176,19 +3138,15 @@ thr_create_prepare_child(void *vtcdp)
erts_sched_bind_atthrcreate_child(tcdp->sched_bind_data);
}
-#endif /* USE_THREADS */
void
erts_sys_pre_init(void)
{
-#ifdef USE_THREADS
erts_thr_init_data_t eid = ERTS_THR_INIT_DATA_DEF_INITER;
-#endif
int_os_version.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
GetVersionEx(&int_os_version);
check_supported_os_version();
-#ifdef USE_THREADS
eid.thread_create_child_func = thr_create_prepare_child;
/* Before creation in parent */
eid.thread_create_prepare_func = thr_create_prepare;
@@ -3209,7 +3167,6 @@ erts_sys_pre_init(void)
erts_lc_init();
#endif
-#endif /* USE_THREADS */
erts_init_sys_time_sup();
@@ -3233,10 +3190,8 @@ void erl_sys_init(void)
noinherit_std_handle(STD_INPUT_HANDLE);
noinherit_std_handle(STD_ERROR_HANDLE);
-#ifdef ERTS_SMP
erts_smp_tsd_key_create(&win32_errstr_key,"win32_errstr_key");
InitializeCriticalSection(&htbc_lock);
-#endif
erts_smp_atomic_init_nob(&pipe_creation_counter,0);
/*
* Test if we have named pipes or not.
@@ -3299,13 +3254,11 @@ erts_sys_schedule_interrupt(int set)
erts_check_io_interrupt(set);
}
-#ifdef ERTS_SMP
void
erts_sys_schedule_interrupt_timed(int set, ErtsMonotonicTime timeout_time)
{
erts_check_io_interrupt_timed(set, timeout_time);
}
-#endif
/*
* Called from schedule() when it runs out of runnable processes,
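
In the win32/sys.c hunks above, the async_io_active flag is now maintained unconditionally around the overlapped ReadFile()/WriteFile() calls. Stripped of the driver plumbing, the overlapped-read pattern looks roughly like this (illustrative sketch with error handling trimmed; not the driver code):

#include <windows.h>

/* returns 1 = completed synchronously, 0 = pending (event will be signalled),
 * -1 = error (inspect GetLastError()) */
static int start_overlapped_read(HANDLE fd, void *buf, DWORD len,
                                 OVERLAPPED *ov, DWORD *transferred)
{
    ResetEvent(ov->hEvent);
    if (ReadFile(fd, buf, len, transferred, ov))
        return 1;
    return GetLastError() == ERROR_IO_PENDING ? 0 : -1;
}

static BOOL finish_overlapped_read(HANDLE fd, OVERLAPPED *ov, DWORD *transferred)
{
    WaitForSingleObject(ov->hEvent, INFINITE);  /* ready_input() analogue */
    return GetOverlappedResult(fd, ov, transferred, FALSE);
}
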
diff --git a/erts/emulator/sys/win32/sys_interrupt.c b/erts/emulator/sys/win32/sys_interrupt.c
index df838960eb..4632ab52b1 100644
--- a/erts/emulator/sys/win32/sys_interrupt.c
+++ b/erts/emulator/sys/win32/sys_interrupt.c
@@ -35,17 +35,11 @@
# define WIN_SYS_INLINE __forceinline
#endif
-#ifdef ERTS_SMP
erts_smp_atomic32_t erts_break_requested;
#define ERTS_SET_BREAK_REQUESTED \
erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 1)
#define ERTS_UNSET_BREAK_REQUESTED \
erts_smp_atomic32_set_nob(&erts_break_requested, (erts_aint32_t) 0)
-#else
-volatile int erts_break_requested = 0;
-#define ERTS_SET_BREAK_REQUESTED (erts_break_requested = 1)
-#define ERTS_UNSET_BREAK_REQUESTED (erts_break_requested = 0)
-#endif
extern int nohup;
HANDLE erts_sys_break_event = NULL;
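
erts_break_requested is now always the atomic flag: the break handler only sets it, and the emulator tests and clears it at a safe point. The same idea in portable C11 atomics (a sketch, not the erts_smp_atomic32 wrappers):

#include <stdatomic.h>

static atomic_int break_requested;

static void set_break_requested(void)            /* from the break/signal handler */
{
    atomic_store(&break_requested, 1);
}

static int test_and_clear_break_requested(void)  /* from the emulator loop */
{
    return atomic_exchange(&break_requested, 0);
}
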
diff --git a/erts/etc/common/erlexec.c b/erts/etc/common/erlexec.c
index 48abf77533..55a55b658c 100644
--- a/erts/etc/common/erlexec.c
+++ b/erts/etc/common/erlexec.c
@@ -189,9 +189,7 @@ void error(char* format, ...);
* Local functions.
*/
-#if !defined(ERTS_HAVE_SMP_EMU) || !defined(ERTS_HAVE_PLAIN_EMU)
static void usage_notsup(const char *switchname, const char *alt);
-#endif
static char **build_args_from_env(char *env_var);
static char **build_args_from_string(char *env_var);
static void initial_argv_massage(int *argc, char ***argv);
@@ -463,9 +461,7 @@ int main(int argc, char **argv)
*/
cpuinfo = erts_cpu_info_create();
/* '-smp auto' is default */
-#ifdef ERTS_HAVE_SMP_EMU
start_smp_emu = 1;
-#endif
#if defined(__WIN32__) && defined(WIN32_ALWAYS_DEBUG)
emu_type = "debug";
@@ -497,23 +493,13 @@ int main(int argc, char **argv)
i++;
smp_enable:
;
-#if !defined(ERTS_HAVE_SMP_EMU)
- usage_notsup("-smp enable", "");
-#endif
} else if (strcmp(argv[i+1], "disable") == 0) {
i++;
smp_disable:
-#ifdef ERTS_HAVE_PLAIN_EMU
- start_smp_emu = 0;
-#else
usage_notsup("-smp disable", " Use \"+S 1\" instead.");
-#endif
} else {
smp:
;
-#if !defined(ERTS_HAVE_SMP_EMU)
- usage_notsup("-smp", "");
-#endif
}
} else if (strcmp(argv[i], "-smpenable") == 0) {
goto smp_enable;
@@ -1155,12 +1141,7 @@ usage_aux(void)
"[-start_erl [datafile]] "
#endif
"[-smp [auto"
-#ifdef ERTS_HAVE_SMP_EMU
"|enable"
-#endif
-#ifdef ERTS_HAVE_PLAIN_EMU
- "|disable"
-#endif
"]"
"] "
"[-make] [-man [manopts] MANPAGE] [-x] [-emu_args] [-start_epmd BOOLEAN] "
@@ -1183,14 +1164,12 @@ usage(const char *switchname)
usage_aux();
}
-#if !defined(ERTS_HAVE_SMP_EMU) || !defined(ERTS_HAVE_PLAIN_EMU)
static void
usage_notsup(const char *switchname, const char *alt)
{
fprintf(stderr, "Argument \'%s\' not supported.%s\n", switchname, alt);
usage_aux();
}
-#endif
static void
usage_format(char *format, ...)
diff --git a/erts/include/internal/ethread.h b/erts/include/internal/ethread.h
index fa35bf3d0b..150b58dcda 100644
--- a/erts/include/internal/ethread.h
+++ b/erts/include/internal/ethread.h
@@ -141,9 +141,6 @@ typedef pthread_key_t ethr_tsd_key;
# define ETHR_USE_OWN_RWMTX_IMPL__
#endif
-#if !defined(ETHR_FORCE_PTHREAD_MUTEX) && 0
-# define ETHR_USE_OWN_MTX_IMPL__
-#endif
#elif defined(ETHR_WIN32_THREADS)
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
diff --git a/erts/lib_src/common/erl_printf.c b/erts/lib_src/common/erl_printf.c
index 7781fc2196..98130a72c6 100644
--- a/erts/lib_src/common/erl_printf.c
+++ b/erts/lib_src/common/erl_printf.c
@@ -74,9 +74,6 @@ void (*erts_printf_unblock_fpe)(int) = NULL;
# define FWRITE fwrite_unlocked
# endif
#endif
-#if !defined(USE_THREADS) && defined(putc) && !defined(fwrite)
-# define PUTC_ON_SMALL_WRITES
-#endif
#if !defined(FLOCKFILE) || !defined(FUNLOCKFILE)
# define FLOCKFILE(FP)
# define FUNLOCKFILE(FP)