author     Lukas Larsson <[email protected]>   2017-09-08 10:08:45 +0200
committer  Lukas Larsson <[email protected]>   2017-09-11 15:06:28 +0200
commit     769ff22c750d939fdc9cb45fae1e44817ec04307 (patch)
tree       a85af00246ed16c54b0a39e7fc6200b25ae89a92
parent     c15bb1698267ae64aac08b3b48040c44174700e5 (diff)
download   otp-769ff22c750d939fdc9cb45fae1e44817ec04307.tar.gz
           otp-769ff22c750d939fdc9cb45fae1e44817ec04307.tar.bz2
           otp-769ff22c750d939fdc9cb45fae1e44817ec04307.zip
erts: Remove possibility to disable dirty schedulers
-rw-r--r--  HOWTO/INSTALL.md                        5
-rw-r--r--  erts/configure.in                      23
-rw-r--r--  erts/emulator/Makefile.in              12
-rw-r--r--  erts/emulator/beam/beam_bif_load.c     10
-rw-r--r--  erts/emulator/beam/beam_bp.c           12
-rw-r--r--  erts/emulator/beam/beam_bp.h            2
-rw-r--r--  erts/emulator/beam/beam_debug.c        12
-rw-r--r--  erts/emulator/beam/beam_emu.c           2
-rw-r--r--  erts/emulator/beam/beam_load.h          4
-rw-r--r--  erts/emulator/beam/bif.c               13
-rw-r--r--  erts/emulator/beam/bif.h                2
-rw-r--r--  erts/emulator/beam/erl_alloc.c          2
-rw-r--r--  erts/emulator/beam/erl_bif_info.c      21
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c      4
-rw-r--r--  erts/emulator/beam/erl_bif_unique.c     4
-rw-r--r--  erts/emulator/beam/erl_gc.c            26
-rw-r--r--  erts/emulator/beam/erl_init.c          30
-rw-r--r--  erts/emulator/beam/erl_lock_check.c     2
-rw-r--r--  erts/emulator/beam/erl_nfunc_sched.h    4
-rw-r--r--  erts/emulator/beam/erl_nif.c           21
-rw-r--r--  erts/emulator/beam/erl_process.c      215
-rw-r--r--  erts/emulator/beam/erl_process.h       44
-rw-r--r--  erts/emulator/beam/global.h             4
-rw-r--r--  erts/emulator/beam/io.c                 4
-rwxr-xr-x  erts/emulator/utils/make_tables        33
25 files changed, 15 insertions, 496 deletions
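
Context for the diffs below: dirty schedulers are extra scheduler threads on which long-running native code (dirty NIFs and dirty BIFs) runs without blocking the normal schedulers. Until this commit the support could be compiled out, which is why so much of the emulator was wrapped in #ifdef ERTS_DIRTY_SCHEDULERS; the commit makes the support unconditional and deletes every fallback path. A minimal sketch of the functionality being made mandatory, assuming a hypothetical NIF module "demo" that is not part of this patch:

    /* busy_work/0 is routed to a dirty CPU scheduler by the flags field
     * of ErlNifFunc. After this commit there is no runtime without
     * dirty-scheduler support, so a non-zero flags field can no longer
     * cause a load error (see the load_nif_2 hunk in erl_nif.c below). */
    #include "erl_nif.h"

    static ERL_NIF_TERM
    busy_work(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
    {
        unsigned long i, acc = 0;
        for (i = 0; i < 400000000UL; i++)   /* long-running native loop */
            acc += i;
        return enif_make_ulong(env, acc);
    }

    static ErlNifFunc nif_funcs[] = {
        /* {name, arity, fptr, flags} */
        {"busy_work", 0, busy_work, ERL_NIF_DIRTY_JOB_CPU_BOUND}
    };

    ERL_NIF_INIT(demo, nif_funcs, NULL, NULL, NULL, NULL)
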
diff --git a/HOWTO/INSTALL.md b/HOWTO/INSTALL.md
index d61aedf5a3..f62429a3ff 100644
--- a/HOWTO/INSTALL.md
+++ b/HOWTO/INSTALL.md
@@ -417,11 +417,6 @@ Some of the available `configure` options are:
and scalability compared to the default clock sources chosen.
* `--disable-saved-compile-time` - Disable saving of compile date and time
in the emulator binary.
-* `--enable-dirty-schedulers` - Enable the **experimental** dirty schedulers
- functionality. Note that the dirty schedulers functionality is experimental,
- and **not supported**. This functionality **will** be subject to backward
- incompatible changes. Note that you should **not** enable the dirty scheduler
- functionality on production systems. It is only provided for testing.
If you or your system has special requirements please read the `Makefile` for
additional configuration information.
diff --git a/erts/configure.in b/erts/configure.in
index 4184025a6c..2cb446b470 100644
--- a/erts/configure.in
+++ b/erts/configure.in
@@ -118,17 +118,9 @@ AS_HELP_STRING([--enable-bootstrap-only],
with_ssl_zlib=no
enable_hipe=no
enable_sctp=no
- enable_dirty_schedulers=no
- fi
+ fi
])
-AC_ARG_ENABLE(dirty-schedulers,
-AS_HELP_STRING([--enable-dirty-schedulers], [enable dirty scheduler support]),
-[ case "$enableval" in
- no) enable_dirty_schedulers=no ;;
- *) enable_dirty_schedulers=yes ;;
- esac ], enable_dirty_schedulers=default)
-
AC_ARG_ENABLE(dirty-schedulers-test,
AS_HELP_STRING([--enable-dirty-schedulers-test], [enable dirty scheduler test (for debugging purposes)]),
[ case "$enableval" in
@@ -1064,20 +1056,7 @@ fi
TYPES=opt
-AC_MSG_CHECKING(whether dirty schedulers should be enabled)
-case $enable_dirty_schedulers in
- yes)
- DIRTY_SCHEDULER_SUPPORT=yes;;
- default)
- DIRTY_SCHEDULER_SUPPORT=yes;;
- *)
- DIRTY_SCHEDULER_SUPPORT=no;;
-esac
-AC_MSG_RESULT($DIRTY_SCHEDULER_SUPPORT)
-AC_SUBST(DIRTY_SCHEDULER_SUPPORT)
-
DIRTY_SCHEDULER_TEST=$enable_dirty_schedulers_test
-test $DIRTY_SCHEDULER_SUPPORT = yes || DIRTY_SCHEDULER_TEST=no
AC_SUBST(DIRTY_SCHEDULER_TEST)
test $DIRTY_SCHEDULER_TEST != yes || {
test -f "$ERL_TOP/erts/CONF_INFO" || echo "" > "$ERL_TOP/erts/CONF_INFO"
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index 1b29065486..02a9a9a93b 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -85,7 +85,6 @@ PROFILE_USE=-fprofile-instr-use=$(OBJDIR)/default.profdata
PROFILE_USE_DEPS=$(OBJDIR)/default.profdata
endif
-DIRTY_SCHEDULER_SUPPORT=@DIRTY_SCHEDULER_SUPPORT@
DIRTY_SCHEDULER_TEST=@DIRTY_SCHEDULER_TEST@
ifeq ($(TYPE),debug)
@@ -216,10 +215,6 @@ override FLAVOR=smp
FLAVOR_MARKER=.smp
ENABLE_ALLOC_TYPE_VARS += nofrag
-ifeq ($(DIRTY_SCHEDULER_SUPPORT),yes)
-THR_DEFS += -DERTS_DIRTY_SCHEDULERS
-DS_SUPPORT=yes
-
ifeq ($(DIRTY_SCHEDULER_TEST),yes)
DS_TEST=yes
THR_DEFS += -DERTS_DIRTY_SCHEDULERS_TEST
@@ -227,11 +222,6 @@ else # DIRTY_SCHEDULER_TEST
DS_TEST=no
endif # DIRTY_SCHEDULER_TEST
-else # DIRTY_SCHEDULER_SUPPORT
-DS_SUPPORT=no
-DS_TEST=no
-endif # DIRTY_SCHEDULER_SUPPORT
-
TF_MARKER=$(TYPEMARKER)$(FLAVOR_MARKER)
ifeq ($(TYPE)-@HAVE_VALGRIND@,valgrind-no)
@@ -627,7 +617,7 @@ $(HIPE_NBIF_FILES) \
: $(TTF_DIR)/TABLES-GENERATED
$(TTF_DIR)/TABLES-GENERATED: $(ATOMS) $(DIRTY_BIFS) $(BIFS) utils/make_tables
$(gen_verbose)LANG=C $(PERL) utils/make_tables -src $(TTF_DIR) -include $(TTF_DIR)\
- -ds $(DS_SUPPORT) -dst $(DS_TEST) -hipe $(HIPE) $(ATOMS) $(DIRTY_BIFS) $(BIFS) && echo $? >$(TTF_DIR)/TABLES-GENERATED
+ -dst $(DS_TEST) -hipe $(HIPE) $(ATOMS) $(DIRTY_BIFS) $(BIFS) && echo $? >$(TTF_DIR)/TABLES-GENERATED
GENERATE += $(TTF_DIR)/TABLES-GENERATED
$(TTF_DIR)/erl_alloc_types.h: beam/erl_alloc.types utils/make_alloc_types
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index b78f617560..7f8dc42aca 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -65,9 +65,7 @@ static struct {
Process *erts_code_purger = NULL;
-#ifdef ERTS_DIRTY_SCHEDULERS
Process *erts_dirty_process_code_checker;
-#endif
erts_atomic_t erts_copy_literal_area__;
#define ERTS_SET_COPY_LITERAL_AREA(LA) \
erts_atomic_set_nob(&erts_copy_literal_area__, \
@@ -604,9 +602,6 @@ badarg:
BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2)
{
-#if !defined(ERTS_DIRTY_SCHEDULERS)
- BIF_ERROR(BIF_P, EXC_NOTSUP);
-#else
Process *rp;
int reds = 0;
Eterm res;
@@ -636,7 +631,6 @@ BIF_RETTYPE erts_internal_check_dirty_process_code_2(BIF_ALIST_2)
ASSERT(is_value(res));
BIF_RET2(res, reds);
-#endif
}
BIF_RETTYPE delete_module_1(BIF_ALIST_1)
@@ -1053,10 +1047,8 @@ erts_proc_copy_literal_area(Process *c_p, int *redsp, int fcalls, int gc_allowed
return_ok:
-#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p)))
c_p->flags &= ~F_DIRTY_CLA;
-#endif
return am_ok;
@@ -1071,10 +1063,8 @@ literal_gc:
*redsp += erts_garbage_collect_literals(c_p, (Eterm *) literals, lit_bsize,
oh, fcalls);
-#ifdef ERTS_DIRTY_SCHEDULERS
if (c_p->flags & F_DIRTY_CLA)
return THE_NON_VALUE;
-#endif
return am_ok;
}
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 79a75f6698..49ec59c989 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -75,9 +75,7 @@ extern BeamInstr beam_return_time_trace[1]; /* OpCode(i_return_time_trace) */
erts_atomic32_t erts_active_bp_index;
erts_atomic32_t erts_staging_bp_index;
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_mtx_t erts_dirty_bp_ix_mtx;
-#endif
/*
* Inlined helpers
@@ -94,22 +92,18 @@ acquire_bp_sched_ix(Process *c_p)
{
ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
ASSERT(esdp);
-#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
erts_mtx_lock(&erts_dirty_bp_ix_mtx);
return (Uint32) erts_no_schedulers;
}
-#endif
return (Uint32) esdp->no - 1;
}
static ERTS_INLINE void
release_bp_sched_ix(Uint32 ix)
{
-#ifdef ERTS_DIRTY_SCHEDULERS
if (ix == (Uint32) erts_no_schedulers)
erts_mtx_unlock(&erts_dirty_bp_ix_mtx);
-#endif
}
@@ -164,10 +158,8 @@ void
erts_bp_init(void) {
erts_atomic32_init_nob(&erts_active_bp_index, 0);
erts_atomic32_init_nob(&erts_staging_bp_index, 1);
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_mtx_init(&erts_dirty_bp_ix_mtx, "dirty_break_point_index", NIL,
ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_DEBUG);
-#endif
}
@@ -1585,11 +1577,7 @@ set_function_break(ErtsCodeInfo *ci, Binary *match_spec, Uint break_flags,
ASSERT((bp->flags & ERTS_BPF_TIME_TRACE) == 0);
bdt = Alloc(sizeof(BpDataTime));
erts_refc_init(&bdt->refc, 1);
-#ifdef ERTS_DIRTY_SCHEDULERS
bdt->n = erts_no_schedulers + 1;
-#else
- bdt->n = erts_no_schedulers;
-#endif
bdt->hash = Alloc(sizeof(bp_time_hash_t)*(bdt->n));
for (i = 0; i < bdt->n; i++) {
bp_hash_init(&(bdt->hash[i]), 32);
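
The beam_bp.c hunks above hard-code the breakpoint accounting layout that used to be conditional: normal scheduler k owns per-scheduler slot k-1 lock-free, while all dirty schedulers share one extra slot at index erts_no_schedulers, serialized by erts_dirty_bp_ix_mtx (hence bdt->n = erts_no_schedulers + 1). The same pattern, restated as a standalone sketch with illustrative names and plain pthreads rather than ERTS locking primitives:

    #include <pthread.h>

    #define N_NORMAL 8                 /* stands in for erts_no_schedulers */
    static pthread_mutex_t dirty_slot_mtx = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long hit_count[N_NORMAL + 1];   /* bdt->n slots */

    static unsigned acquire_slot(int is_dirty, unsigned sched_no)
    {
        if (is_dirty) {                /* all dirty threads contend here */
            pthread_mutex_lock(&dirty_slot_mtx);
            return N_NORMAL;           /* the single shared slot */
        }
        return sched_no - 1;           /* scheduler numbers are 1-based */
    }

    static void release_slot(unsigned ix)
    {
        if (ix == N_NORMAL)
            pthread_mutex_unlock(&dirty_slot_mtx);
    }

    static void bump(int is_dirty, unsigned sched_no)
    {
        unsigned ix = acquire_slot(is_dirty, sched_no);
        hit_count[ix]++;               /* slot is private or mutex-held */
        release_slot(ix);
    }

Sharing one mutex-guarded slot keeps the per-breakpoint array small, at the cost of contention that only dirty schedulers pay.
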
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index 1e1f6a7534..a64765822b 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -79,9 +79,7 @@ typedef struct generic_bp {
#define ERTS_BP_CALL_TIME_SCHEDULE_OUT (1)
#define ERTS_BP_CALL_TIME_SCHEDULE_EXITING (2)
-#ifdef ERTS_DIRTY_SCHEDULERS
extern erts_mtx_t erts_dirty_bp_ix_mtx;
-#endif
enum erts_break_op{
ERTS_BREAK_NOP = 0, /* Must be false */
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index 21e295c63a..f8c7f9a0fe 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -804,10 +804,8 @@ static void print_bif_name(fmtfn_t to, void* to_arg, BifFunction bif)
* test suite.
*/
-#ifdef ERTS_DIRTY_SCHEDULERS
static int ms_wait(Process *c_p, Eterm etimeout, int busy);
static int dirty_send_message(Process *c_p, Eterm to, Eterm tag);
-#endif
static BIF_RETTYPE dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I);
/*
@@ -836,7 +834,6 @@ erts_debug_dirty_io_2(BIF_ALIST_2)
BIF_RETTYPE
erts_debug_dirty_3(BIF_ALIST_3)
{
-#ifdef ERTS_DIRTY_SCHEDULERS
Eterm argv[2];
switch (BIF_ARG_1) {
case am_normal:
@@ -866,9 +863,6 @@ erts_debug_dirty_3(BIF_ALIST_3)
default:
BIF_ERROR(BIF_P, EXC_BADARG);
}
-#else
- BIF_ERROR(BIF_P, EXC_UNDEF);
-#endif
}
@@ -876,7 +870,6 @@ static BIF_RETTYPE
dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I)
{
BIF_RETTYPE ret;
-#ifdef ERTS_DIRTY_SCHEDULERS
if (am_scheduler == arg1) {
ErtsSchedulerData *esdp;
if (arg2 != am_type)
@@ -1062,13 +1055,9 @@ dirty_test(Process *c_p, Eterm type, Eterm arg1, Eterm arg2, UWord *I)
badarg:
ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
}
-#else
- ERTS_BIF_PREP_ERROR(ret, c_p, EXC_UNDEF);
-#endif
return ret;
}
-#ifdef ERTS_DIRTY_SCHEDULERS
static int
dirty_send_message(Process *c_p, Eterm to, Eterm tag)
@@ -1155,7 +1144,6 @@ ms_wait(Process *c_p, Eterm etimeout, int busy)
return 1;
}
-#endif /* ERTS_DIRTY_SCHEDULERS */
# define ERTS_STACK_LIMIT ((char *) ethr_get_stacklimit())
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 55990362ff..e086b3cf7b 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -1034,7 +1034,6 @@ void process_main(Eterm * x_reg_array, FloatDef* f_reg_array)
*/
void erts_dirty_process_main(ErtsSchedulerData *esdp)
{
-#ifdef ERTS_DIRTY_SCHEDULERS
Process* c_p = NULL;
ErtsMonotonicTime start_time;
#ifdef DEBUG
@@ -1273,7 +1272,6 @@ void erts_dirty_process_main(ErtsSchedulerData *esdp)
I = c_p->i;
goto context_switch;
}
-#endif /* ERTS_DIRTY_SCHEDULERS */
}
static ErtsCodeMFA *
diff --git a/erts/emulator/beam/beam_load.h b/erts/emulator/beam/beam_load.h
index c088bdb751..c4a90d3f3a 100644
--- a/erts/emulator/beam/beam_load.h
+++ b/erts/emulator/beam/beam_load.h
@@ -111,11 +111,7 @@ typedef struct beam_code_header {
}BeamCodeHeader;
-#ifdef ERTS_DIRTY_SCHEDULERS
# define BEAM_NIF_MIN_FUNC_SZ 4
-#else
-# define BEAM_NIF_MIN_FUNC_SZ 3
-#endif
void erts_release_literal_area(struct ErtsLiteralArea_* literal_area);
int erts_is_module_native(BeamCodeHeader* code);
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index ded6c6f1a4..80f391e91e 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -4701,7 +4701,6 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
ref,
old ? am_true : am_false);
}
-#if defined(ERTS_DIRTY_SCHEDULERS)
} else if (BIF_ARG_1 == am_dirty_cpu_schedulers_online) {
Sint old_no;
if (!is_small(BIF_ARG_2))
@@ -4727,7 +4726,6 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
BIF_ERROR(BIF_P, EXC_INTERNAL_ERROR);
break;
}
-#endif
} else if (BIF_ARG_1 == am_time_offset
&& ERTS_IS_ATOM_STR("finalize", BIF_ARG_2)) {
ErtsTimeOffsetState res;
@@ -5104,7 +5102,6 @@ schedule(Process *c_p, Process *dirty_shadow_proc,
argc, argv);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
static BIF_RETTYPE dirty_bif_result(BIF_ALIST_1)
{
@@ -5147,7 +5144,6 @@ static BIF_RETTYPE dirty_bif_exception(BIF_ALIST_2)
BIF_ERROR(BIF_P, freason);
}
-#endif /* ERTS_DIRTY_SCHEDULERS */
extern BeamInstr* em_call_bif_e;
static BIF_RETTYPE call_bif(Process *c_p, Eterm *reg, BeamInstr *I);
@@ -5165,7 +5161,6 @@ erts_schedule_bif(Process *proc,
Process *c_p, *dirty_shadow_proc;
ErtsCodeMFA *mfa;
-#ifdef ERTS_DIRTY_SCHEDULERS
if (proc->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
dirty_shadow_proc = proc;
c_p = proc->next;
@@ -5173,7 +5168,6 @@ erts_schedule_bif(Process *proc,
erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
else
-#endif
{
dirty_shadow_proc = NULL;
c_p = proc;
@@ -5189,7 +5183,6 @@ erts_schedule_bif(Process *proc,
* ibif - indirect bif
*/
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_aint32_t set, mask;
mask = (ERTS_PSFLG_DIRTY_CPU_PROC
| ERTS_PSFLG_DIRTY_IO_PROC);
@@ -5213,10 +5206,6 @@ erts_schedule_bif(Process *proc,
}
(void) erts_atomic32_read_bset_nob(&c_p->state, mask, set);
-#else
- dbif = call_bif;
- ibif = bif;
-#endif
if (i == NULL) {
ERTS_INTERNAL_ERROR("Missing instruction pointer");
@@ -5292,7 +5281,6 @@ call_bif(Process *c_p, Eterm *reg, BeamInstr *I)
return ret;
}
-#ifdef ERTS_DIRTY_SCHEDULERS
int
erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *reg)
@@ -5386,7 +5374,6 @@ erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *
return exiting;
}
-#endif /* ERTS_DIRTY_SCHEDULERS */
#ifdef HARDDEBUG
diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h
index 9b0870dee2..a2bc883dbe 100644
--- a/erts/emulator/beam/bif.h
+++ b/erts/emulator/beam/bif.h
@@ -498,10 +498,8 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p,
Eterm args[],
int nargs);
-#ifdef ERTS_DIRTY_SCHEDULERS
int erts_call_dirty_bif(ErtsSchedulerData *esdp, Process *c_p,
BeamInstr *I, Eterm *reg);
-#endif
BIF_RETTYPE
erts_schedule_bif(Process *proc,
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index aee54ad0a8..845cef24c7 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -1841,9 +1841,7 @@ erts_alloc_register_scheduler(void *vesdp)
int ix = (int) esdp->no;
int aix;
-#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
-#endif
for (aix = ERTS_ALC_A_MIN; aix <= ERTS_ALC_A_MAX; aix++) {
ErtsAllocatorThrSpec_t *tspec = &erts_allctr_thr_spec[aix];
esdp->alloc_data.deallctr[aix] = NULL;
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 93f5f9ec11..36939d6acc 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -89,9 +89,7 @@ static char erts_system_version[] = ("Erlang/OTP " ERLANG_OTP_RELEASE
" [64-bit]"
#endif
" [smp:%beu:%beu]"
-#if defined(ERTS_DIRTY_SCHEDULERS)
" [ds:%beu:%beu:%beu]"
-#endif
#if defined(ERTS_DIRTY_SCHEDULERS_TEST)
" [dirty-schedulers-TEST]"
#endif
@@ -371,9 +369,7 @@ erts_print_system_version(fmtfn_t to, void *arg, Process *c_p)
return erts_print(to, arg, erts_system_version,
rc_str
, total, online
-#ifdef ERTS_DIRTY_SCHEDULERS
, dirty_cpu, dirty_cpu_onln, dirty_io
-#endif
, erts_async_max_threads
#ifdef ERTS_ENABLE_KERNEL_POLL
, erts_use_kernel_poll ? "true" : "false"
@@ -2131,11 +2127,6 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
ASSERT(erts_compat_rel > 0);
BIF_RET(make_small(erts_compat_rel));
} else if (BIF_ARG_1 == am_multi_scheduling) {
-#ifndef ERTS_DIRTY_SCHEDULERS
- if (erts_no_schedulers == 1)
- BIF_RET(am_disabled);
- else
-#endif
{
int msb = erts_is_multi_scheduling_blocked();
BIF_RET(!msb
@@ -2674,27 +2665,15 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
BIF_RET(make_small(active));
} else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers", BIF_ARG_1)) {
Uint dirty_cpu;
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_schedulers_state(NULL, NULL, NULL, &dirty_cpu, NULL, NULL, NULL, NULL);
-#else
- dirty_cpu = 0;
-#endif
BIF_RET(make_small(dirty_cpu));
} else if (ERTS_IS_ATOM_STR("dirty_cpu_schedulers_online", BIF_ARG_1)) {
Uint dirty_cpu_onln;
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_schedulers_state(NULL, NULL, NULL, NULL, &dirty_cpu_onln, NULL, NULL, NULL);
-#else
- dirty_cpu_onln = 0;
-#endif
BIF_RET(make_small(dirty_cpu_onln));
} else if (ERTS_IS_ATOM_STR("dirty_io_schedulers", BIF_ARG_1)) {
Uint dirty_io;
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_schedulers_state(NULL, NULL, NULL, NULL, NULL, NULL, &dirty_io, NULL);
-#else
- dirty_io = 0;
-#endif
BIF_RET(make_small(dirty_io));
} else if (ERTS_IS_ATOM_STR("run_queues", BIF_ARG_1)) {
res = make_small(erts_no_run_queues);
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index b02f966558..ffc1c3261f 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -1043,16 +1043,12 @@ trace_info_func(Process* p, Eterm func_spec, Eterm key)
erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
erts_thr_progress_block();
}
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_mtx_lock(&erts_dirty_bp_ix_mtx);
-#endif
r = function_is_traced(p, mfa, &ms, &ms_meta, &meta, &count, &call_time);
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_mtx_unlock(&erts_dirty_bp_ix_mtx);
-#endif
if ( (key == am_call_time) || (key == am_all)) {
erts_thr_progress_unblock();
erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
diff --git a/erts/emulator/beam/erl_bif_unique.c b/erts/emulator/beam/erl_bif_unique.c
index aa79503819..19d46537f9 100644
--- a/erts/emulator/beam/erl_bif_unique.c
+++ b/erts/emulator/beam/erl_bif_unique.c
@@ -77,11 +77,9 @@ init_reference(void)
ref_init_value += (Uint64) tv.tv_usec;
#ifdef DEBUG
max_thr_id = (Uint32) erts_no_schedulers;
-#ifdef ERTS_DIRTY_SCHEDULERS
max_thr_id += (Uint32) erts_no_dirty_cpu_schedulers;
max_thr_id += (Uint32) erts_no_dirty_io_schedulers;
#endif
-#endif
erts_atomic64_init_nob(&global_reference.count,
(erts_aint64_t) ref_init_value);
init_magic_ref_tables();
@@ -439,10 +437,8 @@ init_unique_integer(void)
{
int bits;
unique_data.r.o.val0_max = (Uint64) erts_no_schedulers;
-#ifdef ERTS_DIRTY_SCHEDULERS
unique_data.r.o.val0_max += (Uint64) erts_no_dirty_cpu_schedulers;
unique_data.r.o.val0_max += (Uint64) erts_no_dirty_io_schedulers;
-#endif
bits = erts_fit_in_bits_int64(unique_data.r.o.val0_max);
unique_data.r.o.left_shift = bits;
unique_data.r.o.right_shift = 64 - bits;
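
With the conditionals gone, the thread-id space used for references and unique integers always includes the dirty schedulers. A worked sketch of the bit partitioning computed above, with illustrative scheduler counts (fit_in_bits_64 mirrors what erts_fit_in_bits_int64 is used for here):

    #include <stdint.h>
    #include <stdio.h>

    static int fit_in_bits_64(uint64_t v)
    {
        int bits = 0;
        while (v) { bits++; v >>= 1; }
        return bits;
    }

    int main(void)
    {
        /* 8 normal + 8 dirty CPU + 10 dirty I/O scheduler threads */
        uint64_t val0_max = 8 + 8 + 10;          /* = 26 */
        int bits = fit_in_bits_64(val0_max);     /* 26 needs 5 bits */
        printf("thread-id bits: %d, per-thread counter bits: %d\n",
               bits, 64 - bits);                 /* prints 5 and 59 */
        return 0;
    }
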
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index bc3bcdc9ad..8344c164fa 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -183,12 +183,10 @@ typedef struct {
erts_atomic32_t refc;
} ErtsGCInfoReq;
-#ifdef ERTS_DIRTY_SCHEDULERS
static struct {
erts_mtx_t mtx;
ErtsGCInfo info;
} dirty_gc;
-#endif
static ERTS_INLINE int
gc_cost(Uint gc_moved_live_words, Uint resize_moved_words)
@@ -273,11 +271,9 @@ erts_init_gc(void)
init_gc_info(&esdp->gc_info);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_mtx_init(&dirty_gc.mtx, "dirty_gc_info", NIL,
ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_GENERIC);
init_gc_info(&dirty_gc.info);
-#endif
init_gcireq_alloc();
}
@@ -481,12 +477,10 @@ delay_garbage_collection(Process *p, ErlHeapFragment *live_hf_end, int need, int
}
if (need == 0) {
-#ifdef ERTS_DIRTY_SCHEDULERS
if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) {
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p)));
goto force_reschedule;
}
-#endif
return 1;
}
/*
@@ -541,9 +535,7 @@ delay_garbage_collection(Process *p, ErlHeapFragment *live_hf_end, int need, int
p->heap_hfrag = hfrag;
#endif
-#ifdef ERTS_DIRTY_SCHEDULERS
force_reschedule:
-#endif
/* Make sure that we do a proper GC as soon as possible... */
p->flags |= F_FORCE_GC;
@@ -616,7 +608,6 @@ young_gen_usage(Process *p)
} \
} while (0)
-#ifdef ERTS_DIRTY_SCHEDULERS
static ERTS_INLINE void
check_for_possibly_long_gc(Process *p, Uint ygen_usage)
@@ -640,7 +631,6 @@ check_for_possibly_long_gc(Process *p, Uint ygen_usage)
}
}
-#endif
/*
* Garbage collect a process.
@@ -675,21 +665,17 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
state = erts_atomic32_read_nob(&p->state);
if ((p->flags & (F_DISABLE_GC|F_DELAY_GC)) || state & ERTS_PSFLG_EXITING) {
-#ifdef ERTS_DIRTY_SCHEDULERS
delay_gc_before_start:
-#endif
return delay_garbage_collection(p, live_hf_end, need, fcalls);
}
ygen_usage = max_young_gen_usage ? max_young_gen_usage : young_gen_usage(p);
-#ifdef ERTS_DIRTY_SCHEDULERS
if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
check_for_possibly_long_gc(p, ygen_usage);
if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC))
goto delay_gc_before_start;
}
-#endif
if (p->abandoned_heap)
live_hf_end = ERTS_INVALID_HFRAG_PTR;
@@ -731,14 +717,12 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
if (IS_TRACED_FL(p, F_TRACE_GC)) {
trace_gc(p, am_gc_minor_end, reclaimed_now, THE_NON_VALUE);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
p->flags |= F_NEED_FULLSWEEP;
check_for_possibly_long_gc(p, ygen_usage);
if (p->flags & F_DIRTY_MAJOR_GC)
goto delay_gc_after_start;
}
-#endif
goto do_major_collection;
}
if (ERTS_SCHEDULER_IS_DIRTY(esdp))
@@ -784,9 +768,7 @@ do_major_collection:
am_kill, NIL, NULL, 0);
erts_proc_unlock(p, locks & ERTS_PROC_LOCKS_ALL_MINOR);
-#ifdef ERTS_DIRTY_SCHEDULERS
delay_gc_after_start:
-#endif
/* erts_send_exit_signal looks for ERTS_PSFLG_GC, so
we have to remove it after the signal is sent */
erts_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
@@ -821,7 +803,6 @@ do_major_collection:
monitor_large_heap(p);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
erts_mtx_lock(&dirty_gc.mtx);
dirty_gc.info.garbage_cols++;
@@ -829,7 +810,6 @@ do_major_collection:
erts_mtx_unlock(&dirty_gc.mtx);
}
else
-#endif
{
esdp->gc_info.garbage_cols++;
esdp->gc_info.reclaimed += reclaimed_now;
@@ -907,7 +887,6 @@ garbage_collect_hibernate(Process* p, int check_long_gc)
if (p->flags & F_DISABLE_GC)
ERTS_INTERNAL_ERROR("GC disabled");
-#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p)))
p->flags &= ~(F_DIRTY_GC_HIBERNATE|F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC);
else if (check_long_gc) {
@@ -920,7 +899,6 @@ garbage_collect_hibernate(Process* p, int check_long_gc)
}
p->flags = flags;
}
-#endif
/*
* Preliminaries.
*/
@@ -1110,7 +1088,6 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
p->flags |= F_NEED_FULLSWEEP;
-#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p)))
p->flags &= ~F_DIRTY_CLA;
else {
@@ -1126,7 +1103,6 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
return 10;
}
}
-#endif
reds = (Sint64) garbage_collect(p, ERTS_INVALID_HFRAG_PTR, 0,
p->arg_reg, p->arity, fcalls,
@@ -3230,7 +3206,6 @@ reply_gc_info(void *vgcirp)
reclaimed = esdp->gc_info.reclaimed;
garbage_cols = esdp->gc_info.garbage_cols;
-#ifdef ERTS_DIRTY_SCHEDULERS
/*
* Add dirty schedulers info on requesting
* schedulers info
@@ -3241,7 +3216,6 @@ reply_gc_info(void *vgcirp)
garbage_cols += dirty_gc.info.garbage_cols;
erts_mtx_unlock(&dirty_gc.mtx);
}
-#endif
sz = 0;
hpp = NULL;
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 34affaa015..8c14c86219 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -180,11 +180,9 @@ int erts_compat_rel;
static int no_schedulers;
static int no_schedulers_online;
-#ifdef ERTS_DIRTY_SCHEDULERS
static int no_dirty_cpu_schedulers;
static int no_dirty_cpu_schedulers_online;
static int no_dirty_io_schedulers;
-#endif
#ifdef DEBUG
Uint32 verbose; /* See erl_debug.h for information about verbose */
@@ -313,11 +311,9 @@ erl_init(int ncpu,
erts_init_process(ncpu, proc_tab_sz, legacy_proc_tab);
erts_init_scheduling(no_schedulers,
no_schedulers_online
-#ifdef ERTS_DIRTY_SCHEDULERS
, no_dirty_cpu_schedulers,
no_dirty_cpu_schedulers_online,
no_dirty_io_schedulers
-#endif
);
erts_late_init_time_sup();
erts_init_cpu_topology(); /* Must be after init_scheduling */
@@ -609,7 +605,6 @@ void erts_usage(void)
ERTS_SCHED_THREAD_MIN_STACK_SIZE,
ERTS_SCHED_THREAD_MAX_STACK_SIZE,
ERTS_DEFAULT_SCHED_STACK_SIZE);
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_fprintf(stderr, "-sssdcpu size suggested stack size in kilo words for dirty CPU scheduler\n");
erts_fprintf(stderr, " threads, valid range is [%d-%d] (default %d)\n",
ERTS_SCHED_THREAD_MIN_STACK_SIZE,
@@ -620,7 +615,6 @@ void erts_usage(void)
ERTS_SCHED_THREAD_MIN_STACK_SIZE,
ERTS_SCHED_THREAD_MAX_STACK_SIZE,
ERTS_DEFAULT_DIO_SCHED_STACK_SIZE);
-#endif
erts_fprintf(stderr, "-spp Bool set port parallelism scheduling hint\n");
erts_fprintf(stderr, "-S n1:n2 set number of schedulers (n1), and number of\n");
erts_fprintf(stderr, " schedulers online (n2), maximum for both\n");
@@ -629,7 +623,6 @@ void erts_usage(void)
erts_fprintf(stderr, "-SP p1:p2 specify schedulers (p1) and schedulers online (p2)\n");
erts_fprintf(stderr, " as percentages of logical processors configured and logical\n");
erts_fprintf(stderr, " processors available, respectively\n");
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_fprintf(stderr, "-SDcpu n1:n2 set number of dirty CPU schedulers (n1), and number of\n");
erts_fprintf(stderr, " dirty CPU schedulers online (n2), valid range for both\n");
erts_fprintf(stderr, " numbers is [1-%d], and n2 must be less than or equal to n1\n",
@@ -639,7 +632,6 @@ void erts_usage(void)
erts_fprintf(stderr, " and logical processors available, respectively\n");
erts_fprintf(stderr, "-SDio n set number of dirty I/O schedulers, valid range is [0-%d]\n",
ERTS_MAX_NO_OF_DIRTY_IO_SCHEDULERS);
-#endif
erts_fprintf(stderr, "-t size set the maximum number of atoms the emulator can handle\n");
erts_fprintf(stderr, " valid range is [%d-%d]\n",
MIN_ATOM_TABLE_SIZE, MAX_ATOM_TABLE_SIZE);
@@ -725,13 +717,11 @@ early_init(int *argc, char **argv) /*
int schdlrs_percentage = 100;
int schdlrs_onln_percentage = 100;
int max_main_threads;
-#ifdef ERTS_DIRTY_SCHEDULERS
int dirty_cpu_scheds;
int dirty_cpu_scheds_online;
int dirty_cpu_scheds_pctg = 100;
int dirty_cpu_scheds_onln_pctg = 100;
int dirty_io_scheds;
-#endif
int max_reader_groups;
int reader_groups;
char envbuf[21]; /* enough for any 64-bit integer */
@@ -794,11 +784,9 @@ early_init(int *argc, char **argv) /*
schdlrs = no_schedulers;
schdlrs_onln = no_schedulers_online;
-#ifdef ERTS_DIRTY_SCHEDULERS
dirty_cpu_scheds = no_schedulers;
dirty_cpu_scheds_online = no_schedulers_online;
dirty_io_scheds = 10;
-#endif
envbufsz = sizeof(envbuf);
@@ -891,7 +879,6 @@ early_init(int *argc, char **argv) /*
("using %d:%d scheduler percentages\n",
schdlrs_percentage, schdlrs_onln_percentage));
}
-#ifdef ERTS_DIRTY_SCHEDULERS
else if (argv[i][2] == 'D') {
char *arg;
char *type = argv[i]+3;
@@ -1003,7 +990,6 @@ early_init(int *argc, char **argv) /*
break;
}
}
-#endif
else {
int tot, onln;
char *arg = get_arg(argv[i]+2, argv[i+1], &i);
@@ -1085,7 +1071,6 @@ early_init(int *argc, char **argv) /*
erts_usage();
}
}
-#ifdef ERTS_DIRTY_SCHEDULERS
/* apply any dirty scheduler precentages */
if (dirty_cpu_scheds_pctg != 100 || dirty_cpu_scheds_onln_pctg != 100) {
dirty_cpu_scheds = dirty_cpu_scheds * dirty_cpu_scheds_pctg / 100;
@@ -1099,7 +1084,6 @@ early_init(int *argc, char **argv) /*
dirty_cpu_scheds_online = schdlrs_onln;
if (dirty_cpu_scheds_online < 1)
dirty_cpu_scheds_online = 1;
-#endif
}
@@ -1107,11 +1091,9 @@ early_init(int *argc, char **argv) /*
no_schedulers_online = schdlrs_onln;
erts_no_schedulers = (Uint) no_schedulers;
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers = dirty_cpu_scheds;
no_dirty_cpu_schedulers_online = dirty_cpu_scheds_online;
erts_no_dirty_io_schedulers = no_dirty_io_schedulers = dirty_io_scheds;
-#endif
erts_early_init_scheduling(no_schedulers);
alloc_opts.ncpu = ncpu;
@@ -1132,13 +1114,9 @@ early_init(int *argc, char **argv) /*
*/
erts_thr_progress_init(no_schedulers,
no_schedulers+2,
-#ifndef ERTS_DIRTY_SCHEDULERS
- erts_async_max_threads
-#else
erts_async_max_threads +
erts_no_dirty_cpu_schedulers +
erts_no_dirty_io_schedulers
-#endif
);
erts_thr_q_init();
erts_init_utils();
@@ -1237,10 +1215,8 @@ erl_start(int argc, char **argv)
* a lot of stack.
*/
erts_sched_thread_suggested_stack_size = ERTS_DEFAULT_SCHED_STACK_SIZE;
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_dcpu_sched_thread_suggested_stack_size = ERTS_DEFAULT_DCPU_SCHED_STACK_SIZE;
erts_dio_sched_thread_suggested_stack_size = ERTS_DEFAULT_DIO_SCHED_STACK_SIZE;
-#endif
#ifdef DEBUG
verbose = DEBUG_DEFAULT;
@@ -1855,7 +1831,6 @@ erl_start(int argc, char **argv)
VERBOSE(DEBUG_SYSTEM,
("scheduler wakeup threshold: %s\n", arg));
}
-#ifdef ERTS_DIRTY_SCHEDULERS
else if (has_prefix("ssdcpu", sub_param)) {
/* suggested stack size (Kilo Words) for dirty CPU scheduler threads */
arg = get_arg(sub_param+6, argv[i+1], &i);
@@ -1890,7 +1865,6 @@ erl_start(int argc, char **argv)
("suggested dirty IO scheduler thread stack size %d kilo words\n",
erts_dio_sched_thread_suggested_stack_size));
}
-#endif
else if (has_prefix("ss", sub_param)) {
/* suggested stack size (Kilo Words) for scheduler threads */
arg = get_arg(sub_param+2, argv[i+1], &i);
@@ -2205,12 +2179,10 @@ erl_start(int argc, char **argv)
if (erts_sched_thread_suggested_stack_size < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
erts_sched_thread_suggested_stack_size = ERTS_SCHED_THREAD_MIN_STACK_SIZE;
-#ifdef ERTS_DIRTY_SCHEDULERS
if (erts_dcpu_sched_thread_suggested_stack_size < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
erts_dcpu_sched_thread_suggested_stack_size = ERTS_SCHED_THREAD_MIN_STACK_SIZE;
if (erts_dio_sched_thread_suggested_stack_size < ERTS_SCHED_THREAD_MIN_STACK_SIZE)
erts_dio_sched_thread_suggested_stack_size = ERTS_SCHED_THREAD_MIN_STACK_SIZE;
-#endif
erl_init(ncpu,
proc_tab_sz,
@@ -2255,7 +2227,6 @@ erl_start(int argc, char **argv)
&& erts_literal_area_collector->common.id == pid);
erts_proc_inc_refc(erts_literal_area_collector);
-#ifdef ERTS_DIRTY_SCHEDULERS
pid = erl_system_process_otp(otp_ring0_pid, "erts_dirty_process_code_checker", !0);
erts_dirty_process_code_checker
= (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc,
@@ -2263,7 +2234,6 @@ erl_start(int argc, char **argv)
ASSERT(erts_dirty_process_code_checker
&& erts_dirty_process_code_checker->common.id == pid);
erts_proc_inc_refc(erts_dirty_process_code_checker);
-#endif
}
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 47c0fd2869..f81c90818f 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -119,11 +119,9 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "schdlr_sspnd", NULL },
{ "migration_info_update", NULL },
{ "run_queue", "address" },
-#ifdef ERTS_DIRTY_SCHEDULERS
{ "dirty_run_queue_sleep_list", "address" },
{ "dirty_gc_info", NULL },
{ "dirty_break_point_index", NULL },
-#endif
{ "process_table", NULL },
{ "cpu_info", NULL },
{ "pollset", "address" },
diff --git a/erts/emulator/beam/erl_nfunc_sched.h b/erts/emulator/beam/erl_nfunc_sched.h
index 69008084df..9be0e6f7c7 100644
--- a/erts/emulator/beam/erl_nfunc_sched.h
+++ b/erts/emulator/beam/erl_nfunc_sched.h
@@ -193,7 +193,6 @@ erts_nif_export_check_save_trace(Process *c_p, Eterm result,
ERTS_GLB_INLINE Process *
erts_proc_shadow2real(Process *c_p)
{
-#ifdef ERTS_DIRTY_SCHEDULERS
if (c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC) {
Process *real_c_p = c_p->next;
ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
@@ -201,7 +200,6 @@ erts_proc_shadow2real(Process *c_p)
return real_c_p;
}
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_get_scheduler_data()));
-#endif
return c_p;
}
@@ -217,7 +215,6 @@ erts_proc_shadow2real(Process *c_p)
|| BeamOp(op_call_nif) == (BeamInstr *) (*(I))), \
((NifExport *) (((char *) (I)) - offsetof(NifExport, exp.beam[0]))))
-#ifdef ERTS_DIRTY_SCHEDULERS
#include "erl_message.h"
#include <stddef.h>
@@ -326,7 +323,6 @@ erts_make_dirty_shadow_proc(ErtsSchedulerData *esdp, Process *c_p)
#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
-#endif /* ERTS_DIRTY_SCHEDULERS */
#endif /* defined(ERTS_WANT_NFUNC_SCHED_INTERNALS__) && !defined(ERTS_NFUNC_SCHED_INTERNALS__) */
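
erts_proc_shadow2real() above depends on a convention that is now always in force: code running on a dirty scheduler may be handed a shadow process struct whose next field points back at the real process. A generic restatement with illustrative types, not the ERTS definitions:

    struct proc {
        unsigned static_flags;
    #define STC_FLG_SHADOW_PROC (1u << 0)   /* plays the ERTS_STC_FLG_SHADOW_PROC role */
        struct proc *next;                  /* shadow -> real process link */
        /* ... remaining process state ... */
    };

    static struct proc *shadow2real(struct proc *p)
    {
        if (p->static_flags & STC_FLG_SHADOW_PROC)
            return p->next;                 /* we were handed the shadow */
        return p;                           /* already the real process */
    }
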
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index d3271a4560..43441e0228 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -304,7 +304,6 @@ schedule(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
return (ERL_NIF_TERM) THE_NON_VALUE;
}
-#ifdef ERTS_DIRTY_SCHEDULERS
static ERL_NIF_TERM dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
@@ -394,24 +393,19 @@ erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p, BeamInstr *I, Eterm *
return exiting;
}
-#endif
static void full_flush_env(ErlNifEnv* env)
{
flush_env(env);
-#ifdef ERTS_DIRTY_SCHEDULERS
if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)
/* Dirty nif call using shadow process struct */
erts_flush_dirty_shadow_proc(env->proc);
-#endif
}
static void full_cache_env(ErlNifEnv* env)
{
-#ifdef ERTS_DIRTY_SCHEDULERS
if (env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC)
erts_cache_dirty_shadow_proc(env->proc);
-#endif
cache_env(env);
}
@@ -2591,7 +2585,6 @@ nif_export_restore(Process *c_p, NifExport *ep, Eterm res)
}
-#ifdef ERTS_DIRTY_SCHEDULERS
/*
* Finalize a dirty NIF call. This function is scheduled to cause the VM to
@@ -2719,7 +2712,6 @@ static_schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[
return static_schedule_dirty_nif(env, ERTS_PSFLG_DIRTY_CPU_PROC, argc, argv);
}
-#endif /* ERTS_DIRTY_SCHEDULERS */
/*
* NIF execution wrapper used by enif_schedule_nif() for regular NIFs. It
@@ -2800,11 +2792,7 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
result = schedule(env, execute_nif, fp, proc->current->module,
fun_name_atom, argc, argv);
else if (!(flags & ~(ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND))) {
-#ifdef ERTS_DIRTY_SCHEDULERS
result = schedule_dirty_nif(env, flags, fp, fun_name_atom, argc, argv);
-#else
- result = enif_raise_exception(env, am_notsup);
-#endif
}
else
result = enif_make_badarg(env);
@@ -2826,12 +2814,10 @@ enif_thread_type(void)
switch (esdp->type) {
case ERTS_SCHED_NORMAL:
return ERL_NIF_THR_NORMAL_SCHEDULER;
-#ifdef ERTS_DIRTY_SCHEDULERS
case ERTS_SCHED_DIRTY_CPU:
return ERL_NIF_THR_DIRTY_CPU_SCHEDULER;
case ERTS_SCHED_DIRTY_IO:
return ERL_NIF_THR_DIRTY_IO_SCHEDULER;
-#endif
default:
ERTS_INTERNAL_ERROR("Invalid scheduler type");
return -1;
@@ -3929,14 +3915,9 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
* dirty scheduler support, treat a non-zero flags field as
* a load error.
*/
-#ifdef ERTS_DIRTY_SCHEDULERS
if (f->flags != ERL_NIF_DIRTY_JOB_IO_BOUND && f->flags != ERL_NIF_DIRTY_JOB_CPU_BOUND)
ret = load_nif_error(BIF_P, bad_lib, "Illegal flags field value %d for NIF %T:%s/%u",
f->flags, mod_atom, f->name, f->arity);
-#else
- ret = load_nif_error(BIF_P, bad_lib, "NIF %T:%s/%u requires a runtime with dirty scheduler support.",
- mod_atom, f->name, f->arity);
-#endif
}
else if (erts_codeinfo_to_code(ci_pp[1]) - erts_codeinfo_to_code(ci_pp[0])
< BEAM_NIF_MIN_FUNC_SZ)
@@ -4009,7 +3990,6 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
(BeamInstr) BeamOp(op_i_generic_breakpoint));
g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
if (f->flags) {
code_ptr[3] = (BeamInstr) f->fptr;
code_ptr[1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ?
@@ -4017,7 +3997,6 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
(BeamInstr) static_schedule_dirty_cpu_nif;
}
else
-#endif
code_ptr[1] = (BeamInstr) f->fptr;
code_ptr[2] = (BeamInstr) lib;
}
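
The erl_nif.c changes leave the dirty-NIF machinery permanently available: load_nif_2 always accepts the ERL_NIF_DIRTY_JOB_IO_BOUND and ERL_NIF_DIRTY_JOB_CPU_BOUND flag values, and enif_schedule_nif can always target a dirty scheduler. A sketch of the latter, where do_io_work is a hypothetical continuation and not a symbol in this patch:

    #include "erl_nif.h"

    static ERL_NIF_TERM
    do_io_work(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
    {
        /* ... perform the blocking work on a dirty I/O scheduler ... */
        return enif_make_atom(env, "ok");
    }

    static ERL_NIF_TERM
    start(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
    {
        /* Runs on a normal scheduler; hand the blocking part off. */
        return enif_schedule_nif(env, "do_io_work", ERL_NIF_DIRTY_JOB_IO_BOUND,
                                 do_io_work, argc, argv);
    }
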
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index ab5030e5b9..8c59116fb6 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -192,10 +192,8 @@ static UWord thr_prgr_later_cleanup_op_threshold = ERTS_THR_PRGR_LATER_CLEANUP_O
ErtsPTab erts_proc erts_align_attribute(ERTS_CACHE_LINE_SIZE);
int erts_sched_thread_suggested_stack_size = -1;
-#ifdef ERTS_DIRTY_SCHEDULERS
int erts_dcpu_sched_thread_suggested_stack_size = -1;
int erts_dio_sched_thread_suggested_stack_size = -1;
-#endif
#ifdef ERTS_ENABLE_LOCK_CHECK
ErtsLcPSDLocks erts_psd_required_locks[ERTS_PSD_SIZE];
#endif
@@ -223,10 +221,8 @@ typedef struct {
typedef struct {
Uint32 normal;
-#ifdef ERTS_DIRTY_SCHEDULERS
Uint32 dirty_cpu;
Uint32 dirty_io;
-#endif
} ErtsSchedTypeCounters;
static struct {
@@ -239,9 +235,7 @@ static struct {
Eterm changer;
ErtsMultiSchedulingBlock nmsb; /* Normal multi Scheduling Block */
ErtsMultiSchedulingBlock msb; /* Multi Scheduling Block */
-#ifdef ERTS_DIRTY_SCHEDULERS
ErtsSchedType last_msb_dirty_type;
-#endif
} schdlr_sspnd;
static void init_scheduler_suspend(void);
@@ -250,10 +244,8 @@ static ERTS_INLINE Uint32
schdlr_sspnd_eq_nscheds(ErtsSchedTypeCounters *val1p, ErtsSchedTypeCounters *val2p)
{
int res = val1p->normal == val2p->normal;
-#ifdef ERTS_DIRTY_SCHEDULERS
res &= val1p->dirty_cpu == val2p->dirty_cpu;
res &= val1p->dirty_io == val2p->dirty_io;
-#endif
return res;
}
@@ -264,16 +256,10 @@ schdlr_sspnd_get_nscheds(ErtsSchedTypeCounters *valp,
switch (type) {
case ERTS_SCHED_NORMAL:
return valp->normal;
-#ifdef ERTS_DIRTY_SCHEDULERS
case ERTS_SCHED_DIRTY_CPU:
return valp->dirty_cpu;
case ERTS_SCHED_DIRTY_IO:
return valp->dirty_io;
-#else
- case ERTS_SCHED_DIRTY_CPU:
- case ERTS_SCHED_DIRTY_IO:
- return 0;
-#endif
default:
ERTS_INTERNAL_ERROR("Invalid scheduler type");
return 0;
@@ -285,10 +271,8 @@ static ERTS_INLINE Uint32
schdlr_sspnd_get_nscheds_tot(ErtsSchedTypeCounters *valp)
{
Uint32 res = valp->normal;
-#ifdef ERTS_DIRTY_SCHEDULERS
res += valp->dirty_cpu;
res += valp->dirty_io;
-#endif
return res;
}
#endif
@@ -303,14 +287,12 @@ schdlr_sspnd_dec_nscheds(ErtsSchedTypeCounters *valp,
case ERTS_SCHED_NORMAL:
valp->normal--;
break;
-#ifdef ERTS_DIRTY_SCHEDULERS
case ERTS_SCHED_DIRTY_CPU:
valp->dirty_cpu--;
break;
case ERTS_SCHED_DIRTY_IO:
valp->dirty_io--;
break;
-#endif
default:
ERTS_INTERNAL_ERROR("Invalid scheduler type");
}
@@ -324,14 +306,12 @@ schdlr_sspnd_inc_nscheds(ErtsSchedTypeCounters *valp,
case ERTS_SCHED_NORMAL:
valp->normal++;
break;
-#ifdef ERTS_DIRTY_SCHEDULERS
case ERTS_SCHED_DIRTY_CPU:
valp->dirty_cpu++;
break;
case ERTS_SCHED_DIRTY_IO:
valp->dirty_io++;
break;
-#endif
default:
ERTS_INTERNAL_ERROR("Invalid scheduler type");
}
@@ -345,14 +325,12 @@ schdlr_sspnd_set_nscheds(ErtsSchedTypeCounters *valp,
case ERTS_SCHED_NORMAL:
valp->normal = no;
break;
-#ifdef ERTS_DIRTY_SCHEDULERS
case ERTS_SCHED_DIRTY_CPU:
valp->dirty_cpu = no;
break;
case ERTS_SCHED_DIRTY_IO:
valp->dirty_io = no;
break;
-#endif
default:
ERTS_INTERNAL_ERROR("Invalid scheduler type");
}
@@ -398,7 +376,6 @@ static erts_atomic_t runq_supervisor_sleeping;
ErtsAlignedRunQueue * ERTS_WRITE_UNLIKELY(erts_aligned_run_queues);
Uint ERTS_WRITE_UNLIKELY(erts_no_run_queues);
-#ifdef ERTS_DIRTY_SCHEDULERS
struct {
union {
@@ -411,12 +388,10 @@ struct {
} io;
} dirty_count erts_align_attribute(ERTS_CACHE_LINE_SIZE);
-#endif
static ERTS_INLINE void
dirty_active(ErtsSchedulerData *esdp, erts_aint32_t add)
{
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_aint32_t val;
erts_atomic32_t *ap;
switch (esdp->type) {
@@ -441,18 +416,15 @@ dirty_active(ErtsSchedulerData *esdp, erts_aint32_t add)
val = erts_atomic32_read_nob(ap);
val += add;
erts_atomic32_set_nob(ap, val);
-#endif
}
ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_scheduler_data);
-#ifdef ERTS_DIRTY_SCHEDULERS
ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_dirty_cpu_scheduler_data);
ErtsAlignedSchedulerData * ERTS_WRITE_UNLIKELY(erts_aligned_dirty_io_scheduler_data);
typedef union {
Process dsp;
char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(Process))];
} ErtsAlignedDirtyShadowProcess;
-#endif
typedef union {
ErtsSchedulerSleepInfo ssi;
@@ -460,10 +432,8 @@ typedef union {
} ErtsAlignedSchedulerSleepInfo;
static ErtsAlignedSchedulerSleepInfo *aligned_sched_sleep_info;
-#ifdef ERTS_DIRTY_SCHEDULERS
static ErtsAlignedSchedulerSleepInfo *aligned_dirty_cpu_sched_sleep_info;
static ErtsAlignedSchedulerSleepInfo *aligned_dirty_io_sched_sleep_info;
-#endif
static Uint last_reductions;
static Uint last_exact_reductions;
@@ -535,7 +505,6 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist,
(ASSERT(-1 <= ((int) (IX)) \
&& ((int) (IX)) < ((int) erts_no_schedulers)), \
&aligned_sched_sleep_info[(IX)].ssi)
-#ifdef ERTS_DIRTY_SCHEDULERS
#define ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(IX) \
(ASSERT(0 <= ((int) (IX)) \
&& ((int) (IX)) < ((int) erts_no_dirty_cpu_schedulers)), \
@@ -544,7 +513,6 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist,
(ASSERT(0 <= ((int) (IX)) \
&& ((int) (IX)) < ((int) erts_no_dirty_io_schedulers)), \
&aligned_dirty_io_sched_sleep_info[(IX)].ssi)
-#endif
#define ERTS_FOREACH_RUNQ(RQVAR, DO) \
do { \
@@ -862,7 +830,6 @@ erts_late_init_process(void)
static void
init_sched_wall_time(ErtsSchedulerData *esdp, Uint64 time_stamp)
{
-#ifdef ERTS_DIRTY_SCHEDULERS
if (esdp->type != ERTS_SCHED_NORMAL) {
erts_atomic32_init_nob(&esdp->sched_wall_time.u.mod, 0);
esdp->sched_wall_time.enabled = 1;
@@ -871,7 +838,6 @@ init_sched_wall_time(ErtsSchedulerData *esdp, Uint64 time_stamp)
esdp->sched_wall_time.working.start = ERTS_SCHED_WTIME_IDLE;
}
else
-#endif
{
esdp->sched_wall_time.u.need = erts_sched_balance_util;
esdp->sched_wall_time.enabled = 0;
@@ -1069,7 +1035,6 @@ init_runq_sched_util(ErtsRunQueueSchedUtil *rqsu, int enabled)
#endif /* ERTS_HAVE_SCHED_UTIL_BALANCING_SUPPORT */
-#ifdef ERTS_DIRTY_SCHEDULERS
typedef struct {
Uint64 working;
@@ -1122,7 +1087,6 @@ read_dirty_sched_wall_time(ErtsSchedulerData *esdp, ErtsDirtySchedWallTime *info
info->working = info->total;
}
-#endif
static void
@@ -1225,10 +1189,8 @@ typedef struct {
Eterm ref_heap[ERTS_REF_THING_SIZE];
Uint req_sched;
erts_atomic32_t refc;
-#ifdef ERTS_DIRTY_SCHEDULERS
int want_dirty_cpu;
int want_dirty_io;
-#endif
} ErtsSchedWallTimeReq;
typedef struct {
@@ -1299,7 +1261,6 @@ reply_sched_wall_time(void *vswtrp)
hpp = NULL;
szp = &sz;
-#ifdef ERTS_DIRTY_SCHEDULERS
if (esdp->sched_wall_time.enabled
&& swtrp->req_sched == esdp->no
&& (swtrp->want_dirty_cpu || swtrp->want_dirty_io)) {
@@ -1381,7 +1342,6 @@ reply_sched_wall_time(void *vswtrp)
erts_free(ERTS_ALC_T_TMP, dswt);
}
else
-#endif
{
/* Reply with info about this scheduler only... */
@@ -1449,10 +1409,8 @@ erts_sched_wall_time_request(Process *c_p, int set, int enable,
swtrp->proc = c_p;
swtrp->ref = STORE_NC(&hp, NULL, ref);
swtrp->req_sched = esdp->no;
-#ifdef ERTS_DIRTY_SCHEDULERS
swtrp->want_dirty_cpu = want_dirty_cpu;
swtrp->want_dirty_io = want_dirty_io;
-#endif
erts_atomic32_init_nob(&swtrp->refc,
(erts_aint32_t) erts_no_schedulers);
@@ -3276,21 +3234,16 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
ERTS_LC_ASSERT(erts_lc_runq_is_locked(rq));
-#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
erts_spin_lock(&rq->sleepers.lock);
-#endif
flgs = sched_prep_spin_wait(ssi);
if (flgs & ERTS_SSI_FLG_SUSPENDED) {
/* Go suspend instead... */
-#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
erts_spin_unlock(&rq->sleepers.lock);
-#endif
return;
}
-#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) {
ssi->prev = NULL;
ssi->next = rq->sleepers.list;
@@ -3300,7 +3253,6 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
erts_spin_unlock(&rq->sleepers.lock);
dirty_active(esdp, -1);
}
-#endif
/*
* If all schedulers are waiting, one of them *should*
@@ -3643,7 +3595,6 @@ ssi_wake(ErtsSchedulerSleepInfo *ssi)
erts_sched_finish_poke(ssi, ssi_flags_set_wake(ssi));
}
-#ifdef ERTS_DIRTY_SCHEDULERS
static void
dcpu_sched_ix_suspend_wake(Uint ix)
@@ -3675,7 +3626,6 @@ dio_sched_ix_wake(Uint ix)
}
#endif
-#endif
static void
wake_scheduler(ErtsRunQueue *rq)
@@ -3694,7 +3644,6 @@ wake_scheduler(ErtsRunQueue *rq)
ssi_wake(rq->scheduler->ssi);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
static void
wake_dirty_schedulers(ErtsRunQueue *rq, int one)
{
@@ -3745,7 +3694,6 @@ wake_dirty_scheduler(ErtsRunQueue *rq)
wake_dirty_schedulers(rq, 1);
}
-#endif
#define ERTS_NO_USED_RUNQS_SHIFT 16
#define ERTS_NO_RUNQS_MASK 0xffffU
@@ -3881,11 +3829,9 @@ static ERTS_INLINE void
smp_notify_inc_runq(ErtsRunQueue *runq)
{
if (runq) {
-#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
wake_dirty_scheduler(runq);
else
-#endif
wake_scheduler(runq);
}
}
@@ -4244,7 +4190,6 @@ schedule_bound_processes(ErtsRunQueue *rq,
}
}
-#ifdef ERTS_DIRTY_SCHEDULERS
static ERTS_INLINE void
clear_proc_dirty_queue_bit(Process *p, ErtsRunQueue *rq, int prio_bit)
@@ -4268,7 +4213,6 @@ clear_proc_dirty_queue_bit(Process *p, ErtsRunQueue *rq, int prio_bit)
ASSERT(old & qb);
}
-#endif /* ERTS_DIRTY_SCHEDULERS */
static void
@@ -5539,13 +5483,11 @@ wakeup_other_check(ErtsRunQueue *rq, Uint32 flags)
rq->wakeup_other += (left_len*wo_reds
+ ERTS_WAKEUP_OTHER_FIXED_INC);
if (rq->wakeup_other > wakeup_other.limit) {
-#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) {
if (rq->waiting) {
wake_dirty_scheduler(rq);
}
} else
-#endif
{
int empty_rqs =
erts_atomic32_read_acqb(&no_empty_run_queues);
@@ -5822,7 +5764,6 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp)
case ERTS_SCHED_NORMAL:
id = (int) esdp->no;
break;
-#ifdef ERTS_DIRTY_SCHEDULERS
case ERTS_SCHED_DIRTY_CPU:
id = (int) erts_no_schedulers;
id += (int) esdp->dirty_no;
@@ -5832,7 +5773,6 @@ init_aux_work_data(ErtsAuxWorkData *awdp, ErtsSchedulerData *esdp, char *dawwp)
id += (int) erts_no_dirty_cpu_schedulers;
id += (int) esdp->dirty_no;
break;
-#endif
default:
ERTS_INTERNAL_ERROR("Invalid scheduler type");
break;
@@ -5892,7 +5832,6 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
esdp->f_reg_array =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER,
MAX_REG * sizeof(FloatDef));
-#ifdef ERTS_DIRTY_SCHEDULERS
esdp->run_queue = runq;
if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix)) {
esdp->no = 0;
@@ -5926,12 +5865,6 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
| ERTS_PSFLG_PROXY));
shadow_proc->static_flags = ERTS_STC_FLG_SHADOW_PROC;
}
-#else
- runq->scheduler = esdp;
- esdp->run_queue = runq;
- esdp->no = (Uint) num;
- esdp->type = ERTS_SCHED_NORMAL;
-#endif
esdp->ssi = ssi;
esdp->current_process = NULL;
@@ -5964,10 +5897,8 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
void
erts_init_scheduling(int no_schedulers, int no_schedulers_online
-#ifdef ERTS_DIRTY_SCHEDULERS
, int no_dirty_cpu_schedulers, int no_dirty_cpu_schedulers_online,
int no_dirty_io_schedulers
-#endif
)
{
int ix, n, no_ssi, tot_rqs;
@@ -5989,12 +5920,10 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
ASSERT(no_schedulers_online <= no_schedulers);
ASSERT(no_schedulers_online >= 1);
ASSERT(no_schedulers >= 1);
-#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(no_dirty_cpu_schedulers <= no_schedulers);
ASSERT(no_dirty_cpu_schedulers >= 1);
ASSERT(no_dirty_cpu_schedulers_online <= no_schedulers_online);
ASSERT(no_dirty_cpu_schedulers_online >= 1);
-#endif
/* Create and initialize run queues */
@@ -6021,14 +5950,12 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
erts_cnd_init(&rq->cnd);
-#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(ix)) {
erts_spinlock_init(&rq->sleepers.lock, "dirty_run_queue_sleep_list",
make_small(ix + 1),
ERTS_LOCK_FLAGS_PROPERTY_STATIC | ERTS_LOCK_FLAGS_CATEGORY_SCHEDULER);
}
rq->sleepers.list = NULL;
-#endif
rq->waiting = 0;
rq->woken = 0;
@@ -6088,12 +6015,10 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
n = (int) no_schedulers;
erts_no_schedulers = n;
erts_no_total_schedulers = n;
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers;
erts_no_total_schedulers += no_dirty_cpu_schedulers;
erts_no_dirty_io_schedulers = no_dirty_io_schedulers;
erts_no_total_schedulers += no_dirty_io_schedulers;
-#endif
/* Create and initialize scheduler sleep info */
no_ssi = n+1;
@@ -6114,7 +6039,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
aligned_sched_sleep_info++;
-#ifdef ERTS_DIRTY_SCHEDULERS
aligned_dirty_cpu_sched_sleep_info =
erts_alloc_permanent_cache_aligned(
ERTS_ALC_T_SCHDLR_SLP_INFO,
@@ -6135,7 +6059,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
ssi->event = NULL; /* initialized in sched_dirty_io_thread_func */
erts_atomic32_init_nob(&ssi->aux_work, 0);
}
-#endif
/* Create and initialize scheduler specific data */
@@ -6155,7 +6078,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
NULL, 0);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
{
Uint64 ts = sched_wall_time_ts();
int dirty_scheds = no_dirty_cpu_schedulers + no_dirty_io_schedulers;
@@ -6186,7 +6108,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
&adsp[adspix++].dsp, ts);
}
}
-#endif
init_misc_aux_work();
init_swtreq_alloc();
@@ -6238,7 +6159,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
suspend_run_queue(ERTS_RUNQ_IX(ix));
}
-#ifdef ERTS_DIRTY_SCHEDULERS
schdlr_sspnd_set_nscheds(&schdlr_sspnd.online,
ERTS_SCHED_DIRTY_CPU,
@@ -6274,7 +6194,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
erts_atomic32_init_nob(&dirty_count.io.active,
(erts_aint32_t) no_dirty_io_schedulers);
-#endif
if (set_schdlr_sspnd_change_flags)
erts_atomic32_set_nob(&schdlr_sspnd.changing,
@@ -6359,7 +6278,6 @@ make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio)
#define ERTS_ENQUEUE_DIRTY_CPU_QUEUE 2
#define ERTS_ENQUEUE_DIRTY_IO_QUEUE 3
-#ifdef ERTS_DIRTY_SCHEDULERS
static int
check_dirty_enqueue_in_prio_queue(Process *c_p,
@@ -6453,7 +6371,6 @@ fin_dirty_enq_s_change(Process *p,
return !0;
}
-#endif /* ERTS_DIRTY_SCHEDULERS */
static ERTS_INLINE int
check_enqueue_in_prio_queue(Process *c_p,
@@ -6468,14 +6385,12 @@ check_enqueue_in_prio_queue(Process *c_p,
*prq_prio_p = aprio;
-#ifdef ERTS_DIRTY_SCHEDULERS
if (actual & ERTS_PSFLGS_DIRTY_WORK) {
int res = check_dirty_enqueue_in_prio_queue(c_p, newp, actual,
aprio, qbit);
if (res != ERTS_ENQUEUE_NORMAL_QUEUE)
return res;
}
-#endif
max_qbit = (actual >> ERTS_PSFLGS_IN_PRQ_MASK_OFFSET) & ERTS_PSFLGS_QMASK;
max_qbit |= 1 << ERTS_PSFLGS_QMASK_BITS;
@@ -6518,7 +6433,6 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st
return NULL;
-#ifdef ERTS_DIRTY_SCHEDULERS
case ERTS_ENQUEUE_DIRTY_CPU_QUEUE:
case -ERTS_ENQUEUE_DIRTY_CPU_QUEUE:
@@ -6539,7 +6453,6 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st
return NULL;
-#endif
default: {
ErtsRunQueue* runq;
@@ -6583,7 +6496,6 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
running_flgs = ERTS_PSFLG_DIRTY_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS;
else {
running_flgs = ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS;
-#ifdef ERTS_DIRTY_SCHEDULERS
if (state & ERTS_PSFLG_DIRTY_ACTIVE_SYS
&& (p->flags & (F_DELAY_GC|F_DISABLE_GC))) {
/*
@@ -6601,7 +6513,6 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
~ERTS_PSFLG_DIRTY_ACTIVE_SYS);
state &= ~ERTS_PSFLG_DIRTY_ACTIVE_SYS;
}
-#endif
}
a = state;
@@ -6796,11 +6707,9 @@ change_proc_schedule_state(Process *p,
| ERTS_PSFLG_DIRTY_RUNNING_SYS
| ERTS_PSFLG_IN_RUNQ
| ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE
-#ifdef ERTS_DIRTY_SCHEDULERS
|| (n & (ERTS_PSFLG_RUNNING
| ERTS_PSFLG_RUNNING_SYS
| ERTS_PSFLG_EXITING)) == ERTS_PSFLG_EXITING
-#endif
) {
/*
* Active and seemingly need to be enqueued, but
@@ -7132,7 +7041,6 @@ nrml_sched_ix_resume_wake(Uint ix)
sched_resume_wake__(ERTS_SCHED_SLEEP_INFO_IX(ix));
}
-#ifdef ERTS_DIRTY_SCHEDULERS
static void
dcpu_sched_ix_resume_wake(Uint ix)
@@ -7146,7 +7054,6 @@ dio_sched_ix_resume_wake(Uint ix)
sched_resume_wake__(ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix));
}
-#endif
static erts_aint32_t
sched_prep_spin_suspended(ErtsSchedulerSleepInfo *ssi, erts_aint32_t xpct)
@@ -7231,7 +7138,6 @@ init_scheduler_suspend(void)
schdlr_sspnd.online.normal = 1;
schdlr_sspnd.curr_online.normal = 1;
schdlr_sspnd.active.normal = 1;
-#ifdef ERTS_DIRTY_SCHEDULERS
schdlr_sspnd.online.dirty_cpu = 0;
schdlr_sspnd.curr_online.dirty_cpu = 0;
schdlr_sspnd.active.dirty_cpu = 0;
@@ -7239,7 +7145,6 @@ init_scheduler_suspend(void)
schdlr_sspnd.curr_online.dirty_io = 0;
schdlr_sspnd.active.dirty_io = 0;
schdlr_sspnd.last_msb_dirty_type = ERTS_SCHED_DIRTY_IO;
-#endif
erts_atomic32_init_nob(&schdlr_sspnd.changing, 0);
schdlr_sspnd.chngq = NULL;
schdlr_sspnd.changer = am_false;
@@ -7300,7 +7205,6 @@ schdlr_sspnd_resume_procs(ErtsSchedType sched_type,
}
}
-#ifdef ERTS_DIRTY_SCHEDULERS
static ERTS_INLINE int
have_dirty_work(void)
@@ -7544,7 +7448,6 @@ msb_scheduler_type_switch(ErtsSchedType sched_type,
}
-#endif
static void
suspend_scheduler(ErtsSchedulerData *esdp)
@@ -7572,14 +7475,6 @@ suspend_scheduler(ErtsSchedulerData *esdp)
*/
-#if !defined(ERTS_DIRTY_SCHEDULERS)
-
- sched_type = ERTS_SCHED_NORMAL;
- online_flag = ERTS_SCHDLR_SSPND_CHNG_ONLN;
- no = esdp->no;
- ASSERT(no != 1);
-
-#else
sched_type = esdp->type;
switch (sched_type) {
@@ -7607,7 +7502,6 @@ suspend_scheduler(ErtsSchedulerData *esdp)
/* Suspend and let scheduler 1 of another type execute... */
}
-#endif
if (sched_type != ERTS_SCHED_NORMAL) {
dirty_active(esdp, -1);
@@ -8039,11 +7933,7 @@ erts_set_schedulers_online(Process *p,
erts_aint32_t changing = 0, change_flags;
int online, increase;
ErtsProcList *plp;
-#ifdef ERTS_DIRTY_SCHEDULERS
int dirty_no, change_dirty, dirty_online;
-#else
- ASSERT(!dirty_only);
-#endif
if (new_no < 1)
return ERTS_SCHDLR_SSPND_EINVAL;
@@ -8052,11 +7942,9 @@ erts_set_schedulers_online(Process *p,
else if (erts_no_schedulers < new_no)
return ERTS_SCHDLR_SSPND_EINVAL;
-#ifdef ERTS_DIRTY_SCHEDULERS
if (dirty_only)
resume_proc = 0;
else
-#endif
{
resume_proc = 1;
/*
@@ -8077,9 +7965,7 @@ erts_set_schedulers_online(Process *p,
have_unlocked_plocks = 0;
no = (int) new_no;
-#ifdef ERTS_DIRTY_SCHEDULERS
if (!dirty_only)
-#endif
{
changing = erts_atomic32_read_nob(&schdlr_sspnd.changing);
if (changing & ERTS_SCHDLR_SSPND_CHNG_ONLN) {
@@ -8107,12 +7993,6 @@ erts_set_schedulers_online(Process *p,
*old_no = online = schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
ERTS_SCHED_NORMAL);
-#ifndef ERTS_DIRTY_SCHEDULERS
- if (no == online) {
- res = ERTS_SCHDLR_SSPND_DONE;
- goto done;
- }
-#else /* ERTS_DIRTY_SCHEDULERS */
dirty_online = schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
ERTS_SCHED_DIRTY_CPU);
if (dirty_only)
@@ -8164,7 +8044,6 @@ erts_set_schedulers_online(Process *p,
if (dirty_only)
increase = (dirty_no > dirty_online);
else
-#endif /* ERTS_DIRTY_SCHEDULERS */
{
change_flags |= ERTS_SCHDLR_SSPND_CHNG_ONLN;
schdlr_sspnd_set_nscheds(&schdlr_sspnd.online,
@@ -8178,7 +8057,6 @@ erts_set_schedulers_online(Process *p,
res = ERTS_SCHDLR_SSPND_DONE;
if (increase) {
int ix;
-#ifdef ERTS_DIRTY_SCHEDULERS
if (change_dirty) {
ErtsSchedulerSleepInfo* ssi;
if (schdlr_sspnd.msb.ongoing) {
@@ -8192,7 +8070,6 @@ erts_set_schedulers_online(Process *p,
}
}
if (!dirty_only)
-#endif
{
if (schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing) {
for (ix = online; ix < no; ix++)
@@ -8214,7 +8091,6 @@ erts_set_schedulers_online(Process *p,
}
}
else /* if decrease */ {
-#ifdef ERTS_DIRTY_SCHEDULERS
if (change_dirty) {
if (schdlr_sspnd.msb.ongoing) {
for (ix = dirty_no; ix < dirty_online; ix++)
@@ -8232,7 +8108,6 @@ erts_set_schedulers_online(Process *p,
}
}
if (!dirty_only)
-#endif
{
if (schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing) {
for (ix = no; ix < online; ix++)
@@ -8370,7 +8245,6 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
wake_scheduler(rq);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
if (!normal) {
ERTS_RUNQ_FLGS_SET_NOB(ERTS_RUNQ_IX(0), ERTS_RUNQ_FLG_MSB_EXEC);
erts_atomic32_read_bor_nob(&ERTS_RUNQ_IX(0)->scheduler->ssi->flags,
@@ -8380,7 +8254,6 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++)
dio_sched_ix_suspend_wake(ix);
}
-#endif
wait_until_msb:
@@ -8446,7 +8319,6 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
for (ix = online; ix < erts_no_run_queues; ix++)
suspend_run_queue(ERTS_RUNQ_IX(ix));
}
-#ifdef ERTS_DIRTY_SCHEDULERS
if (!schdlr_sspnd.msb.ongoing) {
/* Get rid of msb-exec flag in run-queue of scheduler 1 */
resume_run_queue(ERTS_RUNQ_IX(0));
@@ -8457,7 +8329,6 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++)
dio_sched_ix_resume_wake(ix);
}
-#endif
}
unblock_res:
@@ -8609,7 +8480,6 @@ sched_thread_func(void *vesdp)
return NULL;
}
-#ifdef ERTS_DIRTY_SCHEDULERS
static void*
sched_dirty_cpu_thread_func(void *vesdp)
{
@@ -8701,7 +8571,6 @@ sched_dirty_io_thread_func(void *vesdp)
no);
return NULL;
}
-#endif
static ethr_tid aux_tid;
@@ -8757,7 +8626,6 @@ erts_start_schedulers(void)
}
erts_no_schedulers = actual;
-#ifdef ERTS_DIRTY_SCHEDULERS
{
int ix;
for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
@@ -8777,7 +8645,6 @@ erts_start_schedulers(void)
erts_exit(ERTS_ERROR_EXIT, "Failed to create dirty io scheduler thread %d\n", ix);
}
}
-#endif
ERTS_THR_MEMORY_BARRIER;
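erts_start_schedulers() now always spawns the dirty CPU and dirty IO scheduler threads, and a creation failure is fatal (erts_exit with ERTS_ERROR_EXIT). A self-contained sketch of that startup pattern, assuming pthreads and hypothetical names:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *dirty_cpu_loop(void *arg)  /* stand-in for the real loop */
    {
        (void)arg;
        return NULL;
    }

    static void start_dirty_cpu_schedulers(pthread_t *tids, int n)
    {
        for (int ix = 0; ix < n; ix++) {
            if (pthread_create(&tids[ix], NULL, dirty_cpu_loop, NULL) != 0) {
                fprintf(stderr,
                        "Failed to create dirty cpu scheduler thread %d\n",
                        ix);
                exit(EXIT_FAILURE);
            }
        }
    }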
@@ -8967,7 +8834,6 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
ASSERT((ERTS_PSFLG_RUNNING | ERTS_PSFLG_DIRTY_RUNNING)
& erts_atomic32_read_nob(&rp->state));
-#ifdef ERTS_DIRTY_SCHEDULERS
if (!suspend
&& (erts_atomic32_read_nob(&rp->state)
& ERTS_PSFLG_DIRTY_RUNNING)) {
@@ -8979,7 +8845,6 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
}
goto done;
}
-#endif
running:
@@ -9495,7 +9360,6 @@ erts_internal_is_process_executing_dirty_1(BIF_ALIST_1)
{
if (is_not_internal_pid(BIF_ARG_1))
BIF_ERROR(BIF_P, BADARG);
-#ifdef ERTS_DIRTY_SCHEDULERS
else {
Process *rp = erts_proc_lookup(BIF_ARG_1);
if (rp) {
@@ -9506,7 +9370,6 @@ erts_internal_is_process_executing_dirty_1(BIF_ALIST_1)
}
}
}
-#endif
BIF_RET(am_false);
}
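The BIF above now always inspects the process state word; it answers true exactly when either of the two dirty-running bits is set. A sketch of that test with C11 atomics (the flag values are hypothetical):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define PSFLG_DIRTY_RUNNING      (1u << 0)  /* hypothetical values */
    #define PSFLG_DIRTY_RUNNING_SYS  (1u << 1)

    static bool executing_dirty(atomic_uint *state)
    {
        unsigned s = atomic_load_explicit(state, memory_order_relaxed);
        return (s & (PSFLG_DIRTY_RUNNING | PSFLG_DIRTY_RUNNING_SYS)) != 0;
    }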
@@ -9522,7 +9385,6 @@ run_queues_len_aux(ErtsRunQueue *rq, Uint *tot_len, Uint *qlen, int *ip, int inc
ASSERT(rq_len >= 0);
if (incl_active_sched) {
-#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) {
erts_aint32_t dcnt;
if (ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(rq)) {
@@ -9537,7 +9399,6 @@ run_queues_len_aux(ErtsRunQueue *rq, Uint *tot_len, Uint *qlen, int *ip, int inc
rq_len += (Sint) dcnt;
}
else
-#endif
{
if (ERTS_RUNQ_FLGS_GET_NOB(rq) & ERTS_RUNQ_FLG_EXEC)
rq_len++;
@@ -9556,12 +9417,10 @@ erts_run_queues_len(Uint *qlen, int atomic_queues_read, int incl_active_sched,
Uint len = 0;
int no_rqs = erts_no_run_queues;
-#ifdef ERTS_DIRTY_SCHEDULERS
if (incl_dirty_io)
no_rqs += ERTS_NUM_DIRTY_RUNQS;
else
no_rqs += ERTS_NUM_DIRTY_CPU_RUNQS;
-#endif
if (atomic_queues_read) {
ERTS_ATOMIC_FOREACH_RUNQ_X(rq, no_rqs,
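erts_run_queues_len() now always counts the dirty CPU run queue and adds the dirty IO queue only when incl_dirty_io is set. A simplified sketch of that accounting over a flat queue array (names hypothetical):

    typedef struct {
        int len;
        int is_dirty_io;
    } runq;

    static unsigned total_runq_len(const runq *rqs, int n_queues,
                                   int incl_dirty_io)
    {
        unsigned total = 0;
        for (int ix = 0; ix < n_queues; ix++) {
            if (rqs[ix].is_dirty_io && !incl_dirty_io)
                continue;   /* dirty IO queues are opt-in */
            total += (unsigned) rqs[ix].len;
        }
        return total;
    }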
@@ -9895,7 +9754,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
* Clean up after the process being scheduled out.
*/
if (!p) { /* NULL in the very first schedule() call */
-#ifdef ERTS_DIRTY_SCHEDULERS
is_normal_sched = !esdp;
if (is_normal_sched) {
esdp = erts_get_scheduler_data();
@@ -9904,17 +9762,12 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
else {
ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
}
-#else
- esdp = erts_get_scheduler_data();
- is_normal_sched = 1;
-#endif
rq = erts_get_runq_current(esdp);
ASSERT(esdp);
fcalls = (int) erts_atomic32_read_acqb(&function_calls);
actual_reds = reds = 0;
erts_runq_lock(rq);
} else {
-#ifdef ERTS_DIRTY_SCHEDULERS
is_normal_sched = !esdp;
if (is_normal_sched) {
esdp = p->scheduler_data;
@@ -9923,10 +9776,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
else {
ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
}
-#else
- esdp = p->scheduler_data;
- is_normal_sched = 1;
-#endif
ASSERT(esdp->current_process == p
|| esdp->free_process == p);
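In both branches above, erts_schedule() now uses the same convention on every build: a NULL esdp means the caller is a normal scheduler thread, so the scheduler data is fetched from thread-local storage, while a dirty scheduler passes its own esdp in. A sketch of that convention (hypothetical types, C11 _Thread_local):

    typedef struct sched_data { int is_dirty; } sched_data;

    static _Thread_local sched_data *current_sched;  /* set at thread start */

    static sched_data *resolve_sched(sched_data *esdp, int *is_normal)
    {
        *is_normal = (esdp == NULL);
        return *is_normal ? current_sched : esdp;
    }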
@@ -10152,7 +10001,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
*/
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
if ((flags & ERTS_RUNQ_FLG_SUSPENDED)
-#ifdef ERTS_DIRTY_SCHEDULERS
/* If multi scheduling block is ongoing and we have
* dirty work, suspend and let a dirty
* scheduler handle the work... */
@@ -10160,7 +10008,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
| ERTS_RUNQ_FLG_MSB_EXEC))
== ERTS_RUNQ_FLG_MSB_EXEC))
&& have_dirty_work())
-#endif
) {
non_empty_runq(rq);
flags |= ERTS_RUNQ_FLG_NONEMPTY;
@@ -10319,10 +10166,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
state = erts_atomic32_read_nob(&p->state);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
if (!is_normal_sched)
clear_proc_dirty_queue_bit(p, rq, qbit);
-#endif
while (1) {
erts_aint32_t exp, new;
@@ -10340,7 +10185,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
| ERTS_PSFLG_DIRTY_RUNNING
| ERTS_PSFLG_DIRTY_RUNNING_SYS
| ERTS_PSFLG_FREE)))
-#ifdef ERTS_DIRTY_SCHEDULERS
| (((state & (ERTS_PSFLG_RUNNING
| ERTS_PSFLG_FREE
@@ -10349,7 +10193,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
| ERTS_PSFLG_EXITING))
== ERTS_PSFLG_EXITING)
& (!!is_normal_sched))
-#endif
)
& ((state & (ERTS_PSFLG_SUSPENDED
| ERTS_PSFLG_EXITING
@@ -10358,11 +10201,9 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
| ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_DIRTY_ACTIVE_SYS))
!= ERTS_PSFLG_SUSPENDED)
-#ifdef ERTS_DIRTY_SCHEDULERS
& (!(state & (ERTS_PSFLG_EXITING
| ERTS_PSFLG_PENDING_EXIT))
| (!!is_normal_sched))
-#endif
);
if (run_process) {
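The run_process expression above is deliberately branch-free: every condition is reduced to 0 or 1 and combined with bitwise & and |, so eligibility is computed from one sampled state word without short-circuit jumps. A reduced sketch of the idiom (flag values hypothetical):

    #define PSFLG_RUNNING      (1u << 0)  /* hypothetical values */
    #define PSFLG_RUNNING_SYS  (1u << 1)
    #define PSFLG_EXITING      (1u << 2)

    static int may_run(unsigned state, int is_normal_sched)
    {
        /* eligible if not currently running anywhere... */
        unsigned not_running =
            !(state & (PSFLG_RUNNING | PSFLG_RUNNING_SYS));
        /* ...or if it is purely exiting and we are a normal scheduler */
        unsigned exiting_only =
            ((state & (PSFLG_RUNNING | PSFLG_EXITING)) == PSFLG_EXITING);
        return (int)(not_running |
                     (exiting_only & (unsigned)!!is_normal_sched));
    }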
@@ -10436,10 +10277,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
state = erts_atomic32_read_nob(&p->state);
-#ifndef ERTS_DIRTY_SCHEDULERS
- ASSERT(!p->scheduler_data);
- p->scheduler_data = esdp;
-#else /* ERTS_DIRTY_SCHEDULERS */
if (is_normal_sched) {
if ((!!(state & ERTS_PSFLGS_DIRTY_WORK))
& (!(state & ERTS_PSFLG_ACTIVE_SYS))) {
@@ -10475,7 +10312,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
: (rq == ERTS_DIRTY_IO_RUNQ
&& (state & ERTS_PSFLG_DIRTY_IO_PROC)));
}
-#endif
if (state & ERTS_PSFLG_PENDING_EXIT) {
erts_handle_pending_exit(p,
@@ -10520,10 +10356,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
reds -= cost;
if (reds <= 0)
goto sched_out_proc;
-#ifdef ERTS_DIRTY_SCHEDULERS
if (state & ERTS_PSFLGS_DIRTY_WORK)
goto sched_out_proc;
-#endif
}
ASSERT(state & psflg_running_sys);
@@ -10561,10 +10395,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
reds -= cost;
if (reds <= 0)
goto sched_out_proc;
-#ifdef ERTS_DIRTY_SCHEDULERS
if (p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC))
goto sched_out_proc;
-#endif
}
}
}
@@ -10608,13 +10440,11 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st,
Eterm st_result, int normal_sched)
{
Process *rp;
-#ifdef ERTS_DIRTY_SCHEDULERS
if (!normal_sched)
rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
st->requester, 0,
ERTS_P2P_FLG_INC_REFC);
else
-#endif
rp = erts_proc_lookup(st->requester);
if (rp) {
ErtsProcLocks rp_locks;
@@ -10664,10 +10494,8 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st,
if (rp_locks)
erts_proc_unlock(rp, rp_locks);
-#ifdef ERTS_DIRTY_SCHEDULERS
if (!normal_sched)
erts_proc_dec_refc(rp);
-#endif
}
erts_cleanup_offheap(&st->off_heap);
@@ -10823,9 +10651,7 @@ done:
}
static void save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio);
-#ifdef ERTS_DIRTY_SCHEDULERS
static void save_dirty_task(Process *c_p, ErtsProcSysTask *st);
-#endif
static int
execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
@@ -10873,13 +10699,11 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
FLAGS(c_p) |= F_NEED_FULLSWEEP;
}
reds -= scheduler_gc_proc(c_p, reds);
-#ifdef ERTS_DIRTY_SCHEDULERS
if (c_p->flags & (F_DIRTY_MAJOR_GC|F_DIRTY_MINOR_GC)) {
save_dirty_task(c_p, st);
st = NULL;
break;
}
-#endif
if (type == ERTS_PSTT_GC_MAJOR)
minor_gc = major_gc = 1;
else
@@ -10921,13 +10745,11 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
fcalls, do_gc);
reds -= cla_reds;
if (is_non_value(st_res)) {
-#ifdef ERTS_DIRTY_SCHEDULERS
if (c_p->flags & F_DIRTY_CLA) {
save_dirty_task(c_p, st);
st = NULL;
break;
}
-#endif
/* GC was needed, but GC was disabled */
save_gc_task(c_p, st, st_prio);
st = NULL;
@@ -10985,13 +10807,11 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds)
Eterm st_res;
int st_prio;
-#ifdef ERTS_DIRTY_SCHEDULERS
if (c_p->dirty_sys_tasks) {
st = c_p->dirty_sys_tasks;
c_p->dirty_sys_tasks = st->next;
}
else
-#endif
{
st = fetch_sys_task(c_p, state, &qmask, &st_prio);
if (!st)
@@ -11027,7 +10847,6 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds)
return reds;
}
-#ifdef ERTS_DIRTY_SCHEDULERS
void
erts_execute_dirty_system_task(Process *c_p)
@@ -11170,7 +10989,6 @@ dispatch_system_task(Process *c_p, erts_aint_t fail_state,
return ret;
}
-#endif
static BIF_RETTYPE
request_system_task(Process *c_p, Eterm requester, Eterm target,
@@ -11275,7 +11093,6 @@ request_system_task(Process *c_p, Eterm requester, Eterm target,
st->type = ERTS_PSTT_CPC;
if (!rp)
goto noproc;
-#ifdef ERTS_DIRTY_SCHEDULERS
/*
* If the process should start executing dirty
* code it is important that this task is
@@ -11283,7 +11100,6 @@ request_system_task(Process *c_p, Eterm requester, Eterm target,
*/
fail_state |= (ERTS_PSFLG_DIRTY_RUNNING
| ERTS_PSFLG_DIRTY_RUNNING_SYS);
-#endif
break;
case am_copy_literals:
@@ -11305,14 +11121,12 @@ request_system_task(Process *c_p, Eterm requester, Eterm target,
noproc:
failure = noproc_res;
}
-#ifdef ERTS_DIRTY_SCHEDULERS
else if (fail_state & (ERTS_PSFLG_DIRTY_RUNNING
| ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
ret = dispatch_system_task(c_p, fail_state, st,
target, priority, operation);
goto cleanup_return;
}
-#endif
else {
ERTS_INTERNAL_ERROR("Unknown failure schedule_process_sys_task()");
failure = am_internal_error;
@@ -11328,9 +11142,7 @@ badarg:
ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
-#ifdef ERTS_DIRTY_SCHEDULERS
cleanup_return:
-#endif
if (st) {
erts_cleanup_offheap(&st->off_heap);
@@ -11394,7 +11206,6 @@ erts_schedule_ets_free_fixation(Eterm pid, DbFixation* fix)
erts_schedule_generic_sys_task(pid, ERTS_PSTT_ETS_FREE_FIXATION, fix);
}
-#ifdef ERTS_DIRTY_SCHEDULERS
static void
flush_dirty_trace_messages(void *vpid)
@@ -11415,7 +11226,6 @@ flush_dirty_trace_messages(void *vpid)
}
}
-#endif /* ERTS_DIRTY_SCHEDULERS */
void
erts_schedule_flush_trace_messages(Process *proc, int force_on_proc)
@@ -11423,7 +11233,6 @@ erts_schedule_flush_trace_messages(Process *proc, int force_on_proc)
ErtsThrPrgrDelayHandle dhndl;
Eterm pid = proc->common.id;
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_aint32_t state;
if (!force_on_proc) {
@@ -11433,7 +11242,6 @@ erts_schedule_flush_trace_messages(Process *proc, int force_on_proc)
goto sched_flush_dirty;
}
}
-#endif
dhndl = erts_thr_progress_unmanaged_delay();
@@ -11441,7 +11249,6 @@ erts_schedule_flush_trace_messages(Process *proc, int force_on_proc)
erts_thr_progress_unmanaged_continue(dhndl);
-#ifdef ERTS_DIRTY_SCHEDULERS
if (!force_on_proc) {
state = erts_atomic32_read_mb(&proc->state);
if (state & (ERTS_PSFLG_DIRTY_RUNNING
@@ -11471,7 +11278,6 @@ erts_schedule_flush_trace_messages(Process *proc, int force_on_proc)
erts_schedule_misc_aux_work(1, flush_dirty_trace_messages, vargp);
}
}
-#endif
}
static void
@@ -11532,14 +11338,12 @@ save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio)
}
}
-#ifdef ERTS_DIRTY_SCHEDULERS
static void
save_dirty_task(Process *c_p, ErtsProcSysTask *st)
{
st->next = c_p->dirty_sys_tasks;
c_p->dirty_sys_tasks = st;
}
-#endif
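save_dirty_task() above, together with the pop in cleanup_sys_tasks(), treats dirty_sys_tasks as an intrusive LIFO: parking a task that must continue on a dirty scheduler is a two-pointer push. A generic sketch:

    typedef struct task { struct task *next; } task;

    static void push_task(task **head, task *st)
    {
        st->next = *head;   /* link in front of the current head */
        *head = st;
    }

    static task *pop_task(task **head)
    {
        task *st = *head;
        if (st)
            *head = st->next;
        return st;
    }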
int
erts_set_gc_state(Process *c_p, int enable)
@@ -11876,10 +11680,8 @@ static void early_init_process_struct(void *varg, Eterm data)
Process *proc = arg->proc;
proc->common.id = make_internal_pid(data);
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_atomic32_init_nob(&proc->dirty_state, 0);
proc->dirty_sys_tasks = NULL;
-#endif
erts_atomic32_init_relb(&proc->state, arg->state);
RUNQ_SET_RQ(&proc->run_queue, arg->run_queue);
@@ -12369,10 +12171,8 @@ void erts_init_empty_process(Process *p)
p->last_old_htop = NULL;
#endif
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_atomic32_init_nob(&p->dirty_state, 0);
p->dirty_sys_tasks = NULL;
-#endif
erts_atomic32_init_nob(&p->state, (erts_aint32_t) PRIORITY_NORMAL);
p->scheduler_data = NULL;
@@ -12943,14 +12743,12 @@ send_exit_signal(Process *c_p, /* current process if and only
Eterm *hp;
ErlOffHeap *ohp;
Uint rsn_sz = size_object(rsn);
-#ifdef ERTS_DIRTY_SCHEDULERS
if (state & ERTS_PSFLG_DIRTY_RUNNING) {
bp = new_message_buffer(rsn_sz);
ohp = &bp->off_heap;
hp = &bp->mem[0];
}
else
-#endif
{
hp = HAlloc(rp, rsn_sz);
ohp = &rp->off_heap;
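The branch above picks where the exit reason is built: if the receiver is busy on a dirty scheduler its heap cannot be touched, so the reason is copied into a detached message buffer instead of being allocated with HAlloc. A sketch of that choice, with malloc standing in for new_message_buffer() and all names hypothetical:

    #include <stdlib.h>

    typedef struct {
        char *detached;    /* non-NULL if the reason lives off-heap */
        char *write_ptr;   /* where the exit reason gets copied */
    } reason_loc;

    static reason_loc place_reason(int target_dirty_running,
                                   char *target_heap, size_t sz)
    {
        reason_loc loc = { NULL, NULL };
        if (target_dirty_running) {
            loc.detached = malloc(sz);   /* receiver's heap is in use */
            loc.write_ptr = loc.detached;
        } else {
            loc.write_ptr = target_heap; /* safe to build on the heap */
        }
        return loc;
    }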
@@ -12990,10 +12788,6 @@ send_exit_signal(Process *c_p, /* current process if and only
* has been scheduled, we may need to add it to a normal run
* queue...
*/
-#ifndef ERTS_DIRTY_SCHEDULERS
- (void) erts_atomic32_read_bor_relb(&rp->state,
- ERTS_PSFLG_PENDING_EXIT);
-#else
{
erts_aint32_t a = erts_atomic32_read_nob(&rp->state);
while (1) {
@@ -13011,7 +12805,6 @@ send_exit_signal(Process *c_p, /* current process if and only
}
}
}
-#endif
}
}
/* else:
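The retained code above sets the pending-exit bit with a read-modify-CAS loop rather than a blind read_bor, so the decision can be made against a consistent snapshot of the state word. A minimal sketch of the idiom with C11 atomics (flag value hypothetical):

    #include <stdatomic.h>

    #define PSFLG_PENDING_EXIT (1u << 7)   /* hypothetical value */

    static void set_pending_exit(atomic_uint *state)
    {
        unsigned a = atomic_load(state);
        for (;;) {
            unsigned n = a | PSFLG_PENDING_EXIT;
            if (atomic_compare_exchange_weak(state, &a, n))
                break;   /* on failure, a is reloaded automatically */
        }
    }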
@@ -13508,9 +13301,7 @@ erts_continue_exit_process(Process *p)
erts_set_gc_state(p, 1);
state = erts_atomic32_read_acqb(&p->state);
if (state & ERTS_PSFLG_ACTIVE_SYS
-#ifdef ERTS_DIRTY_SCHEDULERS
|| p->dirty_sys_tasks
-#endif
) {
if (cleanup_sys_tasks(p, state, CONTEXT_REDS) >= CONTEXT_REDS/2)
goto yield;
@@ -13520,9 +13311,7 @@ erts_continue_exit_process(Process *p)
erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
ASSERT(p->sys_task_qs == NULL);
ASSERT(ERTS_PROC_GET_DELAYED_GC_TASK_QS(p) == NULL);
-#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(p->dirty_sys_tasks == NULL);
-#endif
erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
#endif
@@ -13619,7 +13408,6 @@ erts_continue_exit_process(Process *p)
break;
}
-#ifdef ERTS_DIRTY_SCHEDULERS
if (a & (ERTS_PSFLG_DIRTY_RUNNING
| ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
p->flags |= F_DELAYED_DEL_PROC;
@@ -13629,7 +13417,6 @@ erts_continue_exit_process(Process *p)
* when done with the process...
*/
}
-#endif
if (refc_inced && !(n & ERTS_PSFLG_IN_RUNQ))
erts_proc_dec_refc(p);
@@ -14022,10 +13809,8 @@ void erts_halt(int code)
if (-1 == erts_atomic32_cmpxchg_acqb(&erts_halt_progress,
erts_no_schedulers,
-1)) {
-#ifdef ERTS_DIRTY_SCHEDULERS
ERTS_RUNQ_FLGS_SET(ERTS_DIRTY_CPU_RUNQ, ERTS_RUNQ_FLG_HALTING);
ERTS_RUNQ_FLGS_SET(ERTS_DIRTY_IO_RUNQ, ERTS_RUNQ_FLG_HALTING);
-#endif
erts_halt_code = code;
notify_reap_ports_relb();
}
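erts_halt() above guards its one-time work with a compare-and-swap from a -1 sentinel; only the first caller flips erts_halt_progress and gets to flag the two dirty run queues as halting. A sketch of that one-shot guard:

    #include <stdatomic.h>

    static atomic_int halt_progress = -1;   /* -1: halt not yet initiated */

    static int begin_halt(int n_schedulers)
    {
        int expected = -1;
        /* true for exactly one caller; that caller does the one-time work */
        return atomic_compare_exchange_strong(&halt_progress,
                                              &expected, n_schedulers);
    }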
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 150f22556c..5afe0acb7b 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -110,16 +110,12 @@ extern int erts_sched_compact_load;
extern int erts_sched_balance_util;
extern Uint erts_no_schedulers;
extern Uint erts_no_total_schedulers;
-#ifdef ERTS_DIRTY_SCHEDULERS
extern Uint erts_no_dirty_cpu_schedulers;
extern Uint erts_no_dirty_io_schedulers;
-#endif
extern Uint erts_no_run_queues;
extern int erts_sched_thread_suggested_stack_size;
-#ifdef ERTS_DIRTY_SCHEDULERS
extern int erts_dcpu_sched_thread_suggested_stack_size;
extern int erts_dio_sched_thread_suggested_stack_size;
-#endif
#define ERTS_SCHED_THREAD_MIN_STACK_SIZE 20 /* Kilo words */
#define ERTS_SCHED_THREAD_MAX_STACK_SIZE 8192 /* Kilo words */
@@ -362,12 +358,10 @@ typedef enum {
typedef struct ErtsSchedulerSleepInfo_ ErtsSchedulerSleepInfo;
-#ifdef ERTS_DIRTY_SCHEDULERS
typedef struct {
erts_spinlock_t lock;
ErtsSchedulerSleepInfo *list;
} ErtsSchedulerSleepList;
-#endif
struct ErtsSchedulerSleepInfo_ {
ErtsSchedulerSleepInfo *next;
@@ -477,9 +471,7 @@ struct ErtsRunQueue_ {
erts_mtx_t mtx;
erts_cnd_t cnd;
-#ifdef ERTS_DIRTY_SCHEDULERS
ErtsSchedulerSleepList sleepers;
-#endif
ErtsSchedulerData *scheduler;
int waiting; /* < 0 in sys schedule; > 0 on cnd variable */
@@ -616,13 +608,11 @@ typedef struct {
(&(ESDP)->aux_work_data.yield.NAME)
void erts_notify_new_aux_yield_work(ErtsSchedulerData *esdp);
-#ifdef ERTS_DIRTY_SCHEDULERS
typedef enum {
ERTS_DIRTY_CPU_SCHEDULER,
ERTS_DIRTY_IO_SCHEDULER
} ErtsDirtySchedulerType;
-#endif
struct ErtsSchedulerData_ {
/*
@@ -645,10 +635,8 @@ struct ErtsSchedulerData_ {
Process *current_process;
ErtsSchedType type;
Uint no; /* Scheduler number for normal schedulers */
-#ifdef ERTS_DIRTY_SCHEDULERS
Uint dirty_no; /* Scheduler number for dirty schedulers */
Process *dirty_shadow_process;
-#endif
Port *current_port;
ErtsRunQueue *run_queue;
int virtual_reds;
@@ -687,10 +675,8 @@ typedef union {
} ErtsAlignedSchedulerData;
extern ErtsAlignedSchedulerData *erts_aligned_scheduler_data;
-#ifdef ERTS_DIRTY_SCHEDULERS
extern ErtsAlignedSchedulerData *erts_aligned_dirty_cpu_scheduler_data;
extern ErtsAlignedSchedulerData *erts_aligned_dirty_io_scheduler_data;
-#endif
#if defined(ERTS_ENABLE_LOCK_CHECK)
@@ -1058,14 +1044,10 @@ struct process {
Uint64 bin_old_vheap; /* Virtual old heap size for binaries */
ErtsProcSysTaskQs *sys_task_qs;
-#ifdef ERTS_DIRTY_SCHEDULERS
ErtsProcSysTask *dirty_sys_tasks;
-#endif
erts_atomic32_t state; /* Process state flags (see ERTS_PSFLG_*) */
-#ifdef ERTS_DIRTY_SCHEDULERS
erts_atomic32_t dirty_state; /* Process dirty state flags (see ERTS_PDSFLG_*) */
-#endif
ErlMessageInQueue msg_inq;
ErlTraceMessageQueue *trace_msg_q;
@@ -1222,7 +1204,6 @@ void erts_check_for_holes(Process* p);
#define ERTS_PSFLGS_GET_PRQ_PRIO(PSFLGS) \
(((PSFLGS) >> ERTS_PSFLGS_PRQ_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK)
-#ifdef ERTS_DIRTY_SCHEDULERS
/*
* Flags in the dirty_state field.
@@ -1249,7 +1230,6 @@ void erts_check_for_holes(Process* p);
| ERTS_PDSFLG_IN_CPU_PRQ_HIGH \
| ERTS_PDSFLG_IN_CPU_PRQ_NORMAL\
| ERTS_PDSFLG_IN_CPU_PRQ_LOW)
-#endif
/*
@@ -1514,20 +1494,14 @@ extern int erts_system_profile_ts_type;
} \
} while (0)
-#if defined(ERTS_DIRTY_SCHEDULERS)
#define ERTS_NUM_DIRTY_CPU_RUNQS 1
#define ERTS_NUM_DIRTY_IO_RUNQS 1
-#else
-#define ERTS_NUM_DIRTY_CPU_RUNQS 0
-#define ERTS_NUM_DIRTY_IO_RUNQS 0
-#endif
#define ERTS_NUM_DIRTY_RUNQS (ERTS_NUM_DIRTY_CPU_RUNQS+ERTS_NUM_DIRTY_IO_RUNQS)
#define ERTS_RUNQ_IX(IX) \
(ASSERT(0 <= (IX) && (IX) < erts_no_run_queues+ERTS_NUM_DIRTY_RUNQS), \
&erts_aligned_run_queues[(IX)].runq)
-#ifdef ERTS_DIRTY_SCHEDULERS
#define ERTS_RUNQ_IX_IS_DIRTY(IX) \
(ASSERT(0 <= (IX) && (IX) < erts_no_run_queues+ERTS_NUM_DIRTY_RUNQS), \
(erts_no_run_queues <= (IX)))
@@ -1538,13 +1512,9 @@ extern int erts_system_profile_ts_type;
#define ERTS_DIRTY_IO_RUNQ (&erts_aligned_run_queues[erts_no_run_queues+1].runq)
#define ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(RQ) ((RQ) == ERTS_DIRTY_CPU_RUNQ)
#define ERTS_RUNQ_IS_DIRTY_IO_RUNQ(RQ) ((RQ) == ERTS_DIRTY_IO_RUNQ)
-#else
-#define ERTS_RUNQ_IX_IS_DIRTY(IX) 0
-#endif
#define ERTS_SCHEDULER_IX(IX) \
(ASSERT(0 <= (IX) && (IX) < erts_no_schedulers), \
&erts_aligned_scheduler_data[(IX)].esd)
-#ifdef ERTS_DIRTY_SCHEDULERS
#define ERTS_DIRTY_CPU_SCHEDULER_IX(IX) \
(ASSERT(0 <= (IX) && (IX) < erts_no_dirty_cpu_schedulers), \
&erts_aligned_dirty_cpu_scheduler_data[(IX)].esd)
@@ -1557,24 +1527,14 @@ extern int erts_system_profile_ts_type;
((ESDP)->type == ERTS_SCHED_DIRTY_CPU)
#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) \
((ESDP)->type == ERTS_SCHED_DIRTY_IO)
-#else /* !ERTS_DIRTY_SCHEDULERS */
-#define ERTS_RUNQ_IX_IS_DIRTY(IX) 0
-#define ERTS_SCHEDULER_IS_DIRTY(ESDP) 0
-#define ERTS_SCHEDULER_IS_DIRTY_CPU(ESDP) 0
-#define ERTS_SCHEDULER_IS_DIRTY_IO(ESDP) 0
-#endif
void erts_pre_init_process(void);
void erts_late_init_process(void);
void erts_early_init_scheduling(int);
void erts_init_scheduling(int, int
-#ifdef ERTS_DIRTY_SCHEDULERS
, int, int, int
-#endif
);
-#ifdef ERTS_DIRTY_SCHEDULERS
void erts_execute_dirty_system_task(Process *c_p);
-#endif
int erts_set_gc_state(Process *c_p, int enable);
Eterm erts_sched_wall_time_request(Process *c_p, int set, int enable,
int dirty_cpu, int want_dirty_io);
@@ -2241,7 +2201,6 @@ ErtsSchedulerData *erts_proc_sched_data(Process *c_p)
ErtsSchedulerData *esdp;
ASSERT(c_p);
esdp = c_p->scheduler_data;
-# if defined(ERTS_DIRTY_SCHEDULERS)
if (esdp) {
ASSERT(esdp == erts_get_scheduler_data());
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
@@ -2251,7 +2210,6 @@ ErtsSchedulerData *erts_proc_sched_data(Process *c_p)
ASSERT(esdp);
ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
}
-# endif
ASSERT(esdp);
return esdp;
}
@@ -2283,11 +2241,9 @@ ERTS_GLB_INLINE
Uint erts_get_scheduler_id(void)
{
ErtsSchedulerData *esdp = erts_get_scheduler_data();
-#ifdef ERTS_DIRTY_SCHEDULERS
if (esdp && ERTS_SCHEDULER_IS_DIRTY(esdp))
return 0;
else
-#endif
return esdp ? esdp->no : (Uint) 0;
}
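The header changes above fix the run-queue layout on every build: indices 0..erts_no_run_queues-1 are the normal queues, immediately followed by one dirty CPU and one dirty IO queue, which is why ERTS_RUNQ_IX_IS_DIRTY reduces to an index comparison. A sketch of that layout (hypothetical helpers):

    static int runq_ix_is_dirty(int ix, int n_normal) { return ix >= n_normal; }
    static int dirty_cpu_runq_ix(int n_normal)        { return n_normal;      }
    static int dirty_io_runq_ix(int n_normal)         { return n_normal + 1;  }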
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index c72ae32f2c..2b0ad0b98a 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -128,10 +128,8 @@ extern Eterm erts_nif_call_function(Process *p, Process *tracee,
struct enif_func_t *,
int argc, Eterm *argv);
-#ifdef ERTS_DIRTY_SCHEDULERS
int erts_call_dirty_nif(ErtsSchedulerData *esdp, Process *c_p,
BeamInstr *I, Eterm *reg);
-#endif /* ERTS_DIRTY_SCHEDULERS */
/* Driver handle (wrapper for old plain handle) */
@@ -911,9 +909,7 @@ extern erts_atomic_t erts_copy_literal_area__;
#define ERTS_COPY_LITERAL_AREA() \
((ErtsLiteralArea *) erts_atomic_read_nob(&erts_copy_literal_area__))
extern Process *erts_literal_area_collector;
-#ifdef ERTS_DIRTY_SCHEDULERS
extern Process *erts_dirty_process_code_checker;
-#endif
extern Process *erts_code_purger;
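With erts_call_dirty_nif declared unconditionally, dirty NIF scheduling is part of every build. For reference, a NIF can hop onto a dirty CPU scheduler through the documented enif_schedule_nif API; the function names here are hypothetical, the API itself is real:

    #include <erl_nif.h>

    /* Long-running body, executed on a dirty CPU scheduler. */
    static ERL_NIF_TERM crunch_dirty(ErlNifEnv *env, int argc,
                                     const ERL_NIF_TERM argv[])
    {
        /* ... heavy computation ... */
        return enif_make_atom(env, "ok");
    }

    /* Exported entry point: bounce onto a dirty scheduler and return. */
    static ERL_NIF_TERM crunch(ErlNifEnv *env, int argc,
                               const ERL_NIF_TERM argv[])
    {
        return enif_schedule_nif(env, "crunch_dirty",
                                 ERL_NIF_DIRTY_JOB_CPU_BOUND,
                                 crunch_dirty, argc, argv);
    }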
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 3f44d6e242..bc1b9b6ef4 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -7484,11 +7484,7 @@ driver_system_info(ErlDrvSysInfo *sip, size_t si_size)
*/
if (si_size >= ERL_DRV_SYS_INFO_SIZE(dirty_scheduler_support)) {
sip->dirty_scheduler_support =
-#ifdef ERTS_DIRTY_SCHEDULERS
1
-#else
- 0
-#endif
;
}
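The guard above is a size-capped struct fill: the driver states how large an ErlDrvSysInfo it knows about, and the emulator only writes fields that fit, so old drivers keep working as the struct grows; dirty_scheduler_support is simply always 1 now. A sketch of the pattern, assuming ERL_DRV_SYS_INFO_SIZE is the usual offsetof-plus-sizeof computation (names hypothetical):

    #include <stddef.h>

    typedef struct {
        int smp_support;
        int dirty_scheduler_support;   /* field appended in a later version */
    } sys_info;

    #define SYS_INFO_SIZE_UPTO(field) \
        (offsetof(sys_info, field) + sizeof(((sys_info *)0)->field))

    static void fill_sys_info(sys_info *sip, size_t si_size)
    {
        if (si_size >= SYS_INFO_SIZE_UPTO(smp_support))
            sip->smp_support = 1;
        if (si_size >= SYS_INFO_SIZE_UPTO(dirty_scheduler_support))
            sip->dirty_scheduler_support = 1;   /* unconditional now */
    }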
diff --git a/erts/emulator/utils/make_tables b/erts/emulator/utils/make_tables
index 47e1528958..094a35ae4b 100755
--- a/erts/emulator/utils/make_tables
+++ b/erts/emulator/utils/make_tables
@@ -59,7 +59,6 @@ my %dirty_bif_tab;
my @bif;
my @bif_info;
-my $dirty_schedulers = 'no';
my $dirty_schedulers_test = 'no';
my $hipe = 'no';
@@ -73,10 +72,6 @@ while (@ARGV && $ARGV[0] =~ /^-(\w+)/) {
$include = shift;
die "No directory for -include argument specified"
unless defined $include;
- } elsif($opt eq '-ds') {
- $dirty_schedulers = shift;
- die "No -ds argument specified"
- unless defined $dirty_schedulers;
} elsif($opt eq '-dst') {
$dirty_schedulers_test = shift;
die "No -dst argument specified"
@@ -140,21 +135,19 @@ while (<>) {
push(@bif_info, [$type, $sched_type, $alias3, $alias]);
} elsif ($type eq 'dirty-cpu' or $type eq 'dirty-io'
or $type eq 'dirty-cpu-test' or $type eq 'dirty-io-test') {
- if ($dirty_schedulers eq 'yes') {
- my($bif,$other) = (@args);
- $bif =~ m@^([a-z_.'0-9]+):(.*)/(\d)$@ or error("invalid BIF");
- my($mod,$name,$arity) = ($1,$2,$3);
- my $mfa = "$mod:$name/$arity";
- if (($type eq 'dirty-cpu')
- or (($dirty_schedulers_test eq 'yes')
- and ($type eq 'dirty-cpu-test'))) {
- $dirty_bif_tab{$mfa} = 'dirty_cpu';
- } elsif (($type eq 'dirty-io')
- or (($dirty_schedulers_test eq 'yes')
- and ($type eq 'dirty-io-test'))) {
- $dirty_bif_tab{$mfa} = 'dirty_io';
- }
- }
+ my($bif,$other) = (@args);
+ $bif =~ m@^([a-z_.'0-9]+):(.*)/(\d)$@ or error("invalid BIF");
+ my($mod,$name,$arity) = ($1,$2,$3);
+ my $mfa = "$mod:$name/$arity";
+ if (($type eq 'dirty-cpu')
+ or (($dirty_schedulers_test eq 'yes')
+ and ($type eq 'dirty-cpu-test'))) {
+ $dirty_bif_tab{$mfa} = 'dirty_cpu';
+ } elsif (($type eq 'dirty-io')
+ or (($dirty_schedulers_test eq 'yes')
+ and ($type eq 'dirty-io-test'))) {
+ $dirty_bif_tab{$mfa} = 'dirty_io';
+ }
} else {
error("invalid line");
}
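After this change make_tables always records dirty-cpu and dirty-io entries in %dirty_bif_tab (the -test variants still depend on the -dst option). Each declaration must match m@^([a-z_.'0-9]+):(.*)/(\d)$@, i.e. mod:name/arity after the type keyword. Hypothetical input lines, shown only to illustrate the format the regex accepts:

    dirty-cpu erts_internal:heavy_op/1
    dirty-io  erts_internal:blocking_read/2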