author     Lukas Larsson <[email protected]>    2019-01-07 14:38:48 +0100
committer  Lukas Larsson <[email protected]>    2019-01-18 14:59:04 +0100
commit     adec898daf9223484f5900f7d247cbaff6f4a11b (patch)
tree       62ec41d2dbec4c964ca5fbb3d10b99a74bd3aefe /erts
parent     be39c49819ea2031f7d11647f34a3519198932e8 (diff)
erts: Use reduction based polling for starved poll-set
When the schedulers never go to sleep (and thus never poll), the fds in the schedulers' poll-sets may never be polled. Before this commit, this was solved by starting a timer when an overload was detected. That approach had issues, as overloads were not always detected in time. This commit therefore reverts to the pre-OTP-21 behaviour of keeping a global reduction counter that makes sure poll is called when it should be.
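
As a rough sketch of the mechanism described above (illustrative only, not the ERTS implementation): every scheduler adds the reductions it just spent to a shared counter; once the total passes a threshold, one scheduler wins the right to poll and resets the counter afterwards. The names (calls_since_poll, should_poll, done_polling) and the 2 * 4000 threshold are assumptions standing in for function_calls, prepare_for_sys_schedule()/clear_sys_scheduling() and 2 * context_reds in the patch below.

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Shared across all schedulers; plays the role of `function_calls`. */
    static atomic_int calls_since_poll;
    /* Plays the role of `doing_sys_schedule`: only one poller at a time. */
    static atomic_int polling;

    #define POLL_THRESHOLD (2 * 4000)   /* cf. fcalls > 2 * context_reds */

    /* Called on every pass through the scheduler loop with the
     * reductions just consumed; returns true if this scheduler
     * should run a check-I/O pass now. */
    static bool should_poll(int reds)
    {
        /* Acquire pairs with the release store in done_polling(). */
        int total = atomic_fetch_add_explicit(&calls_since_poll, reds,
                                              memory_order_acquire) + reds;
        if (total <= POLL_THRESHOLD)
            return false;
        int expected = 0;
        return atomic_compare_exchange_strong(&polling, &expected, 1);
    }

    /* Called after the poll: reset the counter, then let others in. */
    static void done_polling(void)
    {
        atomic_store_explicit(&calls_since_poll, 0, memory_order_release);
        atomic_store(&polling, 0);
    }

A scheduler loop would call should_poll(reds) after each chunk of work and, when it returns true, run its check-I/O pass followed by done_polling(), which is essentially the shape of the `else if` branch added to erts_schedule() in the diff.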
Diffstat (limited to 'erts')
-rw-r--r--  erts/emulator/beam/erl_process.c | 86
-rw-r--r--  erts/emulator/beam/erl_process.h |  2
2 files changed, 37 insertions, 51 deletions
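
One detail worth noting before the diff: the patch resets the counter with erts_atomic32_set_relb() (a release store) while each scheduler bumps it with erts_atomic32_add_read_acqb() (an acquire read-modify-write), so a scheduler that observes the reset also observes the memory effects of the poll that preceded it. A minimal C11 analogue of that pairing, with illustrative names:

    #include <stdatomic.h>

    static atomic_int function_calls;   /* the counter this patch adds */

    /* Polling side, cf. clear_sys_scheduling(): everything the poll
     * wrote happens-before this release store of 0. */
    static void reset_after_poll(void)
    {
        atomic_store_explicit(&function_calls, 0, memory_order_release);
    }

    /* Scheduling side, cf. the add in erts_schedule(): an acquire RMW
     * that reads the reset value synchronizes with the release above. */
    static int bump(int reds)
    {
        return atomic_fetch_add_explicit(&function_calls, reds,
                                         memory_order_acquire) + reds;
    }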
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 2427d87f66..73c99f7fb0 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -344,6 +344,7 @@ erts_sched_stat_t erts_sched_stat;
static erts_tsd_key_t ERTS_WRITE_UNLIKELY(sched_data_key);
#if ERTS_POLL_USE_SCHEDULER_POLLING
+static erts_atomic32_t function_calls;
static erts_atomic32_t doing_sys_schedule;
#endif
static erts_atomic32_t no_empty_run_queues;
@@ -3251,6 +3252,7 @@ poll_thread(void *arg)
static ERTS_INLINE void
clear_sys_scheduling(void)
{
+ erts_atomic32_set_relb(&function_calls, 0);
erts_atomic32_set_mb(&doing_sys_schedule, 0);
}
@@ -3273,28 +3275,6 @@ prepare_for_sys_schedule(void)
return 0;
}
-static void
-check_io_timer(void *null)
-{
- ErtsSchedulerData *esdp = erts_get_scheduler_data();
- if (prepare_for_sys_schedule()) {
- erts_check_io(esdp->ssi->psi, ERTS_POLL_NO_TIMEOUT);
- clear_sys_scheduling();
- }
-
- /* The timer is cleared if this schedulers run-queue became empty
- or if the CHECKIO flag was cleared. The CHECKIO flags is cleared
- when a check_balance assigns another scheduler to be the poller in
- the overload scenario. */
- if ((ERTS_RUNQ_FLGS_GET_NOB(esdp->run_queue) & (ERTS_RUNQ_FLG_OUT_OF_WORK|ERTS_RUNQ_FLG_CHECKIO))
- == ERTS_RUNQ_FLG_CHECKIO) {
- erts_start_timer_callback(ERTS_POLL_SCHEDULER_POLLING_TIMEOUT,
- check_io_timer, NULL);
- } else {
- ERTS_RUNQ_FLGS_UNSET(esdp->run_queue, ERTS_RUNQ_FLG_CHECKIO);
- }
-}
-
#else
#define clear_sys_scheduling()
#define prepare_for_sys_schedule() 0
@@ -3406,6 +3386,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
current_time = erts_get_monotonic_time(esdp);
}
}
+ *fcalls = 0;
clear_sys_scheduling();
} else {
if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
@@ -4661,15 +4642,6 @@ check_balance(ErtsRunQueue *c_rq)
if (blnc_no_rqs == 1) {
c_rq->check_balance_reds = INT_MAX;
erts_atomic32_set_nob(&balance_info.checking_balance, 0);
-#if ERTS_POLL_USE_SCHEDULER_POLLING
- c_rq->check_balance_reds = ERTS_RUNQ_CALL_CHECK_BALANCE_REDS;
- if ((ERTS_RUNQ_FLGS_GET_NOB(c_rq) & (ERTS_RUNQ_FLG_OUT_OF_WORK|ERTS_RUNQ_FLG_CHECKIO))
- == 0) {
- ERTS_RUNQ_FLGS_SET(c_rq, ERTS_RUNQ_FLG_CHECKIO);
- erts_start_timer_callback(ERTS_POLL_SCHEDULER_POLLING_TIMEOUT, check_io_timer, NULL);
- }
- ERTS_RUNQ_FLGS_UNSET(c_rq, ERTS_RUNQ_FLGS_MIGRATION_INFO);
-#endif
return;
}
@@ -5189,19 +5161,6 @@ erts_fprintf(stderr, "--------------------------------\n");
/* Publish new migration paths... */
erts_atomic_set_wb(&erts_migration_paths, (erts_aint_t) new_mpaths);
-#if ERTS_POLL_USE_SCHEDULER_POLLING
- if (full_scheds == current_active) {
- ERTS_ASSERT(full_scheds <= current_active);
- /* All active schedulers ran for full, we need to do active polling,
- so we setup a timer that does active polling */
- if (!(ERTS_RUNQ_FLGS_GET_NOB(c_rq) & ERTS_RUNQ_FLG_CHECKIO)) {
- /* Active polling is not running, start it */
- erts_start_timer_callback(ERTS_POLL_SCHEDULER_POLLING_TIMEOUT, check_io_timer, NULL);
- }
- run_queue_info[c_rq->ix].flags |= ERTS_RUNQ_FLG_CHECKIO;
- }
-#endif
-
/* Reset balance statistics in all online queues */
for (qix = 0; qix < blnc_no_rqs; qix++) {
Uint32 flags = run_queue_info[qix].flags;
@@ -5211,8 +5170,6 @@ erts_fprintf(stderr, "--------------------------------\n");
ASSERT(!(flags & ERTS_RUNQ_FLG_OUT_OF_WORK));
if (rq->waiting)
flags |= ERTS_RUNQ_FLG_OUT_OF_WORK;
- if (rq != c_rq)
- flags &= ~ERTS_RUNQ_FLG_CHECKIO;
rq->full_reds_history_sum
= run_queue_info[qix].full_reds_history_sum;
@@ -5222,7 +5179,7 @@ erts_fprintf(stderr, "--------------------------------\n");
ERTS_DBG_CHK_FULL_REDS_HISTORY(rq);
rq->out_of_work_count = 0;
- (void) ERTS_RUNQ_FLGS_READ_BSET(rq, ERTS_RUNQ_FLGS_MIGRATION_INFO|ERTS_RUNQ_FLG_CHECKIO, flags);
+ (void) ERTS_RUNQ_FLGS_READ_BSET(rq, ERTS_RUNQ_FLGS_MIGRATION_INFO, flags);
rq->max_len = erts_atomic32_read_dirty(&rq->len);
for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
ErtsRunQueueInfo *rqi;
@@ -5873,6 +5830,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online, int no_poll_th
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_RUNQS, size_runqs);
#if ERTS_POLL_USE_SCHEDULER_POLLING
erts_atomic32_init_nob(&doing_sys_schedule, 0);
+ erts_atomic32_init_nob(&function_calls, 0);
#endif
erts_atomic32_init_nob(&no_empty_run_queues, 0);
@@ -9217,7 +9175,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
Process *proxy_p = NULL;
ErtsRunQueue *rq;
int context_reds;
- int fcalls;
+ int fcalls = 0;
int actual_reds;
int reds;
Uint32 flags;
@@ -9291,6 +9249,10 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
reds = ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST;
esdp->virtual_reds = 0;
+#if ERTS_POLL_USE_SCHEDULER_POLLING
+ fcalls = (int) erts_atomic32_add_read_acqb(&function_calls, reds);
+#endif
+
ASSERT(esdp && esdp == erts_get_scheduler_data());
rq = erts_get_runq_current(esdp);
@@ -9507,7 +9469,33 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
non_empty_runq(rq);
goto check_activities_to_run;
- }
+ } else if (is_normal_sched &&
+ fcalls > (2 * context_reds) &&
+ prepare_for_sys_schedule()) {
+ ErtsMonotonicTime current_time;
+ /*
+ * Schedule system-level activities.
+ */
+
+ ERTS_MSACC_PUSH_STATE_CACHED_M();
+
+ erts_runq_unlock(rq);
+
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_CHECK_IO);
+ LTTNG2(scheduler_poll, esdp->no, 1);
+
+ erts_check_io(esdp->ssi->psi, ERTS_POLL_NO_TIMEOUT);
+ ERTS_MSACC_POP_STATE_M();
+
+ current_time = erts_get_monotonic_time(esdp);
+ if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
+ erts_bump_timers(esdp->timer_wheel, current_time);
+
+ erts_runq_lock(rq);
+ fcalls = 0;
+ clear_sys_scheduling();
+ goto continue_check_activities_to_run;
+ }
if (flags & ERTS_RUNQ_FLG_MISC_OP)
exec_misc_ops(rq);
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index a1b029adbe..9a22858f2a 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -173,8 +173,6 @@ extern int erts_dio_sched_thread_suggested_stack_size;
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 9))
#define ERTS_RUNQ_FLG_HALTING \
(((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 10))
-#define ERTS_RUNQ_FLG_CHECKIO \
- (((Uint32) 1) << (ERTS_RUNQ_FLG_BASE2 + 11))
#define ERTS_RUNQ_FLG_MAX (ERTS_RUNQ_FLG_BASE2 + 12)