author    Sverker Eriksson <[email protected]>  2016-02-24 17:55:04 +0100
committer Sverker Eriksson <[email protected]>  2016-02-24 17:55:04 +0100
commit    03743cd4193a2ca97f9b9a52a25e63f616e8fc07 (patch)
tree      9c1f4094a2105ec4bf19dd0d16e76b598d0e608d /erts/emulator/beam/erl_process.c
parent    1b094d72ffc56069c72f17c7edd673dbbfe47e39 (diff)
parent    35739bd06776f90526006486b3f4ab7e54f7f951 (diff)
Merge branch 'master' into sverk/master/halt-INT_MIN
Diffstat (limited to 'erts/emulator/beam/erl_process.c')
-rw-r--r--   erts/emulator/beam/erl_process.c | 1524
1 file changed, 870 insertions(+), 654 deletions(-)
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 12687deaeb..d976df1d7e 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -58,11 +58,7 @@
#define ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST (CONTEXT_REDS/10)
-#ifndef ERTS_SCHED_MIN_SPIN
#define ERTS_SCHED_SPIN_UNTIL_YIELD 100
-#else
-#define ERTS_SCHED_SPIN_UNTIL_YIELD 1
-#endif
#define ERTS_SCHED_SYS_SLEEP_SPINCOUNT_VERY_LONG 40
#define ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_VERY_LONG 1000
@@ -119,7 +115,7 @@
#define RUNQ_SET_RQ(X, RQ) erts_smp_atomic_set_nob((X), (erts_aint_t) (RQ))
#ifdef DEBUG
-# if defined(ARCH_64) && !HALFWORD_HEAP
+# if defined(ARCH_64)
# define ERTS_DBG_SET_INVALID_RUNQP(RQP, N) \
(RUNQ_SET_RQ((RQP), (0xdeadbeefdead0003LL | ((N) << 4)))
# define ERTS_DBG_VERIFY_VALID_RUNQP(RQP) \
@@ -152,18 +148,14 @@ extern BeamInstr beam_apply[];
extern BeamInstr beam_exit[];
extern BeamInstr beam_continue_exit[];
-#ifdef __OSE__
-/* Eager check I/O not supported on OSE yet. */
-int erts_eager_check_io = 0;
-#else
-int erts_eager_check_io = 1;
-#endif
-int erts_sched_compact_load;
-int erts_sched_balance_util = 0;
-Uint erts_no_schedulers;
+int ERTS_WRITE_UNLIKELY(erts_default_spo_flags) = 0;
+int ERTS_WRITE_UNLIKELY(erts_eager_check_io) = 1;
+int ERTS_WRITE_UNLIKELY(erts_sched_compact_load);
+int ERTS_WRITE_UNLIKELY(erts_sched_balance_util) = 0;
+Uint ERTS_WRITE_UNLIKELY(erts_no_schedulers);
#ifdef ERTS_DIRTY_SCHEDULERS
-Uint erts_no_dirty_cpu_schedulers;
-Uint erts_no_dirty_io_schedulers;
+Uint ERTS_WRITE_UNLIKELY(erts_no_dirty_cpu_schedulers);
+Uint ERTS_WRITE_UNLIKELY(erts_no_dirty_io_schedulers);
#endif
static char *erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_NO_FLAGS] = {0};
@@ -303,7 +295,7 @@ do { \
erts_sched_stat_t erts_sched_stat;
#ifdef USE_THREADS
-static erts_tsd_key_t sched_data_key;
+static erts_tsd_key_t ERTS_WRITE_UNLIKELY(sched_data_key);
#endif
static erts_smp_atomic32_t function_calls;
@@ -343,16 +335,16 @@ static ErtsAlignedSchedulerSleepInfo *aligned_dirty_io_sched_sleep_info;
static Uint last_reductions;
static Uint last_exact_reductions;
-Uint erts_default_process_flags;
-Eterm erts_system_monitor;
-Eterm erts_system_monitor_long_gc;
-Uint erts_system_monitor_long_schedule;
-Eterm erts_system_monitor_large_heap;
+Eterm ERTS_WRITE_UNLIKELY(erts_system_monitor);
+Eterm ERTS_WRITE_UNLIKELY(erts_system_monitor_long_gc);
+Uint ERTS_WRITE_UNLIKELY(erts_system_monitor_long_schedule);
+Eterm ERTS_WRITE_UNLIKELY(erts_system_monitor_large_heap);
struct erts_system_monitor_flags_t erts_system_monitor_flags;
/* system performance monitor */
Eterm erts_system_profile;
struct erts_system_profile_flags_t erts_system_profile_flags;
+int erts_system_profile_ts_type = ERTS_TRACE_FLG_NOW_TIMESTAMP;
#if ERTS_MAX_PROCESSES > 0x7fffffff
#error "Need to store process_count in another type"
@@ -360,7 +352,8 @@ struct erts_system_profile_flags_t erts_system_profile_flags;
typedef enum {
ERTS_PSTT_GC, /* Garbage Collect */
- ERTS_PSTT_CPC /* Check Process Code */
+ ERTS_PSTT_CPC, /* Check Process Code */
+ ERTS_PSTT_COHMQ /* Change off heap message queue */
} ErtsProcSysTaskType;
#define ERTS_MAX_PROC_SYS_TASK_ARGS 2
@@ -506,9 +499,6 @@ dbg_chk_aux_work_val(erts_aint32_t value)
#if HAVE_ERTS_MSEG
valid |= ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK;
#endif
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- valid |= ERTS_SSI_AUX_WORK_CHECK_CHILDREN;
-#endif
#ifdef ERTS_SSI_AUX_WORK_REAP_PORTS
valid |= ERTS_SSI_AUX_WORK_REAP_PORTS;
#endif
@@ -595,8 +585,6 @@ erts_pre_init_process(void)
= "MISC_THR_PRGR";
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MISC_IX]
= "MISC";
- erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_CHECK_CHILDREN_IX]
- = "CHECK_CHILDREN";
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_SET_TMO_IX]
= "SET_TMO";
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX]
@@ -684,7 +672,6 @@ erts_init_process(int ncpu, int proc_tab_size, int legacy_proc_tab)
last_reductions = 0;
last_exact_reductions = 0;
- erts_default_process_flags = 0;
}
void
@@ -954,6 +941,12 @@ sched_wall_time_change(ErtsSchedulerData *esdp, int working)
}
}
}
+ if (!working) {
+ ERTS_MSACC_SET_STATE_M_X(ERTS_MSACC_STATE_BUSY_WAIT);
+ } else {
+ ERTS_MSACC_SET_STATE_M_X(ERTS_MSACC_STATE_OTHER);
+ }
+
}
typedef struct {
@@ -966,25 +959,10 @@ typedef struct {
erts_smp_atomic32_t refc;
} ErtsSchedWallTimeReq;
-#if !HALFWORD_HEAP
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(swtreq,
- ErtsSchedWallTimeReq,
- 5,
- ERTS_ALC_T_SCHED_WTIME_REQ)
-#else
-static ERTS_INLINE ErtsSchedWallTimeReq *
-swtreq_alloc(void)
-{
- return erts_alloc(ERTS_ALC_T_SCHED_WTIME_REQ,
- sizeof(ErtsSchedWallTimeReq));
-}
-
-static ERTS_INLINE void
-swtreq_free(ErtsSchedWallTimeReq *ptr)
-{
- erts_free(ERTS_ALC_T_SCHED_WTIME_REQ, ptr);
-}
-#endif
+ ErtsSchedWallTimeReq,
+ 5,
+ ERTS_ALC_T_SCHED_WTIME_REQ)
static void
reply_sched_wall_time(void *vswtrp)
@@ -1001,7 +979,7 @@ reply_sched_wall_time(void *vswtrp)
Eterm **hpp;
Uint sz, *szp;
ErlOffHeap *ohp = NULL;
- ErlHeapFragment *bp = NULL;
+ ErtsMessage *mp = NULL;
ASSERT(esdp);
#ifdef ERTS_DIRTY_SCHEDULERS
@@ -1057,12 +1035,12 @@ reply_sched_wall_time(void *vswtrp)
if (hpp)
break;
- hp = erts_alloc_message_heap(sz, &bp, &ohp, rp, &rp_locks);
+ mp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
szp = NULL;
hpp = &hp;
}
- erts_queue_message(rp, &rp_locks, bp, msg, NIL);
+ erts_queue_message(rp, &rp_locks, mp, msg, NIL);
if (swtrp->req_sched == esdp->no)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -1724,15 +1702,17 @@ handle_delayed_dealloc(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waitin
int need_thr_progress = 0;
ErtsThrPrgrVal wakeup = ERTS_THR_PRGR_INVALID;
int more_work = 0;
-
+ ERTS_MSACC_PUSH_STATE_M_X();
#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
#endif
unset_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD);
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_ALLOC);
erts_alloc_scheduler_handle_delayed_dealloc((void *) awdp->esdp,
&need_thr_progress,
&wakeup,
&more_work);
+ ERTS_MSACC_POP_STATE_M_X();
if (more_work) {
if (set_aux_work_flags(ssi, ERTS_SSI_AUX_WORK_DD)
& ERTS_SSI_AUX_WORK_DD_THR_PRGR) {
@@ -2119,34 +2099,6 @@ erts_debug_wait_completed(Process *c_p, int flags)
}
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
-void
-erts_smp_notify_check_children_needed(void)
-{
- int i;
- for (i = 0; i < erts_no_schedulers; i++)
- set_aux_work_flags_wakeup_nob(ERTS_SCHED_SLEEP_INFO_IX(i),
- ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
-#ifdef ERTS_DIRTY_SCHEDULERS
- for (i = 0; i < erts_no_dirty_cpu_schedulers; i++)
- set_aux_work_flags_wakeup_nob(ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(i),
- ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
- for (i = 0; i < erts_no_dirty_io_schedulers; i++)
- set_aux_work_flags_wakeup_nob(ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(i),
- ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
-#endif
-}
-
-static ERTS_INLINE erts_aint32_t
-handle_check_children(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
-{
- unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_CHECK_CHILDREN);
- erts_check_children();
- return aux_work & ~ERTS_SSI_AUX_WORK_CHECK_CHILDREN;
-}
-
-#endif
-
static void
notify_reap_ports_relb(void)
{
@@ -2232,13 +2184,17 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); \
if (!(aux_work & ~ignore)) { \
ERTS_DBG_CHK_AUX_WORK_VAL(aux_work); \
+ ERTS_MSACC_UPDATE_CACHE(); \
+ ERTS_MSACC_POP_STATE_M(); \
return aux_work; \
} \
}
erts_aint32_t aux_work = orig_aux_work;
erts_aint32_t ignore = 0;
+ ERTS_MSACC_PUSH_AND_SET_STATE_M(ERTS_MSACC_STATE_AUX);
+ ASSERT(!awdp->esdp || !ERTS_SCHEDULER_IS_DIRTY(awdp->esdp));
#ifdef ERTS_SMP
haw_thr_prgr_current_reset(awdp);
#endif
@@ -2300,10 +2256,6 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_MISC,
handle_misc_aux_work);
-#ifdef ERTS_SMP_SCHEDULERS_NEED_TO_CHECK_CHILDREN
- HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_CHECK_CHILDREN,
- handle_check_children);
-#endif
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_SET_TMO,
handle_setup_aux_work_timer);
@@ -2331,6 +2283,8 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
haw_thr_prgr_current_check_progress(awdp);
#endif
+ ERTS_MSACC_UPDATE_CACHE();
+ ERTS_MSACC_POP_STATE_M();
return aux_work;
#undef HANDLE_AUX_WORK
@@ -2531,19 +2485,10 @@ try_set_sys_scheduling(void)
#endif
static ERTS_INLINE int
-prepare_for_sys_schedule(ErtsSchedulerData *esdp, int non_blocking)
+prepare_for_sys_schedule(int non_blocking)
{
if (non_blocking && erts_eager_check_io) {
#ifdef ERTS_SMP
-#ifdef ERTS_SCHED_ONLY_POLL_SCHED_1
- if (esdp->no != 1) {
- /* If we are not scheduler 1 and ERTS_SCHED_ONLY_POLL_SCHED_1 is used
- then we make sure to wake scheduler 1 */
- ErtsRunQueue *rq = ERTS_RUNQ_IX(0);
- wake_scheduler(rq);
- return 0;
- }
-#endif
return try_set_sys_scheduling();
#else
return 1;
@@ -2553,16 +2498,6 @@ prepare_for_sys_schedule(ErtsSchedulerData *esdp, int non_blocking)
#ifdef ERTS_SMP
while (!erts_port_task_have_outstanding_io_tasks()
&& try_set_sys_scheduling()) {
-#ifdef ERTS_SCHED_ONLY_POLL_SCHED_1
- if (esdp->no != 1) {
- /* If we are not scheduler 1 and ERTS_SCHED_ONLY_POLL_SCHED_1 is used
- then we make sure to wake scheduler 1 */
- ErtsRunQueue *rq = ERTS_RUNQ_IX(0);
- clear_sys_scheduling();
- wake_scheduler(rq);
- return 0;
- }
-#endif
if (!erts_port_task_have_outstanding_io_tasks())
return 1;
clear_sys_scheduling();
@@ -2857,6 +2792,8 @@ aux_thread(void *unused)
ssi->event = erts_tse_fetch();
+ erts_msacc_init_thread("aux", 1, 1);
+
callbacks.arg = (void *) ssi;
callbacks.wakeup = thr_prgr_wakeup;
callbacks.prepare_wait = thr_prgr_prep_wait;
@@ -2867,6 +2804,7 @@ aux_thread(void *unused)
init_aux_work_data(awdp, NULL, NULL);
awdp->ssi = ssi;
+
sched_prep_spin_wait(ssi);
while (1) {
@@ -2886,8 +2824,6 @@ aux_thread(void *unused)
erts_thr_progress_active(NULL, thr_prgr_active = 0);
erts_thr_progress_prepare_wait(NULL);
- ERTS_SCHED_FAIR_YIELD();
-
flgs = sched_spin_wait(ssi, 0);
if (flgs & ERTS_SSI_FLG_SLEEPING) {
@@ -2922,6 +2858,9 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
#ifdef ERTS_SMP
int thr_prgr_active = 1;
erts_aint32_t flgs;
+#endif
+ ERTS_MSACC_PUSH_STATE_M();
+#ifdef ERTS_SMP
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
@@ -2955,7 +2894,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
* be waiting in erl_sys_schedule()
*/
- if (ERTS_SCHEDULER_IS_DIRTY(esdp) || !prepare_for_sys_schedule(esdp, 0)) {
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp) || !prepare_for_sys_schedule(0)) {
sched_waiting(esdp->no, rq);
@@ -2969,17 +2908,17 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
sched_wall_time_change(esdp, thr_prgr_active);
while (1) {
- ErtsMonotonicTime current_time;
+ ErtsMonotonicTime current_time = 0;
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
- if (aux_work) {
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
+ if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (!thr_prgr_active) {
erts_thr_progress_active(esdp, thr_prgr_active = 1);
sched_wall_time_change(esdp, 1);
}
aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
- if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)
- && erts_thr_progress_update(esdp))
+ ERTS_MSACC_UPDATE_CACHE();
+ if (aux_work && erts_thr_progress_update(esdp))
erts_thr_progress_leader_update(esdp);
}
@@ -3020,8 +2959,6 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
erts_thr_progress_prepare_wait(esdp);
}
- ERTS_SCHED_FAIR_YIELD();
-
flgs = sched_spin_wait(ssi, spincount);
if (flgs & ERTS_SSI_FLG_SLEEPING) {
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
@@ -3042,7 +2979,9 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
- 1) + 1;
} else
timeout = -1;
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_SLEEP);
res = erts_tse_twait(ssi->event, timeout);
+ ERTS_MSACC_POP_STATE_M();
current_time = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0 :
erts_get_monotonic_time(esdp);
} while (res == EINTR);
@@ -3092,13 +3031,8 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
#ifdef ERTS_DIRTY_SCHEDULERS
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
#endif
-
-#ifdef ERTS_SCHED_ONLY_POLL_SCHED_1
- ASSERT(esdp->no == 1);
-#endif
sched_waiting_sys(esdp->no, rq);
-
erts_smp_runq_unlock(rq);
ASSERT(working);
@@ -3116,9 +3050,13 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
if (working)
sched_wall_time_change(esdp, working = 0);
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_CHECK_IO);
+
ASSERT(!erts_port_task_have_outstanding_io_tasks());
erl_sys_schedule(1); /* Might give us something to do */
+ ERTS_MSACC_POP_STATE_M();
+
if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
current_time = erts_get_monotonic_time(esdp);
if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
@@ -3131,25 +3069,23 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
#endif
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
- if (aux_work) {
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- if (!working)
- sched_wall_time_change(esdp, working = 1);
+ if (aux_work && !ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (!working)
+ sched_wall_time_change(esdp, working = 1);
#ifdef ERTS_SMP
- if (!thr_prgr_active)
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ if (!thr_prgr_active)
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
#endif
- }
aux_work = handle_aux_work(&esdp->aux_work_data, aux_work, 1);
+ ERTS_MSACC_UPDATE_CACHE();
#ifdef ERTS_SMP
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && aux_work &&
- erts_thr_progress_update(esdp))
+ if (aux_work && erts_thr_progress_update(esdp))
erts_thr_progress_leader_update(esdp);
#endif
}
#ifndef ERTS_SMP
- if (rq->len != 0 || rq->misc.start)
+ if (erts_smp_atomic32_read_dirty(&rq->len) != 0 || rq->misc.start)
goto sys_woken;
#else
flgs = erts_smp_atomic32_read_acqb(&ssi->flags);
@@ -3168,7 +3104,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
* Got to check that we still got I/O tasks; otherwise
* we have to continue checking for I/O...
*/
- if (!prepare_for_sys_schedule(esdp, 0)) {
+ if (!prepare_for_sys_schedule(0)) {
spincount *= ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT;
goto tse_wait;
}
@@ -3190,7 +3126,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
* Got to check that we still got I/O tasks; otherwise
* we have to wait in erl_sys_schedule() after all...
*/
- if (!prepare_for_sys_schedule(esdp, 0)) {
+ if (!prepare_for_sys_schedule(0)) {
/*
* Not allowed to wait in erl_sys_schedule;
* do tse wait instead...
@@ -3239,8 +3175,12 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
ASSERT(!erts_port_task_have_outstanding_io_tasks());
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_CHECK_IO);
+
erl_sys_schedule(0);
+ ERTS_MSACC_POP_STATE_M();
+
if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
ErtsMonotonicTime current_time = erts_get_monotonic_time(esdp);
if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
@@ -3248,7 +3188,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
}
#ifndef ERTS_SMP
- if (rq->len == 0 && !rq->misc.start)
+ if (erts_smp_atomic32_read_dirty(&rq->len) == 0 && !rq->misc.start)
goto sys_aux_work;
sys_woken:
#else
@@ -3635,6 +3575,13 @@ check_requeue_process(ErtsRunQueue *rq, int prio_q)
return 0;
}
+static ERTS_INLINE void
+free_proxy_proc(Process *proxy)
+{
+ ASSERT(erts_smp_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY);
+ erts_free(ERTS_ALC_T_PROC, proxy);
+}
+
#ifdef ERTS_SMP
static ErtsRunQueue *
@@ -3949,9 +3896,6 @@ evacuate_run_queue(ErtsRunQueue *rq,
erts_aint32_t state;
Process *proc;
int notify = 0;
-#ifdef ERTS_DIRTY_SCHEDULERS
- int requeue;
-#endif
to_rq = NULL;
#ifdef ERTS_DIRTY_SCHEDULERS
@@ -3966,49 +3910,94 @@ evacuate_run_queue(ErtsRunQueue *rq,
proc = dequeue_process(rq, prio_q, &state);
while (proc) {
-#ifdef ERTS_DIRTY_SCHEDULERS
- requeue = 1;
+ Process *real_proc;
+ int prio;
+ erts_aint32_t max_qbit, qbit, real_state;
+
+ prio = ERTS_PSFLGS_GET_PRQ_PRIO(state);
+ qbit = ((erts_aint32_t) 1) << prio;
+
+ if (!(state & ERTS_PSFLG_PROXY)) {
+ real_proc = proc;
+ real_state = state;
+ }
+ else {
+ real_proc = erts_proc_lookup_raw(proc->common.id);
+ if (!real_proc) {
+ free_proxy_proc(proc);
+ goto handle_next_proc;
+ }
+ real_state = erts_smp_atomic32_read_acqb(&real_proc->state);
+ }
+
+ max_qbit = (state >> ERTS_PSFLGS_IN_PRQ_MASK_OFFSET);
+ max_qbit &= ERTS_PSFLGS_QMASK;
+ max_qbit |= 1 << ERTS_PSFLGS_QMASK_BITS;
+ max_qbit &= -max_qbit;
+
+ if (qbit > max_qbit) {
+ /* Process already queued with higher prio; drop it... */
+ if (real_proc != proc)
+ free_proxy_proc(proc);
+ else {
+ erts_aint32_t clr_bits;
+#ifdef DEBUG
+ erts_aint32_t old;
#endif
- if (ERTS_PSFLG_BOUND & state) {
- /* Bound processes get stuck here... */
- proc->next = NULL;
- if (sbpp->last)
- sbpp->last->next = proc;
- else
- sbpp->first = proc;
- sbpp->last = proc;
-#ifdef ERTS_DIRTY_SCHEDULERS
- requeue = 0;
+
+ clr_bits = ERTS_PSFLG_IN_RUNQ;
+ clr_bits |= qbit << ERTS_PSFLGS_IN_PRQ_MASK_OFFSET;
+
+#ifdef DEBUG
+ old =
+#else
+ (void)
#endif
+ erts_smp_atomic32_read_band_mb(&proc->state,
+ ~clr_bits);
+ ASSERT((old & clr_bits) == clr_bits);
+
+ }
+
+ goto handle_next_proc;
}
+
#ifdef ERTS_DIRTY_SCHEDULERS
- else if (state & ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q) {
+
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) {
+ erts_aint32_t dqbit = qbit;
#ifdef DEBUG
- erts_aint32_t old =
-#else
- (void)
+ erts_aint32_t old_dqbit;
#endif
- erts_smp_atomic32_read_band_nob(&proc->state,
- ~(ERTS_PSFLG_DIRTY_CPU_PROC
- | ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q));
- /* assert that no other dirty flags are set */
- ASSERT(!(old & (ERTS_PSFLG_DIRTY_IO_PROC|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)));
- } else if (state & ERTS_PSFLG_DIRTY_IO_PROC_IN_Q) {
+
+ if (rq == ERTS_DIRTY_CPU_RUNQ)
+ dqbit <<= ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET;
+ else {
+ ASSERT(rq == ERTS_DIRTY_IO_RUNQ);
+ dqbit <<= ERTS_PDSFLGS_IN_IO_PRQ_MASK_OFFSET;
+ }
+
#ifdef DEBUG
- erts_aint32_t old =
+ old_dqbit = (int)
#else
- (void)
+ (void)
#endif
- erts_smp_atomic32_read_band_nob(&proc->state,
- ~(ERTS_PSFLG_DIRTY_IO_PROC
- | ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
- /* assert that no other dirty flags are set */
- ASSERT(!(old & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q)));
+ erts_smp_atomic32_read_band_mb(&real_proc->dirty_state,
+ ~dqbit);
+ ASSERT(old_dqbit & dqbit);
}
- if (requeue) {
-#else
- else {
#endif
+
+ if (ERTS_PSFLG_BOUND & real_state) {
+ /* Bound processes get stuck here... */
+ proc->next = NULL;
+ if (sbpp->last)
+ sbpp->last->next = proc;
+ else
+ sbpp->first = proc;
+ sbpp->last = proc;
+ }
+ else {
int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state);
erts_smp_runq_unlock(rq);
@@ -4031,6 +4020,8 @@ evacuate_run_queue(ErtsRunQueue *rq,
erts_smp_runq_lock(rq);
}
+
+ handle_next_proc:
proc = dequeue_process(rq, prio_q, &state);
}
if (notify)
@@ -4965,7 +4956,7 @@ erts_fprintf(stderr, "--------------------------------\n");
rq->out_of_work_count = 0;
(void) ERTS_RUNQ_FLGS_READ_BSET(rq, ERTS_RUNQ_FLGS_MIGRATION_INFO, flags);
- rq->max_len = rq->len;
+ rq->max_len = erts_smp_atomic32_read_dirty(&rq->len);
for (pix = 0; pix < ERTS_NO_PRIO_LEVELS; pix++) {
ErtsRunQueueInfo *rqi;
rqi = (pix == ERTS_PORT_PRIO_LEVEL
@@ -5123,7 +5114,7 @@ wakeup_other_check(ErtsRunQueue *rq, Uint32 flags)
{
int wo_reds = rq->wakeup_other_reds;
if (wo_reds) {
- int left_len = rq->len - 1;
+ int left_len = erts_smp_atomic32_read_dirty(&rq->len) - 1;
if (left_len < 1) {
int wo_reduce = wo_reds << wakeup_other.dec_shift;
wo_reduce &= wakeup_other.dec_mask;
@@ -5196,7 +5187,7 @@ wakeup_other_check_legacy(ErtsRunQueue *rq, Uint32 flags)
{
int wo_reds = rq->wakeup_other_reds;
if (wo_reds) {
- erts_aint32_t len = rq->len;
+ erts_aint32_t len = erts_smp_atomic32_read_dirty(&rq->len);
if (len < 2) {
rq->wakeup_other -= ERTS_WAKEUP_OTHER_DEC_LEGACY*wo_reds;
if (rq->wakeup_other < 0)
@@ -5292,7 +5283,7 @@ runq_supervisor(void *unused)
ErtsRunQueue *rq = ERTS_RUNQ_IX(ix);
if (ERTS_RUNQ_FLGS_GET(rq) & ERTS_RUNQ_FLG_NONEMPTY) {
erts_smp_runq_lock(rq);
- if (rq->len != 0)
+ if (erts_smp_atomic32_read_dirty(&rq->len) != 0)
wake_scheduler_on_empty_runq(rq); /* forced wakeup... */
erts_smp_runq_unlock(rq);
}
@@ -5311,17 +5302,11 @@ erts_early_init_scheduling(int no_schedulers)
wakeup_other.threshold = ERTS_SCHED_WAKEUP_OTHER_THRESHOLD_MEDIUM;
wakeup_other.type = ERTS_SCHED_WAKEUP_OTHER_TYPE_DEFAULT;
#endif
-#ifndef ERTS_SCHED_MIN_SPIN
sched_busy_wait.sys_schedule = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM;
sched_busy_wait.tse = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM
* ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT);
sched_busy_wait.aux_work = (ERTS_SCHED_SYS_SLEEP_SPINCOUNT_MEDIUM
* ERTS_SCHED_AUX_WORK_SLEEP_SPINCOUNT_FACT_MEDIUM);
-#else
- sched_busy_wait.sys_schedule = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE;
- sched_busy_wait.tse = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE;
- sched_busy_wait.aux_work = ERTS_SCHED_SYS_SLEEP_SPINCOUNT_NONE;
-#endif
}
int
@@ -5505,9 +5490,6 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
esdp->f_reg_array =
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BEAM_REGISTER,
MAX_REG * sizeof(FloatDef));
-#if !HEAP_ON_C_STACK
- esdp->num_tmp_heap_used = 0;
-#endif
#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix)) {
esdp->no = 0;
@@ -5642,7 +5624,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
}
rq->out_of_work_count = 0;
rq->max_len = 0;
- rq->len = 0;
+ erts_smp_atomic32_set_nob(&rq->len, 0);
rq->wakeup_other = 0;
rq->wakeup_other_reds = 0;
rq->halt_in_progress = 0;
@@ -5792,9 +5774,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
#endif
init_misc_aux_work();
-#if !HALFWORD_HEAP
init_swtreq_alloc();
-#endif
erts_atomic32_init_nob(&debug_wait_completed_count, 0); /* debug only */
debug_wait_completed_flags = 0;
@@ -5974,19 +5954,96 @@ make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio)
return proxy;
}
-static ERTS_INLINE void
-free_proxy_proc(Process *proxy)
-{
- ASSERT(erts_smp_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY);
- erts_free(ERTS_ALC_T_PROC, proxy);
-}
-
#define ERTS_ENQUEUE_NOT 0
#define ERTS_ENQUEUE_NORMAL_QUEUE 1
-#ifdef ERTS_DIRTY_SCHEDULERS
#define ERTS_ENQUEUE_DIRTY_CPU_QUEUE 2
#define ERTS_ENQUEUE_DIRTY_IO_QUEUE 3
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+static int
+check_dirty_enqueue_in_prio_queue(Process *c_p,
+ erts_aint32_t *newp,
+ erts_aint32_t actual,
+ erts_aint32_t aprio,
+ erts_aint32_t qbit)
+{
+ int queue;
+ erts_aint32_t dact, max_qbit;
+
+ /* Termination should be done on an ordinary scheduler */
+ if (actual & ERTS_PSFLG_EXITING) {
+ *newp &= ~ERTS_PSFLGS_DIRTY_WORK;
+ return ERTS_ENQUEUE_NORMAL_QUEUE;
+ }
+
+ /*
+ * If we have system tasks, we enqueue on ordinary run-queue
+ * and take care of those system tasks first.
+ */
+ if (actual & ERTS_PSFLG_ACTIVE_SYS)
+ return ERTS_ENQUEUE_NORMAL_QUEUE;
+
+ dact = erts_smp_atomic32_read_mb(&c_p->dirty_state);
+ if (actual & (ERTS_PSFLG_DIRTY_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_CPU_PROC)) {
+ max_qbit = ((dact >> ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET)
+ & ERTS_PDSFLGS_QMASK);
+ queue = ERTS_ENQUEUE_DIRTY_CPU_QUEUE;
+ }
+ else {
+ ASSERT(actual & ERTS_PSFLG_DIRTY_IO_PROC);
+ max_qbit = ((dact >> ERTS_PDSFLGS_IN_IO_PRQ_MASK_OFFSET)
+ & ERTS_PDSFLGS_QMASK);
+ queue = ERTS_ENQUEUE_DIRTY_IO_QUEUE;
+ }
+
+ max_qbit |= 1 << ERTS_PSFLGS_QMASK_BITS;
+ max_qbit &= -max_qbit;
+
+ if (qbit >= max_qbit)
+ return ERTS_ENQUEUE_NOT; /* Already queued in higher or equal prio */
+ if ((actual & (ERTS_PSFLG_IN_RUNQ|ERTS_PSFLGS_USR_PRIO_MASK))
+ != (aprio << ERTS_PSFLGS_USR_PRIO_OFFSET)) {
+ /*
+ * Process struct already enqueued, or actual prio not
+ * equal to user prio, i.e., enqueue using proxy.
+ */
+ return -1*queue;
+ }
+
+ *newp |= ERTS_PSFLG_IN_RUNQ;
+ return queue;
+}
+
+static ERTS_INLINE int
+fin_dirty_enq_s_change(Process *p,
+ int pstruct_reserved,
+ erts_aint32_t enq_prio,
+ int qmask_offset)
+{
+ erts_aint32_t qbit = 1 << enq_prio;
+ qbit <<= qmask_offset;
+
+ if (qbit & erts_smp_atomic32_read_bor_mb(&p->dirty_state, qbit)) {
+ /* Already enqueue by someone else... */
+ if (pstruct_reserved) {
+ /* We reserved process struct for enqueue; clear it... */
+#ifdef DEBUG
+ erts_aint32_t old =
+#else
+ (void)
#endif
+ erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_IN_RUNQ);
+ ASSERT(old & ERTS_PSFLG_IN_RUNQ);
+ }
+ return 0;
+ }
+
+ return !0;
+}
+
+#endif /* ERTS_DIRTY_SCHEDULERS */
static ERTS_INLINE int
check_enqueue_in_prio_queue(Process *c_p,
@@ -6002,61 +6059,14 @@ check_enqueue_in_prio_queue(Process *c_p,
*prq_prio_p = aprio;
#ifdef ERTS_DIRTY_SCHEDULERS
- if (actual & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)) {
- /*
- * If we have system tasks of a priority higher
- * or equal to the user priority, we enqueue
- * on ordinary run-queue and take care of
- * those system tasks first.
- */
- if (actual & ERTS_PSFLG_ACTIVE_SYS) {
- erts_aint32_t uprio, stprio, qmask;
- uprio = (actual >> ERTS_PSFLGS_USR_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK;
- if (aprio < uprio)
- goto enqueue_normal_runq; /* system tasks with higher prio */
- erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
- qmask = c_p->sys_task_qs->qmask;
- erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
- switch (qmask & -qmask) {
- case MAX_BIT:
- stprio = PRIORITY_MAX;
- break;
- case HIGH_BIT:
- stprio = PRIORITY_HIGH;
- break;
- case NORMAL_BIT:
- stprio = PRIORITY_NORMAL;
- break;
- case LOW_BIT:
- stprio = PRIORITY_LOW;
- break;
- default:
- stprio = PRIORITY_LOW+1;
- break;
- }
- if (stprio <= uprio)
- goto enqueue_normal_runq; /* system tasks with higher prio */
- }
-
- /* Enqueue in dirty run queue if not already enqueued */
- if (actual & (ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q))
- return ERTS_ENQUEUE_NOT; /* already in queue */
- if (actual & ERTS_PSFLG_DIRTY_CPU_PROC) {
- *newp |= ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q;
- if (actual & ERTS_PSFLG_IN_RUNQ)
- return -ERTS_ENQUEUE_DIRTY_CPU_QUEUE; /* use proxy */
- *newp |= ERTS_PSFLG_IN_RUNQ;
- return ERTS_ENQUEUE_DIRTY_CPU_QUEUE;
- }
- *newp |= ERTS_PSFLG_DIRTY_IO_PROC_IN_Q;
- if (actual & ERTS_PSFLG_IN_RUNQ)
- return -ERTS_ENQUEUE_DIRTY_IO_QUEUE; /* use proxy */
- *newp |= ERTS_PSFLG_IN_RUNQ;
- return ERTS_ENQUEUE_DIRTY_IO_QUEUE;
+ if (actual & ERTS_PSFLGS_DIRTY_WORK) {
+ int res = check_dirty_enqueue_in_prio_queue(c_p, newp, actual,
+ aprio, qbit);
+ if (res != ERTS_ENQUEUE_NORMAL_QUEUE)
+ return res;
}
-
- enqueue_normal_runq:
#endif
+
max_qbit = (actual >> ERTS_PSFLGS_IN_PRQ_MASK_OFFSET) & ERTS_PSFLGS_QMASK;
max_qbit |= 1 << ERTS_PSFLGS_QMASK_BITS;
max_qbit &= -max_qbit;
@@ -6088,6 +6098,65 @@ check_enqueue_in_prio_queue(Process *c_p,
return ERTS_ENQUEUE_NORMAL_QUEUE;
}
+static ERTS_INLINE ErtsRunQueue *
+select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t state)
+{
+
+ switch (enqueue) {
+
+ case ERTS_ENQUEUE_NOT:
+
+ return NULL;
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+ case ERTS_ENQUEUE_DIRTY_CPU_QUEUE:
+ case -ERTS_ENQUEUE_DIRTY_CPU_QUEUE:
+
+ if (fin_dirty_enq_s_change(p, enqueue > 0, enq_prio,
+ ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET))
+ return ERTS_DIRTY_CPU_RUNQ;
+
+ return NULL;
+
+
+ case ERTS_ENQUEUE_DIRTY_IO_QUEUE:
+ case -ERTS_ENQUEUE_DIRTY_IO_QUEUE:
+
+ if (fin_dirty_enq_s_change(p, enqueue > 0, enq_prio,
+ ERTS_PDSFLGS_IN_IO_PRQ_MASK_OFFSET))
+ return ERTS_DIRTY_IO_RUNQ;
+
+ return NULL;
+
+#endif
+
+ default: {
+ ErtsRunQueue* runq;
+
+ ASSERT(enqueue == ERTS_ENQUEUE_NORMAL_QUEUE
+ || enqueue == -ERTS_ENQUEUE_NORMAL_QUEUE);
+
+ runq = erts_get_runq_proc(p);
+
+#ifdef ERTS_SMP
+ if (!(ERTS_PSFLG_BOUND & state)) {
+ ErtsRunQueue *new_runq = erts_check_emigration_need(runq, enq_prio);
+ if (new_runq) {
+ RUNQ_SET_RQ(&p->run_queue, new_runq);
+ runq = new_runq;
+ }
+ }
+#endif
+
+ ASSERT(runq);
+
+ return runq;
+ }
+ }
+}
+
+
/*
* schedule_out_process() return with c_rq locked.
*/
@@ -6096,11 +6165,7 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Proces
{
erts_aint32_t a, e, n, enq_prio = -1;
int enqueue; /* < 0 -> use proxy */
- Process* sched_p;
ErtsRunQueue* runq;
-#ifdef ERTS_SMP
- int check_emigration_need;
-#endif
a = state;
@@ -6112,7 +6177,7 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Proces
enqueue = ERTS_ENQUEUE_NOT;
n &= ~(ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS);
- if (a & ERTS_PSFLG_ACTIVE_SYS
+ if (a & (ERTS_PSFLG_ACTIVE_SYS|ERTS_PSFLG_DIRTY_ACTIVE_SYS)
|| (a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) {
enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a);
}
@@ -6121,16 +6186,17 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Proces
break;
}
- switch (enqueue) {
- case ERTS_ENQUEUE_NOT:
+ runq = select_enqueue_run_queue(enqueue, enq_prio, p, n);
+
+ if (!runq) {
+
if (erts_system_profile_flags.runnable_procs) {
/* Status lock prevents out of order "runnable proc" trace msgs */
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
- if (!(a & ERTS_PSFLG_ACTIVE_SYS)
- && (!(a & ERTS_PSFLG_ACTIVE)
- || (a & ERTS_PSFLG_SUSPENDED))) {
+ if (!(a & (ERTS_PSFLG_ACTIVE_SYS|ERTS_PSFLG_DIRTY_ACTIVE_SYS))
+ && (!(a & ERTS_PSFLG_ACTIVE) || (a & ERTS_PSFLG_SUSPENDED))) {
/* Process inactive */
profile_runnable_proc(p, am_inactive);
}
@@ -6143,98 +6209,76 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Proces
return 0;
-#ifdef ERTS_DIRTY_SCHEDULERS
-#ifdef ERTS_SMP
- case ERTS_ENQUEUE_DIRTY_CPU_QUEUE:
- case -ERTS_ENQUEUE_DIRTY_CPU_QUEUE:
- runq = ERTS_DIRTY_CPU_RUNQ;
- ASSERT(ERTS_SCHEDULER_IS_DIRTY_CPU(runq->scheduler));
-#ifdef ERTS_SMP
- check_emigration_need = 0;
-#endif
- break;
+ }
+ else {
+ Process* sched_p;
- case ERTS_ENQUEUE_DIRTY_IO_QUEUE:
- case -ERTS_ENQUEUE_DIRTY_IO_QUEUE:
- runq = ERTS_DIRTY_IO_RUNQ;
- ASSERT(ERTS_SCHEDULER_IS_DIRTY_IO(runq->scheduler));
-#ifdef ERTS_SMP
- check_emigration_need = 0;
-#endif
- break;
-#endif
-#endif
+ ASSERT(!(n & ERTS_PSFLG_SUSPENDED) || (n & (ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS)));
- default:
- ASSERT(enqueue == ERTS_ENQUEUE_NORMAL_QUEUE
- || enqueue == -ERTS_ENQUEUE_NORMAL_QUEUE);
+ if (enqueue < 0)
+ sched_p = make_proxy_proc(proxy, p, enq_prio);
+ else {
+ sched_p = p;
+ if (proxy)
+ free_proxy_proc(proxy);
+ }
- runq = erts_get_runq_proc(p);
-#ifdef ERTS_SMP
- check_emigration_need = !(ERTS_PSFLG_BOUND & n);
-#endif
- break;
- }
+ ASSERT(runq);
- ASSERT(!(n & ERTS_PSFLG_SUSPENDED) || (n & ERTS_PSFLG_ACTIVE_SYS));
+ erts_smp_runq_lock(runq);
- if (enqueue < 0)
- sched_p = make_proxy_proc(proxy, p, enq_prio);
- else {
- sched_p = p;
- if (proxy)
- free_proxy_proc(proxy);
- }
+ /* Enqueue the process */
+ enqueue_process(runq, (int) enq_prio, sched_p);
-#ifdef ERTS_SMP
- if (check_emigration_need) {
- ErtsRunQueue *new_runq = erts_check_emigration_need(runq, enq_prio);
- if (new_runq) {
- RUNQ_SET_RQ(&sched_p->run_queue, new_runq);
- runq = new_runq;
- }
- }
-#endif
+ if (runq == c_rq)
+ return 1;
- ASSERT(runq);
+ erts_smp_runq_unlock(runq);
- erts_smp_runq_lock(runq);
+ smp_notify_inc_runq(runq);
- /* Enqueue the process */
- enqueue_process(runq, (int) enq_prio, sched_p);
+ erts_smp_runq_lock(c_rq);
- if (runq == c_rq)
return 1;
- erts_smp_runq_unlock(runq);
- smp_notify_inc_runq(runq);
- erts_smp_runq_lock(c_rq);
- return 1;
+ }
+
}
static ERTS_INLINE void
-add2runq(Process *p, erts_aint32_t state, erts_aint32_t prio)
+add2runq(int enqueue, erts_aint32_t prio,
+ Process *proc, erts_aint32_t state,
+ Process **proxy)
{
- ErtsRunQueue *runq = erts_get_runq_proc(p);
+ ErtsRunQueue *runq;
-#ifdef ERTS_SMP
- if (!(ERTS_PSFLG_BOUND & state)) {
- ErtsRunQueue *new_runq = erts_check_emigration_need(runq, (int) prio);
- if (new_runq) {
- RUNQ_SET_RQ(&p->run_queue, new_runq);
- runq = new_runq;
- }
- }
-#endif
- ASSERT(runq);
+ runq = select_enqueue_run_queue(enqueue, prio, proc, state);
+
+ if (runq) {
+ Process *sched_p;
- erts_smp_runq_lock(runq);
+ if (enqueue > 0)
+ sched_p = proc;
+ else {
+ Process *pxy;
+
+ if (!proxy)
+ pxy = NULL;
+ else {
+ pxy = *proxy;
+ *proxy = NULL;
+ }
+ sched_p = make_proxy_proc(pxy, proc, prio);
+ }
- /* Enqueue the process */
- enqueue_process(runq, (int) prio, p);
+ erts_smp_runq_lock(runq);
- erts_smp_runq_unlock(runq);
- smp_notify_inc_runq(runq);
+ /* Enqueue the process */
+ enqueue_process(runq, (int) prio, sched_p);
+ erts_smp_runq_unlock(runq);
+ smp_notify_inc_runq(runq);
+ }
}
static ERTS_INLINE int
@@ -6340,10 +6384,7 @@ schedule_process(Process *p, erts_aint32_t in_state, ErtsProcLocks locks)
&state,
&enq_prio,
locks);
- if (enqueue != ERTS_ENQUEUE_NOT)
- add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
- state,
- enq_prio);
+ add2runq(enqueue, enq_prio, p, state, NULL);
}
void
@@ -6352,22 +6393,99 @@ erts_schedule_process(Process *p, erts_aint32_t state, ErtsProcLocks locks)
schedule_process(p, state, locks);
}
-static void
-schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
+static int
+schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st)
{
- /*
- * Expects status lock to be locked when called, and
- * returns with status lock unlocked...
- */
- erts_aint32_t a = state, n, enq_prio = -1;
+ int res;
+ int locked;
+ ErtsProcSysTaskQs *stqs, *free_stqs;
+ erts_aint32_t state, a, n, enq_prio;
int enqueue; /* < 0 -> use proxy */
- unsigned int prof_runnable_procs = erts_system_profile_flags.runnable_procs;
+ unsigned int prof_runnable_procs;
+
+ res = 1; /* prepare for success */
+ st->next = st->prev = st; /* Prep for empty prio queue */
+ state = erts_smp_atomic32_read_nob(&p->state);
+ prof_runnable_procs = erts_system_profile_flags.runnable_procs;
+ locked = 0;
+ free_stqs = NULL;
+ if (state & ERTS_PSFLG_ACTIVE_SYS)
+ stqs = NULL;
+ else {
+ alloc_qs:
+ stqs = proc_sys_task_queues_alloc();
+ stqs->qmask = 1 << prio;
+ stqs->ncount = 0;
+ stqs->q[PRIORITY_MAX] = NULL;
+ stqs->q[PRIORITY_HIGH] = NULL;
+ stqs->q[PRIORITY_NORMAL] = NULL;
+ stqs->q[PRIORITY_LOW] = NULL;
+ stqs->q[prio] = st;
+ }
+
+ if (!locked) {
+ locked = 1;
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+
+ state = erts_smp_atomic32_read_nob(&p->state);
+ if (state & ERTS_PSFLG_EXITING) {
+ free_stqs = stqs;
+ res = 0;
+ goto cleanup;
+ }
+ }
+
+ if (!p->sys_task_qs) {
+ if (stqs)
+ p->sys_task_qs = stqs;
+ else
+ goto alloc_qs;
+ }
+ else {
+ free_stqs = stqs;
+ stqs = p->sys_task_qs;
+ if (!stqs->q[prio]) {
+ stqs->q[prio] = st;
+ stqs->qmask |= 1 << prio;
+ }
+ else {
+ st->next = stqs->q[prio];
+ st->prev = stqs->q[prio]->prev;
+ st->next->prev = st;
+ st->prev->next = st;
+ ASSERT(stqs->qmask & (1 << prio));
+ }
+ }
+
+ if (ERTS_PSFLGS_GET_ACT_PRIO(state) > prio) {
+ erts_aint32_t n, a, e;
+ /* Need to elevate actual prio */
+
+ a = state;
+ do {
+ if (ERTS_PSFLGS_GET_ACT_PRIO(a) <= prio) {
+ n = a;
+ break;
+ }
+ n = e = a;
+ n &= ~ERTS_PSFLGS_ACT_PRIO_MASK;
+ n |= (prio << ERTS_PSFLGS_ACT_PRIO_OFFSET);
+ a = erts_smp_atomic32_cmpxchg_nob(&p->state, n, e);
+ } while (a != e);
+ state = n;
+ }
+
+
+ a = state;
+ enq_prio = -1;
/* Status lock prevents out of order "runnable proc" trace msgs */
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
- if (!prof_runnable_procs)
+ if (!prof_runnable_procs) {
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ locked = 0;
+ }
ASSERT(!(state & ERTS_PSFLG_PROXY));
@@ -6375,8 +6493,10 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
erts_aint32_t e;
n = e = a;
- if (a & ERTS_PSFLG_FREE)
+ if (a & ERTS_PSFLG_FREE) {
+ res = 0;
goto cleanup; /* We don't want to schedule free processes... */
+ }
enqueue = ERTS_ENQUEUE_NOT;
n |= ERTS_PSFLG_ACTIVE_SYS;
@@ -6400,29 +6520,22 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
}
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- prof_runnable_procs = 0;
+ locked = 0;
}
- if (enqueue != ERTS_ENQUEUE_NOT) {
- Process *sched_p;
- if (enqueue > 0)
- sched_p = p;
- else {
- sched_p = make_proxy_proc(proxy, p, enq_prio);
- proxy = NULL;
- }
- add2runq(sched_p, n, enq_prio);
- }
+ add2runq(enqueue, enq_prio, p, n, NULL);
cleanup:
- if (prof_runnable_procs)
+ if (locked)
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- if (proxy)
- free_proxy_proc(proxy);
+ if (free_stqs)
+ proc_sys_task_queues_free(free_stqs);
ERTS_SMP_LC_ASSERT(!(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)));
+
+ return res;
}
static ERTS_INLINE int
@@ -6464,9 +6577,6 @@ suspend_process(Process *c_p, Process *p)
if (suspended) {
- ASSERT(!(ERTS_PSFLG_RUNNING & state)
- || p == erts_get_current_process());
-
if (suspended > 0 && erts_system_profile_flags.runnable_procs) {
/* 'state' is before our change... */
@@ -6508,10 +6618,7 @@ resume_process(Process *p, ErtsProcLocks locks)
&state,
&enq_prio,
locks);
- if (enqueue)
- add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
- state,
- enq_prio);
+ add2runq(enqueue, enq_prio, p, state, NULL);
}
int
@@ -6798,18 +6905,19 @@ suspend_scheduler(ErtsSchedulerData *esdp)
& ERTS_RUNQ_FLGS_QMASK);
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
if (aux_work|qmask) {
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ if (aux_work)
+ aux_work = handle_aux_work(&esdp->aux_work_data,
+ aux_work,
+ 1);
+
+ if (aux_work && erts_thr_progress_update(esdp))
+ erts_thr_progress_leader_update(esdp);
}
- if (aux_work)
- aux_work = handle_aux_work(&esdp->aux_work_data,
- aux_work,
- 1);
-
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) &&
- (aux_work && erts_thr_progress_update(esdp)))
- erts_thr_progress_leader_update(esdp);
if (qmask) {
#ifdef ERTS_DIRTY_SCHEDULERS
if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
@@ -7026,17 +7134,18 @@ suspend_scheduler(ErtsSchedulerData *esdp)
& ERTS_RUNQ_FLGS_QMASK);
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
if (aux_work|qmask) {
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !thr_prgr_active) {
- erts_thr_progress_active(esdp, thr_prgr_active = 1);
- sched_wall_time_change(esdp, 1);
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (!thr_prgr_active) {
+ erts_thr_progress_active(esdp, thr_prgr_active = 1);
+ sched_wall_time_change(esdp, 1);
+ }
+ if (aux_work)
+ aux_work = handle_aux_work(&esdp->aux_work_data,
+ aux_work,
+ 1);
+ if (aux_work && erts_thr_progress_update(esdp))
+ erts_thr_progress_leader_update(esdp);
}
- if (aux_work)
- aux_work = handle_aux_work(&esdp->aux_work_data,
- aux_work,
- 1);
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && aux_work &&
- erts_thr_progress_update(esdp))
- erts_thr_progress_leader_update(esdp);
if (qmask) {
erts_smp_runq_lock(esdp->run_queue);
evacuate_run_queue(esdp->run_queue, &sbp);
@@ -7939,6 +8048,9 @@ sched_thread_func(void *vesdp)
erts_sched_init_time_sup(esdp);
+ (void) ERTS_RUNQ_FLGS_SET_NOB(esdp->run_queue,
+ ERTS_RUNQ_FLG_EXEC);
+
#ifdef ERTS_SMP
tse = erts_tse_fetch();
erts_tse_prepare_timed(tse);
@@ -7949,6 +8061,8 @@ sched_thread_func(void *vesdp)
callbacks.wait = thr_prgr_wait;
callbacks.finalize_wait = thr_prgr_fin_wait;
+ erts_msacc_init_thread("scheduler", no, 1);
+
erts_thr_progress_register_managed_thread(esdp, &callbacks, 0);
erts_alloc_register_scheduler(vesdp);
#endif
@@ -8204,18 +8318,12 @@ erts_start_schedulers(void)
erts_snprintf(opts.name, 16, "%lu_scheduler", actual + 1);
-#ifdef __OSE__
- /* This should be done in the bind strategy */
- opts.coreNo = (actual+1) % ose_num_cpus();
-#endif
-
res = ethr_thr_create(&esdp->tid, sched_thread_func, (void*)esdp, &opts);
if (res != 0) {
break;
}
}
-
erts_no_schedulers = actual;
#ifdef ERTS_DIRTY_SCHEDULERS
@@ -8244,10 +8352,6 @@ erts_start_schedulers(void)
erts_snprintf(opts.name, 16, "aux");
-#ifdef __OSE__
- opts.coreNo = 0;
-#endif /* __OSE__ */
-
res = ethr_thr_create(&aux_tid, aux_thread, NULL, &opts);
if (res != 0)
erts_exit(ERTS_ERROR_EXIT, "Failed to create aux thread\n");
@@ -8267,7 +8371,6 @@ erts_start_schedulers(void)
actual, actual == 1 ? " was" : "s were");
erts_send_error_to_logger_nogl(dsbufp);
}
-
}
#endif /* ERTS_SMP */
@@ -8286,7 +8389,7 @@ add_pend_suspend(Process *suspendee,
sizeof(ErtsPendingSuspend));
psp->next = NULL;
#ifdef DEBUG
-#if defined(ARCH_64) && !HALFWORD_HEAP
+#if defined(ARCH_64)
psp->end = (ErtsPendingSuspend *) 0xdeaddeaddeaddead;
#else
psp->end = (ErtsPendingSuspend *) 0xdeaddead;
@@ -8408,9 +8511,8 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
resume_process(rp, rp_locks);
}
else {
-
rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
- pid, pid_locks|ERTS_PROC_LOCK_STATUS);
+ pid, ERTS_PROC_LOCK_STATUS);
if (!rp) {
c_p->flags &= ~F_P2PNR_RESCHED;
@@ -8419,40 +8521,84 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
ASSERT(!(c_p->flags & F_P2PNR_RESCHED));
- if (suspend) {
- if (suspend_process(c_p, rp))
- goto done;
- }
- else {
- if (!((ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)
- & erts_smp_atomic32_read_acqb(&rp->state)))
- goto done;
+ /*
+ * Suspend the other process in order to prevent
+ * it from being selected for normal execution.
+ * This will however not prevent it from being
+ * selected for execution of a system task. If
+ * it is selected for execution of a system task
+ * we might be blocked for quite a while if the
+ * try-lock below fails. That is, there is room
+ * for improvement here...
+ */
- }
+ if (!suspend_process(c_p, rp)) {
+ /* Other process running */
- /* Other process running */
+ ASSERT(ERTS_PSFLG_RUNNING
+ & erts_smp_atomic32_read_nob(&rp->state));
- /*
- * If we got pending suspenders and suspend ourselves waiting
- * to suspend another process we might deadlock.
- * In this case we have to yield, be suspended by
- * someone else and then do it all over again.
- */
- if (!c_p->pending_suspenders) {
- /* Mark rp pending for suspend by c_p */
- add_pend_suspend(rp, c_p->common.id, handle_pend_sync_suspend);
- ASSERT(is_nil(c_p->suspendee));
+ running:
- /* Suspend c_p; when rp is suspended c_p will be resumed. */
- suspend_process(c_p, c_p);
- c_p->flags |= F_P2PNR_RESCHED;
+ /*
+ * If we got pending suspenders and suspend ourselves waiting
+ * to suspend another process we might deadlock.
+ * In this case we have to yield, be suspended by
+ * someone else and then do it all over again.
+ */
+ if (!c_p->pending_suspenders) {
+ /* Mark rp pending for suspend by c_p */
+ add_pend_suspend(rp, c_p->common.id, handle_pend_sync_suspend);
+ ASSERT(is_nil(c_p->suspendee));
+
+ /* Suspend c_p; when rp is suspended c_p will be resumed. */
+ suspend_process(c_p, c_p);
+ c_p->flags |= F_P2PNR_RESCHED;
+ }
+ /* Yield (caller is assumed to yield immediately in bif). */
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ rp = ERTS_PROC_LOCK_BUSY;
+ }
+ else {
+ ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS;
+ if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) {
+ if (ERTS_PSFLG_RUNNING_SYS
+ & erts_smp_atomic32_read_nob(&rp->state)) {
+ /* Executing system task... */
+ resume_process(rp, ERTS_PROC_LOCK_STATUS);
+ goto running;
+ }
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ /*
+ * If we are unlucky, the process just got selected for
+ * execution of a system task. In this case we may be
+ * blocked here for quite a while... Execution of system
+ * tasks are fortunately quite rare events. We try to
+ * avoid this by checking if it is in a state executing
+ * system tasks (above), but it will not prevent all
+ * scenarios for a long block here...
+ */
+ rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
+ pid, pid_locks|ERTS_PROC_LOCK_STATUS);
+ if (!rp)
+ goto done;
+ }
+
+ /*
+ * The previous suspend has prevented the process
+ * from being selected for normal execution regardless
+ * of locks held or not held on it...
+ */
+ ASSERT(!(ERTS_PSFLG_RUNNING
+ & erts_smp_atomic32_read_nob(&rp->state)));
+
+ if (!suspend)
+ resume_process(rp, pid_locks|ERTS_PROC_LOCK_STATUS);
}
- /* Yield (caller is assumed to yield immediately in bif). */
- erts_smp_proc_unlock(rp, pid_locks|ERTS_PROC_LOCK_STATUS);
- rp = ERTS_PROC_LOCK_BUSY;
}
done:
+
if (rp && rp != ERTS_PROC_LOCK_BUSY && !(pid_locks & ERTS_PROC_LOCK_STATUS))
erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
if (unlock_c_p_status)
@@ -8947,24 +9093,39 @@ resume_process_1(BIF_ALIST_1)
}
Uint
-erts_run_queues_len(Uint *qlen)
+erts_run_queues_len(Uint *qlen, int atomic_queues_read, int incl_active_sched)
{
int i = 0;
Uint len = 0;
- ERTS_ATOMIC_FOREACH_RUNQ(rq,
- {
- Sint pqlen = 0;
- int pix;
- for (pix = 0; pix < ERTS_NO_PROC_PRIO_LEVELS; pix++)
- pqlen += RUNQ_READ_LEN(&rq->procs.prio_info[pix].len);
+ if (atomic_queues_read)
+ ERTS_ATOMIC_FOREACH_RUNQ(rq,
+ {
+ Sint rq_len = (Sint) erts_smp_atomic32_read_dirty(&rq->len);
+ ASSERT(rq_len >= 0);
+ if (incl_active_sched
+ && (ERTS_RUNQ_FLGS_GET_NOB(rq) & ERTS_RUNQ_FLG_EXEC)) {
+ rq_len++;
+ }
+ if (qlen)
+ qlen[i++] = rq_len;
+ len += (Uint) rq_len;
+ }
+ );
+ else {
+ for (i = 0; i < erts_no_run_queues; i++) {
+ ErtsRunQueue *rq = ERTS_RUNQ_IX(i);
+ Sint rq_len = (Sint) erts_smp_atomic32_read_nob(&rq->len);
+ ASSERT(rq_len >= 0);
+ if (incl_active_sched
+ && (ERTS_RUNQ_FLGS_GET_NOB(rq) & ERTS_RUNQ_FLG_EXEC)) {
+ rq_len++;
+ }
+ if (qlen)
+ qlen[i] = rq_len;
+ len += (Uint) rq_len;
+ }
- if (pqlen < 0)
- pqlen = 0;
- if (qlen)
- qlen[i++] = pqlen;
- len += pqlen;
}
- );
return len;
}
@@ -9223,6 +9384,8 @@ Process *schedule(Process *p, int calls)
Uint32 flags;
erts_aint32_t state = 0; /* Supress warning... */
+ ERTS_MSACC_DECLARE_CACHE();
+
#ifdef USE_VM_PROBES
if (p != NULL && DTRACE_ENABLED(process_unscheduled)) {
DTRACE_CHARBUF(process_buf, DTRACE_TERM_BUF_SIZE);
@@ -9257,6 +9420,8 @@ Process *schedule(Process *p, int calls)
} else {
sched_out_proc:
+ ASSERT(!(p->flags & F_DELAY_GC));
+
#ifdef ERTS_SMP
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
esdp = p->scheduler_data;
@@ -9325,6 +9490,8 @@ Process *schedule(Process *p, int calls)
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER);
+
if (state & ERTS_PSFLG_FREE) {
#ifdef ERTS_SMP
ASSERT(esdp->free_process == p);
@@ -9361,15 +9528,16 @@ Process *schedule(Process *p, int calls)
#ifdef ERTS_SMP
ErtsMigrationPaths *mps;
ErtsMigrationPath *mp;
- ErtsProcList *pnd_xtrs = rq->procs.pending_exiters;
- if (erts_proclist_fetch(&pnd_xtrs, NULL)) {
+
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ ErtsProcList *pnd_xtrs = rq->procs.pending_exiters;
+ if (erts_proclist_fetch(&pnd_xtrs, NULL)) {
rq->procs.pending_exiters = NULL;
erts_smp_runq_unlock(rq);
handle_pending_exiters(pnd_xtrs);
erts_smp_runq_lock(rq);
}
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
if (rq->check_balance_reds <= 0)
check_balance(rq);
@@ -9391,8 +9559,10 @@ Process *schedule(Process *p, int calls)
if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND|ERTS_RUNQ_FLG_SUSPENDED)) {
if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
+ (void) ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
suspend_scheduler(esdp);
- flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
+ flags = ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
+ flags |= ERTS_RUNQ_FLG_EXEC;
}
if (flags & ERTS_RUNQ_FLG_CHK_CPU_BIND) {
flags = ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND);
@@ -9407,24 +9577,20 @@ Process *schedule(Process *p, int calls)
suspend_scheduler(esdp);
#endif
- {
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
erts_aint32_t aux_work;
- int leader_update = ERTS_SCHEDULER_IS_DIRTY(esdp) ? 0
- : erts_thr_progress_update(esdp);
+ int leader_update = erts_thr_progress_update(esdp);
aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work);
- if (aux_work | leader_update | ERTS_SCHED_FAIR) {
+ if (aux_work | leader_update) {
erts_smp_runq_unlock(rq);
if (leader_update)
erts_thr_progress_leader_update(esdp);
- else if (ERTS_SCHED_FAIR)
- ERTS_SCHED_FAIR_YIELD();
if (aux_work)
handle_aux_work(&esdp->aux_work_data, aux_work, 0);
erts_smp_runq_lock(rq);
}
- ERTS_SMP_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp)
- || !erts_thr_progress_is_blocking());
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking());
}
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
@@ -9483,8 +9649,11 @@ Process *schedule(Process *p, int calls)
}
#endif
+ (void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_EXEC);
scheduler_wait(&fcalls, esdp, rq);
-
+ flags = ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
+ flags |= ERTS_RUNQ_FLG_EXEC;
+ ERTS_MSACC_UPDATE_CACHE();
#ifdef ERTS_SMP
non_empty_runq(rq);
#endif
@@ -9493,12 +9662,14 @@ Process *schedule(Process *p, int calls)
}
else if (!ERTS_SCHEDULER_IS_DIRTY(esdp) &&
(fcalls > input_reductions &&
- prepare_for_sys_schedule(esdp, !0))) {
+ prepare_for_sys_schedule(!0))) {
ErtsMonotonicTime current_time;
/*
* Schedule system-level activities.
*/
+ ERTS_MSACC_PUSH_STATE_CACHED_M();
+
erts_smp_atomic32_set_relb(&function_calls, 0);
fcalls = 0;
@@ -9506,7 +9677,9 @@ Process *schedule(Process *p, int calls)
erts_sys_schedule_interrupt(0);
#endif
erts_smp_runq_unlock(rq);
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_CHECK_IO);
erl_sys_schedule(1);
+ ERTS_MSACC_POP_STATE_M();
current_time = erts_get_monotonic_time(esdp);
if (current_time >= erts_next_timeout_time(esdp->next_tmo_ref))
@@ -9564,11 +9737,12 @@ Process *schedule(Process *p, int calls)
pick_next_process: {
erts_aint32_t psflg_band_mask;
int prio_q;
- int qmask;
+ int qmask, qbit;
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
qmask = (int) (flags & ERTS_RUNQ_FLGS_PROCS_QMASK);
- switch (qmask & -qmask) {
+ qbit = qmask & -qmask;
+ switch (qbit) {
case MAX_BIT:
prio_q = PRIORITY_MAX;
break;
@@ -9584,9 +9758,12 @@ Process *schedule(Process *p, int calls)
case 0: /* No process at all */
default:
ASSERT(qmask == 0);
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER);
goto check_activities_to_run;
}
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_EMULATOR);
+
BM_START_TIMER(system);
/*
@@ -9596,20 +9773,11 @@ Process *schedule(Process *p, int calls)
ASSERT(p); /* Wrong qmask in rq->flags? */
- psflg_band_mask = ~(((erts_aint32_t) 1) << (ERTS_PSFLGS_GET_PRQ_PRIO(state)
- + ERTS_PSFLGS_IN_PRQ_MASK_OFFSET));
-
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT((state & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)) !=
- (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC));
- if (state & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)) {
- ASSERT((ERTS_SCHEDULER_IS_DIRTY_CPU(esdp) && (state & ERTS_PSFLG_DIRTY_CPU_PROC)) ||
- (ERTS_SCHEDULER_IS_DIRTY_IO(esdp) && (state & ERTS_PSFLG_DIRTY_IO_PROC)));
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp) && !(state & ERTS_PSFLG_ACTIVE_SYS))
- goto pick_next_process;
- state &= ~(ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q);
- }
-#endif
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ psflg_band_mask = ~((erts_aint32_t) 0);
+ else
+ psflg_band_mask = ~(((erts_aint32_t) 1) << (ERTS_PSFLGS_GET_PRQ_PRIO(state)
+ + ERTS_PSFLGS_IN_PRQ_MASK_OFFSET));
if (!(state & ERTS_PSFLG_PROXY))
psflg_band_mask &= ~ERTS_PSFLG_IN_RUNQ;
@@ -9632,9 +9800,11 @@ Process *schedule(Process *p, int calls)
| ERTS_PSFLG_RUNNING_SYS))) {
tmp = state & (ERTS_PSFLG_SUSPENDED
| ERTS_PSFLG_PENDING_EXIT
- | ERTS_PSFLG_ACTIVE_SYS);
+ | ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS);
if (tmp != ERTS_PSFLG_SUSPENDED) {
- if (state & ERTS_PSFLG_ACTIVE_SYS)
+ if (state & (ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS))
new |= ERTS_PSFLG_RUNNING_SYS;
else
new |= ERTS_PSFLG_RUNNING;
@@ -9647,7 +9817,8 @@ Process *schedule(Process *p, int calls)
| ERTS_PSFLG_FREE))
|| ((state & (ERTS_PSFLG_SUSPENDED
| ERTS_PSFLG_PENDING_EXIT
- | ERTS_PSFLG_ACTIVE_SYS))
+ | ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS))
== ERTS_PSFLG_SUSPENDED)) {
if (state & ERTS_PSFLG_FREE)
erts_proc_dec_refc(p);
@@ -9666,10 +9837,42 @@ Process *schedule(Process *p, int calls)
esdp->current_process = p;
+
+ reds = context_reds;
+
+#ifdef ERTS_SMP
+
+ erts_smp_runq_unlock(rq);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+#ifdef DEBUG
+ int old_dqbit;
+#endif
+ int dqbit = qbit;
+
+ if (rq == ERTS_DIRTY_CPU_RUNQ)
+ dqbit <<= ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET;
+ else {
+ ASSERT(rq == ERTS_DIRTY_IO_RUNQ);
+ dqbit <<= ERTS_PDSFLGS_IN_IO_PRQ_MASK_OFFSET;
+ }
+
+#ifdef DEBUG
+ old_dqbit = (int)
+#else
+ (void)
+#endif
+ erts_smp_atomic32_read_band_mb(&p->dirty_state, ~dqbit);
+ ASSERT(old_dqbit & dqbit);
+ }
+#endif /* ERTS_DIRTY_SCHEDULERS */
+
+#endif /* ERTS_SMP */
+
}
#ifdef ERTS_SMP
- erts_smp_runq_unlock(rq);
if (flags & ERTS_RUNQ_FLG_PROTECTED)
(void) ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_PROTECTED);
@@ -9678,6 +9881,8 @@ Process *schedule(Process *p, int calls)
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
+ state = erts_smp_atomic32_read_nob(&p->state);
+
if (erts_sched_stat.enabled) {
int prio;
UWord old = ERTS_PROC_SCHED_ID(p,
@@ -9702,15 +9907,56 @@ Process *schedule(Process *p, int calls)
erts_smp_spin_unlock(&erts_sched_stat.lock);
}
- if (ERTS_PROC_PENDING_EXIT(p)) {
+ ASSERT(!p->scheduler_data);
+ p->scheduler_data = esdp;
+
+ state = erts_smp_atomic32_read_nob(&p->state);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (!!(state & ERTS_PSFLGS_DIRTY_WORK)
+ & !(state & ERTS_PSFLG_ACTIVE_SYS)) {
+ /* Migrate to dirty scheduler... */
+ sunlock_sched_out_proc:
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ p->fcalls = reds;
+ goto sched_out_proc;
+
+ }
+ }
+ else {
+ if (state & (ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_PENDING_EXIT
+ | ERTS_PSFLG_EXITING)) {
+ /* Migrate to normal scheduler... */
+ goto sunlock_sched_out_proc;
+ }
+ if ((state & ERTS_PSFLG_DIRTY_ACTIVE_SYS)
+ && rq == ERTS_DIRTY_IO_RUNQ) {
+ /* Migrate to dirty cpu scheduler... */
+ goto sunlock_sched_out_proc;
+ }
+
+ ASSERT((state & ERTS_PSFLG_DIRTY_ACTIVE_SYS)
+ || *p->i == (BeamInstr) em_call_nif);
+
+ ASSERT(rq == ERTS_DIRTY_CPU_RUNQ
+ ? (state & (ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS))
+ : (rq == ERTS_DIRTY_IO_RUNQ
+ && (state & ERTS_PSFLG_DIRTY_IO_PROC)));
+ }
+#endif
+
+ if (state & ERTS_PSFLG_PENDING_EXIT) {
erts_handle_pending_exit(p,
ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
state = erts_smp_atomic32_read_nob(&p->state);
}
- ASSERT(!p->scheduler_data);
- p->scheduler_data = esdp;
-#endif
- reds = context_reds;
+
+#endif /* ERTS_SMP */
+
+ p->fcalls = reds;
if (IS_TRACED(p)) {
if (state & ERTS_PSFLG_EXITING) {
@@ -9739,7 +9985,8 @@ Process *schedule(Process *p, int calls)
reds -= execute_sys_tasks(p, &state, reds);
if (reds <= 0
#ifdef ERTS_DIRTY_SCHEDULERS
- || (state & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC))
+ || ERTS_SCHEDULER_IS_DIRTY(esdp)
+ || (state & ERTS_PSFLGS_DIRTY_WORK)
#endif
) {
p->fcalls = reds;
@@ -9772,13 +10019,13 @@ Process *schedule(Process *p, int calls)
}
}
- if (!(state & ERTS_PSFLG_EXITING)
- && ((FLAGS(p) & F_FORCE_GC)
- || (MSO(p).overhead > BIN_VHEAP_SZ(p)))) {
- reds -= erts_garbage_collect(p, 0, p->arg_reg, p->arity);
- if (reds <= 0) {
- p->fcalls = reds;
- goto sched_out_proc;
+ if (ERTS_IS_GC_DESIRED(p)) {
+ if (!(state & ERTS_PSFLG_EXITING) && !(p->flags & F_DISABLE_GC)) {
+ reds -= erts_garbage_collect_nobump(p, 0, p->arg_reg, p->arity);
+ if (reds <= 0) {
+ p->fcalls = reds;
+ goto sched_out_proc;
+ }
}
}
@@ -9787,7 +10034,6 @@ Process *schedule(Process *p, int calls)
proxy_p = NULL;
}
- p->fcalls = reds;
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
/* Never run a suspended process */
@@ -9818,7 +10064,7 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, Eterm st_result)
if (rp) {
ErtsProcLocks rp_locks;
ErlOffHeap *ohp;
- ErlHeapFragment* bp;
+ ErtsMessage *mp;
Eterm *hp, msg, req_id, result;
Uint st_result_sz, hsz;
#ifdef DEBUG
@@ -9830,11 +10076,7 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, Eterm st_result)
st_result_sz = is_immed(st_result) ? 0 : size_object(st_result);
hsz = st->req_id_sz + st_result_sz + 4 /* 3-tuple */;
- hp = erts_alloc_message_heap(hsz,
- &bp,
- &ohp,
- rp,
- &rp_locks);
+ mp = erts_alloc_message_heap(rp, &rp_locks, hsz, &hp, &ohp);
#ifdef DEBUG
hp_start = hp;
@@ -9859,7 +10101,7 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, Eterm st_result)
ASSERT(hp_start + hsz == hp);
#endif
- erts_queue_message(rp, &rp_locks, bp, msg, NIL);
+ erts_queue_message(rp, &rp_locks, mp, msg, NIL);
if (c_p == rp)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -10061,10 +10303,10 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
else {
if (!garbage_collected) {
FLAGS(c_p) |= F_NEED_FULLSWEEP;
- reds += erts_garbage_collect(c_p,
- 0,
- c_p->arg_reg,
- c_p->arity);
+ reds += erts_garbage_collect_nobump(c_p,
+ 0,
+ c_p->arg_reg,
+ c_p->arity);
garbage_collected = 1;
}
st_res = am_true;
@@ -10073,7 +10315,7 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
case ERTS_PSTT_CPC:
st_res = erts_check_process_code(c_p,
st->arg[0],
- st->arg[1] == am_true,
+ unsigned_val(st->arg[1]),
&reds);
if (is_non_value(st_res)) {
/* Needed gc, but gc was disabled */
@@ -10081,6 +10323,10 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
st = NULL;
}
break;
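+ /* Complete a pending off-heap message queue change for this process. */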
+ case ERTS_PSTT_COHMQ:
+ reds += erts_complete_off_heap_message_queue_change(c_p);
+ st_res = am_true;
+ break;
default:
ERTS_INTERNAL_ERROR("Invalid process sys task type");
st_res = am_false;
@@ -10123,6 +10369,9 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds)
case ERTS_PSTT_CPC:
st_res = am_false;
break;
+ case ERTS_PSTT_COHMQ:
+ st_res = am_false;
+ break;
default:
ERTS_INTERNAL_ERROR("Invalid process sys task type");
st_res = am_false;
@@ -10141,10 +10390,8 @@ BIF_RETTYPE
erts_internal_request_system_task_3(BIF_ALIST_3)
{
Process *rp = erts_proc_lookup(BIF_ARG_1);
- ErtsProcSysTaskQs *stqs, *free_stqs = NULL;
ErtsProcSysTask *st = NULL;
- erts_aint32_t prio, rp_state;
- int rp_locked;
+ erts_aint32_t prio;
Eterm noproc_res, req_type;
if (!rp && !is_internal_pid(BIF_ARG_1)) {
@@ -10201,7 +10448,6 @@ erts_internal_request_system_task_3(BIF_ALIST_3)
}
st = erts_alloc(ERTS_ALC_T_PROC_SYS_TSK,
ERTS_PROC_SYS_TASK_SIZE(tot_sz));
- st->next = st->prev = st; /* Prep for empty prio queue */
ERTS_INIT_OFF_HEAP(&st->off_heap);
hp = &st->heap[0];
@@ -10233,7 +10479,7 @@ erts_internal_request_system_task_3(BIF_ALIST_3)
case am_check_process_code:
if (is_not_atom(st->arg[0]))
goto badarg;
- if (st->arg[1] != am_true && st->arg[1] != am_false)
+ if (is_not_small(st->arg[1]) || (unsigned_val(st->arg[1]) & ~ERTS_CPC_ALL))
goto badarg;
noproc_res = am_false;
st->type = ERTS_PSTT_CPC;
@@ -10245,95 +10491,11 @@ erts_internal_request_system_task_3(BIF_ALIST_3)
goto badarg;
}
- rp_state = erts_smp_atomic32_read_nob(&rp->state);
-
- rp_locked = 0;
-
- free_stqs = NULL;
- if (rp_state & ERTS_PSFLG_ACTIVE_SYS)
- stqs = NULL;
- else {
- alloc_qs:
- stqs = proc_sys_task_queues_alloc();
- stqs->qmask = 1 << prio;
- stqs->ncount = 0;
- stqs->q[PRIORITY_MAX] = NULL;
- stqs->q[PRIORITY_HIGH] = NULL;
- stqs->q[PRIORITY_NORMAL] = NULL;
- stqs->q[PRIORITY_LOW] = NULL;
- stqs->q[prio] = st;
- }
-
- if (!rp_locked) {
- rp_locked = 1;
- erts_smp_proc_lock(rp, ERTS_PROC_LOCK_STATUS);
-
- rp_state = erts_smp_atomic32_read_nob(&rp->state);
- if (rp_state & ERTS_PSFLG_EXITING) {
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
- rp = NULL;
- free_stqs = stqs;
- goto noproc;
- }
- }
-
- if (!rp->sys_task_qs) {
- if (stqs)
- rp->sys_task_qs = stqs;
- else
- goto alloc_qs;
- }
- else {
- if (stqs)
- free_stqs = stqs;
- stqs = rp->sys_task_qs;
- if (!stqs->q[prio]) {
- stqs->q[prio] = st;
- stqs->qmask |= 1 << prio;
- }
- else {
- st->next = stqs->q[prio];
- st->prev = stqs->q[prio]->prev;
- st->next->prev = st;
- st->prev->next = st;
- ASSERT(stqs->qmask & (1 << prio));
- }
- }
-
- if (ERTS_PSFLGS_GET_ACT_PRIO(rp_state) > prio) {
- erts_aint32_t n, a, e;
- /* Need to elevate actual prio */
-
- a = rp_state;
- do {
- if (ERTS_PSFLGS_GET_ACT_PRIO(a) <= prio) {
- n = a;
- break;
- }
- n = e = a;
- n &= ~ERTS_PSFLGS_ACT_PRIO_MASK;
- n |= (prio << ERTS_PSFLGS_ACT_PRIO_OFFSET);
- a = erts_smp_atomic32_cmpxchg_nob(&rp->state, n, e);
- } while (a != e);
- rp_state = n;
+ if (!schedule_process_sys_task(rp, prio, st)) {
+ noproc:
+ notify_sys_task_executed(BIF_P, st, noproc_res);
}
- /*
- * schedule_process_sys_task() unlocks status
- * lock on process.
- */
- schedule_process_sys_task(rp, rp_state, NULL);
-
- if (free_stqs)
- proc_sys_task_queues_free(free_stqs);
-
- BIF_RET(am_ok);
-
-noproc:
-
- notify_sys_task_executed(BIF_P, st, noproc_res);
- if (free_stqs)
- proc_sys_task_queues_free(free_stqs);
BIF_RET(am_ok);
badarg:
@@ -10342,11 +10504,35 @@ badarg:
erts_cleanup_offheap(&st->off_heap);
erts_free(ERTS_ALC_T_PROC_SYS_TSK, st);
}
- if (free_stqs)
- proc_sys_task_queues_free(free_stqs);
BIF_ERROR(BIF_P, BADARG);
}
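+/* Schedule an ERTS_PSTT_COHMQ system task at the user priority of the
+ * process identified by 'pid', asking it to complete a pending off-heap
+ * message queue change; the task is freed again if it cannot be scheduled. */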
+void
+erts_schedule_complete_off_heap_message_queue_change(Eterm pid)
+{
+ Process *rp = erts_proc_lookup(pid);
+ if (rp) {
+ ErtsProcSysTask *st;
+ erts_aint32_t state;
+ int i;
+
+ st = erts_alloc(ERTS_ALC_T_PROC_SYS_TSK,
+ ERTS_PROC_SYS_TASK_SIZE(0));
+ st->type = ERTS_PSTT_COHMQ;
+ st->requester = NIL;
+ st->reply_tag = NIL;
+ st->req_id = NIL;
+ st->req_id_sz = 0;
+ for (i = 0; i < ERTS_MAX_PROC_SYS_TASK_ARGS; i++)
+ st->arg[i] = NIL;
+ ERTS_INIT_OFF_HEAP(&st->off_heap);
+ state = erts_smp_atomic32_read_nob(&rp->state);
+
+ if (!schedule_process_sys_task(rp, ERTS_PSFLGS_GET_USR_PRIO(state), st))
+ erts_free(ERTS_ALC_T_PROC_SYS_TSK, st);
+ }
+}
+
static void
save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio)
{
@@ -10732,6 +10918,9 @@ static void early_init_process_struct(void *varg, Eterm data)
Process *proc = arg->proc;
proc->common.id = make_internal_pid(data);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_atomic32_init_nob(&proc->dirty_state, 0);
+#endif
erts_smp_atomic32_init_relb(&proc->state, arg->state);
#ifdef ERTS_SMP
@@ -10792,6 +10981,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
Eterm args, /* Arguments for function (must be well-formed list). */
ErlSpawnOpts* so) /* Options for spawn. */
{
+ Uint flags = 0;
ErtsRunQueue *rq = NULL;
Process *p;
Sint arity; /* Number of arguments. */
@@ -10801,6 +10991,10 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
Eterm res = THE_NON_VALUE;
erts_aint32_t state = 0;
erts_aint32_t prio = (erts_aint32_t) PRIORITY_NORMAL;
+#ifdef SHCOPY_SPAWN
+ erts_shcopy_t info;
+ INITIALIZE_SHCOPY(info);
+#endif
#ifdef ERTS_SMP
erts_smp_proc_lock(parent, ERTS_PROC_LOCKS_ALL_MINOR);
@@ -10829,6 +11023,15 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
state |= (((prio & ERTS_PSFLGS_PRIO_MASK) << ERTS_PSFLGS_ACT_PRIO_OFFSET)
| ((prio & ERTS_PSFLGS_PRIO_MASK) << ERTS_PSFLGS_USR_PRIO_OFFSET));
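+ /* Record the message queue variant requested at spawn time in both
+ the process state word and the process flags. */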
+ if (so->flags & SPO_OFF_HEAP_MSGQ) {
+ state |= ERTS_PSFLG_OFF_HEAP_MSGQ;
+ flags |= F_OFF_HEAP_MSGQ;
+ }
+ else if (so->flags & SPO_ON_HEAP_MSGQ) {
+ state |= ERTS_PSFLG_ON_HEAP_MSGQ;
+ flags |= F_ON_HEAP_MSGQ;
+ }
+
if (!rq)
rq = erts_get_runq_proc(parent);
@@ -10847,11 +11050,15 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
BM_COUNT(processes_spawned);
BM_SWAP_TIMER(system,size);
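+ /* Calculate the size of the argument list; with SHCOPY_SPAWN the
+ calculation is done for a sharing-preserving copy. */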
+#ifdef SHCOPY_SPAWN
+ arg_size = copy_shared_calculate(args, &info);
+#else
arg_size = size_object(args);
+#endif
BM_SWAP_TIMER(size,system);
heap_need = arg_size;
- p->flags = erts_default_process_flags;
+ p->flags = flags;
p->static_flags = 0;
if (so->flags & SPO_SYSTEM_PROC)
@@ -10900,12 +11107,13 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->stop = p->hend = p->heap + sz;
p->htop = p->heap;
p->heap_sz = sz;
+ p->abandoned_heap = NULL;
+ p->live_hf_end = ERTS_INVALID_HFRAG_PTR;
p->catches = 0;
p->bin_vheap_sz = p->min_vheap_size;
p->bin_old_vheap_sz = p->min_vheap_size;
p->bin_old_vheap = 0;
- p->bin_vheap_mature = 0;
p->sys_task_qs = NULL;
@@ -10924,7 +11132,12 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
BM_MESSAGE(args,p,parent);
BM_START_TIMER(system);
BM_SWAP_TIMER(system,copy);
+#ifdef SHCOPY_SPAWN
+ p->arg_reg[2] = copy_shared_perform(args, arg_size, &info, &p->htop, &p->off_heap);
+ DESTROY_SHCOPY(info);
+#else
p->arg_reg[2] = copy_struct(args, arg_size, &p->htop, &p->off_heap);
+#endif
BM_MESSAGE_COPIED(arg_size);
BM_SWAP_TIMER(copy,system);
p->arity = 3;
@@ -10970,6 +11183,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->accessor_bif_timers = NULL;
#endif
p->mbuf = NULL;
+ p->msg_frag = NULL;
p->mbuf_sz = 0;
p->psd = NULL;
p->dictionary = NULL;
@@ -11105,6 +11319,8 @@ void erts_init_empty_process(Process *p)
p->stop = NULL;
p->hend = NULL;
p->heap = NULL;
+ p->abandoned_heap = NULL;
+ p->live_hf_end = ERTS_INVALID_HFRAG_PTR;
p->gen_gcs = 0;
p->max_gen_gcs = 0;
p->min_heap_size = 0;
@@ -11125,7 +11341,6 @@ void erts_init_empty_process(Process *p)
p->bin_old_vheap_sz = BIN_VH_MIN_SIZE;
p->bin_old_vheap = 0;
p->sys_task_qs = NULL;
- p->bin_vheap_mature = 0;
ERTS_PTMR_INIT(p);
p->next = NULL;
p->off_heap.first = NULL;
@@ -11137,6 +11352,7 @@ void erts_init_empty_process(Process *p)
p->old_htop = NULL;
p->old_heap = NULL;
p->mbuf = NULL;
+ p->msg_frag = NULL;
p->mbuf_sz = 0;
p->psd = NULL;
ERTS_P_MONITORS(p) = NULL;
@@ -11194,6 +11410,9 @@ void erts_init_empty_process(Process *p)
p->last_old_htop = NULL;
#endif
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_smp_atomic32_init_nob(&p->dirty_state, 0);
+#endif
erts_smp_atomic32_init_nob(&p->state, (erts_aint32_t) PRIORITY_NORMAL);
#ifdef ERTS_SMP
@@ -11225,6 +11444,8 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->htop == NULL);
ASSERT(p->stop == NULL);
ASSERT(p->hend == NULL);
+ ASSERT(p->abandoned_heap == NULL);
+ ASSERT(p->live_hf_end == ERTS_INVALID_HFRAG_PTR);
ASSERT(p->heap == NULL);
ASSERT(p->common.id == ERTS_INVALID_PID);
ASSERT(ERTS_TRACER_PROC(p) == NIL);
@@ -11302,9 +11523,10 @@ erts_cleanup_empty_process(Process* p)
static void
delete_process(Process* p)
{
- ErlMessage* mp;
-
+ Eterm *heap;
VERBOSE(DEBUG_PROCESSES, ("Removing process: %T\n",p->common.id));
+ VERBOSE(DEBUG_SHCOPY, ("[pid=%T] delete process: %p %p %p %p\n", p->common.id,
+ HEAP_START(p), HEAP_END(p), OLD_HEAP(p), OLD_HEND(p)));
/* Cleanup psd */
@@ -11328,16 +11550,17 @@ delete_process(Process* p)
* Release heaps. Clobber contents in DEBUG build.
*/
-
-#ifdef DEBUG
- sys_memset(p->heap, DEBUG_BAD_BYTE, p->heap_sz*sizeof(Eterm));
-#endif
-
#ifdef HIPE
hipe_delete_process(&p->hipe);
#endif
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP, (void*) p->heap, p->heap_sz*sizeof(Eterm));
+ heap = p->abandoned_heap ? p->abandoned_heap : p->heap;
+
+#ifdef DEBUG
+ sys_memset(heap, DEBUG_BAD_BYTE, p->heap_sz*sizeof(Eterm));
+#endif
+
+ ERTS_HEAP_FREE(ERTS_ALC_T_HEAP, (void*) heap, p->heap_sz*sizeof(Eterm));
if (p->old_heap != NULL) {
#ifdef DEBUG
@@ -11356,27 +11579,14 @@ delete_process(Process* p)
free_message_buffer(p->mbuf);
}
+ if (p->msg_frag)
+ erts_cleanup_messages(p->msg_frag);
+
erts_erase_dicts(p);
/* free all pending messages */
- mp = p->msg.first;
- while(mp != NULL) {
- ErlMessage* next_mp = mp->next;
- if (mp->data.attached) {
- if (is_value(mp->m[0]))
- free_message_buffer(mp->data.heap_frag);
- else {
- if (is_not_nil(mp->m[1])) {
- ErlHeapFragment *heap_frag;
- heap_frag = (ErlHeapFragment *) mp->data.dist_ext->ext_endp;
- erts_cleanup_offheap(&heap_frag->off_heap);
- }
- erts_free_dist_ext_copy(mp->data.dist_ext);
- }
- }
- free_message(mp);
- mp = next_mp;
- }
+ erts_cleanup_messages(p->msg.first);
+ p->msg.first = NULL;
ASSERT(!p->nodes_monitors);
ASSERT(!p->suspend_monitors);
@@ -11395,7 +11605,9 @@ set_proc_exiting(Process *p,
ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCKS_ALL);
enqueue = change_proc_schedule_state(p,
- ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT,
+ (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_PENDING_EXIT
+ | ERTS_PSFLGS_DIRTY_WORK),
ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE,
&state,
&enq_prio,
@@ -11429,10 +11641,7 @@ set_proc_exiting(Process *p,
}
#endif
- if (enqueue)
- add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
- state,
- enq_prio);
+ add2runq(enqueue, enq_prio, p, state, NULL);
}
static ERTS_INLINE erts_aint32_t
@@ -11535,6 +11744,11 @@ save_pending_exiter(Process *p)
else
rq = esdp->run_queue;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
+ rq = ERTS_RUNQ_IX(0); /* Handle on ordinary scheduler */
+#endif
+
plp = proclist_create(p);
erts_smp_runq_lock(rq);
@@ -11544,13 +11758,8 @@ save_pending_exiter(Process *p)
non_empty_runq(rq);
erts_smp_runq_unlock(rq);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
- wake_dirty_schedulers(rq, 0);
- else
-#endif
- wake_scheduler(rq);
+ wake_scheduler(rq);
}
#endif
@@ -11564,35 +11773,46 @@ static ERTS_INLINE void
send_exit_message(Process *to, ErtsProcLocks *to_locksp,
Eterm exit_term, Uint term_size, Eterm token)
{
- if (token == NIL
-#ifdef USE_VM_PROBES
- || token == am_have_dt_utag
-#endif
- ) {
- Eterm* hp;
- Eterm mess;
- ErlHeapFragment* bp;
- ErlOffHeap *ohp;
-
- hp = erts_alloc_message_heap(term_size, &bp, &ohp, to, to_locksp);
+ ErtsMessage *mp;
+ ErlOffHeap *ohp;
+ Eterm* hp;
+ Eterm mess;
+#ifdef SHCOPY_SEND
+ erts_shcopy_t info;
+#endif
+
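+ /* Without a sequential trace token only the exit term is copied to
+ the receiver; with a token, the token is copied as well and seq-trace
+ output is generated. */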
+ if (!have_seqtrace(token)) {
+#ifdef SHCOPY_SEND
+ INITIALIZE_SHCOPY(info);
+ term_size = copy_shared_calculate(exit_term, &info);
+ mp = erts_alloc_message_heap(to, to_locksp, term_size, &hp, &ohp);
+ mess = copy_shared_perform(exit_term, term_size, &info, &hp, ohp);
+ DESTROY_SHCOPY(info);
+#else
+ mp = erts_alloc_message_heap(to, to_locksp, term_size, &hp, &ohp);
mess = copy_struct(exit_term, term_size, &hp, ohp);
- erts_queue_message(to, to_locksp, bp, mess, NIL);
+#endif
+ erts_queue_message(to, to_locksp, mp, mess, NIL);
} else {
- ErlHeapFragment* bp;
- Eterm* hp;
- Eterm mess;
Eterm temp_token;
Uint sz_token;
ASSERT(is_tuple(token));
sz_token = size_object(token);
- bp = new_message_buffer(term_size+sz_token);
- hp = bp->mem;
- mess = copy_struct(exit_term, term_size, &hp, &bp->off_heap);
+#ifdef SHCOPY_SEND
+ INITIALIZE_SHCOPY(info);
+ term_size = copy_shared_calculate(exit_term, &info);
+ mp = erts_alloc_message_heap(to, to_locksp, term_size+sz_token, &hp, &ohp);
+ mess = copy_shared_perform(exit_term, term_size, &info, &hp, ohp);
+ DESTROY_SHCOPY(info);
+#else
+ mp = erts_alloc_message_heap(to, to_locksp, term_size+sz_token, &hp, &ohp);
+ mess = copy_struct(exit_term, term_size, &hp, ohp);
+#endif
/* the trace token must in this case be updated by the caller */
seq_trace_output(token, mess, SEQ_TRACE_SEND, to->common.id, NULL);
- temp_token = copy_struct(token, sz_token, &hp, &bp->off_heap);
- erts_queue_message(to, to_locksp, bp, mess, temp_token);
+ temp_token = copy_struct(token, sz_token, &hp, ohp);
+ erts_queue_message(to, to_locksp, mp, mess, temp_token);
}
}
@@ -11701,11 +11921,7 @@ send_exit_signal(Process *c_p, /* current process if and only
if ((state & ERTS_PSFLG_TRAP_EXIT)
&& (reason != am_kill || (flags & ERTS_XSIG_FLG_IGN_KILL))) {
- if (is_not_nil(token)
-#ifdef USE_VM_PROBES
- && token != am_have_dt_utag
-#endif
- && token_update)
+ if (have_seqtrace(token) && token_update)
seq_trace_update_send(token_update);
if (is_value(exit_tuple))
send_exit_message(rp, rp_locks, exit_tuple, exit_tuple_sz, token);
@@ -12538,7 +12754,7 @@ stack_element_dump(int to, void *to_arg, Eterm* sp, int yreg)
}
if (is_CP(x)) {
- erts_print(to, to_arg, "Return addr %p (", (Eterm *) EXPAND_POINTER(x));
+ erts_print(to, to_arg, "Return addr %p (", (Eterm *) x);
print_function_from_pc(to, to_arg, cp_val(x));
erts_print(to, to_arg, ")\n");
yreg = 0;