author    Rickard Green <[email protected]>  2018-03-07 01:17:21 +0100
committer Rickard Green <[email protected]>  2018-03-21 10:27:03 +0100
commit    4bc282d812cc2c49aa3e2d073e96c720f16aa270 (patch)
tree      a7b00cd079368590dc09f62a4d5402be157462ca /erts/emulator/beam/erl_process.c
parent    348a4e057db36fac13f1551c0a1c17f0d376da48 (diff)
Implementation of true asynchronous signaling between processes
Communication between Erlang processes has conceptually always been performed through asynchronous signaling. The runtime system implementation has, however, previously performed most operations synchronously. In a system with only one true thread of execution this is not problematic (often the opposite). In a system with multiple threads of execution (as in the current runtime system implementation with SMP support) it becomes problematic, since it often involves locking of structures when updating them, which in turn causes resource contention. Utilizing true asynchronous communication often avoids these resource contention issues. The case that triggered this change was contention on the link lock due to frequent updates of the monitor trees during communication with a frequently used server.

The signal order delivery guarantees of the language make it hard to change the implementation of only some signals to use true asynchronous signaling. Therefore the implementations of (almost) all signals have been changed. Currently the following signals have been implemented as true asynchronous signals:

- Message signals
- Exit signals
- Monitor signals
- Demonitor signals
- Monitor triggered signals (DOWN, CHANGE, etc)
- Link signals
- Unlink signals
- Group leader signals

All of the above are already defined as asynchronous signals in the language. The implementation of message signals was quite asynchronous to begin with, but had quite strict delivery constraints due to the ordering guarantees of signals between a pair of processes. The previously used message queue, partitioned into two halves, has been replaced by a more general signal queue, partitioned into three parts, that services all kinds of signals. More details regarding the signal queue can be found in comments in the erl_proc_sig_queue.h file. The monitor and link implementations have also been completely replaced in order to fit the new asynchronous signaling implementation as well as possible. More details regarding the new monitor and link implementations can be found in the erl_monitor_link.h file.
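To make the three-part partitioning easier to picture, below is a minimal, hypothetical C sketch of the idea: senders append signals to an outer in-queue under a lock, the receiver fetches that whole queue into a private middle queue in one step, and processed message signals end up in an inner queue that message reception scans. All names and types in the sketch are invented for illustration and do not correspond to the actual structures in erl_proc_sig_queue.h (in the real implementation these parts roughly correspond to the sig_inq and sig_qs process fields touched by this diff).

#include <stdio.h>
#include <stdlib.h>

/*
 * Hypothetical sketch of a signal queue partitioned into three parts.
 * Not the real ERTS data structures; names are invented for this
 * illustration only.
 */

typedef struct demo_sig {
    struct demo_sig *next;
    int is_message;              /* message signal vs. non-message signal */
    /* ... signal payload ... */
} DemoSig;

typedef struct {
    DemoSig *first;
    DemoSig **last;              /* tail pointer for O(1) append */
} DemoSigList;

typedef struct {
    DemoSigList inq;             /* outer: appended to by senders (lock protected) */
    DemoSigList middle;          /* middle: fetched but not yet processed */
    DemoSigList msgq;            /* inner: processed message signals (seen by receive) */
} DemoSigQueue;

static void demo_list_init(DemoSigList *l)
{
    l->first = NULL;
    l->last = &l->first;
}

static void demo_enqueue(DemoSigList *l, DemoSig *sig)
{
    sig->next = NULL;
    *l->last = sig;
    l->last = &sig->next;
}

/* Sender side: only the outer in-queue is touched, under its lock. */
static void demo_send_signal(DemoSigQueue *q, DemoSig *sig)
{
    /* lock(q); */
    demo_enqueue(&q->inq, sig);
    /* unlock(q); possibly notify/enqueue the receiving process */
}

/* Receiver side: fetch the whole in-queue in one step, then process
 * signals from the private middle queue without the sender lock held. */
static void demo_fetch_and_handle(DemoSigQueue *q)
{
    DemoSig *sig, *fetched;

    /* lock(q); */
    fetched = q->inq.first;
    demo_list_init(&q->inq);
    /* unlock(q); */

    if (fetched) {
        *q->middle.last = fetched;                 /* append fetched chain */
        while (*q->middle.last)
            q->middle.last = &(*q->middle.last)->next;
    }

    while ((sig = q->middle.first) != NULL) {
        q->middle.first = sig->next;
        if (!q->middle.first)
            q->middle.last = &q->middle.first;
        if (sig->is_message)
            demo_enqueue(&q->msgq, sig);           /* becomes a received message */
        else {
            /* handle exit/link/monitor/... signal here (freeing omitted) */
        }
    }
}

int main(void)
{
    DemoSigQueue q;
    DemoSig *m = calloc(1, sizeof(DemoSig));
    DemoSig *x = calloc(1, sizeof(DemoSig));

    demo_list_init(&q.inq);
    demo_list_init(&q.middle);
    demo_list_init(&q.msgq);

    m->is_message = 1;           /* a message signal */
    x->is_message = 0;           /* e.g. a link/monitor signal */

    demo_send_signal(&q, m);
    demo_send_signal(&q, x);
    demo_fetch_and_handle(&q);

    printf("message signals now in inner queue: %s\n",
           q.msgq.first ? "yes" : "no");
    free(m);
    free(x);
    return 0;
}

The point of the split is that senders only ever contend on the outer queue's lock, while the receiver drains the middle and inner parts without holding any lock visible to senders.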
Diffstat (limited to 'erts/emulator/beam/erl_process.c')
-rw-r--r--  erts/emulator/beam/erl_process.c  1906
1 file changed, 810 insertions, 1096 deletions
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 0d02d10ac9..7969025f57 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2017. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2018. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -53,6 +53,7 @@
#include "erl_nfunc_sched.h"
#include "erl_check_io.h"
#include "erl_poll.h"
+#include "erl_proc_sig_queue.h"
#define ERTS_CHECK_TIME_REDS CONTEXT_REDS
#define ERTS_DELAYED_WAKEUP_INFINITY (~(Uint64) 0)
@@ -431,7 +432,8 @@ typedef enum {
ERTS_PSTT_CLA, /* Copy Literal Area */
ERTS_PSTT_COHMQ, /* Change off heap message queue */
ERTS_PSTT_FTMQ, /* Flush trace msg queue */
- ERTS_PSTT_ETS_FREE_FIXATION
+ ERTS_PSTT_ETS_FREE_FIXATION,
+ ERTS_PSTT_PRIO_SIG /* Elevate prio on signal management */
} ErtsProcSysTaskType;
#define ERTS_MAX_PROC_SYS_TASK_ARGS 2
@@ -579,7 +581,6 @@ dbg_chk_aux_work_val(erts_aint32_t value)
valid |= ERTS_SSI_AUX_WORK_CNCLD_TMRS;
valid |= ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR;
valid |= ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP;
- valid |= ERTS_SSI_AUX_WORK_PENDING_EXITERS;
#if HAVE_ERTS_MSEG
valid |= ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK;
#endif
@@ -601,7 +602,6 @@ dbg_chk_aux_work_val(erts_aint32_t value)
#define ERTS_DBG_CHK_SSI_AUX_WORK(SSI)
#endif
-static void do_handle_pending_exiters(ErtsProcList *);
static void wake_scheduler(ErtsRunQueue *rq);
#if defined(ERTS_ENABLE_LOCK_CHECK)
@@ -666,8 +666,6 @@ erts_pre_init_process(void)
= "MISC_THR_PRGR";
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MISC_IX]
= "MISC";
- erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_PENDING_EXITERS_IX]
- = "PENDING_EXITERS";
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_SET_TMO_IX]
= "SET_TMO";
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX]
@@ -2531,27 +2529,6 @@ handle_mseg_cache_check(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiti
static ERTS_INLINE erts_aint32_t
-handle_pending_exiters(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
-{
- ErtsProcList *pnd_xtrs;
- ErtsRunQueue *rq;
-
- rq = awdp->esdp->run_queue;
- unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_PENDING_EXITERS);
-
- erts_runq_lock(rq);
- pnd_xtrs = rq->procs.pending_exiters;
- rq->procs.pending_exiters = NULL;
- erts_runq_unlock(rq);
-
- if (erts_proclist_fetch(&pnd_xtrs, NULL))
- do_handle_pending_exiters(pnd_xtrs);
-
- return aux_work & ~ERTS_SSI_AUX_WORK_PENDING_EXITERS;
-}
-
-
-static ERTS_INLINE erts_aint32_t
handle_setup_aux_work_timer(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_SET_TMO);
@@ -2632,9 +2609,6 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_MISC,
handle_misc_aux_work);
- HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_PENDING_EXITERS,
- handle_pending_exiters);
-
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_SET_TMO,
handle_setup_aux_work_timer);
@@ -5793,7 +5767,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online, int no_poll_th
rq->wakeup_other = 0;
rq->wakeup_other_reds = 0;
- rq->procs.pending_exiters = NULL;
rq->procs.context_switches = 0;
rq->procs.reductions = 0;
@@ -6328,11 +6301,36 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
int enqueue; /* < 0 -> use proxy */
ErtsRunQueue* runq;
+ ASSERT(!(state & (ERTS_PSFLG_DIRTY_IO_PROC
+ |ERTS_PSFLG_DIRTY_CPU_PROC))
+ || (BeamIsOpCode(*p->i, op_call_nif)
+ || BeamIsOpCode(*p->i, op_apply_bif)));
+
+ a = state;
+
+ /* Clear active-sys if needed... */
+ while (1) {
+ n = e = a;
+ if (a & ERTS_PSFLG_ACTIVE_SYS) {
+ if (a & (ERTS_PSFLG_SIG_Q
+ | ERTS_PSFLG_SIG_IN_Q
+ | ERTS_PSFLG_SYS_TASKS))
+ break;
+ /* Clear active-sys */
+ n &= ~ERTS_PSFLG_ACTIVE_SYS;
+ }
+ a = erts_atomic32_cmpxchg_nob(&p->state, n, e);
+ if (a == e) {
+ a = n;
+ break;
+ }
+ }
+
if (!is_normal_sched)
running_flgs = ERTS_PSFLG_DIRTY_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS;
else {
running_flgs = ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS;
- if (state & ERTS_PSFLG_DIRTY_ACTIVE_SYS
+ if ((a & ERTS_PSFLG_DIRTY_ACTIVE_SYS)
&& (p->flags & (F_DELAY_GC|F_DISABLE_GC))) {
/*
* Delay dirty GC; will be enabled automatically
@@ -6345,14 +6343,12 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
*/
ASSERT(!(p->flags & (F_DIRTY_CLA | F_DIRTY_GC_HIBERNATE)));
- state = erts_atomic32_read_band_nob(&p->state,
- ~ERTS_PSFLG_DIRTY_ACTIVE_SYS);
- state &= ~ERTS_PSFLG_DIRTY_ACTIVE_SYS;
+ a = erts_atomic32_read_band_nob(&p->state,
+ ~ERTS_PSFLG_DIRTY_ACTIVE_SYS);
+ a &= ~ERTS_PSFLG_DIRTY_ACTIVE_SYS;
}
}
- a = state;
-
while (1) {
n = e = a;
@@ -6360,6 +6356,11 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
enqueue = ERTS_ENQUEUE_NOT;
+ ASSERT(((a & (ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE))
+ != ERTS_PSFLG_EXITING)
+ || ((a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED))
+ == ERTS_PSFLG_ACTIVE));
+
n &= ~running_flgs;
if ((a & (ERTS_PSFLG_ACTIVE_SYS|ERTS_PSFLG_DIRTY_ACTIVE_SYS))
|| (a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) {
@@ -6584,7 +6585,7 @@ change_proc_schedule_state(Process *p,
}
- *statep = a;
+ *statep = n;
return enqueue;
}
@@ -6609,6 +6610,95 @@ erts_schedule_process(Process *p, erts_aint32_t state, ErtsProcLocks locks)
schedule_process(p, state, locks);
}
+static ERTS_INLINE erts_aint32_t
+active_sys_enqueue(Process *p, erts_aint32_t state,
+ erts_aint32_t enable_flags, int status_locked)
+{
+ /*
+ * This function may or may not be called with the status lock held.
+ * It always returns without the status lock held!
+ */
+ unsigned int prof_runnable_procs = erts_system_profile_flags.runnable_procs;
+ erts_aint32_t n, a = state, enq_prio = -1;
+ int slocked = status_locked;
+ int enqueue; /* < 0 -> use proxy */
+
+ /* Status lock prevents out of order "runnable proc" trace msgs */
+ ERTS_LC_ASSERT(slocked || !(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)));
+ ERTS_LC_ASSERT(!slocked || (ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)));
+
+ if (!prof_runnable_procs) {
+ if (slocked) {
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ slocked = 0;
+ }
+ }
+ else {
+ if (!slocked) {
+ erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ slocked = !0;
+ }
+ }
+
+ ASSERT(!(state & ERTS_PSFLG_PROXY));
+
+ while (1) {
+ erts_aint32_t e;
+ n = e = a;
+
+ if (a & ERTS_PSFLG_FREE)
+ goto cleanup; /* We don't want to schedule free processes... */
+
+ enqueue = ERTS_ENQUEUE_NOT;
+ n |= enable_flags;
+ n |= ERTS_PSFLG_ACTIVE_SYS;
+ if (!(a & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)))
+ enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a);
+ a = erts_atomic32_cmpxchg_mb(&p->state, n, e);
+ if (a == e)
+ break;
+ if (a == n && enqueue == ERTS_ENQUEUE_NOT)
+ goto cleanup;
+ }
+
+ if (prof_runnable_procs) {
+
+ if (!(a & (ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS))
+ && (!(a & ERTS_PSFLG_ACTIVE) || (a & ERTS_PSFLG_SUSPENDED))) {
+ /* We activated a previously inactive process */
+ profile_runnable_proc(p, am_active);
+ }
+
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ slocked = 0;
+ }
+
+ add2runq(enqueue, enq_prio, p, n, NULL);
+
+cleanup:
+
+ if (slocked)
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+
+ ERTS_LC_ASSERT(!(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)));
+
+ return n;
+}
+
+erts_aint32_t
+erts_proc_sys_schedule(Process *p, erts_aint32_t state, erts_aint32_t enable_flag)
+{
+ /* We are not allowed to call this function with status lock held... */
+ return active_sys_enqueue(p, state, enable_flag, 0);
+}
+
static int
schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st,
erts_aint32_t *fail_state_p)
@@ -6616,19 +6706,16 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st,
int res;
int locked;
ErtsProcSysTaskQs *stqs, *free_stqs;
- erts_aint32_t fail_state, state, a, n, enq_prio;
- int enqueue; /* < 0 -> use proxy */
- unsigned int prof_runnable_procs;
+ erts_aint32_t fail_state, state;
fail_state = *fail_state_p;
res = 1; /* prepare for success */
st->next = st->prev = st; /* Prep for empty prio queue */
state = erts_atomic32_read_nob(&p->state);
- prof_runnable_procs = erts_system_profile_flags.runnable_procs;
locked = 0;
free_stqs = NULL;
- if (state & ERTS_PSFLG_ACTIVE_SYS)
+ if (state & ERTS_PSFLG_SYS_TASKS)
stqs = NULL;
else {
alloc_qs:
@@ -6648,6 +6735,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st,
state = erts_atomic32_read_nob(&p->state);
if (state & fail_state) {
+ erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
*fail_state_p = (state & fail_state);
free_stqs = stqs;
res = 0;
@@ -6695,69 +6783,14 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st,
state = n;
}
-
- a = state;
- enq_prio = -1;
-
- /* Status lock prevents out of order "runnable proc" trace msgs */
- ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
-
- if (!prof_runnable_procs) {
- erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- locked = 0;
- }
-
- ASSERT(!(state & ERTS_PSFLG_PROXY));
-
- while (1) {
- erts_aint32_t e;
- n = e = a;
-
- if (a & ERTS_PSFLG_FREE)
- goto cleanup; /* We don't want to schedule free processes... */
-
- enqueue = ERTS_ENQUEUE_NOT;
- n |= ERTS_PSFLG_ACTIVE_SYS;
- if (!(a & (ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS
- | ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)))
- enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a);
- a = erts_atomic32_cmpxchg_mb(&p->state, n, e);
- if (a == e)
- break;
- if (a == n && enqueue == ERTS_ENQUEUE_NOT)
- goto cleanup;
- }
-
- if (prof_runnable_procs) {
-
- if (!(a & (ERTS_PSFLG_ACTIVE_SYS
- | ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS
- | ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS))
- && (!(a & ERTS_PSFLG_ACTIVE) || (a & ERTS_PSFLG_SUSPENDED))) {
- /* We activated a prevously inactive process */
- profile_runnable_proc(p, am_active);
- }
-
- erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- locked = 0;
- }
-
- add2runq(enqueue, enq_prio, p, n, NULL);
+ /* active_sys_enqueue() always returns with the status lock unlocked */
+ (void) active_sys_enqueue(p, state, ERTS_PSFLG_SYS_TASKS, locked);
cleanup:
- if (locked)
- erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
-
if (free_stqs)
proc_sys_task_queues_free(free_stqs);
- ERTS_LC_ASSERT(!(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)));
-
return res;
}
@@ -8536,11 +8569,13 @@ static ERTS_INLINE void
cancel_suspend_of_suspendee(Process *p, ErtsProcLocks p_locks)
{
if (is_not_nil(p->suspendee)) {
+ ErtsMonitor *mon;
+ Eterm suspendee = p->suspendee;
Process *rp;
if (!(p_locks & ERTS_PROC_LOCK_STATUS))
erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
rp = erts_pid2proc(p, p_locks|ERTS_PROC_LOCK_STATUS,
- p->suspendee, ERTS_PROC_LOCK_STATUS);
+ suspendee, ERTS_PROC_LOCK_STATUS);
if (rp) {
erts_resume(rp, ERTS_PROC_LOCK_STATUS);
erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
@@ -8548,6 +8583,14 @@ cancel_suspend_of_suspendee(Process *p, ErtsProcLocks p_locks)
if (!(p_locks & ERTS_PROC_LOCK_STATUS))
erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
p->suspendee = NIL;
+
+ mon = erts_monitor_tree_lookup(p->suspend_monitors,
+ suspendee);
+ if (mon) {
+ erts_monitor_tree_delete(&p->suspend_monitors,
+ mon);
+ erts_monitor_suspend_destroy(erts_monitor_suspend(mon));
+ }
}
}
@@ -8706,8 +8749,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
{
erts_aint32_t state;
state = erts_atomic32_read_nob(&rp->state);
- ASSERT((state & ERTS_PSFLG_PENDING_EXIT)
- || !(state & ERTS_PSFLG_RUNNING));
+ ASSERT(!(state & ERTS_PSFLG_RUNNING));
}
#endif
@@ -8762,7 +8804,7 @@ erts_pid2proc_nropt(Process *c_p, ErtsProcLocks c_p_locks,
static ERTS_INLINE int
do_bif_suspend_process(Process *c_p,
- ErtsSuspendMonitor *smon,
+ ErtsMonitorSuspend *smon,
Process *suspendee)
{
ASSERT(suspendee);
@@ -8795,21 +8837,25 @@ handle_pend_bif_sync_suspend(Process *suspendee,
suspender = erts_pid2proc(suspendee,
suspendee_locks,
suspender_pid,
- ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
+ ERTS_PROC_LOCK_STATUS);
if (suspender) {
+ ErtsMonitorSuspend *smon;
+ ErtsMonitor *mon;
+ mon = erts_monitor_tree_lookup(suspender->suspend_monitors,
+ suspendee->common.id);
+ smon = erts_monitor_suspend(mon);
+
ASSERT(is_nil(suspender->suspendee));
- if (!suspendee_alive)
- erts_delete_suspend_monitor(&suspender->suspend_monitors,
- suspendee->common.id);
+ if (!suspendee_alive) {
+ if (mon) {
+ erts_monitor_tree_delete(&suspender->suspend_monitors,
+ mon);
+ erts_monitor_suspend_destroy(smon);
+ }
+ }
else {
#ifdef DEBUG
- int res;
-#endif
- ErtsSuspendMonitor *smon;
- smon = erts_lookup_suspend_monitor(suspender->suspend_monitors,
- suspendee->common.id);
-#ifdef DEBUG
- res =
+ int res =
#endif
do_bif_suspend_process(suspendee, smon, suspendee);
ASSERT(!smon || res != 0);
@@ -8818,9 +8864,8 @@ handle_pend_bif_sync_suspend(Process *suspendee,
/* suspender is suspended waiting for suspendee to suspend;
resume suspender */
ASSERT(suspender != suspendee);
- resume_process(suspender, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
- erts_proc_unlock(suspender,
- ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
+ resume_process(suspender, ERTS_PROC_LOCK_STATUS);
+ erts_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS);
}
}
@@ -8838,26 +8883,29 @@ handle_pend_bif_async_suspend(Process *suspendee,
suspender = erts_pid2proc(suspendee,
suspendee_locks,
suspender_pid,
- ERTS_PROC_LOCK_LINK);
+ ERTS_PROC_LOCK_STATUS);
if (suspender) {
+ ErtsMonitorSuspend *smon;
+ ErtsMonitor *mon;
+ mon = erts_monitor_tree_lookup(suspender->suspend_monitors,
+ suspendee->common.id);
+ smon = erts_monitor_suspend(mon);
ASSERT(is_nil(suspender->suspendee));
- if (!suspendee_alive)
- erts_delete_suspend_monitor(&suspender->suspend_monitors,
- suspendee->common.id);
+ if (!suspendee_alive) {
+ if (mon) {
+ erts_monitor_tree_delete(&suspender->suspend_monitors,
+ mon);
+ erts_monitor_suspend_destroy(smon);
+ }
+ }
else {
#ifdef DEBUG
- int res;
+ int res =
#endif
- ErtsSuspendMonitor *smon;
- smon = erts_lookup_suspend_monitor(suspender->suspend_monitors,
- suspendee->common.id);
-#ifdef DEBUG
- res =
-#endif
- do_bif_suspend_process(suspendee, smon, suspendee);
+ do_bif_suspend_process(suspendee, smon, suspendee);
ASSERT(!smon || res != 0);
}
- erts_proc_unlock(suspender, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS);
}
}
@@ -8871,8 +8919,9 @@ suspend_process_2(BIF_ALIST_2)
{
Eterm res;
Process* suspendee = NULL;
- ErtsSuspendMonitor *smon;
+ ErtsMonitorSuspend *smon;
ErtsProcLocks xlocks = (ErtsProcLocks) 0;
+ int created;
/* Options and default values: */
int asynchronous = 0;
@@ -8905,9 +8954,7 @@ suspend_process_2(BIF_ALIST_2)
goto badarg;
}
- xlocks = ERTS_PROC_LOCK_LINK | (asynchronous
- ? (ErtsProcLocks) 0
- : ERTS_PROC_LOCK_STATUS);
+ xlocks = ERTS_PROC_LOCK_STATUS;
erts_proc_lock(BIF_P, xlocks);
@@ -8918,15 +8965,14 @@ suspend_process_2(BIF_ALIST_2)
if (!suspendee)
goto no_suspendee;
- smon = erts_add_or_lookup_suspend_monitor(&BIF_P->suspend_monitors,
- BIF_ARG_1);
-
- /* ... but a little trickier with SMP support ... */
+ smon = erts_monitor_suspend_tree_lookup_create(&BIF_P->suspend_monitors,
+ &created,
+ BIF_ARG_1);
if (asynchronous) {
/* --- Asynchronous suspend begin ---------------------------------- */
- ERTS_LC_ASSERT(ERTS_PROC_LOCK_LINK
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS
& erts_proc_lc_my_proc_locks(BIF_P));
ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS
== erts_proc_lc_my_proc_locks(suspendee));
@@ -8968,9 +9014,9 @@ suspend_process_2(BIF_ALIST_2)
else /* if (!asynchronous) */ {
/* --- Synchronous suspend begin ----------------------------------- */
- ERTS_LC_ASSERT(((ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS)
+ ERTS_LC_ASSERT(((ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_STATUS)
& erts_proc_lc_my_proc_locks(BIF_P))
- == (ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS));
+ == (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_STATUS));
ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS
== erts_proc_lc_my_proc_locks(suspendee));
@@ -9055,9 +9101,15 @@ suspend_process_2(BIF_ALIST_2)
ERTS_BIF_PREP_ERROR(res, BIF_P, SYSTEM_LIMIT);
goto do_return;
- no_suspendee:
- BIF_P->suspendee = NIL;
- erts_delete_suspend_monitor(&BIF_P->suspend_monitors, BIF_ARG_1);
+ no_suspendee: {
+ ErtsMonitor *mon;
+ BIF_P->suspendee = NIL;
+ mon = erts_monitor_tree_lookup(BIF_P->suspend_monitors, BIF_ARG_1);
+ if (mon) {
+ erts_monitor_tree_delete(&BIF_P->suspend_monitors, mon);
+ erts_monitor_suspend_destroy(erts_monitor_suspend(mon));
+ }
+ }
badarg:
ERTS_BIF_PREP_ERROR(res, BIF_P, BADARG);
@@ -9084,15 +9136,17 @@ suspend_process_2(BIF_ALIST_2)
BIF_RETTYPE
resume_process_1(BIF_ALIST_1)
{
- ErtsSuspendMonitor *smon;
+ ErtsMonitor *mon;
+ ErtsMonitorSuspend *smon;
Process *suspendee;
int is_active;
if (BIF_P->common.id == BIF_ARG_1)
BIF_ERROR(BIF_P, BADARG);
- erts_proc_lock(BIF_P, ERTS_PROC_LOCK_LINK);
- smon = erts_lookup_suspend_monitor(BIF_P->suspend_monitors, BIF_ARG_1);
+ erts_proc_lock(BIF_P, ERTS_PROC_LOCK_STATUS);
+ mon = erts_monitor_tree_lookup(BIF_P->suspend_monitors, BIF_ARG_1);
+ smon = erts_monitor_suspend(mon);
if (!smon) {
/* No previous suspend or dead suspendee */
@@ -9109,20 +9163,17 @@ resume_process_1(BIF_ALIST_1)
}
else if (smon->active) {
smon->active--;
- ASSERT(smon->pending >= 0);
+ ASSERT(smon->pending == 0);
is_active = 1;
}
else {
/* No previous suspend or dead suspendee */
- goto error;
+ goto no_suspendee;
}
if (smon->active || smon->pending || !is_active) {
/* Leave the suspendee as it is; just verify that it is still alive */
- suspendee = erts_pid2proc(BIF_P,
- ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK,
- BIF_ARG_1,
- 0);
+ suspendee = erts_proc_lookup(BIF_ARG_1);
if (!suspendee)
goto no_suspendee;
@@ -9130,11 +9181,18 @@ resume_process_1(BIF_ALIST_1)
else {
/* Resume */
suspendee = erts_pid2proc(BIF_P,
- ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_LINK,
+ ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS,
BIF_ARG_1,
ERTS_PROC_LOCK_STATUS);
- if (!suspendee)
+ if (!suspendee) {
+ mon = erts_monitor_tree_lookup(BIF_P->suspend_monitors, BIF_ARG_1);
+ smon = erts_monitor_suspend(mon);
+ if (!mon)
+ goto error;
goto no_suspendee;
+ }
+
+ ASSERT(mon == erts_monitor_tree_lookup(BIF_P->suspend_monitors, BIF_ARG_1));
ASSERT(ERTS_PSFLG_SUSPENDED
& erts_atomic32_read_nob(&suspendee->state));
@@ -9144,19 +9202,24 @@ resume_process_1(BIF_ALIST_1)
erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
}
- if (!smon->active && !smon->pending)
- erts_delete_suspend_monitor(&BIF_P->suspend_monitors, BIF_ARG_1);
+ if (!smon->active && !smon->pending) {
+ ASSERT(mon);
+ erts_monitor_tree_delete(&BIF_P->suspend_monitors, mon);
+ erts_monitor_suspend_destroy(smon);
+ }
- erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS);
BIF_RET(am_true);
no_suspendee:
/* cleanup */
- erts_delete_suspend_monitor(&BIF_P->suspend_monitors, BIF_ARG_1);
+ ASSERT(mon);
+ erts_monitor_tree_delete(&BIF_P->suspend_monitors, mon);
+ erts_monitor_suspend_destroy(smon);
error:
- erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_LINK);
+ erts_proc_unlock(BIF_P, ERTS_PROC_LOCK_STATUS);
BIF_ERROR(BIF_P, BADARG);
}
@@ -9375,9 +9438,8 @@ erts_resume_processes(ErtsProcList *list)
}
Eterm
-erts_get_process_priority(Process *p)
+erts_get_process_priority(erts_aint32_t state)
{
- erts_aint32_t state = erts_atomic32_read_nob(&p->state);
switch (ERTS_PSFLGS_GET_USR_PRIO(state)) {
case PRIORITY_MAX: return am_max;
case PRIORITY_HIGH: return am_high;
@@ -9427,7 +9489,10 @@ erts_set_process_priority(Process *p, Eterm value)
}
max_qbit = 0;
- if (a & ERTS_PSFLG_ACTIVE_SYS)
+ ASSERT((a & ERTS_PSFLG_SYS_TASKS)
+ ? !!p->sys_task_qs
+ : !p->sys_task_qs);
+ if (a & ERTS_PSFLG_SYS_TASKS)
max_qbit |= p->sys_task_qs->qmask;
if (a & ERTS_PSFLG_DELAYED_SYS) {
ErtsProcSysTaskQs *qs;
@@ -9450,8 +9515,8 @@ erts_set_process_priority(Process *p, Eterm value)
aprio = PRIORITY_LOW;
break;
default:
- ERTS_INTERNAL_ERROR("Invalid qmask");
- aprio = -1;
+ aprio = nprio;
+ break;
}
if (aprio > nprio) /* low value -> high prio */
@@ -9627,10 +9692,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
/* have to re-read state after taking lock */
state = erts_atomic32_read_nob(&p->state);
- if (is_normal_sched && (state & ERTS_PSFLG_PENDING_EXIT))
- erts_handle_pending_exit(p, (ERTS_PROC_LOCK_MAIN
- | ERTS_PROC_LOCK_TRACE
- | ERTS_PROC_LOCK_STATUS));
if (p->pending_suspenders)
handle_pending_suspend(p, (ERTS_PROC_LOCK_MAIN
| ERTS_PROC_LOCK_TRACE
@@ -9945,12 +10006,10 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
& ((state & (ERTS_PSFLG_SUSPENDED
| ERTS_PSFLG_EXITING
| ERTS_PSFLG_FREE
- | ERTS_PSFLG_PENDING_EXIT
| ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_DIRTY_ACTIVE_SYS))
!= ERTS_PSFLG_SUSPENDED)
- & (!(state & (ERTS_PSFLG_EXITING
- | ERTS_PSFLG_PENDING_EXIT))
+ & (!(state & ERTS_PSFLG_EXITING)
| (!!is_normal_sched))
);
@@ -10042,7 +10101,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
goto sunlock_sched_out_proc;
}
if (state & (ERTS_PSFLG_ACTIVE_SYS
- | ERTS_PSFLG_PENDING_EXIT
| ERTS_PSFLG_EXITING)) {
/*
* IMPORTANT! We need to take care of
@@ -10065,13 +10123,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
&& (state & ERTS_PSFLG_DIRTY_IO_PROC)));
}
- if (state & ERTS_PSFLG_PENDING_EXIT) {
- erts_handle_pending_exit(p,
- ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
- state = erts_atomic32_read_nob(&p->state);
- }
-
-
erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
/* Clear tracer if it has been removed */
@@ -10093,25 +10144,48 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
}
if (is_normal_sched) {
-
if (state & ERTS_PSFLG_RUNNING_SYS) {
- /*
- * GC is normally never delayed when a process
- * is scheduled out, but might be when executing
- * hand written beam assembly in
- * prim_eval:'receive'. If GC is delayed we are
- * not allowed to execute system tasks.
- */
- if (!(p->flags & F_DELAY_GC)) {
- int cost = execute_sys_tasks(p, &state, reds);
- calls += cost;
- reds -= cost;
- if (reds <= 0)
- goto sched_out_proc;
- if (state & ERTS_PSFLGS_DIRTY_WORK)
- goto sched_out_proc;
+ if (state & (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q)) {
+ int local_only = !!(p->flags & F_LOCAL_SIGS_ONLY);
+ if (!local_only || (state & ERTS_PSFLG_SIG_Q)) {
+ int sig_reds;
+ /*
+ * If we have dirty work scheduled we allow
+ * usage of all reductions since we need to
+ * handle all signals before doing dirty
+ * work...
+ */
+ if (state & ERTS_PSFLGS_DIRTY_WORK)
+ sig_reds = reds;
+ else
+ sig_reds = ERTS_SIG_HANDLE_REDS_MAX_PREFERED;
+ (void) erts_proc_sig_handle_incoming(p,
+ &state,
+ &sig_reds,
+ sig_reds,
+ local_only);
+ reds -= sig_reds;
+ }
+ }
+ if ((state & (ERTS_PSFLG_SYS_TASKS
+ | ERTS_PSFLG_EXITING)) == ERTS_PSFLG_SYS_TASKS) {
+ /*
+ * GC is normally never delayed when a process
+ * is scheduled out, but might be when executing
+ * hand written beam assembly in
+ * prim_eval:'receive'. If GC is delayed we are
+ * not allowed to execute system tasks.
+ */
+ if (!(p->flags & F_DELAY_GC)) {
+ int cost = execute_sys_tasks(p, &state, reds);
+ calls += cost;
+ reds -= cost;
+ }
}
+ if (reds <= 0 || (state & ERTS_PSFLGS_DIRTY_WORK))
+ goto sched_out_proc;
+
ASSERT(state & psflg_running_sys);
ASSERT(!(state & psflg_running));
@@ -10120,7 +10194,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
if (((state & (ERTS_PSFLG_SUSPENDED
| ERTS_PSFLG_ACTIVE)) != ERTS_PSFLG_ACTIVE)
- && !(state & ERTS_PSFLG_EXITING)) {
+ & !(state & ERTS_PSFLG_EXITING)) {
goto sched_out_proc;
}
@@ -10162,15 +10236,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
- /* Never run a suspended process */
-#ifdef DEBUG
- {
- erts_aint32_t dstate = erts_atomic32_read_nob(&p->state);
- ASSERT(!(ERTS_PSFLG_SUSPENDED & dstate)
- || (ERTS_PSFLG_DIRTY_RUNNING_SYS & dstate));
- }
-#endif
-
ASSERT(erts_proc_read_refc(p) > 0);
if (!(state & ERTS_PSFLG_EXITING) && ERTS_PTMR_IS_TIMED_OUT(p)) {
@@ -10183,6 +10248,11 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
ERTS_PTMR_CLEAR(p);
}
+ /* if exiting, we *shall* exit... */
+ ASSERT(!(state & ERTS_PSFLG_EXITING)
+ || p->i == (BeamInstr *) beam_exit
+ || p->i == (BeamInstr *) beam_continue_exit);
+
#ifdef DEBUG
if (is_normal_sched) {
if (state & ERTS_PSFLGS_DIRTY_WORK)
@@ -10198,6 +10268,18 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
ERTS_INTERNAL_ERROR("Executing normal code on dirty UNKNOWN scheduler");
}
}
+ {
+ erts_aint32_t dstate = erts_atomic32_read_nob(&p->state);
+
+ /* Never run a suspended process */
+ ASSERT(!(ERTS_PSFLG_SUSPENDED & dstate)
+ || (ERTS_PSFLG_DIRTY_RUNNING_SYS & dstate));
+
+ /* Do not execute on the wrong type of scheduler... */
+ ASSERT(is_normal_sched
+ ? !(dstate & ERTS_PSFLGS_DIRTY_WORK)
+ : !!(dstate & ERTS_PSFLGS_DIRTY_WORK));
+ }
#endif
return p;
@@ -10399,7 +10481,7 @@ fetch_sys_task(Process *c_p, erts_aint32_t state, int *qmaskp, int *priop)
n |= (prio << ERTS_PSFLGS_ACT_PRIO_OFFSET);
if (!qmask)
- n &= ~ERTS_PSFLG_ACTIVE_SYS;
+ n &= ~ERTS_PSFLG_SYS_TASKS;
if (a == n)
break;
@@ -10439,12 +10521,8 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
int st_prio;
Eterm st_res;
- if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) {
- if (state & ERTS_PSFLG_PENDING_EXIT)
- erts_handle_pending_exit(c_p, ERTS_PROC_LOCK_MAIN);
- ASSERT(ERTS_PROC_IS_EXITING(c_p));
+ if (state & ERTS_PSFLG_EXITING)
break;
- }
st = fetch_sys_task(c_p, state, &qmask, &st_prio);
if (!st)
@@ -10540,6 +10618,70 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
reds -= erts_db_execute_free_fixation(c_p, (DbFixation*)st->arg[0]);
st_res = am_true;
break;
+ case ERTS_PSTT_PRIO_SIG: {
+ erts_aint32_t fail_state, state;
+ int local_only, sig_res, sig_reds = reds;
+ st_res = am_false;
+
+ if (st->arg[0] == am_true)
+ local_only = !0;
+ else
+ local_only = 0;
+
+ sig_reds = reds;
+ sig_res = erts_proc_sig_handle_incoming(c_p, &state, &sig_reds,
+ reds, local_only);
+ reds -= sig_reds;
+
+ if (state & ERTS_PSFLG_EXITING)
+ goto perm_elevate_prio;
+
+ if (sig_res)
+ break;
+
+ st->arg[0] = am_true;
+
+ fail_state = ERTS_PSFLG_EXITING;
+
+ if (schedule_process_sys_task(c_p, st_prio, st, &fail_state)) {
+ /* Successfully rescheduled task... */
+ st = NULL;
+ }
+ else {
+ erts_aint32_t a;
+
+ state = erts_atomic32_read_nob(&c_p->state);
+
+ perm_elevate_prio:
+
+ /*
+ * we are about to terminate; permanently elevate
+ * prio in order to ensure high prio signal
+ * handling...
+ */
+
+ a = state;
+ while (1) {
+ erts_aint32_t aprio, uprio, n, e;
+ ASSERT(!(a & ERTS_PSFLG_FREE));
+ aprio = ERTS_PSFLGS_GET_ACT_PRIO(a);
+ uprio = ERTS_PSFLGS_GET_USR_PRIO(a);
+ if (aprio >= uprio)
+ break; /* user prio >= actual prio */
+ /*
+ * actual prio is higher than user prio; raise
+ * user prio to actual prio...
+ */
+ n = e = a;
+ n &= ~ERTS_PSFLGS_USR_PRIO_MASK;
+ n |= aprio << ERTS_PSFLGS_USR_PRIO_OFFSET;
+ a = erts_atomic32_cmpxchg_mb(&c_p->state, n, e);
+ if (a == e)
+ break;
+ }
+ }
+ break;
+ }
default:
ERTS_INTERNAL_ERROR("Invalid process sys task type");
st_res = am_false;
@@ -10593,6 +10735,7 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds)
case ERTS_PSTT_CPC:
case ERTS_PSTT_COHMQ:
case ERTS_PSTT_ETS_FREE_FIXATION:
+ case ERTS_PSTT_PRIO_SIG:
st_res = am_false;
break;
case ERTS_PSTT_CLA:
@@ -10645,8 +10788,8 @@ erts_execute_dirty_system_task(Process *c_p)
if (c_p->flags & F_DIRTY_GC_HIBERNATE) {
erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
- ERTS_MSGQ_MV_INQ2PRIVQ(c_p);
- if (c_p->msg.len)
+ erts_proc_sig_fetch(c_p);
+ if (c_p->sig_qs.len)
c_p->flags &= ~F_DIRTY_GC_HIBERNATE; /* operation aborted... */
else {
erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
@@ -10718,7 +10861,7 @@ dispatch_system_task(Process *c_p, erts_aint_t fail_state,
switch (st->type) {
case ERTS_PSTT_CPC:
- rp = erts_dirty_process_code_checker;
+ rp = erts_dirty_process_signal_handler;
ASSERT(fail_state & (ERTS_PSFLG_DIRTY_RUNNING
| ERTS_PSFLG_DIRTY_RUNNING_SYS));
if (c_p == rp) {
@@ -10935,13 +11078,15 @@ erts_internal_request_system_task_4(BIF_ALIST_4)
BIF_ARG_2, BIF_ARG_3, BIF_ARG_4);
}
-static void
-erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type, void* arg)
+static int
+schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type,
+ int prio, Eterm arg0, Eterm arg1)
{
- Process *rp = erts_proc_lookup(pid);
+ int res = 0;
+ Process *rp = erts_proc_lookup_raw(pid);
if (rp) {
ErtsProcSysTask *st;
- erts_aint32_t state, fail_state;
+ erts_aint32_t st_prio, fail_state;
st = erts_alloc(ERTS_ALC_T_PROC_SYS_TSK,
ERTS_PROC_SYS_TASK_SIZE(0));
@@ -10950,31 +11095,46 @@ erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type, void* arg)
st->reply_tag = NIL;
st->req_id = NIL;
st->req_id_sz = 0;
- st->arg[0] = (Eterm)arg;
+ st->arg[0] = arg0;
+ st->arg[1] = arg1;
ERTS_INIT_OFF_HEAP(&st->off_heap);
- state = erts_atomic32_read_nob(&rp->state);
-
- fail_state = ERTS_PSFLG_EXITING;
- if (!schedule_process_sys_task(rp, ERTS_PSFLGS_GET_USR_PRIO(state),
- st, &fail_state))
+ if (prio >= 0) {
+ st_prio = (erts_aint32_t) prio;
+ fail_state = ERTS_PSFLG_FREE;
+ }
+ else {
+ erts_aint32_t state = erts_atomic32_read_nob(&rp->state);
+ st_prio = ERTS_PSFLGS_GET_USR_PRIO(state);
+ fail_state = ERTS_PSFLG_EXITING;
+ }
+ res = schedule_process_sys_task(rp, st_prio, st, &fail_state);
+ if (!res)
erts_free(ERTS_ALC_T_PROC_SYS_TSK, st);
}
+ return res;
}
-
void
erts_schedule_complete_off_heap_message_queue_change(Eterm pid)
{
- erts_schedule_generic_sys_task(pid, ERTS_PSTT_COHMQ, NULL);
+ schedule_generic_sys_task(pid, ERTS_PSTT_COHMQ,
+ -1, NIL, NIL);
}
void
erts_schedule_ets_free_fixation(Eterm pid, DbFixation* fix)
{
- erts_schedule_generic_sys_task(pid, ERTS_PSTT_ETS_FREE_FIXATION, fix);
+ schedule_generic_sys_task(pid, ERTS_PSTT_ETS_FREE_FIXATION,
+ -1, (Eterm) fix, NIL);
}
+int
+erts_sig_prio(Eterm pid, int prio)
+{
+ return schedule_generic_sys_task(pid, ERTS_PSTT_PRIO_SIG,
+ prio, am_false, NIL);
+}
static void
flush_dirty_trace_messages(void *vpid)
@@ -11014,7 +11174,7 @@ erts_schedule_flush_trace_messages(Process *proc, int force_on_proc)
dhndl = erts_thr_progress_unmanaged_delay();
- erts_schedule_generic_sys_task(pid, ERTS_PSTT_FTMQ, NULL);
+ schedule_generic_sys_task(pid, ERTS_PSTT_FTMQ, -1, NIL, NIL);
erts_thr_progress_unmanaged_continue(dhndl);
@@ -11210,9 +11370,11 @@ erts_set_gc_state(Process *c_p, int enable)
#endif
erts_atomic32_read_bset_nob(&c_p->state,
- (ERTS_PSFLG_DELAYED_SYS
- | ERTS_PSFLG_ACTIVE_SYS),
- ERTS_PSFLG_ACTIVE_SYS);
+ (ERTS_PSFLG_DELAYED_SYS
+ | ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_SYS_TASKS),
+ (ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_SYS_TASKS));
#ifdef DEBUG
ASSERT(state & ERTS_PSFLG_DELAYED_SYS);
@@ -11675,7 +11837,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->common.u.alive.reg = NULL;
ERTS_P_LINKS(p) = NULL;
ERTS_P_MONITORS(p) = NULL;
- p->nodes_monitors = NULL;
+ ERTS_P_LT_MONITORS(p) = NULL;
p->suspend_monitors = NULL;
ASSERT(is_pid(parent->group_leader));
@@ -11692,14 +11854,20 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
erts_get_default_proc_tracing(&ERTS_TRACE_FLAGS(p), &ERTS_TRACER(p));
- p->msg.first = NULL;
- p->msg.last = &p->msg.first;
- p->msg.save = &p->msg.first;
- p->msg.saved_last = &p->msg.first;
- p->msg.len = 0;
- p->msg_inq.first = NULL;
- p->msg_inq.last = &p->msg_inq.first;
- p->msg_inq.len = 0;
+ p->sig_qs.first = NULL;
+ p->sig_qs.last = &p->sig_qs.first;
+ p->sig_qs.cont = NULL;
+ p->sig_qs.cont_last = &p->sig_qs.cont;
+ p->sig_qs.save = &p->sig_qs.first;
+ p->sig_qs.saved_last = NULL;
+ p->sig_qs.len = 0;
+ p->sig_qs.nmsigs.next = NULL;
+ p->sig_qs.nmsigs.last = NULL;
+ p->sig_inq.first = NULL;
+ p->sig_inq.last = &p->sig_inq.first;
+ p->sig_inq.len = 0;
+ p->sig_inq.nmsigs.next = NULL;
+ p->sig_inq.nmsigs.last = NULL;
p->bif_timers = NULL;
p->mbuf = NULL;
p->msg_frag = NULL;
@@ -11726,8 +11894,6 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->scheduler_data = NULL;
p->suspendee = NIL;
p->pending_suspenders = NULL;
- p->pending_exit.reason = THE_NON_VALUE;
- p->pending_exit.bp = NULL;
#if !defined(NO_FPE_SIGNALS) || defined(HIPE)
p->fp_exception = 0;
@@ -11781,30 +11947,33 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
*/
if (so->flags & SPO_LINK) {
-#ifdef DEBUG
- int ret;
-#endif
-#ifdef DEBUG
- ret = erts_add_link(&ERTS_P_LINKS(parent), LINK_PID, p->common.id);
- ASSERT(ret == 0);
- ret = erts_add_link(&ERTS_P_LINKS(p), LINK_PID, parent->common.id);
- ASSERT(ret == 0);
-#else
- erts_add_link(&ERTS_P_LINKS(parent), LINK_PID, p->common.id);
- erts_add_link(&ERTS_P_LINKS(p), LINK_PID, parent->common.id);
-#endif
-
+ ErtsLink *lnk;
+ ErtsLinkData *ldp = erts_link_create(ERTS_LNK_TYPE_PROC,
+ parent->common.id,
+ p->common.id);
+ lnk = erts_link_tree_lookup_insert(&ERTS_P_LINKS(parent), &ldp->a);
+ if (lnk) {
+ /*
+ * This should more or less never happen, but could
+ * potentially happen if pid:s wrap...
+ */
+ erts_link_release(lnk);
+ }
+ erts_link_tree_insert(&ERTS_P_LINKS(p), &ldp->b);
}
/*
* Test whether this process should be initially monitored by its parent.
*/
if (so->flags & SPO_MONITOR) {
- Eterm mref;
-
- mref = erts_make_ref(parent);
- erts_add_monitor(&ERTS_P_MONITORS(parent), MON_ORIGIN, mref, p->common.id, NIL);
- erts_add_monitor(&ERTS_P_MONITORS(p), MON_TARGET, mref, parent->common.id, NIL);
+ Eterm mref = erts_make_ref(parent);
+ ErtsMonitorData *mdp = erts_monitor_create(ERTS_MON_TYPE_PROC,
+ mref,
+ parent->common.id,
+ p->common.id,
+ NIL);
+ erts_monitor_tree_insert(&ERTS_P_MONITORS(parent), &mdp->origin);
+ erts_monitor_list_insert(&ERTS_P_LT_MONITORS(p), &mdp->target);
so->mref = mref;
}
@@ -11889,13 +12058,23 @@ void erts_init_empty_process(Process *p)
p->mbuf_sz = 0;
erts_atomic_init_nob(&p->psd, (erts_aint_t) NULL);
ERTS_P_MONITORS(p) = NULL;
+ ERTS_P_LT_MONITORS(p) = NULL;
ERTS_P_LINKS(p) = NULL; /* List of links */
- p->nodes_monitors = NULL;
p->suspend_monitors = NULL;
- p->msg.first = NULL;
- p->msg.last = &p->msg.first;
- p->msg.save = &p->msg.first;
- p->msg.len = 0;
+ p->sig_qs.first = NULL;
+ p->sig_qs.last = &p->sig_qs.first;
+ p->sig_qs.cont = NULL;
+ p->sig_qs.cont_last = &p->sig_qs.cont;
+ p->sig_qs.save = &p->sig_qs.first;
+ p->sig_qs.saved_last = NULL;
+ p->sig_qs.len = 0;
+ p->sig_qs.nmsigs.next = NULL;
+ p->sig_qs.nmsigs.last = NULL;
+ p->sig_inq.first = NULL;
+ p->sig_inq.last = &p->sig_inq.first;
+ p->sig_inq.len = 0;
+ p->sig_inq.nmsigs.next = NULL;
+ p->sig_inq.nmsigs.last = NULL;
p->bif_timers = NULL;
p->dictionary = NULL;
p->seq_trace_clock = 0;
@@ -11942,13 +12121,8 @@ void erts_init_empty_process(Process *p)
erts_atomic32_init_nob(&p->state, (erts_aint32_t) PRIORITY_NORMAL);
p->scheduler_data = NULL;
- p->msg_inq.first = NULL;
- p->msg_inq.last = &p->msg_inq.first;
- p->msg_inq.len = 0;
p->suspendee = NIL;
p->pending_suspenders = NULL;
- p->pending_exit.reason = THE_NON_VALUE;
- p->pending_exit.bp = NULL;
erts_proc_lock_init(p);
erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
erts_init_runq_proc(p, ERTS_RUNQ_IX(0), 0);
@@ -11984,11 +12158,11 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->old_heap == NULL);
ASSERT(ERTS_P_MONITORS(p) == NULL);
+ ASSERT(ERTS_P_LT_MONITORS(p) == NULL);
ASSERT(ERTS_P_LINKS(p) == NULL);
- ASSERT(p->nodes_monitors == NULL);
ASSERT(p->suspend_monitors == NULL);
- ASSERT(p->msg.first == NULL);
- ASSERT(p->msg.len == 0);
+ ASSERT(p->sig_qs.first == NULL);
+ ASSERT(p->sig_qs.len == 0);
ASSERT(p->bif_timers == NULL);
ASSERT(p->dictionary == NULL);
ASSERT(p->catches == 0);
@@ -11998,12 +12172,10 @@ erts_debug_verify_clean_empty_process(Process* p)
ASSERT(p->parent == NIL);
- ASSERT(p->msg_inq.first == NULL);
- ASSERT(p->msg_inq.len == 0);
+ ASSERT(p->sig_inq.first == NULL);
+ ASSERT(p->sig_inq.len == 0);
ASSERT(p->suspendee == NIL);
ASSERT(p->pending_suspenders == NULL);
- ASSERT(p->pending_exit.reason == THE_NON_VALUE);
- ASSERT(p->pending_exit.bp == NULL);
/* Thing that erts_cleanup_empty_process() cleans up */
@@ -12104,817 +12276,330 @@ delete_process(Process* p)
erts_erase_dicts(p);
/* free all pending messages */
- erts_cleanup_messages(p->msg.first);
- p->msg.first = NULL;
+ erts_cleanup_messages(p->sig_qs.first);
+ p->sig_qs.first = NULL;
+ erts_cleanup_messages(p->sig_qs.cont);
+ p->sig_qs.cont = NULL;
- ASSERT(!p->nodes_monitors);
ASSERT(!p->suspend_monitors);
p->fvalue = NIL;
}
static ERTS_INLINE void
-set_proc_exiting(Process *p,
- erts_aint32_t in_state,
- Eterm reason,
- ErlHeapFragment *bp)
+set_self_exiting(Process *c_p, Eterm reason, int *enqueue,
+ erts_aint32_t *prio, erts_aint32_t *state)
{
- erts_aint32_t state = in_state, enq_prio = -1;
- int enqueue;
- ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCKS_ALL);
-
- enqueue = change_proc_schedule_state(p,
- (ERTS_PSFLG_SUSPENDED
- | ERTS_PSFLG_PENDING_EXIT
- | ERTS_PSFLGS_DIRTY_WORK),
- ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE,
- &state,
- &enq_prio,
- ERTS_PROC_LOCKS_ALL);
-
- p->fvalue = reason;
- if (bp)
- erts_link_mbuf_to_proc(p, bp);
- /*
- * We used to set freason to EXC_EXIT here, but there is no need to
- * save the stack trace since this process irreversibly is going to
- * exit.
- */
- p->freason = EXTAG_EXIT;
- KILL_CATCHES(p);
- p->i = (BeamInstr *) beam_exit;
-
-
- add2runq(enqueue, enq_prio, p, state, NULL);
-}
-
-static ERTS_INLINE erts_aint32_t
-set_proc_self_exiting(Process *c_p)
-{
-#ifdef DEBUG
- int enqueue;
-#endif
- erts_aint32_t state, enq_prio = -1;
+ erts_aint32_t st, enq_prio = -1;
+ int enq;
ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCKS_ALL);
- state = erts_atomic32_read_nob(&c_p->state);
- ASSERT(state & (ERTS_PSFLG_RUNNING
- |ERTS_PSFLG_RUNNING_SYS
- | ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS));
-
-#ifdef DEBUG
- enqueue =
-#endif
- change_proc_schedule_state(c_p,
- ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT,
- ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE,
- &state,
- &enq_prio,
- ERTS_PROC_LOCKS_ALL);
-
- ASSERT(!enqueue);
- return state;
-}
+ c_p->fvalue = reason;
+ st = erts_atomic32_read_nob(&c_p->state);
+ ASSERT(enqueue || (st & (ERTS_PSFLG_RUNNING
+ |ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)));
-void
-erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks)
-{
- ErtsProcLocks xlocks;
- ASSERT(is_value(c_p->pending_exit.reason));
- ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == locks);
- ERTS_LC_ASSERT(locks & ERTS_PROC_LOCK_MAIN);
- ERTS_LC_ASSERT(!((ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE)
- & erts_atomic32_read_nob(&c_p->state)));
-
- /* Ensure that all locks on c_p are locked before proceeding... */
- if (locks == ERTS_PROC_LOCKS_ALL)
- xlocks = 0;
- else {
- xlocks = ~locks & ERTS_PROC_LOCKS_ALL;
- if (erts_proc_trylock(c_p, xlocks) == EBUSY) {
- erts_proc_unlock(c_p, locks & ~ERTS_PROC_LOCK_MAIN);
- erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- }
+ enq = change_proc_schedule_state(c_p,
+ (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLGS_DIRTY_WORK),
+ ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE,
+ &st,
+ &enq_prio,
+ ERTS_PROC_LOCKS_ALL);
- set_proc_exiting(c_p,
- erts_atomic32_read_acqb(&c_p->state),
- c_p->pending_exit.reason,
- c_p->pending_exit.bp);
- c_p->pending_exit.reason = THE_NON_VALUE;
- c_p->pending_exit.bp = NULL;
-
- if (xlocks)
- erts_proc_unlock(c_p, xlocks);
+ ASSERT(enqueue || !enq);
+ if (enqueue)
+ *enqueue = enq;
+ if (prio)
+ *prio = enq_prio;
+ if (state)
+ *state = st;
}
-static void save_pending_exiter(Process *p, ErtsProcList *plp);
-
-static void
-do_handle_pending_exiters(ErtsProcList *pnd_xtrs)
-{
- /* 'list' is expected to have been fetched (i.e. not a ring anymore) */
- ErtsProcList *plp = pnd_xtrs;
-
- while (plp) {
- ErtsProcList *next_plp = plp->next;
- Process *p = erts_proc_lookup(plp->pid);
- if (p) {
- erts_aint32_t state;
- /*
- * If the process is running on a normal scheduler, the
- * pending exit will soon be detected and handled by the
- * scheduler running the process (at schedule in/out).
- */
- if (erts_proc_trylock(p, ERTS_PROC_LOCKS_ALL) != EBUSY) {
- if (erts_proclist_same(plp, p)) {
- state = erts_atomic32_read_acqb(&p->state);
- if (!(state & (ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS
- | ERTS_PSFLG_EXITING))) {
- ASSERT(state & ERTS_PSFLG_PENDING_EXIT);
- erts_handle_pending_exit(p, ERTS_PROC_LOCKS_ALL);
- }
- }
- erts_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
- }
- else {
- erts_proc_lock(p, ERTS_PROC_LOCK_STATUS);
- if (erts_proclist_same(plp, p)) {
- state = erts_atomic32_read_acqb(&p->state);
- if (!(state & (ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS
- | ERTS_PSFLG_EXITING))) {
- /*
- * Save process and try to acquire all
- * locks at a later time...
- */
- save_pending_exiter(p, plp);
- plp = NULL;
- }
- }
- erts_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- }
- }
- if (plp)
- proclist_destroy(plp);
- plp = next_plp;
- }
-}
-
-static void
-save_pending_exiter(Process *p, ErtsProcList *plp)
+void
+erts_set_self_exiting(Process *c_p, Eterm reason)
{
- ErtsSchedulerSleepInfo *ssi;
- ErtsRunQueue *rq;
-
- ERTS_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
-
- rq = erts_get_runq_proc(p, NULL);
- ASSERT(rq && !ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
-
- if (!plp)
- plp = proclist_create(p);
-
- erts_runq_lock(rq);
-
- erts_proclist_store_last(&rq->procs.pending_exiters, plp);
-
- non_empty_runq(rq);
-
- ssi = rq->scheduler->ssi;
-
- erts_runq_unlock(rq);
+ int enqueue;
+ erts_aint32_t enq_prio, state;
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
- set_aux_work_flags_wakeup_nob(ssi, ERTS_SSI_AUX_WORK_PENDING_EXITERS);
-}
+ erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ set_self_exiting(c_p, reason, &enqueue, &enq_prio, &state);
+ c_p->freason = EXTAG_EXIT;
+ KILL_CATCHES(c_p);
+ c_p->i = (BeamInstr *) beam_exit;
-/*
- * This function delivers an EXIT message to a process
- * which is trapping EXITs.
- */
+ /* Always active when exiting... */
+ ASSERT(state & ERTS_PSFLG_ACTIVE);
-static ERTS_INLINE void
-send_exit_message(Process *to, ErtsProcLocks *to_locksp,
- Eterm exit_term, Uint term_size, Eterm token)
-{
- ErtsMessage *mp;
- ErlOffHeap *ohp;
- Eterm* hp;
- Eterm mess;
-#ifdef SHCOPY_SEND
- erts_shcopy_t info;
-#endif
+ /*
+ * If we are terminating a process that currently
+ * is executing on a dirty scheduler, it *should*
+ * be scheduled on a normal scheduler...
+ */
+ ASSERT(!(state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS))
+ || enqueue == ERTS_ENQUEUE_NORMAL_QUEUE
+ || enqueue == -ERTS_ENQUEUE_NORMAL_QUEUE);
- if (!have_seqtrace(token)) {
-#ifdef SHCOPY_SEND
- INITIALIZE_SHCOPY(info);
- term_size = copy_shared_calculate(exit_term, &info);
- mp = erts_alloc_message_heap(to, to_locksp, term_size, &hp, &ohp);
- mess = copy_shared_perform(exit_term, term_size, &info, &hp, ohp);
- DESTROY_SHCOPY(info);
-#else
- mp = erts_alloc_message_heap(to, to_locksp, term_size, &hp, &ohp);
- mess = copy_struct(exit_term, term_size, &hp, ohp);
-#endif
- erts_queue_message(to, *to_locksp, mp, mess, am_system);
- } else {
- Eterm temp_token;
- Uint sz_token;
-
- ASSERT(is_tuple(token));
- sz_token = size_object(token);
-#ifdef SHCOPY_SEND
- INITIALIZE_SHCOPY(info);
- term_size = copy_shared_calculate(exit_term, &info);
- mp = erts_alloc_message_heap(to, to_locksp, term_size+sz_token, &hp, &ohp);
- mess = copy_shared_perform(exit_term, term_size, &info, &hp, ohp);
- DESTROY_SHCOPY(info);
-#else
- mp = erts_alloc_message_heap(to, to_locksp, term_size+sz_token, &hp, &ohp);
- mess = copy_struct(exit_term, term_size, &hp, ohp);
-#endif
- /* the trace token must in this case be updated by the caller */
- seq_trace_output(token, mess, SEQ_TRACE_SEND, to->common.id, to);
- temp_token = copy_struct(token, sz_token, &hp, ohp);
- ERL_MESSAGE_TOKEN(mp) = temp_token;
- erts_queue_message(to, *to_locksp, mp, mess, am_system);
- }
+ erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
+ if (enqueue)
+ add2runq(enqueue, enq_prio, c_p, state, NULL);
}
-/*
- *
- * *** Exit signal behavior ***
- *
- * Exit signals are asynchronous (truly asynchronous in the
- * SMP emulator). When the signal is received the receiver receives an
- * 'EXIT' message if it is trapping exits; otherwise, it will either
- * ignore the signal if the exit reason is normal, or go into an
- * exiting state (ERTS_PSFLG_EXITING). When a process has gone into the
- * exiting state it will not execute any more Erlang code, but it might
- * take a while before it actually exits. The exit signal is being
- * received when the 'EXIT' message is put in the message queue, the
- * signal is dropped, or when it changes state into exiting. The time it
- * is in the exiting state before actually exiting is undefined (it
- * might take a really long time under certain conditions). The
- * receiver of the exit signal does not break links or trigger monitors
- * until it actually exits.
- *
- * Exit signals and other signals, e.g. messages, have to be received
- * by a receiver in the same order as sent by a sender.
- *
- *
- *
- * Exit signal implementation in the SMP emulator:
- *
- * If the receiver is trapping exits, the signal is transformed
- * into an 'EXIT' message and sent as a normal message, if the
- * reason is normal the signal is dropped; otherwise, the process
- * is determined to be exited. The interesting case is when the
- * process is to be exited and this is what is described below.
- *
- * If it is possible, the receiver is set in the exiting state straight
- * away and we are done; otherwise, the sender places the exit reason
- * in the pending_exit field of the process struct and if necessary
- * adds the receiver to the run queue. It is typically not possible
- * to set a scheduled process or a process which we cannot get all locks
- * on without releasing locks on it in an exiting state straight away.
- *
- * The receiver will poll the pending_exit field when it reach certain
- * places during it's execution. When it discovers the pending exit
- * it will change state into the exiting state. If the receiver wasn't
- * scheduled when the pending exit was set, the first scheduler that
- * schedules a new process will set the receiving process in the exiting
- * state just before it schedules next process.
- *
- * When the exit signal is placed in the pending_exit field, the signal
- * is considered as being in transit on the Erlang level. The signal is
- * actually in some kind of semi transit state, since we have already
- * determined how it should be received. It will exit the process no
- * matter what if it is received (the process may exit by itself before
- * reception of the exit signal). The signal is received when it is
- * discovered in the pending_exit field by the receiver.
- *
- * The receiver have to poll the pending_exit field at least before:
- * - moving messages from the message in queue to the private message
- * queue. This in order to preserve signal order.
- * - unlink. Otherwise the process might get exited on a link that
- * have been removed.
- * - changing the trap_exit flag to true. This in order to simplify the
- * implementation; otherwise, we would have to transform the signal
- * into an 'EXIT' message when setting the trap_exit flag to true. We
- * would also have to maintain a queue of exit signals in transit.
- * - being scheduled in or out.
- */
-
-static ERTS_INLINE int
-send_exit_signal(Process *c_p, /* current process if and only
- if reason is stored on it */
- Eterm from, /* Id of sender of signal */
- Process *rp, /* receiving process */
- ErtsProcLocks *rp_locks,/* current locks on receiver */
- Eterm reason, /* exit reason */
- Eterm exit_tuple, /* Prebuild exit tuple
- or THE_NON_VALUE */
- Uint exit_tuple_sz, /* Size of prebuilt exit tuple
- (if exit_tuple != THE_NON_VALUE) */
- Eterm token, /* token */
- Process *token_update, /* token updater */
- Uint32 flags /* flags */
- )
-{
- erts_aint32_t state = erts_atomic32_read_nob(&rp->state);
- Eterm rsn = reason == am_kill ? am_killed : reason;
-
- ERTS_LC_ASSERT(*rp_locks == erts_proc_lc_my_proc_locks(rp));
- ERTS_LC_ASSERT((*rp_locks & ERTS_PROC_LOCKS_XSIG_SEND)
- == ERTS_PROC_LOCKS_XSIG_SEND);
-
- ASSERT(reason != THE_NON_VALUE);
-
-#ifdef USE_VM_PROBES
- if(DTRACE_ENABLED(process_exit_signal) && is_pid(from)) {
- DTRACE_CHARBUF(sender_str, DTRACE_TERM_BUF_SIZE);
- DTRACE_CHARBUF(receiver_str, DTRACE_TERM_BUF_SIZE);
- DTRACE_CHARBUF(reason_buf, DTRACE_TERM_BUF_SIZE);
-
- dtrace_pid_str(from, sender_str);
- dtrace_proc_str(rp, receiver_str);
- erts_snprintf(reason_buf, sizeof(DTRACE_CHARBUF_NAME(reason_buf)) - 1, "%T", reason);
- DTRACE3(process_exit_signal, sender_str, receiver_str, reason_buf);
+void
+erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt)
+{
+ Process *c_p = ((ErtsProcExitContext *) vctxt)->c_p;
+ Eterm reason = ((ErtsProcExitContext *) vctxt)->reason;
+ ErtsMonitorData *mdp = NULL;
+
+ if (erts_monitor_is_target(mon)) {
+ /* We are being watched... */
+ switch (mon->type) {
+ case ERTS_MON_TYPE_PROC:
+ erts_proc_sig_send_monitor_down(mon, reason);
+ mon = NULL;
+ break;
+ case ERTS_MON_TYPE_PORT: {
+ Port *prt;
+ ASSERT(is_internal_port(mon->other.item));
+ prt = erts_id2port(mon->other.item);
+ if (prt) {
+ erts_fire_port_monitor(prt, mon);
+ erts_port_release(prt);
+ mon = NULL;
+ }
+ break;
+ }
+ case ERTS_MON_TYPE_RESOURCE:
+ erts_fire_nif_monitor(mon);
+ mon = NULL;
+ break;
+ case ERTS_MON_TYPE_DIST_PROC: {
+ ErtsMonLnkDist *dist;
+ DistEntry *dep;
+ ErtsDSigData dsd;
+ int code;
+ Eterm watcher;
+ Eterm watched;
+
+ mdp = erts_monitor_to_data(mon);
+
+ if (mon->flags & ERTS_ML_FLG_NAME)
+ watched = ((ErtsMonitorDataExtended *) mdp)->u.name;
+ else
+ watched = c_p->common.id;
+ ASSERT(is_internal_pid(watched) || is_atom(watched));
+
+ watcher = mon->other.item;
+ ASSERT(is_external_pid(watcher));
+ dep = external_pid_dist_entry(watcher);
+ ASSERT(dep);
+ dist = ((ErtsMonitorDataExtended *) mdp)->dist;
+ ASSERT(dist);
+ code = erts_dsig_prepare(&dsd, dep, NULL, 0,
+ ERTS_DSP_NO_LOCK, 0, 0);
+ switch (code) {
+ case ERTS_DSIG_PREP_CONNECTED:
+ case ERTS_DSIG_PREP_PENDING:
+ if (dist->connection_id == dsd.connection_id) {
+ code = erts_dsig_send_m_exit(&dsd,
+ watcher,
+ watched,
+ mdp->ref,
+ reason);
+ ASSERT(code == ERTS_DSIG_SEND_OK);
+ }
+ default:
+ break;
+ }
+ if (!erts_monitor_dist_delete(&mdp->origin))
+ mdp = NULL;
+ break;
+ }
+ default:
+ ERTS_INTERNAL_ERROR("Invalid target monitor type");
+ break;
+ }
}
-#endif
-
- if ((state & ERTS_PSFLG_TRAP_EXIT)
- && (reason != am_kill || (flags & ERTS_XSIG_FLG_IGN_KILL))) {
- /* have to release the status and trace lock in order to send the exit message */
- erts_proc_unlock(rp, *rp_locks & (ERTS_PROC_LOCKS_XSIG_SEND|ERTS_PROC_LOCK_TRACE));
- *rp_locks &= ~(ERTS_PROC_LOCKS_XSIG_SEND|ERTS_PROC_LOCK_TRACE);
- if (have_seqtrace(token) && token_update)
- seq_trace_update_send(token_update);
- if (is_value(exit_tuple))
- send_exit_message(rp, rp_locks, exit_tuple, exit_tuple_sz, token);
- else
- erts_deliver_exit_message(from, rp, rp_locks, rsn, token);
- return 1; /* Receiver will get a message */
- }
- else if (reason != am_normal || (flags & ERTS_XSIG_FLG_NO_IGN_NORMAL)) {
- if (!(state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))) {
- ASSERT(!rp->pending_exit.bp);
-
- if (rp == c_p && (*rp_locks & ERTS_PROC_LOCK_MAIN)) {
- /* Ensure that all locks on c_p are locked before
- proceeding... */
- if (*rp_locks != ERTS_PROC_LOCKS_ALL) {
- ErtsProcLocks need_locks = (~(*rp_locks)
- & ERTS_PROC_LOCKS_ALL);
- if (erts_proc_trylock(c_p, need_locks) == EBUSY) {
- erts_proc_unlock(c_p,
- *rp_locks & ~ERTS_PROC_LOCK_MAIN);
- erts_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
- }
- *rp_locks = ERTS_PROC_LOCKS_ALL;
- }
- set_proc_exiting(c_p, state, rsn, NULL);
- }
- else if (!(state & (ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS
- | ERTS_PSFLG_DIRTY_RUNNING_SYS))) {
- /* Process not running ... */
- ErtsProcLocks need_locks = ~(*rp_locks) & ERTS_PROC_LOCKS_ALL;
- ErlHeapFragment *bp = NULL;
- Eterm rsn_cpy;
- if (need_locks
- && erts_proc_trylock(rp, need_locks) == EBUSY) {
- /* ... but we havn't got all locks on it ... */
- save_pending_exiter(rp, NULL);
- /*
- * The pending exit will be discovered when next
- * process is scheduled in
- */
- goto set_pending_exit;
- }
- /* ...and we have all locks on it... */
- *rp_locks = ERTS_PROC_LOCKS_ALL;
-
- state = erts_atomic32_read_nob(&rp->state);
-
- if (is_immed(rsn))
- rsn_cpy = rsn;
- else {
- Eterm *hp;
- ErlOffHeap *ohp;
- Uint rsn_sz = size_object(rsn);
- if (state & ERTS_PSFLG_DIRTY_RUNNING) {
- bp = new_message_buffer(rsn_sz);
- ohp = &bp->off_heap;
- hp = &bp->mem[0];
- }
- else
- {
- hp = HAlloc(rp, rsn_sz);
- ohp = &rp->off_heap;
- }
- rsn_cpy = copy_struct(rsn, rsn_sz, &hp, ohp);
- }
-
- set_proc_exiting(rp, state, rsn_cpy, bp);
- }
- else { /* Process running... */
-
- /*
- * The pending exit will be discovered when the process
- * is scheduled out if not discovered earlier.
- */
-
- set_pending_exit:
- if (is_immed(rsn)) {
- rp->pending_exit.reason = rsn;
- }
- else {
- Eterm *hp;
- Uint sz = size_object(rsn);
- ErlHeapFragment *bp = new_message_buffer(sz);
-
- hp = &bp->mem[0];
- rp->pending_exit.reason = copy_struct(rsn,
- sz,
- &hp,
- &bp->off_heap);
- rp->pending_exit.bp = bp;
- }
-
- /*
- * If no dirty work has been scheduled, pending exit will
- * be discovered when the process is scheduled. If dirty work
- * has been scheduled, we may need to add it to a normal run
- * queue...
- */
- {
- erts_aint32_t a = erts_atomic32_read_nob(&rp->state);
- while (1) {
- erts_aint32_t n, e;
- int dwork;
- n = e = a;
- n |= ERTS_PSFLG_PENDING_EXIT;
- dwork = !!(n & ERTS_PSFLGS_DIRTY_WORK);
- n &= ~ERTS_PSFLGS_DIRTY_WORK;
- a = erts_atomic32_cmpxchg_mb(&rp->state, n, e);
- if (a == e) {
- if (dwork)
- erts_schedule_process(rp, n, *rp_locks);
- break;
- }
- }
+ else { /* Origin monitor */
+ /* We are watching someone else... */
+ switch (mon->type) {
+ case ERTS_MON_TYPE_PROC:
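+ /* The demonitor signal takes over the monitor structure; clear the pointer so it is not released below. */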
+ erts_proc_sig_send_demonitor(mon);
+ mon = NULL;
+ break;
+ case ERTS_MON_TYPE_TIME_OFFSET:
+ erts_demonitor_time_offset(mon);
+ mon = NULL;
+ break;
+ case ERTS_MON_TYPE_NODE:
+ mdp = erts_monitor_to_data(mon);
+ if (!erts_monitor_dist_delete(&mdp->target))
+ mdp = NULL;
+ break;
+ case ERTS_MON_TYPE_NODES:
+ erts_monitor_nodes_delete(mon);
+ mon = NULL;
+ break;
+ case ERTS_MON_TYPE_PORT: {
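+ /* Demonitor the port under the main process lock; unless the operation is dropped, the port takes over the monitor and it is not released below. */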
+ Port *prt;
+ ASSERT(is_internal_port(mon->other.item));
+ prt = erts_port_lookup_raw(mon->other.item);
+ if (prt) {
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ if (erts_port_demonitor(c_p, prt, mon) != ERTS_PORT_OP_DROPPED)
+ mon = NULL;
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
+ }
+ break;
+ }
+ case ERTS_MON_TYPE_DIST_PROC: {
+ ErtsMonLnkDist *dist;
+ DistEntry *dep;
+ ErtsDSigData dsd;
+ int code;
+ Eterm watched;
+
+ mdp = erts_monitor_to_data(mon);
+ dist = ((ErtsMonitorDataExtended *) mdp)->dist;
+ ASSERT(dist);
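+ /* Monitors set up by name carry the watched name in the extended monitor data; otherwise the remote pid identifies the dist entry. */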
+ if (mon->flags & ERTS_ML_FLG_NAME) {
+ watched = ((ErtsMonitorDataExtended *) mdp)->u.name;
+ ASSERT(is_atom(watched));
+ dep = erts_sysname_to_connected_dist_entry(dist->nodename);
+ }
+ else {
+ watched = mon->other.item;
+ ASSERT(is_external_pid(watched));
+ dep = external_pid_dist_entry(watched);
+ }
+ code = erts_dsig_prepare(&dsd, dep, NULL, 0,
+ ERTS_DSP_NO_LOCK, 0, 0);
+ switch (code) {
+ case ERTS_DSIG_PREP_CONNECTED:
+ case ERTS_DSIG_PREP_PENDING:
+ if (dist->connection_id == dsd.connection_id) {
+ code = erts_dsig_send_demonitor(&dsd,
+ c_p->common.id,
+ watched,
+ mdp->ref,
+ 1);
+ ASSERT(code == ERTS_DSIG_SEND_OK);
}
- }
- }
- /* else:
- *
- * The receiver already has a pending exit (or is exiting)
- * so we drop this signal.
- *
- * NOTE: dropping this exit signal is based on the assumption
- * that the receiver *will* exit; either on the pending
- * exit or by itself before seeing the pending exit.
- */
- return -1; /* Receiver will exit */
+ default:
+ break;
+ }
+ if (!erts_monitor_dist_delete(&mdp->target))
+ mdp = NULL;
+ break;
+ }
+ default:
+ ERTS_INTERNAL_ERROR("Invalid origin monitor type");
+ break;
+ }
}
- return 0; /* Receiver unaffected */
+ if (mdp)
+ erts_monitor_release_both(mdp);
+ else if (mon)
+ erts_monitor_release(mon);
}
-
-int
-erts_send_exit_signal(Process *c_p,
- Eterm from,
- Process *rp,
- ErtsProcLocks *rp_locks,
- Eterm reason,
- Eterm token,
- Process *token_update,
- Uint32 flags)
-{
- return send_exit_signal(c_p,
- from,
- rp,
- rp_locks,
- reason,
- THE_NON_VALUE,
- 0,
- token,
- token_update,
- flags);
-}
-
-typedef struct {
- Eterm reason;
- Process *p;
-} ExitMonitorContext;
-
-static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
-{
- ExitMonitorContext *pcontext = vpcontext;
- DistEntry *dep;
- ErtsMonitor *rmon;
-
- switch (mon->type) {
- case MON_ORIGIN:
- /* We are monitoring someone else, we need to demonitor that one.. */
- if (is_atom(mon->u.pid)) { /* remote by name */
- ASSERT(is_node_name_atom(mon->u.pid));
- dep = erts_sysname_to_connected_dist_entry(mon->u.pid);
- if (dep) {
- erts_de_links_lock(dep);
- rmon = erts_remove_monitor(&(dep->monitors), mon->ref);
- erts_de_links_unlock(dep);
- if (rmon) {
- ErtsDSigData dsd;
- int code = erts_dsig_prepare(&dsd, dep, NULL, 0,
- ERTS_DSP_NO_LOCK, 0, 0);
- if (code == ERTS_DSIG_PREP_CONNECTED ||
- code == ERTS_DSIG_PREP_PENDING) {
-
- code = erts_dsig_send_demonitor(&dsd,
- rmon->u.pid,
- mon->name,
- mon->ref,
- 1);
- ASSERT(code == ERTS_DSIG_SEND_OK);
- }
- erts_destroy_monitor(rmon);
- }
- }
- } else {
- ASSERT(is_pid(mon->u.pid) || is_port(mon->u.pid));
- /* if is local by pid or name */
- if (is_internal_pid(mon->u.pid)) {
- Process *rp = erts_pid2proc(NULL, 0, mon->u.pid, ERTS_PROC_LOCK_LINK);
- if (!rp) {
- goto done;
- }
- rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
- erts_proc_unlock(rp, ERTS_PROC_LOCK_LINK);
- if (rmon == NULL) {
- goto done;
- }
- erts_destroy_monitor(rmon);
- } else if (is_internal_port(mon->u.pid)) {
- /* Is a local port */
- Port *prt = erts_port_lookup_raw(mon->u.pid);
- if (!prt) {
- goto done;
- }
- erts_port_demonitor(pcontext->p,
- ERTS_PORT_DEMONITOR_ORIGIN_ON_DEATHBED,
- prt, mon->ref, NULL);
- } else { /* remote by pid */
- ASSERT(is_external_pid(mon->u.pid));
- dep = external_pid_dist_entry(mon->u.pid);
- ASSERT(dep != NULL);
- if (dep) {
- erts_de_links_lock(dep);
- rmon = erts_remove_monitor(&(dep->monitors), mon->ref);
- erts_de_links_unlock(dep);
- if (rmon) {
- ErtsDSigData dsd;
- int code = erts_dsig_prepare(&dsd, dep, NULL, 0,
- ERTS_DSP_NO_LOCK, 0, 0);
- if (code == ERTS_DSIG_PREP_CONNECTED ||
- code == ERTS_DSIG_PREP_PENDING) {
-
- code = erts_dsig_send_demonitor(&dsd,
- rmon->u.pid,
- mon->u.pid,
- mon->ref,
- 1);
- ASSERT(code == ERTS_DSIG_SEND_OK);
- }
- erts_destroy_monitor(rmon);
- }
- }
- }
- }
- break;
- case MON_TARGET:
- ASSERT(is_pid(mon->u.pid) || is_internal_port(mon->u.pid));
- if (is_internal_port(mon->u.pid)) {
- Port *prt = erts_id2port(mon->u.pid);
- if (prt == NULL) {
- goto done;
- }
- erts_fire_port_monitor(prt, mon->ref);
- erts_port_release(prt);
- } else if (is_internal_pid(mon->u.pid)) {/* local by name or pid */
- Eterm watched;
- Process *rp;
- DeclareTmpHeapNoproc(lhp,3);
- ErtsProcLocks rp_locks = (ERTS_PROC_LOCK_LINK
- | ERTS_PROC_LOCKS_MSG_SEND);
- rp = erts_pid2proc(NULL, 0, mon->u.pid, rp_locks);
- if (rp == NULL) {
- goto done;
- }
- UseTmpHeapNoproc(3);
- rmon = erts_remove_monitor(&ERTS_P_MONITORS(rp), mon->ref);
- if (rmon) {
- erts_destroy_monitor(rmon);
- watched = (is_atom(mon->name)
- ? TUPLE2(lhp, mon->name,
- erts_this_dist_entry->sysname)
- : pcontext->p->common.id);
- erts_queue_monitor_message(rp, &rp_locks, mon->ref, am_process,
- watched, pcontext->reason);
- }
- UnUseTmpHeapNoproc(3);
- /* else: demonitor while we exited, i.e. do nothing... */
- erts_proc_unlock(rp, rp_locks);
- } else { /* external by pid or name */
- ASSERT(is_external_pid(mon->u.pid));
- dep = external_pid_dist_entry(mon->u.pid);
- ASSERT(dep != NULL);
- if (dep) {
- erts_de_links_lock(dep);
- rmon = erts_remove_monitor(&(dep->monitors), mon->ref);
- erts_de_links_unlock(dep);
- if (rmon) {
- ErtsDSigData dsd;
- int code = erts_dsig_prepare(&dsd, dep, NULL, 0,
- ERTS_DSP_NO_LOCK, 0, 0);
- if (code == ERTS_DSIG_PREP_CONNECTED) {
- code = erts_dsig_send_m_exit(&dsd,
- mon->u.pid,
- (rmon->name != NIL
- ? rmon->name
- : rmon->u.pid),
- mon->ref,
- pcontext->reason);
- ASSERT(code == ERTS_DSIG_SEND_OK);
- }
- erts_destroy_monitor(rmon);
- }
- }
- }
- break;
- case MON_NIF_TARGET:
- erts_fire_nif_monitor(mon->u.resource,
- pcontext->p->common.id,
- mon->ref);
+void
+erts_proc_exit_handle_link(ErtsLink *lnk, void *vctxt)
+{
+ Process *c_p = ((ErtsProcExitContext *) vctxt)->c_p;
+ Eterm reason = ((ErtsProcExitContext *) vctxt)->reason;
+ ErtsLinkData *ldp = NULL;
+
+ switch (lnk->type) {
+ case ERTS_LNK_TYPE_PROC:
+ ASSERT(is_internal_pid(lnk->other.item));
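+ /* The link-exit signal takes over the link structure; do not release it below. */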
+ erts_proc_sig_send_link_exit(c_p, c_p->common.id, lnk,
+ reason, SEQ_TRACE_TOKEN(c_p));
+ lnk = NULL;
+ break;
+ case ERTS_LNK_TYPE_PORT: {
+ Port *prt;
+ ASSERT(is_internal_port(lnk->other.item));
+ prt = erts_port_lookup(lnk->other.item,
+ ERTS_PORT_SFLGS_INVALID_LOOKUP);
+ if (prt)
+ erts_port_exit(NULL,
+ (ERTS_PORT_SIG_FLG_FORCE_SCHED
+ | ERTS_PORT_SIG_FLG_BROKEN_LINK),
+ prt,
+ c_p->common.id,
+ reason,
+ NULL);
+ break;
+ }
+ case ERTS_LNK_TYPE_DIST_PROC: {
+ DistEntry *dep;
+ ErtsMonLnkDist *dist;
+ ErtsLink *dlnk;
+ ErtsDSigData dsd;
+ int code;
+
+ dlnk = erts_link_to_other(lnk, &ldp);
+ dist = ((ErtsLinkDataExtended *) ldp)->dist;
+
+ ASSERT(is_external_pid(lnk->other.item));
+ dep = external_pid_dist_entry(lnk->other.item);
+
+ ASSERT(dep != erts_this_dist_entry);
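+ /* Remove the remote half from the dist structure; if that fails, only our half is released below. */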
+
+ if (!erts_link_dist_delete(dlnk))
+ ldp = NULL;
+
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
+ code = erts_dsig_prepare(&dsd, dep, c_p, 0, ERTS_DSP_NO_LOCK, 0, 0);
+ switch (code) {
+ case ERTS_DSIG_PREP_CONNECTED:
+ case ERTS_DSIG_PREP_PENDING:
+ if (dist->connection_id == dsd.connection_id) {
+ code = erts_dsig_send_exit_tt(&dsd,
+ c_p->common.id,
+ lnk->other.item,
+ reason,
+ SEQ_TRACE_TOKEN(c_p));
+ ASSERT(code == ERTS_DSIG_SEND_OK);
+ }
+ }
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
break;
- case MON_TIME_OFFSET:
- erts_demonitor_time_offset(mon->ref);
- break;
- default:
- ERTS_INTERNAL_ERROR("Invalid monitor type");
}
- done:
- /* As the monitors are previously removed from the process,
- distribution operations will not cause monitors to disappear,
- we can safely delete it. */
-
- erts_destroy_monitor(mon);
-}
-
-typedef struct {
- Process *p;
- Eterm reason;
- Eterm exit_tuple;
- Uint exit_tuple_sz;
-} ExitLinkContext;
-
-static void doit_exit_link(ErtsLink *lnk, void *vpcontext)
-{
- ExitLinkContext *pcontext = vpcontext;
- /* Unpack context, it's readonly */
- Process *p = pcontext->p;
- Eterm reason = pcontext->reason;
- Eterm exit_tuple = pcontext->exit_tuple;
- Uint exit_tuple_sz = pcontext->exit_tuple_sz;
- Eterm item = lnk->pid;
- ErtsLink *rlnk;
- DistEntry *dep;
- Process *rp;
-
-
- switch(lnk->type) {
- case LINK_PID:
- if(is_internal_port(item)) {
- Port *prt = erts_port_lookup(item, ERTS_PORT_SFLGS_INVALID_LOOKUP);
- if (prt)
- erts_port_exit(NULL,
- (ERTS_PORT_SIG_FLG_FORCE_SCHED
- | ERTS_PORT_SIG_FLG_BROKEN_LINK),
- prt,
- p->common.id,
- reason,
- NULL);
- }
- else if(is_external_port(item)) {
- erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- erts_dsprintf(dsbufp,
- "Erroneous link between %T and external port %T "
- "found\n",
- p->common.id,
- item);
- erts_send_error_to_logger_nogl(dsbufp);
- ASSERT(0); /* It isn't possible to setup such a link... */
- }
- else if (is_internal_pid(item)) {
- ErtsProcLocks rp_locks = (ERTS_PROC_LOCK_LINK
- | ERTS_PROC_LOCKS_XSIG_SEND);
- rp = erts_pid2proc(NULL, 0, item, rp_locks);
- if (rp) {
- rlnk = erts_remove_link(&ERTS_P_LINKS(rp), p->common.id);
- /* If rlnk == NULL, we got unlinked while exiting,
- i.e., do nothing... */
- if (rlnk) {
- int xres;
- erts_destroy_link(rlnk);
- xres = send_exit_signal(NULL,
- p->common.id,
- rp,
- &rp_locks,
- reason,
- exit_tuple,
- exit_tuple_sz,
- SEQ_TRACE_TOKEN(p),
- p,
- ERTS_XSIG_FLG_IGN_KILL);
- if (xres >= 0 && IS_TRACED_FL(rp, F_TRACE_PROCS)) {
- /* We didn't exit the process and it is traced */
- if (IS_TRACED_FL(rp, F_TRACE_PROCS)) {
- if (rp_locks & ERTS_PROC_LOCKS_XSIG_SEND) {
- erts_proc_unlock(rp, ERTS_PROC_LOCKS_XSIG_SEND);
- rp_locks &= ~ERTS_PROC_LOCKS_XSIG_SEND;
- }
- trace_proc(NULL, 0, rp, am_getting_unlinked, p->common.id);
- }
- }
- }
- ASSERT(rp != p);
- erts_proc_unlock(rp, rp_locks);
- }
- }
- else if (is_external_pid(item)) {
- dep = external_pid_dist_entry(item);
- if(dep != erts_this_dist_entry) {
- ErtsDSigData dsd;
- int code;
- ErtsDistLinkData dld;
- erts_remove_dist_link(&dld, p->common.id, item, dep);
- if (dld.d_lnk) {
- erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
- code = erts_dsig_prepare(&dsd, dep, p, 0, ERTS_DSP_NO_LOCK, 0, 0);
- if (code == ERTS_DSIG_PREP_CONNECTED ||
- code == ERTS_DSIG_PREP_PENDING) {
-
- code = erts_dsig_send_exit_tt(&dsd, p->common.id, item,
- reason, SEQ_TRACE_TOKEN(p));
- ASSERT(code == ERTS_DSIG_SEND_OK);
- }
- erts_proc_unlock(p, ERTS_PROC_LOCK_MAIN);
- }
- erts_destroy_dist_link(&dld);
- }
- }
- break;
- case LINK_NODE:
- ASSERT(is_node_name_atom(item));
- dep = erts_sysname_to_connected_dist_entry(item);
- if(dep) {
- /* dist entries have node links in a separate structure to
- avoid confusion */
- erts_de_links_lock(dep);
- rlnk = erts_remove_link(&(dep->node_links), p->common.id);
- erts_de_links_unlock(dep);
- if (rlnk)
- erts_destroy_link(rlnk);
- }
- break;
-
default:
- erts_exit(ERTS_ERROR_EXIT, "bad type in link list\n");
- break;
+ ERTS_INTERNAL_ERROR("Unexpected link type");
+ break;
}
- erts_destroy_link(lnk);
+
+ if (ldp)
+ erts_link_release_both(ldp);
+ else if (lnk)
+ erts_link_release(lnk);
}
static void
-resume_suspend_monitor(ErtsSuspendMonitor *smon, void *vc_p)
+resume_suspend_monitor(ErtsMonitor *mon, void *vc_p)
{
+ ErtsMonitorSuspend *smon = erts_monitor_suspend(mon);
Process *suspendee = erts_pid2proc((Process *) vc_p, ERTS_PROC_LOCK_MAIN,
- smon->pid, ERTS_PROC_LOCK_STATUS);
+ smon->mon.other.item, ERTS_PROC_LOCK_STATUS);
if (suspendee) {
ASSERT(suspendee != vc_p);
if (smon->active)
resume_process(suspendee, ERTS_PROC_LOCK_STATUS);
erts_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
}
- erts_destroy_suspend_monitor(smon);
+ erts_monitor_suspend_destroy(smon);
}
/* This function finishes a process and propagates exit messages - called
@@ -12923,8 +12608,6 @@ void
erts_do_exit_process(Process* p, Eterm reason)
{
p->arity = 0; /* No live registers */
- p->fvalue = reason;
-
#ifdef USE_VM_PROBES
if (DTRACE_ENABLED(process_exit)) {
@@ -12948,18 +12631,11 @@ erts_do_exit_process(Process* p, Eterm reason)
process from exiting until the lock has been released. */
erts_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
- if (ERTS_PSFLG_PENDING_EXIT & set_proc_self_exiting(p)) {
- /* Process exited before pending exit was received... */
- p->pending_exit.reason = THE_NON_VALUE;
- if (p->pending_exit.bp) {
- free_message_buffer(p->pending_exit.bp);
- p->pending_exit.bp = NULL;
- }
- }
+ set_self_exiting(p, reason, NULL, NULL, NULL);
cancel_suspend_of_suspendee(p, ERTS_PROC_LOCKS_ALL);
- ERTS_MSGQ_MV_INQ2PRIVQ(p);
+ erts_proc_sig_fetch(p);
if (IS_TRACED(p)) {
if (IS_TRACED_FL(p, F_TRACE_CALLS))
@@ -12996,13 +12672,15 @@ erts_do_exit_process(Process* p, Eterm reason)
void
erts_continue_exit_process(Process *p)
{
- ErtsLink* lnk;
- ErtsMonitor *mon;
+ ErtsLink *links;
+ ErtsMonitor *monitors;
+ ErtsMonitor *lt_monitors;
ErtsProcLocks curr_locks = ERTS_PROC_LOCK_MAIN;
Eterm reason = p->fvalue;
DistEntry *dep = NULL;
erts_aint32_t state;
int delay_del_proc = 0;
+ ErtsProcExitContext pectxt;
#ifdef DEBUG
int yield_allowed = 1;
@@ -13074,7 +12752,7 @@ erts_continue_exit_process(Process *p)
erts_set_gc_state(p, 1);
state = erts_atomic32_read_acqb(&p->state);
- if (state & ERTS_PSFLG_ACTIVE_SYS
+ if ((state & ERTS_PSFLG_SYS_TASKS)
|| p->dirty_sys_tasks
) {
if (cleanup_sys_tasks(p, state, CONTEXT_REDS) >= CONTEXT_REDS/2)
@@ -13094,18 +12772,10 @@ erts_continue_exit_process(Process *p)
p->flags &= ~F_USING_DDLL;
}
- if (p->nodes_monitors) {
- erts_delete_nodes_monitors(p, ERTS_PROC_LOCK_MAIN);
- p->nodes_monitors = NULL;
- }
-
-
- if (p->suspend_monitors) {
- erts_sweep_suspend_monitors(p->suspend_monitors,
- resume_suspend_monitor,
- p);
- p->suspend_monitors = NULL;
- }
+ if (p->suspend_monitors)
+ erts_monitor_tree_foreach_delete(&p->suspend_monitors,
+ resume_suspend_monitor,
+ p);
/*
* The registered name *should* be the last "erlang resource" to
@@ -13134,8 +12804,9 @@ erts_continue_exit_process(Process *p)
* Note! The monitor and link fields will be overwritten
* by erts_ptab_delete_element() below.
*/
- mon = ERTS_P_MONITORS(p);
- lnk = ERTS_P_LINKS(p);
+ links = ERTS_P_LINKS(p);
+ monitors = ERTS_P_MONITORS(p);
+ lt_monitors = ERTS_P_LT_MONITORS(p);
{
/* Do *not* use erts_get_runq_proc() */
@@ -13172,7 +12843,9 @@ erts_continue_exit_process(Process *p)
n = e = a;
ASSERT(a & ERTS_PSFLG_EXITING);
n |= ERTS_PSFLG_FREE;
- n &= ~ERTS_PSFLG_ACTIVE;
+ n &= ~(ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS);
if ((n & ERTS_PSFLG_IN_RUNQ) && !refc_inced) {
erts_proc_inc_refc(p);
refc_inced = 1;
@@ -13207,33 +12880,40 @@ erts_continue_exit_process(Process *p)
erts_deref_dist_entry(dep);
}
- /*
- * Pre-build the EXIT tuple if there are any links.
- */
- if (lnk) {
- DeclareTmpHeap(tmp_heap,4,p);
- Eterm exit_tuple;
- Uint exit_tuple_sz;
- Eterm* hp;
-
- UseTmpHeap(4,p);
- hp = &tmp_heap[0];
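+ /* Exit context consumed by erts_proc_exit_handle_link() and erts_proc_exit_handle_monitor() below. */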
+ pectxt.c_p = p;
+ pectxt.reason = reason;
- exit_tuple = TUPLE3(hp, am_EXIT, p->common.id, reason);
+ if (links) {
+ erts_link_tree_foreach_delete(&links,
+ erts_proc_exit_handle_link,
+ (void *) &pectxt);
+ ASSERT(!links);
+ }
- exit_tuple_sz = size_object(exit_tuple);
+ if (monitors) {
+ erts_monitor_tree_foreach_delete(&monitors,
+ erts_proc_exit_handle_monitor,
+ (void *) &pectxt);
+ ASSERT(!monitors);
+ }
- {
- ExitLinkContext context = {p, reason, exit_tuple, exit_tuple_sz};
- erts_sweep_links(lnk, &doit_exit_link, &context);
- }
- UnUseTmpHeap(4,p);
+ if (lt_monitors) {
+ erts_monitor_list_foreach_delete(&lt_monitors,
+ erts_proc_exit_handle_monitor,
+ (void *) &pectxt);
+ ASSERT(!lt_monitors);
}
- {
- ExitMonitorContext context = {reason, p};
- erts_sweep_monitors(mon,&doit_exit_monitor,&context); /* Allocates TmpHeap, but we
- have none here */
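+ /* Fetch any in-transit signals into the private signal queue before handling them below. */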
+ erts_proc_sig_fetch(p);
+ /*
+ * erts_proc_sig_handle_exit() implements yielding.
+ * However, this function cannot handle it yet... loop
+ * until done...
+ */
+ while (!0) {
+ int reds = CONTEXT_REDS;
+ if (erts_proc_sig_handle_exit(p, &reds))
+ break;
}
erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
@@ -13272,6 +12952,40 @@ erts_continue_exit_process(Process *p)
BUMP_ALL_REDS(p);
}
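+/*
+ * Attempt to take 'locks' on a process only when it has no signals
+ * enqueued. Returns NULL if the process does not exist or is exiting,
+ * ERTS_PROC_LOCK_BUSY if it has pending signals or the locks cannot
+ * be taken without waiting, and the locked process otherwise.
+ */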
+Process *
+erts_try_lock_sig_free_proc(Eterm pid, ErtsProcLocks locks)
+{
+ Process *rp = erts_proc_lookup_raw(pid);
+ erts_aint32_t state;
+
+ if (!rp)
+ return NULL;
+
+ ERTS_LC_ASSERT(!erts_proc_lc_my_proc_locks(rp));
+
+ state = erts_atomic32_read_nob(&rp->state);
+ if (state & ERTS_PSFLG_EXITING)
+ return NULL;
+
+ if (state & (ERTS_PSFLG_SIG_IN_Q|ERTS_PSFLG_SIG_Q))
+ return ERTS_PROC_LOCK_BUSY;
+ if (erts_proc_trylock(rp, locks) == EBUSY)
+ return ERTS_PROC_LOCK_BUSY;
+
+ state = erts_atomic32_read_nob(&rp->state);
+ if (state & ERTS_PSFLG_EXITING) {
+ erts_proc_unlock(rp, locks);
+ return NULL;
+ }
+
+ if (state & (ERTS_PSFLG_SIG_IN_Q|ERTS_PSFLG_SIG_Q)) {
+ erts_proc_unlock(rp, locks);
+ return ERTS_PROC_LOCK_BUSY;
+ }
+
+ return rp;
+}
+
/*
* Stack dump functions follow.
*/