Diffstat (limited to 'erts/emulator/beam')
-rw-r--r--  erts/emulator/beam/beam_bp.c             |  14
-rw-r--r--  erts/emulator/beam/beam_bp.h             |   2
-rw-r--r--  erts/emulator/beam/beam_emu.c            |  60
-rw-r--r--  erts/emulator/beam/bif.c                 |  26
-rw-r--r--  erts/emulator/beam/bif.h                 |  42
-rw-r--r--  erts/emulator/beam/break.c               |   7
-rw-r--r--  erts/emulator/beam/erl_alloc.types       |   1
-rw-r--r--  erts/emulator/beam/erl_bif_info.c        |  15
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c       |  19
-rw-r--r--  erts/emulator/beam/erl_bif_unique.c      |   4
-rw-r--r--  erts/emulator/beam/erl_bits.h            |   4
-rw-r--r--  erts/emulator/beam/erl_db.c              |   2
-rw-r--r--  erts/emulator/beam/erl_db_util.c         |   5
-rw-r--r--  erts/emulator/beam/erl_drv_nif.h         |   9
-rw-r--r--  erts/emulator/beam/erl_gc.c              |  17
-rw-r--r--  erts/emulator/beam/erl_hl_timer.c        |  24
-rw-r--r--  erts/emulator/beam/erl_message.c         |   7
-rw-r--r--  erts/emulator/beam/erl_msacc.c           |   2
-rw-r--r--  erts/emulator/beam/erl_nif.c             | 561
-rw-r--r--  erts/emulator/beam/erl_nif.h             |  13
-rw-r--r--  erts/emulator/beam/erl_nif_api_funcs.h   |  17
-rw-r--r--  erts/emulator/beam/erl_port.h            |  39
-rw-r--r--  erts/emulator/beam/erl_process.c         | 806
-rw-r--r--  erts/emulator/beam/erl_process.h         | 105
-rw-r--r--  erts/emulator/beam/erl_process_dump.c    |  34
-rw-r--r--  erts/emulator/beam/erl_time_sup.c        |  10
-rw-r--r--  erts/emulator/beam/erl_trace.c           | 100
-rw-r--r--  erts/emulator/beam/erl_trace.h           |  11
-rw-r--r--  erts/emulator/beam/global.h              |   1
-rw-r--r--  erts/emulator/beam/io.c                  |  14
-rw-r--r--  erts/emulator/beam/sys.h                 |   4
31 files changed, 1331 insertions(+), 644 deletions(-)
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 1e30e8d8d1..8489897d3a 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -82,7 +82,7 @@ erts_smp_atomic32_t erts_staging_bp_index;
static ERTS_INLINE ErtsMonotonicTime
get_mtime(Process *c_p)
{
- return erts_get_monotonic_time(ERTS_PROC_GET_SCHDATA(c_p));
+ return erts_get_monotonic_time(erts_proc_sched_data(c_p));
}
/* *************************************************************************
@@ -655,8 +655,7 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
erts_smp_atomic_inc_nob(&bp->count->acount);
}
- if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE
- && ERTS_TRACER_PROC_IS_ENABLED(c_p)) {
+ if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE) {
Eterm w;
erts_trace_time_call(c_p, I, bp->time);
w = (BeamInstr) *c_p->cp;
@@ -753,8 +752,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
}
if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE &&
- IS_TRACED_FL(p, F_TRACE_CALLS) &&
- ERTS_TRACER_PROC_IS_ENABLED(p)) {
+ IS_TRACED_FL(p, F_TRACE_CALLS)) {
BeamInstr *pc = (BeamInstr *)ep->code+3;
erts_trace_time_call(p, pc, bp->time);
}
@@ -976,7 +974,8 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
BpDataTime *pbdt = NULL;
ASSERT(c_p);
- ASSERT(erts_smp_atomic32_read_acqb(&c_p->state) & ERTS_PSFLG_RUNNING);
+ ASSERT(erts_smp_atomic32_read_acqb(&c_p->state) & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING));
/* get previous timestamp and breakpoint
* from the process psd */
@@ -1053,7 +1052,8 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
BpDataTime *pbdt = NULL;
ASSERT(p);
- ASSERT(erts_smp_atomic32_read_acqb(&p->state) & ERTS_PSFLG_RUNNING);
+ ASSERT(erts_smp_atomic32_read_acqb(&p->state) & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING));
/* get previous timestamp and breakpoint
* from the process psd */
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index 9c2fc007a2..541af77211 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -80,7 +80,7 @@ typedef struct generic_bp {
#define ERTS_BP_CALL_TIME_SCHEDULE_EXITING (2)
#ifdef ERTS_SMP
-#define bp_sched2ix_proc(p) ((p)->scheduler_data->no - 1)
+#define bp_sched2ix_proc(p) (erts_proc_sched_data(p)->no - 1)
#else
#define bp_sched2ix_proc(p) (0)
#endif
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index d00a563087..f8f2e29c95 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -64,18 +64,21 @@
# ifdef ERTS_SMP
# define PROCESS_MAIN_CHK_LOCKS(P) \
do { \
- if ((P)) { \
+ if ((P)) \
erts_proc_lc_chk_only_proc_main((P)); \
- } \
- else \
- erts_lc_check_exact(NULL, 0); \
- ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); \
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); \
+} while (0)
+# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
+do { \
+ if ((P)) \
+ erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN, \
+ __FILE__, __LINE__); \
+} while (0)
+# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
+do { \
+ if ((P)) \
+ erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN); \
} while (0)
-# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
- if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN,\
- __FILE__, __LINE__)
-# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
- if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
# else
# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
@@ -1202,12 +1205,12 @@ init_emulator(void)
do { \
if (ERTS_PROC_GET_SAVED_CALLS_BUF((P))) { \
ASSERT(FC <= 0); \
- ASSERT(ERTS_PROC_GET_SCHDATA(c_p)->virtual_reds \
+ ASSERT(erts_proc_sched_data(c_p)->virtual_reds \
<= 0 - (FC)); \
} \
else { \
ASSERT(FC <= CONTEXT_REDS); \
- ASSERT(ERTS_PROC_GET_SCHDATA(c_p)->virtual_reds \
+ ASSERT(erts_proc_sched_data(c_p)->virtual_reds \
<= CONTEXT_REDS - (FC)); \
} \
} while (0)
@@ -1321,8 +1324,8 @@ void process_main(void)
if (start_time != 0) {
Sint64 diff = erts_timestamp_millis() - start_time;
if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule
-#ifdef ERTS_DIRTY_SCHEDULERS
- && !ERTS_SCHEDULER_IS_DIRTY(c_p->scheduler_data)
+#if defined(ERTS_SMP) && defined(ERTS_DIRTY_SCHEDULERS)
+ && !ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p))
#endif
) {
BeamInstr *inptr = find_function_from_pc(start_time_i);
@@ -1351,8 +1354,8 @@ void process_main(void)
start_time_i = c_p->i;
}
- reg = ERTS_PROC_GET_SCHDATA(c_p)->x_reg_array;
- freg = ERTS_PROC_GET_SCHDATA(c_p)->f_reg_array;
+ reg = erts_proc_sched_data(c_p)->x_reg_array;
+ freg = erts_proc_sched_data(c_p)->f_reg_array;
ERL_BITS_RELOAD_STATEP(c_p);
{
int reds;
@@ -3556,18 +3559,27 @@ do { \
typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]);
NifF* fp = vbf = (NifF*) I[1];
struct enif_environment_t env;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!c_p->scheduler_data)
+ live_hf_end = ERTS_INVALID_HFRAG_PTR; /* On dirty scheduler */
+ else
+#endif
+ live_hf_end = c_p->mbuf;
erts_pre_nif(&env, c_p, (struct erl_module_nif*)I[2], NULL);
- live_hf_end = c_p->mbuf;
nif_bif_result = (*fp)(&env, bif_nif_arity, reg);
if (env.exception_thrown)
nif_bif_result = THE_NON_VALUE;
erts_post_nif(&env);
- }
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(nif_bif_result));
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
+ if (env.exiting) {
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ goto do_schedule;
+ }
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ }
DTRACE_NIF_RETURN(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
goto apply_bif_or_nif_epilogue;
@@ -4926,8 +4938,8 @@ do { \
#ifdef DEBUG
pid = c_p->common.id; /* may have switched process... */
#endif
- reg = ERTS_PROC_GET_SCHDATA(c_p)->x_reg_array;
- freg = ERTS_PROC_GET_SCHDATA(c_p)->f_reg_array;
+ reg = erts_proc_sched_data(c_p)->x_reg_array;
+ freg = erts_proc_sched_data(c_p)->f_reg_array;
ERL_BITS_RELOAD_STATEP(c_p);
/* XXX: this abuse of def_arg_reg[] is horrid! */
neg_o_reds = -c_p->def_arg_reg[4];
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 483c5320d7..2a3bd4afe5 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -1696,7 +1696,7 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
ERTS_PSFLG_BOUND);
}
- curr = ERTS_GET_SCHEDULER_DATA_FROM_PROC(BIF_P)->run_queue;
+ curr = erts_proc_sched_data(BIF_P)->run_queue;
old = (ERTS_PSFLG_BOUND & state) ? curr : NULL;
ASSERT(!old || old == curr);
@@ -4225,8 +4225,28 @@ BIF_RETTYPE group_leader_2(BIF_ALIST_2)
else {
locks &= ~ERTS_PROC_LOCK_STATUS;
erts_smp_proc_unlock(new_member, ERTS_PROC_LOCK_STATUS);
- new_member->group_leader = STORE_NC_IN_PROC(new_member,
- BIF_ARG_1);
+ if (!(erts_smp_atomic32_read_nob(&new_member->state)
+ & (ERTS_PSFLG_DIRTY_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS))) {
+ new_member->group_leader = STORE_NC_IN_PROC(new_member,
+ BIF_ARG_1);
+ }
+ else {
+ ErlHeapFragment *bp;
+ Eterm *hp;
+ /*
+ * Other process executing on a dirty scheduler,
+ * so we are not allowed to write to its heap.
+ * Store in heap fragment.
+ */
+
+ bp = new_message_buffer(NC_HEAP_SIZE(BIF_ARG_1));
+ hp = bp->mem;
+ new_member->group_leader = STORE_NC(&hp,
+ &new_member->off_heap,
+ BIF_ARG_1);
+ bp->next = new_member->mbuf;
+ new_member->mbuf = bp;
+ }
}
}
diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h
index 5d751dd67d..2203182a0d 100644
--- a/erts/emulator/beam/bif.h
+++ b/erts/emulator/beam/bif.h
@@ -59,12 +59,12 @@ extern Export *erts_convert_time_unit_trap;
do { \
if (!ERTS_PROC_GET_SAVED_CALLS_BUF((p))) { \
if ((fcalls) > 0) \
- ERTS_PROC_GET_SCHDATA((p))->virtual_reds += (fcalls); \
+ erts_proc_sched_data((p))->virtual_reds += (fcalls); \
(fcalls) = 0; \
} \
else { \
if ((fcalls) > -CONTEXT_REDS) \
- ERTS_PROC_GET_SCHDATA((p))->virtual_reds \
+ erts_proc_sched_data((p))->virtual_reds \
+= ((fcalls) - (-CONTEXT_REDS)); \
(fcalls) = -CONTEXT_REDS; \
} \
@@ -91,22 +91,22 @@ do { \
if (!ERTS_PROC_GET_SAVED_CALLS_BUF((p))) { \
if ((p)->fcalls >= reds) { \
(p)->fcalls -= reds; \
- ERTS_PROC_GET_SCHDATA((p))->virtual_reds += reds; \
+ erts_proc_sched_data((p))->virtual_reds += reds; \
} \
else { \
if ((p)->fcalls > 0) \
- ERTS_PROC_GET_SCHDATA((p))->virtual_reds += (p)->fcalls;\
+ erts_proc_sched_data((p))->virtual_reds += (p)->fcalls; \
(p)->fcalls = 0; \
} \
} \
else { \
if ((p)->fcalls >= reds - CONTEXT_REDS) { \
(p)->fcalls -= reds; \
- ERTS_PROC_GET_SCHDATA((p))->virtual_reds += reds; \
+ erts_proc_sched_data((p))->virtual_reds += reds; \
} \
else { \
if ((p)->fcalls > -CONTEXT_REDS) \
- ERTS_PROC_GET_SCHDATA((p))->virtual_reds \
+ erts_proc_sched_data((p))->virtual_reds \
+= (p)->fcalls - (-CONTEXT_REDS); \
(p)->fcalls = -CONTEXT_REDS; \
} \
@@ -118,14 +118,14 @@ do { \
if (ERTS_PROC_GET_SAVED_CALLS_BUF((P))) { \
int nreds__ = ((int)(Reds)) - CONTEXT_REDS; \
if ((FCalls) > nreds__) { \
- ERTS_PROC_GET_SCHDATA((P))->virtual_reds \
+ erts_proc_sched_data((P))->virtual_reds \
+= (FCalls) - nreds__; \
(FCalls) = nreds__; \
} \
} \
else { \
if ((FCalls) > (Reds)) { \
- ERTS_PROC_GET_SCHDATA((P))->virtual_reds \
+ erts_proc_sched_data((P))->virtual_reds \
+= (FCalls) - (Reds); \
(FCalls) = (Reds); \
} \
@@ -165,7 +165,7 @@ do { \
#define ERTS_BIF_ERROR_TRAPPED1(Proc, Reason, Bif, A0) \
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
(Proc)->current = (Bif)->code; \
reg[0] = (Eterm) (A0); \
@@ -174,7 +174,7 @@ do { \
#define ERTS_BIF_ERROR_TRAPPED2(Proc, Reason, Bif, A0, A1) \
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
(Proc)->current = (Bif)->code; \
reg[0] = (Eterm) (A0); \
@@ -184,7 +184,7 @@ do { \
#define ERTS_BIF_ERROR_TRAPPED3(Proc, Reason, Bif, A0, A1, A2) \
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
(Proc)->current = (Bif)->code; \
reg[0] = (Eterm) (A0); \
@@ -208,7 +208,7 @@ do { \
#define ERTS_BIF_PREP_ERROR_TRAPPED1(Ret, Proc, Reason, Bif, A0) \
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
(Proc)->current = (Bif)->code; \
reg[0] = (Eterm) (A0); \
@@ -217,7 +217,7 @@ do { \
#define ERTS_BIF_PREP_ERROR_TRAPPED2(Ret, Proc, Reason, Bif, A0, A1) \
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
(Proc)->current = (Bif)->code; \
reg[0] = (Eterm) (A0); \
@@ -227,7 +227,7 @@ do { \
#define ERTS_BIF_PREP_ERROR_TRAPPED3(Ret, Proc, Reason, Bif, A0, A1, A2) \
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
(Proc)->current = (Bif)->code; \
reg[0] = (Eterm) (A0); \
@@ -246,7 +246,7 @@ do { \
#define ERTS_BIF_PREP_TRAP1(Ret, Trap, Proc, A0) \
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->arity = 1; \
reg[0] = (Eterm) (A0); \
(Proc)->i = (BeamInstr*) ((Trap)->addressv[erts_active_code_ix()]); \
@@ -256,7 +256,7 @@ do { \
#define ERTS_BIF_PREP_TRAP2(Ret, Trap, Proc, A0, A1) \
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->arity = 2; \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
@@ -267,7 +267,7 @@ do { \
#define ERTS_BIF_PREP_TRAP3(Ret, Trap, Proc, A0, A1, A2) \
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->arity = 3; \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
@@ -279,7 +279,7 @@ do { \
#define ERTS_BIF_PREP_TRAP3_NO_RET(Trap, Proc, A0, A1, A2)\
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->arity = 3; \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
@@ -296,7 +296,7 @@ do { \
} while(0)
#define BIF_TRAP1(Trap_, p, A0) do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((p))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((p))->x_reg_array; \
(p)->arity = 1; \
reg[0] = (A0); \
(p)->i = (BeamInstr*) ((Trap_)->addressv[erts_active_code_ix()]); \
@@ -305,7 +305,7 @@ do { \
} while(0)
#define BIF_TRAP2(Trap_, p, A0, A1) do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((p))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((p))->x_reg_array; \
(p)->arity = 2; \
reg[0] = (A0); \
reg[1] = (A1); \
@@ -315,7 +315,7 @@ do { \
} while(0)
#define BIF_TRAP3(Trap_, p, A0, A1, A2) do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((p))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((p))->x_reg_array; \
(p)->arity = 3; \
reg[0] = (A0); \
reg[1] = (A1); \
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index d02c6828f9..3c19e82b66 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -118,7 +118,9 @@ process_killer(void)
| ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_IN_RUNQ
| ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS)) {
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
erts_printf("Can only kill WAITING processes this way\n");
}
else {
@@ -214,7 +216,8 @@ print_process_info(int to, void *to_arg, Process *p)
if (state & ERTS_PSFLG_GC) {
garbing = 1;
running = 1;
- } else if (state & ERTS_PSFLG_RUNNING)
+ } else if (state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING))
running = 1;
/*
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index ba216c7eb4..227fedfb69 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -278,6 +278,7 @@ type IOB_REQ SHORT_LIVED SYSTEM io_bytes_request
type TRACER_NIF LONG_LIVED SYSTEM tracer_nif
type TRACE_MSG_QUEUE SHORT_LIVED SYSTEM trace_message_queue
type SCHED_ASYNC_JOB SHORT_LIVED SYSTEM async_calls
+type DIRTY_START STANDARD PROCESSES dirty_start
+if threads_no_smp
# Need thread safe allocs, but std_alloc and fix_alloc are not;
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index da480f8fce..2e195db0ee 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -1110,7 +1110,7 @@ process_info_aux(Process *BIF_P,
break;
case am_status:
- res = erts_process_status(BIF_P, ERTS_PROC_LOCK_MAIN, rp, rpid);
+ res = erts_process_status(rp, rpid);
ASSERT(res != am_undefined);
hp = HAlloc(BIF_P, 3);
break;
@@ -2059,12 +2059,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
Uint arity = *tp++;
return info_1_tuple(BIF_P, tp, arityval(arity));
} else if (BIF_ARG_1 == am_scheduler_id) {
-#ifdef ERTS_SMP
- ASSERT(BIF_P->scheduler_data);
- BIF_RET(make_small(BIF_P->scheduler_data->no));
-#else
- BIF_RET(make_small(1));
-#endif
+ ErtsSchedulerData *esdp = erts_proc_sched_data(BIF_P);
+ BIF_RET(make_small(esdp->no));
} else if (BIF_ARG_1 == am_compat_rel) {
ASSERT(erts_compat_rel > 0);
BIF_RET(make_small(erts_compat_rel));
@@ -3622,10 +3618,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
/* Used by timer process_SUITE, timer_bif_SUITE, and
node_container_SUITE (emulator) */
if (is_internal_pid(tp[2])) {
- BIF_RET(erts_process_status(BIF_P,
- ERTS_PROC_LOCK_MAIN,
- NULL,
- tp[2]));
+ BIF_RET(erts_process_status(NULL, tp[2]));
}
}
else if (ERTS_IS_ATOM_STR("link_list", tp[1])) {
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index b65c0e303f..66e5146da0 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -512,8 +512,7 @@ start_trace(Process *c_p, ErtsTracer tracer,
&& !ERTS_TRACER_COMPARE(ERTS_TRACER(port), tracer)) {
/* This tracee is already being traced, and not by the
* tracer to be */
- if (erts_is_tracer_proc_enabled(c_p, ERTS_PROC_LOCKS_ALL,
- common, am_trace_status)) {
+ if (erts_is_tracer_enabled(tracer, common)) {
/* The tracer is still in use */
return 1;
}
@@ -856,7 +855,7 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
return am_undefined;
if (!ERTS_TRACER_IS_NIL(ERTS_TRACER(tracee)))
- erts_is_tracer_proc_enabled(NULL, 0, &tracee->common, am_trace_status);
+ erts_is_tracer_proc_enabled(NULL, 0, &tracee->common);
tracer = erts_tracer_to_term(p, ERTS_TRACER(tracee));
trace_flags = ERTS_TRACE_FLAGS(tracee);
@@ -864,22 +863,24 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
erts_port_release(tracee);
} else if (is_internal_pid(pid_spec)) {
- Process *tracee;
- tracee = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
- pid_spec, ERTS_PROC_LOCK_MAIN);
+ Process *tracee = erts_pid2proc_not_running(p, ERTS_PROC_LOCK_MAIN,
+ pid_spec, ERTS_PROC_LOCK_MAIN);
+
+ if (tracee == ERTS_PROC_LOCK_BUSY)
+ ERTS_BIF_YIELD2(bif_export[BIF_trace_info_2], p, pid_spec, key);
if (!tracee)
return am_undefined;
if (!ERTS_TRACER_IS_NIL(ERTS_TRACER(tracee)))
erts_is_tracer_proc_enabled(tracee, ERTS_PROC_LOCK_MAIN,
- &tracee->common, am_trace_status);
+ &tracee->common);
tracer = erts_tracer_to_term(p, ERTS_TRACER(tracee));
trace_flags = ERTS_TRACE_FLAGS(tracee);
- if (tracee != p)
- erts_smp_proc_unlock(tracee, ERTS_PROC_LOCK_MAIN);
+ if (tracee != p)
+ erts_smp_proc_unlock(tracee, ERTS_PROC_LOCK_MAIN);
} else if (is_external_pid(pid_spec)
&& external_pid_dist_entry(pid_spec) == erts_this_dist_entry) {
return am_undefined;
diff --git a/erts/emulator/beam/erl_bif_unique.c b/erts/emulator/beam/erl_bif_unique.c
index 1e57e9fa53..7c70217d8d 100644
--- a/erts/emulator/beam/erl_bif_unique.c
+++ b/erts/emulator/beam/erl_bif_unique.c
@@ -257,7 +257,7 @@ static ERTS_INLINE Eterm unique_integer_bif(Process *c_p, int positive)
Uint hsz;
Eterm *hp;
- esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ esdp = erts_proc_sched_data(c_p);
thr_id = (Uint64) esdp->thr_id;
unique = esdp->unique++;
bld_unique_integer_term(NULL, &hsz, thr_id, unique, positive);
@@ -515,7 +515,7 @@ BIF_RETTYPE make_ref_0(BIF_ALIST_0)
hp = HAlloc(BIF_P, REF_THING_SIZE);
- res = erts_sched_make_ref_in_buffer(ERTS_PROC_GET_SCHDATA(BIF_P), hp);
+ res = erts_sched_make_ref_in_buffer(erts_proc_sched_data(BIF_P), hp);
BIF_RET(res);
}
diff --git a/erts/emulator/beam/erl_bits.h b/erts/emulator/beam/erl_bits.h
index 1c2a090f07..4bd5b24157 100644
--- a/erts/emulator/beam/erl_bits.h
+++ b/erts/emulator/beam/erl_bits.h
@@ -83,8 +83,8 @@ typedef struct erl_bin_match_struct{
#ifdef ERTS_SMP
/* the state resides in the current process' scheduler data */
#define ERL_BITS_DECLARE_STATEP struct erl_bits_state *EBS
-#define ERL_BITS_RELOAD_STATEP(P) do{EBS = &(P)->scheduler_data->erl_bits_state;}while(0)
-#define ERL_BITS_DEFINE_STATEP(P) struct erl_bits_state *EBS = &(P)->scheduler_data->erl_bits_state
+#define ERL_BITS_RELOAD_STATEP(P) do{EBS = &erts_proc_sched_data((P))->erl_bits_state;}while(0)
+#define ERL_BITS_DEFINE_STATEP(P) struct erl_bits_state *EBS = &erts_proc_sched_data((P))->erl_bits_state
#else
/* reentrant API but with a hidden single global state, for testing only */
extern struct erl_bits_state ErlBitsState_;
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index acaca54e9a..bad34211a5 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -3462,7 +3462,7 @@ static void fix_table_locked(Process* p, DbTable* tb)
fix = tb->common.fixations;
if (fix == NULL) {
tb->common.time.monotonic
- = erts_get_monotonic_time(ERTS_PROC_GET_SCHDATA(p));
+ = erts_get_monotonic_time(erts_proc_sched_data(p));
tb->common.time.offset = erts_get_time_offset();
}
else {
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index 95b1cd0148..6732b708a8 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -174,7 +174,8 @@ set_match_trace(Process *tracee_p, Eterm fail_term, ErtsTracer tracer,
ERTS_PROC_LOCKS_ALL == erts_proc_lc_my_proc_locks(tracee_p)
|| erts_thr_progress_is_blocking());
- if (ERTS_TRACER_IS_NIL(tracer) || erts_is_tracer_enabled(tracee_p, tracer))
+ if (ERTS_TRACER_IS_NIL(tracer)
+ || erts_is_tracer_enabled(tracer, &tracee_p->common))
return set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
return fail_term;
}
@@ -1801,7 +1802,7 @@ Eterm db_prog_match(Process *c_p,
/* We need to lure the scheduler into believing in the pseudo process,
because of floating point exceptions. Do *after* mpsp is set!!! */
- esdp = ERTS_GET_SCHEDULER_DATA_FROM_PROC(psp);
+ esdp = erts_get_scheduler_data();
if (esdp)
current_scheduled = esdp->current_process;
/* SMP: psp->scheduler_data is set by get_match_pseudo_process */
diff --git a/erts/emulator/beam/erl_drv_nif.h b/erts/emulator/beam/erl_drv_nif.h
index 2700b62854..6ec5fbb895 100644
--- a/erts/emulator/beam/erl_drv_nif.h
+++ b/erts/emulator/beam/erl_drv_nif.h
@@ -43,12 +43,11 @@ typedef struct {
int suggested_stack_size;
} ErlDrvThreadOpts;
-#if defined(ERL_DRV_DIRTY_SCHEDULER_SUPPORT) || defined(ERL_NIF_DIRTY_SCHEDULER_SUPPORT)
+
typedef enum {
- ERL_DRV_DIRTY_JOB_CPU_BOUND = 1,
- ERL_DRV_DIRTY_JOB_IO_BOUND = 2
-} ErlDrvDirtyJobFlags;
-#endif
+ ERL_DIRTY_JOB_CPU_BOUND = 1,
+ ERL_DIRTY_JOB_IO_BOUND = 2
+} ErlDirtyJobFlags;
#ifdef SIZEOF_CHAR
# define SIZEOF_CHAR_SAVED__ SIZEOF_CHAR
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index bd238d0f45..d740b2baec 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -391,7 +391,7 @@ erts_gc_after_bif_call_lhf(Process* p, ErlHeapFragment *live_hf_end,
if (p->freason == TRAP) {
#if HIPE
if (regs == NULL) {
- regs = ERTS_PROC_GET_SCHDATA(p)->x_reg_array;
+ regs = erts_proc_sched_data(p)->x_reg_array;
}
#endif
cost = garbage_collect(p, live_hf_end, 0, regs, p->arity, p->fcalls);
@@ -406,6 +406,7 @@ erts_gc_after_bif_call_lhf(Process* p, ErlHeapFragment *live_hf_end,
result = val[0];
}
BUMP_REDS(p, cost);
+
return result;
}
@@ -509,14 +510,14 @@ delay_garbage_collection(Process *p, ErlHeapFragment *live_hf_end, int need, int
/* Make sure that we do a proper GC as soon as possible... */
p->flags |= F_FORCE_GC;
reds_left = ERTS_REDS_LEFT(p, fcalls);
- ASSERT(CONTEXT_REDS - reds_left >= ERTS_PROC_GET_SCHDATA(p)->virtual_reds);
+ ASSERT(CONTEXT_REDS - reds_left >= erts_proc_sched_data(p)->virtual_reds);
if (reds_left > ERTS_ABANDON_HEAP_COST) {
int vreds = reds_left - ERTS_ABANDON_HEAP_COST;
- ERTS_PROC_GET_SCHDATA((p))->virtual_reds += vreds;
+ erts_proc_sched_data((p))->virtual_reds += vreds;
}
- ASSERT(CONTEXT_REDS >= ERTS_PROC_GET_SCHDATA(p)->virtual_reds);
+ ASSERT(CONTEXT_REDS >= erts_proc_sched_data(p)->virtual_reds);
return reds_left;
}
@@ -590,7 +591,7 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
#endif
ASSERT(CONTEXT_REDS - ERTS_REDS_LEFT(p, fcalls)
- >= ERTS_PROC_GET_SCHDATA(p)->virtual_reds);
+ >= erts_proc_sched_data(p)->virtual_reds);
state = erts_smp_atomic32_read_nob(&p->state);
@@ -747,7 +748,7 @@ erts_garbage_collect_nobump(Process* p, int need, Eterm* objv, int nobj, int fca
int reds_left = ERTS_REDS_LEFT(p, fcalls);
if (reds > reds_left)
reds = reds_left;
- ASSERT(CONTEXT_REDS - (reds_left - reds) >= ERTS_PROC_GET_SCHDATA(p)->virtual_reds);
+ ASSERT(CONTEXT_REDS - (reds_left - reds) >= erts_proc_sched_data(p)->virtual_reds);
return reds;
}
@@ -757,7 +758,7 @@ erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, p->fcalls);
BUMP_REDS(p, reds);
ASSERT(CONTEXT_REDS - ERTS_BIF_REDS_LEFT(p)
- >= ERTS_PROC_GET_SCHDATA(p)->virtual_reds);
+ >= erts_proc_sched_data(p)->virtual_reds);
}
/*
@@ -3044,7 +3045,7 @@ reply_gc_info(void *vgcirp)
Eterm
erts_gc_info_request(Process *c_p)
{
- ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
Eterm ref;
ErtsGCInfoReq *gcirp;
Eterm *hp;
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
index c418762578..ebeff51aac 100644
--- a/erts/emulator/beam/erl_hl_timer.c
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -1766,7 +1766,7 @@ setup_bif_timer(Process *c_p, ErtsMonotonicTime timeout_pos,
if (is_not_internal_pid(rcvr) && is_not_atom(rcvr))
goto badarg;
- esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ esdp = erts_proc_sched_data(c_p);
hp = HAlloc(c_p, REF_THING_SIZE);
ref = erts_sched_make_ref_in_buffer(esdp, hp);
@@ -1871,7 +1871,7 @@ access_sched_local_btm(Process *c_p, Eterm pid,
if (!c_p)
esdp = erts_get_scheduler_data();
else {
- esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ esdp = erts_proc_sched_data(c_p);
ERTS_HLT_ASSERT(esdp == erts_get_scheduler_data());
}
@@ -2138,7 +2138,7 @@ access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info)
goto no_timer;
}
- esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ esdp = erts_proc_sched_data(c_p);
trefn = internal_ref_numbers(tref);
sid = erts_get_ref_numbers_thr_id(trefn);
@@ -2363,7 +2363,7 @@ typedef struct {
int erts_cancel_bif_timers(Process *p, ErtsBifTimers *btm, void **vyspp)
{
- ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(p);
+ ErtsSchedulerData *esdp = erts_proc_sched_data(p);
ErtsBifTimerYieldState ys = {btm, {ERTS_RBT_YIELD_STAT_INITER}};
ErtsBifTimerYieldState *ysp;
int res;
@@ -2409,7 +2409,7 @@ detach_bif_timer(ErtsHLTimer *tmr, void *vesdp)
int erts_detach_accessor_bif_timers(Process *p, ErtsBifTimers *btm, void **vyspp)
{
- ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(p);
+ ErtsSchedulerData *esdp = erts_proc_sched_data(p);
ErtsBifTimerYieldState ys = {btm, {ERTS_RBT_YIELD_STAT_INITER}};
ErtsBifTimerYieldState *ysp;
int res;
@@ -2516,7 +2516,7 @@ BIF_RETTYPE send_after_3(BIF_ALIST_3)
ErtsMonotonicTime timeout_pos;
int short_time, tres;
- tres = parse_timeout_pos(ERTS_PROC_GET_SCHDATA(BIF_P), BIF_ARG_1, NULL,
+ tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
0, &timeout_pos, &short_time);
if (tres != 0)
BIF_ERROR(BIF_P, BADARG);
@@ -2534,7 +2534,7 @@ BIF_RETTYPE send_after_4(BIF_ALIST_4)
if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs, &accessor))
BIF_ERROR(BIF_P, BADARG);
- tres = parse_timeout_pos(ERTS_PROC_GET_SCHDATA(BIF_P), BIF_ARG_1, NULL,
+ tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
abs, &timeout_pos, &short_time);
if (tres != 0)
BIF_ERROR(BIF_P, BADARG);
@@ -2548,7 +2548,7 @@ BIF_RETTYPE start_timer_3(BIF_ALIST_3)
ErtsMonotonicTime timeout_pos;
int short_time, tres;
- tres = parse_timeout_pos(ERTS_PROC_GET_SCHDATA(BIF_P), BIF_ARG_1, NULL,
+ tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
0, &timeout_pos, &short_time);
if (tres != 0)
BIF_ERROR(BIF_P, BADARG);
@@ -2566,7 +2566,7 @@ BIF_RETTYPE start_timer_4(BIF_ALIST_4)
if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs, &accessor))
BIF_ERROR(BIF_P, BADARG);
- tres = parse_timeout_pos(ERTS_PROC_GET_SCHDATA(BIF_P), BIF_ARG_1, NULL,
+ tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
abs, &timeout_pos, &short_time);
if (tres != 0)
BIF_ERROR(BIF_P, BADARG);
@@ -2720,7 +2720,7 @@ set_proc_timer_common(Process *c_p, ErtsSchedulerData *esdp, Sint64 tmo,
int
erts_set_proc_timer_term(Process *c_p, Eterm etmo)
{
- ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
ErtsMonotonicTime tmo, timeout_pos;
int short_time, tres;
@@ -2742,7 +2742,7 @@ erts_set_proc_timer_term(Process *c_p, Eterm etmo)
void
erts_set_proc_timer_uword(Process *c_p, UWord tmo)
{
- ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
ERTS_HLT_ASSERT(erts_smp_atomic_read_nob(&c_p->common.timer)
== ERTS_PTMR_NONE);
@@ -2776,7 +2776,7 @@ erts_cancel_proc_timer(Process *c_p)
erts_smp_atomic_set_nob(&c_p->common.timer, ERTS_PTMR_NONE);
return;
}
- continue_cancel_ptimer(ERTS_PROC_GET_SCHDATA(c_p),
+ continue_cancel_ptimer(erts_proc_sched_data(c_p),
(ErtsTimer *) tval);
}
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 9bb6e40a11..579f6e427d 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -492,7 +492,6 @@ queue_messages(Process* receiver,
erts_proc_notify_new_message(receiver, receiver_locks);
#else
erts_proc_notify_new_message(receiver, 0);
- ERTS_HOLE_CHECK(receiver);
#endif
return res;
}
@@ -601,7 +600,9 @@ erts_try_alloc_message_on_heap(Process *pp,
ASSERT(!(*psp & ERTS_PSFLG_OFF_HEAP_MSGQ));
- if (
+ if ((*psp) & ERTS_PSFLGS_VOLATILE_HEAP)
+ goto in_message_fragment;
+ else if (
#if defined(ERTS_SMP)
*plp & ERTS_PROC_LOCK_MAIN
#else
@@ -611,7 +612,7 @@ erts_try_alloc_message_on_heap(Process *pp,
#ifdef ERTS_SMP
try_on_heap:
#endif
- if ((*psp & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
+ if (((*psp) & ERTS_PSFLGS_VOLATILE_HEAP)
|| (pp->flags & F_DISABLE_GC)
|| HEAP_LIMIT(pp) - HEAP_TOP(pp) <= sz) {
/*
diff --git a/erts/emulator/beam/erl_msacc.c b/erts/emulator/beam/erl_msacc.c
index 0e625f213b..544bc8b983 100644
--- a/erts/emulator/beam/erl_msacc.c
+++ b/erts/emulator/beam/erl_msacc.c
@@ -338,7 +338,7 @@ erts_msacc_request(Process *c_p, int action, Eterm *threads)
{
#ifdef ERTS_ENABLE_MSACC
ErtsMsAcc *msacc = ERTS_MSACC_TSD_GET();
- ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
Eterm ref;
ErtsMSAccReq *msaccrp;
Eterm *hp;
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index fa20ce3c86..159dc66ad5 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -20,6 +20,23 @@
/* Erlang Native InterFace
*/
+/*
+ * The environment contains a pointer to the currently executing
+ * process. In the dirty case, however, this pointer does not point
+ * to the actual process structure of the executing process, but to
+ * a "shadow process structure". This makes it possible to handle
+ * heap allocation without acquiring the main lock on the process.
+ *
+ * A dirty process is allowed to allocate on the heap without
+ * the main lock, i.e., to increment htop, but is not allowed to
+ * modify mbuf, offheap, etc. without the main lock. The dirty
+ * process moves the mbuf list and offheap list of the shadow
+ * process structure into the real structure when the dirty NIF
+ * call completes.
+ */
+
+
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
@@ -81,6 +98,43 @@ void dtrace_nifenv_str(ErlNifEnv *, char *);
#define MIN_HEAP_FRAG_SZ 200
static Eterm* alloc_heap_heavy(ErlNifEnv* env, unsigned need, Eterm* hp);
+static ERTS_INLINE int
+is_scheduler(void)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ if (!esdp)
+ return 0;
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ return -1;
+ return 1;
+}
+
+static ERTS_INLINE void
+execution_state(ErlNifEnv *env, Process **c_pp, int *schedp)
+{
+ if (schedp)
+ *schedp = is_scheduler();
+ if (c_pp) {
+ if (!env || env->proc->common.id == ERTS_INVALID_PID)
+ *c_pp = NULL;
+ else {
+ Process *c_p = env->proc;
+
+ if (!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC))
+ ASSERT(is_scheduler() > 0);
+ else {
+ c_p = env->proc->next;
+ ASSERT(is_scheduler() < 0);
+ ASSERT(c_p && env->proc->common.id == c_p->common.id);
+ }
+
+ *c_pp = c_p;
+
+ ASSERT(!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC));
+ }
+ }
+}
+
static ERTS_INLINE Eterm* alloc_heap(ErlNifEnv* env, unsigned need)
{
Eterm* hp = env->hp;
@@ -124,6 +178,9 @@ static ERTS_INLINE void ensure_heap(ErlNifEnv* env, unsigned may_need)
void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
Process* tracee)
{
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ErtsSchedulerData *esdp;
+#endif
env->mod_nif = mod_nif;
env->proc = p;
env->hp = HEAP_TOP(p);
@@ -133,6 +190,61 @@ void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
env->tmp_obj_list = NULL;
env->exception_thrown = 0;
env->tracee = tracee;
+
+ ASSERT(p->common.id != ERTS_INVALID_PID);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ esdp = erts_get_scheduler_data();
+ ASSERT(esdp);
+
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+#ifdef DEBUG
+ erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
+
+ ASSERT(p->scheduler_data == esdp);
+ ASSERT((state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS))
+ && !(state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)));
+#endif
+
+ }
+ else {
+ Process *sproc;
+#ifdef DEBUG
+ erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
+
+ ASSERT(!p->scheduler_data);
+ ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING)
+ && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)));
+#endif
+
+ sproc = esdp->dirty_shadow_process;
+ ASSERT(sproc);
+ ASSERT(sproc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
+ ASSERT(erts_smp_atomic32_read_nob(&sproc->state)
+ == (ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_PROXY));
+
+ sproc->next = p;
+ sproc->common.id = p->common.id;
+ sproc->htop = p->htop;
+ sproc->stop = p->stop;
+ sproc->hend = p->hend;
+ sproc->heap = p->heap;
+ sproc->abandoned_heap = p->abandoned_heap;
+ sproc->heap_sz = p->heap_sz;
+ sproc->high_water = p->high_water;
+ sproc->old_hend = p->old_hend;
+ sproc->old_htop = p->old_htop;
+ sproc->old_heap = p->old_heap;
+ sproc->mbuf = NULL;
+ sproc->mbuf_sz = 0;
+ ERTS_INIT_OFF_HEAP(&sproc->off_heap);
+ env->proc = sproc;
+ }
+#endif
}
/* Temporary object header, auto-deallocated when NIF returns
@@ -157,18 +269,75 @@ static ERTS_INLINE void free_tmp_objs(ErlNifEnv* env)
void erts_post_nif(ErlNifEnv* env)
{
erts_unblock_fpe(env->fpe_was_unmasked);
- if (env->heap_frag == NULL) {
- ASSERT(env->hp_end == HEAP_LIMIT(env->proc));
- ASSERT(env->hp >= HEAP_TOP(env->proc));
- ASSERT(env->hp <= HEAP_LIMIT(env->proc));
- HEAP_TOP(env->proc) = env->hp;
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!(env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC))
+#endif
+ {
+ ASSERT(is_scheduler() > 0);
+ if (env->heap_frag == NULL) {
+ ASSERT(env->hp_end == HEAP_LIMIT(env->proc));
+ ASSERT(env->hp >= HEAP_TOP(env->proc));
+ ASSERT(env->hp <= HEAP_LIMIT(env->proc));
+ HEAP_TOP(env->proc) = env->hp;
+ }
+ else {
+ ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
+ ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
+ env->heap_frag->used_size = env->hp - env->heap_frag->mem;
+ ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
+ }
+ env->exiting = ERTS_PROC_IS_EXITING(env->proc);
}
- else {
- ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
- ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
- env->heap_frag->used_size = env->hp - env->heap_frag->mem;
- ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else { /* Dirty nif call using shadow process struct */
+ Process *c_p = env->proc->next;
+
+ ASSERT(is_scheduler() < 0);
+ ASSERT(env->proc->common.id == c_p->common.id);
+
+ if (!env->heap_frag) {
+ ASSERT(env->hp_end == HEAP_LIMIT(c_p));
+ ASSERT(env->hp >= HEAP_TOP(c_p));
+ ASSERT(env->hp <= HEAP_LIMIT(c_p));
+ HEAP_TOP(c_p) = env->hp;
+ }
+ else {
+ ASSERT(env->hp_end != HEAP_LIMIT(c_p));
+ ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
+
+ HEAP_TOP(c_p) = HEAP_TOP(env->proc);
+ env->heap_frag->used_size = env->hp - env->heap_frag->mem;
+
+ ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
+
+ if (c_p->mbuf) {
+ ErlHeapFragment *bp;
+ for (bp = env->proc->mbuf; bp->next; bp = bp->next)
+ ;
+ bp->next = c_p->mbuf;
+ }
+
+ c_p->mbuf = env->proc->mbuf;
+ c_p->mbuf_sz += env->proc->mbuf_sz;
+
+ }
+
+ if (!c_p->off_heap.first)
+ c_p->off_heap.first = env->proc->off_heap.first;
+ else if (env->proc->off_heap.first) {
+ struct erl_off_heap_header *ohhp;
+ for (ohhp = env->proc->off_heap.first; ohhp->next; ohhp = ohhp->next)
+ ;
+ ohhp->next = c_p->off_heap.first;
+ c_p->off_heap.first = env->proc->off_heap.first;
+ }
+ c_p->off_heap.overhead += env->proc->off_heap.overhead;
+
+ env->exiting = ERTS_PROC_IS_EXITING(c_p);
+ BUMP_ALL_REDS(c_p);
}
+#endif
free_tmp_objs(env);
}
@@ -400,9 +569,8 @@ error:
#endif
-int
-enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
- ErlNifEnv* msg_env, ERL_NIF_TERM msg)
+int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
+ ErlNifEnv* msg_env, ERL_NIF_TERM msg)
{
struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)msg_env;
ErtsProcLocks rp_locks = 0;
@@ -413,34 +581,32 @@ enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
Process* c_p;
ErtsMessage *mp;
Eterm receiver = to_pid->pid;
- int flush_me = 0;
- ErtsSchedulerData *esdp = erts_get_scheduler_data();
- int scheduler = esdp ? esdp->no : 0;
+ int scheduler;
- if (env != NULL) {
- c_p = env->proc;
- if (receiver == c_p->common.id) {
+ execution_state(env, &c_p, &scheduler);
+
+#ifndef ERTS_SMP
+ if (!scheduler) {
+ erts_exit(ERTS_ABORT_EXIT,
+ "enif_send: called from non-scheduler thread on non-SMP VM");
+ return 0;
+ }
+#endif
+
+ if (scheduler > 0) { /* Normal scheduler */
+ rp = erts_proc_lookup(receiver);
+ if (c_p == rp)
rp_locks = ERTS_PROC_LOCK_MAIN;
- flush_me = 1;
- }
}
else {
-#ifdef ERTS_SMP
- c_p = NULL;
-#else
- erts_exit(ERTS_ABORT_EXIT,"enif_send: env==NULL on non-SMP VM");
-#endif
+ if (c_p && ERTS_PROC_IS_EXITING(c_p))
+ return 0;
+ rp = erts_pid2proc_opt(c_p, 0, receiver, rp_locks,
+ ERTS_P2P_FLG_INC_REFC);
}
-
- rp = (scheduler
- ? erts_proc_lookup(receiver)
- : erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
- receiver, rp_locks, ERTS_P2P_FLG_INC_REFC));
-
- if (rp == NULL) {
- ASSERT(env == NULL || receiver != c_p->common.id);
+ if (rp == NULL)
return 0;
- }
+
if (menv) {
flush_env(msg_env);
mp = erts_alloc_message(0, NULL);
@@ -465,10 +631,6 @@ enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
ERL_MESSAGE_TERM(mp) = msg;
- if (flush_me) {
- flush_env(env); /* Needed for ERTS_HOLE_CHECK */
- }
-
if (!env || !env->tracee) {
if (c_p && IS_TRACED_FL(c_p, F_TRACE_SEND))
@@ -546,11 +708,9 @@ done:
if (rp_locks & ~lc_locks)
erts_smp_proc_unlock(rp, rp_locks & ~lc_locks);
#endif
- if (!scheduler)
+ if (scheduler <= 0)
erts_proc_dec_refc(rp);
- if (flush_me) {
- cache_env(env);
- }
+
return 1;
}
@@ -558,26 +718,52 @@ int
enif_port_command(ErlNifEnv *env, const ErlNifPort* to_port,
ErlNifEnv *msg_env, ERL_NIF_TERM msg)
{
-
- ErtsSchedulerData *esdp = erts_get_scheduler_data();
- int scheduler = esdp ? esdp->no : 0;
+ int iflags = (erts_port_synchronous_ops
+ ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
+ : ERTS_PORT_SFLGS_INVALID_LOOKUP);
+ int scheduler;
+ Process *c_p;
Port *prt;
+ int res;
- if (scheduler == 0 || !env)
- return 0;
+ if (!env)
+ erts_exit(ERTS_ABORT_EXIT, "enif_port_command: env == NULL");
+
+ execution_state(env, &c_p, &scheduler);
+
+ if (!c_p)
+ c_p = env->proc;
- prt = erts_port_lookup(to_port->port_id,
- (erts_port_synchronous_ops
- ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
- : ERTS_PORT_SFLGS_INVALID_LOOKUP));
+ if (scheduler > 0)
+ prt = erts_port_lookup(to_port->port_id, iflags);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (scheduler < 0) {
+ if (ERTS_PROC_IS_EXITING(c_p))
+ return 0;
+ prt = erts_thr_port_lookup(to_port->port_id, iflags);
+ }
+#endif
+ else {
+ erts_exit(ERTS_ABORT_EXIT, "enif_port_command: "
+ "called from non-scheduler thread");
+ }
if (!prt)
- return 0;
+ res = 0;
+ else {
+
+ if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
+ trace_port_receive(prt, c_p->common.id, am_command, msg);
- if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
- trace_port_receive(prt, env->proc->common.id, am_command, msg);
+ res = erts_port_output_async(prt, c_p->common.id, msg);
+ }
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (scheduler < 0)
+ erts_port_dec_refc(prt);
+#endif
- return erts_port_output_async(prt, env->proc->common.id, msg);
+ return res;
}
ERL_NIF_TERM enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term)
@@ -1039,15 +1225,21 @@ Eterm enif_make_badarg(ErlNifEnv* env)
Eterm enif_raise_exception(ErlNifEnv* env, ERL_NIF_TERM reason)
{
+ Process *c_p;
+
+ execution_state(env, &c_p, NULL);
+
env->exception_thrown = 1;
- env->proc->fvalue = reason;
- BIF_ERROR(env->proc, EXC_ERROR);
+ c_p->fvalue = reason;
+ BIF_ERROR(c_p, EXC_ERROR);
}
int enif_has_pending_exception(ErlNifEnv* env, ERL_NIF_TERM* reason)
{
if (env->exception_thrown && reason != NULL) {
- *reason = env->proc->fvalue;
+ Process *c_p;
+ execution_state(env, &c_p, NULL);
+ *reason = c_p->fvalue;
}
return env->exception_thrown;
}
@@ -1441,56 +1633,71 @@ int enif_make_reverse_list(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM *list
return 1;
}
+int enif_is_current_process_alive(ErlNifEnv* env)
+{
+ Process *c_p;
+ int scheduler;
+
+ execution_state(env, &c_p, &scheduler);
+
+ if (!c_p)
+ erts_exit(ERTS_ABORT_EXIT,
+ "enif_is_current_process_alive: "
+ "Invalid environment");
+
+ if (!scheduler)
+ erts_exit(ERTS_ABORT_EXIT, "enif_is_current_process_alive: "
+ "called from non-scheduler thread");
+
+ return !ERTS_PROC_IS_EXITING(c_p);
+}
+
int enif_is_process_alive(ErlNifEnv* env, ErlNifPid *proc)
{
- ErtsProcLocks rp_locks = 0; /* We don't need any locks,
- just to check if it is alive */
- Eterm target = proc->pid;
- Process* rp;
- Process* c_p;
- int scheduler = erts_get_scheduler_id() != 0;
+ int scheduler;
- if (env != NULL) {
- c_p = env->proc;
- if (target == c_p->common.id) {
- /* We are alive! */
- return 1;
- }
- }
+ execution_state(env, NULL, &scheduler);
+
+ if (scheduler > 0)
+ return !!erts_proc_lookup(proc->pid);
else {
#ifdef ERTS_SMP
- c_p = NULL;
+ Process* rp = erts_pid2proc_opt(NULL, 0, proc->pid, 0,
+ ERTS_P2P_FLG_INC_REFC);
+ if (rp)
+ erts_proc_dec_refc(rp);
+ return !!rp;
#else
- erts_exit(ERTS_ABORT_EXIT,"enif_is_process_alive: "
- "env==NULL on non-SMP VM");
-#endif
- }
-
- rp = (scheduler
- ? erts_proc_lookup(target)
- : erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
- target, rp_locks, ERTS_P2P_FLG_INC_REFC));
- if (rp == NULL) {
- ASSERT(env == NULL || target != c_p->common.id);
+ erts_exit(ERTS_ABORT_EXIT, "enif_is_process_alive: "
+ "called from non-scheduler thread");
return 0;
- } else {
- if (!scheduler)
- erts_proc_dec_refc(rp);
- return 1;
+#endif
}
}
int enif_is_port_alive(ErlNifEnv *env, ErlNifPort *port)
{
- /* only allowed if called from scheduler */
- if (erts_get_scheduler_id() == 0)
- erts_exit(ERTS_ABORT_EXIT,"enif_is_port_alive: called from non-scheduler");
+ int scheduler;
+ Uint32 iflags = (erts_port_synchronous_ops
+ ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
+ : ERTS_PORT_SFLGS_INVALID_LOOKUP);
- return erts_port_lookup(
- port->port_id,
- (erts_port_synchronous_ops
- ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
- : ERTS_PORT_SFLGS_INVALID_LOOKUP)) != NULL;
+ execution_state(env, NULL, &scheduler);
+
+ if (scheduler > 0)
+ return !!erts_port_lookup(port->port_id, iflags);
+ else {
+#ifdef ERTS_SMP
+ Port *prt = erts_thr_port_lookup(port->port_id, iflags);
+ if (prt)
+ erts_port_dec_refc(prt);
+ return !!prt;
+#else
+ erts_exit(ERTS_ABORT_EXIT, "enif_is_port_alive: "
+ "called from non-scheduler thread");
+ return 0;
+#endif
+ }
}
ERL_NIF_TERM
@@ -1967,16 +2174,19 @@ void* enif_dlsym(void* handle, const char* symbol,
int enif_consume_timeslice(ErlNifEnv* env, int percent)
{
+ Process *proc;
Sint reds;
+ execution_state(env, &proc, NULL);
+
ASSERT(is_proc_bound(env) && percent >= 1 && percent <= 100);
if (percent < 1) percent = 1;
else if (percent > 100) percent = 100;
reds = ((CONTEXT_REDS+99) / 100) * percent;
ASSERT(reds > 0 && reds <= CONTEXT_REDS);
- BUMP_REDS(env->proc, reds);
- return ERTS_BIF_REDS_LEFT(env->proc) == 0;
+ BUMP_REDS(proc, reds);
+ return ERTS_BIF_REDS_LEFT(proc) == 0;
}
/*
@@ -2071,10 +2281,19 @@ static ERL_NIF_TERM
init_nif_sched_data(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
int need_save, int argc, const ERL_NIF_TERM argv[])
{
- Process* proc = env->proc;
- Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
+ Process* proc;
+ Eterm* reg;
NifExport* ep;
- int i;
+ int i, scheduler;
+
+ execution_state(env, &proc, &scheduler);
+
+ ASSERT(scheduler);
+
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(proc)
+ & ERTS_PROC_LOCK_MAIN);
+
+ reg = erts_proc_sched_data(proc)->x_reg_array;
ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
if (!ep)
@@ -2086,12 +2305,13 @@ init_nif_sched_data(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirec
}
if (env->exception_thrown) {
ep->exception_thrown = 1;
- ep->rootset[0] = env->proc->fvalue;
+ ep->rootset[0] = proc->fvalue;
} else {
ep->exception_thrown = 0;
ep->rootset[0] = NIL;
}
- ERTS_VBUMP_ALL_REDS(proc);
+ if (scheduler > 0)
+ ERTS_VBUMP_ALL_REDS(proc);
for (i = 0; i < argc; i++) {
if (need_save)
ep->rootset[i+1] = reg[i];
@@ -2123,7 +2343,12 @@ static void
restore_nif_mfa(Process* proc, NifExport* ep, int exception)
{
int i;
- Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
+ Eterm* reg = erts_proc_sched_data(proc)->x_reg_array;
+
+ ERTS_SMP_LC_ASSERT(!(proc->static_flags
+ & ERTS_STC_FLG_SHADOW_PROC));
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(proc)
+ & ERTS_PROC_LOCK_MAIN);
proc->current[0] = ep->saved_mfa[0];
proc->current[1] = ep->saved_mfa[1];
@@ -2148,11 +2373,13 @@ restore_nif_mfa(Process* proc, NifExport* ep, int exception)
static ERL_NIF_TERM
dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- Process* proc = env->proc;
+ Process* proc;
NifExport* ep;
+ execution_state(env, &proc, NULL);
+
ASSERT(argc == 1);
- ASSERT(!ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
ASSERT(ep);
ASSERT(!ep->exception_thrown);
@@ -2167,10 +2394,12 @@ dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
static ERL_NIF_TERM
dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- Process* proc = env->proc;
+ Process* proc;
NifExport* ep;
- ASSERT(!ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+ execution_state(env, &proc, NULL);
+
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
ASSERT(ep);
ASSERT(ep->exception_thrown);
@@ -2187,23 +2416,32 @@ dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
static ERL_NIF_TERM
execute_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- Process* proc = env->proc;
- NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+ Process* proc;
+ NativeFunPtr fp;
NifExport* ep;
ERL_NIF_TERM result;
- ASSERT(ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+ execution_state(env, &proc, NULL);
+
+ fp = (NativeFunPtr) proc->current[6];
+
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
/*
* Set ep->fp to NULL before the native call so we know later whether it scheduled another NIF for execution
*/
ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- ASSERT(ep);
+ ASSERT(ep && fp);
ep->fp = NULL;
erts_smp_atomic32_read_band_mb(&proc->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
| ERTS_PSFLG_DIRTY_IO_PROC));
+
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+
result = (*fp)(env, argc, argv);
+ erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+
if (erts_refc_dectest(&env->mod_nif->rt_dtor_cnt, 0) == 0 && env->mod_nif->mod == NULL)
close_lib(env->mod_nif);
/*
@@ -2240,29 +2478,49 @@ execute_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
static ERTS_INLINE ERL_NIF_TERM
schedule_dirty_nif(ErlNifEnv* env, int flags, int argc, const ERL_NIF_TERM argv[])
{
- erts_aint32_t state, n, a;
- Process* proc = env->proc;
- NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+ ERL_NIF_TERM result;
+ erts_aint32_t act, dirty_flag;
+ Process* proc;
+ NativeFunPtr fp;
NifExport* ep;
- int need_save;
+ int need_save, scheduler;
+
+ execution_state(env, &proc, &scheduler);
+ if (scheduler <= 0) {
+ ASSERT(scheduler < 0);
+ erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+ }
+
+ fp = (NativeFunPtr) proc->current[6];
+
+ ASSERT(fp);
ASSERT(flags==ERL_NIF_DIRTY_JOB_IO_BOUND || flags==ERL_NIF_DIRTY_JOB_CPU_BOUND);
- a = erts_smp_atomic32_read_acqb(&proc->state);
- while (1) {
- n = state = a;
+ if (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND)
+ dirty_flag = ERTS_PSFLG_DIRTY_CPU_PROC;
+ else
+ dirty_flag = ERTS_PSFLG_DIRTY_IO_PROC;
+
+ act = erts_smp_atomic32_read_bor_nob(&proc->state, dirty_flag);
+ if (!(act & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)))
+ erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1);
+ else if ((act & (ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC)) & ~dirty_flag) {
+ /* clear other flag... */
if (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND)
- n |= ERTS_PSFLG_DIRTY_CPU_PROC;
+ dirty_flag = ERTS_PSFLG_DIRTY_IO_PROC;
else
- n |= ERTS_PSFLG_DIRTY_IO_PROC;
- a = erts_smp_atomic32_cmpxchg_mb(&proc->state, n, state);
- if (a == state)
- break;
+ dirty_flag = ERTS_PSFLG_DIRTY_CPU_PROC;
+ erts_smp_atomic32_read_band_nob(&proc->state, ~dirty_flag);
}
- erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1);
+
ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
need_save = (ep == NULL || is_non_value(ep->saved_mfa[0]));
- return init_nif_sched_data(env, execute_dirty_nif, fp, need_save, argc, argv);
+ result = init_nif_sched_data(env, execute_dirty_nif, fp, need_save, argc, argv);
+ if (scheduler <= 0)
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ return result;
}
static ERL_NIF_TERM
@@ -2287,11 +2545,14 @@ schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
static ERL_NIF_TERM
execute_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- Process* proc = env->proc;
- NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+ Process* proc;
+ NativeFunPtr fp;
NifExport* ep;
ERL_NIF_TERM result;
+ execution_state(env, &proc, NULL);
+ fp = (NativeFunPtr) proc->current[6];
+
ASSERT(!env->exception_thrown);
ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
ASSERT(ep);
@@ -2314,10 +2575,10 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
ERL_NIF_TERM (*fp)(ErlNifEnv*, int, const ERL_NIF_TERM[]),
int argc, const ERL_NIF_TERM argv[])
{
- Process* proc = env->proc;
+ Process* proc;
NifExport* ep;
ERL_NIF_TERM fun_name_atom, result;
- int need_save;
+ int need_save, scheduler;
if (argc > MAX_ARG)
return enif_make_badarg(env);
@@ -2325,6 +2586,13 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
if (enif_is_exception(env, fun_name_atom))
return fun_name_atom;
+ execution_state(env, &proc, &scheduler);
+ if (scheduler <= 0) {
+ if (scheduler == 0)
+ enif_make_badarg(env);
+ erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+ }
+
ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
need_save = (ep == NULL || is_non_value(ep->saved_mfa[0]));
@@ -2336,12 +2604,15 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
sched_fun = schedule_dirty_io_nif;
else if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND)
sched_fun = schedule_dirty_cpu_nif;
- else
- return enif_make_badarg(env);
+ else {
+ result = enif_make_badarg(env);
+ goto done;
+ }
result = init_nif_sched_data(env, sched_fun, fp, need_save, argc, argv);
#else
- return enif_make_badarg(env);
+ result = enif_make_badarg(env);
#endif
+ goto done;
}
else
result = init_nif_sched_data(env, execute_nif, fp, need_save, argc, argv);
@@ -2349,18 +2620,28 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
ASSERT(ep);
ep->exp.code[1] = (BeamInstr) fun_name_atom;
+
+done:
+ if (scheduler < 0)
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+
return result;
}
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
-
int
enif_is_on_dirty_scheduler(ErlNifEnv* env)
{
- return ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data);
-}
+ int scheduler;
+ Process *c_p;
+
+ execution_state(env, &c_p, &scheduler);
-#endif /* ERL_NIF_DIRTY_SCHEDULER_SUPPORT */
+ if (!c_p || !scheduler)
+ erts_exit(ERTS_ABORT_EXIT, "enif_is_on_dirty_scheduler: "
+ "Invalid env");
+
+ return scheduler < 0;
+}
/* Maps */
@@ -3061,16 +3342,16 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
(BeamInstr) BeamOp(op_i_generic_breakpoint));
g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
}
+#ifdef ERTS_DIRTY_SCHEDULERS
if ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
&& (entry->options & ERL_NIF_DIRTY_NIF_OPTION) && f->flags) {
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
code_ptr[5+3] = (BeamInstr) f->fptr;
code_ptr[5+1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ?
(BeamInstr) schedule_dirty_io_nif :
(BeamInstr) schedule_dirty_cpu_nif;
-#endif
}
else
+#endif
code_ptr[5+1] = (BeamInstr) f->fptr;
code_ptr[5+2] = (BeamInstr) lib;
f = next_func(entry, &incr, f);
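
With the erl_nif.c changes above, a NIF can hand long-running work to a dirty
scheduler through enif_schedule_nif() and check its context with
enif_is_on_dirty_scheduler(). A minimal sketch follows; the module name
heavy_mod, the function names, and the factorial loop standing in for real
work are all hypothetical — only the enif_* calls are the API this diff wires
up:

    #include <assert.h>
    #include "erl_nif.h"

    /* Runs on a dirty CPU scheduler; the loop is a stand-in for real work. */
    static ERL_NIF_TERM
    heavy_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        unsigned long n, acc = 1;

        assert(enif_is_on_dirty_scheduler(env)); /* unconditional as of this diff */

        if (argc != 1 || !enif_get_ulong(env, argv[0], &n))
            return enif_make_badarg(env);
        while (n > 1)
            acc *= n--;
        return enif_make_ulong(env, acc);
    }

    /* Entry point called from Erlang; immediately reschedules the real
     * work as a dirty-CPU job and returns to the normal scheduler. */
    static ERL_NIF_TERM
    heavy(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        return enif_schedule_nif(env, "heavy_nif", ERL_NIF_DIRTY_JOB_CPU_BOUND,
                                 heavy_nif, argc, argv);
    }

    static ErlNifFunc funcs[] = {
        {"heavy", 1, heavy}
    };

    ERL_NIF_INIT(heavy_mod, funcs, NULL, NULL, NULL, NULL)
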
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index 468f5cf5ec..da7a754757 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -28,7 +28,6 @@
# include "config.h"
#endif
-#include "erl_native_features_config.h"
#include "erl_drv_nif.h"
/* Version history:
@@ -168,13 +167,11 @@ typedef int ErlNifTSDKey;
typedef ErlDrvThreadOpts ErlNifThreadOpts;
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
typedef enum
{
- ERL_NIF_DIRTY_JOB_CPU_BOUND = ERL_DRV_DIRTY_JOB_CPU_BOUND,
- ERL_NIF_DIRTY_JOB_IO_BOUND = ERL_DRV_DIRTY_JOB_IO_BOUND
+ ERL_NIF_DIRTY_JOB_CPU_BOUND = ERL_DIRTY_JOB_CPU_BOUND,
+ ERL_NIF_DIRTY_JOB_IO_BOUND = ERL_DIRTY_JOB_IO_BOUND
}ErlNifDirtyTaskFlags;
-#endif
typedef struct /* All fields all internal and may change */
{
@@ -258,11 +255,7 @@ extern TWinDynNifCallbacks WinDynNifCallbacks;
# define ERL_NIF_INIT_DECL(MODNAME) ERL_NIF_INIT_EXPORT ErlNifEntry* nif_init(ERL_NIF_INIT_ARGS)
#endif
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
-# define ERL_NIF_ENTRY_OPTIONS ERL_NIF_DIRTY_NIF_OPTION
-#else
-# define ERL_NIF_ENTRY_OPTIONS 0
-#endif
+#define ERL_NIF_ENTRY_OPTIONS ERL_NIF_DIRTY_NIF_OPTION
#ifdef __cplusplus
}
diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h
index c7389b1626..b211ab4b16 100644
--- a/erts/emulator/beam/erl_nif_api_funcs.h
+++ b/erts/emulator/beam/erl_nif_api_funcs.h
@@ -166,27 +166,19 @@ ERL_NIF_API_FUNC_DECL(ErlNifTime, enif_convert_time_unit, (ErlNifTime, ErlNifTim
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_now_time, (ErlNifEnv *env));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_cpu_time, (ErlNifEnv *env));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_make_unique_integer, (ErlNifEnv *env, ErlNifUniqueInteger properties));
+ERL_NIF_API_FUNC_DECL(int, enif_is_current_process_alive, (ErlNifEnv *env));
ERL_NIF_API_FUNC_DECL(int, enif_is_process_alive, (ErlNifEnv *env, ErlNifPid *pid));
ERL_NIF_API_FUNC_DECL(int, enif_is_port_alive, (ErlNifEnv *env, ErlNifPort *port_id));
ERL_NIF_API_FUNC_DECL(int, enif_get_local_port, (ErlNifEnv* env, ERL_NIF_TERM, ErlNifPort* port_id));
ERL_NIF_API_FUNC_DECL(int, enif_term_to_binary, (ErlNifEnv *env, ERL_NIF_TERM term, ErlNifBinary *bin));
ERL_NIF_API_FUNC_DECL(size_t, enif_binary_to_term, (ErlNifEnv *env, const unsigned char* data, size_t sz, ERL_NIF_TERM *term, unsigned int opts));
ERL_NIF_API_FUNC_DECL(int, enif_port_command, (ErlNifEnv *env, const ErlNifPort* to_port, ErlNifEnv *msg_env, ERL_NIF_TERM msg));
+ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*));
ERL_NIF_API_FUNC_DECL(int,enif_snprintf,(char * buffer, size_t size, const char *format, ...));
/*
** ADD NEW ENTRIES HERE (before this comment) !!!
*/
-
-
-/*
- * Conditional EXPERIMENTAL stuff always last.
- * Must be moved up and made unconditional to support binary backward
- * compatibility on Windows.
- */
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
-ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*));
-#endif
#endif /* ERL_NIF_API_FUNC_DECL */
/*
@@ -331,12 +323,14 @@ ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*));
# define enif_now_time ERL_NIF_API_FUNC_MACRO(enif_now_time)
# define enif_cpu_time ERL_NIF_API_FUNC_MACRO(enif_cpu_time)
# define enif_make_unique_integer ERL_NIF_API_FUNC_MACRO(enif_make_unique_integer)
+# define enif_is_current_process_alive ERL_NIF_API_FUNC_MACRO(enif_is_current_process_alive)
# define enif_is_process_alive ERL_NIF_API_FUNC_MACRO(enif_is_process_alive)
# define enif_is_port_alive ERL_NIF_API_FUNC_MACRO(enif_is_port_alive)
# define enif_get_local_port ERL_NIF_API_FUNC_MACRO(enif_get_local_port)
# define enif_term_to_binary ERL_NIF_API_FUNC_MACRO(enif_term_to_binary)
# define enif_binary_to_term ERL_NIF_API_FUNC_MACRO(enif_binary_to_term)
# define enif_port_command ERL_NIF_API_FUNC_MACRO(enif_port_command)
+# define enif_is_on_dirty_scheduler ERL_NIF_API_FUNC_MACRO(enif_is_on_dirty_scheduler)
# define enif_snprintf ERL_NIF_API_FUNC_MACRO(enif_snprintf)
/*
@@ -348,9 +342,6 @@ ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*));
* Must be moved up and made unconditional to support binary backward
* compatibility on Windows.
*/
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
-# define enif_is_on_dirty_scheduler ERL_NIF_API_FUNC_MACRO(enif_is_on_dirty_scheduler)
-#endif
#endif /* ERL_NIF_API_FUNC_MACRO */
diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h
index c2588e718d..f0075ca2b9 100644
--- a/erts/emulator/beam/erl_port.h
+++ b/erts/emulator/beam/erl_port.h
@@ -487,6 +487,7 @@ ERTS_GLB_INLINE Port*erts_id2port(Eterm id);
ERTS_GLB_INLINE Port *erts_id2port_sflgs(Eterm, Process *, ErtsProcLocks, Uint32);
ERTS_GLB_INLINE void erts_port_release(Port *);
#ifdef ERTS_SMP
+ERTS_GLB_INLINE Port *erts_thr_port_lookup(Eterm id, Uint32 invalid_sflgs);
ERTS_GLB_INLINE Port *erts_thr_id2port_sflgs(Eterm id, Uint32 invalid_sflgs);
ERTS_GLB_INLINE void erts_thr_port_release(Port *prt);
#endif
@@ -626,6 +627,44 @@ erts_port_release(Port *prt)
}
#ifdef ERTS_SMP
+/*
+ * erts_thr_port_lookup() and erts_port_dec_refc(prt) can
+ * be used by unmanaged threads in the SMP case.
+ */
+ERTS_GLB_INLINE Port *
+erts_thr_port_lookup(Eterm id, Uint32 invalid_sflgs)
+{
+ Port *prt;
+ ErtsThrPrgrDelayHandle dhndl;
+
+ if (is_not_internal_port(id))
+ return NULL;
+
+ dhndl = erts_thr_progress_unmanaged_delay();
+
+ prt = (Port *) erts_ptab_pix2intptr_ddrb(&erts_port,
+ internal_port_index(id));
+
+ if (!prt || prt->common.id != id) {
+ erts_thr_progress_unmanaged_continue(dhndl);
+ return NULL;
+ }
+ else {
+ erts_aint32_t state;
+ erts_port_inc_refc(prt);
+
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ erts_thr_progress_unmanaged_continue(dhndl);
+
+ state = erts_atomic32_read_acqb(&prt->state);
+ if (state & invalid_sflgs) {
+ erts_port_dec_refc(prt);
+ return NULL;
+ }
+
+ return prt;
+ }
+}
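A reference taken by erts_thr_port_lookup() is released with erts_port_dec_refc(). The intended pattern from an unmanaged thread, sketched (the helper name and the invalid-flags choice are illustrative assumptions):

    static void peek_port_from_unmanaged_thread(Eterm id)
    {
        Port *prt = erts_thr_port_lookup(id, ERTS_PORT_SFLGS_INVALID_LOOKUP);
        if (prt) {
            /* ... inspect the port ... */
            erts_port_dec_refc(prt); /* drop the reference the lookup took */
        }
    }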
/*
* erts_thr_id2port_sflgs() and erts_thr_port_release() can
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index e2f14c23bf..a853ec585b 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -409,6 +409,10 @@ ErtsAlignedSchedulerData *erts_aligned_scheduler_data;
#ifdef ERTS_DIRTY_SCHEDULERS
ErtsAlignedSchedulerData *erts_aligned_dirty_cpu_scheduler_data;
ErtsAlignedSchedulerData *erts_aligned_dirty_io_scheduler_data;
+typedef union {
+ Process dsp;
+ char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(Process))];
+} ErtsAlignedDirtyShadowProcess;
#endif
typedef union {
@@ -589,6 +593,7 @@ dbg_chk_aux_work_val(erts_aint32_t value)
valid |= ERTS_SSI_AUX_WORK_CNCLD_TMRS;
valid |= ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR;
valid |= ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP;
+ valid |= ERTS_SSI_AUX_WORK_PENDING_EXITERS;
#endif
#if HAVE_ERTS_MSEG
valid |= ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK;
@@ -611,7 +616,7 @@ dbg_chk_aux_work_val(erts_aint32_t value)
#endif
#ifdef ERTS_SMP
-static void handle_pending_exiters(ErtsProcList *);
+static void do_handle_pending_exiters(ErtsProcList *);
static void wake_scheduler(ErtsRunQueue *rq);
#endif
@@ -679,6 +684,8 @@ erts_pre_init_process(void)
= "MISC_THR_PRGR";
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MISC_IX]
= "MISC";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_PENDING_EXITERS_IX]
+ = "PENDING_EXITERS";
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_SET_TMO_IX]
= "SET_TMO";
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX]
@@ -1156,7 +1163,7 @@ reply_sched_wall_time(void *vswtrp)
Eterm
erts_sched_wall_time_request(Process *c_p, int set, int enable)
{
- ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
Eterm ref;
ErtsSchedWallTimeReq *swtrp;
Eterm *hp;
@@ -1234,7 +1241,7 @@ reply_system_check(void *vscrp)
Eterm erts_system_check_request(Process *c_p) {
- ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
Eterm ref;
ErtsSystemCheckReq *scrp;
Eterm *hp;
@@ -2336,6 +2343,30 @@ handle_mseg_cache_check(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiti
#endif
+#ifdef ERTS_SMP
+
+static ERTS_INLINE erts_aint32_t
+handle_pending_exiters(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
+{
+ ErtsProcList *pnd_xtrs;
+ ErtsRunQueue *rq;
+
+ rq = awdp->esdp->run_queue;
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_PENDING_EXITERS);
+
+ erts_smp_runq_lock(rq);
+ pnd_xtrs = rq->procs.pending_exiters;
+ rq->procs.pending_exiters = NULL;
+ erts_smp_runq_unlock(rq);
+
+ if (erts_proclist_fetch(&pnd_xtrs, NULL))
+ do_handle_pending_exiters(pnd_xtrs);
+
+ return aux_work & ~ERTS_SSI_AUX_WORK_PENDING_EXITERS;
+}
+
+#endif
+
static ERTS_INLINE erts_aint32_t
handle_setup_aux_work_timer(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
@@ -2427,6 +2458,10 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_MISC,
handle_misc_aux_work);
+#ifdef ERTS_SMP
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_PENDING_EXITERS,
+ handle_pending_exiters);
+#endif
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_SET_TMO,
handle_setup_aux_work_timer);
@@ -3979,6 +4014,33 @@ schedule_bound_processes(ErtsRunQueue *rq,
}
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+static ERTS_INLINE void
+clear_proc_dirty_queue_bit(Process *p, ErtsRunQueue *rq, int prio_bit)
+{
+#ifdef DEBUG
+ erts_aint32_t old;
+#endif
+ erts_aint32_t qb = prio_bit;
+ if (rq == ERTS_DIRTY_CPU_RUNQ)
+ qb <<= ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET;
+ else {
+ ASSERT(rq == ERTS_DIRTY_IO_RUNQ);
+ qb <<= ERTS_PDSFLGS_IN_IO_PRQ_MASK_OFFSET;
+ }
+#ifdef DEBUG
+ old = (int)
+#else
+ (void)
+#endif
+ erts_smp_atomic32_read_band_mb(&p->dirty_state, ~qb);
+ ASSERT(old & qb);
+}
+
+#endif /* ERTS_DIRTY_SCHEDULERS */
+
+
static void
evacuate_run_queue(ErtsRunQueue *rq,
ErtsStuckBoundProcesses *sbpp)
@@ -4141,29 +4203,8 @@ evacuate_run_queue(ErtsRunQueue *rq,
}
#ifdef ERTS_DIRTY_SCHEDULERS
-
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) {
- erts_aint32_t dqbit = qbit;
-#ifdef DEBUG
- erts_aint32_t old_dqbit;
-#endif
-
- if (rq == ERTS_DIRTY_CPU_RUNQ)
- dqbit <<= ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET;
- else {
- ASSERT(rq == ERTS_DIRTY_IO_RUNQ);
- dqbit <<= ERTS_PDSFLGS_IN_IO_PRQ_MASK_OFFSET;
- }
-
-#ifdef DEBUG
- old_dqbit = (int)
-#else
- (void)
-#endif
- erts_smp_atomic32_read_band_mb(&real_proc->dirty_state,
- ~dqbit);
- ASSERT(old_dqbit & dqbit);
- }
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
+ clear_proc_dirty_queue_bit(real_proc, rq, qbit);
#endif
if (ERTS_PSFLG_BOUND & real_state) {
@@ -5653,7 +5694,8 @@ static void
init_scheduler_data(ErtsSchedulerData* esdp, int num,
ErtsSchedulerSleepInfo* ssi,
ErtsRunQueue* runq,
- char** daww_ptr, size_t daww_sz)
+ char** daww_ptr, size_t daww_sz,
+ Process *shadow_proc)
{
esdp->timer_wheel = NULL;
#ifdef ERTS_SMP
@@ -5677,6 +5719,15 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
esdp->no = (Uint) num;
ERTS_DIRTY_SCHEDULER_NO(esdp) = 0;
}
+ esdp->dirty_shadow_process = shadow_proc;
+ if (shadow_proc) {
+ erts_init_empty_process(shadow_proc);
+ erts_smp_atomic32_init_nob(&shadow_proc->state,
+ (ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_PROXY));
+ shadow_proc->static_flags = ERTS_STC_FLG_SHADOW_PROC;
+ }
#else
esdp->no = (Uint) num;
#endif
@@ -5928,31 +5979,41 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
for (ix = 0; ix < n; ix++) {
ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix);
init_scheduler_data(esdp, ix+1, ERTS_SCHED_SLEEP_INFO_IX(ix),
- ERTS_RUNQ_IX(ix), &daww_ptr, daww_sz);
+ ERTS_RUNQ_IX(ix), &daww_ptr, daww_sz,
+ NULL);
}
#ifdef ERTS_DIRTY_SCHEDULERS
-#ifdef ERTS_SMP
- erts_aligned_dirty_cpu_scheduler_data =
- erts_alloc_permanent_cache_aligned(
- ERTS_ALC_T_SCHDLR_DATA,
- no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerData));
- for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) {
- ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
- init_scheduler_data(esdp, ix+1, ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix),
- ERTS_DIRTY_CPU_RUNQ, NULL, 0);
- }
- erts_aligned_dirty_io_scheduler_data =
- erts_alloc_permanent_cache_aligned(
- ERTS_ALC_T_SCHDLR_DATA,
- no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerData));
- for (ix = 0; ix < no_dirty_io_schedulers; ix++) {
- ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
- init_scheduler_data(esdp, ix+1, ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix),
- ERTS_DIRTY_IO_RUNQ, NULL, 0);
+ {
+ int dirty_scheds = no_dirty_cpu_schedulers + no_dirty_io_schedulers;
+ int adspix = 0;
+ ErtsAlignedDirtyShadowProcess *adsp =
+ erts_alloc_permanent_cache_aligned(
+ ERTS_ALC_T_SCHDLR_DATA,
+ dirty_scheds * sizeof(ErtsAlignedDirtyShadowProcess));
+
+ erts_aligned_dirty_cpu_scheduler_data =
+ erts_alloc_permanent_cache_aligned(
+ ERTS_ALC_T_SCHDLR_DATA,
+ dirty_scheds * sizeof(ErtsAlignedSchedulerData));
+
+ erts_aligned_dirty_io_scheduler_data =
+ &erts_aligned_dirty_cpu_scheduler_data[no_dirty_cpu_schedulers];
+
+ for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) {
+ ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
+ init_scheduler_data(esdp, ix+1, ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix),
+ ERTS_DIRTY_CPU_RUNQ, NULL, 0,
+ &adsp[adspix++].dsp);
+ }
+ for (ix = 0; ix < no_dirty_io_schedulers; ix++) {
+ ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
+ init_scheduler_data(esdp, ix+1, ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix),
+ ERTS_DIRTY_IO_RUNQ, NULL, 0,
+ &adsp[adspix++].dsp);
+ }
}
#endif
-#endif
init_misc_aux_work();
init_swtreq_alloc();
@@ -6167,7 +6228,7 @@ check_dirty_enqueue_in_prio_queue(Process *c_p,
erts_aint32_t dact, max_qbit;
/* Termination should be done on an ordinary scheduler */
- if (actual & ERTS_PSFLG_EXITING) {
+ if ((*newp) & ERTS_PSFLG_EXITING) {
*newp &= ~ERTS_PSFLGS_DIRTY_WORK;
return ERTS_ENQUEUE_NORMAL_QUEUE;
}
@@ -6176,7 +6237,7 @@ check_dirty_enqueue_in_prio_queue(Process *c_p,
* If we have system tasks, we enqueue on ordinary run-queue
* and take care of those system tasks first.
*/
- if (actual & ERTS_PSFLG_ACTIVE_SYS)
+ if ((*newp) & ERTS_PSFLG_ACTIVE_SYS)
return ERTS_ENQUEUE_NORMAL_QUEUE;
dact = erts_smp_atomic32_read_mb(&c_p->dirty_state);
@@ -6356,23 +6417,29 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st
* schedule_out_process() return with c_rq locked.
*/
static ERTS_INLINE int
-schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Process *proxy)
+schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
+ Process *proxy, int is_normal_sched)
{
- erts_aint32_t a, e, n, enq_prio = -1;
+ erts_aint32_t a, e, n, enq_prio = -1, running_flgs;
int enqueue; /* < 0 -> use proxy */
ErtsRunQueue* runq;
+ if (is_normal_sched)
+ running_flgs = ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS;
+ else
+ running_flgs = ERTS_PSFLG_DIRTY_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS;
+
a = state;
while (1) {
n = e = a;
- ASSERT(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS));
+ ASSERT(a & running_flgs);
enqueue = ERTS_ENQUEUE_NOT;
- n &= ~(ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS);
- if (a & (ERTS_PSFLG_ACTIVE_SYS|ERTS_PSFLG_DIRTY_ACTIVE_SYS)
+ n &= ~running_flgs;
+ if ((a & (ERTS_PSFLG_ACTIVE_SYS|ERTS_PSFLG_DIRTY_ACTIVE_SYS))
|| (a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) {
enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a);
}
@@ -6485,8 +6552,9 @@ change_proc_schedule_state(Process *p,
ErtsProcLocks locks)
{
/*
- * NOTE: ERTS_PSFLG_RUNNING, ERTS_PSFLG_RUNNING_SYS and
- * ERTS_PSFLG_ACTIVE_SYS are not allowed to be
+ * NOTE: ERTS_PSFLG_RUNNING, ERTS_PSFLG_RUNNING_SYS,
+ * ERTS_PSFLG_DIRTY_RUNNING, ERTS_PSFLG_DIRTY_RUNNING_SYS
+ * and ERTS_PSFLG_ACTIVE_SYS are not allowed to be
* altered by this function!
*/
erts_aint32_t a = *statep, n;
@@ -6500,9 +6568,13 @@ change_proc_schedule_state(Process *p,
ASSERT(!(a & ERTS_PSFLG_PROXY));
ASSERT((clear_state_flags & (ERTS_PSFLG_RUNNING
| ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
| ERTS_PSFLG_ACTIVE_SYS)) == 0);
ASSERT((set_state_flags & (ERTS_PSFLG_RUNNING
| ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
| ERTS_PSFLG_ACTIVE_SYS)) == 0);
if (lock_status)
@@ -6526,8 +6598,16 @@ change_proc_schedule_state(Process *p,
if ((n & (ERTS_PSFLG_SUSPENDED
| ERTS_PSFLG_RUNNING
| ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
| ERTS_PSFLG_IN_RUNQ
- | ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE) {
+ | ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE
+#ifdef ERTS_DIRTY_SCHEDULERS
+ || (n & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_EXITING)) == ERTS_PSFLG_EXITING
+#endif
+ ) {
/*
* Active and seemingly need to be enqueued, but
* process may be in a run queue via proxy, need
@@ -6551,7 +6631,9 @@ change_proc_schedule_state(Process *p,
| ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE)
&& (!(a & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS)
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)
&& (!(a & ERTS_PSFLG_ACTIVE)
|| (a & ERTS_PSFLG_SUSPENDED))))) {
 /* We activated a previously inactive process */
@@ -6693,7 +6775,10 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st)
enqueue = ERTS_ENQUEUE_NOT;
n |= ERTS_PSFLG_ACTIVE_SYS;
- if (!(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)))
+ if (!(a & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)))
enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a);
a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
if (a == e)
@@ -6706,7 +6791,9 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st)
if (!(a & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS))
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS))
&& (!(a & ERTS_PSFLG_ACTIVE) || (a & ERTS_PSFLG_SUSPENDED))) {
 /* We activated a previously inactive process */
profile_runnable_proc(p, am_active);
@@ -6746,11 +6833,16 @@ suspend_process(Process *c_p, Process *p)
if (c_p == p) {
state = erts_smp_atomic32_read_bor_relb(&p->state,
ERTS_PSFLG_SUSPENDED);
- ASSERT(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS));
+ ASSERT(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS));
suspended = (state & ERTS_PSFLG_SUSPENDED) ? -1: 1;
}
else {
- while (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_EXITING))) {
+ while (!(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_EXITING))) {
erts_aint32_t n, e;
n = e = state;
@@ -6776,8 +6868,11 @@ suspend_process(Process *c_p, Process *p)
if ((state & (ERTS_PSFLG_ACTIVE
| ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS
| ERTS_PSFLG_RUNNING
| ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
| ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) {
/* We made process inactive */
profile_runnable_proc(p, am_inactive);
@@ -7759,8 +7854,10 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
plp = proclist_create(p);
erts_proclist_store_last(&msbp->blckrs, plp);
p->flags |= have_blckd_flg;
- ASSERT(schdlr_sspnd.active == ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0));
- ASSERT(p->scheduler_data->no == 1);
+ ASSERT(normal
+ ? 1 == schdlr_sspnd_get_nscheds(&schdlr_sspnd.active, ERTS_SCHED_NORMAL)
+ : schdlr_sspnd.active == ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0));
+ ASSERT(erts_proc_sched_data(p)->no == 1);
if (schdlr_sspnd.msb.ongoing)
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
else
@@ -7780,7 +7877,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
if (schdlr_sspnd.active == ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0)
|| (normal && schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
ERTS_SCHED_NORMAL) == 1)) {
- ASSERT(p->scheduler_data->no == 1);
+ ASSERT(erts_proc_sched_data(p)->no == 1);
plp = proclist_create(p);
erts_proclist_store_last(&msbp->blckrs, plp);
if (schdlr_sspnd.msb.ongoing)
@@ -7830,7 +7927,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
else
res = ERTS_SCHDLR_SSPND_YIELD_DONE_NMSCHED_BLOCKED;
}
- ASSERT(p->scheduler_data);
+ ASSERT(erts_proc_sched_data(p));
}
}
else if (!msbp->ongoing) {
@@ -8420,9 +8517,23 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
if (!suspend_process(c_p, rp)) {
/* Other process running */
- ASSERT(ERTS_PSFLG_RUNNING
+ ASSERT((ERTS_PSFLG_RUNNING | ERTS_PSFLG_DIRTY_RUNNING)
& erts_smp_atomic32_read_nob(&rp->state));
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!suspend
+ && (erts_smp_atomic32_read_nob(&rp->state)
+ & ERTS_PSFLG_DIRTY_RUNNING)) {
+ ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS;
+ if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) {
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
+ pid, pid_locks|ERTS_PROC_LOCK_STATUS);
+ }
+ goto done;
+ }
+#endif
+
running:
/*
@@ -8447,7 +8558,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
else {
ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS;
if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) {
- if (ERTS_PSFLG_RUNNING_SYS
+ if ((ERTS_PSFLG_RUNNING_SYS|ERTS_PSFLG_DIRTY_RUNNING_SYS)
& erts_smp_atomic32_read_nob(&rp->state)) {
/* Executing system task... */
resume_process(rp, ERTS_PROC_LOCK_STATUS);
@@ -8474,7 +8585,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
* from being selected for normal execution regardless
* of locks held or not held on it...
*/
- ASSERT(!(ERTS_PSFLG_RUNNING
+ ASSERT(!((ERTS_PSFLG_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS)
& erts_smp_atomic32_read_nob(&rp->state)));
if (!suspend)
@@ -9015,28 +9126,43 @@ erts_run_queues_len(Uint *qlen, int atomic_queues_read, int incl_active_sched)
}
Eterm
-erts_process_status(Process *c_p, ErtsProcLocks c_p_locks,
- Process *rp, Eterm rpid)
+erts_process_state2status(erts_aint32_t state)
+{
+ if (state & ERTS_PSFLG_FREE)
+ return am_free;
+
+ if (state & ERTS_PSFLG_EXITING)
+ return am_exiting;
+
+ if (state & ERTS_PSFLG_GC)
+ return am_garbage_collecting;
+
+ if (state & ERTS_PSFLG_SUSPENDED)
+ return am_suspended;
+
+ if (state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS))
+ return am_running;
+
+ if (state & (ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS))
+ return am_runnable;
+
+ return am_waiting;
+}
+
+Eterm
+erts_process_status(Process *rp, Eterm rpid)
{
Eterm res = am_undefined;
Process *p = rp ? rp : erts_proc_lookup_raw(rpid);
if (p) {
erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
- if (state & ERTS_PSFLG_FREE)
- res = am_free;
- else if (state & ERTS_PSFLG_EXITING)
- res = am_exiting;
- else if (state & ERTS_PSFLG_GC)
- res = am_garbage_collecting;
- else if (state & ERTS_PSFLG_SUSPENDED)
- res = am_suspended;
- else if (state & ERTS_PSFLG_RUNNING)
- res = am_running;
- else if (state & ERTS_PSFLG_ACTIVE)
- res = am_runnable;
- else
- res = am_waiting;
+ res = erts_process_state2status(state);
}
#ifdef ERTS_SMP
else {
@@ -9251,7 +9377,76 @@ scheduler_gc_proc(Process *c_p, int reds_left)
return reds;
}
+static ERTS_INLINE void
+clean_dirty_start(Process *p)
+{
+#if defined(ERTS_DIRTY_SCHEDULERS) && !defined(ARCH_64)
+ void *ptr = ERTS_PROC_SET_DIRTY_CPU_START(p, NULL);
+ if (ptr)
+ erts_free(ERTS_ALC_T_DIRTY_START, ptr);
+#endif
+}
+static ERTS_INLINE void
+save_dirty_start(ErtsSchedulerData *esdp, Process *c_p)
+{
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(esdp->run_queue)) {
+ ErtsMonotonicTime time = erts_get_monotonic_time(esdp);
+#ifdef ARCH_64
+ ERTS_PROC_SET_DIRTY_CPU_START(c_p, (void *) time);
+#else
+ ErtsMonotonicTime *stimep;
+
+ stimep = (ErtsMonotonicTime *) ERTS_PROC_GET_DIRTY_CPU_START(c_p);
+ if (!stimep) {
+ stimep = erts_alloc(ERTS_ALC_T_DIRTY_START,
+ sizeof(ErtsMonotonicTime));
+ ERTS_PROC_SET_DIRTY_CPU_START(c_p, (void *) stimep);
+ }
+ *stimep = time;
+#endif
+ }
+#endif
+}
+
+static ERTS_INLINE int
+get_dirty_reds(ErtsSchedulerData *esdp, Process *c_p)
+{
+
+#ifndef ERTS_DIRTY_SCHEDULERS
+ return -1;
+#else
+ ErtsMonotonicTime stime, time;
+
+ if (!ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(esdp->run_queue))
+ return 1;
+
+#ifdef ARCH_64
+ stime = (ErtsMonotonicTime) ERTS_PROC_GET_DIRTY_CPU_START(c_p);
+#else
+ {
+ ErtsMonotonicTime *stimep;
+ stimep = (ErtsMonotonicTime *) ERTS_PROC_GET_DIRTY_CPU_START(c_p);
+ ASSERT(stimep);
+ stime = *stimep;
+ }
+#endif
+
+ time = erts_get_monotonic_time(esdp);
+
+ ASSERT(stime && stime < time);
+
+ time -= stime;
+ time = ERTS_MONOTONIC_TO_USEC(time);
+ time *= 2;
+
+ if (time > INT_MAX)
+ return INT_MAX;
+ return (int) time;
+#endif
+
+}
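The charge works out to two reductions per microsecond of dirty CPU wall time, saturated at INT_MAX. The arithmetic restated as a hypothetical helper:

    static int dirty_usec_to_reds(ErtsMonotonicTime usec)
    {
        ErtsMonotonicTime reds = usec * 2; /* 2 reds per microsecond */
        /* e.g. a dirty job that ran 1.5 ms is charged ~3000 reductions */
        return reds > INT_MAX ? INT_MAX : (int) reds;
    }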
/*
* schedule() is called from BEAM (process_main()) or HiPE
@@ -9283,6 +9478,7 @@ Process *schedule(Process *p, int calls)
int reds;
Uint32 flags;
 erts_aint32_t state = 0; /* Suppress warning... */
+ int is_normal_sched;
ERTS_MSACC_DECLARE_CACHE();
@@ -9312,25 +9508,44 @@ Process *schedule(Process *p, int calls)
*/
if (!p) { /* NULL in the very first schedule() call */
esdp = erts_get_scheduler_data();
+ is_normal_sched = !ERTS_SCHEDULER_IS_DIRTY(esdp);
rq = erts_get_runq_current(esdp);
ASSERT(esdp);
fcalls = (int) erts_smp_atomic32_read_acqb(&function_calls);
actual_reds = reds = 0;
erts_smp_runq_lock(rq);
} else {
- sched_out_proc:
-
#ifdef ERTS_SMP
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ esdp = p->scheduler_data;
+ is_normal_sched = esdp != NULL;
+ if (is_normal_sched)
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
+ else {
+ esdp = erts_get_scheduler_data();
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
+ }
+#else
esdp = p->scheduler_data;
+ is_normal_sched = 1;
+#endif
ASSERT(esdp->current_process == p
|| esdp->free_process == p);
#else
esdp = erts_scheduler_data;
ASSERT(esdp->current_process == p);
+ is_normal_sched = 1;
#endif
- reds = actual_reds = calls - esdp->virtual_reds;
+ sched_out_proc:
+
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+
+ if (is_normal_sched)
+ reds = actual_reds = calls - esdp->virtual_reds;
+ else
+ reds = actual_reds = get_dirty_reds(esdp, p);
+
ASSERT(actual_reds >= 0);
if (reds < ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST)
reds = ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST;
@@ -9377,7 +9592,7 @@ Process *schedule(Process *p, int calls)
state = erts_smp_atomic32_read_nob(&p->state);
#ifdef ERTS_SMP
- if (state & ERTS_PSFLG_PENDING_EXIT)
+ if (is_normal_sched && (state & ERTS_PSFLG_PENDING_EXIT))
erts_handle_pending_exit(p, (ERTS_PROC_LOCK_MAIN
| ERTS_PROC_LOCK_STATUS));
if (p->pending_suspenders)
@@ -9387,7 +9602,8 @@ Process *schedule(Process *p, int calls)
esdp->reductions += reds;
- schedule_out_process(rq, state, p, proxy_p); /* Returns with rq locked! */
+ /* schedule_out_process() returns with rq locked! */
+ schedule_out_process(rq, state, p, proxy_p, is_normal_sched);
proxy_p = NULL;
ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
@@ -9405,13 +9621,20 @@ Process *schedule(Process *p, int calls)
ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER);
if (state & ERTS_PSFLG_FREE) {
+ if (!is_normal_sched) {
+ ASSERT(p->flags & F_DELAYED_DEL_PROC);
+ erts_proc_dec_refc(p);
+ }
+ else {
#ifdef ERTS_SMP
- ASSERT(esdp->free_process == p);
- esdp->free_process = NULL;
+ ASSERT(esdp->free_process == p);
+ esdp->free_process = NULL;
#else
- erts_proc_dec_refc(p);
+ erts_proc_dec_refc(p);
#endif
+ }
}
+
#ifdef ERTS_SMP
ASSERT(!esdp->free_process);
#endif
@@ -9419,7 +9642,7 @@ Process *schedule(Process *p, int calls)
ERTS_SMP_CHK_NO_PROC_LOCKS;
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (is_normal_sched) {
if (esdp->check_time_reds >= ERTS_CHECK_TIME_REDS)
(void) erts_get_monotonic_time(esdp);
@@ -9433,23 +9656,15 @@ Process *schedule(Process *p, int calls)
}
- ERTS_SMP_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp)
- || !erts_thr_progress_is_blocking());
+ ERTS_SMP_LC_ASSERT(!is_normal_sched || !erts_thr_progress_is_blocking());
check_activities_to_run: {
+ erts_aint32_t psflg_running, psflg_running_sys;
#ifdef ERTS_SMP
ErtsMigrationPaths *mps;
ErtsMigrationPath *mp;
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- ErtsProcList *pnd_xtrs = rq->procs.pending_exiters;
- if (erts_proclist_fetch(&pnd_xtrs, NULL)) {
- rq->procs.pending_exiters = NULL;
- erts_smp_runq_unlock(rq);
- handle_pending_exiters(pnd_xtrs);
- erts_smp_runq_lock(rq);
- }
-
+ if (is_normal_sched) {
if (rq->check_balance_reds <= 0)
check_balance(rq);
@@ -9466,32 +9681,35 @@ Process *schedule(Process *p, int calls)
continue_check_activities_to_run:
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
continue_check_activities_to_run_known_flags:
- ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp)
- || flags & ERTS_RUNQ_FLG_NONEMPTY);
+ ASSERT(!is_normal_sched || (flags & ERTS_RUNQ_FLG_NONEMPTY));
- if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND|ERTS_RUNQ_FLG_SUSPENDED)) {
- if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
- (void) ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
+ if (!is_normal_sched) {
+ if (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED) {
suspend_scheduler(esdp);
- flags = ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
- flags |= ERTS_RUNQ_FLG_EXEC;
- }
- if (flags & ERTS_RUNQ_FLG_CHK_CPU_BIND) {
- flags = ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND);
- flags &= ~ ERTS_RUNQ_FLG_CHK_CPU_BIND;
- erts_sched_check_cpu_bind(esdp);
}
}
-#ifdef ERTS_DIRTY_SCHEDULERS
- else if (ERTS_SCHEDULER_IS_DIRTY(esdp)
- && (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
- & ERTS_SSI_FLG_SUSPENDED))
- suspend_scheduler(esdp);
-#endif
-
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ else {
erts_aint32_t aux_work;
- int leader_update = erts_thr_progress_update(esdp);
+ int leader_update;
+
+ ASSERT(is_normal_sched);
+
+ if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND|ERTS_RUNQ_FLG_SUSPENDED)) {
+ if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
+ (void) ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
+ suspend_scheduler(esdp);
+ flags = ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
+ flags |= ERTS_RUNQ_FLG_EXEC;
+ }
+ if (flags & ERTS_RUNQ_FLG_CHK_CPU_BIND) {
+ flags = ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND);
+ flags &= ~ERTS_RUNQ_FLG_CHK_CPU_BIND;
+ erts_sched_check_cpu_bind(esdp);
+ }
+ }
+
+ leader_update = erts_thr_progress_update(esdp);
aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work);
if (aux_work | leader_update) {
erts_smp_runq_unlock(rq);
@@ -9517,19 +9735,13 @@ Process *schedule(Process *p, int calls)
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && rq->halt_in_progress) {
- /*
- * TODO: if halt in progress, need to put the dirty scheduler
- * to sleep somewhere around here to prevent it from picking up
- * new work
- */
+ if (!is_normal_sched && rq->halt_in_progress) {
+ /* Wait for emulator to terminate... */
+ while (1)
+ erts_milli_sleep(1000*1000);
}
- else
-#endif
-
- if ((!(flags & ERTS_RUNQ_FLGS_QMASK) && !rq->misc.start)
- || (rq->halt_in_progress && ERTS_EMPTY_RUNQ_PORTS(rq))) {
+ else if ((!(flags & ERTS_RUNQ_FLGS_QMASK) && !rq->misc.start)
+ || (rq->halt_in_progress && ERTS_EMPTY_RUNQ_PORTS(rq))) {
/* Prepare for scheduler wait */
#ifdef ERTS_SMP
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
@@ -9543,7 +9755,7 @@ Process *schedule(Process *p, int calls)
if (flags & ERTS_RUNQ_FLG_INACTIVE)
empty_runq(rq);
else {
- if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && try_steal_task(rq))
+ if (is_normal_sched && try_steal_task(rq))
goto continue_check_activities_to_run;
empty_runq(rq);
@@ -9572,9 +9784,9 @@ Process *schedule(Process *p, int calls)
goto check_activities_to_run;
}
- else if (!ERTS_SCHEDULER_IS_DIRTY(esdp) &&
- (fcalls > input_reductions &&
- prepare_for_sys_schedule(!0))) {
+ else if (is_normal_sched
+ && (fcalls > input_reductions
+ && prepare_for_sys_schedule(!0))) {
ErtsMonotonicTime current_time;
/*
* Schedule system-level activities.
@@ -9688,11 +9900,17 @@ Process *schedule(Process *p, int calls)
ASSERT(p); /* Wrong qmask in rq->flags? */
- if (ERTS_SCHEDULER_IS_DIRTY(esdp))
- psflg_band_mask = ~((erts_aint32_t) 0);
- else
+ if (is_normal_sched) {
+ psflg_running = ERTS_PSFLG_RUNNING;
+ psflg_running_sys = ERTS_PSFLG_RUNNING_SYS;
psflg_band_mask = ~(((erts_aint32_t) 1) << (ERTS_PSFLGS_GET_PRQ_PRIO(state)
+ ERTS_PSFLGS_IN_PRQ_MASK_OFFSET));
+ }
+ else {
+ psflg_running = ERTS_PSFLG_DIRTY_RUNNING;
+ psflg_running_sys = ERTS_PSFLG_DIRTY_RUNNING_SYS;
+ psflg_band_mask = ~((erts_aint32_t) 0);
+ }
if (!(state & ERTS_PSFLG_PROXY))
psflg_band_mask &= ~ERTS_PSFLG_IN_RUNQ;
@@ -9707,34 +9925,53 @@ Process *schedule(Process *p, int calls)
state = erts_smp_atomic32_read_nob(&p->state);
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!is_normal_sched)
+ clear_proc_dirty_queue_bit(p, rq, qbit);
+#endif
+
while (1) {
- erts_aint32_t exp, new, tmp;
- tmp = new = exp = state;
+ erts_aint32_t exp, new;
+ int run_process;
+ new = exp = state;
new &= psflg_band_mask;
- if (!(state & (ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS))) {
- tmp = state & (ERTS_PSFLG_SUSPENDED
- | ERTS_PSFLG_PENDING_EXIT
- | ERTS_PSFLG_ACTIVE_SYS
- | ERTS_PSFLG_DIRTY_ACTIVE_SYS);
- if (tmp != ERTS_PSFLG_SUSPENDED) {
- if (state & (ERTS_PSFLG_ACTIVE_SYS
- | ERTS_PSFLG_DIRTY_ACTIVE_SYS))
- new |= ERTS_PSFLG_RUNNING_SYS;
- else
- new |= ERTS_PSFLG_RUNNING;
- }
+ /*
+ * Run the process if it is not already running (or
+ * free), or if it is exiting but not yet running and
+ * we are on a normal scheduler; in addition, it must
+ * not be suspended (unless in a state where the
+ * suspend should be ignored).
+ */
+ run_process = (((!(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
+ | ERTS_PSFLG_FREE)))
+#ifdef ERTS_DIRTY_SCHEDULERS
+ | (((state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_FREE
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_EXITING))
+ == ERTS_PSFLG_EXITING)
+ & (!!is_normal_sched))
+#endif
+ )
+ & ((state & (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_FREE
+ | ERTS_PSFLG_PENDING_EXIT
+ | ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS))
+ != ERTS_PSFLG_SUSPENDED));
+ if (run_process) {
+ if (state & (ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS))
+ new |= psflg_running_sys;
+ else
+ new |= psflg_running;
}
state = erts_smp_atomic32_cmpxchg_relb(&p->state, new, exp);
if (state == exp) {
- if ((state & (ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS
- | ERTS_PSFLG_FREE))
- || ((state & (ERTS_PSFLG_SUSPENDED
- | ERTS_PSFLG_PENDING_EXIT
- | ERTS_PSFLG_ACTIVE_SYS
- | ERTS_PSFLG_DIRTY_ACTIVE_SYS))
- == ERTS_PSFLG_SUSPENDED)) {
+ if (!run_process) {
if (proxy_p) {
free_proxy_proc(proxy_p);
proxy_p = NULL;
@@ -9761,34 +9998,13 @@ Process *schedule(Process *p, int calls)
erts_smp_runq_unlock(rq);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
-#ifdef DEBUG
- int old_dqbit;
-#endif
- int dqbit = qbit;
-
- if (rq == ERTS_DIRTY_CPU_RUNQ)
- dqbit <<= ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET;
- else {
- ASSERT(rq == ERTS_DIRTY_IO_RUNQ);
- dqbit <<= ERTS_PDSFLGS_IN_IO_PRQ_MASK_OFFSET;
- }
-
-#ifdef DEBUG
- old_dqbit = (int)
-#else
- (void)
-#endif
- erts_smp_atomic32_read_band_mb(&p->dirty_state, ~dqbit);
- ASSERT(old_dqbit & dqbit);
- }
-#endif /* ERTS_DIRTY_SCHEDULERS */
-
#endif /* ERTS_SMP */
}
+ if (!is_normal_sched)
+ save_dirty_start(esdp, p);
+
#ifdef ERTS_SMP
if (flags & ERTS_RUNQ_FLG_PROTECTED)
@@ -9805,9 +10021,7 @@ Process *schedule(Process *p, int calls)
UWord old = ERTS_PROC_SCHED_ID(p, (UWord) esdp->no);
int migrated = old && old != esdp->no;
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
-#endif
+ ASSERT(is_normal_sched);
prio = (int) ERTS_PSFLGS_GET_USR_PRIO(state);
@@ -9821,20 +10035,21 @@ Process *schedule(Process *p, int calls)
erts_smp_spin_unlock(&erts_sched_stat.lock);
}
- ASSERT(!p->scheduler_data);
- p->scheduler_data = esdp;
-
state = erts_smp_atomic32_read_nob(&p->state);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- if (!!(state & ERTS_PSFLGS_DIRTY_WORK)
- & !(state & ERTS_PSFLG_ACTIVE_SYS)) {
+ ASSERT(!p->scheduler_data);
+#ifndef ERTS_DIRTY_SCHEDULERS
+ p->scheduler_data = esdp;
+#else /* ERTS_DIRTY_SCHEDULERS */
+ if (is_normal_sched) {
+ if ((!!(state & ERTS_PSFLGS_DIRTY_WORK))
+ & (!(state & ERTS_PSFLG_ACTIVE_SYS))) {
/* Migrate to dirty scheduler... */
sunlock_sched_out_proc:
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
goto sched_out_proc;
}
+ p->scheduler_data = esdp;
}
else {
if (state & (ERTS_PSFLG_ACTIVE_SYS
@@ -9870,7 +10085,10 @@ Process *schedule(Process *p, int calls)
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- if (IS_TRACED(p)) {
+ /* Clear tracer if it has been removed */
+ if (IS_TRACED(p) && erts_is_tracer_proc_enabled(
+ p, ERTS_PROC_LOCK_MAIN, &p->common)) {
+
if (state & ERTS_PSFLG_EXITING) {
if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in_exiting);
@@ -9885,13 +10103,8 @@ Process *schedule(Process *p, int calls)
}
}
-
-#ifdef ERTS_SMP
- /* Clears tracer if it has been removed */
- (void)ERTS_TRACER_PROC_IS_ENABLED(p);
-#endif
-
- if (state & ERTS_PSFLG_RUNNING_SYS) {
+ if (state & (ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
/*
* GC is normally never delayed when a process
* is scheduled out, but might be when executing
@@ -9905,7 +10118,7 @@ Process *schedule(Process *p, int calls)
reds -= cost;
if (reds <= 0
#ifdef ERTS_DIRTY_SCHEDULERS
- || ERTS_SCHEDULER_IS_DIRTY(esdp)
+ || !is_normal_sched
|| (state & ERTS_PSFLGS_DIRTY_WORK)
#endif
) {
@@ -9913,8 +10126,8 @@ Process *schedule(Process *p, int calls)
}
}
- ASSERT(state & ERTS_PSFLG_RUNNING_SYS);
- ASSERT(!(state & ERTS_PSFLG_RUNNING));
+ ASSERT(state & psflg_running_sys);
+ ASSERT(!(state & psflg_running));
while (1) {
erts_aint32_t n, e;
@@ -9926,8 +10139,8 @@ Process *schedule(Process *p, int calls)
}
n = e = state;
- n &= ~ERTS_PSFLG_RUNNING_SYS;
- n |= ERTS_PSFLG_RUNNING;
+ n &= ~psflg_running_sys;
+ n |= psflg_running;
state = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
if (state == e) {
@@ -9935,8 +10148,8 @@ Process *schedule(Process *p, int calls)
break;
}
- ASSERT(state & ERTS_PSFLG_RUNNING_SYS);
- ASSERT(!(state & ERTS_PSFLG_RUNNING));
+ ASSERT(state & psflg_running_sys);
+ ASSERT(!(state & psflg_running));
}
}
@@ -10525,7 +10738,10 @@ save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio)
}
state = erts_smp_atomic32_read_nob(&c_p->state);
- ASSERT((ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS) & state);
+ ASSERT((ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS) & state);
while (!(state & ERTS_PSFLG_DELAYED_SYS)
|| prio < ERTS_PSFLGS_GET_ACT_PRIO(state)) {
@@ -10850,6 +11066,8 @@ erts_get_exact_total_reductions(Process *c_p, Uint *redsp, Uint *diffp)
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
+static void delete_process(Process* p);
+
void
erts_free_proc(Process *p)
{
@@ -10858,6 +11076,8 @@ erts_free_proc(Process *p)
#endif
ASSERT(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_FREE);
ASSERT(0 == erts_proc_read_refc(p));
+ if (p->flags & F_DELAYED_DEL_PROC)
+ delete_process(p);
erts_free(ERTS_ALC_T_PROC, (void *) p);
}
@@ -11490,18 +11710,36 @@ erts_cleanup_empty_process(Process* p)
#endif
}
-/*
- * p must be the currently executing process.
- */
static void
delete_process(Process* p)
{
Eterm *heap;
ErtsPSD *psd;
+ struct saved_calls *scb;
+ process_breakpoint_time_t *pbt;
+ void *nif_export;
+
VERBOSE(DEBUG_PROCESSES, ("Removing process: %T\n",p->common.id));
VERBOSE(DEBUG_SHCOPY, ("[pid=%T] delete process: %p %p %p %p\n", p->common.id,
HEAP_START(p), HEAP_END(p), OLD_HEAP(p), OLD_HEND(p)));
+ scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, NULL);
+
+ if (scb) {
+ p->fcalls += CONTEXT_REDS; /* Reduction counting depends on this... */
+ erts_free(ERTS_ALC_T_CALLS_BUF, (void *) scb);
+ }
+
+ pbt = ERTS_PROC_SET_CALL_TIME(p, NULL);
+ if (pbt)
+ erts_free(ERTS_ALC_T_BPD, (void *) pbt);
+
+ nif_export = ERTS_PROC_SET_NIF_TRAP_EXPORT(p, NULL);
+ if (nif_export)
+ erts_destroy_nif_export(nif_export);
+
+ clean_dirty_start(p);
+
/* Cleanup psd */
psd = (ErtsPSD *) erts_smp_atomic_read_nob(&p->psd);
@@ -11634,7 +11872,10 @@ set_proc_self_exiting(Process *c_p)
ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCKS_ALL);
state = erts_smp_atomic32_read_nob(&c_p->state);
- ASSERT(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS));
+ ASSERT(state & (ERTS_PSFLG_RUNNING
+ |ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS));
#ifdef DEBUG
enqueue =
@@ -11684,51 +11925,73 @@ erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks)
erts_smp_proc_unlock(c_p, xlocks);
}
+static void save_pending_exiter(Process *p, ErtsProcList *plp);
+
static void
-handle_pending_exiters(ErtsProcList *pnd_xtrs)
+do_handle_pending_exiters(ErtsProcList *pnd_xtrs)
{
 /* 'pnd_xtrs' is expected to have been fetched (i.e. not a ring anymore) */
ErtsProcList *plp = pnd_xtrs;
while (plp) {
- ErtsProcList *free_plp;
- Process *p = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCKS_ALL);
+ ErtsProcList *next_plp = plp->next;
+ Process *p = erts_proc_lookup(plp->pid);
if (p) {
- if (erts_proclist_same(plp, p)) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
- if (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))) {
- ASSERT(state & ERTS_PSFLG_PENDING_EXIT);
- erts_handle_pending_exit(p, ERTS_PROC_LOCKS_ALL);
+ erts_aint32_t state;
+ /*
+ * If the process is running on a normal scheduler, the
+ * pending exit will soon be detected and handled by the
+ * scheduler running the process (at schedule in/out).
+ */
+ if (erts_smp_proc_trylock(p, ERTS_PROC_LOCKS_ALL) != EBUSY) {
+ if (erts_proclist_same(plp, p)) {
+ state = erts_smp_atomic32_read_acqb(&p->state);
+ if (!(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_EXITING))) {
+ ASSERT(state & ERTS_PSFLG_PENDING_EXIT);
+ erts_handle_pending_exit(p, ERTS_PROC_LOCKS_ALL);
+ }
+ }
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
+ }
+ else {
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ if (erts_proclist_same(plp, p)) {
+ state = erts_smp_atomic32_read_acqb(&p->state);
+ if (!(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_EXITING))) {
+ /*
+ * Save process and try to acquire all
+ * locks at a later time...
+ */
+ save_pending_exiter(p, plp);
+ plp = NULL;
+ }
}
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
}
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
}
- free_plp = plp;
- plp = plp->next;
- proclist_destroy(free_plp);
+ if (plp)
+ proclist_destroy(plp);
+ plp = next_plp;
}
}
static void
-save_pending_exiter(Process *p)
+save_pending_exiter(Process *p, ErtsProcList *plp)
{
- ErtsProcList *plp;
+ ErtsSchedulerSleepInfo *ssi;
ErtsRunQueue *rq;
- ErtsSchedulerData *esdp = erts_get_scheduler_data();
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
- if (!esdp)
- rq = RUNQ_READ_RQ(&p->run_queue);
- else
- rq = esdp->run_queue;
-
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
- rq = ERTS_RUNQ_IX(0); /* Handle on ordinary scheduler */
-#endif
+ rq = RUNQ_READ_RQ(&p->run_queue);
+ ASSERT(rq && !ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
- plp = proclist_create(p);
+ if (!plp)
+ plp = proclist_create(p);
erts_smp_runq_lock(rq);
@@ -11736,9 +11999,11 @@ save_pending_exiter(Process *p)
non_empty_runq(rq);
+ ssi = rq->scheduler->ssi;
+
erts_smp_runq_unlock(rq);
- wake_scheduler(rq);
+ set_aux_work_flags_wakeup_nob(ssi, ERTS_SSI_AUX_WORK_PENDING_EXITERS);
}
#endif
@@ -11938,7 +12203,7 @@ send_exit_signal(Process *c_p, /* current process if and only
if (need_locks
&& erts_smp_proc_trylock(rp, need_locks) == EBUSY) {
 /* ... but we haven't got all locks on it ... */
- save_pending_exiter(rp);
+ save_pending_exiter(rp, NULL);
/*
 * The pending exit will be discovered the next time
 * the process is scheduled in
@@ -12406,10 +12671,8 @@ erts_continue_exit_process(Process *p)
ErtsProcLocks curr_locks = ERTS_PROC_LOCK_MAIN;
Eterm reason = p->fvalue;
DistEntry *dep;
- struct saved_calls *scb;
- process_breakpoint_time_t *pbt;
erts_aint32_t state;
- void *nif_export;
+ int delay_del_proc = 0;
#ifdef DEBUG
int yield_allowed = 1;
@@ -12552,7 +12815,7 @@ erts_continue_exit_process(Process *p)
{
/* Do *not* use erts_get_runq_proc() */
ErtsRunQueue *rq;
- rq = erts_get_runq_current(ERTS_GET_SCHEDULER_DATA_FROM_PROC(p));
+ rq = erts_get_runq_current(erts_proc_sched_data(p));
erts_smp_runq_lock(rq);
@@ -12598,16 +12861,24 @@ erts_continue_exit_process(Process *p)
break;
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (a & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ p->flags |= F_DELAYED_DEL_PROC;
+ delay_del_proc = 1;
+ /*
+ * The dirty scheduler will also decrease
+ * refc when done...
+ */
+ erts_proc_inc_refc(p);
+ }
+#endif
+
if (refc_inced && !(n & ERTS_PSFLG_IN_RUNQ))
erts_proc_dec_refc(p);
}
dep = (p->flags & F_DISTRIBUTION) ? erts_this_dist_entry : NULL;
- scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, NULL);
- if (scb)
- p->fcalls += CONTEXT_REDS; /* Reduction counting depends on this... */
- pbt = ERTS_PROC_SET_CALL_TIME(p, NULL);
- nif_export = ERTS_PROC_SET_NIF_TRAP_EXPORT(p, NULL);
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
#ifdef BM_COUNTERS
@@ -12647,22 +12918,14 @@ erts_continue_exit_process(Process *p)
have none here */
}
- if (scb)
- erts_free(ERTS_ALC_T_CALLS_BUF, (void *) scb);
-
- if (pbt)
- erts_free(ERTS_ALC_T_BPD, (void *) pbt);
-
- if (nif_export)
- erts_destroy_nif_export(nif_export);
-
#ifdef ERTS_SMP
erts_flush_trace_messages(p, 0);
#endif
ERTS_TRACER_CLEAR(&ERTS_TRACER(p));
- delete_process(p);
+ if (!delay_del_proc)
+ delete_process(p);
#ifdef ERTS_SMP
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
@@ -12692,6 +12955,7 @@ erts_continue_exit_process(Process *p)
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p));
+ BUMP_ALL_REDS(p);
}
/*
@@ -13011,11 +13275,13 @@ void erts_halt(int code)
int
erts_dbg_check_halloc_lock(Process *p)
{
+ ErtsSchedulerData *esdp;
if (ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p))
return 1;
if (p->common.id == ERTS_INVALID_PID)
return 1;
- if (p->scheduler_data && p == p->scheduler_data->match_pseudo_process)
+ esdp = erts_proc_sched_data(p);
+ if (esdp && p == esdp->match_pseudo_process)
return 1;
if (erts_thr_progress_is_blocking())
return 1;
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 12a919bc87..2801947613 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -304,6 +304,7 @@ typedef enum {
ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN_IX,
ERTS_SSI_AUX_WORK_MISC_THR_PRGR_IX,
ERTS_SSI_AUX_WORK_MISC_IX,
+ ERTS_SSI_AUX_WORK_PENDING_EXITERS_IX,
ERTS_SSI_AUX_WORK_SET_TMO_IX,
ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX,
ERTS_SSI_AUX_WORK_REAP_PORTS_IX,
@@ -336,6 +337,8 @@ typedef enum {
(((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_MISC_THR_PRGR_IX)
#define ERTS_SSI_AUX_WORK_MISC \
(((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_MISC_IX)
+#define ERTS_SSI_AUX_WORK_PENDING_EXITERS \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_PENDING_EXITERS_IX)
#define ERTS_SSI_AUX_WORK_SET_TMO \
(((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_SET_TMO_IX)
#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK \
@@ -645,6 +648,7 @@ struct ErtsSchedulerData_ {
Uint no; /* Scheduler number for normal schedulers */
#ifdef ERTS_DIRTY_SCHEDULERS
ErtsDirtySchedId dirty_no; /* Scheduler number for dirty schedulers */
+ Process *dirty_shadow_process;
#endif
Port *current_port;
ErtsRunQueue *run_queue;
@@ -805,14 +809,26 @@ erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi)
#define ERTS_PSD_CALL_TIME_BP 3
#define ERTS_PSD_DELAYED_GC_TASK_QS 4
#define ERTS_PSD_NIF_TRAP_EXPORT 5
-#ifdef HIPE
#define ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 6
-#endif
-
-#ifdef HIPE
-#define ERTS_PSD_SIZE 7
-#else
-#define ERTS_PSD_SIZE 6
+#define ERTS_PSD_DIRTY_CPU_START 7
+
+#define ERTS_PSD_SIZE 8
+
+#if !defined(HIPE) && !defined(ERTS_DIRTY_SCHEDULERS)
+# undef ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF
+# undef ERTS_PSD_DIRTY_CPU_START
+# undef ERTS_PSD_SIZE
+# define ERTS_PSD_SIZE 6
+#elif !defined(HIPE)
+# undef ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF
+# undef ERTS_PSD_DIRTY_CPU_START
+# undef ERTS_PSD_SIZE
+# define ERTS_PSD_DIRTY_CPU_START 6
+# define ERTS_PSD_SIZE 7
+#elif !defined(ERTS_DIRTY_SCHEDULERS)
+# undef ERTS_PSD_DIRTY_CPU_START
+# undef ERTS_PSD_SIZE
+# define ERTS_PSD_SIZE 7
#endif
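For reference, the configurations the #undef ladder above produces (a derived summary, not text from the header itself):

    /* HIPE + dirty schedulers: slots 6 (saved calls buf) and 7 (dirty
     * cpu start), ERTS_PSD_SIZE 8; dirty schedulers only:
     * ERTS_PSD_DIRTY_CPU_START reassigned to 6, size 7; HIPE only:
     * slot 6 kept, size 7; neither: size 6. */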
typedef struct {
@@ -1193,7 +1209,10 @@ void erts_check_for_holes(Process* p);
#define ERTS_PSFLG_DIRTY_CPU_PROC ERTS_PSFLG_BIT(20)
#define ERTS_PSFLG_DIRTY_IO_PROC ERTS_PSFLG_BIT(21)
#define ERTS_PSFLG_DIRTY_ACTIVE_SYS ERTS_PSFLG_BIT(22)
-#define ERTS_PSFLG_MAX (ERTS_PSFLGS_ZERO_BIT_OFFSET + 22)
+#define ERTS_PSFLG_DIRTY_RUNNING ERTS_PSFLG_BIT(23)
+#define ERTS_PSFLG_DIRTY_RUNNING_SYS ERTS_PSFLG_BIT(24)
+
+#define ERTS_PSFLG_MAX (ERTS_PSFLGS_ZERO_BIT_OFFSET + 24)
#define ERTS_PSFLGS_DIRTY_WORK (ERTS_PSFLG_DIRTY_CPU_PROC \
| ERTS_PSFLG_DIRTY_IO_PROC \
@@ -1204,6 +1223,11 @@ void erts_check_for_holes(Process* p);
| ERTS_PSFLG_IN_PRQ_NORMAL \
| ERTS_PSFLG_IN_PRQ_LOW)
+#define ERTS_PSFLGS_VOLATILE_HEAP (ERTS_PSFLG_EXITING \
+ | ERTS_PSFLG_PENDING_EXIT \
+ | ERTS_PSFLG_DIRTY_RUNNING \
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)
+
#define ERTS_PSFLGS_GET_ACT_PRIO(PSFLGS) \
(((PSFLGS) >> ERTS_PSFLGS_ACT_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK)
#define ERTS_PSFLGS_GET_USR_PRIO(PSFLGS) \
@@ -1245,6 +1269,7 @@ void erts_check_for_holes(Process* p);
* Static flags that do not change after process creation.
*/
#define ERTS_STC_FLG_SYSTEM_PROC (((Uint32) 1) << 0)
+#define ERTS_STC_FLG_SHADOW_PROC (((Uint32) 1) << 1)
/* The sequential tracing token is a tuple of size 5:
*
@@ -1375,6 +1400,7 @@ extern int erts_system_profile_ts_type;
#define F_SCHDLR_ONLN_WAITQ (1 << 17) /* Process enqueued waiting to change schedulers online */
#define F_HAVE_BLCKD_NMSCHED (1 << 18) /* Process has blocked normal multi-scheduling */
#define F_HIPE_MODE (1 << 19)
+#define F_DELAYED_DEL_PROC (1 << 20) /* Delay delete process (dirty proc exit case) */
/*
* F_DISABLE_GC and F_DELAY_GC are similar. Both will prevent
@@ -1795,7 +1821,8 @@ erts_aint32_t erts_set_aux_work_timeout(int, erts_aint32_t, int);
void erts_sched_notify_check_cpu_bind(void);
Uint erts_active_schedulers(void);
void erts_init_process(int, int, int);
-Eterm erts_process_status(Process *, ErtsProcLocks, Process *, Eterm);
+Eterm erts_process_state2status(erts_aint32_t);
+Eterm erts_process_status(Process *, Eterm);
Uint erts_run_queues_len(Uint *, int, int);
void erts_add_to_runq(Process *);
Eterm erts_bound_schedulers_term(Process *c_p);
@@ -1872,19 +1899,11 @@ int erts_debug_wait_completed(Process *c_p, int flags);
Uint erts_process_memory(Process *c_p, int incl_msg_inq);
-#ifdef ERTS_SMP
-# define ERTS_GET_SCHEDULER_DATA_FROM_PROC(PROC) ((PROC)->scheduler_data)
-# define ERTS_PROC_GET_SCHDATA(PROC) ((PROC)->scheduler_data)
-#else
-# define ERTS_GET_SCHEDULER_DATA_FROM_PROC(PROC) (erts_scheduler_data)
-# define ERTS_PROC_GET_SCHDATA(PROC) (erts_scheduler_data)
-#endif
-
#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
# define ERTS_VERIFY_UNUSED_TEMP_ALLOC(P) \
do { \
ErtsSchedulerData *esdp__ = ((P) \
- ? ERTS_PROC_GET_SCHDATA((Process *) (P)) \
+ ? erts_proc_sched_data((Process *) (P)) \
: erts_get_scheduler_data()); \
if (esdp__ && !ERTS_SCHEDULER_IS_DIRTY(esdp__)) \
esdp__->verify_unused_temp_alloc( \
@@ -1977,12 +1996,15 @@ erts_psd_set(Process *p, int ix, void *data)
ErtsPSD *psd;
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
ErtsProcLocks locks = erts_proc_lc_my_proc_locks(p);
- if (ERTS_LC_PSD_ANY_LOCK == erts_psd_required_locks[ix].set_locks)
- ERTS_SMP_LC_ASSERT(locks || erts_thr_progress_is_blocking());
- else {
- locks &= erts_psd_required_locks[ix].set_locks;
- ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].set_locks == locks
- || erts_thr_progress_is_blocking());
+ erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
+ if (!(state & ERTS_PSFLG_FREE)) {
+ if (ERTS_LC_PSD_ANY_LOCK == erts_psd_required_locks[ix].set_locks)
+ ERTS_SMP_LC_ASSERT(locks || erts_thr_progress_is_blocking());
+ else {
+ locks &= erts_psd_required_locks[ix].set_locks;
+ ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].set_locks == locks
+ || erts_thr_progress_is_blocking());
+ }
}
#endif
psd = (ErtsPSD *) erts_smp_atomic_read_nob(&p->psd);
@@ -2039,6 +2061,13 @@ erts_psd_set(Process *p, int ix, void *data)
((struct saved_calls *) erts_psd_set((P), ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF, (void *) (SCB)))
#endif
+#ifdef ERTS_DIRTY_SCHEDULERS
+#define ERTS_PROC_GET_DIRTY_CPU_START(P) \
+ ((void *) erts_psd_get((P), ERTS_PSD_DIRTY_CPU_START))
+#define ERTS_PROC_SET_DIRTY_CPU_START(P, DCS) \
+ ((void *) erts_psd_set((P), ERTS_PSD_DIRTY_CPU_START, (void *) (DCS)))
+#endif
+
ERTS_GLB_INLINE Eterm erts_proc_get_error_handler(Process *p);
ERTS_GLB_INLINE Eterm erts_proc_set_error_handler(Process *p, Eterm handler);
@@ -2181,6 +2210,7 @@ erts_check_emigration_need(ErtsRunQueue *c_rq, int prio)
#endif
+ERTS_GLB_INLINE ErtsSchedulerData *erts_proc_sched_data(Process *c_p);
ERTS_GLB_INLINE int erts_is_scheduler_bound(ErtsSchedulerData *esdp);
ERTS_GLB_INLINE Process *erts_get_current_process(void);
ERTS_GLB_INLINE Eterm erts_get_current_pid(void);
@@ -2214,6 +2244,31 @@ ERTS_GLB_INLINE void erts_shrink_message_heap(ErtsMessage **msgpp, Process *pp,
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE
+ErtsSchedulerData *erts_proc_sched_data(Process *c_p)
+{
+ ErtsSchedulerData *esdp;
+ ASSERT(c_p);
+#if !defined(ERTS_SMP)
+ esdp = erts_get_scheduler_data();
+#else
+ esdp = c_p->scheduler_data;
+# if defined(ERTS_DIRTY_SCHEDULERS)
+ if (esdp) {
+ ASSERT(esdp == erts_get_scheduler_data());
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
+ }
+ else {
+ esdp = erts_get_scheduler_data();
+ ASSERT(esdp);
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
+ }
+# endif
+#endif
+ ASSERT(esdp);
+ return esdp;
+}
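Call sites throughout this patch switch from the removed ERTS_PROC_GET_SCHDATA() to this helper because it also resolves the scheduler when the process is executing dirty, where p->scheduler_data is NULL. An illustrative use:

    ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
    if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
        /* c_p is currently executing on a dirty scheduler */
    }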
+
+ERTS_GLB_INLINE
int erts_is_scheduler_bound(ErtsSchedulerData *esdp)
{
if (!esdp)
@@ -2428,7 +2483,7 @@ ERTS_GLB_INLINE ErtsAtomCacheMap *
erts_get_atom_cache_map(Process *c_p)
{
ErtsSchedulerData *esdp = (c_p
- ? ERTS_PROC_GET_SCHDATA(c_p)
+ ? erts_proc_sched_data(c_p)
: erts_get_scheduler_data());
ASSERT(esdp);
return &esdp->atom_cache_map;
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index fa76773cac..eeaa9a569c 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -568,23 +568,21 @@ dump_externally(int to, void *to_arg, Eterm term)
}
}
-void erts_dump_process_state(int to, void *to_arg, erts_aint32_t psflg) {
- if (psflg & ERTS_PSFLG_FREE)
- erts_print(to, to_arg, "Non Existing\n"); /* Should never happen */
- else if (psflg & ERTS_PSFLG_EXITING)
- erts_print(to, to_arg, "Exiting\n");
- else if (psflg & ERTS_PSFLG_GC) {
- erts_print(to, to_arg, "Garbing\n");
- }
- else if (psflg & ERTS_PSFLG_SUSPENDED)
- erts_print(to, to_arg, "Suspended\n");
- else if (psflg & ERTS_PSFLG_RUNNING) {
- erts_print(to, to_arg, "Running\n");
+void erts_dump_process_state(int to, void *to_arg, erts_aint32_t psflg)
+{
+ char *s;
+ switch (erts_process_state2status(psflg)) {
+ case am_free: s = "Non Existing"; break; /* Should never happen */
+ case am_exiting: s = "Exiting"; break;
+ case am_garbage_collecting: s = "Garbing"; break;
+ case am_suspended: s = "Suspended"; break;
+ case am_running: s = "Running"; break;
+ case am_runnable: s = "Scheduled"; break;
+ case am_waiting: s = "Waiting"; break;
+ default: s = "Undefined"; break; /* Should never happen */
}
- else if (psflg & ERTS_PSFLG_ACTIVE)
- erts_print(to, to_arg, "Scheduled\n");
- else
- erts_print(to, to_arg, "Waiting\n");
+
+ erts_print(to, to_arg, "%s\n", s);
}
void
@@ -668,6 +666,10 @@ erts_dump_extended_process_state(int to, void *to_arg, erts_aint32_t psflg) {
erts_print(to, to_arg, "DIRTY_IO_PROC"); break;
case ERTS_PSFLG_DIRTY_ACTIVE_SYS:
erts_print(to, to_arg, "DIRTY_ACTIVE_SYS"); break;
+ case ERTS_PSFLG_DIRTY_RUNNING:
+ erts_print(to, to_arg, "DIRTY_RUNNING"); break;
+ case ERTS_PSFLG_DIRTY_RUNNING_SYS:
+ erts_print(to, to_arg, "DIRTY_RUNNING_SYS"); break;
default:
erts_print(to, to_arg, "UNKNOWN(%d)", chk); break;
}
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
index fadbd704bd..9e37106b88 100644
--- a/erts/emulator/beam/erl_time_sup.c
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -2348,7 +2348,7 @@ erts_napi_convert_time_unit(ErtsMonotonicTime val, int from, int to)
BIF_RETTYPE monotonic_time_0(BIF_ALIST_0)
{
ErtsMonotonicTime mtime = time_sup.r.o.get_time();
- update_last_mtime(ERTS_PROC_GET_SCHDATA(BIF_P), mtime);
+ update_last_mtime(erts_proc_sched_data(BIF_P), mtime);
mtime += ERTS_MONOTONIC_OFFSET_NATIVE;
BIF_RET(make_time_val(BIF_P, mtime));
}
@@ -2356,7 +2356,7 @@ BIF_RETTYPE monotonic_time_0(BIF_ALIST_0)
BIF_RETTYPE monotonic_time_1(BIF_ALIST_1)
{
ErtsMonotonicTime mtime = time_sup.r.o.get_time();
- update_last_mtime(ERTS_PROC_GET_SCHDATA(BIF_P), mtime);
+ update_last_mtime(erts_proc_sched_data(BIF_P), mtime);
BIF_RET(time_unit_conversion(BIF_P, BIF_ARG_1, mtime, 1));
}
@@ -2365,7 +2365,7 @@ BIF_RETTYPE system_time_0(BIF_ALIST_0)
ErtsMonotonicTime mtime, offset;
mtime = time_sup.r.o.get_time();
offset = get_time_offset();
- update_last_mtime(ERTS_PROC_GET_SCHDATA(BIF_P), mtime);
+ update_last_mtime(erts_proc_sched_data(BIF_P), mtime);
BIF_RET(make_time_val(BIF_P, mtime + offset));
}
@@ -2374,7 +2374,7 @@ BIF_RETTYPE system_time_1(BIF_ALIST_0)
ErtsMonotonicTime mtime, offset;
mtime = time_sup.r.o.get_time();
offset = get_time_offset();
- update_last_mtime(ERTS_PROC_GET_SCHDATA(BIF_P), mtime);
+ update_last_mtime(erts_proc_sched_data(BIF_P), mtime);
BIF_RET(time_unit_conversion(BIF_P, BIF_ARG_1, mtime + offset, 0));
}
@@ -2404,7 +2404,7 @@ BIF_RETTYPE timestamp_0(BIF_ALIST_0)
mtime = time_sup.r.o.get_time();
offset = get_time_offset();
- update_last_mtime(ERTS_PROC_GET_SCHDATA(BIF_P), mtime);
+ update_last_mtime(erts_proc_sched_data(BIF_P), mtime);
make_timestamp_value(&mega_sec, &sec, &micro_sec, mtime, offset);
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 6d6373b706..ca001fc156 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -396,7 +396,7 @@ send_to_tracer_nif(Process *c_p, ErtsPTabElementCommon *t_p,
Eterm tag, Eterm msg, Eterm extra,
Eterm pam_result);
static ERTS_INLINE Eterm
-call_enabled_tracer(Process *c_p, const ErtsTracer tracer,
+call_enabled_tracer(const ErtsTracer tracer,
ErtsTracerNif **tnif_ref,
enum ErtsTracerOpt topt,
Eterm tag, Eterm t_p_id);
@@ -459,8 +459,7 @@ erts_set_system_seq_tracer(Process *c_p, ErtsProcLocks c_p_locks, ErtsTracer new
if (!ERTS_TRACER_IS_NIL(new)) {
Eterm nif_result = call_enabled_tracer(
- NULL, new, NULL,
- TRACE_FUN_ENABLED, am_trace_status, am_undefined);
+ new, NULL, TRACE_FUN_ENABLED, am_trace_status, am_undefined);
switch (nif_result) {
case am_trace: break;
default:
@@ -492,7 +491,7 @@ erts_get_system_seq_tracer(void)
erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
if (st != erts_tracer_nil &&
- call_enabled_tracer(NULL, st, NULL, TRACE_FUN_ENABLED,
+ call_enabled_tracer(st, NULL, TRACE_FUN_ENABLED,
am_trace_status, am_undefined) == am_remove) {
erts_set_system_seq_tracer(NULL, 0, erts_tracer_nil);
st = erts_tracer_nil;
@@ -513,7 +512,7 @@ get_default_tracing(Uint *flagsp, ErtsTracer *tracerp,
*default_trace_flags &= ~TRACEE_FLAGS;
} else {
Eterm nif_res;
- nif_res = call_enabled_tracer(NULL, *default_tracer,
+ nif_res = call_enabled_tracer(*default_tracer,
NULL, TRACE_FUN_ENABLED,
am_trace_status, am_undefined);
switch (nif_res) {
@@ -915,8 +914,8 @@ seq_trace_update_send(Process *p)
ASSERT((is_tuple(SEQ_TRACE_TOKEN(p)) || is_nil(SEQ_TRACE_TOKEN(p))));
if (have_no_seqtrace(SEQ_TRACE_TOKEN(p)) ||
(seq_tracer != NIL &&
- call_enabled_tracer(NULL, seq_tracer, NULL,
- TRACE_FUN_ENABLED, am_trace_status,
+ call_enabled_tracer(seq_tracer, NULL,
+ TRACE_FUN_ENABLED, am_seq_trace,
p ? p->common.id : am_undefined) != am_trace)
#ifdef USE_VM_PROBES
|| (SEQ_TRACE_TOKEN(p) == am_have_dt_utag)
@@ -963,9 +962,9 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
ASSERT(is_tuple(token) || is_nil(token));
if (token == NIL || (process && ERTS_TRACE_FLAGS(process) & F_SENSITIVE) ||
ERTS_TRACER_IS_NIL(seq_tracer) ||
- call_enabled_tracer(NULL, seq_tracer,
+ call_enabled_tracer(seq_tracer,
NULL, TRACE_FUN_ENABLED,
- am_trace_status,
+ am_seq_trace,
process ? process->common.id : am_undefined) != am_trace) {
return;
}
@@ -1186,7 +1185,11 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
* use process flags
*/
tracee_flags = &ERTS_TRACE_FLAGS(p);
+ /* It is not ideal to call this check twice;
+ it should be optimized so that only one call is made. */
if (!is_tracer_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif,
+ TRACE_FUN_ENABLED, am_trace_status)
+ || !is_tracer_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif,
TRACE_FUN_E_CALL, am_call)) {
return 0;
}
@@ -1202,13 +1205,21 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
}
meta_flags = F_TRACE_CALLS | F_NOW_TS;
tracee_flags = &meta_flags;
- switch (call_enabled_tracer(p, *tracer,
- &tnif, TRACE_FUN_T_CALL,
- am_call, p->common.id)) {
+ switch (call_enabled_tracer(*tracer,
+ &tnif, TRACE_FUN_ENABLED,
+ am_trace_status, p->common.id)) {
default:
case am_remove: *tracer = erts_tracer_nil;
case am_discard: return 0;
- case am_trace: break;
+ case am_trace:
+ switch (call_enabled_tracer(*tracer,
+ &tnif, TRACE_FUN_T_CALL,
+ am_call, p->common.id)) {
+ default:
+ case am_discard: return 0;
+ case am_trace: break;
+ }
+ break;
}
}
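The erts_call_trace() meta-tracer hunk above splits what used to be a single callback into two: a trace_status query, which alone may return am_remove and thereby detach a dead tracer, followed by the actual call-event check. Given the new call_enabled_tracer() contract (see the call_enabled_tracer hunk further down), a condensed sketch of that ordering; identifiers are from the hunk, everything else is elided:

    /* Sketch: trace_status first (am_trace or am_remove only), then the
     * per-event check (am_trace or am_discard). */
    if (call_enabled_tracer(*tracer, &tnif, TRACE_FUN_ENABLED,
                            am_trace_status, p->common.id) != am_trace) {
        *tracer = erts_tracer_nil; /* am_remove: tracer is gone */
        return 0;
    }
    if (call_enabled_tracer(*tracer, &tnif, TRACE_FUN_T_CALL,
                            am_call, p->common.id) != am_trace)
        return 0; /* am_discard: skip this call, keep the tracer */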
@@ -1346,9 +1357,9 @@ trace_proc(Process *c_p, ErtsProcLocks c_p_locks,
Process *t_p, Eterm what, Eterm data)
{
ErtsTracerNif *tnif = NULL;
- if (is_tracer_enabled(c_p, c_p_locks, &t_p->common, &tnif,
+ if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif,
TRACE_FUN_E_PROCS, what))
- send_to_tracer_nif(c_p, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_PROCS,
+ send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_PROCS,
what, data, THE_NON_VALUE, am_true);
}
@@ -1365,16 +1376,15 @@ trace_proc_spawn(Process *p, Eterm what, Eterm pid,
Eterm mod, Eterm func, Eterm args)
{
ErtsTracerNif *tnif = NULL;
- if (is_tracer_enabled(p, ERTS_PROC_LOCKS_ALL &
- ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE),
- &p->common, &tnif, TRACE_FUN_E_PROCS, what)) {
+ if (is_tracer_enabled(NULL, 0,
+ &p->common, &tnif, TRACE_FUN_E_PROCS, what)) {
Eterm mfa;
Eterm* hp;
hp = HAlloc(p, 4);
mfa = TUPLE3(hp, mod, func, args);
hp += 4;
- send_to_tracer_nif(p, &p->common, p->common.id, tnif, TRACE_FUN_T_PROCS,
+ send_to_tracer_nif(NULL, &p->common, p->common.id, tnif, TRACE_FUN_T_PROCS,
what, pid, mfa, am_true);
}
}
@@ -2899,23 +2909,28 @@ send_to_tracer_nif(Process *c_p, ErtsPTabElementCommon *t_p,
}
static ERTS_INLINE Eterm
-call_enabled_tracer(Process *c_p, const ErtsTracer tracer,
+call_enabled_tracer(const ErtsTracer tracer,
ErtsTracerNif **tnif_ret,
enum ErtsTracerOpt topt,
Eterm tag, Eterm t_p_id) {
ErtsTracerNif *tnif = lookup_tracer_nif(tracer);
if (tnif) {
- Eterm argv[] = {tag, ERTS_TRACER_STATE(tracer), t_p_id};
+ Eterm argv[] = {tag, ERTS_TRACER_STATE(tracer), t_p_id},
+ ret;
topt = (tnif->tracers[topt].cb) ? topt : TRACE_FUN_ENABLED;
ASSERT(topt < NIF_TRACER_TYPES);
ASSERT(tnif->tracers[topt].cb != NULL);
if (tnif_ret) *tnif_ret = tnif;
- return erts_nif_call_function(c_p, NULL, tnif->nif_mod,
- tnif->tracers[topt].cb,
- tnif->tracers[topt].arity,
- argv);
+ ret = erts_nif_call_function(NULL, NULL, tnif->nif_mod,
+ tnif->tracers[topt].cb,
+ tnif->tracers[topt].arity,
+ argv);
+ if (tag == am_trace_status && ret != am_remove)
+ return am_trace;
+ ASSERT(tag == am_trace_status || ret != am_remove);
+ return ret;
}
- return am_remove;
+ return tag == am_trace_status ? am_remove : am_discard;
}
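The rewritten call_enabled_tracer() above encodes a tag-dependent contract: for am_trace_status queries every answer except am_remove is coerced to am_trace (and a missing tracer module means am_remove), while for event tags am_remove is asserted never to escape and a missing tracer module maps to am_discard. A caller handling an event tag can therefore be written as in this sketch; the tag choice is illustrative:

    /* Sketch: consuming the post-patch return values for an event tag. */
    switch (call_enabled_tracer(tracer, &tnif, TRACE_FUN_T_SEND,
                                am_send, t_p_id)) {
    case am_trace:   /* deliver the trace message */          break;
    case am_discard: /* drop this event, keep the tracer */   break;
    default:         /* am_remove only for am_trace_status */ break;
    }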
static int
@@ -2940,12 +2955,12 @@ is_tracer_enabled(Process* c_p, ErtsProcLocks c_p_locks,
}
#endif
- nif_result = call_enabled_tracer(c_p, t_p->tracer, tnif_ret, topt, tag, t_p->id);
+ nif_result = call_enabled_tracer(t_p->tracer, tnif_ret, topt, tag, t_p->id);
switch (nif_result) {
case am_discard: return 0;
case am_trace: return 1;
case THE_NON_VALUE:
- case am_remove: break;
+ case am_remove: ASSERT(tag == am_trace_status); break;
default:
/* only am_remove should be returned, but if
something else is returned we fall through
@@ -2976,19 +2991,14 @@ is_tracer_enabled(Process* c_p, ErtsProcLocks c_p_locks,
return 0;
}
-int erts_is_tracer_proc_enabled(Process* c_p, ErtsProcLocks c_p_locks,
- ErtsPTabElementCommon *t_p, Eterm type)
-{
- return is_tracer_enabled(c_p, c_p_locks, t_p, NULL, TRACE_FUN_ENABLED, am_trace_status);
-}
-
-int erts_is_tracer_enabled(Process *c_p, const ErtsTracer tracer)
+int erts_is_tracer_enabled(const ErtsTracer tracer, ErtsPTabElementCommon *t_p)
{
ErtsTracerNif *tnif = lookup_tracer_nif(tracer);
if (tnif) {
- Eterm nif_result = call_enabled_tracer(c_p, tracer, &tnif,
- TRACE_FUN_ENABLED, am_trace_status,
- c_p->common.id);
+ Eterm nif_result = call_enabled_tracer(tracer, &tnif,
+ TRACE_FUN_ENABLED,
+ am_trace_status,
+ t_p->id);
switch (nif_result) {
case am_discard:
case am_trace: return 1;
@@ -2999,6 +3009,20 @@ int erts_is_tracer_enabled(Process *c_p, const ErtsTracer tracer)
return 0;
}
+int erts_is_tracer_proc_enabled(Process* c_p, ErtsProcLocks c_p_locks,
+ ErtsPTabElementCommon *t_p)
+{
+ return is_tracer_enabled(c_p, c_p_locks, t_p, NULL, TRACE_FUN_ENABLED,
+ am_trace_status);
+}
+
+int erts_is_tracer_proc_enabled_send(Process* c_p, ErtsProcLocks c_p_locks,
+ ErtsPTabElementCommon *t_p)
+{
+ return is_tracer_enabled(c_p, c_p_locks, t_p, NULL, TRACE_FUN_T_SEND, am_send);
+}
+
+
void erts_tracer_replace(ErtsPTabElementCommon *t_p, const ErtsTracer tracer)
{
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
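erts_is_tracer_enabled() now takes the tracee's ErtsPTabElementCommon instead of deriving the id from a calling process, so the check can be made from contexts that have no Process at hand (ports, for instance). A hedged sketch of a caller under that assumption; the wrapper function is hypothetical:

    /* Hypothetical helper: is tracing still live for a port? */
    static int port_tracer_live(Port *prt)
    {
        if (ERTS_TRACER_IS_NIL(ERTS_TRACER(prt)))
            return 0;
        return erts_is_tracer_enabled(ERTS_TRACER(prt), &prt->common);
    }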
diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h
index fd5879bc9d..0095d4386b 100644
--- a/erts/emulator/beam/erl_trace.h
+++ b/erts/emulator/beam/erl_trace.h
@@ -193,8 +193,10 @@ int erts_finish_breakpointing(void);
/* Nif tracer functions */
int erts_is_tracer_proc_enabled(Process *c_p, ErtsProcLocks c_p_locks,
- ErtsPTabElementCommon *t_p, Eterm type);
-int erts_is_tracer_enabled(Process *c_p, const ErtsTracer tracer);
+ ErtsPTabElementCommon *t_p);
+int erts_is_tracer_proc_enabled_send(Process* c_p, ErtsProcLocks c_p_locks,
+ ErtsPTabElementCommon *t_p);
+int erts_is_tracer_enabled(const ErtsTracer tracer, ErtsPTabElementCommon *t_p);
Eterm erts_tracer_to_term(Process *p, ErtsTracer tracer);
ErtsTracer erts_term_to_tracer(Eterm prefix, Eterm term);
void erts_tracer_replace(ErtsPTabElementCommon *t_p,
@@ -224,9 +226,4 @@ ERTS_DECLARE_DUMMY(erts_tracer_nil) = NIL;
#define ERTS_TRACER_FROM_ETERM(termp) \
((ErtsTracer*)(termp))
-#define ERTS_TRACER_PROC_IS_ENABLED(PROC) \
- (!ERTS_TRACER_IS_NIL(ERTS_TRACER(PROC)) \
- && erts_is_tracer_proc_enabled(PROC, ERTS_PROC_LOCK_MAIN, \
- &(PROC)->common, am_trace_status))
-
#endif /* ERL_TRACE_H__ */
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 550db95ba8..1abcc6cbf4 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -57,6 +57,7 @@ struct enif_environment_t /* ErlNifEnv */
struct enif_tmp_obj_t* tmp_obj_list;
int exception_thrown; /* boolean */
Process *tracee;
+ int exiting; /* boolean (dirty NIFs might return in exiting state) */
};
extern void erts_pre_nif(struct enif_environment_t*, Process*,
struct erl_module_nif*, Process* tracee);
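The new exiting flag in enif_environment_t records that the process died while a NIF ran on a dirty scheduler, so the epilogue that runs back on a normal scheduler can bypass ordinary return handling. A hedged sketch of a consumer; only the struct field comes from the hunk above, the helper is hypothetical:

    /* Hypothetical post-NIF check using the new flag. */
    static Eterm finish_nif(struct enif_environment_t *env, Eterm result)
    {
        if (env->exiting)
            return THE_NON_VALUE; /* process exited during a dirty NIF */
        return result;
    }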
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index b9f6ab04c6..0377f6cb5e 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -2512,7 +2512,7 @@ erts_port_output(Process *c_p,
sigdp->flags &= ~ERTS_P2P_SIG_DATA_FLG_NOSUSPEND;
else if (async_nosuspend) {
ErtsSchedulerData *esdp = (c_p
- ? ERTS_PROC_GET_SCHDATA(c_p)
+ ? erts_proc_sched_data(c_p)
: erts_get_scheduler_data());
ASSERT(esdp);
ns_pthp = &esdp->nosuspend_port_task_handle;
@@ -5140,7 +5140,7 @@ erts_request_io_bytes(Process *c_p)
Uint *hp;
Eterm ref;
Uint32 *refn;
- ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
ErtsIOBytesReq *req = erts_alloc(ERTS_ALC_T_IOB_REQ,
sizeof(ErtsIOBytesReq));
@@ -5941,7 +5941,7 @@ driver_deliver_term(Port *prt, Eterm to, ErlDrvTermData* data, int len)
if (!rp) {
if (!prt || !IS_TRACED_FL(prt, F_TRACE_SEND))
goto done;
- if (!erts_is_tracer_proc_enabled(NULL, 0, &prt->common, am_send))
+ if (!erts_is_tracer_proc_enabled_send(NULL, 0, &prt->common))
goto done;
res = -2;
@@ -7881,11 +7881,13 @@ driver_system_info(ErlDrvSysInfo *sip, size_t si_size)
* (driver version 3.1, NIF version 2.7)
*/
if (si_size >= ERL_DRV_SYS_INFO_SIZE(dirty_scheduler_support)) {
-#if defined(ERL_NIF_DIRTY_SCHEDULER_SUPPORT) && defined(USE_THREADS)
- sip->dirty_scheduler_support = 1;
+ sip->dirty_scheduler_support =
+#ifdef ERTS_DIRTY_SCHEDULERS
+ 1
#else
- sip->dirty_scheduler_support = 0;
+ 0
#endif
+ ;
}
}
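The io.c hunk above makes driver_system_info() report dirty-scheduler support based on ERTS_DIRTY_SCHEDULERS alone (previously it also required ERL_NIF_DIRTY_SCHEDULER_SUPPORT and USE_THREADS). From a driver's perspective the query is unchanged; a minimal sketch:

    #include "erl_driver.h"

    /* Returns non-zero when the emulator was built with dirty
     * scheduler support, per the reporting change above. */
    static int dirty_schedulers_available(void)
    {
        ErlDrvSysInfo si;
        driver_system_info(&si, sizeof(si));
        return si.dirty_scheduler_support;
    }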
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index dda3ba565c..f303d4f167 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -34,6 +34,10 @@
(((__GNUC__ << 24) | (__GNUC_MINOR__ << 12) | __GNUC_PATCHLEVEL__) >= (((MAJ) << 24) | ((MIN) << 12) | (PL)))
#endif
+#if defined(ERTS_DIRTY_SCHEDULERS) && !defined(ERTS_SMP)
+# error "Dirty schedulers not supported without smp support"
+#endif
+
#ifdef ERTS_INLINE
# ifndef ERTS_CAN_INLINE
# define ERTS_CAN_INLINE 1