Diffstat (limited to 'erts/emulator/beam/erl_process.c')
-rw-r--r--  erts/emulator/beam/erl_process.c | 690
1 file changed, 507 insertions(+), 183 deletions(-)
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 66f22979ad..54724fd62d 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2016. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2017. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -154,6 +154,7 @@ int ERTS_WRITE_UNLIKELY(erts_eager_check_io) = 1;
int ERTS_WRITE_UNLIKELY(erts_sched_compact_load);
int ERTS_WRITE_UNLIKELY(erts_sched_balance_util) = 0;
Uint ERTS_WRITE_UNLIKELY(erts_no_schedulers);
+Uint ERTS_WRITE_UNLIKELY(erts_no_total_schedulers);
Uint ERTS_WRITE_UNLIKELY(erts_no_dirty_cpu_schedulers) = 0;
Uint ERTS_WRITE_UNLIKELY(erts_no_dirty_io_schedulers) = 0;
@@ -448,6 +449,9 @@ int erts_system_profile_ts_type = ERTS_TRACE_FLG_NOW_TIMESTAMP;
typedef enum {
ERTS_PSTT_GC, /* Garbage Collect */
ERTS_PSTT_CPC, /* Check Process Code */
+#ifdef ERTS_NEW_PURGE_STRATEGY
+ ERTS_PSTT_CLA, /* Copy Literal Area */
+#endif
ERTS_PSTT_COHMQ, /* Change off heap message queue */
ERTS_PSTT_FTMQ /* Flush trace msg queue */
} ErtsProcSysTaskType;
@@ -554,8 +558,8 @@ do { \
*/
static void exec_misc_ops(ErtsRunQueue *);
-static void print_function_from_pc(int to, void *to_arg, BeamInstr* x);
-static int stack_element_dump(int to, void *to_arg, Eterm* sp, int yreg);
+static void print_function_from_pc(fmtfn_t to, void *to_arg, BeamInstr* x);
+static int stack_element_dump(fmtfn_t to, void *to_arg, Eterm* sp, int yreg);
static void aux_work_timeout(void *unused);
static void aux_work_timeout_early_init(int no_schedulers);
@@ -2056,6 +2060,7 @@ handle_thr_prgr_later_op(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int wait
#endif
for (lops = 0; lops < ERTS_MAX_THR_PRGR_LATER_OPS; lops++) {
ErtsThrPrgrLaterOp *lop = awdp->later_op.first;
+
if (!erts_thr_progress_has_reached_this(current, lop->later))
return aux_work & ~ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP;
awdp->later_op.first = lop->next;
@@ -5905,9 +5910,12 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
n = (int) no_schedulers;
erts_no_schedulers = n;
+ erts_no_total_schedulers = n;
#ifdef ERTS_DIRTY_SCHEDULERS
erts_no_dirty_cpu_schedulers = no_dirty_cpu_schedulers;
+ erts_no_total_schedulers += no_dirty_cpu_schedulers;
erts_no_dirty_io_schedulers = no_dirty_io_schedulers;
+ erts_no_total_schedulers += no_dirty_io_schedulers;
#endif
/* Create and initialize scheduler sleep info */
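Editor's note: the new erts_no_total_schedulers global is maintained as the sum of
all scheduler types, so code needing the total (normal + dirty CPU + dirty I/O)
can read one variable. A minimal sketch of the invariant, using the names from
this hunk (illustrative, not OTP code):

    Uint total = erts_no_schedulers;            /* normal schedulers    */
    #ifdef ERTS_DIRTY_SCHEDULERS
    total += erts_no_dirty_cpu_schedulers;      /* dirty CPU schedulers */
    total += erts_no_dirty_io_schedulers;       /* dirty I/O schedulers */
    #endif
    ASSERT(total == erts_no_total_schedulers);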
@@ -6227,6 +6235,12 @@ check_dirty_enqueue_in_prio_queue(Process *c_p,
int queue;
erts_aint32_t dact, max_qbit;
+ /* Do not enqueue a free process... */
+ if (actual & ERTS_PSFLG_FREE) {
+ *newp &= ~ERTS_PSFLGS_DIRTY_WORK;
+ return ERTS_ENQUEUE_NOT;
+ }
+
/* Termination should be done on an ordinary scheduler */
if ((*newp) & ERTS_PSFLG_EXITING) {
*newp &= ~ERTS_PSFLGS_DIRTY_WORK;
@@ -6268,7 +6282,11 @@ check_dirty_enqueue_in_prio_queue(Process *c_p,
return -1*queue;
}
- *newp |= ERTS_PSFLG_IN_RUNQ;
+ /*
+ * Enqueue using process struct.
+ */
+ *newp &= ~ERTS_PSFLGS_PRQ_PRIO_MASK;
+ *newp |= ERTS_PSFLG_IN_RUNQ | (aprio << ERTS_PSFLGS_PRQ_PRIO_OFFSET);
return queue;
}
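Editor's note: when the real process struct is used for a dirty enqueue (no proxy
struct is involved here), the aggregated priority is re-encoded into the
run-queue priority field of the state word. A sketch of the flag update,
assuming the mask/offset names from erl_process.h:

    erts_aint32_t n = *newp;
    n &= ~ERTS_PSFLGS_PRQ_PRIO_MASK;               /* clear old prio field  */
    n |= ERTS_PSFLG_IN_RUNQ                        /* mark as enqueued      */
       | (aprio << ERTS_PSFLGS_PRQ_PRIO_OFFSET);   /* store aggregated prio */
    *newp = n;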
@@ -6415,6 +6433,9 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st
/*
* schedule_out_process() return with c_rq locked.
+ *
+ * Returns a non-zero value if the caller should decrement the
+ * reference count on the process when done with it...
*/
static ERTS_INLINE int
schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
@@ -6469,12 +6490,18 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
erts_smp_runq_lock(c_rq);
- return 0;
-
+#if !defined(ERTS_SMP)
+ /* Decrement refc if process struct is free... */
+ return !!(n & ERTS_PSFLG_FREE);
+#else
+ /* Decrement refc if scheduled out from dirty scheduler... */
+ return !is_normal_sched;
+#endif
}
else {
Process* sched_p;
+ ASSERT(!(n & ERTS_PSFLG_FREE));
ASSERT(!(n & ERTS_PSFLG_SUSPENDED) || (n & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_DIRTY_ACTIVE_SYS)));
@@ -6490,11 +6517,14 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
erts_smp_runq_lock(runq);
+ if (is_normal_sched && sched_p == p && ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
+ erts_proc_inc_refc(p); /* Needs to be done before enqueue_process() */
+
/* Enqueue the process */
enqueue_process(runq, (int) enq_prio, sched_p);
if (runq == c_rq)
- return 1;
+ return 0;
erts_smp_runq_unlock(runq);
@@ -6502,9 +6532,15 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
erts_smp_runq_lock(c_rq);
- return 1;
+ /*
+ * Decrement refc if the process was scheduled out by a
+ * dirty scheduler and we have not just enqueued it
+ * again, using the ordinary process struct, on a
+ * dirty run-queue...
+ */
+ return !is_normal_sched && (sched_p != p
+ || !ERTS_RUNQ_IX_IS_DIRTY(runq->ix));
}
-
}
static ERTS_INLINE void
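Editor's note: the return value of schedule_out_process() now encodes a
reference-count protocol instead of a queued/not-queued flag; the erts_schedule()
hunk further down consumes it. The caller side, sketched:

    int dec_refc;

    /* schedule_out_process() returns with rq locked! */
    dec_refc = schedule_out_process(rq, state, p, proxy_p, is_normal_sched);

    /* ... reduction bookkeeping, unlock the process ... */

    if (dec_refc)
        erts_proc_dec_refc(p);  /* drop the ref held for dirty scheduling */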
@@ -6519,8 +6555,17 @@ add2runq(int enqueue, erts_aint32_t prio,
if (runq) {
Process *sched_p;
- if (enqueue > 0)
+ if (enqueue > 0) {
sched_p = proc;
+ /*
+ * The refc on the process struct (i.e. the real
+ * struct, not the proxy struct) is kept increased
+ * while the process is in a dirty run-queue or
+ * executing on a dirty scheduler.
+ */
+ if (ERTS_RUNQ_IX_IS_DIRTY(runq->ix))
+ erts_proc_inc_refc(proc);
+ }
else {
Process *pxy;
@@ -6671,15 +6716,18 @@ erts_schedule_process(Process *p, erts_aint32_t state, ErtsProcLocks locks)
}
static int
-schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st)
+schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st,
+ erts_aint32_t *fail_state_p)
{
int res;
int locked;
ErtsProcSysTaskQs *stqs, *free_stqs;
- erts_aint32_t state, a, n, enq_prio;
+ erts_aint32_t fail_state, state, a, n, enq_prio;
int enqueue; /* < 0 -> use proxy */
unsigned int prof_runnable_procs;
+ fail_state = *fail_state_p;
+
res = 1; /* prepare for success */
st->next = st->prev = st; /* Prep for empty prio queue */
state = erts_smp_atomic32_read_nob(&p->state);
@@ -6705,7 +6753,8 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st)
erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
state = erts_smp_atomic32_read_nob(&p->state);
- if (state & ERTS_PSFLG_EXITING) {
+ if (state & fail_state) {
+ *fail_state_p = (state & fail_state);
free_stqs = stqs;
res = 0;
goto cleanup;
@@ -7040,12 +7089,18 @@ typedef struct {
} ErtsSchdlrSspndResume;
static void
-schdlr_sspnd_resume_proc(Eterm pid)
+schdlr_sspnd_resume_proc(ErtsSchedType sched_type, Eterm pid)
{
- Process *p = erts_pid2proc(NULL, 0, pid, ERTS_PROC_LOCK_STATUS);
+ Process *p;
+ p = erts_pid2proc_opt(NULL, 0, pid, ERTS_PROC_LOCK_STATUS,
+ (sched_type != ERTS_SCHED_NORMAL
+ ? ERTS_P2P_FLG_INC_REFC
+ : 0));
if (p) {
resume_process(p, ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ if (sched_type != ERTS_SCHED_NORMAL)
+ erts_proc_dec_refc(p);
}
}
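Editor's note: dirty schedulers are unmanaged with respect to the thread-progress
scheme, so a process struct they operate on could otherwise be deallocated under
their feet; passing ERTS_P2P_FLG_INC_REFC pins the struct for the duration of the
operation. The pattern, sketched:

    /* Pin the process while resuming it from a dirty scheduler... */
    Process *p = erts_pid2proc_opt(NULL, 0, pid, ERTS_PROC_LOCK_STATUS,
                                   ERTS_P2P_FLG_INC_REFC);
    if (p) {
        resume_process(p, ERTS_PROC_LOCK_STATUS);
        erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
        erts_proc_dec_refc(p);  /* release the pin */
    }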
@@ -7054,17 +7109,20 @@ schdlr_sspnd_resume_procs(ErtsSchedType sched_type,
ErtsSchdlrSspndResume *resume)
{
if (is_internal_pid(resume->onln.chngr)) {
- schdlr_sspnd_resume_proc(resume->onln.chngr);
+ schdlr_sspnd_resume_proc(sched_type,
+ resume->onln.chngr);
resume->onln.chngr = NIL;
}
if (is_internal_pid(resume->onln.nxt)) {
- schdlr_sspnd_resume_proc(resume->onln.nxt);
+ schdlr_sspnd_resume_proc(sched_type,
+ resume->onln.nxt);
resume->onln.nxt = NIL;
}
while (resume->msb.chngrs) {
ErtsProcList *plp = resume->msb.chngrs;
resume->msb.chngrs = plp->next;
- schdlr_sspnd_resume_proc(plp->pid);
+ schdlr_sspnd_resume_proc(sched_type,
+ plp->pid);
proclist_destroy(plp);
}
}
@@ -7116,16 +7174,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
ASSERT(sched_type != ERTS_SCHED_NORMAL || no != 1);
- if (sched_type != ERTS_SCHED_NORMAL) {
- if (erts_smp_mtx_trylock(&schdlr_sspnd.mtx) == EBUSY) {
- erts_smp_runq_unlock(esdp->run_queue);
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- erts_smp_runq_lock(esdp->run_queue);
- }
- if (schdlr_sspnd.msb.ongoing)
- evacuate_run_queue(esdp->run_queue, &sbp);
+ if (sched_type != ERTS_SCHED_NORMAL)
erts_smp_runq_unlock(esdp->run_queue);
- }
else {
evacuate_run_queue(esdp->run_queue, &sbp);
@@ -7138,8 +7188,8 @@ suspend_scheduler(ErtsSchedulerData *esdp)
sched_wall_time_change(esdp, 0);
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
}
+ erts_smp_mtx_lock(&schdlr_sspnd.mtx);
flgs = sched_prep_spin_suspended(ssi, ERTS_SSI_FLG_SUSPENDED);
if (flgs & ERTS_SSI_FLG_SUSPENDED) {
@@ -7183,15 +7233,18 @@ suspend_scheduler(ErtsSchedulerData *esdp)
(void) erts_proclist_fetch(&msb[i]->chngq, &end_plp);
/* resume processes that initiated the multi scheduling block... */
plp = msb[i]->chngq;
- while (plp) {
- erts_proclist_store_last(&msb[i]->blckrs,
- proclist_copy(plp));
- plp = plp->next;
- }
- if (end_plp)
+ if (plp) {
+ ASSERT(end_plp);
+ ASSERT(msb[i]->ongoing);
+ do {
+ erts_proclist_store_last(&msb[i]->blckrs,
+ proclist_copy(plp));
+ plp = plp->next;
+ } while (plp);
end_plp->next = resume.msb.chngrs;
- resume.msb.chngrs = msb[i]->chngq;
- msb[i]->chngq = NULL;
+ resume.msb.chngrs = msb[i]->chngq;
+ msb[i]->chngq = NULL;
+ }
}
}
}
@@ -7255,24 +7308,14 @@ suspend_scheduler(ErtsSchedulerData *esdp)
while (1) {
ErtsMonotonicTime current_time;
- erts_aint32_t qmask;
erts_aint32_t flgs;
- qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue)
- & ERTS_RUNQ_FLGS_QMASK);
-
- if (sched_type != ERTS_SCHED_NORMAL) {
- if (qmask) {
- erts_smp_mtx_lock(&schdlr_sspnd.mtx);
- erts_smp_runq_lock(esdp->run_queue);
- if (schdlr_sspnd.msb.ongoing)
- evacuate_run_queue(esdp->run_queue, &sbp);
- erts_smp_runq_unlock(esdp->run_queue);
- erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
- }
+ if (sched_type != ERTS_SCHED_NORMAL)
aux_work = 0;
- }
else {
+ erts_aint32_t qmask;
+ qmask = (ERTS_RUNQ_FLGS_GET(esdp->run_queue)
+ & ERTS_RUNQ_FLGS_QMASK);
aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
@@ -7400,12 +7443,23 @@ suspend_scheduler(ErtsSchedulerData *esdp)
schdlr_sspnd_inc_nscheds(&schdlr_sspnd.active, sched_type);
changing = erts_smp_atomic32_read_nob(&schdlr_sspnd.changing);
- if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
- && schdlr_sspnd.online == schdlr_sspnd.active) {
- erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
- ~ERTS_SCHDLR_SSPND_CHNG_MSB);
- }
-
+ if (changing) {
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_MSB)
+ && !schdlr_sspnd.msb.ongoing
+ && schdlr_sspnd.online == schdlr_sspnd.active) {
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_MSB);
+ }
+ if ((changing & ERTS_SCHDLR_SSPND_CHNG_NMSB)
+ && !schdlr_sspnd.nmsb.ongoing
+ && (schdlr_sspnd_get_nscheds(&schdlr_sspnd.online,
+ ERTS_SCHED_NORMAL)
+ == schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
+ ERTS_SCHED_NORMAL))) {
+ erts_smp_atomic32_read_band_nob(&schdlr_sspnd.changing,
+ ~ERTS_SCHDLR_SSPND_CHNG_NMSB);
+ }
+ }
ASSERT(no <= schdlr_sspnd_get_nscheds(&schdlr_sspnd.online, sched_type));
ASSERT((sched_type == ERTS_SCHED_NORMAL
? !(schdlr_sspnd.msb.ongoing|schdlr_sspnd.nmsb.ongoing)
@@ -7538,7 +7592,7 @@ abort_sched_onln_chng_waitq(Process *p)
erts_smp_mtx_unlock(&schdlr_sspnd.mtx);
if (is_internal_pid(resume))
- schdlr_sspnd_resume_proc(resume);
+ schdlr_sspnd_resume_proc(ERTS_SCHED_NORMAL, resume);
}
ErtsSchedSuspendResult
@@ -7936,21 +7990,20 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
}
else { /* ------ UNBLOCK ------ */
if (p->flags & have_blckd_flg) {
- ErtsProcList *plps[2];
+ ErtsProcList **plpps[3] = {0};
ErtsProcList *plp;
- int limit = 0;
- plps[limit++] = erts_proclist_peek_first(msbp->blckrs);
- if (all)
- plps[limit++] = erts_proclist_peek_first(msbp->chngq);
+ plpps[0] = &msbp->blckrs;
+ if (all)
+ plpps[1] = &msbp->chngq;
- for (ix = 0; ix < limit; ix++) {
- plp = plps[ix];
+ for (ix = 0; plpps[ix]; ix++) {
+ plp = erts_proclist_peek_first(*plpps[ix]);
while (plp) {
ErtsProcList *tmp_plp = plp;
- plp = erts_proclist_peek_next(msbp->blckrs, plp);
+ plp = erts_proclist_peek_next(*plpps[ix], plp);
if (erts_proclist_same(tmp_plp, p)) {
- erts_proclist_remove(&msbp->blckrs, tmp_plp);
+ erts_proclist_remove(plpps[ix], tmp_plp);
proclist_destroy(tmp_plp);
if (!all)
break;
@@ -8177,6 +8230,8 @@ sched_dirty_cpu_thread_func(void *vesdp)
esdp->thr_id += erts_no_schedulers;
+ erts_msacc_init_thread("dirty_cpu_scheduler", no, 0);
+
erts_thr_progress_register_unmanaged_thread(&callbacks);
#ifdef ERTS_ENABLE_LOCK_CHECK
{
@@ -8222,6 +8277,8 @@ sched_dirty_io_thread_func(void *vesdp)
esdp->thr_id += erts_no_schedulers + erts_no_dirty_cpu_schedulers;
+ erts_msacc_init_thread("dirty_io_scheduler", no, 0);
+
erts_thr_progress_register_unmanaged_thread(&callbacks);
#ifdef ERTS_ENABLE_LOCK_CHECK
{
@@ -8585,8 +8642,15 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
* from being selected for normal execution regardless
* of locks held or not held on it...
*/
- ASSERT(!((ERTS_PSFLG_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS)
- & erts_smp_atomic32_read_nob(&rp->state)));
+#ifdef DEBUG
+ {
+ erts_aint32_t state;
+ state = erts_smp_atomic32_read_nob(&rp->state);
+ ASSERT((state & ERTS_PSFLG_PENDING_EXIT)
+ || !(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)));
+ }
+#endif
if (!suspend)
resume_process(rp, pid_locks|ERTS_PROC_LOCK_STATUS);
@@ -9088,6 +9152,27 @@ resume_process_1(BIF_ALIST_1)
BIF_ERROR(BIF_P, BADARG);
}
+BIF_RETTYPE
+erts_internal_is_process_executing_dirty_1(BIF_ALIST_1)
+{
+ if (is_not_internal_pid(BIF_ARG_1))
+ BIF_ERROR(BIF_P, BADARG);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else {
+ Process *rp = erts_proc_lookup(BIF_ARG_1);
+ if (rp) {
+ erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state);
+ if (state & (ERTS_PSFLG_DIRTY_RUNNING
+ |ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ BIF_RET(am_true);
+ }
+ }
+ }
+#endif
+ BIF_RET(am_false);
+}
+
+
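Editor's note: the new BIF is a plain state-word inspection; the relaxed (nob)
read makes the answer advisory only, since the process may be scheduled in or
out by the time the caller acts on it. The test it performs, sketched:

    erts_aint32_t state = erts_smp_atomic32_read_nob(&rp->state);
    int executing_dirty = !!(state & (ERTS_PSFLG_DIRTY_RUNNING
                                      | ERTS_PSFLG_DIRTY_RUNNING_SYS));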
Uint
erts_run_queues_len(Uint *qlen, int atomic_queues_read, int incl_active_sched)
{
@@ -9364,6 +9449,14 @@ erts_set_process_priority(Process *p, Eterm value)
}
}
+#ifdef __WIN32__
+Sint64
+erts_time2reds(ErtsMonotonicTime start, ErtsMonotonicTime end)
+{
+ return ERTS_TIME2REDS_IMPL__(start, end);
+}
+#endif
+
static int
scheduler_gc_proc(Process *c_p, int reds_left)
{
@@ -9494,15 +9587,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
p->reds += actual_reds;
-#ifdef ERTS_SMP
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_TRACE);
- if (p->trace_msg_q) {
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_TRACE);
- erts_schedule_flush_trace_messages(p->common.id);
- } else
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_TRACE);
-#endif
-
state = erts_smp_atomic32_read_nob(&p->state);
if (IS_TRACED(p)) {
@@ -9522,7 +9606,15 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
}
}
- erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+
+#ifdef ERTS_SMP
+ if (p->trace_msg_q) {
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ erts_schedule_flush_trace_messages(p, 1);
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ }
+#endif
/* have to re-read state after taking lock */
state = erts_smp_atomic32_read_nob(&p->state);
@@ -9530,46 +9622,56 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
#ifdef ERTS_SMP
if (is_normal_sched && (state & ERTS_PSFLG_PENDING_EXIT))
erts_handle_pending_exit(p, (ERTS_PROC_LOCK_MAIN
+ | ERTS_PROC_LOCK_TRACE
| ERTS_PROC_LOCK_STATUS));
if (p->pending_suspenders)
handle_pending_suspend(p, (ERTS_PROC_LOCK_MAIN
+ | ERTS_PROC_LOCK_TRACE
| ERTS_PROC_LOCK_STATUS));
#endif
esdp->reductions += reds;
- /* schedule_out_process() returns with rq locked! */
- schedule_out_process(rq, state, p, proxy_p, is_normal_sched);
- proxy_p = NULL;
+ {
+ int dec_refc;
- ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
- (int) ERTS_PSFLGS_GET_USR_PRIO(state),
- reds,
- actual_reds);
+ /* schedule_out_process() returns with rq locked! */
+ dec_refc = schedule_out_process(rq, state, p,
+ proxy_p, is_normal_sched);
+ proxy_p = NULL;
- esdp->current_process = NULL;
+ ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
+ (int) ERTS_PSFLGS_GET_USR_PRIO(state),
+ reds,
+ actual_reds);
+
+ esdp->current_process = NULL;
#ifdef ERTS_SMP
- p->scheduler_data = NULL;
+ if (is_normal_sched)
+ p->scheduler_data = NULL;
#endif
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
+ erts_smp_proc_unlock(p, (ERTS_PROC_LOCK_MAIN
+ | ERTS_PROC_LOCK_STATUS
+ | ERTS_PROC_LOCK_TRACE));
- ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER);
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER);
- if (state & ERTS_PSFLG_FREE) {
- if (!is_normal_sched) {
- ASSERT(p->flags & F_DELAYED_DEL_PROC);
- erts_proc_dec_refc(p);
- }
- else {
#ifdef ERTS_SMP
- ASSERT(esdp->free_process == p);
- esdp->free_process = NULL;
-#else
- erts_proc_dec_refc(p);
+ if (state & ERTS_PSFLG_FREE) {
+ if (!is_normal_sched) {
+ ASSERT(p->flags & F_DELAYED_DEL_PROC);
+ }
+ else {
+ ASSERT(esdp->free_process == p);
+ esdp->free_process = NULL;
+ }
+ }
#endif
- }
- }
+
+ if (dec_refc)
+ erts_proc_dec_refc(p);
+ }
#ifdef ERTS_SMP
ASSERT(!esdp->free_process);
@@ -9588,8 +9690,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
erts_smp_runq_lock(rq);
}
}
- BM_STOP_TIMER(system);
-
}
ERTS_SMP_LC_ASSERT(!is_normal_sched || !erts_thr_progress_is_blocking());
@@ -9825,10 +9925,6 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
goto check_activities_to_run;
}
- ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_EMULATOR);
-
- BM_START_TIMER(system);
-
/*
* Take the chosen process out of the queue.
*/
@@ -9851,8 +9947,12 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
if (!(state & ERTS_PSFLG_PROXY))
psflg_band_mask &= ~ERTS_PSFLG_IN_RUNQ;
else {
+ Eterm pid = p->common.id;
proxy_p = p;
- p = erts_proc_lookup_raw(proxy_p->common.id);
+ p = (is_normal_sched
+ ? erts_proc_lookup_raw(pid)
+ : erts_pid2proc_opt(NULL, 0, pid, 0,
+ ERTS_P2P_FLG_INC_REFC));
if (!p) {
free_proxy_proc(proxy_p);
proxy_p = NULL;
@@ -9884,6 +9984,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
| ERTS_PSFLG_FREE)))
#ifdef ERTS_DIRTY_SCHEDULERS
| (((state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_FREE
| ERTS_PSFLG_RUNNING_SYS
| ERTS_PSFLG_EXITING))
@@ -9916,6 +10017,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
/* free and not queued by proxy */
erts_proc_dec_refc(p);
}
+ if (!is_normal_sched)
+ erts_proc_dec_refc(p);
goto pick_next_process;
}
state = new;
@@ -9934,6 +10037,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
}
+ ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_EMULATOR);
+
#ifdef ERTS_SMP
if (flags & ERTS_RUNQ_FLG_PROTECTED)
@@ -9966,8 +10071,8 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
state = erts_smp_atomic32_read_nob(&p->state);
- ASSERT(!p->scheduler_data);
#ifndef ERTS_DIRTY_SCHEDULERS
+ ASSERT(!p->scheduler_data);
p->scheduler_data = esdp;
#else /* ERTS_DIRTY_SCHEDULERS */
if (is_normal_sched) {
@@ -9978,12 +10083,18 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
goto sched_out_proc;
}
+ ASSERT(!p->scheduler_data);
p->scheduler_data = esdp;
}
else {
if (state & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_PENDING_EXIT
| ERTS_PSFLG_EXITING)) {
+ /*
+ * IMPORTANT! We need to take care of
+ * scheduled check-process-code requests
+ * before continuing with dirty execution!
+ */
/* Migrate to normal scheduler... */
goto sunlock_sched_out_proc;
}
@@ -10365,7 +10476,8 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
reds--;
}
else {
- if (!garbage_collected) {
+ if (!garbage_collected
+ && !(c_p->flags & F_HIBERNATED)) {
FLAGS(c_p) |= F_NEED_FULLSWEEP;
reds -= scheduler_gc_proc(c_p, reds);
garbage_collected = 1;
@@ -10393,6 +10505,27 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
}
break;
}
+#ifdef ERTS_NEW_PURGE_STRATEGY
+ case ERTS_PSTT_CLA: {
+ int fcalls;
+ int cla_reds = 0;
+ if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
+ fcalls = reds;
+ else
+ fcalls = reds - CONTEXT_REDS;
+ st_res = erts_proc_copy_literal_area(c_p,
+ &cla_reds,
+ fcalls,
+ st->arg[0] == am_true);
+ reds -= cla_reds;
+ if (is_non_value(st_res)) {
+ /* Needed gc, but gc was disabled */
+ save_gc_task(c_p, st, st_prio);
+ st = NULL;
+ }
+ break;
+ }
+#endif
case ERTS_PSTT_COHMQ:
reds -= erts_complete_off_heap_message_queue_change(c_p);
st_res = am_true;
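Editor's note on the ERTS_PSTT_CLA case above: the budget passed to
erts_proc_copy_literal_area() is expressed as FCALLS, and when a saved-calls
buffer is active the emulator biases that counter by -CONTEXT_REDS, which is
what the branch compensates for. The conversion, sketched under that assumption:

    int fcalls = ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)
               ? reds - CONTEXT_REDS  /* save_calls active: biased counter */
               : reds;                /* plain reduction counter           */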
@@ -10443,14 +10576,15 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds)
switch (st->type) {
case ERTS_PSTT_GC:
- st_res = am_false;
- break;
case ERTS_PSTT_CPC:
- st_res = am_false;
- break;
case ERTS_PSTT_COHMQ:
st_res = am_false;
break;
+#ifdef ERTS_NEW_PURGE_STRATEGY
+ case ERTS_PSTT_CLA:
+ st_res = am_ok;
+ break;
+#endif
#ifdef ERTS_SMP
case ERTS_PSTT_FTMQ:
reds -= erts_flush_trace_messages(c_p, ERTS_PROC_LOCK_MAIN);
@@ -10471,22 +10605,83 @@ cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds)
return reds;
}
-BIF_RETTYPE
-erts_internal_request_system_task_3(BIF_ALIST_3)
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+static BIF_RETTYPE
+dispatch_system_task(Process *c_p, erts_aint_t fail_state,
+ ErtsProcSysTask *st, Eterm target,
+ Eterm prio, Eterm operation)
+{
+ Process *rp;
+ ErtsProcLocks rp_locks = 0;
+ ErlOffHeap *ohp;
+ ErtsMessage *mp;
+ Eterm *hp, msg;
+ Uint hsz, osz;
+ BIF_RETTYPE ret;
+
+ switch (st->type) {
+ case ERTS_PSTT_CPC:
+ rp = erts_dirty_process_code_checker;
+ ASSERT(fail_state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS));
+ if (c_p == rp) {
+ ERTS_BIF_PREP_RET(ret, am_dirty_execution);
+ return ret;
+ }
+ break;
+ default:
+ rp = NULL;
+ ERTS_INTERNAL_ERROR("Non-dispatchable system task");
+ break;
+ }
+
+ ERTS_BIF_PREP_RET(ret, am_ok);
+
+ /*
+ * Send a message of the form: {Requester, Target, Prio, Operation}
+ */
+
+ ASSERT(is_immed(st->requester));
+ ASSERT(is_immed(target));
+ ASSERT(is_immed(prio));
+
+ osz = size_object(operation);
+ hsz = 5 /* 4-tuple */ + osz;
+
+ mp = erts_alloc_message_heap(rp, &rp_locks, hsz, &hp, &ohp);
+
+ msg = copy_struct(operation, osz, &hp, ohp);
+ msg = TUPLE4(hp, st->requester, target, prio, msg);
+
+ erts_queue_message(rp, rp_locks, mp, msg, st->requester);
+
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+
+ return ret;
+}
+
+#endif
+
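Editor's note: dispatch_system_task() hands a request that cannot run on the
dirty-executing target over to a proxy process (for check_process_code, the
erts_dirty_process_code_checker process) as an ordinary message. Note the heap
sizing convention for the 4-tuple: one header word plus one word per element,
plus the deep size of the non-immediate Operation term. Sketched:

    Uint osz = size_object(operation);  /* deep size of Operation term */
    Uint hsz = 5 + osz;                 /* 4-tuple: 1 header + 4 elems */
    /* ... allocate hsz words, copy operation into msg, then: */
    msg = TUPLE4(hp, st->requester, target, prio, msg);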
+static BIF_RETTYPE
+request_system_task(Process *c_p, Eterm requester, Eterm target,
+ Eterm priority, Eterm operation)
{
- Process *rp = erts_proc_lookup(BIF_ARG_1);
+ BIF_RETTYPE ret;
+ Process *rp = erts_proc_lookup(target);
ErtsProcSysTask *st = NULL;
- erts_aint32_t prio;
+ erts_aint32_t prio, fail_state = ERTS_PSFLG_EXITING;
Eterm noproc_res, req_type;
- if (!rp && !is_internal_pid(BIF_ARG_1)) {
- if (!is_external_pid(BIF_ARG_1))
+ if (!rp && !is_internal_pid(target)) {
+ if (!is_external_pid(target))
goto badarg;
- if (external_pid_dist_entry(BIF_ARG_1) != erts_this_dist_entry)
+ if (external_pid_dist_entry(target) != erts_this_dist_entry)
goto badarg;
}
- switch (BIF_ARG_2) {
+ switch (priority) {
case am_max: prio = PRIORITY_MAX; break;
case am_high: prio = PRIORITY_HIGH; break;
case am_normal: prio = PRIORITY_NORMAL; break;
@@ -10494,11 +10689,11 @@ erts_internal_request_system_task_3(BIF_ALIST_3)
default: goto badarg;
}
- if (is_not_tuple(BIF_ARG_3))
+ if (is_not_tuple(operation))
goto badarg;
else {
int i;
- Eterm *tp = tuple_val(BIF_ARG_3);
+ Eterm *tp = tuple_val(operation);
Uint arity = arityval(*tp);
Eterm req_id;
Uint req_id_sz;
@@ -10536,7 +10731,7 @@ erts_internal_request_system_task_3(BIF_ALIST_3)
ERTS_INIT_OFF_HEAP(&st->off_heap);
hp = &st->heap[0];
- st->requester = BIF_P->common.id;
+ st->requester = requester;
st->reply_tag = req_type;
st->req_id_sz = req_id_sz;
st->req_id = req_id_sz == 0 ? req_id : copy_struct(req_id,
@@ -10570,26 +10765,85 @@ erts_internal_request_system_task_3(BIF_ALIST_3)
st->type = ERTS_PSTT_CPC;
if (!rp)
goto noproc;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ /*
+ * If the process is about to start executing dirty
+ * code, it is important that this task is
+ * aborted. Hence this strict fail state...
+ */
+ fail_state |= (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS);
+#endif
break;
+#ifdef ERTS_NEW_PURGE_STRATEGY
+ case am_copy_literals:
+ if (st->arg[0] != am_true && st->arg[0] != am_false)
+ goto badarg;
+ st->type = ERTS_PSTT_CLA;
+ noproc_res = am_ok;
+ if (!rp)
+ goto noproc;
+ break;
+#endif
+
default:
goto badarg;
}
- if (!schedule_process_sys_task(rp, prio, st)) {
- noproc:
- notify_sys_task_executed(BIF_P, st, noproc_res);
+ if (!schedule_process_sys_task(rp, prio, st, &fail_state)) {
+ Eterm failure;
+ if (fail_state & ERTS_PSFLG_EXITING) {
+ noproc:
+ failure = noproc_res;
+ }
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (fail_state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ ret = dispatch_system_task(c_p, fail_state, st,
+ target, priority, operation);
+ goto cleanup_return;
+ }
+#endif
+ else {
+ ERTS_INTERNAL_ERROR("Unknown failure in schedule_process_sys_task()");
+ failure = am_internal_error;
+ }
+ notify_sys_task_executed(c_p, st, failure);
}
- BIF_RET(am_ok);
+ ERTS_BIF_PREP_RET(ret, am_ok);
+
+ return ret;
badarg:
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+cleanup_return:
+#endif
+
if (st) {
erts_cleanup_offheap(&st->off_heap);
erts_free(ERTS_ALC_T_PROC_SYS_TSK, st);
}
- BIF_ERROR(BIF_P, BADARG);
+
+ return ret;
+}
+
+BIF_RETTYPE
+erts_internal_request_system_task_3(BIF_ALIST_3)
+{
+ return request_system_task(BIF_P, BIF_P->common.id,
+ BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+}
+
+BIF_RETTYPE
+erts_internal_request_system_task_4(BIF_ALIST_4)
+{
+ return request_system_task(BIF_P, BIF_ARG_1,
+ BIF_ARG_2, BIF_ARG_3, BIF_ARG_4);
}
static void
@@ -10598,7 +10852,7 @@ erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type)
Process *rp = erts_proc_lookup(pid);
if (rp) {
ErtsProcSysTask *st;
- erts_aint32_t state;
+ erts_aint32_t state, fail_state;
int i;
st = erts_alloc(ERTS_ALC_T_PROC_SYS_TSK,
@@ -10613,21 +10867,103 @@ erts_schedule_generic_sys_task(Eterm pid, ErtsProcSysTaskType type)
ERTS_INIT_OFF_HEAP(&st->off_heap);
state = erts_smp_atomic32_read_nob(&rp->state);
- if (!schedule_process_sys_task(rp, ERTS_PSFLGS_GET_USR_PRIO(state), st))
+ fail_state = ERTS_PSFLG_EXITING;
+
+ if (!schedule_process_sys_task(rp, ERTS_PSFLGS_GET_USR_PRIO(state),
+ st, &fail_state))
erts_free(ERTS_ALC_T_PROC_SYS_TSK, st);
}
}
+
void
erts_schedule_complete_off_heap_message_queue_change(Eterm pid)
{
erts_schedule_generic_sys_task(pid, ERTS_PSTT_COHMQ);
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+static void
+flush_dirty_trace_messages(void *vpid)
+{
+ Process *proc;
+ Eterm pid;
+#ifdef ARCH_64
+ pid = (Eterm) vpid;
+#else
+ pid = *((Eterm *) vpid);
+ erts_free(ERTS_ALC_T_DIRTY_SL, vpid);
+#endif
+
+ proc = erts_proc_lookup(pid);
+ if (proc)
+ (void) erts_flush_trace_messages(proc, 0);
+}
+
+#endif /* ERTS_DIRTY_SCHEDULERS */
+
void
-erts_schedule_flush_trace_messages(Eterm pid)
+erts_schedule_flush_trace_messages(Process *proc, int force_on_proc)
{
+#ifdef ERTS_SMP
+ ErtsThrPrgrDelayHandle dhndl;
+#endif
+ Eterm pid = proc->common.id;
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ erts_aint32_t state;
+
+ if (!force_on_proc) {
+ state = erts_smp_atomic32_read_nob(&proc->state);
+ if (state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ goto sched_flush_dirty;
+ }
+ }
+#endif
+
+#ifdef ERTS_SMP
+ dhndl = erts_thr_progress_unmanaged_delay();
+#endif
+
erts_schedule_generic_sys_task(pid, ERTS_PSTT_FTMQ);
+
+#ifdef ERTS_SMP
+ erts_thr_progress_unmanaged_continue(dhndl);
+#endif
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!force_on_proc) {
+ state = erts_smp_atomic32_read_mb(&proc->state);
+ if (state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ void *vargp;
+
+ sched_flush_dirty:
+ /*
+	     * We traced 'proc' from a thread other than the
+	     * one it is executing on, and it is executing
+	     * on a dirty scheduler. It might take a
+	     * significant amount of time before it is
+	     * scheduled out (which is when it gets the
+	     * opportunity to flush messages). We therefore
+	     * schedule the flush on the first ordinary scheduler.
+ */
+
+#ifdef ARCH_64
+ vargp = (void *) pid;
+#else
+ {
+ Eterm *argp = erts_alloc(ERTS_ALC_T_DIRTY_SL, sizeof(Eterm));
+ *argp = pid;
+ vargp = (void *) argp;
+ }
+#endif
+ erts_schedule_misc_aux_work(1, flush_dirty_trace_messages, vargp);
+ }
+ }
+#endif
}
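Editor's note: erts_schedule_flush_trace_messages() reads the state word twice:
a relaxed read up front for the common case, then a read with full barrier after
scheduling the generic system task, closing the race where the process moves
onto a dirty scheduler in between; in that case the flush is redone as misc aux
work on the first ordinary scheduler. The double-check shape, sketched with
hypothetical helpers (dirty_running(), schedule_flush(), schedule_dirty_flush())
standing in for the code above:

    erts_aint32_t s = erts_smp_atomic32_read_nob(&proc->state);
    if (dirty_running(s))
        schedule_dirty_flush(proc);        /* fast path: already dirty */
    else {
        schedule_flush(proc);              /* normal sys-task flush    */
        s = erts_smp_atomic32_read_mb(&proc->state);
        if (dirty_running(s))              /* raced onto a dirty       */
            schedule_dirty_flush(proc);    /* scheduler? redo flush    */
    }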
static void
@@ -11154,18 +11490,11 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
|| (erts_smp_atomic32_read_nob(&p->state)
& ERTS_PSFLG_OFF_HEAP_MSGQ));
-#ifdef BM_COUNTERS
- processes_busy++;
-#endif
- BM_COUNT(processes_spawned);
-
- BM_SWAP_TIMER(system,size);
#ifdef SHCOPY_SPAWN
arg_size = copy_shared_calculate(args, &info);
#else
arg_size = size_object(args);
#endif
- BM_SWAP_TIMER(size,system);
heap_need = arg_size;
p->flags = flags;
@@ -11242,18 +11571,12 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->max_arg_reg = sizeof(p->def_arg_reg)/sizeof(p->def_arg_reg[0]);
p->arg_reg[0] = mod;
p->arg_reg[1] = func;
- BM_STOP_TIMER(system);
- BM_MESSAGE(args,p,parent);
- BM_START_TIMER(system);
- BM_SWAP_TIMER(system,copy);
#ifdef SHCOPY_SPAWN
p->arg_reg[2] = copy_shared_perform(args, arg_size, &info, &p->htop, &p->off_heap);
DESTROY_SHCOPY(info);
#else
p->arg_reg[2] = copy_struct(args, arg_size, &p->htop, &p->off_heap);
#endif
- BM_MESSAGE_COPIED(arg_size);
- BM_SWAP_TIMER(copy,system);
p->arity = 3;
p->fvalue = NIL;
@@ -11649,7 +11972,6 @@ erts_cleanup_empty_process(Process* p)
static void
delete_process(Process* p)
{
- Eterm *heap;
ErtsPSD *psd;
struct saved_calls *scb;
process_breakpoint_time_t *pbt;
@@ -11704,13 +12026,8 @@ delete_process(Process* p)
hipe_delete_process(&p->hipe);
#endif
- heap = p->abandoned_heap ? p->abandoned_heap : p->heap;
-
-#ifdef DEBUG
- sys_memset(heap, DEBUG_BAD_BYTE, p->heap_sz*sizeof(Eterm));
-#endif
+ erts_deallocate_young_generation(p);
- ERTS_HEAP_FREE(ERTS_ALC_T_HEAP, (void*) heap, p->heap_sz*sizeof(Eterm));
if (p->old_heap != NULL) {
#ifdef DEBUG
@@ -11722,16 +12039,6 @@ delete_process(Process* p)
(p->old_hend-p->old_heap)*sizeof(Eterm));
}
- /*
- * Free all pending message buffers.
- */
- if (p->mbuf != NULL) {
- free_message_buffer(p->mbuf);
- }
-
- if (p->msg_frag)
- erts_cleanup_messages(p->msg_frag);
-
erts_erase_dicts(p);
/* free all pending messages */
@@ -12134,6 +12441,8 @@ send_exit_signal(Process *c_p, /* current process if and only
else if (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))) {
/* Process not running ... */
ErtsProcLocks need_locks = ~(*rp_locks) & ERTS_PROC_LOCKS_ALL;
+ ErlHeapFragment *bp = NULL;
+ Eterm rsn_cpy;
if (need_locks
&& erts_smp_proc_trylock(rp, need_locks) == EBUSY) {
/* ... but we haven't got all locks on it ... */
@@ -12146,12 +12455,32 @@ send_exit_signal(Process *c_p, /* current process if and only
}
/* ...and we have all locks on it... */
*rp_locks = ERTS_PROC_LOCKS_ALL;
- set_proc_exiting(rp,
- state,
- (is_immed(rsn)
- ? rsn
- : copy_object(rsn, rp)),
- NULL);
+
+ state = erts_smp_atomic32_read_nob(&rp->state);
+
+ if (is_immed(rsn))
+ rsn_cpy = rsn;
+ else {
+ Eterm *hp;
+ ErlOffHeap *ohp;
+ Uint rsn_sz = size_object(rsn);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ bp = new_message_buffer(rsn_sz);
+ ohp = &bp->off_heap;
+ hp = &bp->mem[0];
+ }
+ else
+#endif
+ {
+ hp = HAlloc(rp, rsn_sz);
+ ohp = &rp->off_heap;
+ }
+ rsn_cpy = copy_struct(rsn, rsn_sz, &hp, ohp);
+ }
+
+ set_proc_exiting(rp, state, rsn_cpy, bp);
}
else { /* Process running... */
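Editor's note on the hunk above: writing the exit reason directly onto the
target's heap is only safe when the target cannot be executing concurrently; a
process on a dirty scheduler keeps running while being signalled, so the reason
is copied into a standalone heap fragment that set_proc_exiting() then attaches.
The choice, sketched:

    if (state & (ERTS_PSFLG_DIRTY_RUNNING
                 | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
        bp  = new_message_buffer(rsn_sz);  /* off-process fragment    */
        hp  = &bp->mem[0];
        ohp = &bp->off_heap;
    }
    else {
        hp  = HAlloc(rp, rsn_sz);          /* safe: rp is not running */
        ohp = &rp->off_heap;
    }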
@@ -12289,7 +12618,6 @@ static void doit_exit_monitor(ErtsMonitor *mon, void *vpcontext)
erts_port_demonitor(pcontext->p,
ERTS_PORT_DEMONITOR_ORIGIN_ON_DEATHBED,
prt, mon->ref, NULL);
- return; /* let erts_port_demonitor do the deletion */
} else { /* remote by pid */
ASSERT(is_external_pid(mon->pid));
dep = external_pid_dist_entry(mon->pid);
@@ -12807,15 +13135,14 @@ erts_continue_exit_process(Process *p)
}
#ifdef ERTS_DIRTY_SCHEDULERS
- if (a & (ERTS_PSFLG_DIRTY_RUNNING
- | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ if (a & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
p->flags |= F_DELAYED_DEL_PROC;
delay_del_proc = 1;
/*
- * The dirty scheduler will also decrease
- * refc when done...
+ * The dirty scheduler decreases refc
+ * when done with the process...
*/
- erts_proc_inc_refc(p);
}
#endif
@@ -12826,9 +13153,6 @@ erts_continue_exit_process(Process *p)
dep = (p->flags & F_DISTRIBUTION) ? erts_this_dist_entry : NULL;
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
-#ifdef BM_COUNTERS
- processes_busy--;
-#endif
if (dep) {
erts_do_net_exits(dep, reason);
@@ -12908,7 +13232,7 @@ erts_continue_exit_process(Process *p)
*/
void
-erts_stack_dump(int to, void *to_arg, Process *p)
+erts_stack_dump(fmtfn_t to, void *to_arg, Process *p)
{
Eterm* sp;
int yreg = -1;
@@ -12923,7 +13247,7 @@ erts_stack_dump(int to, void *to_arg, Process *p)
}
void
-erts_program_counter_info(int to, void *to_arg, Process *p)
+erts_program_counter_info(fmtfn_t to, void *to_arg, Process *p)
{
erts_aint32_t state;
int i;
@@ -12953,7 +13277,7 @@ erts_program_counter_info(int to, void *to_arg, Process *p)
}
static void
-print_function_from_pc(int to, void *to_arg, BeamInstr* x)
+print_function_from_pc(fmtfn_t to, void *to_arg, BeamInstr* x)
{
BeamInstr* addr = find_function_from_pc(x);
if (addr == NULL) {
@@ -12975,7 +13299,7 @@ print_function_from_pc(int to, void *to_arg, BeamInstr* x)
}
static int
-stack_element_dump(int to, void *to_arg, Eterm* sp, int yreg)
+stack_element_dump(fmtfn_t to, void *to_arg, Eterm* sp, int yreg)
{
Eterm x = *sp;
@@ -13007,7 +13331,7 @@ stack_element_dump(int to, void *to_arg, Eterm* sp, int yreg)
* Print scheduler information
*/
void
-erts_print_scheduler_info(int to, void *to_arg, ErtsSchedulerData *esdp) {
+erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp) {
int i;
erts_aint32_t flg;
Process *p;