Diffstat (limited to 'erts/emulator/beam/erl_process.c')
-rw-r--r--  erts/emulator/beam/erl_process.c | 143
1 file changed, 96 insertions(+), 47 deletions(-)
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 1f6adb98ef..1c1ef1db84 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -8568,9 +8568,6 @@ erts_start_schedulers(void)
{
ethr_tid tid;
int res = 0;
- Uint actual;
- Uint wanted = erts_no_schedulers;
- Uint wanted_no_schedulers = erts_no_schedulers;
char name[16];
ethr_thr_opts opts = ETHR_THR_OPTS_DEFAULT_INITER;
int ix;
@@ -8584,40 +8581,34 @@ erts_start_schedulers(void)
erts_snprintf(opts.name, 16, "runq_supervisor");
erts_atomic_init_nob(&runq_supervisor_sleeping, 0);
if (0 != ethr_event_init(&runq_supervision_event))
- erts_exit(ERTS_ERROR_EXIT, "Failed to create run-queue supervision event\n");
+ erts_exit(ERTS_ABORT_EXIT, "Failed to create run-queue supervision event\n");
res = ethr_thr_create(&runq_supervisor_tid,
runq_supervisor,
NULL,
&opts);
if (0 != res)
- erts_exit(ERTS_ERROR_EXIT, "Failed to create run-queue supervision thread, "
+ erts_exit(ERTS_ABORT_EXIT, "Failed to create run-queue supervision thread, "
"error = %d\n", res);
}
opts.suggested_stack_size = erts_sched_thread_suggested_stack_size;
- if (wanted < 1)
- wanted = 1;
- if (wanted > ERTS_MAX_NO_OF_SCHEDULERS) {
- wanted = ERTS_MAX_NO_OF_SCHEDULERS;
- res = ENOTSUP;
- }
-
- for (actual = 0; actual < wanted; actual++) {
- ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(actual);
-
- ASSERT(actual == esdp->no - 1);
-
- erts_snprintf(opts.name, 16, "%lu_scheduler", actual + 1);
+ ASSERT(erts_no_schedulers > 0 && erts_no_schedulers <= ERTS_MAX_NO_OF_SCHEDULERS);
+ for (ix = 0; ix < erts_no_schedulers; ix++) {
+ ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix);
+ ASSERT(ix == esdp->no - 1);
+ erts_snprintf(opts.name, 16, "%lu_scheduler", ix + 1);
res = ethr_thr_create(&esdp->tid, sched_thread_func, (void*)esdp, &opts);
-
if (res != 0) {
- break;
+ erts_exit(ERTS_ABORT_EXIT, "Failed to create scheduler thread %d, error = %d\n", ix, res);
}
}
- erts_no_schedulers = actual;
+
+ /* Probably not needed, as thread creation implies a memory barrier,
+ but we do one just to be safe. */
+ ERTS_THR_MEMORY_BARRIER;
{
for (ix = 0; ix < erts_no_dirty_cpu_schedulers; ix++) {
@@ -8626,7 +8617,7 @@ erts_start_schedulers(void)
opts.suggested_stack_size = erts_dcpu_sched_thread_suggested_stack_size;
res = ethr_thr_create(&esdp->tid,sched_dirty_cpu_thread_func,(void*)esdp,&opts);
if (res != 0)
- erts_exit(ERTS_ERROR_EXIT, "Failed to create dirty cpu scheduler thread %d, error = %d\n", ix, res);
+ erts_exit(ERTS_ABORT_EXIT, "Failed to create dirty cpu scheduler thread %d, error = %d\n", ix, res);
}
for (ix = 0; ix < erts_no_dirty_io_schedulers; ix++) {
ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
@@ -8634,40 +8625,22 @@ erts_start_schedulers(void)
opts.suggested_stack_size = erts_dio_sched_thread_suggested_stack_size;
res = ethr_thr_create(&esdp->tid,sched_dirty_io_thread_func,(void*)esdp,&opts);
if (res != 0)
- erts_exit(ERTS_ERROR_EXIT, "Failed to create dirty io scheduler thread %d, error = %d\n", ix, res);
+ erts_exit(ERTS_ABORT_EXIT, "Failed to create dirty io scheduler thread %d, error = %d\n", ix, res);
}
}
- ERTS_THR_MEMORY_BARRIER;
-
erts_snprintf(opts.name, 16, "aux");
res = ethr_thr_create(&tid, aux_thread, NULL, &opts);
if (res != 0)
- erts_exit(ERTS_ERROR_EXIT, "Failed to create aux thread, error = %d\n", res);
+ erts_exit(ERTS_ABORT_EXIT, "Failed to create aux thread, error = %d\n", res);
for (ix = 0; ix < erts_no_poll_threads; ix++) {
erts_snprintf(opts.name, 16, "%d_poller", ix);
res = ethr_thr_create(&tid, poll_thread, (void*)(UWord)ix, &opts);
if (res != 0)
- erts_exit(ERTS_ERROR_EXIT, "Failed to create poll thread\n");
- }
-
- if (actual < 1)
- erts_exit(ERTS_ERROR_EXIT,
- "Failed to create any scheduler-threads: %s (%d)\n",
- erl_errno_id(res),
- res);
- if (res != 0) {
- erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
- ASSERT(actual != wanted_no_schedulers);
- erts_dsprintf(dsbufp,
- "Failed to create %beu scheduler-threads (%s:%d); "
- "only %beu scheduler-thread%s created.\n",
- wanted_no_schedulers, erl_errno_id(res), res,
- actual, actual == 1 ? " was" : "s were");
- erts_send_error_to_logger_nogl(dsbufp);
+ erts_exit(ERTS_ABORT_EXIT, "Failed to create poll thread\n");
}
}
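
The hunks above turn erts_start_schedulers() into a fail-fast routine: the old best-effort path (clamp the wanted count, start as many scheduler threads as possible, shrink erts_no_schedulers, and log a warning) is gone, any ethr_thr_create() failure now aborts via ERTS_ABORT_EXIT, and a single memory barrier is issued after the loop. Below is a minimal, self-contained pthread sketch of the same fail-fast pattern; it is not the ERTS code, and N_WORKERS, start_workers() and worker_main() are illustrative names only.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define N_WORKERS 4

static void *worker_main(void *arg)
{
    int ix = (int)(intptr_t) arg;
    printf("worker %d started\n", ix);
    return NULL;
}

/* Start all workers or abort: a partially started set is never kept,
   mirroring the switch from "shrink erts_no_schedulers" to ERTS_ABORT_EXIT. */
static void start_workers(pthread_t *tids, int n)
{
    int ix, res;
    for (ix = 0; ix < n; ix++) {
        res = pthread_create(&tids[ix], NULL, worker_main,
                             (void *)(intptr_t) ix);
        if (res != 0) {
            fprintf(stderr, "failed to create worker %d: %s\n",
                    ix, strerror(res));
            abort();
        }
    }
}

int main(void)
{
    pthread_t tids[N_WORKERS];
    int ix;

    start_workers(tids, N_WORKERS);
    for (ix = 0; ix < N_WORKERS; ix++)
        pthread_join(tids[ix], NULL);
    return 0;
}
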
@@ -12097,6 +12070,7 @@ erts_proc_exit_handle_dist_monitor(ErtsMonitor *mon, void *vctxt, Sint reds)
ErtsHeapFactory factory;
Sint reds_consumed = 0;
+ ASSERT(c_p->flags & F_DISABLE_GC);
ASSERT(erts_monitor_is_target(mon) && mon->type == ERTS_MON_TYPE_DIST_PROC);
mdp = erts_monitor_to_data(mon);
@@ -12144,7 +12118,6 @@ erts_proc_exit_handle_dist_monitor(ErtsMonitor *mon, void *vctxt, Sint reds)
switch (code) {
case ERTS_DSIG_SEND_CONTINUE:
case ERTS_DSIG_SEND_YIELD:
- erts_set_gc_state(c_p, 0);
ctxt->dist_state = erts_dsend_export_trap_context(c_p, &ctx);
reds_consumed = reds; /* force yield */
break;
@@ -12152,7 +12125,6 @@ erts_proc_exit_handle_dist_monitor(ErtsMonitor *mon, void *vctxt, Sint reds)
break;
case ERTS_DSIG_SEND_TOO_LRG:
erts_kill_dist_connection(dep, dist->connection_id);
- erts_set_gc_state(c_p, 1);
break;
default:
ASSERT(! "Invalid dsig send exit monitor result");
@@ -12356,6 +12328,7 @@ erts_proc_exit_handle_dist_link(ErtsLink *lnk, void *vctxt, Sint reds)
ErtsHeapFactory factory;
Sint reds_consumed = 0;
+ ASSERT(c_p->flags & F_DISABLE_GC);
ASSERT(lnk->type == ERTS_LNK_TYPE_DIST_PROC);
dlnk = erts_link_to_other(lnk, &ldp);
dist = ((ErtsLinkDataExtended *) ldp)->dist;
@@ -12395,7 +12368,6 @@ erts_proc_exit_handle_dist_link(ErtsLink *lnk, void *vctxt, Sint reds)
switch (code) {
case ERTS_DSIG_SEND_YIELD:
case ERTS_DSIG_SEND_CONTINUE:
- erts_set_gc_state(c_p, 0);
ctxt->dist_state = erts_dsend_export_trap_context(c_p, &ctx);
reds_consumed = reds; /* force yield */
break;
@@ -12403,7 +12375,6 @@ erts_proc_exit_handle_dist_link(ErtsLink *lnk, void *vctxt, Sint reds)
break;
case ERTS_DSIG_SEND_TOO_LRG:
erts_kill_dist_connection(dep, dist->connection_id);
- erts_set_gc_state(c_p, 1);
break;
default:
ASSERT(! "Invalid dsig send exit monitor result");
@@ -12951,6 +12922,8 @@ restart:
yield_allowed = 0;
#endif
+ /* Enable GC again; though strictly not needed, it puts
+ the process in a consistent state. */
erts_set_gc_state(p, 1);
/* Set state to not active as we don't want this process
@@ -13495,3 +13468,79 @@ erts_debug_later_op_foreach(void (*callback)(void*),
}
}
}
+
+void
+erts_debug_free_process_foreach(void (*func)(Process *, void *), void *arg)
+{
+ ErtsRunQueue *rq;
+ int ix, prio;
+ for (ix = 0; ix < erts_no_run_queues; ix++) {
+ rq = ERTS_RUNQ_IX(ix);
+ for (prio = PRIORITY_MAX; prio < PRIORITY_LOW; prio++) {
+ Process *p = rq->procs.prio[prio].first;
+ for (; p; p = p->next) {
+ if (ERTS_PSFLG_FREE & erts_atomic32_read_nob(&p->state))
+ (*func)(p, arg);
+ }
+ }
+ }
+}
+
+void
+erts_debug_proc_monitor_link_foreach(Process *proc,
+ int (*monitor_func)(ErtsMonitor *, void *, Sint ),
+ int (*link_func)(ErtsLink *, void *, Sint ),
+ void *arg)
+{
+ if (!(erts_atomic32_read_nob(&proc->state) & ERTS_PSFLG_FREE)) {
+ /* For all links */
+ erts_link_tree_foreach(ERTS_P_LINKS(proc),
+ link_func,
+ arg);
+ /* For all monitors */
+ erts_monitor_tree_foreach(ERTS_P_MONITORS(proc),
+ monitor_func,
+ arg);
+ /* For all local target monitors */
+ erts_monitor_list_foreach(ERTS_P_LT_MONITORS(proc),
+ monitor_func,
+ arg);
+ }
+ else {
+ struct continue_exit_state *ce_state = proc->u.terminate;
+
+ /* For all links */
+ if (ce_state->phase == ERTS_CONTINUE_EXIT_LINKS)
+ erts_debug_link_tree_destroying_foreach(ce_state->links,
+ link_func,
+ arg,
+ ce_state->yield_state);
+ else
+ erts_link_tree_foreach(ce_state->links,
+ link_func,
+ arg);
+
+ /* For all monitors */
+ if (ce_state->phase == ERTS_CONTINUE_EXIT_MONITORS)
+ erts_debug_monitor_tree_destroying_foreach(ce_state->monitors,
+ monitor_func,
+ arg,
+ ce_state->yield_state);
+ else
+ erts_monitor_tree_foreach(ce_state->monitors,
+ monitor_func,
+ arg);
+
+ /* For all local target monitors */
+ if (ce_state->phase == ERTS_CONTINUE_EXIT_LT_MONITORS)
+ erts_debug_monitor_list_destroying_foreach(ce_state->lt_monitors,
+ monitor_func,
+ arg,
+ ce_state->yield_state);
+ else
+ erts_monitor_list_foreach(ce_state->lt_monitors,
+ monitor_func,
+ arg);
+
+ }
+}
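
A hedged usage sketch for the two new debug iterators, assuming it sits in an ERTS compilation unit where Process, ErtsMonitor, ErtsLink, Uint and Sint are visible. The callbacks count_free_proc(), count_monitor(), count_link() and debug_dump_counts() are illustrative names, not part of this commit, and the integer return value of the monitor/link callbacks is assumed to be a reduction count (the non-yielding iterators may ignore it). Which tree or list the second iterator walks depends on whether the process is already in the FREE state, as implemented above.

/* Illustrative callbacks only. */
static void count_free_proc(Process *p, void *arg)
{
    /* p has ERTS_PSFLG_FREE set; just count it. */
    (*(Uint *) arg)++;
}

static int count_monitor(ErtsMonitor *mon, void *arg, Sint reds)
{
    (*(Uint *) arg)++;
    return 1;  /* assumed: reductions consumed by this callback */
}

static int count_link(ErtsLink *lnk, void *arg, Sint reds)
{
    (*(Uint *) arg)++;
    return 1;  /* assumed: reductions consumed by this callback */
}

static void debug_dump_counts(Process *proc)
{
    Uint free_procs = 0, mons_and_links = 0;

    erts_debug_free_process_foreach(count_free_proc, &free_procs);
    erts_debug_proc_monitor_link_foreach(proc,
                                         count_monitor,
                                         count_link,
                                         &mons_and_links);
    fprintf(stderr, "free processes: %lu, monitors+links: %lu\n",
            (unsigned long) free_procs, (unsigned long) mons_and_links);
}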