path: root/erts
author    Rickard Green <[email protected]>    2013-10-02 10:07:27 +0200
committer Rickard Green <[email protected]>    2013-11-18 20:12:35 +0100
commit    ca0425c6ff85262bc15367f5fd9cbc51cde52b20 (patch)
tree      0002e42905b936ccd5f2ba57d9a25819eef2cdb8 /erts
parent    20641fe0f2ea745873fc7557448d3a7deb1bd639 (diff)
download  otp-ca0425c6ff85262bc15367f5fd9cbc51cde52b20.tar.gz
          otp-ca0425c6ff85262bc15367f5fd9cbc51cde52b20.tar.bz2
          otp-ca0425c6ff85262bc15367f5fd9cbc51cde52b20.zip
Execution of system tasks in context of another process
A process requesting a system task to be executed in the context of another
process is notified by a message when the task has executed. This message is
of the form {RequestType, RequestId, Result}.

A process requesting a system task can set a priority on the task. The
requester typically sets the same priority on the task as its own process
priority, thereby avoiding priority inversion.

A request for execution of a system task is made by calling the statically
linked-in NIF erts_internal:request_system_task(Pid, Prio, Request). This is
an undocumented ERTS-internal function and should remain so; it should *only*
be called from BIF implementations.

Currently defined system tasks are:

* garbage_collect
* check_process_code

Further system tasks can and will be implemented in the future.

The erlang:garbage_collect/[1,2] and erlang:check_process_code/[2,3] BIFs are
now implemented using system tasks. Both the 'garbage_collect' and the
'check_process_code' operations perform, or may perform, garbage collections.
By doing these via the system-task functionality, all garbage collection
operations in the system are performed solely in the context of the process
being garbage collected. This makes it possible to later implement
functionality for disabling garbage collection of a process over context
switches.

Newly introduced BIFs:

* erlang:garbage_collect/2 - The new second argument is an option list.
  Introduced option:
  * {async, RequestId} - makes it possible for users to issue asynchronous
    garbage collect requests.
* erlang:check_process_code/3 - The new third argument is an option list.
  Introduced options:
  * {async, RequestId} - makes it possible for users to issue asynchronous
    check process code requests.
  * {allow_gc, boolean()} - makes it possible to issue requests that are not
    allowed to garbage collect (the operation is aborted if a GC would be
    needed).

These options have been introduced as a preparation for parallelization of
check_process_code operations when the code_server is about to purge a module.
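The asynchronous request/reply pattern described above can be illustrated with
a short sketch. This is not part of the commit; the helper names are made up,
and RequestId is simply an arbitrary term chosen by the caller (a reference is
a natural choice since it is unique):

    %% Ask another process to garbage collect itself, without blocking.
    async_gc_example(Pid) ->
        Ref = make_ref(),
        async = erlang:garbage_collect(Pid, [{async, Ref}]),
        receive
            {garbage_collect, Ref, GCResult} -> GCResult   % true | false
        end.

    %% Same pattern for check_process_code/3.
    async_cpc_example(Pid, Module) ->
        Ref = make_ref(),
        async = erlang:check_process_code(Pid, Module, [{async, Ref}]),
        receive
            {check_process_code, Ref, Result} -> Result    % true | false | aborted
        end.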
Diffstat (limited to 'erts')
-rw-r--r--erts/doc/src/erlang.xml161
-rw-r--r--erts/emulator/beam/atom.names1
-rw-r--r--erts/emulator/beam/beam_bif_load.c133
-rw-r--r--erts/emulator/beam/bif.c39
-rw-r--r--erts/emulator/beam/bif.tab6
-rw-r--r--erts/emulator/beam/break.c10
-rw-r--r--erts/emulator/beam/erl_alloc.types2
-rw-r--r--erts/emulator/beam/erl_message.h5
-rw-r--r--erts/emulator/beam/erl_process.c1224
-rw-r--r--erts/emulator/beam/erl_process.h74
-rwxr-xr-xerts/emulator/beam/global.h4
-rw-r--r--erts/emulator/beam/ops.tab14
-rw-r--r--erts/emulator/beam/utils.c2
-rw-r--r--erts/emulator/hipe/hipe_bif_list.m43
-rw-r--r--erts/emulator/hipe/hipe_native_bif.c17
-rw-r--r--erts/emulator/test/process_SUITE.erl162
-rw-r--r--erts/etc/unix/etp-commands.in93
-rw-r--r--erts/preloaded/ebin/erlang.beambin94152 -> 97500 bytes
-rw-r--r--erts/preloaded/ebin/erts_internal.beambin3260 -> 3780 bytes
-rw-r--r--erts/preloaded/src/erlang.erl132
-rw-r--r--erts/preloaded/src/erts_internal.erl21
21 files changed, 1787 insertions, 316 deletions
diff --git a/erts/doc/src/erlang.xml b/erts/doc/src/erlang.xml
index 5ee40823bc..a9642be6fa 100644
--- a/erts/doc/src/erlang.xml
+++ b/erts/doc/src/erlang.xml
@@ -501,16 +501,87 @@
<name name="check_process_code" arity="2"/>
<fsummary>Check if a process is executing old code for a module</fsummary>
<desc>
- <p>Returns <c>true</c> if the process <c><anno>Pid</anno></c> is executing
- old code for <c><anno>Module</anno></c>. That is, if the current call of
- the process executes old code for this module, or if the
- process has references to old code for this module, or if the
- process contains funs that references old code for this
- module. Otherwise, it returns <c>false</c>.</p>
- <pre>
-> <input>check_process_code(Pid, lists).</input>
-false</pre>
+ <p>The same as
+ <seealso marker="#check_process_code/3"><c>erlang:check_process_code(<anno>Pid</anno>,
+ <anno>Module</anno>, [])</c></seealso>.</p>
+ </desc>
+ </func>
+ <func>
+ <name name="check_process_code" arity="3"/>
+ <fsummary>Check if a process is executing old code for a module</fsummary>
+ <desc>
+ <p>Check if the node local process identified by <c><anno>Pid</anno></c>
+ is executing old code for <c><anno>Module</anno></c>.</p>
+ <p>Currently available <c><anno>Option</anno>s</c>:</p>
+ <taglist>
+ <tag><c>{allow_gc, boolean()}</c></tag>
+ <item>
+ Determines if garbage collection is allowed when performing
+ the operation. If <c>{allow_gc, false}</c> is passed, and
+ a garbage collection is needed in order to determine the
+ result of the operation, the operation will be aborted
+ (see information on <c><anno>CheckResult</anno></c> below).
+ The default is to allow garbage collection, i.e.,
+ <c>{allow_gc, true}</c>.
+ </item>
+ <tag><c>{async, RequestId}</c></tag>
+ <item>
+ The <c>check_process_code/3</c> function will return
+ the value <c>async</c> immediately after the request
+ has been sent. When the request has been processed, the
+ process that called this function will be passed a
+ message of the form:<br/>
+ <c>{check_process_code, <anno>RequestId</anno>, <anno>CheckResult</anno>}</c>.
+ </item>
+ </taglist>
+ <p>If <c><anno>Pid</anno></c> equals <c>self()</c>, and
+ no <c>async</c> option has been passed, the operation will
+ be performed at once. In all other cases a request for
+ the operation will be sent to the process identified by
+ <c><anno>Pid</anno></c>, and will be handled when
+ appropriate. If no <c>async</c> option has been passed,
+ the caller will block until <c><anno>CheckResult</anno></c>
+ is available and can be returned.</p>
+ <p><c><anno>CheckResult</anno></c> informs about the result of
+ the request:</p>
+ <taglist>
+ <tag><c>true</c></tag>
+ <item>
+ The process identified by <c><anno>Pid</anno></c> is
+ executing old code for <c><anno>Module</anno></c>.
+ That is, the current call of the process executes old
+ code for this module, or the process has references
+ to old code for this module, or the process contains
+ funs that reference old code for this module.
+ </item>
+ <tag><c>false</c></tag>
+ <item>
+ The process identified by <c><anno>Pid</anno></c> is
+ not executing old code for <c><anno>Module</anno></c>.
+ </item>
+ <tag><c>aborted</c></tag>
+ <item>
+ The operation was aborted since the process needed to
+ be garbage collected in order to determine the result
+ of the operation, and the operation was requested
+ by passing the <c>{allow_gc, false}</c> option.</item>
+ </taglist>
<p>See also <seealso marker="kernel:code">code(3)</seealso>.</p>
+ <p>Failures:</p>
+ <taglist>
+ <tag><c>badarg</c></tag>
+ <item>
+ If <c><anno>Pid</anno></c> is not a node local process identifier.
+ </item>
+ <tag><c>badarg</c></tag>
+ <item>
+ If <c><anno>Module</anno></c> is not an atom.
+ </item>
+ <tag><c>badarg</c></tag>
+ <item>
+ If <c><anno>OptionList</anno></c> is not a valid list of options.
+ </item>
+ </taglist>
</desc>
</func>
<func>
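As an aside (not part of the patch), the {allow_gc, false} option documented
above lends itself to a small sketch; the helper name is invented:

    %% Ask whether Pid is running old code for Module, but never let the
    %% request trigger a garbage collection of Pid. If a GC would be needed
    %% to decide, the operation is aborted.
    check_no_gc(Pid, Module) ->
        case erlang:check_process_code(Pid, Module, [{allow_gc, false}]) of
            aborted -> unknown;   % could not be decided without a GC
            Bool    -> Bool       % true | false
        end.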
@@ -1197,20 +1268,74 @@ true
that the spontaneous garbage collection will occur too late
or not at all. Improper use may seriously degrade system
performance.</p>
- <p>Compatibility note: In versions of OTP prior to R7,
- the garbage collection took place at the next context switch,
- not immediately. To force a context switch after a call to
- <c>erlang:garbage_collect()</c>, it was sufficient to make
- any function call.</p>
</desc>
</func>
<func>
<name name="garbage_collect" arity="1"/>
- <fsummary>Force an immediate garbage collection of a process</fsummary>
+ <fsummary>Garbage collect a process</fsummary>
+ <desc>
+ <p>The same as
+ <seealso marker="#garbage_collect/2"><c>garbage_collect(<anno>Pid</anno>, [])</c></seealso>.</p>
+ </desc>
+ </func>
+ <func>
+ <name name="garbage_collect" arity="2"/>
+ <fsummary>Garbage collect a process</fsummary>
<desc>
- <p>Works like <c>erlang:garbage_collect()</c> but on any
- process. The same caveats apply. Returns <c>false</c> if
- <c><anno>Pid</anno></c> refers to a dead process; <c>true</c> otherwise.</p>
+ <p>Garbage collect the node local process identified by
+ <c><anno>Pid</anno></c>.</p>
+ <p>Currently available <c><anno>Option</anno></c>s:</p>
+ <taglist>
+ <tag><c>{async, RequestId}</c></tag>
+ <item>
+ The <c>garbage_collect/2</c> function will return
+ the value <c>async</c> immediately after the request
+ has been sent. When the request has been processed, the
+ process that called this function will be passed a
+ message of the form:<br/>
+ <c>{garbage_collect, <anno>RequestId</anno>, <anno>GCResult</anno>}</c>.
+ </item>
+ </taglist>
+ <p>If <c><anno>Pid</anno></c> equals <c>self()</c>, and
+ no <c>async</c> option has been passed, the garbage
+ collection will be performed at once, i.e. the same as
+ calling
+ <seealso marker="#garbage_collect/0">garbage_collect/0</seealso>.
+ In all other cases a request for garbage collection will
+ be sent to the process identified by <c><anno>Pid</anno></c>,
+ and will be handled when appropriate. If no <c>async</c>
+ option has been passed, the caller will block until
+ <c><anno>GCResult</anno></c> is available and can be
+ returned.</p>
+ <p><c><anno>GCResult</anno></c> informs about the result of
+ the garbage collection request:</p>
+ <taglist>
+ <tag><c>true</c></tag>
+ <item>
+ The process identified by <c><anno>Pid</anno></c> has
+ been garbage collected.
+ </item>
+ <tag><c>false</c></tag>
+ <item>
+ No garbage collection was performed, since the
+ process identified by <c><anno>Pid</anno></c>
+ terminated before the request could be satisfied.
+ </item>
+ </taglist>
+ <p>Note that the same caveats as for
+ <seealso marker="#garbage_collect/0">garbage_collect/0</seealso>
+ apply.</p>
+ <p>Failures:</p>
+ <taglist>
+ <tag><c>badarg</c></tag>
+ <item>
+ If <c><anno>Pid</anno></c> is not a node local process identifier.
+ </item>
+ <tag><c>badarg</c></tag>
+ <item>
+ If <c><anno>OptionList</anno></c> is not a valid list of options.
+ </item>
+ </taglist>
</desc>
</func>
<func>
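Editorial note, not part of the patch: the async option documented above is
what enables fan-out requests such as the following sketch (hypothetical
helper), which is the kind of parallel use the commit message anticipates for
code purging:

    %% Request garbage collection of several processes concurrently and
    %% then gather the replies.
    gc_many(Pids) ->
        Refs = [begin
                    Ref = make_ref(),
                    async = erlang:garbage_collect(P, [{async, Ref}]),
                    Ref
                end || P <- Pids],
        [receive {garbage_collect, Ref, Res} -> Res end || Ref <- Refs].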
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index c2f32ba089..8433fd826a 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -78,6 +78,7 @@ atom allocated_areas
atom allocator
atom allocator_sizes
atom alloc_util_allocators
+atom allow_gc
atom allow_passive_connect
atom already_loaded
atom amd64
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 649594a334..5239940a50 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -36,7 +36,7 @@
#include "erl_thr_progress.h"
static void set_default_trace_pattern(Eterm module);
-static Eterm check_process_code(Process* rp, Module* modp);
+static Eterm check_process_code(Process* rp, Module* modp, int allow_gc, int *redsp);
static void delete_code(Module* modp);
static void decrement_refc(BeamInstr* code);
static int is_native(BeamInstr* code);
@@ -427,69 +427,80 @@ check_old_code_1(BIF_ALIST_1)
}
Eterm
-check_process_code_2(BIF_ALIST_2)
+erts_check_process_code(Process *c_p, Eterm module, int allow_gc, int *redsp)
{
- Process* rp;
Module* modp;
+ Eterm res;
+ ErtsCodeIndex code_ix;
- if (is_not_atom(BIF_ARG_2)) {
- goto error;
- }
- if (is_internal_pid(BIF_ARG_1)) {
- Eterm res;
- ErtsCodeIndex code_ix;
-
- code_ix = erts_active_code_ix();
- modp = erts_get_module(BIF_ARG_2, code_ix);
- if (modp == NULL) { /* Doesn't exist. */
- return am_false;
- }
- erts_rlock_old_code(code_ix);
- if (modp->old.code == NULL) { /* No old code. */
- erts_runlock_old_code(code_ix);
- return am_false;
- }
- erts_runlock_old_code(code_ix);
-
-#ifdef ERTS_SMP
- rp = erts_pid2proc_suspend(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
-#else
- rp = erts_pid2proc(BIF_P, 0, BIF_ARG_1, 0);
-#endif
- if (!rp) {
- BIF_RET(am_false);
- }
- if (rp == ERTS_PROC_LOCK_BUSY) {
- ERTS_BIF_YIELD2(bif_export[BIF_check_process_code_2], BIF_P,
- BIF_ARG_1, BIF_ARG_2);
- }
- erts_rlock_old_code(code_ix);
- if (modp->old.code != NULL) { /* must check again */
- res = check_process_code(rp, modp);
- }
- else {
- res = am_false;
- }
- erts_runlock_old_code(code_ix);
-#ifdef ERTS_SMP
- if (BIF_P != rp) {
- erts_resume(rp, ERTS_PROC_LOCK_MAIN);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
+ (*redsp)++;
+
+ ASSERT(is_atom(module));
+
+ code_ix = erts_active_code_ix();
+ modp = erts_get_module(module, code_ix);
+ if (!modp)
+ return am_false;
+ erts_rlock_old_code(code_ix);
+ res = modp->old.code ? check_process_code(c_p, modp, allow_gc, redsp) : am_false;
+ erts_runlock_old_code(code_ix);
+
+ return res;
+}
+
+BIF_RETTYPE erts_internal_check_process_code_2(BIF_ALIST_2)
+{
+ int reds = 0;
+ Eterm res;
+ Eterm olist = BIF_ARG_2;
+ int allow_gc = 1;
+
+ if (is_not_atom(BIF_ARG_1))
+ goto badarg;
+
+ while (is_list(olist)) {
+ Eterm *lp = list_val(olist);
+ Eterm opt = CAR(lp);
+ if (is_tuple(opt)) {
+ Eterm* tp = tuple_val(opt);
+ switch (arityval(tp[0])) {
+ case 2:
+ switch (tp[1]) {
+ case am_allow_gc:
+ switch (tp[2]) {
+ case am_false:
+ allow_gc = 0;
+ break;
+ case am_true:
+ allow_gc = 1;
+ break;
+ default:
+ goto badarg;
+ }
+ break;
+ default:
+ goto badarg;
+ }
+ break;
+ default:
+ goto badarg;
+ }
}
-#endif
- BIF_RET(res);
- }
- else if (is_external_pid(BIF_ARG_1)
- && external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry) {
- BIF_RET(am_false);
+ else
+ goto badarg;
+ olist = CDR(lp);
}
+ if (is_not_nil(olist))
+ goto badarg;
- error:
+ res = erts_check_process_code(BIF_P, BIF_ARG_1, allow_gc, &reds);
+
+ BIF_RET2(res, reds);
+
+badarg:
BIF_ERROR(BIF_P, BADARG);
}
-
BIF_RETTYPE delete_module_1(BIF_ALIST_1)
{
ErtsCodeIndex code_ix;
@@ -710,7 +721,7 @@ set_default_trace_pattern(Eterm module)
}
static Eterm
-check_process_code(Process* rp, Module* modp)
+check_process_code(Process* rp, Module* modp, int allow_gc, int *redsp)
{
BeamInstr* start;
char* mod_start;
@@ -786,6 +797,8 @@ check_process_code(Process* rp, Module* modp)
if (done_gc) {
return am_true;
} else {
+ if (!allow_gc)
+ return am_aborted;
/*
* Try to get rid of this fun by garbage collecting.
* Clear both fvalue and ftrace to make sure they
@@ -796,7 +809,7 @@ check_process_code(Process* rp, Module* modp)
rp->ftrace = NIL;
done_gc = 1;
FLAGS(rp) |= F_NEED_FULLSWEEP;
- (void) erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
+ *redsp += erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
goto rescan;
}
}
@@ -850,6 +863,9 @@ check_process_code(Process* rp, Module* modp)
Uint lit_size;
struct erl_off_heap_header* oh;
+ if (!allow_gc)
+ return am_aborted;
+
/*
* Try to get rid of constants by garbage collecting.
* Clear both fvalue and ftrace.
@@ -859,11 +875,12 @@ check_process_code(Process* rp, Module* modp)
rp->ftrace = NIL;
done_gc = 1;
FLAGS(rp) |= F_NEED_FULLSWEEP;
- (void) erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
+ *redsp += erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
literals = (Eterm *) modp->old.code[MI_LITERALS_START];
lit_size = (Eterm *) modp->old.code[MI_LITERALS_END] - literals;
oh = (struct erl_off_heap_header *)
modp->old.code[MI_LITERALS_OFF_HEAP];
+ *redsp += lit_size / 10; /* Need a better value... */
erts_garbage_collect_literals(rp, literals, lit_size, oh);
}
}
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 755c5e6882..58bd6aac0b 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -3741,45 +3741,6 @@ BIF_RETTYPE now_0(BIF_ALIST_0)
/**********************************************************************/
-BIF_RETTYPE garbage_collect_1(BIF_ALIST_1)
-{
- int reds;
- Process *rp;
-
- if (is_not_pid(BIF_ARG_1)) {
- BIF_ERROR(BIF_P, BADARG);
- }
-
- if (BIF_P->common.id == BIF_ARG_1)
- rp = BIF_P;
- else {
-#ifdef ERTS_SMP
- rp = erts_pid2proc_suspend(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
- if (rp == ERTS_PROC_LOCK_BUSY)
- ERTS_BIF_YIELD1(bif_export[BIF_garbage_collect_1], BIF_P, BIF_ARG_1);
-#else
- rp = erts_proc_lookup(BIF_ARG_1);
-#endif
- if (!rp)
- BIF_RET(am_false);
- }
-
- /* The GC cost is taken for the process executing this BIF. */
-
- FLAGS(rp) |= F_NEED_FULLSWEEP;
- reds = erts_garbage_collect(rp, 0, rp->arg_reg, rp->arity);
-
-#ifdef ERTS_SMP
- if (BIF_P != rp) {
- erts_resume(rp, ERTS_PROC_LOCK_MAIN);
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
- }
-#endif
-
- BIF_RET2(am_true, reds);
-}
-
BIF_RETTYPE garbage_collect_0(BIF_ALIST_0)
{
int reds;
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index dc8e9101de..3b7bb56885 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -46,7 +46,6 @@ bif erlang:atom_to_list/1
bif erlang:binary_to_list/1
bif erlang:binary_to_list/3
bif erlang:binary_to_term/1
-bif erlang:check_process_code/2
bif erlang:crc32/1
bif erlang:crc32/2
bif erlang:crc32_combine/3
@@ -67,7 +66,6 @@ bif erlang:float_to_list/1
bif erlang:float_to_list/2
bif erlang:fun_info/2
bif erlang:garbage_collect/0
-bif erlang:garbage_collect/1
bif erlang:get/0
bif erlang:get/1
bif erlang:get_keys/1
@@ -155,6 +153,10 @@ bif erts_internal:port_control/3
bif erts_internal:port_close/1
bif erts_internal:port_connect/2
+bif erts_internal:request_system_task/3
+bif erts_internal:check_process_code/2
+
+
# inet_db support
bif erlang:port_set_data/2
bif erlang:port_get_data/1
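The preloaded erlang.erl and erts_internal.erl wrappers are listed in the
diffstat but not shown in this excerpt. As a rough, hypothetical sketch only:
the request tuple layout {ReqType, ReqId, Args...}, inferred from the option
parsing in erts_internal_request_system_task_3 further below, suggests a
wrapper along these lines:

    %% NOT the actual preloaded code; illustrative only.
    garbage_collect(Pid, OptList) ->
        ReqId = case lists:keyfind(async, 1, OptList) of
                    {async, Id} -> Id;
                    false       -> make_ref()
                end,
        {priority, Prio} = process_info(self(), priority),
        ok = erts_internal:request_system_task(Pid, Prio,
                                               {garbage_collect, ReqId}),
        case lists:keymember(async, 1, OptList) of
            true  -> async;
            false -> receive {garbage_collect, ReqId, Res} -> Res end
        end.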
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index ad9a89b642..acbd700cc4 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -109,10 +109,12 @@ process_killer(void)
erts_smp_proc_lock(rp, rp_locks);
state = erts_smp_atomic32_read_acqb(&rp->state);
if (state & (ERTS_PSFLG_FREE
- | ERTS_PSFLG_EXITING
- | ERTS_PSFLG_ACTIVE
- | ERTS_PSFLG_IN_RUNQ
- | ERTS_PSFLG_RUNNING)) {
+ | ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_IN_RUNQ
+ | ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS)) {
erts_printf("Can only kill WAITING processes this way\n");
}
else {
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index bb5eba80be..32308fae9b 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -268,6 +268,8 @@ type PROC_INTERVAL LONG_LIVED SYSTEM process_interval
type BUSY_CALLER_TAB SHORT_LIVED SYSTEM busy_caller_table
type BUSY_CALLER SHORT_LIVED SYSTEM busy_caller
type PORT_DATA_HEAP STANDARD SYSTEM port_data_heap
+type PROC_SYS_TSK SHORT_LIVED PROCESSES proc_sys_task
+type PROC_SYS_TSK_QS SHORT_LIVED PROCESSES proc_sys_task_queues
+if threads_no_smp
# Need thread safe allocs, but std_alloc and fix_alloc are not;
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index 771eba431f..0f3bb8d281 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -46,6 +46,11 @@ typedef struct erl_off_heap {
Uint64 overhead; /* Administrative overhead (used to force GC). */
} ErlOffHeap;
+#define ERTS_INIT_OFF_HEAP(OHP) \
+ do { \
+ (OHP)->first = NULL; \
+ (OHP)->overhead = 0; \
+ } while (0)
#include "external.h"
#include "erl_process.h"
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 434d5ca147..ad806da660 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -283,6 +283,40 @@ struct erts_system_profile_flags_t erts_system_profile_flags;
#error "Need to store process_count in another type"
#endif
+typedef enum {
+ ERTS_PSTT_GC, /* Garbage Collect */
+ ERTS_PSTT_CPC /* Check Process Code */
+} ErtsProcSysTaskType;
+
+#define ERTS_MAX_PROC_SYS_TASK_ARGS 2
+
+struct ErtsProcSysTask_ {
+ ErtsProcSysTask *next;
+ ErtsProcSysTask *prev;
+ ErtsProcSysTaskType type;
+ Eterm requester;
+ Eterm reply_tag;
+ Eterm req_id;
+ Uint req_id_sz;
+ Eterm arg[ERTS_MAX_PROC_SYS_TASK_ARGS];
+ ErlOffHeap off_heap;
+ Eterm heap[1];
+};
+
+#define ERTS_PROC_SYS_TASK_SIZE(HSz) \
+ (sizeof(ErtsProcSysTask) - sizeof(Eterm) + sizeof(Eterm)*(HSz))
+
+struct ErtsProcSysTaskQs_ {
+ int qmask;
+ int ncount;
+ ErtsProcSysTask *q[ERTS_NO_PROC_PRIO_LEVELS];
+};
+
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proc_sys_task_queues,
+ ErtsProcSysTaskQs,
+ 50,
+ ERTS_ALC_T_PROC_SYS_TSK_QS)
+
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(misc_op_list,
ErtsMiscOpList,
10,
@@ -353,6 +387,14 @@ static void aux_work_timeout_early_init(int no_schedulers);
static void aux_work_timeout_late_init(void);
static void setup_aux_work_timer(void);
+static int execute_sys_tasks(Process *c_p,
+ erts_aint32_t *statep,
+ int in_reds);
+static int cleanup_sys_tasks(Process *c_p,
+ erts_aint32_t in_state,
+ int in_reds);
+
+
#if defined(DEBUG) || 0
#define ERTS_DBG_CHK_AUX_WORK_VAL(V) dbg_chk_aux_work_val((V))
static void
@@ -2848,7 +2890,7 @@ dequeue_process(ErtsRunQueue *runq, int prio_q, erts_aint32_t *statep)
if (statep)
*statep = state;
- prio = (int) (ERTS_PSFLG_PRIO_MASK & state);
+ prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state);
rqi = &runq->procs.prio_info[prio];
@@ -2997,7 +3039,7 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp)
erts_aint32_t state;
state = erts_smp_atomic32_read_acqb(&proc->state);
if (!(ERTS_PSFLG_BOUND & state)
- && (prio == (int) (ERTS_PSFLG_PRIO_MASK & state))) {
+ && (prio == (int) ERTS_PSFLGS_GET_PRQ_PRIO(state))) {
ErtsRunQueueInfo *rqi = &rq->procs.prio_info[prio];
unqueue_process(rq, rpq, rqi, prio, prev_proc, proc);
erts_smp_runq_unlock(rq);
@@ -3079,7 +3121,7 @@ schedule_bound_processes(ErtsRunQueue *rq,
while (proc) {
erts_aint32_t state = erts_smp_atomic32_read_acqb(&proc->state);
next = proc->next;
- enqueue_process(rq, (int) (ERTS_PSFLG_PRIO_MASK & state), proc);
+ enqueue_process(rq, (int) ERTS_PSFLGS_GET_PRQ_PRIO(state), proc);
proc = next;
}
}
@@ -3184,7 +3226,7 @@ evacuate_run_queue(ErtsRunQueue *rq,
sbpp->last = proc;
}
else {
- int prio = (int) (ERTS_PSFLG_PRIO_MASK & state);
+ int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state);
erts_smp_runq_unlock(rq);
to_rq = mp->prio[prio].runq;
@@ -3257,7 +3299,7 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq,
erts_aint32_t state = erts_smp_atomic32_read_acqb(&proc->state);
if (!(ERTS_PSFLG_BOUND & state)) {
/* Steal process */
- int prio = (int) (ERTS_PSFLG_PRIO_MASK & state);
+ int prio = (int) ERTS_PSFLGS_GET_PRQ_PRIO(state);
ErtsRunQueueInfo *rqi = &vrq->procs.prio_info[prio];
unqueue_process(vrq, rpq, rqi, prio, prev_proc, proc);
erts_smp_runq_unlock(vrq);
@@ -4575,6 +4617,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
#endif
init_misc_op_list_alloc();
+ init_proc_sys_task_queues_alloc();
#ifdef ERTS_SMP
set_wakeup_other_data();
@@ -4858,46 +4901,166 @@ erts_get_scheduler_data(void)
#endif
+static Process *
+make_proxy_proc(Process *prev_proxy, Process *proc, erts_aint32_t prio)
+{
+ erts_aint32_t state;
+ Process *proxy;
+#ifdef ERTS_SMP
+ ErtsRunQueue *rq = RUNQ_READ_RQ(&proc->run_queue);
+#endif
+
+ state = (ERTS_PSFLG_PROXY
+ | ERTS_PSFLG_IN_RUNQ
+ | (((erts_aint32_t) 1) << (prio + ERTS_PSFLGS_IN_PRQ_MASK_OFFSET))
+ | (prio << ERTS_PSFLGS_PRQ_PRIO_OFFSET)
+ | (prio << ERTS_PSFLGS_USR_PRIO_OFFSET)
+ | (prio << ERTS_PSFLGS_ACT_PRIO_OFFSET));
+
+ if (prev_proxy) {
+ proxy = prev_proxy;
+ ASSERT(erts_smp_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY);
+ erts_smp_atomic32_set_nob(&proxy->state, state);
+#ifdef ERTS_SMP
+ RUNQ_SET_RQ(&proc->run_queue, rq);
+#endif
+ }
+ else {
+ proxy = erts_alloc(ERTS_ALC_T_PROC, sizeof(Process));
+#ifdef DEBUG
+ {
+ int i;
+ Uint32 *ui32 = (Uint32 *) (char *) proxy;
+ for (i = 0; i < sizeof(Process)/sizeof(Uint32); i++)
+ ui32[i] = (Uint32) 0xdeadbeef;
+ }
+#endif
+ erts_smp_atomic32_init_nob(&proxy->state, state);
+#ifdef ERTS_SMP
+ erts_smp_atomic_init_nob(&proxy->run_queue,
+ erts_smp_atomic_read_nob(&proc->run_queue));
+#endif
+ }
+
+ proxy->common.id = proc->common.id;
+
+ return proxy;
+}
+
+static ERTS_INLINE void
+free_proxy_proc(Process *proxy)
+{
+ ASSERT(erts_smp_atomic32_read_nob(&proxy->state) & ERTS_PSFLG_PROXY);
+ erts_free(ERTS_ALC_T_PROC, proxy);
+}
+
+
+static ERTS_INLINE int
+check_enqueue_in_prio_queue(erts_aint32_t *prq_prio_p,
+ erts_aint32_t *newp,
+ erts_aint32_t actual)
+{
+ erts_aint32_t aprio, qbit, max_qbit;
+
+ aprio = (actual >> ERTS_PSFLGS_ACT_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK;
+ qbit = 1 << aprio;
+
+ *prq_prio_p = aprio;
+
+ max_qbit = (actual >> ERTS_PSFLGS_IN_PRQ_MASK_OFFSET) & ERTS_PSFLGS_QMASK;
+ max_qbit |= 1 << ERTS_PSFLGS_QMASK_BITS;
+ max_qbit &= -max_qbit;
+ /*
+ * max_qbit now either contains the bit set for the highest prio queue, or a
+ * bit out of range (which has a value larger than any valid queue bit).
+ */
+
+ if (qbit >= max_qbit)
+ return 0; /* Already queued in higher or equal prio */
+
+ /* Need to enqueue (if already enqueued, it is in lower prio) */
+ *newp |= qbit << ERTS_PSFLGS_IN_PRQ_MASK_OFFSET;
+
+ if ((actual & (ERTS_PSFLG_IN_RUNQ|ERTS_PSFLGS_USR_PRIO_MASK))
+ != (aprio << ERTS_PSFLGS_USR_PRIO_OFFSET)) {
+ /*
+ * Process struct already enqueued, or actual prio not
+ * equal to user prio, i.e., enqueue using proxy.
+ */
+ return -1;
+ }
+
+ /*
+ * Enqueue using process struct.
+ */
+ *newp &= ~ERTS_PSFLGS_PRQ_PRIO_MASK;
+ *newp |= ERTS_PSFLG_IN_RUNQ | (aprio << ERTS_PSFLGS_PRQ_PRIO_OFFSET);
+ return 1;
+}
+
/*
* scheduler_out_process() return with c_rq locked.
*/
static ERTS_INLINE int
-schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p)
+schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Process *proxy)
{
- erts_aint32_t a, e, n;
+ erts_aint32_t a, e, n, enq_prio = -1;
int res = 0;
+ int enqueue; /* < 0 -> use proxy */
a = state;
while (1) {
n = e = a;
- ASSERT(a & ERTS_PSFLG_RUNNING);
- ASSERT(!(a & ERTS_PSFLG_IN_RUNQ));
+ ASSERT(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS));
- n &= ~ERTS_PSFLG_RUNNING;
- if ((a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE)
- n |= ERTS_PSFLG_IN_RUNQ;
+ enqueue = 0;
+
+ n &= ~(ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS);
+ if (a & ERTS_PSFLG_ACTIVE_SYS
+ || (a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) {
+ enqueue = check_enqueue_in_prio_queue(&enq_prio, &n, a);
+ }
a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
if (a == e)
break;
}
- if (!(n & ERTS_PSFLG_IN_RUNQ)) {
- if (erts_system_profile_flags.runnable_procs)
- profile_runnable_proc(p, am_inactive);
+ if (!enqueue) {
+
+ if (erts_system_profile_flags.runnable_procs) {
+
+ if (!(a & ERTS_PSFLG_ACTIVE_SYS)
+ && (!(a & ERTS_PSFLG_ACTIVE)
+ || (a & ERTS_PSFLG_SUSPENDED))) {
+ /* Process inactive */
+ profile_runnable_proc(p, am_inactive);
+ }
+ }
+
+ if (proxy)
+ free_proxy_proc(proxy);
}
else {
- int prio = (int) (ERTS_PSFLG_PRIO_MASK & n);
+ Process *sched_p;
ErtsRunQueue *runq = erts_get_runq_proc(p);
- ASSERT(!(n & ERTS_PSFLG_SUSPENDED));
+ ASSERT(!(n & ERTS_PSFLG_SUSPENDED) || (n & ERTS_PSFLG_ACTIVE_SYS));
+
+ if (enqueue < 0)
+ sched_p = make_proxy_proc(proxy, p, enq_prio);
+ else {
+ sched_p = p;
+ if (proxy)
+ free_proxy_proc(proxy);
+ }
#ifdef ERTS_SMP
if (!(ERTS_PSFLG_BOUND & n)) {
- ErtsRunQueue *new_runq = erts_check_emigration_need(runq, prio);
+ ErtsRunQueue *new_runq = erts_check_emigration_need(runq, enq_prio);
if (new_runq) {
- RUNQ_SET_RQ(&p->run_queue, new_runq);
+ RUNQ_SET_RQ(&sched_p->run_queue, new_runq);
runq = new_runq;
}
}
@@ -4908,7 +5071,7 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p)
erts_smp_runq_lock(runq);
/* Enqueue the process */
- enqueue_process(runq, prio, p);
+ enqueue_process(runq, (int) enq_prio, sched_p);
if (runq == c_rq)
return res;
@@ -4920,14 +5083,13 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p)
}
static ERTS_INLINE void
-add2runq(Process *p, erts_aint32_t state)
+add2runq(Process *p, erts_aint32_t state, erts_aint32_t prio)
{
- int prio = (int) (ERTS_PSFLG_PRIO_MASK & state);
ErtsRunQueue *runq = erts_get_runq_proc(p);
#ifdef ERTS_SMP
if (!(ERTS_PSFLG_BOUND & state)) {
- ErtsRunQueue *new_runq = erts_check_emigration_need(runq, prio);
+ ErtsRunQueue *new_runq = erts_check_emigration_need(runq, (int) prio);
if (new_runq) {
RUNQ_SET_RQ(&p->run_queue, new_runq);
runq = new_runq;
@@ -4939,101 +5101,236 @@ add2runq(Process *p, erts_aint32_t state)
erts_smp_runq_lock(runq);
/* Enqueue the process */
- enqueue_process(runq, prio, p);
+ enqueue_process(runq, (int) prio, p);
erts_smp_runq_unlock(runq);
smp_notify_inc_runq(runq);
}
-static ERTS_INLINE void
-schedule_process(Process *p, erts_aint32_t state, int active_enq)
+static ERTS_INLINE int
+change_proc_schedule_state(Process *p,
+ erts_aint32_t clear_state_flags,
+ erts_aint32_t set_state_flags,
+ erts_aint32_t *statep,
+ erts_aint32_t *enq_prio_p)
{
- erts_aint32_t a = state, n;
+ /*
+ * NOTE: ERTS_PSFLG_RUNNING, ERTS_PSFLG_RUNNING_SYS and
+ * ERTS_PSFLG_ACTIVE_SYS are not allowed to be
+ * altered by this function!
+ */
+ erts_aint32_t a = *statep, n;
+ int enqueue; /* < 0 -> use proxy */
+
+ ASSERT(!(a & ERTS_PSFLG_PROXY));
+ ASSERT((clear_state_flags & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_ACTIVE_SYS)) == 0);
+ ASSERT((set_state_flags & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_ACTIVE_SYS)) == 0);
while (1) {
erts_aint32_t e;
n = e = a;
+ enqueue = 0;
+
if (a & ERTS_PSFLG_FREE)
- return; /* We don't want to schedule free processes... */
- n |= ERTS_PSFLG_ACTIVE;
- if (!(a & (ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_RUNNING)))
- n |= ERTS_PSFLG_IN_RUNQ;
+ break; /* We don't want to schedule free processes... */
+
+ if (clear_state_flags)
+ n &= ~clear_state_flags;
+
+ if (set_state_flags)
+ n |= set_state_flags;
+
+ if ((n & (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_IN_RUNQ
+ | ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE) {
+ /*
+ * Active and seemingly need to be enqueued, but
+ * process may be in a run queue via proxy, need
+ * further inspection...
+ */
+ enqueue = check_enqueue_in_prio_queue(enq_prio_p, &n, a);
+ }
+
a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
if (a == e)
break;
- if (!active_enq && (a & ERTS_PSFLG_ACTIVE))
- return; /* Someone else activated process ... */
+ if (enqueue == 0 && n == a)
+ break;
}
- if (erts_system_profile_flags.runnable_procs
- && !(a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED))) {
- profile_runnable_proc(p, am_active);
+ if (erts_system_profile_flags.runnable_procs) {
+
+ if (((n & (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE)
+ && (!(a & (ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS)
+ && (!(a & ERTS_PSFLG_ACTIVE)
+ || (a & ERTS_PSFLG_SUSPENDED))))) {
+ /* We activated a previously inactive process */
+ profile_runnable_proc(p, am_active);
+ }
+
}
- if ((n & ERTS_PSFLG_IN_RUNQ) && !(a & ERTS_PSFLG_IN_RUNQ))
- add2runq(p, n);
+ *statep = a;
+
+ return enqueue;
+}
+
+static ERTS_INLINE void
+schedule_process(Process *p, erts_aint32_t in_state)
+{
+ erts_aint32_t enq_prio = -1;
+ erts_aint32_t state = in_state;
+ int enqueue = change_proc_schedule_state(p,
+ 0,
+ ERTS_PSFLG_ACTIVE,
+ &state,
+ &enq_prio);
+ if (enqueue)
+ add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
+ state,
+ enq_prio);
}
void
erts_schedule_process(Process *p, erts_aint32_t state)
{
- schedule_process(p, state, 0);
+ schedule_process(p, state);
+}
+
+static void
+schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
+{
+ erts_aint32_t a = state, n, enq_prio = -1;
+ int enqueue; /* < 0 -> use proxy */
+
+ ASSERT(!(state & ERTS_PSFLG_PROXY));
+
+ while (1) {
+ erts_aint32_t e;
+ n = e = a;
+
+ if (a & ERTS_PSFLG_FREE)
+ return; /* We don't want to schedule free processes... */
+
+ enqueue = 0;
+ n |= ERTS_PSFLG_ACTIVE_SYS;
+ if (!(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)))
+ enqueue = check_enqueue_in_prio_queue(&enq_prio, &n, a);
+ a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
+ if (a == e)
+ break;
+ if (a == n && !enqueue)
+ goto cleanup;
+ }
+
+ if (erts_system_profile_flags.runnable_procs) {
+
+ if (!(a & (ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS))
+ && (!(a & ERTS_PSFLG_ACTIVE) || (a & ERTS_PSFLG_SUSPENDED))) {
+ /* We activated a previously inactive process */
+ profile_runnable_proc(p, am_active);
+ }
+
+ }
+
+ if (enqueue) {
+ Process *sched_p;
+ if (enqueue > 0)
+ sched_p = p;
+ else {
+ sched_p = make_proxy_proc(proxy, p, enq_prio);
+ proxy = NULL;
+ }
+ add2runq(sched_p, n, enq_prio);
+ }
+
+cleanup:
+ if (proxy)
+ free_proxy_proc(proxy);
}
static ERTS_INLINE int
suspend_process(Process *c_p, Process *p)
{
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
+ erts_aint32_t state;
int suspended = 0;
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+ state = erts_smp_atomic32_read_acqb(&p->state);
+
if ((state & ERTS_PSFLG_SUSPENDED))
suspended = -1;
else {
if (c_p == p) {
state = erts_smp_atomic32_read_bor_relb(&p->state,
ERTS_PSFLG_SUSPENDED);
- state |= ERTS_PSFLG_SUSPENDED;
ASSERT(state & ERTS_PSFLG_RUNNING);
- suspended = 1;
+ suspended = (state & ERTS_PSFLG_SUSPENDED) ? -1: 1;
}
else {
while (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_EXITING))) {
- erts_aint32_t e, n;
+ erts_aint32_t n, e;
+
n = e = state;
n |= ERTS_PSFLG_SUSPENDED;
state = erts_smp_atomic32_cmpxchg_relb(&p->state, n, e);
if (state == e) {
- state = n;
suspended = 1;
break;
}
+ if (state & ERTS_PSFLG_SUSPENDED) {
+ suspended = -1;
+ break;
+ }
}
}
}
- if (state & ERTS_PSFLG_SUSPENDED) {
+ if (suspended) {
ASSERT(!(ERTS_PSFLG_RUNNING & state)
|| p == erts_get_current_process());
- if (erts_system_profile_flags.runnable_procs
- && (p->rcount == 0)
- && (state & ERTS_PSFLG_ACTIVE)) {
- profile_runnable_proc(p, am_inactive);
+ if (suspended > 0 && erts_system_profile_flags.runnable_procs) {
+
+ /* 'state' is before our change... */
+
+ if ((state & (ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) {
+ /* We made process inactive */
+ profile_runnable_proc(p, am_inactive);
+ }
+
}
p->rcount++; /* count number of suspend */
}
+
return suspended;
}
static ERTS_INLINE void
resume_process(Process *p)
{
- erts_aint32_t state;
+ erts_aint32_t state, enq_prio = -1;
+ int enqueue;
+
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
ASSERT(p->rcount > 0);
@@ -5041,14 +5338,16 @@ resume_process(Process *p)
if (--p->rcount > 0) /* multiple suspend */
return;
- state = erts_smp_atomic32_read_band_mb(&p->state, ~ERTS_PSFLG_SUSPENDED);
- state &= ~ERTS_PSFLG_SUSPENDED;
- if ((state & (ERTS_PSFLG_EXITING
- | ERTS_PSFLG_ACTIVE
- | ERTS_PSFLG_IN_RUNQ
- | ERTS_PSFLG_RUNNING)) == ERTS_PSFLG_ACTIVE) {
- schedule_process(p, state, 1);
- }
+ state = erts_smp_atomic32_read_nob(&p->state);
+ enqueue = change_proc_schedule_state(p,
+ ERTS_PSFLG_SUSPENDED,
+ 0,
+ &state,
+ &enq_prio);
+ if (enqueue)
+ add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
+ state,
+ enq_prio);
}
int
@@ -6032,7 +6331,8 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
goto done;
}
else {
- if (!(ERTS_PSFLG_RUNNING & erts_smp_atomic32_read_acqb(&rp->state)))
+ if (!((ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)
+ & erts_smp_atomic32_read_acqb(&rp->state)))
goto done;
}
@@ -6695,7 +6995,7 @@ Eterm
erts_get_process_priority(Process *p)
{
erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
- switch (state & ERTS_PSFLG_PRIO_MASK) {
+ switch (ERTS_PSFLGS_GET_USR_PRIO(state)) {
case PRIORITY_MAX: return am_max;
case PRIORITY_HIGH: return am_high;
case PRIORITY_NORMAL: return am_normal;
@@ -6718,18 +7018,60 @@ erts_set_process_priority(Process *p, Eterm value)
}
a = erts_smp_atomic32_read_nob(&p->state);
- if (nprio == (a & ERTS_PSFLG_PRIO_MASK))
+ if (nprio == ERTS_PSFLGS_GET_USR_PRIO(a))
oprio = nprio;
else {
- erts_aint32_t e, n;
+ int slocked = 0;
+ erts_aint32_t e, n, aprio;
+
+ if (a & ERTS_PSFLG_ACTIVE_SYS) {
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ slocked = 1;
+ }
+
do {
- oprio = a & ERTS_PSFLG_PRIO_MASK;
+ oprio = ERTS_PSFLGS_GET_USR_PRIO(a);
n = e = a;
- ASSERT(!(a & ERTS_PSFLG_IN_RUNQ));
+ if (!(a & ERTS_PSFLG_ACTIVE_SYS))
+ aprio = nprio;
+ else {
+ int max_qbit;
+
+ if (!slocked) {
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ slocked = 1;
+ }
+
+ max_qbit = p->sys_task_qs->qmask;
+ max_qbit &= -max_qbit;
+ switch (max_qbit) {
+ case MAX_BIT:
+ aprio = PRIORITY_MAX;
+ break;
+ case HIGH_BIT:
+ aprio = PRIORITY_HIGH;
+ break;
+ case NORMAL_BIT:
+ aprio = PRIORITY_NORMAL;
+ break;
+ case LOW_BIT:
+ aprio = PRIORITY_LOW;
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid qmask");
+ aprio = -1;
+ }
+
+ if (aprio > nprio) /* low value -> high prio */
+ aprio = nprio;
+ }
+
+ n &= ~(ERTS_PSFLGS_USR_PRIO_MASK
+ | ERTS_PSFLGS_ACT_PRIO_MASK);
+ n |= ((nprio << ERTS_PSFLGS_USR_PRIO_OFFSET)
+ | (aprio << ERTS_PSFLGS_ACT_PRIO_OFFSET));
- n &= ~ERTS_PSFLG_PRIO_MASK;
- n |= nprio;
a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
} while (a != e);
}
@@ -6763,6 +7105,7 @@ erts_set_process_priority(Process *p, Eterm value)
Process *schedule(Process *p, int calls)
{
+ Process *proxy_p = NULL;
ErtsRunQueue *rq;
erts_aint_t dt;
ErtsSchedulerData *esdp;
@@ -6805,6 +7148,8 @@ Process *schedule(Process *p, int calls)
actual_reds = reds = 0;
erts_smp_runq_lock(rq);
} else {
+ sched_out_proc:
+
#ifdef ERTS_SMP
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
esdp = p->scheduler_data;
@@ -6858,10 +7203,11 @@ Process *schedule(Process *p, int calls)
esdp->reductions += reds;
- schedule_out_process(rq, state, p); /* Returns with rq locked! */
+ schedule_out_process(rq, state, p, proxy_p); /* Returns with rq locked! */
+ proxy_p = NULL;
ERTS_PROC_REDUCTIONS_EXECUTED(rq,
- (int) (state & ERTS_PSFLG_PRIO_MASK),
+ (int) ERTS_PSFLGS_GET_USR_PRIO(state),
reds,
actual_reds);
@@ -6870,18 +7216,19 @@ Process *schedule(Process *p, int calls)
p->scheduler_data = NULL;
#endif
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
if (state & ERTS_PSFLG_FREE) {
#ifdef ERTS_SMP
ASSERT(esdp->free_process == p);
esdp->free_process = NULL;
-#else
- erts_free_proc(p);
+#else
+ state = erts_smp_atomic32_read_nob(&p->state);
+ if (!(state & ERTS_PSFLG_IN_RUNQ))
+ erts_free_proc(p);
#endif
}
- erts_smp_proc_unlock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
-
#ifdef ERTS_SMP
ASSERT(!esdp->free_process);
#endif
@@ -7088,6 +7435,8 @@ Process *schedule(Process *p, int calls)
* Find a new process to run.
*/
pick_next_process: {
+ erts_aint32_t psflg_band_mask;
+ erts_aint32_t running_flag;
int prio_q;
int qmask;
@@ -7121,18 +7470,62 @@ Process *schedule(Process *p, int calls)
ASSERT(p); /* Wrong qmask in rq->flags? */
+ psflg_band_mask = ~(((erts_aint32_t) 1) << (ERTS_PSFLGS_GET_PRQ_PRIO(state)
+ + ERTS_PSFLGS_IN_PRQ_MASK_OFFSET));
+
+ if (!(state & ERTS_PSFLG_PROXY))
+ psflg_band_mask &= ~ERTS_PSFLG_IN_RUNQ;
+ else {
+ proxy_p = p;
+ p = erts_proc_lookup_raw(proxy_p->common.id);
+ if (!p) {
+ free_proxy_proc(proxy_p);
+ proxy_p = NULL;
+ goto pick_next_process;
+ }
+ state = erts_smp_atomic32_read_nob(&p->state);
+ }
+
+
+ if (state & ERTS_PSFLG_ACTIVE_SYS)
+ running_flag = ERTS_PSFLG_RUNNING_SYS;
+ else
+ running_flag = ERTS_PSFLG_RUNNING;
+
while (1) {
erts_aint32_t exp, new, tmp;
tmp = new = exp = state;
- new &= ~ERTS_PSFLG_IN_RUNQ;
- tmp = state & (ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT);
- if (tmp != ERTS_PSFLG_SUSPENDED)
- new |= ERTS_PSFLG_RUNNING;
+ new &= psflg_band_mask;
+ if (!(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS))) {
+ tmp = state & (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_PENDING_EXIT
+ | ERTS_PSFLG_ACTIVE_SYS);
+ if (tmp != ERTS_PSFLG_SUSPENDED)
+ new |= running_flag;
+ }
state = erts_smp_atomic32_cmpxchg_relb(&p->state, new, exp);
if (state == exp) {
- tmp = state & (ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT);
- if (tmp == ERTS_PSFLG_SUSPENDED)
+ if ((state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_FREE))
+ || ((state & (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_PENDING_EXIT
+ | ERTS_PSFLG_ACTIVE_SYS))
+ == ERTS_PSFLG_SUSPENDED)) {
+ if (state & ERTS_PSFLG_FREE) {
+#ifdef ERTS_SMP
+ erts_smp_proc_dec_refc(p);
+#else
+ erts_free_proc(p);
+#endif
+ }
+ if (proxy_p) {
+ free_proxy_proc(proxy_p);
+ proxy_p = NULL;
+ }
goto pick_next_process;
+ }
state = new;
break;
}
@@ -7162,7 +7555,7 @@ Process *schedule(Process *p, int calls)
(UWord) esdp->no);
int migrated = old && old != esdp->no;
- prio = (int) (state & ERTS_PSFLG_PRIO_MASK);
+ prio = (int) ERTS_PSFLGS_GET_USR_PRIO(state);
erts_smp_spin_lock(&erts_sched_stat.lock);
erts_sched_stat.prio[prio].total_executed++;
@@ -7182,9 +7575,6 @@ Process *schedule(Process *p, int calls)
ASSERT(!p->scheduler_data);
p->scheduler_data = esdp;
#endif
- /* Never run a suspended process */
- ASSERT(!(ERTS_PSFLG_SUSPENDED & erts_smp_atomic32_read_nob(&p->state)));
-
reds = context_reds;
if (IS_TRACED(p)) {
@@ -7210,21 +7600,574 @@ Process *schedule(Process *p, int calls)
erts_check_my_tracer_proc(p);
#endif
+ if (state & ERTS_PSFLG_RUNNING_SYS) {
+ reds -= execute_sys_tasks(p, &state, reds);
+ if (reds <= 0) {
+ p->fcalls = reds;
+ goto sched_out_proc;
+ }
+
+ ASSERT(state & ERTS_PSFLG_RUNNING_SYS);
+ ASSERT(!(state & ERTS_PSFLG_RUNNING));
+
+ while (1) {
+ erts_aint32_t n, e;
+
+ if (((state & (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_ACTIVE)) != ERTS_PSFLG_ACTIVE)
+ && !(state & ERTS_PSFLG_EXITING))
+ goto sched_out_proc;
+
+ n = e = state;
+ n &= ~ERTS_PSFLG_RUNNING_SYS;
+ n |= ERTS_PSFLG_RUNNING;
+
+ state = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
+ if (state == e) {
+ state = n;
+ break;
+ }
+
+ ASSERT(state & ERTS_PSFLG_RUNNING_SYS);
+ ASSERT(!(state & ERTS_PSFLG_RUNNING));
+ }
+ }
+
if (!(state & ERTS_PSFLG_EXITING)
&& ((FLAGS(p) & F_FORCE_GC)
|| (MSO(p).overhead > BIN_VHEAP_SZ(p)))) {
reds -= erts_garbage_collect(p, 0, p->arg_reg, p->arity);
- if (reds < 0) {
- reds = 1;
+ if (reds <= 0) {
+ p->fcalls = reds;
+ goto sched_out_proc;
}
}
+
+ if (proxy_p) {
+ free_proxy_proc(proxy_p);
+ proxy_p = NULL;
+ }
p->fcalls = reds;
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+
+ /* Never run a suspended process */
+ ASSERT(!(ERTS_PSFLG_SUSPENDED & erts_smp_atomic32_read_nob(&p->state)));
+
return p;
}
}
+static int
+notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, Eterm st_result)
+{
+ Process *rp = erts_proc_lookup(st->requester);
+ if (rp) {
+ ErtsProcLocks rp_locks;
+ ErlOffHeap *ohp;
+ ErlHeapFragment* bp;
+ Eterm *hp, msg, req_id, result;
+ Uint st_result_sz, hsz;
+#ifdef DEBUG
+ Eterm *hp_start;
+#endif
+
+ rp_locks = (c_p == rp) ? ERTS_PROC_LOCK_MAIN : 0;
+
+ st_result_sz = is_immed(st_result) ? 0 : size_object(st_result);
+ hsz = st->req_id_sz + st_result_sz + 4 /* 3-tuple */;
+
+ hp = erts_alloc_message_heap(hsz,
+ &bp,
+ &ohp,
+ rp,
+ &rp_locks);
+
+#ifdef DEBUG
+ hp_start = hp;
+#endif
+
+ req_id = st->req_id_sz == 0 ? st->req_id : copy_struct(st->req_id,
+ st->req_id_sz,
+ &hp,
+ ohp);
+
+ result = st_result_sz == 0 ? st_result : copy_struct(st_result,
+ st_result_sz,
+ &hp,
+ ohp);
+
+ ASSERT(is_immed(st->reply_tag));
+
+ msg = TUPLE3(hp, st->reply_tag, req_id, result);
+
+#ifdef DEBUG
+ hp += 4;
+ ASSERT(hp_start + hsz == hp);
+#endif
+
+ erts_queue_message(rp,
+ &rp_locks,
+ bp,
+ msg,
+ NIL
+#ifdef USE_VM_PROBES
+ , NIL
+#endif
+ );
+
+ if (c_p == rp)
+ rp_locks &= ~ERTS_PROC_LOCK_MAIN;
+
+ if (rp_locks)
+ erts_smp_proc_unlock(rp, rp_locks);
+ }
+
+ erts_cleanup_offheap(&st->off_heap);
+
+ erts_free(ERTS_ALC_T_PROC_SYS_TSK, st);
+
+ return rp ? 1 : 0;
+}
+
+static ERTS_INLINE ErtsProcSysTask *
+fetch_sys_task(Process *c_p, erts_aint32_t state, int *qmaskp)
+{
+ ErtsProcSysTaskQs *unused_qs = NULL;
+ int qbit, qmask;
+ ErtsProcSysTask *st, **qp;
+
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_STATUS);
+
+ if (!c_p->sys_task_qs) {
+ qmask = 0;
+ st = NULL;
+ goto update_state;
+ }
+
+ qmask = c_p->sys_task_qs->qmask;
+
+ if ((state & (ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) {
+ /* No sys tasks if we got exclusively higher prio user work to do */
+ st = NULL;
+ switch (ERTS_PSFLGS_GET_USR_PRIO(state)) {
+ case PRIORITY_MAX:
+ if (!(qmask & MAX_BIT))
+ goto done;
+ break;
+ case PRIORITY_HIGH:
+ if (!(qmask & (MAX_BIT|HIGH_BIT)))
+ goto done;
+ break;
+ default:
+ break;
+ }
+ }
+
+ qbit = qmask & -qmask;
+ switch (qbit) {
+ case MAX_BIT:
+ qp = &c_p->sys_task_qs->q[PRIORITY_MAX];
+ break;
+ case HIGH_BIT:
+ qp = &c_p->sys_task_qs->q[PRIORITY_HIGH];
+ break;
+ case NORMAL_BIT:
+ if (!(qmask & PRIORITY_LOW)
+ || ++c_p->sys_task_qs->ncount <= RESCHEDULE_LOW) {
+ qp = &c_p->sys_task_qs->q[PRIORITY_NORMAL];
+ break;
+ }
+ c_p->sys_task_qs->ncount = 0;
+ /* Fall through */
+ case LOW_BIT:
+ qp = &c_p->sys_task_qs->q[PRIORITY_LOW];
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid qmask");
+ }
+
+ st = *qp;
+ ASSERT(st);
+ if (st->next != st) {
+ *qp = st->next;
+ st->next->prev = st->prev;
+ st->prev->next = st->next;
+ }
+ else {
+ erts_aint32_t a, e, n, st_prio, qmask2;
+
+ *qp = NULL;
+ qmask &= ~qbit;
+ c_p->sys_task_qs->qmask = qmask;
+
+ update_state:
+
+ qmask2 = qmask;
+
+ switch (qmask2 & -qmask2) {
+ case MAX_BIT:
+ st_prio = PRIORITY_MAX;
+ break;
+ case HIGH_BIT:
+ st_prio = PRIORITY_HIGH;
+ break;
+ case NORMAL_BIT:
+ st_prio = PRIORITY_NORMAL;
+ break;
+ case LOW_BIT:
+ st_prio = PRIORITY_LOW;
+ break;
+ case 0:
+ st_prio = PRIORITY_LOW;
+ unused_qs = c_p->sys_task_qs;
+ c_p->sys_task_qs = NULL;
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid qmask");
+ }
+
+ a = state;
+ do {
+ erts_aint32_t prio = ERTS_PSFLGS_GET_USR_PRIO(a);
+
+ if (prio > st_prio)
+ prio = st_prio;
+
+ n = e = a;
+
+ n &= ~ERTS_PSFLGS_ACT_PRIO_MASK;
+ n |= (prio << ERTS_PSFLGS_ACT_PRIO_OFFSET);
+
+ if (!qmask)
+ n &= ~ERTS_PSFLG_ACTIVE_SYS;
+
+ if (a == n)
+ break;
+ a = erts_smp_atomic32_cmpxchg_nob(&c_p->state, n, e);
+ } while (a != e);
+ }
+
+done:
+
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_STATUS);
+
+ if (unused_qs)
+ proc_sys_task_queues_free(unused_qs);
+
+ *qmaskp = qmask;
+
+ return st;
+}
+
+static int
+execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
+{
+ int garbage_collected = 0;
+ erts_aint32_t state = *statep;
+ int max_reds = in_reds;
+ int reds = 0;
+ int qmask = 0;
+
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
+
+ do {
+ ErtsProcSysTask *st;
+ Eterm st_res;
+
+ if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT)) {
+#ifdef ERTS_SMP
+ if (state & ERTS_PSFLG_PENDING_EXIT)
+ erts_handle_pending_exit(c_p, ERTS_PROC_LOCK_MAIN);
+#endif
+ ASSERT(ERTS_PROC_IS_EXITING(c_p));
+ break;
+ }
+
+ st = fetch_sys_task(c_p, state, &qmask);
+ if (!st)
+ break;
+
+ switch (st->type) {
+ case ERTS_PSTT_GC:
+ if (!garbage_collected) {
+ FLAGS(c_p) |= F_NEED_FULLSWEEP;
+ reds += erts_garbage_collect(c_p, 0, c_p->arg_reg, c_p->arity);
+ garbage_collected = 1;
+ }
+ st_res = am_true;
+ break;
+ case ERTS_PSTT_CPC:
+ st_res = erts_check_process_code(c_p,
+ st->arg[0],
+ st->arg[1] == am_true,
+ &reds);
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid process sys task type");
+ st_res = am_false;
+ }
+
+ if (st)
+ reds += notify_sys_task_executed(c_p, st, st_res);
+
+ state = erts_smp_atomic32_read_acqb(&c_p->state);
+ } while (qmask && reds < max_reds);
+
+ *statep = state;
+
+ return reds;
+}
+
+static int
+cleanup_sys_tasks(Process *c_p, erts_aint32_t in_state, int in_reds)
+{
+ erts_aint32_t state = in_state;
+ int max_reds = in_reds;
+ int reds = 0;
+ int qmask = 0;
+
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
+
+ do {
+ ErtsProcSysTask *st;
+ Eterm st_res;
+
+ st = fetch_sys_task(c_p, state, &qmask);
+ if (!st)
+ break;
+
+ switch (st->type) {
+ case ERTS_PSTT_GC:
+ st_res = am_false;
+ break;
+ case ERTS_PSTT_CPC:
+ st_res = am_false;
+ break;
+ default:
+ ERTS_INTERNAL_ERROR("Invalid process sys task type");
+ st_res = am_false;
+ break;
+ }
+
+ reds += notify_sys_task_executed(c_p, st, st_res);
+
+ state = erts_smp_atomic32_read_acqb(&c_p->state);
+ } while (qmask && reds < max_reds);
+
+ return reds;
+}
+
+BIF_RETTYPE
+erts_internal_request_system_task_3(BIF_ALIST_3)
+{
+ Process *rp = erts_proc_lookup(BIF_ARG_1);
+ ErtsProcSysTaskQs *stqs, *free_stqs = NULL;
+ ErtsProcSysTask *st = NULL;
+ erts_aint32_t prio, rp_state;
+ int rp_locked;
+ Eterm noproc_res, req_type;
+
+ if (!rp && !is_internal_pid(BIF_ARG_1)) {
+ if (!is_external_pid(BIF_ARG_1))
+ goto badarg;
+ if (external_pid_dist_entry(BIF_ARG_1) != erts_this_dist_entry)
+ goto badarg;
+ }
+
+ switch (BIF_ARG_2) {
+ case am_max: prio = PRIORITY_MAX; break;
+ case am_high: prio = PRIORITY_HIGH; break;
+ case am_normal: prio = PRIORITY_NORMAL; break;
+ case am_low: prio = PRIORITY_LOW; break;
+ default: goto badarg;
+ }
+
+ if (is_not_tuple(BIF_ARG_3))
+ goto badarg;
+ else {
+ int i;
+ Eterm *tp = tuple_val(BIF_ARG_3);
+ Uint arity = arityval(*tp);
+ Eterm req_id;
+ Uint req_id_sz;
+ Eterm arg[ERTS_MAX_PROC_SYS_TASK_ARGS];
+ Uint arg_sz[ERTS_MAX_PROC_SYS_TASK_ARGS];
+ Uint tot_sz;
+ Eterm *hp;
+
+ if (arity < 2)
+ goto badarg;
+ if (arity > 2 + ERTS_MAX_PROC_SYS_TASK_ARGS)
+ goto badarg;
+ req_type = tp[1];
+ req_id = tp[2];
+ req_id_sz = is_immed(req_id) ? req_id : size_object(req_id);
+ tot_sz = req_id_sz;
+ for (i = 0; i < ERTS_MAX_PROC_SYS_TASK_ARGS; i++) {
+ int tix = 3 + i;
+ if (tix > arity) {
+ arg[i] = THE_NON_VALUE;
+ arg_sz[i] = 0;
+ }
+ else {
+ arg[i] = tp[tix];
+ if (is_immed(arg[i]))
+ arg_sz[i] = 0;
+ else {
+ arg_sz[i] = size_object(arg[i]);
+ tot_sz += arg_sz[i];
+ }
+ }
+ }
+ st = erts_alloc(ERTS_ALC_T_PROC_SYS_TSK,
+ ERTS_PROC_SYS_TASK_SIZE(tot_sz));
+ st->next = st->prev = st; /* Prep for empty prio queue */
+ ERTS_INIT_OFF_HEAP(&st->off_heap);
+ hp = &st->heap[0];
+
+ st->requester = BIF_P->common.id;
+ st->reply_tag = req_type;
+ st->req_id_sz = req_id_sz;
+ st->req_id = req_id_sz == 0 ? req_id : copy_struct(req_id,
+ req_id_sz,
+ &hp,
+ &st->off_heap);
+
+ for (i = 0; i < ERTS_MAX_PROC_SYS_TASK_ARGS; i++)
+ st->arg[i] = arg_sz[i] == 0 ? arg[i] : copy_struct(arg[i],
+ arg_sz[i],
+ &hp,
+ &st->off_heap);
+ ASSERT(&st->heap[0] + tot_sz == hp);
+ }
+
+ switch (req_type) {
+
+ case am_garbage_collect:
+ st->type = ERTS_PSTT_GC;
+ noproc_res = am_false;
+ if (!rp)
+ goto noproc;
+ break;
+
+ case am_check_process_code:
+ if (is_not_atom(st->arg[0]))
+ goto badarg;
+ if (st->arg[1] != am_true && st->arg[1] != am_false)
+ goto badarg;
+ noproc_res = am_false;
+ st->type = ERTS_PSTT_CPC;
+ if (!rp)
+ goto noproc;
+ break;
+
+ default:
+ goto badarg;
+ }
+
+ rp_state = erts_smp_atomic32_read_nob(&rp->state);
+
+ rp_locked = 0;
+
+ free_stqs = NULL;
+ if (rp_state & ERTS_PSFLG_ACTIVE_SYS)
+ stqs = NULL;
+ else {
+ alloc_qs:
+ stqs = proc_sys_task_queues_alloc();
+ stqs->qmask = 1 << prio;
+ stqs->ncount = 0;
+ stqs->q[PRIORITY_MAX] = NULL;
+ stqs->q[PRIORITY_HIGH] = NULL;
+ stqs->q[PRIORITY_NORMAL] = NULL;
+ stqs->q[PRIORITY_LOW] = NULL;
+ stqs->q[prio] = st;
+ }
+
+ if (!rp_locked) {
+ rp_locked = 1;
+ erts_smp_proc_lock(rp, ERTS_PROC_LOCK_STATUS);
+
+ rp_state = erts_smp_atomic32_read_nob(&rp->state);
+ if (rp_state & ERTS_PSFLG_EXITING) {
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ rp = NULL;
+ free_stqs = stqs;
+ goto noproc;
+ }
+ }
+
+ if (!rp->sys_task_qs) {
+ if (stqs)
+ rp->sys_task_qs = stqs;
+ else
+ goto alloc_qs;
+ }
+ else {
+ if (stqs)
+ free_stqs = stqs;
+ stqs = rp->sys_task_qs;
+ if (!stqs->q[prio]) {
+ stqs->q[prio] = st;
+ stqs->qmask |= 1 << prio;
+ }
+ else {
+ st->next = stqs->q[prio];
+ st->prev = stqs->q[prio]->prev;
+ st->next->prev = st;
+ st->prev->next = st;
+ ASSERT(stqs->qmask & (1 << prio));
+ }
+ }
+
+ if (ERTS_PSFLGS_GET_ACT_PRIO(rp_state) > prio) {
+ erts_aint32_t n, a, e;
+ /* Need to elevate actual prio */
+
+ a = rp_state;
+ do {
+ if (ERTS_PSFLGS_GET_ACT_PRIO(a) <= prio) {
+ n = a;
+ break;
+ }
+ n = e = a;
+ n &= ~ERTS_PSFLGS_ACT_PRIO_MASK;
+ n |= (prio << ERTS_PSFLGS_ACT_PRIO_OFFSET);
+ a = erts_smp_atomic32_cmpxchg_nob(&rp->state, n, e);
+ } while (a != e);
+ rp_state = n;
+ }
+
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+
+ schedule_process_sys_task(rp, rp_state, NULL);
+
+ if (free_stqs)
+ proc_sys_task_queues_free(free_stqs);
+
+ BIF_RET(am_ok);
+
+noproc:
+
+ notify_sys_task_executed(BIF_P, st, noproc_res);
+ if (free_stqs)
+ proc_sys_task_queues_free(free_stqs);
+ BIF_RET(am_ok);
+
+badarg:
+
+ if (st) {
+ erts_cleanup_offheap(&st->off_heap);
+ erts_free(ERTS_ALC_T_PROC_SYS_TSK, st);
+ }
+ if (free_stqs)
+ proc_sys_task_queues_free(free_stqs);
+ BIF_ERROR(BIF_P, BADARG);
+}
+
void
erts_sched_stat_modify(int what)
{
@@ -7520,7 +8463,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
prio = (erts_aint32_t) so->priority;
}
- state |= (prio & ERTS_PSFLG_PRIO_MASK);
+ state |= (((prio & ERTS_PSFLGS_PRIO_MASK) << ERTS_PSFLGS_ACT_PRIO_OFFSET)
+ | ((prio & ERTS_PSFLGS_PRIO_MASK) << ERTS_PSFLGS_USR_PRIO_OFFSET));
if (!rq)
rq = erts_get_runq_proc(parent);
@@ -7599,6 +8543,8 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->bin_old_vheap = 0;
p->bin_vheap_mature = 0;
+ p->sys_task_qs = NULL;
+
/* No need to initialize p->fcalls. */
p->current = p->initial+INITIAL_MOD;
@@ -7764,7 +8710,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
* Schedule process for execution.
*/
- schedule_process(p, state, 0);
+ schedule_process(p, state);
VERBOSE(DEBUG_PROCESSES, ("Created a new process: %T\n",p->common.id));
@@ -7815,6 +8761,7 @@ void erts_init_empty_process(Process *p)
p->bin_vheap_sz = BIN_VH_MIN_SIZE;
p->bin_old_vheap_sz = BIN_VH_MIN_SIZE;
p->bin_old_vheap = 0;
+ p->sys_task_qs = NULL;
p->bin_vheap_mature = 0;
#ifdef ERTS_SMP
p->common.u.alive.ptimer = NULL;
@@ -8070,33 +9017,21 @@ delete_process(Process* p)
p->fvalue = NIL;
}
-static ERTS_INLINE erts_aint32_t
-set_proc_exiting_state(Process *p, erts_aint32_t state)
-{
- erts_aint32_t a, n, e;
- a = state;
- while (1) {
- n = e = a;
- n &= ~(ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT);
- n |= ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE;
- if (!(a & (ERTS_PSFLG_IN_RUNQ|ERTS_PSFLG_RUNNING)))
- n |= ERTS_PSFLG_IN_RUNQ;
- a = erts_smp_atomic32_cmpxchg_relb(&p->state, n, e);
- if (a == e)
- break;
- }
- return a;
-}
-
static ERTS_INLINE void
set_proc_exiting(Process *p,
- erts_aint32_t state,
+ erts_aint32_t in_state,
Eterm reason,
ErlHeapFragment *bp)
{
+ erts_aint32_t state = in_state, enq_prio = -1;
+ int enqueue;
ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(p) == ERTS_PROC_LOCKS_ALL);
- state = set_proc_exiting_state(p, state);
+ enqueue = change_proc_schedule_state(p,
+ ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT,
+ ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE,
+ &state,
+ &enq_prio);
p->fvalue = reason;
if (bp)
@@ -8111,15 +9046,37 @@ set_proc_exiting(Process *p,
cancel_timer(p);
p->i = (BeamInstr *) beam_exit;
- if (erts_system_profile_flags.runnable_procs
- && !(state & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED))) {
- profile_runnable_proc(p, am_active);
- }
-
- if (!(state & (ERTS_PSFLG_IN_RUNQ|ERTS_PSFLG_RUNNING)))
- add2runq(p, state);
+ if (enqueue)
+ add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
+ state,
+ enq_prio);
}
+static ERTS_INLINE erts_aint32_t
+set_proc_self_exiting(Process *c_p)
+{
+#ifdef DEBUG
+ int enqueue;
+#endif
+ erts_aint32_t state, enq_prio = -1;
+
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCKS_ALL);
+
+ state = erts_smp_atomic32_read_nob(&c_p->state);
+ ASSERT(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS));
+
+#ifdef DEBUG
+ enqueue =
+#endif
+ change_proc_schedule_state(c_p,
+ ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT,
+ ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE,
+ &state,
+ &enq_prio);
+
+ ASSERT(!enqueue);
+ return state;
+}
#ifdef ERTS_SMP
@@ -8167,7 +9124,7 @@ handle_pending_exiters(ErtsProcList *pnd_xtrs)
if (p) {
if (erts_proclist_same(plp, p)) {
erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
- if (!(state & ERTS_PSFLG_RUNNING)) {
+ if (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))) {
ASSERT(state & ERTS_PSFLG_PENDING_EXIT);
erts_handle_pending_exit(p, ERTS_PROC_LOCKS_ALL);
}
@@ -8388,7 +9345,7 @@ send_exit_signal(Process *c_p, /* current process if and only
}
set_proc_exiting(c_p, state, rsn, NULL);
}
- else if (!(state & ERTS_PSFLG_RUNNING)) {
+ else if (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))) {
/* Process not running ... */
ErtsProcLocks need_locks = ~(*rp_locks) & ERTS_PROC_LOCKS_ALL;
if (need_locks
@@ -8765,9 +9722,6 @@ resume_suspend_monitor(ErtsSuspendMonitor *smon, void *vc_p)
void
erts_do_exit_process(Process* p, Eterm reason)
{
-#ifdef ERTS_SMP
- erts_aint32_t state;
-#endif
p->arity = 0; /* No live registers */
p->fvalue = reason;
@@ -8792,10 +9746,9 @@ erts_do_exit_process(Process* p, Eterm reason)
#endif
#ifndef ERTS_SMP
- set_proc_exiting_state(p, erts_smp_atomic32_read_nob(&p->state));
+ set_proc_self_exiting(p);
#else
- state = set_proc_exiting_state(p, erts_smp_atomic32_read_nob(&p->state));
- if (state & ERTS_PSFLG_PENDING_EXIT) {
+ if (ERTS_PSFLG_PENDING_EXIT & set_proc_self_exiting(p)) {
/* Process exited before pending exit was received... */
p->pending_exit.reason = THE_NON_VALUE;
if (p->pending_exit.bp) {
@@ -8849,6 +9802,7 @@ erts_continue_exit_process(Process *p)
DistEntry *dep;
struct saved_calls *scb;
process_breakpoint_time_t *pbt;
+ erts_aint32_t state;
#ifdef DEBUG
int yield_allowed = 1;
@@ -8885,6 +9839,12 @@ erts_continue_exit_process(Process *p)
p->flags &= ~F_USING_DB;
}
+ state = erts_smp_atomic32_read_acqb(&p->state);
+ if (state & ERTS_PSFLG_ACTIVE_SYS) {
+ if (cleanup_sys_tasks(p, state, CONTEXT_REDS) >= CONTEXT_REDS/2)
+ goto yield;
+ }
+
if (p->flags & F_USING_DDLL) {
erts_ddll_proc_dead(p, ERTS_PROC_LOCK_MAIN);
p->flags &= ~F_USING_DDLL;
@@ -8962,17 +9922,31 @@ erts_continue_exit_process(Process *p)
{
/* Inactivate and notify free */
erts_aint32_t n, e, a = erts_smp_atomic32_read_nob(&p->state);
+#ifdef ERTS_SMP
+ int refc_inced = 0;
+#endif
while (1) {
n = e = a;
ASSERT(a & ERTS_PSFLG_EXITING);
n |= ERTS_PSFLG_FREE;
n &= ~ERTS_PSFLG_ACTIVE;
+#ifdef ERTS_SMP
+ if ((n & ERTS_PSFLG_IN_RUNQ) && !refc_inced) {
+ erts_smp_proc_inc_refc(p);
+ refc_inced = 1;
+ }
+#endif
a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
if (a == e)
break;
}
- }
+#ifdef ERTS_SMP
+ if (refc_inced && !(n & ERTS_PSFLG_IN_RUNQ))
+ erts_smp_proc_dec_refc(p);
+#endif
+ }
+
dep = ((p->flags & F_DISTRIBUTION)
? ERTS_PROC_SET_DIST_ENTRY(p, ERTS_PROC_LOCKS_ALL, NULL)
: NULL);
@@ -9075,7 +10049,7 @@ timeout_proc(Process* p)
state = erts_smp_atomic32_read_acqb(&p->state);
if (!(state & ERTS_PSFLG_ACTIVE))
- schedule_process(p, state, 0);
+ schedule_process(p, state);
}
@@ -9153,7 +10127,9 @@ erts_program_counter_info(int to, void *to_arg, Process *p)
print_function_from_pc(to, to_arg, p->cp);
erts_print(to, to_arg, ")\n");
state = erts_smp_atomic32_read_acqb(&p->state);
- if (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_GC))) {
+ if (!(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_GC))) {
erts_print(to, to_arg, "arity = %d\n",p->arity);
if (!ERTS_IS_CRASH_DUMPING) {
/*
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 8e5467f196..814e3d34df 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -688,6 +688,9 @@ typedef struct {
ErlHeapFragment *bp;
} ErtsPendExit;
+typedef struct ErtsProcSysTask_ ErtsProcSysTask;
+typedef struct ErtsProcSysTaskQs_ ErtsProcSysTaskQs;
+
#ifdef ERTS_SMP
typedef struct ErtsPendingSuspend_ ErtsPendingSuspend;
@@ -855,6 +858,7 @@ struct process {
Uint64 bin_old_vheap_sz; /* Virtual old heap block size for binaries */
Uint64 bin_old_vheap; /* Virtual old heap size for binaries */
+ ErtsProcSysTaskQs *sys_task_qs;
erts_smp_atomic32_t state; /* Process state flags (see ERTS_PSFLG_*) */
#ifdef ERTS_SMP
@@ -924,24 +928,66 @@ void erts_check_for_holes(Process* p);
# error "Need to increase ERTS_PSFLG_PRIO_SHIFT"
#endif
-#define ERTS_PSFLG_PRIO_SHIFT 2
+#define ERTS_PSFLGS_PRIO_BITS 2
+#define ERTS_PSFLGS_PRIO_MASK \
+ ((((erts_aint32_t) 1) << ERTS_PSFLGS_PRIO_BITS) - 1)
-#define ERTS_PSFLG_BIT(N) \
- (((erts_aint32_t) 1) << (ERTS_PSFLG_PRIO_SHIFT + (N)))
+#define ERTS_PSFLGS_ACT_PRIO_OFFSET (0*ERTS_PSFLGS_PRIO_BITS)
+#define ERTS_PSFLGS_USR_PRIO_OFFSET (1*ERTS_PSFLGS_PRIO_BITS)
+#define ERTS_PSFLGS_PRQ_PRIO_OFFSET (2*ERTS_PSFLGS_PRIO_BITS)
+#define ERTS_PSFLGS_ZERO_BIT_OFFSET (3*ERTS_PSFLGS_PRIO_BITS)
-#define ERTS_PSFLG_PRIO_MASK (ERTS_PSFLG_BIT(0) - 1)
+#define ERTS_PSFLGS_QMASK_BITS 4
+#define ERTS_PSFLGS_QMASK \
+ ((((erts_aint32_t) 1) << ERTS_PSFLGS_QMASK_BITS) - 1)
+#define ERTS_PSFLGS_IN_PRQ_MASK_OFFSET \
+ ERTS_PSFLGS_ZERO_BIT_OFFSET
-#define ERTS_PSFLG_FREE ERTS_PSFLG_BIT(0)
-#define ERTS_PSFLG_EXITING ERTS_PSFLG_BIT(1)
-#define ERTS_PSFLG_PENDING_EXIT ERTS_PSFLG_BIT(2)
-#define ERTS_PSFLG_ACTIVE ERTS_PSFLG_BIT(3)
-#define ERTS_PSFLG_IN_RUNQ ERTS_PSFLG_BIT(4)
-#define ERTS_PSFLG_RUNNING ERTS_PSFLG_BIT(5)
-#define ERTS_PSFLG_SUSPENDED ERTS_PSFLG_BIT(6)
-#define ERTS_PSFLG_GC ERTS_PSFLG_BIT(7)
-#define ERTS_PSFLG_BOUND ERTS_PSFLG_BIT(8)
-#define ERTS_PSFLG_TRAP_EXIT ERTS_PSFLG_BIT(9)
+#define ERTS_PSFLG_BIT(N) \
+ (((erts_aint32_t) 1) << (ERTS_PSFLGS_ZERO_BIT_OFFSET + (N)))
+/*
+ * ACT_PRIO -> Active prio, i.e., currently active prio. This
+ * prio may be higher than user prio.
+ * USR_PRIO -> User prio, i.e., prio the user has set.
+ * PRQ_PRIO -> Prio queue prio, i.e., prio queue currently
+ * enqueued in.
+ */
+#define ERTS_PSFLGS_ACT_PRIO_MASK \
+ (ERTS_PSFLGS_PRIO_MASK << ERTS_PSFLGS_ACT_PRIO_OFFSET)
+#define ERTS_PSFLGS_USR_PRIO_MASK \
+ (ERTS_PSFLGS_PRIO_MASK << ERTS_PSFLGS_USR_PRIO_OFFSET)
+#define ERTS_PSFLGS_PRQ_PRIO_MASK \
+ (ERTS_PSFLGS_PRIO_MASK << ERTS_PSFLGS_PRQ_PRIO_OFFSET)
+#define ERTS_PSFLG_IN_PRQ_MAX ERTS_PSFLG_BIT(0)
+#define ERTS_PSFLG_IN_PRQ_HIGH ERTS_PSFLG_BIT(1)
+#define ERTS_PSFLG_IN_PRQ_NORMAL ERTS_PSFLG_BIT(2)
+#define ERTS_PSFLG_IN_PRQ_LOW ERTS_PSFLG_BIT(3)
+#define ERTS_PSFLG_FREE ERTS_PSFLG_BIT(4)
+#define ERTS_PSFLG_EXITING ERTS_PSFLG_BIT(5)
+#define ERTS_PSFLG_PENDING_EXIT ERTS_PSFLG_BIT(6)
+#define ERTS_PSFLG_ACTIVE ERTS_PSFLG_BIT(7)
+#define ERTS_PSFLG_IN_RUNQ ERTS_PSFLG_BIT(8)
+#define ERTS_PSFLG_RUNNING ERTS_PSFLG_BIT(9)
+#define ERTS_PSFLG_SUSPENDED ERTS_PSFLG_BIT(10)
+#define ERTS_PSFLG_GC ERTS_PSFLG_BIT(11)
+#define ERTS_PSFLG_BOUND ERTS_PSFLG_BIT(12)
+#define ERTS_PSFLG_TRAP_EXIT ERTS_PSFLG_BIT(13)
+#define ERTS_PSFLG_ACTIVE_SYS ERTS_PSFLG_BIT(14)
+#define ERTS_PSFLG_RUNNING_SYS ERTS_PSFLG_BIT(15)
+#define ERTS_PSFLG_PROXY ERTS_PSFLG_BIT(16)
+
+#define ERTS_PSFLGS_IN_PRQ_MASK (ERTS_PSFLG_IN_PRQ_MAX \
+ | ERTS_PSFLG_IN_PRQ_HIGH \
+ | ERTS_PSFLG_IN_PRQ_NORMAL \
+ | ERTS_PSFLG_IN_PRQ_LOW)
+
+#define ERTS_PSFLGS_GET_ACT_PRIO(PSFLGS) \
+ (((PSFLGS) >> ERTS_PSFLGS_ACT_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK)
+#define ERTS_PSFLGS_GET_USR_PRIO(PSFLGS) \
+ (((PSFLGS) >> ERTS_PSFLGS_USR_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK)
+#define ERTS_PSFLGS_GET_PRQ_PRIO(PSFLGS) \
+ (((PSFLGS) >> ERTS_PSFLGS_PRQ_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK)
/* The sequential tracing token is a tuple of size 5:
*
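
A minimal stand-alone Erlang sketch of the decoding that the ERTS_PSFLGS_GET_*_PRIO
macros above perform on a raw state word; the module and function names are
illustrative only, and the prq field is only meaningful while one of the in-prq
flags is set:

    -module(psflgs_sketch).
    -export([decode_prios/1]).

    %% Mirrors ERTS_PSFLGS_PRIO_BITS/MASK and the ACT/USR/PRQ offsets above.
    -define(PRIO_BITS, 2).
    -define(PRIO_MASK, ((1 bsl ?PRIO_BITS) - 1)).
    -define(ACT_PRIO_OFFSET, (0 * ?PRIO_BITS)).
    -define(USR_PRIO_OFFSET, (1 * ?PRIO_BITS)).
    -define(PRQ_PRIO_OFFSET, (2 * ?PRIO_BITS)).

    %% Returns {ActPrio, UsrPrio, PrqPrio} for a raw state integer.
    decode_prios(State) when is_integer(State) ->
        {prio((State bsr ?ACT_PRIO_OFFSET) band ?PRIO_MASK),
         prio((State bsr ?USR_PRIO_OFFSET) band ?PRIO_MASK),
         prio((State bsr ?PRQ_PRIO_OFFSET) band ?PRIO_MASK)}.

    %% Same encoding as etp-proc-state-int below: 0 = max .. 3 = low.
    prio(0) -> max;
    prio(1) -> high;
    prio(2) -> normal;
    prio(3) -> low.

For example, a state word of 16#200A (active, act-prio and usr-prio normal)
decodes to {normal, normal, max}; the prq field can be ignored here since no
in-prq flag is set.
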
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index bacd5a5752..885f092aca 100755
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -655,6 +655,10 @@ Eterm erl_send(Process *p, Eterm to, Eterm msg);
Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2);
+/* beam_bif_load.c */
+Eterm erts_check_process_code(Process *c_p, Eterm module, int allow_gc, int *redsp);
+
+
/* beam_load.c */
typedef struct {
BeamInstr* current; /* Pointer to: Mod, Name, Arity */
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index 1e5ae46bfa..c29f3f9b1b 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -763,17 +763,17 @@ allocate_init t I y
#################################################################
#
-# The BIFs erlang:check_process_code/2 must be called like a function,
+# The BIF erts_internal:check_process_code/2 must be called like a function,
# to ensure that c_p->i (program counter) is set correctly (an ordinary
# BIF call doesn't set it).
#
-call_ext u==2 Bif=u$bif:erlang:check_process_code/2 => i_call_ext Bif
-call_ext_last u==2 Bif=u$bif:erlang:check_process_code/2 D => i_call_ext_last Bif D
-call_ext_only u==2 Bif=u$bif:erlang:check_process_code/2 => i_call_ext_only Bif
+call_ext u==2 Bif=u$bif:erts_internal:check_process_code/2 => i_call_ext Bif
+call_ext_last u==2 Bif=u$bif:erts_internal:check_process_code/2 D => i_call_ext_last Bif D
+call_ext_only u==2 Bif=u$bif:erts_internal:check_process_code/2 => i_call_ext_only Bif
#
-# The BIFs erlang:garbage_collect/0,1 must be called like functions,
+# The BIF erlang:garbage_collect/0 must be called like a function,
# to allow them to invoke the garbage collector. (The stack pointer must
# be saved and p->arity must be zeroed, which is not done on ordinary BIF calls.)
#
@@ -782,10 +782,6 @@ call_ext u==0 Bif=u$bif:erlang:garbage_collect/0 => i_call_ext Bif
call_ext_last u==0 Bif=u$bif:erlang:garbage_collect/0 D => i_call_ext_last Bif D
call_ext_only u==0 Bif=u$bif:erlang:garbage_collect/0 => i_call_ext_only Bif
-call_ext u==1 Bif=u$bif:erlang:garbage_collect/1 => i_call_ext Bif
-call_ext_last u==1 Bif=u$bif:erlang:garbage_collect/1 D => i_call_ext_last Bif D
-call_ext_only u==1 Bif=u$bif:erlang:garbage_collect/1 => i_call_ext_only Bif
-
#
# put/2 and erase/1 must be able to do garbage collection, so we must call
# them like functions.
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index bd2be7afca..a5b2cc589c 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -1675,7 +1675,7 @@ static int do_send_to_logger(Eterm tag, Eterm gleader, char *buf, int len)
p = erts_whereis_process(NULL, 0, am_error_logger, 0, 0);
if (p) {
erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
- if (state & ERTS_PSFLG_RUNNING)
+ if (state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))
p = NULL;
}
}
diff --git a/erts/emulator/hipe/hipe_bif_list.m4 b/erts/emulator/hipe/hipe_bif_list.m4
index 764b8d180c..b1fedf4838 100644
--- a/erts/emulator/hipe/hipe_bif_list.m4
+++ b/erts/emulator/hipe/hipe_bif_list.m4
@@ -151,10 +151,9 @@ standard_bif_interface_0(nbif_ports_0, ports_0)
* BIFs and primops that may do a GC (change heap limit and walk the native stack).
* XXX: erase/1 and put/2 cannot fail
*/
-gc_bif_interface_2(nbif_check_process_code_2, hipe_check_process_code_2)
+gc_bif_interface_2(nbif_erts_internal_check_process_code_2, hipe_erts_internal_check_process_code_2)
gc_bif_interface_1(nbif_erase_1, erase_1)
gc_bif_interface_0(nbif_garbage_collect_0, garbage_collect_0)
-gc_bif_interface_1(nbif_garbage_collect_1, hipe_garbage_collect_1)
gc_nofail_primop_interface_1(nbif_gc_1, hipe_gc)
gc_bif_interface_2(nbif_put_2, put_2)
diff --git a/erts/emulator/hipe/hipe_native_bif.c b/erts/emulator/hipe/hipe_native_bif.c
index 1f76268934..7d343dd91e 100644
--- a/erts/emulator/hipe/hipe_native_bif.c
+++ b/erts/emulator/hipe/hipe_native_bif.c
@@ -41,8 +41,7 @@
*/
/* for -Wmissing-prototypes :-( */
-extern Eterm hipe_check_process_code_2(BIF_ALIST_2);
-extern Eterm hipe_garbage_collect_1(BIF_ALIST_1);
+extern Eterm hipe_erts_internal_check_process_code_2(BIF_ALIST_2);
extern Eterm hipe_show_nstack_1(BIF_ALIST_1);
/* Used when a BIF can trigger a stack walk. */
@@ -51,22 +50,12 @@ static __inline__ void hipe_set_narity(Process *p, unsigned int arity)
p->hipe.narity = arity;
}
-Eterm hipe_check_process_code_2(BIF_ALIST_2)
+Eterm hipe_erts_internal_check_process_code_2(BIF_ALIST_2)
{
Eterm ret;
hipe_set_narity(BIF_P, 2);
- ret = check_process_code_2(BIF_P, BIF__ARGS);
- hipe_set_narity(BIF_P, 0);
- return ret;
-}
-
-Eterm hipe_garbage_collect_1(BIF_ALIST_1)
-{
- Eterm ret;
-
- hipe_set_narity(BIF_P, 1);
- ret = garbage_collect_1(BIF_P, BIF__ARGS);
+ ret = erts_internal_check_process_code_2(BIF_P, BIF__ARGS);
hipe_set_narity(BIF_P, 0);
return ret;
}
diff --git a/erts/emulator/test/process_SUITE.erl b/erts/emulator/test/process_SUITE.erl
index e3aae17df4..e66c6f09b6 100644
--- a/erts/emulator/test/process_SUITE.erl
+++ b/erts/emulator/test/process_SUITE.erl
@@ -51,7 +51,11 @@
processes_term_proc_list/1,
otp_7738_waiting/1, otp_7738_suspended/1,
otp_7738_resume/1,
- garb_other_running/1]).
+ garb_other_running/1,
+ no_priority_inversion/1,
+ no_priority_inversion2/1,
+ system_task_blast/1,
+ system_task_on_suspended/1]).
-export([prio_server/2, prio_client/2]).
-export([init_per_testcase/2, end_per_testcase/2]).
@@ -73,7 +77,8 @@ all() ->
bad_register, garbage_collect, process_info_messages,
process_flag_badarg, process_flag_heap_size,
spawn_opt_heap_size, otp_6237, {group, processes_bif},
- {group, otp_7738}, garb_other_running].
+ {group, otp_7738}, garb_other_running,
+ {group, system_task}].
groups() ->
[{t_exit_2, [],
@@ -87,7 +92,10 @@ groups() ->
processes_gc_trap, processes_term_proc_list]},
{otp_7738, [],
[otp_7738_waiting, otp_7738_suspended,
- otp_7738_resume]}].
+ otp_7738_resume]},
+ {system_task, [],
+ [no_priority_inversion, no_priority_inversion2,
+ system_task_blast, system_task_on_suspended]}].
init_per_suite(Config) ->
A0 = case application:start(sasl) of
@@ -2214,6 +2222,154 @@ garb_other_running(Config) when is_list(Config) ->
receive {'DOWN', Mon, process, Pid, normal} -> ok end,
ok.
+no_priority_inversion(Config) when is_list(Config) ->
+ Prio = process_flag(priority, max),
+ HTLs = lists:map(fun (_) ->
+ spawn_opt(fun () ->
+ tok_loop()
+ end,
+ [{priority, high}, monitor, link])
+ end,
+ lists:seq(1, 2*erlang:system_info(schedulers))),
+ receive after 500 -> ok end,
+ LTL = spawn_opt(fun () ->
+ tok_loop()
+ end,
+ [{priority, low}, monitor, link]),
+ false = erlang:check_process_code(element(1, LTL), nonexisting_module),
+ true = erlang:garbage_collect(element(1, LTL)),
+ lists:foreach(fun ({P, _}) ->
+ unlink(P),
+ exit(P, kill)
+ end, [LTL | HTLs]),
+ lists:foreach(fun ({P, M}) ->
+ receive
+ {'DOWN', M, process, P, killed} ->
+ ok
+ end
+ end, [LTL | HTLs]),
+ process_flag(priority, Prio),
+ ok.
+
+no_priority_inversion2(Config) when is_list(Config) ->
+ Prio = process_flag(priority, max),
+ MTLs = lists:map(fun (_) ->
+ spawn_opt(fun () ->
+ tok_loop()
+ end,
+ [{priority, max}, monitor, link])
+ end,
+ lists:seq(1, 2*erlang:system_info(schedulers))),
+ receive after 500 -> ok end,
+ {PL, ML} = spawn_opt(fun () ->
+ tok_loop()
+ end,
+ [{priority, low}, monitor, link]),
+ RL = request_gc(PL, low),
+ RN = request_gc(PL, normal),
+ RH = request_gc(PL, high),
+ receive
+ {garbage_collect, _, _} ->
+ ?t:fail(unexpected_gc)
+ after 1000 ->
+ ok
+ end,
+ RM = request_gc(PL, max),
+ receive
+ {garbage_collect, RM, true} ->
+ ok
+ end,
+ lists:foreach(fun ({P, _}) ->
+ unlink(P),
+ exit(P, kill)
+ end, MTLs),
+ lists:foreach(fun ({P, M}) ->
+ receive
+ {'DOWN', M, process, P, killed} ->
+ ok
+ end
+ end, MTLs),
+ receive
+ {garbage_collect, RH, true} ->
+ ok
+ end,
+ receive
+ {garbage_collect, RN, true} ->
+ ok
+ end,
+ receive
+ {garbage_collect, RL, true} ->
+ ok
+ end,
+ unlink(PL),
+ exit(PL, kill),
+ receive
+ {'DOWN', ML, process, PL, killed} ->
+ ok
+ end,
+ process_flag(priority, Prio),
+ ok.
+
+request_gc(Pid, Prio) ->
+ Ref = make_ref(),
+ erts_internal:request_system_task(Pid, Prio, {garbage_collect, Ref}),
+ Ref.
+
+system_task_blast(Config) when is_list(Config) ->
+ Me = self(),
+ GCReq = fun () ->
+ RL = gc_req(Me, 100),
+ lists:foreach(fun (R) ->
+ receive
+ {garbage_collect, R, true} ->
+ ok
+ end
+ end, RL),
+ exit(it_worked)
+ end,
+ HTLs = lists:map(fun (_) -> spawn_monitor(GCReq) end, lists:seq(1, 1000)),
+ lists:foreach(fun ({P, M}) ->
+ receive
+ {'DOWN', M, process, P, it_worked} ->
+ ok
+ end
+ end, HTLs),
+ ok.
+
+gc_req(_Pid, 0) ->
+ [];
+gc_req(Pid, N) ->
+ R0 = request_gc(Pid, low),
+ R1 = request_gc(Pid, normal),
+ R2 = request_gc(Pid, high),
+ R3 = request_gc(Pid, max),
+ [R0, R1, R2, R3 | gc_req(Pid, N-1)].
+
+system_task_on_suspended(Config) when is_list(Config) ->
+ {P, M} = spawn_monitor(fun () ->
+ tok_loop()
+ end),
+ true = erlang:suspend_process(P),
+ {status, suspended} = process_info(P, status),
+ true = erlang:garbage_collect(P),
+ {status, suspended} = process_info(P, status),
+ true = erlang:resume_process(P),
+ false = ({status, suspended} == process_info(P, status)),
+ exit(P, kill),
+ receive
+ {'DOWN', M, process, P, killed} ->
+ ok
+ end.
+
%% Internal functions
wait_until(Fun) ->
diff --git a/erts/etc/unix/etp-commands.in b/erts/etc/unix/etp-commands.in
index 54ff7b3e3a..6e3466b8c0 100644
--- a/erts/etc/unix/etp-commands.in
+++ b/erts/etc/unix/etp-commands.in
@@ -1316,49 +1316,99 @@ end
define etp-proc-state-int
# Args: int
#
- if ($arg0 & 0xfffff000)
+ if ($arg0 & 0xff800000)
printf "GARBAGE | "
end
- if ($arg0 & 0x800)
+ if ($arg0 & 0x400000)
+ printf "proxy | "
+ set $proxy_process = 1
+ else
+ set $proxy_process = 0
+ end
+ if ($arg0 & 0x200000)
+ printf "running-sys | "
+ end
+ if ($arg0 & 0x100000)
+ printf "active-sys | "
+ end
+ if ($arg0 & 0x80000)
printf "trapping-exit | "
end
- if ($arg0 & 0x400)
+ if ($arg0 & 0x40000)
printf "bound | "
end
- if ($arg0 & 0x200)
+ if ($arg0 & 0x20000)
printf "garbage-collecting | "
end
- if ($arg0 & 0x100)
+ if ($arg0 & 0x10000)
printf "suspended | "
end
- if ($arg0 & 0x80)
+ if ($arg0 & 0x8000)
printf "running | "
end
- if ($arg0 & 0x40)
+ if ($arg0 & 0x4000)
printf "in-run-queue | "
end
- if ($arg0 & 0x20)
+ if ($arg0 & 0x2000)
printf "active | "
end
- if ($arg0 & 0x10)
+ if ($arg0 & 0x1000)
printf "pending-exit | "
end
- if ($arg0 & 0x8)
+ if ($arg0 & 0x800)
printf "exiting | "
end
- if ($arg0 & 0x4)
+ if ($arg0 & 0x400)
printf "free | "
end
- if ($arg0 & 0x3) == 0
- printf "prio-max\n"
+ if ($arg0 & 0x200)
+ printf "in-prq-low | "
+ end
+ if ($arg0 & 0x100)
+ printf "in-prq-normal | "
+ end
+ if ($arg0 & 0x80)
+ printf "in-prq-high | "
+ end
+ if ($arg0 & 0x40)
+ printf "in-prq-max | "
+ end
+ if ($arg0 & 0x30) == 0x0
+ printf "prq-prio-max | "
else
- if ($arg0 & 0x3) == 1
- printf "prio-high\n"
+ if ($arg0 & 0x30) == 0x10
+ printf "prq-prio-high | "
else
- if ($arg0 & 0x3) == 2
- printf "prio-normal\n"
+ if ($arg0 & 0x30) == 0x20
+ printf "prq-prio-normal | "
else
- printf "prio-low\n"
+ printf "prq-prio-low | "
+ end
+ end
+ end
+ if ($arg0 & 0xc) == 0x0
+ printf "usr-prio-max | "
+ else
+ if ($arg0 & 0xc) == 0x4
+ printf "usr-prio-high | "
+ else
+ if ($arg0 & 0xc) == 0x8
+ printf "usr-prio-normal | "
+ else
+ printf "usr-prio-low | "
+ end
+ end
+ end
+ if ($arg0 & 0x3) == 0x0
+ printf "act-prio-max\n"
+ else
+ if ($arg0 & 0x3) == 0x1
+ printf "act-prio-high\n"
+ else
+ if ($arg0 & 0x3) == 0x2
+ printf "act-prio-normal\n"
+ else
+ printf "act-prio-low\n"
end
end
end
@@ -1395,6 +1445,12 @@ define etp-process-info
etp-1 $arg0->common.id
printf "\n State: "
etp-proc-state $arg0
+ if $proxy_process != 0
+ printf " Pointer: (Process *) %p\n", $arg0
+ printf " *** PROXY process struct *** refer to: \n"
+ etp-pid2proc-1 $arg0->common.id
+ etp-process-info $proc
+ else
if (*(((Uint32 *) &(((Process *) $arg0)->state))) & 0x4) == 0
if ($arg0->common.u.alive.reg)
printf " Registered name: "
@@ -1432,6 +1488,7 @@ define etp-process-info
printf " Parent: "
etp-1 $arg0->parent
printf "\n Pointer: (Process *) %p\n", $arg0
+ end
end
document etp-process-info
diff --git a/erts/preloaded/ebin/erlang.beam b/erts/preloaded/ebin/erlang.beam
index 9da4f3cd00..159c91e37c 100644
--- a/erts/preloaded/ebin/erlang.beam
+++ b/erts/preloaded/ebin/erlang.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erts_internal.beam b/erts/preloaded/ebin/erts_internal.beam
index 3ab52615ab..9d8a4cfd00 100644
--- a/erts/preloaded/ebin/erts_internal.beam
+++ b/erts/preloaded/ebin/erts_internal.beam
Binary files differ
diff --git a/erts/preloaded/src/erlang.erl b/erts/preloaded/src/erlang.erl
index a969ef91dc..e521c6fc3d 100644
--- a/erts/preloaded/src/erlang.erl
+++ b/erts/preloaded/src/erlang.erl
@@ -81,7 +81,8 @@
-export([binary_to_list/3, binary_to_term/1, binary_to_term/2]).
-export([bit_size/1, bitsize/1, bitstr_to_list/1, bitstring_to_list/1]).
-export([bump_reductions/1, byte_size/1, call_on_load_function/1]).
--export([cancel_timer/1, check_old_code/1, check_process_code/2, crc32/1]).
+-export([cancel_timer/1, check_old_code/1, check_process_code/2,
+ check_process_code/3, crc32/1]).
-export([crc32/2, crc32_combine/3, date/0, decode_packet/3]).
-export([delete_element/2]).
-export([delete_module/1, demonitor/1, demonitor/2, display/1]).
@@ -91,7 +92,7 @@
-export([float_to_binary/1, float_to_binary/2,
float_to_list/1, float_to_list/2]).
-export([fun_info/2, fun_to_list/1, function_exported/3]).
--export([garbage_collect/0, garbage_collect/1]).
+-export([garbage_collect/0, garbage_collect/1, garbage_collect/2]).
-export([garbage_collect_message_area/0, get/0, get/1, get_keys/1]).
-export([get_module_info/1, get_stacktrace/0, group_leader/0]).
-export([group_leader/2, halt/0, halt/1, halt/2, hash/2, hibernate/3]).
@@ -429,11 +430,71 @@ check_old_code(_Module) ->
erlang:nif_error(undefined).
%% check_process_code/2
--spec check_process_code(Pid, Module) -> boolean() when
+-spec check_process_code(Pid, Module) -> CheckResult when
Pid :: pid(),
- Module :: module().
-check_process_code(_Pid, _Module) ->
- erlang:nif_error(undefined).
+ Module :: module(),
+ CheckResult :: boolean().
+check_process_code(Pid, Module) ->
+ try
+ erlang:check_process_code(Pid, Module, [{allow_gc, true}])
+ catch
+ error:Error -> erlang:error(Error, [Pid, Module])
+ end.
+
+%% check_process_code/3
+-spec check_process_code(Pid, Module, OptionList) -> CheckResult | async when
+ Pid :: pid(),
+ Module :: module(),
+ RequestId :: term(),
+ Option :: {async, RequestId} | {allow_gc, boolean()},
+ OptionList :: [Option],
+ CheckResult :: boolean() | aborted.
+check_process_code(Pid, Module, OptionList) ->
+ try
+ {Async, AllowGC} = get_cpc_opts(OptionList, sync, true),
+ case Async of
+ {async, ReqId} ->
+ {priority, Prio} = erlang:process_info(erlang:self(),
+ priority),
+ erts_internal:request_system_task(Pid,
+ Prio,
+ {check_process_code,
+ ReqId,
+ Module,
+ AllowGC}),
+ async;
+ sync ->
+ case Pid == erlang:self() of
+ true ->
+ erts_internal:check_process_code(Module,
+ [{allow_gc, AllowGC}]);
+ false ->
+ {priority, Prio} = erlang:process_info(erlang:self(),
+ priority),
+ ReqId = erlang:make_ref(),
+ erts_internal:request_system_task(Pid,
+ Prio,
+ {check_process_code,
+ ReqId,
+ Module,
+ AllowGC}),
+ receive
+ {check_process_code, ReqId, CheckResult} ->
+ CheckResult
+ end
+ end
+ end
+ catch
+ error:Error -> erlang:error(Error, [Pid, Module, OptionList])
+ end.
+
+%% Gets the async and allow_gc options and validates the option list.
+get_cpc_opts([{async, _ReqId} = AsyncTuple | Options], _OldAsync, AllowGC) ->
+ get_cpc_opts(Options, AsyncTuple, AllowGC);
+get_cpc_opts([{allow_gc, AllowGC} | Options], Async, _OldAllowGC) ->
+ get_cpc_opts(Options, Async, AllowGC);
+get_cpc_opts([], Async, AllowGC) ->
+ {Async, AllowGC}.
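
A usage sketch of the asynchronous variant added above (the wrapper name is
illustrative); the reply is the same message the synchronous branch matches on,
and per the spec the result may also be the atom 'aborted':

    %% Issue an async check and pick up the reply later.
    async_check_process_code(Pid, Module) ->
        ReqId = erlang:make_ref(),
        async = erlang:check_process_code(Pid, Module,
                                          [{async, ReqId},
                                           {allow_gc, false}]),
        receive
            {check_process_code, ReqId, CheckResult} ->
                CheckResult
        end.
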
%% crc32/1
-spec erlang:crc32(Data) -> non_neg_integer() when
@@ -793,10 +854,61 @@ garbage_collect() ->
erlang:nif_error(undefined).
%% garbage_collect/1
--spec garbage_collect(Pid) -> boolean() when
- Pid :: pid().
-garbage_collect(_Pid) ->
- erlang:nif_error(undefined).
+-spec garbage_collect(Pid) -> GCResult when
+ Pid :: pid(),
+ GCResult :: boolean().
+garbage_collect(Pid) ->
+ try
+ erlang:garbage_collect(Pid, [])
+ catch
+ error:Error -> erlang:error(Error, [Pid])
+ end.
+
+%% garbage_collect/2
+-spec garbage_collect(Pid, OptionList) -> GCResult | async when
+ Pid :: pid(),
+ RequestId :: term(),
+ Option :: {async, RequestId},
+ OptionList :: [Option],
+ GCResult :: boolean().
+garbage_collect(Pid, OptionList) ->
+ try
+ Async = get_gc_opts(OptionList, sync),
+ case Async of
+ {async, ReqId} ->
+ {priority, Prio} = erlang:process_info(erlang:self(),
+ priority),
+ erts_internal:request_system_task(Pid,
+ Prio,
+ {garbage_collect, ReqId}),
+ async;
+ sync ->
+ case Pid == erlang:self() of
+ true ->
+ erlang:garbage_collect();
+ false ->
+ {priority, Prio} = erlang:process_info(erlang:self(),
+ priority),
+ ReqId = erlang:make_ref(),
+ erts_internal:request_system_task(Pid,
+ Prio,
+ {garbage_collect,
+ ReqId}),
+ receive
+ {garbage_collect, ReqId, GCResult} ->
+ GCResult
+ end
+ end
+ end
+ catch
+ error:Error -> erlang:error(Error, [Pid, OptionList])
+ end.
+
+%% Gets the async option and validates the option list.
+get_gc_opts([{async, _ReqId} = AsyncTuple | Options], _OldAsync) ->
+ get_gc_opts(Options, AsyncTuple);
+get_gc_opts([], Async) ->
+ Async.
%% garbage_collect_message_area/0
-spec erlang:garbage_collect_message_area() -> boolean().
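
A corresponding usage sketch for the new erlang:garbage_collect/2 with the
{async, RequestId} option (again, the wrapper name is illustrative):

    %% Request a garbage collection of Pid and collect the result later.
    async_garbage_collect(Pid) ->
        ReqId = erlang:make_ref(),
        async = erlang:garbage_collect(Pid, [{async, ReqId}]),
        receive
            {garbage_collect, ReqId, GCResult} ->
                GCResult
        end.
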
diff --git a/erts/preloaded/src/erts_internal.erl b/erts/preloaded/src/erts_internal.erl
index 8a8cd52d64..c8e8e7e069 100644
--- a/erts/preloaded/src/erts_internal.erl
+++ b/erts/preloaded/src/erts_internal.erl
@@ -33,6 +33,10 @@
-export([port_command/3, port_connect/2, port_close/1,
port_control/3, port_call/3, port_info/1, port_info/2]).
+-export([request_system_task/3]).
+
+-export([check_process_code/2]).
+
%%
%% Await result of send to port
%%
@@ -139,3 +143,20 @@ port_info(_Result) ->
port_info(_Result, _Item) ->
erlang:nif_error(undefined).
+
+-spec request_system_task(Pid, Prio, Request) -> 'ok' when
+ Prio :: 'max' | 'high' | 'normal' | 'low',
+ Request :: {'garbage_collect', term()}
+ | {'check_process_code', term(), module(), boolean()},
+ Pid :: pid().
+
+request_system_task(_Pid, _Prio, _Request) ->
+ erlang:nif_error(undefined).
+
+-spec check_process_code(Module, OptionList) -> boolean() when
+ Module :: module(),
+ Option :: {allow_gc, boolean()},
+ OptionList :: [Option].
+check_process_code(_Module, _OptionList) ->
+ erlang:nif_error(undefined).
+
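
For completeness, a sketch of driving the primitive directly, as the
request_gc/2 helper in process_SUITE.erl does; erts_internal is an internal
module, so ordinary code should use the erlang:garbage_collect/2 and
erlang:check_process_code/3 wrappers above instead:

    %% Synchronous wrapper around the raw system-task request (sketch only).
    request_gc_sync(Pid, Prio) ->
        Ref = erlang:make_ref(),
        ok = erts_internal:request_system_task(Pid, Prio,
                                               {garbage_collect, Ref}),
        receive
            {garbage_collect, Ref, Result} ->
                Result
        end.
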