path: root/erts
Diffstat (limited to 'erts')
-rw-r--r--  erts/aclocal.m4                                 |   86
-rw-r--r--  erts/configure.in                               |   78
-rw-r--r--  erts/doc/src/Makefile                           |    2
-rw-r--r--  erts/doc/src/erl.xml                            |    8
-rw-r--r--  erts/doc/src/erl_nif.xml                        |    2
-rw-r--r--  erts/doc/src/erlang.xml                         |    8
-rw-r--r--  erts/doc/src/match_spec.xml                     |   10
-rw-r--r--  erts/emulator/beam/beam_emu.c                   |    6
-rw-r--r--  erts/emulator/beam/beam_load.c                  |   13
-rw-r--r--  erts/emulator/beam/bif.c                        |   10
-rw-r--r--  erts/emulator/beam/bif.h                        |    6
-rw-r--r--  erts/emulator/beam/binary.c                     |   34
-rw-r--r--  erts/emulator/beam/break.c                      |    5
-rw-r--r--  erts/emulator/beam/erl_bif_info.c               | 1370
-rw-r--r--  erts/emulator/beam/erl_gc.c                     |   55
-rw-r--r--  erts/emulator/beam/erl_gc.h                     |    2
-rw-r--r--  erts/emulator/beam/erl_lock_check.c             |  786
-rw-r--r--  erts/emulator/beam/erl_lock_check.h             |    2
-rw-r--r--  erts/emulator/beam/erl_message.c                |  106
-rw-r--r--  erts/emulator/beam/erl_message.h                |   91
-rw-r--r--  erts/emulator/beam/erl_printf_term.c            |    7
-rw-r--r--  erts/emulator/beam/erl_proc_sig_queue.c         |  673
-rw-r--r--  erts/emulator/beam/erl_proc_sig_queue.h         |  206
-rw-r--r--  erts/emulator/beam/erl_process.c                |   42
-rw-r--r--  erts/emulator/beam/erl_process.h                |   10
-rw-r--r--  erts/emulator/beam/erl_process_dict.c           |   71
-rw-r--r--  erts/emulator/beam/erl_process_dict.h           |    2
-rw-r--r--  erts/emulator/beam/erl_process_dump.c           |  124
-rw-r--r--  erts/emulator/beam/erl_time_sup.c               |    2
-rw-r--r--  erts/emulator/beam/global.h                     |    2
-rw-r--r--  erts/emulator/beam/msg_instrs.tab               |    4
-rw-r--r--  erts/emulator/beam/ops.tab                      |    3
-rw-r--r--  erts/emulator/beam/utils.c                      |    1
-rw-r--r--  erts/emulator/sys/common/erl_poll.c             |   12
-rw-r--r--  erts/emulator/sys/common/erl_sys_common_misc.c  |  187
-rw-r--r--  erts/emulator/test/beam_literals_SUITE.erl      |   55
-rw-r--r--  erts/emulator/test/bif_SUITE.erl                |   75
-rw-r--r--  erts/emulator/test/exception_SUITE.erl          |   56
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_SUITE.c   |   25
-rw-r--r--  erts/emulator/test/num_bif_SUITE.erl            |   34
-rw-r--r--  erts/emulator/test/process_SUITE.erl            |  116
-rw-r--r--  erts/emulator/test/z_SUITE.erl                  |    8
-rw-r--r--  erts/etc/unix/etp-commands.in                   |   10
-rw-r--r--  erts/preloaded/ebin/erlang.beam                 | bin  102724 -> 102732 bytes
-rw-r--r--  erts/preloaded/ebin/init.beam                   | bin  50472 -> 50596 bytes
-rw-r--r--  erts/preloaded/src/erlang.erl                   |    2
-rw-r--r--  erts/preloaded/src/init.erl                     |    9
47 files changed, 2807 insertions, 1609 deletions
diff --git a/erts/aclocal.m4 b/erts/aclocal.m4
index 887babc13f..a4d09810bd 100644
--- a/erts/aclocal.m4
+++ b/erts/aclocal.m4
@@ -2770,3 +2770,89 @@ rm -f conftest*])
#define UNSAFE_MASK 0xc0000000 /* Mask for bits that must be constant */
+dnl ----------------------------------------------------------------------
+dnl
+dnl LM_HARDWARE_ARCH
+dnl
+dnl Determine target hardware in ARCH
+dnl
+AC_DEFUN([LM_HARDWARE_ARCH], [
+ AC_MSG_CHECKING([target hardware architecture])
+ if test "x$host_alias" != "x" -a "x$host_cpu" != "x"; then
+ chk_arch_=$host_cpu
+ else
+ chk_arch_=`uname -m`
+ fi
+
+ case $chk_arch_ in
+ sun4u) ARCH=ultrasparc;;
+ sparc64) ARCH=sparc64;;
+ sun4v) ARCH=ultrasparc;;
+ i86pc) ARCH=x86;;
+ i386) ARCH=x86;;
+ i486) ARCH=x86;;
+ i586) ARCH=x86;;
+ i686) ARCH=x86;;
+ x86_64) ARCH=amd64;;
+ amd64) ARCH=amd64;;
+ macppc) ARCH=ppc;;
+ powerpc) ARCH=ppc;;
+ ppc) ARCH=ppc;;
+ ppc64) ARCH=ppc64;;
+ ppc64le) ARCH=ppc64le;;
+ "Power Macintosh") ARCH=ppc;;
+ armv5b) ARCH=arm;;
+ armv5teb) ARCH=arm;;
+ armv5tel) ARCH=arm;;
+ armv5tejl) ARCH=arm;;
+ armv6l) ARCH=arm;;
+ armv6hl) ARCH=arm;;
+ armv7l) ARCH=arm;;
+ armv7hl) ARCH=arm;;
+ tile) ARCH=tile;;
+ e2k) ARCH=e2k;;
+ *) ARCH=noarch;;
+ esac
+ AC_MSG_RESULT($ARCH)
+
+ dnl
+ dnl Convert between x86 and amd64 based on the compiler's mode.
+ dnl Ditto between ultrasparc and sparc64.
+ dnl
+ AC_MSG_CHECKING(whether compilation mode forces ARCH adjustment)
+ case "$ARCH-$ac_cv_sizeof_void_p" in
+ x86-8)
+ AC_MSG_RESULT(yes: adjusting ARCH=x86 to ARCH=amd64)
+ ARCH=amd64
+ ;;
+ amd64-4)
+ AC_MSG_RESULT(yes: adjusting ARCH=amd64 to ARCH=x86)
+ ARCH=x86
+ ;;
+ ultrasparc-8)
+ AC_MSG_RESULT(yes: adjusting ARCH=ultrasparc to ARCH=sparc64)
+ ARCH=sparc64
+ ;;
+ sparc64-4)
+ AC_MSG_RESULT(yes: adjusting ARCH=sparc64 to ARCH=ultrasparc)
+ ARCH=ultrasparc
+ ;;
+ ppc64-4)
+ AC_MSG_RESULT(yes: adjusting ARCH=ppc64 to ARCH=ppc)
+ ARCH=ppc
+ ;;
+ ppc-8)
+ AC_MSG_RESULT(yes: adjusting ARCH=ppc to ARCH=ppc64)
+ ARCH=ppc64
+ ;;
+ arm-8)
+ AC_MSG_RESULT(yes: adjusting ARCH=arm to ARCH=noarch)
+ ARCH=noarch
+ ;;
+ *)
+ AC_MSG_RESULT(no: ARCH is $ARCH)
+ ;;
+ esac
+
+ AC_SUBST(ARCH)
+])
diff --git a/erts/configure.in b/erts/configure.in
index 820247b4b8..2d0d6c6444 100644
--- a/erts/configure.in
+++ b/erts/configure.in
@@ -658,83 +658,9 @@ case $chk_opsys_ in
*) OPSYS=noopsys
esac
-if test "x$host_alias" != "x" -a "x$host_cpu" != "x"; then
- chk_arch_=$host_cpu
-else
- chk_arch_=`uname -m`
-fi
-
-case $chk_arch_ in
- sun4u) ARCH=ultrasparc;;
- sparc64) ARCH=sparc64;;
- sun4v) ARCH=ultrasparc;;
- i86pc) ARCH=x86;;
- i386) ARCH=x86;;
- i486) ARCH=x86;;
- i586) ARCH=x86;;
- i686) ARCH=x86;;
- x86_64) ARCH=amd64;;
- amd64) ARCH=amd64;;
- macppc) ARCH=ppc;;
- powerpc) ARCH=ppc;;
- ppc) ARCH=ppc;;
- ppc64) ARCH=ppc64;;
- ppc64le) ARCH=ppc64le;;
- "Power Macintosh") ARCH=ppc;;
- armv5b) ARCH=arm;;
- armv5teb) ARCH=arm;;
- armv5tel) ARCH=arm;;
- armv5tejl) ARCH=arm;;
- armv6l) ARCH=arm;;
- armv6hl) ARCH=arm;;
- armv7l) ARCH=arm;;
- armv7hl) ARCH=arm;;
- tile) ARCH=tile;;
- e2k) ARCH=e2k;;
- *) ARCH=noarch;;
-esac
-
-dnl
-dnl Convert between x86 and amd64 based on the compiler's mode.
-dnl Ditto between ultrasparc and sparc64.
-dnl
-AC_MSG_CHECKING(whether compilation mode forces ARCH adjustment)
-case "$ARCH-$ac_cv_sizeof_void_p" in
-x86-8)
- AC_MSG_RESULT(yes: adjusting ARCH=x86 to ARCH=amd64)
- ARCH=amd64
- ;;
-amd64-4)
- AC_MSG_RESULT(yes: adjusting ARCH=amd64 to ARCH=x86)
- ARCH=x86
- ;;
-ultrasparc-8)
- AC_MSG_RESULT(yes: adjusting ARCH=ultrasparc to ARCH=sparc64)
- ARCH=sparc64
- ;;
-sparc64-4)
- AC_MSG_RESULT(yes: adjusting ARCH=sparc64 to ARCH=ultrasparc)
- ARCH=ultrasparc
- ;;
-ppc64-4)
- AC_MSG_RESULT(yes: adjusting ARCH=ppc64 to ARCH=ppc)
- ARCH=ppc
- ;;
-ppc-8)
- AC_MSG_RESULT(yes: adjusting ARCH=ppc to ARCH=ppc64)
- ARCH=ppc64
- ;;
-arm-8)
- AC_MSG_RESULT(yes: adjusting ARCH=arm to ARCH=noarch)
- ARCH=noarch
- ;;
-*)
- AC_MSG_RESULT(no)
- ;;
-esac
-
AC_SUBST(OPSYS)
-AC_SUBST(ARCH)
+
+LM_HARDWARE_ARCH
dnl Check consistency of os and darwin-switches
diff --git a/erts/doc/src/Makefile b/erts/doc/src/Makefile
index c4f1baf89e..5fa8b0673a 100644
--- a/erts/doc/src/Makefile
+++ b/erts/doc/src/Makefile
@@ -155,7 +155,7 @@ clean:
rm -f errs core *~
$(SPECDIR)/specs_%.xml:
- escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
+ $(gen_verbose)escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
-o$(dir $@) -module $(patsubst $(SPECDIR)/specs_%.xml,%,$@)
# ----------------------------------------------------
diff --git a/erts/doc/src/erl.xml b/erts/doc/src/erl.xml
index 5089d4e0ce..74654a295d 100644
--- a/erts/doc/src/erl.xml
+++ b/erts/doc/src/erl.xml
@@ -1158,7 +1158,7 @@
<tag><marker id="+sbwtdcpu"/>
<c>+sbwtdcpu none|very_short|short|medium|long|very_long</c></tag>
<item>
- <p>As <seealso marker="+sbwt"><c>+sbwt</c></seealso> but affects
+ <p>As <seealso marker="#+sbwt"><c>+sbwt</c></seealso> but affects
dirty CPU schedulers. Defaults to <c>short</c>.</p>
<note>
<p>This flag can be removed or changed at any time
@@ -1168,7 +1168,7 @@
<tag><marker id="+sbwtdio"/>
<c>+sbwtdio none|very_short|short|medium|long|very_long</c></tag>
<item>
- <p>As <seealso marker="+sbwt"><c>+sbwt</c></seealso> but affects
+ <p>As <seealso marker="#+sbwt"><c>+sbwt</c></seealso> but affects
dirty IO schedulers. Defaults to <c>short</c>.</p>
<note>
<p>This flag can be removed or changed at any time
@@ -1443,7 +1443,7 @@
<tag><marker id="+swtdcpu"/>
<c>+swtdcpu very_low|low|medium|high|very_high</c></tag>
<item>
- <p>As <seealso marker="+swt"><c>+swt</c></seealso> but
+ <p>As <seealso marker="#+swt"><c>+swt</c></seealso> but
affects dirty CPU schedulers. Defaults to <c>medium</c>.</p>
<note>
<p>This flag can be removed or changed at any time
@@ -1453,7 +1453,7 @@
<tag><marker id="+swtdio"/>
<c>+swtdio very_low|low|medium|high|very_high</c></tag>
<item>
- <p>As <seealso marker="+swt"><c>+swt</c></seealso> but affects
+ <p>As <seealso marker="#+swt"><c>+swt</c></seealso> but affects
dirty IO schedulers. Defaults to <c>medium</c>.</p>
<note>
<p>This flag can be removed or changed at any time
diff --git a/erts/doc/src/erl_nif.xml b/erts/doc/src/erl_nif.xml
index cabc07d020..8a9ae58e99 100644
--- a/erts/doc/src/erl_nif.xml
+++ b/erts/doc/src/erl_nif.xml
@@ -1292,7 +1292,7 @@ typedef struct {
ErlNifIOVec *iovec = NULL;
size_t max_elements = 128;
ERL_NIF_TERM tail;
-if (!enif_inspect_iovec(NULL, max_elements, term, &tail, iovec))
+if (!enif_inspect_iovec(NULL, max_elements, term, &tail, &iovec))
return 0;
// Do things with the iovec
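The erl_nif.xml hunk above corrects the documentation example so that enif_inspect_iovec() receives the address of the ErlNifIOVec pointer. For illustration only (the NIF function below and the loop over the segments are a sketch, not part of this commit; it assumes the documented ErlNifIOVec fields iovcnt/iov and enif_free_iovec() for iovecs inspected with a NULL environment), a complete call site could look roughly like this:

    static ERL_NIF_TERM
    inspect_example_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
    {
        /* Inspect up to 128 elements of the iovec term in argv[0]. */
        ErlNifIOVec *iovec = NULL;
        size_t max_elements = 128;
        ERL_NIF_TERM tail;
        int i;

        if (!enif_inspect_iovec(NULL, max_elements, argv[0], &tail, &iovec))
            return enif_make_badarg(env);

        for (i = 0; i < iovec->iovcnt; i++) {
            /* iovec->iov[i].iov_base and iovec->iov[i].iov_len
             * describe each contiguous segment. */
        }

        enif_free_iovec(iovec);  /* needed because env was NULL above */
        return tail;             /* rest of the list if max_elements was reached */
    }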
diff --git a/erts/doc/src/erlang.xml b/erts/doc/src/erlang.xml
index c086928bb3..d4d4dd7f31 100644
--- a/erts/doc/src/erlang.xml
+++ b/erts/doc/src/erlang.xml
@@ -53,14 +53,14 @@
<datatypes>
<datatype>
- <name>ext_binary()</name>
+ <name name="ext_binary"/>
<desc>
<p>A binary data object, structured according to
the Erlang external term format.</p>
</desc>
</datatype>
<datatype>
- <name>iovec()</name>
+ <name name="iovec"/>
<desc>
<p>A list of binaries. This datatype is useful to use
together with <seealso marker="erl_nif#enif_inspect_iovec">
@@ -204,10 +204,6 @@
<name name="abs" arity="1" clause_i="1"/>
<name name="abs" arity="1" clause_i="2"/>
<fsummary>Arithmetical absolute value.</fsummary>
- <type>
- <v>Float = float()</v>
- <v>Int = integer()</v>
- </type>
<desc>
<p>Returns an integer or float that is the arithmetical
absolute value of <c><anno>Float</anno></c> or
diff --git a/erts/doc/src/match_spec.xml b/erts/doc/src/match_spec.xml
index 644b989800..6cf0a0e677 100644
--- a/erts/doc/src/match_spec.xml
+++ b/erts/doc/src/match_spec.xml
@@ -169,10 +169,9 @@
<c><![CDATA[is_reference]]></c> | <c><![CDATA[is_tuple]]></c> |
<c><![CDATA[is_map]]></c> | <c><![CDATA[is_binary]]></c> |
<c><![CDATA[is_function]]></c> | <c><![CDATA[is_record]]></c> |
- <c><![CDATA[is_seq_trace]]></c> | <c><![CDATA['and']]></c> |
- <c><![CDATA['or']]></c> | <c><![CDATA['not']]></c> |
- <c><![CDATA['xor']]></c> | <c><![CDATA['andalso']]></c> |
- <c><![CDATA['orelse']]></c>
+ <c><![CDATA['and']]></c> | <c><![CDATA['or']]></c> |
+ <c><![CDATA['not']]></c> | <c><![CDATA['xor']]></c> |
+ <c><![CDATA['andalso']]></c> | <c><![CDATA['orelse']]></c>
</item>
<item>ConditionExpression ::= ExprMatchVariable | { GuardFunction } |
{ GuardFunction, ConditionExpression, ... } | TermConstruct
@@ -202,8 +201,7 @@
<c><![CDATA['>=']]></c> | <c><![CDATA['<']]></c> |
<c><![CDATA['=<']]></c> | <c><![CDATA['=:=']]></c> |
<c><![CDATA['==']]></c> | <c><![CDATA['=/=']]></c> |
- <c><![CDATA['/=']]></c> | <c><![CDATA[self]]></c> |
- <c><![CDATA[get_tcw]]></c>
+ <c><![CDATA['/=']]></c> | <c><![CDATA[self]]></c>
</item>
<item>MatchBody ::= [ ConditionExpression, ... ]
</item>
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index fb87be3f17..ee287243a4 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -2420,8 +2420,7 @@ erts_hibernate(Process* c_p, Eterm* reg)
* shrink the heap.
*/
erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
- erts_proc_sig_fetch(c_p);
- if (!c_p->sig_qs.len) {
+ if (!erts_proc_sig_fetch(c_p)) {
erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
c_p->fvalue = NIL;
PROCESS_MAIN_CHK_LOCKS(c_p);
@@ -2429,8 +2428,7 @@ erts_hibernate(Process* c_p, Eterm* reg)
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
- erts_proc_sig_fetch(c_p);
- if (!c_p->sig_qs.len)
+ if (!erts_proc_sig_fetch(c_p))
erts_atomic32_read_band_relb(&c_p->state, ~ERTS_PSFLG_ACTIVE);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
}
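Both hunks above depend on erts_proc_sig_fetch() now returning the length of the process's private signal/message queue, so callers no longer read c_p->sig_qs.len separately. A condensed sketch of the new calling pattern, as it also appears in the break.c hunk later in this diff (locking shown schematically; Sint len is declared by the caller):

    erts_proc_lock(p, ERTS_PROC_LOCK_MSGQ);
    len = erts_proc_sig_fetch(p);     /* fetch in-transit signals, get queue length */
    erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ);
    if (len == 0) {
        /* Queue is empty: e.g. safe to hibernate or to clear ERTS_PSFLG_ACTIVE. */
    }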
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index af620d7432..e61199a8fd 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -4522,6 +4522,19 @@ is_empty_map(LoaderState* stp, GenOpArg Lit)
}
/*
+ * Predicate to test whether the given literal is an export.
+ */
+static int
+literal_is_export(LoaderState* stp, GenOpArg Lit)
+{
+ Eterm term;
+
+ ASSERT(Lit.type == TAG_q);
+ term = stp->literals[Lit.val].term;
+ return is_export(term);
+}
+
+/*
* Pseudo predicate map_key_sort that will sort the Rest operand for
* map instructions as a side effect.
*/
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index 232597c5b6..79244b8544 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -1150,6 +1150,13 @@ BIF_RETTYPE raise_3(BIF_ALIST_3)
/* Create stacktrace and store */
if (erts_backtrace_depth < depth) {
depth = erts_backtrace_depth;
+ if (depth == 0) {
+ /*
+ * For consistency with stacktraces generated
+ * automatically, always include one element.
+ */
+ depth = 1;
+ }
must_copy = 1;
}
if (must_copy) {
@@ -4147,7 +4154,8 @@ BIF_RETTYPE erts_internal_group_leader_2(BIF_ALIST_2)
rp = BIF_P;
else {
rp = erts_try_lock_sig_free_proc(BIF_ARG_2,
- ERTS_PROC_LOCK_MAIN);
+ ERTS_PROC_LOCK_MAIN,
+ NULL);
if (!rp)
BIF_RET(am_badarg);
if (rp == ERTS_PROC_LOCK_BUSY)
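erts_try_lock_sig_free_proc() now takes a third argument through which the target's state can be reported (NULL above, since group_leader does not need it). The reworked process_info implementation later in this diff uses it as follows (condensed from that hunk as a usage sketch):

    erts_aint32_t state;
    Process *rp = erts_try_lock_sig_free_proc(pid, locks, &state);
    if (!rp)
        goto undefined;               /* no such (alive) process */
    if (rp == ERTS_PROC_LOCK_BUSY)
        goto send_signal;             /* not signal-free; ask via signal instead */
    if (state & ERTS_PSFLG_EXITING)
        goto send_signal;             /* wait for it to terminate properly */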
diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h
index a33421d762..a47339253e 100644
--- a/erts/emulator/beam/bif.h
+++ b/erts/emulator/beam/bif.h
@@ -425,6 +425,12 @@ do { \
BIF_TRAP3((TRP), (P), (A0), (A1), (A2)); \
} while (0)
+#define ERTS_BIF_PREP_EXITED(RET, PROC) \
+do { \
+ KILL_CATCHES((PROC)); \
+ ERTS_BIF_PREP_ERROR((RET), (PROC), EXTAG_EXIT); \
+} while (0)
+
#define ERTS_BIF_EXITED(PROC) \
do { \
KILL_CATCHES((PROC)); \
diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c
index 95d324d2c1..d53f75c279 100644
--- a/erts/emulator/beam/binary.c
+++ b/erts/emulator/beam/binary.c
@@ -113,6 +113,40 @@ new_binary(Process *p, byte *buf, Uint len)
return build_proc_bin(&MSO(p), HAlloc(p, PROC_BIN_SIZE), bptr);
}
+Eterm
+erts_heap_factory_new_binary(ErtsHeapFactory *hfact, byte *buf, Uint len,
+ Uint reserve_size)
+{
+ Eterm *hp;
+ Binary* bptr;
+
+ if (len <= ERL_ONHEAP_BIN_LIMIT) {
+ ErlHeapBin* hb;
+ hp = erts_produce_heap(hfact, heap_bin_size(len), reserve_size);
+ hb = (ErlHeapBin *) hp;
+ hb->thing_word = header_heap_bin(len);
+ hb->size = len;
+ if (buf != NULL) {
+ sys_memcpy(hb->data, buf, len);
+ }
+ return make_binary(hb);
+ }
+
+ /*
+ * Allocate the binary struct itself.
+ */
+ bptr = erts_bin_nrml_alloc(len);
+ if (buf != NULL) {
+ sys_memcpy(bptr->orig_bytes, buf, len);
+ }
+
+ hp = erts_produce_heap(hfact, PROC_BIN_SIZE, reserve_size);
+
+ return build_proc_bin(hfact->off_heap, hp, bptr);
+}
+
+
+
/*
* When heap binary is not desired...
*/
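The new erts_heap_factory_new_binary() mirrors new_binary() above but allocates from a heap factory (honoring a reserve size) instead of a process heap. The 'backtrace' clause of the process_info rework later in this diff shows the intended use:

    erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
    erts_stack_dump(ERTS_PRINT_DSBUF, (void *) dsbufp, rp);
    res = erts_heap_factory_new_binary(hfact, (byte *) dsbufp->str,
                                       dsbufp->str_len, reserve_size);
    erts_destroy_tmp_dsbuf(dsbufp);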
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 4dabac3512..ba8cc5e2ba 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -204,6 +204,7 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p)
{
int garbing = 0;
int running = 0;
+ Sint len;
struct saved_calls *scb;
erts_aint32_t state;
@@ -252,9 +253,9 @@ print_process_info(fmtfn_t to, void *to_arg, Process *p)
erts_print(to, to_arg, "Spawned by: %T\n", p->parent);
erts_proc_lock(p, ERTS_PROC_LOCK_MSGQ);
- erts_proc_sig_fetch(p);
+ len = erts_proc_sig_fetch(p);
erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ);
- erts_print(to, to_arg, "Message queue length: %d\n", p->sig_qs.len);
+ erts_print(to, to_arg, "Message queue length: %d\n", len);
/* display the message queue only if there is anything in it */
if (!ERTS_IS_CRASH_DUMPING && p->sig_qs.first != NULL && !garbing) {
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index bdca93428e..89e3d3f43e 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -155,8 +155,10 @@ static Eterm os_type_tuple;
static Eterm os_version_tuple;
static Eterm
-current_function(Process* p, Process* rp, Eterm** hpp, int full_info);
-static Eterm current_stacktrace(Process* p, Process* rp, Eterm** hpp);
+current_function(Process* p, ErtsHeapFactory *hfact, Process* rp,
+ int full_info, Uint reserve_size, int flags);
+static Eterm current_stacktrace(ErtsHeapFactory *hfact, Process* rp,
+ Uint reserve_size);
static Eterm
bld_bin_list(Uint **hpp, Uint *szp, ErlOffHeap* oh)
@@ -693,119 +695,222 @@ collect_one_suspend_monitor(ErtsMonitor *mon, void *vsmicp)
* process_info/[1,2]
*/
-#define ERTS_PI_FAIL_TYPE_BADARG 0
-#define ERTS_PI_FAIL_TYPE_YIELD 1
-#define ERTS_PI_FAIL_TYPE_EXITED 2
-
-static ERTS_INLINE ErtsProcLocks
-pi_locks(Eterm info)
-{
- switch (info) {
- case am_priority:
- case am_status:
- return 0;
- case am_suspended:
- return ERTS_PROC_LOCK_STATUS;
- case am_messages:
- case am_message_queue_len:
- case am_total_heap_size:
- return ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_MSGQ;
- default:
- return ERTS_PROC_LOCK_MAIN;
- }
-}
-
/*
* All valid process_info arguments.
*/
-static Eterm pi_args[] = {
- am_registered_name,
- am_current_function,
- am_initial_call,
- am_status,
- am_messages,
- am_message_queue_len,
- am_links,
- am_monitors,
- am_monitored_by,
- am_dictionary,
- am_trap_exit,
- am_error_handler,
- am_heap_size,
- am_stack_size,
- am_memory,
- am_garbage_collection,
- am_group_leader,
- am_reductions,
- am_priority,
- am_trace,
- am_binary,
- am_sequential_trace_token,
- am_catchlevel,
- am_backtrace,
- am_last_calls,
- am_total_heap_size,
- am_suspending,
- am_min_heap_size,
- am_min_bin_vheap_size,
- am_max_heap_size,
- am_current_location,
- am_current_stacktrace,
- am_message_queue_data,
- am_garbage_collection_info,
- am_magic_ref
+
+#define ERTS_PI_IX_REGISTERED_NAME 0
+#define ERTS_PI_IX_CURRENT_FUNCTION 1
+#define ERTS_PI_IX_INITIAL_CALL 2
+#define ERTS_PI_IX_STATUS 3
+#define ERTS_PI_IX_MESSAGES 4
+#define ERTS_PI_IX_MESSAGE_QUEUE_LEN 5
+#define ERTS_PI_IX_LINKS 6
+#define ERTS_PI_IX_MONITORS 7
+#define ERTS_PI_IX_MONITORED_BY 8
+#define ERTS_PI_IX_DICTIONARY 9
+#define ERTS_PI_IX_TRAP_EXIT 10
+#define ERTS_PI_IX_ERROR_HANDLER 11
+#define ERTS_PI_IX_HEAP_SIZE 12
+#define ERTS_PI_IX_STACK_SIZE 13
+#define ERTS_PI_IX_MEMORY 14
+#define ERTS_PI_IX_GARBAGE_COLLECTION 15
+#define ERTS_PI_IX_GROUP_LEADER 16
+#define ERTS_PI_IX_REDUCTIONS 17
+#define ERTS_PI_IX_PRIORITY 18
+#define ERTS_PI_IX_TRACE 19
+#define ERTS_PI_IX_BINARY 20
+#define ERTS_PI_IX_SEQUENTIAL_TRACE_TOKEN 21
+#define ERTS_PI_IX_CATCHLEVEL 22
+#define ERTS_PI_IX_BACKTRACE 23
+#define ERTS_PI_IX_LAST_CALLS 24
+#define ERTS_PI_IX_TOTAL_HEAP_SIZE 25
+#define ERTS_PI_IX_SUSPENDING 26
+#define ERTS_PI_IX_MIN_HEAP_SIZE 27
+#define ERTS_PI_IX_MIN_BIN_VHEAP_SIZE 28
+#define ERTS_PI_IX_MAX_HEAP_SIZE 29
+#define ERTS_PI_IX_CURRENT_LOCATION 30
+#define ERTS_PI_IX_CURRENT_STACKTRACE 31
+#define ERTS_PI_IX_MESSAGE_QUEUE_DATA 32
+#define ERTS_PI_IX_GARBAGE_COLLECTION_INFO 33
+#define ERTS_PI_IX_MAGIC_REF 34
+#define ERTS_PI_IX_FULLSWEEP_AFTER 35
+
+#define ERTS_PI_FLAG_SINGELTON (1 << 0)
+#define ERTS_PI_FLAG_ALWAYS_WRAP (1 << 1)
+#define ERTS_PI_FLAG_WANT_MSGS (1 << 2)
+#define ERTS_PI_FLAG_NEED_MSGQ_LEN (1 << 3)
+#define ERTS_PI_FLAG_FORCE_SIG_SEND (1 << 4)
+#define ERTS_PI_FLAG_REQUEST_FOR_OTHER (1 << 5)
+
+#define ERTS_PI_UNRESERVE(RS, SZ) \
+ (ASSERT((RS) >= (SZ)), (RS) -= (SZ))
+
+
+typedef struct {
+ Eterm name;
+ Uint reserve_size;
+ int flags;
+ ErtsProcLocks locks;
+} ErtsProcessInfoArgs;
+
+static ErtsProcessInfoArgs pi_args[] = {
+ {am_registered_name, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_current_function, 4, 0, ERTS_PROC_LOCK_MAIN},
+ {am_initial_call, 4, 0, ERTS_PROC_LOCK_MAIN},
+ {am_status, 0, 0, 0},
+ {am_messages, 0, ERTS_PI_FLAG_WANT_MSGS|ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_message_queue_len, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN, ERTS_PROC_LOCK_MAIN},
+ {am_links, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_monitors, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_monitored_by, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_dictionary, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_trap_exit, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_error_handler, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_stack_size, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_memory, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_garbage_collection, 3+2 + 3+2 + 3+2 + 3+2 + 3+2 + ERTS_MAX_HEAP_SIZE_MAP_SZ, 0, ERTS_PROC_LOCK_MAIN},
+ {am_group_leader, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_reductions, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_priority, 0, 0, 0},
+ {am_trace, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_binary, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_sequential_trace_token, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_catchlevel, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_backtrace, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_last_calls, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_total_heap_size, 0, ERTS_PI_FLAG_NEED_MSGQ_LEN|ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_suspending, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, 0},
+ {am_min_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_min_bin_vheap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_max_heap_size, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_current_location, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_current_stacktrace, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_message_queue_data, 0, 0, ERTS_PROC_LOCK_MAIN},
+ {am_garbage_collection_info, ERTS_PROCESS_GC_INFO_MAX_SIZE, 0, ERTS_PROC_LOCK_MAIN},
+ {am_magic_ref, 0, ERTS_PI_FLAG_FORCE_SIG_SEND, ERTS_PROC_LOCK_MAIN},
+ {am_fullsweep_after, 0, 0, ERTS_PROC_LOCK_MAIN}
};
-#define ERTS_PI_ARGS ((int) (sizeof(pi_args)/sizeof(Eterm)))
+#define ERTS_PI_ARGS ((int) (sizeof(pi_args)/sizeof(pi_args[0])))
+
+#ifdef DEBUG
+# define ERTS_PI_DEF_ARR_SZ 2
+#else
+# define ERTS_PI_DEF_ARR_SZ ERTS_PI_ARGS
+#endif
static ERTS_INLINE Eterm
pi_ix2arg(int ix)
{
if (ix < 0 || ERTS_PI_ARGS <= ix)
return am_undefined;
- return pi_args[ix];
+ return pi_args[ix].name;
+}
+
+static ERTS_INLINE int
+pi_ix2flags(int ix)
+{
+ if (ix < 0 || ERTS_PI_ARGS <= ix)
+ return 0;
+ return pi_args[ix].flags;
+}
+
+static ERTS_INLINE Uint
+pi_ix2rsz(int ix)
+{
+ if (ix < 0 || ERTS_PI_ARGS <= ix)
+ return 0;
+ return pi_args[ix].reserve_size;
+}
+
+static ERTS_INLINE ErtsProcLocks
+pi_ix2locks(int ix)
+{
+ if (ix < 0 || ERTS_PI_ARGS <= ix)
+ return 0;
+ return pi_args[ix].locks;
}
static ERTS_INLINE int
pi_arg2ix(Eterm arg)
{
switch (arg) {
- case am_registered_name: return 0;
- case am_current_function: return 1;
- case am_initial_call: return 2;
- case am_status: return 3;
- case am_messages: return 4;
- case am_message_queue_len: return 5;
- case am_links: return 6;
- case am_monitors: return 7;
- case am_monitored_by: return 8;
- case am_dictionary: return 9;
- case am_trap_exit: return 10;
- case am_error_handler: return 11;
- case am_heap_size: return 12;
- case am_stack_size: return 13;
- case am_memory: return 14;
- case am_garbage_collection: return 15;
- case am_group_leader: return 16;
- case am_reductions: return 17;
- case am_priority: return 18;
- case am_trace: return 19;
- case am_binary: return 20;
- case am_sequential_trace_token: return 21;
- case am_catchlevel: return 22;
- case am_backtrace: return 23;
- case am_last_calls: return 24;
- case am_total_heap_size: return 25;
- case am_suspending: return 26;
- case am_min_heap_size: return 27;
- case am_min_bin_vheap_size: return 28;
- case am_max_heap_size: return 29;
- case am_current_location: return 30;
- case am_current_stacktrace: return 31;
- case am_message_queue_data: return 32;
- case am_garbage_collection_info: return 33;
- case am_magic_ref: return 34;
- default: return -1;
+ case am_registered_name:
+ return ERTS_PI_IX_REGISTERED_NAME;
+ case am_current_function:
+ return ERTS_PI_IX_CURRENT_FUNCTION;
+ case am_initial_call:
+ return ERTS_PI_IX_INITIAL_CALL;
+ case am_status:
+ return ERTS_PI_IX_STATUS;
+ case am_messages:
+ return ERTS_PI_IX_MESSAGES;
+ case am_message_queue_len:
+ return ERTS_PI_IX_MESSAGE_QUEUE_LEN;
+ case am_links:
+ return ERTS_PI_IX_LINKS;
+ case am_monitors:
+ return ERTS_PI_IX_MONITORS;
+ case am_monitored_by:
+ return ERTS_PI_IX_MONITORED_BY;
+ case am_dictionary:
+ return ERTS_PI_IX_DICTIONARY;
+ case am_trap_exit:
+ return ERTS_PI_IX_TRAP_EXIT;
+ case am_error_handler:
+ return ERTS_PI_IX_ERROR_HANDLER;
+ case am_heap_size:
+ return ERTS_PI_IX_HEAP_SIZE;
+ case am_stack_size:
+ return ERTS_PI_IX_STACK_SIZE;
+ case am_memory:
+ return ERTS_PI_IX_MEMORY;
+ case am_garbage_collection:
+ return ERTS_PI_IX_GARBAGE_COLLECTION;
+ case am_group_leader:
+ return ERTS_PI_IX_GROUP_LEADER;
+ case am_reductions:
+ return ERTS_PI_IX_REDUCTIONS;
+ case am_priority:
+ return ERTS_PI_IX_PRIORITY;
+ case am_trace:
+ return ERTS_PI_IX_TRACE;
+ case am_binary:
+ return ERTS_PI_IX_BINARY;
+ case am_sequential_trace_token:
+ return ERTS_PI_IX_SEQUENTIAL_TRACE_TOKEN;
+ case am_catchlevel:
+ return ERTS_PI_IX_CATCHLEVEL;
+ case am_backtrace:
+ return ERTS_PI_IX_BACKTRACE;
+ case am_last_calls:
+ return ERTS_PI_IX_LAST_CALLS;
+ case am_total_heap_size:
+ return ERTS_PI_IX_TOTAL_HEAP_SIZE;
+ case am_suspending:
+ return ERTS_PI_IX_SUSPENDING;
+ case am_min_heap_size:
+ return ERTS_PI_IX_MIN_HEAP_SIZE;
+ case am_min_bin_vheap_size:
+ return ERTS_PI_IX_MIN_BIN_VHEAP_SIZE;
+ case am_max_heap_size:
+ return ERTS_PI_IX_MAX_HEAP_SIZE;
+ case am_current_location:
+ return ERTS_PI_IX_CURRENT_LOCATION;
+ case am_current_stacktrace:
+ return ERTS_PI_IX_CURRENT_STACKTRACE;
+ case am_message_queue_data:
+ return ERTS_PI_IX_MESSAGE_QUEUE_DATA;
+ case am_garbage_collection_info:
+ return ERTS_PI_IX_GARBAGE_COLLECTION_INFO;
+ case am_magic_ref:
+ return ERTS_PI_IX_MAGIC_REF;
+ case am_fullsweep_after:
+ return ERTS_PI_IX_FULLSWEEP_AFTER;
+ default:
+ return -1;
}
}
@@ -858,186 +963,41 @@ process_info_init(void)
}
-static ERTS_INLINE Process *
-pi_lookup_proc(Process *c_p, Eterm pid, ErtsProcLocks *locks)
-{
- /*
- * If the main lock is needed, we use erts_pid2proc_not_running()
- * instead of erts_pid2proc() for two reasons:
- * * Current function of pid and possibly other information will
- * have been updated so that process_info() is consistent with an
- * info-request/info-response signal model.
- * * We avoid blocking the whole scheduler executing the
- * process that is calling process_info() for a long time
- * which will happen if pid is currently running.
- * The caller of process_info() may have to yield if pid
- * is currently running.
- */
-
- if ((*locks) & ERTS_PROC_LOCK_MAIN) {
- erts_aint32_t state;
- int local_only, done;
- Process *rp;
- ErtsProcLocks more_locks;
-
- rp = erts_pid2proc_not_running(c_p, ERTS_PROC_LOCK_MAIN,
- pid, ERTS_PROC_LOCK_MAIN);
-
- if (!rp || rp == ERTS_PROC_LOCK_BUSY)
- return rp;
-
- if ((*locks) & ERTS_PROC_LOCK_MSGQ) {
- /*
- * Move in queue into private queue and
- * release msgq lock, enabling others to
- * send messages to the process while it
- * is being inspected...
- */
- erts_proc_lock(rp, ERTS_PROC_LOCK_MSGQ);
- erts_proc_sig_fetch(rp);
- erts_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
- (*locks) &= ~ERTS_PROC_LOCK_MSGQ;
- }
-
- /*
- * Handle all signals received up to this point
- * in order to preserve signal order.
- *
- * FIX ME: Should be done yielding...
- */
- local_only = 0;
- do {
- int r = CONTEXT_REDS;
- done = erts_proc_sig_handle_incoming(rp, &state, &r,
- CONTEXT_REDS,
- local_only);
- local_only = !0;
- BUMP_REDS(c_p, r);
- } while (!done && !(state & ERTS_PSFLG_EXITING));
-
- if (state & ERTS_PSFLG_EXITING) {
- if (rp != c_p)
- erts_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
- return NULL;
- }
- more_locks = (*locks) & ~ERTS_PROC_LOCK_MAIN;
- if (more_locks)
- erts_proc_lock(rp, more_locks);
- return rp;
- }
-
- ASSERT(!((*locks) & ERTS_PROC_LOCK_MSGQ));
-
- if (*locks)
- return erts_pid2proc(c_p, ERTS_PROC_LOCK_MAIN,
- pid, *locks);
-
- return erts_proc_lookup(pid);
-}
-
-
-
-
-
static BIF_RETTYPE
-process_info_aux(Process *BIF_P,
+process_info_aux(Process *c_p,
+ ErtsHeapFactory *hfact,
Process *rp,
ErtsProcLocks rp_locks,
- Eterm rpid,
- Eterm item,
- int always_wrap,
- int *reds);
+ int item_ix,
+ int flags,
+ Uint *reserve_sizep,
+ Uint *reds);
-#define ERTS_PI_RES_ELEM_IX_BUF_INC 1024
-#define ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ ERTS_PI_ARGS
-
-static Eterm
-process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap,
- int *fail_type)
+Eterm
+erts_process_info(Process *c_p,
+ ErtsHeapFactory *hfact,
+ Process *rp,
+ ErtsProcLocks rp_locks,
+ int *item_ix,
+ int item_ix_len,
+ int flags,
+ Uint reserve_size,
+ Uint *reds)
{
- int want_messages = 0;
- int def_res_elem_ix_buf[ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ];
- int *res_elem_ix = &def_res_elem_ix_buf[0];
- int res_elem_ix_ix = -1;
- int res_elem_ix_sz = ERTS_PI_DEF_RES_ELEM_IX_BUF_SZ;
+ Eterm res;
Eterm part_res[ERTS_PI_ARGS];
- Eterm res, arg;
- Uint *hp, *hp_end;
- ErtsProcLocks locks = (ErtsProcLocks) 0;
- int res_len, ix;
- Process *rp = NULL;
- int reds = 0;
+ int item_ix_ix, ix;
- *fail_type = ERTS_PI_FAIL_TYPE_BADARG;
+ if (ERTS_PI_FLAG_SINGELTON & flags) {
+ ASSERT(item_ix_len == 1);
+ res = process_info_aux(c_p, hfact, rp, rp_locks, item_ix[0],
+ flags, &reserve_size, reds);
+ return res;
+ }
for (ix = 0; ix < ERTS_PI_ARGS; ix++)
part_res[ix] = THE_NON_VALUE;
- ASSERT(is_list(list));
-
- while (is_list(list)) {
- Eterm* consp = list_val(list);
-
- arg = CAR(consp);
- ix = pi_arg2ix(arg);
- if (ix < 0) {
- res = THE_NON_VALUE;
- goto done;
- }
- if (arg == am_messages)
- want_messages = 1;
- locks |= pi_locks(arg);
- res_elem_ix_ix++;
- if (res_elem_ix_ix >= res_elem_ix_sz) {
- if (res_elem_ix != &def_res_elem_ix_buf[0])
- res_elem_ix =
- erts_realloc(ERTS_ALC_T_TMP,
- res_elem_ix,
- sizeof(int)*(res_elem_ix_sz
- += ERTS_PI_RES_ELEM_IX_BUF_INC));
- else {
- int new_res_elem_ix_sz = ERTS_PI_RES_ELEM_IX_BUF_INC;
- int *new_res_elem_ix = erts_alloc(ERTS_ALC_T_TMP,
- sizeof(int)*new_res_elem_ix_sz);
- sys_memcpy((void *) new_res_elem_ix,
- (void *) res_elem_ix,
- sizeof(int)*res_elem_ix_sz);
- res_elem_ix = new_res_elem_ix;
- res_elem_ix_sz = new_res_elem_ix_sz;
- }
- }
- res_elem_ix[res_elem_ix_ix] = ix;
- list = CDR(consp);
- }
- if (is_not_nil(list)) {
- res = THE_NON_VALUE;
- goto done;
- }
-
- res_len = res_elem_ix_ix+1;
-
- ASSERT(res_len > 0);
-
- rp = pi_lookup_proc(c_p, pid, &locks);
- if (!rp) {
- if (c_p->common.id != pid)
- res = am_undefined;
- else {
- *fail_type = ERTS_PI_FAIL_TYPE_EXITED;
- res = THE_NON_VALUE;
- }
- goto done;
- }
- else if (rp == ERTS_PROC_LOCK_BUSY) {
- rp = NULL;
- res = THE_NON_VALUE;
- *fail_type = ERTS_PI_FAIL_TYPE_YIELD;
- goto done;
- }
-
- if (c_p == rp)
- locks |= ERTS_PROC_LOCK_MAIN;
-
/*
* We always handle 'messages' first if it should be part
* of the result. This since if both 'messages' and
@@ -1045,35 +1005,31 @@ process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap,
* change the result of 'message_queue_len' (in case
* the queue contain bad distribution messages).
*/
- if (want_messages) {
+ if (flags & ERTS_PI_FLAG_WANT_MSGS) {
ix = pi_arg2ix(am_messages);
ASSERT(part_res[ix] == THE_NON_VALUE);
- res = process_info_aux(c_p, rp, locks, pid, am_messages,
- always_wrap, &reds);
+ res = process_info_aux(c_p, hfact, rp, rp_locks, ix,
+ flags, &reserve_size, reds);
ASSERT(res != am_undefined);
ASSERT(res != THE_NON_VALUE);
part_res[ix] = res;
}
- for (; res_elem_ix_ix >= 0; res_elem_ix_ix--) {
- ix = res_elem_ix[res_elem_ix_ix];
+ for (item_ix_ix = item_ix_len - 1; item_ix_ix >= 0; item_ix_ix--) {
+ ix = item_ix[item_ix_ix];
if (part_res[ix] == THE_NON_VALUE) {
- arg = pi_ix2arg(ix);
- res = process_info_aux(c_p, rp, locks, pid, arg,
- always_wrap, &reds);
- if (res == am_undefined)
- goto done;
+ res = process_info_aux(c_p, hfact, rp, rp_locks, ix,
+ flags, &reserve_size, reds);
+ ASSERT(res != am_undefined);
ASSERT(res != THE_NON_VALUE);
part_res[ix] = res;
}
}
- hp = HAlloc(c_p, res_len*2);
- hp_end = hp + res_len*2;
res = NIL;
- for (res_elem_ix_ix = res_len - 1; res_elem_ix_ix >= 0; res_elem_ix_ix--) {
- ix = res_elem_ix[res_elem_ix_ix];
+ for (item_ix_ix = item_ix_len - 1; item_ix_ix >= 0; item_ix_ix--) {
+ ix = item_ix[item_ix_ix];
ASSERT(part_res[ix] != THE_NON_VALUE);
/*
* If we should ignore the value of registered_name,
@@ -1081,155 +1037,310 @@ process_info_list(Process *c_p, Eterm pid, Eterm list, int always_wrap,
* beginning of process_info_aux().
*/
if (is_nil(part_res[ix])) {
- ASSERT(!always_wrap);
+ ASSERT(!(flags & ERTS_PI_FLAG_ALWAYS_WRAP));
ASSERT(pi_ix2arg(ix) == am_registered_name);
}
else {
+ Eterm *hp;
+ ERTS_PI_UNRESERVE(reserve_size, 2);
+ hp = erts_produce_heap(hfact, 2, reserve_size);
res = CONS(hp, part_res[ix], res);
- hp += 2;
}
}
- if (!always_wrap) {
- HRelease(c_p, hp_end, hp);
- }
+ return res;
+}
- done:
+static void
+pi_setup_grow(int **arr, int *def_arr, Uint *sz, int ix);
- if (c_p == rp)
- locks &= ~ERTS_PROC_LOCK_MAIN;
- if (locks && rp)
- erts_proc_unlock(rp, locks);
+static BIF_RETTYPE
+process_info_bif(Process *c_p, Eterm pid, Eterm opt, int always_wrap, int pi2)
+{
+ ErtsHeapFactory hfact;
+ int def_arr[ERTS_PI_DEF_ARR_SZ];
+ int *item_ix = &def_arr[0];
+ Process *rp = NULL;
+ erts_aint32_t state;
+ BIF_RETTYPE ret;
+ Uint reds = 0;
+ ErtsProcLocks locks = 0;
+ int flags;
+ Uint reserve_size;
+ int len;
+ Eterm res;
- if (res_elem_ix != &def_res_elem_ix_buf[0])
- erts_free(ERTS_ALC_T_TMP, res_elem_ix);
+ ERTS_CT_ASSERT(ERTS_PI_DEF_ARR_SZ > 0);
- BUMP_REDS(c_p, reds);
+ if (c_p->common.id == pid) {
+ int local_only = c_p->flags & F_LOCAL_SIGS_ONLY;
+ int sreds = ERTS_BIF_REDS_LEFT(c_p);
+ int sres;
- return res;
-}
+ if (!local_only) {
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
+ erts_proc_sig_fetch(c_p);
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
+ }
-BIF_RETTYPE process_info_1(BIF_ALIST_1)
-{
- Eterm res;
- int fail_type;
+ sres = erts_proc_sig_handle_incoming(c_p, &state, &sreds, sreds, !0);
+ if (state & ERTS_PSFLG_EXITING) {
+ c_p->flags &= ~F_LOCAL_SIGS_ONLY;
+ goto exited;
+ }
+ if (!sres) {
+ /*
+ * More signals to handle; need to yield and continue.
+ * Prevent fetching of more signals by setting
+ * local-sigs-only flag.
+ */
+ c_p->flags |= F_LOCAL_SIGS_ONLY;
+ goto yield;
+ }
- if (is_external_pid(BIF_ARG_1)
- && external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry)
- BIF_RET(am_undefined);
-
- if (is_not_internal_pid(BIF_ARG_1)) {
- BIF_ERROR(BIF_P, BADARG);
- }
-
- res = process_info_list(BIF_P, BIF_ARG_1, pi_1_keys_list, 0, &fail_type);
- if (is_non_value(res)) {
- switch (fail_type) {
- case ERTS_PI_FAIL_TYPE_BADARG:
- BIF_ERROR(BIF_P, BADARG);
- case ERTS_PI_FAIL_TYPE_YIELD:
- ERTS_BIF_YIELD1(bif_export[BIF_process_info_1], BIF_P, BIF_ARG_1);
- case ERTS_PI_FAIL_TYPE_EXITED:
- ERTS_BIF_EXITED(BIF_P);
- default:
- erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error", __FILE__, __LINE__);
- }
+ c_p->flags &= ~F_LOCAL_SIGS_ONLY;
}
- ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
- BIF_RET(res);
-}
+ if (is_atom(opt)) {
+ int ix = pi_arg2ix(opt);
+ item_ix[0] = ix;
+ len = 1;
+ locks = pi_ix2locks(ix);
+ reserve_size = 3 + pi_ix2rsz(ix);
+ flags = ERTS_PI_FLAG_SINGELTON;
+ flags |= pi_ix2flags(ix);
+ if (ix < 0)
+ goto badarg;
+ }
+ else {
+ Eterm list = opt;
+ Uint size = ERTS_PI_DEF_ARR_SZ;
+ len = 0;
+ reserve_size = 0;
+ locks = 0;
+ flags = 0;
-BIF_RETTYPE process_info_2(BIF_ALIST_2)
-{
- Eterm res;
- Process *rp;
- Eterm pid = BIF_ARG_1;
- ErtsProcLocks info_locks;
- int fail_type, reds = 0;
+ while (is_list(list)) {
+ Eterm *consp = list_val(list);
+ Eterm arg = CAR(consp);
+ int ix = pi_arg2ix(arg);
+ if (ix < 0)
+ goto badarg;
+
+ if (len >= size)
+ pi_setup_grow(&item_ix, def_arr, &size, len);
+
+ item_ix[len++] = ix;
+
+ locks |= pi_ix2locks(ix);
+ flags |= pi_ix2flags(ix);
+ reserve_size += pi_ix2rsz(ix);
+ reserve_size += 3; /* 2-tuple */
+ reserve_size += 2; /* cons */
+
+ list = CDR(consp);
+ }
+
+ if (is_not_nil(list))
+ goto badarg;
+ }
- if (is_external_pid(pid)
- && external_pid_dist_entry(pid) == erts_this_dist_entry)
- BIF_RET(am_undefined);
-
if (is_not_internal_pid(pid)) {
- BIF_ERROR(BIF_P, BADARG);
+ if (is_external_pid(pid)
+ && external_pid_dist_entry(pid) == erts_this_dist_entry)
+ goto undefined;
+ goto badarg;
}
- if (is_nil(BIF_ARG_2))
- BIF_RET(NIL);
+ if (always_wrap)
+ flags |= ERTS_PI_FLAG_ALWAYS_WRAP;
- if (is_list(BIF_ARG_2)) {
- res = process_info_list(BIF_P, BIF_ARG_1, BIF_ARG_2, 1, &fail_type);
- if (is_non_value(res)) {
- switch (fail_type) {
- case ERTS_PI_FAIL_TYPE_BADARG:
- BIF_ERROR(BIF_P, BADARG);
- case ERTS_PI_FAIL_TYPE_YIELD:
- ERTS_BIF_YIELD2(bif_export[BIF_process_info_2], BIF_P,
- BIF_ARG_1, BIF_ARG_2);
- case ERTS_PI_FAIL_TYPE_EXITED:
- ERTS_BIF_EXITED(BIF_P);
- default:
- erts_exit(ERTS_ABORT_EXIT, "%s:%d: Internal error",
- __FILE__, __LINE__);
- }
- }
- ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
- BIF_RET(res);
+ if (c_p->common.id == pid) {
+ rp = c_p;
+ if (locks & ~ERTS_PROC_LOCK_MAIN)
+ erts_proc_lock(c_p, locks & ~ERTS_PROC_LOCK_MAIN);
+ locks |= ERTS_PROC_LOCK_MAIN;
+ }
+ else {
+ if (flags & ERTS_PI_FLAG_FORCE_SIG_SEND)
+ goto send_signal;
+ rp = erts_try_lock_sig_free_proc(pid, locks, &state);
+ if (!rp)
+ goto undefined;
+ if (rp == ERTS_PROC_LOCK_BUSY) {
+ rp = NULL;
+ goto send_signal;
+ }
+ if (state & ERTS_PSFLG_EXITING) {
+ if (locks)
+ erts_proc_unlock(rp, locks);
+ locks = 0;
+ /* wait for it to terminate properly... */
+ goto send_signal;
+ }
+ if (flags & ERTS_PI_FLAG_NEED_MSGQ_LEN) {
+ ASSERT(locks & ERTS_PROC_LOCK_MAIN);
+ erts_proc_lock(rp, ERTS_PROC_LOCK_MSGQ);
+ erts_proc_sig_fetch(rp);
+ if (c_p->sig_qs.cont) {
+ erts_proc_unlock(rp, locks|ERTS_PROC_LOCK_MSGQ);
+ locks = 0;
+ goto send_signal;
+ }
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
+ }
}
- if (pi_arg2ix(BIF_ARG_2) < 0)
- BIF_ERROR(BIF_P, BADARG);
+ erts_factory_proc_init(&hfact, c_p);
- info_locks = pi_locks(BIF_ARG_2);
+ res = erts_process_info(c_p, &hfact, rp, locks, item_ix, len,
+ flags, reserve_size, &reds);
- rp = pi_lookup_proc(BIF_P, pid, &info_locks);
- if (!rp) {
- if (BIF_P->common.id == pid)
- ERTS_BIF_EXITED(BIF_P);
- BIF_RET(am_undefined);
+ erts_factory_close(&hfact);
+
+ if (reds > INT_MAX/2)
+ reds = INT_MAX/2;
+ BUMP_REDS(c_p, (int) reds);
+
+ state = erts_atomic32_read_acqb(&rp->state);
+ if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE)) {
+ if (state & ERTS_PSFLG_FREE) {
+ ASSERT(!locks);
+ goto undefined;
+ }
+ if (locks)
+ erts_proc_unlock(rp, locks);
+ locks = 0;
+ /* wait for it to terminate properly... */
+ goto send_signal;
}
- else if (rp == ERTS_PROC_LOCK_BUSY)
- ERTS_BIF_YIELD2(bif_export[BIF_process_info_2], BIF_P,
- BIF_ARG_1, BIF_ARG_2);
- if (BIF_P == rp)
- info_locks |= ERTS_PROC_LOCK_MAIN;
+ ERTS_BIF_PREP_RET(ret, res);
+
+done:
+
+ if (c_p == rp)
+ locks &= ~ERTS_PROC_LOCK_MAIN;
+
+ if (locks && rp)
+ erts_proc_unlock(rp, locks);
+
+ if (item_ix != def_arr)
+ erts_free(ERTS_ALC_T_TMP, item_ix);
+
+ return ret;
+
+badarg:
+ ERTS_BIF_PREP_ERROR(ret, c_p, BADARG);
+ goto done;
+
+undefined:
+ ERTS_BIF_PREP_RET(ret, am_undefined);
+ goto done;
+
+exited:
+ ERTS_BIF_PREP_EXITED(ret, c_p);
+ goto done;
+
+yield:
+ if (pi2)
+ ERTS_BIF_PREP_YIELD2(ret, bif_export[BIF_process_info_2], c_p, pid, opt);
+ else
+ ERTS_BIF_PREP_YIELD1(ret, bif_export[BIF_process_info_1], c_p, pid);
+ goto done;
+
+send_signal: {
+ Eterm ref = erts_make_ref(c_p);
+ int enqueued, need_msgq_len;
+ flags |= ERTS_PI_FLAG_REQUEST_FOR_OTHER;
+ need_msgq_len = (flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
+ /*
+ * Set receive mark so we wont have to scan the whole
+ * message queue for the result. Note caller unconditionally
+ * has to enter a receive only matching messages containing
+ * 'ref', or restore save pointer.
+ */
+ ERTS_RECV_MARK_SAVE(c_p);
+ ERTS_RECV_MARK_SET(c_p);
+ enqueued = erts_proc_sig_send_process_info_request(c_p, pid, item_ix,
+ len, need_msgq_len,
+ flags, reserve_size,
+ ref);
+ if (!enqueued) {
+ /* Restore save pointer... */
+ JOIN_MESSAGE(c_p);
+ goto undefined;
+ }
+ ERTS_BIF_PREP_TRAP1(ret, erts_await_result, c_p, ref);
+ goto done;
+ }
+}
- res = process_info_aux(BIF_P, rp, info_locks, pid, BIF_ARG_2,
- 0, &reds);
+static void
+pi_setup_grow(int **arr, int *def_arr, Uint *sz, int ix)
+{
+ *sz = (ix+1) + ERTS_PI_DEF_ARR_SZ;
+ if (*arr != def_arr)
+ *arr = erts_realloc(ERTS_ALC_T_TMP, *arr, (*sz)*sizeof(int));
+ else {
+ int *new_arr = erts_alloc(ERTS_ALC_T_TMP, (*sz)*sizeof(int));
+ sys_memcpy((void *) new_arr, (void *) def_arr,
+ sizeof(int)*ERTS_PI_DEF_ARR_SZ);
+ *arr = new_arr;
+ }
+}
- BUMP_REDS(BIF_P, reds);
- if (BIF_P == rp)
- info_locks &= ~ERTS_PROC_LOCK_MAIN;
- if (rp && info_locks)
- erts_proc_unlock(rp, info_locks);
+BIF_RETTYPE process_info_2(BIF_ALIST_2)
+{
+ return process_info_bif(BIF_P, BIF_ARG_1, BIF_ARG_2, !is_atom(BIF_ARG_2), !0);
+}
- ASSERT(!(BIF_P->flags & F_P2PNR_RESCHED));
- BIF_RET(res);
+BIF_RETTYPE process_info_1(BIF_ALIST_1)
+{
+ return process_info_bif(BIF_P, BIF_ARG_1, pi_1_keys_list, 0, 0);
}
Eterm
-process_info_aux(Process *BIF_P,
+process_info_aux(Process *c_p,
+ ErtsHeapFactory *hfact,
Process *rp,
ErtsProcLocks rp_locks,
- Eterm rpid,
- Eterm item,
- int always_wrap,
- int *reds)
+ int item_ix,
+ int flags,
+ Uint *reserve_sizep,
+ Uint *reds)
{
Eterm *hp;
Eterm res = NIL;
+ Uint reserved;
+ Uint reserve_size = *reserve_sizep;
+
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ ErtsProcLocks locks = erts_proc_lc_my_proc_locks(rp);
+
+ switch (item_ix) {
+ case ERTS_PI_IX_STATUS:
+ case ERTS_PI_IX_PRIORITY:
+ case ERTS_PI_IX_SUSPENDING:
+ ERTS_LC_ASSERT((locks & ~ERTS_PROC_LOCK_MAIN) == 0);
+ break;
+ default:
+ ERTS_LC_ASSERT(locks == ERTS_PROC_LOCK_MAIN);
+ break;
+ }
+#endif
+
+ reserved = pi_ix2rsz(item_ix);
+ ERTS_PI_UNRESERVE(reserve_size, reserved);
(*reds)++;
ASSERT(rp);
/*
- * Q: Why this always_wrap argument?
+ * Q: Why this ERTS_PI_FLAG_ALWAYS_WRAP flag?
*
* A: registered_name is strange. If process has no registered name,
* process_info(Pid, registered_name) returns [], and
@@ -1241,41 +1352,39 @@ process_info_aux(Process *BIF_P,
* registered_name behaves as it should, i.e. a
* {registered_name, []} will appear in the resulting list.
*
- * If always_wrap != 0, process_info_aux() always wrap the result
- * in a key two tuple.
+ * If ERTS_PI_FLAG_ALWAYS_WRAP is set, process_info_aux() always
+ * wrap the result in a key two tuple.
*/
- switch (item) {
+ switch (item_ix) {
- case am_registered_name:
- if (rp->common.u.alive.reg) {
- hp = HAlloc(BIF_P, 3);
+ case ERTS_PI_IX_REGISTERED_NAME:
+ if (rp->common.u.alive.reg)
res = rp->common.u.alive.reg->name;
- } else {
- if (always_wrap) {
- hp = HAlloc(BIF_P, 3);
+ else {
+ if (flags & ERTS_PI_FLAG_ALWAYS_WRAP)
res = NIL;
- }
- else {
+ else
return NIL;
- }
}
break;
- case am_current_function:
- res = current_function(BIF_P, rp, &hp, 0);
+ case ERTS_PI_IX_CURRENT_FUNCTION:
+ res = current_function(c_p, hfact, rp, 0,
+ reserve_size, flags);
break;
- case am_current_location:
- res = current_function(BIF_P, rp, &hp, 1);
+ case ERTS_PI_IX_CURRENT_LOCATION:
+ res = current_function(c_p, hfact, rp, 1,
+ reserve_size, flags);
break;
- case am_current_stacktrace:
- res = current_stacktrace(BIF_P, rp, &hp);
+ case ERTS_PI_IX_CURRENT_STACKTRACE:
+ res = current_stacktrace(hfact, rp, reserve_size);
break;
- case am_initial_call:
- hp = HAlloc(BIF_P, 3+4);
+ case ERTS_PI_IX_INITIAL_CALL:
+ hp = erts_produce_heap(hfact, 4, reserve_size);
res = TUPLE3(hp,
rp->u.initial.module,
rp->u.initial.function,
@@ -1283,24 +1392,37 @@ process_info_aux(Process *BIF_P,
hp += 4;
break;
- case am_status:
- res = erts_process_state2status(erts_atomic32_read_nob(&rp->state));
- if (res == am_exiting || res == am_free)
- return am_undefined;
- hp = HAlloc(BIF_P, 3);
+ case ERTS_PI_IX_STATUS: {
+ erts_aint32_t state = erts_atomic32_read_nob(&rp->state);
+ res = erts_process_state2status(state);
+ if (res == am_running && (state & ERTS_PSFLG_RUNNING_SYS)) {
+ ASSERT(c_p == rp);
+ ASSERT(flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER);
+ if (!(state & (ERTS_PSFLG_SYS_TASKS
+ | ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_SIG_Q
+ | ERTS_PSFLG_SIG_IN_Q))) {
+ /*
+ * We are servicing a process-info request from
+ * another process. If that other process could
+ * have inspected our state itself, we would have
+ * been in the 'waiting' state.
+ */
+ res = am_waiting;
+ }
+ }
break;
+ }
- case am_messages: {
-
- if (rp->sig_qs.len == 0 || ERTS_TRACE_FLAGS(rp) & F_SENSITIVE) {
- hp = HAlloc(BIF_P, 3);
- } else {
+ case ERTS_PI_IX_MESSAGES: {
+ ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
+ if (rp->sig_qs.len == 0 || (ERTS_TRACE_FLAGS(rp) & F_SENSITIVE))
+ res = NIL;
+ else {
+ int info_on_self = !(flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER);
ErtsMessageInfo *mip;
- Sint i;
+ Sint i, len;
Uint heap_need;
-#ifdef DEBUG
- Eterm *hp_end;
-#endif
mip = erts_alloc(ERTS_ALC_T_TMP,
rp->sig_qs.len*sizeof(ErtsMessageInfo));
@@ -1310,46 +1432,52 @@ process_info_aux(Process *BIF_P,
* erts_proc_sig_prep_msgq_for_inspection() since it removes
* corrupt distribution messages.
*/
- heap_need = erts_proc_sig_prep_msgq_for_inspection(BIF_P, rp,
- rp_locks, mip);
- heap_need += 3; /* top 2-tuple */
- heap_need += rp->sig_qs.len*2; /* Cons cells */
+ heap_need = erts_proc_sig_prep_msgq_for_inspection(c_p, rp,
+ rp_locks,
+ info_on_self,
+ mip);
+ len = rp->sig_qs.len;
- hp = HAlloc(BIF_P, heap_need); /* heap_need is exact */
-#ifdef DEBUG
- hp_end = hp + heap_need;
-#endif
+ heap_need += len*2; /* Cons cells */
+
+ reserve_size += heap_need;
/* Build list of messages... */
- for (i = rp->sig_qs.len - 1, res = NIL; i >= 0; i--) {
+ for (i = len - 1, res = NIL; i >= 0; i--) {
Eterm msg = ERL_MESSAGE_TERM(mip[i].msgp);
Uint sz = mip[i].size;
+ ERTS_PI_UNRESERVE(reserve_size, sz+2);
+ hp = erts_produce_heap(hfact, sz+2, reserve_size);
+
if (sz != 0)
- msg = copy_struct(msg, sz, &hp, &BIF_P->off_heap);
+ msg = copy_struct(msg, sz, &hp, hfact->off_heap);
res = CONS(hp, msg, res);
hp += 2;
}
- ASSERT(hp_end == hp + 3);
-
- if (rp->sig_qs.len > CONTEXT_REDS*4)
- *reds += CONTEXT_REDS*4;
- else
- *reds += rp->sig_qs.len / 4;
+ *reds += (Uint) len / 4;
erts_free(ERTS_ALC_T_TMP, mip);
}
break;
}
- case am_message_queue_len:
- hp = HAlloc(BIF_P, 3);
- res = make_small(rp->sig_qs.len);
+ case ERTS_PI_IX_MESSAGE_QUEUE_LEN: {
+ Sint len = rp->sig_qs.len;
+ ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
+ ASSERT(len >= 0);
+ if (len <= MAX_SMALL)
+ res = make_small(len);
+ else {
+ hp = erts_produce_heap(hfact, BIG_UINT_HEAP_SIZE, reserve_size);
+ res = uint_to_big((Uint) len, hp);
+ }
break;
+ }
- case am_links: {
+ case ERTS_PI_IX_LINKS: {
MonitorInfoCollection mic;
int i;
Eterm item;
@@ -1358,24 +1486,24 @@ process_info_aux(Process *BIF_P,
erts_link_tree_foreach(ERTS_P_LINKS(rp), collect_one_link, (void *) &mic);
- hp = HAlloc(BIF_P, 3 + mic.sz);
+ reserve_size += mic.sz;
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
- item = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity.term);
+ Eterm item_src = mic.mi[i].entity.term;
+ Uint sz = NC_HEAP_SIZE(item_src) + 2;
+ ERTS_PI_UNRESERVE(reserve_size, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
+ item = STORE_NC(&hp, hfact->off_heap, item_src);
res = CONS(hp, item, res);
- hp += 2;
}
- if (mic.mi_i > CONTEXT_REDS*4)
- *reds += CONTEXT_REDS*4;
- else
- *reds += mic.mi_i / 4;
+ *reds += (Uint) mic.mi_i / 4;
DESTROY_MONITOR_INFOS(mic);
break;
}
- case am_monitors: {
+ case ERTS_PI_IX_MONITORS: {
MonitorInfoCollection mic;
int i;
@@ -1384,7 +1512,7 @@ process_info_aux(Process *BIF_P,
collect_one_origin_monitor,
(void *) &mic);
- hp = HAlloc(BIF_P, 3 + mic.sz);
+ reserve_size += mic.sz;
res = NIL;
for (i = 0; i < mic.mi_i; i++) {
if (mic.mi[i].named) {
@@ -1399,23 +1527,30 @@ process_info_aux(Process *BIF_P,
|| is_port(mic.mi[i].pid)
|| is_atom(mic.mi[i].pid));
+ ERTS_PI_UNRESERVE(reserve_size, 3+3+2);
+ hp = erts_produce_heap(hfact, 3+3+2, reserve_size);
+
t1 = TUPLE2(hp, mic.mi[i].entity.term, mic.mi[i].node);
hp += 3;
t2 = TUPLE2(hp, m_type, t1);
hp += 3;
res = CONS(hp, t2, res);
- hp += 2;
}
else {
/* Build {process|port|time_offset, Pid|clock_service} and cons it. */
Eterm t;
Eterm pid;
Eterm m_type;
+ Eterm pid_src = mic.mi[i].entity.term;
+ Uint sz = is_atom(pid_src) ? 0 : NC_HEAP_SIZE(pid_src);
+ sz += 3 + 2;
- if (is_atom(mic.mi[i].entity.term))
- pid = mic.mi[i].entity.term;
- else
- pid = STORE_NC(&hp, &MSO(BIF_P), mic.mi[i].entity.term);
+ ERTS_PI_UNRESERVE(reserve_size, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
+
+ pid = (is_atom(pid_src)
+ ? pid_src
+ : STORE_NC(&hp, hfact->off_heap, pid_src));
switch (mic.mi[i].type) {
case ERTS_MON_TYPE_PORT:
@@ -1435,20 +1570,16 @@ process_info_aux(Process *BIF_P,
t = TUPLE2(hp, m_type, pid);
hp += 3;
res = CONS(hp, t, res);
- hp += 2;
}
}
- if (mic.mi_i > CONTEXT_REDS*4)
- *reds += CONTEXT_REDS*4;
- else
- *reds += mic.mi_i / 4;
+ *reds += (Uint) mic.mi_i / 4;
DESTROY_MONITOR_INFOS(mic);
break;
}
- case am_monitored_by: {
+ case ERTS_PI_IX_MONITORED_BY: {
MonitorInfoCollection mic;
int i;
Eterm item;
@@ -1461,59 +1592,71 @@ process_info_aux(Process *BIF_P,
collect_one_target_monitor,
(void *) &mic);
- hp = HAlloc(BIF_P, 3 + mic.sz);
+ reserve_size += mic.sz;
res = NIL;
for (i = 0; i < mic.mi_i; ++i) {
+ Uint sz = 2;
+
+ if (mic.mi[i].type == ERTS_MON_TYPE_RESOURCE)
+ sz += erts_resource_ref_size(mic.mi[i].entity.resource);
+ else
+ sz += NC_HEAP_SIZE(mic.mi[i].entity.term);
+
+ ERTS_PI_UNRESERVE(reserve_size, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
+
if (mic.mi[i].type == ERTS_MON_TYPE_RESOURCE)
item = erts_bld_resource_ref(&hp,
- &MSO(BIF_P),
+ hfact->off_heap,
mic.mi[i].entity.resource);
else
item = STORE_NC(&hp,
- &MSO(BIF_P),
+ hfact->off_heap,
mic.mi[i].entity.term);
res = CONS(hp, item, res);
- hp += 2;
}
- if (mic.mi_i > CONTEXT_REDS*4)
- *reds += CONTEXT_REDS*4;
- else
- *reds += mic.mi_i / 4;
+ *reds += (Uint) mic.mi_i / 4;
DESTROY_MONITOR_INFOS(mic);
break;
}
- case am_suspending: {
+ case ERTS_PI_IX_SUSPENDING: {
ErtsSuspendMonitorInfoCollection smic;
int i;
Eterm item;
-#ifdef DEBUG
- Eterm *hp_end;
-#endif
+
+ erts_proc_lock(rp, ERTS_PROC_LOCK_STATUS);
ERTS_INIT_SUSPEND_MONITOR_INFOS(smic,
- BIF_P,
- (BIF_P == rp
+ c_p,
+ (c_p == rp
? ERTS_PROC_LOCK_MAIN
: 0) | ERTS_PROC_LOCK_STATUS);
erts_monitor_tree_foreach(rp->suspend_monitors,
&collect_one_suspend_monitor,
&smic);
- hp = HAlloc(BIF_P, 3 + smic.sz);
-#ifdef DEBUG
- hp_end = hp + smic.sz;
-#endif
-
+
+ reserve_size += smic.sz;
+
res = NIL;
for (i = 0; i < smic.smi_i; i++) {
Sint a = (Sint) smic.smi[i]->active; /* quiet compiler warnings */
Sint p = (Sint) smic.smi[i]->pending; /* on 64-bit machines... */
Eterm active;
Eterm pending;
+ Uint sz = 4 + 2;
+ if (!IS_SSMALL(a))
+ sz += BIG_UINT_HEAP_SIZE;
+ if (!IS_SSMALL(p))
+ sz += BIG_UINT_HEAP_SIZE;
+
+ ERTS_PI_UNRESERVE(reserve_size, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
+
if (IS_SSMALL(a))
active = make_small(a);
else {
@@ -1529,95 +1672,83 @@ process_info_aux(Process *BIF_P,
item = TUPLE3(hp, smic.smi[i]->mon.other.item, active, pending);
hp += 4;
res = CONS(hp, item, res);
- hp += 2;
}
- if (smic.smi_i > CONTEXT_REDS*4)
- *reds += CONTEXT_REDS*4;
- else
- *reds += smic.smi_i / 4;
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+
+ *reds += (Uint) smic.smi_i / 4;
ERTS_DESTROY_SUSPEND_MONITOR_INFOS(smic);
- ASSERT(hp == hp_end);
break;
}
- case am_dictionary:
+ case ERTS_PI_IX_DICTIONARY:
if (!rp->dictionary || (ERTS_TRACE_FLAGS(rp) & F_SENSITIVE)) {
res = NIL;
} else {
Uint num = rp->dictionary->numElements;
- res = erts_dictionary_copy(BIF_P, rp->dictionary);
- if (num > CONTEXT_REDS*4)
- *reds += CONTEXT_REDS*4;
- else
- *reds += (int) num / 4;
+ res = erts_dictionary_copy(hfact, rp->dictionary, reserve_size);
+ *reds += (Uint) num / 4;
}
- hp = HAlloc(BIF_P, 3);
+
break;
- case am_trap_exit: {
- hp = HAlloc(BIF_P, 3);
- if (rp->flags & F_TRAP_EXIT)
- res = am_true;
- else
- res = am_false;
+ case ERTS_PI_IX_TRAP_EXIT:
+ res = (rp->flags & F_TRAP_EXIT) ? am_true : am_false;
break;
- }
- case am_error_handler:
- hp = HAlloc(BIF_P, 3);
- res = erts_proc_get_error_handler(BIF_P);
+ case ERTS_PI_IX_ERROR_HANDLER:
+ res = erts_proc_get_error_handler(rp);
break;
- case am_heap_size: {
- Uint hsz = 3;
+ case ERTS_PI_IX_HEAP_SIZE: {
+ Uint hsz = 0;
(void) erts_bld_uint(NULL, &hsz, HEAP_SIZE(rp));
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, HEAP_SIZE(rp));
break;
}
- case am_fullsweep_after: {
- Uint hsz = 3;
+ case ERTS_PI_IX_FULLSWEEP_AFTER: {
+ Uint hsz = 0;
(void) erts_bld_uint(NULL, &hsz, MAX_GEN_GCS(rp));
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, MAX_GEN_GCS(rp));
break;
}
- case am_min_heap_size: {
- Uint hsz = 3;
+ case ERTS_PI_IX_MIN_HEAP_SIZE: {
+ Uint hsz = 0;
(void) erts_bld_uint(NULL, &hsz, MIN_HEAP_SIZE(rp));
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, MIN_HEAP_SIZE(rp));
break;
}
- case am_min_bin_vheap_size: {
- Uint hsz = 3;
+ case ERTS_PI_IX_MIN_BIN_VHEAP_SIZE: {
+ Uint hsz = 0;
(void) erts_bld_uint(NULL, &hsz, MIN_VHEAP_SIZE(rp));
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, MIN_VHEAP_SIZE(rp));
break;
}
- case am_max_heap_size: {
- Uint hsz = 3;
+ case ERTS_PI_IX_MAX_HEAP_SIZE: {
+ Uint hsz = 0;
(void) erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp),
MAX_HEAP_SIZE_FLAGS_GET(rp),
NULL, &hsz);
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp),
MAX_HEAP_SIZE_FLAGS_GET(rp),
&hp, NULL);
break;
}
- case am_total_heap_size: {
+ case ERTS_PI_IX_TOTAL_HEAP_SIZE: {
Uint total_heap_size;
- Uint hsz = 3;
+ Uint hsz = 0;
total_heap_size = rp->heap_sz;
if (rp->old_hend && rp->old_heap)
@@ -1626,58 +1757,52 @@ process_info_aux(Process *BIF_P,
total_heap_size += rp->mbuf_sz;
if (rp->flags & F_ON_HEAP_MSGQ) {
- ERTS_FOREACH_SIG_PRIVQS(
- rp, mp,
- {
- if (ERTS_SIG_IS_MSG(mp) && mp->data.attached)
- total_heap_size += erts_msg_attached_data_size(mp);
- });
-
- if (rp->sig_qs.len > CONTEXT_REDS*4)
- *reds += CONTEXT_REDS*4;
- else
- *reds += rp->sig_qs.len / 4;
+ ErtsMessage *mp;
+ ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
+ for (mp = rp->sig_qs.first; mp; mp = mp->next) {
+ ASSERT(ERTS_SIG_IS_MSG(mp));
+ if (mp->data.attached)
+ total_heap_size += erts_msg_attached_data_size(mp);
+ }
+ *reds += (Uint) rp->sig_qs.len / 4;
}
(void) erts_bld_uint(NULL, &hsz, total_heap_size);
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, total_heap_size);
break;
}
- case am_stack_size: {
+ case ERTS_PI_IX_STACK_SIZE: {
Uint stack_size = STACK_START(rp) - rp->stop;
- Uint hsz = 3;
+ Uint hsz = 0;
(void) erts_bld_uint(NULL, &hsz, stack_size);
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, stack_size);
break;
}
- case am_memory: { /* Memory consumed in bytes */
- Uint hsz = 3;
+ case ERTS_PI_IX_MEMORY: { /* Memory consumed in bytes */
+ Uint hsz = 0;
Uint size = erts_process_memory(rp, 0);
(void) erts_bld_uint(NULL, &hsz, size);
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, size);
- if (rp->sig_qs.len > CONTEXT_REDS*4)
- *reds += CONTEXT_REDS*4;
- else
- *reds += rp->sig_qs.len / 4;
+ ASSERT(flags & ERTS_PI_FLAG_NEED_MSGQ_LEN);
+ *reds += (Uint) rp->sig_qs.len / 4;
break;
}
- case am_garbage_collection: {
+ case ERTS_PI_IX_GARBAGE_COLLECTION: {
DECL_AM(minor_gcs);
Eterm t;
Uint map_sz = 0;
erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp), MAX_HEAP_SIZE_FLAGS_GET(rp), NULL, &map_sz);
- hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2 + 3+2 + map_sz + 3);
- /* last "3" is for outside tuple */
+ hp = erts_produce_heap(hfact, 3+2 + 3+2 + 3+2 + 3+2 + 3+2 + map_sz, reserve_size);
t = TUPLE2(hp, AM_minor_gcs, make_small(GEN_GCS(rp))); hp += 3;
res = CONS(hp, t, NIL); hp += 2;
@@ -1696,93 +1821,76 @@ process_info_aux(Process *BIF_P,
break;
}
- case am_garbage_collection_info: {
+ case ERTS_PI_IX_GARBAGE_COLLECTION_INFO: {
Uint sz = 0, actual_sz = 0;
- if (rp == BIF_P) {
- sz += ERTS_PROCESS_GC_INFO_MAX_SIZE;
- } else {
- erts_process_gc_info(rp, &sz, NULL, 0, 0);
- sz += 3;
- }
+ erts_process_gc_info(rp, &sz, NULL, 0, 0);
- hp = HAlloc(BIF_P, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
res = erts_process_gc_info(rp, &actual_sz, &hp, 0, 0);
- /* We may have some extra space, fill with 0 tuples */
- if (actual_sz <= sz - 3) {
- for (; actual_sz < sz - 3; hp++, actual_sz++)
- hp[0] = make_arityval(0);
- } else {
- for (; actual_sz < sz; hp++, actual_sz++)
- hp[0] = make_arityval(0);
- hp = HAlloc(BIF_P, 3);
- }
-
break;
}
- case am_group_leader: {
+ case ERTS_PI_IX_GROUP_LEADER: {
int sz = NC_HEAP_SIZE(rp->group_leader);
- hp = HAlloc(BIF_P, 3 + sz);
- res = STORE_NC(&hp, &MSO(BIF_P), rp->group_leader);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
+ res = STORE_NC(&hp, hfact->off_heap, rp->group_leader);
break;
}
- case am_reductions: {
- Uint reds = rp->reds + erts_current_reductions(BIF_P, rp);
- Uint hsz = 3;
+ case ERTS_PI_IX_REDUCTIONS: {
+ Uint reds = rp->reds + erts_current_reductions(c_p, rp);
+ Uint hsz = 0;
(void) erts_bld_uint(NULL, &hsz, reds);
- hp = HAlloc(BIF_P, hsz);
+ hp = erts_produce_heap(hfact, hsz, reserve_size);
res = erts_bld_uint(&hp, NULL, reds);
break;
}
- case am_priority: {
+ case ERTS_PI_IX_PRIORITY: {
erts_aint32_t state = erts_atomic32_read_nob(&rp->state);
if (ERTS_PSFLG_EXITING & state)
return am_undefined;
- hp = HAlloc(BIF_P, 3);
res = erts_get_process_priority(state);
break;
}
- case am_trace:
- hp = HAlloc(BIF_P, 3);
+ case ERTS_PI_IX_TRACE:
res = make_small(ERTS_TRACE_FLAGS(rp) & TRACEE_FLAGS);
break;
- case am_binary: {
- Uint sz = 3;
+ case ERTS_PI_IX_BINARY: {
+ Uint sz = 0;
(void) bld_bin_list(NULL, &sz, &MSO(rp));
- hp = HAlloc(BIF_P, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
res = bld_bin_list(&hp, NULL, &MSO(rp));
break;
}
- case am_sequential_trace_token:
- res = copy_object(rp->seq_trace_token, BIF_P);
- hp = HAlloc(BIF_P, 3);
+ case ERTS_PI_IX_SEQUENTIAL_TRACE_TOKEN: {
+ Uint sz = size_object(rp->seq_trace_token);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
+ res = copy_struct(rp->seq_trace_token, sz, &hp, hfact->off_heap);
break;
+ }
- case am_catchlevel:
- hp = HAlloc(BIF_P, 3);
- res = make_small(catchlevel(BIF_P));
+ case ERTS_PI_IX_CATCHLEVEL:
+ res = make_small(catchlevel(rp));
break;
- case am_backtrace: {
+ case ERTS_PI_IX_BACKTRACE: {
erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
erts_stack_dump(ERTS_PRINT_DSBUF, (void *) dsbufp, rp);
- res = new_binary(BIF_P, (byte *) dsbufp->str, dsbufp->str_len);
+ res = erts_heap_factory_new_binary(hfact, (byte *) dsbufp->str,
+ dsbufp->str_len, reserve_size);
erts_destroy_tmp_dsbuf(dsbufp);
- hp = HAlloc(BIF_P, 3);
break;
}
- case am_last_calls: {
+ case ERTS_PI_IX_LAST_CALLS: {
struct saved_calls *scb = ERTS_PROC_GET_SAVED_CALLS_BUF(rp);
if (!scb) {
- hp = HAlloc(BIF_P, 3);
res = am_false;
} else {
/*
@@ -1790,23 +1898,34 @@ process_info_aux(Process *BIF_P,
* Might be less than that, if there are sends, receives or timeouts,
* so we must do a HRelease() to avoid creating holes.
*/
- Uint needed = scb->n*(2+4) + 3;
- Eterm* limit;
+ Sint needed = scb->n*(2+4);
Eterm term, list;
int i, j;
+ Export *exp;
+
+ reserve_size += needed;
- hp = HAlloc(BIF_P, needed);
- limit = hp + needed;
list = NIL;
for (i = 0; i < scb->n; i++) {
+ Uint sz;
j = scb->cur - i - 1;
if (j < 0)
j += scb->len;
- if (scb->ct[j] == &exp_send)
+
+ sz = 2;
+ exp = scb->ct[j];
+ if (exp != &exp_send && exp != &exp_receive && exp != &exp_timeout)
+ sz += 4;
+
+ needed -= sz;
+ ERTS_PI_UNRESERVE(reserve_size, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
+
+ if (exp == &exp_send)
term = am_send;
- else if (scb->ct[j] == &exp_receive)
+ else if (exp == &exp_receive)
term = am_receive;
- else if (scb->ct[j] == &exp_timeout)
+ else if (exp == &exp_timeout)
term = am_timeout;
else {
term = TUPLE3(hp,
@@ -1816,18 +1935,18 @@ process_info_aux(Process *BIF_P,
hp += 4;
}
list = CONS(hp, term, list);
- hp += 2;
}
+
+ ASSERT(needed >= 0);
+ if (needed > 0)
+ reserve_size -= needed;
+
res = list;
- res = TUPLE2(hp, item, res);
- hp += 3;
- HRelease(BIF_P,limit,hp);
- return res;
}
break;
}
- case am_message_queue_data:
+ case ERTS_PI_IX_MESSAGE_QUEUE_DATA:
switch (rp->flags & (F_OFF_HEAP_MSGQ|F_ON_HEAP_MSGQ)) {
case F_OFF_HEAP_MSGQ:
res = am_off_heap;
@@ -1840,16 +1959,15 @@ process_info_aux(Process *BIF_P,
ERTS_INTERNAL_ERROR("Inconsistent message queue management state");
break;
}
- hp = HAlloc(BIF_P, 3);
break;
- case am_magic_ref: {
- Uint sz = 3;
+ case ERTS_PI_IX_MAGIC_REF: {
+ Uint sz = 0;
(void) bld_magic_ref_bin_list(NULL, &sz, &MSO(rp));
- hp = HAlloc(BIF_P, sz);
+ hp = erts_produce_heap(hfact, sz, 0);
res = bld_magic_ref_bin_list(&hp, NULL, &MSO(rp));
- *reds += 10;
+ *reds += (Uint) 10;
break;
}
@@ -1858,12 +1976,17 @@ process_info_aux(Process *BIF_P,
}
- return TUPLE2(hp, item, res);
+ ERTS_PI_UNRESERVE(reserve_size, 3);
+ *reserve_sizep = reserve_size;
+ hp = erts_produce_heap(hfact, 3, reserve_size);
+
+ return TUPLE2(hp, pi_ix2arg(item_ix), res);
}
#undef MI_INC
static Eterm
-current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
+current_function(Process *c_p, ErtsHeapFactory *hfact, Process* rp,
+ int full_info, Uint reserve_size, int flags)
{
Eterm* hp;
Eterm res;
@@ -1880,7 +2003,7 @@ current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
}
}
- if (BIF_P == rp) {
+ if (c_p == rp && !(flags & ERTS_PI_FLAG_REQUEST_FOR_OTHER)) {
FunctionInfo fi2;
/*
@@ -1900,24 +2023,22 @@ current_function(Process* BIF_P, Process* rp, Eterm** hpp, int full_info)
* Return the result.
*/
if (rp->current == NULL) {
- hp = HAlloc(BIF_P, 3);
res = am_undefined;
} else if (full_info) {
- hp = HAlloc(BIF_P, 3+fi.needed);
- hp = erts_build_mfa_item(&fi, hp, am_true, &res);
+ hp = erts_produce_heap(hfact, fi.needed, reserve_size);
+ erts_build_mfa_item(&fi, hp, am_true, &res);
} else {
- hp = HAlloc(BIF_P, 3+4);
+ hp = erts_produce_heap(hfact, 4, reserve_size);
res = TUPLE3(hp, rp->current->module,
rp->current->function,
make_small(rp->current->arity));
- hp += 4;
}
- *hpp = hp;
return res;
}
static Eterm
-current_stacktrace(Process* p, Process* rp, Eterm** hpp)
+current_stacktrace(ErtsHeapFactory *hfact, Process* rp,
+ Uint reserve_size)
{
Uint sz;
struct StackTrace* s;
@@ -1926,7 +2047,7 @@ current_stacktrace(Process* p, Process* rp, Eterm** hpp)
FunctionInfo* stkp;
Uint heap_size;
int i;
- Eterm* hp = *hpp;
+ Eterm* hp;
Eterm mfa;
Eterm res = NIL;
@@ -1956,17 +2077,23 @@ current_stacktrace(Process* p, Process* rp, Eterm** hpp)
}
}
- hp = HAlloc(p, heap_size);
+ reserve_size += heap_size;
+
+ /*
+ * We intentionally produce heap in small chunks
+ * (for more info see process_info_aux()).
+ */
while (stkp > stk) {
stkp--;
+ sz = stkp->needed + 2;
+ ERTS_PI_UNRESERVE(reserve_size, sz);
+ hp = erts_produce_heap(hfact, sz, reserve_size);
hp = erts_build_mfa_item(stkp, hp, am_true, &mfa);
res = CONS(hp, mfa, res);
- hp += 2;
}
erts_free(ERTS_ALC_T_TMP, stk);
erts_free(ERTS_ALC_T_TMP, s);
- *hpp = hp;
return res;
}
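The comment above points at the convention used throughout the rewritten process_info code: an estimate for the whole result is kept in reserve_size, and each element is then produced in its own small chunk, with the remaining reservation passed as the heap factory's 'xtra' argument. A minimal sketch of that pattern follows; it assumes only that ERTS_PI_UNRESERVE() subtracts the chunk from the outstanding reservation (its definition is not shown in this diff), and the helper itself is hypothetical, not part of the patch:

    /* Sketch only: chunked heap production as used above, not patch code. */
    static Eterm
    build_pairs_sketch(ErtsHeapFactory *hfact, Eterm *keys, Eterm *vals, int n)
    {
        Uint reserve_size = (Uint) n * (3 + 2);   /* 2-tuple + cons per element */
        Eterm res = NIL;
        int i;

        for (i = 0; i < n; i++) {
            Uint sz = 3 + 2;                      /* exact need for this element */
            Eterm *hp, tpl;

            ERTS_PI_UNRESERVE(reserve_size, sz);  /* assumed: reserve_size -= sz */
            hp = erts_produce_heap(hfact, sz, reserve_size);
            tpl = TUPLE2(hp, keys[i], vals[i]); hp += 3;
            res = CONS(hp, tpl, res);
        }
        return res;
    }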
@@ -3838,7 +3965,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
Eterm res = NIL;
Uint *hp = HAlloc(BIF_P, 2*ERTS_PI_ARGS);
for (i = ERTS_PI_ARGS-1; i >= 0; i--) {
- res = CONS(hp, pi_args[i], res);
+ res = CONS(hp, pi_args[i].name, res);
hp += 2;
}
BIF_RET(res);
@@ -3928,6 +4055,15 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
BIF_RET(am_false);
#endif
}
+ else if (ERTS_IS_ATOM_STR("lc_graph", BIF_ARG_1)) {
+#ifdef ERTS_ENABLE_LOCK_CHECK
+ Eterm res = erts_lc_dump_graph();
+ BIF_RET(res);
+#else
+ BIF_RET(am_notsup);
+#endif
+ }
+
}
else if (is_tuple(BIF_ARG_1)) {
Eterm* tp = tuple_val(BIF_ARG_1);
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index 5da8e3eda5..b498fd9cf9 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -61,7 +61,7 @@
# define ERTS_GC_ASSERT(B) ((void) 1)
#endif
-#if defined(DEBUG) && 0
+#if defined(DEBUG) && 1
# define HARDDEBUG 1
#endif
@@ -222,6 +222,24 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(gcireq,
ErtsGCInfoReq,
5,
ERTS_ALC_T_GC_INFO_REQ)
+
+static ERTS_INLINE void
+ensure_sigq_roots_available(Process *p)
+{
+ ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p));
+ switch (p->flags & (F_OFF_HEAP_MSGQ|F_OFF_HEAP_MSGQ_CHNG)) {
+ case F_OFF_HEAP_MSGQ_CHNG:
+ case 0:
+ erts_proc_lock(p, ERTS_PROC_LOCK_MSGQ);
+ erts_proc_sig_fetch(p);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ);
+ break;
+ default:
+ break;
+ }
+}
+
+
/*
* Initialize GC global data.
*/
@@ -420,11 +438,20 @@ erts_gc_after_bif_call_lhf(Process* p, ErlHeapFragment *live_hf_end,
return result;
}
+#ifdef HIPE
+ if (p->hipe_smp.have_receive_locks) {
+ /* Do not want to GC with message queue locked... */
+ return result;
+ }
+#endif
+
if (!p->mbuf) {
/* Must have GC:d in BIF call... invalidate live_hf_end */
live_hf_end = ERTS_INVALID_HFRAG_PTR;
}
+ ensure_sigq_roots_available(p);
+
if (is_non_value(result)) {
if (p->freason == TRAP) {
#ifdef HIPE
@@ -642,7 +669,7 @@ check_for_possibly_long_gc(Process *p, Uint ygen_usage)
sz = ygen_usage;
sz += p->hend - p->stop;
if (p->flags & F_ON_HEAP_MSGQ)
- sz += p->sig_qs.len;
+ sz += erts_proc_sig_privqs_len(p);
if (major)
sz += p->old_htop - p->old_heap;
@@ -868,8 +895,11 @@ do_major_collection:
int
erts_garbage_collect_nobump(Process* p, int need, Eterm* objv, int nobj, int fcalls)
{
- int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, fcalls, 0);
- int reds_left = ERTS_REDS_LEFT(p, fcalls);
+ int reds;
+ int reds_left;
+ ensure_sigq_roots_available(p);
+ reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, fcalls, 0);
+ reds_left = ERTS_REDS_LEFT(p, fcalls);
if (reds > reds_left)
reds = reds_left;
ASSERT(CONTEXT_REDS - (reds_left - reds) >= erts_proc_sched_data(p)->virtual_reds);
@@ -879,7 +909,9 @@ erts_garbage_collect_nobump(Process* p, int need, Eterm* objv, int nobj, int fca
void
erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
{
- int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, p->fcalls, 0);
+ int reds;
+ ensure_sigq_roots_available(p);
+ reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, p->fcalls, 0);
BUMP_REDS(p, reds);
ASSERT(CONTEXT_REDS - ERTS_BIF_REDS_LEFT(p)
>= erts_proc_sched_data(p)->virtual_reds);
@@ -1105,6 +1137,8 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
* First an ordinary major collection...
*/
+ ensure_sigq_roots_available(p);
+
p->flags |= F_NEED_FULLSWEEP;
if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p)))
@@ -2541,14 +2575,17 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
break;
case F_OFF_HEAP_MSGQ_CHNG:
case 0: {
+ Sint len;
/*
* We do not have off heap message queue enabled, i.e. we
- * need to add message queue to rootset...
+ * need to add signal queues to rootset...
*/
+ len = erts_proc_sig_privqs_len(p);
+
/* Ensure large enough rootset... */
- if (n + p->sig_qs.len > rootset->size) {
- Uint new_size = n + p->sig_qs.len;
+ if (n + len > rootset->size) {
+ Uint new_size = n + len;
ERTS_GC_ASSERT(roots == rootset->def);
roots = erts_alloc(ERTS_ALC_T_ROOTSET,
new_size*sizeof(Roots));
@@ -3448,7 +3485,7 @@ erts_max_heap_size_map(Sint max_heap_size, Uint max_heap_flags,
Eterm **hpp, Uint *sz)
{
if (!hpp) {
- *sz += (2*3 + 1 + MAP_HEADER_FLATMAP_SZ);
+ *sz += ERTS_MAX_HEAP_SIZE_MAP_SZ;
return THE_NON_VALUE;
} else {
Eterm *hp = *hpp;
diff --git a/erts/emulator/beam/erl_gc.h b/erts/emulator/beam/erl_gc.h
index 63d03cdf8b..b9b1ed728c 100644
--- a/erts/emulator/beam/erl_gc.h
+++ b/erts/emulator/beam/erl_gc.h
@@ -154,6 +154,8 @@ typedef struct {
Uint64 garbage_cols;
} ErtsGCInfo;
+#define ERTS_MAX_HEAP_SIZE_MAP_SZ (2*3 + 1 + MAP_HEADER_FLATMAP_SZ)
+
#define ERTS_PROCESS_GC_INFO_MAX_TERMS (11) /* number of elements in process_gc_info*/
#define ERTS_PROCESS_GC_INFO_MAX_SIZE \
(ERTS_PROCESS_GC_INFO_MAX_TERMS * (2/*cons*/ + 3/*2-tuple*/ + BIG_UINT_HEAP_SIZE))
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 0ced5ec310..d66410367b 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -41,6 +41,7 @@
#include "erl_lock_check.h"
#include "erl_term.h"
#include "erl_threads.h"
+#include "erl_atom_table.h"
typedef struct {
char *name;
@@ -75,10 +76,10 @@ static erts_lc_lock_order_t erts_lock_order[] = {
* if only one lock use
* the lock name)"
*/
+ { "NO LOCK", NULL },
{ "driver_lock", "driver_name" },
{ "port_lock", "port_id" },
{ "port_data_lock", "address" },
- { "bif_timers", NULL },
{ "reg_tab", NULL },
{ "proc_main", "pid" },
{ "old_code", "address" },
@@ -103,7 +104,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "node_table", NULL },
{ "dist_table", NULL },
{ "sys_tracers", NULL },
- { "module_tab", NULL },
{ "export_tab", NULL },
{ "fun_tab", NULL },
{ "environ", NULL },
@@ -111,7 +111,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "drv_ev_state_grow", NULL, },
{ "drv_ev_state", "address" },
{ "safe_hash", "address" },
- { "removed_fd_pre_alloc_lock", "address" },
{ "state_prealloc", NULL },
{ "schdlr_sspnd", NULL },
{ "migration_info_update", NULL },
@@ -134,10 +133,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "msacc_list_mutex", NULL },
{ "msacc_unmanaged_mutex", NULL },
{ "atom_tab", NULL },
- { "misc_op_list_pre_alloc_lock", "address" },
- { "message_pre_alloc_lock", "address" },
- { "ptimer_pre_alloc_lock", "address", },
- { "btm_pre_alloc_lock", NULL, },
{ "dist_entry_out_queue", "address" },
{ "port_sched_lock", "port_id" },
{ "sys_msg_q", NULL },
@@ -147,20 +142,12 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "mtrace_op", NULL },
{ "instr_x", NULL },
{ "instr", NULL },
- { "pollsets_lock", NULL },
{ "alcu_allocator", "index" },
{ "mseg", NULL },
- { "port_task_pre_alloc_lock", "address" },
- { "proclist_pre_alloc_lock", "address" },
- { "xports_list_pre_alloc_lock", "address" },
- { "inet_buffer_stack_lock", NULL },
- { "system_block", NULL },
{ "get_time", NULL },
{ "get_corrected_time", NULL },
{ "runtime", NULL },
- { "breakpoints", NULL },
{ "pix_lock", "address" },
- { "run_queues_lists", NULL },
{ "sched_stat", NULL },
{ "async_init_mtx", NULL },
#ifdef __WIN32__
@@ -194,10 +181,10 @@ static const char *rw_op_str(erts_lock_options_t options)
return erts_lock_options_get_short_desc(options);
}
-typedef struct erts_lc_locked_lock_t_ erts_lc_locked_lock_t;
-struct erts_lc_locked_lock_t_ {
- erts_lc_locked_lock_t *next;
- erts_lc_locked_lock_t *prev;
+typedef struct lc_locked_lock_t_ lc_locked_lock_t;
+struct lc_locked_lock_t_ {
+ lc_locked_lock_t *next;
+ lc_locked_lock_t *prev;
UWord extra;
Sint16 id;
char *file;
@@ -207,32 +194,47 @@ struct erts_lc_locked_lock_t_ {
};
typedef struct {
- erts_lc_locked_lock_t *first;
- erts_lc_locked_lock_t *last;
-} erts_lc_locked_lock_list_t;
+ lc_locked_lock_t *first;
+ lc_locked_lock_t *last;
+} lc_locked_lock_list_t;
+
+typedef union lc_free_block_t_ lc_free_block_t;
+union lc_free_block_t_ {
+ lc_free_block_t *next;
+ lc_locked_lock_t lock;
+};
+
+typedef struct {
+ /*
+ * m[X][Y] & 1 if we locked X directly after Y was locked.
+ * m[X][Y] & 2 if we locked X indirectly after Y was locked.
+ * m[X][0] = 1 if we locked X when nothing else was locked.
+ * m[0][] is unused as it would represent locking "NO LOCK"
+ */
+ char m[ERTS_LOCK_ORDER_SIZE][ERTS_LOCK_ORDER_SIZE];
+
+} lc_matrix_t;
-typedef struct erts_lc_locked_locks_t_ erts_lc_locked_locks_t;
-struct erts_lc_locked_locks_t_ {
+static lc_matrix_t tot_lc_matrix;
+
+typedef struct lc_thread_t_ lc_thread_t;
+struct lc_thread_t_ {
char *thread_name;
int emu_thread;
erts_tid_t tid;
- erts_lc_locked_locks_t *next;
- erts_lc_locked_locks_t *prev;
- erts_lc_locked_lock_list_t locked;
- erts_lc_locked_lock_list_t required;
-};
-
-typedef union erts_lc_free_block_t_ erts_lc_free_block_t;
-union erts_lc_free_block_t_ {
- erts_lc_free_block_t *next;
- erts_lc_locked_lock_t lock;
+ lc_thread_t *next;
+ lc_thread_t *prev;
+ lc_locked_lock_list_t locked;
+ lc_locked_lock_list_t required;
+ lc_free_block_t *free_blocks;
+ lc_matrix_t matrix;
};
static ethr_tsd_key locks_key;
-static erts_lc_locked_locks_t *erts_locked_locks = NULL;
+static lc_thread_t *lc_threads = NULL;
+static ethr_spinlock_t lc_threads_lock;
-static erts_lc_free_block_t *free_blocks = NULL;
#ifdef ERTS_LC_STATIC_ALLOC
#define ERTS_LC_FB_CHUNK_SIZE 10000
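Relating to the lc_matrix_t comment in the hunk above: when a lock is taken while other locks are held, the most recently taken lock becomes its direct predecessor and every earlier held lock an indirect predecessor, which is exactly what the erts_lc_lock_flg_x() hunk further down records. A minimal hypothetical sketch of that bookkeeping, using the lc_thread_t and lc_locked_lock_t types defined above (not part of the patch):

    /* Sketch only: record predecessor edges for a newly taken lock. */
    static void record_edges_sketch(lc_thread_t *thr, erts_lc_lock_t *lck)
    {
        lc_locked_lock_t *ll = thr->locked.last;

        ASSERT(0 < lck->id && lck->id < ERTS_LOCK_ORDER_SIZE);
        if (!ll) {
            thr->matrix.m[lck->id][0] = 1;        /* taken with nothing held */
            return;
        }
        thr->matrix.m[lck->id][ll->id] |= 1;      /* direct predecessor */
        for (ll = ll->prev; ll; ll = ll->prev)
            thr->matrix.m[lck->id][ll->id] |= 2;  /* indirect predecessors */
    }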
@@ -240,176 +242,165 @@ static erts_lc_free_block_t *free_blocks = NULL;
#define ERTS_LC_FB_CHUNK_SIZE 10
#endif
-static ethr_spinlock_t free_blocks_lock;
static ERTS_INLINE void
-lc_lock(void)
+lc_lock_threads(void)
{
- ethr_spin_lock(&free_blocks_lock);
+ ethr_spin_lock(&lc_threads_lock);
}
static ERTS_INLINE void
-lc_unlock(void)
+lc_unlock_threads(void)
{
- ethr_spin_unlock(&free_blocks_lock);
+ ethr_spin_unlock(&lc_threads_lock);
}
-static ERTS_INLINE void lc_free(void *p)
+static ERTS_INLINE void lc_free(lc_thread_t* thr, lc_locked_lock_t *p)
{
- erts_lc_free_block_t *fb = (erts_lc_free_block_t *) p;
+ lc_free_block_t *fb = (lc_free_block_t *) p;
#ifdef DEBUG
- sys_memset((void *) p, 0xdf, sizeof(erts_lc_free_block_t));
+ sys_memset((void *) p, 0xdf, sizeof(lc_free_block_t));
#endif
- lc_lock();
- fb->next = free_blocks;
- free_blocks = fb;
- lc_unlock();
+ fb->next = thr->free_blocks;
+ thr->free_blocks = fb;
}
-#ifdef ERTS_LC_STATIC_ALLOC
-
-static void *lc_core_alloc(void)
-{
- lc_unlock();
- ERTS_INTERNAL_ERROR("Lock checker out of memory!\n");
-}
-
-#else
-
-static void *lc_core_alloc(void)
+static lc_locked_lock_t *lc_core_alloc(lc_thread_t* thr)
{
int i;
- erts_lc_free_block_t *fbs;
- lc_unlock();
- fbs = (erts_lc_free_block_t *) malloc(sizeof(erts_lc_free_block_t)
+ lc_free_block_t *fbs;
+ fbs = (lc_free_block_t *) malloc(sizeof(lc_free_block_t)
* ERTS_LC_FB_CHUNK_SIZE);
if (!fbs) {
ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
}
for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
- sys_memset((void *) &fbs[i], 0xdf, sizeof(erts_lc_free_block_t));
+ sys_memset((void *) &fbs[i], 0xdf, sizeof(lc_free_block_t));
#endif
fbs[i].next = &fbs[i+1];
}
#ifdef DEBUG
sys_memset((void *) &fbs[ERTS_LC_FB_CHUNK_SIZE-1],
- 0xdf, sizeof(erts_lc_free_block_t));
+ 0xdf, sizeof(lc_free_block_t));
#endif
- lc_lock();
- fbs[ERTS_LC_FB_CHUNK_SIZE-1].next = free_blocks;
- free_blocks = &fbs[1];
- return (void *) &fbs[0];
+ fbs[ERTS_LC_FB_CHUNK_SIZE-1].next = thr->free_blocks;
+ thr->free_blocks = &fbs[1];
+ return &fbs[0].lock;
}
-#endif
-
-static ERTS_INLINE void *lc_alloc(void)
+static ERTS_INLINE lc_locked_lock_t *lc_alloc(lc_thread_t* thr)
{
- void *res;
- lc_lock();
- if (!free_blocks)
- res = lc_core_alloc();
+ lc_locked_lock_t *res;
+ if (!thr->free_blocks)
+ res = lc_core_alloc(thr);
else {
- res = (void *) free_blocks;
- free_blocks = free_blocks->next;
+ res = &thr->free_blocks->lock;
+ thr->free_blocks = thr->free_blocks->next;
}
- lc_unlock();
return res;
}
-static erts_lc_locked_locks_t *
-create_locked_locks(char *thread_name)
+static lc_thread_t *
+create_thread_data(char *thread_name)
{
- erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t));
- if (!l_lcks)
+ lc_thread_t *thr = malloc(sizeof(lc_thread_t));
+ if (!thr)
ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
- l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
- if (!l_lcks->thread_name)
+ thr->thread_name = strdup(thread_name ? thread_name : "unknown");
+ if (!thr->thread_name)
ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
- l_lcks->emu_thread = 0;
- l_lcks->tid = erts_thr_self();
- l_lcks->required.first = NULL;
- l_lcks->required.last = NULL;
- l_lcks->locked.first = NULL;
- l_lcks->locked.last = NULL;
- l_lcks->prev = NULL;
- lc_lock();
- l_lcks->next = erts_locked_locks;
- if (erts_locked_locks)
- erts_locked_locks->prev = l_lcks;
- erts_locked_locks = l_lcks;
- lc_unlock();
- erts_tsd_set(locks_key, (void *) l_lcks);
- return l_lcks;
+ thr->emu_thread = 0;
+ thr->tid = erts_thr_self();
+ thr->required.first = NULL;
+ thr->required.last = NULL;
+ thr->locked.first = NULL;
+ thr->locked.last = NULL;
+ thr->prev = NULL;
+ thr->free_blocks = NULL;
+ sys_memzero(&thr->matrix, sizeof(thr->matrix));
+
+ lc_lock_threads();
+ thr->next = lc_threads;
+ if (lc_threads)
+ lc_threads->prev = thr;
+ lc_threads = thr;
+ lc_unlock_threads();
+ erts_tsd_set(locks_key, (void *) thr);
+ return thr;
}
+static void collect_matrix(lc_matrix_t*);
+
static void
-destroy_locked_locks(erts_lc_locked_locks_t *l_lcks)
-{
- ASSERT(l_lcks->thread_name);
- free((void *) l_lcks->thread_name);
- ASSERT(l_lcks->required.first == NULL);
- ASSERT(l_lcks->required.last == NULL);
- ASSERT(l_lcks->locked.first == NULL);
- ASSERT(l_lcks->locked.last == NULL);
-
- lc_lock();
- if (l_lcks->prev)
- l_lcks->prev->next = l_lcks->next;
+destroy_locked_locks(lc_thread_t *thr)
+{
+ ASSERT(thr->thread_name);
+ free((void *) thr->thread_name);
+ ASSERT(thr->required.first == NULL);
+ ASSERT(thr->required.last == NULL);
+ ASSERT(thr->locked.first == NULL);
+ ASSERT(thr->locked.last == NULL);
+
+ lc_lock_threads();
+ if (thr->prev)
+ thr->prev->next = thr->next;
else {
- ASSERT(erts_locked_locks == l_lcks);
- erts_locked_locks = l_lcks->next;
+ ASSERT(lc_threads == thr);
+ lc_threads = thr->next;
}
+ if (thr->next)
+ thr->next->prev = thr->prev;
+
+ collect_matrix(&thr->matrix);
- if (l_lcks->next)
- l_lcks->next->prev = l_lcks->prev;
- lc_unlock();
+ lc_unlock_threads();
- free((void *) l_lcks);
+ free((void *) thr);
}
-static ERTS_INLINE erts_lc_locked_locks_t *
+static ERTS_INLINE lc_thread_t *
get_my_locked_locks(void)
{
return erts_tsd_get(locks_key);
}
-static ERTS_INLINE erts_lc_locked_locks_t *
+static ERTS_INLINE lc_thread_t *
make_my_locked_locks(void)
{
- erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
- if (l_lcks)
- return l_lcks;
+ lc_thread_t *thr = get_my_locked_locks();
+ if (thr)
+ return thr;
else
- return create_locked_locks(NULL);
+ return create_thread_data(NULL);
}
-static ERTS_INLINE erts_lc_locked_lock_t *
-new_locked_lock(erts_lc_lock_t *lck, erts_lock_options_t options,
+static ERTS_INLINE lc_locked_lock_t *
+new_locked_lock(lc_thread_t* thr,
+ erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
- erts_lc_locked_lock_t *l_lck = (erts_lc_locked_lock_t *) lc_alloc();
- l_lck->next = NULL;
- l_lck->prev = NULL;
- l_lck->id = lck->id;
- l_lck->extra = lck->extra;
- l_lck->file = file;
- l_lck->line = line;
- l_lck->flags = lck->flags;
- l_lck->taken_options = options;
- return l_lck;
+ lc_locked_lock_t *ll = lc_alloc(thr);
+ ll->next = NULL;
+ ll->prev = NULL;
+ ll->id = lck->id;
+ ll->extra = lck->extra;
+ ll->file = file;
+ ll->line = line;
+ ll->flags = lck->flags;
+ ll->taken_options = options;
+ return ll;
}
static void
raw_print_lock(char *prefix, Sint16 id, Wterm extra, erts_lock_flags_t flags,
char* file, unsigned int line, char *suffix)
{
- char *lname = (0 <= id && id < ERTS_LOCK_ORDER_SIZE
+ char *lname = (1 <= id && id < ERTS_LOCK_ORDER_SIZE
? erts_lock_order[id].name
: "unknown");
erts_fprintf(stderr,"%s'%s:",prefix,lname);
@@ -439,20 +430,20 @@ print_lock(char *prefix, erts_lc_lock_t *lck, char *suffix)
}
static void
-print_curr_locks(erts_lc_locked_locks_t *l_lcks)
+print_curr_locks(lc_thread_t *thr)
{
- erts_lc_locked_lock_t *l_lck;
- if (!l_lcks || !l_lcks->locked.first)
+ lc_locked_lock_t *ll;
+ if (!thr || !thr->locked.first)
erts_fprintf(stderr,
"Currently no locks are locked by the %s thread.\n",
- l_lcks->thread_name);
+ thr->thread_name);
else {
erts_fprintf(stderr,
"Currently these locks are locked by the %s thread:\n",
- l_lcks->thread_name);
- for (l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next)
- raw_print_lock(" ", l_lck->id, l_lck->extra, l_lck->flags,
- l_lck->file, l_lck->line, "\n");
+ thr->thread_name);
+ for (ll = thr->locked.first; ll; ll = ll->next)
+ raw_print_lock(" ", ll->id, ll->extra, ll->flags,
+ ll->file, ll->line, "\n");
}
}
@@ -481,55 +472,55 @@ uninitialized_lock(void)
}
static void
-lock_twice(char *prefix, erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
+lock_twice(char *prefix, lc_thread_t *thr, erts_lc_lock_t *lck,
erts_lock_options_t options)
{
erts_fprintf(stderr, "%s (%s)", prefix, rw_op_str(options));
print_lock(" ", lck, " lock which is already locked by thread!\n");
- print_curr_locks(l_lcks);
+ print_curr_locks(thr);
lc_abort();
}
static void
-unlock_op_mismatch(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck,
+unlock_op_mismatch(lc_thread_t *thr, erts_lc_lock_t *lck,
erts_lock_options_t options)
{
erts_fprintf(stderr, "Unlocking (%s) ", rw_op_str(options));
print_lock("", lck, " lock which mismatch previous lock operation!\n");
- print_curr_locks(l_lcks);
+ print_curr_locks(thr);
lc_abort();
}
static void
-unlock_of_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
+unlock_of_not_locked(lc_thread_t *thr, erts_lc_lock_t *lck)
{
print_lock("Unlocking ", lck, " lock which is not locked by thread!\n");
- print_curr_locks(l_lcks);
+ print_curr_locks(thr);
lc_abort();
}
static void
-lock_order_violation(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
+lock_order_violation(lc_thread_t *thr, erts_lc_lock_t *lck)
{
print_lock("Lock order violation occured when locking ", lck, "!\n");
- print_curr_locks(l_lcks);
+ print_curr_locks(thr);
print_lock_order();
lc_abort();
}
static void
-type_order_violation(char *op, erts_lc_locked_locks_t *l_lcks,
+type_order_violation(char *op, lc_thread_t *thr,
erts_lc_lock_t *lck)
{
erts_fprintf(stderr, "Lock type order violation occured when ");
print_lock(op, lck, "!\n");
- ASSERT(l_lcks);
- print_curr_locks(l_lcks);
+ ASSERT(thr);
+ print_curr_locks(thr);
lc_abort();
}
static void
-lock_mismatch(erts_lc_locked_locks_t *l_lcks, int exact,
+lock_mismatch(lc_thread_t *thr, int exact,
int failed_have, erts_lc_lock_t *have, int have_len,
int failed_have_not, erts_lc_lock_t *have_not, int have_not_len)
{
@@ -576,39 +567,39 @@ lock_mismatch(erts_lc_locked_locks_t *l_lcks, int exact,
print_lock2(" ", have_not[i].id, have_not[i].extra, 0, "\n");
}
}
- print_curr_locks(l_lcks);
+ print_curr_locks(thr);
lc_abort();
}
static void
-unlock_of_required_lock(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
+unlock_of_required_lock(lc_thread_t *thr, erts_lc_lock_t *lck)
{
print_lock("Unlocking required ", lck, " lock!\n");
- print_curr_locks(l_lcks);
+ print_curr_locks(thr);
lc_abort();
}
static void
-unrequire_of_not_required_lock(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
+unrequire_of_not_required_lock(lc_thread_t *thr, erts_lc_lock_t *lck)
{
print_lock("Unrequire on ", lck, " lock not required!\n");
- print_curr_locks(l_lcks);
+ print_curr_locks(thr);
lc_abort();
}
static void
-require_twice(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
+require_twice(lc_thread_t *thr, erts_lc_lock_t *lck)
{
print_lock("Require on ", lck, " lock already required!\n");
- print_curr_locks(l_lcks);
+ print_curr_locks(thr);
lc_abort();
}
static void
-required_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
+required_not_locked(lc_thread_t *thr, erts_lc_lock_t *lck)
{
print_lock("Required ", lck, " lock not locked!\n");
- print_curr_locks(l_lcks);
+ print_curr_locks(thr);
lc_abort();
}
@@ -616,15 +607,15 @@ required_not_locked(erts_lc_locked_locks_t *l_lcks, erts_lc_lock_t *lck)
static void
thread_exit_handler(void)
{
- erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
- if (l_lcks) {
- if (l_lcks->locked.first) {
+ lc_thread_t *thr = get_my_locked_locks();
+ if (thr) {
+ if (thr->locked.first) {
erts_fprintf(stderr,
"Thread exiting while having locked locks!\n");
- print_curr_locks(l_lcks);
+ print_curr_locks(thr);
lc_abort();
}
- destroy_locked_locks(l_lcks);
+ destroy_locked_locks(thr);
/* erts_tsd_set(locks_key, NULL); */
}
}
@@ -642,24 +633,24 @@ lc_abort(void)
void
erts_lc_set_thread_name(char *thread_name)
{
- erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
- if (!l_lcks)
- l_lcks = create_locked_locks(thread_name);
+ lc_thread_t *thr = get_my_locked_locks();
+ if (!thr)
+ thr = create_thread_data(thread_name);
else {
- ASSERT(l_lcks->thread_name);
- free((void *) l_lcks->thread_name);
- l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
- if (!l_lcks->thread_name)
+ ASSERT(thr->thread_name);
+ free((void *) thr->thread_name);
+ thr->thread_name = strdup(thread_name ? thread_name : "unknown");
+ if (!thr->thread_name)
ERTS_INTERNAL_ERROR("strdup failed");
}
- l_lcks->emu_thread = 1;
+ thr->emu_thread = 1;
}
int
erts_lc_is_emu_thr(void)
{
- erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
- return l_lcks->emu_thread;
+ lc_thread_t *thr = get_my_locked_locks();
+ return thr->emu_thread;
}
int
@@ -705,7 +696,7 @@ erts_lc_get_lock_order_id(char *name)
return (Sint16) -1;
}
-static int compare_locked_by_id(erts_lc_locked_lock_t *locked_lock, erts_lc_lock_t *comparand)
+static int compare_locked_by_id(lc_locked_lock_t *locked_lock, erts_lc_lock_t *comparand)
{
if(locked_lock->id < comparand->id) {
return -1;
@@ -716,7 +707,7 @@ static int compare_locked_by_id(erts_lc_locked_lock_t *locked_lock, erts_lc_lock
return 0;
}
-static int compare_locked_by_id_extra(erts_lc_locked_lock_t *locked_lock, erts_lc_lock_t *comparand)
+static int compare_locked_by_id_extra(lc_locked_lock_t *locked_lock, erts_lc_lock_t *comparand)
{
int order = compare_locked_by_id(locked_lock, comparand);
@@ -731,18 +722,18 @@ static int compare_locked_by_id_extra(erts_lc_locked_lock_t *locked_lock, erts_l
return 0;
}
-typedef int (*locked_compare_func)(erts_lc_locked_lock_t *, erts_lc_lock_t *);
+typedef int (*locked_compare_func)(lc_locked_lock_t *, erts_lc_lock_t *);
/* Searches through a list of taken locks, bailing when it hits an entry whose
* order relative to the search template is the opposite of the one at the
* start of the search. (*closest_neighbor) is either set to the exact match,
* or the one closest to it in the sort order. */
static int search_locked_list(locked_compare_func compare,
- erts_lc_locked_lock_t *locked_locks,
+ lc_locked_lock_t *locked_locks,
erts_lc_lock_t *search_template,
- erts_lc_locked_lock_t **closest_neighbor)
+ lc_locked_lock_t **closest_neighbor)
{
- erts_lc_locked_lock_t *iterator = locked_locks;
+ lc_locked_lock_t *iterator = locked_locks;
(*closest_neighbor) = iterator;
@@ -778,9 +769,9 @@ static int search_locked_list(locked_compare_func compare,
/* Searches for a lock in the given list that matches search_template, and sets
* (*locked_locks) to the closest lock in the sort order. */
static int
-find_lock(erts_lc_locked_lock_t **locked_locks, erts_lc_lock_t *search_template)
+find_lock(lc_locked_lock_t **locked_locks, erts_lc_lock_t *search_template)
{
- erts_lc_locked_lock_t *closest_neighbor;
+ lc_locked_lock_t *closest_neighbor;
int found_lock;
found_lock = search_locked_list(compare_locked_by_id_extra,
@@ -809,9 +800,9 @@ find_lock(erts_lc_locked_lock_t **locked_locks, erts_lc_lock_t *search_template)
/* Searches for a lock in the given list by id, and sets (*locked_locks) to the
* closest lock in the sort order. */
static int
-find_id(erts_lc_locked_lock_t **locked_locks, Sint16 id)
+find_id(lc_locked_lock_t **locked_locks, Sint16 id)
{
- erts_lc_locked_lock_t *closest_neighbor;
+ lc_locked_lock_t *closest_neighbor;
erts_lc_lock_t search_template;
int found_lock;
@@ -830,34 +821,34 @@ find_id(erts_lc_locked_lock_t **locked_locks, Sint16 id)
void
erts_lc_have_locks(int *resv, erts_lc_lock_t *locks, int len)
{
- erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
+ lc_thread_t *thr = get_my_locked_locks();
int i;
- if (!l_lcks) {
+ if (!thr) {
for (i = 0; i < len; i++)
resv[i] = 0;
}
else {
- erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
+ lc_locked_lock_t *ll = thr->locked.first;
for (i = 0; i < len; i++)
- resv[i] = find_lock(&l_lck, &locks[i]);
+ resv[i] = find_lock(&ll, &locks[i]);
}
}
void
erts_lc_have_lock_ids(int *resv, int *ids, int len)
{
- erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
+ lc_thread_t *thr = get_my_locked_locks();
int i;
- if (!l_lcks) {
+ if (!thr) {
for (i = 0; i < len; i++)
resv[i] = 0;
}
else {
- erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
+ lc_locked_lock_t *ll = thr->locked.first;
for (i = 0; i < len; i++)
- resv[i] = find_id(&l_lck, ids[i]);
+ resv[i] = find_id(&ll, ids[i]);
}
}
@@ -866,27 +857,27 @@ erts_lc_check(erts_lc_lock_t *have, int have_len,
erts_lc_lock_t *have_not, int have_not_len)
{
int i;
- erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
- erts_lc_locked_lock_t *l_lck;
+ lc_thread_t *thr = get_my_locked_locks();
+ lc_locked_lock_t *ll;
if (have && have_len > 0) {
- if (!l_lcks)
+ if (!thr)
lock_mismatch(NULL, 0,
-1, have, have_len,
-1, have_not, have_not_len);
- l_lck = l_lcks->locked.first;
+ ll = thr->locked.first;
for (i = 0; i < have_len; i++) {
- if (!find_lock(&l_lck, &have[i]))
- lock_mismatch(l_lcks, 0,
+ if (!find_lock(&ll, &have[i]))
+ lock_mismatch(thr, 0,
i, have, have_len,
-1, have_not, have_not_len);
}
}
- if (have_not && have_not_len > 0 && l_lcks) {
- l_lck = l_lcks->locked.first;
+ if (have_not && have_not_len > 0 && thr) {
+ ll = thr->locked.first;
for (i = 0; i < have_not_len; i++) {
- if (find_lock(&l_lck, &have_not[i]))
- lock_mismatch(l_lcks, 0,
+ if (find_lock(&ll, &have_not[i]))
+ lock_mismatch(thr, 0,
-1, have, have_len,
i, have_not, have_not_len);
}
@@ -896,8 +887,8 @@ erts_lc_check(erts_lc_lock_t *have, int have_len,
void
erts_lc_check_exact(erts_lc_lock_t *have, int have_len)
{
- erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
- if (!l_lcks) {
+ lc_thread_t *thr = get_my_locked_locks();
+ if (!thr) {
if (have && have_len > 0)
lock_mismatch(NULL, 1,
-1, have, have_len,
@@ -905,17 +896,17 @@ erts_lc_check_exact(erts_lc_lock_t *have, int have_len)
}
else {
int i;
- erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
+ lc_locked_lock_t *ll = thr->locked.first;
for (i = 0; i < have_len; i++) {
- if (!find_lock(&l_lck, &have[i]))
- lock_mismatch(l_lcks, 1,
+ if (!find_lock(&ll, &have[i]))
+ lock_mismatch(thr, 1,
i, have, have_len,
-1, NULL, 0);
}
- for (i = 0, l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next)
+ for (i = 0, ll = thr->locked.first; ll; ll = ll->next)
i++;
if (i != have_len)
- lock_mismatch(l_lcks, 1,
+ lock_mismatch(thr, 1,
-1, have, have_len,
-1, NULL, 0);
}
@@ -924,16 +915,16 @@ erts_lc_check_exact(erts_lc_lock_t *have, int have_len)
void
erts_lc_check_no_locked_of_type(erts_lock_flags_t type)
{
- erts_lc_locked_locks_t *l_lcks = get_my_locked_locks();
- if (l_lcks) {
- erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
- for (l_lck = l_lcks->locked.first; l_lck; l_lck = l_lck->next) {
- if ((l_lck->flags & ERTS_LOCK_FLAGS_MASK_TYPE) == type) {
+ lc_thread_t *thr = get_my_locked_locks();
+ if (thr) {
+ lc_locked_lock_t *ll = thr->locked.first;
+ for (ll = thr->locked.first; ll; ll = ll->next) {
+ if ((ll->flags & ERTS_LOCK_FLAGS_MASK_TYPE) == type) {
erts_fprintf(stderr,
"Locked lock of type %s found which isn't "
"allowed here!\n",
- erts_lock_flags_get_type_name(l_lck->flags));
- print_curr_locks(l_lcks);
+ erts_lock_flags_get_type_name(ll->flags));
+ print_curr_locks(thr);
lc_abort();
}
}
@@ -951,7 +942,7 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
* This in order to make sure that caller can handle
* the situation without causing a lock order violation.
*/
- erts_lc_locked_locks_t *l_lcks;
+ lc_thread_t *thr;
if (lck->inited != ERTS_LC_INITITALIZED)
uninitialized_lock();
@@ -959,25 +950,25 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
if (lck->id < 0)
return 0;
- l_lcks = get_my_locked_locks();
+ thr = get_my_locked_locks();
- if (!l_lcks || !l_lcks->locked.first) {
- ASSERT(!l_lcks || !l_lcks->locked.last);
+ if (!thr || !thr->locked.first) {
+ ASSERT(!thr || !thr->locked.last);
return 0;
}
else {
- erts_lc_locked_lock_t *tl_lck;
+ lc_locked_lock_t *tl_lck;
- ASSERT(l_lcks->locked.last);
+ ASSERT(thr->locked.last);
#if 0 /* Ok when trylocking I guess... */
- if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, l_lcks->locked.last->flags))
- type_order_violation("trylocking ", l_lcks, lck);
+ if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, thr->locked.last->flags))
+ type_order_violation("trylocking ", thr, lck);
#endif
- if (l_lcks->locked.last->id < lck->id
- || (l_lcks->locked.last->id == lck->id
- && l_lcks->locked.last->extra < lck->extra))
+ if (thr->locked.last->id < lck->id
+ || (thr->locked.last->id == lck->id
+ && thr->locked.last->extra < lck->extra))
return 0;
/*
@@ -986,11 +977,11 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
/* Check that we are not trying to lock this lock twice */
- for (tl_lck = l_lcks->locked.last; tl_lck; tl_lck = tl_lck->prev) {
+ for (tl_lck = thr->locked.last; tl_lck; tl_lck = tl_lck->prev) {
if (tl_lck->id < lck->id
|| (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) {
if (tl_lck->id == lck->id && tl_lck->extra == lck->extra)
- lock_twice("Trylocking", l_lcks, lck, options);
+ lock_twice("Trylocking", thr, lck, options);
break;
}
}
@@ -1015,8 +1006,8 @@ erts_lc_trylock_force_busy_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
- erts_lc_locked_locks_t *l_lcks;
- erts_lc_locked_lock_t *l_lck;
+ lc_thread_t *thr;
+ lc_locked_lock_t *ll;
if (lck->inited != ERTS_LC_INITITALIZED)
uninitialized_lock();
@@ -1024,43 +1015,43 @@ void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, erts_lock_options_t
if (lck->id < 0)
return;
- l_lcks = make_my_locked_locks();
- l_lck = locked ? new_locked_lock(lck, options, file, line) : NULL;
+ thr = make_my_locked_locks();
+ ll = locked ? new_locked_lock(thr, lck, options, file, line) : NULL;
- if (!l_lcks->locked.last) {
- ASSERT(!l_lcks->locked.first);
+ if (!thr->locked.last) {
+ ASSERT(!thr->locked.first);
if (locked)
- l_lcks->locked.first = l_lcks->locked.last = l_lck;
+ thr->locked.first = thr->locked.last = ll;
}
else {
- erts_lc_locked_lock_t *tl_lck;
+ lc_locked_lock_t *tl_lck;
#if 0 /* Ok when trylocking I guess... */
- if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, l_lcks->locked.last->flags))
- type_order_violation("trylocking ", l_lcks, lck);
+ if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, thr->locked.last->flags))
+ type_order_violation("trylocking ", thr, lck);
#endif
- for (tl_lck = l_lcks->locked.last; tl_lck; tl_lck = tl_lck->prev) {
+ for (tl_lck = thr->locked.last; tl_lck; tl_lck = tl_lck->prev) {
if (tl_lck->id < lck->id
|| (tl_lck->id == lck->id && tl_lck->extra <= lck->extra)) {
if (tl_lck->id == lck->id && tl_lck->extra == lck->extra)
- lock_twice("Trylocking", l_lcks, lck, options);
+ lock_twice("Trylocking", thr, lck, options);
if (locked) {
- l_lck->next = tl_lck->next;
- l_lck->prev = tl_lck;
+ ll->next = tl_lck->next;
+ ll->prev = tl_lck;
if (tl_lck->next)
- tl_lck->next->prev = l_lck;
+ tl_lck->next->prev = ll;
else
- l_lcks->locked.last = l_lck;
- tl_lck->next = l_lck;
+ thr->locked.last = ll;
+ tl_lck->next = ll;
}
return;
}
}
if (locked) {
- l_lck->next = l_lcks->locked.first;
- l_lcks->locked.first->prev = l_lck;
- l_lcks->locked.first = l_lck;
+ ll->next = thr->locked.first;
+ thr->locked.first->prev = ll;
+ thr->locked.first = ll;
}
}
@@ -1069,83 +1060,83 @@ void erts_lc_trylock_flg_x(int locked, erts_lc_lock_t *lck, erts_lock_options_t
void erts_lc_require_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
- erts_lc_locked_locks_t *l_lcks = make_my_locked_locks();
- erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
- if (!find_lock(&l_lck, lck))
- required_not_locked(l_lcks, lck);
- l_lck = new_locked_lock(lck, options, file, line);
- if (!l_lcks->required.last) {
- ASSERT(!l_lcks->required.first);
- l_lck->next = l_lck->prev = NULL;
- l_lcks->required.first = l_lcks->required.last = l_lck;
+ lc_thread_t *thr = make_my_locked_locks();
+ lc_locked_lock_t *ll = thr->locked.first;
+ if (!find_lock(&ll, lck))
+ required_not_locked(thr, lck);
+ ll = new_locked_lock(thr, lck, options, file, line);
+ if (!thr->required.last) {
+ ASSERT(!thr->required.first);
+ ll->next = ll->prev = NULL;
+ thr->required.first = thr->required.last = ll;
}
else {
- erts_lc_locked_lock_t *l_lck2;
- ASSERT(l_lcks->required.first);
- for (l_lck2 = l_lcks->required.last;
+ lc_locked_lock_t *l_lck2;
+ ASSERT(thr->required.first);
+ for (l_lck2 = thr->required.last;
l_lck2;
l_lck2 = l_lck2->prev) {
if (l_lck2->id < lck->id
|| (l_lck2->id == lck->id && l_lck2->extra < lck->extra))
break;
else if (l_lck2->id == lck->id && l_lck2->extra == lck->extra)
- require_twice(l_lcks, lck);
+ require_twice(thr, lck);
}
if (!l_lck2) {
- l_lck->next = l_lcks->required.first;
- l_lck->prev = NULL;
- l_lcks->required.first->prev = l_lck;
- l_lcks->required.first = l_lck;
+ ll->next = thr->required.first;
+ ll->prev = NULL;
+ thr->required.first->prev = ll;
+ thr->required.first = ll;
}
else {
- l_lck->next = l_lck2->next;
- if (l_lck->next) {
- ASSERT(l_lcks->required.last != l_lck2);
- l_lck->next->prev = l_lck;
+ ll->next = l_lck2->next;
+ if (ll->next) {
+ ASSERT(thr->required.last != l_lck2);
+ ll->next->prev = ll;
}
else {
- ASSERT(l_lcks->required.last == l_lck2);
- l_lcks->required.last = l_lck;
+ ASSERT(thr->required.last == l_lck2);
+ thr->required.last = ll;
}
- l_lck->prev = l_lck2;
- l_lck2->next = l_lck;
+ ll->prev = l_lck2;
+ l_lck2->next = ll;
}
}
}
void erts_lc_unrequire_lock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
- erts_lc_locked_locks_t *l_lcks = make_my_locked_locks();
- erts_lc_locked_lock_t *l_lck = l_lcks->locked.first;
- if (!find_lock(&l_lck, lck))
- required_not_locked(l_lcks, lck);
- l_lck = l_lcks->required.first;
- if (!find_lock(&l_lck, lck))
- unrequire_of_not_required_lock(l_lcks, lck);
- if (l_lck->prev) {
- ASSERT(l_lcks->required.first != l_lck);
- l_lck->prev->next = l_lck->next;
+ lc_thread_t *thr = make_my_locked_locks();
+ lc_locked_lock_t *ll = thr->locked.first;
+ if (!find_lock(&ll, lck))
+ required_not_locked(thr, lck);
+ ll = thr->required.first;
+ if (!find_lock(&ll, lck))
+ unrequire_of_not_required_lock(thr, lck);
+ if (ll->prev) {
+ ASSERT(thr->required.first != ll);
+ ll->prev->next = ll->next;
}
else {
- ASSERT(l_lcks->required.first == l_lck);
- l_lcks->required.first = l_lck->next;
+ ASSERT(thr->required.first == ll);
+ thr->required.first = ll->next;
}
- if (l_lck->next) {
- ASSERT(l_lcks->required.last != l_lck);
- l_lck->next->prev = l_lck->prev;
+ if (ll->next) {
+ ASSERT(thr->required.last != ll);
+ ll->next->prev = ll->prev;
}
else {
- ASSERT(l_lcks->required.last == l_lck);
- l_lcks->required.last = l_lck->prev;
+ ASSERT(thr->required.last == ll);
+ thr->required.last = ll->prev;
}
- lc_free((void *) l_lck);
+ lc_free(thr, ll);
}
void erts_lc_lock_flg_x(erts_lc_lock_t *lck, erts_lock_options_t options,
char *file, unsigned int line)
{
- erts_lc_locked_locks_t *l_lcks;
- erts_lc_locked_lock_t *l_lck;
+ lc_thread_t *thr;
+ lc_locked_lock_t *new_ll;
if (lck->inited != ERTS_LC_INITITALIZED)
uninitialized_lock();
@@ -1153,32 +1144,45 @@ void erts_lc_lock_flg_x(erts_lc_lock_t *lck, erts_lock_options_t options,
if (lck->id < 0)
return;
- l_lcks = make_my_locked_locks();
- l_lck = new_locked_lock(lck, options, file, line);
+ thr = make_my_locked_locks();
+ new_ll = new_locked_lock(thr, lck, options, file, line);
- if (!l_lcks->locked.last) {
- ASSERT(!l_lcks->locked.first);
- l_lcks->locked.last = l_lcks->locked.first = l_lck;
+ if (!thr->locked.last) {
+ ASSERT(!thr->locked.first);
+ thr->locked.last = thr->locked.first = new_ll;
+ ASSERT(0 < lck->id && lck->id < ERTS_LOCK_ORDER_SIZE);
+ thr->matrix.m[lck->id][0] = 1;
}
- else if (l_lcks->locked.last->id < lck->id
- || (l_lcks->locked.last->id == lck->id
- && l_lcks->locked.last->extra < lck->extra)) {
- if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, l_lcks->locked.last->flags))
- type_order_violation("locking ", l_lcks, lck);
- l_lck->prev = l_lcks->locked.last;
- l_lcks->locked.last->next = l_lck;
- l_lcks->locked.last = l_lck;
+ else if (thr->locked.last->id < lck->id
+ || (thr->locked.last->id == lck->id
+ && thr->locked.last->extra < lck->extra)) {
+ lc_locked_lock_t* ll;
+ if (LOCK_IS_TYPE_ORDER_VIOLATION(lck->flags, thr->locked.last->flags)) {
+ type_order_violation("locking ", thr, lck);
+ }
+
+ ASSERT(0 < lck->id && lck->id < ERTS_LOCK_ORDER_SIZE);
+ ll = thr->locked.last;
+ thr->matrix.m[lck->id][ll->id] |= 1;
+ for (ll = ll->prev; ll; ll = ll->prev) {
+ ASSERT(0 < ll->id && ll->id < ERTS_LOCK_ORDER_SIZE);
+ thr->matrix.m[lck->id][ll->id] |= 2;
+ }
+
+ new_ll->prev = thr->locked.last;
+ thr->locked.last->next = new_ll;
+ thr->locked.last = new_ll;
}
- else if (l_lcks->locked.last->id == lck->id && l_lcks->locked.last->extra == lck->extra)
- lock_twice("Locking", l_lcks, lck, options);
+ else if (thr->locked.last->id == lck->id && thr->locked.last->extra == lck->extra)
+ lock_twice("Locking", thr, lck, options);
else
- lock_order_violation(l_lcks, lck);
+ lock_order_violation(thr, lck);
}
void erts_lc_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
- erts_lc_locked_locks_t *l_lcks;
- erts_lc_locked_lock_t *l_lck;
+ lc_thread_t *thr;
+ lc_locked_lock_t *ll;
if (lck->inited != ERTS_LC_INITITALIZED)
uninitialized_lock();
@@ -1186,38 +1190,38 @@ void erts_lc_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
if (lck->id < 0)
return;
- l_lcks = get_my_locked_locks();
+ thr = get_my_locked_locks();
- if (l_lcks) {
- l_lck = l_lcks->required.first;
- if (find_lock(&l_lck, lck))
- unlock_of_required_lock(l_lcks, lck);
+ if (thr) {
+ ll = thr->required.first;
+ if (find_lock(&ll, lck))
+ unlock_of_required_lock(thr, lck);
}
- for (l_lck = l_lcks ? l_lcks->locked.last : NULL; l_lck; l_lck = l_lck->prev) {
- if (l_lck->id == lck->id && l_lck->extra == lck->extra) {
- if ((l_lck->taken_options & ERTS_LOCK_OPTIONS_RDWR) != options)
- unlock_op_mismatch(l_lcks, lck, options);
- if (l_lck->prev)
- l_lck->prev->next = l_lck->next;
+ for (ll = thr ? thr->locked.last : NULL; ll; ll = ll->prev) {
+ if (ll->id == lck->id && ll->extra == lck->extra) {
+ if ((ll->taken_options & ERTS_LOCK_OPTIONS_RDWR) != options)
+ unlock_op_mismatch(thr, lck, options);
+ if (ll->prev)
+ ll->prev->next = ll->next;
else
- l_lcks->locked.first = l_lck->next;
- if (l_lck->next)
- l_lck->next->prev = l_lck->prev;
+ thr->locked.first = ll->next;
+ if (ll->next)
+ ll->next->prev = ll->prev;
else
- l_lcks->locked.last = l_lck->prev;
- lc_free((void *) l_lck);
+ thr->locked.last = ll->prev;
+ lc_free(thr, ll);
return;
}
}
- unlock_of_not_locked(l_lcks, lck);
+ unlock_of_not_locked(thr, lck);
}
void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
{
- erts_lc_locked_locks_t *l_lcks;
- erts_lc_locked_lock_t *l_lck;
+ lc_thread_t *thr;
+ lc_locked_lock_t *ll;
if (lck->inited != ERTS_LC_INITITALIZED)
uninitialized_lock();
@@ -1225,17 +1229,17 @@ void erts_lc_might_unlock_flg(erts_lc_lock_t *lck, erts_lock_options_t options)
if (lck->id < 0)
return;
- l_lcks = get_my_locked_locks();
+ thr = get_my_locked_locks();
- if (l_lcks) {
- l_lck = l_lcks->required.first;
- if (find_lock(&l_lck, lck))
- unlock_of_required_lock(l_lcks, lck);
+ if (thr) {
+ ll = thr->required.first;
+ if (find_lock(&ll, lck))
+ unlock_of_required_lock(thr, lck);
}
- l_lck = l_lcks->locked.first;
- if (!find_lock(&l_lck, lck))
- unlock_of_not_locked(l_lcks, lck);
+ ll = thr->locked.first;
+ if (!find_lock(&ll, lck))
+ unlock_of_not_locked(thr, lck);
}
int
@@ -1316,26 +1320,7 @@ erts_lc_destroy_lock(erts_lc_lock_t *lck)
void
erts_lc_init(void)
{
-#ifdef ERTS_LC_STATIC_ALLOC
- int i;
- static erts_lc_free_block_t fbs[ERTS_LC_FB_CHUNK_SIZE];
- for (i = 0; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
-#ifdef DEBUG
- sys_memset((void *) &fbs[i], 0xdf, sizeof(erts_lc_free_block_t));
-#endif
- fbs[i].next = &fbs[i+1];
- }
-#ifdef DEBUG
- sys_memset((void *) &fbs[ERTS_LC_FB_CHUNK_SIZE-1],
- 0xdf, sizeof(erts_lc_free_block_t));
-#endif
- fbs[ERTS_LC_FB_CHUNK_SIZE-1].next = NULL;
- free_blocks = &fbs[0];
-#else /* #ifdef ERTS_LC_STATIC_ALLOC */
- free_blocks = NULL;
-#endif /* #ifdef ERTS_LC_STATIC_ALLOC */
-
- if (ethr_spinlock_init(&free_blocks_lock) != 0)
+ if (ethr_spinlock_init(&lc_threads_lock) != 0)
ERTS_INTERNAL_ERROR("spinlock_init failed");
erts_tsd_key_create(&locks_key,"erts_lock_check_key");
@@ -1357,5 +1342,76 @@ erts_lc_pll(void)
print_curr_locks(get_my_locked_locks());
}
+static void collect_matrix(lc_matrix_t* matrix)
+{
+ int i, j;
+ for (i = 1; i < ERTS_LOCK_ORDER_SIZE; i++) {
+ for (j = 0; j <= i; j++) {
+ tot_lc_matrix.m[i][j] |= matrix->m[i][j];
+ }
+#ifdef DEBUG
+ for ( ; j < ERTS_LOCK_ORDER_SIZE; j++) {
+ ASSERT(matrix->m[i][j] == 0);
+ }
+#endif
+ }
+}
+
+Eterm
+erts_lc_dump_graph(void)
+{
+ const char* basename = "lc_graph.";
+ char filename[40];
+ lc_matrix_t* tot = &tot_lc_matrix;
+ lc_thread_t* thr;
+ int i, j, name_max = 0;
+ FILE* ff;
+
+ lc_lock_threads();
+ for (thr = lc_threads; thr; thr = thr->next) {
+ collect_matrix(&thr->matrix);
+ }
+ lc_unlock_threads();
+
+ sys_strcpy(filename, basename);
+ sys_get_pid(filename + strlen(basename),
+ sizeof(filename) - strlen(basename));
+ ff = fopen(filename, "w");
+ if (!ff)
+ return am_error;
+
+ for (i = 1; i < ERTS_LOCK_ORDER_SIZE; i++) {
+ int len = strlen(erts_lock_order[i].name);
+ if (name_max < len)
+ name_max = len;
+ }
+ fputs("%This file was generated by erts_debug:lc_graph()\n\n", ff);
+ fputs("%{ThisLockName, ThisLockId, LockedDirectlyBeforeThis, LockedIndirectlyBeforeThis}\n", ff);
+ fprintf(ff, "[{%*s, %2d}", name_max, "\"NO LOCK\"", 0);
+ for (i = 1; i < ERTS_LOCK_ORDER_SIZE; i++) {
+ char* delim = "";
+ fprintf(ff, ",\n {%*s, %2d, [", name_max, erts_lock_order[i].name, i);
+ for (j = 0; j < ERTS_LOCK_ORDER_SIZE; j++) {
+ if (tot->m[i][j] & 1) {
+ fprintf(ff, "%s%d", delim, j);
+ delim = ",";
+ }
+ }
+ fprintf(ff, "], [");
+ delim = "";
+ for (j = 0; j < ERTS_LOCK_ORDER_SIZE; j++) {
+ if (tot->m[i][j] == 2) {
+ fprintf(ff, "%s%d", delim, j);
+ delim = ",";
+ }
+ }
+ fputs("]}", ff);
+ }
+ fputs("].", ff);
+ fclose(ff);
+ erts_fprintf(stderr, "Created file '%s' in current working directory\n",
+ filename);
+ return am_ok;
+}
#endif /* #ifdef ERTS_ENABLE_LOCK_CHECK */
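For reference, the file written by erts_lc_dump_graph() above is a plain text file of Erlang terms, one entry per lock id, with direct predecessors listed first and indirect-only predecessors second. A hypothetical, truncated excerpt is shown below; the names and ids follow the reordered lock table at the top of this file, but the edge lists are invented purely to illustrate the format:

    %This file was generated by erts_debug:lc_graph()

    %{ThisLockName, ThisLockId, LockedDirectlyBeforeThis, LockedIndirectlyBeforeThis}
    [{      "NO LOCK",  0},
     {   driver_lock,  1, [0], []},
     {     port_lock,  2, [0,1], []},
     {port_data_lock,  3, [2], [1]},
     {       reg_tab,  4, [0], []},
     {     proc_main,  5, [0,4], []},
     ...].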
diff --git a/erts/emulator/beam/erl_lock_check.h b/erts/emulator/beam/erl_lock_check.h
index 5c2c38e8f2..138bc810bd 100644
--- a/erts/emulator/beam/erl_lock_check.h
+++ b/erts/emulator/beam/erl_lock_check.h
@@ -94,6 +94,8 @@ void erts_lc_unrequire_lock(erts_lc_lock_t *lck);
int erts_lc_is_emu_thr(void);
+Eterm erts_lc_dump_graph(void);
+
#define ERTS_LC_ASSERT(A) \
((void) (((A) || ERTS_SOMEONE_IS_CRASH_DUMPING) ? 1 : erts_lc_assert_failed(__FILE__, __LINE__, #A)))
#else /* #ifdef ERTS_ENABLE_LOCK_CHECK */
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 51b7865c0b..34bd11d87c 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -264,11 +264,6 @@ erts_queue_dist_message(Process *rcvr,
Eterm from)
{
ErtsMessage* mp;
-#ifdef USE_VM_PROBES
- Sint tok_label = 0;
- Sint tok_lastcnt = 0;
- Sint tok_serial = 0;
-#endif
erts_aint_t state;
ERTS_LC_ASSERT(rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));
@@ -309,25 +304,6 @@ erts_queue_dist_message(Process *rcvr,
}
else {
-#ifdef USE_VM_PROBES
- if (DTRACE_ENABLED(message_queued)) {
- DTRACE_CHARBUF(receiver_name, DTRACE_TERM_BUF_SIZE);
-
- dtrace_proc_str(rcvr, receiver_name);
- if (have_seqtrace(token)) {
- tok_label = SEQ_TRACE_T_DTRACE_LABEL(token);
- tok_lastcnt = signed_val(SEQ_TRACE_T_LASTCNT(token));
- tok_serial = signed_val(SEQ_TRACE_T_SERIAL(token));
- }
- /*
- * TODO: We don't know the real size of the external message here.
- * -1 will appear to a D script as 4294967295.
- */
- DTRACE6(message_queued, receiver_name, -1, rcvr->sig_qs.len + 1,
- tok_label, tok_lastcnt, tok_serial);
- }
-#endif
-
LINK_MESSAGE(rcvr, mp, &mp->next, 1);
if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
@@ -346,7 +322,6 @@ queue_messages(Process* receiver,
ErtsMessage** last,
Uint len)
{
- Sint res;
int locked_msgq = 0;
erts_aint32_t state;
@@ -394,31 +369,14 @@ queue_messages(Process* receiver,
return 0;
}
- res = receiver->sig_qs.len;
- if (receiver_locks & ERTS_PROC_LOCK_MAIN) {
- /*
- * We move 'in queue' to 'private queue' and place
- * message at the end of 'private queue' in order
- * to ensure that the 'in queue' doesn't contain
- * references into the heap. By ensuring this,
- * we don't need to include the 'in queue' in
- * the root set when garbage collecting.
- */
- res += receiver->sig_inq.len;
- erts_proc_sig_fetch(receiver);
- LINK_MESSAGE_PRIVQ(receiver, first, last, len);
- }
- else
- {
- LINK_MESSAGE(receiver, first, last, len);
- }
+ LINK_MESSAGE(receiver, first, last, len);
if (locked_msgq) {
erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
}
erts_proc_notify_new_message(receiver, receiver_locks);
- return res;
+ return 0;
}
static Sint
@@ -870,7 +828,7 @@ erts_move_messages_off_heap(Process *c_p)
* it...
*/
- reds += c_p->sig_qs.len / 10;
+ reds += erts_proc_sig_privqs_len(c_p) / 10;
ASSERT(erts_atomic32_read_nob(&c_p->state)
& ERTS_PSFLG_OFF_HEAP_MSGQ);
@@ -1361,32 +1319,18 @@ void erts_factory_dummy_init(ErtsHeapFactory* factory)
factory->mode = FACTORY_CLOSED;
}
-static void reserve_heap(ErtsHeapFactory*, Uint need, Uint xtra);
-
-Eterm* erts_produce_heap(ErtsHeapFactory* factory, Uint need, Uint xtra)
-{
- Eterm* res;
-
- ASSERT((unsigned int)factory->mode > (unsigned int)FACTORY_CLOSED);
- if (factory->hp + need > factory->hp_end) {
- reserve_heap(factory, need, xtra);
- }
- res = factory->hp;
- factory->hp += need;
- return res;
-}
-
Eterm* erts_reserve_heap(ErtsHeapFactory* factory, Uint need)
{
ASSERT((unsigned int)factory->mode > (unsigned int)FACTORY_CLOSED);
if (factory->hp + need > factory->hp_end) {
- reserve_heap(factory, need, 200);
+ erts_reserve_heap__(factory, need, 200);
}
return factory->hp;
}
-static void reserve_heap(ErtsHeapFactory* factory, Uint need, Uint xtra)
+void erts_reserve_heap__(ErtsHeapFactory* factory, Uint need, Uint xtra)
{
+ /* internal... */
ErlHeapFragment* bp;
switch (factory->mode) {
@@ -1396,7 +1340,9 @@ static void reserve_heap(ErtsHeapFactory* factory, Uint need, Uint xtra)
factory->hp_end = factory->hp + need;
return;
- case FACTORY_MESSAGE:
+ case FACTORY_MESSAGE: {
+ int replace_oh;
+ int replace_msg_hfrag;
if (!factory->heap_frags) {
ASSERT(factory->message->data.attached == ERTS_MSG_COMBINED_HFRAG);
bp = &factory->message->hfrag;
@@ -1408,25 +1354,45 @@ static void reserve_heap(ErtsHeapFactory* factory, Uint need, Uint xtra)
bp = factory->heap_frags;
}
+ replace_oh = 0;
+ replace_msg_hfrag = 0;
+
if (bp) {
- ASSERT(factory->hp > bp->mem);
+ ASSERT(factory->hp >= bp->mem);
ASSERT(factory->hp <= factory->hp_end);
ASSERT(factory->hp_end == bp->mem + bp->alloc_size);
bp->used_size = factory->hp - bp->mem;
+ if (!bp->used_size && factory->heap_frags) {
+ factory->heap_frags = bp->next;
+ bp->next = NULL;
+ ASSERT(!bp->off_heap.first);
+ if (factory->off_heap == &bp->off_heap)
+ replace_oh = !0;
+ if (factory->message && factory->message->data.heap_frag == bp)
+ replace_msg_hfrag = !0;
+ free_message_buffer(bp);
+ }
}
bp = (ErlHeapFragment*) ERTS_HEAP_ALLOC(factory->alloc_type,
ERTS_HEAP_FRAG_SIZE(need+xtra));
bp->next = factory->heap_frags;
factory->heap_frags = bp;
bp->alloc_size = need + xtra;
- bp->used_size = need;
+ bp->used_size = need + xtra;
bp->off_heap.first = NULL;
bp->off_heap.overhead = 0;
-
+ if (replace_oh) {
+ factory->off_heap = &bp->off_heap;
+ factory->off_heap_saved.first = factory->off_heap->first;
+ factory->off_heap_saved.overhead = factory->off_heap->overhead;
+ }
+ if (replace_msg_hfrag)
+ factory->message->data.heap_frag = bp;
factory->hp = bp->mem;
factory->hp_end = bp->mem + bp->alloc_size;
return;
+ }
case FACTORY_STATIC:
case FACTORY_CLOSED:
@@ -1509,9 +1475,11 @@ void erts_factory_trim_and_close(ErtsHeapFactory* factory,
if (bp->next == NULL) {
Uint used_sz = factory->hp - bp->mem;
ASSERT(used_sz <= bp->alloc_size);
- if (used_sz > 0)
- bp = erts_resize_message_buffer(bp, used_sz,
- brefs, brefs_size);
+ if (used_sz > 0) {
+ if (used_sz != bp->alloc_size)
+ bp = erts_resize_message_buffer(bp, used_sz,
+ brefs, brefs_size);
+ }
else {
free_message_buffer(bp);
bp = NULL;
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index f56a252aef..ee87297ba4 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -89,12 +89,33 @@ void erts_factory_static_init(ErtsHeapFactory*, Eterm* hp, Uint size, ErlOffHeap
void erts_factory_tmp_init(ErtsHeapFactory*, Eterm* hp, Uint size, Uint32 atype);
void erts_factory_dummy_init(ErtsHeapFactory*);
-Eterm* erts_produce_heap(ErtsHeapFactory*, Uint need, Uint xtra);
+ERTS_GLB_INLINE Eterm* erts_produce_heap(ErtsHeapFactory*, Uint need, Uint xtra);
+
Eterm* erts_reserve_heap(ErtsHeapFactory*, Uint need);
void erts_factory_close(ErtsHeapFactory*);
void erts_factory_trim_and_close(ErtsHeapFactory*,Eterm *brefs, Uint brefs_size);
void erts_factory_undo(ErtsHeapFactory*);
+void erts_reserve_heap__(ErtsHeapFactory*, Uint need, Uint xtra); /* internal */
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE Eterm *
+erts_produce_heap(ErtsHeapFactory* factory, Uint need, Uint xtra)
+{
+ Eterm* res;
+
+ ASSERT((unsigned int)factory->mode > (unsigned int)FACTORY_CLOSED);
+ if (factory->hp + need > factory->hp_end) {
+ erts_reserve_heap__(factory, need, xtra);
+ }
+ res = factory->hp;
+ factory->hp += need;
+ return res;
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
#ifdef CHECK_FOR_HOLES
# define ERTS_FACTORY_HOLE_CHECK(f) do { \
/*if ((f)->p) erts_check_for_holes((f)->p);*/ \
@@ -215,6 +236,48 @@ typedef struct {
#define ERL_MESSAGE_BUF_SZ 500
typedef struct {
+ /*
+ * ** The signal queues private to a process. **
+ *
+ * These are:
+ * - an inner queue which only consists of
+ * message signals
+ * - a middle queue which contains a mixture
+ * of message and non-message signals
+ *
+ * When the process isn't processing signals in
+ * erts_proc_sig_handle_incoming():
+ * - the message queue corresponds to the inner
+ * queue. Messages in the middle queue (and
+ * in the outer queue) are in transit and
+ * have NOT been received yet!
+ *
+ * When the process is processing signals in
+ * erts_proc_sig_handle_incoming():
+ * - the message queue corresponds to the inner
+ * queue plus the head of the middle queue up
+ * to the signal currently being processed.
+ * Any messages further back in the middle queue
+ * (and in the outer queue) are still in transit
+ * and have NOT been received yet!
+ *
+ * In the general case the 'len' field of this
+ * structure does NOT correspond to the message
+ * queue length. When the process is inspected
+ * via process info it does however correspond
+ * to the message queue length, but this is a
+ * special case!
+ *
+ * When no process-info request is in transit to
+ * the process, the 'len' field corresponds to
+ * the total number of messages in the inner and
+ * middle queues (which does NOT correspond to
+ * the message queue length). When process-info
+ * requests are in transit to the process, the
+ * usage of the 'len' field changes and it is used
+ * as an offset, which might even be negative.
+ */
+
/* inner queue */
ErtsMessage *first;
ErtsMessage **last; /* point to the last next pointer */
@@ -227,7 +290,7 @@ typedef struct {
/* Common for inner and middle queue */
ErtsMessage **saved_last; /* saved last pointer */
- Sint len; /* message queue length (inner+middle) */
+ Sint len; /* NOT message queue length (see above) */
} ErtsSignalPrivQueues;
typedef struct {
@@ -248,8 +311,7 @@ typedef struct erl_trace_message_queue__ {
#define ERTS_RECV_MARK_SAVE(P) \
do { \
erts_proc_lock((P), ERTS_PROC_LOCK_MSGQ); \
- if ((P)->sig_inq.first) \
- erts_proc_sig_fetch((P)); \
+ erts_proc_sig_fetch((P)); \
erts_proc_unlock((P), ERTS_PROC_LOCK_MSGQ); \
if ((P)->sig_qs.cont) { \
(P)->sig_qs.saved_last = (P)->sig_qs.cont_last; \
@@ -302,23 +364,6 @@ typedef struct erl_trace_message_queue__ {
#endif
-
-/* Add message last in private message queue */
-#define LINK_MESSAGE_PRIVQ(p, first_msg, last_msg, num_msgs) \
- do { \
- ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE__((p), "before"); \
- if ((p)->sig_qs.cont || ERTS_MSG_RECV_TRACED((p))) { \
- *(p)->sig_qs.cont_last = (first_msg); \
- (p)->sig_qs.cont_last = (last_msg); \
- } \
- else { \
- *(p)->sig_qs.last = (first_msg); \
- (p)->sig_qs.last = (last_msg); \
- } \
- (p)->sig_qs.len += (num_msgs); \
- ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE__((p), "after"); \
- } while (0)
-
/* Add message last_msg in message queue */
#define LINK_MESSAGE(p, first_msg, last_msg, num_msgs) \
do { \
@@ -333,12 +378,12 @@ typedef struct erl_trace_message_queue__ {
#define UNLINK_MESSAGE(p,msgp) \
do { \
ErtsMessage *mp__ = (msgp)->next; \
- ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE__((p), "before"); \
+ ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE__((p), 0, "before"); \
*(p)->sig_qs.save = mp__; \
(p)->sig_qs.len--; \
if (mp__ == NULL) \
(p)->sig_qs.last = (p)->sig_qs.save; \
- ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE__((p), "after"); \
+ ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE__((p), 0, "after"); \
} while(0)
/*
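
The comment added to ErtsSignalPrivQueues above separates messages that have been received (inner queue) from signals that are still in transit. The toy model below shows the fetch step the runtime performs under the message-queue lock: splice whatever sits in the shared in-queue onto the private queue and return the resulting private length, roughly what the reworked erts_proc_sig_fetch() now returns. All names are hypothetical; this is not ERTS code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Msg { struct Msg *next; int payload; } Msg;

typedef struct {
    /* shared in-queue ("in transit"), protected by 'lock' */
    pthread_mutex_t lock;
    Msg *in_first, **in_last;
    long in_len;
    /* private queue (inner+middle), owned by the receiving thread */
    Msg *first, **last;
    long len;
} Proc;

static void proc_init(Proc *p)
{
    pthread_mutex_init(&p->lock, NULL);
    p->in_first = NULL; p->in_last = &p->in_first; p->in_len = 0;
    p->first = NULL;    p->last = &p->first;       p->len = 0;
}

static void send_msg(Proc *p, int payload)      /* called from any thread */
{
    Msg *m = malloc(sizeof(Msg));
    m->next = NULL; m->payload = payload;
    pthread_mutex_lock(&p->lock);
    *p->in_last = m; p->in_last = &m->next; p->in_len++;
    pthread_mutex_unlock(&p->lock);
}

static long fetch(Proc *p)                      /* called by the receiver only */
{
    pthread_mutex_lock(&p->lock);
    if (p->in_first) {                          /* splice in-queue onto private queue */
        *p->last = p->in_first;
        p->last = p->in_last;
        p->len += p->in_len;
        p->in_first = NULL; p->in_last = &p->in_first; p->in_len = 0;
    }
    pthread_mutex_unlock(&p->lock);
    return p->len;                              /* private length after the fetch */
}

int main(void)
{
    Proc p; proc_init(&p);
    send_msg(&p, 1); send_msg(&p, 2); send_msg(&p, 3);
    printf("private queue length after fetch: %ld\n", fetch(&p)); /* 3 */
    /* messages are leaked here; a real system frees them after receive */
    return 0;
}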
diff --git a/erts/emulator/beam/erl_printf_term.c b/erts/emulator/beam/erl_printf_term.c
index e6f8460164..910f241a3a 100644
--- a/erts/emulator/beam/erl_printf_term.c
+++ b/erts/emulator/beam/erl_printf_term.c
@@ -532,14 +532,13 @@ print_term(fmtfn_t fn, void* arg, Eterm obj, long *dcount) {
Atom* module = atom_tab(atom_val(ep->info.mfa.module));
Atom* name = atom_tab(atom_val(ep->info.mfa.function));
- PRINT_STRING(res, fn, arg, "#Fun<");
+ PRINT_STRING(res, fn, arg, "fun ");
PRINT_BUF(res, fn, arg, module->name, module->len);
- PRINT_CHAR(res, fn, arg, '.');
+ PRINT_CHAR(res, fn, arg, ':');
PRINT_BUF(res, fn, arg, name->name, name->len);
- PRINT_CHAR(res, fn, arg, '.');
+ PRINT_CHAR(res, fn, arg, '/');
PRINT_SWORD(res, fn, arg, 'd', 0, 1,
(ErlPfSWord) ep->info.mfa.arity);
- PRINT_CHAR(res, fn, arg, '>');
}
break;
case FUN_DEF:
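
The change above switches the textual form of an export from #Fun<Module.Name.Arity> to fun Module:Name/Arity. A trivial stand-alone illustration of the new rendering; print_export is a hypothetical helper, not an ERTS function.

#include <stdio.h>

static int print_export(char *buf, size_t size,
                        const char *module, const char *name, int arity)
{
    /* new textual form: "fun Module:Name/Arity" */
    return snprintf(buf, size, "fun %s:%s/%d", module, name, arity);
}

int main(void)
{
    char buf[64];
    print_export(buf, sizeof buf, "lists", "map", 2);
    puts(buf);   /* prints: fun lists:map/2 */
    return 0;
}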
diff --git a/erts/emulator/beam/erl_proc_sig_queue.c b/erts/emulator/beam/erl_proc_sig_queue.c
index 1ba0b789ec..bcc4fc6d9b 100644
--- a/erts/emulator/beam/erl_proc_sig_queue.c
+++ b/erts/emulator/beam/erl_proc_sig_queue.c
@@ -49,7 +49,7 @@
* Note that not all signals are handled using this functionality!
*/
-#define ERTS_SIG_Q_OP_MAX 10
+#define ERTS_SIG_Q_OP_MAX 11
#define ERTS_SIG_Q_OP_EXIT 0
#define ERTS_SIG_Q_OP_EXIT_LINKED 1
@@ -61,7 +61,8 @@
#define ERTS_SIG_Q_OP_GROUP_LEADER 7
#define ERTS_SIG_Q_OP_TRACE_CHANGE_STATE 8
#define ERTS_SIG_Q_OP_PERSISTENT_MON_MSG 9
-#define ERTS_SIG_Q_OP_IS_ALIVE ERTS_SIG_Q_OP_MAX
+#define ERTS_SIG_Q_OP_IS_ALIVE 10
+#define ERTS_SIG_Q_OP_PROCESS_INFO ERTS_SIG_Q_OP_MAX
#define ERTS_SIG_Q_TYPE_MAX (ERTS_MON_LNK_TYPE_MAX + 5)
@@ -76,46 +77,6 @@
#define ERTS_SIG_Q_TYPE_ADJUST_TRACE_INFO \
ERTS_SIG_Q_TYPE_MAX
-
-#define ERTS_SIG_Q_OP_BITS 8
-#define ERTS_SIG_Q_OP_SHIFT 0
-#define ERTS_SIG_Q_OP_MASK ((1 << ERTS_SIG_Q_OP_BITS) - 1)
-
-#define ERTS_SIG_Q_TYPE_BITS 8
-#define ERTS_SIG_Q_TYPE_SHIFT ERTS_SIG_Q_OP_BITS
-#define ERTS_SIG_Q_TYPE_MASK ((1 << ERTS_SIG_Q_TYPE_BITS) - 1)
-
-#define ERTS_SIG_Q_NON_X_BITS__ (_HEADER_ARITY_OFFS \
- + ERTS_SIG_Q_OP_BITS \
- + ERTS_SIG_Q_TYPE_BITS)
-
-#define ERTS_SIG_Q_XTRA_BITS (32 - ERTS_SIG_Q_NON_X_BITS__)
-#define ERTS_SIG_Q_XTRA_SHIFT (ERTS_SIG_Q_OP_BITS \
- + ERTS_SIG_Q_TYPE_BITS)
-#define ERTS_SIG_Q_XTRA_MASK ((1 << ERTS_SIG_Q_XTRA_BITS) - 1)
-
-#define ERTS_PROC_SIG_OP(Tag) \
- ((int) (_unchecked_thing_arityval((Tag)) \
- >> ERTS_SIG_Q_OP_SHIFT) & ERTS_SIG_Q_OP_MASK)
-
-#define ERTS_PROC_SIG_TYPE(Tag) \
- ((Uint16) (_unchecked_thing_arityval((Tag)) \
- >> ERTS_SIG_Q_TYPE_SHIFT) & ERTS_SIG_Q_TYPE_MASK)
-
-#define ERTS_PROC_SIG_XTRA(Tag) \
- ((Uint32) (_unchecked_thing_arityval((Tag)) \
- >> ERTS_SIG_Q_XTRA_SHIFT) & ERTS_SIG_Q_XTRA_MASK)
-
-#define ERTS_PROC_SIG_MAKE_TAG(Op, Type, Xtra) \
- (ASSERT(0 <= (Xtra) && (Xtra) <= ERTS_SIG_Q_XTRA_MASK), \
- _make_header((((Type) & ERTS_SIG_Q_TYPE_MASK) \
- << ERTS_SIG_Q_TYPE_SHIFT) \
- | (((Op) & ERTS_SIG_Q_OP_MASK) \
- << ERTS_SIG_Q_OP_SHIFT) \
- | (((Xtra) & ERTS_SIG_Q_XTRA_MASK) \
- << ERTS_SIG_Q_XTRA_SHIFT), \
- _TAG_HEADER_EXTERNAL_PID))
-
Process *ERTS_WRITE_UNLIKELY(erts_dirty_process_signal_handler);
Process *ERTS_WRITE_UNLIKELY(erts_dirty_process_signal_handler_high);
Process *ERTS_WRITE_UNLIKELY(erts_dirty_process_signal_handler_max);
@@ -123,7 +84,8 @@ Process *ERTS_WRITE_UNLIKELY(erts_dirty_process_signal_handler_max);
void
erts_proc_sig_queue_init(void)
{
- ERTS_CT_ASSERT(ERTS_SIG_Q_OP_MASK >= ERTS_SIG_Q_OP_MAX);
+ ERTS_CT_ASSERT(ERTS_SIG_Q_OP_MASK > ERTS_SIG_Q_OP_MAX);
+ ERTS_CT_ASSERT(ERTS_SIG_Q_OP_MSGQ_LEN_OFFS_MARK > ERTS_SIG_Q_OP_MAX);
ERTS_CT_ASSERT(ERTS_SIG_Q_TYPE_MASK >= ERTS_SIG_Q_TYPE_MAX);
}
@@ -191,6 +153,29 @@ typedef struct {
Eterm requester;
} ErtsIsAliveRequest;
+typedef struct {
+ ErtsSignalCommon common;
+ Sint refc;
+ Sint delayed_len;
+ Sint len_offset;
+} ErtsProcSigMsgQLenOffsetMarker;
+
+typedef struct {
+ ErtsSignalCommon common;
+ ErtsProcSigMsgQLenOffsetMarker marker;
+ Sint msgq_len_offset;
+ Eterm requester;
+ Eterm ref;
+ ErtsORefThing oref_thing;
+ Uint reserve_size;
+ Uint len;
+ int flags;
+ int item_ix[1]; /* actually of length 'len'; extra space is allocated past the struct... */
+} ErtsProcessInfoSig;
+
+#define ERTS_PROC_SIG_PI_MSGQ_LEN_IGNORE ((Sint) -1)
+#define ERTS_PROC_SIG_PI_MSGQ_LEN_SYNC ((Sint) -2)
+
static int handle_msg_tracing(Process *c_p,
ErtsSigRecvTracing *tracing,
ErtsMessage ***next_nm_sig);
@@ -523,6 +508,9 @@ ensure_dirty_proc_handled(Eterm pid,
}
}
+static void
+check_push_msgq_len_offs_marker(Process *rp, ErtsSignal *sig);
+
static int
proc_queue_signal(Process *c_p, Eterm pid, ErtsSignal *sig, int op)
{
@@ -559,6 +547,8 @@ proc_queue_signal(Process *c_p, Eterm pid, ErtsSignal *sig, int op)
res = 0;
else {
state = enqueue_signals(rp, first, last, last_next, state);
+ if (ERTS_UNLIKELY(op == ERTS_SIG_Q_OP_PROCESS_INFO))
+ check_push_msgq_len_offs_marker(rp, sig);
res = !0;
}
@@ -620,31 +610,14 @@ maybe_elevate_sig_handling_prio(Process *c_p, Eterm other)
}
void
-erts_proc_sig_fetch(Process *proc)
+erts_proc_sig_fetch__(Process *proc)
{
#ifdef ERTS_PROC_SIG_HARD_DEBUG
ErtsSignalPrivQueues sig_qs = proc->sig_qs;
ErtsSignalInQueue sig_inq = proc->sig_inq;
#endif
- ERTS_LC_ASSERT(erts_thr_progress_is_blocking()
- || ERTS_PROC_IS_EXITING(proc)
- || ((erts_proc_lc_my_proc_locks(proc)
- & (ERTS_PROC_LOCK_MAIN
- | ERTS_PROC_LOCK_MSGQ))
- == (ERTS_PROC_LOCK_MAIN
- | ERTS_PROC_LOCK_MSGQ)));
-
- if (!proc->sig_inq.first) {
- ASSERT(proc->sig_inq.last == &proc->sig_inq.first);
- ASSERT(proc->sig_inq.len == 0);
- ASSERT(!proc->sig_inq.nmsigs.next);
- ASSERT(!proc->sig_inq.nmsigs.last);
- return;
- }
-
- ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE(proc);
- ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(proc);
+ ASSERT(proc->sig_inq.first);
if (!proc->sig_inq.nmsigs.next) {
ASSERT(!(ERTS_PSFLG_SIG_IN_Q
@@ -720,10 +693,105 @@ erts_proc_sig_fetch(Process *proc)
proc->sig_inq.last = &proc->sig_inq.first;
proc->sig_inq.len = 0;
- ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(proc);
}
-void do_seq_trace_output(Eterm to, Eterm token, Eterm msg);
+Sint
+erts_proc_sig_fetch_msgq_len_offs__(Process *proc)
+{
+ ErtsProcSigMsgQLenOffsetMarker *marker
+ = (ErtsProcSigMsgQLenOffsetMarker *) proc->sig_inq.first;
+
+ ASSERT(marker->common.tag == ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK);
+
+ if (marker->common.next) {
+ Sint len;
+
+ proc->flags |= F_DELAYED_PSIGQS_LEN;
+
+ /*
+ * Prevent update of sig_qs.len in fetch. These
+ * updates are done via process-info signal(s)
+ * instead...
+ */
+ len = proc->sig_inq.len;
+ marker->delayed_len += len;
+ marker->len_offset -= len;
+ proc->sig_inq.len = 0;
+
+ /*
+ * Temporarily remove marker during fetch...
+ */
+
+ proc->sig_inq.first = marker->common.next;
+ if (proc->sig_inq.last == &marker->common.next)
+ proc->sig_inq.last = &proc->sig_inq.first;
+ if (proc->sig_inq.nmsigs.next == &marker->common.next)
+ proc->sig_inq.nmsigs.next = &proc->sig_inq.first;
+ if (proc->sig_inq.nmsigs.last == &marker->common.next)
+ proc->sig_inq.nmsigs.last = &proc->sig_inq.first;
+
+ erts_proc_sig_fetch__(proc);
+
+ marker->common.next = NULL;
+ proc->sig_inq.first = (ErtsMessage *) marker;
+ proc->sig_inq.last = &marker->common.next;
+
+ }
+
+ return marker->delayed_len;
+}
+
+static ERTS_INLINE Sint
+proc_sig_privqs_len(Process *c_p, int have_qlock)
+{
+ Sint res = c_p->sig_qs.len;
+
+ ERTS_LC_ASSERT(!have_qlock
+ ? (ERTS_PROC_LOCK_MAIN
+ == erts_proc_lc_my_proc_locks(c_p))
+ : ((ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_MAIN)
+ == ((ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_MAIN)
+ & erts_proc_lc_my_proc_locks(c_p))));
+
+ if (c_p->flags & F_DELAYED_PSIGQS_LEN) {
+ ErtsProcSigMsgQLenOffsetMarker *marker;
+
+ if (!have_qlock)
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
+
+ marker = (ErtsProcSigMsgQLenOffsetMarker *) c_p->sig_inq.first;
+ ASSERT(marker);
+ ASSERT(marker->common.tag == ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK);
+
+ res += marker->delayed_len;
+
+ if (!have_qlock)
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
+ }
+
+#ifdef ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN
+ {
+ Sint len = 0;
+ ERTS_FOREACH_SIG_PRIVQS(
+ c_p, mp,
+ {
+ if (ERTS_SIG_IS_MSG(mp))
+ len++;
+ });
+ ERTS_ASSERT(res == len);
+ }
+#endif
+
+ return res;
+}
+
+Sint
+erts_proc_sig_privqs_len(Process *c_p)
+{
+ return proc_sig_privqs_len(c_p, 0);
+}
+
+static void do_seq_trace_output(Eterm to, Eterm token, Eterm msg);
static void
send_gen_exit_signal(Process *c_p, Eterm from_tag,
@@ -869,7 +937,7 @@ send_gen_exit_signal(Process *c_p, Eterm from_tag,
}
}
-void
+static void
do_seq_trace_output(Eterm to, Eterm token, Eterm msg)
{
/*
@@ -887,15 +955,17 @@ do_seq_trace_output(Eterm to, Eterm token, Eterm msg)
else
rp = erts_proc_lookup_raw_inc_refc(to);
- erts_proc_lock(rp, ERTS_PROC_LOCK_MSGQ);
+ if (rp) {
+ erts_proc_lock(rp, ERTS_PROC_LOCK_MSGQ);
- if (!ERTS_PROC_IS_EXITING(rp))
- seq_trace_output(token, msg, SEQ_TRACE_SEND, to, rp);
+ if (!ERTS_PROC_IS_EXITING(rp))
+ seq_trace_output(token, msg, SEQ_TRACE_SEND, to, rp);
- erts_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
+ erts_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
- if (!is_normal_sched)
- erts_proc_dec_refc(rp);
+ if (!is_normal_sched)
+ erts_proc_dec_refc(rp);
+ }
}
void
@@ -1352,6 +1422,59 @@ erts_proc_sig_send_is_alive_request(Process *c_p, Eterm to, Eterm ref)
}
}
+int
+erts_proc_sig_send_process_info_request(Process *c_p,
+ Eterm to,
+ int *item_ix,
+ int len,
+ int need_msgq_len,
+ int flags,
+ Uint reserve_size,
+ Eterm ref)
+{
+ Uint size = sizeof(ErtsProcessInfoSig) + (len - 1) * sizeof(int);
+ ErtsProcessInfoSig *pis = erts_alloc(ERTS_ALC_T_SIG_DATA, size);
+ int res;
+
+ ASSERT(c_p);
+ ASSERT(item_ix);
+ ASSERT(len > 0);
+ ASSERT(is_internal_ordinary_ref(ref));
+
+ pis->common.tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_PROCESS_INFO,
+ 0, 0);
+
+ if (!need_msgq_len)
+ pis->msgq_len_offset = ERTS_PROC_SIG_PI_MSGQ_LEN_IGNORE;
+ else {
+ pis->msgq_len_offset = ERTS_PROC_SIG_PI_MSGQ_LEN_SYNC;
+ pis->marker.common.next = NULL;
+ pis->marker.common.specific.next = NULL;
+ pis->marker.common.tag = ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK;
+ pis->marker.refc = 0;
+ pis->marker.delayed_len = 0;
+ pis->marker.len_offset = 0;
+ }
+ pis->requester = c_p->common.id;
+ sys_memcpy((void *) &pis->oref_thing,
+ (void *) internal_ref_val(ref),
+ sizeof(ErtsORefThing));
+ pis->ref = make_internal_ref((char *) &pis->oref_thing);
+ pis->reserve_size = reserve_size;
+ pis->len = len;
+ pis->flags = flags;
+ sys_memcpy((void *) &pis->item_ix[0],
+ (void *) item_ix,
+ sizeof(int)*len);
+ res = proc_queue_signal(c_p, to, (ErtsSignal *) pis,
+ ERTS_SIG_Q_OP_PROCESS_INFO);
+ if (res)
+ (void) maybe_elevate_sig_handling_prio(c_p, to);
+ else
+ erts_free(ERTS_ALC_T_SIG_DATA, pis);
+ return res;
+}
+
static void
is_alive_response(Process *c_p, ErtsMessage *mp, int is_alive)
{
@@ -1552,7 +1675,6 @@ remove_mq_m_sig(Process *c_p, ErtsMessage *sig, ErtsMessage **next_sig, ErtsMess
{
/* Removing message... */
ASSERT(!ERTS_SIG_IS_NON_MSG(sig));
- ASSERT(c_p->sig_qs.len > 0);
c_p->sig_qs.len--;
remove_mq_sig(c_p, sig, next_sig, next_nm_sig);
}
@@ -1562,7 +1684,6 @@ remove_iq_m_sig(Process *c_p, ErtsMessage *sig, ErtsMessage **next_sig)
{
/* Removing message... */
ASSERT(!ERTS_SIG_IS_NON_MSG(sig));
- ASSERT(c_p->sig_qs.len > 0);
c_p->sig_qs.len--;
remove_iq_sig(c_p, sig, next_sig);
}
@@ -2138,6 +2259,258 @@ handle_group_leader(Process *c_p, ErtsSigGroupLeader *sgl)
destroy_sig_group_leader(sgl);
}
+static void
+check_push_msgq_len_offs_marker(Process *rp, ErtsSignal *sig)
+{
+ ErtsProcessInfoSig *pisig = (ErtsProcessInfoSig *) sig;
+
+ ASSERT(ERTS_PROC_SIG_OP(sig->common.tag) == ERTS_SIG_Q_OP_PROCESS_INFO);
+
+ if (pisig->msgq_len_offset == ERTS_PROC_SIG_PI_MSGQ_LEN_SYNC) {
+ ErtsProcSigMsgQLenOffsetMarker *mrkr;
+ Sint len, msgq_len_offset;
+ ErtsMessage *first = rp->sig_inq.first;
+ ASSERT(first);
+ if (((ErtsSignal *) first)->common.tag == ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK)
+ mrkr = (ErtsProcSigMsgQLenOffsetMarker *) first;
+ else {
+ mrkr = &pisig->marker;
+
+ ASSERT(mrkr->common.tag == ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK);
+
+ mrkr->common.next = first;
+ ASSERT(rp->sig_inq.last != &rp->sig_inq.first);
+ if (rp->sig_inq.nmsigs.next == &rp->sig_inq.first)
+ rp->sig_inq.nmsigs.next = &mrkr->common.next;
+ if (rp->sig_inq.nmsigs.last == &rp->sig_inq.first)
+ rp->sig_inq.nmsigs.last = &mrkr->common.next;
+ rp->sig_inq.first = (ErtsMessage *) mrkr;
+ }
+
+ len = rp->sig_inq.len;
+ msgq_len_offset = len - mrkr->len_offset;
+
+ mrkr->len_offset = len;
+ mrkr->refc++;
+
+ pisig->msgq_len_offset = msgq_len_offset;
+
+#ifdef DEBUG
+ /* save pointer to used marker... */
+ pisig->marker.common.specific.attachment = (void *) mrkr;
+#endif
+
+ }
+}
+
+static void
+destroy_process_info_request(Process *c_p, ErtsProcessInfoSig *pisig)
+{
+ int dealloc_pisig = !0;
+
+ if (pisig->msgq_len_offset != ERTS_PROC_SIG_PI_MSGQ_LEN_IGNORE) {
+ Sint refc;
+ int dealloc_marker = 0;
+ ErtsProcSigMsgQLenOffsetMarker *marker;
+#ifdef ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN
+ Sint delayed_len;
+#endif
+
+ ASSERT(pisig->msgq_len_offset >= 0);
+
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
+ marker = (ErtsProcSigMsgQLenOffsetMarker *) c_p->sig_inq.first;
+ ASSERT(marker);
+ ASSERT(marker->refc > 0);
+ ASSERT(pisig->marker.common.specific.attachment == (void *) marker);
+
+ marker->delayed_len -= pisig->msgq_len_offset;
+#ifdef ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN
+ delayed_len = marker->delayed_len;
+#endif
+
+ refc = --marker->refc;
+ if (refc) {
+ if (marker == &pisig->marker) {
+ /* Another signal using our marker... */
+ dealloc_pisig = 0;
+ }
+ }
+ else {
+ /* Marker unused; remove it... */
+ ASSERT(marker->delayed_len + marker->len_offset == 0);
+#ifdef ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN
+ delayed_len += marker->len_offset;
+#endif
+ if (marker != &pisig->marker)
+ dealloc_marker = !0; /* used another signal's marker... */
+ c_p->sig_inq.first = marker->common.next;
+ if (c_p->sig_inq.last == &marker->common.next)
+ c_p->sig_inq.last = &c_p->sig_inq.first;
+ if (c_p->sig_inq.nmsigs.next == &marker->common.next)
+ c_p->sig_inq.nmsigs.next = &c_p->sig_inq.first;
+ if (c_p->sig_inq.nmsigs.last == &marker->common.next)
+ c_p->sig_inq.nmsigs.last = &c_p->sig_inq.first;
+ }
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
+
+ if (!refc) {
+ c_p->flags &= ~F_DELAYED_PSIGQS_LEN;
+ /* Adjust msg len of inner+middle queue */
+ ASSERT(marker->len_offset <= 0);
+ c_p->sig_qs.len -= marker->len_offset;
+
+ ASSERT(c_p->sig_qs.len >= 0);
+ }
+
+#ifdef ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN
+ {
+ Sint len = 0;
+ ERTS_FOREACH_SIG_PRIVQS(
+ c_p, mp,
+ {
+ if (ERTS_SIG_IS_MSG(mp))
+ len++;
+ });
+ ERTS_ASSERT(c_p->sig_qs.len + delayed_len == len);
+ }
+#endif
+
+
+ if (dealloc_marker) {
+ ErtsProcessInfoSig *pisig2
+ = (ErtsProcessInfoSig *) (((char *) marker)
+ - offsetof(ErtsProcessInfoSig,
+ marker));
+ erts_free(ERTS_ALC_T_SIG_DATA, pisig2);
+ }
+ }
+
+ if (dealloc_pisig)
+ erts_free(ERTS_ALC_T_SIG_DATA, pisig);
+}
+
+static int
+handle_process_info(Process *c_p, ErtsMessage *sig,
+ ErtsMessage ***next_nm_sig, int is_alive)
+{
+ ErtsProcessInfoSig *pisig = (ErtsProcessInfoSig *) sig;
+ Uint reds = 0;
+ Process *rp;
+
+ if (pisig->msgq_len_offset != ERTS_PROC_SIG_PI_MSGQ_LEN_IGNORE) {
+ /*
+ * Request requires message queue data to be updated
+ * before inspection...
+ */
+
+ ASSERT(pisig->msgq_len_offset >= 0);
+ /*
+ * Update sig_qs.len to reflect the length
+ * of the message queue...
+ */
+ c_p->sig_qs.len += pisig->msgq_len_offset;
+
+ if (is_alive) {
+ /*
+ * Move the message part of the message queue into
+ * the inner signal queue...
+ */
+ if (*next_nm_sig != &c_p->sig_qs.cont) {
+ *c_p->sig_qs.last = c_p->sig_qs.cont;
+ c_p->sig_qs.last = *next_nm_sig;
+
+ c_p->sig_qs.cont = **next_nm_sig;
+ if (c_p->sig_qs.nmsigs.last == *next_nm_sig)
+ c_p->sig_qs.nmsigs.last = &c_p->sig_qs.cont;
+ *next_nm_sig = &c_p->sig_qs.cont;
+ *c_p->sig_qs.last = NULL;
+ }
+
+ if (!pisig->common.specific.next) {
+ /*
+ * No more signals in middle queue...
+ *
+ * Process-info 'status' needs the sig-q
+ * process flag to be updated in order
+ * to show an accurate result...
+ */
+ erts_atomic32_read_band_nob(&c_p->state,
+ ~ERTS_PSFLG_SIG_Q);
+ }
+
+#ifdef ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN
+ {
+ Sint len;
+ ErtsMessage *mp;
+ for (mp = c_p->sig_qs.first, len = 0; mp; mp = mp->next) {
+ ERTS_ASSERT(ERTS_SIG_IS_MSG(mp));
+ len++;
+ }
+ ERTS_ASSERT(c_p->sig_qs.len == len);
+ }
+#endif
+ }
+ }
+ if (is_alive)
+ remove_nm_sig(c_p, sig, next_nm_sig);
+
+ rp = erts_proc_lookup(pisig->requester);
+ ASSERT(c_p != rp);
+ if (rp) {
+ Eterm msg, res, ref, *hp;
+ ErtsProcLocks locks = 0;
+ ErtsHeapFactory hfact;
+ ErtsMessage *mp;
+ Uint reserve_size = 3 + sizeof(pisig->oref_thing)/sizeof(Eterm);
+
+ if (!is_alive) {
+ ErlOffHeap *ohp;
+ mp = erts_alloc_message_heap(rp, &locks, reserve_size, &hp, &ohp);
+ res = am_undefined;
+ }
+ else {
+ ErlHeapFragment *hfrag;
+
+ reserve_size += pisig->reserve_size;
+
+ mp = erts_alloc_message(0, NULL);
+ hfrag = new_message_buffer(reserve_size);
+ mp->data.heap_frag = hfrag;
+ erts_factory_selfcontained_message_init(&hfact, mp, &hfrag->mem[0]);
+
+ res = erts_process_info(c_p, &hfact, c_p, ERTS_PROC_LOCK_MAIN,
+ pisig->item_ix, pisig->len,
+ pisig->flags, reserve_size, &reds);
+
+ hp = erts_produce_heap(&hfact,
+ 3 + sizeof(pisig->oref_thing)/sizeof(Eterm),
+ 0);
+ }
+
+ sys_memcpy((void *) hp, (void *) &pisig->oref_thing,
+ sizeof(pisig->oref_thing));
+ ref = make_internal_ref(hp);
+ hp += sizeof(pisig->oref_thing)/sizeof(Eterm);
+
+ msg = TUPLE2(hp, ref, res);
+
+ if (is_alive)
+ erts_factory_trim_and_close(&hfact, &msg, 1);
+
+ erts_queue_message(rp, locks, mp, msg, c_p->common.id);
+
+ if (!is_alive && locks)
+ erts_proc_unlock(rp, locks);
+ }
+
+ destroy_process_info_request(c_p, pisig);
+
+ if (reds > INT_MAX/8)
+ reds = INT_MAX/8;
+
+ return ((int) reds)*4 + 8;
+}
/*
* Called in order to handle incoming signals.
@@ -2153,7 +2526,7 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep,
ErtsMessage *sig, ***next_nm_sig;
ErtsSigRecvTracing tracing;
- ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p);
+ ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);
ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
if (local_only)
@@ -2502,6 +2875,12 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep,
ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
break;
+ case ERTS_SIG_Q_OP_PROCESS_INFO:
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ handle_process_info(c_p, sig, next_nm_sig, !0);
+ ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ break;
+
case ERTS_SIG_Q_OP_TRACE_CHANGE_STATE: {
Uint16 type = ERTS_PROC_SIG_TYPE(tag);
@@ -2655,7 +3034,7 @@ stop: {
c_p->sig_qs.save = c_p->sig_qs.saved_last;
}
- ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p);
+ ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);
*redsp = cnt/4 + 1;
@@ -2707,8 +3086,8 @@ erts_proc_sig_handle_exit(Process *c_p, int *redsp)
int cnt, limit;
ErtsMessage *sig, ***next_nm_sig;
- ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p);
- ERTS_LC_ASSERT(!erts_proc_lc_my_proc_locks(c_p));
+ ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);
+ ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCK_MAIN);
ASSERT(!(ERTS_PSFLG_SIG_IN_Q & erts_atomic32_read_nob(&c_p->state)));
@@ -2815,9 +3194,11 @@ erts_proc_sig_handle_exit(Process *c_p, int *redsp)
}
case ERTS_SIG_Q_OP_IS_ALIVE:
- ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
is_alive_response(c_p, sig, 0);
- ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
+ break;
+
+ case ERTS_SIG_Q_OP_PROCESS_INFO:
+ handle_process_info(c_p, sig, next_nm_sig, 0);
break;
case ERTS_SIG_Q_OP_TRACE_CHANGE_STATE:
@@ -3003,6 +3384,13 @@ erts_proc_sig_signal_size(ErtsSignal *sig)
size = sizeof(ErtsSigTraceInfo);
break;
+ case ERTS_SIG_Q_OP_PROCESS_INFO: {
+ ErtsProcessInfoSig *pisig = (ErtsProcessInfoSig *) sig;
+ size = sizeof(ErtsProcessInfoSig);
+ size += (pisig->len - 1) * sizeof(int);
+ break;
+ }
+
default:
ERTS_INTERNAL_ERROR("Unknown signal");
break;
@@ -3058,8 +3446,7 @@ erts_proc_sig_receive_helper(Process *c_p,
consumed_reds += 4;
left_reds -= 4;
erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
- if (c_p->sig_inq.first)
- erts_proc_sig_fetch(c_p);
+ erts_proc_sig_fetch(c_p);
/*
* Messages may have been moved directly to
* inner queue...
@@ -3220,6 +3607,7 @@ handle_message_enqueued_tracing(Process *c_p,
Sint tok_label = 0;
Sint tok_lastcnt = 0;
Sint tok_serial = 0;
+ Sint len = erts_proc_sig_privqs_len(c_p);
Eterm seq_trace_token = ERL_MESSAGE_TOKEN(msg);
if (seq_trace_token != NIL && is_tuple(seq_trace_token)) {
@@ -3231,7 +3619,7 @@ handle_message_enqueued_tracing(Process *c_p,
DTRACE6(message_queued,
tracing->messages.receiver_name,
size_object(ERL_MESSAGE_TERM(msg)),
- c_p->sig_qs.len,
+ len, /* This is NOT the message queue len, but it's something... */
tok_label, tok_lastcnt, tok_serial);
}
#endif
@@ -3303,24 +3691,11 @@ handle_msg_tracing(Process *c_p, ErtsSigRecvTracing *tracing,
return 0;
}
-/*
- * ERTS_INSPECT_MSGQ_KEEP_OH_MSGS == 0 will move off heap messages
- * into the heap of the inspected process if off_heap_message_queue
- * is false when process_info(_, messages) is called. That is, the
- * following GC will have more data in the rootset compared to the
- * scenario when process_info(_, messages) had not been called.
- *
- * ERTS_INSPECT_MSGQ_KEEP_OH_MSGS != 0 will keep off heap messages
- * off heap when process_info(_, messages) is called regardless of
- * the off_heap_message_queue setting of the process. That is, it
- * will change the following execution of the process as little as
- * possible.
- */
-#define ERTS_INSPECT_MSGQ_KEEP_OH_MSGS 1
-
Uint
-erts_proc_sig_prep_msgq_for_inspection(Process *c_p, Process *rp,
+erts_proc_sig_prep_msgq_for_inspection(Process *c_p,
+ Process *rp,
ErtsProcLocks rp_locks,
+ int info_on_self,
ErtsMessageInfo *mip)
{
Uint tot_heap_size;
@@ -3329,9 +3704,8 @@ erts_proc_sig_prep_msgq_for_inspection(Process *c_p, Process *rp,
int self_on_heap;
/*
- * Prepare the message queue for inspection
- * by process_info().
- *
+ * Prepare the message queue (inner signal queue)
+ * for inspection by process_info().
*
* - Decode all messages on external format
* - Remove all corrupt dist messages from queue
@@ -3340,20 +3714,11 @@ erts_proc_sig_prep_msgq_for_inspection(Process *c_p, Process *rp,
* - Return total heap size need for all messages
* that needs to be copied.
*
- * If ERTS_INSPECT_MSGQ_KEEP_OH_MSGS == 0:
- * - In case off heap messages is disabled and
- * we are inspecting our own queue, move all
- * off heap data into the heap.
*/
- /*
- * All non-message signals *need* to have been
- * handled before calling this functions...
- */
- ASSERT(!rp->sig_qs.cont);
- ASSERT(!rp->sig_qs.nmsigs.next && !rp->sig_qs.nmsigs.last);
+ ASSERT(!info_on_self || c_p == rp);
- self_on_heap = c_p == rp && !(c_p->flags & F_OFF_HEAP_MSGQ);
+ self_on_heap = info_on_self && !(c_p->flags & F_OFF_HEAP_MSGQ);
tot_heap_size = 0;
i = 0;
@@ -3367,8 +3732,7 @@ erts_proc_sig_prep_msgq_for_inspection(Process *c_p, Process *rp,
if (ERTS_SIG_IS_EXTERNAL_MSG(mp)) {
/* decode it... */
if (mp->data.attached)
- erts_decode_dist_message(rp, rp_locks, mp,
- ERTS_INSPECT_MSGQ_KEEP_OH_MSGS);
+ erts_decode_dist_message(rp, rp_locks, mp, !0);
msg = ERL_MESSAGE_TERM(mp);
@@ -3394,44 +3758,11 @@ erts_proc_sig_prep_msgq_for_inspection(Process *c_p, Process *rp,
ASSERT(is_value(msg));
-#if ERTS_INSPECT_MSGQ_KEEP_OH_MSGS
if (is_not_immed(msg) && (!self_on_heap || mp->data.attached)) {
Uint sz = size_object(msg);
mip[i].size = sz;
tot_heap_size += sz;
}
-#else
- if (self_on_heap) {
- if (mp->data.attached) {
- ErtsMessage *tmp = NULL;
- if (mp->data.attached != ERTS_MSG_COMBINED_HFRAG) {
- erts_link_mbuf_to_proc(rp, mp->data.heap_frag);
- mp->data.attached = NULL;
- }
- else {
- /*
- * Need to replace the message reference since
- * we will get references to the message data
- * from the heap...
- */
- ErtsMessage **mpp;
- tmp = erts_alloc_message(0, NULL);
- sys_memcpy((void *) tmp->m, (void *) mp->m,
- sizeof(Eterm)*ERL_MESSAGE_REF_ARRAY_SZ);
- mpp = i == 0 ? &rp->sig_qs.first : &mip[i-1].msgp->next;
- erts_msgq_replace_msg_ref(&rp->msg, tmp, mpp);
- erts_save_message_in_proc(rp, mp);
- mp = tmp;
- }
- }
- }
- else if (is_not_immed(msg)) {
- Uint sz = size_object(msg);
- mip[i].size = sz;
- tot_heap_size += sz;
- }
-
-#endif
mip[i].msgp = mp;
i++;
@@ -3439,6 +3770,8 @@ erts_proc_sig_prep_msgq_for_inspection(Process *c_p, Process *rp,
mp = mp->next;
}
+ ASSERT(c_p->sig_qs.len == i);
+
return tot_heap_size;
}
@@ -3475,11 +3808,11 @@ move_msg_to_heap(Process *c_p, ErtsMessage *mp)
void
erts_proc_sig_move_msgs_to_heap(Process *c_p)
{
- ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p);
+ ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);
ERTS_FOREACH_SIG_PRIVQS(c_p, sig, move_msg_to_heap(c_p, sig));
- ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p);
+ ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(c_p, 0);
}
@@ -3670,6 +4003,7 @@ erts_proc_sig_debug_foreach_sig(Process *c_p,
case ERTS_SIG_Q_OP_IS_ALIVE:
case ERTS_SIG_Q_OP_TRACE_CHANGE_STATE:
+ case ERTS_SIG_Q_OP_PROCESS_INFO:
break;
default:
@@ -3714,10 +4048,8 @@ chk_eterm(Process *c_p, int privq, ErtsMessage *mp, Eterm term)
for (bp = erts_message_to_heap_frag(mp); bp; bp = bp->next) {
if (bp->mem <= ptr && ptr < bp->mem + bp->used_size)
return;
- bp = bp->next;
}
- ERTS_ASSERT(privq);
ASSERT(erts_dbg_within_proc(ptr, c_p, NULL));
}
@@ -3740,6 +4072,12 @@ proc_sig_hdbg_check_queue(Process *proc,
ErtsMessage **save = proc->sig_qs.save;
ErtsMessage **saved_last = proc->sig_qs.saved_last;
+ if (!privq) {
+ ErtsSignal *sig = (ErtsSignal *) *sig_next;
+ if (sig->common.tag == ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK) {
+
+ }
+ }
nm_next = sig_nm_next;
nm_last = sig_nm_last;
@@ -3804,21 +4142,29 @@ proc_sig_hdbg_check_queue(Process *proc,
if (!sig)
break;
- nm_sigs++;
+ nm_sig = (ErtsSignal *) sig;
- ERTS_ASSERT(!last_nm_sig_found);
- ERTS_ASSERT(ERTS_SIG_IS_NON_MSG(sig));
+ if (nm_sig->common.tag == ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK) {
+ ERTS_ASSERT(!privq);
+ ERTS_ASSERT(sig == *sig_next);
+ }
+ else {
+ nm_sigs++;
- nm_sig = (ErtsSignal *) sig;
+ ERTS_ASSERT(!last_nm_sig_found);
+ ERTS_ASSERT(ERTS_SIG_IS_NON_MSG(sig));
+
+ ERTS_ASSERT(nm_next == next);
+
+ if (nm_last == next) {
+ ASSERT(!nm_sig->common.specific.next);
+ last_nm_sig_found = 1;
+ }
- ERTS_ASSERT(nm_next == next);
+ nm_next = nm_sig->common.specific.next;
- if (nm_last == next) {
- ASSERT(!nm_sig->common.specific.next);
- last_nm_sig_found = 1;
}
- nm_next = nm_sig->common.specific.next;
next = &nm_sig->common.next;
sig = nm_sig->common.next;
@@ -3873,10 +4219,10 @@ proc_sig_hdbg_check_queue(Process *proc,
}
void
-erts_proc_sig_hdbg_check_priv_queue(Process *p, char *what, char *file, int line)
+erts_proc_sig_hdbg_check_priv_queue(Process *p, int qlock, char *what, char *file, int line)
{
int found_saved_last = 0;
- Sint len1, len2;
+ Sint len, len1, len2;
ERTS_LC_ASSERT(erts_thr_progress_is_blocking()
|| ERTS_PROC_IS_EXITING(p)
|| (ERTS_PROC_LOCK_MAIN
@@ -3901,7 +4247,8 @@ erts_proc_sig_hdbg_check_priv_queue(Process *p, char *what, char *file, int line
ERTS_PSFLG_SIG_Q);
if (p->sig_qs.saved_last)
ERTS_ASSERT(found_saved_last);
- ERTS_ASSERT(p->sig_qs.len == len1 + len2);
+ len = proc_sig_privqs_len(p, qlock);
+ ERTS_ASSERT(len == len1 + len2);
}
void
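
ErtsProcessInfoSig above ends in a one-element item_ix array and is allocated as sizeof(ErtsProcessInfoSig) + (len - 1)*sizeof(int), the classic trailing-array idiom. A minimal stand-alone model of that allocation follows; InfoRequest and make_request are hypothetical names, not ERTS code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    size_t len;
    int item_ix[1];     /* actually 'len' entries; extra space follows the struct */
} InfoRequest;

static InfoRequest *make_request(const int *items, size_t len)
{
    /* over-allocate for the trailing entries, mirroring the sizeof + (len-1) pattern */
    size_t size = sizeof(InfoRequest) + (len - 1) * sizeof(int);
    InfoRequest *req = malloc(size);
    req->len = len;
    memcpy(req->item_ix, items, len * sizeof(int));
    return req;
}

int main(void)
{
    int items[] = { 4, 8, 15 };
    InfoRequest *req = make_request(items, 3);
    for (size_t i = 0; i < req->len; i++)
        printf("item_ix[%zu] = %d\n", i, req->item_ix[i]);
    free(req);
    return 0;
}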
diff --git a/erts/emulator/beam/erl_proc_sig_queue.h b/erts/emulator/beam/erl_proc_sig_queue.h
index 56fe3e683e..d250ad820f 100644
--- a/erts/emulator/beam/erl_proc_sig_queue.h
+++ b/erts/emulator/beam/erl_proc_sig_queue.h
@@ -32,6 +32,7 @@
* - Unlink
* - Group leader
* - Is process alive
+ * - Process info request
* - Trace change
*
* The signal queue consists of three parts:
@@ -78,6 +79,9 @@
#if 0
# define ERTS_PROC_SIG_HARD_DEBUG
#endif
+#if 0
+# define ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN
+#endif
struct erl_mesg;
@@ -95,22 +99,22 @@ typedef struct {
#ifdef ERTS_PROC_SIG_HARD_DEBUG
# define ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE(P) \
ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE__((P), "")
-# define ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(P) \
- ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE__((P), "")
+# define ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(P, QL) \
+ ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE__((P), (QL), "")
# define ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE__(P, What) \
erts_proc_sig_hdbg_check_in_queue((P), (What), __FILE__, __LINE__)
-# define ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE__(P, What) \
- erts_proc_sig_hdbg_check_priv_queue((P), (What), __FILE__, __LINE__)
+# define ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE__(P, QL, What) \
+ erts_proc_sig_hdbg_check_priv_queue((P), (QL), (What), __FILE__, __LINE__)
struct process;
-void erts_proc_sig_hdbg_check_priv_queue(struct process *c_p, char *what,
- char *file, int line);
+void erts_proc_sig_hdbg_check_priv_queue(struct process *c_p, int qlock,
+ char *what, char *file, int line);
void erts_proc_sig_hdbg_check_in_queue(struct process *c_p, char *what,
char *file, int line);
#else
# define ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE(P)
-# define ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(P)
+# define ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(P, QL)
# define ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE__(P, What)
-#define ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE__(P, What)
+#define ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE__(P, QL, What)
#endif
#endif
@@ -118,6 +122,57 @@ void erts_proc_sig_hdbg_check_in_queue(struct process *c_p, char *what,
#if !defined(ERTS_PROC_SIG_QUEUE_H__) && !defined(ERTS_PROC_SIG_QUEUE_TYPE_ONLY)
#define ERTS_PROC_SIG_QUEUE_H__
+#define ERTS_SIG_Q_OP_BITS 8
+#define ERTS_SIG_Q_OP_SHIFT 0
+#define ERTS_SIG_Q_OP_MASK ((1 << ERTS_SIG_Q_OP_BITS) - 1)
+
+#define ERTS_SIG_Q_TYPE_BITS 8
+#define ERTS_SIG_Q_TYPE_SHIFT ERTS_SIG_Q_OP_BITS
+#define ERTS_SIG_Q_TYPE_MASK ((1 << ERTS_SIG_Q_TYPE_BITS) - 1)
+
+#define ERTS_SIG_Q_NON_X_BITS__ (_HEADER_ARITY_OFFS \
+ + ERTS_SIG_Q_OP_BITS \
+ + ERTS_SIG_Q_TYPE_BITS)
+
+#define ERTS_SIG_Q_XTRA_BITS (32 - ERTS_SIG_Q_NON_X_BITS__)
+#define ERTS_SIG_Q_XTRA_SHIFT (ERTS_SIG_Q_OP_BITS \
+ + ERTS_SIG_Q_TYPE_BITS)
+#define ERTS_SIG_Q_XTRA_MASK ((1 << ERTS_SIG_Q_XTRA_BITS) - 1)
+
+
+#define ERTS_PROC_SIG_OP(Tag) \
+ ((int) (_unchecked_thing_arityval((Tag)) \
+ >> ERTS_SIG_Q_OP_SHIFT) & ERTS_SIG_Q_OP_MASK)
+
+#define ERTS_PROC_SIG_TYPE(Tag) \
+ ((Uint16) (_unchecked_thing_arityval((Tag)) \
+ >> ERTS_SIG_Q_TYPE_SHIFT) & ERTS_SIG_Q_TYPE_MASK)
+
+#define ERTS_PROC_SIG_XTRA(Tag) \
+ ((Uint32) (_unchecked_thing_arityval((Tag)) \
+ >> ERTS_SIG_Q_XTRA_SHIFT) & ERTS_SIG_Q_XTRA_MASK)
+
+#define ERTS_PROC_SIG_MAKE_TAG(Op, Type, Xtra) \
+ (ASSERT(0 <= (Xtra) && (Xtra) <= ERTS_SIG_Q_XTRA_MASK), \
+ _make_header((((Type) & ERTS_SIG_Q_TYPE_MASK) \
+ << ERTS_SIG_Q_TYPE_SHIFT) \
+ | (((Op) & ERTS_SIG_Q_OP_MASK) \
+ << ERTS_SIG_Q_OP_SHIFT) \
+ | (((Xtra) & ERTS_SIG_Q_XTRA_MASK) \
+ << ERTS_SIG_Q_XTRA_SHIFT), \
+ _TAG_HEADER_EXTERNAL_PID))
+
+
+/*
+ * ERTS_SIG_Q_OP_MSGQ_LEN_OFFS_MARK is not an actual
+ * operation. We keep it at the top of the OP range,
+ * larger than ERTS_SIG_Q_OP_MAX.
+ */
+#define ERTS_SIG_Q_OP_MSGQ_LEN_OFFS_MARK ERTS_SIG_Q_OP_MASK
+
+#define ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK \
+ ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_MSGQ_LEN_OFFS_MARK,0,0)
+
struct dist_entry_;
/*
@@ -451,6 +506,58 @@ void
erts_proc_sig_send_is_alive_request(Process *c_p, Eterm to,
Eterm ref);
+/**
+ *
+ * @brief Send a 'process info request' signal to a process.
+ *
+ * A response message '{Ref, Result}' is sent to the
+ * sender when the request has been served, where Ref is
+ * the reference passed as the 'ref' argument and Result
+ * corresponds to the return value of erlang:process_info/[1,2].
+ *
+ * @param[in] c_p Pointer to process struct of
+ * currently executing process.
+ * NULL if signal arrived via
+ * distribution.
+ *
+ * @param[in] to Identifier of receiver.
+ *
+ * @param[in] item_ix Info index array to pass to
+ * erts_process_info()
+ *
+ * @param[in] len Length of info index array
+ *
+ * @param[in] need_msgq_len Non-zero if message queue
+ * length is needed; otherwise,
+ * zero. If non-zero, sig_qs.len
+ * will be set to correspond
+ * to the message queue length
+ * before call to
+ * erts_process_info()
+ *
+ * @param[in] flags Flags to pass to
+ * erts_process_info()
+ *
+ * @param[in] reserve_size Heap size that is known to
+ * be needed. May not be correct
+ * though.
+ *
+ * @param[in] ref Reference to use in response
+ * message to the sending
+ * process (i.e., c_p).
+ *
+ */
+int
+erts_proc_sig_send_process_info_request(Process *c_p,
+ Eterm to,
+ int *item_ix,
+ int len,
+ int need_msgq_len,
+ int flags,
+ Uint reserve_size,
+ Eterm ref);
+
+
/*
* End of send operations of currently supported process signals.
*/
@@ -601,9 +708,30 @@ erts_proc_sig_receive_helper(Process *c_p, int fcalls,
*
* @param[in] c_p Pointer to process struct of
* currently executing process.
+ * @returns Number of message signals in
+ * inner plus middle signal
+ * queues after fetch completed
+ * (NOT the message queue
+ * length).
+ */
+ERTS_GLB_INLINE Sint erts_proc_sig_fetch(Process *p);
+
+/**
+ *
+ * @brief Get the number of messages in the private queues
+ *
+ * @param[in] c_p Pointer to process struct of
+ * currently executing process.
*
+ * @returns Number of message signals in
+ * inner plus middle signal
+ * queues after fetch completed
+ * (NOT the message queue
+ * length).
*/
-void erts_proc_sig_fetch(Process *p);
+Sint
+erts_proc_sig_privqs_len(Process *c_p);
+
typedef struct {
Uint size;
@@ -623,17 +751,19 @@ typedef struct {
*
* @param[in] rp_locks Process locks held on 'rp'.
*
+ * @param[in] info_on_self Integer set to non-zero value
+ * if caller is inspecting itself;
+ * otherwise, zero.
+ *
* @param[in] mip Pointer to array of
* ErtsMessageInfo structures.
- *
*/
-
Uint erts_proc_sig_prep_msgq_for_inspection(Process *c_p,
Process *rp,
ErtsProcLocks rp_locks,
+ int info_on_self,
ErtsMessageInfo *mip);
-
/**
*
* @brief Move message data of messages in private queues to heap
@@ -687,4 +817,56 @@ extern Process *erts_dirty_process_signal_handler;
extern Process *erts_dirty_process_signal_handler_high;
extern Process *erts_dirty_process_signal_handler_max;
+void erts_proc_sig_fetch__(Process *proc);
+Sint erts_proc_sig_fetch_msgq_len_offs__(Process *proc);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+
+ERTS_GLB_INLINE Sint
+erts_proc_sig_fetch(Process *proc)
+{
+ Sint res = 0;
+ ErtsSignal *sig;
+
+ ERTS_LC_ASSERT(erts_thr_progress_is_blocking()
+ || ERTS_PROC_IS_EXITING(proc)
+ || ((erts_proc_lc_my_proc_locks(proc)
+ & (ERTS_PROC_LOCK_MAIN
+ | ERTS_PROC_LOCK_MSGQ))
+ == (ERTS_PROC_LOCK_MAIN
+ | ERTS_PROC_LOCK_MSGQ)));
+
+ ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE(proc);
+ ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(proc, !0);
+
+ sig = (ErtsSignal *) proc->sig_inq.first;
+ if (sig) {
+ if (ERTS_LIKELY(sig->common.tag != ERTS_PROC_SIG_MSGQ_LEN_OFFS_MARK))
+ erts_proc_sig_fetch__(proc);
+ else
+ res = erts_proc_sig_fetch_msgq_len_offs__(proc);
+ }
+
+ res += proc->sig_qs.len;
+
+ ERTS_HDBG_CHECK_SIGNAL_PRIV_QUEUE(proc, !0);
+
+#ifdef ERTS_PROC_SIG_HARD_DEBUG_SIGQ_MSG_LEN
+ {
+ Sint len = 0;
+ ERTS_FOREACH_SIG_PRIVQS(
+ proc, mp,
+ {
+ if (ERTS_SIG_IS_MSG(mp))
+ len++;
+ });
+ ERTS_ASSERT(res == len);
+ }
+#endif
+
+ return res;
+}
+
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
+
#endif /* ERTS_PROC_SIG_QUEUE_H__ */
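
The ERTS_PROC_SIG_* macros moved into this header pack an 8-bit op, an 8-bit type and the remaining extra bits into a single tag word. The sketch below models only the packing/unpacking arithmetic; the real macros additionally reserve _HEADER_ARITY_OFFS bits and wrap the value in a term header via _make_header(), which this model omits. All names are hypothetical.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define OP_BITS    8
#define OP_SHIFT   0
#define OP_MASK    ((1u << OP_BITS) - 1)

#define TYPE_BITS  8
#define TYPE_SHIFT OP_BITS
#define TYPE_MASK  ((1u << TYPE_BITS) - 1)

#define XTRA_SHIFT (OP_BITS + TYPE_BITS)
#define XTRA_BITS  (32 - XTRA_SHIFT)
#define XTRA_MASK  ((1u << XTRA_BITS) - 1)

static uint32_t make_tag(unsigned op, unsigned type, unsigned xtra)
{
    assert(op <= OP_MASK && type <= TYPE_MASK && xtra <= XTRA_MASK);
    return (op << OP_SHIFT) | (type << TYPE_SHIFT) | (xtra << XTRA_SHIFT);
}

static unsigned tag_op(uint32_t tag)   { return (tag >> OP_SHIFT) & OP_MASK; }
static unsigned tag_type(uint32_t tag) { return (tag >> TYPE_SHIFT) & TYPE_MASK; }
static unsigned tag_xtra(uint32_t tag) { return (tag >> XTRA_SHIFT) & XTRA_MASK; }

int main(void)
{
    uint32_t tag = make_tag(10 /* op */, 3 /* type */, 42 /* xtra */);
    printf("op=%u type=%u xtra=%u\n", tag_op(tag), tag_type(tag), tag_xtra(tag));
    return 0;
}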
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index be1306cd79..71541786d0 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -10785,7 +10785,6 @@ exit_permanent_prio_elevation(Process *c_p, erts_aint32_t state)
while (1) {
erts_aint32_t aprio, uprio, n, e;
ASSERT(a & ERTS_PSFLG_EXITING);
- ASSERT(!(a & ERTS_PSFLG_FREE));
aprio = ERTS_PSFLGS_GET_ACT_PRIO(a);
uprio = ERTS_PSFLGS_GET_USR_PRIO(a);
if (aprio >= uprio)
@@ -10831,8 +10830,7 @@ erts_execute_dirty_system_task(Process *c_p)
if (c_p->flags & F_DIRTY_GC_HIBERNATE) {
erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
- erts_proc_sig_fetch(c_p);
- if (c_p->sig_qs.len)
+ if (erts_proc_sig_fetch(c_p))
c_p->flags &= ~F_DIRTY_GC_HIBERNATE; /* operation aborted... */
else {
erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ|ERTS_PROC_LOCK_STATUS);
@@ -12412,12 +12410,14 @@ erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt)
case ERTS_MON_TYPE_PORT: {
Port *prt;
ASSERT(is_internal_port(mon->other.item));
+ erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
prt = erts_id2port(mon->other.item);
if (prt) {
erts_fire_port_monitor(prt, mon);
erts_port_release(prt);
mon = NULL;
}
+ erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
break;
}
case ERTS_MON_TYPE_RESOURCE:
@@ -12496,10 +12496,8 @@ erts_proc_exit_handle_monitor(ErtsMonitor *mon, void *vctxt)
ASSERT(is_internal_port(mon->other.item));
prt = erts_port_lookup_raw(mon->other.item);
if (prt) {
- erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
if (erts_port_demonitor(c_p, prt, mon) != ERTS_PORT_OP_DROPPED)
mon = NULL;
- erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
}
break;
}
@@ -12602,7 +12600,6 @@ erts_proc_exit_handle_link(ErtsLink *lnk, void *vctxt)
if (!erts_link_dist_delete(dlnk))
ldp = NULL;
- erts_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
code = erts_dsig_prepare(&dsd, dep, c_p, 0, ERTS_DSP_NO_LOCK, 0, 0);
switch (code) {
case ERTS_DSIG_PREP_CONNECTED:
@@ -12616,7 +12613,6 @@ erts_proc_exit_handle_link(ErtsLink *lnk, void *vctxt)
ASSERT(code == ERTS_DSIG_SEND_OK);
}
}
- erts_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
break;
}
default:
@@ -12678,8 +12674,6 @@ erts_do_exit_process(Process* p, Eterm reason)
cancel_suspend_of_suspendee(p, ERTS_PROC_LOCKS_ALL);
- erts_proc_sig_fetch(p);
-
if (IS_TRACED(p)) {
if (IS_TRACED_FL(p, F_TRACE_CALLS))
erts_schedule_time_break(p, ERTS_BP_CALL_TIME_SCHEDULE_EXITING);
@@ -12946,6 +12940,12 @@ erts_continue_exit_process(Process *p)
pectxt.c_p = p;
pectxt.reason = reason;
+ erts_proc_lock(p, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_MSGQ);
+
+ erts_proc_sig_fetch(p);
+
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ);
+
if (links) {
erts_link_tree_foreach_delete(&links,
erts_proc_exit_handle_link,
@@ -12967,7 +12967,6 @@ erts_continue_exit_process(Process *p)
ASSERT(!lt_monitors);
}
- erts_proc_sig_fetch(p);
/*
* erts_proc_sig_handle_exit() implements yielding.
* However, this function cannot handle it yet... loop
@@ -12979,7 +12978,6 @@ erts_continue_exit_process(Process *p)
break;
}
- erts_proc_lock(p, ERTS_PROC_LOCK_MAIN);
ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
erts_flush_trace_messages(p, ERTS_PROC_LOCK_MAIN);
@@ -13016,27 +13014,41 @@ erts_continue_exit_process(Process *p)
}
Process *
-erts_try_lock_sig_free_proc(Eterm pid, ErtsProcLocks locks)
+erts_try_lock_sig_free_proc(Eterm pid, ErtsProcLocks locks,
+ erts_aint32_t *statep)
{
Process *rp = erts_proc_lookup_raw(pid);
erts_aint32_t state;
- if (!rp)
+ if (!rp) {
+ if (statep)
+ *statep = ERTS_PSFLG_EXITING|ERTS_PSFLG_FREE;
return NULL;
+ }
ERTS_LC_ASSERT(!erts_proc_lc_my_proc_locks(rp));
state = erts_atomic32_read_nob(&rp->state);
- if (state & ERTS_PSFLG_EXITING)
+ if (statep)
+ *statep = state;
+
+ if (state & ERTS_PSFLG_FREE)
return NULL;
if (state & (ERTS_PSFLG_SIG_IN_Q|ERTS_PSFLG_SIG_Q))
return ERTS_PROC_LOCK_BUSY;
+
+ if (!locks)
+ return rp;
+
if (erts_proc_trylock(rp, locks) == EBUSY)
return ERTS_PROC_LOCK_BUSY;
state = erts_atomic32_read_nob(&rp->state);
- if (state & ERTS_PSFLG_EXITING) {
+ if (statep)
+ *statep = state;
+
+ if (state & ERTS_PSFLG_FREE) {
erts_proc_unlock(rp, locks);
return NULL;
}
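
The port-monitor case above now releases ERTS_PROC_LOCK_MAIN around erts_id2port()/erts_fire_port_monitor(), presumably so that a port lock is never acquired while the process main lock is held. The generic pthread sketch below shows the unlock/relock pattern in isolation; the lock names and their relative ranks are purely illustrative, not ERTS code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t proc_main = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;

static void fire_port_monitor(void)
{
    pthread_mutex_lock(&port_lock);
    puts("port monitor fired");
    pthread_mutex_unlock(&port_lock);
}

static void handle_monitor_while_exiting(void)
{
    pthread_mutex_lock(&proc_main);
    /* ... work that needs proc_main ... */

    pthread_mutex_unlock(&proc_main);   /* drop before taking port_lock */
    fire_port_monitor();
    pthread_mutex_lock(&proc_main);     /* reacquire and continue */

    /* ... more work that needs proc_main ... */
    pthread_mutex_unlock(&proc_main);
}

int main(void)
{
    handle_monitor_while_exiting();
    return 0;
}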
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index f7c6c3435b..e232776016 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -1391,6 +1391,7 @@ extern int erts_system_profile_ts_type;
#define F_LOCAL_SIGS_ONLY (1 << 26)
#define F_TRAP_EXIT (1 << 27) /* Trapping exit */
#define F_DEFERRED_SAVED_LAST (1 << 28)
+#define F_DELAYED_PSIGQS_LEN (1 << 29)
/*
* F_DISABLE_GC and F_DELAY_GC are similar. Both will prevent
@@ -1817,6 +1818,10 @@ void erts_print_scheduler_info(fmtfn_t to, void *to_arg, ErtsSchedulerData *esdp
void erts_print_run_queue_info(fmtfn_t, void *to_arg, ErtsRunQueue*);
void erts_dump_extended_process_state(fmtfn_t to, void *to_arg, erts_aint32_t psflg);
void erts_dump_process_state(fmtfn_t to, void *to_arg, erts_aint32_t psflg);
+Eterm erts_process_info(Process *c_p, ErtsHeapFactory *hfact,
+ Process *rp, ErtsProcLocks rp_locks,
+ int *item_ix, int item_ix_len,
+ int flags, Uint reserve_size, Uint *reds);
typedef struct {
Process *c_p;
@@ -1855,7 +1860,7 @@ Uint erts_debug_nbalance(void);
int erts_debug_wait_completed(Process *c_p, int flags);
-Uint erts_process_memory(Process *c_p, int incl_msg_inq);
+Uint erts_process_memory(Process *c_p, int include_sigs_in_transit);
#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
# define ERTS_VERIFY_UNUSED_TEMP_ALLOC(P) \
@@ -2595,7 +2600,8 @@ ERTS_TIME2REDS_IMPL__(ErtsMonotonicTime start, ErtsMonotonicTime end)
#endif
Process *erts_try_lock_sig_free_proc(Eterm pid,
- ErtsProcLocks locks);
+ ErtsProcLocks locks,
+ erts_aint32_t *statep);
Process *erts_pid2proc_not_running(Process *,
ErtsProcLocks,
diff --git a/erts/emulator/beam/erl_process_dict.c b/erts/emulator/beam/erl_process_dict.c
index aee88841ae..38be3938cd 100644
--- a/erts/emulator/beam/erl_process_dict.c
+++ b/erts/emulator/beam/erl_process_dict.c
@@ -239,39 +239,70 @@ erts_erase_dicts(Process *p)
/*
* Called from process_info/1,2.
*/
-Eterm erts_dictionary_copy(Process *p, ProcDict *pd)
+Eterm erts_dictionary_copy(ErtsHeapFactory *hfact, ProcDict *pd, Uint reserve_size)
{
- Eterm* hp;
- Eterm* heap_start;
- Eterm res = NIL;
- Eterm tmp, tmp2;
+ Eterm res;
unsigned int i, num;
+ Uint *sz;
+ Uint szi, rsz = reserve_size;
- if (pd == NULL) {
- return res;
- }
+ if (pd == NULL)
+ return NIL;
PD_CHECK(pd);
num = HASH_RANGE(pd);
- heap_start = hp = (Eterm *) erts_alloc(ERTS_ALC_T_TMP,
- sizeof(Eterm) * pd->numElements * 2);
- for (i = 0; i < num; ++i) {
- tmp = ARRAY_GET(pd, i);
+ sz = (Uint *) erts_alloc(ERTS_ALC_T_TMP, sizeof(Uint) * pd->numElements);
+
+ for (i = 0, szi = 0; i < num; ++i) {
+ Eterm tmp = ARRAY_GET(pd, i);
if (is_boxed(tmp)) {
+ Uint size;
ASSERT(is_tuple(tmp));
- res = CONS(hp, tmp, res);
- hp += 2;
- } else if (is_list(tmp)) {
+ size = size_object(tmp) + 2;
+ sz[szi++] = size;
+ rsz += size;
+ }
+ else if (is_list(tmp)) {
while (tmp != NIL) {
- tmp2 = TCAR(tmp);
- res = CONS(hp, tmp2, res);
- hp += 2;
+ Uint size = size_object(TCAR(tmp)) + 2;
+ sz[szi++] = size;
+ rsz += size;
+ tmp = TCDR(tmp);
+ }
+ }
+ }
+
+ res = NIL;
+
+ for (i = 0, szi = 0; i < num; ++i) {
+ Eterm tmp = ARRAY_GET(pd, i);
+ if (is_boxed(tmp)) {
+ Uint size;
+ Eterm el, *hp;
+ ASSERT(is_tuple(tmp));
+ size = sz[szi++];
+ rsz -= size;
+ hp = erts_produce_heap(hfact, size, rsz);
+ el = copy_struct(tmp, size-2, &hp, hfact->off_heap);
+ res = CONS(hp, el, res);
+ }
+ else if (is_list(tmp)) {
+ while (tmp != NIL) {
+ Uint size = sz[szi++];
+ Eterm el, *hp;
+ rsz -= size;
+ hp = erts_produce_heap(hfact, size, rsz);
+ el = copy_struct(TCAR(tmp), size-2, &hp, hfact->off_heap);
+ res = CONS(hp, el, res);
tmp = TCDR(tmp);
}
}
}
- res = copy_object(res, p);
- erts_free(ERTS_ALC_T_TMP, (void *) heap_start);
+
+ ASSERT(rsz == reserve_size);
+
+ erts_free(ERTS_ALC_T_TMP, sz);
+
return res;
}
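
erts_dictionary_copy() above now sizes every dictionary element in a first pass and copies in a second pass through the heap factory, so the exact heap need is known before any heap is produced. A stand-alone model of the two-pass strategy, with flat C strings standing in for terms and a plain buffer standing in for the factory; all names are hypothetical.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    const char *elems[] = { "alpha", "beta", "gamma" };
    size_t n = sizeof(elems) / sizeof(elems[0]);
    size_t sz[3];
    size_t total = 0;

    /* pass 1: record per-element sizes and the total need */
    for (size_t i = 0; i < n; i++) {
        sz[i] = strlen(elems[i]) + 1;
        total += sz[i];
    }

    /* pass 2: allocate exactly once and copy using the recorded sizes */
    char *heap = malloc(total);
    char *hp = heap;
    for (size_t i = 0; i < n; i++) {
        memcpy(hp, elems[i], sz[i]);
        hp += sz[i];
    }

    printf("copied %zu elements into %zu bytes\n", n, total);
    free(heap);
    return 0;
}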
diff --git a/erts/emulator/beam/erl_process_dict.h b/erts/emulator/beam/erl_process_dict.h
index ab58f3c239..b89b387f5a 100644
--- a/erts/emulator/beam/erl_process_dict.h
+++ b/erts/emulator/beam/erl_process_dict.h
@@ -40,7 +40,7 @@ void erts_erase_dicts(struct process *p);
void erts_dictionary_dump(fmtfn_t to, void *to_arg, ProcDict *pd);
void erts_deep_dictionary_dump(fmtfn_t to, void *to_arg,
ProcDict* pd, void (*cb)(fmtfn_t, void *, Eterm obj));
-Eterm erts_dictionary_copy(struct process *p, ProcDict *pd);
+Eterm erts_dictionary_copy(ErtsHeapFactory *hfact, ProcDict *pd, Uint reserve_size);
Eterm erts_pd_hash_get(struct process *p, Eterm id);
Uint32 erts_pd_make_hx(Eterm key);
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index 00659f9f49..243db4c734 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -109,56 +109,77 @@ link_size(ErtsMonitor *lnk, void *vsize)
*((Uint *) vsize) += erts_link_size(lnk);
}
-Uint erts_process_memory(Process *p, int incl_msg_inq) {
- Uint size = 0;
- struct saved_calls *scb;
- size += sizeof(Process);
-
- if (incl_msg_inq) {
- erts_proc_lock(p, ERTS_PROC_LOCK_MSGQ);
- erts_proc_sig_fetch(p);
- erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ);
- }
-
- erts_link_tree_foreach(ERTS_P_LINKS(p),
- link_size, (void *) &size);
- erts_monitor_tree_foreach(ERTS_P_MONITORS(p),
- monitor_size, (void *) &size);
- erts_monitor_list_foreach(ERTS_P_LT_MONITORS(p),
- monitor_size, (void *) &size);
- size += (p->heap_sz + p->mbuf_sz) * sizeof(Eterm);
- if (p->abandoned_heap)
- size += (p->hend - p->heap) * sizeof(Eterm);
- if (p->old_hend && p->old_heap)
- size += (p->old_hend - p->old_heap) * sizeof(Eterm);
-
-
- size += p->sig_qs.len * sizeof(ErtsMessage);
-
- ERTS_FOREACH_SIG_PRIVQS(
- p, mp,
- {
- if (ERTS_SIG_IS_NON_MSG((ErtsSignal *) mp))
- size += erts_proc_sig_signal_size((ErtsSignal *) mp);
- else if (mp->data.attached)
- size += erts_msg_attached_data_size(mp) * sizeof(Eterm);
- });
-
- if (p->arg_reg != p->def_arg_reg) {
- size += p->arity * sizeof(p->arg_reg[0]);
- }
-
- if (erts_atomic_read_nob(&p->psd) != (erts_aint_t) NULL)
- size += sizeof(ErtsPSD);
-
- scb = ERTS_PROC_GET_SAVED_CALLS_BUF(p);
- if (scb) {
- size += (sizeof(struct saved_calls)
- + (scb->len-1) * sizeof(scb->ct[0]));
- }
-
- size += erts_dicts_mem_size(p);
- return size;
+Uint erts_process_memory(Process *p, int include_sigs_in_transit)
+{
+ Uint size = 0;
+ struct saved_calls *scb;
+
+ size += sizeof(Process);
+
+ erts_link_tree_foreach(ERTS_P_LINKS(p),
+ link_size, (void *) &size);
+ erts_monitor_tree_foreach(ERTS_P_MONITORS(p),
+ monitor_size, (void *) &size);
+ erts_monitor_list_foreach(ERTS_P_LT_MONITORS(p),
+ monitor_size, (void *) &size);
+ size += (p->heap_sz + p->mbuf_sz) * sizeof(Eterm);
+ if (p->abandoned_heap)
+ size += (p->hend - p->heap) * sizeof(Eterm);
+ if (p->old_hend && p->old_heap)
+ size += (p->old_hend - p->old_heap) * sizeof(Eterm);
+
+ if (!include_sigs_in_transit) {
+ /*
+ * Size of message queue!
+ *
+ * Note that this assumes that any part of the
+ * message queue located in the middle queue has
+ * been moved into the inner queue prior to this
+ * call. The process_info() implementation ensures
+ * that this is done.
+ */
+ ErtsMessage *mp;
+ for (mp = p->sig_qs.first; mp; mp = mp->next) {
+ ASSERT(ERTS_SIG_IS_MSG((ErtsSignal *) mp));
+ size += sizeof(ErtsMessage);
+ if (mp->data.attached)
+ size += erts_msg_attached_data_size(mp) * sizeof(Eterm);
+ }
+ }
+ else {
+ /*
+ * Size of message queue plus size of all signals
+ * in transit to the process!
+ */
+ erts_proc_lock(p, ERTS_PROC_LOCK_MSGQ);
+ erts_proc_sig_fetch(p);
+ erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ);
+
+ ERTS_FOREACH_SIG_PRIVQS(
+ p, mp,
+ {
+ size += sizeof(ErtsMessage);
+ if (ERTS_SIG_IS_NON_MSG((ErtsSignal *) mp))
+ size += erts_proc_sig_signal_size((ErtsSignal *) mp);
+ else if (mp->data.attached)
+ size += erts_msg_attached_data_size(mp) * sizeof(Eterm);
+ });
+ }
+
+ if (p->arg_reg != p->def_arg_reg) {
+ size += p->arity * sizeof(p->arg_reg[0]);
+ }
+
+ if (erts_atomic_read_nob(&p->psd) != (erts_aint_t) NULL)
+ size += sizeof(ErtsPSD);
+
+ scb = ERTS_PROC_GET_SAVED_CALLS_BUF(p);
+ if (scb) {
+ size += (sizeof(struct saved_calls)
+ + (scb->len-1) * sizeof(scb->ct[0]));
+ }
+
+ size += erts_dicts_mem_size(p);
+ return size;
}
static ERTS_INLINE void
@@ -942,6 +963,9 @@ dump_module_literals(fmtfn_t to, void *to_arg, ErtsLiteralArea* lit_area)
}
erts_putc(to, to_arg, '\n');
}
+ } else if (is_export_header(w)) {
+ dump_externally(to, to_arg, term);
+ erts_putc(to, to_arg, '\n');
}
size = 1 + header_arity(w);
switch (w & _HEADER_SUBTAG_MASK) {
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
index e5bb3cc15f..4f91d9ad07 100644
--- a/erts/emulator/beam/erl_time_sup.c
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -2204,6 +2204,8 @@ time_unit_conversion(Process *c_p, Eterm term, ErtsMonotonicTime val, ErtsMonoto
ERTS_BIF_PREP_RET(ret, make_time_val(c_p, result));
break;
#endif
+ case am_perf_counter:
+ goto trap_to_erlang_code;
default: {
Eterm value, native_res;
#ifndef ARCH_64
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index 7724231cd5..256670ff22 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -860,6 +860,8 @@ void erts_emasculate_writable_binary(ProcBin* pb);
Eterm erts_new_heap_binary(Process *p, byte *buf, int len, byte** datap);
Eterm erts_new_mso_binary(Process*, byte*, Uint);
Eterm new_binary(Process*, byte*, Uint);
+Eterm erts_heap_factory_new_binary(ErtsHeapFactory *hfact, byte *buf,
+ Uint len, Uint reserve_size);
Eterm erts_realloc_binary(Eterm bin, size_t size);
Eterm erts_build_proc_bin(ErlOffHeap*, Eterm*, Binary*);
diff --git a/erts/emulator/beam/msg_instrs.tab b/erts/emulator/beam/msg_instrs.tab
index 289436da6f..26bea0efc6 100644
--- a/erts/emulator/beam/msg_instrs.tab
+++ b/erts/emulator/beam/msg_instrs.tab
@@ -225,6 +225,7 @@ remove_message() {
Sint tok_label = 0;
Sint tok_lastcnt = 0;
Sint tok_serial = 0;
+ Sint len = erts_proc_sig_privqs_len(c_p);
dtrace_proc_str(c_p, receiver_name);
token2 = SEQ_TRACE_TOKEN(c_p);
@@ -235,7 +236,8 @@ remove_message() {
}
DTRACE6(message_receive,
receiver_name, size_object(ERL_MESSAGE_TERM(msgp)),
- c_p->sig_qs.len - 1, tok_label, tok_lastcnt, tok_serial);
+ len, /* This is NOT the message queue len, but it's something... */
+ tok_label, tok_lastcnt, tok_serial);
}
#endif
UNLINK_MESSAGE(c_p, msgp);
diff --git a/erts/emulator/beam/ops.tab b/erts/emulator/beam/ops.tab
index bc765a8c94..8b2d9098a8 100644
--- a/erts/emulator/beam/ops.tab
+++ b/erts/emulator/beam/ops.tab
@@ -710,7 +710,8 @@ is_boolean Fail=f ac => jump Fail
is_boolean f? xy
%hot
-is_function2 Fail=f acq Arity => jump Fail
+is_function2 Fail=f Literal=q Arity | literal_is_export(Literal) =>
+is_function2 Fail=f c Arity => jump Fail
is_function2 Fail=f Fun a => jump Fail
is_function2 f? S s
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 4bf60619ba..188e02eff8 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -59,6 +59,7 @@
#endif
#define ERTS_WANT_NFUNC_SCHED_INTERNALS__
#include "erl_nfunc_sched.h"
+#include "erl_proc_sig_queue.h"
#undef M_TRIM_THRESHOLD
#undef M_TOP_PAD
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index 7aa53e8f36..ced8a4a2a7 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -782,10 +782,14 @@ update_pollset(ErtsPollSet *ps, int fd, ErtsPollOp op, ErtsPollEvents events)
struct kevent evts[2];
struct timespec ts = {0, 0};
-#ifdef EV_DISPATCH
- /* If we have EV_DISPATCH we use it. The kevent descriptions for both
- read and write are added on OP_ADD and removed on OP_DEL. And then
- after than only EV_ENABLE|EV_DISPATCH are used.
+#if defined(EV_DISPATCH) && !defined(__OpenBSD__)
+ /* If we have EV_DISPATCH we use it, unless we are on OpenBSD as the
+ behavior of EV_EOF seems to be edge triggered there and we need it
+ to be level triggered.
+
+ The kevent descriptions for both read and write are added on OP_ADD
+ and removed on OP_DEL. After that, only EV_ENABLE|EV_DISPATCH
+ are used.
It could be possible to not modify the pollset when disabling and/or
deleting events, but that may cause the poll threads to be awoken
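
For readers unfamiliar with EV_DISPATCH, the hedged sketch below shows the pattern the comment above describes: register the read filter once with EV_ADD|EV_DISPATCH, then re-arm it with EV_ENABLE|EV_DISPATCH after each delivered event. It builds only on systems providing kqueue (e.g. macOS, FreeBSD), uses fd 0 purely as an example, and is not ERTS code.

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <stdio.h>

int main(void)
{
#ifdef EV_DISPATCH
    int kq = kqueue();
    struct kevent ev;
    struct timespec ts = {0, 0};

    /* add: registered once; the filter is disabled after each delivery */
    EV_SET(&ev, 0, EVFILT_READ, EV_ADD | EV_DISPATCH, 0, 0, NULL);
    kevent(kq, &ev, 1, NULL, 0, &ts);

    /* re-arm: only EV_ENABLE|EV_DISPATCH from now on */
    EV_SET(&ev, 0, EVFILT_READ, EV_ENABLE | EV_DISPATCH, 0, 0, NULL);
    kevent(kq, &ev, 1, NULL, 0, &ts);

    printf("registered and re-armed fd 0 with EV_DISPATCH\n");
#else
    printf("EV_DISPATCH not available on this system\n");
#endif
    return 0;
}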
diff --git a/erts/emulator/sys/common/erl_sys_common_misc.c b/erts/emulator/sys/common/erl_sys_common_misc.c
index 96bdbacb9e..41a6fcb7e1 100644
--- a/erts/emulator/sys/common/erl_sys_common_misc.c
+++ b/erts/emulator/sys/common/erl_sys_common_misc.c
@@ -142,7 +142,16 @@ sys_double_to_chars(double fp, char *buffer, size_t buffer_size)
return sys_double_to_chars_ext(fp, buffer, buffer_size, SYS_DEFAULT_FLOAT_DECIMALS);
}
-/* Convert float to string using fixed point notation.
+
+#if SIZEOF_LONG == 8
+# define round_int64 lround
+#elif SIZEOF_LONG_LONG == 8
+# define round_int64 llround
+#else
+# error "No 64-bit integer type?"
+#endif
+
+/* Convert float to string
* decimals must be >= 0
* if compact != 0, the trailing 0's will be truncated
*/
@@ -154,80 +163,35 @@ sys_double_to_chars_fast(double f, char *buffer, int buffer_size, int decimals,
#define FRAC_SIZE 52
#define EXP_SIZE 11
#define EXP_MASK (((Uint64)1 << EXP_SIZE) - 1)
- #define MAX_DECIMALS (sizeof(cs_sys_double_pow10) \
- / sizeof(cs_sys_double_pow10[0]))
+ #define MAX_DECIMALS (sizeof(pow10v) / sizeof(pow10v[0]))
#define FRAC_MASK (((Uint64)1 << FRAC_SIZE) - 1)
#define FRAC_MASK2 (((Uint64)1 << (FRAC_SIZE + 1)) - 1)
#define MAX_FLOAT ((Uint64)1 << (FRAC_SIZE+1))
- static const double cs_sys_double_pow10[] = {
- SYS_DOUBLE_RND_CONST / 1e0,
- SYS_DOUBLE_RND_CONST / 1e1,
- SYS_DOUBLE_RND_CONST / 1e2,
- SYS_DOUBLE_RND_CONST / 1e3,
- SYS_DOUBLE_RND_CONST / 1e4,
- SYS_DOUBLE_RND_CONST / 1e5,
- SYS_DOUBLE_RND_CONST / 1e6,
- SYS_DOUBLE_RND_CONST / 1e7,
- SYS_DOUBLE_RND_CONST / 1e8,
- SYS_DOUBLE_RND_CONST / 1e9,
- SYS_DOUBLE_RND_CONST / 1e10,
- SYS_DOUBLE_RND_CONST / 1e11,
- SYS_DOUBLE_RND_CONST / 1e12,
- SYS_DOUBLE_RND_CONST / 1e13,
- SYS_DOUBLE_RND_CONST / 1e14,
- SYS_DOUBLE_RND_CONST / 1e15,
- SYS_DOUBLE_RND_CONST / 1e16,
- SYS_DOUBLE_RND_CONST / 1e17,
- SYS_DOUBLE_RND_CONST / 1e18
+ static const double pow10v[] = {
+ 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+ 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18
};
- Uint64 mantissa, int_part, frac_part;
- int exp;
- int fbits;
- int max;
+ double af;
+ Uint64 int_part, frac_part;
int neg;
- double fr;
- union { Uint64 L; double F; } x;
char *p = buffer;
if (decimals < 0)
return -1;
- if (f >= 0) {
- neg = 0;
- fr = decimals < MAX_DECIMALS ? (f + cs_sys_double_pow10[decimals]) : f;
- x.F = fr;
- } else {
+ if (f < 0) {
neg = 1;
- fr = decimals < MAX_DECIMALS ? (f - cs_sys_double_pow10[decimals]) : f;
- x.F = -fr;
+ af = -f;
}
-
- exp = (x.L >> FRAC_SIZE) & EXP_MASK;
- mantissa = x.L & FRAC_MASK;
-
- if (exp == EXP_MASK) {
- if (mantissa == 0) {
- if (neg)
- *p++ = '-';
- *p++ = 'i';
- *p++ = 'n';
- *p++ = 'f';
- } else {
- *p++ = 'n';
- *p++ = 'a';
- *p++ = 'n';
- }
- *p = '\0';
- return p - buffer;
+ else {
+ neg = 0;
+ af = f;
}
- exp -= EXP_MASK >> 1;
- mantissa |= ((Uint64)1 << FRAC_SIZE);
-
/* Don't bother with optimizing too large numbers or too large precision */
- if (x.F > MAX_FLOAT || decimals >= MAX_DECIMALS) {
+ if (af > MAX_FLOAT || decimals >= MAX_DECIMALS) {
int len = erts_snprintf(buffer, buffer_size, "%.*f", decimals, f);
char* p = buffer + len;
if (len >= buffer_size)
@@ -237,77 +201,64 @@ sys_double_to_chars_fast(double f, char *buffer, int buffer_size, int decimals,
p = find_first_trailing_zero(p);
*p = '\0';
return p - buffer;
- } else if (exp >= FRAC_SIZE) {
- int_part = mantissa << (exp - FRAC_SIZE);
- frac_part = 0;
- fbits = FRAC_SIZE; /* not important as frac_part==0 */
- } else if (exp >= 0) {
- fbits = FRAC_SIZE - exp;
- int_part = mantissa >> fbits;
- frac_part = mantissa & (((Uint64)1 << fbits) -1);
- } else /* if (exp < 0) */ {
- int_part = 0;
- frac_part = mantissa;
- fbits = FRAC_SIZE - exp;
- }
-
- if (!int_part) {
- if (neg)
- *p++ = '-';
- *p++ = '0';
- } else {
- int ret, i, n;
- while (int_part != 0) {
- *p++ = (char)((int_part % 10) + '0');
- int_part /= 10;
- }
- if (neg)
- *p++ = '-';
- /* Reverse string */
- ret = p - buffer;
- for (i = 0, n = ret/2; i < n; i++) {
- int j = ret - i - 1;
- char c = buffer[i];
- buffer[i] = buffer[j];
- buffer[j] = c;
- }
}
- if (decimals > 0) {
- int i;
- *p++ = '.';
+ if (decimals) {
+ double int_f = floor(af);
+ double frac_f = round((af - int_f) * pow10v[decimals]);
- max = buffer_size - (p - buffer) - 1 /* leave room for trailing '\0' */;
+ int_part = (Uint64)int_f;
+ frac_part = (Uint64)frac_f;
- if (decimals > max)
- return -1; /* the number is not large enough to fit in the buffer */
-
- max = decimals;
+ if (frac_f >= pow10v[decimals]) {
+ /* rounding overflow carry into int_part */
+ int_part++;
+ frac_part = 0;
+ }
- for (i = 0; i < max; i++) {
- if (frac_part > (ERTS_UINT64_MAX/5)) {
- frac_part >>= 3;
- fbits -= 3;
+ do {
+ Uint64 n;
+ if (!frac_part) {
+ do {
+ *p++ = '0';
+ } while (--decimals);
+ break;
}
+ n = frac_part / 10;
+ *p++ = (char)((frac_part - n*10) + '0');
+ frac_part = n;
+ } while (--decimals);
- /* Multiply by 10 (5*2) to extract decimal digit as integer part */
- frac_part *= 5;
- fbits--;
+ *p++ = '.';
+ }
+ else
+ int_part = (Uint64)round_int64(af);
- if (fbits >= 64) {
- *p++ = '0';
- }
- else {
- *p++ = (char)((frac_part >> fbits) + '0');
- frac_part &= ((Uint64)1 << fbits) - 1;
- }
+ if (!int_part) {
+ *p++ = '0';
+ } else {
+ do {
+ Uint64 n = int_part / 10;
+ *p++ = (char)((int_part - n*10) + '0');
+ int_part = n;
+ } while (int_part);
+ }
+ if (neg)
+ *p++ = '-';
+
+ {/* Reverse string */
+ int i = 0;
+ int j = p - buffer - 1;
+ for ( ; i < j; i++, j--) {
+ char tmp = buffer[i];
+ buffer[i] = buffer[j];
+ buffer[j] = tmp;
}
-
- /* Delete trailing zeroes */
- if (compact)
- p = find_first_trailing_zero(p);
}
+ /* Delete trailing zeroes */
+ if (compact)
+ p = find_first_trailing_zero(p);
*p = '\0';
return p - buffer;
}
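To make the new control flow above easier to follow, here is a hypothetical standalone sketch (not the ERTS function) of the same integer/fraction split: take |f|, scale the fractional part by 10^decimals, round it, and carry a rounding overflow into the integer part. The buffer-size checks, the compact (trailing-zero) handling and the fallback to erts_snprintf for very large values are deliberately left out.

/* Hypothetical sketch of the split used by sys_double_to_chars_fast().
 * Assumes 1 <= decimals <= 18 and that |f| fits in a 64-bit integer. */
#include <inttypes.h>
#include <math.h>
#include <stdio.h>

static int fixed_decimals(double f, int decimals, char *buf, size_t size)
{
    static const double pow10v[] = {
        1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
        1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18
    };
    double af = fabs(f);
    double int_f = floor(af);
    double frac_f = round((af - int_f) * pow10v[decimals]);
    uint64_t int_part = (uint64_t)int_f;
    uint64_t frac_part = (uint64_t)frac_f;

    if (frac_f >= pow10v[decimals]) {
        /* e.g. 0.999 with 2 decimals rounds to 1.00: carry into int_part. */
        int_part++;
        frac_part = 0;
    }
    return snprintf(buf, size, "%s%" PRIu64 ".%0*" PRIu64,
                    f < 0 ? "-" : "", int_part, decimals, frac_part);
}

int main(void)
{
    char buf[64];
    fixed_decimals(0.999, 2, buf, sizeof buf);    /* "1.00"   */
    puts(buf);
    fixed_decimals(-3.14159, 3, buf, sizeof buf); /* "-3.142" */
    puts(buf);
    return 0;
}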
diff --git a/erts/emulator/test/beam_literals_SUITE.erl b/erts/emulator/test/beam_literals_SUITE.erl
index 09761263e2..b447ca0210 100644
--- a/erts/emulator/test/beam_literals_SUITE.erl
+++ b/erts/emulator/test/beam_literals_SUITE.erl
@@ -248,35 +248,58 @@ literal_type_tests(Config) when is_list(Config) ->
ok.
make_test([{is_function=T,L}|Ts]) ->
- [test(T, L),test(T, 0, L)|make_test(Ts)];
+ [guard_test(T, L),guard_test(T, 0, L),body_test(T, L),body_test(T, 0, L)|make_test(Ts)];
make_test([{T,L}|Ts]) ->
- [test(T, L)|make_test(Ts)];
+ [guard_test(T, L),body_test(T, L)|make_test(Ts)];
make_test([]) -> [].
-test(T, L) ->
- S = lists:flatten(io_lib:format("begin io:format(\"~~p~n\", [{~p,~p}]), if ~w(~w) -> true; true -> false end end. ", [T, L, T, L])),
- {ok,Toks,_Line} = erl_scan:string(S),
- {ok,E} = erl_parse:parse_exprs(Toks),
- {value,Val,_Bs} = erl_eval:exprs(E, []),
+guard_test(_, L) when is_function(L) ->
+ %% Skip guard tests with exports - they are not literals
+ {atom,erl_anno:new(0),true};
+guard_test(T, L) ->
+ S = io_lib:format("begin io:format(\"~~p~n\", [{~p,~p}]), if ~w(~w) -> true; true -> false end end. ", [T, L, T, L]),
+ {Val,Expr} = eval_string(S),
+ Anno = erl_anno:new(0),
+ {match,Anno,{atom,Anno,Val},Expr}.
+
+guard_test(_, _, L) when is_function(L) ->
+ %% Skip guard tests with exports - they are not literals
+ {atom,erl_anno:new(0),true};
+guard_test(T, A, L) ->
+ S = io_lib:format("begin io:format(\"~~p~n\", [{~p,~p,~p}]), if ~w(~w, ~w) -> true; true -> false end end. ", [T,L,A,T,L,A]),
+ {Val,Expr} = eval_string(S),
+ Anno = erl_anno:new(0),
+ {match,Anno,{atom,Anno,Val},Expr}.
+
+body_test(T, L) ->
+ S = io_lib:format("begin io:format(\"~~p~n\", [{~p,~p}]), ~w(~w) end. ", [T,L,T,L]),
+ {Val,Expr} = eval_string(S),
Anno = erl_anno:new(0),
- {match,Anno,{atom,Anno,Val},hd(E)}.
+ {match,Anno,{atom,Anno,Val},Expr}.
-test(T, A, L) ->
- S = lists:flatten(io_lib:format("begin io:format(\"~~p~n\", [{~p,~p,~p}]), if ~w(~w, ~w) -> true; true -> false end end. ",
- [T,L,A,T,L,A])),
- {ok,Toks,_Line} = erl_scan:string(S),
+body_test(T, A, L) ->
+ S = io_lib:format("begin io:format(\"~~p~n\", [{~p,~p,~p}]), ~w(~w,~w) end. ", [T,L,A,T,L,A]),
+ {Val,Expr} = eval_string(S),
+ Anno = erl_anno:new(0),
+ {match,Anno,{atom,Anno,Val},Expr}.
+
+eval_string(S) ->
+ {ok,Toks,_Line} = erl_scan:string(lists:flatten(S)),
{ok,E} = erl_parse:parse_exprs(Toks),
{value,Val,_Bs} = erl_eval:exprs(E, []),
- Anno = erl_anno:new(0),
- {match,Anno,{atom,Anno,Val},hd(E)}.
-
+ {Val,hd(E)}.
+
literals() ->
[42,
3.14,
-3,
32982724987789283473473838474,
[],
- xxxx].
+ "abc",
+ <<"abc">>,
+ {},
+ xxxx,
+ fun erlang:erase/0].
type_tests() ->
[is_boolean,
diff --git a/erts/emulator/test/bif_SUITE.erl b/erts/emulator/test/bif_SUITE.erl
index 22706ae8b1..32bfcd5520 100644
--- a/erts/emulator/test/bif_SUITE.erl
+++ b/erts/emulator/test/bif_SUITE.erl
@@ -35,7 +35,8 @@
is_builtin/1, error_stacktrace/1,
error_stacktrace_during_call_trace/1,
group_leader_prio/1, group_leader_prio_dirty/1,
- is_process_alive/1]).
+ is_process_alive/1,
+ process_info_blast/1]).
suite() ->
[{ct_hooks,[ts_install_cth]},
@@ -50,7 +51,7 @@ all() ->
erl_crash_dump_bytes, min_max, erlang_halt, is_builtin,
error_stacktrace, error_stacktrace_during_call_trace,
group_leader_prio, group_leader_prio_dirty,
- is_process_alive].
+ is_process_alive, process_info_blast].
%% Uses erlang:display to test that erts_printf does not do deep recursion
display(Config) when is_list(Config) ->
@@ -1099,6 +1100,76 @@ is_process_alive(Config) when is_list(Config) ->
Ps),
ok.
+process_info_blast(Config) when is_list(Config) ->
+ Tester = self(),
+ NoAttackers = 1000,
+ NoAL = lists:seq(1, NoAttackers),
+ Consume = make_ref(),
+ Victim = spawn_link(fun () ->
+ receive
+ Consume ->
+ ok
+ end,
+ consume_msgs()
+ end),
+ AFun = fun () ->
+ Victim ! hej,
+ Res = process_info(Victim, message_queue_len),
+ Tester ! {self(), Res}
+ end,
+ Attackers0 = lists:map(fun (_) ->
+ spawn_link(AFun)
+ end,
+ NoAL),
+ lists:foreach(fun (A) ->
+ receive
+ {A, Res} ->
+ case Res of
+ {message_queue_len, Len} when Len > 0, Len =< NoAttackers ->
+ Len;
+ Error ->
+ exit({unexpected, Error})
+ end
+ end
+ end,
+ Attackers0),
+ Attackers1 = lists:map(fun (_) ->
+ spawn_link(AFun)
+ end,
+ NoAL),
+ Victim ! Consume,
+ lists:foreach(fun (A) ->
+ receive
+ {A, Res} ->
+ case Res of
+ {message_queue_len, Len} when Len >= 0, Len =< 2*NoAttackers+1 ->
+ ok;
+ undefined ->
+ ok;
+ Error ->
+ exit({unexpected, Error})
+ end
+ end
+ end,
+ Attackers1),
+ KillFun = fun (P) ->
+ unlink(P),
+ exit(P, kill),
+ false = erlang:is_process_alive(P)
+ end,
+ lists:foreach(fun (A) -> KillFun(A) end, Attackers0),
+ lists:foreach(fun (A) -> KillFun(A) end, Attackers1),
+ KillFun(Victim),
+ ok.
+
+consume_msgs() ->
+ receive
+ _ ->
+ consume_msgs()
+ after 0 ->
+ ok
+ end.
+
%% helpers
id(I) -> I.
diff --git a/erts/emulator/test/exception_SUITE.erl b/erts/emulator/test/exception_SUITE.erl
index da0292f385..60d14ce841 100644
--- a/erts/emulator/test/exception_SUITE.erl
+++ b/erts/emulator/test/exception_SUITE.erl
@@ -23,7 +23,8 @@
-export([all/0, suite/0,
badmatch/1, pending_errors/1, nil_arith/1, top_of_stacktrace/1,
stacktrace/1, nested_stacktrace/1, raise/1, gunilla/1, per/1,
- exception_with_heap_frag/1, line_numbers/1]).
+ exception_with_heap_frag/1, backtrace_depth/1,
+ line_numbers/1]).
-export([bad_guy/2]).
-export([crash/1]).
@@ -42,7 +43,7 @@ suite() ->
all() ->
[badmatch, pending_errors, nil_arith, top_of_stacktrace,
stacktrace, nested_stacktrace, raise, gunilla, per,
- exception_with_heap_frag, line_numbers].
+ exception_with_heap_frag, backtrace_depth, line_numbers].
-define(try_match(E),
catch ?MODULE:bar(),
@@ -572,6 +573,57 @@ do_exception_with_heap_frag(Bin, [Sz|Sizes]) ->
do_exception_with_heap_frag(Bin, Sizes);
do_exception_with_heap_frag(_, []) -> ok.
+backtrace_depth(Config) when is_list(Config) ->
+ _ = [do_backtrace_depth(D) || D <- lists:seq(0, 8)],
+ ok.
+
+do_backtrace_depth(D) ->
+ Old = erlang:system_flag(backtrace_depth, D),
+ try
+ Expected = max(1, D),
+ do_backtrace_depth_1(Expected)
+ after
+ _ = erlang:system_flag(backtrace_depth, Old)
+ end.
+
+do_backtrace_depth_1(D) ->
+ Exit = fun() ->
+ error(reason)
+ end,
+ HandCrafted = fun() ->
+ {'EXIT',{_,Stk0}} = (catch error(get_stacktrace)),
+ %% Fool the compiler to force a hand-crafted
+ %% stacktrace.
+ Stk = [hd(Stk0)|tl(Stk0)],
+ erlang:raise(error, reason, Stk)
+ end,
+ PassedOn = fun() ->
+ try error(get_stacktrace)
+ catch error:_:Stk ->
+ %% Just pass on the given stacktrace.
+ erlang:raise(error, reason, Stk)
+ end
+ end,
+ do_backtrace_depth_2(D, Exit),
+ do_backtrace_depth_2(D, HandCrafted),
+ do_backtrace_depth_2(D, PassedOn),
+ ok.
+
+do_backtrace_depth_2(D, Exc) ->
+ try
+ Exc()
+ catch
+ error:reason:Stk ->
+ if
+ length(Stk) =/= D ->
+ io:format("Expected depth: ~p\n", [D]),
+ io:format("~p\n", [Stk]),
+ error(bad_depth);
+ true ->
+ ok
+ end
+ end.
+
line_numbers(Config) when is_list(Config) ->
{'EXIT',{{case_clause,bad_tag},
[{?MODULE,line1,2,
diff --git a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
index e8d9302505..a0aef60cf1 100644
--- a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
+++ b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
@@ -2859,7 +2859,7 @@ unsigned rand_bits(struct frenzy_rand_bits* rnd, unsigned int nbits)
struct frenzy_monitor {
ErlNifMutex* lock;
- enum {
+ volatile enum {
MON_FREE, MON_FREE_DOWN, MON_FREE_DEMONITOR,
MON_TRYING, MON_ACTIVE, MON_PENDING
} state;
@@ -3221,13 +3221,24 @@ static void frenzy_resource_down(ErlNifEnv* env, void* obj, ErlNifPid* pid,
DBG_TRACE3("DOWN pid=%T, r=%p rix=%u\n", pid->pid, r, r->rix);
for (mix = 0; mix < FRENZY_MONITORS_MAX; mix++) {
- if (r->monv[mix].pid.pid == pid->pid && r->monv[mix].state >= MON_TRYING) {
+ int state1 = r->monv[mix].state;
+ /* First do dirty access of pid and state without the lock */
+ if (r->monv[mix].pid.pid == pid->pid && state1 >= MON_TRYING) {
+ int state2;
enif_mutex_lock(r->monv[mix].lock);
- if (enif_compare_monitors(mon, &r->monv[mix].mon) == 0) {
- assert(r->monv[mix].state >= MON_ACTIVE);
- r->monv[mix].state = MON_FREE_DOWN;
- enif_mutex_unlock(r->monv[mix].lock);
- return;
+ state2 = r->monv[mix].state;
+ if (state2 >= MON_ACTIVE) {
+ if (enif_compare_monitors(mon, &r->monv[mix].mon) == 0) {
+ r->monv[mix].state = MON_FREE_DOWN;
+ enif_mutex_unlock(r->monv[mix].lock);
+ return;
+ }
+ }
+ else {
+ assert(state2 != MON_TRYING);
+ assert(state1 == MON_TRYING || /* racing monitor failed */
+ state2 == MON_FREE_DEMONITOR || /* racing demonitor */
+ state2 == MON_FREE_DOWN); /* racing down */
}
enif_mutex_unlock(r->monv[mix].lock);
}
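The change above narrows the race window by pairing a cheap unlocked ("dirty") read of the now-volatile state with an authoritative re-check under the mutex before acting on it. A generic sketch of that check/lock/re-check pattern, written against plain pthreads rather than the ErlNifMutex API (and with an invented try_claim() helper), might look like this:

/* Generic dirty-read / lock / re-check sketch; not NIF code. */
#include <pthread.h>
#include <stdio.h>

enum mon_state { MON_FREE, MON_TRYING, MON_ACTIVE, MON_PENDING };

struct monitor {
    pthread_mutex_t lock;
    volatile enum mon_state state;
};

/* Returns 1 if the monitor was claimed (ACTIVE -> FREE), 0 otherwise. */
static int try_claim(struct monitor *m)
{
    int claimed = 0;

    if (m->state < MON_ACTIVE)      /* dirty read: may be stale */
        return 0;

    pthread_mutex_lock(&m->lock);
    if (m->state >= MON_ACTIVE) {   /* authoritative re-check under the lock */
        m->state = MON_FREE;
        claimed = 1;
    }
    pthread_mutex_unlock(&m->lock);
    return claimed;
}

int main(void)
{
    struct monitor m = { PTHREAD_MUTEX_INITIALIZER, MON_ACTIVE };
    printf("claimed: %d\n", try_claim(&m)); /* 1 */
    printf("claimed: %d\n", try_claim(&m)); /* 0: already freed */
    return 0;
}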
diff --git a/erts/emulator/test/num_bif_SUITE.erl b/erts/emulator/test/num_bif_SUITE.erl
index 592542405f..290bb61fc8 100644
--- a/erts/emulator/test/num_bif_SUITE.erl
+++ b/erts/emulator/test/num_bif_SUITE.erl
@@ -213,6 +213,20 @@ fts_rand_float_decimals(N) ->
[begin
F0 = rand_float_reasonable(),
L0 = float_to_list(F0, [{decimals, D}]),
+ case conform_with_io_lib_format_os(F0,D) of
+ false -> ok;
+ true ->
+ IOL = lists:flatten(io_lib:format("~.*f", [D, F0])),
+ true = case L0 =:= IOL of
+ true -> true;
+ false ->
+ io:format("F0 = ~w ~w\n", [F0, <<F0/float>>]),
+ io:format("decimals = ~w\n", [D]),
+ io:format("float_to_list = ~s\n", [L0]),
+ io:format("io_lib:format = ~s\n", [IOL]),
+ false
+ end
+ end,
L1 = case D of
0 -> L0 ++ ".0";
_ -> L0
@@ -234,6 +248,26 @@ fts_rand_float_decimals(N) ->
fts_rand_float_decimals(N-1).
+conform_with_io_lib_format_os(F, D) ->
+ case os:type() of
+ {win32,_} ->
+ %% io_lib:format("~.*f") buggy on windows? OTP-15010
+ false;
+ _ ->
+ conform_with_io_lib_format(F, D)
+ end.
+
+conform_with_io_lib_format(_, 0) ->
+ %% io_lib:format("~.*f") does not support zero decimals
+ false;
+conform_with_io_lib_format(_, D) when D > 10 ->
+ %% Seems float_to_list gets it slightly wrong sometimes for many decimals
+ false;
+conform_with_io_lib_format(F, D) ->
+ %% io_lib:format prints '0' for input bits beyond mantissa precision
+ %% float_to_list treats those unknown input bits as if they were zeros.
+ math:log2(abs(F) * math:pow(10,D)) < 54.
+
max_diff_decimals(F, D) ->
IntBits = floor(math:log2(abs(F))) + 1,
FracBits = (52 - IntBits),
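The guard in conform_with_io_lib_format/2 above only compares the two formatters when every requested decimal digit is determined by the 53-bit double mantissa, i.e. when |F| * 10^D needs fewer than 54 bits. A small, hypothetical C transcription of that check (digits_within_mantissa is an invented name):

/* Hypothetical transcription of: math:log2(abs(F) * math:pow(10,D)) < 54 */
#include <math.h>
#include <stdio.h>

static int digits_within_mantissa(double f, int decimals)
{
    return log2(fabs(f) * pow(10.0, decimals)) < 54.0;
}

int main(void)
{
    /* 123.456 with 3 decimals: 123456 needs about 17 bits -> comparable. */
    printf("%d\n", digits_within_mantissa(123.456, 3));   /* 1 */
    /* 123.456 with 16 decimals needs about 60 bits -> skip the comparison. */
    printf("%d\n", digits_within_mantissa(123.456, 16));  /* 0 */
    return 0;
}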
diff --git a/erts/emulator/test/process_SUITE.erl b/erts/emulator/test/process_SUITE.erl
index 7eff786e8b..46ece531a8 100644
--- a/erts/emulator/test/process_SUITE.erl
+++ b/erts/emulator/test/process_SUITE.erl
@@ -42,6 +42,8 @@
process_info_lock_reschedule2/1,
process_info_lock_reschedule3/1,
process_info_garbage_collection/1,
+ process_info_smoke_all/1,
+ process_info_status_handled_signal/1,
bump_reductions/1, low_prio/1, binary_owner/1, yield/1, yield2/1,
otp_4725/1, bad_register/1, garbage_collect/1, otp_6237/1,
process_info_messages/1, process_flag_badarg/1, process_flag_heap_size/1,
@@ -79,6 +81,8 @@ all() ->
process_info_lock_reschedule2,
process_info_lock_reschedule3,
process_info_garbage_collection,
+ process_info_smoke_all,
+ process_info_status_handled_signal,
bump_reductions, low_prio, yield, yield2, otp_4725,
bad_register, garbage_collect, process_info_messages,
process_flag_badarg, process_flag_heap_size,
@@ -510,14 +514,20 @@ pio_current_location(N, Pid, Pi, Looper) ->
case Where of
{erlang,process_info,2,[]} ->
pio_current_location(N-1, Pid, Pi+1, Looper);
+ {erts_internal,await_result,1, Loc} when is_list(Loc) ->
+ pio_current_location(N-1, Pid, Pi+1, Looper);
{?MODULE,process_info_looper,1,Loc} when is_list(Loc) ->
- pio_current_location(N-1, Pid, Pi, Looper+1)
+ pio_current_location(N-1, Pid, Pi, Looper+1);
+ _ ->
+ exit({unexpected_location, Where})
end.
pio_current_stacktrace() ->
L = [begin
- {current_stacktrace,Stk} = process_info(P, current_stacktrace),
- {P,Stk}
+ case process_info(P, current_stacktrace) of
+ {current_stacktrace, Stk} -> {P,Stk};
+ undefined -> {P, []}
+ end
end || P <- processes()],
[erlang:garbage_collect(P) || {P,_} <- L],
erlang:garbage_collect(),
@@ -973,6 +983,106 @@ process_info_garbage_collection(_Config) ->
gv(Key,List) ->
proplists:get_value(Key,List).
+process_info_smoke_all_tester() ->
+ register(process_info_smoke_all_tester, self()),
+ put(ets_ref, ets:new(blupp, [])),
+ put(binary, [list_to_binary(lists:duplicate(1000, 1)),
+ list_to_binary(lists:duplicate(1000, 2))]),
+ process_info_smoke_all_tester_loop().
+
+process_info_smoke_all_tester_loop() ->
+ receive
+ {other_process, Pid} ->
+ case get(procs) of
+ undefined -> put(procs, [Pid]);
+ Procs -> put(procs, [Pid|Procs])
+ end,
+ erlang:monitor(process, Pid),
+ link(Pid),
+ process_info_smoke_all_tester_loop()
+ end.
+
+process_info_smoke_all(Config) when is_list(Config) ->
+ AllPIOptions = [registered_name,
+ current_function,
+ initial_call,
+ messages,
+ message_queue_len,
+ links,
+ monitors,
+ monitored_by,
+ dictionary,
+ trap_exit,
+ error_handler,
+ heap_size,
+ stack_size,
+ memory,
+ garbage_collection,
+ group_leader,
+ reductions,
+ priority,
+ trace,
+ binary,
+ sequential_trace_token,
+ catchlevel,
+ backtrace,
+ last_calls,
+ total_heap_size,
+ suspending,
+ min_heap_size,
+ min_bin_vheap_size,
+ max_heap_size,
+ current_location,
+ current_stacktrace,
+ message_queue_data,
+ garbage_collection_info,
+ magic_ref,
+ fullsweep_after],
+
+ {ok, Node} = start_node(Config, ""),
+ RP = spawn_link(Node, fun process_info_smoke_all_tester/0),
+ LP = spawn_link(fun process_info_smoke_all_tester/0),
+ RP ! {other_process, LP},
+ LP ! {other_process, RP},
+ LP ! {other_process, self()},
+ LP ! ets:new(blapp, []),
+ LP ! ets:new(blipp, []),
+ LP ! list_to_binary(lists:duplicate(1000, 3)),
+ receive after 1000 -> ok end,
+ _MLP = erlang:monitor(process, LP),
+ true = is_process_alive(LP),
+ PI = process_info(LP, AllPIOptions),
+ io:format("~p~n", [PI]),
+ garbage_collect(),
+ unlink(RP),
+ unlink(LP),
+ exit(RP, kill),
+ exit(LP, kill),
+ false = is_process_alive(LP),
+ stop_node(Node),
+ ok.
+
+process_info_status_handled_signal(Config) when is_list(Config) ->
+ P = spawn_link(fun () ->
+ receive after infinity -> ok end
+ end),
+ wait_until(fun () ->
+ process_info(P, status) == {status, waiting}
+ end),
+ %%
+ %% The 'messages' option will force a process-info-request
+ %% signal to be scheduled on the process. Ensure that status
+ %% 'waiting' is reported even though it is actually running
+ %% when handling the request. We want it to report the status
+ %% it would have had if it had not been handling the
+ %% process-info-request...
+ %%
+ [{status, waiting}, {messages, []}] = process_info(P, [status, messages]),
+ unlink(P),
+ exit(P, kill),
+ false = erlang:is_process_alive(P),
+ ok.
+
%% Tests erlang:bump_reductions/1.
bump_reductions(Config) when is_list(Config) ->
erlang:garbage_collect(),
diff --git a/erts/emulator/test/z_SUITE.erl b/erts/emulator/test/z_SUITE.erl
index ac3df8bfbf..103f9f1550 100644
--- a/erts/emulator/test/z_SUITE.erl
+++ b/erts/emulator/test/z_SUITE.erl
@@ -37,6 +37,7 @@
-export([schedulers_alive/1, node_container_refc_check/1,
long_timers/1, pollset_size/1,
check_io_debug/1, get_check_io_info/0,
+ lc_graph/1,
leaked_processes/1]).
suite() ->
@@ -46,6 +47,7 @@ suite() ->
all() ->
[schedulers_alive, node_container_refc_check,
long_timers, pollset_size, check_io_debug,
+ lc_graph,
%% Make sure that the leaked_processes/1 is always
%% run last.
leaked_processes].
@@ -289,6 +291,12 @@ has_gethost([P|T]) ->
has_gethost([]) ->
false.
+lc_graph(Config) when is_list(Config) ->
+ %% Create "lc_graph" file in current working dir
+ %% if lock checker is enabled
+ erts_debug:lc_graph(),
+ ok.
+
leaked_processes(Config) when is_list(Config) ->
%% Replace the default timetrap with a timetrap with
%% known pid.
diff --git a/erts/etc/unix/etp-commands.in b/erts/etc/unix/etp-commands.in
index bac90cb472..e5ef819444 100644
--- a/erts/etc/unix/etp-commands.in
+++ b/erts/etc/unix/etp-commands.in
@@ -1997,7 +1997,7 @@ define etp-process-info
printf " Msgq len: %d\n", $etp_proc->sig_qs.len
end
printf " Parent: "
- etp-1 $etp_proc->parent
+ etp-1 ((Eterm)($etp_proc->parent))
printf "\n Pointer: (Process *) %p\n", $etp_proc
end
end
@@ -2017,15 +2017,17 @@ define etp-processes
set $proc_ix = 0
set $proc_max_ix = erts_proc.r.o.max
set $proc_tab = erts_proc.r.o.tab
+ set $proc_cnt = erts_proc.vola.tile.count.counter
set $invalid_proc = &erts_invalid_process
set $proc_decentile = $proc_max_ix / 10
set $proc_printile = $proc_decentile
- while $proc_ix < $proc_max_ix
+ while $proc_ix < $proc_max_ix && $proc_cnt > 0
set $proc = (Process *) *((UWord *) ($proc_tab + $proc_ix))
if ($proc != ((Process *) 0) && $proc != $invalid_proc)
printf "---\n"
printf " Pix: %d\n", $proc_ix
etp-process-info $proc
+ set $proc_cnt--
end
if $proc_ix == $proc_printile
printf "--- %d%% (%d / %d) searched\n", $proc_printile / $proc_decentile * 10, $proc_ix, $proc_max_ix
@@ -2363,10 +2365,11 @@ define etp-ports
set $port_ix = 0
set $port_max_ix = erts_port.r.o.max
set $port_tab = erts_port.r.o.tab
+ set $port_cnt = erts_port.vola.tile.count.counter
set $invalid_port = &erts_invalid_port
set $port_decentile = $port_max_ix / 10
set $port_printile = $port_decentile
- while $port_ix < $port_max_ix
+ while $port_ix < $port_max_ix && $port_cnt > 0
set $port = (Port *) *((UWord *) ($port_tab + $port_ix))
if ($port != ((Port *) 0) && $port != $invalid_port)
if (*(((Uint32 *) &(((Port *) $port)->state))) & 0x100) == 0
@@ -2374,6 +2377,7 @@ define etp-ports
printf "---\n"
printf " Pix: %d\n", $port_ix
etp-port-info $port
+ set $port_cnt--
end
end
if $port_ix == $port_printile
diff --git a/erts/preloaded/ebin/erlang.beam b/erts/preloaded/ebin/erlang.beam
index e93f053e01..3ef5ad0646 100644
--- a/erts/preloaded/ebin/erlang.beam
+++ b/erts/preloaded/ebin/erlang.beam
Binary files differ
diff --git a/erts/preloaded/ebin/init.beam b/erts/preloaded/ebin/init.beam
index a2dd41b435..9043ae302e 100644
--- a/erts/preloaded/ebin/init.beam
+++ b/erts/preloaded/ebin/init.beam
Binary files differ
diff --git a/erts/preloaded/src/erlang.erl b/erts/preloaded/src/erlang.erl
index bffa59338e..c761bbaaeb 100644
--- a/erts/preloaded/src/erlang.erl
+++ b/erts/preloaded/src/erlang.erl
@@ -50,7 +50,7 @@
dist_ctrl_get_data_notification/1,
dist_get_stat/1]).
--deprecated([now/0]).
+-deprecated([get_stacktrace/0,now/0]).
%% Get rid of autoimports of spawn to avoid clashes with ourselves.
-compile({no_auto_import,[spawn_link/1]}).
diff --git a/erts/preloaded/src/init.erl b/erts/preloaded/src/init.erl
index e0ae6b1656..0c74169e97 100644
--- a/erts/preloaded/src/init.erl
+++ b/erts/preloaded/src/init.erl
@@ -545,6 +545,8 @@ stop(Reason,State) ->
do_stop(Reason,State1).
do_stop(restart,#state{start = Start, flags = Flags, args = Args}) ->
+ %% Make sure we don't have any outstanding messages before doing the restart.
+ flush(),
boot(Start,Flags,Args);
do_stop(reboot,_) ->
halt();
@@ -560,6 +562,13 @@ clear_system(BootPid,State) ->
shutdown_pids(Heart,BootPid,State),
unload(Heart).
+flush() ->
+ receive
+ _M -> flush()
+ after 0 ->
+ ok
+ end.
+
stop_heart(State) ->
case get_heart(State#state.kernel) of
false ->