Diffstat (limited to 'erts')
-rw-r--r--  erts/configure.in | 88
-rw-r--r--  erts/doc/src/absform.xml | 10
-rw-r--r--  erts/doc/src/erl.xml | 28
-rw-r--r--  erts/doc/src/erl_nif.xml | 259
-rw-r--r--  erts/doc/src/erl_tracer.xml | 82
-rw-r--r--  erts/doc/src/erlang.xml | 484
-rw-r--r--  erts/emulator/Makefile.in | 7
-rw-r--r--  erts/emulator/beam/atom.names | 2
-rw-r--r--  erts/emulator/beam/beam_bif_load.c | 75
-rw-r--r--  erts/emulator/beam/beam_bp.c | 25
-rw-r--r--  erts/emulator/beam/beam_bp.h | 10
-rw-r--r--  erts/emulator/beam/beam_emu.c | 191
-rw-r--r--  erts/emulator/beam/beam_load.c | 101
-rw-r--r--  erts/emulator/beam/bif.c | 128
-rw-r--r--  erts/emulator/beam/bif.h | 43
-rw-r--r--  erts/emulator/beam/break.c | 7
-rw-r--r--  erts/emulator/beam/dist.c | 12
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 113
-rw-r--r--  erts/emulator/beam/erl_alloc.types | 1
-rw-r--r--  erts/emulator/beam/erl_bif_ddll.c | 2
-rw-r--r--  erts/emulator/beam/erl_bif_info.c | 66
-rw-r--r--  erts/emulator/beam/erl_bif_re.c | 8
-rw-r--r--  erts/emulator/beam/erl_bif_trace.c | 158
-rw-r--r--  erts/emulator/beam/erl_bif_unique.c | 4
-rw-r--r--  erts/emulator/beam/erl_bits.h | 4
-rw-r--r--  erts/emulator/beam/erl_db.c | 5
-rw-r--r--  erts/emulator/beam/erl_db_util.c | 414
-rw-r--r--  erts/emulator/beam/erl_db_util.h | 8
-rw-r--r--  erts/emulator/beam/erl_drv_nif.h | 9
-rw-r--r--  erts/emulator/beam/erl_gc.c | 291
-rw-r--r--  erts/emulator/beam/erl_gc.h | 6
-rw-r--r--  erts/emulator/beam/erl_hl_timer.c | 32
-rw-r--r--  erts/emulator/beam/erl_init.c | 64
-rw-r--r--  erts/emulator/beam/erl_message.c | 110
-rw-r--r--  erts/emulator/beam/erl_message.h | 8
-rw-r--r--  erts/emulator/beam/erl_msacc.c | 4
-rw-r--r--  erts/emulator/beam/erl_nif.c | 674
-rw-r--r--  erts/emulator/beam/erl_nif.h | 16
-rw-r--r--  erts/emulator/beam/erl_nif_api_funcs.h | 19
-rw-r--r--  erts/emulator/beam/erl_port.h | 39
-rw-r--r--  erts/emulator/beam/erl_process.c | 885
-rw-r--r--  erts/emulator/beam/erl_process.h | 126
-rw-r--r--  erts/emulator/beam/erl_process_dump.c | 34
-rw-r--r--  erts/emulator/beam/erl_time_sup.c | 12
-rw-r--r--  erts/emulator/beam/erl_trace.c | 290
-rw-r--r--  erts/emulator/beam/erl_trace.h | 24
-rw-r--r--  erts/emulator/beam/erl_vm.h | 3
-rw-r--r--  erts/emulator/beam/global.h | 25
-rw-r--r--  erts/emulator/beam/io.c | 88
-rw-r--r--  erts/emulator/beam/sys.h | 7
-rw-r--r--  erts/emulator/beam/utils.c | 2
-rw-r--r--  erts/emulator/hipe/hipe_bif0.c | 24
-rw-r--r--  erts/emulator/hipe/hipe_mode_switch.c | 134
-rw-r--r--  erts/emulator/hipe/hipe_native_bif.c | 2
-rw-r--r--  erts/emulator/nifs/common/erl_tracer_nif.c | 10
-rw-r--r--  erts/emulator/sys/common/erl_mmap.c | 28
-rw-r--r--  erts/emulator/sys/common/erl_mmap.h | 104
-rw-r--r--  erts/emulator/sys/common/erl_mseg.c | 127
-rw-r--r--  erts/emulator/sys/common/erl_mseg.h | 2
-rw-r--r--  erts/emulator/test/Makefile | 1
-rw-r--r--  erts/emulator/test/alloc_SUITE.erl | 4
-rw-r--r--  erts/emulator/test/dirty_nif_SUITE.erl | 327
-rw-r--r--  erts/emulator/test/dirty_nif_SUITE_data/Makefile.src | 6
-rw-r--r--  erts/emulator/test/dirty_nif_SUITE_data/dirty_nif_SUITE.c | 223
-rw-r--r--  erts/emulator/test/gc_SUITE.erl | 31
-rw-r--r--  erts/emulator/test/nif_SUITE.erl | 97
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_SUITE.c | 143
-rw-r--r--  erts/emulator/test/port_trace_SUITE.erl | 44
-rw-r--r--  erts/emulator/test/port_trace_SUITE_data/echo_drv.c | 36
-rw-r--r--  erts/emulator/test/process_SUITE.erl | 186
-rw-r--r--  erts/emulator/test/save_calls_SUITE.erl | 2
-rw-r--r--  erts/emulator/test/scheduler_SUITE.erl | 50
-rw-r--r--  erts/emulator/test/scheduler_SUITE_data/Makefile.src | 8
-rw-r--r--  erts/emulator/test/scheduler_SUITE_data/scheduler_SUITE.c | 37
-rw-r--r--  erts/emulator/test/trace_SUITE.erl | 249
-rw-r--r--  erts/emulator/test/trace_bif_SUITE.erl | 1
-rw-r--r--  erts/emulator/test/tracer_SUITE.erl | 90
-rw-r--r--  erts/emulator/test/tracer_SUITE_data/tracer_test.c | 12
-rw-r--r--  erts/epmd/test/epmd_SUITE.erl | 1205
-rw-r--r--  erts/etc/common/erlexec.c | 5
-rw-r--r--  erts/include/erl_native_features_config.h.in | 22
-rw-r--r--  erts/lib_src/Makefile.in | 1
-rw-r--r--  erts/preloaded/ebin/erl_prim_loader.beam | bin 55800 -> 55780 bytes
-rw-r--r--  erts/preloaded/ebin/erl_tracer.beam | bin 2128 -> 2112 bytes
-rw-r--r--  erts/preloaded/ebin/erlang.beam | bin 104172 -> 104620 bytes
-rw-r--r--  erts/preloaded/ebin/erts_code_purger.beam | bin 8712 -> 8696 bytes
-rw-r--r--  erts/preloaded/ebin/erts_internal.beam | bin 10552 -> 10536 bytes
-rw-r--r--  erts/preloaded/ebin/init.beam | bin 49944 -> 49924 bytes
-rw-r--r--  erts/preloaded/ebin/otp_ring0.beam | bin 1460 -> 1444 bytes
-rw-r--r--  erts/preloaded/ebin/prim_eval.beam | bin 1328 -> 1312 bytes
-rw-r--r--  erts/preloaded/ebin/prim_file.beam | bin 44780 -> 44764 bytes
-rw-r--r--  erts/preloaded/ebin/prim_inet.beam | bin 72564 -> 72544 bytes
-rw-r--r--  erts/preloaded/ebin/prim_zip.beam | bin 23172 -> 23152 bytes
-rw-r--r--  erts/preloaded/ebin/zlib.beam | bin 14152 -> 14136 bytes
-rw-r--r--  erts/preloaded/src/erl_tracer.erl | 8
-rw-r--r--  erts/preloaded/src/erlang.erl | 35
96 files changed, 5583 insertions, 2774 deletions
diff --git a/erts/configure.in b/erts/configure.in
index 11c428ced7..4a63381eb7 100644
--- a/erts/configure.in
+++ b/erts/configure.in
@@ -62,9 +62,6 @@ if test x"${ERL_TOP}/erts" != x"$srcdir"; then
fi
erl_top=${ERL_TOP}
-# Remove old configuration information
-/bin/rm -f "$ERL_TOP/erts/CONF_INFO"
-
# echo XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
# echo X
# echo "X srcdir = $srcdir"
@@ -102,7 +99,7 @@ ERL_XCOMP_SYSROOT_INIT
AC_ISC_POSIX
-AC_CONFIG_HEADER($host/config.h:config.h.in include/internal/$host/ethread_header_config.h:include/internal/ethread_header_config.h.in include/$host/erl_int_sizes_config.h:include/erl_int_sizes_config.h.in include/$host/erl_native_features_config.h:include/erl_native_features_config.h.in)
+AC_CONFIG_HEADER($host/config.h:config.h.in include/internal/$host/ethread_header_config.h:include/internal/ethread_header_config.h.in include/$host/erl_int_sizes_config.h:include/erl_int_sizes_config.h.in)
dnl ----------------------------------------------------------------------
dnl Optional features.
dnl ----------------------------------------------------------------------
@@ -140,7 +137,7 @@ AS_HELP_STRING([--enable-dirty-schedulers], [enable dirty scheduler support]),
[ case "$enableval" in
no) enable_dirty_schedulers=no ;;
*) enable_dirty_schedulers=yes ;;
- esac ], enable_dirty_schedulers=no)
+ esac ], enable_dirty_schedulers=default)
AC_ARG_ENABLE(smp-support,
AS_HELP_STRING([--enable-smp-support], [enable smp support])
@@ -729,8 +726,27 @@ esac
AC_SUBST(LIBCARBON)
-dnl Check if we should/can build a sharing-preserving emulator
+_search_path=/bin:/usr/bin:/usr/local/bin:$PATH
+AC_PATH_PROG(RM, rm, false, $_search_path)
+if test "$ac_cv_path_RM" = false; then
+ AC_MSG_ERROR([No 'rm' command found])
+fi
+
+AC_PATH_PROG(MKDIR, mkdir, false, $_search_path)
+if test "$ac_cv_path_MKDIR" = false; then
+ AC_MSG_ERROR([No 'mkdir' command found])
+fi
+
+_search_path=
+
+
+# Remove old configuration information.
+# The next line should be placed after AC_PATH_PROG(RM, ...), but before the
+# first output to CONF_INFO, so this is just the right place.
+$RM -f "$ERL_TOP/erts/CONF_INFO"
+
+dnl Check if we should/can build a sharing-preserving emulator
AC_MSG_CHECKING(if we are building a sharing-preserving emulator)
if test "$enable_sharing_preserving" = "yes"; then
AC_DEFINE(SHCOPY, [1],
@@ -760,27 +776,13 @@ if test "$ac_cv_prog_AR" = false; then
AC_MSG_ERROR([No 'ar' command found in PATH])
fi
-_search_path=/bin:/usr/bin:/usr/local/bin:$PATH
-
-AC_PATH_PROG(RM, rm, false, $_search_path)
-if test "$ac_cv_path_RM" = false; then
- AC_MSG_ERROR([No 'rm' command found])
-fi
-
-AC_PATH_PROG(MKDIR, mkdir, false, $_search_path)
-if test "$ac_cv_path_MKDIR" = false; then
- AC_MSG_ERROR([No 'mkdir' command found])
-fi
-
-_search_path=
-
#
# Get programs needed for building the documentation
#
## Delete previous failed configure results
if test -f doc/CONF_INFO; then
- rm doc/CONF_INFO
+ $RM doc/CONF_INFO
fi
AC_CHECK_PROGS(XSLTPROC, xsltproc)
@@ -1004,6 +1006,23 @@ case $ERTS_BUILD_SMP_EMU in
;;
esac
+AC_MSG_CHECKING(whether dirty schedulers should be enabled)
+case $ERTS_BUILD_SMP_EMU-$enable_dirty_schedulers in
+ yes-yes)
+ DIRTY_SCHEDULER_SUPPORT=yes;;
+ yes-default)
+ ## Maybe yes for OTP 19...
+ DIRTY_SCHEDULER_SUPPORT=no;;
+ no-default)
+ DIRTY_SCHEDULER_SUPPORT=no;;
+ no-yes)
+ AC_MSG_ERROR([No smp emulator will be built, but dirty schedulers requested]);;
+ *)
+ DIRTY_SCHEDULER_SUPPORT=no;;
+esac
+AC_MSG_RESULT($DIRTY_SCHEDULER_SUPPORT)
+AC_SUBST(DIRTY_SCHEDULER_SUPPORT)
+
if test $ERTS_BUILD_SMP_EMU = yes; then
if test $found_threads = no; then
@@ -1206,14 +1225,6 @@ esac
if test $emu_threads != yes; then
enable_lock_check=no
enable_lock_count=no
- AC_MSG_CHECKING(whether dirty schedulers should be enabled)
- if test "x$enable_dirty_schedulers" != "xno"; then
- AC_DEFINE(ERL_NIF_DIRTY_SCHEDULER_SUPPORT, 1, [Dirty scheduler support])
- AC_DEFINE(ERL_DRV_DIRTY_SCHEDULER_SUPPORT, 1, [Dirty scheduler support])
- AC_MSG_RESULT(yes)
- else
- AC_MSG_RESULT(no)
- fi
else
# Threads enabled for emulator
EMU_THR_LIB_NAME=$ETHR_LIB_NAME
@@ -1233,17 +1244,6 @@ else
EMU_THR_DEFS="$EMU_THR_DEFS -DERTS_ENABLE_LOCK_COUNT"
fi
- AC_MSG_CHECKING(whether dirty schedulers should be enabled)
- if test "x$enable_dirty_schedulers" != "xno"; then
- EMU_THR_DEFS="$EMU_THR_DEFS -DERTS_DIRTY_SCHEDULERS"
- AC_DEFINE(ERTS_DIRTY_SCHEDULERS, 1, [Define if the emulator supports dirty schedulers])
- AC_DEFINE(ERL_NIF_DIRTY_SCHEDULER_SUPPORT, 1, [Dirty scheduler support])
- AC_DEFINE(ERL_DRV_DIRTY_SCHEDULER_SUPPORT, 1, [Dirty scheduler support])
- AC_MSG_RESULT(yes)
- else
- AC_MSG_RESULT(no)
- fi
-
case $host_os in
linux*)
AC_MSG_CHECKING([whether dlopen() needs to be called before first call to dlerror()])
@@ -3823,7 +3823,7 @@ if test "$enable_dtrace_test" = "yes" ; then
[$RM -f $DTRACE_2STEP_TEST
dtrace -G $DTRACE_CPP $DTRACE_BITS_FLAG -Iemulator/beam -o $DTRACE_2STEP_TEST -s emulator/beam/erlang_dtrace.d conftest.$OBJEXT 2>&AS_MESSAGE_LOG_FD
if test -f $DTRACE_2STEP_TEST; then
- rm $DTRACE_2STEP_TEST
+ $RM $DTRACE_2STEP_TEST
DTRACE_ENABLED_2STEP=yes
fi],
[])
@@ -4030,7 +4030,7 @@ ssl_done=yes # Default only one run
# Remove all SKIP files from previous runs
for a in ssl crypto ssh; do
- /bin/rm -f $ERL_TOP/lib/$a/SKIP
+ $RM -f $ERL_TOP/lib/$a/SKIP
done
SSL_DYNAMIC_ONLY=$enable_dynamic_ssl
@@ -4627,7 +4627,7 @@ need_java="jinterface ic/java_src"
# Remove all SKIP files from previous runs
for a in $need_java ; do
- /bin/rm -f $ERL_TOP/lib/$a/SKIP
+ $RM -f $ERL_TOP/lib/$a/SKIP
done
if test "X$with_javac" = "Xno"; then
@@ -4678,7 +4678,7 @@ dnl this deliberately does not believe that 'gcc' is a C++ compiler
AC_CHECK_TOOLS(CXX, [$CCC c++ g++ CC cxx cc++ cl], false)
# Remove SKIP file from previous run
-/bin/rm -f $ERL_TOP/lib/orber/SKIP
+$RM -f $ERL_TOP/lib/orber/SKIP
if test "$CXX" = false; then
echo "No C++ compiler found" > $ERL_TOP/lib/orber/SKIP
diff --git a/erts/doc/src/absform.xml b/erts/doc/src/absform.xml
index 6d6ba224a0..bfabb7f042 100644
--- a/erts/doc/src/absform.xml
+++ b/erts/doc/src/absform.xml
@@ -68,22 +68,12 @@
<item>If D is a module declaration consisting of the forms
<c>F_1</c>, ..., <c>F_k</c>, then
Rep(D) = <c>[Rep(F_1), ..., Rep(F_k)]</c>.</item>
- <item>If F is an attribute <c>-behavior(Behavior)</c>, then
- Rep(F) = <c>{attribute,LINE,behavior,Behavior}</c>.</item>
- <item>If F is an attribute <c>-behaviour(Behaviour)</c>, then
- Rep(F) = <c>{attribute,LINE,behaviour,Behaviour}</c>.</item>
- <item>If F is an attribute <c>-compile(Options)</c>, then
- Rep(F) = <c>{attribute,LINE,compile,Options}</c>.</item>
<item>If F is an attribute <c>-export([Fun_1/A_1, ..., Fun_k/A_k])</c>, then
Rep(F) = <c>{attribute,LINE,export,[{Fun_1,A_1}, ..., {Fun_k,A_k}]}</c>.</item>
- <item>If F is an attribute <c>-export_type([Type_1/A_1, ..., Type_k/A_k])</c>, then
- Rep(F) = <c>{attribute,LINE,export_type,[{Type_1,A_1}, ..., {Type_k,A_k}]}</c>.</item>
<item>If F is an attribute <c>-import(Mod,[Fun_1/A_1, ..., Fun_k/A_k])</c>, then
Rep(F) = <c>{attribute,LINE,import,{Mod,[{Fun_1,A_1}, ..., {Fun_k,A_k}]}}</c>.</item>
<item>If F is an attribute <c>-module(Mod)</c>, then
Rep(F) = <c>{attribute,LINE,module,Mod}</c>.</item>
- <item>If F is an attribute <c>-optional_callbacks([Fun_1/A_1, ..., Fun_k/A_k])</c>, then
- Rep(F) = <c>{attribute,LINE,optional_callbacks,[{Fun_1,A_1}, ..., {Fun_k,A_k}]}</c>.</item>
<item>If F is an attribute <c>-file(File,Line)</c>, then
Rep(F) = <c>{attribute,LINE,file,{File,Line}}</c>.</item>
<item>If F is a function declaration
diff --git a/erts/doc/src/erl.xml b/erts/doc/src/erl.xml
index c499fc8081..1bbde7f1e0 100644
--- a/erts/doc/src/erl.xml
+++ b/erts/doc/src/erl.xml
@@ -627,6 +627,34 @@
<p>Sets the default binary virtual heap size of processes to the size
<c><![CDATA[Size]]></c>.</p>
</item>
+ <marker id="+hmax"/>
+ <tag><c><![CDATA[+hmax Size]]></c></tag>
+ <item>
+ <p>Sets the default maximum heap size of processes to the size
+ <c><![CDATA[Size]]></c>. If <c>+hmax</c> is not given, the default is <c>0</c>
+ which means that no maximum heap size is used.
+ For more information, see the documentation of
+ <seealso marker="erlang#process_flag_max_heap_size">
+ <c>process_flag(max_heap_size, MaxHeapSize)</c></seealso>.</p>
+ </item>
+ <marker id="+hmaxel"/>
+ <tag><c><![CDATA[+hmaxel true|false]]></c></tag>
+ <item>
+      <p>Sets whether or not to send an error logger message for processes
+      that reach the maximum heap size. If <c>+hmaxel</c> is not given, the default is <c>true</c>.
+ For more information, see the documentation of
+ <seealso marker="erlang#process_flag_max_heap_size">
+ <c>process_flag(max_heap_size, MaxHeapSize)</c></seealso>.</p>
+ </item>
+ <marker id="+hmaxk"/>
+ <tag><c><![CDATA[+hmaxk true|false]]></c></tag>
+ <item>
+      <p>Sets whether or not to kill processes that reach the maximum heap size. If
+ <c>+hmaxk</c> is not given, the default is <c>true</c>. For more information,
+ see the documentation of
+ <seealso marker="erlang#process_flag_max_heap_size">
+ <c>process_flag(max_heap_size, MaxHeapSize)</c></seealso>.</p>
+ </item>
<tag><c><![CDATA[+hpds Size]]></c></tag>
<item>
<p>Sets the initial process dictionary size of processes to the size
diff --git a/erts/doc/src/erl_nif.xml b/erts/doc/src/erl_nif.xml
index 7546f7ef81..33a4fee182 100644
--- a/erts/doc/src/erl_nif.xml
+++ b/erts/doc/src/erl_nif.xml
@@ -138,29 +138,6 @@ ok
automatically unloaded when the module code that it belongs to is purged
by the code server.</p>
- <p><marker id="lengthy_work"/>
- As mentioned in the <seealso marker="#WARNING">warning</seealso> text at
- the beginning of this document it is of vital importance that a native function
- return relatively quickly. It is hard to give an exact maximum amount
- of time that a native function is allowed to work, but as a rule of thumb
- a well-behaving native function should return to its caller before a
- millisecond has passed. This can be achieved using different approaches.
- If you have full control over the code to execute in the native
- function, the best approach is to divide the work into multiple chunks of
- work and call the native function multiple times, either directly from Erlang code
- or by having a native function schedule a future NIF call via the
- <seealso marker="#enif_schedule_nif"> enif_schedule_nif</seealso> function. Function
- <seealso marker="#enif_consume_timeslice">enif_consume_timeslice</seealso> can be
- used to help with such work division. In some cases, however, this might not
- be possible, e.g. when calling third-party libraries. Then you typically want
- to dispatch the work to another thread, return
- from the native function, and wait for the result. The thread can send
- the result back to the calling thread using message passing. Information
- about thread primitives can be found below. If you have built your system
- with <em>the currently experimental</em> support for dirty schedulers,
- you may want to try out this functionality by dispatching the work to a
- <seealso marker="#dirty_nifs">dirty NIF</seealso>,
- which does not have the same duration restriction as a normal NIF.</p>
</description>
<section>
<title>FUNCTIONALITY</title>
@@ -328,38 +305,161 @@ ok
</list></p>
</item>
- <tag>Long-running NIFs</tag>
- <item><p><marker id="dirty_nifs"/>Native functions
- <seealso marker="#lengthy_work">
- must normally run quickly</seealso>, as explained earlier in this document. They
- generally should execute for no more than a millisecond. But not all native functions
- can execute so quickly; for example, functions that encrypt large blocks of data or
- perform lengthy file system operations can often run for tens of seconds or more.</p>
- <p>If the functionality of a long-running NIF can be split so that its work can be
- achieved through a series of shorter NIF calls, the application can either make that series
- of NIF calls from the Erlang level, or it can call a NIF that first performs a chunk of the
- work, then invokes the <seealso marker="#enif_schedule_nif">enif_schedule_nif</seealso>
- function to schedule another NIF call to perform the next chunk. The final call scheduled
- in this manner can then return the overall result. Breaking up a long-running function in
- this manner enables the VM to regain control between calls to the NIFs, thereby avoiding
- degraded responsiveness, scheduler load balancing problems, and other strange behaviours.</p>
- <p>A NIF that cannot be split and cannot execute in a millisecond or less is called a "dirty NIF"
- because it performs work that the Erlang runtime cannot handle cleanly.
- <em>Note that the dirty NIF functionality described here is experimental</em> and that you have to
- enable support for dirty schedulers when building OTP in order to try the functionality out.
- Applications that make use of such functions must indicate to the runtime that the functions are
- dirty so they can be handled specially. To schedule a dirty NIF for execution, the
- appropriate flags value can be set for the NIF in its <seealso marker="#ErlNifFunc">ErlNifFunc</seealso>
- entry, or the application can call <seealso marker="#enif_schedule_nif">enif_schedule_nif</seealso>,
- passing to it a pointer to the dirty NIF to be executed and indicating with the <c>flags</c>
- argument whether it expects the operation to be CPU-bound or I/O-bound.</p>
- <note><p>Dirty NIF support is available only when the emulator is configured with dirty
- schedulers enabled. This feature is currently disabled by default. To determine whether
- the dirty NIF API is available, native code can check to see if the C preprocessor macro
- <c>ERL_NIF_DIRTY_SCHEDULER_SUPPORT</c> is defined. Also, if the Erlang runtime was built
- without threading support, dirty schedulers are disabled. To check at runtime for the presence
- of dirty scheduler threads, code can use the <seealso marker="#enif_system_info"><c>
- enif_system_info()</c></seealso> API function.</p></note>
+ <tag><marker id="lengthy_work"/>Long-running NIFs</tag>
+
+ <item><p>
+ As mentioned in the <seealso marker="#WARNING">warning</seealso> text at
+ the beginning of this document it is of <em>vital importance</em> that a
+ native function return relatively quickly. It is hard to give an exact
+ maximum amount of time that a native function is allowed to work, but as a
+ rule of thumb a well-behaving native function should return to its caller
+ before a millisecond has passed. This can be achieved using different
+ approaches. If you have full control over the code to execute in the
+ native function, the best approach is to divide the work into multiple
+ chunks of work and call the native function multiple times. In some
+ cases this might however not always be possible, e.g. when calling
+ third-party libraries.</p>
+
+ <p>The
+ <seealso marker="#enif_consume_timeslice">enif_consume_timeslice()</seealso>
+    function can be used to inform the runtime system about the length of the
+ NIF call. It should typically always be used unless the NIF executes very
+ quickly.</p>
+
+    <p>If the NIF call is too lengthy, one needs to handle this in one of the
+ following ways in order to avoid degraded responsiveness, scheduler load
+ balancing problems, and other strange behaviours:</p>
+
+ <taglist>
+ <tag>Yielding NIF</tag>
+ <item>
+ <p>
+ If the functionality of a long-running NIF can be split so that
+ its work can be achieved through a series of shorter NIF calls,
+ the application can either make that series of NIF calls from the
+ Erlang level, or it can call a NIF that first performs a chunk of
+ the work, then invokes the
+ <seealso marker="#enif_schedule_nif">enif_schedule_nif</seealso>
+ function to schedule another NIF call to perform the next chunk.
+ The final call scheduled in this manner can then return the
+ overall result. Breaking up a long-running function in
+ this manner enables the VM to regain control between calls to the
+ NIFs.
+ </p>
+ <p>
+            This approach is always preferred over the other alternatives
+            described below, both from a performance perspective and from a
+            system characteristics perspective. A minimal sketch of a
+            yielding NIF is included after this section.
+ </p>
+ </item>
+
+ <tag>Threaded NIF</tag>
+ <item>
+ <p>
+ This is accomplished by dispatching the work to another thread
+            managed by the NIF library, returning from the NIF, and waiting for the
+ result. The thread can send the result back to the Erlang
+ process using <seealso marker="#enif_send">enif_send</seealso>.
+ Information about thread primitives can be found below.
+ </p>
+ </item>
+
+ <tag><marker id="dirty_nifs"/>Dirty NIF</tag>
+ <item>
+
+ <note>
+ <p>
+ <em>The dirty NIF functionality described here
+ is experimental</em>. Dirty NIF support is available only when
+ the emulator is configured with dirty schedulers enabled. This
+ feature is currently disabled by default. The Erlang runtime
+              without SMP support does not support dirty schedulers even when
+ the dirty scheduler support has been enabled. To check at
+ runtime for the presence of dirty scheduler threads, code can
+ use the
+ <seealso marker="#enif_system_info"><c>enif_system_info()</c></seealso>
+ API function.
+ </p>
+ </note>
+
+ <p>
+ A NIF that cannot be split and cannot execute in a millisecond or
+ less is called a "dirty NIF" because it performs work that the
+ Erlang runtime cannot handle cleanly. Applications that make use
+ of such functions must indicate to the runtime that the functions
+ are dirty so they can be handled specially. To schedule a dirty
+ NIF for execution, the appropriate flags value can be set for the
+ NIF in its <seealso marker="#ErlNifFunc"><c>ErlNifFunc</c></seealso>
+ entry, or the application can call
+ <seealso marker="#enif_schedule_nif"><c>enif_schedule_nif</c></seealso>,
+ passing to it a pointer to the dirty NIF to be executed and
+ indicating with the <c>flags</c> argument whether it expects the
+ operation to be CPU-bound or I/O-bound. A dirty NIF executing
+ on a dirty scheduler does not have the same duration restriction
+ as a normal NIF.
+ </p>
+
+ <p>
+ While a process is executing a dirty NIF some operations that
+ communicate with it may take a very long time to complete.
+            Suspending or garbage collecting a process executing a dirty
+ NIF cannot be done until the dirty NIF has returned, so other
+ processes waiting for such operations to complete might have to
+ wait for a very long time. Blocking multi scheduling, i.e.,
+ calling
+ <seealso marker="erlang#system_flag_multi_scheduling"><c>erlang:system_flag(multi_scheduling,
+ block)</c></seealso>, might also take a very long time to
+            complete. This is because all ongoing dirty operations on all
+            dirty schedulers need to complete before the block
+ operation can complete.
+ </p>
+
+ <p>
+ A lot of operations communicating with a process executing a
+ dirty NIF can, however, complete while it is executing the
+            dirty NIF. For example, retrieving information about it via
+            <c>process_info()</c>, setting its group leader,
+            registering/unregistering its name, etc.
+ </p>
+
+ <p>
+ Termination of a process executing a dirty NIF can only be
+ completed up to a certain point while it is executing the
+ dirty NIF. All Erlang resources such as registered names,
+ ETS tables, etc will be released. All links and monitors
+ will be triggered. The actual execution of the NIF will
+            however <em>not</em> be stopped. The NIF can safely continue
+            execution, allocate heap memory, etc, but it is of course better
+            to stop executing as soon as possible. The NIF can check
+            whether the current process is alive or not using
+ <seealso marker="#enif_is_current_process_alive"><c>enif_is_current_process_alive</c></seealso>.
+ Communication using
+ <seealso marker="#enif_send"><c>enif_send</c></seealso>,
+ and <seealso marker="#enif_port_command"><c>enif_port_command</c></seealso>
+ will also be dropped when the sending process is not alive.
+ Deallocation of certain internal resources such as process
+ heap, and process control block will be delayed until the
+ dirty NIF has completed.
+ </p>
+
+ <p>Currently known issues that are planned to be fixed:</p>
+ <list>
+ <item>
+ <p>
+ Since purging of a module currently might need to garbage
+ collect a process in order to determine if it has
+ references to the module, a process executing a dirty
+ NIF might delay purging for a very long time. Delaying
+              a purge operation implies delaying <em>all</em> code
+              loading operations, which might cause severe problems for
+ the system as a whole.
+ </p>
+ </item>
+ </list>
+
+ </item>
+ </taglist>
+
</item>
</taglist>
</section>
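The yielding approach described above can be illustrated with a minimal sketch (not part of this patch; the module name count_example and the function count_nif are hypothetical). The NIF performs one chunk of work at a time, reports progress with enif_consume_timeslice(), and reschedules itself with enif_schedule_nif() when the timeslice is used up:

#include "erl_nif.h"

#define CHUNK 4096

static ERL_NIF_TERM
count_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    unsigned long i, n;

    if (argc != 2
        || !enif_get_ulong(env, argv[0], &i)
        || !enif_get_ulong(env, argv[1], &n))
        return enif_make_badarg(env);

    while (i < n) {
        unsigned long stop = (i + CHUNK < n) ? i + CHUNK : n;
        for (; i < stop; i++)
            ;                            /* one chunk of (dummy) work */

        /* Report roughly how much of the timeslice the chunk consumed;
         * when the slice is exhausted, reschedule ourselves with the loop
         * counter as the new first argument instead of continuing. */
        if (enif_consume_timeslice(env, 1) && i < n) {
            ERL_NIF_TERM newargv[2];
            newargv[0] = enif_make_ulong(env, i);
            newargv[1] = argv[1];
            return enif_schedule_nif(env, "count_nif", 0,
                                     count_nif, 2, newargv);
        }
    }
    return enif_make_ulong(env, i);
}

static ErlNifFunc nif_funcs[] = {
    {"count", 2, count_nif}              /* count(Start, N) -> N */
};

ERL_NIF_INIT(count_example, nif_funcs, NULL, NULL, NULL, NULL)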
@@ -508,6 +608,10 @@ typedef struct {
CPU-bound, its <c>flags</c> field should be set to
<c>ERL_NIF_DIRTY_JOB_CPU_BOUND</c>, or for I/O-bound jobs,
<c>ERL_NIF_DIRTY_JOB_IO_BOUND</c>.</p>
+ <note><p>If one of the
+ <c>ERL_NIF_DIRTY_JOB_*_BOUND</c> flags is set, and the runtime
+ system has no support for dirty schedulers, the runtime system
+ will refuse to load the NIF library.</p></note>
</item>
<tag><marker id="ErlNifBinary"/>ErlNifBinary</tag>
<item>
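To make the flags note above concrete, here is a hypothetical ErlNifFunc table (not from this patch, assumed to live in a NIF library that includes erl_nif.h) marking one entry as a dirty CPU-bound job. As stated in the note, a runtime system without dirty scheduler support will refuse to load a library whose function table uses these flags:

/* Hypothetical function table: "fast_hash" runs as a normal NIF, while
 * "slow_encrypt" is flagged as a dirty CPU-bound job and is scheduled on a
 * dirty CPU scheduler when dirty scheduler support is available. */
static ERL_NIF_TERM fast_hash_nif(ErlNifEnv* env, int argc,
                                  const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM slow_encrypt_nif(ErlNifEnv* env, int argc,
                                     const ERL_NIF_TERM argv[]);

static ErlNifFunc nif_funcs[] = {
    {"fast_hash",    1, fast_hash_nif},
    {"slow_encrypt", 1, slow_encrypt_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND}
};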
@@ -963,6 +1067,13 @@ typedef enum {
<fsummary>Determine if a term is a binary</fsummary>
<desc><p>Return true if <c>term</c> is a binary</p></desc>
</func>
+ <func><name><ret>int</ret><nametext>enif_is_current_process_alive(ErlNifEnv* env)</nametext></name>
+ <fsummary>Determine if currently executing process is alive or not.</fsummary>
+    <desc><p>Return true if the currently executing process is alive; otherwise
+    false.</p>
+    <p>This function can only be used from a NIF-calling thread, and with an
+    environment corresponding to the currently executing process.</p></desc>
+ </func>
<func><name><ret>int</ret><nametext>enif_is_empty_list(ErlNifEnv* env, ERL_NIF_TERM term)</nametext></name>
<fsummary>Determine if a term is an empty list</fsummary>
<desc><p>Return true if <c>term</c> is an empty list.</p></desc>
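A hedged sketch of how a long-running dirty NIF might use enif_is_current_process_alive() to stop early once its process has exited; the work loop and all names are hypothetical:

static ERL_NIF_TERM
crunch_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    int i;
    for (i = 0; i < 1000; i++) {
        /* ... one expensive unit of work ... */

        /* The process may have been killed while we were working; stop as
         * soon as possible instead of occupying the dirty scheduler for a
         * result nobody can receive. */
        if (!enif_is_current_process_alive(env))
            return enif_make_atom(env, "aborted");
    }
    return enif_make_atom(env, "done");
}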
@@ -993,15 +1104,10 @@ typedef enum {
<func><name><ret>int</ret><nametext>enif_is_on_dirty_scheduler(ErlNifEnv* env)</nametext></name>
<fsummary>Check to see if executing on a dirty scheduler thread</fsummary>
<desc>
- <p>Check to see if the current NIF is executing on a dirty scheduler thread. If the
- emulator is built with threading support, calling <c>enif_is_on_dirty_scheduler</c>
- from within a dirty NIF returns true. It returns false when the calling NIF is a regular
- NIF running on a normal scheduler thread, or when the emulator is built without threading
- support.</p>
- <note><p>This function is available only when the emulator is configured with dirty
- schedulers enabled. This feature is currently disabled by default. To determine whether
- the dirty NIF API is available, native code can check to see if the C preprocessor macro
- <c>ERL_NIF_DIRTY_SCHEDULER_SUPPORT</c> is defined.</p></note>
+      <p>Check to see if the current NIF is executing on a dirty scheduler thread. If
+      executing on a dirty scheduler thread, true is returned; otherwise false.</p>
+      <p>This function can only be used from a NIF-calling thread, and with an
+      environment corresponding to the currently executing process.</p>
</desc>
</func>
<func><name><ret>int</ret><nametext>enif_is_pid(ErlNifEnv* env, ERL_NIF_TERM term)</nametext></name>
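A small illustrative fragment (hypothetical, not from the patch) using enif_is_on_dirty_scheduler() to verify that a job flagged as ERL_NIF_DIRTY_JOB_IO_BOUND really ended up on a dirty scheduler thread:

static ERL_NIF_TERM
dirty_io_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    /* Scheduled with ERL_NIF_DIRTY_JOB_IO_BOUND, so on an emulator with
     * dirty scheduler support this is expected to return true. */
    if (!enif_is_on_dirty_scheduler(env))
        return enif_make_atom(env, "on_normal_scheduler");

    /* ... perform the blocking I/O here ... */
    return enif_make_atom(env, "on_dirty_scheduler");
}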
@@ -1015,7 +1121,8 @@ typedef enum {
<func><name><ret>int</ret><nametext>enif_is_port_alive(ErlNifEnv* env, ErlNifPort *port_id)</nametext></name>
<fsummary>Determine if a local port is alive or not.</fsummary>
<desc><p>Return true if <c>port_id</c> is currently alive.</p>
- <p>This function can only be used in a from a NIF-calling thread.</p></desc>
+ <p>This function is only thread-safe when the emulator with SMP support is used.
+ It can only be used in a non-SMP emulator from a NIF-calling thread.</p></desc>
</func>
<func><name><ret>int</ret><nametext>enif_is_process_alive(ErlNifEnv* env, ErlNifPid *pid)</nametext></name>
<fsummary>Determine if a local process is alive or not.</fsummary>
@@ -1483,9 +1590,7 @@ enif_map_iterator_destroy(env, &amp;iter);
<fsummary>Send a port_command to to_port</fsummary>
<desc>
<p>This function works the same as <seealso marker="erlang#port_command-2">erlang:port_command/2</seealso>
- except that it is always completely asynchronous. This call may return false
- if it detects that the port is already dead, otherwise it will return true.
- </p>
+ except that it is always completely asynchronous.</p>
<taglist>
<tag><c>env</c></tag>
<item>The environment of the calling process. May not be NULL.</item>
@@ -1504,7 +1609,10 @@ enif_map_iterator_destroy(env, &amp;iter);
calls to <c>enif_alloc_env</c>, <c>enif_make_copy</c>, <c>enif_port_command</c>
and <c>enif_free_env</c> into one call. This optimization is only usefull
when a majority of the terms are to be copied from <c>env</c> to the <c>msg_env</c>.</p>
- <p>The call may return false if it detects that the command failed for some reason. Otherwise true is returned.</p>
+      <p>This function returns true if the command was successfully sent; otherwise,
+      false. The call may return false if it detects that the command failed for some
+      reason. For example, if <c>*to_port</c> does not refer to a local port, if the
+      currently executing process, i.e. the sender, is not alive, or if <c>msg</c> is invalid.</p>
<p>See also: <seealso marker="#enif_get_local_port"><c>enif_get_local_port</c></seealso>.</p>
</desc>
</func>
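A minimal sketch (assumed names, not part of this patch) of sending data to a port with enif_port_command() and checking the true/false result described above. Passing NULL as msg_env lets the runtime handle the message environment as described in the preceding paragraph:

static ERL_NIF_TERM
port_hello_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifPort port;
    ERL_NIF_TERM data;

    if (argc != 1 || !enif_get_local_port(env, argv[0], &port))
        return enif_make_badarg(env);

    /* Data sent to a port must be iodata; a latin-1 string term is fine. */
    data = enif_make_string(env, "hello", ERL_NIF_LATIN1);

    /* Returns false if, e.g., the port or the sending process is dead. */
    if (!enif_port_command(env, &port, NULL, data))
        return enif_make_atom(env, "false");
    return enif_make_atom(env, "true");
}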
@@ -1635,7 +1743,9 @@ enif_map_iterator_destroy(env, &amp;iter);
<tag><c>msg</c></tag>
<item>The message term to send.</item>
</taglist>
- <p>Return true on success, or false if <c>*to_pid</c> does not refer to an alive local process.</p>
+ <p>Return true if the message was successfully sent; otherwise, false. The send
+ operation will fail if <c>*to_pid</c> does not refer to an alive local process,
+      or if the currently executing process, i.e. the sender, is not alive.</p>
<p>The message environment <c>msg_env</c> with all its terms (including
<c>msg</c>) will be invalidated by a successful call to <c>enif_send</c>. The environment
should either be freed with <seealso marker="#enif_free_env">enif_free_env</seealso>
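As a complement to the description above, a hedged sketch of the result-delivery half of a threaded NIF: a worker thread, assumed to have been started with enif_thread_create() and handed the caller's pid obtained via enif_self(), reports back with enif_send():

struct work {
    ErlNifPid caller;        /* filled in by the NIF using enif_self() */
    /* ... input for the job ... */
};

/* Worker thread body; the work struct is assumed to have been allocated
 * with enif_alloc() by the NIF that started the thread. */
static void* worker(void* arg)
{
    struct work* w = arg;
    ErlNifEnv* msg_env = enif_alloc_env();
    ERL_NIF_TERM result = enif_make_atom(msg_env, "job_done");

    /* From a non-scheduler thread the first env argument must be NULL.
     * enif_send returns false if the receiver is no longer alive. */
    if (!enif_send(NULL, &w->caller, msg_env, result))
        ;   /* receiver died; nothing more to do */

    enif_free_env(msg_env);
    enif_free(w);
    return NULL;
}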
@@ -1653,6 +1763,15 @@ enif_map_iterator_destroy(env, &amp;iter);
<desc><p>Get the byte size of a resource object <c>obj</c> obtained by
<seealso marker="#enif_alloc_resource">enif_alloc_resource</seealso>.</p></desc>
</func>
+
+ <func><name><ret>int</ret><nametext>enif_snprintf(char *str, size_t size, const char *format, ...)</nametext></name>
+ <fsummary>Format strings and Erlang terms</fsummary>
+ <desc>
+      <p>Similar to <c>snprintf</c>, but the format string also accepts <c>"%T"</c>, which formats Erlang terms.
+ </p>
+ </desc>
+ </func>
+
<func>
<name><ret>void</ret><nametext>enif_system_info(ErlNifSysInfo *sys_info_ptr, size_t size)</nametext></name>
<fsummary>Get information about the Erlang runtime system</fsummary>
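A small hypothetical helper showing the "%T" term formatting that enif_snprintf() adds on top of plain snprintf, e.g. for debug logging inside a NIF library:

#include <stdio.h>        /* fprintf */
#include "erl_nif.h"

static void
debug_print_first_arg(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    char buf[256];

    /* "%T" formats an Erlang term; the output is truncated to fit buf. */
    enif_snprintf(buf, sizeof(buf), "argc=%d, argv[0]=%T",
                  argc, argc > 0 ? argv[0] : enif_make_atom(env, "none"));
    fprintf(stderr, "%s\n", buf);
}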
diff --git a/erts/doc/src/erl_tracer.xml b/erts/doc/src/erl_tracer.xml
index 2075b962d8..d4c8bbad31 100644
--- a/erts/doc/src/erl_tracer.xml
+++ b/erts/doc/src/erl_tracer.xml
@@ -67,7 +67,7 @@
<desc>
<p>The different trace tags that the tracer will be called with.
Each trace tag is described in greater detail in
- <seealso marker="#trace">Module:trace/6</seealso>
+ <seealso marker="#Module:trace/6">Module:trace/6</seealso>
</p>
</desc>
</datatype>
@@ -81,7 +81,7 @@
<datatype>
<name name="trace_opts" />
<desc>
- <p>The options for the tracee.
+ <p>The options for the tracee.</p>
<taglist>
<tag><c>timestamp</c></tag>
<item>If not set to <c>undefined</c>, the tracer has been requested to
@@ -90,10 +90,9 @@
<item>If not set to <c>true</c>, the tracer has been requested to
include the output of a match specification that was run.</item>
<tag><c>scheduler_id</c></tag>
- <item>Set to a number of the scheduler id is to be included by the tracer.
+ <item>Set to a number if the scheduler id is to be included by the tracer.
Otherwise it is set to <c>undefined</c>.</item>
</taglist>
- </p>
</desc>
</datatype>
<datatype>
@@ -114,30 +113,30 @@
<p>The following functions
should be exported from a <c>erl_tracer</c> callback module.</p>
<taglist>
- <tag><seealso marker="#enabled"><c>Module:enabled/3</c></seealso></tag>
+ <tag><seealso marker="#Module:enabled/3"><c>Module:enabled/3</c></seealso></tag>
<item>Mandatory</item>
- <tag><seealso marker="#trace"><c>Module:trace/6</c></seealso></tag>
+ <tag><seealso marker="#Module:trace/6"><c>Module:trace/6</c></seealso></tag>
<item>Mandatory</item>
- <tag><seealso marker="#enabled_procs"><c>Module:enabled_procs/3</c></seealso></tag>
+ <tag><seealso marker="#Module:enabled_procs/3"><c>Module:enabled_procs/3</c></seealso></tag>
<item>Optional</item>
- <tag><seealso marker="#trace_procs"><c>Module:trace_procs/6</c></seealso></tag>
+ <tag><seealso marker="#Module:trace_procs/6"><c>Module:trace_procs/6</c></seealso></tag>
<item>Optional</item>
- <tag><seealso marker="#enabled_ports"><c>Module:enabled_ports/3</c></seealso></tag>
+ <tag><seealso marker="#Module:enabled_ports/3"><c>Module:enabled_ports/3</c></seealso></tag>
<item>Optional</item>
- <tag><seealso marker="#trace_ports"><c>Module:trace_ports/6</c></seealso></tag>
+ <tag><seealso marker="#Module:trace_ports/6"><c>Module:trace_ports/6</c></seealso></tag>
<item>Optional</item>
- <tag><seealso marker="#enabled_running_ports"><c>Module:enabled_running_ports/3</c></seealso></tag>
+ <tag><seealso marker="#Module:enabled_running_ports/3"><c>Module:enabled_running_ports/3</c></seealso></tag>
<item>Optional</item>
- <tag><seealso marker="#trace_running_ports"><c>Module:trace_running_ports/6</c></seealso></tag>
+ <tag><seealso marker="#Module:trace_running_ports/6"><c>Module:trace_running_ports/6</c></seealso></tag>
<item>Optional</item>
- <tag><seealso marker="#enabled_running_procs"><c>Module:enabled_running_procs/3</c></seealso></tag>
+ <tag><seealso marker="#Module:enabled_running_procs/3"><c>Module:enabled_running_procs/3</c></seealso></tag>
<item>Optional</item>
- <tag><seealso marker="#trace_running_procs"><c>Module:trace_running_procs/6</c></seealso></tag>
+ <tag><seealso marker="#Module:trace_running_procs/6"><c>Module:trace_running_procs/6</c></seealso></tag>
<item>Optional</item>
</taglist>
</section>
- <marker id="enabled"></marker>
+
<funcs>
<func>
<name>Module:enabled(TraceTag, TracerState, Tracee) -> Result</name>
@@ -155,19 +154,17 @@
overhead associated with tracing. If <c>trace</c> is returned the
necessary trace data will be created and the trace call-back of the tracer
will be called. If <c>discard</c> is returned, this trace call
- will be discarded and no call to trace will be done. If
- <c>remove</c> is returned, the VM will attempt to remove this tracer
- from the tracee, together with any trace flags set on the tracee.
+ will be discarded and no call to trace will be done.
</p>
<p><c>trace_status</c> is a special type of <c>TraceTag</c> which is used
to check if the tracer should still be active. It is called in multiple
scenarios, but most significantly it is used when tracing is started
- using this tracer.</p>
+ using this tracer. If <c>remove</c> is returned when the <c>trace_status</c>
+ is checked, the tracer will be removed from the tracee.</p>
<p>This function may be called multiple times per tracepoint, so it
is important that it is both fast and side effect free.</p>
</desc>
</func>
- <marker id="trace"></marker>
<func>
<name>Module:trace(TraceTag, TracerState, Tracee, FirstTraceTerm, SecondTraceTerm, Opts) -> Result</name>
<fsummary>Check if a trace event should be generated.</fsummary>
@@ -182,7 +179,7 @@
</type>
<desc>
<p>This callback will be called when a tracepoint is triggered and
- the <seealso marker="#enabled">Module:enabled/3</seealso>
+ the <seealso marker="#Module:enabled/3">Module:enabled/3</seealso>
callback returned <c>trace</c>. In it any side effects needed by
the tracer should be done. The tracepoint payload is located in
the <c>FirstTraceTerm</c> and <c>SecondTraceTerm</c>. The content
@@ -213,7 +210,6 @@
</desc>
</func>
- <marker id="enabled_procs"></marker>
<func>
<name>Module:enabled_procs(TraceTag, TracerState, Tracee) -> Result</name>
<fsummary>Check if a trace event should be generated.</fsummary>
@@ -231,7 +227,6 @@
</desc>
</func>
- <marker id="trace_procs"></marker>
<func>
<name>Module:trace_procs(TraceTag, TracerState, Tracee, FirstTraceTerm, SecondTraceTerm, Opts) -> Result</name>
<fsummary>Check if a trace event should be generated.</fsummary>
@@ -246,13 +241,12 @@
</type>
<desc>
<p>This callback will be called when a tracepoint is triggered and
- the <seealso marker="#enabled_procs">Module:enabled_procs/3</seealso>
+ the <seealso marker="#Module:enabled_procs/3">Module:enabled_procs/3</seealso>
callback returned <c>trace</c>.</p>
<p>If <c>trace_procs/6</c> is not defined <c>trace/6</c> will be called instead.</p>
</desc>
</func>
- <marker id="enabled_ports"></marker>
<func>
<name>Module:enabled_ports(TraceTag, TracerState, Tracee) -> Result</name>
<fsummary>Check if a trace event should be generated.</fsummary>
@@ -270,7 +264,6 @@
</desc>
</func>
- <marker id="trace_ports"></marker>
<func>
<name>Module:trace_ports(TraceTag, TracerState, Tracee, FirstTraceTerm, SecondTraceTerm, Opts) -> Result</name>
<fsummary>Check if a trace event should be generated.</fsummary>
@@ -285,13 +278,12 @@
</type>
<desc>
<p>This callback will be called when a tracepoint is triggered and
- the <seealso marker="#enabled_ports">Module:enabled_ports/3</seealso>
+ the <seealso marker="#Module:enabled_ports/3">Module:enabled_ports/3</seealso>
callback returned <c>trace</c>.</p>
<p>If <c>trace_ports/6</c> is not defined <c>trace/6</c> will be called instead.</p>
</desc>
</func>
- <marker id="enabled_running_procs"></marker>
<func>
<name>Module:enabled_running_procs(TraceTag, TracerState, Tracee) -> Result</name>
<fsummary>Check if a trace event should be generated.</fsummary>
@@ -309,7 +301,6 @@
</desc>
</func>
- <marker id="trace_running_procs"></marker>
<func>
<name>Module:trace_running_procs(TraceTag, TracerState, Tracee, FirstTraceTerm, SecondTraceTerm, Opts) -> Result</name>
<fsummary>Check if a trace event should be generated.</fsummary>
@@ -324,13 +315,12 @@
</type>
<desc>
<p>This callback will be called when a tracepoint is triggered and
- the <seealso marker="#enabled_running_procs">Module:enabled_running_procs/3</seealso>
+ the <seealso marker="#Module:enabled_running_procs/3">Module:enabled_running_procs/3</seealso>
callback returned <c>trace</c>.</p>
<p>If <c>trace_running_procs/6</c> is not defined <c>trace/6</c> will be called instead.</p>
</desc>
</func>
- <marker id="enabled_running_ports"></marker>
<func>
<name>Module:enabled_running_ports(TraceTag, TracerState, Tracee) -> Result</name>
<fsummary>Check if a trace event should be generated.</fsummary>
@@ -348,7 +338,6 @@
</desc>
</func>
- <marker id="trace_running_ports"></marker>
<func>
<name>Module:trace_running_ports(TraceTag, TracerState, Tracee, FirstTraceTerm, SecondTraceTerm, Opts) -> Result</name>
<fsummary>Check if a trace event should be generated.</fsummary>
@@ -363,13 +352,12 @@
</type>
<desc>
<p>This callback will be called when a tracepoint is triggered and
- the <seealso marker="#enabled_running_ports">Module:enabled_running_ports/3</seealso>
+ the <seealso marker="#Module:enabled_running_ports/3">Module:enabled_running_ports/3</seealso>
callback returned <c>trace</c>.</p>
<p>If <c>trace_running_ports/6</c> is not defined <c>trace/6</c> will be called instead.</p>
</desc>
</func>
- <marker id="enabled_call"></marker>
<func>
<name>Module:enabled_call(TraceTag, TracerState, Tracee) -> Result</name>
<fsummary>Check if a trace event should be generated.</fsummary>
@@ -387,7 +375,6 @@
</desc>
</func>
- <marker id="trace_call"></marker>
<func>
<name>Module:trace_call(TraceTag, TracerState, Tracee, FirstTraceTerm, SecondTraceTerm, Opts) -> Result</name>
<fsummary>Check if a trace event should be generated.</fsummary>
@@ -402,13 +389,12 @@
</type>
<desc>
<p>This callback will be called when a tracepoint is triggered and
- the <seealso marker="#enabled_call">Module:enabled_call/3</seealso>
+ the <seealso marker="#Module:enabled_call/3">Module:enabled_call/3</seealso>
callback returned <c>trace</c>.</p>
<p>If <c>trace_call/6</c> is not defined <c>trace/6</c> will be called instead.</p>
</desc>
</func>
- <marker id="enabled_send"></marker>
<func>
<name>Module:enabled_send(TraceTag, TracerState, Tracee) -> Result</name>
<fsummary>Check if a trace event should be generated.</fsummary>
@@ -426,7 +412,6 @@
</desc>
</func>
- <marker id="trace_send"></marker>
<func>
<name>Module:trace_send(TraceTag, TracerState, Tracee, FirstTraceTerm, SecondTraceTerm, Opts) -> Result</name>
<fsummary>Check if a trace event should be generated.</fsummary>
@@ -441,13 +426,12 @@
</type>
<desc>
<p>This callback will be called when a tracepoint is triggered and
- the <seealso marker="#enabled_send">Module:enabled_send/3</seealso>
+ the <seealso marker="#Module:enabled_send/3">Module:enabled_send/3</seealso>
callback returned <c>trace</c>.</p>
<p>If <c>trace_send/6</c> is not defined <c>trace/6</c> will be called instead.</p>
</desc>
</func>
- <marker id="enabled_receive"></marker>
<func>
<name>Module:enabled_receive(TraceTag, TracerState, Tracee) -> Result</name>
<fsummary>Check if a trace event should be generated.</fsummary>
@@ -465,7 +449,6 @@
</desc>
</func>
- <marker id="trace_receive"></marker>
<func>
<name>Module:trace_receive(TraceTag, TracerState, Tracee, FirstTraceTerm, SecondTraceTerm, Opts) -> Result</name>
<fsummary>Check if a trace event should be generated.</fsummary>
@@ -480,13 +463,12 @@
</type>
<desc>
<p>This callback will be called when a tracepoint is triggered and
- the <seealso marker="#enabled_receive">Module:enabled_receive/3</seealso>
+ the <seealso marker="#Module:enabled_receive/3">Module:enabled_receive/3</seealso>
callback returned <c>trace</c>.</p>
<p>If <c>trace_receive/6</c> is not defined <c>trace/6</c> will be called instead.</p>
</desc>
</func>
- <marker id="enabled_garbage_collection"></marker>
<func>
<name>Module:enabled_garbage_collection(TraceTag, TracerState, Tracee) -> Result</name>
<fsummary>Check if a trace event should be generated.</fsummary>
@@ -504,7 +486,6 @@
</desc>
</func>
- <marker id="trace_garbage_collection"></marker>
<func>
<name>Module:trace_garbage_collection(TraceTag, TracerState, Tracee, FirstTraceTerm, SecondTraceTerm, Opts) -> Result</name>
<fsummary>Check if a trace event should be generated.</fsummary>
@@ -519,7 +500,7 @@
</type>
<desc>
<p>This callback will be called when a tracepoint is triggered and
- the <seealso marker="#enabled_garbage_collection">Module:enabled_garbage_collection/3</seealso>
+ the <seealso marker="#Module:enabled_garbage_collection/3">Module:enabled_garbage_collection/3</seealso>
callback returned <c>trace</c>.</p>
<p>If <c>trace_garbage_collection/6</c> is not defined <c>trace/6</c> will be called instead.</p>
</desc>
@@ -617,7 +598,7 @@ static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data,
}
/*
- * argv[0]: Trace Tag
+ * argv[0]: TraceTag
* argv[1]: TracerState
* argv[2]: Tracee
*/
@@ -626,8 +607,11 @@ static ERL_NIF_TERM enabled(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
ErlNifPid to_pid;
if (enif_get_local_pid(env, argv[1], &amp;to_pid))
if (!enif_is_process_alive(env, &amp;to_pid))
- /* tracer is dead so we should remove this tracepoint */
- return enif_make_atom(env, "remove");
+ if (enif_is_identical(enif_make_atom(env, "trace_status"), argv[0]))
+ /* tracer is dead so we should remove this tracepoint */
+ return enif_make_atom(env, "remove");
+ else
+ return enif_make_atom(env, "discard");
/* Only generate trace for when tracer != tracee */
if (enif_is_identical(argv[1], argv[2]))
@@ -645,7 +629,7 @@ static ERL_NIF_TERM enabled(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
}
/*
- * argv[0]: Trace Tag, should only be 'send'
+ * argv[0]: TraceTag, should only be 'send'
* argv[1]: TracerState, process to send {argv[2], argv[4]} to
* argv[2]: Tracee
* argv[3]: Message, ignored
diff --git a/erts/doc/src/erlang.xml b/erts/doc/src/erlang.xml
index 3276bc34b0..e0723127f2 100644
--- a/erts/doc/src/erlang.xml
+++ b/erts/doc/src/erlang.xml
@@ -4322,6 +4322,7 @@ os_prompt% </pre>
</desc>
</func>
+ <marker id="process_flag_min_heap_size"/>
<func>
<name name="process_flag" arity="2" clause_i="3"/>
<fsummary>Sets process flag <c>min_heap_size</c> for the calling process.</fsummary>
@@ -4340,9 +4341,95 @@ os_prompt% </pre>
<p>Returns the old value of the flag.</p>
</desc>
</func>
- <marker id="process_flag_message_queue_data"/>
+ <marker id="process_flag_max_heap_size"/>
<func>
<name name="process_flag" arity="2" clause_i="5"/>
+ <type name="max_heap_size"/>
+ <fsummary>Sets process flag <c>max_heap_size</c> for the calling process.</fsummary>
+ <desc>
+ <p>
+ This flag sets the maximum heap size for the calling process.
+ If <c><anno>MaxHeapSize</anno></c> is an integer, the system default
+ values for <c>kill</c> and <c>error_logger</c> are used.
+ <taglist>
+ <tag><c>size</c></tag>
+ <item>
+ <p>
+ The maximum size in words of the process. If set to zero, the
+	    heap size limit is disabled. A <c>badarg</c> exception will be raised if the value is
+ smaller than
+ <seealso marker="#process_flag_min_heap_size"><c>min_heap_size</c></seealso>.
+ The size check is only done when a garbage collection is triggered.
+ </p>
+ <p>
+ <c>size</c> is the entire heap of the process when garbage collection
+	    is triggered; this includes all generational heaps, the process stack,
+ any <seealso marker="#process_flag_message_queue_data">
+ messages that are considered to be part of the heap</seealso> and any
+ extra memory that the garbage collector needs during collection.
+ </p>
+ <p>
+ <c>size</c> is the same as can be retrieved using
+ <seealso marker="#process_info_total_heap_size">
+ <c>erlang:process_info(Pid, total_heap_size)</c></seealso>,
+ or by adding <c>heap_block_size</c>, <c>old_heap_block_size</c>
+ and <c>mbuf_size</c> from <seealso marker="#process_info_garbage_collection_info">
+ <c>erlang:process_info(Pid, garbage_collection_info)</c></seealso>.
+ </p>
+ </item>
+ <tag><c>kill</c></tag>
+ <item>
+ <p>
+ When set to <c>true</c> the runtime system will send an
+ untrappable exit signal with reason <c>kill</c> to the process
+ if the maximum heap size is reached. The garbage collection
+ that triggered the <c>kill</c> will not be completed, instead the
+ process will exit as soon as is possible. When set to <c>false</c>
+	    no exit signal will be sent to the process; instead, it will
+ continue executing.
+ </p>
+ <p>
+ If <c>kill</c> is not defined in the map
+ the system default will be used. The default system default
+ is <c>true</c>. It can be changed by either the erl
+ <seealso marker="erl#+hmaxk">+hmaxk</seealso> option,
+ or <seealso marker="#system_flag_max_heap_size"><c>
+ erlang:system_flag(max_heap_size, MaxHeapSize)</c></seealso>.
+ </p>
+ </item>
+ <tag><c>error_logger</c></tag>
+ <item>
+ <p>
+ When set to <c>true</c> the runtime system will send a
+ message to the current <seealso marker="kernel:error_logger"><c>error_logger</c></seealso>
+ containing details about the process when the maximum
+ heap size is reached. One <c>error_logger</c> report will
+ be sent each time the limit is reached.
+ </p>
+ <p>
+ If <c>error_logger</c> is not defined in the map the system
+ default will be used. The default system default is <c>true</c>.
+ It can be changed by either the erl <seealso marker="erl#+hmaxel">+hmaxel</seealso>
+ option, or <seealso marker="#system_flag_max_heap_size"><c>
+ erlang:system_flag(max_heap_size, MaxHeapSize)</c></seealso>.
+ </p>
+ </item>
+ <p>
+ The heap size of a process is quite hard to predict, especially the
+ amount of memory that is used during the garbage collection. When
+ contemplating using this option, it is recommended to first run
+ it in production with <c>kill</c> set to <c>false</c> and inspect
+ the <c>error_logger</c> reports to see what the normal peak sizes
+	    of the processes in the system are and then tune the value
+ accordingly.
+ </p>
+ </taglist>
+ </p>
+ </desc>
+ </func>
+ <marker id="process_flag_message_queue_data"/>
+ <func>
+ <name name="process_flag" arity="2" clause_i="6"/>
<fsummary>Set process flag <c>message_queue_data</c> for the calling process</fsummary>
<type name="message_queue_data"/>
<desc>
@@ -4392,7 +4479,7 @@ os_prompt% </pre>
</desc>
</func>
<func>
- <name name="process_flag" arity="2" clause_i="6"/>
+ <name name="process_flag" arity="2" clause_i="7"/>
<fsummary>Sets process flag <c>priority</c> for the calling process.</fsummary>
<type name="priority_level"/>
<desc>
@@ -4466,7 +4553,7 @@ os_prompt% </pre>
</func>
<func>
- <name name="process_flag" arity="2" clause_i="7"/>
+ <name name="process_flag" arity="2" clause_i="8"/>
<fsummary>Sets process flag <c>save_calls</c> for the calling process.</fsummary>
<desc>
<p><c><anno>N</anno></c> must be an integer in the interval 0..10000.
@@ -4497,7 +4584,7 @@ os_prompt% </pre>
</func>
<func>
- <name name="process_flag" arity="2" clause_i="8"/>
+ <name name="process_flag" arity="2" clause_i="9"/>
<fsummary>Sets process flag <c>sensitive</c> for the calling process.</fsummary>
<desc>
<p>Sets or clears flag <c>sensitive</c> for the current process.
@@ -4551,6 +4638,7 @@ os_prompt% </pre>
<type name="process_info_result_item"/>
<type name="priority_level"/>
<type name="stack_item"/>
+ <type name="max_heap_size" />
<type name="message_queue_data" />
<desc>
<p>Returns a list containing <c><anno>InfoTuple</anno></c>s with
@@ -4604,6 +4692,7 @@ os_prompt% </pre>
<type name="process_info_result_item"/>
<type name="stack_item"/>
<type name="priority_level"/>
+ <type name="max_heap_size" />
<type name="message_queue_data" />
<desc>
<p>Returns information about the process identified by
@@ -4696,6 +4785,7 @@ os_prompt% </pre>
The content of <c><anno>GCInfo</anno></c> can be changed without
prior notice.</p>
</item>
+ <marker id="process_info_garbage_collection_info"/>
<tag><c>{garbage_collection_info, <anno>GCInfo</anno>}</c></tag>
<item>
<p><c><anno>GCInfo</anno></c> is a list containing miscellaneous
@@ -4875,6 +4965,7 @@ os_prompt% </pre>
total suspend count on <c><anno>Suspendee</anno></c>,
only the parts contributed by <c><anno>Pid</anno></c>.</p>
</item>
+ <marker id="process_info_total_heap_size"/>
<tag><c>{total_heap_size, <anno>Size</anno>}</c></tag>
<item>
<p><c><anno>Size</anno></c> is the total size, in words, of all heap
@@ -5569,6 +5660,7 @@ true</pre>
<name name="spawn_opt" arity="2"/>
<fsummary>Creates a new process with a fun as entry point.</fsummary>
<type name="priority_level"/>
+ <type name="max_heap_size" />
<type name="message_queue_data" />
<type name="spawn_opt_option" />
<desc>
@@ -5586,6 +5678,7 @@ true</pre>
<name name="spawn_opt" arity="3"/>
<fsummary>Creates a new process with a fun as entry point on a given node.</fsummary>
<type name="priority_level"/>
+ <type name="max_heap_size" />
<type name="message_queue_data" />
<type name="spawn_opt_option" />
<desc>
@@ -5602,6 +5695,7 @@ true</pre>
<name name="spawn_opt" arity="4"/>
<fsummary>Creates a new process with a function as entry point.</fsummary>
<type name="priority_level"/>
+ <type name="max_heap_size" />
<type name="message_queue_data" />
<type name="spawn_opt_option" />
<desc>
@@ -5705,6 +5799,16 @@ true</pre>
fine-tuning an application and to measure the execution
time with various <c><anno>VSize</anno></c> values.</p>
</item>
+ <tag><c>{max_heap_size, <anno>Size</anno>}</c></tag>
+ <item>
+ <p>Sets the <c>max_heap_size</c> process flag. The default
+ <c>max_heap_size</c> is determined by the
+ <seealso marker="erl#+hmax"><c>+hmax</c></seealso> <c>erl</c>
+ command line argument. For more information, see the
+ documentation of
+ <seealso marker="#process_flag_max_heap_size"><c>process_flag(max_heap_size,
+ <anno>Size</anno>)</c></seealso>.</p>
+ </item>
<tag><c>{message_queue_data, <anno>MQD</anno>}</c></tag>
<item>
<p>Sets the state of the <c>message_queue_data</c> process
@@ -5725,6 +5829,7 @@ true</pre>
<name name="spawn_opt" arity="5"/>
<fsummary>Creates a new process with a function as entry point on a given node.</fsummary>
<type name="priority_level"/>
+ <type name="max_heap_size" />
<type name="message_queue_data" />
<type name="spawn_opt_option" />
<desc>
@@ -6506,8 +6611,25 @@ ok
</desc>
</func>
+ <marker id="system_flag_max_heap_size"></marker>
<func>
<name name="system_flag" arity="2" clause_i="8"/>
+ <type name="max_heap_size"/>
+ <fsummary>Sets system flag <c>max_heap_size</c></fsummary>
+ <desc>
+ <p>
+ Sets the default maximum heap size settings for processes.
+ The size is given in words. The new <c>max_heap_size</c>
+        affects only processes spawned after the change has been made.
+        <c>max_heap_size</c> can be set for individual processes using
+        <seealso marker="#spawn_opt/4">spawn_opt/N</seealso> or
+        <seealso marker="#process_flag_max_heap_size">process_flag/2</seealso>.</p>
+ <p>Returns the old value of the flag.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="system_flag" arity="2" clause_i="9"/>
<fsummary>Sets system flag <c>multi_scheduling</c>.</fsummary>
<desc>
<p><marker id="system_flag_multi_scheduling"></marker>
@@ -6557,7 +6679,7 @@ ok
</func>
<func>
- <name name="system_flag" arity="2" clause_i="9"/>
+ <name name="system_flag" arity="2" clause_i="10"/>
<fsummary>Sets system flag <c>scheduler_bind_type</c>.</fsummary>
<type name="scheduler_bind_type"/>
<desc>
@@ -6675,7 +6797,7 @@ ok
</func>
<func>
- <name name="system_flag" arity="2" clause_i="10"/>
+ <name name="system_flag" arity="2" clause_i="11"/>
<fsummary>Sets system flag <c>scheduler_wall_time</c>.</fsummary>
<desc><p><marker id="system_flag_scheduler_wall_time"></marker>
Turns on or off scheduler wall time measurements.</p>
@@ -6685,7 +6807,7 @@ ok
</func>
<func>
- <name name="system_flag" arity="2" clause_i="11"/>
+ <name name="system_flag" arity="2" clause_i="12"/>
<fsummary>Sets system flag <c>schedulers_online</c>.</fsummary>
<desc>
<p><marker id="system_flag_schedulers_online"></marker>
@@ -6710,7 +6832,7 @@ ok
</func>
<func>
- <name name="system_flag" arity="2" clause_i="12"/>
+ <name name="system_flag" arity="2" clause_i="13"/>
<fsummary>Sets system flag <c>trace_control_word</c>.</fsummary>
<desc>
<p>Sets the value of the node trace control word to
@@ -6724,7 +6846,7 @@ ok
</func>
<func>
- <name name="system_flag" arity="2" clause_i="12"/>
+ <name name="system_flag" arity="2" clause_i="14"/>
<fsummary>Finalize the Time Offset</fsummary>
<desc>
<p><marker id="system_flag_time_offset"></marker>
@@ -6841,11 +6963,7 @@ ok
As from <c>ERTS</c> 5.6.1, the return value is a list
of <c>{instance, InstanceNo, InstanceInfo}</c> tuples,
where <c>InstanceInfo</c> contains information about
- a specific instance of the allocator. As from
- <c>ERTS</c> 5.10.4, the returned list when calling
- <c>erlang:system_info({allocator, mseg_alloc})</c> also
- includes an <c>{erts_mmap, _}</c> tuple as one element
- in the list. If <c><anno>Alloc</anno></c> is not a
+ a specific instance of the allocator. If <c><anno>Alloc</anno></c> is not a
recognized allocator, <c>undefined</c> is returned.
If <c><anno>Alloc</anno></c> is disabled,
<c>false</c> is returned.</p>
@@ -6857,7 +6975,13 @@ ok
briefly documented.</p>
<p>The recognized allocators are listed in
<seealso marker="erts:erts_alloc">erts_alloc(3)</seealso>.
- After reading the <c>erts_alloc(3)</c> documentation,
+          Information about super carriers can be obtained from
+          <c>ERTS</c> 8.0 with <c>{allocator, erts_mmap}</c>. From
+          <c>ERTS</c> 5.10.4, the list returned when calling with
+          <c>{allocator, mseg_alloc}</c> also includes an
+          <c>{erts_mmap, _}</c> tuple as one element in the list.</p>
+
+ <p>After reading the <c>erts_alloc(3)</c> documentation,
the returned information
more or less speaks for itself, but it can be worth
explaining some things. Call counts are presented by two
@@ -6989,6 +7113,81 @@ ok
</func>
<func>
+ <name name="system_info" arity="1" clause_i="27"/>
+ <name name="system_info" arity="1" clause_i="28"/>
+ <name name="system_info" arity="1" clause_i="36"/>
+ <name name="system_info" arity="1" clause_i="37"/>
+ <name name="system_info" arity="1" clause_i="38"/>
+ <name name="system_info" arity="1" clause_i="39"/>
+ <type name="message_queue_data"/>
+ <type name="max_heap_size"/>
+ <fsummary>Information about the default process heap settings.</fsummary>
+ <desc>
+ <taglist>
+ <tag><c>fullsweep_after</c></tag>
+ <item>
+ <p>Returns <c>{fullsweep_after, integer() >= 0}</c>, which is
+ the <c>fullsweep_after</c> garbage collection setting used
+ by default. For more information, see
+ <c>garbage_collection</c> described in the following.</p>
+ </item>
+ <tag><c>garbage_collection</c></tag>
+ <item>
+ <p>Returns a list describing the default garbage collection
+ settings. A process spawned on the local node by a
+ <c>spawn</c> or <c>spawn_link</c> uses these
+ garbage collection settings. The default settings can be
+ changed by using
+ <seealso marker="#system_flag/2">system_flag/2</seealso>.
+ <seealso marker="#spawn_opt/4">spawn_opt/4</seealso>
+ can spawn a process that does not use the default
+ settings.</p>
+ </item>
+ <tag><c>max_heap_size</c></tag>
+ <item>
+ <p>Returns <c>{max_heap_size, <anno>MaxHeapSize</anno>}</c>,
+ where <c><anno>MaxHeapSize</anno></c> is the current
+ system-wide maximum heap size setting for spawned processes.
+ This setting can be changed using the <c>erl</c> command line
+ flags <seealso marker="erl#+hmax"><c>+hmax</c></seealso>,
+ <seealso marker="erl#+hmaxk"><c>+hmaxk</c></seealso> and
+ <seealso marker="erl#+hmaxel"><c>+hmaxel</c></seealso>. It can
+ also be changed at run-time using
+ <seealso marker="#system_flag_max_heap_size">
+ <c>erlang:system_flag(max_heap_size, MaxHeapSize)</c></seealso>.
+ For more details about the <c>max_heap_size</c> process flag
+ see <seealso marker="#process_flag_max_heap_size">
+ <c>process_flag(max_heap_size, MaxHeapSize)</c></seealso>.
+ </p>
+ </item>
+ <tag><c>min_heap_size</c></tag>
+ <item>
+ <p>Returns <c>{min_heap_size, <anno>MinHeapSize</anno>}</c>,
+ where <c><anno>MinHeapSize</anno></c> is the current
+ system-wide minimum heap size for spawned processes.</p>
+ </item>
+ <tag><marker id="system_info_message_queue_data"><c>message_queue_data</c></marker></tag>
+ <item>
+ <p>Returns the default value of the <c>message_queue_data</c>
+ process flag which is either <c>off_heap</c>, <c>on_heap</c>, or <c>mixed</c>.
+ This default is set by the <c>erl</c> command line argument
+ <seealso marker="erl#+hmqd"><c>+hmqd</c></seealso>. For more information on the
+ <c>message_queue_data</c> process flag, see documentation of
+ <seealso marker="#process_flag_message_queue_data"><c>process_flag(message_queue_data,
+ MQD)</c></seealso>.</p>
+ </item>
+ <tag><c>min_bin_vheap_size</c></tag>
+ <item>
+ <p>Returns <c>{min_bin_vheap_size,
+ <anno>MinBinVHeapSize</anno>}</c>, where
+ <c><anno>MinBinVHeapSize</anno></c> is the current system-wide
+ minimum binary virtual heap size for spawned processes.</p>
+ </item>
+ </taglist>
+ </desc>
+ </func>
+
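(Editorial aside: querying the defaults described in the clause above. The values shown are illustrative defaults from one particular build; they change with the corresponding erl flags.)

    1> erlang:system_info(max_heap_size).
    {max_heap_size,#{error_logger => true,kill => true,size => 0}}
    2> erlang:system_info(min_heap_size).
    {min_heap_size,233}
    3> erlang:system_info(message_queue_data).
    on_heap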
+ <func>
<name name="system_info" arity="1" clause_i="6"/>
<name name="system_info" arity="1" clause_i="7"/>
<name name="system_info" arity="1" clause_i="8"/>
@@ -7008,8 +7207,6 @@ ok
<name name="system_info" arity="1" clause_i="24"/>
<name name="system_info" arity="1" clause_i="25"/>
<name name="system_info" arity="1" clause_i="26"/>
- <name name="system_info" arity="1" clause_i="27"/>
- <name name="system_info" arity="1" clause_i="28"/>
<name name="system_info" arity="1" clause_i="29"/>
<name name="system_info" arity="1" clause_i="30"/>
<name name="system_info" arity="1" clause_i="31"/>
@@ -7017,10 +7214,6 @@ ok
<name name="system_info" arity="1" clause_i="33"/>
<name name="system_info" arity="1" clause_i="34"/>
<name name="system_info" arity="1" clause_i="35"/>
- <name name="system_info" arity="1" clause_i="36"/>
- <name name="system_info" arity="1" clause_i="37"/>
- <name name="system_info" arity="1" clause_i="38"/>
- <name name="system_info" arity="1" clause_i="39"/>
<name name="system_info" arity="1" clause_i="40"/>
<name name="system_info" arity="1" clause_i="41"/>
<name name="system_info" arity="1" clause_i="42"/>
@@ -7050,6 +7243,7 @@ ok
<name name="system_info" arity="1" clause_i="66"/>
<name name="system_info" arity="1" clause_i="67"/>
<name name="system_info" arity="1" clause_i="68"/>
+ <name name="system_info" arity="1" clause_i="69"/>
<fsummary>Information about the system.</fsummary>
<desc>
<p>Returns various information about the current system
@@ -7286,25 +7480,6 @@ ok
<c>ERL_MAX_ETS_TABLES</c> before starting the Erlang
runtime system.</p>
</item>
- <tag><c>fullsweep_after</c></tag>
- <item>
- <p>Returns <c>{fullsweep_after, integer() >= 0}</c>, which is
- the <c>fullsweep_after</c> garbage collection setting used
- by default. For more information, see
- <c>garbage_collection</c> described in the following.</p>
- </item>
- <tag><c>garbage_collection</c></tag>
- <item>
- <p>Returns a list describing the default garbage collection
- settings. A process spawned on the local node by a
- <c>spawn</c> or <c>spawn_link</c> uses these
- garbage collection settings. The default settings can be
- changed by using
- <seealso marker="#system_flag/2">system_flag/2</seealso>.
- <seealso marker="#spawn_opt/4">spawn_opt/4</seealso>
- can spawn a process that does not use the default
- settings.</p>
- </item>
<tag><c>heap_sizes</c></tag>
<item>
<p>Returns a list of integers representing valid heap sizes
@@ -7379,29 +7554,6 @@ ok
<item>
<p>Returns a string containing the Erlang machine name.</p>
</item>
- <tag><c>min_heap_size</c></tag>
- <item>
- <p>Returns <c>{min_heap_size, <anno>MinHeapSize</anno>}</c>,
- where <c><anno>MinHeapSize</anno></c> is the current
- system-wide minimum heap size for spawned processes.</p>
- </item>
- <tag><marker id="system_info_message_queue_data"><c>message_queue_data</c></marker></tag>
- <item>
- <p>Returns the default value of the <c>message_queue_data</c>
- process flag which is either <c>off_heap</c>, <c>on_heap</c>, or <c>mixed</c>.
- This default is set by the <c>erl</c> command line argument
- <seealso marker="erl#+hmqd"><c>+hmqd</c></seealso>. For more information on the
- <c>message_queue_data</c> process flag, see documentation of
- <seealso marker="#process_flag_message_queue_data"><c>process_flag(message_queue_data,
- MQD)</c></seealso>.</p>
- </item>
- <tag><c>min_bin_vheap_size</c></tag>
- <item>
- <p>Returns <c>{min_bin_vheap_size,
- <anno>MinBinVHeapSize</anno>}</c>, where
- <c><anno>MinBinVHeapSize</anno></c> is the current system-wide
- minimum binary virtual heap size for spawned processes.</p>
- </item>
<tag><c>modified_timing_level</c></tag>
<item>
<p>Returns the modified timing-level (an integer) if
@@ -8026,12 +8178,13 @@ ok
<c>GcPid</c> and <c>Info</c>
are the same as for <c>long_gc</c> earlier, except that
the tuple tagged with <c>timeout</c> is not present.</p>
- <p>As of <c>ERTS</c> 5.6, the monitor message is sent
- if the sum of the sizes of all memory blocks allocated
- for all heap generations is equal to or higher than <c>Size</c>.
- Previously the monitor message was sent if the memory block
- allocated for the youngest generation was equal to or higher
- than <c>Size</c>.</p>
+ <p>The monitor message is sent if the sum of the sizes of
+ all memory blocks allocated for all heap generations after
+ a garbage collection is equal to or higher than <c>Size</c>.</p>
+ <p>When a process is killed by <seealso marker="#process_flag_max_heap_size">
+ <c>max_heap_size</c></seealso>, it is killed before the
+ garbage collection is complete, and thus no <c>large_heap</c>
+ message is sent.</p>
</item>
<tag><c>busy_port</c></tag>
<item>
@@ -8558,7 +8711,9 @@ timestamp() ->
<tag><c>garbage_collection</c></tag>
<item>
<p>Traces garbage collections of processes.</p>
- <p>Message tags: <c><seealso marker="#trace_3_trace_messages_gc_minor_start">gc_minor_start</seealso></c> and <c><seealso marker="#trace_3_trace_messages_gc_minor_end">gc_minor_end</seealso></c>.</p>
+ <p>Message tags: <c><seealso marker="#trace_3_trace_messages_gc_minor_start">gc_minor_start</seealso></c>,
+ <c><seealso marker="#trace_3_trace_messages_gc_max_heap_size">gc_max_heap_size</seealso></c> and
+ <c><seealso marker="#trace_3_trace_messages_gc_minor_end">gc_minor_end</seealso></c>.</p>
</item>
<tag><c>timestamp</c></tag>
<item>
@@ -8927,6 +9082,19 @@ timestamp() ->
<p>All sizes are in words.</p>
</item>
<tag>
+ <marker id="trace_3_trace_messages_gc_max_heap_size"></marker>
+ <c>{trace, Pid, gc_max_heap_size, Info}</c>
+ </tag>
+ <item>
+ <p>
+ Sent when the <seealso marker="#process_flag_max_heap_size"><c>max_heap_size</c></seealso>
+ is reached during garbage collection. <c>Info</c> contains the
+ same kind of list as in message <c>gc_start</c>,
+ but the sizes reflect the heap sizes at the point where
+ <c>max_heap_size</c> was reached.
+ </p>
+ </item>
+ <tag>
<marker id="trace_3_trace_messages_gc_minor_end"></marker>
<c>{trace, Pid, gc_minor_end, Info}</c>
</tag>
@@ -9034,16 +9202,16 @@ timestamp() ->
<type name="trace_info_flag"/>
<type name="trace_match_spec"/>
<desc>
- <p>Returns trace information about a port, process or function.</p>
- <p>To get information about a port or process,
- <c><anno>PidPortOrFunc</anno></c> is to
+ <p>Returns trace information about a port, process, function or event.</p>
+ <p><em>To get information about a port or process</em>,
+ <c><anno>PidPortFuncEvent</anno></c> is to
be a process identifier (pid), port identifier or one of
the atoms <c>new</c>, <c>new_processes</c>, <c>new_ports</c>.
The atom <c>new</c> or <c>new_processes</c> means that the default trace
state for processes to be created is returned. The atom <c>new_ports</c>
means that the default trace state for ports to be created is returned.
</p>
- <p>The following <c>Item</c>s are valid:</p>
+ <p>The following <c>Item</c>s are valid for ports and processes:</p>
<taglist>
<tag><c>flags</c></tag>
<item>
@@ -9067,12 +9235,15 @@ timestamp() ->
value is <c>[]</c>.</p>
</item>
</taglist>
- <p>To get information about a function, <c><anno>PidPortOrFunc</anno></c> is to
+ <p><em>To get information about a function</em>, <c><anno>PidPortFuncEvent</anno></c> is to
be the three-element tuple <c>{Module, Function, Arity}</c> or
the atom <c>on_load</c>. No wild cards are allowed. Returns
<c>undefined</c> if the function does not exist, or
- <c>false</c> if the function is not traced.</p>
- <p>The following <c>Item</c>s are valid::</p>
+ <c>false</c> if the function is not traced. If <c><anno>PidPortFuncEvent</anno></c>
+ is <c>on_load</c>, the information returned refers to
+ the default value for code that will be loaded.</p>
+
+ <p>The following <c>Item</c>s are valid for functions:</p>
<taglist>
<tag><c>traced</c></tag>
<item>
@@ -9131,39 +9302,177 @@ timestamp() ->
is active for this function.</p>
</item>
</taglist>
+ <p><em>To get information about an event</em>, <c><anno>PidPortFuncEvent</anno></c> is to
+ be one of the atoms <c>send</c> or <c>'receive'</c>.</p>
+ <p>The only valid <c>Item</c> for events is:</p>
+ <taglist>
+ <tag><c>match_spec</c></tag>
+ <item>
+ <p>Returns the match specification for this event, if it
+ has one, or <c>true</c> if no match specification has been
+ set.</p>
+ </item>
+ </taglist>
<p>The return value is <c>{<anno>Item</anno>, Value}</c>, where
<c>Value</c> is the requested information as described earlier.
If a pid for a dead process was given, or the name of a
non-existing function, <c>Value</c> is <c>undefined</c>.</p>
- <p>If <c><anno>PidPortOrFunc</anno></c> is <c>on_load</c>, the information
- returned refers to the default value for code that will be
- loaded.</p>
</desc>
</func>
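(Editorial aside: a sketch of the new event queries described above, after a send match specification has been set with erlang:trace_pattern/3 as in the examples that follow. The returned tuples follow the {Item, Value} shape described in the text.)

    1> erlang:trace_pattern(send, [{['_', {reply, '_'}], [], []}], []).
    1
    2> erlang:trace_info(send, match_spec).
    {match_spec,[{['_',{reply,'_'}],[],[]}]}
    %% No match specification has been set for 'receive', so true is returned.
    3> erlang:trace_info('receive', match_spec).
    {match_spec,true}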
<func>
<name name="trace_pattern" arity="2" clause_i="1"/>
- <fsummary>Sets trace patterns for global call tracing.</fsummary>
+ <fsummary>Sets trace patterns for call, send or 'receive' tracing.</fsummary>
<type name="trace_pattern_mfa"/>
<type name="trace_match_spec"/>
<desc>
<p>The same as
- <seealso marker="#trace_pattern/3">erlang:trace_pattern(MFA, MatchSpec, [])</seealso>,
+ <seealso marker="#trace_pattern/3">erlang:trace_pattern(Event, MatchSpec, [])</seealso>,
retained for backward compatibility.</p>
</desc>
</func>
<func>
- <name name="trace_pattern" arity="3"/>
+ <name name="trace_pattern" arity="3" clause_i="1"/>
+ <fsummary>Sets trace pattern for message sending.</fsummary>
+ <type name="trace_match_spec"/>
+ <desc>
+ <p>Sets trace pattern for <em>message sending</em>.
+ Must be combined with
+ <seealso marker="#trace/3">erlang:trace/3</seealso>
+ to set the <c>send</c> trace flag for one or more processes.
+ By default all messages, sent from <c>send</c> traced processes,
+ are traced. Use <c>erlang:trace_pattern/3</c> to limit
+ traced send events based on the message content, the sender
+ and/or the receiver.</p>
+ <p>Argument <c><anno>MatchSpec</anno></c> can take the
+ following forms:</p>
+ <taglist>
+ <tag><c><anno>MatchSpecList</anno></c></tag>
+ <item>
+ <p>A list of match specifications. The matching is done
+ on the list <c>[Receiver, Msg]</c>. <c>Receiver</c>
+ is the process or port identity of the receiver and
+ <c>Msg</c> is the message term. The pid of the sending
+ process can be accessed with the guard function
+ <c>self/0</c>. An empty list is the same as <c>true</c>.
+ See the user's guide section
+ <seealso marker="erts:match_spec">Match Specifications in Erlang</seealso>
+ for more information.</p>
+ </item>
+ <tag><c>true</c></tag>
+ <item>
+ <p>Enables tracing for all sent messages (from <c>send</c>
+ traced processes). Any match specification is
+ removed. <em>This is the default</em>.</p>
+ </item>
+ <tag><c>false</c></tag>
+ <item>
+ <p>Disables tracing for all sent messages.
+ Any match specification is removed.</p>
+ </item>
+ </taglist>
+ <p>Argument <c><anno>FlagList</anno></c> must be <c>[]</c>
+ for send tracing.</p>
+ <p>The return value is always <c>1</c>.</p>
+ <p>Example: only trace messages to a specific process <c>Pid</c>:</p>
+ <pre>
+> <input>erlang:trace_pattern(send, [{[Pid, '_'],[],[]}], []).</input>
+1</pre>
+ <p>Only trace messages matching <c>{reply, _}</c>:</p>
+ <pre>
+> <input>erlang:trace_pattern(send, [{['_', {reply,'_'}],[],[]}], []).</input>
+1</pre>
+ <p>Only trace messages sent to the sender itself:</p>
+ <pre>
+> <input>erlang:trace_pattern(send, [{['$1', '_'],[{'=:=','$1',{self}}],[]}], []).</input>
+1</pre>
+ <p>Only trace messages sent to other nodes:</p>
+ <pre>
+> <input>erlang:trace_pattern(send, [{['$1', '_'],[{'=/=',{node,'$1'},{node}}],[]}], []).</input>
+1</pre>
+ <note><p>A match specification for <c>send</c> trace can use
+ all guard and body functions except <c>caller</c>.</p></note>
+ </desc>
+ </func>
+
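(Editorial aside: since the pattern only limits which send events are traced, it has no effect until some process actually carries the send trace flag. A typical combination is sketched below; Pid, Tracer and Receiver are placeholder pids, and the sketch is not part of the patch.)

    %% Deliver trace messages about everything Pid sends to Tracer ...
    1> erlang:trace(Pid, true, [send, {tracer, Tracer}]).
    1
    %% ... but only for messages addressed to Receiver.
    2> erlang:trace_pattern(send, [{[Receiver, '_'], [], []}], []).
    1
    %% Tracer now gets {trace, Pid, send, Msg, Receiver} for matching sends.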
+ <func>
+ <name name="trace_pattern" arity="3" clause_i="2"/>
+ <fsummary>Sets trace pattern for message receiving.</fsummary>
+ <type name="trace_match_spec"/>
+ <desc>
+ <p></p>
+ <p>Sets trace pattern for <em>message receiving</em>.
+ Must be combined with
+ <seealso marker="#trace/3">erlang:trace/3</seealso>
+ to set the <c>'receive'</c> trace flag for one or more processes.
+ By default all messages received by <c>'receive'</c> traced processes
+ are traced. Use <c>erlang:trace_pattern/3</c> to limit
+ traced receive events based on the message content, the sender
+ and/or the receiver.</p>
+ <p>Argument <c><anno>MatchSpec</anno></c> can take the
+ following forms:</p>
+ <taglist>
+ <tag><c><anno>MatchSpecList</anno></c></tag>
+ <item>
+ <p>A list of match specifications. The matching is done
+ on the list <c>[Node, Sender, Msg]</c>. <c>Node</c>
+ is the node name of the sender. <c>Sender</c> is the
+ process or port identity of the sender, or the atom
+ <c>undefined</c> if the sender is not known (which may
+ be the case for remote senders). <c>Msg</c> is the
+ message term. The pid of the receiving process can be
+ accessed with the guard function <c>self/0</c>. An empty
+ list is the same as <c>true</c>. See the user's guide section
+ <seealso marker="erts:match_spec">Match Specifications in Erlang</seealso>
+ for more information.</p>
+ </item>
+ <tag><c>true</c></tag>
+ <item>
+ <p>Enables tracing for all received messages (to <c>'receive'</c>
+ traced processes). Any match specification is
+ removed. <em>This is the default</em>.</p>
+ </item>
+ <tag><c>false</c></tag>
+ <item>
+ <p>Disables tracing for all received messages.
+ Any match specification is removed.</p>
+ </item>
+ </taglist>
+ <p>Argument <c><anno>FlagList</anno></c> must be <c>[]</c>
+ for receive tracing.</p>
+ <p>The return value is always <c>1</c>.</p>
+ <p>Example: only trace messages from a specific process <c>Pid</c>:</p>
+ <pre>
+> <input>erlang:trace_pattern('receive', [{['_',Pid, '_'],[],[]}], []).</input>
+1</pre>
+ <p>Only trace messages matching <c>{reply, _}</c>:</p>
+ <pre>
+> <input>erlang:trace_pattern('receive', [{['_','_', {reply,'_'}],[],[]}], []).</input>
+1</pre>
+ <p>Only trace messages from other nodes:</p>
+ <pre>
+> <input>erlang:trace_pattern('receive', [{['$1', '_', '_'],[{'=/=','$1',{node}}],[]}], []).</input>
+1</pre>
+ <note><p>A match specification for <c>'receive'</c> trace can
+ use all guard and body functions except <c>caller,
+ is_seq_trace, get_seq_token, set_seq_token, enable_trace,
+ disable_trace, trace, silent</c> and <c>process_dump</c>.</p></note>
+ </desc>
+ </func>
+
+ <func>
+ <name name="trace_pattern" arity="3" clause_i="3"/>
<fsummary>Sets trace patterns for tracing of function calls.</fsummary>
<type name="trace_pattern_mfa"/>
<type name="trace_match_spec"/>
<type name="trace_pattern_flag"/>
<desc>
- <p>Enables or disables call tracing for
- one or more functions. Must be combined with
+ <p>Enables or disables <em>call tracing</em> for one or more functions.
+ Must be combined with
<seealso marker="#trace/3">erlang:trace/3</seealso>
- to set the <c>call</c> trace flag for one or more processes.</p>
+ to set the <c>call</c> trace flag
+ for one or more processes.</p>
<p>Conceptually, call tracing works as follows. Inside
the Erlang Virtual Machine, a set of processes and
a set of functions are to be traced. If a traced process
@@ -9218,7 +9527,8 @@ timestamp() ->
</item>
<tag><c>true</c></tag>
<item>
- <p>Enables tracing for the matching functions.</p>
+ <p>Enables tracing for the matching functions.
+ Any match specification is removed.</p>
</item>
<tag><c><anno>MatchSpecList</anno></c></tag>
<item>
diff --git a/erts/emulator/Makefile.in b/erts/emulator/Makefile.in
index fb486c917f..2212aed5e0 100644
--- a/erts/emulator/Makefile.in
+++ b/erts/emulator/Makefile.in
@@ -50,6 +50,8 @@ LDFLAGS=@LDFLAGS@
ARFLAGS=rc
OMIT_OMIT_FP=no
+DIRTY_SCHEDULER_SUPPORT=@DIRTY_SCHEDULER_SUPPORT@
+
ifeq ($(TYPE),debug)
PURIFY =
TYPEMARKER = .debug
@@ -174,6 +176,10 @@ FLAVOR_MARKER=.smp
FLAVOR_FLAGS=-DERTS_SMP
ENABLE_ALLOC_TYPE_VARS += smp nofrag
M4FLAGS += -DERTS_SMP=1
+ifeq ($(DIRTY_SCHEDULER_SUPPORT),yes)
+THR_DEFS += -DERTS_DIRTY_SCHEDULERS
+endif
+
else
# If flavor isn't one of the above, it *is* plain flavor...
@@ -182,7 +188,6 @@ FLAVOR_MARKER=
FLAVOR_FLAGS=
ENABLE_ALLOC_TYPE_VARS += nofrag
M4FLAGS +=
-
endif
TF_MARKER=$(TYPEMARKER)$(FLAVOR_MARKER)
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index 3022c0a99a..8f65e71531 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -275,6 +275,7 @@ atom garbage_collection_info
atom gc_end
atom gc_major_end
atom gc_major_start
+atom gc_max_heap_size
atom gc_minor_end
atom gc_minor_start
atom gc_start
@@ -366,6 +367,7 @@ atom match_spec
atom match_spec_result
atom max
atom maximum
+atom max_heap_size
atom max_tables max_processes
atom mbuf_size
atom md5
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 87508dcf5f..40d44dda4c 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -38,7 +38,7 @@
#include "erl_thr_progress.h"
static void set_default_trace_pattern(Eterm module);
-static Eterm check_process_code(Process* rp, Module* modp, Uint flags, int *redsp);
+static Eterm check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls);
static void delete_code(Module* modp);
static void decrement_refc(BeamCodeHeader*);
static int any_heap_ref_ptrs(Eterm* start, Eterm* end, char* mod_start, Uint mod_size);
@@ -467,7 +467,7 @@ check_old_code_1(BIF_ALIST_1)
}
Eterm
-erts_check_process_code(Process *c_p, Eterm module, Uint flags, int *redsp)
+erts_check_process_code(Process *c_p, Eterm module, Uint flags, int *redsp, int fcalls)
{
Module* modp;
Eterm res;
@@ -483,7 +483,7 @@ erts_check_process_code(Process *c_p, Eterm module, Uint flags, int *redsp)
return am_false;
erts_rlock_old_code(code_ix);
res = (!modp->old.code_hdr ? am_false :
- check_process_code(c_p, modp, flags, redsp));
+ check_process_code(c_p, modp, flags, redsp, fcalls));
erts_runlock_old_code(code_ix);
return res;
@@ -506,7 +506,7 @@ BIF_RETTYPE erts_internal_check_process_code_2(BIF_ALIST_2)
goto badarg;
}
- res = erts_check_process_code(BIF_P, BIF_ARG_1, flags, &reds);
+ res = erts_check_process_code(BIF_P, BIF_ARG_1, flags, &reds, BIF_P->fcalls);
ASSERT(is_value(res));
@@ -625,8 +625,8 @@ BIF_RETTYPE call_on_load_function_1(BIF_ALIST_1)
{
Module* modp = erts_get_module(BIF_ARG_1, erts_active_code_ix());
- if (modp && modp->curr.code_hdr) {
- BIF_TRAP_CODE_PTR_0(BIF_P, modp->curr.code_hdr->on_load_function_ptr);
+ if (modp && modp->old.code_hdr) {
+ BIF_TRAP_CODE_PTR_0(BIF_P, modp->old.code_hdr->on_load_function_ptr);
}
else {
BIF_ERROR(BIF_P, BADARG);
@@ -651,14 +651,14 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
code_ix = erts_active_code_ix();
modp = erts_get_module(BIF_ARG_1, code_ix);
- if (!modp || !modp->curr.code_hdr) {
+ if (!modp || !modp->old.code_hdr) {
error:
erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
erts_release_code_write_permission();
BIF_ERROR(BIF_P, BADARG);
}
- if (modp->curr.code_hdr->on_load_function_ptr == NULL) {
+ if (modp->old.code_hdr->on_load_function_ptr == NULL) {
goto error;
}
if (BIF_ARG_2 != am_false && BIF_ARG_2 != am_true) {
@@ -667,44 +667,55 @@ BIF_RETTYPE finish_after_on_load_2(BIF_ALIST_2)
if (BIF_ARG_2 == am_true) {
int i;
+ struct erl_module_instance t;
+
+ /*
+ * Swap old and new code.
+ */
+ t = modp->curr;
+ modp->curr = modp->old;
+ modp->old = t;
/*
* The on_load function succeded. Fix up export entries.
*/
for (i = 0; i < export_list_size(code_ix); i++) {
Export *ep = export_list(i,code_ix);
- if (ep != NULL &&
- ep->code[0] == BIF_ARG_1 &&
- ep->code[4] != 0) {
+ if (ep == NULL || ep->code[0] != BIF_ARG_1) {
+ continue;
+ }
+ if (ep->code[4] != 0) {
ep->addressv[code_ix] = (void *) ep->code[4];
ep->code[4] = 0;
+ } else {
+ if (ep->addressv[code_ix] == ep->code+3 &&
+ ep->code[3] == (BeamInstr) em_apply_bif) {
+ continue;
+ }
+ ep->addressv[code_ix] = ep->code+3;
+ ep->code[3] = (BeamInstr) em_call_error_handler;
}
}
modp->curr.code_hdr->on_load_function_ptr = NULL;
set_default_trace_pattern(BIF_ARG_1);
} else if (BIF_ARG_2 == am_false) {
- BeamInstr* code;
- BeamInstr* end;
+ int i;
/*
- * The on_load function failed. Remove the loaded code.
- * This is an combination of delete and purge. We purge
- * the current code; the old code is not touched.
+	 * The on_load function failed. Remove, from the export entries,
+	 * references to the code that is about to be purged.
*/
- erts_total_code_size -= modp->curr.code_length;
- code = (BeamInstr*) modp->curr.code_hdr;
- end = (BeamInstr *) ((char *)code + modp->curr.code_length);
- erts_cleanup_funs_on_purge(code, end);
- beam_catches_delmod(modp->curr.catches, code, modp->curr.code_length,
- erts_active_code_ix());
- if (modp->curr.code_hdr->literals_start) {
- erts_free(ERTS_ALC_T_LITERAL, modp->curr.code_hdr->literals_start);
- }
- erts_free(ERTS_ALC_T_CODE, modp->curr.code_hdr);
- modp->curr.code_hdr = NULL;
- modp->curr.code_length = 0;
- modp->curr.catches = BEAM_CATCHES_NIL;
- erts_remove_from_ranges(code);
+
+ for (i = 0; i < export_list_size(code_ix); i++) {
+ Export *ep = export_list(i,code_ix);
+ if (ep == NULL || ep->code[0] != BIF_ARG_1) {
+ continue;
+ }
+ if (ep->code[3] == (BeamInstr) em_apply_bif) {
+ continue;
+ }
+ ep->code[4] = 0;
+ }
}
erts_smp_thr_progress_unblock();
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
@@ -753,7 +764,7 @@ check_mod_funs(Process *p, ErlOffHeap *off_heap, char *area, size_t area_size)
static Eterm
-check_process_code(Process* rp, Module* modp, Uint flags, int *redsp)
+check_process_code(Process* rp, Module* modp, Uint flags, int *redsp, int fcalls)
{
BeamInstr* start;
char* literals;
@@ -955,7 +966,7 @@ check_process_code(Process* rp, Module* modp, Uint flags, int *redsp)
if (need_gc & ERTS_ORDINARY_GC__) {
FLAGS(rp) |= F_NEED_FULLSWEEP;
- *redsp += erts_garbage_collect_nobump(rp, 0, rp->arg_reg, rp->arity);
+ *redsp += erts_garbage_collect_nobump(rp, 0, rp->arg_reg, rp->arity, fcalls);
done_gc |= ERTS_ORDINARY_GC__;
}
if (need_gc & ERTS_LITERAL_GC__) {
diff --git a/erts/emulator/beam/beam_bp.c b/erts/emulator/beam/beam_bp.c
index 2ee98ed7b5..8489897d3a 100644
--- a/erts/emulator/beam/beam_bp.c
+++ b/erts/emulator/beam/beam_bp.c
@@ -82,7 +82,7 @@ erts_smp_atomic32_t erts_staging_bp_index;
static ERTS_INLINE ErtsMonotonicTime
get_mtime(Process *c_p)
{
- return erts_get_monotonic_time(ERTS_PROC_GET_SCHDATA(c_p));
+ return erts_get_monotonic_time(erts_proc_sched_data(c_p));
}
/* *************************************************************************
@@ -248,7 +248,10 @@ erts_bp_match_export(BpFunctions* f, Eterm mfa[3], int specified)
void
erts_bp_free_matched_functions(BpFunctions* f)
{
- Free(f->matching);
+ if (f->matching) {
+ Free(f->matching);
+ }
+ else ASSERT(f->matched == 0);
}
void
@@ -652,8 +655,7 @@ erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
erts_smp_atomic_inc_nob(&bp->count->acount);
}
- if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE
- && ERTS_TRACER_PROC_IS_ENABLED(c_p)) {
+ if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE) {
Eterm w;
erts_trace_time_call(c_p, I, bp->time);
w = (BeamInstr) *c_p->cp;
@@ -750,8 +752,7 @@ erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
}
}
if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE &&
- IS_TRACED_FL(p, F_TRACE_CALLS) &&
- ERTS_TRACER_PROC_IS_ENABLED(p)) {
+ IS_TRACED_FL(p, F_TRACE_CALLS)) {
BeamInstr *pc = (BeamInstr *)ep->code+3;
erts_trace_time_call(p, pc, bp->time);
}
@@ -973,7 +974,8 @@ erts_trace_time_call(Process* c_p, BeamInstr* I, BpDataTime* bdt)
BpDataTime *pbdt = NULL;
ASSERT(c_p);
- ASSERT(erts_smp_atomic32_read_acqb(&c_p->state) & ERTS_PSFLG_RUNNING);
+ ASSERT(erts_smp_atomic32_read_acqb(&c_p->state) & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING));
/* get previous timestamp and breakpoint
* from the process psd */
@@ -1050,7 +1052,8 @@ erts_trace_time_return(Process *p, BeamInstr *pc)
BpDataTime *pbdt = NULL;
ASSERT(p);
- ASSERT(erts_smp_atomic32_read_acqb(&p->state) & ERTS_PSFLG_RUNNING);
+ ASSERT(erts_smp_atomic32_read_acqb(&p->state) & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING));
/* get previous timestamp and breakpoint
* from the process psd */
@@ -1432,7 +1435,7 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
g = (GenericBp *) pc[-4];
if (g == 0) {
int i;
- if (count_op == erts_break_reset || count_op == erts_break_stop) {
+ if (count_op == ERTS_BREAK_RESTART || count_op == ERTS_BREAK_PAUSE) {
/* Do not insert a new breakpoint */
return;
}
@@ -1456,7 +1459,7 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
MatchSetUnref(bp->meta_ms);
bp_meta_unref(bp->meta_tracer);
} else if (common & ERTS_BPF_COUNT) {
- if (count_op == erts_break_stop) {
+ if (count_op == ERTS_BREAK_PAUSE) {
bp->flags &= ~ERTS_BPF_COUNT_ACTIVE;
} else {
bp->flags |= ERTS_BPF_COUNT_ACTIVE;
@@ -1468,7 +1471,7 @@ set_function_break(BeamInstr *pc, Binary *match_spec, Uint break_flags,
BpDataTime* bdt = bp->time;
Uint i = 0;
- if (count_op == erts_break_stop) {
+ if (count_op == ERTS_BREAK_PAUSE) {
bp->flags &= ~ERTS_BPF_TIME_TRACE_ACTIVE;
} else {
bp->flags |= ERTS_BPF_TIME_TRACE_ACTIVE;
diff --git a/erts/emulator/beam/beam_bp.h b/erts/emulator/beam/beam_bp.h
index 08641b86d6..541af77211 100644
--- a/erts/emulator/beam/beam_bp.h
+++ b/erts/emulator/beam/beam_bp.h
@@ -80,16 +80,16 @@ typedef struct generic_bp {
#define ERTS_BP_CALL_TIME_SCHEDULE_EXITING (2)
#ifdef ERTS_SMP
-#define bp_sched2ix_proc(p) ((p)->scheduler_data->no - 1)
+#define bp_sched2ix_proc(p) (erts_proc_sched_data(p)->no - 1)
#else
#define bp_sched2ix_proc(p) (0)
#endif
enum erts_break_op{
- erts_break_nop = 0, /* Must be false */
- erts_break_set = !0, /* Must be true */
- erts_break_reset,
- erts_break_stop
+ ERTS_BREAK_NOP = 0, /* Must be false */
+ ERTS_BREAK_SET = !0, /* Must be true */
+ ERTS_BREAK_RESTART,
+ ERTS_BREAK_PAUSE
};
typedef Uint32 ErtsBpIndex;
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 72526bca5e..f8f2e29c95 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -64,18 +64,21 @@
# ifdef ERTS_SMP
# define PROCESS_MAIN_CHK_LOCKS(P) \
do { \
- if ((P)) { \
+ if ((P)) \
erts_proc_lc_chk_only_proc_main((P)); \
- } \
- else \
- erts_lc_check_exact(NULL, 0); \
- ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); \
+ ERTS_SMP_LC_ASSERT(!erts_thr_progress_is_blocking()); \
+} while (0)
+# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
+do { \
+ if ((P)) \
+ erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN, \
+ __FILE__, __LINE__); \
+} while (0)
+# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
+do { \
+ if ((P)) \
+ erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN); \
} while (0)
-# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P) \
- if ((P)) erts_proc_lc_require_lock((P), ERTS_PROC_LOCK_MAIN,\
- __FILE__, __LINE__)
-# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P) \
- if ((P)) erts_proc_lc_unrequire_lock((P), ERTS_PROC_LOCK_MAIN)
# else
# define ERTS_SMP_REQ_PROC_MAIN_LOCK(P)
# define ERTS_SMP_UNREQ_PROC_MAIN_LOCK(P)
@@ -308,7 +311,8 @@ void** beam_ops;
if (E - HTOP < (needed + (HeapNeed))) { \
SWAPOUT; \
PROCESS_MAIN_CHK_LOCKS(c_p); \
- FCALLS -= erts_garbage_collect_nobump(c_p, needed + (HeapNeed), reg, (M)); \
+ FCALLS -= erts_garbage_collect_nobump(c_p, needed + (HeapNeed), \
+ reg, (M), FCALLS); \
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
PROCESS_MAIN_CHK_LOCKS(c_p); \
SWAPIN; \
@@ -360,7 +364,7 @@ void** beam_ops;
if ((E - HTOP < need) || (MSO(c_p).overhead + (VNh) >= BIN_VHEAP_SZ(c_p))) {\
SWAPOUT; \
PROCESS_MAIN_CHK_LOCKS(c_p); \
- FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, (Live)); \
+ FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, (Live), FCALLS); \
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
PROCESS_MAIN_CHK_LOCKS(c_p); \
SWAPIN; \
@@ -381,7 +385,7 @@ void** beam_ops;
if (E - HTOP < need) { \
SWAPOUT; \
PROCESS_MAIN_CHK_LOCKS(c_p); \
- FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, (Live));\
+ FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, (Live), FCALLS); \
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
PROCESS_MAIN_CHK_LOCKS(c_p); \
SWAPIN; \
@@ -402,7 +406,7 @@ void** beam_ops;
SWAPOUT; \
reg[Live] = Extra; \
PROCESS_MAIN_CHK_LOCKS(c_p); \
- FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, (Live)+1); \
+ FCALLS -= erts_garbage_collect_nobump(c_p, need, reg, (Live)+1, FCALLS); \
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p); \
PROCESS_MAIN_CHK_LOCKS(c_p); \
Extra = reg[Live]; \
@@ -1196,6 +1200,25 @@ init_emulator(void)
#define DTRACE_NIF_RETURN(p, m, f, a) do {} while (0)
#endif /* USE_VM_PROBES */
+#ifdef DEBUG
+#define ERTS_DBG_CHK_REDS(P, FC) \
+ do { \
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF((P))) { \
+ ASSERT(FC <= 0); \
+ ASSERT(erts_proc_sched_data(c_p)->virtual_reds \
+ <= 0 - (FC)); \
+ } \
+ else { \
+ ASSERT(FC <= CONTEXT_REDS); \
+ ASSERT(erts_proc_sched_data(c_p)->virtual_reds \
+ <= CONTEXT_REDS - (FC)); \
+ } \
+} while (0)
+#else
+#define ERTS_DBG_CHK_REDS(P, FC)
+#endif
+
+
/*
* process_main() is called twice:
* The first call performs some initialisation, including exporting
@@ -1290,14 +1313,19 @@ void process_main(void)
goto do_schedule1;
do_schedule:
- reds_used = REDS_IN(c_p) - FCALLS;
+ ASSERT(c_p->debug_reds_in == REDS_IN(c_p));
+ if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
+ reds_used = REDS_IN(c_p) - FCALLS;
+ else
+ reds_used = REDS_IN(c_p) - (CONTEXT_REDS + FCALLS);
+ ASSERT(reds_used >= 0);
do_schedule1:
if (start_time != 0) {
Sint64 diff = erts_timestamp_millis() - start_time;
if (diff > 0 && (Uint) diff > erts_system_monitor_long_schedule
-#ifdef ERTS_DIRTY_SCHEDULERS
- && !ERTS_SCHEDULER_IS_DIRTY(c_p->scheduler_data)
+#if defined(ERTS_SMP) && defined(ERTS_DIRTY_SCHEDULERS)
+ && !ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(c_p))
#endif
) {
BeamInstr *inptr = find_function_from_pc(start_time_i);
@@ -1310,6 +1338,7 @@ void process_main(void)
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(c_p);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
c_p = schedule(c_p, reds_used);
+ ASSERT(!(c_p->flags & F_HIPE_MODE));
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
start_time = 0;
#ifdef DEBUG
@@ -1325,8 +1354,8 @@ void process_main(void)
start_time_i = c_p->i;
}
- reg = ERTS_PROC_GET_SCHDATA(c_p)->x_reg_array;
- freg = ERTS_PROC_GET_SCHDATA(c_p)->f_reg_array;
+ reg = erts_proc_sched_data(c_p)->x_reg_array;
+ freg = erts_proc_sched_data(c_p)->f_reg_array;
ERL_BITS_RELOAD_STATEP(c_p);
{
int reds;
@@ -1348,16 +1377,21 @@ void process_main(void)
SET_I(c_p->i);
- reds = c_p->fcalls;
- if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)
- && (ERTS_TRACE_FLAGS(c_p) & F_SENSITIVE) == 0) {
- neg_o_reds = -reds;
- FCALLS = REDS_IN(c_p) = 0;
+ REDS_IN(c_p) = reds = c_p->fcalls;
+#ifdef DEBUG
+ c_p->debug_reds_in = reds;
+#endif
+
+ if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
+ neg_o_reds = -CONTEXT_REDS;
+ FCALLS = neg_o_reds + reds;
} else {
neg_o_reds = 0;
- FCALLS = REDS_IN(c_p) = reds;
+ FCALLS = reds;
}
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+
next = (BeamInstr *) *I;
SWAPIN;
ASSERT(VALID_INSTR(next));
@@ -1673,6 +1707,14 @@ void process_main(void)
BeamInstr *next;
Eterm result;
+ if (!(FCALLS > 0 || FCALLS > neg_o_reds)) {
+ /* If we have run out of reductions, we do a context
+ switch before calling the bif */
+ c_p->arity = 2;
+ c_p->current = NULL;
+ goto context_switch3;
+ }
+
PRE_BIF_SWAPOUT(c_p);
c_p->fcalls = FCALLS - 1;
result = erl_send(c_p, r(0), x(1));
@@ -1770,7 +1812,7 @@ void process_main(void)
if (E - HTOP < 3) {
SWAPOUT;
PROCESS_MAIN_CHK_LOCKS(c_p);
- FCALLS -= erts_garbage_collect_nobump(c_p, 3, reg+2, 1);
+ FCALLS -= erts_garbage_collect_nobump(c_p, 3, reg+2, 1, FCALLS);
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
SWAPIN;
@@ -1865,6 +1907,7 @@ void process_main(void)
c_p->flags |= F_DELAY_GC;
loop_rec__:
+
PROCESS_MAIN_CHK_LOCKS(c_p);
msgp = PEEK_MESSAGE(c_p);
@@ -2007,6 +2050,8 @@ void process_main(void)
ERTS_VBUMP_LEAVE_REDS_INTERNAL(c_p, 5, FCALLS);
}
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
+
ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
PROCESS_MAIN_CHK_LOCKS(c_p);
NextPF(0, next);
@@ -2166,7 +2211,7 @@ void process_main(void)
PreFetch(0, next);
if (IS_TRACED_FL(c_p, F_TRACE_RECEIVE)) {
- trace_receive(c_p, am_timeout);
+ trace_receive(c_p, am_clock_service, am_timeout, NULL);
}
if (ERTS_PROC_GET_SAVED_CALLS_BUF(c_p)) {
save_calls(c_p, &exp_timeout);
@@ -2535,6 +2580,7 @@ do { \
GetArg1(2, tmp_reg[0]);
bf = (BifFunction) Arg(1);
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
c_p->fcalls = FCALLS;
PROCESS_MAIN_CHK_LOCKS(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
@@ -2544,6 +2590,7 @@ do { \
PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_HOLE_CHECK(c_p);
FCALLS = c_p->fcalls;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
if (is_value(result)) {
StoreBifResult(3, result);
}
@@ -2564,6 +2611,7 @@ do { \
GetArg1(1, tmp_reg[0]);
bf = (BifFunction) Arg(0);
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
c_p->fcalls = FCALLS;
PROCESS_MAIN_CHK_LOCKS(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
@@ -2573,6 +2621,7 @@ do { \
PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_HOLE_CHECK(c_p);
FCALLS = c_p->fcalls;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
if (is_value(result)) {
StoreBifResult(2, result);
}
@@ -2591,6 +2640,7 @@ do { \
GetArg1(2, x(live));
bf = (GcBifFunction) Arg(1);
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
c_p->fcalls = FCALLS;
SWAPOUT;
PROCESS_MAIN_CHK_LOCKS(c_p);
@@ -2602,6 +2652,7 @@ do { \
SWAPIN;
ERTS_HOLE_CHECK(c_p);
FCALLS = c_p->fcalls;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
if (is_value(result)) {
StoreBifResult(4, result);
}
@@ -2630,6 +2681,7 @@ do { \
*/
live++;
bf = (GcBifFunction) Arg(1);
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
c_p->fcalls = FCALLS;
SWAPOUT;
PROCESS_MAIN_CHK_LOCKS(c_p);
@@ -2641,6 +2693,7 @@ do { \
SWAPIN;
ERTS_HOLE_CHECK(c_p);
FCALLS = c_p->fcalls;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
if (is_value(result)) {
StoreBifResult(5, result);
}
@@ -2671,6 +2724,7 @@ do { \
*/
live += 2;
bf = (GcBifFunction) Arg(1);
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
c_p->fcalls = FCALLS;
SWAPOUT;
PROCESS_MAIN_CHK_LOCKS(c_p);
@@ -2682,6 +2736,7 @@ do { \
SWAPIN;
ERTS_HOLE_CHECK(c_p);
FCALLS = c_p->fcalls;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
if (is_value(result)) {
StoreBifResult(5, result);
}
@@ -2708,6 +2763,7 @@ do { \
GetArg2(2, tmp_reg[0], tmp_reg[1]);
bf = (BifFunction) Arg(1);
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
c_p->fcalls = FCALLS;
PROCESS_MAIN_CHK_LOCKS(c_p);
ASSERT(!ERTS_PROC_IS_EXITING(c_p));
@@ -2717,6 +2773,7 @@ do { \
PROCESS_MAIN_CHK_LOCKS(c_p);
ERTS_HOLE_CHECK(c_p);
FCALLS = c_p->fcalls;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
if (is_value(result)) {
StoreBifResult(4, result);
}
@@ -2764,6 +2821,15 @@ do { \
BeamInstr *next;
ErlHeapFragment *live_hf_end;
+
+ if (!((FCALLS - 1) > 0 || (FCALLS-1) > neg_o_reds)) {
+ /* If we have run out of reductions, we do a context
+ switch before calling the bif */
+ c_p->arity = ((Export *)Arg(0))->code[2];
+ c_p->current = ((Export *)Arg(0))->code;
+ goto context_switch3;
+ }
+
if (ERTS_MSACC_IS_ENABLED_CACHED_X()) {
if (GET_BIF_MODULE(Arg(0)) == am_ets) {
ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_ETS);
@@ -2775,6 +2841,7 @@ do { \
bf = GET_BIF_ADDRESS(Arg(0));
PRE_BIF_SWAPOUT(c_p);
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
c_p->fcalls = FCALLS - 1;
if (FCALLS <= 0) {
save_calls(c_p, (Export *) Arg(0));
@@ -2796,6 +2863,7 @@ do { \
PROCESS_MAIN_CHK_LOCKS(c_p);
HTOP = HEAP_TOP(c_p);
FCALLS = c_p->fcalls;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
/* We have to update the cache if we are enabled in order
to make sure no book keeping is done after we disabled
msacc. We don't always do this as it is quite expensive. */
@@ -3290,10 +3358,19 @@ do { \
context_switch2: /* Entry for fun calls. */
c_p->current = I-3; /* Pointer to Mod, Func, Arity */
+ context_switch3:
+
{
Eterm* argp;
int i;
+ if (erts_smp_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_EXITING) {
+ c_p->i = beam_exit;
+ c_p->arity = 0;
+ c_p->current = NULL;
+ goto do_schedule;
+ }
+
/*
* Make sure that there is enough room for the argument registers to be saved.
*/
@@ -3324,8 +3401,13 @@ do { \
* (beacuse the code for the Dispatch() macro becomes shorter that way).
*/
- reds_used = REDS_IN(c_p) - FCALLS + 1;
-
+ ASSERT(c_p->debug_reds_in == REDS_IN(c_p));
+ if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
+ reds_used = REDS_IN(c_p) - FCALLS;
+ else
+ reds_used = REDS_IN(c_p) - (CONTEXT_REDS + FCALLS);
+ ASSERT(reds_used >= 0);
+
/*
* Save the argument registers and everything else.
*/
@@ -3456,6 +3538,12 @@ do { \
BifFunction vbf;
ErlHeapFragment *live_hf_end;
+ if (!((FCALLS - 1) > 0 || (FCALLS - 1) > neg_o_reds)) {
+ /* If we have run out of reductions, we do a context
+ switch before calling the nif */
+ goto context_switch;
+ }
+
ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_NIF);
DTRACE_NIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
@@ -3471,18 +3559,27 @@ do { \
typedef Eterm NifF(struct enif_environment_t*, int argc, Eterm argv[]);
NifF* fp = vbf = (NifF*) I[1];
struct enif_environment_t env;
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!c_p->scheduler_data)
+ live_hf_end = ERTS_INVALID_HFRAG_PTR; /* On dirty scheduler */
+ else
+#endif
+ live_hf_end = c_p->mbuf;
erts_pre_nif(&env, c_p, (struct erl_module_nif*)I[2], NULL);
- live_hf_end = c_p->mbuf;
nif_bif_result = (*fp)(&env, bif_nif_arity, reg);
if (env.exception_thrown)
nif_bif_result = THE_NON_VALUE;
erts_post_nif(&env);
- }
- ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(nif_bif_result));
- PROCESS_MAIN_CHK_LOCKS(c_p);
- ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
- ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
+ PROCESS_MAIN_CHK_LOCKS(c_p);
+ ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
+ ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_EMULATOR);
+ if (env.exiting) {
+ ERTS_SMP_REQ_PROC_MAIN_LOCK(c_p);
+ goto do_schedule;
+ }
+ ASSERT(!ERTS_PROC_IS_EXITING(c_p));
+ }
DTRACE_NIF_RETURN(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
goto apply_bif_or_nif_epilogue;
@@ -3498,6 +3595,13 @@ do { \
* code[3]: &&apply_bif
* code[4]: Function pointer to BIF function
*/
+
+ if (!((FCALLS - 1) > 0 || (FCALLS - 1) > neg_o_reds)) {
+ /* If we have run out of reductions, we do a context
+ switch before calling the bif */
+ goto context_switch;
+ }
+
if (ERTS_MSACC_IS_ENABLED_CACHED_X()) {
if ((Eterm)I[-3] == am_ets) {
ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_ETS);
@@ -3514,6 +3618,7 @@ do { \
DTRACE_BIF_ENTRY(c_p, (Eterm)I[-3], (Eterm)I[-2], (Uint)I[-1]);
SWAPOUT;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS - 1);
c_p->fcalls = FCALLS - 1;
vbf = (BifFunction) Arg(0);
PROCESS_MAIN_CHK_LOCKS(c_p);
@@ -3549,6 +3654,7 @@ do { \
}
SWAPIN; /* There might have been a garbage collection. */
FCALLS = c_p->fcalls;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
if (is_value(nif_bif_result)) {
r(0) = nif_bif_result;
CHECK_TERM(r(0));
@@ -4611,9 +4717,9 @@ do { \
OpCase(i_generic_breakpoint): {
BeamInstr real_I;
ASSERT(I[-5] == (BeamInstr) BeamOp(op_i_func_info_IaaI));
- SWAPOUT;
+ HEAVY_SWAPOUT;
real_I = erts_generic_breakpoint(c_p, I, reg);
- SWAPIN;
+ HEAVY_SWAPIN;
ASSERT(VALID_INSTR(real_I));
Goto(real_I);
}
@@ -4790,6 +4896,7 @@ do { \
{
#define HIPE_MODE_SWITCH(Cmd) \
SWAPOUT; \
+ ERTS_DBG_CHK_REDS(c_p, FCALLS); \
c_p->fcalls = FCALLS; \
c_p->def_arg_reg[4] = -neg_o_reds; \
c_p = hipe_mode_switch(c_p, Cmd, reg); \
@@ -4828,13 +4935,17 @@ do { \
#undef HIPE_MODE_SWITCH
L_post_hipe_mode_switch:
- reg = ERTS_PROC_GET_SCHDATA(c_p)->x_reg_array;
- freg = ERTS_PROC_GET_SCHDATA(c_p)->f_reg_array;
+#ifdef DEBUG
+ pid = c_p->common.id; /* may have switched process... */
+#endif
+ reg = erts_proc_sched_data(c_p)->x_reg_array;
+ freg = erts_proc_sched_data(c_p)->f_reg_array;
ERL_BITS_RELOAD_STATEP(c_p);
/* XXX: this abuse of def_arg_reg[] is horrid! */
neg_o_reds = -c_p->def_arg_reg[4];
FCALLS = c_p->fcalls;
SWAPIN;
+ ERTS_DBG_CHK_REDS(c_p, FCALLS);
switch( c_p->def_arg_reg[3] ) {
case HIPE_MODE_SWITCH_RES_RETURN:
ASSERT(is_value(reg[0]));
@@ -6839,7 +6950,7 @@ erts_current_reductions(Process *current, Process *p)
if (current != p) {
return 0;
} else if (current->fcalls < 0 && ERTS_PROC_GET_SAVED_CALLS_BUF(current)) {
- return -current->fcalls;
+ return current->fcalls + CONTEXT_REDS;
} else {
return REDS_IN(current) - current->fcalls;
}
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index 2e21a553ed..95489d9a63 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -32,6 +32,7 @@
#include "bif.h"
#include "external.h"
#include "beam_load.h"
+#include "beam_bp.h"
#include "big.h"
#include "erl_bits.h"
#include "beam_catches.h"
@@ -479,9 +480,9 @@ static void free_loader_state(Binary* magic);
static ErlHeapFragment* new_literal_fragment(Uint size);
static void free_literal_fragment(ErlHeapFragment*);
static void loader_state_dtor(Binary* magic);
-static Eterm insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm group_leader, Eterm module,
- BeamCodeHeader* code, Uint size);
+static Eterm stub_insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
+ Eterm group_leader, Eterm module,
+ BeamCodeHeader* code, Uint size);
static int init_iff_file(LoaderState* stp, byte* code, Uint size);
static int scan_iff_file(LoaderState* stp, Uint* chunk_types,
Uint num_types, Uint num_mandatory);
@@ -513,7 +514,7 @@ static GenOp* gen_get_map_element(LoaderState* stp, GenOpArg Fail, GenOpArg Src,
static int freeze_code(LoaderState* stp);
-static void final_touch(LoaderState* stp);
+static void final_touch(LoaderState* stp, struct erl_module_instance* inst_p);
static void short_file(int line, LoaderState* stp, unsigned needed);
static void load_printf(int line, LoaderState* context, char *fmt, ...);
static int transform_engine(LoaderState* st);
@@ -766,8 +767,11 @@ Eterm
erts_finish_loading(Binary* magic, Process* c_p,
ErtsProcLocks c_p_locks, Eterm* modp)
{
- Eterm retval;
+ Eterm retval = NIL;
LoaderState* stp = ERTS_MAGIC_BIN_DATA(magic);
+ Module* mod_tab_p;
+ struct erl_module_instance* inst_p;
+ Uint size;
/*
* No other process may run since we will update the export
@@ -776,27 +780,80 @@ erts_finish_loading(Binary* magic, Process* c_p,
ERTS_SMP_LC_ASSERT(erts_initialized == 0 || erts_has_code_write_permission() ||
erts_smp_thr_progress_is_blocking());
-
/*
* Make current code for the module old and insert the new code
* as current. This will fail if there already exists old code
* for the module.
*/
+ mod_tab_p = erts_put_module(stp->module);
CHKBLK(ERTS_ALC_T_CODE,stp->code);
- retval = insert_new_code(c_p, c_p_locks, stp->group_leader, stp->module,
- stp->hdr, stp->loaded_size);
- if (retval != NIL) {
- goto load_error;
+ if (!stp->on_load) {
+ /*
+ * Normal case -- no -on_load() function.
+ */
+ retval = beam_make_current_old(c_p, c_p_locks, stp->module);
+ ASSERT(retval == NIL);
+ } else {
+ ErtsCodeIndex code_ix = erts_staging_code_ix();
+ Eterm module = stp->module;
+ int i;
+
+ /*
+ * There is an -on_load() function. We will keep the current
+ * code, but we must turn off any tracing.
+ */
+
+ for (i = 0; i < export_list_size(code_ix); i++) {
+ Export *ep = export_list(i, code_ix);
+ if (ep == NULL || ep->code[0] != module) {
+ continue;
+ }
+ if (ep->addressv[code_ix] == ep->code+3) {
+ if (ep->code[3] == (BeamInstr) em_apply_bif) {
+ continue;
+ } else if (ep->code[3] ==
+ (BeamInstr) BeamOp(op_i_generic_breakpoint)) {
+ ERTS_SMP_LC_ASSERT(erts_smp_thr_progress_is_blocking());
+ ASSERT(mod_tab_p->curr.num_traced_exports > 0);
+ erts_clear_export_break(mod_tab_p, ep->code+3);
+ ep->addressv[code_ix] = (BeamInstr *) ep->code[4];
+ ep->code[4] = 0;
+ }
+ ASSERT(ep->code[4] == 0);
+ }
+ }
+ ASSERT(mod_tab_p->curr.num_breakpoints == 0);
+ ASSERT(mod_tab_p->curr.num_traced_exports == 0);
}
/*
+ * Update module table.
+ */
+
+ size = stp->loaded_size;
+ erts_total_code_size += size;
+ if (stp->on_load) {
+ inst_p = &mod_tab_p->old;
+ } else {
+ inst_p = &mod_tab_p->curr;
+ }
+ inst_p->code_hdr = stp->hdr;
+ inst_p->code_length = size;
+
+ /*
+ * Update ranges (used for finding a function from a PC value).
+ */
+
+ erts_update_ranges((BeamInstr*)inst_p->code_hdr, size);
+
+ /*
* Ready for the final touch: fixing the export table entries for
* exported and imported functions. This can't fail.
*/
CHKBLK(ERTS_ALC_T_CODE,stp->code);
- final_touch(stp);
+ final_touch(stp, inst_p);
/*
* Loading succeded.
@@ -820,7 +877,6 @@ erts_finish_loading(Binary* magic, Process* c_p,
retval = am_on_load;
}
- load_error:
free_loader_state(magic);
return retval;
}
@@ -1026,9 +1082,9 @@ loader_state_dtor(Binary* magic)
}
static Eterm
-insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
- Eterm group_leader, Eterm module, BeamCodeHeader* code_hdr,
- Uint size)
+stub_insert_new_code(Process *c_p, ErtsProcLocks c_p_locks,
+ Eterm group_leader, Eterm module,
+ BeamCodeHeader* code_hdr, Uint size)
{
Module* modp;
Eterm retval;
@@ -4700,14 +4756,13 @@ freeze_code(LoaderState* stp)
}
static void
-final_touch(LoaderState* stp)
+final_touch(LoaderState* stp, struct erl_module_instance* inst_p)
{
int i;
int on_load = stp->on_load;
unsigned catches;
Uint index;
BeamInstr* codev = stp->codev;
- Module* modp;
/*
* Allocate catch indices and fix up all catch_yf instructions.
@@ -4722,8 +4777,7 @@ final_touch(LoaderState* stp)
codev[index+2] = make_catch(catches);
index = next;
}
- modp = erts_put_module(stp->module);
- modp->curr.catches = catches;
+ inst_p->catches = catches;
/*
* Export functions.
@@ -4743,10 +4797,10 @@ final_touch(LoaderState* stp)
ep->addressv[erts_staging_code_ix()] = address;
} else {
/*
- * Don't make any of the exported functions
- * callable yet.
+ * on_load: Don't make any of the exported functions
+ * callable yet. Keep any function in the current
+ * code callable.
*/
- ep->addressv[erts_staging_code_ix()] = ep->code+3;
ep->code[4] = (BeamInstr) address;
}
}
@@ -6421,7 +6475,8 @@ erts_make_stub_module(Process* p, Eterm Mod, Eterm Beam, Eterm Info)
* Insert the module in the module table.
*/
- rval = insert_new_code(p, 0, p->group_leader, Mod, code_hdr, code_size);
+ rval = stub_insert_new_code(p, 0, p->group_leader, Mod,
+ code_hdr, code_size);
if (rval != NIL) {
goto error;
}
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index c38d031ab8..2a3bd4afe5 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -44,6 +44,7 @@
#include "erl_ptab.h"
#include "erl_bits.h"
#include "erl_bif_unique.h"
+#include "erl_map.h"
#include "erl_msacc.h"
Export *erts_await_result;
@@ -611,7 +612,7 @@ erts_queue_monitor_message(Process *p,
ref_copy = copy_struct(ref, ref_size, &hp, ohp);
tup = TUPLE5(hp, am_DOWN, ref_copy, type, item_copy, reason_copy);
- erts_queue_message(p, p_locksp, msgp, tup);
+ erts_queue_message(p, *p_locksp, msgp, tup, am_system);
}
static BIF_RETTYPE
@@ -880,6 +881,8 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1)
so.flags = erts_default_spo_flags|SPO_USE_ARGS;
so.min_heap_size = H_MIN_SIZE;
so.min_vheap_size = BIN_VH_MIN_SIZE;
+ so.max_heap_size = H_MAX_SIZE;
+ so.max_heap_flags = H_MAX_FLAGS;
so.priority = PRIORITY_NORMAL;
so.max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
so.scheduler = 0;
@@ -937,6 +940,9 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1)
} else {
so.min_heap_size = erts_next_heap_size(min_heap_size, 0);
}
+ } else if (arg == am_max_heap_size) {
+ if (!erts_max_heap_size(val, &so.max_heap_size, &so.max_heap_flags))
+ goto error;
} else if (arg == am_min_bin_vheap_size && is_small(val)) {
Sint min_vheap_size = signed_val(val);
if (min_vheap_size < 0) {
@@ -970,6 +976,10 @@ BIF_RETTYPE spawn_opt_1(BIF_ALIST_1)
goto error;
}
+ if (so.max_heap_size != 0 && so.max_heap_size < so.min_heap_size) {
+ goto error;
+ }
+
/*
* Spawn the process.
*/
@@ -1569,7 +1579,32 @@ static BIF_RETTYPE process_flag_aux(Process *BIF_P,
scb->n = 0;
}
- scb = ERTS_PROC_SET_SAVED_CALLS_BUF(rp, scb);
+#ifdef HIPE
+ if (rp->flags & F_HIPE_MODE) {
+ ASSERT(!ERTS_PROC_GET_SAVED_CALLS_BUF(rp));
+ scb = ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(rp, scb);
+ }
+ else
+#endif
+ {
+#ifdef HIPE
+ ASSERT(!ERTS_PROC_GET_SUSPENDED_SAVED_CALLS_BUF(rp));
+#endif
+ scb = ERTS_PROC_SET_SAVED_CALLS_BUF(rp, scb);
+ if (rp == BIF_P && ((scb && i == 0) || (!scb && i != 0))) {
+ /* Adjust fcalls to match save calls setting... */
+ if (i == 0)
+ BIF_P->fcalls += CONTEXT_REDS; /* disabled it */
+ else
+ BIF_P->fcalls -= CONTEXT_REDS; /* enabled it */
+
+ /*
+ * Make sure we reschedule immediately so the
+ * change take effect at once.
+ */
+ ERTS_VBUMP_ALL_REDS(BIF_P);
+ }
+ }
if (!scb)
old_value = make_small(0);
@@ -1578,12 +1613,7 @@ static BIF_RETTYPE process_flag_aux(Process *BIF_P,
erts_free(ERTS_ALC_T_CALLS_BUF, (void *) scb);
}
- /* Make sure the process in question is rescheduled
- immediately, if it's us, so the call saving takes effect. */
- if (rp == BIF_P)
- BIF_RET2(old_value, CONTEXT_REDS);
- else
- BIF_RET(old_value);
+ BIF_RET(old_value);
}
error:
@@ -1666,7 +1696,7 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
ERTS_PSFLG_BOUND);
}
- curr = ERTS_GET_SCHEDULER_DATA_FROM_PROC(BIF_P)->run_queue;
+ curr = erts_proc_sched_data(BIF_P)->run_queue;
old = (ERTS_PSFLG_BOUND & state) ? curr : NULL;
ASSERT(!old || old == curr);
@@ -1711,6 +1741,23 @@ BIF_RETTYPE process_flag_2(BIF_ALIST_2)
}
BIF_RET(old_value);
}
+ else if (BIF_ARG_1 == am_max_heap_size) {
+ Eterm *hp;
+ Uint sz = 0, max_heap_size, max_heap_flags;
+
+ if (!erts_max_heap_size(BIF_ARG_2, &max_heap_size, &max_heap_flags))
+ goto error;
+
+ if ((max_heap_size < MIN_HEAP_SIZE(BIF_P) && max_heap_size != 0))
+ goto error;
+
+ erts_max_heap_size_map(MAX_HEAP_SIZE_GET(BIF_P), MAX_HEAP_SIZE_FLAGS_GET(BIF_P), NULL, &sz);
+ hp = HAlloc(BIF_P, sz);
+ old_value = erts_max_heap_size_map(MAX_HEAP_SIZE_GET(BIF_P), MAX_HEAP_SIZE_FLAGS_GET(BIF_P), &hp, NULL);
+ MAX_HEAP_SIZE_SET(BIF_P, max_heap_size);
+ MAX_HEAP_SIZE_FLAGS_SET(BIF_P, max_heap_flags);
+ BIF_RET(old_value);
+ }
else if (BIF_ARG_1 == am_message_queue_data) {
old_value = erts_change_message_queue_management(BIF_P, BIF_ARG_2);
if (is_non_value(old_value))
@@ -1777,10 +1824,18 @@ BIF_RETTYPE process_flag_3(BIF_ALIST_3)
Process *rp;
Eterm res;
- if ((rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
- BIF_ARG_1, ERTS_PROC_LOCK_MAIN)) == NULL) {
+#ifdef ERTS_SMP
+ rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
+ BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
+ if (rp == ERTS_PROC_LOCK_BUSY)
+ ERTS_BIF_YIELD3(bif_export[BIF_process_flag_3], BIF_P,
+ BIF_ARG_1, BIF_ARG_2, BIF_ARG_3);
+#else
+ rp = erts_proc_lookup(BIF_ARG_1);
+#endif
+
+ if (!rp)
BIF_ERROR(BIF_P, BADARG);
- }
res = process_flag_aux(BIF_P, rp, BIF_ARG_2, BIF_ARG_3);
@@ -4170,8 +4225,28 @@ BIF_RETTYPE group_leader_2(BIF_ALIST_2)
else {
locks &= ~ERTS_PROC_LOCK_STATUS;
erts_smp_proc_unlock(new_member, ERTS_PROC_LOCK_STATUS);
- new_member->group_leader = STORE_NC_IN_PROC(new_member,
- BIF_ARG_1);
+	    if (!(erts_smp_atomic32_read_nob(&new_member->state)
+		  & (ERTS_PSFLG_DIRTY_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS))) {
+ new_member->group_leader = STORE_NC_IN_PROC(new_member,
+ BIF_ARG_1);
+ }
+ else {
+ ErlHeapFragment *bp;
+ Eterm *hp;
+ /*
+ * Other process executing on a dirty scheduler,
+ * so we are not allowed to write to its heap.
+ * Store in heap fragment.
+ */
+
+ bp = new_message_buffer(NC_HEAP_SIZE(BIF_ARG_1));
+ hp = bp->mem;
+ new_member->group_leader = STORE_NC(&hp,
+ &new_member->off_heap,
+ BIF_ARG_1);
+ bp->next = new_member->mbuf;
+ new_member->mbuf = bp;
+ }
}
}
@@ -4322,6 +4397,31 @@ BIF_RETTYPE system_flag_2(BIF_ALIST_2)
erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
BIF_RET(make_small(oval));
+ } else if (BIF_ARG_1 == am_max_heap_size) {
+
+ Eterm *hp, old_value;
+ Uint sz = 0, max_heap_size, max_heap_flags;
+
+ if (!erts_max_heap_size(BIF_ARG_2, &max_heap_size, &max_heap_flags))
+ goto error;
+
+ if (max_heap_size < H_MIN_SIZE && max_heap_size != 0)
+ goto error;
+
+ erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, NULL, &sz);
+ hp = HAlloc(BIF_P, sz);
+ old_value = erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, &hp, NULL);
+
+ erts_smp_proc_unlock(BIF_P, ERTS_PROC_LOCK_MAIN);
+ erts_smp_thr_progress_block();
+
+ H_MAX_SIZE = max_heap_size;
+ H_MAX_FLAGS = max_heap_flags;
+
+ erts_smp_thr_progress_unblock();
+ erts_smp_proc_lock(BIF_P, ERTS_PROC_LOCK_MAIN);
+
+ BIF_RET(old_value);
} else if (BIF_ARG_1 == am_display_items) {
int oval = display_items;
if (!is_small(BIF_ARG_2) || (n = signed_val(BIF_ARG_2)) < 0) {
diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h
index 546e3830b9..2203182a0d 100644
--- a/erts/emulator/beam/bif.h
+++ b/erts/emulator/beam/bif.h
@@ -52,18 +52,19 @@ extern Export *erts_convert_time_unit_trap;
(p)->fcalls = 0; \
else \
(p)->fcalls = -CONTEXT_REDS; \
+ ASSERT(ERTS_BIF_REDS_LEFT((p)) == 0); \
} while(0)
#define ERTS_VBUMP_ALL_REDS_INTERNAL(p, fcalls) \
do { \
if (!ERTS_PROC_GET_SAVED_CALLS_BUF((p))) { \
if ((fcalls) > 0) \
- ERTS_PROC_GET_SCHDATA((p))->virtual_reds += (fcalls); \
+ erts_proc_sched_data((p))->virtual_reds += (fcalls); \
(fcalls) = 0; \
} \
else { \
if ((fcalls) > -CONTEXT_REDS) \
- ERTS_PROC_GET_SCHDATA((p))->virtual_reds \
+ erts_proc_sched_data((p))->virtual_reds \
+= ((fcalls) - (-CONTEXT_REDS)); \
(fcalls) = -CONTEXT_REDS; \
} \
@@ -90,22 +91,22 @@ do { \
if (!ERTS_PROC_GET_SAVED_CALLS_BUF((p))) { \
if ((p)->fcalls >= reds) { \
(p)->fcalls -= reds; \
- ERTS_PROC_GET_SCHDATA((p))->virtual_reds += reds; \
+ erts_proc_sched_data((p))->virtual_reds += reds; \
} \
else { \
if ((p)->fcalls > 0) \
- ERTS_PROC_GET_SCHDATA((p))->virtual_reds += (p)->fcalls;\
+ erts_proc_sched_data((p))->virtual_reds += (p)->fcalls; \
(p)->fcalls = 0; \
} \
} \
else { \
if ((p)->fcalls >= reds - CONTEXT_REDS) { \
(p)->fcalls -= reds; \
- ERTS_PROC_GET_SCHDATA((p))->virtual_reds += reds; \
+ erts_proc_sched_data((p))->virtual_reds += reds; \
} \
else { \
if ((p)->fcalls > -CONTEXT_REDS) \
- ERTS_PROC_GET_SCHDATA((p))->virtual_reds \
+ erts_proc_sched_data((p))->virtual_reds \
+= (p)->fcalls - (-CONTEXT_REDS); \
(p)->fcalls = -CONTEXT_REDS; \
} \
@@ -117,14 +118,14 @@ do { \
if (ERTS_PROC_GET_SAVED_CALLS_BUF((P))) { \
int nreds__ = ((int)(Reds)) - CONTEXT_REDS; \
if ((FCalls) > nreds__) { \
- ERTS_PROC_GET_SCHDATA((P))->virtual_reds \
+ erts_proc_sched_data((P))->virtual_reds \
+= (FCalls) - nreds__; \
(FCalls) = nreds__; \
} \
} \
else { \
if ((FCalls) > (Reds)) { \
- ERTS_PROC_GET_SCHDATA((P))->virtual_reds \
+ erts_proc_sched_data((P))->virtual_reds \
+= (FCalls) - (Reds); \
(FCalls) = (Reds); \
} \
@@ -164,7 +165,7 @@ do { \
#define ERTS_BIF_ERROR_TRAPPED1(Proc, Reason, Bif, A0) \
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
(Proc)->current = (Bif)->code; \
reg[0] = (Eterm) (A0); \
@@ -173,7 +174,7 @@ do { \
#define ERTS_BIF_ERROR_TRAPPED2(Proc, Reason, Bif, A0, A1) \
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
(Proc)->current = (Bif)->code; \
reg[0] = (Eterm) (A0); \
@@ -183,7 +184,7 @@ do { \
#define ERTS_BIF_ERROR_TRAPPED3(Proc, Reason, Bif, A0, A1, A2) \
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
(Proc)->current = (Bif)->code; \
reg[0] = (Eterm) (A0); \
@@ -207,7 +208,7 @@ do { \
#define ERTS_BIF_PREP_ERROR_TRAPPED1(Ret, Proc, Reason, Bif, A0) \
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
(Proc)->current = (Bif)->code; \
reg[0] = (Eterm) (A0); \
@@ -216,7 +217,7 @@ do { \
#define ERTS_BIF_PREP_ERROR_TRAPPED2(Ret, Proc, Reason, Bif, A0, A1) \
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
(Proc)->current = (Bif)->code; \
reg[0] = (Eterm) (A0); \
@@ -226,7 +227,7 @@ do { \
#define ERTS_BIF_PREP_ERROR_TRAPPED3(Ret, Proc, Reason, Bif, A0, A1, A2) \
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->freason = (Reason); \
(Proc)->current = (Bif)->code; \
reg[0] = (Eterm) (A0); \
@@ -245,7 +246,7 @@ do { \
#define ERTS_BIF_PREP_TRAP1(Ret, Trap, Proc, A0) \
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->arity = 1; \
reg[0] = (Eterm) (A0); \
(Proc)->i = (BeamInstr*) ((Trap)->addressv[erts_active_code_ix()]); \
@@ -255,7 +256,7 @@ do { \
#define ERTS_BIF_PREP_TRAP2(Ret, Trap, Proc, A0, A1) \
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->arity = 2; \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
@@ -266,7 +267,7 @@ do { \
#define ERTS_BIF_PREP_TRAP3(Ret, Trap, Proc, A0, A1, A2) \
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->arity = 3; \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
@@ -278,7 +279,7 @@ do { \
#define ERTS_BIF_PREP_TRAP3_NO_RET(Trap, Proc, A0, A1, A2)\
do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((Proc))->x_reg_array; \
(Proc)->arity = 3; \
reg[0] = (Eterm) (A0); \
reg[1] = (Eterm) (A1); \
@@ -295,7 +296,7 @@ do { \
} while(0)
#define BIF_TRAP1(Trap_, p, A0) do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((p))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((p))->x_reg_array; \
(p)->arity = 1; \
reg[0] = (A0); \
(p)->i = (BeamInstr*) ((Trap_)->addressv[erts_active_code_ix()]); \
@@ -304,7 +305,7 @@ do { \
} while(0)
#define BIF_TRAP2(Trap_, p, A0, A1) do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((p))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((p))->x_reg_array; \
(p)->arity = 2; \
reg[0] = (A0); \
reg[1] = (A1); \
@@ -314,7 +315,7 @@ do { \
} while(0)
#define BIF_TRAP3(Trap_, p, A0, A1, A2) do { \
- Eterm* reg = ERTS_PROC_GET_SCHDATA((p))->x_reg_array; \
+ Eterm* reg = erts_proc_sched_data((p))->x_reg_array; \
(p)->arity = 3; \
reg[0] = (A0); \
reg[1] = (A1); \
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index d02c6828f9..3c19e82b66 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -118,7 +118,9 @@ process_killer(void)
| ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_IN_RUNQ
| ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS)) {
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
erts_printf("Can only kill WAITING processes this way\n");
}
else {
@@ -214,7 +216,8 @@ print_process_info(int to, void *to_arg, Process *p)
if (state & ERTS_PSFLG_GC) {
garbing = 1;
running = 1;
- } else if (state & ERTS_PSFLG_RUNNING)
+ } else if (state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING))
running = 1;
/*
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index 52fd57e101..09c83f1117 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -397,7 +397,7 @@ static void doit_node_link_net_exits(ErtsLink *lnk, void *vnecp)
msgp = erts_alloc_message_heap(rp, &rp_locks,
3, &hp, &ohp);
tup = TUPLE2(hp, am_nodedown, name);
- erts_queue_message(rp, &rp_locks, msgp, tup);
+ erts_queue_message(rp, rp_locks, msgp, tup, am_system);
}
erts_smp_proc_unlock(rp, rp_locks);
}
@@ -1456,7 +1456,7 @@ int erts_net_message(Port *prt,
token = copy_struct(token, token_size, &hp, ohp);
}
- erts_queue_dist_message(rp, &locks, ede_copy, token);
+ erts_queue_dist_message(rp, locks, ede_copy, token, from);
if (locks)
erts_smp_proc_unlock(rp, locks);
}
@@ -1505,7 +1505,7 @@ int erts_net_message(Port *prt,
token = copy_struct(token, token_size, &hp, ohp);
}
- erts_queue_dist_message(rp, &locks, ede_copy, token);
+ erts_queue_dist_message(rp, locks, ede_copy, token, tuple[2]);
if (locks)
erts_smp_proc_unlock(rp, locks);
}
@@ -1967,7 +1967,7 @@ dist_port_command(Port *prt, ErtsDistOutputBuf *obuf)
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
if (size > (Uint) INT_MAX)
- erts_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_DUMP_EXIT,
"Absurdly large distribution output data buffer "
"(%beu bytes) passed.\n",
size);
@@ -2007,7 +2007,7 @@ dist_port_commandv(Port *prt, ErtsDistOutputBuf *obuf)
ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));
if (size > (Uint) INT_MAX)
- erts_exit(ERTS_ABORT_EXIT,
+ erts_exit(ERTS_DUMP_EXIT,
"Absurdly large distribution output data buffer "
"(%beu bytes) passed.\n",
size);
@@ -3317,7 +3317,7 @@ send_nodes_mon_msg(Process *rp,
}
ASSERT(hend == hp);
- erts_queue_message(rp, rp_locksp, mp, msg);
+ erts_queue_message(rp, *rp_locksp, mp, msg, am_system);
}
static void
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index d04977b9ae..c367d4162c 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -137,6 +137,11 @@ static ErtsAllocatorState_t exec_alloc_state;
#endif
static ErtsAllocatorState_t test_alloc_state;
+#define ERTS_ALC_INFO_A_ALLOC_UTIL (ERTS_ALC_A_MAX + 1)
+#define ERTS_ALC_INFO_A_MSEG_ALLOC (ERTS_ALC_A_MAX + 2)
+#define ERTS_ALC_INFO_A_ERTS_MMAP (ERTS_ALC_A_MAX + 3)
+#define ERTS_ALC_INFO_A_MAX ERTS_ALC_INFO_A_ERTS_MMAP
+
typedef struct {
erts_smp_atomic32_t refc;
int only_sz;
@@ -145,13 +150,9 @@ typedef struct {
Process *proc;
Eterm ref;
Eterm ref_heap[REF_THING_SIZE];
- int allocs[ERTS_ALC_A_MAX-ERTS_ALC_A_MIN+1+2];
+ int allocs[ERTS_ALC_INFO_A_MAX - ERTS_ALC_A_MIN + 1 + 1];
} ErtsAllocInfoReq;
-#define ERTS_ALC_INFO_A_ALLOC_UTIL (ERTS_ALC_A_MAX + 1)
-#define ERTS_ALC_INFO_A_MSEG_ALLOC (ERTS_ALC_A_MAX + 2)
-#define ERTS_ALC_INFO_A_MAX ERTS_ALC_INFO_A_MSEG_ALLOC
-
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(aireq,
ErtsAllocInfoReq,
5,
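
With ERTS_ALC_INFO_A_ERTS_MMAP added, the info pseudo-allocators now end at ERTS_ALC_A_MAX + 3, and the request's allocs array is sized ERTS_ALC_INFO_A_MAX - ERTS_ALC_A_MIN + 1 + 1: one slot per real allocator, one per info pseudo-allocator, plus one spare slot (presumably the list terminator). A small self-contained check of that arithmetic, using made-up bounds rather than the real allocator numbers:

    #include <stdio.h>

    /* Hypothetical bounds, only to make the arithmetic concrete. */
    #define ALC_A_MIN          1
    #define ALC_A_MAX          9
    #define INFO_A_ALLOC_UTIL  (ALC_A_MAX + 1)
    #define INFO_A_MSEG_ALLOC  (ALC_A_MAX + 2)
    #define INFO_A_ERTS_MMAP   (ALC_A_MAX + 3)
    #define INFO_A_MAX         INFO_A_ERTS_MMAP

    int main(void)
    {
        int real   = ALC_A_MAX - ALC_A_MIN + 1;      /* real allocators   */
        int pseudo = INFO_A_MAX - ALC_A_MAX;         /* info-only entries */
        int slots  = INFO_A_MAX - ALC_A_MIN + 1 + 1; /* as in the struct  */

        printf("%d real + %d pseudo + 1 spare = %d slots\n",
               real, pseudo, real + pseudo + 1);
        printf("declared array size: %d\n", slots);
        return 0;
    }
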
@@ -2840,10 +2841,18 @@ erts_allocator_info(int to, void *arg)
int i;
for (i = 0; i <= max; i++) {
erts_print(to, arg, "=allocator:mseg_alloc[%d]\n", i);
- erts_mseg_info(i, &to, arg, 0, NULL, NULL);
+ erts_mseg_info(i, &to, arg, 0, 0, NULL, NULL);
}
- erts_print(to, arg, "=allocator:mseg_alloc.erts_mmap\n");
+ erts_print(to, arg, "=allocator:erts_mmap.default_mmap\n");
erts_mmap_info(&erts_dflt_mmapper, &to, arg, NULL, NULL, &emis);
+#if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
+ erts_print(to, arg, "=allocator:erts_mmap.literal_mmap\n");
+ erts_mmap_info(&erts_literal_mmapper, &to, arg, NULL, NULL, &emis);
+#endif
+#ifdef ERTS_ALC_A_EXEC
+ erts_print(to, arg, "=allocator:erts_mmap.exec_mmap\n");
+ erts_mmap_info(&erts_exec_mmapper, &to, arg, NULL, NULL, &emis);
+#endif
}
#endif
@@ -2954,6 +2963,11 @@ erts_allocator_options(void *proc)
atoms[length] = am_atom_put("alloc_util", 10);
terms[length++] = erts_alcu_au_info_options(NULL, NULL, hpp, szp);
+#if HAVE_ERTS_MMAP
+ atoms[length] = ERTS_MAKE_AM("erts_mmap");
+ terms[length++] = erts_mmap_info_options(&erts_dflt_mmapper, NULL, NULL,
+ NULL, hpp, szp);
+#endif
{
Eterm o[3], v[3];
o[0] = am_atom_put("m", 1);
@@ -2990,7 +3004,12 @@ erts_allocator_options(void *proc)
#if ERTS_HAVE_ERTS_SYS_ALIGNED_ALLOC
terms[length++] = am_atom_put("sys_aligned_alloc", 17);
#endif
-
+#if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
+ terms[length++] = ERTS_MAKE_AM("literal_mmap");
+#endif
+#ifdef ERTS_ALC_A_EXEC
+ terms[length++] = ERTS_MAKE_AM("exec_mmap");
+#endif
features = length ? erts_bld_list(hpp, szp, length, terms) : NIL;
#if defined(__GLIBC__)
@@ -3075,7 +3094,15 @@ reply_alloc_info(void *vair)
Uint sz, *szp;
ErlOffHeap *ohp = NULL;
ErtsMessage *mp = NULL;
- struct erts_mmap_info_struct emis;
+#if HAVE_ERTS_MMAP
+ struct erts_mmap_info_struct mmap_info_dflt;
+# if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
+ struct erts_mmap_info_struct mmap_info_literal;
+# endif
+# ifdef ERTS_ALC_A_EXEC
+ struct erts_mmap_info_struct mmap_info_exec;
+# endif
+#endif
int i;
Eterm (*info_func)(Allctr_t *,
int,
@@ -3181,26 +3208,53 @@ reply_alloc_info(void *vair)
make_small(0),
ainfo);
break;
+ case ERTS_ALC_INFO_A_ERTS_MMAP:
+ alloc_atom = erts_bld_atom(hpp, szp, "erts_mmap");
+#if HAVE_ERTS_MMAP
+ ainfo = (air->only_sz ? NIL :
+ erts_mmap_info(&erts_dflt_mmapper, NULL, NULL,
+ hpp, szp, &mmap_info_dflt));
+ ainfo = erts_bld_tuple3(hpp, szp,
+ alloc_atom,
+ erts_bld_atom(hpp,szp,"default_mmap"),
+ ainfo);
+# if defined(ARCH_64) && defined(ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION)
+ ai_list = erts_bld_cons(hpp, szp,
+ ainfo, ai_list);
+ ainfo = (air->only_sz ? NIL :
+ erts_mmap_info(&erts_literal_mmapper, NULL, NULL,
+ hpp, szp, &mmap_info_literal));
+ ainfo = erts_bld_tuple3(hpp, szp,
+ alloc_atom,
+ erts_bld_atom(hpp,szp,"literal_mmap"),
+ ainfo);
+# endif
+# ifdef ERTS_ALC_A_EXEC
+ ai_list = erts_bld_cons(hpp, szp,
+ ainfo, ai_list);
+ ainfo = (air->only_sz ? NIL :
+ erts_mmap_info(&erts_exec_mmapper, NULL, NULL,
+ hpp, szp, &mmap_info_exec));
+ ainfo = erts_bld_tuple3(hpp, szp,
+ alloc_atom,
+ erts_bld_atom(hpp,szp,"exec_mmap"),
+ ainfo);
+# endif
+#else /* !HAVE_ERTS_MMAP */
+ ainfo = erts_bld_tuple2(hpp, szp, alloc_atom,
+ am_false);
+#endif
+ break;
case ERTS_ALC_INFO_A_MSEG_ALLOC:
alloc_atom = erts_bld_atom(hpp, szp, "mseg_alloc");
#if HAVE_ERTS_MSEG
- ainfo = (air->only_sz
- ? NIL
- : erts_mseg_info(0, NULL, NULL, hpp != NULL,
- hpp, szp));
+ ainfo = erts_mseg_info(0, NULL, NULL, hpp != NULL,
+ air->only_sz, hpp, szp);
ainfo = erts_bld_tuple3(hpp, szp,
alloc_atom,
make_small(0),
ainfo);
- ai_list = erts_bld_cons(hpp, szp,
- ainfo, ai_list);
- ainfo = (air->only_sz ? NIL :
- erts_mmap_info(&erts_dflt_mmapper, NULL, NULL, hpp, szp, &emis));
- ainfo = erts_bld_tuple3(hpp, szp,
- alloc_atom,
- erts_bld_atom(hpp,szp,"erts_mmap"),
- ainfo);
#else
ainfo = erts_bld_tuple2(hpp, szp, alloc_atom,
am_false);
@@ -3232,15 +3286,14 @@ reply_alloc_info(void *vair)
}
switch (ai) {
case ERTS_ALC_A_SYSTEM:
- case ERTS_ALC_INFO_A_ALLOC_UTIL:
+ case ERTS_ALC_INFO_A_ALLOC_UTIL:
+ case ERTS_ALC_INFO_A_ERTS_MMAP:
break;
case ERTS_ALC_INFO_A_MSEG_ALLOC:
#if HAVE_ERTS_MSEG && defined(ERTS_SMP)
alloc_atom = erts_bld_atom(hpp, szp, "mseg_alloc");
- ainfo = (air->only_sz
- ? NIL
- : erts_mseg_info(sched_id, NULL, NULL,
- hpp != NULL, hpp, szp));
+ ainfo = erts_mseg_info(sched_id, NULL, NULL,
+ hpp != NULL, air->only_sz, hpp, szp);
ainfo = erts_bld_tuple(hpp, szp, 3,
alloc_atom,
make_small(sched_id),
@@ -3286,7 +3339,7 @@ reply_alloc_info(void *vair)
if (hp != hp_end)
erts_shrink_message_heap(&mp, rp, hp_start, hp, hp_end, &msg, 1);
- erts_queue_message(rp, &rp_locks, mp, msg);
+ erts_queue_message(rp, rp_locks, mp, msg, am_system);
if (air->req_sched == sched_id)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -3306,7 +3359,7 @@ erts_request_alloc_info(struct process *c_p,
int internal)
{
ErtsAllocInfoReq *air = aireq_alloc();
- Eterm req_ai[ERTS_ALC_A_MAX+1+2] = {0};
+ Eterm req_ai[ERTS_ALC_INFO_A_MAX+1] = {0};
Eterm alist;
Eterm *hp;
int airix = 0, ai;
@@ -3342,6 +3395,10 @@ erts_request_alloc_info(struct process *c_p,
ai = ERTS_ALC_INFO_A_MSEG_ALLOC;
goto save_alloc;
}
+ if (erts_is_atom_str("erts_mmap", alloc, 0)) {
+ ai = ERTS_ALC_INFO_A_ERTS_MMAP;
+ goto save_alloc;
+ }
if (erts_is_atom_str("alloc_util", alloc, 0)) {
ai = ERTS_ALC_INFO_A_ALLOC_UTIL;
save_alloc:
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index ba216c7eb4..227fedfb69 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -278,6 +278,7 @@ type IOB_REQ SHORT_LIVED SYSTEM io_bytes_request
type TRACER_NIF LONG_LIVED SYSTEM tracer_nif
type TRACE_MSG_QUEUE SHORT_LIVED SYSTEM trace_message_queue
type SCHED_ASYNC_JOB SHORT_LIVED SYSTEM async_calls
+type DIRTY_START STANDARD PROCESSES dirty_start
+if threads_no_smp
# Need thread safe allocs, but std_alloc and fix_alloc are not;
diff --git a/erts/emulator/beam/erl_bif_ddll.c b/erts/emulator/beam/erl_bif_ddll.c
index 1e2db38442..ef77201544 100644
--- a/erts/emulator/beam/erl_bif_ddll.c
+++ b/erts/emulator/beam/erl_bif_ddll.c
@@ -1737,7 +1737,7 @@ static void notify_proc(Process *proc, Eterm ref, Eterm driver_name, Eterm type,
hp += REF_THING_SIZE;
mess = TUPLE5(hp,type,r,am_driver,driver_name,tag);
}
- erts_queue_message(proc, &rp_locks, mp, mess);
+ erts_queue_message(proc, rp_locks, mp, mess, am_system);
erts_smp_proc_unlock(proc, rp_locks);
ERTS_SMP_CHK_NO_PROC_LOCKS;
}
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 99fe847ba2..2e195db0ee 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -45,6 +45,7 @@
#include "erl_async.h"
#include "erl_thr_progress.h"
#include "erl_bif_unique.h"
+#include "erl_map.h"
#define ERTS_PTAB_WANT_DEBUG_FUNCS__
#include "erl_ptab.h"
#ifdef HIPE
@@ -594,6 +595,7 @@ static Eterm pi_args[] = {
am_suspending,
am_min_heap_size,
am_min_bin_vheap_size,
+ am_max_heap_size,
am_current_location,
am_current_stacktrace,
am_message_queue_data,
@@ -643,10 +645,11 @@ pi_arg2ix(Eterm arg)
case am_suspending: return 26;
case am_min_heap_size: return 27;
case am_min_bin_vheap_size: return 28;
- case am_current_location: return 29;
- case am_current_stacktrace: return 30;
- case am_message_queue_data: return 31;
- case am_garbage_collection_info: return 32;
+ case am_max_heap_size: return 29;
+ case am_current_location: return 30;
+ case am_current_stacktrace: return 31;
+ case am_message_queue_data: return 32;
+ case am_garbage_collection_info: return 33;
default: return -1;
}
}
@@ -1107,7 +1110,7 @@ process_info_aux(Process *BIF_P,
break;
case am_status:
- res = erts_process_status(BIF_P, ERTS_PROC_LOCK_MAIN, rp, rpid);
+ res = erts_process_status(rp, rpid);
ASSERT(res != am_undefined);
hp = HAlloc(BIF_P, 3);
break;
@@ -1348,6 +1351,18 @@ process_info_aux(Process *BIF_P,
break;
}
+ case am_max_heap_size: {
+ Uint hsz = 3;
+ (void) erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp),
+ MAX_HEAP_SIZE_FLAGS_GET(rp),
+ NULL, &hsz);
+ hp = HAlloc(BIF_P, hsz);
+ res = erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp),
+ MAX_HEAP_SIZE_FLAGS_GET(rp),
+ &hp, NULL);
+ break;
+ }
+
case am_total_heap_size: {
ErtsMessage *mp;
Uint total_heap_size;
@@ -1391,8 +1406,12 @@ process_info_aux(Process *BIF_P,
case am_garbage_collection: {
DECL_AM(minor_gcs);
Eterm t;
+ Uint map_sz = 0;
+
+ erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp), MAX_HEAP_SIZE_FLAGS_GET(rp), NULL, &map_sz);
- hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2 + 3); /* last "3" is for outside tuple */
+ hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2 + 3+2 + map_sz + 3);
+ /* last "3" is for outside tuple */
t = TUPLE2(hp, AM_minor_gcs, make_small(GEN_GCS(rp))); hp += 3;
res = CONS(hp, t, NIL); hp += 2;
@@ -1403,6 +1422,11 @@ process_info_aux(Process *BIF_P,
res = CONS(hp, t, res); hp += 2;
t = TUPLE2(hp, am_min_bin_vheap_size, make_small(MIN_VHEAP_SIZE(rp))); hp += 3;
res = CONS(hp, t, res); hp += 2;
+
+ t = erts_max_heap_size_map(MAX_HEAP_SIZE_GET(rp), MAX_HEAP_SIZE_FLAGS_GET(rp), &hp, NULL);
+
+ t = TUPLE2(hp, am_max_heap_size, t); hp += 3;
+ res = CONS(hp, t, res); hp += 2;
break;
}
@@ -1412,12 +1436,12 @@ process_info_aux(Process *BIF_P,
if (rp == BIF_P) {
sz += ERTS_PROCESS_GC_INFO_MAX_SIZE;
} else {
- erts_process_gc_info(rp, &sz, NULL);
+ erts_process_gc_info(rp, &sz, NULL, 0, 0);
sz += 3;
}
hp = HAlloc(BIF_P, sz);
- res = erts_process_gc_info(rp, &actual_sz, &hp);
+ res = erts_process_gc_info(rp, &actual_sz, &hp, 0, 0);
/* We may have some extra space, fill with 0 tuples */
if (actual_sz <= sz - 3) {
@@ -2035,12 +2059,8 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
Uint arity = *tp++;
return info_1_tuple(BIF_P, tp, arityval(arity));
} else if (BIF_ARG_1 == am_scheduler_id) {
-#ifdef ERTS_SMP
- ASSERT(BIF_P->scheduler_data);
- BIF_RET(make_small(BIF_P->scheduler_data->no));
-#else
- BIF_RET(make_small(1));
-#endif
+ ErtsSchedulerData *esdp = erts_proc_sched_data(BIF_P);
+ BIF_RET(make_small(esdp->no));
} else if (BIF_ARG_1 == am_compat_rel) {
ASSERT(erts_compat_rel > 0);
BIF_RET(make_small(erts_compat_rel));
@@ -2173,7 +2193,7 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
} else if (BIF_ARG_1 == am_garbage_collection){
Uint val = (Uint) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
Eterm tup;
- hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2);
+ hp = HAlloc(BIF_P, 3+2 + 3+2 + 3+2 + 3+2);
tup = TUPLE2(hp, am_fullsweep_after, make_small(val)); hp += 3;
res = CONS(hp, tup, NIL); hp += 2;
@@ -2184,6 +2204,9 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
tup = TUPLE2(hp, am_min_bin_vheap_size, make_small(BIN_VH_MIN_SIZE)); hp += 3;
res = CONS(hp, tup, res); hp += 2;
+ tup = TUPLE2(hp, am_max_heap_size, make_small(H_MAX_SIZE)); hp += 3;
+ res = CONS(hp, tup, res); hp += 2;
+
BIF_RET(res);
} else if (BIF_ARG_1 == am_fullsweep_after){
Uint val = (Uint) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
@@ -2194,6 +2217,12 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1)
hp = HAlloc(BIF_P, 3);
res = TUPLE2(hp, am_min_heap_size,make_small(H_MIN_SIZE));
BIF_RET(res);
+ } else if (BIF_ARG_1 == am_max_heap_size) {
+ Uint sz = 0;
+ erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, NULL, &sz);
+ hp = HAlloc(BIF_P, sz);
+ res = erts_max_heap_size_map(H_MAX_SIZE, H_MAX_FLAGS, &hp, NULL);
+ BIF_RET(res);
} else if (BIF_ARG_1 == am_min_bin_vheap_size) {
hp = HAlloc(BIF_P, 3);
res = TUPLE2(hp, am_min_bin_vheap_size,make_small(BIN_VH_MIN_SIZE));
@@ -3555,7 +3584,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
BIF_RET(res);
}
else if (ERTS_IS_ATOM_STR("mmap", BIF_ARG_1)) {
- BIF_RET(erts_mmap_debug_info(&erts_dflt_mmapper, BIF_P));
+ BIF_RET(erts_mmap_debug_info(BIF_P));
}
else if (ERTS_IS_ATOM_STR("unique_monotonic_integer_state", BIF_ARG_1)) {
BIF_RET(erts_debug_get_unique_monotonic_integer_state(BIF_P));
@@ -3589,10 +3618,7 @@ BIF_RETTYPE erts_debug_get_internal_state_1(BIF_ALIST_1)
/* Used by timer process_SUITE, timer_bif_SUITE, and
node_container_SUITE (emulator) */
if (is_internal_pid(tp[2])) {
- BIF_RET(erts_process_status(BIF_P,
- ERTS_PROC_LOCK_MAIN,
- NULL,
- tp[2]));
+ BIF_RET(erts_process_status(NULL, tp[2]));
}
}
else if (ERTS_IS_ATOM_STR("link_list", tp[1])) {
diff --git a/erts/emulator/beam/erl_bif_re.c b/erts/emulator/beam/erl_bif_re.c
index f4daecd2a4..ff7746ce1d 100644
--- a/erts/emulator/beam/erl_bif_re.c
+++ b/erts/emulator/beam/erl_bif_re.c
@@ -630,9 +630,15 @@ static Eterm build_exec_return(Process *p, int rc, RestartContext *restartp, Ete
}
} else {
ReturnInfo *ri;
- ReturnInfo defri = {RetIndex,0,{0}};
+ ReturnInfo defri;
if (restartp->ret_info == NULL) {
+ /* The gcc shipped with OpenBSD 5.8 generates bad code
+ if defri is initialized with an aggregate initializer
+ at its declaration above, so assign the members explicitly. */
+ defri.type = RetIndex;
+ defri.num_spec = 0;
+ defri.v[0] = 0;
ri = &defri;
} else {
ri = restartp->ret_info;
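
The erl_bif_re.c change works around that miscompilation by replacing an aggregate initializer with explicit member assignments; the two forms are semantically equivalent in C. A minimal self-contained illustration of the rewrite, with a simplified stand-in struct:

    #include <stdio.h>

    struct return_info {
        int type;
        int num_spec;
        int v[1];
    };

    int main(void)
    {
        /* Aggregate initializer: the form reportedly miscompiled. */
        struct return_info a = { 1, 0, {0} };

        /* Member-by-member assignment: the workaround used above. */
        struct return_info b;
        b.type = 1;
        b.num_spec = 0;
        b.v[0] = 0;

        printf("%d %d %d / %d %d %d\n",
               a.type, a.num_spec, a.v[0], b.type, b.num_spec, b.v[0]);
        return 0;
    }
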
diff --git a/erts/emulator/beam/erl_bif_trace.c b/erts/emulator/beam/erl_bif_trace.c
index ff2018aa27..66e5146da0 100644
--- a/erts/emulator/beam/erl_bif_trace.c
+++ b/erts/emulator/beam/erl_bif_trace.c
@@ -68,6 +68,9 @@ static struct { /* Protected by code write permission */
static Eterm
trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist);
+static int
+erts_set_tracing_event_pattern(Eterm event, Binary*, int on);
+
#ifdef ERTS_SMP
static void smp_bp_finisher(void* arg);
#endif
@@ -78,6 +81,8 @@ static void new_seq_trace_token(Process* p); /* help func for seq_trace_2*/
static Eterm trace_info_pid(Process* p, Eterm pid_spec, Eterm key);
static Eterm trace_info_func(Process* p, Eterm pid_spec, Eterm key);
static Eterm trace_info_on_load(Process* p, Eterm key);
+static Eterm trace_info_event(Process* p, Eterm event, Eterm key);
+
static void reset_bif_trace(void);
static void setup_bif_trace(void);
@@ -85,14 +90,26 @@ static void install_exp_breakpoints(BpFunctions* f);
static void uninstall_exp_breakpoints(BpFunctions* f);
static void clean_export_entries(BpFunctions* f);
+ErtsTracingEvent erts_send_tracing[ERTS_NUM_BP_IX];
+ErtsTracingEvent erts_receive_tracing[ERTS_NUM_BP_IX];
+
void
erts_bif_trace_init(void)
{
+ int i;
+
erts_default_trace_pattern_is_on = 0;
erts_default_match_spec = NULL;
erts_default_meta_match_spec = NULL;
erts_default_trace_pattern_flags = erts_trace_pattern_flags_off;
erts_default_meta_tracer = erts_tracer_nil;
+
+ for (i=0; i<ERTS_NUM_BP_IX; i++) {
+ erts_send_tracing[i].on = 1;
+ erts_send_tracing[i].match_spec = NULL;
+ erts_receive_tracing[i].on = 1;
+ erts_receive_tracing[i].match_spec = NULL;
+ }
}
/*
@@ -137,15 +154,18 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
on = 1;
} else if (Pattern == am_restart) {
match_prog_set = NULL;
- on = erts_break_reset;
+ on = ERTS_BREAK_RESTART;
} else if (Pattern == am_pause) {
match_prog_set = NULL;
- on = erts_break_stop;
- } else if ((match_prog_set = erts_match_set_compile(p, Pattern)) != NULL) {
- MatchSetRef(match_prog_set);
- on = 1;
- } else{
- goto error;
+ on = ERTS_BREAK_PAUSE;
+ } else {
+ match_prog_set = erts_match_set_compile(p, Pattern, MFA);
+ if (match_prog_set) {
+ MatchSetRef(match_prog_set);
+ on = 1;
+ } else{
+ goto error;
+ }
}
is_global = 0;
@@ -314,6 +334,11 @@ trace_pattern(Process* p, Eterm MFA, Eterm Pattern, Eterm flaglist)
matches = erts_set_trace_pattern(p, mfa, specified,
match_prog_set, match_prog_set,
on, flags, meta_tracer, 0);
+ } else if (is_atom(MFA)) {
+ if (is_global || flags.breakpoint || on > ERTS_BREAK_SET) {
+ goto error;
+ }
+ matches = erts_set_tracing_event_pattern(MFA, match_prog_set, on);
}
error:
@@ -487,8 +512,7 @@ start_trace(Process *c_p, ErtsTracer tracer,
&& !ERTS_TRACER_COMPARE(ERTS_TRACER(port), tracer)) {
/* This tracee is already being traced, and not by the
* tracer to be */
- if (erts_is_tracer_proc_enabled(c_p, ERTS_PROC_LOCKS_ALL,
- common, am_trace_status)) {
+ if (erts_is_tracer_enabled(tracer, common)) {
/* The tracer is still in use */
return 1;
}
@@ -791,6 +815,8 @@ Eterm trace_info_2(BIF_ALIST_2)
if (What == am_on_load) {
res = trace_info_on_load(p, Key);
+ } else if (What == am_send || What == am_receive) {
+ res = trace_info_event(p, What, Key);
} else if (is_atom(What) || is_pid(What) || is_port(What)) {
res = trace_info_pid(p, What, Key);
} else if (is_tuple(What)) {
@@ -829,7 +855,7 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
return am_undefined;
if (!ERTS_TRACER_IS_NIL(ERTS_TRACER(tracee)))
- erts_is_tracer_proc_enabled(NULL, 0, &tracee->common, am_trace_status);
+ erts_is_tracer_proc_enabled(NULL, 0, &tracee->common);
tracer = erts_tracer_to_term(p, ERTS_TRACER(tracee));
trace_flags = ERTS_TRACE_FLAGS(tracee);
@@ -837,22 +863,24 @@ trace_info_pid(Process* p, Eterm pid_spec, Eterm key)
erts_port_release(tracee);
} else if (is_internal_pid(pid_spec)) {
- Process *tracee;
- tracee = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
- pid_spec, ERTS_PROC_LOCK_MAIN);
+ Process *tracee = erts_pid2proc_not_running(p, ERTS_PROC_LOCK_MAIN,
+ pid_spec, ERTS_PROC_LOCK_MAIN);
+
+ if (tracee == ERTS_PROC_LOCK_BUSY)
+ ERTS_BIF_YIELD2(bif_export[BIF_trace_info_2], p, pid_spec, key);
if (!tracee)
return am_undefined;
if (!ERTS_TRACER_IS_NIL(ERTS_TRACER(tracee)))
erts_is_tracer_proc_enabled(tracee, ERTS_PROC_LOCK_MAIN,
- &tracee->common, am_trace_status);
+ &tracee->common);
tracer = erts_tracer_to_term(p, ERTS_TRACER(tracee));
trace_flags = ERTS_TRACE_FLAGS(tracee);
- if (tracee != p)
- erts_smp_proc_unlock(tracee, ERTS_PROC_LOCK_MAIN);
+ if (tracee != p)
+ erts_smp_proc_unlock(tracee, ERTS_PROC_LOCK_MAIN);
} else if (is_external_pid(pid_spec)
&& external_pid_dist_entry(pid_spec) == erts_this_dist_entry) {
return am_undefined;
@@ -1280,6 +1308,42 @@ trace_info_on_load(Process* p, Eterm key)
}
}
+static Eterm
+trace_info_event(Process* p, Eterm event, Eterm key)
+{
+ ErtsTracingEvent* te;
+ Eterm retval;
+ Eterm* hp;
+
+ switch (event) {
+ case am_send: te = erts_send_tracing; break;
+ case am_receive: te = erts_receive_tracing; break;
+ default:
+ goto error;
+ }
+
+ if (key != am_match_spec)
+ goto error;
+
+ te = &te[erts_active_bp_ix()];
+
+ if (te->on) {
+ if (!te->match_spec)
+ retval = am_true;
+ else
+ retval = copy_object(MatchSetGetSource(te->match_spec), p);
+ }
+ else
+ retval = am_false;
+
+ hp = HAlloc(p, 3);
+ return TUPLE2(hp, key, retval);
+
+ error:
+ BIF_ERROR(p, BADARG);
+}
+
+
#undef FUNC_TRACE_NOEXIST
#undef FUNC_TRACE_UNTRACED
#undef FUNC_TRACE_GLOBAL_TRACE
@@ -1307,7 +1371,7 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
for (i = 0; i < n; i++) {
BeamInstr* pc = fp[i].pc;
- Export* ep = (Export *)(((char *)(pc-3)) - offsetof(Export, code));
+ Export* ep = ErtsContainerStruct(pc, Export, code[3]);
if (on && !flags.breakpoint) {
/* Turn on global call tracing */
@@ -1468,12 +1532,57 @@ erts_set_trace_pattern(Process*p, Eterm* mfa, int specified,
}
int
+erts_set_tracing_event_pattern(Eterm event, Binary* match_spec, int on)
+{
+ ErtsBpIndex ix = erts_staging_bp_ix();
+ ErtsTracingEvent* st;
+
+ switch (event) {
+ case am_send: st = &erts_send_tracing[ix]; break;
+ case am_receive: st = &erts_receive_tracing[ix]; break;
+ default: return -1;
+ }
+
+ MatchSetUnref(st->match_spec);
+
+ st->on = on;
+ st->match_spec = match_spec;
+ MatchSetRef(match_spec);
+
+ finish_bp.current = 1; /* prepare phase not needed for event trace */
+ finish_bp.install = on;
+ finish_bp.e.matched = 0;
+ finish_bp.e.matching = NULL;
+ finish_bp.f.matched = 0;
+ finish_bp.f.matching = NULL;
+
+#ifndef ERTS_SMP
+ while (erts_finish_breakpointing()) {
+ /* Empty loop body */
+ }
+#endif
+ return 1;
+}
+
+static void
+consolidate_event_tracing(ErtsTracingEvent te[])
+{
+ ErtsTracingEvent* src = &te[erts_active_bp_ix()];
+ ErtsTracingEvent* dst = &te[erts_staging_bp_ix()];
+
+ MatchSetUnref(dst->match_spec);
+ dst->on = src->on;
+ dst->match_spec = src->match_spec;
+ MatchSetRef(dst->match_spec);
+}
+
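
erts_set_tracing_event_pattern writes the new send/receive setting into the staging breakpoint slot, and consolidate_event_tracing later copies the active slot back over the staging one so both indices agree before the next update. A self-contained sketch of that two-slot staging/active scheme; the types and index helpers below are simplified stand-ins for ErtsTracingEvent and erts_active_bp_ix()/erts_staging_bp_ix():

    #include <stdio.h>

    typedef struct { int on; const char *match_spec; } tracing_event;

    static tracing_event send_tracing[2] = { {1, NULL}, {1, NULL} };
    static int active_ix = 0;

    static int staging_ix(void) { return !active_ix; }

    /* Stage a new setting without disturbing tracing already in progress. */
    static void set_event_pattern(int on, const char *ms)
    {
        tracing_event *st = &send_tracing[staging_ix()];
        st->on = on;
        st->match_spec = ms;
    }

    /* Commit: flip the index, then copy active over staging so the next
     * update starts from the current state (cf. consolidate_event_tracing). */
    static void commit(void)
    {
        active_ix = staging_ix();
        send_tracing[staging_ix()] = send_tracing[active_ix];
    }

    int main(void)
    {
        set_event_pattern(1, "match spec A");
        commit();
        printf("active: on=%d spec=%s\n",
               send_tracing[active_ix].on,
               send_tracing[active_ix].match_spec);
        return 0;
    }
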
+int
erts_finish_breakpointing(void)
{
ERTS_SMP_LC_ASSERT(erts_has_code_write_permission());
/*
- * Memory barriers will be issued for all processes *before*
+ * Memory barriers will be issued for all schedulers *before*
* each of the stages below. (Unless the other schedulers
* are blocked, in which case memory barriers will be issued
* when they are awakened.)
@@ -1542,6 +1651,8 @@ erts_finish_breakpointing(void)
erts_consolidate_bp_data(&finish_bp.f, 1);
erts_bp_free_matched_functions(&finish_bp.e);
erts_bp_free_matched_functions(&finish_bp.f);
+ consolidate_event_tracing(erts_send_tracing);
+ consolidate_event_tracing(erts_receive_tracing);
return 0;
default:
ASSERT(0);
@@ -1556,11 +1667,10 @@ install_exp_breakpoints(BpFunctions* f)
BpFunction* fp = f->matching;
Uint ne = f->matched;
Uint i;
- Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
for (i = 0; i < ne; i++) {
BeamInstr* pc = fp[i].pc;
- Export* ep = (Export *) (((char *)pc)-offset);
+ Export* ep = ErtsContainerStruct(pc, Export, code[3]);
ep->addressv[code_ix] = pc;
}
@@ -1573,11 +1683,10 @@ uninstall_exp_breakpoints(BpFunctions* f)
BpFunction* fp = f->matching;
Uint ne = f->matched;
Uint i;
- Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
for (i = 0; i < ne; i++) {
BeamInstr* pc = fp[i].pc;
- Export* ep = (Export *) (((char *)pc)-offset);
+ Export* ep = ErtsContainerStruct(pc, Export, code[3]);
if (ep->addressv[code_ix] != pc) {
continue;
@@ -1594,11 +1703,10 @@ clean_export_entries(BpFunctions* f)
BpFunction* fp = f->matching;
Uint ne = f->matched;
Uint i;
- Uint offset = offsetof(Export, code) + 3*sizeof(BeamInstr);
for (i = 0; i < ne; i++) {
BeamInstr* pc = fp[i].pc;
- Export* ep = (Export *) (((char *)pc)-offset);
+ Export* ep = ErtsContainerStruct(pc, Export, code[3]);
if (ep->addressv[code_ix] == pc) {
continue;
@@ -2277,7 +2385,7 @@ reply_trace_delivered_all(void *vtdarp)
#ifdef ERTS_SMP
erts_send_sys_msg_proc(rp->common.id, rp->common.id, msg, bp);
#else
- erts_queue_message(rp, &rp_locks, mp, msg);
+ erts_queue_message(rp, rp_locks, mp, msg, am_system);
#endif
erts_free(ERTS_ALC_T_MISC_AUX_WORK, vtdarp);
diff --git a/erts/emulator/beam/erl_bif_unique.c b/erts/emulator/beam/erl_bif_unique.c
index 1e57e9fa53..7c70217d8d 100644
--- a/erts/emulator/beam/erl_bif_unique.c
+++ b/erts/emulator/beam/erl_bif_unique.c
@@ -257,7 +257,7 @@ static ERTS_INLINE Eterm unique_integer_bif(Process *c_p, int positive)
Uint hsz;
Eterm *hp;
- esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ esdp = erts_proc_sched_data(c_p);
thr_id = (Uint64) esdp->thr_id;
unique = esdp->unique++;
bld_unique_integer_term(NULL, &hsz, thr_id, unique, positive);
@@ -515,7 +515,7 @@ BIF_RETTYPE make_ref_0(BIF_ALIST_0)
hp = HAlloc(BIF_P, REF_THING_SIZE);
- res = erts_sched_make_ref_in_buffer(ERTS_PROC_GET_SCHDATA(BIF_P), hp);
+ res = erts_sched_make_ref_in_buffer(erts_proc_sched_data(BIF_P), hp);
BIF_RET(res);
}
diff --git a/erts/emulator/beam/erl_bits.h b/erts/emulator/beam/erl_bits.h
index 1c2a090f07..4bd5b24157 100644
--- a/erts/emulator/beam/erl_bits.h
+++ b/erts/emulator/beam/erl_bits.h
@@ -83,8 +83,8 @@ typedef struct erl_bin_match_struct{
#ifdef ERTS_SMP
/* the state resides in the current process' scheduler data */
#define ERL_BITS_DECLARE_STATEP struct erl_bits_state *EBS
-#define ERL_BITS_RELOAD_STATEP(P) do{EBS = &(P)->scheduler_data->erl_bits_state;}while(0)
-#define ERL_BITS_DEFINE_STATEP(P) struct erl_bits_state *EBS = &(P)->scheduler_data->erl_bits_state
+#define ERL_BITS_RELOAD_STATEP(P) do{EBS = &erts_proc_sched_data((P))->erl_bits_state;}while(0)
+#define ERL_BITS_DEFINE_STATEP(P) struct erl_bits_state *EBS = &erts_proc_sched_data((P))->erl_bits_state
#else
/* reentrant API but with a hidden single global state, for testing only */
extern struct erl_bits_state ErlBitsState_;
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index 615d23402b..bad34211a5 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -2833,7 +2833,8 @@ BIF_RETTYPE ets_match_spec_run_r_3(BIF_ALIST_3)
BIF_TRAP3(bif_export[BIF_ets_match_spec_run_r_3],
BIF_P,lst,BIF_ARG_2,ret);
}
- res = db_prog_match(BIF_P, mp, CAR(list_val(lst)), NULL, 0,
+ res = db_prog_match(BIF_P, BIF_P,
+ mp, CAR(list_val(lst)), NULL, 0,
ERTS_PAM_COPY_RESULT, &dummy);
if (is_value(res)) {
hp = HAlloc(BIF_P, 2);
@@ -3461,7 +3462,7 @@ static void fix_table_locked(Process* p, DbTable* tb)
fix = tb->common.fixations;
if (fix == NULL) {
tb->common.time.monotonic
- = erts_get_monotonic_time(ERTS_PROC_GET_SCHDATA(p));
+ = erts_get_monotonic_time(erts_proc_sched_data(p));
tb->common.time.offset = erts_get_time_offset();
}
else {
diff --git a/erts/emulator/beam/erl_db_util.c b/erts/emulator/beam/erl_db_util.c
index 76b96637ae..6732b708a8 100644
--- a/erts/emulator/beam/erl_db_util.c
+++ b/erts/emulator/beam/erl_db_util.c
@@ -123,6 +123,9 @@ do { \
#define TermWords(t) (((t) / (sizeof(UWord)/sizeof(Eterm))) + !!((t) % (sizeof(UWord)/sizeof(Eterm))))
+#define add_dmc_err(EINFO, STR, VAR, TERM, SEV) \
+ vadd_dmc_err(EINFO, SEV, VAR, STR, TERM)
+
static ERTS_INLINE Process *
get_proc(Process *cp, Uint32 cp_locks, Eterm id, Uint32 id_locks)
@@ -171,7 +174,8 @@ set_match_trace(Process *tracee_p, Eterm fail_term, ErtsTracer tracer,
ERTS_PROC_LOCKS_ALL == erts_proc_lc_my_proc_locks(tracee_p)
|| erts_thr_progress_is_blocking());
- if (ERTS_TRACER_IS_NIL(tracer) || erts_is_tracer_enabled(tracee_p, tracer))
+ if (ERTS_TRACER_IS_NIL(tracer)
+ || erts_is_tracer_enabled(tracer, &tracee_p->common))
return set_tracee_flags(tracee_p, tracer, d_flags, e_flags);
return fail_term;
}
@@ -411,17 +415,27 @@ get_match_pseudo_process(Process *c_p, Uint heap_size)
{
ErtsMatchPseudoProcess *mpsp;
#ifdef ERTS_SMP
- mpsp = (ErtsMatchPseudoProcess *) c_p->scheduler_data->match_pseudo_process;
- if (mpsp)
+ ErtsSchedulerData *esdp;
+
+ esdp = c_p ? c_p->scheduler_data : erts_get_scheduler_data();
+
+ mpsp = esdp ? esdp->match_pseudo_process :
+ (ErtsMatchPseudoProcess*) erts_smp_tsd_get(match_pseudo_process_key);
+
+ if (mpsp) {
+ ASSERT(mpsp == erts_smp_tsd_get(match_pseudo_process_key));
+ ASSERT(mpsp->process.scheduler_data == esdp);
cleanup_match_pseudo_process(mpsp, 0);
+ }
else {
ASSERT(erts_smp_tsd_get(match_pseudo_process_key) == NULL);
mpsp = create_match_pseudo_process();
- c_p->scheduler_data->match_pseudo_process = (void *) mpsp;
+ if (esdp) {
+ esdp->match_pseudo_process = (void *) mpsp;
+ }
+ mpsp->process.scheduler_data = esdp;
erts_smp_tsd_set(match_pseudo_process_key, (void *) mpsp);
}
- ASSERT(mpsp == erts_smp_tsd_get(match_pseudo_process_key));
- mpsp->process.scheduler_data = c_p->scheduler_data;
#else
mpsp = match_pseudo_process;
cleanup_match_pseudo_process(mpsp, 0);
@@ -889,11 +903,7 @@ void db_match_dis(Binary *prog);
#define TRACE /* Nothing */
#define FENCE_PATTERN_SIZE 0
#endif
-static void add_dmc_err(DMCErrInfo *err_info,
- char *str,
- int variable,
- Eterm term,
- DMCErrorSeverity severity);
+static void vadd_dmc_err(DMCErrInfo*, DMCErrorSeverity, int var, const char *str, ...);
static Eterm dpm_array_to_list(Process *psp, Eterm *arr, int arity);
@@ -989,12 +999,20 @@ Eterm erts_match_set_get_source(Binary *mpsp)
}
/* This one is for the tracing */
-Binary *erts_match_set_compile(Process *p, Eterm matchexpr) {
+Binary *erts_match_set_compile(Process *p, Eterm matchexpr, Eterm MFA) {
Binary *bin;
Uint sz;
Eterm *hp;
+ Uint flags;
+
+ switch (MFA) {
+ case am_receive: flags = DCOMP_TRACE; break;
+ case am_send: flags = DCOMP_TRACE | DCOMP_ALLOW_TRACE_OPS; break;
+ default:
+ flags = DCOMP_TRACE | DCOMP_CALL_TRACE | DCOMP_ALLOW_TRACE_OPS;
+ }
- bin = db_match_set_compile(p, matchexpr, DCOMP_TRACE);
+ bin = db_match_set_compile(p, matchexpr, flags);
if (bin != NULL) {
MatchProg *prog = Binary2MatchProg(bin);
sz = size_object(matchexpr);
@@ -1124,8 +1142,8 @@ Eterm db_match_set_lint(Process *p, Eterm matchexpr, Uint flags)
int i;
if (!is_list(matchexpr)) {
- add_dmc_err(err_info, "Match programs are not in a list.",
- -1, 0UL, dmcError);
+ add_dmc_err(err_info, "Match programs are not in a list.",
+ -1, 0UL, dmcError);
goto done;
}
num_heads = 0;
@@ -1133,9 +1151,8 @@ Eterm db_match_set_lint(Process *p, Eterm matchexpr, Uint flags)
++num_heads;
if (l != NIL) { /* proper list... */
- add_dmc_err(err_info, "Match programs are not in a proper "
- "list.",
- -1, 0UL, dmcError);
+ add_dmc_err(err_info, "Match programs are not in a proper list.",
+ -1, 0UL, dmcError);
goto done;
}
@@ -1202,30 +1219,37 @@ done:
return ret;
}
-Eterm erts_match_set_run(Process *p, Binary *mpsp,
- Eterm *args, int num_args,
- enum erts_pam_run_flags in_flags,
- Uint32 *return_flags)
+/* Returns
+ * am_false if no match or
+ * if {message,false} has been called,
+ * am_true if {message,_} has NOT been called or
+ * if {message,true} has been called,
+ * Msg if {message,Msg} has been called.
+ *
+ * If the return value is not an immediate term, then
+ * erts_match_set_release_result_trace() must be called to release it.
+ */
+Eterm erts_match_set_run_trace(Process *c_p,
+ Process *self,
+ Binary *mpsp,
+ Eterm *args, int num_args,
+ enum erts_pam_run_flags in_flags,
+ Uint32 *return_flags)
{
Eterm ret;
- ret = db_prog_match(p, mpsp, NIL, args, num_args,
+ ret = db_prog_match(c_p, self, mpsp, NIL, args, num_args,
in_flags, return_flags);
-#if defined(HARDDEBUG)
- if (is_non_value(ret)) {
- erts_fprintf(stderr, "Failed\n");
- } else {
- erts_fprintf(stderr, "Returning : %T\n", ret);
+
+ ASSERT(!(is_non_value(ret) && *return_flags));
+
+ if (is_non_value(ret) || ret == am_false) {
+ erts_match_set_release_result(c_p);
+ return am_false;
}
-#endif
+ if (is_immed(ret))
+ erts_match_set_release_result(c_p);
return ret;
- /* Returns
- * THE_NON_VALUE if no match
- * am_false if {message,false} has been called,
- * am_true if {message,_} has not been called or
- * if {message,true} has been called,
- * Msg if {message,Msg} has been called.
- */
}
static Eterm erts_match_set_run_ets(Process *p, Binary *mpsp,
@@ -1234,7 +1258,8 @@ static Eterm erts_match_set_run_ets(Process *p, Binary *mpsp,
{
Eterm ret;
- ret = db_prog_match(p, mpsp, args, NULL, num_args,
+ ret = db_prog_match(p, p,
+ mpsp, args, NULL, num_args,
ERTS_PAM_COPY_RESULT,
return_flags);
#if defined(HARDDEBUG)
@@ -1730,7 +1755,9 @@ static Eterm dpm_array_to_list(Process *psp, Eterm *arr, int arity)
** the parameter 'arity' is only used if 'term' is actually an array,
** i.e. 'DCOMP_TRACE' was specified
*/
-Eterm db_prog_match(Process *c_p, Binary *bprog,
+Eterm db_prog_match(Process *c_p,
+ Process *self,
+ Binary *bprog,
Eterm term,
Eterm *termp,
int arity,
@@ -1743,10 +1770,10 @@ Eterm db_prog_match(Process *c_p, Binary *bprog,
Eterm *esp;
MatchVariable* variables;
BeamInstr *cp;
- UWord *pc = prog->text;
+ const UWord *pc = prog->text;
Eterm *ehp;
Eterm ret;
- Uint n = 0; /* To avoid warning. */
+ Uint n;
int i;
unsigned do_catch;
ErtsMatchPseudoProcess *mpsp;
@@ -1758,46 +1785,28 @@ Eterm db_prog_match(Process *c_p, Binary *bprog,
Eterm (*bif)(Process*, ...);
Eterm bif_args[3];
int fail_label;
- int atomic_trace;
#ifdef DMC_DEBUG
Uint *heap_fence;
Uint *stack_fence;
Uint save_op;
#endif /* DMC_DEBUG */
+ ERTS_UNDEF(n,0);
+ ERTS_UNDEF(current_scheduled,NULL);
+
+ ASSERT(c_p || !(in_flags & ERTS_PAM_COPY_RESULT));
+
mpsp = get_match_pseudo_process(c_p, prog->heap_size);
psp = &mpsp->process;
/* We need to lure the scheduler into believing in the pseudo process,
because of floating point exceptions. Do *after* mpsp is set!!! */
- esdp = ERTS_GET_SCHEDULER_DATA_FROM_PROC(c_p);
- ASSERT(esdp != NULL);
- current_scheduled = esdp->current_process;
+ esdp = erts_get_scheduler_data();
+ if (esdp)
+ current_scheduled = esdp->current_process;
/* SMP: psp->scheduler_data is set by get_match_pseudo_process */
- atomic_trace = 0;
-#define BEGIN_ATOMIC_TRACE(p) \
- do { \
- if (! atomic_trace) { \
- erts_refc_inc(&bprog->refc, 2); \
- erts_smp_proc_unlock((p), ERTS_PROC_LOCK_MAIN); \
- erts_smp_thr_progress_block(); \
- atomic_trace = !0; \
- } \
- } while (0)
-#define END_ATOMIC_TRACE(p) \
- do { \
- if (atomic_trace) { \
- erts_smp_thr_progress_unblock(); \
- erts_smp_proc_lock((p), ERTS_PROC_LOCK_MAIN); \
- if (erts_refc_dectest(&bprog->refc, 0) == 0) {\
- erts_bin_free(bprog); \
- } \
- atomic_trace = 0; \
- } \
- } while (0)
-
#ifdef DMC_DEBUG
save_op = 0;
heap_fence = (Eterm*)((char*) mpsp->u.heap + prog->stack_offset) - 1;
@@ -2256,7 +2265,7 @@ restart:
pc += n;
break;
case matchSelf:
- *esp++ = c_p->common.id;
+ *esp++ = self->common.id;
break;
case matchWaste:
--esp;
@@ -2266,6 +2275,7 @@ restart:
break;
case matchProcessDump: {
erts_dsprintf_buf_t *dsbufp = erts_create_tmp_dsbuf(0);
+ ASSERT(c_p == self);
print_process_info(ERTS_PRINT_DSBUF, (void *) dsbufp, c_p);
*esp++ = new_binary(build_proc, (byte *)dsbufp->str,
dsbufp->str_len);
@@ -2284,14 +2294,16 @@ restart:
*return_flags |= MATCH_SET_EXCEPTION_TRACE;
*esp++ = am_true;
break;
- case matchIsSeqTrace:
+ case matchIsSeqTrace:
+ ASSERT(c_p == self);
if (have_seqtrace(SEQ_TRACE_TOKEN(c_p)))
*esp++ = am_true;
else
*esp++ = am_false;
break;
case matchSetSeqToken:
- t = erts_seq_trace(c_p, esp[-1], esp[-2], 0);
+ ASSERT(c_p == self);
+ t = erts_seq_trace(c_p, esp[-1], esp[-2], 0);
if (is_non_value(t)) {
esp[-2] = FAIL_TERM;
} else {
@@ -2299,7 +2311,8 @@ restart:
}
--esp;
break;
- case matchSetSeqTokenFake:
+ case matchSetSeqTokenFake:
+ ASSERT(c_p == self);
t = seq_trace_fake(c_p, esp[-1]);
if (is_non_value(t)) {
esp[-2] = FAIL_TERM;
@@ -2308,7 +2321,8 @@ restart:
}
--esp;
break;
- case matchGetSeqToken:
+ case matchGetSeqToken:
+ ASSERT(c_p == self);
if (have_no_seqtrace(SEQ_TRACE_TOKEN(c_p)))
*esp++ = NIL;
else {
@@ -2332,49 +2346,62 @@ restart:
ASSERT(is_immed(ehp[5]));
}
break;
- case matchEnableTrace:
+ case matchEnableTrace:
+ ASSERT(c_p == self);
if ( (n = erts_trace_flag2bit(esp[-1]))) {
- BEGIN_ATOMIC_TRACE(c_p);
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
set_tracee_flags(c_p, ERTS_TRACER(c_p), 0, n);
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
esp[-1] = am_true;
} else {
esp[-1] = FAIL_TERM;
}
break;
- case matchEnableTrace2:
+ case matchEnableTrace2:
+ ASSERT(c_p == self);
n = erts_trace_flag2bit((--esp)[-1]);
esp[-1] = FAIL_TERM;
if (n) {
- BEGIN_ATOMIC_TRACE(c_p);
- if ( (tmpp = get_proc(c_p, 0, esp[0], 0))) {
+ if ( (tmpp = get_proc(c_p, ERTS_PROC_LOCK_MAIN, esp[0], ERTS_PROC_LOCKS_ALL))) {
/* Always take over the tracer of the current process */
set_tracee_flags(tmpp, ERTS_TRACER(c_p), 0, n);
- esp[-1] = am_true;
+ if (tmpp == c_p)
+ erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL_MINOR);
+ else
+ erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL);
+ esp[-1] = am_true;
}
}
break;
- case matchDisableTrace:
+ case matchDisableTrace:
+ ASSERT(c_p == self);
if ( (n = erts_trace_flag2bit(esp[-1]))) {
- BEGIN_ATOMIC_TRACE(c_p);
+ erts_smp_proc_lock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
set_tracee_flags(c_p, ERTS_TRACER(c_p), n, 0);
+ erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
esp[-1] = am_true;
} else {
esp[-1] = FAIL_TERM;
}
break;
- case matchDisableTrace2:
+ case matchDisableTrace2:
+ ASSERT(c_p == self);
n = erts_trace_flag2bit((--esp)[-1]);
esp[-1] = FAIL_TERM;
if (n) {
- BEGIN_ATOMIC_TRACE(c_p);
- if ( (tmpp = get_proc(c_p, 0, esp[0], 0))) {
+ if ( (tmpp = get_proc(c_p, ERTS_PROC_LOCK_MAIN, esp[0], ERTS_PROC_LOCKS_ALL))) {
/* Always take over the tracer of the current process */
set_tracee_flags(tmpp, ERTS_TRACER(c_p), n, 0);
- esp[-1] = am_true;
+ if (tmpp == c_p)
+ erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL_MINOR);
+ else
+ erts_smp_proc_unlock(tmpp, ERTS_PROC_LOCKS_ALL);
+ esp[-1] = am_true;
}
}
break;
- case matchCaller:
+ case matchCaller:
+ ASSERT(c_p == self);
if (!(c_p->cp) || !(cp = find_function_from_pc(c_p->cp))) {
*esp++ = am_undefined;
} else {
@@ -2386,7 +2413,8 @@ restart:
ehp[3] = make_small((Uint) cp[2]);
}
break;
- case matchSilent:
+ case matchSilent:
+ ASSERT(c_p == self);
--esp;
if (in_flags & ERTS_PAM_IGNORE_TRACE_SILENT)
break;
@@ -2401,7 +2429,8 @@ restart:
erts_smp_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
}
break;
- case matchTrace2:
+ case matchTrace2:
+ ASSERT(c_p == self);
{
/* disable enable */
Uint d_flags = 0, e_flags = 0; /* process trace flags */
@@ -2433,7 +2462,8 @@ restart:
ERTS_TRACER_CLEAR(&tracer);
}
break;
- case matchTrace3:
+ case matchTrace3:
+ ASSERT(c_p == self);
{
/* disable enable */
Uint d_flags = 0, e_flags = 0; /* process trace flags */
@@ -2506,15 +2536,12 @@ success:
}
#endif
- esdp->current_process = current_scheduled;
-
- END_ATOMIC_TRACE(c_p);
+ if (esdp)
+ esdp->current_process = current_scheduled;
return ret;
#undef FAIL
#undef FAIL_TERM
-#undef BEGIN_ATOMIC_TRACE
-#undef END_ATOMIC_TRACE
}
@@ -3259,20 +3286,20 @@ int erts_db_is_compiled_ms(Eterm term)
** Utility to add an error
*/
-static void add_dmc_err(DMCErrInfo *err_info,
- char *str,
- int variable,
- Eterm term,
- DMCErrorSeverity severity)
+static void vadd_dmc_err(DMCErrInfo *err_info,
+ DMCErrorSeverity severity,
+ int variable,
+ const char *str,
+ ...)
{
+ DMCError *e;
+ va_list args;
+ va_start(args, str);
+
+
/* Linked in in reverse order, to ease the formatting */
- DMCError *e = erts_alloc(ERTS_ALC_T_DB_DMC_ERROR, sizeof(DMCError));
- if (term != 0UL) {
- erts_snprintf(e->error_string, DMC_ERR_STR_LEN, str, term);
- } else {
- strncpy(e->error_string, str, DMC_ERR_STR_LEN);
- e->error_string[DMC_ERR_STR_LEN] ='\0';
- }
+ e = erts_alloc(ERTS_ALC_T_DB_DMC_ERROR, sizeof(DMCError));
+ erts_vsnprintf(e->error_string, DMC_ERR_STR_LEN, str, args);
e->variable = variable;
e->severity = severity;
e->next = err_info->first;
@@ -3282,8 +3309,11 @@ static void add_dmc_err(DMCErrInfo *err_info,
err_info->first = e;
if (severity >= dmcError)
err_info->error_added = 1;
+
+ va_end(args);
}
+
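
add_dmc_err is now a thin macro over vadd_dmc_err, which takes a printf-style format plus varargs and renders the message with erts_vsnprintf instead of the old fixed string/term split. A self-contained sketch of the same varargs pattern using the standard vsnprintf; the names and buffer size here are illustrative only:

    #include <stdarg.h>
    #include <stdio.h>

    #define ERR_STR_LEN 128

    struct err { char error_string[ERR_STR_LEN]; int variable; };

    /* Format an error message from a printf-style format and varargs,
     * mirroring the vadd_dmc_err() rewrite. */
    static void vadd_err(struct err *e, int variable, const char *fmt, ...)
    {
        va_list args;
        va_start(args, fmt);
        vsnprintf(e->error_string, ERR_STR_LEN, fmt, args);
        va_end(args);
        e->variable = variable;
    }

    int main(void)
    {
        struct err e;
        vadd_err(&e, -1, "Special form '%s' used in wrong dialect.", "caller");
        printf("%s (variable %d)\n", e.error_string, e.variable);
        return 0;
    }
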
/*
** Handle one term in the match expression (not the guard)
*/
@@ -3482,24 +3512,21 @@ static void do_emit_constant(DMCContext *context, DMC_STACK_TYPE(UWord) *text,
context->stack_need = context->stack_used;
}
-#define RETURN_ERROR_X(String, X, Y, ContextP, ConstantF) \
-do { \
-if ((ContextP)->err_info != NULL) { \
- (ConstantF) = 0; \
- add_dmc_err((ContextP)->err_info, String, X, Y, dmcError); \
- return retOk; \
-} else \
- return retFail; \
-} while(0)
+#define RETURN_ERROR_X(VAR, ContextP, ConstantF, String, ARG) \
+ (((ContextP)->err_info != NULL) \
+ ? ((ConstantF) = 0, \
+ vadd_dmc_err((ContextP)->err_info, dmcError, VAR, String, ARG), \
+ retOk) \
+ : retFail)
#define RETURN_ERROR(String, ContextP, ConstantF) \
- RETURN_ERROR_X(String, -1, 0UL, ContextP, ConstantF)
+ return RETURN_ERROR_X(-1, ContextP, ConstantF, String, 0)
#define RETURN_VAR_ERROR(String, N, ContextP, ConstantF) \
- RETURN_ERROR_X(String, N, 0UL, ContextP, ConstantF)
+ return RETURN_ERROR_X(N, ContextP, ConstantF, String, 0)
#define RETURN_TERM_ERROR(String, T, ContextP, ConstantF) \
- RETURN_ERROR_X(String, -1, T, ContextP, ConstantF)
+ return RETURN_ERROR_X(-1, ContextP, ConstantF, String, T)
#define WARNING(String, ContextP) \
add_dmc_err((ContextP)->err_info, String, -1, 0UL, dmcWarning)
@@ -3765,7 +3792,7 @@ static DMCRet dmc_variable(DMCContext *context,
Uint n = db_is_variable(t);
if (n >= heap->vars_used || !heap->vars[n].is_bound) {
- RETURN_VAR_ERROR("Variable $%d is unbound.", n, context, *constant);
+ RETURN_VAR_ERROR("Variable $%%d is unbound.", n, context, *constant);
}
dmc_add_pushv_variant(context, heap, text, n);
@@ -4097,7 +4124,30 @@ static DMCRet dmc_exception_trace(DMCContext *context,
return retOk;
}
-
+static int check_trace(const char* op,
+ DMCContext *context,
+ int *constant,
+ int need_cflags,
+ int allow_in_guard,
+ DMCRet* retp)
+{
+ if (!(context->cflags & DCOMP_TRACE)) {
+ *retp = RETURN_ERROR_X(-1, context, *constant, "Special form '%s' "
+ "used in wrong dialect.", op);
+ return 0;
+ }
+ if ((context->cflags & need_cflags) != need_cflags) {
+ *retp = RETURN_ERROR_X(-1, context, *constant, "Special form '%s' "
+ "not allow for this trace event.", op);
+ return 0;
+ }
+ if (context->is_guard && !allow_in_guard) {
+ *retp = RETURN_ERROR_X(-1, context, *constant, "Special form '%s' "
+ "called in guard context.", op);
+ return 0;
+ }
+ return 1;
+}
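
check_trace folds the three recurring guards (trace dialect, required compile flags, guard context) into one helper, so each dmc_* special form reduces to a single call plus its own arity check. A minimal self-contained version of the same shape, with placeholder flag values and a string out-parameter instead of the real DMCRet/err_info machinery:

    #include <stdio.h>

    #define F_TRACE      0x1   /* placeholder flag values */
    #define F_CALL_TRACE 0x2
    #define F_TRACE_OPS  0x4

    struct ctx { unsigned cflags; int is_guard; };

    /* Return 1 if the special form is allowed; otherwise set *err and return 0. */
    static int check_trace(const char *op, const struct ctx *c,
                           unsigned need, int allow_in_guard, const char **err)
    {
        static char buf[128];
        if (!(c->cflags & F_TRACE)) {
            snprintf(buf, sizeof buf, "'%s' used in wrong dialect.", op);
            *err = buf; return 0;
        }
        if ((c->cflags & need) != need) {
            snprintf(buf, sizeof buf, "'%s' not allowed for this trace event.", op);
            *err = buf; return 0;
        }
        if (c->is_guard && !allow_in_guard) {
            snprintf(buf, sizeof buf, "'%s' called in guard context.", op);
            *err = buf; return 0;
        }
        return 1;
    }

    int main(void)
    {
        struct ctx c = { F_TRACE, 1 };
        const char *err = NULL;
        if (!check_trace("silent", &c, F_TRACE_OPS, 0, &err))
            printf("error: %s\n", err);
        return 0;
    }
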
static DMCRet dmc_is_seq_trace(DMCContext *context,
DMCHeap *heap,
@@ -4107,12 +4157,11 @@ static DMCRet dmc_is_seq_trace(DMCContext *context,
{
Eterm *p = tuple_val(t);
Uint a = arityval(*p);
+ DMCRet ret;
- if (!(context->cflags & DCOMP_TRACE)) {
- RETURN_ERROR("Special form 'is_seq_trace' used in wrong dialect.",
- context,
- *constant);
- }
+ if (!check_trace("is_seq_trace", context, constant, DCOMP_ALLOW_TRACE_OPS, 1, &ret))
+ return ret;
+
if (a != 1) {
RETURN_TERM_ERROR("Special form 'is_seq_trace' called with "
"arguments in %T.", t, context, *constant);
@@ -4136,16 +4185,8 @@ static DMCRet dmc_set_seq_token(DMCContext *context,
DMCRet ret;
int c;
-
- if (!(context->cflags & DCOMP_TRACE)) {
- RETURN_ERROR("Special form 'set_seq_token' used in wrong dialect.",
- context,
- *constant);
- }
- if (context->is_guard) {
- RETURN_ERROR("Special form 'set_seq_token' called in "
- "guard context.", context, *constant);
- }
+ if (!check_trace("set_seq_trace", context, constant, DCOMP_ALLOW_TRACE_OPS, 0, &ret))
+ return ret;
if (a != 3) {
RETURN_TERM_ERROR("Special form 'set_seq_token' called with wrong "
@@ -4182,16 +4223,11 @@ static DMCRet dmc_get_seq_token(DMCContext *context,
{
Eterm *p = tuple_val(t);
Uint a = arityval(*p);
+ DMCRet ret;
+
+ if (!check_trace("get_seq_token", context, constant, DCOMP_ALLOW_TRACE_OPS, 0, &ret))
+ return ret;
- if (!(context->cflags & DCOMP_TRACE)) {
- RETURN_ERROR("Special form 'get_seq_token' used in wrong dialect.",
- context,
- *constant);
- }
- if (context->is_guard) {
- RETURN_ERROR("Special form 'get_seq_token' called in "
- "guard context.", context, *constant);
- }
if (a != 1) {
RETURN_TERM_ERROR("Special form 'get_seq_token' called with "
"arguments in %T.", t, context,
@@ -4255,16 +4291,10 @@ static DMCRet dmc_process_dump(DMCContext *context,
{
Eterm *p = tuple_val(t);
Uint a = arityval(*p);
-
- if (!(context->cflags & DCOMP_TRACE)) {
- RETURN_ERROR("Special form 'process_dump' used in wrong dialect.",
- context,
- *constant);
- }
- if (context->is_guard) {
- RETURN_ERROR("Special form 'process_dump' called in "
- "guard context.", context, *constant);
- }
+ DMCRet ret;
+
+ if (!check_trace("process_dump", context, constant, DCOMP_ALLOW_TRACE_OPS, 0, &ret))
+ return ret;
if (a != 1) {
RETURN_TERM_ERROR("Special form 'process_dump' called with "
@@ -4288,17 +4318,8 @@ static DMCRet dmc_enable_trace(DMCContext *context,
DMCRet ret;
int c;
-
- if (!(context->cflags & DCOMP_TRACE)) {
- RETURN_ERROR("Special form 'enable_trace' used in wrong dialect.",
- context,
- *constant);
- }
- if (context->is_guard) {
- RETURN_ERROR("Special form 'enable_trace' called in guard context.",
- context,
- *constant);
- }
+ if (!check_trace("enable_trace", context, constant, DCOMP_ALLOW_TRACE_OPS, 0, &ret))
+ return ret;
switch (a) {
case 2:
@@ -4347,18 +4368,9 @@ static DMCRet dmc_disable_trace(DMCContext *context,
Uint a = arityval(*p);
DMCRet ret;
int c;
-
- if (!(context->cflags & DCOMP_TRACE)) {
- RETURN_ERROR("Special form 'disable_trace' used in wrong dialect.",
- context,
- *constant);
- }
- if (context->is_guard) {
- RETURN_ERROR("Special form 'disable_trace' called in guard context.",
- context,
- *constant);
- }
+ if (!check_trace("disable_trace", context, constant, DCOMP_ALLOW_TRACE_OPS, 0, &ret))
+ return ret;
switch (a) {
case 2:
@@ -4408,17 +4420,8 @@ static DMCRet dmc_trace(DMCContext *context,
DMCRet ret;
int c;
-
- if (!(context->cflags & DCOMP_TRACE)) {
- RETURN_ERROR("Special form 'trace' used in wrong dialect.",
- context,
- *constant);
- }
- if (context->is_guard) {
- RETURN_ERROR("Special form 'trace' called in guard context.",
- context,
- *constant);
- }
+ if (!check_trace("trace", context, constant, DCOMP_ALLOW_TRACE_OPS, 0, &ret))
+ return ret;
switch (a) {
case 3:
@@ -4479,16 +4482,11 @@ static DMCRet dmc_caller(DMCContext *context,
{
Eterm *p = tuple_val(t);
Uint a = arityval(*p);
+ DMCRet ret;
- if (!(context->cflags & DCOMP_TRACE)) {
- RETURN_ERROR("Special form 'caller' used in wrong dialect.",
- context,
- *constant);
- }
- if (context->is_guard) {
- RETURN_ERROR("Special form 'caller' called in "
- "guard context.", context, *constant);
- }
+ if (!check_trace("caller", context, constant,
+ (DCOMP_CALL_TRACE|DCOMP_ALLOW_TRACE_OPS), 0, &ret))
+ return ret;
if (a != 1) {
RETURN_TERM_ERROR("Special form 'caller' called with "
@@ -4514,15 +4512,8 @@ static DMCRet dmc_silent(DMCContext *context,
DMCRet ret;
int c;
- if (!(context->cflags & DCOMP_TRACE)) {
- RETURN_ERROR("Special form 'silent' used in wrong dialect.",
- context,
- *constant);
- }
- if (context->is_guard) {
- RETURN_ERROR("Special form 'silent' called in "
- "guard context.", context, *constant);
- }
+ if (!check_trace("silent", context, constant, DCOMP_ALLOW_TRACE_OPS, 0, &ret))
+ return ret;
if (a != 2) {
RETURN_TERM_ERROR("Special form 'silent' called with wrong "
@@ -5062,11 +5053,14 @@ static Eterm match_spec_test(Process *p, Eterm against, Eterm spec, int trace)
return THE_NON_VALUE;
}
if (trace) {
- lint_res = db_match_set_lint(p, spec, DCOMP_TRACE | DCOMP_FAKE_DESTRUCTIVE);
- mps = db_match_set_compile(p, spec, DCOMP_TRACE | DCOMP_FAKE_DESTRUCTIVE);
+ const Uint cflags = (DCOMP_TRACE | DCOMP_FAKE_DESTRUCTIVE |
+ DCOMP_CALL_TRACE | DCOMP_ALLOW_TRACE_OPS);
+ lint_res = db_match_set_lint(p, spec, cflags);
+ mps = db_match_set_compile(p, spec, cflags);
} else {
- lint_res = db_match_set_lint(p, spec, DCOMP_TABLE | DCOMP_FAKE_DESTRUCTIVE);
- mps = db_match_set_compile(p, spec, DCOMP_TABLE | DCOMP_FAKE_DESTRUCTIVE);
+ const Uint cflags = (DCOMP_TABLE | DCOMP_FAKE_DESTRUCTIVE);
+ lint_res = db_match_set_lint(p, spec, cflags);
+ mps = db_match_set_compile(p, spec, cflags);
}
if (mps == NULL) {
@@ -5099,7 +5093,8 @@ static Eterm match_spec_test(Process *p, Eterm against, Eterm spec, int trace)
}
save_cp = p->cp;
p->cp = NULL;
- res = erts_match_set_run(p, mps, arr, n,
+ res = erts_match_set_run_trace(p, p,
+ mps, arr, n,
ERTS_PAM_COPY_RESULT|ERTS_PAM_IGNORE_TRACE_SILENT,
&ret_flags);
p->cp = save_cp;
@@ -5182,7 +5177,8 @@ Eterm db_match_dbterm(DbTableCommon* tb, Process* c_p, Binary* bprog,
obj = db_alloc_tmp_uncompressed(tb, obj);
}
- res = db_prog_match(c_p, bprog, make_tuple(obj->tpl), NULL, 0,
+ res = db_prog_match(c_p, c_p,
+ bprog, make_tuple(obj->tpl), NULL, 0,
ERTS_PAM_COPY_RESULT|ERTS_PAM_CONTIGUOUS_TUPLE, &dummy);
if (is_value(res) && hpp!=NULL) {
diff --git a/erts/emulator/beam/erl_db_util.h b/erts/emulator/beam/erl_db_util.h
index 5d7946a525..60f7067d70 100644
--- a/erts/emulator/beam/erl_db_util.h
+++ b/erts/emulator/beam/erl_db_util.h
@@ -425,6 +425,11 @@ typedef struct dmc_err_info {
#define DCOMP_FAKE_DESTRUCTIVE ((Uint) 8) /* When this is active, no setting of
trace control words or seq_trace tokens will be done. */
+/* Allow lock seizing operations on the tracee and 3rd party processes */
+#define DCOMP_ALLOW_TRACE_OPS ((Uint) 0x10)
+
+/* This is call trace */
+#define DCOMP_CALL_TRACE ((Uint) 0x20)
Binary *db_match_compile(Eterm *matchexpr, Eterm *guards,
Eterm *body, int num_matches,
@@ -435,7 +440,8 @@ Binary *db_match_compile(Eterm *matchexpr, Eterm *guards,
Eterm db_match_dbterm(DbTableCommon* tb, Process* c_p, Binary* bprog,
int all, DbTerm* obj, Eterm** hpp, Uint extra);
-Eterm db_prog_match(Process *p, Binary *prog, Eterm term,
+Eterm db_prog_match(Process *p, Process *self,
+ Binary *prog, Eterm term,
Eterm *termp, int arity,
enum erts_pam_run_flags in_flags,
Uint32 *return_flags /* Zeroed on enter */);
diff --git a/erts/emulator/beam/erl_drv_nif.h b/erts/emulator/beam/erl_drv_nif.h
index 2700b62854..6ec5fbb895 100644
--- a/erts/emulator/beam/erl_drv_nif.h
+++ b/erts/emulator/beam/erl_drv_nif.h
@@ -43,12 +43,11 @@ typedef struct {
int suggested_stack_size;
} ErlDrvThreadOpts;
-#if defined(ERL_DRV_DIRTY_SCHEDULER_SUPPORT) || defined(ERL_NIF_DIRTY_SCHEDULER_SUPPORT)
+
typedef enum {
- ERL_DRV_DIRTY_JOB_CPU_BOUND = 1,
- ERL_DRV_DIRTY_JOB_IO_BOUND = 2
-} ErlDrvDirtyJobFlags;
-#endif
+ ERL_DIRTY_JOB_CPU_BOUND = 1,
+ ERL_DIRTY_JOB_IO_BOUND = 2
+} ErlDirtyJobFlags;
#ifdef SIZEOF_CHAR
# define SIZEOF_CHAR_SAVED__ SIZEOF_CHAR
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index cf54f1e384..d740b2baec 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -41,6 +41,7 @@
#endif
#include "dtrace-wrapper.h"
#include "erl_bif_unique.h"
+#include "dist.h"
#define ERTS_INACT_WR_PB_LEAVE_MUCH_LIMIT 1
#define ERTS_INACT_WR_PB_LEAVE_MUCH_PERCENTAGE 20
@@ -116,7 +117,7 @@ static Eterm *full_sweep_heaps(Process *p,
char *oh, Uint oh_size,
Eterm *objv, int nobj);
static int garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
- int need, Eterm* objv, int nobj);
+ int need, Eterm* objv, int nobj, int fcalls);
static int major_collection(Process* p, ErlHeapFragment *live_hf_end,
int need, Eterm* objv, int nobj, Uint *recl);
static int minor_collection(Process* p, ErlHeapFragment *live_hf_end,
@@ -146,7 +147,8 @@ static void offset_rootset(Process *p, Sint offs, char* area, Uint area_size,
static void offset_off_heap(Process* p, Sint offs, char* area, Uint area_size);
static void offset_mqueue(Process *p, Sint offs, char* area, Uint area_size);
static void move_msgq_to_heap(Process *p);
-
+static int reached_max_heap_size(Process *p, Uint total_heap_size,
+ Uint extra_heap_size, Uint extra_old_heap_size);
static void init_gc_info(ErtsGCInfo *gcip);
#ifdef HARDDEBUG
@@ -389,21 +391,22 @@ erts_gc_after_bif_call_lhf(Process* p, ErlHeapFragment *live_hf_end,
if (p->freason == TRAP) {
#if HIPE
if (regs == NULL) {
- regs = ERTS_PROC_GET_SCHDATA(p)->x_reg_array;
+ regs = erts_proc_sched_data(p)->x_reg_array;
}
#endif
- cost = garbage_collect(p, live_hf_end, 0, regs, p->arity);
+ cost = garbage_collect(p, live_hf_end, 0, regs, p->arity, p->fcalls);
} else {
- cost = garbage_collect(p, live_hf_end, 0, regs, arity);
+ cost = garbage_collect(p, live_hf_end, 0, regs, arity, p->fcalls);
}
} else {
Eterm val[1];
val[0] = result;
- cost = garbage_collect(p, live_hf_end, 0, val, 1);
+ cost = garbage_collect(p, live_hf_end, 0, val, 1, p->fcalls);
result = val[0];
}
BUMP_REDS(p, cost);
+
return result;
}
@@ -431,7 +434,7 @@ static ERTS_INLINE void reset_active_writer(Process *p)
#define ERTS_ABANDON_HEAP_COST 10
static int
-delay_garbage_collection(Process *p, ErlHeapFragment *live_hf_end, int need)
+delay_garbage_collection(Process *p, ErlHeapFragment *live_hf_end, int need, int fcalls)
{
ErlHeapFragment *hfrag;
Eterm *orig_heap, *orig_hend, *orig_htop, *orig_stop;
@@ -506,12 +509,16 @@ delay_garbage_collection(Process *p, ErlHeapFragment *live_hf_end, int need)
/* Make sure that we do a proper GC as soon as possible... */
p->flags |= F_FORCE_GC;
- reds_left = ERTS_BIF_REDS_LEFT(p);
+ reds_left = ERTS_REDS_LEFT(p, fcalls);
+ ASSERT(CONTEXT_REDS - reds_left >= erts_proc_sched_data(p)->virtual_reds);
+
if (reds_left > ERTS_ABANDON_HEAP_COST) {
int vreds = reds_left - ERTS_ABANDON_HEAP_COST;
- ERTS_VBUMP_REDS(p, vreds);
+ erts_proc_sched_data((p))->virtual_reds += vreds;
}
- return ERTS_ABANDON_HEAP_COST;
+
+ ASSERT(CONTEXT_REDS >= erts_proc_sched_data(p)->virtual_reds);
+ return reds_left;
}
static ERTS_FORCE_INLINE Uint
@@ -570,19 +577,26 @@ young_gen_usage(Process *p)
*/
static int
garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
- int need, Eterm* objv, int nobj)
+ int need, Eterm* objv, int nobj, int fcalls)
{
Uint reclaimed_now = 0;
+ Eterm gc_trace_end_tag;
int reds;
ErtsMonotonicTime start_time = 0; /* Shut up faulty warning... */
ErtsSchedulerData *esdp;
+ erts_aint32_t state;
ERTS_MSACC_PUSH_STATE_M();
#ifdef USE_VM_PROBES
DTRACE_CHARBUF(pidbuf, DTRACE_TERM_BUF_SIZE);
#endif
- if (p->flags & (F_DISABLE_GC|F_DELAY_GC))
- return delay_garbage_collection(p, live_hf_end, need);
+ ASSERT(CONTEXT_REDS - ERTS_REDS_LEFT(p, fcalls)
+ >= erts_proc_sched_data(p)->virtual_reds);
+
+ state = erts_smp_atomic32_read_nob(&p->state);
+
+ if (p->flags & (F_DISABLE_GC|F_DELAY_GC) || state & ERTS_PSFLG_EXITING)
+ return delay_garbage_collection(p, live_hf_end, need, fcalls);
if (p->abandoned_heap)
live_hf_end = ERTS_INVALID_HFRAG_PTR;
@@ -616,28 +630,28 @@ garbage_collect(Process* p, ErlHeapFragment *live_hf_end,
if (GEN_GCS(p) < MAX_GEN_GCS(p) && !(FLAGS(p) & F_NEED_FULLSWEEP)) {
if (IS_TRACED_FL(p, F_TRACE_GC)) {
- trace_gc(p, am_gc_minor_start, need);
+ trace_gc(p, am_gc_minor_start, need, THE_NON_VALUE);
}
DTRACE2(gc_minor_start, pidbuf, need);
reds = minor_collection(p, live_hf_end, need, objv, nobj, &reclaimed_now);
DTRACE2(gc_minor_end, pidbuf, reclaimed_now);
- if (IS_TRACED_FL(p, F_TRACE_GC)) {
- trace_gc(p, am_gc_minor_end, reclaimed_now);
- }
- if (reds < 0)
+ if (reds == -1) {
+ if (IS_TRACED_FL(p, F_TRACE_GC)) {
+ trace_gc(p, am_gc_minor_end, reclaimed_now, THE_NON_VALUE);
+ }
goto do_major_collection;
+ }
+ gc_trace_end_tag = am_gc_minor_end;
} else {
do_major_collection:
ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_GC_FULL);
if (IS_TRACED_FL(p, F_TRACE_GC)) {
- trace_gc(p, am_gc_major_start, need);
+ trace_gc(p, am_gc_major_start, need, THE_NON_VALUE);
}
DTRACE2(gc_major_start, pidbuf, need);
reds = major_collection(p, live_hf_end, need, objv, nobj, &reclaimed_now);
DTRACE2(gc_major_end, pidbuf, reclaimed_now);
- if (IS_TRACED_FL(p, F_TRACE_GC)) {
- trace_gc(p, am_gc_major_end, reclaimed_now);
- }
+ gc_trace_end_tag = am_gc_major_end;
ERTS_MSACC_SET_STATE_CACHED_M_X(ERTS_MSACC_STATE_GC);
}
@@ -651,8 +665,33 @@ do_major_collection:
ErtsGcQuickSanityCheck(p);
+ /* The max heap size has been reached and the process was configured
+ to be killed, so we kill it and put it in a delayed garbage
+ collecting state. There should be no gc_end trace or
+ long_gc/large_gc triggers when this happens, as the process was
+ killed before a GC could be done. */
+ if (reds == -2) {
+ ErtsProcLocks locks = ERTS_PROC_LOCKS_ALL;
+
+ erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
+ erts_send_exit_signal(p, p->common.id, p, &locks,
+ am_kill, NIL, NULL, 0);
+ erts_smp_proc_unlock(p, locks & ERTS_PROC_LOCKS_ALL_MINOR);
+
+ /* erts_send_exit_signal looks for ERTS_PSFLG_GC, so
+ we have to remove it after the signal is sent */
+ erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
+
+ /* We still have to make sure that there is space for 'need' words on the heap */
+ return delay_garbage_collection(p, live_hf_end, need, fcalls);
+ }
+
erts_smp_atomic32_read_band_nob(&p->state, ~ERTS_PSFLG_GC);
+ if (IS_TRACED_FL(p, F_TRACE_GC)) {
+ trace_gc(p, gc_trace_end_tag, reclaimed_now, THE_NON_VALUE);
+ }
+
if (erts_system_monitor_long_gc != 0) {
ErtsMonotonicTime end_time;
Uint gc_time;
@@ -703,16 +742,23 @@ do_major_collection:
}
int
-erts_garbage_collect_nobump(Process* p, int need, Eterm* objv, int nobj)
+erts_garbage_collect_nobump(Process* p, int need, Eterm* objv, int nobj, int fcalls)
{
- return garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj);
+ int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, fcalls);
+ int reds_left = ERTS_REDS_LEFT(p, fcalls);
+ if (reds > reds_left)
+ reds = reds_left;
+ ASSERT(CONTEXT_REDS - (reds_left - reds) >= erts_proc_sched_data(p)->virtual_reds);
+ return reds;
}
void
erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
{
- int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj);
+ int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, p->fcalls);
BUMP_REDS(p, reds);
+ ASSERT(CONTEXT_REDS - ERTS_BIF_REDS_LEFT(p)
+ >= erts_proc_sched_data(p)->virtual_reds);
}
/*
@@ -747,6 +793,7 @@ erts_garbage_collect_hibernate(Process* p)
heap_size = p->heap_sz + (p->old_htop - p->old_heap) + p->mbuf_sz;
+
heap = (Eterm*) ERTS_HEAP_ALLOC(ERTS_ALC_T_TMP_HEAP,
sizeof(Eterm)*heap_size);
htop = heap;
@@ -1017,6 +1064,34 @@ minor_collection(Process* p, ErlHeapFragment *live_hf_end,
Uint size_before = young_gen_usage(p);
/*
+ * Check if we have gone past the max heap size limit
+ */
+
+ if (MAX_HEAP_SIZE_GET(p)) {
+ Uint heap_size = size_before,
+ /* Note that we also count the unallocated area
+ between the stack and the heap */
+ stack_size = HEAP_END(p) - HEAP_TOP(p),
+ extra_heap_size,
+ extra_old_heap_size = 0;
+
+ /* Add potential old heap size */
+ if (OLD_HEAP(p) == NULL && mature_size != 0) {
+ extra_old_heap_size = erts_next_heap_size(size_before, 1);
+ heap_size += extra_old_heap_size;
+ } else if (OLD_HEAP(p))
+ heap_size += OLD_HEND(p) - OLD_HEAP(p);
+
+ /* Add potential new young heap size */
+ extra_heap_size = next_heap_size(p, stack_size + size_before, 0);
+ heap_size += extra_heap_size;
+
+ if (heap_size > MAX_HEAP_SIZE_GET(p))
+ if (reached_max_heap_size(p, heap_size, extra_heap_size, extra_old_heap_size))
+ return -2;
+ }
+
+ /*
* Allocate an old heap if we don't have one and if we'll need one.
*/
@@ -1125,6 +1200,16 @@ minor_collection(Process* p, ErlHeapFragment *live_hf_end,
ASSERT(HEAP_SIZE(p) == next_heap_size(p, HEAP_SIZE(p), 0));
ASSERT(MBUF(p) == NULL);
+ /* The heap usage during GC should be larger than what we end up
+ with after a GC, even if we grow it. If this assertion does not
+ hold, we have to check the size in grow_new_heap and potentially
+ kill the process from there. */
+ ASSERT(!MAX_HEAP_SIZE_GET(p) ||
+ !(MAX_HEAP_SIZE_FLAGS_GET(p) & MAX_HEAP_SIZE_KILL) ||
+ MAX_HEAP_SIZE_GET(p) > (young_gen_usage(p) +
+ (OLD_HEND(p) - OLD_HEAP(p)) +
+ (HEAP_END(p) - HEAP_TOP(p))));
+
return gc_cost(size_after, adjust_size);
}
@@ -1443,6 +1528,25 @@ major_collection(Process* p, ErlHeapFragment *live_hf_end,
if (new_sz == HEAP_SIZE(p) && FLAGS(p) & F_HEAP_GROW) {
new_sz = next_heap_size(p, HEAP_SIZE(p), 1);
}
+
+
+ if (MAX_HEAP_SIZE_GET(p)) {
+ Uint heap_size = size_before;
+
+ /* Add unused space in old heap */
+ heap_size += OLD_HEND(p) - OLD_HTOP(p);
+
+ /* Add stack + unused space in young heap */
+ heap_size += HEAP_END(p) - HEAP_TOP(p);
+
+ /* Add size of new young heap */
+ heap_size += new_sz;
+
+ if (MAX_HEAP_SIZE_GET(p) < heap_size)
+ if (reached_max_heap_size(p, heap_size, new_sz, 0))
+ return -2;
+ }
+
FLAGS(p) &= ~(F_HEAP_GROW|F_NEED_FULLSWEEP);
n_htop = n_heap = (Eterm *) ERTS_HEAP_ALLOC(ERTS_ALC_T_HEAP,
sizeof(Eterm)*new_sz);
@@ -2924,7 +3028,7 @@ reply_gc_info(void *vgcirp)
hpp = &hp;
}
- erts_queue_message(rp, &rp_locks, mp, msg);
+ erts_queue_message(rp, rp_locks, mp, msg, am_system);
if (gcirp->req_sched == esdp->no)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -2941,7 +3045,7 @@ reply_gc_info(void *vgcirp)
Eterm
erts_gc_info_request(Process *c_p)
{
- ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
Eterm ref;
ErtsGCInfoReq *gcirp;
Eterm *hp;
@@ -2972,7 +3076,9 @@ erts_gc_info_request(Process *c_p)
}
Eterm
-erts_process_gc_info(Process *p, Uint *sizep, Eterm **hpp)
+erts_process_gc_info(Process *p, Uint *sizep, Eterm **hpp,
+ Uint extra_heap_block,
+ Uint extra_old_heap_block_size)
{
ERTS_DECL_AM(bin_vheap_size);
ERTS_DECL_AM(bin_vheap_block_size);
@@ -2995,8 +3101,9 @@ erts_process_gc_info(Process *p, Uint *sizep, Eterm **hpp)
AM_bin_old_vheap_block_size
};
UWord values[] = {
- OLD_HEAP(p) ? OLD_HEND(p) - OLD_HEAP(p) : 0,
- HEAP_SIZE(p),
+ OLD_HEAP(p) ? OLD_HEND(p) - OLD_HEAP(p) + extra_old_heap_block_size
+ : extra_old_heap_block_size,
+ HEAP_SIZE(p) + extra_heap_block,
MBUF_SIZE(p),
HIGH_WATER(p) - HEAP_START(p),
STACK_START(p) - p->stop,
@@ -3042,6 +3149,130 @@ erts_process_gc_info(Process *p, Uint *sizep, Eterm **hpp)
return res;
}
+static int
+reached_max_heap_size(Process *p, Uint total_heap_size,
+ Uint extra_heap_size, Uint extra_old_heap_size)
+{
+ Uint max_heap_flags = MAX_HEAP_SIZE_FLAGS_GET(p);
+ if (IS_TRACED_FL(p, F_TRACE_GC) ||
+ max_heap_flags & MAX_HEAP_SIZE_LOG) {
+ Eterm msg;
+ Uint size = 0;
+ Eterm *o_hp , *hp;
+ erts_process_gc_info(p, &size, NULL, extra_heap_size,
+ extra_old_heap_size);
+ o_hp = hp = erts_alloc(ERTS_ALC_T_TMP, size * sizeof(Eterm));
+ msg = erts_process_gc_info(p, NULL, &hp, extra_heap_size,
+ extra_old_heap_size);
+
+ if (max_heap_flags & MAX_HEAP_SIZE_LOG) {
+ int alive = erts_is_alive;
+ erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
+ Eterm *o_hp, *hp, args = NIL;
+
+ /* Build the format message */
+ erts_dsprintf(dsbufp, " Process: ~p ");
+ if (alive)
+ erts_dsprintf(dsbufp, "on node ~p");
+ erts_dsprintf(dsbufp, "~n Context: maximum heap size reached~n");
+ erts_dsprintf(dsbufp, " Max Heap Size: ~p~n");
+ erts_dsprintf(dsbufp, " Total Heap Size: ~p~n");
+ erts_dsprintf(dsbufp, " Kill: ~p~n");
+ erts_dsprintf(dsbufp, " Error Logger: ~p~n");
+ erts_dsprintf(dsbufp, " GC Info: ~p~n");
+
+ /* Build the args in reverse order */
+ o_hp = hp = erts_alloc(ERTS_ALC_T_TMP, 2*(alive ? 7 : 6) * sizeof(Eterm));
+ args = CONS(hp, msg, args); hp += 2;
+ args = CONS(hp, am_true, args); hp += 2;
+ args = CONS(hp, (max_heap_flags & MAX_HEAP_SIZE_KILL ? am_true : am_false), args); hp += 2;
+ args = CONS(hp, make_small(total_heap_size), args); hp += 2;
+ args = CONS(hp, make_small(MAX_HEAP_SIZE_GET(p)), args); hp += 2;
+ if (alive) {
+ args = CONS(hp, erts_this_node->sysname, args); hp += 2;
+ }
+ args = CONS(hp, p->common.id, args); hp += 2;
+
+ erts_send_error_term_to_logger(p->group_leader, dsbufp, args);
+ erts_free(ERTS_ALC_T_TMP, o_hp);
+ }
+
+ if (IS_TRACED_FL(p, F_TRACE_GC))
+ trace_gc(p, am_gc_max_heap_size, 0, msg);
+
+ erts_free(ERTS_ALC_T_TMP, o_hp);
+ }
+ /* returns true if we should kill the process */
+ return max_heap_flags & MAX_HEAP_SIZE_KILL;
+}
+
+Eterm
+erts_max_heap_size_map(Sint max_heap_size, Uint max_heap_flags,
+ Eterm **hpp, Uint *sz)
+{
+ if (!hpp) {
+ *sz += (2*3 + 1 + MAP_HEADER_FLATMAP_SZ);
+ return THE_NON_VALUE;
+ } else {
+ Eterm *hp = *hpp;
+ Eterm keys = TUPLE3(hp, am_error_logger, am_kill, am_size);
+ flatmap_t *mp;
+ hp += 4;
+ mp = (flatmap_t*) hp;
+ mp->thing_word = MAP_HEADER_FLATMAP;
+ mp->size = 3;
+ mp->keys = keys;
+ hp += MAP_HEADER_FLATMAP_SZ;
+ *hp++ = max_heap_flags & MAX_HEAP_SIZE_LOG ? am_true : am_false;
+ *hp++ = max_heap_flags & MAX_HEAP_SIZE_KILL ? am_true : am_false;
+ *hp++ = make_small(max_heap_size);
+ *hpp = hp;
+ return make_flatmap(mp);
+ }
+}
+
+int
+erts_max_heap_size(Eterm arg, Uint *max_heap_size, Uint *max_heap_flags)
+{
+ Sint sz;
+ *max_heap_flags = H_MAX_FLAGS;
+ if (is_small(arg)) {
+ sz = signed_val(arg);
+ *max_heap_flags = H_MAX_FLAGS;
+ } else if (is_map(arg)) {
+ const Eterm *size = erts_maps_get(am_size, arg);
+ const Eterm *kill = erts_maps_get(am_kill, arg);
+ const Eterm *log = erts_maps_get(am_error_logger, arg);
+ if (size && is_small(*size)) {
+ sz = signed_val(*size);
+ } else {
+ /* size is mandatory */
+ return 0;
+ }
+ if (kill) {
+ if (*kill == am_true)
+ *max_heap_flags |= MAX_HEAP_SIZE_KILL;
+ else if (*kill == am_false)
+ *max_heap_flags &= ~MAX_HEAP_SIZE_KILL;
+ else
+ return 0;
+ }
+ if (log) {
+ if (*log == am_true)
+ *max_heap_flags |= MAX_HEAP_SIZE_LOG;
+ else if (*log == am_false)
+ *max_heap_flags &= ~MAX_HEAP_SIZE_LOG;
+ else
+ return 0;
+ }
+ } else
+ return 0;
+ if (sz < 0)
+ return 0;
+ *max_heap_size = sz;
+ return 1;
+}
+
#if defined(DEBUG) || defined(ERTS_OFFHEAP_DEBUG)
static int
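The decision reached_max_heap_size() implements can be summarized in isolation: when the projected heap size exceeds the configured maximum, optionally emit a report and/or kill the process depending on the flag bits. The stand-alone sketch below captures only that decision; the flag names mirror the patch, but their numeric values and everything around them are placeholders, not ERTS code.

#include <stdio.h>

#define MAX_HEAP_SIZE_KILL (1 << 0)  /* placeholder bit values */
#define MAX_HEAP_SIZE_LOG  (1 << 1)

/* Returns nonzero if the process should be killed. */
static int check_max_heap(unsigned long projected_words,
                          unsigned long max_words,
                          unsigned flags)
{
    if (max_words == 0 || projected_words <= max_words)
        return 0;                       /* limit disabled or not reached */
    if (flags & MAX_HEAP_SIZE_LOG)
        printf("max heap size reached: %lu > %lu words\n",
               projected_words, max_words);
    return (flags & MAX_HEAP_SIZE_KILL) != 0;
}

int main(void)
{
    /* kill + log is the default combination set up in erl_init.c below */
    unsigned flags = MAX_HEAP_SIZE_KILL | MAX_HEAP_SIZE_LOG;
    if (check_max_heap(2048, 1024, flags))
        puts("process would receive a kill exit signal");
    return 0;
}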
diff --git a/erts/emulator/beam/erl_gc.h b/erts/emulator/beam/erl_gc.h
index 40b7c5d12c..54ea9ca3c0 100644
--- a/erts/emulator/beam/erl_gc.h
+++ b/erts/emulator/beam/erl_gc.h
@@ -135,11 +135,11 @@ typedef struct {
#define ERTS_PROCESS_GC_INFO_MAX_TERMS (11) /* number of elements in process_gc_info*/
#define ERTS_PROCESS_GC_INFO_MAX_SIZE \
(ERTS_PROCESS_GC_INFO_MAX_TERMS * (2/*cons*/ + 3/*2-tuple*/ + BIG_UINT_HEAP_SIZE))
-Eterm erts_process_gc_info(struct process*, Uint *, Eterm **);
+Eterm erts_process_gc_info(struct process*, Uint *, Eterm **, Uint, Uint);
void erts_gc_info(ErtsGCInfo *gcip);
void erts_init_gc(void);
-int erts_garbage_collect_nobump(struct process*, int, Eterm*, int);
+int erts_garbage_collect_nobump(struct process*, int, Eterm*, int, int);
void erts_garbage_collect(struct process*, int, Eterm*, int);
void erts_garbage_collect_hibernate(struct process* p);
Eterm erts_gc_after_bif_call_lhf(struct process* p, ErlHeapFragment *live_hf_end,
@@ -155,5 +155,7 @@ void erts_offset_off_heap(struct erl_off_heap*, Sint, Eterm*, Eterm*);
void erts_offset_heap_ptr(Eterm*, Uint, Sint, Eterm*, Eterm*);
void erts_offset_heap(Eterm*, Uint, Sint, Eterm*, Eterm*);
void erts_free_heap_frags(struct process* p);
+Eterm erts_max_heap_size_map(Sint, Uint, Eterm **, Uint *);
+int erts_max_heap_size(Eterm, Uint *, Uint *);
#endif /* __ERL_GC_H__ */
diff --git a/erts/emulator/beam/erl_hl_timer.c b/erts/emulator/beam/erl_hl_timer.c
index 8e201d5711..ebeff51aac 100644
--- a/erts/emulator/beam/erl_hl_timer.c
+++ b/erts/emulator/beam/erl_hl_timer.c
@@ -1247,8 +1247,8 @@ hlt_bif_timer_timeout(ErtsHLTimer *tmr, Uint32 roflgs)
if (!ERTS_PROC_IS_EXITING(proc)) {
ErtsMessage *mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = tmr->btm.bp;
- erts_queue_message(proc, &proc_locks, mp,
- tmr->btm.message);
+ erts_queue_message(proc, proc_locks, mp,
+ tmr->btm.message, am_clock_service);
erts_smp_proc_unlock(proc, ERTS_PROC_LOCKS_MSG_SEND);
queued_message = 1;
proc_locks &= ~ERTS_PROC_LOCKS_MSG_SEND;
@@ -1766,7 +1766,7 @@ setup_bif_timer(Process *c_p, ErtsMonotonicTime timeout_pos,
if (is_not_internal_pid(rcvr) && is_not_atom(rcvr))
goto badarg;
- esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ esdp = erts_proc_sched_data(c_p);
hp = HAlloc(c_p, REF_THING_SIZE);
ref = erts_sched_make_ref_in_buffer(esdp, hp);
@@ -1871,7 +1871,7 @@ access_sched_local_btm(Process *c_p, Eterm pid,
if (!c_p)
esdp = erts_get_scheduler_data();
else {
- esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ esdp = erts_proc_sched_data(c_p);
ERTS_HLT_ASSERT(esdp == erts_get_scheduler_data());
}
@@ -1980,7 +1980,7 @@ access_sched_local_btm(Process *c_p, Eterm pid,
ERTS_HLT_ASSERT(hp + (async ? 4 : 3) == hp_end);
- erts_queue_message(proc, &proc_locks, mp, msg);
+ erts_queue_message(proc, proc_locks, mp, msg, am_clock_service);
if (c_p)
proc_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -2111,7 +2111,7 @@ try_access_sched_remote_btm(ErtsSchedulerData *esdp,
msg = TUPLE3(hp, tag, tref, res);
- erts_queue_message(c_p, &proc_locks, mp, msg);
+ erts_queue_message(c_p, proc_locks, mp, msg, am_clock_service);
proc_locks &= ~ERTS_PROC_LOCK_MAIN;
if (proc_locks)
@@ -2138,7 +2138,7 @@ access_bif_timer(Process *c_p, Eterm tref, int cancel, int async, int info)
goto no_timer;
}
- esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ esdp = erts_proc_sched_data(c_p);
trefn = internal_ref_numbers(tref);
sid = erts_get_ref_numbers_thr_id(trefn);
@@ -2363,7 +2363,7 @@ typedef struct {
int erts_cancel_bif_timers(Process *p, ErtsBifTimers *btm, void **vyspp)
{
- ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(p);
+ ErtsSchedulerData *esdp = erts_proc_sched_data(p);
ErtsBifTimerYieldState ys = {btm, {ERTS_RBT_YIELD_STAT_INITER}};
ErtsBifTimerYieldState *ysp;
int res;
@@ -2409,7 +2409,7 @@ detach_bif_timer(ErtsHLTimer *tmr, void *vesdp)
int erts_detach_accessor_bif_timers(Process *p, ErtsBifTimers *btm, void **vyspp)
{
- ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(p);
+ ErtsSchedulerData *esdp = erts_proc_sched_data(p);
ErtsBifTimerYieldState ys = {btm, {ERTS_RBT_YIELD_STAT_INITER}};
ErtsBifTimerYieldState *ysp;
int res;
@@ -2516,7 +2516,7 @@ BIF_RETTYPE send_after_3(BIF_ALIST_3)
ErtsMonotonicTime timeout_pos;
int short_time, tres;
- tres = parse_timeout_pos(ERTS_PROC_GET_SCHDATA(BIF_P), BIF_ARG_1, NULL,
+ tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
0, &timeout_pos, &short_time);
if (tres != 0)
BIF_ERROR(BIF_P, BADARG);
@@ -2534,7 +2534,7 @@ BIF_RETTYPE send_after_4(BIF_ALIST_4)
if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs, &accessor))
BIF_ERROR(BIF_P, BADARG);
- tres = parse_timeout_pos(ERTS_PROC_GET_SCHDATA(BIF_P), BIF_ARG_1, NULL,
+ tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
abs, &timeout_pos, &short_time);
if (tres != 0)
BIF_ERROR(BIF_P, BADARG);
@@ -2548,7 +2548,7 @@ BIF_RETTYPE start_timer_3(BIF_ALIST_3)
ErtsMonotonicTime timeout_pos;
int short_time, tres;
- tres = parse_timeout_pos(ERTS_PROC_GET_SCHDATA(BIF_P), BIF_ARG_1, NULL,
+ tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
0, &timeout_pos, &short_time);
if (tres != 0)
BIF_ERROR(BIF_P, BADARG);
@@ -2566,7 +2566,7 @@ BIF_RETTYPE start_timer_4(BIF_ALIST_4)
if (!parse_bif_timer_options(BIF_ARG_4, NULL, NULL, &abs, &accessor))
BIF_ERROR(BIF_P, BADARG);
- tres = parse_timeout_pos(ERTS_PROC_GET_SCHDATA(BIF_P), BIF_ARG_1, NULL,
+ tres = parse_timeout_pos(erts_proc_sched_data(BIF_P), BIF_ARG_1, NULL,
abs, &timeout_pos, &short_time);
if (tres != 0)
BIF_ERROR(BIF_P, BADARG);
@@ -2720,7 +2720,7 @@ set_proc_timer_common(Process *c_p, ErtsSchedulerData *esdp, Sint64 tmo,
int
erts_set_proc_timer_term(Process *c_p, Eterm etmo)
{
- ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
ErtsMonotonicTime tmo, timeout_pos;
int short_time, tres;
@@ -2742,7 +2742,7 @@ erts_set_proc_timer_term(Process *c_p, Eterm etmo)
void
erts_set_proc_timer_uword(Process *c_p, UWord tmo)
{
- ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
ERTS_HLT_ASSERT(erts_smp_atomic_read_nob(&c_p->common.timer)
== ERTS_PTMR_NONE);
@@ -2776,7 +2776,7 @@ erts_cancel_proc_timer(Process *c_p)
erts_smp_atomic_set_nob(&c_p->common.timer, ERTS_PTMR_NONE);
return;
}
- continue_cancel_ptimer(ERTS_PROC_GET_SCHDATA(c_p),
+ continue_cancel_ptimer(erts_proc_sched_data(c_p),
(ErtsTimer *) tval);
}
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 2fd1eeb785..0649fb68de 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -164,6 +164,8 @@ int erts_use_sender_punish;
Uint display_items; /* no of items to display in traces etc */
int H_MIN_SIZE; /* The minimum heap grain */
int BIN_VH_MIN_SIZE; /* The minimum binary virtual*/
+int H_MAX_SIZE; /* The maximum heap size */
+int H_MAX_FLAGS; /* The maximum heap flags */
Uint32 erts_debug_flags; /* Debug flags. */
int erts_backtrace_depth; /* How many functions to show in a backtrace
@@ -206,15 +208,15 @@ int erts_no_line_info = 0; /* -L: Don't load line information */
ErtsModifiedTimings erts_modified_timings[] = {
/* 0 */ {make_small(0), CONTEXT_REDS, INPUT_REDUCTIONS},
- /* 1 */ {make_small(0), 2*CONTEXT_REDS, 2*INPUT_REDUCTIONS},
+ /* 1 */ {make_small(0), (3*CONTEXT_REDS)/4, 2*INPUT_REDUCTIONS},
/* 2 */ {make_small(0), CONTEXT_REDS/2, INPUT_REDUCTIONS/2},
- /* 3 */ {make_small(0), 3*CONTEXT_REDS, 3*INPUT_REDUCTIONS},
+ /* 3 */ {make_small(0), (7*CONTEXT_REDS)/8, 3*INPUT_REDUCTIONS},
/* 4 */ {make_small(0), CONTEXT_REDS/3, 3*INPUT_REDUCTIONS},
- /* 5 */ {make_small(0), 4*CONTEXT_REDS, INPUT_REDUCTIONS/2},
+ /* 5 */ {make_small(0), (10*CONTEXT_REDS)/11, INPUT_REDUCTIONS/2},
/* 6 */ {make_small(1), CONTEXT_REDS/4, 2*INPUT_REDUCTIONS},
- /* 7 */ {make_small(1), 5*CONTEXT_REDS, INPUT_REDUCTIONS/3},
+ /* 7 */ {make_small(1), (5*CONTEXT_REDS)/7, INPUT_REDUCTIONS/3},
/* 8 */ {make_small(10), CONTEXT_REDS/5, 3*INPUT_REDUCTIONS},
- /* 9 */ {make_small(10), 6*CONTEXT_REDS, INPUT_REDUCTIONS/4}
+ /* 9 */ {make_small(10), (6*CONTEXT_REDS)/7, INPUT_REDUCTIONS/4}
};
#define ERTS_MODIFIED_TIMING_LEVELS \
@@ -576,6 +578,10 @@ void erts_usage(void)
H_DEFAULT_SIZE);
erts_fprintf(stderr, "-hmbs size set minimum binary virtual heap size in words (default %d)\n",
VH_DEFAULT_SIZE);
+ erts_fprintf(stderr, "-hmax size set maximum heap size in words (default %d)\n",
+ H_DEFAULT_MAX_SIZE);
+ erts_fprintf(stderr, "-hmaxk bool enable or disable kill at max heap size (default true)\n");
+ erts_fprintf(stderr, "-hmaxel bool enable or disable error_logger report at max heap size (default true)\n");
erts_fprintf(stderr, "-hpds size initial process dictionary size (default %d)\n",
erts_pd_initial_size);
erts_fprintf(stderr, "-hmqd val set default message queue data flag for processes,\n");
@@ -759,6 +765,8 @@ early_init(int *argc, char **argv) /*
erts_async_thread_suggested_stack_size = ERTS_ASYNC_THREAD_MIN_STACK_SIZE;
H_MIN_SIZE = H_DEFAULT_SIZE;
BIN_VH_MIN_SIZE = VH_DEFAULT_SIZE;
+ H_MAX_SIZE = H_DEFAULT_MAX_SIZE;
+ H_MAX_FLAGS = MAX_HEAP_SIZE_KILL|MAX_HEAP_SIZE_LOG;
erts_initialized = 0;
@@ -1484,10 +1492,13 @@ erl_start(int argc, char **argv)
char *sub_param = argv[i]+2;
/* set default heap size
*
- * h|ms - min_heap_size
- * h|mbs - min_bin_vheap_size
- * h|pds - erts_pd_initial_size
- * h|mqd - message_queue_data
+ * h|ms - min_heap_size
+ * h|mbs - min_bin_vheap_size
+ * h|pds - erts_pd_initial_size
+ * h|mqd - message_queue_data
+ * h|max - max_heap_size
+ * h|maxk - max_heap_kill
+ * h|maxel - max_heap_error_logger
*
*/
if (has_prefix("mbs", sub_param)) {
@@ -1530,6 +1541,41 @@ erl_start(int argc, char **argv)
"Invalid message_queue_data flag: %s\n", arg);
erts_usage();
}
+ } else if (has_prefix("maxk", sub_param)) {
+ arg = get_arg(sub_param+4, argv[i+1], &i);
+ if (strcmp(arg,"true") == 0) {
+ H_MAX_FLAGS |= MAX_HEAP_SIZE_KILL;
+ } else if (strcmp(arg,"false") == 0) {
+ H_MAX_FLAGS &= ~MAX_HEAP_SIZE_KILL;
+ } else {
+ erts_fprintf(stderr, "bad max heap kill %s\n", arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM, ("using max heap kill %d\n", H_MAX_FLAGS));
+ } else if (has_prefix("maxel", sub_param)) {
+ arg = get_arg(sub_param+5, argv[i+1], &i);
+ if (strcmp(arg,"true") == 0) {
+ H_MAX_FLAGS |= MAX_HEAP_SIZE_LOG;
+ } else if (strcmp(arg,"false") == 0) {
+ H_MAX_FLAGS &= ~MAX_HEAP_SIZE_LOG;
+ } else {
+ erts_fprintf(stderr, "bad max heap error logger %s\n", arg);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM, ("using max heap log %d\n", H_MAX_FLAGS));
+ } else if (has_prefix("max", sub_param)) {
+ arg = get_arg(sub_param+3, argv[i+1], &i);
+ if ((H_MAX_SIZE = atoi(arg)) < 0) {
+ erts_fprintf(stderr, "bad max heap size %s\n", arg);
+ erts_usage();
+ }
+ if (H_MAX_SIZE < H_MIN_SIZE && H_MAX_SIZE) {
+ erts_fprintf(stderr, "max heap size (%s) is not allowed to be "
+ "smaller than min heap size (%d)\n",
+ arg, H_MIN_SIZE);
+ erts_usage();
+ }
+ VERBOSE(DEBUG_SYSTEM, ("using max heap size %d\n", H_MAX_SIZE));
} else {
/* backward compatibility */
arg = get_arg(argv[i]+2, argv[i+1], &i);
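The new heap-related emulator flags parse like the existing ones: -hmax takes a word count that must not be smaller than the minimum heap size (unless it is 0, which disables the limit), while -hmaxk and -hmaxel take true/false. A stand-alone sketch of that validation follows, with hypothetical inputs and an assumed default minimum heap size; it is not the ERTS argv loop itself.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_bool(const char *arg, int *out)
{
    if (strcmp(arg, "true") == 0)  { *out = 1; return 1; }
    if (strcmp(arg, "false") == 0) { *out = 0; return 1; }
    return 0;
}

int main(void)
{
    const char *hmax = "1048576", *hmaxk = "false"; /* hypothetical input        */
    int min_heap = 233;                             /* assumed default H_MIN_SIZE */
    int max_heap, kill;

    max_heap = atoi(hmax);
    if (max_heap < 0 || (max_heap && max_heap < min_heap)) {
        fprintf(stderr, "bad max heap size %s\n", hmax);
        return 1;
    }
    if (!parse_bool(hmaxk, &kill)) {
        fprintf(stderr, "bad max heap kill %s\n", hmaxk);
        return 1;
    }
    printf("max_heap_size=%d kill=%d\n", max_heap, kill);
    return 0;
}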
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 9beff52835..579f6e427d 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -32,6 +32,7 @@
#include "erl_process.h"
#include "erl_binary.h"
#include "dtrace-wrapper.h"
+#include "beam_bp.h"
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(message_ref,
ErtsMessageRef,
@@ -251,9 +252,10 @@ erts_realloc_shrink_message(ErtsMessage *mp, Uint sz, Eterm *brefs, Uint brefs_s
void
erts_queue_dist_message(Process *rcvr,
- ErtsProcLocks *rcvr_locks,
+ ErtsProcLocks rcvr_locks,
ErtsDistExternal *dist_ext,
- Eterm token)
+ Eterm token,
+ Eterm from)
{
ErtsMessage* mp;
#ifdef USE_VM_PROBES
@@ -265,7 +267,7 @@ erts_queue_dist_message(Process *rcvr,
erts_aint_t state;
#endif
- ERTS_SMP_LC_ASSERT(*rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));
+ ERTS_SMP_LC_ASSERT(rcvr_locks == erts_proc_lc_my_proc_locks(rcvr));
mp = erts_alloc_message(0, NULL);
mp->data.dist_ext = dist_ext;
@@ -280,10 +282,10 @@ erts_queue_dist_message(Process *rcvr,
ERL_MESSAGE_TOKEN(mp) = token;
#ifdef ERTS_SMP
- if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ)) {
+ if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ)) {
if (erts_smp_proc_trylock(rcvr, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
- if (*rcvr_locks & ERTS_PROC_LOCK_STATUS) {
+ if (rcvr_locks & ERTS_PROC_LOCK_STATUS) {
erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_STATUS);
need_locks |= ERTS_PROC_LOCK_STATUS;
}
@@ -293,7 +295,7 @@ erts_queue_dist_message(Process *rcvr,
state = erts_smp_atomic32_read_acqb(&rcvr->state);
if (state & (ERTS_PSFLG_PENDING_EXIT|ERTS_PSFLG_EXITING)) {
- if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ))
+ if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
/* Drop message if receiver is exiting or has a pending exit ... */
erts_cleanup_messages(mp);
@@ -301,10 +303,13 @@ erts_queue_dist_message(Process *rcvr,
else
#endif
if (IS_TRACED_FL(rcvr, F_TRACE_RECEIVE)) {
+ if (from == am_Empty)
+ from = dist_ext->dep->sysname;
+
/* Ahh... need to decode it in order to trace it... */
- if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ))
+ if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
- if (!erts_decode_dist_message(rcvr, *rcvr_locks, mp, 0))
+ if (!erts_decode_dist_message(rcvr, rcvr_locks, mp, 0))
erts_free_message(mp);
else {
Eterm msg = ERL_MESSAGE_TERM(mp);
@@ -324,7 +329,7 @@ erts_queue_dist_message(Process *rcvr,
tok_label, tok_lastcnt, tok_serial);
}
#endif
- erts_queue_message(rcvr, rcvr_locks, mp, msg);
+ erts_queue_message(rcvr, rcvr_locks, mp, msg, from);
}
}
else {
@@ -351,12 +356,12 @@ erts_queue_dist_message(Process *rcvr,
LINK_MESSAGE(rcvr, mp, &mp->next, 1);
- if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ))
+ if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
erts_proc_notify_new_message(rcvr,
#ifdef ERTS_SMP
- *rcvr_locks
+ rcvr_locks
#else
0
#endif
@@ -366,14 +371,15 @@ erts_queue_dist_message(Process *rcvr,
/* Add messages last in message queue */
static Sint
-queue_messages(Process *c_p,
- Process* receiver,
+queue_messages(Process* receiver,
erts_aint32_t *receiver_state,
- ErtsProcLocks *receiver_locks,
+ ErtsProcLocks receiver_locks,
ErtsMessage* first,
ErtsMessage** last,
- Uint len)
+ Uint len,
+ Eterm from)
{
+ ErtsTracingEvent* te;
Sint res;
int locked_msgq = 0;
erts_aint32_t state;
@@ -386,12 +392,12 @@ queue_messages(Process *c_p,
#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(receiver) < ERTS_PROC_LOCK_MSGQ ||
- *receiver_locks == erts_proc_lc_my_proc_locks(receiver));
+ receiver_locks == erts_proc_lc_my_proc_locks(receiver));
#endif
- if (!(*receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
+ if (!(receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
if (erts_smp_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
- ErtsProcLocks need_locks = ERTS_PROC_LOCK_MSGQ;
+ ErtsProcLocks need_locks;
if (receiver_state)
state = *receiver_state;
@@ -400,10 +406,11 @@ queue_messages(Process *c_p,
if (state & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
goto exiting;
- if (*receiver_locks & ERTS_PROC_LOCK_STATUS) {
- erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_STATUS);
- need_locks |= ERTS_PROC_LOCK_STATUS;
+ need_locks = receiver_locks & (ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE);
+ if (need_locks) {
+ erts_smp_proc_unlock(receiver, need_locks);
}
+ need_locks |= ERTS_PROC_LOCK_MSGQ;
erts_smp_proc_lock(receiver, need_locks);
}
locked_msgq = 1;
@@ -426,7 +433,7 @@ queue_messages(Process *c_p,
res = receiver->msg.len;
#ifdef ERTS_SMP
- if (*receiver_locks & ERTS_PROC_LOCK_MAIN) {
+ if (receiver_locks & ERTS_PROC_LOCK_MAIN) {
/*
* We move 'in queue' to 'private queue' and place
* message at the end of 'private queue' in order
@@ -445,7 +452,10 @@ queue_messages(Process *c_p,
LINK_MESSAGE(receiver, first, last, len);
}
- if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) {
+ if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)
+ && (te = &erts_receive_tracing[erts_active_bp_ix()],
+ te->on)) {
+
ErtsMessage *msg = first;
#ifdef USE_VM_PROBES
@@ -468,52 +478,50 @@ queue_messages(Process *c_p,
tok_label, tok_lastcnt, tok_serial);
}
#endif
-
while (msg) {
- trace_receive(receiver, ERL_MESSAGE_TERM(msg));
+ trace_receive(receiver, from, ERL_MESSAGE_TERM(msg), te);
msg = msg->next;
}
}
-
- if (locked_msgq)
+ if (locked_msgq) {
erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
+ }
#ifdef ERTS_SMP
- erts_proc_notify_new_message(receiver, *receiver_locks);
+ erts_proc_notify_new_message(receiver, receiver_locks);
#else
erts_proc_notify_new_message(receiver, 0);
- ERTS_HOLE_CHECK(receiver);
#endif
return res;
}
static Sint
-queue_message(Process *c_p,
- Process* receiver,
+queue_message(Process* receiver,
erts_aint32_t *receiver_state,
- ErtsProcLocks *receiver_locks,
- ErtsMessage* mp, Eterm msg)
+ ErtsProcLocks receiver_locks,
+ ErtsMessage* mp, Eterm msg, Eterm from)
{
ERL_MESSAGE_TERM(mp) = msg;
- return queue_messages(c_p, receiver, receiver_state, receiver_locks,
- mp, &mp->next, 1 );
+ return queue_messages(receiver, receiver_state, receiver_locks,
+ mp, &mp->next, 1, from);
}
Sint
-erts_queue_message(Process* receiver, ErtsProcLocks *receiver_locks,
- ErtsMessage* mp, Eterm msg)
+erts_queue_message(Process* receiver, ErtsProcLocks receiver_locks,
+ ErtsMessage* mp, Eterm msg, Eterm from)
{
- return queue_message(NULL, receiver, NULL, receiver_locks, mp, msg);
+ return queue_message(receiver, NULL, receiver_locks, mp, msg, from);
}
Sint
-erts_queue_messages(Process* receiver, ErtsProcLocks *receiver_locks,
- ErtsMessage* first, ErtsMessage** last, Uint len)
+erts_queue_messages(Process* receiver, ErtsProcLocks receiver_locks,
+ ErtsMessage* first, ErtsMessage** last, Uint len,
+ Eterm from)
{
- return queue_messages(NULL, receiver, NULL, receiver_locks,
- first, last, len);
+ return queue_messages(receiver, NULL, receiver_locks,
+ first, last, len, from);
}
void
@@ -592,7 +600,9 @@ erts_try_alloc_message_on_heap(Process *pp,
ASSERT(!(*psp & ERTS_PSFLG_OFF_HEAP_MSGQ));
- if (
+ if ((*psp) & ERTS_PSFLGS_VOLATILE_HEAP)
+ goto in_message_fragment;
+ else if (
#if defined(ERTS_SMP)
*plp & ERTS_PROC_LOCK_MAIN
#else
@@ -602,7 +612,7 @@ erts_try_alloc_message_on_heap(Process *pp,
#ifdef ERTS_SMP
try_on_heap:
#endif
- if ((*psp & (ERTS_PSFLG_EXITING|ERTS_PSFLG_PENDING_EXIT))
+ if (((*psp) & ERTS_PSFLGS_VOLATILE_HEAP)
|| (pp->flags & F_DISABLE_GC)
|| HEAP_LIMIT(pp) - HEAP_TOP(pp) <= sz) {
/*
@@ -830,11 +840,11 @@ erts_send_message(Process* sender,
#ifdef USE_VM_PROBES
ERL_MESSAGE_DT_UTAG(mp) = utag;
#endif
- res = queue_message(sender,
- receiver,
+ res = queue_message(receiver,
&receiver_state,
- receiver_locks,
- mp, message);
+ *receiver_locks,
+ mp, message,
+ sender->common.id);
BM_SWAP_TIMER(send,system);
@@ -891,7 +901,7 @@ erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp,
seq_trace_output(token, save, SEQ_TRACE_SEND, to->common.id, NULL);
temptoken = copy_struct(token, sz_token, &hp, ohp);
ERL_MESSAGE_TOKEN(mp) = temptoken;
- erts_queue_message(to, to_locksp, mp, save);
+ erts_queue_message(to, *to_locksp, mp, save, am_system);
} else {
sz_from = IS_CONST(from) ? 0 : size_object(from);
#ifdef SHCOPY_SEND
@@ -913,7 +923,7 @@ erts_deliver_exit_message(Eterm from, Process *to, ErtsProcLocks *to_locksp,
? from
: copy_struct(from, sz_from, &hp, ohp));
save = TUPLE3(hp, am_EXIT, from_copy, mess);
- erts_queue_message(to, to_locksp, mp, save);
+ erts_queue_message(to, *to_locksp, mp, save, am_system);
}
}
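The new gate in queue_messages() only calls trace_receive() when the globally published receive-tracing event is enabled. The sketch below illustrates the underlying publish-then-read pattern with C11 atomics; the real implementation uses the breakpoint index machinery (erts_active_bp_ix) and thread progress rather than these primitives, so treat it purely as an illustration.

#include <stdatomic.h>
#include <stdio.h>

typedef struct { int on; /* plus match spec, tracer, ... in ERTS */ } TracingEvent;

static TracingEvent receive_tracing[2];  /* staging + active copies        */
static atomic_int active_ix;             /* index readers currently follow */

static void enable_receive_tracing(void)
{
    int staging = 1 - atomic_load(&active_ix);
    receive_tracing[staging].on = 1;     /* prepare the staging copy       */
    atomic_store(&active_ix, staging);   /* publish it atomically          */
}

int main(void)
{
    enable_receive_tracing();
    TracingEvent *te = &receive_tracing[atomic_load(&active_ix)];
    if (te->on)
        puts("message delivery would call trace_receive()");
    return 0;
}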
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index 60cf9a24e1..4493df1c1a 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -295,10 +295,10 @@ ErlHeapFragment* new_message_buffer(Uint);
ErlHeapFragment* erts_resize_message_buffer(ErlHeapFragment *, Uint,
Eterm *, Uint);
void free_message_buffer(ErlHeapFragment *);
-void erts_queue_dist_message(Process*, ErtsProcLocks*, ErtsDistExternal *, Eterm);
-Sint erts_queue_message(Process*, ErtsProcLocks*,ErtsMessage*, Eterm);
-Sint erts_queue_messages(Process*, ErtsProcLocks*,
- ErtsMessage*, ErtsMessage**, Uint);
+void erts_queue_dist_message(Process*, ErtsProcLocks, ErtsDistExternal *, Eterm, Eterm);
+Sint erts_queue_message(Process*, ErtsProcLocks,ErtsMessage*, Eterm, Eterm);
+Sint erts_queue_messages(Process*, ErtsProcLocks,
+ ErtsMessage*, ErtsMessage**, Uint, Eterm);
void erts_deliver_exit_message(Eterm, Process*, ErtsProcLocks *, Eterm, Eterm);
Sint erts_send_message(Process*, Process*, ErtsProcLocks*, Eterm, unsigned);
void erts_link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp);
diff --git a/erts/emulator/beam/erl_msacc.c b/erts/emulator/beam/erl_msacc.c
index d0f305900a..544bc8b983 100644
--- a/erts/emulator/beam/erl_msacc.c
+++ b/erts/emulator/beam/erl_msacc.c
@@ -257,7 +257,7 @@ static void send_reply(ErtsMsAcc *msacc, ErtsMSAccReq *msaccrp) {
if (msacc->unmanaged) erts_mtx_unlock(&msacc->mtx);
- erts_queue_message(rp, &rp_locks, msgp, msg);
+ erts_queue_message(rp, rp_locks, msgp, msg, am_system);
if (esdp && msaccrp->req_sched == esdp->no)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -338,7 +338,7 @@ erts_msacc_request(Process *c_p, int action, Eterm *threads)
{
#ifdef ERTS_ENABLE_MSACC
ErtsMsAcc *msacc = ERTS_MSACC_TSD_GET();
- ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
Eterm ref;
ErtsMSAccReq *msaccrp;
Eterm *hp;
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index d52bfbc5d9..606b73c7b5 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -20,6 +20,23 @@
/* Erlang Native InterFace
*/
+/*
+ * The environment contains a pointer to the currently executing process.
+ * In the dirty case this pointer does, however, not point to the
+ * actual process structure of the executing process, but instead to
+ * a "shadow process structure". This is done in order to be able to
+ * handle heap allocation without the need to acquire the main lock on
+ * the process.
+ *
+ * The dirty process is allowed to allocate on the heap without
+ * the main lock, i.e., increment htop, but is not allowed to
+ * modify mbuf, offheap, etc. without the main lock. The dirty
+ * process moves the mbuf list and offheap list of the shadow process
+ * structure into the real structure when the dirty NIF call
+ * completes.
+ */
+
+
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
@@ -81,6 +98,43 @@ void dtrace_nifenv_str(ErlNifEnv *, char *);
#define MIN_HEAP_FRAG_SZ 200
static Eterm* alloc_heap_heavy(ErlNifEnv* env, size_t need, Eterm* hp);
+static ERTS_INLINE int
+is_scheduler(void)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ if (!esdp)
+ return 0;
+ if (ERTS_SCHEDULER_IS_DIRTY(esdp))
+ return -1;
+ return 1;
+}
+
+static ERTS_INLINE void
+execution_state(ErlNifEnv *env, Process **c_pp, int *schedp)
+{
+ if (schedp)
+ *schedp = is_scheduler();
+ if (c_pp) {
+ if (!env || env->proc->common.id == ERTS_INVALID_PID)
+ *c_pp = NULL;
+ else {
+ Process *c_p = env->proc;
+
+ if (!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC))
+ ASSERT(is_scheduler() > 0);
+ else {
+ c_p = env->proc->next;
+ ASSERT(is_scheduler() < 0);
+ ASSERT(c_p && env->proc->common.id == c_p->common.id);
+ }
+
+ *c_pp = c_p;
+
+ ASSERT(!(c_p->static_flags & ERTS_STC_FLG_SHADOW_PROC));
+ }
+ }
+}
+
static ERTS_INLINE Eterm* alloc_heap(ErlNifEnv* env, size_t need)
{
Eterm* hp = env->hp;
@@ -124,6 +178,9 @@ static ERTS_INLINE void ensure_heap(ErlNifEnv* env, size_t may_need)
void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
Process* tracee)
{
+#ifdef ERTS_DIRTY_SCHEDULERS
+ ErtsSchedulerData *esdp;
+#endif
env->mod_nif = mod_nif;
env->proc = p;
env->hp = HEAP_TOP(p);
@@ -133,6 +190,61 @@ void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif,
env->tmp_obj_list = NULL;
env->exception_thrown = 0;
env->tracee = tracee;
+
+ ASSERT(p->common.id != ERTS_INVALID_PID);
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ esdp = erts_get_scheduler_data();
+ ASSERT(esdp);
+
+ if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+#ifdef DEBUG
+ erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
+
+ ASSERT(p->scheduler_data == esdp);
+ ASSERT((state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS))
+ && !(state & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)));
+#endif
+
+ }
+ else {
+ Process *sproc;
+#ifdef DEBUG
+ erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
+
+ ASSERT(!p->scheduler_data);
+ ASSERT((state & ERTS_PSFLG_DIRTY_RUNNING)
+ && !(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)));
+#endif
+
+ sproc = esdp->dirty_shadow_process;
+ ASSERT(sproc);
+ ASSERT(sproc->static_flags & ERTS_STC_FLG_SHADOW_PROC);
+ ASSERT(erts_smp_atomic32_read_nob(&sproc->state)
+ == (ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_PROXY));
+
+ sproc->next = p;
+ sproc->common.id = p->common.id;
+ sproc->htop = p->htop;
+ sproc->stop = p->stop;
+ sproc->hend = p->hend;
+ sproc->heap = p->heap;
+ sproc->abandoned_heap = p->abandoned_heap;
+ sproc->heap_sz = p->heap_sz;
+ sproc->high_water = p->high_water;
+ sproc->old_hend = p->old_hend;
+ sproc->old_htop = p->old_htop;
+ sproc->old_heap = p->old_heap;
+ sproc->mbuf = NULL;
+ sproc->mbuf_sz = 0;
+ ERTS_INIT_OFF_HEAP(&sproc->off_heap);
+ env->proc = sproc;
+ }
+#endif
}
/* Temporary object header, auto-deallocated when NIF returns
@@ -157,18 +269,75 @@ static ERTS_INLINE void free_tmp_objs(ErlNifEnv* env)
void erts_post_nif(ErlNifEnv* env)
{
erts_unblock_fpe(env->fpe_was_unmasked);
- if (env->heap_frag == NULL) {
- ASSERT(env->hp_end == HEAP_LIMIT(env->proc));
- ASSERT(env->hp >= HEAP_TOP(env->proc));
- ASSERT(env->hp <= HEAP_LIMIT(env->proc));
- HEAP_TOP(env->proc) = env->hp;
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!(env->proc->static_flags & ERTS_STC_FLG_SHADOW_PROC))
+#endif
+ {
+ ASSERT(is_scheduler() > 0);
+ if (env->heap_frag == NULL) {
+ ASSERT(env->hp_end == HEAP_LIMIT(env->proc));
+ ASSERT(env->hp >= HEAP_TOP(env->proc));
+ ASSERT(env->hp <= HEAP_LIMIT(env->proc));
+ HEAP_TOP(env->proc) = env->hp;
+ }
+ else {
+ ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
+ ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
+ env->heap_frag->used_size = env->hp - env->heap_frag->mem;
+ ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
+ }
+ env->exiting = ERTS_PROC_IS_EXITING(env->proc);
}
- else {
- ASSERT(env->hp_end != HEAP_LIMIT(env->proc));
- ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
- env->heap_frag->used_size = env->hp - env->heap_frag->mem;
- ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else { /* Dirty nif call using shadow process struct */
+ Process *c_p = env->proc->next;
+
+ ASSERT(is_scheduler() < 0);
+ ASSERT(env->proc->common.id == c_p->common.id);
+
+ if (!env->heap_frag) {
+ ASSERT(env->hp_end == HEAP_LIMIT(c_p));
+ ASSERT(env->hp >= HEAP_TOP(c_p));
+ ASSERT(env->hp <= HEAP_LIMIT(c_p));
+ HEAP_TOP(c_p) = env->hp;
+ }
+ else {
+ ASSERT(env->hp_end != HEAP_LIMIT(c_p));
+ ASSERT(env->hp_end - env->hp <= env->heap_frag->alloc_size);
+
+ HEAP_TOP(c_p) = HEAP_TOP(env->proc);
+ env->heap_frag->used_size = env->hp - env->heap_frag->mem;
+
+ ASSERT(env->heap_frag->used_size <= env->heap_frag->alloc_size);
+
+ if (c_p->mbuf) {
+ ErlHeapFragment *bp;
+ for (bp = env->proc->mbuf; bp->next; bp = bp->next)
+ ;
+ bp->next = c_p->mbuf;
+ }
+
+ c_p->mbuf = env->proc->mbuf;
+ c_p->mbuf_sz += env->proc->mbuf_sz;
+
+ }
+
+ if (!c_p->off_heap.first)
+ c_p->off_heap.first = env->proc->off_heap.first;
+ else if (env->proc->off_heap.first) {
+ struct erl_off_heap_header *ohhp;
+ for (ohhp = env->proc->off_heap.first; ohhp->next; ohhp = ohhp->next)
+ ;
+ ohhp->next = c_p->off_heap.first;
+ c_p->off_heap.first = env->proc->off_heap.first;
+ }
+ c_p->off_heap.overhead += env->proc->off_heap.overhead;
+
+ env->exiting = ERTS_PROC_IS_EXITING(c_p);
+ BUMP_ALL_REDS(c_p);
}
+#endif
free_tmp_objs(env);
}
@@ -366,7 +535,7 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
rp_locks = 0;
if (rp->common.id == c_p->common.id)
rp_locks = c_p_locks;
- erts_queue_messages(rp, &rp_locks, first, last, len);
+ erts_queue_messages(rp, rp_locks, first, last, len, c_p->common.id);
if (rp->common.id == c_p->common.id)
rp_locks &= ~c_p_locks;
if (rp_locks)
@@ -400,44 +569,44 @@ error:
#endif
-int
-enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
- ErlNifEnv* msg_env, ERL_NIF_TERM msg)
+int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
+ ErlNifEnv* msg_env, ERL_NIF_TERM msg)
{
struct enif_msg_environment_t* menv = (struct enif_msg_environment_t*)msg_env;
- ErtsProcLocks rp_locks = 0, lc_locks = 0, c_p_locks = ERTS_PROC_LOCK_MAIN;
+ ErtsProcLocks rp_locks = 0;
+#ifdef ERTS_SMP
+ ErtsProcLocks lc_locks = 0;
+#endif
Process* rp;
Process* c_p;
ErtsMessage *mp;
Eterm receiver = to_pid->pid;
- int flush_me = 0;
- ErtsSchedulerData *esdp = erts_get_scheduler_data();
- int scheduler = esdp ? esdp->no : 0;
+ int scheduler;
- if (env != NULL) {
- c_p = env->proc;
- if (receiver == c_p->common.id) {
- rp_locks = c_p_locks;
- flush_me = 1;
- }
+ execution_state(env, &c_p, &scheduler);
+
+#ifndef ERTS_SMP
+ if (!scheduler) {
+ erts_exit(ERTS_ABORT_EXIT,
+ "enif_send: called from non-scheduler thread on non-SMP VM");
+ return 0;
+ }
+#endif
+
+ if (scheduler > 0) { /* Normal scheduler */
+ rp = erts_proc_lookup(receiver);
+ if (c_p == rp)
+ rp_locks = ERTS_PROC_LOCK_MAIN;
}
else {
-#ifdef ERTS_SMP
- c_p = NULL;
-#else
- erts_exit(ERTS_ABORT_EXIT,"enif_send: env==NULL on non-SMP VM");
-#endif
+ if (c_p && ERTS_PROC_IS_EXITING(c_p))
+ return 0;
+ rp = erts_pid2proc_opt(c_p, 0, receiver, rp_locks,
+ ERTS_P2P_FLG_INC_REFC);
}
-
- rp = (scheduler
- ? erts_proc_lookup(receiver)
- : erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
- receiver, rp_locks, ERTS_P2P_FLG_INC_REFC));
-
- if (rp == NULL) {
- ASSERT(env == NULL || receiver != c_p->common.id);
+ if (rp == NULL)
return 0;
- }
+
if (menv) {
flush_env(msg_env);
mp = erts_alloc_message(0, NULL);
@@ -462,22 +631,12 @@ enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
ERL_MESSAGE_TERM(mp) = msg;
- if (flush_me) {
- flush_env(env); /* Needed for ERTS_HOLE_CHECK */
- }
-
if (!env || !env->tracee) {
if (c_p && IS_TRACED_FL(c_p, F_TRACE_SEND))
trace_send(c_p, receiver, msg);
-
-#ifndef ERTS_SMP
}
-#endif
-
- erts_queue_message(rp, &rp_locks, mp, msg);
#ifdef ERTS_SMP
- }
else {
/* This clause is taken when the nif is called in the context
of a traced process. We do not know which locks we have
@@ -502,14 +661,15 @@ enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
#ifdef ERTS_ENABLE_LOCK_CHECK
lc_locks = erts_proc_lc_my_proc_locks(rp);
rp_locks |= lc_locks;
- if (receiver == c_p->common.id)
- c_p_locks |= lc_locks;
#endif
if (ERTS_FORCE_ENIF_SEND_DELAY() || msgq ||
rp_locks & ERTS_PROC_LOCK_MSGQ ||
erts_smp_proc_trylock(rp, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
if (!msgq) {
+#ifdef ERTS_SMP
+ ErtsThrPrgrDelayHandle dhndl;
+#endif
msgq = erts_alloc(ERTS_ALC_T_TRACE_MSG_QUEUE,
sizeof(ErlTraceMessageQueue));
@@ -524,32 +684,43 @@ enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
+#ifdef ERTS_SMP
+ if (!scheduler)
+ dhndl = erts_thr_progress_unmanaged_delay();
+#endif
erts_schedule_flush_trace_messages(t_p->common.id);
-
+#ifdef ERTS_SMP
+ if (!scheduler)
+ erts_thr_progress_unmanaged_continue(dhndl);
+#endif
} else {
msgq->len++;
*msgq->last = mp;
msgq->last = &mp->next;
erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
}
+ goto done;
} else {
erts_smp_proc_unlock(t_p, ERTS_PROC_LOCK_TRACE);
rp_locks &= ~ERTS_PROC_LOCK_TRACE;
rp_locks |= ERTS_PROC_LOCK_MSGQ;
- erts_queue_message(rp, &rp_locks, mp, msg);
}
}
-#endif
+#endif /* ERTS_SMP */
+ erts_queue_message(rp, rp_locks, mp, msg,
+ c_p ? c_p->common.id : am_undefined);
+
+#ifdef ERTS_SMP
+done:
if (c_p == rp)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
if (rp_locks & ~lc_locks)
erts_smp_proc_unlock(rp, rp_locks & ~lc_locks);
- if (!scheduler)
+#endif
+ if (scheduler <= 0)
erts_proc_dec_refc(rp);
- if (flush_me) {
- cache_env(env);
- }
+
return 1;
}
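A sketch of a caller of the reworked enif_send() follows; the function name is hypothetical and the module boilerplate is omitted, but the erl_nif calls are the public API. With this change enif_send() may also be reached from a dirty scheduler or, on the SMP emulator, from a non-scheduler thread, which is what the scheduler <= 0 branches above handle.

static ERL_NIF_TERM notify_self(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    ErlNifPid self;
    ErlNifEnv* msg_env;
    ERL_NIF_TERM msg;

    if (argc != 1 || !enif_self(env, &self))  /* only valid in a process-bound env */
        return enif_make_badarg(env);

    msg_env = enif_alloc_env();               /* message terms live in their own env */
    msg = enif_make_tuple2(msg_env,
                           enif_make_atom(msg_env, "hello"),
                           enif_make_copy(msg_env, argv[0]));

    if (!enif_send(env, &self, msg_env, msg))
        enif_clear_env(msg_env);              /* send failed; terms were not consumed */
    enif_free_env(msg_env);
    return enif_make_atom(env, "ok");
}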
@@ -557,26 +728,52 @@ int
enif_port_command(ErlNifEnv *env, const ErlNifPort* to_port,
ErlNifEnv *msg_env, ERL_NIF_TERM msg)
{
-
- ErtsSchedulerData *esdp = erts_get_scheduler_data();
- int scheduler = esdp ? esdp->no : 0;
+ int iflags = (erts_port_synchronous_ops
+ ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
+ : ERTS_PORT_SFLGS_INVALID_LOOKUP);
+ int scheduler;
+ Process *c_p;
Port *prt;
+ int res;
- if (scheduler == 0 || !env)
- return 0;
+ if (!env)
+ erts_exit(ERTS_ABORT_EXIT, "enif_port_command: env == NULL");
+
+ execution_state(env, &c_p, &scheduler);
+
+ if (!c_p)
+ c_p = env->proc;
- prt = erts_port_lookup(to_port->port_id,
- (erts_port_synchronous_ops
- ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
- : ERTS_PORT_SFLGS_INVALID_LOOKUP));
+ if (scheduler > 0)
+ prt = erts_port_lookup(to_port->port_id, iflags);
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (scheduler < 0) {
+ if (ERTS_PROC_IS_EXITING(c_p))
+ return 0;
+ prt = erts_thr_port_lookup(to_port->port_id, iflags);
+ }
+#endif
+ else {
+ erts_exit(ERTS_ABORT_EXIT, "enif_port_command: "
+ "called from non-scheduler thread");
+ }
if (!prt)
- return 0;
+ res = 0;
+ else {
- if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
- trace_port_receive(prt, env->proc->common.id, am_command, msg);
+ if (IS_TRACED_FL(prt, F_TRACE_RECEIVE))
+ trace_port_receive(prt, c_p->common.id, am_command, msg);
+
+ res = erts_port_output_async(prt, c_p->common.id, msg);
+ }
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (scheduler < 0)
+ erts_port_dec_refc(prt);
+#endif
- return erts_port_output_async(prt, env->proc->common.id, msg);
+ return res;
}
ERL_NIF_TERM enif_make_copy(ErlNifEnv* dst_env, ERL_NIF_TERM src_term)
@@ -1038,15 +1235,21 @@ Eterm enif_make_badarg(ErlNifEnv* env)
Eterm enif_raise_exception(ErlNifEnv* env, ERL_NIF_TERM reason)
{
+ Process *c_p;
+
+ execution_state(env, &c_p, NULL);
+
env->exception_thrown = 1;
- env->proc->fvalue = reason;
- BIF_ERROR(env->proc, EXC_ERROR);
+ c_p->fvalue = reason;
+ BIF_ERROR(c_p, EXC_ERROR);
}
int enif_has_pending_exception(ErlNifEnv* env, ERL_NIF_TERM* reason)
{
if (env->exception_thrown && reason != NULL) {
- *reason = env->proc->fvalue;
+ Process *c_p;
+ execution_state(env, &c_p, NULL);
+ *reason = c_p->fvalue;
}
return env->exception_thrown;
}
@@ -1440,56 +1643,73 @@ int enif_make_reverse_list(ErlNifEnv* env, ERL_NIF_TERM term, ERL_NIF_TERM *list
return 1;
}
+int enif_is_current_process_alive(ErlNifEnv* env)
+{
+ Process *c_p;
+ int scheduler;
+
+ execution_state(env, &c_p, &scheduler);
+
+ if (!c_p)
+ erts_exit(ERTS_ABORT_EXIT,
+ "enif_is_current_process_alive: "
+ "Invalid environment");
+
+ if (!scheduler)
+ erts_exit(ERTS_ABORT_EXIT, "enif_is_current_process_alive: "
+ "called from non-scheduler thread");
+
+ return !ERTS_PROC_IS_EXITING(c_p);
+}
+
int enif_is_process_alive(ErlNifEnv* env, ErlNifPid *proc)
{
- ErtsProcLocks rp_locks = 0; /* We don't need any locks,
- just to check if it is alive */
- Eterm target = proc->pid;
- Process* rp;
- Process* c_p;
- int scheduler = erts_get_scheduler_id() != 0;
+ int scheduler;
- if (env != NULL) {
- c_p = env->proc;
- if (target == c_p->common.id) {
- /* We are alive! */
- return 1;
- }
- }
+ execution_state(env, NULL, &scheduler);
+
+ if (scheduler > 0)
+ return !!erts_proc_lookup(proc->pid);
else {
#ifdef ERTS_SMP
- c_p = NULL;
+ Process* rp = erts_pid2proc_opt(NULL, 0, proc->pid, 0,
+ ERTS_P2P_FLG_INC_REFC);
+ if (rp)
+ erts_proc_dec_refc(rp);
+ return !!rp;
#else
- erts_exit(ERTS_ABORT_EXIT,"enif_is_process_alive: "
- "env==NULL on non-SMP VM");
-#endif
- }
-
- rp = (scheduler
- ? erts_proc_lookup(target)
- : erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
- target, rp_locks, ERTS_P2P_FLG_INC_REFC));
- if (rp == NULL) {
- ASSERT(env == NULL || target != c_p->common.id);
+ erts_exit(ERTS_ABORT_EXIT, "enif_is_process_alive: "
+ "called from non-scheduler thread "
+ "in non-smp emulator");
return 0;
- } else {
- if (!scheduler)
- erts_proc_dec_refc(rp);
- return 1;
+#endif
}
}
int enif_is_port_alive(ErlNifEnv *env, ErlNifPort *port)
{
- /* only allowed if called from scheduler */
- if (erts_get_scheduler_id() == 0)
- erts_exit(ERTS_ABORT_EXIT,"enif_is_port_alive: called from non-scheduler");
+ int scheduler;
+ Uint32 iflags = (erts_port_synchronous_ops
+ ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
+ : ERTS_PORT_SFLGS_INVALID_LOOKUP);
- return erts_port_lookup(
- port->port_id,
- (erts_port_synchronous_ops
- ? ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP
- : ERTS_PORT_SFLGS_INVALID_LOOKUP)) != NULL;
+ execution_state(env, NULL, &scheduler);
+
+ if (scheduler > 0)
+ return !!erts_port_lookup(port->port_id, iflags);
+ else {
+#ifdef ERTS_SMP
+ Port *prt = erts_thr_port_lookup(port->port_id, iflags);
+ if (prt)
+ erts_port_dec_refc(prt);
+ return !!prt;
+#else
+ erts_exit(ERTS_ABORT_EXIT, "enif_is_port_alive: "
+ "called from non-scheduler thread "
+ "in non-smp emulator");
+ return 0;
+#endif
+ }
}
ERL_NIF_TERM
@@ -1602,6 +1822,16 @@ int enif_fprintf(void* filep, const char* format, ...)
return ret;
}
+int enif_snprintf(char *buffer, size_t size, const char* format, ...)
+{
+ int ret;
+ va_list arglist;
+ va_start(arglist, format);
+ ret = erts_vsnprintf(buffer, size, format, arglist);
+ va_end(arglist);
+ return ret;
+}
+
/***********************************************************
** Memory managed (GC'ed) "resource" objects **
***********************************************************/
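A small usage sketch for the enif_snprintf() added above (the calling NIF is hypothetical): it behaves like snprintf but, being backed by erts_vsnprintf, it also understands the "%T" directive for printing Erlang terms.

static ERL_NIF_TERM describe(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    char buf[256];

    if (argc != 1)
        return enif_make_badarg(env);
    enif_snprintf(buf, sizeof(buf), "got term: %T", argv[0]);
    return enif_make_string(env, buf, ERL_NIF_LATIN1);
}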
@@ -1956,16 +2186,19 @@ void* enif_dlsym(void* handle, const char* symbol,
int enif_consume_timeslice(ErlNifEnv* env, int percent)
{
+ Process *proc;
Sint reds;
+ execution_state(env, &proc, NULL);
+
ASSERT(is_proc_bound(env) && percent >= 1 && percent <= 100);
if (percent < 1) percent = 1;
else if (percent > 100) percent = 100;
reds = ((CONTEXT_REDS+99) / 100) * percent;
ASSERT(reds > 0 && reds <= CONTEXT_REDS);
- BUMP_REDS(env->proc, reds);
- return ERTS_BIF_REDS_LEFT(env->proc) == 0;
+ BUMP_REDS(proc, reds);
+ return ERTS_BIF_REDS_LEFT(proc) == 0;
}
/*
@@ -2060,10 +2293,19 @@ static ERL_NIF_TERM
init_nif_sched_data(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
int need_save, int argc, const ERL_NIF_TERM argv[])
{
- Process* proc = env->proc;
- Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
+ Process* proc;
+ Eterm* reg;
NifExport* ep;
- int i;
+ int i, scheduler;
+
+ execution_state(env, &proc, &scheduler);
+
+ ASSERT(scheduler);
+
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(proc)
+ & ERTS_PROC_LOCK_MAIN);
+
+ reg = erts_proc_sched_data(proc)->x_reg_array;
ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
if (!ep)
@@ -2075,12 +2317,13 @@ init_nif_sched_data(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirec
}
if (env->exception_thrown) {
ep->exception_thrown = 1;
- ep->rootset[0] = env->proc->fvalue;
+ ep->rootset[0] = proc->fvalue;
} else {
ep->exception_thrown = 0;
ep->rootset[0] = NIL;
}
- ERTS_VBUMP_ALL_REDS(proc);
+ if (scheduler > 0)
+ ERTS_VBUMP_ALL_REDS(proc);
for (i = 0; i < argc; i++) {
if (need_save)
ep->rootset[i+1] = reg[i];
@@ -2112,7 +2355,12 @@ static void
restore_nif_mfa(Process* proc, NifExport* ep, int exception)
{
int i;
- Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
+ Eterm* reg = erts_proc_sched_data(proc)->x_reg_array;
+
+ ERTS_SMP_LC_ASSERT(!(proc->static_flags
+ & ERTS_STC_FLG_SHADOW_PROC));
+ ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(proc)
+ & ERTS_PROC_LOCK_MAIN);
proc->current[0] = ep->saved_mfa[0];
proc->current[1] = ep->saved_mfa[1];
@@ -2137,11 +2385,13 @@ restore_nif_mfa(Process* proc, NifExport* ep, int exception)
static ERL_NIF_TERM
dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- Process* proc = env->proc;
+ Process* proc;
NifExport* ep;
+ execution_state(env, &proc, NULL);
+
ASSERT(argc == 1);
- ASSERT(!ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
ASSERT(ep);
ASSERT(!ep->exception_thrown);
@@ -2156,10 +2406,12 @@ dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
static ERL_NIF_TERM
dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- Process* proc = env->proc;
+ Process* proc;
NifExport* ep;
- ASSERT(!ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+ execution_state(env, &proc, NULL);
+
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
ASSERT(ep);
ASSERT(ep->exception_thrown);
@@ -2176,23 +2428,32 @@ dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
static ERL_NIF_TERM
execute_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- Process* proc = env->proc;
- NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+ Process* proc;
+ NativeFunPtr fp;
NifExport* ep;
ERL_NIF_TERM result;
- ASSERT(ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+ execution_state(env, &proc, NULL);
+
+ fp = (NativeFunPtr) proc->current[6];
+
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(proc)));
/*
* Set ep->fp to NULL before the native call so we know later whether it scheduled another NIF for execution
*/
ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
- ASSERT(ep);
+ ASSERT(ep && fp);
ep->fp = NULL;
erts_smp_atomic32_read_band_mb(&proc->state, ~(ERTS_PSFLG_DIRTY_CPU_PROC
| ERTS_PSFLG_DIRTY_IO_PROC));
+
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+
result = (*fp)(env, argc, argv);
+ erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+
if (erts_refc_dectest(&env->mod_nif->rt_dtor_cnt, 0) == 0 && env->mod_nif->mod == NULL)
close_lib(env->mod_nif);
/*
@@ -2229,29 +2490,49 @@ execute_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
static ERTS_INLINE ERL_NIF_TERM
schedule_dirty_nif(ErlNifEnv* env, int flags, int argc, const ERL_NIF_TERM argv[])
{
- erts_aint32_t state, n, a;
- Process* proc = env->proc;
- NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+ ERL_NIF_TERM result;
+ erts_aint32_t act, dirty_flag;
+ Process* proc;
+ NativeFunPtr fp;
NifExport* ep;
- int need_save;
+ int need_save, scheduler;
+
+ execution_state(env, &proc, &scheduler);
+ if (scheduler <= 0) {
+ ASSERT(scheduler < 0);
+ erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+ }
+
+ fp = (NativeFunPtr) proc->current[6];
+
+ ASSERT(fp);
ASSERT(flags==ERL_NIF_DIRTY_JOB_IO_BOUND || flags==ERL_NIF_DIRTY_JOB_CPU_BOUND);
- a = erts_smp_atomic32_read_acqb(&proc->state);
- while (1) {
- n = state = a;
+ if (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND)
+ dirty_flag = ERTS_PSFLG_DIRTY_CPU_PROC;
+ else
+ dirty_flag = ERTS_PSFLG_DIRTY_IO_PROC;
+
+ act = erts_smp_atomic32_read_bor_nob(&proc->state, dirty_flag);
+ if (!(act & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC)))
+ erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1);
+ else if ((act & (ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC)) & ~dirty_flag) {
+ /* clear other flag... */
if (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND)
- n |= ERTS_PSFLG_DIRTY_CPU_PROC;
+ dirty_flag = ERTS_PSFLG_DIRTY_IO_PROC;
else
- n |= ERTS_PSFLG_DIRTY_IO_PROC;
- a = erts_smp_atomic32_cmpxchg_mb(&proc->state, n, state);
- if (a == state)
- break;
+ dirty_flag = ERTS_PSFLG_DIRTY_CPU_PROC;
+ erts_smp_atomic32_read_band_nob(&proc->state, ~dirty_flag);
}
- erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1);
+
ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
need_save = (ep == NULL || is_non_value(ep->saved_mfa[0]));
- return init_nif_sched_data(env, execute_dirty_nif, fp, need_save, argc, argv);
+ result = init_nif_sched_data(env, execute_dirty_nif, fp, need_save, argc, argv);
+ if (scheduler <= 0)
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+ return result;
}
static ERL_NIF_TERM
@@ -2276,11 +2557,14 @@ schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
static ERL_NIF_TERM
execute_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- Process* proc = env->proc;
- NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+ Process* proc;
+ NativeFunPtr fp;
NifExport* ep;
ERL_NIF_TERM result;
+ execution_state(env, &proc, NULL);
+ fp = (NativeFunPtr) proc->current[6];
+
ASSERT(!env->exception_thrown);
ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
ASSERT(ep);
@@ -2303,10 +2587,10 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
ERL_NIF_TERM (*fp)(ErlNifEnv*, int, const ERL_NIF_TERM[]),
int argc, const ERL_NIF_TERM argv[])
{
- Process* proc = env->proc;
+ Process* proc;
NifExport* ep;
ERL_NIF_TERM fun_name_atom, result;
- int need_save;
+ int need_save, scheduler;
if (argc > MAX_ARG)
return enif_make_badarg(env);
@@ -2314,6 +2598,13 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
if (enif_is_exception(env, fun_name_atom))
return fun_name_atom;
+ execution_state(env, &proc, &scheduler);
+ if (scheduler <= 0) {
+ if (scheduler == 0)
+ enif_make_badarg(env);
+ erts_smp_proc_lock(proc, ERTS_PROC_LOCK_MAIN);
+ }
+
ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
need_save = (ep == NULL || is_non_value(ep->saved_mfa[0]));
@@ -2325,12 +2616,15 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
sched_fun = schedule_dirty_io_nif;
else if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND)
sched_fun = schedule_dirty_cpu_nif;
- else
- return enif_make_badarg(env);
+ else {
+ result = enif_make_badarg(env);
+ goto done;
+ }
result = init_nif_sched_data(env, sched_fun, fp, need_save, argc, argv);
#else
- return enif_make_badarg(env);
+ result = enif_make_badarg(env);
#endif
+ goto done;
}
else
result = init_nif_sched_data(env, execute_nif, fp, need_save, argc, argv);
@@ -2338,18 +2632,28 @@ enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
ASSERT(ep);
ep->exp.code[1] = (BeamInstr) fun_name_atom;
+
+done:
+ if (scheduler < 0)
+ erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_MAIN);
+
return result;
}
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
-
int
enif_is_on_dirty_scheduler(ErlNifEnv* env)
{
- return ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data);
-}
+ int scheduler;
+ Process *c_p;
-#endif /* ERL_NIF_DIRTY_SCHEDULER_SUPPORT */
+ execution_state(env, &c_p, &scheduler);
+
+ if (!c_p || !scheduler)
+ erts_exit(ERTS_ABORT_EXIT, "enif_is_on_dirty_scheduler: "
+ "Invalid env");
+
+ return scheduler < 0;
+}
/* Maps */
@@ -2780,7 +3084,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
ErlNifEntry* entry = NULL;
ErlNifEnv env;
int i, err, encoding;
- Module* mod;
+ Module* module_p;
Eterm mod_atom;
const Atom* mod_atomp;
Eterm f_atom;
@@ -2790,6 +3094,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
int veto;
struct erl_module_nif* lib = NULL;
int reload_warning = 0;
+ struct erl_module_instance* this_mi;
+ struct erl_module_instance* prev_mi;
encoding = erts_get_native_filename_encoding();
if (encoding == ERL_FILENAME_WIN_WCHAR) {
@@ -2823,21 +3129,29 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
ASSERT(caller != NULL);
mod_atom = caller[0];
ASSERT(is_atom(mod_atom));
- mod=erts_get_module(mod_atom, erts_active_code_ix());
- ASSERT(mod != NULL);
+ module_p = erts_get_module(mod_atom, erts_active_code_ix());
+ ASSERT(module_p != NULL);
mod_atomp = atom_tab(atom_val(mod_atom));
init_func = erts_static_nif_get_nif_init((char*)mod_atomp->name, mod_atomp->len);
if (init_func != NULL)
handle = init_func;
- if (!in_area(caller, mod->curr.code_hdr, mod->curr.code_length)) {
- ASSERT(in_area(caller, mod->old.code_hdr, mod->old.code_length));
+ if (in_area(caller, module_p->old.code_hdr, module_p->old.code_length)) {
+ if (module_p->old.code_hdr->on_load_function_ptr) {
+ this_mi = &module_p->old;
+ prev_mi = &module_p->curr;
+ } else {
+ ret = load_nif_error(BIF_P, "old_code", "Calling load_nif from old "
+ "module '%T' not allowed", mod_atom);
+ goto error;
+ }
+ } else {
+ this_mi = &module_p->curr;
+ prev_mi = &module_p->old;
+ }
- ret = load_nif_error(BIF_P, "old_code", "Calling load_nif from old "
- "module '%T' not allowed", mod_atom);
- }
- else if (init_func == NULL &&
+ if (init_func == NULL &&
(err=erts_sys_ddll_open(lib_name, &handle, &errdesc)) != ERL_DE_NO_ERROR) {
const char slogan[] = "Failed to load NIF library";
if (strstr(errdesc.str, lib_name) != NULL) {
@@ -2886,7 +3200,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
for (i=0; i < entry->num_of_funcs && ret==am_ok; i++) {
BeamInstr** code_pp;
if (!erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1)
- || (code_pp = get_func_pp(mod->curr.code_hdr, f_atom, f->arity))==NULL) {
+ || (code_pp = get_func_pp(this_mi->code_hdr, f_atom, f->arity))==NULL) {
ret = load_nif_error(BIF_P,bad_lib,"Function not found %T:%s/%u",
mod_atom, f->name, f->arity);
}
@@ -2937,9 +3251,9 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
erts_refc_init(&lib->rt_cnt, 0);
erts_refc_init(&lib->rt_dtor_cnt, 0);
ASSERT(opened_rt_list == NULL);
- lib->mod = mod;
+ lib->mod = module_p;
env.mod_nif = lib;
- if (mod->curr.nif != NULL) { /*************** Reload ******************/
+ if (this_mi->nif != NULL) { /*************** Reload ******************/
/*
* Repeated load_nif calls from same Erlang module instance ("reload")
	 * is deprecated and was only meant as a development feature not to
@@ -2947,16 +3261,16 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
*/
int k, old_incr = 0;
ErlNifFunc* old_func;
- lib->priv_data = mod->curr.nif->priv_data;
+ lib->priv_data = this_mi->nif->priv_data;
- ASSERT(mod->curr.nif->entry != NULL);
+ ASSERT(this_mi->nif->entry != NULL);
if (entry->reload == NULL) {
ret = load_nif_error(BIF_P,reload,"Reload not supported by this NIF library.");
goto error;
}
/* Check that no NIF is removed */
- old_func = mod->curr.nif->entry->funcs;
- for (k=0; k < mod->curr.nif->entry->num_of_funcs; k++) {
+ old_func = this_mi->nif->entry->funcs;
+ for (k=0; k < this_mi->nif->entry->num_of_funcs; k++) {
int incr = 0;
ErlNifFunc* f = entry->funcs;
for (i=0; i < entry->num_of_funcs; i++) {
@@ -2972,7 +3286,7 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
old_func->name, old_func->arity);
goto error;
}
- old_func = next_func(mod->curr.nif->entry, &old_incr, old_func);
+ old_func = next_func(this_mi->nif->entry, &old_incr, old_func);
}
erts_pre_nif(&env, BIF_P, lib, NULL);
veto = entry->reload(&env, &lib->priv_data, BIF_ARG_2);
@@ -2982,24 +3296,24 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
}
else {
commit_opened_resource_types(lib);
- mod->curr.nif->entry = NULL; /* to prevent 'unload' callback */
- erts_unload_nif(mod->curr.nif);
+ this_mi->nif->entry = NULL; /* to prevent 'unload' callback */
+ erts_unload_nif(this_mi->nif);
reload_warning = 1;
}
}
else {
lib->priv_data = NULL;
- if (mod->old.nif != NULL) { /**************** Upgrade ***************/
- void* prev_old_data = mod->old.nif->priv_data;
+ if (prev_mi->nif != NULL) { /**************** Upgrade ***************/
+ void* prev_old_data = prev_mi->nif->priv_data;
if (entry->upgrade == NULL) {
ret = load_nif_error(BIF_P, upgrade, "Upgrade not supported by this NIF library.");
goto error;
}
- erts_pre_nif(&env, BIF_P, lib, NULL);
- veto = entry->upgrade(&env, &lib->priv_data, &mod->old.nif->priv_data, BIF_ARG_2);
+ erts_pre_nif(&env, BIF_P, lib, NULL);
+ veto = entry->upgrade(&env, &lib->priv_data, &prev_mi->nif->priv_data, BIF_ARG_2);
erts_post_nif(&env);
if (veto) {
- mod->old.nif->priv_data = prev_old_data;
+ prev_mi->nif->priv_data = prev_old_data;
ret = load_nif_error(BIF_P, upgrade, "Library upgrade-call unsuccessful.");
}
else
@@ -3024,12 +3338,12 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
int incr = 0;
ErlNifFunc* f = entry->funcs;
- mod->curr.nif = lib;
+ this_mi->nif = lib;
for (i=0; i < entry->num_of_funcs; i++)
{
BeamInstr* code_ptr;
erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1);
- code_ptr = *get_func_pp(mod->curr.code_hdr, f_atom, f->arity);
+ code_ptr = *get_func_pp(this_mi->code_hdr, f_atom, f->arity);
if (code_ptr[1] == 0) {
code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif);
@@ -3040,16 +3354,16 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
(BeamInstr) BeamOp(op_i_generic_breakpoint));
g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
}
+#ifdef ERTS_DIRTY_SCHEDULERS
if ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
&& (entry->options & ERL_NIF_DIRTY_NIF_OPTION) && f->flags) {
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
code_ptr[5+3] = (BeamInstr) f->fptr;
code_ptr[5+1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ?
(BeamInstr) schedule_dirty_io_nif :
(BeamInstr) schedule_dirty_cpu_nif;
-#endif
}
else
+#endif
code_ptr[5+1] = (BeamInstr) f->fptr;
code_ptr[5+2] = (BeamInstr) lib;
f = next_func(entry, &incr, f);
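
For illustration, below is a minimal sketch of a NIF that hands its work to a dirty CPU scheduler through enif_schedule_nif(), which is the path reworked above. The module-less helper names and the fake workload are hypothetical; only the enif_* calls and the ERL_NIF_DIRTY_JOB_CPU_BOUND flag come from the public API touched by this change.

#include "erl_nif.h"

/* Hypothetical CPU-heavy body; intended to run on a dirty CPU scheduler. */
static ERL_NIF_TERM do_work(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    int n;
    long i, sum = 0;
    if (argc != 1 || !enif_get_int(env, argv[0], &n))
        return enif_make_badarg(env);
    for (i = 0; i < (long) n * 1000000; i++)
        sum += i;                      /* stand-in for expensive work */
    return enif_make_long(env, sum);
}

/* Entry point called on a normal scheduler; hands the real work over
 * to a dirty CPU scheduler via enif_schedule_nif(). */
static ERL_NIF_TERM work_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    return enif_schedule_nif(env, "do_work", ERL_NIF_DIRTY_JOB_CPU_BOUND,
                             do_work, argc, argv);
}

When the emulator is built without dirty-scheduler support, the same call simply returns a badarg, as the #else branch in enif_schedule_nif() above shows.
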
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index 3964f7f679..da7a754757 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -28,7 +28,6 @@
# include "config.h"
#endif
-#include "erl_native_features_config.h"
#include "erl_drv_nif.h"
/* Version history:
@@ -50,9 +49,10 @@
** 2.8: 18.0 add enif_has_pending_exception
** 2.9: 18.2 enif_getenv
** 2.10: Time API
+** 2.11: 19.0 enif_snprintf
*/
#define ERL_NIF_MAJOR_VERSION 2
-#define ERL_NIF_MINOR_VERSION 10
+#define ERL_NIF_MINOR_VERSION 11
/*
* The emulator will refuse to load a nif-lib with a major version
@@ -167,13 +167,11 @@ typedef int ErlNifTSDKey;
typedef ErlDrvThreadOpts ErlNifThreadOpts;
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
typedef enum
{
- ERL_NIF_DIRTY_JOB_CPU_BOUND = ERL_DRV_DIRTY_JOB_CPU_BOUND,
- ERL_NIF_DIRTY_JOB_IO_BOUND = ERL_DRV_DIRTY_JOB_IO_BOUND
+ ERL_NIF_DIRTY_JOB_CPU_BOUND = ERL_DIRTY_JOB_CPU_BOUND,
+ ERL_NIF_DIRTY_JOB_IO_BOUND = ERL_DIRTY_JOB_IO_BOUND
}ErlNifDirtyTaskFlags;
-#endif
typedef struct /* All fields all internal and may change */
{
@@ -257,11 +255,7 @@ extern TWinDynNifCallbacks WinDynNifCallbacks;
# define ERL_NIF_INIT_DECL(MODNAME) ERL_NIF_INIT_EXPORT ErlNifEntry* nif_init(ERL_NIF_INIT_ARGS)
#endif
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
-# define ERL_NIF_ENTRY_OPTIONS ERL_NIF_DIRTY_NIF_OPTION
-#else
-# define ERL_NIF_ENTRY_OPTIONS 0
-#endif
+#define ERL_NIF_ENTRY_OPTIONS ERL_NIF_DIRTY_NIF_OPTION
#ifdef __cplusplus
}
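
Since ERL_NIF_ENTRY_OPTIONS now always carries ERL_NIF_DIRTY_NIF_OPTION, a library built against erl_nif.h 2.11 can declare dirty entries directly in its function table instead of guarding them with ERL_NIF_DIRTY_SCHEDULER_SUPPORT. A hedged sketch, reusing the hypothetical do_work/work_nif functions from the sketch above (the module name example_mod is made up):

static ErlNifFunc nif_funcs[] = {
    /* {name, arity, fptr, flags}: a non-zero flags field marks the
     * entry as a dirty NIF (CPU- or I/O-bound). */
    {"work",         1, do_work,  ERL_NIF_DIRTY_JOB_CPU_BOUND},
    {"work_resched", 1, work_nif, 0}  /* normal NIF that reschedules itself */
};

ERL_NIF_INIT(example_mod, nif_funcs, NULL, NULL, NULL, NULL)
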
diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h
index a5acd86551..b211ab4b16 100644
--- a/erts/emulator/beam/erl_nif_api_funcs.h
+++ b/erts/emulator/beam/erl_nif_api_funcs.h
@@ -166,26 +166,19 @@ ERL_NIF_API_FUNC_DECL(ErlNifTime, enif_convert_time_unit, (ErlNifTime, ErlNifTim
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_now_time, (ErlNifEnv *env));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_cpu_time, (ErlNifEnv *env));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_make_unique_integer, (ErlNifEnv *env, ErlNifUniqueInteger properties));
+ERL_NIF_API_FUNC_DECL(int, enif_is_current_process_alive, (ErlNifEnv *env));
ERL_NIF_API_FUNC_DECL(int, enif_is_process_alive, (ErlNifEnv *env, ErlNifPid *pid));
ERL_NIF_API_FUNC_DECL(int, enif_is_port_alive, (ErlNifEnv *env, ErlNifPort *port_id));
ERL_NIF_API_FUNC_DECL(int, enif_get_local_port, (ErlNifEnv* env, ERL_NIF_TERM, ErlNifPort* port_id));
ERL_NIF_API_FUNC_DECL(int, enif_term_to_binary, (ErlNifEnv *env, ERL_NIF_TERM term, ErlNifBinary *bin));
ERL_NIF_API_FUNC_DECL(size_t, enif_binary_to_term, (ErlNifEnv *env, const unsigned char* data, size_t sz, ERL_NIF_TERM *term, unsigned int opts));
ERL_NIF_API_FUNC_DECL(int, enif_port_command, (ErlNifEnv *env, const ErlNifPort* to_port, ErlNifEnv *msg_env, ERL_NIF_TERM msg));
+ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*));
+ERL_NIF_API_FUNC_DECL(int,enif_snprintf,(char * buffer, size_t size, const char *format, ...));
/*
** ADD NEW ENTRIES HERE (before this comment) !!!
*/
-
-
-/*
- * Conditional EXPERIMENTAL stuff always last.
- * Must be moved up and made unconditional to support binary backward
- * compatibility on Windows.
- */
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
-ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*));
-#endif
#endif /* ERL_NIF_API_FUNC_DECL */
/*
@@ -330,12 +323,15 @@ ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*));
# define enif_now_time ERL_NIF_API_FUNC_MACRO(enif_now_time)
# define enif_cpu_time ERL_NIF_API_FUNC_MACRO(enif_cpu_time)
# define enif_make_unique_integer ERL_NIF_API_FUNC_MACRO(enif_make_unique_integer)
+# define enif_is_current_process_alive ERL_NIF_API_FUNC_MACRO(enif_is_current_process_alive)
# define enif_is_process_alive ERL_NIF_API_FUNC_MACRO(enif_is_process_alive)
# define enif_is_port_alive ERL_NIF_API_FUNC_MACRO(enif_is_port_alive)
# define enif_get_local_port ERL_NIF_API_FUNC_MACRO(enif_get_local_port)
# define enif_term_to_binary ERL_NIF_API_FUNC_MACRO(enif_term_to_binary)
# define enif_binary_to_term ERL_NIF_API_FUNC_MACRO(enif_binary_to_term)
# define enif_port_command ERL_NIF_API_FUNC_MACRO(enif_port_command)
+# define enif_is_on_dirty_scheduler ERL_NIF_API_FUNC_MACRO(enif_is_on_dirty_scheduler)
+# define enif_snprintf ERL_NIF_API_FUNC_MACRO(enif_snprintf)
/*
** ADD NEW ENTRIES HERE (before this comment)
@@ -346,9 +342,6 @@ ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*));
* Must be moved up and made unconditional to support binary backward
* compatibility on Windows.
*/
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
-# define enif_is_on_dirty_scheduler ERL_NIF_API_FUNC_MACRO(enif_is_on_dirty_scheduler)
-#endif
#endif /* ERL_NIF_API_FUNC_MACRO */
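
The two entries moved out of the conditional EXPERIMENTAL section above, enif_is_on_dirty_scheduler() and enif_snprintf(), are now ordinary unconditional API functions (the latter mainly adds Erlang term formatting via "%T" on top of plain snprintf). A small hypothetical NIF body using both; only standard printf directives are used here:

#include "erl_nif.h"

/* Hypothetical diagnostic NIF: report whether this call is currently
 * being executed by a dirty scheduler, formatted with enif_snprintf(). */
static ERL_NIF_TERM where_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    char buf[64];
    enif_snprintf(buf, sizeof(buf), "dirty=%d argc=%d",
                  enif_is_on_dirty_scheduler(env), argc);
    return enif_make_string(env, buf, ERL_NIF_LATIN1);
}
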
diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h
index c2588e718d..f0075ca2b9 100644
--- a/erts/emulator/beam/erl_port.h
+++ b/erts/emulator/beam/erl_port.h
@@ -487,6 +487,7 @@ ERTS_GLB_INLINE Port*erts_id2port(Eterm id);
ERTS_GLB_INLINE Port *erts_id2port_sflgs(Eterm, Process *, ErtsProcLocks, Uint32);
ERTS_GLB_INLINE void erts_port_release(Port *);
#ifdef ERTS_SMP
+ERTS_GLB_INLINE Port *erts_thr_port_lookup(Eterm id, Uint32 invalid_sflgs);
ERTS_GLB_INLINE Port *erts_thr_id2port_sflgs(Eterm id, Uint32 invalid_sflgs);
ERTS_GLB_INLINE void erts_thr_port_release(Port *prt);
#endif
@@ -626,6 +627,44 @@ erts_port_release(Port *prt)
}
#ifdef ERTS_SMP
+/*
+ * erts_thr_port_lookup() and erts_port_dec_refc(prt) can
+ * be used by unmanaged threads in the SMP case.
+ */
+ERTS_GLB_INLINE Port *
+erts_thr_port_lookup(Eterm id, Uint32 invalid_sflgs)
+{
+ Port *prt;
+ ErtsThrPrgrDelayHandle dhndl;
+
+ if (is_not_internal_port(id))
+ return NULL;
+
+ dhndl = erts_thr_progress_unmanaged_delay();
+
+ prt = (Port *) erts_ptab_pix2intptr_ddrb(&erts_port,
+ internal_port_index(id));
+
+ if (!prt || prt->common.id != id) {
+ erts_thr_progress_unmanaged_continue(dhndl);
+ return NULL;
+ }
+ else {
+ erts_aint32_t state;
+ erts_port_inc_refc(prt);
+
+ if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
+ erts_thr_progress_unmanaged_continue(dhndl);
+
+ state = erts_atomic32_read_acqb(&prt->state);
+ if (state & invalid_sflgs) {
+ erts_port_dec_refc(prt);
+ return NULL;
+ }
+
+ return prt;
+ }
+}
/*
* erts_thr_id2port_sflgs() and erts_thr_port_release() can
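
A sketch of how the new erts_thr_port_lookup() is intended to be used from an unmanaged thread inside the emulator (internal API, not visible to NIFs or drivers). The helper name is made up, and ERTS_PORT_SFLGS_INVALID_LOOKUP is assumed to be the usual invalid-state flag set passed to port lookups:

/* Hypothetical internal helper: an unmanaged thread resolves a port id
 * and later drops the reference that a successful lookup bumped.
 * Assumes the usual internal headers ("erl_port.h" etc.). */
static void poke_port(Eterm port_id)
{
    Port *prt = erts_thr_port_lookup(port_id, ERTS_PORT_SFLGS_INVALID_LOOKUP);
    if (!prt)
        return;                  /* not found, or in an invalid state */
    /* ... read fields that are safe without the port lock ... */
    erts_port_dec_refc(prt);     /* release the refc taken by the lookup */
}
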
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index d485affa3b..a853ec585b 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -409,6 +409,10 @@ ErtsAlignedSchedulerData *erts_aligned_scheduler_data;
#ifdef ERTS_DIRTY_SCHEDULERS
ErtsAlignedSchedulerData *erts_aligned_dirty_cpu_scheduler_data;
ErtsAlignedSchedulerData *erts_aligned_dirty_io_scheduler_data;
+typedef union {
+ Process dsp;
+ char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(Process))];
+} ErtsAlignedDirtyShadowProcess;
#endif
typedef union {
@@ -589,6 +593,7 @@ dbg_chk_aux_work_val(erts_aint32_t value)
valid |= ERTS_SSI_AUX_WORK_CNCLD_TMRS;
valid |= ERTS_SSI_AUX_WORK_CNCLD_TMRS_THR_PRGR;
valid |= ERTS_SSI_AUX_WORK_THR_PRGR_LATER_OP;
+ valid |= ERTS_SSI_AUX_WORK_PENDING_EXITERS;
#endif
#if HAVE_ERTS_MSEG
valid |= ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK;
@@ -611,7 +616,7 @@ dbg_chk_aux_work_val(erts_aint32_t value)
#endif
#ifdef ERTS_SMP
-static void handle_pending_exiters(ErtsProcList *);
+static void do_handle_pending_exiters(ErtsProcList *);
static void wake_scheduler(ErtsRunQueue *rq);
#endif
@@ -679,6 +684,8 @@ erts_pre_init_process(void)
= "MISC_THR_PRGR";
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MISC_IX]
= "MISC";
+ erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_PENDING_EXITERS_IX]
+ = "PENDING_EXITERS";
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_SET_TMO_IX]
= "SET_TMO";
erts_aux_work_flag_descr[ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX]
@@ -1139,7 +1146,7 @@ reply_sched_wall_time(void *vswtrp)
hpp = &hp;
}
- erts_queue_message(rp, &rp_locks, mp, msg);
+ erts_queue_message(rp, rp_locks, mp, msg, am_system);
if (swtrp->req_sched == esdp->no)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -1156,7 +1163,7 @@ reply_sched_wall_time(void *vswtrp)
Eterm
erts_sched_wall_time_request(Process *c_p, int set, int enable)
{
- ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
Eterm ref;
ErtsSchedWallTimeReq *swtrp;
Eterm *hp;
@@ -1218,7 +1225,7 @@ reply_system_check(void *vscrp)
hpp = &hp;
msg = STORE_NC(hpp, ohp, scrp->ref);
- erts_queue_message(rp, &rp_locks, mp, msg);
+ erts_queue_message(rp, rp_locks, mp, msg, am_system);
if (scrp->req_sched == esdp->no)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -1234,7 +1241,7 @@ reply_system_check(void *vscrp)
Eterm erts_system_check_request(Process *c_p) {
- ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
Eterm ref;
ErtsSystemCheckReq *scrp;
Eterm *hp;
@@ -2336,6 +2343,30 @@ handle_mseg_cache_check(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiti
#endif
+#ifdef ERTS_SMP
+
+static ERTS_INLINE erts_aint32_t
+handle_pending_exiters(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
+{
+ ErtsProcList *pnd_xtrs;
+ ErtsRunQueue *rq;
+
+ rq = awdp->esdp->run_queue;
+ unset_aux_work_flags(awdp->ssi, ERTS_SSI_AUX_WORK_PENDING_EXITERS);
+
+ erts_smp_runq_lock(rq);
+ pnd_xtrs = rq->procs.pending_exiters;
+ rq->procs.pending_exiters = NULL;
+ erts_smp_runq_unlock(rq);
+
+ if (erts_proclist_fetch(&pnd_xtrs, NULL))
+ do_handle_pending_exiters(pnd_xtrs);
+
+ return aux_work & ~ERTS_SSI_AUX_WORK_PENDING_EXITERS;
+}
+
+#endif
+
static ERTS_INLINE erts_aint32_t
handle_setup_aux_work_timer(ErtsAuxWorkData *awdp, erts_aint32_t aux_work, int waiting)
{
@@ -2427,6 +2458,10 @@ handle_aux_work(ErtsAuxWorkData *awdp, erts_aint32_t orig_aux_work, int waiting)
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_MISC,
handle_misc_aux_work);
+#ifdef ERTS_SMP
+ HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_PENDING_EXITERS,
+ handle_pending_exiters);
+#endif
HANDLE_AUX_WORK(ERTS_SSI_AUX_WORK_SET_TMO,
handle_setup_aux_work_timer);
@@ -3979,6 +4014,33 @@ schedule_bound_processes(ErtsRunQueue *rq,
}
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+static ERTS_INLINE void
+clear_proc_dirty_queue_bit(Process *p, ErtsRunQueue *rq, int prio_bit)
+{
+#ifdef DEBUG
+ erts_aint32_t old;
+#endif
+ erts_aint32_t qb = prio_bit;
+ if (rq == ERTS_DIRTY_CPU_RUNQ)
+ qb <<= ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET;
+ else {
+ ASSERT(rq == ERTS_DIRTY_IO_RUNQ);
+ qb <<= ERTS_PDSFLGS_IN_IO_PRQ_MASK_OFFSET;
+ }
+#ifdef DEBUG
+ old = (int)
+#else
+ (void)
+#endif
+ erts_smp_atomic32_read_band_mb(&p->dirty_state, ~qb);
+ ASSERT(old & qb);
+}
+
+#endif /* ERTS_DIRTY_SCHEDULERS */
+
+
static void
evacuate_run_queue(ErtsRunQueue *rq,
ErtsStuckBoundProcesses *sbpp)
@@ -4141,29 +4203,8 @@ evacuate_run_queue(ErtsRunQueue *rq,
}
#ifdef ERTS_DIRTY_SCHEDULERS
-
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix)) {
- erts_aint32_t dqbit = qbit;
-#ifdef DEBUG
- erts_aint32_t old_dqbit;
-#endif
-
- if (rq == ERTS_DIRTY_CPU_RUNQ)
- dqbit <<= ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET;
- else {
- ASSERT(rq == ERTS_DIRTY_IO_RUNQ);
- dqbit <<= ERTS_PDSFLGS_IN_IO_PRQ_MASK_OFFSET;
- }
-
-#ifdef DEBUG
- old_dqbit = (int)
-#else
- (void)
-#endif
- erts_smp_atomic32_read_band_mb(&real_proc->dirty_state,
- ~dqbit);
- ASSERT(old_dqbit & dqbit);
- }
+ if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
+ clear_proc_dirty_queue_bit(real_proc, rq, qbit);
#endif
if (ERTS_PSFLG_BOUND & real_state) {
@@ -5653,7 +5694,8 @@ static void
init_scheduler_data(ErtsSchedulerData* esdp, int num,
ErtsSchedulerSleepInfo* ssi,
ErtsRunQueue* runq,
- char** daww_ptr, size_t daww_sz)
+ char** daww_ptr, size_t daww_sz,
+ Process *shadow_proc)
{
esdp->timer_wheel = NULL;
#ifdef ERTS_SMP
@@ -5677,6 +5719,15 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
esdp->no = (Uint) num;
ERTS_DIRTY_SCHEDULER_NO(esdp) = 0;
}
+ esdp->dirty_shadow_process = shadow_proc;
+ if (shadow_proc) {
+ erts_init_empty_process(shadow_proc);
+ erts_smp_atomic32_init_nob(&shadow_proc->state,
+ (ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_PROXY));
+ shadow_proc->static_flags = ERTS_STC_FLG_SHADOW_PROC;
+ }
#else
esdp->no = (Uint) num;
#endif
@@ -5928,31 +5979,41 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online
for (ix = 0; ix < n; ix++) {
ErtsSchedulerData *esdp = ERTS_SCHEDULER_IX(ix);
init_scheduler_data(esdp, ix+1, ERTS_SCHED_SLEEP_INFO_IX(ix),
- ERTS_RUNQ_IX(ix), &daww_ptr, daww_sz);
+ ERTS_RUNQ_IX(ix), &daww_ptr, daww_sz,
+ NULL);
}
#ifdef ERTS_DIRTY_SCHEDULERS
-#ifdef ERTS_SMP
- erts_aligned_dirty_cpu_scheduler_data =
- erts_alloc_permanent_cache_aligned(
- ERTS_ALC_T_SCHDLR_DATA,
- no_dirty_cpu_schedulers*sizeof(ErtsAlignedSchedulerData));
- for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) {
- ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
- init_scheduler_data(esdp, ix+1, ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix),
- ERTS_DIRTY_CPU_RUNQ, NULL, 0);
- }
- erts_aligned_dirty_io_scheduler_data =
- erts_alloc_permanent_cache_aligned(
- ERTS_ALC_T_SCHDLR_DATA,
- no_dirty_io_schedulers*sizeof(ErtsAlignedSchedulerData));
- for (ix = 0; ix < no_dirty_io_schedulers; ix++) {
- ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
- init_scheduler_data(esdp, ix+1, ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix),
- ERTS_DIRTY_IO_RUNQ, NULL, 0);
+ {
+ int dirty_scheds = no_dirty_cpu_schedulers + no_dirty_io_schedulers;
+ int adspix = 0;
+ ErtsAlignedDirtyShadowProcess *adsp =
+ erts_alloc_permanent_cache_aligned(
+ ERTS_ALC_T_SCHDLR_DATA,
+ dirty_scheds * sizeof(ErtsAlignedDirtyShadowProcess));
+
+ erts_aligned_dirty_cpu_scheduler_data =
+ erts_alloc_permanent_cache_aligned(
+ ERTS_ALC_T_SCHDLR_DATA,
+ dirty_scheds * sizeof(ErtsAlignedSchedulerData));
+
+ erts_aligned_dirty_io_scheduler_data =
+ &erts_aligned_dirty_cpu_scheduler_data[no_dirty_cpu_schedulers];
+
+ for (ix = 0; ix < no_dirty_cpu_schedulers; ix++) {
+ ErtsSchedulerData *esdp = ERTS_DIRTY_CPU_SCHEDULER_IX(ix);
+ init_scheduler_data(esdp, ix+1, ERTS_DIRTY_CPU_SCHED_SLEEP_INFO_IX(ix),
+ ERTS_DIRTY_CPU_RUNQ, NULL, 0,
+ &adsp[adspix++].dsp);
+ }
+ for (ix = 0; ix < no_dirty_io_schedulers; ix++) {
+ ErtsSchedulerData *esdp = ERTS_DIRTY_IO_SCHEDULER_IX(ix);
+ init_scheduler_data(esdp, ix+1, ERTS_DIRTY_IO_SCHED_SLEEP_INFO_IX(ix),
+ ERTS_DIRTY_IO_RUNQ, NULL, 0,
+ &adsp[adspix++].dsp);
+ }
}
#endif
-#endif
init_misc_aux_work();
init_swtreq_alloc();
@@ -6167,7 +6228,7 @@ check_dirty_enqueue_in_prio_queue(Process *c_p,
erts_aint32_t dact, max_qbit;
/* Termination should be done on an ordinary scheduler */
- if (actual & ERTS_PSFLG_EXITING) {
+ if ((*newp) & ERTS_PSFLG_EXITING) {
*newp &= ~ERTS_PSFLGS_DIRTY_WORK;
return ERTS_ENQUEUE_NORMAL_QUEUE;
}
@@ -6176,7 +6237,7 @@ check_dirty_enqueue_in_prio_queue(Process *c_p,
* If we have system tasks, we enqueue on ordinary run-queue
* and take care of those system tasks first.
*/
- if (actual & ERTS_PSFLG_ACTIVE_SYS)
+ if ((*newp) & ERTS_PSFLG_ACTIVE_SYS)
return ERTS_ENQUEUE_NORMAL_QUEUE;
dact = erts_smp_atomic32_read_mb(&c_p->dirty_state);
@@ -6356,23 +6417,29 @@ select_enqueue_run_queue(int enqueue, int enq_prio, Process *p, erts_aint32_t st
* schedule_out_process() return with c_rq locked.
*/
static ERTS_INLINE int
-schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Process *proxy)
+schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p,
+ Process *proxy, int is_normal_sched)
{
- erts_aint32_t a, e, n, enq_prio = -1;
+ erts_aint32_t a, e, n, enq_prio = -1, running_flgs;
int enqueue; /* < 0 -> use proxy */
ErtsRunQueue* runq;
+ if (is_normal_sched)
+ running_flgs = ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS;
+ else
+ running_flgs = ERTS_PSFLG_DIRTY_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS;
+
a = state;
while (1) {
n = e = a;
- ASSERT(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS));
+ ASSERT(a & running_flgs);
enqueue = ERTS_ENQUEUE_NOT;
- n &= ~(ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS);
- if (a & (ERTS_PSFLG_ACTIVE_SYS|ERTS_PSFLG_DIRTY_ACTIVE_SYS)
+ n &= ~running_flgs;
+ if ((a & (ERTS_PSFLG_ACTIVE_SYS|ERTS_PSFLG_DIRTY_ACTIVE_SYS))
|| (a & (ERTS_PSFLG_ACTIVE|ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) {
enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a);
}
@@ -6485,8 +6552,9 @@ change_proc_schedule_state(Process *p,
ErtsProcLocks locks)
{
/*
- * NOTE: ERTS_PSFLG_RUNNING, ERTS_PSFLG_RUNNING_SYS and
- * ERTS_PSFLG_ACTIVE_SYS are not allowed to be
+ * NOTE: ERTS_PSFLG_RUNNING, ERTS_PSFLG_RUNNING_SYS,
+ * ERTS_PSFLG_DIRTY_RUNNING, ERTS_PSFLG_DIRTY_RUNNING_SYS
+ * and ERTS_PSFLG_ACTIVE_SYS are not allowed to be
* altered by this function!
*/
erts_aint32_t a = *statep, n;
@@ -6500,9 +6568,13 @@ change_proc_schedule_state(Process *p,
ASSERT(!(a & ERTS_PSFLG_PROXY));
ASSERT((clear_state_flags & (ERTS_PSFLG_RUNNING
| ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
| ERTS_PSFLG_ACTIVE_SYS)) == 0);
ASSERT((set_state_flags & (ERTS_PSFLG_RUNNING
| ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
| ERTS_PSFLG_ACTIVE_SYS)) == 0);
if (lock_status)
@@ -6526,8 +6598,16 @@ change_proc_schedule_state(Process *p,
if ((n & (ERTS_PSFLG_SUSPENDED
| ERTS_PSFLG_RUNNING
| ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
| ERTS_PSFLG_IN_RUNQ
- | ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE) {
+ | ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE
+#ifdef ERTS_DIRTY_SCHEDULERS
+ || (n & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_EXITING)) == ERTS_PSFLG_EXITING
+#endif
+ ) {
/*
* Active and seemingly need to be enqueued, but
* process may be in a run queue via proxy, need
@@ -6551,7 +6631,9 @@ change_proc_schedule_state(Process *p,
| ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE)
&& (!(a & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS)
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)
&& (!(a & ERTS_PSFLG_ACTIVE)
|| (a & ERTS_PSFLG_SUSPENDED))))) {
	    /* We activated a previously inactive process */
@@ -6688,14 +6770,15 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st)
erts_aint32_t e;
n = e = a;
- if (a & ERTS_PSFLG_FREE) {
- res = 0;
+ if (a & ERTS_PSFLG_FREE)
goto cleanup; /* We don't want to schedule free processes... */
- }
enqueue = ERTS_ENQUEUE_NOT;
n |= ERTS_PSFLG_ACTIVE_SYS;
- if (!(a & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)))
+ if (!(a & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)))
enqueue = check_enqueue_in_prio_queue(p, &enq_prio, &n, a);
a = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
if (a == e)
@@ -6708,7 +6791,9 @@ schedule_process_sys_task(Process *p, erts_aint32_t prio, ErtsProcSysTask *st)
if (!(a & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS))
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS))
&& (!(a & ERTS_PSFLG_ACTIVE) || (a & ERTS_PSFLG_SUSPENDED))) {
	/* We activated a previously inactive process */
profile_runnable_proc(p, am_active);
@@ -6748,11 +6833,16 @@ suspend_process(Process *c_p, Process *p)
if (c_p == p) {
state = erts_smp_atomic32_read_bor_relb(&p->state,
ERTS_PSFLG_SUSPENDED);
- ASSERT(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS));
+ ASSERT(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS));
suspended = (state & ERTS_PSFLG_SUSPENDED) ? -1: 1;
}
else {
- while (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_EXITING))) {
+ while (!(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_EXITING))) {
erts_aint32_t n, e;
n = e = state;
@@ -6778,8 +6868,11 @@ suspend_process(Process *c_p, Process *p)
if ((state & (ERTS_PSFLG_ACTIVE
| ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS
| ERTS_PSFLG_RUNNING
| ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
| ERTS_PSFLG_SUSPENDED)) == ERTS_PSFLG_ACTIVE) {
/* We made process inactive */
profile_runnable_proc(p, am_inactive);
@@ -7761,8 +7854,10 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
plp = proclist_create(p);
erts_proclist_store_last(&msbp->blckrs, plp);
p->flags |= have_blckd_flg;
- ASSERT(schdlr_sspnd.active == ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0));
- ASSERT(p->scheduler_data->no == 1);
+ ASSERT(normal
+ ? 1 == schdlr_sspnd_get_nscheds(&schdlr_sspnd.active, ERTS_SCHED_NORMAL)
+ : schdlr_sspnd.active == ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0));
+ ASSERT(erts_proc_sched_data(p)->no == 1);
if (schdlr_sspnd.msb.ongoing)
res = ERTS_SCHDLR_SSPND_DONE_MSCHED_BLOCKED;
else
@@ -7782,7 +7877,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
if (schdlr_sspnd.active == ERTS_SCHDLR_SSPND_MAKE_NSCHEDS_VAL(1, 0, 0)
|| (normal && schdlr_sspnd_get_nscheds(&schdlr_sspnd.active,
ERTS_SCHED_NORMAL) == 1)) {
- ASSERT(p->scheduler_data->no == 1);
+ ASSERT(erts_proc_sched_data(p)->no == 1);
plp = proclist_create(p);
erts_proclist_store_last(&msbp->blckrs, plp);
if (schdlr_sspnd.msb.ongoing)
@@ -7832,7 +7927,7 @@ erts_block_multi_scheduling(Process *p, ErtsProcLocks plocks, int on, int normal
else
res = ERTS_SCHDLR_SSPND_YIELD_DONE_NMSCHED_BLOCKED;
}
- ASSERT(p->scheduler_data);
+ ASSERT(erts_proc_sched_data(p));
}
}
else if (!msbp->ongoing) {
@@ -8422,9 +8517,23 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
if (!suspend_process(c_p, rp)) {
/* Other process running */
- ASSERT(ERTS_PSFLG_RUNNING
+ ASSERT((ERTS_PSFLG_RUNNING | ERTS_PSFLG_DIRTY_RUNNING)
& erts_smp_atomic32_read_nob(&rp->state));
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!suspend
+ && (erts_smp_atomic32_read_nob(&rp->state)
+ & ERTS_PSFLG_DIRTY_RUNNING)) {
+ ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS;
+ if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) {
+ erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
+ rp = erts_pid2proc(c_p, c_p_locks|ERTS_PROC_LOCK_STATUS,
+ pid, pid_locks|ERTS_PROC_LOCK_STATUS);
+ }
+ goto done;
+ }
+#endif
+
running:
/*
@@ -8449,7 +8558,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
else {
ErtsProcLocks need_locks = pid_locks & ~ERTS_PROC_LOCK_STATUS;
if (need_locks && erts_smp_proc_trylock(rp, need_locks) == EBUSY) {
- if (ERTS_PSFLG_RUNNING_SYS
+ if ((ERTS_PSFLG_RUNNING_SYS|ERTS_PSFLG_DIRTY_RUNNING_SYS)
& erts_smp_atomic32_read_nob(&rp->state)) {
/* Executing system task... */
resume_process(rp, ERTS_PROC_LOCK_STATUS);
@@ -8476,7 +8585,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
* from being selected for normal execution regardless
* of locks held or not held on it...
*/
- ASSERT(!(ERTS_PSFLG_RUNNING
+ ASSERT(!((ERTS_PSFLG_RUNNING|ERTS_PSFLG_DIRTY_RUNNING_SYS)
& erts_smp_atomic32_read_nob(&rp->state)));
if (!suspend)
@@ -9017,28 +9126,43 @@ erts_run_queues_len(Uint *qlen, int atomic_queues_read, int incl_active_sched)
}
Eterm
-erts_process_status(Process *c_p, ErtsProcLocks c_p_locks,
- Process *rp, Eterm rpid)
+erts_process_state2status(erts_aint32_t state)
+{
+ if (state & ERTS_PSFLG_FREE)
+ return am_free;
+
+ if (state & ERTS_PSFLG_EXITING)
+ return am_exiting;
+
+ if (state & ERTS_PSFLG_GC)
+ return am_garbage_collecting;
+
+ if (state & ERTS_PSFLG_SUSPENDED)
+ return am_suspended;
+
+ if (state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS))
+ return am_running;
+
+ if (state & (ERTS_PSFLG_ACTIVE
+ | ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS))
+ return am_runnable;
+
+ return am_waiting;
+}
+
+Eterm
+erts_process_status(Process *rp, Eterm rpid)
{
Eterm res = am_undefined;
Process *p = rp ? rp : erts_proc_lookup_raw(rpid);
if (p) {
erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
- if (state & ERTS_PSFLG_FREE)
- res = am_free;
- else if (state & ERTS_PSFLG_EXITING)
- res = am_exiting;
- else if (state & ERTS_PSFLG_GC)
- res = am_garbage_collecting;
- else if (state & ERTS_PSFLG_SUSPENDED)
- res = am_suspended;
- else if (state & ERTS_PSFLG_RUNNING)
- res = am_running;
- else if (state & ERTS_PSFLG_ACTIVE)
- res = am_runnable;
- else
- res = am_waiting;
+ res = erts_process_state2status(state);
}
#ifdef ERTS_SMP
else {
@@ -9240,6 +9364,90 @@ erts_set_process_priority(Process *p, Eterm value)
}
}
+static int
+scheduler_gc_proc(Process *c_p, int reds_left)
+{
+ int fcalls, reds;
+ if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
+ fcalls = reds_left;
+ else
+ fcalls = reds_left - CONTEXT_REDS;
+ reds = erts_garbage_collect_nobump(c_p, 0, c_p->arg_reg, c_p->arity, fcalls);
+ ASSERT(reds_left >= reds);
+ return reds;
+}
+
+static ERTS_INLINE void
+clean_dirty_start(Process *p)
+{
+#if defined(ERTS_DIRTY_SCHEDULERS) && !defined(ARCH_64)
+ void *ptr = ERTS_PROC_SET_DIRTY_CPU_START(p, NULL);
+ if (ptr)
+ erts_free(ERTS_ALC_T_DIRTY_START, ptr);
+#endif
+}
+
+static ERTS_INLINE void
+save_dirty_start(ErtsSchedulerData *esdp, Process *c_p)
+{
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(esdp->run_queue)) {
+ ErtsMonotonicTime time = erts_get_monotonic_time(esdp);
+#ifdef ARCH_64
+ ERTS_PROC_SET_DIRTY_CPU_START(c_p, (void *) time);
+#else
+ ErtsMonotonicTime *stimep;
+
+ stimep = (ErtsMonotonicTime *) ERTS_PROC_GET_DIRTY_CPU_START(c_p);
+ if (!stimep) {
+ stimep = erts_alloc(ERTS_ALC_T_DIRTY_START,
+ sizeof(ErtsMonotonicTime));
+ ERTS_PROC_SET_DIRTY_CPU_START(c_p, (void *) stimep);
+ }
+ *stimep = time;
+#endif
+ }
+#endif
+}
+
+static ERTS_INLINE int
+get_dirty_reds(ErtsSchedulerData *esdp, Process *c_p)
+{
+
+#ifndef ERTS_DIRTY_SCHEDULERS
+ return -1;
+#else
+ ErtsMonotonicTime stime, time;
+
+ if (!ERTS_RUNQ_IS_DIRTY_CPU_RUNQ(esdp->run_queue))
+ return 1;
+
+#ifdef ARCH_64
+ stime = (ErtsMonotonicTime) ERTS_PROC_GET_DIRTY_CPU_START(c_p);
+#else
+ {
+ ErtsMonotonicTime *stimep;
+ stimep = (ErtsMonotonicTime *) ERTS_PROC_GET_DIRTY_CPU_START(c_p);
+ ASSERT(stimep);
+ stime = *stimep;
+ }
+#endif
+
+ time = erts_get_monotonic_time(esdp);
+
+ ASSERT(stime && stime < time);
+
+ time -= stime;
+ time = ERTS_MONOTONIC_TO_USEC(time);
+ time *= 2;
+
+ if (time > INT_MAX)
+ return INT_MAX;
+ return (int) time;
+#endif
+
+}
+
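
Dirty CPU schedulers cannot count reductions the way normal schedulers do, so the accounting added above charges wall-clock time instead: the start time recorded by save_dirty_start() is turned into a reduction count by get_dirty_reds() at schedule-out. A standalone sketch of that conversion (the helper name is hypothetical; the factor of two and the INT_MAX cap mirror the code above):

#include <limits.h>

/* Hypothetical helper mirroring get_dirty_reds(): wall-clock microseconds
 * spent on a dirty CPU scheduler are doubled and capped at INT_MAX to
 * yield the reduction count charged to the scheduled-out process. */
static int dirty_usec_to_reds(long long elapsed_usec)
{
    long long reds = 2 * elapsed_usec;
    return reds > INT_MAX ? INT_MAX : (int) reds;
}
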
/*
* schedule() is called from BEAM (process_main()) or HiPE
* (hipe_mode_switch()) when the current process is to be
@@ -9270,6 +9478,7 @@ Process *schedule(Process *p, int calls)
int reds;
Uint32 flags;
    erts_aint32_t state = 0; /* Suppress warning... */
+ int is_normal_sched;
ERTS_MSACC_DECLARE_CACHE();
@@ -9299,25 +9508,45 @@ Process *schedule(Process *p, int calls)
*/
if (!p) { /* NULL in the very first schedule() call */
esdp = erts_get_scheduler_data();
+ is_normal_sched = !ERTS_SCHEDULER_IS_DIRTY(esdp);
rq = erts_get_runq_current(esdp);
ASSERT(esdp);
fcalls = (int) erts_smp_atomic32_read_acqb(&function_calls);
actual_reds = reds = 0;
erts_smp_runq_lock(rq);
} else {
- sched_out_proc:
-
#ifdef ERTS_SMP
- ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+#ifdef ERTS_DIRTY_SCHEDULERS
esdp = p->scheduler_data;
+ is_normal_sched = esdp != NULL;
+ if (is_normal_sched)
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
+ else {
+ esdp = erts_get_scheduler_data();
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
+ }
+#else
+ esdp = p->scheduler_data;
+ is_normal_sched = 1;
+#endif
ASSERT(esdp->current_process == p
|| esdp->free_process == p);
#else
esdp = erts_scheduler_data;
ASSERT(esdp->current_process == p);
+ is_normal_sched = 1;
#endif
- reds = actual_reds = calls - esdp->virtual_reds;
+ sched_out_proc:
+
+ ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
+
+ if (is_normal_sched)
+ reds = actual_reds = calls - esdp->virtual_reds;
+ else
+ reds = actual_reds = get_dirty_reds(esdp, p);
+
+ ASSERT(actual_reds >= 0);
if (reds < ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST)
reds = ERTS_PROC_MIN_CONTEXT_SWITCH_REDS_COST;
esdp->virtual_reds = 0;
@@ -9363,7 +9592,7 @@ Process *schedule(Process *p, int calls)
state = erts_smp_atomic32_read_nob(&p->state);
#ifdef ERTS_SMP
- if (state & ERTS_PSFLG_PENDING_EXIT)
+ if (is_normal_sched && (state & ERTS_PSFLG_PENDING_EXIT))
erts_handle_pending_exit(p, (ERTS_PROC_LOCK_MAIN
| ERTS_PROC_LOCK_STATUS));
if (p->pending_suspenders)
@@ -9373,7 +9602,8 @@ Process *schedule(Process *p, int calls)
esdp->reductions += reds;
- schedule_out_process(rq, state, p, proxy_p); /* Returns with rq locked! */
+ /* schedule_out_process() returns with rq locked! */
+ schedule_out_process(rq, state, p, proxy_p, is_normal_sched);
proxy_p = NULL;
ERTS_PROC_REDUCTIONS_EXECUTED(esdp, rq,
@@ -9391,13 +9621,20 @@ Process *schedule(Process *p, int calls)
ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_OTHER);
if (state & ERTS_PSFLG_FREE) {
+ if (!is_normal_sched) {
+ ASSERT(p->flags & F_DELAYED_DEL_PROC);
+ erts_proc_dec_refc(p);
+ }
+ else {
#ifdef ERTS_SMP
- ASSERT(esdp->free_process == p);
- esdp->free_process = NULL;
+ ASSERT(esdp->free_process == p);
+ esdp->free_process = NULL;
#else
- erts_proc_dec_refc(p);
+ erts_proc_dec_refc(p);
#endif
+ }
}
+
#ifdef ERTS_SMP
ASSERT(!esdp->free_process);
#endif
@@ -9405,7 +9642,7 @@ Process *schedule(Process *p, int calls)
ERTS_SMP_CHK_NO_PROC_LOCKS;
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ if (is_normal_sched) {
if (esdp->check_time_reds >= ERTS_CHECK_TIME_REDS)
(void) erts_get_monotonic_time(esdp);
@@ -9419,23 +9656,15 @@ Process *schedule(Process *p, int calls)
}
- ERTS_SMP_LC_ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp)
- || !erts_thr_progress_is_blocking());
+ ERTS_SMP_LC_ASSERT(!is_normal_sched || !erts_thr_progress_is_blocking());
check_activities_to_run: {
+ erts_aint32_t psflg_running, psflg_running_sys;
#ifdef ERTS_SMP
ErtsMigrationPaths *mps;
ErtsMigrationPath *mp;
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- ErtsProcList *pnd_xtrs = rq->procs.pending_exiters;
- if (erts_proclist_fetch(&pnd_xtrs, NULL)) {
- rq->procs.pending_exiters = NULL;
- erts_smp_runq_unlock(rq);
- handle_pending_exiters(pnd_xtrs);
- erts_smp_runq_lock(rq);
- }
-
+ if (is_normal_sched) {
if (rq->check_balance_reds <= 0)
check_balance(rq);
@@ -9452,32 +9681,35 @@ Process *schedule(Process *p, int calls)
continue_check_activities_to_run:
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
continue_check_activities_to_run_known_flags:
- ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp)
- || flags & ERTS_RUNQ_FLG_NONEMPTY);
+ ASSERT(!is_normal_sched || (flags & ERTS_RUNQ_FLG_NONEMPTY));
- if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND|ERTS_RUNQ_FLG_SUSPENDED)) {
- if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
- (void) ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
+ if (!is_normal_sched) {
+ if (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
+ & ERTS_SSI_FLG_SUSPENDED) {
suspend_scheduler(esdp);
- flags = ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
- flags |= ERTS_RUNQ_FLG_EXEC;
- }
- if (flags & ERTS_RUNQ_FLG_CHK_CPU_BIND) {
- flags = ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND);
- flags &= ~ ERTS_RUNQ_FLG_CHK_CPU_BIND;
- erts_sched_check_cpu_bind(esdp);
}
}
-#ifdef ERTS_DIRTY_SCHEDULERS
- else if (ERTS_SCHEDULER_IS_DIRTY(esdp)
- && (erts_smp_atomic32_read_acqb(&esdp->ssi->flags)
- & ERTS_SSI_FLG_SUSPENDED))
- suspend_scheduler(esdp);
-#endif
-
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
+ else {
erts_aint32_t aux_work;
- int leader_update = erts_thr_progress_update(esdp);
+ int leader_update;
+
+ ASSERT(is_normal_sched);
+
+ if (flags & (ERTS_RUNQ_FLG_CHK_CPU_BIND|ERTS_RUNQ_FLG_SUSPENDED)) {
+ if (flags & ERTS_RUNQ_FLG_SUSPENDED) {
+ (void) ERTS_RUNQ_FLGS_UNSET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
+ suspend_scheduler(esdp);
+ flags = ERTS_RUNQ_FLGS_SET_NOB(rq, ERTS_RUNQ_FLG_EXEC);
+ flags |= ERTS_RUNQ_FLG_EXEC;
+ }
+ if (flags & ERTS_RUNQ_FLG_CHK_CPU_BIND) {
+ flags = ERTS_RUNQ_FLGS_UNSET(rq, ERTS_RUNQ_FLG_CHK_CPU_BIND);
+ flags &= ~ERTS_RUNQ_FLG_CHK_CPU_BIND;
+ erts_sched_check_cpu_bind(esdp);
+ }
+ }
+
+ leader_update = erts_thr_progress_update(esdp);
aux_work = erts_atomic32_read_acqb(&esdp->ssi->aux_work);
if (aux_work | leader_update) {
erts_smp_runq_unlock(rq);
@@ -9503,19 +9735,13 @@ Process *schedule(Process *p, int calls)
flags = ERTS_RUNQ_FLGS_GET_NOB(rq);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && rq->halt_in_progress) {
- /*
- * TODO: if halt in progress, need to put the dirty scheduler
- * to sleep somewhere around here to prevent it from picking up
- * new work
- */
+ if (!is_normal_sched && rq->halt_in_progress) {
+ /* Wait for emulator to terminate... */
+ while (1)
+ erts_milli_sleep(1000*1000);
}
- else
-#endif
-
- if ((!(flags & ERTS_RUNQ_FLGS_QMASK) && !rq->misc.start)
- || (rq->halt_in_progress && ERTS_EMPTY_RUNQ_PORTS(rq))) {
+ else if ((!(flags & ERTS_RUNQ_FLGS_QMASK) && !rq->misc.start)
+ || (rq->halt_in_progress && ERTS_EMPTY_RUNQ_PORTS(rq))) {
/* Prepare for scheduler wait */
#ifdef ERTS_SMP
ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(rq));
@@ -9529,7 +9755,7 @@ Process *schedule(Process *p, int calls)
if (flags & ERTS_RUNQ_FLG_INACTIVE)
empty_runq(rq);
else {
- if (!ERTS_RUNQ_IX_IS_DIRTY(rq->ix) && try_steal_task(rq))
+ if (is_normal_sched && try_steal_task(rq))
goto continue_check_activities_to_run;
empty_runq(rq);
@@ -9558,9 +9784,9 @@ Process *schedule(Process *p, int calls)
goto check_activities_to_run;
}
- else if (!ERTS_SCHEDULER_IS_DIRTY(esdp) &&
- (fcalls > input_reductions &&
- prepare_for_sys_schedule(!0))) {
+ else if (is_normal_sched
+ && (fcalls > input_reductions
+ && prepare_for_sys_schedule(!0))) {
ErtsMonotonicTime current_time;
/*
* Schedule system-level activities.
@@ -9674,11 +9900,17 @@ Process *schedule(Process *p, int calls)
ASSERT(p); /* Wrong qmask in rq->flags? */
- if (ERTS_SCHEDULER_IS_DIRTY(esdp))
- psflg_band_mask = ~((erts_aint32_t) 0);
- else
+ if (is_normal_sched) {
+ psflg_running = ERTS_PSFLG_RUNNING;
+ psflg_running_sys = ERTS_PSFLG_RUNNING_SYS;
psflg_band_mask = ~(((erts_aint32_t) 1) << (ERTS_PSFLGS_GET_PRQ_PRIO(state)
+ ERTS_PSFLGS_IN_PRQ_MASK_OFFSET));
+ }
+ else {
+ psflg_running = ERTS_PSFLG_DIRTY_RUNNING;
+ psflg_running_sys = ERTS_PSFLG_DIRTY_RUNNING_SYS;
+ psflg_band_mask = ~((erts_aint32_t) 0);
+ }
if (!(state & ERTS_PSFLG_PROXY))
psflg_band_mask &= ~ERTS_PSFLG_IN_RUNQ;
@@ -9693,34 +9925,53 @@ Process *schedule(Process *p, int calls)
state = erts_smp_atomic32_read_nob(&p->state);
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (!is_normal_sched)
+ clear_proc_dirty_queue_bit(p, rq, qbit);
+#endif
+
while (1) {
- erts_aint32_t exp, new, tmp;
- tmp = new = exp = state;
+ erts_aint32_t exp, new;
+ int run_process;
+ new = exp = state;
new &= psflg_band_mask;
- if (!(state & (ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS))) {
- tmp = state & (ERTS_PSFLG_SUSPENDED
- | ERTS_PSFLG_PENDING_EXIT
- | ERTS_PSFLG_ACTIVE_SYS
- | ERTS_PSFLG_DIRTY_ACTIVE_SYS);
- if (tmp != ERTS_PSFLG_SUSPENDED) {
- if (state & (ERTS_PSFLG_ACTIVE_SYS
- | ERTS_PSFLG_DIRTY_ACTIVE_SYS))
- new |= ERTS_PSFLG_RUNNING_SYS;
- else
- new |= ERTS_PSFLG_RUNNING;
- }
+ /*
+	     * Run the process if it is not already running (or free),
+	     * or if it is exiting but not running and this is a normal
+	     * scheduler. In either case, only run it if it is not
+	     * suspended (or it is in a state where suspend is ignored).
+ */
+ run_process = (((!(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS
+ | ERTS_PSFLG_FREE)))
+#ifdef ERTS_DIRTY_SCHEDULERS
+ | (((state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_FREE
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_EXITING))
+ == ERTS_PSFLG_EXITING)
+ & (!!is_normal_sched))
+#endif
+ )
+ & ((state & (ERTS_PSFLG_SUSPENDED
+ | ERTS_PSFLG_EXITING
+ | ERTS_PSFLG_FREE
+ | ERTS_PSFLG_PENDING_EXIT
+ | ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS))
+ != ERTS_PSFLG_SUSPENDED));
+ if (run_process) {
+ if (state & (ERTS_PSFLG_ACTIVE_SYS
+ | ERTS_PSFLG_DIRTY_ACTIVE_SYS))
+ new |= psflg_running_sys;
+ else
+ new |= psflg_running;
}
state = erts_smp_atomic32_cmpxchg_relb(&p->state, new, exp);
if (state == exp) {
- if ((state & (ERTS_PSFLG_RUNNING
- | ERTS_PSFLG_RUNNING_SYS
- | ERTS_PSFLG_FREE))
- || ((state & (ERTS_PSFLG_SUSPENDED
- | ERTS_PSFLG_PENDING_EXIT
- | ERTS_PSFLG_ACTIVE_SYS
- | ERTS_PSFLG_DIRTY_ACTIVE_SYS))
- == ERTS_PSFLG_SUSPENDED)) {
+ if (!run_process) {
if (proxy_p) {
free_proxy_proc(proxy_p);
proxy_p = NULL;
@@ -9740,41 +9991,20 @@ Process *schedule(Process *p, int calls)
esdp->current_process = p;
-
+ calls = 0;
reds = context_reds;
#ifdef ERTS_SMP
erts_smp_runq_unlock(rq);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_SCHEDULER_IS_DIRTY(esdp)) {
-#ifdef DEBUG
- int old_dqbit;
-#endif
- int dqbit = qbit;
-
- if (rq == ERTS_DIRTY_CPU_RUNQ)
- dqbit <<= ERTS_PDSFLGS_IN_CPU_PRQ_MASK_OFFSET;
- else {
- ASSERT(rq == ERTS_DIRTY_IO_RUNQ);
- dqbit <<= ERTS_PDSFLGS_IN_IO_PRQ_MASK_OFFSET;
- }
-
-#ifdef DEBUG
- old_dqbit = (int)
-#else
- (void)
-#endif
- erts_smp_atomic32_read_band_mb(&p->dirty_state, ~dqbit);
- ASSERT(old_dqbit & dqbit);
- }
-#endif /* ERTS_DIRTY_SCHEDULERS */
-
#endif /* ERTS_SMP */
}
+ if (!is_normal_sched)
+ save_dirty_start(esdp, p);
+
#ifdef ERTS_SMP
if (flags & ERTS_RUNQ_FLG_PROTECTED)
@@ -9791,9 +10021,7 @@ Process *schedule(Process *p, int calls)
UWord old = ERTS_PROC_SCHED_ID(p, (UWord) esdp->no);
int migrated = old && old != esdp->no;
-#ifdef ERTS_DIRTY_SCHEDULERS
- ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
-#endif
+ ASSERT(is_normal_sched);
prio = (int) ERTS_PSFLGS_GET_USR_PRIO(state);
@@ -9807,22 +10035,21 @@ Process *schedule(Process *p, int calls)
erts_smp_spin_unlock(&erts_sched_stat.lock);
}
- ASSERT(!p->scheduler_data);
- p->scheduler_data = esdp;
-
state = erts_smp_atomic32_read_nob(&p->state);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (!ERTS_SCHEDULER_IS_DIRTY(esdp)) {
- if (!!(state & ERTS_PSFLGS_DIRTY_WORK)
- & !(state & ERTS_PSFLG_ACTIVE_SYS)) {
+ ASSERT(!p->scheduler_data);
+#ifndef ERTS_DIRTY_SCHEDULERS
+ p->scheduler_data = esdp;
+#else /* ERTS_DIRTY_SCHEDULERS */
+ if (is_normal_sched) {
+ if ((!!(state & ERTS_PSFLGS_DIRTY_WORK))
+ & (!(state & ERTS_PSFLG_ACTIVE_SYS))) {
/* Migrate to dirty scheduler... */
sunlock_sched_out_proc:
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- p->fcalls = reds;
goto sched_out_proc;
-
}
+ p->scheduler_data = esdp;
}
else {
if (state & (ERTS_PSFLG_ACTIVE_SYS
@@ -9856,11 +10083,12 @@ Process *schedule(Process *p, int calls)
#endif /* ERTS_SMP */
- p->fcalls = reds;
-
erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
- if (IS_TRACED(p)) {
+ /* Clear tracer if it has been removed */
+ if (IS_TRACED(p) && erts_is_tracer_proc_enabled(
+ p, ERTS_PROC_LOCK_MAIN, &p->common)) {
+
if (state & ERTS_PSFLG_EXITING) {
if (ARE_TRACE_FLAGS_ON(p, F_TRACE_SCHED_EXIT))
trace_sched(p, ERTS_PROC_LOCK_MAIN, am_in_exiting);
@@ -9875,13 +10103,8 @@ Process *schedule(Process *p, int calls)
}
}
-
-#ifdef ERTS_SMP
- /* Clears tracer if it has been removed */
- (void)ERTS_TRACER_PROC_IS_ENABLED(p);
-#endif
-
- if (state & ERTS_PSFLG_RUNNING_SYS) {
+ if (state & (ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
/*
* GC is normally never delayed when a process
* is scheduled out, but might be when executing
@@ -9890,32 +10113,34 @@ Process *schedule(Process *p, int calls)
* not allowed to execute system tasks.
*/
if (!(p->flags & F_DELAY_GC)) {
- reds -= execute_sys_tasks(p, &state, reds);
+ int cost = execute_sys_tasks(p, &state, reds);
+ calls += cost;
+ reds -= cost;
if (reds <= 0
#ifdef ERTS_DIRTY_SCHEDULERS
- || ERTS_SCHEDULER_IS_DIRTY(esdp)
+ || !is_normal_sched
|| (state & ERTS_PSFLGS_DIRTY_WORK)
#endif
) {
- p->fcalls = reds;
goto sched_out_proc;
}
}
- ASSERT(state & ERTS_PSFLG_RUNNING_SYS);
- ASSERT(!(state & ERTS_PSFLG_RUNNING));
+ ASSERT(state & psflg_running_sys);
+ ASSERT(!(state & psflg_running));
while (1) {
erts_aint32_t n, e;
if (((state & (ERTS_PSFLG_SUSPENDED
| ERTS_PSFLG_ACTIVE)) != ERTS_PSFLG_ACTIVE)
- && !(state & ERTS_PSFLG_EXITING))
+ && !(state & ERTS_PSFLG_EXITING)) {
goto sched_out_proc;
+ }
n = e = state;
- n &= ~ERTS_PSFLG_RUNNING_SYS;
- n |= ERTS_PSFLG_RUNNING;
+ n &= ~psflg_running_sys;
+ n |= psflg_running;
state = erts_smp_atomic32_cmpxchg_mb(&p->state, n, e);
if (state == e) {
@@ -9923,18 +10148,18 @@ Process *schedule(Process *p, int calls)
break;
}
- ASSERT(state & ERTS_PSFLG_RUNNING_SYS);
- ASSERT(!(state & ERTS_PSFLG_RUNNING));
+ ASSERT(state & psflg_running_sys);
+ ASSERT(!(state & psflg_running));
}
}
if (ERTS_IS_GC_DESIRED(p)) {
if (!(state & ERTS_PSFLG_EXITING) && !(p->flags & (F_DELAY_GC|F_DISABLE_GC))) {
- reds -= erts_garbage_collect_nobump(p, 0, p->arg_reg, p->arity);
- if (reds <= 0) {
- p->fcalls = reds;
+ int cost = scheduler_gc_proc(p, reds);
+ calls += cost;
+ reds -= cost;
+ if (reds <= 0)
goto sched_out_proc;
- }
}
}
@@ -9942,6 +10167,8 @@ Process *schedule(Process *p, int calls)
free_proxy_proc(proxy_p);
proxy_p = NULL;
}
+
+ p->fcalls = reds;
ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);
@@ -10010,7 +10237,7 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st, Eterm st_result)
ASSERT(hp_start + hsz == hp);
#endif
- erts_queue_message(rp, &rp_locks, mp, msg);
+ erts_queue_message(rp, rp_locks, mp, msg, c_p->common.id);
if (c_p == rp)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -10211,21 +10438,24 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
else {
if (!garbage_collected) {
FLAGS(c_p) |= F_NEED_FULLSWEEP;
- reds -= erts_garbage_collect_nobump(c_p,
- 0,
- c_p->arg_reg,
- c_p->arity);
+ reds -= scheduler_gc_proc(c_p, reds);
garbage_collected = 1;
}
st_res = am_true;
}
break;
case ERTS_PSTT_CPC: {
+ int fcalls;
int cpc_reds = 0;
+ if (!ERTS_PROC_GET_SAVED_CALLS_BUF(c_p))
+ fcalls = reds;
+ else
+ fcalls = reds - CONTEXT_REDS;
st_res = erts_check_process_code(c_p,
st->arg[0],
unsigned_val(st->arg[1]),
- &cpc_reds);
+ &cpc_reds,
+ fcalls);
reds -= cpc_reds;
if (is_non_value(st_res)) {
/* Needed gc, but gc was disabled */
@@ -10257,6 +10487,9 @@ execute_sys_tasks(Process *c_p, erts_aint32_t *statep, int in_reds)
*statep = state;
+ if (in_reds < reds)
+ return in_reds;
+
return in_reds - reds;
}
@@ -10505,7 +10738,10 @@ save_gc_task(Process *c_p, ErtsProcSysTask *st, int prio)
}
state = erts_smp_atomic32_read_nob(&c_p->state);
- ASSERT((ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS) & state);
+ ASSERT((ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS) & state);
while (!(state & ERTS_PSFLG_DELAYED_SYS)
|| prio < ERTS_PSFLGS_GET_ACT_PRIO(state)) {
@@ -10830,6 +11066,8 @@ erts_get_exact_total_reductions(Process *c_p, Uint *redsp, Uint *diffp)
erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
}
+static void delete_process(Process* p);
+
void
erts_free_proc(Process *p)
{
@@ -10838,6 +11076,8 @@ erts_free_proc(Process *p)
#endif
ASSERT(erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_FREE);
ASSERT(0 == erts_proc_read_refc(p));
+ if (p->flags & F_DELAYED_DEL_PROC)
+ delete_process(p);
erts_free(ERTS_ALC_T_PROC, (void *) p);
}
@@ -11001,9 +11241,13 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->min_heap_size = so->min_heap_size;
p->min_vheap_size = so->min_vheap_size;
p->max_gen_gcs = so->max_gen_gcs;
+ MAX_HEAP_SIZE_SET(p, so->max_heap_size);
+ MAX_HEAP_SIZE_FLAGS_SET(p, so->max_heap_flags);
} else {
p->min_heap_size = H_MIN_SIZE;
p->min_vheap_size = BIN_VH_MIN_SIZE;
+ MAX_HEAP_SIZE_SET(p, H_MAX_SIZE);
+ MAX_HEAP_SIZE_FLAGS_SET(p, H_MAX_FLAGS);
p->max_gen_gcs = (Uint16) erts_smp_atomic32_read_nob(&erts_max_gen_gcs);
}
p->schedule_count = 0;
@@ -11466,24 +11710,44 @@ erts_cleanup_empty_process(Process* p)
#endif
}
-/*
- * p must be the currently executing process.
- */
static void
delete_process(Process* p)
{
Eterm *heap;
ErtsPSD *psd;
+ struct saved_calls *scb;
+ process_breakpoint_time_t *pbt;
+ void *nif_export;
+
VERBOSE(DEBUG_PROCESSES, ("Removing process: %T\n",p->common.id));
VERBOSE(DEBUG_SHCOPY, ("[pid=%T] delete process: %p %p %p %p\n", p->common.id,
HEAP_START(p), HEAP_END(p), OLD_HEAP(p), OLD_HEND(p)));
+ scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, NULL);
+
+ if (scb) {
+ p->fcalls += CONTEXT_REDS; /* Reduction counting depends on this... */
+ erts_free(ERTS_ALC_T_CALLS_BUF, (void *) scb);
+ }
+
+ pbt = ERTS_PROC_SET_CALL_TIME(p, NULL);
+ if (pbt)
+ erts_free(ERTS_ALC_T_BPD, (void *) pbt);
+
+ nif_export = ERTS_PROC_SET_NIF_TRAP_EXPORT(p, NULL);
+ if (nif_export)
+ erts_destroy_nif_export(nif_export);
+
+ clean_dirty_start(p);
+
/* Cleanup psd */
psd = (ErtsPSD *) erts_smp_atomic_read_nob(&p->psd);
- if (psd)
+ if (psd) {
+ erts_smp_atomic_set_nob(&p->psd, (erts_aint_t) NULL); /* Reduction counting depends on this... */
erts_free(ERTS_ALC_T_PSD, psd);
+ }
/* Clean binaries and funs */
erts_cleanup_offheap(&p->off_heap);
@@ -11578,7 +11842,8 @@ set_proc_exiting(Process *p,
p->i = (BeamInstr *) beam_exit;
#ifndef ERTS_SMP
- if (state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)) {
+ if (state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS)
+ && !(state & ERTS_PSFLG_GC)) {
/*
	 * In the non-SMP case:
*
@@ -11607,7 +11872,10 @@ set_proc_self_exiting(Process *c_p)
ERTS_SMP_LC_ASSERT(erts_proc_lc_my_proc_locks(c_p) == ERTS_PROC_LOCKS_ALL);
state = erts_smp_atomic32_read_nob(&c_p->state);
- ASSERT(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS));
+ ASSERT(state & (ERTS_PSFLG_RUNNING
+ |ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS));
#ifdef DEBUG
enqueue =
@@ -11657,51 +11925,73 @@ erts_handle_pending_exit(Process *c_p, ErtsProcLocks locks)
erts_smp_proc_unlock(c_p, xlocks);
}
+static void save_pending_exiter(Process *p, ErtsProcList *plp);
+
static void
-handle_pending_exiters(ErtsProcList *pnd_xtrs)
+do_handle_pending_exiters(ErtsProcList *pnd_xtrs)
{
/* 'list' is expected to have been fetched (i.e. not a ring anymore) */
ErtsProcList *plp = pnd_xtrs;
while (plp) {
- ErtsProcList *free_plp;
- Process *p = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCKS_ALL);
+ ErtsProcList *next_plp = plp->next;
+ Process *p = erts_proc_lookup(plp->pid);
if (p) {
- if (erts_proclist_same(plp, p)) {
- erts_aint32_t state = erts_smp_atomic32_read_acqb(&p->state);
- if (!(state & (ERTS_PSFLG_RUNNING|ERTS_PSFLG_RUNNING_SYS))) {
- ASSERT(state & ERTS_PSFLG_PENDING_EXIT);
- erts_handle_pending_exit(p, ERTS_PROC_LOCKS_ALL);
+ erts_aint32_t state;
+ /*
+ * If the process is running on a normal scheduler, the
+ * pending exit will soon be detected and handled by the
+ * scheduler running the process (at schedule in/out).
+ */
+ if (erts_smp_proc_trylock(p, ERTS_PROC_LOCKS_ALL) != EBUSY) {
+ if (erts_proclist_same(plp, p)) {
+ state = erts_smp_atomic32_read_acqb(&p->state);
+ if (!(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_EXITING))) {
+ ASSERT(state & ERTS_PSFLG_PENDING_EXIT);
+ erts_handle_pending_exit(p, ERTS_PROC_LOCKS_ALL);
+ }
+ }
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
+ }
+ else {
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+ if (erts_proclist_same(plp, p)) {
+ state = erts_smp_atomic32_read_acqb(&p->state);
+ if (!(state & (ERTS_PSFLG_RUNNING
+ | ERTS_PSFLG_RUNNING_SYS
+ | ERTS_PSFLG_EXITING))) {
+ /*
+ * Save process and try to acquire all
+ * locks at a later time...
+ */
+ save_pending_exiter(p, plp);
+ plp = NULL;
+ }
}
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
}
- erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
}
- free_plp = plp;
- plp = plp->next;
- proclist_destroy(free_plp);
+ if (plp)
+ proclist_destroy(plp);
+ plp = next_plp;
}
}
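/*
 * Editor's sketch (not part of the patch): the rewritten loop above uses a
 * try-lock-or-defer pattern -- take all process locks only if that can be
 * done without blocking, otherwise keep the list node and retry the work
 * later. Plain-pthreads illustration; all names are hypothetical.
 */
#include <pthread.h>

struct work { struct work *next; };

static struct work *deferred;            /* retried on a later pass */

static void handle_or_defer(pthread_mutex_t *lock, struct work *w,
                            void (*handle)(struct work *))
{
    if (pthread_mutex_trylock(lock) == 0) {
        handle(w);                       /* got the lock: do it now */
        pthread_mutex_unlock(lock);
    }
    else {
        w->next = deferred;              /* busy: save for a retry */
        deferred = w;
    }
}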
static void
-save_pending_exiter(Process *p)
+save_pending_exiter(Process *p, ErtsProcList *plp)
{
- ErtsProcList *plp;
+ ErtsSchedulerSleepInfo *ssi;
ErtsRunQueue *rq;
- ErtsSchedulerData *esdp = erts_get_scheduler_data();
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
- if (!esdp)
- rq = RUNQ_READ_RQ(&p->run_queue);
- else
- rq = esdp->run_queue;
+ rq = RUNQ_READ_RQ(&p->run_queue);
+ ASSERT(rq && !ERTS_RUNQ_IX_IS_DIRTY(rq->ix));
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (ERTS_RUNQ_IX_IS_DIRTY(rq->ix))
- rq = ERTS_RUNQ_IX(0); /* Handle on ordinary scheduler */
-#endif
-
- plp = proclist_create(p);
+ if (!plp)
+ plp = proclist_create(p);
erts_smp_runq_lock(rq);
@@ -11709,9 +11999,11 @@ save_pending_exiter(Process *p)
non_empty_runq(rq);
+ ssi = rq->scheduler->ssi;
+
erts_smp_runq_unlock(rq);
- wake_scheduler(rq);
+ set_aux_work_flags_wakeup_nob(ssi, ERTS_SSI_AUX_WORK_PENDING_EXITERS);
}
#endif
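/*
 * Editor's sketch (not part of the patch): instead of waking the target run
 * queue's scheduler directly, the code above first publishes an aux-work
 * flag on its sleep info so the woken scheduler knows what to do. C11
 * atomics stand in for the erts_smp_* operations; the bit value and
 * wake_fn are hypothetical.
 */
#include <stdatomic.h>

#define AUX_WORK_PENDING_EXITERS (1u << 3)   /* illustrative bit only */

static void request_pending_exiter_work(atomic_uint *ssi_flags,
                                        void (*wake_fn)(void))
{
    /* set the flag before waking: a spurious wakeup still finds it */
    unsigned old = atomic_fetch_or(ssi_flags, AUX_WORK_PENDING_EXITERS);
    if (!(old & AUX_WORK_PENDING_EXITERS))
        wake_fn();                           /* only wake on the 0 -> 1 edge */
}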
@@ -11744,7 +12036,7 @@ send_exit_message(Process *to, ErtsProcLocks *to_locksp,
mp = erts_alloc_message_heap(to, to_locksp, term_size, &hp, &ohp);
mess = copy_struct(exit_term, term_size, &hp, ohp);
#endif
- erts_queue_message(to, to_locksp, mp, mess);
+ erts_queue_message(to, *to_locksp, mp, mess, am_system);
} else {
Eterm temp_token;
Uint sz_token;
@@ -11765,7 +12057,7 @@ send_exit_message(Process *to, ErtsProcLocks *to_locksp,
seq_trace_output(token, mess, SEQ_TRACE_SEND, to->common.id, to);
temp_token = copy_struct(token, sz_token, &hp, ohp);
ERL_MESSAGE_TOKEN(mp) = temp_token;
- erts_queue_message(to, to_locksp, mp, mess);
+ erts_queue_message(to, *to_locksp, mp, mess, am_system);
}
}
@@ -11911,7 +12203,7 @@ send_exit_signal(Process *c_p, /* current process if and only
if (need_locks
&& erts_smp_proc_trylock(rp, need_locks) == EBUSY) {
/* ... but we haven't got all locks on it ... */
- save_pending_exiter(rp);
+ save_pending_exiter(rp, NULL);
/*
* The pending exit will be discovered when next
* process is scheduled in
@@ -12379,10 +12671,8 @@ erts_continue_exit_process(Process *p)
ErtsProcLocks curr_locks = ERTS_PROC_LOCK_MAIN;
Eterm reason = p->fvalue;
DistEntry *dep;
- struct saved_calls *scb;
- process_breakpoint_time_t *pbt;
erts_aint32_t state;
- void *nif_export;
+ int delay_del_proc = 0;
#ifdef DEBUG
int yield_allowed = 1;
@@ -12525,7 +12815,7 @@ erts_continue_exit_process(Process *p)
{
/* Do *not* use erts_get_runq_proc() */
ErtsRunQueue *rq;
- rq = erts_get_runq_current(ERTS_GET_SCHEDULER_DATA_FROM_PROC(p));
+ rq = erts_get_runq_current(erts_proc_sched_data(p));
erts_smp_runq_lock(rq);
@@ -12571,14 +12861,24 @@ erts_continue_exit_process(Process *p)
break;
}
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (a & (ERTS_PSFLG_DIRTY_RUNNING
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
+ p->flags |= F_DELAYED_DEL_PROC;
+ delay_del_proc = 1;
+ /*
+ * The dirty scheduler will also decrease
+ * refc when done...
+ */
+ erts_proc_inc_refc(p);
+ }
+#endif
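/*
 * Editor's sketch (not part of the patch): the delayed-deletion case above
 * is a plain reference-count hand-off -- the exiting side takes an extra
 * reference for the dirty scheduler that is still running the process, and
 * whoever drops the last reference frees the structure. Standalone
 * illustration with hypothetical names.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct obj { atomic_int refc; };

static void obj_release(struct obj *o)
{
    if (atomic_fetch_sub(&o->refc, 1) == 1)
        free(o);                 /* last reference gone: safe to free */
}
/* exit path: obj_release(o); a dirty worker still using o holds its own
 * reference and calls obj_release() when it is done. */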
+
if (refc_inced && !(n & ERTS_PSFLG_IN_RUNQ))
erts_proc_dec_refc(p);
}
dep = (p->flags & F_DISTRIBUTION) ? erts_this_dist_entry : NULL;
- scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, NULL);
- pbt = ERTS_PROC_SET_CALL_TIME(p, NULL);
- nif_export = ERTS_PROC_SET_NIF_TRAP_EXPORT(p, NULL);
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
#ifdef BM_COUNTERS
@@ -12618,22 +12918,14 @@ erts_continue_exit_process(Process *p)
have none here */
}
- if (scb)
- erts_free(ERTS_ALC_T_CALLS_BUF, (void *) scb);
-
- if (pbt)
- erts_free(ERTS_ALC_T_BPD, (void *) pbt);
-
- if (nif_export)
- erts_destroy_nif_export(nif_export);
-
#ifdef ERTS_SMP
erts_flush_trace_messages(p, 0);
#endif
ERTS_TRACER_CLEAR(&ERTS_TRACER(p));
- delete_process(p);
+ if (!delay_del_proc)
+ delete_process(p);
#ifdef ERTS_SMP
erts_smp_proc_lock(p, ERTS_PROC_LOCK_MAIN);
@@ -12663,6 +12955,7 @@ erts_continue_exit_process(Process *p)
ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p));
+ BUMP_ALL_REDS(p);
}
/*
@@ -12982,11 +13275,13 @@ void erts_halt(int code)
int
erts_dbg_check_halloc_lock(Process *p)
{
+ ErtsSchedulerData *esdp;
if (ERTS_PROC_LOCK_MAIN & erts_proc_lc_my_proc_locks(p))
return 1;
if (p->common.id == ERTS_INVALID_PID)
return 1;
- if (p->scheduler_data && p == p->scheduler_data->match_pseudo_process)
+ esdp = erts_proc_sched_data(p);
+ if (esdp && p == esdp->match_pseudo_process)
return 1;
if (erts_thr_progress_is_blocking())
return 1;
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index 0f69ddeed8..b44ac442aa 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -304,6 +304,7 @@ typedef enum {
ERTS_SSI_AUX_WORK_ASYNC_READY_CLEAN_IX,
ERTS_SSI_AUX_WORK_MISC_THR_PRGR_IX,
ERTS_SSI_AUX_WORK_MISC_IX,
+ ERTS_SSI_AUX_WORK_PENDING_EXITERS_IX,
ERTS_SSI_AUX_WORK_SET_TMO_IX,
ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK_IX,
ERTS_SSI_AUX_WORK_REAP_PORTS_IX,
@@ -336,6 +337,8 @@ typedef enum {
(((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_MISC_THR_PRGR_IX)
#define ERTS_SSI_AUX_WORK_MISC \
(((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_MISC_IX)
+#define ERTS_SSI_AUX_WORK_PENDING_EXITERS \
+ (((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_PENDING_EXITERS_IX)
#define ERTS_SSI_AUX_WORK_SET_TMO \
(((erts_aint32_t) 1) << ERTS_SSI_AUX_WORK_SET_TMO_IX)
#define ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK \
@@ -645,6 +648,7 @@ struct ErtsSchedulerData_ {
Uint no; /* Scheduler number for normal schedulers */
#ifdef ERTS_DIRTY_SCHEDULERS
ErtsDirtySchedId dirty_no; /* Scheduler number for dirty schedulers */
+ Process *dirty_shadow_process;
#endif
Port *current_port;
ErtsRunQueue *run_queue;
@@ -805,8 +809,27 @@ erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi)
#define ERTS_PSD_CALL_TIME_BP 3
#define ERTS_PSD_DELAYED_GC_TASK_QS 4
#define ERTS_PSD_NIF_TRAP_EXPORT 5
-
-#define ERTS_PSD_SIZE 6
+#define ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF 6
+#define ERTS_PSD_DIRTY_CPU_START 7
+
+#define ERTS_PSD_SIZE 8
+
+#if !defined(HIPE) && !defined(ERTS_DIRTY_SCHEDULERS)
+# undef ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF
+# undef ERTS_PSD_DIRTY_CPU_START
+# undef ERTS_PSD_SIZE
+# define ERTS_PSD_SIZE 6
+#elif !defined(HIPE)
+# undef ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF
+# undef ERTS_PSD_DIRTY_CPU_START
+# undef ERTS_PSD_SIZE
+# define ERTS_PSD_DIRTY_CPU_START 6
+# define ERTS_PSD_SIZE 7
+#elif !defined(ERTS_DIRTY_SCHEDULERS)
+# undef ERTS_PSD_DIRTY_CPU_START
+# undef ERTS_PSD_SIZE
+# define ERTS_PSD_SIZE 7
+#endif
typedef struct {
void *data[ERTS_PSD_SIZE];
@@ -911,6 +934,15 @@ struct ErtsPendingSuspend_ {
# define BIN_OLD_VHEAP_SZ(p) (p)->bin_old_vheap_sz
# define BIN_OLD_VHEAP(p) (p)->bin_old_vheap
+# define MAX_HEAP_SIZE_GET(p) ((p)->max_heap_size >> 2)
+# define MAX_HEAP_SIZE_SET(p, sz) ((p)->max_heap_size = ((sz) << 2) | \
+ MAX_HEAP_SIZE_FLAGS_GET(p))
+# define MAX_HEAP_SIZE_FLAGS_GET(p) ((p)->max_heap_size & 0x3)
+# define MAX_HEAP_SIZE_FLAGS_SET(p, flags) ((p)->max_heap_size = flags | \
+ ((p)->max_heap_size & ~0x3))
+# define MAX_HEAP_SIZE_KILL 1
+# define MAX_HEAP_SIZE_LOG 2
+
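/*
 * Editor's sketch (not part of the patch): the max_heap_size field packs
 * the limit (in words) shifted left by two with the kill/log flags in the
 * two low bits, exactly as the macros above encode it. Runnable check of
 * the packing arithmetic; flag values mirror MAX_HEAP_SIZE_KILL/LOG.
 */
#include <assert.h>

#define KILL 1u
#define LOG  2u

int main(void)
{
    unsigned long packed = (1024UL << 2) | KILL | LOG;

    assert((packed >> 2) == 1024);          /* MAX_HEAP_SIZE_GET       */
    assert((packed & 0x3) == (KILL | LOG)); /* MAX_HEAP_SIZE_FLAGS_GET */
    return 0;
}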
struct process {
ErtsPTabElementCommon common; /* *Need* to be first in struct */
@@ -1028,6 +1060,7 @@ struct process {
Eterm *old_hend; /* Heap pointers for generational GC. */
Eterm *old_htop;
Eterm *old_heap;
+ Uint max_heap_size; /* Maximum size of heap (in words). */
Uint16 gen_gcs; /* Number of (minor) generational GCs. */
Uint16 max_gen_gcs; /* Max minor gen GCs before fullsweep. */
ErlOffHeap off_heap; /* Off-heap data updated by copy_struct(). */
@@ -1080,6 +1113,10 @@ struct process {
Uint space_verified; /* Avoid HAlloc forcing heap fragments when */
Eterm* space_verified_from; /* we rely on available heap space (TestHeap) */
#endif
+
+#ifdef DEBUG
+ Uint debug_reds_in;
+#endif
};
extern const Process erts_invalid_process;
@@ -1172,7 +1209,10 @@ void erts_check_for_holes(Process* p);
#define ERTS_PSFLG_DIRTY_CPU_PROC ERTS_PSFLG_BIT(20)
#define ERTS_PSFLG_DIRTY_IO_PROC ERTS_PSFLG_BIT(21)
#define ERTS_PSFLG_DIRTY_ACTIVE_SYS ERTS_PSFLG_BIT(22)
-#define ERTS_PSFLG_MAX (ERTS_PSFLGS_ZERO_BIT_OFFSET + 22)
+#define ERTS_PSFLG_DIRTY_RUNNING ERTS_PSFLG_BIT(23)
+#define ERTS_PSFLG_DIRTY_RUNNING_SYS ERTS_PSFLG_BIT(24)
+
+#define ERTS_PSFLG_MAX (ERTS_PSFLGS_ZERO_BIT_OFFSET + 24)
#define ERTS_PSFLGS_DIRTY_WORK (ERTS_PSFLG_DIRTY_CPU_PROC \
| ERTS_PSFLG_DIRTY_IO_PROC \
@@ -1183,6 +1223,11 @@ void erts_check_for_holes(Process* p);
| ERTS_PSFLG_IN_PRQ_NORMAL \
| ERTS_PSFLG_IN_PRQ_LOW)
+#define ERTS_PSFLGS_VOLATILE_HEAP (ERTS_PSFLG_EXITING \
+ | ERTS_PSFLG_PENDING_EXIT \
+ | ERTS_PSFLG_DIRTY_RUNNING \
+ | ERTS_PSFLG_DIRTY_RUNNING_SYS)
+
#define ERTS_PSFLGS_GET_ACT_PRIO(PSFLGS) \
(((PSFLGS) >> ERTS_PSFLGS_ACT_PRIO_OFFSET) & ERTS_PSFLGS_PRIO_MASK)
#define ERTS_PSFLGS_GET_USR_PRIO(PSFLGS) \
@@ -1224,6 +1269,7 @@ void erts_check_for_holes(Process* p);
* Static flags that do not change after process creation.
*/
#define ERTS_STC_FLG_SYSTEM_PROC (((Uint32) 1) << 0)
+#define ERTS_STC_FLG_SHADOW_PROC (((Uint32) 1) << 1)
/* The sequential tracing token is a tuple of size 5:
*
@@ -1274,6 +1320,8 @@ typedef struct {
Uint min_vheap_size; /* Minimum virtual heap size */
int priority; /* Priority for process. */
Uint16 max_gen_gcs; /* Maximum number of gen GCs before fullsweep. */
+ Uint max_heap_size; /* Maximum heap size in words */
+ Uint max_heap_flags; /* Maximum heap flags (kill | log) */
int scheduler;
} ErlSpawnOpts;
@@ -1351,6 +1399,8 @@ extern int erts_system_profile_ts_type;
#define F_DELAY_GC (1 << 16) /* Similar to disable GC (see below) */
#define F_SCHDLR_ONLN_WAITQ (1 << 17) /* Process enqueued waiting to change schedulers online */
#define F_HAVE_BLCKD_NMSCHED (1 << 18) /* Process has blocked normal multi-scheduling */
+#define F_HIPE_MODE (1 << 19)
+#define F_DELAYED_DEL_PROC (1 << 20) /* Delay delete process (dirty proc exit case) */
/*
* F_DISABLE_GC and F_DELAY_GC are similar. Both will prevent
@@ -1771,7 +1821,8 @@ erts_aint32_t erts_set_aux_work_timeout(int, erts_aint32_t, int);
void erts_sched_notify_check_cpu_bind(void);
Uint erts_active_schedulers(void);
void erts_init_process(int, int, int);
-Eterm erts_process_status(Process *, ErtsProcLocks, Process *, Eterm);
+Eterm erts_process_state2status(erts_aint32_t);
+Eterm erts_process_status(Process *, Eterm);
Uint erts_run_queues_len(Uint *, int, int);
void erts_add_to_runq(Process *);
Eterm erts_bound_schedulers_term(Process *c_p);
@@ -1848,19 +1899,11 @@ int erts_debug_wait_completed(Process *c_p, int flags);
Uint erts_process_memory(Process *c_p, int incl_msg_inq);
-#ifdef ERTS_SMP
-# define ERTS_GET_SCHEDULER_DATA_FROM_PROC(PROC) ((PROC)->scheduler_data)
-# define ERTS_PROC_GET_SCHDATA(PROC) ((PROC)->scheduler_data)
-#else
-# define ERTS_GET_SCHEDULER_DATA_FROM_PROC(PROC) (erts_scheduler_data)
-# define ERTS_PROC_GET_SCHDATA(PROC) (erts_scheduler_data)
-#endif
-
#ifdef ERTS_DO_VERIFY_UNUSED_TEMP_ALLOC
# define ERTS_VERIFY_UNUSED_TEMP_ALLOC(P) \
do { \
ErtsSchedulerData *esdp__ = ((P) \
- ? ERTS_PROC_GET_SCHDATA((Process *) (P)) \
+ ? erts_proc_sched_data((Process *) (P)) \
: erts_get_scheduler_data()); \
if (esdp__ && !ERTS_SCHEDULER_IS_DIRTY(esdp__)) \
esdp__->verify_unused_temp_alloc( \
@@ -1953,12 +1996,15 @@ erts_psd_set(Process *p, int ix, void *data)
ErtsPSD *psd;
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
ErtsProcLocks locks = erts_proc_lc_my_proc_locks(p);
- if (ERTS_LC_PSD_ANY_LOCK == erts_psd_required_locks[ix].set_locks)
- ERTS_SMP_LC_ASSERT(locks || erts_thr_progress_is_blocking());
- else {
- locks &= erts_psd_required_locks[ix].set_locks;
- ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].set_locks == locks
- || erts_thr_progress_is_blocking());
+ erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
+ if (!(state & ERTS_PSFLG_FREE)) {
+ if (ERTS_LC_PSD_ANY_LOCK == erts_psd_required_locks[ix].set_locks)
+ ERTS_SMP_LC_ASSERT(locks || erts_thr_progress_is_blocking());
+ else {
+ locks &= erts_psd_required_locks[ix].set_locks;
+ ERTS_SMP_LC_ASSERT(erts_psd_required_locks[ix].set_locks == locks
+ || erts_thr_progress_is_blocking());
+ }
}
#endif
psd = (ErtsPSD *) erts_smp_atomic_read_nob(&p->psd);
@@ -2008,6 +2054,20 @@ erts_psd_set(Process *p, int ix, void *data)
#define ERTS_PROC_SET_NIF_TRAP_EXPORT(P, NTE) \
erts_psd_set((P), ERTS_PSD_NIF_TRAP_EXPORT, (void *) (NTE))
+#ifdef HIPE
+#define ERTS_PROC_GET_SUSPENDED_SAVED_CALLS_BUF(P) \
+ ((struct saved_calls *) erts_psd_get((P), ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF))
+#define ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(P, SCB) \
+ ((struct saved_calls *) erts_psd_set((P), ERTS_PSD_SUSPENDED_SAVED_CALLS_BUF, (void *) (SCB)))
+#endif
+
+#ifdef ERTS_DIRTY_SCHEDULERS
+#define ERTS_PROC_GET_DIRTY_CPU_START(P) \
+ ((void *) erts_psd_get((P), ERTS_PSD_DIRTY_CPU_START))
+#define ERTS_PROC_SET_DIRTY_CPU_START(P, DCS) \
+ ((void *) erts_psd_set((P), ERTS_PSD_DIRTY_CPU_START, (void *) (DCS)))
+#endif
+
ERTS_GLB_INLINE Eterm erts_proc_get_error_handler(Process *p);
ERTS_GLB_INLINE Eterm erts_proc_set_error_handler(Process *p, Eterm handler);
@@ -2150,6 +2210,7 @@ erts_check_emigration_need(ErtsRunQueue *c_rq, int prio)
#endif
+ERTS_GLB_INLINE ErtsSchedulerData *erts_proc_sched_data(Process *c_p);
ERTS_GLB_INLINE int erts_is_scheduler_bound(ErtsSchedulerData *esdp);
ERTS_GLB_INLINE Process *erts_get_current_process(void);
ERTS_GLB_INLINE Eterm erts_get_current_pid(void);
@@ -2183,6 +2244,31 @@ ERTS_GLB_INLINE void erts_shrink_message_heap(ErtsMessage **msgpp, Process *pp,
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE
+ErtsSchedulerData *erts_proc_sched_data(Process *c_p)
+{
+ ErtsSchedulerData *esdp;
+ ASSERT(c_p);
+#if !defined(ERTS_SMP)
+ esdp = erts_get_scheduler_data();
+#else
+ esdp = c_p->scheduler_data;
+# if defined(ERTS_DIRTY_SCHEDULERS)
+ if (esdp) {
+ ASSERT(esdp == erts_get_scheduler_data());
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
+ }
+ else {
+ esdp = erts_get_scheduler_data();
+ ASSERT(esdp);
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
+ }
+# endif
+#endif
+ ASSERT(esdp);
+ return esdp;
+}
+
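/*
 * Editor's sketch (not part of the patch): erts_proc_sched_data() above
 * prefers the scheduler pointer cached in the process and falls back to the
 * calling thread's own scheduler data, which is the path a dirty scheduler
 * takes. Thread-local storage stands in for erts_get_scheduler_data();
 * all names are hypothetical.
 */
struct sched_data { int is_dirty; };

static _Thread_local struct sched_data *current_sched;   /* set per thread */

struct proc { struct sched_data *scheduler_data; };

static struct sched_data *proc_sched_data(struct proc *p)
{
    /* normal scheduler: cached in the process; dirty: only known via TLS */
    return p->scheduler_data ? p->scheduler_data : current_sched;
}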
+ERTS_GLB_INLINE
int erts_is_scheduler_bound(ErtsSchedulerData *esdp)
{
if (!esdp)
@@ -2397,7 +2483,7 @@ ERTS_GLB_INLINE ErtsAtomCacheMap *
erts_get_atom_cache_map(Process *c_p)
{
ErtsSchedulerData *esdp = (c_p
- ? ERTS_PROC_GET_SCHDATA(c_p)
+ ? erts_proc_sched_data(c_p)
: erts_get_scheduler_data());
ASSERT(esdp);
return &esdp->atom_cache_map;
diff --git a/erts/emulator/beam/erl_process_dump.c b/erts/emulator/beam/erl_process_dump.c
index fa76773cac..eeaa9a569c 100644
--- a/erts/emulator/beam/erl_process_dump.c
+++ b/erts/emulator/beam/erl_process_dump.c
@@ -568,23 +568,21 @@ dump_externally(int to, void *to_arg, Eterm term)
}
}
-void erts_dump_process_state(int to, void *to_arg, erts_aint32_t psflg) {
- if (psflg & ERTS_PSFLG_FREE)
- erts_print(to, to_arg, "Non Existing\n"); /* Should never happen */
- else if (psflg & ERTS_PSFLG_EXITING)
- erts_print(to, to_arg, "Exiting\n");
- else if (psflg & ERTS_PSFLG_GC) {
- erts_print(to, to_arg, "Garbing\n");
- }
- else if (psflg & ERTS_PSFLG_SUSPENDED)
- erts_print(to, to_arg, "Suspended\n");
- else if (psflg & ERTS_PSFLG_RUNNING) {
- erts_print(to, to_arg, "Running\n");
+void erts_dump_process_state(int to, void *to_arg, erts_aint32_t psflg)
+{
+ char *s;
+ switch (erts_process_state2status(psflg)) {
+ case am_free: s = "Non Existing"; break; /* Should never happen */
+ case am_exiting: s = "Exiting"; break;
+ case am_garbage_collecting: s = "Garbing"; break;
+ case am_suspended: s = "Suspended"; break;
+ case am_running: s = "Running"; break;
+ case am_runnable: s = "Scheduled"; break;
+ case am_waiting: s = "Waiting"; break;
+ default: s = "Undefined"; break; /* Should never happen */
}
- else if (psflg & ERTS_PSFLG_ACTIVE)
- erts_print(to, to_arg, "Scheduled\n");
- else
- erts_print(to, to_arg, "Waiting\n");
+
+ erts_print(to, to_arg, "%s\n", s);
}
void
@@ -668,6 +666,10 @@ erts_dump_extended_process_state(int to, void *to_arg, erts_aint32_t psflg) {
erts_print(to, to_arg, "DIRTY_IO_PROC"); break;
case ERTS_PSFLG_DIRTY_ACTIVE_SYS:
erts_print(to, to_arg, "DIRTY_ACTIVE_SYS"); break;
+ case ERTS_PSFLG_DIRTY_RUNNING:
+ erts_print(to, to_arg, "DIRTY_RUNNING"); break;
+ case ERTS_PSFLG_DIRTY_RUNNING_SYS:
+ erts_print(to, to_arg, "DIRTY_RUNNING_SYS"); break;
default:
erts_print(to, to_arg, "UNKNOWN(%d)", chk); break;
}
diff --git a/erts/emulator/beam/erl_time_sup.c b/erts/emulator/beam/erl_time_sup.c
index 346404fe2a..9e37106b88 100644
--- a/erts/emulator/beam/erl_time_sup.c
+++ b/erts/emulator/beam/erl_time_sup.c
@@ -1966,7 +1966,7 @@ send_time_offset_changed_notifications(void *new_offsetp)
*patch_refp = ref;
ASSERT(hsz == size_object(message_template));
message = copy_struct(message_template, hsz, &hp, ohp);
- erts_queue_message(rp, &rp_locks, mp, message);
+ erts_queue_message(rp, rp_locks, mp, message, am_clock_service);
}
erts_smp_proc_unlock(rp, rp_locks);
}
@@ -2348,7 +2348,7 @@ erts_napi_convert_time_unit(ErtsMonotonicTime val, int from, int to)
BIF_RETTYPE monotonic_time_0(BIF_ALIST_0)
{
ErtsMonotonicTime mtime = time_sup.r.o.get_time();
- update_last_mtime(ERTS_PROC_GET_SCHDATA(BIF_P), mtime);
+ update_last_mtime(erts_proc_sched_data(BIF_P), mtime);
mtime += ERTS_MONOTONIC_OFFSET_NATIVE;
BIF_RET(make_time_val(BIF_P, mtime));
}
@@ -2356,7 +2356,7 @@ BIF_RETTYPE monotonic_time_0(BIF_ALIST_0)
BIF_RETTYPE monotonic_time_1(BIF_ALIST_1)
{
ErtsMonotonicTime mtime = time_sup.r.o.get_time();
- update_last_mtime(ERTS_PROC_GET_SCHDATA(BIF_P), mtime);
+ update_last_mtime(erts_proc_sched_data(BIF_P), mtime);
BIF_RET(time_unit_conversion(BIF_P, BIF_ARG_1, mtime, 1));
}
@@ -2365,7 +2365,7 @@ BIF_RETTYPE system_time_0(BIF_ALIST_0)
ErtsMonotonicTime mtime, offset;
mtime = time_sup.r.o.get_time();
offset = get_time_offset();
- update_last_mtime(ERTS_PROC_GET_SCHDATA(BIF_P), mtime);
+ update_last_mtime(erts_proc_sched_data(BIF_P), mtime);
BIF_RET(make_time_val(BIF_P, mtime + offset));
}
@@ -2374,7 +2374,7 @@ BIF_RETTYPE system_time_1(BIF_ALIST_0)
ErtsMonotonicTime mtime, offset;
mtime = time_sup.r.o.get_time();
offset = get_time_offset();
- update_last_mtime(ERTS_PROC_GET_SCHDATA(BIF_P), mtime);
+ update_last_mtime(erts_proc_sched_data(BIF_P), mtime);
BIF_RET(time_unit_conversion(BIF_P, BIF_ARG_1, mtime + offset, 0));
}
@@ -2404,7 +2404,7 @@ BIF_RETTYPE timestamp_0(BIF_ALIST_0)
mtime = time_sup.r.o.get_time();
offset = get_time_offset();
- update_last_mtime(ERTS_PROC_GET_SCHDATA(BIF_P), mtime);
+ update_last_mtime(erts_proc_sched_data(BIF_P), mtime);
make_timestamp_value(&mega_sec, &sec, &micro_sec, mtime, offset);
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 128696270b..ca001fc156 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -238,7 +238,6 @@ write_timestamp(ErtsTraceTimeStamp *tsp, Eterm **hpp)
}
#ifdef ERTS_SMP
-#define PATCH_TS_SIZE(p) patch_ts_size(TFLGS_TS_TYPE(p))
static ERTS_INLINE Uint
patch_ts_size(int ts_type)
@@ -258,7 +257,7 @@ patch_ts_size(int ts_type)
return 0;
}
}
-#endif
+#endif /* ERTS_SMP */
/*
* Write a timestamp. The timestamp MUST be the last
@@ -394,9 +393,10 @@ static ERTS_INLINE int
send_to_tracer_nif(Process *c_p, ErtsPTabElementCommon *t_p,
Eterm t_p_id, ErtsTracerNif *tnif,
enum ErtsTracerOpt topt,
- Eterm tag, Eterm msg, Eterm extra);
+ Eterm tag, Eterm msg, Eterm extra,
+ Eterm pam_result);
static ERTS_INLINE Eterm
-call_enabled_tracer(Process *c_p, const ErtsTracer tracer,
+call_enabled_tracer(const ErtsTracer tracer,
ErtsTracerNif **tnif_ref,
enum ErtsTracerOpt topt,
Eterm tag, Eterm t_p_id);
@@ -459,8 +459,7 @@ erts_set_system_seq_tracer(Process *c_p, ErtsProcLocks c_p_locks, ErtsTracer new
if (!ERTS_TRACER_IS_NIL(new)) {
Eterm nif_result = call_enabled_tracer(
- NULL, new, NULL,
- TRACE_FUN_ENABLED, am_trace_status, am_undefined);
+ new, NULL, TRACE_FUN_ENABLED, am_trace_status, am_undefined);
switch (nif_result) {
case am_trace: break;
default:
@@ -492,7 +491,7 @@ erts_get_system_seq_tracer(void)
erts_smp_rwmtx_runlock(&sys_trace_rwmtx);
if (st != erts_tracer_nil &&
- call_enabled_tracer(NULL, st, NULL, TRACE_FUN_ENABLED,
+ call_enabled_tracer(st, NULL, TRACE_FUN_ENABLED,
am_trace_status, am_undefined) == am_remove) {
erts_set_system_seq_tracer(NULL, 0, erts_tracer_nil);
st = erts_tracer_nil;
@@ -513,7 +512,7 @@ get_default_tracing(Uint *flagsp, ErtsTracer *tracerp,
*default_trace_flags &= ~TRACEE_FLAGS;
} else {
Eterm nif_res;
- nif_res = call_enabled_tracer(NULL, *default_tracer,
+ nif_res = call_enabled_tracer(*default_tracer,
NULL, TRACE_FUN_ENABLED,
am_trace_status, am_undefined);
switch (nif_res) {
@@ -739,7 +738,7 @@ profile_send(Eterm from, Eterm message) {
else
msg = copy_struct(message, sz, &hp, &mp->hfrag.off_heap);
- erts_queue_message(profile_p, NULL, mp, msg);
+ erts_queue_message(profile_p, 0, mp, msg, from);
}
}
@@ -787,7 +786,7 @@ trace_sched_aux(Process *p, ErtsProcLocks locks, Eterm what)
}
send_to_tracer_nif(p, &p->common, p->common.id, tnif, TRACE_FUN_T_SCHED_PROC,
- what, tmp, THE_NON_VALUE);
+ what, tmp, THE_NON_VALUE, am_true);
}
/* Send {trace_ts, Pid, What, {Mod, Func, Arity}, Timestamp}
@@ -812,9 +811,32 @@ trace_send(Process *p, Eterm to, Eterm msg)
{
Eterm operation = am_send;
ErtsTracerNif *tnif = NULL;
+ ErtsTracingEvent* te;
+ Eterm pam_result;
ASSERT(ARE_TRACE_FLAGS_ON(p, F_TRACE_SEND));
+ te = &erts_send_tracing[erts_active_bp_ix()];
+ if (!te->on) {
+ return;
+ }
+ if (te->match_spec) {
+ Eterm args[2];
+ Uint32 return_flags;
+ args[0] = to;
+ args[1] = msg;
+ pam_result = erts_match_set_run_trace(p, p,
+ te->match_spec, args, 2,
+ ERTS_PAM_TMP_RESULT, &return_flags);
+ if (pam_result == am_false)
+ return;
+ if (ERTS_TRACE_FLAGS(p) & F_TRACE_SILENT) {
+ erts_match_set_release_result_trace(p, pam_result);
+ return;
+ }
+ } else
+ pam_result = am_true;
+
if (is_internal_pid(to)) {
if (!erts_proc_lookup(to))
goto send_to_non_existing_process;
@@ -826,23 +848,63 @@ trace_send(Process *p, Eterm to, Eterm msg)
}
if (is_tracer_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif,
- TRACE_FUN_E_SEND, operation))
+ TRACE_FUN_E_SEND, operation)) {
send_to_tracer_nif(p, &p->common, p->common.id, tnif, TRACE_FUN_T_SEND,
- operation, msg, to);
+ operation, msg, to, pam_result);
+ }
+ erts_match_set_release_result_trace(p, pam_result);
}
/* Send {trace_ts, Pid, receive, Msg, Timestamp}
* or {trace, Pid, receive, Msg}
*/
void
-trace_receive(Process *c_p, Eterm msg)
+trace_receive(Process* receiver,
+ Eterm from,
+ Eterm msg, ErtsTracingEvent* te)
{
ErtsTracerNif *tnif = NULL;
- if (is_tracer_enabled(NULL, 0, &c_p->common, &tnif,
- TRACE_FUN_E_RECEIVE, am_receive))
- send_to_tracer_nif(NULL, &c_p->common, c_p->common.id,
+ Eterm pam_result;
+
+ if (!te) {
+ te = &erts_receive_tracing[erts_active_bp_ix()];
+ if (!te->on)
+ return;
+ }
+ else ASSERT(te->on);
+
+ if (te->match_spec) {
+ Eterm args[3];
+ Uint32 return_flags;
+ if (is_pid(from)) {
+ args[0] = pid_node_name(from);
+ args[1] = from;
+ }
+ else {
+ ASSERT(is_atom(from));
+ args[0] = from; /* node name or other atom (e.g. 'system') */
+ args[1] = am_undefined;
+ }
+ args[2] = msg;
+ pam_result = erts_match_set_run_trace(NULL, receiver,
+ te->match_spec, args, 3,
+ ERTS_PAM_TMP_RESULT, &return_flags);
+ if (pam_result == am_false)
+ return;
+ if (ERTS_TRACE_FLAGS(receiver) & F_TRACE_SILENT) {
+ erts_match_set_release_result_trace(NULL, pam_result);
+ return;
+ }
+ } else
+ pam_result = am_true;
+
+ if (is_tracer_enabled(NULL, 0, &receiver->common, &tnif,
+ TRACE_FUN_E_RECEIVE, am_receive)) {
+ send_to_tracer_nif(NULL, &receiver->common, receiver->common.id,
tnif, TRACE_FUN_T_RECEIVE,
- am_receive, msg, THE_NON_VALUE);
+ am_receive, msg, THE_NON_VALUE, pam_result);
+ }
+ erts_match_set_release_result_trace(NULL, pam_result);
}
int
@@ -852,8 +914,8 @@ seq_trace_update_send(Process *p)
ASSERT((is_tuple(SEQ_TRACE_TOKEN(p)) || is_nil(SEQ_TRACE_TOKEN(p))));
if (have_no_seqtrace(SEQ_TRACE_TOKEN(p)) ||
(seq_tracer != NIL &&
- call_enabled_tracer(NULL, seq_tracer, NULL,
- TRACE_FUN_ENABLED, am_trace_status,
+ call_enabled_tracer(seq_tracer, NULL,
+ TRACE_FUN_ENABLED, am_seq_trace,
p ? p->common.id : am_undefined) != am_trace)
#ifdef USE_VM_PROBES
|| (SEQ_TRACE_TOKEN(p) == am_have_dt_utag)
@@ -900,9 +962,9 @@ seq_trace_output_generic(Eterm token, Eterm msg, Uint type,
ASSERT(is_tuple(token) || is_nil(token));
if (token == NIL || (process && ERTS_TRACE_FLAGS(process) & F_SENSITIVE) ||
ERTS_TRACER_IS_NIL(seq_tracer) ||
- call_enabled_tracer(NULL, seq_tracer,
+ call_enabled_tracer(seq_tracer,
NULL, TRACE_FUN_ENABLED,
- am_trace_status,
+ am_seq_trace,
process ? process->common.id : am_undefined) != am_trace) {
return;
}
@@ -964,7 +1026,7 @@ erts_trace_return_to(Process *p, BeamInstr *pc)
}
send_to_tracer_nif(p, &p->common, p->common.id, NULL, TRACE_FUN_T_CALL,
- am_return_to, mfa, THE_NON_VALUE);
+ am_return_to, mfa, THE_NON_VALUE, am_true);
}
@@ -1123,7 +1185,11 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
* use process flags
*/
tracee_flags = &ERTS_TRACE_FLAGS(p);
+ /* It is not ideal to call this check twice;
+ it should be optimized so that only one call is made. */
if (!is_tracer_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif,
+ TRACE_FUN_ENABLED, am_trace_status)
+ || !is_tracer_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif,
TRACE_FUN_E_CALL, am_call)) {
return 0;
}
@@ -1139,13 +1205,21 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
}
meta_flags = F_TRACE_CALLS | F_NOW_TS;
tracee_flags = &meta_flags;
- switch (call_enabled_tracer(p, *tracer,
- &tnif, TRACE_FUN_T_CALL,
- am_call, p->common.id)) {
+ switch (call_enabled_tracer(*tracer,
+ &tnif, TRACE_FUN_ENABLED,
+ am_trace_status, p->common.id)) {
default:
case am_remove: *tracer = erts_tracer_nil;
case am_discard: return 0;
- case am_trace: break;
+ case am_trace:
+ switch (call_enabled_tracer(*tracer,
+ &tnif, TRACE_FUN_T_CALL,
+ am_call, p->common.id)) {
+ default:
+ case am_discard: return 0;
+ case am_trace: break;
+ }
+ break;
}
}
@@ -1202,20 +1276,14 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
may remove it, and we still want to generate a trace message */
erts_tracer_update(&pre_ms_tracer, *tracer);
tracer = &pre_ms_tracer;
- pam_result = erts_match_set_run(p, match_spec, args, arity,
- ERTS_PAM_TMP_RESULT, &return_flags);
- if (is_non_value(pam_result)) {
- erts_match_set_release_result(p);
- UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
- ERTS_TRACER_CLEAR(&pre_ms_tracer);
- return 0;
- }
+ pam_result = erts_match_set_run_trace(p, p,
+ match_spec, args, arity,
+ ERTS_PAM_TMP_RESULT, &return_flags);
}
if (tracee_flags == &meta_flags) {
/* Meta trace */
if (pam_result == am_false) {
- erts_match_set_release_result(p);
UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
ERTS_TRACER_CLEAR(&pre_ms_tracer);
return return_flags;
@@ -1223,13 +1291,12 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
} else {
/* Non-meta trace */
if (*tracee_flags & F_TRACE_SILENT) {
- erts_match_set_release_result(p);
+ erts_match_set_release_result_trace(p, pam_result);
UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
ERTS_TRACER_CLEAR(&pre_ms_tracer);
return 0;
}
if (pam_result == am_false) {
- erts_match_set_release_result(p);
UnUseTmpHeap(ERL_SUB_BIN_SIZE,p);
ERTS_TRACER_CLEAR(&pre_ms_tracer);
return return_flags;
@@ -1265,11 +1332,14 @@ erts_call_trace(Process* p, BeamInstr mfa[3], Binary *match_spec,
* Build the trace tuple and send it to the port.
*/
send_to_tracer_nif_raw(p, NULL, *tracer, *tracee_flags, p->common.id,
- tnif, TRACE_FUN_T_CALL, am_call, mfa_tuple, THE_NON_VALUE, pam_result);
- erts_match_set_release_result(p);
+ tnif, TRACE_FUN_T_CALL, am_call, mfa_tuple,
+ THE_NON_VALUE, pam_result);
- if (match_spec && tracer == &pre_ms_tracer)
- ERTS_TRACER_CLEAR(&pre_ms_tracer);
+ if (match_spec) {
+ erts_match_set_release_result_trace(p, pam_result);
+ if (tracer == &pre_ms_tracer)
+ ERTS_TRACER_CLEAR(&pre_ms_tracer);
+ }
return return_flags;
}
@@ -1287,10 +1357,10 @@ trace_proc(Process *c_p, ErtsProcLocks c_p_locks,
Process *t_p, Eterm what, Eterm data)
{
ErtsTracerNif *tnif = NULL;
- if (is_tracer_enabled(c_p, c_p_locks, &t_p->common, &tnif,
+ if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif,
TRACE_FUN_E_PROCS, what))
- send_to_tracer_nif(c_p, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_PROCS,
- what, data, THE_NON_VALUE);
+ send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_PROCS,
+ what, data, THE_NON_VALUE, am_true);
}
@@ -1306,32 +1376,33 @@ trace_proc_spawn(Process *p, Eterm what, Eterm pid,
Eterm mod, Eterm func, Eterm args)
{
ErtsTracerNif *tnif = NULL;
- if (is_tracer_enabled(p, ERTS_PROC_LOCKS_ALL &
- ~(ERTS_PROC_LOCK_STATUS|ERTS_PROC_LOCK_TRACE),
- &p->common, &tnif, TRACE_FUN_E_PROCS, what)) {
+ if (is_tracer_enabled(NULL, 0,
+ &p->common, &tnif, TRACE_FUN_E_PROCS, what)) {
Eterm mfa;
Eterm* hp;
hp = HAlloc(p, 4);
mfa = TUPLE3(hp, mod, func, args);
hp += 4;
- send_to_tracer_nif(p, &p->common, p->common.id, tnif, TRACE_FUN_T_PROCS,
- what, pid, mfa);
+ send_to_tracer_nif(NULL, &p->common, p->common.id, tnif, TRACE_FUN_T_PROCS,
+ what, pid, mfa, am_true);
}
}
void save_calls(Process *p, Export *e)
{
- struct saved_calls *scb = ERTS_PROC_GET_SAVED_CALLS_BUF(p);
- if (scb) {
- Export **ct = &scb->ct[0];
- int len = scb->len;
-
- ct[scb->cur] = e;
- if (++scb->cur >= len)
- scb->cur = 0;
- if (scb->n < len)
- scb->n++;
+ if ((ERTS_TRACE_FLAGS(p) & F_SENSITIVE) == 0) {
+ struct saved_calls *scb = ERTS_PROC_GET_SAVED_CALLS_BUF(p);
+ if (scb) {
+ Export **ct = &scb->ct[0];
+ int len = scb->len;
+
+ ct[scb->cur] = e;
+ if (++scb->cur >= len)
+ scb->cur = 0;
+ if (scb->n < len)
+ scb->n++;
+ }
}
}
@@ -1348,25 +1419,28 @@ void save_calls(Process *p, Export *e)
* are all small (atomic) integers.
*/
void
-trace_gc(Process *p, Eterm what, Uint size)
+trace_gc(Process *p, Eterm what, Uint size, Eterm msg)
{
ErtsTracerNif *tnif = NULL;
Eterm* hp;
- Eterm msg = NIL;
Uint sz = 0;
Eterm tup;
- if (is_tracer_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif, TRACE_FUN_E_GC, what)) {
+ if (is_tracer_enabled(p, ERTS_PROC_LOCK_MAIN, &p->common, &tnif,
+ TRACE_FUN_E_GC, what)) {
+
+ if (is_non_value(msg)) {
- (void) erts_process_gc_info(p, &sz, NULL);
- hp = HAlloc(p, sz + 3 + 2);
+ (void) erts_process_gc_info(p, &sz, NULL, 0, 0);
+ hp = HAlloc(p, sz + 3 + 2);
- msg = erts_process_gc_info(p, NULL, &hp);
- tup = TUPLE2(hp, am_wordsize, make_small(size)); hp += 3;
- msg = CONS(hp, tup, msg); hp += 2;
+ msg = erts_process_gc_info(p, NULL, &hp, 0, 0);
+ tup = TUPLE2(hp, am_wordsize, make_small(size)); hp += 3;
+ msg = CONS(hp, tup, msg); hp += 2;
+ }
send_to_tracer_nif(p, &p->common, p->common.id, tnif, TRACE_FUN_T_GC,
- what, msg, am_undefined);
+ what, msg, THE_NON_VALUE, am_true);
}
}
@@ -1429,7 +1503,7 @@ monitor_long_schedule_proc(Process *p, BeamInstr *in_fp, BeamInstr *out_fp, Uint
{
ErtsMessage *mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, NULL, mp, msg);
+ erts_queue_message(monitor_p, 0, mp, msg, am_system);
}
#endif
}
@@ -1494,7 +1568,7 @@ monitor_long_schedule_port(Port *pp, ErtsPortTaskType type, Uint time)
{
ErtsMessage *mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, NULL, mp, msg);
+ erts_queue_message(monitor_p, 0, mp, msg, am_system);
}
#endif
}
@@ -1569,7 +1643,7 @@ monitor_long_gc(Process *p, Uint time) {
{
ErtsMessage *mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, NULL, mp, msg);
+ erts_queue_message(monitor_p, 0, mp, msg, am_system);
}
#endif
}
@@ -1644,7 +1718,7 @@ monitor_large_heap(Process *p) {
{
ErtsMessage *mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, NULL, mp, msg);
+ erts_queue_message(monitor_p, 0, mp, msg, am_system);
}
#endif
}
@@ -1676,7 +1750,7 @@ monitor_generic(Process *p, Eterm type, Eterm spec) {
{
ErtsMessage *mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = bp;
- erts_queue_message(monitor_p, NULL, mp, msg);
+ erts_queue_message(monitor_p, 0, mp, msg, am_system);
}
#endif
@@ -1746,7 +1820,7 @@ trace_port_open(Port *p, Eterm calling_pid, Eterm drv_name) {
ERTS_SMP_CHK_NO_PROC_LOCKS;
if (is_tracer_enabled(NULL, 0, &p->common, &tnif, TRACE_FUN_E_PORTS, am_open))
send_to_tracer_nif(NULL, &p->common, p->common.id, tnif, TRACE_FUN_T_PORTS,
- am_open, calling_pid, drv_name);
+ am_open, calling_pid, drv_name, am_true);
}
/* Sends trace message:
@@ -1765,7 +1839,7 @@ trace_port(Port *t_p, Eterm what, Eterm data) {
ERTS_SMP_CHK_NO_PROC_LOCKS;
if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_PORTS, what))
send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_PORTS,
- what, data, THE_NON_VALUE);
+ what, data, THE_NON_VALUE, am_true);
}
@@ -1905,7 +1979,7 @@ trace_port_receive(Port *t_p, Eterm caller, Eterm what, ...)
ASSERT(hp <= (local_heap + LOCAL_HEAP_SIZE) || orig_hp);
send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif,
TRACE_FUN_T_RECEIVE,
- am_receive, data, THE_NON_VALUE);
+ am_receive, data, THE_NON_VALUE, am_true);
if (bptr && erts_refc_dectest(&bptr->refc, 1) == 0)
erts_bin_free(bptr);
@@ -1928,7 +2002,7 @@ trace_port_send(Port *t_p, Eterm receiver, Eterm msg, int exists)
ERTS_SMP_CHK_NO_PROC_LOCKS;
if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_SEND, op))
send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_SEND,
- op, msg, receiver);
+ op, msg, receiver, am_true);
}
void trace_port_send_binary(Port *t_p, Eterm to, Eterm what, char *bin, Sint sz)
@@ -1957,7 +2031,7 @@ void trace_port_send_binary(Port *t_p, Eterm to, Eterm what, char *bin, Sint sz)
hp += 3;
send_to_tracer_nif(NULL, &t_p->common, t_p->common.id, tnif, TRACE_FUN_T_SEND,
- am_send, msg, to);
+ am_send, msg, to, am_true);
if (bptr && erts_refc_dectest(&bptr->refc, 1) == 0)
erts_bin_free(bptr);
@@ -1988,7 +2062,7 @@ trace_sched_ports_where(Port *t_p, Eterm what, Eterm where) {
if (is_tracer_enabled(NULL, 0, &t_p->common, &tnif, TRACE_FUN_E_SCHED_PORT, what))
send_to_tracer_nif(NULL, &t_p->common, t_p->common.id,
tnif, TRACE_FUN_T_SCHED_PORT,
- what, where, THE_NON_VALUE);
+ what, where, THE_NON_VALUE, am_true);
}
/* Port profiling */
@@ -2428,7 +2502,7 @@ sys_msg_dispatcher_func(void *unused)
queue_proc_msg:
mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = smqp->bp;
- erts_queue_message(proc,&proc_locks,mp,smqp->msg);
+ erts_queue_message(proc,proc_locks,mp,smqp->msg,am_system);
#ifdef DEBUG_PRINTOUTS
erts_fprintf(stderr, "delivered\n");
#endif
@@ -2811,7 +2885,7 @@ send_to_tracer_nif_raw(Process *c_p, Process *tracee,
static ERTS_INLINE int
send_to_tracer_nif(Process *c_p, ErtsPTabElementCommon *t_p,
Eterm t_p_id, ErtsTracerNif *tnif, enum ErtsTracerOpt topt,
- Eterm tag, Eterm msg, Eterm extra)
+ Eterm tag, Eterm msg, Eterm extra, Eterm pam_result)
{
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
if (c_p) {
@@ -2831,27 +2905,32 @@ send_to_tracer_nif(Process *c_p, ErtsPTabElementCommon *t_p,
is_internal_pid(t_p->id) ? (Process*)t_p : NULL,
t_p->tracer, t_p->trace_flags,
t_p_id, tnif, topt, tag, msg, extra,
- am_true);
+ pam_result);
}
static ERTS_INLINE Eterm
-call_enabled_tracer(Process *c_p, const ErtsTracer tracer,
+call_enabled_tracer(const ErtsTracer tracer,
ErtsTracerNif **tnif_ret,
enum ErtsTracerOpt topt,
Eterm tag, Eterm t_p_id) {
ErtsTracerNif *tnif = lookup_tracer_nif(tracer);
if (tnif) {
- Eterm argv[] = {tag, ERTS_TRACER_STATE(tracer), t_p_id};
+ Eterm argv[] = {tag, ERTS_TRACER_STATE(tracer), t_p_id},
+ ret;
topt = (tnif->tracers[topt].cb) ? topt : TRACE_FUN_ENABLED;
ASSERT(topt < NIF_TRACER_TYPES);
ASSERT(tnif->tracers[topt].cb != NULL);
if (tnif_ret) *tnif_ret = tnif;
- return erts_nif_call_function(c_p, NULL, tnif->nif_mod,
- tnif->tracers[topt].cb,
- tnif->tracers[topt].arity,
- argv);
+ ret = erts_nif_call_function(NULL, NULL, tnif->nif_mod,
+ tnif->tracers[topt].cb,
+ tnif->tracers[topt].arity,
+ argv);
+ if (tag == am_trace_status && ret != am_remove)
+ return am_trace;
+ ASSERT(tag == am_trace_status || ret != am_remove);
+ return ret;
}
- return am_remove;
+ return tag == am_trace_status ? am_remove : am_discard;
}
static int
@@ -2876,12 +2955,12 @@ is_tracer_enabled(Process* c_p, ErtsProcLocks c_p_locks,
}
#endif
- nif_result = call_enabled_tracer(c_p, t_p->tracer, tnif_ret, topt, tag, t_p->id);
+ nif_result = call_enabled_tracer(t_p->tracer, tnif_ret, topt, tag, t_p->id);
switch (nif_result) {
case am_discard: return 0;
case am_trace: return 1;
case THE_NON_VALUE:
- case am_remove: break;
+ case am_remove: ASSERT(tag == am_trace_status); break;
default:
/* only am_remove should be returned, but if
something else is returned we fall-through
@@ -2912,19 +2991,14 @@ is_tracer_enabled(Process* c_p, ErtsProcLocks c_p_locks,
return 0;
}
-int erts_is_tracer_proc_enabled(Process* c_p, ErtsProcLocks c_p_locks,
- ErtsPTabElementCommon *t_p, Eterm type)
-{
- return is_tracer_enabled(c_p, c_p_locks, t_p, NULL, TRACE_FUN_ENABLED, am_trace_status);
-}
-
-int erts_is_tracer_enabled(Process *c_p, const ErtsTracer tracer)
+int erts_is_tracer_enabled(const ErtsTracer tracer, ErtsPTabElementCommon *t_p)
{
ErtsTracerNif *tnif = lookup_tracer_nif(tracer);
if (tnif) {
- Eterm nif_result = call_enabled_tracer(c_p, tracer, &tnif,
- TRACE_FUN_ENABLED, am_trace_status,
- c_p->common.id);
+ Eterm nif_result = call_enabled_tracer(tracer, &tnif,
+ TRACE_FUN_ENABLED,
+ am_trace_status,
+ t_p->id);
switch (nif_result) {
case am_discard:
case am_trace: return 1;
@@ -2935,6 +3009,20 @@ int erts_is_tracer_enabled(Process *c_p, const ErtsTracer tracer)
return 0;
}
+int erts_is_tracer_proc_enabled(Process* c_p, ErtsProcLocks c_p_locks,
+ ErtsPTabElementCommon *t_p)
+{
+ return is_tracer_enabled(c_p, c_p_locks, t_p, NULL, TRACE_FUN_ENABLED,
+ am_trace_status);
+}
+
+int erts_is_tracer_proc_enabled_send(Process* c_p, ErtsProcLocks c_p_locks,
+ ErtsPTabElementCommon *t_p)
+{
+ return is_tracer_enabled(c_p, c_p_locks, t_p, NULL, TRACE_FUN_T_SEND, am_send);
+}
+
+
void erts_tracer_replace(ErtsPTabElementCommon *t_p, const ErtsTracer tracer)
{
#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
diff --git a/erts/emulator/beam/erl_trace.h b/erts/emulator/beam/erl_trace.h
index 9a007e62ec..0095d4386b 100644
--- a/erts/emulator/beam/erl_trace.h
+++ b/erts/emulator/beam/erl_trace.h
@@ -56,6 +56,15 @@
struct binary;
+typedef struct
+{
+ int on;
+ struct binary* match_spec;
+} ErtsTracingEvent;
+
+extern ErtsTracingEvent erts_send_tracing[];
+extern ErtsTracingEvent erts_receive_tracing[];
+
/* erl_bif_trace.c */
Eterm erl_seq_trace_info(Process *p, Eterm arg1);
void erts_system_monitor_clear(Process *c_p);
@@ -91,7 +100,7 @@ void erts_send_sys_msg_proc(Eterm, Eterm, Eterm, ErlHeapFragment *);
#endif
void trace_send(Process*, Eterm, Eterm);
-void trace_receive(Process *, Eterm);
+void trace_receive(Process*, Eterm, Eterm, ErtsTracingEvent*);
Uint32 erts_call_trace(Process *p, BeamInstr mfa[], struct binary *match_spec,
Eterm* args, int local, ErtsTracer *tracer);
void erts_trace_return(Process* p, BeamInstr* fi, Eterm retval,
@@ -103,7 +112,7 @@ void trace_sched(Process*, ErtsProcLocks, Eterm);
void trace_proc(Process*, ErtsProcLocks, Process*, Eterm, Eterm);
void trace_proc_spawn(Process*, Eterm what, Eterm pid, Eterm mod, Eterm func, Eterm args);
void save_calls(Process *p, Export *);
-void trace_gc(Process *p, Eterm what, Uint size);
+void trace_gc(Process *p, Eterm what, Uint size, Eterm msg);
/* port tracing */
void trace_virtual_sched(Process*, ErtsProcLocks, Eterm);
void trace_sched_ports(Port *pp, Eterm);
@@ -184,8 +193,10 @@ int erts_finish_breakpointing(void);
/* Nif tracer functions */
int erts_is_tracer_proc_enabled(Process *c_p, ErtsProcLocks c_p_locks,
- ErtsPTabElementCommon *t_p, Eterm type);
-int erts_is_tracer_enabled(Process *c_p, const ErtsTracer tracer);
+ ErtsPTabElementCommon *t_p);
+int erts_is_tracer_proc_enabled_send(Process* c_p, ErtsProcLocks c_p_locks,
+ ErtsPTabElementCommon *t_p);
+int erts_is_tracer_enabled(const ErtsTracer tracer, ErtsPTabElementCommon *t_p);
Eterm erts_tracer_to_term(Process *p, ErtsTracer tracer);
ErtsTracer erts_term_to_tracer(Eterm prefix, Eterm term);
void erts_tracer_replace(ErtsPTabElementCommon *t_p,
@@ -215,9 +226,4 @@ ERTS_DECLARE_DUMMY(erts_tracer_nil) = NIL;
#define ERTS_TRACER_FROM_ETERM(termp) \
((ErtsTracer*)(termp))
-#define ERTS_TRACER_PROC_IS_ENABLED(PROC) \
- (!ERTS_TRACER_IS_NIL(ERTS_TRACER(PROC)) \
- && erts_is_tracer_proc_enabled(PROC, ERTS_PROC_LOCK_MAIN, \
- &(PROC)->common, am_trace_status))
-
#endif /* ERL_TRACE_H__ */
diff --git a/erts/emulator/beam/erl_vm.h b/erts/emulator/beam/erl_vm.h
index f3c54de214..f97716d030 100644
--- a/erts/emulator/beam/erl_vm.h
+++ b/erts/emulator/beam/erl_vm.h
@@ -50,6 +50,7 @@
#define H_DEFAULT_SIZE 233 /* default (heap + stack) min size */
#define VH_DEFAULT_SIZE 32768 /* default virtual (bin) heap min size (words) */
+#define H_DEFAULT_MAX_SIZE 0 /* default max heap size is off */
#define CP_SIZE 1
@@ -160,6 +161,8 @@ extern int num_instructions; /* Number of instruction in opc[]. */
extern int H_MIN_SIZE; /* minimum (heap + stack) */
extern int BIN_VH_MIN_SIZE; /* minimum virtual (bin) heap */
+extern int H_MAX_SIZE; /* maximum (heap + stack) */
+extern int H_MAX_FLAGS; /* maximum heap flags */
extern int erts_atom_table_size;/* Atom table size */
extern int erts_pd_initial_size;/* Initial Process dictionary table size */
diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h
index c8cf7c6191..b76b9cd874 100644
--- a/erts/emulator/beam/global.h
+++ b/erts/emulator/beam/global.h
@@ -57,6 +57,7 @@ struct enif_environment_t /* ErlNifEnv */
struct enif_tmp_obj_t* tmp_obj_list;
int exception_thrown; /* boolean */
Process *tracee;
+ int exiting; /* boolean (dirty nifs might return in exiting state) */
};
extern void erts_pre_nif(struct enif_environment_t*, Process*,
struct erl_module_nif*, Process* tracee);
@@ -997,7 +998,7 @@ Eterm erl_is_function(Process* p, Eterm arg1, Eterm arg2);
#define ERTS_CPC_ALLOW_GC (1 << 0)
#define ERTS_CPC_COPY_LITERALS (1 << 1)
#define ERTS_CPC_ALL (ERTS_CPC_ALLOW_GC | ERTS_CPC_COPY_LITERALS)
-Eterm erts_check_process_code(Process *c_p, Eterm module, Uint flags, int *redsp);
+Eterm erts_check_process_code(Process *c_p, Eterm module, Uint flags, int *redsp, int fcalls);
typedef struct {
Eterm *ptr;
@@ -1483,9 +1484,19 @@ do { \
#define MatchSetGetSource(MPSP) erts_match_set_get_source(MPSP)
-extern Binary *erts_match_set_compile(Process *p, Eterm matchexpr);
+extern Binary *erts_match_set_compile(Process *p, Eterm matchexpr, Eterm MFA);
Eterm erts_match_set_lint(Process *p, Eterm matchexpr);
extern void erts_match_set_release_result(Process* p);
+ERTS_GLB_INLINE void erts_match_set_release_result_trace(Process* p, Eterm);
+
+#if ERTS_GLB_INLINE_INCL_FUNC_DEF
+ERTS_GLB_INLINE
+void erts_match_set_release_result_trace(Process* p, Eterm pam_result)
+{
+ if (is_not_immed(pam_result))
+ erts_match_set_release_result(p);
+}
+#endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */
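/*
 * Editor's sketch (not part of the patch): the intended call pattern for
 * erts_match_set_run_trace()/erts_match_set_release_result_trace() as used
 * by trace_send() and trace_receive() in this series -- run the match spec,
 * bail out on 'false', release early when the tracee is silent, otherwise
 * emit and then release. Simplified standalone shape with hypothetical
 * names; integer codes stand in for Eterm results.
 */
enum pam { PAM_FALSE, PAM_TRUE, PAM_HEAP_TERM };

static void traced_event(enum pam pam_result, int silent,
                         void (*emit)(enum pam), void (*release)(enum pam))
{
    if (pam_result == PAM_FALSE)
        return;                     /* match spec suppressed the message */
    if (silent) {
        release(pam_result);        /* may have built a heap term: drop it */
        return;
    }
    emit(pam_result);               /* pam_result may end up in the message */
    release(pam_result);            /* release is a no-op for immediates */
}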
enum erts_pam_run_flags {
ERTS_PAM_TMP_RESULT=1,
@@ -1493,10 +1504,12 @@ enum erts_pam_run_flags {
ERTS_PAM_CONTIGUOUS_TUPLE=4,
ERTS_PAM_IGNORE_TRACE_SILENT=8
};
-extern Eterm erts_match_set_run(Process *p, Binary *mpsp,
- Eterm *args, int num_args,
- enum erts_pam_run_flags in_flags,
- Uint32 *return_flags);
+extern Eterm erts_match_set_run_trace(Process *p,
+ Process *self,
+ Binary *mpsp,
+ Eterm *args, int num_args,
+ enum erts_pam_run_flags in_flags,
+ Uint32 *return_flags);
extern Eterm erts_match_set_get_source(Binary *mpsp);
extern void erts_match_prog_foreach_offheap(Binary *b,
void (*)(ErlOffHeap *, void *),
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index 5c2595c69d..0377f6cb5e 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -1432,10 +1432,11 @@ finalize_force_imm_drv_call(ErtsTryImmDrvCallState *sp)
static ERTS_INLINE void
queue_port_sched_op_reply(Process *rp,
- ErtsProcLocks *rp_locksp,
+ ErtsProcLocks rp_locks,
ErtsHeapFactory* factory,
Uint32 *ref_num,
- Eterm msg)
+ Eterm msg,
+ Port* prt)
{
Eterm* hp = erts_produce_heap(factory, ERTS_QUEUE_PORT_SCHED_OP_REPLY_SIZE, 0);
Eterm ref;
@@ -1448,11 +1449,12 @@ queue_port_sched_op_reply(Process *rp,
erts_factory_trim_and_close(factory, &msg, 1);
- erts_queue_message(rp, rp_locksp, factory->message, msg);
+ erts_queue_message(rp, rp_locks, factory->message, msg,
+ prt ? prt->common.id : am_undefined);
}
static void
-port_sched_op_reply(Eterm to, Uint32 *ref_num, Eterm msg)
+port_sched_op_reply(Eterm to, Uint32 *ref_num, Eterm msg, Port* prt)
{
Process *rp = erts_proc_lookup_raw(to);
if (rp) {
@@ -1478,10 +1480,11 @@ port_sched_op_reply(Eterm to, Uint32 *ref_num, Eterm msg)
factory.off_heap));
queue_port_sched_op_reply(rp,
- &rp_locks,
+ rp_locks,
&factory,
ref_num,
- msg_copy);
+ msg_copy,
+ prt);
if (rp_locks)
erts_smp_proc_unlock(rp, rp_locks);
@@ -1651,7 +1654,7 @@ port_badsig(Port *prt, erts_aint32_t state, int op,
state,
sigdp->flags & ERTS_P2P_SIG_DATA_FLG_BAD_OUTPUT);
if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
- port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg);
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg, prt);
return ERTS_PORT_REDS_BADSIG;
} /* port_badsig */
/* bad_port_signal() will
@@ -1820,7 +1823,7 @@ port_sig_outputv(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *s
}
if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
- port_sched_op_reply(sigdp->caller, sigdp->ref, reply);
+ port_sched_op_reply(sigdp->caller, sigdp->ref, reply, prt);
cleanup_scheduled_outputv(sigdp->u.outputv.evp,
sigdp->u.outputv.cbinp);
@@ -1928,7 +1931,7 @@ port_sig_output(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *si
}
if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
- port_sched_op_reply(sigdp->caller, sigdp->ref, reply);
+ port_sched_op_reply(sigdp->caller, sigdp->ref, reply, prt);
cleanup_scheduled_output(sigdp->u.output.bufp);
@@ -2509,7 +2512,7 @@ erts_port_output(Process *c_p,
sigdp->flags &= ~ERTS_P2P_SIG_DATA_FLG_NOSUSPEND;
else if (async_nosuspend) {
ErtsSchedulerData *esdp = (c_p
- ? ERTS_PROC_GET_SCHDATA(c_p)
+ ? erts_proc_sched_data(c_p)
: erts_get_scheduler_data());
ASSERT(esdp);
ns_pthp = &esdp->nosuspend_port_task_handle;
@@ -2636,7 +2639,7 @@ port_sig_exit(Port *prt,
if (sigdp->u.exit.bp)
free_message_buffer(sigdp->u.exit.bp);
if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
- port_sched_op_reply(sigdp->caller, sigdp->ref, msg);
+ port_sched_op_reply(sigdp->caller, sigdp->ref, msg, prt);
return ERTS_PORT_REDS_EXIT;
}
@@ -2829,7 +2832,7 @@ port_sig_connect(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *s
msg = am_true;
}
if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
- port_sched_op_reply(sigdp->caller, sigdp->ref, msg);
+ port_sched_op_reply(sigdp->caller, sigdp->ref, msg, prt);
return ERTS_PORT_REDS_CONNECT;
}
@@ -2912,7 +2915,7 @@ port_sig_unlink(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *si
if (op == ERTS_PROC2PORT_SIG_EXEC)
port_unlink(prt, sigdp->u.unlink.from);
if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
- port_sched_op_reply(sigdp->caller, sigdp->ref, am_true);
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_true, prt);
return ERTS_PORT_REDS_UNLINK;
}
@@ -3007,7 +3010,7 @@ port_sig_link(Port *prt, erts_aint32_t state, int op, ErtsProc2PortSigData *sigd
port_link_failure(sigdp->u.link.port, sigdp->u.link.to);
}
if (sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY)
- port_sched_op_reply(sigdp->caller, sigdp->ref, am_true);
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_true, prt);
return ERTS_PORT_REDS_LINK;
}
@@ -3064,7 +3067,8 @@ init_ack_send_reply(Port *port, Eterm resp)
}
port_sched_op_reply(port->async_open_port->to,
port->async_open_port->ref,
- resp);
+ resp,
+ port);
erts_free(ERTS_ALC_T_PRTSD, port->async_open_port);
port->async_open_port = NULL;
@@ -3461,7 +3465,7 @@ deliver_result(Port *prt, Eterm sender, Eterm pid, Eterm res)
sz_res + 3, &hp, &ohp);
res = copy_struct(res, sz_res, &hp, ohp);
tuple = TUPLE2(hp, sender, res);
- erts_queue_message(rp, &rp_locks, mp, tuple);
+ erts_queue_message(rp, rp_locks, mp, tuple, sender);
if (rp_locks)
erts_smp_proc_unlock(rp, rp_locks);
@@ -3562,7 +3566,7 @@ static void deliver_read_message(Port* prt, erts_aint32_t state, Eterm to,
trace_port_send(prt, to, tuple, 1);
ERL_MESSAGE_TOKEN(mp) = am_undefined;
- erts_queue_message(rp, &rp_locks, mp, tuple);
+ erts_queue_message(rp, rp_locks, mp, tuple, prt->common.id);
if (rp_locks)
erts_smp_proc_unlock(rp, rp_locks);
if (!scheduler)
@@ -3734,7 +3738,7 @@ deliver_vec_message(Port* prt, /* Port */
trace_port_send(prt, to, tuple, 1);
ERL_MESSAGE_TOKEN(mp) = am_undefined;
- erts_queue_message(rp, &rp_locks, mp, tuple);
+ erts_queue_message(rp, rp_locks, mp, tuple, prt->common.id);
erts_smp_proc_unlock(rp, rp_locks);
if (!scheduler)
erts_proc_dec_refc(rp);
@@ -4388,10 +4392,11 @@ port_sig_control(Port *prt,
&factory.hp,
factory.off_heap);
queue_port_sched_op_reply(rp,
- &rp_locks,
+ rp_locks,
&factory,
sigdp->ref,
- msg);
+ msg,
+ prt);
if (rp_locks)
erts_smp_proc_unlock(rp, rp_locks);
@@ -4402,7 +4407,7 @@ port_sig_control(Port *prt,
/* failure */
if (sigdp->caller != ERTS_INVALID_PID)
- port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg);
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg, prt);
done:
@@ -4739,10 +4744,11 @@ port_sig_call(Port *prt,
msg = TUPLE2(hp, am_ok, msg);
queue_port_sched_op_reply(rp,
- &rp_locks,
+ rp_locks,
&factory,
sigdp->ref,
- msg);
+ msg,
+ prt);
if (rp_locks)
erts_smp_proc_unlock(rp, rp_locks);
@@ -4754,7 +4760,7 @@ port_sig_call(Port *prt,
}
}
- port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg);
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_badarg, prt);
done:
@@ -4969,7 +4975,7 @@ port_sig_info(Port *prt,
{
ASSERT(sigdp->flags & ERTS_P2P_SIG_DATA_FLG_REPLY);
if (op != ERTS_PROC2PORT_SIG_EXEC)
- port_sched_op_reply(sigdp->caller, sigdp->ref, am_undefined);
+ port_sched_op_reply(sigdp->caller, sigdp->ref, am_undefined, prt);
else {
Eterm *hp, *hp_start;
Uint hsz;
@@ -4995,10 +5001,11 @@ port_sig_info(Port *prt,
mp->data.heap_frag = bp;
erts_factory_selfcontained_message_init(&factory, mp, hp);
queue_port_sched_op_reply(rp,
- &rp_locks,
+ rp_locks,
&factory,
sigdp->ref,
- value);
+ value,
+ prt);
}
if (rp_locks)
erts_smp_proc_unlock(rp, rp_locks);
@@ -5115,7 +5122,7 @@ reply_io_bytes(void *vreq)
msg = TUPLE4(hp, ref, make_small(sched_id), ein, eout);
- erts_queue_message(rp, &rp_locks, mp, msg);
+ erts_queue_message(rp, rp_locks, mp, msg, am_system);
if (req->sched_id == sched_id)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -5133,7 +5140,7 @@ erts_request_io_bytes(Process *c_p)
Uint *hp;
Eterm ref;
Uint32 *refn;
- ErtsSchedulerData *esdp = ERTS_PROC_GET_SCHDATA(c_p);
+ ErtsSchedulerData *esdp = erts_proc_sched_data(c_p);
ErtsIOBytesReq *req = erts_alloc(ERTS_ALC_T_IOB_REQ,
sizeof(ErtsIOBytesReq));
@@ -5613,7 +5620,7 @@ void driver_report_exit(ErlDrvPort ix, int status)
trace_port_send(prt, pid, tuple, 1);
ERL_MESSAGE_TOKEN(mp) = am_undefined;
- erts_queue_message(rp, &rp_locks, mp, tuple);
+ erts_queue_message(rp, rp_locks, mp, tuple, prt->common.id);
erts_smp_proc_unlock(rp, rp_locks);
if (!scheduler)
@@ -5934,7 +5941,7 @@ driver_deliver_term(Port *prt, Eterm to, ErlDrvTermData* data, int len)
if (!rp) {
if (!prt || !IS_TRACED_FL(prt, F_TRACE_SEND))
goto done;
- if (!erts_is_tracer_proc_enabled(NULL, 0, &prt->common, am_send))
+ if (!erts_is_tracer_proc_enabled_send(NULL, 0, &prt->common))
goto done;
res = -2;
@@ -6217,15 +6224,20 @@ driver_deliver_term(Port *prt, Eterm to, ErlDrvTermData* data, int len)
done:
if (res > 0) {
+ Eterm from = am_undefined;
mess = ESTACK_POP(stack); /* get resulting value */
erts_factory_trim_and_close(&factory, &mess, 1);
- if (prt && IS_TRACED_FL(prt, F_TRACE_SEND))
- trace_port_send(prt, to, mess, 1);
+ if (prt) {
+ if (IS_TRACED_FL(prt, F_TRACE_SEND)) {
+ trace_port_send(prt, to, mess, 1);
+ }
+ from = prt->common.id;
+ }
/* send message */
ERL_MESSAGE_TOKEN(factory.message) = am_undefined;
- erts_queue_message(rp, &rp_locks, factory.message, mess);
+ erts_queue_message(rp, rp_locks, factory.message, mess, from);
}
else if (res == -2) {
/* this clause only happens when we were requested to
@@ -7869,11 +7881,13 @@ driver_system_info(ErlDrvSysInfo *sip, size_t si_size)
* (driver version 3.1, NIF version 2.7)
*/
if (si_size >= ERL_DRV_SYS_INFO_SIZE(dirty_scheduler_support)) {
-#if defined(ERL_NIF_DIRTY_SCHEDULER_SUPPORT) && defined(USE_THREADS)
- sip->dirty_scheduler_support = 1;
+ sip->dirty_scheduler_support =
+#ifdef ERTS_DIRTY_SCHEDULERS
+ 1
#else
- sip->dirty_scheduler_support = 0;
+ 0
#endif
+ ;
}
}
diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h
index 748fba15c7..f303d4f167 100644
--- a/erts/emulator/beam/sys.h
+++ b/erts/emulator/beam/sys.h
@@ -34,6 +34,10 @@
(((__GNUC__ << 24) | (__GNUC_MINOR__ << 12) | __GNUC_PATCHLEVEL__) >= (((MAJ) << 24) | ((MIN) << 12) | (PL)))
#endif
+#if defined(ERTS_DIRTY_SCHEDULERS) && !defined(ERTS_SMP)
+# error "Dirty schedulers not supported without smp support"
+#endif
+
#ifdef ERTS_INLINE
# ifndef ERTS_CAN_INLINE
# define ERTS_CAN_INLINE 1
@@ -92,6 +96,9 @@
#define ErtsInArea(ptr,start,nbytes) \
((UWord)((char*)(ptr) - (char*)(start)) < (nbytes))
+#define ErtsContainerStruct(ptr, type, member) \
+ (type *)((char *)(1 ? (ptr) : &((type *)0)->member) - offsetof(type, member))
+
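/*
 * Editor's sketch (not part of the patch): ErtsContainerStruct is the usual
 * container_of idiom -- recover the enclosing struct from a pointer to one
 * of its members. Runnable standalone illustration.
 */
#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(1 ? (ptr) : &((type *)0)->member) - offsetof(type, member)))

struct outer { int a; int inner; };

int main(void)
{
    struct outer o;
    int *ip = &o.inner;

    assert(container_of(ip, struct outer, inner) == &o);
    return 0;
}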
#if defined (__WIN32__)
# include "erl_win_sys.h"
#else
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 68006e7ef3..cedc88e5fe 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -2263,7 +2263,7 @@ static void do_send_logger_message(Eterm *hp, ErlOffHeap *ohp, ErlHeapFragment *
{
ErtsMessage *mp = erts_alloc_message(0, NULL);
mp->data.heap_frag = bp;
- erts_queue_message(p, NULL /* only used for smp build */, mp, message);
+ erts_queue_message(p, 0, mp, message, am_system);
}
#endif
}
diff --git a/erts/emulator/hipe/hipe_bif0.c b/erts/emulator/hipe/hipe_bif0.c
index 48cfafb8c5..4063cbf306 100644
--- a/erts/emulator/hipe/hipe_bif0.c
+++ b/erts/emulator/hipe/hipe_bif0.c
@@ -533,13 +533,13 @@ BIF_RETTYPE hipe_bifs_merge_term_1(BIF_ALIST_1)
BIF_RET(val);
}
-struct mfa {
+struct mfa_t {
Eterm mod;
Eterm fun;
Uint ari;
};
-static int term_to_mfa(Eterm term, struct mfa *mfa)
+static int term_to_mfa(Eterm term, struct mfa_t *mfa)
{
Eterm mod, fun, a;
Uint ari;
@@ -597,7 +597,7 @@ static Uint *hipe_find_emu_address(Eterm mod, Eterm name, unsigned int arity)
Uint *hipe_bifs_find_pc_from_mfa(Eterm term)
{
- struct mfa mfa;
+ struct mfa_t mfa;
if (!term_to_mfa(term, &mfa))
return NULL;
@@ -617,7 +617,7 @@ BIF_RETTYPE hipe_bifs_set_native_address_3(BIF_ALIST_3)
Eterm *pc;
void *address;
int is_closure;
- struct mfa mfa;
+ struct mfa_t mfa;
switch (BIF_ARG_3) {
case am_false:
@@ -1225,7 +1225,7 @@ void hipe_mfa_set_trampoline(Eterm m, Eterm f, unsigned int arity, void *trampol
BIF_RETTYPE hipe_bifs_set_funinfo_native_address_3(BIF_ALIST_3)
{
- struct mfa mfa;
+ struct mfa_t mfa;
void *address;
int is_exported;
@@ -1247,7 +1247,7 @@ BIF_RETTYPE hipe_bifs_set_funinfo_native_address_3(BIF_ALIST_3)
BIF_RETTYPE hipe_bifs_invalidate_funinfo_native_addresses_1(BIF_ALIST_1)
{
Eterm lst;
- struct mfa mfa;
+ struct mfa_t mfa;
struct hipe_mfa_info *p;
hipe_mfa_info_table_rwlock();
@@ -1416,7 +1416,7 @@ BIF_RETTYPE hipe_find_na_or_make_stub(BIF_ALIST_3)
BIF_RETTYPE hipe_bifs_find_na_or_make_stub_2(BIF_ALIST_2)
{
- struct mfa mfa;
+ struct mfa_t mfa;
void *address;
int is_remote;
@@ -1518,9 +1518,9 @@ struct ref {
*/
BIF_RETTYPE hipe_bifs_add_ref_2(BIF_ALIST_2)
{
- struct mfa callee;
+ struct mfa_t callee;
Eterm *tuple;
- struct mfa caller;
+ struct mfa_t caller;
void *address;
void *trampoline;
unsigned int flags;
@@ -1597,7 +1597,7 @@ BIF_RETTYPE hipe_bifs_add_ref_2(BIF_ALIST_2)
*/
BIF_RETTYPE hipe_bifs_mark_referred_from_1(BIF_ALIST_1) /* get_refs_from */
{
- struct mfa mfa;
+ struct mfa_t mfa;
const struct hipe_mfa_info *p;
struct ref *ref;
@@ -1649,7 +1649,7 @@ static void hipe_purge_all_refs(void)
BIF_RETTYPE hipe_bifs_remove_refs_from_1(BIF_ALIST_1)
{
- struct mfa mfa;
+ struct mfa_t mfa;
struct hipe_mfa_info *caller_mfa, *callee_mfa;
struct hipe_mfa_info_list *refers_to, *tmp_refers_to;
struct ref **prev, *ref;
@@ -1703,7 +1703,7 @@ BIF_RETTYPE hipe_bifs_remove_refs_from_1(BIF_ALIST_1)
*/
BIF_RETTYPE hipe_bifs_redirect_referred_from_1(BIF_ALIST_1)
{
- struct mfa mfa;
+ struct mfa_t mfa;
struct hipe_mfa_info *p;
struct ref **prev, *ref;
int is_remote, res;
diff --git a/erts/emulator/hipe/hipe_mode_switch.c b/erts/emulator/hipe/hipe_mode_switch.c
index 43144e75ec..884331e969 100644
--- a/erts/emulator/hipe/hipe_mode_switch.c
+++ b/erts/emulator/hipe/hipe_mode_switch.c
@@ -59,6 +59,7 @@
* TODO: check PCB consistency at native BIF calls
*/
int hipe_modeswitch_debug = 0;
+extern BeamInstr beam_exit[];
#define HIPE_DEBUG 0
@@ -173,6 +174,33 @@ void hipe_mode_switch_init(void)
make_catch(beam_catches_cons(hipe_beam_pc_throw, BEAM_CATCHES_NIL));
hipe_mfa_info_table_init();
+
+#if (defined(__i386__) || defined(__x86_64__)) && defined(__linux__)
+ /* Verify that the offset of c_p->hipe does not change.
+ The ErLLVM HiPE backend depends on it being at a specific
+ position. Kostis et al. have promised to fix this in upstream
+ LLVM by OTP 20, so it should be possible to remove these asserts
+ after that. */
+ ERTS_CT_ASSERT(sizeof(ErtsPTabElementCommon) ==
+ (sizeof(Eterm) + /* id */
+ sizeof(((ErtsPTabElementCommon*)0)->refc) +
+ sizeof(ErtsTracer) + /* tracer */
+ sizeof(Uint) + /* trace_flags */
+ sizeof(erts_smp_atomic_t) + /* timer */
+ sizeof(((ErtsPTabElementCommon*)0)->u)));
+
+ ERTS_CT_ASSERT(offsetof(Process, hipe) ==
+ (sizeof(ErtsPTabElementCommon) + /* common */
+ sizeof(Eterm*) + /* htop */
+ sizeof(Eterm*) + /* stop */
+ sizeof(Eterm*) + /* heap */
+ sizeof(Eterm*) + /* hend */
+ sizeof(Uint) + /* heap_sz */
+ sizeof(Uint) + /* min_heap_size */
+ sizeof(Uint) + /* min_vheap_size */
+ sizeof(volatile unsigned long))); /* fp_exception */
+#endif
+
}
void hipe_set_call_trap(Uint *bfun, void *nfun, int is_closure)
@@ -218,15 +246,37 @@ static __inline__ void hipe_pop_beam_trap_frame(Process *p)
Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
{
unsigned result;
-#if NR_ARG_REGS > 5
- /* When NR_ARG_REGS > 5, we need to protect the process' input
- reduction count (which BEAM stores in def_arg_reg[5]) from
- being clobbered by the arch glue code. */
Eterm reds_in = p->def_arg_reg[5];
-#endif
-#if NR_ARG_REGS > 4
- Eterm o_reds = p->def_arg_reg[4];
-#endif
+ /*
+ * A process is normally scheduled out when its reduction count
+ * reaches zero. When "save calls" is enabled the reduction count
+ * is lowered by CONTEXT_REDS, i.e. the initial reduction count
+ * is zero or less and the process is scheduled out when
+ * -CONTEXT_REDS is reached.
+ *
+ * HiPE does not support the "save calls" feature, so we switch
+ * to using a positive reduction counter when executing in HiPE
+ * mode, but need to restore "save calls" when returning to BEAM.
+ * We also need to hide the save calls buffer from BIFs; we do
+ * that by moving the saved calls buffer to the suspended saved
+ * calls buffer.
+ *
+ * BEAM stores the initial reduction count in p->def_arg_reg[5].
+ *
+ * BEAM expects -neg_o_reds to be found in p->def_arg_reg[4]
+ * on return to BEAM.
+ */
+
+ {
+ struct saved_calls *scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, NULL);
+ if (scb) {
+ reds_in += CONTEXT_REDS;
+ p->fcalls += CONTEXT_REDS;
+ ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(p, scb);
+ }
+ }
+
+ p->flags |= F_HIPE_MODE; /* inform BIFs where we are coming from... */
p->i = NULL;
/* Set current_function to undefined. stdlib hibernate tests rely on it. */
@@ -481,12 +531,25 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_MSG_RECEIVE);
do_schedule:
{
-#if !(NR_ARG_REGS > 5)
- int reds_in = p->def_arg_reg[5];
+ struct saved_calls *scb;
+
+ scb = ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(p, NULL);
+ if (scb)
+ ERTS_PROC_SET_SAVED_CALLS_BUF(p, scb);
+
+ /* The process may have died while it was executing; if so,
+ return from native code to the interpreter. */
+ if (erts_smp_atomic32_read_nob(&p->state) & ERTS_PSFLG_EXITING)
+ p->i = beam_exit;
+#ifdef DEBUG
+ ASSERT(p->debug_reds_in == reds_in);
#endif
+ p->flags &= ~F_HIPE_MODE;
+
ERTS_SMP_UNREQ_PROC_MAIN_LOCK(p);
p = schedule(p, reds_in - p->fcalls);
ERTS_SMP_REQ_PROC_MAIN_LOCK(p);
+ ASSERT(!(p->flags & F_HIPE_MODE));
#ifdef ERTS_SMP
p->hipe_smp.have_receive_locks = 0;
reg = p->scheduler_data->x_reg_array;
@@ -501,28 +564,32 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
reg[i] = argp[i];
}
{
-#if !(NR_ARG_REGS > 5)
- Eterm reds_in;
-#endif
-#if !(NR_ARG_REGS > 4)
- Eterm o_reds;
-#endif
+ struct saved_calls *scb;
reds_in = p->fcalls;
- o_reds = 0;
- if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) {
- o_reds = reds_in;
- reds_in = 0;
- p->fcalls = 0;
- }
- p->def_arg_reg[4] = o_reds;
p->def_arg_reg[5] = reds_in;
+#ifdef DEBUG
+ p->debug_reds_in = reds_in;
+#endif
if (p->i == hipe_beam_pc_resume) {
+ p->flags |= F_HIPE_MODE; /* inform BIFs where we are coming from... */
+ scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, NULL);
+ if (scb)
+ ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(p, scb);
p->i = NULL;
p->arity = 0;
goto do_resume;
}
+
+ scb = ERTS_PROC_GET_SAVED_CALLS_BUF(p);
+ if (!scb)
+ p->def_arg_reg[4] = 0;
+ else {
+ p->def_arg_reg[4] = CONTEXT_REDS;
+ p->fcalls = -CONTEXT_REDS + reds_in;
+ }
}
+
HIPE_CHECK_PCB(p);
result = HIPE_MODE_SWITCH_RES_CALL_BEAM;
p->def_arg_reg[3] = result;
@@ -562,14 +629,29 @@ Process *hipe_mode_switch(Process *p, unsigned cmd, Eterm reg[])
default:
erts_exit(ERTS_ERROR_EXIT, "hipe_mode_switch: result %#x\r\n", result);
}
+
+ {
+ struct saved_calls *scb = ERTS_PROC_SET_SUSPENDED_SAVED_CALLS_BUF(p, NULL);
+ if (!scb)
+ p->def_arg_reg[4] = 0;
+ else {
+ p->def_arg_reg[4] = CONTEXT_REDS;
+ p->fcalls -= CONTEXT_REDS;
+ ERTS_PROC_SET_SAVED_CALLS_BUF(p, scb);
+ }
+ }
+
HIPE_CHECK_PCB(p);
p->def_arg_reg[3] = result;
-#if NR_ARG_REGS > 4
- p->def_arg_reg[4] = o_reds;
-#endif
#if NR_ARG_REGS > 5
+ /*
+ * When NR_ARG_REGS > 5, we need to protect the process' input
+ * reduction count (which BEAM stores in def_arg_reg[5]) from
+ * being clobbered by the arch glue code.
+ */
p->def_arg_reg[5] = reds_in;
#endif
+ p->flags &= ~F_HIPE_MODE;
return p;
}
diff --git a/erts/emulator/hipe/hipe_native_bif.c b/erts/emulator/hipe/hipe_native_bif.c
index 00b572029a..9c03b3811c 100644
--- a/erts/emulator/hipe/hipe_native_bif.c
+++ b/erts/emulator/hipe/hipe_native_bif.c
@@ -601,7 +601,7 @@ void hipe_clear_timeout(Process *c_p)
}
#endif
if (IS_TRACED_FL(c_p, F_TRACE_RECEIVE)) {
- trace_receive(c_p, am_timeout);
+ trace_receive(c_p, am_clock_service, am_timeout, NULL);
}
c_p->flags &= ~F_TIMO;
JOIN_MESSAGE(c_p);
diff --git a/erts/emulator/nifs/common/erl_tracer_nif.c b/erts/emulator/nifs/common/erl_tracer_nif.c
index 8a9a1bf16c..6dddc80607 100644
--- a/erts/emulator/nifs/common/erl_tracer_nif.c
+++ b/erts/emulator/nifs/common/erl_tracer_nif.c
@@ -70,6 +70,7 @@ ERL_NIF_INIT(erl_tracer, nif_funcs, load, NULL, upgrade, unload)
ATOM_DECL(strict_monotonic); \
ATOM_DECL(timestamp); \
ATOM_DECL(trace); \
+ ATOM_DECL(trace_status); \
ATOM_DECL(trace_ts); \
ATOM_DECL(true); \
ATOM_DECL(gc_minor_start); \
@@ -118,19 +119,22 @@ static ERL_NIF_TERM enabled(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
ErlNifPid to_pid;
ErlNifPort to_port;
+ ERL_NIF_TERM ret = enif_is_identical(argv[0], atom_trace_status) ?
+ atom_remove : atom_discard;
+
ASSERT(argc == 3);
if (enif_get_local_pid(env, argv[1], &to_pid)) {
if (!enif_is_process_alive(env, &to_pid))
/* tracer is dead so we should remove this trace point */
- return atom_remove;
+ return ret;
} else if (enif_get_local_port(env, argv[1], &to_port)) {
if (!enif_is_port_alive(env, &to_port))
/* tracer is dead so we should remove this trace point */
- return atom_remove;
+ return ret;
} else {
/* The state was not a pid or a port */
- return atom_remove;
+ return ret;
}
/* Only generate trace for when tracer != tracee */
diff --git a/erts/emulator/sys/common/erl_mmap.c b/erts/emulator/sys/common/erl_mmap.c
index a7e710070b..53009a1481 100644
--- a/erts/emulator/sys/common/erl_mmap.c
+++ b/erts/emulator/sys/common/erl_mmap.c
@@ -28,6 +28,8 @@
#include "erl_mmap.h"
#include <stddef.h>
+#if HAVE_ERTS_MMAP
+
/* #define ERTS_MMAP_OP_RINGBUF_SZ 100 */
#if defined(DEBUG) || 0
@@ -2099,6 +2101,7 @@ int erts_mmap_in_supercarrier(ErtsMemMapper* mm, void *ptr)
}
static struct {
+ Eterm options;
Eterm total;
Eterm total_sa;
Eterm total_sua;
@@ -2132,6 +2135,7 @@ static void init_atoms(void)
erts_mtx_lock(&am.init_mutex);
if (!am.is_initialized) {
+ AM_INIT(options);
AM_INIT(total);
AM_INIT(total_sa);
AM_INIT(total_sua);
@@ -2387,9 +2391,9 @@ Eterm erts_mmap_info(ErtsMemMapper* mm,
Eterm seg_tags[] = { am.used, am.max, am.allocated, am.reserved, am.used_sa, am.used_sua };
Eterm group[2];
Eterm group_tags[] = { am.sizes, am.free_segs };
- Eterm list[2];
- Eterm list_tags[2]; /* { am.supercarrier, am.os } */
- int lix;
+ Eterm list[3];
+ Eterm list_tags[3]; /* { am.options, am.supercarrier, am.os } */
+ int lix = 0;
Eterm res = THE_NON_VALUE;
if (!hpp) {
@@ -2412,6 +2416,12 @@ Eterm erts_mmap_info(ErtsMemMapper* mm,
erts_smp_mtx_unlock(&mm->mtx);
}
+ list[lix] = erts_mmap_info_options(mm, "option ", print_to_p, print_to_arg,
+ hpp, szp);
+ list_tags[lix] = am.options;
+ lix++;
+
+
if (print_to_p) {
int to = *print_to_p;
void *arg = print_to_arg;
@@ -2441,7 +2451,6 @@ Eterm erts_mmap_info(ErtsMemMapper* mm,
init_atoms();
}
- lix = 0;
if (mm->supercarrier) {
group[0] = erts_bld_atom_uword_2tup_list(hpp, szp,
sizeof(size_tags)/sizeof(Eterm),
@@ -2506,9 +2515,13 @@ Eterm erts_mmap_info_options(ErtsMemMapper* mm,
return res;
}
+#endif /* HAVE_ERTS_MMAP */
-Eterm erts_mmap_debug_info(ErtsMemMapper* mm, Process* p)
+Eterm erts_mmap_debug_info(Process* p)
{
+#if HAVE_ERTS_MMAP
+ ErtsMemMapper* mm = &erts_dflt_mmapper;
+
if (mm->supercarrier) {
ERTS_DECL_AM(sabot);
ERTS_DECL_AM(satop);
@@ -2546,9 +2559,8 @@ Eterm erts_mmap_debug_info(ErtsMemMapper* mm, Process* p)
HRelease(p, hp_end, hp);
return list;
}
- else {
- return am_undefined;
- }
+#endif
+ return am_undefined;
}
diff --git a/erts/emulator/sys/common/erl_mmap.h b/erts/emulator/sys/common/erl_mmap.h
index b3c45ba116..7ac61a82c1 100644
--- a/erts/emulator/sys/common/erl_mmap.h
+++ b/erts/emulator/sys/common/erl_mmap.h
@@ -26,9 +26,36 @@
#define ERTS_MMAP_SUPERALIGNED_BITS (18)
/* Affects hard limits for sbct and lmbcs documented in erts_alloc.xml */
-#define ERTS_MMAPFLG_OS_ONLY (((Uint32) 1) << 0)
-#define ERTS_MMAPFLG_SUPERCARRIER_ONLY (((Uint32) 1) << 1)
-#define ERTS_MMAPFLG_SUPERALIGNED (((Uint32) 1) << 2)
+#ifndef HAVE_MMAP
+# define HAVE_MMAP 0
+#endif
+#ifndef HAVE_MREMAP
+# define HAVE_MREMAP 0
+#endif
+#if HAVE_MMAP
+# define ERTS_HAVE_OS_MMAP 1
+# define ERTS_HAVE_GENUINE_OS_MMAP 1
+# if HAVE_MREMAP
+# define ERTS_HAVE_OS_MREMAP 1
+# endif
+# if defined(MAP_FIXED) && defined(MAP_NORESERVE)
+# define ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION 1
+# endif
+#endif
+
+#ifndef HAVE_VIRTUALALLOC
+# define HAVE_VIRTUALALLOC 0
+#endif
+#if HAVE_VIRTUALALLOC
+# define ERTS_HAVE_OS_MMAP 1
+#endif
+
+#ifdef ERTS_HAVE_GENUINE_OS_MMAP
+# define HAVE_ERTS_MMAP 1
+#else
+# define HAVE_ERTS_MMAP 0
+#endif
+
extern UWord erts_page_inv_mask;
@@ -60,26 +87,6 @@ typedef struct {
#define ERTS_MMAP_INIT_HIPE_EXEC_INITER \
{{NULL, NULL}, {NULL, NULL}, ERTS_HIPE_EXEC_VIRTUAL_AREA_SIZE, 1, (1 << 10), 0}
-typedef struct ErtsMemMapper_ ErtsMemMapper;
-
-void *erts_mmap(ErtsMemMapper*, Uint32 flags, UWord *sizep);
-void erts_munmap(ErtsMemMapper*, Uint32 flags, void *ptr, UWord size);
-void *erts_mremap(ErtsMemMapper*, Uint32 flags, void *ptr, UWord old_size, UWord *sizep);
-int erts_mmap_in_supercarrier(ErtsMemMapper*, void *ptr);
-void erts_mmap_init(ErtsMemMapper*, ErtsMMapInit*, int executable);
-struct erts_mmap_info_struct
-{
- UWord sizes[6];
- UWord segs[6];
- UWord os_used;
-};
-Eterm erts_mmap_info(ErtsMemMapper*, int *print_to_p, void *print_to_arg,
- Eterm** hpp, Uint* szp, struct erts_mmap_info_struct*);
-Eterm erts_mmap_info_options(ErtsMemMapper*,
- char *prefix, int *print_to_p, void *print_to_arg,
- Uint **hpp, Uint *szp);
-struct process;
-Eterm erts_mmap_debug_info(ErtsMemMapper*, struct process*);
#define ERTS_SUPERALIGNED_SIZE \
(1 << ERTS_MMAP_SUPERALIGNED_BITS)
@@ -107,29 +114,34 @@ Eterm erts_mmap_debug_info(ErtsMemMapper*, struct process*);
#define ERTS_PAGEALIGNED_SIZE \
(ERTS_INV_PAGEALIGNED_MASK + 1)
-#ifndef HAVE_MMAP
-# define HAVE_MMAP 0
-#endif
-#ifndef HAVE_MREMAP
-# define HAVE_MREMAP 0
-#endif
-#if HAVE_MMAP
-# define ERTS_HAVE_OS_MMAP 1
-# define ERTS_HAVE_GENUINE_OS_MMAP 1
-# if HAVE_MREMAP
-# define ERTS_HAVE_OS_MREMAP 1
-# endif
-# if defined(MAP_FIXED) && defined(MAP_NORESERVE)
-# define ERTS_HAVE_OS_PHYSICAL_MEMORY_RESERVATION 1
-# endif
-#endif
+struct process;
+Eterm erts_mmap_debug_info(struct process*);
+
+#if HAVE_ERTS_MMAP
+
+typedef struct ErtsMemMapper_ ErtsMemMapper;
+
+#define ERTS_MMAPFLG_OS_ONLY (((Uint32) 1) << 0)
+#define ERTS_MMAPFLG_SUPERCARRIER_ONLY (((Uint32) 1) << 1)
+#define ERTS_MMAPFLG_SUPERALIGNED (((Uint32) 1) << 2)
+
+void *erts_mmap(ErtsMemMapper*, Uint32 flags, UWord *sizep);
+void erts_munmap(ErtsMemMapper*, Uint32 flags, void *ptr, UWord size);
+void *erts_mremap(ErtsMemMapper*, Uint32 flags, void *ptr, UWord old_size, UWord *sizep);
+int erts_mmap_in_supercarrier(ErtsMemMapper*, void *ptr);
+void erts_mmap_init(ErtsMemMapper*, ErtsMMapInit*, int executable);
+struct erts_mmap_info_struct
+{
+ UWord sizes[6];
+ UWord segs[6];
+ UWord os_used;
+};
+Eterm erts_mmap_info(ErtsMemMapper*, int *print_to_p, void *print_to_arg,
+ Eterm** hpp, Uint* szp, struct erts_mmap_info_struct*);
+Eterm erts_mmap_info_options(ErtsMemMapper*,
+ char *prefix, int *print_to_p, void *print_to_arg,
+ Uint **hpp, Uint *szp);
-#ifndef HAVE_VIRTUALALLOC
-# define HAVE_VIRTUALALLOC 0
-#endif
-#if HAVE_VIRTUALALLOC
-# define ERTS_HAVE_OS_MMAP 1
-#endif
#ifdef ERTS_WANT_MEM_MAPPERS
# include "erl_alloc_types.h"
@@ -154,4 +166,6 @@ void hard_dbg_remove_mseg(void* seg, UWord sz);
# define HARD_DBG_REMOVE_MSEG(SEG,SZ)
#endif
+#endif /* HAVE_ERTS_MMAP */
+
#endif /* ERL_MMAP_H__ */
diff --git a/erts/emulator/sys/common/erl_mseg.c b/erts/emulator/sys/common/erl_mseg.c
index 43e75e2573..f3306a888c 100644
--- a/erts/emulator/sys/common/erl_mseg.c
+++ b/erts/emulator/sys/common/erl_mseg.c
@@ -996,10 +996,7 @@ info_options(ErtsMsegAllctr_t *ma,
Uint **hpp,
Uint *szp)
{
- Eterm res;
-
- res = erts_mmap_info_options(&erts_dflt_mmapper,
- prefix, print_to_p, print_to_arg, hpp, szp);
+ Eterm res = NIL;
if (print_to_p) {
int to = *print_to_p;
@@ -1110,7 +1107,7 @@ info_calls(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp
static Eterm
info_status(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg,
- int begin_new_max_period, Uint **hpp, Uint *szp)
+ int begin_new_max_period, int only_sz, Uint **hpp, Uint *szp)
{
Eterm res = THE_NON_VALUE;
@@ -1123,38 +1120,41 @@ info_status(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg,
int to = *print_to_p;
void *arg = print_to_arg;
- erts_print(to, arg, "cached_segments: %beu\n", ma->cache_size);
- erts_print(to, arg, "cache_hits: %beu\n", ma->cache_hits);
- erts_print(to, arg, "segments: %beu %beu %beu\n",
- ma->segments.current.no, ma->segments.max.no, ma->segments.max_ever.no);
- erts_print(to, arg, "segments_size: %beu %beu %beu\n",
+ if (!only_sz) {
+ erts_print(to, arg, "cached_segments: %beu\n", ma->cache_size);
+ erts_print(to, arg, "cache_hits: %beu\n", ma->cache_hits);
+ erts_print(to, arg, "segments: %beu %beu %beu\n",
+ ma->segments.current.no, ma->segments.max.no, ma->segments.max_ever.no);
+ erts_print(to, arg, "segments_watermark: %beu\n",
+ ma->segments.current.watermark);
+ }
+ erts_print(to, arg, "segments_size: %beu %beu %beu\n",
ma->segments.current.sz, ma->segments.max.sz, ma->segments.max_ever.sz);
- erts_print(to, arg, "segments_watermark: %beu\n",
- ma->segments.current.watermark);
}
if (hpp || szp) {
res = NIL;
- add_2tup(hpp, szp, &res,
- am.segments_watermark,
- bld_unstable_uint(hpp, szp, ma->segments.current.watermark));
- add_4tup(hpp, szp, &res,
- am.segments_size,
- bld_unstable_uint(hpp, szp, ma->segments.current.sz),
- bld_unstable_uint(hpp, szp, ma->segments.max.sz),
- bld_unstable_uint(hpp, szp, ma->segments.max_ever.sz));
- add_4tup(hpp, szp, &res,
- am.segments,
- bld_unstable_uint(hpp, szp, ma->segments.current.no),
- bld_unstable_uint(hpp, szp, ma->segments.max.no),
- bld_unstable_uint(hpp, szp, ma->segments.max_ever.no));
- add_2tup(hpp, szp, &res,
- am.cache_hits,
- bld_unstable_uint(hpp, szp, ma->cache_hits));
- add_2tup(hpp, szp, &res,
- am.cached_segments,
- bld_unstable_uint(hpp, szp, ma->cache_size));
-
+ add_4tup(hpp, szp, &res,
+ am.segments_size,
+ bld_unstable_uint(hpp, szp, ma->segments.current.sz),
+ bld_unstable_uint(hpp, szp, ma->segments.max.sz),
+ bld_unstable_uint(hpp, szp, ma->segments.max_ever.sz));
+ if (!only_sz) {
+ add_2tup(hpp, szp, &res,
+ am.segments_watermark,
+ bld_unstable_uint(hpp, szp, ma->segments.current.watermark));
+ add_4tup(hpp, szp, &res,
+ am.segments,
+ bld_unstable_uint(hpp, szp, ma->segments.current.no),
+ bld_unstable_uint(hpp, szp, ma->segments.max.no),
+ bld_unstable_uint(hpp, szp, ma->segments.max_ever.no));
+ add_2tup(hpp, szp, &res,
+ am.cache_hits,
+ bld_unstable_uint(hpp, szp, ma->cache_hits));
+ add_2tup(hpp, szp, &res,
+ am.cached_segments,
+ bld_unstable_uint(hpp, szp, ma->cache_size));
+ }
}
if (begin_new_max_period) {
@@ -1166,26 +1166,31 @@ info_status(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg,
}
static Eterm info_memkind(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg,
- int begin_max_per, Uint **hpp, Uint *szp)
+ int begin_max_per, int only_sz, Uint **hpp, Uint *szp)
{
Eterm res = THE_NON_VALUE;
Eterm atoms[3];
Eterm values[3];
- if (print_to_p) {
- erts_print(*print_to_p, print_to_arg, "memory kind: %s\n", "all memory");
- }
- if (hpp || szp) {
- atoms[0] = am.name;
- atoms[1] = am.status;
- atoms[2] = am.calls;
- values[0] = erts_bld_string(hpp, szp, "all memory");
+ if (!only_sz) {
+ if (print_to_p) {
+ erts_print(*print_to_p, print_to_arg, "memory kind: %s\n", "all memory");
+ }
+ if (hpp || szp) {
+ atoms[0] = am.name;
+ atoms[1] = am.status;
+ atoms[2] = am.calls;
+ values[0] = erts_bld_string(hpp, szp, "all memory");
+ }
}
- values[1] = info_status(ma, print_to_p, print_to_arg, begin_max_per, hpp, szp);
- values[2] = info_calls(ma, print_to_p, print_to_arg, hpp, szp);
+ res = info_status(ma, print_to_p, print_to_arg, begin_max_per, only_sz, hpp, szp);
+ if (!only_sz) {
+ values[1] = res;
+ values[2] = info_calls(ma, print_to_p, print_to_arg, hpp, szp);
- if (hpp || szp)
- res = bld_2tup_list(hpp, szp, 3, atoms, values);
+ if (hpp || szp)
+ res = bld_2tup_list(hpp, szp, 3, atoms, values);
+ }
return res;
}
@@ -1229,6 +1234,7 @@ erts_mseg_info(int ix,
int *print_to_p,
void *print_to_arg,
int begin_max_per,
+ int only_sz,
Uint **hpp,
Uint *szp)
{
@@ -1239,24 +1245,29 @@ erts_mseg_info(int ix,
Uint n = 0;
if (hpp || szp) {
-
- if (!atoms_initialized)
- init_atoms(ma);
-
- atoms[0] = am.version;
- atoms[1] = am.options;
- atoms[2] = am.memkind;
- atoms[3] = am.memkind;
+ if (!atoms_initialized)
+ init_atoms(ma);
+ }
+ if (!only_sz) {
+ if (hpp || szp) {
+ atoms[0] = am.version;
+ atoms[1] = am.options;
+ atoms[2] = am.memkind;
+ }
+ values[n++] = info_version(ma, print_to_p, print_to_arg, hpp, szp);
+ values[n++] = info_options(ma, "option ", print_to_p, print_to_arg, hpp, szp);
}
- values[n++] = info_version(ma, print_to_p, print_to_arg, hpp, szp);
- values[n++] = info_options(ma, "option ", print_to_p, print_to_arg, hpp, szp);
ERTS_MSEG_LOCK(ma);
ERTS_DBG_MA_CHK_THR_ACCESS(ma);
- values[n++] = info_memkind(ma, print_to_p, print_to_arg, begin_max_per, hpp, szp);
- if (hpp || szp)
- res = bld_2tup_list(hpp, szp, n, atoms, values);
+ res = info_memkind(ma, print_to_p, print_to_arg, begin_max_per, only_sz, hpp, szp);
+
+ if (!only_sz) {
+ values[n++] = res;
+ if (hpp || szp)
+ res = bld_2tup_list(hpp, szp, n, atoms, values);
+ }
ERTS_MSEG_UNLOCK(ma);
diff --git a/erts/emulator/sys/common/erl_mseg.h b/erts/emulator/sys/common/erl_mseg.h
index 192e5767e4..a43b409e94 100644
--- a/erts/emulator/sys/common/erl_mseg.h
+++ b/erts/emulator/sys/common/erl_mseg.h
@@ -99,7 +99,7 @@ void erts_mseg_init(ErtsMsegInit_t *init);
void erts_mseg_late_init(void); /* Have to be called after all allocators,
threads and timers have been initialized. */
Eterm erts_mseg_info_options(int, int *, void*, Uint **, Uint *);
-Eterm erts_mseg_info(int, int *, void*, int, Uint **, Uint *);
+Eterm erts_mseg_info(int, int *, void*, int, int, Uint **, Uint *);
#endif /* #if HAVE_ERTS_MSEG */
diff --git a/erts/emulator/test/Makefile b/erts/emulator/test/Makefile
index de395dfb97..b580211eff 100644
--- a/erts/emulator/test/Makefile
+++ b/erts/emulator/test/Makefile
@@ -53,6 +53,7 @@ MODULES= \
crypto_SUITE \
ddll_SUITE \
decode_packet_SUITE \
+ dirty_nif_SUITE \
distribution_SUITE \
driver_SUITE \
efile_SUITE \
diff --git a/erts/emulator/test/alloc_SUITE.erl b/erts/emulator/test/alloc_SUITE.erl
index 5fb560d1ec..1f7b499dcb 100644
--- a/erts/emulator/test/alloc_SUITE.erl
+++ b/erts/emulator/test/alloc_SUITE.erl
@@ -102,8 +102,8 @@ erts_mmap_do(Config, SCO, SCRPM, SCRFSD) ->
Self = self(),
Ref = make_ref(),
F = fun() ->
- SI = erlang:system_info({allocator,mseg_alloc}),
- {erts_mmap,EM} = lists:keyfind(erts_mmap, 1, SI),
+ SI = erlang:system_info({allocator,erts_mmap}),
+ {default_mmap,EM} = lists:keyfind(default_mmap, 1, SI),
{supercarrier,SC} = lists:keyfind(supercarrier, 1, EM),
{sizes,Sizes} = lists:keyfind(sizes, 1, SC),
{free_segs,Segs} = lists:keyfind(free_segs,1,SC),
diff --git a/erts/emulator/test/dirty_nif_SUITE.erl b/erts/emulator/test/dirty_nif_SUITE.erl
new file mode 100644
index 0000000000..c3afbc0803
--- /dev/null
+++ b/erts/emulator/test/dirty_nif_SUITE.erl
@@ -0,0 +1,327 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2010-2014. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(dirty_nif_SUITE).
+
+%%-define(line_trace,true).
+-define(CHECK(Exp,Got), check(Exp,Got,?LINE)).
+%%-define(CHECK(Exp,Got), Exp = Got).
+
+-include_lib("common_test/include/ct.hrl").
+
+-export([all/0, suite/0,
+ init_per_suite/1, end_per_suite/1,
+ init_per_testcase/2, end_per_testcase/2,
+ dirty_nif/1, dirty_nif_send/1,
+ dirty_nif_exception/1, call_dirty_nif_exception/1,
+ dirty_scheduler_exit/1, dirty_call_while_terminated/1,
+ dirty_heap_access/1]).
+
+-define(nif_stub,nif_stub_error(?LINE)).
+
+suite() -> [{ct_hooks,[ts_install_cth]}].
+
+all() ->
+ [dirty_nif,
+ dirty_nif_send,
+ dirty_nif_exception,
+ dirty_scheduler_exit,
+ dirty_call_while_terminated,
+ dirty_heap_access].
+
+init_per_suite(Config) ->
+ try erlang:system_info(dirty_cpu_schedulers) of
+ N when is_integer(N), N > 0 ->
+ case lib_loaded() of
+ false ->
+ ok = erlang:load_nif(
+ filename:join(?config(data_dir, Config),
+ "dirty_nif_SUITE"), []);
+ true ->
+ ok
+ end,
+ Config
+ catch _:_ ->
+ {skipped, "No dirty scheduler support"}
+ end.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_testcase(Case, Config) ->
+ [{testcase, Case} | Config].
+
+end_per_testcase(_Case, _Config) ->
+ ok.
+
+dirty_nif(Config) when is_list(Config) ->
+ Val1 = 42,
+ Val2 = "Erlang",
+ Val3 = list_to_binary([Val2, 0]),
+ {Val1, Val2, Val3} = call_dirty_nif(Val1, Val2, Val3),
+ LargeArray = lists:duplicate(1000, ok),
+ LargeArray = call_dirty_nif_zero_args(),
+ ok.
+
+dirty_nif_send(Config) when is_list(Config) ->
+ Parent = self(),
+ Pid = spawn_link(fun() ->
+ Self = self(),
+ {ok, Self} = receive_any(),
+ Parent ! {ok, Self}
+ end),
+ {ok, Pid} = send_from_dirty_nif(Pid),
+ {ok, Pid} = receive_any(),
+ ok.
+
+dirty_nif_exception(Config) when is_list(Config) ->
+ try
+ %% this checks that the expected exception occurs when the
+ %% dirty NIF returns the result of enif_make_badarg
+ %% directly
+ call_dirty_nif_exception(1),
+ ct:fail(expected_badarg)
+ catch
+ error:badarg ->
+ [{?MODULE,call_dirty_nif_exception,[1],_}|_] =
+ erlang:get_stacktrace(),
+ ok
+ end,
+ try
+ %% this checks that the expected exception occurs when the
+ %% dirty NIF calls enif_make_badarg at some point but then
+ %% returns a value that isn't an exception
+ call_dirty_nif_exception(0),
+ ct:fail(expected_badarg)
+ catch
+ error:badarg ->
+ [{?MODULE,call_dirty_nif_exception,[0],_}|_] =
+ erlang:get_stacktrace(),
+ ok
+ end,
+ %% this checks that a dirty NIF can raise various terms as
+ %% exceptions
+ ok = nif_raise_exceptions(call_dirty_nif_exception).
+
+nif_raise_exceptions(NifFunc) ->
+ ExcTerms = [{error, test}, "a string", <<"a binary">>,
+ 42, [1,2,3,4,5], [{p,1},{p,2},{p,3}]],
+ lists:foldl(fun(Term, ok) ->
+ try
+ erlang:apply(?MODULE,NifFunc,[Term]),
+ ct:fail({expected,Term})
+ catch
+ error:Term ->
+ [{?MODULE,NifFunc,[Term],_}|_] = erlang:get_stacktrace(),
+ ok
+ end
+ end, ok, ExcTerms).
+
+dirty_scheduler_exit(Config) when is_list(Config) ->
+ {ok, Node} = start_node(Config, "+SDio 1"),
+ Path = proplists:get_value(data_dir, Config),
+ NifLib = filename:join(Path, atom_to_list(?MODULE)),
+ [ok] = mcall(Node,
+ [fun() ->
+ ok = erlang:load_nif(NifLib, []),
+ Start = erlang:monotonic_time(milli_seconds),
+ ok = test_dirty_scheduler_exit(),
+ End = erlang:monotonic_time(milli_seconds),
+ io:format("Time=~p ms~n", [End-Start]),
+ ok
+ end]),
+ stop_node(Node),
+ ok.
+
+test_dirty_scheduler_exit() ->
+ process_flag(trap_exit,true),
+ test_dse(10,[]).
+test_dse(0,Pids) ->
+ timer:sleep(100),
+ kill_dse(Pids,[]);
+test_dse(N,Pids) ->
+ Pid = spawn_link(fun dirty_sleeper/0),
+ test_dse(N-1,[Pid|Pids]).
+
+kill_dse([],Killed) ->
+ wait_dse(Killed);
+kill_dse([Pid|Pids],AlreadyKilled) ->
+ exit(Pid,kill),
+ kill_dse(Pids,[Pid|AlreadyKilled]).
+
+wait_dse([]) ->
+ ok;
+wait_dse([Pid|Pids]) ->
+ receive
+ {'EXIT',Pid,Reason} ->
+ killed = Reason
+ end,
+ wait_dse(Pids).
+
+dirty_call_while_terminated(Config) when is_list(Config) ->
+ Me = self(),
+ Bin = list_to_binary(lists:duplicate(4711, $r)),
+ {value, {BinAddr, 4711, 1}} = lists:keysearch(4711, 2,
+ element(2,
+ process_info(self(),
+ binary))),
+ {Dirty, DM} = spawn_opt(fun () ->
+ dirty_call_while_terminated_nif(Me),
+ blipp:blupp(Bin)
+ end,
+ [monitor,link]),
+ receive {dirty_alive, Pid} -> ok end,
+ {value, {BinAddr, 4711, 2}} = lists:keysearch(4711, 2,
+ element(2,
+ process_info(self(),
+ binary))),
+ Reason = die_dirty_process,
+ OT = process_flag(trap_exit, true),
+ exit(Dirty, Reason),
+ receive
+ {'DOWN', DM, process, Dirty, R0} ->
+ R0 = Reason
+ end,
+ receive
+ {'EXIT', Dirty, R1} ->
+ R1 = Reason
+ end,
+ undefined = process_info(Dirty),
+ undefined = process_info(Dirty, status),
+ false = erlang:is_process_alive(Dirty),
+ false = lists:member(Dirty, processes()),
+ %% The binary is still referred to by the Dirty process and has not yet
+ %% been cleaned up, since the dirty NIF has not yet returned...
+ {value, {BinAddr, 4711, 2}} = lists:keysearch(4711, 2,
+ element(2,
+ process_info(self(),
+ binary))),
+ receive after 2000 -> ok end,
+ receive
+ Msg ->
+ ct:fail({unexpected_message, Msg})
+ after
+ 0 ->
+ ok
+ end,
+ {value, {BinAddr, 4711, 1}} = lists:keysearch(4711, 2,
+ element(2,
+ process_info(self(),
+ binary))),
+ process_flag(trap_exit, OT),
+ ok.
+
+dirty_heap_access(Config) when is_list(Config) ->
+ {ok, Node} = start_node(Config),
+ Me = self(),
+ RGL = rpc:call(Node,erlang,whereis,[init]),
+ Ref = rpc:call(Node,erlang,make_ref,[]),
+ Dirty = spawn_link(fun () ->
+ Res = dirty_heap_access_nif(Ref),
+ garbage_collect(),
+ Me ! {self(), Res},
+ receive after infinity -> ok end
+ end),
+ {N, R} = access_dirty_heap(Dirty, RGL, 0, 0),
+ receive
+ {Pid, Res} ->
+ 1000 = length(Res),
+ lists:foreach(fun (X) -> Ref = X end, Res)
+ end,
+ unlink(Dirty),
+ exit(Dirty, kill),
+ stop_node(Node),
+ {comment, integer_to_list(N) ++ " GL change loops; "
+ ++ integer_to_list(R) ++ " while running dirty"}.
+
+access_dirty_heap(Dirty, RGL, N, R) ->
+ case process_info(Dirty, status) of
+ {status, waiting} ->
+ {N, R};
+ {status, Status} ->
+ {group_leader, GL} = process_info(Dirty, group_leader),
+ true = group_leader(RGL, Dirty),
+ {group_leader, RGL} = process_info(Dirty, group_leader),
+ true = group_leader(GL, Dirty),
+ {group_leader, GL} = process_info(Dirty, group_leader),
+ access_dirty_heap(Dirty, RGL, N+1, case Status of
+ running ->
+ R+1;
+ _ ->
+ R
+ end)
+ end.
+
+%%
+%% Internal...
+%%
+
+receive_any() ->
+ receive M -> M end.
+
+start_node(Config) ->
+ start_node(Config, "").
+
+start_node(Config, Args) when is_list(Config) ->
+ Pa = filename:dirname(code:which(?MODULE)),
+ Name = list_to_atom(atom_to_list(?MODULE)
+ ++ "-"
+ ++ atom_to_list(proplists:get_value(testcase, Config))
+ ++ "-"
+ ++ integer_to_list(erlang:system_time(seconds))
+ ++ "-"
+ ++ integer_to_list(erlang:unique_integer([positive]))),
+ test_server:start_node(Name, slave, [{args, "-pa "++Pa++" "++Args}]).
+
+stop_node(Node) ->
+ test_server:stop_node(Node).
+
+mcall(Node, Funs) ->
+ Parent = self(),
+ Refs = lists:map(fun (Fun) ->
+ Ref = make_ref(),
+ spawn_link(Node,
+ fun () ->
+ Res = Fun(),
+ unlink(Parent),
+ Parent ! {Ref, Res}
+ end),
+ Ref
+ end, Funs),
+ lists:map(fun (Ref) ->
+ receive
+ {Ref, Res} ->
+ Res
+ end
+ end, Refs).
+
+%% The NIFs:
+lib_loaded() -> false.
+call_nif_schedule(_,_) -> ?nif_stub.
+call_dirty_nif(_,_,_) -> ?nif_stub.
+send_from_dirty_nif(_) -> ?nif_stub.
+call_dirty_nif_exception(_) -> ?nif_stub.
+call_dirty_nif_zero_args() -> ?nif_stub.
+dirty_call_while_terminated_nif(_) -> ?nif_stub.
+dirty_sleeper() -> ?nif_stub.
+dirty_heap_access_nif(_) -> ?nif_stub.
+
+nif_stub_error(Line) ->
+ exit({nif_not_loaded,module,?MODULE,line,Line}).
diff --git a/erts/emulator/test/dirty_nif_SUITE_data/Makefile.src b/erts/emulator/test/dirty_nif_SUITE_data/Makefile.src
new file mode 100644
index 0000000000..e9301753b0
--- /dev/null
+++ b/erts/emulator/test/dirty_nif_SUITE_data/Makefile.src
@@ -0,0 +1,6 @@
+
+NIF_LIBS = dirty_nif_SUITE@dll@
+
+all: $(NIF_LIBS)
+
+@SHLIB_RULES@
diff --git a/erts/emulator/test/dirty_nif_SUITE_data/dirty_nif_SUITE.c b/erts/emulator/test/dirty_nif_SUITE_data/dirty_nif_SUITE.c
new file mode 100644
index 0000000000..2013c88167
--- /dev/null
+++ b/erts/emulator/test/dirty_nif_SUITE_data/dirty_nif_SUITE.c
@@ -0,0 +1,223 @@
+/*
+ * %CopyrightBegin%
+ *
+ * Copyright Ericsson AB 2009-2014. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * %CopyrightEnd%
+ */
+#include "erl_nif.h"
+#include <assert.h>
+#ifndef __WIN32__
+#include <unistd.h>
+#endif
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ return 0;
+}
+
+static ERL_NIF_TERM lib_loaded(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ return enif_make_atom(env, "true");
+}
+
+static int have_dirty_schedulers(void)
+{
+ ErlNifSysInfo si;
+ enif_system_info(&si, sizeof(si));
+ return si.dirty_scheduler_support;
+}
+
+static ERL_NIF_TERM dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ int n;
+ char s[10];
+ ErlNifBinary b;
+ if (have_dirty_schedulers()) {
+ assert(enif_is_on_dirty_scheduler(env));
+ }
+ assert(argc == 3);
+ enif_get_int(env, argv[0], &n);
+ enif_get_string(env, argv[1], s, sizeof s, ERL_NIF_LATIN1);
+ enif_inspect_binary(env, argv[2], &b);
+ return enif_make_tuple3(env,
+ enif_make_int(env, n),
+ enif_make_string(env, s, ERL_NIF_LATIN1),
+ enif_make_binary(env, &b));
+}
+
+static ERL_NIF_TERM call_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ int n;
+ char s[10];
+ ErlNifBinary b;
+ assert(!enif_is_on_dirty_scheduler(env));
+ if (argc != 3)
+ return enif_make_badarg(env);
+ if (have_dirty_schedulers()) {
+ if (enif_get_int(env, argv[0], &n) &&
+ enif_get_string(env, argv[1], s, sizeof s, ERL_NIF_LATIN1) &&
+ enif_inspect_binary(env, argv[2], &b))
+ return enif_schedule_nif(env, "call_dirty_nif", ERL_NIF_DIRTY_JOB_CPU_BOUND, dirty_nif, argc, argv);
+ else
+ return enif_make_badarg(env);
+ } else {
+ return dirty_nif(env, argc, argv);
+ }
+}
+
+static ERL_NIF_TERM send_from_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ ERL_NIF_TERM result;
+ ErlNifPid pid;
+ ErlNifEnv* menv;
+ int res;
+
+ if (!enif_get_local_pid(env, argv[0], &pid))
+ return enif_make_badarg(env);
+ result = enif_make_tuple2(env, enif_make_atom(env, "ok"), enif_make_pid(env, &pid));
+ menv = enif_alloc_env();
+ res = enif_send(env, &pid, menv, result);
+ enif_free_env(menv);
+ if (!res)
+ return enif_make_badarg(env);
+ else
+ return result;
+}
+
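+/* Called from Erlang with a single argument. An integer argument below 2
+ * makes the NIF reschedule itself with 255 arguments and then keep
+ * rescheduling with one argument fewer until two remain; the final call
+ * either returns enif_make_badarg() directly (argument 1) or calls it and
+ * ignores the result (argument 0). Any other argument is raised as an
+ * exception. */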
+static ERL_NIF_TERM call_dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ switch (argc) {
+ case 1: {
+ int arg;
+ if (enif_get_int(env, argv[0], &arg) && arg < 2) {
+ ERL_NIF_TERM args[255];
+ int i;
+ args[0] = argv[0];
+ for (i = 1; i < 255; i++)
+ args[i] = enif_make_int(env, i);
+ return enif_schedule_nif(env, "call_dirty_nif_exception", ERL_NIF_DIRTY_JOB_CPU_BOUND,
+ call_dirty_nif_exception, 255, args);
+ } else {
+ return enif_raise_exception(env, argv[0]);
+ }
+ }
+ case 2: {
+ int return_badarg_directly;
+ enif_get_int(env, argv[0], &return_badarg_directly);
+ assert(return_badarg_directly == 1 || return_badarg_directly == 0);
+ if (return_badarg_directly)
+ return enif_make_badarg(env);
+ else {
+ /* ignore return value */ enif_make_badarg(env);
+ return enif_make_atom(env, "ok");
+ }
+ }
+ default:
+ return enif_schedule_nif(env, "call_dirty_nif_exception", ERL_NIF_DIRTY_JOB_CPU_BOUND,
+ call_dirty_nif_exception, argc-1, argv);
+ }
+}
+
+static ERL_NIF_TERM call_dirty_nif_zero_args(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ int i;
+ ERL_NIF_TERM result[1000];
+ ERL_NIF_TERM ok = enif_make_atom(env, "ok");
+ assert(argc == 0);
+ for (i = 0; i < sizeof(result)/sizeof(*result); i++) {
+ result[i] = ok;
+ }
+ return enif_make_list_from_array(env, result, i);
+}
+
+static ERL_NIF_TERM
+dirty_sleeper(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ assert(enif_is_on_dirty_scheduler(env));
+#ifdef __WIN32__
+ Sleep(6000);
+#else
+ sleep(6);
+#endif
+ return enif_make_atom(env, "ok");
+}
+
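+/* Sends {dirty_alive, Pid} to the pid in argv[0], busy-waits on the dirty
+ * scheduler until the calling process has been killed, then sends
+ * {dirty_dead, Pid} and sleeps briefly before returning. */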
+static ERL_NIF_TERM dirty_call_while_terminated_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ ErlNifPid self;
+ ERL_NIF_TERM result, self_term;
+ ErlNifPid to;
+ ErlNifEnv* menv;
+ int res;
+
+ if (!enif_get_local_pid(env, argv[0], &to))
+ return enif_make_badarg(env);
+
+ if (!enif_self(env, &self))
+ return enif_make_badarg(env);
+
+ self_term = enif_make_pid(env, &self);
+
+ result = enif_make_tuple2(env, enif_make_atom(env, "dirty_alive"), self_term);
+ menv = enif_alloc_env();
+ res = enif_send(env, &to, menv, result);
+ enif_free_env(menv);
+ if (!res)
+ return enif_make_badarg(env);
+
+ /* Wait until we have been killed */
+ while (enif_is_process_alive(env, &self))
+ ;
+
+ result = enif_make_tuple2(env, enif_make_atom(env, "dirty_dead"), self_term);
+ menv = enif_alloc_env();
+ res = enif_send(env, &to, menv, result);
+ enif_free_env(menv);
+
+#ifdef __WIN32__
+ Sleep(1000);
+#else
+ sleep(1);
+#endif
+
+ return enif_make_atom(env, "ok");
+}
+
+static ERL_NIF_TERM dirty_heap_access_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ ERL_NIF_TERM res = enif_make_list(env, 0);
+ int i;
+ assert(enif_is_on_dirty_scheduler(env));
+ for (i = 0; i < 1000; i++)
+ res = enif_make_list_cell(env, enif_make_copy(env, argv[0]), res);
+
+ return res;
+}
+
+
+static ErlNifFunc nif_funcs[] =
+{
+ {"lib_loaded", 0, lib_loaded},
+ {"call_dirty_nif", 3, call_dirty_nif},
+ {"send_from_dirty_nif", 1, send_from_dirty_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND},
+ {"call_dirty_nif_exception", 1, call_dirty_nif_exception, ERL_NIF_DIRTY_JOB_IO_BOUND},
+ {"call_dirty_nif_zero_args", 0, call_dirty_nif_zero_args, ERL_NIF_DIRTY_JOB_CPU_BOUND},
+ {"dirty_sleeper", 0, dirty_sleeper, ERL_NIF_DIRTY_JOB_IO_BOUND},
+ {"dirty_call_while_terminated_nif", 1, dirty_call_while_terminated_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND},
+ {"dirty_heap_access_nif", 1, dirty_heap_access_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND}
+};
+
+ERL_NIF_INIT(dirty_nif_SUITE,nif_funcs,load,NULL,NULL,NULL)
diff --git a/erts/emulator/test/gc_SUITE.erl b/erts/emulator/test/gc_SUITE.erl
index 79c229a34d..8a600b7d9f 100644
--- a/erts/emulator/test/gc_SUITE.erl
+++ b/erts/emulator/test/gc_SUITE.erl
@@ -25,13 +25,13 @@
-include_lib("common_test/include/ct.hrl").
-export([all/0, suite/0]).
--export([grow_heap/1, grow_stack/1, grow_stack_heap/1]).
+-export([grow_heap/1, grow_stack/1, grow_stack_heap/1, max_heap_size/1]).
suite() ->
[{ct_hooks,[ts_install_cth]}].
all() ->
- [grow_heap, grow_stack, grow_stack_heap].
+ [grow_heap, grow_stack, grow_stack_heap, max_heap_size].
%% Produce a growing list of elements,
@@ -163,3 +163,30 @@ show_heap(String) ->
{stack_size, SSize}=process_info(self(), stack_size),
io:format("Heap/Stack "++String++"~p/~p", [HSize, SSize]).
+%% Test that doing a remote GC that triggers the max heap size
+%% kills the process.
+max_heap_size(_Config) ->
+
+ Pid = spawn_opt(fun long_receive/0,[{max_heap_size, 1024},
+ {message_queue_data, on_heap}]),
+ [Pid ! lists:duplicate(I,I) || I <- lists:seq(1,100)],
+ Ref = erlang:monitor(process, Pid),
+
+ %% Force messages to be viewed as part of heap
+ erlang:process_info(Pid, messages),
+
+ %% Do the GC that triggers max heap
+ erlang:garbage_collect(Pid),
+
+ %% Verify that max heap was triggered
+ receive
+ {'DOWN', Ref, process, Pid, killed} -> ok
+ after 5000 ->
+ ct:fail({process_did_not_die, Pid, erlang:process_info(Pid)})
+ end.
+
+long_receive() ->
+ receive
+ after 10000 ->
+ ok
+ end.
diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl
index a7767132ee..a0e9f1bad6 100644
--- a/erts/emulator/test/nif_SUITE.erl
+++ b/erts/emulator/test/nif_SUITE.erl
@@ -38,15 +38,15 @@
is_checks/1,
get_length/1, make_atom/1, make_string/1, reverse_list_test/1,
otp_9828/1,
- otp_9668/1, consume_timeslice/1, dirty_nif/1, dirty_nif_send/1,
- dirty_nif_exception/1, call_dirty_nif_exception/1, nif_schedule/1,
+ otp_9668/1, consume_timeslice/1, nif_schedule/1,
nif_exception/1, call_nif_exception/1,
nif_nan_and_inf/1, nif_atom_too_long/1,
nif_monotonic_time/1, nif_time_offset/1, nif_convert_time_unit/1,
nif_now_time/1, nif_cpu_time/1, nif_unique_integer/1,
nif_is_process_alive/1, nif_is_port_alive/1,
nif_term_to_binary/1, nif_binary_to_term/1,
- nif_port_command/1
+ nif_port_command/1,
+ nif_snprintf/1
]).
-export([many_args_100/100]).
@@ -75,14 +75,13 @@ all() ->
make_string,reverse_list_test,
otp_9828,
otp_9668, consume_timeslice,
- nif_schedule, dirty_nif, dirty_nif_send, dirty_nif_exception,
- nif_exception, nif_nan_and_inf, nif_atom_too_long,
+ nif_schedule, nif_exception, nif_nan_and_inf, nif_atom_too_long,
nif_monotonic_time, nif_time_offset, nif_convert_time_unit,
nif_now_time, nif_cpu_time, nif_unique_integer,
nif_is_process_alive, nif_is_port_alive,
nif_term_to_binary, nif_binary_to_term,
- nif_port_command
- ].
+ nif_port_command,
+ nif_snprintf].
init_per_testcase(_Case, Config) ->
Config.
@@ -1541,76 +1540,6 @@ nif_schedule(Config) when is_list(Config) ->
end,
ok.
-dirty_nif(Config) when is_list(Config) ->
- try erlang:system_info(dirty_cpu_schedulers) of
- N when is_integer(N) ->
- ensure_lib_loaded(Config),
- Val1 = 42,
- Val2 = "Erlang",
- Val3 = list_to_binary([Val2, 0]),
- {Val1, Val2, Val3} = call_dirty_nif(Val1, Val2, Val3),
- LargeArray = lists:duplicate(1000, ok),
- LargeArray = call_dirty_nif_zero_args(),
- ok
- catch
- error:badarg ->
- {skipped,"No dirty scheduler support"}
- end.
-
-dirty_nif_send(Config) when is_list(Config) ->
- try erlang:system_info(dirty_cpu_schedulers) of
- N when is_integer(N) ->
- ensure_lib_loaded(Config),
- Parent = self(),
- Pid = spawn_link(fun() ->
- Self = self(),
- {ok, Self} = receive_any(),
- Parent ! {ok, Self}
- end),
- {ok, Pid} = send_from_dirty_nif(Pid),
- {ok, Pid} = receive_any(),
- ok
- catch
- error:badarg ->
- {skipped,"No dirty scheduler support"}
- end.
-
-dirty_nif_exception(Config) when is_list(Config) ->
- try erlang:system_info(dirty_cpu_schedulers) of
- N when is_integer(N) ->
- ensure_lib_loaded(Config),
- try
- %% this checks that the expected exception occurs when the
- %% dirty NIF returns the result of enif_make_badarg
- %% directly
- call_dirty_nif_exception(1),
- ct:fail(expected_badarg)
- catch
- error:badarg ->
- [{?MODULE,call_dirty_nif_exception,[1],_}|_] =
- erlang:get_stacktrace(),
- ok
- end,
- try
- %% this checks that the expected exception occurs when the
- %% dirty NIF calls enif_make_badarg at some point but then
- %% returns a value that isn't an exception
- call_dirty_nif_exception(0),
- ct:fail(expected_badarg)
- catch
- error:badarg ->
- [{?MODULE,call_dirty_nif_exception,[0],_}|_] =
- erlang:get_stacktrace(),
- ok
- end,
- %% this checks that a dirty NIF can raise various terms as
- %% exceptions
- ok = nif_raise_exceptions(call_dirty_nif_exception)
- catch
- error:badarg ->
- {skipped,"No dirty scheduler support"}
- end.
-
nif_exception(Config) when is_list(Config) ->
ensure_lib_loaded(Config),
try
@@ -2024,9 +1953,18 @@ nif_port_command(Config) ->
port_close(Port),
{'EXIT', {badarg, _}} = (catch port_command_nif(Port, "hello\n")),
+ ok.
+nif_snprintf(Config) ->
+ ensure_lib_loaded(Config),
+ <<"ok",0>> = format_term_nif(3,ok),
+ <<"o",0>> = format_term_nif(2,ok),
+ <<"\"hello world\"",0>> = format_term_nif(14,"hello world"),
+ <<"{{hello,world,-33},3.14",_/binary>> = format_term_nif(50,{{hello,world, -33}, 3.14, self()}),
+ <<"{{hello,world,-33},",0>> = format_term_nif(20,{{hello,world, -33}, 3.14, self()}),
ok.
+
%% The NIFs:
lib_version() -> undefined.
call_history() -> ?nif_stub.
@@ -2078,10 +2016,6 @@ otp_9668_nif(_) -> ?nif_stub.
otp_9828_nif(_) -> ?nif_stub.
consume_timeslice_nif(_,_) -> ?nif_stub.
call_nif_schedule(_,_) -> ?nif_stub.
-call_dirty_nif(_,_,_) -> ?nif_stub.
-send_from_dirty_nif(_) -> ?nif_stub.
-call_dirty_nif_exception(_) -> ?nif_stub.
-call_dirty_nif_zero_args() -> ?nif_stub.
call_nif_exception(_) -> ?nif_stub.
call_nif_nan_or_inf(_) -> ?nif_stub.
call_nif_atom_too_long(_) -> ?nif_stub.
@@ -2091,6 +2025,7 @@ is_port_alive_nif(_) -> ?nif_stub.
term_to_binary_nif(_, _) -> ?nif_stub.
binary_to_term_nif(_, _, _) -> ?nif_stub.
port_command_nif(_, _) -> ?nif_stub.
+format_term_nif(_,_) -> ?nif_stub.
%% maps
is_map_nif(_) -> ?nif_stub.
diff --git a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
index 11e5dab58e..13846244d4 100644
--- a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
+++ b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
@@ -23,6 +23,9 @@
#include <string.h>
#include <assert.h>
#include <limits.h>
+#ifndef __WIN32__
+#include <unistd.h>
+#endif
#include "nif_mod.h"
@@ -1574,120 +1577,6 @@ static ERL_NIF_TERM call_nif_schedule(ErlNifEnv* env, int argc, const ERL_NIF_TE
return result;
}
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
-
-static int have_dirty_schedulers(void)
-{
- ErlNifSysInfo si;
- enif_system_info(&si, sizeof(si));
- return si.dirty_scheduler_support;
-}
-
-static ERL_NIF_TERM dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
-{
- int n;
- char s[10];
- ErlNifBinary b;
- ERL_NIF_TERM result;
- if (have_dirty_schedulers()) {
- assert(enif_is_on_dirty_scheduler(env));
- }
- assert(argc == 3);
- enif_get_int(env, argv[0], &n);
- enif_get_string(env, argv[1], s, sizeof s, ERL_NIF_LATIN1);
- enif_inspect_binary(env, argv[2], &b);
- return enif_make_tuple3(env,
- enif_make_int(env, n),
- enif_make_string(env, s, ERL_NIF_LATIN1),
- enif_make_binary(env, &b));
-}
-
-static ERL_NIF_TERM call_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
-{
- int n;
- char s[10];
- ErlNifBinary b;
- assert(!enif_is_on_dirty_scheduler(env));
- if (argc != 3)
- return enif_make_badarg(env);
- if (have_dirty_schedulers()) {
- if (enif_get_int(env, argv[0], &n) &&
- enif_get_string(env, argv[1], s, sizeof s, ERL_NIF_LATIN1) &&
- enif_inspect_binary(env, argv[2], &b))
- return enif_schedule_nif(env, "call_dirty_nif", ERL_NIF_DIRTY_JOB_CPU_BOUND, dirty_nif, argc, argv);
- else
- return enif_make_badarg(env);
- } else {
- return dirty_nif(env, argc, argv);
- }
-}
-
-static ERL_NIF_TERM send_from_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
-{
- ERL_NIF_TERM result;
- ErlNifPid pid;
- ErlNifEnv* menv;
- int res;
-
- if (!enif_get_local_pid(env, argv[0], &pid))
- return enif_make_badarg(env);
- result = enif_make_tuple2(env, enif_make_atom(env, "ok"), enif_make_pid(env, &pid));
- menv = enif_alloc_env();
- res = enif_send(env, &pid, menv, result);
- enif_free_env(menv);
- if (!res)
- return enif_make_badarg(env);
- else
- return result;
-}
-
-static ERL_NIF_TERM call_dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
-{
- switch (argc) {
- case 1: {
- int arg;
- if (enif_get_int(env, argv[0], &arg) && arg < 2) {
- ERL_NIF_TERM args[255];
- int i;
- args[0] = argv[0];
- for (i = 1; i < 255; i++)
- args[i] = enif_make_int(env, i);
- return enif_schedule_nif(env, "call_dirty_nif_exception", ERL_NIF_DIRTY_JOB_CPU_BOUND,
- call_dirty_nif_exception, 255, args);
- } else {
- return enif_raise_exception(env, argv[0]);
- }
- }
- case 2: {
- int return_badarg_directly;
- enif_get_int(env, argv[0], &return_badarg_directly);
- assert(return_badarg_directly == 1 || return_badarg_directly == 0);
- if (return_badarg_directly)
- return enif_make_badarg(env);
- else {
- /* ignore return value */ enif_make_badarg(env);
- return enif_make_atom(env, "ok");
- }
- }
- default:
- return enif_schedule_nif(env, "call_dirty_nif_exception", ERL_NIF_DIRTY_JOB_CPU_BOUND,
- call_dirty_nif_exception, argc-1, argv);
- }
-}
-
-static ERL_NIF_TERM call_dirty_nif_zero_args(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
-{
- int i;
- ERL_NIF_TERM result[1000];
- ERL_NIF_TERM ok = enif_make_atom(env, "ok");
- assert(argc == 0);
- for (i = 0; i < sizeof(result)/sizeof(*result); i++) {
- result[i] = ok;
- }
- return enif_make_list_from_array(env, result, i);
-}
-#endif
-
/*
* If argv[0] is the integer 0, call enif_make_badarg, but don't return its
* return value. Instead, return ok. Result should still be a badarg
@@ -2109,6 +1998,23 @@ static ERL_NIF_TERM port_command(ErlNifEnv* env, int argc, const ERL_NIF_TERM ar
return atom_true;
}
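+/* Formats argv[1] with enif_snprintf() ("%T") into a freshly allocated
+ * binary of argv[0] bytes, exercising truncation of the printed term. */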
+static ERL_NIF_TERM format_term(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ ErlNifBinary obin;
+ unsigned int size;
+
+ if (!enif_get_uint(env, argv[0], &size))
+ return enif_make_badarg(env);
+ if (!enif_alloc_binary(size,&obin))
+ return enif_make_badarg(env);
+
+ if (enif_snprintf((char*)obin.data, (size_t)size, "%T", argv[1]) < 0)
+ return atom_false;
+
+ return enif_make_binary(env,&obin);
+}
+
+
static ErlNifFunc nif_funcs[] =
{
{"lib_version", 0, lib_version},
@@ -2162,12 +2068,6 @@ static ErlNifFunc nif_funcs[] =
{"otp_9828_nif", 1, otp_9828_nif},
{"consume_timeslice_nif", 2, consume_timeslice_nif},
{"call_nif_schedule", 2, call_nif_schedule},
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
- {"call_dirty_nif", 3, call_dirty_nif},
- {"send_from_dirty_nif", 1, send_from_dirty_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND},
- {"call_dirty_nif_exception", 1, call_dirty_nif_exception, ERL_NIF_DIRTY_JOB_IO_BOUND},
- {"call_dirty_nif_zero_args", 0, call_dirty_nif_zero_args, ERL_NIF_DIRTY_JOB_CPU_BOUND},
-#endif
{"call_nif_exception", 1, call_nif_exception},
{"call_nif_nan_or_inf", 1, call_nif_nan_or_inf},
{"call_nif_atom_too_long", 1, call_nif_atom_too_long},
@@ -2190,7 +2090,8 @@ static ErlNifFunc nif_funcs[] =
{"is_port_alive_nif", 1, is_port_alive},
{"term_to_binary_nif", 2, term_to_binary},
{"binary_to_term_nif", 3, binary_to_term},
- {"port_command_nif", 2, port_command}
+ {"port_command_nif", 2, port_command},
+ {"format_term_nif", 2, format_term}
};
ERL_NIF_INIT(nif_SUITE,nif_funcs,load,reload,upgrade,unload)
diff --git a/erts/emulator/test/port_trace_SUITE.erl b/erts/emulator/test/port_trace_SUITE.erl
index bfdea0761b..5d9a75bcd3 100644
--- a/erts/emulator/test/port_trace_SUITE.erl
+++ b/erts/emulator/test/port_trace_SUITE.erl
@@ -31,7 +31,8 @@
failure_atom/1, failure_posix/1,
failure/1, output_term/1,
driver_output_term/1,
- send_term/1, driver_send_term/1]).
+ send_term/1, driver_send_term/1,
+ driver_remote_send_term/1]).
-define(ECHO_DRV_NOOP, 0).
-define(ECHO_DRV_OUTPUT, 1).
@@ -48,6 +49,7 @@
-define(ECHO_DRV_SEND_TERM, 12).
-define(ECHO_DRV_DRIVER_SEND_TERM, 13).
-define(ECHO_DRV_SAVE_CALLER, 14).
+-define(ECHO_DRV_REMOTE_SEND_TERM, 15).
suite() -> [{ct_hooks,[ts_install_cth]},
{timetrap, {seconds, 30}}].
@@ -60,7 +62,8 @@ all() ->
failure_atom, failure_posix,
failure, output_term,
driver_output_term,
- send_term, driver_send_term].
+ send_term, driver_send_term,
+ driver_remote_send_term].
init_per_suite(Config) ->
Config.
@@ -75,6 +78,13 @@ end_per_group(_GroupName, Config) ->
Config.
+init_per_testcase(driver_remote_send_term, Config) ->
+ case erlang:system_info(smp_support) of
+ false ->
+ {skip,"Only supported on smp systems"};
+ true ->
+ init_per_testcase(driver_remote_send_term_smp, Config)
+ end;
init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
erlang:trace(all, false, [all]),
os:unsetenv("OUTPUTV"),
@@ -543,6 +553,34 @@ driver_send_term(_Config) ->
ok.
+%% Test that driver_send_term from a non-scheduler thread does not
+%% generate trace messages.
+driver_remote_send_term(_Config) ->
+
+ Flags = [send],
+ {Prt, S} = trace_and_open(Flags,[binary]),
+
+ erlang:port_command(Prt, <<?ECHO_DRV_REMOTE_SEND_TERM, 123456:32>>),
+ recv({echo, Prt, <<123456:32>>}),
+ [] = flush(),
+
+ Pid = spawn_link(
+ fun() ->
+ erlang:port_command(Prt, <<?ECHO_DRV_SAVE_CALLER>>),
+ S ! ok,
+ receive M -> S ! M end
+ end),
+ recv(ok),
+ erlang:trace(Pid, true, ['receive']),
+
+ erlang:port_command(Prt, <<?ECHO_DRV_REMOTE_SEND_TERM, 123456:32>>),
+ recv({echo, Prt, <<123456:32>>}),
+ [{trace, Pid, 'receive', {echo, Prt, <<123456:32>>}}] = flush(),
+
+ close(Prt, Flags),
+
+ ok.
+
%%%%%%%%%%%%%%%%%%%
%% Helper functions
%%%%%%%%%%%%%%%%%%%
@@ -598,7 +636,7 @@ f(From) ->
end.
recv(Msg) ->
- receive Msg -> ok after 100 -> ct:fail({did_not_get_data,Msg,flush()}) end.
+ receive Msg -> ok after 1000 -> ct:fail({did_not_get_data,Msg,flush()}) end.
load_drv(Config) ->
Path = proplists:get_value(data_dir, Config),
diff --git a/erts/emulator/test/port_trace_SUITE_data/echo_drv.c b/erts/emulator/test/port_trace_SUITE_data/echo_drv.c
index b5ae9389b4..b545523192 100644
--- a/erts/emulator/test/port_trace_SUITE_data/echo_drv.c
+++ b/erts/emulator/test/port_trace_SUITE_data/echo_drv.c
@@ -14,6 +14,13 @@ typedef struct _erl_drv_data {
ErlDrvTermData caller;
} EchoDrvData;
+struct remote_send_term {
+ char *buf;
+ int len;
+ ErlDrvTermData port;
+ ErlDrvTermData caller;
+};
+
#define ECHO_DRV_NOOP 0
#define ECHO_DRV_OUTPUT 1
#define ECHO_DRV_OUTPUT2 2
@@ -29,6 +36,7 @@ typedef struct _erl_drv_data {
#define ECHO_DRV_SEND_TERM 12
#define ECHO_DRV_DRIVER_SEND_TERM 13
#define ECHO_DRV_SAVE_CALLER 14
+#define ECHO_DRV_REMOTE_SEND_TERM 15
/* -------------------------------------------------------------------------
@@ -78,6 +86,8 @@ static ErlDrvEntry echo_drv_entry = {
NULL
};
+static void send_term_thread(void *);
+
/* -------------------------------------------------------------------------
** Entry functions
**/
@@ -200,6 +210,18 @@ static void echo_drv_output(ErlDrvData drv_data, char *buf, ErlDrvSizeT len) {
}
break;
}
+ case ECHO_DRV_REMOTE_SEND_TERM:
+ {
+ ErlDrvTid tid;
+ struct remote_send_term *t = malloc(sizeof(struct remote_send_term));
+ t->len = len-1;
+ t->buf = malloc(len-1);
+ t->port = driver_mk_port(port);
+ t->caller = data_p->caller;
+ memcpy(t->buf, buf+1, t->len);
+ erl_drv_thread_create("tmp_thread", &tid, send_term_thread, t, NULL);
+ break;
+ }
case ECHO_DRV_SAVE_CALLER:
data_p->caller = driver_caller(port);
break;
@@ -239,3 +261,17 @@ static ErlDrvSSizeT echo_drv_call(ErlDrvData drv_data,
memcpy(*rbuf, buf+command, len-command);
return len-command;
}
+
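+/* Runs in a temporary non-scheduler thread: builds {echo, Port, Binary}
+ * from the saved request and delivers it with erl_drv_send_term(). */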
+static void send_term_thread(void *a)
+{
+ struct remote_send_term *t = (struct remote_send_term*)a;
+ ErlDrvTermData term[] = {
+ ERL_DRV_ATOM, driver_mk_atom("echo"),
+ ERL_DRV_PORT, t->port,
+ ERL_DRV_BUF2BINARY, (ErlDrvTermData)(t->buf),
+ (ErlDrvTermData)(t->len),
+ ERL_DRV_TUPLE, 3};
+ erl_drv_send_term(t->port, t->caller,
+ term, sizeof(term) / sizeof(ErlDrvTermData));
+ return;
+}
diff --git a/erts/emulator/test/process_SUITE.erl b/erts/emulator/test/process_SUITE.erl
index 61a68f9759..eaa4026a8a 100644
--- a/erts/emulator/test/process_SUITE.erl
+++ b/erts/emulator/test/process_SUITE.erl
@@ -46,7 +46,7 @@
process_status_exiting/1,
otp_4725/1, bad_register/1, garbage_collect/1, otp_6237/1,
process_info_messages/1, process_flag_badarg/1, process_flag_heap_size/1,
- spawn_opt_heap_size/1,
+ spawn_opt_heap_size/1, spawn_opt_max_heap_size/1,
processes_large_tab/1, processes_default_tab/1, processes_small_tab/1,
processes_this_tab/1, processes_apply_trap/1,
processes_last_call_trap/1, processes_gc_trap/1,
@@ -60,7 +60,7 @@
system_task_on_suspended/1,
gc_request_when_gc_disabled/1,
gc_request_blast_when_gc_disabled/1]).
--export([prio_server/2, prio_client/2]).
+-export([prio_server/2, prio_client/2, init/1, handle_event/2]).
-export([init_per_testcase/2, end_per_testcase/2]).
@@ -84,7 +84,8 @@ all() ->
bump_reductions, low_prio, yield, yield2, otp_4725,
bad_register, garbage_collect, process_info_messages,
process_flag_badarg, process_flag_heap_size,
- spawn_opt_heap_size, otp_6237, {group, processes_bif},
+ spawn_opt_heap_size, spawn_opt_max_heap_size, otp_6237,
+ {group, processes_bif},
{group, otp_7738}, garb_other_running,
{group, system_task}].
@@ -425,6 +426,8 @@ t_process_info(Config) when is_list(Config) ->
{status, running} = process_info(self(), status),
{min_heap_size, 233} = process_info(self(), min_heap_size),
{min_bin_vheap_size,46422} = process_info(self(), min_bin_vheap_size),
+ {max_heap_size, #{ size := 0, kill := true, error_logger := true}} =
+ process_info(self(), max_heap_size),
{current_function,{?MODULE,t_process_info,1}} =
process_info(self(), current_function),
{current_function,{?MODULE,t_process_info,1}} =
@@ -564,6 +567,8 @@ process_info_other_msg(Config) when is_list(Config) ->
{min_heap_size, 233} = process_info(Pid, min_heap_size),
{min_bin_vheap_size, 46422} = process_info(Pid, min_bin_vheap_size),
+ {max_heap_size, #{ size := 0, kill := true, error_logger := true}} =
+ process_info(self(), max_heap_size),
Pid ! stop,
ok.
@@ -914,10 +919,14 @@ process_info_garbage_collection(_Config) ->
Parent = self(),
Pid = spawn_link(
fun() ->
+ %% We set mqd to off_heap and send a tuple to
+ %% the process in order to force mbuf_size
+ %% to be used
+ process_flag(message_queue_data, off_heap),
receive go -> ok end,
(fun F(0) ->
Parent ! deep,
- receive ok -> ok end,
+ receive {ok,_} -> ok end,
[];
F(N) ->
timer:sleep(1),
@@ -926,31 +935,52 @@ process_info_garbage_collection(_Config) ->
Parent ! shallow,
receive done -> ok end
end),
- {garbage_collection_info, Before} =
- erlang:process_info(Pid, garbage_collection_info),
+ [{garbage_collection_info, Before},{total_heap_size, THSBefore}] =
+ erlang:process_info(Pid, [garbage_collection_info, total_heap_size]),
Pid ! go, receive deep -> ok end,
- {_, Deep} = erlang:process_info(Pid, garbage_collection_info),
- Pid ! ok, receive shallow -> ok end,
- {_, After} = erlang:process_info(Pid, garbage_collection_info),
+ [{_, Deep},{_,THSDeep}] =
+ erlang:process_info(Pid, [garbage_collection_info, total_heap_size]),
+ Pid ! {ok, make_ref()}, receive shallow -> ok end,
+ [{_, After},{_, THSAfter}] =
+ erlang:process_info(Pid, [garbage_collection_info, total_heap_size]),
Pid ! done,
%% Do some general checks to see if everything seems to be roughly correct
ct:log("Before: ~p",[Before]),
ct:log("Deep: ~p",[Deep]),
ct:log("After: ~p",[After]),
+ ct:log("Before THS: ~p",[THSBefore]),
+ ct:log("Deep THS: ~p",[THSDeep]),
+ ct:log("After THS: ~p",[THSAfter]),
%% Check stack_size
- true = proplists:get_value(stack_size, Before) < proplists:get_value(stack_size, Deep),
- true = proplists:get_value(stack_size, After) < proplists:get_value(stack_size, Deep),
+ true = gv(stack_size, Before) < gv(stack_size, Deep),
+ true = gv(stack_size, After) < gv(stack_size, Deep),
%% Check used heap size
- true = proplists:get_value(heap_size, Before) + proplists:get_value(old_heap_size, Before)
- < proplists:get_value(heap_size, Deep) + proplists:get_value(old_heap_size, Deep),
- true = proplists:get_value(heap_size, Before) + proplists:get_value(old_heap_size, Before)
- < proplists:get_value(heap_size, After) + proplists:get_value(old_heap_size, After),
+ true = gv(heap_size, Before) + gv(old_heap_size, Before)
+ < gv(heap_size, Deep) + gv(old_heap_size, Deep),
+ true = gv(heap_size, Before) + gv(old_heap_size, Before)
+ < gv(heap_size, After) + gv(old_heap_size, After),
+
+ %% Check that total_heap_size == heap_block_size + old_heap_block_size + mbuf_size
+ THSBefore = gv(heap_block_size, Before)
+ + gv(old_heap_block_size, Before)
+ + gv(mbuf_size, Before),
+
+ THSDeep = gv(heap_block_size, Deep)
+ + gv(old_heap_block_size, Deep)
+ + gv(mbuf_size, Deep),
+
+ THSAfter = gv(heap_block_size, After)
+ + gv(old_heap_block_size, After)
+ + gv(mbuf_size, After),
ok.
+gv(Key,List) ->
+ proplists:get_value(Key,List).
+
%% Tests erlang:bump_reductions/1.
bump_reductions(Config) when is_list(Config) ->
erlang:garbage_collect(),
@@ -1323,6 +1353,28 @@ process_flag_badarg(Config) when is_list(Config) ->
chk_badarg(fun () -> process_flag(min_heap_size, gurka) end),
chk_badarg(fun () -> process_flag(min_bin_vheap_size, gurka) end),
chk_badarg(fun () -> process_flag(min_bin_vheap_size, -1) end),
+
+ chk_badarg(fun () -> process_flag(max_heap_size, gurka) end),
+ chk_badarg(fun () -> process_flag(max_heap_size, -1) end),
+ chk_badarg(fun () ->
+ {_,Min} = process_info(self(), min_heap_size),
+ process_flag(max_heap_size, Min - 1)
+ end),
+ chk_badarg(fun () ->
+ {_,Min} = process_info(self(), min_heap_size),
+ process_flag(max_heap_size, #{size => Min - 1})
+ end),
+ chk_badarg(fun () -> process_flag(max_heap_size, #{}) end),
+ chk_badarg(fun () -> process_flag(max_heap_size, #{ kill => true }) end),
+ chk_badarg(fun () -> process_flag(max_heap_size, #{ size => 233,
+ kill => gurka }) end),
+ chk_badarg(fun () -> process_flag(max_heap_size, #{ size => 233,
+ error_logger => gurka }) end),
+ chk_badarg(fun () -> process_flag(max_heap_size, #{ size => 233,
+ kill => true,
+ error_logger => gurka }) end),
+ chk_badarg(fun () -> process_flag(max_heap_size, #{ size => 1 bsl 64 }) end),
+
chk_badarg(fun () -> process_flag(priority, 4711) end),
chk_badarg(fun () -> process_flag(save_calls, hmmm) end),
P= spawn_link(fun () -> receive die -> ok end end),
@@ -1923,6 +1975,110 @@ spawn_opt_heap_size(Config) when is_list(Config) ->
Pid ! stop,
ok.
+spawn_opt_max_heap_size(_Config) ->
+
+ error_logger:add_report_handler(?MODULE, self()),
+
+ %% Test that numerical limit works
+ max_heap_size_test(1024, 1024, true, true),
+
+ %% Test that map limit works
+ max_heap_size_test(#{ size => 1024 }, 1024, true, true),
+
+ %% Test that no kill is sent
+ max_heap_size_test(#{ size => 1024, kill => false }, 1024, false, true),
+
+ %% Test that no error_logger report is sent
+ max_heap_size_test(#{ size => 1024, error_logger => false }, 1024, true, false),
+
+ %% Test that system_flag works
+ erlang:system_flag(max_heap_size, #{ size => 0, kill => false,
+ error_logger => true}),
+ max_heap_size_test(#{ size => 1024 }, 1024, false, true),
+ max_heap_size_test(#{ size => 1024, kill => true }, 1024, true, true),
+
+ erlang:system_flag(max_heap_size, #{ size => 0, kill => true,
+ error_logger => false}),
+ max_heap_size_test(#{ size => 1024 }, 1024, true, false),
+ max_heap_size_test(#{ size => 1024, error_logger => true }, 1024, true, true),
+
+ erlang:system_flag(max_heap_size, #{ size => 1 bsl 20, kill => true,
+ error_logger => true}),
+ max_heap_size_test(#{ }, 1 bsl 20, true, true),
+
+ erlang:system_flag(max_heap_size, #{ size => 0, kill => true,
+ error_logger => true}),
+
+ %% Test that the ordinary case works as expected again
+ max_heap_size_test(1024, 1024, true, true),
+
+ ok.
+
+max_heap_size_test(Option, Size, Kill, ErrorLogger)
+ when map_size(Option) == 0 ->
+ max_heap_size_test([], Size, Kill, ErrorLogger);
+max_heap_size_test(Option, Size, Kill, ErrorLogger)
+ when is_map(Option); is_integer(Option) ->
+ max_heap_size_test([{max_heap_size, Option}], Size, Kill, ErrorLogger);
+max_heap_size_test(Option, Size, Kill, ErrorLogger) ->
+ OomFun = fun F() -> timer:sleep(5),[lists:seq(1,1000)|F()] end,
+ Pid = spawn_opt(OomFun, Option),
+ {max_heap_size, MHSz} = erlang:process_info(Pid, max_heap_size),
+ ct:log("Default: ~p~nOption: ~p~nProc: ~p~n",
+ [erlang:system_info(max_heap_size), Option, MHSz]),
+
+ #{ size := Size} = MHSz,
+
+ Ref = erlang:monitor(process, Pid),
+ if Kill ->
+ receive
+ {'DOWN', Ref, process, Pid, killed} ->
+ ok
+ end;
+ true ->
+ ok
+ end,
+ if ErrorLogger ->
+ receive
+ {error, _, {emulator, _, [Pid|_]}} ->
+ ok
+ end;
+ true ->
+ ok
+ end,
+ if not Kill ->
+ exit(Pid, die),
+ receive
+ {'DOWN', Ref, process, Pid, die} ->
+ ok
+ end,
+ flush();
+ true ->
+ ok
+ end,
+ receive
+ M ->
+ ct:fail({unexpected_message, M})
+ after 10 ->
+ ok
+ end.
+
+flush() ->
+ receive
+ _M ->
+ flush()
+ after 1000 ->
+ ok
+ end.
+
+%% error_logger report handler proxy
+init(Pid) ->
+ {ok, Pid}.
+
+handle_event(Event, Pid) ->
+ Pid ! Event,
+ {ok, Pid}.
+
processes_term_proc_list(Config) when is_list(Config) ->
Tester = self(),
as_expected = processes_term_proc_list_test(false),
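
A minimal sketch of how the max_heap_size option tested above is used; it is not
part of the patch, the map keys (size, kill, error_logger) are the ones the tests
assert on, and the spawned fun is only an illustration. A plain integer sets only
the size; the other keys then follow the system default.

    %% Limit given as a map.
    Limit = #{ size => 1024,           % heap limit in words, 0 disables the limit
               kill => true,           % kill the process when the limit is exceeded
               error_logger => true }, % also send an error_logger report
    Pid = spawn_opt(fun() -> receive stop -> ok end end,
                    [{max_heap_size, Limit}]),
    {max_heap_size, #{ size := 1024 }} = process_info(Pid, max_heap_size),
    Pid ! stop.
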
diff --git a/erts/emulator/test/save_calls_SUITE.erl b/erts/emulator/test/save_calls_SUITE.erl
index af1a0d35d6..aae7651f6d 100644
--- a/erts/emulator/test/save_calls_SUITE.erl
+++ b/erts/emulator/test/save_calls_SUITE.erl
@@ -60,7 +60,7 @@ dont_break_reductions(Config) when is_list(Config) ->
RPS1 = reds_per_sched(0),
RPS2 = reds_per_sched(20),
Diff = abs(RPS1 - RPS2),
- true = (Diff < (0.05 * RPS1)),
+ true = (Diff < (0.2 * RPS1)),
ok.
diff --git a/erts/emulator/test/scheduler_SUITE.erl b/erts/emulator/test/scheduler_SUITE.erl
index 6b49b68ec8..f18d79d770 100644
--- a/erts/emulator/test/scheduler_SUITE.erl
+++ b/erts/emulator/test/scheduler_SUITE.erl
@@ -57,7 +57,6 @@
scheduler_suspend_basic/1,
scheduler_suspend/1,
dirty_scheduler_threads/1,
- dirty_scheduler_exit/1,
reader_groups/1]).
suite() ->
@@ -72,7 +71,7 @@ all() ->
bound_process,
{group, scheduler_bind}, scheduler_threads,
scheduler_suspend_basic, scheduler_suspend,
- dirty_scheduler_threads, dirty_scheduler_exit,
+ dirty_scheduler_threads,
reader_groups].
groups() ->
@@ -1162,53 +1161,6 @@ get_dsstate(Config, Cmd) ->
stop_node(Node),
{DSCPU, DSCPUOnln, DSIO}.
-dirty_scheduler_exit(Config) when is_list(Config) ->
- try
- erlang:system_info(dirty_cpu_schedulers),
- dirty_scheduler_exit_test(Config)
- catch
- error:badarg ->
- {skipped, "No dirty scheduler support"}
- end.
-
-dirty_scheduler_exit_test(Config) ->
- {ok, Node} = start_node(Config, "+SDio 1"),
- [ok] = mcall(Node,
- [fun() ->
- Path = proplists:get_value(data_dir, Config),
- Lib = atom_to_list(?MODULE),
- ok = erlang:load_nif(filename:join(Path,Lib), []),
- ok = test_dirty_scheduler_exit()
- end]),
- stop_node(Node),
- ok.
-
-test_dirty_scheduler_exit() ->
- process_flag(trap_exit,true),
- test_dse(10,[]).
-test_dse(0,Pids) ->
- timer:sleep(100),
- kill_dse(Pids,[]);
-test_dse(N,Pids) ->
- Pid = spawn_link(fun dirty_sleeper/0),
- test_dse(N-1,[Pid|Pids]).
-kill_dse([],Killed) ->
- wait_dse(Killed);
-kill_dse([Pid|Pids],AlreadyKilled) ->
- exit(Pid,kill),
- kill_dse(Pids,[Pid|AlreadyKilled]).
-wait_dse([]) ->
- ok;
-wait_dse([Pid|Pids]) ->
- receive
- {'EXIT',Pid,killed} ->
- ok
- end,
- wait_dse(Pids).
-
-dirty_sleeper() ->
- erlang:nif_error({error,?MODULE}).
-
scheduler_suspend_basic(Config) when is_list(Config) ->
case erlang:system_info(multi_scheduling) of
disabled ->
diff --git a/erts/emulator/test/scheduler_SUITE_data/Makefile.src b/erts/emulator/test/scheduler_SUITE_data/Makefile.src
deleted file mode 100644
index 859112cf19..0000000000
--- a/erts/emulator/test/scheduler_SUITE_data/Makefile.src
+++ /dev/null
@@ -1,8 +0,0 @@
-
-SCHEDULER_LIBS = scheduler_SUITE@dll@
-
-all: $(SCHEDULER_LIBS)
-
-@SHLIB_RULES@
-
-$(SCHEDULER_LIBS): scheduler_SUITE.c
diff --git a/erts/emulator/test/scheduler_SUITE_data/scheduler_SUITE.c b/erts/emulator/test/scheduler_SUITE_data/scheduler_SUITE.c
deleted file mode 100644
index ab4863337f..0000000000
--- a/erts/emulator/test/scheduler_SUITE_data/scheduler_SUITE.c
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef __WIN32__
-#include <unistd.h>
-#endif
-#include "erl_nif.h"
-
-static int
-load(ErlNifEnv* env, void** priv, ERL_NIF_TERM info)
-{
- ErlNifSysInfo sys_info;
- enif_system_info(&sys_info, sizeof(ErlNifSysInfo));
- if (!sys_info.smp_support || !sys_info.dirty_scheduler_support)
- return 1;
- return 0;
-}
-
-static ERL_NIF_TERM
-dirty_sleeper(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
-{
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
-#ifdef __WIN32__
- Sleep(3000);
-#else
- sleep(3);
-#endif
-#endif
- return enif_make_atom(env, "ok");
-}
-
-static ErlNifFunc funcs[] = {
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
- {"dirty_sleeper", 0, dirty_sleeper, ERL_NIF_DIRTY_JOB_IO_BOUND}
-#else
- {"dirty_sleeper", 0, dirty_sleeper, 0}
-#endif
-};
-
-ERL_NIF_INIT(scheduler_SUITE, funcs, &load, NULL, NULL, NULL);
diff --git a/erts/emulator/test/trace_SUITE.erl b/erts/emulator/test/trace_SUITE.erl
index 29e043dd5c..da6a6bdea4 100644
--- a/erts/emulator/test/trace_SUITE.erl
+++ b/erts/emulator/test/trace_SUITE.erl
@@ -84,7 +84,6 @@ cpu_timestamp(Config) when is_list(Config) ->
receive_trace(Config) when is_list(Config) ->
Receiver = fun_spawn(fun receiver/0),
- process_flag(trap_exit, true),
%% Trace the process; make sure that we receive the trace messages.
1 = erlang:trace(Receiver, true, ['receive']),
@@ -96,15 +95,116 @@ receive_trace(Config) when is_list(Config) ->
{trace, Receiver, 'receive', Hello2} = receive_first_trace(),
receive_nothing(),
+ %% Test 'receive' with matchspec
+ F1 = fun ({Pat, IsMatching}) ->
+ set_trace_pattern('receive', Pat, []),
+ Receiver ! Hello,
+ case IsMatching of
+ true ->
+ {trace, Receiver, 'receive', Hello} = receive_first_trace();
+ false ->
+ ok
+ end,
+ receive_nothing()
+ end,
+ From = self(),
+ Node = node(),
+ lists:foreach(F1, [{no, true},
+ {[{[Node, undefined,"Unexpected"],[],[]}], false},
+ {[{[Node, From,'_'],[],[]}], true},
+ {[{[Node, '$1','_'],[{'=/=','$1',From}],[]}], false},
+ {[{['$1', '_','_'],[{'=:=','$1',Node}],[]}], true},
+ {false, false},
+ {true, true}]),
+
+ %% Remote messages
+ OtherName = atom_to_list(?MODULE)++"_receive_trace",
+ {ok, OtherNode} = start_node(OtherName),
+ RemoteProc = spawn_link(OtherNode, ?MODULE, process, [self()]),
+ io:format("RemoteProc = ~p ~n", [RemoteProc]),
+
+ RemoteProc ! {send_please, Receiver, Hello},
+ {trace, Receiver, 'receive', Hello} = receive_first_trace(),
+ RemoteProc ! {send_please, Receiver, 99},
+ {trace, Receiver, 'receive', 99} = receive_first_trace(),
+
+ %% Remote with matchspec
+ F2 = fun (To, {Pat, IsMatching}) ->
+ set_trace_pattern('receive', Pat, []),
+ RemoteProc ! {send_please, To, Hello},
+ case IsMatching of
+ true ->
+ {trace, Receiver, 'receive', Hello} = receive_first_trace();
+ false ->
+ ok
+ end,
+ receive_nothing()
+ end,
+ F2(Receiver, {no, true}),
+ F2(Receiver, {[{[OtherNode, undefined,"Unexpected"],[],[]}], false}),
+ F2(Receiver, {[{[OtherNode, RemoteProc,'_'],[],[]},
+ {[OtherNode, undefined,'_'],[],[]}], true}),
+ F2(Receiver, {[{[OtherNode, '$1','_'],
+ [{'orelse',{'=:=','$1',undefined},{'=/=',{node,'$1'},{node}}}],
+ []}], true}),
+ F2(Receiver, {[{['$1', '_','_'], [{'=:=','$1',OtherNode}], []}], true}),
+ F2(Receiver, {false, false}),
+ F2(Receiver, {true, true}),
+
+ %% Remote to named with matchspec
+ Name = trace_SUITE_receiver,
+ register(Name, Receiver),
+ NN = {Name, node()},
+ F2(NN, {no, true}),
+ F2(NN, {[{[OtherNode, undefined,"Unexpected"],[],[]}], false}),
+ F2(NN, {[{[OtherNode, RemoteProc,'_'],[],[]},
+ {[OtherNode, undefined,'_'],[],[]}], true}),
+ F2(NN, {[{[OtherNode, '$1','_'],
+ [{'orelse',{'=:=','$1',undefined},{'=/=',{node,'$1'},{node}}}],
+ []}], true}),
+ F2(NN, {[{['$1', '_','_'], [{'==','$1',OtherNode}], []}], true}),
+ F2(NN, {false, false}),
+ F2(NN, {true, true}),
+
+ unlink(RemoteProc),
+ true = stop_node(OtherNode),
+
+ %% Timeout
+ Receiver ! {set_timeout, 10},
+ {trace, Receiver, 'receive', {set_timeout, 10}} = receive_first_trace(),
+ {trace, Receiver, 'receive', timeout} = receive_first_trace(),
+ erlang:trace_pattern('receive', [{[clock_service,undefined,timeout], [], []}], []),
+ Receiver ! {set_timeout, 7},
+ {trace, Receiver, 'receive', timeout} = receive_first_trace(),
+ erlang:trace_pattern('receive', true, []),
+
%% Another process should not be able to trace Receiver.
+ process_flag(trap_exit, true),
Intruder = fun_spawn(fun() -> erlang:trace(Receiver, true, ['receive']) end),
{'EXIT', Intruder, {badarg, _}} = receive_first(),
%% Untrace the process; we should not receive anything.
- 1 = erlang:trace(Receiver, false, ['receive']),
- Receiver ! {hello, there},
- Receiver ! any_garbage,
- receive_nothing(),
+ ?line 1 = erlang:trace(Receiver, false, ['receive']),
+ ?line Receiver ! {hello, there},
+ ?line Receiver ! any_garbage,
+ ?line receive_nothing(),
+
+ %% Verify restrictions in matchspec for 'receive'
+ F3 = fun (Pat) -> {'EXIT', {badarg,_}} = (catch erlang:trace_pattern('receive', Pat, [])) end,
+ WC = ['_','_','_'],
+ F3([{WC,[],[{message, {process_dump}}]}]),
+ F3([{WC,[{is_seq_trace}],[]}]),
+ F3([{WC,[],[{set_seq_token,label,4711}]}]),
+ F3([{WC,[],[{get_seq_token}]}]),
+ F3([{WC,[],[{enable_trace,call}]}]),
+ F3([{WC,[],[{enable_trace,self(),call}]}]),
+ F3([{WC,[],[{disable_trace,call}]}]),
+ F3([{WC,[],[{disable_trace,self(),call}]}]),
+ F3([{WC,[],[{trace,[call],[]}]}]),
+ F3([{WC,[],[{trace,self(),[],[call]}]}]),
+ F3([{WC,[],[{caller}]}]),
+ F3([{WC,[],[{silent,true}]}]),
+
ok.
%% Tests that receive of a message always happens before a call with
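
As a hedged aside (not part of the patch): the 'receive' match specifications
exercised above match on a three-element head [Node, Sender, Msg]. A minimal
stand-alone use, with the calling process acting as tracer, could look like:

    Tracee = spawn(fun() -> receive _ -> ok end end),
    1 = erlang:trace(Tracee, true, ['receive']),
    %% Only trace messages sent by this process from the local node.
    erlang:trace_pattern('receive', [{[node(), self(), '_'], [], []}], []),
    Tracee ! hello,
    receive {trace, Tracee, 'receive', hello} -> ok end,
    erlang:trace_pattern('receive', true, []).
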
@@ -257,30 +357,111 @@ send_trace(Config) when is_list(Config) ->
%% Check that a message sent to another process is traced.
1 = erlang:trace(Sender, true, [send]),
+ F1 = fun (Pat) ->
+ set_trace_pattern(send, Pat, []),
+ Sender ! {send_please, Receiver, to_receiver},
+ {trace, Sender, send, to_receiver, Receiver} = receive_first_trace(),
+ receive_nothing()
+ end,
+ lists:foreach(F1, [no,
+ [{[Receiver,to_receiver],[],[]}],
+ [{['_','_'],[],[]}],
+ [{['$1','_'],[{is_pid,'$1'}],[]}],
+ [{['_','$1'],[{is_atom,'$1'}],[]}],
+ true]),
+
+ %% Test {message, Msg}
+ F1m = fun ({Pat, Msg}) ->
+ set_trace_pattern(send, Pat, []),
+ Sender ! {send_please, Receiver, to_receiver},
+ {trace, Sender, send, to_receiver, Receiver, Msg} = receive_first_trace(),
+ receive_nothing()
+ end,
+ lists:foreach(F1m, [{[{['_','_'],[],[{message, 4711}]}], 4711},
+ {[{['_','_'],[],[{message, "4711"}]}], "4711"}
+ ]),
+
+ %% Test {message, {process_dump}}
+ set_trace_pattern(send, [{['_','_'],[],[{message, {process_dump}}]}], []),
Sender ! {send_please, Receiver, to_receiver},
- {trace, Sender, send, to_receiver, Receiver} = receive_first_trace(),
+ {trace, Sender, send, to_receiver, Receiver, ProcDump} = receive_first_trace(),
+ true = is_binary(ProcDump),
receive_nothing(),
+ %% Same test with false match spec
+ F2 = fun (Pat) ->
+ set_trace_pattern(send, Pat, []),
+ Sender ! {send_please, Receiver, to_receiver},
+ receive_nothing()
+ end,
+ lists:foreach(F2, [[{[Sender,to_receiver],[],[]}],
+ [{[Receiver,nomatch],[],[]}],
+ [{['$1','_'],[{is_atom,'$1'}],[]}],
+ [{['_','$1'],[{is_pid,'$1'}],[]}],
+ false,
+ [{['_','_'],[],[{message,false}]}],
+ [{['_','_'],[],[{silent,true}]}]]),
+ erlang:trace_pattern(send, true, []),
+ erlang:trace(Sender, false, [silent]),
+
%% Check that a message sent to another registered process is traced.
register(?MODULE,Receiver),
- Sender ! {send_please, ?MODULE, to_receiver},
- {trace, Sender, send, to_receiver, ?MODULE} = receive_first_trace(),
- receive_nothing(),
+ F3 = fun (Pat) ->
+ set_trace_pattern(send, Pat, []),
+ Sender ! {send_please, ?MODULE, to_receiver},
+ {trace, Sender, send, to_receiver, ?MODULE} = receive_first_trace(),
+ receive_nothing()
+ end,
+ lists:foreach(F3, [no,
+ [{[?MODULE,to_receiver],[],[]}],
+ [{['_','_'],[],[]}],
+ [{['$1','_'],[{is_atom,'$1'}],[]}],
+ [{['_','$1'],[{is_atom,'$1'}],[]}],
+ true]),
+ %% Again with false match spec
+ F4 = fun (Pat) ->
+ set_trace_pattern(send, Pat, []),
+ Sender ! {send_please, ?MODULE, to_receiver},
+ receive_nothing()
+ end,
+ lists:foreach(F4, [[{[nomatch,to_receiver],[],[]}],
+ [{[?MODULE,nomatch],[],[]}],
+ [{['$1','_'],[{is_pid,'$1'}],[]}],
+ [{['_','$1'],[{is_pid,'$1'}],[]}],
+ [{['_','_'],[],[{message,false}]}],
+ [{['_','_'],[],[{silent,true}]}]
+ ]),
unregister(?MODULE),
+ erlang:trace_pattern(send, true, []),
+ erlang:trace(Sender, false, [silent]),
%% Check that a message sent to this process is traced.
- Sender ! {send_please, self(), to_myself},
- receive to_myself -> ok end,
- Self = self(),
- {trace, Sender, send, to_myself, Self} = receive_first_trace(),
- receive_nothing(),
+ F5 = fun (Pat) ->
+ set_trace_pattern(send, Pat, []),
+ Sender ! {send_please, self(), to_myself},
+ receive to_myself -> ok end,
+ Self = self(),
+ {trace, Sender, send, to_myself, Self} = receive_first_trace(),
+ receive_nothing()
+ end,
+ lists:foreach(F5, [no,
+ [{[self(),to_myself],[],[]}],
+ [{['_','_'],[],[]}],
+ true]),
%% Check that a message sent to dead process is traced.
{Pid,Ref} = spawn_monitor(fun() -> ok end),
receive {'DOWN',Ref,_,_,_} -> ok end,
- Sender ! {send_please, Pid, to_dead},
- {trace, Sender, send_to_non_existing_process, to_dead, Pid} = receive_first_trace(),
- receive_nothing(),
+ F6 = fun (Pat) ->
+ set_trace_pattern(send, Pat, []),
+ Sender ! {send_please, Pid, to_dead},
+ {trace, Sender, send_to_non_existing_process, to_dead, Pid} = receive_first_trace(),
+ receive_nothing()
+ end,
+ lists:foreach(F6, [no,
+ [{[Pid,to_dead],[],[]}],
+ [{['_','_'],[],[]}],
+ true]),
%% Check that a message sent to an unknown registered process is traced.
BadargSender = fun_spawn(fun sender/0),
@@ -301,8 +482,26 @@ send_trace(Config) when is_list(Config) ->
Sender ! {send_please, self(), to_myself_again},
receive to_myself_again -> ok end,
receive_nothing(),
+
+ {'EXIT',{badarg,_}} = (catch erlang:trace_pattern(send, true, [global])),
+ {'EXIT',{badarg,_}} = (catch erlang:trace_pattern(send, true, [local])),
+ {'EXIT',{badarg,_}} = (catch erlang:trace_pattern(send, true, [meta])),
+ {'EXIT',{badarg,_}} = (catch erlang:trace_pattern(send, true, [{meta,self()}])),
+ {'EXIT',{badarg,_}} = (catch erlang:trace_pattern(send, true, [call_count])),
+ {'EXIT',{badarg,_}} = (catch erlang:trace_pattern(send, true, [call_time])),
+ {'EXIT',{badarg,_}} = (catch erlang:trace_pattern(send, restart, [])),
+ {'EXIT',{badarg,_}} = (catch erlang:trace_pattern(send, pause, [])),
+ {'EXIT',{badarg,_}} = (catch erlang:trace_pattern(send, [{['_','_'],[],[{caller}]}], [])),
+
+ %% Done.
ok.
+set_trace_pattern(_, no, _) -> 0;
+set_trace_pattern(MFA, Pat, Flg) ->
+ R = erlang:trace_pattern(MFA, Pat, Flg),
+ {match_spec, Pat} = erlang:trace_info(MFA, match_spec),
+ R.
+
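
For reference, a hedged sketch (not part of the patch) of the send match
specifications the helper above installs: the head matches [Receiver, Msg],
and a {message, Term} action appends Term to the trace message.

    Self = self(),
    Sender = spawn(fun() -> receive {send_please, To, What} -> To ! What end end),
    1 = erlang:trace(Sender, true, [send]),
    erlang:trace_pattern(send, [{['$1', '_'], [{is_pid, '$1'}], [{message, extra}]}], []),
    Sender ! {send_please, Self, hi},
    receive hi -> ok end,
    receive {trace, Sender, send, hi, Self, extra} -> ok end,
    erlang:trace_pattern(send, true, []).
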
%% Test trace(Pid, How, [procs]).
procs_trace(Config) when is_list(Config) ->
Name = list_to_atom(atom_to_list(?MODULE)++"_procs_trace"),
@@ -438,6 +637,7 @@ dist_procs_trace(Config) when is_list(Config) ->
Proc2 ! {unlink_please, Proc1},
{trace, Proc1, getting_unlinked, Proc2} = receive_first_trace(),
receive_nothing(),
+
%%
%% exit (with registered name, due to link)
Name = list_to_atom(OtherName),
@@ -1516,8 +1716,8 @@ receive_nothing() ->
receive
Any ->
ct:fail({unexpected_message, Any})
- after 200 ->
- ok
+ after 100 ->
+ ok
end.
@@ -1592,9 +1792,14 @@ sender() ->
%% Just consumes messages from its message queue.
receiver() ->
- receive
- _Any -> receiver()
- end.
+ receiver(infinity).
+
+receiver(Timeout) ->
+ receiver(receive
+ {set_timeout, NewTimeout} -> NewTimeout;
+ _Any -> Timeout
+ after Timeout -> infinity %% reset
+ end).
%% Works as long as it receives CPU time. Will always be RUNNABLE.
diff --git a/erts/emulator/test/trace_bif_SUITE.erl b/erts/emulator/test/trace_bif_SUITE.erl
index 8c3ffccc45..491b37ae46 100644
--- a/erts/emulator/test/trace_bif_SUITE.erl
+++ b/erts/emulator/test/trace_bif_SUITE.erl
@@ -232,6 +232,7 @@ do_trace_bif_return(TsType, TsFlags) ->
{?MODULE, bif_process,0}},
Ts11, TsType),
check_ts(TsType, Ts12, make_ts(TsType)),
+ erlang:trace_pattern({erlang,'_','_'}, false, [local]),
ok.
diff --git a/erts/emulator/test/tracer_SUITE.erl b/erts/emulator/test/tracer_SUITE.erl
index de44d6656a..20fb7e475e 100644
--- a/erts/emulator/test/tracer_SUITE.erl
+++ b/erts/emulator/test/tracer_SUITE.erl
@@ -44,6 +44,8 @@ groups() ->
gc_start, gc_end]}].
init_per_suite(Config) ->
+ erlang:trace_pattern({'_','_','_'}, false, [local]),
+ erlang:trace_pattern({'_','_','_'}, false, []),
purge(),
Config.
@@ -119,23 +121,24 @@ unload(_Config) ->
end,
1 = erlang:trace(Pid, true, [{tracer, tracer_test,
- {#{ call => trace}, self(), []}},
+ {#{ call => trace }, self(), []}},
call]),
1 = erlang:trace_pattern({?MODULE, all, 0}, [], []),
Tc(1),
- receive _ -> ok after 0 -> ct:fail(timeout) end,
+ receive _M -> ok after 0 -> ct:fail(timeout) end,
+ receive M0 -> ct:fail({unexpected_message0, M0}) after 0 -> ok end,
code:purge(tracer_test),
code:delete(tracer_test),
Tc(1),
- receive M1 -> ct:fail({unexpected_message, M1}) after 0 -> ok end,
+ receive M1 -> ct:fail({unexpected_message1, M1}) after 0 -> ok end,
code:purge(tracer_test),
Tc(1),
- receive M2 -> ct:fail({unexpected_message, M2}) after 0 -> ok end,
+ receive M2 -> ct:fail({unexpected_message2, M2}) after 0 -> ok end,
ok.
@@ -167,13 +170,19 @@ reload(_Config) ->
false = code:purge(tracer_test),
true = code:delete(tracer_test),
- false = code:purge(tracer_test)
+ false = code:purge(tracer_test),
+ timer:sleep(10)
end || _ <- lists:seq(1,15)],
ok.
reload_loop() ->
?MODULE:all(),
+ ?MODULE:all(),
+ ?MODULE:all(),
+ ?MODULE:all(),
+ ?MODULE:all(),
+ timer:sleep(1),
reload_loop().
invalid_tracers(_Config) ->
@@ -214,7 +223,7 @@ send(_Config) ->
Expect = fun(Pid, State, EOpts) ->
receive
Msg ->
- {Pid, send, State, Pid, ok, Self, Opts} = Msg,
+ {send, State, Pid, ok, Self, Opts} = Msg,
check_opts(EOpts, Opts)
end
end,
@@ -230,7 +239,7 @@ recv(_Config) ->
Expect = fun(Pid, State, EOpts) ->
receive
Msg ->
- {undefined, 'receive', State, Pid, ok, undefined, Opts} = Msg,
+ {'receive', State, Pid, ok, undefined, Opts} = Msg,
check_opts(EOpts, Opts)
end
end,
@@ -247,14 +256,14 @@ spawn(_Config) ->
fun(Pid, State, EOpts) ->
receive
Msg ->
- {Pid, spawn, State, Pid, NewPid,
+ {spawn, State, Pid, NewPid,
{lists,seq,[1,10]}, Opts} = Msg,
check_opts(EOpts, Opts),
true = is_pid(NewPid) andalso NewPid /= Pid
end
end,
- test(spawn, procs, Tc, Expect, true).
+ test(spawn, procs, Tc, Expect, false).
exit(_Config) ->
Tc = fun(Pid) ->
@@ -265,7 +274,7 @@ exit(_Config) ->
fun(Pid, State, EOpts) ->
receive
Msg ->
- {Pid, exit, State, Pid, normal, undefined, Opts} = Msg,
+ {exit, State, Pid, normal, undefined, Opts} = Msg,
check_opts(EOpts, Opts)
end
end,
@@ -286,13 +295,13 @@ link(_Config) ->
fun(Pid, State, EOpts) ->
receive
Msg ->
- {Pid, link, State, Pid, NewPid, undefined, Opts} = Msg,
+ {link, State, Pid, NewPid, undefined, Opts} = Msg,
check_opts(EOpts, Opts),
true = is_pid(NewPid) andalso NewPid /= Pid
end
end,
- test(link, procs, Tc, Expect, true).
+ test(link, procs, Tc, Expect, false).
unlink(_Config) ->
@@ -309,13 +318,13 @@ unlink(_Config) ->
fun(Pid, State, EOpts) ->
receive
Msg ->
- {Pid, unlink, State, Pid, NewPid, undefined, Opts} = Msg,
+ {unlink, State, Pid, NewPid, undefined, Opts} = Msg,
check_opts(EOpts, Opts),
true = is_pid(NewPid) andalso NewPid /= Pid
end
end,
- test(unlink, procs, Tc, Expect, true).
+ test(unlink, procs, Tc, Expect, false).
getting_linked(_Config) ->
@@ -331,7 +340,7 @@ getting_linked(_Config) ->
fun(Pid, State, EOpts) ->
receive
Msg ->
- {NewPid, getting_linked, State, Pid, NewPid, undefined, Opts} = Msg,
+ {getting_linked, State, Pid, NewPid, undefined, Opts} = Msg,
check_opts(EOpts, Opts),
true = is_pid(NewPid) andalso NewPid /= Pid
end
@@ -355,7 +364,7 @@ getting_unlinked(_Config) ->
fun(Pid, State, EOpts) ->
receive
Msg ->
- {NewPid, getting_unlinked, State, Pid, NewPid, undefined, Opts} = Msg,
+ {getting_unlinked, State, Pid, NewPid, undefined, Opts} = Msg,
check_opts(EOpts, Opts),
true = is_pid(NewPid) andalso NewPid /= Pid
end
@@ -377,12 +386,12 @@ register(_Config) ->
fun(Pid, State, EOpts) ->
receive
Msg ->
- {Pid, register, State, Pid, ?MODULE, undefined, Opts} = Msg,
+ {register, State, Pid, ?MODULE, undefined, Opts} = Msg,
check_opts(EOpts, Opts)
end
end,
- test(register, procs, Tc, Expect, true).
+ test(register, procs, Tc, Expect, false).
unregister(_Config) ->
@@ -398,18 +407,18 @@ unregister(_Config) ->
fun(Pid, State, EOpts) ->
receive
Msg ->
- {Pid, unregister, State, Pid, ?MODULE, undefined, Opts} = Msg,
+ {unregister, State, Pid, ?MODULE, undefined, Opts} = Msg,
check_opts(EOpts, Opts)
end
end,
- test(unregister, procs, Tc, Expect, true).
+ test(unregister, procs, Tc, Expect, false).
in(_Config) ->
Tc = fun(Pid) ->
Self = self(),
- Pid ! fun() -> receive after 1 -> Self ! ok end end,
+ Pid ! fun() -> receive after 10 -> Self ! ok end end,
receive ok -> ok end
end,
@@ -418,7 +427,7 @@ in(_Config) ->
N = (fun F(N) ->
receive
Msg ->
- {Pid, in, State, Pid, _,
+ {in, State, Pid, _,
undefined, Opts} = Msg,
check_opts(EOpts, Opts),
F(N+1)
@@ -428,7 +437,7 @@ in(_Config) ->
true = N > 0
end,
- test(in, running, Tc, Expect, true).
+ test(in, running, Tc, Expect, false).
out(_Config) ->
Tc = fun(Pid) ->
@@ -443,7 +452,7 @@ out(_Config) ->
N = (fun F(N) ->
receive
Msg ->
- {Pid, out, State, Pid, _,
+ {out, State, Pid, _,
undefined, Opts} = Msg,
check_opts(EOpts, Opts),
F(N+1)
@@ -453,7 +462,7 @@ out(_Config) ->
true = N > 0
end,
- test(out, running, Tc, Expect, true, true).
+ test(out, running, Tc, Expect, false, true).
gc_start(_Config) ->
@@ -468,12 +477,12 @@ gc_start(_Config) ->
fun(Pid, State, EOpts) ->
receive
Msg ->
- {Pid, gc_major_start, State, Pid, _, undefined, Opts} = Msg,
+ {gc_major_start, State, Pid, _, undefined, Opts} = Msg,
check_opts(EOpts, Opts)
end
end,
- test(gc_major_start, garbage_collection, Tc, Expect, true).
+ test(gc_major_start, garbage_collection, Tc, Expect, false).
gc_end(_Config) ->
@@ -488,20 +497,20 @@ gc_end(_Config) ->
fun(Pid, State, EOpts) ->
receive
Msg ->
- {Pid, gc_major_end, State, Pid, _, undefined, Opts} = Msg,
+ {gc_major_end, State, Pid, _, undefined, Opts} = Msg,
check_opts(EOpts, Opts)
end
end,
- test(gc_major_end, garbage_collection, Tc, Expect, true).
+ test(gc_major_end, garbage_collection, Tc, Expect, false).
test(Event, Tc, Expect) ->
- test(Event, Tc, Expect, true).
+ test(Event, Tc, Expect, false).
test(Event, Tc, Expect, Removes) ->
test(Event, Event, Tc, Expect, Removes).
test(Event, TraceFlag, Tc, Expect, Removes) ->
test(Event, TraceFlag, Tc, Expect, Removes, false).
-test(Event, TraceFlag, Tc, Expect, Removes, Dies) ->
+test(Event, TraceFlag, Tc, Expect, _Removes, Dies) ->
ComplexState = {fun() -> ok end, <<0:(128*8)>>},
Opts = #{ timestamp => undefined,
@@ -557,25 +566,6 @@ test(Event, TraceFlag, Tc, Expect, Removes, Dies) ->
ok
end,
- %% Test that remove works
- Pid3 = start_tracee(),
- State3 = {#{ Event => remove }, self(), ComplexState},
- 1 = erlang:trace(Pid3, true, [TraceFlag, {tracer, tracer_test, State3}]),
- Tc(Pid3),
- ok = trace_delivered(Pid3),
- receive M3 -> ct:fail({unexpected, M3}) after 0 -> ok end,
- if not Dies ->
- if Removes ->
- {flags, []} = erlang:trace_info(Pid3, flags),
- {tracer, []} = erlang:trace_info(Pid3, tracer);
- true ->
- {flags, [TraceFlag]} = erlang:trace_info(Pid3, flags),
- {tracer, {tracer_test, State3}} = erlang:trace_info(Pid3, tracer)
- end,
- erlang:trace(Pid3, false, [TraceFlag]);
- true ->
- ok
- end,
ok.
check_opts(#{ scheduler_id := number } = E, #{ scheduler_id := N } = O)
diff --git a/erts/emulator/test/tracer_SUITE_data/tracer_test.c b/erts/emulator/test/tracer_SUITE_data/tracer_test.c
index 8b4be1345d..908f35da9c 100644
--- a/erts/emulator/test/tracer_SUITE_data/tracer_test.c
+++ b/erts/emulator/test/tracer_SUITE_data/tracer_test.c
@@ -104,16 +104,10 @@ static ERL_NIF_TERM trace(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
enif_get_tuple(env, argv[1], &state_arity, &state_tuple);
- tuple = enif_alloc(sizeof(ERL_NIF_TERM)*(argc+1));
- memcpy(tuple+1,argv,sizeof(ERL_NIF_TERM)*argc);
+ tuple = enif_alloc(sizeof(ERL_NIF_TERM)*(argc));
+ memcpy(tuple,argv,sizeof(ERL_NIF_TERM)*argc);
- if (enif_self(env, &self)) {
- tuple[0] = enif_make_pid(env, &self);
- } else {
- tuple[0] = enif_make_atom(env, "undefined");
- }
-
- msg = enif_make_tuple_from_array(env, tuple, argc + 1);
+ msg = enif_make_tuple_from_array(env, tuple, argc);
enif_get_local_pid(env, state_tuple[1], &to);
enif_send(env, &to, NULL, msg);
enif_free(tuple);
diff --git a/erts/epmd/test/epmd_SUITE.erl b/erts/epmd/test/epmd_SUITE.erl
index 763984267a..0f0a5acde7 100644
--- a/erts/epmd/test/epmd_SUITE.erl
+++ b/erts/epmd/test/epmd_SUITE.erl
@@ -23,64 +23,58 @@
% Timeout for test cases (rather long to work on slow machines)
--define(SHORT_TEST_TIMEOUT, ?t:seconds(30)). % Default
--define(MEDIUM_TEST_TIMEOUT, ?t:minutes(3)).
--define(LONG_TEST_TIMEOUT, ?t:minutes(10)).
+-define(MEDIUM_TEST_TIMEOUT, {minutes,3}).
+-define(LONG_TEST_TIMEOUT, {minutes,10}).
% Delay inserted into code
-define(SHORT_PAUSE, 100).
--define(MEDIUM_PAUSE, ?t:seconds(1)).
--define(LONG_PAUSE, ?t:seconds(5)).
+-define(MEDIUM_PAUSE, 1000).
+-define(LONG_PAUSE, 5000).
% Information about nodes
-record(node_info, {port, node_type, prot, lvsn, hvsn, node_name, extra}).
% Test server specific exports
--export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
- init_per_group/2,end_per_group/2,
- init_per_testcase/2, end_per_testcase/2]).
-
--export(
- [
- register_name/1,
- register_name_ipv6/1,
- register_names_1/1,
- register_names_2/1,
- register_duplicate_name/1,
- unicode_name/1,
- long_unicode_name/1,
- get_port_nr/1,
- slow_get_port_nr/1,
- unregister_others_name_1/1,
- unregister_others_name_2/1,
- register_overflow/1,
- name_with_null_inside/1,
- name_null_terminated/1,
- stupid_names_req/1,
-
- no_data/1,
- one_byte/1,
- two_bytes/1,
- partial_packet/1,
- zero_length/1,
- too_large/1,
- alive_req_too_small_1/1,
- alive_req_too_small_2/1,
- alive_req_too_large/1,
-
- returns_valid_empty_extra/1,
- returns_valid_populated_extra_with_nulls/1,
-
- names_stdout/1,
-
- buffer_overrun_1/1,
- buffer_overrun_2/1,
- no_nonlocal_register/1,
- no_nonlocal_kill/1,
- no_live_killing/1,
-
- socket_reset_before_alive2_reply_is_written/1
- ]).
+-export([all/0, suite/0, groups/0, init_per_testcase/2, end_per_testcase/2]).
+
+-export([register_name/1,
+ register_name_ipv6/1,
+ register_names_1/1,
+ register_names_2/1,
+ register_duplicate_name/1,
+ unicode_name/1,
+ long_unicode_name/1,
+ get_port_nr/1,
+ slow_get_port_nr/1,
+ unregister_others_name_1/1,
+ unregister_others_name_2/1,
+ register_overflow/1,
+ name_with_null_inside/1,
+ name_null_terminated/1,
+ stupid_names_req/1,
+
+ no_data/1,
+ one_byte/1,
+ two_bytes/1,
+ partial_packet/1,
+ zero_length/1,
+ too_large/1,
+ alive_req_too_small_1/1,
+ alive_req_too_small_2/1,
+ alive_req_too_large/1,
+
+ returns_valid_empty_extra/1,
+ returns_valid_populated_extra_with_nulls/1,
+
+ names_stdout/1,
+
+ buffer_overrun_1/1,
+ buffer_overrun_2/1,
+ no_nonlocal_register/1,
+ no_nonlocal_kill/1,
+ no_live_killing/1,
+
+ socket_reset_before_alive2_reply_is_written/1]).
% Port we use for testing
@@ -88,7 +82,7 @@
-define(EPMDARGS,"-packet_timeout 1").
-define(DUMMY_PORT, 1000). % Port number to register
- % not in real use.
+% not in real use.
% Timeouts etc inside test cases. Time is in milliseconds.
-define(CONN_RETRY, 4). % Times to retry connecting
@@ -111,7 +105,9 @@
%% all/1
%%
-suite() -> [{ct_hooks,[ts_install_cth]}].
+suite() ->
+ [{ct_hooks,[ts_install_cth]},
+ {timetrap, ?MEDIUM_TEST_TIMEOUT}].
all() ->
[register_name, register_name_ipv6,
@@ -134,105 +130,71 @@ groups() ->
[{buffer_overrun, [],
[buffer_overrun_1, buffer_overrun_2]}].
-init_per_suite(Config) ->
- Config.
-
-end_per_suite(_Config) ->
- ok.
-
-init_per_group(_GroupName, Config) ->
- Config.
-
-end_per_group(_GroupName, Config) ->
- Config.
-
-
%%
%% Run before and after each test case
%%
init_per_testcase(_Func, Config) ->
- Dog = test_server:timetrap(?MEDIUM_TEST_TIMEOUT),
cleanup(),
- [{watchdog, Dog} | Config].
+ Config.
-end_per_testcase(_Func, Config) ->
+end_per_testcase(_Func, _Config) ->
cleanup(),
- Dog = ?config(watchdog, Config),
- catch test_server:timetrap_cancel(Dog), % We may have canceled already
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-register_name(doc) ->
- ["Register a name"];
-register_name(suite) ->
- [];
+%% Register a name
register_name(Config) when is_list(Config) ->
- ?line ok = epmdrun(),
- ?line {ok,Sock} = register_node("foobar"),
- ?line ok = close(Sock), % Unregister
+ ok = epmdrun(),
+ {ok,Sock} = register_node("foobar"),
+ ok = close(Sock), % Unregister
ok.
-register_name_ipv6(doc) ->
- ["Register a name over IPv6"];
-register_name_ipv6(suite) ->
- [];
+%% Register a name over IPv6
register_name_ipv6(Config) when is_list(Config) ->
% Test if the host has an IPv6 loopback address
Res = gen_tcp:listen(0, [inet6, {ip, {0,0,0,0,0,0,0,1}}]),
case Res of
- {ok,LSock} ->
- gen_tcp:close(LSock),
- ?line ok = epmdrun(),
- ?line {ok,Sock} = register_node6("foobar6"),
- ?line ok = close(Sock), % Unregister
- ok;
- _Error ->
- {skip, "Host does not have an IPv6 loopback address"}
+ {ok,LSock} ->
+ gen_tcp:close(LSock),
+ ok = epmdrun(),
+ {ok,Sock} = register_node6("foobar6"),
+ ok = close(Sock), % Unregister
+ ok;
+ _Error ->
+ {skip, "Host does not have an IPv6 loopback address"}
end.
-register_names_1(doc) ->
- ["Register and unregister two nodes"];
-register_names_1(suite) ->
- [];
+%% Register and unregister two nodes
register_names_1(Config) when is_list(Config) ->
- ?line ok = epmdrun(),
- ?line {ok,Sock1} = register_node("foobar"),
- ?line {ok,Sock2} = register_node("foozap"),
- ?line ok = close(Sock1), % Unregister
- ?line ok = close(Sock2), % Unregister
+ ok = epmdrun(),
+ {ok,Sock1} = register_node("foobar"),
+ {ok,Sock2} = register_node("foozap"),
+ ok = close(Sock1), % Unregister
+ ok = close(Sock2), % Unregister
ok.
-register_names_2(doc) ->
- ["Register and unregister two nodes"];
-register_names_2(suite) ->
- [];
+%% Register and unregister two nodes
register_names_2(Config) when is_list(Config) ->
- ?line ok = epmdrun(),
- ?line {ok,Sock1} = register_node("foobar"),
- ?line {ok,Sock2} = register_node("foozap"),
- ?line ok = close(Sock2), % Unregister
- ?line ok = close(Sock1), % Unregister
+ ok = epmdrun(),
+ {ok,Sock1} = register_node("foobar"),
+ {ok,Sock2} = register_node("foozap"),
+ ok = close(Sock2), % Unregister
+ ok = close(Sock1), % Unregister
ok.
-register_duplicate_name(doc) ->
- ["Two nodes with the same name"];
-register_duplicate_name(suite) ->
- [];
+%% Two nodes with the same name
register_duplicate_name(Config) when is_list(Config) ->
- ?line ok = epmdrun(),
- ?line {ok,Sock} = register_node("foobar"),
- ?line error = register_node("foobar"),
- ?line ok = close(Sock), % Unregister
+ ok = epmdrun(),
+ {ok,Sock} = register_node("foobar"),
+ error = register_node("foobar"),
+ ok = close(Sock), % Unregister
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-unicode_name(doc) ->
- ["Check that we can register and lookup a unicode name"];
-unicode_name(suite) ->
- [];
+%% Check that we can register and lookup a unicode name
unicode_name(Config) when is_list(Config) ->
ok = epmdrun(),
NodeName = [16#1f608],
@@ -244,10 +206,7 @@ unicode_name(Config) when is_list(Config) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-long_unicode_name(doc) ->
- ["Check that we can register and lookup a long unicode name"];
-long_unicode_name(suite) ->
- [];
+%% Check that we can register and lookup a long unicode name
long_unicode_name(Config) when is_list(Config) ->
ok = epmdrun(),
BaseChar = 16#1f600,
@@ -273,101 +232,85 @@ register_node_v2(Port, NodeType, Prot, HVsn, LVsn, Name, Extra) ->
register_node_v2(Addr, Port, NodeType, Prot, HVsn, LVsn, Name, Extra) ->
Req = alive2_req(Port, NodeType, Prot, HVsn, LVsn, Name, Extra),
case send_req(Req, Addr) of
- {ok,Sock} ->
- case recv(Sock,4) of
- {ok, [?EPMD_ALIVE2_RESP,_Res=0,_C0,_C1]} ->
- {ok,Sock};
- Other ->
- test_server:format("recv on sock ~w: ~p~n",
- [Sock,Other]),
- error
- end;
- error ->
- error
+ {ok,Sock} ->
+ case recv(Sock,4) of
+ {ok, [?EPMD_ALIVE2_RESP,_Res=0,_C0,_C1]} ->
+ {ok,Sock};
+ Other ->
+ io:format("recv on sock ~w: ~p~n", [Sock,Other]),
+ error
+ end;
+ error ->
+ error
end.
% Internal function to fetch information about a node
port_please_v2(Name) ->
case send_req([?EPMD_PORT_PLEASE2_REQ,
- binary_to_list(unicode:characters_to_binary(Name))]) of
- {ok,Sock} ->
- case recv_until_sock_closes(Sock) of
- {ok, Resp} ->
- parse_port2_resp(Resp);
- Other ->
- test_server:format("recv on sock ~w: ~p~n",
- [Sock,Other]),
- error
- end;
- error ->
- error
+ binary_to_list(unicode:characters_to_binary(Name))]) of
+ {ok,Sock} ->
+ case recv_until_sock_closes(Sock) of
+ {ok, Resp} ->
+ parse_port2_resp(Resp);
+ Other ->
+ io:format("recv on sock ~w: ~p~n", [Sock,Other]),
+ error
+ end;
+ error ->
+ error
end.
parse_port2_resp(Resp) ->
case list_to_binary(Resp) of
- <<?EPMD_PORT2_RESP,Res,Port:16,NodeType,Prot,HVsn:16,LVsn:16,
- NLen:16,NodeName:NLen/binary,
- ELen:16,Extra:ELen/binary>> when Res =:= 0 ->
- {ok, #node_info{port=Port,node_type=NodeType,prot=Prot,
- hvsn=HVsn,lvsn=LVsn,
- node_name=unicode:characters_to_list(NodeName),
- extra=binary_to_list(Extra)}};
- _Other ->
- test_server:format("invalid port2 resp: ~p~n",
- [Resp]),
- error
+ <<?EPMD_PORT2_RESP,Res,Port:16,NodeType,Prot,HVsn:16,LVsn:16,
+ NLen:16,NodeName:NLen/binary,
+ ELen:16,Extra:ELen/binary>> when Res =:= 0 ->
+ {ok, #node_info{port=Port,node_type=NodeType,prot=Prot,
+ hvsn=HVsn,lvsn=LVsn,
+ node_name=unicode:characters_to_list(NodeName),
+ extra=binary_to_list(Extra)}};
+ _Other ->
+ io:format("invalid port2 resp: ~p~n", [Resp]),
+ error
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-name_with_null_inside(doc) ->
- ["Register a name with a null char in it"];
-name_with_null_inside(suite) ->
- [];
+%% Register a name with a null char in it
name_with_null_inside(Config) when is_list(Config) ->
- ?line ok = epmdrun(),
- ?line error = register_node("foo\000bar"),
+ ok = epmdrun(),
+ error = register_node("foo\000bar"),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-name_null_terminated(doc) ->
- ["Register a name with terminating null byte"];
-name_null_terminated(suite) ->
- [];
+%% Register a name with terminating null byte
name_null_terminated(Config) when is_list(Config) ->
- ?line ok = epmdrun(),
- ?line error = register_node("foobar\000"),
+ ok = epmdrun(),
+ error = register_node("foobar\000"),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-stupid_names_req(doc) ->
- ["Read names from epmd in a stupid way"];
-stupid_names_req(suite) ->
- [];
+%% Read names from epmd in a stupid way
stupid_names_req(Config) when is_list(Config) ->
- Dog = ?config(watchdog, Config),
- test_server:timetrap_cancel(Dog),
- LongDog = test_server:timetrap(?MEDIUM_TEST_TIMEOUT),
- ?line ok = epmdrun(),
- ?line [FirstConn | Conn] = register_many(1, ?REG_REPEAT_LIM, "foo"),
- ?line unregister_many([FirstConn]),
+ ok = epmdrun(),
+ [FirstConn | Conn] = register_many(1, ?REG_REPEAT_LIM, "foo"),
+ unregister_many([FirstConn]),
sleep(?MEDIUM_PAUSE),
- ?line ok = check_names(Conn),
- ?line ok = unregister_many(Conn),
- test_server:timetrap_cancel(LongDog),
+ ok = check_names(Conn),
+ ok = unregister_many(Conn),
ok.
check_names(Conn) ->
- ?line {ok,Sock} = connect_active(),
- ?line {ok,Reply} = do_get_names(Sock),
- ?line SortConn = lists:sort(Conn),
- ?line SortReply = lists:sort(Reply),
- ?line ok = check_names_cmp(SortConn, SortReply),
+ {ok,Sock} = connect_active(),
+ {ok,Reply} = do_get_names(Sock),
+ SortConn = lists:sort(Conn),
+ SortReply = lists:sort(Reply),
+ ok = check_names_cmp(SortConn, SortReply),
ok.
-
+
% Compare if the result was the same as was registered
@@ -381,43 +324,43 @@ check_names_cmp([{Name,Port,_Sock} | Conn], [{Name,Port} | Reply]) ->
-define(int16(X), [(X bsr 8) band 16#ff, X band 16#ff]).
-define(u32(X1,X2,X3,X4),
- (((X1) bsl 24) bor ((X2) bsl 16) bor ((X3) bsl 8) bor X4)).
+ (((X1) bsl 24) bor ((X2) bsl 16) bor ((X3) bsl 8) bor X4)).
do_get_names(Socket) ->
inet_tcp:send(Socket, [?int16(1),?EPMD_NAMES_REQ]),
receive
- {tcp, Socket, [P0,P1,P2,P3 | T]} ->
- EpmdPort = ?u32(P0,P1,P2,P3),
- if EpmdPort == ?PORT ->
- names_loop(Socket, T, []);
- true ->
- close(Socket),
- {error, address}
- end;
- {tcp_closed, Socket} ->
- {ok, []}
+ {tcp, Socket, [P0,P1,P2,P3 | T]} ->
+ EpmdPort = ?u32(P0,P1,P2,P3),
+ if EpmdPort == ?PORT ->
+ names_loop(Socket, T, []);
+ true ->
+ close(Socket),
+ {error, address}
+ end;
+ {tcp_closed, Socket} ->
+ {ok, []}
end.
names_loop(Socket, Acc, Ps) ->
receive
- {tcp, Socket, Bytes} ->
- {NAcc, NPs} = scan_names(Acc ++ Bytes, Ps),
- names_loop(Socket, NAcc, NPs);
- {tcp_closed, Socket} ->
- {_, NPs} = scan_names(Acc, Ps), % Really needed?
- {ok, NPs}
+ {tcp, Socket, Bytes} ->
+ {NAcc, NPs} = scan_names(Acc ++ Bytes, Ps),
+ names_loop(Socket, NAcc, NPs);
+ {tcp_closed, Socket} ->
+ {_, NPs} = scan_names(Acc, Ps), % Really needed?
+ {ok, NPs}
end.
scan_names(Buf, Ps) ->
case scan_line(Buf, []) of
- {Line, NBuf} ->
- case parse_line(Line) of
- {ok, Entry} ->
- scan_names(NBuf, [Entry | Ps]);
- error ->
- scan_names(NBuf, Ps)
- end;
- [] -> {Buf, Ps}
+ {Line, NBuf} ->
+ case parse_line(Line) of
+ {ok, Entry} ->
+ scan_names(NBuf, [Entry | Ps]);
+ error ->
+ scan_names(NBuf, Ps)
+ end;
+ [] -> {Buf, Ps}
end.
scan_line([$\n | Buf], Line) -> {lists:reverse(Line), Buf};
@@ -426,16 +369,16 @@ scan_line([], _) -> [].
parse_line([$n,$a,$m,$e,$ | Buf0]) ->
case parse_name(Buf0, []) of
- {Name, Buf1} ->
- case Buf1 of
- [$a,$t,$ ,$p,$o,$r,$t,$ | Buf2] ->
- case catch list_to_integer(Buf2) of
- {'EXIT', _} -> error;
- Port -> {ok, {Name, Port}}
- end;
- _ -> error
- end;
- error -> error
+ {Name, Buf1} ->
+ case Buf1 of
+ [$a,$t,$ ,$p,$o,$r,$t,$ | Buf2] ->
+ case catch list_to_integer(Buf2) of
+ {'EXIT', _} -> error;
+ Port -> {ok, {Name, Port}}
+ end;
+ _ -> error
+ end;
+ error -> error
end;
parse_line(_) -> error.
@@ -447,17 +390,11 @@ parse_name([], _Name) -> error.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-get_port_nr(doc) ->
- ["Register a name on a port and ask about port nr"];
-get_port_nr(suite) ->
- [];
+%% Register a name on a port and ask about port nr
get_port_nr(Config) when is_list(Config) ->
port_request([?EPMD_PORT_PLEASE2_REQ,"foo"]).
-slow_get_port_nr(doc) ->
- ["Register with slow write and ask about port nr"];
-slow_get_port_nr(suite) ->
- [];
+%% Register with slow write and ask about port nr
slow_get_port_nr(Config) when is_list(Config) ->
port_request([?EPMD_PORT_PLEASE2_REQ,d,$f,d,$o,d,$o]).
@@ -465,142 +402,127 @@ slow_get_port_nr(Config) when is_list(Config) ->
% Internal function used above
port_request(M) ->
- ?line ok = epmdrun(),
+ ok = epmdrun(),
Port = 1042,
- ?line {ok,RSock} = register_node("foo", Port),
- ?line {ok,Sock} = connect(),
- ?line ok = send(Sock,[size16(M),M]),
- ?line case recv_until_sock_closes(Sock) of
- {ok, Resp} ->
- ?line close(RSock),
- ?line {ok,Rec} = parse_port2_resp(Resp),
- ?line Port = Rec#node_info.port,
- ok;
- Other ->
- ?line close(RSock),
- ?line test_server:format("recv on sock ~w: ~p~n",
- [Sock,Other]),
- ?line throw({error,Other})
- end,
+ {ok,RSock} = register_node("foo", Port),
+ {ok,Sock} = connect(),
+ ok = send(Sock,[size16(M),M]),
+ case recv_until_sock_closes(Sock) of
+ {ok, Resp} ->
+ close(RSock),
+ {ok,Rec} = parse_port2_resp(Resp),
+ Port = Rec#node_info.port,
+ ok;
+ Other ->
+ close(RSock),
+ io:format("recv on sock ~w: ~p~n", [Sock,Other]),
+ throw({error,Other})
+ end,
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-unregister_others_name_1(doc) ->
- ["Unregister name of other node"];
-unregister_others_name_1(suite) ->
- [];
+%% Unregister name of other node
unregister_others_name_1(Config) when is_list(Config) ->
- ?line ok = epmdrun("-relaxed_command_check"),
- ?line {ok,RSock} = register_node("foo"),
- ?line {ok,Sock} = connect(),
+ ok = epmdrun("-relaxed_command_check"),
+ {ok,RSock} = register_node("foo"),
+ {ok,Sock} = connect(),
M = [?EPMD_STOP_REQ,"foo"],
- ?line ok = send(Sock,[size16(M),M]),
+ ok = send(Sock,[size16(M),M]),
R = "STOPPED",
- ?line {ok,R} = recv(Sock,length(R)),
- ?line ok = close(RSock),
+ {ok,R} = recv(Sock,length(R)),
+ ok = close(RSock),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-unregister_others_name_2(doc) ->
- ["Unregister name of other node"];
-unregister_others_name_2(suite) ->
- [];
+%% Unregister name of other node
unregister_others_name_2(Config) when is_list(Config) ->
- ?line ok = epmdrun("-relaxed_command_check"),
- ?line {ok,Sock} = connect(),
+ ok = epmdrun("-relaxed_command_check"),
+ {ok,Sock} = connect(),
M = [?EPMD_STOP_REQ,"xxx42"],
- ?line ok = send(Sock,[size16(M),M]),
+ ok = send(Sock,[size16(M),M]),
R = "NOEXIST",
- ?line {ok,R} = recv(Sock,length(R)),
+ {ok,R} = recv(Sock,length(R)),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-register_overflow(doc) ->
- ["Register too many, clean and redo 10 times"];
-register_overflow(suite) ->
- [];
+%% Register too many, clean and redo 10 times
register_overflow(Config) when is_list(Config) ->
- Dog = ?config(watchdog, Config),
- test_server:timetrap_cancel(Dog),
- LongDog = test_server:timetrap(?LONG_TEST_TIMEOUT),
- ?line ok = epmdrun(),
- ?line Conn = register_many(1, ?REG_REPEAT_LIM, "foo"),
+ ct:timetrap(?LONG_TEST_TIMEOUT),
+ ok = epmdrun(),
+ Conn = register_many(1, ?REG_REPEAT_LIM, "foo"),
Count = length(Conn),
- ?line ok = unregister_many(Conn),
+ ok = unregister_many(Conn),
sleep(?MEDIUM_PAUSE),
- test_server:format("Limit was ~w names, now reg/unreg all 10 times~n",
- [Count]),
- ?line ok = register_repeat(Count),
+ io:format("Limit was ~w names, now reg/unreg all 10 times~n", [Count]),
+ ok = register_repeat(Count),
sleep(?MEDIUM_PAUSE),
- ?line ok = rregister_repeat(Count),
+ ok = rregister_repeat(Count),
sleep(?MEDIUM_PAUSE),
- ?line ok = register_repeat(Count),
+ ok = register_repeat(Count),
sleep(?MEDIUM_PAUSE),
- ?line ok = rregister_repeat(Count),
+ ok = rregister_repeat(Count),
sleep(?MEDIUM_PAUSE),
- ?line ok = register_repeat(Count),
+ ok = register_repeat(Count),
sleep(?MEDIUM_PAUSE),
- ?line ok = rregister_repeat(Count),
+ ok = rregister_repeat(Count),
sleep(?MEDIUM_PAUSE),
- ?line ok = register_repeat(Count),
+ ok = register_repeat(Count),
sleep(?MEDIUM_PAUSE),
- ?line ok = rregister_repeat(Count),
+ ok = rregister_repeat(Count),
sleep(?MEDIUM_PAUSE),
- ?line ok = register_repeat(Count),
+ ok = register_repeat(Count),
sleep(?MEDIUM_PAUSE),
- ?line ok = rregister_repeat(Count),
- test_server:timetrap_cancel(LongDog),
+ ok = rregister_repeat(Count),
ok.
register_repeat(Count) ->
Conn = register_many(1, ?REG_REPEAT_LIM, "foo"),
ok = unregister_many(Conn),
if
- length(Conn) == Count ->
- ok;
- true ->
- error
+ length(Conn) == Count ->
+ ok;
+ true ->
+ error
end.
rregister_repeat(Count) ->
Conn = register_many(1, ?REG_REPEAT_LIM, "foo"),
ok = unregister_many(lists:reverse(Conn)),
if
- length(Conn) == Count ->
- ok;
- true ->
- error
+ length(Conn) == Count ->
+ ok;
+ true ->
+ error
end.
% Return count of successful registrations
register_many(I, N, _Prefix) when I > N ->
- test_server:format("Done with all ~n", []),
+ io:format("Done with all ~n", []),
[];
register_many(I, N, Prefix) ->
Name = gen_name(Prefix, I),
Port = ?DUMMY_PORT + I, % Just make it up
case register_node(Name, Port) of
- {ok,Sock} ->
- [{Name,Port,Sock} | register_many(I + 1, N, Prefix)];
- Any ->
- test_server:format("Can't register: ~w of 1..~w ~w~n",
- [Name,N,Any]),
- []
+ {ok,Sock} ->
+ [{Name,Port,Sock} | register_many(I + 1, N, Prefix)];
+ Any ->
+ test_server:format("Can't register: ~w of 1..~w ~w~n", [Name,N,Any]),
+ []
end.
unregister_many([]) ->
ok;
unregister_many([{Name,_Port,Sock} | Socks]) ->
case close(Sock) of
- ok ->
- unregister_many(Socks);
- Any ->
- test_server:format("Can't unregister: ~w reason ~w~n", [Name,Any]),
- error
+ ok ->
+ unregister_many(Socks);
+ Any ->
+ test_server:format("Can't unregister: ~w reason ~w~n", [Name,Any]),
+ error
end.
gen_name(Str,Int) ->
@@ -608,246 +530,203 @@ gen_name(Str,Int) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-no_data(doc) ->
- ["Open but send no data"];
-no_data(suite) ->
- [];
+%% Open but send no data
no_data(Config) when is_list(Config) ->
- ?line ok = epmdrun(),
- ?line {ok,Sock} = connect(),
+ ok = epmdrun(),
+ {ok,Sock} = connect(),
sleep(?LONG_PAUSE),
- ?line closed = recv(Sock,1),
+ closed = recv(Sock,1),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-one_byte(doc) ->
- ["Send one byte only"];
-one_byte(suite) ->
- [];
+%% Send one byte only
one_byte(Config) when is_list(Config) ->
- ?line ok = epmdrun(),
- ?line {ok,Sock} = connect(),
- ?line ok = send(Sock,[0]),
+ ok = epmdrun(),
+ {ok,Sock} = connect(),
+ ok = send(Sock,[0]),
sleep(?LONG_PAUSE),
- ?line closed = recv(Sock,1),
+ closed = recv(Sock,1),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-two_bytes(doc) ->
- ["Send packet size only"];
-two_bytes(suite) ->
- [];
+%% Send packet size only
two_bytes(Config) when is_list(Config) ->
- ?line ok = epmdrun(),
- ?line {ok,Sock} = connect(),
- ?line ok = send(Sock,[put16(3)]),
+ ok = epmdrun(),
+ {ok,Sock} = connect(),
+ ok = send(Sock,[put16(3)]),
sleep(?LONG_PAUSE),
- ?line closed = recv(Sock,1),
+ closed = recv(Sock,1),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-partial_packet(doc) ->
- ["Got only part of a packet"];
-partial_packet(suite) ->
- [];
+%% Got only part of a packet
partial_packet(Config) when is_list(Config) ->
- ?line ok = epmdrun(),
- ?line {ok,Sock} = connect(),
- ?line ok = send(Sock,[put16(100),"only a few bytes"]),
+ ok = epmdrun(),
+ {ok,Sock} = connect(),
+ ok = send(Sock,[put16(100),"only a few bytes"]),
sleep(?LONG_PAUSE),
- ?line closed = recv(Sock,1),
+ closed = recv(Sock,1),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-zero_length(doc) ->
- ["Invalid zero packet size"];
-zero_length(suite) ->
- [];
+%% Invalid zero packet size
zero_length(Config) when is_list(Config) ->
- ?line ok = epmdrun(),
- ?line {ok,Sock} = connect(),
- ?line ok = send(Sock,[0,0,0,0,0,0,0,0,0,0]),
+ ok = epmdrun(),
+ {ok,Sock} = connect(),
+ ok = send(Sock,[0,0,0,0,0,0,0,0,0,0]),
sleep(?MEDIUM_PAUSE),
- ?line closed = recv(Sock,1),
+ closed = recv(Sock,1),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-too_large(doc) ->
- ["Invalid large packet"];
-too_large(suite) ->
- [];
+%% Invalid large packet
too_large(Config) when is_list(Config) ->
- ?line ok = epmdrun(),
- ?line {ok,Sock} = connect(),
+ ok = epmdrun(),
+ {ok,Sock} = connect(),
Size = 63000,
M = lists:duplicate(Size, $z),
- ?line ok = send(Sock,[put16(Size),M]),
+ ok = send(Sock,[put16(Size),M]),
sleep(?MEDIUM_PAUSE),
% With such a large packet, even the writes can fail as the
% daemon closes before everything is delivered -> econnaborted
case recv(Sock,1) of
- closed -> ok;
- {error,econnaborted} -> ok;
- Other -> exit({unexpected,Other})
+ closed -> ok;
+ {error,econnaborted} -> ok;
+ Other -> exit({unexpected,Other})
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-alive_req_too_small_1(doc) ->
- ["Try to register but not enough data"];
-alive_req_too_small_1(suite) ->
- [];
+%% Try to register but not enough data
alive_req_too_small_1(Config) when is_list(Config) ->
- ?line ok = epmdrun(),
- ?line {ok,Sock} = connect(),
+ ok = epmdrun(),
+ {ok,Sock} = connect(),
M = [?EPMD_ALIVE2_REQ, put16(?DUMMY_PORT),$M,0, put16(5),
- put16(5),put16(0)],
- ?line ok = send(Sock, [size16(M), M]),
+ put16(5),put16(0)],
+ ok = send(Sock, [size16(M), M]),
sleep(?MEDIUM_PAUSE),
- ?line closed = recv(Sock,1),
+ closed = recv(Sock,1),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-alive_req_too_small_2(doc) ->
- ["Try to register but not enough data"];
-alive_req_too_small_2(suite) ->
- [];
+%% Try to register but not enough data
alive_req_too_small_2(Config) when is_list(Config) ->
- ?line ok = epmdrun(),
- ?line {ok,Sock} = connect(),
+ ok = epmdrun(),
+ {ok,Sock} = connect(),
M = [?EPMD_ALIVE2_REQ, put16(?DUMMY_PORT),$M,0, put16(5),
- put16(5)],
- ?line ok = send(Sock, [size16(M), M]),
+ put16(5)],
+ ok = send(Sock, [size16(M), M]),
sleep(?MEDIUM_PAUSE),
- ?line closed = recv(Sock,1),
+ closed = recv(Sock,1),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-alive_req_too_large(doc) ->
- ["Try to register but node name too large"];
-alive_req_too_large(suite) ->
- [];
+%% Try to register but node name too large
alive_req_too_large(Config) when is_list(Config) ->
- ?line ok = epmdrun(),
- ?line {ok,Sock} = connect(),
- L = [
- "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
- "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
- "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
- "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
- "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
- "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
- "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
- "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
- "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
- "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
- "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
- "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
- ],
+ ok = epmdrun(),
+ {ok,Sock} = connect(),
+ L = ["aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"],
S = length(lists:flatten(L)),
M = [?EPMD_ALIVE2_REQ, put16(?DUMMY_PORT),$M,0, put16(5),
- put16(5), put16(S),L,put16(0)],
- ?line ok = send(Sock, [size16(M), M]),
+ put16(5), put16(S),L,put16(0)],
+ ok = send(Sock, [size16(M), M]),
sleep(?MEDIUM_PAUSE),
- ?line {ok,[?EPMD_ALIVE2_RESP,1]} = recv(Sock,2),
+ {ok,[?EPMD_ALIVE2_RESP,1]} = recv(Sock,2),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-returns_valid_empty_extra(doc) ->
- ["Check that an empty extra is prefixed by a two byte length"];
-returns_valid_empty_extra(suite) ->
- [];
+%% Check that an empty extra is prefixed by a two byte length
returns_valid_empty_extra(Config) when is_list(Config) ->
- ?line ok = epmdrun(),
- ?line {ok,Sock} = register_node_v2(4711, 72, 0, 5, 5, "foo", []),
- ?line {ok,#node_info{extra=[]}} = port_please_v2("foo"),
- ?line ok = close(Sock),
+ ok = epmdrun(),
+ {ok,Sock} = register_node_v2(4711, 72, 0, 5, 5, "foo", []),
+ {ok,#node_info{extra=[]}} = port_please_v2("foo"),
+ ok = close(Sock),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-returns_valid_populated_extra_with_nulls(doc) ->
- ["Check a populated extra with embedded null characters"];
-returns_valid_populated_extra_with_nulls(suite) ->
- [];
+%% Check a populated extra with embedded null characters
returns_valid_populated_extra_with_nulls(Config) when is_list(Config) ->
- ?line ok = epmdrun(),
- ?line {ok,Sock} = register_node_v2(4711, 72, 0, 5, 5, "foo", "ABC\000\000"),
- ?line {ok,#node_info{extra="ABC\000\000"}} = port_please_v2("foo"),
- ?line ok = close(Sock),
+ ok = epmdrun(),
+ {ok,Sock} = register_node_v2(4711, 72, 0, 5, 5, "foo", "ABC\000\000"),
+ {ok,#node_info{extra="ABC\000\000"}} = port_please_v2("foo"),
+ ok = close(Sock),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-names_stdout(doc) ->
- ["Test that epmd -names prints registered nodes to stdout"];
-names_stdout(suite) ->
- [];
+%% Test that epmd -names prints registered nodes to stdout
names_stdout(Config) when is_list(Config) ->
- ?line ok = epmdrun(),
- ?line {ok,Sock} = register_node("foobar"),
- ?line ok = epmdrun("-names"),
- ?line {ok, Data} = receive {_Port, {data, D}} -> {ok, D}
- after 10000 -> {error, timeout}
- end,
- ?line {match,_} = re:run(Data, "^epmd: up and running", [multiline]),
- ?line {match,_} = re:run(Data, "^name foobar at port", [multiline]),
- ?line ok = close(Sock),
+ ok = epmdrun(),
+ {ok,Sock} = register_node("foobar"),
+ ok = epmdrun("-names"),
+ {ok, Data} = receive {_Port, {data, D}} -> {ok, D}
+ after 10000 -> {error, timeout}
+ end,
+ {match,_} = re:run(Data, "^epmd: up and running", [multiline]),
+ {match,_} = re:run(Data, "^name foobar at port", [multiline]),
+ ok = close(Sock),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-buffer_overrun_1(suite) ->
- [];
-buffer_overrun_1(doc) ->
- ["Test security vulnerability in fake extra lengths in alive2_req"];
+%% Test security vulnerability in fake extra lengths in alive2_req
buffer_overrun_1(Config) when is_list(Config) ->
- ?line ok = epmdrun(),
- ?line true = alltrue([hostile(N) || N <- lists:seq(1,10000)]),
+ ok = epmdrun(),
+ true = alltrue([hostile(N) || N <- lists:seq(1,10000)]),
ok.
-buffer_overrun_2(suite) ->
- [];
-buffer_overrun_2(doc) ->
- ["Test security vulnerability in fake extra lengths in alive2_req"];
+
+%% Test security vulnerability in fake extra lengths in alive2_req
buffer_overrun_2(Config) when is_list(Config) ->
- ?line ok = epmdrun(),
- ?line [false | Rest] = [hostile2(N) || N <- lists:seq(255*4,10000)],
- ?line true = alltrue(Rest),
+ ok = epmdrun(),
+ [false | Rest] = [hostile2(N) || N <- lists:seq(255*4,10000)],
+ true = alltrue(Rest),
ok.
hostile(N) ->
try
- Bin= <<$x:8,4747:16,$M:8,0:8,5:16,5:16,5:16,"gurka",N:16>>,
- S = size(Bin),
- {ok,E}=connect_sturdy(),
- gen_tcp:send(E,[<<S:16>>,Bin]),
- closed = recv(E,1),
- gen_tcp:close(E),
- true
+ Bin= <<$x:8,4747:16,$M:8,0:8,5:16,5:16,5:16,"gurka",N:16>>,
+ S = size(Bin),
+ {ok,E}=connect_sturdy(),
+ gen_tcp:send(E,[<<S:16>>,Bin]),
+ closed = recv(E,1),
+ gen_tcp:close(E),
+ true
catch
- _:_ ->
- false
+ _:_ ->
+ false
end.
hostile2(N) ->
try
- B2 = list_to_binary(lists:duplicate(N,255)),
- Bin= <<$x:8,4747:16,$M:8,0:8,5:16,5:16,5:16,"gurka",N:16,B2/binary>>,
- S = size(Bin),
- {ok,E}=connect_sturdy(),
- gen_tcp:send(E,[<<S:16>>,Bin]),
- Z = recv(E,2),
- gen_tcp:close(E),
- (Z =:= closed) or (Z =:= {ok, [$y,1]})
+ B2 = list_to_binary(lists:duplicate(N,255)),
+ Bin= <<$x:8,4747:16,$M:8,0:8,5:16,5:16,5:16,"gurka",N:16,B2/binary>>,
+ S = size(Bin),
+ {ok,E}=connect_sturdy(),
+ gen_tcp:send(E,[<<S:16>>,Bin]),
+ Z = recv(E,2),
+ gen_tcp:close(E),
+ (Z =:= closed) or (Z =:= {ok, [$y,1]})
catch
- _A:_B ->
- false
+ _A:_B ->
+ false
end.
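For context, both hostile/1 and hostile2/1 hand-craft ALIVE2_REQ packets whose trailing extra-length field does not match the data actually sent: hostile/1 claims N bytes of extra while sending none, and hostile2/1 sends N bytes of 16#ff instead. A well-formed request, for comparison, looks roughly like the sketch below (field values arbitrary, layout assumed from the packets built above):

    %% Illustrative well-formed ALIVE2_REQ ($x = 120):
    Name  = <<"gurka">>,
    Extra = <<>>,
    WellFormed = <<$x, 4747:16, $M, 0, 5:16, 5:16,
                   (byte_size(Name)):16, Name/binary,
                   (byte_size(Extra)):16, Extra/binary>>.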
alltrue([]) ->
@@ -857,134 +736,124 @@ alltrue([true|T]) ->
alltrue([_|_]) ->
false.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-no_nonlocal_register(suite) ->
- [];
-no_nonlocal_register(doc) ->
- ["Ensure that we cannot register throug a nonlocal connection"];
+
+%% Ensure that we cannot register through a nonlocal connection
no_nonlocal_register(Config) when is_list(Config) ->
- ?line case {os:find_executable("ssh"),ct:get_config(ssh_proxy_host)} of
- {SSH,Name} when is_list(Name), is_list(SSH) ->
- do_no_nonlocal_register(Config,Name);
- {false,_} ->
- {skip, "No ssh command found to create proxy"};
- _ ->
- {skip, "No ssh_proxy_host configured in ts.config"}
- end.
+ case {os:find_executable("ssh"),ct:get_config(ssh_proxy_host)} of
+ {SSH,Name} when is_list(Name), is_list(SSH) ->
+ do_no_nonlocal_register(Config,Name);
+ {false,_} ->
+ {skip, "No ssh command found to create proxy"};
+ _ ->
+ {skip, "No ssh_proxy_host configured in ts.config"}
+ end.
do_no_nonlocal_register(Config,SSHHost) when is_list(Config) ->
- ?line ok = epmdrun(),
- ?line ProxyPort = proxy_port(),
- ?line ok = ssh_proxy(SSHHost,ProxyPort),
+ ok = epmdrun(),
+ ProxyPort = proxy_port(),
+ ok = ssh_proxy(SSHHost,ProxyPort),
Res = try
- ?line Name = "gurka_"
- %++
- %integer_to_list(A1)++"_"++
- %integer_to_list(A2)++"_"++
- %integer_to_list(A3)++"_"++
- %integer_to_list(A4)
- ,
- ?line Bname = list_to_binary(Name),
- ?line NameS = byte_size(Bname),
- ?line Bin= <<$x:8,4747:16,$M:8,0:8,5:16,
- 5:16,NameS:16,Bname/binary,
- 0:16>>,
- ?line S = size(Bin),
- ?line {ok, E} = connect("localhost",ProxyPort,passive),
- ?line gen_tcp:send(E,[<<S:16>>,Bin]),
- ?line closed = recv(E,1),
- ?line gen_tcp:close(E),
- true
- catch
- _:_ ->
- false
- end,
+ Name = "gurka_"
+ %++
+ %integer_to_list(A1)++"_"++
+ %integer_to_list(A2)++"_"++
+ %integer_to_list(A3)++"_"++
+ %integer_to_list(A4)
+ ,
+ Bname = list_to_binary(Name),
+ NameS = byte_size(Bname),
+ Bin= <<$x:8,4747:16,$M:8,0:8,5:16,
+ 5:16,NameS:16,Bname/binary,
+ 0:16>>,
+ S = size(Bin),
+ {ok, E} = connect("localhost",ProxyPort,passive),
+ gen_tcp:send(E,[<<S:16>>,Bin]),
+ closed = recv(E,1),
+ gen_tcp:close(E),
+ true
+ catch
+ _:_ ->
+ false
+ end,
%erlang:display(Res),
true = Res,
ok.
-no_nonlocal_kill(suite) ->
- [];
-no_nonlocal_kill(doc) ->
- ["Ensure that we cannot kill through nonlocal connection"];
+%% Ensure that we cannot kill through a nonlocal connection
no_nonlocal_kill(Config) when is_list(Config) ->
- ?line case {os:find_executable("ssh"),ct:get_config(ssh_proxy_host)} of
- {SSH,Name} when is_list(Name), is_list(SSH) ->
- do_no_nonlocal_kill(Config,Name);
- {false,_} ->
- {skip, "No ssh command found to create proxy"};
- _ ->
- {skip, "No ssh_proxy_host configured in ts.config"}
- end.
+ case {os:find_executable("ssh"),ct:get_config(ssh_proxy_host)} of
+ {SSH,Name} when is_list(Name), is_list(SSH) ->
+ do_no_nonlocal_kill(Config,Name);
+ {false,_} ->
+ {skip, "No ssh command found to create proxy"};
+ _ ->
+ {skip, "No ssh_proxy_host configured in ts.config"}
+ end.
do_no_nonlocal_kill(Config,SSHHost) when is_list(Config) ->
- ?line ok = epmdrun(),
- ?line ProxyPort = proxy_port(),
- ?line ok = ssh_proxy(SSHHost,ProxyPort),
+ ok = epmdrun(),
+ ProxyPort = proxy_port(),
+ ok = ssh_proxy(SSHHost,ProxyPort),
Res = try
- {ok, E} = connect("localhost",ProxyPort,passive),
- M = [?EPMD_KILL_REQ],
- send(E, [size16(M), M]),
- closed = recv(E,2),
- gen_tcp:close(E),
- sleep(?MEDIUM_PAUSE),
- {ok, E2} = connect("localhost",ProxyPort,passive),
- gen_tcp:close(E2),
- true
- catch
- _:_ ->
- false
- end,
+ {ok, E} = connect("localhost",ProxyPort,passive),
+ M = [?EPMD_KILL_REQ],
+ send(E, [size16(M), M]),
+ closed = recv(E,2),
+ gen_tcp:close(E),
+ sleep(?MEDIUM_PAUSE),
+ {ok, E2} = connect("localhost",ProxyPort,passive),
+ gen_tcp:close(E2),
+ true
+ catch
+ _:_ ->
+ false
+ end,
%erlang:display(Res),
true = Res,
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-no_live_killing(doc) ->
- ["Dont allow killing with live nodes or any unregistering w/o -relaxed_command_check"];
-no_live_killing(suite) ->
- [];
+
+%% Don't allow killing with live nodes or any unregistering w/o -relaxed_command_check
no_live_killing(Config) when is_list(Config) ->
- ?line ok = epmdrun(),
- ?line {ok,RSock} = register_node("foo"),
- ?line {ok,Sock} = connect(),
- ?line M = [?EPMD_KILL_REQ],
- ?line ok = send(Sock,[size16(M),M]),
- ?line {ok,"NO"} = recv(Sock,2),
- ?line close(Sock),
- ?line {ok,Sock2} = connect(),
- ?line M2 = [?EPMD_STOP_REQ,"foo"],
- ?line ok = send(Sock2,[size16(M2),M2]),
- ?line closed = recv(Sock2,1),
- ?line close(Sock2),
- ?line close(RSock),
- ?line sleep(?MEDIUM_PAUSE),
- ?line {ok,Sock3} = connect(),
- ?line M3 = [?EPMD_KILL_REQ],
- ?line ok = send(Sock3,[size16(M3),M3]),
- ?line {ok,"OK"} = recv(Sock3,2),
- ?line close(Sock3),
+ ok = epmdrun(),
+ {ok,RSock} = register_node("foo"),
+ {ok,Sock} = connect(),
+ M = [?EPMD_KILL_REQ],
+ ok = send(Sock,[size16(M),M]),
+ {ok,"NO"} = recv(Sock,2),
+ close(Sock),
+ {ok,Sock2} = connect(),
+ M2 = [?EPMD_STOP_REQ,"foo"],
+ ok = send(Sock2,[size16(M2),M2]),
+ closed = recv(Sock2,1),
+ close(Sock2),
+ close(RSock),
+ sleep(?MEDIUM_PAUSE),
+ {ok,Sock3} = connect(),
+ M3 = [?EPMD_KILL_REQ],
+ ok = send(Sock3,[size16(M3),M3]),
+ {ok,"OK"} = recv(Sock3,2),
+ close(Sock3),
ok.
-socket_reset_before_alive2_reply_is_written(doc) ->
- ["Check for regression - don't make zombie from node which "
- "sends TCP RST at wrong time"];
-socket_reset_before_alive2_reply_is_written(suite) ->
- [];
+%% Check for regression - don't make a zombie from a node which
+%% sends a TCP RST at the wrong time
socket_reset_before_alive2_reply_is_written(Config) when is_list(Config) ->
%% - delay_write for easier triggering of race condition
%% - relaxed_command_check for graceful shutdown of epmd even if there
%% is a stuck node.
- ?line ok = epmdrun("-delay_write 1 -relaxed_command_check"),
+ ok = epmdrun("-delay_write 1 -relaxed_command_check"),
%% We can't use send_req/1 directly as we want to do inet:setopts/2
%% on our socket.
- ?line {ok, Sock} = connect(),
+ {ok, Sock} = connect(),
%% Issuing close/1 on such a socket will result in an immediate RST packet.
- ?line ok = inet:setopts(Sock, [{linger, {true, 0}}]),
+ ok = inet:setopts(Sock, [{linger, {true, 0}}]),
Req = alive2_req(4711, 77, 0, 5, 5, "test", []),
- ?line ok = send(Sock, [size16(Req), Req]),
+ ok = send(Sock, [size16(Req), Req]),
timer:sleep(500), %% Wait for the first 1/2 of delay_write before closing
- ?line ok = close(Sock),
+ ok = close(Sock),
timer:sleep(500 + ?SHORT_PAUSE), %% Wait for the other 1/2 of delay_write
@@ -992,11 +861,11 @@ socket_reset_before_alive2_reply_is_written(Config) when is_list(Config) ->
%% Should be removed when this issue is fixed there.
timer:sleep(1000),
- ?line {ok, SockForNames} = connect_active(),
+ {ok, SockForNames} = connect_active(),
%% And there should be no stuck nodes
- ?line {ok, []} = do_get_names(SockForNames),
- ?line ok = close(SockForNames),
+ {ok, []} = do_get_names(SockForNames),
+ ok = close(SockForNames),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -1005,14 +874,14 @@ socket_reset_before_alive2_reply_is_written(Config) when is_list(Config) ->
cleanup() ->
sleep(?MEDIUM_PAUSE),
case connect() of
- {ok,Sock} ->
- M = [?EPMD_KILL_REQ],
- send(Sock, [size16(M), M]),
- recv(Sock,length("OK")),
- close(Sock),
- sleep(?MEDIUM_PAUSE);
- _ ->
- true
+ {ok,Sock} ->
+ M = [?EPMD_KILL_REQ],
+ send(Sock, [size16(M), M]),
+ recv(Sock,length("OK")),
+ close(Sock),
+ sleep(?MEDIUM_PAUSE);
+ _ ->
+ true
end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -1022,11 +891,11 @@ proxy_port() ->
?PORT+1.
ssh_proxy(SSHHost,ProxyPort) ->
- ?line Host = lists:nth(2,string:tokens(atom_to_list(node()),"@")),
+ Host = lists:nth(2,string:tokens(atom_to_list(node()),"@")),
% Requires proxy to be a unix host with the command 'read' accessible
- ?line osrun("ssh -L "++integer_to_list(ProxyPort)++":"++Host++":"
- ++integer_to_list(?PORT)++" "++SSHHost++" read").
-
+ osrun("ssh -L "++integer_to_list(ProxyPort)++":"++Host++":"
+ ++integer_to_list(?PORT)++" "++SSHHost++" read").
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -1036,21 +905,21 @@ epmdrun() ->
epmdrun([]).
epmdrun(Args) ->
case os:find_executable(epmd) of
- false ->
- {error, {could_not_find_epmd_in_path}};
- Path ->
- epmdrun(Path,Args)
+ false ->
+ {error, {could_not_find_epmd_in_path}};
+ Path ->
+ epmdrun(Path,Args)
end.
epmdrun(Epmd,Args0) ->
- %% test_server:format("epmdrun() => Epmd = ~p",[Epmd]),
+ %% test_server:format("epmdrun() => Epmd = ~p",[Epmd]),
Args = case Args0 of
- [] ->
- [];
- O ->
- " "++O
- end,
- osrun("\"" ++ Epmd ++ "\"" ++ " " ?EPMDARGS " -port " ++ integer_to_list(?PORT) ++ Args).
+ [] ->
+ [];
+ O ->
+ " "++O
+ end,
+ osrun("\"" ++ Epmd ++ "\"" ++ " " ?EPMDARGS " -port " ++ integer_to_list(?PORT) ++ Args).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -1059,7 +928,7 @@ epmdrun(Epmd,Args0) ->
osrun(Cmd) ->
_ = open_port({spawn, Cmd}, []),
ok.
-
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Wrappers of TCP functions
@@ -1084,16 +953,16 @@ connect(Addr, Port, Mode) ->
connect(Addr, Port, Mode, ?CONN_SLEEP, ?CONN_RETRY).
connect(Addr, Port, Mode, Sleep, Retry) ->
case connect_repeat(Addr, Retry, Port, Mode, Sleep) of
- {ok,Sock} ->
- {ok,Sock};
- {error,timeout} ->
- timeout;
- {error,Reason} ->
- test_server:format("connect: error: ~w~n",[Reason]),
- error;
- Any ->
- test_server:format("connect: unknown message: ~w~n",[Any]),
- exit(1)
+ {ok,Sock} ->
+ {ok,Sock};
+ {error,timeout} ->
+ timeout;
+ {error,Reason} ->
+ test_server:format("connect: error: ~w~n",[Reason]),
+ error;
+ Any ->
+ test_server:format("connect: unknown message: ~w~n",[Any]),
+ exit(1)
end.
@@ -1104,33 +973,33 @@ connect_repeat(Addr, 1, Port, Mode, _Sleep) ->
connect_mode(Addr,Port, Mode);
connect_repeat(Addr,Retry, Port, Mode, Sleep) ->
case connect_mode(Addr,Port, Mode) of
- {ok,Sock} ->
- {ok,Sock};
- {error,Reason} ->
- test_server:format("connect: error: ~w~n",[Reason]),
- timer:sleep(Sleep),
- connect_repeat(Addr, Retry - 1, Port, Mode, Sleep);
- Any ->
- test_server:format("connect: unknown message: ~w~n",[Any]),
- exit(1)
+ {ok,Sock} ->
+ {ok,Sock};
+ {error,Reason} ->
+ test_server:format("connect: error: ~w~n",[Reason]),
+ timer:sleep(Sleep),
+ connect_repeat(Addr, Retry - 1, Port, Mode, Sleep);
+ Any ->
+ test_server:format("connect: unknown message: ~w~n",[Any]),
+ exit(1)
end.
connect_mode(Addr,Port, active) ->
gen_tcp:connect(Addr, Port, [{packet, 0}], ?CONN_TIMEOUT);
connect_mode(Addr, Port, passive) ->
gen_tcp:connect(Addr, Port, [{packet, 0}, {active, false}],
- ?CONN_TIMEOUT).
+ ?CONN_TIMEOUT).
close(Sock) ->
case gen_tcp:close(Sock) of
- {error,_} ->
- error;
- ok ->
- ok;
- Any ->
- test_server:format("unknown message: ~w~n",[Any]),
- exit(1)
+ {error,_} ->
+ error;
+ ok ->
+ ok;
+ Any ->
+ test_server:format("unknown message: ~w~n",[Any]),
+ exit(1)
end.
recv(Sock, Len) ->
@@ -1138,19 +1007,19 @@ recv(Sock, Len) ->
recv(Sock, Len, Timeout) ->
case gen_tcp:recv(Sock, Len, Timeout) of
- {ok,[]} -> % Should not be the case
- recv(Sock, 1, 1); % any longer
- {ok,Data} ->
- {ok,Data};
- {error,timeout} ->
- timeout;
- {error,closed} ->
- closed;
- {error,_}=Error ->
- Error;
- Any ->
- test_server:format("unknown message: ~w~n",[Any]),
- exit(1)
+ {ok,[]} -> % Should not be the case
+ recv(Sock, 1, 1); % any longer
+ {ok,Data} ->
+ {ok,Data};
+ {error,timeout} ->
+ timeout;
+ {error,closed} ->
+ closed;
+ {error,_}=Error ->
+ Error;
+ Any ->
+ test_server:format("unknown message: ~w~n",[Any]),
+ exit(1)
end.
%% Send data to socket. The list can be non flat and contain
@@ -1159,12 +1028,12 @@ recv(Sock, Len, Timeout) ->
send(Sock, SendSpec) ->
case send(SendSpec, [], Sock) of
- {ok,[]} ->
- ok;
- {ok,RevBytes} ->
- send_direct(Sock, lists:reverse(RevBytes));
- Any ->
- Any
+ {ok,[]} ->
+ ok;
+ {ok,RevBytes} ->
+ send_direct(Sock, lists:reverse(RevBytes));
+ Any ->
+ Any
end.
@@ -1179,54 +1048,54 @@ send([Byte | Spec], RevBytes, Sock) when is_integer(Byte) ->
send(Spec, [Byte | RevBytes], Sock);
send([List | Spec], RevBytes, Sock) when is_list(List) ->
case send(List, RevBytes, Sock) of
- {ok,Left} ->
- send(Spec, Left, Sock);
- Other ->
- Other
+ {ok,Left} ->
+ send(Spec, Left, Sock);
+ Other ->
+ Other
end;
send([d | Spec], RevBytes, Sock) ->
send([{d,1000} | Spec], RevBytes, Sock);
send([{d,S} | Spec], RevBytes, Sock) ->
case send_direct(Sock, lists:reverse(RevBytes)) of
- ok ->
- timer:sleep(S),
- send(Spec, [], Sock);
- Any ->
- Any
+ ok ->
+ timer:sleep(S),
+ send(Spec, [], Sock);
+ Any ->
+ Any
end.
%%%%
send_direct(Sock, Bytes) ->
case gen_tcp:send(Sock, Bytes) of
- ok ->
- ok;
- {error, closed} ->
- closed;
- {error, _Reason} ->
- error;
- Any ->
- test_server:format("unknown message: ~w~n",[Any]),
- Any
+ ok ->
+ ok;
+ {error, closed} ->
+ closed;
+ {error, _Reason} ->
+ error;
+ Any ->
+ test_server:format("unknown message: ~w~n",[Any]),
+ Any
end.
send_req(Req) ->
send_req(Req, "localhost").
send_req(Req, Addr) ->
case connect(Addr) of
- {ok,Sock} ->
- case send(Sock, [size16(Req), Req]) of
- ok ->
- {ok,Sock};
- Other ->
- test_server:format("Failed to send ~w on sock ~w: ~w~n",
- [Req,Sock,Other]),
- error
- end;
- Other ->
- test_server:format("Connect failed when sending ~w: ~p~n",
- [Req, Other]),
- error
+ {ok,Sock} ->
+ case send(Sock, [size16(Req), Req]) of
+ ok ->
+ {ok,Sock};
+ Other ->
+ test_server:format("Failed to send ~w on sock ~w: ~w~n",
+ [Req,Sock,Other]),
+ error
+ end;
+ Other ->
+ test_server:format("Connect failed when sending ~w: ~p~n",
+ [Req, Other]),
+ error
end.
recv_until_sock_closes(Sock) ->
@@ -1234,12 +1103,12 @@ recv_until_sock_closes(Sock) ->
recv_until_sock_closes_2(Sock,AccData) ->
case recv(Sock,0) of
- {ok,Data} ->
- recv_until_sock_closes_2(Sock,AccData++Data);
- closed ->
- {ok,AccData};
- Other ->
- Other
+ {ok,Data} ->
+ recv_until_sock_closes_2(Sock,AccData++Data);
+ closed ->
+ {ok,AccData};
+ Other ->
+ Other
end.
sleep(MilliSeconds) ->
@@ -1261,7 +1130,7 @@ flat_count([H|T], N) when is_list(H) ->
flat_count([_|T], N) ->
flat_count(T, N);
flat_count([], N) -> N.
-
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
diff --git a/erts/etc/common/erlexec.c b/erts/etc/common/erlexec.c
index 5a5021b003..42da05b1f7 100644
--- a/erts/etc/common/erlexec.c
+++ b/erts/etc/common/erlexec.c
@@ -150,6 +150,9 @@ static char *plush_val_switches[] = {
"ms",
"mbs",
"pds",
+ "max",
+ "maxk",
+ "maxel",
"mqd",
"",
NULL
@@ -842,7 +845,6 @@ int main(int argc, char **argv)
if (argv[i][3] != '\0')
goto the_default;
}
-#ifdef ERTS_DIRTY_SCHEDULERS
else if (argv[i][2] == 'D') {
char* type = argv[i]+3;
if (strncmp(type, "cpu", 3) != 0 &&
@@ -854,7 +856,6 @@ int main(int argc, char **argv)
(argv[i][3] == 'i' && argv[i][5] != '\0'))
goto the_default;
}
-#endif
else if (argv[i][2] != '\0')
goto the_default;
if (i+1 >= argc)
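The switch-table addition above lets erlexec accept values for the new +hmax, +hmaxk and +hmaxel emulator flags (default maximum heap size, kill behaviour and error_logger behaviour), and removing the ERTS_DIRTY_SCHEDULERS guard means the +SD dirty-scheduler switches are always recognized. A hedged sketch of observing the effect from a shell started with, say, erl +hmax 1048576 (flag names inferred from the switch table; the value shape is given by the max_heap_size() type added further down):

    %% Illustrative: read back the default limit set on the command line.
    {max_heap_size, Default} = erlang:system_info(max_heap_size),
    io:format("default max_heap_size: ~p~n", [Default]).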
diff --git a/erts/include/erl_native_features_config.h.in b/erts/include/erl_native_features_config.h.in
deleted file mode 100644
index 59a5dde09e..0000000000
--- a/erts/include/erl_native_features_config.h.in
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * %CopyrightBegin%
- *
- * Copyright Ericsson AB 2004-2016. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- * %CopyrightEnd%
- */
-
-/* Dirty scheduler support */
-#undef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
diff --git a/erts/lib_src/Makefile.in b/erts/lib_src/Makefile.in
index 6e2f236bdf..601f3917a8 100644
--- a/erts/lib_src/Makefile.in
+++ b/erts/lib_src/Makefile.in
@@ -465,7 +465,6 @@ RELSYSDIR = $(RELEASE_PATH)/erts-$(VSN)
RELEASE_INCLUDES= \
$(ERTS_INCL)/erl_memory_trace_parser.h \
$(ERTS_INCL)/$(TARGET)/erl_int_sizes_config.h \
- $(ERTS_INCL)/$(TARGET)/erl_native_features_config.h \
$(ERTS_INCL)/erl_fixed_size_int_types.h
RELEASE_LIBS=$(ERTS_LIBS)
diff --git a/erts/preloaded/ebin/erl_prim_loader.beam b/erts/preloaded/ebin/erl_prim_loader.beam
index 3e82b8e20e..de2693472a 100644
--- a/erts/preloaded/ebin/erl_prim_loader.beam
+++ b/erts/preloaded/ebin/erl_prim_loader.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erl_tracer.beam b/erts/preloaded/ebin/erl_tracer.beam
index 67d96d0f0d..69804540c9 100644
--- a/erts/preloaded/ebin/erl_tracer.beam
+++ b/erts/preloaded/ebin/erl_tracer.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erlang.beam b/erts/preloaded/ebin/erlang.beam
index 321e4e7e1b..8379bf1768 100644
--- a/erts/preloaded/ebin/erlang.beam
+++ b/erts/preloaded/ebin/erlang.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erts_code_purger.beam b/erts/preloaded/ebin/erts_code_purger.beam
index 0c667663fd..b1da0aa861 100644
--- a/erts/preloaded/ebin/erts_code_purger.beam
+++ b/erts/preloaded/ebin/erts_code_purger.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erts_internal.beam b/erts/preloaded/ebin/erts_internal.beam
index ed2e328f5b..cc4f3dbdaf 100644
--- a/erts/preloaded/ebin/erts_internal.beam
+++ b/erts/preloaded/ebin/erts_internal.beam
Binary files differ
diff --git a/erts/preloaded/ebin/init.beam b/erts/preloaded/ebin/init.beam
index 23ebd2cad9..7b5797e90a 100644
--- a/erts/preloaded/ebin/init.beam
+++ b/erts/preloaded/ebin/init.beam
Binary files differ
diff --git a/erts/preloaded/ebin/otp_ring0.beam b/erts/preloaded/ebin/otp_ring0.beam
index b63fa24848..f893b9c181 100644
--- a/erts/preloaded/ebin/otp_ring0.beam
+++ b/erts/preloaded/ebin/otp_ring0.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_eval.beam b/erts/preloaded/ebin/prim_eval.beam
index 7c8fdc82c0..e4ce601c03 100644
--- a/erts/preloaded/ebin/prim_eval.beam
+++ b/erts/preloaded/ebin/prim_eval.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_file.beam b/erts/preloaded/ebin/prim_file.beam
index d4559bc9ed..babba3081f 100644
--- a/erts/preloaded/ebin/prim_file.beam
+++ b/erts/preloaded/ebin/prim_file.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_inet.beam b/erts/preloaded/ebin/prim_inet.beam
index 5a0678632e..0521060e34 100644
--- a/erts/preloaded/ebin/prim_inet.beam
+++ b/erts/preloaded/ebin/prim_inet.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_zip.beam b/erts/preloaded/ebin/prim_zip.beam
index 3d32dd8449..e1faca7d96 100644
--- a/erts/preloaded/ebin/prim_zip.beam
+++ b/erts/preloaded/ebin/prim_zip.beam
Binary files differ
diff --git a/erts/preloaded/ebin/zlib.beam b/erts/preloaded/ebin/zlib.beam
index ced9f9c952..5d3cbc1b36 100644
--- a/erts/preloaded/ebin/zlib.beam
+++ b/erts/preloaded/ebin/zlib.beam
Binary files differ
diff --git a/erts/preloaded/src/erl_tracer.erl b/erts/preloaded/src/erl_tracer.erl
index de1e9ca01e..fe15812535 100644
--- a/erts/preloaded/src/erl_tracer.erl
+++ b/erts/preloaded/src/erl_tracer.erl
@@ -41,10 +41,14 @@ on_load() ->
%%% NIF placeholders
%%%
--spec enabled(Tag :: trace_tag() | seq_trace | trace_status,
+-spec enabled(Tag :: trace_status,
TracerState :: tracer_state(),
Tracee :: tracee()) ->
- trace | discard | remove.
+ trace | remove;
+ (Tag :: trace_tag() | seq_trace,
+ TracerState :: tracer_state(),
+ Tracee :: tracee()) ->
+ trace | discard.
enabled(_, _, _) ->
erlang:nif_error(nif_not_loaded).
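The refined spec above splits the result of enabled/3 by tag: for trace_status the answer is trace or remove (detach the tracer), while for ordinary trace tags and seq_trace it is trace or discard. The shape of a callback module honouring that contract is sketched below purely to illustrate the return values; a real tracer module implements these callbacks as NIFs, as erl_tracer itself does, and the module name here is hypothetical:

    -module(sample_tracer).
    -behaviour(erl_tracer).
    -export([enabled/3, trace/5]).

    enabled(trace_status, _TracerState, _Tracee) -> trace;    %% or 'remove'
    enabled(_Tag, _TracerState, _Tracee)         -> trace.    %% or 'discard'

    trace(_Tag, _TracerState, _Tracee, _TraceTerm, _Opts) ->
        ok.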
diff --git a/erts/preloaded/src/erlang.erl b/erts/preloaded/src/erlang.erl
index 20a64e81b4..3d152c4e92 100644
--- a/erts/preloaded/src/erlang.erl
+++ b/erts/preloaded/src/erlang.erl
@@ -1749,16 +1749,16 @@ trace_delivered(_Tracee) ->
erlang:nif_error(undefined).
%% trace_info/2
--spec erlang:trace_info(PidPortOrFunc, Item) -> Res when
- PidPortOrFunc :: pid() | port() | new | new_processes | new_ports
- | {Module, Function, Arity} | on_load,
+-spec erlang:trace_info(PidPortFuncEvent, Item) -> Res when
+ PidPortFuncEvent :: pid() | port() | new | new_processes | new_ports
+ | {Module, Function, Arity} | on_load | send | 'receive',
Module :: module(),
Function :: atom(),
Arity :: arity(),
Item :: flags | tracer | traced | match_spec
| meta | meta_match_spec | call_count | call_time | all,
Res :: trace_info_return().
-trace_info(_PidPortOrFunc, _Item) ->
+trace_info(_PidPortFuncEvent, _Item) ->
erlang:nif_error(undefined).
%% trunc/1
@@ -2073,6 +2073,9 @@ open_port(PortName, PortSettings) ->
(min_bin_vheap_size, MinBinVHeapSize) -> OldMinBinVHeapSize when
MinBinVHeapSize :: non_neg_integer(),
OldMinBinVHeapSize :: non_neg_integer();
+ (max_heap_size, MaxHeapSize) -> OldMaxHeapSize when
+ MaxHeapSize :: max_heap_size(),
+ OldMaxHeapSize :: max_heap_size();
(message_queue_data, MQD) -> OldMQD when
MQD :: message_queue_data(),
OldMQD :: message_queue_data();
@@ -2154,6 +2157,7 @@ process_flag(_Flag, _Value) ->
{messages, MessageQueue :: [term()]} |
{min_heap_size, MinHeapSize :: non_neg_integer()} |
{min_bin_vheap_size, MinBinVHeapSize :: non_neg_integer()} |
+ {max_heap_size, MaxHeapSize :: max_heap_size()} |
{monitored_by, Pids :: [pid()]} |
{monitors,
Monitors :: [{process, Pid :: pid() |
@@ -2238,6 +2242,7 @@ setelement(_Index, _Tuple1, _Value) ->
| {priority, Level :: priority_level()}
| {fullsweep_after, Number :: non_neg_integer()}
| {min_heap_size, Size :: non_neg_integer()}
+ | {max_heap_size, Size :: max_heap_size()}
| {min_bin_vheap_size, VSize :: non_neg_integer()}.
spawn_opt(_Tuple) ->
erlang:nif_error(undefined).
@@ -2330,6 +2335,9 @@ subtract(_,_) ->
OldMinBinVHeapSize when
MinBinVHeapSize :: non_neg_integer(),
OldMinBinVHeapSize :: non_neg_integer();
+ (max_heap_size, MaxHeapSize) -> OldMaxHeapSize when
+ MaxHeapSize :: max_heap_size(),
+ OldMaxHeapSize :: max_heap_size();
(multi_scheduling, BlockState) -> OldBlockState when
BlockState :: block | unblock | block_normal | unblock_normal,
OldBlockState :: blocked | disabled | enabled;
@@ -2381,7 +2389,7 @@ tl(_List) ->
[{[term()] | '_' ,[term()],[term()]}].
-spec erlang:trace_pattern(MFA, MatchSpec) -> non_neg_integer() when
- MFA :: trace_pattern_mfa(),
+ MFA :: trace_pattern_mfa() | send | 'receive',
MatchSpec :: (MatchSpecList :: trace_match_spec())
| boolean()
| restart
@@ -2403,7 +2411,13 @@ trace_pattern(MFA, MatchSpec) ->
call_count |
call_time.
--spec erlang:trace_pattern(MFA, MatchSpec, FlagList) -> non_neg_integer() when
+-spec erlang:trace_pattern(send, MatchSpec, []) -> non_neg_integer() when
+ MatchSpec :: (MatchSpecList :: trace_match_spec())
+ | boolean();
+ ('receive', MatchSpec, []) -> non_neg_integer() when
+ MatchSpec :: (MatchSpecList :: trace_match_spec())
+ | boolean();
+ (MFA, MatchSpec, FlagList) -> non_neg_integer() when
MFA :: trace_pattern_mfa(),
MatchSpec :: (MatchSpecList :: trace_match_spec())
| boolean()
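Together with the widened trace_info/2 spec above, these clauses document that send and 'receive' take a match specification (or a boolean) but no flag list. A hedged usage sketch, with the tracer setup and message shapes purely illustrative:

    %% Illustrative: only trace sends whose message is a {hello, _} tuple.
    Collector = spawn(fun Loop() ->
                          receive Ev -> io:format("~p~n", [Ev]), Loop() end
                      end),
    erlang:trace(self(), true, [send, {tracer, Collector}]),
    _ = erlang:trace_pattern(send, [{['_', {hello, '_'}], [], []}], []),
    self() ! {hello, world},       %% traced
    self() ! something_else,       %% filtered out by the match spec
    {match_spec, _MS} = erlang:trace_info(send, match_spec).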
@@ -2505,6 +2519,7 @@ tuple_to_list(_Tuple) ->
logical_processors_available |
logical_processors_online) -> unknown | pos_integer();
(machine) -> string();
+ (max_heap_size) -> {max_heap_size, MaxHeapSize :: max_heap_size()};
(message_queue_data) -> message_queue_data();
(min_heap_size) -> {min_heap_size, MinHeapSize :: pos_integer()};
(min_bin_vheap_size) -> {min_bin_vheap_size,
@@ -2642,6 +2657,13 @@ spawn_monitor(M, F, A) ->
erlang:error(badarg, [M,F,A]).
+-type max_heap_size() ::
+ Size :: non_neg_integer()
+ %% TODO change size => to := when -type maps support is finalized
+ | #{ size => non_neg_integer(),
+ kill => boolean(),
+ error_logger => boolean() }.
+
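The new max_heap_size() type is what process_flag/2, spawn_opt and system_flag/2, plus the corresponding info functions above, now accept and return: either a bare word count or a map with size, kill and error_logger keys. A hedged usage sketch (sizes and chosen behaviour illustrative):

    %% Illustrative: kill (and log) a worker whose heap exceeds about 1 M words.
    Opts = [{max_heap_size, #{size => 1024 * 1024,
                              kill => true,
                              error_logger => true}}],
    Pid = spawn_opt(fun() -> receive stop -> ok end end, Opts),

    %% process_flag/2 adjusts the calling process; process_info/2 reads a limit.
    _Old = process_flag(max_heap_size, 0),    %% bare size; 0 means no limit
    {max_heap_size, _Limit} = process_info(Pid, max_heap_size).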
-type spawn_opt_option() ::
link
| monitor
@@ -2649,6 +2671,7 @@ spawn_monitor(M, F, A) ->
| {fullsweep_after, Number :: non_neg_integer()}
| {min_heap_size, Size :: non_neg_integer()}
| {min_bin_vheap_size, VSize :: non_neg_integer()}
+ | {max_heap_size, Size :: max_heap_size()}
| {message_queue_data, MQD :: message_queue_data()}.
-spec spawn_opt(Fun, Options) -> pid() | {pid(), reference()} when