Diffstat (limited to 'erts')
-rw-r--r--  erts/configure.in | 24
-rw-r--r--  erts/doc/src/crash_dump.xml | 31
-rw-r--r--  erts/doc/src/epmd.xml | 41
-rw-r--r--  erts/doc/src/erl.xml | 2
-rw-r--r--  erts/doc/src/erl_driver.xml | 5
-rw-r--r--  erts/doc/src/erl_nif.xml | 158
-rw-r--r--  erts/doc/src/erlang.xml | 22
-rw-r--r--  erts/doc/src/notes.xml | 191
-rw-r--r--  erts/emulator/beam/beam_emu.c | 8
-rw-r--r--  erts/emulator/beam/beam_load.c | 6
-rw-r--r--  erts/emulator/beam/bif.c | 28
-rw-r--r--  erts/emulator/beam/bif.tab | 13
-rw-r--r--  erts/emulator/beam/big.c | 35
-rw-r--r--  erts/emulator/beam/big.h | 2
-rw-r--r--  erts/emulator/beam/break.c | 1
-rw-r--r--  erts/emulator/beam/erl_alloc.c | 4
-rw-r--r--  erts/emulator/beam/erl_alloc.types | 2
-rw-r--r--  erts/emulator/beam/erl_bif_binary.c | 16
-rw-r--r--  erts/emulator/beam/erl_bif_info.c | 67
-rw-r--r--  erts/emulator/beam/erl_driver.h | 2
-rw-r--r--  erts/emulator/beam/erl_drv_nif.h | 1
-rw-r--r--  erts/emulator/beam/erl_gc.c | 14
-rw-r--r--  erts/emulator/beam/erl_init.c | 6
-rw-r--r--  erts/emulator/beam/erl_lock_check.c | 19
-rw-r--r--  erts/emulator/beam/erl_lock_count.c | 161
-rw-r--r--  erts/emulator/beam/erl_lock_count.h | 20
-rw-r--r--  erts/emulator/beam/erl_message.c | 16
-rw-r--r--  erts/emulator/beam/erl_nif.c | 547
-rw-r--r--  erts/emulator/beam/erl_nif.h | 26
-rw-r--r--  erts/emulator/beam/erl_nif_api_funcs.h | 45
-rw-r--r--  erts/emulator/beam/erl_port_task.c | 74
-rw-r--r--  erts/emulator/beam/erl_port_task.h | 2
-rw-r--r--  erts/emulator/beam/erl_process.c | 137
-rw-r--r--  erts/emulator/beam/erl_process.h | 33
-rw-r--r--  erts/emulator/beam/erl_process_dict.c | 52
-rw-r--r--  erts/emulator/beam/external.c | 2
-rw-r--r--  erts/emulator/beam/io.c | 94
-rw-r--r--  erts/emulator/beam/utils.c | 3
-rw-r--r--  erts/emulator/drivers/common/inet_drv.c | 37
-rw-r--r--  erts/emulator/drivers/unix/multi_drv.c | 2
-rw-r--r--  erts/emulator/sys/common/erl_poll.c | 2
-rw-r--r--  erts/emulator/test/Makefile | 1
-rw-r--r--  erts/emulator/test/async_ports_SUITE.erl | 118
-rw-r--r--  erts/emulator/test/async_ports_SUITE_data/Makefile.src | 15
-rw-r--r--  erts/emulator/test/async_ports_SUITE_data/cport.c | 81
-rw-r--r--  erts/emulator/test/busy_port_SUITE.erl | 6
-rw-r--r--  erts/emulator/test/driver_SUITE.erl | 41
-rw-r--r--  erts/emulator/test/driver_SUITE_data/sys_info_base_drv.c | 9
-rw-r--r--  erts/emulator/test/driver_SUITE_data/sys_info_curr_drv.c | 11
-rw-r--r--  erts/emulator/test/driver_SUITE_data/sys_info_prev_drv.c | 9
-rw-r--r--  erts/emulator/test/fun_SUITE.erl | 23
-rw-r--r--  erts/emulator/test/nif_SUITE.erl | 57
-rw-r--r--  erts/emulator/test/nif_SUITE_data/nif_SUITE.c | 106
-rw-r--r--  erts/emulator/test/num_bif_SUITE.erl | 32
-rw-r--r--  erts/emulator/test/tuple_SUITE.erl | 95
-rw-r--r--  erts/epmd/src/epmd.c | 10
-rw-r--r--  erts/etc/common/run_erl_common.c | 19
-rw-r--r--  erts/etc/unix/run_erl.c | 4
-rw-r--r--  erts/preloaded/ebin/erl_prim_loader.beam | bin 56160 -> 56328 bytes
-rw-r--r--  erts/preloaded/ebin/erlang.beam | bin 97732 -> 98008 bytes
-rw-r--r--  erts/preloaded/ebin/prim_inet.beam | bin 72912 -> 73128 bytes
-rw-r--r--  erts/preloaded/src/erl_prim_loader.erl | 9
-rw-r--r--  erts/preloaded/src/erlang.erl | 19
-rw-r--r--  erts/preloaded/src/prim_inet.erl | 9
64 files changed, 1988 insertions, 637 deletions
diff --git a/erts/configure.in b/erts/configure.in
index f66110b98b..c8b96c50f0 100644
--- a/erts/configure.in
+++ b/erts/configure.in
@@ -446,13 +446,13 @@ else
fi
AC_ARG_ENABLE(static-nifs,
-AS_HELP_STRING([--enable-static-nifs], [link nifs statically. If yes then all nifs in all Erlang/OTP applications will be statically linked into the main binary. It is also possible to give a list of nifs that should be linked statically. The list should be a comma seperated and contain the absolute path to a .a archive for each nif that is to be statically linked. The name of the .a archive has to be the same as the name of the nif. Note that you have to link any external dependencies that the nifs have to the main binary, so for the crypto nif you want to pass LIBS=-lcrypto to configure.]),
+AS_HELP_STRING([--enable-static-nifs], [link nifs statically. If yes then all nifs in all Erlang/OTP applications will be statically linked into the main binary. It is also possible to give a list of nifs that should be linked statically. The list should be a comma separated and contain the absolute path to a .a archive for each nif that is to be statically linked. The name of the .a archive has to be the same as the name of the nif. Note that you have to link any external dependencies that the nifs have to the main binary, so for the crypto nif you want to pass LIBS=-lcrypto to configure.]),
STATIC_NIFS="$enableval",
STATIC_NIFS=no)
AC_SUBST(STATIC_NIFS)
AC_ARG_ENABLE(static-drivers,
-AS_HELP_STRING([--enable-static-drivers], [comma seperated list of linked-in drivers to link statically with the main binary. The list should contain the absolute path to a .a archive for each driver that is to be statically linked. The name of the .a archive has to be the same as the name of the driver.]),
+AS_HELP_STRING([--enable-static-drivers], [comma separated list of linked-in drivers to link statically with the main binary. The list should contain the absolute path to a .a archive for each driver that is to be statically linked. The name of the .a archive has to be the same as the name of the driver.]),
STATIC_DRIVERS="$enableval",
STATIC_DRIVERS=no)
AC_SUBST(STATIC_DRIVERS)
@@ -4821,6 +4821,26 @@ if test "x$GCC" = xyes; then
fi
dnl ----------------------------------------------------------------------
+dnl Enable -fsanitize= flags.
+dnl ----------------------------------------------------------------------
+
+m4_define(DEFAULT_SANITIZERS, [address,undefined])
+AC_ARG_ENABLE(
+ sanitizers,
+ AS_HELP_STRING(
+ [--enable-sanitizers@<:@=comma-separated list of sanitizers@:>@],
+ [Default=DEFAULT_SANITIZERS]),
+[
+case "$enableval" in
+ no) sanitizers= ;;
+ yes) sanitizers="-fsanitize=DEFAULT_SANITIZERS" ;;
+ *) sanitizers="-fsanitize=$enableval" ;;
+esac
+CFLAGS="$CFLAGS $sanitizers"
+LDFLAGS="$LDFLAGS $sanitizers"
+])
+
+dnl ----------------------------------------------------------------------
dnl Output the result.
dnl ----------------------------------------------------------------------
diff --git a/erts/doc/src/crash_dump.xml b/erts/doc/src/crash_dump.xml
index c59741f250..d3de29b876 100644
--- a/erts/doc/src/crash_dump.xml
+++ b/erts/doc/src/crash_dump.xml
@@ -85,20 +85,22 @@
operating system.</p>
<list type="bulleted">
<item>"<em>&lt;A&gt;</em>: Cannot allocate <em>&lt;N&gt;</em>
- bytes of memory (of type "<em>&lt;T&gt;</em>")." - The system
- has run out of memory. &lt;A&gt; is the allocator that failed
- to allocate memory, &lt;N&gt; is the number of bytes that
- &lt;A&gt; tried to allocate, and &lt;T&gt; is the memory block
- type that the memory was needed for. The most common case is
- that a process stores huge amounts of data. In this case
- &lt;T&gt; is most often <c><![CDATA[heap]]></c>, <c><![CDATA[old_heap]]></c>,
- <c><![CDATA[heap_frag]]></c>, or <c><![CDATA[binary]]></c>. For more information on
- allocators see
- <seealso marker="erts_alloc">erts_alloc(3)</seealso>.</item>
+ bytes of memory (of type "<em>&lt;T&gt;</em>", thread
+ <em>&lt;I&gt;</em>)." - The system has run out of memory. &lt;A&gt;
+ is the allocator that failed to allocate memory, &lt;N&gt; is the
+ number of bytes that &lt;A&gt; tried to allocate, &lt;T&gt; is the
+ memory block type that the memory was needed for, and &lt;I&gt; is the
+ thread identifier. The most common case is that a process stores huge
+ amounts of data. In this case &lt;T&gt; is most often
+ <c><![CDATA[heap]]></c>, <c><![CDATA[old_heap]]></c>,
+ <c><![CDATA[heap_frag]]></c>, or <c><![CDATA[binary]]></c>.
+ For more information on allocators see
+ <seealso marker="erts_alloc">erts_alloc(3)</seealso>.</item>
<item>"<em>&lt;A&gt;</em>: Cannot reallocate <em>&lt;N&gt;</em>
- bytes of memory (of type "<em>&lt;T&gt;</em>")." - Same as
- above with the exception that memory was being reallocated
- instead of being allocated when the system ran out of memory.</item>
+ bytes of memory (of type "<em>&lt;T&gt;</em>", thread
+ <em>&lt;I&gt;</em>)." - Same as above with the exception that memory
+ was being reallocated instead of being allocated when the system ran
+ out of memory.</item>
<item>"Unexpected op code <em>N</em>" - Error in compiled
code, <c><![CDATA[beam]]></c> file damaged or error in the compiler.</item>
<item>"Module <em>Name</em> undefined" <c><![CDATA[|]]></c> "Function
@@ -246,6 +248,9 @@
<tag><em>Last scheduled in for | Current call</em></tag>
<item>The current function of the process. These fields will not
always exist.</item>
+ <tag><em>Run queue</em></tag>
+ <item>The identifier of the scheduler run queue in which the process is
+ running.</item>
<tag><em>Spawned by</em></tag>
<item>The parent of the process, i.e. the process which executed
<c><![CDATA[spawn]]></c> or <c><![CDATA[spawn_link]]></c>.</item>
diff --git a/erts/doc/src/epmd.xml b/erts/doc/src/epmd.xml
index 963d35c3c8..25f819ab50 100644
--- a/erts/doc/src/epmd.xml
+++ b/erts/doc/src/epmd.xml
@@ -58,12 +58,12 @@
of the IP address and a port number. The name of the node is
an atom on the form of <c><![CDATA[Name@Node]]></c>.
The job of the <c><![CDATA[epmd]]></c> daemon is to keep track of which
- node name listens on which address. Hence, <c><![CDATA[epmd]]></c> map
+ node name listens on which address. Hence, <c><![CDATA[epmd]]></c> maps
symbolic node names to machine addresses.</p>
<p>The TCP/IP <c>epmd</c> daemon actually only keeps track of
- the <c>Name</c> (first) part of an Erlang node name, the <c>Host</c>
- part (whatever is after the <c><![CDATA[@]]></c> is implicit in the
+ the <c>Name</c> (first) part of an Erlang node name. The <c>Host</c>
+ part (whatever is after the <c><![CDATA[@]]></c>) is implicit in the
node name where the <c>epmd</c> daemon was actually contacted,
as is the IP address where the Erlang node can be
reached. Consistent and correct TCP naming services are
@@ -77,12 +77,12 @@
<p>The daemon is started automatically by the <c>erl</c>
command if the node is to be distributed and there is no
running instance present. If automatically launched,
- environment variables has to be used to alter the behavior of
+ environment variables have to be used to alter the behavior of
the daemon. See the <seealso
marker="#environment_variables">Environment
variables</seealso> section below.</p>
- <p>If the -daemon argument is not given, the
+ <p>If the -daemon argument is not given,
<c><![CDATA[epmd]]></c> runs as a normal program with the
controlling terminal of the shell in which it is
started. Normally, it should run as a daemon.</p>
@@ -122,7 +122,7 @@
comma-separated list of IP addresses and on the loopback address
(which is implicitly added to the list if it has not been
specified). This can also be set using the
- <c><![CDATA[ERL_EPMD_ADDRESS]]></c> environment variable, see the
+ <c><![CDATA[ERL_EPMD_ADDRESS]]></c> environment variable. See the
section <seealso marker="#environment_variables">Environment
variables</seealso> below.</p>
</item>
@@ -130,7 +130,7 @@
<item>
<p>Let this instance of epmd listen to another TCP port than
default 4369. This can also be set using the
- <c><![CDATA[ERL_EPMD_PORT]]></c> environment variable, see the
+ <c><![CDATA[ERL_EPMD_PORT]]></c> environment variable. See the
section <seealso marker="#environment_variables">Environment
variables</seealso> below</p>
</item>
@@ -153,7 +153,7 @@
<p>With relaxed command checking, the <c>epmd</c> daemon can be killed from the localhost with i.e. <c>epmd -kill</c> even if there are active nodes registered. Normally only daemons with an empty node database can be killed with the <c>epmd -kill</c> command.</p>
</item>
<item>
- <p>The <c>epmd -stop</c> command (and the corresponding messages to epmd, as can be given using <c>erl_interface/ei</c>) is normally always ignored, as it opens up for strange situation when two nodes of the same name can be alive at the same time. A node unregisters itself by just closing the connection to epmd, why the <c>stop</c> command was only intended for use in debugging situations.</p>
+ <p>The <c>epmd -stop</c> command (and the corresponding messages to epmd, as can be given using <c>erl_interface/ei</c>) is normally always ignored, as it opens up the possibility of a strange situation where two nodes of the same name can be alive at the same time. A node unregisters itself by just closing the connection to epmd, which is why the <c>stop</c> command was only intended for use in debugging situations.</p>
<p>With relaxed command checking enabled, you can forcibly unregister live nodes.</p>
</item>
</list>
@@ -166,7 +166,7 @@
<section>
<marker id="debug_flags"></marker>
<title>DbgExtra options</title>
- <p>These options are purely for debugging and testing epmd clients, they should not be used in normal operation.</p>
+ <p>These options are purely for debugging and testing epmd clients. They should not be used in normal operation.</p>
<taglist>
<tag><c><![CDATA[-packet_timeout Seconds]]></c></tag>
@@ -177,9 +177,9 @@
</item>
<tag><c><![CDATA[-delay_accept Seconds]]></c></tag>
<item>
- <p>To simulate a busy server you can insert a delay between epmd
- gets notified about that a new connection is requested and
- when the connections gets accepted.</p>
+ <p>To simulate a busy server you can insert a delay between when epmd
+ gets notified that a new connection is requested and
+ when the connection gets accepted.</p>
</item>
<tag><c><![CDATA[-delay_write Seconds]]></c></tag>
<item>
@@ -191,15 +191,15 @@
<section>
<marker id="interactive_flags"></marker>
<title>Interactive options</title>
- <p>These options make <c>epmd</c> run as an interactive command displaying the results of sending queries ta an already running instance of <c>epmd</c>. The epmd contacted is always on the local node, but the <c>-port</c> option can be used to select between instances if several are running using different port on the host.</p>
+ <p>These options make <c>epmd</c> run as an interactive command, displaying the results of sending queries to an already running instance of <c>epmd</c>. The epmd contacted is always on the local node, but the <c>-port</c> option can be used to select between instances if several are running using different ports on the host.</p>
<taglist>
<tag><c><![CDATA[-port No]]></c></tag>
<item>
<p>Contacts the <c>epmd</c> listening on the given TCP port number
(default 4369). This can also be set using the
- <c><![CDATA[ERL_EPMD_PORT]]></c> environment variable, see the
+ <c><![CDATA[ERL_EPMD_PORT]]></c> environment variable. See the
section <seealso marker="#environment_variables">Environment
- variables</seealso> below</p>
+ variables</seealso> below.</p>
</item>
<tag><c><![CDATA[-names]]></c></tag>
<item>
@@ -210,7 +210,7 @@
<p>Kill the currently running <c>epmd</c>.</p>
<p>Killing the running <c>epmd</c> is only allowed if <c>epmd
- -names</c> show an empty database or
+ -names</c> shows an empty database or
<c>-relaxed_command_check</c> was given when the running
instance of <c>epmd</c> was started. Note that
<c>-relaxed_command_check</c> is given when starting the
@@ -228,7 +228,7 @@
<p>This command can only be used when contacting <c>epmd</c>
instances started with the <c>-relaxed_command_check</c>
flag. Note that relaxed command checking has to be enabled for
- the <c>epmd</c> daemon contacted, When running epmd
+ the <c>epmd</c> daemon contacted. When running epmd
interactively,
<c>-relaxed_command_check</c> has no effect.</p>
</item>
@@ -259,7 +259,7 @@
<item>
<p>If set prior to start, the <c>epmd</c> daemon will behave
as if the <c>-relaxed_command_check</c> option was given at
- start-up. If consequently setting this option before starting
+ start-up. Consequently, if this option is set before starting
the Erlang virtual machine, the automatically started
<c>epmd</c> will accept the <c>-kill</c> and <c>-stop</c>
commands without restrictions.</p>
@@ -287,8 +287,8 @@
remote hosts. However, only the query commands are answered (and
acted upon) if the query comes from a remote host. It is always an
error to try to register a nodename if the client is not a process
- located on the same host as the <c>epmd</c> instance is running on,
- why such requests are considered hostile and the connection is
+ located on the same host as the <c>epmd</c> instance is running on;
+ such requests are considered hostile and the connection is
immediately closed.</p>
<p>The queries accepted from remote nodes are:</p>
@@ -307,3 +307,4 @@
</comref>
+
diff --git a/erts/doc/src/erl.xml b/erts/doc/src/erl.xml
index 5bde285311..f856b9ab86 100644
--- a/erts/doc/src/erl.xml
+++ b/erts/doc/src/erl.xml
@@ -1186,7 +1186,7 @@
utilization.
</p>
</item>
- <tag><marker id="+swct"><c>+sws very_eager|eager|medium|lazy|very_lazy</c></marker></tag>
+ <tag><marker id="+swct"><c>+swct very_eager|eager|medium|lazy|very_lazy</c></marker></tag>
<item>
<p>
Set scheduler wake cleanup threshold. Default is <c>medium</c>.
diff --git a/erts/doc/src/erl_driver.xml b/erts/doc/src/erl_driver.xml
index ad37813ac0..4a1aab75c7 100644
--- a/erts/doc/src/erl_driver.xml
+++ b/erts/doc/src/erl_driver.xml
@@ -546,6 +546,7 @@ typedef struct ErlDrvSysInfo {
int scheduler_threads;
int nif_major_version;
int nif_minor_version;
+ int dirty_scheduler_support;
} ErlDrvSysInfo;
</code>
@@ -610,6 +611,10 @@ typedef struct ErlDrvSysInfo {
<tag><c>nif_minor_version</c></tag>
<item>The value of <c>ERL_NIF_MINOR_VERSION</c> when the runtime system was compiled.
</item>
+ <tag><c>dirty_scheduler_support</c></tag>
+ <item>A value <c>!= 0</c> if the runtime system has support for dirty scheduler threads;
+ otherwise <c>0</c>.
+ </item>
</taglist>
</item>
<tag><marker id="ErlDrvBinary"/>ErlDrvBinary</tag>
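
For reference, the new dirty_scheduler_support field can be read at runtime through the existing driver_system_info() call. A minimal sketch, not part of this commit; the callback name my_drv_start is hypothetical:

    #include "erl_driver.h"

    static ErlDrvData my_drv_start(ErlDrvPort port, char *command)
    {
        ErlDrvSysInfo si;

        /* Ask the emulator to fill in as much of ErlDrvSysInfo as it knows about. */
        driver_system_info(&si, sizeof(ErlDrvSysInfo));

        if (si.dirty_scheduler_support) {
            /* Dirty scheduler threads are available in this runtime. */
        }
        return (ErlDrvData) port;
    }
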
diff --git a/erts/doc/src/erl_nif.xml b/erts/doc/src/erl_nif.xml
index 6b1f4cccf8..3de94be9ff 100644
--- a/erts/doc/src/erl_nif.xml
+++ b/erts/doc/src/erl_nif.xml
@@ -168,16 +168,18 @@ ok
<p><marker id="lengthy_work"/>
As mentioned in the <seealso marker="#WARNING">warning</seealso> text at
the beginning of this document it is of vital importance that a native function
- does return relatively fast. It is hard to give an exact maximum amount
+ return relatively quickly. It is hard to give an exact maximum amount
of time that a native function is allowed to work, but as a rule of thumb
- a well behaving native function should return to its caller before a
+ a well-behaving native function should return to its caller before a
millisecond has passed. This can be achieved using different approaches.
- If you have full control over the code that are to execute in the native
+ If you have full control over the code to execute in the native
function, the best approach is to divide the work into multiple chunks of
- work and call the native function multiple times. Function
+ work and call the native function multiple times, either directly from Erlang code
+ or by having a native function schedule a future NIF call via the
+ <seealso marker="#enif_schedule_nif"> enif_schedule_nif</seealso> function. Function
<seealso marker="#enif_consume_timeslice">enif_consume_timeslice</seealso> can be
- used this facilitate such work division. In some cases, however, this might not
- be possible, e.g. when calling third party libraries. Then you typically want
+ used to help with such work division. In some cases, however, this might not
+ be possible, e.g. when calling third-party libraries. Then you typically want
to dispatch the work to another thread, return
from the native function, and wait for the result. The thread can send
the result back to the calling thread using message passing. Information
@@ -342,37 +344,38 @@ ok
libraries might however fail if deprecated features are used.
</p></item>
- <tag>Dirty NIFs</tag>
- <item><p><marker id="dirty_nifs"/><em>Note that the dirty NIF functionality
- is experimental</em> and that you have to enable support for dirty
- schedulers when building OTP in order to try the functionality out. Native functions
+ <tag>Long-running NIFs</tag>
+ <item><p><marker id="dirty_nifs"/>Native functions
<seealso marker="#lengthy_work">
must normally run quickly</seealso>, as explained earlier in this document. They
generally should execute for no more than a millisecond. But not all native functions
can execute so quickly; for example, functions that encrypt large blocks of data or
perform lengthy file system operations can often run for tens of seconds or more.</p>
- <p>A NIF that cannot execute in a millisecond or less is called a "dirty NIF" since
- it performs work that the Erlang runtime cannot handle cleanly. Applications
- that make use of such functions must indicate to the runtime that the functions are
+ <p>If the functionality of a long-running NIF can be split so that its work can be
+ achieved through a series of shorter NIF calls, the application can either make that series
+ of NIF calls from the Erlang level, or it can call a NIF that first performs a chunk of the
+ work, then invokes the <seealso marker="#enif_schedule_nif">enif_schedule_nif</seealso>
+ function to schedule another NIF call to perform the next chunk. The final call scheduled
+ in this manner can then return the overall result. Breaking up a long-running function in
+ this manner enables the VM to regain control between calls to the NIFs, thereby avoiding
+ degraded responsiveness, scheduler load balancing problems, and other strange behaviours.</p>
+ <p>A NIF that cannot be split and cannot execute in a millisecond or less is called a "dirty NIF"
+ because it performs work that the Erlang runtime cannot handle cleanly.
+ <em>Note that the dirty NIF functionality described here is experimental</em> and that you have to
+ enable support for dirty schedulers when building OTP in order to try the functionality out.
+ Applications that make use of such functions must indicate to the runtime that the functions are
dirty so they can be handled specially. To schedule a dirty NIF for execution, the
- application calls <seealso marker="#enif_schedule_dirty_nif">enif_schedule_dirty_nif</seealso>,
- passing to it a pointer to the dirty NIF to be executed and indicating with a flag
+ appropriate flags value can be set for the NIF in its <seealso marker="#ErlNifFunc">ErlNifFunc</seealso>
+ entry, or the application can call <seealso marker="#enif_schedule_nif">enif_schedule_nif</seealso>,
+ passing to it a pointer to the dirty NIF to be executed and indicating with the <c>flags</c>
argument whether it expects the operation to be CPU-bound or I/O-bound.</p>
- <p>All dirty NIFs must ultimately invoke the <seealso marker="#enif_schedule_dirty_nif_finalizer">
- enif_schedule_dirty_nif_finalizer</seealso> as their final action, passing to it the
- result they wish to return to the original caller. A finalizer function can either
- receive the result and return it directly, or it can return a different value instead.
- For convenience, the NIF API provides the <seealso marker="#enif_dirty_nif_finalizer">
- enif_dirty_nif_finalizer</seealso> function that applications can use as a finalizer;
- it simply returns its result argument.</p>
<note><p>Dirty NIF support is available only when the emulator is configured with dirty
schedulers enabled. This feature is currently disabled by default. To determine whether
the dirty NIF API is available, native code can check to see if the C preprocessor macro
<c>ERL_NIF_DIRTY_SCHEDULER_SUPPORT</c> is defined. Also, if the Erlang runtime was built
without threading support, dirty schedulers are disabled. To check at runtime for the presence
- of dirty scheduler threads, code can call the <seealso marker="#enif_have_dirty_schedulers"><c>
- enif_have_dirty_schedulers()</c></seealso> API function, which returns true if dirty
- scheduler threads are present, false otherwise.</p></note>
+ of dirty scheduler threads, code can use the <seealso marker="#enif_system_info"><c>
+ enif_system_info()</c></seealso> API function.</p></note>
</item>
</taglist>
</section>
@@ -498,6 +501,7 @@ typedef struct {
const char* <em>name</em>;
unsigned <em>arity</em>;
ERL_NIF_TERM (*<em>fptr</em>)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+ unsigned flags;
} ErlNifFunc;
</code>
<p>Describes a NIF by its name, arity and implementation.
@@ -508,7 +512,17 @@ typedef struct {
will thus denote the Nth argument to the NIF. Note that the
<c>argc</c> argument allows for the same C function to
implement several Erlang functions with different arity (but
- same name probably).</p>
+ same name probably). For a regular NIF, <c>flags</c> is 0 (and
+ so its value can be omitted for statically initialized <c>ErlNifFunc</c>
+ instances), or it can be used to indicate that the NIF is a <seealso
+ marker="#dirty_nifs">dirty NIF</seealso> that should be executed
+ on a dirty scheduler thread (<em>note that the dirty NIF functionality
+ described here is experimental</em> and that you have to enable
+ support for dirty schedulers when building OTP in order to try the
+ functionality out). If the dirty NIF is expected to be
+ CPU-bound, its <c>flags</c> field should be set to
+ <c>ERL_NIF_DIRTY_JOB_CPU_BOUND</c>, or for I/O-bound jobs,
+ <c>ERL_NIF_DIRTY_JOB_IO_BOUND</c>.</p>
</item>
<tag><marker id="ErlNifBinary"/>ErlNifBinary</tag>
<item>
@@ -672,18 +686,6 @@ typedef enum {
See also the <seealso marker="#WARNING">warning</seealso> text at the beginning of this document.</p>
</desc>
</func>
- <func><name><ret>ERL_NIF_TERM</ret><nametext>enif_dirty_nif_finalizer(ErlNifEnv* env, ERL_NIF_TERM result)</nametext></name>
- <fsummary>Simple dirty NIF result finalizer</fsummary>
- <desc>
- <p>A convenience function that a dirty NIF can use as a finalizer that simply
- return its <c>result</c> argument as its return value. This function is provided
- for dirty NIFs with results that should be returned directly to the original caller.</p>
- <note><p>This function is available only when the emulator is configured with dirty
- schedulers enabled. This feature is currently disabled by default. To determine whether
- the dirty NIF API is available, native code can check to see if the C preprocessor macro
- <c>ERL_NIF_DIRTY_SCHEDULER_SUPPORT</c> is defined.</p></note>
- </desc>
- </func>
<func><name><ret>int</ret><nametext>enif_equal_tids(ErlNifTid tid1, ErlNifTid tid2)</nametext></name>
<fsummary></fsummary>
<desc><p>Same as <seealso marker="erl_driver#erl_drv_equal_tids">erl_drv_equal_tids</seealso>.
@@ -804,22 +806,6 @@ typedef enum {
and return true, or return false if <c>term</c> is not an unsigned integer or is
outside the bounds of type <c>unsigned long</c>.</p></desc>
</func>
- <func><name><ret>int</ret><nametext>enif_have_dirty_schedulers()</nametext></name>
- <fsummary>Runtime check for the presence of dirty scheduler threads</fsummary>
- <desc>
- <p>Check at runtime for the presence of dirty scheduler threads. If the emulator is
- built with threading support, dirty scheduler threads are available and
- <c>enif_have_dirty_schedulers()</c> returns true. If the emulator was built without
- threading support, <c>enif_have_dirty_schedulers()</c> returns false.</p>
- <p>If dirty scheduler threads are not available in the emulator, calls to
- <c>enif_schedule_dirty_nif</c> and <c>enif_schedule_dirty_nif_finalizer</c> result in
- the NIF and finalizer functions being called directly within the calling thread.</p>
- <note><p>This function is available only when the emulator is configured with dirty
- schedulers enabled. This feature is currently disabled by default. To determine whether
- the dirty NIF API is available, native code can check to see if the C preprocessor macro
- <c>ERL_NIF_DIRTY_SCHEDULER_SUPPORT</c> is defined.</p></note>
- </desc>
- </func>
<func><name><ret>int</ret><nametext>enif_inspect_binary(ErlNifEnv* env, ERL_NIF_TERM bin_term, ErlNifBinary* bin)</nametext></name>
<fsummary>Inspect the content of a binary</fsummary>
<desc><p>Initialize the structure pointed to by <c>bin</c> with
@@ -873,8 +859,8 @@ typedef enum {
<p>Check to see if the current NIF is executing on a dirty scheduler thread. If the
emulator is built with threading support, calling <c>enif_is_on_dirty_scheduler</c>
from within a dirty NIF returns true. It returns false when the calling NIF is a regular
- NIF or a NIF finalizer, both of which run on normal scheduler threads, or when the emulator
- is built without threading support.</p>
+ NIF running on a normal scheduler thread, or when the emulator is built without threading
+ support.</p>
<note><p>This function is available only when the emulator is configured with dirty
schedulers enabled. This feature is currently disabled by default. To determine whether
the dirty NIF API is available, native code can check to see if the C preprocessor macro
@@ -1245,46 +1231,29 @@ typedef enum {
<desc><p>Same as <seealso marker="erl_driver#erl_drv_rwlock_tryrwlock">erl_drv_rwlock_tryrwlock</seealso>.
</p></desc>
</func>
- <func><name><ret>ERL_NIF_TERM</ret><nametext>enif_schedule_dirty_nif(ErlNifEnv* env, int flags, ERL_NIF_TERM (*fp)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]), int argc, const ERL_NIF_TERM argv[])</nametext></name>
- <fsummary>Schedule a dirty NIF for execution</fsummary>
+ <func><name><ret>ERL_NIF_TERM</ret><nametext>enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags, ERL_NIF_TERM (*fp)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]), int argc, const ERL_NIF_TERM argv[])</nametext></name>
+ <fsummary>Schedule a NIF for execution</fsummary>
<desc>
- <p>Schedule dirty NIF <c>fp</c> to execute a long-running operation. The <c>flags</c>
- argument must be set to either <c>ERL_NIF_DIRTY_JOB_CPU_BOUND</c> if the job is expected to
- be primarily CPU-bound, or <c>ERL_NIF_DIRTY_JOB_IO_BOUND</c> for jobs that will be
- I/O-bound. The <c>argc</c> and <c>argv</c> arguments can either be the originals passed
- into the calling NIF, or they can be values created by the calling NIF. The calling
- NIF must use the return value of <c>enif_schedule_dirty_nif</c> as its own return value.</p>
- <p>Be aware that <c>enif_schedule_dirty_nif</c>, as its name implies, only schedules the
- dirty NIF for future execution. The calling NIF does not block waiting for the dirty NIF to
- execute and return, which means that the calling NIF can't expect to receive the dirty NIF
+ <p>Schedule NIF <c>fp</c> to execute. This function allows an application to break up long-running
+ work into multiple regular NIF calls or to schedule a <seealso marker="#dirty_nifs">dirty NIF</seealso>
+ to execute on a dirty scheduler thread (<em>note that the dirty NIF functionality described here is
+ experimental</em> and that you have to enable support for dirty schedulers when building OTP in
+ order to try the functionality out).</p>
+ <p>The <c>fun_name</c> argument provides a name for the NIF being scheduled for execution. If it cannot
+ be converted to an atom, <c>enif_schedule_nif</c> returns a <c>badarg</c> exception.</p>
+ <p>The <c>flags</c> argument must be set to 0 for a regular NIF, or if the emulator was built with the
+ experimental dirty scheduler support enabled, <c>flags</c> can be set to either <c>ERL_NIF_DIRTY_JOB_CPU_BOUND</c>
+ if the job is expected to be primarily CPU-bound, or <c>ERL_NIF_DIRTY_JOB_IO_BOUND</c> for jobs that will
+ be I/O-bound. If dirty scheduler threads are not available in the emulator, an attempt to schedule such a job
+ will result in a <c>badarg</c> exception.</p>
+
+ <p>The <c>argc</c> and <c>argv</c> arguments can either be the originals passed into the calling NIF, or
+ they can be values created by the calling NIF.</p>
+ <p>The calling NIF must use the return value of <c>enif_schedule_nif</c> as its own return value.</p>
+ <p>Be aware that <c>enif_schedule_nif</c>, as its name implies, only schedules the
+ NIF for future execution. The calling NIF does not block waiting for the scheduled NIF to
+ execute and return, which means that the calling NIF can't expect to receive the scheduled NIF
return value and use it for further operations.</p>
- <p>A dirty NIF may not invoke the <seealso marker="#enif_make_badarg">enif_make_badarg</seealso>
- to raise an exception. If it wishes to return an exception, the dirty NIF should pass a
- regular result indicating the exception details to its finalizer, and allow the finalizer
- to raise the exception on its behalf.</p>
- <note><p>This function is available only when the emulator is configured with dirty schedulers
- enabled. This feature is currently disabled by default. To determine whether the dirty NIF API
- is available, native code can check to see if the C preprocessor macro
- <c>ERL_NIF_DIRTY_SCHEDULER_SUPPORT</c> is defined.</p></note>
- </desc>
- </func>
- <func><name><ret>ERL_NIF_TERM</ret><nametext>enif_schedule_dirty_nif_finalizer(ErlNifEnv* env, ERL_NIF_TERM result, ERL_NIF_TERM (*fp)(ErlNifEnv* env, ERL_NIF_TERM result))</nametext></name>
- <fsummary>Schedule a dirty NIF finalizer</fsummary>
- <desc>
- <p>When a dirty NIF finishes executing, it must schedule a finalizer function to return
- its result to the original NIF caller. The dirty NIF passes <c>result</c> as the value it
- wants the finalizer to use as the return value. The <c>fp</c> argument is a pointer to the
- finalizer function. The NIF API provides the <seealso marker="#enif_dirty_nif_finalizer">
- enif_dirty_nif_finalizer</seealso> function that can be used as a finalizer that simply
- returns its <c>result</c> argument. You are also free to write your own custom finalizer
- that uses <c>result</c> to derive a different return value, or ignores <c>result</c>
- entirely and returns a completely different value.</p>
- <p>Without exception, all dirty NIFs must invoke <c>enif_schedule_dirty_nif_finalizer</c>
- to complete their execution.</p>
- <note><p>This function is available only when the emulator is configured with dirty
- schedulers enabled. This feature is currently disabled by default. To determine whether
- the dirty NIF API is available, native code can check to see if the C preprocessor macro
- <c>ERL_NIF_DIRTY_SCHEDULER_SUPPORT</c> is defined.</p></note>
</desc>
</func>
<func><name><ret>ErlNifPid *</ret><nametext>enif_self(ErlNifEnv* caller_env, ErlNifPid* pid)</nametext></name>
@@ -1384,4 +1353,3 @@ typedef enum {
<p><seealso marker="erlang#load_nif-2">erlang:load_nif/2</seealso></p>
</section>
</cref>
-
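
The following sketch ties together the ErlNifFunc flags field and enif_schedule_nif() described above. It is illustrative only and not part of this commit; the module name example_nifs and the function names are hypothetical, and the dirty entry assumes an emulator built with the experimental dirty scheduler support:

    #include "erl_nif.h"

    /* Continuation NIF: does one bounded chunk of work, then either returns
     * the result or reschedules itself with enif_schedule_nif(). */
    static ERL_NIF_TERM chunked_work(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        int remaining;
        if (!enif_get_int(env, argv[0], &remaining))
            return enif_make_badarg(env);

        /* ... perform a bounded amount of work here ... */

        if (remaining <= 0)
            return enif_make_atom(env, "done");

        ERL_NIF_TERM next[] = { enif_make_int(env, remaining - 1) };
        /* flags == 0: reschedule as a regular NIF on a normal scheduler. */
        return enif_schedule_nif(env, "chunked_work", 0, chunked_work, 1, next);
    }

    static ERL_NIF_TERM long_cpu_job(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        /* Work that cannot be split; runs on a dirty CPU scheduler thread
         * because of the flags value in its ErlNifFunc entry below. */
        return enif_make_atom(env, "ok");
    }

    static ErlNifFunc nif_funcs[] = {
        {"chunked_work", 1, chunked_work},                             /* flags omitted => 0 */
        {"long_cpu_job", 0, long_cpu_job, ERL_NIF_DIRTY_JOB_CPU_BOUND} /* dirty NIF */
    };

    ERL_NIF_INIT(example_nifs, nif_funcs, NULL, NULL, NULL, NULL)

From Erlang, example_nifs:chunked_work(N) would then behave like a single call even though it executes as N+1 short NIF invocations, letting the scheduler regain control between chunks.
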
diff --git a/erts/doc/src/erlang.xml b/erts/doc/src/erlang.xml
index 84168397f6..97fe6d2915 100644
--- a/erts/doc/src/erlang.xml
+++ b/erts/doc/src/erlang.xml
@@ -1252,10 +1252,11 @@ true
<fsummary>Check if a function is exported and loaded</fsummary>
<desc>
<p>Returns <c>true</c> if the module <c><anno>Module</anno></c> is loaded
- and contains an exported function <c><anno>Function</anno>/<anno>Arity</anno></c>;
- otherwise <c>false</c>.</p>
- <p>Returns <c>false</c> for any BIF (functions implemented in C
- rather than in Erlang).</p>
+ and contains an exported function <c><anno>Function</anno>/<anno>Arity</anno></c>,
+ or if there is a BIF (a built-in function implemented in C)
+ with the given name; otherwise returns <c>false</c>.</p>
+ <note><p>This function used to return false for built-in
+ functions before the 18.0 release.</p></note>
</desc>
</func>
<func>
@@ -1376,6 +1377,19 @@ true
</desc>
</func>
<func>
+ <name name="get_keys" arity="0"/>
+ <fsummary>Return a list of all keys from the process dictionary</fsummary>
+ <desc>
+ <p>Returns a list of all keys present in the process dictionary.</p>
+ <pre>
+> <input>put(dog, {animal,1}),</input>
+<input>put(cow, {animal,2}),</input>
+<input>put(lamb, {animal,3}),</input>
+<input>get_keys().</input>
+[dog,cow,lamb]</pre>
+ </desc>
+ </func>
+ <func>
<name name="get_keys" arity="1"/>
<fsummary>Return a list of keys from the process dictionary</fsummary>
<desc>
diff --git a/erts/doc/src/notes.xml b/erts/doc/src/notes.xml
index 086d29c668..743369951f 100644
--- a/erts/doc/src/notes.xml
+++ b/erts/doc/src/notes.xml
@@ -30,6 +30,176 @@
</header>
<p>This document describes the changes made to the ERTS application.</p>
+<section><title>Erts 6.2</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ General documentation updates.</p>
+ <p>
+ Own Id: OTP-12052</p>
+ </item>
+ <item>
+ <p>A bug in the VM code implementing sending of signals
+ to ports could cause the receiving port queue to remain
+ in a busy state forever. When this state had been
+ reached, processes sending command signals to the port
+ either got suspended forever, or, if the <c>nosuspend</c>
+ feature was used, always failed to send to the port. This
+ bug was introduced in ERTS version 5.10.</p>
+ <p>In order for this bug to be triggered on a port, one
+ had to at least once utilize the <c>nosuspend</c>
+ functionality when passing a signal to the port. This by
+ either calling</p> <list> <item> <seealso
+ marker="erlang#port_command/3"><c>port_command(Port,
+ Data, [nosuspend | Options])</c></seealso>, </item>
+ <item> <seealso
+ marker="erlang#send/3"><c>erlang:send(Port, {PortOwner,
+ {command, Data}}, [nosuspend | Options])</c></seealso>,
+ </item> <item> <seealso
+ marker="erlang#send_nosuspend/2"><c>erlang:send_nosuspend(Port,
+ {PortOwner, {command, Data}})</c></seealso>, or </item>
+ <item> <seealso
+ marker="erlang#send_nosuspend/3"><c>erlang:send_nosuspend(Port,
+ {PortOwner, {command, Data}}, Options)</c></seealso>.
+ </item> </list>
+ <p>Thanks Vasily Demidenok for reporting the issue, and
+ Sergey Kudryashov for providing a testcase.</p>
+ <p>
+ Own Id: OTP-12082 Aux Id: OTP-10336 </p>
+ </item>
+ <item>
+ <p>
+ Fix size overflow bug at memory allocation. A memory
+ allocation call, with an insane size close to the entire
+ address space, could return successfully as if it had
+ allocated just a few bytes. (Thanks to Don A. Bailey for
+ reporting)</p>
+ <p>
+ Own Id: OTP-12091</p>
+ </item>
+ <item>
+ <p>
+ Fix various issues where negating a signed integer would
+ trigger undefined behaviour. This fixes issues in the
+ enif_make_int64 interface and some edge cases inside the
+ erlang runtime system.</p>
+ <p>
+ Own Id: OTP-12097</p>
+ </item>
+ <item>
+ <p>
+ The documentation erroneously listed the <seealso
+ marker="erl#+swct"><c>+swct</c></seealso> command line
+ argument under <c>+sws</c>.</p>
+ <p>
+ Own Id: OTP-12102 Aux Id: OTP-10994 </p>
+ </item>
+ <item>
+ <p>
+ Profiling messages could be delivered out of order when
+ profiling on <c>runnable_procs</c> and/or
+ <c>runnable_ports</c> using <seealso
+ marker="erlang#system_profile/2"><c>erlang:system_profile/2</c></seealso>.
+ This bug was introduced in ERTS version 5.10.</p>
+ <p>
+ Own Id: OTP-12105 Aux Id: OTP-10336 </p>
+ </item>
+ <item>
+ <p>
+ Various logging fixes, including: Add run queue index to
+ the process dump in crash dumps.<br/> Add thread index to
+ enomem slogan when crashing.<br/> Remove error logger
+ message for sending messages to old instances of the same
+ node.</p>
+ <p>
+ Own Id: OTP-12115</p>
+ </item>
+ <item>
+ <p>
+ Fix compiler warnings reported by LLVM</p>
+ <p>
+ Own Id: OTP-12138</p>
+ </item>
+ <item>
+ <p>
+ Correct conversion of <c>MIN_SMALL</c> by
+ <c>list_to_integer/1</c> and <c>binary_to_integer/1</c>.
+ The bug produced an unnormalized bignum which can cause
+ strange behavior such as comparing different to a correct
+ <c>MIN_SMALL</c> integer. The value <c>MIN_SMALL</c> is
+ <c>-(1 bsl 27) = -134217728</c> on a 32-bit VM and <c>-(1
+ bsl 59) = -576460752303423488</c> on a 64-bit VM. (Thanks
+ to Jesper Louis Andersen, Mikael Pettersson and Anthony
+ Ramine for report, patch and optimization suggestion)</p>
+ <p>
+ Own Id: OTP-12140</p>
+ </item>
+ <item>
+ <p>
+ Fix bug in <c>term_to_binary</c> that reallocates binary
+ with inconsistent size information. Bug has never been
+ confirmed to be the cause of any faulty behavior.</p>
+ <p>
+ Own Id: OTP-12141</p>
+ </item>
+ <item>
+ <p>
+ Real_path method used while prim loading archive files
+ was not taking into account the fact that windows
+ directory symlinks can be across different drives.</p>
+ <p>
+ Own Id: OTP-12155</p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>
+ Add log2 histogram to lcnt for lock wait time</p>
+ <p>
+ Own Id: OTP-12059</p>
+ </item>
+ <item>
+ <p>
+ Introduced <seealso
+ marker="erl_nif#enif_schedule_nif"><c>enif_schedule_nif()</c></seealso>
+ to the NIF API.</p>
+ <p>
+ The <c>enif_schedule_nif()</c> function allows a
+ long-running NIF to be broken into separate NIF
+ invocations without the help of a wrapper function
+ written in Erlang. The NIF first executes part of the
+ long-running task, then calls <c>enif_schedule_nif()</c>
+ to schedule a NIF for later execution to continue the
+ task. Any number of NIFs can be scheduled in this manner,
+ one after another. Since the emulator regains control
+ between invocations, this helps avoid problems caused by
+ native code tying up scheduler threads for too long.</p>
+ <p>
+ The <c>enif_schedule_nif()</c> function also replaces the
+ <c>enif_schedule_dirty_nif()</c> in the experimental
+ dirty NIF API. Note that the only incompatible changes
+ made are in the experimental dirty NIF API.</p>
+ <p>
+ See the <seealso marker="erl_nif">NIF
+ documentation</seealso> for more information.</p>
+ <p>
+ Thanks to Steve Vinoski.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-12128</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Erts 6.1.2</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -940,6 +1110,27 @@
Thanks to Matwey V. Kornilov</p>
<p>
Own Id: OTP-11829</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Erts 5.10.4.1</title>
+
+ <section><title>Known Bugs and Problems</title>
+ <list>
+ <item>
+ <p>
+ When using gen_tcp:connect and the <c>fd</c> option with
+ <c>port</c> and/or <c>ip</c>, the <c>port</c> and
+ <c>ip</c> options were ignored. This has been fixed so
+ that if <c>port</c> and/or <c>ip</c> is specified
+ together with <c>fd</c> a bind is requested for that
+ <c>fd</c>. If <c>port</c> and/or <c>ip</c> is not
+ specified bind will not be called.</p>
+ <p>
+ Own Id: OTP-12061</p>
</item>
</list>
</section>
diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c
index 9b251a6ad1..1096c2c8c8 100644
--- a/erts/emulator/beam/beam_emu.c
+++ b/erts/emulator/beam/beam_emu.c
@@ -3503,6 +3503,7 @@ get_map_elements_fail:
* I[0]: &&call_nif
* I[1]: Function pointer to NIF function
* I[2]: Pointer to erl_module_nif
+ * I[3]: Function pointer to dirty NIF
*/
BifFunction vbf;
@@ -3523,13 +3524,6 @@ get_map_elements_fail:
reg[0] = r(0);
nif_bif_result = (*fp)(&env, bif_nif_arity, reg);
erts_post_nif(&env);
-#ifdef ERTS_DIRTY_SCHEDULERS
- if (is_non_value(nif_bif_result) && c_p->freason == TRAP) {
- Export* ep = ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(c_p);
- ep->code[0] = I[-3];
- ep->code[1] = I[-2];
- }
-#endif
}
ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(nif_bif_result));
PROCESS_MAIN_CHK_LOCKS(c_p);
diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c
index a4e72a130a..07654f6d5c 100644
--- a/erts/emulator/beam/beam_load.c
+++ b/erts/emulator/beam/beam_load.c
@@ -2365,7 +2365,11 @@ load_code(LoaderState* stp)
if (stp->may_load_nif) {
const int finfo_ix = ci - FUNC_INFO_SZ;
- enum { MIN_FUNC_SZ = 3 };
+#ifdef ERTS_DIRTY_SCHEDULERS
+ enum { MIN_FUNC_SZ = 4 };
+#else
+ enum { MIN_FUNC_SZ = 3 };
+#endif
if (finfo_ix - last_func_start < MIN_FUNC_SZ && last_func_start) {
/* Must make room for call_nif op */
int pad = MIN_FUNC_SZ - (finfo_ix - last_func_start);
diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c
index fcbeb6cf5c..5370b592f3 100644
--- a/erts/emulator/beam/bif.c
+++ b/erts/emulator/beam/bif.c
@@ -1,7 +1,7 @@
/*
* %CopyrightBegin%
*
- * Copyright Ericsson AB 1996-2013. All Rights Reserved.
+ * Copyright Ericsson AB 1996-2014. All Rights Reserved.
*
* The contents of this file are subject to the Erlang Public License,
* Version 1.1, (the "License"); you may not use this file except in
@@ -1869,6 +1869,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
} else if (is_external_pid(to)) {
dep = external_pid_dist_entry(to);
if(dep == erts_this_dist_entry) {
+#if DEBUG
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp,
"Discarding message %T from %T to %T in an old "
@@ -1879,6 +1880,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
external_pid_creation(to),
erts_this_node->creation);
erts_send_error_to_logger(p->group_leader, dsbufp);
+#endif
return 0;
}
return remote_send(p, dep, to, to, msg, suspend);
@@ -1912,6 +1914,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
} else if (is_external_port(to)
&& (external_port_dist_entry(to)
== erts_this_dist_entry)) {
+#if DEBUG
erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf();
erts_dsprintf(dsbufp,
"Discarding message %T from %T to %T in an old "
@@ -1922,6 +1925,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) {
external_port_creation(to),
erts_this_node->creation);
erts_send_error_to_logger(p->group_leader, dsbufp);
+#endif
return 0;
} else if (is_internal_port(to)) {
int ret_val;
@@ -2887,9 +2891,6 @@ static int do_list_to_integer(Process *p, Eterm orig_list,
res = big_plus_small(res, m, hp);
}
- if (is_big(res)) /* check if small */
- res = big_plus_small(res, 0, hp); /* includes conversion to small */
-
if (neg) {
if (is_small(res))
res = make_small(-signed_val(res));
@@ -2899,8 +2900,12 @@ static int do_list_to_integer(Process *p, Eterm orig_list,
}
}
- if (is_big(res)) {
- hp += (big_arity(res)+1);
+ if (is_not_small(res)) {
+ res = big_plus_small(res, 0, hp); /* includes conversion to small */
+
+ if (is_not_small(res)) {
+ hp += (big_arity(res)+1);
+ }
}
HRelease(p,hp_end,hp);
}
@@ -3989,16 +3994,19 @@ BIF_RETTYPE halt_2(BIF_ALIST_2)
BIF_RETTYPE function_exported_3(BIF_ALIST_3)
{
+ int arity;
if (is_not_atom(BIF_ARG_1) ||
is_not_atom(BIF_ARG_2) ||
is_not_small(BIF_ARG_3)) {
BIF_ERROR(BIF_P, BADARG);
}
- if (erts_find_function(BIF_ARG_1, BIF_ARG_2, signed_val(BIF_ARG_3),
- erts_active_code_ix()) == NULL) {
- BIF_RET(am_false);
+ arity = signed_val(BIF_ARG_3);
+ if (erts_find_function(BIF_ARG_1, BIF_ARG_2, arity,
+ erts_active_code_ix()) != NULL ||
+ erts_is_builtin(BIF_ARG_1, BIF_ARG_2, arity)) {
+ BIF_RET(am_true);
}
- BIF_RET(am_true);
+ BIF_RET(am_false);
}
/**********************************************************************/
diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab
index 011e49f1fe..55ac778475 100644
--- a/erts/emulator/beam/bif.tab
+++ b/erts/emulator/beam/bif.tab
@@ -578,7 +578,7 @@ bif io:printable_range/0
bif os:unsetenv/1
#
-# New in R17A
+# New in 17.0
#
bif re:inspect/2
@@ -601,6 +601,17 @@ bif maps:values/1
bif erts_internal:cmp_term/2
#
+# New in 17.1
+#
+
+bif erlang:fun_info_mfa/1
+
+# New in 18.0
+#
+
+bif erlang:get_keys/0
+
+#
# Obsolete
#
diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c
index 41a041eba6..a8710dd910 100644
--- a/erts/emulator/beam/big.c
+++ b/erts/emulator/beam/big.c
@@ -274,6 +274,9 @@
_b = _b << _s; \
_vn1 = _b >> H_EXP; \
_vn0 = _b & LO_MASK; \
+ /* Sometimes _s is 0 which triggers undefined behaviour for the \
+ (_a0>>(D_EXP-_s)) shift, but this is ok because the \
+ & -s will make it all to 0 later anyways. */ \
_un32 = (_a1 << _s) | ((_a0>>(D_EXP-_s)) & (-_s >> (D_EXP-1))); \
_un10 = _a0 << _s; \
_un1 = _un10 >> H_EXP; \
@@ -1506,13 +1509,15 @@ Eterm uword_to_big(UWord x, Eterm *y)
*/
Eterm small_to_big(Sint x, Eterm *y)
{
+ Uint xu;
if (x >= 0) {
+ xu = x;
*y = make_pos_bignum_header(1);
} else {
- x = -x;
+ xu = -(Uint)x;
*y = make_neg_bignum_header(1);
}
- BIG_DIGIT(y, 0) = x;
+ BIG_DIGIT(y, 0) = xu;
return make_big(y);
}
@@ -1540,21 +1545,24 @@ Eterm erts_uint64_to_big(Uint64 x, Eterm **hpp)
Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp)
{
Eterm *hp = *hpp;
+ Uint64 ux;
int neg;
- if (x >= 0)
+ if (x >= 0) {
neg = 0;
+ ux = x;
+ }
else {
neg = 1;
- x = -x;
+ ux = -(Uint64)x;
}
#if defined(ARCH_32) || HALFWORD_HEAP
- if (x >= (((Uint64) 1) << 32)) {
+ if (ux >= (((Uint64) 1) << 32)) {
if (neg)
*hp = make_neg_bignum_header(2);
else
*hp = make_pos_bignum_header(2);
- BIG_DIGIT(hp, 0) = (Uint) (x & ((Uint) 0xffffffff));
- BIG_DIGIT(hp, 1) = (Uint) ((x >> 32) & ((Uint) 0xffffffff));
+ BIG_DIGIT(hp, 0) = (Uint) (ux & ((Uint) 0xffffffff));
+ BIG_DIGIT(hp, 1) = (Uint) ((ux >> 32) & ((Uint) 0xffffffff));
*hpp += 3;
}
else
@@ -1564,7 +1572,7 @@ Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp)
*hp = make_neg_bignum_header(1);
else
*hp = make_pos_bignum_header(1);
- BIG_DIGIT(hp, 0) = (Uint) x;
+ BIG_DIGIT(hp, 0) = (Uint) ux;
*hpp += 2;
}
return make_big(hp);
@@ -2667,9 +2675,6 @@ Eterm erts_chars_to_integer(Process *BIF_P, char *bytes,
res = big_plus_small(res, m, hp);
}
- if (is_big(res)) /* check if small */
- res = big_plus_small(res, 0, hp); /* includes conversion to small */
-
if (neg) {
if (is_small(res))
res = make_small(-signed_val(res));
@@ -2679,8 +2684,12 @@ Eterm erts_chars_to_integer(Process *BIF_P, char *bytes,
}
}
- if (is_big(res)) {
- hp += (big_arity(res) + 1);
+ if (is_not_small(res)) {
+ res = big_plus_small(res, 0, hp); /* includes conversion to small */
+
+ if (is_not_small(res)) {
+ hp += (big_arity(res) + 1);
+ }
}
HRelease(BIF_P, hp_end, hp);
goto bytebuf_to_integer_1_done;
diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h
index d80111822e..da31876d75 100644
--- a/erts/emulator/beam/big.h
+++ b/erts/emulator/beam/big.h
@@ -101,7 +101,7 @@ typedef Uint dsize_t; /* Vector size type */
#define ERTS_SINT64_HEAP_SIZE(X) \
(IS_SSMALL((X)) \
? 0 \
- : ERTS_UINT64_BIG_HEAP_SIZE__((X) >= 0 ? (X) : -(X)))
+ : ERTS_UINT64_BIG_HEAP_SIZE__((X) >= 0 ? (X) : -(Uint64)(X)))
#define ERTS_UINT64_HEAP_SIZE(X) \
(IS_USMALL(0, (X)) ? 0 : ERTS_UINT64_BIG_HEAP_SIZE__((X)))
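
The changes in big.c and big.h above share one idiom: take the magnitude of a signed value by casting to the unsigned type before negating, because negating the most negative signed value overflows and is undefined behaviour. A standalone illustration (hypothetical helper, not part of this commit), using <stdint.h> types in place of Sint64/Uint64:

    #include <stdint.h>

    /* Well defined for every int64_t value, including INT64_MIN: unsigned
     * negation is modular arithmetic, so -(uint64_t)INT64_MIN == 2^63. */
    static uint64_t magnitude_u64(int64_t x)
    {
        return (x >= 0) ? (uint64_t)x : -(uint64_t)x;
    }
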
diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c
index 7d4f52ee23..08265b590d 100644
--- a/erts/emulator/beam/break.c
+++ b/erts/emulator/beam/break.c
@@ -256,6 +256,7 @@ print_process_info(int to, void *to_arg, Process *p)
p->current[1],
p->current[2]);
}
+ erts_print(to, to_arg, "Run queue: %d\n", erts_get_runq_proc(p)->ix);
erts_print(to, to_arg, "Spawned by: %T\n", p->parent);
approx_started = (time_t) p->approx_started;
diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c
index 05ac24e04d..90cd227fae 100644
--- a/erts/emulator/beam/erl_alloc.c
+++ b/erts/emulator/beam/erl_alloc.c
@@ -1873,8 +1873,8 @@ erts_alc_fatal_error(int error, int func, ErtsAlcType_t n, ...)
size = va_arg(argp, Uint);
va_end(argp);
erl_exit(1,
- "%s: Cannot %s %lu bytes of memory (of type \"%s\").\n",
- allctr_str, op, size, t_str);
+ "%s: Cannot %s %lu bytes of memory (of type \"%s\", thread %d).\n",
+ allctr_str, op, size, t_str, ERTS_ALC_GET_THR_IX());
break;
}
case ERTS_ALC_E_NOALLCTR:
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 17ac6316b7..37354b7f8d 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -357,6 +357,7 @@ type DB_MS_PSDO_PROC LONG_LIVED_LOW ETS db_match_pseudo_proc
type SCHDLR_DATA LONG_LIVED_LOW SYSTEM scheduler_data
type LL_TEMP_TERM LONG_LIVED_LOW SYSTEM ll_temp_term
+type NIF_TRAP_EXPORT STANDARD_LOW CODE nif_trap_export_entry
type EXPORT LONG_LIVED_LOW CODE export_entry
type MONITOR_SH STANDARD_LOW PROCESSES monitor_sh
type NLINK_SH STANDARD_LOW PROCESSES nlink_sh
@@ -375,6 +376,7 @@ type DB_MS_PSDO_PROC LONG_LIVED ETS db_match_pseudo_proc
type SCHDLR_DATA LONG_LIVED SYSTEM scheduler_data
type LL_TEMP_TERM LONG_LIVED SYSTEM ll_temp_term
+type NIF_TRAP_EXPORT STANDARD CODE nif_trap_export_entry
type EXPORT LONG_LIVED CODE export_entry
type MONITOR_SH FIXED_SIZE PROCESSES monitor_sh
type NLINK_SH FIXED_SIZE PROCESSES nlink_sh
diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c
index 7e0e825a0d..3bf78adce7 100644
--- a/erts/emulator/beam/erl_bif_binary.c
+++ b/erts/emulator/beam/erl_bif_binary.c
@@ -1324,9 +1324,9 @@ static int parse_match_opts_list(Eterm l, Eterm bin, Uint *posp, Uint *endp)
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
@@ -1555,9 +1555,9 @@ BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen)
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
@@ -1644,9 +1644,9 @@ BIF_RETTYPE erts_gc_binary_part(Process *p, Eterm *reg, Eterm live, int range_is
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
@@ -2213,9 +2213,9 @@ static BIF_RETTYPE binary_bin_to_list_common(Process *p,
goto badarg;
}
if (len < 0) {
- Sint lentmp = -len;
+ Uint lentmp = -(Uint)len;
/* overflow */
- if (lentmp == len || lentmp < 0 || -lentmp != len) {
+ if ((Sint)lentmp < 0) {
goto badarg;
}
len = lentmp;
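
The four erl_bif_binary.c hunks above replace a fragile negation test with the same unsigned-negation idiom: the magnitude of a negative length only fails to fit in a signed word when the length was the most negative value, which the cast back to a signed type exposes. A hypothetical standalone version of that check (not part of this commit; int64_t/uint64_t stand in for Sint/Uint):

    #include <stdint.h>

    /* Mirrors the patched "len < 0" branches: returns 0 (reject) when the
     * magnitude overflows, otherwise stores the positive magnitude in *out. */
    static int negate_len_checked(int64_t len, int64_t *out)
    {
        uint64_t lentmp = -(uint64_t)len;   /* well-defined modular negation */
        if ((int64_t)lentmp < 0)            /* magnitude does not fit: overflow */
            return 0;
        *out = (int64_t)lentmp;
        return 1;
    }
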
diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c
index 4d5e55aaf5..6efe9d9550 100644
--- a/erts/emulator/beam/erl_bif_info.c
+++ b/erts/emulator/beam/erl_bif_info.c
@@ -3055,6 +3055,25 @@ fun_info_2(BIF_ALIST_2)
return TUPLE2(hp, what, val);
}
+BIF_RETTYPE
+fun_info_mfa_1(BIF_ALIST_1)
+{
+ Process* p = BIF_P;
+ Eterm fun = BIF_ARG_1;
+ Eterm* hp;
+
+ if (is_fun(fun)) {
+ ErlFunThing* funp = (ErlFunThing *) fun_val(fun);
+ hp = HAlloc(p, 4);
+ BIF_RET(TUPLE3(hp,funp->fe->module,funp->fe->address[-2],make_small(funp->arity)));
+ } else if (is_export(fun)) {
+ Export* exp = (Export *) ((UWord) (export_val(fun))[1]);
+ hp = HAlloc(p, 4);
+ BIF_RET(TUPLE3(hp,exp->code[0],exp->code[1],make_small(exp->code[2])));
+ }
+ BIF_ERROR(p, BADARG);
+}
+
BIF_RETTYPE is_process_alive_1(BIF_ALIST_1)
{
if(is_internal_pid(BIF_ARG_1)) {
@@ -3856,16 +3875,19 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s
Uint tries = 0, colls = 0;
unsigned long timer_s = 0, timer_ns = 0, timer_n = 0;
unsigned int line = 0;
+ unsigned int i;
Eterm af, uil;
Eterm uit, uic;
Eterm uits, uitns, uitn;
Eterm tt, tstat, tloc, t;
+ Eterm thist, vhist[ERTS_LCNT_HISTOGRAM_SLOT_SIZE];
/* term:
- * [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}}}]
+ * [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}},
+ * { .. histogram .. }]
*/
-
+
tries = (Uint) ethr_atomic_read(&stats->tries);
colls = (Uint) ethr_atomic_read(&stats->colls);
@@ -3874,23 +3896,27 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s
timer_ns = stats->timer.ns;
timer_n = stats->timer_n;
- af = erts_atom_put(stats->file, strlen(stats->file), ERTS_ATOM_ENC_LATIN1, 1);
+ af = erts_atom_put((byte *)stats->file, strlen(stats->file), ERTS_ATOM_ENC_LATIN1, 1);
uil = erts_bld_uint( hpp, szp, line);
tloc = erts_bld_tuple(hpp, szp, 2, af, uil);
- uit = erts_bld_uint( hpp, szp, tries);
- uic = erts_bld_uint( hpp, szp, colls);
-
+ uit = erts_bld_uint( hpp, szp, tries);
+ uic = erts_bld_uint( hpp, szp, colls);
+
uits = erts_bld_uint( hpp, szp, timer_s);
uitns = erts_bld_uint( hpp, szp, timer_ns);
uitn = erts_bld_uint( hpp, szp, timer_n);
tt = erts_bld_tuple(hpp, szp, 3, uits, uitns, uitn);
tstat = erts_bld_tuple(hpp, szp, 3, uit, uic, tt);
-
- t = erts_bld_tuple(hpp, szp, 2, tloc, tstat);
-
- res = erts_bld_cons( hpp, szp, t, res);
+
+ for(i = 0; i < ERTS_LCNT_HISTOGRAM_SLOT_SIZE; i++) {
+ vhist[i] = erts_bld_uint(hpp, szp, stats->hist.ns[i]);
+ }
+ thist = erts_bld_tuplev(hpp, szp, ERTS_LCNT_HISTOGRAM_SLOT_SIZE, vhist);
+
+ t = erts_bld_tuple(hpp, szp, 3, tloc, tstat, thist);
+ res = erts_bld_cons( hpp, szp, t, res);
return res;
}
@@ -3911,13 +3937,13 @@ static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock
ASSERT(ltype);
- type = erts_atom_put(ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
- name = erts_atom_put(lock->name, strlen(lock->name), ERTS_ATOM_ENC_LATIN1, 1);
+ type = erts_atom_put((byte *)ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
+ name = erts_atom_put((byte *)lock->name, strlen(lock->name), ERTS_ATOM_ENC_LATIN1, 1);
if (lock->flag & ERTS_LCNT_LT_ALLOC) {
/* use allocator types names as id's for allocator locks */
ltype = (char *) ERTS_ALC_A2AD(signed_val(lock->id));
- id = erts_atom_put(ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
+ id = erts_atom_put((byte *)ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1);
} else if (lock->flag & ERTS_LCNT_LT_PROCLOCK) {
/* use registered names as id's for process locks if available */
proc = erts_proc_lookup(lock->id);
@@ -3928,16 +3954,15 @@ static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock
id = lock->id;
}
} else {
- id = lock->id;
+ id = lock->id;
}
-
+
for (i = 0; i < lock->n_stats; i++) {
stats = lcnt_build_lock_stats_term(hpp, szp, &(lock->stats[i]), stats);
}
-
- t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats);
-
- res = erts_bld_cons( hpp, szp, t, res);
+
+ t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats);
+ res = erts_bld_cons( hpp, szp, t, res);
return res;
}
@@ -3957,12 +3982,12 @@ static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_data_t *da
dtns = erts_bld_uint( hpp, szp, data->duration.ns);
tdt = erts_bld_tuple(hpp, szp, 2, dts, dtns);
- adur = erts_atom_put(str_duration, strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1);
+ adur = erts_atom_put((byte *)str_duration, strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1);
tdur = erts_bld_tuple(hpp, szp, 2, adur, tdt);
/* lock tuple */
- aloc = erts_atom_put(str_locks, strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1);
+ aloc = erts_atom_put((byte *)str_locks, strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1);
for (lock = data->current_locks->head; lock != NULL ; lock = lock->next ) {
lloc = lcnt_build_lock_term(hpp, szp, lock, lloc);
diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h
index 5ced8c5ca0..f9938fc66c 100644
--- a/erts/emulator/beam/erl_driver.h
+++ b/erts/emulator/beam/erl_driver.h
@@ -133,7 +133,7 @@ typedef struct {
#define ERL_DRV_EXTENDED_MARKER (0xfeeeeeed)
#define ERL_DRV_EXTENDED_MAJOR_VERSION 3
-#define ERL_DRV_EXTENDED_MINOR_VERSION 0
+#define ERL_DRV_EXTENDED_MINOR_VERSION 1
/*
* The emulator will refuse to load a driver with a major version
diff --git a/erts/emulator/beam/erl_drv_nif.h b/erts/emulator/beam/erl_drv_nif.h
index 3f829ea7ea..4e8c6dc68b 100644
--- a/erts/emulator/beam/erl_drv_nif.h
+++ b/erts/emulator/beam/erl_drv_nif.h
@@ -35,6 +35,7 @@ typedef struct {
int scheduler_threads;
int nif_major_version;
int nif_minor_version;
+ int dirty_scheduler_support;
} ErlDrvSysInfo;
typedef struct {
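
A minimal usage sketch (not part of this patch) of the new dirty_scheduler_support field: a NIF library's load callback probes it at runtime before relying on dirty scheduling. It assumes, as in the existing NIF API, that ErlNifSysInfo aliases ErlDrvSysInfo and that enif_system_info() fills it in; the load() body and the fallback strategy are illustrative only.

#include "erl_nif.h"

/* Hypothetical load callback; it would be hooked up via ERL_NIF_INIT. */
static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
{
    ErlNifSysInfo info;

    enif_system_info(&info, sizeof(info));
    if (!info.dirty_scheduler_support) {
        /* No dirty schedulers in this emulator: plan to run heavy work in
         * bounded chunks instead (enif_consume_timeslice/enif_schedule_nif). */
    }
    return 0;
}
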
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index aa15d2cc57..0db42d4325 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -2018,6 +2018,20 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset)
roots[n].sz = 1;
n++;
}
+
+ /*
+ * If a NIF has saved arguments, they need to be added
+ */
+ if (ERTS_PROC_GET_NIF_TRAP_EXPORT(p)) {
+ Eterm* argv;
+ int argc;
+ if (erts_setup_nif_gc(p, &argv, &argc)) {
+ roots[n].v = argv;
+ roots[n].sz = argc;
+ n++;
+ }
+ }
+
ASSERT(n <= rootset->size);
mp = p->msg.first;
diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c
index 5e6d812242..88c4006934 100644
--- a/erts/emulator/beam/erl_init.c
+++ b/erts/emulator/beam/erl_init.c
@@ -2066,8 +2066,10 @@ erl_exit_vv(int n, int flush_async, char *fmt, va_list args1, va_list args2)
system_cleanup(flush_async);
save_statistics();
-
- an = abs(n);
+ if (n < 0)
+ an = -(unsigned int)n;
+ else
+ an = n;
if (erts_mtrace_enabled)
erts_mtrace_exit((Uint32) an);
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index c13eb87012..b105ece6f1 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -139,7 +139,6 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "drv_tsd", NULL },
{ "async_enq_mtx", NULL },
#ifdef ERTS_SMP
- { "sys_msg_q", NULL },
{ "atom_tab", NULL },
{ "make_ref", NULL },
{ "misc_op_list_pre_alloc_lock", "address" },
@@ -148,6 +147,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "btm_pre_alloc_lock", NULL, },
{ "dist_entry_out_queue", "address" },
{ "port_sched_lock", "port_id" },
+ { "sys_msg_q", NULL },
{ "port_table", NULL },
#endif
{ "mtrace_op", NULL },
@@ -227,8 +227,7 @@ rw_op_str(Uint16 flags)
case ERTS_LC_FLG_LO_READ:
return " (r)";
case ERTS_LC_FLG_LO_WRITE:
- erts_fprintf(stderr, "\nInternal error\n");
- lc_abort();
+ ERTS_INTERNAL_ERROR("Only write flag present");
default:
break;
}
@@ -311,8 +310,7 @@ static ERTS_INLINE void lc_free(void *p)
static void *lc_core_alloc(void)
{
lc_unlock();
- erts_fprintf(stderr, "Lock checker out of memory!\n");
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker out of memory!\n");
}
#else
@@ -325,8 +323,7 @@ static void *lc_core_alloc(void)
fbs = (erts_lc_free_block_t *) malloc(sizeof(erts_lc_free_block_t)
* ERTS_LC_FB_CHUNK_SIZE);
if (!fbs) {
- erts_fprintf(stderr, "Lock checker failed to allocate memory!\n");
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
}
for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) {
#ifdef DEBUG
@@ -366,11 +363,11 @@ create_locked_locks(char *thread_name)
{
erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t));
if (!l_lcks)
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
if (!l_lcks->thread_name)
- lc_abort();
+ ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!");
l_lcks->emu_thread = 0;
l_lcks->tid = erts_thr_self();
@@ -691,7 +688,7 @@ erts_lc_set_thread_name(char *thread_name)
free((void *) l_lcks->thread_name);
l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown");
if (!l_lcks->thread_name)
- lc_abort();
+ ERTS_INTERNAL_ERROR("strdup failed");
}
l_lcks->emu_thread = 1;
}
@@ -1330,7 +1327,7 @@ erts_lc_init(void)
#endif /* #ifdef ERTS_LC_STATIC_ALLOC */
if (ethr_spinlock_init(&free_blocks_lock) != 0)
- lc_abort();
+ ERTS_INTERNAL_ERROR("spinlock_init failed");
erts_tsd_key_create(&locks_key,"erts_lock_check_key");
}
diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c
index 6f44bf097b..cf6996ea06 100644
--- a/erts/emulator/beam/erl_lock_count.c
+++ b/erts/emulator/beam/erl_lock_count.c
@@ -61,6 +61,25 @@ static ERTS_INLINE void lcnt_unlock(void) {
ethr_mutex_unlock(&lcnt_data_lock);
}
+const int log2_tab64[64] = {
+ 63, 0, 58, 1, 59, 47, 53, 2,
+ 60, 39, 48, 27, 54, 33, 42, 3,
+ 61, 51, 37, 40, 49, 18, 28, 20,
+ 55, 30, 34, 11, 43, 14, 22, 4,
+ 62, 57, 46, 52, 38, 26, 32, 41,
+ 50, 36, 17, 19, 29, 10, 13, 21,
+ 56, 45, 25, 31, 35, 16, 9, 12,
+ 44, 24, 15, 8, 23, 7, 6, 5};
+
+static ERTS_INLINE int lcnt_log2(Uint64 v) {
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v |= v >> 32;
+ return log2_tab64[((Uint64)((v - (v >> 1))*0x07EDD5E59A4E28C2)) >> 58];
+}
static char* lcnt_lock_type(Uint16 flag) {
switch(flag & ERTS_LCNT_LT_ALL) {
@@ -81,19 +100,20 @@ static void lcnt_clear_stats(erts_lcnt_lock_stats_t *stats) {
stats->timer_n = 0;
stats->file = (char *)str_undefined;
stats->line = 0;
+ sys_memzero(stats->hist.ns, sizeof(stats->hist.ns));
}
static void lcnt_time(erts_lcnt_time_t *time) {
-#ifdef HAVE_GETHRTIME
+#if 0 || defined(HAVE_GETHRTIME)
SysHrTime hr_time;
hr_time = sys_gethrtime();
time->s = (unsigned long)(hr_time / 1000000000LL);
time->ns = (unsigned long)(hr_time - 1000000000LL*time->s);
-#else
- SysTimeval tv;
- sys_gettimeofday(&tv);
- time->s = tv.tv_sec;
- time->ns = tv.tv_usec*1000LL;
+#else
+ SysTimeval tv;
+ sys_gettimeofday(&tv);
+ time->s = tv.tv_sec;
+ time->ns = tv.tv_usec*1000LL;
#endif
}
@@ -111,28 +131,29 @@ static void lcnt_time_diff(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_
dns += 1000000000LL;
}
+ ASSERT(ds >= 0);
+
d->s = ds;
d->ns = dns;
}
-/* difference d must be positive */
+/* difference d must be non-negative */
static void lcnt_time_add(erts_lcnt_time_t *t, erts_lcnt_time_t *d) {
- unsigned long ngns = 0;
-
t->s += d->s;
t->ns += d->ns;
- ngns = t->ns / 1000000000LL;
+ t->s += t->ns / 1000000000LL;
t->ns = t->ns % 1000000000LL;
-
- t->s += ngns;
}
static erts_lcnt_thread_data_t *lcnt_thread_data_alloc(void) {
erts_lcnt_thread_data_t *eltd;
eltd = (erts_lcnt_thread_data_t*)malloc(sizeof(erts_lcnt_thread_data_t));
+ if (!eltd) {
+ ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ }
eltd->timer_set = 0;
eltd->lock_in_conflict = 0;
@@ -158,59 +179,64 @@ static char* lock_opt(Uint16 flag) {
return "--";
}
-static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action, char *extra) {
- erts_aint_t colls, tries, w_state, r_state;
- erts_lcnt_lock_stats_t *stats = NULL;
-
+static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action) {
+ erts_aint_t w_state, r_state;
char *type;
- int i;
-
+
+ if (strcmp(lock->name, "run_queue") != 0) return;
type = lcnt_lock_type(lock->flag);
r_state = ethr_atomic_read(&lock->r_state);
w_state = ethr_atomic_read(&lock->w_state);
-
if (lock->flag & flag) {
- erts_printf("%20s [%30s] [r/w state %4ld/%4ld] id %T %s\r\n",
- action,
- lock->name,
- r_state,
- w_state,
- lock->id,
- extra);
+ erts_fprintf(stderr,"%10s [%24s] [r/w state %4ld/%4ld] %2s id %T\r\n",
+ action,
+ lock->name,
+ r_state,
+ w_state,
+ type,
+ lock->id);
}
}
-
-static void print_lock(erts_lcnt_lock_t *lock, char *action) {
- if (strcmp(lock->name, "proc_main") == 0) {
- print_lock_x(lock, ERTS_LCNT_LT_ALL, action, "");
- }
-}
-
#endif
static erts_lcnt_lock_stats_t *lcnt_get_lock_stats(erts_lcnt_lock_t *lock, char *file, unsigned int line) {
unsigned int i;
erts_lcnt_lock_stats_t *stats = NULL;
-
- for (i = 0; i < lock->n_stats; i++) {
- if ((lock->stats[i].file == file) && (lock->stats[i].line == line)) {
- return &(lock->stats[i]);
- }
- }
- if (lock->n_stats < ERTS_LCNT_MAX_LOCK_LOCATIONS) {
- stats = &lock->stats[lock->n_stats];
- lock->n_stats++;
- stats->file = file;
- stats->line = line;
- return stats;
+ if (erts_lcnt_rt_options & ERTS_LCNT_OPT_LOCATION) {
+ for (i = 0; i < lock->n_stats; i++) {
+ if ((lock->stats[i].file == file) && (lock->stats[i].line == line)) {
+ return &(lock->stats[i]);
+ }
+ }
+ if (lock->n_stats < ERTS_LCNT_MAX_LOCK_LOCATIONS) {
+ stats = &lock->stats[lock->n_stats];
+ lock->n_stats++;
+ stats->file = file;
+ stats->line = line;
+ return stats;
+ }
}
return &lock->stats[0];
+}
+static void lcnt_update_stats_hist(erts_lcnt_hist_t *hist, erts_lcnt_time_t *time_wait) {
+ int idx;
+ unsigned long r;
+
+ if (time_wait->s > 0 || time_wait->ns > ERTS_LCNT_HISTOGRAM_MAX_NS) {
+ idx = ERTS_LCNT_HISTOGRAM_SLOT_SIZE - 1;
+ } else {
+ r = time_wait->ns >> ERTS_LCNT_HISTOGRAM_RSHIFT;
+ if (r) idx = lcnt_log2(r);
+ else idx = 0;
+ }
+ hist->ns[idx]++;
}
-static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflict, erts_lcnt_time_t *time_wait) {
+static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflict,
+ erts_lcnt_time_t *time_wait) {
ethr_atomic_inc(&stats->tries);
@@ -220,6 +246,7 @@ static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflic
if (time_wait) {
lcnt_time_add(&(stats->timer), time_wait);
stats->timer_n++;
+ lcnt_update_stats_hist(&stats->hist,time_wait);
}
}
@@ -248,6 +275,9 @@ void erts_lcnt_init() {
/* init lcnt structure */
erts_lcnt_data = (erts_lcnt_data_t*)malloc(sizeof(erts_lcnt_data_t));
+ if (!erts_lcnt_data) {
+ ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ }
erts_lcnt_data->current_locks = erts_lcnt_list_init();
erts_lcnt_data->deleted_locks = erts_lcnt_list_init();
@@ -269,6 +299,9 @@ erts_lcnt_lock_list_t *erts_lcnt_list_init(void) {
erts_lcnt_lock_list_t *list;
list = (erts_lcnt_lock_list_t*)malloc(sizeof(erts_lcnt_lock_list_t));
+ if (!list) {
+ ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ }
list->head = NULL;
list->tail = NULL;
list->n = 0;
@@ -330,8 +363,9 @@ void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock)
/* interface to erl_threads.h */
/* only lock on init and destroy, all others should use atomics */
void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag ) {
- erts_lcnt_init_lock_x(lock, name, flag, am_undefined);
+ erts_lcnt_init_lock_x(lock, name, flag, NIL);
}
+
void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id) {
int i;
if (!name) {
@@ -360,7 +394,6 @@ void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eter
}
erts_lcnt_list_insert(erts_lcnt_data->current_locks, lock);
-
lcnt_unlock();
}
@@ -375,6 +408,9 @@ void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock) {
/* copy structure and insert the copy */
deleted_lock = (erts_lcnt_lock_t*)malloc(sizeof(erts_lcnt_lock_t));
+ if (!deleted_lock) {
+ ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!");
+ }
memcpy(deleted_lock, lock, sizeof(erts_lcnt_lock_t));
deleted_lock->next = NULL;
@@ -417,8 +453,9 @@ void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) {
if ((w_state > 0) || (r_state > 0)) {
eltd->lock_in_conflict = 1;
- if (eltd->timer_set == 0)
+ if (eltd->timer_set == 0) {
lcnt_time(&eltd->timer);
+ }
eltd->timer_set++;
} else {
eltd->lock_in_conflict = 0;
@@ -433,7 +470,7 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
w_state = ethr_atomic_read(&lock->w_state);
- ethr_atomic_inc( &lock->w_state);
+ ethr_atomic_inc(&lock->w_state);
eltd = lcnt_get_thread_data();
@@ -446,10 +483,10 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
      * 'atomically'. All other locks will block the thread if w_state > 0
* i.e. locked.
*/
- if (eltd->timer_set == 0)
+ if (eltd->timer_set == 0) {
lcnt_time(&eltd->timer);
+ }
eltd->timer_set++;
-
} else {
eltd->lock_in_conflict = 0;
}
@@ -459,11 +496,10 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) {
void erts_lcnt_lock_unaquire(erts_lcnt_lock_t *lock) {
/* should check if this thread was "waiting" */
-
if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return;
if (!ERTS_LCNT_LOCK_TYPE(lock)) return;
- ethr_atomic_dec( &lock->w_state);
+ ethr_atomic_dec(&lock->w_state);
}
/* erts_lcnt_lock_post
@@ -491,7 +527,7 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line
if (!(lock->flag & (ERTS_LCNT_LT_RWMUTEX | ERTS_LCNT_LT_RWSPINLOCK))) {
flowstate = ethr_atomic_read(&lock->flowstate);
ASSERT(flowstate == 0);
- ethr_atomic_inc( &lock->flowstate);
+ ethr_atomic_inc(&lock->flowstate);
}
#endif
@@ -500,19 +536,12 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line
ASSERT(eltd);
/* if lock was in conflict, time it */
-
- if (erts_lcnt_rt_options & ERTS_LCNT_OPT_LOCATION) {
- stats = lcnt_get_lock_stats(lock, file, line);
- } else {
- stats = &lock->stats[0];
- }
-
+ stats = lcnt_get_lock_stats(lock, file, line);
if (eltd->timer_set) {
lcnt_time(&timer);
lcnt_time_diff(&time_wait, &timer, &(eltd->timer));
lcnt_update_stats(stats, eltd->lock_in_conflict, &time_wait);
-
eltd->timer_set--;
ASSERT(eltd->timer_set >= 0);
} else {
@@ -541,11 +570,11 @@ void erts_lcnt_unlock(erts_lcnt_lock_t *lock) {
/* flowstate */
flowstate = ethr_atomic_read(&lock->flowstate);
ASSERT(flowstate == 1);
- ethr_atomic_dec( &lock->flowstate);
+ ethr_atomic_dec(&lock->flowstate);
/* write state */
w_state = ethr_atomic_read(&lock->w_state);
- ASSERT(w_state > 0)
+ ASSERT(w_state > 0);
#endif
ethr_atomic_dec(&lock->w_state);
}
@@ -582,9 +611,7 @@ void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) {
ethr_atomic_inc( &lock->flowstate);
#endif
ethr_atomic_inc(&lock->w_state);
-
lcnt_update_stats(&(lock->stats[0]), 0, NULL);
-
} else {
ethr_atomic_inc(&lock->stats[0].tries);
ethr_atomic_inc(&lock->stats[0].colls);
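
A standalone sketch (not part of this patch) of the wait-time histogram bucketing introduced above: the 64-bit de Bruijn log2 and the clamp/right-shift from erl_lock_count.h, using the non-gethrtime configuration (20 slots, right shift of 10). The table and multiplier are copied from the code above; the hist_slot() wrapper and main() are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define SLOT_SIZE 20                       /* ERTS_LCNT_HISTOGRAM_SLOT_SIZE */
#define RSHIFT    10                       /* ERTS_LCNT_HISTOGRAM_RSHIFT    */
#define MAX_NS    ((1UL << 28) - 1)        /* ERTS_LCNT_HISTOGRAM_MAX_NS    */

static const int log2_tab64[64] = {
    63,  0, 58,  1, 59, 47, 53,  2, 60, 39, 48, 27, 54, 33, 42,  3,
    61, 51, 37, 40, 49, 18, 28, 20, 55, 30, 34, 11, 43, 14, 22,  4,
    62, 57, 46, 52, 38, 26, 32, 41, 50, 36, 17, 19, 29, 10, 13, 21,
    56, 45, 25, 31, 35, 16,  9, 12, 44, 24, 15,  8, 23,  7,  6,  5 };

/* Round v down to a power of two, then look up its log2 via a de Bruijn multiply. */
static int log2_64(uint64_t v)
{
    v |= v >> 1;  v |= v >> 2;  v |= v >> 4;
    v |= v >> 8;  v |= v >> 16; v |= v >> 32;
    return log2_tab64[((v - (v >> 1)) * 0x07EDD5E59A4E28C2ULL) >> 58];
}

/* Map a lock wait time in nanoseconds to its histogram slot. */
static int hist_slot(unsigned long wait_ns)
{
    unsigned long r;
    if (wait_ns > MAX_NS)
        return SLOT_SIZE - 1;              /* long waits land in the last slot */
    r = wait_ns >> RSHIFT;                 /* drop sub-microsecond resolution  */
    return r ? log2_64(r) : 0;
}

int main(void)
{
    printf("%d %d %d\n", hist_slot(500), hist_slot(1UL << 20), hist_slot(1UL << 30));
    return 0;
}
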
diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h
index 75f7cd028b..ffbb93da1b 100644
--- a/erts/emulator/beam/erl_lock_count.h
+++ b/erts/emulator/beam/erl_lock_count.h
@@ -35,6 +35,10 @@
* | | | - collisions (including trylock busy)
* | | | - timer (time spent in waiting for lock)
* | | | - n_timer (collisions excluding trylock busy)
+ * | | | - histogram
+ * | | | | - # 0 = log2(lock wait_time ns)
+ * | | | | - ...
+ * | | | | - # n = log2(lock wait_time ns)
*
* Each instance of a lock is the unique lock, i.e. set and id in that set.
* For each lock there is a set of statistics with where and what impact
@@ -68,8 +72,17 @@
#include "ethread.h"
+#define ERTS_LCNT_MAX_LOCK_LOCATIONS (10)
-#define ERTS_LCNT_MAX_LOCK_LOCATIONS (10)
+/* histogram */
+#define ERTS_LCNT_HISTOGRAM_MAX_NS (((unsigned long)1LL << 28) - 1)
+#if 0 || defined(HAVE_GETHRTIME)
+#define ERTS_LCNT_HISTOGRAM_SLOT_SIZE (30)
+#define ERTS_LCNT_HISTOGRAM_RSHIFT (0)
+#else
+#define ERTS_LCNT_HISTOGRAM_SLOT_SIZE (20)
+#define ERTS_LCNT_HISTOGRAM_RSHIFT (10)
+#endif
#define ERTS_LCNT_LT_SPINLOCK (((Uint16) 1) << 0)
#define ERTS_LCNT_LT_RWSPINLOCK (((Uint16) 1) << 1)
@@ -104,6 +117,10 @@ typedef struct {
extern erts_lcnt_time_t timer_start;
+typedef struct {
+ Uint32 ns[ERTS_LCNT_HISTOGRAM_SLOT_SIZE]; /* log2 histogram of nanosecond wait-time occurrences */
+} erts_lcnt_hist_t;
+
typedef struct erts_lcnt_lock_stats_s {
/* "tries" and "colls" needs to be atomic since
* trylock busy does not aquire a lock and there
@@ -118,6 +135,7 @@ typedef struct erts_lcnt_lock_stats_s {
unsigned long timer_n; /* #times waited for lock */
erts_lcnt_time_t timer; /* total wait time for lock */
+ erts_lcnt_hist_t hist;
} erts_lcnt_lock_stats_t;
/* rw locks uses both states, other locks only uses w_state */
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 59a677a12c..8870fac7d9 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -415,7 +415,13 @@ erts_queue_dist_message(Process *rcvr,
if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ))
erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
- erts_proc_notify_new_message(rcvr);
+ erts_proc_notify_new_message(rcvr,
+#ifdef ERTS_SMP
+ *rcvr_locks
+#else
+ 0
+#endif
+ );
}
}
@@ -542,7 +548,13 @@ queue_message(Process *c_p,
if (locked_msgq)
erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
- erts_proc_notify_new_message(receiver);
+ erts_proc_notify_new_message(receiver,
+#ifdef ERTS_SMP
+ *receiver_locks
+#else
+ 0
+#endif
+ );
#ifndef ERTS_SMP
ERTS_HOLE_CHECK(receiver);
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index ff551ea3af..ede5f335dc 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -472,6 +472,18 @@ int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin)
struct enif_tmp_obj_t* tmp;
byte* raw_ptr;
}u;
+
+ if (is_boxed(bin_term) && *binary_val(bin_term) == HEADER_SUB_BIN) {
+ ErlSubBin* sb = (ErlSubBin*) binary_val(bin_term);
+ if (sb->is_writable) {
+ ProcBin* pb = (ProcBin*) binary_val(sb->orig);
+ ASSERT(pb->thing_word == HEADER_PROC_BIN);
+ if (pb->flags) {
+ erts_emasculate_writable_binary(pb);
+ sb->is_writable = 0;
+ }
+ }
+ }
u.tmp = NULL;
bin->data = erts_get_aligned_binary_bytes_extra(bin_term, &u.raw_ptr, allocator,
sizeof(struct enif_tmp_obj_t));
@@ -1513,72 +1525,263 @@ int enif_consume_timeslice(ErlNifEnv* env, int percent)
return ERTS_BIF_REDS_LEFT(env->proc) == 0;
}
-#ifdef ERTS_DIRTY_SCHEDULERS
-
-/* NIFs exports need one more item than the Export struct provides, the
- * erl_module_nif*, so the DirtyNifExport below adds that. The Export
- * member must be first in the struct.
+/*
+ * NIF exports need a few more items than the Export struct provides,
+ * including the erl_module_nif* and a NIF function pointer, so the
+ * NifExport below adds those. The Export member must be first in the
+ * struct. The saved_mfa, saved_argc, alloced_argv_sz and argv
+ * members are used to track the MFA and arguments of the top NIF in case a
+ * chain of one or more enif_schedule_nif() calls results in an exception,
+ * since in that case the original MFA and registers have to be restored
+ * before returning to Erlang to ensure stacktrace information associated
+ * with the exception is correct.
*/
+typedef ERL_NIF_TERM (*NativeFunPtr)(ErlNifEnv*, int, const ERL_NIF_TERM[]);
+
typedef struct {
Export exp;
struct erl_module_nif* m;
-} DirtyNifExport;
+ NativeFunPtr fp;
+ Eterm saved_mfa[3];
+ int saved_argc;
+ int alloced_argv_sz;
+ Eterm argv[1];
+} NifExport;
-static void
-alloc_proc_psd(Process* proc, DirtyNifExport **ep)
+/*
+ * If a process has saved arguments, they need to be part of the GC
+ * rootset. The function below is called from setup_rootset() in
+ * erl_gc.c. This function is declared in erl_process.h.
+ */
+int
+erts_setup_nif_gc(Process* proc, Eterm** objv, int* nobj)
{
+ NifExport* ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ int gc = (ep && ep->saved_argc > 0);
+
+ if (gc) {
+ *objv = ep->argv;
+ *nobj = ep->saved_argc;
+ }
+ return gc;
+}
+
+/*
+ * Allocate a NifExport and set it in proc specific data
+ */
+static NifExport*
+allocate_nif_sched_data(Process* proc, int argc)
+{
+ NifExport* ep;
+ size_t argv_extra, total;
int i;
- if (!*ep) {
- *ep = erts_alloc(ERTS_ALC_T_PSD, sizeof(DirtyNifExport));
- sys_memset((void*) *ep, 0, sizeof(DirtyNifExport));
- for (i=0; i<ERTS_NUM_CODE_IX; i++) {
- (*ep)->exp.addressv[i] = &(*ep)->exp.code[3];
- }
- (*ep)->exp.code[3] = (BeamInstr) em_call_nif;
+
+ argv_extra = argc > 1 ? sizeof(Eterm)*(argc-1) : 0;
+ total = sizeof(NifExport) + argv_extra;
+ ep = erts_alloc(ERTS_ALC_T_NIF_TRAP_EXPORT, total);
+ sys_memset((void*) ep, 0, total);
+ ep->alloced_argv_sz = argc;
+ for (i=0; i<ERTS_NUM_CODE_IX; i++) {
+ ep->exp.addressv[i] = &ep->exp.code[3];
}
- (void) ERTS_PROC_SET_DIRTY_SCHED_TRAP_EXPORT(proc, ERTS_PROC_LOCK_MAIN, &(*ep)->exp);
+ ep->exp.code[3] = (BeamInstr) em_call_nif;
+ (void) ERTS_PROC_SET_NIF_TRAP_EXPORT(proc, ERTS_PROC_LOCK_MAIN, ep);
+ return ep;
+}
+
+static ERTS_INLINE void
+destroy_nif_export(NifExport *nif_export)
+{
+ erts_free(ERTS_ALC_T_NIF_TRAP_EXPORT, (void *) nif_export);
}
+void
+erts_destroy_nif_export(void *nif_export)
+{
+ destroy_nif_export((NifExport *) nif_export);
+}
+
+/*
+ * Initialize a NifExport struct. Create it if needed and store it in the
+ * proc. The direct_fp function is what will be invoked by op_call_nif, and
+ * the indirect_fp function, if not NULL, is what the direct_fp function
+ * will call. If the allocated NifExport isn't large enough to hold all of argv,
+ * allocate a larger one. Save MFA and registers only if the need_save
+ * parameter is true.
+ */
static ERL_NIF_TERM
-execute_dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+init_nif_sched_data(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp,
+ int need_save, int argc, const ERL_NIF_TERM argv[])
{
- Eterm* reg = ERTS_PROC_GET_SCHDATA(env->proc)->x_reg_array;
- ERL_NIF_TERM result, dirty_result = (ERL_NIF_TERM) reg[0];
- typedef ERL_NIF_TERM (*FinalizerFP)(ErlNifEnv*, ERL_NIF_TERM);
- FinalizerFP fp;
-#if HAVE_INT64 && SIZEOF_LONG != 8
- ASSERT(sizeof(fp) <= sizeof(ErlNifUInt64));
- enif_get_uint64(env, reg[1], (ErlNifUInt64 *) &fp);
-#else
- ASSERT(sizeof(fp) <= sizeof(unsigned long));
- enif_get_ulong(env, reg[1], (unsigned long *) &fp);
-#endif
- result = (*fp)(env, dirty_result);
- if (erts_refc_dectest(&env->mod_nif->rt_dtor_cnt, 0) == 0
- && env->mod_nif->mod == NULL)
- close_lib(env->mod_nif);
- return result;
+ Process* proc = env->proc;
+ Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
+ NifExport* ep;
+ int i;
+
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ if (!ep)
+ ep = allocate_nif_sched_data(proc, argc);
+ else if (need_save && ep->alloced_argv_sz < argc) {
+ NifExport* new_ep = allocate_nif_sched_data(proc, argc);
+ destroy_nif_export(ep);
+ ep = new_ep;
+ }
+ ERTS_VBUMP_ALL_REDS(proc);
+ for (i = 0; i < argc; i++) {
+ if (need_save)
+ ep->argv[i] = reg[i];
+ reg[i] = (Eterm) argv[i];
+ }
+ if (need_save) {
+ ep->saved_mfa[0] = proc->current[0];
+ ep->saved_mfa[1] = proc->current[1];
+ ep->saved_mfa[2] = proc->current[2];
+ ep->saved_argc = argc;
+ }
+ proc->i = (BeamInstr*) ep->exp.addressv[0];
+ ep->exp.code[0] = (BeamInstr) proc->current[0];
+ ep->exp.code[1] = (BeamInstr) proc->current[1];
+ ep->exp.code[2] = argc;
+ ep->exp.code[4] = (BeamInstr) direct_fp;
+ ep->m = env->mod_nif;
+ ep->fp = indirect_fp;
+ proc->freason = TRAP;
+ return THE_NON_VALUE;
}
-#endif /* ERTS_DIRTY_SCHEDULERS */
+/*
+ * Restore saved MFA and registers. Registers are restored only when the
+ * exception flag is true.
+ */
+static void
+restore_nif_mfa(Process* proc, NifExport* ep, int exception)
+{
+ int i;
+ Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+ proc->current[0] = ep->saved_mfa[0];
+ proc->current[1] = ep->saved_mfa[1];
+ proc->current[2] = ep->saved_mfa[2];
+ if (exception)
+ for (i = 0; i < ep->saved_argc; i++)
+ reg[i] = ep->argv[i];
+ ep->saved_argc = 0;
+ ep->saved_mfa[0] = THE_NON_VALUE;
+}
-ERL_NIF_TERM
-enif_schedule_dirty_nif(ErlNifEnv* env, int flags,
- ERL_NIF_TERM (*fp)(ErlNifEnv*, int, const ERL_NIF_TERM[]),
- int argc, const ERL_NIF_TERM argv[])
+#ifdef ERTS_DIRTY_SCHEDULERS
+
+/*
+ * Finalize a dirty NIF call. This function is scheduled to cause the VM to
+ * switch the process off a dirty scheduler thread and back onto a regular
+ * scheduler thread, and then return the result from the dirty NIF. It also
+ * restores the original NIF MFA when necessary based on the value of
+ * ep->fp set by execute_dirty_nif via init_nif_sched_data -- non-NULL
+ * means restore, NULL means do not restore.
+ */
+static ERL_NIF_TERM
+dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc = env->proc;
+ NifExport* ep;
+
+ ASSERT(argc == 1);
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ if (ep->fp)
+ restore_nif_mfa(proc, ep, 0);
+ return argv[0];
+}
+
+/* Finalize a dirty NIF call that raised an exception. Otherwise same as
+ * the dirty_nif_finalizer() function.
+ */
+static ERL_NIF_TERM
+dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc = env->proc;
+ NifExport* ep;
+
+ ASSERT(!ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ if (ep->fp)
+ restore_nif_mfa(proc, ep, 1);
+ return enif_make_badarg(env);
+}
+
+/*
+ * Dirty NIF execution wrapper function. Invoke an application's dirty NIF,
+ * then check the result and schedule the appropriate finalizer function
+ * where needed. Also restore the original NIF MFA when appropriate.
+ */
+static ERL_NIF_TERM
+execute_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc = env->proc;
+ NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+ NifExport* ep;
+ ERL_NIF_TERM result;
+
+ ASSERT(ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data));
+
+ /*
+ * Set ep->fp to NULL before the native call so we know later whether it scheduled another NIF for execution
+ */
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ ep->fp = NULL;
+ result = (*fp)(env, argc, argv);
+ erts_smp_atomic32_read_band_mb(&proc->state,
+ ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ |ERTS_PSFLG_DIRTY_IO_PROC
+ |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q
+ |ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
+ if (erts_refc_dectest(&env->mod_nif->rt_dtor_cnt, 0) == 0 && env->mod_nif->mod == NULL)
+ close_lib(env->mod_nif);
+ /*
+ * If no more NIFs were scheduled by the native call via
+ * enif_schedule_nif(), then ep->fp will still be NULL as set above, in
+ * which case we need to restore the original NIF calling
+ * context. Reuse fp essentially as a boolean for this, passing it to
+ * init_nif_sched_data below. Both dirty_nif_exception and
+ * dirty_nif_finalizer then check ep->fp to decide whether or not to
+ * restore the original calling context.
+ */
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ if (ep->fp)
+ fp = NULL;
+ if (is_non_value(result)) {
+ if (proc->freason != TRAP) {
+ ASSERT(proc->freason == BADARG);
+ return init_nif_sched_data(env, dirty_nif_exception, fp, 0, argc, argv);
+ } else {
+ if (ep->fp == NULL)
+ restore_nif_mfa(proc, ep, 1);
+ return result;
+ }
+ }
+ else
+ return init_nif_sched_data(env, dirty_nif_finalizer, fp, 0, 1, &result);
+}
+
+/*
+ * Dirty NIF scheduling wrapper function. Schedule a dirty NIF to execute
+ * via the execute_dirty_nif() wrapper function. The dirty scheduler thread
+ * type (CPU or I/O) is indicated by the flags parameter.
+ */
+static ERTS_INLINE ERL_NIF_TERM
+schedule_dirty_nif(ErlNifEnv* env, int flags, int argc, const ERL_NIF_TERM argv[])
{
-#ifdef USE_THREADS
erts_aint32_t state, n, a;
Process* proc = env->proc;
- Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
- DirtyNifExport* ep = NULL;
- int i;
+ NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+ NifExport* ep;
+ int need_save;
- int chkflgs = (flags & (ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND));
- if (chkflgs != ERL_NIF_DIRTY_JOB_IO_BOUND && chkflgs != ERL_NIF_DIRTY_JOB_CPU_BOUND)
- return enif_make_badarg(env);
+ ASSERT(flags==ERL_NIF_DIRTY_JOB_IO_BOUND || flags==ERL_NIF_DIRTY_JOB_CPU_BOUND);
a = erts_smp_atomic32_read_acqb(&proc->state);
while (1) {
@@ -1590,7 +1793,7 @@ enif_schedule_dirty_nif(ErlNifEnv* env, int flags,
*/
n &= ~(ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC
|ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q);
- if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND)
+ if (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND)
n |= ERTS_PSFLG_DIRTY_CPU_PROC;
else
n |= ERTS_PSFLG_DIRTY_IO_PROC;
@@ -1598,85 +1801,106 @@ enif_schedule_dirty_nif(ErlNifEnv* env, int flags,
if (a == state)
break;
}
- if (!(ep = (DirtyNifExport*) ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(proc)))
- alloc_proc_psd(proc, &ep);
- ERTS_VBUMP_ALL_REDS(proc);
- ep->exp.code[2] = argc;
- for (i = 0; i < argc; i++) {
- reg[i] = (Eterm) argv[i];
- }
- proc->i = (BeamInstr*) ep->exp.addressv[0];
- ep->exp.code[4] = (BeamInstr) fp;
- ep->m = env->mod_nif;
- proc->freason = TRAP;
-
erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1);
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ need_save = (ep == NULL || is_non_value(ep->saved_mfa[0]));
+ return init_nif_sched_data(env, execute_dirty_nif, fp, need_save, argc, argv);
+}
- return THE_NON_VALUE;
-#else
- return (*fp)(env, argc, argv);
-#endif
+static ERL_NIF_TERM
+schedule_dirty_io_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ return schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_IO_BOUND, argc, argv);
+}
+
+static ERL_NIF_TERM
+schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ return schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_CPU_BOUND, argc, argv);
+}
+
+#endif /* ERTS_DIRTY_SCHEDULERS */
+
+/*
+ * NIF execution wrapper used by enif_schedule_nif() for regular NIFs. It
+ * calls the actual NIF, restores the original NIF MFA if necessary, and
+ * then returns the NIF result.
+ */
+static ERL_NIF_TERM
+execute_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ Process* proc = env->proc;
+ NativeFunPtr fp = (NativeFunPtr) proc->current[6];
+ NifExport* ep;
+ ERL_NIF_TERM result;
+
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ ep->fp = NULL;
+ result = (*fp)(env, argc, argv);
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ /*
+ * If no NIFs were scheduled by the native call via
+ * enif_schedule_nif(), then ep->fp will still be NULL as set above, in
+ * which case we need to restore the original NIF MFA.
+ */
+ if (ep->fp == NULL)
+ restore_nif_mfa(proc, ep, is_non_value(result) && proc->freason != TRAP);
+ return result;
}
ERL_NIF_TERM
-enif_schedule_dirty_nif_finalizer(ErlNifEnv* env, ERL_NIF_TERM result,
- ERL_NIF_TERM (*fp)(ErlNifEnv*, ERL_NIF_TERM))
+enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags,
+ ERL_NIF_TERM (*fp)(ErlNifEnv*, int, const ERL_NIF_TERM[]),
+ int argc, const ERL_NIF_TERM argv[])
{
-#ifdef USE_THREADS
Process* proc = env->proc;
- Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array;
- DirtyNifExport* ep;
+ NifExport* ep;
+ ERL_NIF_TERM fun_name_atom, result;
+ int need_save;
- erts_smp_atomic32_read_band_mb(&proc->state,
- ~(ERTS_PSFLG_DIRTY_CPU_PROC
- |ERTS_PSFLG_DIRTY_IO_PROC
- |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q
- |ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
- if (!(ep = (DirtyNifExport*) ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(proc)))
- alloc_proc_psd(proc, &ep);
- ERTS_VBUMP_ALL_REDS(proc);
- ep->exp.code[2] = 2;
- reg[0] = (Eterm) result;
-#if HAVE_INT64 && SIZEOF_LONG != 8
- ASSERT(sizeof(fp) <= sizeof(ErlNifUInt64));
- reg[1] = (Eterm) enif_make_uint64(env, (ErlNifUInt64) fp);
-#else
- ASSERT(sizeof(fp) <= sizeof(unsigned long));
- reg[1] = (Eterm) enif_make_ulong(env, (unsigned long) fp);
-#endif
- proc->i = (BeamInstr*) ep->exp.addressv[0];
- ep->exp.code[4] = (BeamInstr) execute_dirty_nif_finalizer;
- proc->freason = TRAP;
+ if (argc > MAX_ARG)
+ return enif_make_badarg(env);
+ fun_name_atom = enif_make_atom(env, fun_name);
+ if (enif_is_exception(env, fun_name_atom))
+ return fun_name_atom;
- return THE_NON_VALUE;
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ need_save = (ep == NULL || is_non_value(ep->saved_mfa[0]));
+
+ if (flags) {
+#ifdef ERTS_DIRTY_SCHEDULERS
+ NativeFunPtr sched_fun;
+ int chkflgs = (flags & (ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND));
+ if (chkflgs == ERL_NIF_DIRTY_JOB_IO_BOUND)
+ sched_fun = schedule_dirty_io_nif;
+ else if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND)
+ sched_fun = schedule_dirty_cpu_nif;
+ else
+ return enif_make_badarg(env);
+ result = init_nif_sched_data(env, sched_fun, fp, need_save, argc, argv);
#else
- return (*fp)(env, result);
+ return enif_make_badarg(env);
#endif
-}
+ }
+ else
+ result = init_nif_sched_data(env, execute_nif, fp, need_save, argc, argv);
-/* A simple finalizer that just returns its result argument */
-ERL_NIF_TERM
-enif_dirty_nif_finalizer(ErlNifEnv* env, ERL_NIF_TERM result)
-{
+ ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc);
+ ASSERT(ep);
+ ep->exp.code[1] = (BeamInstr) fun_name_atom;
return result;
}
+#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+
int
enif_is_on_dirty_scheduler(ErlNifEnv* env)
{
return ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data);
}
-int
-enif_have_dirty_schedulers()
-{
-#ifdef USE_THREADS
- return 1;
-#else
- return 0;
-#endif
-}
-
#endif /* ERL_NIF_DIRTY_SCHEDULER_SUPPORT */
/* Maps */
@@ -1977,6 +2201,35 @@ static Eterm load_nif_error(Process* p, const char* atom, const char* format, ..
return ret;
}
+/*
+ * The function below is for looping through ErlNifFunc arrays, helping
+ * provide backwards compatibility across the version 2.7 change that added
+ * the "flags" field to ErlNifFunc.
+ */
+static ErlNifFunc* next_func(ErlNifEntry* entry, int* incrp, ErlNifFunc* func)
+{
+ ASSERT(incrp);
+ if (!*incrp) {
+ if (entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
+ *incrp = sizeof(ErlNifFunc);
+ else {
+ /*
+ * ErlNifFuncV1 below is what ErlNifFunc was before the
+ * addition of the flags field for 2.7, and is needed to handle
+ * backward compatibility.
+ */
+ typedef struct {
+ const char* name;
+ unsigned arity;
+ ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+ }ErlNifFuncV1;
+ *incrp = sizeof(ErlNifFuncV1);
+ }
+ }
+ return (ErlNifFunc*) ((char*)func + *incrp);
+}
+
+
BIF_RETTYPE load_nif_2(BIF_ALIST_2)
{
static const char bad_lib[] = "bad_lib";
@@ -2086,22 +2339,48 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
}
else {
/*erts_fprintf(stderr, "Found module %T\r\n", mod_atom);*/
-
+
+ int maybe_dirty_nifs = ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
+ && (entry->options & ERL_NIF_DIRTY_NIF_OPTION));
+ int incr = 0;
+ ErlNifFunc* f = entry->funcs;
for (i=0; i < entry->num_of_funcs && ret==am_ok; i++) {
BeamInstr** code_pp;
- ErlNifFunc* f = &entry->funcs[i];
if (!erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1)
|| (code_pp = get_func_pp(mod->curr.code, f_atom, f->arity))==NULL) {
ret = load_nif_error(BIF_P,bad_lib,"Function not found %T:%s/%u",
mod_atom, f->name, f->arity);
- }
- else if (code_pp[1] - code_pp[0] < (5+3)) {
+ }
+ else if (maybe_dirty_nifs && f->flags) {
+ /*
+ * If the flags field is non-zero and this emulator was
+ * built with dirty scheduler support, check that the flags
+ * value is legal. But if this emulator was built without
+ * dirty scheduler support, treat a non-zero flags field as
+ * a load error.
+ */
+#ifdef ERTS_DIRTY_SCHEDULERS
+ if (f->flags != ERL_NIF_DIRTY_JOB_IO_BOUND && f->flags != ERL_NIF_DIRTY_JOB_CPU_BOUND)
+ ret = load_nif_error(BIF_P, bad_lib, "Illegal flags field value %d for NIF %T:%s/%u",
+ f->flags, mod_atom, f->name, f->arity);
+#else
+ ret = load_nif_error(BIF_P, bad_lib, "NIF %T:%s/%u requires a runtime with dirty scheduler support.",
+ mod_atom, f->name, f->arity);
+#endif
+ }
+#ifdef ERTS_DIRTY_SCHEDULERS
+ else if (code_pp[1] - code_pp[0] < (5+4))
+#else
+ else if (code_pp[1] - code_pp[0] < (5+3))
+#endif
+ {
ret = load_nif_error(BIF_P,bad_lib,"No explicit call to load_nif"
- " in module (%T:%s/%u to small)",
- mod_atom, entry->funcs[i].name, entry->funcs[i].arity);
+ " in module (%T:%s/%u too small)",
+ mod_atom, f->name, f->arity);
}
/*erts_fprintf(stderr, "Found NIF %T:%s/%u\r\n",
- mod_atom, entry->funcs[i].name, entry->funcs[i].arity);*/
+ mod_atom, f->name, f->arity);*/
+ f = next_func(entry, &incr, f);
}
}
@@ -2127,7 +2406,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
      * is deprecated and was only meant as a development feature not to
* be used in production systems. (See warning below)
*/
- int k;
+ int k, old_incr = 0;
+ ErlNifFunc* old_func;
lib->priv_data = mod->curr.nif->priv_data;
ASSERT(mod->curr.nif->entry != NULL);
@@ -2136,13 +2416,16 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
goto error;
}
/* Check that no NIF is removed */
+ old_func = mod->curr.nif->entry->funcs;
for (k=0; k < mod->curr.nif->entry->num_of_funcs; k++) {
- ErlNifFunc* old_func = &mod->curr.nif->entry->funcs[k];
+ int incr = 0;
+ ErlNifFunc* f = entry->funcs;
for (i=0; i < entry->num_of_funcs; i++) {
- if (old_func->arity == entry->funcs[i].arity
- && sys_strcmp(old_func->name, entry->funcs[i].name) == 0) {
+ if (old_func->arity == f->arity
+ && sys_strcmp(old_func->name, f->name) == 0) {
break;
}
+ f = next_func(entry, &incr, f);
}
if (i == entry->num_of_funcs) {
ret = load_nif_error(BIF_P,reload,"Reloaded library missing "
@@ -2150,7 +2433,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
old_func->name, old_func->arity);
goto error;
}
- }
+ old_func = next_func(mod->curr.nif->entry, &old_incr, old_func);
+ }
erts_pre_nif(&env, BIF_P, lib);
veto = entry->reload(&env, &lib->priv_data, BIF_ARG_2);
erts_post_nif(&env);
@@ -2197,13 +2481,17 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
/*
** Everything ok, patch the beam code with op_call_nif
*/
- mod->curr.nif = lib;
+
+ int incr = 0;
+ ErlNifFunc* f = entry->funcs;
+
+ mod->curr.nif = lib;
for (i=0; i < entry->num_of_funcs; i++)
{
BeamInstr* code_ptr;
- erts_atom_get(entry->funcs[i].name, sys_strlen(entry->funcs[i].name), &f_atom, ERTS_ATOM_ENC_LATIN1);
- code_ptr = *get_func_pp(mod->curr.code, f_atom, entry->funcs[i].arity);
-
+ erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1);
+ code_ptr = *get_func_pp(mod->curr.code, f_atom, f->arity);
+
if (code_ptr[1] == 0) {
code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif);
}
@@ -2211,10 +2499,21 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2)
GenericBp* g = (GenericBp *) code_ptr[1];
ASSERT(code_ptr[5+0] ==
(BeamInstr) BeamOp(op_i_generic_breakpoint));
- g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
- }
- code_ptr[5+1] = (BeamInstr) entry->funcs[i].fptr;
+ g->orig_instr = (BeamInstr) BeamOp(op_call_nif);
+ }
+ if ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7))
+ && (entry->options & ERL_NIF_DIRTY_NIF_OPTION) && f->flags) {
+#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+ code_ptr[5+3] = (BeamInstr) f->fptr;
+ code_ptr[5+1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ?
+ (BeamInstr) schedule_dirty_io_nif :
+ (BeamInstr) schedule_dirty_cpu_nif;
+#endif
+ }
+ else
+ code_ptr[5+1] = (BeamInstr) f->fptr;
code_ptr[5+2] = (BeamInstr) lib;
+ f = next_func(entry, &incr, f);
}
}
else {
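
The new enif_schedule_nif() above replaces the removed dirty-NIF scheduling calls and also works with flags == 0 to yield on normal schedulers. A minimal usage sketch (not part of this patch; the chunk size and the do_chunk_of_work() helper are hypothetical) of a NIF that reschedules itself so a long job is split across timeslices:

#include "erl_nif.h"

/* Hypothetical helper: performs one bounded slice of the real work. */
static void do_chunk_of_work(long from, long to)
{
    (void)from; (void)to;
}

static ERL_NIF_TERM chunked_work(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    long pos, end;
    ERL_NIF_TERM newargv[2];

    if (argc != 2
        || !enif_get_long(env, argv[0], &pos)
        || !enif_get_long(env, argv[1], &end))
        return enif_make_badarg(env);

    /* Do a bounded amount of work, then decide whether to yield. */
    do_chunk_of_work(pos, pos + 1000 < end ? pos + 1000 : end);
    pos += 1000;

    if (pos >= end)
        return enif_make_atom(env, "done");

    /* Reschedule this NIF on a normal scheduler (flags = 0) with updated
     * state; passing ERL_NIF_DIRTY_JOB_CPU_BOUND or _IO_BOUND instead would
     * move the continuation to a dirty scheduler where supported. */
    newargv[0] = enif_make_long(env, pos);
    newargv[1] = argv[1];
    return enif_schedule_nif(env, "chunked_work", 0, chunked_work, 2, newargv);
}

The function would be registered in the module's ErlNifFunc table as usual (see the erl_nif.h sketch further below).
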
diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h
index 5b93c2398e..226fc199a1 100644
--- a/erts/emulator/beam/erl_nif.h
+++ b/erts/emulator/beam/erl_nif.h
@@ -42,9 +42,13 @@
** 2.5: R17 Maps API additions
** 2.6: R17 with maps
** R17 dirty schedulers
+** 2.7: 17.3 add enif_schedule_nif
+** remove enif_schedule_dirty_nif, enif_schedule_dirty_nif_finalizer, enif_dirty_nif_finalizer
+** add ErlNifEntry options
+** add ErlNifFunc flags
*/
#define ERL_NIF_MAJOR_VERSION 2
-#define ERL_NIF_MINOR_VERSION 6
+#define ERL_NIF_MINOR_VERSION 7
/*
* The emulator will refuse to load a nif-lib with a major version
@@ -125,8 +129,10 @@ typedef struct
const char* name;
unsigned arity;
ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
+ unsigned flags;
}ErlNifFunc;
+
typedef struct enif_entry_t
{
int major;
@@ -139,8 +145,11 @@ typedef struct enif_entry_t
int (*upgrade)(ErlNifEnv*, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info);
void (*unload) (ErlNifEnv*, void* priv_data);
const char* vm_variant;
+ unsigned options;
}ErlNifEntry;
+/* Field bits for ErlNifEntry options */
+#define ERL_NIF_DIRTY_NIF_OPTION 1
typedef struct
@@ -232,10 +241,21 @@ extern TWinDynNifCallbacks WinDynNifCallbacks;
# else
# define ERL_NIF_INIT_DECL(MODNAME) __declspec(dllexport) ErlNifEntry* nif_init(TWinDynNifCallbacks* callbacks)
# endif
-# define ERL_NIF_INIT_BODY memcpy(&WinDynNifCallbacks,callbacks,sizeof(TWinDynNifCallbacks))
+# ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+# define ERL_NIF_INIT_BODY do { \
+ memcpy(&WinDynNifCallbacks,callbacks,sizeof(TWinDynNifCallbacks)); \
+ entry.options = ERL_NIF_DIRTY_NIF_OPTION; \
+ } while(0)
+# else
+# define ERL_NIF_INIT_BODY memcpy(&WinDynNifCallbacks,callbacks,sizeof(TWinDynNifCallbacks))
+# endif
#else
# define ERL_NIF_INIT_GLOB
-# define ERL_NIF_INIT_BODY
+# ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+# define ERL_NIF_INIT_BODY entry.options = ERL_NIF_DIRTY_NIF_OPTION
+# else
+# define ERL_NIF_INIT_BODY
+# endif
# ifdef STATIC_ERLANG_NIF
# define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* MODNAME ## _nif_init(void)
# else
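
With the 2.7 four-field ErlNifFunc, a library marks individual NIFs as dirty via the new flags member; the loader hunk above then patches in schedule_dirty_cpu_nif/schedule_dirty_io_nif, and rejects a non-zero flags value on emulators built without dirty scheduler support. A hedged sketch (not part of this patch; the module name and function bodies are placeholders, and ERL_NIF_DIRTY_JOB_CPU_BOUND is only available when the headers define ERL_NIF_DIRTY_SCHEDULER_SUPPORT):

#include "erl_nif.h"

static ERL_NIF_TERM fast_lookup_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    return enif_make_atom(env, "ok");   /* placeholder body                    */
}

static ERL_NIF_TERM big_hash_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    return enif_make_atom(env, "ok");   /* placeholder; long-running work here */
}

static ErlNifFunc nif_funcs[] = {
    {"fast_lookup", 1, fast_lookup_nif, 0},                           /* normal schedulers   */
    {"big_hash",    1, big_hash_nif,    ERL_NIF_DIRTY_JOB_CPU_BOUND}  /* dirty CPU scheduler */
};

ERL_NIF_INIT(example_dirty, nif_funcs, NULL, NULL, NULL, NULL)
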
diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h
index d7c554e60b..630cefae93 100644
--- a/erts/emulator/beam/erl_nif_api_funcs.h
+++ b/erts/emulator/beam/erl_nif_api_funcs.h
@@ -22,7 +22,7 @@
#endif
/*
-** WARNING: add new ERL_NIF_API_FUNC_DECL entries at the bottom of the list
+** WARNING: Add new ERL_NIF_API_FUNC_DECL entries at the bottom of the list
** to keep compatibility on Windows!!!
**
** And don't forget to increase ERL_NIF_MINOR_VERSION in erl_nif.h
@@ -141,14 +141,6 @@ ERL_NIF_API_FUNC_DECL(int,enif_is_number,(ErlNifEnv*, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(void*,enif_dlopen,(const char* lib, void (*err_handler)(void*,const char*), void* err_arg));
ERL_NIF_API_FUNC_DECL(void*,enif_dlsym,(void* handle, const char* symbol, void (*err_handler)(void*,const char*), void* err_arg));
ERL_NIF_API_FUNC_DECL(int,enif_consume_timeslice,(ErlNifEnv*, int percent));
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
-ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_dirty_nif,(ErlNifEnv*,int,ERL_NIF_TERM (*)(ErlNifEnv*,int,const ERL_NIF_TERM[]),int,const ERL_NIF_TERM[]));
-ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_dirty_nif_finalizer,(ErlNifEnv*,ERL_NIF_TERM,ERL_NIF_TERM (*)(ErlNifEnv*,ERL_NIF_TERM)));
-ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_dirty_nif_finalizer,(ErlNifEnv*,ERL_NIF_TERM));
-ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*));
-ERL_NIF_API_FUNC_DECL(int,enif_have_dirty_schedulers,(void));
-#endif
-
ERL_NIF_API_FUNC_DECL(int, enif_is_map, (ErlNifEnv* env, ERL_NIF_TERM term));
ERL_NIF_API_FUNC_DECL(int, enif_get_map_size, (ErlNifEnv* env, ERL_NIF_TERM term, size_t *size));
ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_make_new_map, (ErlNifEnv* env));
@@ -163,12 +155,22 @@ ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_is_tail, (ErlNifEnv *env, ErlNifMap
ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_next, (ErlNifEnv *env, ErlNifMapIterator *iter));
ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_prev, (ErlNifEnv *env, ErlNifMapIterator *iter));
ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_get_pair, (ErlNifEnv *env, ErlNifMapIterator *iter, ERL_NIF_TERM *key, ERL_NIF_TERM *value));
-
+ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_nif,(ErlNifEnv*,const char*,int,ERL_NIF_TERM (*)(ErlNifEnv*,int,const ERL_NIF_TERM[]),int,const ERL_NIF_TERM[]));
/*
-** Add new entries here to keep compatibility on Windows!!!
+** ADD NEW ENTRIES HERE (before this comment) !!!
*/
+
+
+/*
+ * Conditional EXPERIMENTAL stuff always last.
+ * Must be moved up and made unconditional to support binary backward
+ * compatibility on Windows.
+ */
+#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*));
#endif
+#endif /* ERL_NIF_API_FUNC_DECL */
/*
** Please keep the ERL_NIF_API_FUNC_MACRO list below in the same order
@@ -282,21 +284,12 @@ ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_get_pair, (ErlNifEnv *env, ErlNifMa
# define enif_make_int64 ERL_NIF_API_FUNC_MACRO(enif_make_int64)
# define enif_make_uint64 ERL_NIF_API_FUNC_MACRO(enif_make_uint64)
#endif
-
# define enif_is_exception ERL_NIF_API_FUNC_MACRO(enif_is_exception)
# define enif_make_reverse_list ERL_NIF_API_FUNC_MACRO(enif_make_reverse_list)
# define enif_is_number ERL_NIF_API_FUNC_MACRO(enif_is_number)
# define enif_dlopen ERL_NIF_API_FUNC_MACRO(enif_dlopen)
# define enif_dlsym ERL_NIF_API_FUNC_MACRO(enif_dlsym)
# define enif_consume_timeslice ERL_NIF_API_FUNC_MACRO(enif_consume_timeslice)
-#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
-# define enif_schedule_dirty_nif ERL_NIF_API_FUNC_MACRO(enif_schedule_dirty_nif)
-# define enif_schedule_dirty_nif_finalizer ERL_NIF_API_FUNC_MACRO(enif_schedule_dirty_nif_finalizer)
-# define enif_dirty_nif_finalizer ERL_NIF_API_FUNC_MACRO(enif_dirty_nif_finalizer)
-# define enif_is_on_dirty_scheduler ERL_NIF_API_FUNC_MACRO(enif_is_on_dirty_scheduler)
-# define enif_have_dirty_schedulers ERL_NIF_API_FUNC_MACRO(enif_have_dirty_schedulers)
-#endif
-
# define enif_is_map ERL_NIF_API_FUNC_MACRO(enif_is_map)
# define enif_get_map_size ERL_NIF_API_FUNC_MACRO(enif_get_map_size)
# define enif_make_new_map ERL_NIF_API_FUNC_MACRO(enif_make_new_map)
@@ -311,11 +304,21 @@ ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_get_pair, (ErlNifEnv *env, ErlNifMa
# define enif_map_iterator_next ERL_NIF_API_FUNC_MACRO(enif_map_iterator_next)
# define enif_map_iterator_prev ERL_NIF_API_FUNC_MACRO(enif_map_iterator_prev)
# define enif_map_iterator_get_pair ERL_NIF_API_FUNC_MACRO(enif_map_iterator_get_pair)
+# define enif_schedule_nif ERL_NIF_API_FUNC_MACRO(enif_schedule_nif)
/*
-** Add new entries here
+** ADD NEW ENTRIES HERE (before this comment)
*/
+
+/*
+ * Conditional EXPERIMENTAL stuff always last
+ * Must be moved up and made unconditional to support binary backward
+ * compatibility on Windows.
+ */
+#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+# define enif_is_on_dirty_scheduler ERL_NIF_API_FUNC_MACRO(enif_is_on_dirty_scheduler)
#endif
+#endif /* ERL_NIF_API_FUNC_MACRO */
#if defined(__GNUC__) && !(defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_))
diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c
index 31d9a1e26e..682f6f8f4b 100644
--- a/erts/emulator/beam/erl_port_task.c
+++ b/erts/emulator/beam/erl_port_task.c
@@ -68,6 +68,13 @@ static void chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_q
#define DTRACE_DRIVER(PROBE_NAME, PP) do {} while(0)
#endif
+#define ERTS_SMP_LC_VERIFY_RQ(RQ, PP) \
+ do { \
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); \
+ ERTS_SMP_LC_ASSERT((RQ) == ((ErtsRunQueue *) \
+ erts_smp_atomic_read_nob(&(PP)->run_queue))); \
+ } while (0)
+
erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
#define ERTS_PT_STATE_SCHEDULED 0
@@ -798,12 +805,13 @@ schedule_port_task_handle_list_free(ErtsPortTaskHandleList *pthlp)
static ERTS_INLINE void
abort_nosuspend_task(Port *pp,
ErtsPortTaskType type,
- ErtsPortTaskTypeData *tdp)
+ ErtsPortTaskTypeData *tdp,
+ int bpq_data)
{
ASSERT(type == ERTS_PORT_TASK_PROC_SIG);
- if (!pp->sched.taskq.bpq)
+ if (!bpq_data)
tdp->psig.callback(NULL,
ERTS_PORT_SFLG_INVALID,
ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND,
@@ -991,6 +999,7 @@ static ERTS_INLINE int
finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
{
erts_aint32_t act;
+ unsigned int prof_runnable_ports;
if (!processing_busy_q)
pp->sched.taskq.local.first = *execq;
@@ -1007,6 +1016,10 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
if (act & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q)
act = check_unset_busy_port_q(pp, act, pp->sched.taskq.bpq);
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&pp->sched);
+
while (1) {
erts_aint32_t new, exp;
@@ -1018,12 +1031,24 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q)
act = erts_smp_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp);
- ASSERT(!(act & ERTS_PTS_FLG_IN_RUNQ));
+ ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_IN_RUNQ));
+ ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_EXEC_IMM));
if (exp == act)
break;
}
+ if (prof_runnable_ports | IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
+ /* trace port scheduling, out */
+ if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS))
+ trace_sched_ports(pp, am_out);
+ if (prof_runnable_ports) {
+ if (!(act & (ERTS_PTS_FLG_EXEC_IMM|ERTS_PTS_FLG_HAVE_TASKS)))
+ profile_runnable_port(pp, am_inactive);
+ erts_port_task_sched_unlock(&pp->sched);
+ }
+ }
+
return (act & ERTS_PTS_FLG_HAVE_TASKS) != 0;
}
@@ -1345,7 +1370,7 @@ erts_port_task_abort_nosuspend_tasks(Port *pp)
#endif
schedule_port_task_handle_list_free(pthlp);
- abort_nosuspend_task(pp, type, &td);
+ abort_nosuspend_task(pp, type, &td, pp->sched.taskq.bpq != NULL);
}
}
@@ -1369,6 +1394,7 @@ erts_port_task_schedule(Eterm id,
Port *pp;
ErtsPortTask *ptp = NULL;
erts_aint32_t act, add_flags;
+ unsigned int prof_runnable_ports;
if (pthp && erts_port_task_is_scheduled(pthp)) {
ASSERT(0);
@@ -1457,6 +1483,10 @@ erts_port_task_schedule(Eterm id,
if (ns_pthlp)
add_flags |= ERTS_PTS_FLG_HAVE_NS_TASKS;
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&pp->sched);
+
while (1) {
erts_aint32_t new, exp;
@@ -1481,6 +1511,13 @@ erts_port_task_schedule(Eterm id,
goto done; /* Died after our task insert... */
}
+ if (prof_runnable_ports) {
+ if (!(act & ERTS_PTS_FLG_EXEC_IMM))
+ profile_runnable_port(pp, am_active);
+ erts_port_task_sched_unlock(&pp->sched);
+ prof_runnable_ports = 0;
+ }
+
/* Enqueue port on run-queue */
runq = erts_port_runq(pp);
@@ -1489,8 +1526,10 @@ erts_port_task_schedule(Eterm id,
#ifdef ERTS_SMP
xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
+ ERTS_SMP_LC_ASSERT(runq != xrunq);
+ ERTS_SMP_LC_VERIFY_RQ(runq, pp);
if (xrunq) {
- /* Port emigrated ... */
+ /* Emigrate port ... */
erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
erts_smp_runq_unlock(runq);
runq = erts_port_runq(pp);
@@ -1500,10 +1539,6 @@ erts_port_task_schedule(Eterm id,
#endif
enqueue_port(runq, pp);
-
- if (erts_system_profile_flags.runnable_ports) {
- profile_runnable_port(pp, am_active);
- }
erts_smp_runq_unlock(runq);
@@ -1511,6 +1546,9 @@ erts_port_task_schedule(Eterm id,
done:
+ if (prof_runnable_ports)
+ erts_port_task_sched_unlock(&pp->sched);
+
#ifdef ERTS_SMP
if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
erts_port_dec_refc(pp);
@@ -1525,7 +1563,7 @@ abort_nosuspend:
erts_port_dec_refc(pp);
#endif
- abort_nosuspend_task(pp, ptp->type, &ptp->u.alive.td);
+ abort_nosuspend_task(pp, ptp->type, &ptp->u.alive.td, 0);
ASSERT(ns_pthlp);
erts_free(ERTS_ALC_T_PT_HNDL_LIST, ns_pthlp);
@@ -1609,6 +1647,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
goto done;
}
+ ERTS_SMP_LC_VERIFY_RQ(runq, pp);
+
erts_smp_runq_unlock(runq);
*curr_port_pp = pp;
@@ -1765,10 +1805,6 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_unblock_fpe(fpe_was_unmasked);
- /* trace port scheduling, out */
- if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
- trace_sched_ports(pp, am_out);
- }
if (io_tasks_executed) {
ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
@@ -1791,11 +1827,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_smp_runq_lock(runq);
- if (!active) {
- if (erts_system_profile_flags.runnable_ports)
- profile_runnable_port(pp, am_inactive);
- }
- else {
+ if (active) {
#ifdef ERTS_SMP
ErtsRunQueue *xrunq;
#endif
@@ -1804,6 +1836,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
#ifdef ERTS_SMP
xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
+ ERTS_SMP_LC_ASSERT(runq != xrunq);
+ ERTS_SMP_LC_VERIFY_RQ(runq, pp);
if (!xrunq) {
#endif
enqueue_port(runq, pp);
@@ -1811,7 +1845,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
#ifdef ERTS_SMP
}
else {
- /* Port emigrated ... */
+ /* Emigrate port... */
erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
erts_smp_runq_unlock(runq);
diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h
index 1d30465ec9..9ef0cfcedc 100644
--- a/erts/emulator/beam/erl_port_task.h
+++ b/erts/emulator/beam/erl_port_task.h
@@ -78,6 +78,7 @@ extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
#define ERTS_PTS_FLG_PARALLELISM (((erts_aint32_t) 1) << 9)
#define ERTS_PTS_FLG_FORCE_SCHED (((erts_aint32_t) 1) << 10)
#define ERTS_PTS_FLG_EXITING (((erts_aint32_t) 1) << 11)
+#define ERTS_PTS_FLG_EXEC_IMM (((erts_aint32_t) 1) << 12)
#define ERTS_PTS_FLGS_BUSY \
(ERTS_PTS_FLG_BUSY_PORT | ERTS_PTS_FLG_BUSY_PORT_Q)
@@ -87,6 +88,7 @@ extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks;
| ERTS_PTS_FLG_HAVE_BUSY_TASKS \
| ERTS_PTS_FLG_HAVE_TASKS \
| ERTS_PTS_FLG_EXEC \
+ | ERTS_PTS_FLG_EXEC_IMM \
| ERTS_PTS_FLG_FORCE_SCHED \
| ERTS_PTS_FLG_EXITING)
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index b73f9b7f92..20a88ec581 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -590,12 +590,10 @@ erts_pre_init_process(void)
erts_psd_required_locks[ERTS_PSD_DELAYED_GC_TASK_QS].set_locks
= ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS;
-#ifdef ERTS_DIRTY_SCHEDULERS
- erts_psd_required_locks[ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT].get_locks
- = ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_GET_LOCKS;
- erts_psd_required_locks[ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT].set_locks
- = ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_SET_LOCKS;
-#endif
+ erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].get_locks
+ = ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS;
+ erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].set_locks
+ = ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS;
/* Check that we have locks for all entries */
for (ix = 0; ix < ERTS_PSD_SIZE; ix++) {
@@ -2211,6 +2209,9 @@ aux_work_timeout_early_init(int no_schedulers)
p = (UWord) malloc((sizeof(ErtsAuxWorkTmo)
+ sizeof(erts_atomic32_t)*(no_schedulers+1))
+ ERTS_CACHE_LINE_SIZE-1);
+ if (!p) {
+ ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
+ }
if (p & ERTS_CACHE_LINE_MASK)
p = (p & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE;
ASSERT((p & ERTS_CACHE_LINE_MASK) == 0);
@@ -3755,17 +3756,25 @@ evacuate_run_queue(ErtsRunQueue *rq,
}
#ifdef ERTS_DIRTY_SCHEDULERS
else if (state & ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q) {
- erts_aint32_t old;
- old = erts_smp_atomic32_read_band_nob(&proc->state,
- ~(ERTS_PSFLG_DIRTY_CPU_PROC
- | ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q));
+#ifdef DEBUG
+ erts_aint32_t old =
+#else
+ (void)
+#endif
+ erts_smp_atomic32_read_band_nob(&proc->state,
+ ~(ERTS_PSFLG_DIRTY_CPU_PROC
+ | ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q));
/* assert that no other dirty flags are set */
ASSERT(!(old & (ERTS_PSFLG_DIRTY_IO_PROC|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)));
} else if (state & ERTS_PSFLG_DIRTY_IO_PROC_IN_Q) {
- erts_aint32_t old;
- old = erts_smp_atomic32_read_band_nob(&proc->state,
- ~(ERTS_PSFLG_DIRTY_IO_PROC
- | ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
+#ifdef DEBUG
+ erts_aint32_t old =
+#else
+ (void)
+#endif
+ erts_smp_atomic32_read_band_nob(&proc->state,
+ ~(ERTS_PSFLG_DIRTY_IO_PROC
+ | ERTS_PSFLG_DIRTY_IO_PROC_IN_Q));
/* assert that no other dirty flags are set */
ASSERT(!(old & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q)));
}
@@ -5874,6 +5883,9 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Proces
case ERTS_ENQUEUE_NOT:
if (erts_system_profile_flags.runnable_procs) {
+ /* Status lock prevents out of order "runnable proc" trace msgs */
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+
if (!(a & ERTS_PSFLG_ACTIVE_SYS)
&& (!(a & ERTS_PSFLG_ACTIVE)
|| (a & ERTS_PSFLG_SUSPENDED))) {
@@ -5987,7 +5999,8 @@ change_proc_schedule_state(Process *p,
erts_aint32_t clear_state_flags,
erts_aint32_t set_state_flags,
erts_aint32_t *statep,
- erts_aint32_t *enq_prio_p)
+ erts_aint32_t *enq_prio_p,
+ ErtsProcLocks locks)
{
/*
* NOTE: ERTS_PSFLG_RUNNING, ERTS_PSFLG_RUNNING_SYS and
@@ -5996,6 +6009,11 @@ change_proc_schedule_state(Process *p,
*/
erts_aint32_t a = *statep, n;
int enqueue; /* < 0 -> use proxy */
+ unsigned int prof_runnable_procs = erts_system_profile_flags.runnable_procs;
+ unsigned int lock_status = (prof_runnable_procs
+ && !(locks & ERTS_PROC_LOCK_STATUS));
+
+ ERTS_SMP_LC_ASSERT(locks == erts_proc_lc_my_proc_locks(p));
ASSERT(!(a & ERTS_PSFLG_PROXY));
ASSERT((clear_state_flags & (ERTS_PSFLG_RUNNING
@@ -6005,6 +6023,9 @@ change_proc_schedule_state(Process *p,
| ERTS_PSFLG_RUNNING_SYS
| ERTS_PSFLG_ACTIVE_SYS)) == 0);
+ if (lock_status)
+ erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS);
+
while (1) {
erts_aint32_t e;
n = e = a;
@@ -6040,7 +6061,9 @@ change_proc_schedule_state(Process *p,
break;
}
- if (erts_system_profile_flags.runnable_procs) {
+ if (prof_runnable_procs) {
+
+ /* Status lock prevents out of order "runnable proc" trace msgs */
if (((n & (ERTS_PSFLG_SUSPENDED
| ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE)
@@ -6053,15 +6076,18 @@ change_proc_schedule_state(Process *p,
profile_runnable_proc(p, am_active);
}
+ if (lock_status)
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
}
+
*statep = a;
return enqueue;
}
static ERTS_INLINE void
-schedule_process(Process *p, erts_aint32_t in_state)
+schedule_process(Process *p, erts_aint32_t in_state, ErtsProcLocks locks)
{
erts_aint32_t enq_prio = -1;
erts_aint32_t state = in_state;
@@ -6069,7 +6095,8 @@ schedule_process(Process *p, erts_aint32_t in_state)
0,
ERTS_PSFLG_ACTIVE,
&state,
- &enq_prio);
+ &enq_prio,
+ locks);
if (enqueue != ERTS_ENQUEUE_NOT)
add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
state,
@@ -6077,16 +6104,27 @@ schedule_process(Process *p, erts_aint32_t in_state)
}
void
-erts_schedule_process(Process *p, erts_aint32_t state)
+erts_schedule_process(Process *p, erts_aint32_t state, ErtsProcLocks locks)
{
- schedule_process(p, state);
+ schedule_process(p, state, locks);
}
static void
schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
{
+ /*
+ * Expects status lock to be locked when called, and
+ * returns with status lock unlocked...
+ */
erts_aint32_t a = state, n, enq_prio = -1;
int enqueue; /* < 0 -> use proxy */
+ unsigned int prof_runnable_procs = erts_system_profile_flags.runnable_procs;
+
+ /* Status lock prevents out of order "runnable proc" trace msgs */
+ ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p));
+
+ if (!prof_runnable_procs)
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
ASSERT(!(state & ERTS_PSFLG_PROXY));
@@ -6095,7 +6133,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
n = e = a;
if (a & ERTS_PSFLG_FREE)
- return; /* We don't want to schedule free processes... */
+ goto cleanup; /* We don't want to schedule free processes... */
enqueue = ERTS_ENQUEUE_NOT;
n |= ERTS_PSFLG_ACTIVE_SYS;
@@ -6108,7 +6146,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
goto cleanup;
}
- if (erts_system_profile_flags.runnable_procs) {
+ if (prof_runnable_procs) {
if (!(a & (ERTS_PSFLG_ACTIVE_SYS
| ERTS_PSFLG_RUNNING
@@ -6118,6 +6156,8 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
profile_runnable_proc(p, am_active);
}
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+ prof_runnable_procs = 0;
}
if (enqueue != ERTS_ENQUEUE_NOT) {
@@ -6132,8 +6172,14 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy)
}
cleanup:
+
+ if (prof_runnable_procs)
+ erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS);
+
if (proxy)
free_proxy_proc(proxy);
+
+ ERTS_SMP_LC_ASSERT(!(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)));
}
static ERTS_INLINE int
@@ -6200,7 +6246,7 @@ suspend_process(Process *c_p, Process *p)
}
static ERTS_INLINE void
-resume_process(Process *p)
+resume_process(Process *p, ErtsProcLocks locks)
{
erts_aint32_t state, enq_prio = -1;
int enqueue;
@@ -6217,7 +6263,8 @@ resume_process(Process *p)
ERTS_PSFLG_SUSPENDED,
0,
&state,
- &enq_prio);
+ &enq_prio,
+ locks);
if (enqueue)
add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio),
state,
@@ -7818,6 +7865,9 @@ erts_start_schedulers(void)
#ifdef ETHR_HAVE_THREAD_NAMES
opts.name = malloc(80);
+ if (!opts.name) {
+ ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
+ }
#endif
#ifdef ERTS_SMP
@@ -8030,7 +8080,8 @@ handle_pend_sync_suspend(Process *suspendee,
}
/* suspender is suspended waiting for suspendee to suspend;
resume suspender */
- resume_process(suspender);
+ ASSERT(suspendee != suspender);
+ resume_process(suspender, ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS);
}
}
@@ -8065,7 +8116,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks,
ASSERT(c_p->flags & F_P2PNR_RESCHED);
c_p->flags &= ~F_P2PNR_RESCHED;
if (!suspend && rp)
- resume_process(rp);
+ resume_process(rp, rp_locks);
}
else {
@@ -8223,7 +8274,8 @@ handle_pend_bif_sync_suspend(Process *suspendee,
}
/* suspender is suspended waiting for suspendee to suspend;
resume suspender */
- resume_process(suspender);
+ ASSERT(suspender != suspendee);
+ resume_process(suspender, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(suspender,
ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS);
}
@@ -8583,7 +8635,8 @@ resume_process_1(BIF_ALIST_1)
ASSERT(ERTS_PSFLG_SUSPENDED
& erts_smp_atomic32_read_nob(&suspendee->state));
- resume_process(suspendee);
+ ASSERT(BIF_P != suspendee);
+ resume_process(suspendee, ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
}
@@ -8713,7 +8766,7 @@ erts_resume(Process* process, ErtsProcLocks process_locks)
ERTS_SMP_LC_ASSERT(process_locks == erts_proc_lc_my_proc_locks(process));
if (!(process_locks & ERTS_PROC_LOCK_STATUS))
erts_smp_proc_lock(process, ERTS_PROC_LOCK_STATUS);
- resume_process(process);
+ resume_process(process, process_locks|ERTS_PROC_LOCK_STATUS);
if (!(process_locks & ERTS_PROC_LOCK_STATUS))
erts_smp_proc_unlock(process, ERTS_PROC_LOCK_STATUS);
}
@@ -8732,7 +8785,7 @@ erts_resume_processes(ErtsProcList *list)
proc = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCK_STATUS);
if (proc) {
if (erts_proclist_same(plp, proc)) {
- resume_process(proc);
+ resume_process(proc, ERTS_PROC_LOCK_STATUS);
nresumed++;
}
erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_STATUS);
@@ -9968,8 +10021,10 @@ erts_internal_request_system_task_3(BIF_ALIST_3)
rp_state = n;
}
- erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
-
+ /*
+ * schedule_process_sys_task() unlocks status
+ * lock on process.
+ */
schedule_process_sys_task(rp, rp_state, NULL);
if (free_stqs)
@@ -10714,7 +10769,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
* Schedule process for execution.
*/
- schedule_process(p, state);
+ schedule_process(p, state, 0);
VERBOSE(DEBUG_PROCESSES, ("Created a new process: %T\n",p->common.id));
@@ -11035,7 +11090,8 @@ set_proc_exiting(Process *p,
ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT,
ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE,
&state,
- &enq_prio);
+ &enq_prio,
+ ERTS_PROC_LOCKS_ALL);
p->fvalue = reason;
if (bp)
@@ -11076,7 +11132,8 @@ set_proc_self_exiting(Process *c_p)
ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT,
ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE,
&state,
- &enq_prio);
+ &enq_prio,
+ ERTS_PROC_LOCKS_ALL);
ASSERT(!enqueue);
return state;
@@ -11721,8 +11778,9 @@ resume_suspend_monitor(ErtsSuspendMonitor *smon, void *vc_p)
Process *suspendee = erts_pid2proc((Process *) vc_p, ERTS_PROC_LOCK_MAIN,
smon->pid, ERTS_PROC_LOCK_STATUS);
if (suspendee) {
+ ASSERT(suspendee != vc_p);
if (smon->active)
- resume_process(suspendee);
+ resume_process(suspendee, ERTS_PROC_LOCK_STATUS);
erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS);
}
erts_destroy_suspend_monitor(smon);
@@ -11814,6 +11872,7 @@ erts_continue_exit_process(Process *p)
struct saved_calls *scb;
process_breakpoint_time_t *pbt;
erts_aint32_t state;
+ void *nif_export;
#ifdef DEBUG
int yield_allowed = 1;
@@ -11964,6 +12023,7 @@ erts_continue_exit_process(Process *p)
: NULL);
scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, ERTS_PROC_LOCKS_ALL, NULL);
pbt = ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCKS_ALL, NULL);
+ nif_export = ERTS_PROC_SET_NIF_TRAP_EXPORT(p, ERTS_PROC_LOCKS_ALL, NULL);
erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL);
#ifdef BM_COUNTERS
@@ -12011,6 +12071,9 @@ erts_continue_exit_process(Process *p)
if (pbt)
erts_free(ERTS_ALC_T_BPD, (void *) pbt);
+ if (nif_export)
+ erts_destroy_nif_export(nif_export);
+
delete_process(p);
#ifdef ERTS_SMP
@@ -12055,7 +12118,7 @@ timeout_proc(Process* p)
state = erts_smp_atomic32_read_acqb(&p->state);
if (!(state & ERTS_PSFLG_ACTIVE))
- schedule_process(p, state);
+ schedule_process(p, state, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS);
}
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index ed6dadbffa..3b0798207e 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -734,13 +734,9 @@ erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi)
#define ERTS_PSD_DIST_ENTRY 3
#define ERTS_PSD_CALL_TIME_BP 4
#define ERTS_PSD_DELAYED_GC_TASK_QS 5
-#ifdef ERTS_DIRTY_SCHEDULERS
-#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT 6
+#define ERTS_PSD_NIF_TRAP_EXPORT 6
#define ERTS_PSD_SIZE 7
-#else
-#define ERTS_PSD_SIZE 6
-#endif
typedef struct {
void *data[ERTS_PSD_SIZE];
@@ -767,10 +763,8 @@ typedef struct {
#define ERTS_PSD_DELAYED_GC_TASK_QS_GET_LOCKS ERTS_PROC_LOCK_MAIN
#define ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS ERTS_PROC_LOCK_MAIN
-#ifdef ERTS_DIRTY_SCHEDULERS
-#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_GET_LOCKS ERTS_PROC_LOCK_MAIN
-#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_SET_LOCKS ERTS_PROC_LOCK_MAIN
-#endif
+#define ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS ERTS_PROC_LOCK_MAIN
+#define ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS ERTS_PROC_LOCK_MAIN
typedef struct {
ErtsProcLocks get_locks;
@@ -1367,6 +1361,9 @@ Uint64 erts_get_proc_interval(void);
Uint64 erts_ensure_later_proc_interval(Uint64);
Uint64 erts_step_proc_interval(void);
+int erts_setup_nif_gc(Process* proc, Eterm** objv, int* nobj); /* see erl_nif.c */
+void erts_destroy_nif_export(void *); /* see erl_nif.c */
+
ErtsProcList *erts_proclist_create(Process *);
void erts_proclist_destroy(ErtsProcList *);
@@ -1704,17 +1701,17 @@ ErtsSchedulerData *erts_get_scheduler_data(void)
#endif
#endif
-void erts_schedule_process(Process *, erts_aint32_t);
+void erts_schedule_process(Process *, erts_aint32_t, ErtsProcLocks);
-ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p);
+ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p, ErtsProcLocks locks);
#if ERTS_GLB_INLINE_INCL_FUNC_DEF
ERTS_GLB_INLINE void
-erts_proc_notify_new_message(Process *p)
+erts_proc_notify_new_message(Process *p, ErtsProcLocks locks)
{
/* No barrier needed, due to msg lock */
erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state);
if (!(state & ERTS_PSFLG_ACTIVE))
- erts_schedule_process(p, state);
+ erts_schedule_process(p, state, locks);
}
#endif
@@ -1817,12 +1814,10 @@ erts_psd_set(Process *p, ErtsProcLocks plocks, int ix, void *data)
#define ERTS_PROC_SET_DELAYED_GC_TASK_QS(P, L, PBT) \
((ErtsProcSysTaskQs *) erts_psd_set((P), (L), ERTS_PSD_DELAYED_GC_TASK_QS, (void *) (PBT)))
-#ifdef ERTS_DIRTY_SCHEDULERS
-#define ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(P) \
- ((Export *) erts_psd_get((P), ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT))
-#define ERTS_PROC_SET_DIRTY_SCHED_TRAP_EXPORT(P, L, DSTE) \
- ((Export *) erts_psd_set((P), (L), ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT, (void *) (DSTE)))
-#endif
+#define ERTS_PROC_GET_NIF_TRAP_EXPORT(P) \
+ erts_psd_get((P), ERTS_PSD_NIF_TRAP_EXPORT)
+#define ERTS_PROC_SET_NIF_TRAP_EXPORT(P, L, NTE) \
+ erts_psd_set((P), (L), ERTS_PSD_NIF_TRAP_EXPORT, (void *) (NTE))
ERTS_GLB_INLINE Eterm erts_proc_get_error_handler(Process *p);
diff --git a/erts/emulator/beam/erl_process_dict.c b/erts/emulator/beam/erl_process_dict.c
index 23e5bf737f..3ce707efda 100644
--- a/erts/emulator/beam/erl_process_dict.c
+++ b/erts/emulator/beam/erl_process_dict.c
@@ -82,6 +82,7 @@
static void pd_hash_erase(Process *p, Eterm id, Eterm *ret);
static void pd_hash_erase_all(Process *p);
static Eterm pd_hash_get_keys(Process *p, Eterm value);
+static Eterm pd_hash_get_all_keys(Process *p, ProcDict *pd);
static Eterm pd_hash_get_all(Process *p, ProcDict *pd);
static Eterm pd_hash_put(Process *p, Eterm id, Eterm value);
@@ -275,6 +276,16 @@ BIF_RETTYPE get_1(BIF_ALIST_1)
BIF_RET(ret);
}
+BIF_RETTYPE get_keys_0(BIF_ALIST_0)
+{
+ Eterm ret;
+
+ PD_CHECK(BIF_P->dictionary);
+ ret = pd_hash_get_all_keys(BIF_P,BIF_P->dictionary);
+ PD_CHECK(BIF_P->dictionary);
+ BIF_RET(ret);
+}
+
BIF_RETTYPE get_keys_1(BIF_ALIST_1)
{
Eterm ret;
@@ -412,6 +423,47 @@ Eterm erts_pd_hash_get(Process *p, Eterm id)
return am_undefined;
}
+#define PD_GET_TKEY(Dst,Src) \
+do { \
+ ASSERT(is_tuple((Src))); \
+ ASSERT(arityval(*((Eterm*)tuple_val((Src)))) == 2); \
+ (Dst) = ((Eterm*)tuple_val((Src)))[1]; \
+} while(0)
+
+static Eterm pd_hash_get_all_keys(Process *p, ProcDict *pd) {
+ Eterm* hp;
+ Eterm res = NIL;
+ Eterm tmp, tmp2;
+ unsigned int i;
+ unsigned int num;
+
+ if (pd == NULL) {
+ return res;
+ }
+
+ num = HASH_RANGE(pd);
+ hp = HAlloc(p, pd->numElements * 2);
+
+ for (i = 0; i < num; ++i) {
+ tmp = ARRAY_GET(pd, i);
+ if (is_boxed(tmp)) {
+ PD_GET_TKEY(tmp,tmp);
+ res = CONS(hp, tmp, res);
+ hp += 2;
+ } else if (is_list(tmp)) {
+ while (tmp != NIL) {
+ tmp2 = TCAR(tmp);
+ PD_GET_TKEY(tmp2,tmp2);
+ res = CONS(hp, tmp2, res);
+ hp += 2;
+ tmp = TCDR(tmp);
+ }
+ }
+ }
+ return res;
+}
+#undef PD_GET_TKEY
+
static Eterm pd_hash_get_keys(Process *p, Eterm value)
{
Eterm *hp;
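
The hunk above backs the new get_keys/0 BIF with pd_hash_get_all_keys(), which walks every hash slot and collects the key of each stored {Key, Value} pair (single entries are boxed tuples, collision buckets are lists). A minimal sketch of how the new BIF relates to the existing get_keys/1, assuming the behaviour implied by the erlang.erl spec added later in this diff; the function name and the keys/values are illustrative only:

    get_keys_example() ->
        %% Assumes an otherwise empty process dictionary.
        put(color, red),
        put(size, 42),
        [color, size] = lists:sort(erlang:get_keys()), %% every key, regardless of value
        [color] = erlang:get_keys(red),                %% get_keys/1 still filters by value
        ok.
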
diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c
index 8d240355b0..9b9b4b2a62 100644
--- a/erts/emulator/beam/external.c
+++ b/erts/emulator/beam/external.c
@@ -1925,6 +1925,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
}
real_size = endp - bytes;
result_bin = erts_bin_realloc(context->s.ec.result_bin,real_size);
+ result_bin->orig_size = real_size;
level = context->s.ec.level;
BUMP_REDS(p, (initial_reds - reds) / TERM_TO_BINARY_LOOP_FACTOR);
if (level == 0 || real_size < 6) { /* We are done */
@@ -2004,6 +2005,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla
erl_zlib_deflate_finish(&(context->s.cc.stream));
result_bin = erts_bin_realloc(context->s.cc.destination_bin,
context->s.cc.dest_len+6);
+ result_bin->orig_size = context->s.cc.dest_len+6;
context->s.cc.destination_bin = NULL;
pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE);
pb->thing_word = HEADER_PROC_BIN;
diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c
index edf4a28784..9ae973e108 100644
--- a/erts/emulator/beam/io.c
+++ b/erts/emulator/beam/io.c
@@ -1218,9 +1218,10 @@ typedef struct {
static ERTS_INLINE ErtsTryImmDrvCallResult
try_imm_drv_call(ErtsTryImmDrvCallState *sp)
{
+ unsigned int prof_runnable_ports;
ErtsTryImmDrvCallResult res;
int reds_left_in;
- erts_aint32_t invalid_state, invalid_sched_flags;
+ erts_aint32_t act, exp, invalid_state, invalid_sched_flags;
Port *prt = sp->port;
Process *c_p = sp->c_p;
@@ -1247,18 +1248,39 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp)
goto locked_fail;
}
- sp->sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags);
- if (sp->sched_flags & invalid_sched_flags) {
- res = ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS;
- goto locked_fail;
- }
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&prt->sched);
+
+ act = erts_smp_atomic32_read_nob(&prt->sched.flags);
+ do {
+ erts_aint32_t new;
+
+ if (act & invalid_sched_flags) {
+ res = ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS;
+ sp->sched_flags = act;
+ goto locked_fail;
+ }
+ exp = act;
+ new = act | ERTS_PTS_FLG_EXEC_IMM;
+ act = erts_smp_atomic32_cmpxchg_mb(&prt->sched.flags, new, exp);
+ } while (act != exp);
+
+ sp->sched_flags = act;
if (!c_p)
reds_left_in = CONTEXT_REDS/10;
else {
if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS))
trace_virtual_sched(c_p, am_out);
+ /*
+ * No status lock held while sending runnable
+ * proc trace messages. It is however not needed
+ * in this case, since only this thread can send
+ * such messages for this process until the process
+ * has been scheduled out.
+ */
if (erts_system_profile_flags.runnable_procs
&& erts_system_profile_flags.exclusive)
profile_runnable_proc(c_p, am_inactive);
@@ -1273,11 +1295,14 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp)
ERTS_SMP_CHK_NO_PROC_LOCKS;
- if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
- trace_sched_ports_where(prt, am_in, sp->port_op);
- if (erts_system_profile_flags.runnable_ports
- && !erts_port_is_scheduled(prt))
- profile_runnable_port(prt, am_active);
+ if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) {
+ if (prof_runnable_ports && !(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
+ profile_runnable_port(prt, am_active);
+ if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
+ trace_sched_ports_where(prt, am_in, sp->port_op);
+ if (prof_runnable_ports)
+ erts_port_task_sched_unlock(&prt->sched);
+ }
sp->fpe_was_unmasked = erts_block_fpe();
@@ -1294,17 +1319,31 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp)
int reds;
Port *prt = sp->port;
Process *c_p = sp->c_p;
+ erts_aint32_t act;
+ unsigned int prof_runnable_ports;
reds = prt->reds;
reds += erts_port_driver_callback_epilogue(prt, NULL);
erts_unblock_fpe(sp->fpe_was_unmasked);
- if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
- trace_sched_ports_where(prt, am_out, sp->port_op);
- if (erts_system_profile_flags.runnable_ports
- && !erts_port_is_scheduled(prt))
- profile_runnable_port(prt, am_inactive);
+ prof_runnable_ports = erts_system_profile_flags.runnable_ports;
+ if (prof_runnable_ports)
+ erts_port_task_sched_lock(&prt->sched);
+
+ act = erts_smp_atomic32_read_band_mb(&prt->sched.flags,
+ ~ERTS_PTS_FLG_EXEC_IMM);
+ ERTS_SMP_LC_ASSERT(act & ERTS_PTS_FLG_EXEC_IMM);
+
+ if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) {
+ if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS))
+ trace_sched_ports_where(prt, am_out, sp->port_op);
+ if (prof_runnable_ports) {
+ if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC)))
+ profile_runnable_port(prt, am_inactive);
+ erts_port_task_sched_unlock(&prt->sched);
+ }
+ }
erts_port_release(prt);
@@ -1319,6 +1358,13 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp)
if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS))
trace_virtual_sched(c_p, am_in);
+ /*
+ * No status lock held while sending runnable
+ * proc trace messages. It is however not needed
+ * in this case, since only this thread can send
+ * such messages for this process until the process
+ * has been scheduled out.
+ */
if (erts_system_profile_flags.runnable_procs
&& erts_system_profile_flags.exclusive)
profile_runnable_proc(c_p, am_active);
@@ -6129,7 +6175,7 @@ driver_pdl_create(ErlDrvPort dp)
return NULL;
pdl = erts_alloc(ERTS_ALC_T_PORT_DATA_LOCK,
sizeof(struct erl_drv_port_data_lock));
- erts_mtx_init(&pdl->mtx, "port_data_lock");
+ erts_mtx_init_x(&pdl->mtx, "port_data_lock", pp->common.id, 1);
pdl_init_refc(pdl);
erts_port_inc_refc(pp);
pdl->prt = pp;
@@ -7166,7 +7212,7 @@ char *driver_dl_error(void)
#define ERL_DRV_SYS_INFO_SIZE(LAST_FIELD) \
- (((size_t) &((ErlDrvSysInfo *) 0)->LAST_FIELD) \
+ (offsetof(ErlDrvSysInfo, LAST_FIELD) \
+ sizeof(((ErlDrvSysInfo *) 0)->LAST_FIELD))
void
@@ -7228,6 +7274,18 @@ driver_system_info(ErlDrvSysInfo *sip, size_t si_size)
sip->nif_major_version = ERL_NIF_MAJOR_VERSION;
sip->nif_minor_version = ERL_NIF_MINOR_VERSION;
}
+ /*
+ * 'dirty_scheduler_support' is the last field in the 4th version
+ * (driver version 3.1, NIF version 2.7)
+ */
+ if (si_size >= ERL_DRV_SYS_INFO_SIZE(dirty_scheduler_support)) {
+#if defined(ERL_NIF_DIRTY_SCHEDULER_SUPPORT) && defined(USE_THREADS)
+ sip->dirty_scheduler_support = 1;
+#else
+ sip->dirty_scheduler_support = 0;
+#endif
+ }
+
}
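
The driver_system_info() change above only reports the new 'dirty_scheduler_support' field to callers whose si_size covers it, so older drivers are unaffected. From Erlang code the same capability is usually probed the way the NIF suite further down does it; a hedged sketch (the function name is illustrative, and it assumes system_info/1 raises badarg on emulators built without dirty scheduler support, as the tests below rely on):

    have_dirty_schedulers() ->
        try erlang:system_info(dirty_cpu_schedulers) of
            N when is_integer(N) -> true
        catch
            error:badarg -> false
        end.
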
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 72092ec7b0..55f9e68e78 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -3948,6 +3948,9 @@ erts_save_emu_args(int argc, char **argv)
size += sz+1;
}
ptr = (char *) malloc(size);
+ if (!ptr) {
+ ERTS_INTERNAL_ERROR("malloc failed to allocate memory!");
+ }
#ifdef DEBUG
end_ptr = ptr + size;
#endif
diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c
index 09bada457d..891589d1c5 100644
--- a/erts/emulator/drivers/common/inet_drv.c
+++ b/erts/emulator/drivers/common/inet_drv.c
@@ -4372,7 +4372,7 @@ static int erl_inet_close(inet_descriptor* desc)
desc_close(desc);
desc->state = INET_STATE_CLOSED;
} else if (desc->prebound && (desc->s != INVALID_SOCKET)) {
- sock_select(desc, FD_READ | FD_WRITE | FD_CLOSE, 0);
+ sock_select(desc, FD_READ | FD_WRITE | FD_CLOSE | ERL_DRV_USE_NO_CALLBACK, 0);
desc->event_mask = 0;
#ifdef __WIN32__
desc->forced_events = 0;
@@ -4536,7 +4536,8 @@ static ErlDrvSSizeT inet_ctl_open(inet_descriptor* desc, int domain, int type,
/* as inet_open but pass in an open socket (MUST BE OF RIGHT TYPE) */
static ErlDrvSSizeT inet_ctl_fdopen(inet_descriptor* desc, int domain, int type,
- SOCKET s, char** rbuf, ErlDrvSizeT rsize)
+ SOCKET s, Uint32 bound,
+ char** rbuf, ErlDrvSizeT rsize)
{
inet_address name;
unsigned int sz = sizeof(name);
@@ -4560,7 +4561,12 @@ static ErlDrvSSizeT inet_ctl_fdopen(inet_descriptor* desc, int domain, int type,
#ifdef __WIN32__
driver_select(desc->port, desc->event, ERL_DRV_READ, 1);
#endif
- desc->state = INET_STATE_BOUND; /* assume bound */
+
+ if (bound)
+ desc->state = INET_STATE_BOUND;
+ else
+ desc->state = INET_STATE_OPEN;
+
if (type == SOCK_STREAM) { /* check if connected */
sz = sizeof(name);
if (!IS_SOCKET_ERROR(sock_peer(s, (struct sockaddr*) &name, &sz))) {
@@ -5772,7 +5778,7 @@ done:
ia_p->Ipv6IfIndex &&
ia_p->Ipv6IfIndex != index)
{
- /* Oops, there was an other interface for IPv6. Possible? XXX */
+ /* Oops, there was another interface for IPv6. Possible? XXX */
index = ia_p->Ipv6IfIndex;
goto index;
}
@@ -9121,10 +9127,11 @@ static ErlDrvSSizeT tcp_inet_ctl(ErlDrvData e, unsigned int cmd,
break;
}
- case INET_REQ_FDOPEN: { /* pass in an open socket */
+ case INET_REQ_FDOPEN: { /* pass in an open (and optionally bound) socket */
int domain;
+ int bound;
DEBUGF(("tcp_inet_ctl(%ld): FDOPEN\r\n", (long)desc->inet.port));
- if (len != 6) return ctl_error(EINVAL, rbuf, rsize);
+ if (len != 6 && len != 10) return ctl_error(EINVAL, rbuf, rsize);
switch(buf[0]) {
case INET_AF_INET:
domain = AF_INET;
@@ -9142,8 +9149,13 @@ static ErlDrvSSizeT tcp_inet_ctl(ErlDrvData e, unsigned int cmd,
return ctl_error(EINVAL, rbuf, rsize);
}
if (buf[1] != INET_TYPE_STREAM) return ctl_error(EINVAL, rbuf, rsize);
+
+ if (len == 6) bound = 1;
+ else bound = get_int32(buf+2+4);
+
return inet_ctl_fdopen(INETP(desc), domain, SOCK_STREAM,
- (SOCKET) get_int32(buf+2), rbuf, rsize);
+ (SOCKET) get_int32(buf+2),
+ bound, rbuf, rsize);
break;
}
@@ -11116,10 +11128,11 @@ static ErlDrvSSizeT packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf,
return replen;
- case INET_REQ_FDOPEN: { /* pass in an open (and bound) socket */
+ case INET_REQ_FDOPEN: { /* pass in an open (and optionally bound) socket */
SOCKET s;
+ int bound;
DEBUGF(("packet inet_ctl(%ld): FDOPEN\r\n", (long)desc->port));
- if (len != 6) {
+ if (len != 6 && len != 10) {
return ctl_error(EINVAL, rbuf, rsize);
}
switch (buf[0]) {
@@ -11144,7 +11157,11 @@ static ErlDrvSSizeT packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf,
return ctl_error(EINVAL, rbuf, rsize);
}
s = (SOCKET)get_int32(buf+2);
- replen = inet_ctl_fdopen(desc, af, type, s, rbuf, rsize);
+
+ if (len == 6) bound = 1;
+ else bound = get_int32(buf+2+4);
+
+ replen = inet_ctl_fdopen(desc, af, type, s, bound, rbuf, rsize);
if ((*rbuf)[0] != INET_REP_ERROR) {
if (desc->active)
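
The extended INET_REQ_FDOPEN payload (10 bytes instead of 6) carries a 32-bit 'bound' flag after the fd; when it is zero the descriptor is left in INET_STATE_OPEN so the caller can bind it itself. The Erlang entry point is the fdopen/5 added to prim_inet at the end of this diff; a hedged usage sketch, assuming Fd is a valid, already opened socket descriptor and with an illustrative wrapper name:

    fdopen_unbound(Fd) ->
        %% Bound =:= false leaves the port in INET_STATE_OPEN, so the
        %% caller is still expected to bind before listen/connect.
        {ok, Sock} = prim_inet:fdopen(tcp, inet, stream, Fd, false),
        Sock.
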
diff --git a/erts/emulator/drivers/unix/multi_drv.c b/erts/emulator/drivers/unix/multi_drv.c
index 822c96730c..724d325ed5 100644
--- a/erts/emulator/drivers/unix/multi_drv.c
+++ b/erts/emulator/drivers/unix/multi_drv.c
@@ -20,7 +20,7 @@
/* Purpose: Multidriver interface
This is an example of a driver which allows multiple instances of itself.
I.e have one erlang process execute open_port(multi......) and
- at the same time have an other erlang process open an other port
+ at the same time have another erlang process open another port
running multi there as well.
*/
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index 0a58a625b2..aa412a20c8 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -2157,7 +2157,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps,
#ifdef ERTS_POLL_DEBUG_PRINT
erts_printf("Entering erts_poll_wait(), timeout=%d\n",
- (int) tv->tv_sec*1000 + tv->tv_usec/1000);
+ (int) tvp->tv_sec*1000 + tvp->tv_usec/1000);
#endif
if (ERTS_POLLSET_SET_POLLED_CHK(ps)) {
diff --git a/erts/emulator/test/Makefile b/erts/emulator/test/Makefile
index 0b0568c31a..dfbe47786a 100644
--- a/erts/emulator/test/Makefile
+++ b/erts/emulator/test/Makefile
@@ -31,6 +31,7 @@ MODULES= \
a_SUITE \
after_SUITE \
alloc_SUITE \
+ async_ports_SUITE \
beam_SUITE \
beam_literals_SUITE \
bif_SUITE \
diff --git a/erts/emulator/test/async_ports_SUITE.erl b/erts/emulator/test/async_ports_SUITE.erl
new file mode 100644
index 0000000000..c89b3655ff
--- /dev/null
+++ b/erts/emulator/test/async_ports_SUITE.erl
@@ -0,0 +1,118 @@
+-module(async_ports_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+-define(PACKET_SIZE, (10 * 1024 * 8)).
+-define(CPORT_DELAY, 100).
+-define(TEST_LOOPS_COUNT, 100000).
+-define(SLEEP_BEFORE_CHECK, 1000).
+-define(TEST_PROCS_COUNT, 2).
+-define(TC_TIMETRAP_SECONDS, 10).
+
+suite() -> [{ct_hooks,[ts_install_cth]}].
+
+all() ->
+ [
+ permanent_busy_test
+ ].
+
+permanent_busy_test(Config) ->
+ ct:timetrap({seconds, ?TC_TIMETRAP_SECONDS}),
+ ExePath = filename:join(?config(data_dir, Config), "cport"),
+
+ Self = self(),
+ spawn_link(
+ fun() ->
+ Block = <<0:?PACKET_SIZE>>,
+
+ Port = open_port(ExePath),
+
+ Testers =
+ lists:map(
+ fun(_) ->
+ erlang:spawn_link(?MODULE, run_loop,
+ [Self,
+ Port,
+ Block,
+ ?TEST_LOOPS_COUNT,
+ 0])
+ end,
+ lists:seq(1, ?TEST_PROCS_COUNT)),
+ Self ! {test_info, Port, Testers},
+ endless_flush(Port)
+ end),
+
+ receive
+ {test_info, Port, Testers} ->
+ MaxWaitTime = round(0.7 * ?TC_TIMETRAP_SECONDS * 1000),
+            ct:log("wait for testers, at most ~w msec~n", [MaxWaitTime]),
+ ok = wait_testers(MaxWaitTime, Testers),
+ timer:sleep(?SLEEP_BEFORE_CHECK),
+ case erlang:port_command(Port, <<"test">>, [nosuspend]) of
+ false ->
+ exit(port_dead);
+ true ->
+ ok
+ end
+ end.
+
+wait_testers(Timeout, Testers) ->
+ lists:foldl(
+ fun(Pid, AccIn) ->
+ StartWait = os:timestamp(),
+ receive
+ {Pid, port_dead} ->
+ recalc_timeout(AccIn, StartWait)
+ after AccIn ->
+ Pid ! stop,
+ recalc_timeout(AccIn, StartWait)
+ end
+ end, Timeout, Testers),
+ ok.
+
+recalc_timeout(TimeoutIn, WaitStart) ->
+ erlang:max(0, TimeoutIn - round(timer:now_diff(os:timestamp(), WaitStart)) div 1000).
+
+open_port(ExePath) ->
+ erlang:open_port({spawn, ExePath ++ " 100"}, [{packet, 4}, eof, exit_status, use_stdio, binary]).
+
+run_loop(RootProc, Port, Block, CheckLimit, BusyCnt) ->
+ receive
+ stop ->
+ ok
+ after 0 ->
+ case erlang:port_command(Port, Block, [nosuspend]) of
+ true ->
+ run_loop(RootProc, Port, Block, CheckLimit, 0);
+ false ->
+ if
+ BusyCnt + 1 > CheckLimit ->
+ check_dead(RootProc, Port, Block, CheckLimit);
+ true ->
+ run_loop(RootProc, Port, Block, CheckLimit, BusyCnt + 1)
+ end
+ end
+ end.
+
+check_dead(RootProc, Port, Block, CheckLimit) ->
+ ct:log("~p: check port dead~n", [self()]),
+ timer:sleep(?SLEEP_BEFORE_CHECK),
+ case erlang:port_command(Port, Block, [nosuspend]) of
+ true ->
+ ct:log("not dead~n"),
+ run_loop(RootProc, Port, Block, CheckLimit, 0);
+ false ->
+ ct:log("port dead: ~p~n", [Port]),
+ RootProc ! {self(), port_dead},
+ ok
+ end.
+
+endless_flush(Port) ->
+ receive
+ {Port, {data, _}} ->
+ endless_flush(Port);
+ {Port, SomethingWrong} ->
+            erlang:error({something_wrong, SomethingWrong})
+ end.
diff --git a/erts/emulator/test/async_ports_SUITE_data/Makefile.src b/erts/emulator/test/async_ports_SUITE_data/Makefile.src
new file mode 100644
index 0000000000..56da3fbe12
--- /dev/null
+++ b/erts/emulator/test/async_ports_SUITE_data/Makefile.src
@@ -0,0 +1,15 @@
+CC = @CC@
+LD = @LD@
+CFLAGS = @CFLAGS@ @DEFS@
+CROSSLDFLAGS = @CROSSLDFLAGS@
+
+PROGS = cport@exe@
+
+
+all: $(PROGS)
+
+cport@exe@: cport@obj@
+ $(LD) $(CROSSLDFLAGS) -o cport cport@obj@ @LIBS@
+
+cport@obj@: cport.c
+ $(CC) -c -o cport@obj@ $(CFLAGS) cport.c
diff --git a/erts/emulator/test/async_ports_SUITE_data/cport.c b/erts/emulator/test/async_ports_SUITE_data/cport.c
new file mode 100644
index 0000000000..033aff382a
--- /dev/null
+++ b/erts/emulator/test/async_ports_SUITE_data/cport.c
@@ -0,0 +1,81 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#ifdef __WIN32__
+# include "windows.h"
+# include "winbase.h"
+#else
+# include <unistd.h>
+#endif
+
+typedef unsigned char byte;
+
+int read_cmd(byte *buf)
+{
+ int len;
+ if (read_exact(buf, 4) != 4)
+ return(-1);
+
+ len = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
+ return read_exact(buf, len);
+}
+
+int write_cmd(byte *buf, int len)
+{
+ byte li[4];
+ li[0] = (len >> 24) & 0xff;
+ li[1] = (len >> 16) & 0xff;
+ li[2] = (len >> 8) & 0xff;
+ li[3] = len & 0xff;
+ write_exact(&li, 4);
+
+ return write_exact(buf, len);
+}
+
+int read_exact(byte *buf, int len)
+{
+ int i, got=0;
+ do {
+ if ((i = read(0, buf+got, len-got)) <= 0)
+ {
+ return(i);
+ }
+ got += i;
+ } while (got<len);
+ return len;
+}
+
+int write_exact(byte *buf, int len)
+{
+ int i, wrote = 0;
+ do {
+ if ((i = write(1, buf+wrote, len-wrote)) < 0)
+ return (i);
+ wrote += i;
+ } while (wrote<len);
+ return len;
+}
+
+byte static_buf[31457280]; // 30 mb
+
+int main(int argc, char **argv) {
+ int sleep_time = atoi(argv[1]);
+ int fn, arg, res;
+ byte *buf = &static_buf[0];
+ int len = 0;
+ if (sleep_time <= 0)
+ sleep_time = 0;
+#ifdef __WIN32__
+ else
+        sleep_time = ((sleep_time - 1) / 1000) + 1; /* milliseconds */
+#endif
+ while ((len = read_cmd(buf)) > 0) {
+#ifdef __WIN32__
+ Sleep((DWORD) sleep_time);
+#else
+ usleep(sleep_time);
+#endif
+ write_cmd(buf, len);
+ }
+}
diff --git a/erts/emulator/test/busy_port_SUITE.erl b/erts/emulator/test/busy_port_SUITE.erl
index 4b4af0babe..2ed5aaa0d0 100644
--- a/erts/emulator/test/busy_port_SUITE.erl
+++ b/erts/emulator/test/busy_port_SUITE.erl
@@ -98,8 +98,10 @@ generator(0, Writer, _Data) ->
%% Calling process_info(Pid, current_function) on a suspended process
%% used to crash Beam.
- {current_function, {erlang, send, 2}} =
- process_info(Writer, current_function),
+ case process_info(Writer, [status,current_function]) of
+ [{status,suspended},{current_function,{erlang,send,2}}] -> ok;
+ [{status,suspended},{current_function,{erlang,bif_return_trap,_}}] -> ok
+ end,
unlock_slave();
generator(N, Writer, Data) ->
Writer ! {exec, Data},
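
The test now asks for both items in one process_info/2 call, so the status and the current function are read in one go and stay consistent with each other. A hedged sketch of that multi-item form (helper name illustrative, Pid assumed to be a live local process):

    status_and_current_function(Pid) ->
        %% process_info/2 with a list argument returns {Item, Value}
        %% tuples in the order the items were requested.
        [{status, Status}, {current_function, MFA}] =
            process_info(Pid, [status, current_function]),
        {Status, MFA}.
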
diff --git a/erts/emulator/test/driver_SUITE.erl b/erts/emulator/test/driver_SUITE.erl
index c62bc0c454..336b6188f6 100644
--- a/erts/emulator/test/driver_SUITE.erl
+++ b/erts/emulator/test/driver_SUITE.erl
@@ -1062,10 +1062,9 @@ otp_6602(Config) when is_list(Config) ->
%% Inet driver use port locking...
{ok, S} = gen_udp:open(0),
{ok, Fd} = inet:getfd(S),
- {ok, Port} = inet:port(S),
%% Steal fd (lock checker used to
%% trigger here).
- {ok, _S2} = gen_udp:open(Port,[{fd,Fd}]),
+ {ok, _S2} = gen_udp:open(0,[{fd,Fd}]),
Parent ! Done
end),
?line receive Done -> ok end,
@@ -1085,7 +1084,15 @@ otp_6602(Config) when is_list(Config) ->
["async_thrs",
"sched_thrs"])).
--define(EXPECTED_SYSTEM_INFO_NAMES, ?EXPECTED_SYSTEM_INFO_NAMES2).
+-define(EXPECTED_SYSTEM_INFO_NAMES3,
+ (?EXPECTED_SYSTEM_INFO_NAMES2 ++
+ ["emu_nif_vsn"])).
+
+-define(EXPECTED_SYSTEM_INFO_NAMES4,
+ (?EXPECTED_SYSTEM_INFO_NAMES3 ++
+ ["dirty_sched"])).
+
+-define(EXPECTED_SYSTEM_INFO_NAMES, ?EXPECTED_SYSTEM_INFO_NAMES4).
'driver_system_info_base_ver'(doc) ->
[];
@@ -1133,16 +1140,18 @@ check_driver_system_info_result(Result) ->
drv_vsn_str2tup(erlang:system_info(driver_version))} of
{DDVSN, DDVSN} ->
?line [] = Ns;
- {{1, 0}, _} ->
+ %% {{1, 0}, _} ->
+ %% ?line ExpNs = lists:sort(?EXPECTED_SYSTEM_INFO_NAMES
+ %% -- ?EXPECTED_SYSTEM_INFO_NAMES1),
+ %% ?line ExpNs = lists:sort(Ns);
+ %% {{1, 1}, _} ->
+ %% ?line ExpNs = lists:sort(?EXPECTED_SYSTEM_INFO_NAMES
+ %% -- ?EXPECTED_SYSTEM_INFO_NAMES2),
+ %% ?line ExpNs = lists:sort(Ns);
+ {{3, 0}, _} ->
?line ExpNs = lists:sort(?EXPECTED_SYSTEM_INFO_NAMES
- -- ?EXPECTED_SYSTEM_INFO_NAMES1),
- ?line ExpNs = lists:sort(Ns);
- {{1, 1}, _} ->
- ?line ExpNs = lists:sort(?EXPECTED_SYSTEM_INFO_NAMES
- -- ?EXPECTED_SYSTEM_INFO_NAMES2),
- ?line ExpNs = lists:sort(Ns);
- {{2, 0}, _} ->
- ?line [] = Ns
+ -- ?EXPECTED_SYSTEM_INFO_NAMES3),
+ ?line ExpNs = lists:sort(Ns)
end.
chk_sis(SIs, Ns) ->
@@ -1189,6 +1198,14 @@ check_si_res(["async_thrs", Value]) ->
check_si_res(["sched_thrs", Value]) ->
?line Value = integer_to_list(erlang:system_info(schedulers));
+%% Data added in 3rd version of driver_system_info() (driver version 1.5)
+check_si_res(["emu_nif_vsn", _Value]) ->
+ true;
+
+%% Data added in 4th version of driver_system_info() (driver version 3.1)
+check_si_res(["dirty_sched", _Value]) ->
+ true;
+
check_si_res(Unexpected) ->
?line ?t:fail({unexpected_result, Unexpected}).
diff --git a/erts/emulator/test/driver_SUITE_data/sys_info_base_drv.c b/erts/emulator/test/driver_SUITE_data/sys_info_base_drv.c
index e44c7dbd5e..964034f5a6 100644
--- a/erts/emulator/test/driver_SUITE_data/sys_info_base_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/sys_info_base_drv.c
@@ -41,7 +41,9 @@
"thread=%s " \
"smp=%s " \
"async_thrs=%d " \
- "sched_thrs=%d"
+ "sched_thrs=%d " \
+ "emu_nif_vsn=%d.%d"
+
static size_t
sys_info_drv_max_res_len(ErlDrvSysInfo *sip)
@@ -55,6 +57,7 @@ sys_info_drv_max_res_len(ErlDrvSysInfo *sip)
slen += 5; /* smp */
slen += 20; /* async_thrs */
slen += 20; /* sched_thrs */
+ slen += 2*20; /* emu_nif_vsn */
return slen;
}
@@ -72,7 +75,9 @@ sys_info_drv_sprintf_sys_info(ErlDrvSysInfo *sip, char *str)
sip->thread_support ? "true" : "false",
sip->smp_support ? "true" : "false",
sip->async_threads,
- sip->scheduler_threads);
+ sip->scheduler_threads,
+ sip->nif_major_version,
+ sip->nif_minor_version);
}
#include "sys_info_drv_impl.c"
diff --git a/erts/emulator/test/driver_SUITE_data/sys_info_curr_drv.c b/erts/emulator/test/driver_SUITE_data/sys_info_curr_drv.c
index 5bbc966932..6d2c47fdaf 100644
--- a/erts/emulator/test/driver_SUITE_data/sys_info_curr_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/sys_info_curr_drv.c
@@ -40,7 +40,9 @@
"thread=%s " \
"smp=%s " \
"async_thrs=%d " \
- "sched_thrs=%d"
+ "sched_thrs=%d " \
+ "emu_nif_vsn=%d.%d " \
+ "dirty_sched=%s"
static size_t
sys_info_drv_max_res_len(ErlDrvSysInfo *sip)
@@ -54,6 +56,8 @@ sys_info_drv_max_res_len(ErlDrvSysInfo *sip)
slen += 5; /* smp */
slen += 20; /* async_thrs */
slen += 20; /* sched_thrs */
+ slen += 2*20; /* emu_nif_vsn */
+ slen += 5; /* dirty_sched */
return slen;
}
@@ -71,7 +75,10 @@ sys_info_drv_sprintf_sys_info(ErlDrvSysInfo *sip, char *str)
sip->thread_support ? "true" : "false",
sip->smp_support ? "true" : "false",
sip->async_threads,
- sip->scheduler_threads);
+ sip->scheduler_threads,
+ sip->nif_major_version,
+ sip->nif_minor_version,
+ sip->dirty_scheduler_support ? "true" : "false");
}
#include "sys_info_drv_impl.c"
diff --git a/erts/emulator/test/driver_SUITE_data/sys_info_prev_drv.c b/erts/emulator/test/driver_SUITE_data/sys_info_prev_drv.c
index 63c69f751c..2271d7027b 100644
--- a/erts/emulator/test/driver_SUITE_data/sys_info_prev_drv.c
+++ b/erts/emulator/test/driver_SUITE_data/sys_info_prev_drv.c
@@ -41,7 +41,9 @@
"thread=%s " \
"smp=%s " \
"async_thrs=%d " \
- "sched_thrs=%d"
+ "sched_thrs=%d " \
+ "emu_nif_vsn=%d.%d"
+
static size_t
sys_info_drv_max_res_len(ErlDrvSysInfo *sip)
@@ -55,6 +57,7 @@ sys_info_drv_max_res_len(ErlDrvSysInfo *sip)
slen += 5; /* smp */
slen += 20; /* async_thrs */
slen += 20; /* sched_thrs */
+ slen += 2*20; /* emu_nif_vsn */
return slen;
}
@@ -72,7 +75,9 @@ sys_info_drv_sprintf_sys_info(ErlDrvSysInfo *sip, char *str)
sip->thread_support ? "true" : "false",
sip->smp_support ? "true" : "false",
sip->async_threads,
- sip->scheduler_threads);
+ sip->scheduler_threads,
+ sip->nif_major_version,
+ sip->nif_minor_version);
}
#include "sys_info_drv_impl.c"
diff --git a/erts/emulator/test/fun_SUITE.erl b/erts/emulator/test/fun_SUITE.erl
index 8ad5f290ed..2968f5bebb 100644
--- a/erts/emulator/test/fun_SUITE.erl
+++ b/erts/emulator/test/fun_SUITE.erl
@@ -30,7 +30,7 @@
fun_to_port/1,t_hash/1,t_phash/1,t_phash2/1,md5/1,
refc/1,refc_ets/1,refc_dist/1,
const_propagation/1,t_arity/1,t_is_function2/1,
- t_fun_info/1]).
+ t_fun_info/1,t_fun_info_mfa/1]).
-export([nothing/0]).
@@ -42,7 +42,8 @@ all() ->
[bad_apply, bad_fun_call, badarity, ext_badarity,
equality, ordering, fun_to_port, t_hash, t_phash,
t_phash2, md5, refc, refc_ets, refc_dist,
- const_propagation, t_arity, t_is_function2, t_fun_info].
+ const_propagation, t_arity, t_is_function2, t_fun_info,
+ t_fun_info_mfa].
groups() ->
[].
@@ -824,6 +825,24 @@ t_fun_info(Config) when is_list(Config) ->
?line bad_info(<<1,2>>),
ok.
+t_fun_info_mfa(Config) when is_list(Config) ->
+ Fun1 = fun spawn_call/2,
+ {module,M1} = erlang:fun_info(Fun1, module),
+ {name,F1} = erlang:fun_info(Fun1, name),
+ {arity,A1} = erlang:fun_info(Fun1, arity),
+ {M1,F1,A1=2} = erlang:fun_info_mfa(Fun1),
+ %% Module fun.
+ Fun2 = fun ?MODULE:t_fun_info/1,
+ {module,M2} = erlang:fun_info(Fun2, module),
+ {name,F2} = erlang:fun_info(Fun2, name),
+ {arity,A2} = erlang:fun_info(Fun2, arity),
+ {M2,F2,A2=1} = erlang:fun_info_mfa(Fun2),
+
+ %% Not fun.
+ {'EXIT',_} = (catch erlang:fun_info_mfa(id(d))),
+ ok.
+
+
bad_info(Term) ->
try erlang:fun_info(Term, module) of
Any ->
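
fun_info_mfa/1, exercised above, returns the {Module, Name, Arity} triple in a single call instead of three separate fun_info/2 lookups, which is convenient when the result is fed straight to apply/3. A hedged sketch (the wrapper name and the lists:map/2 fun are examples only):

    fun_info_mfa_example() ->
        F = fun lists:map/2,
        {lists, map, 2} = erlang:fun_info_mfa(F),
        %% equivalent information via three fun_info/2 calls:
        {module, lists} = erlang:fun_info(F, module),
        {name, map}     = erlang:fun_info(F, name),
        {arity, 2}      = erlang:fun_info(F, arity),
        ok.
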
diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl
index b2da6f58af..14e6585220 100644
--- a/erts/emulator/test/nif_SUITE.erl
+++ b/erts/emulator/test/nif_SUITE.erl
@@ -37,7 +37,9 @@
threading/1, send/1, send2/1, send3/1, send_threaded/1, neg/1,
is_checks/1,
get_length/1, make_atom/1, make_string/1, reverse_list_test/1,
- otp_9668/1, consume_timeslice/1, dirty_nif/1, dirty_nif_send/1
+ otp_9828/1,
+ otp_9668/1, consume_timeslice/1, dirty_nif/1, dirty_nif_send/1,
+ dirty_nif_exception/1, nif_schedule/1
]).
-export([many_args_100/100]).
@@ -64,7 +66,9 @@ all() ->
resource_takeover, threading, send, send2, send3,
send_threaded, neg, is_checks, get_length, make_atom,
make_string,reverse_list_test,
- otp_9668, consume_timeslice, dirty_nif, dirty_nif_send
+ otp_9828,
+ otp_9668, consume_timeslice,
+ nif_schedule, dirty_nif, dirty_nif_send, dirty_nif_exception
].
groups() ->
@@ -1440,6 +1444,20 @@ otp_9668(Config) ->
?line verify_tmpmem(TmpMem),
ok.
+otp_9828(doc) -> ["Copy of writable binary"];
+otp_9828(Config) ->
+ ensure_lib_loaded(Config, 1),
+
+ otp_9828_loop(<<"I'm alive!">>, 1000).
+
+otp_9828_loop(Bin, 0) ->
+ ok;
+otp_9828_loop(Bin, Val) ->
+ WrtBin = <<Bin/binary, Val:32>>,
+ ok = otp_9828_nif(WrtBin),
+ otp_9828_loop(WrtBin, Val-1).
+
+
consume_timeslice(Config) when is_list(Config) ->
CONTEXT_REDS = 2000,
Me = self(),
@@ -1524,6 +1542,20 @@ consume_timeslice(Config) when is_list(Config) ->
ok.
+nif_schedule(Config) when is_list(Config) ->
+ ensure_lib_loaded(Config),
+ A = "this is a string",
+ B = {this,is,a,tuple},
+ {B,A} = call_nif_schedule(A, B),
+ ok = try call_nif_schedule(1, 2)
+ catch
+ error:badarg ->
+ [{?MODULE,call_nif_schedule,[1,2],_}|_] =
+ erlang:get_stacktrace(),
+ ok
+ end,
+ ok.
+
dirty_nif(Config) when is_list(Config) ->
try erlang:system_info(dirty_cpu_schedulers) of
N when is_integer(N) ->
@@ -1556,6 +1588,24 @@ dirty_nif_send(Config) when is_list(Config) ->
{skipped,"No dirty scheduler support"}
end.
+dirty_nif_exception(Config) when is_list(Config) ->
+ try erlang:system_info(dirty_cpu_schedulers) of
+ N when is_integer(N) ->
+ ensure_lib_loaded(Config),
+ try
+ call_dirty_nif_exception(),
+ ?t:fail(expected_badarg)
+ catch
+ error:badarg ->
+ [{?MODULE,call_dirty_nif_exception,[],_}|_] =
+ erlang:get_stacktrace(),
+ ok
+ end
+ catch
+ error:badarg ->
+ {skipped,"No dirty scheduler support"}
+ end.
+
next_msg(_Pid) ->
receive
M -> M
@@ -1684,9 +1734,12 @@ reverse_list(_) -> ?nif_stub.
echo_int(_) -> ?nif_stub.
type_sizes() -> ?nif_stub.
otp_9668_nif(_) -> ?nif_stub.
+otp_9828_nif(_) -> ?nif_stub.
consume_timeslice_nif(_,_) -> ?nif_stub.
+call_nif_schedule(_,_) -> ?nif_stub.
call_dirty_nif(_,_,_) -> ?nif_stub.
send_from_dirty_nif(_) -> ?nif_stub.
+call_dirty_nif_exception() -> ?nif_stub.
%% maps
is_map_nif(_) -> ?nif_stub.
diff --git a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
index 955dc64189..291c903947 100644
--- a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
+++ b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c
@@ -1473,6 +1473,26 @@ static ERL_NIF_TERM otp_9668_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM ar
return atom_ok;
}
+static ERL_NIF_TERM otp_9828_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+    /* Copying a writable binary could reallocate it due to "emasculation"
+       and thereby render a previous inspection invalid.
+     */
+ ErlNifBinary bin1;
+ ErlNifEnv* myenv;
+
+ if (!enif_inspect_binary(env, argv[0], &bin1)) {
+ return enif_make_badarg(env);
+ }
+
+ myenv = enif_alloc_env();
+ enif_make_copy(myenv, argv[0]);
+ enif_free_env(myenv);
+
+ return memcmp(bin1.data, "I'm alive!", 10)==0 ? atom_ok : atom_false;
+}
+
+
static ERL_NIF_TERM consume_timeslice_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
int percent;
@@ -1493,25 +1513,57 @@ static ERL_NIF_TERM consume_timeslice_nif(ErlNifEnv* env, int argc, const ERL_NI
}
}
+static ERL_NIF_TERM nif_sched2(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ char s[64];
+ if (!enif_get_string(env, argv[2], s, sizeof s, ERL_NIF_LATIN1))
+ return enif_make_badarg(env);
+ return enif_make_tuple2(env, argv[3], argv[2]);
+}
+
+static ERL_NIF_TERM nif_sched1(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ ERL_NIF_TERM new_argv[4];
+ new_argv[0] = enif_make_atom(env, "garbage0");
+ new_argv[1] = enif_make_atom(env, "garbage1");
+ new_argv[2] = argv[0];
+ new_argv[3] = argv[1];
+ return enif_schedule_nif(env, "nif_sched2", 0, nif_sched2, 4, new_argv);
+}
+
+static ERL_NIF_TERM call_nif_schedule(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (argc != 2)
+ return enif_make_atom(env, "false");
+ return enif_schedule_nif(env, "nif_sched1", 0, nif_sched1, argc, argv);
+}
+
#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
+
+static int have_dirty_schedulers(void)
+{
+ ErlNifSysInfo si;
+ enif_system_info(&si, sizeof(si));
+ return si.dirty_scheduler_support;
+}
+
static ERL_NIF_TERM dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
int n;
char s[10];
ErlNifBinary b;
ERL_NIF_TERM result;
- if (enif_have_dirty_schedulers()) {
+ if (have_dirty_schedulers()) {
assert(enif_is_on_dirty_scheduler(env));
}
assert(argc == 3);
enif_get_int(env, argv[0], &n);
enif_get_string(env, argv[1], s, sizeof s, ERL_NIF_LATIN1);
enif_inspect_binary(env, argv[2], &b);
- result = enif_make_tuple3(env,
- enif_make_int(env, n),
- enif_make_string(env, s, ERL_NIF_LATIN1),
- enif_make_binary(env, &b));
- return enif_schedule_dirty_nif_finalizer(env, result, enif_dirty_nif_finalizer);
+ return enif_make_tuple3(env,
+ enif_make_int(env, n),
+ enif_make_string(env, s, ERL_NIF_LATIN1),
+ enif_make_binary(env, &b));
}
static ERL_NIF_TERM call_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
@@ -1522,11 +1574,11 @@ static ERL_NIF_TERM call_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM
assert(!enif_is_on_dirty_scheduler(env));
if (argc != 3)
return enif_make_badarg(env);
- if (enif_have_dirty_schedulers()) {
+ if (have_dirty_schedulers()) {
if (enif_get_int(env, argv[0], &n) &&
enif_get_string(env, argv[1], s, sizeof s, ERL_NIF_LATIN1) &&
enif_inspect_binary(env, argv[2], &b))
- return enif_schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_CPU_BOUND, dirty_nif, argc, argv);
+ return enif_schedule_nif(env, "call_dirty_nif", ERL_NIF_DIRTY_JOB_CPU_BOUND, dirty_nif, argc, argv);
else
return enif_make_badarg(env);
} else {
@@ -1534,35 +1586,42 @@ static ERL_NIF_TERM call_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM
}
}
-static ERL_NIF_TERM dirty_sender(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+static ERL_NIF_TERM send_from_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
ERL_NIF_TERM result;
ErlNifPid pid;
ErlNifEnv* menv;
int res;
- enif_get_local_pid(env, argv[0], &pid);
+ if (!enif_get_local_pid(env, argv[0], &pid))
+ return enif_make_badarg(env);
result = enif_make_tuple2(env, enif_make_atom(env, "ok"), enif_make_pid(env, &pid));
menv = enif_alloc_env();
res = enif_send(env, &pid, menv, result);
enif_free_env(menv);
if (!res)
- /* Note the next line will crash, since dirty nifs can't return exceptions.
- * This is intentional, since enif_send should not fail if the test succeeds.
- */
- return enif_schedule_dirty_nif_finalizer(env, enif_make_badarg(env), enif_dirty_nif_finalizer);
+ return enif_make_badarg(env);
else
- return enif_schedule_dirty_nif_finalizer(env, result, enif_dirty_nif_finalizer);
+ return result;
}
-static ERL_NIF_TERM send_from_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+static ERL_NIF_TERM call_dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
- ERL_NIF_TERM result;
- ErlNifPid pid;
-
- if (!enif_get_local_pid(env, argv[0], &pid))
+ switch (argc) {
+ case 0: {
+ ERL_NIF_TERM args[255];
+ int i;
+ for (i = 0; i < 255; i++)
+ args[i] = enif_make_int(env, i);
+ return enif_schedule_nif(env, "call_dirty_nif_exception", ERL_NIF_DIRTY_JOB_CPU_BOUND,
+ call_dirty_nif_exception, 255, argv);
+ }
+ case 1:
return enif_make_badarg(env);
- return enif_schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_CPU_BOUND, dirty_sender, argc, argv);
+ default:
+ return enif_schedule_nif(env, "call_dirty_nif_exception", ERL_NIF_DIRTY_JOB_CPU_BOUND,
+ call_dirty_nif_exception, argc-1, argv);
+ }
}
#endif
@@ -1741,10 +1800,13 @@ static ErlNifFunc nif_funcs[] =
{"echo_int", 1, echo_int},
{"type_sizes", 0, type_sizes},
{"otp_9668_nif", 1, otp_9668_nif},
+ {"otp_9828_nif", 1, otp_9828_nif},
{"consume_timeslice_nif", 2, consume_timeslice_nif},
+ {"call_nif_schedule", 2, call_nif_schedule},
#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
{"call_dirty_nif", 3, call_dirty_nif},
- {"send_from_dirty_nif", 1, send_from_dirty_nif},
+ {"send_from_dirty_nif", 1, send_from_dirty_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND},
+ {"call_dirty_nif_exception", 0, call_dirty_nif_exception, ERL_NIF_DIRTY_JOB_IO_BOUND},
#endif
{"is_map_nif", 1, is_map_nif},
{"get_map_size_nif", 1, get_map_size_nif},
diff --git a/erts/emulator/test/num_bif_SUITE.erl b/erts/emulator/test/num_bif_SUITE.erl
index ff8d18eef8..8cf8377c30 100644
--- a/erts/emulator/test/num_bif_SUITE.erl
+++ b/erts/emulator/test/num_bif_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2014. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -394,18 +394,15 @@ t_string_to_integer(Config) when is_list(Config) ->
test_sti(268435455),
test_sti(-268435455),
- %% 1 bsl 28 - 1, just before 32 bit bignum
- test_sti(1 bsl 28 - 1),
- %% 1 bsl 28, just beyond 32 bit small
- test_sti(1 bsl 28),
- %% 1 bsl 33, just beyond 32 bit
- test_sti(1 bsl 33),
- %% 1 bsl 60 - 1, just before 64 bit bignum
- test_sti(1 bsl 60 - 1),
- %% 1 bsl 60, just beyond 64 bit small
- test_sti(1 bsl 60),
- %% 1 bsl 65, just beyond 64 bit
- test_sti(1 bsl 65),
+    %% Interesting values around powers of two, such as MIN_SMALL and MAX_SMALL.
+ lists:foreach(fun(Bits) ->
+ N = 1 bsl Bits,
+ test_sti(N - 1),
+ test_sti(N),
+ test_sti(N + 1)
+ end,
+ lists:seq(16, 130)),
+
%% Bignums.
test_sti(123456932798748738738,16),
test_sti(list_to_integer(lists:duplicate(2000, $1))),
@@ -454,10 +451,11 @@ test_sti(Num) ->
end|| Base <- lists:seq(2,36)].
test_sti(Num,Base) ->
- Num = list_to_integer(int2list(Num,Base),Base),
- Num = -1*list_to_integer(int2list(Num*-1,Base),Base),
- Num = binary_to_integer(int2bin(Num,Base),Base),
- Num = -1*binary_to_integer(int2bin(Num*-1,Base),Base).
+ Neg = -Num,
+ Num = list_to_integer(int2list(Num,Base),Base),
+ Neg = list_to_integer(int2list(Num*-1,Base),Base),
+ Num = binary_to_integer(int2bin(Num,Base),Base),
+ Neg = binary_to_integer(int2bin(Num*-1,Base),Base).
% Calling this function (which is not supposed to be inlined) prevents
% the compiler from calculating the answer, so we don't test the compiler
diff --git a/erts/emulator/test/tuple_SUITE.erl b/erts/emulator/test/tuple_SUITE.erl
index 46ece41096..f627eea07f 100644
--- a/erts/emulator/test/tuple_SUITE.erl
+++ b/erts/emulator/test/tuple_SUITE.erl
@@ -21,8 +21,9 @@
init_per_group/2,end_per_group/2,
t_size/1, t_tuple_size/1, t_element/1, t_setelement/1,
t_insert_element/1, t_delete_element/1,
- t_list_to_tuple/1, t_tuple_to_list/1,
- t_make_tuple_2/1, t_make_tuple_3/1, t_append_element/1,
+ t_list_to_tuple/1, t_list_to_upper_boundry_tuple/1, t_tuple_to_list/1,
+ t_make_tuple_2/1, t_make_upper_boundry_tuple_2/1, t_make_tuple_3/1,
+ t_append_element/1, t_append_element_upper_boundry/1,
build_and_match/1, tuple_with_case/1, tuple_in_guard/1]).
-include_lib("test_server/include/test_server.hrl").
@@ -40,8 +41,10 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[build_and_match, t_size, t_tuple_size, t_list_to_tuple,
+ t_list_to_upper_boundry_tuple,
t_tuple_to_list, t_element, t_setelement,
- t_make_tuple_2, t_make_tuple_3, t_append_element,
+ t_make_tuple_2, t_make_upper_boundry_tuple_2, t_make_tuple_3,
+ t_append_element, t_append_element_upper_boundry,
t_insert_element, t_delete_element,
tuple_with_case, tuple_in_guard].
@@ -49,11 +52,21 @@ groups() ->
[].
init_per_suite(Config) ->
+ A0 = case application:start(sasl) of
+ ok -> [sasl];
+ _ -> []
+ end,
+ A = case application:start(os_mon) of
+ ok -> [os_mon|A0];
+ _ -> A0
+ end,
+ [{started_apps, A}|Config].
+
+end_per_suite(Config) ->
+ As = ?config(started_apps, Config),
+ lists:foreach(fun (A) -> application:stop(A) end, As),
Config.
-end_per_suite(_Config) ->
- ok.
-
init_per_group(_GroupName, Config) ->
Config.
@@ -176,14 +189,19 @@ t_list_to_tuple(Config) when is_list(Config) ->
{'EXIT', {badarg, _}} = (catch list_to_tuple(id([a|b]))),
{'EXIT', {badarg, _}} = (catch list_to_tuple(id([a|b]))),
- % test upper boundry, 16777215 elements
- MaxSize = 1 bsl 24 - 1,
- MaxTuple = list_to_tuple(lists:seq(1, MaxSize)),
- MaxSize = size(MaxTuple),
-
{'EXIT', {badarg,_}} = (catch list_to_tuple(lists:seq(1, 1 bsl 24))),
ok.
+t_list_to_upper_boundry_tuple(Config) when is_list(Config) ->
+ sys_mem_cond_run(2048,
+ fun () ->
+                     %% test upper boundary, 16777215 elements
+ MaxSize = 1 bsl 24 - 1,
+ MaxTuple = list_to_tuple(lists:seq(1, MaxSize)),
+ MaxSize = size(MaxTuple),
+ ok
+ end).
+
%% Tests tuple_to_list/1.
t_tuple_to_list(Config) when is_list(Config) ->
@@ -214,8 +232,6 @@ t_make_tuple_2(Config) when is_list(Config) ->
t_make_tuple1({a}),
t_make_tuple1(erlang:make_tuple(400, [])),
- % test upper boundry, 16777215 elements
- t_make_tuple(1 bsl 24 - 1, a),
{'EXIT', {badarg,_}} = (catch erlang:make_tuple(1 bsl 24, a)),
{'EXIT', {badarg,_}} = (catch erlang:make_tuple(-1, a)),
@@ -225,6 +241,13 @@ t_make_tuple_2(Config) when is_list(Config) ->
{'EXIT', {badarg,_}} = (catch erlang:make_tuple(1 bsl 65 + 3, a)),
ok.
+t_make_upper_boundry_tuple_2(Config) when is_list(Config) ->
+ sys_mem_cond_run(2048,
+ fun () ->
+                     %% test upper boundary, 16777215 elements
+ t_make_tuple(1 bsl 24 - 1, a)
+ end).
+
t_make_tuple1(Element) ->
lists:foreach(fun(Size) -> t_make_tuple(Size, Element) end,
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255, 256, 511, 512, 999,
@@ -309,13 +332,17 @@ t_delete_element(Config) when is_list(Config) ->
%% Tests the append_element/2 BIF.
t_append_element(Config) when is_list(Config) ->
- ok = t_append_element({}, 2048, 2048),
-
- % test upper boundry, 16777215 elements
- MaxSize = 1 bsl 24 - 1,
- MaxTuple = list_to_tuple(lists:seq(1, MaxSize)),
- {'EXIT',{badarg,_}} = (catch erlang:append_element(MaxTuple, a)),
- ok.
+ ok = t_append_element({}, 2048, 2048).
+
+t_append_element_upper_boundry(Config) when is_list(Config) ->
+ sys_mem_cond_run(2048,
+ fun () ->
+                     %% test upper boundary, 16777215 elements
+ MaxSize = 1 bsl 24 - 1,
+ MaxTuple = list_to_tuple(lists:seq(1, MaxSize)),
+ {'EXIT',{badarg,_}} = (catch erlang:append_element(MaxTuple, a)),
+ ok
+ end).
t_append_element(_Tuple, 0, _High) -> ok;
t_append_element(Tuple, N, High) ->
@@ -371,3 +398,31 @@ tuple_in_guard(Config) when is_list(Config) ->
%% Use this function to avoid compile-time evaluation of an expression.
id(I) -> I.
+
+sys_mem_cond_run(ReqSizeMB, TestFun) when is_integer(ReqSizeMB) ->
+ case total_memory() of
+ TotMem when is_integer(TotMem), TotMem >= ReqSizeMB ->
+ TestFun();
+ TotMem when is_integer(TotMem) ->
+ {skipped, "Not enough memory ("++integer_to_list(TotMem)++" MB)"};
+ undefined ->
+ {skipped, "Could not retrieve memory information"}
+ end.
+
+
+total_memory() ->
+    %% Total memory in MB.
+ try
+ MemoryData = memsup:get_system_memory_data(),
+ case lists:keysearch(total_memory, 1, MemoryData) of
+ {value, {total_memory, TM}} ->
+ TM div (1024*1024);
+ false ->
+ {value, {system_total_memory, STM}} =
+ lists:keysearch(system_total_memory, 1, MemoryData),
+ STM div (1024*1024)
+ end
+ catch
+ _ : _ ->
+ undefined
+ end.
diff --git a/erts/epmd/src/epmd.c b/erts/epmd/src/epmd.c
index 3cfa7a782f..0823dcaa4e 100644
--- a/erts/epmd/src/epmd.c
+++ b/erts/epmd/src/epmd.c
@@ -345,7 +345,7 @@ static void run_daemon(EpmdVars *g)
inform it of that the log is closed. */
closelog();
- /* These chouldn't be needed but for safety... */
+ /* These shouldn't be needed but for safety... */
open("/dev/null", O_RDONLY); /* Order is important! */
open("/dev/null", O_WRONLY);
@@ -386,7 +386,7 @@ static void run_daemon(EpmdVars *g)
close(1);
close(2);
- /* These chouldn't be needed but for safety... */
+ /* These shouldn't be needed but for safety... */
open("nul", O_RDONLY);
open("nul", O_WRONLY);
@@ -498,7 +498,11 @@ static void dbg_gen_printf(int onsyslog,int perr,int from_level,
#ifdef HAVE_SYSLOG_H
if (onsyslog)
{
- erts_vsnprintf(buf, DEBUG_BUFFER_SIZE, format, args);
+ int len;
+ len = erts_vsnprintf(buf, DEBUG_BUFFER_SIZE, format, args);
+ if (perr != 0 && len < sizeof(buf)) {
+ erts_snprintf(buf+len, sizeof(buf)-len, ": %s", strerror(perr));
+ }
syslog(LOG_ERR,"epmd: %s",buf);
}
#endif
diff --git a/erts/etc/common/run_erl_common.c b/erts/etc/common/run_erl_common.c
index dc55c2bea4..580b6cc3c5 100644
--- a/erts/etc/common/run_erl_common.c
+++ b/erts/etc/common/run_erl_common.c
@@ -74,15 +74,6 @@
* run_erl multiple times with different global variables without them
* effecting eachother.
*/
-typedef struct run_erl_ run_erl;
-
-#ifdef __OSE__
-static OSPPDKEY run_erl_pp_key;
-#define RE_DATA (*(run_erl**)ose_get_ppdata(run_erl_pp_key))
-#else
-static run_erl re;
-#define RE_DATA (&re)
-#endif
#define STATUSFILE (RE_DATA->statusfile)
#define LOG_DIR (RE_DATA->log_dir)
@@ -116,6 +107,16 @@ struct run_erl_ {
unsigned protocol_ver;
};
+typedef struct run_erl_ run_erl;
+
+#ifdef __OSE__
+static OSPPDKEY run_erl_pp_key;
+#define RE_DATA (*(run_erl**)ose_get_ppdata(run_erl_pp_key))
+#else
+static run_erl re;
+#define RE_DATA (&re)
+#endif
+
/* prototypes */
static int next_log(int log_num);
diff --git a/erts/etc/unix/run_erl.c b/erts/etc/unix/run_erl.c
index a6fc4c2bf5..4b123b8911 100644
--- a/erts/etc/unix/run_erl.c
+++ b/erts/etc/unix/run_erl.c
@@ -40,9 +40,13 @@
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
+
#ifdef HAVE_WORKING_POSIX_OPENPT
+#ifndef _XOPEN_SOURCE
#define _XOPEN_SOURCE 600
#endif
+#endif
+
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/stat.h>
diff --git a/erts/preloaded/ebin/erl_prim_loader.beam b/erts/preloaded/ebin/erl_prim_loader.beam
index 74f76f045d..c8ec111e57 100644
--- a/erts/preloaded/ebin/erl_prim_loader.beam
+++ b/erts/preloaded/ebin/erl_prim_loader.beam
Binary files differ
diff --git a/erts/preloaded/ebin/erlang.beam b/erts/preloaded/ebin/erlang.beam
index fdc7401475..32ff6a3874 100644
--- a/erts/preloaded/ebin/erlang.beam
+++ b/erts/preloaded/ebin/erlang.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_inet.beam b/erts/preloaded/ebin/prim_inet.beam
index fe5431c5ff..f58ee4b4d5 100644
--- a/erts/preloaded/ebin/prim_inet.beam
+++ b/erts/preloaded/ebin/prim_inet.beam
Binary files differ
diff --git a/erts/preloaded/src/erl_prim_loader.erl b/erts/preloaded/src/erl_prim_loader.erl
index 466e0b0020..6b86a427ba 100644
--- a/erts/preloaded/src/erl_prim_loader.erl
+++ b/erts/preloaded/src/erl_prim_loader.erl
@@ -1507,7 +1507,14 @@ real_path(Name,[Path|Paths],Acc,Links) ->
[""|_] = LinkPaths ->
real_path(Name,LinkPaths++Paths,[],[ThisFile|Links]);
LinkPaths ->
- real_path(Name,LinkPaths++Paths,Acc,[ThisFile|Links])
+ % Windows currently does not allow creation of relative symlinks
+ % across different drives
+ case erlang:system_info(os_type) of
+ {win32, _} ->
+ real_path(Name,LinkPaths++Paths,[],[ThisFile|Links]);
+ _ ->
+ real_path(Name,LinkPaths++Paths,Acc,[ThisFile|Links])
+ end
end;
_ ->
real_path(Name,Paths,This,Links)
diff --git a/erts/preloaded/src/erlang.erl b/erts/preloaded/src/erlang.erl
index d646bc19a5..b96a601792 100644
--- a/erts/preloaded/src/erlang.erl
+++ b/erts/preloaded/src/erlang.erl
@@ -91,9 +91,9 @@
-export([external_size/2, finish_after_on_load/2, finish_loading/1, float/1]).
-export([float_to_binary/1, float_to_binary/2,
float_to_list/1, float_to_list/2]).
--export([fun_info/2, fun_to_list/1, function_exported/3]).
+-export([fun_info/2, fun_info_mfa/1, fun_to_list/1, function_exported/3]).
-export([garbage_collect/0, garbage_collect/1, garbage_collect/2]).
--export([garbage_collect_message_area/0, get/0, get/1, get_keys/1]).
+-export([garbage_collect_message_area/0, get/0, get/1, get_keys/0, get_keys/1]).
-export([get_module_info/1, get_stacktrace/0, group_leader/0]).
-export([group_leader/2, halt/0, halt/1, halt/2, hash/2, hibernate/3]).
-export([insert_element/3]).
@@ -827,6 +827,15 @@ float_to_list(_Float, _Options) ->
fun_info(_Fun, _Item) ->
erlang:nif_error(undefined).
+%% fun_info_mfa/1
+-spec erlang:fun_info_mfa(Fun) -> {Mod, Name, Arity} when
+ Fun :: function(),
+ Mod :: atom(),
+ Name :: atom(),
+ Arity :: non_neg_integer().
+fun_info_mfa(_Fun) ->
+ erlang:nif_error(undefined).
+
%% fun_to_list/1
-spec erlang:fun_to_list(Fun) -> string() when
Fun :: function().
@@ -922,6 +931,12 @@ get() ->
get(_Key) ->
erlang:nif_error(undefined).
+%% get_keys/0
+-spec get_keys() -> [Key] when
+ Key :: term().
+get_keys() ->
+ erlang:nif_error(undefined).
+
%% get_keys/1
-spec get_keys(Val) -> [Key] when
Val :: term(),
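For orientation, a hedged shell transcript showing how the two new stubs are meant to be called once the corresponding BIFs are loaded (the session below is assumed, not taken from the patch):

%% erlang:fun_info_mfa/1 returns the {Module, Name, Arity} triple of a fun.
1> erlang:fun_info_mfa(fun lists:sort/1).
{lists,sort,1}
%% erlang:get_keys/0 returns all keys of the process dictionary.
2> put(color, blue), erlang:get_keys().
[color]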
diff --git a/erts/preloaded/src/prim_inet.erl b/erts/preloaded/src/prim_inet.erl
index 143c718130..79ff013c77 100644
--- a/erts/preloaded/src/prim_inet.erl
+++ b/erts/preloaded/src/prim_inet.erl
@@ -25,7 +25,7 @@
%% Primitive inet_drv interface
--export([open/3, open/4, fdopen/4, close/1]).
+-export([open/3, open/4, fdopen/4, fdopen/5, close/1]).
-export([bind/3, listen/1, listen/2, peeloff/2]).
-export([connect/3, connect/4, async_connect/4]).
-export([accept/1, accept/2, async_accept/2]).
@@ -70,7 +70,12 @@ open(Protocol, Family, Type, Opts) ->
open(Protocol, Family, Type, Opts, ?INET_REQ_OPEN, []).
fdopen(Protocol, Family, Type, Fd) when is_integer(Fd) ->
- open(Protocol, Family, Type, [], ?INET_REQ_FDOPEN, ?int32(Fd)).
+ fdopen(Protocol, Family, Type, Fd, true).
+
+fdopen(Protocol, Family, Type, Fd, Bound)
+ when is_integer(Fd), Bound == true orelse Bound == false ->
+ open(Protocol, Family, Type, [], ?INET_REQ_FDOPEN,
+ [?int32(Fd), enc_value_2(bool, Bound)]).
open(Protocol, Family, Type, Opts, Req, Data) ->
Drv = protocol2drv(Protocol),
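A minimal sketch of the new arity, assuming Fd is an already-open socket descriptor handed over from elsewhere; passing Bound = false tells the driver that the descriptor has not been bound yet, while fdopen/4 keeps the old behaviour by passing Bound = true:

%% Assumed call site; Fd is a hypothetical pre-opened descriptor.
{ok, Sock} = prim_inet:fdopen(tcp, inet, stream, Fd, false).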