Diffstat (limited to 'erts')
100 files changed, 4296 insertions, 1224 deletions
diff --git a/erts/autoconf/vxworks/sed.general b/erts/autoconf/vxworks/sed.general index dbb9420b67..efa4e99054 100644 --- a/erts/autoconf/vxworks/sed.general +++ b/erts/autoconf/vxworks/sed.general @@ -57,6 +57,7 @@ s|@ETHR_LIB_NAME@|| s|@ETHR_DEFS@|| s|@ETHR_THR_LIB_BASE@|| s|@ETHR_THR_LIB_BASE_DIR@|| +s|@SYSTEMD_DAEMON_LIBS@|| s|@EMU_THR_DEFS@|| s|@EMU_THR_LIBS@|| s|@EMU_THR_LIB_NAME@|ethread| diff --git a/erts/configure.in b/erts/configure.in index 04303da4f8..c8b96c50f0 100644 --- a/erts/configure.in +++ b/erts/configure.in @@ -446,13 +446,13 @@ else fi AC_ARG_ENABLE(static-nifs, -AS_HELP_STRING([--enable-static-nifs], [link nifs statically. If yes then all nifs in all Erlang/OTP applications will be statically linked into the main binary. It is also possible to give a list of nifs that should be linked statically. The list should be a comma seperated and contain the absolute path to a .a archive for each nif that is to be statically linked. The name of the .a archive has to be the same as the name of the nif. Note that you have to link any external dependencies that the nifs have to the main binary, so for the crypto nif you want to pass LIBS=-lcrypto to configure.]), +AS_HELP_STRING([--enable-static-nifs], [link nifs statically. If yes then all nifs in all Erlang/OTP applications will be statically linked into the main binary. It is also possible to give a list of nifs that should be linked statically. The list should be a comma separated and contain the absolute path to a .a archive for each nif that is to be statically linked. The name of the .a archive has to be the same as the name of the nif. Note that you have to link any external dependencies that the nifs have to the main binary, so for the crypto nif you want to pass LIBS=-lcrypto to configure.]), STATIC_NIFS="$enableval", STATIC_NIFS=no) AC_SUBST(STATIC_NIFS) AC_ARG_ENABLE(static-drivers, -AS_HELP_STRING([--enable-static-drivers], [comma seperated list of linked-in drivers to link statically with the main binary. The list should contain the absolute path to a .a archive for each driver that is to be statically linked. The name of the .a archive has to be the same as the name of the driver.]), +AS_HELP_STRING([--enable-static-drivers], [comma separated list of linked-in drivers to link statically with the main binary. The list should contain the absolute path to a .a archive for each driver that is to be statically linked. The name of the .a archive has to be the same as the name of the driver.]), STATIC_DRIVERS="$enableval", STATIC_DRIVERS=no) AC_SUBST(STATIC_DRIVERS) @@ -673,6 +673,7 @@ case $chk_arch_ in x86_64) ARCH=amd64;; amd64) ARCH=amd64;; macppc) ARCH=ppc;; + powerpc) ARCH=ppc;; ppc) ARCH=ppc;; ppc64) ARCH=ppc64;; "Power Macintosh") ARCH=ppc;; @@ -680,8 +681,10 @@ case $chk_arch_ in armv5teb) ARCH=arm;; armv5tel) ARCH=arm;; armv5tejl) ARCH=arm;; - armv6l) ARCH=arm;; + armv6l) ARCH=arm;; + armv6hl) ARCH=arm;; armv7l) ARCH=arm;; + armv7hl) ARCH=arm;; tile) ARCH=tile;; *) ARCH=noarch;; esac @@ -4818,6 +4821,26 @@ if test "x$GCC" = xyes; then fi dnl ---------------------------------------------------------------------- +dnl Enable -fsanitize= flags. 
+dnl ---------------------------------------------------------------------- + +m4_define(DEFAULT_SANITIZERS, [address,undefined]) +AC_ARG_ENABLE( + sanitizers, + AS_HELP_STRING( + [--enable-sanitizers@<:@=comma-separated list of sanitizers@:>@], + [Default=DEFAULT_SANITIZERS]), +[ +case "$enableval" in + no) sanitizers= ;; + yes) sanitizers="-fsanitize=DEFAULT_SANITIZERS" ;; + *) sanitizers="-fsanitize=$enableval" ;; +esac +CFLAGS="$CFLAGS $sanitizers" +LDFLAGS="$LDFLAGS $sanitizers" +]) + +dnl ---------------------------------------------------------------------- dnl Output the result. dnl ---------------------------------------------------------------------- diff --git a/erts/doc/src/crash_dump.xml b/erts/doc/src/crash_dump.xml index c59741f250..d3de29b876 100644 --- a/erts/doc/src/crash_dump.xml +++ b/erts/doc/src/crash_dump.xml @@ -85,20 +85,22 @@ operating system.</p> <list type="bulleted"> <item>"<em><A></em>: Cannot allocate <em><N></em> - bytes of memory (of type "<em><T></em>")." - The system - has run out of memory. <A> is the allocator that failed - to allocate memory, <N> is the number of bytes that - <A> tried to allocate, and <T> is the memory block - type that the memory was needed for. The most common case is - that a process stores huge amounts of data. In this case - <T> is most often <c><![CDATA[heap]]></c>, <c><![CDATA[old_heap]]></c>, - <c><![CDATA[heap_frag]]></c>, or <c><![CDATA[binary]]></c>. For more information on - allocators see - <seealso marker="erts_alloc">erts_alloc(3)</seealso>.</item> + bytes of memory (of type "<em><T></em>", thread + <em><I></em>em>)." - The system has run out of memory. <A> + is the allocator that failed to allocate memory, <N> is the + number of bytes that <A> tried to allocate, <T> is the + memory block type that the memory was needed for, and <I> is the + thread identifier. The most common case is that a process stores huge + amounts of data. In this case <T> is most often + <c><![CDATA[heap]]></c>, <c><![CDATA[old_heap]]></c>, + <c><![CDATA[heap_frag]]></c>, or <c><![CDATA[binary]]></c>. + For more information on allocators see + <seealso marker="erts_alloc">erts_alloc(3)</seealso>.</item> <item>"<em><A></em>: Cannot reallocate <em><N></em> - bytes of memory (of type "<em><T></em>")." - Same as - above with the exception that memory was being reallocated - instead of being allocated when the system ran out of memory.</item> + bytes of memory (of type "<em><T></em>", thread + <em><I></em>em>)." - Same as above with the exception that memory + was being reallocated instead of being allocated when the system ran + out of memory.</item> <item>"Unexpected op code <em>N</em>" - Error in compiled code, <c><![CDATA[beam]]></c> file damaged or error in the compiler.</item> <item>"Module <em>Name</em> undefined" <c><![CDATA[|]]></c> "Function @@ -246,6 +248,9 @@ <tag><em>Last scheduled in for | Current call</em></tag> <item>The current function of the process. These fields will not always exist.</item> + <tag><em>Run queue</em></tag> + <item>The identifier of the scheduler run queue in which the process is + running.</item> <tag><em>Spawned by</em></tag> <item>The parent of the process, i.e. the process which executed <c><![CDATA[spawn]]></c> or <c><![CDATA[spawn_link]]></c>.</item> diff --git a/erts/doc/src/epmd.xml b/erts/doc/src/epmd.xml index 963d35c3c8..25f819ab50 100644 --- a/erts/doc/src/epmd.xml +++ b/erts/doc/src/epmd.xml @@ -58,12 +58,12 @@ of the IP address and a port number. 
The name of the node is an atom on the form of <c><![CDATA[Name@Node]]></c>. The job of the <c><![CDATA[epmd]]></c> daemon is to keep track of which - node name listens on which address. Hence, <c><![CDATA[epmd]]></c> map + node name listens on which address. Hence, <c><![CDATA[epmd]]></c> maps symbolic node names to machine addresses.</p> <p>The TCP/IP <c>epmd</c> daemon actually only keeps track of - the <c>Name</c> (first) part of an Erlang node name, the <c>Host</c> - part (whatever is after the <c><![CDATA[@]]></c> is implicit in the + the <c>Name</c> (first) part of an Erlang node name. The <c>Host</c> + part (whatever is after the <c><![CDATA[@]]></c>) is implicit in the node name where the <c>epmd</c> daemon was actually contacted, as is the IP address where the Erlang node can be reached. Consistent and correct TCP naming services are @@ -77,12 +77,12 @@ <p>The daemon is started automatically by the <c>erl</c> command if the node is to be distributed and there is no running instance present. If automatically launched, - environment variables has to be used to alter the behavior of + environment variables have to be used to alter the behavior of the daemon. See the <seealso marker="#environment_variables">Environment variables</seealso> section below.</p> - <p>If the -daemon argument is not given, the + <p>If the -daemon argument is not given, <c><![CDATA[epmd]]></c> runs as a normal program with the controlling terminal of the shell in which it is started. Normally, it should run as a daemon.</p> @@ -122,7 +122,7 @@ comma-separated list of IP addresses and on the loopback address (which is implicitly added to the list if it has not been specified). This can also be set using the - <c><![CDATA[ERL_EPMD_ADDRESS]]></c> environment variable, see the + <c><![CDATA[ERL_EPMD_ADDRESS]]></c> environment variable. See the section <seealso marker="#environment_variables">Environment variables</seealso> below.</p> </item> @@ -130,7 +130,7 @@ <item> <p>Let this instance of epmd listen to another TCP port than default 4369. This can also be set using the - <c><![CDATA[ERL_EPMD_PORT]]></c> environment variable, see the + <c><![CDATA[ERL_EPMD_PORT]]></c> environment variable. See the section <seealso marker="#environment_variables">Environment variables</seealso> below</p> </item> @@ -153,7 +153,7 @@ <p>With relaxed command checking, the <c>epmd</c> daemon can be killed from the localhost with i.e. <c>epmd -kill</c> even if there are active nodes registered. Normally only daemons with an empty node database can be killed with the <c>epmd -kill</c> command.</p> </item> <item> - <p>The <c>epmd -stop</c> command (and the corresponding messages to epmd, as can be given using <c>erl_interface/ei</c>) is normally always ignored, as it opens up for strange situation when two nodes of the same name can be alive at the same time. A node unregisters itself by just closing the connection to epmd, why the <c>stop</c> command was only intended for use in debugging situations.</p> + <p>The <c>epmd -stop</c> command (and the corresponding messages to epmd, as can be given using <c>erl_interface/ei</c>) is normally always ignored, as it opens up the possibility of a strange situation where two nodes of the same name can be alive at the same time. 
A node unregisters itself by just closing the connection to epmd, which is why the <c>stop</c> command was only intended for use in debugging situations.</p> <p>With relaxed command checking enabled, you can forcibly unregister live nodes.</p> </item> </list> @@ -166,7 +166,7 @@ <section> <marker id="debug_flags"></marker> <title>DbgExtra options</title> - <p>These options are purely for debugging and testing epmd clients, they should not be used in normal operation.</p> + <p>These options are purely for debugging and testing epmd clients. They should not be used in normal operation.</p> <taglist> <tag><c><![CDATA[-packet_timeout Seconds]]></c></tag> @@ -177,9 +177,9 @@ </item> <tag><c><![CDATA[-delay_accept Seconds]]></c></tag> <item> - <p>To simulate a busy server you can insert a delay between epmd - gets notified about that a new connection is requested and - when the connections gets accepted.</p> + <p>To simulate a busy server you can insert a delay between when epmd + gets notified that a new connection is requested and + when the connection gets accepted.</p> </item> <tag><c><![CDATA[-delay_write Seconds]]></c></tag> <item> @@ -191,15 +191,15 @@ <section> <marker id="interactive_flags"></marker> <title>Interactive options</title> - <p>These options make <c>epmd</c> run as an interactive command displaying the results of sending queries ta an already running instance of <c>epmd</c>. The epmd contacted is always on the local node, but the <c>-port</c> option can be used to select between instances if several are running using different port on the host.</p> + <p>These options make <c>epmd</c> run as an interactive command, displaying the results of sending queries to an already running instance of <c>epmd</c>. The epmd contacted is always on the local node, but the <c>-port</c> option can be used to select between instances if several are running using different ports on the host.</p> <taglist> <tag><c><![CDATA[-port No]]></c></tag> <item> <p>Contacts the <c>epmd</c> listening on the given TCP port number (default 4369). This can also be set using the - <c><![CDATA[ERL_EPMD_PORT]]></c> environment variable, see the + <c><![CDATA[ERL_EPMD_PORT]]></c> environment variable. See the section <seealso marker="#environment_variables">Environment - variables</seealso> below</p> + variables</seealso> below.</p> </item> <tag><c><![CDATA[-names]]></c></tag> <item> @@ -210,7 +210,7 @@ <p>Kill the currently running <c>epmd</c>.</p> <p>Killing the running <c>epmd</c> is only allowed if <c>epmd - -names</c> show an empty database or + -names</c> shows an empty database or <c>-relaxed_command_check</c> was given when the running instance of <c>epmd</c> was started. Note that <c>-relaxed_command_check</c> is given when starting the @@ -228,7 +228,7 @@ <p>This command can only be used when contacting <c>epmd</c> instances started with the <c>-relaxed_command_check</c> flag. Note that relaxed command checking has to be enabled for - the <c>epmd</c> daemon contacted, When running epmd + the <c>epmd</c> daemon contacted. When running epmd interactively, <c>-relaxed_command_check</c> has no effect.</p> </item> @@ -259,7 +259,7 @@ <item> <p>If set prior to start, the <c>epmd</c> daemon will behave as if the <c>-relaxed_command_check</c> option was given at - start-up. If consequently setting this option before starting + start-up. 
Consequently, if this option is set before starting the Erlang virtual machine, the automatically started <c>epmd</c> will accept the <c>-kill</c> and <c>-stop</c> commands without restrictions.</p> @@ -287,8 +287,8 @@ remote hosts. However, only the query commands are answered (and acted upon) if the query comes from a remote host. It is always an error to try to register a nodename if the client is not a process - located on the same host as the <c>epmd</c> instance is running on, - why such requests are considered hostile and the connection is + located on the same host as the <c>epmd</c> instance is running on- + such requests are considered hostile and the connection is immediately closed.</p> <p>The queries accepted from remote nodes are:</p> @@ -307,3 +307,4 @@ </comref> + diff --git a/erts/doc/src/erl.xml b/erts/doc/src/erl.xml index 9724a1345a..f856b9ab86 100644 --- a/erts/doc/src/erl.xml +++ b/erts/doc/src/erl.xml @@ -495,7 +495,7 @@ <c><![CDATA[werl]]></c>, not <c><![CDATA[erl]]></c> (<c><![CDATA[oldshell]]></c>). Note also that <c><![CDATA[Ctrl-Break]]></c> is used instead of <c><![CDATA[Ctrl-C]]></c> on Windows.</p> </item> - <tag><c><![CDATA[+c]]></c></tag> + <tag><marker id="+c"><c><![CDATA[+c]]></c></marker></tag> <item> <p>Disable compensation for sudden changes of system time.</p> <p>Normally, <c><![CDATA[erlang:now/0]]></c> will not immediately reflect @@ -510,6 +510,9 @@ reflect the current system time. Note that timers are based on <c><![CDATA[erlang:now/0]]></c>. If the system time jumps, timers then time out at the wrong time.</p> + <p><em>NOTE</em>: You can check whether the adjustment is enabled or + disabled by calling + <seealso marker="erlang#system_info_tolerant_timeofday">erlang:system_info(tolerant_timeofday)</seealso>.</p> </item> <tag><c><![CDATA[+d]]></c></tag> <item> @@ -848,6 +851,19 @@ </p> </item> <tag><marker id="+SDio"><c><![CDATA[+SDio IOSchedulers]]></c></marker></tag> + <item> + <p>Sets the number of dirty I/O scheduler threads to create when threading + support has been enabled. The valid range is 0-1024. By default, the number + of dirty I/O scheduler threads created is 10, same as the default number of + threads in the <seealso marker="#async_thread_pool_size">async thread pool + </seealso>. + </p> + <p>This option is ignored if the emulator doesn't have threading support + enabled. Currently, <em>this option is experimental</em> and is supported only + if the emulator was configured and built with support for dirty schedulers + enabled (it's disabled by default). + </p> + </item> <tag><c><![CDATA[+sFlag Value]]></c></tag> <item> <p>Scheduling specific flags.</p> @@ -1170,7 +1186,7 @@ utilization. </p> </item> - <tag><marker id="+swct"><c>+sws very_eager|eager|medium|lazy|very_lazy</c></marker></tag> + <tag><marker id="+swct"><c>+swct very_eager|eager|medium|lazy|very_lazy</c></marker></tag> <item> <p> Set scheduler wake cleanup threshold. Default is <c>medium</c>. diff --git a/erts/doc/src/erl_driver.xml b/erts/doc/src/erl_driver.xml index ad37813ac0..4a1aab75c7 100644 --- a/erts/doc/src/erl_driver.xml +++ b/erts/doc/src/erl_driver.xml @@ -546,6 +546,7 @@ typedef struct ErlDrvSysInfo { int scheduler_threads; int nif_major_version; int nif_minor_version; + int dirty_scheduler_support; } ErlDrvSysInfo; </code> @@ -610,6 +611,10 @@ typedef struct ErlDrvSysInfo { <tag><c>nif_minor_version</c></tag> <item>The value of <c>ERL_NIF_MINOR_VERSION</c> when the runtime system was compiled. 
</item> + <tag><c>dirty_scheduler_support</c></tag> + <item>A value <c>!= 0</c> if the runtime system has support for dirty scheduler threads; + otherwise <c>0</c>. + </item> </taglist> </item> <tag><marker id="ErlDrvBinary"/>ErlDrvBinary</tag> diff --git a/erts/doc/src/erl_nif.xml b/erts/doc/src/erl_nif.xml index 6b1f4cccf8..3de94be9ff 100644 --- a/erts/doc/src/erl_nif.xml +++ b/erts/doc/src/erl_nif.xml @@ -168,16 +168,18 @@ ok <p><marker id="lengthy_work"/> As mentioned in the <seealso marker="#WARNING">warning</seealso> text at the beginning of this document it is of vital importance that a native function - does return relatively fast. It is hard to give an exact maximum amount + return relatively quickly. It is hard to give an exact maximum amount of time that a native function is allowed to work, but as a rule of thumb - a well behaving native function should return to its caller before a + a well-behaving native function should return to its caller before a millisecond has passed. This can be achieved using different approaches. - If you have full control over the code that are to execute in the native + If you have full control over the code to execute in the native function, the best approach is to divide the work into multiple chunks of - work and call the native function multiple times. Function + work and call the native function multiple times, either directly from Erlang code + or by having a native function schedule a future NIF call via the + <seealso marker="#enif_schedule_nif"> enif_schedule_nif</seealso> function. Function <seealso marker="#enif_consume_timeslice">enif_consume_timeslice</seealso> can be - used this facilitate such work division. In some cases, however, this might not - be possible, e.g. when calling third party libraries. Then you typically want + used to help with such work division. In some cases, however, this might not + be possible, e.g. when calling third-party libraries. Then you typically want to dispatch the work to another thread, return from the native function, and wait for the result. The thread can send the result back to the calling thread using message passing. Information @@ -342,37 +344,38 @@ ok libraries might however fail if deprecated features are used. </p></item> - <tag>Dirty NIFs</tag> - <item><p><marker id="dirty_nifs"/><em>Note that the dirty NIF functionality - is experimental</em> and that you have to enable support for dirty - schedulers when building OTP in order to try the functionality out. Native functions + <tag>Long-running NIFs</tag> + <item><p><marker id="dirty_nifs"/>Native functions <seealso marker="#lengthy_work"> must normally run quickly</seealso>, as explained earlier in this document. They generally should execute for no more than a millisecond. But not all native functions can execute so quickly; for example, functions that encrypt large blocks of data or perform lengthy file system operations can often run for tens of seconds or more.</p> - <p>A NIF that cannot execute in a millisecond or less is called a "dirty NIF" since - it performs work that the Erlang runtime cannot handle cleanly. 
Applications - that make use of such functions must indicate to the runtime that the functions are + <p>If the functionality of a long-running NIF can be split so that its work can be + achieved through a series of shorter NIF calls, the application can either make that series + of NIF calls from the Erlang level, or it can call a NIF that first performs a chunk of the + work, then invokes the <seealso marker="#enif_schedule_nif">enif_schedule_nif</seealso> + function to schedule another NIF call to perform the next chunk. The final call scheduled + in this manner can then return the overall result. Breaking up a long-running function in + this manner enables the VM to regain control between calls to the NIFs, thereby avoiding + degraded responsiveness, scheduler load balancing problems, and other strange behaviours.</p> + <p>A NIF that cannot be split and cannot execute in a millisecond or less is called a "dirty NIF" + because it performs work that the Erlang runtime cannot handle cleanly. + <em>Note that the dirty NIF functionality described here is experimental</em> and that you have to + enable support for dirty schedulers when building OTP in order to try the functionality out. + Applications that make use of such functions must indicate to the runtime that the functions are dirty so they can be handled specially. To schedule a dirty NIF for execution, the - application calls <seealso marker="#enif_schedule_dirty_nif">enif_schedule_dirty_nif</seealso>, - passing to it a pointer to the dirty NIF to be executed and indicating with a flag + appropriate flags value can be set for the NIF in its <seealso marker="#ErlNifFunc">ErlNifFunc</seealso> + entry, or the application can call <seealso marker="#enif_schedule_nif">enif_schedule_nif</seealso>, + passing to it a pointer to the dirty NIF to be executed and indicating with the <c>flags</c> argument whether it expects the operation to be CPU-bound or I/O-bound.</p> - <p>All dirty NIFs must ultimately invoke the <seealso marker="#enif_schedule_dirty_nif_finalizer"> - enif_schedule_dirty_nif_finalizer</seealso> as their final action, passing to it the - result they wish to return to the original caller. A finalizer function can either - receive the result and return it directly, or it can return a different value instead. - For convenience, the NIF API provides the <seealso marker="#enif_dirty_nif_finalizer"> - enif_dirty_nif_finalizer</seealso> function that applications can use as a finalizer; - it simply returns its result argument.</p> <note><p>Dirty NIF support is available only when the emulator is configured with dirty schedulers enabled. This feature is currently disabled by default. To determine whether the dirty NIF API is available, native code can check to see if the C preprocessor macro <c>ERL_NIF_DIRTY_SCHEDULER_SUPPORT</c> is defined. Also, if the Erlang runtime was built without threading support, dirty schedulers are disabled. 
To check at runtime for the presence - of dirty scheduler threads, code can call the <seealso marker="#enif_have_dirty_schedulers"><c> - enif_have_dirty_schedulers()</c></seealso> API function, which returns true if dirty - scheduler threads are present, false otherwise.</p></note> + of dirty scheduler threads, code can use the <seealso marker="#enif_system_info"><c> + enif_system_info()</c></seealso> API function.</p></note> </item> </taglist> </section> @@ -498,6 +501,7 @@ typedef struct { const char* <em>name</em>; unsigned <em>arity</em>; ERL_NIF_TERM (*<em>fptr</em>)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]); + unsigned flags; } ErlNifFunc; </code> <p>Describes a NIF by its name, arity and implementation. @@ -508,7 +512,17 @@ typedef struct { will thus denote the Nth argument to the NIF. Note that the <c>argc</c> argument allows for the same C function to implement several Erlang functions with different arity (but - same name probably).</p> + same name probably). For a regular NIF, <c>flags</c> is 0 (and + so its value can be omitted for statically initialized <c>ErlNifFunc</c> + instances), or it can be used to indicate that the NIF is a <seealso + marker="#dirty_nifs">dirty NIF</seealso> that should be executed + on a dirty scheduler thread (<em>note that the dirty NIF functionality + described here is experimental</em> and that you have to enable + support for dirty schedulers when building OTP in order to try the + functionality out). If the dirty NIF is expected to be + CPU-bound, its <c>flags</c> field should be set to + <c>ERL_NIF_DIRTY_JOB_CPU_BOUND</c>, or for I/O-bound jobs, + <c>ERL_NIF_DIRTY_JOB_IO_BOUND</c>.</p> </item> <tag><marker id="ErlNifBinary"/>ErlNifBinary</tag> <item> @@ -672,18 +686,6 @@ typedef enum { See also the <seealso marker="#WARNING">warning</seealso> text at the beginning of this document.</p> </desc> </func> - <func><name><ret>ERL_NIF_TERM</ret><nametext>enif_dirty_nif_finalizer(ErlNifEnv* env, ERL_NIF_TERM result)</nametext></name> - <fsummary>Simple dirty NIF result finalizer</fsummary> - <desc> - <p>A convenience function that a dirty NIF can use as a finalizer that simply - return its <c>result</c> argument as its return value. This function is provided - for dirty NIFs with results that should be returned directly to the original caller.</p> - <note><p>This function is available only when the emulator is configured with dirty - schedulers enabled. This feature is currently disabled by default. To determine whether - the dirty NIF API is available, native code can check to see if the C preprocessor macro - <c>ERL_NIF_DIRTY_SCHEDULER_SUPPORT</c> is defined.</p></note> - </desc> - </func> <func><name><ret>int</ret><nametext>enif_equal_tids(ErlNifTid tid1, ErlNifTid tid2)</nametext></name> <fsummary></fsummary> <desc><p>Same as <seealso marker="erl_driver#erl_drv_equal_tids">erl_drv_equal_tids</seealso>. @@ -804,22 +806,6 @@ typedef enum { and return true, or return false if <c>term</c> is not an unsigned integer or is outside the bounds of type <c>unsigned long</c>.</p></desc> </func> - <func><name><ret>int</ret><nametext>enif_have_dirty_schedulers()</nametext></name> - <fsummary>Runtime check for the presence of dirty scheduler threads</fsummary> - <desc> - <p>Check at runtime for the presence of dirty scheduler threads. If the emulator is - built with threading support, dirty scheduler threads are available and - <c>enif_have_dirty_schedulers()</c> returns true. 
If the emulator was built without - threading support, <c>enif_have_dirty_schedulers()</c> returns false.</p> - <p>If dirty scheduler threads are not available in the emulator, calls to - <c>enif_schedule_dirty_nif</c> and <c>enif_schedule_dirty_nif_finalizer</c> result in - the NIF and finalizer functions being called directly within the calling thread.</p> - <note><p>This function is available only when the emulator is configured with dirty - schedulers enabled. This feature is currently disabled by default. To determine whether - the dirty NIF API is available, native code can check to see if the C preprocessor macro - <c>ERL_NIF_DIRTY_SCHEDULER_SUPPORT</c> is defined.</p></note> - </desc> - </func> <func><name><ret>int</ret><nametext>enif_inspect_binary(ErlNifEnv* env, ERL_NIF_TERM bin_term, ErlNifBinary* bin)</nametext></name> <fsummary>Inspect the content of a binary</fsummary> <desc><p>Initialize the structure pointed to by <c>bin</c> with @@ -873,8 +859,8 @@ typedef enum { <p>Check to see if the current NIF is executing on a dirty scheduler thread. If the emulator is built with threading support, calling <c>enif_is_on_dirty_scheduler</c> from within a dirty NIF returns true. It returns false when the calling NIF is a regular - NIF or a NIF finalizer, both of which run on normal scheduler threads, or when the emulator - is built without threading support.</p> + NIF running on a normal scheduler thread, or when the emulator is built without threading + support.</p> <note><p>This function is available only when the emulator is configured with dirty schedulers enabled. This feature is currently disabled by default. To determine whether the dirty NIF API is available, native code can check to see if the C preprocessor macro @@ -1245,46 +1231,29 @@ typedef enum { <desc><p>Same as <seealso marker="erl_driver#erl_drv_rwlock_tryrwlock">erl_drv_rwlock_tryrwlock</seealso>. </p></desc> </func> - <func><name><ret>ERL_NIF_TERM</ret><nametext>enif_schedule_dirty_nif(ErlNifEnv* env, int flags, ERL_NIF_TERM (*fp)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]), int argc, const ERL_NIF_TERM argv[])</nametext></name> - <fsummary>Schedule a dirty NIF for execution</fsummary> + <func><name><ret>ERL_NIF_TERM</ret><nametext>enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags, ERL_NIF_TERM (*fp)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]), int argc, const ERL_NIF_TERM argv[])</nametext></name> + <fsummary>Schedule a NIF for execution</fsummary> <desc> - <p>Schedule dirty NIF <c>fp</c> to execute a long-running operation. The <c>flags</c> - argument must be set to either <c>ERL_NIF_DIRTY_JOB_CPU_BOUND</c> if the job is expected to - be primarily CPU-bound, or <c>ERL_NIF_DIRTY_JOB_IO_BOUND</c> for jobs that will be - I/O-bound. The <c>argc</c> and <c>argv</c> arguments can either be the originals passed - into the calling NIF, or they can be values created by the calling NIF. The calling - NIF must use the return value of <c>enif_schedule_dirty_nif</c> as its own return value.</p> - <p>Be aware that <c>enif_schedule_dirty_nif</c>, as its name implies, only schedules the - dirty NIF for future execution. The calling NIF does not block waiting for the dirty NIF to - execute and return, which means that the calling NIF can't expect to receive the dirty NIF + <p>Schedule NIF <c>fp</c> to execute. 
This function allows an application to break up long-running + work into multiple regular NIF calls or to schedule a <seealso marker="#dirty_nifs">dirty NIF</seealso> + to execute on a dirty scheduler thread (<em>note that the dirty NIF functionality described here is + experimental</em> and that you have to enable support for dirty schedulers when building OTP in + order to try the functionality out).</p> + <p>The <c>fun_name</c> argument provides a name for the NIF being scheduled for execution. If it cannot + be converted to an atom, <c>enif_schedule_nif</c> returns a <c>badarg</c> exception.</p> + <p>The <c>flags</c> argument must be set to 0 for a regular NIF, or if the emulator was built the + experimental dirty scheduler support enabled, <c>flags</c> can be set to either <c>ERL_NIF_DIRTY_JOB_CPU_BOUND</c> + if the job is expected to be primarily CPU-bound, or <c>ERL_NIF_DIRTY_JOB_IO_BOUND</c> for jobs that will + be I/O-bound. If dirty scheduler threads are not available in the emulator, a try to schedule such a job + will result in a <c>badarg</c> exception.</p> + + <p>The <c>argc</c> and <c>argv</c> arguments can either be the originals passed into the calling NIF, or + they can be values created by the calling NIF.</p> + <p>The calling NIF must use the return value of <c>enif_schedule_nif</c> as its own return value.</p> + <p>Be aware that <c>enif_schedule_nif</c>, as its name implies, only schedules the + NIF for future execution. The calling NIF does not block waiting for the scheduled NIF to + execute and return, which means that the calling NIF can't expect to receive the scheduled NIF return value and use it for further operations.</p> - <p>A dirty NIF may not invoke the <seealso marker="#enif_make_badarg">enif_make_badarg</seealso> - to raise an exception. If it wishes to return an exception, the dirty NIF should pass a - regular result indicating the exception details to its finalizer, and allow the finalizer - to raise the exception on its behalf.</p> - <note><p>This function is available only when the emulator is configured with dirty schedulers - enabled. This feature is currently disabled by default. To determine whether the dirty NIF API - is available, native code can check to see if the C preprocessor macro - <c>ERL_NIF_DIRTY_SCHEDULER_SUPPORT</c> is defined.</p></note> - </desc> - </func> - <func><name><ret>ERL_NIF_TERM</ret><nametext>enif_schedule_dirty_nif_finalizer(ErlNifEnv* env, ERL_NIF_TERM result, ERL_NIF_TERM (*fp)(ErlNifEnv* env, ERL_NIF_TERM result))</nametext></name> - <fsummary>Schedule a dirty NIF finalizer</fsummary> - <desc> - <p>When a dirty NIF finishes executing, it must schedule a finalizer function to return - its result to the original NIF caller. The dirty NIF passes <c>result</c> as the value it - wants the finalizer to use as the return value. The <c>fp</c> argument is a pointer to the - finalizer function. The NIF API provides the <seealso marker="#enif_dirty_nif_finalizer"> - enif_dirty_nif_finalizer</seealso> function that can be used as a finalizer that simply - returns its <c>result</c> argument. You are also free to write your own custom finalizer - that uses <c>result</c> to derive a different return value, or ignores <c>result</c> - entirely and returns a completely different value.</p> - <p>Without exception, all dirty NIFs must invoke <c>enif_schedule_dirty_nif_finalizer</c> - to complete their execution.</p> - <note><p>This function is available only when the emulator is configured with dirty - schedulers enabled. 
This feature is currently disabled by default. To determine whether - the dirty NIF API is available, native code can check to see if the C preprocessor macro - <c>ERL_NIF_DIRTY_SCHEDULER_SUPPORT</c> is defined.</p></note> </desc> </func> <func><name><ret>ErlNifPid *</ret><nametext>enif_self(ErlNifEnv* caller_env, ErlNifPid* pid)</nametext></name> @@ -1384,4 +1353,3 @@ typedef enum { <p><seealso marker="erlang#load_nif-2">erlang:load_nif/2</seealso></p> </section> </cref> - diff --git a/erts/doc/src/erl_prim_loader.xml b/erts/doc/src/erl_prim_loader.xml index 6751deda4d..171f84decc 100644 --- a/erts/doc/src/erl_prim_loader.xml +++ b/erts/doc/src/erl_prim_loader.xml @@ -148,6 +148,22 @@ </desc> </func> <func> + <name name="read_link_info" arity="1"/> + <fsummary>Get information about a link or file</fsummary> + <desc> + <p>This function works like + <seealso marker="#read_file_info/1">read_file_info/1</seealso> + except that if <c><anno>Filename</anno></c> is a symbolic link, + information about the link will be returned in the <c>file_info</c> + record and the <c>type</c> field of the record will be set to + <c>symlink</c>.</p> + <p>If <c><anno>Filename</anno></c> is not a symbolic link, this function + returns exactly the same result as <c>read_file_info/1</c>. + On platforms that do not support symbolic links, this function + is always equivalent to <c>read_file_info/1</c>.</p> + </desc> + </func> + <func> <name name="set_path" arity="1"/> <fsummary>Set the path of the loader</fsummary> <desc> diff --git a/erts/doc/src/erlang.xml b/erts/doc/src/erlang.xml index 0f4dfc0f98..84168397f6 100644 --- a/erts/doc/src/erlang.xml +++ b/erts/doc/src/erlang.xml @@ -4968,7 +4968,7 @@ true</pre> <desc> <p>Note that the run-time is the sum of the run-time for all threads in the Erlang run-time system and may therefore be greater - than the wall-clock time.</p> + than the wall-clock time. The time is returned in milliseconds.</p> <pre> > <input>statistics(runtime).</input> {1690,1620} @@ -6295,6 +6295,13 @@ ok (<seealso marker="erts:erl_driver#driver_async">driver_async()</seealso>) as an integer.</p> </item> + <tag><marker id="system_info_tolerant_timeofday"><c>tolerant_timeofday</c></marker></tag> + <item> + <p>Returns whether compensation for sudden changes of system + time is <c>enabled</c> or <c>disabled</c>.</p> + <p>See also <seealso marker="erts:erl#+c">+c</seealso> + command line flag.</p> + </item> <tag><c>trace_control_word</c></tag> <item> <p>Returns the value of the node's trace control word. diff --git a/erts/doc/src/erts_alloc.xml b/erts/doc/src/erts_alloc.xml index c9eca39a99..1ade41f1aa 100644 --- a/erts/doc/src/erts_alloc.xml +++ b/erts/doc/src/erts_alloc.xml @@ -4,7 +4,7 @@ <cref> <header> <copyright> - <year>2002</year><year>2013</year> + <year>2002</year><year>2014</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -531,15 +531,9 @@ <p>Multiple, thread specific instances of the allocator. This option will only have any effect on the runtime system with SMP support. Default behaviour on the runtime system with - SMP support:</p> - <taglist> - <tag><c>ll_alloc</c></tag> - <item><c>1</c> instance.</item> - <tag>Other allocators</tag> - <item><c>NoSchedulers+1</c> instances. Each scheduler will use - a lock-free instance of its own and other threads will use - a common instance.</item> - </taglist> + SMP support is <c>NoSchedulers+1</c> instances. 
Each scheduler will use + a lock-free instance of its own and other threads will use + a common instance.</p> <p>It was previously (before ERTS version 5.9) possible to configure a smaller amount of thread specific instances than schedulers. This is, however, not possible any more.</p> diff --git a/erts/doc/src/notes.xml b/erts/doc/src/notes.xml index 68feaa027a..743369951f 100644 --- a/erts/doc/src/notes.xml +++ b/erts/doc/src/notes.xml @@ -30,6 +30,360 @@ </header> <p>This document describes the changes made to the ERTS application.</p> +<section><title>Erts 6.2</title> + + <section><title>Fixed Bugs and Malfunctions</title> + <list> + <item> + <p> + General documentation updates.</p> + <p> + Own Id: OTP-12052</p> + </item> + <item> + <p>A bug in the VM code implementing sending of signals + to ports could cause the receiving port queue to remain + in a busy state forever. When this state had been + reached, processes sending command signals to the port + either got suspended forever, or, if the <c>nosuspend</c> + feature was used, always failed to send to the port. This + bug was introduced in ERTS version 5.10.</p> + <p>In order for this bug to be triggered on a port, one + had to at least once utilize the <c>nosuspend</c> + functionality when passing a signal to the port. This by + either calling</p> <list> <item> <seealso + marker="erlang#port_command/3"><c>port_command(Port, + Data, [nosuspend | Options])</c></seealso>, </item> + <item> <seealso + marker="erlang#send/3"><c>erlang:send(Port, {PortOwner, + {command, Data}}, [nosuspend | Options])</c></seealso>, + </item> <item> <seealso + marker="erlang#send_nosuspend/2"><c>erlang:send_nosuspend(Port, + {PortOwner, {command, Data}})</c></seealso>, or </item> + <item> <seealso + marker="erlang#send_nosuspend/3"><c>erlang:send_nosuspend(Port, + {PortOwner, {command, Data}}, Options)</c></seealso>. + </item> </list> + <p>Thanks Vasily Demidenok for reporting the issue, and + Sergey Kudryashov for providing a testcase.</p> + <p> + Own Id: OTP-12082 Aux Id: OTP-10336 </p> + </item> + <item> + <p> + Fix size overflow bug at memory allocation. A memory + allocation call, with an insane size close to the entire + address space, could return successfully as if it had + allocated just a few bytes. (Thanks to Don A. Bailey for + reporting)</p> + <p> + Own Id: OTP-12091</p> + </item> + <item> + <p> + Fix various issues where negating a signed integer would + trigger undefined behaviour. This fixes issues in the + enif_make_int64 interface and some edge cases inside the + erlang runtime system.</p> + <p> + Own Id: OTP-12097</p> + </item> + <item> + <p> + The documentation erroneously listed the <seealso + marker="erl#+swct"><c>+swct</c></seealso> command line + argument under <c>+sws</c>.</p> + <p> + Own Id: OTP-12102 Aux Id: OTP-10994 </p> + </item> + <item> + <p> + Profiling messages could be delivered out of order when + profiling on <c>runnable_procs</c> and/or + <c>runnable_ports</c> using <seealso + marker="erlang#system_profile/2"><c>erlang:system_profile/2</c></seealso>. 
+ This bug was introduced in ERTS version 5.10.</p> + <p> + Own Id: OTP-12105 Aux Id: OTP-10336 </p> + </item> + <item> + <p> + Various logging fixes, including: Add run queue index to + the process dump in crash dumps.<br/> Add thread index to + enomem slogan when crashing.<br/> Remove error logger + message for sending messages to old instances of the same + node.</p> + <p> + Own Id: OTP-12115</p> + </item> + <item> + <p> + Fix compiler warnings reported by LLVM</p> + <p> + Own Id: OTP-12138</p> + </item> + <item> + <p> + Correct conversion of <c>MIN_SMALL</c> by + <c>list_to_integer/1</c> and <c>binary_to_integer/1</c>. + The bug produced an unnormalized bignum which can cause + strange behavior such as comparing different to a correct + <c>MIN_SMALL</c> integer. The value <c>MIN_SMALL</c> is + <c>-(1 bsl 27) = -134217728</c> on a 32-bit VM and <c>-(1 + bsl 59) = -576460752303423488</c> on a 64-bit VM. (Thanks + to Jesper Louis Andersen, Mikael Pettersson and Anthony + Ramine for report, patch and optimization suggestion)</p> + <p> + Own Id: OTP-12140</p> + </item> + <item> + <p> + Fix bug in <c>term_to_binary</c> that reallocates binary + with inconsistent size information. Bug has never been + confirmed to be the cause of any faulty behavior.</p> + <p> + Own Id: OTP-12141</p> + </item> + <item> + <p> + Real_path method used while prim loading archive files + was not taking into account the fact that windows + directory symlinks can be across different drives.</p> + <p> + Own Id: OTP-12155</p> + </item> + </list> + </section> + + + <section><title>Improvements and New Features</title> + <list> + <item> + <p> + Add log2 histogram to lcnt for lock wait time</p> + <p> + Own Id: OTP-12059</p> + </item> + <item> + <p> + Introduced <seealso + marker="erl_nif#enif_schedule_nif"><c>enif_schedule_nif()</c></seealso> + to the NIF API.</p> + <p> + The <c>enif_schedule_nif()</c> function allows a + long-running NIF to be broken into separate NIF + invocations without the help of a wrapper function + written in Erlang. The NIF first executes part of the + long-running task, then calls <c>enif_schedule_nif()</c> + to schedule a NIF for later execution to continue the + task. Any number of NIFs can be scheduled in this manner, + one after another. Since the emulator regains control + between invocations, this helps avoid problems caused by + native code tying up scheduler threads for too long.</p> + <p> + The <c>enif_schedule_nif()</c> function also replaces the + <c>enif_schedule_dirty_nif()</c> in the experimental + dirty NIF API. Note that the only incompatible changes + made are in the experimental dirty NIF API.</p> + <p> + See the <seealso marker="erl_nif">NIF + documentation</seealso> for more information.</p> + <p> + Thanks to Steve Vinoski.</p> + <p> + *** POTENTIAL INCOMPATIBILITY ***</p> + <p> + Own Id: OTP-12128</p> + </item> + </list> + </section> + +</section> + +<section><title>Erts 6.1.2</title> + + <section><title>Fixed Bugs and Malfunctions</title> + <list> + <item> + <p> + OTP-11850 fixed filelib:wildcard/1 to work with broken + symlinks. This correction, however, introduced problems + since symlinks were no longer followed for functions like + filelib:ensure_dir/1, filelib:is_dir/1, + filelib:file_size/1, etc. 
This is now corrected.</p> + <p> + Own Id: OTP-12054 Aux Id: seq12660 </p> + </item> + </list> + </section> + +</section> + +<section><title>Erts 6.1.1</title> + + <section><title>Fixed Bugs and Malfunctions</title> + <list> + <item> + <p> + Fixed ETHR_FORCE_INLINE which caused the build to break + on some platforms without adequate thread support + (VxWorks).</p> + <p> + Own Id: OTP-12010</p> + </item> + </list> + </section> + +</section> + +<section><title>Erts 6.1</title> + + <section><title>Fixed Bugs and Malfunctions</title> + <list> + <item> + <p>The documentation for <c>spawn_opt/5</c> now has a + note mentioning that the <c>monitor</c> option is not + supported.</p> + <p> + Own Id: OTP-11849</p> + </item> + <item> + <p> + Fix broken system monitoring of <c>large_heap</c> for + non-smp VM. No message for <c>large_heap</c> was ever + sent on non-smp VM. Bug exist since R16B.</p> + <p> + Own Id: OTP-11852</p> + </item> + <item> + <p> + The emulator without SMP support crashed when passing a + message to a process without enough heap space for the + message. This bug was introduced in <c>erts-6.0</c>.</p> + <p> + Own Id: OTP-11887 Aux Id: OTP-11388 </p> + </item> + <item> + <p> + Fix race between ETS table deletion and unfixation that + could cause VM crash. The race could happen between a + terminating process that does not own the table but has a + fixation on it and another process that deletes the table + (maybe the owner terminating) at the same time. Bug + existed since R15B02.</p> + <p> + Own Id: OTP-11892</p> + </item> + <item> + <p>The string following the <c>-eval</c> option when + invoking <c>erl</c> would not be properly translated from + UTF-8 to a list of Unicode characters (as would the + arguments for <c>-run</c>).</p> + <p>That bug would cause the build of Erlang/OTP to fail + when building in a directory whose pathname contained + non-US ASCII characters encoded in UTF-8. (Thanks to Eric + Pailleau for reporting this bug.)</p> + <p> + Own Id: OTP-11916</p> + </item> + <item> + <p> + Fix erts_debug:size/1 to handle Map sizes</p> + <p> + Own Id: OTP-11923</p> + </item> + <item> + <p> + Removed <c>erlang:bitstr_to_list/1</c> and + <c>erlang:list_to_bitstr/1</c>. They were added by + mistake, and have always raised an <c>undefined</c> + exception when called.</p> + <p> + Own Id: OTP-11942</p> + </item> + <item> + <p> + Fixed compilation using mingw-w64 on Windows.</p> + <p> + Thanks to Jani Hakala.</p> + <p> + Own Id: OTP-11945</p> + </item> + <item> + <p> + The git sha is no longer printed in the shell start + header when erlang is built from a tagged git release.</p> + <p> + Own Id: OTP-11961</p> + </item> + <item> + <p> + Fixed a bug where <c>send</c> trace events were + erroneously dropped when the send was done to a + registered process. 
This bug was introduced in R16B.</p> + <p> + Own Id: OTP-11968</p> + </item> + </list> + </section> + + + <section><title>Improvements and New Features</title> + <list> + <item> + <p>The following native functions now bump an appropriate + amount of reductions and yield when out of + reductions:</p> <list> + <item><c>erlang:binary_to_list/1</c></item> + <item><c>erlang:binary_to_list/3</c></item> + <item><c>erlang:bitstring_to_list/1</c></item> + <item><c>erlang:list_to_binary/1</c></item> + <item><c>erlang:iolist_to_binary/1</c></item> + <item><c>erlang:list_to_bitstring/1</c></item> + <item><c>binary:list_to_bin/1</c></item> </list> + <p>Characteristics impact:</p> <taglist> + <tag>Performance</tag> <item>The functions converting + from lists got a performance loss for very small lists, + and a performance gain for very large lists.</item> + <tag>Priority</tag> <item>Previously a process executing + one of these functions effectively got an unfair priority + boost. This priority boost depended on the input size. + The larger the input was, the larger the priority boost + got. This unfair priority boost is now lost. </item> + </taglist> + <p> + Own Id: OTP-11888</p> + </item> + <item> + <p> + The systemd features of epmd have been removed from epmd + by default. To enable them you have to build erlang with + the configure option --enable-systemd.</p> + <p> + Own Id: OTP-11921</p> + </item> + <item> + <p> + Removed Erlang wrapper code used when calling + <c>binary_to_term/1</c>, and <c>binary_to_term/2</c>. + This improves the performance of these BIFs especially + when they are called with small binaries as input.</p> + <p> + Own Id: OTP-11931</p> + </item> + <item> + <p> + Add erlang:system_info(tolerant_timeofday), an API to + check whether compensation for sudden changes of system + time is enabled or not.</p> + <p> + Own Id: OTP-11970</p> + </item> + </list> + </section> + +</section> + <section><title>Erts 6.0.1</title> <section><title>Fixed Bugs and Malfunctions</title> @@ -595,9 +949,9 @@ "hi" := V1, a := V2, b := V3} = M2. % match keys with values</c></item> </taglist></p> <p> - For information on how to use Maps please see the - <seealso marker="doc/reference_manual:maps">Reference - Manual</seealso>.</p> + For information on how to use Maps please see Map Expressions in the + <seealso marker="doc/reference_manual:expressions#map_expressions"> + Reference Manual</seealso>.</p> <p> The current implementation is without the following features: <taglist> <item>No variable keys</item> @@ -756,6 +1110,27 @@ Thanks to Matwey V. Kornilov</p> <p> Own Id: OTP-11829</p> + </item> + </list> + </section> + +</section> + +<section><title>Erts 5.10.4.1</title> + + <section><title>Known Bugs and Problems</title> + <list> + <item> + <p> + When using gen_tcp:connect and the <c>fd</c> option with + <c>port</c> and/or <c>ip</c>, the <c>port</c> and + <c>ip</c> options were ignored. This has been fixed so + that if <c>port</c> and/or <c>ip</c> is specified + together with <c>fd</c> a bind is requested for that + <c>fd</c>. 
If <c>port</c> and/or <c>ip</c> is not + specified bind will not be called.</p> + <p> + Own Id: OTP-12061</p> </item> </list> </section> @@ -4854,7 +5229,7 @@ <item> <p> The <c>configure</c> command line argument <seealso - marker="doc/installation_guide:INSTALL#How-to-Build-and-Install-ErlangOTP_A-Closer-Look-at-the-individual-Steps_Configuring">--enable-ethread-pre-pentium4-compatibility</seealso> + marker="doc/installation_guide:INSTALL#Advanced-configuration-and-build-of-ErlangOTP">--enable-ethread-pre-pentium4-compatibility</seealso> had no effect. This option is now also automatically enabled if required on the build machine.</p> <p> @@ -5433,7 +5808,7 @@ platforms than before. If <c>configure</c> warns about no atomic implementation available, try using the <c>libatomic_ops</c> library. Use the <seealso - marker="doc/installation_guide:INSTALL#How-to-Build-and-Install-ErlangOTP_A-Closer-Look-at-the-individual-Steps_Configuring">--with-libatomic_ops=PATH</seealso> + marker="doc/installation_guide:INSTALL#Advanced-configuration-and-build-of-ErlangOTP">--with-libatomic_ops=PATH</seealso> <c>configure</c> command line argument when specifying where the <c>libatomic_ops</c> installation is located. The <c>libatomic_ops</c> library can be downloaded from: @@ -5451,7 +5826,7 @@ the pentium 4 processor. If you want the runtime system to be compatible with older processors (back to 486) you need to pass the <seealso - marker="doc/installation_guide:INSTALL#How-to-Build-and-Install-ErlangOTP_A-Closer-Look-at-the-individual-Steps_Configuring">--enable-ethread-pre-pentium4-compatibility</seealso> + marker="doc/installation_guide:INSTALL#Advanced-configuration-and-build-of-ErlangOTP">--enable-ethread-pre-pentium4-compatibility</seealso> <c>configure</c> command line argument when configuring the system.</p> <p> diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names index d28e519ae1..5d06a32941 100644 --- a/erts/emulator/beam/atom.names +++ b/erts/emulator/beam/atom.names @@ -116,6 +116,7 @@ atom binary_longest_prefix_trap atom binary_longest_suffix_trap atom binary_match_trap atom binary_matches_trap +atom binary_to_list_continue atom binary_to_term_trap atom block atom blocked @@ -315,6 +316,7 @@ atom line_length atom linked_in_driver atom links atom list +atom list_to_binary_continue atom little atom loaded atom load_cancelled diff --git a/erts/emulator/beam/beam_emu.c b/erts/emulator/beam/beam_emu.c index 1026e5f649..8bfb7d2ad2 100644 --- a/erts/emulator/beam/beam_emu.c +++ b/erts/emulator/beam/beam_emu.c @@ -3503,6 +3503,7 @@ get_map_elements_fail: * I[0]: &&call_nif * I[1]: Function pointer to NIF function * I[2]: Pointer to erl_module_nif + * I[3]: Function pointer to dirty NIF */ BifFunction vbf; @@ -3523,13 +3524,6 @@ get_map_elements_fail: reg[0] = r(0); nif_bif_result = (*fp)(&env, bif_nif_arity, reg); erts_post_nif(&env); -#ifdef ERTS_DIRTY_SCHEDULERS - if (is_non_value(nif_bif_result) && c_p->freason == TRAP) { - Export* ep = ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(c_p); - ep->code[0] = I[-3]; - ep->code[1] = I[-2]; - } -#endif } ASSERT(!ERTS_PROC_IS_EXITING(c_p) || is_non_value(nif_bif_result)); PROCESS_MAIN_CHK_LOCKS(c_p); diff --git a/erts/emulator/beam/beam_load.c b/erts/emulator/beam/beam_load.c index e96177cfd9..cfc6146b0a 100644 --- a/erts/emulator/beam/beam_load.c +++ b/erts/emulator/beam/beam_load.c @@ -2363,7 +2363,11 @@ load_code(LoaderState* stp) if (stp->may_load_nif) { const int finfo_ix = ci - FUNC_INFO_SZ; - enum { MIN_FUNC_SZ = 3 }; +#ifdef 
ERTS_DIRTY_SCHEDULERS + enum { MIN_FUNC_SZ = 4 }; +#else + enum { MIN_FUNC_SZ = 3 }; +#endif if (finfo_ix - last_func_start < MIN_FUNC_SZ && last_func_start) { /* Must make room for call_nif op */ int pad = MIN_FUNC_SZ - (finfo_ix - last_func_start); diff --git a/erts/emulator/beam/bif.c b/erts/emulator/beam/bif.c index 06a1230ca0..a5be8e1529 100644 --- a/erts/emulator/beam/bif.c +++ b/erts/emulator/beam/bif.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 1996-2013. All Rights Reserved. + * Copyright Ericsson AB 1996-2014. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -1869,6 +1869,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) { } else if (is_external_pid(to)) { dep = external_pid_dist_entry(to); if(dep == erts_this_dist_entry) { +#if DEBUG erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); erts_dsprintf(dsbufp, "Discarding message %T from %T to %T in an old " @@ -1879,6 +1880,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) { external_pid_creation(to), erts_this_node->creation); erts_send_error_to_logger(p->group_leader, dsbufp); +#endif return 0; } return remote_send(p, dep, to, to, msg, suspend); @@ -1886,8 +1888,13 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) { Eterm id = erts_whereis_name_to_id(p, to); rp = erts_proc_lookup(id); - if (rp) + if (rp) { + if (IS_TRACED(p)) + trace_send(p, to, msg); + if (ERTS_PROC_GET_SAVED_CALLS_BUF(p)) + save_calls(p, &exp_send); goto send_message; + } pt = erts_port_lookup(id, (erts_port_synchronous_ops @@ -1907,6 +1914,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) { } else if (is_external_port(to) && (external_port_dist_entry(to) == erts_this_dist_entry)) { +#if DEBUG erts_dsprintf_buf_t *dsbufp = erts_create_logger_dsbuf(); erts_dsprintf(dsbufp, "Discarding message %T from %T to %T in an old " @@ -1917,6 +1925,7 @@ do_send(Process *p, Eterm to, Eterm msg, int suspend, Eterm *refp) { external_port_creation(to), erts_this_node->creation); erts_send_error_to_logger(p->group_leader, dsbufp); +#endif return 0; } else if (is_internal_port(to)) { int ret_val; @@ -2882,9 +2891,6 @@ static int do_list_to_integer(Process *p, Eterm orig_list, res = big_plus_small(res, m, hp); } - if (is_big(res)) /* check if small */ - res = big_plus_small(res, 0, hp); /* includes conversion to small */ - if (neg) { if (is_small(res)) res = make_small(-signed_val(res)); @@ -2894,8 +2900,12 @@ static int do_list_to_integer(Process *p, Eterm orig_list, } } - if (is_big(res)) { - hp += (big_arity(res)+1); + if (is_not_small(res)) { + res = big_plus_small(res, 0, hp); /* includes conversion to small */ + + if (is_not_small(res)) { + hp += (big_arity(res)+1); + } } HRelease(p,hp_end,hp); } diff --git a/erts/emulator/beam/bif.h b/erts/emulator/beam/bif.h index 51b77a95ed..72c55ccb55 100644 --- a/erts/emulator/beam/bif.h +++ b/erts/emulator/beam/bif.h @@ -124,12 +124,85 @@ do { \ return THE_NON_VALUE; \ } while(0) +#define ERTS_BIF_ERROR_TRAPPED0(Proc, Reason, Bif) \ +do { \ + (Proc)->freason = (Reason); \ + (Proc)->current = (Bif)->code; \ + return THE_NON_VALUE; \ +} while (0) + +#define ERTS_BIF_ERROR_TRAPPED1(Proc, Reason, Bif, A0) \ +do { \ + Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \ + (Proc)->freason = (Reason); \ + (Proc)->current = (Bif)->code; \ + reg[0] = (Eterm) (A0); \ + return THE_NON_VALUE; \ +} while (0) + 
+#define ERTS_BIF_ERROR_TRAPPED2(Proc, Reason, Bif, A0, A1) \ +do { \ + Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \ + (Proc)->freason = (Reason); \ + (Proc)->current = (Bif)->code; \ + reg[0] = (Eterm) (A0); \ + reg[1] = (Eterm) (A1); \ + return THE_NON_VALUE; \ +} while (0) + +#define ERTS_BIF_ERROR_TRAPPED3(Proc, Reason, Bif, A0, A1, A2) \ +do { \ + Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \ + (Proc)->freason = (Reason); \ + (Proc)->current = (Bif)->code; \ + reg[0] = (Eterm) (A0); \ + reg[1] = (Eterm) (A1); \ + reg[2] = (Eterm) (A2); \ + return THE_NON_VALUE; \ +} while (0) + #define ERTS_BIF_PREP_ERROR(Ret, Proc, Reason) \ do { \ (Proc)->freason = (Reason); \ (Ret) = THE_NON_VALUE; \ } while (0) +#define ERTS_BIF_PREP_ERROR_TRAPPED0(Ret, Proc, Reason, Bif) \ +do { \ + (Proc)->freason = (Reason); \ + (Proc)->current = (Bif)->code; \ + (Ret) = THE_NON_VALUE; \ +} while (0) + +#define ERTS_BIF_PREP_ERROR_TRAPPED1(Ret, Proc, Reason, Bif, A0) \ +do { \ + Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \ + (Proc)->freason = (Reason); \ + (Proc)->current = (Bif)->code; \ + reg[0] = (Eterm) (A0); \ + (Ret) = THE_NON_VALUE; \ +} while (0) + +#define ERTS_BIF_PREP_ERROR_TRAPPED2(Ret, Proc, Reason, Bif, A0, A1) \ +do { \ + Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \ + (Proc)->freason = (Reason); \ + (Proc)->current = (Bif)->code; \ + reg[0] = (Eterm) (A0); \ + reg[1] = (Eterm) (A1); \ + (Ret) = THE_NON_VALUE; \ +} while (0) + +#define ERTS_BIF_PREP_ERROR_TRAPPED3(Ret, Proc, Reason, Bif, A0, A1, A2) \ +do { \ + Eterm* reg = ERTS_PROC_GET_SCHDATA((Proc))->x_reg_array; \ + (Proc)->freason = (Reason); \ + (Proc)->current = (Bif)->code; \ + reg[0] = (Eterm) (A0); \ + reg[1] = (Eterm) (A1); \ + reg[2] = (Eterm) (A2); \ + (Ret) = THE_NON_VALUE; \ +} while (0) #define ERTS_BIF_PREP_TRAP0(Ret, Trap, Proc) \ do { \ @@ -392,6 +465,51 @@ erts_bif_prep_await_proc_exit_apply_trap(Process *c_p, Eterm args[], int nargs); +#ifndef HIPE + +#define HIPE_WRAPPER_BIF_DISABLE_GC(BIF_NAME, ARITY) + +#else + +#include "erl_fun.h" +#include "hipe_mode_switch.h" + +/* + * Hipe wrappers used by native code for BIFs that disable GC while trapping. + * Also add usage of the wrapper in ../hipe/hipe_bif_list.m4 + * + * Problem: + * When native code calls a BIF that traps, hipe_mode_switch will push a + * "trap frame" on the Erlang stack in order to find its way back from beam_emu + * back to native caller when finally done. If GC is disabled and stack/heap + * is full there is no place to push the "trap frame". + * + * Solution: + * We reserve space on stack for the "trap frame" here before the BIF is called. + * If the BIF does not trap, the space is reclaimed here before returning. + * If the BIF traps, hipe_push_beam_trap_frame() will detect that a "trap frame" + * already is reserved and use it. 
+ */ + + +#define HIPE_WRAPPER_BIF_DISABLE_GC(BIF_NAME, ARITY) \ +BIF_RETTYPE hipe_wrapper_ ## BIF_NAME ## _ ## ARITY (Process* c_p, \ + Eterm* args); \ +BIF_RETTYPE hipe_wrapper_ ## BIF_NAME ## _ ## ARITY (Process* c_p, \ + Eterm* args) \ +{ \ + BIF_RETTYPE res; \ + hipe_reserve_beam_trap_frame(c_p, args, ARITY); \ + res = BIF_NAME ## _ ## ARITY (c_p, args); \ + if (is_value(res) || c_p->freason != TRAP) { \ + hipe_unreserve_beam_trap_frame(c_p); \ + } \ + return res; \ +} + +#endif + + #include "erl_bif_table.h" #endif diff --git a/erts/emulator/beam/bif.tab b/erts/emulator/beam/bif.tab index fbdddf09db..e68b8e6274 100644 --- a/erts/emulator/beam/bif.tab +++ b/erts/emulator/beam/bif.tab @@ -45,6 +45,7 @@ bif erlang:apply/3 bif erlang:atom_to_list/1 bif erlang:binary_to_list/1 bif erlang:binary_to_list/3 +bif erlang:binary_to_term/1 bif erlang:crc32/1 bif erlang:crc32/2 bif erlang:crc32_combine/3 @@ -151,8 +152,6 @@ bif erts_internal:port_command/3 bif erts_internal:port_control/3 bif erts_internal:port_close/1 bif erts_internal:port_connect/2 -bif erts_internal:binary_to_term/1 -bif erts_internal:binary_to_term/2 bif erts_internal:request_system_task/3 bif erts_internal:check_process_code/2 @@ -481,6 +480,11 @@ bif erlang:call_on_load_function/1 bif erlang:finish_after_on_load/2 # +# New Bifs in R13B04 +# +bif erlang:binary_to_term/2 + +# # The binary match bifs (New in R14A - EEP9) # @@ -597,6 +601,10 @@ bif maps:values/1 bif erts_internal:cmp_term/2 # +# New in 17.1. +# +bif erlang:fun_info_mfa/1 +# # Obsolete # diff --git a/erts/emulator/beam/big.c b/erts/emulator/beam/big.c index 41a041eba6..a8710dd910 100644 --- a/erts/emulator/beam/big.c +++ b/erts/emulator/beam/big.c @@ -274,6 +274,9 @@ _b = _b << _s; \ _vn1 = _b >> H_EXP; \ _vn0 = _b & LO_MASK; \ + /* Sometimes _s is 0 which triggers undefined behaviour for the \ + (_a0>>(D_EXP-_s)) shift, but this is ok because the \ + & -s will make it all to 0 later anyways. 
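The new comment in big.c documents that when the normalization shift _s is zero, the (_a0 >> (D_EXP - _s)) term is formally an out-of-range shift, but its result is masked away by (-_s >> (D_EXP-1)), which is zero for _s == 0 and leaves the shifted bits intact otherwise. A standalone illustration of the same idea, using explicit 64-bit types of our own rather than big.c's digit types; the branchy form below shows the intended value, and the comment notes how the patch achieves it branch-free:

#include <stdint.h>
#include <assert.h>

/* Bits of a0 that spill into the next-higher digit after shifting left by
 * s (0 <= s < 64).  big.c computes this without a branch as
 * (a0 >> (64 - s)) & (-s >> 63): the mask is zero when s == 0, so the
 * otherwise-undefined shift by 64 is discarded. */
static uint64_t spilled_high_bits(uint64_t a0, unsigned s)
{
    return s ? (a0 >> (64 - s)) : 0;
}

int main(void)
{
    assert(spilled_high_bits(0xff00000000000000ull, 8) == 0xffu);
    assert(spilled_high_bits(0xff00000000000000ull, 0) == 0);
    return 0;
}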
*/ \ _un32 = (_a1 << _s) | ((_a0>>(D_EXP-_s)) & (-_s >> (D_EXP-1))); \ _un10 = _a0 << _s; \ _un1 = _un10 >> H_EXP; \ @@ -1506,13 +1509,15 @@ Eterm uword_to_big(UWord x, Eterm *y) */ Eterm small_to_big(Sint x, Eterm *y) { + Uint xu; if (x >= 0) { + xu = x; *y = make_pos_bignum_header(1); } else { - x = -x; + xu = -(Uint)x; *y = make_neg_bignum_header(1); } - BIG_DIGIT(y, 0) = x; + BIG_DIGIT(y, 0) = xu; return make_big(y); } @@ -1540,21 +1545,24 @@ Eterm erts_uint64_to_big(Uint64 x, Eterm **hpp) Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp) { Eterm *hp = *hpp; + Uint64 ux; int neg; - if (x >= 0) + if (x >= 0) { neg = 0; + ux = x; + } else { neg = 1; - x = -x; + ux = -(Uint64)x; } #if defined(ARCH_32) || HALFWORD_HEAP - if (x >= (((Uint64) 1) << 32)) { + if (ux >= (((Uint64) 1) << 32)) { if (neg) *hp = make_neg_bignum_header(2); else *hp = make_pos_bignum_header(2); - BIG_DIGIT(hp, 0) = (Uint) (x & ((Uint) 0xffffffff)); - BIG_DIGIT(hp, 1) = (Uint) ((x >> 32) & ((Uint) 0xffffffff)); + BIG_DIGIT(hp, 0) = (Uint) (ux & ((Uint) 0xffffffff)); + BIG_DIGIT(hp, 1) = (Uint) ((ux >> 32) & ((Uint) 0xffffffff)); *hpp += 3; } else @@ -1564,7 +1572,7 @@ Eterm erts_sint64_to_big(Sint64 x, Eterm **hpp) *hp = make_neg_bignum_header(1); else *hp = make_pos_bignum_header(1); - BIG_DIGIT(hp, 0) = (Uint) x; + BIG_DIGIT(hp, 0) = (Uint) ux; *hpp += 2; } return make_big(hp); @@ -2667,9 +2675,6 @@ Eterm erts_chars_to_integer(Process *BIF_P, char *bytes, res = big_plus_small(res, m, hp); } - if (is_big(res)) /* check if small */ - res = big_plus_small(res, 0, hp); /* includes conversion to small */ - if (neg) { if (is_small(res)) res = make_small(-signed_val(res)); @@ -2679,8 +2684,12 @@ Eterm erts_chars_to_integer(Process *BIF_P, char *bytes, } } - if (is_big(res)) { - hp += (big_arity(res) + 1); + if (is_not_small(res)) { + res = big_plus_small(res, 0, hp); /* includes conversion to small */ + + if (is_not_small(res)) { + hp += (big_arity(res) + 1); + } } HRelease(BIF_P, hp_end, hp); goto bytebuf_to_integer_1_done; diff --git a/erts/emulator/beam/big.h b/erts/emulator/beam/big.h index d80111822e..da31876d75 100644 --- a/erts/emulator/beam/big.h +++ b/erts/emulator/beam/big.h @@ -101,7 +101,7 @@ typedef Uint dsize_t; /* Vector size type */ #define ERTS_SINT64_HEAP_SIZE(X) \ (IS_SSMALL((X)) \ ? 0 \ - : ERTS_UINT64_BIG_HEAP_SIZE__((X) >= 0 ? (X) : -(X))) + : ERTS_UINT64_BIG_HEAP_SIZE__((X) >= 0 ? (X) : -(Uint64)(X))) #define ERTS_UINT64_HEAP_SIZE(X) \ (IS_USMALL(0, (X)) ? 
0 : ERTS_UINT64_BIG_HEAP_SIZE__((X))) diff --git a/erts/emulator/beam/binary.c b/erts/emulator/beam/binary.c index c7926f18af..f50d484576 100644 --- a/erts/emulator/beam/binary.c +++ b/erts/emulator/beam/binary.c @@ -31,12 +31,11 @@ #include "erl_binary.h" #include "erl_bits.h" -#ifdef DEBUG -static int list_to_bitstr_buf(Eterm obj, char* buf, Uint len); -#else -static int list_to_bitstr_buf(Eterm obj, char* buf); -#endif -static int bitstr_list_len(Eterm obj, Uint* num_bytes); +static Export binary_to_list_continue_export; +static Export list_to_binary_continue_export; + +static BIF_RETTYPE binary_to_list_continue(BIF_ALIST_1); +static BIF_RETTYPE list_to_binary_continue(BIF_ALIST_1); void erts_init_binary(void) @@ -49,6 +48,15 @@ erts_init_binary(void) "Internal error: Address of orig_bytes[0] of a Binary" " is *not* 8-byte aligned\n"); } + + erts_init_trap_export(&binary_to_list_continue_export, + am_erts_internal, am_binary_to_list_continue, 1, + &binary_to_list_continue); + + erts_init_trap_export(&list_to_binary_continue_export, + am_erts_internal, am_list_to_binary_continue, 1, + &list_to_binary_continue); + } /* @@ -333,6 +341,132 @@ BIF_RETTYPE integer_to_binary_1(BIF_ALIST_1) BIF_RET(res); } +#define ERTS_B2L_BYTES_PER_REDUCTION 256 + +typedef struct { + Eterm res; + Eterm *hp; +#ifdef DEBUG + Eterm *hp_end; +#endif + byte *bytes; + Uint size; + Uint bitoffs; +} ErtsB2LState; + +static void b2l_state_destructor(Binary *mbp) +{ + ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == b2l_state_destructor); +} + +static BIF_RETTYPE +binary_to_list_chunk(Process *c_p, + Eterm mb_eterm, + ErtsB2LState* sp, + int reds_left, + int gc_disabled) +{ + BIF_RETTYPE ret; + int bump_reds; + Uint size; + byte *bytes; + + size = (reds_left + 1)*ERTS_B2L_BYTES_PER_REDUCTION; + if (size > sp->size) + size = sp->size; + bytes = sp->bytes + (sp->size - size); + + bump_reds = (size - 1)/ERTS_B2L_BYTES_PER_REDUCTION + 1; + BUMP_REDS(c_p, bump_reds); + + ASSERT(is_list(sp->res) || is_nil(sp->res)); + + sp->res = erts_bin_bytes_to_list(sp->res, + sp->hp, + bytes, + size, + sp->bitoffs); + sp->size -= size; + sp->hp += 2*size; + + if (sp->size > 0) { + + if (!gc_disabled) + erts_set_gc_state(c_p, 0); + + ASSERT(c_p->flags & F_DISABLE_GC); + ASSERT(is_value(mb_eterm)); + ERTS_BIF_PREP_TRAP1(ret, + &binary_to_list_continue_export, + c_p, + mb_eterm); + } + else { + + ASSERT(sp->hp == sp->hp_end); + ASSERT(sp->size == 0); + + if (!gc_disabled || !erts_set_gc_state(c_p, 1)) + ERTS_BIF_PREP_RET(ret, sp->res); + else + ERTS_BIF_PREP_YIELD_RETURN(ret, c_p, sp->res); + ASSERT(!(c_p->flags & F_DISABLE_GC)); + } + + return ret; +} + +static ERTS_INLINE BIF_RETTYPE +binary_to_list(Process *c_p, Eterm *hp, Eterm tail, byte *bytes, Uint size, Uint bitoffs) +{ + int reds_left = ERTS_BIF_REDS_LEFT(c_p); + if (size < reds_left*ERTS_B2L_BYTES_PER_REDUCTION) { + Eterm res; + BIF_RETTYPE ret; + int bump_reds = (size - 1)/ERTS_B2L_BYTES_PER_REDUCTION + 1; + BUMP_REDS(c_p, bump_reds); + res = erts_bin_bytes_to_list(tail, hp, bytes, size, bitoffs); + ERTS_BIF_PREP_RET(ret, res); + return ret; + } + else { + Binary *mbp = erts_create_magic_binary(sizeof(ErtsB2LState), + b2l_state_destructor); + ErtsB2LState *sp = ERTS_MAGIC_BIN_DATA(mbp); + Eterm mb; + + sp->res = tail; + sp->hp = hp; +#ifdef DEBUG + sp->hp_end = sp->hp + 2*size; +#endif + sp->bytes = bytes; + sp->size = size; + sp->bitoffs = bitoffs; + + hp = HAlloc(c_p, PROC_BIN_SIZE); + mb = erts_mk_magic_binary_term(&hp, &MSO(c_p), mbp); + return binary_to_list_chunk(c_p, mb, sp, 
reds_left, 0); + } +} + +static BIF_RETTYPE binary_to_list_continue(BIF_ALIST_1) +{ + Binary *mbp = ((ProcBin *) binary_val(BIF_ARG_1))->val; + + ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == b2l_state_destructor); + + ASSERT(BIF_P->flags & F_DISABLE_GC); + + return binary_to_list_chunk(BIF_P, + BIF_ARG_1, + (ErtsB2LState*) ERTS_MAGIC_BIN_DATA(mbp), + ERTS_BIF_REDS_LEFT(BIF_P), + 1); +} + +HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_list, 1) + BIF_RETTYPE binary_to_list_1(BIF_ALIST_1) { Eterm real_bin; @@ -354,14 +488,15 @@ BIF_RETTYPE binary_to_list_1(BIF_ALIST_1) } else { Eterm* hp = HAlloc(BIF_P, 2 * size); byte* bytes = binary_bytes(real_bin)+offset; - - BIF_RET(erts_bin_bytes_to_list(NIL, hp, bytes, size, bitoffs)); + return binary_to_list(BIF_P, hp, NIL, bytes, size, bitoffs); } error: BIF_ERROR(BIF_P, BADARG); } +HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_list, 3) + BIF_RETTYPE binary_to_list_3(BIF_ALIST_3) { byte* bytes; @@ -387,12 +522,13 @@ BIF_RETTYPE binary_to_list_3(BIF_ALIST_3) } i = stop-start+1; hp = HAlloc(BIF_P, 2*i); - BIF_RET(erts_bin_bytes_to_list(NIL, hp, bytes+start-1, i, bitoffs)); - + return binary_to_list(BIF_P, hp, NIL, bytes+start-1, i, bitoffs); error: BIF_ERROR(BIF_P, BADARG); } +HIPE_WRAPPER_BIF_DISABLE_GC(bitstring_to_list, 1) + BIF_RETTYPE bitstring_to_list_1(BIF_ALIST_1) { Eterm real_bin; @@ -431,124 +567,441 @@ BIF_RETTYPE bitstring_to_list_1(BIF_ALIST_1) previous = CONS(hp, make_binary(last), previous); hp += 2; } - BIF_RET(erts_bin_bytes_to_list(previous, hp, bytes, size, bitoffs)); + + return binary_to_list(BIF_P, hp, previous, bytes, size, bitoffs); } /* Turn a possibly deep list of ints (and binaries) into */ /* One large binary object */ -/* - * This bif also exists in the binary module, under the name - * binary:list_to_bin/1, why it's divided into interface and - * implementation. Also the backend for iolist_to_binary_1. - */ +typedef enum { + ERTS_L2B_OK, + ERTS_L2B_YIELD, + ERTS_L2B_TYPE_ERROR, + ERTS_L2B_OVERFLOW_ERROR +} ErtsL2BResult; -BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg) -{ +#define ERTS_L2B_STATE_INITER(C_P, ARG, BIF, SZFunc, TBufFunc) \ + {ERTS_IOLIST2BUF_STATE_INITER((C_P), (ARG)), \ + (ARG), THE_NON_VALUE, (BIF), (SZFunc), (TBufFunc)} + +#define ERTS_L2B_STATE_MOVE(TO, FROM) \ + sys_memcpy((void *) (TO), (void *) (FROM), sizeof(ErtsL2BState)) + +typedef struct ErtsL2BState_ ErtsL2BState; + +struct ErtsL2BState_ { + ErtsIOList2BufState buf; + Eterm arg; Eterm bin; - Eterm h,t; - ErlDrvSizeT size; - byte* bytes; -#ifdef DEBUG - ErlDrvSizeT offset; -#endif + Export *bif; + int (*iolist_to_buf_size)(ErtsIOListState *); + ErlDrvSizeT (*iolist_to_buf)(ErtsIOList2BufState *); +}; + +static ERTS_INLINE ErtsL2BResult +list_to_binary_engine(ErtsL2BState *sp) +{ + ErlDrvSizeT res; + Process *c_p = sp->buf.iolist.c_p; + + /* + * have_size == 0 while sp->iolist_to_buf_size() + * has not finished the calculation. + */ + + if (!sp->buf.iolist.have_size) { + switch (sp->iolist_to_buf_size(&sp->buf.iolist)) { + case ERTS_IOLIST_YIELD: + return ERTS_L2B_YIELD; + case ERTS_IOLIST_OVERFLOW: + return ERTS_L2B_OVERFLOW_ERROR; + case ERTS_IOLIST_TYPE: + return ERTS_L2B_TYPE_ERROR; + case ERTS_IOLIST_OK: + break; + default: + ASSERT(0); + break; + } + + ASSERT(sp->buf.iolist.have_size); + + /* + * Size calculated... 
Setup state for + * sp->iolist_to_buf_*() + */ + + sp->bin = new_binary(c_p, + (byte *) NULL, + sp->buf.iolist.size); + + if (sp->buf.iolist.size == 0) + return ERTS_L2B_OK; + + sp->buf.buf = (char *) binary_bytes(sp->bin); + sp->buf.len = sp->buf.iolist.size; + sp->buf.iolist.obj = sp->arg; - if (is_nil(arg)) { - BIF_RET(new_binary(p,(byte*)"",0)); + if (sp->buf.iolist.reds_left <= 0) { + BUMP_ALL_REDS(c_p); + return ERTS_L2B_YIELD; + } } - if (is_not_list(arg)) { - goto error; + + ASSERT(sp->buf.iolist.size != 0); + ASSERT(is_value(sp->bin)); + ASSERT(sp->buf.buf); + + res = sp->iolist_to_buf(&sp->buf); + + if (!ERTS_IOLIST_TO_BUF_FAILED(res)) { + ASSERT(res == 0); + return ERTS_L2B_OK; } - /* check for [binary()] case */ - h = CAR(list_val(arg)); - t = CDR(list_val(arg)); - if (is_binary(h) && is_nil(t) && !( - HEADER_SUB_BIN == *(binary_val(h)) && ( - ((ErlSubBin *)binary_val(h))->bitoffs != 0 || - ((ErlSubBin *)binary_val(h))->bitsize != 0 - ))) { - return h; - } - switch (erts_iolist_size(arg, &size)) { - case ERTS_IOLIST_OVERFLOW: BIF_ERROR(p, SYSTEM_LIMIT); - case ERTS_IOLIST_TYPE: goto error; - default: ; - } - bin = new_binary(p, (byte *)NULL, size); - bytes = binary_bytes(bin); -#ifdef DEBUG - offset = -#endif - erts_iolist_to_buf(arg, (char*) bytes, size); - ASSERT(offset == 0); - BIF_RET(bin); + switch (res) { + case ERTS_IOLIST_TO_BUF_YIELD: + return ERTS_L2B_YIELD; + case ERTS_IOLIST_TO_BUF_OVERFLOW: + return ERTS_L2B_OVERFLOW_ERROR; + case ERTS_IOLIST_TO_BUF_TYPE_ERROR: + return ERTS_L2B_TYPE_ERROR; + default: + ERTS_INTERNAL_ERROR("Invalid return value from iolist_to_buf_yielding()"); + return ERTS_L2B_TYPE_ERROR; + } +} + +static void +l2b_state_destructor(Binary *mbp) +{ + ErtsL2BState *sp = ERTS_MAGIC_BIN_DATA(mbp); + ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == l2b_state_destructor); + DESTROY_SAVED_ESTACK(&sp->buf.iolist.estack); +} + +static ERTS_INLINE Eterm +l2b_final_touch(Process *c_p, ErtsL2BState *sp) +{ + Eterm *hp; + ErlSubBin* sbin; + if (sp->buf.offset == 0) + return sp->bin; + + hp = HAlloc(c_p, ERL_SUB_BIN_SIZE); + ASSERT(sp->buf.offset > 0); + sbin = (ErlSubBin *) hp; + sbin->thing_word = HEADER_SUB_BIN; + sbin->size = sp->buf.iolist.size-1; + sbin->offs = 0; + sbin->orig = sp->bin; + sbin->bitoffs = 0; + sbin->bitsize = sp->buf.offset; + sbin->is_writable = 0; + return make_binary(sbin); +} + +static BIF_RETTYPE +list_to_binary_chunk(Eterm mb_eterm, + ErtsL2BState* sp, + int reds_left, + int gc_disabled) +{ + Eterm err = BADARG; + BIF_RETTYPE ret; + Process *c_p = sp->buf.iolist.c_p; + + sp->buf.iolist.reds_left = reds_left; - error: - BIF_ERROR(p, BADARG); + switch (list_to_binary_engine(sp)) { + + case ERTS_L2B_OK: { + Eterm result = l2b_final_touch(c_p, sp); + if (!gc_disabled || !erts_set_gc_state(c_p, 1)) + ERTS_BIF_PREP_RET(ret, result); + else + ERTS_BIF_PREP_YIELD_RETURN(ret, c_p, result); + ASSERT(!(c_p->flags & F_DISABLE_GC)); + break; + } + case ERTS_L2B_YIELD: + if (!gc_disabled) { + /* first yield... 
*/ + Eterm *hp; + Binary *mbp = erts_create_magic_binary(sizeof(ErtsL2BState), + l2b_state_destructor); + ErtsL2BState *new_sp = ERTS_MAGIC_BIN_DATA(mbp); + + ERTS_L2B_STATE_MOVE(new_sp, sp); + sp = new_sp; + + hp = HAlloc(c_p, PROC_BIN_SIZE); + mb_eterm = erts_mk_magic_binary_term(&hp, &MSO(c_p), mbp); + + ASSERT(is_value(mb_eterm)); + + erts_set_gc_state(c_p, 0); + } + + ASSERT(c_p->flags & F_DISABLE_GC); + + ERTS_BIF_PREP_TRAP1(ret, + &list_to_binary_continue_export, + c_p, + mb_eterm); + break; + + case ERTS_L2B_OVERFLOW_ERROR: + err = SYSTEM_LIMIT; + /* fall through */ + + case ERTS_L2B_TYPE_ERROR: + if (!gc_disabled) + ERTS_BIF_PREP_ERROR(ret, c_p, err); + else { + if (erts_set_gc_state(c_p, 1)) + ERTS_VBUMP_ALL_REDS(c_p); + + ERTS_BIF_PREP_ERROR_TRAPPED1(ret, + c_p, + err, + sp->bif, + sp->arg); + } + + ASSERT(!(c_p->flags & F_DISABLE_GC)); + break; + + default: + ERTS_INTERNAL_ERROR("Invalid return value from list_to_binary_engine()"); + ERTS_BIF_PREP_ERROR(ret,c_p, EXC_INTERNAL_ERROR); + break; + } + return ret; } +static BIF_RETTYPE list_to_binary_continue(BIF_ALIST_1) +{ + Binary *mbp = ((ProcBin *) binary_val(BIF_ARG_1))->val; + ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(mbp) == l2b_state_destructor); + + ASSERT(BIF_P->flags & F_DISABLE_GC); + + return list_to_binary_chunk(BIF_ARG_1, + ERTS_MAGIC_BIN_DATA(mbp), + ERTS_BIF_REDS_LEFT(BIF_P), + 1); +} + +BIF_RETTYPE erts_list_to_binary_bif(Process *c_p, Eterm arg, Export *bif) +{ + BIF_RETTYPE ret; + + if (is_nil(arg)) + ERTS_BIF_PREP_RET(ret, new_binary(c_p, (byte *) "", 0)); + else if (is_not_list(arg)) + ERTS_BIF_PREP_ERROR(ret, c_p, BADARG); + else { + /* check for [binary()] case */ + Eterm h = CAR(list_val(arg)); + Eterm t = CDR(list_val(arg)); + if (is_binary(h) + && is_nil(t) + && !(HEADER_SUB_BIN == *(binary_val(h)) + && (((ErlSubBin *)binary_val(h))->bitoffs != 0 + || ((ErlSubBin *)binary_val(h))->bitsize != 0))) { + ERTS_BIF_PREP_RET(ret, h); + } + else { + ErtsL2BState state = ERTS_L2B_STATE_INITER(c_p, + arg, + bif, + erts_iolist_size_yielding, + erts_iolist_to_buf_yielding); + int orig_reds_left = ERTS_BIF_REDS_LEFT(c_p); + + /* + * First try to do it all at once without having to use + * yielding iolist_to_buf(). + */ + state.buf.iolist.reds_left = orig_reds_left; + switch (erts_iolist_size_yielding(&state.buf.iolist)) { + case ERTS_IOLIST_OK: { + ErlDrvSizeT size = state.buf.iolist.size; + Eterm bin; + char *buf; + + if (size == 0) { + ERTS_BIF_PREP_RET(ret, new_binary(c_p, (byte *) NULL, 0)); + break; /* done */ + } + + bin = new_binary(c_p, (byte *) NULL, size); + buf = (char *) binary_bytes(bin); + + if (size < ERTS_IOLIST_TO_BUF_BYTES_PER_RED*CONTEXT_REDS) { + /* An (over) estimation of reductions needed */ + int reds_left = state.buf.iolist.reds_left; + int to_buf_reds = orig_reds_left - reds_left; + to_buf_reds += size/ERTS_IOLIST_TO_BUF_BYTES_PER_RED; + if (to_buf_reds <= reds_left) { + ErlDrvSizeT res; + + res = erts_iolist_to_buf(arg, buf, size); + if (res == 0) { + BUMP_REDS(c_p, to_buf_reds); + ERTS_BIF_PREP_RET(ret, bin); + break; /* done */ + } + if (!ERTS_IOLIST_TO_BUF_FAILED(res)) + ERTS_INTERNAL_ERROR("iolist_size/iolist_to_buf missmatch"); + if (res == ERTS_IOLIST_TO_BUF_OVERFLOW) + goto overflow; + goto type_error; + } + } + /* + * Since size has been computed list_to_binary_chunk() expects + * state prepared for iolist_to_buf. + */ + state.bin = bin; + state.buf.buf = buf; + state.buf.len = size; + state.buf.iolist.obj = arg; + /* Fall through... 
*/ + } + case ERTS_IOLIST_YIELD: + ret = list_to_binary_chunk(THE_NON_VALUE, + &state, + state.buf.iolist.reds_left, + 0); + break; + case ERTS_IOLIST_OVERFLOW: + overflow: + ERTS_BIF_PREP_ERROR(ret, c_p, SYSTEM_LIMIT); + break; + case ERTS_IOLIST_TYPE: + type_error: + default: + ERTS_BIF_PREP_ERROR(ret, c_p, BADARG); + break; + } + } + } + return ret; +} + +HIPE_WRAPPER_BIF_DISABLE_GC(list_to_binary, 1) + BIF_RETTYPE list_to_binary_1(BIF_ALIST_1) { - return erts_list_to_binary_bif(BIF_P, BIF_ARG_1); + return erts_list_to_binary_bif(BIF_P, BIF_ARG_1, bif_export[BIF_list_to_binary_1]); } -/* Turn a possibly deep list of ints (and binaries) into */ -/* One large binary object */ +HIPE_WRAPPER_BIF_DISABLE_GC(iolist_to_binary, 1) BIF_RETTYPE iolist_to_binary_1(BIF_ALIST_1) { if (is_binary(BIF_ARG_1)) { BIF_RET(BIF_ARG_1); } - return erts_list_to_binary_bif(BIF_P, BIF_ARG_1); + return erts_list_to_binary_bif(BIF_P, BIF_ARG_1, bif_export[BIF_iolist_to_binary_1]); } +static int bitstr_list_len(ErtsIOListState *); +static ErlDrvSizeT list_to_bitstr_buf_yielding(ErtsIOList2BufState *); +static ErlDrvSizeT list_to_bitstr_buf_not_yielding(ErtsIOList2BufState *); + +HIPE_WRAPPER_BIF_DISABLE_GC(list_to_bitstring, 1) + BIF_RETTYPE list_to_bitstring_1(BIF_ALIST_1) { - Eterm bin; - Uint sz; - int offset; - byte* bytes; - ErlSubBin* sb1; - Eterm* hp; - - if (is_nil(BIF_ARG_1)) { - BIF_RET(new_binary(BIF_P,(byte*)"",0)); - } - if (is_not_list(BIF_ARG_1)) { - error: - BIF_ERROR(BIF_P, BADARG); - } - switch (bitstr_list_len(BIF_ARG_1, &sz)) { - case ERTS_IOLIST_TYPE: - goto error; - case ERTS_IOLIST_OVERFLOW: - BIF_ERROR(BIF_P, SYSTEM_LIMIT); - } - bin = new_binary(BIF_P, (byte *)NULL, sz); - bytes = binary_bytes(bin); -#ifdef DEBUG - offset = list_to_bitstr_buf(BIF_ARG_1, (char*) bytes, sz); -#else - offset = list_to_bitstr_buf(BIF_ARG_1, (char*) bytes); -#endif - ASSERT(offset >= 0); - if (offset > 0) { - hp = HAlloc(BIF_P, ERL_SUB_BIN_SIZE); - sb1 = (ErlSubBin *) hp; - sb1->thing_word = HEADER_SUB_BIN; - sb1->size = sz-1; - sb1->offs = 0; - sb1->orig = bin; - sb1->bitoffs = 0; - sb1->bitsize = offset; - sb1->is_writable = 0; - bin = make_binary(sb1); + BIF_RETTYPE ret; + + if (is_nil(BIF_ARG_1)) + ERTS_BIF_PREP_RET(ret, new_binary(BIF_P, (byte *) "", 0)); + else if (is_not_list(BIF_ARG_1)) + ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG); + else { + /* check for [bitstring()] case */ + Eterm h = CAR(list_val(BIF_ARG_1)); + Eterm t = CDR(list_val(BIF_ARG_1)); + if (is_binary(h) && is_nil(t)) { + ERTS_BIF_PREP_RET(ret, h); + } + else { + ErtsL2BState state = ERTS_L2B_STATE_INITER(BIF_P, + BIF_ARG_1, + bif_export[BIF_list_to_bitstring_1], + bitstr_list_len, + list_to_bitstr_buf_yielding); + int orig_reds_left = ERTS_BIF_REDS_LEFT(BIF_P); + + /* + * First try to do it all at once without having to use + * yielding list_to_bitstr_buf(). 
+ */ + state.buf.iolist.reds_left = orig_reds_left; + switch (bitstr_list_len(&state.buf.iolist)) { + case ERTS_IOLIST_OK: { + ErlDrvSizeT size = state.buf.iolist.size; + + state.bin = new_binary(BIF_P, (byte *) NULL, size); + state.buf.buf = (char *) binary_bytes(state.bin); + state.buf.len = size; + state.buf.iolist.obj = BIF_ARG_1; + + if (size < ERTS_IOLIST_TO_BUF_BYTES_PER_RED*CONTEXT_REDS) { + /* An (over) estimation of reductions needed */ + int reds_left = state.buf.iolist.reds_left; + int to_buf_reds = orig_reds_left - reds_left; + to_buf_reds += size/ERTS_IOLIST_TO_BUF_BYTES_PER_RED; + if (to_buf_reds <= reds_left) { + ErlDrvSizeT res; + + res = list_to_bitstr_buf_not_yielding(&state.buf); + if (res == 0) { + Eterm res_bin = l2b_final_touch(BIF_P, &state); + BUMP_REDS(BIF_P, to_buf_reds); + ERTS_BIF_PREP_RET(ret, res_bin); + break; /* done */ + } + if (!ERTS_IOLIST_TO_BUF_FAILED(res)) + ERTS_INTERNAL_ERROR("iolist_size/iolist_to_buf missmatch"); + if (res == ERTS_IOLIST_TO_BUF_OVERFLOW) + goto overflow; + goto type_error; + } + } + /* + * Since size has been computed list_to_binary_chunk() expects + * the state prepared for list_to_bitstr_buf. + */ + + /* Fall through... */ + } + case ERTS_IOLIST_YIELD: + ret = list_to_binary_chunk(THE_NON_VALUE, + &state, + state.buf.iolist.reds_left, + 0); + break; + case ERTS_IOLIST_OVERFLOW: + overflow: + ERTS_BIF_PREP_ERROR(ret, BIF_P, SYSTEM_LIMIT); + break; + case ERTS_IOLIST_TYPE: + type_error: + default: + ERTS_BIF_PREP_ERROR(ret, BIF_P, BADARG); + break; + } + } } - - BIF_RET(bin); + + return ret; } BIF_RETTYPE split_binary_2(BIF_ALIST_2) @@ -605,123 +1058,353 @@ BIF_RETTYPE split_binary_2(BIF_ALIST_2) * Local functions. */ +static int +list_to_bitstr_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp); + /* * The input list is assumed to be type-correct and the buffer is * assumed to be of sufficient size. Those assumptions are verified in * the DEBUG-built emulator. 
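As in erts_list_to_binary_bif() above, list_to_bitstring/1 first runs the size pass against the reduction budget and only sets up the yielding, magic-binary-backed copy when the estimated cost does not fit in what is left of the current slice. A rough sketch of that decision; BYTES_PER_RED is an illustrative stand-in for ERTS_IOLIST_TO_BUF_BYTES_PER_RED, and the real code additionally caps size at ERTS_IOLIST_TO_BUF_BYTES_PER_RED*CONTEXT_REDS before even considering the eager path:

#include <stddef.h>

#define BYTES_PER_RED 64   /* stand-in for ERTS_IOLIST_TO_BUF_BYTES_PER_RED */

/* Decide whether the flat copy can be done eagerly in this slice.
 * reds_before/reds_after bracket the size calculation; size is its result. */
static int copy_eagerly(int reds_before, int reds_after, size_t size)
{
    int to_buf_reds = (reds_before - reds_after)       /* already spent on sizing */
                    + (int)(size / BYTES_PER_RED);     /* (over) estimate for the copy */
    return to_buf_reds <= reds_after;                  /* else trap via the saved state */
}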
*/ -static int +static ErlDrvSizeT +list_to_bitstr_buf(int yield_support, ErtsIOList2BufState *state) +{ + +#undef LIST_TO_BITSTR_BUF_BCOPY_DBG +#undef LIST_TO_BITSTR_BUF_BCOPY #ifdef DEBUG -list_to_bitstr_buf(Eterm obj, char* buf, Uint len) +#define LIST_TO_BITSTR_BUF_BCOPY_DBG \ + len -= size + (offset>7); #else -list_to_bitstr_buf(Eterm obj, char* buf) +#define LIST_TO_BITSTR_BUF_BCOPY_DBG #endif -{ - Eterm* objp; - int offset = 0; +#define LIST_TO_BITSTR_BUF_BCOPY(CONSP) \ + do { \ + byte* bptr; \ + Uint bitsize; \ + Uint bitoffs; \ + Uint num_bits; \ + size_t size = binary_size(obj); \ + if (yield_support) { \ + size_t max_size = ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \ + if (yield_count > 0) \ + max_size *= yield_count+1; \ + if (size > max_size) { \ + state->objp = CONSP; \ + goto L_bcopy_yield; \ + } \ + if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) { \ + int cost = (int) size; \ + cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \ + yield_count -= cost; \ + } \ + } \ + ASSERT(size <= len); \ + ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize); \ + num_bits = 8*size+bitsize; \ + copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits); \ + offset += bitsize; \ + buf += size + (offset>7); \ + LIST_TO_BITSTR_BUF_BCOPY_DBG; \ + offset = offset & 7; \ + } while(0) + +#ifdef DEBUG + ErlDrvSizeT len; +#endif + Eterm obj; + char *buf; + Eterm *objp = NULL; + int offset; + int init_yield_count = 0, yield_count; DECLARE_ESTACK(s); - goto L_again; - - while (!ESTACK_ISEMPTY(s)) { - obj = ESTACK_POP(s); - L_again: - if (is_list(obj)) { - L_iter_list: - objp = list_val(obj); - obj = CAR(objp); - if (is_byte(obj)) { - ASSERT(len > 0); - if (offset == 0) { - *buf++ = unsigned_val(obj); - } else { - *buf = (char)((unsigned_val(obj) >> offset) | - ((*buf >> (8-offset)) << (8-offset))); - buf++; - *buf = (unsigned_val(obj) << (8-offset)); - } + + obj = state->iolist.obj; + buf = state->buf; + offset = state->offset; #ifdef DEBUG - len--; + len = state->len; #endif - } else if (is_binary(obj)) { - byte* bptr; - size_t size = binary_size(obj); - Uint bitsize; - Uint bitoffs; - Uint num_bits; - - ASSERT(size <= len); - ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize); - num_bits = 8*size+bitsize; - copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits); - offset += bitsize; - buf += size + (offset>7); + + if (!yield_support) { + yield_count = init_yield_count = 0; /* Shut up faulty warning... >:-( */ + goto L_again; + } + else { + + if (state->iolist.reds_left <= 0) + return ERTS_IOLIST_TO_BUF_YIELD; + + ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK); + init_yield_count = (ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED + * state->iolist.reds_left); + yield_count = init_yield_count; + + if (!state->iolist.estack.start) + goto L_again; + else { + int chk_stack; + /* Restart; restore state... */ + ESTACK_RESTORE(s, &state->iolist.estack); + + if (!state->bcopy.bptr) + chk_stack = 0; + else { + chk_stack = 1; + if (list_to_bitstr_buf_bcopy(state, THE_NON_VALUE, &yield_count)) { + /* Yield again... 
*/ + BUMP_ALL_REDS(state->iolist.c_p); + state->iolist.reds_left = 0; + ESTACK_SAVE(s, &state->iolist.estack); + return ERTS_IOLIST_TO_BUF_YIELD; + } + buf = state->buf; + offset = state->offset; #ifdef DEBUG - len -= size + (offset>7); + len = state->len; #endif - offset = offset & 7; - } else if (is_list(obj)) { - ESTACK_PUSH(s, CDR(objp)); - goto L_iter_list; /* on head */ - } else { - ASSERT(is_nil(obj)); } - obj = CDR(objp); - if (is_list(obj)) { - goto L_iter_list; /* on tail */ - } else if (is_binary(obj)) { - byte* bptr; - size_t size = binary_size(obj); - Uint bitsize; - Uint bitoffs; - Uint num_bits; - - ASSERT(size <= len); - ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize); - num_bits = 8*size+bitsize; - copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits); - offset += bitsize; - buf += size+(offset>7); + objp = state->objp; + state->objp = NULL; + + if (objp) + goto L_tail; + if (!chk_stack) + goto L_again; + /* check stack */ + } + } + + while (!ESTACK_ISEMPTY(s)) { + obj = ESTACK_POP(s); + L_again: + if (is_list(obj)) { + while (1) { /* Tail loop */ + while (1) { /* Head loop */ + if (yield_support && --yield_count <= 0) + goto L_yield; + objp = list_val(obj); + obj = CAR(objp); + if (is_byte(obj)) { + ASSERT(len > 0); + if (offset == 0) { + *buf++ = unsigned_val(obj); + } else { + *buf = (char)((unsigned_val(obj) >> offset) | + ((*buf >> (8-offset)) << (8-offset))); + buf++; + *buf = (unsigned_val(obj) << (8-offset)); + } #ifdef DEBUG - len -= size+(offset>7); + len--; #endif - offset = offset & 7; - } else { - ASSERT(is_nil(obj)); + } else if (is_binary(obj)) { + LIST_TO_BITSTR_BUF_BCOPY(objp); + } else if (is_list(obj)) { + ESTACK_PUSH(s, CDR(objp)); + continue; /* Head loop */ + } else { + ASSERT(is_nil(obj)); + } + break; + } + + L_tail: + + obj = CDR(objp); + if (is_list(obj)) { + continue; /* Tail loop */ + } else if (is_binary(obj)) { + LIST_TO_BITSTR_BUF_BCOPY(NULL); + } else { + ASSERT(is_nil(obj)); + } + break; } } else if (is_binary(obj)) { - byte* bptr; - size_t size = binary_size(obj); - Uint bitsize; - Uint bitoffs; - Uint num_bits; - - ASSERT(size <= len); - ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize); - num_bits = 8*size+bitsize; - copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits); - offset += bitsize; - buf += size + (offset>7); -#ifdef DEBUG - len -= size + (offset>7); -#endif - offset = offset & 7; + LIST_TO_BITSTR_BUF_BCOPY(NULL); } else { + if (yield_support && --yield_count <= 0) + goto L_yield; ASSERT(is_nil(obj)); } } DESTROY_ESTACK(s); - return offset; + + if (yield_support) { + int reds; + CLEAR_SAVED_ESTACK(&state->iolist.estack); + reds = ((init_yield_count - yield_count - 1) + / ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED) + 1; + BUMP_REDS(state->iolist.c_p, reds); + state->iolist.reds_left -= reds; + if (state->iolist.reds_left < 0) + state->iolist.reds_left = 0; + } + state->buf = buf; + state->offset = offset; + return 0; + +L_bcopy_yield: + + state->buf = buf; + state->offset = offset; +#ifdef DEBUG + state->len = len; +#endif + + if (list_to_bitstr_buf_bcopy(state, obj, &yield_count) == 0) + ERTS_INTERNAL_ERROR("Missing yield"); + + BUMP_ALL_REDS(state->iolist.c_p); + state->iolist.reds_left = 0; + ESTACK_SAVE(s, &state->iolist.estack); + return ERTS_IOLIST_TO_BUF_YIELD; + +L_yield: + + BUMP_ALL_REDS(state->iolist.c_p); + state->iolist.reds_left = 0; + state->iolist.obj = obj; + state->buf = buf; + state->offset = offset; + ESTACK_SAVE(s, &state->iolist.estack); +#ifdef DEBUG + state->len = len; +#endif + return 
ERTS_IOLIST_TO_BUF_YIELD; + + +#undef LIST_TO_BITSTR_BUF_BCOPY_DBG +#undef LIST_TO_BITSTR_BUF_BCOPY + +} + +static ErlDrvSizeT +list_to_bitstr_buf_yielding(ErtsIOList2BufState *state) +{ + return list_to_bitstr_buf(1, state); +} + +static ErlDrvSizeT +list_to_bitstr_buf_not_yielding(ErtsIOList2BufState *state) +{ + return list_to_bitstr_buf(0, state); } static int -bitstr_list_len(Eterm obj, Uint* num_bytes) +list_to_bitstr_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp) +{ + int res; + char *buf = state->buf; + char *next_buf; + int offset = state->offset; + int next_offset; +#ifdef DEBUG + ErlDrvSizeT len = state->len; + ErlDrvSizeT next_len; +#endif + byte* bptr; + size_t size; + size_t max_size; + Uint bitoffs; + Uint num_bits; + Uint bitsize; + int yield_count = *yield_countp; + + if (state->bcopy.bptr) { + bptr = state->bcopy.bptr; + size = state->bcopy.size; + bitoffs = state->bcopy.bitoffs; + bitsize = state->bcopy.bitsize; + state->bcopy.bptr = NULL; + } + else { + + ASSERT(is_binary(obj)); + + size = binary_size(obj); + + ASSERT(size <= len); + + ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize); + } + + max_size = (size_t) ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; + if (yield_count > 0) + max_size *= (size_t) (yield_count+1); + + if (size <= max_size) { + if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) { + int cost = (int) size; + cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; + yield_count -= cost; + } + next_offset = offset + bitsize; + next_buf = buf + size+(next_offset>7); +#ifdef DEBUG + next_len = len - size+(next_offset>7); +#endif + next_offset &= 7; + num_bits = 8*size+bitsize; + res = 0; + } + else { + ASSERT(0 < max_size && max_size < size); + yield_count = 0; + state->bcopy.bptr = bptr + max_size; + state->bcopy.bitoffs = bitoffs; + state->bcopy.bitsize = bitsize; + state->bcopy.size = size - max_size; + next_buf = buf + max_size; +#ifdef DEBUG + next_len = len - max_size; +#endif + next_offset = offset; + num_bits = 8*max_size; + size = max_size; + res = 1; + } + + copy_binary_to_buffer(buf, offset, bptr, bitoffs, num_bits); + + state->offset = next_offset; + state->buf = next_buf; +#ifdef DEBUG + state->len = next_len; +#endif + *yield_countp = yield_count; + + return res; +} + +static int +bitstr_list_len(ErtsIOListState *state) { Eterm* objp; - Uint len = 0; - Uint offs = 0; + Eterm obj; + Uint len, offs; + int res, init_yield_count, yield_count; DECLARE_ESTACK(s); + + if (state->reds_left <= 0) + return ERTS_IOLIST_YIELD; + + len = (Uint) state->size; + offs = state->offs; + obj = state->obj; + + ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK); + init_yield_count = ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED; + init_yield_count *= state->reds_left; + yield_count = init_yield_count; + if (state->estack.start) { + /* Restart; restore estack... 
*/ + ESTACK_RESTORE(s, &state->estack); + } + goto L_again; #define SAFE_ADD(Var, Val) \ @@ -748,46 +1431,55 @@ bitstr_list_len(Eterm obj, Uint* num_bytes) obj = ESTACK_POP(s); L_again: if (is_list(obj)) { - L_iter_list: - objp = list_val(obj); - /* Head */ - obj = CAR(objp); - if (is_byte(obj)) { - len++; - if (len == 0) { - goto L_overflow_error; + while (1) { /* Tail loop */ + while (1) { /* Head loop */ + if (--yield_count <= 0) + goto L_yield; + objp = list_val(obj); + /* Head */ + obj = CAR(objp); + if (is_byte(obj)) { + len++; + if (len == 0) { + goto L_overflow_error; + } + } else if (is_binary(obj)) { + SAFE_ADD(len, binary_size(obj)); + SAFE_ADD_BITSIZE(offs, obj); + } else if (is_list(obj)) { + ESTACK_PUSH(s, CDR(objp)); + continue; /* Head loop */ + } else if (is_not_nil(obj)) { + goto L_type_error; + } + break; } - } else if (is_binary(obj)) { - SAFE_ADD(len, binary_size(obj)); - SAFE_ADD_BITSIZE(offs, obj); - } else if (is_list(obj)) { - ESTACK_PUSH(s, CDR(objp)); - goto L_iter_list; /* on head */ - } else if (is_not_nil(obj)) { - goto L_type_error; + /* Tail */ + obj = CDR(objp); + if (is_list(obj)) + continue; /* Tail loop */ + else if (is_binary(obj)) { + SAFE_ADD(len, binary_size(obj)); + SAFE_ADD_BITSIZE(offs, obj); + } else if (is_not_nil(obj)) { + goto L_type_error; + } + break; } - /* Tail */ - obj = CDR(objp); - if (is_list(obj)) - goto L_iter_list; /* on tail */ - else if (is_binary(obj)) { + } else { + if (--yield_count <= 0) + goto L_yield; + if (is_binary(obj)) { SAFE_ADD(len, binary_size(obj)); SAFE_ADD_BITSIZE(offs, obj); } else if (is_not_nil(obj)) { goto L_type_error; } - } else if (is_binary(obj)) { - SAFE_ADD(len, binary_size(obj)); - SAFE_ADD_BITSIZE(offs, obj); - } else if (is_not_nil(obj)) { - goto L_type_error; } } #undef SAFE_ADD #undef SAFE_ADD_BITSIZE - DESTROY_ESTACK(s); - /* * Make sure that the number of bits in the bitstring will fit * in an Uint to ensure that the binary can be matched using @@ -800,15 +1492,42 @@ bitstr_list_len(Eterm obj, Uint* num_bytes) if (len << 3 < len) { goto L_overflow_error; } - *num_bytes = len; - return ERTS_IOLIST_OK; + state->size = len; - L_type_error: - DESTROY_ESTACK(s); - return ERTS_IOLIST_TYPE; + res = ERTS_IOLIST_OK; + + L_return: { + int yc = init_yield_count - yield_count; + int reds; + + DESTROY_ESTACK(s); + CLEAR_SAVED_ESTACK(&state->estack); + + reds = (yc - 1)/ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED + 1; + BUMP_REDS(state->c_p, reds); + state->reds_left -= reds; + state->size = (ErlDrvSizeT) len; + state->have_size = 1; + return res; + } L_overflow_error: - DESTROY_ESTACK(s); - return ERTS_IOLIST_OVERFLOW; + res = ERTS_IOLIST_OVERFLOW; + len = 0; + goto L_return; + + L_type_error: + res = ERTS_IOLIST_TYPE; + len = 0; + goto L_return; + + L_yield: + BUMP_ALL_REDS(state->c_p); + state->reds_left = 0; + state->size = len; + state->offs = offs; + state->obj = obj; + ESTACK_SAVE(s, &state->estack); + return ERTS_IOLIST_YIELD; } diff --git a/erts/emulator/beam/break.c b/erts/emulator/beam/break.c index 7d4f52ee23..08265b590d 100644 --- a/erts/emulator/beam/break.c +++ b/erts/emulator/beam/break.c @@ -256,6 +256,7 @@ print_process_info(int to, void *to_arg, Process *p) p->current[1], p->current[2]); } + erts_print(to, to_arg, "Run queue: %d\n", erts_get_runq_proc(p)->ix); erts_print(to, to_arg, "Spawned by: %T\n", p->parent); approx_started = (time_t) p->approx_started; diff --git a/erts/emulator/beam/erl_alloc.c b/erts/emulator/beam/erl_alloc.c index 05ac24e04d..90cd227fae 100644 --- 
a/erts/emulator/beam/erl_alloc.c +++ b/erts/emulator/beam/erl_alloc.c @@ -1873,8 +1873,8 @@ erts_alc_fatal_error(int error, int func, ErtsAlcType_t n, ...) size = va_arg(argp, Uint); va_end(argp); erl_exit(1, - "%s: Cannot %s %lu bytes of memory (of type \"%s\").\n", - allctr_str, op, size, t_str); + "%s: Cannot %s %lu bytes of memory (of type \"%s\", thread %d).\n", + allctr_str, op, size, t_str, ERTS_ALC_GET_THR_IX()); break; } case ERTS_ALC_E_NOALLCTR: diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types index 17ac6316b7..37354b7f8d 100644 --- a/erts/emulator/beam/erl_alloc.types +++ b/erts/emulator/beam/erl_alloc.types @@ -357,6 +357,7 @@ type DB_MS_PSDO_PROC LONG_LIVED_LOW ETS db_match_pseudo_proc type SCHDLR_DATA LONG_LIVED_LOW SYSTEM scheduler_data type LL_TEMP_TERM LONG_LIVED_LOW SYSTEM ll_temp_term +type NIF_TRAP_EXPORT STANDARD_LOW CODE nif_trap_export_entry type EXPORT LONG_LIVED_LOW CODE export_entry type MONITOR_SH STANDARD_LOW PROCESSES monitor_sh type NLINK_SH STANDARD_LOW PROCESSES nlink_sh @@ -375,6 +376,7 @@ type DB_MS_PSDO_PROC LONG_LIVED ETS db_match_pseudo_proc type SCHDLR_DATA LONG_LIVED SYSTEM scheduler_data type LL_TEMP_TERM LONG_LIVED SYSTEM ll_temp_term +type NIF_TRAP_EXPORT STANDARD CODE nif_trap_export_entry type EXPORT LONG_LIVED CODE export_entry type MONITOR_SH FIXED_SIZE PROCESSES monitor_sh type NLINK_SH FIXED_SIZE PROCESSES nlink_sh diff --git a/erts/emulator/beam/erl_alloc_util.c b/erts/emulator/beam/erl_alloc_util.c index 45f0cc4312..a4e164bf51 100644 --- a/erts/emulator/beam/erl_alloc_util.c +++ b/erts/emulator/beam/erl_alloc_util.c @@ -3274,6 +3274,15 @@ create_carrier(Allctr_t *allctr, Uint umem_sz, UWord flags) ASSERT(!(flags & CFLG_FORCE_MSEG && flags & CFLG_FORCE_SYS_ALLOC)); + if (umem_sz > (ERTS_UINT_MAX - ERTS_UINT_MAX/100)) { + /* Do an overly conservative _overflow_ check here so we don't + * have to deal with it from here on. I guess we could be more accurate + * but I don't think the need to allocate over 99% of the address space + * will ever arise on any machine, neither 32 nor 64 bit. 
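The new guard in create_carrier() rejects requests above roughly 99% of the address space before UMEMSZ2BLKSZ() adds block and carrier headers, since that addition could otherwise wrap around and silently produce a tiny carrier. A standalone illustration; UINTPTR_MAX and the 64-byte overhead stand in for ERTS_UINT_MAX and the real header sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uintptr_t max      = UINTPTR_MAX;
    uintptr_t limit    = max - max / 100;   /* the cut-off used in create_carrier() */
    uintptr_t umem_sz  = max - 16;          /* absurd allocation request */
    uintptr_t overhead = 64;                /* stand-in for carrier/block header size */

    printf("caught by guard: %s\n", umem_sz > limit ? "yes" : "no");
    printf("size after adding headers (wrapped): %ju\n",
           (uintmax_t)(umem_sz + overhead));
    return 0;
}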
+ */ + return NULL; + } + blk_sz = UMEMSZ2BLKSZ(allctr, umem_sz); #ifdef ERTS_SMP diff --git a/erts/emulator/beam/erl_bif_binary.c b/erts/emulator/beam/erl_bif_binary.c index ff775691b3..3bf78adce7 100644 --- a/erts/emulator/beam/erl_bif_binary.c +++ b/erts/emulator/beam/erl_bif_binary.c @@ -1324,9 +1324,9 @@ static int parse_match_opts_list(Eterm l, Eterm bin, Uint *posp, Uint *endp) goto badarg; } if (len < 0) { - Sint lentmp = -len; + Uint lentmp = -(Uint)len; /* overflow */ - if (lentmp == len || lentmp < 0 || -lentmp != len) { + if ((Sint)lentmp < 0) { goto badarg; } len = lentmp; @@ -1555,9 +1555,9 @@ BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen) goto badarg; } if (len < 0) { - Sint lentmp = -len; + Uint lentmp = -(Uint)len; /* overflow */ - if (lentmp == len || lentmp < 0 || -lentmp != len) { + if ((Sint)lentmp < 0) { goto badarg; } len = lentmp; @@ -1644,9 +1644,9 @@ BIF_RETTYPE erts_gc_binary_part(Process *p, Eterm *reg, Eterm live, int range_is goto badarg; } if (len < 0) { - Sint lentmp = -len; + Uint lentmp = -(Uint)len; /* overflow */ - if (lentmp == len || lentmp < 0 || -lentmp != len) { + if ((Sint)lentmp < 0) { goto badarg; } len = lentmp; @@ -2213,9 +2213,9 @@ static BIF_RETTYPE binary_bin_to_list_common(Process *p, goto badarg; } if (len < 0) { - Sint lentmp = -len; + Uint lentmp = -(Uint)len; /* overflow */ - if (lentmp == len || lentmp < 0 || -lentmp != len) { + if ((Sint)lentmp < 0) { goto badarg; } len = lentmp; @@ -2294,18 +2294,11 @@ BIF_RETTYPE binary_bin_to_list_1(BIF_ALIST_1) BIF_ERROR(BIF_P,BADARG); } -/* - * Ok, erlang:list_to_binary does not interrupt, and we really don't want - * an alternative implementation for the exact same thing, why we - * have descided to use the old non-restarting implementation for now. - * In reality, there are seldom many iterations involved in doing this, so the - * problem of long-running bifs is not really that big in this case. - * So, for now we use the old implementation also in the module binary. - */ +HIPE_WRAPPER_BIF_DISABLE_GC(binary_list_to_bin, 1) BIF_RETTYPE binary_list_to_bin_1(BIF_ALIST_1) { - return erts_list_to_binary_bif(BIF_P, BIF_ARG_1); + return erts_list_to_binary_bif(BIF_P, BIF_ARG_1, bif_export[BIF_binary_list_to_bin_1]); } typedef struct { diff --git a/erts/emulator/beam/erl_bif_info.c b/erts/emulator/beam/erl_bif_info.c index 2adba9b240..6efe9d9550 100755..100644 --- a/erts/emulator/beam/erl_bif_info.c +++ b/erts/emulator/beam/erl_bif_info.c @@ -2691,6 +2691,11 @@ BIF_RETTYPE system_info_1(BIF_ALIST_1) else if (ERTS_IS_ATOM_STR("ets_limit",BIF_ARG_1)) { BIF_RET(make_small(erts_db_get_max_tabs())); } + else if (ERTS_IS_ATOM_STR("tolerant_timeofday",BIF_ARG_1)) { + BIF_RET(erts_disable_tolerant_timeofday + ? 
am_disabled + : am_enabled); + } BIF_ERROR(BIF_P, BADARG); } @@ -3050,6 +3055,25 @@ fun_info_2(BIF_ALIST_2) return TUPLE2(hp, what, val); } +BIF_RETTYPE +fun_info_mfa_1(BIF_ALIST_1) +{ + Process* p = BIF_P; + Eterm fun = BIF_ARG_1; + Eterm* hp; + + if (is_fun(fun)) { + ErlFunThing* funp = (ErlFunThing *) fun_val(fun); + hp = HAlloc(p, 4); + BIF_RET(TUPLE3(hp,funp->fe->module,funp->fe->address[-2],make_small(funp->arity))); + } else if (is_export(fun)) { + Export* exp = (Export *) ((UWord) (export_val(fun))[1]); + hp = HAlloc(p, 4); + BIF_RET(TUPLE3(hp,exp->code[0],exp->code[1],make_small(exp->code[2]))); + } + BIF_ERROR(p, BADARG); +} + BIF_RETTYPE is_process_alive_1(BIF_ALIST_1) { if(is_internal_pid(BIF_ARG_1)) { @@ -3851,16 +3875,19 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s Uint tries = 0, colls = 0; unsigned long timer_s = 0, timer_ns = 0, timer_n = 0; unsigned int line = 0; + unsigned int i; Eterm af, uil; Eterm uit, uic; Eterm uits, uitns, uitn; Eterm tt, tstat, tloc, t; + Eterm thist, vhist[ERTS_LCNT_HISTOGRAM_SLOT_SIZE]; /* term: - * [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}}}] + * [{{file, line}, {tries, colls, {seconds, nanoseconds, n_blocks}}, + * { .. histogram .. }] */ - + tries = (Uint) ethr_atomic_read(&stats->tries); colls = (Uint) ethr_atomic_read(&stats->colls); @@ -3869,23 +3896,27 @@ static Eterm lcnt_build_lock_stats_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_s timer_ns = stats->timer.ns; timer_n = stats->timer_n; - af = erts_atom_put(stats->file, strlen(stats->file), ERTS_ATOM_ENC_LATIN1, 1); + af = erts_atom_put((byte *)stats->file, strlen(stats->file), ERTS_ATOM_ENC_LATIN1, 1); uil = erts_bld_uint( hpp, szp, line); tloc = erts_bld_tuple(hpp, szp, 2, af, uil); - uit = erts_bld_uint( hpp, szp, tries); - uic = erts_bld_uint( hpp, szp, colls); - + uit = erts_bld_uint( hpp, szp, tries); + uic = erts_bld_uint( hpp, szp, colls); + uits = erts_bld_uint( hpp, szp, timer_s); uitns = erts_bld_uint( hpp, szp, timer_ns); uitn = erts_bld_uint( hpp, szp, timer_n); tt = erts_bld_tuple(hpp, szp, 3, uits, uitns, uitn); tstat = erts_bld_tuple(hpp, szp, 3, uit, uic, tt); - - t = erts_bld_tuple(hpp, szp, 2, tloc, tstat); - - res = erts_bld_cons( hpp, szp, t, res); + + for(i = 0; i < ERTS_LCNT_HISTOGRAM_SLOT_SIZE; i++) { + vhist[i] = erts_bld_uint(hpp, szp, stats->hist.ns[i]); + } + thist = erts_bld_tuplev(hpp, szp, ERTS_LCNT_HISTOGRAM_SLOT_SIZE, vhist); + + t = erts_bld_tuple(hpp, szp, 3, tloc, tstat, thist); + res = erts_bld_cons( hpp, szp, t, res); return res; } @@ -3906,13 +3937,13 @@ static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, erts_lcnt_lock_t *lock ASSERT(ltype); - type = erts_atom_put(ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1); - name = erts_atom_put(lock->name, strlen(lock->name), ERTS_ATOM_ENC_LATIN1, 1); + type = erts_atom_put((byte *)ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1); + name = erts_atom_put((byte *)lock->name, strlen(lock->name), ERTS_ATOM_ENC_LATIN1, 1); if (lock->flag & ERTS_LCNT_LT_ALLOC) { /* use allocator types names as id's for allocator locks */ ltype = (char *) ERTS_ALC_A2AD(signed_val(lock->id)); - id = erts_atom_put(ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1); + id = erts_atom_put((byte *)ltype, strlen(ltype), ERTS_ATOM_ENC_LATIN1, 1); } else if (lock->flag & ERTS_LCNT_LT_PROCLOCK) { /* use registered names as id's for process locks if available */ proc = erts_proc_lookup(lock->id); @@ -3923,16 +3954,15 @@ static Eterm lcnt_build_lock_term(Eterm **hpp, Uint *szp, 
erts_lcnt_lock_t *lock id = lock->id; } } else { - id = lock->id; + id = lock->id; } - + for (i = 0; i < lock->n_stats; i++) { stats = lcnt_build_lock_stats_term(hpp, szp, &(lock->stats[i]), stats); } - - t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats); - - res = erts_bld_cons( hpp, szp, t, res); + + t = erts_bld_tuple(hpp, szp, 4, name, id, type, stats); + res = erts_bld_cons( hpp, szp, t, res); return res; } @@ -3952,12 +3982,12 @@ static Eterm lcnt_build_result_term(Eterm **hpp, Uint *szp, erts_lcnt_data_t *da dtns = erts_bld_uint( hpp, szp, data->duration.ns); tdt = erts_bld_tuple(hpp, szp, 2, dts, dtns); - adur = erts_atom_put(str_duration, strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1); + adur = erts_atom_put((byte *)str_duration, strlen(str_duration), ERTS_ATOM_ENC_LATIN1, 1); tdur = erts_bld_tuple(hpp, szp, 2, adur, tdt); /* lock tuple */ - aloc = erts_atom_put(str_locks, strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1); + aloc = erts_atom_put((byte *)str_locks, strlen(str_locks), ERTS_ATOM_ENC_LATIN1, 1); for (lock = data->current_locks->head; lock != NULL ; lock = lock->next ) { lloc = lcnt_build_lock_term(hpp, szp, lock, lloc); diff --git a/erts/emulator/beam/erl_binary.h b/erts/emulator/beam/erl_binary.h index 819b19e566..06dfeb1260 100644 --- a/erts/emulator/beam/erl_binary.h +++ b/erts/emulator/beam/erl_binary.h @@ -166,7 +166,7 @@ Eterm erts_bin_bytes_to_list(Eterm previous, Eterm* hp, byte* bytes, Uint size, * Common implementation for erlang:list_to_binary/1 and binary:list_to_bin/1 */ -BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg); +BIF_RETTYPE erts_list_to_binary_bif(Process *p, Eterm arg, Export *bif); BIF_RETTYPE erts_gc_binary_part(Process *p, Eterm *reg, Eterm live, int range_is_tuple); BIF_RETTYPE erts_binary_part(Process *p, Eterm binary, Eterm epos, Eterm elen); @@ -236,6 +236,8 @@ erts_bin_drv_alloc_fnf(Uint size) { Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD; void *res; + if (bsize < size) /* overflow */ + return NULL; res = erts_alloc_fnf(ERTS_ALC_T_DRV_BINARY, bsize); ERTS_CHK_BIN_ALIGNMENT(res); return (Binary *) res; @@ -246,6 +248,8 @@ erts_bin_drv_alloc(Uint size) { Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD; void *res; + if (bsize < size) /* overflow */ + erts_alloc_enomem(ERTS_ALC_T_DRV_BINARY, size); res = erts_alloc(ERTS_ALC_T_DRV_BINARY, bsize); ERTS_CHK_BIN_ALIGNMENT(res); return (Binary *) res; @@ -257,6 +261,8 @@ erts_bin_nrml_alloc(Uint size) { Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD; void *res; + if (bsize < size) /* overflow */ + erts_alloc_enomem(ERTS_ALC_T_BINARY, size); res = erts_alloc(ERTS_ALC_T_BINARY, bsize); ERTS_CHK_BIN_ALIGNMENT(res); return (Binary *) res; @@ -267,11 +273,12 @@ erts_bin_realloc_fnf(Binary *bp, Uint size) { Binary *nbp; Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD; + ErtsAlcType_t type = (bp->flags & BIN_FLAG_DRV) ? ERTS_ALC_T_DRV_BINARY + : ERTS_ALC_T_BINARY; ASSERT((bp->flags & BIN_FLAG_MAGIC) == 0); - if (bp->flags & BIN_FLAG_DRV) - nbp = erts_realloc_fnf(ERTS_ALC_T_DRV_BINARY, (void *) bp, bsize); - else - nbp = erts_realloc_fnf(ERTS_ALC_T_BINARY, (void *) bp, bsize); + if (bsize < size) /* overflow */ + return NULL; + nbp = erts_realloc_fnf(type, (void *) bp, bsize); ERTS_CHK_BIN_ALIGNMENT(nbp); return nbp; } @@ -281,17 +288,14 @@ erts_bin_realloc(Binary *bp, Uint size) { Binary *nbp; Uint bsize = ERTS_SIZEOF_Binary(size) + CHICKEN_PAD; + ErtsAlcType_t type = (bp->flags & BIN_FLAG_DRV) ? 
ERTS_ALC_T_DRV_BINARY + : ERTS_ALC_T_BINARY; ASSERT((bp->flags & BIN_FLAG_MAGIC) == 0); - if (bp->flags & BIN_FLAG_DRV) - nbp = erts_realloc_fnf(ERTS_ALC_T_DRV_BINARY, (void *) bp, bsize); - else - nbp = erts_realloc_fnf(ERTS_ALC_T_BINARY, (void *) bp, bsize); + if (bsize < size) /* overflow */ + erts_realloc_enomem(type, bp, size); + nbp = erts_realloc_fnf(type, (void *) bp, bsize); if (!nbp) - erts_realloc_n_enomem(ERTS_ALC_T2N(bp->flags & BIN_FLAG_DRV - ? ERTS_ALC_T_DRV_BINARY - : ERTS_ALC_T_BINARY), - bp, - bsize); + erts_realloc_enomem(type, bp, bsize); ERTS_CHK_BIN_ALIGNMENT(nbp); return nbp; } @@ -312,6 +316,7 @@ erts_create_magic_binary(Uint size, void (*destructor)(Binary *)) { Uint bsize = ERTS_MAGIC_BIN_SIZE(size); Binary* bptr = erts_alloc_fnf(ERTS_ALC_T_BINARY, bsize); + ASSERT(bsize > size); if (!bptr) erts_alloc_n_enomem(ERTS_ALC_T2N(ERTS_ALC_T_BINARY), bsize); ERTS_CHK_BIN_ALIGNMENT(bptr); diff --git a/erts/emulator/beam/erl_driver.h b/erts/emulator/beam/erl_driver.h index 5ced8c5ca0..f9938fc66c 100644 --- a/erts/emulator/beam/erl_driver.h +++ b/erts/emulator/beam/erl_driver.h @@ -133,7 +133,7 @@ typedef struct { #define ERL_DRV_EXTENDED_MARKER (0xfeeeeeed) #define ERL_DRV_EXTENDED_MAJOR_VERSION 3 -#define ERL_DRV_EXTENDED_MINOR_VERSION 0 +#define ERL_DRV_EXTENDED_MINOR_VERSION 1 /* * The emulator will refuse to load a driver with a major version diff --git a/erts/emulator/beam/erl_drv_nif.h b/erts/emulator/beam/erl_drv_nif.h index 3f829ea7ea..4e8c6dc68b 100644 --- a/erts/emulator/beam/erl_drv_nif.h +++ b/erts/emulator/beam/erl_drv_nif.h @@ -35,6 +35,7 @@ typedef struct { int scheduler_threads; int nif_major_version; int nif_minor_version; + int dirty_scheduler_support; } ErlDrvSysInfo; typedef struct { diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c index aa15d2cc57..0db42d4325 100644 --- a/erts/emulator/beam/erl_gc.c +++ b/erts/emulator/beam/erl_gc.c @@ -2018,6 +2018,20 @@ setup_rootset(Process *p, Eterm *objv, int nobj, Rootset *rootset) roots[n].sz = 1; n++; } + + /* + * If a NIF has saved arguments, they need to be added + */ + if (ERTS_PROC_GET_NIF_TRAP_EXPORT(p)) { + Eterm* argv; + int argc; + if (erts_setup_nif_gc(p, &argv, &argc)) { + roots[n].v = argv; + roots[n].sz = argc; + n++; + } + } + ASSERT(n <= rootset->size); mp = p->msg.first; diff --git a/erts/emulator/beam/erl_init.c b/erts/emulator/beam/erl_init.c index d54658f1ea..88c4006934 100644 --- a/erts/emulator/beam/erl_init.c +++ b/erts/emulator/beam/erl_init.c @@ -324,7 +324,6 @@ erl_init(int ncpu, BIN_VH_MIN_SIZE = erts_next_heap_size(BIN_VH_MIN_SIZE, 0); erts_init_trace(); - erts_init_binary(); erts_init_bits(); erts_code_ix_init(); erts_init_fun_table(); @@ -337,6 +336,7 @@ erl_init(int ncpu, erts_ddll_init(); init_emulator(); erts_ptab_init(); /* Must be after init_emulator() */ + erts_init_binary(); /* Must be after init_emulator() */ erts_bp_init(); init_db(); /* Must be after init_emulator */ erts_bif_timer_init(); @@ -2066,8 +2066,10 @@ erl_exit_vv(int n, int flush_async, char *fmt, va_list args1, va_list args2) system_cleanup(flush_async); save_statistics(); - - an = abs(n); + if (n < 0) + an = -(unsigned int)n; + else + an = n; if (erts_mtrace_enabled) erts_mtrace_exit((Uint32) an); diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c index c13eb87012..b105ece6f1 100644 --- a/erts/emulator/beam/erl_lock_check.c +++ b/erts/emulator/beam/erl_lock_check.c @@ -139,7 +139,6 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "drv_tsd", NULL }, 
{ "async_enq_mtx", NULL }, #ifdef ERTS_SMP - { "sys_msg_q", NULL }, { "atom_tab", NULL }, { "make_ref", NULL }, { "misc_op_list_pre_alloc_lock", "address" }, @@ -148,6 +147,7 @@ static erts_lc_lock_order_t erts_lock_order[] = { { "btm_pre_alloc_lock", NULL, }, { "dist_entry_out_queue", "address" }, { "port_sched_lock", "port_id" }, + { "sys_msg_q", NULL }, { "port_table", NULL }, #endif { "mtrace_op", NULL }, @@ -227,8 +227,7 @@ rw_op_str(Uint16 flags) case ERTS_LC_FLG_LO_READ: return " (r)"; case ERTS_LC_FLG_LO_WRITE: - erts_fprintf(stderr, "\nInternal error\n"); - lc_abort(); + ERTS_INTERNAL_ERROR("Only write flag present"); default: break; } @@ -311,8 +310,7 @@ static ERTS_INLINE void lc_free(void *p) static void *lc_core_alloc(void) { lc_unlock(); - erts_fprintf(stderr, "Lock checker out of memory!\n"); - lc_abort(); + ERTS_INTERNAL_ERROR("Lock checker out of memory!\n"); } #else @@ -325,8 +323,7 @@ static void *lc_core_alloc(void) fbs = (erts_lc_free_block_t *) malloc(sizeof(erts_lc_free_block_t) * ERTS_LC_FB_CHUNK_SIZE); if (!fbs) { - erts_fprintf(stderr, "Lock checker failed to allocate memory!\n"); - lc_abort(); + ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!"); } for (i = 1; i < ERTS_LC_FB_CHUNK_SIZE - 1; i++) { #ifdef DEBUG @@ -366,11 +363,11 @@ create_locked_locks(char *thread_name) { erts_lc_locked_locks_t *l_lcks = malloc(sizeof(erts_lc_locked_locks_t)); if (!l_lcks) - lc_abort(); + ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!"); l_lcks->thread_name = strdup(thread_name ? thread_name : "unknown"); if (!l_lcks->thread_name) - lc_abort(); + ERTS_INTERNAL_ERROR("Lock checker failed to allocate memory!"); l_lcks->emu_thread = 0; l_lcks->tid = erts_thr_self(); @@ -691,7 +688,7 @@ erts_lc_set_thread_name(char *thread_name) free((void *) l_lcks->thread_name); l_lcks->thread_name = strdup(thread_name ? 
thread_name : "unknown"); if (!l_lcks->thread_name) - lc_abort(); + ERTS_INTERNAL_ERROR("strdup failed"); } l_lcks->emu_thread = 1; } @@ -1330,7 +1327,7 @@ erts_lc_init(void) #endif /* #ifdef ERTS_LC_STATIC_ALLOC */ if (ethr_spinlock_init(&free_blocks_lock) != 0) - lc_abort(); + ERTS_INTERNAL_ERROR("spinlock_init failed"); erts_tsd_key_create(&locks_key,"erts_lock_check_key"); } diff --git a/erts/emulator/beam/erl_lock_count.c b/erts/emulator/beam/erl_lock_count.c index 6f44bf097b..cf6996ea06 100644 --- a/erts/emulator/beam/erl_lock_count.c +++ b/erts/emulator/beam/erl_lock_count.c @@ -61,6 +61,25 @@ static ERTS_INLINE void lcnt_unlock(void) { ethr_mutex_unlock(&lcnt_data_lock); } +const int log2_tab64[64] = { + 63, 0, 58, 1, 59, 47, 53, 2, + 60, 39, 48, 27, 54, 33, 42, 3, + 61, 51, 37, 40, 49, 18, 28, 20, + 55, 30, 34, 11, 43, 14, 22, 4, + 62, 57, 46, 52, 38, 26, 32, 41, + 50, 36, 17, 19, 29, 10, 13, 21, + 56, 45, 25, 31, 35, 16, 9, 12, + 44, 24, 15, 8, 23, 7, 6, 5}; + +static ERTS_INLINE int lcnt_log2(Uint64 v) { + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v |= v >> 32; + return log2_tab64[((Uint64)((v - (v >> 1))*0x07EDD5E59A4E28C2)) >> 58]; +} static char* lcnt_lock_type(Uint16 flag) { switch(flag & ERTS_LCNT_LT_ALL) { @@ -81,19 +100,20 @@ static void lcnt_clear_stats(erts_lcnt_lock_stats_t *stats) { stats->timer_n = 0; stats->file = (char *)str_undefined; stats->line = 0; + sys_memzero(stats->hist.ns, sizeof(stats->hist.ns)); } static void lcnt_time(erts_lcnt_time_t *time) { -#ifdef HAVE_GETHRTIME +#if 0 || defined(HAVE_GETHRTIME) SysHrTime hr_time; hr_time = sys_gethrtime(); time->s = (unsigned long)(hr_time / 1000000000LL); time->ns = (unsigned long)(hr_time - 1000000000LL*time->s); -#else - SysTimeval tv; - sys_gettimeofday(&tv); - time->s = tv.tv_sec; - time->ns = tv.tv_usec*1000LL; +#else + SysTimeval tv; + sys_gettimeofday(&tv); + time->s = tv.tv_sec; + time->ns = tv.tv_usec*1000LL; #endif } @@ -111,28 +131,29 @@ static void lcnt_time_diff(erts_lcnt_time_t *d, erts_lcnt_time_t *t1, erts_lcnt_ dns += 1000000000LL; } + ASSERT(ds >= 0); + d->s = ds; d->ns = dns; } -/* difference d must be positive */ +/* difference d must be non-negative */ static void lcnt_time_add(erts_lcnt_time_t *t, erts_lcnt_time_t *d) { - unsigned long ngns = 0; - t->s += d->s; t->ns += d->ns; - ngns = t->ns / 1000000000LL; + t->s += t->ns / 1000000000LL; t->ns = t->ns % 1000000000LL; - - t->s += ngns; } static erts_lcnt_thread_data_t *lcnt_thread_data_alloc(void) { erts_lcnt_thread_data_t *eltd; eltd = (erts_lcnt_thread_data_t*)malloc(sizeof(erts_lcnt_thread_data_t)); + if (!eltd) { + ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!"); + } eltd->timer_set = 0; eltd->lock_in_conflict = 0; @@ -158,59 +179,64 @@ static char* lock_opt(Uint16 flag) { return "--"; } -static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action, char *extra) { - erts_aint_t colls, tries, w_state, r_state; - erts_lcnt_lock_stats_t *stats = NULL; - +static void print_lock_x(erts_lcnt_lock_t *lock, Uint16 flag, char *action) { + erts_aint_t w_state, r_state; char *type; - int i; - + + if (strcmp(lock->name, "run_queue") != 0) return; type = lcnt_lock_type(lock->flag); r_state = ethr_atomic_read(&lock->r_state); w_state = ethr_atomic_read(&lock->w_state); - if (lock->flag & flag) { - erts_printf("%20s [%30s] [r/w state %4ld/%4ld] id %T %s\r\n", - action, - lock->name, - r_state, - w_state, - lock->id, - extra); + erts_fprintf(stderr,"%10s [%24s] [r/w state %4ld/%4ld] %2s id 
%T\r\n", + action, + lock->name, + r_state, + w_state, + type, + lock->id); } } - -static void print_lock(erts_lcnt_lock_t *lock, char *action) { - if (strcmp(lock->name, "proc_main") == 0) { - print_lock_x(lock, ERTS_LCNT_LT_ALL, action, ""); - } -} - #endif static erts_lcnt_lock_stats_t *lcnt_get_lock_stats(erts_lcnt_lock_t *lock, char *file, unsigned int line) { unsigned int i; erts_lcnt_lock_stats_t *stats = NULL; - - for (i = 0; i < lock->n_stats; i++) { - if ((lock->stats[i].file == file) && (lock->stats[i].line == line)) { - return &(lock->stats[i]); - } - } - if (lock->n_stats < ERTS_LCNT_MAX_LOCK_LOCATIONS) { - stats = &lock->stats[lock->n_stats]; - lock->n_stats++; - stats->file = file; - stats->line = line; - return stats; + if (erts_lcnt_rt_options & ERTS_LCNT_OPT_LOCATION) { + for (i = 0; i < lock->n_stats; i++) { + if ((lock->stats[i].file == file) && (lock->stats[i].line == line)) { + return &(lock->stats[i]); + } + } + if (lock->n_stats < ERTS_LCNT_MAX_LOCK_LOCATIONS) { + stats = &lock->stats[lock->n_stats]; + lock->n_stats++; + stats->file = file; + stats->line = line; + return stats; + } } return &lock->stats[0]; +} +static void lcnt_update_stats_hist(erts_lcnt_hist_t *hist, erts_lcnt_time_t *time_wait) { + int idx; + unsigned long r; + + if (time_wait->s > 0 || time_wait->ns > ERTS_LCNT_HISTOGRAM_MAX_NS) { + idx = ERTS_LCNT_HISTOGRAM_SLOT_SIZE - 1; + } else { + r = time_wait->ns >> ERTS_LCNT_HISTOGRAM_RSHIFT; + if (r) idx = lcnt_log2(r); + else idx = 0; + } + hist->ns[idx]++; } -static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflict, erts_lcnt_time_t *time_wait) { +static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflict, + erts_lcnt_time_t *time_wait) { ethr_atomic_inc(&stats->tries); @@ -220,6 +246,7 @@ static void lcnt_update_stats(erts_lcnt_lock_stats_t *stats, int lock_in_conflic if (time_wait) { lcnt_time_add(&(stats->timer), time_wait); stats->timer_n++; + lcnt_update_stats_hist(&stats->hist,time_wait); } } @@ -248,6 +275,9 @@ void erts_lcnt_init() { /* init lcnt structure */ erts_lcnt_data = (erts_lcnt_data_t*)malloc(sizeof(erts_lcnt_data_t)); + if (!erts_lcnt_data) { + ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!"); + } erts_lcnt_data->current_locks = erts_lcnt_list_init(); erts_lcnt_data->deleted_locks = erts_lcnt_list_init(); @@ -269,6 +299,9 @@ erts_lcnt_lock_list_t *erts_lcnt_list_init(void) { erts_lcnt_lock_list_t *list; list = (erts_lcnt_lock_list_t*)malloc(sizeof(erts_lcnt_lock_list_t)); + if (!list) { + ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!"); + } list->head = NULL; list->tail = NULL; list->n = 0; @@ -330,8 +363,9 @@ void erts_lcnt_list_delete(erts_lcnt_lock_list_t *list, erts_lcnt_lock_t *lock) /* interface to erl_threads.h */ /* only lock on init and destroy, all others should use atomics */ void erts_lcnt_init_lock(erts_lcnt_lock_t *lock, char *name, Uint16 flag ) { - erts_lcnt_init_lock_x(lock, name, flag, am_undefined); + erts_lcnt_init_lock_x(lock, name, flag, NIL); } + void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eterm id) { int i; if (!name) { @@ -360,7 +394,6 @@ void erts_lcnt_init_lock_x(erts_lcnt_lock_t *lock, char *name, Uint16 flag, Eter } erts_lcnt_list_insert(erts_lcnt_data->current_locks, lock); - lcnt_unlock(); } @@ -375,6 +408,9 @@ void erts_lcnt_destroy_lock(erts_lcnt_lock_t *lock) { /* copy structure and insert the copy */ deleted_lock = (erts_lcnt_lock_t*)malloc(sizeof(erts_lcnt_lock_t)); + if (!deleted_lock) { + 
ERTS_INTERNAL_ERROR("Lock counter failed to allocate memory!"); + } memcpy(deleted_lock, lock, sizeof(erts_lcnt_lock_t)); deleted_lock->next = NULL; @@ -417,8 +453,9 @@ void erts_lcnt_lock_opt(erts_lcnt_lock_t *lock, Uint16 option) { if ((w_state > 0) || (r_state > 0)) { eltd->lock_in_conflict = 1; - if (eltd->timer_set == 0) + if (eltd->timer_set == 0) { lcnt_time(&eltd->timer); + } eltd->timer_set++; } else { eltd->lock_in_conflict = 0; @@ -433,7 +470,7 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) { if (!ERTS_LCNT_LOCK_TYPE(lock)) return; w_state = ethr_atomic_read(&lock->w_state); - ethr_atomic_inc( &lock->w_state); + ethr_atomic_inc(&lock->w_state); eltd = lcnt_get_thread_data(); @@ -446,10 +483,10 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) { * 'atomicly'. All other locks will block the thread if w_state > 0 * i.e. locked. */ - if (eltd->timer_set == 0) + if (eltd->timer_set == 0) { lcnt_time(&eltd->timer); + } eltd->timer_set++; - } else { eltd->lock_in_conflict = 0; } @@ -459,11 +496,10 @@ void erts_lcnt_lock(erts_lcnt_lock_t *lock) { void erts_lcnt_lock_unaquire(erts_lcnt_lock_t *lock) { /* should check if this thread was "waiting" */ - if (erts_lcnt_rt_options & ERTS_LCNT_OPT_SUSPEND) return; if (!ERTS_LCNT_LOCK_TYPE(lock)) return; - ethr_atomic_dec( &lock->w_state); + ethr_atomic_dec(&lock->w_state); } /* erts_lcnt_lock_post @@ -491,7 +527,7 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line if (!(lock->flag & (ERTS_LCNT_LT_RWMUTEX | ERTS_LCNT_LT_RWSPINLOCK))) { flowstate = ethr_atomic_read(&lock->flowstate); ASSERT(flowstate == 0); - ethr_atomic_inc( &lock->flowstate); + ethr_atomic_inc(&lock->flowstate); } #endif @@ -500,19 +536,12 @@ void erts_lcnt_lock_post_x(erts_lcnt_lock_t *lock, char *file, unsigned int line ASSERT(eltd); /* if lock was in conflict, time it */ - - if (erts_lcnt_rt_options & ERTS_LCNT_OPT_LOCATION) { - stats = lcnt_get_lock_stats(lock, file, line); - } else { - stats = &lock->stats[0]; - } - + stats = lcnt_get_lock_stats(lock, file, line); if (eltd->timer_set) { lcnt_time(&timer); lcnt_time_diff(&time_wait, &timer, &(eltd->timer)); lcnt_update_stats(stats, eltd->lock_in_conflict, &time_wait); - eltd->timer_set--; ASSERT(eltd->timer_set >= 0); } else { @@ -541,11 +570,11 @@ void erts_lcnt_unlock(erts_lcnt_lock_t *lock) { /* flowstate */ flowstate = ethr_atomic_read(&lock->flowstate); ASSERT(flowstate == 1); - ethr_atomic_dec( &lock->flowstate); + ethr_atomic_dec(&lock->flowstate); /* write state */ w_state = ethr_atomic_read(&lock->w_state); - ASSERT(w_state > 0) + ASSERT(w_state > 0); #endif ethr_atomic_dec(&lock->w_state); } @@ -582,9 +611,7 @@ void erts_lcnt_trylock(erts_lcnt_lock_t *lock, int res) { ethr_atomic_inc( &lock->flowstate); #endif ethr_atomic_inc(&lock->w_state); - lcnt_update_stats(&(lock->stats[0]), 0, NULL); - } else { ethr_atomic_inc(&lock->stats[0].tries); ethr_atomic_inc(&lock->stats[0].colls); diff --git a/erts/emulator/beam/erl_lock_count.h b/erts/emulator/beam/erl_lock_count.h index 75f7cd028b..ffbb93da1b 100644 --- a/erts/emulator/beam/erl_lock_count.h +++ b/erts/emulator/beam/erl_lock_count.h @@ -35,6 +35,10 @@ * | | | - collisions (including trylock busy) * | | | - timer (time spent in waiting for lock) * | | | - n_timer (collisions excluding trylock busy) + * | | | - histogram + * | | | | - # 0 = log2(lock wait_time ns) + * | | | | - ... + * | | | | - # n = log2(lock wait_time ns) * * Each instance of a lock is the unique lock, i.e. set and id in that set. 
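As a rough illustration of the log2 histogram described above (a minimal sketch, not part of the patch): the slot index is the base-2 logarithm of the wait time in nanoseconds, shifted down by ERTS_LCNT_HISTOGRAM_RSHIFT and capped at the last slot, mirroring lcnt_update_stats_hist() earlier in this diff. The helper name below is made up, and lcnt_log2() is replaced by an explicit loop since that function is not shown in this hunk.

    static int example_wait_time_to_slot(unsigned long ns)
    {
        unsigned long r;
        int idx = 0;

        if (ns > ERTS_LCNT_HISTOGRAM_MAX_NS)
            return ERTS_LCNT_HISTOGRAM_SLOT_SIZE - 1; /* overflow slot */
        r = ns >> ERTS_LCNT_HISTOGRAM_RSHIFT;         /* drop sub-resolution bits */
        while (r >>= 1)
            idx++;                                    /* idx = floor(log2(r)), 0 when r < 2 */
        return idx;
    }

With the 10-bit shift variant this puts waits below roughly 2 microseconds in slot 0, and anything above ERTS_LCNT_HISTOGRAM_MAX_NS in the final slot.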
* For each lock there is a set of statistics with where and what impact @@ -68,8 +72,17 @@ #include "ethread.h" +#define ERTS_LCNT_MAX_LOCK_LOCATIONS (10) -#define ERTS_LCNT_MAX_LOCK_LOCATIONS (10) +/* histogram */ +#define ERTS_LCNT_HISTOGRAM_MAX_NS (((unsigned long)1LL << 28) - 1) +#if 0 || defined(HAVE_GETHRTIME) +#define ERTS_LCNT_HISTOGRAM_SLOT_SIZE (30) +#define ERTS_LCNT_HISTOGRAM_RSHIFT (0) +#else +#define ERTS_LCNT_HISTOGRAM_SLOT_SIZE (20) +#define ERTS_LCNT_HISTOGRAM_RSHIFT (10) +#endif #define ERTS_LCNT_LT_SPINLOCK (((Uint16) 1) << 0) #define ERTS_LCNT_LT_RWSPINLOCK (((Uint16) 1) << 1) @@ -104,6 +117,10 @@ typedef struct { extern erts_lcnt_time_t timer_start; +typedef struct { + Uint32 ns[ERTS_LCNT_HISTOGRAM_SLOT_SIZE]; /* log2 array of nano seconds occurences */ +} erts_lcnt_hist_t; + typedef struct erts_lcnt_lock_stats_s { /* "tries" and "colls" needs to be atomic since * trylock busy does not aquire a lock and there @@ -118,6 +135,7 @@ typedef struct erts_lcnt_lock_stats_s { unsigned long timer_n; /* #times waited for lock */ erts_lcnt_time_t timer; /* total wait time for lock */ + erts_lcnt_hist_t hist; } erts_lcnt_lock_stats_t; /* rw locks uses both states, other locks only uses w_state */ diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c index 0eb8117980..8870fac7d9 100644 --- a/erts/emulator/beam/erl_message.c +++ b/erts/emulator/beam/erl_message.c @@ -415,7 +415,13 @@ erts_queue_dist_message(Process *rcvr, if (!(*rcvr_locks & ERTS_PROC_LOCK_MSGQ)) erts_smp_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ); - erts_proc_notify_new_message(rcvr); + erts_proc_notify_new_message(rcvr, +#ifdef ERTS_SMP + *rcvr_locks +#else + 0 +#endif + ); } } @@ -542,7 +548,13 @@ queue_message(Process *c_p, if (locked_msgq) erts_smp_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ); - erts_proc_notify_new_message(receiver); + erts_proc_notify_new_message(receiver, +#ifdef ERTS_SMP + *receiver_locks +#else + 0 +#endif + ); #ifndef ERTS_SMP ERTS_HOLE_CHECK(receiver); @@ -1032,7 +1044,6 @@ erts_send_message(Process* sender, } BM_SWAP_TIMER(send,system); } else { -#ifdef ERTS_SMP ErlOffHeap *ohp; Eterm *hp; erts_aint32_t state; @@ -1064,42 +1075,6 @@ erts_send_message(Process* sender, #endif ); BM_SWAP_TIMER(send,system); -#else - ErlMessage* mp = message_alloc(); - Eterm *hp; - BM_SWAP_TIMER(send,size); - msize = size_object(message); - BM_SWAP_TIMER(size,send); - - if (receiver->stop - receiver->htop <= msize) { - BM_SWAP_TIMER(send,system); - erts_garbage_collect(receiver, msize, receiver->arg_reg, receiver->arity); - BM_SWAP_TIMER(system,send); - } - hp = receiver->htop; - receiver->htop = hp + msize; - BM_SWAP_TIMER(send,copy); - message = copy_struct(message, msize, &hp, &receiver->off_heap); - BM_MESSAGE_COPIED(msize); - BM_SWAP_TIMER(copy,send); - DTRACE6(message_send, sender_name, receiver_name, - (uint32_t)msize, tok_label, tok_lastcnt, tok_serial); - ERL_MESSAGE_TERM(mp) = message; - ERL_MESSAGE_TOKEN(mp) = NIL; -#ifdef USE_VM_PROBES - ERL_MESSAGE_DT_UTAG(mp) = NIL; -#endif - mp->next = NULL; - mp->data.attached = NULL; - LINK_MESSAGE(receiver, mp); - res = receiver->msg.len; - erts_proc_notify_new_message(receiver); - - if (IS_TRACED_FL(receiver, F_TRACE_RECEIVE)) { - trace_receive(receiver, message); - } - BM_SWAP_TIMER(send,system); -#endif /* #ifndef ERTS_SMP */ } return res; } diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c index ff551ea3af..ede5f335dc 100644 --- a/erts/emulator/beam/erl_nif.c +++ b/erts/emulator/beam/erl_nif.c @@ -472,6 
+472,18 @@ int enif_inspect_binary(ErlNifEnv* env, Eterm bin_term, ErlNifBinary* bin) struct enif_tmp_obj_t* tmp; byte* raw_ptr; }u; + + if (is_boxed(bin_term) && *binary_val(bin_term) == HEADER_SUB_BIN) { + ErlSubBin* sb = (ErlSubBin*) binary_val(bin_term); + if (sb->is_writable) { + ProcBin* pb = (ProcBin*) binary_val(sb->orig); + ASSERT(pb->thing_word == HEADER_PROC_BIN); + if (pb->flags) { + erts_emasculate_writable_binary(pb); + sb->is_writable = 0; + } + } + } u.tmp = NULL; bin->data = erts_get_aligned_binary_bytes_extra(bin_term, &u.raw_ptr, allocator, sizeof(struct enif_tmp_obj_t)); @@ -1513,72 +1525,263 @@ int enif_consume_timeslice(ErlNifEnv* env, int percent) return ERTS_BIF_REDS_LEFT(env->proc) == 0; } -#ifdef ERTS_DIRTY_SCHEDULERS - -/* NIFs exports need one more item than the Export struct provides, the - * erl_module_nif*, so the DirtyNifExport below adds that. The Export - * member must be first in the struct. +/* + * NIF exports need a few more items than the Export struct provides, + * including the erl_module_nif* and a NIF function pointer, so the + * NifExport below adds those. The Export member must be first in the + * struct. The saved_mfa, saved_argc, nif_level, alloced_argv_sz and argv + * members are used to track the MFA and arguments of the top NIF in case a + * chain of one or more enif_schedule_nif() calls results in an exception, + * since in that case the original MFA and registers have to be restored + * before returning to Erlang to ensure stacktrace information associated + * with the exception is correct. */ +typedef ERL_NIF_TERM (*NativeFunPtr)(ErlNifEnv*, int, const ERL_NIF_TERM[]); + typedef struct { Export exp; struct erl_module_nif* m; -} DirtyNifExport; + NativeFunPtr fp; + Eterm saved_mfa[3]; + int saved_argc; + int alloced_argv_sz; + Eterm argv[1]; +} NifExport; -static void -alloc_proc_psd(Process* proc, DirtyNifExport **ep) +/* + * If a process has saved arguments, they need to be part of the GC + * rootset. The function below is called from setup_rootset() in + * erl_gc.c. This function is declared in erl_process.h. + */ +int +erts_setup_nif_gc(Process* proc, Eterm** objv, int* nobj) { + NifExport* ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); + int gc = (ep && ep->saved_argc > 0); + + if (gc) { + *objv = ep->argv; + *nobj = ep->saved_argc; + } + return gc; +} + +/* + * Allocate a NifExport and set it in proc specific data + */ +static NifExport* +allocate_nif_sched_data(Process* proc, int argc) +{ + NifExport* ep; + size_t argv_extra, total; int i; - if (!*ep) { - *ep = erts_alloc(ERTS_ALC_T_PSD, sizeof(DirtyNifExport)); - sys_memset((void*) *ep, 0, sizeof(DirtyNifExport)); - for (i=0; i<ERTS_NUM_CODE_IX; i++) { - (*ep)->exp.addressv[i] = &(*ep)->exp.code[3]; - } - (*ep)->exp.code[3] = (BeamInstr) em_call_nif; + + argv_extra = argc > 1 ? 
sizeof(Eterm)*(argc-1) : 0; + total = sizeof(NifExport) + argv_extra; + ep = erts_alloc(ERTS_ALC_T_NIF_TRAP_EXPORT, total); + sys_memset((void*) ep, 0, total); + ep->alloced_argv_sz = argc; + for (i=0; i<ERTS_NUM_CODE_IX; i++) { + ep->exp.addressv[i] = &ep->exp.code[3]; } - (void) ERTS_PROC_SET_DIRTY_SCHED_TRAP_EXPORT(proc, ERTS_PROC_LOCK_MAIN, &(*ep)->exp); + ep->exp.code[3] = (BeamInstr) em_call_nif; + (void) ERTS_PROC_SET_NIF_TRAP_EXPORT(proc, ERTS_PROC_LOCK_MAIN, ep); + return ep; +} + +static ERTS_INLINE void +destroy_nif_export(NifExport *nif_export) +{ + erts_free(ERTS_ALC_T_NIF_TRAP_EXPORT, (void *) nif_export); } +void +erts_destroy_nif_export(void *nif_export) +{ + destroy_nif_export((NifExport *) nif_export); +} + +/* + * Initialize a NifExport struct. Create it if needed and store it in the + * proc. The direct_fp function is what will be invoked by op_call_nif, and + * the indirect_fp function, if not NULL, is what the direct_fp function + * will call. If the allocated NifExport isn't enough to hold all of argv, + * allocate a larger one. Save MFA and registers only if the need_save + * parameter is true. + */ static ERL_NIF_TERM -execute_dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +init_nif_sched_data(ErlNifEnv* env, NativeFunPtr direct_fp, NativeFunPtr indirect_fp, + int need_save, int argc, const ERL_NIF_TERM argv[]) { - Eterm* reg = ERTS_PROC_GET_SCHDATA(env->proc)->x_reg_array; - ERL_NIF_TERM result, dirty_result = (ERL_NIF_TERM) reg[0]; - typedef ERL_NIF_TERM (*FinalizerFP)(ErlNifEnv*, ERL_NIF_TERM); - FinalizerFP fp; -#if HAVE_INT64 && SIZEOF_LONG != 8 - ASSERT(sizeof(fp) <= sizeof(ErlNifUInt64)); - enif_get_uint64(env, reg[1], (ErlNifUInt64 *) &fp); -#else - ASSERT(sizeof(fp) <= sizeof(unsigned long)); - enif_get_ulong(env, reg[1], (unsigned long *) &fp); -#endif - result = (*fp)(env, dirty_result); - if (erts_refc_dectest(&env->mod_nif->rt_dtor_cnt, 0) == 0 - && env->mod_nif->mod == NULL) - close_lib(env->mod_nif); - return result; + Process* proc = env->proc; + Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array; + NifExport* ep; + int i; + + ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); + if (!ep) + ep = allocate_nif_sched_data(proc, argc); + else if (need_save && ep->alloced_argv_sz < argc) { + NifExport* new_ep = allocate_nif_sched_data(proc, argc); + destroy_nif_export(ep); + ep = new_ep; + } + ERTS_VBUMP_ALL_REDS(proc); + for (i = 0; i < argc; i++) { + if (need_save) + ep->argv[i] = reg[i]; + reg[i] = (Eterm) argv[i]; + } + if (need_save) { + ep->saved_mfa[0] = proc->current[0]; + ep->saved_mfa[1] = proc->current[1]; + ep->saved_mfa[2] = proc->current[2]; + ep->saved_argc = argc; + } + proc->i = (BeamInstr*) ep->exp.addressv[0]; + ep->exp.code[0] = (BeamInstr) proc->current[0]; + ep->exp.code[1] = (BeamInstr) proc->current[1]; + ep->exp.code[2] = argc; + ep->exp.code[4] = (BeamInstr) direct_fp; + ep->m = env->mod_nif; + ep->fp = indirect_fp; + proc->freason = TRAP; + return THE_NON_VALUE; } -#endif /* ERTS_DIRTY_SCHEDULERS */ +/* + * Restore saved MFA and registers. Registers are restored only when the + * exception flag is true. 
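A small caller-side sketch of how the saved NIF arguments become extra GC roots through erts_setup_nif_gc(); the real call site, setup_rootset() in erl_gc.c, is not part of this diff, so the surrounding context here is assumed.

    /* Hypothetical excerpt of a rootset builder; p is the Process* being
     * collected. */
    Eterm* extra_roots;
    int n_extra;

    if (erts_setup_nif_gc(p, &extra_roots, &n_extra)) {
        /* treat extra_roots[0 .. n_extra-1] as additional GC roots,
         * just like live x registers, so the saved argv terms survive */
    }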
+ */ +static void +restore_nif_mfa(Process* proc, NifExport* ep, int exception) +{ + int i; + Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array; -#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT + proc->current[0] = ep->saved_mfa[0]; + proc->current[1] = ep->saved_mfa[1]; + proc->current[2] = ep->saved_mfa[2]; + if (exception) + for (i = 0; i < ep->saved_argc; i++) + reg[i] = ep->argv[i]; + ep->saved_argc = 0; + ep->saved_mfa[0] = THE_NON_VALUE; +} -ERL_NIF_TERM -enif_schedule_dirty_nif(ErlNifEnv* env, int flags, - ERL_NIF_TERM (*fp)(ErlNifEnv*, int, const ERL_NIF_TERM[]), - int argc, const ERL_NIF_TERM argv[]) +#ifdef ERTS_DIRTY_SCHEDULERS + +/* + * Finalize a dirty NIF call. This function is scheduled to cause the VM to + * switch the process off a dirty scheduler thread and back onto a regular + * scheduler thread, and then return the result from the dirty NIF. It also + * restores the original NIF MFA when necessary based on the value of + * ep->fp set by execute_dirty_nif via init_nif_sched_data -- non-NULL + * means restore, NULL means do not restore. + */ +static ERL_NIF_TERM +dirty_nif_finalizer(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + Process* proc = env->proc; + NifExport* ep; + + ASSERT(argc == 1); + ASSERT(!ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data)); + ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); + ASSERT(ep); + if (ep->fp) + restore_nif_mfa(proc, ep, 0); + return argv[0]; +} + +/* Finalize a dirty NIF call that raised an exception. Otherwise same as + * the dirty_nif_finalizer() function. + */ +static ERL_NIF_TERM +dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + Process* proc = env->proc; + NifExport* ep; + + ASSERT(!ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data)); + ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); + ASSERT(ep); + if (ep->fp) + restore_nif_mfa(proc, ep, 1); + return enif_make_badarg(env); +} + +/* + * Dirty NIF execution wrapper function. Invoke an application's dirty NIF, + * then check the result and schedule the appropriate finalizer function + * where needed. Also restore the original NIF MFA when appropriate. + */ +static ERL_NIF_TERM +execute_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + Process* proc = env->proc; + NativeFunPtr fp = (NativeFunPtr) proc->current[6]; + NifExport* ep; + ERL_NIF_TERM result; + + ASSERT(ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data)); + + /* + * Set ep->fp to NULL before the native call so we know later whether it scheduled another NIF for execution + */ + ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); + ASSERT(ep); + ep->fp = NULL; + result = (*fp)(env, argc, argv); + erts_smp_atomic32_read_band_mb(&proc->state, + ~(ERTS_PSFLG_DIRTY_CPU_PROC + |ERTS_PSFLG_DIRTY_IO_PROC + |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q + |ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)); + if (erts_refc_dectest(&env->mod_nif->rt_dtor_cnt, 0) == 0 && env->mod_nif->mod == NULL) + close_lib(env->mod_nif); + /* + * If no more NIFs were scheduled by the native call via + * enif_schedule_nif(), then ep->fp will still be NULL as set above, in + * which case we need to restore the original NIF calling + * context. Reuse fp essentially as a boolean for this, passing it to + * init_nif_sched_data below. Both dirty_nif_exception and + * dirty_nif_finalizer then check ep->fp to decide whether or not to + * restore the original calling context. 
+ */ + ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); + ASSERT(ep); + if (ep->fp) + fp = NULL; + if (is_non_value(result)) { + if (proc->freason != TRAP) { + ASSERT(proc->freason == BADARG); + return init_nif_sched_data(env, dirty_nif_exception, fp, 0, argc, argv); + } else { + if (ep->fp == NULL) + restore_nif_mfa(proc, ep, 1); + return result; + } + } + else + return init_nif_sched_data(env, dirty_nif_finalizer, fp, 0, 1, &result); +} + +/* + * Dirty NIF scheduling wrapper function. Schedule a dirty NIF to execute + * via the execute_dirty_nif() wrapper function. The dirty scheduler thread + * type (CPU or I/O) is indicated in flags parameter. + */ +static ERTS_INLINE ERL_NIF_TERM +schedule_dirty_nif(ErlNifEnv* env, int flags, int argc, const ERL_NIF_TERM argv[]) { -#ifdef USE_THREADS erts_aint32_t state, n, a; Process* proc = env->proc; - Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array; - DirtyNifExport* ep = NULL; - int i; + NativeFunPtr fp = (NativeFunPtr) proc->current[6]; + NifExport* ep; + int need_save; - int chkflgs = (flags & (ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND)); - if (chkflgs != ERL_NIF_DIRTY_JOB_IO_BOUND && chkflgs != ERL_NIF_DIRTY_JOB_CPU_BOUND) - return enif_make_badarg(env); + ASSERT(flags==ERL_NIF_DIRTY_JOB_IO_BOUND || flags==ERL_NIF_DIRTY_JOB_CPU_BOUND); a = erts_smp_atomic32_read_acqb(&proc->state); while (1) { @@ -1590,7 +1793,7 @@ enif_schedule_dirty_nif(ErlNifEnv* env, int flags, */ n &= ~(ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_IO_PROC |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q); - if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND) + if (flags == ERL_NIF_DIRTY_JOB_CPU_BOUND) n |= ERTS_PSFLG_DIRTY_CPU_PROC; else n |= ERTS_PSFLG_DIRTY_IO_PROC; @@ -1598,85 +1801,106 @@ enif_schedule_dirty_nif(ErlNifEnv* env, int flags, if (a == state) break; } - if (!(ep = (DirtyNifExport*) ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(proc))) - alloc_proc_psd(proc, &ep); - ERTS_VBUMP_ALL_REDS(proc); - ep->exp.code[2] = argc; - for (i = 0; i < argc; i++) { - reg[i] = (Eterm) argv[i]; - } - proc->i = (BeamInstr*) ep->exp.addressv[0]; - ep->exp.code[4] = (BeamInstr) fp; - ep->m = env->mod_nif; - proc->freason = TRAP; - erts_refc_inc(&env->mod_nif->rt_dtor_cnt, 1); + ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); + need_save = (ep == NULL || is_non_value(ep->saved_mfa[0])); + return init_nif_sched_data(env, execute_dirty_nif, fp, need_save, argc, argv); +} - return THE_NON_VALUE; -#else - return (*fp)(env, argc, argv); -#endif +static ERL_NIF_TERM +schedule_dirty_io_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + return schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_IO_BOUND, argc, argv); +} + +static ERL_NIF_TERM +schedule_dirty_cpu_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + return schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_CPU_BOUND, argc, argv); +} + +#endif /* ERTS_DIRTY_SCHEDULERS */ + +/* + * NIF execution wrapper used by enif_schedule_nif() for regular NIFs. It + * calls the actual NIF, restores original NIF MFA if necessary, and + * then returns the NIF result. 
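To make the new scheduling entry point concrete, here is a hedged usage sketch from the NIF author's side. Every name in it (my_hash, my_hash_impl, the 1 MB threshold) is hypothetical; only enif_schedule_nif(), enif_inspect_binary(), the ERL_NIF_DIRTY_JOB_CPU_BOUND flag and the ERL_NIF_DIRTY_SCHEDULER_SUPPORT guard come from this patch or the existing API. The exported NIF validates its argument and then defers the real work, optionally onto a dirty CPU scheduler for large inputs:

    #include "erl_nif.h"

    static ERL_NIF_TERM my_hash_impl(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        ErlNifBinary bin;
        if (argc != 1 || !enif_inspect_binary(env, argv[0], &bin))
            return enif_make_badarg(env);
        /* ... hash bin.data / bin.size here ... */
        return enif_make_uint(env, 0); /* placeholder result */
    }

    static ERL_NIF_TERM my_hash(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
    {
        int flags = 0;                 /* 0 = run on a normal scheduler */
        ErlNifBinary bin;
        if (argc != 1 || !enif_inspect_binary(env, argv[0], &bin))
            return enif_make_badarg(env);
    #ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
        if (bin.size > (1 << 20))      /* arbitrary threshold for illustration */
            flags = ERL_NIF_DIRTY_JOB_CPU_BOUND;
    #endif
        return enif_schedule_nif(env, "my_hash_impl", flags, my_hash_impl, argc, argv);
    }

Because the wrapper machinery records the original MFA, an exception raised later in my_hash_impl is still reported against the calling module and function, which is what the saved_mfa bookkeeping above is for.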
+ */ +static ERL_NIF_TERM +execute_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + Process* proc = env->proc; + NativeFunPtr fp = (NativeFunPtr) proc->current[6]; + NifExport* ep; + ERL_NIF_TERM result; + + ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); + ASSERT(ep); + ep->fp = NULL; + result = (*fp)(env, argc, argv); + ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); + ASSERT(ep); + /* + * If no NIFs were scheduled by the native call via + * enif_schedule_nif(), then ep->fp will still be NULL as set above, in + * which case we need to restore the original NIF MFA. + */ + if (ep->fp == NULL) + restore_nif_mfa(proc, ep, is_non_value(result) && proc->freason != TRAP); + return result; } ERL_NIF_TERM -enif_schedule_dirty_nif_finalizer(ErlNifEnv* env, ERL_NIF_TERM result, - ERL_NIF_TERM (*fp)(ErlNifEnv*, ERL_NIF_TERM)) +enif_schedule_nif(ErlNifEnv* env, const char* fun_name, int flags, + ERL_NIF_TERM (*fp)(ErlNifEnv*, int, const ERL_NIF_TERM[]), + int argc, const ERL_NIF_TERM argv[]) { -#ifdef USE_THREADS Process* proc = env->proc; - Eterm* reg = ERTS_PROC_GET_SCHDATA(proc)->x_reg_array; - DirtyNifExport* ep; + NifExport* ep; + ERL_NIF_TERM fun_name_atom, result; + int need_save; - erts_smp_atomic32_read_band_mb(&proc->state, - ~(ERTS_PSFLG_DIRTY_CPU_PROC - |ERTS_PSFLG_DIRTY_IO_PROC - |ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q - |ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)); - if (!(ep = (DirtyNifExport*) ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(proc))) - alloc_proc_psd(proc, &ep); - ERTS_VBUMP_ALL_REDS(proc); - ep->exp.code[2] = 2; - reg[0] = (Eterm) result; -#if HAVE_INT64 && SIZEOF_LONG != 8 - ASSERT(sizeof(fp) <= sizeof(ErlNifUInt64)); - reg[1] = (Eterm) enif_make_uint64(env, (ErlNifUInt64) fp); -#else - ASSERT(sizeof(fp) <= sizeof(unsigned long)); - reg[1] = (Eterm) enif_make_ulong(env, (unsigned long) fp); -#endif - proc->i = (BeamInstr*) ep->exp.addressv[0]; - ep->exp.code[4] = (BeamInstr) execute_dirty_nif_finalizer; - proc->freason = TRAP; + if (argc > MAX_ARG) + return enif_make_badarg(env); + fun_name_atom = enif_make_atom(env, fun_name); + if (enif_is_exception(env, fun_name_atom)) + return fun_name_atom; - return THE_NON_VALUE; + ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); + need_save = (ep == NULL || is_non_value(ep->saved_mfa[0])); + + if (flags) { +#ifdef ERTS_DIRTY_SCHEDULERS + NativeFunPtr sched_fun; + int chkflgs = (flags & (ERL_NIF_DIRTY_JOB_IO_BOUND|ERL_NIF_DIRTY_JOB_CPU_BOUND)); + if (chkflgs == ERL_NIF_DIRTY_JOB_IO_BOUND) + sched_fun = schedule_dirty_io_nif; + else if (chkflgs == ERL_NIF_DIRTY_JOB_CPU_BOUND) + sched_fun = schedule_dirty_cpu_nif; + else + return enif_make_badarg(env); + result = init_nif_sched_data(env, sched_fun, fp, need_save, argc, argv); #else - return (*fp)(env, result); + return enif_make_badarg(env); #endif -} + } + else + result = init_nif_sched_data(env, execute_nif, fp, need_save, argc, argv); -/* A simple finalizer that just returns its result argument */ -ERL_NIF_TERM -enif_dirty_nif_finalizer(ErlNifEnv* env, ERL_NIF_TERM result) -{ + ep = (NifExport*) ERTS_PROC_GET_NIF_TRAP_EXPORT(proc); + ASSERT(ep); + ep->exp.code[1] = (BeamInstr) fun_name_atom; return result; } +#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT + int enif_is_on_dirty_scheduler(ErlNifEnv* env) { return ERTS_SCHEDULER_IS_DIRTY(env->proc->scheduler_data); } -int -enif_have_dirty_schedulers() -{ -#ifdef USE_THREADS - return 1; -#else - return 0; -#endif -} - #endif /* ERL_NIF_DIRTY_SCHEDULER_SUPPORT */ /* Maps */ @@ -1977,6 +2201,35 @@ static Eterm load_nif_error(Process* p, 
const char* atom, const char* format, .. return ret; } +/* + * The function below is for looping through ErlNifFunc arrays, helping + * provide backwards compatibility across the version 2.7 change that added + * the "flags" field to ErlNifFunc. + */ +static ErlNifFunc* next_func(ErlNifEntry* entry, int* incrp, ErlNifFunc* func) +{ + ASSERT(incrp); + if (!*incrp) { + if (entry->major > 2 || (entry->major == 2 && entry->minor >= 7)) + *incrp = sizeof(ErlNifFunc); + else { + /* + * ErlNifFuncV1 below is what ErlNifFunc was before the + * addition of the flags field for 2.7, and is needed to handle + * backward compatibility. + */ + typedef struct { + const char* name; + unsigned arity; + ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]); + }ErlNifFuncV1; + *incrp = sizeof(ErlNifFuncV1); + } + } + return (ErlNifFunc*) ((char*)func + *incrp); +} + + BIF_RETTYPE load_nif_2(BIF_ALIST_2) { static const char bad_lib[] = "bad_lib"; @@ -2086,22 +2339,48 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) } else { /*erts_fprintf(stderr, "Found module %T\r\n", mod_atom);*/ - + + int maybe_dirty_nifs = ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7)) + && (entry->options & ERL_NIF_DIRTY_NIF_OPTION)); + int incr = 0; + ErlNifFunc* f = entry->funcs; for (i=0; i < entry->num_of_funcs && ret==am_ok; i++) { BeamInstr** code_pp; - ErlNifFunc* f = &entry->funcs[i]; if (!erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1) || (code_pp = get_func_pp(mod->curr.code, f_atom, f->arity))==NULL) { ret = load_nif_error(BIF_P,bad_lib,"Function not found %T:%s/%u", mod_atom, f->name, f->arity); - } - else if (code_pp[1] - code_pp[0] < (5+3)) { + } + else if (maybe_dirty_nifs && f->flags) { + /* + * If the flags field is non-zero and this emulator was + * built with dirty scheduler support, check that the flags + * value is legal. But if this emulator was built without + * dirty scheduler support, treat a non-zero flags field as + * a load error. + */ +#ifdef ERTS_DIRTY_SCHEDULERS + if (f->flags != ERL_NIF_DIRTY_JOB_IO_BOUND && f->flags != ERL_NIF_DIRTY_JOB_CPU_BOUND) + ret = load_nif_error(BIF_P, bad_lib, "Illegal flags field value %d for NIF %T:%s/%u", + f->flags, mod_atom, f->name, f->arity); +#else + ret = load_nif_error(BIF_P, bad_lib, "NIF %T:%s/%u requires a runtime with dirty scheduler support.", + mod_atom, f->name, f->arity); +#endif + } +#ifdef ERTS_DIRTY_SCHEDULERS + else if (code_pp[1] - code_pp[0] < (5+4)) +#else + else if (code_pp[1] - code_pp[0] < (5+3)) +#endif + { ret = load_nif_error(BIF_P,bad_lib,"No explicit call to load_nif" - " in module (%T:%s/%u to small)", - mod_atom, entry->funcs[i].name, entry->funcs[i].arity); + " in module (%T:%s/%u too small)", + mod_atom, f->name, f->arity); } /*erts_fprintf(stderr, "Found NIF %T:%s/%u\r\n", - mod_atom, entry->funcs[i].name, entry->funcs[i].arity);*/ + mod_atom, f->name, f->arity);*/ + f = next_func(entry, &incr, f); } } @@ -2127,7 +2406,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) * is deprecated and was only ment as a development feature not to * be used in production systems. 
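For reference, a sketch of what a 2.7-style function table looks like from the library side, matching the checks above; module and function names are invented, and it assumes headers built with dirty scheduler support since the dirty-job flags are only defined there. The fourth ErlNifFunc member is the new flags field: zero keeps the old behaviour, and a non-zero value must be one of the two dirty-job flags and only loads on a dirty-capable emulator.

    static ERL_NIF_TERM fast_op(ErlNifEnv*, int, const ERL_NIF_TERM[]);   /* defined elsewhere (hypothetical) */
    static ERL_NIF_TERM slow_op(ErlNifEnv*, int, const ERL_NIF_TERM[]);   /* defined elsewhere (hypothetical) */

    static ErlNifFunc my_nif_funcs[] = {
        {"fast_op", 1, fast_op, 0},                           /* ordinary NIF */
        {"slow_op", 2, slow_op, ERL_NIF_DIRTY_JOB_CPU_BOUND}  /* dirty CPU NIF */
    };

    ERL_NIF_INIT(my_module, my_nif_funcs, NULL, NULL, NULL, NULL)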
(See warning below) */ - int k; + int k, old_incr = 0; + ErlNifFunc* old_func; lib->priv_data = mod->curr.nif->priv_data; ASSERT(mod->curr.nif->entry != NULL); @@ -2136,13 +2416,16 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) goto error; } /* Check that no NIF is removed */ + old_func = mod->curr.nif->entry->funcs; for (k=0; k < mod->curr.nif->entry->num_of_funcs; k++) { - ErlNifFunc* old_func = &mod->curr.nif->entry->funcs[k]; + int incr = 0; + ErlNifFunc* f = entry->funcs; for (i=0; i < entry->num_of_funcs; i++) { - if (old_func->arity == entry->funcs[i].arity - && sys_strcmp(old_func->name, entry->funcs[i].name) == 0) { + if (old_func->arity == f->arity + && sys_strcmp(old_func->name, f->name) == 0) { break; } + f = next_func(entry, &incr, f); } if (i == entry->num_of_funcs) { ret = load_nif_error(BIF_P,reload,"Reloaded library missing " @@ -2150,7 +2433,8 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) old_func->name, old_func->arity); goto error; } - } + old_func = next_func(mod->curr.nif->entry, &old_incr, old_func); + } erts_pre_nif(&env, BIF_P, lib); veto = entry->reload(&env, &lib->priv_data, BIF_ARG_2); erts_post_nif(&env); @@ -2197,13 +2481,17 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) /* ** Everything ok, patch the beam code with op_call_nif */ - mod->curr.nif = lib; + + int incr = 0; + ErlNifFunc* f = entry->funcs; + + mod->curr.nif = lib; for (i=0; i < entry->num_of_funcs; i++) { BeamInstr* code_ptr; - erts_atom_get(entry->funcs[i].name, sys_strlen(entry->funcs[i].name), &f_atom, ERTS_ATOM_ENC_LATIN1); - code_ptr = *get_func_pp(mod->curr.code, f_atom, entry->funcs[i].arity); - + erts_atom_get(f->name, sys_strlen(f->name), &f_atom, ERTS_ATOM_ENC_LATIN1); + code_ptr = *get_func_pp(mod->curr.code, f_atom, f->arity); + if (code_ptr[1] == 0) { code_ptr[5+0] = (BeamInstr) BeamOp(op_call_nif); } @@ -2211,10 +2499,21 @@ BIF_RETTYPE load_nif_2(BIF_ALIST_2) GenericBp* g = (GenericBp *) code_ptr[1]; ASSERT(code_ptr[5+0] == (BeamInstr) BeamOp(op_i_generic_breakpoint)); - g->orig_instr = (BeamInstr) BeamOp(op_call_nif); - } - code_ptr[5+1] = (BeamInstr) entry->funcs[i].fptr; + g->orig_instr = (BeamInstr) BeamOp(op_call_nif); + } + if ((entry->major > 2 || (entry->major == 2 && entry->minor >= 7)) + && (entry->options & ERL_NIF_DIRTY_NIF_OPTION) && f->flags) { +#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT + code_ptr[5+3] = (BeamInstr) f->fptr; + code_ptr[5+1] = (f->flags == ERL_NIF_DIRTY_JOB_IO_BOUND) ? 
+ (BeamInstr) schedule_dirty_io_nif : + (BeamInstr) schedule_dirty_cpu_nif; +#endif + } + else + code_ptr[5+1] = (BeamInstr) f->fptr; code_ptr[5+2] = (BeamInstr) lib; + f = next_func(entry, &incr, f); } } else { diff --git a/erts/emulator/beam/erl_nif.h b/erts/emulator/beam/erl_nif.h index 5b93c2398e..226fc199a1 100644 --- a/erts/emulator/beam/erl_nif.h +++ b/erts/emulator/beam/erl_nif.h @@ -42,9 +42,13 @@ ** 2.5: R17 Maps API additions ** 2.6: R17 with maps ** R17 dirty schedulers +** 2.7: 17.3 add enif_schedule_nif +** remove enif_schedule_dirty_nif, enif_schedule_dirty_nif_finalizer, enif_dirty_nif_finalizer +** add ErlNifEntry options +** add ErlNifFunc flags */ #define ERL_NIF_MAJOR_VERSION 2 -#define ERL_NIF_MINOR_VERSION 6 +#define ERL_NIF_MINOR_VERSION 7 /* * The emulator will refuse to load a nif-lib with a major version @@ -125,8 +129,10 @@ typedef struct const char* name; unsigned arity; ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]); + unsigned flags; }ErlNifFunc; + typedef struct enif_entry_t { int major; @@ -139,8 +145,11 @@ typedef struct enif_entry_t int (*upgrade)(ErlNifEnv*, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info); void (*unload) (ErlNifEnv*, void* priv_data); const char* vm_variant; + unsigned options; }ErlNifEntry; +/* Field bits for ErlNifEntry options */ +#define ERL_NIF_DIRTY_NIF_OPTION 1 typedef struct @@ -232,10 +241,21 @@ extern TWinDynNifCallbacks WinDynNifCallbacks; # else # define ERL_NIF_INIT_DECL(MODNAME) __declspec(dllexport) ErlNifEntry* nif_init(TWinDynNifCallbacks* callbacks) # endif -# define ERL_NIF_INIT_BODY memcpy(&WinDynNifCallbacks,callbacks,sizeof(TWinDynNifCallbacks)) +# ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT +# define ERL_NIF_INIT_BODY do { \ + memcpy(&WinDynNifCallbacks,callbacks,sizeof(TWinDynNifCallbacks)); \ + entry.options = ERL_NIF_DIRTY_NIF_OPTION; \ + } while(0) +# else +# define ERL_NIF_INIT_BODY memcpy(&WinDynNifCallbacks,callbacks,sizeof(TWinDynNifCallbacks)) +# endif #else # define ERL_NIF_INIT_GLOB -# define ERL_NIF_INIT_BODY +# ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT +# define ERL_NIF_INIT_BODY entry.options = ERL_NIF_DIRTY_NIF_OPTION +# else +# define ERL_NIF_INIT_BODY +# endif # ifdef STATIC_ERLANG_NIF # define ERL_NIF_INIT_DECL(MODNAME) ErlNifEntry* MODNAME ## _nif_init(void) # else diff --git a/erts/emulator/beam/erl_nif_api_funcs.h b/erts/emulator/beam/erl_nif_api_funcs.h index d7c554e60b..630cefae93 100644 --- a/erts/emulator/beam/erl_nif_api_funcs.h +++ b/erts/emulator/beam/erl_nif_api_funcs.h @@ -22,7 +22,7 @@ #endif /* -** WARNING: add new ERL_NIF_API_FUNC_DECL entries at the bottom of the list +** WARNING: Add new ERL_NIF_API_FUNC_DECL entries at the bottom of the list ** to keep compatibility on Windows!!! 
** ** And don't forget to increase ERL_NIF_MINOR_VERSION in erl_nif.h @@ -141,14 +141,6 @@ ERL_NIF_API_FUNC_DECL(int,enif_is_number,(ErlNifEnv*, ERL_NIF_TERM term)); ERL_NIF_API_FUNC_DECL(void*,enif_dlopen,(const char* lib, void (*err_handler)(void*,const char*), void* err_arg)); ERL_NIF_API_FUNC_DECL(void*,enif_dlsym,(void* handle, const char* symbol, void (*err_handler)(void*,const char*), void* err_arg)); ERL_NIF_API_FUNC_DECL(int,enif_consume_timeslice,(ErlNifEnv*, int percent)); -#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT -ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_dirty_nif,(ErlNifEnv*,int,ERL_NIF_TERM (*)(ErlNifEnv*,int,const ERL_NIF_TERM[]),int,const ERL_NIF_TERM[])); -ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_dirty_nif_finalizer,(ErlNifEnv*,ERL_NIF_TERM,ERL_NIF_TERM (*)(ErlNifEnv*,ERL_NIF_TERM))); -ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_dirty_nif_finalizer,(ErlNifEnv*,ERL_NIF_TERM)); -ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*)); -ERL_NIF_API_FUNC_DECL(int,enif_have_dirty_schedulers,(void)); -#endif - ERL_NIF_API_FUNC_DECL(int, enif_is_map, (ErlNifEnv* env, ERL_NIF_TERM term)); ERL_NIF_API_FUNC_DECL(int, enif_get_map_size, (ErlNifEnv* env, ERL_NIF_TERM term, size_t *size)); ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM, enif_make_new_map, (ErlNifEnv* env)); @@ -163,12 +155,22 @@ ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_is_tail, (ErlNifEnv *env, ErlNifMap ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_next, (ErlNifEnv *env, ErlNifMapIterator *iter)); ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_prev, (ErlNifEnv *env, ErlNifMapIterator *iter)); ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_get_pair, (ErlNifEnv *env, ErlNifMapIterator *iter, ERL_NIF_TERM *key, ERL_NIF_TERM *value)); - +ERL_NIF_API_FUNC_DECL(ERL_NIF_TERM,enif_schedule_nif,(ErlNifEnv*,const char*,int,ERL_NIF_TERM (*)(ErlNifEnv*,int,const ERL_NIF_TERM[]),int,const ERL_NIF_TERM[])); /* -** Add new entries here to keep compatibility on Windows!!! +** ADD NEW ENTRIES HERE (before this comment) !!! */ + + +/* + * Conditional EXPERIMENTAL stuff always last. + * Must be moved up and made unconditional to support binary backward + * compatibility on Windows. 
+ */ +#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT +ERL_NIF_API_FUNC_DECL(int,enif_is_on_dirty_scheduler,(ErlNifEnv*)); #endif +#endif /* ERL_NIF_API_FUNC_DECL */ /* ** Please keep the ERL_NIF_API_FUNC_MACRO list below in the same order @@ -282,21 +284,12 @@ ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_get_pair, (ErlNifEnv *env, ErlNifMa # define enif_make_int64 ERL_NIF_API_FUNC_MACRO(enif_make_int64) # define enif_make_uint64 ERL_NIF_API_FUNC_MACRO(enif_make_uint64) #endif - # define enif_is_exception ERL_NIF_API_FUNC_MACRO(enif_is_exception) # define enif_make_reverse_list ERL_NIF_API_FUNC_MACRO(enif_make_reverse_list) # define enif_is_number ERL_NIF_API_FUNC_MACRO(enif_is_number) # define enif_dlopen ERL_NIF_API_FUNC_MACRO(enif_dlopen) # define enif_dlsym ERL_NIF_API_FUNC_MACRO(enif_dlsym) # define enif_consume_timeslice ERL_NIF_API_FUNC_MACRO(enif_consume_timeslice) -#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT -# define enif_schedule_dirty_nif ERL_NIF_API_FUNC_MACRO(enif_schedule_dirty_nif) -# define enif_schedule_dirty_nif_finalizer ERL_NIF_API_FUNC_MACRO(enif_schedule_dirty_nif_finalizer) -# define enif_dirty_nif_finalizer ERL_NIF_API_FUNC_MACRO(enif_dirty_nif_finalizer) -# define enif_is_on_dirty_scheduler ERL_NIF_API_FUNC_MACRO(enif_is_on_dirty_scheduler) -# define enif_have_dirty_schedulers ERL_NIF_API_FUNC_MACRO(enif_have_dirty_schedulers) -#endif - # define enif_is_map ERL_NIF_API_FUNC_MACRO(enif_is_map) # define enif_get_map_size ERL_NIF_API_FUNC_MACRO(enif_get_map_size) # define enif_make_new_map ERL_NIF_API_FUNC_MACRO(enif_make_new_map) @@ -311,11 +304,21 @@ ERL_NIF_API_FUNC_DECL(int, enif_map_iterator_get_pair, (ErlNifEnv *env, ErlNifMa # define enif_map_iterator_next ERL_NIF_API_FUNC_MACRO(enif_map_iterator_next) # define enif_map_iterator_prev ERL_NIF_API_FUNC_MACRO(enif_map_iterator_prev) # define enif_map_iterator_get_pair ERL_NIF_API_FUNC_MACRO(enif_map_iterator_get_pair) +# define enif_schedule_nif ERL_NIF_API_FUNC_MACRO(enif_schedule_nif) /* -** Add new entries here +** ADD NEW ENTRIES HERE (before this comment) */ + +/* + * Conditional EXPERIMENTAL stuff always last + * Must be moved up and made unconditional to support binary backward + * compatibility on Windows. 
+ */ +#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT +# define enif_is_on_dirty_scheduler ERL_NIF_API_FUNC_MACRO(enif_is_on_dirty_scheduler) #endif +#endif /* ERL_NIF_API_FUNC_MACRO */ #if defined(__GNUC__) && !(defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_)) diff --git a/erts/emulator/beam/erl_port_task.c b/erts/emulator/beam/erl_port_task.c index 31d9a1e26e..682f6f8f4b 100644 --- a/erts/emulator/beam/erl_port_task.c +++ b/erts/emulator/beam/erl_port_task.c @@ -68,6 +68,13 @@ static void chk_task_queues(Port *pp, ErtsPortTask *execq, int processing_busy_q #define DTRACE_DRIVER(PROBE_NAME, PP) do {} while(0) #endif +#define ERTS_SMP_LC_VERIFY_RQ(RQ, PP) \ + do { \ + ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq)); \ + ERTS_SMP_LC_ASSERT((RQ) == ((ErtsRunQueue *) \ + erts_smp_atomic_read_nob(&(PP)->run_queue))); \ + } while (0) + erts_smp_atomic_t erts_port_task_outstanding_io_tasks; #define ERTS_PT_STATE_SCHEDULED 0 @@ -798,12 +805,13 @@ schedule_port_task_handle_list_free(ErtsPortTaskHandleList *pthlp) static ERTS_INLINE void abort_nosuspend_task(Port *pp, ErtsPortTaskType type, - ErtsPortTaskTypeData *tdp) + ErtsPortTaskTypeData *tdp, + int bpq_data) { ASSERT(type == ERTS_PORT_TASK_PROC_SIG); - if (!pp->sched.taskq.bpq) + if (!bpq_data) tdp->psig.callback(NULL, ERTS_PORT_SFLG_INVALID, ERTS_PROC2PORT_SIG_ABORT_NOSUSPEND, @@ -991,6 +999,7 @@ static ERTS_INLINE int finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q) { erts_aint32_t act; + unsigned int prof_runnable_ports; if (!processing_busy_q) pp->sched.taskq.local.first = *execq; @@ -1007,6 +1016,10 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q) if (act & ERTS_PTS_FLG_CHK_UNSET_BUSY_PORT_Q) act = check_unset_busy_port_q(pp, act, pp->sched.taskq.bpq); + prof_runnable_ports = erts_system_profile_flags.runnable_ports; + if (prof_runnable_ports) + erts_port_task_sched_lock(&pp->sched); + while (1) { erts_aint32_t new, exp; @@ -1018,12 +1031,24 @@ finalize_exec(Port *pp, ErtsPortTask **execq, int processing_busy_q) act = erts_smp_atomic32_cmpxchg_relb(&pp->sched.flags, new, exp); - ASSERT(!(act & ERTS_PTS_FLG_IN_RUNQ)); + ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_IN_RUNQ)); + ERTS_LC_ASSERT(!(act & ERTS_PTS_FLG_EXEC_IMM)); if (exp == act) break; } + if (prof_runnable_ports | IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) { + /* trace port scheduling, out */ + if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) + trace_sched_ports(pp, am_out); + if (prof_runnable_ports) { + if (!(act & (ERTS_PTS_FLG_EXEC_IMM|ERTS_PTS_FLG_HAVE_TASKS))) + profile_runnable_port(pp, am_inactive); + erts_port_task_sched_unlock(&pp->sched); + } + } + return (act & ERTS_PTS_FLG_HAVE_TASKS) != 0; } @@ -1345,7 +1370,7 @@ erts_port_task_abort_nosuspend_tasks(Port *pp) #endif schedule_port_task_handle_list_free(pthlp); - abort_nosuspend_task(pp, type, &td); + abort_nosuspend_task(pp, type, &td, pp->sched.taskq.bpq != NULL); } } @@ -1369,6 +1394,7 @@ erts_port_task_schedule(Eterm id, Port *pp; ErtsPortTask *ptp = NULL; erts_aint32_t act, add_flags; + unsigned int prof_runnable_ports; if (pthp && erts_port_task_is_scheduled(pthp)) { ASSERT(0); @@ -1457,6 +1483,10 @@ erts_port_task_schedule(Eterm id, if (ns_pthlp) add_flags |= ERTS_PTS_FLG_HAVE_NS_TASKS; + prof_runnable_ports = erts_system_profile_flags.runnable_ports; + if (prof_runnable_ports) + erts_port_task_sched_lock(&pp->sched); + while (1) { erts_aint32_t new, exp; @@ -1481,6 +1511,13 @@ erts_port_task_schedule(Eterm id, goto done; /* Died after our task insert... 
*/ } + if (prof_runnable_ports) { + if (!(act & ERTS_PTS_FLG_EXEC_IMM)) + profile_runnable_port(pp, am_active); + erts_port_task_sched_unlock(&pp->sched); + prof_runnable_ports = 0; + } + /* Enqueue port on run-queue */ runq = erts_port_runq(pp); @@ -1489,8 +1526,10 @@ erts_port_task_schedule(Eterm id, #ifdef ERTS_SMP xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL); + ERTS_SMP_LC_ASSERT(runq != xrunq); + ERTS_SMP_LC_VERIFY_RQ(runq, pp); if (xrunq) { - /* Port emigrated ... */ + /* Emigrate port ... */ erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); erts_smp_runq_unlock(runq); runq = erts_port_runq(pp); @@ -1500,10 +1539,6 @@ erts_port_task_schedule(Eterm id, #endif enqueue_port(runq, pp); - - if (erts_system_profile_flags.runnable_ports) { - profile_runnable_port(pp, am_active); - } erts_smp_runq_unlock(runq); @@ -1511,6 +1546,9 @@ erts_port_task_schedule(Eterm id, done: + if (prof_runnable_ports) + erts_port_task_sched_unlock(&pp->sched); + #ifdef ERTS_SMP if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED) erts_port_dec_refc(pp); @@ -1525,7 +1563,7 @@ abort_nosuspend: erts_port_dec_refc(pp); #endif - abort_nosuspend_task(pp, ptp->type, &ptp->u.alive.td); + abort_nosuspend_task(pp, ptp->type, &ptp->u.alive.td, 0); ASSERT(ns_pthlp); erts_free(ERTS_ALC_T_PT_HNDL_LIST, ns_pthlp); @@ -1609,6 +1647,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) goto done; } + ERTS_SMP_LC_VERIFY_RQ(runq, pp); + erts_smp_runq_unlock(runq); *curr_port_pp = pp; @@ -1765,10 +1805,6 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) erts_unblock_fpe(fpe_was_unmasked); - /* trace port scheduling, out */ - if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) { - trace_sched_ports(pp, am_out); - } if (io_tasks_executed) { ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) @@ -1791,11 +1827,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) erts_smp_runq_lock(runq); - if (!active) { - if (erts_system_profile_flags.runnable_ports) - profile_runnable_port(pp, am_inactive); - } - else { + if (active) { #ifdef ERTS_SMP ErtsRunQueue *xrunq; #endif @@ -1804,6 +1836,8 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) #ifdef ERTS_SMP xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL); + ERTS_SMP_LC_ASSERT(runq != xrunq); + ERTS_SMP_LC_VERIFY_RQ(runq, pp); if (!xrunq) { #endif enqueue_port(runq, pp); @@ -1811,7 +1845,7 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp) #ifdef ERTS_SMP } else { - /* Port emigrated ... */ + /* Emigrate port... 
*/ erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq); erts_smp_runq_unlock(runq); diff --git a/erts/emulator/beam/erl_port_task.h b/erts/emulator/beam/erl_port_task.h index 1d30465ec9..9ef0cfcedc 100644 --- a/erts/emulator/beam/erl_port_task.h +++ b/erts/emulator/beam/erl_port_task.h @@ -78,6 +78,7 @@ extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks; #define ERTS_PTS_FLG_PARALLELISM (((erts_aint32_t) 1) << 9) #define ERTS_PTS_FLG_FORCE_SCHED (((erts_aint32_t) 1) << 10) #define ERTS_PTS_FLG_EXITING (((erts_aint32_t) 1) << 11) +#define ERTS_PTS_FLG_EXEC_IMM (((erts_aint32_t) 1) << 12) #define ERTS_PTS_FLGS_BUSY \ (ERTS_PTS_FLG_BUSY_PORT | ERTS_PTS_FLG_BUSY_PORT_Q) @@ -87,6 +88,7 @@ extern erts_smp_atomic_t erts_port_task_outstanding_io_tasks; | ERTS_PTS_FLG_HAVE_BUSY_TASKS \ | ERTS_PTS_FLG_HAVE_TASKS \ | ERTS_PTS_FLG_EXEC \ + | ERTS_PTS_FLG_EXEC_IMM \ | ERTS_PTS_FLG_FORCE_SCHED \ | ERTS_PTS_FLG_EXITING) diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c index b73f9b7f92..20a88ec581 100644 --- a/erts/emulator/beam/erl_process.c +++ b/erts/emulator/beam/erl_process.c @@ -590,12 +590,10 @@ erts_pre_init_process(void) erts_psd_required_locks[ERTS_PSD_DELAYED_GC_TASK_QS].set_locks = ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS; -#ifdef ERTS_DIRTY_SCHEDULERS - erts_psd_required_locks[ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT].get_locks - = ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_GET_LOCKS; - erts_psd_required_locks[ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT].set_locks - = ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_SET_LOCKS; -#endif + erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].get_locks + = ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS; + erts_psd_required_locks[ERTS_PSD_NIF_TRAP_EXPORT].set_locks + = ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS; /* Check that we have locks for all entries */ for (ix = 0; ix < ERTS_PSD_SIZE; ix++) { @@ -2211,6 +2209,9 @@ aux_work_timeout_early_init(int no_schedulers) p = (UWord) malloc((sizeof(ErtsAuxWorkTmo) + sizeof(erts_atomic32_t)*(no_schedulers+1)) + ERTS_CACHE_LINE_SIZE-1); + if (!p) { + ERTS_INTERNAL_ERROR("malloc failed to allocate memory!"); + } if (p & ERTS_CACHE_LINE_MASK) p = (p & ~ERTS_CACHE_LINE_MASK) + ERTS_CACHE_LINE_SIZE; ASSERT((p & ERTS_CACHE_LINE_MASK) == 0); @@ -3755,17 +3756,25 @@ evacuate_run_queue(ErtsRunQueue *rq, } #ifdef ERTS_DIRTY_SCHEDULERS else if (state & ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q) { - erts_aint32_t old; - old = erts_smp_atomic32_read_band_nob(&proc->state, - ~(ERTS_PSFLG_DIRTY_CPU_PROC - | ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q)); +#ifdef DEBUG + erts_aint32_t old = +#else + (void) +#endif + erts_smp_atomic32_read_band_nob(&proc->state, + ~(ERTS_PSFLG_DIRTY_CPU_PROC + | ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q)); /* assert that no other dirty flags are set */ ASSERT(!(old & (ERTS_PSFLG_DIRTY_IO_PROC|ERTS_PSFLG_DIRTY_IO_PROC_IN_Q))); } else if (state & ERTS_PSFLG_DIRTY_IO_PROC_IN_Q) { - erts_aint32_t old; - old = erts_smp_atomic32_read_band_nob(&proc->state, - ~(ERTS_PSFLG_DIRTY_IO_PROC - | ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)); +#ifdef DEBUG + erts_aint32_t old = +#else + (void) +#endif + erts_smp_atomic32_read_band_nob(&proc->state, + ~(ERTS_PSFLG_DIRTY_IO_PROC + | ERTS_PSFLG_DIRTY_IO_PROC_IN_Q)); /* assert that no other dirty flags are set */ ASSERT(!(old & (ERTS_PSFLG_DIRTY_CPU_PROC|ERTS_PSFLG_DIRTY_CPU_PROC_IN_Q))); } @@ -5874,6 +5883,9 @@ schedule_out_process(ErtsRunQueue *c_rq, erts_aint32_t state, Process *p, Proces case ERTS_ENQUEUE_NOT: if (erts_system_profile_flags.runnable_procs) { + /* Status lock prevents out of order "runnable proc" trace 
msgs */ + ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); + if (!(a & ERTS_PSFLG_ACTIVE_SYS) && (!(a & ERTS_PSFLG_ACTIVE) || (a & ERTS_PSFLG_SUSPENDED))) { @@ -5987,7 +5999,8 @@ change_proc_schedule_state(Process *p, erts_aint32_t clear_state_flags, erts_aint32_t set_state_flags, erts_aint32_t *statep, - erts_aint32_t *enq_prio_p) + erts_aint32_t *enq_prio_p, + ErtsProcLocks locks) { /* * NOTE: ERTS_PSFLG_RUNNING, ERTS_PSFLG_RUNNING_SYS and @@ -5996,6 +6009,11 @@ change_proc_schedule_state(Process *p, */ erts_aint32_t a = *statep, n; int enqueue; /* < 0 -> use proxy */ + unsigned int prof_runnable_procs = erts_system_profile_flags.runnable_procs; + unsigned int lock_status = (prof_runnable_procs + && !(locks & ERTS_PROC_LOCK_STATUS)); + + ERTS_SMP_LC_ASSERT(locks == erts_proc_lc_my_proc_locks(p)); ASSERT(!(a & ERTS_PSFLG_PROXY)); ASSERT((clear_state_flags & (ERTS_PSFLG_RUNNING @@ -6005,6 +6023,9 @@ change_proc_schedule_state(Process *p, | ERTS_PSFLG_RUNNING_SYS | ERTS_PSFLG_ACTIVE_SYS)) == 0); + if (lock_status) + erts_smp_proc_lock(p, ERTS_PROC_LOCK_STATUS); + while (1) { erts_aint32_t e; n = e = a; @@ -6040,7 +6061,9 @@ change_proc_schedule_state(Process *p, break; } - if (erts_system_profile_flags.runnable_procs) { + if (prof_runnable_procs) { + + /* Status lock prevents out of order "runnable proc" trace msgs */ if (((n & (ERTS_PSFLG_SUSPENDED | ERTS_PSFLG_ACTIVE)) == ERTS_PSFLG_ACTIVE) @@ -6053,15 +6076,18 @@ change_proc_schedule_state(Process *p, profile_runnable_proc(p, am_active); } + if (lock_status) + erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); } + *statep = a; return enqueue; } static ERTS_INLINE void -schedule_process(Process *p, erts_aint32_t in_state) +schedule_process(Process *p, erts_aint32_t in_state, ErtsProcLocks locks) { erts_aint32_t enq_prio = -1; erts_aint32_t state = in_state; @@ -6069,7 +6095,8 @@ schedule_process(Process *p, erts_aint32_t in_state) 0, ERTS_PSFLG_ACTIVE, &state, - &enq_prio); + &enq_prio, + locks); if (enqueue != ERTS_ENQUEUE_NOT) add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio), state, @@ -6077,16 +6104,27 @@ schedule_process(Process *p, erts_aint32_t in_state) } void -erts_schedule_process(Process *p, erts_aint32_t state) +erts_schedule_process(Process *p, erts_aint32_t state, ErtsProcLocks locks) { - schedule_process(p, state); + schedule_process(p, state, locks); } static void schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy) { + /* + * Expects status lock to be locked when called, and + * returns with status lock unlocked... + */ erts_aint32_t a = state, n, enq_prio = -1; int enqueue; /* < 0 -> use proxy */ + unsigned int prof_runnable_procs = erts_system_profile_flags.runnable_procs; + + /* Status lock prevents out of order "runnable proc" trace msgs */ + ERTS_SMP_LC_ASSERT(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p)); + + if (!prof_runnable_procs) + erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); ASSERT(!(state & ERTS_PSFLG_PROXY)); @@ -6095,7 +6133,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy) n = e = a; if (a & ERTS_PSFLG_FREE) - return; /* We don't want to schedule free processes... */ + goto cleanup; /* We don't want to schedule free processes... 
*/ enqueue = ERTS_ENQUEUE_NOT; n |= ERTS_PSFLG_ACTIVE_SYS; @@ -6108,7 +6146,7 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy) goto cleanup; } - if (erts_system_profile_flags.runnable_procs) { + if (prof_runnable_procs) { if (!(a & (ERTS_PSFLG_ACTIVE_SYS | ERTS_PSFLG_RUNNING @@ -6118,6 +6156,8 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy) profile_runnable_proc(p, am_active); } + erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + prof_runnable_procs = 0; } if (enqueue != ERTS_ENQUEUE_NOT) { @@ -6132,8 +6172,14 @@ schedule_process_sys_task(Process *p, erts_aint32_t state, Process *proxy) } cleanup: + + if (prof_runnable_procs) + erts_smp_proc_unlock(p, ERTS_PROC_LOCK_STATUS); + if (proxy) free_proxy_proc(proxy); + + ERTS_SMP_LC_ASSERT(!(ERTS_PROC_LOCK_STATUS & erts_proc_lc_my_proc_locks(p))); } static ERTS_INLINE int @@ -6200,7 +6246,7 @@ suspend_process(Process *c_p, Process *p) } static ERTS_INLINE void -resume_process(Process *p) +resume_process(Process *p, ErtsProcLocks locks) { erts_aint32_t state, enq_prio = -1; int enqueue; @@ -6217,7 +6263,8 @@ resume_process(Process *p) ERTS_PSFLG_SUSPENDED, 0, &state, - &enq_prio); + &enq_prio, + locks); if (enqueue) add2runq(enqueue > 0 ? p : make_proxy_proc(NULL, p, enq_prio), state, @@ -7818,6 +7865,9 @@ erts_start_schedulers(void) #ifdef ETHR_HAVE_THREAD_NAMES opts.name = malloc(80); + if (!opts.name) { + ERTS_INTERNAL_ERROR("malloc failed to allocate memory!"); + } #endif #ifdef ERTS_SMP @@ -8030,7 +8080,8 @@ handle_pend_sync_suspend(Process *suspendee, } /* suspender is suspended waiting for suspendee to suspend; resume suspender */ - resume_process(suspender); + ASSERT(suspendee != suspender); + resume_process(suspender, ERTS_PROC_LOCK_STATUS); erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_STATUS); } } @@ -8065,7 +8116,7 @@ pid2proc_not_running(Process *c_p, ErtsProcLocks c_p_locks, ASSERT(c_p->flags & F_P2PNR_RESCHED); c_p->flags &= ~F_P2PNR_RESCHED; if (!suspend && rp) - resume_process(rp); + resume_process(rp, rp_locks); } else { @@ -8223,7 +8274,8 @@ handle_pend_bif_sync_suspend(Process *suspendee, } /* suspender is suspended waiting for suspendee to suspend; resume suspender */ - resume_process(suspender); + ASSERT(suspender != suspendee); + resume_process(suspender, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); erts_smp_proc_unlock(suspender, ERTS_PROC_LOCK_LINK|ERTS_PROC_LOCK_STATUS); } @@ -8583,7 +8635,8 @@ resume_process_1(BIF_ALIST_1) ASSERT(ERTS_PSFLG_SUSPENDED & erts_smp_atomic32_read_nob(&suspendee->state)); - resume_process(suspendee); + ASSERT(BIF_P != suspendee); + resume_process(suspendee, ERTS_PROC_LOCK_STATUS); erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); } @@ -8713,7 +8766,7 @@ erts_resume(Process* process, ErtsProcLocks process_locks) ERTS_SMP_LC_ASSERT(process_locks == erts_proc_lc_my_proc_locks(process)); if (!(process_locks & ERTS_PROC_LOCK_STATUS)) erts_smp_proc_lock(process, ERTS_PROC_LOCK_STATUS); - resume_process(process); + resume_process(process, process_locks|ERTS_PROC_LOCK_STATUS); if (!(process_locks & ERTS_PROC_LOCK_STATUS)) erts_smp_proc_unlock(process, ERTS_PROC_LOCK_STATUS); } @@ -8732,7 +8785,7 @@ erts_resume_processes(ErtsProcList *list) proc = erts_pid2proc(NULL, 0, plp->pid, ERTS_PROC_LOCK_STATUS); if (proc) { if (erts_proclist_same(plp, proc)) { - resume_process(proc); + resume_process(proc, ERTS_PROC_LOCK_STATUS); nresumed++; } erts_smp_proc_unlock(proc, ERTS_PROC_LOCK_STATUS); @@ -9968,8 +10021,10 @@ 
erts_internal_request_system_task_3(BIF_ALIST_3) rp_state = n; } - erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS); - + /* + * schedule_process_sys_task() unlocks status + * lock on process. + */ schedule_process_sys_task(rp, rp_state, NULL); if (free_stqs) @@ -10714,7 +10769,7 @@ erl_create_process(Process* parent, /* Parent of process (default group leader). * Schedule process for execution. */ - schedule_process(p, state); + schedule_process(p, state, 0); VERBOSE(DEBUG_PROCESSES, ("Created a new process: %T\n",p->common.id)); @@ -11035,7 +11090,8 @@ set_proc_exiting(Process *p, ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT, ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE, &state, - &enq_prio); + &enq_prio, + ERTS_PROC_LOCKS_ALL); p->fvalue = reason; if (bp) @@ -11076,7 +11132,8 @@ set_proc_self_exiting(Process *c_p) ERTS_PSFLG_SUSPENDED|ERTS_PSFLG_PENDING_EXIT, ERTS_PSFLG_EXITING|ERTS_PSFLG_ACTIVE, &state, - &enq_prio); + &enq_prio, + ERTS_PROC_LOCKS_ALL); ASSERT(!enqueue); return state; @@ -11721,8 +11778,9 @@ resume_suspend_monitor(ErtsSuspendMonitor *smon, void *vc_p) Process *suspendee = erts_pid2proc((Process *) vc_p, ERTS_PROC_LOCK_MAIN, smon->pid, ERTS_PROC_LOCK_STATUS); if (suspendee) { + ASSERT(suspendee != vc_p); if (smon->active) - resume_process(suspendee); + resume_process(suspendee, ERTS_PROC_LOCK_STATUS); erts_smp_proc_unlock(suspendee, ERTS_PROC_LOCK_STATUS); } erts_destroy_suspend_monitor(smon); @@ -11814,6 +11872,7 @@ erts_continue_exit_process(Process *p) struct saved_calls *scb; process_breakpoint_time_t *pbt; erts_aint32_t state; + void *nif_export; #ifdef DEBUG int yield_allowed = 1; @@ -11964,6 +12023,7 @@ erts_continue_exit_process(Process *p) : NULL); scb = ERTS_PROC_SET_SAVED_CALLS_BUF(p, ERTS_PROC_LOCKS_ALL, NULL); pbt = ERTS_PROC_SET_CALL_TIME(p, ERTS_PROC_LOCKS_ALL, NULL); + nif_export = ERTS_PROC_SET_NIF_TRAP_EXPORT(p, ERTS_PROC_LOCKS_ALL, NULL); erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL); #ifdef BM_COUNTERS @@ -12011,6 +12071,9 @@ erts_continue_exit_process(Process *p) if (pbt) erts_free(ERTS_ALC_T_BPD, (void *) pbt); + if (nif_export) + erts_destroy_nif_export(nif_export); + delete_process(p); #ifdef ERTS_SMP @@ -12055,7 +12118,7 @@ timeout_proc(Process* p) state = erts_smp_atomic32_read_acqb(&p->state); if (!(state & ERTS_PSFLG_ACTIVE)) - schedule_process(p, state); + schedule_process(p, state, ERTS_PROC_LOCK_MAIN|ERTS_PROC_LOCK_STATUS); } diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h index ed6dadbffa..3b0798207e 100644 --- a/erts/emulator/beam/erl_process.h +++ b/erts/emulator/beam/erl_process.h @@ -734,13 +734,9 @@ erts_smp_reset_max_len(ErtsRunQueue *rq, ErtsRunQueueInfo *rqi) #define ERTS_PSD_DIST_ENTRY 3 #define ERTS_PSD_CALL_TIME_BP 4 #define ERTS_PSD_DELAYED_GC_TASK_QS 5 -#ifdef ERTS_DIRTY_SCHEDULERS -#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT 6 +#define ERTS_PSD_NIF_TRAP_EXPORT 6 #define ERTS_PSD_SIZE 7 -#else -#define ERTS_PSD_SIZE 6 -#endif typedef struct { void *data[ERTS_PSD_SIZE]; @@ -767,10 +763,8 @@ typedef struct { #define ERTS_PSD_DELAYED_GC_TASK_QS_GET_LOCKS ERTS_PROC_LOCK_MAIN #define ERTS_PSD_DELAYED_GC_TASK_QS_SET_LOCKS ERTS_PROC_LOCK_MAIN -#ifdef ERTS_DIRTY_SCHEDULERS -#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_GET_LOCKS ERTS_PROC_LOCK_MAIN -#define ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT_SET_LOCKS ERTS_PROC_LOCK_MAIN -#endif +#define ERTS_PSD_NIF_TRAP_EXPORT_GET_LOCKS ERTS_PROC_LOCK_MAIN +#define ERTS_PSD_NIF_TRAP_EXPORT_SET_LOCKS ERTS_PROC_LOCK_MAIN typedef struct { ErtsProcLocks get_locks; @@ -1367,6 +1361,9 @@ 
Uint64 erts_get_proc_interval(void); Uint64 erts_ensure_later_proc_interval(Uint64); Uint64 erts_step_proc_interval(void); +int erts_setup_nif_gc(Process* proc, Eterm** objv, int* nobj); /* see erl_nif.c */ +void erts_destroy_nif_export(void *); /* see erl_nif.c */ + ErtsProcList *erts_proclist_create(Process *); void erts_proclist_destroy(ErtsProcList *); @@ -1704,17 +1701,17 @@ ErtsSchedulerData *erts_get_scheduler_data(void) #endif #endif -void erts_schedule_process(Process *, erts_aint32_t); +void erts_schedule_process(Process *, erts_aint32_t, ErtsProcLocks); -ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p); +ERTS_GLB_INLINE void erts_proc_notify_new_message(Process *p, ErtsProcLocks locks); #if ERTS_GLB_INLINE_INCL_FUNC_DEF ERTS_GLB_INLINE void -erts_proc_notify_new_message(Process *p) +erts_proc_notify_new_message(Process *p, ErtsProcLocks locks) { /* No barrier needed, due to msg lock */ erts_aint32_t state = erts_smp_atomic32_read_nob(&p->state); if (!(state & ERTS_PSFLG_ACTIVE)) - erts_schedule_process(p, state); + erts_schedule_process(p, state, locks); } #endif @@ -1817,12 +1814,10 @@ erts_psd_set(Process *p, ErtsProcLocks plocks, int ix, void *data) #define ERTS_PROC_SET_DELAYED_GC_TASK_QS(P, L, PBT) \ ((ErtsProcSysTaskQs *) erts_psd_set((P), (L), ERTS_PSD_DELAYED_GC_TASK_QS, (void *) (PBT))) -#ifdef ERTS_DIRTY_SCHEDULERS -#define ERTS_PROC_GET_DIRTY_SCHED_TRAP_EXPORT(P) \ - ((Export *) erts_psd_get((P), ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT)) -#define ERTS_PROC_SET_DIRTY_SCHED_TRAP_EXPORT(P, L, DSTE) \ - ((Export *) erts_psd_set((P), (L), ERTS_PSD_DIRTY_SCHED_TRAP_EXPORT, (void *) (DSTE))) -#endif +#define ERTS_PROC_GET_NIF_TRAP_EXPORT(P) \ + erts_psd_get((P), ERTS_PSD_NIF_TRAP_EXPORT) +#define ERTS_PROC_SET_NIF_TRAP_EXPORT(P, L, NTE) \ + erts_psd_set((P), (L), ERTS_PSD_NIF_TRAP_EXPORT, (void *) (NTE)) ERTS_GLB_INLINE Eterm erts_proc_get_error_handler(Process *p); diff --git a/erts/emulator/beam/erl_unicode.c b/erts/emulator/beam/erl_unicode.c index 3a968594f3..f8e1431a53 100644 --- a/erts/emulator/beam/erl_unicode.c +++ b/erts/emulator/beam/erl_unicode.c @@ -2126,6 +2126,8 @@ Eterm erts_convert_native_to_filename(Process *p, byte *bytes) mac = 1; case ERL_FILENAME_UTF8: size = strlen((char *) bytes); + if (size == 0) + return NIL; if (erts_analyze_utf8(bytes,size,&err_pos,&num_chars,NULL) != ERTS_UTF8_OK) { goto noconvert; } diff --git a/erts/emulator/beam/external.c b/erts/emulator/beam/external.c index 656de7c49a..9b9b4b2a62 100644 --- a/erts/emulator/beam/external.c +++ b/erts/emulator/beam/external.c @@ -44,9 +44,6 @@ #include "erl_zlib.h" #include "erl_map.h" -#ifdef HIPE -#include "hipe_mode_switch.h" -#endif #define in_area(ptr,start,nbytes) ((UWord)((char*)(ptr) - (char*)(start)) < (nbytes)) #define MAX_STRING_LEN 0xffff @@ -111,26 +108,17 @@ static int encode_size_struct_int(struct TTBSizeContext_*, ErtsAtomCacheMap *acm static Export binary_to_term_trap_export; static BIF_RETTYPE binary_to_term_trap_1(BIF_ALIST_1); -static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b); +static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b, + Export *bif, Eterm arg0, Eterm arg1); void erts_init_external(void) { -#if 1 /* In R16 */ erts_init_trap_export(&term_to_binary_trap_export, - am_erlang, am_term_to_binary_trap, 1, + am_erts_internal, am_term_to_binary_trap, 1, &term_to_binary_trap_1); erts_init_trap_export(&binary_to_term_trap_export, - am_erlang, am_binary_to_term_trap, 1, + am_erts_internal, 
am_binary_to_term_trap, 1, &binary_to_term_trap_1); -#else - sys_memset((void *) &term_to_binary_trap_export, 0, sizeof(Export)); - term_to_binary_trap_export.address = &term_to_binary_trap_export.code[3]; - term_to_binary_trap_export.code[0] = am_erlang; - term_to_binary_trap_export.code[1] = am_term_to_binary_trap; - term_to_binary_trap_export.code[2] = 1; - term_to_binary_trap_export.code[3] = (BeamInstr) em_apply_bif; - term_to_binary_trap_export.code[4] = (BeamInstr) &term_to_binary_trap_1; -#endif return; } @@ -1069,6 +1057,8 @@ static BIF_RETTYPE term_to_binary_trap_1(BIF_ALIST_1) } } +HIPE_WRAPPER_BIF_DISABLE_GC(term_to_binary, 1) + BIF_RETTYPE term_to_binary_1(BIF_ALIST_1) { Eterm res = erts_term_to_binary_int(BIF_P, BIF_ARG_1, 0, TERM_TO_BINARY_DFLAGS, NULL); @@ -1081,6 +1071,8 @@ BIF_RETTYPE term_to_binary_1(BIF_ALIST_1) } } +HIPE_WRAPPER_BIF_DISABLE_GC(term_to_binary, 2) + BIF_RETTYPE term_to_binary_2(BIF_ALIST_2) { Process* p = BIF_P; @@ -1185,6 +1177,8 @@ typedef struct B2TContext_t { Uint32 flags; SWord reds; Eterm trap_bin; + Export *bif; + Eterm arg[2]; enum B2TState state; union { B2TSizeContext sc; @@ -1356,7 +1350,8 @@ static BIF_RETTYPE binary_to_term_trap_1(BIF_ALIST_1) Binary *context_bin = ((ProcBin *) binary_val(BIF_ARG_1))->val; ASSERT(ERTS_MAGIC_BIN_DESTRUCTOR(context_bin) == b2t_context_destructor); - return binary_to_term_int(BIF_P, 0, THE_NON_VALUE, context_bin); + return binary_to_term_int(BIF_P, 0, THE_NON_VALUE, context_bin, NULL, + THE_NON_VALUE, THE_NON_VALUE); } @@ -1391,8 +1386,10 @@ static B2TContext* b2t_export_context(Process* p, B2TContext* src) return ctx; } -static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b) +static BIF_RETTYPE binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* context_b, + Export *bif_init, Eterm arg0, Eterm arg1) { + BIF_RETTYPE ret_val; #ifdef EXTREME_B2T_TRAPPING SWord initial_reds = 1 + b2t_rand() % 4; #else @@ -1409,6 +1406,9 @@ static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* con ctx->state = B2TPrepare; ctx->aligned_alloc = NULL; ctx->flags = flags; + ctx->bif = bif_init; + ctx->arg[0] = arg0; + ctx->arg[1] = arg1; IF_DEBUG(ctx->trap_bin = THE_NON_VALUE;) } else { is_first_call = 0; @@ -1504,12 +1504,24 @@ static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* con HRelease(p, ctx->u.dc.hp_end, ctx->u.dc.hp_start); /*fall through*/ case B2TBadArg: - b2t_destroy_context(ctx); - if (!is_first_call) { - erts_set_gc_state(p, 1); - } BUMP_REDS(p, (initial_reds - ctx->reds) / B2T_BYTES_PER_REDUCTION); - BIF_ERROR(p, BADARG & ~EXF_SAVETRACE); + + ASSERT(ctx->bif == bif_export[BIF_binary_to_term_1] + || ctx->bif == bif_export[BIF_binary_to_term_2]); + + if (is_first_call) + ERTS_BIF_PREP_ERROR(ret_val, p, BADARG); + else { + erts_set_gc_state(p, 1); + if (is_non_value(ctx->arg[1])) + ERTS_BIF_PREP_ERROR_TRAPPED1(ret_val, p, BADARG, ctx->bif, + ctx->arg[0]); + else + ERTS_BIF_PREP_ERROR_TRAPPED2(ret_val, p, BADARG, ctx->bif, + ctx->arg[0], ctx->arg[1]); + } + b2t_destroy_context(ctx); + return ret_val; case B2TDone: b2t_destroy_context(ctx); @@ -1524,7 +1536,8 @@ static Eterm binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* con erts_set_gc_state(p, 1); } BUMP_REDS(p, (initial_reds - ctx->reds) / B2T_BYTES_PER_REDUCTION); - return ctx->u.dc.res; + ERTS_BIF_PREP_RET(ret_val, ctx->u.dc.res); + return ret_val; default: ASSERT(!"Unknown state in binary_to_term"); @@ -1541,15 +1554,24 @@ static Eterm 
binary_to_term_int(Process* p, Uint32 flags, Eterm bin, Binary* con erts_set_gc_state(p, 0); } BUMP_ALL_REDS(p); - BIF_TRAP1(&binary_to_term_trap_export, p, ctx->trap_bin); + + ERTS_BIF_PREP_TRAP1(ret_val, &binary_to_term_trap_export, + p, ctx->trap_bin); + + return ret_val; } -BIF_RETTYPE erts_internal_binary_to_term_1(BIF_ALIST_1) +HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_term, 1) + +BIF_RETTYPE binary_to_term_1(BIF_ALIST_1) { - return binary_to_term_int(BIF_P, 0, BIF_ARG_1, NULL); + return binary_to_term_int(BIF_P, 0, BIF_ARG_1, NULL, bif_export[BIF_binary_to_term_1], + BIF_ARG_1, THE_NON_VALUE); } -BIF_RETTYPE erts_internal_binary_to_term_2(BIF_ALIST_2) +HIPE_WRAPPER_BIF_DISABLE_GC(binary_to_term, 2) + +BIF_RETTYPE binary_to_term_2(BIF_ALIST_2) { Eterm opts; Eterm opt; @@ -1570,7 +1592,8 @@ BIF_RETTYPE erts_internal_binary_to_term_2(BIF_ALIST_2) if (is_not_nil(opts)) goto error; - return binary_to_term_int(BIF_P, flags, BIF_ARG_1, NULL); + return binary_to_term_int(BIF_P, flags, BIF_ARG_1, NULL, bif_export[BIF_binary_to_term_2], + BIF_ARG_1, BIF_ARG_2); error: BIF_ERROR(BIF_P, BADARG); @@ -1902,6 +1925,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla } real_size = endp - bytes; result_bin = erts_bin_realloc(context->s.ec.result_bin,real_size); + result_bin->orig_size = real_size; level = context->s.ec.level; BUMP_REDS(p, (initial_reds - reds) / TERM_TO_BINARY_LOOP_FACTOR); if (level == 0 || real_size < 6) { /* We are done */ @@ -1981,6 +2005,7 @@ static Eterm erts_term_to_binary_int(Process* p, Eterm Term, int level, Uint fla erl_zlib_deflate_finish(&(context->s.cc.stream)); result_bin = erts_bin_realloc(context->s.cc.destination_bin, context->s.cc.dest_len+6); + result_bin->orig_size = context->s.cc.dest_len+6; context->s.cc.destination_bin = NULL; pb = (ProcBin *) HAlloc(p, PROC_BIN_SIZE); pb->thing_word = HEADER_PROC_BIN; @@ -4440,66 +4465,3 @@ error: #undef SKIP2 #undef CHKSIZE } - - -#ifdef HIPE -BIF_RETTYPE hipe_wrapper_term_to_binary_1(BIF_ALIST_1); -BIF_RETTYPE hipe_wrapper_term_to_binary_2(BIF_ALIST_2); -BIF_RETTYPE hipe_wrapper_erts_internal_binary_to_term_1(BIF_ALIST_1); -BIF_RETTYPE hipe_wrapper_erts_internal_binary_to_term_2(BIF_ALIST_2); - -/* Hipe wrappers used by native code for BIFs that disable GC while trapping. - * - * Problem: - * When native code calls a BIF that traps, hipe_mode_switch will push a - * "trap frame" on the Erlang stack in order to find its way back from beam_emu - * back to native caller when finally done. If GC is disabled and stack/heap - * is full there is no place to push the "trap frame". - * - * Solution: - * We reserve space on stack for the "trap frame" here before the BIF is called. - * If the BIF does not trap, the space is reclaimed here before returning. - * If the BIF traps, hipe_push_beam_trap_frame() will detect that a "trap frame" - * already is reserved and use it. 
- */ -BIF_RETTYPE hipe_wrapper_term_to_binary_1(BIF_ALIST_1) -{ - Eterm res; - hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, 1); - res = term_to_binary_1(BIF_P, BIF__ARGS); - if (is_value(res) || BIF_P->freason != TRAP) { - hipe_unreserve_beam_trap_frame(BIF_P); - } - return res; -} -BIF_RETTYPE hipe_wrapper_term_to_binary_2(BIF_ALIST_2) -{ - Eterm res; - hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, 2); - res = term_to_binary_2(BIF_P, BIF__ARGS); - if (is_value(res) || BIF_P->freason != TRAP) { - hipe_unreserve_beam_trap_frame(BIF_P); - } - return res; -} -BIF_RETTYPE hipe_wrapper_erts_internal_binary_to_term_1(BIF_ALIST_1) -{ - Eterm res; - hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, 1); - res = erts_internal_binary_to_term_1(BIF_P, BIF__ARGS); - if (is_value(res) || BIF_P->freason != TRAP) { - hipe_unreserve_beam_trap_frame(BIF_P); - } - return res; -} -BIF_RETTYPE hipe_wrapper_erts_internal_binary_to_term_2(BIF_ALIST_2) -{ - Eterm res; - hipe_reserve_beam_trap_frame(BIF_P, BIF__ARGS, 2); - res = erts_internal_binary_to_term_2(BIF_P, BIF__ARGS); - if (is_value(res) || BIF_P->freason != TRAP) { - hipe_unreserve_beam_trap_frame(BIF_P); - } - return res; -} -#endif /*HIPE*/ diff --git a/erts/emulator/beam/global.h b/erts/emulator/beam/global.h index 8fcb95d0e2..891046a8b5 100755..100644 --- a/erts/emulator/beam/global.h +++ b/erts/emulator/beam/global.h @@ -435,6 +435,8 @@ do {\ }\ } while(0) +#define CLEAR_SAVED_ESTACK(estack) ((void) ((estack)->start = NULL)) + /* * Use on empty stack, only the allocator can be changed before this. * The src stack is reset to NULL. @@ -551,6 +553,8 @@ do {\ }\ } while(0) +#define CLEAR_SAVED_WSTACK(wstack) ((void) ((wstack)->wstart = NULL)) + /* * Use on empty stack, only the allocator can be changed before this. * The src stack is reset to NULL. 
@@ -951,20 +955,67 @@ struct Sint_buf { }; char* Sint_to_buf(Sint, struct Sint_buf*); +#define ERTS_IOLIST_STATE_INITER(C_P, OBJ) \ + {(C_P), 0, 0, (OBJ), {NULL, NULL, NULL, ERTS_ALC_T_INVALID}, 0, 0} + +#define ERTS_IOLIST_STATE_MOVE(TO, FROM) \ + sys_memcpy((void *) (TO), (void *) (FROM), sizeof(ErtsIOListState)) + +#define ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED 8 + +typedef struct { + Process *c_p; + ErlDrvSizeT size; + Uint offs; + Eterm obj; + ErtsEStack estack; + int reds_left; + int have_size; +} ErtsIOListState; + +#define ERTS_IOLIST2BUF_STATE_INITER(C_P, OBJ) \ + {ERTS_IOLIST_STATE_INITER((C_P), (OBJ)), {NULL, 0, 0, 0}, NULL, 0, NULL, 0} + +#define ERTS_IOLIST2BUF_STATE_MOVE(TO, FROM) \ + sys_memcpy((void *) (TO), (void *) (FROM), sizeof(ErtsIOList2BufState)) + +#define ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT 32 +#define ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED 8 +#define ERTS_IOLIST_TO_BUF_BYTES_PER_RED \ + (ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED*ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) + +typedef struct { + ErtsIOListState iolist; + struct { + byte *bptr; + size_t size; + Uint bitoffs; + Uint bitsize; + } bcopy; + char *buf; + ErlDrvSizeT len; + Eterm *objp; + int offset; +} ErtsIOList2BufState; + #define ERTS_IOLIST_OK 0 #define ERTS_IOLIST_OVERFLOW 1 #define ERTS_IOLIST_TYPE 2 +#define ERTS_IOLIST_YIELD 3 Eterm buf_to_intlist(Eterm**, const char*, size_t, Eterm); /* most callers pass plain char*'s */ #define ERTS_IOLIST_TO_BUF_OVERFLOW (~((ErlDrvSizeT) 0)) #define ERTS_IOLIST_TO_BUF_TYPE_ERROR (~((ErlDrvSizeT) 1)) +#define ERTS_IOLIST_TO_BUF_YIELD (~((ErlDrvSizeT) 2)) #define ERTS_IOLIST_TO_BUF_FAILED(R) \ - (((R) & (~((ErlDrvSizeT) 1))) == (~((ErlDrvSizeT) 1))) + (((R) & (~((ErlDrvSizeT) 3))) == (~((ErlDrvSizeT) 3))) #define ERTS_IOLIST_TO_BUF_SUCCEEDED(R) \ (!ERTS_IOLIST_TO_BUF_FAILED((R))) ErlDrvSizeT erts_iolist_to_buf(Eterm, char*, ErlDrvSizeT); +ErlDrvSizeT erts_iolist_to_buf_yielding(ErtsIOList2BufState *); +int erts_iolist_size_yielding(ErtsIOListState *state); int erts_iolist_size(Eterm, ErlDrvSizeT *); int is_string(Eterm); void erl_at_exit(void (*) (void*), void*); diff --git a/erts/emulator/beam/io.c b/erts/emulator/beam/io.c index edf4a28784..9ae973e108 100644 --- a/erts/emulator/beam/io.c +++ b/erts/emulator/beam/io.c @@ -1218,9 +1218,10 @@ typedef struct { static ERTS_INLINE ErtsTryImmDrvCallResult try_imm_drv_call(ErtsTryImmDrvCallState *sp) { + unsigned int prof_runnable_ports; ErtsTryImmDrvCallResult res; int reds_left_in; - erts_aint32_t invalid_state, invalid_sched_flags; + erts_aint32_t act, exp, invalid_state, invalid_sched_flags; Port *prt = sp->port; Process *c_p = sp->c_p; @@ -1247,18 +1248,39 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp) goto locked_fail; } - sp->sched_flags = erts_smp_atomic32_read_nob(&prt->sched.flags); - if (sp->sched_flags & invalid_sched_flags) { - res = ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS; - goto locked_fail; - } + prof_runnable_ports = erts_system_profile_flags.runnable_ports; + if (prof_runnable_ports) + erts_port_task_sched_lock(&prt->sched); + + act = erts_smp_atomic32_read_nob(&prt->sched.flags); + do { + erts_aint32_t new; + + if (act & invalid_sched_flags) { + res = ERTS_TRY_IMM_DRV_CALL_INVALID_SCHED_FLAGS; + sp->sched_flags = act; + goto locked_fail; + } + exp = act; + new = act | ERTS_PTS_FLG_EXEC_IMM; + act = erts_smp_atomic32_cmpxchg_mb(&prt->sched.flags, new, exp); + } while (act != exp); + + sp->sched_flags = act; if (!c_p) reds_left_in = CONTEXT_REDS/10; else { if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) 
trace_virtual_sched(c_p, am_out); + /* + * No status lock held while sending runnable + * proc trace messages. It is however not needed + * in this case, since only this thread can send + * such messages for this process until the process + * has been scheduled out. + */ if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) profile_runnable_proc(c_p, am_inactive); @@ -1273,11 +1295,14 @@ try_imm_drv_call(ErtsTryImmDrvCallState *sp) ERTS_SMP_CHK_NO_PROC_LOCKS; - if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) - trace_sched_ports_where(prt, am_in, sp->port_op); - if (erts_system_profile_flags.runnable_ports - && !erts_port_is_scheduled(prt)) - profile_runnable_port(prt, am_active); + if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) { + if (prof_runnable_ports && !(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC))) + profile_runnable_port(prt, am_active); + if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) + trace_sched_ports_where(prt, am_in, sp->port_op); + if (prof_runnable_ports) + erts_port_task_sched_unlock(&prt->sched); + } sp->fpe_was_unmasked = erts_block_fpe(); @@ -1294,17 +1319,31 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp) int reds; Port *prt = sp->port; Process *c_p = sp->c_p; + erts_aint32_t act; + unsigned int prof_runnable_ports; reds = prt->reds; reds += erts_port_driver_callback_epilogue(prt, NULL); erts_unblock_fpe(sp->fpe_was_unmasked); - if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) - trace_sched_ports_where(prt, am_out, sp->port_op); - if (erts_system_profile_flags.runnable_ports - && !erts_port_is_scheduled(prt)) - profile_runnable_port(prt, am_inactive); + prof_runnable_ports = erts_system_profile_flags.runnable_ports; + if (prof_runnable_ports) + erts_port_task_sched_lock(&prt->sched); + + act = erts_smp_atomic32_read_band_mb(&prt->sched.flags, + ~ERTS_PTS_FLG_EXEC_IMM); + ERTS_SMP_LC_ASSERT(act & ERTS_PTS_FLG_EXEC_IMM); + + if (prof_runnable_ports | IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) { + if (IS_TRACED_FL(prt, F_TRACE_SCHED_PORTS)) + trace_sched_ports_where(prt, am_out, sp->port_op); + if (prof_runnable_ports) { + if (!(act & (ERTS_PTS_FLG_IN_RUNQ|ERTS_PTS_FLG_EXEC))) + profile_runnable_port(prt, am_inactive); + erts_port_task_sched_unlock(&prt->sched); + } + } erts_port_release(prt); @@ -1319,6 +1358,13 @@ finalize_imm_drv_call(ErtsTryImmDrvCallState *sp) if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) trace_virtual_sched(c_p, am_in); + /* + * No status lock held while sending runnable + * proc trace messages. It is however not needed + * in this case, since only this thread can send + * such messages for this process until the process + * has been scheduled out. 
+ */ if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) profile_runnable_proc(c_p, am_active); @@ -6129,7 +6175,7 @@ driver_pdl_create(ErlDrvPort dp) return NULL; pdl = erts_alloc(ERTS_ALC_T_PORT_DATA_LOCK, sizeof(struct erl_drv_port_data_lock)); - erts_mtx_init(&pdl->mtx, "port_data_lock"); + erts_mtx_init_x(&pdl->mtx, "port_data_lock", pp->common.id, 1); pdl_init_refc(pdl); erts_port_inc_refc(pp); pdl->prt = pp; @@ -7166,7 +7212,7 @@ char *driver_dl_error(void) #define ERL_DRV_SYS_INFO_SIZE(LAST_FIELD) \ - (((size_t) &((ErlDrvSysInfo *) 0)->LAST_FIELD) \ + (offsetof(ErlDrvSysInfo, LAST_FIELD) \ + sizeof(((ErlDrvSysInfo *) 0)->LAST_FIELD)) void @@ -7228,6 +7274,18 @@ driver_system_info(ErlDrvSysInfo *sip, size_t si_size) sip->nif_major_version = ERL_NIF_MAJOR_VERSION; sip->nif_minor_version = ERL_NIF_MINOR_VERSION; } + /* + * 'dirty_scheduler_support' is the last field in the 4th version + * (driver version 3.1, NIF version 2.7) + */ + if (si_size >= ERL_DRV_SYS_INFO_SIZE(dirty_scheduler_support)) { +#if defined(ERL_NIF_DIRTY_SCHEDULER_SUPPORT) && defined(USE_THREADS) + sip->dirty_scheduler_support = 1; +#else + sip->dirty_scheduler_support = 0; +#endif + } + } diff --git a/erts/emulator/beam/sys.h b/erts/emulator/beam/sys.h index 05f07e57b2..3d8dd9c6d0 100644 --- a/erts/emulator/beam/sys.h +++ b/erts/emulator/beam/sys.h @@ -274,6 +274,7 @@ __decl_noreturn void __noreturn erl_assert_error(const char* expr, const char *f typedef unsigned int Eterm; typedef unsigned int Uint; typedef int Sint; +#define ERTS_UINT_MAX UINT_MAX #define ERTS_SIZEOF_ETERM SIZEOF_INT #define ErtsStrToSint strtol #else @@ -347,6 +348,7 @@ typedef long long Sint; typedef Uint UWord; typedef Sint SWord; +#define ERTS_UINT_MAX ERTS_UWORD_MAX #endif /* HALFWORD_HEAP */ diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c index 738f793020..55f9e68e78 100644 --- a/erts/emulator/beam/utils.c +++ b/erts/emulator/beam/utils.c @@ -3197,106 +3197,303 @@ buf_to_intlist(Eterm** hpp, const char *buf, size_t len, Eterm tail) ** */ -ErlDrvSizeT erts_iolist_to_buf(Eterm obj, char* buf, ErlDrvSizeT alloced_len) +typedef enum { + ERTS_IL2B_BCOPY_OK, + ERTS_IL2B_BCOPY_YIELD, + ERTS_IL2B_BCOPY_OVERFLOW, + ERTS_IL2B_BCOPY_TYPE_ERROR +} ErtsIL2BBCopyRes; + +static ErtsIL2BBCopyRes +iolist_to_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp); + +static ERTS_INLINE ErlDrvSizeT +iolist_to_buf(const int yield_support, + ErtsIOList2BufState *state, + Eterm obj, + char* buf, + ErlDrvSizeT alloced_len) { - ErlDrvSizeT len = (ErlDrvSizeT) alloced_len; - Eterm* objp; +#undef IOLIST_TO_BUF_BCOPY +#define IOLIST_TO_BUF_BCOPY(CONSP) \ +do { \ + size_t size = binary_size(obj); \ + if (size > 0) { \ + Uint bitsize; \ + byte* bptr; \ + Uint bitoffs; \ + Uint num_bits; \ + if (yield_support) { \ + size_t max_size = ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \ + if (yield_count > 0) \ + max_size *= yield_count+1; \ + if (size > max_size) { \ + state->objp = CONSP; \ + goto L_bcopy_yield; \ + } \ + if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) { \ + int cost = (int) size; \ + cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; \ + yield_count -= cost; \ + } \ + } \ + if (len < size) \ + goto L_overflow; \ + ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize); \ + if (bitsize != 0) \ + goto L_type_error; \ + num_bits = 8*size; \ + copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits); \ + buf += size; \ + len -= size; \ + } \ +} while (0) + + ErlDrvSizeT res, len; + Eterm* objp = 
NULL; + int init_yield_count; + int yield_count; DECLARE_ESTACK(s); - goto L_again; - - while (!ESTACK_ISEMPTY(s)) { - obj = ESTACK_POP(s); - L_again: - if (is_list(obj)) { - L_iter_list: - objp = list_val(obj); - obj = CAR(objp); - if (is_byte(obj)) { - if (len == 0) { - goto L_overflow; - } - *buf++ = unsigned_val(obj); - len--; - } else if (is_binary(obj)) { - byte* bptr; - size_t size = binary_size(obj); - Uint bitsize; - Uint bitoffs; - Uint num_bits; - - if (len < size) { + + len = (ErlDrvSizeT) alloced_len; + + if (!yield_support) { + yield_count = init_yield_count = 0; /* Shut up faulty warning... >:-( */ + goto L_again; + } + else { + + if (state->iolist.reds_left <= 0) + return ERTS_IOLIST_TO_BUF_YIELD; + + ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK); + init_yield_count = (ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED + * state->iolist.reds_left); + yield_count = init_yield_count; + + if (!state->iolist.estack.start) + goto L_again; + else { + int chk_stack; + /* Restart; restore state... */ + ESTACK_RESTORE(s, &state->iolist.estack); + + if (!state->bcopy.bptr) + chk_stack = 0; + else { + chk_stack = 1; + switch (iolist_to_buf_bcopy(state, THE_NON_VALUE, &yield_count)) { + case ERTS_IL2B_BCOPY_OK: + break; + case ERTS_IL2B_BCOPY_YIELD: + BUMP_ALL_REDS(state->iolist.c_p); + state->iolist.reds_left = 0; + ESTACK_SAVE(s, &state->iolist.estack); + return ERTS_IOLIST_TO_BUF_YIELD; + case ERTS_IL2B_BCOPY_OVERFLOW: goto L_overflow; - } - ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize); - if (bitsize != 0) { + case ERTS_IL2B_BCOPY_TYPE_ERROR: goto L_type_error; } - num_bits = 8*size; - copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits); - buf += size; - len -= size; - } else if (is_list(obj)) { - ESTACK_PUSH(s, CDR(objp)); - goto L_iter_list; /* on head */ - } else if (is_not_nil(obj)) { - goto L_type_error; } - obj = CDR(objp); - if (is_list(obj)) { - goto L_iter_list; /* on tail */ - } else if (is_binary(obj)) { - byte* bptr; - size_t size = binary_size(obj); - Uint bitsize; - Uint bitoffs; - Uint num_bits; - if (len < size) { - goto L_overflow; + obj = state->iolist.obj; + buf = state->buf; + len = state->len; + objp = state->objp; + state->objp = NULL; + if (objp) + goto L_tail; + if (!chk_stack) + goto L_again; + /* check stack */ + } + } + + while (!ESTACK_ISEMPTY(s)) { + obj = ESTACK_POP(s); + L_again: + if (is_list(obj)) { + while (1) { /* Tail loop */ + while (1) { /* Head loop */ + if (yield_support && --yield_count <= 0) + goto L_yield; + objp = list_val(obj); + obj = CAR(objp); + if (is_byte(obj)) { + if (len == 0) { + goto L_overflow; + } + *buf++ = unsigned_val(obj); + len--; + } else if (is_binary(obj)) { + IOLIST_TO_BUF_BCOPY(objp); + } else if (is_list(obj)) { + ESTACK_PUSH(s, CDR(objp)); + continue; /* Head loop */ + } else if (is_not_nil(obj)) { + goto L_type_error; + } + break; } - ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize); - if (bitsize != 0) { + + L_tail: + + obj = CDR(objp); + + if (is_list(obj)) { + continue; /* Tail loop */ + } else if (is_binary(obj)) { + IOLIST_TO_BUF_BCOPY(NULL); + } else if (is_not_nil(obj)) { goto L_type_error; } - num_bits = 8*size; - copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits); - buf += size; - len -= size; - } else if (is_not_nil(obj)) { - goto L_type_error; + break; } } else if (is_binary(obj)) { - byte* bptr; - size_t size = binary_size(obj); - Uint bitsize; - Uint bitoffs; - Uint num_bits; - if (len < size) { - goto L_overflow; - } - ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize); - if (bitsize != 0) { 
- goto L_type_error; - } - num_bits = 8*size; - copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits); - buf += size; - len -= size; + IOLIST_TO_BUF_BCOPY(NULL); } else if (is_not_nil(obj)) { goto L_type_error; - } + } else if (yield_support && --yield_count <= 0) + goto L_yield; } + res = len; + + L_return: + DESTROY_ESTACK(s); - return len; + + if (yield_support) { + int reds; + CLEAR_SAVED_ESTACK(&state->iolist.estack); + reds = ((init_yield_count - yield_count - 1) + / ERTS_IOLIST_TO_BUF_YIELD_COUNT_PER_RED) + 1; + BUMP_REDS(state->iolist.c_p, reds); + state->iolist.reds_left -= reds; + if (state->iolist.reds_left < 0) + state->iolist.reds_left = 0; + } + + + return res; L_type_error: - DESTROY_ESTACK(s); - return ERTS_IOLIST_TO_BUF_TYPE_ERROR; + res = ERTS_IOLIST_TO_BUF_TYPE_ERROR; + goto L_return; L_overflow: - DESTROY_ESTACK(s); - return ERTS_IOLIST_TO_BUF_OVERFLOW; + res = ERTS_IOLIST_TO_BUF_OVERFLOW; + goto L_return; + + L_bcopy_yield: + + state->buf = buf; + state->len = len; + + switch (iolist_to_buf_bcopy(state, obj, &yield_count)) { + case ERTS_IL2B_BCOPY_OK: + ERTS_INTERNAL_ERROR("Missing yield"); + case ERTS_IL2B_BCOPY_YIELD: + BUMP_ALL_REDS(state->iolist.c_p); + state->iolist.reds_left = 0; + ESTACK_SAVE(s, &state->iolist.estack); + return ERTS_IOLIST_TO_BUF_YIELD; + case ERTS_IL2B_BCOPY_OVERFLOW: + goto L_overflow; + case ERTS_IL2B_BCOPY_TYPE_ERROR: + goto L_type_error; + } + + L_yield: + + BUMP_ALL_REDS(state->iolist.c_p); + state->iolist.reds_left = 0; + state->iolist.obj = obj; + state->buf = buf; + state->len = len; + ESTACK_SAVE(s, &state->iolist.estack); + return ERTS_IOLIST_TO_BUF_YIELD; + +#undef IOLIST_TO_BUF_BCOPY +} + +static ErtsIL2BBCopyRes +iolist_to_buf_bcopy(ErtsIOList2BufState *state, Eterm obj, int *yield_countp) +{ + ErtsIL2BBCopyRes res; + char *buf = state->buf; + ErlDrvSizeT len = state->len; + byte* bptr; + size_t size; + size_t max_size; + Uint bitoffs; + Uint num_bits; + int yield_count = *yield_countp; + + if (state->bcopy.bptr) { + bptr = state->bcopy.bptr; + size = state->bcopy.size; + bitoffs = state->bcopy.bitoffs; + state->bcopy.bptr = NULL; + } + else { + Uint bitsize; + + ASSERT(is_binary(obj)); + + size = binary_size(obj); + if (size <= 0) + return ERTS_IL2B_BCOPY_OK; + + if (len < size) + return ERTS_IL2B_BCOPY_OVERFLOW; + + ERTS_GET_BINARY_BYTES(obj, bptr, bitoffs, bitsize); + if (bitsize != 0) + return ERTS_IL2B_BCOPY_TYPE_ERROR; + } + + ASSERT(size > 0); + max_size = (size_t) ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; + if (yield_count > 0) + max_size *= (size_t) (yield_count+1); + + if (size <= max_size) { + if (size >= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT) { + int cost = (int) size; + cost /= ERTS_IOLIST_TO_BUF_BYTES_PER_YIELD_COUNT; + yield_count -= cost; + } + res = ERTS_IL2B_BCOPY_OK; + } + else { + ASSERT(0 < max_size && max_size < size); + yield_count = 0; + state->bcopy.bptr = bptr + max_size; + state->bcopy.bitoffs = bitoffs; + state->bcopy.size = size - max_size; + size = max_size; + res = ERTS_IL2B_BCOPY_YIELD; + } + + num_bits = 8*size; + copy_binary_to_buffer(buf, 0, bptr, bitoffs, num_bits); + state->buf += size; + state->len -= size; + *yield_countp = yield_count; + + return res; +} + +ErlDrvSizeT erts_iolist_to_buf_yielding(ErtsIOList2BufState *state) +{ + return iolist_to_buf(1, state, state->iolist.obj, state->buf, state->len); +} + +ErlDrvSizeT erts_iolist_to_buf(Eterm obj, char* buf, ErlDrvSizeT alloced_len) +{ + return iolist_to_buf(0, NULL, obj, buf, alloced_len); } /* @@ -3307,11 +3504,32 @@ ErlDrvSizeT 
erts_iolist_to_buf(Eterm obj, char* buf, ErlDrvSizeT alloced_len) * Any input term error detected in erts_iolist_to_buf should also * be detected in this function! */ -int erts_iolist_size(Eterm obj, ErlDrvSizeT* sizep) + +static ERTS_INLINE int +iolist_size(const int yield_support, ErtsIOListState *state, Eterm obj, ErlDrvSizeT* sizep) { + int res, init_yield_count, yield_count; Eterm* objp; - Uint size = 0; /* Intentionally Uint due to halfword heap */ + Uint size = (Uint) *sizep; /* Intentionally Uint due to halfword heap */ DECLARE_ESTACK(s); + + if (!yield_support) + yield_count = init_yield_count = 0; /* Shut up faulty warning... >:-( */ + else { + if (state->reds_left <= 0) + return ERTS_IOLIST_YIELD; + ESTACK_CHANGE_ALLOCATOR(s, ERTS_ALC_T_SAVED_ESTACK); + init_yield_count = ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED; + init_yield_count *= state->reds_left; + yield_count = init_yield_count; + if (state->estack.start) { + /* Restart; restore state... */ + ESTACK_RESTORE(s, &state->estack); + size = (Uint) state->size; + obj = state->obj; + } + } + goto L_again; #define SAFE_ADD(Var, Val) \ @@ -3327,51 +3545,101 @@ int erts_iolist_size(Eterm obj, ErlDrvSizeT* sizep) obj = ESTACK_POP(s); L_again: if (is_list(obj)) { - L_iter_list: - objp = list_val(obj); - /* Head */ - obj = CAR(objp); - if (is_byte(obj)) { - size++; - if (size == 0) { - goto L_overflow_error; + while (1) { /* Tail loop */ + while (1) { /* Head loop */ + if (yield_support && --yield_count <= 0) + goto L_yield; + objp = list_val(obj); + /* Head */ + obj = CAR(objp); + if (is_byte(obj)) { + size++; + if (size == 0) { + goto L_overflow_error; + } + } else if (is_binary(obj) && binary_bitsize(obj) == 0) { + SAFE_ADD(size, binary_size(obj)); + } else if (is_list(obj)) { + ESTACK_PUSH(s, CDR(objp)); + continue; /* Head loop */ + } else if (is_not_nil(obj)) { + goto L_type_error; + } + break; } - } else if (is_binary(obj) && binary_bitsize(obj) == 0) { - SAFE_ADD(size, binary_size(obj)); - } else if (is_list(obj)) { - ESTACK_PUSH(s, CDR(objp)); - goto L_iter_list; /* on head */ - } else if (is_not_nil(obj)) { - goto L_type_error; + /* Tail */ + obj = CDR(objp); + if (is_list(obj)) + continue; /* Tail loop */ + else if (is_binary(obj) && binary_bitsize(obj) == 0) { + SAFE_ADD(size, binary_size(obj)); + } else if (is_not_nil(obj)) { + goto L_type_error; + } + break; } - /* Tail */ - obj = CDR(objp); - if (is_list(obj)) - goto L_iter_list; /* on tail */ - else if (is_binary(obj) && binary_bitsize(obj) == 0) { + } else { + if (yield_support && --yield_count <= 0) + goto L_yield; + if (is_binary(obj) && binary_bitsize(obj) == 0) { /* Tail was binary */ SAFE_ADD(size, binary_size(obj)); } else if (is_not_nil(obj)) { goto L_type_error; } - } else if (is_binary(obj) && binary_bitsize(obj) == 0) { /* Tail was binary */ - SAFE_ADD(size, binary_size(obj)); - } else if (is_not_nil(obj)) { - goto L_type_error; } } #undef SAFE_ADD - DESTROY_ESTACK(s); *sizep = (ErlDrvSizeT) size; - return ERTS_IOLIST_OK; - L_overflow_error: + res = ERTS_IOLIST_OK; + + L_return: + DESTROY_ESTACK(s); - return ERTS_IOLIST_OVERFLOW; + + if (yield_support) { + int yc, reds; + CLEAR_SAVED_ESTACK(&state->estack); + yc = init_yield_count - yield_count; + reds = ((yc - 1) / ERTS_IOLIST_SIZE_YIELDS_COUNT_PER_RED) + 1; + BUMP_REDS(state->c_p, reds); + state->reds_left -= reds; + state->size = (ErlDrvSizeT) size; + state->have_size = 1; + } + + return res; + + L_overflow_error: + res = ERTS_IOLIST_OVERFLOW; + size = 0; + goto L_return; L_type_error: - DESTROY_ESTACK(s); - 
return ERTS_IOLIST_TYPE; + res = ERTS_IOLIST_TYPE; + size = 0; + goto L_return; + + L_yield: + BUMP_ALL_REDS(state->c_p); + state->reds_left = 0; + state->size = size; + state->obj = obj; + ESTACK_SAVE(s, &state->estack); + return ERTS_IOLIST_YIELD; +} + +int erts_iolist_size_yielding(ErtsIOListState *state) +{ + ErlDrvSizeT size = state->size; + return iolist_size(1, state, state->obj, &size); +} + +int erts_iolist_size(Eterm obj, ErlDrvSizeT* sizep) +{ + *sizep = 0; + return iolist_size(0, NULL, obj, sizep); } /* return 0 if item is not a non-empty flat list of bytes */ @@ -3680,6 +3948,9 @@ erts_save_emu_args(int argc, char **argv) size += sz+1; } ptr = (char *) malloc(size); + if (!ptr) { + ERTS_INTERNAL_ERROR("malloc failed to allocate memory!"); + } #ifdef DEBUG end_ptr = ptr + size; #endif diff --git a/erts/emulator/drivers/common/inet_drv.c b/erts/emulator/drivers/common/inet_drv.c index 09bada457d..891589d1c5 100644 --- a/erts/emulator/drivers/common/inet_drv.c +++ b/erts/emulator/drivers/common/inet_drv.c @@ -4372,7 +4372,7 @@ static int erl_inet_close(inet_descriptor* desc) desc_close(desc); desc->state = INET_STATE_CLOSED; } else if (desc->prebound && (desc->s != INVALID_SOCKET)) { - sock_select(desc, FD_READ | FD_WRITE | FD_CLOSE, 0); + sock_select(desc, FD_READ | FD_WRITE | FD_CLOSE | ERL_DRV_USE_NO_CALLBACK, 0); desc->event_mask = 0; #ifdef __WIN32__ desc->forced_events = 0; @@ -4536,7 +4536,8 @@ static ErlDrvSSizeT inet_ctl_open(inet_descriptor* desc, int domain, int type, /* as inet_open but pass in an open socket (MUST BE OF RIGHT TYPE) */ static ErlDrvSSizeT inet_ctl_fdopen(inet_descriptor* desc, int domain, int type, - SOCKET s, char** rbuf, ErlDrvSizeT rsize) + SOCKET s, Uint32 bound, + char** rbuf, ErlDrvSizeT rsize) { inet_address name; unsigned int sz = sizeof(name); @@ -4560,7 +4561,12 @@ static ErlDrvSSizeT inet_ctl_fdopen(inet_descriptor* desc, int domain, int type, #ifdef __WIN32__ driver_select(desc->port, desc->event, ERL_DRV_READ, 1); #endif - desc->state = INET_STATE_BOUND; /* assume bound */ + + if (bound) + desc->state = INET_STATE_BOUND; + else + desc->state = INET_STATE_OPEN; + if (type == SOCK_STREAM) { /* check if connected */ sz = sizeof(name); if (!IS_SOCKET_ERROR(sock_peer(s, (struct sockaddr*) &name, &sz))) { @@ -5772,7 +5778,7 @@ done: ia_p->Ipv6IfIndex && ia_p->Ipv6IfIndex != index) { - /* Oops, there was an other interface for IPv6. Possible? XXX */ + /* Oops, there was another interface for IPv6. Possible? 
XXX */ index = ia_p->Ipv6IfIndex; goto index; } @@ -9121,10 +9127,11 @@ static ErlDrvSSizeT tcp_inet_ctl(ErlDrvData e, unsigned int cmd, break; } - case INET_REQ_FDOPEN: { /* pass in an open socket */ + case INET_REQ_FDOPEN: { /* pass in an open (and optionally bound) socket */ int domain; + int bound; DEBUGF(("tcp_inet_ctl(%ld): FDOPEN\r\n", (long)desc->inet.port)); - if (len != 6) return ctl_error(EINVAL, rbuf, rsize); + if (len != 6 && len != 10) return ctl_error(EINVAL, rbuf, rsize); switch(buf[0]) { case INET_AF_INET: domain = AF_INET; @@ -9142,8 +9149,13 @@ static ErlDrvSSizeT tcp_inet_ctl(ErlDrvData e, unsigned int cmd, return ctl_error(EINVAL, rbuf, rsize); } if (buf[1] != INET_TYPE_STREAM) return ctl_error(EINVAL, rbuf, rsize); + + if (len == 6) bound = 1; + else bound = get_int32(buf+2+4); + return inet_ctl_fdopen(INETP(desc), domain, SOCK_STREAM, - (SOCKET) get_int32(buf+2), rbuf, rsize); + (SOCKET) get_int32(buf+2), + bound, rbuf, rsize); break; } @@ -11116,10 +11128,11 @@ static ErlDrvSSizeT packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, return replen; - case INET_REQ_FDOPEN: { /* pass in an open (and bound) socket */ + case INET_REQ_FDOPEN: { /* pass in an open (and optionally bound) socket */ SOCKET s; + int bound; DEBUGF(("packet inet_ctl(%ld): FDOPEN\r\n", (long)desc->port)); - if (len != 6) { + if (len != 6 && len != 10) { return ctl_error(EINVAL, rbuf, rsize); } switch (buf[0]) { @@ -11144,7 +11157,11 @@ static ErlDrvSSizeT packet_inet_ctl(ErlDrvData e, unsigned int cmd, char* buf, return ctl_error(EINVAL, rbuf, rsize); } s = (SOCKET)get_int32(buf+2); - replen = inet_ctl_fdopen(desc, af, type, s, rbuf, rsize); + + if (len == 6) bound = 1; + else bound = get_int32(buf+2+4); + + replen = inet_ctl_fdopen(desc, af, type, s, bound, rbuf, rsize); if ((*rbuf)[0] != INET_REP_ERROR) { if (desc->active) diff --git a/erts/emulator/drivers/unix/multi_drv.c b/erts/emulator/drivers/unix/multi_drv.c index 822c96730c..724d325ed5 100644 --- a/erts/emulator/drivers/unix/multi_drv.c +++ b/erts/emulator/drivers/unix/multi_drv.c @@ -20,7 +20,7 @@ /* Purpose: Multidriver interface This is an example of a driver which allows multiple instances of itself. I.e have one erlang process execute open_port(multi......) and - at the same time have an other erlang process open an other port + at the same time have another erlang process open another port running multi there as well. 
*/ diff --git a/erts/emulator/hipe/hipe_amd64_bifs.m4 b/erts/emulator/hipe/hipe_amd64_bifs.m4 index 0de69a617f..a3219c7586 100644 --- a/erts/emulator/hipe/hipe_amd64_bifs.m4 +++ b/erts/emulator/hipe/hipe_amd64_bifs.m4 @@ -39,7 +39,10 @@ define(HANDLE_GOT_MBUF,` jmp 2b') `#if defined(ERTS_ENABLE_LOCK_CHECK) && defined(ERTS_SMP) -# define CALL_BIF(F) movq $CSYM(F), P_BIF_CALLEE(P); call CSYM(hipe_debug_bif_wrapper) +# define CALL_BIF(F) \ + movq CSYM(F)@GOTPCREL(%rip), %r11; \ + movq %r11, P_BIF_CALLEE(P); \ + call CSYM(hipe_debug_bif_wrapper) #else # define CALL_BIF(F) call CSYM(F) #endif' diff --git a/erts/emulator/hipe/hipe_bif_list.m4 b/erts/emulator/hipe/hipe_bif_list.m4 index 0997d81b2f..5f92b6bac4 100644 --- a/erts/emulator/hipe/hipe_bif_list.m4 +++ b/erts/emulator/hipe/hipe_bif_list.m4 @@ -268,9 +268,16 @@ noproc_primop_interface_1(nbif_atomic_inc, hipe_atomic_inc) */ define(CFUN,`ifelse($1,term_to_binary_1,hipe_wrapper_term_to_binary_1, ifelse($1,term_to_binary_2,hipe_wrapper_term_to_binary_2, -ifelse($1,erts_internal_binary_to_term_1,hipe_wrapper_erts_internal_binary_to_term_1, -ifelse($1,erts_internal_binary_to_term_2,hipe_wrapper_erts_internal_binary_to_term_2, -$1))))') +ifelse($1,binary_to_term_1,hipe_wrapper_binary_to_term_1, +ifelse($1,binary_to_term_2,hipe_wrapper_binary_to_term_2, +ifelse($1,binary_to_list_1,hipe_wrapper_binary_to_list_1, +ifelse($1,binary_to_list_3,hipe_wrapper_binary_to_list_3, +ifelse($1,bitstring_to_list_1,hipe_wrapper_bitstring_to_list_1, +ifelse($1,list_to_binary_1,hipe_wrapper_list_to_binary_1, +ifelse($1,iolist_to_binary_1,hipe_wrapper_iolist_to_binary_1, +ifelse($1,binary_list_to_bin_1,hipe_wrapper_binary_list_to_bin_1, +ifelse($1,list_to_bitstring_1,hipe_wrapper_list_to_bitstring_1, +$1)))))))))))') define(BIF_LIST,`standard_bif_interface_$3(nbif_$4, CFUN($4))') include(TARGET/`erl_bif_list.h') diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c index 0a58a625b2..aa412a20c8 100644 --- a/erts/emulator/sys/common/erl_poll.c +++ b/erts/emulator/sys/common/erl_poll.c @@ -2157,7 +2157,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet ps, #ifdef ERTS_POLL_DEBUG_PRINT erts_printf("Entering erts_poll_wait(), timeout=%d\n", - (int) tv->tv_sec*1000 + tv->tv_usec/1000); + (int) tvp->tv_sec*1000 + tvp->tv_usec/1000); #endif if (ERTS_POLLSET_SET_POLLED_CHK(ps)) { diff --git a/erts/emulator/sys/win32/sys.c b/erts/emulator/sys/win32/sys.c index 0ded6b274e..0ded6b274e 100755..100644 --- a/erts/emulator/sys/win32/sys.c +++ b/erts/emulator/sys/win32/sys.c diff --git a/erts/emulator/test/Makefile b/erts/emulator/test/Makefile index 0b0568c31a..dfbe47786a 100644 --- a/erts/emulator/test/Makefile +++ b/erts/emulator/test/Makefile @@ -31,6 +31,7 @@ MODULES= \ a_SUITE \ after_SUITE \ alloc_SUITE \ + async_ports_SUITE \ beam_SUITE \ beam_literals_SUITE \ bif_SUITE \ diff --git a/erts/emulator/test/async_ports_SUITE.erl b/erts/emulator/test/async_ports_SUITE.erl new file mode 100644 index 0000000000..c89b3655ff --- /dev/null +++ b/erts/emulator/test/async_ports_SUITE.erl @@ -0,0 +1,118 @@ +-module(async_ports_SUITE). + +-include_lib("common_test/include/ct.hrl"). + +-compile(export_all). + +-define(PACKET_SIZE, (10 * 1024 * 8)). +-define(CPORT_DELAY, 100). +-define(TEST_LOOPS_COUNT, 100000). +-define(SLEEP_BEFORE_CHECK, 1000). +-define(TEST_PROCS_COUNT, 2). +-define(TC_TIMETRAP_SECONDS, 10). + +suite() -> [{ct_hooks,[ts_install_cth]}]. + +all() -> + [ + permanent_busy_test + ]. 
+ +permanent_busy_test(Config) -> + ct:timetrap({seconds, ?TC_TIMETRAP_SECONDS}), + ExePath = filename:join(?config(data_dir, Config), "cport"), + + Self = self(), + spawn_link( + fun() -> + Block = <<0:?PACKET_SIZE>>, + + Port = open_port(ExePath), + + Testers = + lists:map( + fun(_) -> + erlang:spawn_link(?MODULE, run_loop, + [Self, + Port, + Block, + ?TEST_LOOPS_COUNT, + 0]) + end, + lists:seq(1, ?TEST_PROCS_COUNT)), + Self ! {test_info, Port, Testers}, + endless_flush(Port) + end), + + receive + {test_info, Port, Testers} -> + MaxWaitTime = round(0.7 * ?TC_TIMETRAP_SECONDS * 1000), + ct:log("wait testers, maximum ~w mcsec~n", [MaxWaitTime]), + ok = wait_testers(MaxWaitTime, Testers), + timer:sleep(?SLEEP_BEFORE_CHECK), + case erlang:port_command(Port, <<"test">>, [nosuspend]) of + false -> + exit(port_dead); + true -> + ok + end + end. + +wait_testers(Timeout, Testers) -> + lists:foldl( + fun(Pid, AccIn) -> + StartWait = os:timestamp(), + receive + {Pid, port_dead} -> + recalc_timeout(AccIn, StartWait) + after AccIn -> + Pid ! stop, + recalc_timeout(AccIn, StartWait) + end + end, Timeout, Testers), + ok. + +recalc_timeout(TimeoutIn, WaitStart) -> + erlang:max(0, TimeoutIn - round(timer:now_diff(os:timestamp(), WaitStart)) div 1000). + +open_port(ExePath) -> + erlang:open_port({spawn, ExePath ++ " 100"}, [{packet, 4}, eof, exit_status, use_stdio, binary]). + +run_loop(RootProc, Port, Block, CheckLimit, BusyCnt) -> + receive + stop -> + ok + after 0 -> + case erlang:port_command(Port, Block, [nosuspend]) of + true -> + run_loop(RootProc, Port, Block, CheckLimit, 0); + false -> + if + BusyCnt + 1 > CheckLimit -> + check_dead(RootProc, Port, Block, CheckLimit); + true -> + run_loop(RootProc, Port, Block, CheckLimit, BusyCnt + 1) + end + end + end. + +check_dead(RootProc, Port, Block, CheckLimit) -> + ct:log("~p: check port dead~n", [self()]), + timer:sleep(?SLEEP_BEFORE_CHECK), + case erlang:port_command(Port, Block, [nosuspend]) of + true -> + ct:log("not dead~n"), + run_loop(RootProc, Port, Block, CheckLimit, 0); + false -> + ct:log("port dead: ~p~n", [Port]), + RootProc ! {self(), port_dead}, + ok + end. + +endless_flush(Port) -> + receive + {Port, {data, _}} -> + endless_flush(Port); + {Port, SomethingWrong} -> + erlang:error({someting_wrong, SomethingWrong}) + end. 
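The new suite above leans on the non-suspending form of port_command: with the nosuspend option the calling process is never suspended on a busy port, the call simply returns false. A minimal sketch of that pattern, using a hypothetical helper name try_send/2 that is not part of the suite:

%% Hypothetical helper (illustration only, not part of the suite above):
%% queue Data on Port without risking suspension of the calling process.
try_send(Port, Data) ->
    case erlang:port_command(Port, Data, [nosuspend]) of
        true  -> sent;  %% data was queued on the port
        false -> busy   %% port is busy; the caller decides whether to retry
    end.

permanent_busy_test/1 relies on exactly this behaviour: after the flooding testers are done, the final port_command with nosuspend is expected to return true again once the port has drained, rather than the port remaining permanently busy.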
diff --git a/erts/emulator/test/async_ports_SUITE_data/Makefile.src b/erts/emulator/test/async_ports_SUITE_data/Makefile.src new file mode 100644 index 0000000000..56da3fbe12 --- /dev/null +++ b/erts/emulator/test/async_ports_SUITE_data/Makefile.src @@ -0,0 +1,15 @@ +CC = @CC@ +LD = @LD@ +CFLAGS = @CFLAGS@ @DEFS@ +CROSSLDFLAGS = @CROSSLDFLAGS@ + +PROGS = cport@exe@ + + +all: $(PROGS) + +cport@exe@: cport@obj@ + $(LD) $(CROSSLDFLAGS) -o cport cport@obj@ @LIBS@ + +cport@obj@: cport.c + $(CC) -c -o cport@obj@ $(CFLAGS) cport.c diff --git a/erts/emulator/test/async_ports_SUITE_data/cport.c b/erts/emulator/test/async_ports_SUITE_data/cport.c new file mode 100644 index 0000000000..033aff382a --- /dev/null +++ b/erts/emulator/test/async_ports_SUITE_data/cport.c @@ -0,0 +1,81 @@ +#include <stdlib.h> +#include <stdio.h> +#include <errno.h> +#include <string.h> +#ifdef __WIN32__ +# include "windows.h" +# include "winbase.h" +#else +# include <unistd.h> +#endif + +typedef unsigned char byte; + +int read_cmd(byte *buf) +{ + int len; + if (read_exact(buf, 4) != 4) + return(-1); + + len = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]; + return read_exact(buf, len); +} + +int write_cmd(byte *buf, int len) +{ + byte li[4]; + li[0] = (len >> 24) & 0xff; + li[1] = (len >> 16) & 0xff; + li[2] = (len >> 8) & 0xff; + li[3] = len & 0xff; + write_exact(&li, 4); + + return write_exact(buf, len); +} + +int read_exact(byte *buf, int len) +{ + int i, got=0; + do { + if ((i = read(0, buf+got, len-got)) <= 0) + { + return(i); + } + got += i; + } while (got<len); + return len; +} + +int write_exact(byte *buf, int len) +{ + int i, wrote = 0; + do { + if ((i = write(1, buf+wrote, len-wrote)) < 0) + return (i); + wrote += i; + } while (wrote<len); + return len; +} + +byte static_buf[31457280]; // 30 mb + +int main(int argc, char **argv) { + int sleep_time = atoi(argv[1]); + int fn, arg, res; + byte *buf = &static_buf[0]; + int len = 0; + if (sleep_time <= 0) + sleep_time = 0; +#ifdef __WIN32__ + else + sleep_time = ((sleep_time - 1) / 1000) + 1; /* Milli seconds */ +#endif + while ((len = read_cmd(buf)) > 0) { +#ifdef __WIN32__ + Sleep((DWORD) sleep_time); +#else + usleep(sleep_time); +#endif + write_cmd(buf, len); + } +} diff --git a/erts/emulator/test/binary_SUITE.erl b/erts/emulator/test/binary_SUITE.erl index 7aba367e33..44e9e4f243 100644 --- a/erts/emulator/test/binary_SUITE.erl +++ b/erts/emulator/test/binary_SUITE.erl @@ -58,7 +58,8 @@ ordering/1,unaligned_order/1,gc_test/1, bit_sized_binary_sizes/1, otp_6817/1,deep/1,obsolete_funs/1,robustness/1,otp_8117/1, - otp_8180/1, trapping/1]). + otp_8180/1, trapping/1, large/1, + error_after_yield/1, cmp_old_impl/1]). %% Internal exports. -export([sleeper/0,trapping_loop/4]). @@ -76,7 +77,8 @@ all() -> bad_term_to_binary, more_bad_terms, otp_5484, otp_5933, ordering, unaligned_order, gc_test, bit_sized_binary_sizes, otp_6817, otp_8117, deep, - obsolete_funs, robustness, otp_8180, trapping]. + obsolete_funs, robustness, otp_8180, trapping, large, + error_after_yield, cmp_old_impl]. groups() -> []. @@ -1351,7 +1353,16 @@ trapping(Config) when is_list(Config)-> do_trapping(5, term_to_binary, fun() -> [lists:duplicate(2000000,2000000)] end), do_trapping(5, binary_to_term, - fun() -> [term_to_binary(lists:duplicate(2000000,2000000))] end). 
+ fun() -> [term_to_binary(lists:duplicate(2000000,2000000))] end), + do_trapping(5, binary_to_list, + fun() -> [list_to_binary(lists:duplicate(2000000,$x))] end), + do_trapping(5, list_to_binary, + fun() -> [lists:duplicate(2000000,$x)] end), + do_trapping(5, bitstring_to_list, + fun() -> [list_to_bitstring([lists:duplicate(2000000,$x),<<7:4>>])] end), + do_trapping(5, list_to_bitstring, + fun() -> [[lists:duplicate(2000000,$x),<<7:4>>]] end) + . do_trapping(0, _, _) -> ok; @@ -1384,9 +1395,189 @@ trapping_loop2(Bif,Args,N) -> apply(erlang,Bif,Args), trapping_loop2(Bif, Args, N-1). +large(Config) when is_list(Config) -> + List = lists:flatten(lists:map(fun (_) -> + [0,1,2,3,4,5,6,7,8] + end, + lists:seq(1, 131072))), + Bin = list_to_binary(List), + List = binary_to_list(Bin), + PartList = lists:reverse(tl(tl(lists:reverse(tl(tl(List)))))), + PartList = binary_to_list(Bin, 3, length(List)-2), + ListBS = List ++ [<<7:4>>], + ListBS = bitstring_to_list(list_to_bitstring(ListBS)), + BitStr1 = list_to_bitstring(lists:duplicate(1024*1024, [<<1,5:3>>])), + BitStr1 = list_to_bitstring(bitstring_to_list(BitStr1)), + BitStr2 = list_to_bitstring([lists:duplicate(512*1024, [<<1,5:3>>]), + Bin]), + BitStr2 = list_to_bitstring(bitstring_to_list(BitStr2)), + ok. + +error_after_yield(Config) when is_list(Config) -> + L2BTrap = {erts_internal, list_to_binary_continue, 1}, + error_after_yield(badarg, erlang, list_to_binary, 1, fun () -> [[mk_list(1000000), oops]] end, L2BTrap), + error_after_yield(badarg, erlang, iolist_to_binary, 1, fun () -> [[list2iolist(mk_list(1000000)), oops]] end, L2BTrap), + error_after_yield(badarg, erlang, list_to_bitstring, 1, fun () -> [[list2bitstrlist(mk_list(1000000)), oops]] end, L2BTrap), + error_after_yield(badarg, binary, list_to_bin, 1, fun () -> [[mk_list(1000000), oops]] end, L2BTrap), + + B2TTrap = {erts_internal, binary_to_term_trap, 1}, + + error_after_yield(badarg, erlang, binary_to_term, 1, fun () -> [error_after_yield_bad_ext_term()] end, B2TTrap), + error_after_yield(badarg, erlang, binary_to_term, 2, fun () -> [error_after_yield_bad_ext_term(), [safe]] end, B2TTrap), + + case erlang:system_info(wordsize) of + 4 -> + SysLimitSz = 1 bsl 32, + error_after_yield(system_limit, erlang, list_to_binary, 1, fun () -> [[huge_iolist(SysLimitSz), $x]] end, L2BTrap), + error_after_yield(system_limit, erlang, iolist_to_binary, 1, fun () -> [[huge_iolist(SysLimitSz), $x]] end, L2BTrap), + error_after_yield(system_limit, erlang, list_to_bitstring, 1, fun () -> [[huge_iolist(SysLimitSz), $x]] end, L2BTrap), + error_after_yield(system_limit, binary, list_to_bin, 1, fun () -> [[huge_iolist(SysLimitSz), $x]] end, L2BTrap); + 8 -> + % Takes waaaay to long time to test system_limit on 64-bit archs... + ok + end, + ok. + +error_after_yield(Type, M, F, AN, AFun, TrapFunc) -> + io:format("Testing ~p for ~p:~p/~p~n", [Type, M, F, AN]), + Tracer = self(), + {Pid, Mon} = spawn_monitor(fun () -> + A = AFun(), + try + erlang:yield(), + erlang:trace(self(),true,[running,{tracer,Tracer}]), + apply(M, F, A), + exit({unexpected_success, {M, F, A}}) + catch + error:Type -> + erlang:trace(self(),false,[running,{tracer,Tracer}]), + %% We threw the exception from the native + %% function we trapped to, but we want + %% the BIF that originally was called + %% to appear in the stack trace. 
+ [{M, F, A, _} | _] = erlang:get_stacktrace() + end + end), + receive + {'DOWN', Mon, process, Pid, Reason} -> + normal = Reason + end, + TD = erlang:trace_delivered(Pid), + receive + {trace_delivered, Pid, TD} -> + NoYields = error_after_yield_sched(Pid, TrapFunc, 0), + io:format("No of yields: ~p~n", [NoYields]), + true = NoYields > 2 + end, + ok. + +error_after_yield_sched(P, TrapFunc, N) -> + receive + {trace, P, out, TrapFunc} -> + receive + {trace, P, in, TrapFunc} -> + error_after_yield_sched(P, TrapFunc, N+1) + after 0 -> + exit(trap_sched_mismatch) + end; + {trace, P, out, Func} -> + receive + {trace, P, in, Func} -> + error_after_yield_sched(P, TrapFunc, N) + after 0 -> + exit(other_sched_mismatch) + end + after 0 -> + N + end. + +error_after_yield_bad_ext_term() -> + TupleSz = 2000000, + <<131, % Version magic + AtomExt/binary>> = term_to_binary(an_atom_we_use_for_this), + BadAtomExt = [100, %% ATOM_EXT + 255, 255, % Invalid size of 65535 bytes + "oops"], + + %% Produce a large tuple where the last element is invalid + list_to_binary([131, %% Version magic + 105, %% LARGE_TUPLE_EXT + <<TupleSz:32/big>>, %% Tuple size + lists:duplicate(TupleSz-1, AtomExt), %% Valid atoms + BadAtomExt]). %% Invalid atom at the end + +cmp_old_impl(Config) when is_list(Config) -> + %% Compare results from new yielding implementations with + %% old non yielding implementations + Cookie = atom_to_list(erlang:get_cookie()), + Rel = "r16b_latest", + case test_server:is_release_available(Rel) of + false -> + {skipped, "No "++Rel++" available"}; + true -> + {ok, Node} = ?t:start_node(list_to_atom(atom_to_list(?MODULE)++"_"++Rel), + peer, + [{args, " -setcookie "++Cookie}, + {erl, [{release, Rel}]}]), + + cmp_node(Node, {erlang, list_to_binary, [list2iolist(mk_list(1))]}), + cmp_node(Node, {erlang, list_to_binary, [list2iolist(mk_list(10))]}), + cmp_node(Node, {erlang, list_to_binary, [list2iolist(mk_list(100))]}), + cmp_node(Node, {erlang, list_to_binary, [list2iolist(mk_list(1000))]}), + cmp_node(Node, {erlang, list_to_binary, [list2iolist(mk_list(10000))]}), + cmp_node(Node, {erlang, list_to_binary, [list2iolist(mk_list(100000))]}), + cmp_node(Node, {erlang, list_to_binary, [list2iolist(mk_list(1000000))]}), + cmp_node(Node, {erlang, list_to_binary, [list2iolist(mk_list(10000000))]}), + cmp_node(Node, {erlang, list_to_binary, [list2iolist(mk_list_lb(10000000))]}), + + cmp_node(Node, {erlang, binary_to_list, [list_to_binary(mk_list(1))]}), + cmp_node(Node, {erlang, binary_to_list, [list_to_binary(mk_list(10))]}), + cmp_node(Node, {erlang, binary_to_list, [list_to_binary(mk_list(100))]}), + cmp_node(Node, {erlang, binary_to_list, [list_to_binary(mk_list(1000))]}), + cmp_node(Node, {erlang, binary_to_list, [list_to_binary(mk_list(10000))]}), + cmp_node(Node, {erlang, binary_to_list, [list_to_binary(mk_list(100000))]}), + cmp_node(Node, {erlang, binary_to_list, [list_to_binary(mk_list(1000000))]}), + cmp_node(Node, {erlang, binary_to_list, [list_to_binary(mk_list(10000000))]}), + + cmp_node(Node, {erlang, list_to_bitstring, [list2bitstrlist(mk_list(1))]}), + cmp_node(Node, {erlang, list_to_bitstring, [list2bitstrlist(mk_list(10))]}), + cmp_node(Node, {erlang, list_to_bitstring, [list2bitstrlist(mk_list(100))]}), + cmp_node(Node, {erlang, list_to_bitstring, [list2bitstrlist(mk_list(1000))]}), + cmp_node(Node, {erlang, list_to_bitstring, [list2bitstrlist(mk_list(10000))]}), + cmp_node(Node, {erlang, list_to_bitstring, [list2bitstrlist(mk_list(100000))]}), + cmp_node(Node, {erlang, list_to_bitstring, 
[list2bitstrlist(mk_list(1000000))]}), + cmp_node(Node, {erlang, list_to_bitstring, [list2bitstrlist(mk_list(10000000))]}), + + cmp_node(Node, {erlang, bitstring_to_list, [list_to_bitstring(list2bitstrlist(mk_list(1)))]}), + cmp_node(Node, {erlang, bitstring_to_list, [list_to_bitstring(list2bitstrlist(mk_list(10)))]}), + cmp_node(Node, {erlang, bitstring_to_list, [list_to_bitstring(list2bitstrlist(mk_list(100)))]}), + cmp_node(Node, {erlang, bitstring_to_list, [list_to_bitstring(list2bitstrlist(mk_list(1000)))]}), + cmp_node(Node, {erlang, bitstring_to_list, [list_to_bitstring(list2bitstrlist(mk_list(10000)))]}), + cmp_node(Node, {erlang, bitstring_to_list, [list_to_bitstring(list2bitstrlist(mk_list(100000)))]}), + cmp_node(Node, {erlang, bitstring_to_list, [list_to_bitstring(list2bitstrlist(mk_list(1000000)))]}), + cmp_node(Node, {erlang, bitstring_to_list, [list_to_bitstring(list2bitstrlist(mk_list(10000000)))]}), + + ?t:stop_node(Node), + + ok + end. %% Utilities. +huge_iolist(Lim) -> + Sz = 1024, + huge_iolist(list_to_binary(mk_list(Sz)), Sz, Lim). + +huge_iolist(X, Sz, Lim) when Sz >= Lim -> + X; +huge_iolist(X, Sz, Lim) -> + huge_iolist([X, X], Sz*2, Lim). + +cmp_node(Node, {M, F, A}) -> + Res = rpc:call(Node, M, F, A), + Res = apply(M, F, A), + ok. + make_sub_binary(Bin) when is_binary(Bin) -> {_,B} = split_binary(list_to_binary([0,1,3,Bin]), 3), B; @@ -1467,3 +1658,78 @@ get_reds() -> erts_debug:set_internal_state(available_internal_state, true), get_reds() end. + +-define(LARGE_BIN, (512*1024+10)). +-define(LARGE_BIN_LIM, (1024*1024)). + +mk_list(0, Acc) -> + Acc; +mk_list(Sz, Acc) -> + mk_list(Sz-1, [$A+(Sz band 63) | Acc]). + +mk_list(Sz) when Sz >= ?LARGE_BIN_LIM -> + SzLeft = Sz - ?LARGE_BIN, + SzHd = SzLeft div 2, + SzTl = SzLeft - SzHd, + [mk_list(SzHd, []), erlang:list_to_binary(mk_list(?LARGE_BIN, [])), mk_list(SzTl, [])]; +mk_list(Sz) -> + mk_list(Sz, []). + +mk_list_lb(Sz) when Sz >= ?LARGE_BIN_LIM -> + SzLeft = Sz - ?LARGE_BIN, + SzHd = SzLeft div 2, + SzTl = SzLeft - SzHd, + [mk_list(SzHd, []), erlang:list_to_binary(mk_list(?LARGE_BIN, [])), mk_list(SzTl, [])]; +mk_list_lb(Sz) -> + mk_list(Sz, []). + + +list2iolist(List) -> + list2iolist(List, []). + +list2iolist([], Acc) -> + Acc; +list2iolist([X0, X1, X2, X3, X4, X5 | Xs], Acc) when is_integer(X0), 0 =< X0, X0 < 256, + is_integer(X1), 0 =< X1, X1 < 256, + is_integer(X2), 0 =< X2, X2 < 256, + is_integer(X3), 0 =< X3, X3 < 256, + is_integer(X4), 0 =< X4, X4 < 256, + is_integer(X5), 0 =< X5, X5 < 256 -> + NewAcc = case (X0+X1+X2+X3+X4+X5) band 3 of + 0 -> + [Acc, [[[[[[[[[[[[X0,[],<<"">>,X1]]]]]]]]],[X2,X3]],[],[],[],[],X4],X5]]; + 1 -> + [Acc, [], erlang:list_to_binary([X0, X1, X2, X3, X4, X5])]; + 2 -> + [Acc, [[[[X0|erlang:list_to_binary([X1])],[X2|erlang:list_to_binary([X3])],[X4|erlang:list_to_binary([X5])]]]|<<"">>]]; + 3 -> + [Acc, X0, X1, X2, <<"">>, [], X3, X4 | erlang:list_to_binary([X5])] + end, + list2iolist(Xs, NewAcc); +list2iolist([X | Xs], Acc) -> + list2iolist(Xs, [Acc,X]). + +list2bitstrlist(List) -> + [list2bitstrlist(List, []), <<4:7>>]. 
+ +list2bitstrlist([], Acc) -> + Acc; +list2bitstrlist([X0, X1, X2, X3, X4, X5 | Xs], Acc) when is_integer(X0), 0 =< X0, X0 < 256, + is_integer(X1), 0 =< X1, X1 < 256, + is_integer(X2), 0 =< X2, X2 < 256, + is_integer(X3), 0 =< X3, X3 < 256, + is_integer(X4), 0 =< X4, X4 < 256, + is_integer(X5), 0 =< X5, X5 < 256 -> + NewAcc = case (X0+X1+X2+X3+X4+X5) band 3 of + 0 -> + [Acc, [[[[[[[[[[[[X0,[],<<"">>,X1]]]]]]]]],[X2,X3]],[],[],[],[],X4],X5]]; + 1 -> + [Acc, [], <<X0:X1>>, <<X2:X3>>, <<X4:X5>>]; + 2 -> + [Acc, [[[[X0|<<X1:X2>>],X3]],[X4|erlang:list_to_binary([X5])]|<<"">>]]; + 3 -> + [Acc, X0, X1, X2, <<"">>, [], X3, X4 | erlang:list_to_binary([X5])] + end, + list2bitstrlist(Xs, NewAcc); +list2bitstrlist([X | Xs], Acc) -> + list2bitstrlist(Xs, [Acc,X]). diff --git a/erts/emulator/test/busy_port_SUITE.erl b/erts/emulator/test/busy_port_SUITE.erl index 4b4af0babe..2ed5aaa0d0 100644 --- a/erts/emulator/test/busy_port_SUITE.erl +++ b/erts/emulator/test/busy_port_SUITE.erl @@ -98,8 +98,10 @@ generator(0, Writer, _Data) -> %% Calling process_info(Pid, current_function) on a suspended process %% used to crash Beam. - {current_function, {erlang, send, 2}} = - process_info(Writer, current_function), + case process_info(Writer, [status,current_function]) of + [{status,suspended},{current_function,{erlang,send,2}}] -> ok; + [{status,suspended},{current_function,{erlang,bif_return_trap,_}}] -> ok + end, unlock_slave(); generator(N, Writer, Data) -> Writer ! {exec, Data}, diff --git a/erts/emulator/test/driver_SUITE.erl b/erts/emulator/test/driver_SUITE.erl index c62bc0c454..336b6188f6 100644 --- a/erts/emulator/test/driver_SUITE.erl +++ b/erts/emulator/test/driver_SUITE.erl @@ -1062,10 +1062,9 @@ otp_6602(Config) when is_list(Config) -> %% Inet driver use port locking... {ok, S} = gen_udp:open(0), {ok, Fd} = inet:getfd(S), - {ok, Port} = inet:port(S), %% Steal fd (lock checker used to %% trigger here). - {ok, _S2} = gen_udp:open(Port,[{fd,Fd}]), + {ok, _S2} = gen_udp:open(0,[{fd,Fd}]), Parent ! Done end), ?line receive Done -> ok end, @@ -1085,7 +1084,15 @@ otp_6602(Config) when is_list(Config) -> ["async_thrs", "sched_thrs"])). --define(EXPECTED_SYSTEM_INFO_NAMES, ?EXPECTED_SYSTEM_INFO_NAMES2). +-define(EXPECTED_SYSTEM_INFO_NAMES3, + (?EXPECTED_SYSTEM_INFO_NAMES2 ++ + ["emu_nif_vsn"])). + +-define(EXPECTED_SYSTEM_INFO_NAMES4, + (?EXPECTED_SYSTEM_INFO_NAMES3 ++ + ["dirty_sched"])). + +-define(EXPECTED_SYSTEM_INFO_NAMES, ?EXPECTED_SYSTEM_INFO_NAMES4). 'driver_system_info_base_ver'(doc) -> []; @@ -1133,16 +1140,18 @@ check_driver_system_info_result(Result) -> drv_vsn_str2tup(erlang:system_info(driver_version))} of {DDVSN, DDVSN} -> ?line [] = Ns; - {{1, 0}, _} -> + %% {{1, 0}, _} -> + %% ?line ExpNs = lists:sort(?EXPECTED_SYSTEM_INFO_NAMES + %% -- ?EXPECTED_SYSTEM_INFO_NAMES1), + %% ?line ExpNs = lists:sort(Ns); + %% {{1, 1}, _} -> + %% ?line ExpNs = lists:sort(?EXPECTED_SYSTEM_INFO_NAMES + %% -- ?EXPECTED_SYSTEM_INFO_NAMES2), + %% ?line ExpNs = lists:sort(Ns); + {{3, 0}, _} -> ?line ExpNs = lists:sort(?EXPECTED_SYSTEM_INFO_NAMES - -- ?EXPECTED_SYSTEM_INFO_NAMES1), - ?line ExpNs = lists:sort(Ns); - {{1, 1}, _} -> - ?line ExpNs = lists:sort(?EXPECTED_SYSTEM_INFO_NAMES - -- ?EXPECTED_SYSTEM_INFO_NAMES2), - ?line ExpNs = lists:sort(Ns); - {{2, 0}, _} -> - ?line [] = Ns + -- ?EXPECTED_SYSTEM_INFO_NAMES3), + ?line ExpNs = lists:sort(Ns) end. 
chk_sis(SIs, Ns) -> @@ -1189,6 +1198,14 @@ check_si_res(["async_thrs", Value]) -> check_si_res(["sched_thrs", Value]) -> ?line Value = integer_to_list(erlang:system_info(schedulers)); +%% Data added in 3rd version of driver_system_info() (driver version 1.5) +check_si_res(["emu_nif_vsn", _Value]) -> + true; + +%% Data added in 4th version of driver_system_info() (driver version 3.1) +check_si_res(["dirty_sched", _Value]) -> + true; + check_si_res(Unexpected) -> ?line ?t:fail({unexpected_result, Unexpected}). diff --git a/erts/emulator/test/driver_SUITE_data/sys_info_base_drv.c b/erts/emulator/test/driver_SUITE_data/sys_info_base_drv.c index e44c7dbd5e..964034f5a6 100644 --- a/erts/emulator/test/driver_SUITE_data/sys_info_base_drv.c +++ b/erts/emulator/test/driver_SUITE_data/sys_info_base_drv.c @@ -41,7 +41,9 @@ "thread=%s " \ "smp=%s " \ "async_thrs=%d " \ - "sched_thrs=%d" + "sched_thrs=%d " \ + "emu_nif_vsn=%d.%d" + static size_t sys_info_drv_max_res_len(ErlDrvSysInfo *sip) @@ -55,6 +57,7 @@ sys_info_drv_max_res_len(ErlDrvSysInfo *sip) slen += 5; /* smp */ slen += 20; /* async_thrs */ slen += 20; /* sched_thrs */ + slen += 2*20; /* emu_nif_vsn */ return slen; } @@ -72,7 +75,9 @@ sys_info_drv_sprintf_sys_info(ErlDrvSysInfo *sip, char *str) sip->thread_support ? "true" : "false", sip->smp_support ? "true" : "false", sip->async_threads, - sip->scheduler_threads); + sip->scheduler_threads, + sip->nif_major_version, + sip->nif_minor_version); } #include "sys_info_drv_impl.c" diff --git a/erts/emulator/test/driver_SUITE_data/sys_info_curr_drv.c b/erts/emulator/test/driver_SUITE_data/sys_info_curr_drv.c index 5bbc966932..6d2c47fdaf 100644 --- a/erts/emulator/test/driver_SUITE_data/sys_info_curr_drv.c +++ b/erts/emulator/test/driver_SUITE_data/sys_info_curr_drv.c @@ -40,7 +40,9 @@ "thread=%s " \ "smp=%s " \ "async_thrs=%d " \ - "sched_thrs=%d" + "sched_thrs=%d " \ + "emu_nif_vsn=%d.%d " \ + "dirty_sched=%s" static size_t sys_info_drv_max_res_len(ErlDrvSysInfo *sip) @@ -54,6 +56,8 @@ sys_info_drv_max_res_len(ErlDrvSysInfo *sip) slen += 5; /* smp */ slen += 20; /* async_thrs */ slen += 20; /* sched_thrs */ + slen += 2*20; /* emu_nif_vsn */ + slen += 5; /* dirty_sched */ return slen; } @@ -71,7 +75,10 @@ sys_info_drv_sprintf_sys_info(ErlDrvSysInfo *sip, char *str) sip->thread_support ? "true" : "false", sip->smp_support ? "true" : "false", sip->async_threads, - sip->scheduler_threads); + sip->scheduler_threads, + sip->nif_major_version, + sip->nif_minor_version, + sip->dirty_scheduler_support ? "true" : "false"); } #include "sys_info_drv_impl.c" diff --git a/erts/emulator/test/driver_SUITE_data/sys_info_prev_drv.c b/erts/emulator/test/driver_SUITE_data/sys_info_prev_drv.c index 63c69f751c..2271d7027b 100644 --- a/erts/emulator/test/driver_SUITE_data/sys_info_prev_drv.c +++ b/erts/emulator/test/driver_SUITE_data/sys_info_prev_drv.c @@ -41,7 +41,9 @@ "thread=%s " \ "smp=%s " \ "async_thrs=%d " \ - "sched_thrs=%d" + "sched_thrs=%d " \ + "emu_nif_vsn=%d.%d" + static size_t sys_info_drv_max_res_len(ErlDrvSysInfo *sip) @@ -55,6 +57,7 @@ sys_info_drv_max_res_len(ErlDrvSysInfo *sip) slen += 5; /* smp */ slen += 20; /* async_thrs */ slen += 20; /* sched_thrs */ + slen += 2*20; /* emu_nif_vsn */ return slen; } @@ -72,7 +75,9 @@ sys_info_drv_sprintf_sys_info(ErlDrvSysInfo *sip, char *str) sip->thread_support ? "true" : "false", sip->smp_support ? 
"true" : "false", sip->async_threads, - sip->scheduler_threads); + sip->scheduler_threads, + sip->nif_major_version, + sip->nif_minor_version); } #include "sys_info_drv_impl.c" diff --git a/erts/emulator/test/fun_SUITE.erl b/erts/emulator/test/fun_SUITE.erl index 8ad5f290ed..2968f5bebb 100644 --- a/erts/emulator/test/fun_SUITE.erl +++ b/erts/emulator/test/fun_SUITE.erl @@ -30,7 +30,7 @@ fun_to_port/1,t_hash/1,t_phash/1,t_phash2/1,md5/1, refc/1,refc_ets/1,refc_dist/1, const_propagation/1,t_arity/1,t_is_function2/1, - t_fun_info/1]). + t_fun_info/1,t_fun_info_mfa/1]). -export([nothing/0]). @@ -42,7 +42,8 @@ all() -> [bad_apply, bad_fun_call, badarity, ext_badarity, equality, ordering, fun_to_port, t_hash, t_phash, t_phash2, md5, refc, refc_ets, refc_dist, - const_propagation, t_arity, t_is_function2, t_fun_info]. + const_propagation, t_arity, t_is_function2, t_fun_info, + t_fun_info_mfa]. groups() -> []. @@ -824,6 +825,24 @@ t_fun_info(Config) when is_list(Config) -> ?line bad_info(<<1,2>>), ok. +t_fun_info_mfa(Config) when is_list(Config) -> + Fun1 = fun spawn_call/2, + {module,M1} = erlang:fun_info(Fun1, module), + {name,F1} = erlang:fun_info(Fun1, name), + {arity,A1} = erlang:fun_info(Fun1, arity), + {M1,F1,A1=2} = erlang:fun_info_mfa(Fun1), + %% Module fun. + Fun2 = fun ?MODULE:t_fun_info/1, + {module,M2} = erlang:fun_info(Fun2, module), + {name,F2} = erlang:fun_info(Fun2, name), + {arity,A2} = erlang:fun_info(Fun2, arity), + {M2,F2,A2=1} = erlang:fun_info_mfa(Fun2), + + %% Not fun. + {'EXIT',_} = (catch erlang:fun_info_mfa(id(d))), + ok. + + bad_info(Term) -> try erlang:fun_info(Term, module) of Any -> diff --git a/erts/emulator/test/nif_SUITE.erl b/erts/emulator/test/nif_SUITE.erl index b2da6f58af..14e6585220 100644 --- a/erts/emulator/test/nif_SUITE.erl +++ b/erts/emulator/test/nif_SUITE.erl @@ -37,7 +37,9 @@ threading/1, send/1, send2/1, send3/1, send_threaded/1, neg/1, is_checks/1, get_length/1, make_atom/1, make_string/1, reverse_list_test/1, - otp_9668/1, consume_timeslice/1, dirty_nif/1, dirty_nif_send/1 + otp_9828/1, + otp_9668/1, consume_timeslice/1, dirty_nif/1, dirty_nif_send/1, + dirty_nif_exception/1, nif_schedule/1 ]). -export([many_args_100/100]). @@ -64,7 +66,9 @@ all() -> resource_takeover, threading, send, send2, send3, send_threaded, neg, is_checks, get_length, make_atom, make_string,reverse_list_test, - otp_9668, consume_timeslice, dirty_nif, dirty_nif_send + otp_9828, + otp_9668, consume_timeslice, + nif_schedule, dirty_nif, dirty_nif_send, dirty_nif_exception ]. groups() -> @@ -1440,6 +1444,20 @@ otp_9668(Config) -> ?line verify_tmpmem(TmpMem), ok. +otp_9828(doc) -> ["Copy of writable binary"]; +otp_9828(Config) -> + ensure_lib_loaded(Config, 1), + + otp_9828_loop(<<"I'm alive!">>, 1000). + +otp_9828_loop(Bin, 0) -> + ok; +otp_9828_loop(Bin, Val) -> + WrtBin = <<Bin/binary, Val:32>>, + ok = otp_9828_nif(WrtBin), + otp_9828_loop(WrtBin, Val-1). + + consume_timeslice(Config) when is_list(Config) -> CONTEXT_REDS = 2000, Me = self(), @@ -1524,6 +1542,20 @@ consume_timeslice(Config) when is_list(Config) -> ok. +nif_schedule(Config) when is_list(Config) -> + ensure_lib_loaded(Config), + A = "this is a string", + B = {this,is,a,tuple}, + {B,A} = call_nif_schedule(A, B), + ok = try call_nif_schedule(1, 2) + catch + error:badarg -> + [{?MODULE,call_nif_schedule,[1,2],_}|_] = + erlang:get_stacktrace(), + ok + end, + ok. 
+ dirty_nif(Config) when is_list(Config) -> try erlang:system_info(dirty_cpu_schedulers) of N when is_integer(N) -> @@ -1556,6 +1588,24 @@ dirty_nif_send(Config) when is_list(Config) -> {skipped,"No dirty scheduler support"} end. +dirty_nif_exception(Config) when is_list(Config) -> + try erlang:system_info(dirty_cpu_schedulers) of + N when is_integer(N) -> + ensure_lib_loaded(Config), + try + call_dirty_nif_exception(), + ?t:fail(expected_badarg) + catch + error:badarg -> + [{?MODULE,call_dirty_nif_exception,[],_}|_] = + erlang:get_stacktrace(), + ok + end + catch + error:badarg -> + {skipped,"No dirty scheduler support"} + end. + next_msg(_Pid) -> receive M -> M @@ -1684,9 +1734,12 @@ reverse_list(_) -> ?nif_stub. echo_int(_) -> ?nif_stub. type_sizes() -> ?nif_stub. otp_9668_nif(_) -> ?nif_stub. +otp_9828_nif(_) -> ?nif_stub. consume_timeslice_nif(_,_) -> ?nif_stub. +call_nif_schedule(_,_) -> ?nif_stub. call_dirty_nif(_,_,_) -> ?nif_stub. send_from_dirty_nif(_) -> ?nif_stub. +call_dirty_nif_exception() -> ?nif_stub. %% maps is_map_nif(_) -> ?nif_stub. diff --git a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c index 955dc64189..291c903947 100644 --- a/erts/emulator/test/nif_SUITE_data/nif_SUITE.c +++ b/erts/emulator/test/nif_SUITE_data/nif_SUITE.c @@ -1473,6 +1473,26 @@ static ERL_NIF_TERM otp_9668_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM ar return atom_ok; } +static ERL_NIF_TERM otp_9828_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + /* copy a writable binary could reallocate it due to "emasculation" + and thereby render a previous inspection invalid. + */ + ErlNifBinary bin1; + ErlNifEnv* myenv; + + if (!enif_inspect_binary(env, argv[0], &bin1)) { + return enif_make_badarg(env); + } + + myenv = enif_alloc_env(); + enif_make_copy(myenv, argv[0]); + enif_free_env(myenv); + + return memcmp(bin1.data, "I'm alive!", 10)==0 ? 
atom_ok : atom_false; +} + + static ERL_NIF_TERM consume_timeslice_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { int percent; @@ -1493,25 +1513,57 @@ static ERL_NIF_TERM consume_timeslice_nif(ErlNifEnv* env, int argc, const ERL_NI } } +static ERL_NIF_TERM nif_sched2(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + char s[64]; + if (!enif_get_string(env, argv[2], s, sizeof s, ERL_NIF_LATIN1)) + return enif_make_badarg(env); + return enif_make_tuple2(env, argv[3], argv[2]); +} + +static ERL_NIF_TERM nif_sched1(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + ERL_NIF_TERM new_argv[4]; + new_argv[0] = enif_make_atom(env, "garbage0"); + new_argv[1] = enif_make_atom(env, "garbage1"); + new_argv[2] = argv[0]; + new_argv[3] = argv[1]; + return enif_schedule_nif(env, "nif_sched2", 0, nif_sched2, 4, new_argv); +} + +static ERL_NIF_TERM call_nif_schedule(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + if (argc != 2) + return enif_make_atom(env, "false"); + return enif_schedule_nif(env, "nif_sched1", 0, nif_sched1, argc, argv); +} + #ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT + +static int have_dirty_schedulers(void) +{ + ErlNifSysInfo si; + enif_system_info(&si, sizeof(si)); + return si.dirty_scheduler_support; +} + static ERL_NIF_TERM dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { int n; char s[10]; ErlNifBinary b; ERL_NIF_TERM result; - if (enif_have_dirty_schedulers()) { + if (have_dirty_schedulers()) { assert(enif_is_on_dirty_scheduler(env)); } assert(argc == 3); enif_get_int(env, argv[0], &n); enif_get_string(env, argv[1], s, sizeof s, ERL_NIF_LATIN1); enif_inspect_binary(env, argv[2], &b); - result = enif_make_tuple3(env, - enif_make_int(env, n), - enif_make_string(env, s, ERL_NIF_LATIN1), - enif_make_binary(env, &b)); - return enif_schedule_dirty_nif_finalizer(env, result, enif_dirty_nif_finalizer); + return enif_make_tuple3(env, + enif_make_int(env, n), + enif_make_string(env, s, ERL_NIF_LATIN1), + enif_make_binary(env, &b)); } static ERL_NIF_TERM call_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) @@ -1522,11 +1574,11 @@ static ERL_NIF_TERM call_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM assert(!enif_is_on_dirty_scheduler(env)); if (argc != 3) return enif_make_badarg(env); - if (enif_have_dirty_schedulers()) { + if (have_dirty_schedulers()) { if (enif_get_int(env, argv[0], &n) && enif_get_string(env, argv[1], s, sizeof s, ERL_NIF_LATIN1) && enif_inspect_binary(env, argv[2], &b)) - return enif_schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_CPU_BOUND, dirty_nif, argc, argv); + return enif_schedule_nif(env, "call_dirty_nif", ERL_NIF_DIRTY_JOB_CPU_BOUND, dirty_nif, argc, argv); else return enif_make_badarg(env); } else { @@ -1534,35 +1586,42 @@ static ERL_NIF_TERM call_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM } } -static ERL_NIF_TERM dirty_sender(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +static ERL_NIF_TERM send_from_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { ERL_NIF_TERM result; ErlNifPid pid; ErlNifEnv* menv; int res; - enif_get_local_pid(env, argv[0], &pid); + if (!enif_get_local_pid(env, argv[0], &pid)) + return enif_make_badarg(env); result = enif_make_tuple2(env, enif_make_atom(env, "ok"), enif_make_pid(env, &pid)); menv = enif_alloc_env(); res = enif_send(env, &pid, menv, result); enif_free_env(menv); if (!res) - /* Note the next line will crash, since dirty nifs can't return exceptions. - * This is intentional, since enif_send should not fail if the test succeeds. 
- */ - return enif_schedule_dirty_nif_finalizer(env, enif_make_badarg(env), enif_dirty_nif_finalizer); + return enif_make_badarg(env); else - return enif_schedule_dirty_nif_finalizer(env, result, enif_dirty_nif_finalizer); + return result; } -static ERL_NIF_TERM send_from_dirty_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +static ERL_NIF_TERM call_dirty_nif_exception(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) { - ERL_NIF_TERM result; - ErlNifPid pid; - - if (!enif_get_local_pid(env, argv[0], &pid)) + switch (argc) { + case 0: { + ERL_NIF_TERM args[255]; + int i; + for (i = 0; i < 255; i++) + args[i] = enif_make_int(env, i); + return enif_schedule_nif(env, "call_dirty_nif_exception", ERL_NIF_DIRTY_JOB_CPU_BOUND, + call_dirty_nif_exception, 255, argv); + } + case 1: return enif_make_badarg(env); - return enif_schedule_dirty_nif(env, ERL_NIF_DIRTY_JOB_CPU_BOUND, dirty_sender, argc, argv); + default: + return enif_schedule_nif(env, "call_dirty_nif_exception", ERL_NIF_DIRTY_JOB_CPU_BOUND, + call_dirty_nif_exception, argc-1, argv); + } } #endif @@ -1741,10 +1800,13 @@ static ErlNifFunc nif_funcs[] = {"echo_int", 1, echo_int}, {"type_sizes", 0, type_sizes}, {"otp_9668_nif", 1, otp_9668_nif}, + {"otp_9828_nif", 1, otp_9828_nif}, {"consume_timeslice_nif", 2, consume_timeslice_nif}, + {"call_nif_schedule", 2, call_nif_schedule}, #ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT {"call_dirty_nif", 3, call_dirty_nif}, - {"send_from_dirty_nif", 1, send_from_dirty_nif}, + {"send_from_dirty_nif", 1, send_from_dirty_nif, ERL_NIF_DIRTY_JOB_CPU_BOUND}, + {"call_dirty_nif_exception", 0, call_dirty_nif_exception, ERL_NIF_DIRTY_JOB_IO_BOUND}, #endif {"is_map_nif", 1, is_map_nif}, {"get_map_size_nif", 1, get_map_size_nif}, diff --git a/erts/emulator/test/num_bif_SUITE.erl b/erts/emulator/test/num_bif_SUITE.erl index ff8d18eef8..8cf8377c30 100644 --- a/erts/emulator/test/num_bif_SUITE.erl +++ b/erts/emulator/test/num_bif_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2013. All Rights Reserved. +%% Copyright Ericsson AB 1997-2014. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -394,18 +394,15 @@ t_string_to_integer(Config) when is_list(Config) -> test_sti(268435455), test_sti(-268435455), - %% 1 bsl 28 - 1, just before 32 bit bignum - test_sti(1 bsl 28 - 1), - %% 1 bsl 28, just beyond 32 bit small - test_sti(1 bsl 28), - %% 1 bsl 33, just beyond 32 bit - test_sti(1 bsl 33), - %% 1 bsl 60 - 1, just before 64 bit bignum - test_sti(1 bsl 60 - 1), - %% 1 bsl 60, just beyond 64 bit small - test_sti(1 bsl 60), - %% 1 bsl 65, just beyond 64 bit - test_sti(1 bsl 65), + % Interesting values around 2-pows, such as MIN_SMALL and MAX_SMALL. + lists:foreach(fun(Bits) -> + N = 1 bsl Bits, + test_sti(N - 1), + test_sti(N), + test_sti(N + 1) + end, + lists:seq(16, 130)), + %% Bignums. test_sti(123456932798748738738,16), test_sti(list_to_integer(lists:duplicate(2000, $1))), @@ -454,10 +451,11 @@ test_sti(Num) -> end|| Base <- lists:seq(2,36)]. test_sti(Num,Base) -> - Num = list_to_integer(int2list(Num,Base),Base), - Num = -1*list_to_integer(int2list(Num*-1,Base),Base), - Num = binary_to_integer(int2bin(Num,Base),Base), - Num = -1*binary_to_integer(int2bin(Num*-1,Base),Base). 
+ Neg = -Num, + Num = list_to_integer(int2list(Num,Base),Base), + Neg = list_to_integer(int2list(Num*-1,Base),Base), + Num = binary_to_integer(int2bin(Num,Base),Base), + Neg = binary_to_integer(int2bin(Num*-1,Base),Base). % Calling this function (which is not supposed to be inlined) prevents % the compiler from calculating the answer, so we don't test the compiler diff --git a/erts/emulator/test/system_info_SUITE.erl b/erts/emulator/test/system_info_SUITE.erl index ceb4afb5cf..f959714be7 100644 --- a/erts/emulator/test/system_info_SUITE.erl +++ b/erts/emulator/test/system_info_SUITE.erl @@ -155,6 +155,7 @@ misc_smoke_tests(Config) when is_list(Config) -> ?line true = is_binary(erlang:system_info(loaded)), ?line true = is_binary(erlang:system_info(dist)), ?line ok = try erlang:system_info({cpu_topology,erts_get_cpu_topology_error_case}), fail catch error:badarg -> ok end, + true = lists:member(erlang:system_info(tolerant_timeofday), [enabled, disabled]), ?line ok. diff --git a/erts/emulator/test/trace_SUITE.erl b/erts/emulator/test/trace_SUITE.erl index 2251575e5a..4d7598cf1f 100644 --- a/erts/emulator/test/trace_SUITE.erl +++ b/erts/emulator/test/trace_SUITE.erl @@ -181,6 +181,13 @@ send_trace(Config) when is_list(Config) -> ?line {trace, Sender, send, to_receiver, Receiver} = receive_first(), ?line receive_nothing(), + %% Check that a message sent to another registered process is traced. + register(?MODULE,Receiver), + Sender ! {send_please, ?MODULE, to_receiver}, + {trace, Sender, send, to_receiver, ?MODULE} = receive_first(), + receive_nothing(), + unregister(?MODULE), + %% Check that a message sent to this process is traced. ?line Sender ! {send_please, self(), to_myself}, ?line receive to_myself -> ok end, @@ -188,6 +195,21 @@ send_trace(Config) when is_list(Config) -> ?line {trace, Sender, send, to_myself, Self} = receive_first(), ?line receive_nothing(), + %% Check that a message sent to dead process is traced. + {Pid,Ref} = spawn_monitor(fun() -> ok end), + receive {'DOWN',Ref,_,_,_} -> ok end, + Sender ! {send_please, Pid, to_dead}, + {trace, Sender, send_to_non_existing_process, to_dead, Pid} = receive_first(), + receive_nothing(), + + %% Check that a message sent to unknown registrated process is traced. + BadargSender = fun_spawn(fun sender/0), + 1 = erlang:trace(BadargSender, true, [send]), + unlink(BadargSender), + BadargSender ! {send_please, not_registered, to_unknown}, + {trace, BadargSender, send, to_unknown, not_registered} = receive_first(), + receive_nothing(), + %% Another process should not be able to trace Sender. ?line Intruder = fun_spawn(fun() -> erlang:trace(Sender, true, [send]) end), ?line {'EXIT', Intruder, {badarg, _}} = receive_first(), diff --git a/erts/emulator/test/tuple_SUITE.erl b/erts/emulator/test/tuple_SUITE.erl index 46ece41096..f627eea07f 100644 --- a/erts/emulator/test/tuple_SUITE.erl +++ b/erts/emulator/test/tuple_SUITE.erl @@ -21,8 +21,9 @@ init_per_group/2,end_per_group/2, t_size/1, t_tuple_size/1, t_element/1, t_setelement/1, t_insert_element/1, t_delete_element/1, - t_list_to_tuple/1, t_tuple_to_list/1, - t_make_tuple_2/1, t_make_tuple_3/1, t_append_element/1, + t_list_to_tuple/1, t_list_to_upper_boundry_tuple/1, t_tuple_to_list/1, + t_make_tuple_2/1, t_make_upper_boundry_tuple_2/1, t_make_tuple_3/1, + t_append_element/1, t_append_element_upper_boundry/1, build_and_match/1, tuple_with_case/1, tuple_in_guard/1]). -include_lib("test_server/include/test_server.hrl"). @@ -40,8 +41,10 @@ suite() -> [{ct_hooks,[ts_install_cth]}]. 
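The extra send_trace/1 cases above rely on the shape of send trace messages delivered to the tracer. A minimal sketch, assuming the tracer is the process that called erlang:trace/3 (the default when no tracer option is given); trace_sends/1 is an illustrative helper, not part of the suite:

    %% Enable send tracing on Pid. Trace messages arrive here as
    %% {trace, Pid, send, Msg, To}, or with the tag
    %% send_to_non_existing_process when the destination is dead.
    trace_sends(Pid) ->
        1 = erlang:trace(Pid, true, [send]),
        receive
            {trace, Pid, send, Msg, To} ->
                {sent, Msg, To};
            {trace, Pid, send_to_non_existing_process, Msg, To} ->
                {dead_destination, Msg, To}
        end.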
all() -> [build_and_match, t_size, t_tuple_size, t_list_to_tuple, + t_list_to_upper_boundry_tuple, t_tuple_to_list, t_element, t_setelement, - t_make_tuple_2, t_make_tuple_3, t_append_element, + t_make_tuple_2, t_make_upper_boundry_tuple_2, t_make_tuple_3, + t_append_element, t_append_element_upper_boundry, t_insert_element, t_delete_element, tuple_with_case, tuple_in_guard]. @@ -49,11 +52,21 @@ groups() -> []. init_per_suite(Config) -> + A0 = case application:start(sasl) of + ok -> [sasl]; + _ -> [] + end, + A = case application:start(os_mon) of + ok -> [os_mon|A0]; + _ -> A0 + end, + [{started_apps, A}|Config]. + +end_per_suite(Config) -> + As = ?config(started_apps, Config), + lists:foreach(fun (A) -> application:stop(A) end, As), Config. -end_per_suite(_Config) -> - ok. - init_per_group(_GroupName, Config) -> Config. @@ -176,14 +189,19 @@ t_list_to_tuple(Config) when is_list(Config) -> {'EXIT', {badarg, _}} = (catch list_to_tuple(id([a|b]))), {'EXIT', {badarg, _}} = (catch list_to_tuple(id([a|b]))), - % test upper boundry, 16777215 elements - MaxSize = 1 bsl 24 - 1, - MaxTuple = list_to_tuple(lists:seq(1, MaxSize)), - MaxSize = size(MaxTuple), - {'EXIT', {badarg,_}} = (catch list_to_tuple(lists:seq(1, 1 bsl 24))), ok. +t_list_to_upper_boundry_tuple(Config) when is_list(Config) -> + sys_mem_cond_run(2048, + fun () -> + %% test upper boundry, 16777215 elements + MaxSize = 1 bsl 24 - 1, + MaxTuple = list_to_tuple(lists:seq(1, MaxSize)), + MaxSize = size(MaxTuple), + ok + end). + %% Tests tuple_to_list/1. t_tuple_to_list(Config) when is_list(Config) -> @@ -214,8 +232,6 @@ t_make_tuple_2(Config) when is_list(Config) -> t_make_tuple1({a}), t_make_tuple1(erlang:make_tuple(400, [])), - % test upper boundry, 16777215 elements - t_make_tuple(1 bsl 24 - 1, a), {'EXIT', {badarg,_}} = (catch erlang:make_tuple(1 bsl 24, a)), {'EXIT', {badarg,_}} = (catch erlang:make_tuple(-1, a)), @@ -225,6 +241,13 @@ t_make_tuple_2(Config) when is_list(Config) -> {'EXIT', {badarg,_}} = (catch erlang:make_tuple(1 bsl 65 + 3, a)), ok. +t_make_upper_boundry_tuple_2(Config) when is_list(Config) -> + sys_mem_cond_run(2048, + fun () -> + %% test upper boundry, 16777215 elements + t_make_tuple(1 bsl 24 - 1, a) + end). + t_make_tuple1(Element) -> lists:foreach(fun(Size) -> t_make_tuple(Size, Element) end, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255, 256, 511, 512, 999, @@ -309,13 +332,17 @@ t_delete_element(Config) when is_list(Config) -> %% Tests the append_element/2 BIF. t_append_element(Config) when is_list(Config) -> - ok = t_append_element({}, 2048, 2048), - - % test upper boundry, 16777215 elements - MaxSize = 1 bsl 24 - 1, - MaxTuple = list_to_tuple(lists:seq(1, MaxSize)), - {'EXIT',{badarg,_}} = (catch erlang:append_element(MaxTuple, a)), - ok. + ok = t_append_element({}, 2048, 2048). + +t_append_element_upper_boundry(Config) when is_list(Config) -> + sys_mem_cond_run(2048, + fun () -> + %% test upper boundry, 16777215 elements + MaxSize = 1 bsl 24 - 1, + MaxTuple = list_to_tuple(lists:seq(1, MaxSize)), + {'EXIT',{badarg,_}} = (catch erlang:append_element(MaxTuple, a)), + ok + end). t_append_element(_Tuple, 0, _High) -> ok; t_append_element(Tuple, N, High) -> @@ -371,3 +398,31 @@ tuple_in_guard(Config) when is_list(Config) -> %% Use this function to avoid compile-time evaluation of an expression. id(I) -> I. 
+ +sys_mem_cond_run(ReqSizeMB, TestFun) when is_integer(ReqSizeMB) -> + case total_memory() of + TotMem when is_integer(TotMem), TotMem >= ReqSizeMB -> + TestFun(); + TotMem when is_integer(TotMem) -> + {skipped, "Not enough memory ("++integer_to_list(TotMem)++" MB)"}; + undefined -> + {skipped, "Could not retrieve memory information"} + end. + + +total_memory() -> + %% Totat memory in MB. + try + MemoryData = memsup:get_system_memory_data(), + case lists:keysearch(total_memory, 1, MemoryData) of + {value, {total_memory, TM}} -> + TM div (1024*1024); + false -> + {value, {system_total_memory, STM}} = + lists:keysearch(system_total_memory, 1, MemoryData), + STM div (1024*1024) + end + catch + _ : _ -> + undefined + end. diff --git a/erts/epmd/src/epmd.c b/erts/epmd/src/epmd.c index 3cfa7a782f..9630e0cdf0 100644 --- a/erts/epmd/src/epmd.c +++ b/erts/epmd/src/epmd.c @@ -498,7 +498,11 @@ static void dbg_gen_printf(int onsyslog,int perr,int from_level, #ifdef HAVE_SYSLOG_H if (onsyslog) { - erts_vsnprintf(buf, DEBUG_BUFFER_SIZE, format, args); + int len; + len = erts_vsnprintf(buf, DEBUG_BUFFER_SIZE, format, args); + if (perr != 0 && len < sizeof(buf)) { + erts_snprintf(buf+len, sizeof(buf)-len, ": %s", strerror(perr)); + } syslog(LOG_ERR,"epmd: %s",buf); } #endif diff --git a/erts/etc/common/run_erl_common.c b/erts/etc/common/run_erl_common.c index dc55c2bea4..580b6cc3c5 100644 --- a/erts/etc/common/run_erl_common.c +++ b/erts/etc/common/run_erl_common.c @@ -74,15 +74,6 @@ * run_erl multiple times with different global variables without them * effecting eachother. */ -typedef struct run_erl_ run_erl; - -#ifdef __OSE__ -static OSPPDKEY run_erl_pp_key; -#define RE_DATA (*(run_erl**)ose_get_ppdata(run_erl_pp_key)) -#else -static run_erl re; -#define RE_DATA (&re) -#endif #define STATUSFILE (RE_DATA->statusfile) #define LOG_DIR (RE_DATA->log_dir) @@ -116,6 +107,16 @@ struct run_erl_ { unsigned protocol_ver; }; +typedef struct run_erl_ run_erl; + +#ifdef __OSE__ +static OSPPDKEY run_erl_pp_key; +#define RE_DATA (*(run_erl**)ose_get_ppdata(run_erl_pp_key)) +#else +static run_erl re; +#define RE_DATA (&re) +#endif + /* prototypes */ static int next_log(int log_num); diff --git a/erts/etc/unix/etp-commands.in b/erts/etc/unix/etp-commands.in index 19c92681d0..bf6eb00314 100644 --- a/erts/etc/unix/etp-commands.in +++ b/erts/etc/unix/etp-commands.in @@ -2842,6 +2842,152 @@ document etp-search-alloc end +define etp-alloc-stats + printf "\nIx Name Inst. 
Blocks Bytes Carriers Crr.bytes Util\n" + set $etp_tot_block_no = 0 + set $etp_tot_block_sz = 0 + set $etp_tot_crr_no = 0 + set $etp_tot_crr_sz = 0 + set $etp_ERTS_ALC_A_MIN = 1 + set $etp_ERTS_ALC_A_MAX = (sizeof(erts_allctrs) / sizeof(*erts_allctrs)) - 1 + + set $etp_ix = $etp_ERTS_ALC_A_MIN + while $etp_ix <= $etp_ERTS_ALC_A_MAX + set $etp_allctr = 0 + set $etp_alloc = erts_allctrs[$etp_ix].alloc + if $etp_alloc != erts_sys_alloc + if $etp_alloc == erts_alcu_alloc_thr_spec || \ + $etp_alloc == erts_alcu_alloc_thr_pref + set $etp_instance = 0 + set $etp_block_no = 0 + set $etp_block_sz = 0 + set $etp_crr_no = 0 + set $etp_crr_sz = 0 + set $etp_tspec = (ErtsAllocatorThrSpec_t *) erts_allctrs[$etp_ix].extra + if $etp_tspec->enabled + while $etp_instance < $etp_tspec->size + set $etp_allctr = $etp_tspec->allctr[$etp_instance] + set $etp_block_no = $etp_block_no + $etp_allctr->mbcs.blocks.curr.no \ + + $etp_allctr->sbcs.blocks.curr.no + set $etp_block_sz = $etp_block_sz + $etp_allctr->mbcs.blocks.curr.size \ + + $etp_allctr->sbcs.blocks.curr.size + set $etp_crr_no = $etp_crr_no + $etp_allctr->mbcs.curr.norm.mseg.no \ + + $etp_allctr->sbcs.curr.norm.mseg.no \ + + $etp_allctr->mbcs.curr.norm.sys_alloc.no \ + + $etp_allctr->sbcs.curr.norm.sys_alloc.no + set $etp_crr_sz = $etp_crr_sz + $etp_allctr->mbcs.curr.norm.mseg.size \ + + $etp_allctr->sbcs.curr.norm.mseg.size \ + + $etp_allctr->mbcs.curr.norm.sys_alloc.size \ + + $etp_allctr->sbcs.curr.norm.sys_alloc.size + set $etp_instance = $etp_instance + 1 + end + else + printf "erts_allctr[%d]: Disabled (thread specific)\n", $etp_ix + end + else + if $etp_alloc == erts_alcu_alloc_ts || $etp_alloc == erts_alcu_alloc + set $etp_allctr = (Allctr_t*) erts_allctrs[$etp_ix].extra + set $etp_block_no = $etp_allctr->mbcs.blocks.curr.no \ + + $etp_allctr->sbcs.blocks.curr.no + set $etp_block_sz = $etp_allctr->mbcs.blocks.curr.size \ + + $etp_allctr->sbcs.blocks.curr.size + set $etp_crr_no = $etp_allctr->mbcs.curr.norm.mseg.no \ + + $etp_allctr->sbcs.curr.norm.mseg.no \ + + $etp_allctr->mbcs.curr.norm.sys_alloc.no \ + + $etp_allctr->sbcs.curr.norm.sys_alloc.no + set $etp_crr_sz = $etp_allctr->mbcs.curr.norm.mseg.size \ + + $etp_allctr->sbcs.curr.norm.mseg.size \ + + $etp_allctr->mbcs.curr.norm.sys_alloc.size \ + + $etp_allctr->sbcs.curr.norm.sys_alloc.size + set $etp_instance = 1 + else + printf "erts_allctr[%d]: Unknown allocation function: ", $etp_ix + p $etp_alloc + end + end + end + if $etp_allctr != 0 + printf "%2d %-8s%2d%12lu%13lu%12lu%13lu", $etp_ix, $etp_allctr->name_prefix, \ + $etp_instance, \ + $etp_block_no, $etp_block_sz, $etp_crr_no, $etp_crr_sz + if $etp_crr_sz != 0 + printf "%5lu%%", ($etp_block_sz * 100) / $etp_crr_sz + end + printf "\n" + set $etp_tot_block_no = $etp_tot_block_no + $etp_block_no + set $etp_tot_block_sz = $etp_tot_block_sz + $etp_block_sz + set $etp_tot_crr_no = $etp_tot_crr_no + $etp_crr_no + set $etp_tot_crr_sz = $etp_tot_crr_sz + $etp_crr_sz + end + set $etp_ix = $etp_ix + 1 + end + printf "\nTotal: %12lu%13lu%12lu%13lu", $etp_tot_block_no, $etp_tot_block_sz, \ + $etp_tot_crr_no, $etp_tot_crr_sz + if $etp_tot_crr_sz != 0 + printf "%5lu%%", ($etp_tot_block_sz * 100) / $etp_tot_crr_sz + end + printf "\n" +end + +document etp-alloc-stats +%--------------------------------------------------------------------------- +% etp-alloc-stats +% +% Combine and print allocator statistics +%--------------------------------------------------------------------------- +end + + +define etp-alloc-instances + set $etp_ERTS_ALC_A_MIN = 1 + set 
$etp_ERTS_ALC_A_MAX = (sizeof(erts_allctrs) / sizeof(*erts_allctrs)) - 1 + + set $etp_ix = $arg0 + if $etp_ix >= $etp_ERTS_ALC_A_MIN && $etp_ix <= $etp_ERTS_ALC_A_MAX + set $etp_allctr = 0 + set $etp_alloc = erts_allctrs[$etp_ix].alloc + if $etp_alloc == erts_sys_alloc + printf "Allocator %d is sys_alloc\n", $etp_ix + else + if $etp_alloc == erts_alcu_alloc_thr_spec || \ + $etp_alloc == erts_alcu_alloc_thr_pref + set $etp_instance = 0 + set $etp_tspec = (ErtsAllocatorThrSpec_t *) erts_allctrs[$etp_ix].extra + if $etp_tspec->enabled + printf "All instances for allocator '%s'\n", $etp_tspec->allctr[0]->name_prefix + while $etp_instance < $etp_tspec->size + p $etp_tspec->allctr[$etp_instance] + set $etp_instance = $etp_instance + 1 + end + else + printf "erts_allctr[%d]: Disabled (thread specific)\n", $etp_ix + end + else + if $etp_alloc == erts_alcu_alloc_ts || $etp_alloc == erts_alcu_alloc + set $etp_allctr = (Allctr_t*) erts_allctrs[$etp_ix].extra + printf "Single instances for allocator '%s'\n", $etp_allctr->name_prefix + p $etp_allctr + else + printf "erts_allctr[%d]: Unknown allocation function: ", $etp_ix + p $etp_alloc + end + end + end + else + printf "Allocator type not between %d and %d\n", $etp_ERTS_ALC_A_MIN, $etp_ERTS_ALC_A_MAX + end +end + +document etp-alloc-instances +%--------------------------------------------------------------------------- +% etp-alloc-instances +% +% Print pointers to all allocator instances for a specific type (Ix) +%--------------------------------------------------------------------------- +end + + + define etp-overlapped-heaps # Args: diff --git a/erts/etc/unix/run_erl.c b/erts/etc/unix/run_erl.c index a6fc4c2bf5..4b123b8911 100644 --- a/erts/etc/unix/run_erl.c +++ b/erts/etc/unix/run_erl.c @@ -40,9 +40,13 @@ #ifdef HAVE_CONFIG_H # include "config.h" #endif + #ifdef HAVE_WORKING_POSIX_OPENPT +#ifndef _XOPEN_SOURCE #define _XOPEN_SOURCE 600 #endif +#endif + #include <sys/types.h> #include <sys/wait.h> #include <sys/stat.h> diff --git a/erts/include/internal/ethread.h b/erts/include/internal/ethread.h index 54acd1295a..72c054b588 100644 --- a/erts/include/internal/ethread.h +++ b/erts/include/internal/ethread.h @@ -31,6 +31,7 @@ #endif #include <stdlib.h> +#include "ethread_inline.h" #include "erl_errno.h" #if defined(DEBUG) @@ -51,16 +52,12 @@ # endif #endif -#undef ETHR_INLINE -#if defined(__GNUC__) -# define ETHR_INLINE __inline__ -#elif defined(__WIN32__) -# define ETHR_INLINE __forceinline -#endif #if defined(ETHR_DEBUG) || !defined(ETHR_INLINE) || ETHR_XCHK \ || (defined(__GNUC__) && defined(ERTS_MIXED_CYGWIN_VC)) # undef ETHR_INLINE # define ETHR_INLINE +# undef ETHR_FORCE_INLINE +# define ETHR_FORCE_INLINE # undef ETHR_TRY_INLINE_FUNCS #endif @@ -285,19 +282,6 @@ ETHR_PROTO_NORETURN__ ethr_fatal_error__(const char *file, const char *func, int err); -#if !defined(__GNUC__) -# define ETHR_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) 0 -#elif !defined(__GNUC_MINOR__) -# define ETHR_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \ - ((__GNUC__ << 24) >= (((MAJ) << 24) | ((MIN) << 12) | (PL))) -#elif !defined(__GNUC_PATCHLEVEL__) -# define ETHR_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \ - (((__GNUC__ << 24) | (__GNUC_MINOR__ << 12)) >= (((MAJ) << 24) | ((MIN) << 12) | (PL))) -#else -# define ETHR_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \ - (((__GNUC__ << 24) | (__GNUC_MINOR__ << 12) | __GNUC_PATCHLEVEL__) >= (((MAJ) << 24) | ((MIN) << 12) | (PL))) -#endif - #if !ETHR_AT_LEAST_GCC_VSN__(2, 96, 0) #define __builtin_expect(X, Y) (X) #endif diff --git a/erts/include/internal/ethread_inline.h 
b/erts/include/internal/ethread_inline.h new file mode 100644 index 0000000000..ffb756c84f --- /dev/null +++ b/erts/include/internal/ethread_inline.h @@ -0,0 +1,49 @@ +/* + * %CopyrightBegin% + * + * Copyright Ericsson AB 2004-2014. All Rights Reserved. + * + * The contents of this file are subject to the Erlang Public License, + * Version 1.1, (the "License"); you may not use this file except in + * compliance with the License. You should have received a copy of the + * Erlang Public License along with this software. If not, it can be + * retrieved online at http://www.erlang.org/. + * + * Software distributed under the License is distributed on an "AS IS" + * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See + * the License for the specific language governing rights and limitations + * under the License. + * + * %CopyrightEnd% + */ + +#ifndef ETHREAD_INLINE_H__ +#define ETHREAD_INLINE_H__ + +#if !defined(__GNUC__) +# define ETHR_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) 0 +#elif !defined(__GNUC_MINOR__) +# define ETHR_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \ + ((__GNUC__ << 24) >= (((MAJ) << 24) | ((MIN) << 12) | (PL))) +#elif !defined(__GNUC_PATCHLEVEL__) +# define ETHR_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \ + (((__GNUC__ << 24) | (__GNUC_MINOR__ << 12)) >= (((MAJ) << 24) | ((MIN) << 12) | (PL))) +#else +# define ETHR_AT_LEAST_GCC_VSN__(MAJ, MIN, PL) \ + (((__GNUC__ << 24) | (__GNUC_MINOR__ << 12) | __GNUC_PATCHLEVEL__) >= (((MAJ) << 24) | ((MIN) << 12) | (PL))) +#endif + +#undef ETHR_INLINE +#if defined(__GNUC__) +# define ETHR_INLINE __inline__ +# if ETHR_AT_LEAST_GCC_VSN__(3, 1, 1) +# define ETHR_FORCE_INLINE __inline__ __attribute__((__always_inline__)) +# else +# define ETHR_FORCE_INLINE __inline__ +# endif +#elif defined(__WIN32__) +# define ETHR_INLINE __forceinline +# define ETHR_FORCE_INLINE __forceinline +#endif + +#endif /* #ifndef ETHREAD_INLINE_H__ */ diff --git a/erts/include/internal/win/ethr_membar.h b/erts/include/internal/win/ethr_membar.h index 8237660b2c..a17f2459fc 100644 --- a/erts/include/internal/win/ethr_membar.h +++ b/erts/include/internal/win/ethr_membar.h @@ -63,13 +63,13 @@ do { \ #pragma intrinsic(_mm_sfence) #pragma intrinsic(_mm_lfence) -static __forceinline void +static ETHR_FORCE_INLINE void ethr_cfence__(void) { _ReadWriteBarrier(); } -static __forceinline void +static ETHR_FORCE_INLINE void ethr_mfence__(void) { #if ETHR_SIZEOF_PTR == 4 @@ -80,7 +80,7 @@ ethr_mfence__(void) _mm_mfence(); } -static __forceinline void +static ETHR_FORCE_INLINE void ethr_sfence__(void) { #if ETHR_SIZEOF_PTR == 4 @@ -91,7 +91,7 @@ ethr_sfence__(void) _mm_sfence(); } -static __forceinline void +static ETHR_FORCE_INLINE void ethr_lfence__(void) { #if ETHR_SIZEOF_PTR == 4 diff --git a/erts/lib_src/Makefile.in b/erts/lib_src/Makefile.in index cf1aef518a..b680c03b1d 100644 --- a/erts/lib_src/Makefile.in +++ b/erts/lib_src/Makefile.in @@ -465,6 +465,7 @@ RELEASE_LIBS=$(ERTS_LIBS) INTERNAL_RELEASE_INCLUDES= \ $(ERTS_INCL_INT)/README \ $(ERTS_INCL_INT)/ethread.h \ + $(ERTS_INCL_INT)/ethread_inline.h \ $(ERTS_INCL_INT)/ethr_mutex.h \ $(ERTS_INCL_INT)/ethr_optimized_fallbacks.h \ $(ERTS_INCL_INT)/ethr_atomics.h \ diff --git a/erts/lib_src/common/erl_misc_utils.c b/erts/lib_src/common/erl_misc_utils.c index 5a271c5268..d58a28b5cb 100644 --- a/erts/lib_src/common/erl_misc_utils.c +++ b/erts/lib_src/common/erl_misc_utils.c @@ -25,6 +25,7 @@ # include <windows.h> #endif +#include "ethread_inline.h" #include "erl_misc_utils.h" #if defined(__WIN32__) @@ -191,7 +192,7 @@ struct 
erts_cpu_info_t_ { #if defined(__WIN32__) -static __forceinline int +static ETHR_FORCE_INLINE int get_proc_affinity(erts_cpu_info_t *cpuinfo, cpu_set_t *cpuset) { DWORD_PTR pamask; @@ -206,7 +207,7 @@ get_proc_affinity(erts_cpu_info_t *cpuinfo, cpu_set_t *cpuset) } } -static __forceinline int +static ETHR_FORCE_INLINE int set_thr_affinity(cpu_set_t *set) { if (*set == (cpu_set_t) 0) @@ -1157,7 +1158,7 @@ read_topology(erts_cpu_info_t *cpuinfo) #define ERTS_MU_RELATION_CACHE 2 /* RelationCache */ #define ERTS_MU_RELATION_PROCESSOR_PACKAGE 3 /* RelationProcessorPackage */ -static __forceinline int +static ETHR_FORCE_INLINE int rel_cmp_val(int r) { switch (r) { diff --git a/erts/lib_src/common/ethr_aux.c b/erts/lib_src/common/ethr_aux.c index ceecdcef64..b77f2178f2 100644 --- a/erts/lib_src/common/ethr_aux.c +++ b/erts/lib_src/common/ethr_aux.c @@ -1,7 +1,7 @@ /* * %CopyrightBegin% * - * Copyright Ericsson AB 2010-2012. All Rights Reserved. + * Copyright Ericsson AB 2010-2014. All Rights Reserved. * * The contents of this file are subject to the Erlang Public License, * Version 1.1, (the "License"); you may not use this file except in @@ -360,10 +360,10 @@ static ethr_ts_event *ts_event_pool(int size, ethr_ts_event **endpp) int i; ethr_aligned_ts_event *atsev; atsev = ethr_mem__.std.alloc(sizeof(ethr_aligned_ts_event) * size - + ETHR_CACHE_LINE_SIZE); + + ETHR_CACHE_LINE_SIZE - 1); if (!atsev) return NULL; - if ((((ethr_uint_t) atsev) & ETHR_CACHE_LINE_MASK) == 0) + if ((((ethr_uint_t) atsev) & ETHR_CACHE_LINE_MASK) != 0) atsev = ((ethr_aligned_ts_event *) ((((ethr_uint_t) atsev) & ~ETHR_CACHE_LINE_MASK) + ETHR_CACHE_LINE_SIZE)); diff --git a/erts/lib_src/common/ethr_mutex.c b/erts/lib_src/common/ethr_mutex.c index 72b44033ad..4e56efaf8b 100644 --- a/erts/lib_src/common/ethr_mutex.c +++ b/erts/lib_src/common/ethr_mutex.c @@ -1433,7 +1433,7 @@ void LeaveCriticalSection(CRITICAL_SECTION *cs) #define ETHR_CND_WAIT__ ((ethr_sint32_t) 0x11dead11) #define ETHR_CND_WAKEUP__ ((ethr_sint32_t) 0x11beef11) -static __forceinline void +static ETHR_FORCE_INLINE void cond_wakeup(ethr_ts_event *tse) { ETHR_ASSERT(ethr_atomic32_read(&tse->uaflgs) == ETHR_CND_WAIT__); @@ -1574,7 +1574,7 @@ ethr_cond_wait(ethr_cond *cnd, ethr_mutex *mtx) return 0; } -static __forceinline void +static ETHR_FORCE_INLINE void posix_compliant_mtx_enqueue(ethr_mutex *mtx, ethr_ts_event *tse_start, ethr_ts_event *tse_end) @@ -1614,7 +1614,7 @@ posix_compliant_mtx_enqueue(ethr_mutex *mtx, } } -static __forceinline void +static ETHR_FORCE_INLINE void enqueue_cond_wakeups(ethr_ts_event *queue, int posix_compliant) { if (queue) { diff --git a/erts/preloaded/ebin/erl_prim_loader.beam b/erts/preloaded/ebin/erl_prim_loader.beam Binary files differindex f1e588320b..001c9c76ed 100644 --- a/erts/preloaded/ebin/erl_prim_loader.beam +++ b/erts/preloaded/ebin/erl_prim_loader.beam diff --git a/erts/preloaded/ebin/erlang.beam b/erts/preloaded/ebin/erlang.beam Binary files differindex e19bb370bc..c5cf4b459f 100644 --- a/erts/preloaded/ebin/erlang.beam +++ b/erts/preloaded/ebin/erlang.beam diff --git a/erts/preloaded/ebin/erts_internal.beam b/erts/preloaded/ebin/erts_internal.beam Binary files differindex 3d650aff74..b467633a4d 100644 --- a/erts/preloaded/ebin/erts_internal.beam +++ b/erts/preloaded/ebin/erts_internal.beam diff --git a/erts/preloaded/ebin/init.beam b/erts/preloaded/ebin/init.beam Binary files differindex 26f779500c..915a2d1aef 100644 --- a/erts/preloaded/ebin/init.beam +++ b/erts/preloaded/ebin/init.beam diff --git 
a/erts/preloaded/ebin/otp_ring0.beam b/erts/preloaded/ebin/otp_ring0.beam Binary files differindex 4d22d8bace..df1bf932a3 100644 --- a/erts/preloaded/ebin/otp_ring0.beam +++ b/erts/preloaded/ebin/otp_ring0.beam diff --git a/erts/preloaded/ebin/prim_eval.beam b/erts/preloaded/ebin/prim_eval.beam Binary files differindex efc8347b6e..95b4bbca2d 100644 --- a/erts/preloaded/ebin/prim_eval.beam +++ b/erts/preloaded/ebin/prim_eval.beam diff --git a/erts/preloaded/ebin/prim_file.beam b/erts/preloaded/ebin/prim_file.beam Binary files differindex 6c49b5185e..5e4fc5ba84 100644 --- a/erts/preloaded/ebin/prim_file.beam +++ b/erts/preloaded/ebin/prim_file.beam diff --git a/erts/preloaded/ebin/prim_inet.beam b/erts/preloaded/ebin/prim_inet.beam Binary files differindex fe5431c5ff..9d9a4886d9 100644 --- a/erts/preloaded/ebin/prim_inet.beam +++ b/erts/preloaded/ebin/prim_inet.beam diff --git a/erts/preloaded/ebin/prim_zip.beam b/erts/preloaded/ebin/prim_zip.beam Binary files differindex 73be297bbb..d98b0275f4 100644 --- a/erts/preloaded/ebin/prim_zip.beam +++ b/erts/preloaded/ebin/prim_zip.beam diff --git a/erts/preloaded/ebin/zlib.beam b/erts/preloaded/ebin/zlib.beam Binary files differindex 193cebdc31..0744bdb21d 100644 --- a/erts/preloaded/ebin/zlib.beam +++ b/erts/preloaded/ebin/zlib.beam diff --git a/erts/preloaded/src/add_abstract_code b/erts/preloaded/src/add_abstract_code index e670156d21..211a60c930 100644 --- a/erts/preloaded/src/add_abstract_code +++ b/erts/preloaded/src/add_abstract_code @@ -27,8 +27,18 @@ main([BeamFile,AbstrFile]) -> {ok,_,Chunks0} = beam_lib:all_chunks(BeamFile), {ok,Abstr} = file:consult(AbstrFile), - Chunks = lists:keyreplace("Abst", 1, Chunks0, - {"Abst",term_to_binary({raw_abstract_v1,Abstr})}), + Chunks1 = lists:keyreplace("Abst", 1, Chunks0, + {"Abst",term_to_binary({raw_abstract_v1,Abstr})}), + {"CInf",CInf0} = lists:keyfind("CInf", 1, Chunks1), + CInf = fix_options(CInf0), + Chunks = lists:keyreplace("CInf", 1, Chunks1, {"CInf",CInf}), {ok,Module} = beam_lib:build_module(Chunks), ok = file:write_file(BeamFile, Module), init:stop(). + +fix_options(CInf0) -> + CInf1 = binary_to_term(CInf0), + {options,Opts0} = lists:keyfind(options, 1, CInf1), + Opts = Opts0 -- [from_asm], + CInf = lists:keyreplace(options, 1, CInf1, {options,Opts}), + term_to_binary(CInf). diff --git a/erts/preloaded/src/erl_prim_loader.erl b/erts/preloaded/src/erl_prim_loader.erl index 578913b633..6b86a427ba 100644 --- a/erts/preloaded/src/erl_prim_loader.erl +++ b/erts/preloaded/src/erl_prim_loader.erl @@ -42,11 +42,11 @@ %% Public -export([start/3, set_path/1, get_path/0, get_file/1, get_files/2, - list_dir/1, read_file_info/1, get_cwd/0, get_cwd/1]). + list_dir/1, read_file_info/1, read_link_info/1, get_cwd/0, get_cwd/1]). %% Used by erl_boot_server -export([prim_init/0, prim_get_file/2, prim_list_dir/2, - prim_read_file_info/2, prim_get_cwd/2]). + prim_read_file_info/3, prim_get_cwd/2]). %% Used by escript and code -export([set_primary_archive/4, release_archives/0]). @@ -223,6 +223,12 @@ list_dir(Dir) -> read_file_info(File) -> check_file_result(read_file_info, File, request({read_file_info,File})). +-spec read_link_info(Filename) -> {'ok', FileInfo} | 'error' when + Filename :: string(), + FileInfo :: file:file_info(). +read_link_info(File) -> + check_file_result(read_link_info, File, request({read_link_info,File})). + -spec get_cwd() -> {'ok', string()} | 'error'. get_cwd() -> check_file_result(get_cwd, [], request({get_cwd,[]})). 
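read_link_info/1 above mirrors read_file_info/1 but reports on the link itself instead of following it (the efile path passes a FollowLinks flag further down). A hedged sketch of telling a symlink apart from its target via the #file_info{} record; is_symlink/1 is an illustrative helper:

    -include_lib("kernel/include/file.hrl").

    %% read_link_info/1 returns the info of the link itself, so its
    %% type field is symlink for a symbolic link.
    is_symlink(Path) ->
        case erl_prim_loader:read_link_info(Path) of
            {ok, #file_info{type = symlink}} -> true;
            {ok, #file_info{}} -> false;
            error -> false
        end.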
@@ -325,6 +331,9 @@ loop(State, Parent, Paths) -> {read_file_info,File} -> {Res,State1} = handle_read_file_info(State, File), {Res,State1,Paths}; + {read_link_info,File} -> + {Res,State1} = handle_read_link_info(State, File), + {Res,State1,Paths}; {get_cwd,[]} -> {Res,State1} = handle_get_cwd(State, []), {Res,State1,Paths}; @@ -387,10 +396,15 @@ handle_list_dir(State = #state{loader = inet}, Dir) -> ?SAFE2(inet_list_dir(State, Dir), State). handle_read_file_info(State = #state{loader = efile}, File) -> - ?SAFE2(efile_read_file_info(State, File), State); + ?SAFE2(efile_read_file_info(State, File, true), State); handle_read_file_info(State = #state{loader = inet}, File) -> ?SAFE2(inet_read_file_info(State, File), State). +handle_read_link_info(State = #state{loader = efile}, File) -> + ?SAFE2(efile_read_file_info(State, File, false), State); +handle_read_link_info(State = #state{loader = inet}, File) -> + ?SAFE2(inet_read_link_info(State, File), State). + handle_get_cwd(State = #state{loader = efile}, Drive) -> ?SAFE2(efile_get_cwd(State, Drive), State); handle_get_cwd(State = #state{loader = inet}, Drive) -> @@ -514,8 +528,8 @@ efile_list_dir(#state{prim_state = PS} = State, Dir) -> {Res, PS2} = prim_list_dir(PS, Dir), {Res, State#state{prim_state = PS2}}. -efile_read_file_info(#state{prim_state = PS} = State, File) -> - {Res, PS2} = prim_read_file_info(PS, File), +efile_read_file_info(#state{prim_state = PS} = State, File, FollowLinks) -> + {Res, PS2} = prim_read_file_info(PS, File, FollowLinks), {Res, State#state{prim_state = PS2}}. efile_get_cwd(#state{prim_state = PS} = State, Drive) -> @@ -718,6 +732,10 @@ inet_list_dir(State, Dir) -> inet_read_file_info(State, File) -> inet_send_and_rcv({read_file_info,File}, read_file_info, State). +%% -> {{ok,Info},State} | {{error,Reason},State} +inet_read_link_info(State, File) -> + inet_send_and_rcv({read_link_info,File}, read_link_info, State). + %% -> {{ok,Cwd},State} | {{error,Reason},State} inet_get_cwd(State, []) -> inet_send_and_rcv(get_cwd, get_cwd, State); @@ -951,16 +969,18 @@ prim_list_dir(PS, Dir) -> debug(PS, {return, Res2}), {Res2, PS3}. --spec prim_read_file_info(prim_state(), file:filename()) -> +-spec prim_read_file_info(prim_state(), file:filename(), boolean()) -> {{'ok', #file_info{}}, prim_state()} | {{'error', term()}, prim_state()}. 
-prim_read_file_info(PS, File) -> +prim_read_file_info(PS, File, FollowLinks) -> debug(PS, {read_file_info, File}), {Res2, PS2} = case name_split(PS#prim_state.primary_archive, File) of {file, PrimFile} -> - Res = prim_file:read_file_info(PrimFile), - {Res, PS}; + case FollowLinks of + true -> {prim_file:read_file_info(PrimFile), PS}; + false -> {prim_file:read_link_info(PrimFile), PS} + end; {archive, ArchiveFile, []} -> %% Fake top directory debug(PS, {archive_read_file_info, ArchiveFile}), @@ -1487,7 +1507,14 @@ real_path(Name,[Path|Paths],Acc,Links) -> [""|_] = LinkPaths -> real_path(Name,LinkPaths++Paths,[],[ThisFile|Links]); LinkPaths -> - real_path(Name,LinkPaths++Paths,Acc,[ThisFile|Links]) + % windows currently does not allow creation of relative symlinks + % across different drives + case erlang:system_info(os_type) of + {win32, _} -> + real_path(Name,LinkPaths++Paths,[],[ThisFile|Links]); + _ -> + real_path(Name,LinkPaths++Paths,Acc,[ThisFile|Links]) + end end; _ -> real_path(Name,Paths,This,Links) diff --git a/erts/preloaded/src/erlang.erl b/erts/preloaded/src/erlang.erl index 1508eed9ee..98d7a942a6 100644 --- a/erts/preloaded/src/erlang.erl +++ b/erts/preloaded/src/erlang.erl @@ -79,7 +79,7 @@ -export([binary_to_integer/1,binary_to_integer/2]). -export([binary_to_list/1]). -export([binary_to_list/3, binary_to_term/1, binary_to_term/2]). --export([bit_size/1, bitsize/1, bitstr_to_list/1, bitstring_to_list/1]). +-export([bit_size/1, bitsize/1, bitstring_to_list/1]). -export([bump_reductions/1, byte_size/1, call_on_load_function/1]). -export([cancel_timer/1, check_old_code/1, check_process_code/2, check_process_code/3, crc32/1]). @@ -91,7 +91,7 @@ -export([external_size/2, finish_after_on_load/2, finish_loading/1, float/1]). -export([float_to_binary/1, float_to_binary/2, float_to_list/1, float_to_list/2]). --export([fun_info/2, fun_to_list/1, function_exported/3]). +-export([fun_info/2, fun_info_mfa/1, fun_to_list/1, function_exported/3]). -export([garbage_collect/0, garbage_collect/1, garbage_collect/2]). -export([garbage_collect_message_area/0, get/0, get/1, get_keys/1]). -export([get_module_info/1, get_stacktrace/0, group_leader/0]). @@ -100,7 +100,7 @@ -export([integer_to_binary/1, integer_to_list/1]). -export([iolist_size/1, iolist_to_binary/1]). -export([is_alive/0, is_builtin/3, is_process_alive/1, length/1, link/1]). --export([list_to_atom/1, list_to_binary/1, list_to_bitstr/1]). +-export([list_to_atom/1, list_to_binary/1]). -export([list_to_bitstring/1, list_to_existing_atom/1, list_to_float/1]). -export([list_to_integer/1, list_to_integer/2]). -export([list_to_pid/1, list_to_tuple/1, loaded/0]). @@ -361,25 +361,15 @@ binary_to_list(_Binary, _Start, _Stop) -> %% binary_to_term/1 -spec binary_to_term(Binary) -> term() when Binary :: ext_binary(). -binary_to_term(Binary) -> - %% This BIF may throw badarg while trapping - try - erts_internal:binary_to_term(Binary) - catch - error:Reason -> erlang:error(Reason,[Binary]) - end. +binary_to_term(_Binary) -> + erlang:nif_error(undefined). %% binary_to_term/2 -spec binary_to_term(Binary, Opts) -> term() when Binary :: ext_binary(), Opts :: [safe]. -binary_to_term(Binary, Opts) -> - %% This BIF may throw badarg while trapping - try - erts_internal:binary_to_term(Binary,Opts) - catch - error:Reason -> erlang:error(Reason,[Binary,Opts]) - end. +binary_to_term(_Binary, _Opts) -> + erlang:nif_error(undefined). 
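With the change above, binary_to_term/1,2 become plain BIF stubs again and the erts_internal indirection is dropped; the documented safe option still rejects encoded terms that would create new atoms or external function references. A small usage sketch under those assumptions; safe_decode/1 is an illustrative wrapper:

    %% Decode untrusted external data without letting it create new atoms.
    safe_decode(Bin) when is_binary(Bin) ->
        try
            {ok, binary_to_term(Bin, [safe])}
        catch
            error:badarg -> {error, unsafe_or_invalid}
        end.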
%% bit_size/1 %% Shadowed by erl_bif_types: erlang:bit_size/1 @@ -394,12 +384,6 @@ bit_size(_Bitstring) -> bitsize(_P1) -> erlang:nif_error(undefined). -%% bitstr_to_list/1 --spec erlang:bitstr_to_list(P1) -> [byte() | bitstring()] when - P1 :: bitstring(). -bitstr_to_list(_P1) -> - erlang:nif_error(undefined). - %% bitstring_to_list/1 -spec bitstring_to_list(Bitstring) -> [byte() | bitstring()] when Bitstring :: bitstring(). @@ -843,6 +827,15 @@ float_to_list(_Float, _Options) -> fun_info(_Fun, _Item) -> erlang:nif_error(undefined). +%% fun_info_mfa/1 +-spec erlang:fun_info_mfa(Fun) -> {Mod, Name, Arity} when + Fun :: function(), + Mod :: atom(), + Name :: atom(), + Arity :: non_neg_integer(). +fun_info_mfa(_Fun) -> + erlang:nif_error(undefined). + %% fun_to_list/1 -spec erlang:fun_to_list(Fun) -> string() when Fun :: function(). @@ -1082,12 +1075,6 @@ list_to_atom(_String) -> list_to_binary(_IoList) -> erlang:nif_error(undefined). -%% list_to_bitstr/1 --spec erlang:list_to_bitstr(P1) -> bitstring() when - P1 :: bitstring_list(). -list_to_bitstr(_P1) -> - erlang:nif_error(undefined). - %% list_to_bitstring/1 -spec list_to_bitstring(BitstringList) -> bitstring() when BitstringList :: bitstring_list(). @@ -2286,6 +2273,7 @@ tuple_to_list(_Tuple) -> (system_architecture) -> string(); (threads) -> boolean(); (thread_pool_size) -> non_neg_integer(); + (tolerant_timeofday) -> enabled | disabled; (trace_control_word) -> non_neg_integer(); (update_cpu_info) -> changed | unchanged; (version) -> string(); diff --git a/erts/preloaded/src/erts.app.src b/erts/preloaded/src/erts.app.src index a15da3a421..345a6ae3be 100644 --- a/erts/preloaded/src/erts.app.src +++ b/erts/preloaded/src/erts.app.src @@ -35,7 +35,6 @@ {registered, []}, {applications, []}, {env, []}, - {mod, {erts, []}}, {runtime_dependencies, ["stdlib-2.0", "kernel-3.0", "sasl-2.4"]} ]}. diff --git a/erts/preloaded/src/erts_internal.erl b/erts/preloaded/src/erts_internal.erl index 764d7730aa..2c5bd82cf0 100644 --- a/erts/preloaded/src/erts_internal.erl +++ b/erts/preloaded/src/erts_internal.erl @@ -29,7 +29,6 @@ -module(erts_internal). -export([await_port_send_result/3]). --export([binary_to_term/1, binary_to_term/2]). -export([cmp_term/2]). -export([map_to_tuple_keys/1]). -export([port_command/3, port_connect/2, port_close/1, @@ -162,17 +161,6 @@ request_system_task(_Pid, _Prio, _Request) -> check_process_code(_Module, _OptionList) -> erlang:nif_error(undefined). --spec binary_to_term(Binary) -> term() when - Binary :: binary(). -binary_to_term(_Binary) -> - erlang:nif_error(undefined). - --spec binary_to_term(Binary, Opts) -> term() when - Binary :: binary(), - Opts :: [safe]. -binary_to_term(_Binary, _Opts) -> - erlang:nif_error(undefined). - %% term compare where integer() < float() = true -spec cmp_term(A,B) -> Result when diff --git a/erts/preloaded/src/prim_inet.erl b/erts/preloaded/src/prim_inet.erl index 143c718130..79ff013c77 100644 --- a/erts/preloaded/src/prim_inet.erl +++ b/erts/preloaded/src/prim_inet.erl @@ -25,7 +25,7 @@ %% Primitive inet_drv interface --export([open/3, open/4, fdopen/4, close/1]). +-export([open/3, open/4, fdopen/4, fdopen/5, close/1]). -export([bind/3, listen/1, listen/2, peeloff/2]). -export([connect/3, connect/4, async_connect/4]). -export([accept/1, accept/2, async_accept/2]). @@ -70,7 +70,12 @@ open(Protocol, Family, Type, Opts) -> open(Protocol, Family, Type, Opts, ?INET_REQ_OPEN, []). 
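Among the erlang.erl changes above, the system_info/1 spec gains the tolerant_timeofday item, returning enabled | disabled, which the system_info_SUITE smoke test earlier also checks. A trivial sketch of querying it:

    %% True when time correction ("tolerant time of day") is active
    %% in the running emulator.
    tolerant_timeofday_enabled() ->
        erlang:system_info(tolerant_timeofday) =:= enabled.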
fdopen(Protocol, Family, Type, Fd) when is_integer(Fd) -> - open(Protocol, Family, Type, [], ?INET_REQ_FDOPEN, ?int32(Fd)). + fdopen(Protocol, Family, Type, Fd, true). + +fdopen(Protocol, Family, Type, Fd, Bound) + when is_integer(Fd), Bound == true orelse Bound == false -> + open(Protocol, Family, Type, [], ?INET_REQ_FDOPEN, + [?int32(Fd), enc_value_2(bool, Bound)]). open(Protocol, Family, Type, Opts, Req, Data) -> Drv = protocol2drv(Protocol), diff --git a/erts/test/upgrade_SUITE.erl b/erts/test/upgrade_SUITE.erl index ee84a5dfd7..d5a920e03d 100644 --- a/erts/test/upgrade_SUITE.erl +++ b/erts/test/upgrade_SUITE.erl @@ -264,7 +264,7 @@ do_upgrade(FromVsn,FromApps,ToRel,ToApps,InstallDir) -> %%%----------------------------------------------------------------- %%% Library functions previous_major("17") -> - "r16"; + "r16b"; previous_major(Rel) -> integer_to_list(list_to_integer(Rel)-1). diff --git a/erts/vsn.mk b/erts/vsn.mk index fff334c89f..b6a38f9361 100644 --- a/erts/vsn.mk +++ b/erts/vsn.mk @@ -17,7 +17,7 @@ # %CopyrightEnd% # -VSN = 6.0.2 +VSN = 6.2 # Port number 4365 in 4.2 # Port number 4366 in 4.3 |
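The fdopen/5 change above adds a Bound flag so a caller can hand over a file descriptor that has not been bound yet, while fdopen/4 keeps the old behaviour by passing true. A hedged sketch of the call shape; the udp/inet/dgram atoms reflect typical prim_inet usage and are an assumption here, not something this patch introduces:

    %% Wrap an already bound UDP socket descriptor; passing false instead
    %% would tell the inet driver that the fd still needs binding.
    wrap_existing_udp_fd(Fd) when is_integer(Fd) ->
        prim_inet:fdopen(udp, inet, dgram, Fd, true).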